v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * linux/mm/slab.c
   4 * Written by Mark Hemment, 1996/97.
   5 * (markhe@nextd.demon.co.uk)
   6 *
   7 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
   8 *
   9 * Major cleanup, different bufctl logic, per-cpu arrays
  10 *	(c) 2000 Manfred Spraul
  11 *
  12 * Cleanup, make the head arrays unconditional, preparation for NUMA
  13 * 	(c) 2002 Manfred Spraul
  14 *
  15 * An implementation of the Slab Allocator as described in outline in;
  16 *	UNIX Internals: The New Frontiers by Uresh Vahalia
  17 *	Pub: Prentice Hall	ISBN 0-13-101908-2
  18 * or with a little more detail in;
  19 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
  20 *	Jeff Bonwick (Sun Microsystems).
  21 *	Presented at: USENIX Summer 1994 Technical Conference
  22 *
  23 * The memory is organized in caches, one cache for each object type.
  24 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
  25 * Each cache consists of many slabs (they are small (usually one
  26 * page long) and always contiguous), and each slab contains multiple
  27 * initialized objects.
  28 *
  29 * This means that your constructor is used only for newly allocated
  30 * slabs and you must pass objects with the same initializations to
  31 * kmem_cache_free.
  32 *
  33 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
  34 * normal). If you need a special memory type, then you must create a new
  35 * cache for that memory type.
  36 *
  37 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
  38 *   full slabs with 0 free objects
  39 *   partial slabs
  40 *   empty slabs with no allocated objects
  41 *
  42 * If partial slabs exist, then new allocations come from these slabs,
  43 * otherwise they come from empty slabs, or new slabs are allocated.
  44 *
  45 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
  46 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
  47 *
  48 * Each cache has a short per-cpu head array, most allocs
  49 * and frees go into that array, and if that array overflows, then 1/2
  50 * of the entries in the array are given back into the global cache.
  51 * The head array is strictly LIFO and should improve the cache hit rates.
  52 * On SMP, it additionally reduces the spinlock operations.
  53 *
  54 * The c_cpuarray may not be read with local interrupts enabled -
  55 * it's changed with a smp_call_function().
  56 *
  57 * SMP synchronization:
  58 *  constructors and destructors are called without any locking.
  59 *  Several members in struct kmem_cache and struct slab never change, they
  60 *	are accessed without any locking.
  61 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
  62 *  	and local interrupts are disabled so slab code is preempt-safe.
  63 *  The non-constant members are protected with a per-cache irq spinlock.
  64 *
  65 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
  66 * in 2000 - many ideas in the current implementation are derived from
  67 * his patch.
  68 *
  69 * Further notes from the original documentation:
  70 *
  71 * 11 April '97.  Started multi-threading - markhe
  72 *	The global cache-chain is protected by the mutex 'slab_mutex'.
  73 *	The mutex is only needed when accessing/extending the cache-chain, which
  74 *	can never happen inside an interrupt (kmem_cache_create(),
  75 *	kmem_cache_shrink() and kmem_cache_reap()).
  76 *
  77 *	At present, each engine can be growing a cache.  This should be blocked.
  78 *
  79 * 15 March 2005. NUMA slab allocator.
  80 *	Shai Fultheim <shai@scalex86.org>.
  81 *	Shobhit Dayal <shobhit@calsoftinc.com>
  82 *	Alok N Kataria <alokk@calsoftinc.com>
  83 *	Christoph Lameter <christoph@lameter.com>
  84 *
  85 *	Modified the slab allocator to be node aware on NUMA systems.
  86 *	Each node has its own list of partial, free and full slabs.
  87 *	All object allocations for a node occur from node specific slab lists.
  88 */
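/*
 * Illustrative sketch of the API described above, for a hypothetical client;
 * 'struct foo', 'foo_ctor' and 'foo_cache' are invented names, not part of
 * this file.  The constructor runs once per object when a fresh slab page is
 * populated, and every object must go back to kmem_cache_free() in that same
 * constructed state so it can be handed out again without re-running the ctor:
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	static void foo_ctor(void *obj)
 *	{
 *		struct foo *f = obj;
 *
 *		spin_lock_init(&f->lock);
 *		INIT_LIST_HEAD(&f->list);
 *	}
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, foo_ctor);
 *	f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, f);	(f->lock and f->list still initialized)
 *	kmem_cache_destroy(foo_cache);	(caller must prevent concurrent allocs)
 */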
  89
  90#include	<linux/slab.h>
  91#include	<linux/mm.h>
  92#include	<linux/poison.h>
  93#include	<linux/swap.h>
  94#include	<linux/cache.h>
  95#include	<linux/interrupt.h>
  96#include	<linux/init.h>
  97#include	<linux/compiler.h>
  98#include	<linux/cpuset.h>
  99#include	<linux/proc_fs.h>
 100#include	<linux/seq_file.h>
 101#include	<linux/notifier.h>
 102#include	<linux/kallsyms.h>
 103#include	<linux/cpu.h>
 104#include	<linux/sysctl.h>
 105#include	<linux/module.h>
 106#include	<linux/rcupdate.h>
 107#include	<linux/string.h>
 108#include	<linux/uaccess.h>
 109#include	<linux/nodemask.h>
 110#include	<linux/kmemleak.h>
 111#include	<linux/mempolicy.h>
 112#include	<linux/mutex.h>
 113#include	<linux/fault-inject.h>
 114#include	<linux/rtmutex.h>
 115#include	<linux/reciprocal_div.h>
 116#include	<linux/debugobjects.h>
 117#include	<linux/memory.h>
 118#include	<linux/prefetch.h>
 119#include	<linux/sched/task_stack.h>
 120
 121#include	<net/sock.h>
 122
 123#include	<asm/cacheflush.h>
 124#include	<asm/tlbflush.h>
 125#include	<asm/page.h>
 126
 127#include <trace/events/kmem.h>
 128
 129#include	"internal.h"
 130
 131#include	"slab.h"
 132
 133/*
 134 * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
 135 *		  0 for faster, smaller code (especially in the critical paths).
 136 *
 137 * STATS	- 1 to collect stats for /proc/slabinfo.
 138 *		  0 for faster, smaller code (especially in the critical paths).
 139 *
 140 * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
 141 */
 142
 143#ifdef CONFIG_DEBUG_SLAB
 144#define	DEBUG		1
 145#define	STATS		1
 146#define	FORCED_DEBUG	1
 147#else
 148#define	DEBUG		0
 149#define	STATS		0
 150#define	FORCED_DEBUG	0
 151#endif
 152
 153/* Shouldn't this be in a header file somewhere? */
 154#define	BYTES_PER_WORD		sizeof(void *)
 155#define	REDZONE_ALIGN		max(BYTES_PER_WORD, __alignof__(unsigned long long))
 156
 157#ifndef ARCH_KMALLOC_FLAGS
 158#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
 159#endif
 160
 161#define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \
 162				<= SLAB_OBJ_MIN_SIZE) ? 1 : 0)
 163
 164#if FREELIST_BYTE_INDEX
 165typedef unsigned char freelist_idx_t;
 166#else
 167typedef unsigned short freelist_idx_t;
 168#endif
 169
 170#define SLAB_OBJ_MAX_NUM ((1 << sizeof(freelist_idx_t) * BITS_PER_BYTE) - 1)
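/*
 * Illustrative arithmetic (assuming 4 KB pages): PAGE_SIZE >> BITS_PER_BYTE is
 * 4096 / 256 = 16.  If the smallest object handed out (SLAB_OBJ_MIN_SIZE) is
 * at least that big, one page can never hold more than 256 objects, so
 * FREELIST_BYTE_INDEX is 1 and a one-byte freelist_idx_t is enough
 * (SLAB_OBJ_MAX_NUM = 255); otherwise a two-byte index is used and
 * SLAB_OBJ_MAX_NUM = 65535.
 */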
 171
 172/*
 173 * struct array_cache
 174 *
 175 * Purpose:
 176 * - LIFO ordering, to hand out cache-warm objects from _alloc
 177 * - reduce the number of linked list operations
 178 * - reduce spinlock operations
 179 *
 180 * The limit is stored in the per-cpu structure to reduce the data cache
 181 * footprint.
 182 *
 183 */
 184struct array_cache {
 185	unsigned int avail;
 186	unsigned int limit;
 187	unsigned int batchcount;
 188	unsigned int touched;
 189	void *entry[];	/*
 190			 * Must have this definition in here for the proper
 191			 * alignment of array_cache. Also simplifies accessing
 192			 * the entries.
 193			 */
 194};
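/*
 * Behavioural sketch (not original source): entry[] is used as a small LIFO
 * stack, so the fast paths in this file amount to
 *
 *	objp = ac->entry[--ac->avail];		(allocation hit)
 *	ac->entry[ac->avail++] = objp;		(free hit, see __free_one())
 *
 * and an allocation returns the most recently freed, cache-warm object.  Only
 * when avail drops to 0 (alloc) or hits limit (free) does the slow path take
 * the per-node list_lock and move batchcount objects at a time.
 */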
 195
 196struct alien_cache {
 197	spinlock_t lock;
 198	struct array_cache ac;
 199};
 200
 201/*
 202 * Need this for bootstrapping a per node allocator.
 203 */
 204#define NUM_INIT_LISTS (2 * MAX_NUMNODES)
 205static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS];
 206#define	CACHE_CACHE 0
 207#define	SIZE_NODE (MAX_NUMNODES)
 208
 209static int drain_freelist(struct kmem_cache *cache,
 210			struct kmem_cache_node *n, int tofree);
 211static void free_block(struct kmem_cache *cachep, void **objpp, int len,
 212			int node, struct list_head *list);
 213static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list);
 214static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
 215static void cache_reap(struct work_struct *unused);
 216
 217static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
 218						void **list);
 219static inline void fixup_slab_list(struct kmem_cache *cachep,
 220				struct kmem_cache_node *n, struct page *page,
 221				void **list);
 222static int slab_early_init = 1;
 223
 224#define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node))
 225
 226static void kmem_cache_node_init(struct kmem_cache_node *parent)
 227{
 228	INIT_LIST_HEAD(&parent->slabs_full);
 229	INIT_LIST_HEAD(&parent->slabs_partial);
 230	INIT_LIST_HEAD(&parent->slabs_free);
 231	parent->total_slabs = 0;
 232	parent->free_slabs = 0;
 233	parent->shared = NULL;
 234	parent->alien = NULL;
 235	parent->colour_next = 0;
 236	spin_lock_init(&parent->list_lock);
 237	parent->free_objects = 0;
 238	parent->free_touched = 0;
 239}
 240
 241#define MAKE_LIST(cachep, listp, slab, nodeid)				\
 242	do {								\
 243		INIT_LIST_HEAD(listp);					\
 244		list_splice(&get_node(cachep, nodeid)->slab, listp);	\
 245	} while (0)
 246
 247#define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
 248	do {								\
 249	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
 250	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
 251	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
 252	} while (0)
 253
 254#define CFLGS_OBJFREELIST_SLAB	((slab_flags_t __force)0x40000000U)
 255#define CFLGS_OFF_SLAB		((slab_flags_t __force)0x80000000U)
 256#define	OBJFREELIST_SLAB(x)	((x)->flags & CFLGS_OBJFREELIST_SLAB)
 257#define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)
 258
 259#define BATCHREFILL_LIMIT	16
 260/*
 261 * Optimization question: fewer reaps mean a lower probability of unnecessary
 262 * cpucache drain/refill cycles.
 263 *
 264 * OTOH the cpuarrays can contain lots of objects,
 265 * which could lock up otherwise freeable slabs.
 266 */
 267#define REAPTIMEOUT_AC		(2*HZ)
 268#define REAPTIMEOUT_NODE	(4*HZ)
 269
 270#if STATS
 271#define	STATS_INC_ACTIVE(x)	((x)->num_active++)
 272#define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
 273#define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
 274#define	STATS_INC_GROWN(x)	((x)->grown++)
 275#define	STATS_ADD_REAPED(x,y)	((x)->reaped += (y))
 276#define	STATS_SET_HIGH(x)						\
 277	do {								\
 278		if ((x)->num_active > (x)->high_mark)			\
 279			(x)->high_mark = (x)->num_active;		\
 280	} while (0)
 281#define	STATS_INC_ERR(x)	((x)->errors++)
 282#define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
 283#define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
 284#define STATS_INC_ACOVERFLOW(x)   ((x)->node_overflow++)
 285#define	STATS_SET_FREEABLE(x, i)					\
 286	do {								\
 287		if ((x)->max_freeable < i)				\
 288			(x)->max_freeable = i;				\
 289	} while (0)
 290#define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
 291#define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
 292#define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
 293#define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
 294#else
 295#define	STATS_INC_ACTIVE(x)	do { } while (0)
 296#define	STATS_DEC_ACTIVE(x)	do { } while (0)
 297#define	STATS_INC_ALLOCED(x)	do { } while (0)
 298#define	STATS_INC_GROWN(x)	do { } while (0)
 299#define	STATS_ADD_REAPED(x,y)	do { (void)(y); } while (0)
 300#define	STATS_SET_HIGH(x)	do { } while (0)
 301#define	STATS_INC_ERR(x)	do { } while (0)
 302#define	STATS_INC_NODEALLOCS(x)	do { } while (0)
 303#define	STATS_INC_NODEFREES(x)	do { } while (0)
 304#define STATS_INC_ACOVERFLOW(x)   do { } while (0)
 305#define	STATS_SET_FREEABLE(x, i) do { } while (0)
 306#define STATS_INC_ALLOCHIT(x)	do { } while (0)
 307#define STATS_INC_ALLOCMISS(x)	do { } while (0)
 308#define STATS_INC_FREEHIT(x)	do { } while (0)
 309#define STATS_INC_FREEMISS(x)	do { } while (0)
 310#endif
 311
 312#if DEBUG
 313
 314/*
 315 * memory layout of objects:
 316 * 0		: objp
 317 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
 318 * 		the end of an object is aligned with the end of the real
 319 * 		allocation. Catches writes behind the end of the allocation.
 320 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
 321 * 		redzone word.
 322 * cachep->obj_offset: The real object.
 323 * cachep->size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
 324 * cachep->size - 1* BYTES_PER_WORD: last caller address
 325 *					[BYTES_PER_WORD long]
 326 */
 327static int obj_offset(struct kmem_cache *cachep)
 328{
 329	return cachep->obj_offset;
 330}
 331
 332static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
 333{
 334	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
 335	return (unsigned long long*) (objp + obj_offset(cachep) -
 336				      sizeof(unsigned long long));
 337}
 338
 339static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
 340{
 341	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
 342	if (cachep->flags & SLAB_STORE_USER)
 343		return (unsigned long long *)(objp + cachep->size -
 344					      sizeof(unsigned long long) -
 345					      REDZONE_ALIGN);
 346	return (unsigned long long *) (objp + cachep->size -
 347				       sizeof(unsigned long long));
 348}
 349
 350static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 351{
 352	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
 353	return (void **)(objp + cachep->size - BYTES_PER_WORD);
 354}
 355
 356#else
 357
 358#define obj_offset(x)			0
 359#define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
 360#define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
 361#define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})
 362
 363#endif
 364
 365/*
 366 * Do not go above this order unless 0 objects fit into the slab or
 367 * overridden on the command line.
 368 */
 369#define	SLAB_MAX_ORDER_HI	1
 370#define	SLAB_MAX_ORDER_LO	0
 371static int slab_max_order = SLAB_MAX_ORDER_LO;
 372static bool slab_max_order_set __initdata;
 373
 374static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
 375				 unsigned int idx)
 376{
 377	return page->s_mem + cache->size * idx;
 378}
 379
 380#define BOOT_CPUCACHE_ENTRIES	1
 381/* internal cache of cache description objs */
 382static struct kmem_cache kmem_cache_boot = {
 383	.batchcount = 1,
 384	.limit = BOOT_CPUCACHE_ENTRIES,
 385	.shared = 1,
 386	.size = sizeof(struct kmem_cache),
 387	.name = "kmem_cache",
 388};
 389
 390static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
 391
 392static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
 393{
 394	return this_cpu_ptr(cachep->cpu_cache);
 395}
 396
 397/*
 398 * Calculate the number of objects and left-over bytes for a given buffer size.
 399 */
 400static unsigned int cache_estimate(unsigned long gfporder, size_t buffer_size,
 401		slab_flags_t flags, size_t *left_over)
 402{
 403	unsigned int num;
 404	size_t slab_size = PAGE_SIZE << gfporder;
 405
 406	/*
 407	 * The slab management structure can be either off the slab or
 408	 * on it. For the latter case, the memory allocated for a
 409	 * slab is used for:
 410	 *
 411	 * - @buffer_size bytes for each object
 412	 * - One freelist_idx_t for each object
 413	 *
 414	 * We don't need to consider alignment of freelist because
 415	 * freelist will be at the end of slab page. The objects will be
 416	 * at the correct alignment.
 417	 *
 418	 * If the slab management structure is off the slab, then the
 419	 * alignment will already be calculated into the size. Because
 420	 * the slabs are all pages aligned, the objects will be at the
 421	 * correct alignment when allocated.
 422	 */
 423	if (flags & (CFLGS_OBJFREELIST_SLAB | CFLGS_OFF_SLAB)) {
 424		num = slab_size / buffer_size;
 425		*left_over = slab_size % buffer_size;
 426	} else {
 427		num = slab_size / (buffer_size + sizeof(freelist_idx_t));
 428		*left_over = slab_size %
 429			(buffer_size + sizeof(freelist_idx_t));
 430	}
 431
 432	return num;
 433}
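/*
 * Worked example with illustrative numbers (4 KB page, 2-byte freelist_idx_t):
 * for a 248-byte object with an on-slab freelist at gfporder 0,
 * num = 4096 / (248 + 2) = 16 and left_over = 4096 - 16 * 250 = 96 bytes,
 * which is later carved into cache-colour offsets.  With CFLGS_OFF_SLAB or
 * CFLGS_OBJFREELIST_SLAB the index bytes are not counted, giving
 * num = 4096 / 248 = 16 and left_over = 128.
 */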
 434
 435#if DEBUG
 436#define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)
 437
 438static void __slab_error(const char *function, struct kmem_cache *cachep,
 439			char *msg)
 440{
 441	pr_err("slab error in %s(): cache `%s': %s\n",
 442	       function, cachep->name, msg);
 443	dump_stack();
 444	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
 445}
 446#endif
 447
 448/*
 449 * By default on NUMA we use alien caches to stage the freeing of
 450 * objects allocated from other nodes. This causes massive memory
 451 * inefficiencies when using fake NUMA setup to split memory into a
 452 * large number of small nodes, so it can be disabled on the command
 453 * line
 454  */
 455
 456static int use_alien_caches __read_mostly = 1;
 457static int __init noaliencache_setup(char *s)
 458{
 459	use_alien_caches = 0;
 460	return 1;
 461}
 462__setup("noaliencache", noaliencache_setup);
 463
 464static int __init slab_max_order_setup(char *str)
 465{
 466	get_option(&str, &slab_max_order);
 467	slab_max_order = slab_max_order < 0 ? 0 :
 468				min(slab_max_order, MAX_ORDER - 1);
 469	slab_max_order_set = true;
 470
 471	return 1;
 472}
 473__setup("slab_max_order=", slab_max_order_setup);
 474
 475#ifdef CONFIG_NUMA
 476/*
 477 * Special reaping functions for NUMA systems called from cache_reap().
 478 * These take care of doing round robin flushing of alien caches (containing
 479 * objects freed on different nodes from which they were allocated) and the
 480 * flushing of remote pcps by calling drain_node_pages.
 481 */
 482static DEFINE_PER_CPU(unsigned long, slab_reap_node);
 483
 484static void init_reap_node(int cpu)
 485{
 486	per_cpu(slab_reap_node, cpu) = next_node_in(cpu_to_mem(cpu),
 487						    node_online_map);
 488}
 489
 490static void next_reap_node(void)
 491{
 492	int node = __this_cpu_read(slab_reap_node);
 493
 494	node = next_node_in(node, node_online_map);
 495	__this_cpu_write(slab_reap_node, node);
 496}
 497
 498#else
 499#define init_reap_node(cpu) do { } while (0)
 500#define next_reap_node(void) do { } while (0)
 501#endif
 502
 503/*
 504 * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
 505 * via the workqueue/eventd.
 506 * Add the CPU number into the expiration time to minimize the possibility of
 507 * the CPUs getting into lockstep and contending for the global cache chain
 508 * lock.
 509 */
 510static void start_cpu_timer(int cpu)
 511{
 512	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);
 513
 514	if (reap_work->work.func == NULL) {
 515		init_reap_node(cpu);
 516		INIT_DEFERRABLE_WORK(reap_work, cache_reap);
 517		schedule_delayed_work_on(cpu, reap_work,
 518					__round_jiffies_relative(HZ, cpu));
 519	}
 520}
 521
 522static void init_arraycache(struct array_cache *ac, int limit, int batch)
 523{
 524	if (ac) {
 525		ac->avail = 0;
 526		ac->limit = limit;
 527		ac->batchcount = batch;
 528		ac->touched = 0;
 529	}
 530}
 531
 532static struct array_cache *alloc_arraycache(int node, int entries,
 533					    int batchcount, gfp_t gfp)
 534{
 535	size_t memsize = sizeof(void *) * entries + sizeof(struct array_cache);
 536	struct array_cache *ac = NULL;
 537
 538	ac = kmalloc_node(memsize, gfp, node);
 539	/*
 540	 * The array_cache structures contain pointers to free objects.
 541	 * However, when such objects are allocated or transferred to another
 542	 * cache the pointers are not cleared and they could be counted as
 543	 * valid references during a kmemleak scan. Therefore, kmemleak must
 544	 * not scan such objects.
 545	 */
 546	kmemleak_no_scan(ac);
 547	init_arraycache(ac, entries, batchcount);
 548	return ac;
 549}
 550
 551static noinline void cache_free_pfmemalloc(struct kmem_cache *cachep,
 552					struct page *page, void *objp)
 553{
 554	struct kmem_cache_node *n;
 555	int page_node;
 556	LIST_HEAD(list);
 557
 558	page_node = page_to_nid(page);
 559	n = get_node(cachep, page_node);
 560
 561	spin_lock(&n->list_lock);
 562	free_block(cachep, &objp, 1, page_node, &list);
 563	spin_unlock(&n->list_lock);
 564
 565	slabs_destroy(cachep, &list);
 566}
 567
 568/*
 569 * Transfer objects in one arraycache to another.
 570 * Locking must be handled by the caller.
 571 *
 572 * Return the number of entries transferred.
 573 */
 574static int transfer_objects(struct array_cache *to,
 575		struct array_cache *from, unsigned int max)
 576{
 577	/* Figure out how many entries to transfer */
 578	int nr = min3(from->avail, max, to->limit - to->avail);
 579
 580	if (!nr)
 581		return 0;
 582
 583	memcpy(to->entry + to->avail, from->entry + from->avail - nr,
 584			sizeof(void *) * nr);
 585
 586	from->avail -= nr;
 587	to->avail += nr;
 588	return nr;
 589}
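/*
 * Illustrative numbers: with from->avail = 30, max = 24 and
 * to->limit - to->avail = 10, nr = min3(...) = 10, so the ten most recently
 * pushed (warmest) entries of 'from' are copied onto the top of 'to' and both
 * avail counters are adjusted.
 */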
 590
 591/* &alien->lock must be held by alien callers. */
 592static __always_inline void __free_one(struct array_cache *ac, void *objp)
 593{
 594	/* Avoid trivial double-free. */
 595	if (IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
 596	    WARN_ON_ONCE(ac->avail > 0 && ac->entry[ac->avail - 1] == objp))
 597		return;
 598	ac->entry[ac->avail++] = objp;
 599}
 600
 601#ifndef CONFIG_NUMA
 602
 603#define drain_alien_cache(cachep, alien) do { } while (0)
 604#define reap_alien(cachep, n) do { } while (0)
 605
 606static inline struct alien_cache **alloc_alien_cache(int node,
 607						int limit, gfp_t gfp)
 608{
 609	return NULL;
 610}
 611
 612static inline void free_alien_cache(struct alien_cache **ac_ptr)
 613{
 614}
 615
 616static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 617{
 618	return 0;
 619}
 620
 621static inline void *alternate_node_alloc(struct kmem_cache *cachep,
 622		gfp_t flags)
 623{
 624	return NULL;
 625}
 626
 627static inline void *____cache_alloc_node(struct kmem_cache *cachep,
 628		 gfp_t flags, int nodeid)
 629{
 630	return NULL;
 631}
 632
 633static inline gfp_t gfp_exact_node(gfp_t flags)
 634{
 635	return flags & ~__GFP_NOFAIL;
 636}
 637
 638#else	/* CONFIG_NUMA */
 639
 640static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
 641static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
 642
 643static struct alien_cache *__alloc_alien_cache(int node, int entries,
 644						int batch, gfp_t gfp)
 645{
 646	size_t memsize = sizeof(void *) * entries + sizeof(struct alien_cache);
 647	struct alien_cache *alc = NULL;
 648
 649	alc = kmalloc_node(memsize, gfp, node);
 650	if (alc) {
 651		kmemleak_no_scan(alc);
 652		init_arraycache(&alc->ac, entries, batch);
 653		spin_lock_init(&alc->lock);
 654	}
 655	return alc;
 656}
 657
 658static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
 659{
 660	struct alien_cache **alc_ptr;
 661	int i;
 662
 663	if (limit > 1)
 664		limit = 12;
 665	alc_ptr = kcalloc_node(nr_node_ids, sizeof(void *), gfp, node);
 666	if (!alc_ptr)
 667		return NULL;
 668
 669	for_each_node(i) {
 670		if (i == node || !node_online(i))
 671			continue;
 672		alc_ptr[i] = __alloc_alien_cache(node, limit, 0xbaadf00d, gfp);
 673		if (!alc_ptr[i]) {
 674			for (i--; i >= 0; i--)
 675				kfree(alc_ptr[i]);
 676			kfree(alc_ptr);
 677			return NULL;
 678		}
 679	}
 680	return alc_ptr;
 681}
 682
 683static void free_alien_cache(struct alien_cache **alc_ptr)
 684{
 685	int i;
 686
 687	if (!alc_ptr)
 688		return;
 689	for_each_node(i)
 690	    kfree(alc_ptr[i]);
 691	kfree(alc_ptr);
 692}
 693
 694static void __drain_alien_cache(struct kmem_cache *cachep,
 695				struct array_cache *ac, int node,
 696				struct list_head *list)
 697{
 698	struct kmem_cache_node *n = get_node(cachep, node);
 699
 700	if (ac->avail) {
 701		spin_lock(&n->list_lock);
 702		/*
 703		 * Stuff objects into the remote nodes shared array first.
 704		 * That way we could avoid the overhead of putting the objects
 705		 * into the free lists and getting them back later.
 706		 */
 707		if (n->shared)
 708			transfer_objects(n->shared, ac, ac->limit);
 709
 710		free_block(cachep, ac->entry, ac->avail, node, list);
 711		ac->avail = 0;
 712		spin_unlock(&n->list_lock);
 713	}
 714}
 715
 716/*
 717 * Called from cache_reap() to regularly drain alien caches round robin.
 718 */
 719static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
 720{
 721	int node = __this_cpu_read(slab_reap_node);
 722
 723	if (n->alien) {
 724		struct alien_cache *alc = n->alien[node];
 725		struct array_cache *ac;
 726
 727		if (alc) {
 728			ac = &alc->ac;
 729			if (ac->avail && spin_trylock_irq(&alc->lock)) {
 730				LIST_HEAD(list);
 731
 732				__drain_alien_cache(cachep, ac, node, &list);
 733				spin_unlock_irq(&alc->lock);
 734				slabs_destroy(cachep, &list);
 735			}
 736		}
 737	}
 738}
 739
 740static void drain_alien_cache(struct kmem_cache *cachep,
 741				struct alien_cache **alien)
 742{
 743	int i = 0;
 744	struct alien_cache *alc;
 745	struct array_cache *ac;
 746	unsigned long flags;
 747
 748	for_each_online_node(i) {
 749		alc = alien[i];
 750		if (alc) {
 751			LIST_HEAD(list);
 752
 753			ac = &alc->ac;
 754			spin_lock_irqsave(&alc->lock, flags);
 755			__drain_alien_cache(cachep, ac, i, &list);
 756			spin_unlock_irqrestore(&alc->lock, flags);
 757			slabs_destroy(cachep, &list);
 758		}
 759	}
 760}
 761
 762static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
 763				int node, int page_node)
 764{
 765	struct kmem_cache_node *n;
 766	struct alien_cache *alien = NULL;
 767	struct array_cache *ac;
 768	LIST_HEAD(list);
 769
 770	n = get_node(cachep, node);
 771	STATS_INC_NODEFREES(cachep);
 772	if (n->alien && n->alien[page_node]) {
 773		alien = n->alien[page_node];
 774		ac = &alien->ac;
 775		spin_lock(&alien->lock);
 776		if (unlikely(ac->avail == ac->limit)) {
 777			STATS_INC_ACOVERFLOW(cachep);
 778			__drain_alien_cache(cachep, ac, page_node, &list);
 779		}
 780		__free_one(ac, objp);
 781		spin_unlock(&alien->lock);
 782		slabs_destroy(cachep, &list);
 783	} else {
 784		n = get_node(cachep, page_node);
 785		spin_lock(&n->list_lock);
 786		free_block(cachep, &objp, 1, page_node, &list);
 787		spin_unlock(&n->list_lock);
 788		slabs_destroy(cachep, &list);
 789	}
 790	return 1;
 791}
 792
 793static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 794{
 795	int page_node = page_to_nid(virt_to_page(objp));
 796	int node = numa_mem_id();
 797	/*
 798	 * Make sure we are not freeing an object from another node to the array
 799	 * cache on this cpu.
 800	 */
 801	if (likely(node == page_node))
 802		return 0;
 803
 804	return __cache_free_alien(cachep, objp, node, page_node);
 805}
 806
 807/*
 808 * Construct gfp mask to allocate from a specific node but do not reclaim or
 809 * warn about failures.
 810 */
 811static inline gfp_t gfp_exact_node(gfp_t flags)
 812{
 813	return (flags | __GFP_THISNODE | __GFP_NOWARN) & ~(__GFP_RECLAIM|__GFP_NOFAIL);
 814}
 815#endif
 816
 817static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
 818{
 819	struct kmem_cache_node *n;
 820
 821	/*
 822	 * Set up the kmem_cache_node for cpu before we can
 823	 * begin anything. Make sure some other cpu on this
 824	 * node has not already allocated this
 825	 */
 826	n = get_node(cachep, node);
 827	if (n) {
 828		spin_lock_irq(&n->list_lock);
 829		n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount +
 830				cachep->num;
 831		spin_unlock_irq(&n->list_lock);
 832
 833		return 0;
 834	}
 835
 836	n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node);
 837	if (!n)
 838		return -ENOMEM;
 839
 840	kmem_cache_node_init(n);
 841	n->next_reap = jiffies + REAPTIMEOUT_NODE +
 842		    ((unsigned long)cachep) % REAPTIMEOUT_NODE;
 843
 844	n->free_limit =
 845		(1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num;
 846
 847	/*
 848	 * The kmem_cache_nodes don't come and go as CPUs
 849	 * come and go.  slab_mutex is sufficient
 850	 * protection here.
 851	 */
 852	cachep->node[node] = n;
 853
 854	return 0;
 855}
 856
 857#if (defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)) || defined(CONFIG_SMP)
 858/*
 859 * Allocates and initializes the kmem_cache_node for a node on each slab cache,
 860 * used for either memory or cpu hotplug.  If memory is being hot-added, the
 861 * kmem_cache_node will be allocated off-node since memory is not yet online for
 862 * the new node.  When hotplugging memory or a cpu, existing nodes are not
 863 * replaced if already in use.
 864 *
 865 * Must hold slab_mutex.
 866 */
 867static int init_cache_node_node(int node)
 868{
 869	int ret;
 870	struct kmem_cache *cachep;
 871
 872	list_for_each_entry(cachep, &slab_caches, list) {
 873		ret = init_cache_node(cachep, node, GFP_KERNEL);
 874		if (ret)
 875			return ret;
 876	}
 877
 878	return 0;
 879}
 880#endif
 881
 882static int setup_kmem_cache_node(struct kmem_cache *cachep,
 883				int node, gfp_t gfp, bool force_change)
 884{
 885	int ret = -ENOMEM;
 886	struct kmem_cache_node *n;
 887	struct array_cache *old_shared = NULL;
 888	struct array_cache *new_shared = NULL;
 889	struct alien_cache **new_alien = NULL;
 890	LIST_HEAD(list);
 891
 892	if (use_alien_caches) {
 893		new_alien = alloc_alien_cache(node, cachep->limit, gfp);
 894		if (!new_alien)
 895			goto fail;
 896	}
 897
 898	if (cachep->shared) {
 899		new_shared = alloc_arraycache(node,
 900			cachep->shared * cachep->batchcount, 0xbaadf00d, gfp);
 901		if (!new_shared)
 902			goto fail;
 903	}
 904
 905	ret = init_cache_node(cachep, node, gfp);
 906	if (ret)
 907		goto fail;
 908
 909	n = get_node(cachep, node);
 910	spin_lock_irq(&n->list_lock);
 911	if (n->shared && force_change) {
 912		free_block(cachep, n->shared->entry,
 913				n->shared->avail, node, &list);
 914		n->shared->avail = 0;
 915	}
 916
 917	if (!n->shared || force_change) {
 918		old_shared = n->shared;
 919		n->shared = new_shared;
 920		new_shared = NULL;
 921	}
 922
 923	if (!n->alien) {
 924		n->alien = new_alien;
 925		new_alien = NULL;
 926	}
 927
 928	spin_unlock_irq(&n->list_lock);
 929	slabs_destroy(cachep, &list);
 930
 931	/*
 932	 * To protect lockless access to n->shared during irq disabled context.
 933	 * If n->shared isn't NULL in irq disabled context, accessing it is
 934	 * guaranteed to be valid until irq is re-enabled, because it will be
 935	 * freed after synchronize_rcu().
 936	 */
 937	if (old_shared && force_change)
 938		synchronize_rcu();
 939
 940fail:
 941	kfree(old_shared);
 942	kfree(new_shared);
 943	free_alien_cache(new_alien);
 944
 945	return ret;
 946}
 947
 948#ifdef CONFIG_SMP
 949
 950static void cpuup_canceled(long cpu)
 951{
 952	struct kmem_cache *cachep;
 953	struct kmem_cache_node *n = NULL;
 954	int node = cpu_to_mem(cpu);
 955	const struct cpumask *mask = cpumask_of_node(node);
 956
 957	list_for_each_entry(cachep, &slab_caches, list) {
 958		struct array_cache *nc;
 959		struct array_cache *shared;
 960		struct alien_cache **alien;
 961		LIST_HEAD(list);
 962
 963		n = get_node(cachep, node);
 964		if (!n)
 965			continue;
 966
 967		spin_lock_irq(&n->list_lock);
 968
 969		/* Free limit for this kmem_cache_node */
 970		n->free_limit -= cachep->batchcount;
 971
 972		/* cpu is dead; no one can alloc from it. */
 973		nc = per_cpu_ptr(cachep->cpu_cache, cpu);
 974		free_block(cachep, nc->entry, nc->avail, node, &list);
 975		nc->avail = 0;
 976
 977		if (!cpumask_empty(mask)) {
 978			spin_unlock_irq(&n->list_lock);
 979			goto free_slab;
 980		}
 981
 982		shared = n->shared;
 983		if (shared) {
 984			free_block(cachep, shared->entry,
 985				   shared->avail, node, &list);
 986			n->shared = NULL;
 987		}
 988
 989		alien = n->alien;
 990		n->alien = NULL;
 991
 992		spin_unlock_irq(&n->list_lock);
 993
 994		kfree(shared);
 995		if (alien) {
 996			drain_alien_cache(cachep, alien);
 997			free_alien_cache(alien);
 998		}
 999
1000free_slab:
1001		slabs_destroy(cachep, &list);
1002	}
1003	/*
1004	 * In the previous loop, all the objects were freed to
1005	 * the respective cache's slabs,  now we can go ahead and
1006	 * shrink each nodelist to its limit.
1007	 */
1008	list_for_each_entry(cachep, &slab_caches, list) {
1009		n = get_node(cachep, node);
1010		if (!n)
1011			continue;
1012		drain_freelist(cachep, n, INT_MAX);
1013	}
1014}
1015
1016static int cpuup_prepare(long cpu)
1017{
1018	struct kmem_cache *cachep;
1019	int node = cpu_to_mem(cpu);
1020	int err;
1021
1022	/*
1023	 * We need to do this right in the beginning since
1024	 * alloc_arraycache's are going to use this list.
1025	 * kmalloc_node allows us to add the slab to the right
1026	 * kmem_cache_node and not this cpu's kmem_cache_node
1027	 */
1028	err = init_cache_node_node(node);
1029	if (err < 0)
1030		goto bad;
1031
1032	/*
1033	 * Now we can go ahead with allocating the shared arrays and
1034	 * array caches
1035	 */
1036	list_for_each_entry(cachep, &slab_caches, list) {
1037		err = setup_kmem_cache_node(cachep, node, GFP_KERNEL, false);
1038		if (err)
1039			goto bad;
1040	}
1041
1042	return 0;
1043bad:
1044	cpuup_canceled(cpu);
1045	return -ENOMEM;
1046}
1047
1048int slab_prepare_cpu(unsigned int cpu)
1049{
1050	int err;
1051
1052	mutex_lock(&slab_mutex);
1053	err = cpuup_prepare(cpu);
1054	mutex_unlock(&slab_mutex);
1055	return err;
1056}
1057
1058/*
1059 * This is called for a failed online attempt and for a successful
1060 * offline.
1061 *
1062 * Even if all the cpus of a node are down, we don't free the
1063 * kmem_cache_node of any cache. This to avoid a race between cpu_down, and
1064 * a kmalloc allocation from another cpu for memory from the node of
1065 * the cpu going down.  The list3 structure is usually allocated from
1066 * kmem_cache_create() and gets destroyed at kmem_cache_destroy().
1067 */
1068int slab_dead_cpu(unsigned int cpu)
1069{
1070	mutex_lock(&slab_mutex);
1071	cpuup_canceled(cpu);
1072	mutex_unlock(&slab_mutex);
1073	return 0;
1074}
1075#endif
1076
1077static int slab_online_cpu(unsigned int cpu)
1078{
1079	start_cpu_timer(cpu);
1080	return 0;
1081}
1082
1083static int slab_offline_cpu(unsigned int cpu)
1084{
1085	/*
1086	 * Shutdown cache reaper. Note that the slab_mutex is held so
1087	 * that if cache_reap() is invoked it cannot do anything
1088	 * expensive but will only modify reap_work and reschedule the
1089	 * timer.
1090	 */
1091	cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
1092	/* Now the cache_reaper is guaranteed to be not running. */
1093	per_cpu(slab_reap_work, cpu).work.func = NULL;
1094	return 0;
1095}
1096
1097#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
1098/*
1099 * Drains freelist for a node on each slab cache, used for memory hot-remove.
1100 * Returns -EBUSY if all objects cannot be drained so that the node is not
1101 * removed.
1102 *
1103 * Must hold slab_mutex.
1104 */
1105static int __meminit drain_cache_node_node(int node)
1106{
1107	struct kmem_cache *cachep;
1108	int ret = 0;
1109
1110	list_for_each_entry(cachep, &slab_caches, list) {
1111		struct kmem_cache_node *n;
1112
1113		n = get_node(cachep, node);
1114		if (!n)
1115			continue;
1116
1117		drain_freelist(cachep, n, INT_MAX);
1118
1119		if (!list_empty(&n->slabs_full) ||
1120		    !list_empty(&n->slabs_partial)) {
1121			ret = -EBUSY;
1122			break;
1123		}
1124	}
1125	return ret;
1126}
1127
1128static int __meminit slab_memory_callback(struct notifier_block *self,
1129					unsigned long action, void *arg)
1130{
1131	struct memory_notify *mnb = arg;
1132	int ret = 0;
1133	int nid;
1134
1135	nid = mnb->status_change_nid;
1136	if (nid < 0)
1137		goto out;
1138
1139	switch (action) {
1140	case MEM_GOING_ONLINE:
1141		mutex_lock(&slab_mutex);
1142		ret = init_cache_node_node(nid);
1143		mutex_unlock(&slab_mutex);
1144		break;
1145	case MEM_GOING_OFFLINE:
1146		mutex_lock(&slab_mutex);
1147		ret = drain_cache_node_node(nid);
1148		mutex_unlock(&slab_mutex);
1149		break;
1150	case MEM_ONLINE:
1151	case MEM_OFFLINE:
1152	case MEM_CANCEL_ONLINE:
1153	case MEM_CANCEL_OFFLINE:
1154		break;
1155	}
1156out:
1157	return notifier_from_errno(ret);
1158}
1159#endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */
1160
1161/*
1162 * swap the static kmem_cache_node with kmalloced memory
1163 */
1164static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list,
1165				int nodeid)
1166{
1167	struct kmem_cache_node *ptr;
1168
1169	ptr = kmalloc_node(sizeof(struct kmem_cache_node), GFP_NOWAIT, nodeid);
1170	BUG_ON(!ptr);
1171
1172	memcpy(ptr, list, sizeof(struct kmem_cache_node));
1173	/*
1174	 * Do not assume that spinlocks can be initialized via memcpy:
1175	 */
1176	spin_lock_init(&ptr->list_lock);
1177
1178	MAKE_ALL_LISTS(cachep, ptr, nodeid);
1179	cachep->node[nodeid] = ptr;
1180}
1181
1182/*
1183 * For setting up all the kmem_cache_node structures for a cache whose
1184 * buffer_size is the same as the size of kmem_cache_node.
1185 */
1186static void __init set_up_node(struct kmem_cache *cachep, int index)
1187{
1188	int node;
1189
1190	for_each_online_node(node) {
1191		cachep->node[node] = &init_kmem_cache_node[index + node];
1192		cachep->node[node]->next_reap = jiffies +
1193		    REAPTIMEOUT_NODE +
1194		    ((unsigned long)cachep) % REAPTIMEOUT_NODE;
1195	}
1196}
1197
1198/*
1199 * Initialisation.  Called after the page allocator has been initialised and
1200 * before smp_init().
1201 */
1202void __init kmem_cache_init(void)
1203{
1204	int i;
1205
1206	kmem_cache = &kmem_cache_boot;
1207
1208	if (!IS_ENABLED(CONFIG_NUMA) || num_possible_nodes() == 1)
1209		use_alien_caches = 0;
1210
1211	for (i = 0; i < NUM_INIT_LISTS; i++)
1212		kmem_cache_node_init(&init_kmem_cache_node[i]);
1213
1214	/*
1215	 * Fragmentation resistance on low memory - only use bigger
1216	 * page orders on machines with more than 32MB of memory if
1217	 * not overridden on the command line.
1218	 */
1219	if (!slab_max_order_set && totalram_pages() > (32 << 20) >> PAGE_SHIFT)
1220		slab_max_order = SLAB_MAX_ORDER_HI;
1221
1222	/* Bootstrap is tricky, because several objects are allocated
1223	 * from caches that do not exist yet:
1224	 * 1) initialize the kmem_cache cache: it contains the struct
1225	 *    kmem_cache structures of all caches, except kmem_cache itself:
1226	 *    kmem_cache is statically allocated.
1227	 *    Initially an __init data area is used for the head array and the
1228	 *    kmem_cache_node structures, it's replaced with a kmalloc allocated
1229	 *    array at the end of the bootstrap.
1230	 * 2) Create the first kmalloc cache.
1231	 *    The struct kmem_cache for the new cache is allocated normally.
1232	 *    An __init data area is used for the head array.
1233	 * 3) Create the remaining kmalloc caches, with minimally sized
1234	 *    head arrays.
1235	 * 4) Replace the __init data head arrays for kmem_cache and the first
1236	 *    kmalloc cache with kmalloc allocated arrays.
1237	 * 5) Replace the __init data for kmem_cache_node for kmem_cache and
1238	 *    the other cache's with kmalloc allocated memory.
1239	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
1240	 */
1241
1242	/* 1) create the kmem_cache */
1243
1244	/*
1245	 * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
1246	 */
1247	create_boot_cache(kmem_cache, "kmem_cache",
1248		offsetof(struct kmem_cache, node) +
1249				  nr_node_ids * sizeof(struct kmem_cache_node *),
1250				  SLAB_HWCACHE_ALIGN, 0, 0);
1251	list_add(&kmem_cache->list, &slab_caches);
1252	slab_state = PARTIAL;
1253
1254	/*
1255	 * Initialize the caches that provide memory for the  kmem_cache_node
1256	 * structures first.  Without this, further allocations will bug.
1257	 */
1258	kmalloc_caches[KMALLOC_NORMAL][INDEX_NODE] = create_kmalloc_cache(
1259				kmalloc_info[INDEX_NODE].name[KMALLOC_NORMAL],
1260				kmalloc_info[INDEX_NODE].size,
1261				ARCH_KMALLOC_FLAGS, 0,
1262				kmalloc_info[INDEX_NODE].size);
1263	slab_state = PARTIAL_NODE;
1264	setup_kmalloc_cache_index_table();
1265
1266	slab_early_init = 0;
1267
1268	/* 5) Replace the bootstrap kmem_cache_node */
1269	{
1270		int nid;
1271
1272		for_each_online_node(nid) {
1273			init_list(kmem_cache, &init_kmem_cache_node[CACHE_CACHE + nid], nid);
1274
1275			init_list(kmalloc_caches[KMALLOC_NORMAL][INDEX_NODE],
1276					  &init_kmem_cache_node[SIZE_NODE + nid], nid);
1277		}
1278	}
1279
1280	create_kmalloc_caches(ARCH_KMALLOC_FLAGS);
1281}
1282
1283void __init kmem_cache_init_late(void)
1284{
1285	struct kmem_cache *cachep;
1286
1287	/* 6) resize the head arrays to their final sizes */
1288	mutex_lock(&slab_mutex);
1289	list_for_each_entry(cachep, &slab_caches, list)
1290		if (enable_cpucache(cachep, GFP_NOWAIT))
1291			BUG();
1292	mutex_unlock(&slab_mutex);
1293
1294	/* Done! */
1295	slab_state = FULL;
1296
1297#ifdef CONFIG_NUMA
1298	/*
1299	 * Register a memory hotplug callback that initializes and frees
1300	 * node.
1301	 */
1302	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
1303#endif
1304
1305	/*
1306	 * The reap timers are started later, with a module init call: That part
1307	 * of the kernel is not yet operational.
1308	 */
1309}
1310
1311static int __init cpucache_init(void)
1312{
1313	int ret;
1314
1315	/*
1316	 * Register the timers that return unneeded pages to the page allocator
1317	 */
1318	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SLAB online",
1319				slab_online_cpu, slab_offline_cpu);
1320	WARN_ON(ret < 0);
1321
1322	return 0;
1323}
1324__initcall(cpucache_init);
1325
1326static noinline void
1327slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
1328{
1329#if DEBUG
1330	struct kmem_cache_node *n;
1331	unsigned long flags;
1332	int node;
1333	static DEFINE_RATELIMIT_STATE(slab_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
1334				      DEFAULT_RATELIMIT_BURST);
1335
1336	if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slab_oom_rs))
1337		return;
1338
1339	pr_warn("SLAB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
1340		nodeid, gfpflags, &gfpflags);
1341	pr_warn("  cache: %s, object size: %d, order: %d\n",
1342		cachep->name, cachep->size, cachep->gfporder);
1343
1344	for_each_kmem_cache_node(cachep, node, n) {
1345		unsigned long total_slabs, free_slabs, free_objs;
1346
1347		spin_lock_irqsave(&n->list_lock, flags);
1348		total_slabs = n->total_slabs;
1349		free_slabs = n->free_slabs;
1350		free_objs = n->free_objects;
1351		spin_unlock_irqrestore(&n->list_lock, flags);
1352
1353		pr_warn("  node %d: slabs: %ld/%ld, objs: %ld/%ld\n",
1354			node, total_slabs - free_slabs, total_slabs,
1355			(total_slabs * cachep->num) - free_objs,
1356			total_slabs * cachep->num);
1357	}
1358#endif
1359}
1360
1361/*
1362 * Interface to system's page allocator. No need to hold the
1363 * kmem_cache_node ->list_lock.
1364 *
1365 * If we requested dmaable memory, we will get it. Even if we
1366 * did not request dmaable memory, we might get it, but that
1367 * would be relatively rare and ignorable.
1368 */
1369static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
1370								int nodeid)
1371{
1372	struct page *page;
1373
1374	flags |= cachep->allocflags;
1375
1376	page = __alloc_pages_node(nodeid, flags, cachep->gfporder);
1377	if (!page) {
1378		slab_out_of_memory(cachep, flags, nodeid);
1379		return NULL;
1380	}
1381
1382	account_slab_page(page, cachep->gfporder, cachep);
1383	__SetPageSlab(page);
1384	/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
1385	if (sk_memalloc_socks() && page_is_pfmemalloc(page))
1386		SetPageSlabPfmemalloc(page);
1387
1388	return page;
1389}
1390
1391/*
1392 * Interface to system's page release.
1393 */
1394static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
1395{
1396	int order = cachep->gfporder;
1397
1398	BUG_ON(!PageSlab(page));
1399	__ClearPageSlabPfmemalloc(page);
1400	__ClearPageSlab(page);
1401	page_mapcount_reset(page);
1402	page->mapping = NULL;
1403
1404	if (current->reclaim_state)
1405		current->reclaim_state->reclaimed_slab += 1 << order;
1406	unaccount_slab_page(page, order, cachep);
1407	__free_pages(page, order);
1408}
1409
1410static void kmem_rcu_free(struct rcu_head *head)
1411{
1412	struct kmem_cache *cachep;
1413	struct page *page;
1414
1415	page = container_of(head, struct page, rcu_head);
1416	cachep = page->slab_cache;
1417
1418	kmem_freepages(cachep, page);
1419}
1420
1421#if DEBUG
1422static bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
1423{
1424	if (debug_pagealloc_enabled_static() && OFF_SLAB(cachep) &&
1425		(cachep->size % PAGE_SIZE) == 0)
1426		return true;
1427
1428	return false;
1429}
1430
1431#ifdef CONFIG_DEBUG_PAGEALLOC
1432static void slab_kernel_map(struct kmem_cache *cachep, void *objp, int map)
1433{
1434	if (!is_debug_pagealloc_cache(cachep))
1435		return;
1436
1437	kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map);
1438}
1439
1440#else
1441static inline void slab_kernel_map(struct kmem_cache *cachep, void *objp,
1442				int map) {}
1443
1444#endif
1445
1446static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
1447{
1448	int size = cachep->object_size;
1449	addr = &((char *)addr)[obj_offset(cachep)];
1450
1451	memset(addr, val, size);
1452	*(unsigned char *)(addr + size - 1) = POISON_END;
1453}
1454
1455static void dump_line(char *data, int offset, int limit)
1456{
1457	int i;
1458	unsigned char error = 0;
1459	int bad_count = 0;
1460
1461	pr_err("%03x: ", offset);
1462	for (i = 0; i < limit; i++) {
1463		if (data[offset + i] != POISON_FREE) {
1464			error = data[offset + i];
1465			bad_count++;
1466		}
1467	}
1468	print_hex_dump(KERN_CONT, "", 0, 16, 1,
1469			&data[offset], limit, 1);
1470
1471	if (bad_count == 1) {
1472		error ^= POISON_FREE;
1473		if (!(error & (error - 1))) {
1474			pr_err("Single bit error detected. Probably bad RAM.\n");
1475#ifdef CONFIG_X86
1476			pr_err("Run memtest86+ or a similar memory test tool.\n");
1477#else
1478			pr_err("Run a memory test tool.\n");
1479#endif
1480		}
1481	}
1482}
1483#endif
1484
1485#if DEBUG
1486
1487static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
1488{
1489	int i, size;
1490	char *realobj;
1491
1492	if (cachep->flags & SLAB_RED_ZONE) {
1493		pr_err("Redzone: 0x%llx/0x%llx\n",
1494		       *dbg_redzone1(cachep, objp),
1495		       *dbg_redzone2(cachep, objp));
1496	}
1497
1498	if (cachep->flags & SLAB_STORE_USER)
1499		pr_err("Last user: (%pSR)\n", *dbg_userword(cachep, objp));
1500	realobj = (char *)objp + obj_offset(cachep);
1501	size = cachep->object_size;
1502	for (i = 0; i < size && lines; i += 16, lines--) {
1503		int limit;
1504		limit = 16;
1505		if (i + limit > size)
1506			limit = size - i;
1507		dump_line(realobj, i, limit);
1508	}
1509}
1510
1511static void check_poison_obj(struct kmem_cache *cachep, void *objp)
1512{
1513	char *realobj;
1514	int size, i;
1515	int lines = 0;
1516
1517	if (is_debug_pagealloc_cache(cachep))
1518		return;
1519
1520	realobj = (char *)objp + obj_offset(cachep);
1521	size = cachep->object_size;
1522
1523	for (i = 0; i < size; i++) {
1524		char exp = POISON_FREE;
1525		if (i == size - 1)
1526			exp = POISON_END;
1527		if (realobj[i] != exp) {
1528			int limit;
1529			/* Mismatch ! */
1530			/* Print header */
1531			if (lines == 0) {
1532				pr_err("Slab corruption (%s): %s start=%px, len=%d\n",
1533				       print_tainted(), cachep->name,
1534				       realobj, size);
1535				print_objinfo(cachep, objp, 0);
1536			}
1537			/* Hexdump the affected line */
1538			i = (i / 16) * 16;
1539			limit = 16;
1540			if (i + limit > size)
1541				limit = size - i;
1542			dump_line(realobj, i, limit);
1543			i += 16;
1544			lines++;
1545			/* Limit to 5 lines */
1546			if (lines > 5)
1547				break;
1548		}
1549	}
1550	if (lines != 0) {
1551		/* Print some data about the neighboring objects, if they
1552		 * exist:
1553		 */
1554		struct page *page = virt_to_head_page(objp);
1555		unsigned int objnr;
1556
1557		objnr = obj_to_index(cachep, page, objp);
1558		if (objnr) {
1559			objp = index_to_obj(cachep, page, objnr - 1);
1560			realobj = (char *)objp + obj_offset(cachep);
1561			pr_err("Prev obj: start=%px, len=%d\n", realobj, size);
1562			print_objinfo(cachep, objp, 2);
1563		}
1564		if (objnr + 1 < cachep->num) {
1565			objp = index_to_obj(cachep, page, objnr + 1);
1566			realobj = (char *)objp + obj_offset(cachep);
1567			pr_err("Next obj: start=%px, len=%d\n", realobj, size);
1568			print_objinfo(cachep, objp, 2);
1569		}
1570	}
1571}
1572#endif
1573
1574#if DEBUG
1575static void slab_destroy_debugcheck(struct kmem_cache *cachep,
1576						struct page *page)
1577{
1578	int i;
1579
1580	if (OBJFREELIST_SLAB(cachep) && cachep->flags & SLAB_POISON) {
1581		poison_obj(cachep, page->freelist - obj_offset(cachep),
1582			POISON_FREE);
1583	}
1584
1585	for (i = 0; i < cachep->num; i++) {
1586		void *objp = index_to_obj(cachep, page, i);
1587
1588		if (cachep->flags & SLAB_POISON) {
1589			check_poison_obj(cachep, objp);
1590			slab_kernel_map(cachep, objp, 1);
1591		}
1592		if (cachep->flags & SLAB_RED_ZONE) {
1593			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
1594				slab_error(cachep, "start of a freed object was overwritten");
1595			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
1596				slab_error(cachep, "end of a freed object was overwritten");
1597		}
1598	}
1599}
1600#else
1601static void slab_destroy_debugcheck(struct kmem_cache *cachep,
1602						struct page *page)
1603{
1604}
1605#endif
1606
1607/**
1608 * slab_destroy - destroy and release all objects in a slab
1609 * @cachep: cache pointer being destroyed
1610 * @page: page pointer being destroyed
1611 *
1612 * Destroy all the objs in a slab page, and release the mem back to the system.
1613 * Before calling, the slab page must have been unlinked from the cache. The
1614 * kmem_cache_node ->list_lock is not held/needed.
1615 */
1616static void slab_destroy(struct kmem_cache *cachep, struct page *page)
1617{
1618	void *freelist;
1619
1620	freelist = page->freelist;
1621	slab_destroy_debugcheck(cachep, page);
1622	if (unlikely(cachep->flags & SLAB_TYPESAFE_BY_RCU))
1623		call_rcu(&page->rcu_head, kmem_rcu_free);
1624	else
1625		kmem_freepages(cachep, page);
1626
1627	/*
1628	 * From now on, we don't use freelist
1629	 * although actual page can be freed in rcu context
1630	 */
1631	if (OFF_SLAB(cachep))
1632		kmem_cache_free(cachep->freelist_cache, freelist);
1633}
1634
1635/*
1636 * Update the size of the caches before calling slabs_destroy as it may
1637 * recursively call kfree.
1638 */
1639static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list)
1640{
1641	struct page *page, *n;
1642
1643	list_for_each_entry_safe(page, n, list, slab_list) {
1644		list_del(&page->slab_list);
1645		slab_destroy(cachep, page);
1646	}
1647}
1648
1649/**
1650 * calculate_slab_order - calculate size (page order) of slabs
1651 * @cachep: pointer to the cache that is being created
1652 * @size: size of objects to be created in this cache.
1653 * @flags: slab allocation flags
1654 *
1655 * Also calculates the number of objects per slab.
1656 *
1657 * This could be made much more intelligent.  For now, try to avoid using
1658 * high order pages for slabs.  When the gfp() functions are more friendly
1659 * towards high-order requests, this should be changed.
1660 *
1661 * Return: number of left-over bytes in a slab
1662 */
1663static size_t calculate_slab_order(struct kmem_cache *cachep,
1664				size_t size, slab_flags_t flags)
1665{
1666	size_t left_over = 0;
1667	int gfporder;
1668
1669	for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
1670		unsigned int num;
1671		size_t remainder;
1672
1673		num = cache_estimate(gfporder, size, flags, &remainder);
1674		if (!num)
1675			continue;
1676
1677		/* Can't handle number of objects more than SLAB_OBJ_MAX_NUM */
1678		if (num > SLAB_OBJ_MAX_NUM)
1679			break;
1680
1681		if (flags & CFLGS_OFF_SLAB) {
1682			struct kmem_cache *freelist_cache;
1683			size_t freelist_size;
1684
1685			freelist_size = num * sizeof(freelist_idx_t);
1686			freelist_cache = kmalloc_slab(freelist_size, 0u);
1687			if (!freelist_cache)
1688				continue;
1689
1690			/*
1691			 * Needed to avoid possible looping condition
1692			 * in cache_grow_begin()
1693			 */
1694			if (OFF_SLAB(freelist_cache))
1695				continue;
1696
1697			/* check if off slab has enough benefit */
1698			if (freelist_cache->size > cachep->size / 2)
1699				continue;
1700		}
1701
1702		/* Found something acceptable - save it away */
1703		cachep->num = num;
1704		cachep->gfporder = gfporder;
1705		left_over = remainder;
1706
1707		/*
1708		 * A VFS-reclaimable slab tends to have most allocations
1709		 * as GFP_NOFS and we really don't want to have to be allocating
1710		 * higher-order pages when we are unable to shrink dcache.
1711		 */
1712		if (flags & SLAB_RECLAIM_ACCOUNT)
1713			break;
1714
1715		/*
1716		 * Large number of objects is good, but very large slabs are
1717		 * currently bad for the gfp()s.
1718		 */
1719		if (gfporder >= slab_max_order)
1720			break;
1721
1722		/*
1723		 * Acceptable internal fragmentation?
1724		 */
1725		if (left_over * 8 <= (PAGE_SIZE << gfporder))
1726			break;
1727	}
1728	return left_over;
1729}
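/*
 * Illustrative walk-through (made-up numbers; 4 KB pages, freelist index bytes
 * ignored, SLAB_RECLAIM_ACCOUNT not set, slab_max_order = 1 as on machines
 * with more than 32MB): for a 700-byte object, gfporder 0 gives num = 5 and
 * left_over = 4096 - 5 * 700 = 596; 596 * 8 = 4768 is larger than 4096, so
 * neither the fragmentation check nor the order cap stops the loop and
 * gfporder 1 is tried.  There num = 11 and left_over = 8192 - 11 * 700 = 492,
 * and the loop stops at the slab_max_order check (492 * 8 = 3936 <= 8192 would
 * also pass), leaving the cache at order 1 with 11 objects per slab.
 */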
1730
1731static struct array_cache __percpu *alloc_kmem_cache_cpus(
1732		struct kmem_cache *cachep, int entries, int batchcount)
1733{
1734	int cpu;
1735	size_t size;
1736	struct array_cache __percpu *cpu_cache;
1737
1738	size = sizeof(void *) * entries + sizeof(struct array_cache);
1739	cpu_cache = __alloc_percpu(size, sizeof(void *));
1740
1741	if (!cpu_cache)
1742		return NULL;
1743
1744	for_each_possible_cpu(cpu) {
1745		init_arraycache(per_cpu_ptr(cpu_cache, cpu),
1746				entries, batchcount);
1747	}
1748
1749	return cpu_cache;
1750}
1751
1752static int __ref setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
1753{
1754	if (slab_state >= FULL)
1755		return enable_cpucache(cachep, gfp);
1756
1757	cachep->cpu_cache = alloc_kmem_cache_cpus(cachep, 1, 1);
1758	if (!cachep->cpu_cache)
1759		return 1;
1760
1761	if (slab_state == DOWN) {
1762		/* Creation of first cache (kmem_cache). */
1763		set_up_node(kmem_cache, CACHE_CACHE);
1764	} else if (slab_state == PARTIAL) {
1765		/* For kmem_cache_node */
1766		set_up_node(cachep, SIZE_NODE);
1767	} else {
1768		int node;
1769
1770		for_each_online_node(node) {
1771			cachep->node[node] = kmalloc_node(
1772				sizeof(struct kmem_cache_node), gfp, node);
1773			BUG_ON(!cachep->node[node]);
1774			kmem_cache_node_init(cachep->node[node]);
1775		}
1776	}
1777
1778	cachep->node[numa_mem_id()]->next_reap =
1779			jiffies + REAPTIMEOUT_NODE +
1780			((unsigned long)cachep) % REAPTIMEOUT_NODE;
1781
1782	cpu_cache_get(cachep)->avail = 0;
1783	cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
1784	cpu_cache_get(cachep)->batchcount = 1;
1785	cpu_cache_get(cachep)->touched = 0;
1786	cachep->batchcount = 1;
1787	cachep->limit = BOOT_CPUCACHE_ENTRIES;
1788	return 0;
1789}
1790
1791slab_flags_t kmem_cache_flags(unsigned int object_size,
1792	slab_flags_t flags, const char *name,
1793	void (*ctor)(void *))
1794{
1795	return flags;
1796}
1797
1798struct kmem_cache *
1799__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
1800		   slab_flags_t flags, void (*ctor)(void *))
1801{
1802	struct kmem_cache *cachep;
1803
1804	cachep = find_mergeable(size, align, flags, name, ctor);
1805	if (cachep) {
1806		cachep->refcount++;
1807
1808		/*
1809		 * Adjust the object sizes so that we clear
1810		 * the complete object on kzalloc.
1811		 */
1812		cachep->object_size = max_t(int, cachep->object_size, size);
1813	}
1814	return cachep;
1815}
1816
1817static bool set_objfreelist_slab_cache(struct kmem_cache *cachep,
1818			size_t size, slab_flags_t flags)
1819{
1820	size_t left;
1821
1822	cachep->num = 0;
1823
1824	/*
1825	 * If slab auto-initialization on free is enabled, store the freelist
1826	 * off-slab, so that its contents don't end up in one of the allocated
1827	 * objects.
1828	 */
1829	if (unlikely(slab_want_init_on_free(cachep)))
1830		return false;
1831
1832	if (cachep->ctor || flags & SLAB_TYPESAFE_BY_RCU)
1833		return false;
1834
1835	left = calculate_slab_order(cachep, size,
1836			flags | CFLGS_OBJFREELIST_SLAB);
1837	if (!cachep->num)
1838		return false;
1839
1840	if (cachep->num * sizeof(freelist_idx_t) > cachep->object_size)
1841		return false;
1842
1843	cachep->colour = left / cachep->colour_off;
1844
1845	return true;
1846}
1847
1848static bool set_off_slab_cache(struct kmem_cache *cachep,
1849			size_t size, slab_flags_t flags)
1850{
1851	size_t left;
1852
1853	cachep->num = 0;
1854
1855	/*
1856	 * Always use on-slab management when SLAB_NOLEAKTRACE
1857	 * to avoid recursive calls into kmemleak.
1858	 */
1859	if (flags & SLAB_NOLEAKTRACE)
1860		return false;
1861
1862	/*
1863	 * Size is large, assume best to place the slab management obj
1864	 * off-slab (should allow better packing of objs).
1865	 */
1866	left = calculate_slab_order(cachep, size, flags | CFLGS_OFF_SLAB);
1867	if (!cachep->num)
1868		return false;
1869
1870	/*
1871	 * If the slab has been placed off-slab, and we have enough space then
1872	 * move it on-slab. This is at the expense of any extra colouring.
1873	 */
1874	if (left >= cachep->num * sizeof(freelist_idx_t))
1875		return false;
1876
1877	cachep->colour = left / cachep->colour_off;
1878
1879	return true;
1880}
1881
1882static bool set_on_slab_cache(struct kmem_cache *cachep,
1883			size_t size, slab_flags_t flags)
1884{
1885	size_t left;
1886
1887	cachep->num = 0;
1888
1889	left = calculate_slab_order(cachep, size, flags);
1890	if (!cachep->num)
1891		return false;
1892
1893	cachep->colour = left / cachep->colour_off;
1894
1895	return true;
1896}
1897
1898/**
1899 * __kmem_cache_create - Create a cache.
1900 * @cachep: cache management descriptor
1901 * @flags: SLAB flags
1902 *
1903 * Returns zero on success, nonzero on failure.
1904 * Cannot be called within an interrupt, but can be interrupted.
1905 * The cache's constructor is run when new pages are allocated by the cache.
1906 *
1907 * The flags are
1908 *
1909 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
1910 * to catch references to uninitialised memory.
1911 *
1912 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
1913 * for buffer overruns.
1914 *
1915 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
1916 * cacheline.  This can be beneficial if you're counting cycles as closely
1917 * as davem.
1918 *
1919 * Return: 0 on success, nonzero in case of error
1920 */
1921int __kmem_cache_create(struct kmem_cache *cachep, slab_flags_t flags)
1922{
1923	size_t ralign = BYTES_PER_WORD;
1924	gfp_t gfp;
1925	int err;
1926	unsigned int size = cachep->size;
1927
1928#if DEBUG
1929#if FORCED_DEBUG
1930	/*
1931	 * Enable redzoning and last user accounting, except for caches with
1932	 * large objects, if the increased size would increase the object size
1933	 * above the next power of two: caches with object sizes just above a
1934	 * power of two have a significant amount of internal fragmentation.
1935	 */
1936	if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
1937						2 * sizeof(unsigned long long)))
1938		flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
1939	if (!(flags & SLAB_TYPESAFE_BY_RCU))
1940		flags |= SLAB_POISON;
1941#endif
1942#endif
1943
1944	/*
1945	 * Check that size is in terms of words.  This is needed to avoid
1946	 * unaligned accesses for some archs when redzoning is used, and makes
1947	 * sure any on-slab bufctl's are also correctly aligned.
1948	 */
1949	size = ALIGN(size, BYTES_PER_WORD);
1950
1951	if (flags & SLAB_RED_ZONE) {
1952		ralign = REDZONE_ALIGN;
1953		/* If redzoning, ensure that the second redzone is suitably
1954		 * aligned, by adjusting the object size accordingly. */
1955		size = ALIGN(size, REDZONE_ALIGN);
1956	}
1957
1958	/* 3) caller mandated alignment */
1959	if (ralign < cachep->align) {
1960		ralign = cachep->align;
1961	}
1962	/* disable debug if necessary */
1963	if (ralign > __alignof__(unsigned long long))
1964		flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
1965	/*
1966	 * 4) Store it.
1967	 */
1968	cachep->align = ralign;
1969	cachep->colour_off = cache_line_size();
1970	/* Offset must be a multiple of the alignment. */
1971	if (cachep->colour_off < cachep->align)
1972		cachep->colour_off = cachep->align;
1973
1974	if (slab_is_available())
1975		gfp = GFP_KERNEL;
1976	else
1977		gfp = GFP_NOWAIT;
1978
1979#if DEBUG
1980
1981	/*
1982	 * Both debugging options require word-alignment which is calculated
1983	 * into align above.
1984	 */
1985	if (flags & SLAB_RED_ZONE) {
1986		/* add space for red zone words */
1987		cachep->obj_offset += sizeof(unsigned long long);
1988		size += 2 * sizeof(unsigned long long);
1989	}
1990	if (flags & SLAB_STORE_USER) {
1991		/* user store requires one word storage behind the end of
1992		 * the real object. But if the second red zone needs to be
1993		 * aligned to 64 bits, we must allow that much space.
1994		 */
1995		if (flags & SLAB_RED_ZONE)
1996			size += REDZONE_ALIGN;
1997		else
1998			size += BYTES_PER_WORD;
1999	}
2000#endif
2001
2002	kasan_cache_create(cachep, &size, &flags);
2003
2004	size = ALIGN(size, cachep->align);
2005	/*
2006	 * We should restrict the number of objects in a slab to allow byte
2007	 * sized freelist indexing. Refer to the comment on the SLAB_OBJ_MIN_SIZE definition.
2008	 */
2009	if (FREELIST_BYTE_INDEX && size < SLAB_OBJ_MIN_SIZE)
2010		size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align);
2011
2012#if DEBUG
2013	/*
2014	 * To activate debug pagealloc, off-slab management is a necessary
2015	 * requirement. In the early phase of initialization, small sized slabs
2016	 * aren't initialized yet, so it would not be possible. Hence we need
2017	 * to check size >= 256. This guarantees that all necessary small
2018	 * sized slabs are initialized in the current slab initialization sequence.
2019	 */
2020	if (debug_pagealloc_enabled_static() && (flags & SLAB_POISON) &&
2021		size >= 256 && cachep->object_size > cache_line_size()) {
2022		if (size < PAGE_SIZE || size % PAGE_SIZE == 0) {
2023			size_t tmp_size = ALIGN(size, PAGE_SIZE);
2024
2025			if (set_off_slab_cache(cachep, tmp_size, flags)) {
2026				flags |= CFLGS_OFF_SLAB;
2027				cachep->obj_offset += tmp_size - size;
2028				size = tmp_size;
2029				goto done;
2030			}
2031		}
2032	}
2033#endif
2034
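	/*
	 * Editorial note: the three helpers below are tried in order of
	 * decreasing object density: first keep the freelist inside one of
	 * the objects (OBJFREELIST), then place it off-slab in a separate
	 * kmalloc'd allocation, and finally use the spare bytes at the end
	 * of the slab itself.
	 */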
2035	if (set_objfreelist_slab_cache(cachep, size, flags)) {
2036		flags |= CFLGS_OBJFREELIST_SLAB;
2037		goto done;
2038	}
2039
2040	if (set_off_slab_cache(cachep, size, flags)) {
2041		flags |= CFLGS_OFF_SLAB;
2042		goto done;
2043	}
2044
2045	if (set_on_slab_cache(cachep, size, flags))
2046		goto done;
2047
2048	return -E2BIG;
2049
2050done:
2051	cachep->freelist_size = cachep->num * sizeof(freelist_idx_t);
2052	cachep->flags = flags;
2053	cachep->allocflags = __GFP_COMP;
2054	if (flags & SLAB_CACHE_DMA)
2055		cachep->allocflags |= GFP_DMA;
2056	if (flags & SLAB_CACHE_DMA32)
2057		cachep->allocflags |= GFP_DMA32;
2058	if (flags & SLAB_RECLAIM_ACCOUNT)
2059		cachep->allocflags |= __GFP_RECLAIMABLE;
2060	cachep->size = size;
2061	cachep->reciprocal_buffer_size = reciprocal_value(size);
2062
2063#if DEBUG
2064	/*
2065	 * If we're going to use the generic kernel_map_pages()
2066	 * poisoning, then it's going to smash the contents of
2067	 * the redzone and userword anyhow, so switch them off.
2068	 */
2069	if (IS_ENABLED(CONFIG_PAGE_POISONING) &&
2070		(cachep->flags & SLAB_POISON) &&
2071		is_debug_pagealloc_cache(cachep))
2072		cachep->flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
2073#endif
2074
2075	if (OFF_SLAB(cachep)) {
2076		cachep->freelist_cache =
2077			kmalloc_slab(cachep->freelist_size, 0u);
2078	}
2079
2080	err = setup_cpu_cache(cachep, gfp);
2081	if (err) {
2082		__kmem_cache_release(cachep);
2083		return err;
2084	}
2085
2086	return 0;
2087}
2088
2089#if DEBUG
2090static void check_irq_off(void)
2091{
2092	BUG_ON(!irqs_disabled());
2093}
2094
2095static void check_irq_on(void)
2096{
2097	BUG_ON(irqs_disabled());
2098}
2099
2100static void check_mutex_acquired(void)
2101{
2102	BUG_ON(!mutex_is_locked(&slab_mutex));
2103}
2104
2105static void check_spinlock_acquired(struct kmem_cache *cachep)
2106{
2107#ifdef CONFIG_SMP
2108	check_irq_off();
2109	assert_spin_locked(&get_node(cachep, numa_mem_id())->list_lock);
2110#endif
2111}
2112
2113static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
2114{
2115#ifdef CONFIG_SMP
2116	check_irq_off();
2117	assert_spin_locked(&get_node(cachep, node)->list_lock);
2118#endif
2119}
2120
2121#else
2122#define check_irq_off()	do { } while(0)
2123#define check_irq_on()	do { } while(0)
2124#define check_mutex_acquired()	do { } while(0)
2125#define check_spinlock_acquired(x) do { } while(0)
2126#define check_spinlock_acquired_node(x, y) do { } while(0)
2127#endif
2128
2129static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac,
2130				int node, bool free_all, struct list_head *list)
2131{
2132	int tofree;
2133
2134	if (!ac || !ac->avail)
2135		return;
2136
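	/* Free everything, or ~20% of the limit, capped at half of what is currently cached. */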
2137	tofree = free_all ? ac->avail : (ac->limit + 4) / 5;
2138	if (tofree > ac->avail)
2139		tofree = (ac->avail + 1) / 2;
2140
2141	free_block(cachep, ac->entry, tofree, node, list);
2142	ac->avail -= tofree;
2143	memmove(ac->entry, &(ac->entry[tofree]), sizeof(void *) * ac->avail);
2144}
2145
2146static void do_drain(void *arg)
2147{
2148	struct kmem_cache *cachep = arg;
2149	struct array_cache *ac;
2150	int node = numa_mem_id();
2151	struct kmem_cache_node *n;
2152	LIST_HEAD(list);
2153
2154	check_irq_off();
2155	ac = cpu_cache_get(cachep);
2156	n = get_node(cachep, node);
2157	spin_lock(&n->list_lock);
2158	free_block(cachep, ac->entry, ac->avail, node, &list);
2159	spin_unlock(&n->list_lock);
2160	ac->avail = 0;
2161	slabs_destroy(cachep, &list);
2162}
2163
2164static void drain_cpu_caches(struct kmem_cache *cachep)
2165{
2166	struct kmem_cache_node *n;
2167	int node;
2168	LIST_HEAD(list);
2169
2170	on_each_cpu(do_drain, cachep, 1);
2171	check_irq_on();
2172	for_each_kmem_cache_node(cachep, node, n)
2173		if (n->alien)
2174			drain_alien_cache(cachep, n->alien);
2175
2176	for_each_kmem_cache_node(cachep, node, n) {
2177		spin_lock_irq(&n->list_lock);
2178		drain_array_locked(cachep, n->shared, node, true, &list);
2179		spin_unlock_irq(&n->list_lock);
2180
2181		slabs_destroy(cachep, &list);
2182	}
2183}
2184
2185/*
2186 * Remove slabs from the list of free slabs.
2187 * Specify the number of slabs to drain in tofree.
2188 *
2189 * Returns the actual number of slabs released.
2190 */
2191static int drain_freelist(struct kmem_cache *cache,
2192			struct kmem_cache_node *n, int tofree)
2193{
2194	struct list_head *p;
2195	int nr_freed;
2196	struct page *page;
2197
2198	nr_freed = 0;
2199	while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
2200
2201		spin_lock_irq(&n->list_lock);
2202		p = n->slabs_free.prev;
2203		if (p == &n->slabs_free) {
2204			spin_unlock_irq(&n->list_lock);
2205			goto out;
2206		}
2207
2208		page = list_entry(p, struct page, slab_list);
2209		list_del(&page->slab_list);
2210		n->free_slabs--;
2211		n->total_slabs--;
2212		/*
2213		 * Safe to drop the lock. The slab is no longer linked
2214		 * to the cache.
2215		 */
2216		n->free_objects -= cache->num;
2217		spin_unlock_irq(&n->list_lock);
2218		slab_destroy(cache, page);
2219		nr_freed++;
2220	}
2221out:
2222	return nr_freed;
2223}
2224
2225bool __kmem_cache_empty(struct kmem_cache *s)
2226{
2227	int node;
2228	struct kmem_cache_node *n;
2229
2230	for_each_kmem_cache_node(s, node, n)
2231		if (!list_empty(&n->slabs_full) ||
2232		    !list_empty(&n->slabs_partial))
2233			return false;
2234	return true;
2235}
2236
2237int __kmem_cache_shrink(struct kmem_cache *cachep)
2238{
2239	int ret = 0;
2240	int node;
2241	struct kmem_cache_node *n;
2242
2243	drain_cpu_caches(cachep);
2244
2245	check_irq_on();
2246	for_each_kmem_cache_node(cachep, node, n) {
2247		drain_freelist(cachep, n, INT_MAX);
2248
2249		ret += !list_empty(&n->slabs_full) ||
2250			!list_empty(&n->slabs_partial);
2251	}
2252	return (ret ? 1 : 0);
2253}
2254
2255int __kmem_cache_shutdown(struct kmem_cache *cachep)
2256{
2257	return __kmem_cache_shrink(cachep);
2258}
2259
2260void __kmem_cache_release(struct kmem_cache *cachep)
2261{
2262	int i;
2263	struct kmem_cache_node *n;
2264
2265	cache_random_seq_destroy(cachep);
2266
2267	free_percpu(cachep->cpu_cache);
2268
2269	/* NUMA: free the node structures */
2270	for_each_kmem_cache_node(cachep, i, n) {
2271		kfree(n->shared);
2272		free_alien_cache(n->alien);
2273		kfree(n);
2274		cachep->node[i] = NULL;
2275	}
2276}
2277
2278/*
2279 * Get the memory for a slab management obj.
2280 *
2281 * For a slab cache when the slab descriptor is off-slab, the
2282 * slab descriptor can't come from the same cache which is being created,
2283 * because if it were, we would defer the creation of
2284 * the kmalloc_{dma,}_cache of size sizeof(slab descriptor) to this point,
2285 * and we would eventually call down to __kmem_cache_create(), which
2286 * in turn looks up in the kmalloc_{dma,}_caches for the desired-size one.
2287 * This is a "chicken-and-egg" problem.
2288 *
2289 * So the off-slab slab descriptor shall come from the kmalloc_{dma,}_caches,
2290 * which are all initialized during kmem_cache_init().
2291 */
2292static void *alloc_slabmgmt(struct kmem_cache *cachep,
2293				   struct page *page, int colour_off,
2294				   gfp_t local_flags, int nodeid)
2295{
2296	void *freelist;
2297	void *addr = page_address(page);
2298
2299	page->s_mem = addr + colour_off;
2300	page->active = 0;
2301
2302	if (OBJFREELIST_SLAB(cachep))
2303		freelist = NULL;
2304	else if (OFF_SLAB(cachep)) {
2305		/* Slab management obj is off-slab. */
2306		freelist = kmem_cache_alloc_node(cachep->freelist_cache,
2307					      local_flags, nodeid);
2308		if (!freelist)
2309			return NULL;
2310	} else {
2311		/* We will use last bytes at the slab for freelist */
2312		freelist = addr + (PAGE_SIZE << cachep->gfporder) -
2313				cachep->freelist_size;
2314	}
2315
2316	return freelist;
2317}
2318
2319static inline freelist_idx_t get_free_obj(struct page *page, unsigned int idx)
2320{
2321	return ((freelist_idx_t *)page->freelist)[idx];
2322}
2323
2324static inline void set_free_obj(struct page *page,
2325					unsigned int idx, freelist_idx_t val)
2326{
2327	((freelist_idx_t *)(page->freelist))[idx] = val;
2328}
2329
2330static void cache_init_objs_debug(struct kmem_cache *cachep, struct page *page)
2331{
2332#if DEBUG
2333	int i;
2334
2335	for (i = 0; i < cachep->num; i++) {
2336		void *objp = index_to_obj(cachep, page, i);
2337
2338		if (cachep->flags & SLAB_STORE_USER)
2339			*dbg_userword(cachep, objp) = NULL;
2340
2341		if (cachep->flags & SLAB_RED_ZONE) {
2342			*dbg_redzone1(cachep, objp) = RED_INACTIVE;
2343			*dbg_redzone2(cachep, objp) = RED_INACTIVE;
2344		}
2345		/*
2346		 * Constructors are not allowed to allocate memory from the same
2347		 * cache which they are a constructor for.  Otherwise, deadlock.
2348		 * They must also be threaded.
2349		 */
2350		if (cachep->ctor && !(cachep->flags & SLAB_POISON)) {
2351			kasan_unpoison_object_data(cachep,
2352						   objp + obj_offset(cachep));
2353			cachep->ctor(objp + obj_offset(cachep));
2354			kasan_poison_object_data(
2355				cachep, objp + obj_offset(cachep));
2356		}
2357
2358		if (cachep->flags & SLAB_RED_ZONE) {
2359			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
2360				slab_error(cachep, "constructor overwrote the end of an object");
2361			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
2362				slab_error(cachep, "constructor overwrote the start of an object");
2363		}
2364		/* need to poison the objs? */
2365		if (cachep->flags & SLAB_POISON) {
2366			poison_obj(cachep, objp, POISON_FREE);
2367			slab_kernel_map(cachep, objp, 0);
2368		}
2369	}
2370#endif
2371}
2372
2373#ifdef CONFIG_SLAB_FREELIST_RANDOM
2374/* Hold information during a freelist initialization */
2375union freelist_init_state {
2376	struct {
2377		unsigned int pos;
2378		unsigned int *list;
2379		unsigned int count;
2380	};
2381	struct rnd_state rnd_state;
2382};
2383
2384/*
2385 * Initialize the state based on the randomization method available.
2386 * Return true if the pre-computed list is available, false otherwise.
2387 */
2388static bool freelist_state_initialize(union freelist_init_state *state,
2389				struct kmem_cache *cachep,
2390				unsigned int count)
2391{
2392	bool ret;
2393	unsigned int rand;
2394
2395	/* Use best entropy available to define a random shift */
2396	rand = get_random_int();
2397
2398	/* Use a random state if the pre-computed list is not available */
2399	if (!cachep->random_seq) {
2400		prandom_seed_state(&state->rnd_state, rand);
2401		ret = false;
2402	} else {
2403		state->list = cachep->random_seq;
2404		state->count = count;
2405		state->pos = rand % count;
2406		ret = true;
2407	}
2408	return ret;
2409}
2410
2411/* Get the next entry on the list and randomize it using a random shift */
2412static freelist_idx_t next_random_slot(union freelist_init_state *state)
2413{
2414	if (state->pos >= state->count)
2415		state->pos = 0;
2416	return state->list[state->pos++];
2417}
2418
2419/* Swap two freelist entries */
2420static void swap_free_obj(struct page *page, unsigned int a, unsigned int b)
2421{
2422	swap(((freelist_idx_t *)page->freelist)[a],
2423		((freelist_idx_t *)page->freelist)[b]);
2424}
2425
2426/*
2427 * Shuffle the freelist initialization state based on pre-computed lists.
2428 * return true if the list was successfully shuffled, false otherwise.
2429 */
2430static bool shuffle_freelist(struct kmem_cache *cachep, struct page *page)
2431{
2432	unsigned int objfreelist = 0, i, rand, count = cachep->num;
2433	union freelist_init_state state;
2434	bool precomputed;
2435
2436	if (count < 2)
2437		return false;
2438
2439	precomputed = freelist_state_initialize(&state, cachep, count);
2440
2441	/* Take a random entry as the objfreelist */
2442	if (OBJFREELIST_SLAB(cachep)) {
2443		if (!precomputed)
2444			objfreelist = count - 1;
2445		else
2446			objfreelist = next_random_slot(&state);
2447		page->freelist = index_to_obj(cachep, page, objfreelist) +
2448						obj_offset(cachep);
2449		count--;
2450	}
2451
2452	/*
2453	 * On early boot, generate the list dynamically.
2454	 * Later use a pre-computed list for speed.
2455	 */
2456	if (!precomputed) {
2457		for (i = 0; i < count; i++)
2458			set_free_obj(page, i, i);
2459
2460		/* Fisher-Yates shuffle */
2461		for (i = count - 1; i > 0; i--) {
2462			rand = prandom_u32_state(&state.rnd_state);
2463			rand %= (i + 1);
2464			swap_free_obj(page, i, rand);
2465		}
2466	} else {
2467		for (i = 0; i < count; i++)
2468			set_free_obj(page, i, next_random_slot(&state));
2469	}
2470
2471	if (OBJFREELIST_SLAB(cachep))
2472		set_free_obj(page, cachep->num - 1, objfreelist);
2473
2474	return true;
2475}
2476#else
2477static inline bool shuffle_freelist(struct kmem_cache *cachep,
2478				struct page *page)
2479{
2480	return false;
2481}
2482#endif /* CONFIG_SLAB_FREELIST_RANDOM */
2483
2484static void cache_init_objs(struct kmem_cache *cachep,
2485			    struct page *page)
2486{
2487	int i;
2488	void *objp;
2489	bool shuffled;
2490
2491	cache_init_objs_debug(cachep, page);
2492
2493	/* Try to randomize the freelist if enabled */
2494	shuffled = shuffle_freelist(cachep, page);
2495
2496	if (!shuffled && OBJFREELIST_SLAB(cachep)) {
2497		page->freelist = index_to_obj(cachep, page, cachep->num - 1) +
2498						obj_offset(cachep);
2499	}
2500
2501	for (i = 0; i < cachep->num; i++) {
2502		objp = index_to_obj(cachep, page, i);
2503		objp = kasan_init_slab_obj(cachep, objp);
2504
2505		/* constructor could break poison info */
2506		if (DEBUG == 0 && cachep->ctor) {
2507			kasan_unpoison_object_data(cachep, objp);
2508			cachep->ctor(objp);
2509			kasan_poison_object_data(cachep, objp);
2510		}
2511
2512		if (!shuffled)
2513			set_free_obj(page, i, i);
2514	}
2515}
2516
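/*
 * Pop the next free object off the slab's freelist: the index stored at
 * position page->active names the object to hand out, and page->active
 * counts the objects currently in use.
 */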
2517static void *slab_get_obj(struct kmem_cache *cachep, struct page *page)
2518{
2519	void *objp;
2520
2521	objp = index_to_obj(cachep, page, get_free_obj(page, page->active));
2522	page->active++;
2523
2524	return objp;
2525}
2526
2527static void slab_put_obj(struct kmem_cache *cachep,
2528			struct page *page, void *objp)
2529{
2530	unsigned int objnr = obj_to_index(cachep, page, objp);
2531#if DEBUG
2532	unsigned int i;
2533
2534	/* Verify double free bug */
2535	for (i = page->active; i < cachep->num; i++) {
2536		if (get_free_obj(page, i) == objnr) {
2537			pr_err("slab: double free detected in cache '%s', objp %px\n",
2538			       cachep->name, objp);
2539			BUG();
2540		}
2541	}
2542#endif
2543	page->active--;
2544	if (!page->freelist)
2545		page->freelist = objp + obj_offset(cachep);
2546
2547	set_free_obj(page, page->active, objnr);
2548}
2549
2550/*
2551 * Map pages beginning at addr to the given cache and slab. This is required
2552 * for the slab allocator to be able to lookup the cache and slab of a
2553 * virtual address for kfree, ksize, and slab debugging.
2554 */
2555static void slab_map_pages(struct kmem_cache *cache, struct page *page,
2556			   void *freelist)
2557{
2558	page->slab_cache = cache;
2559	page->freelist = freelist;
2560}
2561
2562/*
2563 * Grow (by 1) the number of slabs within a cache.  This is called by
2564 * kmem_cache_alloc() when there are no active objs left in a cache.
2565 */
2566static struct page *cache_grow_begin(struct kmem_cache *cachep,
2567				gfp_t flags, int nodeid)
2568{
2569	void *freelist;
2570	size_t offset;
2571	gfp_t local_flags;
2572	int page_node;
2573	struct kmem_cache_node *n;
2574	struct page *page;
2575
2576	/*
2577	 * Be lazy and only check for valid flags here,  keeping it out of the
2578	 * critical path in kmem_cache_alloc().
2579	 */
2580	if (unlikely(flags & GFP_SLAB_BUG_MASK))
2581		flags = kmalloc_fix_flags(flags);
2582
2583	WARN_ON_ONCE(cachep->ctor && (flags & __GFP_ZERO));
2584	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
2585
2586	check_irq_off();
2587	if (gfpflags_allow_blocking(local_flags))
2588		local_irq_enable();
2589
2590	/*
2591	 * Get mem for the objs.  Attempt to allocate a physical page from
2592	 * 'nodeid'.
2593	 */
2594	page = kmem_getpages(cachep, local_flags, nodeid);
2595	if (!page)
2596		goto failed;
2597
2598	page_node = page_to_nid(page);
2599	n = get_node(cachep, page_node);
2600
2601	/* Get colour for the slab, and calculate the next value. */
2602	n->colour_next++;
2603	if (n->colour_next >= cachep->colour)
2604		n->colour_next = 0;
2605
2606	offset = n->colour_next;
2607	if (offset >= cachep->colour)
2608		offset = 0;
2609
2610	offset *= cachep->colour_off;
2611
2612	/*
2613	 * Call kasan_poison_slab() before calling alloc_slabmgmt(), so
2614	 * page_address() in the latter returns a non-tagged pointer,
2615	 * as it should be for slab pages.
2616	 */
2617	kasan_poison_slab(page);
2618
2619	/* Get slab management. */
2620	freelist = alloc_slabmgmt(cachep, page, offset,
2621			local_flags & ~GFP_CONSTRAINT_MASK, page_node);
2622	if (OFF_SLAB(cachep) && !freelist)
2623		goto opps1;
2624
2625	slab_map_pages(cachep, page, freelist);
2626
2627	cache_init_objs(cachep, page);
2628
2629	if (gfpflags_allow_blocking(local_flags))
2630		local_irq_disable();
2631
2632	return page;
2633
2634opps1:
2635	kmem_freepages(cachep, page);
2636failed:
2637	if (gfpflags_allow_blocking(local_flags))
2638		local_irq_disable();
2639	return NULL;
2640}
2641
2642static void cache_grow_end(struct kmem_cache *cachep, struct page *page)
2643{
2644	struct kmem_cache_node *n;
2645	void *list = NULL;
2646
2647	check_irq_off();
2648
2649	if (!page)
2650		return;
2651
2652	INIT_LIST_HEAD(&page->slab_list);
2653	n = get_node(cachep, page_to_nid(page));
2654
2655	spin_lock(&n->list_lock);
2656	n->total_slabs++;
2657	if (!page->active) {
2658		list_add_tail(&page->slab_list, &n->slabs_free);
2659		n->free_slabs++;
2660	} else
2661		fixup_slab_list(cachep, n, page, &list);
2662
2663	STATS_INC_GROWN(cachep);
2664	n->free_objects += cachep->num - page->active;
2665	spin_unlock(&n->list_lock);
2666
2667	fixup_objfreelist_debug(cachep, &list);
2668}
2669
2670#if DEBUG
2671
2672/*
2673 * Perform extra freeing checks:
2674 * - detect bad pointers.
2675 * - POISON/RED_ZONE checking
2676 */
2677static void kfree_debugcheck(const void *objp)
2678{
2679	if (!virt_addr_valid(objp)) {
2680		pr_err("kfree_debugcheck: out of range ptr %lxh\n",
2681		       (unsigned long)objp);
2682		BUG();
2683	}
2684}
2685
2686static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
2687{
2688	unsigned long long redzone1, redzone2;
2689
2690	redzone1 = *dbg_redzone1(cache, obj);
2691	redzone2 = *dbg_redzone2(cache, obj);
2692
2693	/*
2694	 * Redzone is ok.
2695	 */
2696	if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
2697		return;
2698
2699	if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
2700		slab_error(cache, "double free detected");
2701	else
2702		slab_error(cache, "memory outside object was overwritten");
2703
2704	pr_err("%px: redzone 1:0x%llx, redzone 2:0x%llx\n",
2705	       obj, redzone1, redzone2);
2706}
2707
2708static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
2709				   unsigned long caller)
2710{
2711	unsigned int objnr;
2712	struct page *page;
2713
2714	BUG_ON(virt_to_cache(objp) != cachep);
2715
2716	objp -= obj_offset(cachep);
2717	kfree_debugcheck(objp);
2718	page = virt_to_head_page(objp);
2719
2720	if (cachep->flags & SLAB_RED_ZONE) {
2721		verify_redzone_free(cachep, objp);
2722		*dbg_redzone1(cachep, objp) = RED_INACTIVE;
2723		*dbg_redzone2(cachep, objp) = RED_INACTIVE;
2724	}
2725	if (cachep->flags & SLAB_STORE_USER)
2726		*dbg_userword(cachep, objp) = (void *)caller;
2727
2728	objnr = obj_to_index(cachep, page, objp);
2729
2730	BUG_ON(objnr >= cachep->num);
2731	BUG_ON(objp != index_to_obj(cachep, page, objnr));
2732
2733	if (cachep->flags & SLAB_POISON) {
2734		poison_obj(cachep, objp, POISON_FREE);
2735		slab_kernel_map(cachep, objp, 0);
2736	}
2737	return objp;
2738}
2739
2740#else
2741#define kfree_debugcheck(x) do { } while(0)
2742#define cache_free_debugcheck(x,objp,z) (objp)
2743#endif
2744
2745static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
2746						void **list)
2747{
2748#if DEBUG
2749	void *next = *list;
2750	void *objp;
2751
2752	while (next) {
2753		objp = next - obj_offset(cachep);
2754		next = *(void **)next;
2755		poison_obj(cachep, objp, POISON_FREE);
2756	}
2757#endif
2758}
2759
2760static inline void fixup_slab_list(struct kmem_cache *cachep,
2761				struct kmem_cache_node *n, struct page *page,
2762				void **list)
2763{
2764	/* move slabp to correct slabp list: */
2765	list_del(&page->slab_list);
2766	if (page->active == cachep->num) {
2767		list_add(&page->slab_list, &n->slabs_full);
2768		if (OBJFREELIST_SLAB(cachep)) {
2769#if DEBUG
2770			/* Poisoning will be done without holding the lock */
2771			if (cachep->flags & SLAB_POISON) {
2772				void **objp = page->freelist;
2773
2774				*objp = *list;
2775				*list = objp;
2776			}
2777#endif
2778			page->freelist = NULL;
2779		}
2780	} else
2781		list_add(&page->slab_list, &n->slabs_partial);
2782}
2783
2784/* Try to find non-pfmemalloc slab if needed */
2785static noinline struct page *get_valid_first_slab(struct kmem_cache_node *n,
2786					struct page *page, bool pfmemalloc)
2787{
2788	if (!page)
2789		return NULL;
2790
2791	if (pfmemalloc)
2792		return page;
2793
2794	if (!PageSlabPfmemalloc(page))
2795		return page;
2796
2797	/* No need to keep pfmemalloc slab if we have enough free objects */
2798	if (n->free_objects > n->free_limit) {
2799		ClearPageSlabPfmemalloc(page);
2800		return page;
2801	}
2802
2803	/* Move pfmemalloc slab to the end of list to speed up next search */
2804	list_del(&page->slab_list);
2805	if (!page->active) {
2806		list_add_tail(&page->slab_list, &n->slabs_free);
2807		n->free_slabs++;
2808	} else
2809		list_add_tail(&page->slab_list, &n->slabs_partial);
2810
2811	list_for_each_entry(page, &n->slabs_partial, slab_list) {
2812		if (!PageSlabPfmemalloc(page))
2813			return page;
2814	}
2815
2816	n->free_touched = 1;
2817	list_for_each_entry(page, &n->slabs_free, slab_list) {
2818		if (!PageSlabPfmemalloc(page)) {
2819			n->free_slabs--;
2820			return page;
2821		}
2822	}
2823
2824	return NULL;
2825}
2826
2827static struct page *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc)
2828{
2829	struct page *page;
2830
2831	assert_spin_locked(&n->list_lock);
2832	page = list_first_entry_or_null(&n->slabs_partial, struct page,
2833					slab_list);
2834	if (!page) {
2835		n->free_touched = 1;
2836		page = list_first_entry_or_null(&n->slabs_free, struct page,
2837						slab_list);
2838		if (page)
2839			n->free_slabs--;
2840	}
2841
2842	if (sk_memalloc_socks())
2843		page = get_valid_first_slab(n, page, pfmemalloc);
2844
2845	return page;
2846}
2847
2848static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep,
2849				struct kmem_cache_node *n, gfp_t flags)
2850{
2851	struct page *page;
2852	void *obj;
2853	void *list = NULL;
2854
2855	if (!gfp_pfmemalloc_allowed(flags))
2856		return NULL;
2857
2858	spin_lock(&n->list_lock);
2859	page = get_first_slab(n, true);
2860	if (!page) {
2861		spin_unlock(&n->list_lock);
2862		return NULL;
2863	}
2864
2865	obj = slab_get_obj(cachep, page);
2866	n->free_objects--;
2867
2868	fixup_slab_list(cachep, n, page, &list);
2869
2870	spin_unlock(&n->list_lock);
2871	fixup_objfreelist_debug(cachep, &list);
2872
2873	return obj;
2874}
2875
2876/*
2877 * Slab list should be fixed up by fixup_slab_list() for existing slab
2878 * or cache_grow_end() for new slab
2879 */
2880static __always_inline int alloc_block(struct kmem_cache *cachep,
2881		struct array_cache *ac, struct page *page, int batchcount)
2882{
2883	/*
2884	 * There must be at least one object available for
2885	 * allocation.
2886	 */
2887	BUG_ON(page->active >= cachep->num);
2888
2889	while (page->active < cachep->num && batchcount--) {
2890		STATS_INC_ALLOCED(cachep);
2891		STATS_INC_ACTIVE(cachep);
2892		STATS_SET_HIGH(cachep);
2893
2894		ac->entry[ac->avail++] = slab_get_obj(cachep, page);
2895	}
2896
2897	return batchcount;
2898}
2899
2900static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
2901{
2902	int batchcount;
2903	struct kmem_cache_node *n;
2904	struct array_cache *ac, *shared;
2905	int node;
2906	void *list = NULL;
2907	struct page *page;
2908
2909	check_irq_off();
2910	node = numa_mem_id();
2911
2912	ac = cpu_cache_get(cachep);
2913	batchcount = ac->batchcount;
2914	if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
2915		/*
2916		 * If there was little recent activity on this cache, then
2917		 * perform only a partial refill.  Otherwise we could generate
2918		 * refill bouncing.
2919		 */
2920		batchcount = BATCHREFILL_LIMIT;
2921	}
2922	n = get_node(cachep, node);
2923
2924	BUG_ON(ac->avail > 0 || !n);
2925	shared = READ_ONCE(n->shared);
2926	if (!n->free_objects && (!shared || !shared->avail))
2927		goto direct_grow;
2928
2929	spin_lock(&n->list_lock);
2930	shared = READ_ONCE(n->shared);
2931
2932	/* See if we can refill from the shared array */
2933	if (shared && transfer_objects(ac, shared, batchcount)) {
2934		shared->touched = 1;
2935		goto alloc_done;
2936	}
2937
2938	while (batchcount > 0) {
2939		/* Get the slab the allocation is to come from. */
2940		page = get_first_slab(n, false);
2941		if (!page)
2942			goto must_grow;
2943
2944		check_spinlock_acquired(cachep);
2945
2946		batchcount = alloc_block(cachep, ac, page, batchcount);
2947		fixup_slab_list(cachep, n, page, &list);
2948	}
2949
2950must_grow:
2951	n->free_objects -= ac->avail;
2952alloc_done:
2953	spin_unlock(&n->list_lock);
2954	fixup_objfreelist_debug(cachep, &list);
2955
2956direct_grow:
2957	if (unlikely(!ac->avail)) {
2958		/* Check if we can use obj in pfmemalloc slab */
2959		if (sk_memalloc_socks()) {
2960			void *obj = cache_alloc_pfmemalloc(cachep, n, flags);
2961
2962			if (obj)
2963				return obj;
2964		}
2965
2966		page = cache_grow_begin(cachep, gfp_exact_node(flags), node);
2967
2968		/*
2969		 * cache_grow_begin() can reenable interrupts,
2970		 * then ac could change.
2971		 */
2972		ac = cpu_cache_get(cachep);
2973		if (!ac->avail && page)
2974			alloc_block(cachep, ac, page, batchcount);
2975		cache_grow_end(cachep, page);
2976
2977		if (!ac->avail)
2978			return NULL;
2979	}
2980	ac->touched = 1;
2981
2982	return ac->entry[--ac->avail];
2983}
2984
2985static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
2986						gfp_t flags)
2987{
2988	might_sleep_if(gfpflags_allow_blocking(flags));
2989}
2990
2991#if DEBUG
2992static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
2993				gfp_t flags, void *objp, unsigned long caller)
2994{
2995	WARN_ON_ONCE(cachep->ctor && (flags & __GFP_ZERO));
2996	if (!objp)
2997		return objp;
2998	if (cachep->flags & SLAB_POISON) {
2999		check_poison_obj(cachep, objp);
3000		slab_kernel_map(cachep, objp, 1);
3001		poison_obj(cachep, objp, POISON_INUSE);
3002	}
3003	if (cachep->flags & SLAB_STORE_USER)
3004		*dbg_userword(cachep, objp) = (void *)caller;
3005
3006	if (cachep->flags & SLAB_RED_ZONE) {
3007		if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
3008				*dbg_redzone2(cachep, objp) != RED_INACTIVE) {
3009			slab_error(cachep, "double free, or memory outside object was overwritten");
3010			pr_err("%px: redzone 1:0x%llx, redzone 2:0x%llx\n",
3011			       objp, *dbg_redzone1(cachep, objp),
3012			       *dbg_redzone2(cachep, objp));
3013		}
3014		*dbg_redzone1(cachep, objp) = RED_ACTIVE;
3015		*dbg_redzone2(cachep, objp) = RED_ACTIVE;
3016	}
3017
3018	objp += obj_offset(cachep);
3019	if (cachep->ctor && cachep->flags & SLAB_POISON)
3020		cachep->ctor(objp);
3021	if (ARCH_SLAB_MINALIGN &&
3022	    ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) {
3023		pr_err("0x%px: not aligned to ARCH_SLAB_MINALIGN=%d\n",
3024		       objp, (int)ARCH_SLAB_MINALIGN);
3025	}
3026	return objp;
3027}
3028#else
3029#define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
3030#endif
3031
3032static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3033{
3034	void *objp;
3035	struct array_cache *ac;
3036
3037	check_irq_off();
3038
3039	ac = cpu_cache_get(cachep);
3040	if (likely(ac->avail)) {
3041		ac->touched = 1;
3042		objp = ac->entry[--ac->avail];
3043
3044		STATS_INC_ALLOCHIT(cachep);
3045		goto out;
3046	}
3047
3048	STATS_INC_ALLOCMISS(cachep);
3049	objp = cache_alloc_refill(cachep, flags);
3050	/*
3051	 * the 'ac' may be updated by cache_alloc_refill(),
3052	 * and kmemleak_erase() requires its correct value.
3053	 */
3054	ac = cpu_cache_get(cachep);
3055
3056out:
3057	/*
3058	 * To avoid a false negative, if an object that is in one of the
3059	 * per-CPU caches is leaked, we need to make sure kmemleak doesn't
3060	 * treat the array pointers as a reference to the object.
3061	 */
3062	if (objp)
3063		kmemleak_erase(&ac->entry[ac->avail]);
3064	return objp;
3065}
3066
3067#ifdef CONFIG_NUMA
3068/*
3069 * Try allocating on another node if PFA_SPREAD_SLAB or a mempolicy is set.
3070 *
3071 * If we are in_interrupt, then process context, including cpusets and
3072 * mempolicy, may not apply and should not be used for allocation policy.
3073 */
3074static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
3075{
3076	int nid_alloc, nid_here;
3077
3078	if (in_interrupt() || (flags & __GFP_THISNODE))
3079		return NULL;
3080	nid_alloc = nid_here = numa_mem_id();
3081	if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
3082		nid_alloc = cpuset_slab_spread_node();
3083	else if (current->mempolicy)
3084		nid_alloc = mempolicy_slab_node();
3085	if (nid_alloc != nid_here)
3086		return ____cache_alloc_node(cachep, flags, nid_alloc);
3087	return NULL;
3088}
3089
3090/*
3091 * Fallback function if there was no memory available and no objects on a
3092 * certain node and fallback is permitted. First we scan all the
3093 * available nodes for available objects. If that fails then we
3094 * perform an allocation without specifying a node. This allows the page
3095 * allocator to do its reclaim / fallback magic. We then insert the
3096 * slab into the proper nodelist and then allocate from it.
3097 */
3098static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
3099{
3100	struct zonelist *zonelist;
3101	struct zoneref *z;
3102	struct zone *zone;
3103	enum zone_type highest_zoneidx = gfp_zone(flags);
3104	void *obj = NULL;
3105	struct page *page;
3106	int nid;
3107	unsigned int cpuset_mems_cookie;
3108
3109	if (flags & __GFP_THISNODE)
3110		return NULL;
3111
3112retry_cpuset:
3113	cpuset_mems_cookie = read_mems_allowed_begin();
3114	zonelist = node_zonelist(mempolicy_slab_node(), flags);
3115
3116retry:
3117	/*
3118	 * Look through allowed nodes for objects available
3119	 * from existing per node queues.
3120	 */
3121	for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {
3122		nid = zone_to_nid(zone);
3123
3124		if (cpuset_zone_allowed(zone, flags) &&
3125			get_node(cache, nid) &&
3126			get_node(cache, nid)->free_objects) {
3127				obj = ____cache_alloc_node(cache,
3128					gfp_exact_node(flags), nid);
3129				if (obj)
3130					break;
3131		}
3132	}
3133
3134	if (!obj) {
3135		/*
3136		 * This allocation will be performed within the constraints
3137		 * of the current cpuset / memory policy requirements.
3138		 * We may trigger various forms of reclaim on the allowed
3139		 * set and go into memory reserves if necessary.
3140		 */
3141		page = cache_grow_begin(cache, flags, numa_mem_id());
3142		cache_grow_end(cache, page);
3143		if (page) {
3144			nid = page_to_nid(page);
3145			obj = ____cache_alloc_node(cache,
3146				gfp_exact_node(flags), nid);
3147
3148			/*
3149			 * Another processor may allocate the objects in
3150			 * the slab since we are not holding any locks.
3151			 */
3152			if (!obj)
3153				goto retry;
3154		}
3155	}
3156
3157	if (unlikely(!obj && read_mems_allowed_retry(cpuset_mems_cookie)))
3158		goto retry_cpuset;
3159	return obj;
3160}
3161
3162/*
3163 * An interface to enable slab creation on nodeid
3164 */
3165static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
3166				int nodeid)
3167{
3168	struct page *page;
3169	struct kmem_cache_node *n;
3170	void *obj = NULL;
3171	void *list = NULL;
3172
3173	VM_BUG_ON(nodeid < 0 || nodeid >= MAX_NUMNODES);
3174	n = get_node(cachep, nodeid);
3175	BUG_ON(!n);
3176
3177	check_irq_off();
3178	spin_lock(&n->list_lock);
3179	page = get_first_slab(n, false);
3180	if (!page)
3181		goto must_grow;
3182
3183	check_spinlock_acquired_node(cachep, nodeid);
3184
3185	STATS_INC_NODEALLOCS(cachep);
3186	STATS_INC_ACTIVE(cachep);
3187	STATS_SET_HIGH(cachep);
3188
3189	BUG_ON(page->active == cachep->num);
3190
3191	obj = slab_get_obj(cachep, page);
3192	n->free_objects--;
3193
3194	fixup_slab_list(cachep, n, page, &list);
3195
3196	spin_unlock(&n->list_lock);
3197	fixup_objfreelist_debug(cachep, &list);
3198	return obj;
3199
3200must_grow:
3201	spin_unlock(&n->list_lock);
3202	page = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid);
3203	if (page) {
3204		/* This slab isn't counted yet so don't update free_objects */
3205		obj = slab_get_obj(cachep, page);
3206	}
3207	cache_grow_end(cachep, page);
3208
3209	return obj ? obj : fallback_alloc(cachep, flags);
3210}
3211
3212static __always_inline void *
3213slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
3214		   unsigned long caller)
3215{
3216	unsigned long save_flags;
3217	void *ptr;
3218	int slab_node = numa_mem_id();
3219	struct obj_cgroup *objcg = NULL;
3220
3221	flags &= gfp_allowed_mask;
3222	cachep = slab_pre_alloc_hook(cachep, &objcg, 1, flags);
3223	if (unlikely(!cachep))
3224		return NULL;
3225
3226	cache_alloc_debugcheck_before(cachep, flags);
3227	local_irq_save(save_flags);
3228
3229	if (nodeid == NUMA_NO_NODE)
3230		nodeid = slab_node;
3231
3232	if (unlikely(!get_node(cachep, nodeid))) {
3233		/* Node not bootstrapped yet */
3234		ptr = fallback_alloc(cachep, flags);
3235		goto out;
3236	}
3237
3238	if (nodeid == slab_node) {
3239		/*
3240		 * Use the locally cached objects if possible.
3241		 * However ____cache_alloc does not allow fallback
3242		 * to other nodes. It may fail while we still have
3243		 * objects on other nodes available.
3244		 */
3245		ptr = ____cache_alloc(cachep, flags);
3246		if (ptr)
3247			goto out;
3248	}
3249	/* ___cache_alloc_node can fall back to other nodes */
3250	ptr = ____cache_alloc_node(cachep, flags, nodeid);
3251  out:
3252	local_irq_restore(save_flags);
3253	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
3254
3255	if (unlikely(slab_want_init_on_alloc(flags, cachep)) && ptr)
3256		memset(ptr, 0, cachep->object_size);
3257
3258	slab_post_alloc_hook(cachep, objcg, flags, 1, &ptr);
3259	return ptr;
3260}
3261
3262static __always_inline void *
3263__do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
3264{
3265	void *objp;
3266
3267	if (current->mempolicy || cpuset_do_slab_mem_spread()) {
3268		objp = alternate_node_alloc(cache, flags);
3269		if (objp)
3270			goto out;
3271	}
3272	objp = ____cache_alloc(cache, flags);
3273
3274	/*
3275	 * We may just have run out of memory on the local node.
3276	 * ____cache_alloc_node() knows how to locate memory on other nodes
3277	 */
3278	if (!objp)
3279		objp = ____cache_alloc_node(cache, flags, numa_mem_id());
3280
3281  out:
3282	return objp;
3283}
3284#else
3285
3286static __always_inline void *
3287__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3288{
3289	return ____cache_alloc(cachep, flags);
3290}
3291
3292#endif /* CONFIG_NUMA */
3293
3294static __always_inline void *
3295slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
3296{
3297	unsigned long save_flags;
3298	void *objp;
3299	struct obj_cgroup *objcg = NULL;
3300
3301	flags &= gfp_allowed_mask;
3302	cachep = slab_pre_alloc_hook(cachep, &objcg, 1, flags);
3303	if (unlikely(!cachep))
3304		return NULL;
3305
3306	cache_alloc_debugcheck_before(cachep, flags);
3307	local_irq_save(save_flags);
3308	objp = __do_cache_alloc(cachep, flags);
3309	local_irq_restore(save_flags);
3310	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
3311	prefetchw(objp);
3312
3313	if (unlikely(slab_want_init_on_alloc(flags, cachep)) && objp)
3314		memset(objp, 0, cachep->object_size);
3315
3316	slab_post_alloc_hook(cachep, objcg, flags, 1, &objp);
3317	return objp;
3318}
3319
3320/*
3321 * Caller needs to acquire correct kmem_cache_node's list_lock
3322 * @list: List of detached free slabs should be freed by caller
3323 */
3324static void free_block(struct kmem_cache *cachep, void **objpp,
3325			int nr_objects, int node, struct list_head *list)
3326{
3327	int i;
3328	struct kmem_cache_node *n = get_node(cachep, node);
3329	struct page *page;
3330
3331	n->free_objects += nr_objects;
3332
3333	for (i = 0; i < nr_objects; i++) {
3334		void *objp;
3335		struct page *page;
3336
3337		objp = objpp[i];
3338
3339		page = virt_to_head_page(objp);
3340		list_del(&page->slab_list);
3341		check_spinlock_acquired_node(cachep, node);
3342		slab_put_obj(cachep, page, objp);
3343		STATS_DEC_ACTIVE(cachep);
3344
3345		/* fixup slab chains */
3346		if (page->active == 0) {
3347			list_add(&page->slab_list, &n->slabs_free);
3348			n->free_slabs++;
3349		} else {
3350			/* Unconditionally move a slab to the end of the
3351			 * partial list on free - maximum time for the
3352			 * other objects to be freed, too.
3353			 */
3354			list_add_tail(&page->slab_list, &n->slabs_partial);
3355		}
3356	}
3357
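	/*
	 * Trim completely free slabs once the node holds more free objects
	 * than its free_limit; they are moved to @list for the caller to
	 * destroy after dropping the list_lock.
	 */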
3358	while (n->free_objects > n->free_limit && !list_empty(&n->slabs_free)) {
3359		n->free_objects -= cachep->num;
3360
3361		page = list_last_entry(&n->slabs_free, struct page, slab_list);
3362		list_move(&page->slab_list, list);
3363		n->free_slabs--;
3364		n->total_slabs--;
3365	}
3366}
3367
3368static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
3369{
3370	int batchcount;
3371	struct kmem_cache_node *n;
3372	int node = numa_mem_id();
3373	LIST_HEAD(list);
3374
3375	batchcount = ac->batchcount;
3376
3377	check_irq_off();
3378	n = get_node(cachep, node);
3379	spin_lock(&n->list_lock);
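	/* Prefer handing objects back to the per-node shared array before freeing them to slabs. */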
3380	if (n->shared) {
3381		struct array_cache *shared_array = n->shared;
3382		int max = shared_array->limit - shared_array->avail;
3383		if (max) {
3384			if (batchcount > max)
3385				batchcount = max;
3386			memcpy(&(shared_array->entry[shared_array->avail]),
3387			       ac->entry, sizeof(void *) * batchcount);
3388			shared_array->avail += batchcount;
3389			goto free_done;
3390		}
3391	}
3392
3393	free_block(cachep, ac->entry, batchcount, node, &list);
3394free_done:
3395#if STATS
3396	{
3397		int i = 0;
3398		struct page *page;
3399
3400		list_for_each_entry(page, &n->slabs_free, slab_list) {
3401			BUG_ON(page->active);
3402
3403			i++;
3404		}
3405		STATS_SET_FREEABLE(cachep, i);
3406	}
3407#endif
3408	spin_unlock(&n->list_lock);
3409	ac->avail -= batchcount;
3410	memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
3411	slabs_destroy(cachep, &list);
3412}
3413
3414/*
3415 * Release an obj back to its cache. If the obj has a constructed state, it must
3416 * be in this state _before_ it is released.  Called with disabled ints.
3417 */
3418static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp,
3419					 unsigned long caller)
3420{
3421	/* Put the object into the quarantine, don't touch it for now. */
3422	if (kasan_slab_free(cachep, objp, _RET_IP_))
3423		return;
3424
3425	/* Use KCSAN to help debug racy use-after-free. */
3426	if (!(cachep->flags & SLAB_TYPESAFE_BY_RCU))
3427		__kcsan_check_access(objp, cachep->object_size,
3428				     KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT);
3429
3430	___cache_free(cachep, objp, caller);
3431}
3432
3433void ___cache_free(struct kmem_cache *cachep, void *objp,
3434		unsigned long caller)
3435{
3436	struct array_cache *ac = cpu_cache_get(cachep);
3437
3438	check_irq_off();
3439	if (unlikely(slab_want_init_on_free(cachep)))
3440		memset(objp, 0, cachep->object_size);
3441	kmemleak_free_recursive(objp, cachep->flags);
3442	objp = cache_free_debugcheck(cachep, objp, caller);
3443	memcg_slab_free_hook(cachep, virt_to_head_page(objp), objp);
3444
3445	/*
3446	 * Skip calling cache_free_alien() when the platform is not numa.
3447	 * This will avoid cache misses that happen while accessing slabp (which
3448	 * is a per page memory reference) to get nodeid. Instead use a global
3449	 * variable to skip the call, which is most likely to be present in
3450	 * the cache.
3451	 */
3452	if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
3453		return;
3454
3455	if (ac->avail < ac->limit) {
3456		STATS_INC_FREEHIT(cachep);
3457	} else {
3458		STATS_INC_FREEMISS(cachep);
3459		cache_flusharray(cachep, ac);
3460	}
3461
3462	if (sk_memalloc_socks()) {
3463		struct page *page = virt_to_head_page(objp);
3464
3465		if (unlikely(PageSlabPfmemalloc(page))) {
3466			cache_free_pfmemalloc(cachep, page, objp);
3467			return;
3468		}
3469	}
3470
3471	__free_one(ac, objp);
3472}
3473
3474/**
3475 * kmem_cache_alloc - Allocate an object
3476 * @cachep: The cache to allocate from.
3477 * @flags: See kmalloc().
3478 *
3479 * Allocate an object from this cache.  The flags are only relevant
3480 * if the cache has no available objects.
3481 *
3482 * Return: pointer to the new object or %NULL in case of error
3483 */
3484void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3485{
3486	void *ret = slab_alloc(cachep, flags, _RET_IP_);
3487
3488	trace_kmem_cache_alloc(_RET_IP_, ret,
3489			       cachep->object_size, cachep->size, flags);
3490
3491	return ret;
3492}
3493EXPORT_SYMBOL(kmem_cache_alloc);
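
/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * file; "foo" and foo_cache are hypothetical names):
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, NULL);
 *	obj = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, obj);
 */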
3494
3495static __always_inline void
3496cache_alloc_debugcheck_after_bulk(struct kmem_cache *s, gfp_t flags,
3497				  size_t size, void **p, unsigned long caller)
3498{
3499	size_t i;
3500
3501	for (i = 0; i < size; i++)
3502		p[i] = cache_alloc_debugcheck_after(s, flags, p[i], caller);
3503}
3504
3505int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
3506			  void **p)
3507{
3508	size_t i;
3509	struct obj_cgroup *objcg = NULL;
3510
3511	s = slab_pre_alloc_hook(s, &objcg, size, flags);
3512	if (!s)
3513		return 0;
3514
3515	cache_alloc_debugcheck_before(s, flags);
3516
3517	local_irq_disable();
3518	for (i = 0; i < size; i++) {
3519		void *objp = __do_cache_alloc(s, flags);
3520
3521		if (unlikely(!objp))
3522			goto error;
3523		p[i] = objp;
3524	}
3525	local_irq_enable();
3526
3527	cache_alloc_debugcheck_after_bulk(s, flags, size, p, _RET_IP_);
3528
3529	/* Clear memory outside IRQ disabled section */
3530	if (unlikely(slab_want_init_on_alloc(flags, s)))
3531		for (i = 0; i < size; i++)
3532			memset(p[i], 0, s->object_size);
3533
3534	slab_post_alloc_hook(s, objcg, flags, size, p);
3535	/* FIXME: Trace call missing. Christoph would like a bulk variant */
3536	return size;
3537error:
3538	local_irq_enable();
3539	cache_alloc_debugcheck_after_bulk(s, flags, i, p, _RET_IP_);
3540	slab_post_alloc_hook(s, objcg, flags, i, p);
3541	__kmem_cache_free_bulk(s, i, p);
3542	return 0;
3543}
3544EXPORT_SYMBOL(kmem_cache_alloc_bulk);
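
/*
 * Illustrative bulk usage sketch (editorial addition; "s" and "objs" are
 * hypothetical names):
 *
 *	void *objs[16];
 *
 *	if (kmem_cache_alloc_bulk(s, GFP_KERNEL, ARRAY_SIZE(objs), objs)) {
 *		...
 *		kmem_cache_free_bulk(s, ARRAY_SIZE(objs), objs);
 *	}
 *
 * The return value is all-or-nothing: either @size objects were allocated
 * or none were.
 */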
3545
3546#ifdef CONFIG_TRACING
3547void *
3548kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
3549{
3550	void *ret;
3551
3552	ret = slab_alloc(cachep, flags, _RET_IP_);
3553
3554	ret = kasan_kmalloc(cachep, ret, size, flags);
3555	trace_kmalloc(_RET_IP_, ret,
3556		      size, cachep->size, flags);
3557	return ret;
3558}
3559EXPORT_SYMBOL(kmem_cache_alloc_trace);
3560#endif
3561
3562#ifdef CONFIG_NUMA
3563/**
3564 * kmem_cache_alloc_node - Allocate an object on the specified node
3565 * @cachep: The cache to allocate from.
3566 * @flags: See kmalloc().
3567 * @nodeid: node number of the target node.
3568 *
3569 * Identical to kmem_cache_alloc but it will allocate memory on the given
3570 * node, which can improve the performance for cpu bound structures.
3571 *
3572 * Fallback to other node is possible if __GFP_THISNODE is not set.
3573 *
3574 * Return: pointer to the new object or %NULL in case of error
3575 */
3576void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
3577{
3578	void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
3579
3580	trace_kmem_cache_alloc_node(_RET_IP_, ret,
3581				    cachep->object_size, cachep->size,
3582				    flags, nodeid);
3583
3584	return ret;
3585}
3586EXPORT_SYMBOL(kmem_cache_alloc_node);
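
/*
 * Illustrative NUMA usage sketch (editorial addition): place a per-CPU
 * structure on the node backing that CPU, e.g.
 *
 *	obj = kmem_cache_alloc_node(cachep, GFP_KERNEL, cpu_to_node(cpu));
 */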
3587
3588#ifdef CONFIG_TRACING
3589void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
3590				  gfp_t flags,
3591				  int nodeid,
3592				  size_t size)
3593{
3594	void *ret;
3595
3596	ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
3597
3598	ret = kasan_kmalloc(cachep, ret, size, flags);
3599	trace_kmalloc_node(_RET_IP_, ret,
3600			   size, cachep->size,
3601			   flags, nodeid);
3602	return ret;
3603}
3604EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
3605#endif
3606
3607static __always_inline void *
3608__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
3609{
3610	struct kmem_cache *cachep;
3611	void *ret;
3612
3613	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
3614		return NULL;
3615	cachep = kmalloc_slab(size, flags);
3616	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3617		return cachep;
3618	ret = kmem_cache_alloc_node_trace(cachep, flags, node, size);
3619	ret = kasan_kmalloc(cachep, ret, size, flags);
3620
3621	return ret;
3622}
3623
3624void *__kmalloc_node(size_t size, gfp_t flags, int node)
3625{
3626	return __do_kmalloc_node(size, flags, node, _RET_IP_);
3627}
3628EXPORT_SYMBOL(__kmalloc_node);
3629
3630void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
3631		int node, unsigned long caller)
3632{
3633	return __do_kmalloc_node(size, flags, node, caller);
3634}
3635EXPORT_SYMBOL(__kmalloc_node_track_caller);
3636#endif /* CONFIG_NUMA */
3637
3638/**
3639 * __do_kmalloc - allocate memory
3640 * @size: how many bytes of memory are required.
3641 * @flags: the type of memory to allocate (see kmalloc).
3642 * @caller: function caller for debug tracking of the caller
3643 *
3644 * Return: pointer to the allocated memory or %NULL in case of error
3645 */
3646static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
3647					  unsigned long caller)
3648{
3649	struct kmem_cache *cachep;
3650	void *ret;
3651
3652	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
3653		return NULL;
3654	cachep = kmalloc_slab(size, flags);
3655	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3656		return cachep;
3657	ret = slab_alloc(cachep, flags, caller);
3658
3659	ret = kasan_kmalloc(cachep, ret, size, flags);
3660	trace_kmalloc(caller, ret,
3661		      size, cachep->size, flags);
3662
3663	return ret;
3664}
3665
3666void *__kmalloc(size_t size, gfp_t flags)
3667{
3668	return __do_kmalloc(size, flags, _RET_IP_);
3669}
3670EXPORT_SYMBOL(__kmalloc);
3671
3672void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
3673{
3674	return __do_kmalloc(size, flags, caller);
3675}
3676EXPORT_SYMBOL(__kmalloc_track_caller);
3677
3678/**
3679 * kmem_cache_free - Deallocate an object
3680 * @cachep: The cache the allocation was from.
3681 * @objp: The previously allocated object.
3682 *
3683 * Free an object which was previously allocated from this
3684 * cache.
3685 */
3686void kmem_cache_free(struct kmem_cache *cachep, void *objp)
3687{
3688	unsigned long flags;
3689	cachep = cache_from_obj(cachep, objp);
3690	if (!cachep)
3691		return;
3692
3693	local_irq_save(flags);
3694	debug_check_no_locks_freed(objp, cachep->object_size);
3695	if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
3696		debug_check_no_obj_freed(objp, cachep->object_size);
3697	__cache_free(cachep, objp, _RET_IP_);
3698	local_irq_restore(flags);
3699
3700	trace_kmem_cache_free(_RET_IP_, objp);
3701}
3702EXPORT_SYMBOL(kmem_cache_free);
3703
3704void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
3705{
3706	struct kmem_cache *s;
3707	size_t i;
3708
3709	local_irq_disable();
3710	for (i = 0; i < size; i++) {
3711		void *objp = p[i];
3712
3713		if (!orig_s) /* called via kfree_bulk */
3714			s = virt_to_cache(objp);
3715		else
3716			s = cache_from_obj(orig_s, objp);
3717		if (!s)
3718			continue;
3719
3720		debug_check_no_locks_freed(objp, s->object_size);
3721		if (!(s->flags & SLAB_DEBUG_OBJECTS))
3722			debug_check_no_obj_freed(objp, s->object_size);
3723
3724		__cache_free(s, objp, _RET_IP_);
3725	}
3726	local_irq_enable();
3727
3728	/* FIXME: add tracing */
3729}
3730EXPORT_SYMBOL(kmem_cache_free_bulk);
3731
3732/**
3733 * kfree - free previously allocated memory
3734 * @objp: pointer returned by kmalloc.
3735 *
3736 * If @objp is NULL, no operation is performed.
3737 *
3738 * Don't free memory not originally allocated by kmalloc()
3739 * or you will run into trouble.
3740 */
3741void kfree(const void *objp)
3742{
3743	struct kmem_cache *c;
3744	unsigned long flags;
3745
3746	trace_kfree(_RET_IP_, objp);
3747
3748	if (unlikely(ZERO_OR_NULL_PTR(objp)))
3749		return;
3750	local_irq_save(flags);
3751	kfree_debugcheck(objp);
3752	c = virt_to_cache(objp);
3753	if (!c) {
3754		local_irq_restore(flags);
3755		return;
3756	}
3757	debug_check_no_locks_freed(objp, c->object_size);
3758
3759	debug_check_no_obj_freed(objp, c->object_size);
3760	__cache_free(c, (void *)objp, _RET_IP_);
3761	local_irq_restore(flags);
3762}
3763EXPORT_SYMBOL(kfree);
3764
3765/*
3766 * This initializes kmem_cache_node or resizes various caches for all nodes.
3767 */
3768static int setup_kmem_cache_nodes(struct kmem_cache *cachep, gfp_t gfp)
3769{
3770	int ret;
3771	int node;
3772	struct kmem_cache_node *n;
3773
3774	for_each_online_node(node) {
3775		ret = setup_kmem_cache_node(cachep, node, gfp, true);
3776		if (ret)
3777			goto fail;
3778
3779	}
3780
3781	return 0;
3782
3783fail:
3784	if (!cachep->list.next) {
3785		/* Cache is not active yet. Roll back what we did */
3786		node--;
3787		while (node >= 0) {
3788			n = get_node(cachep, node);
3789			if (n) {
3790				kfree(n->shared);
3791				free_alien_cache(n->alien);
3792				kfree(n);
3793				cachep->node[node] = NULL;
3794			}
3795			node--;
3796		}
3797	}
3798	return -ENOMEM;
3799}
3800
3801/* Always called with the slab_mutex held */
3802static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
3803			    int batchcount, int shared, gfp_t gfp)
3804{
3805	struct array_cache __percpu *cpu_cache, *prev;
3806	int cpu;
3807
3808	cpu_cache = alloc_kmem_cache_cpus(cachep, limit, batchcount);
3809	if (!cpu_cache)
3810		return -ENOMEM;
3811
3812	prev = cachep->cpu_cache;
3813	cachep->cpu_cache = cpu_cache;
3814	/*
3815	 * Without a previous cpu_cache there's no need to synchronize remote
3816	 * cpus, so skip the IPIs.
3817	 */
3818	if (prev)
3819		kick_all_cpus_sync();
3820
3821	check_irq_on();
3822	cachep->batchcount = batchcount;
3823	cachep->limit = limit;
3824	cachep->shared = shared;
3825
3826	if (!prev)
3827		goto setup_node;
3828
3829	for_each_online_cpu(cpu) {
3830		LIST_HEAD(list);
3831		int node;
3832		struct kmem_cache_node *n;
3833		struct array_cache *ac = per_cpu_ptr(prev, cpu);
3834
3835		node = cpu_to_mem(cpu);
3836		n = get_node(cachep, node);
3837		spin_lock_irq(&n->list_lock);
3838		free_block(cachep, ac->entry, ac->avail, node, &list);
3839		spin_unlock_irq(&n->list_lock);
3840		slabs_destroy(cachep, &list);
3841	}
3842	free_percpu(prev);
3843
3844setup_node:
3845	return setup_kmem_cache_nodes(cachep, gfp);
3846}
3847
3848/* Always called with the slab_mutex held */
3849static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
3850{
3851	int err;
3852	int limit = 0;
3853	int shared = 0;
3854	int batchcount = 0;
3855
3856	err = cache_random_seq_create(cachep, cachep->num, gfp);
3857	if (err)
3858		goto end;
3859
3860	if (limit && shared && batchcount)
3861		goto skip_setup;
3862	/*
3863	 * The head array serves three purposes:
3864	 * - create a LIFO ordering, i.e. return objects that are cache-warm
3865	 * - reduce the number of spinlock operations.
3866	 * - reduce the number of linked list operations on the slab and
3867	 *   bufctl chains: array operations are cheaper.
3868	 * The numbers are guessed; we should auto-tune them as described by
3869	 * Bonwick.
3870	 */
3871	if (cachep->size > 131072)
3872		limit = 1;
3873	else if (cachep->size > PAGE_SIZE)
3874		limit = 8;
3875	else if (cachep->size > 1024)
3876		limit = 24;
3877	else if (cachep->size > 256)
3878		limit = 54;
3879	else
3880		limit = 120;
3881
3882	/*
3883	 * CPU bound tasks (e.g. network routing) can exhibit cpu bound
3884	 * allocation behaviour: Most allocs on one cpu, most free operations
3885	 * on another cpu. For these cases, an efficient object passing between
3886	 * cpus is necessary. This is provided by a shared array. The array
3887	 * replaces Bonwick's magazine layer.
3888	 * On uniprocessor, it's functionally equivalent (but less efficient)
3889	 * to a larger limit. Thus disabled by default.
3890	 */
3891	shared = 0;
3892	if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1)
3893		shared = 8;
3894
3895#if DEBUG
3896	/*
3897	 * With debugging enabled, a large batchcount leads to excessively long
3898	 * periods with local interrupts disabled. Limit the batchcount.
3899	 */
3900	if (limit > 32)
3901		limit = 32;
3902#endif
3903	batchcount = (limit + 1) / 2;
3904skip_setup:
3905	err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
3906end:
3907	if (err)
3908		pr_err("enable_cpucache failed for %s, error %d\n",
3909		       cachep->name, -err);
3910	return err;
3911}
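/*
 * Worked example of the sizing heuristic above (editor's annotation,
 * assuming a 4K PAGE_SIZE, SMP and no CONFIG_DEBUG_SLAB): a cache with
 * 512-byte objects falls into the "> 256" bucket, so limit = 54,
 * shared = 8 (the object fits in a page) and batchcount = (54 + 1) / 2 = 27.
 * An 8192-byte cache (> PAGE_SIZE) instead gets limit = 8, shared = 0 and
 * batchcount = 4.
 */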
3912
3913/*
3914 * Drain an array if it contains any elements, taking the node lock only if
3915 * necessary. Note that the node listlock also protects the array_cache
3916 * if drain_array() is used on the shared array.
3917 */
3918static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
3919			 struct array_cache *ac, int node)
3920{
3921	LIST_HEAD(list);
3922
3923	/* ac from n->shared can be freed if we don't hold the slab_mutex. */
3924	check_mutex_acquired();
3925
3926	if (!ac || !ac->avail)
3927		return;
3928
3929	if (ac->touched) {
3930		ac->touched = 0;
3931		return;
3932	}
3933
3934	spin_lock_irq(&n->list_lock);
3935	drain_array_locked(cachep, ac, node, false, &list);
3936	spin_unlock_irq(&n->list_lock);
3937
3938	slabs_destroy(cachep, &list);
3939}
3940
3941/**
3942 * cache_reap - Reclaim memory from caches.
3943 * @w: work descriptor
3944 *
3945 * Called from workqueue/eventd every few seconds.
3946 * Purpose:
3947 * - clear the per-cpu caches for this CPU.
3948 * - return freeable pages to the main free memory pool.
3949 *
3950 * If we cannot acquire the cache chain mutex then just give up - we'll try
3951 * again on the next iteration.
3952 */
3953static void cache_reap(struct work_struct *w)
3954{
3955	struct kmem_cache *searchp;
3956	struct kmem_cache_node *n;
3957	int node = numa_mem_id();
3958	struct delayed_work *work = to_delayed_work(w);
3959
3960	if (!mutex_trylock(&slab_mutex))
3961		/* Give up. Set up the next iteration. */
3962		goto out;
3963
3964	list_for_each_entry(searchp, &slab_caches, list) {
3965		check_irq_on();
3966
3967		/*
3968		 * We only take the node lock if absolutely necessary and we
3969		 * have established with reasonable certainty that
3970		 * we can do some work if the lock was obtained.
3971		 */
3972		n = get_node(searchp, node);
3973
3974		reap_alien(searchp, n);
3975
3976		drain_array(searchp, n, cpu_cache_get(searchp), node);
3977
3978		/*
3979		 * These are racy checks but it does not matter
3980		 * if we skip one check or scan twice.
3981		 */
3982		if (time_after(n->next_reap, jiffies))
3983			goto next;
3984
3985		n->next_reap = jiffies + REAPTIMEOUT_NODE;
3986
3987		drain_array(searchp, n, n->shared, node);
3988
3989		if (n->free_touched)
3990			n->free_touched = 0;
3991		else {
3992			int freed;
3993
3994			freed = drain_freelist(searchp, n, (n->free_limit +
3995				5 * searchp->num - 1) / (5 * searchp->num));
3996			STATS_ADD_REAPED(searchp, freed);
3997		}
3998next:
3999		cond_resched();
4000	}
4001	check_irq_on();
4002	mutex_unlock(&slab_mutex);
4003	next_reap_node();
4004out:
4005	/* Set up the next iteration */
4006	schedule_delayed_work_on(smp_processor_id(), work,
4007				round_jiffies_relative(REAPTIMEOUT_AC));
4008}
4009
4010void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
4011{
4012	unsigned long active_objs, num_objs, active_slabs;
4013	unsigned long total_slabs = 0, free_objs = 0, shared_avail = 0;
4014	unsigned long free_slabs = 0;
4015	int node;
4016	struct kmem_cache_node *n;
4017
4018	for_each_kmem_cache_node(cachep, node, n) {
4019		check_irq_on();
4020		spin_lock_irq(&n->list_lock);
4021
4022		total_slabs += n->total_slabs;
4023		free_slabs += n->free_slabs;
4024		free_objs += n->free_objects;
4025
4026		if (n->shared)
4027			shared_avail += n->shared->avail;
4028
4029		spin_unlock_irq(&n->list_lock);
4030	}
4031	num_objs = total_slabs * cachep->num;
4032	active_slabs = total_slabs - free_slabs;
4033	active_objs = num_objs - free_objs;
4034
4035	sinfo->active_objs = active_objs;
4036	sinfo->num_objs = num_objs;
4037	sinfo->active_slabs = active_slabs;
4038	sinfo->num_slabs = total_slabs;
4039	sinfo->shared_avail = shared_avail;
4040	sinfo->limit = cachep->limit;
4041	sinfo->batchcount = cachep->batchcount;
4042	sinfo->shared = cachep->shared;
4043	sinfo->objects_per_slab = cachep->num;
4044	sinfo->cache_order = cachep->gfporder;
4045}
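/*
 * Worked example (editor's annotation, hypothetical numbers): with
 * total_slabs == 100, free_slabs == 20, free_objects == 350 and
 * cachep->num == 15 objects per slab, the derived values are
 * num_objs = 100 * 15 = 1500, active_slabs = 100 - 20 = 80 and
 * active_objs = 1500 - 350 = 1150, which is what /proc/slabinfo reports.
 */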
4046
4047void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
4048{
4049#if STATS
4050	{			/* node stats */
4051		unsigned long high = cachep->high_mark;
4052		unsigned long allocs = cachep->num_allocations;
4053		unsigned long grown = cachep->grown;
4054		unsigned long reaped = cachep->reaped;
4055		unsigned long errors = cachep->errors;
4056		unsigned long max_freeable = cachep->max_freeable;
4057		unsigned long node_allocs = cachep->node_allocs;
4058		unsigned long node_frees = cachep->node_frees;
4059		unsigned long overflows = cachep->node_overflow;
4060
4061		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu %4lu %4lu %4lu %4lu %4lu",
4062			   allocs, high, grown,
4063			   reaped, errors, max_freeable, node_allocs,
4064			   node_frees, overflows);
4065	}
4066	/* cpu stats */
4067	{
4068		unsigned long allochit = atomic_read(&cachep->allochit);
4069		unsigned long allocmiss = atomic_read(&cachep->allocmiss);
4070		unsigned long freehit = atomic_read(&cachep->freehit);
4071		unsigned long freemiss = atomic_read(&cachep->freemiss);
4072
4073		seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
4074			   allochit, allocmiss, freehit, freemiss);
4075	}
4076#endif
4077}
4078
4079#define MAX_SLABINFO_WRITE 128
4080/**
4081 * slabinfo_write - Tuning for the slab allocator
4082 * @file: unused
4083 * @buffer: user buffer
4084 * @count: data length
4085 * @ppos: unused
4086 *
4087 * Return: @count on success, negative error code otherwise.
4088 */
4089ssize_t slabinfo_write(struct file *file, const char __user *buffer,
4090		       size_t count, loff_t *ppos)
4091{
4092	char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
4093	int limit, batchcount, shared, res;
4094	struct kmem_cache *cachep;
4095
4096	if (count > MAX_SLABINFO_WRITE)
4097		return -EINVAL;
4098	if (copy_from_user(&kbuf, buffer, count))
4099		return -EFAULT;
4100	kbuf[MAX_SLABINFO_WRITE] = '\0';
4101
4102	tmp = strchr(kbuf, ' ');
4103	if (!tmp)
4104		return -EINVAL;
4105	*tmp = '\0';
4106	tmp++;
4107	if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
4108		return -EINVAL;
4109
4110	/* Find the cache in the chain of caches. */
4111	mutex_lock(&slab_mutex);
4112	res = -EINVAL;
4113	list_for_each_entry(cachep, &slab_caches, list) {
4114		if (!strcmp(cachep->name, kbuf)) {
4115			if (limit < 1 || batchcount < 1 ||
4116					batchcount > limit || shared < 0) {
4117				res = 0;
4118			} else {
4119				res = do_tune_cpucache(cachep, limit,
4120						       batchcount, shared,
4121						       GFP_KERNEL);
4122			}
4123			break;
4124		}
4125	}
4126	mutex_unlock(&slab_mutex);
4127	if (res >= 0)
4128		res = count;
4129	return res;
4130}
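/*
 * Editor's illustrative sketch (not part of slab.c): a userspace program
 * tuning a cache through /proc/slabinfo.  The write format parsed above is
 * "<cache-name> <limit> <batchcount> <shared>"; the cache name and values
 * below are examples only, and the write typically requires root.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/slabinfo", "w");

	if (!f)
		return 1;
	/* limit >= batchcount >= 1 and shared >= 0, otherwise the write is a no-op */
	fprintf(f, "dentry 120 60 8\n");
	return fclose(f) ? 1 : 0;
}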
4131
4132#ifdef CONFIG_HARDENED_USERCOPY
4133/*
4134 * Rejects incorrectly sized objects and objects that are to be copied
4135 * to/from userspace but do not fall entirely within the containing slab
4136 * cache's usercopy region.
4137 *
4138 * Returns nothing if the check passes; a failing check either warns via
4139 * usercopy_warn() (usercopy_fallback within the object) or calls usercopy_abort().
4140 */
4141void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
4142			 bool to_user)
4143{
4144	struct kmem_cache *cachep;
4145	unsigned int objnr;
4146	unsigned long offset;
4147
4148	ptr = kasan_reset_tag(ptr);
4149
4150	/* Find and validate object. */
4151	cachep = page->slab_cache;
4152	objnr = obj_to_index(cachep, page, (void *)ptr);
4153	BUG_ON(objnr >= cachep->num);
4154
4155	/* Find offset within object. */
4156	offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
4157
4158	/* Allow address range falling entirely within usercopy region. */
4159	if (offset >= cachep->useroffset &&
4160	    offset - cachep->useroffset <= cachep->usersize &&
4161	    n <= cachep->useroffset - offset + cachep->usersize)
4162		return;
4163
4164	/*
4165	 * If the copy is still within the allocated object, produce
4166	 * a warning instead of rejecting the copy. This is intended
4167	 * to be a temporary method to find any missing usercopy
4168	 * whitelists.
4169	 */
4170	if (usercopy_fallback &&
4171	    offset <= cachep->object_size &&
4172	    n <= cachep->object_size - offset) {
4173		usercopy_warn("SLAB object", cachep->name, to_user, offset, n);
4174		return;
4175	}
4176
4177	usercopy_abort("SLAB object", cachep->name, to_user, offset, n);
4178}
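/*
 * Worked example for the usercopy window check above (editor's annotation,
 * hypothetical values): with useroffset == 16 and usersize == 64, a copy of
 * n == 40 bytes at offset == 24 passes, since 24 >= 16, 24 - 16 = 8 <= 64 and
 * 40 <= 16 - 24 + 64 = 56.  A 64-byte copy at the same offset exceeds the
 * window (64 > 56) and falls through to the warn/abort paths above.
 */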
4179#endif /* CONFIG_HARDENED_USERCOPY */
4180
4181/**
4182 * __ksize -- Uninstrumented ksize.
4183 * @objp: pointer to the object
4184 *
4185 * Unlike ksize(), __ksize() is uninstrumented, and does not provide the same
4186 * safety checks as ksize() with KASAN instrumentation enabled.
4187 *
4188 * Return: size of the actual memory used by @objp in bytes
4189 */
4190size_t __ksize(const void *objp)
4191{
4192	struct kmem_cache *c;
4193	size_t size;
4194
4195	BUG_ON(!objp);
4196	if (unlikely(objp == ZERO_SIZE_PTR))
4197		return 0;
4198
4199	c = virt_to_cache(objp);
4200	size = c ? c->object_size : 0;
4201
4202	return size;
4203}
4204EXPORT_SYMBOL(__ksize);
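/*
 * Editor's illustrative sketch (not part of slab.c, assuming the usual
 * kmalloc size classes): ksize()/__ksize() report the size of the backing
 * object rather than the requested size, so the caller may legitimately use
 * the whole object.  The function name is an assumption made for the example.
 */
static void ksize_usage_example(void)
{
	char *buf = kmalloc(100, GFP_KERNEL);	/* typically served from kmalloc-128 */

	if (!buf)
		return;
	memset(buf, 0, ksize(buf));	/* ksize(buf) would be 128 here, all usable */
	kfree(buf);
}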
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * linux/mm/slab.c
   4 * Written by Mark Hemment, 1996/97.
   5 * (markhe@nextd.demon.co.uk)
   6 *
   7 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
   8 *
   9 * Major cleanup, different bufctl logic, per-cpu arrays
  10 *	(c) 2000 Manfred Spraul
  11 *
  12 * Cleanup, make the head arrays unconditional, preparation for NUMA
  13 * 	(c) 2002 Manfred Spraul
  14 *
  15 * An implementation of the Slab Allocator as described in outline in;
  16 *	UNIX Internals: The New Frontiers by Uresh Vahalia
  17 *	Pub: Prentice Hall	ISBN 0-13-101908-2
  18 * or with a little more detail in;
  19 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
  20 *	Jeff Bonwick (Sun Microsystems).
  21 *	Presented at: USENIX Summer 1994 Technical Conference
  22 *
  23 * The memory is organized in caches, one cache for each object type.
  24 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
  25 * Each cache consists out of many slabs (they are small (usually one
  26 * page long) and always contiguous), and each slab contains multiple
  27 * initialized objects.
  28 *
  29 * This means, that your constructor is used only for newly allocated
  30 * slabs and you must pass objects with the same initializations to
  31 * kmem_cache_free.
  32 *
  33 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
  34 * normal). If you need a special memory type, then must create a new
  35 * cache for that memory type.
  36 *
  37 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
  38 *   full slabs with 0 free objects
  39 *   partial slabs
  40 *   empty slabs with no allocated objects
  41 *
  42 * If partial slabs exist, then new allocations come from these slabs,
  43 * otherwise from empty slabs or new slabs are allocated.
  44 *
  45 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
  46 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
  47 *
  48 * Each cache has a short per-cpu head array, most allocs
  49 * and frees go into that array, and if that array overflows, then 1/2
  50 * of the entries in the array are given back into the global cache.
  51 * The head array is strictly LIFO and should improve the cache hit rates.
  52 * On SMP, it additionally reduces the spinlock operations.
  53 *
  54 * The c_cpuarray may not be read with enabled local interrupts -
  55 * it's changed with a smp_call_function().
  56 *
  57 * SMP synchronization:
  58 *  constructors and destructors are called without any locking.
  59 *  Several members in struct kmem_cache and struct slab never change, they
  60 *	are accessed without any locking.
  61 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
  62 *  	and local interrupts are disabled so slab code is preempt-safe.
  63 *  The non-constant members are protected with a per-cache irq spinlock.
  64 *
  65 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
  66 * in 2000 - many ideas in the current implementation are derived from
  67 * his patch.
  68 *
  69 * Further notes from the original documentation:
  70 *
  71 * 11 April '97.  Started multi-threading - markhe
  72 *	The global cache-chain is protected by the mutex 'slab_mutex'.
  73 *	The sem is only needed when accessing/extending the cache-chain, which
  74 *	can never happen inside an interrupt (kmem_cache_create(),
  75 *	kmem_cache_shrink() and kmem_cache_reap()).
  76 *
  77 *	At present, each engine can be growing a cache.  This should be blocked.
  78 *
  79 * 15 March 2005. NUMA slab allocator.
  80 *	Shai Fultheim <shai@scalex86.org>.
  81 *	Shobhit Dayal <shobhit@calsoftinc.com>
  82 *	Alok N Kataria <alokk@calsoftinc.com>
  83 *	Christoph Lameter <christoph@lameter.com>
  84 *
  85 *	Modified the slab allocator to be node aware on NUMA systems.
  86 *	Each node has its own list of partial, free and full slabs.
  87 *	All object allocations for a node occur from node specific slab lists.
  88 */
  89
  90#include	<linux/slab.h>
  91#include	<linux/mm.h>
  92#include	<linux/poison.h>
  93#include	<linux/swap.h>
  94#include	<linux/cache.h>
  95#include	<linux/interrupt.h>
  96#include	<linux/init.h>
  97#include	<linux/compiler.h>
  98#include	<linux/cpuset.h>
  99#include	<linux/proc_fs.h>
 100#include	<linux/seq_file.h>
 101#include	<linux/notifier.h>
 102#include	<linux/kallsyms.h>
 103#include	<linux/kfence.h>
 104#include	<linux/cpu.h>
 105#include	<linux/sysctl.h>
 106#include	<linux/module.h>
 107#include	<linux/rcupdate.h>
 108#include	<linux/string.h>
 109#include	<linux/uaccess.h>
 110#include	<linux/nodemask.h>
 111#include	<linux/kmemleak.h>
 112#include	<linux/mempolicy.h>
 113#include	<linux/mutex.h>
 114#include	<linux/fault-inject.h>
 115#include	<linux/rtmutex.h>
 116#include	<linux/reciprocal_div.h>
 117#include	<linux/debugobjects.h>
 118#include	<linux/memory.h>
 119#include	<linux/prefetch.h>
 120#include	<linux/sched/task_stack.h>
 121
 122#include	<net/sock.h>
 123
 124#include	<asm/cacheflush.h>
 125#include	<asm/tlbflush.h>
 126#include	<asm/page.h>
 127
 128#include <trace/events/kmem.h>
 129
 130#include	"internal.h"
 131
 132#include	"slab.h"
 133
 134/*
 135 * DEBUG	- 1 for kmem_cache_create() to honour SLAB_RED_ZONE & SLAB_POISON.
 136 *		  0 for faster, smaller code (especially in the critical paths).
 137 *
 138 * STATS	- 1 to collect stats for /proc/slabinfo.
 139 *		  0 for faster, smaller code (especially in the critical paths).
 140 *
 141 * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
 142 */
 143
 144#ifdef CONFIG_DEBUG_SLAB
 145#define	DEBUG		1
 146#define	STATS		1
 147#define	FORCED_DEBUG	1
 148#else
 149#define	DEBUG		0
 150#define	STATS		0
 151#define	FORCED_DEBUG	0
 152#endif
 153
 154/* Shouldn't this be in a header file somewhere? */
 155#define	BYTES_PER_WORD		sizeof(void *)
 156#define	REDZONE_ALIGN		max(BYTES_PER_WORD, __alignof__(unsigned long long))
 157
 158#ifndef ARCH_KMALLOC_FLAGS
 159#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
 160#endif
 161
 162#define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \
 163				<= SLAB_OBJ_MIN_SIZE) ? 1 : 0)
 164
 165#if FREELIST_BYTE_INDEX
 166typedef unsigned char freelist_idx_t;
 167#else
 168typedef unsigned short freelist_idx_t;
 169#endif
 170
 171#define SLAB_OBJ_MAX_NUM ((1 << sizeof(freelist_idx_t) * BITS_PER_BYTE) - 1)
 172
 173/*
 174 * struct array_cache
 175 *
 176 * Purpose:
 177 * - LIFO ordering, to hand out cache-warm objects from _alloc
 178 * - reduce the number of linked list operations
 179 * - reduce spinlock operations
 180 *
 181 * The limit is stored in the per-cpu structure to reduce the data cache
 182 * footprint.
 183 *
 184 */
 185struct array_cache {
 186	unsigned int avail;
 187	unsigned int limit;
 188	unsigned int batchcount;
 189	unsigned int touched;
 190	void *entry[];	/*
 191			 * Must have this definition in here for the proper
 192			 * alignment of array_cache. Also simplifies accessing
 193			 * the entries.
 194			 */
 195};
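/*
 * Editor's illustrative sketch (not part of slab.c): entry[] is used as a
 * LIFO stack elsewhere in this file: a free pushes at entry[avail++] and an
 * allocation pops entry[--avail], so the most recently freed (cache-warm)
 * object is handed out first.  The helper names are assumptions made for the
 * example.
 */
static inline void example_ac_push(struct array_cache *ac, void *objp)
{
	ac->entry[ac->avail++] = objp;		/* what a free into the array does */
}

static inline void *example_ac_pop(struct array_cache *ac)
{
	return ac->entry[--ac->avail];		/* what the allocation fast path does */
}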
 196
 197struct alien_cache {
 198	spinlock_t lock;
 199	struct array_cache ac;
 200};
 201
 202/*
 203 * Need this for bootstrapping a per node allocator.
 204 */
 205#define NUM_INIT_LISTS (2 * MAX_NUMNODES)
 206static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS];
 207#define	CACHE_CACHE 0
 208#define	SIZE_NODE (MAX_NUMNODES)
 209
 210static int drain_freelist(struct kmem_cache *cache,
 211			struct kmem_cache_node *n, int tofree);
 212static void free_block(struct kmem_cache *cachep, void **objpp, int len,
 213			int node, struct list_head *list);
 214static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list);
 215static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
 216static void cache_reap(struct work_struct *unused);
 217
 218static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
 219						void **list);
 220static inline void fixup_slab_list(struct kmem_cache *cachep,
 221				struct kmem_cache_node *n, struct page *page,
 222				void **list);
 223static int slab_early_init = 1;
 224
 225#define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node))
 226
 227static void kmem_cache_node_init(struct kmem_cache_node *parent)
 228{
 229	INIT_LIST_HEAD(&parent->slabs_full);
 230	INIT_LIST_HEAD(&parent->slabs_partial);
 231	INIT_LIST_HEAD(&parent->slabs_free);
 232	parent->total_slabs = 0;
 233	parent->free_slabs = 0;
 234	parent->shared = NULL;
 235	parent->alien = NULL;
 236	parent->colour_next = 0;
 237	spin_lock_init(&parent->list_lock);
 238	parent->free_objects = 0;
 239	parent->free_touched = 0;
 240}
 241
 242#define MAKE_LIST(cachep, listp, slab, nodeid)				\
 243	do {								\
 244		INIT_LIST_HEAD(listp);					\
 245		list_splice(&get_node(cachep, nodeid)->slab, listp);	\
 246	} while (0)
 247
 248#define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
 249	do {								\
 250	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
 251	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
 252	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
 253	} while (0)
 254
 255#define CFLGS_OBJFREELIST_SLAB	((slab_flags_t __force)0x40000000U)
 256#define CFLGS_OFF_SLAB		((slab_flags_t __force)0x80000000U)
 257#define	OBJFREELIST_SLAB(x)	((x)->flags & CFLGS_OBJFREELIST_SLAB)
 258#define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)
 259
 260#define BATCHREFILL_LIMIT	16
 261/*
 262 * Optimization question: fewer reaps means less probability for unnecessary
 263 * cpucache drain/refill cycles.
 264 *
 265 * OTOH the cpuarrays can contain lots of objects,
 266 * which could lock up otherwise freeable slabs.
 267 */
 268#define REAPTIMEOUT_AC		(2*HZ)
 269#define REAPTIMEOUT_NODE	(4*HZ)
 270
 271#if STATS
 272#define	STATS_INC_ACTIVE(x)	((x)->num_active++)
 273#define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
 274#define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
 275#define	STATS_INC_GROWN(x)	((x)->grown++)
 276#define	STATS_ADD_REAPED(x, y)	((x)->reaped += (y))
 277#define	STATS_SET_HIGH(x)						\
 278	do {								\
 279		if ((x)->num_active > (x)->high_mark)			\
 280			(x)->high_mark = (x)->num_active;		\
 281	} while (0)
 282#define	STATS_INC_ERR(x)	((x)->errors++)
 283#define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
 284#define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
 285#define STATS_INC_ACOVERFLOW(x)   ((x)->node_overflow++)
 286#define	STATS_SET_FREEABLE(x, i)					\
 287	do {								\
 288		if ((x)->max_freeable < i)				\
 289			(x)->max_freeable = i;				\
 290	} while (0)
 291#define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
 292#define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
 293#define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
 294#define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
 295#else
 296#define	STATS_INC_ACTIVE(x)	do { } while (0)
 297#define	STATS_DEC_ACTIVE(x)	do { } while (0)
 298#define	STATS_INC_ALLOCED(x)	do { } while (0)
 299#define	STATS_INC_GROWN(x)	do { } while (0)
 300#define	STATS_ADD_REAPED(x, y)	do { (void)(y); } while (0)
 301#define	STATS_SET_HIGH(x)	do { } while (0)
 302#define	STATS_INC_ERR(x)	do { } while (0)
 303#define	STATS_INC_NODEALLOCS(x)	do { } while (0)
 304#define	STATS_INC_NODEFREES(x)	do { } while (0)
 305#define STATS_INC_ACOVERFLOW(x)   do { } while (0)
 306#define	STATS_SET_FREEABLE(x, i) do { } while (0)
 307#define STATS_INC_ALLOCHIT(x)	do { } while (0)
 308#define STATS_INC_ALLOCMISS(x)	do { } while (0)
 309#define STATS_INC_FREEHIT(x)	do { } while (0)
 310#define STATS_INC_FREEMISS(x)	do { } while (0)
 311#endif
 312
 313#if DEBUG
 314
 315/*
 316 * memory layout of objects:
 317 * 0		: objp
 318 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
 319 * 		the end of an object is aligned with the end of the real
 320 * 		allocation. Catches writes behind the end of the allocation.
 321 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
 322 * 		redzone word.
 323 * cachep->obj_offset: The real object.
 324 * cachep->size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
 325 * cachep->size - 1* BYTES_PER_WORD: last caller address
 326 *					[BYTES_PER_WORD long]
 327 */
 328static int obj_offset(struct kmem_cache *cachep)
 329{
 330	return cachep->obj_offset;
 331}
 332
 333static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
 334{
 335	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
 336	return (unsigned long long *) (objp + obj_offset(cachep) -
 337				      sizeof(unsigned long long));
 338}
 339
 340static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
 341{
 342	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
 343	if (cachep->flags & SLAB_STORE_USER)
 344		return (unsigned long long *)(objp + cachep->size -
 345					      sizeof(unsigned long long) -
 346					      REDZONE_ALIGN);
 347	return (unsigned long long *) (objp + cachep->size -
 348				       sizeof(unsigned long long));
 349}
 350
 351static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 352{
 353	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
 354	return (void **)(objp + cachep->size - BYTES_PER_WORD);
 355}
 356
 357#else
 358
 359#define obj_offset(x)			0
 360#define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
 361#define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
 362#define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})
 363
 364#endif
 365
 366/*
 367 * Do not go above this order unless 0 objects fit into the slab or
 368 * overridden on the command line.
 369 */
 370#define	SLAB_MAX_ORDER_HI	1
 371#define	SLAB_MAX_ORDER_LO	0
 372static int slab_max_order = SLAB_MAX_ORDER_LO;
 373static bool slab_max_order_set __initdata;
 374
 375static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
 376				 unsigned int idx)
 377{
 378	return page->s_mem + cache->size * idx;
 379}
 380
 381#define BOOT_CPUCACHE_ENTRIES	1
 382/* internal cache of cache description objs */
 383static struct kmem_cache kmem_cache_boot = {
 384	.batchcount = 1,
 385	.limit = BOOT_CPUCACHE_ENTRIES,
 386	.shared = 1,
 387	.size = sizeof(struct kmem_cache),
 388	.name = "kmem_cache",
 389};
 390
 391static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
 392
 393static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
 394{
 395	return this_cpu_ptr(cachep->cpu_cache);
 396}
 397
 398/*
 399 * Calculate the number of objects and left-over bytes for a given buffer size.
 400 */
 401static unsigned int cache_estimate(unsigned long gfporder, size_t buffer_size,
 402		slab_flags_t flags, size_t *left_over)
 403{
 404	unsigned int num;
 405	size_t slab_size = PAGE_SIZE << gfporder;
 406
 407	/*
 408	 * The slab management structure can be either off the slab or
 409	 * on it. For the latter case, the memory allocated for a
 410	 * slab is used for:
 411	 *
 412	 * - @buffer_size bytes for each object
 413	 * - One freelist_idx_t for each object
 414	 *
 415	 * We don't need to consider alignment of freelist because
 416	 * freelist will be at the end of slab page. The objects will be
 417	 * at the correct alignment.
 418	 *
 419	 * If the slab management structure is off the slab, then the
 420	 * alignment will already be calculated into the size. Because
 421	 * the slabs are all pages aligned, the objects will be at the
 422	 * correct alignment when allocated.
 423	 */
 424	if (flags & (CFLGS_OBJFREELIST_SLAB | CFLGS_OFF_SLAB)) {
 425		num = slab_size / buffer_size;
 426		*left_over = slab_size % buffer_size;
 427	} else {
 428		num = slab_size / (buffer_size + sizeof(freelist_idx_t));
 429		*left_over = slab_size %
 430			(buffer_size + sizeof(freelist_idx_t));
 431	}
 432
 433	return num;
 434}
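/*
 * Worked example (editor's annotation, assuming PAGE_SIZE == 4096 and a
 * one-byte freelist_idx_t): for an on-slab cache with buffer_size == 256 the
 * per-object cost is 256 + 1 = 257 bytes, so num = 4096 / 257 = 15 and
 * *left_over = 4096 - 15 * 257 = 241.  With CFLGS_OFF_SLAB the freelist is
 * stored externally, giving num = 16 and *left_over = 0.
 */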
 435
 436#if DEBUG
 437#define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)
 438
 439static void __slab_error(const char *function, struct kmem_cache *cachep,
 440			char *msg)
 441{
 442	pr_err("slab error in %s(): cache `%s': %s\n",
 443	       function, cachep->name, msg);
 444	dump_stack();
 445	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
 446}
 447#endif
 448
 449/*
 450 * By default on NUMA we use alien caches to stage the freeing of
 451 * objects allocated from other nodes. This causes massive memory
 452 * inefficiencies when using fake NUMA setup to split memory into a
 453 * large number of small nodes, so it can be disabled on the command
 454 * line.
 455 */
 456
 457static int use_alien_caches __read_mostly = 1;
 458static int __init noaliencache_setup(char *s)
 459{
 460	use_alien_caches = 0;
 461	return 1;
 462}
 463__setup("noaliencache", noaliencache_setup);
 464
 465static int __init slab_max_order_setup(char *str)
 466{
 467	get_option(&str, &slab_max_order);
 468	slab_max_order = slab_max_order < 0 ? 0 :
 469				min(slab_max_order, MAX_ORDER - 1);
 470	slab_max_order_set = true;
 471
 472	return 1;
 473}
 474__setup("slab_max_order=", slab_max_order_setup);
 475
 476#ifdef CONFIG_NUMA
 477/*
 478 * Special reaping functions for NUMA systems called from cache_reap().
 479 * These take care of doing round robin flushing of alien caches (containing
 480 * objects freed on different nodes from which they were allocated) and the
 481 * flushing of remote pcps by calling drain_node_pages.
 482 */
 483static DEFINE_PER_CPU(unsigned long, slab_reap_node);
 484
 485static void init_reap_node(int cpu)
 486{
 487	per_cpu(slab_reap_node, cpu) = next_node_in(cpu_to_mem(cpu),
 488						    node_online_map);
 489}
 490
 491static void next_reap_node(void)
 492{
 493	int node = __this_cpu_read(slab_reap_node);
 494
 495	node = next_node_in(node, node_online_map);
 496	__this_cpu_write(slab_reap_node, node);
 497}
 498
 499#else
 500#define init_reap_node(cpu) do { } while (0)
 501#define next_reap_node(void) do { } while (0)
 502#endif
 503
 504/*
 505 * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
 506 * via the workqueue/eventd.
 507 * Add the CPU number into the expiration time to minimize the possibility of
 508 * the CPUs getting into lockstep and contending for the global cache chain
 509 * lock.
 510 */
 511static void start_cpu_timer(int cpu)
 512{
 513	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);
 514
 515	if (reap_work->work.func == NULL) {
 516		init_reap_node(cpu);
 517		INIT_DEFERRABLE_WORK(reap_work, cache_reap);
 518		schedule_delayed_work_on(cpu, reap_work,
 519					__round_jiffies_relative(HZ, cpu));
 520	}
 521}
 522
 523static void init_arraycache(struct array_cache *ac, int limit, int batch)
 524{
 525	if (ac) {
 526		ac->avail = 0;
 527		ac->limit = limit;
 528		ac->batchcount = batch;
 529		ac->touched = 0;
 530	}
 531}
 532
 533static struct array_cache *alloc_arraycache(int node, int entries,
 534					    int batchcount, gfp_t gfp)
 535{
 536	size_t memsize = sizeof(void *) * entries + sizeof(struct array_cache);
 537	struct array_cache *ac = NULL;
 538
 539	ac = kmalloc_node(memsize, gfp, node);
 540	/*
 541	 * The array_cache structures contain pointers to free object.
 542	 * However, when such objects are allocated or transferred to another
 543	 * cache the pointers are not cleared and they could be counted as
 544	 * valid references during a kmemleak scan. Therefore, kmemleak must
 545	 * not scan such objects.
 546	 */
 547	kmemleak_no_scan(ac);
 548	init_arraycache(ac, entries, batchcount);
 549	return ac;
 550}
 551
 552static noinline void cache_free_pfmemalloc(struct kmem_cache *cachep,
 553					struct page *page, void *objp)
 554{
 555	struct kmem_cache_node *n;
 556	int page_node;
 557	LIST_HEAD(list);
 558
 559	page_node = page_to_nid(page);
 560	n = get_node(cachep, page_node);
 561
 562	spin_lock(&n->list_lock);
 563	free_block(cachep, &objp, 1, page_node, &list);
 564	spin_unlock(&n->list_lock);
 565
 566	slabs_destroy(cachep, &list);
 567}
 568
 569/*
 570 * Transfer objects in one arraycache to another.
 571 * Locking must be handled by the caller.
 572 *
 573 * Return the number of entries transferred.
 574 */
 575static int transfer_objects(struct array_cache *to,
 576		struct array_cache *from, unsigned int max)
 577{
 578	/* Figure out how many entries to transfer */
 579	int nr = min3(from->avail, max, to->limit - to->avail);
 580
 581	if (!nr)
 582		return 0;
 583
 584	memcpy(to->entry + to->avail, from->entry + from->avail - nr,
 585			sizeof(void *) *nr);
 586
 587	from->avail -= nr;
 588	to->avail += nr;
 589	return nr;
 590}
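/*
 * Worked example (editor's annotation, hypothetical numbers): if the source
 * has from->avail == 30, the caller passes max == 24 and the destination has
 * limit == 120 with avail == 110, then nr = min3(30, 24, 120 - 110) = 10, so
 * the ten most recently freed pointers of "from" move to the top of "to".
 */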
 591
 592/* &alien->lock must be held by alien callers. */
 593static __always_inline void __free_one(struct array_cache *ac, void *objp)
 594{
 595	/* Avoid trivial double-free. */
 596	if (IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
 597	    WARN_ON_ONCE(ac->avail > 0 && ac->entry[ac->avail - 1] == objp))
 598		return;
 599	ac->entry[ac->avail++] = objp;
 600}
 601
 602#ifndef CONFIG_NUMA
 603
 604#define drain_alien_cache(cachep, alien) do { } while (0)
 605#define reap_alien(cachep, n) do { } while (0)
 606
 607static inline struct alien_cache **alloc_alien_cache(int node,
 608						int limit, gfp_t gfp)
 609{
 610	return NULL;
 611}
 612
 613static inline void free_alien_cache(struct alien_cache **ac_ptr)
 614{
 615}
 616
 617static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 618{
 619	return 0;
 620}
 621
 622static inline void *alternate_node_alloc(struct kmem_cache *cachep,
 623		gfp_t flags)
 624{
 625	return NULL;
 626}
 627
 628static inline void *____cache_alloc_node(struct kmem_cache *cachep,
 629		 gfp_t flags, int nodeid)
 630{
 631	return NULL;
 632}
 633
 634static inline gfp_t gfp_exact_node(gfp_t flags)
 635{
 636	return flags & ~__GFP_NOFAIL;
 637}
 638
 639#else	/* CONFIG_NUMA */
 640
 641static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
 642static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
 643
 644static struct alien_cache *__alloc_alien_cache(int node, int entries,
 645						int batch, gfp_t gfp)
 646{
 647	size_t memsize = sizeof(void *) * entries + sizeof(struct alien_cache);
 648	struct alien_cache *alc = NULL;
 649
 650	alc = kmalloc_node(memsize, gfp, node);
 651	if (alc) {
 652		kmemleak_no_scan(alc);
 653		init_arraycache(&alc->ac, entries, batch);
 654		spin_lock_init(&alc->lock);
 655	}
 656	return alc;
 657}
 658
 659static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
 660{
 661	struct alien_cache **alc_ptr;
 662	int i;
 663
 664	if (limit > 1)
 665		limit = 12;
 666	alc_ptr = kcalloc_node(nr_node_ids, sizeof(void *), gfp, node);
 667	if (!alc_ptr)
 668		return NULL;
 669
 670	for_each_node(i) {
 671		if (i == node || !node_online(i))
 672			continue;
 673		alc_ptr[i] = __alloc_alien_cache(node, limit, 0xbaadf00d, gfp);
 674		if (!alc_ptr[i]) {
 675			for (i--; i >= 0; i--)
 676				kfree(alc_ptr[i]);
 677			kfree(alc_ptr);
 678			return NULL;
 679		}
 680	}
 681	return alc_ptr;
 682}
 683
 684static void free_alien_cache(struct alien_cache **alc_ptr)
 685{
 686	int i;
 687
 688	if (!alc_ptr)
 689		return;
 690	for_each_node(i)
 691	    kfree(alc_ptr[i]);
 692	kfree(alc_ptr);
 693}
 694
 695static void __drain_alien_cache(struct kmem_cache *cachep,
 696				struct array_cache *ac, int node,
 697				struct list_head *list)
 698{
 699	struct kmem_cache_node *n = get_node(cachep, node);
 700
 701	if (ac->avail) {
 702		spin_lock(&n->list_lock);
 703		/*
 704		 * Stuff objects into the remote node's shared array first.
 705		 * That way we could avoid the overhead of putting the objects
 706		 * into the free lists and getting them back later.
 707		 */
 708		if (n->shared)
 709			transfer_objects(n->shared, ac, ac->limit);
 710
 711		free_block(cachep, ac->entry, ac->avail, node, list);
 712		ac->avail = 0;
 713		spin_unlock(&n->list_lock);
 714	}
 715}
 716
 717/*
 718 * Called from cache_reap() to regularly drain alien caches round robin.
 719 */
 720static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
 721{
 722	int node = __this_cpu_read(slab_reap_node);
 723
 724	if (n->alien) {
 725		struct alien_cache *alc = n->alien[node];
 726		struct array_cache *ac;
 727
 728		if (alc) {
 729			ac = &alc->ac;
 730			if (ac->avail && spin_trylock_irq(&alc->lock)) {
 731				LIST_HEAD(list);
 732
 733				__drain_alien_cache(cachep, ac, node, &list);
 734				spin_unlock_irq(&alc->lock);
 735				slabs_destroy(cachep, &list);
 736			}
 737		}
 738	}
 739}
 740
 741static void drain_alien_cache(struct kmem_cache *cachep,
 742				struct alien_cache **alien)
 743{
 744	int i = 0;
 745	struct alien_cache *alc;
 746	struct array_cache *ac;
 747	unsigned long flags;
 748
 749	for_each_online_node(i) {
 750		alc = alien[i];
 751		if (alc) {
 752			LIST_HEAD(list);
 753
 754			ac = &alc->ac;
 755			spin_lock_irqsave(&alc->lock, flags);
 756			__drain_alien_cache(cachep, ac, i, &list);
 757			spin_unlock_irqrestore(&alc->lock, flags);
 758			slabs_destroy(cachep, &list);
 759		}
 760	}
 761}
 762
 763static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
 764				int node, int page_node)
 765{
 766	struct kmem_cache_node *n;
 767	struct alien_cache *alien = NULL;
 768	struct array_cache *ac;
 769	LIST_HEAD(list);
 770
 771	n = get_node(cachep, node);
 772	STATS_INC_NODEFREES(cachep);
 773	if (n->alien && n->alien[page_node]) {
 774		alien = n->alien[page_node];
 775		ac = &alien->ac;
 776		spin_lock(&alien->lock);
 777		if (unlikely(ac->avail == ac->limit)) {
 778			STATS_INC_ACOVERFLOW(cachep);
 779			__drain_alien_cache(cachep, ac, page_node, &list);
 780		}
 781		__free_one(ac, objp);
 782		spin_unlock(&alien->lock);
 783		slabs_destroy(cachep, &list);
 784	} else {
 785		n = get_node(cachep, page_node);
 786		spin_lock(&n->list_lock);
 787		free_block(cachep, &objp, 1, page_node, &list);
 788		spin_unlock(&n->list_lock);
 789		slabs_destroy(cachep, &list);
 790	}
 791	return 1;
 792}
 793
 794static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 795{
 796	int page_node = page_to_nid(virt_to_page(objp));
 797	int node = numa_mem_id();
 798	/*
 799	 * Make sure we are not freeing an object from another node to the array
 800	 * cache on this cpu.
 801	 */
 802	if (likely(node == page_node))
 803		return 0;
 804
 805	return __cache_free_alien(cachep, objp, node, page_node);
 806}
 807
 808/*
 809 * Construct gfp mask to allocate from a specific node but do not reclaim or
 810 * warn about failures.
 811 */
 812static inline gfp_t gfp_exact_node(gfp_t flags)
 813{
 814	return (flags | __GFP_THISNODE | __GFP_NOWARN) & ~(__GFP_RECLAIM|__GFP_NOFAIL);
 815}
 816#endif
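/*
 * Editor's annotation on the NUMA variant above: for a GFP_KERNEL caller the
 * result sets __GFP_THISNODE | __GFP_NOWARN and clears __GFP_RECLAIM (both
 * the direct reclaim and kswapd wakeup bits) as well as __GFP_NOFAIL, so the
 * page allocation must come from the requested node, fails fast and fails
 * quietly, leaving fallback policy to the slab layer itself.
 */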
 817
 818static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
 819{
 820	struct kmem_cache_node *n;
 821
 822	/*
 823	 * Set up the kmem_cache_node for cpu before we can
 824	 * begin anything. Make sure some other cpu on this
 825	 * node has not already allocated this structure.
 826	 */
 827	n = get_node(cachep, node);
 828	if (n) {
 829		spin_lock_irq(&n->list_lock);
 830		n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount +
 831				cachep->num;
 832		spin_unlock_irq(&n->list_lock);
 833
 834		return 0;
 835	}
 836
 837	n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node);
 838	if (!n)
 839		return -ENOMEM;
 840
 841	kmem_cache_node_init(n);
 842	n->next_reap = jiffies + REAPTIMEOUT_NODE +
 843		    ((unsigned long)cachep) % REAPTIMEOUT_NODE;
 844
 845	n->free_limit =
 846		(1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num;
 847
 848	/*
 849	 * The kmem_cache_nodes don't come and go as CPUs
 850	 * come and go.  slab_mutex is sufficient
 851	 * protection here.
 852	 */
 853	cachep->node[node] = n;
 854
 855	return 0;
 856}
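/*
 * Worked example of the free_limit formula above (editor's annotation,
 * hypothetical numbers): on a node with 4 CPUs, a cache with
 * batchcount == 27 and num == 15 objects per slab keeps up to
 * (1 + 4) * 27 + 15 = 150 free objects per node before empty slabs are
 * returned to the page allocator.
 */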
 857
 858#if (defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)) || defined(CONFIG_SMP)
 859/*
 860 * Allocates and initializes the kmem_cache_node for a node on each slab cache,
 861 * used for either memory or cpu hotplug.  If memory is being hot-added, the
 862 * kmem_cache_node will be allocated off-node since memory is not yet online for
 863 * the new node.  When hotplugging memory or a cpu, existing nodes are not
 864 * replaced if they are already in use.
 865 *
 866 * Must hold slab_mutex.
 867 */
 868static int init_cache_node_node(int node)
 869{
 870	int ret;
 871	struct kmem_cache *cachep;
 872
 873	list_for_each_entry(cachep, &slab_caches, list) {
 874		ret = init_cache_node(cachep, node, GFP_KERNEL);
 875		if (ret)
 876			return ret;
 877	}
 878
 879	return 0;
 880}
 881#endif
 882
 883static int setup_kmem_cache_node(struct kmem_cache *cachep,
 884				int node, gfp_t gfp, bool force_change)
 885{
 886	int ret = -ENOMEM;
 887	struct kmem_cache_node *n;
 888	struct array_cache *old_shared = NULL;
 889	struct array_cache *new_shared = NULL;
 890	struct alien_cache **new_alien = NULL;
 891	LIST_HEAD(list);
 892
 893	if (use_alien_caches) {
 894		new_alien = alloc_alien_cache(node, cachep->limit, gfp);
 895		if (!new_alien)
 896			goto fail;
 897	}
 898
 899	if (cachep->shared) {
 900		new_shared = alloc_arraycache(node,
 901			cachep->shared * cachep->batchcount, 0xbaadf00d, gfp);
 902		if (!new_shared)
 903			goto fail;
 904	}
 905
 906	ret = init_cache_node(cachep, node, gfp);
 907	if (ret)
 908		goto fail;
 909
 910	n = get_node(cachep, node);
 911	spin_lock_irq(&n->list_lock);
 912	if (n->shared && force_change) {
 913		free_block(cachep, n->shared->entry,
 914				n->shared->avail, node, &list);
 915		n->shared->avail = 0;
 916	}
 917
 918	if (!n->shared || force_change) {
 919		old_shared = n->shared;
 920		n->shared = new_shared;
 921		new_shared = NULL;
 922	}
 923
 924	if (!n->alien) {
 925		n->alien = new_alien;
 926		new_alien = NULL;
 927	}
 928
 929	spin_unlock_irq(&n->list_lock);
 930	slabs_destroy(cachep, &list);
 931
 932	/*
 933	 * This protects lockless access to n->shared while irqs are disabled.
 934	 * If n->shared isn't NULL in irq disabled context, access to it is
 935	 * guaranteed to remain valid until irqs are re-enabled, because it will be
 936	 * freed after synchronize_rcu().
 937	 */
 938	if (old_shared && force_change)
 939		synchronize_rcu();
 940
 941fail:
 942	kfree(old_shared);
 943	kfree(new_shared);
 944	free_alien_cache(new_alien);
 945
 946	return ret;
 947}
 948
 949#ifdef CONFIG_SMP
 950
 951static void cpuup_canceled(long cpu)
 952{
 953	struct kmem_cache *cachep;
 954	struct kmem_cache_node *n = NULL;
 955	int node = cpu_to_mem(cpu);
 956	const struct cpumask *mask = cpumask_of_node(node);
 957
 958	list_for_each_entry(cachep, &slab_caches, list) {
 959		struct array_cache *nc;
 960		struct array_cache *shared;
 961		struct alien_cache **alien;
 962		LIST_HEAD(list);
 963
 964		n = get_node(cachep, node);
 965		if (!n)
 966			continue;
 967
 968		spin_lock_irq(&n->list_lock);
 969
 970		/* Free limit for this kmem_cache_node */
 971		n->free_limit -= cachep->batchcount;
 972
 973		/* cpu is dead; no one can alloc from it. */
 974		nc = per_cpu_ptr(cachep->cpu_cache, cpu);
 975		free_block(cachep, nc->entry, nc->avail, node, &list);
 976		nc->avail = 0;
 977
 978		if (!cpumask_empty(mask)) {
 979			spin_unlock_irq(&n->list_lock);
 980			goto free_slab;
 981		}
 982
 983		shared = n->shared;
 984		if (shared) {
 985			free_block(cachep, shared->entry,
 986				   shared->avail, node, &list);
 987			n->shared = NULL;
 988		}
 989
 990		alien = n->alien;
 991		n->alien = NULL;
 992
 993		spin_unlock_irq(&n->list_lock);
 994
 995		kfree(shared);
 996		if (alien) {
 997			drain_alien_cache(cachep, alien);
 998			free_alien_cache(alien);
 999		}
1000
1001free_slab:
1002		slabs_destroy(cachep, &list);
1003	}
1004	/*
1005	 * In the previous loop, all the objects were freed to
1006	 * the respective cache's slabs; now we can go ahead and
1007	 * shrink each nodelist to its limit.
1008	 */
1009	list_for_each_entry(cachep, &slab_caches, list) {
1010		n = get_node(cachep, node);
1011		if (!n)
1012			continue;
1013		drain_freelist(cachep, n, INT_MAX);
1014	}
1015}
1016
1017static int cpuup_prepare(long cpu)
1018{
1019	struct kmem_cache *cachep;
1020	int node = cpu_to_mem(cpu);
1021	int err;
1022
1023	/*
1024	 * We need to do this right in the beginning since
1025	 * the alloc_arraycache() calls are going to use this list.
1026	 * kmalloc_node allows us to add the slab to the right
1027	 * kmem_cache_node and not this cpu's kmem_cache_node.
1028	 */
1029	err = init_cache_node_node(node);
1030	if (err < 0)
1031		goto bad;
1032
1033	/*
1034	 * Now we can go ahead with allocating the shared arrays and
1035	 * array caches
1036	 */
1037	list_for_each_entry(cachep, &slab_caches, list) {
1038		err = setup_kmem_cache_node(cachep, node, GFP_KERNEL, false);
1039		if (err)
1040			goto bad;
1041	}
1042
1043	return 0;
1044bad:
1045	cpuup_canceled(cpu);
1046	return -ENOMEM;
1047}
1048
1049int slab_prepare_cpu(unsigned int cpu)
1050{
1051	int err;
1052
1053	mutex_lock(&slab_mutex);
1054	err = cpuup_prepare(cpu);
1055	mutex_unlock(&slab_mutex);
1056	return err;
1057}
1058
1059/*
1060 * This is called for a failed online attempt and for a successful
1061 * offline.
1062 *
1063 * Even if all the cpus of a node are down, we don't free the
1064 * kmem_cache_node of any cache. This is to avoid a race between cpu_down() and
1065 * a kmalloc allocation from another cpu for memory from the node of
1066 * the cpu going down.  The kmem_cache_node structure is usually allocated from
1067 * kmem_cache_create() and gets destroyed at kmem_cache_destroy().
1068 */
1069int slab_dead_cpu(unsigned int cpu)
1070{
1071	mutex_lock(&slab_mutex);
1072	cpuup_canceled(cpu);
1073	mutex_unlock(&slab_mutex);
1074	return 0;
1075}
1076#endif
1077
1078static int slab_online_cpu(unsigned int cpu)
1079{
1080	start_cpu_timer(cpu);
1081	return 0;
1082}
1083
1084static int slab_offline_cpu(unsigned int cpu)
1085{
1086	/*
1087	 * Shutdown cache reaper. Note that the slab_mutex is held so
1088	 * that if cache_reap() is invoked it cannot do anything
1089	 * expensive but will only modify reap_work and reschedule the
1090	 * timer.
1091	 */
1092	cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
1093	/* Now the cache_reaper is guaranteed to be not running. */
1094	per_cpu(slab_reap_work, cpu).work.func = NULL;
1095	return 0;
1096}
1097
1098#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
1099/*
1100 * Drains freelist for a node on each slab cache, used for memory hot-remove.
1101 * Returns -EBUSY if all objects cannot be drained so that the node is not
1102 * removed.
1103 *
1104 * Must hold slab_mutex.
1105 */
1106static int __meminit drain_cache_node_node(int node)
1107{
1108	struct kmem_cache *cachep;
1109	int ret = 0;
1110
1111	list_for_each_entry(cachep, &slab_caches, list) {
1112		struct kmem_cache_node *n;
1113
1114		n = get_node(cachep, node);
1115		if (!n)
1116			continue;
1117
1118		drain_freelist(cachep, n, INT_MAX);
1119
1120		if (!list_empty(&n->slabs_full) ||
1121		    !list_empty(&n->slabs_partial)) {
1122			ret = -EBUSY;
1123			break;
1124		}
1125	}
1126	return ret;
1127}
1128
1129static int __meminit slab_memory_callback(struct notifier_block *self,
1130					unsigned long action, void *arg)
1131{
1132	struct memory_notify *mnb = arg;
1133	int ret = 0;
1134	int nid;
1135
1136	nid = mnb->status_change_nid;
1137	if (nid < 0)
1138		goto out;
1139
1140	switch (action) {
1141	case MEM_GOING_ONLINE:
1142		mutex_lock(&slab_mutex);
1143		ret = init_cache_node_node(nid);
1144		mutex_unlock(&slab_mutex);
1145		break;
1146	case MEM_GOING_OFFLINE:
1147		mutex_lock(&slab_mutex);
1148		ret = drain_cache_node_node(nid);
1149		mutex_unlock(&slab_mutex);
1150		break;
1151	case MEM_ONLINE:
1152	case MEM_OFFLINE:
1153	case MEM_CANCEL_ONLINE:
1154	case MEM_CANCEL_OFFLINE:
1155		break;
1156	}
1157out:
1158	return notifier_from_errno(ret);
1159}
1160#endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */
1161
1162/*
1163 * swap the static kmem_cache_node with kmalloced memory
1164 */
1165static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list,
1166				int nodeid)
1167{
1168	struct kmem_cache_node *ptr;
1169
1170	ptr = kmalloc_node(sizeof(struct kmem_cache_node), GFP_NOWAIT, nodeid);
1171	BUG_ON(!ptr);
1172
1173	memcpy(ptr, list, sizeof(struct kmem_cache_node));
1174	/*
1175	 * Do not assume that spinlocks can be initialized via memcpy:
1176	 */
1177	spin_lock_init(&ptr->list_lock);
1178
1179	MAKE_ALL_LISTS(cachep, ptr, nodeid);
1180	cachep->node[nodeid] = ptr;
1181}
1182
1183/*
1184 * For setting up all the kmem_cache_node for caches whose buffer_size is the
1185 * same as the size of kmem_cache_node.
1186 */
1187static void __init set_up_node(struct kmem_cache *cachep, int index)
1188{
1189	int node;
1190
1191	for_each_online_node(node) {
1192		cachep->node[node] = &init_kmem_cache_node[index + node];
1193		cachep->node[node]->next_reap = jiffies +
1194		    REAPTIMEOUT_NODE +
1195		    ((unsigned long)cachep) % REAPTIMEOUT_NODE;
1196	}
1197}
1198
1199/*
1200 * Initialisation.  Called after the page allocator has been initialised and
1201 * before smp_init().
1202 */
1203void __init kmem_cache_init(void)
1204{
1205	int i;
1206
1207	kmem_cache = &kmem_cache_boot;
1208
1209	if (!IS_ENABLED(CONFIG_NUMA) || num_possible_nodes() == 1)
1210		use_alien_caches = 0;
1211
1212	for (i = 0; i < NUM_INIT_LISTS; i++)
1213		kmem_cache_node_init(&init_kmem_cache_node[i]);
1214
1215	/*
1216	 * Fragmentation resistance on low memory - only use bigger
1217	 * page orders on machines with more than 32MB of memory if
1218	 * not overridden on the command line.
1219	 */
1220	if (!slab_max_order_set && totalram_pages() > (32 << 20) >> PAGE_SHIFT)
1221		slab_max_order = SLAB_MAX_ORDER_HI;
1222
1223	/* Bootstrap is tricky, because several objects are allocated
1224	 * from caches that do not exist yet:
1225	 * 1) initialize the kmem_cache cache: it contains the struct
1226	 *    kmem_cache structures of all caches, except kmem_cache itself:
1227	 *    kmem_cache is statically allocated.
1228	 *    Initially an __init data area is used for the head array and the
1229	 *    kmem_cache_node structures, it's replaced with a kmalloc allocated
1230	 *    array at the end of the bootstrap.
1231	 * 2) Create the first kmalloc cache.
1232	 *    The struct kmem_cache for the new cache is allocated normally.
1233	 *    An __init data area is used for the head array.
1234	 * 3) Create the remaining kmalloc caches, with minimally sized
1235	 *    head arrays.
1236	 * 4) Replace the __init data head arrays for kmem_cache and the first
1237	 *    kmalloc cache with kmalloc allocated arrays.
1238	 * 5) Replace the __init data for kmem_cache_node for kmem_cache and
1239 *    the other caches with kmalloc allocated memory.
1240	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
1241	 */
1242
1243	/* 1) create the kmem_cache */
1244
1245	/*
1246	 * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
1247	 */
1248	create_boot_cache(kmem_cache, "kmem_cache",
1249		offsetof(struct kmem_cache, node) +
1250				  nr_node_ids * sizeof(struct kmem_cache_node *),
1251				  SLAB_HWCACHE_ALIGN, 0, 0);
1252	list_add(&kmem_cache->list, &slab_caches);
1253	slab_state = PARTIAL;
1254
1255	/*
1256	 * Initialize the caches that provide memory for the  kmem_cache_node
1257	 * structures first.  Without this, further allocations will bug.
1258	 */
1259	kmalloc_caches[KMALLOC_NORMAL][INDEX_NODE] = create_kmalloc_cache(
1260				kmalloc_info[INDEX_NODE].name[KMALLOC_NORMAL],
1261				kmalloc_info[INDEX_NODE].size,
1262				ARCH_KMALLOC_FLAGS, 0,
1263				kmalloc_info[INDEX_NODE].size);
1264	slab_state = PARTIAL_NODE;
1265	setup_kmalloc_cache_index_table();
1266
1267	slab_early_init = 0;
1268
1269	/* 5) Replace the bootstrap kmem_cache_node */
1270	{
1271		int nid;
1272
1273		for_each_online_node(nid) {
1274			init_list(kmem_cache, &init_kmem_cache_node[CACHE_CACHE + nid], nid);
1275
1276			init_list(kmalloc_caches[KMALLOC_NORMAL][INDEX_NODE],
1277					  &init_kmem_cache_node[SIZE_NODE + nid], nid);
1278		}
1279	}
1280
1281	create_kmalloc_caches(ARCH_KMALLOC_FLAGS);
1282}
1283
1284void __init kmem_cache_init_late(void)
1285{
1286	struct kmem_cache *cachep;
1287
1288	/* 6) resize the head arrays to their final sizes */
1289	mutex_lock(&slab_mutex);
1290	list_for_each_entry(cachep, &slab_caches, list)
1291		if (enable_cpucache(cachep, GFP_NOWAIT))
1292			BUG();
1293	mutex_unlock(&slab_mutex);
1294
1295	/* Done! */
1296	slab_state = FULL;
1297
1298#ifdef CONFIG_NUMA
1299	/*
1300	 * Register a memory hotplug callback that initializes and frees
1301 * per-node kmem_cache_node structures.
1302	 */
1303	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
1304#endif
1305
1306	/*
1307	 * The reap timers are started later, with a module init call: That part
1308	 * of the kernel is not yet operational.
1309	 */
1310}
1311
1312static int __init cpucache_init(void)
1313{
1314	int ret;
1315
1316	/*
1317	 * Register the timers that return unneeded pages to the page allocator
1318	 */
1319	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SLAB online",
1320				slab_online_cpu, slab_offline_cpu);
1321	WARN_ON(ret < 0);
1322
1323	return 0;
1324}
1325__initcall(cpucache_init);
1326
1327static noinline void
1328slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
1329{
1330#if DEBUG
1331	struct kmem_cache_node *n;
1332	unsigned long flags;
1333	int node;
1334	static DEFINE_RATELIMIT_STATE(slab_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
1335				      DEFAULT_RATELIMIT_BURST);
1336
1337	if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slab_oom_rs))
1338		return;
1339
1340	pr_warn("SLAB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
1341		nodeid, gfpflags, &gfpflags);
1342	pr_warn("  cache: %s, object size: %d, order: %d\n",
1343		cachep->name, cachep->size, cachep->gfporder);
1344
1345	for_each_kmem_cache_node(cachep, node, n) {
1346		unsigned long total_slabs, free_slabs, free_objs;
1347
1348		spin_lock_irqsave(&n->list_lock, flags);
1349		total_slabs = n->total_slabs;
1350		free_slabs = n->free_slabs;
1351		free_objs = n->free_objects;
1352		spin_unlock_irqrestore(&n->list_lock, flags);
1353
1354		pr_warn("  node %d: slabs: %ld/%ld, objs: %ld/%ld\n",
1355			node, total_slabs - free_slabs, total_slabs,
1356			(total_slabs * cachep->num) - free_objs,
1357			total_slabs * cachep->num);
1358	}
1359#endif
1360}
1361
1362/*
1363 * Interface to system's page allocator. No need to hold the
1364 * kmem_cache_node ->list_lock.
1365 *
1366 * If we requested dmaable memory, we will get it. Even if we
1367 * did not request dmaable memory, we might get it, but that
1368 * would be relatively rare and ignorable.
1369 */
1370static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
1371								int nodeid)
1372{
1373	struct page *page;
1374
1375	flags |= cachep->allocflags;
1376
1377	page = __alloc_pages_node(nodeid, flags, cachep->gfporder);
1378	if (!page) {
1379		slab_out_of_memory(cachep, flags, nodeid);
1380		return NULL;
1381	}
1382
1383	account_slab_page(page, cachep->gfporder, cachep, flags);
1384	__SetPageSlab(page);
1385	/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
1386	if (sk_memalloc_socks() && page_is_pfmemalloc(page))
1387		SetPageSlabPfmemalloc(page);
1388
1389	return page;
1390}
1391
1392/*
1393 * Interface to system's page release.
1394 */
1395static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
1396{
1397	int order = cachep->gfporder;
1398
1399	BUG_ON(!PageSlab(page));
1400	__ClearPageSlabPfmemalloc(page);
1401	__ClearPageSlab(page);
1402	page_mapcount_reset(page);
1403	/* In union with page->mapping where page allocator expects NULL */
1404	page->slab_cache = NULL;
1405
1406	if (current->reclaim_state)
1407		current->reclaim_state->reclaimed_slab += 1 << order;
1408	unaccount_slab_page(page, order, cachep);
1409	__free_pages(page, order);
1410}
1411
1412static void kmem_rcu_free(struct rcu_head *head)
1413{
1414	struct kmem_cache *cachep;
1415	struct page *page;
1416
1417	page = container_of(head, struct page, rcu_head);
1418	cachep = page->slab_cache;
1419
1420	kmem_freepages(cachep, page);
1421}
1422
1423#if DEBUG
1424static bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
1425{
1426	if (debug_pagealloc_enabled_static() && OFF_SLAB(cachep) &&
1427		(cachep->size % PAGE_SIZE) == 0)
1428		return true;
1429
1430	return false;
1431}
1432
1433#ifdef CONFIG_DEBUG_PAGEALLOC
1434static void slab_kernel_map(struct kmem_cache *cachep, void *objp, int map)
1435{
1436	if (!is_debug_pagealloc_cache(cachep))
1437		return;
1438
1439	__kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map);
1440}
1441
1442#else
1443static inline void slab_kernel_map(struct kmem_cache *cachep, void *objp,
1444				int map) {}
1445
1446#endif
1447
1448static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
1449{
1450	int size = cachep->object_size;
1451	addr = &((char *)addr)[obj_offset(cachep)];
1452
1453	memset(addr, val, size);
1454	*(unsigned char *)(addr + size - 1) = POISON_END;
1455}
1456
1457static void dump_line(char *data, int offset, int limit)
1458{
1459	int i;
1460	unsigned char error = 0;
1461	int bad_count = 0;
1462
1463	pr_err("%03x: ", offset);
1464	for (i = 0; i < limit; i++) {
1465		if (data[offset + i] != POISON_FREE) {
1466			error = data[offset + i];
1467			bad_count++;
1468		}
1469	}
1470	print_hex_dump(KERN_CONT, "", 0, 16, 1,
1471			&data[offset], limit, 1);
1472
1473	if (bad_count == 1) {
1474		error ^= POISON_FREE;
1475		if (!(error & (error - 1))) {
1476			pr_err("Single bit error detected. Probably bad RAM.\n");
1477#ifdef CONFIG_X86
1478			pr_err("Run memtest86+ or a similar memory test tool.\n");
1479#else
1480			pr_err("Run a memory test tool.\n");
1481#endif
1482		}
1483	}
1484}
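/*
 * Note on the single-bit check above: XOR-ing the one bad byte with
 * POISON_FREE leaves only the bits that differ, and (error & (error - 1))
 * is zero exactly when a single bit is set.  Illustrative example: a
 * poisoned byte of POISON_FREE (0x6b) read back as 0x6a gives error = 0x01,
 * a one-bit flip, which points at bad RAM rather than a slab-user bug.
 */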
1485#endif
1486
1487#if DEBUG
1488
1489static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
1490{
1491	int i, size;
1492	char *realobj;
1493
1494	if (cachep->flags & SLAB_RED_ZONE) {
1495		pr_err("Redzone: 0x%llx/0x%llx\n",
1496		       *dbg_redzone1(cachep, objp),
1497		       *dbg_redzone2(cachep, objp));
1498	}
1499
1500	if (cachep->flags & SLAB_STORE_USER)
1501		pr_err("Last user: (%pSR)\n", *dbg_userword(cachep, objp));
1502	realobj = (char *)objp + obj_offset(cachep);
1503	size = cachep->object_size;
1504	for (i = 0; i < size && lines; i += 16, lines--) {
1505		int limit;
1506		limit = 16;
1507		if (i + limit > size)
1508			limit = size - i;
1509		dump_line(realobj, i, limit);
1510	}
1511}
1512
1513static void check_poison_obj(struct kmem_cache *cachep, void *objp)
1514{
1515	char *realobj;
1516	int size, i;
1517	int lines = 0;
1518
1519	if (is_debug_pagealloc_cache(cachep))
1520		return;
1521
1522	realobj = (char *)objp + obj_offset(cachep);
1523	size = cachep->object_size;
1524
1525	for (i = 0; i < size; i++) {
1526		char exp = POISON_FREE;
1527		if (i == size - 1)
1528			exp = POISON_END;
1529		if (realobj[i] != exp) {
1530			int limit;
1531			/* Mismatch ! */
1532			/* Print header */
1533			if (lines == 0) {
1534				pr_err("Slab corruption (%s): %s start=%px, len=%d\n",
1535				       print_tainted(), cachep->name,
1536				       realobj, size);
1537				print_objinfo(cachep, objp, 0);
1538			}
1539			/* Hexdump the affected line */
1540			i = (i / 16) * 16;
1541			limit = 16;
1542			if (i + limit > size)
1543				limit = size - i;
1544			dump_line(realobj, i, limit);
1545			i += 16;
1546			lines++;
1547			/* Limit to 5 lines */
1548			if (lines > 5)
1549				break;
1550		}
1551	}
1552	if (lines != 0) {
1553		/* Print some data about the neighboring objects, if they
1554		 * exist:
1555		 */
1556		struct page *page = virt_to_head_page(objp);
1557		unsigned int objnr;
1558
1559		objnr = obj_to_index(cachep, page, objp);
1560		if (objnr) {
1561			objp = index_to_obj(cachep, page, objnr - 1);
1562			realobj = (char *)objp + obj_offset(cachep);
1563			pr_err("Prev obj: start=%px, len=%d\n", realobj, size);
1564			print_objinfo(cachep, objp, 2);
1565		}
1566		if (objnr + 1 < cachep->num) {
1567			objp = index_to_obj(cachep, page, objnr + 1);
1568			realobj = (char *)objp + obj_offset(cachep);
1569			pr_err("Next obj: start=%px, len=%d\n", realobj, size);
1570			print_objinfo(cachep, objp, 2);
1571		}
1572	}
1573}
1574#endif
1575
1576#if DEBUG
1577static void slab_destroy_debugcheck(struct kmem_cache *cachep,
1578						struct page *page)
1579{
1580	int i;
1581
1582	if (OBJFREELIST_SLAB(cachep) && cachep->flags & SLAB_POISON) {
1583		poison_obj(cachep, page->freelist - obj_offset(cachep),
1584			POISON_FREE);
1585	}
1586
1587	for (i = 0; i < cachep->num; i++) {
1588		void *objp = index_to_obj(cachep, page, i);
1589
1590		if (cachep->flags & SLAB_POISON) {
1591			check_poison_obj(cachep, objp);
1592			slab_kernel_map(cachep, objp, 1);
1593		}
1594		if (cachep->flags & SLAB_RED_ZONE) {
1595			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
1596				slab_error(cachep, "start of a freed object was overwritten");
1597			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
1598				slab_error(cachep, "end of a freed object was overwritten");
1599		}
1600	}
1601}
1602#else
1603static void slab_destroy_debugcheck(struct kmem_cache *cachep,
1604						struct page *page)
1605{
1606}
1607#endif
1608
1609/**
1610 * slab_destroy - destroy and release all objects in a slab
1611 * @cachep: cache pointer being destroyed
1612 * @page: page pointer being destroyed
1613 *
1614 * Destroy all the objs in a slab page, and release the mem back to the system.
1615 * Before calling, the slab page must have been unlinked from the cache. The
1616 * kmem_cache_node ->list_lock is not held/needed.
1617 */
1618static void slab_destroy(struct kmem_cache *cachep, struct page *page)
1619{
1620	void *freelist;
1621
1622	freelist = page->freelist;
1623	slab_destroy_debugcheck(cachep, page);
1624	if (unlikely(cachep->flags & SLAB_TYPESAFE_BY_RCU))
1625		call_rcu(&page->rcu_head, kmem_rcu_free);
1626	else
1627		kmem_freepages(cachep, page);
1628
1629	/*
1630	 * From now on, the freelist is no longer used, so free it here even
1631	 * though the actual page may only be freed later in an RCU context.
1632	 */
1633	if (OFF_SLAB(cachep))
1634		kmem_cache_free(cachep->freelist_cache, freelist);
1635}
1636
1637/*
1638 * Update the size of the caches before calling slabs_destroy as it may
1639 * recursively call kfree.
1640 */
1641static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list)
1642{
1643	struct page *page, *n;
1644
1645	list_for_each_entry_safe(page, n, list, slab_list) {
1646		list_del(&page->slab_list);
1647		slab_destroy(cachep, page);
1648	}
1649}
1650
1651/**
1652 * calculate_slab_order - calculate size (page order) of slabs
1653 * @cachep: pointer to the cache that is being created
1654 * @size: size of objects to be created in this cache.
1655 * @flags: slab allocation flags
1656 *
1657 * Also calculates the number of objects per slab.
1658 *
1659 * This could be made much more intelligent.  For now, try to avoid using
1660 * high order pages for slabs.  When the gfp() functions are more friendly
1661 * towards high-order requests, this should be changed.
1662 *
1663 * Return: number of left-over bytes in a slab
1664 */
1665static size_t calculate_slab_order(struct kmem_cache *cachep,
1666				size_t size, slab_flags_t flags)
1667{
1668	size_t left_over = 0;
1669	int gfporder;
1670
1671	for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
1672		unsigned int num;
1673		size_t remainder;
1674
1675		num = cache_estimate(gfporder, size, flags, &remainder);
1676		if (!num)
1677			continue;
1678
1679		/* Can't handle number of objects more than SLAB_OBJ_MAX_NUM */
1680		if (num > SLAB_OBJ_MAX_NUM)
1681			break;
1682
1683		if (flags & CFLGS_OFF_SLAB) {
1684			struct kmem_cache *freelist_cache;
1685			size_t freelist_size;
1686
1687			freelist_size = num * sizeof(freelist_idx_t);
1688			freelist_cache = kmalloc_slab(freelist_size, 0u);
1689			if (!freelist_cache)
1690				continue;
1691
1692			/*
1693			 * Needed to avoid possible looping condition
1694			 * in cache_grow_begin()
1695			 */
1696			if (OFF_SLAB(freelist_cache))
1697				continue;
1698
1699			/* check if off slab has enough benefit */
1700			if (freelist_cache->size > cachep->size / 2)
1701				continue;
1702		}
1703
1704		/* Found something acceptable - save it away */
1705		cachep->num = num;
1706		cachep->gfporder = gfporder;
1707		left_over = remainder;
1708
1709		/*
1710		 * A VFS-reclaimable slab tends to have most allocations
1711		 * as GFP_NOFS and we really don't want to have to be allocating
1712		 * higher-order pages when we are unable to shrink dcache.
1713		 */
1714		if (flags & SLAB_RECLAIM_ACCOUNT)
1715			break;
1716
1717		/*
1718		 * Large number of objects is good, but very large slabs are
1719		 * currently bad for the gfp()s.
1720		 */
1721		if (gfporder >= slab_max_order)
1722			break;
1723
1724		/*
1725		 * Acceptable internal fragmentation?
1726		 */
1727		if (left_over * 8 <= (PAGE_SIZE << gfporder))
1728			break;
1729	}
1730	return left_over;
1731}
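/*
 * Worked example for the fragmentation check above (illustrative numbers,
 * ignoring freelist and alignment overhead): with 4K pages and 500-byte
 * objects, order 0 fits num = 8 objects and leaves left_over = 96 bytes;
 * 96 * 8 = 768 <= 4096, so order 0 is accepted and no higher order is tried.
 */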
1732
1733static struct array_cache __percpu *alloc_kmem_cache_cpus(
1734		struct kmem_cache *cachep, int entries, int batchcount)
1735{
1736	int cpu;
1737	size_t size;
1738	struct array_cache __percpu *cpu_cache;
1739
1740	size = sizeof(void *) * entries + sizeof(struct array_cache);
1741	cpu_cache = __alloc_percpu(size, sizeof(void *));
1742
1743	if (!cpu_cache)
1744		return NULL;
1745
1746	for_each_possible_cpu(cpu) {
1747		init_arraycache(per_cpu_ptr(cpu_cache, cpu),
1748				entries, batchcount);
1749	}
1750
1751	return cpu_cache;
1752}
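/*
 * Note: the size computed above is the struct array_cache header plus
 * 'entries' object pointers -- entry[] is a flexible array member -- so
 * each per-cpu copy is a single contiguous block sized for its limit.
 */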
1753
1754static int __ref setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
1755{
1756	if (slab_state >= FULL)
1757		return enable_cpucache(cachep, gfp);
1758
1759	cachep->cpu_cache = alloc_kmem_cache_cpus(cachep, 1, 1);
1760	if (!cachep->cpu_cache)
1761		return 1;
1762
1763	if (slab_state == DOWN) {
1764		/* Creation of first cache (kmem_cache). */
1765		set_up_node(kmem_cache, CACHE_CACHE);
1766	} else if (slab_state == PARTIAL) {
1767		/* For kmem_cache_node */
1768		set_up_node(cachep, SIZE_NODE);
1769	} else {
1770		int node;
1771
1772		for_each_online_node(node) {
1773			cachep->node[node] = kmalloc_node(
1774				sizeof(struct kmem_cache_node), gfp, node);
1775			BUG_ON(!cachep->node[node]);
1776			kmem_cache_node_init(cachep->node[node]);
1777		}
1778	}
1779
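	/*
	 * The cache pointer below acts as a cheap pseudo-random offset so
	 * that the per-node reap timers of caches created back to back are
	 * spread out over REAPTIMEOUT_NODE instead of all expiring on the
	 * same jiffy.
	 */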
1780	cachep->node[numa_mem_id()]->next_reap =
1781			jiffies + REAPTIMEOUT_NODE +
1782			((unsigned long)cachep) % REAPTIMEOUT_NODE;
1783
1784	cpu_cache_get(cachep)->avail = 0;
1785	cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
1786	cpu_cache_get(cachep)->batchcount = 1;
1787	cpu_cache_get(cachep)->touched = 0;
1788	cachep->batchcount = 1;
1789	cachep->limit = BOOT_CPUCACHE_ENTRIES;
1790	return 0;
1791}
1792
1793slab_flags_t kmem_cache_flags(unsigned int object_size,
1794	slab_flags_t flags, const char *name)
1795{
1796	return flags;
1797}
1798
1799struct kmem_cache *
1800__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
1801		   slab_flags_t flags, void (*ctor)(void *))
1802{
1803	struct kmem_cache *cachep;
1804
1805	cachep = find_mergeable(size, align, flags, name, ctor);
1806	if (cachep) {
1807		cachep->refcount++;
1808
1809		/*
1810		 * Adjust the object sizes so that we clear
1811		 * the complete object on kzalloc.
1812		 */
1813		cachep->object_size = max_t(int, cachep->object_size, size);
1814	}
1815	return cachep;
1816}
1817
1818static bool set_objfreelist_slab_cache(struct kmem_cache *cachep,
1819			size_t size, slab_flags_t flags)
1820{
1821	size_t left;
1822
1823	cachep->num = 0;
1824
1825	/*
1826	 * If slab auto-initialization on free is enabled, store the freelist
1827	 * off-slab, so that its contents don't end up in one of the allocated
1828	 * objects.
1829	 */
1830	if (unlikely(slab_want_init_on_free(cachep)))
1831		return false;
1832
1833	if (cachep->ctor || flags & SLAB_TYPESAFE_BY_RCU)
1834		return false;
1835
1836	left = calculate_slab_order(cachep, size,
1837			flags | CFLGS_OBJFREELIST_SLAB);
1838	if (!cachep->num)
1839		return false;
1840
1841	if (cachep->num * sizeof(freelist_idx_t) > cachep->object_size)
1842		return false;
1843
1844	cachep->colour = left / cachep->colour_off;
1845
1846	return true;
1847}
1848
1849static bool set_off_slab_cache(struct kmem_cache *cachep,
1850			size_t size, slab_flags_t flags)
1851{
1852	size_t left;
1853
1854	cachep->num = 0;
1855
1856	/*
1857	 * Always use on-slab management when SLAB_NOLEAKTRACE
1858	 * to avoid recursive calls into kmemleak.
1859	 */
1860	if (flags & SLAB_NOLEAKTRACE)
1861		return false;
1862
1863	/*
1864	 * Size is large, assume best to place the slab management obj
1865	 * off-slab (should allow better packing of objs).
1866	 */
1867	left = calculate_slab_order(cachep, size, flags | CFLGS_OFF_SLAB);
1868	if (!cachep->num)
1869		return false;
1870
1871	/*
1872	 * If the slab has been placed off-slab, and we have enough space then
1873	 * move it on-slab. This is at the expense of any extra colouring.
1874	 */
1875	if (left >= cachep->num * sizeof(freelist_idx_t))
1876		return false;
1877
1878	cachep->colour = left / cachep->colour_off;
1879
1880	return true;
1881}
1882
1883static bool set_on_slab_cache(struct kmem_cache *cachep,
1884			size_t size, slab_flags_t flags)
1885{
1886	size_t left;
1887
1888	cachep->num = 0;
1889
1890	left = calculate_slab_order(cachep, size, flags);
1891	if (!cachep->num)
1892		return false;
1893
1894	cachep->colour = left / cachep->colour_off;
1895
1896	return true;
1897}
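/*
 * Summary of the three freelist placements tried below by
 * __kmem_cache_create(), in order of preference: OBJFREELIST (the freelist
 * is stored inside one of the currently free objects, costing no extra
 * space), OFF_SLAB (the freelist is allocated from a kmalloc cache), and
 * plain on-slab (the freelist occupies the last freelist_size bytes of the
 * slab page, see alloc_slabmgmt()).
 */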
1898
1899/**
1900 * __kmem_cache_create - Create a cache.
1901 * @cachep: cache management descriptor
1902 * @flags: SLAB flags
1903 *
1904 * Returns zero on success, non-zero on failure.
1905 * Cannot be called within an interrupt, but can be interrupted.
1906 * The @ctor is run when new pages are allocated by the cache.
1907 *
1908 * The flags are
1909 *
1910 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
1911 * to catch references to uninitialised memory.
1912 *
1913 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
1914 * for buffer overruns.
1915 *
1916 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
1917 * cacheline.  This can be beneficial if you're counting cycles as closely
1918 * as davem.
1919 *
1920 * Return: 0 on success, non-zero in case of error
1921 */
1922int __kmem_cache_create(struct kmem_cache *cachep, slab_flags_t flags)
1923{
1924	size_t ralign = BYTES_PER_WORD;
1925	gfp_t gfp;
1926	int err;
1927	unsigned int size = cachep->size;
1928
1929#if DEBUG
1930#if FORCED_DEBUG
1931	/*
1932	 * Enable redzoning and last user accounting, except for caches with
1933	 * large objects, if the increased size would increase the object size
1934	 * above the next power of two: caches with object sizes just above a
1935	 * power of two have a significant amount of internal fragmentation.
1936	 */
1937	if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
1938						2 * sizeof(unsigned long long)))
1939		flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
1940	if (!(flags & SLAB_TYPESAFE_BY_RCU))
1941		flags |= SLAB_POISON;
1942#endif
1943#endif
1944
1945	/*
1946	 * Check that size is in terms of words.  This is needed to avoid
1947	 * unaligned accesses for some archs when redzoning is used, and makes
1948	 * sure any on-slab bufctl's are also correctly aligned.
1949	 */
1950	size = ALIGN(size, BYTES_PER_WORD);
1951
1952	if (flags & SLAB_RED_ZONE) {
1953		ralign = REDZONE_ALIGN;
1954		/* If redzoning, ensure that the second redzone is suitably
1955		 * aligned, by adjusting the object size accordingly. */
1956		size = ALIGN(size, REDZONE_ALIGN);
1957	}
1958
1959	/* 3) caller mandated alignment */
1960	if (ralign < cachep->align) {
1961		ralign = cachep->align;
1962	}
1963	/* disable debug if necessary */
1964	if (ralign > __alignof__(unsigned long long))
1965		flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
1966	/*
1967	 * 4) Store it.
1968	 */
1969	cachep->align = ralign;
1970	cachep->colour_off = cache_line_size();
1971	/* Offset must be a multiple of the alignment. */
1972	if (cachep->colour_off < cachep->align)
1973		cachep->colour_off = cachep->align;
1974
1975	if (slab_is_available())
1976		gfp = GFP_KERNEL;
1977	else
1978		gfp = GFP_NOWAIT;
1979
1980#if DEBUG
1981
1982	/*
1983	 * Both debugging options require word-alignment which is calculated
1984	 * into align above.
1985	 */
1986	if (flags & SLAB_RED_ZONE) {
1987		/* add space for red zone words */
1988		cachep->obj_offset += sizeof(unsigned long long);
1989		size += 2 * sizeof(unsigned long long);
1990	}
1991	if (flags & SLAB_STORE_USER) {
1992		/* user store requires one word storage behind the end of
1993		 * the real object. But if the second red zone needs to be
1994		 * aligned to 64 bits, we must allow that much space.
1995		 */
1996		if (flags & SLAB_RED_ZONE)
1997			size += REDZONE_ALIGN;
1998		else
1999			size += BYTES_PER_WORD;
2000	}
2001#endif
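	/*
	 * Rough per-object layout once the debug options above are applied
	 * (a sketch only; the exact offsets are defined by the dbg_redzone*()
	 * and dbg_userword() helpers):
	 *
	 *   [ redzone 1 ][ object_size bytes ][ redzone 2 ][ last-user word ]
	 *
	 * obj_offset() skips redzone 1, so callers still see the real object.
	 */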
2002
2003	kasan_cache_create(cachep, &size, &flags);
2004
2005	size = ALIGN(size, cachep->align);
2006	/*
2007	 * We should restrict the number of objects in a slab to implement
2008	 * byte sized index. Refer comment on SLAB_OBJ_MIN_SIZE definition.
2009	 */
2010	if (FREELIST_BYTE_INDEX && size < SLAB_OBJ_MIN_SIZE)
2011		size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align);
2012
2013#if DEBUG
2014	/*
2015	 * To activate debug pagealloc, off-slab management is a necessary
2016	 * requirement. In the early phase of initialization, the small sized
2017	 * kmalloc slabs are not yet set up, so off-slab would not be possible.
2018	 * Checking size >= 256 guarantees that all the necessary small sized
2019	 * slabs are already initialized in the current initialization sequence.
2020	 */
2021	if (debug_pagealloc_enabled_static() && (flags & SLAB_POISON) &&
2022		size >= 256 && cachep->object_size > cache_line_size()) {
2023		if (size < PAGE_SIZE || size % PAGE_SIZE == 0) {
2024			size_t tmp_size = ALIGN(size, PAGE_SIZE);
2025
2026			if (set_off_slab_cache(cachep, tmp_size, flags)) {
2027				flags |= CFLGS_OFF_SLAB;
2028				cachep->obj_offset += tmp_size - size;
2029				size = tmp_size;
2030				goto done;
2031			}
2032		}
2033	}
2034#endif
2035
2036	if (set_objfreelist_slab_cache(cachep, size, flags)) {
2037		flags |= CFLGS_OBJFREELIST_SLAB;
2038		goto done;
2039	}
2040
2041	if (set_off_slab_cache(cachep, size, flags)) {
2042		flags |= CFLGS_OFF_SLAB;
2043		goto done;
2044	}
2045
2046	if (set_on_slab_cache(cachep, size, flags))
2047		goto done;
2048
2049	return -E2BIG;
2050
2051done:
2052	cachep->freelist_size = cachep->num * sizeof(freelist_idx_t);
2053	cachep->flags = flags;
2054	cachep->allocflags = __GFP_COMP;
2055	if (flags & SLAB_CACHE_DMA)
2056		cachep->allocflags |= GFP_DMA;
2057	if (flags & SLAB_CACHE_DMA32)
2058		cachep->allocflags |= GFP_DMA32;
2059	if (flags & SLAB_RECLAIM_ACCOUNT)
2060		cachep->allocflags |= __GFP_RECLAIMABLE;
2061	cachep->size = size;
2062	cachep->reciprocal_buffer_size = reciprocal_value(size);
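	/*
	 * reciprocal_value() precomputes a multiplier so that later
	 * object-index calculations can replace the divide by 'size' with a
	 * multiply and shift (see reciprocal_divide()).
	 */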
2063
2064#if DEBUG
2065	/*
2066	 * If we're going to use the generic kernel_map_pages()
2067	 * poisoning, then it's going to smash the contents of
2068	 * the redzone and userword anyhow, so switch them off.
2069	 */
2070	if (IS_ENABLED(CONFIG_PAGE_POISONING) &&
2071		(cachep->flags & SLAB_POISON) &&
2072		is_debug_pagealloc_cache(cachep))
2073		cachep->flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
2074#endif
2075
2076	if (OFF_SLAB(cachep)) {
2077		cachep->freelist_cache =
2078			kmalloc_slab(cachep->freelist_size, 0u);
2079	}
2080
2081	err = setup_cpu_cache(cachep, gfp);
2082	if (err) {
2083		__kmem_cache_release(cachep);
2084		return err;
2085	}
2086
2087	return 0;
2088}
2089
2090#if DEBUG
2091static void check_irq_off(void)
2092{
2093	BUG_ON(!irqs_disabled());
2094}
2095
2096static void check_irq_on(void)
2097{
2098	BUG_ON(irqs_disabled());
2099}
2100
2101static void check_mutex_acquired(void)
2102{
2103	BUG_ON(!mutex_is_locked(&slab_mutex));
2104}
2105
2106static void check_spinlock_acquired(struct kmem_cache *cachep)
2107{
2108#ifdef CONFIG_SMP
2109	check_irq_off();
2110	assert_spin_locked(&get_node(cachep, numa_mem_id())->list_lock);
2111#endif
2112}
2113
2114static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
2115{
2116#ifdef CONFIG_SMP
2117	check_irq_off();
2118	assert_spin_locked(&get_node(cachep, node)->list_lock);
2119#endif
2120}
2121
2122#else
2123#define check_irq_off()	do { } while(0)
2124#define check_irq_on()	do { } while(0)
2125#define check_mutex_acquired()	do { } while(0)
2126#define check_spinlock_acquired(x) do { } while(0)
2127#define check_spinlock_acquired_node(x, y) do { } while(0)
2128#endif
2129
2130static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac,
2131				int node, bool free_all, struct list_head *list)
2132{
2133	int tofree;
2134
2135	if (!ac || !ac->avail)
2136		return;
2137
2138	tofree = free_all ? ac->avail : (ac->limit + 4) / 5;
2139	if (tofree > ac->avail)
2140		tofree = (ac->avail + 1) / 2;
2141
2142	free_block(cachep, ac->entry, tofree, node, list);
2143	ac->avail -= tofree;
2144	memmove(ac->entry, &(ac->entry[tofree]), sizeof(void *) * ac->avail);
2145}
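/*
 * Example of the partial-drain sizing above (illustrative numbers): with
 * ac->limit == 120, a periodic drain frees (120 + 4) / 5 == 24 objects,
 * and if only 10 objects are currently cached the clamp frees
 * (10 + 1) / 2 == 5 instead, so an idle array is trimmed gradually rather
 * than emptied in one go.
 */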
2146
2147static void do_drain(void *arg)
2148{
2149	struct kmem_cache *cachep = arg;
2150	struct array_cache *ac;
2151	int node = numa_mem_id();
2152	struct kmem_cache_node *n;
2153	LIST_HEAD(list);
2154
2155	check_irq_off();
2156	ac = cpu_cache_get(cachep);
2157	n = get_node(cachep, node);
2158	spin_lock(&n->list_lock);
2159	free_block(cachep, ac->entry, ac->avail, node, &list);
2160	spin_unlock(&n->list_lock);
2161	ac->avail = 0;
2162	slabs_destroy(cachep, &list);
2163}
2164
2165static void drain_cpu_caches(struct kmem_cache *cachep)
2166{
2167	struct kmem_cache_node *n;
2168	int node;
2169	LIST_HEAD(list);
2170
2171	on_each_cpu(do_drain, cachep, 1);
2172	check_irq_on();
2173	for_each_kmem_cache_node(cachep, node, n)
2174		if (n->alien)
2175			drain_alien_cache(cachep, n->alien);
2176
2177	for_each_kmem_cache_node(cachep, node, n) {
2178		spin_lock_irq(&n->list_lock);
2179		drain_array_locked(cachep, n->shared, node, true, &list);
2180		spin_unlock_irq(&n->list_lock);
2181
2182		slabs_destroy(cachep, &list);
2183	}
2184}
2185
2186/*
2187 * Remove slabs from the list of free slabs.
2188 * Specify the number of slabs to drain in tofree.
2189 *
2190 * Returns the actual number of slabs released.
2191 */
2192static int drain_freelist(struct kmem_cache *cache,
2193			struct kmem_cache_node *n, int tofree)
2194{
2195	struct list_head *p;
2196	int nr_freed;
2197	struct page *page;
2198
2199	nr_freed = 0;
2200	while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
2201
2202		spin_lock_irq(&n->list_lock);
2203		p = n->slabs_free.prev;
2204		if (p == &n->slabs_free) {
2205			spin_unlock_irq(&n->list_lock);
2206			goto out;
2207		}
2208
2209		page = list_entry(p, struct page, slab_list);
2210		list_del(&page->slab_list);
2211		n->free_slabs--;
2212		n->total_slabs--;
2213		/*
2214		 * Safe to drop the lock. The slab is no longer linked
2215		 * to the cache.
2216		 */
2217		n->free_objects -= cache->num;
2218		spin_unlock_irq(&n->list_lock);
2219		slab_destroy(cache, page);
2220		nr_freed++;
2221	}
2222out:
2223	return nr_freed;
2224}
2225
2226bool __kmem_cache_empty(struct kmem_cache *s)
2227{
2228	int node;
2229	struct kmem_cache_node *n;
2230
2231	for_each_kmem_cache_node(s, node, n)
2232		if (!list_empty(&n->slabs_full) ||
2233		    !list_empty(&n->slabs_partial))
2234			return false;
2235	return true;
2236}
2237
2238int __kmem_cache_shrink(struct kmem_cache *cachep)
2239{
2240	int ret = 0;
2241	int node;
2242	struct kmem_cache_node *n;
2243
2244	drain_cpu_caches(cachep);
2245
2246	check_irq_on();
2247	for_each_kmem_cache_node(cachep, node, n) {
2248		drain_freelist(cachep, n, INT_MAX);
2249
2250		ret += !list_empty(&n->slabs_full) ||
2251			!list_empty(&n->slabs_partial);
2252	}
2253	return (ret ? 1 : 0);
2254}
2255
2256int __kmem_cache_shutdown(struct kmem_cache *cachep)
2257{
2258	return __kmem_cache_shrink(cachep);
2259}
2260
2261void __kmem_cache_release(struct kmem_cache *cachep)
2262{
2263	int i;
2264	struct kmem_cache_node *n;
2265
2266	cache_random_seq_destroy(cachep);
2267
2268	free_percpu(cachep->cpu_cache);
2269
2270	/* NUMA: free the node structures */
2271	for_each_kmem_cache_node(cachep, i, n) {
2272		kfree(n->shared);
2273		free_alien_cache(n->alien);
2274		kfree(n);
2275		cachep->node[i] = NULL;
2276	}
2277}
2278
2279/*
2280 * Get the memory for a slab management obj.
2281 *
2282 * For a slab cache whose slab descriptor is kept off-slab, the
2283 * descriptor can't come from the same cache that is being created:
2284 * that would mean deferring the creation of the kmalloc_{dma,}_cache
2285 * of size sizeof(slab descriptor) to this point, and we would
2286 * eventually call down to __kmem_cache_create(), which in turn looks
2287 * up in the kmalloc_{dma,}_caches for the desired-size one.
2288 * This is a "chicken-and-egg" problem.
2289 *
2290 * So the off-slab slab descriptor shall come from the kmalloc_{dma,}_caches,
2291 * which are all initialized during kmem_cache_init().
2292 */
2293static void *alloc_slabmgmt(struct kmem_cache *cachep,
2294				   struct page *page, int colour_off,
2295				   gfp_t local_flags, int nodeid)
2296{
2297	void *freelist;
2298	void *addr = page_address(page);
2299
2300	page->s_mem = addr + colour_off;
2301	page->active = 0;
2302
2303	if (OBJFREELIST_SLAB(cachep))
2304		freelist = NULL;
2305	else if (OFF_SLAB(cachep)) {
2306		/* Slab management obj is off-slab. */
2307		freelist = kmem_cache_alloc_node(cachep->freelist_cache,
2308					      local_flags, nodeid);
2309	} else {
2310		/* We will use last bytes at the slab for freelist */
2311		freelist = addr + (PAGE_SIZE << cachep->gfporder) -
2312				cachep->freelist_size;
2313	}
2314
2315	return freelist;
2316}
2317
2318static inline freelist_idx_t get_free_obj(struct page *page, unsigned int idx)
2319{
2320	return ((freelist_idx_t *)page->freelist)[idx];
2321}
2322
2323static inline void set_free_obj(struct page *page,
2324					unsigned int idx, freelist_idx_t val)
2325{
2326	((freelist_idx_t *)(page->freelist))[idx] = val;
2327}
2328
2329static void cache_init_objs_debug(struct kmem_cache *cachep, struct page *page)
2330{
2331#if DEBUG
2332	int i;
2333
2334	for (i = 0; i < cachep->num; i++) {
2335		void *objp = index_to_obj(cachep, page, i);
2336
2337		if (cachep->flags & SLAB_STORE_USER)
2338			*dbg_userword(cachep, objp) = NULL;
2339
2340		if (cachep->flags & SLAB_RED_ZONE) {
2341			*dbg_redzone1(cachep, objp) = RED_INACTIVE;
2342			*dbg_redzone2(cachep, objp) = RED_INACTIVE;
2343		}
2344		/*
2345		 * Constructors are not allowed to allocate memory from the same
2346		 * cache which they are a constructor for.  Otherwise, deadlock.
2347		 * They must also be threaded.
2348		 */
2349		if (cachep->ctor && !(cachep->flags & SLAB_POISON)) {
2350			kasan_unpoison_object_data(cachep,
2351						   objp + obj_offset(cachep));
2352			cachep->ctor(objp + obj_offset(cachep));
2353			kasan_poison_object_data(
2354				cachep, objp + obj_offset(cachep));
2355		}
2356
2357		if (cachep->flags & SLAB_RED_ZONE) {
2358			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
2359				slab_error(cachep, "constructor overwrote the end of an object");
2360			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
2361				slab_error(cachep, "constructor overwrote the start of an object");
2362		}
2363		/* need to poison the objs? */
2364		if (cachep->flags & SLAB_POISON) {
2365			poison_obj(cachep, objp, POISON_FREE);
2366			slab_kernel_map(cachep, objp, 0);
2367		}
2368	}
2369#endif
2370}
2371
2372#ifdef CONFIG_SLAB_FREELIST_RANDOM
2373/* Hold information during a freelist initialization */
2374union freelist_init_state {
2375	struct {
2376		unsigned int pos;
2377		unsigned int *list;
2378		unsigned int count;
2379	};
2380	struct rnd_state rnd_state;
2381};
2382
2383/*
2384 * Initialize the state based on the randomization method available.
2385 * return true if the pre-computed list is available, false otherwise.
2386 */
2387static bool freelist_state_initialize(union freelist_init_state *state,
2388				struct kmem_cache *cachep,
2389				unsigned int count)
2390{
2391	bool ret;
2392	unsigned int rand;
2393
2394	/* Use best entropy available to define a random shift */
2395	rand = get_random_int();
2396
2397	/* Use a random state if the pre-computed list is not available */
2398	if (!cachep->random_seq) {
2399		prandom_seed_state(&state->rnd_state, rand);
2400		ret = false;
2401	} else {
2402		state->list = cachep->random_seq;
2403		state->count = count;
2404		state->pos = rand % count;
2405		ret = true;
2406	}
2407	return ret;
2408}
2409
2410/* Get the next entry on the list and randomize it using a random shift */
2411static freelist_idx_t next_random_slot(union freelist_init_state *state)
2412{
2413	if (state->pos >= state->count)
2414		state->pos = 0;
2415	return state->list[state->pos++];
2416}
2417
2418/* Swap two freelist entries */
2419static void swap_free_obj(struct page *page, unsigned int a, unsigned int b)
2420{
2421	swap(((freelist_idx_t *)page->freelist)[a],
2422		((freelist_idx_t *)page->freelist)[b]);
2423}
2424
2425/*
2426 * Shuffle the freelist initialization state based on pre-computed lists.
2427 * return true if the list was successfully shuffled, false otherwise.
2428 */
2429static bool shuffle_freelist(struct kmem_cache *cachep, struct page *page)
2430{
2431	unsigned int objfreelist = 0, i, rand, count = cachep->num;
2432	union freelist_init_state state;
2433	bool precomputed;
2434
2435	if (count < 2)
2436		return false;
2437
2438	precomputed = freelist_state_initialize(&state, cachep, count);
2439
2440	/* Take a random entry as the objfreelist */
2441	if (OBJFREELIST_SLAB(cachep)) {
2442		if (!precomputed)
2443			objfreelist = count - 1;
2444		else
2445			objfreelist = next_random_slot(&state);
2446		page->freelist = index_to_obj(cachep, page, objfreelist) +
2447						obj_offset(cachep);
2448		count--;
2449	}
2450
2451	/*
2452	 * On early boot, generate the list dynamically.
2453	 * Later use a pre-computed list for speed.
2454	 */
2455	if (!precomputed) {
2456		for (i = 0; i < count; i++)
2457			set_free_obj(page, i, i);
2458
2459		/* Fisher-Yates shuffle */
2460		for (i = count - 1; i > 0; i--) {
2461			rand = prandom_u32_state(&state.rnd_state);
2462			rand %= (i + 1);
2463			swap_free_obj(page, i, rand);
2464		}
2465	} else {
2466		for (i = 0; i < count; i++)
2467			set_free_obj(page, i, next_random_slot(&state));
2468	}
2469
2470	if (OBJFREELIST_SLAB(cachep))
2471		set_free_obj(page, cachep->num - 1, objfreelist);
2472
2473	return true;
2474}
2475#else
2476static inline bool shuffle_freelist(struct kmem_cache *cachep,
2477				struct page *page)
2478{
2479	return false;
2480}
2481#endif /* CONFIG_SLAB_FREELIST_RANDOM */
2482
2483static void cache_init_objs(struct kmem_cache *cachep,
2484			    struct page *page)
2485{
2486	int i;
2487	void *objp;
2488	bool shuffled;
2489
2490	cache_init_objs_debug(cachep, page);
2491
2492	/* Try to randomize the freelist if enabled */
2493	shuffled = shuffle_freelist(cachep, page);
2494
2495	if (!shuffled && OBJFREELIST_SLAB(cachep)) {
2496		page->freelist = index_to_obj(cachep, page, cachep->num - 1) +
2497						obj_offset(cachep);
2498	}
2499
2500	for (i = 0; i < cachep->num; i++) {
2501		objp = index_to_obj(cachep, page, i);
2502		objp = kasan_init_slab_obj(cachep, objp);
2503
2504		/* constructor could break poison info */
2505		if (DEBUG == 0 && cachep->ctor) {
2506			kasan_unpoison_object_data(cachep, objp);
2507			cachep->ctor(objp);
2508			kasan_poison_object_data(cachep, objp);
2509		}
2510
2511		if (!shuffled)
2512			set_free_obj(page, i, i);
2513	}
2514}
2515
2516static void *slab_get_obj(struct kmem_cache *cachep, struct page *page)
2517{
2518	void *objp;
2519
2520	objp = index_to_obj(cachep, page, get_free_obj(page, page->active));
2521	page->active++;
2522
2523	return objp;
2524}
2525
2526static void slab_put_obj(struct kmem_cache *cachep,
2527			struct page *page, void *objp)
2528{
2529	unsigned int objnr = obj_to_index(cachep, page, objp);
2530#if DEBUG
2531	unsigned int i;
2532
2533	/* Verify double free bug */
2534	for (i = page->active; i < cachep->num; i++) {
2535		if (get_free_obj(page, i) == objnr) {
2536			pr_err("slab: double free detected in cache '%s', objp %px\n",
2537			       cachep->name, objp);
2538			BUG();
2539		}
2540	}
2541#endif
2542	page->active--;
2543	if (!page->freelist)
2544		page->freelist = objp + obj_offset(cachep);
2545
2546	set_free_obj(page, page->active, objnr);
2547}
2548
2549/*
2550 * Map pages beginning at addr to the given cache and slab. This is required
2551 * for the slab allocator to be able to lookup the cache and slab of a
2552 * virtual address for kfree, ksize, and slab debugging.
2553 */
2554static void slab_map_pages(struct kmem_cache *cache, struct page *page,
2555			   void *freelist)
2556{
2557	page->slab_cache = cache;
2558	page->freelist = freelist;
2559}
2560
2561/*
2562 * Grow (by 1) the number of slabs within a cache.  This is called by
2563 * kmem_cache_alloc() when there are no active objs left in a cache.
2564 */
2565static struct page *cache_grow_begin(struct kmem_cache *cachep,
2566				gfp_t flags, int nodeid)
2567{
2568	void *freelist;
2569	size_t offset;
2570	gfp_t local_flags;
2571	int page_node;
2572	struct kmem_cache_node *n;
2573	struct page *page;
2574
2575	/*
2576	 * Be lazy and only check for valid flags here, keeping it out of the
2577	 * critical path in kmem_cache_alloc().
2578	 */
2579	if (unlikely(flags & GFP_SLAB_BUG_MASK))
2580		flags = kmalloc_fix_flags(flags);
2581
2582	WARN_ON_ONCE(cachep->ctor && (flags & __GFP_ZERO));
2583	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
2584
2585	check_irq_off();
2586	if (gfpflags_allow_blocking(local_flags))
2587		local_irq_enable();
2588
2589	/*
2590	 * Get mem for the objs.  Attempt to allocate a physical page from
2591	 * 'nodeid'.
2592	 */
2593	page = kmem_getpages(cachep, local_flags, nodeid);
2594	if (!page)
2595		goto failed;
2596
2597	page_node = page_to_nid(page);
2598	n = get_node(cachep, page_node);
2599
2600	/* Get colour for the slab, and calculate the next value. */
2601	n->colour_next++;
2602	if (n->colour_next >= cachep->colour)
2603		n->colour_next = 0;
2604
2605	offset = n->colour_next;
2606	if (offset >= cachep->colour)
2607		offset = 0;
2608
2609	offset *= cachep->colour_off;
2610
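	/*
	 * Illustrative example of the colouring above: with colour_off == 64
	 * (one cache line) and cachep->colour == 3, successive slabs place
	 * their first object at offsets 0, 64 and 128 bytes, so objects from
	 * different slabs do not all map to the same cache lines.
	 */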
2611	/*
2612	 * Call kasan_poison_slab() before calling alloc_slabmgmt(), so
2613	 * page_address() in the latter returns a non-tagged pointer,
2614	 * as it should be for slab pages.
2615	 */
2616	kasan_poison_slab(page);
2617
2618	/* Get slab management. */
2619	freelist = alloc_slabmgmt(cachep, page, offset,
2620			local_flags & ~GFP_CONSTRAINT_MASK, page_node);
2621	if (OFF_SLAB(cachep) && !freelist)
2622		goto opps1;
2623
2624	slab_map_pages(cachep, page, freelist);
2625
2626	cache_init_objs(cachep, page);
2627
2628	if (gfpflags_allow_blocking(local_flags))
2629		local_irq_disable();
2630
2631	return page;
2632
2633opps1:
2634	kmem_freepages(cachep, page);
2635failed:
2636	if (gfpflags_allow_blocking(local_flags))
2637		local_irq_disable();
2638	return NULL;
2639}
2640
2641static void cache_grow_end(struct kmem_cache *cachep, struct page *page)
2642{
2643	struct kmem_cache_node *n;
2644	void *list = NULL;
2645
2646	check_irq_off();
2647
2648	if (!page)
2649		return;
2650
2651	INIT_LIST_HEAD(&page->slab_list);
2652	n = get_node(cachep, page_to_nid(page));
2653
2654	spin_lock(&n->list_lock);
2655	n->total_slabs++;
2656	if (!page->active) {
2657		list_add_tail(&page->slab_list, &n->slabs_free);
2658		n->free_slabs++;
2659	} else
2660		fixup_slab_list(cachep, n, page, &list);
2661
2662	STATS_INC_GROWN(cachep);
2663	n->free_objects += cachep->num - page->active;
2664	spin_unlock(&n->list_lock);
2665
2666	fixup_objfreelist_debug(cachep, &list);
2667}
2668
2669#if DEBUG
2670
2671/*
2672 * Perform extra freeing checks:
2673 * - detect bad pointers.
2674 * - POISON/RED_ZONE checking
2675 */
2676static void kfree_debugcheck(const void *objp)
2677{
2678	if (!virt_addr_valid(objp)) {
2679		pr_err("kfree_debugcheck: out of range ptr %lxh\n",
2680		       (unsigned long)objp);
2681		BUG();
2682	}
2683}
2684
2685static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
2686{
2687	unsigned long long redzone1, redzone2;
2688
2689	redzone1 = *dbg_redzone1(cache, obj);
2690	redzone2 = *dbg_redzone2(cache, obj);
2691
2692	/*
2693	 * Redzone is ok.
2694	 */
2695	if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
2696		return;
2697
2698	if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
2699		slab_error(cache, "double free detected");
2700	else
2701		slab_error(cache, "memory outside object was overwritten");
2702
2703	pr_err("%px: redzone 1:0x%llx, redzone 2:0x%llx\n",
2704	       obj, redzone1, redzone2);
2705}
2706
2707static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
2708				   unsigned long caller)
2709{
2710	unsigned int objnr;
2711	struct page *page;
2712
2713	BUG_ON(virt_to_cache(objp) != cachep);
2714
2715	objp -= obj_offset(cachep);
2716	kfree_debugcheck(objp);
2717	page = virt_to_head_page(objp);
2718
2719	if (cachep->flags & SLAB_RED_ZONE) {
2720		verify_redzone_free(cachep, objp);
2721		*dbg_redzone1(cachep, objp) = RED_INACTIVE;
2722		*dbg_redzone2(cachep, objp) = RED_INACTIVE;
2723	}
2724	if (cachep->flags & SLAB_STORE_USER)
2725		*dbg_userword(cachep, objp) = (void *)caller;
2726
2727	objnr = obj_to_index(cachep, page, objp);
2728
2729	BUG_ON(objnr >= cachep->num);
2730	BUG_ON(objp != index_to_obj(cachep, page, objnr));
2731
2732	if (cachep->flags & SLAB_POISON) {
2733		poison_obj(cachep, objp, POISON_FREE);
2734		slab_kernel_map(cachep, objp, 0);
2735	}
2736	return objp;
2737}
2738
2739#else
2740#define kfree_debugcheck(x) do { } while(0)
2741#define cache_free_debugcheck(x, objp, z) (objp)
2742#endif
2743
2744static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
2745						void **list)
2746{
2747#if DEBUG
2748	void *next = *list;
2749	void *objp;
2750
2751	while (next) {
2752		objp = next - obj_offset(cachep);
2753		next = *(void **)next;
2754		poison_obj(cachep, objp, POISON_FREE);
2755	}
2756#endif
2757}
2758
2759static inline void fixup_slab_list(struct kmem_cache *cachep,
2760				struct kmem_cache_node *n, struct page *page,
2761				void **list)
2762{
2763	/* move slabp to correct slabp list: */
2764	list_del(&page->slab_list);
2765	if (page->active == cachep->num) {
2766		list_add(&page->slab_list, &n->slabs_full);
2767		if (OBJFREELIST_SLAB(cachep)) {
2768#if DEBUG
2769			/* Poisoning will be done without holding the lock */
2770			if (cachep->flags & SLAB_POISON) {
2771				void **objp = page->freelist;
2772
2773				*objp = *list;
2774				*list = objp;
2775			}
2776#endif
2777			page->freelist = NULL;
2778		}
2779	} else
2780		list_add(&page->slab_list, &n->slabs_partial);
2781}
2782
2783/* Try to find non-pfmemalloc slab if needed */
2784static noinline struct page *get_valid_first_slab(struct kmem_cache_node *n,
2785					struct page *page, bool pfmemalloc)
2786{
2787	if (!page)
2788		return NULL;
2789
2790	if (pfmemalloc)
2791		return page;
2792
2793	if (!PageSlabPfmemalloc(page))
2794		return page;
2795
2796	/* No need to keep pfmemalloc slab if we have enough free objects */
2797	if (n->free_objects > n->free_limit) {
2798		ClearPageSlabPfmemalloc(page);
2799		return page;
2800	}
2801
2802	/* Move pfmemalloc slab to the end of list to speed up next search */
2803	list_del(&page->slab_list);
2804	if (!page->active) {
2805		list_add_tail(&page->slab_list, &n->slabs_free);
2806		n->free_slabs++;
2807	} else
2808		list_add_tail(&page->slab_list, &n->slabs_partial);
2809
2810	list_for_each_entry(page, &n->slabs_partial, slab_list) {
2811		if (!PageSlabPfmemalloc(page))
2812			return page;
2813	}
2814
2815	n->free_touched = 1;
2816	list_for_each_entry(page, &n->slabs_free, slab_list) {
2817		if (!PageSlabPfmemalloc(page)) {
2818			n->free_slabs--;
2819			return page;
2820		}
2821	}
2822
2823	return NULL;
2824}
2825
2826static struct page *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc)
2827{
2828	struct page *page;
2829
2830	assert_spin_locked(&n->list_lock);
2831	page = list_first_entry_or_null(&n->slabs_partial, struct page,
2832					slab_list);
2833	if (!page) {
2834		n->free_touched = 1;
2835		page = list_first_entry_or_null(&n->slabs_free, struct page,
2836						slab_list);
2837		if (page)
2838			n->free_slabs--;
2839	}
2840
2841	if (sk_memalloc_socks())
2842		page = get_valid_first_slab(n, page, pfmemalloc);
2843
2844	return page;
2845}
2846
2847static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep,
2848				struct kmem_cache_node *n, gfp_t flags)
2849{
2850	struct page *page;
2851	void *obj;
2852	void *list = NULL;
2853
2854	if (!gfp_pfmemalloc_allowed(flags))
2855		return NULL;
2856
2857	spin_lock(&n->list_lock);
2858	page = get_first_slab(n, true);
2859	if (!page) {
2860		spin_unlock(&n->list_lock);
2861		return NULL;
2862	}
2863
2864	obj = slab_get_obj(cachep, page);
2865	n->free_objects--;
2866
2867	fixup_slab_list(cachep, n, page, &list);
2868
2869	spin_unlock(&n->list_lock);
2870	fixup_objfreelist_debug(cachep, &list);
2871
2872	return obj;
2873}
2874
2875/*
2876 * The slab list should be fixed up by fixup_slab_list() for an existing
2877 * slab, or by cache_grow_end() for a new slab.
2878 */
2879static __always_inline int alloc_block(struct kmem_cache *cachep,
2880		struct array_cache *ac, struct page *page, int batchcount)
2881{
2882	/*
2883	 * There must be at least one object available for
2884	 * allocation.
2885	 */
2886	BUG_ON(page->active >= cachep->num);
2887
2888	while (page->active < cachep->num && batchcount--) {
2889		STATS_INC_ALLOCED(cachep);
2890		STATS_INC_ACTIVE(cachep);
2891		STATS_SET_HIGH(cachep);
2892
2893		ac->entry[ac->avail++] = slab_get_obj(cachep, page);
2894	}
2895
2896	return batchcount;
2897}
2898
2899static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
2900{
2901	int batchcount;
2902	struct kmem_cache_node *n;
2903	struct array_cache *ac, *shared;
2904	int node;
2905	void *list = NULL;
2906	struct page *page;
2907
2908	check_irq_off();
2909	node = numa_mem_id();
2910
2911	ac = cpu_cache_get(cachep);
2912	batchcount = ac->batchcount;
2913	if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
2914		/*
2915		 * If there was little recent activity on this cache, then
2916		 * perform only a partial refill.  Otherwise we could generate
2917		 * refill bouncing.
2918		 */
2919		batchcount = BATCHREFILL_LIMIT;
2920	}
2921	n = get_node(cachep, node);
2922
2923	BUG_ON(ac->avail > 0 || !n);
2924	shared = READ_ONCE(n->shared);
2925	if (!n->free_objects && (!shared || !shared->avail))
2926		goto direct_grow;
2927
2928	spin_lock(&n->list_lock);
2929	shared = READ_ONCE(n->shared);
2930
2931	/* See if we can refill from the shared array */
2932	if (shared && transfer_objects(ac, shared, batchcount)) {
2933		shared->touched = 1;
2934		goto alloc_done;
2935	}
2936
2937	while (batchcount > 0) {
2938		/* Get the slab the allocation is to come from. */
2939		page = get_first_slab(n, false);
2940		if (!page)
2941			goto must_grow;
2942
2943		check_spinlock_acquired(cachep);
2944
2945		batchcount = alloc_block(cachep, ac, page, batchcount);
2946		fixup_slab_list(cachep, n, page, &list);
2947	}
2948
2949must_grow:
2950	n->free_objects -= ac->avail;
2951alloc_done:
2952	spin_unlock(&n->list_lock);
2953	fixup_objfreelist_debug(cachep, &list);
2954
2955direct_grow:
2956	if (unlikely(!ac->avail)) {
2957		/* Check if we can use obj in pfmemalloc slab */
2958		if (sk_memalloc_socks()) {
2959			void *obj = cache_alloc_pfmemalloc(cachep, n, flags);
2960
2961			if (obj)
2962				return obj;
2963		}
2964
2965		page = cache_grow_begin(cachep, gfp_exact_node(flags), node);
2966
2967		/*
2968		 * cache_grow_begin() can reenable interrupts,
2969		 * then ac could change.
2970		 */
2971		ac = cpu_cache_get(cachep);
2972		if (!ac->avail && page)
2973			alloc_block(cachep, ac, page, batchcount);
2974		cache_grow_end(cachep, page);
2975
2976		if (!ac->avail)
2977			return NULL;
2978	}
2979	ac->touched = 1;
2980
2981	return ac->entry[--ac->avail];
2982}
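/*
 * Refill order used above once the per-cpu array is empty: first transfer
 * objects from the node's shared array, then pull objects out of partial or
 * free slabs on the local node, then (when socket memalloc is in use) try a
 * pfmemalloc slab, and only as a last resort grow the cache with a fresh
 * slab page.
 */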
2983
2984static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
2985						gfp_t flags)
2986{
2987	might_sleep_if(gfpflags_allow_blocking(flags));
2988}
2989
2990#if DEBUG
2991static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
2992				gfp_t flags, void *objp, unsigned long caller)
2993{
2994	WARN_ON_ONCE(cachep->ctor && (flags & __GFP_ZERO));
2995	if (!objp || is_kfence_address(objp))
2996		return objp;
2997	if (cachep->flags & SLAB_POISON) {
2998		check_poison_obj(cachep, objp);
2999		slab_kernel_map(cachep, objp, 1);
3000		poison_obj(cachep, objp, POISON_INUSE);
3001	}
3002	if (cachep->flags & SLAB_STORE_USER)
3003		*dbg_userword(cachep, objp) = (void *)caller;
3004
3005	if (cachep->flags & SLAB_RED_ZONE) {
3006		if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
3007				*dbg_redzone2(cachep, objp) != RED_INACTIVE) {
3008			slab_error(cachep, "double free, or memory outside object was overwritten");
3009			pr_err("%px: redzone 1:0x%llx, redzone 2:0x%llx\n",
3010			       objp, *dbg_redzone1(cachep, objp),
3011			       *dbg_redzone2(cachep, objp));
3012		}
3013		*dbg_redzone1(cachep, objp) = RED_ACTIVE;
3014		*dbg_redzone2(cachep, objp) = RED_ACTIVE;
3015	}
3016
3017	objp += obj_offset(cachep);
3018	if (cachep->ctor && cachep->flags & SLAB_POISON)
3019		cachep->ctor(objp);
3020	if (ARCH_SLAB_MINALIGN &&
3021	    ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) {
3022		pr_err("0x%px: not aligned to ARCH_SLAB_MINALIGN=%d\n",
3023		       objp, (int)ARCH_SLAB_MINALIGN);
3024	}
3025	return objp;
3026}
3027#else
3028#define cache_alloc_debugcheck_after(a, b, objp, d) (objp)
3029#endif
3030
3031static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3032{
3033	void *objp;
3034	struct array_cache *ac;
3035
3036	check_irq_off();
3037
3038	ac = cpu_cache_get(cachep);
3039	if (likely(ac->avail)) {
3040		ac->touched = 1;
3041		objp = ac->entry[--ac->avail];
3042
3043		STATS_INC_ALLOCHIT(cachep);
3044		goto out;
3045	}
3046
3047	STATS_INC_ALLOCMISS(cachep);
3048	objp = cache_alloc_refill(cachep, flags);
3049	/*
3050	 * the 'ac' may be updated by cache_alloc_refill(),
3051	 * and kmemleak_erase() requires its correct value.
3052	 */
3053	ac = cpu_cache_get(cachep);
3054
3055out:
3056	/*
3057	 * To avoid a false negative, if an object that is in one of the
3058	 * per-CPU caches is leaked, we need to make sure kmemleak doesn't
3059	 * treat the array pointers as a reference to the object.
3060	 */
3061	if (objp)
3062		kmemleak_erase(&ac->entry[ac->avail]);
3063	return objp;
3064}
3065
3066#ifdef CONFIG_NUMA
3067/*
3068 * Try allocating on another node if PFA_SPREAD_SLAB is set or a mempolicy is in effect.
3069 *
3070 * If we are in_interrupt, then process context, including cpusets and
3071 * mempolicy, may not apply and should not be used for allocation policy.
3072 */
3073static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
3074{
3075	int nid_alloc, nid_here;
3076
3077	if (in_interrupt() || (flags & __GFP_THISNODE))
3078		return NULL;
3079	nid_alloc = nid_here = numa_mem_id();
3080	if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
3081		nid_alloc = cpuset_slab_spread_node();
3082	else if (current->mempolicy)
3083		nid_alloc = mempolicy_slab_node();
3084	if (nid_alloc != nid_here)
3085		return ____cache_alloc_node(cachep, flags, nid_alloc);
3086	return NULL;
3087}
3088
3089/*
3090 * Fallback function if there was no memory available and no objects on a
3091 * certain node and fallback is permitted. First we scan all the
3092 * available nodes for available objects. If that fails then we
3093 * perform an allocation without specifying a node. This allows the page
3094 * allocator to do its reclaim / fallback magic. We then insert the
3095 * slab into the proper nodelist and then allocate from it.
3096 */
3097static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
3098{
3099	struct zonelist *zonelist;
3100	struct zoneref *z;
3101	struct zone *zone;
3102	enum zone_type highest_zoneidx = gfp_zone(flags);
3103	void *obj = NULL;
3104	struct page *page;
3105	int nid;
3106	unsigned int cpuset_mems_cookie;
3107
3108	if (flags & __GFP_THISNODE)
3109		return NULL;
3110
3111retry_cpuset:
3112	cpuset_mems_cookie = read_mems_allowed_begin();
3113	zonelist = node_zonelist(mempolicy_slab_node(), flags);
3114
3115retry:
3116	/*
3117	 * Look through allowed nodes for objects available
3118	 * from existing per node queues.
3119	 */
3120	for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {
3121		nid = zone_to_nid(zone);
3122
3123		if (cpuset_zone_allowed(zone, flags) &&
3124			get_node(cache, nid) &&
3125			get_node(cache, nid)->free_objects) {
3126				obj = ____cache_alloc_node(cache,
3127					gfp_exact_node(flags), nid);
3128				if (obj)
3129					break;
3130		}
3131	}
3132
3133	if (!obj) {
3134		/*
3135		 * This allocation will be performed within the constraints
3136		 * of the current cpuset / memory policy requirements.
3137		 * We may trigger various forms of reclaim on the allowed
3138		 * set and go into memory reserves if necessary.
3139		 */
3140		page = cache_grow_begin(cache, flags, numa_mem_id());
3141		cache_grow_end(cache, page);
3142		if (page) {
3143			nid = page_to_nid(page);
3144			obj = ____cache_alloc_node(cache,
3145				gfp_exact_node(flags), nid);
3146
3147			/*
3148			 * Another processor may allocate the objects in
3149			 * the slab since we are not holding any locks.
3150			 */
3151			if (!obj)
3152				goto retry;
3153		}
3154	}
3155
3156	if (unlikely(!obj && read_mems_allowed_retry(cpuset_mems_cookie)))
3157		goto retry_cpuset;
3158	return obj;
3159}
3160
3161/*
3162 * An interface to enable slab creation on nodeid
3163 */
3164static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
3165				int nodeid)
3166{
3167	struct page *page;
3168	struct kmem_cache_node *n;
3169	void *obj = NULL;
3170	void *list = NULL;
3171
3172	VM_BUG_ON(nodeid < 0 || nodeid >= MAX_NUMNODES);
3173	n = get_node(cachep, nodeid);
3174	BUG_ON(!n);
3175
3176	check_irq_off();
3177	spin_lock(&n->list_lock);
3178	page = get_first_slab(n, false);
3179	if (!page)
3180		goto must_grow;
3181
3182	check_spinlock_acquired_node(cachep, nodeid);
3183
3184	STATS_INC_NODEALLOCS(cachep);
3185	STATS_INC_ACTIVE(cachep);
3186	STATS_SET_HIGH(cachep);
3187
3188	BUG_ON(page->active == cachep->num);
3189
3190	obj = slab_get_obj(cachep, page);
3191	n->free_objects--;
3192
3193	fixup_slab_list(cachep, n, page, &list);
3194
3195	spin_unlock(&n->list_lock);
3196	fixup_objfreelist_debug(cachep, &list);
3197	return obj;
3198
3199must_grow:
3200	spin_unlock(&n->list_lock);
3201	page = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid);
3202	if (page) {
3203		/* This slab isn't counted yet so don't update free_objects */
3204		obj = slab_get_obj(cachep, page);
3205	}
3206	cache_grow_end(cachep, page);
3207
3208	return obj ? obj : fallback_alloc(cachep, flags);
3209}
3210
3211static __always_inline void *
3212slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t orig_size,
3213		   unsigned long caller)
3214{
3215	unsigned long save_flags;
3216	void *ptr;
3217	int slab_node = numa_mem_id();
3218	struct obj_cgroup *objcg = NULL;
3219	bool init = false;
3220
3221	flags &= gfp_allowed_mask;
3222	cachep = slab_pre_alloc_hook(cachep, &objcg, 1, flags);
3223	if (unlikely(!cachep))
3224		return NULL;
3225
3226	ptr = kfence_alloc(cachep, orig_size, flags);
3227	if (unlikely(ptr))
3228		goto out_hooks;
3229
3230	cache_alloc_debugcheck_before(cachep, flags);
3231	local_irq_save(save_flags);
3232
3233	if (nodeid == NUMA_NO_NODE)
3234		nodeid = slab_node;
3235
3236	if (unlikely(!get_node(cachep, nodeid))) {
3237		/* Node not bootstrapped yet */
3238		ptr = fallback_alloc(cachep, flags);
3239		goto out;
3240	}
3241
3242	if (nodeid == slab_node) {
3243		/*
3244		 * Use the locally cached objects if possible.
3245		 * However ____cache_alloc does not allow fallback
3246		 * to other nodes. It may fail while we still have
3247		 * objects on other nodes available.
3248		 */
3249		ptr = ____cache_alloc(cachep, flags);
3250		if (ptr)
3251			goto out;
3252	}
3253	/* ___cache_alloc_node can fall back to other nodes */
3254	ptr = ____cache_alloc_node(cachep, flags, nodeid);
3255  out:
3256	local_irq_restore(save_flags);
3257	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
3258	init = slab_want_init_on_alloc(flags, cachep);
3259
3260out_hooks:
3261	slab_post_alloc_hook(cachep, objcg, flags, 1, &ptr, init);
3262	return ptr;
3263}
3264
3265static __always_inline void *
3266__do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
3267{
3268	void *objp;
3269
3270	if (current->mempolicy || cpuset_do_slab_mem_spread()) {
3271		objp = alternate_node_alloc(cache, flags);
3272		if (objp)
3273			goto out;
3274	}
3275	objp = ____cache_alloc(cache, flags);
3276
3277	/*
3278	 * We may just have run out of memory on the local node.
3279	 * ____cache_alloc_node() knows how to locate memory on other nodes
3280	 */
3281	if (!objp)
3282		objp = ____cache_alloc_node(cache, flags, numa_mem_id());
3283
3284  out:
3285	return objp;
3286}
3287#else
3288
3289static __always_inline void *
3290__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3291{
3292	return ____cache_alloc(cachep, flags);
3293}
3294
3295#endif /* CONFIG_NUMA */
3296
3297static __always_inline void *
3298slab_alloc(struct kmem_cache *cachep, gfp_t flags, size_t orig_size, unsigned long caller)
3299{
3300	unsigned long save_flags;
3301	void *objp;
3302	struct obj_cgroup *objcg = NULL;
3303	bool init = false;
3304
3305	flags &= gfp_allowed_mask;
3306	cachep = slab_pre_alloc_hook(cachep, &objcg, 1, flags);
3307	if (unlikely(!cachep))
3308		return NULL;
3309
3310	objp = kfence_alloc(cachep, orig_size, flags);
3311	if (unlikely(objp))
3312		goto out;
3313
3314	cache_alloc_debugcheck_before(cachep, flags);
3315	local_irq_save(save_flags);
3316	objp = __do_cache_alloc(cachep, flags);
3317	local_irq_restore(save_flags);
3318	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
3319	prefetchw(objp);
3320	init = slab_want_init_on_alloc(flags, cachep);
3321
3322out:
3323	slab_post_alloc_hook(cachep, objcg, flags, 1, &objp, init);
3324	return objp;
3325}
3326
3327/*
3328 * Caller needs to acquire the correct kmem_cache_node's list_lock.
3329 * @list: list of detached free slabs, to be freed by the caller
3330 */
3331static void free_block(struct kmem_cache *cachep, void **objpp,
3332			int nr_objects, int node, struct list_head *list)
3333{
3334	int i;
3335	struct kmem_cache_node *n = get_node(cachep, node);
3336	struct page *page;
3337
3338	n->free_objects += nr_objects;
3339
3340	for (i = 0; i < nr_objects; i++) {
3341		void *objp;
3342		struct page *page;
3343
3344		objp = objpp[i];
3345
3346		page = virt_to_head_page(objp);
3347		list_del(&page->slab_list);
3348		check_spinlock_acquired_node(cachep, node);
3349		slab_put_obj(cachep, page, objp);
3350		STATS_DEC_ACTIVE(cachep);
3351
3352		/* fixup slab chains */
3353		if (page->active == 0) {
3354			list_add(&page->slab_list, &n->slabs_free);
3355			n->free_slabs++;
3356		} else {
3357			/* Unconditionally move a slab to the end of the
3358			 * partial list on free - maximum time for the
3359			 * other objects to be freed, too.
3360			 */
3361			list_add_tail(&page->slab_list, &n->slabs_partial);
3362		}
3363	}
3364
3365	while (n->free_objects > n->free_limit && !list_empty(&n->slabs_free)) {
3366		n->free_objects -= cachep->num;
3367
3368		page = list_last_entry(&n->slabs_free, struct page, slab_list);
3369		list_move(&page->slab_list, list);
3370		n->free_slabs--;
3371		n->total_slabs--;
3372	}
3373}
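/*
 * The trimming loop at the end of free_block() keeps at most free_limit
 * cached objects per node: once freeing pushes n->free_objects over that
 * limit, completely free slabs are moved onto @list and destroyed by the
 * caller after the list_lock has been dropped.
 */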
3374
3375static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
3376{
3377	int batchcount;
3378	struct kmem_cache_node *n;
3379	int node = numa_mem_id();
3380	LIST_HEAD(list);
3381
3382	batchcount = ac->batchcount;
3383
3384	check_irq_off();
3385	n = get_node(cachep, node);
3386	spin_lock(&n->list_lock);
3387	if (n->shared) {
3388		struct array_cache *shared_array = n->shared;
3389		int max = shared_array->limit - shared_array->avail;
3390		if (max) {
3391			if (batchcount > max)
3392				batchcount = max;
3393			memcpy(&(shared_array->entry[shared_array->avail]),
3394			       ac->entry, sizeof(void *) * batchcount);
3395			shared_array->avail += batchcount;
3396			goto free_done;
3397		}
3398	}
3399
3400	free_block(cachep, ac->entry, batchcount, node, &list);
3401free_done:
3402#if STATS
3403	{
3404		int i = 0;
3405		struct page *page;
3406
3407		list_for_each_entry(page, &n->slabs_free, slab_list) {
3408			BUG_ON(page->active);
3409
3410			i++;
3411		}
3412		STATS_SET_FREEABLE(cachep, i);
3413	}
3414#endif
3415	spin_unlock(&n->list_lock);
3416	ac->avail -= batchcount;
3417	memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
3418	slabs_destroy(cachep, &list);
3419}
3420
3421/*
3422 * Release an obj back to its cache. If the obj has a constructed state, it must
3423 * be in this state _before_ it is released.  Called with disabled ints.
3424 */
3425static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp,
3426					 unsigned long caller)
3427{
3428	bool init;
3429
3430	if (is_kfence_address(objp)) {
3431		kmemleak_free_recursive(objp, cachep->flags);
3432		__kfence_free(objp);
3433		return;
3434	}
3435
3436	/*
3437	 * As memory initialization might be integrated into KASAN,
3438	 * kasan_slab_free and initialization memset must be
3439	 * kept together to avoid discrepancies in behavior.
3440	 */
3441	init = slab_want_init_on_free(cachep);
3442	if (init && !kasan_has_integrated_init())
3443		memset(objp, 0, cachep->object_size);
3444	/* KASAN might put objp into memory quarantine, delaying its reuse. */
3445	if (kasan_slab_free(cachep, objp, init))
3446		return;
3447
3448	/* Use KCSAN to help debug racy use-after-free. */
3449	if (!(cachep->flags & SLAB_TYPESAFE_BY_RCU))
3450		__kcsan_check_access(objp, cachep->object_size,
3451				     KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT);
3452
3453	___cache_free(cachep, objp, caller);
3454}
3455
3456void ___cache_free(struct kmem_cache *cachep, void *objp,
3457		unsigned long caller)
3458{
3459	struct array_cache *ac = cpu_cache_get(cachep);
3460
3461	check_irq_off();
3462	kmemleak_free_recursive(objp, cachep->flags);
3463	objp = cache_free_debugcheck(cachep, objp, caller);
3464	memcg_slab_free_hook(cachep, &objp, 1);
3465
3466	/*
3467	 * Skip calling cache_free_alien() when the platform is not NUMA.
3468	 * This avoids the cache misses that happen while accessing slabp (which
3469	 * is a per-page memory reference) to get the nodeid. Instead use a
3470	 * global variable to skip the call, which is most likely to be present
3471	 * in the cache.
3472	 */
3473	if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
3474		return;
3475
3476	if (ac->avail < ac->limit) {
3477		STATS_INC_FREEHIT(cachep);
3478	} else {
3479		STATS_INC_FREEMISS(cachep);
3480		cache_flusharray(cachep, ac);
3481	}
3482
3483	if (sk_memalloc_socks()) {
3484		struct page *page = virt_to_head_page(objp);
3485
3486		if (unlikely(PageSlabPfmemalloc(page))) {
3487			cache_free_pfmemalloc(cachep, page, objp);
3488			return;
3489		}
3490	}
3491
3492	__free_one(ac, objp);
3493}
3494
3495/**
3496 * kmem_cache_alloc - Allocate an object
3497 * @cachep: The cache to allocate from.
3498 * @flags: See kmalloc().
3499 *
3500 * Allocate an object from this cache.  The flags are only relevant
3501 * if the cache has no available objects.
3502 *
3503 * Return: pointer to the new object or %NULL in case of error
3504 */
3505void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3506{
3507	void *ret = slab_alloc(cachep, flags, cachep->object_size, _RET_IP_);
3508
3509	trace_kmem_cache_alloc(_RET_IP_, ret,
3510			       cachep->object_size, cachep->size, flags);
3511
3512	return ret;
3513}
3514EXPORT_SYMBOL(kmem_cache_alloc);
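
/*
 * Usage sketch (illustrative only; "foo_cache" and "struct foo" are
 * hypothetical and assumed to have been created elsewhere with
 * kmem_cache_create()):
 *
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *
 *	if (f) {
 *		... initialize and use f ...
 *		kmem_cache_free(foo_cache, f);
 *	}
 */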
3515
3516static __always_inline void
3517cache_alloc_debugcheck_after_bulk(struct kmem_cache *s, gfp_t flags,
3518				  size_t size, void **p, unsigned long caller)
3519{
3520	size_t i;
3521
3522	for (i = 0; i < size; i++)
3523		p[i] = cache_alloc_debugcheck_after(s, flags, p[i], caller);
3524}
3525
3526int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
3527			  void **p)
3528{
3529	size_t i;
3530	struct obj_cgroup *objcg = NULL;
3531
3532	s = slab_pre_alloc_hook(s, &objcg, size, flags);
3533	if (!s)
3534		return 0;
3535
3536	cache_alloc_debugcheck_before(s, flags);
3537
3538	local_irq_disable();
3539	for (i = 0; i < size; i++) {
3540		void *objp = kfence_alloc(s, s->object_size, flags) ?: __do_cache_alloc(s, flags);
3541
3542		if (unlikely(!objp))
3543			goto error;
3544		p[i] = objp;
3545	}
3546	local_irq_enable();
3547
3548	cache_alloc_debugcheck_after_bulk(s, flags, size, p, _RET_IP_);
3549
3550	/*
3551	 * memcg and kmem_cache debug support and memory initialization.
3552	 * Done outside of the IRQ disabled section.
3553	 */
3554	slab_post_alloc_hook(s, objcg, flags, size, p,
3555				slab_want_init_on_alloc(flags, s));
3556	/* FIXME: Trace call missing. Christoph would like a bulk variant */
3557	return size;
3558error:
3559	local_irq_enable();
3560	cache_alloc_debugcheck_after_bulk(s, flags, i, p, _RET_IP_);
3561	slab_post_alloc_hook(s, objcg, flags, i, p, false);
3562	__kmem_cache_free_bulk(s, i, p);
3563	return 0;
3564}
3565EXPORT_SYMBOL(kmem_cache_alloc_bulk);
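
/*
 * Bulk API usage sketch (illustrative; "foo_cache" is a hypothetical cache).
 * kmem_cache_alloc_bulk() either fills the whole array and returns its size,
 * or allocates nothing and returns 0:
 *
 *	void *objs[16];
 *
 *	if (!kmem_cache_alloc_bulk(foo_cache, GFP_KERNEL, ARRAY_SIZE(objs), objs))
 *		return -ENOMEM;
 *	... use objs[0] .. objs[15] ...
 *	kmem_cache_free_bulk(foo_cache, ARRAY_SIZE(objs), objs);
 */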
3566
3567#ifdef CONFIG_TRACING
3568void *
3569kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
3570{
3571	void *ret;
3572
3573	ret = slab_alloc(cachep, flags, size, _RET_IP_);
3574
3575	ret = kasan_kmalloc(cachep, ret, size, flags);
3576	trace_kmalloc(_RET_IP_, ret,
3577		      size, cachep->size, flags);
3578	return ret;
3579}
3580EXPORT_SYMBOL(kmem_cache_alloc_trace);
3581#endif
3582
3583#ifdef CONFIG_NUMA
3584/**
3585 * kmem_cache_alloc_node - Allocate an object on the specified node
3586 * @cachep: The cache to allocate from.
3587 * @flags: See kmalloc().
3588 * @nodeid: node number of the target node.
3589 *
3590 * Identical to kmem_cache_alloc but it will allocate memory on the given
3591 * node, which can improve the performance for cpu bound structures.
3592 *
3593 * Fallback to other node is possible if __GFP_THISNODE is not set.
3594 *
3595 * Return: pointer to the new object or %NULL in case of error
3596 */
3597void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
3598{
3599	void *ret = slab_alloc_node(cachep, flags, nodeid, cachep->object_size, _RET_IP_);
3600
3601	trace_kmem_cache_alloc_node(_RET_IP_, ret,
3602				    cachep->object_size, cachep->size,
3603				    flags, nodeid);
3604
3605	return ret;
3606}
3607EXPORT_SYMBOL(kmem_cache_alloc_node);
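
/*
 * NUMA usage sketch (illustrative; "foo_cache" and "nid" are hypothetical):
 *
 *	void *obj = kmem_cache_alloc_node(foo_cache, GFP_KERNEL, nid);
 *
 * With __GFP_THISNODE in the flags the allocation fails rather than fall
 * back to another node.
 */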
3608
3609#ifdef CONFIG_TRACING
3610void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
3611				  gfp_t flags,
3612				  int nodeid,
3613				  size_t size)
3614{
3615	void *ret;
3616
3617	ret = slab_alloc_node(cachep, flags, nodeid, size, _RET_IP_);
3618
3619	ret = kasan_kmalloc(cachep, ret, size, flags);
3620	trace_kmalloc_node(_RET_IP_, ret,
3621			   size, cachep->size,
3622			   flags, nodeid);
3623	return ret;
3624}
3625EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
3626#endif
3627
3628static __always_inline void *
3629__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
3630{
3631	struct kmem_cache *cachep;
3632	void *ret;
3633
3634	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
3635		return NULL;
3636	cachep = kmalloc_slab(size, flags);
3637	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3638		return cachep;
3639	ret = kmem_cache_alloc_node_trace(cachep, flags, node, size);
3640	ret = kasan_kmalloc(cachep, ret, size, flags);
3641
3642	return ret;
3643}
3644
3645void *__kmalloc_node(size_t size, gfp_t flags, int node)
3646{
3647	return __do_kmalloc_node(size, flags, node, _RET_IP_);
3648}
3649EXPORT_SYMBOL(__kmalloc_node);
3650
3651void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
3652		int node, unsigned long caller)
3653{
3654	return __do_kmalloc_node(size, flags, node, caller);
3655}
3656EXPORT_SYMBOL(__kmalloc_node_track_caller);
3657#endif /* CONFIG_NUMA */
3658
3659#ifdef CONFIG_PRINTK
3660void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page)
3661{
3662	struct kmem_cache *cachep;
3663	unsigned int objnr;
3664	void *objp;
3665
3666	kpp->kp_ptr = object;
3667	kpp->kp_page = page;
3668	cachep = page->slab_cache;
3669	kpp->kp_slab_cache = cachep;
3670	objp = object - obj_offset(cachep);
3671	kpp->kp_data_offset = obj_offset(cachep);
3672	page = virt_to_head_page(objp);
3673	objnr = obj_to_index(cachep, page, objp);
3674	objp = index_to_obj(cachep, page, objnr);
3675	kpp->kp_objp = objp;
3676	if (DEBUG && cachep->flags & SLAB_STORE_USER)
3677		kpp->kp_ret = *dbg_userword(cachep, objp);
3678}
3679#endif
3680
3681/**
3682 * __do_kmalloc - allocate memory
3683 * @size: how many bytes of memory are required.
3684 * @flags: the type of memory to allocate (see kmalloc).
3685 * @caller: function caller for debug tracking of the caller
3686 *
3687 * Return: pointer to the allocated memory or %NULL in case of error
3688 */
3689static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
3690					  unsigned long caller)
3691{
3692	struct kmem_cache *cachep;
3693	void *ret;
3694
3695	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
3696		return NULL;
3697	cachep = kmalloc_slab(size, flags);
3698	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3699		return cachep;
3700	ret = slab_alloc(cachep, flags, size, caller);
3701
3702	ret = kasan_kmalloc(cachep, ret, size, flags);
3703	trace_kmalloc(caller, ret,
3704		      size, cachep->size, flags);
3705
3706	return ret;
3707}
3708
3709void *__kmalloc(size_t size, gfp_t flags)
3710{
3711	return __do_kmalloc(size, flags, _RET_IP_);
3712}
3713EXPORT_SYMBOL(__kmalloc);
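
/*
 * Usage sketch for the kmalloc() family (illustrative). As the size check in
 * __do_kmalloc() above shows, requests larger than KMALLOC_MAX_CACHE_SIZE
 * fail with NULL here:
 *
 *	char *buf = kmalloc(128, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	... use buf ...
 *	kfree(buf);
 *
 * kfree(NULL) and kfree(ZERO_SIZE_PTR) are no-ops, as kfree() below checks.
 */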
3714
3715void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
3716{
3717	return __do_kmalloc(size, flags, caller);
3718}
3719EXPORT_SYMBOL(__kmalloc_track_caller);
3720
3721/**
3722 * kmem_cache_free - Deallocate an object
3723 * @cachep: The cache the allocation was from.
3724 * @objp: The previously allocated object.
3725 *
3726 * Free an object which was previously allocated from this
3727 * cache.
3728 */
3729void kmem_cache_free(struct kmem_cache *cachep, void *objp)
3730{
3731	unsigned long flags;
3732	cachep = cache_from_obj(cachep, objp);
3733	if (!cachep)
3734		return;
3735
3736	local_irq_save(flags);
3737	debug_check_no_locks_freed(objp, cachep->object_size);
3738	if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
3739		debug_check_no_obj_freed(objp, cachep->object_size);
3740	__cache_free(cachep, objp, _RET_IP_);
3741	local_irq_restore(flags);
3742
3743	trace_kmem_cache_free(_RET_IP_, objp, cachep->name);
3744}
3745EXPORT_SYMBOL(kmem_cache_free);
3746
3747void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
3748{
3749	struct kmem_cache *s;
3750	size_t i;
3751
3752	local_irq_disable();
3753	for (i = 0; i < size; i++) {
3754		void *objp = p[i];
3755
3756		if (!orig_s) /* called via kfree_bulk */
3757			s = virt_to_cache(objp);
3758		else
3759			s = cache_from_obj(orig_s, objp);
3760		if (!s)
3761			continue;
3762
3763		debug_check_no_locks_freed(objp, s->object_size);
3764		if (!(s->flags & SLAB_DEBUG_OBJECTS))
3765			debug_check_no_obj_freed(objp, s->object_size);
3766
3767		__cache_free(s, objp, _RET_IP_);
3768	}
3769	local_irq_enable();
3770
3771	/* FIXME: add tracing */
3772}
3773EXPORT_SYMBOL(kmem_cache_free_bulk);
3774
3775/**
3776 * kfree - free previously allocated memory
3777 * @objp: pointer returned by kmalloc.
3778 *
3779 * If @objp is NULL, no operation is performed.
3780 *
3781 * Don't free memory not originally allocated by kmalloc()
3782 * or you will run into trouble.
3783 */
3784void kfree(const void *objp)
3785{
3786	struct kmem_cache *c;
3787	unsigned long flags;
3788
3789	trace_kfree(_RET_IP_, objp);
3790
3791	if (unlikely(ZERO_OR_NULL_PTR(objp)))
3792		return;
3793	local_irq_save(flags);
3794	kfree_debugcheck(objp);
3795	c = virt_to_cache(objp);
3796	if (!c) {
3797		local_irq_restore(flags);
3798		return;
3799	}
3800	debug_check_no_locks_freed(objp, c->object_size);
3801
3802	debug_check_no_obj_freed(objp, c->object_size);
3803	__cache_free(c, (void *)objp, _RET_IP_);
3804	local_irq_restore(flags);
3805}
3806EXPORT_SYMBOL(kfree);
3807
3808/*
3809 * This initializes kmem_cache_node or resizes various caches for all nodes.
3810 */
3811static int setup_kmem_cache_nodes(struct kmem_cache *cachep, gfp_t gfp)
3812{
3813	int ret;
3814	int node;
3815	struct kmem_cache_node *n;
3816
3817	for_each_online_node(node) {
3818		ret = setup_kmem_cache_node(cachep, node, gfp, true);
3819		if (ret)
3820			goto fail;
3821
3822	}
3823
3824	return 0;
3825
3826fail:
3827	if (!cachep->list.next) {
3828		/* Cache is not active yet. Roll back what we did */
3829		node--;
3830		while (node >= 0) {
3831			n = get_node(cachep, node);
3832			if (n) {
3833				kfree(n->shared);
3834				free_alien_cache(n->alien);
3835				kfree(n);
3836				cachep->node[node] = NULL;
3837			}
3838			node--;
3839		}
3840	}
3841	return -ENOMEM;
3842}
3843
3844/* Always called with the slab_mutex held */
3845static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
3846			    int batchcount, int shared, gfp_t gfp)
3847{
3848	struct array_cache __percpu *cpu_cache, *prev;
3849	int cpu;
3850
3851	cpu_cache = alloc_kmem_cache_cpus(cachep, limit, batchcount);
3852	if (!cpu_cache)
3853		return -ENOMEM;
3854
3855	prev = cachep->cpu_cache;
3856	cachep->cpu_cache = cpu_cache;
3857	/*
3858	 * Without a previous cpu_cache there's no need to synchronize remote
3859	 * cpus, so skip the IPIs.
3860	 */
3861	if (prev)
3862		kick_all_cpus_sync();
3863
3864	check_irq_on();
3865	cachep->batchcount = batchcount;
3866	cachep->limit = limit;
3867	cachep->shared = shared;
3868
3869	if (!prev)
3870		goto setup_node;
3871
3872	for_each_online_cpu(cpu) {
3873		LIST_HEAD(list);
3874		int node;
3875		struct kmem_cache_node *n;
3876		struct array_cache *ac = per_cpu_ptr(prev, cpu);
3877
3878		node = cpu_to_mem(cpu);
3879		n = get_node(cachep, node);
3880		spin_lock_irq(&n->list_lock);
3881		free_block(cachep, ac->entry, ac->avail, node, &list);
3882		spin_unlock_irq(&n->list_lock);
3883		slabs_destroy(cachep, &list);
3884	}
3885	free_percpu(prev);
3886
3887setup_node:
3888	return setup_kmem_cache_nodes(cachep, gfp);
3889}
3890
3891/* Called with slab_mutex held always */
3892static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
3893{
3894	int err;
3895	int limit = 0;
3896	int shared = 0;
3897	int batchcount = 0;
3898
3899	err = cache_random_seq_create(cachep, cachep->num, gfp);
3900	if (err)
3901		goto end;
3902
3903	if (limit && shared && batchcount)
3904		goto skip_setup;
3905	/*
3906	 * The head array serves three purposes:
3907	 * - create a LIFO ordering, i.e. return objects that are cache-warm
3908	 * - reduce the number of spinlock operations.
3909	 * - reduce the number of linked list operations on the slab and
3910	 *   bufctl chains: array operations are cheaper.
3911	 * The numbers are guessed; we should auto-tune as described by
3912	 * Bonwick.
3913	 */
3914	if (cachep->size > 131072)
3915		limit = 1;
3916	else if (cachep->size > PAGE_SIZE)
3917		limit = 8;
3918	else if (cachep->size > 1024)
3919		limit = 24;
3920	else if (cachep->size > 256)
3921		limit = 54;
3922	else
3923		limit = 120;
3924
3925	/*
3926	 * CPU bound tasks (e.g. network routing) can exhibit asymmetric
3927	 * allocation behaviour: most allocs on one cpu, most free operations
3928	 * on another cpu. For these cases, an efficient object passing between
3929	 * cpus is necessary. This is provided by a shared array. The array
3930	 * replaces Bonwick's magazine layer.
3931	 * On uniprocessor, it's functionally equivalent (but less efficient)
3932	 * to a larger limit. Thus disabled by default.
3933	 */
3934	shared = 0;
3935	if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1)
3936		shared = 8;
3937
3938#if DEBUG
3939	/*
3940	 * With debugging enabled, a large batchcount leads to excessively long
3941	 * periods with local interrupts disabled. Limit the batchcount.
3942	 */
3943	if (limit > 32)
3944		limit = 32;
3945#endif
3946	batchcount = (limit + 1) / 2;
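	/*
	 * E.g. (ignoring the DEBUG clamp above): 512-byte objects get
	 * limit = 54 and batchcount = (54 + 1) / 2 = 27, while 64-byte
	 * objects get limit = 120 and batchcount = 60.
	 */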
3947skip_setup:
3948	err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
3949end:
3950	if (err)
3951		pr_err("enable_cpucache failed for %s, error %d\n",
3952		       cachep->name, -err);
3953	return err;
3954}
3955
3956/*
3957 * Drain an array if it contains any elements, taking the node lock only if
3958 * necessary. Note that the node listlock also protects the array_cache
3959 * if drain_array() is used on the shared array.
3960 */
3961static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
3962			 struct array_cache *ac, int node)
3963{
3964	LIST_HEAD(list);
3965
3966	/* ac from n->shared can be freed if we don't hold the slab_mutex. */
3967	check_mutex_acquired();
3968
3969	if (!ac || !ac->avail)
3970		return;
3971
3972	if (ac->touched) {
3973		ac->touched = 0;
3974		return;
3975	}
3976
3977	spin_lock_irq(&n->list_lock);
3978	drain_array_locked(cachep, ac, node, false, &list);
3979	spin_unlock_irq(&n->list_lock);
3980
3981	slabs_destroy(cachep, &list);
3982}
3983
3984/**
3985 * cache_reap - Reclaim memory from caches.
3986 * @w: work descriptor
3987 *
3988 * Called from workqueue/eventd every few seconds.
3989 * Purpose:
3990 * - clear the per-cpu caches for this CPU.
3991 * - return freeable pages to the main free memory pool.
3992 *
3993 * If we cannot acquire the cache chain mutex then just give up - we'll try
3994 * again on the next iteration.
3995 */
3996static void cache_reap(struct work_struct *w)
3997{
3998	struct kmem_cache *searchp;
3999	struct kmem_cache_node *n;
4000	int node = numa_mem_id();
4001	struct delayed_work *work = to_delayed_work(w);
4002
4003	if (!mutex_trylock(&slab_mutex))
4004		/* Give up. Set up the next iteration. */
4005		goto out;
4006
4007	list_for_each_entry(searchp, &slab_caches, list) {
4008		check_irq_on();
4009
4010		/*
4011		 * We only take the node lock if absolutely necessary and we
4012		 * have established with reasonable certainty that
4013		 * we can do some work if the lock was obtained.
4014		 */
4015		n = get_node(searchp, node);
4016
4017		reap_alien(searchp, n);
4018
4019		drain_array(searchp, n, cpu_cache_get(searchp), node);
4020
4021		/*
4022		 * These are racy checks but it does not matter
4023		 * if we skip one check or scan twice.
4024		 */
4025		if (time_after(n->next_reap, jiffies))
4026			goto next;
4027
4028		n->next_reap = jiffies + REAPTIMEOUT_NODE;
4029
4030		drain_array(searchp, n, n->shared, node);
4031
4032		if (n->free_touched)
4033			n->free_touched = 0;
4034		else {
4035			int freed;
4036
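			/*
			 * Ask drain_freelist() to drop roughly a fifth of
			 * free_limit worth of objects: the argument below is
			 * ceil(free_limit / (5 * num)) free slabs, each
			 * holding num objects.
			 */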
4037			freed = drain_freelist(searchp, n, (n->free_limit +
4038				5 * searchp->num - 1) / (5 * searchp->num));
4039			STATS_ADD_REAPED(searchp, freed);
4040		}
4041next:
4042		cond_resched();
4043	}
4044	check_irq_on();
4045	mutex_unlock(&slab_mutex);
4046	next_reap_node();
4047out:
4048	/* Set up the next iteration */
4049	schedule_delayed_work_on(smp_processor_id(), work,
4050				round_jiffies_relative(REAPTIMEOUT_AC));
4051}
4052
4053void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
4054{
4055	unsigned long active_objs, num_objs, active_slabs;
4056	unsigned long total_slabs = 0, free_objs = 0, shared_avail = 0;
4057	unsigned long free_slabs = 0;
4058	int node;
4059	struct kmem_cache_node *n;
4060
4061	for_each_kmem_cache_node(cachep, node, n) {
4062		check_irq_on();
4063		spin_lock_irq(&n->list_lock);
4064
4065		total_slabs += n->total_slabs;
4066		free_slabs += n->free_slabs;
4067		free_objs += n->free_objects;
4068
4069		if (n->shared)
4070			shared_avail += n->shared->avail;
4071
4072		spin_unlock_irq(&n->list_lock);
4073	}
4074	num_objs = total_slabs * cachep->num;
4075	active_slabs = total_slabs - free_slabs;
4076	active_objs = num_objs - free_objs;
4077
4078	sinfo->active_objs = active_objs;
4079	sinfo->num_objs = num_objs;
4080	sinfo->active_slabs = active_slabs;
4081	sinfo->num_slabs = total_slabs;
4082	sinfo->shared_avail = shared_avail;
4083	sinfo->limit = cachep->limit;
4084	sinfo->batchcount = cachep->batchcount;
4085	sinfo->shared = cachep->shared;
4086	sinfo->objects_per_slab = cachep->num;
4087	sinfo->cache_order = cachep->gfporder;
4088}
4089
4090void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
4091{
4092#if STATS
4093	{			/* node stats */
4094		unsigned long high = cachep->high_mark;
4095		unsigned long allocs = cachep->num_allocations;
4096		unsigned long grown = cachep->grown;
4097		unsigned long reaped = cachep->reaped;
4098		unsigned long errors = cachep->errors;
4099		unsigned long max_freeable = cachep->max_freeable;
4100		unsigned long node_allocs = cachep->node_allocs;
4101		unsigned long node_frees = cachep->node_frees;
4102		unsigned long overflows = cachep->node_overflow;
4103
4104		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu %4lu %4lu %4lu %4lu %4lu",
4105			   allocs, high, grown,
4106			   reaped, errors, max_freeable, node_allocs,
4107			   node_frees, overflows);
4108	}
4109	/* cpu stats */
4110	{
4111		unsigned long allochit = atomic_read(&cachep->allochit);
4112		unsigned long allocmiss = atomic_read(&cachep->allocmiss);
4113		unsigned long freehit = atomic_read(&cachep->freehit);
4114		unsigned long freemiss = atomic_read(&cachep->freemiss);
4115
4116		seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
4117			   allochit, allocmiss, freehit, freemiss);
4118	}
4119#endif
4120}
4121
4122#define MAX_SLABINFO_WRITE 128
4123/**
4124 * slabinfo_write - Tuning for the slab allocator
4125 * @file: unused
4126 * @buffer: user buffer
4127 * @count: data length
4128 * @ppos: unused
4129 *
4130 * Return: %0 on success, negative error code otherwise.
4131 */
4132ssize_t slabinfo_write(struct file *file, const char __user *buffer,
4133		       size_t count, loff_t *ppos)
4134{
4135	char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
4136	int limit, batchcount, shared, res;
4137	struct kmem_cache *cachep;
4138
4139	if (count > MAX_SLABINFO_WRITE)
4140		return -EINVAL;
4141	if (copy_from_user(&kbuf, buffer, count))
4142		return -EFAULT;
4143	kbuf[MAX_SLABINFO_WRITE] = '\0';
4144
4145	tmp = strchr(kbuf, ' ');
4146	if (!tmp)
4147		return -EINVAL;
4148	*tmp = '\0';
4149	tmp++;
4150	if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
4151		return -EINVAL;
4152
4153	/* Find the cache in the chain of caches. */
4154	mutex_lock(&slab_mutex);
4155	res = -EINVAL;
4156	list_for_each_entry(cachep, &slab_caches, list) {
4157		if (!strcmp(cachep->name, kbuf)) {
4158			if (limit < 1 || batchcount < 1 ||
4159					batchcount > limit || shared < 0) {
4160				res = 0;
4161			} else {
4162				res = do_tune_cpucache(cachep, limit,
4163						       batchcount, shared,
4164						       GFP_KERNEL);
4165			}
4166			break;
4167		}
4168	}
4169	mutex_unlock(&slab_mutex);
4170	if (res >= 0)
4171		res = count;
4172	return res;
4173}
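
/*
 * Tuning example (illustrative): a cache is retuned at run time by writing
 * "<cache-name> <limit> <batchcount> <shared>" to /proc/slabinfo, e.g. from
 * a shell:
 *
 *	echo "dentry 120 60 8" > /proc/slabinfo
 *
 * "dentry" is just an example name; if batchcount exceeds limit, or any of
 * the values is out of range, the write succeeds but no tuning is applied.
 */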
4174
4175#ifdef CONFIG_HARDENED_USERCOPY
4176/*
4177 * Rejects incorrectly sized objects and objects that are to be copied
4178 * to/from userspace but do not fall entirely within the containing slab
4179 * cache's usercopy region.
4180 *
4181 * The function returns nothing; a failing check either warns (when
4182 * usercopy_fallback is set) or rejects the copy via usercopy_abort().
4183 */
4184void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
4185			 bool to_user)
4186{
4187	struct kmem_cache *cachep;
4188	unsigned int objnr;
4189	unsigned long offset;
4190
4191	ptr = kasan_reset_tag(ptr);
4192
4193	/* Find and validate object. */
4194	cachep = page->slab_cache;
4195	objnr = obj_to_index(cachep, page, (void *)ptr);
4196	BUG_ON(objnr >= cachep->num);
4197
4198	/* Find offset within object. */
4199	if (is_kfence_address(ptr))
4200		offset = ptr - kfence_object_start(ptr);
4201	else
4202		offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
4203
4204	/* Allow address range falling entirely within usercopy region. */
4205	if (offset >= cachep->useroffset &&
4206	    offset - cachep->useroffset <= cachep->usersize &&
4207	    n <= cachep->useroffset - offset + cachep->usersize)
4208		return;
4209
4210	/*
4211	 * If the copy is still within the allocated object, produce
4212	 * a warning instead of rejecting the copy. This is intended
4213	 * to be a temporary method to find any missing usercopy
4214	 * whitelists.
4215	 */
4216	if (usercopy_fallback &&
4217	    offset <= cachep->object_size &&
4218	    n <= cachep->object_size - offset) {
4219		usercopy_warn("SLAB object", cachep->name, to_user, offset, n);
4220		return;
4221	}
4222
4223	usercopy_abort("SLAB object", cachep->name, to_user, offset, n);
4224}
4225#endif /* CONFIG_HARDENED_USERCOPY */
4226
4227/**
4228 * __ksize - Uninstrumented ksize.
4229 * @objp: pointer to the object
4230 *
4231 * Unlike ksize(), __ksize() is uninstrumented, and does not provide the same
4232 * safety checks as ksize() with KASAN instrumentation enabled.
4233 *
4234 * Return: size of the actual memory used by @objp in bytes
4235 */
4236size_t __ksize(const void *objp)
4237{
4238	struct kmem_cache *c;
4239	size_t size;
4240
4241	BUG_ON(!objp);
4242	if (unlikely(objp == ZERO_SIZE_PTR))
4243		return 0;
4244
4245	c = virt_to_cache(objp);
4246	size = c ? c->object_size : 0;
4247
4248	return size;
4249}
4250EXPORT_SYMBOL(__ksize);