   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * linux/mm/slab.c
   4 * Written by Mark Hemment, 1996/97.
   5 * (markhe@nextd.demon.co.uk)
   6 *
   7 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
   8 *
   9 * Major cleanup, different bufctl logic, per-cpu arrays
  10 *	(c) 2000 Manfred Spraul
  11 *
  12 * Cleanup, make the head arrays unconditional, preparation for NUMA
  13 * 	(c) 2002 Manfred Spraul
  14 *
  15 * An implementation of the Slab Allocator as described in outline in;
  16 *	UNIX Internals: The New Frontiers by Uresh Vahalia
  17 *	Pub: Prentice Hall	ISBN 0-13-101908-2
  18 * or with a little more detail in;
  19 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
  20 *	Jeff Bonwick (Sun Microsystems).
  21 *	Presented at: USENIX Summer 1994 Technical Conference
  22 *
  23 * The memory is organized in caches, one cache for each object type.
  24 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
   25 * Each cache consists of many slabs (they are small (usually one
  26 * page long) and always contiguous), and each slab contains multiple
  27 * initialized objects.
  28 *
   29 * This means that your constructor is used only for newly allocated
  30 * slabs and you must pass objects with the same initializations to
  31 * kmem_cache_free.
  32 *
  33 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
   34 * normal). If you need a special memory type, then you must create a new
  35 * cache for that memory type.
  36 *
  37 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
  38 *   full slabs with 0 free objects
  39 *   partial slabs
  40 *   empty slabs with no allocated objects
  41 *
  42 * If partial slabs exist, then new allocations come from these slabs,
   43 * otherwise they come from empty slabs, or new slabs are allocated.
  44 *
  45 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
  46 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
  47 *
  48 * Each cache has a short per-cpu head array, most allocs
  49 * and frees go into that array, and if that array overflows, then 1/2
  50 * of the entries in the array are given back into the global cache.
  51 * The head array is strictly LIFO and should improve the cache hit rates.
  52 * On SMP, it additionally reduces the spinlock operations.
  53 *
  54 * The c_cpuarray may not be read with enabled local interrupts -
  55 * it's changed with a smp_call_function().
  56 *
  57 * SMP synchronization:
  58 *  constructors and destructors are called without any locking.
  59 *  Several members in struct kmem_cache and struct slab never change, they
  60 *	are accessed without any locking.
  61 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
  62 *  	and local interrupts are disabled so slab code is preempt-safe.
  63 *  The non-constant members are protected with a per-cache irq spinlock.
  64 *
  65 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
  66 * in 2000 - many ideas in the current implementation are derived from
  67 * his patch.
  68 *
  69 * Further notes from the original documentation:
  70 *
  71 * 11 April '97.  Started multi-threading - markhe
  72 *	The global cache-chain is protected by the mutex 'slab_mutex'.
  73 *	The sem is only needed when accessing/extending the cache-chain, which
  74 *	can never happen inside an interrupt (kmem_cache_create(),
  75 *	kmem_cache_shrink() and kmem_cache_reap()).
  76 *
  77 *	At present, each engine can be growing a cache.  This should be blocked.
  78 *
  79 * 15 March 2005. NUMA slab allocator.
  80 *	Shai Fultheim <shai@scalex86.org>.
  81 *	Shobhit Dayal <shobhit@calsoftinc.com>
  82 *	Alok N Kataria <alokk@calsoftinc.com>
  83 *	Christoph Lameter <christoph@lameter.com>
  84 *
  85 *	Modified the slab allocator to be node aware on NUMA systems.
  86 *	Each node has its own list of partial, free and full slabs.
  87 *	All object allocations for a node occur from node specific slab lists.
  88 */
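/*
 * Illustrative sketch (not part of the original file): typical use of the
 * cache API described above. The cache name, struct foo and foo_ctor() are
 * hypothetical; kmem_cache_create(), kmem_cache_alloc() and
 * kmem_cache_free() are the real entry points. The constructor runs only
 * when a new slab is populated, so objects must be handed back to
 * kmem_cache_free() in their constructed state.
 *
 *	struct foo {
 *		spinlock_t lock;
 *		struct list_head list;
 *	};
 *
 *	static void foo_ctor(void *obj)
 *	{
 *		struct foo *f = obj;
 *
 *		spin_lock_init(&f->lock);
 *		INIT_LIST_HEAD(&f->list);
 *	}
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, foo_ctor);
 *	f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, f);	(object left in its constructed state)
 */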
  89
  90#include	<linux/__KEEPIDENTS__B.h>
  91#include	<linux/__KEEPIDENTS__C.h>
  92#include	<linux/__KEEPIDENTS__D.h>
  93#include	<linux/__KEEPIDENTS__E.h>
  94#include	<linux/__KEEPIDENTS__F.h>
  95#include	<linux/__KEEPIDENTS__G.h>
  96#include	<linux/__KEEPIDENTS__H.h>
  97#include	<linux/__KEEPIDENTS__I.h>
  98#include	<linux/__KEEPIDENTS__J.h>
  99#include	<linux/proc_fs.h>
 100#include	<linux/__KEEPIDENTS__BA.h>
 101#include	<linux/__KEEPIDENTS__BB.h>
 102#include	<linux/__KEEPIDENTS__BC.h>
 103#include	<linux/cpu.h>
 104#include	<linux/__KEEPIDENTS__BD.h>
 105#include	<linux/__KEEPIDENTS__BE.h>
 106#include	<linux/rcupdate.h>
 107#include	<linux/__KEEPIDENTS__BF.h>
 108#include	<linux/__KEEPIDENTS__BG.h>
 109#include	<linux/__KEEPIDENTS__BH.h>
 110#include	<linux/kmemleak.h>
 111#include	<linux/__KEEPIDENTS__BI.h>
 112#include	<linux/__KEEPIDENTS__BJ.h>
 113#include	<linux/__KEEPIDENTS__CA-__KEEPIDENTS__CB.h>
 114#include	<linux/__KEEPIDENTS__CC.h>
 115#include	<linux/reciprocal_div.h>
 116#include	<linux/debugobjects.h>
 117#include	<linux/__KEEPIDENTS__CD.h>
 118#include	<linux/__KEEPIDENTS__CE.h>
 119#include	<linux/__KEEPIDENTS__CF/task_stack.h>
 120
 121#include	<net/__KEEPIDENTS__CG.h>
 122
 123#include	<asm/cacheflush.h>
 124#include	<asm/tlbflush.h>
 125#include	<asm/page.h>
 126
 127#include <trace/events/kmem.h>
 128
 129#include	"internal.h"
 130
 131#include	"slab.h"
 132
 133/*
 134 * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
 135 *		  0 for faster, smaller code (especially in the critical paths).
 136 *
 137 * STATS	- 1 to collect stats for /proc/slabinfo.
 138 *		  0 for faster, smaller code (especially in the critical paths).
 139 *
 140 * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
 141 */
 142
 143#ifdef CONFIG_DEBUG_SLAB
 144#define	DEBUG		1
 145#define	STATS		1
 146#define	FORCED_DEBUG	1
 147#else
 148#define	DEBUG		0
 149#define	STATS		0
 150#define	FORCED_DEBUG	0
 151#endif
 152
 153/* Shouldn't this be in a header file somewhere? */
 154#define	BYTES_PER_WORD		sizeof(void *)
 155#define	REDZONE_ALIGN		max(BYTES_PER_WORD, __alignof__(unsigned long long))
 156
 157#ifndef ARCH_KMALLOC_FLAGS
 158#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
 159#endif
 160
 161#define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \
 162				<= SLAB_OBJ_MIN_SIZE) ? 1 : 0)
 163
 164#if FREELIST_BYTE_INDEX
 165typedef unsigned char freelist_idx_t;
 166#else
 167typedef unsigned short freelist_idx_t;
 168#endif
 169
 170#define SLAB_OBJ_MAX_NUM ((1 << sizeof(freelist_idx_t) * BITS_PER_BYTE) - 1)
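/*
 * Worked example (illustrative, assuming 4K pages): PAGE_SIZE >> BITS_PER_BYTE
 * is 4096 >> 8 = 16, so when SLAB_OBJ_MIN_SIZE is at least 16 bytes a slab
 * page holds on the order of 256 objects at most and the one-byte index
 * (SLAB_OBJ_MAX_NUM = 255) is sufficient; otherwise the two-byte
 * freelist_idx_t is used (SLAB_OBJ_MAX_NUM = 65535).
 */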
 171
 172/*
 173 * struct array_cache
 174 *
 175 * Purpose:
 176 * - LIFO ordering, to hand out cache-warm objects from _alloc
 177 * - reduce the number of linked list operations
 178 * - reduce spinlock operations
 179 *
 180 * The limit is stored in the per-cpu structure to reduce the data cache
 181 * footprint.
 182 *
 183 */
 184struct array_cache {
 185	unsigned int avail;
 186	unsigned int limit;
 187	unsigned int batchcount;
 188	unsigned int touched;
 189	void *entry[];	/*
 190			 * Must have this definition in here for the proper
 191			 * alignment of array_cache. Also simplifies accessing
 192			 * the entries.
 193			 */
 194};
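/*
 * Sketch of the LIFO behaviour described above (illustrative only): a free
 * pushes the object onto the top of the per-cpu stack and the next
 * allocation pops it straight back, handing out the most recently used,
 * cache-warm object:
 *
 *	ac->entry[ac->avail++] = objp;		free fast path
 *	objp = ac->entry[--ac->avail];		alloc fast path
 */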
 195
 196struct alien_cache {
 197	spinlock_t lock;
 198	struct array_cache ac;
 199};
 200
 201/*
 202 * Need this for bootstrapping a per node allocator.
 203 */
 204#define NUM_INIT_LISTS (2 * MAX_NUMNODES)
 205static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS];
 206#define	CACHE_CACHE 0
 207#define	SIZE_NODE (MAX_NUMNODES)
 208
 209static int drain_freelist(struct kmem_cache *cache,
 210			struct kmem_cache_node *n, int tofree);
 211static void free_block(struct kmem_cache *cachep, void **objpp, int len,
 212			int node, struct list_head *list);
 213static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list);
 214static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
 215static void cache_reap(struct work_struct *unused);
 216
 217static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
 218						void **list);
 219static inline void fixup_slab_list(struct kmem_cache *cachep,
 220				struct kmem_cache_node *n, struct page *page,
 221				void **list);
 222static int slab_early_init = 1;
 223
 224#define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node))
 225
 226static void kmem_cache_node_init(struct kmem_cache_node *parent)
 227{
 228	INIT_LIST_HEAD(&parent->slabs_full);
 229	INIT_LIST_HEAD(&parent->slabs_partial);
 230	INIT_LIST_HEAD(&parent->slabs_free);
 231	parent->total_slabs = 0;
 232	parent->free_slabs = 0;
 233	parent->shared = NULL;
 234	parent->alien = NULL;
 235	parent->colour_next = 0;
 236	spin_lock_init(&parent->list_lock);
 237	parent->free_objects = 0;
 238	parent->free_touched = 0;
 239}
 240
 241#define MAKE_LIST(cachep, listp, slab, nodeid)				\
 242	do {								\
 243		INIT_LIST_HEAD(listp);					\
 244		list_splice(&get_node(cachep, nodeid)->slab, listp);	\
 245	} while (0)
 246
 247#define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
 248	do {								\
 249	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
 250	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
 251	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
 252	} while (0)
 253
 254#define CFLGS_OBJFREELIST_SLAB	((slab_flags_t __force)0x40000000U)
 255#define CFLGS_OFF_SLAB		((slab_flags_t __force)0x80000000U)
 256#define	OBJFREELIST_SLAB(x)	((x)->flags & CFLGS_OBJFREELIST_SLAB)
 257#define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)
 258
 259#define BATCHREFILL_LIMIT	16
 260/*
  261 * Optimization question: fewer reaps mean a lower probability of unnecessary
 262 * cpucache drain/refill cycles.
 263 *
 264 * OTOH the cpuarrays can contain lots of objects,
 265 * which could lock up otherwise freeable slabs.
 266 */
 267#define REAPTIMEOUT_AC		(2*HZ)
 268#define REAPTIMEOUT_NODE	(4*HZ)
 269
 270#if STATS
 271#define	STATS_INC_ACTIVE(x)	((x)->num_active++)
 272#define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
 273#define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
 274#define	STATS_INC_GROWN(x)	((x)->grown++)
 275#define	STATS_ADD_REAPED(x,y)	((x)->reaped += (y))
 276#define	STATS_SET_HIGH(x)						\
 277	do {								\
 278		if ((x)->num_active > (x)->high_mark)			\
 279			(x)->high_mark = (x)->num_active;		\
 280	} while (0)
 281#define	STATS_INC_ERR(x)	((x)->errors++)
 282#define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
 283#define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
 284#define STATS_INC_ACOVERFLOW(x)   ((x)->node_overflow++)
 285#define	STATS_SET_FREEABLE(x, i)					\
 286	do {								\
 287		if ((x)->max_freeable < i)				\
 288			(x)->max_freeable = i;				\
 289	} while (0)
 290#define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
 291#define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
 292#define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
 293#define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
 294#else
 295#define	STATS_INC_ACTIVE(x)	do { } while (0)
 296#define	STATS_DEC_ACTIVE(x)	do { } while (0)
 297#define	STATS_INC_ALLOCED(x)	do { } while (0)
 298#define	STATS_INC_GROWN(x)	do { } while (0)
 299#define	STATS_ADD_REAPED(x,y)	do { (void)(y); } while (0)
 300#define	STATS_SET_HIGH(x)	do { } while (0)
 301#define	STATS_INC_ERR(x)	do { } while (0)
 302#define	STATS_INC_NODEALLOCS(x)	do { } while (0)
 303#define	STATS_INC_NODEFREES(x)	do { } while (0)
 304#define STATS_INC_ACOVERFLOW(x)   do { } while (0)
 305#define	STATS_SET_FREEABLE(x, i) do { } while (0)
 306#define STATS_INC_ALLOCHIT(x)	do { } while (0)
 307#define STATS_INC_ALLOCMISS(x)	do { } while (0)
 308#define STATS_INC_FREEHIT(x)	do { } while (0)
 309#define STATS_INC_FREEMISS(x)	do { } while (0)
 310#endif
 311
 312#if DEBUG
 313
 314/*
 315 * memory layout of objects:
 316 * 0		: objp
 317 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
 318 * 		the end of an object is aligned with the end of the real
 319 * 		allocation. Catches writes behind the end of the allocation.
 320 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
 321 * 		redzone word.
 322 * cachep->obj_offset: The real object.
 323 * cachep->size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
 324 * cachep->size - 1* BYTES_PER_WORD: last caller address
 325 *					[BYTES_PER_WORD long]
 326 */
 327static int obj_offset(struct kmem_cache *cachep)
 328{
 329	return cachep->obj_offset;
 330}
 331
 332static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
 333{
 334	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
 335	return (unsigned long long*) (objp + obj_offset(cachep) -
 336				      sizeof(unsigned long long));
 337}
 338
 339static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
 340{
 341	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
 342	if (cachep->flags & SLAB_STORE_USER)
 343		return (unsigned long long *)(objp + cachep->size -
 344					      sizeof(unsigned long long) -
 345					      REDZONE_ALIGN);
 346	return (unsigned long long *) (objp + cachep->size -
 347				       sizeof(unsigned long long));
 348}
 349
 350static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 351{
 352	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
 353	return (void **)(objp + cachep->size - BYTES_PER_WORD);
 354}
 355
 356#else
 357
 358#define obj_offset(x)			0
 359#define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
 360#define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
 361#define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})
 362
 363#endif
 364
 365/*
 366 * Do not go above this order unless 0 objects fit into the slab or
 367 * overridden on the command line.
 368 */
 369#define	SLAB_MAX_ORDER_HI	1
 370#define	SLAB_MAX_ORDER_LO	0
 371static int slab_max_order = SLAB_MAX_ORDER_LO;
 372static bool slab_max_order_set __initdata;
 373
 374static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
 375				 unsigned int idx)
 376{
 377	return page->s_mem + cache->size * idx;
 378}
 379
 380#define BOOT_CPUCACHE_ENTRIES	1
 381/* internal cache of cache description objs */
 382static struct kmem_cache kmem_cache_boot = {
 383	.batchcount = 1,
 384	.limit = BOOT_CPUCACHE_ENTRIES,
 385	.shared = 1,
 386	.size = sizeof(struct kmem_cache),
 387	.name = "kmem_cache",
 388};
 389
 390static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
 391
 392static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
 393{
 394	return this_cpu_ptr(cachep->cpu_cache);
 395}
 396
 397/*
 398 * Calculate the number of objects and left-over bytes for a given buffer size.
 399 */
 400static unsigned int cache_estimate(unsigned long gfporder, size_t buffer_size,
 401		slab_flags_t flags, size_t *left_over)
 402{
 403	unsigned int num;
 404	size_t slab_size = PAGE_SIZE << gfporder;
 405
 406	/*
 407	 * The slab management structure can be either off the slab or
 408	 * on it. For the latter case, the memory allocated for a
 409	 * slab is used for:
 410	 *
 411	 * - @buffer_size bytes for each object
 412	 * - One freelist_idx_t for each object
 413	 *
 414	 * We don't need to consider alignment of freelist because
 415	 * freelist will be at the end of slab page. The objects will be
 416	 * at the correct alignment.
 417	 *
 418	 * If the slab management structure is off the slab, then the
 419	 * alignment will already be calculated into the size. Because
 420	 * the slabs are all pages aligned, the objects will be at the
 421	 * correct alignment when allocated.
 422	 */
 423	if (flags & (CFLGS_OBJFREELIST_SLAB | CFLGS_OFF_SLAB)) {
 424		num = slab_size / buffer_size;
 425		*left_over = slab_size % buffer_size;
 426	} else {
 427		num = slab_size / (buffer_size + sizeof(freelist_idx_t));
 428		*left_over = slab_size %
 429			(buffer_size + sizeof(freelist_idx_t));
 430	}
 431
 432	return num;
 433}
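/*
 * Worked example (illustrative, assuming a 4K page, an on-slab freelist and
 * a one-byte freelist_idx_t): for 100-byte objects each object costs
 * 100 + 1 = 101 bytes, so an order-0 slab holds 4096 / 101 = 40 objects
 * with 4096 - 40 * 101 = 56 bytes left over for colouring.
 */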
 434
 435#if DEBUG
 436#define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)
 437
 438static void __slab_error(const char *function, struct kmem_cache *cachep,
 439			char *msg)
 440{
 441	pr_err("slab error in %s(): cache `%s': %s\n",
 442	       function, cachep->name, msg);
 443	dump_stack();
 444	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
 445}
 446#endif
 447
 448/*
 449 * By default on NUMA we use alien caches to stage the freeing of
 450 * objects allocated from other nodes. This causes massive memory
  451 * inefficiencies when using a fake NUMA setup to split memory into a
 452 * large number of small nodes, so it can be disabled on the command
 453 * line
 454  */
 455
 456static int use_alien_caches __read_mostly = 1;
 457static int __init noaliencache_setup(char *s)
 458{
 459	use_alien_caches = 0;
 460	return 1;
 461}
 462__setup("noaliencache", noaliencache_setup);
 463
 464static int __init slab_max_order_setup(char *str)
 465{
 466	get_option(&str, &slab_max_order);
 467	slab_max_order = slab_max_order < 0 ? 0 :
 468				min(slab_max_order, MAX_ORDER - 1);
 469	slab_max_order_set = true;
 470
 471	return 1;
 472}
 473__setup("slab_max_order=", slab_max_order_setup);
 474
 475#ifdef CONFIG_NUMA
 476/*
 477 * Special reaping functions for NUMA systems called from cache_reap().
 478 * These take care of doing round robin flushing of alien caches (containing
 479 * objects freed on different nodes from which they were allocated) and the
 480 * flushing of remote pcps by calling drain_node_pages.
 481 */
 482static DEFINE_PER_CPU(unsigned long, slab_reap_node);
 483
 484static void init_reap_node(int cpu)
 485{
 486	per_cpu(slab_reap_node, cpu) = next_node_in(cpu_to_mem(cpu),
 487						    node_online_map);
 488}
 489
 490static void next_reap_node(void)
 491{
 492	int node = __this_cpu_read(slab_reap_node);
 493
 494	node = next_node_in(node, node_online_map);
 495	__this_cpu_write(slab_reap_node, node);
 496}
 497
 498#else
 499#define init_reap_node(cpu) do { } while (0)
 500#define next_reap_node(void) do { } while (0)
 501#endif
 502
 503/*
 504 * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
 505 * via the workqueue/eventd.
 506 * Add the CPU number into the expiration time to minimize the possibility of
 507 * the CPUs getting into lockstep and contending for the global cache chain
 508 * lock.
 509 */
 510static void start_cpu_timer(int cpu)
 511{
 512	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);
 513
 514	if (reap_work->work.func == NULL) {
 515		init_reap_node(cpu);
 516		INIT_DEFERRABLE_WORK(reap_work, cache_reap);
 517		schedule_delayed_work_on(cpu, reap_work,
 518					__round_jiffies_relative(HZ, cpu));
 519	}
 520}
 521
 522static void init_arraycache(struct array_cache *ac, int limit, int batch)
 523{
 524	if (ac) {
 525		ac->avail = 0;
 526		ac->limit = limit;
 527		ac->batchcount = batch;
 528		ac->touched = 0;
 529	}
 530}
 531
 532static struct array_cache *alloc_arraycache(int node, int entries,
 533					    int batchcount, gfp_t gfp)
 534{
 535	size_t memsize = sizeof(void *) * entries + sizeof(struct array_cache);
 536	struct array_cache *ac = NULL;
 537
 538	ac = kmalloc_node(memsize, gfp, node);
 539	/*
 540	 * The array_cache structures contain pointers to free object.
 541	 * However, when such objects are allocated or transferred to another
 542	 * cache the pointers are not cleared and they could be counted as
 543	 * valid references during a kmemleak scan. Therefore, kmemleak must
 544	 * not scan such objects.
 545	 */
 546	kmemleak_no_scan(ac);
 547	init_arraycache(ac, entries, batchcount);
 548	return ac;
 549}
 550
 551static noinline void cache_free_pfmemalloc(struct kmem_cache *cachep,
 552					struct page *page, void *objp)
 553{
 554	struct kmem_cache_node *n;
 555	int page_node;
 556	LIST_HEAD(list);
 557
 558	page_node = page_to_nid(page);
 559	n = get_node(cachep, page_node);
 560
 561	spin_lock(&n->list_lock);
 562	free_block(cachep, &objp, 1, page_node, &list);
 563	spin_unlock(&n->list_lock);
 564
 565	slabs_destroy(cachep, &list);
 566}
 567
 568/*
 569 * Transfer objects in one arraycache to another.
 570 * Locking must be handled by the caller.
 571 *
 572 * Return the number of entries transferred.
 573 */
 574static int transfer_objects(struct array_cache *to,
 575		struct array_cache *from, unsigned int max)
 576{
 577	/* Figure out how many entries to transfer */
 578	int nr = min3(from->avail, max, to->limit - to->avail);
 579
 580	if (!nr)
 581		return 0;
 582
 583	memcpy(to->entry + to->avail, from->entry + from->avail -nr,
 584			sizeof(void *) *nr);
 585
 586	from->avail -= nr;
 587	to->avail += nr;
 588	return nr;
 589}
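/*
 * Example (illustrative): with from->avail == 30, max == 16 and room for
 * to->limit - to->avail == 10 more entries in the destination, min3() caps
 * the transfer at nr == 10 objects, copied from the top of the source array.
 */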
 590
 591/* &alien->lock must be held by alien callers. */
 592static __always_inline void __free_one(struct array_cache *ac, void *objp)
 593{
 594	/* Avoid trivial double-free. */
 595	if (IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
 596	    WARN_ON_ONCE(ac->avail > 0 && ac->entry[ac->avail - 1] == objp))
 597		return;
 598	ac->entry[ac->avail++] = objp;
 599}
 600
 601#ifndef CONFIG_NUMA
 602
 603#define drain_alien_cache(cachep, alien) do { } while (0)
 604#define reap_alien(cachep, n) do { } while (0)
 605
 606static inline struct alien_cache **alloc_alien_cache(int node,
 607						int limit, gfp_t gfp)
 608{
 609	return NULL;
 610}
 611
 612static inline void free_alien_cache(struct alien_cache **ac_ptr)
 613{
 614}
 615
 616static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 617{
 618	return 0;
 619}
 620
 621static inline void *alternate_node_alloc(struct kmem_cache *cachep,
 622		gfp_t flags)
 623{
 624	return NULL;
 625}
 626
 627static inline void *____cache_alloc_node(struct kmem_cache *cachep,
 628		 gfp_t flags, int nodeid)
 629{
 630	return NULL;
 631}
 632
 633static inline gfp_t gfp_exact_node(gfp_t flags)
 634{
 635	return flags & ~__GFP_NOFAIL;
 636}
 637
 638#else	/* CONFIG_NUMA */
 639
 640static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
 641static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
 642
 643static struct alien_cache *__alloc_alien_cache(int node, int entries,
 644						int batch, gfp_t gfp)
 645{
 646	size_t memsize = sizeof(void *) * entries + sizeof(struct alien_cache);
 647	struct alien_cache *alc = NULL;
 648
 649	alc = kmalloc_node(memsize, gfp, node);
 650	if (alc) {
 651		kmemleak_no_scan(alc);
 652		init_arraycache(&alc->ac, entries, batch);
 653		spin_lock_init(&alc->lock);
 654	}
 655	return alc;
 656}
 657
 658static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
 659{
 660	struct alien_cache **alc_ptr;
 661	int i;
 662
 663	if (limit > 1)
 664		limit = 12;
 665	alc_ptr = kcalloc_node(nr_node_ids, sizeof(void *), gfp, node);
 666	if (!alc_ptr)
 667		return NULL;
 668
 669	for_each_node(i) {
 670		if (i == node || !node_online(i))
 671			continue;
 672		alc_ptr[i] = __alloc_alien_cache(node, limit, 0xbaadf00d, gfp);
 673		if (!alc_ptr[i]) {
 674			for (i--; i >= 0; i--)
 675				kfree(alc_ptr[i]);
 676			kfree(alc_ptr);
 677			return NULL;
 678		}
 679	}
 680	return alc_ptr;
 681}
 682
 683static void free_alien_cache(struct alien_cache **alc_ptr)
 684{
 685	int i;
 686
 687	if (!alc_ptr)
 688		return;
 689	for_each_node(i)
 690	    kfree(alc_ptr[i]);
 691	kfree(alc_ptr);
 692}
 693
 694static void __drain_alien_cache(struct kmem_cache *cachep,
 695				struct array_cache *ac, int node,
 696				struct list_head *list)
 697{
 698	struct kmem_cache_node *n = get_node(cachep, node);
 699
 700	if (ac->avail) {
 701		spin_lock(&n->list_lock);
 702		/*
  703		 * Stuff objects into the remote node's shared array first.
 704		 * That way we could avoid the overhead of putting the objects
 705		 * into the free lists and getting them back later.
 706		 */
 707		if (n->shared)
 708			transfer_objects(n->shared, ac, ac->limit);
 709
 710		free_block(cachep, ac->entry, ac->avail, node, list);
 711		ac->avail = 0;
 712		spin_unlock(&n->list_lock);
 713	}
 714}
 715
 716/*
 717 * Called from cache_reap() to regularly drain alien caches round robin.
 718 */
 719static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
 720{
 721	int node = __this_cpu_read(slab_reap_node);
 722
 723	if (n->alien) {
 724		struct alien_cache *alc = n->alien[node];
 725		struct array_cache *ac;
 726
 727		if (alc) {
 728			ac = &alc->ac;
 729			if (ac->avail && spin_trylock_irq(&alc->lock)) {
 730				LIST_HEAD(list);
 731
 732				__drain_alien_cache(cachep, ac, node, &list);
 733				spin_unlock_irq(&alc->lock);
 734				slabs_destroy(cachep, &list);
 735			}
 736		}
 737	}
 738}
 739
 740static void drain_alien_cache(struct kmem_cache *cachep,
 741				struct alien_cache **alien)
 742{
 743	int i = 0;
 744	struct alien_cache *alc;
 745	struct array_cache *ac;
 746	unsigned long flags;
 747
 748	for_each_online_node(i) {
 749		alc = alien[i];
 750		if (alc) {
 751			LIST_HEAD(list);
 752
 753			ac = &alc->ac;
 754			spin_lock_irqsave(&alc->lock, flags);
 755			__drain_alien_cache(cachep, ac, i, &list);
 756			spin_unlock_irqrestore(&alc->lock, flags);
 757			slabs_destroy(cachep, &list);
 758		}
 759	}
 760}
 761
 762static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
 763				int node, int page_node)
 764{
 765	struct kmem_cache_node *n;
 766	struct alien_cache *alien = NULL;
 767	struct array_cache *ac;
 768	LIST_HEAD(list);
 769
 770	n = get_node(cachep, node);
 771	STATS_INC_NODEFREES(cachep);
 772	if (n->alien && n->alien[page_node]) {
 773		alien = n->alien[page_node];
 774		ac = &alien->ac;
 775		spin_lock(&alien->lock);
 776		if (unlikely(ac->avail == ac->limit)) {
 777			STATS_INC_ACOVERFLOW(cachep);
 778			__drain_alien_cache(cachep, ac, page_node, &list);
 779		}
 780		__free_one(ac, objp);
 781		spin_unlock(&alien->lock);
 782		slabs_destroy(cachep, &list);
 783	} else {
 784		n = get_node(cachep, page_node);
 785		spin_lock(&n->list_lock);
 786		free_block(cachep, &objp, 1, page_node, &list);
 787		spin_unlock(&n->list_lock);
 788		slabs_destroy(cachep, &list);
 789	}
 790	return 1;
 791}
 792
 793static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 794{
 795	int page_node = page_to_nid(virt_to_page(objp));
 796	int node = numa_mem_id();
 797	/*
  798	 * Make sure we are not freeing an object from another node to the array
 799	 * cache on this cpu.
 800	 */
 801	if (likely(node == page_node))
 802		return 0;
 803
 804	return __cache_free_alien(cachep, objp, node, page_node);
 805}
 806
 807/*
 808 * Construct gfp mask to allocate from a specific node but do not reclaim or
 809 * warn about failures.
 810 */
 811static inline gfp_t gfp_exact_node(gfp_t flags)
 812{
 813	return (flags | __GFP_THISNODE | __GFP_NOWARN) & ~(__GFP_RECLAIM|__GFP_NOFAIL);
 814}
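/*
 * For example (assuming the usual GFP_KERNEL composition of __GFP_RECLAIM |
 * __GFP_IO | __GFP_FS), gfp_exact_node(GFP_KERNEL) yields
 * __GFP_IO | __GFP_FS | __GFP_THISNODE | __GFP_NOWARN: the allocation is
 * pinned to the requested node and fails quietly rather than reclaiming.
 */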
 815#endif
 816
 817static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
 818{
 819	struct kmem_cache_node *n;
 820
 821	/*
 822	 * Set up the kmem_cache_node for cpu before we can
 823	 * begin anything. Make sure some other cpu on this
 824	 * node has not already allocated this
 825	 */
 826	n = get_node(cachep, node);
 827	if (n) {
 828		spin_lock_irq(&n->list_lock);
 829		n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount +
 830				cachep->num;
 831		spin_unlock_irq(&n->list_lock);
 832
 833		return 0;
 834	}
 835
 836	n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node);
 837	if (!n)
 838		return -ENOMEM;
 839
 840	kmem_cache_node_init(n);
 841	n->next_reap = jiffies + REAPTIMEOUT_NODE +
 842		    ((unsigned long)cachep) % REAPTIMEOUT_NODE;
 843
 844	n->free_limit =
 845		(1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num;
 846
 847	/*
 848	 * The kmem_cache_nodes don't come and go as CPUs
 849	 * come and go.  slab_mutex is sufficient
 850	 * protection here.
 851	 */
 852	cachep->node[node] = n;
 853
 854	return 0;
 855}
 856
 857#if (defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)) || defined(CONFIG_SMP)
 858/*
  859 * Allocates and initializes the kmem_cache_node for a node on each slab cache, used for
 860 * either memory or cpu hotplug.  If memory is being hot-added, the kmem_cache_node
 861 * will be allocated off-node since memory is not yet online for the new node.
  862 * When hotplugging memory or a cpu, existing nodes are not replaced if
 863 * already in use.
 864 *
 865 * Must hold slab_mutex.
 866 */
 867static int init_cache_node_node(int node)
 868{
 869	int ret;
 870	struct kmem_cache *cachep;
 871
 872	list_for_each_entry(cachep, &slab_caches, list) {
 873		ret = init_cache_node(cachep, node, GFP_KERNEL);
 874		if (ret)
 875			return ret;
 876	}
 877
 878	return 0;
 879}
 880#endif
 881
 882static int setup_kmem_cache_node(struct kmem_cache *cachep,
 883				int node, gfp_t gfp, bool force_change)
 884{
 885	int ret = -ENOMEM;
 886	struct kmem_cache_node *n;
 887	struct array_cache *old_shared = NULL;
 888	struct array_cache *new_shared = NULL;
 889	struct alien_cache **new_alien = NULL;
 890	LIST_HEAD(list);
 891
 892	if (use_alien_caches) {
 893		new_alien = alloc_alien_cache(node, cachep->limit, gfp);
 894		if (!new_alien)
 895			goto fail;
 896	}
 897
 898	if (cachep->shared) {
 899		new_shared = alloc_arraycache(node,
 900			cachep->shared * cachep->batchcount, 0xbaadf00d, gfp);
 901		if (!new_shared)
 902			goto fail;
 903	}
 904
 905	ret = init_cache_node(cachep, node, gfp);
 906	if (ret)
 907		goto fail;
 908
 909	n = get_node(cachep, node);
 910	spin_lock_irq(&n->list_lock);
 911	if (n->shared && force_change) {
 912		free_block(cachep, n->shared->entry,
 913				n->shared->avail, node, &list);
 914		n->shared->avail = 0;
 915	}
 916
 917	if (!n->shared || force_change) {
 918		old_shared = n->shared;
 919		n->shared = new_shared;
 920		new_shared = NULL;
 921	}
 922
 923	if (!n->alien) {
 924		n->alien = new_alien;
 925		new_alien = NULL;
 926	}
 927
 928	spin_unlock_irq(&n->list_lock);
 929	slabs_destroy(cachep, &list);
 930
 931	/*
 932	 * To protect lockless access to n->shared during irq disabled context.
 933	 * If n->shared isn't NULL in irq disabled context, accessing to it is
 934	 * guaranteed to be valid until irq is re-enabled, because it will be
 935	 * freed after synchronize_rcu().
 936	 */
 937	if (old_shared && force_change)
 938		synchronize_rcu();
 939
 940fail:
 941	kfree(old_shared);
 942	kfree(new_shared);
 943	free_alien_cache(new_alien);
 944
 945	return ret;
 946}
 947
 948#ifdef CONFIG_SMP
 949
 950static void cpuup_canceled(long cpu)
 951{
 952	struct kmem_cache *cachep;
 953	struct kmem_cache_node *n = NULL;
 954	int node = cpu_to_mem(cpu);
 955	const struct cpumask *mask = cpumask_of_node(node);
 956
 957	list_for_each_entry(cachep, &slab_caches, list) {
 958		struct array_cache *nc;
 959		struct array_cache *shared;
 960		struct alien_cache **alien;
 961		LIST_HEAD(list);
 962
 963		n = get_node(cachep, node);
 964		if (!n)
 965			continue;
 966
 967		spin_lock_irq(&n->list_lock);
 968
 969		/* Free limit for this kmem_cache_node */
 970		n->free_limit -= cachep->batchcount;
 971
 972		/* cpu is dead; no one can alloc from it. */
 973		nc = per_cpu_ptr(cachep->cpu_cache, cpu);
 974		free_block(cachep, nc->entry, nc->avail, node, &list);
 975		nc->avail = 0;
 976
 977		if (!cpumask_empty(mask)) {
 978			spin_unlock_irq(&n->list_lock);
 979			goto free_slab;
 980		}
 981
 982		shared = n->shared;
 983		if (shared) {
 984			free_block(cachep, shared->entry,
 985				   shared->avail, node, &list);
 986			n->shared = NULL;
 987		}
 988
 989		alien = n->alien;
 990		n->alien = NULL;
 991
 992		spin_unlock_irq(&n->list_lock);
 993
 994		kfree(shared);
 995		if (alien) {
 996			drain_alien_cache(cachep, alien);
 997			free_alien_cache(alien);
 998		}
 999
1000free_slab:
1001		slabs_destroy(cachep, &list);
1002	}
1003	/*
1004	 * In the previous loop, all the objects were freed to
1005	 * the respective cache's slabs,  now we can go ahead and
1006	 * shrink each nodelist to its limit.
1007	 */
1008	list_for_each_entry(cachep, &slab_caches, list) {
1009		n = get_node(cachep, node);
1010		if (!n)
1011			continue;
1012		drain_freelist(cachep, n, INT_MAX);
1013	}
1014}
1015
1016static int cpuup_prepare(long cpu)
1017{
1018	struct kmem_cache *cachep;
1019	int node = cpu_to_mem(cpu);
1020	int err;
1021
1022	/*
1023	 * We need to do this right in the beginning since
1024	 * alloc_arraycache's are going to use this list.
1025	 * kmalloc_node allows us to add the slab to the right
1026	 * kmem_cache_node and not this cpu's kmem_cache_node
1027	 */
1028	err = init_cache_node_node(node);
1029	if (err < 0)
1030		goto bad;
1031
1032	/*
1033	 * Now we can go ahead with allocating the shared arrays and
1034	 * array caches
1035	 */
1036	list_for_each_entry(cachep, &slab_caches, list) {
1037		err = setup_kmem_cache_node(cachep, node, GFP_KERNEL, false);
1038		if (err)
1039			goto bad;
1040	}
1041
1042	return 0;
1043bad:
1044	cpuup_canceled(cpu);
1045	return -ENOMEM;
1046}
1047
1048int slab_prepare_cpu(unsigned int cpu)
1049{
1050	int err;
1051
1052	mutex_lock(&slab_mutex);
1053	err = cpuup_prepare(cpu);
1054	mutex_unlock(&slab_mutex);
1055	return err;
1056}
1057
1058/*
1059 * This is called for a failed online attempt and for a successful
1060 * offline.
1061 *
1062 * Even if all the cpus of a node are down, we don't free the
1063 * kmem_cache_node of any cache. This to avoid a race between cpu_down, and
1064 * a kmalloc allocation from another cpu for memory from the node of
1065 * the cpu going down.  The list3 structure is usually allocated from
1066 * kmem_cache_create() and gets destroyed at kmem_cache_destroy().
1067 */
1068int slab_dead_cpu(unsigned int cpu)
1069{
1070	mutex_lock(&slab_mutex);
1071	cpuup_canceled(cpu);
1072	mutex_unlock(&slab_mutex);
1073	return 0;
1074}
1075#endif
1076
1077static int slab_online_cpu(unsigned int cpu)
1078{
1079	start_cpu_timer(cpu);
1080	return 0;
1081}
1082
1083static int slab_offline_cpu(unsigned int cpu)
1084{
1085	/*
1086	 * Shutdown cache reaper. Note that the slab_mutex is held so
1087	 * that if cache_reap() is invoked it cannot do anything
1088	 * expensive but will only modify reap_work and reschedule the
1089	 * timer.
1090	 */
1091	cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
1092	/* Now the cache_reaper is guaranteed to be not running. */
1093	per_cpu(slab_reap_work, cpu).work.func = NULL;
1094	return 0;
1095}
1096
1097#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
1098/*
1099 * Drains freelist for a node on each slab cache, used for memory hot-remove.
1100 * Returns -EBUSY if all objects cannot be drained so that the node is not
1101 * removed.
1102 *
1103 * Must hold slab_mutex.
1104 */
1105static int __meminit drain_cache_node_node(int node)
1106{
1107	struct kmem_cache *cachep;
1108	int ret = 0;
1109
1110	list_for_each_entry(cachep, &slab_caches, list) {
1111		struct kmem_cache_node *n;
1112
1113		n = get_node(cachep, node);
1114		if (!n)
1115			continue;
1116
1117		drain_freelist(cachep, n, INT_MAX);
1118
1119		if (!list_empty(&n->slabs_full) ||
1120		    !list_empty(&n->slabs_partial)) {
1121			ret = -EBUSY;
1122			break;
1123		}
1124	}
1125	return ret;
1126}
1127
1128static int __meminit slab_memory_callback(struct notifier_block *self,
1129					unsigned long action, void *arg)
1130{
1131	struct memory_notify *mnb = arg;
1132	int ret = 0;
1133	int nid;
1134
1135	nid = mnb->status_change_nid;
1136	if (nid < 0)
1137		goto out;
1138
1139	switch (action) {
1140	case MEM_GOING_ONLINE:
1141		mutex_lock(&slab_mutex);
1142		ret = init_cache_node_node(nid);
1143		mutex_unlock(&slab_mutex);
1144		break;
1145	case MEM_GOING_OFFLINE:
1146		mutex_lock(&slab_mutex);
1147		ret = drain_cache_node_node(nid);
1148		mutex_unlock(&slab_mutex);
1149		break;
1150	case MEM_ONLINE:
1151	case MEM_OFFLINE:
1152	case MEM_CANCEL_ONLINE:
1153	case MEM_CANCEL_OFFLINE:
1154		break;
1155	}
1156out:
1157	return notifier_from_errno(ret);
1158}
1159#endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */
1160
1161/*
1162 * swap the static kmem_cache_node with kmalloced memory
1163 */
1164static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list,
1165				int nodeid)
1166{
1167	struct kmem_cache_node *ptr;
1168
1169	ptr = kmalloc_node(sizeof(struct kmem_cache_node), GFP_NOWAIT, nodeid);
1170	BUG_ON(!ptr);
1171
1172	memcpy(ptr, list, sizeof(struct kmem_cache_node));
1173	/*
1174	 * Do not assume that spinlocks can be initialized via memcpy:
1175	 */
1176	spin_lock_init(&ptr->list_lock);
1177
1178	MAKE_ALL_LISTS(cachep, ptr, nodeid);
1179	cachep->node[nodeid] = ptr;
1180}
1181
1182/*
 1183 * For setting up all the kmem_cache_nodes for caches whose buffer_size is the
 1184 * same as the size of kmem_cache_node.
1185 */
1186static void __init set_up_node(struct kmem_cache *cachep, int index)
1187{
1188	int node;
1189
1190	for_each_online_node(node) {
1191		cachep->node[node] = &init_kmem_cache_node[index + node];
1192		cachep->node[node]->next_reap = jiffies +
1193		    REAPTIMEOUT_NODE +
1194		    ((unsigned long)cachep) % REAPTIMEOUT_NODE;
1195	}
1196}
1197
1198/*
 1199 * Initialisation.  Called after the page allocator has been initialised and
1200 * before smp_init().
1201 */
1202void __init kmem_cache_init(void)
1203{
1204	int i;
1205
1206	kmem_cache = &kmem_cache_boot;
1207
1208	if (!IS_ENABLED(CONFIG_NUMA) || num_possible_nodes() == 1)
1209		use_alien_caches = 0;
1210
1211	for (i = 0; i < NUM_INIT_LISTS; i++)
1212		kmem_cache_node_init(&init_kmem_cache_node[i]);
1213
1214	/*
1215	 * Fragmentation resistance on low memory - only use bigger
1216	 * page orders on machines with more than 32MB of memory if
1217	 * not overridden on the command line.
1218	 */
1219	if (!slab_max_order_set && totalram_pages() > (32 << 20) >> PAGE_SHIFT)
1220		slab_max_order = SLAB_MAX_ORDER_HI;
1221
1222	/* Bootstrap is tricky, because several objects are allocated
1223	 * from caches that do not exist yet:
1224	 * 1) initialize the kmem_cache cache: it contains the struct
1225	 *    kmem_cache structures of all caches, except kmem_cache itself:
1226	 *    kmem_cache is statically allocated.
1227	 *    Initially an __init data area is used for the head array and the
1228	 *    kmem_cache_node structures, it's replaced with a kmalloc allocated
1229	 *    array at the end of the bootstrap.
1230	 * 2) Create the first kmalloc cache.
1231	 *    The struct kmem_cache for the new cache is allocated normally.
1232	 *    An __init data area is used for the head array.
1233	 * 3) Create the remaining kmalloc caches, with minimally sized
1234	 *    head arrays.
1235	 * 4) Replace the __init data head arrays for kmem_cache and the first
1236	 *    kmalloc cache with kmalloc allocated arrays.
1237	 * 5) Replace the __init data for kmem_cache_node for kmem_cache and
 1238 *    the other caches with kmalloc allocated memory.
1239	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
1240	 */
1241
1242	/* 1) create the kmem_cache */
1243
1244	/*
1245	 * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
1246	 */
1247	create_boot_cache(kmem_cache, "kmem_cache",
1248		offsetof(struct kmem_cache, node) +
1249				  nr_node_ids * sizeof(struct kmem_cache_node *),
1250				  SLAB_HWCACHE_ALIGN, 0, 0);
1251	list_add(&kmem_cache->list, &slab_caches);
1252	slab_state = PARTIAL;
1253
1254	/*
1255	 * Initialize the caches that provide memory for the  kmem_cache_node
1256	 * structures first.  Without this, further allocations will bug.
1257	 */
1258	kmalloc_caches[KMALLOC_NORMAL][INDEX_NODE] = create_kmalloc_cache(
1259				kmalloc_info[INDEX_NODE].name[KMALLOC_NORMAL],
1260				kmalloc_info[INDEX_NODE].size,
1261				ARCH_KMALLOC_FLAGS, 0,
1262				kmalloc_info[INDEX_NODE].size);
1263	slab_state = PARTIAL_NODE;
1264	setup_kmalloc_cache_index_table();
1265
1266	slab_early_init = 0;
1267
1268	/* 5) Replace the bootstrap kmem_cache_node */
1269	{
1270		int nid;
1271
1272		for_each_online_node(nid) {
1273			init_list(kmem_cache, &init_kmem_cache_node[CACHE_CACHE + nid], nid);
1274
1275			init_list(kmalloc_caches[KMALLOC_NORMAL][INDEX_NODE],
1276					  &init_kmem_cache_node[SIZE_NODE + nid], nid);
1277		}
1278	}
1279
1280	create_kmalloc_caches(ARCH_KMALLOC_FLAGS);
1281}
1282
1283void __init kmem_cache_init_late(void)
1284{
1285	struct kmem_cache *cachep;
1286
1287	/* 6) resize the head arrays to their final sizes */
1288	mutex_lock(&slab_mutex);
1289	list_for_each_entry(cachep, &slab_caches, list)
1290		if (enable_cpucache(cachep, GFP_NOWAIT))
1291			BUG();
1292	mutex_unlock(&slab_mutex);
1293
1294	/* Done! */
1295	slab_state = FULL;
1296
1297#ifdef CONFIG_NUMA
1298	/*
1299	 * Register a memory hotplug callback that initializes and frees
1300	 * node.
1301	 */
1302	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
1303#endif
1304
1305	/*
1306	 * The reap timers are started later, with a module init call: That part
1307	 * of the kernel is not yet operational.
1308	 */
1309}
1310
1311static int __init cpucache_init(void)
1312{
1313	int ret;
1314
1315	/*
1316	 * Register the timers that return unneeded pages to the page allocator
1317	 */
1318	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SLAB online",
1319				slab_online_cpu, slab_offline_cpu);
1320	WARN_ON(ret < 0);
1321
1322	return 0;
1323}
1324__initcall(cpucache_init);
1325
1326static noinline void
1327slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
1328{
1329#if DEBUG
1330	struct kmem_cache_node *n;
1331	unsigned long flags;
1332	int node;
1333	static DEFINE_RATELIMIT_STATE(slab_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
1334				      DEFAULT_RATELIMIT_BURST);
1335
1336	if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slab_oom_rs))
1337		return;
1338
1339	pr_warn("SLAB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
1340		nodeid, gfpflags, &gfpflags);
1341	pr_warn("  cache: %s, object size: %d, order: %d\n",
1342		cachep->name, cachep->size, cachep->gfporder);
1343
1344	for_each_kmem_cache_node(cachep, node, n) {
1345		unsigned long total_slabs, free_slabs, free_objs;
1346
1347		spin_lock_irqsave(&n->list_lock, flags);
1348		total_slabs = n->total_slabs;
1349		free_slabs = n->free_slabs;
1350		free_objs = n->free_objects;
1351		spin_unlock_irqrestore(&n->list_lock, flags);
1352
1353		pr_warn("  node %d: slabs: %ld/%ld, objs: %ld/%ld\n",
1354			node, total_slabs - free_slabs, total_slabs,
1355			(total_slabs * cachep->num) - free_objs,
1356			total_slabs * cachep->num);
1357	}
1358#endif
1359}
1360
1361/*
1362 * Interface to system's page allocator. No need to hold the
1363 * kmem_cache_node ->list_lock.
1364 *
1365 * If we requested dmaable memory, we will get it. Even if we
1366 * did not request dmaable memory, we might get it, but that
1367 * would be relatively rare and ignorable.
1368 */
1369static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
1370								int nodeid)
1371{
1372	struct page *page;
1373
1374	flags |= cachep->allocflags;
1375
1376	page = __alloc_pages_node(nodeid, flags, cachep->gfporder);
1377	if (!page) {
1378		slab_out_of_memory(cachep, flags, nodeid);
1379		return NULL;
1380	}
1381
1382	account_slab_page(page, cachep->gfporder, cachep);
1383	__SetPageSlab(page);
1384	/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
1385	if (sk_memalloc_socks() && page_is_pfmemalloc(page))
1386		SetPageSlabPfmemalloc(page);
1387
1388	return page;
1389}
1390
1391/*
1392 * Interface to system's page release.
1393 */
1394static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
1395{
1396	int order = cachep->gfporder;
1397
1398	BUG_ON(!PageSlab(page));
1399	__ClearPageSlabPfmemalloc(page);
1400	__ClearPageSlab(page);
1401	page_mapcount_reset(page);
1402	page->mapping = NULL;
1403
1404	if (current->reclaim_state)
1405		current->reclaim_state->reclaimed_slab += 1 << order;
1406	unaccount_slab_page(page, order, cachep);
1407	__free_pages(page, order);
1408}
1409
1410static void kmem_rcu_free(struct rcu_head *head)
1411{
1412	struct kmem_cache *cachep;
1413	struct page *page;
1414
1415	page = container_of(head, struct page, rcu_head);
1416	cachep = page->slab_cache;
1417
1418	kmem_freepages(cachep, page);
1419}
1420
1421#if DEBUG
1422static bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
1423{
1424	if (debug_pagealloc_enabled_static() && OFF_SLAB(cachep) &&
1425		(cachep->size % PAGE_SIZE) == 0)
1426		return true;
1427
1428	return false;
1429}
1430
1431#ifdef CONFIG_DEBUG_PAGEALLOC
1432static void slab_kernel_map(struct kmem_cache *cachep, void *objp, int map)
1433{
1434	if (!is_debug_pagealloc_cache(cachep))
1435		return;
1436
1437	kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map);
1438}
1439
1440#else
1441static inline void slab_kernel_map(struct kmem_cache *cachep, void *objp,
1442				int map) {}
1443
1444#endif
1445
1446static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
1447{
1448	int size = cachep->object_size;
1449	addr = &((char *)addr)[obj_offset(cachep)];
1450
1451	memset(addr, val, size);
1452	*(unsigned char *)(addr + size - 1) = POISON_END;
1453}
1454
1455static void dump_line(char *data, int offset, int limit)
1456{
1457	int i;
1458	unsigned char error = 0;
1459	int bad_count = 0;
1460
1461	pr_err("%03x: ", offset);
1462	for (i = 0; i < limit; i++) {
1463		if (data[offset + i] != POISON_FREE) {
1464			error = data[offset + i];
1465			bad_count++;
1466		}
1467	}
1468	print_hex_dump(KERN_CONT, "", 0, 16, 1,
1469			&data[offset], limit, 1);
1470
1471	if (bad_count == 1) {
1472		error ^= POISON_FREE;
1473		if (!(error & (error - 1))) {
1474			pr_err("Single bit error detected. Probably bad RAM.\n");
1475#ifdef CONFIG_X86
1476			pr_err("Run memtest86+ or a similar memory test tool.\n");
1477#else
1478			pr_err("Run a memory test tool.\n");
1479#endif
1480		}
1481	}
1482}
1483#endif
1484
1485#if DEBUG
1486
1487static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
1488{
1489	int i, size;
1490	char *realobj;
1491
1492	if (cachep->flags & SLAB_RED_ZONE) {
1493		pr_err("Redzone: 0x%llx/0x%llx\n",
1494		       *dbg_redzone1(cachep, objp),
1495		       *dbg_redzone2(cachep, objp));
1496	}
1497
1498	if (cachep->flags & SLAB_STORE_USER)
1499		pr_err("Last user: (%pSR)\n", *dbg_userword(cachep, objp));
1500	realobj = (char *)objp + obj_offset(cachep);
1501	size = cachep->object_size;
1502	for (i = 0; i < size && lines; i += 16, lines--) {
1503		int limit;
1504		limit = 16;
1505		if (i + limit > size)
1506			limit = size - i;
1507		dump_line(realobj, i, limit);
1508	}
1509}
1510
1511static void check_poison_obj(struct kmem_cache *cachep, void *objp)
1512{
1513	char *realobj;
1514	int size, i;
1515	int lines = 0;
1516
1517	if (is_debug_pagealloc_cache(cachep))
1518		return;
1519
1520	realobj = (char *)objp + obj_offset(cachep);
1521	size = cachep->object_size;
1522
1523	for (i = 0; i < size; i++) {
1524		char exp = POISON_FREE;
1525		if (i == size - 1)
1526			exp = POISON_END;
1527		if (realobj[i] != exp) {
1528			int limit;
1529			/* Mismatch ! */
1530			/* Print header */
1531			if (lines == 0) {
1532				pr_err("Slab corruption (%s): %s start=%px, len=%d\n",
1533				       print_tainted(), cachep->name,
1534				       realobj, size);
1535				print_objinfo(cachep, objp, 0);
1536			}
1537			/* Hexdump the affected line */
1538			i = (i / 16) * 16;
1539			limit = 16;
1540			if (i + limit > size)
1541				limit = size - i;
1542			dump_line(realobj, i, limit);
1543			i += 16;
1544			lines++;
1545			/* Limit to 5 lines */
1546			if (lines > 5)
1547				break;
1548		}
1549	}
1550	if (lines != 0) {
1551		/* Print some data about the neighboring objects, if they
1552		 * exist:
1553		 */
1554		struct page *page = virt_to_head_page(objp);
1555		unsigned int objnr;
1556
1557		objnr = obj_to_index(cachep, page, objp);
1558		if (objnr) {
1559			objp = index_to_obj(cachep, page, objnr - 1);
1560			realobj = (char *)objp + obj_offset(cachep);
1561			pr_err("Prev obj: start=%px, len=%d\n", realobj, size);
1562			print_objinfo(cachep, objp, 2);
1563		}
1564		if (objnr + 1 < cachep->num) {
1565			objp = index_to_obj(cachep, page, objnr + 1);
1566			realobj = (char *)objp + obj_offset(cachep);
1567			pr_err("Next obj: start=%px, len=%d\n", realobj, size);
1568			print_objinfo(cachep, objp, 2);
1569		}
1570	}
1571}
1572#endif
1573
1574#if DEBUG
1575static void slab_destroy_debugcheck(struct kmem_cache *cachep,
1576						struct page *page)
1577{
1578	int i;
1579
1580	if (OBJFREELIST_SLAB(cachep) && cachep->flags & SLAB_POISON) {
1581		poison_obj(cachep, page->freelist - obj_offset(cachep),
1582			POISON_FREE);
1583	}
1584
1585	for (i = 0; i < cachep->num; i++) {
1586		void *objp = index_to_obj(cachep, page, i);
1587
1588		if (cachep->flags & SLAB_POISON) {
1589			check_poison_obj(cachep, objp);
1590			slab_kernel_map(cachep, objp, 1);
1591		}
1592		if (cachep->flags & SLAB_RED_ZONE) {
1593			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
1594				slab_error(cachep, "start of a freed object was overwritten");
1595			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
1596				slab_error(cachep, "end of a freed object was overwritten");
1597		}
1598	}
1599}
1600#else
1601static void slab_destroy_debugcheck(struct kmem_cache *cachep,
1602						struct page *page)
1603{
1604}
1605#endif
1606
1607/**
1608 * slab_destroy - destroy and release all objects in a slab
1609 * @cachep: cache pointer being destroyed
1610 * @page: page pointer being destroyed
1611 *
1612 * Destroy all the objs in a slab page, and release the mem back to the system.
 1613 * Before calling, the slab page must have been unlinked from the cache. The
1614 * kmem_cache_node ->list_lock is not held/needed.
1615 */
1616static void slab_destroy(struct kmem_cache *cachep, struct page *page)
1617{
1618	void *freelist;
1619
1620	freelist = page->freelist;
1621	slab_destroy_debugcheck(cachep, page);
1622	if (unlikely(cachep->flags & SLAB_TYPESAFE_BY_RCU))
1623		call_rcu(&page->rcu_head, kmem_rcu_free);
1624	else
1625		kmem_freepages(cachep, page);
1626
1627	/*
1628	 * From now on, we don't use freelist
1629	 * although actual page can be freed in rcu context
1630	 */
1631	if (OFF_SLAB(cachep))
1632		kmem_cache_free(cachep->freelist_cache, freelist);
1633}
1634
1635/*
1636 * Update the size of the caches before calling slabs_destroy as it may
1637 * recursively call kfree.
1638 */
1639static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list)
1640{
1641	struct page *page, *n;
1642
1643	list_for_each_entry_safe(page, n, list, slab_list) {
1644		list_del(&page->slab_list);
1645		slab_destroy(cachep, page);
1646	}
1647}
1648
1649/**
1650 * calculate_slab_order - calculate size (page order) of slabs
1651 * @cachep: pointer to the cache that is being created
1652 * @size: size of objects to be created in this cache.
1653 * @flags: slab allocation flags
1654 *
1655 * Also calculates the number of objects per slab.
1656 *
1657 * This could be made much more intelligent.  For now, try to avoid using
1658 * high order pages for slabs.  When the gfp() functions are more friendly
1659 * towards high-order requests, this should be changed.
1660 *
1661 * Return: number of left-over bytes in a slab
1662 */
1663static size_t calculate_slab_order(struct kmem_cache *cachep,
1664				size_t size, slab_flags_t flags)
1665{
1666	size_t left_over = 0;
1667	int gfporder;
1668
1669	for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
1670		unsigned int num;
1671		size_t remainder;
1672
1673		num = cache_estimate(gfporder, size, flags, &remainder);
1674		if (!num)
1675			continue;
1676
1677		/* Can't handle number of objects more than SLAB_OBJ_MAX_NUM */
1678		if (num > SLAB_OBJ_MAX_NUM)
1679			break;
1680
1681		if (flags & CFLGS_OFF_SLAB) {
1682			struct kmem_cache *freelist_cache;
1683			size_t freelist_size;
1684
1685			freelist_size = num * sizeof(freelist_idx_t);
1686			freelist_cache = kmalloc_slab(freelist_size, 0u);
1687			if (!freelist_cache)
1688				continue;
1689
1690			/*
1691			 * Needed to avoid possible looping condition
1692			 * in cache_grow_begin()
1693			 */
1694			if (OFF_SLAB(freelist_cache))
1695				continue;
1696
1697			/* check if off slab has enough benefit */
1698			if (freelist_cache->size > cachep->size / 2)
1699				continue;
1700		}
1701
1702		/* Found something acceptable - save it away */
1703		cachep->num = num;
1704		cachep->gfporder = gfporder;
1705		left_over = remainder;
1706
1707		/*
1708		 * A VFS-reclaimable slab tends to have most allocations
1709		 * as GFP_NOFS and we really don't want to have to be allocating
1710		 * higher-order pages when we are unable to shrink dcache.
1711		 */
1712		if (flags & SLAB_RECLAIM_ACCOUNT)
1713			break;
1714
1715		/*
1716		 * Large number of objects is good, but very large slabs are
1717		 * currently bad for the gfp()s.
1718		 */
1719		if (gfporder >= slab_max_order)
1720			break;
1721
1722		/*
1723		 * Acceptable internal fragmentation?
1724		 */
1725		if (left_over * 8 <= (PAGE_SIZE << gfporder))
1726			break;
1727	}
1728	return left_over;
1729}
1730
1731static struct array_cache __percpu *alloc_kmem_cache_cpus(
1732		struct kmem_cache *cachep, int entries, int batchcount)
1733{
1734	int cpu;
1735	size_t size;
1736	struct array_cache __percpu *cpu_cache;
1737
1738	size = sizeof(void *) * entries + sizeof(struct array_cache);
1739	cpu_cache = __alloc_percpu(size, sizeof(void *));
1740
1741	if (!cpu_cache)
1742		return NULL;
1743
1744	for_each_possible_cpu(cpu) {
1745		init_arraycache(per_cpu_ptr(cpu_cache, cpu),
1746				entries, batchcount);
1747	}
1748
1749	return cpu_cache;
1750}
1751
1752static int __ref setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
1753{
1754	if (slab_state >= FULL)
1755		return enable_cpucache(cachep, gfp);
1756
1757	cachep->cpu_cache = alloc_kmem_cache_cpus(cachep, 1, 1);
1758	if (!cachep->cpu_cache)
1759		return 1;
1760
1761	if (slab_state == DOWN) {
1762		/* Creation of first cache (kmem_cache). */
1763		set_up_node(kmem_cache, CACHE_CACHE);
1764	} else if (slab_state == PARTIAL) {
1765		/* For kmem_cache_node */
1766		set_up_node(cachep, SIZE_NODE);
1767	} else {
1768		int node;
1769
1770		for_each_online_node(node) {
1771			cachep->node[node] = kmalloc_node(
1772				sizeof(struct kmem_cache_node), gfp, node);
1773			BUG_ON(!cachep->node[node]);
1774			kmem_cache_node_init(cachep->node[node]);
1775		}
1776	}
1777
1778	cachep->node[numa_mem_id()]->next_reap =
1779			jiffies + REAPTIMEOUT_NODE +
1780			((unsigned long)cachep) % REAPTIMEOUT_NODE;
1781
1782	cpu_cache_get(cachep)->avail = 0;
1783	cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
1784	cpu_cache_get(cachep)->batchcount = 1;
1785	cpu_cache_get(cachep)->touched = 0;
1786	cachep->batchcount = 1;
1787	cachep->limit = BOOT_CPUCACHE_ENTRIES;
1788	return 0;
1789}
1790
1791slab_flags_t kmem_cache_flags(unsigned int object_size,
1792	slab_flags_t flags, const char *name,
1793	void (*ctor)(void *))
1794{
1795	return flags;
1796}
1797
1798struct kmem_cache *
1799__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
1800		   slab_flags_t flags, void (*ctor)(void *))
1801{
1802	struct kmem_cache *cachep;
1803
1804	cachep = find_mergeable(size, align, flags, name, ctor);
1805	if (cachep) {
1806		cachep->refcount++;
1807
1808		/*
1809		 * Adjust the object sizes so that we clear
1810		 * the complete object on kzalloc.
1811		 */
1812		cachep->object_size = max_t(int, cachep->object_size, size);
1813	}
1814	return cachep;
1815}
1816
1817static bool set_objfreelist_slab_cache(struct kmem_cache *cachep,
1818			size_t size, slab_flags_t flags)
1819{
1820	size_t left;
1821
1822	cachep->num = 0;
1823
1824	/*
1825	 * If slab auto-initialization on free is enabled, store the freelist
1826	 * off-slab, so that its contents don't end up in one of the allocated
1827	 * objects.
1828	 */
1829	if (unlikely(slab_want_init_on_free(cachep)))
1830		return false;
1831
1832	if (cachep->ctor || flags & SLAB_TYPESAFE_BY_RCU)
1833		return false;
1834
1835	left = calculate_slab_order(cachep, size,
1836			flags | CFLGS_OBJFREELIST_SLAB);
1837	if (!cachep->num)
1838		return false;
1839
1840	if (cachep->num * sizeof(freelist_idx_t) > cachep->object_size)
1841		return false;
1842
1843	cachep->colour = left / cachep->colour_off;
1844
1845	return true;
1846}
1847
1848static bool set_off_slab_cache(struct kmem_cache *cachep,
1849			size_t size, slab_flags_t flags)
1850{
1851	size_t left;
1852
1853	cachep->num = 0;
1854
1855	/*
1856	 * Always use on-slab management when SLAB_NOLEAKTRACE
1857	 * to avoid recursive calls into kmemleak.
1858	 */
1859	if (flags & SLAB_NOLEAKTRACE)
1860		return false;
1861
1862	/*
1863	 * Size is large, assume best to place the slab management obj
1864	 * off-slab (should allow better packing of objs).
1865	 */
1866	left = calculate_slab_order(cachep, size, flags | CFLGS_OFF_SLAB);
1867	if (!cachep->num)
1868		return false;
1869
1870	/*
1871	 * If the slab has been placed off-slab and we have enough space, then
1872	 * move it on-slab. This is at the expense of any extra colouring.
1873	 */
1874	if (left >= cachep->num * sizeof(freelist_idx_t))
1875		return false;
1876
1877	cachep->colour = left / cachep->colour_off;
1878
1879	return true;
1880}
1881
1882static bool set_on_slab_cache(struct kmem_cache *cachep,
1883			size_t size, slab_flags_t flags)
1884{
1885	size_t left;
1886
1887	cachep->num = 0;
1888
1889	left = calculate_slab_order(cachep, size, flags);
1890	if (!cachep->num)
1891		return false;
1892
1893	cachep->colour = left / cachep->colour_off;
1894
1895	return true;
1896}
1897
1898/**
1899 * __kmem_cache_create - Create a cache.
1900 * @cachep: cache management descriptor
1901 * @flags: SLAB flags
1902 *
1903 * Returns zero on success, nonzero on failure.
1904 * Cannot be called within an interrupt, but can be interrupted.
1905 * The @ctor is run when new pages are allocated by the cache.
1906 *
1907 * The flags are
1908 *
1909 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
1910 * to catch references to uninitialised memory.
1911 *
1912 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
1913 * for buffer overruns.
1914 *
1915 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
1916 * cacheline.  This can be beneficial if you're counting cycles as closely
1917 * as davem.
1918 *
1919 * Return: 0 on success or a negative errno in case of error
1920 */
1921int __kmem_cache_create(struct kmem_cache *cachep, slab_flags_t flags)
1922{
1923	size_t ralign = BYTES_PER_WORD;
1924	gfp_t gfp;
1925	int err;
1926	unsigned int size = cachep->size;
1927
1928#if DEBUG
1929#if FORCED_DEBUG
1930	/*
1931	 * Enable redzoning and last user accounting, except for caches with
1932	 * large objects, if the increased size would increase the object size
1933	 * above the next power of two: caches with object sizes just above a
1934	 * power of two have a significant amount of internal fragmentation.
1935	 */
1936	if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
1937						2 * sizeof(unsigned long long)))
1938		flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
1939	if (!(flags & SLAB_TYPESAFE_BY_RCU))
1940		flags |= SLAB_POISON;
1941#endif
1942#endif
1943
1944	/*
1945	 * Check that size is in terms of words.  This is needed to avoid
1946	 * unaligned accesses for some archs when redzoning is used, and makes
1947	 * sure any on-slab bufctl's are also correctly aligned.
1948	 */
1949	size = ALIGN(size, BYTES_PER_WORD);
1950
1951	if (flags & SLAB_RED_ZONE) {
1952		ralign = REDZONE_ALIGN;
1953		/* If redzoning, ensure that the second redzone is suitably
1954		 * aligned, by adjusting the object size accordingly. */
1955		size = ALIGN(size, REDZONE_ALIGN);
1956	}
1957
1958	/* 3) caller mandated alignment */
1959	if (ralign < cachep->align) {
1960		ralign = cachep->align;
1961	}
1962	/* disable debug if necessary */
1963	if (ralign > __alignof__(unsigned long long))
1964		flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
1965	/*
1966	 * 4) Store it.
1967	 */
1968	cachep->align = ralign;
1969	cachep->colour_off = cache_line_size();
1970	/* Offset must be a multiple of the alignment. */
1971	if (cachep->colour_off < cachep->align)
1972		cachep->colour_off = cachep->align;
1973
1974	if (slab_is_available())
1975		gfp = GFP_KERNEL;
1976	else
1977		gfp = GFP_NOWAIT;
1978
1979#if DEBUG
1980
1981	/*
1982	 * Both debugging options require word-alignment which is calculated
1983	 * into align above.
1984	 */
1985	if (flags & SLAB_RED_ZONE) {
1986		/* add space for red zone words */
1987		cachep->obj_offset += sizeof(unsigned long long);
1988		size += 2 * sizeof(unsigned long long);
1989	}
1990	if (flags & SLAB_STORE_USER) {
1991		/* user store requires one word storage behind the end of
1992		 * the real object. But if the second red zone needs to be
1993		 * aligned to 64 bits, we must allow that much space.
1994		 */
1995		if (flags & SLAB_RED_ZONE)
1996			size += REDZONE_ALIGN;
1997		else
1998			size += BYTES_PER_WORD;
1999	}
2000#endif
2001
2002	kasan_cache_create(cachep, &size, &flags);
2003
2004	size = ALIGN(size, cachep->align);
2005	/*
2006	 * We should restrict the number of objects in a slab to implement
2007	 * byte sized index. Refer to the comment on the SLAB_OBJ_MIN_SIZE definition.
2008	 */
2009	if (FREELIST_BYTE_INDEX && size < SLAB_OBJ_MIN_SIZE)
2010		size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align);
2011
2012#if DEBUG
2013	/*
2014	 * To activate debug pagealloc, off-slab management is a necessary
2015	 * requirement. In the early phase of initialization, small sized slab
2016	 * caches are not initialized yet, so it would not be possible. Hence
2017	 * we check size >= 256, which guarantees that all necessary small
2018	 * sized slab caches are initialized in the current initialization sequence.
2019	 */
2020	if (debug_pagealloc_enabled_static() && (flags & SLAB_POISON) &&
2021		size >= 256 && cachep->object_size > cache_line_size()) {
2022		if (size < PAGE_SIZE || size % PAGE_SIZE == 0) {
2023			size_t tmp_size = ALIGN(size, PAGE_SIZE);
2024
2025			if (set_off_slab_cache(cachep, tmp_size, flags)) {
2026				flags |= CFLGS_OFF_SLAB;
2027				cachep->obj_offset += tmp_size - size;
2028				size = tmp_size;
2029				goto done;
2030			}
2031		}
2032	}
2033#endif
2034
2035	if (set_objfreelist_slab_cache(cachep, size, flags)) {
2036		flags |= CFLGS_OBJFREELIST_SLAB;
2037		goto done;
2038	}
2039
2040	if (set_off_slab_cache(cachep, size, flags)) {
2041		flags |= CFLGS_OFF_SLAB;
2042		goto done;
2043	}
2044
2045	if (set_on_slab_cache(cachep, size, flags))
2046		goto done;
2047
2048	return -E2BIG;
2049
2050done:
2051	cachep->freelist_size = cachep->num * sizeof(freelist_idx_t);
2052	cachep->flags = flags;
2053	cachep->allocflags = __GFP_COMP;
2054	if (flags & SLAB_CACHE_DMA)
2055		cachep->allocflags |= GFP_DMA;
2056	if (flags & SLAB_CACHE_DMA32)
2057		cachep->allocflags |= GFP_DMA32;
2058	if (flags & SLAB_RECLAIM_ACCOUNT)
2059		cachep->allocflags |= __GFP_RECLAIMABLE;
2060	cachep->size = size;
2061	cachep->reciprocal_buffer_size = reciprocal_value(size);
2062
2063#if DEBUG
2064	/*
2065	 * If we're going to use the generic kernel_map_pages()
2066	 * poisoning, then it's going to smash the contents of
2067	 * the redzone and userword anyhow, so switch them off.
2068	 */
2069	if (IS_ENABLED(CONFIG_PAGE_POISONING) &&
2070		(cachep->flags & SLAB_POISON) &&
2071		is_debug_pagealloc_cache(cachep))
2072		cachep->flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
2073#endif
2074
2075	if (OFF_SLAB(cachep)) {
2076		cachep->freelist_cache =
2077			kmalloc_slab(cachep->freelist_size, 0u);
2078	}
2079
2080	err = setup_cpu_cache(cachep, gfp);
2081	if (err) {
2082		__kmem_cache_release(cachep);
2083		return err;
2084	}
2085
2086	return 0;
2087}
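/*
 * Illustrative sketch (not part of slab.c): how a typical cache creator
 * exercises the flags documented above. The struct name, cache name and
 * init function are made up for the example; error handling is minimal.
 */
#if 0
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>

struct example_record {
	unsigned long id;
	char payload[120];
};

static struct kmem_cache *example_record_cache;

static int __init example_record_cache_init(void)
{
	/*
	 * SLAB_HWCACHE_ALIGN avoids false sharing between objects;
	 * SLAB_POISON and SLAB_RED_ZONE enable the debug checks
	 * implemented in the DEBUG paths of this file.
	 */
	example_record_cache = kmem_cache_create("example_record",
				sizeof(struct example_record), 0,
				SLAB_HWCACHE_ALIGN | SLAB_POISON |
				SLAB_RED_ZONE, NULL);
	return example_record_cache ? 0 : -ENOMEM;
}
#endif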
2088
2089#if DEBUG
2090static void check_irq_off(void)
2091{
2092	BUG_ON(!irqs_disabled());
2093}
2094
2095static void check_irq_on(void)
2096{
2097	BUG_ON(irqs_disabled());
2098}
2099
2100static void check_mutex_acquired(void)
2101{
2102	BUG_ON(!mutex_is_locked(&slab_mutex));
2103}
2104
2105static void check_spinlock_acquired(struct kmem_cache *cachep)
2106{
2107#ifdef CONFIG_SMP
2108	check_irq_off();
2109	assert_spin_locked(&get_node(cachep, numa_mem_id())->list_lock);
2110#endif
2111}
2112
2113static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
2114{
2115#ifdef CONFIG_SMP
2116	check_irq_off();
2117	assert_spin_locked(&get_node(cachep, node)->list_lock);
2118#endif
2119}
2120
2121#else
2122#define check_irq_off()	do { } while(0)
2123#define check_irq_on()	do { } while(0)
2124#define check_mutex_acquired()	do { } while(0)
2125#define check_spinlock_acquired(x) do { } while(0)
2126#define check_spinlock_acquired_node(x, y) do { } while(0)
2127#endif
2128
2129static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac,
2130				int node, bool free_all, struct list_head *list)
2131{
2132	int tofree;
2133
2134	if (!ac || !ac->avail)
2135		return;
2136
2137	tofree = free_all ? ac->avail : (ac->limit + 4) / 5;
2138	if (tofree > ac->avail)
2139		tofree = (ac->avail + 1) / 2;
2140
2141	free_block(cachep, ac->entry, tofree, node, list);
2142	ac->avail -= tofree;
2143	memmove(ac->entry, &(ac->entry[tofree]), sizeof(void *) * ac->avail);
2144}
2145
2146static void do_drain(void *arg)
2147{
2148	struct kmem_cache *cachep = arg;
2149	struct array_cache *ac;
2150	int node = numa_mem_id();
2151	struct kmem_cache_node *n;
2152	LIST_HEAD(list);
2153
2154	check_irq_off();
2155	ac = cpu_cache_get(cachep);
2156	n = get_node(cachep, node);
2157	spin_lock(&n->list_lock);
2158	free_block(cachep, ac->entry, ac->avail, node, &list);
2159	spin_unlock(&n->list_lock);
2160	ac->avail = 0;
2161	slabs_destroy(cachep, &list);
2162}
2163
2164static void drain_cpu_caches(struct kmem_cache *cachep)
2165{
2166	struct kmem_cache_node *n;
2167	int node;
2168	LIST_HEAD(list);
2169
2170	on_each_cpu(do_drain, cachep, 1);
2171	check_irq_on();
2172	for_each_kmem_cache_node(cachep, node, n)
2173		if (n->alien)
2174			drain_alien_cache(cachep, n->alien);
2175
2176	for_each_kmem_cache_node(cachep, node, n) {
2177		spin_lock_irq(&n->list_lock);
2178		drain_array_locked(cachep, n->shared, node, true, &list);
2179		spin_unlock_irq(&n->list_lock);
2180
2181		slabs_destroy(cachep, &list);
2182	}
2183}
2184
2185/*
2186 * Remove slabs from the list of free slabs.
2187 * Specify the number of slabs to drain in tofree.
2188 *
2189 * Returns the actual number of slabs released.
2190 */
2191static int drain_freelist(struct kmem_cache *cache,
2192			struct kmem_cache_node *n, int tofree)
2193{
2194	struct list_head *p;
2195	int nr_freed;
2196	struct page *page;
2197
2198	nr_freed = 0;
2199	while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
2200
2201		spin_lock_irq(&n->list_lock);
2202		p = n->slabs_free.prev;
2203		if (p == &n->slabs_free) {
2204			spin_unlock_irq(&n->list_lock);
2205			goto out;
2206		}
2207
2208		page = list_entry(p, struct page, slab_list);
2209		list_del(&page->slab_list);
2210		n->free_slabs--;
2211		n->total_slabs--;
2212		/*
2213		 * Safe to drop the lock. The slab is no longer linked
2214		 * to the cache.
2215		 */
2216		n->free_objects -= cache->num;
2217		spin_unlock_irq(&n->list_lock);
2218		slab_destroy(cache, page);
2219		nr_freed++;
2220	}
2221out:
2222	return nr_freed;
2223}
2224
2225bool __kmem_cache_empty(struct kmem_cache *s)
2226{
2227	int node;
2228	struct kmem_cache_node *n;
2229
2230	for_each_kmem_cache_node(s, node, n)
2231		if (!list_empty(&n->slabs_full) ||
2232		    !list_empty(&n->slabs_partial))
2233			return false;
2234	return true;
2235}
2236
2237int __kmem_cache_shrink(struct kmem_cache *cachep)
2238{
2239	int ret = 0;
2240	int node;
2241	struct kmem_cache_node *n;
2242
2243	drain_cpu_caches(cachep);
2244
2245	check_irq_on();
2246	for_each_kmem_cache_node(cachep, node, n) {
2247		drain_freelist(cachep, n, INT_MAX);
2248
2249		ret += !list_empty(&n->slabs_full) ||
2250			!list_empty(&n->slabs_partial);
2251	}
2252	return (ret ? 1 : 0);
2253}
2254
2255int __kmem_cache_shutdown(struct kmem_cache *cachep)
2256{
2257	return __kmem_cache_shrink(cachep);
2258}
2259
2260void __kmem_cache_release(struct kmem_cache *cachep)
2261{
2262	int i;
2263	struct kmem_cache_node *n;
2264
2265	cache_random_seq_destroy(cachep);
2266
2267	free_percpu(cachep->cpu_cache);
2268
2269	/* NUMA: free the node structures */
2270	for_each_kmem_cache_node(cachep, i, n) {
2271		kfree(n->shared);
2272		free_alien_cache(n->alien);
2273		kfree(n);
2274		cachep->node[i] = NULL;
2275	}
2276}
2277
2278/*
2279 * Get the memory for a slab management obj.
2280 *
2281 * For a slab cache when the slab descriptor is off-slab, the
2282 * slab descriptor can't come from the same cache which is being created,
2283 * because if it were, we would defer the creation of
2284 * the kmalloc_{dma,}_cache of size sizeof(slab descriptor) to this point,
2285 * and we would eventually call down to __kmem_cache_create(), which
2286 * in turn looks up in the kmalloc_{dma,}_caches for the desired-size one.
2287 * This is a "chicken-and-egg" problem.
2288 *
2289 * So the off-slab slab descriptor shall come from the kmalloc_{dma,}_caches,
2290 * which are all initialized during kmem_cache_init().
2291 */
2292static void *alloc_slabmgmt(struct kmem_cache *cachep,
2293				   struct page *page, int colour_off,
2294				   gfp_t local_flags, int nodeid)
2295{
2296	void *freelist;
2297	void *addr = page_address(page);
2298
2299	page->s_mem = addr + colour_off;
2300	page->active = 0;
2301
2302	if (OBJFREELIST_SLAB(cachep))
2303		freelist = NULL;
2304	else if (OFF_SLAB(cachep)) {
2305		/* Slab management obj is off-slab. */
2306		freelist = kmem_cache_alloc_node(cachep->freelist_cache,
2307					      local_flags, nodeid);
2308		if (!freelist)
2309			return NULL;
2310	} else {
2311		/* We will use last bytes at the slab for freelist */
2312		freelist = addr + (PAGE_SIZE << cachep->gfporder) -
2313				cachep->freelist_size;
2314	}
2315
2316	return freelist;
2317}
2318
2319static inline freelist_idx_t get_free_obj(struct page *page, unsigned int idx)
2320{
2321	return ((freelist_idx_t *)page->freelist)[idx];
2322}
2323
2324static inline void set_free_obj(struct page *page,
2325					unsigned int idx, freelist_idx_t val)
2326{
2327	((freelist_idx_t *)(page->freelist))[idx] = val;
2328}
2329
2330static void cache_init_objs_debug(struct kmem_cache *cachep, struct page *page)
2331{
2332#if DEBUG
2333	int i;
2334
2335	for (i = 0; i < cachep->num; i++) {
2336		void *objp = index_to_obj(cachep, page, i);
2337
2338		if (cachep->flags & SLAB_STORE_USER)
2339			*dbg_userword(cachep, objp) = NULL;
2340
2341		if (cachep->flags & SLAB_RED_ZONE) {
2342			*dbg_redzone1(cachep, objp) = RED_INACTIVE;
2343			*dbg_redzone2(cachep, objp) = RED_INACTIVE;
2344		}
2345		/*
2346		 * Constructors are not allowed to allocate memory from the same
2347		 * cache which they are a constructor for.  Otherwise, deadlock.
2348		 * They must also be threaded.
2349		 */
2350		if (cachep->ctor && !(cachep->flags & SLAB_POISON)) {
2351			kasan_unpoison_object_data(cachep,
2352						   objp + obj_offset(cachep));
2353			cachep->ctor(objp + obj_offset(cachep));
2354			kasan_poison_object_data(
2355				cachep, objp + obj_offset(cachep));
2356		}
2357
2358		if (cachep->flags & SLAB_RED_ZONE) {
2359			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
2360				slab_error(cachep, "constructor overwrote the end of an object");
2361			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
2362				slab_error(cachep, "constructor overwrote the start of an object");
2363		}
2364		/* need to poison the objs? */
2365		if (cachep->flags & SLAB_POISON) {
2366			poison_obj(cachep, objp, POISON_FREE);
2367			slab_kernel_map(cachep, objp, 0);
2368		}
2369	}
2370#endif
2371}
2372
2373#ifdef CONFIG_SLAB_FREELIST_RANDOM
2374/* Hold information during a freelist initialization */
2375union freelist_init_state {
2376	struct {
2377		unsigned int pos;
2378		unsigned int *list;
2379		unsigned int count;
2380	};
2381	struct rnd_state rnd_state;
2382};
2383
2384/*
2385 * Initialize the state based on the randomization method available.
2386 * Return true if the pre-computed list is available, false otherwise.
2387 */
2388static bool freelist_state_initialize(union freelist_init_state *state,
2389				struct kmem_cache *cachep,
2390				unsigned int count)
2391{
2392	bool ret;
2393	unsigned int rand;
2394
2395	/* Use best entropy available to define a random shift */
2396	rand = get_random_int();
2397
2398	/* Use a random state if the pre-computed list is not available */
2399	if (!cachep->random_seq) {
2400		prandom_seed_state(&state->rnd_state, rand);
2401		ret = false;
2402	} else {
2403		state->list = cachep->random_seq;
2404		state->count = count;
2405		state->pos = rand % count;
2406		ret = true;
2407	}
2408	return ret;
2409}
2410
2411/* Get the next entry on the list and randomize it using a random shift */
2412static freelist_idx_t next_random_slot(union freelist_init_state *state)
2413{
2414	if (state->pos >= state->count)
2415		state->pos = 0;
2416	return state->list[state->pos++];
2417}
2418
2419/* Swap two freelist entries */
2420static void swap_free_obj(struct page *page, unsigned int a, unsigned int b)
2421{
2422	swap(((freelist_idx_t *)page->freelist)[a],
2423		((freelist_idx_t *)page->freelist)[b]);
2424}
2425
2426/*
2427 * Shuffle the freelist initialization state based on pre-computed lists.
2428 * return true if the list was successfully shuffled, false otherwise.
2429 */
2430static bool shuffle_freelist(struct kmem_cache *cachep, struct page *page)
2431{
2432	unsigned int objfreelist = 0, i, rand, count = cachep->num;
2433	union freelist_init_state state;
2434	bool precomputed;
2435
2436	if (count < 2)
2437		return false;
2438
2439	precomputed = freelist_state_initialize(&state, cachep, count);
2440
2441	/* Take a random entry as the objfreelist */
2442	if (OBJFREELIST_SLAB(cachep)) {
2443		if (!precomputed)
2444			objfreelist = count - 1;
2445		else
2446			objfreelist = next_random_slot(&state);
2447		page->freelist = index_to_obj(cachep, page, objfreelist) +
2448						obj_offset(cachep);
2449		count--;
2450	}
2451
2452	/*
2453	 * On early boot, generate the list dynamically.
2454	 * Later use a pre-computed list for speed.
2455	 */
2456	if (!precomputed) {
2457		for (i = 0; i < count; i++)
2458			set_free_obj(page, i, i);
2459
2460		/* Fisher-Yates shuffle */
2461		for (i = count - 1; i > 0; i--) {
2462			rand = prandom_u32_state(&state.rnd_state);
2463			rand %= (i + 1);
2464			swap_free_obj(page, i, rand);
2465		}
2466	} else {
2467		for (i = 0; i < count; i++)
2468			set_free_obj(page, i, next_random_slot(&state));
2469	}
2470
2471	if (OBJFREELIST_SLAB(cachep))
2472		set_free_obj(page, cachep->num - 1, objfreelist);
2473
2474	return true;
2475}
2476#else
2477static inline bool shuffle_freelist(struct kmem_cache *cachep,
2478				struct page *page)
2479{
2480	return false;
2481}
2482#endif /* CONFIG_SLAB_FREELIST_RANDOM */
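/*
 * Illustrative sketch (not part of slab.c): the Fisher-Yates shuffle used
 * by shuffle_freelist() above, written as a stand-alone user-space C
 * program so the resulting index permutation can be inspected directly.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int main(void)
{
	unsigned int list[8], i, j, tmp;
	const unsigned int count = 8;

	/* Start from the identity permutation, like the !precomputed path. */
	for (i = 0; i < count; i++)
		list[i] = i;

	srand((unsigned int)time(NULL));

	/* Walk backwards, swapping each slot with a random earlier slot. */
	for (i = count - 1; i > 0; i--) {
		j = (unsigned int)rand() % (i + 1);
		tmp = list[i];
		list[i] = list[j];
		list[j] = tmp;
	}

	for (i = 0; i < count; i++)
		printf("%u ", list[i]);
	printf("\n");
	return 0;
}
#endif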
2483
2484static void cache_init_objs(struct kmem_cache *cachep,
2485			    struct page *page)
2486{
2487	int i;
2488	void *objp;
2489	bool shuffled;
2490
2491	cache_init_objs_debug(cachep, page);
2492
2493	/* Try to randomize the freelist if enabled */
2494	shuffled = shuffle_freelist(cachep, page);
2495
2496	if (!shuffled && OBJFREELIST_SLAB(cachep)) {
2497		page->freelist = index_to_obj(cachep, page, cachep->num - 1) +
2498						obj_offset(cachep);
2499	}
2500
2501	for (i = 0; i < cachep->num; i++) {
2502		objp = index_to_obj(cachep, page, i);
2503		objp = kasan_init_slab_obj(cachep, objp);
2504
2505		/* constructor could break poison info */
2506		if (DEBUG == 0 && cachep->ctor) {
2507			kasan_unpoison_object_data(cachep, objp);
2508			cachep->ctor(objp);
2509			kasan_poison_object_data(cachep, objp);
2510		}
2511
2512		if (!shuffled)
2513			set_free_obj(page, i, i);
2514	}
2515}
2516
2517static void *slab_get_obj(struct kmem_cache *cachep, struct page *page)
2518{
2519	void *objp;
2520
2521	objp = index_to_obj(cachep, page, get_free_obj(page, page->active));
2522	page->active++;
2523
2524	return objp;
2525}
2526
2527static void slab_put_obj(struct kmem_cache *cachep,
2528			struct page *page, void *objp)
2529{
2530	unsigned int objnr = obj_to_index(cachep, page, objp);
2531#if DEBUG
2532	unsigned int i;
2533
2534	/* Verify double free bug */
2535	for (i = page->active; i < cachep->num; i++) {
2536		if (get_free_obj(page, i) == objnr) {
2537			pr_err("slab: double free detected in cache '%s', objp %px\n",
2538			       cachep->name, objp);
2539			BUG();
2540		}
2541	}
2542#endif
2543	page->active--;
2544	if (!page->freelist)
2545		page->freelist = objp + obj_offset(cachep);
2546
2547	set_free_obj(page, page->active, objnr);
2548}
2549
2550/*
2551 * Map pages beginning at addr to the given cache and slab. This is required
2552 * for the slab allocator to be able to lookup the cache and slab of a
2553 * virtual address for kfree, ksize, and slab debugging.
2554 */
2555static void slab_map_pages(struct kmem_cache *cache, struct page *page,
2556			   void *freelist)
2557{
2558	page->slab_cache = cache;
2559	page->freelist = freelist;
2560}
2561
2562/*
2563 * Grow (by 1) the number of slabs within a cache.  This is called by
2564 * kmem_cache_alloc() when there are no active objs left in a cache.
2565 */
2566static struct page *cache_grow_begin(struct kmem_cache *cachep,
2567				gfp_t flags, int nodeid)
2568{
2569	void *freelist;
2570	size_t offset;
2571	gfp_t local_flags;
2572	int page_node;
2573	struct kmem_cache_node *n;
2574	struct page *page;
2575
2576	/*
2577	 * Be lazy and only check for valid flags here,  keeping it out of the
2578	 * critical path in kmem_cache_alloc().
2579	 */
2580	if (unlikely(flags & GFP_SLAB_BUG_MASK))
2581		flags = kmalloc_fix_flags(flags);
2582
2583	WARN_ON_ONCE(cachep->ctor && (flags & __GFP_ZERO));
2584	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
2585
2586	check_irq_off();
2587	if (gfpflags_allow_blocking(local_flags))
2588		local_irq_enable();
2589
2590	/*
2591	 * Get mem for the objs.  Attempt to allocate a physical page from
2592	 * 'nodeid'.
2593	 */
2594	page = kmem_getpages(cachep, local_flags, nodeid);
2595	if (!page)
2596		goto failed;
2597
2598	page_node = page_to_nid(page);
2599	n = get_node(cachep, page_node);
2600
2601	/* Get colour for the slab, and calculate the next value. */
2602	n->colour_next++;
2603	if (n->colour_next >= cachep->colour)
2604		n->colour_next = 0;
2605
2606	offset = n->colour_next;
2607	if (offset >= cachep->colour)
2608		offset = 0;
2609
2610	offset *= cachep->colour_off;
2611
2612	/*
2613	 * Call kasan_poison_slab() before calling alloc_slabmgmt(), so
2614	 * page_address() in the latter returns a non-tagged pointer,
2615	 * as it should be for slab pages.
2616	 */
2617	kasan_poison_slab(page);
2618
2619	/* Get slab management. */
2620	freelist = alloc_slabmgmt(cachep, page, offset,
2621			local_flags & ~GFP_CONSTRAINT_MASK, page_node);
2622	if (OFF_SLAB(cachep) && !freelist)
2623		goto opps1;
2624
2625	slab_map_pages(cachep, page, freelist);
2626
2627	cache_init_objs(cachep, page);
2628
2629	if (gfpflags_allow_blocking(local_flags))
2630		local_irq_disable();
2631
2632	return page;
2633
2634opps1:
2635	kmem_freepages(cachep, page);
2636failed:
2637	if (gfpflags_allow_blocking(local_flags))
2638		local_irq_disable();
2639	return NULL;
2640}
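/*
 * Illustrative sketch (not part of slab.c): how the colour_next counter
 * advanced in cache_grow_begin() above turns into a byte offset. Assuming
 * 4 available colours and a 64-byte colour_off, consecutive slabs start
 * their objects at offsets 64, 128, 192, 0, 64, ...
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int colour = 4, colour_off = 64, colour_next = 0;
	unsigned int slab, offset;

	for (slab = 0; slab < 8; slab++) {
		/* Same order as cache_grow_begin(): advance, wrap, then use. */
		colour_next++;
		if (colour_next >= colour)
			colour_next = 0;
		offset = colour_next * colour_off;
		printf("slab %u: first object at offset %u\n", slab, offset);
	}
	return 0;
}
#endif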
2641
2642static void cache_grow_end(struct kmem_cache *cachep, struct page *page)
2643{
2644	struct kmem_cache_node *n;
2645	void *list = NULL;
2646
2647	check_irq_off();
2648
2649	if (!page)
2650		return;
2651
2652	INIT_LIST_HEAD(&page->slab_list);
2653	n = get_node(cachep, page_to_nid(page));
2654
2655	spin_lock(&n->list_lock);
2656	n->total_slabs++;
2657	if (!page->active) {
2658		list_add_tail(&page->slab_list, &n->slabs_free);
2659		n->free_slabs++;
2660	} else
2661		fixup_slab_list(cachep, n, page, &list);
2662
2663	STATS_INC_GROWN(cachep);
2664	n->free_objects += cachep->num - page->active;
2665	spin_unlock(&n->list_lock);
2666
2667	fixup_objfreelist_debug(cachep, &list);
2668}
2669
2670#if DEBUG
2671
2672/*
2673 * Perform extra freeing checks:
2674 * - detect bad pointers.
2675 * - POISON/RED_ZONE checking
2676 */
2677static void kfree_debugcheck(const void *objp)
2678{
2679	if (!virt_addr_valid(objp)) {
2680		pr_err("kfree_debugcheck: out of range ptr %lxh\n",
2681		       (unsigned long)objp);
2682		BUG();
2683	}
2684}
2685
2686static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
2687{
2688	unsigned long long redzone1, redzone2;
2689
2690	redzone1 = *dbg_redzone1(cache, obj);
2691	redzone2 = *dbg_redzone2(cache, obj);
2692
2693	/*
2694	 * Redzone is ok.
2695	 */
2696	if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
2697		return;
2698
2699	if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
2700		slab_error(cache, "double free detected");
2701	else
2702		slab_error(cache, "memory outside object was overwritten");
2703
2704	pr_err("%px: redzone 1:0x%llx, redzone 2:0x%llx\n",
2705	       obj, redzone1, redzone2);
2706}
2707
2708static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
2709				   unsigned long caller)
2710{
2711	unsigned int objnr;
2712	struct page *page;
2713
2714	BUG_ON(virt_to_cache(objp) != cachep);
2715
2716	objp -= obj_offset(cachep);
2717	kfree_debugcheck(objp);
2718	page = virt_to_head_page(objp);
2719
2720	if (cachep->flags & SLAB_RED_ZONE) {
2721		verify_redzone_free(cachep, objp);
2722		*dbg_redzone1(cachep, objp) = RED_INACTIVE;
2723		*dbg_redzone2(cachep, objp) = RED_INACTIVE;
2724	}
2725	if (cachep->flags & SLAB_STORE_USER)
2726		*dbg_userword(cachep, objp) = (void *)caller;
2727
2728	objnr = obj_to_index(cachep, page, objp);
2729
2730	BUG_ON(objnr >= cachep->num);
2731	BUG_ON(objp != index_to_obj(cachep, page, objnr));
2732
2733	if (cachep->flags & SLAB_POISON) {
2734		poison_obj(cachep, objp, POISON_FREE);
2735		slab_kernel_map(cachep, objp, 0);
2736	}
2737	return objp;
2738}
2739
2740#else
2741#define kfree_debugcheck(x) do { } while(0)
2742#define cache_free_debugcheck(x,objp,z) (objp)
2743#endif
2744
2745static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
2746						void **list)
2747{
2748#if DEBUG
2749	void *next = *list;
2750	void *objp;
2751
2752	while (next) {
2753		objp = next - obj_offset(cachep);
2754		next = *(void **)next;
2755		poison_obj(cachep, objp, POISON_FREE);
2756	}
2757#endif
2758}
2759
2760static inline void fixup_slab_list(struct kmem_cache *cachep,
2761				struct kmem_cache_node *n, struct page *page,
2762				void **list)
2763{
2764	/* move slabp to correct slabp list: */
2765	list_del(&page->slab_list);
2766	if (page->active == cachep->num) {
2767		list_add(&page->slab_list, &n->slabs_full);
2768		if (OBJFREELIST_SLAB(cachep)) {
2769#if DEBUG
2770			/* Poisoning will be done without holding the lock */
2771			if (cachep->flags & SLAB_POISON) {
2772				void **objp = page->freelist;
2773
2774				*objp = *list;
2775				*list = objp;
2776			}
2777#endif
2778			page->freelist = NULL;
2779		}
2780	} else
2781		list_add(&page->slab_list, &n->slabs_partial);
2782}
2783
2784/* Try to find non-pfmemalloc slab if needed */
2785static noinline struct page *get_valid_first_slab(struct kmem_cache_node *n,
2786					struct page *page, bool pfmemalloc)
2787{
2788	if (!page)
2789		return NULL;
2790
2791	if (pfmemalloc)
2792		return page;
2793
2794	if (!PageSlabPfmemalloc(page))
2795		return page;
2796
2797	/* No need to keep pfmemalloc slab if we have enough free objects */
2798	if (n->free_objects > n->free_limit) {
2799		ClearPageSlabPfmemalloc(page);
2800		return page;
2801	}
2802
2803	/* Move pfmemalloc slab to the end of list to speed up next search */
2804	list_del(&page->slab_list);
2805	if (!page->active) {
2806		list_add_tail(&page->slab_list, &n->slabs_free);
2807		n->free_slabs++;
2808	} else
2809		list_add_tail(&page->slab_list, &n->slabs_partial);
2810
2811	list_for_each_entry(page, &n->slabs_partial, slab_list) {
2812		if (!PageSlabPfmemalloc(page))
2813			return page;
2814	}
2815
2816	n->free_touched = 1;
2817	list_for_each_entry(page, &n->slabs_free, slab_list) {
2818		if (!PageSlabPfmemalloc(page)) {
2819			n->free_slabs--;
2820			return page;
2821		}
2822	}
2823
2824	return NULL;
2825}
2826
2827static struct page *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc)
2828{
2829	struct page *page;
2830
2831	assert_spin_locked(&n->list_lock);
2832	page = list_first_entry_or_null(&n->slabs_partial, struct page,
2833					slab_list);
2834	if (!page) {
2835		n->free_touched = 1;
2836		page = list_first_entry_or_null(&n->slabs_free, struct page,
2837						slab_list);
2838		if (page)
2839			n->free_slabs--;
2840	}
2841
2842	if (sk_memalloc_socks())
2843		page = get_valid_first_slab(n, page, pfmemalloc);
2844
2845	return page;
2846}
2847
2848static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep,
2849				struct kmem_cache_node *n, gfp_t flags)
2850{
2851	struct page *page;
2852	void *obj;
2853	void *list = NULL;
2854
2855	if (!gfp_pfmemalloc_allowed(flags))
2856		return NULL;
2857
2858	spin_lock(&n->list_lock);
2859	page = get_first_slab(n, true);
2860	if (!page) {
2861		spin_unlock(&n->list_lock);
2862		return NULL;
2863	}
2864
2865	obj = slab_get_obj(cachep, page);
2866	n->free_objects--;
2867
2868	fixup_slab_list(cachep, n, page, &list);
2869
2870	spin_unlock(&n->list_lock);
2871	fixup_objfreelist_debug(cachep, &list);
2872
2873	return obj;
2874}
2875
2876/*
2877 * Slab list should be fixed up by fixup_slab_list() for existing slab
2878 * or cache_grow_end() for new slab
2879 */
2880static __always_inline int alloc_block(struct kmem_cache *cachep,
2881		struct array_cache *ac, struct page *page, int batchcount)
2882{
2883	/*
2884	 * There must be at least one object available for
2885	 * allocation.
2886	 */
2887	BUG_ON(page->active >= cachep->num);
2888
2889	while (page->active < cachep->num && batchcount--) {
2890		STATS_INC_ALLOCED(cachep);
2891		STATS_INC_ACTIVE(cachep);
2892		STATS_SET_HIGH(cachep);
2893
2894		ac->entry[ac->avail++] = slab_get_obj(cachep, page);
2895	}
2896
2897	return batchcount;
2898}
2899
2900static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
2901{
2902	int batchcount;
2903	struct kmem_cache_node *n;
2904	struct array_cache *ac, *shared;
2905	int node;
2906	void *list = NULL;
2907	struct page *page;
2908
2909	check_irq_off();
2910	node = numa_mem_id();
2911
2912	ac = cpu_cache_get(cachep);
2913	batchcount = ac->batchcount;
2914	if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
2915		/*
2916		 * If there was little recent activity on this cache, then
2917		 * perform only a partial refill.  Otherwise we could generate
2918		 * refill bouncing.
2919		 */
2920		batchcount = BATCHREFILL_LIMIT;
2921	}
2922	n = get_node(cachep, node);
2923
2924	BUG_ON(ac->avail > 0 || !n);
2925	shared = READ_ONCE(n->shared);
2926	if (!n->free_objects && (!shared || !shared->avail))
2927		goto direct_grow;
2928
2929	spin_lock(&n->list_lock);
2930	shared = READ_ONCE(n->shared);
2931
2932	/* See if we can refill from the shared array */
2933	if (shared && transfer_objects(ac, shared, batchcount)) {
2934		shared->touched = 1;
2935		goto alloc_done;
2936	}
2937
2938	while (batchcount > 0) {
2939		/* Get the slab that the allocation should come from. */
2940		page = get_first_slab(n, false);
2941		if (!page)
2942			goto must_grow;
2943
2944		check_spinlock_acquired(cachep);
2945
2946		batchcount = alloc_block(cachep, ac, page, batchcount);
2947		fixup_slab_list(cachep, n, page, &list);
2948	}
2949
2950must_grow:
2951	n->free_objects -= ac->avail;
2952alloc_done:
2953	spin_unlock(&n->list_lock);
2954	fixup_objfreelist_debug(cachep, &list);
2955
2956direct_grow:
2957	if (unlikely(!ac->avail)) {
2958		/* Check if we can use obj in pfmemalloc slab */
2959		if (sk_memalloc_socks()) {
2960			void *obj = cache_alloc_pfmemalloc(cachep, n, flags);
2961
2962			if (obj)
2963				return obj;
2964		}
2965
2966		page = cache_grow_begin(cachep, gfp_exact_node(flags), node);
2967
2968		/*
2969		 * cache_grow_begin() can reenable interrupts,
2970		 * then ac could change.
2971		 */
2972		ac = cpu_cache_get(cachep);
2973		if (!ac->avail && page)
2974			alloc_block(cachep, ac, page, batchcount);
2975		cache_grow_end(cachep, page);
2976
2977		if (!ac->avail)
2978			return NULL;
2979	}
2980	ac->touched = 1;
2981
2982	return ac->entry[--ac->avail];
2983}
2984
2985static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
2986						gfp_t flags)
2987{
2988	might_sleep_if(gfpflags_allow_blocking(flags));
2989}
2990
2991#if DEBUG
2992static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
2993				gfp_t flags, void *objp, unsigned long caller)
2994{
2995	WARN_ON_ONCE(cachep->ctor && (flags & __GFP_ZERO));
2996	if (!objp)
2997		return objp;
2998	if (cachep->flags & SLAB_POISON) {
2999		check_poison_obj(cachep, objp);
3000		slab_kernel_map(cachep, objp, 1);
3001		poison_obj(cachep, objp, POISON_INUSE);
3002	}
3003	if (cachep->flags & SLAB_STORE_USER)
3004		*dbg_userword(cachep, objp) = (void *)caller;
3005
3006	if (cachep->flags & SLAB_RED_ZONE) {
3007		if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
3008				*dbg_redzone2(cachep, objp) != RED_INACTIVE) {
3009			slab_error(cachep, "double free, or memory outside object was overwritten");
3010			pr_err("%px: redzone 1:0x%llx, redzone 2:0x%llx\n",
3011			       objp, *dbg_redzone1(cachep, objp),
3012			       *dbg_redzone2(cachep, objp));
3013		}
3014		*dbg_redzone1(cachep, objp) = RED_ACTIVE;
3015		*dbg_redzone2(cachep, objp) = RED_ACTIVE;
3016	}
3017
3018	objp += obj_offset(cachep);
3019	if (cachep->ctor && cachep->flags & SLAB_POISON)
3020		cachep->ctor(objp);
3021	if (ARCH_SLAB_MINALIGN &&
3022	    ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) {
3023		pr_err("0x%px: not aligned to ARCH_SLAB_MINALIGN=%d\n",
3024		       objp, (int)ARCH_SLAB_MINALIGN);
3025	}
3026	return objp;
3027}
3028#else
3029#define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
3030#endif
3031
3032static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3033{
3034	void *objp;
3035	struct array_cache *ac;
3036
3037	check_irq_off();
3038
3039	ac = cpu_cache_get(cachep);
3040	if (likely(ac->avail)) {
3041		ac->touched = 1;
3042		objp = ac->entry[--ac->avail];
3043
3044		STATS_INC_ALLOCHIT(cachep);
3045		goto out;
3046	}
3047
3048	STATS_INC_ALLOCMISS(cachep);
3049	objp = cache_alloc_refill(cachep, flags);
3050	/*
3051	 * the 'ac' may be updated by cache_alloc_refill(),
3052	 * and kmemleak_erase() requires its correct value.
3053	 */
3054	ac = cpu_cache_get(cachep);
3055
3056out:
3057	/*
3058	 * To avoid a false negative, if an object that is in one of the
3059	 * per-CPU caches is leaked, we need to make sure kmemleak doesn't
3060	 * treat the array pointers as a reference to the object.
3061	 */
3062	if (objp)
3063		kmemleak_erase(&ac->entry[ac->avail]);
3064	return objp;
3065}
3066
3067#ifdef CONFIG_NUMA
3068/*
3069 * Try allocating on another node if PFA_SPREAD_SLAB or a mempolicy is set.
3070 *
3071 * If we are in_interrupt, then process context, including cpusets and
3072 * mempolicy, may not apply and should not be used for allocation policy.
3073 */
3074static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
3075{
3076	int nid_alloc, nid_here;
3077
3078	if (in_interrupt() || (flags & __GFP_THISNODE))
3079		return NULL;
3080	nid_alloc = nid_here = numa_mem_id();
3081	if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
3082		nid_alloc = cpuset_slab_spread_node();
3083	else if (current->mempolicy)
3084		nid_alloc = mempolicy_slab_node();
3085	if (nid_alloc != nid_here)
3086		return ____cache_alloc_node(cachep, flags, nid_alloc);
3087	return NULL;
3088}
3089
3090/*
3091 * Fallback function if there was no memory available and no objects on a
3092 * certain node and fallback is permitted. First we scan all the
3093 * available nodes for available objects. If that fails then we
3094 * perform an allocation without specifying a node. This allows the page
3095 * allocator to do its reclaim / fallback magic. We then insert the
3096 * slab into the proper nodelist and then allocate from it.
3097 */
3098static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
3099{
3100	struct zonelist *zonelist;
3101	struct zoneref *z;
3102	struct zone *zone;
3103	enum zone_type highest_zoneidx = gfp_zone(flags);
3104	void *obj = NULL;
3105	struct page *page;
3106	int nid;
3107	unsigned int cpuset_mems_cookie;
3108
3109	if (flags & __GFP_THISNODE)
3110		return NULL;
3111
3112retry_cpuset:
3113	cpuset_mems_cookie = read_mems_allowed_begin();
3114	zonelist = node_zonelist(mempolicy_slab_node(), flags);
3115
3116retry:
3117	/*
3118	 * Look through allowed nodes for objects available
3119	 * from existing per node queues.
3120	 */
3121	for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {
3122		nid = zone_to_nid(zone);
3123
3124		if (cpuset_zone_allowed(zone, flags) &&
3125			get_node(cache, nid) &&
3126			get_node(cache, nid)->free_objects) {
3127				obj = ____cache_alloc_node(cache,
3128					gfp_exact_node(flags), nid);
3129				if (obj)
3130					break;
3131		}
3132	}
3133
3134	if (!obj) {
3135		/*
3136		 * This allocation will be performed within the constraints
3137		 * of the current cpuset / memory policy requirements.
3138		 * We may trigger various forms of reclaim on the allowed
3139		 * set and go into memory reserves if necessary.
3140		 */
3141		page = cache_grow_begin(cache, flags, numa_mem_id());
3142		cache_grow_end(cache, page);
3143		if (page) {
3144			nid = page_to_nid(page);
3145			obj = ____cache_alloc_node(cache,
3146				gfp_exact_node(flags), nid);
3147
3148			/*
3149			 * Another processor may allocate the objects in
3150			 * the slab since we are not holding any locks.
3151			 */
3152			if (!obj)
3153				goto retry;
3154		}
3155	}
3156
3157	if (unlikely(!obj && read_mems_allowed_retry(cpuset_mems_cookie)))
3158		goto retry_cpuset;
3159	return obj;
3160}
3161
3162/*
3163 * An interface to enable slab creation on nodeid
3164 */
3165static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
3166				int nodeid)
3167{
3168	struct page *page;
3169	struct kmem_cache_node *n;
3170	void *obj = NULL;
3171	void *list = NULL;
3172
3173	VM_BUG_ON(nodeid < 0 || nodeid >= MAX_NUMNODES);
3174	n = get_node(cachep, nodeid);
3175	BUG_ON(!n);
3176
3177	check_irq_off();
3178	spin_lock(&n->list_lock);
3179	page = get_first_slab(n, false);
3180	if (!page)
3181		goto must_grow;
3182
3183	check_spinlock_acquired_node(cachep, nodeid);
3184
3185	STATS_INC_NODEALLOCS(cachep);
3186	STATS_INC_ACTIVE(cachep);
3187	STATS_SET_HIGH(cachep);
3188
3189	BUG_ON(page->active == cachep->num);
3190
3191	obj = slab_get_obj(cachep, page);
3192	n->free_objects--;
3193
3194	fixup_slab_list(cachep, n, page, &list);
3195
3196	spin_unlock(&n->list_lock);
3197	fixup_objfreelist_debug(cachep, &list);
3198	return obj;
3199
3200must_grow:
3201	spin_unlock(&n->list_lock);
3202	page = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid);
3203	if (page) {
3204		/* This slab isn't counted yet so don't update free_objects */
3205		obj = slab_get_obj(cachep, page);
3206	}
3207	cache_grow_end(cachep, page);
3208
3209	return obj ? obj : fallback_alloc(cachep, flags);
3210}
3211
3212static __always_inline void *
3213slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
3214		   unsigned long caller)
3215{
3216	unsigned long save_flags;
3217	void *ptr;
3218	int slab_node = numa_mem_id();
3219	struct obj_cgroup *objcg = NULL;
3220
3221	flags &= gfp_allowed_mask;
3222	cachep = slab_pre_alloc_hook(cachep, &objcg, 1, flags);
3223	if (unlikely(!cachep))
3224		return NULL;
3225
3226	cache_alloc_debugcheck_before(cachep, flags);
3227	local_irq_save(save_flags);
3228
3229	if (nodeid == NUMA_NO_NODE)
3230		nodeid = slab_node;
3231
3232	if (unlikely(!get_node(cachep, nodeid))) {
3233		/* Node not bootstrapped yet */
3234		ptr = fallback_alloc(cachep, flags);
3235		goto out;
3236	}
3237
3238	if (nodeid == slab_node) {
3239		/*
3240		 * Use the locally cached objects if possible.
3241		 * However ____cache_alloc does not allow fallback
3242		 * to other nodes. It may fail while we still have
3243		 * objects on other nodes available.
3244		 */
3245		ptr = ____cache_alloc(cachep, flags);
3246		if (ptr)
3247			goto out;
3248	}
3249	/* ___cache_alloc_node can fall back to other nodes */
3250	ptr = ____cache_alloc_node(cachep, flags, nodeid);
3251  out:
3252	local_irq_restore(save_flags);
3253	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
3254
3255	if (unlikely(slab_want_init_on_alloc(flags, cachep)) && ptr)
3256		memset(ptr, 0, cachep->object_size);
3257
3258	slab_post_alloc_hook(cachep, objcg, flags, 1, &ptr);
3259	return ptr;
3260}
3261
3262static __always_inline void *
3263__do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
3264{
3265	void *objp;
3266
3267	if (current->mempolicy || cpuset_do_slab_mem_spread()) {
3268		objp = alternate_node_alloc(cache, flags);
3269		if (objp)
3270			goto out;
3271	}
3272	objp = ____cache_alloc(cache, flags);
3273
3274	/*
3275	 * We may just have run out of memory on the local node.
3276	 * ____cache_alloc_node() knows how to locate memory on other nodes
3277	 */
3278	if (!objp)
3279		objp = ____cache_alloc_node(cache, flags, numa_mem_id());
3280
3281  out:
3282	return objp;
3283}
3284#else
3285
3286static __always_inline void *
3287__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3288{
3289	return ____cache_alloc(cachep, flags);
3290}
3291
3292#endif /* CONFIG_NUMA */
3293
3294static __always_inline void *
3295slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
3296{
3297	unsigned long save_flags;
3298	void *objp;
3299	struct obj_cgroup *objcg = NULL;
3300
3301	flags &= gfp_allowed_mask;
3302	cachep = slab_pre_alloc_hook(cachep, &objcg, 1, flags);
3303	if (unlikely(!cachep))
3304		return NULL;
3305
3306	cache_alloc_debugcheck_before(cachep, flags);
3307	local_irq_save(save_flags);
3308	objp = __do_cache_alloc(cachep, flags);
3309	local_irq_restore(save_flags);
3310	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
3311	prefetchw(objp);
3312
3313	if (unlikely(slab_want_init_on_alloc(flags, cachep)) && objp)
3314		memset(objp, 0, cachep->object_size);
3315
3316	slab_post_alloc_hook(cachep, objcg, flags, 1, &objp);
3317	return objp;
3318}
3319
3320/*
3321 * Caller needs to acquire the correct kmem_cache_node's list_lock.
3322 * @list: list of detached free slabs, to be freed by the caller
3323 */
3324static void free_block(struct kmem_cache *cachep, void **objpp,
3325			int nr_objects, int node, struct list_head *list)
3326{
3327	int i;
3328	struct kmem_cache_node *n = get_node(cachep, node);
3329	struct page *page;
3330
3331	n->free_objects += nr_objects;
3332
3333	for (i = 0; i < nr_objects; i++) {
3334		void *objp;
3335		struct page *page;
3336
3337		objp = objpp[i];
3338
3339		page = virt_to_head_page(objp);
3340		list_del(&page->slab_list);
3341		check_spinlock_acquired_node(cachep, node);
3342		slab_put_obj(cachep, page, objp);
3343		STATS_DEC_ACTIVE(cachep);
3344
3345		/* fixup slab chains */
3346		if (page->active == 0) {
3347			list_add(&page->slab_list, &n->slabs_free);
3348			n->free_slabs++;
3349		} else {
3350			/* Unconditionally move a slab to the end of the
3351			 * partial list on free - maximum time for the
3352			 * other objects to be freed, too.
3353			 */
3354			list_add_tail(&page->slab_list, &n->slabs_partial);
3355		}
3356	}
3357
3358	while (n->free_objects > n->free_limit && !list_empty(&n->slabs_free)) {
3359		n->free_objects -= cachep->num;
3360
3361		page = list_last_entry(&n->slabs_free, struct page, slab_list);
3362		list_move(&page->slab_list, list);
3363		n->free_slabs--;
3364		n->total_slabs--;
3365	}
3366}
3367
3368static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
3369{
3370	int batchcount;
3371	struct kmem_cache_node *n;
3372	int node = numa_mem_id();
3373	LIST_HEAD(list);
3374
3375	batchcount = ac->batchcount;
3376
3377	check_irq_off();
3378	n = get_node(cachep, node);
3379	spin_lock(&n->list_lock);
3380	if (n->shared) {
3381		struct array_cache *shared_array = n->shared;
3382		int max = shared_array->limit - shared_array->avail;
3383		if (max) {
3384			if (batchcount > max)
3385				batchcount = max;
3386			memcpy(&(shared_array->entry[shared_array->avail]),
3387			       ac->entry, sizeof(void *) * batchcount);
3388			shared_array->avail += batchcount;
3389			goto free_done;
3390		}
3391	}
3392
3393	free_block(cachep, ac->entry, batchcount, node, &list);
3394free_done:
3395#if STATS
3396	{
3397		int i = 0;
3398		struct page *page;
3399
3400		list_for_each_entry(page, &n->slabs_free, slab_list) {
3401			BUG_ON(page->active);
3402
3403			i++;
3404		}
3405		STATS_SET_FREEABLE(cachep, i);
3406	}
3407#endif
3408	spin_unlock(&n->list_lock);
3409	ac->avail -= batchcount;
3410	memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
3411	slabs_destroy(cachep, &list);
3412}
3413
3414/*
3415 * Release an obj back to its cache. If the obj has a constructed state, it must
3416 * be in this state _before_ it is released.  Called with disabled ints.
3417 */
3418static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp,
3419					 unsigned long caller)
3420{
3421	/* Put the object into the quarantine, don't touch it for now. */
3422	if (kasan_slab_free(cachep, objp, _RET_IP_))
3423		return;
3424
3425	/* Use KCSAN to help debug racy use-after-free. */
3426	if (!(cachep->flags & SLAB_TYPESAFE_BY_RCU))
3427		__kcsan_check_access(objp, cachep->object_size,
3428				     KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT);
3429
3430	___cache_free(cachep, objp, caller);
3431}
3432
3433void ___cache_free(struct kmem_cache *cachep, void *objp,
3434		unsigned long caller)
3435{
3436	struct array_cache *ac = cpu_cache_get(cachep);
3437
3438	check_irq_off();
3439	if (unlikely(slab_want_init_on_free(cachep)))
3440		memset(objp, 0, cachep->object_size);
3441	kmemleak_free_recursive(objp, cachep->flags);
3442	objp = cache_free_debugcheck(cachep, objp, caller);
3443	memcg_slab_free_hook(cachep, virt_to_head_page(objp), objp);
3444
3445	/*
3446	 * Skip calling cache_free_alien() when the platform is not numa.
3447	 * This will avoid cache misses that happen while accessing slabp (which
3448	 * is a per-page memory reference) to get nodeid. Instead use a global
3449	 * variable to skip the call, which is most likely to be present in
3450	 * the cache.
3451	 */
3452	if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
3453		return;
3454
3455	if (ac->avail < ac->limit) {
3456		STATS_INC_FREEHIT(cachep);
3457	} else {
3458		STATS_INC_FREEMISS(cachep);
3459		cache_flusharray(cachep, ac);
3460	}
3461
3462	if (sk_memalloc_socks()) {
3463		struct page *page = virt_to_head_page(objp);
3464
3465		if (unlikely(PageSlabPfmemalloc(page))) {
3466			cache_free_pfmemalloc(cachep, page, objp);
3467			return;
3468		}
3469	}
3470
3471	__free_one(ac, objp);
3472}
3473
3474/**
3475 * kmem_cache_alloc - Allocate an object
3476 * @cachep: The cache to allocate from.
3477 * @flags: See kmalloc().
3478 *
3479 * Allocate an object from this cache.  The flags are only relevant
3480 * if the cache has no available objects.
3481 *
3482 * Return: pointer to the new object or %NULL in case of error
3483 */
3484void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3485{
3486	void *ret = slab_alloc(cachep, flags, _RET_IP_);
3487
3488	trace_kmem_cache_alloc(_RET_IP_, ret,
3489			       cachep->object_size, cachep->size, flags);
3490
3491	return ret;
3492}
3493EXPORT_SYMBOL(kmem_cache_alloc);
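/*
 * Illustrative sketch (not part of slab.c): the usual allocate/initialize/
 * free cycle against a dedicated cache via the API above. The cache and
 * structure names are made up for the example.
 */
#if 0
#include <linux/slab.h>

struct example_item {
	int value;
};

static struct kmem_cache *example_item_cache;

static struct example_item *example_item_new(int value)
{
	struct example_item *item;

	/* GFP_KERNEL may sleep; it only matters if the cache has to grow. */
	item = kmem_cache_alloc(example_item_cache, GFP_KERNEL);
	if (item)
		item->value = value;
	return item;
}

static void example_item_delete(struct example_item *item)
{
	kmem_cache_free(example_item_cache, item);
}
#endif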
3494
3495static __always_inline void
3496cache_alloc_debugcheck_after_bulk(struct kmem_cache *s, gfp_t flags,
3497				  size_t size, void **p, unsigned long caller)
3498{
3499	size_t i;
3500
3501	for (i = 0; i < size; i++)
3502		p[i] = cache_alloc_debugcheck_after(s, flags, p[i], caller);
3503}
3504
3505int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
3506			  void **p)
3507{
3508	size_t i;
3509	struct obj_cgroup *objcg = NULL;
3510
3511	s = slab_pre_alloc_hook(s, &objcg, size, flags);
3512	if (!s)
3513		return 0;
3514
3515	cache_alloc_debugcheck_before(s, flags);
3516
3517	local_irq_disable();
3518	for (i = 0; i < size; i++) {
3519		void *objp = __do_cache_alloc(s, flags);
3520
3521		if (unlikely(!objp))
3522			goto error;
3523		p[i] = objp;
3524	}
3525	local_irq_enable();
3526
3527	cache_alloc_debugcheck_after_bulk(s, flags, size, p, _RET_IP_);
3528
3529	/* Clear memory outside IRQ disabled section */
3530	if (unlikely(slab_want_init_on_alloc(flags, s)))
3531		for (i = 0; i < size; i++)
3532			memset(p[i], 0, s->object_size);
3533
3534	slab_post_alloc_hook(s, objcg, flags, size, p);
3535	/* FIXME: Trace call missing. Christoph would like a bulk variant */
3536	return size;
3537error:
3538	local_irq_enable();
3539	cache_alloc_debugcheck_after_bulk(s, flags, i, p, _RET_IP_);
3540	slab_post_alloc_hook(s, objcg, flags, i, p);
3541	__kmem_cache_free_bulk(s, i, p);
3542	return 0;
3543}
3544EXPORT_SYMBOL(kmem_cache_alloc_bulk);
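/*
 * Illustrative sketch (not part of slab.c): using the bulk interface above
 * to amortize the per-call overhead (IRQ toggling, per-cpu array lookups)
 * over a whole batch. The helper name is made up for the example.
 */
#if 0
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>

static int example_fill_batch(struct kmem_cache *cache)
{
	void *objs[16];
	int got;

	/* Returns the number of objects allocated, or 0 if none could be. */
	got = kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(objs), objs);
	if (!got)
		return -ENOMEM;

	/* ... use objs[0..got-1] ... */

	kmem_cache_free_bulk(cache, got, objs);
	return 0;
}
#endif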
3545
3546#ifdef CONFIG_TRACING
3547void *
3548kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
3549{
3550	void *ret;
3551
3552	ret = slab_alloc(cachep, flags, _RET_IP_);
3553
3554	ret = kasan_kmalloc(cachep, ret, size, flags);
3555	trace_kmalloc(_RET_IP_, ret,
3556		      size, cachep->size, flags);
3557	return ret;
3558}
3559EXPORT_SYMBOL(kmem_cache_alloc_trace);
3560#endif
3561
3562#ifdef CONFIG_NUMA
3563/**
3564 * kmem_cache_alloc_node - Allocate an object on the specified node
3565 * @cachep: The cache to allocate from.
3566 * @flags: See kmalloc().
3567 * @nodeid: node number of the target node.
3568 *
3569 * Identical to kmem_cache_alloc but it will allocate memory on the given
3570 * node, which can improve the performance for cpu bound structures.
3571 *
3572 * Fallback to other node is possible if __GFP_THISNODE is not set.
3573 *
3574 * Return: pointer to the new object or %NULL in case of error
3575 */
3576void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
3577{
3578	void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
3579
3580	trace_kmem_cache_alloc_node(_RET_IP_, ret,
3581				    cachep->object_size, cachep->size,
3582				    flags, nodeid);
3583
3584	return ret;
3585}
3586EXPORT_SYMBOL(kmem_cache_alloc_node);
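/*
 * Illustrative sketch (not part of slab.c): requesting an object close to a
 * given NUMA node with the helper above, falling back to the local node when
 * the caller does not care. The wrapper name is made up for the example.
 */
#if 0
#include <linux/slab.h>
#include <linux/topology.h>

static void *example_alloc_near(struct kmem_cache *cache, int nid)
{
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();

	/* Without __GFP_THISNODE, other nodes may still be used as fallback. */
	return kmem_cache_alloc_node(cache, GFP_KERNEL, nid);
}
#endif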
3587
3588#ifdef CONFIG_TRACING
3589void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
3590				  gfp_t flags,
3591				  int nodeid,
3592				  size_t size)
3593{
3594	void *ret;
3595
3596	ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
3597
3598	ret = kasan_kmalloc(cachep, ret, size, flags);
3599	trace_kmalloc_node(_RET_IP_, ret,
3600			   size, cachep->size,
3601			   flags, nodeid);
3602	return ret;
3603}
3604EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
3605#endif
3606
3607static __always_inline void *
3608__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
3609{
3610	struct kmem_cache *cachep;
3611	void *ret;
3612
3613	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
3614		return NULL;
3615	cachep = kmalloc_slab(size, flags);
3616	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3617		return cachep;
3618	ret = kmem_cache_alloc_node_trace(cachep, flags, node, size);
3619	ret = kasan_kmalloc(cachep, ret, size, flags);
3620
3621	return ret;
3622}
3623
3624void *__kmalloc_node(size_t size, gfp_t flags, int node)
3625{
3626	return __do_kmalloc_node(size, flags, node, _RET_IP_);
3627}
3628EXPORT_SYMBOL(__kmalloc_node);
3629
3630void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
3631		int node, unsigned long caller)
3632{
3633	return __do_kmalloc_node(size, flags, node, caller);
3634}
3635EXPORT_SYMBOL(__kmalloc_node_track_caller);
3636#endif /* CONFIG_NUMA */
3637
3638/**
3639 * __do_kmalloc - allocate memory
3640 * @size: how many bytes of memory are required.
3641 * @flags: the type of memory to allocate (see kmalloc).
3642 * @caller: function caller for debug tracking of the caller
3643 *
3644 * Return: pointer to the allocated memory or %NULL in case of error
3645 */
3646static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
3647					  unsigned long caller)
3648{
3649	struct kmem_cache *cachep;
3650	void *ret;
3651
3652	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
3653		return NULL;
3654	cachep = kmalloc_slab(size, flags);
3655	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3656		return cachep;
3657	ret = slab_alloc(cachep, flags, caller);
3658
3659	ret = kasan_kmalloc(cachep, ret, size, flags);
3660	trace_kmalloc(caller, ret,
3661		      size, cachep->size, flags);
3662
3663	return ret;
3664}
3665
3666void *__kmalloc(size_t size, gfp_t flags)
3667{
3668	return __do_kmalloc(size, flags, _RET_IP_);
3669}
3670EXPORT_SYMBOL(__kmalloc);
3671
3672void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
3673{
3674	return __do_kmalloc(size, flags, caller);
3675}
3676EXPORT_SYMBOL(__kmalloc_track_caller);
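/*
 * Illustrative sketch (not part of slab.c): __kmalloc() above (backed by
 * __do_kmalloc()) is what the kmalloc() wrapper falls back to for sizes that
 * are not compile-time constants; a typical caller simply pairs kmalloc()
 * with kfree() and lets the size-class lookup pick the backing cache. The
 * helper name is made up for the example.
 */
#if 0
#include <linux/slab.h>
#include <linux/string.h>

static char *example_dup_buffer(const char *src, size_t len)
{
	char *buf = kmalloc(len, GFP_KERNEL);

	if (!buf)
		return NULL;
	memcpy(buf, src, len);
	return buf;
}
#endif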
3677
3678/**
3679 * kmem_cache_free - Deallocate an object
3680 * @cachep: The cache the allocation was from.
3681 * @objp: The previously allocated object.
3682 *
3683 * Free an object which was previously allocated from this
3684 * cache.
3685 */
3686void kmem_cache_free(struct kmem_cache *cachep, void *objp)
3687{
3688	unsigned long flags;
3689	cachep = cache_from_obj(cachep, objp);
3690	if (!cachep)
3691		return;
3692
3693	local_irq_save(flags);
3694	debug_check_no_locks_freed(objp, cachep->object_size);
3695	if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
3696		debug_check_no_obj_freed(objp, cachep->object_size);
3697	__cache_free(cachep, objp, _RET_IP_);
3698	local_irq_restore(flags);
3699
3700	trace_kmem_cache_free(_RET_IP_, objp);
3701}
3702EXPORT_SYMBOL(kmem_cache_free);
3703
3704void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
3705{
3706	struct kmem_cache *s;
3707	size_t i;
3708
3709	local_irq_disable();
3710	for (i = 0; i < size; i++) {
3711		void *objp = p[i];
3712
3713		if (!orig_s) /* called via kfree_bulk */
3714			s = virt_to_cache(objp);
3715		else
3716			s = cache_from_obj(orig_s, objp);
3717		if (!s)
3718			continue;
3719
3720		debug_check_no_locks_freed(objp, s->object_size);
3721		if (!(s->flags & SLAB_DEBUG_OBJECTS))
3722			debug_check_no_obj_freed(objp, s->object_size);
3723
3724		__cache_free(s, objp, _RET_IP_);
3725	}
3726	local_irq_enable();
3727
3728	/* FIXME: add tracing */
3729}
3730EXPORT_SYMBOL(kmem_cache_free_bulk);
3731
3732/**
3733 * kfree - free previously allocated memory
3734 * @objp: pointer returned by kmalloc.
3735 *
3736 * If @objp is NULL, no operation is performed.
3737 *
3738 * Don't free memory not originally allocated by kmalloc()
3739 * or you will run into trouble.
3740 */
3741void kfree(const void *objp)
3742{
3743	struct kmem_cache *c;
3744	unsigned long flags;
3745
3746	trace_kfree(_RET_IP_, objp);
3747
3748	if (unlikely(ZERO_OR_NULL_PTR(objp)))
3749		return;
3750	local_irq_save(flags);
3751	kfree_debugcheck(objp);
3752	c = virt_to_cache(objp);
3753	if (!c) {
3754		local_irq_restore(flags);
3755		return;
3756	}
3757	debug_check_no_locks_freed(objp, c->object_size);
3758
3759	debug_check_no_obj_freed(objp, c->object_size);
3760	__cache_free(c, (void *)objp, _RET_IP_);
3761	local_irq_restore(flags);
3762}
3763EXPORT_SYMBOL(kfree);
3764
3765/*
3766 * This initializes kmem_cache_node or resizes various caches for all nodes.
3767 */
3768static int setup_kmem_cache_nodes(struct kmem_cache *cachep, gfp_t gfp)
3769{
3770	int ret;
3771	int node;
3772	struct kmem_cache_node *n;
3773
3774	for_each_online_node(node) {
3775		ret = setup_kmem_cache_node(cachep, node, gfp, true);
3776		if (ret)
3777			goto fail;
 
3778
3779	}
3780
3781	return 0;
3782
3783fail:
3784	if (!cachep->list.next) {
3785		/* Cache is not active yet. Roll back what we did */
3786		node--;
3787		while (node >= 0) {
3788			n = get_node(cachep, node);
3789			if (n) {
 
3790				kfree(n->shared);
3791				free_alien_cache(n->alien);
3792				kfree(n);
3793				cachep->node[node] = NULL;
3794			}
3795			node--;
3796		}
3797	}
3798	return -ENOMEM;
3799}
3800
3801/* Always called with the slab_mutex held */
3802static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
3803			    int batchcount, int shared, gfp_t gfp)
3804{
3805	struct array_cache __percpu *cpu_cache, *prev;
3806	int cpu;
3807
3808	cpu_cache = alloc_kmem_cache_cpus(cachep, limit, batchcount);
3809	if (!cpu_cache)
 
3810		return -ENOMEM;
3811
3812	prev = cachep->cpu_cache;
3813	cachep->cpu_cache = cpu_cache;
3814	/*
3815	 * Without a previous cpu_cache there's no need to synchronize remote
3816	 * cpus, so skip the IPIs.
3817	 */
3818	if (prev)
3819		kick_all_cpus_sync();
3820
3821	check_irq_on();
3822	cachep->batchcount = batchcount;
3823	cachep->limit = limit;
3824	cachep->shared = shared;
3825
3826	if (!prev)
3827		goto setup_node;
3828
3829	for_each_online_cpu(cpu) {
3830		LIST_HEAD(list);
3831		int node;
3832		struct kmem_cache_node *n;
3833		struct array_cache *ac = per_cpu_ptr(prev, cpu);
 
3834
3835		node = cpu_to_mem(cpu);
3836		n = get_node(cachep, node);
3837		spin_lock_irq(&n->list_lock);
3838		free_block(cachep, ac->entry, ac->avail, node, &list);
3839		spin_unlock_irq(&n->list_lock);
3840		slabs_destroy(cachep, &list);
3841	}
3842	free_percpu(prev);
3843
3844setup_node:
3845	return setup_kmem_cache_nodes(cachep, gfp);
3846}
3847
3848/* Called with slab_mutex held always */
3849static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
3850{
3851	int err;
3852	int limit = 0;
3853	int shared = 0;
3854	int batchcount = 0;
3855
3856	err = cache_random_seq_create(cachep, cachep->num, gfp);
3857	if (err)
3858		goto end;
3859
3860	if (limit && shared && batchcount)
3861		goto skip_setup;
3862	/*
3863	 * The head array serves three purposes:
3864	 * - create a LIFO ordering, i.e. return objects that are cache-warm
3865	 * - reduce the number of spinlock operations.
3866	 * - reduce the number of linked list operations on the slab and
3867	 *   bufctl chains: array operations are cheaper.
3868	 * The numbers are guessed; we should auto-tune as described by
3869	 * Bonwick.
3870	 */
3871	if (cachep->size > 131072)
3872		limit = 1;
3873	else if (cachep->size > PAGE_SIZE)
3874		limit = 8;
3875	else if (cachep->size > 1024)
3876		limit = 24;
3877	else if (cachep->size > 256)
3878		limit = 54;
3879	else
3880		limit = 120;
3881
3882	/*
3883	 * CPU bound tasks (e.g. network routing) can exhibit cpu bound
3884	 * allocation behaviour: Most allocs on one cpu, most free operations
3885	 * on another cpu. For these cases, efficient object passing between
3886	 * cpus is necessary. This is provided by a shared array. The array
3887	 * replaces Bonwick's magazine layer.
3888	 * On uniprocessor, it's functionally equivalent (but less efficient)
3889	 * to a larger limit. Thus disabled by default.
3890	 */
3891	shared = 0;
3892	if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1)
3893		shared = 8;
3894
3895#if DEBUG
3896	/*
3897	 * With debugging enabled, a large batchcount leads to excessively long
3898	 * periods with local interrupts disabled. Limit the batchcount.
3899	 */
3900	if (limit > 32)
3901		limit = 32;
3902#endif
3903	batchcount = (limit + 1) / 2;
3904skip_setup:
3905	err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
3906end:
3907	if (err)
3908		pr_err("enable_cpucache failed for %s, error %d\n",
3909		       cachep->name, -err);
3910	return err;
3911}
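/*
 * Worked example, not part of the original file, of the heuristics above
 * for a 512-byte object cache on an SMP machine without DEBUG:
 *   cachep->size = 512          ->  limit = 54   (256 < size <= 1024)
 *   size <= PAGE_SIZE, >1 CPU   ->  shared = 8
 *   batchcount = (54 + 1) / 2   =   27
 */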
3912
3913/*
3914 * Drain an array if it contains any elements, taking the node lock only if
3915 * necessary. Note that the node list_lock also protects the array_cache
3916 * if drain_array() is used on the shared array.
3917 */
3918static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
3919			 struct array_cache *ac, int node)
3920{
3921	LIST_HEAD(list);
3922
3923	/* ac from n->shared can be freed if we don't hold the slab_mutex. */
3924	check_mutex_acquired();
3925
3926	if (!ac || !ac->avail)
3927		return;
3928
3929	if (ac->touched) {
3930		ac->touched = 0;
3931		return;
3932	}
3933
3934	spin_lock_irq(&n->list_lock);
3935	drain_array_locked(cachep, ac, node, false, &list);
3936	spin_unlock_irq(&n->list_lock);
3937
3938	slabs_destroy(cachep, &list);
3939}
3940
3941/**
3942 * cache_reap - Reclaim memory from caches.
3943 * @w: work descriptor
3944 *
3945 * Called from workqueue/eventd every few seconds.
3946 * Purpose:
3947 * - clear the per-cpu caches for this CPU.
3948 * - return freeable pages to the main free memory pool.
3949 *
3950 * If we cannot acquire the cache chain mutex then just give up - we'll try
3951 * again on the next iteration.
3952 */
3953static void cache_reap(struct work_struct *w)
3954{
3955	struct kmem_cache *searchp;
3956	struct kmem_cache_node *n;
3957	int node = numa_mem_id();
3958	struct delayed_work *work = to_delayed_work(w);
3959
3960	if (!mutex_trylock(&slab_mutex))
3961		/* Give up. Setup the next iteration. */
3962		goto out;
3963
3964	list_for_each_entry(searchp, &slab_caches, list) {
3965		check_irq_on();
3966
3967		/*
3968		 * We only take the node lock if absolutely necessary and we
3969		 * have established with reasonable certainty that
3970		 * we can do some work if the lock was obtained.
3971		 */
3972		n = get_node(searchp, node);
3973
3974		reap_alien(searchp, n);
3975
3976		drain_array(searchp, n, cpu_cache_get(searchp), node);
3977
3978		/*
3979		 * These are racy checks but it does not matter
3980		 * if we skip one check or scan twice.
3981		 */
3982		if (time_after(n->next_reap, jiffies))
3983			goto next;
3984
3985		n->next_reap = jiffies + REAPTIMEOUT_NODE;
3986
3987		drain_array(searchp, n, n->shared, node);
3988
3989		if (n->free_touched)
3990			n->free_touched = 0;
3991		else {
3992			int freed;
3993
3994			freed = drain_freelist(searchp, n, (n->free_limit +
3995				5 * searchp->num - 1) / (5 * searchp->num));
3996			STATS_ADD_REAPED(searchp, freed);
3997		}
3998next:
3999		cond_resched();
4000	}
4001	check_irq_on();
4002	mutex_unlock(&slab_mutex);
4003	next_reap_node();
4004out:
4005	/* Set up the next iteration */
4006	schedule_delayed_work_on(smp_processor_id(), work,
4007				round_jiffies_relative(REAPTIMEOUT_AC));
4008}
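/*
 * Worked example, not part of the original file, of the drain_freelist()
 * target used above: with n->free_limit = 120 and 30 objects per slab,
 * (120 + 5*30 - 1) / (5*30) = 1 slab, i.e. roughly a fifth of the node's
 * free limit, converted to whole slabs and rounded up, is reclaimed per
 * reap pass for an untouched free list.
 */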
4009
 
4010void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
4011{
4012	unsigned long active_objs, num_objs, active_slabs;
4013	unsigned long total_slabs = 0, free_objs = 0, shared_avail = 0;
4014	unsigned long free_slabs = 0;
4015	int node;
4016	struct kmem_cache_node *n;
4017
4018	for_each_kmem_cache_node(cachep, node, n) {
4019		check_irq_on();
4020		spin_lock_irq(&n->list_lock);
4021
4022		total_slabs += n->total_slabs;
4023		free_slabs += n->free_slabs;
4024		free_objs += n->free_objects;
4025
4026		if (n->shared)
4027			shared_avail += n->shared->avail;
4028
4029		spin_unlock_irq(&n->list_lock);
4030	}
4031	num_objs = total_slabs * cachep->num;
4032	active_slabs = total_slabs - free_slabs;
4033	active_objs = num_objs - free_objs;
4034
4035	sinfo->active_objs = active_objs;
4036	sinfo->num_objs = num_objs;
4037	sinfo->active_slabs = active_slabs;
4038	sinfo->num_slabs = total_slabs;
4039	sinfo->shared_avail = shared_avail;
4040	sinfo->limit = cachep->limit;
4041	sinfo->batchcount = cachep->batchcount;
4042	sinfo->shared = cachep->shared;
4043	sinfo->objects_per_slab = cachep->num;
4044	sinfo->cache_order = cachep->gfporder;
4045}
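/*
 * Worked example, not part of the original file, of the derived counters
 * above: a cache with 32 objects per slab, 10 slabs in total of which 2
 * are completely free, and 40 free objects overall reports
 *   num_objs     = 10 * 32  = 320
 *   active_slabs = 10 - 2   =   8
 *   active_objs  = 320 - 40 = 280
 */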
4046
4047void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
4048{
4049#if STATS
4050	{			/* node stats */
4051		unsigned long high = cachep->high_mark;
4052		unsigned long allocs = cachep->num_allocations;
4053		unsigned long grown = cachep->grown;
4054		unsigned long reaped = cachep->reaped;
4055		unsigned long errors = cachep->errors;
4056		unsigned long max_freeable = cachep->max_freeable;
4057		unsigned long node_allocs = cachep->node_allocs;
4058		unsigned long node_frees = cachep->node_frees;
4059		unsigned long overflows = cachep->node_overflow;
4060
4061		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu %4lu %4lu %4lu %4lu %4lu",
4062			   allocs, high, grown,
4063			   reaped, errors, max_freeable, node_allocs,
4064			   node_frees, overflows);
4065	}
4066	/* cpu stats */
4067	{
4068		unsigned long allochit = atomic_read(&cachep->allochit);
4069		unsigned long allocmiss = atomic_read(&cachep->allocmiss);
4070		unsigned long freehit = atomic_read(&cachep->freehit);
4071		unsigned long freemiss = atomic_read(&cachep->freemiss);
4072
4073		seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
4074			   allochit, allocmiss, freehit, freemiss);
4075	}
4076#endif
4077}
4078
4079#define MAX_SLABINFO_WRITE 128
4080/**
4081 * slabinfo_write - Tuning for the slab allocator
4082 * @file: unused
4083 * @buffer: user buffer
4084 * @count: data length
4085 * @ppos: unused
4086 *
4087 * Return: %0 on success, negative error code otherwise.
4088 */
4089ssize_t slabinfo_write(struct file *file, const char __user *buffer,
4090		       size_t count, loff_t *ppos)
4091{
4092	char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
4093	int limit, batchcount, shared, res;
4094	struct kmem_cache *cachep;
4095
4096	if (count > MAX_SLABINFO_WRITE)
4097		return -EINVAL;
4098	if (copy_from_user(&kbuf, buffer, count))
4099		return -EFAULT;
4100	kbuf[MAX_SLABINFO_WRITE] = '\0';
4101
4102	tmp = strchr(kbuf, ' ');
4103	if (!tmp)
4104		return -EINVAL;
4105	*tmp = '\0';
4106	tmp++;
4107	if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
4108		return -EINVAL;
4109
4110	/* Find the cache in the chain of caches. */
4111	mutex_lock(&slab_mutex);
4112	res = -EINVAL;
4113	list_for_each_entry(cachep, &slab_caches, list) {
4114		if (!strcmp(cachep->name, kbuf)) {
4115			if (limit < 1 || batchcount < 1 ||
4116					batchcount > limit || shared < 0) {
4117				res = 0;
4118			} else {
4119				res = do_tune_cpucache(cachep, limit,
4120						       batchcount, shared,
4121						       GFP_KERNEL);
4122			}
4123			break;
4124		}
4125	}
4126	mutex_unlock(&slab_mutex);
4127	if (res >= 0)
4128		res = count;
4129	return res;
4130}
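/*
 * Illustrative usage, not part of the original file: the buffer parsed
 * above has the form "<cache name> <limit> <batchcount> <shared>", so a
 * cache can be tuned from user space with something like
 *
 *	echo "dentry 120 60 8" > /proc/slabinfo
 *
 * ("dentry" is only an example).  Out-of-range values make the write
 * return success without changing anything, as res is set to 0 above.
 */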
4131
4132#ifdef CONFIG_HARDENED_USERCOPY
4133/*
4134 * Rejects incorrectly sized objects and objects that are to be copied
4135 * to/from userspace but do not fall entirely within the containing slab
4136 * cache's usercopy region.
4137 *
4138 * Returns to the caller if the check passes; otherwise the copy is rejected
4139 * via usercopy_abort(), or only warned about under usercopy_fallback.
4140 */
4141void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
4142			 bool to_user)
4143{
4144	struct kmem_cache *cachep;
4145	unsigned int objnr;
4146	unsigned long offset;
4147
4148	ptr = kasan_reset_tag(ptr);
4149
4150	/* Find and validate object. */
4151	cachep = page->slab_cache;
4152	objnr = obj_to_index(cachep, page, (void *)ptr);
4153	BUG_ON(objnr >= cachep->num);
4154
4155	/* Find offset within object. */
4156	offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
4157
4158	/* Allow address range falling entirely within usercopy region. */
4159	if (offset >= cachep->useroffset &&
4160	    offset - cachep->useroffset <= cachep->usersize &&
4161	    n <= cachep->useroffset - offset + cachep->usersize)
4162		return;
4163
4164	/*
4165	 * If the copy is still within the allocated object, produce
4166	 * a warning instead of rejecting the copy. This is intended
4167	 * to be a temporary method to find any missing usercopy
4168	 * whitelists.
4169	 */
4170	if (usercopy_fallback &&
4171	    offset <= cachep->object_size &&
4172	    n <= cachep->object_size - offset) {
4173		usercopy_warn("SLAB object", cachep->name, to_user, offset, n);
4174		return;
4175	}
4176
4177	usercopy_abort("SLAB object", cachep->name, to_user, offset, n);
4178}
4179#endif /* CONFIG_HARDENED_USERCOPY */
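/*
 * Worked example, not part of the original file, of the usercopy window
 * check above.  For a cache with useroffset = 16 and usersize = 32, a copy
 * covering bytes [24, 32) of the object (offset = 24, n = 8) is allowed:
 *   offset >= useroffset                 24 >= 16
 *   offset - useroffset <= usersize       8 <= 32
 *   n <= useroffset - offset + usersize   8 <= 24
 * A 32-byte copy at the same offset fails the last test (32 > 24) and is
 * rejected, or merely warned about when usercopy_fallback is set and the
 * copy still fits inside the allocated object.
 */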
4180
4181/**
4182 * __ksize -- Uninstrumented ksize.
4183 * @objp: pointer to the object
4184 *
4185 * Unlike ksize(), __ksize() is uninstrumented, and does not provide the same
4186 * safety checks as ksize() with KASAN instrumentation enabled.
4187 *
4188 * Return: size of the actual memory used by @objp in bytes
4189 */
4190size_t __ksize(const void *objp)
4191{
4192	struct kmem_cache *c;
4193	size_t size;
4194
4195	BUG_ON(!objp);
4196	if (unlikely(objp == ZERO_SIZE_PTR))
4197		return 0;
4198
4199	c = virt_to_cache(objp);
4200	size = c ? c->object_size : 0;
4201
4202	return size;
4203}
4204EXPORT_SYMBOL(__ksize);
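/*
 * Illustrative note, not part of the original file: __ksize() reports the
 * usable size of the backing object, which may exceed the requested size
 * because kmalloc() rounds up to the nearest size class.  Under common
 * configurations, for example, an object obtained with kmalloc(100) comes
 * from the 128-byte cache and __ksize() on it returns 128.
 */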
v3.15
 
   1/*
   2 * linux/mm/slab.c
   3 * Written by Mark Hemment, 1996/97.
   4 * (markhe@nextd.demon.co.uk)
   5 *
   6 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
   7 *
   8 * Major cleanup, different bufctl logic, per-cpu arrays
   9 *	(c) 2000 Manfred Spraul
  10 *
  11 * Cleanup, make the head arrays unconditional, preparation for NUMA
  12 * 	(c) 2002 Manfred Spraul
  13 *
  14 * An implementation of the Slab Allocator as described in outline in;
  15 *	UNIX Internals: The New Frontiers by Uresh Vahalia
  16 *	Pub: Prentice Hall	ISBN 0-13-101908-2
  17 * or with a little more detail in;
  18 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
  19 *	Jeff Bonwick (Sun Microsystems).
  20 *	Presented at: USENIX Summer 1994 Technical Conference
  21 *
  22 * The memory is organized in caches, one cache for each object type.
  23 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
  24 * Each cache consists out of many slabs (they are small (usually one
  25 * page long) and always contiguous), and each slab contains multiple
  26 * initialized objects.
  27 *
  28 * This means, that your constructor is used only for newly allocated
  29 * slabs and you must pass objects with the same initializations to
  30 * kmem_cache_free.
  31 *
  32 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
  33 * normal). If you need a special memory type, then must create a new
  34 * cache for that memory type.
  35 *
  36 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
  37 *   full slabs with 0 free objects
  38 *   partial slabs
  39 *   empty slabs with no allocated objects
  40 *
  41 * If partial slabs exist, then new allocations come from these slabs,
  42 * otherwise from empty slabs or new slabs are allocated.
  43 *
  44 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
  45 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
  46 *
  47 * Each cache has a short per-cpu head array, most allocs
  48 * and frees go into that array, and if that array overflows, then 1/2
  49 * of the entries in the array are given back into the global cache.
  50 * The head array is strictly LIFO and should improve the cache hit rates.
  51 * On SMP, it additionally reduces the spinlock operations.
  52 *
  53 * The c_cpuarray may not be read with enabled local interrupts -
  54 * it's changed with a smp_call_function().
  55 *
  56 * SMP synchronization:
  57 *  constructors and destructors are called without any locking.
  58 *  Several members in struct kmem_cache and struct slab never change, they
  59 *	are accessed without any locking.
  60 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
  61 *  	and local interrupts are disabled so slab code is preempt-safe.
  62 *  The non-constant members are protected with a per-cache irq spinlock.
  63 *
  64 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
  65 * in 2000 - many ideas in the current implementation are derived from
  66 * his patch.
  67 *
  68 * Further notes from the original documentation:
  69 *
  70 * 11 April '97.  Started multi-threading - markhe
  71 *	The global cache-chain is protected by the mutex 'slab_mutex'.
  72 *	The sem is only needed when accessing/extending the cache-chain, which
  73 *	can never happen inside an interrupt (kmem_cache_create(),
  74 *	kmem_cache_shrink() and kmem_cache_reap()).
  75 *
  76 *	At present, each engine can be growing a cache.  This should be blocked.
  77 *
  78 * 15 March 2005. NUMA slab allocator.
  79 *	Shai Fultheim <shai@scalex86.org>.
  80 *	Shobhit Dayal <shobhit@calsoftinc.com>
  81 *	Alok N Kataria <alokk@calsoftinc.com>
  82 *	Christoph Lameter <christoph@lameter.com>
  83 *
  84 *	Modified the slab allocator to be node aware on NUMA systems.
  85 *	Each node has its own list of partial, free and full slabs.
  86 *	All object allocations for a node occur from node specific slab lists.
  87 */
  88
  89#include	<linux/__KEEPIDENTS__B.h>
  90#include	<linux/__KEEPIDENTS__C.h>
  91#include	<linux/__KEEPIDENTS__D.h>
  92#include	<linux/__KEEPIDENTS__E.h>
  93#include	<linux/__KEEPIDENTS__F.h>
  94#include	<linux/__KEEPIDENTS__G.h>
  95#include	<linux/__KEEPIDENTS__H.h>
  96#include	<linux/__KEEPIDENTS__I.h>
  97#include	<linux/__KEEPIDENTS__J.h>
  98#include	<linux/proc_fs.h>
  99#include	<linux/__KEEPIDENTS__BA.h>
 100#include	<linux/__KEEPIDENTS__BB.h>
 101#include	<linux/__KEEPIDENTS__BC.h>
 102#include	<linux/cpu.h>
 103#include	<linux/__KEEPIDENTS__BD.h>
 104#include	<linux/__KEEPIDENTS__BE.h>
 105#include	<linux/rcupdate.h>
 106#include	<linux/__KEEPIDENTS__BF.h>
 107#include	<linux/__KEEPIDENTS__BG.h>
 108#include	<linux/__KEEPIDENTS__BH.h>
 109#include	<linux/kmemleak.h>
 110#include	<linux/__KEEPIDENTS__BI.h>
 111#include	<linux/__KEEPIDENTS__BJ.h>
 112#include	<linux/__KEEPIDENTS__CA-__KEEPIDENTS__CB.h>
 113#include	<linux/__KEEPIDENTS__CC.h>
 114#include	<linux/reciprocal_div.h>
 115#include	<linux/debugobjects.h>
 116#include	<linux/kmemcheck.h>
 117#include	<linux/__KEEPIDENTS__CD.h>
 118#include	<linux/__KEEPIDENTS__CE.h>
 
 119
 120#include	<net/__KEEPIDENTS__CF.h>
 121
 122#include	<asm/cacheflush.h>
 123#include	<asm/tlbflush.h>
 124#include	<asm/page.h>
 125
 126#include <trace/events/kmem.h>
 127
 128#include	"internal.h"
 129
 130#include	"slab.h"
 131
 132/*
 133 * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
 134 *		  0 for faster, smaller code (especially in the critical paths).
 135 *
 136 * STATS	- 1 to collect stats for /proc/slabinfo.
 137 *		  0 for faster, smaller code (especially in the critical paths).
 138 *
 139 * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
 140 */
 141
 142#ifdef CONFIG_DEBUG_SLAB
 143#define	DEBUG		1
 144#define	STATS		1
 145#define	FORCED_DEBUG	1
 146#else
 147#define	DEBUG		0
 148#define	STATS		0
 149#define	FORCED_DEBUG	0
 150#endif
 151
 152/* Shouldn't this be in a header file somewhere? */
 153#define	BYTES_PER_WORD		sizeof(void *)
 154#define	REDZONE_ALIGN		max(BYTES_PER_WORD, __alignof__(unsigned long long))
 155
 156#ifndef ARCH_KMALLOC_FLAGS
 157#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
 158#endif
 159
 160#define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \
 161				<= SLAB_OBJ_MIN_SIZE) ? 1 : 0)
 162
 163#if FREELIST_BYTE_INDEX
 164typedef unsigned char freelist_idx_t;
 165#else
 166typedef unsigned short freelist_idx_t;
 167#endif
 168
 169#define SLAB_OBJ_MAX_NUM ((1 << sizeof(freelist_idx_t) * BITS_PER_BYTE) - 1)
 170
 171/*
 172 * true if a page was allocated from pfmemalloc reserves for network-based
 173 * swap
 174 */
 175static bool pfmemalloc_active __read_mostly;
 176
 177/*
 178 * struct array_cache
 179 *
 180 * Purpose:
 181 * - LIFO ordering, to hand out cache-warm objects from _alloc
 182 * - reduce the number of linked list operations
 183 * - reduce spinlock operations
 184 *
 185 * The limit is stored in the per-cpu structure to reduce the data cache
 186 * footprint.
 187 *
 188 */
 189struct array_cache {
 190	unsigned int avail;
 191	unsigned int limit;
 192	unsigned int batchcount;
 193	unsigned int touched;
 194	spinlock_t lock;
 195	void *entry[];	/*
 196			 * Must have this definition in here for the proper
 197			 * alignment of array_cache. Also simplifies accessing
 198			 * the entries.
 199			 *
 200			 * Entries should not be directly dereferenced as
 201			 * entries belonging to slabs marked pfmemalloc will
 202			 * have their low bit set to SLAB_OBJ_PFMEMALLOC.
 203			 */
 204};
 205
 206#define SLAB_OBJ_PFMEMALLOC	1
 207static inline bool is_obj_pfmemalloc(void *objp)
 208{
 209	return (unsigned long)objp & SLAB_OBJ_PFMEMALLOC;
 210}
 211
 212static inline void set_obj_pfmemalloc(void **objp)
 213{
 214	*objp = (void *)((unsigned long)*objp | SLAB_OBJ_PFMEMALLOC);
 215	return;
 216}
 217
 218static inline void clear_obj_pfmemalloc(void **objp)
 219{
 220	*objp = (void *)((unsigned long)*objp & ~SLAB_OBJ_PFMEMALLOC);
 221}
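/*
 * Illustrative note, not part of the original file: the helpers above tag
 * a pointer by setting its least significant bit.  Slab objects are at
 * least word aligned, so bit 0 of a valid object pointer is always clear
 * and can carry the pfmemalloc marker without losing information, e.g.
 *   objp            = 0xffff880012345680   (bit 0 clear)
 *   tagged          = 0xffff880012345681   (SLAB_OBJ_PFMEMALLOC set)
 *   tagged & ~1UL   = 0xffff880012345680   (original pointer recovered)
 */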
 222
 223/*
 224 * bootstrap: The caches do not work without cpuarrays anymore, but the
 225 * cpuarrays are allocated from the generic caches...
 226 */
 227#define BOOT_CPUCACHE_ENTRIES	1
 228struct arraycache_init {
 229	struct array_cache cache;
 230	void *entries[BOOT_CPUCACHE_ENTRIES];
 231};
 232
 233/*
 234 * Need this for bootstrapping a per node allocator.
 235 */
 236#define NUM_INIT_LISTS (3 * MAX_NUMNODES)
 237static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS];
 238#define	CACHE_CACHE 0
 239#define	SIZE_AC MAX_NUMNODES
 240#define	SIZE_NODE (2 * MAX_NUMNODES)
 241
 242static int drain_freelist(struct kmem_cache *cache,
 243			struct kmem_cache_node *n, int tofree);
 244static void free_block(struct kmem_cache *cachep, void **objpp, int len,
 245			int node);
 
 246static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
 247static void cache_reap(struct work_struct *unused);
 248
 249static int slab_early_init = 1;
 250
 251#define INDEX_AC kmalloc_index(sizeof(struct arraycache_init))
 252#define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node))
 253
 254static void kmem_cache_node_init(struct kmem_cache_node *parent)
 255{
 256	INIT_LIST_HEAD(&parent->slabs_full);
 257	INIT_LIST_HEAD(&parent->slabs_partial);
 258	INIT_LIST_HEAD(&parent->slabs_free);
 259	parent->shared = NULL;
 260	parent->alien = NULL;
 261	parent->colour_next = 0;
 262	spin_lock_init(&parent->list_lock);
 263	parent->free_objects = 0;
 264	parent->free_touched = 0;
 265}
 266
 267#define MAKE_LIST(cachep, listp, slab, nodeid)				\
 268	do {								\
 269		INIT_LIST_HEAD(listp);					\
 270		list_splice(&(cachep->node[nodeid]->slab), listp);	\
 271	} while (0)
 272
 273#define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
 274	do {								\
 275	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
 276	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
 277	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
 278	} while (0)
 279
 280#define CFLGS_OFF_SLAB		(0x80000000UL)
 281#define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)
 282
 283#define BATCHREFILL_LIMIT	16
 284/*
 285 * Optimization question: fewer reaps mean a lower probability of unnecessary
 286 * cpucache drain/refill cycles.
 287 *
 288 * OTOH the cpuarrays can contain lots of objects,
 289 * which could lock up otherwise freeable slabs.
 290 */
 291#define REAPTIMEOUT_AC		(2*HZ)
 292#define REAPTIMEOUT_NODE	(4*HZ)
 293
 294#if STATS
 295#define	STATS_INC_ACTIVE(x)	((x)->num_active++)
 296#define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
 297#define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
 298#define	STATS_INC_GROWN(x)	((x)->grown++)
 299#define	STATS_ADD_REAPED(x,y)	((x)->reaped += (y))
 300#define	STATS_SET_HIGH(x)						\
 301	do {								\
 302		if ((x)->num_active > (x)->high_mark)			\
 303			(x)->high_mark = (x)->num_active;		\
 304	} while (0)
 305#define	STATS_INC_ERR(x)	((x)->errors++)
 306#define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
 307#define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
 308#define STATS_INC_ACOVERFLOW(x)   ((x)->node_overflow++)
 309#define	STATS_SET_FREEABLE(x, i)					\
 310	do {								\
 311		if ((x)->max_freeable < i)				\
 312			(x)->max_freeable = i;				\
 313	} while (0)
 314#define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
 315#define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
 316#define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
 317#define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
 318#else
 319#define	STATS_INC_ACTIVE(x)	do { } while (0)
 320#define	STATS_DEC_ACTIVE(x)	do { } while (0)
 321#define	STATS_INC_ALLOCED(x)	do { } while (0)
 322#define	STATS_INC_GROWN(x)	do { } while (0)
 323#define	STATS_ADD_REAPED(x,y)	do { (void)(y); } while (0)
 324#define	STATS_SET_HIGH(x)	do { } while (0)
 325#define	STATS_INC_ERR(x)	do { } while (0)
 326#define	STATS_INC_NODEALLOCS(x)	do { } while (0)
 327#define	STATS_INC_NODEFREES(x)	do { } while (0)
 328#define STATS_INC_ACOVERFLOW(x)   do { } while (0)
 329#define	STATS_SET_FREEABLE(x, i) do { } while (0)
 330#define STATS_INC_ALLOCHIT(x)	do { } while (0)
 331#define STATS_INC_ALLOCMISS(x)	do { } while (0)
 332#define STATS_INC_FREEHIT(x)	do { } while (0)
 333#define STATS_INC_FREEMISS(x)	do { } while (0)
 334#endif
 335
 336#if DEBUG
 337
 338/*
 339 * memory layout of objects:
 340 * 0		: objp
 341 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
 342 * 		the end of an object is aligned with the end of the real
 343 * 		allocation. Catches writes behind the end of the allocation.
 344 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
 345 * 		redzone word.
 346 * cachep->obj_offset: The real object.
 347 * cachep->size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
 348 * cachep->size - 1* BYTES_PER_WORD: last caller address
 349 *					[BYTES_PER_WORD long]
 350 */
 351static int obj_offset(struct kmem_cache *cachep)
 352{
 353	return cachep->obj_offset;
 354}
 355
 356static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
 357{
 358	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
 359	return (unsigned long long*) (objp + obj_offset(cachep) -
 360				      sizeof(unsigned long long));
 361}
 362
 363static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
 364{
 365	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
 366	if (cachep->flags & SLAB_STORE_USER)
 367		return (unsigned long long *)(objp + cachep->size -
 368					      sizeof(unsigned long long) -
 369					      REDZONE_ALIGN);
 370	return (unsigned long long *) (objp + cachep->size -
 371				       sizeof(unsigned long long));
 372}
 373
 374static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 375{
 376	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
 377	return (void **)(objp + cachep->size - BYTES_PER_WORD);
 378}
 379
 380#else
 381
 382#define obj_offset(x)			0
 383#define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
 384#define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
 385#define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})
 386
 387#endif
 388
 389/*
 390 * Do not go above this order unless 0 objects fit into the slab, or it is
 391 * overridden on the command line.
 392 */
 393#define	SLAB_MAX_ORDER_HI	1
 394#define	SLAB_MAX_ORDER_LO	0
 395static int slab_max_order = SLAB_MAX_ORDER_LO;
 396static bool slab_max_order_set __initdata;
 397
 398static inline struct kmem_cache *virt_to_cache(const void *obj)
 399{
 400	struct page *page = virt_to_head_page(obj);
 401	return page->slab_cache;
 402}
 403
 404static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
 405				 unsigned int idx)
 406{
 407	return page->s_mem + cache->size * idx;
 408}
 409
 410/*
 411 * We want to avoid an expensive divide : (offset / cache->size)
 412 *   Using the fact that size is a constant for a particular cache,
 413 *   we can replace (offset / cache->size) by
 414 *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
 415 */
 416static inline unsigned int obj_to_index(const struct kmem_cache *cache,
 417					const struct page *page, void *obj)
 418{
 419	u32 offset = (obj - page->s_mem);
 420	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
 421}
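/*
 * Worked example, not part of the original file, of the reciprocal divide
 * above: for a cache with size = 256, reciprocal_buffer_size is derived
 * from reciprocal_value(256) when the cache is set up, and an object that
 * starts 1024 bytes into the slab yields
 *   obj_to_index() = reciprocal_divide(1024, ...) = 1024 / 256 = 4,
 * i.e. the fifth object, without a hardware divide on the fast path.
 */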
 422
 423static struct arraycache_init initarray_generic =
 424    { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
 425
 426/* internal cache of cache description objs */
 427static struct kmem_cache kmem_cache_boot = {
 428	.batchcount = 1,
 429	.limit = BOOT_CPUCACHE_ENTRIES,
 430	.shared = 1,
 431	.size = sizeof(struct kmem_cache),
 432	.name = "kmem_cache",
 433};
 434
 435#define BAD_ALIEN_MAGIC 0x01020304ul
 436
 437#ifdef CONFIG_LOCKDEP
 438
 439/*
 440 * Slab sometimes uses the kmalloc slabs to store the slab headers
 441 * for other slabs "off slab".
 442 * The locking for this is tricky in that it nests within the locks
 443 * of all other slabs in a few places; to deal with this special
 444 * locking we put on-slab caches into a separate lock-class.
 445 *
 446 * We set lock class for alien array caches which are up during init.
 447 * The lock annotation will be lost if all cpus of a node go down and
 448 * then come back up during hotplug.
 449 */
 450static struct lock_class_key on_slab_l3_key;
 451static struct lock_class_key on_slab_alc_key;
 452
 453static struct lock_class_key debugobj_l3_key;
 454static struct lock_class_key debugobj_alc_key;
 455
 456static void slab_set_lock_classes(struct kmem_cache *cachep,
 457		struct lock_class_key *l3_key, struct lock_class_key *alc_key,
 458		int q)
 459{
 460	struct array_cache **alc;
 461	struct kmem_cache_node *n;
 462	int r;
 463
 464	n = cachep->node[q];
 465	if (!n)
 466		return;
 467
 468	lockdep_set_class(&n->list_lock, l3_key);
 469	alc = n->alien;
 470	/*
 471	 * FIXME: This check for BAD_ALIEN_MAGIC
 472	 * should go away when common slab code is taught to
 473	 * work even without alien caches.
 474	 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
 475	 * for alloc_alien_cache,
 476	 */
 477	if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
 478		return;
 479	for_each_node(r) {
 480		if (alc[r])
 481			lockdep_set_class(&alc[r]->lock, alc_key);
 482	}
 483}
 484
 485static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node)
 486{
 487	slab_set_lock_classes(cachep, &debugobj_l3_key, &debugobj_alc_key, node);
 488}
 489
 490static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
 491{
 492	int node;
 493
 494	for_each_online_node(node)
 495		slab_set_debugobj_lock_classes_node(cachep, node);
 496}
 497
 498static void init_node_lock_keys(int q)
 499{
 500	int i;
 501
 502	if (slab_state < UP)
 503		return;
 504
 505	for (i = 1; i <= KMALLOC_SHIFT_HIGH; i++) {
 506		struct kmem_cache_node *n;
 507		struct kmem_cache *cache = kmalloc_caches[i];
 508
 509		if (!cache)
 510			continue;
 511
 512		n = cache->node[q];
 513		if (!n || OFF_SLAB(cache))
 514			continue;
 515
 516		slab_set_lock_classes(cache, &on_slab_l3_key,
 517				&on_slab_alc_key, q);
 518	}
 519}
 520
 521static void on_slab_lock_classes_node(struct kmem_cache *cachep, int q)
 522{
 523	if (!cachep->node[q])
 524		return;
 525
 526	slab_set_lock_classes(cachep, &on_slab_l3_key,
 527			&on_slab_alc_key, q);
 528}
 529
 530static inline void on_slab_lock_classes(struct kmem_cache *cachep)
 531{
 532	int node;
 533
 534	VM_BUG_ON(OFF_SLAB(cachep));
 535	for_each_node(node)
 536		on_slab_lock_classes_node(cachep, node);
 537}
 538
 539static inline void init_lock_keys(void)
 540{
 541	int node;
 542
 543	for_each_node(node)
 544		init_node_lock_keys(node);
 545}
 546#else
 547static void init_node_lock_keys(int q)
 548{
 549}
 550
 551static inline void init_lock_keys(void)
 552{
 553}
 554
 555static inline void on_slab_lock_classes(struct kmem_cache *cachep)
 556{
 557}
 558
 559static inline void on_slab_lock_classes_node(struct kmem_cache *cachep, int node)
 560{
 561}
 562
 563static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node)
 564{
 565}
 566
 567static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
 568{
 569}
 570#endif
 571
 572static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
 573
 574static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
 575{
 576	return cachep->array[smp_processor_id()];
 577}
 578
 579static int calculate_nr_objs(size_t slab_size, size_t buffer_size,
 580				size_t idx_size, size_t align)
 581{
 582	int nr_objs;
 583	size_t freelist_size;
 584
 585	/*
 586	 * Ignore padding for the initial guess. The padding
 587	 * is at most @align-1 bytes, and @buffer_size is at
 588	 * least @align. In the worst case, this result will
 589	 * be one greater than the number of objects that fit
 590	 * into the memory allocation when taking the padding
 591	 * into account.
 592	 */
 593	nr_objs = slab_size / (buffer_size + idx_size);
 594
 595	/*
 596	 * This calculated number will be either the right
 597	 * amount, or one greater than what we want.
 598	 */
 599	freelist_size = slab_size - nr_objs * buffer_size;
 600	if (freelist_size < ALIGN(nr_objs * idx_size, align))
 601		nr_objs--;
 602
 603	return nr_objs;
 604}
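/*
 * Worked example, not part of the original file, for calculate_nr_objs():
 * with slab_size = 4096, buffer_size = 256, idx_size = 2 and align = 8,
 *   initial guess:  4096 / (256 + 2) = 15 objects
 *   freelist_size:  4096 - 15 * 256  = 256 >= ALIGN(15 * 2, 8) = 32
 * so the guess stands: 15 objects plus their freelist indices fit in one
 * page.
 */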
 605
 606/*
 607 * Calculate the number of objects and left-over bytes for a given buffer size.
 608 */
 609static void cache_estimate(unsigned long gfporder, size_t buffer_size,
 610			   size_t align, int flags, size_t *left_over,
 611			   unsigned int *num)
 612{
 613	int nr_objs;
 614	size_t mgmt_size;
 615	size_t slab_size = PAGE_SIZE << gfporder;
 616
 617	/*
 618	 * The slab management structure can be either off the slab or
 619	 * on it. For the latter case, the memory allocated for a
 620	 * slab is used for:
 621	 *
 622	 * - One unsigned int for each object
 623	 * - Padding to respect alignment of @align
 624	 * - @buffer_size bytes for each object
 625	 *
 626	 * If the slab management structure is off the slab, then the
 627	 * alignment will already be calculated into the size. Because
 628	 * the slabs are all pages aligned, the objects will be at the
 629	 * correct alignment when allocated.
 630	 */
 631	if (flags & CFLGS_OFF_SLAB) {
 632		mgmt_size = 0;
 633		nr_objs = slab_size / buffer_size;
 634
 635	} else {
 636		nr_objs = calculate_nr_objs(slab_size, buffer_size,
 637					sizeof(freelist_idx_t), align);
 638		mgmt_size = ALIGN(nr_objs * sizeof(freelist_idx_t), align);
 639	}
 640	*num = nr_objs;
 641	*left_over = slab_size - nr_objs*buffer_size - mgmt_size;
 642}
 643
 644#if DEBUG
 645#define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)
 646
 647static void __slab_error(const char *function, struct kmem_cache *cachep,
 648			char *msg)
 649{
 650	printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
 651	       function, cachep->name, msg);
 652	dump_stack();
 653	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
 654}
 655#endif
 656
 657/*
 658 * By default on NUMA we use alien caches to stage the freeing of
 659 * objects allocated from other nodes. This causes massive memory
 660 * inefficiencies when using a fake NUMA setup to split memory into a
 661 * large number of small nodes, so it can be disabled on the command
 662 * line.
 663 */
 664
 665static int use_alien_caches __read_mostly = 1;
 666static int __init noaliencache_setup(char *s)
 667{
 668	use_alien_caches = 0;
 669	return 1;
 670}
 671__setup("noaliencache", noaliencache_setup);
 672
 673static int __init slab_max_order_setup(char *str)
 674{
 675	get_option(&str, &slab_max_order);
 676	slab_max_order = slab_max_order < 0 ? 0 :
 677				min(slab_max_order, MAX_ORDER - 1);
 678	slab_max_order_set = true;
 679
 680	return 1;
 681}
 682__setup("slab_max_order=", slab_max_order_setup);
 683
 684#ifdef CONFIG_NUMA
 685/*
 686 * Special reaping functions for NUMA systems called from cache_reap().
 687 * These take care of doing round robin flushing of alien caches (containing
 688 * objects freed on different nodes from which they were allocated) and the
 689 * flushing of remote pcps by calling drain_node_pages.
 690 */
 691static DEFINE_PER_CPU(unsigned long, slab_reap_node);
 692
 693static void init_reap_node(int cpu)
 694{
 695	int node;
 696
 697	node = next_node(cpu_to_mem(cpu), node_online_map);
 698	if (node == MAX_NUMNODES)
 699		node = first_node(node_online_map);
 700
 701	per_cpu(slab_reap_node, cpu) = node;
 702}
 703
 704static void next_reap_node(void)
 705{
 706	int node = __this_cpu_read(slab_reap_node);
 707
 708	node = next_node(node, node_online_map);
 709	if (unlikely(node >= MAX_NUMNODES))
 710		node = first_node(node_online_map);
 711	__this_cpu_write(slab_reap_node, node);
 712}
 713
 714#else
 715#define init_reap_node(cpu) do { } while (0)
 716#define next_reap_node(void) do { } while (0)
 717#endif
 718
 719/*
 720 * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
 721 * via the workqueue/eventd.
 722 * Add the CPU number into the expiration time to minimize the possibility of
 723 * the CPUs getting into lockstep and contending for the global cache chain
 724 * lock.
 725 */
 726static void start_cpu_timer(int cpu)
 727{
 728	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);
 729
 730	/*
 731	 * When this gets called from do_initcalls via cpucache_init(),
 732	 * init_workqueues() has already run, so keventd will be setup
 733	 * at that time.
 734	 */
 735	if (keventd_up() && reap_work->work.func == NULL) {
 736		init_reap_node(cpu);
 737		INIT_DEFERRABLE_WORK(reap_work, cache_reap);
 738		schedule_delayed_work_on(cpu, reap_work,
 739					__round_jiffies_relative(HZ, cpu));
 740	}
 741}
 742
 743static struct array_cache *alloc_arraycache(int node, int entries,
 744					    int batchcount, gfp_t gfp)
 745{
 746	int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
 747	struct array_cache *nc = NULL;
 748
 749	nc = kmalloc_node(memsize, gfp, node);
 750	/*
 751	 * The array_cache structures contain pointers to free objects.
 752	 * However, when such objects are allocated or transferred to another
 753	 * cache the pointers are not cleared and they could be counted as
 754	 * valid references during a kmemleak scan. Therefore, kmemleak must
 755	 * not scan such objects.
 756	 */
 757	kmemleak_no_scan(nc);
 758	if (nc) {
 759		nc->avail = 0;
 760		nc->limit = entries;
 761		nc->batchcount = batchcount;
 762		nc->touched = 0;
 763		spin_lock_init(&nc->lock);
 764	}
 765	return nc;
 766}
 767
 768static inline bool is_slab_pfmemalloc(struct page *page)
 
 769{
 770	return PageSlabPfmemalloc(page);
 771}
 
 772
 773/* Clears pfmemalloc_active if no slabs have pfmalloc set */
 774static void recheck_pfmemalloc_active(struct kmem_cache *cachep,
 775						struct array_cache *ac)
 776{
 777	struct kmem_cache_node *n = cachep->node[numa_mem_id()];
 778	struct page *page;
 779	unsigned long flags;
 780
 781	if (!pfmemalloc_active)
 782		return;
 783
 784	spin_lock_irqsave(&n->list_lock, flags);
 785	list_for_each_entry(page, &n->slabs_full, lru)
 786		if (is_slab_pfmemalloc(page))
 787			goto out;
 788
 789	list_for_each_entry(page, &n->slabs_partial, lru)
 790		if (is_slab_pfmemalloc(page))
 791			goto out;
 792
 793	list_for_each_entry(page, &n->slabs_free, lru)
 794		if (is_slab_pfmemalloc(page))
 795			goto out;
 796
 797	pfmemalloc_active = false;
 798out:
 799	spin_unlock_irqrestore(&n->list_lock, flags);
 800}
 801
 802static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
 803						gfp_t flags, bool force_refill)
 804{
 805	int i;
 806	void *objp = ac->entry[--ac->avail];
 807
 808	/* Ensure the caller is allowed to use objects from PFMEMALLOC slab */
 809	if (unlikely(is_obj_pfmemalloc(objp))) {
 810		struct kmem_cache_node *n;
 811
 812		if (gfp_pfmemalloc_allowed(flags)) {
 813			clear_obj_pfmemalloc(&objp);
 814			return objp;
 815		}
 816
 817		/* The caller cannot use PFMEMALLOC objects, find another one */
 818		for (i = 0; i < ac->avail; i++) {
 819			/* If a !PFMEMALLOC object is found, swap them */
 820			if (!is_obj_pfmemalloc(ac->entry[i])) {
 821				objp = ac->entry[i];
 822				ac->entry[i] = ac->entry[ac->avail];
 823				ac->entry[ac->avail] = objp;
 824				return objp;
 825			}
 826		}
 827
 828		/*
 829		 * If there are empty slabs on the slabs_free list and we are
 830		 * being forced to refill the cache, mark this one !pfmemalloc.
 831		 */
 832		n = cachep->node[numa_mem_id()];
 833		if (!list_empty(&n->slabs_free) && force_refill) {
 834			struct page *page = virt_to_head_page(objp);
 835			ClearPageSlabPfmemalloc(page);
 836			clear_obj_pfmemalloc(&objp);
 837			recheck_pfmemalloc_active(cachep, ac);
 838			return objp;
 839		}
 840
 841		/* No !PFMEMALLOC objects available */
 842		ac->avail++;
 843		objp = NULL;
 844	}
 845
 846	return objp;
 847}
 848
 849static inline void *ac_get_obj(struct kmem_cache *cachep,
 850			struct array_cache *ac, gfp_t flags, bool force_refill)
 851{
 852	void *objp;
 853
 854	if (unlikely(sk_memalloc_socks()))
 855		objp = __ac_get_obj(cachep, ac, flags, force_refill);
 856	else
 857		objp = ac->entry[--ac->avail];
 858
 859	return objp;
 860}
 861
 862static void *__ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
 863								void *objp)
 864{
 865	if (unlikely(pfmemalloc_active)) {
 866		/* Some pfmemalloc slabs exist, check if this is one */
 867		struct page *page = virt_to_head_page(objp);
 868		if (PageSlabPfmemalloc(page))
 869			set_obj_pfmemalloc(&objp);
 870	}
 871
 872	return objp;
 873}
 874
 875static inline void ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
 876								void *objp)
 877{
 878	if (unlikely(sk_memalloc_socks()))
 879		objp = __ac_put_obj(cachep, ac, objp);
 880
 881	ac->entry[ac->avail++] = objp;
 882}
 883
 884/*
 885 * Transfer objects in one arraycache to another.
 886 * Locking must be handled by the caller.
 887 *
 888 * Return the number of entries transferred.
 889 */
 890static int transfer_objects(struct array_cache *to,
 891		struct array_cache *from, unsigned int max)
 892{
 893	/* Figure out how many entries to transfer */
 894	int nr = min3(from->avail, max, to->limit - to->avail);
 895
 896	if (!nr)
 897		return 0;
 898
 899	memcpy(to->entry + to->avail, from->entry + from->avail -nr,
 900			sizeof(void *) *nr);
 901
 902	from->avail -= nr;
 903	to->avail += nr;
 904	return nr;
 905}
 906
 907#ifndef CONFIG_NUMA
 908
 909#define drain_alien_cache(cachep, alien) do { } while (0)
 910#define reap_alien(cachep, n) do { } while (0)
 911
 912static inline struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
 
 913{
 914	return (struct array_cache **)BAD_ALIEN_MAGIC;
 915}
 916
 917static inline void free_alien_cache(struct array_cache **ac_ptr)
 918{
 919}
 920
 921static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 922{
 923	return 0;
 924}
 925
 926static inline void *alternate_node_alloc(struct kmem_cache *cachep,
 927		gfp_t flags)
 928{
 929	return NULL;
 930}
 931
 932static inline void *____cache_alloc_node(struct kmem_cache *cachep,
 933		 gfp_t flags, int nodeid)
 934{
 935	return NULL;
 936}
 937
 938#else	/* CONFIG_NUMA */
 939
 940static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
 941static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
 942
 943static struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
 944{
 945	struct array_cache **ac_ptr;
 946	int memsize = sizeof(void *) * nr_node_ids;
 947	int i;
 948
 949	if (limit > 1)
 950		limit = 12;
 951	ac_ptr = kzalloc_node(memsize, gfp, node);
 952	if (ac_ptr) {
 953		for_each_node(i) {
 954			if (i == node || !node_online(i))
 955				continue;
 956			ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d, gfp);
 957			if (!ac_ptr[i]) {
 958				for (i--; i >= 0; i--)
 959					kfree(ac_ptr[i]);
 960				kfree(ac_ptr);
 961				return NULL;
 962			}
 
 963		}
 964	}
 965	return ac_ptr;
 966}
 967
 968static void free_alien_cache(struct array_cache **ac_ptr)
 969{
 970	int i;
 971
 972	if (!ac_ptr)
 973		return;
 974	for_each_node(i)
 975	    kfree(ac_ptr[i]);
 976	kfree(ac_ptr);
 977}
 978
 979static void __drain_alien_cache(struct kmem_cache *cachep,
 980				struct array_cache *ac, int node)
 
 981{
 982	struct kmem_cache_node *n = cachep->node[node];
 983
 984	if (ac->avail) {
 985		spin_lock(&n->list_lock);
 986		/*
 987		 * Stuff objects into the remote node's shared array first.
 988		 * That way we could avoid the overhead of putting the objects
 989		 * into the free lists and getting them back later.
 990		 */
 991		if (n->shared)
 992			transfer_objects(n->shared, ac, ac->limit);
 993
 994		free_block(cachep, ac->entry, ac->avail, node);
 995		ac->avail = 0;
 996		spin_unlock(&n->list_lock);
 997	}
 998}
 999
1000/*
1001 * Called from cache_reap() to regularly drain alien caches round robin.
1002 */
1003static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
1004{
1005	int node = __this_cpu_read(slab_reap_node);
1006
1007	if (n->alien) {
1008		struct array_cache *ac = n->alien[node];
 
1009
1010		if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
1011			__drain_alien_cache(cachep, ac, node);
1012			spin_unlock_irq(&ac->lock);
1013		}
1014	}
1015}
1016
1017static void drain_alien_cache(struct kmem_cache *cachep,
1018				struct array_cache **alien)
1019{
1020	int i = 0;
 
1021	struct array_cache *ac;
1022	unsigned long flags;
1023
1024	for_each_online_node(i) {
1025		ac = alien[i];
1026		if (ac) {
1027			spin_lock_irqsave(&ac->lock, flags);
1028			__drain_alien_cache(cachep, ac, i);
1029			spin_unlock_irqrestore(&ac->lock, flags);
1030		}
1031	}
1032}
1033
1034static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 
1035{
1036	int nodeid = page_to_nid(virt_to_page(objp));
1037	struct kmem_cache_node *n;
1038	struct array_cache *alien = NULL;
1039	int node;
1040
1041	node = numa_mem_id();
1042
1043	/*
1044	 * Make sure we are not freeing an object from another node to the array
1045	 * cache on this cpu.
1046	 */
1047	if (likely(nodeid == node))
1048		return 0;
1049
1050	n = cachep->node[node];
1051	STATS_INC_NODEFREES(cachep);
1052	if (n->alien && n->alien[nodeid]) {
1053		alien = n->alien[nodeid];
 
1054		spin_lock(&alien->lock);
1055		if (unlikely(alien->avail == alien->limit)) {
1056			STATS_INC_ACOVERFLOW(cachep);
1057			__drain_alien_cache(cachep, alien, nodeid);
1058		}
1059		ac_put_obj(cachep, alien, objp);
1060		spin_unlock(&alien->lock);
 
1061	} else {
1062		spin_lock(&(cachep->node[nodeid])->list_lock);
1063		free_block(cachep, &objp, 1, nodeid);
1064		spin_unlock(&(cachep->node[nodeid])->list_lock);
1065	}
1066	return 1;
1067}
1068#endif
1069
1070/*
1071 * Allocates and initializes the kmem_cache_node for a node on each slab cache, used for
1072 * either memory or cpu hotplug.  If memory is being hot-added, the kmem_cache_node
1073 * will be allocated off-node since memory is not yet online for the new node.
1074 * When hotplugging memory or a cpu, existing nodes are not replaced if
1075 * already in use.
1076 *
1077 * Must hold slab_mutex.
1078 */
1079static int init_cache_node_node(int node)
1080{
 
1081	struct kmem_cache *cachep;
1082	struct kmem_cache_node *n;
1083	const int memsize = sizeof(struct kmem_cache_node);
1084
1085	list_for_each_entry(cachep, &slab_caches, list) {
1086		/*
1087		 * Set up the kmem_cache_node for cpu before we can
1088		 * begin anything. Make sure some other cpu on this
1089		 * node has not already allocated this
1090		 */
1091		if (!cachep->node[node]) {
1092			n = kmalloc_node(memsize, GFP_KERNEL, node);
1093			if (!n)
1094				return -ENOMEM;
1095			kmem_cache_node_init(n);
1096			n->next_reap = jiffies + REAPTIMEOUT_NODE +
1097			    ((unsigned long)cachep) % REAPTIMEOUT_NODE;
1098
1099			/*
1100			 * The kmem_cache_nodes don't come and go as CPUs
1101			 * come and go.  slab_mutex is sufficient
1102			 * protection here.
1103			 */
1104			cachep->node[node] = n;
1105		}
1106
1107		spin_lock_irq(&cachep->node[node]->list_lock);
1108		cachep->node[node]->free_limit =
1109			(1 + nr_cpus_node(node)) *
1110			cachep->batchcount + cachep->num;
1111		spin_unlock_irq(&cachep->node[node]->list_lock);
1112	}
1113	return 0;
1114}
 
1115
1116static inline int slabs_tofree(struct kmem_cache *cachep,
1117						struct kmem_cache_node *n)
1118{
1119	return (n->free_objects + cachep->num - 1) / cachep->num;
1120}
1121
1122static void cpuup_canceled(long cpu)
1123{
1124	struct kmem_cache *cachep;
1125	struct kmem_cache_node *n = NULL;
1126	int node = cpu_to_mem(cpu);
1127	const struct cpumask *mask = cpumask_of_node(node);
1128
1129	list_for_each_entry(cachep, &slab_caches, list) {
1130		struct array_cache *nc;
1131		struct array_cache *shared;
1132		struct array_cache **alien;
1133
1134		/* cpu is dead; no one can alloc from it. */
1135		nc = cachep->array[cpu];
1136		cachep->array[cpu] = NULL;
1137		n = cachep->node[node];
1138
 
1139		if (!n)
1140			goto free_array_cache;
1141
1142		spin_lock_irq(&n->list_lock);
1143
1144		/* Free limit for this kmem_cache_node */
1145		n->free_limit -= cachep->batchcount;
1146		if (nc)
1147			free_block(cachep, nc->entry, nc->avail, node);
1148
1149		if (!cpumask_empty(mask)) {
1150			spin_unlock_irq(&n->list_lock);
1151			goto free_array_cache;
1152		}
1153
1154		shared = n->shared;
1155		if (shared) {
1156			free_block(cachep, shared->entry,
1157				   shared->avail, node);
1158			n->shared = NULL;
1159		}
1160
1161		alien = n->alien;
1162		n->alien = NULL;
1163
1164		spin_unlock_irq(&n->list_lock);
1165
1166		kfree(shared);
1167		if (alien) {
1168			drain_alien_cache(cachep, alien);
1169			free_alien_cache(alien);
1170		}
1171free_array_cache:
1172		kfree(nc);
 
1173	}
1174	/*
1175	 * In the previous loop, all the objects were freed to
1176	 * the respective cache's slabs,  now we can go ahead and
1177	 * shrink each nodelist to its limit.
1178	 */
1179	list_for_each_entry(cachep, &slab_caches, list) {
1180		n = cachep->node[node];
1181		if (!n)
1182			continue;
1183		drain_freelist(cachep, n, slabs_tofree(cachep, n));
1184	}
1185}
1186
1187static int cpuup_prepare(long cpu)
1188{
1189	struct kmem_cache *cachep;
1190	struct kmem_cache_node *n = NULL;
1191	int node = cpu_to_mem(cpu);
1192	int err;
1193
1194	/*
1195	 * We need to do this right in the beginning since
1196	 * alloc_arraycache's are going to use this list.
1197	 * kmalloc_node allows us to add the slab to the right
1198	 * kmem_cache_node and not this cpu's kmem_cache_node
1199	 */
1200	err = init_cache_node_node(node);
1201	if (err < 0)
1202		goto bad;
1203
1204	/*
1205	 * Now we can go ahead with allocating the shared arrays and
1206	 * array caches
1207	 */
1208	list_for_each_entry(cachep, &slab_caches, list) {
1209		struct array_cache *nc;
1210		struct array_cache *shared = NULL;
1211		struct array_cache **alien = NULL;
1212
1213		nc = alloc_arraycache(node, cachep->limit,
1214					cachep->batchcount, GFP_KERNEL);
1215		if (!nc)
1216			goto bad;
1217		if (cachep->shared) {
1218			shared = alloc_arraycache(node,
1219				cachep->shared * cachep->batchcount,
1220				0xbaadf00d, GFP_KERNEL);
1221			if (!shared) {
1222				kfree(nc);
1223				goto bad;
1224			}
1225		}
1226		if (use_alien_caches) {
1227			alien = alloc_alien_cache(node, cachep->limit, GFP_KERNEL);
1228			if (!alien) {
1229				kfree(shared);
1230				kfree(nc);
1231				goto bad;
1232			}
1233		}
1234		cachep->array[cpu] = nc;
1235		n = cachep->node[node];
1236		BUG_ON(!n);
1237
1238		spin_lock_irq(&n->list_lock);
1239		if (!n->shared) {
1240			/*
1241			 * We are serialised from CPU_DEAD or
1242			 * CPU_UP_CANCELLED by the cpucontrol lock
1243			 */
1244			n->shared = shared;
1245			shared = NULL;
1246		}
1247#ifdef CONFIG_NUMA
1248		if (!n->alien) {
1249			n->alien = alien;
1250			alien = NULL;
1251		}
1252#endif
1253		spin_unlock_irq(&n->list_lock);
1254		kfree(shared);
1255		free_alien_cache(alien);
1256		if (cachep->flags & SLAB_DEBUG_OBJECTS)
1257			slab_set_debugobj_lock_classes_node(cachep, node);
1258		else if (!OFF_SLAB(cachep) &&
1259			 !(cachep->flags & SLAB_DESTROY_BY_RCU))
1260			on_slab_lock_classes_node(cachep, node);
1261	}
1262	init_node_lock_keys(node);
1263
1264	return 0;
1265bad:
1266	cpuup_canceled(cpu);
1267	return -ENOMEM;
1268}
1269
1270static int cpuup_callback(struct notifier_block *nfb,
1271				    unsigned long action, void *hcpu)
1272{
1273	long cpu = (long)hcpu;
1274	int err = 0;
1275
1276	switch (action) {
1277	case CPU_UP_PREPARE:
1278	case CPU_UP_PREPARE_FROZEN:
1279		mutex_lock(&slab_mutex);
1280		err = cpuup_prepare(cpu);
1281		mutex_unlock(&slab_mutex);
1282		break;
1283	case CPU_ONLINE:
1284	case CPU_ONLINE_FROZEN:
1285		start_cpu_timer(cpu);
1286		break;
1287#ifdef CONFIG_HOTPLUG_CPU
1288  	case CPU_DOWN_PREPARE:
1289  	case CPU_DOWN_PREPARE_FROZEN:
1290		/*
1291		 * Shutdown cache reaper. Note that the slab_mutex is
1292		 * held so that if cache_reap() is invoked it cannot do
1293		 * anything expensive but will only modify reap_work
1294		 * and reschedule the timer.
1295		*/
1296		cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
1297		/* Now the cache_reaper is guaranteed to be not running. */
1298		per_cpu(slab_reap_work, cpu).work.func = NULL;
1299  		break;
1300  	case CPU_DOWN_FAILED:
1301  	case CPU_DOWN_FAILED_FROZEN:
1302		start_cpu_timer(cpu);
1303  		break;
1304	case CPU_DEAD:
1305	case CPU_DEAD_FROZEN:
1306		/*
1307		 * Even if all the cpus of a node are down, we don't free the
1308		 * kmem_cache_node of any cache. This to avoid a race between
1309		 * cpu_down, and a kmalloc allocation from another cpu for
1310		 * memory from the node of the cpu going down.  The node
1311		 * structure is usually allocated from kmem_cache_create() and
1312		 * gets destroyed at kmem_cache_destroy().
1313		 */
1314		/* fall through */
1315#endif
1316	case CPU_UP_CANCELED:
1317	case CPU_UP_CANCELED_FROZEN:
1318		mutex_lock(&slab_mutex);
1319		cpuup_canceled(cpu);
1320		mutex_unlock(&slab_mutex);
1321		break;
1322	}
1323	return notifier_from_errno(err);
1324}
1325
1326static struct notifier_block cpucache_notifier = {
1327	&cpuup_callback, NULL, 0
1328};
1329
1330#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
1331/*
1332 * Drains freelist for a node on each slab cache, used for memory hot-remove.
1333 * Returns -EBUSY if all objects cannot be drained so that the node is not
1334 * removed.
1335 *
1336 * Must hold slab_mutex.
1337 */
1338static int __meminit drain_cache_node_node(int node)
1339{
1340	struct kmem_cache *cachep;
1341	int ret = 0;
1342
1343	list_for_each_entry(cachep, &slab_caches, list) {
1344		struct kmem_cache_node *n;
1345
1346		n = cachep->node[node];
1347		if (!n)
1348			continue;
1349
1350		drain_freelist(cachep, n, slabs_tofree(cachep, n));
1351
1352		if (!list_empty(&n->slabs_full) ||
1353		    !list_empty(&n->slabs_partial)) {
1354			ret = -EBUSY;
1355			break;
1356		}
1357	}
1358	return ret;
1359}
1360
1361static int __meminit slab_memory_callback(struct notifier_block *self,
1362					unsigned long action, void *arg)
1363{
1364	struct memory_notify *mnb = arg;
1365	int ret = 0;
1366	int nid;
1367
1368	nid = mnb->status_change_nid;
1369	if (nid < 0)
1370		goto out;
1371
1372	switch (action) {
1373	case MEM_GOING_ONLINE:
1374		mutex_lock(&slab_mutex);
1375		ret = init_cache_node_node(nid);
1376		mutex_unlock(&slab_mutex);
1377		break;
1378	case MEM_GOING_OFFLINE:
1379		mutex_lock(&slab_mutex);
1380		ret = drain_cache_node_node(nid);
1381		mutex_unlock(&slab_mutex);
1382		break;
1383	case MEM_ONLINE:
1384	case MEM_OFFLINE:
1385	case MEM_CANCEL_ONLINE:
1386	case MEM_CANCEL_OFFLINE:
1387		break;
1388	}
1389out:
1390	return notifier_from_errno(ret);
1391}
1392#endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */
1393
1394/*
1395 * swap the static kmem_cache_node with kmalloced memory
1396 */
1397static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list,
1398				int nodeid)
1399{
1400	struct kmem_cache_node *ptr;
1401
1402	ptr = kmalloc_node(sizeof(struct kmem_cache_node), GFP_NOWAIT, nodeid);
1403	BUG_ON(!ptr);
1404
1405	memcpy(ptr, list, sizeof(struct kmem_cache_node));
1406	/*
1407	 * Do not assume that spinlocks can be initialized via memcpy:
1408	 */
1409	spin_lock_init(&ptr->list_lock);
1410
1411	MAKE_ALL_LISTS(cachep, ptr, nodeid);
1412	cachep->node[nodeid] = ptr;
1413}
1414
1415/*
1416 * For setting up all the kmem_cache_node structures for caches whose
1417 * buffer_size is the same as the size of kmem_cache_node.
1418 */
1419static void __init set_up_node(struct kmem_cache *cachep, int index)
1420{
1421	int node;
1422
1423	for_each_online_node(node) {
1424		cachep->node[node] = &init_kmem_cache_node[index + node];
1425		cachep->node[node]->next_reap = jiffies +
1426		    REAPTIMEOUT_NODE +
1427		    ((unsigned long)cachep) % REAPTIMEOUT_NODE;
1428	}
1429}
1430
1431/*
1432 * The memory after the last cpu cache pointer is used for the
1433 * node pointer.
1434 */
1435static void setup_node_pointer(struct kmem_cache *cachep)
1436{
1437	cachep->node = (struct kmem_cache_node **)&cachep->array[nr_cpu_ids];
1438}
1439
1440/*
1441 * Initialisation.  Called after the page allocator has been initialised and
1442 * before smp_init().
1443 */
1444void __init kmem_cache_init(void)
1445{
1446	int i;
1447
1448	BUILD_BUG_ON(sizeof(((struct page *)NULL)->lru) <
1449					sizeof(struct rcu_head));
1450	kmem_cache = &kmem_cache_boot;
1451	setup_node_pointer(kmem_cache);
1452
1453	if (num_possible_nodes() == 1)
1454		use_alien_caches = 0;
1455
1456	for (i = 0; i < NUM_INIT_LISTS; i++)
1457		kmem_cache_node_init(&init_kmem_cache_node[i]);
1458
1459	set_up_node(kmem_cache, CACHE_CACHE);
1460
1461	/*
1462	 * Fragmentation resistance on low memory - only use bigger
1463	 * page orders on machines with more than 32MB of memory if
1464	 * not overridden on the command line.
1465	 */
1466	if (!slab_max_order_set && totalram_pages > (32 << 20) >> PAGE_SHIFT)
1467		slab_max_order = SLAB_MAX_ORDER_HI;
1468
1469	/* Bootstrap is tricky, because several objects are allocated
1470	 * from caches that do not exist yet:
1471	 * 1) initialize the kmem_cache cache: it contains the struct
1472	 *    kmem_cache structures of all caches, except kmem_cache itself:
1473	 *    kmem_cache is statically allocated.
1474	 *    Initially an __init data area is used for the head array and the
1475	 *    kmem_cache_node structures, it's replaced with a kmalloc allocated
1476	 *    array at the end of the bootstrap.
1477	 * 2) Create the first kmalloc cache.
1478	 *    The struct kmem_cache for the new cache is allocated normally.
1479	 *    An __init data area is used for the head array.
1480	 * 3) Create the remaining kmalloc caches, with minimally sized
1481	 *    head arrays.
1482	 * 4) Replace the __init data head arrays for kmem_cache and the first
1483	 *    kmalloc cache with kmalloc allocated arrays.
1484	 * 5) Replace the __init data for kmem_cache_node for kmem_cache and
1485 *    the other caches with kmalloc allocated memory.
1486	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
1487	 */
1488
1489	/* 1) create the kmem_cache */
1490
1491	/*
1492	 * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
1493	 */
1494	create_boot_cache(kmem_cache, "kmem_cache",
1495		offsetof(struct kmem_cache, array[nr_cpu_ids]) +
1496				  nr_node_ids * sizeof(struct kmem_cache_node *),
1497				  SLAB_HWCACHE_ALIGN);
1498	list_add(&kmem_cache->list, &slab_caches);
1499
1500	/* 2+3) create the kmalloc caches */
1501
1502	/*
1503	 * Initialize the caches that provide memory for the array cache and the
1504	 * kmem_cache_node structures first.  Without this, further allocations will
1505	 * bug.
1506	 */
1507
1508	kmalloc_caches[INDEX_AC] = create_kmalloc_cache("kmalloc-ac",
1509					kmalloc_size(INDEX_AC), ARCH_KMALLOC_FLAGS);
1510
1511	if (INDEX_AC != INDEX_NODE)
1512		kmalloc_caches[INDEX_NODE] =
1513			create_kmalloc_cache("kmalloc-node",
1514				kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
1515
1516	slab_early_init = 0;
1517
1518	/* 4) Replace the bootstrap head arrays */
1519	{
1520		struct array_cache *ptr;
1521
1522		ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
1523
1524		memcpy(ptr, cpu_cache_get(kmem_cache),
1525		       sizeof(struct arraycache_init));
1526		/*
1527		 * Do not assume that spinlocks can be initialized via memcpy:
1528		 */
1529		spin_lock_init(&ptr->lock);
1530
1531		kmem_cache->array[smp_processor_id()] = ptr;
1532
1533		ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
1534
1535		BUG_ON(cpu_cache_get(kmalloc_caches[INDEX_AC])
1536		       != &initarray_generic.cache);
1537		memcpy(ptr, cpu_cache_get(kmalloc_caches[INDEX_AC]),
1538		       sizeof(struct arraycache_init));
1539		/*
1540		 * Do not assume that spinlocks can be initialized via memcpy:
1541		 */
1542		spin_lock_init(&ptr->lock);
1543
1544		kmalloc_caches[INDEX_AC]->array[smp_processor_id()] = ptr;
1545	}
1546	/* 5) Replace the bootstrap kmem_cache_node */
1547	{
1548		int nid;
1549
1550		for_each_online_node(nid) {
1551			init_list(kmem_cache, &init_kmem_cache_node[CACHE_CACHE + nid], nid);
1552
1553			init_list(kmalloc_caches[INDEX_AC],
1554				  &init_kmem_cache_node[SIZE_AC + nid], nid);
1555
1556			if (INDEX_AC != INDEX_NODE) {
1557				init_list(kmalloc_caches[INDEX_NODE],
1558					  &init_kmem_cache_node[SIZE_NODE + nid], nid);
1559			}
1560		}
1561	}
1562
1563	create_kmalloc_caches(ARCH_KMALLOC_FLAGS);
1564}
1565
1566void __init kmem_cache_init_late(void)
1567{
1568	struct kmem_cache *cachep;
1569
1570	slab_state = UP;
1571
1572	/* 6) resize the head arrays to their final sizes */
1573	mutex_lock(&slab_mutex);
1574	list_for_each_entry(cachep, &slab_caches, list)
1575		if (enable_cpucache(cachep, GFP_NOWAIT))
1576			BUG();
1577	mutex_unlock(&slab_mutex);
1578
1579	/* Annotate slab for lockdep -- annotate the malloc caches */
1580	init_lock_keys();
1581
1582	/* Done! */
1583	slab_state = FULL;
1584
1585	/*
1586	 * Register a cpu startup notifier callback that initializes
1587	 * cpu_cache_get for all new cpus
1588	 */
1589	register_cpu_notifier(&cpucache_notifier);
1590
1591#ifdef CONFIG_NUMA
1592	/*
1593	 * Register a memory hotplug callback that initializes and frees
1594	 * kmem_cache_node structures.
1595	 */
1596	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
1597#endif
1598
1599	/*
1600	 * The reap timers are started later, with a module init call: That part
1601	 * of the kernel is not yet operational.
1602	 */
1603}
1604
1605static int __init cpucache_init(void)
1606{
1607	int cpu;
1608
1609	/*
1610	 * Register the timers that return unneeded pages to the page allocator
1611	 */
1612	for_each_online_cpu(cpu)
1613		start_cpu_timer(cpu);
 
1614
1615	/* Done! */
1616	slab_state = FULL;
1617	return 0;
1618}
1619__initcall(cpucache_init);
1620
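/*
 * Report a failed slab page allocation: dump per-node slab and object
 * counts so the (rate-limited) warning is useful for debugging.
 */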
1621static noinline void
1622slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
1623{
 
1624	struct kmem_cache_node *n;
1625	struct page *page;
1626	unsigned long flags;
1627	int node;
1628
1629	printk(KERN_WARNING
1630		"SLAB: Unable to allocate memory on node %d (gfp=0x%x)\n",
1631		nodeid, gfpflags);
1632	printk(KERN_WARNING "  cache: %s, object size: %d, order: %d\n",
1633		cachep->name, cachep->size, cachep->gfporder);
1634
1635	for_each_online_node(node) {
1636		unsigned long active_objs = 0, num_objs = 0, free_objects = 0;
1637		unsigned long active_slabs = 0, num_slabs = 0;
1638
1639		n = cachep->node[node];
1640		if (!n)
1641			continue;
1642
1643		spin_lock_irqsave(&n->list_lock, flags);
1644		list_for_each_entry(page, &n->slabs_full, lru) {
1645			active_objs += cachep->num;
1646			active_slabs++;
1647		}
1648		list_for_each_entry(page, &n->slabs_partial, lru) {
1649			active_objs += page->active;
1650			active_slabs++;
1651		}
1652		list_for_each_entry(page, &n->slabs_free, lru)
1653			num_slabs++;
1654
1655		free_objects += n->free_objects;
1656		spin_unlock_irqrestore(&n->list_lock, flags);
1657
1658		num_slabs += active_slabs;
1659		num_objs = num_slabs * cachep->num;
1660		printk(KERN_WARNING
1661			"  node %d: slabs: %ld/%ld, objs: %ld/%ld, free: %ld\n",
1662			node, active_slabs, num_slabs, active_objs, num_objs,
1663			free_objects);
1664	}
 
1665}
1666
1667/*
1668 * Interface to system's page allocator. No need to hold the cache-lock.
1669 *
1670 * If we requested dmaable memory, we will get it. Even if we
1671 * did not request dmaable memory, we might get it, but that
1672 * would be relatively rare and ignorable.
1673 */
1674static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
1675								int nodeid)
1676{
1677	struct page *page;
1678	int nr_pages;
1679
1680	flags |= cachep->allocflags;
1681	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1682		flags |= __GFP_RECLAIMABLE;
1683
1684	page = alloc_pages_exact_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
1685	if (!page) {
1686		if (!(flags & __GFP_NOWARN) && printk_ratelimit())
1687			slab_out_of_memory(cachep, flags, nodeid);
1688		return NULL;
1689	}
1690
1691	/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
1692	if (unlikely(page->pfmemalloc))
1693		pfmemalloc_active = true;
1694
1695	nr_pages = (1 << cachep->gfporder);
1696	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1697		add_zone_page_state(page_zone(page),
1698			NR_SLAB_RECLAIMABLE, nr_pages);
1699	else
1700		add_zone_page_state(page_zone(page),
1701			NR_SLAB_UNRECLAIMABLE, nr_pages);
1702	__SetPageSlab(page);
1703	if (page->pfmemalloc)
1704		SetPageSlabPfmemalloc(page);
1705	memcg_bind_pages(cachep, cachep->gfporder);
1706
1707	if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
1708		kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);
1709
1710		if (cachep->ctor)
1711			kmemcheck_mark_uninitialized_pages(page, nr_pages);
1712		else
1713			kmemcheck_mark_unallocated_pages(page, nr_pages);
1714	}
1715
1716	return page;
1717}
1718
1719/*
1720 * Interface to system's page release.
1721 */
1722static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
1723{
1724	const unsigned long nr_freed = (1 << cachep->gfporder);
1725
1726	kmemcheck_free_shadow(page, cachep->gfporder);
1727
1728	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1729		sub_zone_page_state(page_zone(page),
1730				NR_SLAB_RECLAIMABLE, nr_freed);
1731	else
1732		sub_zone_page_state(page_zone(page),
1733				NR_SLAB_UNRECLAIMABLE, nr_freed);
1734
1735	BUG_ON(!PageSlab(page));
1736	__ClearPageSlabPfmemalloc(page);
1737	__ClearPageSlab(page);
1738	page_mapcount_reset(page);
1739	page->mapping = NULL;
1740
1741	memcg_release_pages(cachep, cachep->gfporder);
1742	if (current->reclaim_state)
1743		current->reclaim_state->reclaimed_slab += nr_freed;
1744	__free_memcg_kmem_pages(page, cachep->gfporder);
 
1745}
1746
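/*
 * RCU callback used for SLAB_DESTROY_BY_RCU caches: the rcu_head lives in
 * struct page, so recover the page and its cache and give the pages back.
 */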
1747static void kmem_rcu_free(struct rcu_head *head)
1748{
1749	struct kmem_cache *cachep;
1750	struct page *page;
1751
1752	page = container_of(head, struct page, rcu_head);
1753	cachep = page->slab_cache;
1754
1755	kmem_freepages(cachep, page);
1756}
1757
1758#if DEBUG
1759
1760#ifdef CONFIG_DEBUG_PAGEALLOC
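/*
 * Store the caller, cpu id and a truncated stack trace inside the freed
 * object, so debug-pagealloc reports can show where it was last freed.
 */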
1761static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
1762			    unsigned long caller)
1763{
1764	int size = cachep->object_size;
1765
1766	addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];
1767
1768	if (size < 5 * sizeof(unsigned long))
1769		return;
1770
1771	*addr++ = 0x12345678;
1772	*addr++ = caller;
1773	*addr++ = smp_processor_id();
1774	size -= 3 * sizeof(unsigned long);
1775	{
1776		unsigned long *sptr = &caller;
1777		unsigned long svalue;
1778
1779		while (!kstack_end(sptr)) {
1780			svalue = *sptr++;
1781			if (kernel_text_address(svalue)) {
1782				*addr++ = svalue;
1783				size -= sizeof(unsigned long);
1784				if (size <= sizeof(unsigned long))
1785					break;
1786			}
1787		}
1788
1789	}
1790	*addr++ = 0x87654321;
1791}
1792#endif
1793
1794static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
1795{
1796	int size = cachep->object_size;
1797	addr = &((char *)addr)[obj_offset(cachep)];
1798
1799	memset(addr, val, size);
1800	*(unsigned char *)(addr + size - 1) = POISON_END;
1801}
1802
1803static void dump_line(char *data, int offset, int limit)
1804{
1805	int i;
1806	unsigned char error = 0;
1807	int bad_count = 0;
1808
1809	printk(KERN_ERR "%03x: ", offset);
1810	for (i = 0; i < limit; i++) {
1811		if (data[offset + i] != POISON_FREE) {
1812			error = data[offset + i];
1813			bad_count++;
1814		}
1815	}
1816	print_hex_dump(KERN_CONT, "", 0, 16, 1,
1817			&data[offset], limit, 1);
1818
1819	if (bad_count == 1) {
1820		error ^= POISON_FREE;
1821		if (!(error & (error - 1))) {
1822			printk(KERN_ERR "Single bit error detected. Probably "
1823					"bad RAM.\n");
1824#ifdef CONFIG_X86
1825			printk(KERN_ERR "Run memtest86+ or a similar memory "
1826					"test tool.\n");
1827#else
1828			printk(KERN_ERR "Run a memory test tool.\n");
1829#endif
1830		}
1831	}
1832}
1833#endif
1834
1835#if DEBUG
1836
1837static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
1838{
1839	int i, size;
1840	char *realobj;
1841
1842	if (cachep->flags & SLAB_RED_ZONE) {
1843		printk(KERN_ERR "Redzone: 0x%llx/0x%llx.\n",
1844			*dbg_redzone1(cachep, objp),
1845			*dbg_redzone2(cachep, objp));
1846	}
1847
1848	if (cachep->flags & SLAB_STORE_USER) {
1849		printk(KERN_ERR "Last user: [<%p>](%pSR)\n",
1850		       *dbg_userword(cachep, objp),
1851		       *dbg_userword(cachep, objp));
1852	}
1853	realobj = (char *)objp + obj_offset(cachep);
1854	size = cachep->object_size;
1855	for (i = 0; i < size && lines; i += 16, lines--) {
1856		int limit;
1857		limit = 16;
1858		if (i + limit > size)
1859			limit = size - i;
1860		dump_line(realobj, i, limit);
1861	}
1862}
1863
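/*
 * Verify that a poisoned (free) object still contains the poison pattern;
 * on a mismatch, hexdump up to five damaged lines and also print the
 * neighbouring objects to help spot an overrun from an adjacent object.
 */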
1864static void check_poison_obj(struct kmem_cache *cachep, void *objp)
1865{
1866	char *realobj;
1867	int size, i;
1868	int lines = 0;
1869
1870	realobj = (char *)objp + obj_offset(cachep);
1871	size = cachep->object_size;
1872
1873	for (i = 0; i < size; i++) {
1874		char exp = POISON_FREE;
1875		if (i == size - 1)
1876			exp = POISON_END;
1877		if (realobj[i] != exp) {
1878			int limit;
1879			/* Mismatch ! */
1880			/* Print header */
1881			if (lines == 0) {
1882				printk(KERN_ERR
1883					"Slab corruption (%s): %s start=%p, len=%d\n",
1884					print_tainted(), cachep->name, realobj, size);
1885				print_objinfo(cachep, objp, 0);
1886			}
1887			/* Hexdump the affected line */
1888			i = (i / 16) * 16;
1889			limit = 16;
1890			if (i + limit > size)
1891				limit = size - i;
1892			dump_line(realobj, i, limit);
1893			i += 16;
1894			lines++;
1895			/* Limit to 5 lines */
1896			if (lines > 5)
1897				break;
1898		}
1899	}
1900	if (lines != 0) {
1901		/* Print some data about the neighboring objects, if they
1902		 * exist:
1903		 */
1904		struct page *page = virt_to_head_page(objp);
1905		unsigned int objnr;
1906
1907		objnr = obj_to_index(cachep, page, objp);
1908		if (objnr) {
1909			objp = index_to_obj(cachep, page, objnr - 1);
1910			realobj = (char *)objp + obj_offset(cachep);
1911			printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
1912			       realobj, size);
1913			print_objinfo(cachep, objp, 2);
1914		}
1915		if (objnr + 1 < cachep->num) {
1916			objp = index_to_obj(cachep, page, objnr + 1);
1917			realobj = (char *)objp + obj_offset(cachep);
1918			printk(KERN_ERR "Next obj: start=%p, len=%d\n",
1919			       realobj, size);
1920			print_objinfo(cachep, objp, 2);
1921		}
1922	}
1923}
1924#endif
1925
1926#if DEBUG
1927static void slab_destroy_debugcheck(struct kmem_cache *cachep,
1928						struct page *page)
1929{
1930	int i;
1931	for (i = 0; i < cachep->num; i++) {
1932		void *objp = index_to_obj(cachep, page, i);
1933
1934		if (cachep->flags & SLAB_POISON) {
1935#ifdef CONFIG_DEBUG_PAGEALLOC
1936			if (cachep->size % PAGE_SIZE == 0 &&
1937					OFF_SLAB(cachep))
1938				kernel_map_pages(virt_to_page(objp),
1939					cachep->size / PAGE_SIZE, 1);
1940			else
1941				check_poison_obj(cachep, objp);
1942#else
1943			check_poison_obj(cachep, objp);
1944#endif
1945		}
1946		if (cachep->flags & SLAB_RED_ZONE) {
1947			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
1948				slab_error(cachep, "start of a freed object "
1949					   "was overwritten");
1950			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
1951				slab_error(cachep, "end of a freed object "
1952					   "was overwritten");
1953		}
1954	}
1955}
1956#else
1957static void slab_destroy_debugcheck(struct kmem_cache *cachep,
1958						struct page *page)
1959{
1960}
1961#endif
1962
1963/**
1964 * slab_destroy - destroy and release all objects in a slab
1965 * @cachep: cache pointer being destroyed
1966 * @page: page pointer being destroyed
1967 *
1968 * Destroy all the objs in a slab, and release the mem back to the system.
1969 * Before calling the slab must have been unlinked from the cache.  The
1970 * cache-lock is not held/needed.
1971 */
1972static void slab_destroy(struct kmem_cache *cachep, struct page *page)
1973{
1974	void *freelist;
1975
1976	freelist = page->freelist;
1977	slab_destroy_debugcheck(cachep, page);
1978	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
1979		struct rcu_head *head;
1980
1981		/*
1982		 * RCU free overloads the RCU head over the LRU.
1983		 * slab_page has been overloaded over the LRU,
1984		 * however it is not used from now on so that
1985		 * we can use it safely.
1986		 */
1987		head = (void *)&page->rcu_head;
1988		call_rcu(head, kmem_rcu_free);
1989
1990	} else {
1991		kmem_freepages(cachep, page);
1992	}
1993
1994	/*
1995	 * From now on, we don't use the freelist,
1996	 * although the actual page can be freed in RCU context.
1997	 */
1998	if (OFF_SLAB(cachep))
1999		kmem_cache_free(cachep->freelist_cache, freelist);
2000}
2001
2002/**
2003 * calculate_slab_order - calculate size (page order) of slabs
2004 * @cachep: pointer to the cache that is being created
2005 * @size: size of objects to be created in this cache.
2006 * @align: required alignment for the objects.
2007 * @flags: slab allocation flags
2008 *
2009 * Also calculates the number of objects per slab.
2010 *
2011 * This could be made much more intelligent.  For now, try to avoid using
2012 * high order pages for slabs.  When the gfp() functions are more friendly
2013 * towards high-order requests, this should be changed.
2014 */
2015static size_t calculate_slab_order(struct kmem_cache *cachep,
2016			size_t size, size_t align, unsigned long flags)
2017{
2018	unsigned long offslab_limit;
2019	size_t left_over = 0;
2020	int gfporder;
2021
2022	for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
2023		unsigned int num;
2024		size_t remainder;
2025
2026		cache_estimate(gfporder, size, align, flags, &remainder, &num);
2027		if (!num)
2028			continue;
2029
2030		/* Can't handle number of objects more than SLAB_OBJ_MAX_NUM */
2031		if (num > SLAB_OBJ_MAX_NUM)
2032			break;
2033
2034		if (flags & CFLGS_OFF_SLAB) {
2035			/*
2036			 * Max number of objs-per-slab for caches which
2037			 * use off-slab slabs. Needed to avoid a possible
2038			 * looping condition in cache_grow().
2039			 */
2040			offslab_limit = size;
2041			offslab_limit /= sizeof(freelist_idx_t);
2042
2043			if (num > offslab_limit)
2044				break;
 
2045		}
2046
2047		/* Found something acceptable - save it away */
2048		cachep->num = num;
2049		cachep->gfporder = gfporder;
2050		left_over = remainder;
2051
2052		/*
2053		 * A VFS-reclaimable slab tends to have most allocations
2054		 * as GFP_NOFS and we really don't want to have to be allocating
2055		 * higher-order pages when we are unable to shrink dcache.
2056		 */
2057		if (flags & SLAB_RECLAIM_ACCOUNT)
2058			break;
2059
2060		/*
2061		 * Large number of objects is good, but very large slabs are
2062		 * currently bad for the gfp()s.
2063		 */
2064		if (gfporder >= slab_max_order)
2065			break;
2066
2067		/*
2068		 * Acceptable internal fragmentation?
2069		 */
2070		if (left_over * 8 <= (PAGE_SIZE << gfporder))
2071			break;
2072	}
2073	return left_over;
2074}
2075
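/*
 * Set up the per-cpu array cache and per-node lists for a new cache.
 * During early boot this hands out statically allocated structures and
 * advances slab_state step by step; once the allocator is fully up it
 * simply calls enable_cpucache().
 */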
2076static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
2077{
2078	if (slab_state >= FULL)
2079		return enable_cpucache(cachep, gfp);
2080
2081	if (slab_state == DOWN) {
2082		/*
2083		 * Note: Creation of first cache (kmem_cache).
2084		 * The setup_node is taken care
2085		 * of by the caller of __kmem_cache_create
2086		 */
2087		cachep->array[smp_processor_id()] = &initarray_generic.cache;
2088		slab_state = PARTIAL;
2089	} else if (slab_state == PARTIAL) {
2090		/*
2091		 * Note: the second kmem_cache_create must create the cache
2092		 * that's used by kmalloc(24), otherwise the creation of
2093		 * further caches will BUG().
2094		 */
2095		cachep->array[smp_processor_id()] = &initarray_generic.cache;
2096
2097		/*
2098		 * If the cache that's used by kmalloc(sizeof(kmem_cache_node)) is
2099		 * the second cache, then we need to set up all its kmem_cache_node structures,
2100		 * otherwise the creation of further caches will BUG().
2101		 */
2102		set_up_node(cachep, SIZE_AC);
2103		if (INDEX_AC == INDEX_NODE)
2104			slab_state = PARTIAL_NODE;
2105		else
2106			slab_state = PARTIAL_ARRAYCACHE;
2107	} else {
2108		/* Remaining boot caches */
2109		cachep->array[smp_processor_id()] =
2110			kmalloc(sizeof(struct arraycache_init), gfp);
2111
2112		if (slab_state == PARTIAL_ARRAYCACHE) {
2113			set_up_node(cachep, SIZE_NODE);
2114			slab_state = PARTIAL_NODE;
2115		} else {
2116			int node;
2117			for_each_online_node(node) {
2118				cachep->node[node] =
2119				    kmalloc_node(sizeof(struct kmem_cache_node),
2120						gfp, node);
2121				BUG_ON(!cachep->node[node]);
2122				kmem_cache_node_init(cachep->node[node]);
2123			}
2124		}
2125	}
 
2126	cachep->node[numa_mem_id()]->next_reap =
2127			jiffies + REAPTIMEOUT_NODE +
2128			((unsigned long)cachep) % REAPTIMEOUT_NODE;
2129
2130	cpu_cache_get(cachep)->avail = 0;
2131	cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
2132	cpu_cache_get(cachep)->batchcount = 1;
2133	cpu_cache_get(cachep)->touched = 0;
2134	cachep->batchcount = 1;
2135	cachep->limit = BOOT_CPUCACHE_ENTRIES;
2136	return 0;
2137}
2138
2139/**
2140 * __kmem_cache_create - Create a cache.
2141 * @cachep: cache management descriptor
2142 * @flags: SLAB flags
2143 *
2144 * Returns a ptr to the cache on success, NULL on failure.
2145 * Cannot be called within an interrupt, but can be interrupted.
2146 * The @ctor is run when new pages are allocated by the cache.
2147 *
2148 * The flags are
2149 *
2150 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
2151 * to catch references to uninitialised memory.
2152 *
2153 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
2154 * for buffer overruns.
2155 *
2156 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
2157 * cacheline.  This can be beneficial if you're counting cycles as closely
2158 * as davem.
2159 */
2160int
2161__kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
2162{
2163	size_t left_over, freelist_size, ralign;
2164	gfp_t gfp;
2165	int err;
2166	size_t size = cachep->size;
2167
2168#if DEBUG
2169#if FORCED_DEBUG
2170	/*
2171	 * Enable redzoning and last user accounting, except for caches with
2172	 * large objects, if the increased size would increase the object size
2173	 * above the next power of two: caches with object sizes just above a
2174	 * power of two have a significant amount of internal fragmentation.
2175	 */
2176	if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
2177						2 * sizeof(unsigned long long)))
2178		flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
2179	if (!(flags & SLAB_DESTROY_BY_RCU))
2180		flags |= SLAB_POISON;
2181#endif
2182	if (flags & SLAB_DESTROY_BY_RCU)
2183		BUG_ON(flags & SLAB_POISON);
2184#endif
2185
2186	/*
2187	 * Check that size is in terms of words.  This is needed to avoid
2188	 * unaligned accesses for some archs when redzoning is used, and makes
2189	 * sure any on-slab bufctl's are also correctly aligned.
2190	 */
2191	if (size & (BYTES_PER_WORD - 1)) {
2192		size += (BYTES_PER_WORD - 1);
2193		size &= ~(BYTES_PER_WORD - 1);
2194	}
2195
2196	/*
2197	 * Redzoning and user store require word alignment or possibly larger.
2198	 * Note this will be overridden by architecture or caller mandated
2199	 * alignment if either is greater than BYTES_PER_WORD.
2200	 */
2201	if (flags & SLAB_STORE_USER)
2202		ralign = BYTES_PER_WORD;
2203
2204	if (flags & SLAB_RED_ZONE) {
2205		ralign = REDZONE_ALIGN;
2206		/* If redzoning, ensure that the second redzone is suitably
2207		 * aligned, by adjusting the object size accordingly. */
2208		size += REDZONE_ALIGN - 1;
2209		size &= ~(REDZONE_ALIGN - 1);
2210	}
2211
2212	/* 3) caller mandated alignment */
2213	if (ralign < cachep->align) {
2214		ralign = cachep->align;
2215	}
2216	/* disable debug if necessary */
2217	if (ralign > __alignof__(unsigned long long))
2218		flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
2219	/*
2220	 * 4) Store it.
2221	 */
2222	cachep->align = ralign;
2223
2224	if (slab_is_available())
2225		gfp = GFP_KERNEL;
2226	else
2227		gfp = GFP_NOWAIT;
2228
2229	setup_node_pointer(cachep);
2230#if DEBUG
2231
2232	/*
2233	 * Both debugging options require word-alignment which is calculated
2234	 * into align above.
2235	 */
2236	if (flags & SLAB_RED_ZONE) {
2237		/* add space for red zone words */
2238		cachep->obj_offset += sizeof(unsigned long long);
2239		size += 2 * sizeof(unsigned long long);
2240	}
2241	if (flags & SLAB_STORE_USER) {
2242		/* user store requires one word storage behind the end of
2243		 * the real object. But if the second red zone needs to be
2244		 * aligned to 64 bits, we must allow that much space.
2245		 */
2246		if (flags & SLAB_RED_ZONE)
2247			size += REDZONE_ALIGN;
2248		else
2249			size += BYTES_PER_WORD;
2250	}
2251#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
2252	if (size >= kmalloc_size(INDEX_NODE + 1)
2253	    && cachep->object_size > cache_line_size()
2254	    && ALIGN(size, cachep->align) < PAGE_SIZE) {
2255		cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align);
2256		size = PAGE_SIZE;
2257	}
2258#endif
2259#endif
2260
2261	/*
2262	 * Determine if the slab management is 'on' or 'off' slab.
2263	 * (bootstrapping cannot cope with offslab caches so don't do
2264	 * it too early on. Always use on-slab management when
2265	 * SLAB_NOLEAKTRACE is set, to avoid recursive calls into kmemleak)
2266	 */
2267	if ((size >= (PAGE_SIZE >> 5)) && !slab_early_init &&
2268	    !(flags & SLAB_NOLEAKTRACE))
2269		/*
2270		 * Size is large, assume best to place the slab management obj
2271		 * off-slab (should allow better packing of objs).
2272		 */
2273		flags |= CFLGS_OFF_SLAB;
2274
2275	size = ALIGN(size, cachep->align);
2276	/*
2277	 * We should restrict the number of objects in a slab to implement
2278	 * a byte-sized index. See the comment on the SLAB_OBJ_MIN_SIZE definition.
2279	 */
2280	if (FREELIST_BYTE_INDEX && size < SLAB_OBJ_MIN_SIZE)
2281		size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align);
2282
2283	left_over = calculate_slab_order(cachep, size, cachep->align, flags);
2284
2285	if (!cachep->num)
2286		return -E2BIG;
2287
2288	freelist_size =
2289		ALIGN(cachep->num * sizeof(freelist_idx_t), cachep->align);
2290
2291	/*
2292	 * If the slab has been placed off-slab, and we have enough space then
2293	 * move it on-slab. This is at the expense of any extra colouring.
2294	 */
2295	if (flags & CFLGS_OFF_SLAB && left_over >= freelist_size) {
2296		flags &= ~CFLGS_OFF_SLAB;
2297		left_over -= freelist_size;
2298	}
 
2299
2300	if (flags & CFLGS_OFF_SLAB) {
2301		/* really off slab. No need for manual alignment */
2302		freelist_size = cachep->num * sizeof(freelist_idx_t);
2303
2304#ifdef CONFIG_PAGE_POISONING
2305		/* If we're going to use the generic kernel_map_pages()
2306		 * poisoning, then it's going to smash the contents of
2307		 * the redzone and userword anyhow, so switch them off.
2308		 */
2309		if (size % PAGE_SIZE == 0 && flags & SLAB_POISON)
2310			flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
2311#endif
2312	}
2313
2314	cachep->colour_off = cache_line_size();
2315	/* Offset must be a multiple of the alignment. */
2316	if (cachep->colour_off < cachep->align)
2317		cachep->colour_off = cachep->align;
2318	cachep->colour = left_over / cachep->colour_off;
2319	cachep->freelist_size = freelist_size;
 
2320	cachep->flags = flags;
2321	cachep->allocflags = __GFP_COMP;
2322	if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
2323		cachep->allocflags |= GFP_DMA;
2324	cachep->size = size;
2325	cachep->reciprocal_buffer_size = reciprocal_value(size);
2326
2327	if (flags & CFLGS_OFF_SLAB) {
2328		cachep->freelist_cache = kmalloc_slab(freelist_size, 0u);
2329		/*
2330		 * This is a possibility for one of the kmalloc_{dma,}_caches.
2331		 * But since we go off slab only for object size greater than
2332		 * PAGE_SIZE/8, and kmalloc_{dma,}_caches get created
2333		 * in ascending order, this should not happen at all.
2334		 * But leave a BUG_ON for some lucky dude.
2335		 */
2336		BUG_ON(ZERO_OR_NULL_PTR(cachep->freelist_cache));
2337	}
2338
2339	err = setup_cpu_cache(cachep, gfp);
2340	if (err) {
2341		__kmem_cache_shutdown(cachep);
2342		return err;
2343	}
2344
2345	if (flags & SLAB_DEBUG_OBJECTS) {
2346		/*
2347		 * Would deadlock through slab_destroy()->call_rcu()->
2348		 * debug_object_activate()->kmem_cache_alloc().
2349		 */
2350		WARN_ON_ONCE(flags & SLAB_DESTROY_BY_RCU);
2351
2352		slab_set_debugobj_lock_classes(cachep);
2353	} else if (!OFF_SLAB(cachep) && !(flags & SLAB_DESTROY_BY_RCU))
2354		on_slab_lock_classes(cachep);
2355
2356	return 0;
2357}
2358
2359#if DEBUG
2360static void check_irq_off(void)
2361{
2362	BUG_ON(!irqs_disabled());
2363}
2364
2365static void check_irq_on(void)
2366{
2367	BUG_ON(irqs_disabled());
2368}
2369
2370static void check_spinlock_acquired(struct kmem_cache *cachep)
2371{
2372#ifdef CONFIG_SMP
2373	check_irq_off();
2374	assert_spin_locked(&cachep->node[numa_mem_id()]->list_lock);
2375#endif
2376}
2377
2378static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
2379{
2380#ifdef CONFIG_SMP
2381	check_irq_off();
2382	assert_spin_locked(&cachep->node[node]->list_lock);
2383#endif
2384}
2385
2386#else
2387#define check_irq_off()	do { } while(0)
2388#define check_irq_on()	do { } while(0)
 
2389#define check_spinlock_acquired(x) do { } while(0)
2390#define check_spinlock_acquired_node(x, y) do { } while(0)
2391#endif
2392
2393static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
2394			struct array_cache *ac,
2395			int force, int node);
2396
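/*
 * IPI handler used by drain_cpu_caches(): flush this CPU's array cache
 * back onto the node's slab lists. Runs with interrupts disabled on each
 * CPU via on_each_cpu().
 */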
2397static void do_drain(void *arg)
2398{
2399	struct kmem_cache *cachep = arg;
2400	struct array_cache *ac;
2401	int node = numa_mem_id();
2402
2403	check_irq_off();
2404	ac = cpu_cache_get(cachep);
2405	spin_lock(&cachep->node[node]->list_lock);
2406	free_block(cachep, ac->entry, ac->avail, node);
2407	spin_unlock(&cachep->node[node]->list_lock);
 
2408	ac->avail = 0;
 
2409}
2410
2411static void drain_cpu_caches(struct kmem_cache *cachep)
2412{
2413	struct kmem_cache_node *n;
2414	int node;
 
2415
2416	on_each_cpu(do_drain, cachep, 1);
2417	check_irq_on();
2418	for_each_online_node(node) {
2419		n = cachep->node[node];
2420		if (n && n->alien)
2421			drain_alien_cache(cachep, n->alien);
2422	}
2423
2424	for_each_online_node(node) {
2425		n = cachep->node[node];
2426		if (n)
2427			drain_array(cachep, n, n->shared, 1, node);
2428	}
2429}
2430
2431/*
2432 * Remove slabs from the list of free slabs.
2433 * Specify the number of slabs to drain in tofree.
2434 *
2435 * Returns the actual number of slabs released.
2436 */
2437static int drain_freelist(struct kmem_cache *cache,
2438			struct kmem_cache_node *n, int tofree)
2439{
2440	struct list_head *p;
2441	int nr_freed;
2442	struct page *page;
2443
2444	nr_freed = 0;
2445	while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
2446
2447		spin_lock_irq(&n->list_lock);
2448		p = n->slabs_free.prev;
2449		if (p == &n->slabs_free) {
2450			spin_unlock_irq(&n->list_lock);
2451			goto out;
2452		}
2453
2454		page = list_entry(p, struct page, lru);
2455#if DEBUG
2456		BUG_ON(page->active);
2457#endif
2458		list_del(&page->lru);
2459		/*
2460		 * Safe to drop the lock. The slab is no longer linked
2461		 * to the cache.
2462		 */
2463		n->free_objects -= cache->num;
2464		spin_unlock_irq(&n->list_lock);
2465		slab_destroy(cache, page);
2466		nr_freed++;
2467	}
2468out:
2469	return nr_freed;
2470}
2471
2472/* Called with slab_mutex held to protect against cpu hotplug */
2473static int __cache_shrink(struct kmem_cache *cachep)
2474{
2475	int ret = 0, i = 0;
 
2476	struct kmem_cache_node *n;
2477
2478	drain_cpu_caches(cachep);
2479
2480	check_irq_on();
2481	for_each_online_node(i) {
2482		n = cachep->node[i];
2483		if (!n)
2484			continue;
2485
2486		drain_freelist(cachep, n, slabs_tofree(cachep, n));
2487
2488		ret += !list_empty(&n->slabs_full) ||
2489			!list_empty(&n->slabs_partial);
2490	}
2491	return (ret ? 1 : 0);
2492}
2493
2494/**
2495 * kmem_cache_shrink - Shrink a cache.
2496 * @cachep: The cache to shrink.
2497 *
2498 * Releases as many slabs as possible for a cache.
2499 * To help debugging, a zero exit status indicates all slabs were released.
2500 */
2501int kmem_cache_shrink(struct kmem_cache *cachep)
2502{
2503	int ret;
2504	BUG_ON(!cachep || in_interrupt());
2505
2506	get_online_cpus();
2507	mutex_lock(&slab_mutex);
2508	ret = __cache_shrink(cachep);
2509	mutex_unlock(&slab_mutex);
2510	put_online_cpus();
2511	return ret;
2512}
2513EXPORT_SYMBOL(kmem_cache_shrink);
2514
2515int __kmem_cache_shutdown(struct kmem_cache *cachep)
2516{
2517	int i;
2518	struct kmem_cache_node *n;
2519	int rc = __cache_shrink(cachep);
2520
2521	if (rc)
2522		return rc;
2523
2524	for_each_online_cpu(i)
2525	    kfree(cachep->array[i]);
2526
2527	/* NUMA: free the node structures */
2528	for_each_online_node(i) {
2529		n = cachep->node[i];
2530		if (n) {
2531			kfree(n->shared);
2532			free_alien_cache(n->alien);
2533			kfree(n);
2534		}
2535	}
2536	return 0;
2537}
2538
2539/*
2540 * Get the memory for a slab management obj.
2541 *
2542 * For a slab cache, when the slab descriptor is off-slab, the
2543 * slab descriptor can't come from the same cache which is being created,
2544 * because if that were the case, it would mean we defer the creation of
2545 * the kmalloc_{dma,}_cache of size sizeof(slab descriptor) to this point.
2546 * And we eventually call down to __kmem_cache_create(), which
2547 * in turn looks up in the kmalloc_{dma,}_caches for the desired-size one.
2548 * This is a "chicken-and-egg" problem.
2549 *
2550 * So the off-slab slab descriptor shall come from the kmalloc_{dma,}_caches,
2551 * which are all initialized during kmem_cache_init().
2552 */
2553static void *alloc_slabmgmt(struct kmem_cache *cachep,
2554				   struct page *page, int colour_off,
2555				   gfp_t local_flags, int nodeid)
2556{
2557	void *freelist;
2558	void *addr = page_address(page);
2559
2560	if (OFF_SLAB(cachep)) {
2561		/* Slab management obj is off-slab. */
2562		freelist = kmem_cache_alloc_node(cachep->freelist_cache,
2563					      local_flags, nodeid);
2564		if (!freelist)
2565			return NULL;
2566	} else {
2567		freelist = addr + colour_off;
2568		colour_off += cachep->freelist_size;
 
2569	}
2570	page->active = 0;
2571	page->s_mem = addr + colour_off;
2572	return freelist;
2573}
2574
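/* The slab freelist is an array of object indexes stored at page->freelist. */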
2575static inline freelist_idx_t get_free_obj(struct page *page, unsigned int idx)
2576{
2577	return ((freelist_idx_t *)page->freelist)[idx];
2578}
2579
2580static inline void set_free_obj(struct page *page,
2581					unsigned int idx, freelist_idx_t val)
2582{
2583	((freelist_idx_t *)(page->freelist))[idx] = val;
2584}
2585
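/*
 * Initialise every object in a freshly grown slab: apply debug poisoning,
 * red zones and user-tracking words where configured, run the constructor,
 * and build the initial freelist of object indexes.
 */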
2586static void cache_init_objs(struct kmem_cache *cachep,
2587			    struct page *page)
2588{
 
2589	int i;
2590
2591	for (i = 0; i < cachep->num; i++) {
2592		void *objp = index_to_obj(cachep, page, i);
2593#if DEBUG
2594		/* need to poison the objs? */
2595		if (cachep->flags & SLAB_POISON)
2596			poison_obj(cachep, objp, POISON_FREE);
2597		if (cachep->flags & SLAB_STORE_USER)
2598			*dbg_userword(cachep, objp) = NULL;
2599
2600		if (cachep->flags & SLAB_RED_ZONE) {
2601			*dbg_redzone1(cachep, objp) = RED_INACTIVE;
2602			*dbg_redzone2(cachep, objp) = RED_INACTIVE;
2603		}
2604		/*
2605		 * Constructors are not allowed to allocate memory from the same
2606		 * cache which they are a constructor for.  Otherwise, deadlock.
2607		 * They must also be threaded.
2608		 */
2609		if (cachep->ctor && !(cachep->flags & SLAB_POISON))
2610			cachep->ctor(objp + obj_offset(cachep));
2611
2612		if (cachep->flags & SLAB_RED_ZONE) {
2613			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
2614				slab_error(cachep, "constructor overwrote the"
2615					   " end of an object");
2616			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
2617				slab_error(cachep, "constructor overwrote the"
2618					   " start of an object");
2619		}
2620		if ((cachep->size % PAGE_SIZE) == 0 &&
2621			    OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
2622			kernel_map_pages(virt_to_page(objp),
2623					 cachep->size / PAGE_SIZE, 0);
2624#else
2625		if (cachep->ctor)
2626			cachep->ctor(objp);
2627#endif
2628		set_free_obj(page, i, i);
2629	}
 
2630}
2631
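/*
 * Check that GFP_DMA requests are only made against caches created with
 * SLAB_CACHE_DMA, and vice versa.
 */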
2632static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
 
2633{
2634	if (CONFIG_ZONE_DMA_FLAG) {
2635		if (flags & GFP_DMA)
2636			BUG_ON(!(cachep->allocflags & GFP_DMA));
2637		else
2638			BUG_ON(cachep->allocflags & GFP_DMA);
2639	}
2640}
2641
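/* Hand out the next free object of a slab, advancing page->active. */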
2642static void *slab_get_obj(struct kmem_cache *cachep, struct page *page,
2643				int nodeid)
2644{
2645	void *objp;
2646
2647	objp = index_to_obj(cachep, page, get_free_obj(page, page->active));
2648	page->active++;
2649#if DEBUG
2650	WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid);
2651#endif
2652
2653	return objp;
2654}
2655
2656static void slab_put_obj(struct kmem_cache *cachep, struct page *page,
2657				void *objp, int nodeid)
2658{
2659	unsigned int objnr = obj_to_index(cachep, page, objp);
2660#if DEBUG
2661	unsigned int i;
2662
2663	/* Verify that the slab belongs to the intended node */
2664	WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid);
2665
2666	/* Verify double free bug */
2667	for (i = page->active; i < cachep->num; i++) {
2668		if (get_free_obj(page, i) == objnr) {
2669			printk(KERN_ERR "slab: double free detected in cache "
2670					"'%s', objp %p\n", cachep->name, objp);
2671			BUG();
2672		}
2673	}
2674#endif
2675	page->active--;
2676	set_free_obj(page, page->active, objnr);
2677}
2678
2679/*
2680 * Map pages beginning at addr to the given cache and slab. This is required
2681 * for the slab allocator to be able to lookup the cache and slab of a
2682 * virtual address for kfree, ksize, and slab debugging.
2683 */
2684static void slab_map_pages(struct kmem_cache *cache, struct page *page,
2685			   void *freelist)
2686{
2687	page->slab_cache = cache;
2688	page->freelist = freelist;
2689}
2690
2691/*
2692 * Grow (by 1) the number of slabs within a cache.  This is called by
2693 * kmem_cache_alloc() when there are no active objs left in a cache.
2694 */
2695static int cache_grow(struct kmem_cache *cachep,
2696		gfp_t flags, int nodeid, struct page *page)
2697{
2698	void *freelist;
2699	size_t offset;
2700	gfp_t local_flags;
 
2701	struct kmem_cache_node *n;
 
2702
2703	/*
2704	 * Be lazy and only check for valid flags here,  keeping it out of the
2705	 * critical path in kmem_cache_alloc().
2706	 */
2707	BUG_ON(flags & GFP_SLAB_BUG_MASK);
2708	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
2709
2710	/* Take the node list lock to change the colour_next on this node */
2711	check_irq_off();
2712	n = cachep->node[nodeid];
2713	spin_lock(&n->list_lock);
2714
2715	/* Get colour for the slab, and calculate the next value. */
2716	offset = n->colour_next;
2717	n->colour_next++;
2718	if (n->colour_next >= cachep->colour)
2719		n->colour_next = 0;
2720	spin_unlock(&n->list_lock);
2721
2722	offset *= cachep->colour_off;
2723
2724	if (local_flags & __GFP_WAIT)
2725		local_irq_enable();
2726
2727	/*
2728	 * The test for missing atomic flag is performed here, rather than
2729	 * the more obvious place, simply to reduce the critical path length
2730	 * in kmem_cache_alloc(). If a caller is seriously mis-behaving they
2731	 * will eventually be caught here (where it matters).
2732	 */
2733	kmem_flagcheck(cachep, flags);
2734
2735	/*
2736	 * Get mem for the objs.  Attempt to allocate a physical page from
2737	 * 'nodeid'.
2738	 */
2739	if (!page)
2740		page = kmem_getpages(cachep, local_flags, nodeid);
2741	if (!page)
2742		goto failed;
2743
2744	/* Get slab management. */
2745	freelist = alloc_slabmgmt(cachep, page, offset,
2746			local_flags & ~GFP_CONSTRAINT_MASK, nodeid);
2747	if (!freelist)
2748		goto opps1;
2749
2750	slab_map_pages(cachep, page, freelist);
2751
2752	cache_init_objs(cachep, page);
2753
2754	if (local_flags & __GFP_WAIT)
2755		local_irq_disable();
2756	check_irq_off();
2757	spin_lock(&n->list_lock);
2758
2759	/* Make slab active. */
2760	list_add_tail(&page->lru, &(n->slabs_free));
2761	STATS_INC_GROWN(cachep);
2762	n->free_objects += cachep->num;
2763	spin_unlock(&n->list_lock);
2764	return 1;
2765opps1:
2766	kmem_freepages(cachep, page);
2767failed:
2768	if (local_flags & __GFP_WAIT)
2769		local_irq_disable();
2770	return 0;
2771}
2772
2773#if DEBUG
2774
2775/*
2776 * Perform extra freeing checks:
2777 * - detect bad pointers.
2778 * - POISON/RED_ZONE checking
2779 */
2780static void kfree_debugcheck(const void *objp)
2781{
2782	if (!virt_addr_valid(objp)) {
2783		printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n",
2784		       (unsigned long)objp);
2785		BUG();
2786	}
2787}
2788
2789static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
2790{
2791	unsigned long long redzone1, redzone2;
2792
2793	redzone1 = *dbg_redzone1(cache, obj);
2794	redzone2 = *dbg_redzone2(cache, obj);
2795
2796	/*
2797	 * Redzone is ok.
2798	 */
2799	if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
2800		return;
2801
2802	if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
2803		slab_error(cache, "double free detected");
2804	else
2805		slab_error(cache, "memory outside object was overwritten");
2806
2807	printk(KERN_ERR "%p: redzone 1:0x%llx, redzone 2:0x%llx.\n",
2808			obj, redzone1, redzone2);
2809}
2810
2811static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
2812				   unsigned long caller)
2813{
2814	unsigned int objnr;
2815	struct page *page;
2816
2817	BUG_ON(virt_to_cache(objp) != cachep);
2818
2819	objp -= obj_offset(cachep);
2820	kfree_debugcheck(objp);
2821	page = virt_to_head_page(objp);
2822
2823	if (cachep->flags & SLAB_RED_ZONE) {
2824		verify_redzone_free(cachep, objp);
2825		*dbg_redzone1(cachep, objp) = RED_INACTIVE;
2826		*dbg_redzone2(cachep, objp) = RED_INACTIVE;
2827	}
2828	if (cachep->flags & SLAB_STORE_USER)
2829		*dbg_userword(cachep, objp) = (void *)caller;
2830
2831	objnr = obj_to_index(cachep, page, objp);
2832
2833	BUG_ON(objnr >= cachep->num);
2834	BUG_ON(objp != index_to_obj(cachep, page, objnr));
2835
2836	if (cachep->flags & SLAB_POISON) {
2837#ifdef CONFIG_DEBUG_PAGEALLOC
2838		if ((cachep->size % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) {
2839			store_stackinfo(cachep, objp, caller);
2840			kernel_map_pages(virt_to_page(objp),
2841					 cachep->size / PAGE_SIZE, 0);
2842		} else {
2843			poison_obj(cachep, objp, POISON_FREE);
2844		}
2845#else
2846		poison_obj(cachep, objp, POISON_FREE);
2847#endif
2848	}
2849	return objp;
2850}
2851
2852#else
2853#define kfree_debugcheck(x) do { } while(0)
2854#define cache_free_debugcheck(x,objp,z) (objp)
2855#endif
2856
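/*
 * Slow path of the per-cpu allocation: refill the array cache from the
 * node's shared array or from partial/free slabs, growing the cache with
 * fresh pages if nothing is available.
 */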
2857static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags,
2858							bool force_refill)
2859{
2860	int batchcount;
2861	struct kmem_cache_node *n;
2862	struct array_cache *ac;
2863	int node;
2864
2865	check_irq_off();
2866	node = numa_mem_id();
2867	if (unlikely(force_refill))
2868		goto force_grow;
2869retry:
2870	ac = cpu_cache_get(cachep);
2871	batchcount = ac->batchcount;
2872	if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
2873		/*
2874		 * If there was little recent activity on this cache, then
2875		 * perform only a partial refill.  Otherwise we could generate
2876		 * refill bouncing.
2877		 */
2878		batchcount = BATCHREFILL_LIMIT;
2879	}
2880	n = cachep->node[node];
2881
2882	BUG_ON(ac->avail > 0 || !n);
2883	spin_lock(&n->list_lock);
 
2884
2885	/* See if we can refill from the shared array */
2886	if (n->shared && transfer_objects(ac, n->shared, batchcount)) {
2887		n->shared->touched = 1;
2888		goto alloc_done;
2889	}
2890
2891	while (batchcount > 0) {
2892		struct list_head *entry;
2893		struct page *page;
2894		/* Get the slab the allocation is to come from. */
2895		entry = n->slabs_partial.next;
2896		if (entry == &n->slabs_partial) {
2897			n->free_touched = 1;
2898			entry = n->slabs_free.next;
2899			if (entry == &n->slabs_free)
2900				goto must_grow;
2901		}
2902
2903		page = list_entry(entry, struct page, lru);
2904		check_spinlock_acquired(cachep);
2905
2906		/*
2907		 * The slab was either on partial or free list so
2908		 * there must be at least one object available for
2909		 * allocation.
2910		 */
2911		BUG_ON(page->active >= cachep->num);
2912
2913		while (page->active < cachep->num && batchcount--) {
2914			STATS_INC_ALLOCED(cachep);
2915			STATS_INC_ACTIVE(cachep);
2916			STATS_SET_HIGH(cachep);
2917
2918			ac_put_obj(cachep, ac, slab_get_obj(cachep, page,
2919									node));
2920		}
2921
2922		/* move slab to correct slab list: */
2923		list_del(&page->lru);
2924		if (page->active == cachep->num)
2925			list_add(&page->lru, &n->slabs_full);
2926		else
2927			list_add(&page->lru, &n->slabs_partial);
2928	}
2929
2930must_grow:
2931	n->free_objects -= ac->avail;
2932alloc_done:
2933	spin_unlock(&n->list_lock);
 
2934
 
2935	if (unlikely(!ac->avail)) {
2936		int x;
2937force_grow:
2938		x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL);
2939
2940		/* cache_grow can reenable interrupts, then ac could change. */
2941		ac = cpu_cache_get(cachep);
2942		node = numa_mem_id();
2943
2944		/* no objects in sight? abort */
2945		if (!x && (ac->avail == 0 || force_refill))
2946			return NULL;
2947
2948		if (!ac->avail)		/* objects refilled by interrupt? */
2949			goto retry;
2950	}
2951	ac->touched = 1;
2952
2953	return ac_get_obj(cachep, ac, flags, force_refill);
2954}
2955
2956static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
2957						gfp_t flags)
2958{
2959	might_sleep_if(flags & __GFP_WAIT);
2960#if DEBUG
2961	kmem_flagcheck(cachep, flags);
2962#endif
2963}
2964
2965#if DEBUG
2966static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
2967				gfp_t flags, void *objp, unsigned long caller)
2968{
 
2969	if (!objp)
2970		return objp;
2971	if (cachep->flags & SLAB_POISON) {
2972#ifdef CONFIG_DEBUG_PAGEALLOC
2973		if ((cachep->size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
2974			kernel_map_pages(virt_to_page(objp),
2975					 cachep->size / PAGE_SIZE, 1);
2976		else
2977			check_poison_obj(cachep, objp);
2978#else
2979		check_poison_obj(cachep, objp);
2980#endif
2981		poison_obj(cachep, objp, POISON_INUSE);
2982	}
2983	if (cachep->flags & SLAB_STORE_USER)
2984		*dbg_userword(cachep, objp) = (void *)caller;
2985
2986	if (cachep->flags & SLAB_RED_ZONE) {
2987		if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
2988				*dbg_redzone2(cachep, objp) != RED_INACTIVE) {
2989			slab_error(cachep, "double free, or memory outside"
2990						" object was overwritten");
2991			printk(KERN_ERR
2992				"%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
2993				objp, *dbg_redzone1(cachep, objp),
2994				*dbg_redzone2(cachep, objp));
2995		}
2996		*dbg_redzone1(cachep, objp) = RED_ACTIVE;
2997		*dbg_redzone2(cachep, objp) = RED_ACTIVE;
2998	}
 
2999	objp += obj_offset(cachep);
3000	if (cachep->ctor && cachep->flags & SLAB_POISON)
3001		cachep->ctor(objp);
3002	if (ARCH_SLAB_MINALIGN &&
3003	    ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) {
3004		printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
3005		       objp, (int)ARCH_SLAB_MINALIGN);
3006	}
3007	return objp;
3008}
3009#else
3010#define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
3011#endif
3012
3013static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags)
3014{
3015	if (cachep == kmem_cache)
3016		return false;
3017
3018	return should_failslab(cachep->object_size, flags, cachep->flags);
3019}
3020
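/*
 * Fast path: take an object straight from this CPU's array cache and fall
 * back to cache_alloc_refill() when it is empty (or when all cached
 * objects are pfmemalloc-reserved and the request may not use them).
 */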
3021static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3022{
3023	void *objp;
3024	struct array_cache *ac;
3025	bool force_refill = false;
3026
3027	check_irq_off();
3028
3029	ac = cpu_cache_get(cachep);
3030	if (likely(ac->avail)) {
3031		ac->touched = 1;
3032		objp = ac_get_obj(cachep, ac, flags, false);
3033
3034		/*
3035		 * Allow for the possibility all avail objects are not allowed
3036		 * by the current flags
3037		 */
3038		if (objp) {
3039			STATS_INC_ALLOCHIT(cachep);
3040			goto out;
3041		}
3042		force_refill = true;
3043	}
3044
3045	STATS_INC_ALLOCMISS(cachep);
3046	objp = cache_alloc_refill(cachep, flags, force_refill);
3047	/*
3048	 * the 'ac' may be updated by cache_alloc_refill(),
3049	 * and kmemleak_erase() requires its correct value.
3050	 */
3051	ac = cpu_cache_get(cachep);
3052
3053out:
3054	/*
3055	 * To avoid a false negative, if an object that is in one of the
3056	 * per-CPU caches is leaked, we need to make sure kmemleak doesn't
3057	 * treat the array pointers as a reference to the object.
3058	 */
3059	if (objp)
3060		kmemleak_erase(&ac->entry[ac->avail]);
3061	return objp;
3062}
3063
3064#ifdef CONFIG_NUMA
3065/*
3066 * Try allocating on another node if PF_SPREAD_SLAB or a mempolicy is set.
3067 *
3068 * If we are in_interrupt, then process context, including cpusets and
3069 * mempolicy, may not apply and should not be used for allocation policy.
3070 */
3071static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
3072{
3073	int nid_alloc, nid_here;
3074
3075	if (in_interrupt() || (flags & __GFP_THISNODE))
3076		return NULL;
3077	nid_alloc = nid_here = numa_mem_id();
3078	if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
3079		nid_alloc = cpuset_slab_spread_node();
3080	else if (current->mempolicy)
3081		nid_alloc = mempolicy_slab_node();
3082	if (nid_alloc != nid_here)
3083		return ____cache_alloc_node(cachep, flags, nid_alloc);
3084	return NULL;
3085}
3086
3087/*
3088 * Fallback function if there was no memory available and no objects on a
3089 * certain node and fallback is permitted. First we scan all the
3090 * available nodes for available objects. If that fails then we
3091 * perform an allocation without specifying a node. This allows the page
3092 * allocator to do its reclaim / fallback magic. We then insert the
3093 * slab into the proper nodelist and then allocate from it.
3094 */
3095static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
3096{
3097	struct zonelist *zonelist;
3098	gfp_t local_flags;
3099	struct zoneref *z;
3100	struct zone *zone;
3101	enum zone_type high_zoneidx = gfp_zone(flags);
3102	void *obj = NULL;
 
3103	int nid;
3104	unsigned int cpuset_mems_cookie;
3105
3106	if (flags & __GFP_THISNODE)
3107		return NULL;
3108
3109	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
3110
3111retry_cpuset:
3112	cpuset_mems_cookie = read_mems_allowed_begin();
3113	zonelist = node_zonelist(mempolicy_slab_node(), flags);
3114
3115retry:
3116	/*
3117	 * Look through allowed nodes for objects available
3118	 * from existing per node queues.
3119	 */
3120	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
3121		nid = zone_to_nid(zone);
3122
3123		if (cpuset_zone_allowed_hardwall(zone, flags) &&
3124			cache->node[nid] &&
3125			cache->node[nid]->free_objects) {
3126				obj = ____cache_alloc_node(cache,
3127					flags | GFP_THISNODE, nid);
3128				if (obj)
3129					break;
3130		}
3131	}
3132
3133	if (!obj) {
3134		/*
3135		 * This allocation will be performed within the constraints
3136		 * of the current cpuset / memory policy requirements.
3137		 * We may trigger various forms of reclaim on the allowed
3138		 * set and go into memory reserves if necessary.
3139		 */
3140		struct page *page;
3141
3142		if (local_flags & __GFP_WAIT)
3143			local_irq_enable();
3144		kmem_flagcheck(cache, flags);
3145		page = kmem_getpages(cache, local_flags, numa_mem_id());
3146		if (local_flags & __GFP_WAIT)
3147			local_irq_disable();
3148		if (page) {
3149			/*
3150			 * Insert into the appropriate per node queues
3151			 */
3152			nid = page_to_nid(page);
3153			if (cache_grow(cache, flags, nid, page)) {
3154				obj = ____cache_alloc_node(cache,
3155					flags | GFP_THISNODE, nid);
3156				if (!obj)
3157					/*
3158					 * Another processor may allocate the
3159					 * objects in the slab since we are
3160					 * not holding any locks.
3161					 */
3162					goto retry;
3163			} else {
3164				/* cache_grow already freed obj */
3165				obj = NULL;
3166			}
3167		}
3168	}
3169
3170	if (unlikely(!obj && read_mems_allowed_retry(cpuset_mems_cookie)))
3171		goto retry_cpuset;
3172	return obj;
3173}
3174
3175/*
3176 * An interface to enable slab creation on nodeid
3177 */
3178static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
3179				int nodeid)
3180{
3181	struct list_head *entry;
3182	struct page *page;
3183	struct kmem_cache_node *n;
3184	void *obj;
3185	int x;
3186
3187	VM_BUG_ON(nodeid > num_online_nodes());
3188	n = cachep->node[nodeid];
3189	BUG_ON(!n);
3190
3191retry:
3192	check_irq_off();
3193	spin_lock(&n->list_lock);
3194	entry = n->slabs_partial.next;
3195	if (entry == &n->slabs_partial) {
3196		n->free_touched = 1;
3197		entry = n->slabs_free.next;
3198		if (entry == &n->slabs_free)
3199			goto must_grow;
3200	}
3201
3202	page = list_entry(entry, struct page, lru);
3203	check_spinlock_acquired_node(cachep, nodeid);
3204
3205	STATS_INC_NODEALLOCS(cachep);
3206	STATS_INC_ACTIVE(cachep);
3207	STATS_SET_HIGH(cachep);
3208
3209	BUG_ON(page->active == cachep->num);
3210
3211	obj = slab_get_obj(cachep, page, nodeid);
3212	n->free_objects--;
3213	/* move slab to correct slab list: */
3214	list_del(&page->lru);
3215
3216	if (page->active == cachep->num)
3217		list_add(&page->lru, &n->slabs_full);
3218	else
3219		list_add(&page->lru, &n->slabs_partial);
3220
3221	spin_unlock(&n->list_lock);
3222	goto done;
 
3223
3224must_grow:
3225	spin_unlock(&n->list_lock);
3226	x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL);
3227	if (x)
3228		goto retry;
3229
3230	return fallback_alloc(cachep, flags);
3231
3232done:
3233	return obj;
3234}
3235
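/*
 * NUMA-aware allocation entry point: prefer the requested node, using the
 * local array cache when allocating on the home node, and falling back to
 * other nodes via fallback_alloc() when the target node is not set up or
 * has no memory.
 */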
3236static __always_inline void *
3237slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
3238		   unsigned long caller)
3239{
3240	unsigned long save_flags;
3241	void *ptr;
3242	int slab_node = numa_mem_id();
 
3243
3244	flags &= gfp_allowed_mask;
3245
3246	lockdep_trace_alloc(flags);
3247
3248	if (slab_should_failslab(cachep, flags))
3249		return NULL;
3250
3251	cachep = memcg_kmem_get_cache(cachep, flags);
3252
3253	cache_alloc_debugcheck_before(cachep, flags);
3254	local_irq_save(save_flags);
3255
3256	if (nodeid == NUMA_NO_NODE)
3257		nodeid = slab_node;
3258
3259	if (unlikely(!cachep->node[nodeid])) {
3260		/* Node not bootstrapped yet */
3261		ptr = fallback_alloc(cachep, flags);
3262		goto out;
3263	}
3264
3265	if (nodeid == slab_node) {
3266		/*
3267		 * Use the locally cached objects if possible.
3268		 * However ____cache_alloc does not allow fallback
3269		 * to other nodes. It may fail while we still have
3270		 * objects on other nodes available.
3271		 */
3272		ptr = ____cache_alloc(cachep, flags);
3273		if (ptr)
3274			goto out;
3275	}
3276	/* ___cache_alloc_node can fall back to other nodes */
3277	ptr = ____cache_alloc_node(cachep, flags, nodeid);
3278  out:
3279	local_irq_restore(save_flags);
3280	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
3281	kmemleak_alloc_recursive(ptr, cachep->object_size, 1, cachep->flags,
3282				 flags);
3283
3284	if (likely(ptr)) {
3285		kmemcheck_slab_alloc(cachep, flags, ptr, cachep->object_size);
3286		if (unlikely(flags & __GFP_ZERO))
3287			memset(ptr, 0, cachep->object_size);
3288	}
3289
3290	return ptr;
3291}
3292
3293static __always_inline void *
3294__do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
3295{
3296	void *objp;
3297
3298	if (current->mempolicy || unlikely(current->flags & PF_SPREAD_SLAB)) {
3299		objp = alternate_node_alloc(cache, flags);
3300		if (objp)
3301			goto out;
3302	}
3303	objp = ____cache_alloc(cache, flags);
3304
3305	/*
3306	 * We may just have run out of memory on the local node.
3307	 * ____cache_alloc_node() knows how to locate memory on other nodes
3308	 */
3309	if (!objp)
3310		objp = ____cache_alloc_node(cache, flags, numa_mem_id());
3311
3312  out:
3313	return objp;
3314}
3315#else
3316
3317static __always_inline void *
3318__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3319{
3320	return ____cache_alloc(cachep, flags);
3321}
3322
3323#endif /* CONFIG_NUMA */
3324
3325static __always_inline void *
3326slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
3327{
3328	unsigned long save_flags;
3329	void *objp;
3330
3331	flags &= gfp_allowed_mask;
3332
3333	lockdep_trace_alloc(flags);
3334
3335	if (slab_should_failslab(cachep, flags))
3336		return NULL;
3337
3338	cachep = memcg_kmem_get_cache(cachep, flags);
3339
3340	cache_alloc_debugcheck_before(cachep, flags);
3341	local_irq_save(save_flags);
3342	objp = __do_cache_alloc(cachep, flags);
3343	local_irq_restore(save_flags);
3344	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
3345	kmemleak_alloc_recursive(objp, cachep->object_size, 1, cachep->flags,
3346				 flags);
3347	prefetchw(objp);
3348
3349	if (likely(objp)) {
3350		kmemcheck_slab_alloc(cachep, flags, objp, cachep->object_size);
3351		if (unlikely(flags & __GFP_ZERO))
3352			memset(objp, 0, cachep->object_size);
3353	}
3354
3355	return objp;
3356}
3357
3358/*
3359 * Caller needs to acquire correct kmem_cache_node's list_lock
3360 */
3361static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
3362		       int node)
3363{
3364	int i;
3365	struct kmem_cache_node *n;
3366
3367	for (i = 0; i < nr_objects; i++) {
3368		void *objp;
3369		struct page *page;
3370
3371		clear_obj_pfmemalloc(&objpp[i]);
3372		objp = objpp[i];
3373
3374		page = virt_to_head_page(objp);
3375		n = cachep->node[node];
3376		list_del(&page->lru);
3377		check_spinlock_acquired_node(cachep, node);
3378		slab_put_obj(cachep, page, objp, node);
3379		STATS_DEC_ACTIVE(cachep);
3380		n->free_objects++;
3381
3382		/* fixup slab chains */
3383		if (page->active == 0) {
3384			if (n->free_objects > n->free_limit) {
3385				n->free_objects -= cachep->num;
3386				/* There is no need to drop any previously held
3387				 * lock here; even if we have an off-slab slab
3388				 * descriptor, it is guaranteed to come from a
3389				 * different cache (see the comments before
3390				 * alloc_slabmgmt).
3391				 */
3392				slab_destroy(cachep, page);
3393			} else {
3394				list_add(&page->lru, &n->slabs_free);
3395			}
3396		} else {
3397			/* Unconditionally move a slab to the end of the
3398			 * partial list on free - maximum time for the
3399			 * other objects to be freed, too.
3400			 */
3401			list_add_tail(&page->lru, &n->slabs_partial);
3402		}
3403	}
3404}
3405
3406static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
3407{
3408	int batchcount;
3409	struct kmem_cache_node *n;
3410	int node = numa_mem_id();
3411
3412	batchcount = ac->batchcount;
3413#if DEBUG
3414	BUG_ON(!batchcount || batchcount > ac->avail);
3415#endif
3416	check_irq_off();
3417	n = cachep->node[node];
3418	spin_lock(&n->list_lock);
3419	if (n->shared) {
3420		struct array_cache *shared_array = n->shared;
3421		int max = shared_array->limit - shared_array->avail;
3422		if (max) {
3423			if (batchcount > max)
3424				batchcount = max;
3425			memcpy(&(shared_array->entry[shared_array->avail]),
3426			       ac->entry, sizeof(void *) * batchcount);
3427			shared_array->avail += batchcount;
3428			goto free_done;
3429		}
3430	}
3431
3432	free_block(cachep, ac->entry, batchcount, node);
3433free_done:
3434#if STATS
3435	{
3436		int i = 0;
3437		struct list_head *p;
3438
3439		p = n->slabs_free.next;
3440		while (p != &(n->slabs_free)) {
3441			struct page *page;
3442
3443			page = list_entry(p, struct page, lru);
3444			BUG_ON(page->active);
3445
3446			i++;
3447			p = p->next;
3448		}
3449		STATS_SET_FREEABLE(cachep, i);
3450	}
3451#endif
3452	spin_unlock(&n->list_lock);
3453	ac->avail -= batchcount;
3454	memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
3455}
3456
3457/*
3458 * Release an obj back to its cache. If the obj has a constructed state, it must
3459 * be in this state _before_ it is released.  Called with interrupts disabled.
3460 */
3461static inline void __cache_free(struct kmem_cache *cachep, void *objp,
3462				unsigned long caller)
3463{
3464	struct array_cache *ac = cpu_cache_get(cachep);
3465
3466	check_irq_off();
3467	kmemleak_free_recursive(objp, cachep->flags);
3468	objp = cache_free_debugcheck(cachep, objp, caller);
3469
3470	kmemcheck_slab_free(cachep, objp, cachep->object_size);
3471
3472	/*
3473	 * Skip calling cache_free_alien() when the platform is not NUMA.
3474	 * This avoids the cache misses that happen while accessing slabp
3475	 * (a per-page memory reference) to get the nodeid.  Instead, use a
3476	 * global variable to skip the call, which is most likely to be
3477	 * present in the cache.
3478	 */
3479	if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
3480		return;
3481
3482	if (likely(ac->avail < ac->limit)) {
3483		STATS_INC_FREEHIT(cachep);
3484	} else {
3485		STATS_INC_FREEMISS(cachep);
3486		cache_flusharray(cachep, ac);
3487	}
3488
3489	ac_put_obj(cachep, ac, objp);
3490}
3491
3492/**
3493 * kmem_cache_alloc - Allocate an object
3494 * @cachep: The cache to allocate from.
3495 * @flags: See kmalloc().
3496 *
3497 * Allocate an object from this cache.  The flags are only relevant
3498 * if the cache has no available objects.
3499 */
3500void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3501{
3502	void *ret = slab_alloc(cachep, flags, _RET_IP_);
3503
3504	trace_kmem_cache_alloc(_RET_IP_, ret,
3505			       cachep->object_size, cachep->size, flags);
3506
3507	return ret;
3508}
3509EXPORT_SYMBOL(kmem_cache_alloc);
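/*
 * Example usage (an illustrative sketch, not taken from this file;
 * "struct foo" and foo_cache are hypothetical names): the usual
 * create/alloc/free lifecycle of a cache as seen by a caller.
 *
 *	struct kmem_cache *foo_cache;
 *	struct foo *obj;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, NULL);
 *	if (!foo_cache)
 *		return -ENOMEM;
 *
 *	obj = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	if (!obj)
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free(foo_cache, obj);
 *	kmem_cache_destroy(foo_cache);
 */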
3510
3511#ifdef CONFIG_TRACING
3512void *
3513kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
3514{
3515	void *ret;
3516
3517	ret = slab_alloc(cachep, flags, _RET_IP_);
3518
3519	trace_kmalloc(_RET_IP_, ret,
3520		      size, cachep->size, flags);
3521	return ret;
3522}
3523EXPORT_SYMBOL(kmem_cache_alloc_trace);
3524#endif
3525
3526#ifdef CONFIG_NUMA
3527/**
3528 * kmem_cache_alloc_node - Allocate an object on the specified node
3529 * @cachep: The cache to allocate from.
3530 * @flags: See kmalloc().
3531 * @nodeid: node number of the target node.
3532 *
3533 * Identical to kmem_cache_alloc but it will allocate memory on the given
3534 * node, which can improve the performance for cpu bound structures.
3535 *
3536 * Fallback to other node is possible if __GFP_THISNODE is not set.
3537 */
3538void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
3539{
3540	void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
3541
3542	trace_kmem_cache_alloc_node(_RET_IP_, ret,
3543				    cachep->object_size, cachep->size,
3544				    flags, nodeid);
3545
3546	return ret;
3547}
3548EXPORT_SYMBOL(kmem_cache_alloc_node);
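/*
 * Example (illustrative sketch; foo_cache is a hypothetical cache):
 * placing an object on the node that backs a given CPU.  Passing
 * NUMA_NO_NODE instead of an explicit node id selects the local node,
 * as handled in slab_alloc_node() above.
 *
 *	obj = kmem_cache_alloc_node(foo_cache, GFP_KERNEL,
 *				    cpu_to_node(cpu));
 */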
3549
3550#ifdef CONFIG_TRACING
3551void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
3552				  gfp_t flags,
3553				  int nodeid,
3554				  size_t size)
3555{
3556	void *ret;
3557
3558	ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
3559
3560	trace_kmalloc_node(_RET_IP_, ret,
3561			   size, cachep->size,
3562			   flags, nodeid);
3563	return ret;
3564}
3565EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
3566#endif
3567
3568static __always_inline void *
3569__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
3570{
3571	struct kmem_cache *cachep;
3572
3573	cachep = kmalloc_slab(size, flags);
3574	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3575		return cachep;
3576	return kmem_cache_alloc_node_trace(cachep, flags, node, size);
3577}
3578
3579#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
3580void *__kmalloc_node(size_t size, gfp_t flags, int node)
3581{
3582	return __do_kmalloc_node(size, flags, node, _RET_IP_);
3583}
3584EXPORT_SYMBOL(__kmalloc_node);
3585
3586void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
3587		int node, unsigned long caller)
3588{
3589	return __do_kmalloc_node(size, flags, node, caller);
3590}
3591EXPORT_SYMBOL(__kmalloc_node_track_caller);
3592#else
3593void *__kmalloc_node(size_t size, gfp_t flags, int node)
3594{
3595	return __do_kmalloc_node(size, flags, node, 0);
3596}
3597EXPORT_SYMBOL(__kmalloc_node);
3598#endif /* CONFIG_DEBUG_SLAB || CONFIG_TRACING */
3599#endif /* CONFIG_NUMA */
3600
3601/**
3602 * __do_kmalloc - allocate memory
3603 * @size: how many bytes of memory are required.
3604 * @flags: the type of memory to allocate (see kmalloc).
3605 * @caller: function caller for debug tracking of the caller
3606 */
3607static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
3608					  unsigned long caller)
3609{
3610	struct kmem_cache *cachep;
3611	void *ret;
3612
3613	cachep = kmalloc_slab(size, flags);
3614	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3615		return cachep;
3616	ret = slab_alloc(cachep, flags, caller);
3617
3618	trace_kmalloc(caller, ret,
3619		      size, cachep->size, flags);
3620
3621	return ret;
3622}
3623
3624
3625#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
3626void *__kmalloc(size_t size, gfp_t flags)
3627{
3628	return __do_kmalloc(size, flags, _RET_IP_);
3629}
3630EXPORT_SYMBOL(__kmalloc);
3631
3632void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
3633{
3634	return __do_kmalloc(size, flags, caller);
3635}
3636EXPORT_SYMBOL(__kmalloc_track_caller);
3637
3638#else
3639void *__kmalloc(size_t size, gfp_t flags)
3640{
3641	return __do_kmalloc(size, flags, 0);
3642}
3643EXPORT_SYMBOL(__kmalloc);
3644#endif
3645
3646/**
3647 * kmem_cache_free - Deallocate an object
3648 * @cachep: The cache the allocation was from.
3649 * @objp: The previously allocated object.
3650 *
3651 * Free an object which was previously allocated from this
3652 * cache.
3653 */
3654void kmem_cache_free(struct kmem_cache *cachep, void *objp)
3655{
3656	unsigned long flags;
3657	cachep = cache_from_obj(cachep, objp);
3658	if (!cachep)
3659		return;
3660
3661	local_irq_save(flags);
3662	debug_check_no_locks_freed(objp, cachep->object_size);
3663	if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
3664		debug_check_no_obj_freed(objp, cachep->object_size);
3665	__cache_free(cachep, objp, _RET_IP_);
3666	local_irq_restore(flags);
3667
3668	trace_kmem_cache_free(_RET_IP_, objp);
3669}
3670EXPORT_SYMBOL(kmem_cache_free);
3671
3672/**
3673 * kfree - free previously allocated memory
3674 * @objp: pointer returned by kmalloc.
3675 *
3676 * If @objp is NULL, no operation is performed.
3677 *
3678 * Don't free memory not originally allocated by kmalloc()
3679 * or you will run into trouble.
3680 */
3681void kfree(const void *objp)
3682{
3683	struct kmem_cache *c;
3684	unsigned long flags;
3685
3686	trace_kfree(_RET_IP_, objp);
3687
3688	if (unlikely(ZERO_OR_NULL_PTR(objp)))
3689		return;
3690	local_irq_save(flags);
3691	kfree_debugcheck(objp);
3692	c = virt_to_cache(objp);
3693	debug_check_no_locks_freed(objp, c->object_size);
3694
3695	debug_check_no_obj_freed(objp, c->object_size);
3696	__cache_free(c, (void *)objp, _RET_IP_);
3697	local_irq_restore(flags);
3698}
3699EXPORT_SYMBOL(kfree);
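/*
 * Example (illustrative sketch): a plain kmalloc()/kfree() pairing.
 * The request is mapped to the nearest kmalloc cache by kmalloc_slab(),
 * and kfree() tolerates NULL and ZERO_SIZE_PTR, as the ZERO_OR_NULL_PTR()
 * check above shows.
 *
 *	buf = kmalloc(len, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	kfree(buf);
 */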
3700
3701/*
3702 * This initializes kmem_cache_node or resizes various caches for all nodes.
3703 */
3704static int alloc_kmem_cache_node(struct kmem_cache *cachep, gfp_t gfp)
3705{
3706	int node;
3707	struct kmem_cache_node *n;
3708	struct array_cache *new_shared;
3709	struct array_cache **new_alien = NULL;
3710
3711	for_each_online_node(node) {
3712
3713		if (use_alien_caches) {
3714			new_alien = alloc_alien_cache(node, cachep->limit, gfp);
3715			if (!new_alien)
3716				goto fail;
3717		}
3718
3719		new_shared = NULL;
3720		if (cachep->shared) {
3721			new_shared = alloc_arraycache(node,
3722				cachep->shared*cachep->batchcount,
3723					0xbaadf00d, gfp);
3724			if (!new_shared) {
3725				free_alien_cache(new_alien);
3726				goto fail;
3727			}
3728		}
3729
3730		n = cachep->node[node];
3731		if (n) {
3732			struct array_cache *shared = n->shared;
3733
3734			spin_lock_irq(&n->list_lock);
3735
3736			if (shared)
3737				free_block(cachep, shared->entry,
3738						shared->avail, node);
3739
3740			n->shared = new_shared;
3741			if (!n->alien) {
3742				n->alien = new_alien;
3743				new_alien = NULL;
3744			}
3745			n->free_limit = (1 + nr_cpus_node(node)) *
3746					cachep->batchcount + cachep->num;
3747			spin_unlock_irq(&n->list_lock);
3748			kfree(shared);
3749			free_alien_cache(new_alien);
3750			continue;
3751		}
3752		n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node);
3753		if (!n) {
3754			free_alien_cache(new_alien);
3755			kfree(new_shared);
3756			goto fail;
3757		}
3758
3759		kmem_cache_node_init(n);
3760		n->next_reap = jiffies + REAPTIMEOUT_NODE +
3761				((unsigned long)cachep) % REAPTIMEOUT_NODE;
3762		n->shared = new_shared;
3763		n->alien = new_alien;
3764		n->free_limit = (1 + nr_cpus_node(node)) *
3765					cachep->batchcount + cachep->num;
3766		cachep->node[node] = n;
3767	}
3768	return 0;
3769
3770fail:
3771	if (!cachep->list.next) {
3772		/* Cache is not active yet. Roll back what we did */
3773		node--;
3774		while (node >= 0) {
3775			if (cachep->node[node]) {
3776				n = cachep->node[node];
3777
3778				kfree(n->shared);
3779				free_alien_cache(n->alien);
3780				kfree(n);
3781				cachep->node[node] = NULL;
3782			}
3783			node--;
3784		}
3785	}
3786	return -ENOMEM;
3787}
3788
3789struct ccupdate_struct {
3790	struct kmem_cache *cachep;
3791	struct array_cache *new[0];
3792};
3793
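/*
 * do_ccupdate_local() runs on every CPU (via on_each_cpu() in
 * __do_tune_cpucache() below) and swaps that CPU's array_cache pointer
 * with the freshly allocated one, handing the old array back through
 * new->new[] so the caller can drain and free it afterwards.
 */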
3794static void do_ccupdate_local(void *info)
3795{
3796	struct ccupdate_struct *new = info;
3797	struct array_cache *old;
3798
3799	check_irq_off();
3800	old = cpu_cache_get(new->cachep);
3801
3802	new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()];
3803	new->new[smp_processor_id()] = old;
3804}
3805
3806/* Always called with the slab_mutex held */
3807static int __do_tune_cpucache(struct kmem_cache *cachep, int limit,
3808				int batchcount, int shared, gfp_t gfp)
3809{
3810	struct ccupdate_struct *new;
3811	int i;
3812
3813	new = kzalloc(sizeof(*new) + nr_cpu_ids * sizeof(struct array_cache *),
3814		      gfp);
3815	if (!new)
3816		return -ENOMEM;
3817
3818	for_each_online_cpu(i) {
3819		new->new[i] = alloc_arraycache(cpu_to_mem(i), limit,
3820						batchcount, gfp);
3821		if (!new->new[i]) {
3822			for (i--; i >= 0; i--)
3823				kfree(new->new[i]);
3824			kfree(new);
3825			return -ENOMEM;
3826		}
3827	}
3828	new->cachep = cachep;
3829
3830	on_each_cpu(do_ccupdate_local, (void *)new, 1);
3831
3832	check_irq_on();
3833	cachep->batchcount = batchcount;
3834	cachep->limit = limit;
3835	cachep->shared = shared;
3836
3837	for_each_online_cpu(i) {
3838		struct array_cache *ccold = new->new[i];
3839		if (!ccold)
3840			continue;
3841		spin_lock_irq(&cachep->node[cpu_to_mem(i)]->list_lock);
3842		free_block(cachep, ccold->entry, ccold->avail, cpu_to_mem(i));
3843		spin_unlock_irq(&cachep->node[cpu_to_mem(i)]->list_lock);
3844		kfree(ccold);
3845	}
3846	kfree(new);
3847	return alloc_kmem_cache_node(cachep, gfp);
3848}
3849
3850static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
3851				int batchcount, int shared, gfp_t gfp)
3852{
3853	int ret;
3854	struct kmem_cache *c = NULL;
3855	int i = 0;
3856
3857	ret = __do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
3858
3859	if (slab_state < FULL)
3860		return ret;
3861
3862	if ((ret < 0) || !is_root_cache(cachep))
3863		return ret;
3864
3865	VM_BUG_ON(!mutex_is_locked(&slab_mutex));
3866	for_each_memcg_cache_index(i) {
3867		c = cache_from_memcg_idx(cachep, i);
3868		if (c)
3869			/* return value determined by the parent cache only */
3870			__do_tune_cpucache(c, limit, batchcount, shared, gfp);
3871	}
3872
3873	return ret;
3874}
3875
3876/* Called with slab_mutex held always */
3877static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
3878{
3879	int err;
3880	int limit = 0;
3881	int shared = 0;
3882	int batchcount = 0;
3883
3884	if (!is_root_cache(cachep)) {
3885		struct kmem_cache *root = memcg_root_cache(cachep);
3886		limit = root->limit;
3887		shared = root->shared;
3888		batchcount = root->batchcount;
3889	}
3890
3891	if (limit && shared && batchcount)
3892		goto skip_setup;
3893	/*
3894	 * The head array serves three purposes:
3895	 * - create a LIFO ordering, i.e. return objects that are cache-warm
3896	 * - reduce the number of spinlock operations.
3897	 * - reduce the number of linked list operations on the slab and
3898	 *   bufctl chains: array operations are cheaper.
3899	 * The numbers are guessed; we should auto-tune as described by
3900	 * Bonwick.
3901	 */
3902	if (cachep->size > 131072)
3903		limit = 1;
3904	else if (cachep->size > PAGE_SIZE)
3905		limit = 8;
3906	else if (cachep->size > 1024)
3907		limit = 24;
3908	else if (cachep->size > 256)
3909		limit = 54;
3910	else
3911		limit = 120;
3912
3913	/*
3914	 * CPU bound tasks (e.g. network routing) can exhibit cpu bound
3915	 * allocation behaviour: Most allocs on one cpu, most free operations
3916	 * on another cpu. For these cases, an efficient object passing between
3917	 * cpus is necessary. This is provided by a shared array. The array
3918	 * replaces Bonwick's magazine layer.
3919	 * On uniprocessor, it's functionally equivalent (but less efficient)
3920	 * to a larger limit. Thus disabled by default.
3921	 */
3922	shared = 0;
3923	if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1)
3924		shared = 8;
3925
3926#if DEBUG
3927	/*
3928	 * With debugging enabled, a large batchcount leads to excessively long
3929	 * periods with local interrupts disabled. Limit the batchcount.
3930	 */
3931	if (limit > 32)
3932		limit = 32;
3933#endif
3934	batchcount = (limit + 1) / 2;
3935skip_setup:
3936	err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
3937	if (err)
3938		printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
3939		       cachep->name, -err);
3940	return err;
3941}
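/*
 * Worked example of the heuristics above (illustrative): for a cache with
 * 512-byte objects on an SMP machine, 256 < size <= 1024 gives limit = 54,
 * size <= PAGE_SIZE gives shared = 8, and batchcount = (54 + 1) / 2 = 27
 * (unless DEBUG caps the limit at 32 first, giving batchcount = 16).
 */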
3942
3943/*
3944 * Drain an array if it contains any elements, taking the node lock only if
3945 * necessary. Note that the node listlock also protects the array_cache
3946 * if drain_array() is used on the shared array.
3947 */
3948static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
3949			 struct array_cache *ac, int force, int node)
3950{
3951	int tofree;
3952
3953	if (!ac || !ac->avail)
3954		return;
3955	if (ac->touched && !force) {
3956		ac->touched = 0;
3957	} else {
3958		spin_lock_irq(&n->list_lock);
3959		if (ac->avail) {
3960			tofree = force ? ac->avail : (ac->limit + 4) / 5;
3961			if (tofree > ac->avail)
3962				tofree = (ac->avail + 1) / 2;
3963			free_block(cachep, ac->entry, tofree, node);
3964			ac->avail -= tofree;
3965			memmove(ac->entry, &(ac->entry[tofree]),
3966				sizeof(void *) * ac->avail);
3967		}
3968		spin_unlock_irq(&n->list_lock);
3969	}
3970}
3971
3972/**
3973 * cache_reap - Reclaim memory from caches.
3974 * @w: work descriptor
3975 *
3976 * Called from workqueue/eventd every few seconds.
3977 * Purpose:
3978 * - clear the per-cpu caches for this CPU.
3979 * - return freeable pages to the main free memory pool.
3980 *
3981 * If we cannot acquire the cache chain mutex then just give up - we'll try
3982 * again on the next iteration.
3983 */
3984static void cache_reap(struct work_struct *w)
3985{
3986	struct kmem_cache *searchp;
3987	struct kmem_cache_node *n;
3988	int node = numa_mem_id();
3989	struct delayed_work *work = to_delayed_work(w);
3990
3991	if (!mutex_trylock(&slab_mutex))
3992		/* Give up. Set up the next iteration. */
3993		goto out;
3994
3995	list_for_each_entry(searchp, &slab_caches, list) {
3996		check_irq_on();
3997
3998		/*
3999		 * We only take the node lock if absolutely necessary and we
4000		 * have established with reasonable certainty that
4001		 * we can do some work if the lock was obtained.
4002		 */
4003		n = searchp->node[node];
4004
4005		reap_alien(searchp, n);
4006
4007		drain_array(searchp, n, cpu_cache_get(searchp), 0, node);
4008
4009		/*
4010		 * These are racy checks but it does not matter
4011		 * if we skip one check or scan twice.
4012		 */
4013		if (time_after(n->next_reap, jiffies))
4014			goto next;
4015
4016		n->next_reap = jiffies + REAPTIMEOUT_NODE;
4017
4018		drain_array(searchp, n, n->shared, 0, node);
4019
4020		if (n->free_touched)
4021			n->free_touched = 0;
4022		else {
4023			int freed;
4024
4025			freed = drain_freelist(searchp, n, (n->free_limit +
4026				5 * searchp->num - 1) / (5 * searchp->num));
4027			STATS_ADD_REAPED(searchp, freed);
4028		}
4029next:
4030		cond_resched();
4031	}
4032	check_irq_on();
4033	mutex_unlock(&slab_mutex);
4034	next_reap_node();
4035out:
4036	/* Set up the next iteration */
4037	schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_AC));
4038}
4039
4040#ifdef CONFIG_SLABINFO
4041void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
4042{
4043	struct page *page;
4044	unsigned long active_objs;
4045	unsigned long num_objs;
4046	unsigned long active_slabs = 0;
4047	unsigned long num_slabs, free_objects = 0, shared_avail = 0;
4048	const char *name;
4049	char *error = NULL;
4050	int node;
4051	struct kmem_cache_node *n;
4052
4053	active_objs = 0;
4054	num_slabs = 0;
4055	for_each_online_node(node) {
4056		n = cachep->node[node];
4057		if (!n)
4058			continue;
4059
4060		check_irq_on();
4061		spin_lock_irq(&n->list_lock);
4062
4063		list_for_each_entry(page, &n->slabs_full, lru) {
4064			if (page->active != cachep->num && !error)
4065				error = "slabs_full accounting error";
4066			active_objs += cachep->num;
4067			active_slabs++;
4068		}
4069		list_for_each_entry(page, &n->slabs_partial, lru) {
4070			if (page->active == cachep->num && !error)
4071				error = "slabs_partial accounting error";
4072			if (!page->active && !error)
4073				error = "slabs_partial accounting error";
4074			active_objs += page->active;
4075			active_slabs++;
4076		}
4077		list_for_each_entry(page, &n->slabs_free, lru) {
4078			if (page->active && !error)
4079				error = "slabs_free accounting error";
4080			num_slabs++;
4081		}
4082		free_objects += n->free_objects;
4083		if (n->shared)
4084			shared_avail += n->shared->avail;
4085
4086		spin_unlock_irq(&n->list_lock);
4087	}
4088	num_slabs += active_slabs;
4089	num_objs = num_slabs * cachep->num;
4090	if (num_objs - active_objs != free_objects && !error)
4091		error = "free_objects accounting error";
4092
4093	name = cachep->name;
4094	if (error)
4095		printk(KERN_ERR "slab: cache %s error: %s\n", name, error);
4096
4097	sinfo->active_objs = active_objs;
4098	sinfo->num_objs = num_objs;
4099	sinfo->active_slabs = active_slabs;
4100	sinfo->num_slabs = num_slabs;
4101	sinfo->shared_avail = shared_avail;
4102	sinfo->limit = cachep->limit;
4103	sinfo->batchcount = cachep->batchcount;
4104	sinfo->shared = cachep->shared;
4105	sinfo->objects_per_slab = cachep->num;
4106	sinfo->cache_order = cachep->gfporder;
4107}
4108
4109void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
4110{
4111#if STATS
4112	{			/* node stats */
4113		unsigned long high = cachep->high_mark;
4114		unsigned long allocs = cachep->num_allocations;
4115		unsigned long grown = cachep->grown;
4116		unsigned long reaped = cachep->reaped;
4117		unsigned long errors = cachep->errors;
4118		unsigned long max_freeable = cachep->max_freeable;
4119		unsigned long node_allocs = cachep->node_allocs;
4120		unsigned long node_frees = cachep->node_frees;
4121		unsigned long overflows = cachep->node_overflow;
4122
4123		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu "
4124			   "%4lu %4lu %4lu %4lu %4lu",
4125			   allocs, high, grown,
4126			   reaped, errors, max_freeable, node_allocs,
4127			   node_frees, overflows);
4128	}
4129	/* cpu stats */
4130	{
4131		unsigned long allochit = atomic_read(&cachep->allochit);
4132		unsigned long allocmiss = atomic_read(&cachep->allocmiss);
4133		unsigned long freehit = atomic_read(&cachep->freehit);
4134		unsigned long freemiss = atomic_read(&cachep->freemiss);
4135
4136		seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
4137			   allochit, allocmiss, freehit, freemiss);
4138	}
4139#endif
4140}
4141
4142#define MAX_SLABINFO_WRITE 128
4143/**
4144 * slabinfo_write - Tuning for the slab allocator
4145 * @file: unused
4146 * @buffer: user buffer
4147 * @count: data length
4148 * @ppos: unused
4149 */
4150ssize_t slabinfo_write(struct file *file, const char __user *buffer,
4151		       size_t count, loff_t *ppos)
4152{
4153	char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
4154	int limit, batchcount, shared, res;
4155	struct kmem_cache *cachep;
4156
4157	if (count > MAX_SLABINFO_WRITE)
4158		return -EINVAL;
4159	if (copy_from_user(&kbuf, buffer, count))
4160		return -EFAULT;
4161	kbuf[MAX_SLABINFO_WRITE] = '\0';
4162
4163	tmp = strchr(kbuf, ' ');
4164	if (!tmp)
4165		return -EINVAL;
4166	*tmp = '\0';
4167	tmp++;
4168	if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
4169		return -EINVAL;
4170
4171	/* Find the cache in the chain of caches. */
4172	mutex_lock(&slab_mutex);
4173	res = -EINVAL;
4174	list_for_each_entry(cachep, &slab_caches, list) {
4175		if (!strcmp(cachep->name, kbuf)) {
4176			if (limit < 1 || batchcount < 1 ||
4177					batchcount > limit || shared < 0) {
4178				res = 0;
4179			} else {
4180				res = do_tune_cpucache(cachep, limit,
4181						       batchcount, shared,
4182						       GFP_KERNEL);
4183			}
4184			break;
4185		}
4186	}
4187	mutex_unlock(&slab_mutex);
4188	if (res >= 0)
4189		res = count;
4190	return res;
4191}
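/*
 * Example (illustrative): the write expected by the parser above has the
 * form "<cachename> <limit> <batchcount> <shared>", so a line such as
 * "dentry 256 128 8" written to /proc/slabinfo retunes the dentry cache,
 * subject to the limit/batchcount sanity checks in the loop.
 */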
4192
4193#ifdef CONFIG_DEBUG_SLAB_LEAK
4194
4195static void *leaks_start(struct seq_file *m, loff_t *pos)
4196{
4197	mutex_lock(&slab_mutex);
4198	return seq_list_start(&slab_caches, *pos);
4199}
4200
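/*
 * add_caller() below maintains a sorted table of (caller address, count)
 * pairs: n[0] holds the table capacity, n[1] the number of entries, and
 * the pairs themselves start at n[2].  A binary search either bumps the
 * count of an existing caller or inserts the new address in order;
 * 0 is returned once the table is full, after which leaks_show()
 * reallocates a larger buffer and retries.
 */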
4201static inline int add_caller(unsigned long *n, unsigned long v)
4202{
4203	unsigned long *p;
4204	int l;
4205	if (!v)
4206		return 1;
4207	l = n[1];
4208	p = n + 2;
4209	while (l) {
4210		int i = l/2;
4211		unsigned long *q = p + 2 * i;
4212		if (*q == v) {
4213			q[1]++;
4214			return 1;
4215		}
4216		if (*q > v) {
4217			l = i;
4218		} else {
4219			p = q + 2;
4220			l -= i + 1;
4221		}
4222	}
4223	if (++n[1] == n[0])
4224		return 0;
4225	memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
4226	p[0] = v;
4227	p[1] = 1;
4228	return 1;
4229}
4230
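/*
 * handle_slab() walks every object in one slab page: objects whose index
 * appears in the page's free list (entries from page->active onwards) are
 * skipped, and for each remaining live object the recorded allocation
 * caller (dbg_userword) is added to the table via add_caller().
 */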
4231static void handle_slab(unsigned long *n, struct kmem_cache *c,
4232						struct page *page)
4233{
4234	void *p;
4235	int i, j;
4236
4237	if (n[0] == n[1])
4238		return;
4239	for (i = 0, p = page->s_mem; i < c->num; i++, p += c->size) {
4240		bool active = true;
4241
4242		for (j = page->active; j < c->num; j++) {
4243			/* Skip freed item */
4244			if (get_free_obj(page, j) == i) {
4245				active = false;
4246				break;
4247			}
4248		}
4249		if (!active)
4250			continue;
4251
4252		if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
4253			return;
4254	}
4255}
4256
4257static void show_symbol(struct seq_file *m, unsigned long address)
4258{
4259#ifdef CONFIG_KALLSYMS
4260	unsigned long offset, size;
4261	char modname[MODULE_NAME_LEN], name[KSYM_NAME_LEN];
4262
4263	if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
4264		seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
4265		if (modname[0])
4266			seq_printf(m, " [%s]", modname);
4267		return;
4268	}
4269#endif
4270	seq_printf(m, "%p", (void *)address);
4271}
4272
4273static int leaks_show(struct seq_file *m, void *p)
4274{
4275	struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list);
4276	struct page *page;
4277	struct kmem_cache_node *n;
4278	const char *name;
4279	unsigned long *x = m->private;
4280	int node;
4281	int i;
4282
4283	if (!(cachep->flags & SLAB_STORE_USER))
4284		return 0;
4285	if (!(cachep->flags & SLAB_RED_ZONE))
4286		return 0;
4287
4288	/* OK, we can do it */
4289
4290	x[1] = 0;
4291
4292	for_each_online_node(node) {
4293		n = cachep->node[node];
4294		if (!n)
4295			continue;
4296
4297		check_irq_on();
4298		spin_lock_irq(&n->list_lock);
4299
4300		list_for_each_entry(page, &n->slabs_full, lru)
4301			handle_slab(x, cachep, page);
4302		list_for_each_entry(page, &n->slabs_partial, lru)
4303			handle_slab(x, cachep, page);
4304		spin_unlock_irq(&n->list_lock);
4305	}
4306	name = cachep->name;
4307	if (x[0] == x[1]) {
4308		/* Increase the buffer size */
4309		mutex_unlock(&slab_mutex);
4310		m->private = kzalloc(x[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
4311		if (!m->private) {
4312			/* Too bad, we are really out */
4313			m->private = x;
4314			mutex_lock(&slab_mutex);
4315			return -ENOMEM;
4316		}
4317		*(unsigned long *)m->private = x[0] * 2;
4318		kfree(x);
4319		mutex_lock(&slab_mutex);
4320		/* Now make sure this entry will be retried */
4321		m->count = m->size;
4322		return 0;
4323	}
4324	for (i = 0; i < x[1]; i++) {
4325		seq_printf(m, "%s: %lu ", name, x[2*i+3]);
4326		show_symbol(m, x[2*i+2]);
4327		seq_putc(m, '\n');
4328	}
4329
4330	return 0;
4331}
4332
4333static const struct seq_operations slabstats_op = {
4334	.start = leaks_start,
4335	.next = slab_next,
4336	.stop = slab_stop,
4337	.show = leaks_show,
4338};
4339
4340static int slabstats_open(struct inode *inode, struct file *file)
4341{
4342	unsigned long *n = kzalloc(PAGE_SIZE, GFP_KERNEL);
4343	int ret = -ENOMEM;
4344	if (n) {
4345		ret = seq_open(file, &slabstats_op);
4346		if (!ret) {
4347			struct seq_file *m = file->private_data;
4348			*n = PAGE_SIZE / (2 * sizeof(unsigned long));
4349			m->private = n;
4350			n = NULL;
4351		}
4352		kfree(n);
4353	}
4354	return ret;
4355}
4356
4357static const struct file_operations proc_slabstats_operations = {
4358	.open		= slabstats_open,
4359	.read		= seq_read,
4360	.llseek		= seq_lseek,
4361	.release	= seq_release_private,
4362};
4363#endif
4364
4365static int __init slab_proc_init(void)
4366{
4367#ifdef CONFIG_DEBUG_SLAB_LEAK
4368	proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
4369#endif
4370	return 0;
4371}
4372module_init(slab_proc_init);
4373#endif
4374
4375/**
4376 * ksize - get the actual amount of memory allocated for a given object
4377 * @objp: Pointer to the object
4378 *
4379 * kmalloc may internally round up allocations and return more memory
4380 * than requested. ksize() can be used to determine the actual amount of
4381 * memory allocated. The caller may use this additional memory, even though
4382 * a smaller amount of memory was initially specified with the kmalloc call.
4383 * The caller must guarantee that objp points to a valid object previously
4384 * allocated with either kmalloc() or kmem_cache_alloc(). The object
4385 * must not be freed during the duration of the call.
4386 */
4387size_t ksize(const void *objp)
4388{
4389	BUG_ON(!objp);
4390	if (unlikely(objp == ZERO_SIZE_PTR))
4391		return 0;
4392
4393	return virt_to_cache(objp)->object_size;
4394}
4395EXPORT_SYMBOL(ksize);
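/*
 * Example (illustrative sketch): using the slack that ksize() reports.
 * A 30-byte request is typically backed by the kmalloc-32 cache, so
 * ksize() would return 32 and the extra bytes may be used safely.
 *
 *	buf = kmalloc(30, GFP_KERNEL);
 *	if (buf)
 *		usable = ksize(buf);
 */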