   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * linux/mm/slab.c
   4 * Written by Mark Hemment, 1996/97.
   5 * (markhe@nextd.demon.co.uk)
   6 *
   7 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
   8 *
   9 * Major cleanup, different bufctl logic, per-cpu arrays
  10 *	(c) 2000 Manfred Spraul
  11 *
  12 * Cleanup, make the head arrays unconditional, preparation for NUMA
  13 * 	(c) 2002 Manfred Spraul
  14 *
  15 * An implementation of the Slab Allocator as described in outline in;
  16 *	UNIX Internals: The New Frontiers by Uresh Vahalia
  17 *	Pub: Prentice Hall	ISBN 0-13-101908-2
  18 * or with a little more detail in;
  19 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
  20 *	Jeff Bonwick (Sun Microsystems).
  21 *	Presented at: USENIX Summer 1994 Technical Conference
  22 *
  23 * The memory is organized in caches, one cache for each object type.
  24 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
   25 * Each cache consists of many slabs (they are small (usually one
  26 * page long) and always contiguous), and each slab contains multiple
  27 * initialized objects.
  28 *
   29 * This means that your constructor is used only for newly allocated
  30 * slabs and you must pass objects with the same initializations to
  31 * kmem_cache_free.
  32 *
  33 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
   34 * normal). If you need a special memory type, then you must create a new
  35 * cache for that memory type.
  36 *
  37 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
  38 *   full slabs with 0 free objects
  39 *   partial slabs
  40 *   empty slabs with no allocated objects
  41 *
  42 * If partial slabs exist, then new allocations come from these slabs,
  43 * otherwise from empty slabs or new slabs are allocated.
  44 *
  45 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
  46 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
  47 *
  48 * Each cache has a short per-cpu head array, most allocs
  49 * and frees go into that array, and if that array overflows, then 1/2
  50 * of the entries in the array are given back into the global cache.
  51 * The head array is strictly LIFO and should improve the cache hit rates.
  52 * On SMP, it additionally reduces the spinlock operations.
  53 *
  54 * The c_cpuarray may not be read with enabled local interrupts -
  55 * it's changed with a smp_call_function().
  56 *
  57 * SMP synchronization:
  58 *  constructors and destructors are called without any locking.
  59 *  Several members in struct kmem_cache and struct slab never change, they
  60 *	are accessed without any locking.
  61 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
  62 *  	and local interrupts are disabled so slab code is preempt-safe.
  63 *  The non-constant members are protected with a per-cache irq spinlock.
  64 *
  65 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
  66 * in 2000 - many ideas in the current implementation are derived from
  67 * his patch.
  68 *
  69 * Further notes from the original documentation:
  70 *
  71 * 11 April '97.  Started multi-threading - markhe
  72 *	The global cache-chain is protected by the mutex 'slab_mutex'.
   73 *	The mutex is only needed when accessing/extending the cache-chain, which
  74 *	can never happen inside an interrupt (kmem_cache_create(),
  75 *	kmem_cache_shrink() and kmem_cache_reap()).
  76 *
  77 *	At present, each engine can be growing a cache.  This should be blocked.
  78 *
  79 * 15 March 2005. NUMA slab allocator.
  80 *	Shai Fultheim <shai@scalex86.org>.
  81 *	Shobhit Dayal <shobhit@calsoftinc.com>
  82 *	Alok N Kataria <alokk@calsoftinc.com>
  83 *	Christoph Lameter <christoph@lameter.com>
  84 *
  85 *	Modified the slab allocator to be node aware on NUMA systems.
  86 *	Each node has its own list of partial, free and full slabs.
  87 *	All object allocations for a node occur from node specific slab lists.
  88 */
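/*
 * Illustrative usage of the public API described above (a hedged sketch,
 * not part of the original file; "struct foo" and "foo_cache" are
 * hypothetical names):
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, NULL);
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, f);
 *	kmem_cache_destroy(foo_cache);
 *
 * As noted above, an object passed to kmem_cache_free() must carry the same
 * initialization a freshly constructed object would have, and the caller of
 * kmem_cache_destroy() must prevent concurrent allocations from the cache.
 */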
  89
  90#include	<linux/__KEEPIDENTS__B.h>
  91#include	<linux/__KEEPIDENTS__C.h>
  92#include	<linux/__KEEPIDENTS__D.h>
  93#include	<linux/__KEEPIDENTS__E.h>
  94#include	<linux/__KEEPIDENTS__F.h>
  95#include	<linux/__KEEPIDENTS__G.h>
  96#include	<linux/__KEEPIDENTS__H.h>
  97#include	<linux/__KEEPIDENTS__I.h>
  98#include	<linux/__KEEPIDENTS__J.h>
  99#include	<linux/proc_fs.h>
 100#include	<linux/__KEEPIDENTS__BA.h>
 101#include	<linux/__KEEPIDENTS__BB.h>
 102#include	<linux/__KEEPIDENTS__BC.h>
 103#include	<linux/cpu.h>
 104#include	<linux/__KEEPIDENTS__BD.h>
 105#include	<linux/__KEEPIDENTS__BE.h>
 106#include	<linux/rcupdate.h>
 107#include	<linux/__KEEPIDENTS__BF.h>
 108#include	<linux/__KEEPIDENTS__BG.h>
 109#include	<linux/__KEEPIDENTS__BH.h>
 110#include	<linux/kmemleak.h>
 111#include	<linux/__KEEPIDENTS__BI.h>
 112#include	<linux/__KEEPIDENTS__BJ.h>
 113#include	<linux/__KEEPIDENTS__CA-__KEEPIDENTS__CB.h>
 114#include	<linux/__KEEPIDENTS__CC.h>
 115#include	<linux/reciprocal_div.h>
 116#include	<linux/debugobjects.h>
 117#include	<linux/__KEEPIDENTS__CD.h>
 118#include	<linux/__KEEPIDENTS__CE.h>
 119#include	<linux/__KEEPIDENTS__CF/task_stack.h>
 120
 121#include	<net/__KEEPIDENTS__CG.h>
 122
 123#include	<asm/cacheflush.h>
 124#include	<asm/tlbflush.h>
 125#include	<asm/page.h>
 126
 127#include <trace/events/kmem.h>
 128
 129#include	"internal.h"
 130
 131#include	"slab.h"
 132
 133/*
 134 * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
 135 *		  0 for faster, smaller code (especially in the critical paths).
 136 *
 137 * STATS	- 1 to collect stats for /proc/slabinfo.
 138 *		  0 for faster, smaller code (especially in the critical paths).
 139 *
 140 * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
 141 */
 142
 143#ifdef CONFIG_DEBUG_SLAB
 144#define	DEBUG		1
 145#define	STATS		1
 146#define	FORCED_DEBUG	1
 147#else
 148#define	DEBUG		0
 149#define	STATS		0
 150#define	FORCED_DEBUG	0
 151#endif
 152
 153/* Shouldn't this be in a header file somewhere? */
 154#define	BYTES_PER_WORD		sizeof(void *)
 155#define	REDZONE_ALIGN		max(BYTES_PER_WORD, __alignof__(unsigned long long))
 156
 157#ifndef ARCH_KMALLOC_FLAGS
 158#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
 159#endif
 160
 161#define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \
 162				<= SLAB_OBJ_MIN_SIZE) ? 1 : 0)
 163
 164#if FREELIST_BYTE_INDEX
 165typedef unsigned char freelist_idx_t;
 166#else
 167typedef unsigned short freelist_idx_t;
 168#endif
 169
 170#define SLAB_OBJ_MAX_NUM ((1 << sizeof(freelist_idx_t) * BITS_PER_BYTE) - 1)
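/*
 * Worked example (illustrative, assuming 4 KiB pages): PAGE_SIZE >> BITS_PER_BYTE
 * is 4096 >> 8 = 16, so when SLAB_OBJ_MIN_SIZE is at least 16 bytes the freelist
 * index fits in an unsigned char and SLAB_OBJ_MAX_NUM is 255; otherwise an
 * unsigned short index is used and SLAB_OBJ_MAX_NUM is 65535.
 */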
 171
 172/*
 173 * struct array_cache
 174 *
 175 * Purpose:
 176 * - LIFO ordering, to hand out cache-warm objects from _alloc
 177 * - reduce the number of linked list operations
 178 * - reduce spinlock operations
 179 *
 180 * The limit is stored in the per-cpu structure to reduce the data cache
 181 * footprint.
 182 *
 183 */
 184struct array_cache {
 185	unsigned int avail;
 186	unsigned int limit;
 187	unsigned int batchcount;
 188	unsigned int touched;
 189	void *entry[];	/*
 190			 * Must have this definition in here for the proper
 191			 * alignment of array_cache. Also simplifies accessing
 192			 * the entries.
 193			 */
 194};
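/*
 * Illustrative fast-path behaviour (a sketch of how the hot paths later in
 * this file use the structure, not extra code in the original):
 *
 *	allocation:	objp = ac->entry[--ac->avail];	LIFO pop, cache-warm
 *	free:		ac->entry[ac->avail++] = objp;	LIFO push
 *
 * When avail reaches limit on free, batchcount entries are flushed back to
 * the per-node lists; when avail is 0 on alloc, the array is refilled.
 */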
 195
 196struct alien_cache {
 197	spinlock_t lock;
 198	struct array_cache ac;
 199};
 200
 201/*
 202 * Need this for bootstrapping a per node allocator.
 203 */
 204#define NUM_INIT_LISTS (2 * MAX_NUMNODES)
 205static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS];
 206#define	CACHE_CACHE 0
 207#define	SIZE_NODE (MAX_NUMNODES)
 208
 209static int drain_freelist(struct kmem_cache *cache,
 210			struct kmem_cache_node *n, int tofree);
 211static void free_block(struct kmem_cache *cachep, void **objpp, int len,
 212			int node, struct list_head *list);
 213static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list);
 214static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
 215static void cache_reap(struct work_struct *unused);
 216
 217static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
 218						void **list);
 219static inline void fixup_slab_list(struct kmem_cache *cachep,
 220				struct kmem_cache_node *n, struct page *page,
 221				void **list);
 222static int slab_early_init = 1;
 223
 224#define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node))
 225
 226static void kmem_cache_node_init(struct kmem_cache_node *parent)
 227{
 228	INIT_LIST_HEAD(&parent->slabs_full);
 229	INIT_LIST_HEAD(&parent->slabs_partial);
 230	INIT_LIST_HEAD(&parent->slabs_free);
 231	parent->total_slabs = 0;
 232	parent->free_slabs = 0;
 233	parent->shared = NULL;
 234	parent->alien = NULL;
 235	parent->colour_next = 0;
 236	spin_lock_init(&parent->list_lock);
 237	parent->free_objects = 0;
 238	parent->free_touched = 0;
 239}
 240
 241#define MAKE_LIST(cachep, listp, slab, nodeid)				\
 242	do {								\
 243		INIT_LIST_HEAD(listp);					\
 244		list_splice(&get_node(cachep, nodeid)->slab, listp);	\
 245	} while (0)
 246
 247#define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
 248	do {								\
 249	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
 250	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
 251	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
 252	} while (0)
 253
 254#define CFLGS_OBJFREELIST_SLAB	((slab_flags_t __force)0x40000000U)
 255#define CFLGS_OFF_SLAB		((slab_flags_t __force)0x80000000U)
 256#define	OBJFREELIST_SLAB(x)	((x)->flags & CFLGS_OBJFREELIST_SLAB)
 257#define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)
 258
 259#define BATCHREFILL_LIMIT	16
 260/*
  261 * Optimization question: fewer reaps mean a lower probability of unnecessary
 262 * cpucache drain/refill cycles.
 263 *
 264 * OTOH the cpuarrays can contain lots of objects,
 265 * which could lock up otherwise freeable slabs.
 266 */
 267#define REAPTIMEOUT_AC		(2*HZ)
 268#define REAPTIMEOUT_NODE	(4*HZ)
 269
 270#if STATS
 271#define	STATS_INC_ACTIVE(x)	((x)->num_active++)
 272#define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
 273#define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
 274#define	STATS_INC_GROWN(x)	((x)->grown++)
 275#define	STATS_ADD_REAPED(x,y)	((x)->reaped += (y))
 276#define	STATS_SET_HIGH(x)						\
 277	do {								\
 278		if ((x)->num_active > (x)->high_mark)			\
 279			(x)->high_mark = (x)->num_active;		\
 280	} while (0)
 281#define	STATS_INC_ERR(x)	((x)->errors++)
 282#define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
 283#define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
 284#define STATS_INC_ACOVERFLOW(x)   ((x)->node_overflow++)
 285#define	STATS_SET_FREEABLE(x, i)					\
 286	do {								\
 287		if ((x)->max_freeable < i)				\
 288			(x)->max_freeable = i;				\
 289	} while (0)
 290#define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
 291#define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
 292#define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
 293#define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
 294#else
 295#define	STATS_INC_ACTIVE(x)	do { } while (0)
 296#define	STATS_DEC_ACTIVE(x)	do { } while (0)
 297#define	STATS_INC_ALLOCED(x)	do { } while (0)
 298#define	STATS_INC_GROWN(x)	do { } while (0)
 299#define	STATS_ADD_REAPED(x,y)	do { (void)(y); } while (0)
 300#define	STATS_SET_HIGH(x)	do { } while (0)
 301#define	STATS_INC_ERR(x)	do { } while (0)
 302#define	STATS_INC_NODEALLOCS(x)	do { } while (0)
 303#define	STATS_INC_NODEFREES(x)	do { } while (0)
 304#define STATS_INC_ACOVERFLOW(x)   do { } while (0)
 305#define	STATS_SET_FREEABLE(x, i) do { } while (0)
 306#define STATS_INC_ALLOCHIT(x)	do { } while (0)
 307#define STATS_INC_ALLOCMISS(x)	do { } while (0)
 308#define STATS_INC_FREEHIT(x)	do { } while (0)
 309#define STATS_INC_FREEMISS(x)	do { } while (0)
 310#endif
 311
 312#if DEBUG
 313
 314/*
 315 * memory layout of objects:
 316 * 0		: objp
 317 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
 318 * 		the end of an object is aligned with the end of the real
 319 * 		allocation. Catches writes behind the end of the allocation.
 320 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
 321 * 		redzone word.
 322 * cachep->obj_offset: The real object.
 323 * cachep->size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
 324 * cachep->size - 1* BYTES_PER_WORD: last caller address
 325 *					[BYTES_PER_WORD long]
 326 */
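/*
 * Hypothetical example of the layout above (numbers are illustrative only):
 * with SLAB_RED_ZONE | SLAB_STORE_USER, cachep->size == 128, BYTES_PER_WORD == 8
 * and obj_offset == 16, bytes 8..15 hold the first redzone word, the object
 * starts at byte 16, bytes 112..119 hold the second redzone word and bytes
 * 120..127 hold the last caller address.
 */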
 327static int obj_offset(struct kmem_cache *cachep)
 328{
 329	return cachep->obj_offset;
 330}
 331
 332static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
 333{
 334	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
 335	return (unsigned long long*) (objp + obj_offset(cachep) -
 336				      sizeof(unsigned long long));
 337}
 338
 339static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
 340{
 341	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
 342	if (cachep->flags & SLAB_STORE_USER)
 343		return (unsigned long long *)(objp + cachep->size -
 344					      sizeof(unsigned long long) -
 345					      REDZONE_ALIGN);
 346	return (unsigned long long *) (objp + cachep->size -
 347				       sizeof(unsigned long long));
 348}
 349
 350static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 351{
 352	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
 353	return (void **)(objp + cachep->size - BYTES_PER_WORD);
 354}
 355
 356#else
 357
 358#define obj_offset(x)			0
 359#define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
 360#define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
 361#define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})
 362
 363#endif
 364
 365/*
 366 * Do not go above this order unless 0 objects fit into the slab or
 367 * overridden on the command line.
 368 */
 369#define	SLAB_MAX_ORDER_HI	1
 370#define	SLAB_MAX_ORDER_LO	0
 371static int slab_max_order = SLAB_MAX_ORDER_LO;
 372static bool slab_max_order_set __initdata;
 373
 374static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
 375				 unsigned int idx)
 376{
 377	return page->s_mem + cache->size * idx;
 378}
 379
 380#define BOOT_CPUCACHE_ENTRIES	1
 381/* internal cache of cache description objs */
 382static struct kmem_cache kmem_cache_boot = {
 383	.batchcount = 1,
 384	.limit = BOOT_CPUCACHE_ENTRIES,
 385	.shared = 1,
 386	.size = sizeof(struct kmem_cache),
 387	.name = "kmem_cache",
 388};
 389
 390static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
 391
 392static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
 393{
 394	return this_cpu_ptr(cachep->cpu_cache);
 395}
 396
 397/*
 398 * Calculate the number of objects and left-over bytes for a given buffer size.
 399 */
 400static unsigned int cache_estimate(unsigned long gfporder, size_t buffer_size,
 401		slab_flags_t flags, size_t *left_over)
 402{
 403	unsigned int num;
 404	size_t slab_size = PAGE_SIZE << gfporder;
 405
 406	/*
 407	 * The slab management structure can be either off the slab or
 408	 * on it. For the latter case, the memory allocated for a
 409	 * slab is used for:
 410	 *
 411	 * - @buffer_size bytes for each object
 412	 * - One freelist_idx_t for each object
 413	 *
 414	 * We don't need to consider alignment of freelist because
 415	 * freelist will be at the end of slab page. The objects will be
 416	 * at the correct alignment.
 417	 *
 418	 * If the slab management structure is off the slab, then the
 419	 * alignment will already be calculated into the size. Because
 420	 * the slabs are all pages aligned, the objects will be at the
 421	 * correct alignment when allocated.
 422	 */
 423	if (flags & (CFLGS_OBJFREELIST_SLAB | CFLGS_OFF_SLAB)) {
 424		num = slab_size / buffer_size;
 425		*left_over = slab_size % buffer_size;
 426	} else {
 427		num = slab_size / (buffer_size + sizeof(freelist_idx_t));
 428		*left_over = slab_size %
 429			(buffer_size + sizeof(freelist_idx_t));
 430	}
 431
 432	return num;
 433}
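/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096 and a one-byte
 * freelist_idx_t): for gfporder == 0 and buffer_size == 256, the on-slab case
 * gives num = 4096 / 257 = 15 objects and *left_over = 4096 - 15 * 257 = 241,
 * while the off-slab/objfreelist case gives num = 16 and *left_over = 0.
 */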
 434
 435#if DEBUG
 436#define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)
 437
 438static void __slab_error(const char *function, struct kmem_cache *cachep,
 439			char *msg)
 440{
 441	pr_err("slab error in %s(): cache `%s': %s\n",
 442	       function, cachep->name, msg);
 443	dump_stack();
 444	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
 445}
 446#endif
 447
 448/*
 449 * By default on NUMA we use alien caches to stage the freeing of
 450 * objects allocated from other nodes. This causes massive memory
  451 * inefficiencies when using a fake NUMA setup to split memory into a
  452 * large number of small nodes, so it can be disabled on the command
  453 * line.
  454 */
 455
 456static int use_alien_caches __read_mostly = 1;
 457static int __init noaliencache_setup(char *s)
 458{
 459	use_alien_caches = 0;
 460	return 1;
 461}
 462__setup("noaliencache", noaliencache_setup);
 463
 464static int __init slab_max_order_setup(char *str)
 465{
 466	get_option(&str, &slab_max_order);
 467	slab_max_order = slab_max_order < 0 ? 0 :
 468				min(slab_max_order, MAX_ORDER - 1);
 469	slab_max_order_set = true;
 470
 471	return 1;
 472}
 473__setup("slab_max_order=", slab_max_order_setup);
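/*
 * Example (illustrative): booting with "slab_max_order=2" on the kernel
 * command line allows slab pages of up to order 2; negative values are
 * clamped to 0 and anything larger than MAX_ORDER - 1 is capped there.
 */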
 474
 475#ifdef CONFIG_NUMA
 476/*
 477 * Special reaping functions for NUMA systems called from cache_reap().
 478 * These take care of doing round robin flushing of alien caches (containing
 479 * objects freed on different nodes from which they were allocated) and the
 480 * flushing of remote pcps by calling drain_node_pages.
 481 */
 482static DEFINE_PER_CPU(unsigned long, slab_reap_node);
 483
 484static void init_reap_node(int cpu)
 485{
 486	per_cpu(slab_reap_node, cpu) = next_node_in(cpu_to_mem(cpu),
 487						    node_online_map);
 488}
 489
 490static void next_reap_node(void)
 491{
 492	int node = __this_cpu_read(slab_reap_node);
 493
 494	node = next_node_in(node, node_online_map);
 495	__this_cpu_write(slab_reap_node, node);
 496}
 497
 498#else
 499#define init_reap_node(cpu) do { } while (0)
 500#define next_reap_node(void) do { } while (0)
 501#endif
 502
 503/*
 504 * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
 505 * via the workqueue/eventd.
 506 * Add the CPU number into the expiration time to minimize the possibility of
 507 * the CPUs getting into lockstep and contending for the global cache chain
 508 * lock.
 509 */
 510static void start_cpu_timer(int cpu)
 511{
 512	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);
 513
 514	if (reap_work->work.func == NULL) {
 515		init_reap_node(cpu);
 516		INIT_DEFERRABLE_WORK(reap_work, cache_reap);
 517		schedule_delayed_work_on(cpu, reap_work,
 518					__round_jiffies_relative(HZ, cpu));
 519	}
 520}
 521
 522static void init_arraycache(struct array_cache *ac, int limit, int batch)
 523{
 524	if (ac) {
 525		ac->avail = 0;
 526		ac->limit = limit;
 527		ac->batchcount = batch;
 528		ac->touched = 0;
 529	}
 530}
 531
 532static struct array_cache *alloc_arraycache(int node, int entries,
 533					    int batchcount, gfp_t gfp)
 534{
 535	size_t memsize = sizeof(void *) * entries + sizeof(struct array_cache);
 536	struct array_cache *ac = NULL;
 537
 538	ac = kmalloc_node(memsize, gfp, node);
 539	/*
 540	 * The array_cache structures contain pointers to free object.
 541	 * However, when such objects are allocated or transferred to another
 542	 * cache the pointers are not cleared and they could be counted as
 543	 * valid references during a kmemleak scan. Therefore, kmemleak must
 544	 * not scan such objects.
 545	 */
 546	kmemleak_no_scan(ac);
 547	init_arraycache(ac, entries, batchcount);
 548	return ac;
 549}
 550
 551static noinline void cache_free_pfmemalloc(struct kmem_cache *cachep,
 552					struct page *page, void *objp)
 553{
 554	struct kmem_cache_node *n;
 555	int page_node;
 556	LIST_HEAD(list);
 557
 558	page_node = page_to_nid(page);
 559	n = get_node(cachep, page_node);
 560
 561	spin_lock(&n->list_lock);
 562	free_block(cachep, &objp, 1, page_node, &list);
 563	spin_unlock(&n->list_lock);
 564
 565	slabs_destroy(cachep, &list);
 566}
 567
 568/*
 569 * Transfer objects in one arraycache to another.
 570 * Locking must be handled by the caller.
 571 *
 572 * Return the number of entries transferred.
 573 */
 574static int transfer_objects(struct array_cache *to,
 575		struct array_cache *from, unsigned int max)
 576{
 577	/* Figure out how many entries to transfer */
 578	int nr = min3(from->avail, max, to->limit - to->avail);
 579
 580	if (!nr)
 581		return 0;
 582
 583	memcpy(to->entry + to->avail, from->entry + from->avail -nr,
 584			sizeof(void *) *nr);
 585
 586	from->avail -= nr;
 587	to->avail += nr;
 588	return nr;
 589}
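/*
 * Illustrative example (hypothetical values): with from->avail == 10,
 * max == 8, to->limit == 12 and to->avail == 6, nr = min3(10, 8, 6) = 6;
 * the six most recently freed pointers (from->entry[4..9]) are copied to
 * to->entry[6..11], leaving from->avail == 4 and to->avail == 12.
 */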
 590
 591#ifndef CONFIG_NUMA
 592
 593#define drain_alien_cache(cachep, alien) do { } while (0)
 594#define reap_alien(cachep, n) do { } while (0)
 595
 596static inline struct alien_cache **alloc_alien_cache(int node,
 597						int limit, gfp_t gfp)
 598{
 599	return NULL;
 600}
 601
 602static inline void free_alien_cache(struct alien_cache **ac_ptr)
 603{
 604}
 605
 606static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 607{
 608	return 0;
 609}
 610
 611static inline void *alternate_node_alloc(struct kmem_cache *cachep,
 612		gfp_t flags)
 613{
 614	return NULL;
 615}
 616
 617static inline void *____cache_alloc_node(struct kmem_cache *cachep,
 618		 gfp_t flags, int nodeid)
 619{
 620	return NULL;
 621}
 622
 623static inline gfp_t gfp_exact_node(gfp_t flags)
 624{
 625	return flags & ~__GFP_NOFAIL;
 626}
 627
 628#else	/* CONFIG_NUMA */
 629
 630static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
 631static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
 632
 633static struct alien_cache *__alloc_alien_cache(int node, int entries,
 634						int batch, gfp_t gfp)
 635{
 636	size_t memsize = sizeof(void *) * entries + sizeof(struct alien_cache);
 637	struct alien_cache *alc = NULL;
 638
 639	alc = kmalloc_node(memsize, gfp, node);
 640	if (alc) {
 641		kmemleak_no_scan(alc);
 642		init_arraycache(&alc->ac, entries, batch);
 643		spin_lock_init(&alc->lock);
 644	}
 645	return alc;
 646}
 647
 648static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
 649{
 650	struct alien_cache **alc_ptr;
 651	int i;
 652
 653	if (limit > 1)
 654		limit = 12;
 655	alc_ptr = kcalloc_node(nr_node_ids, sizeof(void *), gfp, node);
 656	if (!alc_ptr)
 657		return NULL;
 658
 659	for_each_node(i) {
 660		if (i == node || !node_online(i))
 661			continue;
 662		alc_ptr[i] = __alloc_alien_cache(node, limit, 0xbaadf00d, gfp);
 663		if (!alc_ptr[i]) {
 664			for (i--; i >= 0; i--)
 665				kfree(alc_ptr[i]);
 666			kfree(alc_ptr);
 667			return NULL;
 668		}
 669	}
 670	return alc_ptr;
 671}
 672
 673static void free_alien_cache(struct alien_cache **alc_ptr)
 674{
 675	int i;
 676
 677	if (!alc_ptr)
 678		return;
 679	for_each_node(i)
 680	    kfree(alc_ptr[i]);
 681	kfree(alc_ptr);
 682}
 683
 684static void __drain_alien_cache(struct kmem_cache *cachep,
 685				struct array_cache *ac, int node,
 686				struct list_head *list)
 687{
 688	struct kmem_cache_node *n = get_node(cachep, node);
 689
 690	if (ac->avail) {
 691		spin_lock(&n->list_lock);
 692		/*
  693		 * Stuff objects into the remote node's shared array first.
  694		 * That way we can avoid the overhead of putting the objects
 695		 * into the free lists and getting them back later.
 696		 */
 697		if (n->shared)
 698			transfer_objects(n->shared, ac, ac->limit);
 699
 700		free_block(cachep, ac->entry, ac->avail, node, list);
 701		ac->avail = 0;
 702		spin_unlock(&n->list_lock);
 703	}
 704}
 705
 706/*
 707 * Called from cache_reap() to regularly drain alien caches round robin.
 708 */
 709static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
 710{
 711	int node = __this_cpu_read(slab_reap_node);
 712
 713	if (n->alien) {
 714		struct alien_cache *alc = n->alien[node];
 715		struct array_cache *ac;
 716
 717		if (alc) {
 718			ac = &alc->ac;
 719			if (ac->avail && spin_trylock_irq(&alc->lock)) {
 720				LIST_HEAD(list);
 721
 722				__drain_alien_cache(cachep, ac, node, &list);
 723				spin_unlock_irq(&alc->lock);
 724				slabs_destroy(cachep, &list);
 725			}
 726		}
 727	}
 728}
 729
 730static void drain_alien_cache(struct kmem_cache *cachep,
 731				struct alien_cache **alien)
 732{
 733	int i = 0;
 734	struct alien_cache *alc;
 735	struct array_cache *ac;
 736	unsigned long flags;
 737
 738	for_each_online_node(i) {
 739		alc = alien[i];
 740		if (alc) {
 741			LIST_HEAD(list);
 742
 743			ac = &alc->ac;
 744			spin_lock_irqsave(&alc->lock, flags);
 745			__drain_alien_cache(cachep, ac, i, &list);
 746			spin_unlock_irqrestore(&alc->lock, flags);
 747			slabs_destroy(cachep, &list);
 748		}
 749	}
 750}
 751
 752static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
 753				int node, int page_node)
 754{
 755	struct kmem_cache_node *n;
 756	struct alien_cache *alien = NULL;
 757	struct array_cache *ac;
 758	LIST_HEAD(list);
 759
 760	n = get_node(cachep, node);
 761	STATS_INC_NODEFREES(cachep);
 762	if (n->alien && n->alien[page_node]) {
 763		alien = n->alien[page_node];
 764		ac = &alien->ac;
 765		spin_lock(&alien->lock);
 766		if (unlikely(ac->avail == ac->limit)) {
 767			STATS_INC_ACOVERFLOW(cachep);
 768			__drain_alien_cache(cachep, ac, page_node, &list);
 769		}
 770		ac->entry[ac->avail++] = objp;
 771		spin_unlock(&alien->lock);
 772		slabs_destroy(cachep, &list);
 773	} else {
 774		n = get_node(cachep, page_node);
 775		spin_lock(&n->list_lock);
 776		free_block(cachep, &objp, 1, page_node, &list);
 777		spin_unlock(&n->list_lock);
 778		slabs_destroy(cachep, &list);
 779	}
 780	return 1;
 781}
 782
 783static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 784{
 785	int page_node = page_to_nid(virt_to_page(objp));
 786	int node = numa_mem_id();
 787	/*
  788	 * Make sure we are not freeing an object from another node to the array
 789	 * cache on this cpu.
 790	 */
 791	if (likely(node == page_node))
 792		return 0;
 793
 794	return __cache_free_alien(cachep, objp, node, page_node);
 795}
 796
 797/*
 798 * Construct gfp mask to allocate from a specific node but do not reclaim or
 799 * warn about failures.
 800 */
 801static inline gfp_t gfp_exact_node(gfp_t flags)
 802{
 803	return (flags | __GFP_THISNODE | __GFP_NOWARN) & ~(__GFP_RECLAIM|__GFP_NOFAIL);
 804}
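/*
 * For example (illustrative), gfp_exact_node(GFP_KERNEL) keeps the caller's
 * mask but adds __GFP_THISNODE and __GFP_NOWARN and strips __GFP_RECLAIM and
 * __GFP_NOFAIL, so the page either comes from the requested node without
 * entering reclaim or the allocation fails quietly.
 */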
 805#endif
 806
 807static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
 808{
 809	struct kmem_cache_node *n;
 810
 811	/*
 812	 * Set up the kmem_cache_node for cpu before we can
 813	 * begin anything. Make sure some other cpu on this
 814	 * node has not already allocated this
 815	 */
 816	n = get_node(cachep, node);
 817	if (n) {
 818		spin_lock_irq(&n->list_lock);
 819		n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount +
 820				cachep->num;
 821		spin_unlock_irq(&n->list_lock);
 822
 823		return 0;
 824	}
 825
 826	n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node);
 827	if (!n)
 828		return -ENOMEM;
 829
 830	kmem_cache_node_init(n);
 831	n->next_reap = jiffies + REAPTIMEOUT_NODE +
 832		    ((unsigned long)cachep) % REAPTIMEOUT_NODE;
 833
 834	n->free_limit =
 835		(1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num;
 836
 837	/*
 838	 * The kmem_cache_nodes don't come and go as CPUs
 839	 * come and go.  slab_mutex is sufficient
 840	 * protection here.
 841	 */
 842	cachep->node[node] = n;
 843
 844	return 0;
 845}
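/*
 * Worked example of the free_limit formula above (hypothetical values):
 * a node with 4 CPUs, cachep->batchcount == 16 and cachep->num == 32 gets
 * free_limit = (1 + 4) * 16 + 32 = 112; free_block() starts destroying
 * completely free slabs once n->free_objects exceeds this limit.
 */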
 846
 847#if (defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)) || defined(CONFIG_SMP)
 848/*
  849 * Allocates and initializes a kmem_cache_node for a node on each slab cache, used for
 850 * either memory or cpu hotplug.  If memory is being hot-added, the kmem_cache_node
 851 * will be allocated off-node since memory is not yet online for the new node.
  852 * When hotplugging memory or a cpu, existing nodes are not replaced if
 853 * already in use.
 854 *
 855 * Must hold slab_mutex.
 856 */
 857static int init_cache_node_node(int node)
 858{
 859	int ret;
 860	struct kmem_cache *cachep;
 861
 862	list_for_each_entry(cachep, &slab_caches, list) {
 863		ret = init_cache_node(cachep, node, GFP_KERNEL);
 864		if (ret)
 865			return ret;
 866	}
 867
 868	return 0;
 869}
 870#endif
 871
 872static int setup_kmem_cache_node(struct kmem_cache *cachep,
 873				int node, gfp_t gfp, bool force_change)
 874{
 875	int ret = -ENOMEM;
 876	struct kmem_cache_node *n;
 877	struct array_cache *old_shared = NULL;
 878	struct array_cache *new_shared = NULL;
 879	struct alien_cache **new_alien = NULL;
 880	LIST_HEAD(list);
 881
 882	if (use_alien_caches) {
 883		new_alien = alloc_alien_cache(node, cachep->limit, gfp);
 884		if (!new_alien)
 885			goto fail;
 886	}
 887
 888	if (cachep->shared) {
 889		new_shared = alloc_arraycache(node,
 890			cachep->shared * cachep->batchcount, 0xbaadf00d, gfp);
 891		if (!new_shared)
 892			goto fail;
 893	}
 894
 895	ret = init_cache_node(cachep, node, gfp);
 896	if (ret)
 897		goto fail;
 898
 899	n = get_node(cachep, node);
 900	spin_lock_irq(&n->list_lock);
 901	if (n->shared && force_change) {
 902		free_block(cachep, n->shared->entry,
 903				n->shared->avail, node, &list);
 904		n->shared->avail = 0;
 905	}
 906
 907	if (!n->shared || force_change) {
 908		old_shared = n->shared;
 909		n->shared = new_shared;
 910		new_shared = NULL;
 911	}
 912
 913	if (!n->alien) {
 914		n->alien = new_alien;
 915		new_alien = NULL;
 916	}
 917
 918	spin_unlock_irq(&n->list_lock);
 919	slabs_destroy(cachep, &list);
 920
 921	/*
 922	 * To protect lockless access to n->shared during irq disabled context.
  923	 * If n->shared isn't NULL in irq disabled context, accessing it is
 924	 * guaranteed to be valid until irq is re-enabled, because it will be
 925	 * freed after synchronize_rcu().
 926	 */
 927	if (old_shared && force_change)
 928		synchronize_rcu();
 929
 930fail:
 931	kfree(old_shared);
 932	kfree(new_shared);
 933	free_alien_cache(new_alien);
 934
 935	return ret;
 936}
 937
 938#ifdef CONFIG_SMP
 939
 940static void cpuup_canceled(long cpu)
 941{
 942	struct kmem_cache *cachep;
 943	struct kmem_cache_node *n = NULL;
 944	int node = cpu_to_mem(cpu);
 945	const struct cpumask *mask = cpumask_of_node(node);
 946
 947	list_for_each_entry(cachep, &slab_caches, list) {
 948		struct array_cache *nc;
 949		struct array_cache *shared;
 950		struct alien_cache **alien;
 951		LIST_HEAD(list);
 952
 953		n = get_node(cachep, node);
 954		if (!n)
 955			continue;
 956
 957		spin_lock_irq(&n->list_lock);
 958
 959		/* Free limit for this kmem_cache_node */
 960		n->free_limit -= cachep->batchcount;
 961
 962		/* cpu is dead; no one can alloc from it. */
 963		nc = per_cpu_ptr(cachep->cpu_cache, cpu);
 964		free_block(cachep, nc->entry, nc->avail, node, &list);
 965		nc->avail = 0;
 966
 967		if (!cpumask_empty(mask)) {
 968			spin_unlock_irq(&n->list_lock);
 969			goto free_slab;
 970		}
 971
 972		shared = n->shared;
 973		if (shared) {
 974			free_block(cachep, shared->entry,
 975				   shared->avail, node, &list);
 976			n->shared = NULL;
 977		}
 978
 979		alien = n->alien;
 980		n->alien = NULL;
 981
 982		spin_unlock_irq(&n->list_lock);
 983
 984		kfree(shared);
 985		if (alien) {
 986			drain_alien_cache(cachep, alien);
 987			free_alien_cache(alien);
 988		}
 989
 990free_slab:
 991		slabs_destroy(cachep, &list);
 992	}
 993	/*
 994	 * In the previous loop, all the objects were freed to
 995	 * the respective cache's slabs,  now we can go ahead and
 996	 * shrink each nodelist to its limit.
 997	 */
 998	list_for_each_entry(cachep, &slab_caches, list) {
 999		n = get_node(cachep, node);
1000		if (!n)
1001			continue;
1002		drain_freelist(cachep, n, INT_MAX);
1003	}
1004}
1005
1006static int cpuup_prepare(long cpu)
1007{
1008	struct kmem_cache *cachep;
1009	int node = cpu_to_mem(cpu);
1010	int err;
1011
1012	/*
1013	 * We need to do this right in the beginning since
1014	 * alloc_arraycache's are going to use this list.
1015	 * kmalloc_node allows us to add the slab to the right
1016	 * kmem_cache_node and not this cpu's kmem_cache_node
1017	 */
1018	err = init_cache_node_node(node);
1019	if (err < 0)
1020		goto bad;
1021
1022	/*
1023	 * Now we can go ahead with allocating the shared arrays and
1024	 * array caches
1025	 */
1026	list_for_each_entry(cachep, &slab_caches, list) {
1027		err = setup_kmem_cache_node(cachep, node, GFP_KERNEL, false);
1028		if (err)
1029			goto bad;
1030	}
1031
1032	return 0;
1033bad:
1034	cpuup_canceled(cpu);
1035	return -ENOMEM;
1036}
1037
1038int slab_prepare_cpu(unsigned int cpu)
1039{
1040	int err;
1041
1042	mutex_lock(&slab_mutex);
1043	err = cpuup_prepare(cpu);
1044	mutex_unlock(&slab_mutex);
1045	return err;
1046}
1047
1048/*
1049 * This is called for a failed online attempt and for a successful
1050 * offline.
1051 *
1052 * Even if all the cpus of a node are down, we don't free the
 1053 * kmem_cache_node of any cache. This is to avoid a race between cpu_down, and
 1054 * a kmalloc allocation from another cpu for memory from the node of
 1055 * the cpu going down.  The kmem_cache_node structure is usually allocated from
1056 * kmem_cache_create() and gets destroyed at kmem_cache_destroy().
1057 */
1058int slab_dead_cpu(unsigned int cpu)
1059{
1060	mutex_lock(&slab_mutex);
1061	cpuup_canceled(cpu);
1062	mutex_unlock(&slab_mutex);
1063	return 0;
1064}
1065#endif
1066
1067static int slab_online_cpu(unsigned int cpu)
1068{
1069	start_cpu_timer(cpu);
1070	return 0;
1071}
1072
1073static int slab_offline_cpu(unsigned int cpu)
1074{
1075	/*
1076	 * Shutdown cache reaper. Note that the slab_mutex is held so
1077	 * that if cache_reap() is invoked it cannot do anything
1078	 * expensive but will only modify reap_work and reschedule the
1079	 * timer.
1080	 */
1081	cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
1082	/* Now the cache_reaper is guaranteed to be not running. */
1083	per_cpu(slab_reap_work, cpu).work.func = NULL;
1084	return 0;
1085}
1086
1087#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
1088/*
1089 * Drains freelist for a node on each slab cache, used for memory hot-remove.
1090 * Returns -EBUSY if all objects cannot be drained so that the node is not
1091 * removed.
1092 *
1093 * Must hold slab_mutex.
1094 */
1095static int __meminit drain_cache_node_node(int node)
1096{
1097	struct kmem_cache *cachep;
1098	int ret = 0;
1099
1100	list_for_each_entry(cachep, &slab_caches, list) {
1101		struct kmem_cache_node *n;
1102
1103		n = get_node(cachep, node);
1104		if (!n)
1105			continue;
1106
1107		drain_freelist(cachep, n, INT_MAX);
1108
1109		if (!list_empty(&n->slabs_full) ||
1110		    !list_empty(&n->slabs_partial)) {
1111			ret = -EBUSY;
1112			break;
1113		}
1114	}
1115	return ret;
1116}
1117
1118static int __meminit slab_memory_callback(struct notifier_block *self,
1119					unsigned long action, void *arg)
1120{
1121	struct memory_notify *mnb = arg;
1122	int ret = 0;
1123	int nid;
1124
1125	nid = mnb->status_change_nid;
1126	if (nid < 0)
1127		goto out;
1128
1129	switch (action) {
1130	case MEM_GOING_ONLINE:
1131		mutex_lock(&slab_mutex);
1132		ret = init_cache_node_node(nid);
1133		mutex_unlock(&slab_mutex);
1134		break;
1135	case MEM_GOING_OFFLINE:
1136		mutex_lock(&slab_mutex);
1137		ret = drain_cache_node_node(nid);
1138		mutex_unlock(&slab_mutex);
1139		break;
1140	case MEM_ONLINE:
1141	case MEM_OFFLINE:
1142	case MEM_CANCEL_ONLINE:
1143	case MEM_CANCEL_OFFLINE:
1144		break;
1145	}
1146out:
1147	return notifier_from_errno(ret);
1148}
1149#endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */
1150
1151/*
1152 * swap the static kmem_cache_node with kmalloced memory
1153 */
1154static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list,
1155				int nodeid)
1156{
1157	struct kmem_cache_node *ptr;
1158
1159	ptr = kmalloc_node(sizeof(struct kmem_cache_node), GFP_NOWAIT, nodeid);
1160	BUG_ON(!ptr);
1161
1162	memcpy(ptr, list, sizeof(struct kmem_cache_node));
1163	/*
1164	 * Do not assume that spinlocks can be initialized via memcpy:
1165	 */
1166	spin_lock_init(&ptr->list_lock);
1167
1168	MAKE_ALL_LISTS(cachep, ptr, nodeid);
1169	cachep->node[nodeid] = ptr;
1170}
1171
1172/*
 1173 * For setting up all the kmem_cache_node structures for a cache whose buffer_size
 1174 * is the same as the size of kmem_cache_node.
1175 */
1176static void __init set_up_node(struct kmem_cache *cachep, int index)
1177{
1178	int node;
1179
1180	for_each_online_node(node) {
1181		cachep->node[node] = &init_kmem_cache_node[index + node];
1182		cachep->node[node]->next_reap = jiffies +
1183		    REAPTIMEOUT_NODE +
1184		    ((unsigned long)cachep) % REAPTIMEOUT_NODE;
1185	}
1186}
1187
1188/*
 1189 * Initialisation.  Called after the page allocator has been initialised and
1190 * before smp_init().
1191 */
1192void __init kmem_cache_init(void)
1193{
1194	int i;
1195
1196	kmem_cache = &kmem_cache_boot;
1197
1198	if (!IS_ENABLED(CONFIG_NUMA) || num_possible_nodes() == 1)
1199		use_alien_caches = 0;
1200
1201	for (i = 0; i < NUM_INIT_LISTS; i++)
1202		kmem_cache_node_init(&init_kmem_cache_node[i]);
1203
1204	/*
1205	 * Fragmentation resistance on low memory - only use bigger
1206	 * page orders on machines with more than 32MB of memory if
1207	 * not overridden on the command line.
1208	 */
1209	if (!slab_max_order_set && totalram_pages() > (32 << 20) >> PAGE_SHIFT)
1210		slab_max_order = SLAB_MAX_ORDER_HI;
1211
1212	/* Bootstrap is tricky, because several objects are allocated
1213	 * from caches that do not exist yet:
1214	 * 1) initialize the kmem_cache cache: it contains the struct
1215	 *    kmem_cache structures of all caches, except kmem_cache itself:
1216	 *    kmem_cache is statically allocated.
1217	 *    Initially an __init data area is used for the head array and the
1218	 *    kmem_cache_node structures, it's replaced with a kmalloc allocated
1219	 *    array at the end of the bootstrap.
1220	 * 2) Create the first kmalloc cache.
1221	 *    The struct kmem_cache for the new cache is allocated normally.
1222	 *    An __init data area is used for the head array.
1223	 * 3) Create the remaining kmalloc caches, with minimally sized
1224	 *    head arrays.
1225	 * 4) Replace the __init data head arrays for kmem_cache and the first
1226	 *    kmalloc cache with kmalloc allocated arrays.
1227	 * 5) Replace the __init data for kmem_cache_node for kmem_cache and
 1228 *    the other caches with kmalloc-allocated memory.
1229	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
1230	 */
1231
1232	/* 1) create the kmem_cache */
1233
1234	/*
1235	 * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
1236	 */
1237	create_boot_cache(kmem_cache, "kmem_cache",
1238		offsetof(struct kmem_cache, node) +
1239				  nr_node_ids * sizeof(struct kmem_cache_node *),
1240				  SLAB_HWCACHE_ALIGN, 0, 0);
1241	list_add(&kmem_cache->list, &slab_caches);
1242	memcg_link_cache(kmem_cache, NULL);
1243	slab_state = PARTIAL;
1244
1245	/*
1246	 * Initialize the caches that provide memory for the  kmem_cache_node
1247	 * structures first.  Without this, further allocations will bug.
1248	 */
1249	kmalloc_caches[KMALLOC_NORMAL][INDEX_NODE] = create_kmalloc_cache(
1250				kmalloc_info[INDEX_NODE].name,
1251				kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS,
1252				0, kmalloc_size(INDEX_NODE));
1253	slab_state = PARTIAL_NODE;
1254	setup_kmalloc_cache_index_table();
1255
1256	slab_early_init = 0;
1257
1258	/* 5) Replace the bootstrap kmem_cache_node */
1259	{
1260		int nid;
1261
1262		for_each_online_node(nid) {
1263			init_list(kmem_cache, &init_kmem_cache_node[CACHE_CACHE + nid], nid);
1264
1265			init_list(kmalloc_caches[KMALLOC_NORMAL][INDEX_NODE],
1266					  &init_kmem_cache_node[SIZE_NODE + nid], nid);
1267		}
1268	}
1269
1270	create_kmalloc_caches(ARCH_KMALLOC_FLAGS);
1271}
1272
1273void __init kmem_cache_init_late(void)
1274{
1275	struct kmem_cache *cachep;
1276
1277	/* 6) resize the head arrays to their final sizes */
1278	mutex_lock(&slab_mutex);
1279	list_for_each_entry(cachep, &slab_caches, list)
1280		if (enable_cpucache(cachep, GFP_NOWAIT))
1281			BUG();
1282	mutex_unlock(&slab_mutex);
1283
1284	/* Done! */
1285	slab_state = FULL;
1286
1287#ifdef CONFIG_NUMA
1288	/*
1289	 * Register a memory hotplug callback that initializes and frees
1290	 * node.
1291	 */
1292	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
1293#endif
1294
1295	/*
1296	 * The reap timers are started later, with a module init call: That part
1297	 * of the kernel is not yet operational.
1298	 */
1299}
1300
1301static int __init cpucache_init(void)
1302{
1303	int ret;
1304
1305	/*
1306	 * Register the timers that return unneeded pages to the page allocator
1307	 */
1308	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SLAB online",
1309				slab_online_cpu, slab_offline_cpu);
1310	WARN_ON(ret < 0);
1311
1312	return 0;
1313}
1314__initcall(cpucache_init);
1315
1316static noinline void
1317slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
1318{
1319#if DEBUG
1320	struct kmem_cache_node *n;
1321	unsigned long flags;
1322	int node;
1323	static DEFINE_RATELIMIT_STATE(slab_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
1324				      DEFAULT_RATELIMIT_BURST);
1325
1326	if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slab_oom_rs))
1327		return;
1328
1329	pr_warn("SLAB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
1330		nodeid, gfpflags, &gfpflags);
1331	pr_warn("  cache: %s, object size: %d, order: %d\n",
1332		cachep->name, cachep->size, cachep->gfporder);
1333
1334	for_each_kmem_cache_node(cachep, node, n) {
1335		unsigned long total_slabs, free_slabs, free_objs;
1336
1337		spin_lock_irqsave(&n->list_lock, flags);
1338		total_slabs = n->total_slabs;
1339		free_slabs = n->free_slabs;
1340		free_objs = n->free_objects;
1341		spin_unlock_irqrestore(&n->list_lock, flags);
1342
1343		pr_warn("  node %d: slabs: %ld/%ld, objs: %ld/%ld\n",
1344			node, total_slabs - free_slabs, total_slabs,
1345			(total_slabs * cachep->num) - free_objs,
1346			total_slabs * cachep->num);
1347	}
1348#endif
1349}
1350
1351/*
1352 * Interface to system's page allocator. No need to hold the
1353 * kmem_cache_node ->list_lock.
1354 *
1355 * If we requested dmaable memory, we will get it. Even if we
1356 * did not request dmaable memory, we might get it, but that
1357 * would be relatively rare and ignorable.
1358 */
1359static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
1360								int nodeid)
1361{
1362	struct page *page;
1363
1364	flags |= cachep->allocflags;
1365
1366	page = __alloc_pages_node(nodeid, flags, cachep->gfporder);
1367	if (!page) {
1368		slab_out_of_memory(cachep, flags, nodeid);
1369		return NULL;
1370	}
1371
1372	if (charge_slab_page(page, flags, cachep->gfporder, cachep)) {
1373		__free_pages(page, cachep->gfporder);
1374		return NULL;
1375	}
1376
1377	__SetPageSlab(page);
1378	/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
1379	if (sk_memalloc_socks() && page_is_pfmemalloc(page))
1380		SetPageSlabPfmemalloc(page);
1381
1382	return page;
1383}
1384
1385/*
1386 * Interface to system's page release.
1387 */
1388static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
1389{
1390	int order = cachep->gfporder;
1391
1392	BUG_ON(!PageSlab(page));
1393	__ClearPageSlabPfmemalloc(page);
1394	__ClearPageSlab(page);
1395	page_mapcount_reset(page);
1396	page->mapping = NULL;
1397
1398	if (current->reclaim_state)
1399		current->reclaim_state->reclaimed_slab += 1 << order;
1400	uncharge_slab_page(page, order, cachep);
1401	__free_pages(page, order);
1402}
1403
1404static void kmem_rcu_free(struct rcu_head *head)
1405{
1406	struct kmem_cache *cachep;
1407	struct page *page;
1408
1409	page = container_of(head, struct page, rcu_head);
1410	cachep = page->slab_cache;
1411
1412	kmem_freepages(cachep, page);
1413}
1414
1415#if DEBUG
1416static bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
1417{
1418	if (debug_pagealloc_enabled() && OFF_SLAB(cachep) &&
1419		(cachep->size % PAGE_SIZE) == 0)
1420		return true;
1421
1422	return false;
1423}
1424
1425#ifdef CONFIG_DEBUG_PAGEALLOC
1426static void slab_kernel_map(struct kmem_cache *cachep, void *objp, int map)
1427{
1428	if (!is_debug_pagealloc_cache(cachep))
1429		return;
1430
1431	kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map);
1432}
1433
1434#else
1435static inline void slab_kernel_map(struct kmem_cache *cachep, void *objp,
1436				int map) {}
1437
1438#endif
1439
1440static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
1441{
1442	int size = cachep->object_size;
1443	addr = &((char *)addr)[obj_offset(cachep)];
1444
1445	memset(addr, val, size);
1446	*(unsigned char *)(addr + size - 1) = POISON_END;
1447}
1448
1449static void dump_line(char *data, int offset, int limit)
1450{
1451	int i;
1452	unsigned char error = 0;
1453	int bad_count = 0;
1454
1455	pr_err("%03x: ", offset);
1456	for (i = 0; i < limit; i++) {
1457		if (data[offset + i] != POISON_FREE) {
1458			error = data[offset + i];
1459			bad_count++;
1460		}
1461	}
1462	print_hex_dump(KERN_CONT, "", 0, 16, 1,
1463			&data[offset], limit, 1);
1464
1465	if (bad_count == 1) {
1466		error ^= POISON_FREE;
1467		if (!(error & (error - 1))) {
1468			pr_err("Single bit error detected. Probably bad RAM.\n");
1469#ifdef CONFIG_X86
1470			pr_err("Run memtest86+ or a similar memory test tool.\n");
1471#else
1472			pr_err("Run a memory test tool.\n");
1473#endif
1474		}
1475	}
1476}
1477#endif
1478
1479#if DEBUG
1480
1481static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
1482{
1483	int i, size;
1484	char *realobj;
1485
1486	if (cachep->flags & SLAB_RED_ZONE) {
1487		pr_err("Redzone: 0x%llx/0x%llx\n",
1488		       *dbg_redzone1(cachep, objp),
1489		       *dbg_redzone2(cachep, objp));
1490	}
1491
1492	if (cachep->flags & SLAB_STORE_USER)
1493		pr_err("Last user: (%pSR)\n", *dbg_userword(cachep, objp));
1494	realobj = (char *)objp + obj_offset(cachep);
1495	size = cachep->object_size;
1496	for (i = 0; i < size && lines; i += 16, lines--) {
1497		int limit;
1498		limit = 16;
1499		if (i + limit > size)
1500			limit = size - i;
1501		dump_line(realobj, i, limit);
1502	}
1503}
1504
1505static void check_poison_obj(struct kmem_cache *cachep, void *objp)
1506{
1507	char *realobj;
1508	int size, i;
1509	int lines = 0;
1510
1511	if (is_debug_pagealloc_cache(cachep))
1512		return;
1513
1514	realobj = (char *)objp + obj_offset(cachep);
1515	size = cachep->object_size;
1516
1517	for (i = 0; i < size; i++) {
1518		char exp = POISON_FREE;
1519		if (i == size - 1)
1520			exp = POISON_END;
1521		if (realobj[i] != exp) {
1522			int limit;
1523			/* Mismatch ! */
1524			/* Print header */
1525			if (lines == 0) {
1526				pr_err("Slab corruption (%s): %s start=%px, len=%d\n",
1527				       print_tainted(), cachep->name,
1528				       realobj, size);
1529				print_objinfo(cachep, objp, 0);
1530			}
1531			/* Hexdump the affected line */
1532			i = (i / 16) * 16;
1533			limit = 16;
1534			if (i + limit > size)
1535				limit = size - i;
1536			dump_line(realobj, i, limit);
1537			i += 16;
1538			lines++;
1539			/* Limit to 5 lines */
1540			if (lines > 5)
1541				break;
1542		}
1543	}
1544	if (lines != 0) {
1545		/* Print some data about the neighboring objects, if they
1546		 * exist:
1547		 */
1548		struct page *page = virt_to_head_page(objp);
1549		unsigned int objnr;
1550
1551		objnr = obj_to_index(cachep, page, objp);
1552		if (objnr) {
1553			objp = index_to_obj(cachep, page, objnr - 1);
1554			realobj = (char *)objp + obj_offset(cachep);
1555			pr_err("Prev obj: start=%px, len=%d\n", realobj, size);
1556			print_objinfo(cachep, objp, 2);
1557		}
1558		if (objnr + 1 < cachep->num) {
1559			objp = index_to_obj(cachep, page, objnr + 1);
1560			realobj = (char *)objp + obj_offset(cachep);
1561			pr_err("Next obj: start=%px, len=%d\n", realobj, size);
1562			print_objinfo(cachep, objp, 2);
1563		}
1564	}
1565}
1566#endif
1567
1568#if DEBUG
1569static void slab_destroy_debugcheck(struct kmem_cache *cachep,
1570						struct page *page)
1571{
1572	int i;
1573
1574	if (OBJFREELIST_SLAB(cachep) && cachep->flags & SLAB_POISON) {
1575		poison_obj(cachep, page->freelist - obj_offset(cachep),
1576			POISON_FREE);
1577	}
1578
1579	for (i = 0; i < cachep->num; i++) {
1580		void *objp = index_to_obj(cachep, page, i);
1581
1582		if (cachep->flags & SLAB_POISON) {
1583			check_poison_obj(cachep, objp);
1584			slab_kernel_map(cachep, objp, 1);
1585		}
1586		if (cachep->flags & SLAB_RED_ZONE) {
1587			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
1588				slab_error(cachep, "start of a freed object was overwritten");
1589			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
1590				slab_error(cachep, "end of a freed object was overwritten");
1591		}
1592	}
1593}
1594#else
1595static void slab_destroy_debugcheck(struct kmem_cache *cachep,
1596						struct page *page)
1597{
1598}
1599#endif
1600
1601/**
1602 * slab_destroy - destroy and release all objects in a slab
1603 * @cachep: cache pointer being destroyed
1604 * @page: page pointer being destroyed
1605 *
1606 * Destroy all the objs in a slab page, and release the mem back to the system.
 1607 * Before calling, the slab page must have been unlinked from the cache. The
1608 * kmem_cache_node ->list_lock is not held/needed.
1609 */
1610static void slab_destroy(struct kmem_cache *cachep, struct page *page)
1611{
1612	void *freelist;
1613
1614	freelist = page->freelist;
1615	slab_destroy_debugcheck(cachep, page);
1616	if (unlikely(cachep->flags & SLAB_TYPESAFE_BY_RCU))
1617		call_rcu(&page->rcu_head, kmem_rcu_free);
1618	else
1619		kmem_freepages(cachep, page);
1620
1621	/*
1622	 * From now on, we don't use freelist
1623	 * although actual page can be freed in rcu context
1624	 */
1625	if (OFF_SLAB(cachep))
1626		kmem_cache_free(cachep->freelist_cache, freelist);
1627}
1628
1629static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list)
1630{
1631	struct page *page, *n;
1632
1633	list_for_each_entry_safe(page, n, list, slab_list) {
1634		list_del(&page->slab_list);
1635		slab_destroy(cachep, page);
1636	}
1637}
1638
1639/**
1640 * calculate_slab_order - calculate size (page order) of slabs
1641 * @cachep: pointer to the cache that is being created
1642 * @size: size of objects to be created in this cache.
1643 * @flags: slab allocation flags
1644 *
1645 * Also calculates the number of objects per slab.
1646 *
1647 * This could be made much more intelligent.  For now, try to avoid using
1648 * high order pages for slabs.  When the gfp() functions are more friendly
1649 * towards high-order requests, this should be changed.
1650 *
1651 * Return: number of left-over bytes in a slab
1652 */
1653static size_t calculate_slab_order(struct kmem_cache *cachep,
1654				size_t size, slab_flags_t flags)
1655{
1656	size_t left_over = 0;
1657	int gfporder;
1658
1659	for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
1660		unsigned int num;
1661		size_t remainder;
1662
1663		num = cache_estimate(gfporder, size, flags, &remainder);
1664		if (!num)
1665			continue;
1666
1667		/* Can't handle number of objects more than SLAB_OBJ_MAX_NUM */
1668		if (num > SLAB_OBJ_MAX_NUM)
1669			break;
1670
1671		if (flags & CFLGS_OFF_SLAB) {
1672			struct kmem_cache *freelist_cache;
1673			size_t freelist_size;
1674
1675			freelist_size = num * sizeof(freelist_idx_t);
1676			freelist_cache = kmalloc_slab(freelist_size, 0u);
1677			if (!freelist_cache)
1678				continue;
1679
1680			/*
1681			 * Needed to avoid possible looping condition
1682			 * in cache_grow_begin()
1683			 */
1684			if (OFF_SLAB(freelist_cache))
1685				continue;
1686
1687			/* check if off slab has enough benefit */
1688			if (freelist_cache->size > cachep->size / 2)
1689				continue;
1690		}
1691
1692		/* Found something acceptable - save it away */
1693		cachep->num = num;
1694		cachep->gfporder = gfporder;
1695		left_over = remainder;
1696
1697		/*
1698		 * A VFS-reclaimable slab tends to have most allocations
1699		 * as GFP_NOFS and we really don't want to have to be allocating
1700		 * higher-order pages when we are unable to shrink dcache.
1701		 */
1702		if (flags & SLAB_RECLAIM_ACCOUNT)
1703			break;
1704
1705		/*
1706		 * Large number of objects is good, but very large slabs are
1707		 * currently bad for the gfp()s.
1708		 */
1709		if (gfporder >= slab_max_order)
1710			break;
1711
1712		/*
1713		 * Acceptable internal fragmentation?
1714		 */
1715		if (left_over * 8 <= (PAGE_SIZE << gfporder))
1716			break;
1717	}
1718	return left_over;
1719}
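/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096, a one-byte
 * freelist_idx_t and on-slab management): for size == 512, gfporder == 0
 * already yields num = 4096 / 513 = 7 objects with left_over = 505, and
 * since 505 * 8 <= 4096 the loop stops at order 0.
 */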
1720
1721static struct array_cache __percpu *alloc_kmem_cache_cpus(
1722		struct kmem_cache *cachep, int entries, int batchcount)
1723{
1724	int cpu;
1725	size_t size;
1726	struct array_cache __percpu *cpu_cache;
1727
1728	size = sizeof(void *) * entries + sizeof(struct array_cache);
1729	cpu_cache = __alloc_percpu(size, sizeof(void *));
1730
1731	if (!cpu_cache)
1732		return NULL;
1733
1734	for_each_possible_cpu(cpu) {
1735		init_arraycache(per_cpu_ptr(cpu_cache, cpu),
1736				entries, batchcount);
1737	}
1738
1739	return cpu_cache;
1740}
1741
1742static int __ref setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
1743{
1744	if (slab_state >= FULL)
1745		return enable_cpucache(cachep, gfp);
1746
1747	cachep->cpu_cache = alloc_kmem_cache_cpus(cachep, 1, 1);
1748	if (!cachep->cpu_cache)
1749		return 1;
1750
1751	if (slab_state == DOWN) {
1752		/* Creation of first cache (kmem_cache). */
1753		set_up_node(kmem_cache, CACHE_CACHE);
1754	} else if (slab_state == PARTIAL) {
1755		/* For kmem_cache_node */
1756		set_up_node(cachep, SIZE_NODE);
1757	} else {
1758		int node;
1759
1760		for_each_online_node(node) {
1761			cachep->node[node] = kmalloc_node(
1762				sizeof(struct kmem_cache_node), gfp, node);
1763			BUG_ON(!cachep->node[node]);
1764			kmem_cache_node_init(cachep->node[node]);
1765		}
1766	}
1767
1768	cachep->node[numa_mem_id()]->next_reap =
1769			jiffies + REAPTIMEOUT_NODE +
1770			((unsigned long)cachep) % REAPTIMEOUT_NODE;
1771
1772	cpu_cache_get(cachep)->avail = 0;
1773	cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
1774	cpu_cache_get(cachep)->batchcount = 1;
1775	cpu_cache_get(cachep)->touched = 0;
1776	cachep->batchcount = 1;
1777	cachep->limit = BOOT_CPUCACHE_ENTRIES;
1778	return 0;
1779}
1780
1781slab_flags_t kmem_cache_flags(unsigned int object_size,
1782	slab_flags_t flags, const char *name,
1783	void (*ctor)(void *))
1784{
1785	return flags;
1786}
1787
1788struct kmem_cache *
1789__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
1790		   slab_flags_t flags, void (*ctor)(void *))
1791{
1792	struct kmem_cache *cachep;
1793
1794	cachep = find_mergeable(size, align, flags, name, ctor);
1795	if (cachep) {
1796		cachep->refcount++;
1797
1798		/*
1799		 * Adjust the object sizes so that we clear
1800		 * the complete object on kzalloc.
1801		 */
1802		cachep->object_size = max_t(int, cachep->object_size, size);
1803	}
1804	return cachep;
1805}
1806
1807static bool set_objfreelist_slab_cache(struct kmem_cache *cachep,
1808			size_t size, slab_flags_t flags)
1809{
1810	size_t left;
1811
1812	cachep->num = 0;
1813
1814	/*
1815	 * If slab auto-initialization on free is enabled, store the freelist
1816	 * off-slab, so that its contents don't end up in one of the allocated
1817	 * objects.
1818	 */
1819	if (unlikely(slab_want_init_on_free(cachep)))
1820		return false;
1821
1822	if (cachep->ctor || flags & SLAB_TYPESAFE_BY_RCU)
1823		return false;
1824
1825	left = calculate_slab_order(cachep, size,
1826			flags | CFLGS_OBJFREELIST_SLAB);
1827	if (!cachep->num)
1828		return false;
1829
1830	if (cachep->num * sizeof(freelist_idx_t) > cachep->object_size)
1831		return false;
1832
1833	cachep->colour = left / cachep->colour_off;
1834
1835	return true;
1836}
1837
1838static bool set_off_slab_cache(struct kmem_cache *cachep,
1839			size_t size, slab_flags_t flags)
1840{
1841	size_t left;
1842
1843	cachep->num = 0;
1844
1845	/*
1846	 * Always use on-slab management when SLAB_NOLEAKTRACE
1847	 * to avoid recursive calls into kmemleak.
1848	 */
1849	if (flags & SLAB_NOLEAKTRACE)
1850		return false;
1851
1852	/*
1853	 * Size is large, assume best to place the slab management obj
1854	 * off-slab (should allow better packing of objs).
1855	 */
1856	left = calculate_slab_order(cachep, size, flags | CFLGS_OFF_SLAB);
1857	if (!cachep->num)
1858		return false;
1859
1860	/*
1861	 * If the slab has been placed off-slab and we have enough space, then
1862	 * move it on-slab. This is at the expense of any extra colouring.
1863	 */
1864	if (left >= cachep->num * sizeof(freelist_idx_t))
1865		return false;
1866
1867	cachep->colour = left / cachep->colour_off;
1868
1869	return true;
1870}
1871
1872static bool set_on_slab_cache(struct kmem_cache *cachep,
1873			size_t size, slab_flags_t flags)
1874{
1875	size_t left;
1876
1877	cachep->num = 0;
1878
1879	left = calculate_slab_order(cachep, size, flags);
1880	if (!cachep->num)
1881		return false;
1882
1883	cachep->colour = left / cachep->colour_off;
1884
1885	return true;
1886}
1887
1888/**
1889 * __kmem_cache_create - Create a cache.
1890 * @cachep: cache management descriptor
1891 * @flags: SLAB flags
1892 *
1893 * Returns 0 on success, nonzero on failure.
1894 * Cannot be called within an interrupt, but can be interrupted.
1895 * The cache's constructor is run when new pages are allocated by the cache.
1896 *
1897 * The flags are
1898 *
1899 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
1900 * to catch references to uninitialised memory.
1901 *
1902 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
1903 * for buffer overruns.
1904 *
1905 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
1906 * cacheline.  This can be beneficial if you're counting cycles as closely
1907 * as davem.
1908 *
1909 * Return: 0 on success, nonzero in case of error
1910 */
1911int __kmem_cache_create(struct kmem_cache *cachep, slab_flags_t flags)
1912{
1913	size_t ralign = BYTES_PER_WORD;
1914	gfp_t gfp;
1915	int err;
1916	unsigned int size = cachep->size;
1917
1918#if DEBUG
1919#if FORCED_DEBUG
1920	/*
1921	 * Enable redzoning and last user accounting, except for caches with
1922	 * large objects, if the increased size would increase the object size
1923	 * above the next power of two: caches with object sizes just above a
1924	 * power of two have a significant amount of internal fragmentation.
1925	 */
1926	if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
1927						2 * sizeof(unsigned long long)))
1928		flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
1929	if (!(flags & SLAB_TYPESAFE_BY_RCU))
1930		flags |= SLAB_POISON;
1931#endif
1932#endif
1933
1934	/*
1935	 * Check that size is in terms of words.  This is needed to avoid
1936	 * unaligned accesses for some archs when redzoning is used, and makes
1937	 * sure any on-slab bufctls are also correctly aligned.
1938	 */
1939	size = ALIGN(size, BYTES_PER_WORD);
1940
1941	if (flags & SLAB_RED_ZONE) {
1942		ralign = REDZONE_ALIGN;
1943		/* If redzoning, ensure that the second redzone is suitably
1944		 * aligned, by adjusting the object size accordingly. */
1945		size = ALIGN(size, REDZONE_ALIGN);
1946	}
1947
1948	/* 3) caller mandated alignment */
1949	if (ralign < cachep->align) {
1950		ralign = cachep->align;
1951	}
1952	/* disable debug if necessary */
1953	if (ralign > __alignof__(unsigned long long))
1954		flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
1955	/*
1956	 * 4) Store it.
1957	 */
1958	cachep->align = ralign;
1959	cachep->colour_off = cache_line_size();
1960	/* Offset must be a multiple of the alignment. */
1961	if (cachep->colour_off < cachep->align)
1962		cachep->colour_off = cachep->align;
1963
1964	if (slab_is_available())
1965		gfp = GFP_KERNEL;
1966	else
1967		gfp = GFP_NOWAIT;
1968
1969#if DEBUG
1970
1971	/*
1972	 * Both debugging options require word-alignment which is calculated
1973	 * into align above.
1974	 */
1975	if (flags & SLAB_RED_ZONE) {
1976		/* add space for red zone words */
1977		cachep->obj_offset += sizeof(unsigned long long);
1978		size += 2 * sizeof(unsigned long long);
1979	}
1980	if (flags & SLAB_STORE_USER) {
1981		/* user store requires one word storage behind the end of
1982		 * the real object. But if the second red zone needs to be
1983		 * aligned to 64 bits, we must allow that much space.
1984		 */
1985		if (flags & SLAB_RED_ZONE)
1986			size += REDZONE_ALIGN;
1987		else
1988			size += BYTES_PER_WORD;
1989	}
1990#endif
1991
1992	kasan_cache_create(cachep, &size, &flags);
1993
1994	size = ALIGN(size, cachep->align);
1995	/*
1996	 * We should restrict the number of objects in a slab to implement
1997	 * byte sized index. Refer comment on SLAB_OBJ_MIN_SIZE definition.
1998	 */
1999	if (FREELIST_BYTE_INDEX && size < SLAB_OBJ_MIN_SIZE)
2000		size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align);
2001
2002#if DEBUG
2003	/*
2004	 * To activate debug pagealloc, off-slab management is a necessary
2005	 * requirement. In the early phase of initialization, small sized slabs
2006	 * don't get initialized yet, so off-slab would not be possible. We
2007	 * therefore check size >= 256, which guarantees that all the necessary
2008	 * small sized slabs are initialized in the current init sequence.
2009	 */
2010	if (debug_pagealloc_enabled() && (flags & SLAB_POISON) &&
2011		size >= 256 && cachep->object_size > cache_line_size()) {
2012		if (size < PAGE_SIZE || size % PAGE_SIZE == 0) {
2013			size_t tmp_size = ALIGN(size, PAGE_SIZE);
2014
2015			if (set_off_slab_cache(cachep, tmp_size, flags)) {
2016				flags |= CFLGS_OFF_SLAB;
2017				cachep->obj_offset += tmp_size - size;
2018				size = tmp_size;
2019				goto done;
2020			}
2021		}
2022	}
2023#endif
2024
2025	if (set_objfreelist_slab_cache(cachep, size, flags)) {
2026		flags |= CFLGS_OBJFREELIST_SLAB;
2027		goto done;
2028	}
2029
2030	if (set_off_slab_cache(cachep, size, flags)) {
2031		flags |= CFLGS_OFF_SLAB;
2032		goto done;
2033	}
2034
2035	if (set_on_slab_cache(cachep, size, flags))
2036		goto done;
2037
2038	return -E2BIG;
2039
2040done:
2041	cachep->freelist_size = cachep->num * sizeof(freelist_idx_t);
2042	cachep->flags = flags;
2043	cachep->allocflags = __GFP_COMP;
2044	if (flags & SLAB_CACHE_DMA)
2045		cachep->allocflags |= GFP_DMA;
2046	if (flags & SLAB_CACHE_DMA32)
2047		cachep->allocflags |= GFP_DMA32;
2048	if (flags & SLAB_RECLAIM_ACCOUNT)
2049		cachep->allocflags |= __GFP_RECLAIMABLE;
2050	cachep->size = size;
2051	cachep->reciprocal_buffer_size = reciprocal_value(size);
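	/*
	 * Note (illustrative, an assumption about usage elsewhere): the value
	 * precomputed by reciprocal_value() lets obj_to_index() turn an
	 * object's byte offset into an object index via reciprocal_divide()
	 * instead of a runtime division by cachep->size.
	 */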
2052
2053#if DEBUG
2054	/*
2055	 * If we're going to use the generic kernel_map_pages()
2056	 * poisoning, then it's going to smash the contents of
2057	 * the redzone and userword anyhow, so switch them off.
2058	 */
2059	if (IS_ENABLED(CONFIG_PAGE_POISONING) &&
2060		(cachep->flags & SLAB_POISON) &&
2061		is_debug_pagealloc_cache(cachep))
2062		cachep->flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
2063#endif
2064
2065	if (OFF_SLAB(cachep)) {
2066		cachep->freelist_cache =
2067			kmalloc_slab(cachep->freelist_size, 0u);
2068	}
2069
2070	err = setup_cpu_cache(cachep, gfp);
2071	if (err) {
2072		__kmem_cache_release(cachep);
2073		return err;
2074	}
2075
2076	return 0;
2077}
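/*
 * Usage sketch (illustrative, not part of the original file): caches are
 * normally created through the generic kmem_cache_create() wrapper, which
 * ends up calling __kmem_cache_create() above with the flags documented
 * there.  'struct foo', 'foo_cache' and 'foo_ctor' are hypothetical.
 *
 *	static void foo_ctor(void *obj)
 *	{
 *		memset(obj, 0, sizeof(struct foo));
 *	}
 *
 *	foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, foo_ctor);
 *	if (!foo_cache)
 *		return -ENOMEM;
 */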
2078
2079#if DEBUG
2080static void check_irq_off(void)
2081{
2082	BUG_ON(!irqs_disabled());
2083}
2084
2085static void check_irq_on(void)
2086{
2087	BUG_ON(irqs_disabled());
2088}
2089
2090static void check_mutex_acquired(void)
2091{
2092	BUG_ON(!mutex_is_locked(&slab_mutex));
2093}
2094
2095static void check_spinlock_acquired(struct kmem_cache *cachep)
2096{
2097#ifdef CONFIG_SMP
2098	check_irq_off();
2099	assert_spin_locked(&get_node(cachep, numa_mem_id())->list_lock);
2100#endif
2101}
2102
2103static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
2104{
2105#ifdef CONFIG_SMP
2106	check_irq_off();
2107	assert_spin_locked(&get_node(cachep, node)->list_lock);
2108#endif
2109}
2110
2111#else
2112#define check_irq_off()	do { } while(0)
2113#define check_irq_on()	do { } while(0)
2114#define check_mutex_acquired()	do { } while(0)
2115#define check_spinlock_acquired(x) do { } while(0)
2116#define check_spinlock_acquired_node(x, y) do { } while(0)
2117#endif
2118
2119static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac,
2120				int node, bool free_all, struct list_head *list)
2121{
2122	int tofree;
2123
2124	if (!ac || !ac->avail)
2125		return;
2126
2127	tofree = free_all ? ac->avail : (ac->limit + 4) / 5;
2128	if (tofree > ac->avail)
2129		tofree = (ac->avail + 1) / 2;
2130
2131	free_block(cachep, ac->entry, tofree, node, list);
2132	ac->avail -= tofree;
2133	memmove(ac->entry, &(ac->entry[tofree]), sizeof(void *) * ac->avail);
2134}
2135
2136static void do_drain(void *arg)
2137{
2138	struct kmem_cache *cachep = arg;
2139	struct array_cache *ac;
2140	int node = numa_mem_id();
2141	struct kmem_cache_node *n;
2142	LIST_HEAD(list);
2143
2144	check_irq_off();
2145	ac = cpu_cache_get(cachep);
2146	n = get_node(cachep, node);
2147	spin_lock(&n->list_lock);
2148	free_block(cachep, ac->entry, ac->avail, node, &list);
2149	spin_unlock(&n->list_lock);
2150	slabs_destroy(cachep, &list);
2151	ac->avail = 0;
2152}
2153
2154static void drain_cpu_caches(struct kmem_cache *cachep)
2155{
2156	struct kmem_cache_node *n;
2157	int node;
2158	LIST_HEAD(list);
2159
2160	on_each_cpu(do_drain, cachep, 1);
2161	check_irq_on();
2162	for_each_kmem_cache_node(cachep, node, n)
2163		if (n->alien)
2164			drain_alien_cache(cachep, n->alien);
2165
2166	for_each_kmem_cache_node(cachep, node, n) {
2167		spin_lock_irq(&n->list_lock);
2168		drain_array_locked(cachep, n->shared, node, true, &list);
2169		spin_unlock_irq(&n->list_lock);
2170
2171		slabs_destroy(cachep, &list);
2172	}
2173}
2174
2175/*
2176 * Remove slabs from the list of free slabs.
2177 * Specify the number of slabs to drain in tofree.
2178 *
2179 * Returns the actual number of slabs released.
2180 */
2181static int drain_freelist(struct kmem_cache *cache,
2182			struct kmem_cache_node *n, int tofree)
2183{
2184	struct list_head *p;
2185	int nr_freed;
2186	struct page *page;
2187
2188	nr_freed = 0;
2189	while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
2190
2191		spin_lock_irq(&n->list_lock);
2192		p = n->slabs_free.prev;
2193		if (p == &n->slabs_free) {
2194			spin_unlock_irq(&n->list_lock);
2195			goto out;
2196		}
2197
2198		page = list_entry(p, struct page, slab_list);
2199		list_del(&page->slab_list);
2200		n->free_slabs--;
2201		n->total_slabs--;
2202		/*
2203		 * Safe to drop the lock. The slab is no longer linked
2204		 * to the cache.
2205		 */
2206		n->free_objects -= cache->num;
2207		spin_unlock_irq(&n->list_lock);
2208		slab_destroy(cache, page);
2209		nr_freed++;
2210	}
2211out:
2212	return nr_freed;
2213}
2214
2215bool __kmem_cache_empty(struct kmem_cache *s)
2216{
2217	int node;
2218	struct kmem_cache_node *n;
2219
2220	for_each_kmem_cache_node(s, node, n)
2221		if (!list_empty(&n->slabs_full) ||
2222		    !list_empty(&n->slabs_partial))
2223			return false;
2224	return true;
2225}
2226
2227int __kmem_cache_shrink(struct kmem_cache *cachep)
2228{
2229	int ret = 0;
2230	int node;
2231	struct kmem_cache_node *n;
2232
2233	drain_cpu_caches(cachep);
2234
2235	check_irq_on();
2236	for_each_kmem_cache_node(cachep, node, n) {
2237		drain_freelist(cachep, n, INT_MAX);
2238
2239		ret += !list_empty(&n->slabs_full) ||
2240			!list_empty(&n->slabs_partial);
2241	}
2242	return (ret ? 1 : 0);
2243}
2244
2245#ifdef CONFIG_MEMCG
2246void __kmemcg_cache_deactivate(struct kmem_cache *cachep)
2247{
2248	__kmem_cache_shrink(cachep);
2249}
2250
2251void __kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s)
2252{
2253}
2254#endif
2255
2256int __kmem_cache_shutdown(struct kmem_cache *cachep)
2257{
2258	return __kmem_cache_shrink(cachep);
2259}
2260
2261void __kmem_cache_release(struct kmem_cache *cachep)
2262{
2263	int i;
2264	struct kmem_cache_node *n;
2265
2266	cache_random_seq_destroy(cachep);
2267
2268	free_percpu(cachep->cpu_cache);
2269
2270	/* NUMA: free the node structures */
2271	for_each_kmem_cache_node(cachep, i, n) {
2272		kfree(n->shared);
2273		free_alien_cache(n->alien);
2274		kfree(n);
2275		cachep->node[i] = NULL;
2276	}
2277}
2278
2279/*
2280 * Get the memory for a slab management obj.
2281 *
2282 * For a slab cache whose slab descriptor is off-slab, the
2283 * slab descriptor can't come from the same cache that is being created,
2284 * because that would mean deferring the creation of
2285 * the kmalloc_{dma,}_cache of size sizeof(slab descriptor) to this point.
2286 * We would then eventually call down to __kmem_cache_create(), which
2287 * in turn looks up in the kmalloc_{dma,}_caches for the desired-size one.
2288 * This is a "chicken-and-egg" problem.
2289 *
2290 * So the off-slab slab descriptor shall come from the kmalloc_{dma,}_caches,
2291 * which are all initialized during kmem_cache_init().
2292 */
2293static void *alloc_slabmgmt(struct kmem_cache *cachep,
2294				   struct page *page, int colour_off,
2295				   gfp_t local_flags, int nodeid)
2296{
2297	void *freelist;
2298	void *addr = page_address(page);
2299
2300	page->s_mem = addr + colour_off;
2301	page->active = 0;
2302
2303	if (OBJFREELIST_SLAB(cachep))
2304		freelist = NULL;
2305	else if (OFF_SLAB(cachep)) {
2306		/* Slab management obj is off-slab. */
2307		freelist = kmem_cache_alloc_node(cachep->freelist_cache,
2308					      local_flags, nodeid);
2309		if (!freelist)
2310			return NULL;
2311	} else {
2312		/* We will use last bytes at the slab for freelist */
2313		freelist = addr + (PAGE_SIZE << cachep->gfporder) -
2314				cachep->freelist_size;
2315	}
2316
2317	return freelist;
2318}
2319
2320static inline freelist_idx_t get_free_obj(struct page *page, unsigned int idx)
2321{
2322	return ((freelist_idx_t *)page->freelist)[idx];
2323}
2324
2325static inline void set_free_obj(struct page *page,
2326					unsigned int idx, freelist_idx_t val)
2327{
2328	((freelist_idx_t *)(page->freelist))[idx] = val;
2329}
2330
2331static void cache_init_objs_debug(struct kmem_cache *cachep, struct page *page)
2332{
2333#if DEBUG
2334	int i;
2335
2336	for (i = 0; i < cachep->num; i++) {
2337		void *objp = index_to_obj(cachep, page, i);
2338
2339		if (cachep->flags & SLAB_STORE_USER)
2340			*dbg_userword(cachep, objp) = NULL;
2341
2342		if (cachep->flags & SLAB_RED_ZONE) {
2343			*dbg_redzone1(cachep, objp) = RED_INACTIVE;
2344			*dbg_redzone2(cachep, objp) = RED_INACTIVE;
2345		}
2346		/*
2347		 * Constructors are not allowed to allocate memory from the same
2348		 * cache which they are a constructor for.  Otherwise, deadlock.
2349		 * They must also be threaded.
2350		 */
2351		if (cachep->ctor && !(cachep->flags & SLAB_POISON)) {
2352			kasan_unpoison_object_data(cachep,
2353						   objp + obj_offset(cachep));
2354			cachep->ctor(objp + obj_offset(cachep));
2355			kasan_poison_object_data(
2356				cachep, objp + obj_offset(cachep));
2357		}
2358
2359		if (cachep->flags & SLAB_RED_ZONE) {
2360			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
2361				slab_error(cachep, "constructor overwrote the end of an object");
2362			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
2363				slab_error(cachep, "constructor overwrote the start of an object");
2364		}
2365		/* need to poison the objs? */
2366		if (cachep->flags & SLAB_POISON) {
2367			poison_obj(cachep, objp, POISON_FREE);
2368			slab_kernel_map(cachep, objp, 0);
2369		}
2370	}
2371#endif
2372}
2373
2374#ifdef CONFIG_SLAB_FREELIST_RANDOM
2375/* Hold information during a freelist initialization */
2376union freelist_init_state {
2377	struct {
2378		unsigned int pos;
2379		unsigned int *list;
2380		unsigned int count;
2381	};
2382	struct rnd_state rnd_state;
2383};
2384
2385/*
2386 * Initialize the state based on the randomization method available.
2387 * Return true if the pre-computed list is available, false otherwise.
2388 */
2389static bool freelist_state_initialize(union freelist_init_state *state,
2390				struct kmem_cache *cachep,
2391				unsigned int count)
2392{
2393	bool ret;
2394	unsigned int rand;
2395
2396	/* Use best entropy available to define a random shift */
2397	rand = get_random_int();
2398
2399	/* Use a random state if the pre-computed list is not available */
2400	if (!cachep->random_seq) {
2401		prandom_seed_state(&state->rnd_state, rand);
2402		ret = false;
2403	} else {
2404		state->list = cachep->random_seq;
2405		state->count = count;
2406		state->pos = rand % count;
2407		ret = true;
2408	}
2409	return ret;
2410}
2411
2412/* Get the next entry on the list and randomize it using a random shift */
2413static freelist_idx_t next_random_slot(union freelist_init_state *state)
2414{
2415	if (state->pos >= state->count)
2416		state->pos = 0;
2417	return state->list[state->pos++];
2418}
2419
2420/* Swap two freelist entries */
2421static void swap_free_obj(struct page *page, unsigned int a, unsigned int b)
2422{
2423	swap(((freelist_idx_t *)page->freelist)[a],
2424		((freelist_idx_t *)page->freelist)[b]);
2425}
2426
2427/*
2428 * Shuffle the freelist initialization state based on pre-computed lists.
2429 * Return true if the list was successfully shuffled, false otherwise.
2430 */
2431static bool shuffle_freelist(struct kmem_cache *cachep, struct page *page)
2432{
2433	unsigned int objfreelist = 0, i, rand, count = cachep->num;
2434	union freelist_init_state state;
2435	bool precomputed;
2436
2437	if (count < 2)
2438		return false;
2439
2440	precomputed = freelist_state_initialize(&state, cachep, count);
2441
2442	/* Take a random entry as the objfreelist */
2443	if (OBJFREELIST_SLAB(cachep)) {
2444		if (!precomputed)
2445			objfreelist = count - 1;
2446		else
2447			objfreelist = next_random_slot(&state);
2448		page->freelist = index_to_obj(cachep, page, objfreelist) +
2449						obj_offset(cachep);
2450		count--;
2451	}
2452
2453	/*
2454	 * On early boot, generate the list dynamically.
2455	 * Later use a pre-computed list for speed.
2456	 */
2457	if (!precomputed) {
2458		for (i = 0; i < count; i++)
2459			set_free_obj(page, i, i);
2460
2461		/* Fisher-Yates shuffle */
2462		for (i = count - 1; i > 0; i--) {
2463			rand = prandom_u32_state(&state.rnd_state);
2464			rand %= (i + 1);
2465			swap_free_obj(page, i, rand);
2466		}
2467	} else {
2468		for (i = 0; i < count; i++)
2469			set_free_obj(page, i, next_random_slot(&state));
2470	}
2471
2472	if (OBJFREELIST_SLAB(cachep))
2473		set_free_obj(page, cachep->num - 1, objfreelist);
2474
2475	return true;
2476}
2477#else
2478static inline bool shuffle_freelist(struct kmem_cache *cachep,
2479				struct page *page)
2480{
2481	return false;
2482}
2483#endif /* CONFIG_SLAB_FREELIST_RANDOM */
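/*
 * Worked example (illustrative): for a slab with four objects the freelist
 * starts as [0 1 2 3]; the Fisher-Yates pass in shuffle_freelist() then walks
 * i = 3..1, swapping entry i with a random entry in [0, i].  One possible run:
 *	i = 3, rand = 1 -> [0 3 2 1]
 *	i = 2, rand = 0 -> [2 3 0 1]
 *	i = 1, rand = 1 -> [2 3 0 1]
 * leaving a pseudo-random permutation of the object indices.
 */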
2484
2485static void cache_init_objs(struct kmem_cache *cachep,
2486			    struct page *page)
2487{
2488	int i;
2489	void *objp;
2490	bool shuffled;
2491
2492	cache_init_objs_debug(cachep, page);
2493
2494	/* Try to randomize the freelist if enabled */
2495	shuffled = shuffle_freelist(cachep, page);
2496
2497	if (!shuffled && OBJFREELIST_SLAB(cachep)) {
2498		page->freelist = index_to_obj(cachep, page, cachep->num - 1) +
2499						obj_offset(cachep);
2500	}
2501
2502	for (i = 0; i < cachep->num; i++) {
2503		objp = index_to_obj(cachep, page, i);
2504		objp = kasan_init_slab_obj(cachep, objp);
2505
2506		/* constructor could break poison info */
2507		if (DEBUG == 0 && cachep->ctor) {
2508			kasan_unpoison_object_data(cachep, objp);
2509			cachep->ctor(objp);
2510			kasan_poison_object_data(cachep, objp);
2511		}
2512
2513		if (!shuffled)
2514			set_free_obj(page, i, i);
2515	}
2516}
2517
2518static void *slab_get_obj(struct kmem_cache *cachep, struct page *page)
2519{
2520	void *objp;
2521
2522	objp = index_to_obj(cachep, page, get_free_obj(page, page->active));
2523	page->active++;
2524
2525	return objp;
2526}
2527
2528static void slab_put_obj(struct kmem_cache *cachep,
2529			struct page *page, void *objp)
2530{
2531	unsigned int objnr = obj_to_index(cachep, page, objp);
2532#if DEBUG
2533	unsigned int i;
2534
2535	/* Verify double free bug */
2536	for (i = page->active; i < cachep->num; i++) {
2537		if (get_free_obj(page, i) == objnr) {
2538			pr_err("slab: double free detected in cache '%s', objp %px\n",
2539			       cachep->name, objp);
2540			BUG();
2541		}
2542	}
2543#endif
2544	page->active--;
2545	if (!page->freelist)
2546		page->freelist = objp + obj_offset(cachep);
2547
2548	set_free_obj(page, page->active, objnr);
2549}
2550
2551/*
2552 * Map pages beginning at addr to the given cache and slab. This is required
2553 * for the slab allocator to be able to look up the cache and slab of a
2554 * virtual address for kfree, ksize, and slab debugging.
2555 */
2556static void slab_map_pages(struct kmem_cache *cache, struct page *page,
2557			   void *freelist)
2558{
2559	page->slab_cache = cache;
2560	page->freelist = freelist;
2561}
2562
2563/*
2564 * Grow (by 1) the number of slabs within a cache.  This is called by
2565 * kmem_cache_alloc() when there are no active objs left in a cache.
2566 */
2567static struct page *cache_grow_begin(struct kmem_cache *cachep,
2568				gfp_t flags, int nodeid)
2569{
2570	void *freelist;
2571	size_t offset;
2572	gfp_t local_flags;
2573	int page_node;
2574	struct kmem_cache_node *n;
2575	struct page *page;
2576
2577	/*
2578	 * Be lazy and only check for valid flags here,  keeping it out of the
2579	 * critical path in kmem_cache_alloc().
2580	 */
2581	if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
2582		gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
2583		flags &= ~GFP_SLAB_BUG_MASK;
2584		pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
2585				invalid_mask, &invalid_mask, flags, &flags);
2586		dump_stack();
2587	}
2588	WARN_ON_ONCE(cachep->ctor && (flags & __GFP_ZERO));
2589	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
2590
2591	check_irq_off();
2592	if (gfpflags_allow_blocking(local_flags))
2593		local_irq_enable();
2594
2595	/*
2596	 * Get mem for the objs.  Attempt to allocate a physical page from
2597	 * 'nodeid'.
2598	 */
2599	page = kmem_getpages(cachep, local_flags, nodeid);
2600	if (!page)
2601		goto failed;
2602
2603	page_node = page_to_nid(page);
2604	n = get_node(cachep, page_node);
2605
2606	/* Get colour for the slab, and calculate the next value. */
2607	n->colour_next++;
2608	if (n->colour_next >= cachep->colour)
2609		n->colour_next = 0;
2610
2611	offset = n->colour_next;
2612	if (offset >= cachep->colour)
2613		offset = 0;
2614
2615	offset *= cachep->colour_off;
2616
2617	/*
2618	 * Call kasan_poison_slab() before calling alloc_slabmgmt(), so
2619	 * page_address() in the latter returns a non-tagged pointer,
2620	 * as it should be for slab pages.
2621	 */
2622	kasan_poison_slab(page);
2623
2624	/* Get slab management. */
2625	freelist = alloc_slabmgmt(cachep, page, offset,
2626			local_flags & ~GFP_CONSTRAINT_MASK, page_node);
2627	if (OFF_SLAB(cachep) && !freelist)
2628		goto opps1;
2629
2630	slab_map_pages(cachep, page, freelist);
2631
2632	cache_init_objs(cachep, page);
2633
2634	if (gfpflags_allow_blocking(local_flags))
2635		local_irq_disable();
2636
2637	return page;
2638
2639opps1:
2640	kmem_freepages(cachep, page);
2641failed:
2642	if (gfpflags_allow_blocking(local_flags))
2643		local_irq_disable();
2644	return NULL;
2645}
2646
2647static void cache_grow_end(struct kmem_cache *cachep, struct page *page)
2648{
2649	struct kmem_cache_node *n;
2650	void *list = NULL;
2651
2652	check_irq_off();
2653
2654	if (!page)
2655		return;
2656
2657	INIT_LIST_HEAD(&page->slab_list);
2658	n = get_node(cachep, page_to_nid(page));
2659
2660	spin_lock(&n->list_lock);
2661	n->total_slabs++;
2662	if (!page->active) {
2663		list_add_tail(&page->slab_list, &n->slabs_free);
2664		n->free_slabs++;
2665	} else
2666		fixup_slab_list(cachep, n, page, &list);
2667
2668	STATS_INC_GROWN(cachep);
2669	n->free_objects += cachep->num - page->active;
2670	spin_unlock(&n->list_lock);
2671
2672	fixup_objfreelist_debug(cachep, &list);
2673}
2674
2675#if DEBUG
2676
2677/*
2678 * Perform extra freeing checks:
2679 * - detect bad pointers.
2680 * - POISON/RED_ZONE checking
2681 */
2682static void kfree_debugcheck(const void *objp)
2683{
2684	if (!virt_addr_valid(objp)) {
2685		pr_err("kfree_debugcheck: out of range ptr %lxh\n",
2686		       (unsigned long)objp);
2687		BUG();
2688	}
2689}
2690
2691static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
2692{
2693	unsigned long long redzone1, redzone2;
2694
2695	redzone1 = *dbg_redzone1(cache, obj);
2696	redzone2 = *dbg_redzone2(cache, obj);
2697
2698	/*
2699	 * Redzone is ok.
2700	 */
2701	if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
2702		return;
2703
2704	if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
2705		slab_error(cache, "double free detected");
2706	else
2707		slab_error(cache, "memory outside object was overwritten");
2708
2709	pr_err("%px: redzone 1:0x%llx, redzone 2:0x%llx\n",
2710	       obj, redzone1, redzone2);
2711}
2712
2713static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
2714				   unsigned long caller)
2715{
2716	unsigned int objnr;
2717	struct page *page;
2718
2719	BUG_ON(virt_to_cache(objp) != cachep);
2720
2721	objp -= obj_offset(cachep);
2722	kfree_debugcheck(objp);
2723	page = virt_to_head_page(objp);
2724
2725	if (cachep->flags & SLAB_RED_ZONE) {
2726		verify_redzone_free(cachep, objp);
2727		*dbg_redzone1(cachep, objp) = RED_INACTIVE;
2728		*dbg_redzone2(cachep, objp) = RED_INACTIVE;
2729	}
2730	if (cachep->flags & SLAB_STORE_USER)
2731		*dbg_userword(cachep, objp) = (void *)caller;
2732
2733	objnr = obj_to_index(cachep, page, objp);
2734
2735	BUG_ON(objnr >= cachep->num);
2736	BUG_ON(objp != index_to_obj(cachep, page, objnr));
2737
2738	if (cachep->flags & SLAB_POISON) {
2739		poison_obj(cachep, objp, POISON_FREE);
2740		slab_kernel_map(cachep, objp, 0);
2741	}
2742	return objp;
2743}
2744
2745#else
2746#define kfree_debugcheck(x) do { } while(0)
2747#define cache_free_debugcheck(x,objp,z) (objp)
2748#endif
2749
2750static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
2751						void **list)
2752{
2753#if DEBUG
2754	void *next = *list;
2755	void *objp;
2756
2757	while (next) {
2758		objp = next - obj_offset(cachep);
2759		next = *(void **)next;
2760		poison_obj(cachep, objp, POISON_FREE);
2761	}
2762#endif
2763}
2764
2765static inline void fixup_slab_list(struct kmem_cache *cachep,
2766				struct kmem_cache_node *n, struct page *page,
2767				void **list)
2768{
2769	/* move slabp to correct slabp list: */
2770	list_del(&page->slab_list);
2771	if (page->active == cachep->num) {
2772		list_add(&page->slab_list, &n->slabs_full);
2773		if (OBJFREELIST_SLAB(cachep)) {
2774#if DEBUG
2775			/* Poisoning will be done without holding the lock */
2776			if (cachep->flags & SLAB_POISON) {
2777				void **objp = page->freelist;
2778
2779				*objp = *list;
2780				*list = objp;
2781			}
2782#endif
2783			page->freelist = NULL;
2784		}
2785	} else
2786		list_add(&page->slab_list, &n->slabs_partial);
2787}
2788
2789/* Try to find non-pfmemalloc slab if needed */
2790static noinline struct page *get_valid_first_slab(struct kmem_cache_node *n,
2791					struct page *page, bool pfmemalloc)
2792{
2793	if (!page)
2794		return NULL;
2795
2796	if (pfmemalloc)
2797		return page;
2798
2799	if (!PageSlabPfmemalloc(page))
2800		return page;
2801
2802	/* No need to keep pfmemalloc slab if we have enough free objects */
2803	if (n->free_objects > n->free_limit) {
2804		ClearPageSlabPfmemalloc(page);
2805		return page;
2806	}
2807
2808	/* Move pfmemalloc slab to the end of list to speed up next search */
2809	list_del(&page->slab_list);
2810	if (!page->active) {
2811		list_add_tail(&page->slab_list, &n->slabs_free);
2812		n->free_slabs++;
2813	} else
2814		list_add_tail(&page->slab_list, &n->slabs_partial);
2815
2816	list_for_each_entry(page, &n->slabs_partial, slab_list) {
2817		if (!PageSlabPfmemalloc(page))
2818			return page;
2819	}
2820
2821	n->free_touched = 1;
2822	list_for_each_entry(page, &n->slabs_free, slab_list) {
2823		if (!PageSlabPfmemalloc(page)) {
2824			n->free_slabs--;
2825			return page;
2826		}
2827	}
2828
2829	return NULL;
2830}
2831
2832static struct page *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc)
2833{
2834	struct page *page;
2835
2836	assert_spin_locked(&n->list_lock);
2837	page = list_first_entry_or_null(&n->slabs_partial, struct page,
2838					slab_list);
2839	if (!page) {
2840		n->free_touched = 1;
2841		page = list_first_entry_or_null(&n->slabs_free, struct page,
2842						slab_list);
2843		if (page)
2844			n->free_slabs--;
2845	}
2846
2847	if (sk_memalloc_socks())
2848		page = get_valid_first_slab(n, page, pfmemalloc);
2849
2850	return page;
2851}
2852
2853static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep,
2854				struct kmem_cache_node *n, gfp_t flags)
2855{
2856	struct page *page;
2857	void *obj;
2858	void *list = NULL;
2859
2860	if (!gfp_pfmemalloc_allowed(flags))
2861		return NULL;
2862
2863	spin_lock(&n->list_lock);
2864	page = get_first_slab(n, true);
2865	if (!page) {
2866		spin_unlock(&n->list_lock);
2867		return NULL;
2868	}
2869
2870	obj = slab_get_obj(cachep, page);
2871	n->free_objects--;
2872
2873	fixup_slab_list(cachep, n, page, &list);
2874
2875	spin_unlock(&n->list_lock);
2876	fixup_objfreelist_debug(cachep, &list);
2877
2878	return obj;
2879}
2880
2881/*
2882 * Slab list should be fixed up by fixup_slab_list() for existing slab
2883 * or cache_grow_end() for new slab
2884 */
2885static __always_inline int alloc_block(struct kmem_cache *cachep,
2886		struct array_cache *ac, struct page *page, int batchcount)
2887{
2888	/*
2889	 * There must be at least one object available for
2890	 * allocation.
2891	 */
2892	BUG_ON(page->active >= cachep->num);
2893
2894	while (page->active < cachep->num && batchcount--) {
2895		STATS_INC_ALLOCED(cachep);
2896		STATS_INC_ACTIVE(cachep);
2897		STATS_SET_HIGH(cachep);
2898
2899		ac->entry[ac->avail++] = slab_get_obj(cachep, page);
2900	}
2901
2902	return batchcount;
2903}
2904
2905static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
2906{
2907	int batchcount;
2908	struct kmem_cache_node *n;
2909	struct array_cache *ac, *shared;
2910	int node;
2911	void *list = NULL;
2912	struct page *page;
2913
2914	check_irq_off();
2915	node = numa_mem_id();
2916
2917	ac = cpu_cache_get(cachep);
2918	batchcount = ac->batchcount;
2919	if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
2920		/*
2921		 * If there was little recent activity on this cache, then
2922		 * perform only a partial refill.  Otherwise we could generate
2923		 * refill bouncing.
2924		 */
2925		batchcount = BATCHREFILL_LIMIT;
2926	}
2927	n = get_node(cachep, node);
2928
2929	BUG_ON(ac->avail > 0 || !n);
2930	shared = READ_ONCE(n->shared);
2931	if (!n->free_objects && (!shared || !shared->avail))
2932		goto direct_grow;
2933
2934	spin_lock(&n->list_lock);
2935	shared = READ_ONCE(n->shared);
2936
2937	/* See if we can refill from the shared array */
2938	if (shared && transfer_objects(ac, shared, batchcount)) {
2939		shared->touched = 1;
2940		goto alloc_done;
2941	}
2942
2943	while (batchcount > 0) {
2944		/* Get the slab the allocation is to come from. */
2945		page = get_first_slab(n, false);
2946		if (!page)
2947			goto must_grow;
2948
2949		check_spinlock_acquired(cachep);
2950
2951		batchcount = alloc_block(cachep, ac, page, batchcount);
2952		fixup_slab_list(cachep, n, page, &list);
2953	}
2954
2955must_grow:
2956	n->free_objects -= ac->avail;
2957alloc_done:
2958	spin_unlock(&n->list_lock);
2959	fixup_objfreelist_debug(cachep, &list);
2960
2961direct_grow:
2962	if (unlikely(!ac->avail)) {
2963		/* Check if we can use obj in pfmemalloc slab */
2964		if (sk_memalloc_socks()) {
2965			void *obj = cache_alloc_pfmemalloc(cachep, n, flags);
2966
2967			if (obj)
2968				return obj;
2969		}
2970
2971		page = cache_grow_begin(cachep, gfp_exact_node(flags), node);
2972
2973		/*
2974		 * cache_grow_begin() can reenable interrupts,
2975		 * then ac could change.
2976		 */
2977		ac = cpu_cache_get(cachep);
2978		if (!ac->avail && page)
2979			alloc_block(cachep, ac, page, batchcount);
2980		cache_grow_end(cachep, page);
2981
2982		if (!ac->avail)
2983			return NULL;
2984	}
2985	ac->touched = 1;
2986
2987	return ac->entry[--ac->avail];
2988}
2989
2990static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
2991						gfp_t flags)
2992{
2993	might_sleep_if(gfpflags_allow_blocking(flags));
2994}
2995
2996#if DEBUG
2997static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
2998				gfp_t flags, void *objp, unsigned long caller)
2999{
3000	WARN_ON_ONCE(cachep->ctor && (flags & __GFP_ZERO));
3001	if (!objp)
3002		return objp;
3003	if (cachep->flags & SLAB_POISON) {
3004		check_poison_obj(cachep, objp);
3005		slab_kernel_map(cachep, objp, 1);
3006		poison_obj(cachep, objp, POISON_INUSE);
3007	}
3008	if (cachep->flags & SLAB_STORE_USER)
3009		*dbg_userword(cachep, objp) = (void *)caller;
3010
3011	if (cachep->flags & SLAB_RED_ZONE) {
3012		if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
3013				*dbg_redzone2(cachep, objp) != RED_INACTIVE) {
3014			slab_error(cachep, "double free, or memory outside object was overwritten");
3015			pr_err("%px: redzone 1:0x%llx, redzone 2:0x%llx\n",
3016			       objp, *dbg_redzone1(cachep, objp),
3017			       *dbg_redzone2(cachep, objp));
3018		}
3019		*dbg_redzone1(cachep, objp) = RED_ACTIVE;
3020		*dbg_redzone2(cachep, objp) = RED_ACTIVE;
3021	}
3022
3023	objp += obj_offset(cachep);
3024	if (cachep->ctor && cachep->flags & SLAB_POISON)
3025		cachep->ctor(objp);
3026	if (ARCH_SLAB_MINALIGN &&
3027	    ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) {
3028		pr_err("0x%px: not aligned to ARCH_SLAB_MINALIGN=%d\n",
3029		       objp, (int)ARCH_SLAB_MINALIGN);
3030	}
3031	return objp;
3032}
3033#else
3034#define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
3035#endif
3036
3037static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3038{
3039	void *objp;
3040	struct array_cache *ac;
3041
3042	check_irq_off();
3043
3044	ac = cpu_cache_get(cachep);
3045	if (likely(ac->avail)) {
3046		ac->touched = 1;
3047		objp = ac->entry[--ac->avail];
3048
3049		STATS_INC_ALLOCHIT(cachep);
3050		goto out;
3051	}
3052
3053	STATS_INC_ALLOCMISS(cachep);
3054	objp = cache_alloc_refill(cachep, flags);
3055	/*
3056	 * the 'ac' may be updated by cache_alloc_refill(),
3057	 * and kmemleak_erase() requires its correct value.
3058	 */
3059	ac = cpu_cache_get(cachep);
3060
3061out:
3062	/*
3063	 * To avoid a false negative, if an object that is in one of the
3064	 * per-CPU caches is leaked, we need to make sure kmemleak doesn't
3065	 * treat the array pointers as a reference to the object.
3066	 */
3067	if (objp)
3068		kmemleak_erase(&ac->entry[ac->avail]);
3069	return objp;
3070}
3071
3072#ifdef CONFIG_NUMA
3073/*
3074 * Try allocating on another node if PFA_SPREAD_SLAB or a mempolicy is set.
3075 *
3076 * If we are in_interrupt, then process context, including cpusets and
3077 * mempolicy, may not apply and should not be used for allocation policy.
3078 */
3079static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
3080{
3081	int nid_alloc, nid_here;
3082
3083	if (in_interrupt() || (flags & __GFP_THISNODE))
3084		return NULL;
3085	nid_alloc = nid_here = numa_mem_id();
3086	if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
3087		nid_alloc = cpuset_slab_spread_node();
3088	else if (current->mempolicy)
3089		nid_alloc = mempolicy_slab_node();
3090	if (nid_alloc != nid_here)
3091		return ____cache_alloc_node(cachep, flags, nid_alloc);
3092	return NULL;
3093}
3094
3095/*
3096 * Fallback function if there was no memory available and no objects on a
3097 * certain node and fallback is permitted. First we scan all the
3098 * available nodes for available objects. If that fails then we
3099 * perform an allocation without specifying a node. This allows the page
3100 * allocator to do its reclaim / fallback magic. We then insert the
3101 * slab into the proper nodelist and then allocate from it.
3102 */
3103static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
3104{
3105	struct zonelist *zonelist;
3106	struct zoneref *z;
3107	struct zone *zone;
3108	enum zone_type high_zoneidx = gfp_zone(flags);
3109	void *obj = NULL;
3110	struct page *page;
3111	int nid;
3112	unsigned int cpuset_mems_cookie;
3113
3114	if (flags & __GFP_THISNODE)
3115		return NULL;
3116
3117retry_cpuset:
3118	cpuset_mems_cookie = read_mems_allowed_begin();
3119	zonelist = node_zonelist(mempolicy_slab_node(), flags);
3120
3121retry:
3122	/*
3123	 * Look through allowed nodes for objects available
3124	 * from existing per node queues.
3125	 */
3126	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
3127		nid = zone_to_nid(zone);
3128
3129		if (cpuset_zone_allowed(zone, flags) &&
3130			get_node(cache, nid) &&
3131			get_node(cache, nid)->free_objects) {
3132				obj = ____cache_alloc_node(cache,
3133					gfp_exact_node(flags), nid);
3134				if (obj)
3135					break;
3136		}
3137	}
3138
3139	if (!obj) {
3140		/*
3141		 * This allocation will be performed within the constraints
3142		 * of the current cpuset / memory policy requirements.
3143		 * We may trigger various forms of reclaim on the allowed
3144		 * set and go into memory reserves if necessary.
3145		 */
3146		page = cache_grow_begin(cache, flags, numa_mem_id());
3147		cache_grow_end(cache, page);
3148		if (page) {
3149			nid = page_to_nid(page);
3150			obj = ____cache_alloc_node(cache,
3151				gfp_exact_node(flags), nid);
3152
3153			/*
3154			 * Another processor may allocate the objects in
3155			 * the slab since we are not holding any locks.
3156			 */
3157			if (!obj)
3158				goto retry;
3159		}
3160	}
3161
3162	if (unlikely(!obj && read_mems_allowed_retry(cpuset_mems_cookie)))
3163		goto retry_cpuset;
3164	return obj;
3165}
3166
3167/*
3168 * An interface to enable slab creation on nodeid
3169 */
3170static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
3171				int nodeid)
3172{
3173	struct page *page;
3174	struct kmem_cache_node *n;
3175	void *obj = NULL;
3176	void *list = NULL;
3177
3178	VM_BUG_ON(nodeid < 0 || nodeid >= MAX_NUMNODES);
3179	n = get_node(cachep, nodeid);
3180	BUG_ON(!n);
3181
3182	check_irq_off();
3183	spin_lock(&n->list_lock);
3184	page = get_first_slab(n, false);
3185	if (!page)
3186		goto must_grow;
3187
3188	check_spinlock_acquired_node(cachep, nodeid);
3189
3190	STATS_INC_NODEALLOCS(cachep);
3191	STATS_INC_ACTIVE(cachep);
3192	STATS_SET_HIGH(cachep);
3193
3194	BUG_ON(page->active == cachep->num);
3195
3196	obj = slab_get_obj(cachep, page);
3197	n->free_objects--;
3198
3199	fixup_slab_list(cachep, n, page, &list);
3200
3201	spin_unlock(&n->list_lock);
3202	fixup_objfreelist_debug(cachep, &list);
3203	return obj;
3204
3205must_grow:
3206	spin_unlock(&n->list_lock);
3207	page = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid);
3208	if (page) {
3209		/* This slab isn't counted yet so don't update free_objects */
3210		obj = slab_get_obj(cachep, page);
3211	}
3212	cache_grow_end(cachep, page);
3213
3214	return obj ? obj : fallback_alloc(cachep, flags);
3215}
3216
3217static __always_inline void *
3218slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
3219		   unsigned long caller)
3220{
3221	unsigned long save_flags;
3222	void *ptr;
3223	int slab_node = numa_mem_id();
3224
3225	flags &= gfp_allowed_mask;
3226	cachep = slab_pre_alloc_hook(cachep, flags);
3227	if (unlikely(!cachep))
3228		return NULL;
3229
3230	cache_alloc_debugcheck_before(cachep, flags);
3231	local_irq_save(save_flags);
3232
3233	if (nodeid == NUMA_NO_NODE)
3234		nodeid = slab_node;
3235
3236	if (unlikely(!get_node(cachep, nodeid))) {
3237		/* Node not bootstrapped yet */
3238		ptr = fallback_alloc(cachep, flags);
3239		goto out;
3240	}
3241
3242	if (nodeid == slab_node) {
3243		/*
3244		 * Use the locally cached objects if possible.
3245		 * However ____cache_alloc does not allow fallback
3246		 * to other nodes. It may fail while we still have
3247		 * objects on other nodes available.
3248		 */
3249		ptr = ____cache_alloc(cachep, flags);
3250		if (ptr)
3251			goto out;
3252	}
3253	/* ___cache_alloc_node can fall back to other nodes */
3254	ptr = ____cache_alloc_node(cachep, flags, nodeid);
3255  out:
3256	local_irq_restore(save_flags);
3257	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
3258
3259	if (unlikely(slab_want_init_on_alloc(flags, cachep)) && ptr)
3260		memset(ptr, 0, cachep->object_size);
3261
3262	slab_post_alloc_hook(cachep, flags, 1, &ptr);
3263	return ptr;
3264}
3265
3266static __always_inline void *
3267__do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
3268{
3269	void *objp;
3270
3271	if (current->mempolicy || cpuset_do_slab_mem_spread()) {
3272		objp = alternate_node_alloc(cache, flags);
3273		if (objp)
3274			goto out;
3275	}
3276	objp = ____cache_alloc(cache, flags);
3277
3278	/*
3279	 * We may just have run out of memory on the local node.
3280	 * ____cache_alloc_node() knows how to locate memory on other nodes
3281	 * ____cache_alloc_node() knows how to locate memory on other nodes.
3282	if (!objp)
3283		objp = ____cache_alloc_node(cache, flags, numa_mem_id());
3284
3285  out:
3286	return objp;
3287}
3288#else
3289
3290static __always_inline void *
3291__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3292{
3293	return ____cache_alloc(cachep, flags);
3294}
3295
3296#endif /* CONFIG_NUMA */
3297
3298static __always_inline void *
3299slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
3300{
3301	unsigned long save_flags;
3302	void *objp;
3303
3304	flags &= gfp_allowed_mask;
3305	cachep = slab_pre_alloc_hook(cachep, flags);
3306	if (unlikely(!cachep))
3307		return NULL;
3308
3309	cache_alloc_debugcheck_before(cachep, flags);
3310	local_irq_save(save_flags);
3311	objp = __do_cache_alloc(cachep, flags);
3312	local_irq_restore(save_flags);
3313	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
3314	prefetchw(objp);
3315
3316	if (unlikely(slab_want_init_on_alloc(flags, cachep)) && objp)
3317		memset(objp, 0, cachep->object_size);
3318
3319	slab_post_alloc_hook(cachep, flags, 1, &objp);
3320	return objp;
3321}
3322
3323/*
3324 * The caller needs to acquire the correct kmem_cache_node's list_lock.
3325 * @list: List of detached free slabs that should be freed by the caller
3326 */
3327static void free_block(struct kmem_cache *cachep, void **objpp,
3328			int nr_objects, int node, struct list_head *list)
3329{
3330	int i;
3331	struct kmem_cache_node *n = get_node(cachep, node);
3332	struct page *page;
3333
3334	n->free_objects += nr_objects;
3335
3336	for (i = 0; i < nr_objects; i++) {
3337		void *objp;
3338		struct page *page;
3339
3340		objp = objpp[i];
3341
3342		page = virt_to_head_page(objp);
3343		list_del(&page->slab_list);
3344		check_spinlock_acquired_node(cachep, node);
3345		slab_put_obj(cachep, page, objp);
3346		STATS_DEC_ACTIVE(cachep);
3347
3348		/* fixup slab chains */
3349		if (page->active == 0) {
3350			list_add(&page->slab_list, &n->slabs_free);
3351			n->free_slabs++;
3352		} else {
3353			/* Unconditionally move a slab to the end of the
3354			 * partial list on free - this gives the maximum time
3355			 * for the other objects to be freed, too.
3356			 */
3357			list_add_tail(&page->slab_list, &n->slabs_partial);
3358		}
3359	}
3360
3361	while (n->free_objects > n->free_limit && !list_empty(&n->slabs_free)) {
3362		n->free_objects -= cachep->num;
3363
3364		page = list_last_entry(&n->slabs_free, struct page, slab_list);
3365		list_move(&page->slab_list, list);
3366		n->free_slabs--;
3367		n->total_slabs--;
3368	}
3369}
3370
3371static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
3372{
3373	int batchcount;
3374	struct kmem_cache_node *n;
3375	int node = numa_mem_id();
3376	LIST_HEAD(list);
3377
3378	batchcount = ac->batchcount;
3379
3380	check_irq_off();
3381	n = get_node(cachep, node);
3382	spin_lock(&n->list_lock);
3383	if (n->shared) {
3384		struct array_cache *shared_array = n->shared;
3385		int max = shared_array->limit - shared_array->avail;
3386		if (max) {
3387			if (batchcount > max)
3388				batchcount = max;
3389			memcpy(&(shared_array->entry[shared_array->avail]),
3390			       ac->entry, sizeof(void *) * batchcount);
3391			shared_array->avail += batchcount;
3392			goto free_done;
3393		}
3394	}
3395
3396	free_block(cachep, ac->entry, batchcount, node, &list);
3397free_done:
3398#if STATS
3399	{
3400		int i = 0;
3401		struct page *page;
3402
3403		list_for_each_entry(page, &n->slabs_free, slab_list) {
3404			BUG_ON(page->active);
3405
3406			i++;
3407		}
3408		STATS_SET_FREEABLE(cachep, i);
3409	}
3410#endif
3411	spin_unlock(&n->list_lock);
3412	slabs_destroy(cachep, &list);
3413	ac->avail -= batchcount;
3414	memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
3415}
3416
3417/*
3418 * Release an obj back to its cache. If the obj has a constructed state, it must
3419 * be in this state _before_ it is released.  Called with interrupts disabled.
3420 */
3421static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp,
3422					 unsigned long caller)
3423{
3424	/* Put the object into the quarantine, don't touch it for now. */
3425	if (kasan_slab_free(cachep, objp, _RET_IP_))
3426		return;
3427
3428	___cache_free(cachep, objp, caller);
3429}
3430
3431void ___cache_free(struct kmem_cache *cachep, void *objp,
3432		unsigned long caller)
3433{
3434	struct array_cache *ac = cpu_cache_get(cachep);
3435
3436	check_irq_off();
3437	if (unlikely(slab_want_init_on_free(cachep)))
3438		memset(objp, 0, cachep->object_size);
3439	kmemleak_free_recursive(objp, cachep->flags);
3440	objp = cache_free_debugcheck(cachep, objp, caller);
3441
3442	/*
3443	 * Skip calling cache_free_alien() when the platform is not numa.
3444	 * This will avoid cache misses that happen while accessing slabp (which
3445	 * is a per-page memory reference) to get nodeid. Instead use a global
3446	 * variable to skip the call, which is most likely to be present in
3447	 * the cache.
3448	 */
3449	if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
3450		return;
3451
3452	if (ac->avail < ac->limit) {
3453		STATS_INC_FREEHIT(cachep);
3454	} else {
3455		STATS_INC_FREEMISS(cachep);
3456		cache_flusharray(cachep, ac);
3457	}
3458
3459	if (sk_memalloc_socks()) {
3460		struct page *page = virt_to_head_page(objp);
3461
3462		if (unlikely(PageSlabPfmemalloc(page))) {
3463			cache_free_pfmemalloc(cachep, page, objp);
3464			return;
3465		}
3466	}
3467
3468	ac->entry[ac->avail++] = objp;
3469}
3470
3471/**
3472 * kmem_cache_alloc - Allocate an object
3473 * @cachep: The cache to allocate from.
3474 * @flags: See kmalloc().
3475 *
3476 * Allocate an object from this cache.  The flags are only relevant
3477 * if the cache has no available objects.
3478 *
3479 * Return: pointer to the new object or %NULL in case of error
3480 */
3481void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3482{
3483	void *ret = slab_alloc(cachep, flags, _RET_IP_);
3484
3485	trace_kmem_cache_alloc(_RET_IP_, ret,
3486			       cachep->object_size, cachep->size, flags);
3487
3488	return ret;
3489}
3490EXPORT_SYMBOL(kmem_cache_alloc);
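/*
 * Usage sketch (illustrative; 'foo_cache' is a hypothetical cache created
 * elsewhere with kmem_cache_create()):
 *
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *
 *	if (!f)
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free(foo_cache, f);
 */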
3491
3492static __always_inline void
3493cache_alloc_debugcheck_after_bulk(struct kmem_cache *s, gfp_t flags,
3494				  size_t size, void **p, unsigned long caller)
3495{
3496	size_t i;
3497
3498	for (i = 0; i < size; i++)
3499		p[i] = cache_alloc_debugcheck_after(s, flags, p[i], caller);
3500}
3501
3502int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
3503			  void **p)
3504{
3505	size_t i;
3506
3507	s = slab_pre_alloc_hook(s, flags);
3508	if (!s)
3509		return 0;
3510
3511	cache_alloc_debugcheck_before(s, flags);
3512
3513	local_irq_disable();
3514	for (i = 0; i < size; i++) {
3515		void *objp = __do_cache_alloc(s, flags);
3516
3517		if (unlikely(!objp))
3518			goto error;
3519		p[i] = objp;
3520	}
3521	local_irq_enable();
3522
3523	cache_alloc_debugcheck_after_bulk(s, flags, size, p, _RET_IP_);
3524
3525	/* Clear memory outside IRQ disabled section */
3526	if (unlikely(slab_want_init_on_alloc(flags, s)))
3527		for (i = 0; i < size; i++)
3528			memset(p[i], 0, s->object_size);
3529
3530	slab_post_alloc_hook(s, flags, size, p);
3531	/* FIXME: Trace call missing. Christoph would like a bulk variant */
3532	return size;
3533error:
3534	local_irq_enable();
3535	cache_alloc_debugcheck_after_bulk(s, flags, i, p, _RET_IP_);
3536	slab_post_alloc_hook(s, flags, i, p);
3537	__kmem_cache_free_bulk(s, i, p);
3538	return 0;
3539}
3540EXPORT_SYMBOL(kmem_cache_alloc_bulk);
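/*
 * Usage sketch (illustrative): allocate several objects in one call and
 * release them with the bulk free counterpart; kmem_cache_alloc_bulk()
 * returns 0 on failure and the requested count on success.  'foo_cache'
 * is hypothetical.
 *
 *	void *objs[16];
 *
 *	if (!kmem_cache_alloc_bulk(foo_cache, GFP_KERNEL, ARRAY_SIZE(objs), objs))
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free_bulk(foo_cache, ARRAY_SIZE(objs), objs);
 */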
3541
3542#ifdef CONFIG_TRACING
3543void *
3544kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
3545{
3546	void *ret;
3547
3548	ret = slab_alloc(cachep, flags, _RET_IP_);
3549
3550	ret = kasan_kmalloc(cachep, ret, size, flags);
3551	trace_kmalloc(_RET_IP_, ret,
3552		      size, cachep->size, flags);
3553	return ret;
3554}
3555EXPORT_SYMBOL(kmem_cache_alloc_trace);
3556#endif
3557
3558#ifdef CONFIG_NUMA
3559/**
3560 * kmem_cache_alloc_node - Allocate an object on the specified node
3561 * @cachep: The cache to allocate from.
3562 * @flags: See kmalloc().
3563 * @nodeid: node number of the target node.
3564 *
3565 * Identical to kmem_cache_alloc but it will allocate memory on the given
3566 * node, which can improve the performance for cpu bound structures.
3567 *
3568 * Fallback to other node is possible if __GFP_THISNODE is not set.
3569 *
3570 * Return: pointer to the new object or %NULL in case of error
3571 */
3572void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
3573{
3574	void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
3575
3576	trace_kmem_cache_alloc_node(_RET_IP_, ret,
3577				    cachep->object_size, cachep->size,
3578				    flags, nodeid);
3579
3580	return ret;
3581}
3582EXPORT_SYMBOL(kmem_cache_alloc_node);
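/*
 * Usage sketch (illustrative): allocate an object close to a device's NUMA
 * node; without __GFP_THISNODE the allocation may fall back to other nodes.
 * 'foo_cache' and 'dev' are hypothetical.
 *
 *	struct foo *f = kmem_cache_alloc_node(foo_cache, GFP_KERNEL,
 *					      dev_to_node(dev));
 */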
3583
3584#ifdef CONFIG_TRACING
3585void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
3586				  gfp_t flags,
3587				  int nodeid,
3588				  size_t size)
3589{
3590	void *ret;
3591
3592	ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
3593
3594	ret = kasan_kmalloc(cachep, ret, size, flags);
3595	trace_kmalloc_node(_RET_IP_, ret,
3596			   size, cachep->size,
3597			   flags, nodeid);
3598	return ret;
3599}
3600EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
3601#endif
3602
3603static __always_inline void *
3604__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
3605{
3606	struct kmem_cache *cachep;
3607	void *ret;
3608
3609	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
3610		return NULL;
3611	cachep = kmalloc_slab(size, flags);
3612	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3613		return cachep;
3614	ret = kmem_cache_alloc_node_trace(cachep, flags, node, size);
3615	ret = kasan_kmalloc(cachep, ret, size, flags);
3616
3617	return ret;
3618}
3619
3620void *__kmalloc_node(size_t size, gfp_t flags, int node)
3621{
3622	return __do_kmalloc_node(size, flags, node, _RET_IP_);
3623}
3624EXPORT_SYMBOL(__kmalloc_node);
3625
3626void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
3627		int node, unsigned long caller)
3628{
3629	return __do_kmalloc_node(size, flags, node, caller);
3630}
3631EXPORT_SYMBOL(__kmalloc_node_track_caller);
3632#endif /* CONFIG_NUMA */
3633
3634/**
3635 * __do_kmalloc - allocate memory
3636 * @size: how many bytes of memory are required.
3637 * @flags: the type of memory to allocate (see kmalloc).
3638 * @caller: function caller for debug tracking of the caller
3639 *
3640 * Return: pointer to the allocated memory or %NULL in case of error
3641 */
3642static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
3643					  unsigned long caller)
3644{
3645	struct kmem_cache *cachep;
3646	void *ret;
3647
3648	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
3649		return NULL;
3650	cachep = kmalloc_slab(size, flags);
3651	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3652		return cachep;
3653	ret = slab_alloc(cachep, flags, caller);
3654
3655	ret = kasan_kmalloc(cachep, ret, size, flags);
3656	trace_kmalloc(caller, ret,
3657		      size, cachep->size, flags);
3658
3659	return ret;
3660}
3661
3662void *__kmalloc(size_t size, gfp_t flags)
3663{
3664	return __do_kmalloc(size, flags, _RET_IP_);
3665}
3666EXPORT_SYMBOL(__kmalloc);
3667
3668void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
3669{
3670	return __do_kmalloc(size, flags, caller);
3671}
3672EXPORT_SYMBOL(__kmalloc_track_caller);
3673
3674/**
3675 * kmem_cache_free - Deallocate an object
3676 * @cachep: The cache the allocation was from.
3677 * @objp: The previously allocated object.
3678 *
3679 * Free an object which was previously allocated from this
3680 * cache.
3681 */
3682void kmem_cache_free(struct kmem_cache *cachep, void *objp)
3683{
3684	unsigned long flags;
3685	cachep = cache_from_obj(cachep, objp);
3686	if (!cachep)
3687		return;
3688
3689	local_irq_save(flags);
3690	debug_check_no_locks_freed(objp, cachep->object_size);
3691	if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
3692		debug_check_no_obj_freed(objp, cachep->object_size);
3693	__cache_free(cachep, objp, _RET_IP_);
3694	local_irq_restore(flags);
3695
3696	trace_kmem_cache_free(_RET_IP_, objp);
3697}
3698EXPORT_SYMBOL(kmem_cache_free);
3699
3700void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
3701{
3702	struct kmem_cache *s;
3703	size_t i;
3704
3705	local_irq_disable();
3706	for (i = 0; i < size; i++) {
3707		void *objp = p[i];
3708
3709		if (!orig_s) /* called via kfree_bulk */
3710			s = virt_to_cache(objp);
3711		else
3712			s = cache_from_obj(orig_s, objp);
3713		if (!s)
3714			continue;
3715
3716		debug_check_no_locks_freed(objp, s->object_size);
3717		if (!(s->flags & SLAB_DEBUG_OBJECTS))
3718			debug_check_no_obj_freed(objp, s->object_size);
3719
3720		__cache_free(s, objp, _RET_IP_);
3721	}
3722	local_irq_enable();
3723
3724	/* FIXME: add tracing */
3725}
3726EXPORT_SYMBOL(kmem_cache_free_bulk);
3727
3728/**
3729 * kfree - free previously allocated memory
3730 * @objp: pointer returned by kmalloc.
3731 *
3732 * If @objp is NULL, no operation is performed.
3733 *
3734 * Don't free memory not originally allocated by kmalloc()
3735 * or you will run into trouble.
3736 */
3737void kfree(const void *objp)
3738{
3739	struct kmem_cache *c;
3740	unsigned long flags;
3741
3742	trace_kfree(_RET_IP_, objp);
3743
3744	if (unlikely(ZERO_OR_NULL_PTR(objp)))
3745		return;
3746	local_irq_save(flags);
3747	kfree_debugcheck(objp);
3748	c = virt_to_cache(objp);
3749	if (!c) {
3750		local_irq_restore(flags);
3751		return;
3752	}
3753	debug_check_no_locks_freed(objp, c->object_size);
3754
3755	debug_check_no_obj_freed(objp, c->object_size);
3756	__cache_free(c, (void *)objp, _RET_IP_);
3757	local_irq_restore(flags);
3758}
3759EXPORT_SYMBOL(kfree);
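/*
 * Usage sketch (illustrative): kfree() pairs with kmalloc()/kzalloc();
 * passing a NULL (or ZERO_SIZE_PTR) pointer is a no-op, so unconditional
 * cleanup paths are fine.
 *
 *	buf = kzalloc(len, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	kfree(buf);
 */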
3760
3761/*
3762 * This initializes kmem_cache_node or resizes various caches for all nodes.
3763 */
3764static int setup_kmem_cache_nodes(struct kmem_cache *cachep, gfp_t gfp)
3765{
3766	int ret;
3767	int node;
3768	struct kmem_cache_node *n;
3769
3770	for_each_online_node(node) {
3771		ret = setup_kmem_cache_node(cachep, node, gfp, true);
3772		if (ret)
3773			goto fail;
3774
3775	}
3776
3777	return 0;
3778
3779fail:
3780	if (!cachep->list.next) {
3781		/* Cache is not active yet. Roll back what we did */
3782		node--;
3783		while (node >= 0) {
3784			n = get_node(cachep, node);
3785			if (n) {
3786				kfree(n->shared);
3787				free_alien_cache(n->alien);
3788				kfree(n);
3789				cachep->node[node] = NULL;
3790			}
3791			node--;
3792		}
3793	}
3794	return -ENOMEM;
3795}
3796
3797/* Always called with the slab_mutex held */
3798static int __do_tune_cpucache(struct kmem_cache *cachep, int limit,
3799				int batchcount, int shared, gfp_t gfp)
3800{
3801	struct array_cache __percpu *cpu_cache, *prev;
3802	int cpu;
3803
3804	cpu_cache = alloc_kmem_cache_cpus(cachep, limit, batchcount);
3805	if (!cpu_cache)
3806		return -ENOMEM;
3807
3808	prev = cachep->cpu_cache;
3809	cachep->cpu_cache = cpu_cache;
3810	/*
3811	 * Without a previous cpu_cache there's no need to synchronize remote
3812	 * cpus, so skip the IPIs.
3813	 */
3814	if (prev)
3815		kick_all_cpus_sync();
3816
3817	check_irq_on();
3818	cachep->batchcount = batchcount;
3819	cachep->limit = limit;
3820	cachep->shared = shared;
3821
3822	if (!prev)
3823		goto setup_node;
3824
3825	for_each_online_cpu(cpu) {
3826		LIST_HEAD(list);
3827		int node;
3828		struct kmem_cache_node *n;
3829		struct array_cache *ac = per_cpu_ptr(prev, cpu);
3830
3831		node = cpu_to_mem(cpu);
3832		n = get_node(cachep, node);
3833		spin_lock_irq(&n->list_lock);
3834		free_block(cachep, ac->entry, ac->avail, node, &list);
3835		spin_unlock_irq(&n->list_lock);
3836		slabs_destroy(cachep, &list);
3837	}
3838	free_percpu(prev);
3839
3840setup_node:
3841	return setup_kmem_cache_nodes(cachep, gfp);
3842}
3843
3844static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
3845				int batchcount, int shared, gfp_t gfp)
3846{
3847	int ret;
3848	struct kmem_cache *c;
 
3849
3850	ret = __do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
3851
3852	if (slab_state < FULL)
3853		return ret;
3854
3855	if ((ret < 0) || !is_root_cache(cachep))
3856		return ret;
3857
3858	lockdep_assert_held(&slab_mutex);
3859	for_each_memcg_cache(c, cachep) {
3860		/* return value determined by the root cache only */
3861		__do_tune_cpucache(c, limit, batchcount, shared, gfp);
3862	}
3863
3864	return ret;
3865}
3866
3867/* Called with slab_mutex held always */
3868static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
3869{
3870	int err;
3871	int limit = 0;
3872	int shared = 0;
3873	int batchcount = 0;
3874
3875	err = cache_random_seq_create(cachep, cachep->num, gfp);
3876	if (err)
3877		goto end;
3878
3879	if (!is_root_cache(cachep)) {
3880		struct kmem_cache *root = memcg_root_cache(cachep);
3881		limit = root->limit;
3882		shared = root->shared;
3883		batchcount = root->batchcount;
3884	}
3885
3886	if (limit && shared && batchcount)
3887		goto skip_setup;
3888	/*
3889	 * The head array serves three purposes:
3890	 * - create a LIFO ordering, i.e. return objects that are cache-warm
3891	 * - reduce the number of spinlock operations.
3892	 * - reduce the number of linked list operations on the slab and
3893	 *   bufctl chains: array operations are cheaper.
3894	 * The numbers are guessed, we should auto-tune as described by
3895	 * Bonwick.
3896	 */
3897	if (cachep->size > 131072)
3898		limit = 1;
3899	else if (cachep->size > PAGE_SIZE)
3900		limit = 8;
3901	else if (cachep->size > 1024)
3902		limit = 24;
3903	else if (cachep->size > 256)
3904		limit = 54;
3905	else
3906		limit = 120;
3907
3908	/*
3909	 * CPU bound tasks (e.g. network routing) can exhibit cpu bound
3910	 * allocation behaviour: Most allocs on one cpu, most free operations
3911	 * on another cpu. For these cases, an efficient object passing between
3912	 * cpus is necessary. This is provided by a shared array. The array
3913	 * replaces Bonwick's magazine layer.
3914	 * On uniprocessor, it's functionally equivalent (but less efficient)
3915	 * to a larger limit. Thus disabled by default.
3916	 */
3917	shared = 0;
3918	if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1)
3919		shared = 8;
3920
3921#if DEBUG
3922	/*
3923	 * With debugging enabled, a large batchcount leads to excessively long
3924	 * periods with local interrupts disabled. Limit the batchcount.
3925	 */
3926	if (limit > 32)
3927		limit = 32;
3928#endif
3929	batchcount = (limit + 1) / 2;
3930skip_setup:
3931	err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
3932end:
3933	if (err)
3934		pr_err("enable_cpucache failed for %s, error %d\n",
3935		       cachep->name, -err);
3936	return err;
3937}
3938
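/*
 * Worked example (not in the original file) of the heuristics above, assuming
 * an SMP build without DEBUG and 4K pages: a cache with cachep->size == 512
 * falls into the 257..1024 bracket, so limit = 54; 512 <= PAGE_SIZE with more
 * than one possible CPU gives shared = 8; and batchcount = (54 + 1) / 2 = 27.
 */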
3939/*
3940 * Drain an array if it contains any elements, taking the node lock only if
3941 * necessary. Note that the node list_lock also protects the array_cache
3942 * if drain_array() is used on the shared array.
3943 */
3944static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
3945			 struct array_cache *ac, int node)
3946{
3947	LIST_HEAD(list);
3948
3949	/* ac from n->shared can be freed if we don't hold the slab_mutex. */
3950	check_mutex_acquired();
3951
3952	if (!ac || !ac->avail)
3953		return;
3954
3955	if (ac->touched) {
3956		ac->touched = 0;
3957		return;
3958	}
3959
3960	spin_lock_irq(&n->list_lock);
3961	drain_array_locked(cachep, ac, node, false, &list);
3962	spin_unlock_irq(&n->list_lock);
3963
3964	slabs_destroy(cachep, &list);
3965}
3966
3967/**
3968 * cache_reap - Reclaim memory from caches.
3969 * @w: work descriptor
3970 *
3971 * Called from workqueue/eventd every few seconds.
3972 * Purpose:
3973 * - clear the per-cpu caches for this CPU.
3974 * - return freeable pages to the main free memory pool.
3975 *
3976 * If we cannot acquire the cache chain mutex then just give up - we'll try
3977 * again on the next iteration.
3978 */
3979static void cache_reap(struct work_struct *w)
3980{
3981	struct kmem_cache *searchp;
3982	struct kmem_cache_node *n;
3983	int node = numa_mem_id();
3984	struct delayed_work *work = to_delayed_work(w);
3985
3986	if (!mutex_trylock(&slab_mutex))
3987		/* Give up. Setup the next iteration. */
3988		goto out;
3989
3990	list_for_each_entry(searchp, &slab_caches, list) {
3991		check_irq_on();
3992
3993		/*
3994		 * We only take the node lock if absolutely necessary and we
3995		 * have established with reasonable certainty that
3996		 * we can do some work if the lock was obtained.
3997		 */
3998		n = get_node(searchp, node);
3999
4000		reap_alien(searchp, n);
4001
4002		drain_array(searchp, n, cpu_cache_get(searchp), node);
4003
4004		/*
4005		 * These are racy checks but it does not matter
4006		 * if we skip one check or scan twice.
4007		 */
4008		if (time_after(n->next_reap, jiffies))
4009			goto next;
4010
4011		n->next_reap = jiffies + REAPTIMEOUT_NODE;
4012
4013		drain_array(searchp, n, n->shared, node);
4014
4015		if (n->free_touched)
4016			n->free_touched = 0;
4017		else {
4018			int freed;
4019
4020			freed = drain_freelist(searchp, n, (n->free_limit +
4021				5 * searchp->num - 1) / (5 * searchp->num));
4022			STATS_ADD_REAPED(searchp, freed);
4023		}
4024next:
4025		cond_resched();
4026	}
4027	check_irq_on();
4028	mutex_unlock(&slab_mutex);
4029	next_reap_node();
4030out:
4031	/* Set up the next iteration */
4032	schedule_delayed_work_on(smp_processor_id(), work,
4033				round_jiffies_relative(REAPTIMEOUT_AC));
4034}
4035
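/*
 * Worked example (not in the original file) of the reap target used in
 * cache_reap() above: tofree = (free_limit + 5*num - 1) / (5*num) is the
 * ceiling of free_limit / (5*num), i.e. roughly a fifth of the node's free
 * limit expressed in whole slabs. With a hypothetical free_limit of 120
 * objects and num = 8 objects per slab, each pass frees at most
 * (120 + 40 - 1) / 40 = 3 empty slabs.
 */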
 
4036void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
4037{
4038	unsigned long active_objs, num_objs, active_slabs;
4039	unsigned long total_slabs = 0, free_objs = 0, shared_avail = 0;
4040	unsigned long free_slabs = 0;
4041	int node;
4042	struct kmem_cache_node *n;
4043
4044	for_each_kmem_cache_node(cachep, node, n) {
4045		check_irq_on();
4046		spin_lock_irq(&n->list_lock);
4047
4048		total_slabs += n->total_slabs;
4049		free_slabs += n->free_slabs;
4050		free_objs += n->free_objects;
4051
4052		if (n->shared)
4053			shared_avail += n->shared->avail;
4054
4055		spin_unlock_irq(&n->list_lock);
4056	}
4057	num_objs = total_slabs * cachep->num;
4058	active_slabs = total_slabs - free_slabs;
4059	active_objs = num_objs - free_objs;
4060
4061	sinfo->active_objs = active_objs;
4062	sinfo->num_objs = num_objs;
4063	sinfo->active_slabs = active_slabs;
4064	sinfo->num_slabs = total_slabs;
4065	sinfo->shared_avail = shared_avail;
4066	sinfo->limit = cachep->limit;
4067	sinfo->batchcount = cachep->batchcount;
4068	sinfo->shared = cachep->shared;
4069	sinfo->objects_per_slab = cachep->num;
4070	sinfo->cache_order = cachep->gfporder;
4071}
4072
4073void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
4074{
4075#if STATS
4076	{			/* node stats */
4077		unsigned long high = cachep->high_mark;
4078		unsigned long allocs = cachep->num_allocations;
4079		unsigned long grown = cachep->grown;
4080		unsigned long reaped = cachep->reaped;
4081		unsigned long errors = cachep->errors;
4082		unsigned long max_freeable = cachep->max_freeable;
4083		unsigned long node_allocs = cachep->node_allocs;
4084		unsigned long node_frees = cachep->node_frees;
4085		unsigned long overflows = cachep->node_overflow;
4086
4087		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu %4lu %4lu %4lu %4lu %4lu",
4088			   allocs, high, grown,
4089			   reaped, errors, max_freeable, node_allocs,
4090			   node_frees, overflows);
4091	}
4092	/* cpu stats */
4093	{
4094		unsigned long allochit = atomic_read(&cachep->allochit);
4095		unsigned long allocmiss = atomic_read(&cachep->allocmiss);
4096		unsigned long freehit = atomic_read(&cachep->freehit);
4097		unsigned long freemiss = atomic_read(&cachep->freemiss);
4098
4099		seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
4100			   allochit, allocmiss, freehit, freemiss);
4101	}
4102#endif
4103}
4104
4105#define MAX_SLABINFO_WRITE 128
4106/**
4107 * slabinfo_write - Tuning for the slab allocator
4108 * @file: unused
4109 * @buffer: user buffer
4110 * @count: data length
4111 * @ppos: unused
4112 *
4113 * Return: %0 on success, negative error code otherwise.
4114 */
4115ssize_t slabinfo_write(struct file *file, const char __user *buffer,
4116		       size_t count, loff_t *ppos)
4117{
4118	char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
4119	int limit, batchcount, shared, res;
4120	struct kmem_cache *cachep;
4121
4122	if (count > MAX_SLABINFO_WRITE)
4123		return -EINVAL;
4124	if (copy_from_user(&kbuf, buffer, count))
4125		return -EFAULT;
4126	kbuf[MAX_SLABINFO_WRITE] = '\0';
4127
4128	tmp = strchr(kbuf, ' ');
4129	if (!tmp)
4130		return -EINVAL;
4131	*tmp = '\0';
4132	tmp++;
4133	if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
4134		return -EINVAL;
4135
4136	/* Find the cache in the chain of caches. */
4137	mutex_lock(&slab_mutex);
4138	res = -EINVAL;
4139	list_for_each_entry(cachep, &slab_caches, list) {
4140		if (!strcmp(cachep->name, kbuf)) {
4141			if (limit < 1 || batchcount < 1 ||
4142					batchcount > limit || shared < 0) {
4143				res = 0;
4144			} else {
4145				res = do_tune_cpucache(cachep, limit,
4146						       batchcount, shared,
4147						       GFP_KERNEL);
4148			}
4149			break;
4150		}
4151	}
4152	mutex_unlock(&slab_mutex);
4153	if (res >= 0)
4154		res = count;
4155	return res;
4156}
4157
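/*
 * Illustrative only, not part of the original file: slabinfo_write() above is
 * reached by writing to /proc/slabinfo, so tuning a cache from userspace looks
 * like (cache name and values are examples, batchcount must not exceed limit):
 *
 *	# echo "dentry 120 60 8" > /proc/slabinfo
 *
 * i.e. "<cache name> <limit> <batchcount> <shared>".
 */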
4158#ifdef CONFIG_HARDENED_USERCOPY
4159/*
4160 * Rejects incorrectly sized objects and objects that are to be copied
4161 * to/from userspace but do not fall entirely within the containing slab
4162 * cache's usercopy region.
4163 *
4164 * Does not return a value: a copy that fails the check is either warned
4165 * about via usercopy_warn() (usercopy_fallback) or rejected via
4166 * usercopy_abort().
4166 */
4167void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
4168			 bool to_user)
4169{
4170	struct kmem_cache *cachep;
4171	unsigned int objnr;
4172	unsigned long offset;
4173
4174	ptr = kasan_reset_tag(ptr);
4175
4176	/* Find and validate object. */
4177	cachep = page->slab_cache;
4178	objnr = obj_to_index(cachep, page, (void *)ptr);
4179	BUG_ON(objnr >= cachep->num);
4180
4181	/* Find offset within object. */
4182	offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
4183
4184	/* Allow address range falling entirely within usercopy region. */
4185	if (offset >= cachep->useroffset &&
4186	    offset - cachep->useroffset <= cachep->usersize &&
4187	    n <= cachep->useroffset - offset + cachep->usersize)
4188		return;
4189
4190	/*
4191	 * If the copy is still within the allocated object, produce
4192	 * a warning instead of rejecting the copy. This is intended
4193	 * to be a temporary method to find any missing usercopy
4194	 * whitelists.
4195	 */
4196	if (usercopy_fallback &&
4197	    offset <= cachep->object_size &&
4198	    n <= cachep->object_size - offset) {
4199		usercopy_warn("SLAB object", cachep->name, to_user, offset, n);
4200		return;
4201	}
4202
4203	usercopy_abort("SLAB object", cachep->name, to_user, offset, n);
4204}
4205#endif /* CONFIG_HARDENED_USERCOPY */
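/*
 * Illustrative sketch, not part of the original file: a cache whose objects
 * are partially copied to/from userspace declares the whitelisted region that
 * __check_heap_object() above verifies by creating the cache with
 * kmem_cache_create_usercopy(). struct foo and its fields are hypothetical.
 */
#if 0	/* example only */
struct foo {
	spinlock_t lock;	/* kernel-internal, never copied to userspace */
	char name[32];		/* exposed via copy_to_user()/copy_from_user() */
};

static struct kmem_cache *foo_cache;

static void foo_cache_init(void)
{
	foo_cache = kmem_cache_create_usercopy("foo_cache", sizeof(struct foo),
					       0, SLAB_HWCACHE_ALIGN,
					       offsetof(struct foo, name),
					       sizeof(((struct foo *)0)->name),
					       NULL);
}
#endif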
4206
4207/**
4208 * __ksize -- Uninstrumented ksize.
4209 * @objp: pointer to the object
4210 *
4211 * Unlike ksize(), __ksize() is uninstrumented, and does not provide the same
4212 * safety checks as ksize() with KASAN instrumentation enabled.
4213 *
4214 * Return: size of the actual memory used by @objp in bytes
4215 */
4216size_t __ksize(const void *objp)
4217{
4218	struct kmem_cache *c;
4219	size_t size;
4220
4221	BUG_ON(!objp);
4222	if (unlikely(objp == ZERO_SIZE_PTR))
4223		return 0;
4224
4225	c = virt_to_cache(objp);
4226	size = c ? c->object_size : 0;
4227
4228	return size;
4229}
4230EXPORT_SYMBOL(__ksize);
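/*
 * Worked example (not in the original file): kmalloc() rounds requests up to a
 * kmalloc size class, so __ksize()/ksize() report the usable size of the
 * backing object rather than the requested size. With SLAB's default minimum
 * kmalloc cache of 32 bytes, a kmalloc(10, GFP_KERNEL) allocation comes from
 * kmalloc-32 and ksize() reports 32.
 */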
v3.15
 
   1/*
   2 * linux/mm/slab.c
   3 * Written by Mark Hemment, 1996/97.
   4 * (markhe@nextd.demon.co.uk)
   5 *
   6 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
   7 *
   8 * Major cleanup, different bufctl logic, per-cpu arrays
   9 *	(c) 2000 Manfred Spraul
  10 *
  11 * Cleanup, make the head arrays unconditional, preparation for NUMA
  12 * 	(c) 2002 Manfred Spraul
  13 *
  14 * An implementation of the Slab Allocator as described in outline in;
  15 *	UNIX Internals: The New Frontiers by Uresh Vahalia
  16 *	Pub: Prentice Hall	ISBN 0-13-101908-2
  17 * or with a little more detail in;
  18 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
  19 *	Jeff Bonwick (Sun Microsystems).
  20 *	Presented at: USENIX Summer 1994 Technical Conference
  21 *
  22 * The memory is organized in caches, one cache for each object type.
  23 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
  24 * Each cache consists out of many slabs (they are small (usually one
  25 * page long) and always contiguous), and each slab contains multiple
  26 * initialized objects.
  27 *
  28 * This means, that your constructor is used only for newly allocated
  29 * slabs and you must pass objects with the same initializations to
  30 * kmem_cache_free.
  31 *
  32 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
  33 * normal). If you need a special memory type, then must create a new
  34 * cache for that memory type.
  35 *
  36 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
  37 *   full slabs with 0 free objects
  38 *   partial slabs
  39 *   empty slabs with no allocated objects
  40 *
  41 * If partial slabs exist, then new allocations come from these slabs,
  42 * otherwise from empty slabs or new slabs are allocated.
  43 *
  44 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
  45 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
  46 *
  47 * Each cache has a short per-cpu head array, most allocs
  48 * and frees go into that array, and if that array overflows, then 1/2
  49 * of the entries in the array are given back into the global cache.
  50 * The head array is strictly LIFO and should improve the cache hit rates.
  51 * On SMP, it additionally reduces the spinlock operations.
  52 *
  53 * The c_cpuarray may not be read with enabled local interrupts -
  54 * it's changed with a smp_call_function().
  55 *
  56 * SMP synchronization:
  57 *  constructors and destructors are called without any locking.
  58 *  Several members in struct kmem_cache and struct slab never change, they
  59 *	are accessed without any locking.
  60 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
  61 *  	and local interrupts are disabled so slab code is preempt-safe.
  62 *  The non-constant members are protected with a per-cache irq spinlock.
  63 *
  64 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
  65 * in 2000 - many ideas in the current implementation are derived from
  66 * his patch.
  67 *
  68 * Further notes from the original documentation:
  69 *
  70 * 11 April '97.  Started multi-threading - markhe
  71 *	The global cache-chain is protected by the mutex 'slab_mutex'.
  72 *	The sem is only needed when accessing/extending the cache-chain, which
  73 *	can never happen inside an interrupt (kmem_cache_create(),
  74 *	kmem_cache_shrink() and kmem_cache_reap()).
  75 *
  76 *	At present, each engine can be growing a cache.  This should be blocked.
  77 *
  78 * 15 March 2005. NUMA slab allocator.
  79 *	Shai Fultheim <shai@scalex86.org>.
  80 *	Shobhit Dayal <shobhit@calsoftinc.com>
  81 *	Alok N Kataria <alokk@calsoftinc.com>
  82 *	Christoph Lameter <christoph@lameter.com>
  83 *
  84 *	Modified the slab allocator to be node aware on NUMA systems.
  85 *	Each node has its own list of partial, free and full slabs.
  86 *	All object allocations for a node occur from node specific slab lists.
  87 */
  88
  89#include	<linux/__KEEPIDENTS__B.h>
  90#include	<linux/__KEEPIDENTS__C.h>
  91#include	<linux/__KEEPIDENTS__D.h>
  92#include	<linux/__KEEPIDENTS__E.h>
  93#include	<linux/__KEEPIDENTS__F.h>
  94#include	<linux/__KEEPIDENTS__G.h>
  95#include	<linux/__KEEPIDENTS__H.h>
  96#include	<linux/__KEEPIDENTS__I.h>
  97#include	<linux/__KEEPIDENTS__J.h>
  98#include	<linux/proc_fs.h>
  99#include	<linux/__KEEPIDENTS__BA.h>
 100#include	<linux/__KEEPIDENTS__BB.h>
 101#include	<linux/__KEEPIDENTS__BC.h>
 102#include	<linux/cpu.h>
 103#include	<linux/__KEEPIDENTS__BD.h>
 104#include	<linux/__KEEPIDENTS__BE.h>
 105#include	<linux/rcupdate.h>
 106#include	<linux/__KEEPIDENTS__BF.h>
 107#include	<linux/__KEEPIDENTS__BG.h>
 108#include	<linux/__KEEPIDENTS__BH.h>
 109#include	<linux/kmemleak.h>
 110#include	<linux/__KEEPIDENTS__BI.h>
 111#include	<linux/__KEEPIDENTS__BJ.h>
 112#include	<linux/__KEEPIDENTS__CA-__KEEPIDENTS__CB.h>
 113#include	<linux/__KEEPIDENTS__CC.h>
 114#include	<linux/reciprocal_div.h>
 115#include	<linux/debugobjects.h>
 116#include	<linux/kmemcheck.h>
 117#include	<linux/__KEEPIDENTS__CD.h>
 118#include	<linux/__KEEPIDENTS__CE.h>
 
 119
 120#include	<net/__KEEPIDENTS__CF.h>
 121
 122#include	<asm/cacheflush.h>
 123#include	<asm/tlbflush.h>
 124#include	<asm/page.h>
 125
 126#include <trace/events/kmem.h>
 127
 128#include	"internal.h"
 129
 130#include	"slab.h"
 131
 132/*
 133 * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
 134 *		  0 for faster, smaller code (especially in the critical paths).
 135 *
 136 * STATS	- 1 to collect stats for /proc/slabinfo.
 137 *		  0 for faster, smaller code (especially in the critical paths).
 138 *
 139 * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
 140 */
 141
 142#ifdef CONFIG_DEBUG_SLAB
 143#define	DEBUG		1
 144#define	STATS		1
 145#define	FORCED_DEBUG	1
 146#else
 147#define	DEBUG		0
 148#define	STATS		0
 149#define	FORCED_DEBUG	0
 150#endif
 151
 152/* Shouldn't this be in a header file somewhere? */
 153#define	BYTES_PER_WORD		sizeof(void *)
 154#define	REDZONE_ALIGN		max(BYTES_PER_WORD, __alignof__(unsigned long long))
 155
 156#ifndef ARCH_KMALLOC_FLAGS
 157#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
 158#endif
 159
 160#define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \
 161				<= SLAB_OBJ_MIN_SIZE) ? 1 : 0)
 162
 163#if FREELIST_BYTE_INDEX
 164typedef unsigned char freelist_idx_t;
 165#else
 166typedef unsigned short freelist_idx_t;
 167#endif
 168
 169#define SLAB_OBJ_MAX_NUM ((1 << sizeof(freelist_idx_t) * BITS_PER_BYTE) - 1)
 170
 171/*
 172 * true if a page was allocated from pfmemalloc reserves for network-based
 173 * swap
 174 */
 175static bool pfmemalloc_active __read_mostly;
 176
 177/*
 178 * struct array_cache
 179 *
 180 * Purpose:
 181 * - LIFO ordering, to hand out cache-warm objects from _alloc
 182 * - reduce the number of linked list operations
 183 * - reduce spinlock operations
 184 *
 185 * The limit is stored in the per-cpu structure to reduce the data cache
 186 * footprint.
 187 *
 188 */
 189struct array_cache {
 190	unsigned int avail;
 191	unsigned int limit;
 192	unsigned int batchcount;
 193	unsigned int touched;
 194	spinlock_t lock;
 195	void *entry[];	/*
 196			 * Must have this definition in here for the proper
 197			 * alignment of array_cache. Also simplifies accessing
 198			 * the entries.
 199			 *
 200			 * Entries should not be directly dereferenced as
 201			 * entries belonging to slabs marked pfmemalloc will
 202			 * have the lower bits set SLAB_OBJ_PFMEMALLOC
 203			 */
 204};
 205
 206#define SLAB_OBJ_PFMEMALLOC	1
 207static inline bool is_obj_pfmemalloc(void *objp)
 208{
 209	return (unsigned long)objp & SLAB_OBJ_PFMEMALLOC;
 210}
 211
 212static inline void set_obj_pfmemalloc(void **objp)
 213{
 214	*objp = (void *)((unsigned long)*objp | SLAB_OBJ_PFMEMALLOC);
 215	return;
 216}
 217
 218static inline void clear_obj_pfmemalloc(void **objp)
 219{
 220	*objp = (void *)((unsigned long)*objp & ~SLAB_OBJ_PFMEMALLOC);
 221}
 222
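/*
 * Worked example (not in the original file): slab objects are at least
 * word-aligned, so bit 0 of an object pointer is always clear and can carry
 * the SLAB_OBJ_PFMEMALLOC tag above. An object at a hypothetical address
 * 0xffff880012345680 is stored in the array_cache as 0xffff880012345681 after
 * set_obj_pfmemalloc(), and clear_obj_pfmemalloc() restores the real pointer
 * before it is handed out.
 */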
 223/*
 224 * bootstrap: The caches do not work without cpuarrays anymore, but the
 225 * cpuarrays are allocated from the generic caches...
 226 */
 227#define BOOT_CPUCACHE_ENTRIES	1
 228struct arraycache_init {
 229	struct array_cache cache;
 230	void *entries[BOOT_CPUCACHE_ENTRIES];
 231};
 232
 233/*
 234 * Need this for bootstrapping a per node allocator.
 235 */
 236#define NUM_INIT_LISTS (3 * MAX_NUMNODES)
 237static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS];
 238#define	CACHE_CACHE 0
 239#define	SIZE_AC MAX_NUMNODES
 240#define	SIZE_NODE (2 * MAX_NUMNODES)
 241
 242static int drain_freelist(struct kmem_cache *cache,
 243			struct kmem_cache_node *n, int tofree);
 244static void free_block(struct kmem_cache *cachep, void **objpp, int len,
 245			int node);
 
 246static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
 247static void cache_reap(struct work_struct *unused);
 248
 249static int slab_early_init = 1;
 250
 251#define INDEX_AC kmalloc_index(sizeof(struct arraycache_init))
 252#define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node))
 253
 254static void kmem_cache_node_init(struct kmem_cache_node *parent)
 255{
 256	INIT_LIST_HEAD(&parent->slabs_full);
 257	INIT_LIST_HEAD(&parent->slabs_partial);
 258	INIT_LIST_HEAD(&parent->slabs_free);
 259	parent->shared = NULL;
 260	parent->alien = NULL;
 261	parent->colour_next = 0;
 262	spin_lock_init(&parent->list_lock);
 263	parent->free_objects = 0;
 264	parent->free_touched = 0;
 265}
 266
 267#define MAKE_LIST(cachep, listp, slab, nodeid)				\
 268	do {								\
 269		INIT_LIST_HEAD(listp);					\
 270		list_splice(&(cachep->node[nodeid]->slab), listp);	\
 271	} while (0)
 272
 273#define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
 274	do {								\
 275	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
 276	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
 277	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
 278	} while (0)
 279
 280#define CFLGS_OFF_SLAB		(0x80000000UL)
 281#define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)
 282
 283#define BATCHREFILL_LIMIT	16
 284/*
  285 * Optimization question: fewer reaps mean a lower probability of unnecessary
 286 * cpucache drain/refill cycles.
 287 *
 288 * OTOH the cpuarrays can contain lots of objects,
 289 * which could lock up otherwise freeable slabs.
 290 */
 291#define REAPTIMEOUT_AC		(2*HZ)
 292#define REAPTIMEOUT_NODE	(4*HZ)
 293
 294#if STATS
 295#define	STATS_INC_ACTIVE(x)	((x)->num_active++)
 296#define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
 297#define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
 298#define	STATS_INC_GROWN(x)	((x)->grown++)
 299#define	STATS_ADD_REAPED(x,y)	((x)->reaped += (y))
 300#define	STATS_SET_HIGH(x)						\
 301	do {								\
 302		if ((x)->num_active > (x)->high_mark)			\
 303			(x)->high_mark = (x)->num_active;		\
 304	} while (0)
 305#define	STATS_INC_ERR(x)	((x)->errors++)
 306#define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
 307#define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
 308#define STATS_INC_ACOVERFLOW(x)   ((x)->node_overflow++)
 309#define	STATS_SET_FREEABLE(x, i)					\
 310	do {								\
 311		if ((x)->max_freeable < i)				\
 312			(x)->max_freeable = i;				\
 313	} while (0)
 314#define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
 315#define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
 316#define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
 317#define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
 318#else
 319#define	STATS_INC_ACTIVE(x)	do { } while (0)
 320#define	STATS_DEC_ACTIVE(x)	do { } while (0)
 321#define	STATS_INC_ALLOCED(x)	do { } while (0)
 322#define	STATS_INC_GROWN(x)	do { } while (0)
 323#define	STATS_ADD_REAPED(x,y)	do { (void)(y); } while (0)
 324#define	STATS_SET_HIGH(x)	do { } while (0)
 325#define	STATS_INC_ERR(x)	do { } while (0)
 326#define	STATS_INC_NODEALLOCS(x)	do { } while (0)
 327#define	STATS_INC_NODEFREES(x)	do { } while (0)
 328#define STATS_INC_ACOVERFLOW(x)   do { } while (0)
 329#define	STATS_SET_FREEABLE(x, i) do { } while (0)
 330#define STATS_INC_ALLOCHIT(x)	do { } while (0)
 331#define STATS_INC_ALLOCMISS(x)	do { } while (0)
 332#define STATS_INC_FREEHIT(x)	do { } while (0)
 333#define STATS_INC_FREEMISS(x)	do { } while (0)
 334#endif
 335
 336#if DEBUG
 337
 338/*
 339 * memory layout of objects:
 340 * 0		: objp
 341 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
 342 * 		the end of an object is aligned with the end of the real
 343 * 		allocation. Catches writes behind the end of the allocation.
 344 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
 345 * 		redzone word.
 346 * cachep->obj_offset: The real object.
 347 * cachep->size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
 348 * cachep->size - 1* BYTES_PER_WORD: last caller address
 349 *					[BYTES_PER_WORD long]
 350 */
 351static int obj_offset(struct kmem_cache *cachep)
 352{
 353	return cachep->obj_offset;
 354}
 355
 356static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
 357{
 358	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
 359	return (unsigned long long*) (objp + obj_offset(cachep) -
 360				      sizeof(unsigned long long));
 361}
 362
 363static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
 364{
 365	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
 366	if (cachep->flags & SLAB_STORE_USER)
 367		return (unsigned long long *)(objp + cachep->size -
 368					      sizeof(unsigned long long) -
 369					      REDZONE_ALIGN);
 370	return (unsigned long long *) (objp + cachep->size -
 371				       sizeof(unsigned long long));
 372}
 373
 374static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 375{
 376	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
 377	return (void **)(objp + cachep->size - BYTES_PER_WORD);
 378}
 379
 380#else
 381
 382#define obj_offset(x)			0
 383#define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
 384#define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
 385#define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})
 386
 387#endif
 388
 389/*
 390 * Do not go above this order unless 0 objects fit into the slab or
 391 * overridden on the command line.
 392 */
 393#define	SLAB_MAX_ORDER_HI	1
 394#define	SLAB_MAX_ORDER_LO	0
 395static int slab_max_order = SLAB_MAX_ORDER_LO;
 396static bool slab_max_order_set __initdata;
 397
 398static inline struct kmem_cache *virt_to_cache(const void *obj)
 399{
 400	struct page *page = virt_to_head_page(obj);
 401	return page->slab_cache;
 402}
 403
 404static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
 405				 unsigned int idx)
 406{
 407	return page->s_mem + cache->size * idx;
 408}
 409
 410/*
 411 * We want to avoid an expensive divide : (offset / cache->size)
 412 *   Using the fact that size is a constant for a particular cache,
 413 *   we can replace (offset / cache->size) by
 414 *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
 415 */
 416static inline unsigned int obj_to_index(const struct kmem_cache *cache,
 417					const struct page *page, void *obj)
 418{
 419	u32 offset = (obj - page->s_mem);
 420	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
 421}
 422
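/*
 * Worked example (not in the original file): reciprocal_buffer_size is the
 * precomputed reciprocal of cache->size, so the division above becomes a
 * multiply and shift. For a hypothetical cache with size 256, an object
 * starting 1280 bytes into the slab (page->s_mem + 1280) maps to index
 * 1280 / 256 = 5.
 */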
 423static struct arraycache_init initarray_generic =
 424    { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
 425
 426/* internal cache of cache description objs */
 427static struct kmem_cache kmem_cache_boot = {
 428	.batchcount = 1,
 429	.limit = BOOT_CPUCACHE_ENTRIES,
 430	.shared = 1,
 431	.size = sizeof(struct kmem_cache),
 432	.name = "kmem_cache",
 433};
 434
 435#define BAD_ALIEN_MAGIC 0x01020304ul
 436
 437#ifdef CONFIG_LOCKDEP
 438
 439/*
 440 * Slab sometimes uses the kmalloc slabs to store the slab headers
 441 * for other slabs "off slab".
 442 * The locking for this is tricky in that it nests within the locks
 443 * of all other slabs in a few places; to deal with this special
 444 * locking we put on-slab caches into a separate lock-class.
 445 *
 446 * We set lock class for alien array caches which are up during init.
 447 * The lock annotation will be lost if all cpus of a node goes down and
 448 * then comes back up during hotplug
 449 */
 450static struct lock_class_key on_slab_l3_key;
 451static struct lock_class_key on_slab_alc_key;
 452
 453static struct lock_class_key debugobj_l3_key;
 454static struct lock_class_key debugobj_alc_key;
 455
 456static void slab_set_lock_classes(struct kmem_cache *cachep,
 457		struct lock_class_key *l3_key, struct lock_class_key *alc_key,
 458		int q)
 459{
 460	struct array_cache **alc;
 461	struct kmem_cache_node *n;
 462	int r;
 463
 464	n = cachep->node[q];
 465	if (!n)
 466		return;
 467
 468	lockdep_set_class(&n->list_lock, l3_key);
 469	alc = n->alien;
 470	/*
 471	 * FIXME: This check for BAD_ALIEN_MAGIC
 472	 * should go away when common slab code is taught to
 473	 * work even without alien caches.
 474	 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
 475	 * for alloc_alien_cache,
 476	 */
 477	if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
 478		return;
 479	for_each_node(r) {
 480		if (alc[r])
 481			lockdep_set_class(&alc[r]->lock, alc_key);
 482	}
 483}
 484
 485static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node)
 486{
 487	slab_set_lock_classes(cachep, &debugobj_l3_key, &debugobj_alc_key, node);
 488}
 489
 490static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
 491{
 492	int node;
 493
 494	for_each_online_node(node)
 495		slab_set_debugobj_lock_classes_node(cachep, node);
 496}
 497
 498static void init_node_lock_keys(int q)
 499{
 500	int i;
 501
 502	if (slab_state < UP)
 503		return;
 504
 505	for (i = 1; i <= KMALLOC_SHIFT_HIGH; i++) {
 506		struct kmem_cache_node *n;
 507		struct kmem_cache *cache = kmalloc_caches[i];
 508
 509		if (!cache)
 510			continue;
 511
 512		n = cache->node[q];
 513		if (!n || OFF_SLAB(cache))
 514			continue;
 515
 516		slab_set_lock_classes(cache, &on_slab_l3_key,
 517				&on_slab_alc_key, q);
 518	}
 519}
 520
 521static void on_slab_lock_classes_node(struct kmem_cache *cachep, int q)
 522{
 523	if (!cachep->node[q])
 524		return;
 525
 526	slab_set_lock_classes(cachep, &on_slab_l3_key,
 527			&on_slab_alc_key, q);
 528}
 529
 530static inline void on_slab_lock_classes(struct kmem_cache *cachep)
 531{
 532	int node;
 533
 534	VM_BUG_ON(OFF_SLAB(cachep));
 535	for_each_node(node)
 536		on_slab_lock_classes_node(cachep, node);
 537}
 538
 539static inline void init_lock_keys(void)
 540{
 541	int node;
 542
 543	for_each_node(node)
 544		init_node_lock_keys(node);
 545}
 546#else
 547static void init_node_lock_keys(int q)
 548{
 549}
 550
 551static inline void init_lock_keys(void)
 552{
 553}
 554
 555static inline void on_slab_lock_classes(struct kmem_cache *cachep)
 556{
 557}
 558
 559static inline void on_slab_lock_classes_node(struct kmem_cache *cachep, int node)
 560{
 561}
 562
 563static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node)
 564{
 565}
 566
 567static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
 568{
 569}
 570#endif
 571
 572static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
 573
 574static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
 575{
 576	return cachep->array[smp_processor_id()];
 577}
 578
 579static int calculate_nr_objs(size_t slab_size, size_t buffer_size,
 580				size_t idx_size, size_t align)
 581{
 582	int nr_objs;
 583	size_t freelist_size;
 584
 585	/*
 586	 * Ignore padding for the initial guess. The padding
 587	 * is at most @align-1 bytes, and @buffer_size is at
 588	 * least @align. In the worst case, this result will
 589	 * be one greater than the number of objects that fit
 590	 * into the memory allocation when taking the padding
 591	 * into account.
 592	 */
 593	nr_objs = slab_size / (buffer_size + idx_size);
 594
 595	/*
 596	 * This calculated number will be either the right
 597	 * amount, or one greater than what we want.
 598	 */
 599	freelist_size = slab_size - nr_objs * buffer_size;
 600	if (freelist_size < ALIGN(nr_objs * idx_size, align))
 601		nr_objs--;
 602
 603	return nr_objs;
 604}
 605
 606/*
 607 * Calculate the number of objects and left-over bytes for a given buffer size.
 608 */
 609static void cache_estimate(unsigned long gfporder, size_t buffer_size,
 610			   size_t align, int flags, size_t *left_over,
 611			   unsigned int *num)
 612{
 613	int nr_objs;
 614	size_t mgmt_size;
 615	size_t slab_size = PAGE_SIZE << gfporder;
 616
 617	/*
 618	 * The slab management structure can be either off the slab or
 619	 * on it. For the latter case, the memory allocated for a
 620	 * slab is used for:
 621	 *
 622	 * - One unsigned int for each object
 623	 * - Padding to respect alignment of @align
 624	 * - @buffer_size bytes for each object
 625	 *
 626	 * If the slab management structure is off the slab, then the
 627	 * alignment will already be calculated into the size. Because
 628	 * the slabs are all pages aligned, the objects will be at the
 629	 * correct alignment when allocated.
 630	 */
 631	if (flags & CFLGS_OFF_SLAB) {
 632		mgmt_size = 0;
 633		nr_objs = slab_size / buffer_size;
 634
 635	} else {
 636		nr_objs = calculate_nr_objs(slab_size, buffer_size,
 637					sizeof(freelist_idx_t), align);
 638		mgmt_size = ALIGN(nr_objs * sizeof(freelist_idx_t), align);
 639	}
 640	*num = nr_objs;
 641	*left_over = slab_size - nr_objs*buffer_size - mgmt_size;
 642}
 643
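/*
 * Worked example (not in the original file), assuming a single 4096-byte page
 * (gfporder 0), buffer_size 256, on-slab management, a one-byte freelist_idx_t
 * and align 64: the initial guess is 4096 / (256 + 1) = 15 objects; the
 * remaining 4096 - 15*256 = 256 bytes cover the ALIGN(15 * 1, 64) = 64-byte
 * freelist, so *num = 15 and *left_over = 4096 - 3840 - 64 = 192 bytes
 * (available for slab colouring).
 */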
 644#if DEBUG
 645#define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)
 646
 647static void __slab_error(const char *function, struct kmem_cache *cachep,
 648			char *msg)
 649{
 650	printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
 651	       function, cachep->name, msg);
 652	dump_stack();
 653	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
 654}
 655#endif
 656
 657/*
 658 * By default on NUMA we use alien caches to stage the freeing of
 659 * objects allocated from other nodes. This causes massive memory
 660 * inefficiencies when using fake NUMA setup to split memory into a
 661 * large number of small nodes, so it can be disabled on the command
 662 * line
 663  */
 664
 665static int use_alien_caches __read_mostly = 1;
 666static int __init noaliencache_setup(char *s)
 667{
 668	use_alien_caches = 0;
 669	return 1;
 670}
 671__setup("noaliencache", noaliencache_setup);
 672
 673static int __init slab_max_order_setup(char *str)
 674{
 675	get_option(&str, &slab_max_order);
 676	slab_max_order = slab_max_order < 0 ? 0 :
 677				min(slab_max_order, MAX_ORDER - 1);
 678	slab_max_order_set = true;
 679
 680	return 1;
 681}
 682__setup("slab_max_order=", slab_max_order_setup);
 683
 684#ifdef CONFIG_NUMA
 685/*
 686 * Special reaping functions for NUMA systems called from cache_reap().
 687 * These take care of doing round robin flushing of alien caches (containing
 688 * objects freed on different nodes from which they were allocated) and the
 689 * flushing of remote pcps by calling drain_node_pages.
 690 */
 691static DEFINE_PER_CPU(unsigned long, slab_reap_node);
 692
 693static void init_reap_node(int cpu)
 694{
 695	int node;
 696
 697	node = next_node(cpu_to_mem(cpu), node_online_map);
 698	if (node == MAX_NUMNODES)
 699		node = first_node(node_online_map);
 700
 701	per_cpu(slab_reap_node, cpu) = node;
 702}
 703
 704static void next_reap_node(void)
 705{
 706	int node = __this_cpu_read(slab_reap_node);
 707
 708	node = next_node(node, node_online_map);
 709	if (unlikely(node >= MAX_NUMNODES))
 710		node = first_node(node_online_map);
 711	__this_cpu_write(slab_reap_node, node);
 712}
 713
 714#else
 715#define init_reap_node(cpu) do { } while (0)
 716#define next_reap_node(void) do { } while (0)
 717#endif
 718
 719/*
 720 * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
 721 * via the workqueue/eventd.
 722 * Add the CPU number into the expiration time to minimize the possibility of
 723 * the CPUs getting into lockstep and contending for the global cache chain
 724 * lock.
 725 */
 726static void start_cpu_timer(int cpu)
 727{
 728	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);
 729
 730	/*
 731	 * When this gets called from do_initcalls via cpucache_init(),
 732	 * init_workqueues() has already run, so keventd will be setup
 733	 * at that time.
 734	 */
 735	if (keventd_up() && reap_work->work.func == NULL) {
 736		init_reap_node(cpu);
 737		INIT_DEFERRABLE_WORK(reap_work, cache_reap);
 738		schedule_delayed_work_on(cpu, reap_work,
 739					__round_jiffies_relative(HZ, cpu));
 740	}
 741}
 742
 743static struct array_cache *alloc_arraycache(int node, int entries,
 744					    int batchcount, gfp_t gfp)
 745{
 746	int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
 747	struct array_cache *nc = NULL;
 748
 749	nc = kmalloc_node(memsize, gfp, node);
 750	/*
  751	 * The array_cache structures contain pointers to free objects.
 752	 * However, when such objects are allocated or transferred to another
 753	 * cache the pointers are not cleared and they could be counted as
 754	 * valid references during a kmemleak scan. Therefore, kmemleak must
 755	 * not scan such objects.
 756	 */
 757	kmemleak_no_scan(nc);
 758	if (nc) {
 759		nc->avail = 0;
 760		nc->limit = entries;
 761		nc->batchcount = batchcount;
 762		nc->touched = 0;
 763		spin_lock_init(&nc->lock);
 764	}
 765	return nc;
 766}
 767
 768static inline bool is_slab_pfmemalloc(struct page *page)
 769{
 770	return PageSlabPfmemalloc(page);
 771}
 
 772
 773/* Clears pfmemalloc_active if no slabs have pfmalloc set */
 774static void recheck_pfmemalloc_active(struct kmem_cache *cachep,
 775						struct array_cache *ac)
 776{
 777	struct kmem_cache_node *n = cachep->node[numa_mem_id()];
 778	struct page *page;
 779	unsigned long flags;
 780
 781	if (!pfmemalloc_active)
 782		return;
 783
 784	spin_lock_irqsave(&n->list_lock, flags);
 785	list_for_each_entry(page, &n->slabs_full, lru)
 786		if (is_slab_pfmemalloc(page))
 787			goto out;
 788
 789	list_for_each_entry(page, &n->slabs_partial, lru)
 790		if (is_slab_pfmemalloc(page))
 791			goto out;
 792
 793	list_for_each_entry(page, &n->slabs_free, lru)
 794		if (is_slab_pfmemalloc(page))
 795			goto out;
 796
 797	pfmemalloc_active = false;
 798out:
 799	spin_unlock_irqrestore(&n->list_lock, flags);
 800}
 801
 802static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
 803						gfp_t flags, bool force_refill)
 804{
 805	int i;
 806	void *objp = ac->entry[--ac->avail];
 807
 808	/* Ensure the caller is allowed to use objects from PFMEMALLOC slab */
 809	if (unlikely(is_obj_pfmemalloc(objp))) {
 810		struct kmem_cache_node *n;
 811
 812		if (gfp_pfmemalloc_allowed(flags)) {
 813			clear_obj_pfmemalloc(&objp);
 814			return objp;
 815		}
 816
 817		/* The caller cannot use PFMEMALLOC objects, find another one */
 818		for (i = 0; i < ac->avail; i++) {
 819			/* If a !PFMEMALLOC object is found, swap them */
 820			if (!is_obj_pfmemalloc(ac->entry[i])) {
 821				objp = ac->entry[i];
 822				ac->entry[i] = ac->entry[ac->avail];
 823				ac->entry[ac->avail] = objp;
 824				return objp;
 825			}
 826		}
 827
 828		/*
 829		 * If there are empty slabs on the slabs_free list and we are
 830		 * being forced to refill the cache, mark this one !pfmemalloc.
 831		 */
 832		n = cachep->node[numa_mem_id()];
 833		if (!list_empty(&n->slabs_free) && force_refill) {
 834			struct page *page = virt_to_head_page(objp);
 835			ClearPageSlabPfmemalloc(page);
 836			clear_obj_pfmemalloc(&objp);
 837			recheck_pfmemalloc_active(cachep, ac);
 838			return objp;
 839		}
 840
 841		/* No !PFMEMALLOC objects available */
 842		ac->avail++;
 843		objp = NULL;
 844	}
 845
 846	return objp;
 847}
 848
 849static inline void *ac_get_obj(struct kmem_cache *cachep,
 850			struct array_cache *ac, gfp_t flags, bool force_refill)
 851{
 852	void *objp;
 853
 854	if (unlikely(sk_memalloc_socks()))
 855		objp = __ac_get_obj(cachep, ac, flags, force_refill);
 856	else
 857		objp = ac->entry[--ac->avail];
 858
 859	return objp;
 860}
 861
 862static void *__ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
 863								void *objp)
 864{
 865	if (unlikely(pfmemalloc_active)) {
 866		/* Some pfmemalloc slabs exist, check if this is one */
 867		struct page *page = virt_to_head_page(objp);
 868		if (PageSlabPfmemalloc(page))
 869			set_obj_pfmemalloc(&objp);
 870	}
 871
 872	return objp;
 873}
 874
 875static inline void ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
 876								void *objp)
 877{
 878	if (unlikely(sk_memalloc_socks()))
 879		objp = __ac_put_obj(cachep, ac, objp);
 880
 881	ac->entry[ac->avail++] = objp;
 882}
 883
 884/*
 885 * Transfer objects in one arraycache to another.
 886 * Locking must be handled by the caller.
 887 *
 888 * Return the number of entries transferred.
 889 */
 890static int transfer_objects(struct array_cache *to,
 891		struct array_cache *from, unsigned int max)
 892{
 893	/* Figure out how many entries to transfer */
 894	int nr = min3(from->avail, max, to->limit - to->avail);
 895
 896	if (!nr)
 897		return 0;
 898
 899	memcpy(to->entry + to->avail, from->entry + from->avail -nr,
 900			sizeof(void *) *nr);
 901
 902	from->avail -= nr;
 903	to->avail += nr;
 904	return nr;
 905}
 906
 907#ifndef CONFIG_NUMA
 908
 909#define drain_alien_cache(cachep, alien) do { } while (0)
 910#define reap_alien(cachep, n) do { } while (0)
 911
 912static inline struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
 913{
 914	return (struct array_cache **)BAD_ALIEN_MAGIC;
 915}
 916
 917static inline void free_alien_cache(struct array_cache **ac_ptr)
 918{
 919}
 920
 921static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 922{
 923	return 0;
 924}
 925
 926static inline void *alternate_node_alloc(struct kmem_cache *cachep,
 927		gfp_t flags)
 928{
 929	return NULL;
 930}
 931
 932static inline void *____cache_alloc_node(struct kmem_cache *cachep,
 933		 gfp_t flags, int nodeid)
 934{
 935	return NULL;
 936}
 937
 938#else	/* CONFIG_NUMA */
 939
 940static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
 941static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
 942
 943static struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
 944{
 945	struct array_cache **ac_ptr;
 946	int memsize = sizeof(void *) * nr_node_ids;
 947	int i;
 948
 949	if (limit > 1)
 950		limit = 12;
 951	ac_ptr = kzalloc_node(memsize, gfp, node);
 952	if (ac_ptr) {
 953		for_each_node(i) {
 954			if (i == node || !node_online(i))
 955				continue;
 956			ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d, gfp);
 957			if (!ac_ptr[i]) {
 958				for (i--; i >= 0; i--)
 959					kfree(ac_ptr[i]);
 960				kfree(ac_ptr);
 961				return NULL;
 962			}
 
 963		}
 964	}
 965	return ac_ptr;
 966}
 967
 968static void free_alien_cache(struct array_cache **ac_ptr)
 969{
 970	int i;
 971
 972	if (!ac_ptr)
 973		return;
 974	for_each_node(i)
 975	    kfree(ac_ptr[i]);
 976	kfree(ac_ptr);
 977}
 978
 979static void __drain_alien_cache(struct kmem_cache *cachep,
 980				struct array_cache *ac, int node)
 981{
 982	struct kmem_cache_node *n = cachep->node[node];
 983
 984	if (ac->avail) {
 985		spin_lock(&n->list_lock);
 986		/*
  987		 * Stuff objects into the remote node's shared array first.
 988		 * That way we could avoid the overhead of putting the objects
 989		 * into the free lists and getting them back later.
 990		 */
 991		if (n->shared)
 992			transfer_objects(n->shared, ac, ac->limit);
 993
 994		free_block(cachep, ac->entry, ac->avail, node);
 995		ac->avail = 0;
 996		spin_unlock(&n->list_lock);
 997	}
 998}
 999
1000/*
1001 * Called from cache_reap() to regularly drain alien caches round robin.
1002 */
1003static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
1004{
1005	int node = __this_cpu_read(slab_reap_node);
1006
1007	if (n->alien) {
1008		struct array_cache *ac = n->alien[node];
 
1009
1010		if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
1011			__drain_alien_cache(cachep, ac, node);
1012			spin_unlock_irq(&ac->lock);
1013		}
1014	}
1015}
1016
1017static void drain_alien_cache(struct kmem_cache *cachep,
1018				struct array_cache **alien)
1019{
1020	int i = 0;
1021	struct array_cache *ac;
1022	unsigned long flags;
1023
1024	for_each_online_node(i) {
1025		ac = alien[i];
1026		if (ac) {
1027			spin_lock_irqsave(&ac->lock, flags);
1028			__drain_alien_cache(cachep, ac, i);
1029			spin_unlock_irqrestore(&ac->lock, flags);
1030		}
1031	}
1032}
1033
1034static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
1035{
1036	int nodeid = page_to_nid(virt_to_page(objp));
1037	struct kmem_cache_node *n;
1038	struct array_cache *alien = NULL;
1039	int node;
1040
1041	node = numa_mem_id();
1042
1043	/*
 1044	 * Make sure we are not freeing an object from another node to the array
1045	 * cache on this cpu.
1046	 */
1047	if (likely(nodeid == node))
1048		return 0;
1049
1050	n = cachep->node[node];
1051	STATS_INC_NODEFREES(cachep);
1052	if (n->alien && n->alien[nodeid]) {
1053		alien = n->alien[nodeid];
1054		spin_lock(&alien->lock);
1055		if (unlikely(alien->avail == alien->limit)) {
1056			STATS_INC_ACOVERFLOW(cachep);
1057			__drain_alien_cache(cachep, alien, nodeid);
1058		}
1059		ac_put_obj(cachep, alien, objp);
1060		spin_unlock(&alien->lock);
1061	} else {
1062		spin_lock(&(cachep->node[nodeid])->list_lock);
1063		free_block(cachep, &objp, 1, nodeid);
1064		spin_unlock(&(cachep->node[nodeid])->list_lock);
1065	}
1066	return 1;
1067}
1068#endif
1069
1070/*
 1071 * Allocates and initializes the kmem_cache_node for a node on each slab cache, used for
1072 * either memory or cpu hotplug.  If memory is being hot-added, the kmem_cache_node
1073 * will be allocated off-node since memory is not yet online for the new node.
 1074 * When hotplugging memory or a cpu, existing nodes are not replaced if
1075 * already in use.
1076 *
1077 * Must hold slab_mutex.
1078 */
1079static int init_cache_node_node(int node)
1080{
1081	struct kmem_cache *cachep;
1082	struct kmem_cache_node *n;
1083	const int memsize = sizeof(struct kmem_cache_node);
1084
1085	list_for_each_entry(cachep, &slab_caches, list) {
1086		/*
1087		 * Set up the kmem_cache_node for cpu before we can
1088		 * begin anything. Make sure some other cpu on this
1089		 * node has not already allocated this
1090		 */
1091		if (!cachep->node[node]) {
1092			n = kmalloc_node(memsize, GFP_KERNEL, node);
1093			if (!n)
1094				return -ENOMEM;
1095			kmem_cache_node_init(n);
1096			n->next_reap = jiffies + REAPTIMEOUT_NODE +
1097			    ((unsigned long)cachep) % REAPTIMEOUT_NODE;
1098
1099			/*
1100			 * The kmem_cache_nodes don't come and go as CPUs
1101			 * come and go.  slab_mutex is sufficient
1102			 * protection here.
1103			 */
1104			cachep->node[node] = n;
1105		}
1106
1107		spin_lock_irq(&cachep->node[node]->list_lock);
1108		cachep->node[node]->free_limit =
1109			(1 + nr_cpus_node(node)) *
1110			cachep->batchcount + cachep->num;
1111		spin_unlock_irq(&cachep->node[node]->list_lock);
1112	}
1113	return 0;
1114}
 
1115
1116static inline int slabs_tofree(struct kmem_cache *cachep,
1117						struct kmem_cache_node *n)
1118{
1119	return (n->free_objects + cachep->num - 1) / cachep->num;
1120}
1121
1122static void cpuup_canceled(long cpu)
1123{
1124	struct kmem_cache *cachep;
1125	struct kmem_cache_node *n = NULL;
1126	int node = cpu_to_mem(cpu);
1127	const struct cpumask *mask = cpumask_of_node(node);
1128
1129	list_for_each_entry(cachep, &slab_caches, list) {
1130		struct array_cache *nc;
1131		struct array_cache *shared;
1132		struct array_cache **alien;
1133
1134		/* cpu is dead; no one can alloc from it. */
1135		nc = cachep->array[cpu];
1136		cachep->array[cpu] = NULL;
1137		n = cachep->node[node];
1138
 
1139		if (!n)
1140			goto free_array_cache;
1141
1142		spin_lock_irq(&n->list_lock);
1143
1144		/* Free limit for this kmem_cache_node */
1145		n->free_limit -= cachep->batchcount;
1146		if (nc)
1147			free_block(cachep, nc->entry, nc->avail, node);
1148
1149		if (!cpumask_empty(mask)) {
1150			spin_unlock_irq(&n->list_lock);
1151			goto free_array_cache;
1152		}
1153
1154		shared = n->shared;
1155		if (shared) {
1156			free_block(cachep, shared->entry,
1157				   shared->avail, node);
1158			n->shared = NULL;
1159		}
1160
1161		alien = n->alien;
1162		n->alien = NULL;
1163
1164		spin_unlock_irq(&n->list_lock);
1165
1166		kfree(shared);
1167		if (alien) {
1168			drain_alien_cache(cachep, alien);
1169			free_alien_cache(alien);
1170		}
1171free_array_cache:
1172		kfree(nc);
 
1173	}
1174	/*
1175	 * In the previous loop, all the objects were freed to
1176	 * the respective cache's slabs,  now we can go ahead and
1177	 * shrink each nodelist to its limit.
1178	 */
1179	list_for_each_entry(cachep, &slab_caches, list) {
1180		n = cachep->node[node];
1181		if (!n)
1182			continue;
1183		drain_freelist(cachep, n, slabs_tofree(cachep, n));
1184	}
1185}
1186
1187static int cpuup_prepare(long cpu)
1188{
1189	struct kmem_cache *cachep;
1190	struct kmem_cache_node *n = NULL;
1191	int node = cpu_to_mem(cpu);
1192	int err;
1193
1194	/*
1195	 * We need to do this right in the beginning since
1196	 * alloc_arraycache's are going to use this list.
1197	 * kmalloc_node allows us to add the slab to the right
1198	 * kmem_cache_node and not this cpu's kmem_cache_node
1199	 */
1200	err = init_cache_node_node(node);
1201	if (err < 0)
1202		goto bad;
1203
1204	/*
1205	 * Now we can go ahead with allocating the shared arrays and
1206	 * array caches
1207	 */
1208	list_for_each_entry(cachep, &slab_caches, list) {
1209		struct array_cache *nc;
1210		struct array_cache *shared = NULL;
1211		struct array_cache **alien = NULL;
1212
1213		nc = alloc_arraycache(node, cachep->limit,
1214					cachep->batchcount, GFP_KERNEL);
1215		if (!nc)
1216			goto bad;
1217		if (cachep->shared) {
1218			shared = alloc_arraycache(node,
1219				cachep->shared * cachep->batchcount,
1220				0xbaadf00d, GFP_KERNEL);
1221			if (!shared) {
1222				kfree(nc);
1223				goto bad;
1224			}
1225		}
1226		if (use_alien_caches) {
1227			alien = alloc_alien_cache(node, cachep->limit, GFP_KERNEL);
1228			if (!alien) {
1229				kfree(shared);
1230				kfree(nc);
1231				goto bad;
1232			}
1233		}
1234		cachep->array[cpu] = nc;
1235		n = cachep->node[node];
1236		BUG_ON(!n);
1237
1238		spin_lock_irq(&n->list_lock);
1239		if (!n->shared) {
1240			/*
1241			 * We are serialised from CPU_DEAD or
1242			 * CPU_UP_CANCELLED by the cpucontrol lock
1243			 */
1244			n->shared = shared;
1245			shared = NULL;
1246		}
1247#ifdef CONFIG_NUMA
1248		if (!n->alien) {
1249			n->alien = alien;
1250			alien = NULL;
1251		}
1252#endif
1253		spin_unlock_irq(&n->list_lock);
1254		kfree(shared);
1255		free_alien_cache(alien);
1256		if (cachep->flags & SLAB_DEBUG_OBJECTS)
1257			slab_set_debugobj_lock_classes_node(cachep, node);
1258		else if (!OFF_SLAB(cachep) &&
1259			 !(cachep->flags & SLAB_DESTROY_BY_RCU))
1260			on_slab_lock_classes_node(cachep, node);
1261	}
1262	init_node_lock_keys(node);
1263
1264	return 0;
1265bad:
1266	cpuup_canceled(cpu);
1267	return -ENOMEM;
1268}
1269
1270static int cpuup_callback(struct notifier_block *nfb,
1271				    unsigned long action, void *hcpu)
1272{
1273	long cpu = (long)hcpu;
1274	int err = 0;
1275
1276	switch (action) {
1277	case CPU_UP_PREPARE:
1278	case CPU_UP_PREPARE_FROZEN:
1279		mutex_lock(&slab_mutex);
1280		err = cpuup_prepare(cpu);
1281		mutex_unlock(&slab_mutex);
1282		break;
1283	case CPU_ONLINE:
1284	case CPU_ONLINE_FROZEN:
1285		start_cpu_timer(cpu);
1286		break;
1287#ifdef CONFIG_HOTPLUG_CPU
1288  	case CPU_DOWN_PREPARE:
1289  	case CPU_DOWN_PREPARE_FROZEN:
1290		/*
1291		 * Shutdown cache reaper. Note that the slab_mutex is
1292		 * held so that if cache_reap() is invoked it cannot do
1293		 * anything expensive but will only modify reap_work
1294		 * and reschedule the timer.
1295		*/
1296		cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
1297		/* Now the cache_reaper is guaranteed to be not running. */
1298		per_cpu(slab_reap_work, cpu).work.func = NULL;
1299  		break;
1300  	case CPU_DOWN_FAILED:
1301  	case CPU_DOWN_FAILED_FROZEN:
1302		start_cpu_timer(cpu);
1303  		break;
1304	case CPU_DEAD:
1305	case CPU_DEAD_FROZEN:
1306		/*
1307		 * Even if all the cpus of a node are down, we don't free the
1308		 * kmem_cache_node of any cache. This to avoid a race between
1309		 * cpu_down, and a kmalloc allocation from another cpu for
1310		 * memory from the node of the cpu going down.  The node
1311		 * structure is usually allocated from kmem_cache_create() and
1312		 * gets destroyed at kmem_cache_destroy().
1313		 */
1314		/* fall through */
1315#endif
1316	case CPU_UP_CANCELED:
1317	case CPU_UP_CANCELED_FROZEN:
1318		mutex_lock(&slab_mutex);
1319		cpuup_canceled(cpu);
1320		mutex_unlock(&slab_mutex);
1321		break;
1322	}
1323	return notifier_from_errno(err);
1324}
1325
1326static struct notifier_block cpucache_notifier = {
1327	&cpuup_callback, NULL, 0
1328};
1329
1330#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
1331/*
1332 * Drains freelist for a node on each slab cache, used for memory hot-remove.
1333 * Returns -EBUSY if all objects cannot be drained so that the node is not
1334 * removed.
1335 *
1336 * Must hold slab_mutex.
1337 */
1338static int __meminit drain_cache_node_node(int node)
1339{
1340	struct kmem_cache *cachep;
1341	int ret = 0;
1342
1343	list_for_each_entry(cachep, &slab_caches, list) {
1344		struct kmem_cache_node *n;
1345
1346		n = cachep->node[node];
1347		if (!n)
1348			continue;
1349
1350		drain_freelist(cachep, n, slabs_tofree(cachep, n));
1351
1352		if (!list_empty(&n->slabs_full) ||
1353		    !list_empty(&n->slabs_partial)) {
1354			ret = -EBUSY;
1355			break;
1356		}
1357	}
1358	return ret;
1359}
1360
1361static int __meminit slab_memory_callback(struct notifier_block *self,
1362					unsigned long action, void *arg)
1363{
1364	struct memory_notify *mnb = arg;
1365	int ret = 0;
1366	int nid;
1367
1368	nid = mnb->status_change_nid;
1369	if (nid < 0)
1370		goto out;
1371
1372	switch (action) {
1373	case MEM_GOING_ONLINE:
1374		mutex_lock(&slab_mutex);
1375		ret = init_cache_node_node(nid);
1376		mutex_unlock(&slab_mutex);
1377		break;
1378	case MEM_GOING_OFFLINE:
1379		mutex_lock(&slab_mutex);
1380		ret = drain_cache_node_node(nid);
1381		mutex_unlock(&slab_mutex);
1382		break;
1383	case MEM_ONLINE:
1384	case MEM_OFFLINE:
1385	case MEM_CANCEL_ONLINE:
1386	case MEM_CANCEL_OFFLINE:
1387		break;
1388	}
1389out:
1390	return notifier_from_errno(ret);
1391}
1392#endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */
1393
1394/*
1395 * swap the static kmem_cache_node with kmalloced memory
1396 */
1397static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list,
1398				int nodeid)
1399{
1400	struct kmem_cache_node *ptr;
1401
1402	ptr = kmalloc_node(sizeof(struct kmem_cache_node), GFP_NOWAIT, nodeid);
1403	BUG_ON(!ptr);
1404
1405	memcpy(ptr, list, sizeof(struct kmem_cache_node));
1406	/*
1407	 * Do not assume that spinlocks can be initialized via memcpy:
1408	 */
1409	spin_lock_init(&ptr->list_lock);
1410
1411	MAKE_ALL_LISTS(cachep, ptr, nodeid);
1412	cachep->node[nodeid] = ptr;
1413}
1414
1415/*
1416 * For setting up all the kmem_cache_node structures for a cache whose
1417 * buffer_size is the same as the size of struct kmem_cache_node.
1418 */
1419static void __init set_up_node(struct kmem_cache *cachep, int index)
1420{
1421	int node;
1422
1423	for_each_online_node(node) {
1424		cachep->node[node] = &init_kmem_cache_node[index + node];
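		/*
		 * Stagger next_reap per cache: the cache pointer is used as a
		 * cheap per-cache offset so that all caches are not reaped on
		 * the same timer tick.
		 */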
1425		cachep->node[node]->next_reap = jiffies +
1426		    REAPTIMEOUT_NODE +
1427		    ((unsigned long)cachep) % REAPTIMEOUT_NODE;
1428	}
1429}
1430
1431/*
1432 * The memory after the last cpu cache pointer is used for
1433 * the node pointer.
1434 */
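/*
 * Illustrative layout (matching the size passed to create_boot_cache()
 * in kmem_cache_init() below):
 *
 *	| struct kmem_cache ... | array[0 .. nr_cpu_ids-1] | node[0 .. nr_node_ids-1] |
 *	                                                     ^ cachep->node points here
 */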
1435static void setup_node_pointer(struct kmem_cache *cachep)
1436{
1437	cachep->node = (struct kmem_cache_node **)&cachep->array[nr_cpu_ids];
1438}
1439
1440/*
1441 * Initialisation.  Called after the page allocator has been initialised and
1442 * before smp_init().
1443 */
1444void __init kmem_cache_init(void)
1445{
1446	int i;
1447
1448	BUILD_BUG_ON(sizeof(((struct page *)NULL)->lru) <
1449					sizeof(struct rcu_head));
1450	kmem_cache = &kmem_cache_boot;
1451	setup_node_pointer(kmem_cache);
1452
1453	if (num_possible_nodes() == 1)
1454		use_alien_caches = 0;
1455
1456	for (i = 0; i < NUM_INIT_LISTS; i++)
1457		kmem_cache_node_init(&init_kmem_cache_node[i]);
1458
1459	set_up_node(kmem_cache, CACHE_CACHE);
1460
1461	/*
1462	 * Fragmentation resistance on low memory - only use bigger
1463	 * page orders on machines with more than 32MB of memory if
1464	 * not overridden on the command line.
1465	 */
1466	if (!slab_max_order_set && totalram_pages > (32 << 20) >> PAGE_SHIFT)
1467		slab_max_order = SLAB_MAX_ORDER_HI;
1468
1469	/* Bootstrap is tricky, because several objects are allocated
1470	 * from caches that do not exist yet:
1471	 * 1) initialize the kmem_cache cache: it contains the struct
1472	 *    kmem_cache structures of all caches, except kmem_cache itself:
1473	 *    kmem_cache is statically allocated.
1474	 *    Initially an __init data area is used for the head array and the
1475	 *    kmem_cache_node structures, it's replaced with a kmalloc allocated
1476	 *    array at the end of the bootstrap.
1477	 * 2) Create the first kmalloc cache.
1478	 *    The struct kmem_cache for the new cache is allocated normally.
1479	 *    An __init data area is used for the head array.
1480	 * 3) Create the remaining kmalloc caches, with minimally sized
1481	 *    head arrays.
1482	 * 4) Replace the __init data head arrays for kmem_cache and the first
1483	 *    kmalloc cache with kmalloc allocated arrays.
1484	 * 5) Replace the __init data for kmem_cache_node for kmem_cache and
1485 *    the other caches with kmalloc allocated memory.
1486	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
1487	 */
1488
1489	/* 1) create the kmem_cache */
1490
1491	/*
1492	 * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
1493	 */
1494	create_boot_cache(kmem_cache, "kmem_cache",
1495		offsetof(struct kmem_cache, array[nr_cpu_ids]) +
1496				  nr_node_ids * sizeof(struct kmem_cache_node *),
1497				  SLAB_HWCACHE_ALIGN);
1498	list_add(&kmem_cache->list, &slab_caches);
1499
1500	/* 2+3) create the kmalloc caches */
1501
1502	/*
1503	 * Initialize the caches that provide memory for the array cache and the
1504	 * kmem_cache_node structures first.  Without this, further allocations will
1505	 * bug.
1506	 */
1507
1508	kmalloc_caches[INDEX_AC] = create_kmalloc_cache("kmalloc-ac",
1509					kmalloc_size(INDEX_AC), ARCH_KMALLOC_FLAGS);
1510
1511	if (INDEX_AC != INDEX_NODE)
1512		kmalloc_caches[INDEX_NODE] =
1513			create_kmalloc_cache("kmalloc-node",
1514				kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
1515
1516	slab_early_init = 0;
1517
1518	/* 4) Replace the bootstrap head arrays */
1519	{
1520		struct array_cache *ptr;
1521
1522		ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
1523
1524		memcpy(ptr, cpu_cache_get(kmem_cache),
1525		       sizeof(struct arraycache_init));
1526		/*
1527		 * Do not assume that spinlocks can be initialized via memcpy:
1528		 */
1529		spin_lock_init(&ptr->lock);
1530
1531		kmem_cache->array[smp_processor_id()] = ptr;
1532
1533		ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
1534
1535		BUG_ON(cpu_cache_get(kmalloc_caches[INDEX_AC])
1536		       != &initarray_generic.cache);
1537		memcpy(ptr, cpu_cache_get(kmalloc_caches[INDEX_AC]),
1538		       sizeof(struct arraycache_init));
1539		/*
1540		 * Do not assume that spinlocks can be initialized via memcpy:
1541		 */
1542		spin_lock_init(&ptr->lock);
1543
1544		kmalloc_caches[INDEX_AC]->array[smp_processor_id()] = ptr;
1545	}
1546	/* 5) Replace the bootstrap kmem_cache_node */
1547	{
1548		int nid;
1549
1550		for_each_online_node(nid) {
1551			init_list(kmem_cache, &init_kmem_cache_node[CACHE_CACHE + nid], nid);
1552
1553			init_list(kmalloc_caches[INDEX_AC],
1554				  &init_kmem_cache_node[SIZE_AC + nid], nid);
1555
1556			if (INDEX_AC != INDEX_NODE) {
1557				init_list(kmalloc_caches[INDEX_NODE],
1558					  &init_kmem_cache_node[SIZE_NODE + nid], nid);
1559			}
1560		}
1561	}
1562
1563	create_kmalloc_caches(ARCH_KMALLOC_FLAGS);
1564}
1565
1566void __init kmem_cache_init_late(void)
1567{
1568	struct kmem_cache *cachep;
1569
1570	slab_state = UP;
1571
1572	/* 6) resize the head arrays to their final sizes */
1573	mutex_lock(&slab_mutex);
1574	list_for_each_entry(cachep, &slab_caches, list)
1575		if (enable_cpucache(cachep, GFP_NOWAIT))
1576			BUG();
1577	mutex_unlock(&slab_mutex);
1578
1579	/* Annotate slab for lockdep -- annotate the malloc caches */
1580	init_lock_keys();
1581
1582	/* Done! */
1583	slab_state = FULL;
1584
1585	/*
1586	 * Register a cpu startup notifier callback that initializes
1587	 * the per-cpu array caches for all new cpus
1588	 */
1589	register_cpu_notifier(&cpucache_notifier);
1590
1591#ifdef CONFIG_NUMA
1592	/*
1593	 * Register a memory hotplug callback that initializes and frees
1594	 * kmem_cache_node structures.
1595	 */
1596	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
1597#endif
1598
1599	/*
1600	 * The reap timers are started later, with a module init call: That part
1601	 * of the kernel is not yet operational.
1602	 */
1603}
1604
1605static int __init cpucache_init(void)
1606{
1607	int cpu;
1608
1609	/*
1610	 * Register the timers that return unneeded pages to the page allocator
1611	 */
1612	for_each_online_cpu(cpu)
1613		start_cpu_timer(cpu);
 
1614
1615	/* Done! */
1616	slab_state = FULL;
1617	return 0;
1618}
1619__initcall(cpucache_init);
1620
1621static noinline void
1622slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
1623{
 
1624	struct kmem_cache_node *n;
1625	struct page *page;
1626	unsigned long flags;
1627	int node;
1628
1629	printk(KERN_WARNING
1630		"SLAB: Unable to allocate memory on node %d (gfp=0x%x)\n",
1631		nodeid, gfpflags);
1632	printk(KERN_WARNING "  cache: %s, object size: %d, order: %d\n",
1633		cachep->name, cachep->size, cachep->gfporder);
1634
1635	for_each_online_node(node) {
1636		unsigned long active_objs = 0, num_objs = 0, free_objects = 0;
1637		unsigned long active_slabs = 0, num_slabs = 0;
1638
1639		n = cachep->node[node];
1640		if (!n)
1641			continue;
1642
1643		spin_lock_irqsave(&n->list_lock, flags);
1644		list_for_each_entry(page, &n->slabs_full, lru) {
1645			active_objs += cachep->num;
1646			active_slabs++;
1647		}
1648		list_for_each_entry(page, &n->slabs_partial, lru) {
1649			active_objs += page->active;
1650			active_slabs++;
1651		}
1652		list_for_each_entry(page, &n->slabs_free, lru)
1653			num_slabs++;
1654
1655		free_objects += n->free_objects;
1656		spin_unlock_irqrestore(&n->list_lock, flags);
1657
1658		num_slabs += active_slabs;
1659		num_objs = num_slabs * cachep->num;
1660		printk(KERN_WARNING
1661			"  node %d: slabs: %ld/%ld, objs: %ld/%ld, free: %ld\n",
1662			node, active_slabs, num_slabs, active_objs, num_objs,
1663			free_objects);
1664	}
 
1665}
1666
1667/*
1668 * Interface to system's page allocator. No need to hold the cache-lock.
1669 *
1670 * If we requested dmaable memory, we will get it. Even if we
1671 * did not request dmaable memory, we might get it, but that
1672 * would be relatively rare and ignorable.
1673 */
1674static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
1675								int nodeid)
1676{
1677	struct page *page;
1678	int nr_pages;
1679
1680	flags |= cachep->allocflags;
1681	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1682		flags |= __GFP_RECLAIMABLE;
1683
1684	page = alloc_pages_exact_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
1685	if (!page) {
1686		if (!(flags & __GFP_NOWARN) && printk_ratelimit())
1687			slab_out_of_memory(cachep, flags, nodeid);
1688		return NULL;
1689	}
1690
1691	/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
1692	if (unlikely(page->pfmemalloc))
1693		pfmemalloc_active = true;
 
1694
1695	nr_pages = (1 << cachep->gfporder);
1696	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1697		add_zone_page_state(page_zone(page),
1698			NR_SLAB_RECLAIMABLE, nr_pages);
1699	else
1700		add_zone_page_state(page_zone(page),
1701			NR_SLAB_UNRECLAIMABLE, nr_pages);
1702	__SetPageSlab(page);
1703	if (page->pfmemalloc)
1704		SetPageSlabPfmemalloc(page);
1705	memcg_bind_pages(cachep, cachep->gfporder);
1706
1707	if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
1708		kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);
1709
1710		if (cachep->ctor)
1711			kmemcheck_mark_uninitialized_pages(page, nr_pages);
1712		else
1713			kmemcheck_mark_unallocated_pages(page, nr_pages);
1714	}
1715
1716	return page;
1717}
1718
1719/*
1720 * Interface to system's page release.
1721 */
1722static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
1723{
1724	const unsigned long nr_freed = (1 << cachep->gfporder);
1725
1726	kmemcheck_free_shadow(page, cachep->gfporder);
1727
1728	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1729		sub_zone_page_state(page_zone(page),
1730				NR_SLAB_RECLAIMABLE, nr_freed);
1731	else
1732		sub_zone_page_state(page_zone(page),
1733				NR_SLAB_UNRECLAIMABLE, nr_freed);
1734
1735	BUG_ON(!PageSlab(page));
1736	__ClearPageSlabPfmemalloc(page);
1737	__ClearPageSlab(page);
1738	page_mapcount_reset(page);
1739	page->mapping = NULL;
1740
1741	memcg_release_pages(cachep, cachep->gfporder);
1742	if (current->reclaim_state)
1743		current->reclaim_state->reclaimed_slab += nr_freed;
1744	__free_memcg_kmem_pages(page, cachep->gfporder);
 
1745}
1746
1747static void kmem_rcu_free(struct rcu_head *head)
1748{
1749	struct kmem_cache *cachep;
1750	struct page *page;
1751
1752	page = container_of(head, struct page, rcu_head);
1753	cachep = page->slab_cache;
1754
1755	kmem_freepages(cachep, page);
1756}
1757
1758#if DEBUG
1759
1760#ifdef CONFIG_DEBUG_PAGEALLOC
1761static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
1762			    unsigned long caller)
1763{
1764	int size = cachep->object_size;
1765
1766	addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];
1767
1768	if (size < 5 * sizeof(unsigned long))
1769		return;
1770
1771	*addr++ = 0x12345678;
1772	*addr++ = caller;
1773	*addr++ = smp_processor_id();
1774	size -= 3 * sizeof(unsigned long);
1775	{
1776		unsigned long *sptr = &caller;
1777		unsigned long svalue;
1778
1779		while (!kstack_end(sptr)) {
1780			svalue = *sptr++;
1781			if (kernel_text_address(svalue)) {
1782				*addr++ = svalue;
1783				size -= sizeof(unsigned long);
1784				if (size <= sizeof(unsigned long))
1785					break;
1786			}
1787		}
1788
1789	}
1790	*addr++ = 0x87654321;
1791}
1792#endif
1793
1794static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
1795{
1796	int size = cachep->object_size;
1797	addr = &((char *)addr)[obj_offset(cachep)];
1798
1799	memset(addr, val, size);
1800	*(unsigned char *)(addr + size - 1) = POISON_END;
1801}
1802
1803static void dump_line(char *data, int offset, int limit)
1804{
1805	int i;
1806	unsigned char error = 0;
1807	int bad_count = 0;
1808
1809	printk(KERN_ERR "%03x: ", offset);
1810	for (i = 0; i < limit; i++) {
1811		if (data[offset + i] != POISON_FREE) {
1812			error = data[offset + i];
1813			bad_count++;
1814		}
1815	}
1816	print_hex_dump(KERN_CONT, "", 0, 16, 1,
1817			&data[offset], limit, 1);
1818
1819	if (bad_count == 1) {
1820		error ^= POISON_FREE;
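		/*
		 * error now holds the XOR against the poison pattern; a
		 * power-of-two value means exactly one bit was flipped.
		 */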
1821		if (!(error & (error - 1))) {
1822			printk(KERN_ERR "Single bit error detected. Probably "
1823					"bad RAM.\n");
1824#ifdef CONFIG_X86
1825			printk(KERN_ERR "Run memtest86+ or a similar memory "
1826					"test tool.\n");
1827#else
1828			printk(KERN_ERR "Run a memory test tool.\n");
1829#endif
1830		}
1831	}
1832}
1833#endif
1834
1835#if DEBUG
1836
1837static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
1838{
1839	int i, size;
1840	char *realobj;
1841
1842	if (cachep->flags & SLAB_RED_ZONE) {
1843		printk(KERN_ERR "Redzone: 0x%llx/0x%llx.\n",
1844			*dbg_redzone1(cachep, objp),
1845			*dbg_redzone2(cachep, objp));
1846	}
1847
1848	if (cachep->flags & SLAB_STORE_USER) {
1849		printk(KERN_ERR "Last user: [<%p>](%pSR)\n",
1850		       *dbg_userword(cachep, objp),
1851		       *dbg_userword(cachep, objp));
1852	}
1853	realobj = (char *)objp + obj_offset(cachep);
1854	size = cachep->object_size;
1855	for (i = 0; i < size && lines; i += 16, lines--) {
1856		int limit;
1857		limit = 16;
1858		if (i + limit > size)
1859			limit = size - i;
1860		dump_line(realobj, i, limit);
1861	}
1862}
1863
1864static void check_poison_obj(struct kmem_cache *cachep, void *objp)
1865{
1866	char *realobj;
1867	int size, i;
1868	int lines = 0;
1869
1870	realobj = (char *)objp + obj_offset(cachep);
1871	size = cachep->object_size;
1872
1873	for (i = 0; i < size; i++) {
1874		char exp = POISON_FREE;
1875		if (i == size - 1)
1876			exp = POISON_END;
1877		if (realobj[i] != exp) {
1878			int limit;
1879			/* Mismatch ! */
1880			/* Print header */
1881			if (lines == 0) {
1882				printk(KERN_ERR
1883					"Slab corruption (%s): %s start=%p, len=%d\n",
1884					print_tainted(), cachep->name, realobj, size);
1885				print_objinfo(cachep, objp, 0);
1886			}
1887			/* Hexdump the affected line */
1888			i = (i / 16) * 16;
1889			limit = 16;
1890			if (i + limit > size)
1891				limit = size - i;
1892			dump_line(realobj, i, limit);
1893			i += 16;
1894			lines++;
1895			/* Limit to 5 lines */
1896			if (lines > 5)
1897				break;
1898		}
1899	}
1900	if (lines != 0) {
1901		/* Print some data about the neighboring objects, if they
1902		 * exist:
1903		 */
1904		struct page *page = virt_to_head_page(objp);
1905		unsigned int objnr;
1906
1907		objnr = obj_to_index(cachep, page, objp);
1908		if (objnr) {
1909			objp = index_to_obj(cachep, page, objnr - 1);
1910			realobj = (char *)objp + obj_offset(cachep);
1911			printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
1912			       realobj, size);
1913			print_objinfo(cachep, objp, 2);
1914		}
1915		if (objnr + 1 < cachep->num) {
1916			objp = index_to_obj(cachep, page, objnr + 1);
1917			realobj = (char *)objp + obj_offset(cachep);
1918			printk(KERN_ERR "Next obj: start=%p, len=%d\n",
1919			       realobj, size);
1920			print_objinfo(cachep, objp, 2);
1921		}
1922	}
1923}
1924#endif
1925
1926#if DEBUG
1927static void slab_destroy_debugcheck(struct kmem_cache *cachep,
1928						struct page *page)
1929{
1930	int i;
1931	for (i = 0; i < cachep->num; i++) {
1932		void *objp = index_to_obj(cachep, page, i);
1933
1934		if (cachep->flags & SLAB_POISON) {
1935#ifdef CONFIG_DEBUG_PAGEALLOC
1936			if (cachep->size % PAGE_SIZE == 0 &&
1937					OFF_SLAB(cachep))
1938				kernel_map_pages(virt_to_page(objp),
1939					cachep->size / PAGE_SIZE, 1);
1940			else
1941				check_poison_obj(cachep, objp);
1942#else
1943			check_poison_obj(cachep, objp);
1944#endif
1945		}
1946		if (cachep->flags & SLAB_RED_ZONE) {
1947			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
1948				slab_error(cachep, "start of a freed object "
1949					   "was overwritten");
1950			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
1951				slab_error(cachep, "end of a freed object "
1952					   "was overwritten");
1953		}
1954	}
1955}
1956#else
1957static void slab_destroy_debugcheck(struct kmem_cache *cachep,
1958						struct page *page)
1959{
1960}
1961#endif
1962
1963/**
1964 * slab_destroy - destroy and release all objects in a slab
1965 * @cachep: cache pointer being destroyed
1966 * @page: page pointer being destroyed
1967 *
1968 * Destroy all the objs in a slab, and release the mem back to the system.
1969 * Before calling the slab must have been unlinked from the cache.  The
1970 * cache-lock is not held/needed.
1971 */
1972static void slab_destroy(struct kmem_cache *cachep, struct page *page)
1973{
1974	void *freelist;
1975
1976	freelist = page->freelist;
1977	slab_destroy_debugcheck(cachep, page);
1978	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
1979		struct rcu_head *head;
1980
1981		/*
1982		 * RCU free overloads the RCU head over the LRU.
1983		 * slab_page has been overloaded over the LRU,
1984		 * however it is not used from now on so that
1985		 * we can use it safely.
1986		 */
1987		head = (void *)&page->rcu_head;
1988		call_rcu(head, kmem_rcu_free);
1989
1990	} else {
1991		kmem_freepages(cachep, page);
1992	}
1993
1994	/*
1995	 * From now on, we don't use freelist
1996	 * although actual page can be freed in rcu context
1997	 */
1998	if (OFF_SLAB(cachep))
1999		kmem_cache_free(cachep->freelist_cache, freelist);
2000}
2001
2002/**
2003 * calculate_slab_order - calculate size (page order) of slabs
2004 * @cachep: pointer to the cache that is being created
2005 * @size: size of objects to be created in this cache.
2006 * @align: required alignment for the objects.
2007 * @flags: slab allocation flags
2008 *
2009 * Also calculates the number of objects per slab.
2010 *
2011 * This could be made much more intelligent.  For now, try to avoid using
2012 * high order pages for slabs.  When the gfp() functions are more friendly
2013 * towards high-order requests, this should be changed.
2014 */
2015static size_t calculate_slab_order(struct kmem_cache *cachep,
2016			size_t size, size_t align, unsigned long flags)
2017{
2018	unsigned long offslab_limit;
2019	size_t left_over = 0;
2020	int gfporder;
2021
2022	for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
2023		unsigned int num;
2024		size_t remainder;
2025
2026		cache_estimate(gfporder, size, align, flags, &remainder, &num);
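		/*
		 * cache_estimate() reports how many objects of this size fit
		 * into a slab of 2^gfporder pages and how many bytes are left
		 * over.
		 */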
2027		if (!num)
2028			continue;
2029
2030		/* Can't handle number of objects more than SLAB_OBJ_MAX_NUM */
2031		if (num > SLAB_OBJ_MAX_NUM)
2032			break;
2033
2034		if (flags & CFLGS_OFF_SLAB) {
2035			/*
2036			 * Max number of objs-per-slab for caches which
2037			 * use off-slab slabs. Needed to avoid a possible
2038			 * looping condition in cache_grow().
2039			 */
2040			offslab_limit = size;
2041			offslab_limit /= sizeof(freelist_idx_t);
2042
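			/*
			 * e.g. assuming 1-byte freelist indices, a 256-byte
			 * object caps an off-slab slab at 256 objects, keeping
			 * the freelist no larger than one object of this cache.
			 */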
2043			if (num > offslab_limit)
2044				break;
 
2045		}
2046
2047		/* Found something acceptable - save it away */
2048		cachep->num = num;
2049		cachep->gfporder = gfporder;
2050		left_over = remainder;
2051
2052		/*
2053		 * A VFS-reclaimable slab tends to have most allocations
2054		 * as GFP_NOFS and we really don't want to have to be allocating
2055		 * higher-order pages when we are unable to shrink dcache.
2056		 */
2057		if (flags & SLAB_RECLAIM_ACCOUNT)
2058			break;
2059
2060		/*
2061		 * Large number of objects is good, but very large slabs are
2062		 * currently bad for the gfp()s.
2063		 */
2064		if (gfporder >= slab_max_order)
2065			break;
2066
2067		/*
2068		 * Acceptable internal fragmentation?
2069		 */
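		/*
		 * i.e. accept once no more than 1/8 of the slab is wasted -
		 * at most 512 bytes for an order-0 slab with 4K pages.
		 */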
2070		if (left_over * 8 <= (PAGE_SIZE << gfporder))
2071			break;
2072	}
2073	return left_over;
2074}
2075
2076static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
2077{
2078	if (slab_state >= FULL)
2079		return enable_cpucache(cachep, gfp);
2080
2081	if (slab_state == DOWN) {
2082		/*
2083		 * Note: Creation of first cache (kmem_cache).
2084		 * The setup_node is taken care
2085		 * of by the caller of __kmem_cache_create
2086		 */
2087		cachep->array[smp_processor_id()] = &initarray_generic.cache;
2088		slab_state = PARTIAL;
2089	} else if (slab_state == PARTIAL) {
2090		/*
2091		 * Note: the second kmem_cache_create must create the cache
2092		 * that's used by kmalloc(24), otherwise the creation of
2093		 * further caches will BUG().
2094		 */
2095		cachep->array[smp_processor_id()] = &initarray_generic.cache;
2096
2097		/*
2098		 * If the cache that's used by kmalloc(sizeof(kmem_cache_node)) is
2099	 * the second cache, then we need to set up all its kmem_cache_node structures,
2100		 * otherwise the creation of further caches will BUG().
2101		 */
2102		set_up_node(cachep, SIZE_AC);
2103		if (INDEX_AC == INDEX_NODE)
2104			slab_state = PARTIAL_NODE;
2105		else
2106			slab_state = PARTIAL_ARRAYCACHE;
2107	} else {
2108		/* Remaining boot caches */
2109		cachep->array[smp_processor_id()] =
2110			kmalloc(sizeof(struct arraycache_init), gfp);
2111
2112		if (slab_state == PARTIAL_ARRAYCACHE) {
2113			set_up_node(cachep, SIZE_NODE);
2114			slab_state = PARTIAL_NODE;
2115		} else {
2116			int node;
2117			for_each_online_node(node) {
2118				cachep->node[node] =
2119				    kmalloc_node(sizeof(struct kmem_cache_node),
2120						gfp, node);
2121				BUG_ON(!cachep->node[node]);
2122				kmem_cache_node_init(cachep->node[node]);
2123			}
2124		}
2125	}
 
2126	cachep->node[numa_mem_id()]->next_reap =
2127			jiffies + REAPTIMEOUT_NODE +
2128			((unsigned long)cachep) % REAPTIMEOUT_NODE;
2129
2130	cpu_cache_get(cachep)->avail = 0;
2131	cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
2132	cpu_cache_get(cachep)->batchcount = 1;
2133	cpu_cache_get(cachep)->touched = 0;
2134	cachep->batchcount = 1;
2135	cachep->limit = BOOT_CPUCACHE_ENTRIES;
2136	return 0;
2137}
2138
2139/**
2140 * __kmem_cache_create - Create a cache.
2141 * @cachep: cache management descriptor
2142 * @flags: SLAB flags
2143 *
2144 * Returns a ptr to the cache on success, NULL on failure.
2145 * Cannot be called within an interrupt, but can be interrupted.
2146 * The @ctor is run when new pages are allocated by the cache.
2147 *
2148 * The flags are
2149 *
2150 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
2151 * to catch references to uninitialised memory.
2152 *
2153 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
2154 * for buffer overruns.
2155 *
2156 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
2157 * cacheline.  This can be beneficial if you're counting cycles as closely
2158 * as davem.
2159 */
2160int
2161__kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
2162{
2163	size_t left_over, freelist_size, ralign;
2164	gfp_t gfp;
2165	int err;
2166	size_t size = cachep->size;
2167
2168#if DEBUG
2169#if FORCED_DEBUG
2170	/*
2171	 * Enable redzoning and last user accounting, except for caches with
2172	 * large objects, if the increased size would increase the object size
2173	 * above the next power of two: caches with object sizes just above a
2174	 * power of two have a significant amount of internal fragmentation.
2175	 */
2176	if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
2177						2 * sizeof(unsigned long long)))
2178		flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
2179	if (!(flags & SLAB_DESTROY_BY_RCU))
2180		flags |= SLAB_POISON;
2181#endif
2182	if (flags & SLAB_DESTROY_BY_RCU)
2183		BUG_ON(flags & SLAB_POISON);
2184#endif
2185
2186	/*
2187	 * Check that size is in terms of words.  This is needed to avoid
2188	 * unaligned accesses for some archs when redzoning is used, and makes
2189	 * sure any on-slab bufctl's are also correctly aligned.
2190	 */
2191	if (size & (BYTES_PER_WORD - 1)) {
2192		size += (BYTES_PER_WORD - 1);
2193		size &= ~(BYTES_PER_WORD - 1);
2194	}
2195
2196	/*
2197	 * Redzoning and user store require word alignment or possibly larger.
2198	 * Note this will be overridden by architecture or caller mandated
2199	 * alignment if either is greater than BYTES_PER_WORD.
2200	 */
2201	if (flags & SLAB_STORE_USER)
2202		ralign = BYTES_PER_WORD;
2203
2204	if (flags & SLAB_RED_ZONE) {
2205		ralign = REDZONE_ALIGN;
2206		/* If redzoning, ensure that the second redzone is suitably
2207		 * aligned, by adjusting the object size accordingly. */
2208		size += REDZONE_ALIGN - 1;
2209		size &= ~(REDZONE_ALIGN - 1);
2210	}
2211
2212	/* 3) caller mandated alignment */
2213	if (ralign < cachep->align) {
2214		ralign = cachep->align;
2215	}
2216	/* disable debug if necessary */
2217	if (ralign > __alignof__(unsigned long long))
2218		flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
2219	/*
2220	 * 4) Store it.
2221	 */
2222	cachep->align = ralign;
2223
2224	if (slab_is_available())
2225		gfp = GFP_KERNEL;
2226	else
2227		gfp = GFP_NOWAIT;
2228
2229	setup_node_pointer(cachep);
2230#if DEBUG
2231
2232	/*
2233	 * Both debugging options require word-alignment which is calculated
2234	 * into align above.
2235	 */
2236	if (flags & SLAB_RED_ZONE) {
2237		/* add space for red zone words */
2238		cachep->obj_offset += sizeof(unsigned long long);
2239		size += 2 * sizeof(unsigned long long);
2240	}
2241	if (flags & SLAB_STORE_USER) {
2242		/* user store requires one word storage behind the end of
2243		 * the real object. But if the second red zone needs to be
2244		 * aligned to 64 bits, we must allow that much space.
2245		 */
2246		if (flags & SLAB_RED_ZONE)
2247			size += REDZONE_ALIGN;
2248		else
2249			size += BYTES_PER_WORD;
2250	}
2251#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
2252	if (size >= kmalloc_size(INDEX_NODE + 1)
2253	    && cachep->object_size > cache_line_size()
2254	    && ALIGN(size, cachep->align) < PAGE_SIZE) {
2255		cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align);
2256		size = PAGE_SIZE;
2257	}
2258#endif
2259#endif
2260
2261	/*
2262	 * Determine if the slab management is 'on' or 'off' slab.
2263	 * (bootstrapping cannot cope with offslab caches so don't do
2264	 * it too early on. Always use on-slab management when
2265	 * SLAB_NOLEAKTRACE to avoid recursive calls into kmemleak)
2266	 */
2267	if ((size >= (PAGE_SIZE >> 5)) && !slab_early_init &&
2268	    !(flags & SLAB_NOLEAKTRACE))
2269		/*
2270		 * Size is large, assume best to place the slab management obj
2271		 * off-slab (should allow better packing of objs).
2272		 */
2273		flags |= CFLGS_OFF_SLAB;
2274
2275	size = ALIGN(size, cachep->align);
2276	/*
2277	 * We should restrict the number of objects in a slab to implement
2278	 * a byte-sized index. Refer to the comment on the SLAB_OBJ_MIN_SIZE definition.
2279	 */
2280	if (FREELIST_BYTE_INDEX && size < SLAB_OBJ_MIN_SIZE)
2281		size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align);
2282
2283	left_over = calculate_slab_order(cachep, size, cachep->align, flags);
2284
2285	if (!cachep->num)
2286		return -E2BIG;
2287
2288	freelist_size =
2289		ALIGN(cachep->num * sizeof(freelist_idx_t), cachep->align);
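	/*
	 * One freelist index per object, rounded up to the cache alignment
	 * so that objects placed after an on-slab freelist stay aligned.
	 */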
2290
2291	/*
2292	 * If the slab has been placed off-slab, and we have enough space then
2293	 * move it on-slab. This is at the expense of any extra colouring.
2294	 */
2295	if (flags & CFLGS_OFF_SLAB && left_over >= freelist_size) {
2296		flags &= ~CFLGS_OFF_SLAB;
2297		left_over -= freelist_size;
2298	}
2299
2300	if (flags & CFLGS_OFF_SLAB) {
2301		/* really off slab. No need for manual alignment */
2302		freelist_size = cachep->num * sizeof(freelist_idx_t);
2303
2304#ifdef CONFIG_PAGE_POISONING
2305		/* If we're going to use the generic kernel_map_pages()
2306		 * poisoning, then it's going to smash the contents of
2307		 * the redzone and userword anyhow, so switch them off.
2308		 */
2309		if (size % PAGE_SIZE == 0 && flags & SLAB_POISON)
2310			flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
2311#endif
2312	}
2313
2314	cachep->colour_off = cache_line_size();
2315	/* Offset must be a multiple of the alignment. */
2316	if (cachep->colour_off < cachep->align)
2317		cachep->colour_off = cachep->align;
2318	cachep->colour = left_over / cachep->colour_off;
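	/*
	 * 'colour' is the number of distinct starting offsets available;
	 * cache_grow() cycles through them so objects from successive slabs
	 * do not all land on the same cache lines.
	 */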
2319	cachep->freelist_size = freelist_size;
 
2320	cachep->flags = flags;
2321	cachep->allocflags = __GFP_COMP;
2322	if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
2323		cachep->allocflags |= GFP_DMA;
2324	cachep->size = size;
2325	cachep->reciprocal_buffer_size = reciprocal_value(size);
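	/*
	 * Precomputed so obj_to_index() can use reciprocal_divide() instead
	 * of a division instruction for every object-to-index conversion.
	 */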
2326
2327	if (flags & CFLGS_OFF_SLAB) {
2328		cachep->freelist_cache = kmalloc_slab(freelist_size, 0u);
2329		/*
2330		 * This is a possibility for one of the kmalloc_{dma,}_caches.
2331		 * But since we go off slab only for object size greater than
2332		 * PAGE_SIZE/8, and kmalloc_{dma,}_caches get created
2333		 * in ascending order, this should not happen at all.
2334		 * But leave a BUG_ON for some lucky dude.
2335		 */
2336		BUG_ON(ZERO_OR_NULL_PTR(cachep->freelist_cache));
2337	}
2338
2339	err = setup_cpu_cache(cachep, gfp);
2340	if (err) {
2341		__kmem_cache_shutdown(cachep);
2342		return err;
2343	}
2344
2345	if (flags & SLAB_DEBUG_OBJECTS) {
2346		/*
2347		 * Would deadlock through slab_destroy()->call_rcu()->
2348		 * debug_object_activate()->kmem_cache_alloc().
2349		 */
2350		WARN_ON_ONCE(flags & SLAB_DESTROY_BY_RCU);
2351
2352		slab_set_debugobj_lock_classes(cachep);
2353	} else if (!OFF_SLAB(cachep) && !(flags & SLAB_DESTROY_BY_RCU))
2354		on_slab_lock_classes(cachep);
2355
2356	return 0;
2357}
2358
2359#if DEBUG
2360static void check_irq_off(void)
2361{
2362	BUG_ON(!irqs_disabled());
2363}
2364
2365static void check_irq_on(void)
2366{
2367	BUG_ON(irqs_disabled());
2368}
2369
2370static void check_spinlock_acquired(struct kmem_cache *cachep)
2371{
2372#ifdef CONFIG_SMP
2373	check_irq_off();
2374	assert_spin_locked(&cachep->node[numa_mem_id()]->list_lock);
2375#endif
2376}
2377
2378static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
2379{
2380#ifdef CONFIG_SMP
2381	check_irq_off();
2382	assert_spin_locked(&cachep->node[node]->list_lock);
2383#endif
2384}
2385
2386#else
2387#define check_irq_off()	do { } while(0)
2388#define check_irq_on()	do { } while(0)
 
2389#define check_spinlock_acquired(x) do { } while(0)
2390#define check_spinlock_acquired_node(x, y) do { } while(0)
2391#endif
2392
2393static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
2394			struct array_cache *ac,
2395			int force, int node);
2396
2397static void do_drain(void *arg)
2398{
2399	struct kmem_cache *cachep = arg;
2400	struct array_cache *ac;
2401	int node = numa_mem_id();
2402
2403	check_irq_off();
2404	ac = cpu_cache_get(cachep);
2405	spin_lock(&cachep->node[node]->list_lock);
2406	free_block(cachep, ac->entry, ac->avail, node);
2407	spin_unlock(&cachep->node[node]->list_lock);
2408	ac->avail = 0;
2409}
2410
2411static void drain_cpu_caches(struct kmem_cache *cachep)
2412{
2413	struct kmem_cache_node *n;
2414	int node;
 
2415
2416	on_each_cpu(do_drain, cachep, 1);
2417	check_irq_on();
2418	for_each_online_node(node) {
2419		n = cachep->node[node];
2420		if (n && n->alien)
2421			drain_alien_cache(cachep, n->alien);
2422	}
2423
2424	for_each_online_node(node) {
2425		n = cachep->node[node];
2426		if (n)
2427			drain_array(cachep, n, n->shared, 1, node);
2428	}
2429}
2430
2431/*
2432 * Remove slabs from the list of free slabs.
2433 * Specify the number of slabs to drain in tofree.
2434 *
2435 * Returns the actual number of slabs released.
2436 */
2437static int drain_freelist(struct kmem_cache *cache,
2438			struct kmem_cache_node *n, int tofree)
2439{
2440	struct list_head *p;
2441	int nr_freed;
2442	struct page *page;
2443
2444	nr_freed = 0;
2445	while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
2446
2447		spin_lock_irq(&n->list_lock);
2448		p = n->slabs_free.prev;
2449		if (p == &n->slabs_free) {
2450			spin_unlock_irq(&n->list_lock);
2451			goto out;
2452		}
2453
2454		page = list_entry(p, struct page, lru);
2455#if DEBUG
2456		BUG_ON(page->active);
2457#endif
2458		list_del(&page->lru);
2459		/*
2460		 * Safe to drop the lock. The slab is no longer linked
2461		 * to the cache.
2462		 */
2463		n->free_objects -= cache->num;
2464		spin_unlock_irq(&n->list_lock);
2465		slab_destroy(cache, page);
2466		nr_freed++;
2467	}
2468out:
2469	return nr_freed;
2470}
2471
2472/* Called with slab_mutex held to protect against cpu hotplug */
2473static int __cache_shrink(struct kmem_cache *cachep)
2474{
2475	int ret = 0, i = 0;
 
2476	struct kmem_cache_node *n;
2477
2478	drain_cpu_caches(cachep);
2479
2480	check_irq_on();
2481	for_each_online_node(i) {
2482		n = cachep->node[i];
2483		if (!n)
2484			continue;
2485
2486		drain_freelist(cachep, n, slabs_tofree(cachep, n));
2487
2488		ret += !list_empty(&n->slabs_full) ||
2489			!list_empty(&n->slabs_partial);
2490	}
2491	return (ret ? 1 : 0);
2492}
2493
2494/**
2495 * kmem_cache_shrink - Shrink a cache.
2496 * @cachep: The cache to shrink.
2497 *
2498 * Releases as many slabs as possible for a cache.
2499 * To help debugging, a zero exit status indicates all slabs were released.
2500 */
2501int kmem_cache_shrink(struct kmem_cache *cachep)
2502{
2503	int ret;
2504	BUG_ON(!cachep || in_interrupt());
2505
2506	get_online_cpus();
2507	mutex_lock(&slab_mutex);
2508	ret = __cache_shrink(cachep);
2509	mutex_unlock(&slab_mutex);
2510	put_online_cpus();
2511	return ret;
2512}
2513EXPORT_SYMBOL(kmem_cache_shrink);
2514
2515int __kmem_cache_shutdown(struct kmem_cache *cachep)
2516{
2517	int i;
2518	struct kmem_cache_node *n;
2519	int rc = __cache_shrink(cachep);
2520
2521	if (rc)
2522		return rc;
2523
2524	for_each_online_cpu(i)
2525	    kfree(cachep->array[i]);
2526
2527	/* NUMA: free the node structures */
2528	for_each_online_node(i) {
2529		n = cachep->node[i];
2530		if (n) {
2531			kfree(n->shared);
2532			free_alien_cache(n->alien);
2533			kfree(n);
2534		}
2535	}
2536	return 0;
2537}
2538
2539/*
2540 * Get the memory for a slab management obj.
2541 *
2542 * For a slab cache when the slab descriptor is off-slab, the
2543 * slab descriptor can't come from the same cache which is being created,
2544 * because if that were the case, it would mean we defer the creation of
2545 * the kmalloc_{dma,}_cache of size sizeof(slab descriptor) to this point.
2546 * We would then eventually call down to __kmem_cache_create(), which
2547 * in turn looks up in the kmalloc_{dma,}_caches for the desired-size one.
2548 * This is a "chicken-and-egg" problem.
2549 *
2550 * So the off-slab slab descriptor shall come from the kmalloc_{dma,}_caches,
2551 * which are all initialized during kmem_cache_init().
2552 */
2553static void *alloc_slabmgmt(struct kmem_cache *cachep,
2554				   struct page *page, int colour_off,
2555				   gfp_t local_flags, int nodeid)
2556{
2557	void *freelist;
2558	void *addr = page_address(page);
2559
2560	if (OFF_SLAB(cachep)) {
2561		/* Slab management obj is off-slab. */
2562		freelist = kmem_cache_alloc_node(cachep->freelist_cache,
2563					      local_flags, nodeid);
2564		if (!freelist)
2565			return NULL;
2566	} else {
2567		freelist = addr + colour_off;
2568		colour_off += cachep->freelist_size;
 
2569	}
2570	page->active = 0;
2571	page->s_mem = addr + colour_off;
2572	return freelist;
2573}
2574
2575static inline freelist_idx_t get_free_obj(struct page *page, unsigned int idx)
2576{
2577	return ((freelist_idx_t *)page->freelist)[idx];
2578}
2579
2580static inline void set_free_obj(struct page *page,
2581					unsigned int idx, freelist_idx_t val)
2582{
2583	((freelist_idx_t *)(page->freelist))[idx] = val;
2584}
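/*
 * The freelist is an array of object indices stored at page->freelist.
 * Entries at positions [page->active, cachep->num) name the objects that
 * are currently free, so it behaves like a stack: slab_get_obj() consumes
 * the entry at 'active' and bumps it, slab_put_obj() drops 'active' and
 * records the returned object's index there.
 */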
2585
2586static void cache_init_objs(struct kmem_cache *cachep,
2587			    struct page *page)
2588{
 
2589	int i;
2590
2591	for (i = 0; i < cachep->num; i++) {
2592		void *objp = index_to_obj(cachep, page, i);
2593#if DEBUG
2594		/* need to poison the objs? */
2595		if (cachep->flags & SLAB_POISON)
2596			poison_obj(cachep, objp, POISON_FREE);
2597		if (cachep->flags & SLAB_STORE_USER)
2598			*dbg_userword(cachep, objp) = NULL;
2599
2600		if (cachep->flags & SLAB_RED_ZONE) {
2601			*dbg_redzone1(cachep, objp) = RED_INACTIVE;
2602			*dbg_redzone2(cachep, objp) = RED_INACTIVE;
2603		}
2604		/*
2605		 * Constructors are not allowed to allocate memory from the same
2606		 * cache which they are a constructor for.  Otherwise, deadlock.
2607		 * They must also be threaded.
2608		 */
2609		if (cachep->ctor && !(cachep->flags & SLAB_POISON))
2610			cachep->ctor(objp + obj_offset(cachep));
2611
2612		if (cachep->flags & SLAB_RED_ZONE) {
2613			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
2614				slab_error(cachep, "constructor overwrote the"
2615					   " end of an object");
2616			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
2617				slab_error(cachep, "constructor overwrote the"
2618					   " start of an object");
2619		}
2620		if ((cachep->size % PAGE_SIZE) == 0 &&
2621			    OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
2622			kernel_map_pages(virt_to_page(objp),
2623					 cachep->size / PAGE_SIZE, 0);
2624#else
2625		if (cachep->ctor)
2626			cachep->ctor(objp);
2627#endif
2628		set_free_obj(page, i, i);
2629	}
2630}
2631
2632static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
2633{
2634	if (CONFIG_ZONE_DMA_FLAG) {
2635		if (flags & GFP_DMA)
2636			BUG_ON(!(cachep->allocflags & GFP_DMA));
2637		else
2638			BUG_ON(cachep->allocflags & GFP_DMA);
2639	}
2640}
2641
2642static void *slab_get_obj(struct kmem_cache *cachep, struct page *page,
2643				int nodeid)
2644{
2645	void *objp;
2646
2647	objp = index_to_obj(cachep, page, get_free_obj(page, page->active));
2648	page->active++;
2649#if DEBUG
2650	WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid);
2651#endif
2652
2653	return objp;
2654}
2655
2656static void slab_put_obj(struct kmem_cache *cachep, struct page *page,
2657				void *objp, int nodeid)
2658{
2659	unsigned int objnr = obj_to_index(cachep, page, objp);
2660#if DEBUG
2661	unsigned int i;
2662
2663	/* Verify that the slab belongs to the intended node */
2664	WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid);
2665
2666	/* Verify double free bug */
2667	for (i = page->active; i < cachep->num; i++) {
2668		if (get_free_obj(page, i) == objnr) {
2669			printk(KERN_ERR "slab: double free detected in cache "
2670					"'%s', objp %p\n", cachep->name, objp);
2671			BUG();
2672		}
2673	}
2674#endif
2675	page->active--;
2676	set_free_obj(page, page->active, objnr);
2677}
2678
2679/*
2680 * Map pages beginning at addr to the given cache and slab. This is required
2681 * for the slab allocator to be able to lookup the cache and slab of a
2682 * virtual address for kfree, ksize, and slab debugging.
2683 */
2684static void slab_map_pages(struct kmem_cache *cache, struct page *page,
2685			   void *freelist)
2686{
2687	page->slab_cache = cache;
2688	page->freelist = freelist;
2689}
2690
2691/*
2692 * Grow (by 1) the number of slabs within a cache.  This is called by
2693 * kmem_cache_alloc() when there are no active objs left in a cache.
2694 */
2695static int cache_grow(struct kmem_cache *cachep,
2696		gfp_t flags, int nodeid, struct page *page)
2697{
2698	void *freelist;
2699	size_t offset;
2700	gfp_t local_flags;
 
2701	struct kmem_cache_node *n;
 
2702
2703	/*
2704	 * Be lazy and only check for valid flags here,  keeping it out of the
2705	 * critical path in kmem_cache_alloc().
2706	 */
2707	BUG_ON(flags & GFP_SLAB_BUG_MASK);
2708	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
2709
2710	/* Take the node list lock to change the colour_next on this node */
2711	check_irq_off();
2712	n = cachep->node[nodeid];
2713	spin_lock(&n->list_lock);
2714
2715	/* Get colour for the slab, and calculate the next value. */
2716	offset = n->colour_next;
2717	n->colour_next++;
2718	if (n->colour_next >= cachep->colour)
2719		n->colour_next = 0;
2720	spin_unlock(&n->list_lock);
2721
2722	offset *= cachep->colour_off;
2723
2724	if (local_flags & __GFP_WAIT)
2725		local_irq_enable();
2726
2727	/*
2728	 * The test for missing atomic flag is performed here, rather than
2729	 * the more obvious place, simply to reduce the critical path length
2730	 * in kmem_cache_alloc(). If a caller is seriously mis-behaving they
2731	 * will eventually be caught here (where it matters).
2732	 */
2733	kmem_flagcheck(cachep, flags);
2734
2735	/*
2736	 * Get mem for the objs.  Attempt to allocate a physical page from
2737	 * 'nodeid'.
2738	 */
2739	if (!page)
2740		page = kmem_getpages(cachep, local_flags, nodeid);
2741	if (!page)
2742		goto failed;
2743
2744	/* Get slab management. */
2745	freelist = alloc_slabmgmt(cachep, page, offset,
2746			local_flags & ~GFP_CONSTRAINT_MASK, nodeid);
2747	if (!freelist)
2748		goto opps1;
2749
2750	slab_map_pages(cachep, page, freelist);
2751
2752	cache_init_objs(cachep, page);
2753
2754	if (local_flags & __GFP_WAIT)
2755		local_irq_disable();
2756	check_irq_off();
2757	spin_lock(&n->list_lock);
2758
2759	/* Make slab active. */
2760	list_add_tail(&page->lru, &(n->slabs_free));
2761	STATS_INC_GROWN(cachep);
2762	n->free_objects += cachep->num;
2763	spin_unlock(&n->list_lock);
2764	return 1;
2765opps1:
2766	kmem_freepages(cachep, page);
2767failed:
2768	if (local_flags & __GFP_WAIT)
2769		local_irq_disable();
2770	return 0;
2771}
2772
2773#if DEBUG
2774
2775/*
2776 * Perform extra freeing checks:
2777 * - detect bad pointers.
2778 * - POISON/RED_ZONE checking
2779 */
2780static void kfree_debugcheck(const void *objp)
2781{
2782	if (!virt_addr_valid(objp)) {
2783		printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n",
2784		       (unsigned long)objp);
2785		BUG();
2786	}
2787}
2788
2789static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
2790{
2791	unsigned long long redzone1, redzone2;
2792
2793	redzone1 = *dbg_redzone1(cache, obj);
2794	redzone2 = *dbg_redzone2(cache, obj);
2795
2796	/*
2797	 * Redzone is ok.
2798	 */
2799	if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
2800		return;
2801
2802	if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
2803		slab_error(cache, "double free detected");
2804	else
2805		slab_error(cache, "memory outside object was overwritten");
2806
2807	printk(KERN_ERR "%p: redzone 1:0x%llx, redzone 2:0x%llx.\n",
2808			obj, redzone1, redzone2);
2809}
2810
2811static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
2812				   unsigned long caller)
2813{
2814	unsigned int objnr;
2815	struct page *page;
2816
2817	BUG_ON(virt_to_cache(objp) != cachep);
2818
2819	objp -= obj_offset(cachep);
2820	kfree_debugcheck(objp);
2821	page = virt_to_head_page(objp);
2822
2823	if (cachep->flags & SLAB_RED_ZONE) {
2824		verify_redzone_free(cachep, objp);
2825		*dbg_redzone1(cachep, objp) = RED_INACTIVE;
2826		*dbg_redzone2(cachep, objp) = RED_INACTIVE;
2827	}
2828	if (cachep->flags & SLAB_STORE_USER)
2829		*dbg_userword(cachep, objp) = (void *)caller;
2830
2831	objnr = obj_to_index(cachep, page, objp);
2832
2833	BUG_ON(objnr >= cachep->num);
2834	BUG_ON(objp != index_to_obj(cachep, page, objnr));
2835
2836	if (cachep->flags & SLAB_POISON) {
2837#ifdef CONFIG_DEBUG_PAGEALLOC
2838		if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
2839			store_stackinfo(cachep, objp, caller);
2840			kernel_map_pages(virt_to_page(objp),
2841					 cachep->size / PAGE_SIZE, 0);
2842		} else {
2843			poison_obj(cachep, objp, POISON_FREE);
2844		}
2845#else
2846		poison_obj(cachep, objp, POISON_FREE);
2847#endif
2848	}
2849	return objp;
2850}
2851
2852#else
2853#define kfree_debugcheck(x) do { } while(0)
2854#define cache_free_debugcheck(x,objp,z) (objp)
2855#endif
2856
2857static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags,
2858							bool force_refill)
2859{
2860	int batchcount;
2861	struct kmem_cache_node *n;
2862	struct array_cache *ac;
2863	int node;
2864
2865	check_irq_off();
2866	node = numa_mem_id();
2867	if (unlikely(force_refill))
2868		goto force_grow;
2869retry:
2870	ac = cpu_cache_get(cachep);
2871	batchcount = ac->batchcount;
2872	if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
2873		/*
2874		 * If there was little recent activity on this cache, then
2875		 * perform only a partial refill.  Otherwise we could generate
2876		 * refill bouncing.
2877		 */
2878		batchcount = BATCHREFILL_LIMIT;
2879	}
2880	n = cachep->node[node];
2881
2882	BUG_ON(ac->avail > 0 || !n);
2883	spin_lock(&n->list_lock);
 
2884
2885	/* See if we can refill from the shared array */
2886	if (n->shared && transfer_objects(ac, n->shared, batchcount)) {
2887		n->shared->touched = 1;
2888		goto alloc_done;
2889	}
2890
2891	while (batchcount > 0) {
2892		struct list_head *entry;
2893		struct page *page;
2894		/* Get the slab that the allocation is to come from. */
2895		entry = n->slabs_partial.next;
2896		if (entry == &n->slabs_partial) {
2897			n->free_touched = 1;
2898			entry = n->slabs_free.next;
2899			if (entry == &n->slabs_free)
2900				goto must_grow;
2901		}
2902
2903		page = list_entry(entry, struct page, lru);
2904		check_spinlock_acquired(cachep);
2905
2906		/*
2907		 * The slab was either on partial or free list so
2908		 * there must be at least one object available for
2909		 * allocation.
2910		 */
2911		BUG_ON(page->active >= cachep->num);
2912
2913		while (page->active < cachep->num && batchcount--) {
2914			STATS_INC_ALLOCED(cachep);
2915			STATS_INC_ACTIVE(cachep);
2916			STATS_SET_HIGH(cachep);
2917
2918			ac_put_obj(cachep, ac, slab_get_obj(cachep, page,
2919									node));
2920		}
2921
2922		/* move slabp to correct slabp list: */
2923		list_del(&page->lru);
2924		if (page->active == cachep->num)
2925			list_add(&page->lru, &n->slabs_full);
2926		else
2927			list_add(&page->lru, &n->slabs_partial);
2928	}
2929
2930must_grow:
2931	n->free_objects -= ac->avail;
2932alloc_done:
2933	spin_unlock(&n->list_lock);
 
2934
 
2935	if (unlikely(!ac->avail)) {
2936		int x;
2937force_grow:
2938		x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL);
2939
2940		/* cache_grow can reenable interrupts, then ac could change. */
2941		ac = cpu_cache_get(cachep);
2942		node = numa_mem_id();
2943
2944		/* no objects in sight? abort */
2945		if (!x && (ac->avail == 0 || force_refill))
2946			return NULL;
2947
2948		if (!ac->avail)		/* objects refilled by interrupt? */
2949			goto retry;
2950	}
2951	ac->touched = 1;
2952
2953	return ac_get_obj(cachep, ac, flags, force_refill);
2954}
2955
2956static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
2957						gfp_t flags)
2958{
2959	might_sleep_if(flags & __GFP_WAIT);
2960#if DEBUG
2961	kmem_flagcheck(cachep, flags);
2962#endif
2963}
2964
2965#if DEBUG
2966static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
2967				gfp_t flags, void *objp, unsigned long caller)
2968{
 
2969	if (!objp)
2970		return objp;
2971	if (cachep->flags & SLAB_POISON) {
2972#ifdef CONFIG_DEBUG_PAGEALLOC
2973		if ((cachep->size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
2974			kernel_map_pages(virt_to_page(objp),
2975					 cachep->size / PAGE_SIZE, 1);
2976		else
2977			check_poison_obj(cachep, objp);
2978#else
2979		check_poison_obj(cachep, objp);
2980#endif
2981		poison_obj(cachep, objp, POISON_INUSE);
2982	}
2983	if (cachep->flags & SLAB_STORE_USER)
2984		*dbg_userword(cachep, objp) = (void *)caller;
2985
2986	if (cachep->flags & SLAB_RED_ZONE) {
2987		if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
2988				*dbg_redzone2(cachep, objp) != RED_INACTIVE) {
2989			slab_error(cachep, "double free, or memory outside"
2990						" object was overwritten");
2991			printk(KERN_ERR
2992				"%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
2993				objp, *dbg_redzone1(cachep, objp),
2994				*dbg_redzone2(cachep, objp));
2995		}
2996		*dbg_redzone1(cachep, objp) = RED_ACTIVE;
2997		*dbg_redzone2(cachep, objp) = RED_ACTIVE;
2998	}
 
2999	objp += obj_offset(cachep);
3000	if (cachep->ctor && cachep->flags & SLAB_POISON)
3001		cachep->ctor(objp);
3002	if (ARCH_SLAB_MINALIGN &&
3003	    ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) {
3004		printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
3005		       objp, (int)ARCH_SLAB_MINALIGN);
3006	}
3007	return objp;
3008}
3009#else
3010#define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
3011#endif
3012
3013static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags)
3014{
3015	if (cachep == kmem_cache)
3016		return false;
3017
3018	return should_failslab(cachep->object_size, flags, cachep->flags);
3019}
3020
3021static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3022{
3023	void *objp;
3024	struct array_cache *ac;
3025	bool force_refill = false;
3026
3027	check_irq_off();
3028
3029	ac = cpu_cache_get(cachep);
3030	if (likely(ac->avail)) {
3031		ac->touched = 1;
3032		objp = ac_get_obj(cachep, ac, flags, false);
3033
3034		/*
3035		 * Allow for the possibility all avail objects are not allowed
3036		 * by the current flags
3037		 */
3038		if (objp) {
3039			STATS_INC_ALLOCHIT(cachep);
3040			goto out;
3041		}
3042		force_refill = true;
3043	}
3044
3045	STATS_INC_ALLOCMISS(cachep);
3046	objp = cache_alloc_refill(cachep, flags, force_refill);
3047	/*
3048	 * the 'ac' may be updated by cache_alloc_refill(),
3049	 * and kmemleak_erase() requires its correct value.
3050	 */
3051	ac = cpu_cache_get(cachep);
3052
3053out:
3054	/*
3055	 * To avoid a false negative, if an object that is in one of the
3056	 * per-CPU caches is leaked, we need to make sure kmemleak doesn't
3057	 * treat the array pointers as a reference to the object.
3058	 */
3059	if (objp)
3060		kmemleak_erase(&ac->entry[ac->avail]);
3061	return objp;
3062}
3063
3064#ifdef CONFIG_NUMA
3065/*
3066 * Try allocating on another node if PF_SPREAD_SLAB or a mempolicy is set.
3067 *
3068 * If we are in_interrupt, then process context, including cpusets and
3069 * mempolicy, may not apply and should not be used for allocation policy.
3070 */
3071static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
3072{
3073	int nid_alloc, nid_here;
3074
3075	if (in_interrupt() || (flags & __GFP_THISNODE))
3076		return NULL;
3077	nid_alloc = nid_here = numa_mem_id();
3078	if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
3079		nid_alloc = cpuset_slab_spread_node();
3080	else if (current->mempolicy)
3081		nid_alloc = mempolicy_slab_node();
3082	if (nid_alloc != nid_here)
3083		return ____cache_alloc_node(cachep, flags, nid_alloc);
3084	return NULL;
3085}
3086
3087/*
3088 * Fallback function if there was no memory available and no objects on a
3089 * certain node and fall back is permitted. First we scan all the
3090 * available nodes for available objects. If that fails then we
3091 * perform an allocation without specifying a node. This allows the page
3092 * allocator to do its reclaim / fallback magic. We then insert the
3093 * slab into the proper nodelist and then allocate from it.
3094 */
3095static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
3096{
3097	struct zonelist *zonelist;
3098	gfp_t local_flags;
3099	struct zoneref *z;
3100	struct zone *zone;
3101	enum zone_type high_zoneidx = gfp_zone(flags);
3102	void *obj = NULL;
 
3103	int nid;
3104	unsigned int cpuset_mems_cookie;
3105
3106	if (flags & __GFP_THISNODE)
3107		return NULL;
3108
3109	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
3110
3111retry_cpuset:
3112	cpuset_mems_cookie = read_mems_allowed_begin();
3113	zonelist = node_zonelist(mempolicy_slab_node(), flags);
3114
3115retry:
3116	/*
3117	 * Look through allowed nodes for objects available
3118	 * from existing per node queues.
3119	 */
3120	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
3121		nid = zone_to_nid(zone);
3122
3123		if (cpuset_zone_allowed_hardwall(zone, flags) &&
3124			cache->node[nid] &&
3125			cache->node[nid]->free_objects) {
3126				obj = ____cache_alloc_node(cache,
3127					flags | GFP_THISNODE, nid);
3128				if (obj)
3129					break;
3130		}
3131	}
3132
3133	if (!obj) {
3134		/*
3135		 * This allocation will be performed within the constraints
3136		 * of the current cpuset / memory policy requirements.
3137		 * We may trigger various forms of reclaim on the allowed
3138		 * set and go into memory reserves if necessary.
3139		 */
3140		struct page *page;
3141
3142		if (local_flags & __GFP_WAIT)
3143			local_irq_enable();
3144		kmem_flagcheck(cache, flags);
3145		page = kmem_getpages(cache, local_flags, numa_mem_id());
3146		if (local_flags & __GFP_WAIT)
3147			local_irq_disable();
3148		if (page) {
3149			/*
3150			 * Insert into the appropriate per node queues
3151			 */
3152			nid = page_to_nid(page);
3153			if (cache_grow(cache, flags, nid, page)) {
3154				obj = ____cache_alloc_node(cache,
3155					flags | GFP_THISNODE, nid);
3156				if (!obj)
3157					/*
3158					 * Another processor may allocate the
3159					 * objects in the slab since we are
3160					 * not holding any locks.
3161					 */
3162					goto retry;
3163			} else {
3164				/* cache_grow already freed obj */
3165				obj = NULL;
3166			}
3167		}
3168	}
3169
3170	if (unlikely(!obj && read_mems_allowed_retry(cpuset_mems_cookie)))
3171		goto retry_cpuset;
3172	return obj;
3173}
3174
3175/*
3176 * An interface to enable slab creation on nodeid
3177 */
3178static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
3179				int nodeid)
3180{
3181	struct list_head *entry;
3182	struct page *page;
3183	struct kmem_cache_node *n;
3184	void *obj;
3185	int x;
3186
3187	VM_BUG_ON(nodeid > num_online_nodes());
3188	n = cachep->node[nodeid];
3189	BUG_ON(!n);
3190
3191retry:
3192	check_irq_off();
3193	spin_lock(&n->list_lock);
3194	entry = n->slabs_partial.next;
3195	if (entry == &n->slabs_partial) {
3196		n->free_touched = 1;
3197		entry = n->slabs_free.next;
3198		if (entry == &n->slabs_free)
3199			goto must_grow;
3200	}
3201
3202	page = list_entry(entry, struct page, lru);
3203	check_spinlock_acquired_node(cachep, nodeid);
3204
3205	STATS_INC_NODEALLOCS(cachep);
3206	STATS_INC_ACTIVE(cachep);
3207	STATS_SET_HIGH(cachep);
3208
3209	BUG_ON(page->active == cachep->num);
3210
3211	obj = slab_get_obj(cachep, page, nodeid);
3212	n->free_objects--;
3213	/* move slabp to correct slabp list: */
3214	list_del(&page->lru);
3215
3216	if (page->active == cachep->num)
3217		list_add(&page->lru, &n->slabs_full);
3218	else
3219		list_add(&page->lru, &n->slabs_partial);
3220
3221	spin_unlock(&n->list_lock);
3222	goto done;
 
3223
3224must_grow:
3225	spin_unlock(&n->list_lock);
3226	x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL);
3227	if (x)
3228		goto retry;
3229
3230	return fallback_alloc(cachep, flags);
3231
3232done:
3233	return obj;
3234}
3235
3236static __always_inline void *
3237slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
3238		   unsigned long caller)
3239{
3240	unsigned long save_flags;
3241	void *ptr;
3242	int slab_node = numa_mem_id();
3243
3244	flags &= gfp_allowed_mask;
3245
3246	lockdep_trace_alloc(flags);
3247
3248	if (slab_should_failslab(cachep, flags))
3249		return NULL;
3250
3251	cachep = memcg_kmem_get_cache(cachep, flags);
3252
3253	cache_alloc_debugcheck_before(cachep, flags);
3254	local_irq_save(save_flags);
3255
3256	if (nodeid == NUMA_NO_NODE)
3257		nodeid = slab_node;
3258
3259	if (unlikely(!cachep->node[nodeid])) {
3260		/* Node not bootstrapped yet */
3261		ptr = fallback_alloc(cachep, flags);
3262		goto out;
3263	}
3264
3265	if (nodeid == slab_node) {
3266		/*
3267		 * Use the locally cached objects if possible.
3268		 * However ____cache_alloc does not allow fallback
3269		 * to other nodes. It may fail while we still have
3270		 * objects on other nodes available.
3271		 */
3272		ptr = ____cache_alloc(cachep, flags);
3273		if (ptr)
3274			goto out;
3275	}
3276	/* ____cache_alloc_node can fall back to other nodes */
3277	ptr = ____cache_alloc_node(cachep, flags, nodeid);
3278  out:
3279	local_irq_restore(save_flags);
3280	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
3281	kmemleak_alloc_recursive(ptr, cachep->object_size, 1, cachep->flags,
3282				 flags);
3283
3284	if (likely(ptr)) {
3285		kmemcheck_slab_alloc(cachep, flags, ptr, cachep->object_size);
3286		if (unlikely(flags & __GFP_ZERO))
3287			memset(ptr, 0, cachep->object_size);
3288	}
3289
3290	return ptr;
3291}
3292
3293static __always_inline void *
3294__do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
3295{
3296	void *objp;
3297
3298	if (current->mempolicy || unlikely(current->flags & PF_SPREAD_SLAB)) {
3299		objp = alternate_node_alloc(cache, flags);
3300		if (objp)
3301			goto out;
3302	}
3303	objp = ____cache_alloc(cache, flags);
3304
3305	/*
3306	 * We may just have run out of memory on the local node.
3307	 * ____cache_alloc_node() knows how to locate memory on other nodes
3308	 */
3309	if (!objp)
3310		objp = ____cache_alloc_node(cache, flags, numa_mem_id());
3311
3312  out:
3313	return objp;
3314}
3315#else
3316
3317static __always_inline void *
3318__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3319{
3320	return ____cache_alloc(cachep, flags);
3321}
3322
3323#endif /* CONFIG_NUMA */
3324
3325static __always_inline void *
3326slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
3327{
3328	unsigned long save_flags;
3329	void *objp;
3330
3331	flags &= gfp_allowed_mask;
3332
3333	lockdep_trace_alloc(flags);
3334
3335	if (slab_should_failslab(cachep, flags))
3336		return NULL;
3337
3338	cachep = memcg_kmem_get_cache(cachep, flags);
3339
3340	cache_alloc_debugcheck_before(cachep, flags);
3341	local_irq_save(save_flags);
3342	objp = __do_cache_alloc(cachep, flags);
3343	local_irq_restore(save_flags);
3344	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
3345	kmemleak_alloc_recursive(objp, cachep->object_size, 1, cachep->flags,
3346				 flags);
3347	prefetchw(objp);
3348
3349	if (likely(objp)) {
3350		kmemcheck_slab_alloc(cachep, flags, objp, cachep->object_size);
3351		if (unlikely(flags & __GFP_ZERO))
3352			memset(objp, 0, cachep->object_size);
3353	}
3354
3355	return objp;
3356}
3357
3358/*
3359 * The caller must hold the correct kmem_cache_node's list_lock.
3360 */
3361static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
3362		       int node)
3363{
3364	int i;
3365	struct kmem_cache_node *n;
3366
3367	for (i = 0; i < nr_objects; i++) {
3368		void *objp;
3369		struct page *page;
3370
3371		clear_obj_pfmemalloc(&objpp[i]);
3372		objp = objpp[i];
3373
3374		page = virt_to_head_page(objp);
3375		n = cachep->node[node];
3376		list_del(&page->lru);
3377		check_spinlock_acquired_node(cachep, node);
3378		slab_put_obj(cachep, page, objp, node);
3379		STATS_DEC_ACTIVE(cachep);
3380		n->free_objects++;
3381
3382		/* fixup slab chains */
3383		if (page->active == 0) {
3384			if (n->free_objects > n->free_limit) {
3385				n->free_objects -= cachep->num;
3386				/* No need to drop any previously held
3387				 * lock here; even if we have an off-slab slab
3388				 * descriptor, it is guaranteed to come from
3389				 * a different cache (refer to the comments
3390				 * before alloc_slabmgmt()).
3391				 */
3392				slab_destroy(cachep, page);
3393			} else {
3394				list_add(&page->lru, &n->slabs_free);
3395			}
3396		} else {
3397			/* Unconditionally move a slab to the end of the
3398			 * partial list on free - maximum time for the
3399			 * other objects to be freed, too.
3400			 */
3401			list_add_tail(&page->lru, &n->slabs_partial);
3402		}
3403	}
3404}
3405
3406static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
3407{
3408	int batchcount;
3409	struct kmem_cache_node *n;
3410	int node = numa_mem_id();
3411
3412	batchcount = ac->batchcount;
3413#if DEBUG
3414	BUG_ON(!batchcount || batchcount > ac->avail);
3415#endif
3416	check_irq_off();
3417	n = cachep->node[node];
3418	spin_lock(&n->list_lock);
3419	if (n->shared) {
3420		struct array_cache *shared_array = n->shared;
3421		int max = shared_array->limit - shared_array->avail;
3422		if (max) {
3423			if (batchcount > max)
3424				batchcount = max;
3425			memcpy(&(shared_array->entry[shared_array->avail]),
3426			       ac->entry, sizeof(void *) * batchcount);
3427			shared_array->avail += batchcount;
3428			goto free_done;
3429		}
3430	}
3431
3432	free_block(cachep, ac->entry, batchcount, node);
3433free_done:
3434#if STATS
3435	{
3436		int i = 0;
3437		struct list_head *p;
3438
3439		p = n->slabs_free.next;
3440		while (p != &(n->slabs_free)) {
3441			struct page *page;
3442
3443			page = list_entry(p, struct page, lru);
3444			BUG_ON(page->active);
3445
3446			i++;
3447			p = p->next;
3448		}
3449		STATS_SET_FREEABLE(cachep, i);
3450	}
3451#endif
3452	spin_unlock(&n->list_lock);
3453	ac->avail -= batchcount;
3454	memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
3455}
3456
3457/*
3458 * Release an obj back to its cache. If the obj has a constructed state, it
3459 * must be in this state _before_ it is released. Called with interrupts disabled.
3460 */
3461static inline void __cache_free(struct kmem_cache *cachep, void *objp,
3462				unsigned long caller)
3463{
3464	struct array_cache *ac = cpu_cache_get(cachep);
3465
3466	check_irq_off();
3467	kmemleak_free_recursive(objp, cachep->flags);
3468	objp = cache_free_debugcheck(cachep, objp, caller);
3469
3470	kmemcheck_slab_free(cachep, objp, cachep->object_size);
3471
3472	/*
3473	 * Skip calling cache_free_alien() when the platform is not NUMA.
3474	 * This avoids cache misses that happen while accessing the slab page
3475	 * (a per-page memory reference) to get the nodeid. Instead, use a global
3476	 * variable to skip the call, which is most likely to be present in
3477	 * the cache.
3478	 */
3479	if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
3480		return;
3481
3482	if (likely(ac->avail < ac->limit)) {
3483		STATS_INC_FREEHIT(cachep);
3484	} else {
3485		STATS_INC_FREEMISS(cachep);
3486		cache_flusharray(cachep, ac);
3487	}
3488
3489	ac_put_obj(cachep, ac, objp);
3490}
3491
3492/**
3493 * kmem_cache_alloc - Allocate an object
3494 * @cachep: The cache to allocate from.
3495 * @flags: See kmalloc().
3496 *
3497 * Allocate an object from this cache.  The flags are only relevant
3498 * if the cache has no available objects.
3499 */
3500void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3501{
3502	void *ret = slab_alloc(cachep, flags, _RET_IP_);
3503
3504	trace_kmem_cache_alloc(_RET_IP_, ret,
3505			       cachep->object_size, cachep->size, flags);
3506
3507	return ret;
3508}
3509EXPORT_SYMBOL(kmem_cache_alloc);
3510
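/*
 * Editor's note: a minimal usage sketch of the cache API exported above. The
 * object type, cache name and helper functions are hypothetical, not part of
 * slab.c.
 */
#if 0	/* illustrative sketch only */
struct example_obj {
	struct list_head list;
	int payload;
};

static struct kmem_cache *example_cachep;

static int __init example_cache_init(void)
{
	/* one cache per object type, as described at the top of this file */
	example_cachep = kmem_cache_create("example_obj",
					   sizeof(struct example_obj), 0,
					   SLAB_HWCACHE_ALIGN, NULL);
	return example_cachep ? 0 : -ENOMEM;
}

static void example_cache_use(void)
{
	struct example_obj *obj = kmem_cache_alloc(example_cachep, GFP_KERNEL);

	if (!obj)
		return;
	/* ... use obj ... */
	kmem_cache_free(example_cachep, obj);
}
#endif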
3511#ifdef CONFIG_TRACING
3512void *
3513kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
3514{
3515	void *ret;
3516
3517	ret = slab_alloc(cachep, flags, _RET_IP_);
3518
3519	trace_kmalloc(_RET_IP_, ret,
3520		      size, cachep->size, flags);
3521	return ret;
3522}
3523EXPORT_SYMBOL(kmem_cache_alloc_trace);
3524#endif
3525
3526#ifdef CONFIG_NUMA
3527/**
3528 * kmem_cache_alloc_node - Allocate an object on the specified node
3529 * @cachep: The cache to allocate from.
3530 * @flags: See kmalloc().
3531 * @nodeid: node number of the target node.
3532 *
3533 * Identical to kmem_cache_alloc but it will allocate memory on the given
3534 * node, which can improve the performance for cpu bound structures.
3535 *
3536 * Fallback to another node is possible if __GFP_THISNODE is not set.
3537 */
3538void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
3539{
3540	void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
3541
3542	trace_kmem_cache_alloc_node(_RET_IP_, ret,
3543				    cachep->object_size, cachep->size,
3544				    flags, nodeid);
3545
3546	return ret;
3547}
3548EXPORT_SYMBOL(kmem_cache_alloc_node);
3549
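/*
 * Editor's note: a sketch of node-local allocation with the interface above,
 * e.g. for per-node control structures. Names are hypothetical.
 */
#if 0	/* illustrative sketch only */
static void *example_alloc_near(struct kmem_cache *cachep, int nid)
{
	/*
	 * Without __GFP_THISNODE the allocation may fall back to another
	 * node; add that flag to fail instead of falling back.
	 */
	return kmem_cache_alloc_node(cachep, GFP_KERNEL, nid);
}
#endif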
3550#ifdef CONFIG_TRACING
3551void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
3552				  gfp_t flags,
3553				  int nodeid,
3554				  size_t size)
3555{
3556	void *ret;
3557
3558	ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
3559
3560	trace_kmalloc_node(_RET_IP_, ret,
3561			   size, cachep->size,
3562			   flags, nodeid);
3563	return ret;
3564}
3565EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
3566#endif
3567
3568static __always_inline void *
3569__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
3570{
3571	struct kmem_cache *cachep;
3572
3573	cachep = kmalloc_slab(size, flags);
3574	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3575		return cachep;
3576	return kmem_cache_alloc_node_trace(cachep, flags, node, size);
3577}
3578
3579#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
3580void *__kmalloc_node(size_t size, gfp_t flags, int node)
3581{
3582	return __do_kmalloc_node(size, flags, node, _RET_IP_);
3583}
3584EXPORT_SYMBOL(__kmalloc_node);
3585
3586void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
3587		int node, unsigned long caller)
3588{
3589	return __do_kmalloc_node(size, flags, node, caller);
3590}
3591EXPORT_SYMBOL(__kmalloc_node_track_caller);
3592#else
3593void *__kmalloc_node(size_t size, gfp_t flags, int node)
3594{
3595	return __do_kmalloc_node(size, flags, node, 0);
3596}
3597EXPORT_SYMBOL(__kmalloc_node);
3598#endif /* CONFIG_DEBUG_SLAB || CONFIG_TRACING */
3599#endif /* CONFIG_NUMA */
3600
3601/**
3602 * __do_kmalloc - allocate memory
3603 * @size: how many bytes of memory are required.
3604 * @flags: the type of memory to allocate (see kmalloc).
3605 * @caller: the caller's return address, used for debug tracking
3606 */
3607static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
3608					  unsigned long caller)
3609{
3610	struct kmem_cache *cachep;
3611	void *ret;
3612
3613	cachep = kmalloc_slab(size, flags);
3614	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3615		return cachep;
3616	ret = slab_alloc(cachep, flags, caller);
3617
3618	trace_kmalloc(caller, ret,
3619		      size, cachep->size, flags);
3620
3621	return ret;
3622}
3623
3624
3625#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
3626void *__kmalloc(size_t size, gfp_t flags)
3627{
3628	return __do_kmalloc(size, flags, _RET_IP_);
3629}
3630EXPORT_SYMBOL(__kmalloc);
3631
3632void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
3633{
3634	return __do_kmalloc(size, flags, caller);
3635}
3636EXPORT_SYMBOL(__kmalloc_track_caller);
3637
3638#else
3639void *__kmalloc(size_t size, gfp_t flags)
3640{
3641	return __do_kmalloc(size, flags, 0);
3642}
3643EXPORT_SYMBOL(__kmalloc);
3644#endif
3645
3646/**
3647 * kmem_cache_free - Deallocate an object
3648 * @cachep: The cache the allocation was from.
3649 * @objp: The previously allocated object.
3650 *
3651 * Free an object which was previously allocated from this
3652 * cache.
3653 */
3654void kmem_cache_free(struct kmem_cache *cachep, void *objp)
3655{
3656	unsigned long flags;
3657	cachep = cache_from_obj(cachep, objp);
3658	if (!cachep)
3659		return;
3660
3661	local_irq_save(flags);
3662	debug_check_no_locks_freed(objp, cachep->object_size);
3663	if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
3664		debug_check_no_obj_freed(objp, cachep->object_size);
3665	__cache_free(cachep, objp, _RET_IP_);
3666	local_irq_restore(flags);
3667
3668	trace_kmem_cache_free(_RET_IP_, objp);
3669}
3670EXPORT_SYMBOL(kmem_cache_free);
3671
3672/**
3673 * kfree - free previously allocated memory
3674 * @objp: pointer returned by kmalloc.
3675 *
3676 * If @objp is NULL, no operation is performed.
3677 *
3678 * Don't free memory not originally allocated by kmalloc()
3679 * or you will run into trouble.
3680 */
3681void kfree(const void *objp)
3682{
3683	struct kmem_cache *c;
3684	unsigned long flags;
3685
3686	trace_kfree(_RET_IP_, objp);
3687
3688	if (unlikely(ZERO_OR_NULL_PTR(objp)))
3689		return;
3690	local_irq_save(flags);
3691	kfree_debugcheck(objp);
3692	c = virt_to_cache(objp);
3693	debug_check_no_locks_freed(objp, c->object_size);
3694
3695	debug_check_no_obj_freed(objp, c->object_size);
3696	__cache_free(c, (void *)objp, _RET_IP_);
3697	local_irq_restore(flags);
3698}
3699EXPORT_SYMBOL(kfree);
3700
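/*
 * Editor's note: a sketch of the kmalloc()/kfree() pair serviced by the
 * kmalloc caches above. The buffer size and use are hypothetical.
 */
#if 0	/* illustrative sketch only */
static int example_kmalloc_use(void)
{
	char *buf = kmalloc(64, GFP_KERNEL);	/* served from a kmalloc-64 slab */

	if (!buf)
		return -ENOMEM;
	/* ... fill and use buf ... */
	kfree(buf);
	return 0;
}
#endif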
3701/*
3702 * This initializes kmem_cache_node or resizes various caches for all nodes.
3703 */
3704static int alloc_kmem_cache_node(struct kmem_cache *cachep, gfp_t gfp)
3705{
3706	int node;
3707	struct kmem_cache_node *n;
3708	struct array_cache *new_shared;
3709	struct array_cache **new_alien = NULL;
3710
3711	for_each_online_node(node) {
3712
3713		if (use_alien_caches) {
3714			new_alien = alloc_alien_cache(node, cachep->limit, gfp);
3715			if (!new_alien)
3716				goto fail;
3717		}
3718
3719		new_shared = NULL;
3720		if (cachep->shared) {
3721			new_shared = alloc_arraycache(node,
3722				cachep->shared*cachep->batchcount,
3723					0xbaadf00d, gfp);
3724			if (!new_shared) {
3725				free_alien_cache(new_alien);
3726				goto fail;
3727			}
3728		}
3729
3730		n = cachep->node[node];
3731		if (n) {
3732			struct array_cache *shared = n->shared;
3733
3734			spin_lock_irq(&n->list_lock);
3735
3736			if (shared)
3737				free_block(cachep, shared->entry,
3738						shared->avail, node);
3739
3740			n->shared = new_shared;
3741			if (!n->alien) {
3742				n->alien = new_alien;
3743				new_alien = NULL;
3744			}
3745			n->free_limit = (1 + nr_cpus_node(node)) *
3746					cachep->batchcount + cachep->num;
3747			spin_unlock_irq(&n->list_lock);
3748			kfree(shared);
3749			free_alien_cache(new_alien);
3750			continue;
3751		}
3752		n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node);
3753		if (!n) {
3754			free_alien_cache(new_alien);
3755			kfree(new_shared);
3756			goto fail;
3757		}
3758
3759		kmem_cache_node_init(n);
3760		n->next_reap = jiffies + REAPTIMEOUT_NODE +
3761				((unsigned long)cachep) % REAPTIMEOUT_NODE;
3762		n->shared = new_shared;
3763		n->alien = new_alien;
3764		n->free_limit = (1 + nr_cpus_node(node)) *
3765					cachep->batchcount + cachep->num;
3766		cachep->node[node] = n;
3767	}
3768	return 0;
3769
3770fail:
3771	if (!cachep->list.next) {
3772		/* Cache is not active yet. Roll back what we did */
3773		node--;
3774		while (node >= 0) {
3775			if (cachep->node[node]) {
3776				n = cachep->node[node];
3777
3778				kfree(n->shared);
3779				free_alien_cache(n->alien);
3780				kfree(n);
3781				cachep->node[node] = NULL;
3782			}
3783			node--;
3784		}
3785	}
3786	return -ENOMEM;
3787}
3788
3789struct ccupdate_struct {
3790	struct kmem_cache *cachep;
3791	struct array_cache *new[0];
3792};
3793
3794static void do_ccupdate_local(void *info)
3795{
3796	struct ccupdate_struct *new = info;
3797	struct array_cache *old;
3798
3799	check_irq_off();
3800	old = cpu_cache_get(new->cachep);
3801
3802	new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()];
3803	new->new[smp_processor_id()] = old;
3804}
3805
3806/* Always called with the slab_mutex held */
3807static int __do_tune_cpucache(struct kmem_cache *cachep, int limit,
3808				int batchcount, int shared, gfp_t gfp)
3809{
3810	struct ccupdate_struct *new;
3811	int i;
3812
3813	new = kzalloc(sizeof(*new) + nr_cpu_ids * sizeof(struct array_cache *),
3814		      gfp);
3815	if (!new)
3816		return -ENOMEM;
3817
3818	for_each_online_cpu(i) {
3819		new->new[i] = alloc_arraycache(cpu_to_mem(i), limit,
3820						batchcount, gfp);
3821		if (!new->new[i]) {
3822			for (i--; i >= 0; i--)
3823				kfree(new->new[i]);
3824			kfree(new);
3825			return -ENOMEM;
3826		}
3827	}
3828	new->cachep = cachep;
3829
3830	on_each_cpu(do_ccupdate_local, (void *)new, 1);
3831
3832	check_irq_on();
3833	cachep->batchcount = batchcount;
3834	cachep->limit = limit;
3835	cachep->shared = shared;
3836
3837	for_each_online_cpu(i) {
3838		struct array_cache *ccold = new->new[i];
3839		if (!ccold)
3840			continue;
3841		spin_lock_irq(&cachep->node[cpu_to_mem(i)]->list_lock);
3842		free_block(cachep, ccold->entry, ccold->avail, cpu_to_mem(i));
3843		spin_unlock_irq(&cachep->node[cpu_to_mem(i)]->list_lock);
3844		kfree(ccold);
3845	}
3846	kfree(new);
3847	return alloc_kmem_cache_node(cachep, gfp);
3848}
3849
3850static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
3851				int batchcount, int shared, gfp_t gfp)
3852{
3853	int ret;
3854	struct kmem_cache *c = NULL;
3855	int i = 0;
3856
3857	ret = __do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
3858
3859	if (slab_state < FULL)
3860		return ret;
3861
3862	if ((ret < 0) || !is_root_cache(cachep))
3863		return ret;
3864
3865	VM_BUG_ON(!mutex_is_locked(&slab_mutex));
3866	for_each_memcg_cache_index(i) {
3867		c = cache_from_memcg_idx(cachep, i);
3868		if (c)
3869			/* return value determined by the parent cache only */
3870			__do_tune_cpucache(c, limit, batchcount, shared, gfp);
3871	}
3872
3873	return ret;
3874}
3875
3876/* Always called with slab_mutex held */
3877static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
3878{
3879	int err;
3880	int limit = 0;
3881	int shared = 0;
3882	int batchcount = 0;
3883
3884	if (!is_root_cache(cachep)) {
3885		struct kmem_cache *root = memcg_root_cache(cachep);
3886		limit = root->limit;
3887		shared = root->shared;
3888		batchcount = root->batchcount;
3889	}
3890
3891	if (limit && shared && batchcount)
3892		goto skip_setup;
3893	/*
3894	 * The head array serves three purposes:
3895	 * - create a LIFO ordering, i.e. return objects that are cache-warm
3896	 * - reduce the number of spinlock operations.
3897	 * - reduce the number of linked list operations on the slab and
3898	 *   bufctl chains: array operations are cheaper.
3899	 * The numbers below are guesses; we should auto-tune as described by
3900	 * Bonwick.
3901	 */
3902	if (cachep->size > 131072)
3903		limit = 1;
3904	else if (cachep->size > PAGE_SIZE)
3905		limit = 8;
3906	else if (cachep->size > 1024)
3907		limit = 24;
3908	else if (cachep->size > 256)
3909		limit = 54;
3910	else
3911		limit = 120;
3912
3913	/*
3914	 * CPU-bound tasks (e.g. network routing) can exhibit cpu-bound
3915	 * allocation behaviour: most allocs on one cpu, most free operations
3916	 * on another cpu. For these cases, efficient object passing between
3917	 * cpus is necessary. This is provided by a shared array. The array
3918	 * replaces Bonwick's magazine layer.
3919	 * On uniprocessor, it's functionally equivalent (but less efficient)
3920	 * to a larger limit. Thus it is disabled by default.
3921	 */
3922	shared = 0;
3923	if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1)
3924		shared = 8;
3925
3926#if DEBUG
3927	/*
3928	 * With debugging enabled, large batchcounts lead to excessively long
3929	 * periods with local interrupts disabled. Limit the batchcount.
3930	 */
3931	if (limit > 32)
3932		limit = 32;
3933#endif
3934	batchcount = (limit + 1) / 2;
3935skip_setup:
3936	err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
3937	if (err)
3938		printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
3939		       cachep->name, -err);
3940	return err;
3941}
3942
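/*
 * Editor's note: a worked example of the heuristic above (assumed values,
 * not measured). For a cache with 192-byte objects on an SMP machine:
 *	size 192 <= 256			=> limit = 120
 *	size <= PAGE_SIZE, >1 cpu	=> shared = 8
 *	batchcount = (120 + 1) / 2	=> 60
 * With DEBUG the limit is clamped to 32, giving a batchcount of 16.
 */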
3943/*
3944 * Drain an array if it contains any elements, taking the node lock only if
3945 * necessary. Note that the node list_lock also protects the array_cache
3946 * if drain_array() is used on the shared array.
3947 */
3948static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
3949			 struct array_cache *ac, int force, int node)
3950{
3951	int tofree;
3952
3953	if (!ac || !ac->avail)
3954		return;
3955	if (ac->touched && !force) {
3956		ac->touched = 0;
3957	} else {
3958		spin_lock_irq(&n->list_lock);
3959		if (ac->avail) {
3960			tofree = force ? ac->avail : (ac->limit + 4) / 5;
3961			if (tofree > ac->avail)
3962				tofree = (ac->avail + 1) / 2;
3963			free_block(cachep, ac->entry, tofree, node);
3964			ac->avail -= tofree;
3965			memmove(ac->entry, &(ac->entry[tofree]),
3966				sizeof(void *) * ac->avail);
3967		}
3968		spin_unlock_irq(&n->list_lock);
3969	}
3970}
3971
3972/**
3973 * cache_reap - Reclaim memory from caches.
3974 * @w: work descriptor
3975 *
3976 * Called from workqueue/eventd every few seconds.
3977 * Purpose:
3978 * - clear the per-cpu caches for this CPU.
3979 * - return freeable pages to the main free memory pool.
3980 *
3981 * If we cannot acquire the cache chain mutex then just give up - we'll try
3982 * again on the next iteration.
3983 */
3984static void cache_reap(struct work_struct *w)
3985{
3986	struct kmem_cache *searchp;
3987	struct kmem_cache_node *n;
3988	int node = numa_mem_id();
3989	struct delayed_work *work = to_delayed_work(w);
3990
3991	if (!mutex_trylock(&slab_mutex))
3992		/* Give up. Set up the next iteration. */
3993		goto out;
3994
3995	list_for_each_entry(searchp, &slab_caches, list) {
3996		check_irq_on();
3997
3998		/*
3999		 * We only take the node lock if absolutely necessary and we
4000		 * have established with reasonable certainty that
4001		 * we can do some work if the lock was obtained.
4002		 */
4003		n = searchp->node[node];
4004
4005		reap_alien(searchp, n);
4006
4007		drain_array(searchp, n, cpu_cache_get(searchp), 0, node);
4008
4009		/*
4010		 * These are racy checks but it does not matter
4011		 * if we skip one check or scan twice.
4012		 */
4013		if (time_after(n->next_reap, jiffies))
4014			goto next;
4015
4016		n->next_reap = jiffies + REAPTIMEOUT_NODE;
4017
4018		drain_array(searchp, n, n->shared, 0, node);
4019
4020		if (n->free_touched)
4021			n->free_touched = 0;
4022		else {
4023			int freed;
4024
4025			freed = drain_freelist(searchp, n, (n->free_limit +
4026				5 * searchp->num - 1) / (5 * searchp->num));
4027			STATS_ADD_REAPED(searchp, freed);
4028		}
4029next:
4030		cond_resched();
4031	}
4032	check_irq_on();
4033	mutex_unlock(&slab_mutex);
4034	next_reap_node();
4035out:
4036	/* Set up the next iteration */
4037	schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_AC));
4038}
4039
4040#ifdef CONFIG_SLABINFO
4041void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
4042{
4043	struct page *page;
4044	unsigned long active_objs;
4045	unsigned long num_objs;
4046	unsigned long active_slabs = 0;
4047	unsigned long num_slabs, free_objects = 0, shared_avail = 0;
4048	const char *name;
4049	char *error = NULL;
4050	int node;
4051	struct kmem_cache_node *n;
4052
4053	active_objs = 0;
4054	num_slabs = 0;
4055	for_each_online_node(node) {
4056		n = cachep->node[node];
4057		if (!n)
4058			continue;
4059
4060		check_irq_on();
4061		spin_lock_irq(&n->list_lock);
4062
4063		list_for_each_entry(page, &n->slabs_full, lru) {
4064			if (page->active != cachep->num && !error)
4065				error = "slabs_full accounting error";
4066			active_objs += cachep->num;
4067			active_slabs++;
4068		}
4069		list_for_each_entry(page, &n->slabs_partial, lru) {
4070			if (page->active == cachep->num && !error)
4071				error = "slabs_partial accounting error";
4072			if (!page->active && !error)
4073				error = "slabs_partial accounting error";
4074			active_objs += page->active;
4075			active_slabs++;
4076		}
4077		list_for_each_entry(page, &n->slabs_free, lru) {
4078			if (page->active && !error)
4079				error = "slabs_free accounting error";
4080			num_slabs++;
4081		}
4082		free_objects += n->free_objects;
4083		if (n->shared)
4084			shared_avail += n->shared->avail;
4085
4086		spin_unlock_irq(&n->list_lock);
4087	}
4088	num_slabs += active_slabs;
4089	num_objs = num_slabs * cachep->num;
4090	if (num_objs - active_objs != free_objects && !error)
4091		error = "free_objects accounting error";
4092
4093	name = cachep->name;
4094	if (error)
4095		printk(KERN_ERR "slab: cache %s error: %s\n", name, error);
4096
4097	sinfo->active_objs = active_objs;
4098	sinfo->num_objs = num_objs;
4099	sinfo->active_slabs = active_slabs;
4100	sinfo->num_slabs = num_slabs;
4101	sinfo->shared_avail = shared_avail;
4102	sinfo->limit = cachep->limit;
4103	sinfo->batchcount = cachep->batchcount;
4104	sinfo->shared = cachep->shared;
4105	sinfo->objects_per_slab = cachep->num;
4106	sinfo->cache_order = cachep->gfporder;
4107}
4108
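/*
 * Editor's note: the fields filled in above correspond (roughly, per the
 * common slabinfo header) to the /proc/slabinfo columns:
 *	name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>
 *	: tunables <limit> <batchcount> <sharedfactor>
 *	: slabdata <active_slabs> <num_slabs> <sharedavail>
 */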
4109void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
4110{
4111#if STATS
4112	{			/* node stats */
4113		unsigned long high = cachep->high_mark;
4114		unsigned long allocs = cachep->num_allocations;
4115		unsigned long grown = cachep->grown;
4116		unsigned long reaped = cachep->reaped;
4117		unsigned long errors = cachep->errors;
4118		unsigned long max_freeable = cachep->max_freeable;
4119		unsigned long node_allocs = cachep->node_allocs;
4120		unsigned long node_frees = cachep->node_frees;
4121		unsigned long overflows = cachep->node_overflow;
4122
4123		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu "
4124			   "%4lu %4lu %4lu %4lu %4lu",
4125			   allocs, high, grown,
4126			   reaped, errors, max_freeable, node_allocs,
4127			   node_frees, overflows);
4128	}
4129	/* cpu stats */
4130	{
4131		unsigned long allochit = atomic_read(&cachep->allochit);
4132		unsigned long allocmiss = atomic_read(&cachep->allocmiss);
4133		unsigned long freehit = atomic_read(&cachep->freehit);
4134		unsigned long freemiss = atomic_read(&cachep->freemiss);
4135
4136		seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
4137			   allochit, allocmiss, freehit, freemiss);
4138	}
4139#endif
4140}
4141
4142#define MAX_SLABINFO_WRITE 128
4143/**
4144 * slabinfo_write - Tuning for the slab allocator
4145 * @file: unused
4146 * @buffer: user buffer
4147 * @count: data length
4148 * @ppos: unused
4149 */
4150ssize_t slabinfo_write(struct file *file, const char __user *buffer,
4151		       size_t count, loff_t *ppos)
4152{
4153	char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
4154	int limit, batchcount, shared, res;
4155	struct kmem_cache *cachep;
4156
4157	if (count > MAX_SLABINFO_WRITE)
4158		return -EINVAL;
4159	if (copy_from_user(&kbuf, buffer, count))
4160		return -EFAULT;
4161	kbuf[MAX_SLABINFO_WRITE] = '\0';
4162
4163	tmp = strchr(kbuf, ' ');
4164	if (!tmp)
4165		return -EINVAL;
4166	*tmp = '\0';
4167	tmp++;
4168	if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
4169		return -EINVAL;
4170
4171	/* Find the cache in the chain of caches. */
4172	mutex_lock(&slab_mutex);
4173	res = -EINVAL;
4174	list_for_each_entry(cachep, &slab_caches, list) {
4175		if (!strcmp(cachep->name, kbuf)) {
4176			if (limit < 1 || batchcount < 1 ||
4177					batchcount > limit || shared < 0) {
4178				res = 0;
4179			} else {
4180				res = do_tune_cpucache(cachep, limit,
4181						       batchcount, shared,
4182						       GFP_KERNEL);
4183			}
4184			break;
4185		}
4186	}
4187	mutex_unlock(&slab_mutex);
4188	if (res >= 0)
4189		res = count;
4190	return res;
4191}
4192
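/*
 * Editor's note: a user-space sketch of driving slabinfo_write() above by
 * writing "name limit batchcount shared" to /proc/slabinfo. The cache name
 * and tuning values are hypothetical and must pass the sanity checks above.
 */
#if 0	/* illustrative user-space sketch, not kernel code */
#include <stdio.h>

static int example_tune_cache(void)
{
	FILE *f = fopen("/proc/slabinfo", "w");

	if (!f)
		return -1;
	fprintf(f, "dentry 128 64 8\n");	/* limit batchcount shared */
	return fclose(f);
}
#endif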
4193#ifdef CONFIG_DEBUG_SLAB_LEAK
4194
4195static void *leaks_start(struct seq_file *m, loff_t *pos)
4196{
4197	mutex_lock(&slab_mutex);
4198	return seq_list_start(&slab_caches, *pos);
4199}
4200
4201static inline int add_caller(unsigned long *n, unsigned long v)
4202{
4203	unsigned long *p;
4204	int l;
4205	if (!v)
4206		return 1;
4207	l = n[1];
4208	p = n + 2;
4209	while (l) {
4210		int i = l/2;
4211		unsigned long *q = p + 2 * i;
4212		if (*q == v) {
4213			q[1]++;
4214			return 1;
4215		}
4216		if (*q > v) {
4217			l = i;
4218		} else {
4219			p = q + 2;
4220			l -= i + 1;
4221		}
4222	}
4223	if (++n[1] == n[0])
4224		return 0;
4225	memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
4226	p[0] = v;
4227	p[1] = 1;
4228	return 1;
4229}
4230
4231static void handle_slab(unsigned long *n, struct kmem_cache *c,
4232						struct page *page)
4233{
4234	void *p;
4235	int i, j;
4236
4237	if (n[0] == n[1])
4238		return;
4239	for (i = 0, p = page->s_mem; i < c->num; i++, p += c->size) {
4240		bool active = true;
4241
4242		for (j = page->active; j < c->num; j++) {
4243			/* Skip freed item */
4244			if (get_free_obj(page, j) == i) {
4245				active = false;
4246				break;
4247			}
4248		}
4249		if (!active)
4250			continue;
4251
4252		if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
4253			return;
4254	}
4255}
4256
4257static void show_symbol(struct seq_file *m, unsigned long address)
4258{
4259#ifdef CONFIG_KALLSYMS
4260	unsigned long offset, size;
4261	char modname[MODULE_NAME_LEN], name[KSYM_NAME_LEN];
4262
4263	if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
4264		seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
4265		if (modname[0])
4266			seq_printf(m, " [%s]", modname);
4267		return;
4268	}
4269#endif
4270	seq_printf(m, "%p", (void *)address);
4271}
4272
4273static int leaks_show(struct seq_file *m, void *p)
4274{
4275	struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list);
4276	struct page *page;
4277	struct kmem_cache_node *n;
4278	const char *name;
4279	unsigned long *x = m->private;
4280	int node;
4281	int i;
4282
4283	if (!(cachep->flags & SLAB_STORE_USER))
4284		return 0;
4285	if (!(cachep->flags & SLAB_RED_ZONE))
4286		return 0;
4287
4288	/* OK, we can do it */
4289
4290	x[1] = 0;
4291
4292	for_each_online_node(node) {
4293		n = cachep->node[node];
4294		if (!n)
4295			continue;
4296
4297		check_irq_on();
4298		spin_lock_irq(&n->list_lock);
4299
4300		list_for_each_entry(page, &n->slabs_full, lru)
4301			handle_slab(x, cachep, page);
4302		list_for_each_entry(page, &n->slabs_partial, lru)
4303			handle_slab(x, cachep, page);
4304		spin_unlock_irq(&n->list_lock);
4305	}
4306	name = cachep->name;
4307	if (x[0] == x[1]) {
4308		/* Increase the buffer size */
4309		mutex_unlock(&slab_mutex);
4310		m->private = kzalloc(x[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
4311		if (!m->private) {
4312			/* Too bad, we are really out */
4313			m->private = x;
4314			mutex_lock(&slab_mutex);
4315			return -ENOMEM;
4316		}
4317		*(unsigned long *)m->private = x[0] * 2;
4318		kfree(x);
4319		mutex_lock(&slab_mutex);
4320		/* Now make sure this entry will be retried */
4321		m->count = m->size;
4322		return 0;
4323	}
4324	for (i = 0; i < x[1]; i++) {
4325		seq_printf(m, "%s: %lu ", name, x[2*i+3]);
4326		show_symbol(m, x[2*i+2]);
4327		seq_putc(m, '\n');
4328	}
4329
4330	return 0;
4331}
4332
4333static const struct seq_operations slabstats_op = {
4334	.start = leaks_start,
4335	.next = slab_next,
4336	.stop = slab_stop,
4337	.show = leaks_show,
4338};
4339
4340static int slabstats_open(struct inode *inode, struct file *file)
4341{
4342	unsigned long *n = kzalloc(PAGE_SIZE, GFP_KERNEL);
4343	int ret = -ENOMEM;
4344	if (n) {
4345		ret = seq_open(file, &slabstats_op);
4346		if (!ret) {
4347			struct seq_file *m = file->private_data;
4348			*n = PAGE_SIZE / (2 * sizeof(unsigned long));
4349			m->private = n;
4350			n = NULL;
4351		}
4352		kfree(n);
4353	}
4354	return ret;
4355}
4356
4357static const struct file_operations proc_slabstats_operations = {
4358	.open		= slabstats_open,
4359	.read		= seq_read,
4360	.llseek		= seq_lseek,
4361	.release	= seq_release_private,
4362};
4363#endif
4364
4365static int __init slab_proc_init(void)
4366{
4367#ifdef CONFIG_DEBUG_SLAB_LEAK
4368	proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
4369#endif
4370	return 0;
4371}
4372module_init(slab_proc_init);
4373#endif
4374
4375/**
4376 * ksize - get the actual amount of memory allocated for a given object
4377 * @objp: Pointer to the object
4378 *
4379 * kmalloc may internally round up allocations and return more memory
4380 * than requested. ksize() can be used to determine the actual amount of
4381 * memory allocated. The caller may use this additional memory, even though
4382 * a smaller amount of memory was initially specified with the kmalloc call.
4383 * The caller must guarantee that objp points to a valid object previously
4384 * allocated with either kmalloc() or kmem_cache_alloc(). The object
4385 * must not be freed during the duration of the call.
4386 */
4387size_t ksize(const void *objp)
4388{
4389	BUG_ON(!objp);
4390	if (unlikely(objp == ZERO_SIZE_PTR))
4391		return 0;
4392
4393	return virt_to_cache(objp)->object_size;
4394}
4395EXPORT_SYMBOL(ksize);
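/*
 * Editor's note: a sketch of using ksize() to discover the usable size of a
 * kmalloc allocation. The request size is hypothetical; 128 assumes the
 * usual kmalloc size classes.
 */
#if 0	/* illustrative sketch only */
static void example_ksize_use(void)
{
	char *buf = kmalloc(100, GFP_KERNEL);

	if (!buf)
		return;
	/* likely reports 128: the kmalloc-128 cache backs a 100-byte request */
	pr_info("usable size: %zu\n", ksize(buf));
	kfree(buf);
}
#endif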