   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * SLUB: A slab allocator that limits cache line use instead of queuing
   4 * objects in per cpu and per node lists.
   5 *
   6 * The allocator synchronizes using per slab locks or atomic operations
   7 * and only uses a centralized lock to manage a pool of partial slabs.
   8 *
   9 * (C) 2007 SGI, Christoph Lameter
  10 * (C) 2011 Linux Foundation, Christoph Lameter
  11 */
  12
  13#include <linux/mm.h>
  14#include <linux/swap.h> /* struct reclaim_state */
  15#include <linux/module.h>
  16#include <linux/bit_spinlock.h>
  17#include <linux/interrupt.h>
  18#include <linux/bitops.h>
  19#include <linux/slab.h>
  20#include "slab.h"
  21#include <linux/proc_fs.h>
  22#include <linux/seq_file.h>
  23#include <linux/kasan.h>
  24#include <linux/cpu.h>
  25#include <linux/cpuset.h>
  26#include <linux/mempolicy.h>
  27#include <linux/ctype.h>
  28#include <linux/debugobjects.h>
  29#include <linux/kallsyms.h>
  30#include <linux/memory.h>
  31#include <linux/math64.h>
  32#include <linux/fault-inject.h>
  33#include <linux/stacktrace.h>
  34#include <linux/prefetch.h>
  35#include <linux/memcontrol.h>
  36#include <linux/random.h>
  37
  38#include <trace/events/kmem.h>
  39
  40#include "internal.h"
  41
  42/*
  43 * Lock order:
  44 *   1. slab_mutex (Global Mutex)
  45 *   2. node->list_lock
  46 *   3. slab_lock(page) (Only on some arches and for debugging)
  47 *
  48 *   slab_mutex
  49 *
  50 *   The role of the slab_mutex is to protect the list of all the slabs
  51 *   and to synchronize major metadata changes to slab cache structures.
  52 *
  53 *   The slab_lock is only used for debugging and on arches that do not
  54 *   have the ability to do a cmpxchg_double. It only protects:
  55 *	A. page->freelist	-> List of object free in a page
  56 *	B. page->inuse		-> Number of objects in use
  57 *	C. page->objects	-> Number of objects in page
  58 *	D. page->frozen		-> frozen state
  59 *
  60 *   If a slab is frozen then it is exempt from list management. It is not
  61 *   on any list except per cpu partial list. The processor that froze the
  62 *   slab is the one who can perform list operations on the page. Other
  63 *   processors may put objects onto the freelist but the processor that
  64 *   froze the slab is the only one that can retrieve the objects from the
  65 *   page's freelist.
  66 *
  67 *   The list_lock protects the partial and full list on each node and
  68 *   the partial slab counter. If taken then no new slabs may be added or
   69 *   removed from the lists nor may the number of partial slabs be modified.
  70 *   (Note that the total number of slabs is an atomic value that may be
  71 *   modified without taking the list lock).
  72 *
  73 *   The list_lock is a centralized lock and thus we avoid taking it as
  74 *   much as possible. As long as SLUB does not have to handle partial
  75 *   slabs, operations can continue without any centralized lock. F.e.
  76 *   allocating a long series of objects that fill up slabs does not require
  77 *   the list lock.
  78 *   Interrupts are disabled during allocation and deallocation in order to
  79 *   make the slab allocator safe to use in the context of an irq. In addition
  80 *   interrupts are disabled to ensure that the processor does not change
  81 *   while handling per_cpu slabs, due to kernel preemption.
  82 *
  83 * SLUB assigns one slab for allocation to each processor.
  84 * Allocations only occur from these slabs called cpu slabs.
  85 *
  86 * Slabs with free elements are kept on a partial list and during regular
  87 * operations no list for full slabs is used. If an object in a full slab is
  88 * freed then the slab will show up again on the partial lists.
  89 * We track full slabs for debugging purposes though because otherwise we
  90 * cannot scan all objects.
  91 *
   92 * Slabs are freed when they become empty. Teardown and setup are
   93 * minimal so we rely on the page allocator's per cpu caches for
  94 * fast frees and allocs.
  95 *
  96 * Overloading of page flags that are otherwise used for LRU management.
  97 *
  98 * PageActive 		The slab is frozen and exempt from list processing.
  99 * 			This means that the slab is dedicated to a purpose
 100 * 			such as satisfying allocations for a specific
 101 * 			processor. Objects may be freed in the slab while
 102 * 			it is frozen but slab_free will then skip the usual
 103 * 			list operations. It is up to the processor holding
 104 * 			the slab to integrate the slab into the slab lists
 105 * 			when the slab is no longer needed.
 106 *
 107 * 			One use of this flag is to mark slabs that are
 108 * 			used for allocations. Then such a slab becomes a cpu
 109 * 			slab. The cpu slab may be equipped with an additional
 110 * 			freelist that allows lockless access to
 111 * 			free objects in addition to the regular freelist
 112 * 			that requires the slab lock.
 113 *
 114 * PageError		Slab requires special handling due to debug
 115 * 			options set. This moves	slab handling out of
 116 * 			the fast path and disables lockless freelists.
 117 */
 118
 119static inline int kmem_cache_debug(struct kmem_cache *s)
 120{
 121#ifdef CONFIG_SLUB_DEBUG
 122	return unlikely(s->flags & SLAB_DEBUG_FLAGS);
 123#else
 124	return 0;
 125#endif
 126}
 127
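/*
 * When red zoning is active each object is preceded by a left redzone of
 * s->red_left_pad bytes, so the pointer handed out for an object does not
 * start at the beginning of its slot. fixup_red_left() skips that left pad;
 * restore_red_left() further down performs the inverse when debug code
 * needs the true start of the slot.
 */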
 128void *fixup_red_left(struct kmem_cache *s, void *p)
 129{
 130	if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE)
 131		p += s->red_left_pad;
 132
 133	return p;
 134}
 135
 136static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
 137{
 138#ifdef CONFIG_SLUB_CPU_PARTIAL
 139	return !kmem_cache_debug(s);
 140#else
 141	return false;
 142#endif
 143}
 144
 145/*
 146 * Issues still to be resolved:
 147 *
 148 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
 149 *
 150 * - Variable sizing of the per node arrays
 151 */
 152
 153/* Enable to test recovery from slab corruption on boot */
 154#undef SLUB_RESILIENCY_TEST
 155
 156/* Enable to log cmpxchg failures */
 157#undef SLUB_DEBUG_CMPXCHG
 158
 159/*
  160 * Minimum number of partial slabs. These will be left on the partial
 161 * lists even if they are empty. kmem_cache_shrink may reclaim them.
 162 */
 163#define MIN_PARTIAL 5
 164
 165/*
 166 * Maximum number of desirable partial slabs.
 167 * The existence of more partial slabs makes kmem_cache_shrink
 168 * sort the partial list by the number of objects in use.
 169 */
 170#define MAX_PARTIAL 10
 171
 172#define DEBUG_DEFAULT_FLAGS (SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \
 173				SLAB_POISON | SLAB_STORE_USER)
 174
 175/*
 176 * These debug flags cannot use CMPXCHG because there might be consistency
 177 * issues when checking or reading debug information
 178 */
 179#define SLAB_NO_CMPXCHG (SLAB_CONSISTENCY_CHECKS | SLAB_STORE_USER | \
 180				SLAB_TRACE)
 181
 182
 183/*
 184 * Debugging flags that require metadata to be stored in the slab.  These get
 185 * disabled when slub_debug=O is used and a cache's min order increases with
 186 * metadata.
 187 */
 188#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
 189
 190#define OO_SHIFT	16
 191#define OO_MASK		((1 << OO_SHIFT) - 1)
 192#define MAX_OBJS_PER_PAGE	32767 /* since page.objects is u15 */
 193
 194/* Internal SLUB flags */
 195/* Poison object */
 196#define __OBJECT_POISON		((slab_flags_t __force)0x80000000U)
 197/* Use cmpxchg_double */
 198#define __CMPXCHG_DOUBLE	((slab_flags_t __force)0x40000000U)
 199
 200/*
 201 * Tracking user of a slab.
 202 */
 203#define TRACK_ADDRS_COUNT 16
 204struct track {
 205	unsigned long addr;	/* Called from address */
 206#ifdef CONFIG_STACKTRACE
  207	unsigned long addrs[TRACK_ADDRS_COUNT];	/* Call stack leading to the operation */
 208#endif
 209	int cpu;		/* Was running on cpu */
 210	int pid;		/* Pid context */
 211	unsigned long when;	/* When did the operation occur */
 212};
 213
 214enum track_item { TRACK_ALLOC, TRACK_FREE };
 215
 216#ifdef CONFIG_SYSFS
 217static int sysfs_slab_add(struct kmem_cache *);
 218static int sysfs_slab_alias(struct kmem_cache *, const char *);
 219static void memcg_propagate_slab_attrs(struct kmem_cache *s);
 220static void sysfs_slab_remove(struct kmem_cache *s);
 221#else
 222static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
 223static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
 224							{ return 0; }
 225static inline void memcg_propagate_slab_attrs(struct kmem_cache *s) { }
 226static inline void sysfs_slab_remove(struct kmem_cache *s) { }
 227#endif
 228
 229static inline void stat(const struct kmem_cache *s, enum stat_item si)
 230{
 231#ifdef CONFIG_SLUB_STATS
 232	/*
 233	 * The rmw is racy on a preemptible kernel but this is acceptable, so
 234	 * avoid this_cpu_add()'s irq-disable overhead.
 235	 */
 236	raw_cpu_inc(s->cpu_slab->stat[si]);
 237#endif
 238}
 239
 240/********************************************************************
 241 * 			Core slab cache functions
 242 *******************************************************************/
 243
 244/*
 245 * Returns freelist pointer (ptr). With hardening, this is obfuscated
 246 * with an XOR of the address where the pointer is held and a per-cache
 247 * random number.
 248 */
 249static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
 250				 unsigned long ptr_addr)
 251{
 252#ifdef CONFIG_SLAB_FREELIST_HARDENED
 253	/*
 254	 * When CONFIG_KASAN_SW_TAGS is enabled, ptr_addr might be tagged.
 255	 * Normally, this doesn't cause any issues, as both set_freepointer()
 256	 * and get_freepointer() are called with a pointer with the same tag.
 257	 * However, there are some issues with CONFIG_SLUB_DEBUG code. For
  258	 * example, when __free_slab() iterates over objects in a cache, it
  259	 * passes untagged pointers to check_object(). check_object() in turn
 260	 * calls get_freepointer() with an untagged pointer, which causes the
 261	 * freepointer to be restored incorrectly.
 262	 */
 263	return (void *)((unsigned long)ptr ^ s->random ^
 264			(unsigned long)kasan_reset_tag((void *)ptr_addr));
 265#else
 266	return ptr;
 267#endif
 268}
 269
 270/* Returns the freelist pointer recorded at location ptr_addr. */
 271static inline void *freelist_dereference(const struct kmem_cache *s,
 272					 void *ptr_addr)
 273{
 274	return freelist_ptr(s, (void *)*(unsigned long *)(ptr_addr),
 275			    (unsigned long)ptr_addr);
 276}
 277
 278static inline void *get_freepointer(struct kmem_cache *s, void *object)
 279{
 280	return freelist_dereference(s, object + s->offset);
 281}
 282
 283static void prefetch_freepointer(const struct kmem_cache *s, void *object)
 284{
 285	prefetch(object + s->offset);
 286}
 287
 288static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
 289{
 290	unsigned long freepointer_addr;
 291	void *p;
 292
 293	if (!debug_pagealloc_enabled())
 294		return get_freepointer(s, object);
 295
 296	freepointer_addr = (unsigned long)object + s->offset;
 297	probe_kernel_read(&p, (void **)freepointer_addr, sizeof(p));
 298	return freelist_ptr(s, p, freepointer_addr);
 299}
 300
 301static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
 302{
 303	unsigned long freeptr_addr = (unsigned long)object + s->offset;
 304
 305#ifdef CONFIG_SLAB_FREELIST_HARDENED
 306	BUG_ON(object == fp); /* naive detection of double free or corruption */
 307#endif
 308
 309	*(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr);
 310}
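/*
 * Taken together, set_freepointer() and get_freepointer() thread the free
 * list through the objects themselves: the pointer to the next free object
 * is stored at object + s->offset. With CONFIG_SLAB_FREELIST_HARDENED the
 * stored value is obfuscated, roughly:
 *
 *	stored = next ^ s->random ^ (address where the pointer is kept)
 *
 * Reading applies the same XOR again, which recovers the plain pointer, so
 * a leaked slab object does not directly expose freelist addresses.
 */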
 311
 312/* Loop over all objects in a slab */
 313#define for_each_object(__p, __s, __addr, __objects) \
 314	for (__p = fixup_red_left(__s, __addr); \
 315		__p < (__addr) + (__objects) * (__s)->size; \
 316		__p += (__s)->size)
 317
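/*
 * Typical use of for_each_object(): walk every object slot of a slab given
 * the slab base address and the object count, e.g. as __free_slab() does
 * below when SLAB_CONSISTENCY_CHECKS is set:
 *
 *	for_each_object(p, s, page_address(page), page->objects)
 *		check_object(s, page, p, SLUB_RED_INACTIVE);
 */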
 318/* Determine object index from a given position */
 319static inline unsigned int slab_index(void *p, struct kmem_cache *s, void *addr)
 320{
 321	return (kasan_reset_tag(p) - addr) / s->size;
 322}
 323
 324static inline unsigned int order_objects(unsigned int order, unsigned int size)
 325{
 326	return ((unsigned int)PAGE_SIZE << order) / size;
 327}
 328
 329static inline struct kmem_cache_order_objects oo_make(unsigned int order,
 330		unsigned int size)
 331{
 332	struct kmem_cache_order_objects x = {
 333		(order << OO_SHIFT) + order_objects(order, size)
 334	};
 335
 336	return x;
 337}
 338
 339static inline unsigned int oo_order(struct kmem_cache_order_objects x)
 340{
 341	return x.x >> OO_SHIFT;
 342}
 343
 344static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
 345{
 346	return x.x & OO_MASK;
 347}
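/*
 * kmem_cache_order_objects packs the page order and the object count into
 * one word: the order lives above OO_SHIFT, the count in the low OO_MASK
 * bits. Illustrative numbers, assuming 4 KiB pages: a cache with 64 byte
 * objects backed by order-1 slabs gets order_objects(1, 64) = 128, so
 * oo.x = (1 << 16) + 128; oo_order() then returns 1 and oo_objects()
 * returns 128.
 */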
 348
 349/*
 350 * Per slab locking using the pagelock
 351 */
 352static __always_inline void slab_lock(struct page *page)
 353{
 354	VM_BUG_ON_PAGE(PageTail(page), page);
 355	bit_spin_lock(PG_locked, &page->flags);
 356}
 357
 358static __always_inline void slab_unlock(struct page *page)
 359{
 360	VM_BUG_ON_PAGE(PageTail(page), page);
 361	__bit_spin_unlock(PG_locked, &page->flags);
 362}
 363
 364/* Interrupts must be disabled (for the fallback code to work right) */
 365static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
 366		void *freelist_old, unsigned long counters_old,
 367		void *freelist_new, unsigned long counters_new,
 368		const char *n)
 369{
 370	VM_BUG_ON(!irqs_disabled());
 371#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
 372    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
 373	if (s->flags & __CMPXCHG_DOUBLE) {
 374		if (cmpxchg_double(&page->freelist, &page->counters,
 375				   freelist_old, counters_old,
 376				   freelist_new, counters_new))
 377			return true;
 378	} else
 379#endif
 380	{
 381		slab_lock(page);
 382		if (page->freelist == freelist_old &&
 383					page->counters == counters_old) {
 384			page->freelist = freelist_new;
 385			page->counters = counters_new;
 386			slab_unlock(page);
 387			return true;
 388		}
 389		slab_unlock(page);
 390	}
 391
 392	cpu_relax();
 393	stat(s, CMPXCHG_DOUBLE_FAIL);
 394
 395#ifdef SLUB_DEBUG_CMPXCHG
 396	pr_info("%s %s: cmpxchg double redo ", n, s->name);
 397#endif
 398
 399	return false;
 400}
 401
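/*
 * Same operation as __cmpxchg_double_slab() above, but usable with
 * interrupts enabled: the slab_lock() fallback path saves and restores the
 * interrupt state itself instead of relying on the caller having disabled
 * interrupts already.
 */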
 402static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
 403		void *freelist_old, unsigned long counters_old,
 404		void *freelist_new, unsigned long counters_new,
 405		const char *n)
 406{
 407#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
 408    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
 409	if (s->flags & __CMPXCHG_DOUBLE) {
 410		if (cmpxchg_double(&page->freelist, &page->counters,
 411				   freelist_old, counters_old,
 412				   freelist_new, counters_new))
 413			return true;
 414	} else
 415#endif
 416	{
 417		unsigned long flags;
 418
 419		local_irq_save(flags);
 420		slab_lock(page);
 421		if (page->freelist == freelist_old &&
 422					page->counters == counters_old) {
 423			page->freelist = freelist_new;
 424			page->counters = counters_new;
 425			slab_unlock(page);
 426			local_irq_restore(flags);
 427			return true;
 428		}
 429		slab_unlock(page);
 430		local_irq_restore(flags);
 431	}
 432
 433	cpu_relax();
 434	stat(s, CMPXCHG_DOUBLE_FAIL);
 435
 436#ifdef SLUB_DEBUG_CMPXCHG
 437	pr_info("%s %s: cmpxchg double redo ", n, s->name);
 438#endif
 439
 440	return false;
 441}
 442
 443#ifdef CONFIG_SLUB_DEBUG
 444/*
  445 * Determine a map of objects in use on a page.
 446 *
 447 * Node listlock must be held to guarantee that the page does
 448 * not vanish from under us.
 449 */
 450static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
 451{
 452	void *p;
 453	void *addr = page_address(page);
 454
 455	for (p = page->freelist; p; p = get_freepointer(s, p))
 456		set_bit(slab_index(p, s, addr), map);
 457}
 458
 459static inline unsigned int size_from_object(struct kmem_cache *s)
 460{
 461	if (s->flags & SLAB_RED_ZONE)
 462		return s->size - s->red_left_pad;
 463
 464	return s->size;
 465}
 466
 467static inline void *restore_red_left(struct kmem_cache *s, void *p)
 468{
 469	if (s->flags & SLAB_RED_ZONE)
 470		p -= s->red_left_pad;
 471
 472	return p;
 473}
 474
 475/*
 476 * Debug settings:
 477 */
 478#if defined(CONFIG_SLUB_DEBUG_ON)
 479static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS;
 480#else
 481static slab_flags_t slub_debug;
 482#endif
 483
 484static char *slub_debug_slabs;
 485static int disable_higher_order_debug;
 486
 487/*
 488 * slub is about to manipulate internal object metadata.  This memory lies
 489 * outside the range of the allocated object, so accessing it would normally
 490 * be reported by kasan as a bounds error.  metadata_access_enable() is used
 491 * to tell kasan that these accesses are OK.
 492 */
 493static inline void metadata_access_enable(void)
 494{
 495	kasan_disable_current();
 496}
 497
 498static inline void metadata_access_disable(void)
 499{
 500	kasan_enable_current();
 501}
 502
 503/*
 504 * Object debugging
 505 */
 506
 507/* Verify that a pointer has an address that is valid within a slab page */
 508static inline int check_valid_pointer(struct kmem_cache *s,
 509				struct page *page, void *object)
 510{
 511	void *base;
 512
 513	if (!object)
 514		return 1;
 515
 516	base = page_address(page);
 517	object = kasan_reset_tag(object);
 518	object = restore_red_left(s, object);
 519	if (object < base || object >= base + page->objects * s->size ||
 520		(object - base) % s->size) {
 521		return 0;
 522	}
 523
 524	return 1;
 525}
 526
 527static void print_section(char *level, char *text, u8 *addr,
 528			  unsigned int length)
 529{
 530	metadata_access_enable();
 531	print_hex_dump(level, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
 532			length, 1);
 533	metadata_access_disable();
 534}
 535
 536static struct track *get_track(struct kmem_cache *s, void *object,
 537	enum track_item alloc)
 538{
 539	struct track *p;
 540
 541	if (s->offset)
 542		p = object + s->offset + sizeof(void *);
 543	else
 544		p = object + s->inuse;
 545
 546	return p + alloc;
 547}
 548
 549static void set_track(struct kmem_cache *s, void *object,
 550			enum track_item alloc, unsigned long addr)
 551{
 552	struct track *p = get_track(s, object, alloc);
 553
 554	if (addr) {
 555#ifdef CONFIG_STACKTRACE
 556		unsigned int nr_entries;
 557
 558		metadata_access_enable();
 559		nr_entries = stack_trace_save(p->addrs, TRACK_ADDRS_COUNT, 3);
 560		metadata_access_disable();
 561
 562		if (nr_entries < TRACK_ADDRS_COUNT)
 563			p->addrs[nr_entries] = 0;
 564#endif
 565		p->addr = addr;
 566		p->cpu = smp_processor_id();
 567		p->pid = current->pid;
 568		p->when = jiffies;
 569	} else {
 570		memset(p, 0, sizeof(struct track));
 571	}
 572}
 573
 574static void init_tracking(struct kmem_cache *s, void *object)
 575{
 576	if (!(s->flags & SLAB_STORE_USER))
 577		return;
 578
 579	set_track(s, object, TRACK_FREE, 0UL);
 580	set_track(s, object, TRACK_ALLOC, 0UL);
 581}
 582
 583static void print_track(const char *s, struct track *t, unsigned long pr_time)
 584{
 585	if (!t->addr)
 586		return;
 587
 588	pr_err("INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
 589	       s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid);
 590#ifdef CONFIG_STACKTRACE
 591	{
 592		int i;
 593		for (i = 0; i < TRACK_ADDRS_COUNT; i++)
 594			if (t->addrs[i])
 595				pr_err("\t%pS\n", (void *)t->addrs[i]);
 596			else
 597				break;
 598	}
 599#endif
 600}
 601
 602static void print_tracking(struct kmem_cache *s, void *object)
 603{
 604	unsigned long pr_time = jiffies;
 605	if (!(s->flags & SLAB_STORE_USER))
 606		return;
 607
 608	print_track("Allocated", get_track(s, object, TRACK_ALLOC), pr_time);
 609	print_track("Freed", get_track(s, object, TRACK_FREE), pr_time);
 610}
 611
 612static void print_page_info(struct page *page)
 613{
 614	pr_err("INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
 615	       page, page->objects, page->inuse, page->freelist, page->flags);
 616
 617}
 618
 619static void slab_bug(struct kmem_cache *s, char *fmt, ...)
 620{
 621	struct va_format vaf;
 622	va_list args;
 623
 624	va_start(args, fmt);
 625	vaf.fmt = fmt;
 626	vaf.va = &args;
 627	pr_err("=============================================================================\n");
 628	pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf);
 629	pr_err("-----------------------------------------------------------------------------\n\n");
 630
 631	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
 632	va_end(args);
 633}
 634
 635static void slab_fix(struct kmem_cache *s, char *fmt, ...)
 636{
 637	struct va_format vaf;
 638	va_list args;
 639
 640	va_start(args, fmt);
 641	vaf.fmt = fmt;
 642	vaf.va = &args;
 643	pr_err("FIX %s: %pV\n", s->name, &vaf);
 644	va_end(args);
 645}
 646
 647static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
 648{
 649	unsigned int off;	/* Offset of last byte */
 650	u8 *addr = page_address(page);
 651
 652	print_tracking(s, p);
 653
 654	print_page_info(page);
 655
 656	pr_err("INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
 657	       p, p - addr, get_freepointer(s, p));
 658
 659	if (s->flags & SLAB_RED_ZONE)
 660		print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
 661			      s->red_left_pad);
 662	else if (p > addr + 16)
 663		print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
 664
 665	print_section(KERN_ERR, "Object ", p,
 666		      min_t(unsigned int, s->object_size, PAGE_SIZE));
 667	if (s->flags & SLAB_RED_ZONE)
 668		print_section(KERN_ERR, "Redzone ", p + s->object_size,
 669			s->inuse - s->object_size);
 670
 671	if (s->offset)
 672		off = s->offset + sizeof(void *);
 673	else
 674		off = s->inuse;
 675
 676	if (s->flags & SLAB_STORE_USER)
 677		off += 2 * sizeof(struct track);
 678
 679	off += kasan_metadata_size(s);
 680
 681	if (off != size_from_object(s))
 682		/* Beginning of the filler is the free pointer */
 683		print_section(KERN_ERR, "Padding ", p + off,
 684			      size_from_object(s) - off);
 685
 686	dump_stack();
 687}
 688
 689void object_err(struct kmem_cache *s, struct page *page,
 690			u8 *object, char *reason)
 691{
 692	slab_bug(s, "%s", reason);
 693	print_trailer(s, page, object);
 694}
 695
 696static __printf(3, 4) void slab_err(struct kmem_cache *s, struct page *page,
 697			const char *fmt, ...)
 698{
 699	va_list args;
 700	char buf[100];
 701
 702	va_start(args, fmt);
 703	vsnprintf(buf, sizeof(buf), fmt, args);
 704	va_end(args);
 705	slab_bug(s, "%s", buf);
 706	print_page_info(page);
 707	dump_stack();
 708}
 709
 710static void init_object(struct kmem_cache *s, void *object, u8 val)
 711{
 712	u8 *p = object;
 713
 714	if (s->flags & SLAB_RED_ZONE)
 715		memset(p - s->red_left_pad, val, s->red_left_pad);
 716
 717	if (s->flags & __OBJECT_POISON) {
 718		memset(p, POISON_FREE, s->object_size - 1);
 719		p[s->object_size - 1] = POISON_END;
 720	}
 721
 722	if (s->flags & SLAB_RED_ZONE)
 723		memset(p + s->object_size, val, s->inuse - s->object_size);
 724}
 725
 726static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
 727						void *from, void *to)
 728{
 729	slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
 730	memset(from, data, to - from);
 731}
 732
 733static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
 734			u8 *object, char *what,
 735			u8 *start, unsigned int value, unsigned int bytes)
 736{
 737	u8 *fault;
 738	u8 *end;
 739
 740	metadata_access_enable();
 741	fault = memchr_inv(start, value, bytes);
 742	metadata_access_disable();
 743	if (!fault)
 744		return 1;
 745
 746	end = start + bytes;
 747	while (end > fault && end[-1] == value)
 748		end--;
 749
 750	slab_bug(s, "%s overwritten", what);
 751	pr_err("INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
 752					fault, end - 1, fault[0], value);
 753	print_trailer(s, page, object);
 754
 755	restore_bytes(s, what, value, fault, end);
 756	return 0;
 757}
 758
 759/*
 760 * Object layout:
 761 *
 762 * object address
 763 * 	Bytes of the object to be managed.
 764 * 	If the freepointer may overlay the object then the free
 765 * 	pointer is the first word of the object.
 766 *
 767 * 	Poisoning uses 0x6b (POISON_FREE) and the last byte is
 768 * 	0xa5 (POISON_END)
 769 *
 770 * object + s->object_size
 771 * 	Padding to reach word boundary. This is also used for Redzoning.
 772 * 	Padding is extended by another word if Redzoning is enabled and
 773 * 	object_size == inuse.
 774 *
 775 * 	We fill with 0xbb (RED_INACTIVE) for inactive objects and with
 776 * 	0xcc (RED_ACTIVE) for objects in use.
 777 *
 778 * object + s->inuse
 779 * 	Meta data starts here.
 780 *
 781 * 	A. Free pointer (if we cannot overwrite object on free)
 782 * 	B. Tracking data for SLAB_STORE_USER
  783 * 	C. Padding to reach required alignment boundary or at minimum
 784 * 		one word if debugging is on to be able to detect writes
 785 * 		before the word boundary.
 786 *
 787 *	Padding is done using 0x5a (POISON_INUSE)
 788 *
 789 * object + s->size
 790 * 	Nothing is used beyond s->size.
 791 *
 792 * If slabcaches are merged then the object_size and inuse boundaries are mostly
 793 * ignored. And therefore no slab options that rely on these boundaries
 794 * may be used with merged slabcaches.
 795 */
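/*
 * Sketch of one slot for a cache created with SLAB_RED_ZONE | SLAB_POISON |
 * SLAB_STORE_USER (ordering only; exact offsets depend on alignment,
 * CONFIG_STACKTRACE and KASAN):
 *
 *	[ left redzone, s->red_left_pad bytes  ]  0xbb free / 0xcc in use
 *	[ object payload, s->object_size bytes ]  0x6b ... 0xa5 while free
 *	[ right redzone up to s->inuse         ]  0xbb free / 0xcc in use
 *	[ free pointer (s->offset != 0 here)   ]
 *	[ two struct track records alloc/free  ]
 *	[ padding up to s->size                ]  0x5a
 */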
 796
 797static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
 798{
 799	unsigned long off = s->inuse;	/* The end of info */
 800
 801	if (s->offset)
 802		/* Freepointer is placed after the object. */
 803		off += sizeof(void *);
 804
 805	if (s->flags & SLAB_STORE_USER)
 806		/* We also have user information there */
 807		off += 2 * sizeof(struct track);
 808
 809	off += kasan_metadata_size(s);
 810
 811	if (size_from_object(s) == off)
 812		return 1;
 813
 814	return check_bytes_and_report(s, page, p, "Object padding",
 815			p + off, POISON_INUSE, size_from_object(s) - off);
 816}
 817
 818/* Check the pad bytes at the end of a slab page */
 819static int slab_pad_check(struct kmem_cache *s, struct page *page)
 820{
 821	u8 *start;
 822	u8 *fault;
 823	u8 *end;
 824	u8 *pad;
 825	int length;
 826	int remainder;
 827
 828	if (!(s->flags & SLAB_POISON))
 829		return 1;
 830
 831	start = page_address(page);
 832	length = page_size(page);
 833	end = start + length;
 834	remainder = length % s->size;
 835	if (!remainder)
 836		return 1;
 837
 838	pad = end - remainder;
 839	metadata_access_enable();
 840	fault = memchr_inv(pad, POISON_INUSE, remainder);
 841	metadata_access_disable();
 842	if (!fault)
 843		return 1;
 844	while (end > fault && end[-1] == POISON_INUSE)
 845		end--;
 846
 847	slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
 848	print_section(KERN_ERR, "Padding ", pad, remainder);
 849
 850	restore_bytes(s, "slab padding", POISON_INUSE, fault, end);
 851	return 0;
 852}
 853
 854static int check_object(struct kmem_cache *s, struct page *page,
 855					void *object, u8 val)
 856{
 857	u8 *p = object;
 858	u8 *endobject = object + s->object_size;
 859
 860	if (s->flags & SLAB_RED_ZONE) {
 861		if (!check_bytes_and_report(s, page, object, "Redzone",
 862			object - s->red_left_pad, val, s->red_left_pad))
 863			return 0;
 864
 865		if (!check_bytes_and_report(s, page, object, "Redzone",
 866			endobject, val, s->inuse - s->object_size))
 867			return 0;
 868	} else {
 869		if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
 870			check_bytes_and_report(s, page, p, "Alignment padding",
 871				endobject, POISON_INUSE,
 872				s->inuse - s->object_size);
 873		}
 874	}
 875
 876	if (s->flags & SLAB_POISON) {
 877		if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
 878			(!check_bytes_and_report(s, page, p, "Poison", p,
 879					POISON_FREE, s->object_size - 1) ||
 880			 !check_bytes_and_report(s, page, p, "Poison",
 881				p + s->object_size - 1, POISON_END, 1)))
 882			return 0;
 883		/*
 884		 * check_pad_bytes cleans up on its own.
 885		 */
 886		check_pad_bytes(s, page, p);
 887	}
 888
 889	if (!s->offset && val == SLUB_RED_ACTIVE)
 890		/*
 891		 * Object and freepointer overlap. Cannot check
 892		 * freepointer while object is allocated.
 893		 */
 894		return 1;
 895
 896	/* Check free pointer validity */
 897	if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
 898		object_err(s, page, p, "Freepointer corrupt");
 899		/*
 900		 * No choice but to zap it and thus lose the remainder
 901		 * of the free objects in this slab. May cause
 902		 * another error because the object count is now wrong.
 903		 */
 904		set_freepointer(s, p, NULL);
 905		return 0;
 906	}
 907	return 1;
 908}
 909
 910static int check_slab(struct kmem_cache *s, struct page *page)
 911{
 912	int maxobj;
 913
 914	VM_BUG_ON(!irqs_disabled());
 915
 916	if (!PageSlab(page)) {
 917		slab_err(s, page, "Not a valid slab page");
 918		return 0;
 919	}
 920
 921	maxobj = order_objects(compound_order(page), s->size);
 922	if (page->objects > maxobj) {
 923		slab_err(s, page, "objects %u > max %u",
 924			page->objects, maxobj);
 925		return 0;
 926	}
 927	if (page->inuse > page->objects) {
 928		slab_err(s, page, "inuse %u > max %u",
 929			page->inuse, page->objects);
 930		return 0;
 931	}
 932	/* Slab_pad_check fixes things up after itself */
 933	slab_pad_check(s, page);
 934	return 1;
 935}
 936
 937/*
 938 * Determine if a certain object on a page is on the freelist. Must hold the
 939 * slab lock to guarantee that the chains are in a consistent state.
 940 */
 941static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
 942{
 943	int nr = 0;
 944	void *fp;
 945	void *object = NULL;
 946	int max_objects;
 947
 948	fp = page->freelist;
 949	while (fp && nr <= page->objects) {
 950		if (fp == search)
 951			return 1;
 952		if (!check_valid_pointer(s, page, fp)) {
 953			if (object) {
 954				object_err(s, page, object,
 955					"Freechain corrupt");
 956				set_freepointer(s, object, NULL);
 957			} else {
 958				slab_err(s, page, "Freepointer corrupt");
 959				page->freelist = NULL;
 960				page->inuse = page->objects;
 961				slab_fix(s, "Freelist cleared");
 962				return 0;
 963			}
 964			break;
 965		}
 966		object = fp;
 967		fp = get_freepointer(s, object);
 968		nr++;
 969	}
 970
 971	max_objects = order_objects(compound_order(page), s->size);
 972	if (max_objects > MAX_OBJS_PER_PAGE)
 973		max_objects = MAX_OBJS_PER_PAGE;
 974
 975	if (page->objects != max_objects) {
 976		slab_err(s, page, "Wrong number of objects. Found %d but should be %d",
 977			 page->objects, max_objects);
 978		page->objects = max_objects;
 979		slab_fix(s, "Number of objects adjusted.");
 980	}
 981	if (page->inuse != page->objects - nr) {
 982		slab_err(s, page, "Wrong object count. Counter is %d but counted were %d",
 983			 page->inuse, page->objects - nr);
 984		page->inuse = page->objects - nr;
 985		slab_fix(s, "Object count adjusted.");
 986	}
 987	return search == NULL;
 988}
 989
 990static void trace(struct kmem_cache *s, struct page *page, void *object,
 991								int alloc)
 992{
 993	if (s->flags & SLAB_TRACE) {
 994		pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
 995			s->name,
 996			alloc ? "alloc" : "free",
 997			object, page->inuse,
 998			page->freelist);
 999
1000		if (!alloc)
1001			print_section(KERN_INFO, "Object ", (void *)object,
1002					s->object_size);
1003
1004		dump_stack();
1005	}
1006}
1007
1008/*
1009 * Tracking of fully allocated slabs for debugging purposes.
1010 */
1011static void add_full(struct kmem_cache *s,
1012	struct kmem_cache_node *n, struct page *page)
1013{
1014	if (!(s->flags & SLAB_STORE_USER))
1015		return;
1016
1017	lockdep_assert_held(&n->list_lock);
1018	list_add(&page->slab_list, &n->full);
1019}
1020
1021static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
1022{
1023	if (!(s->flags & SLAB_STORE_USER))
1024		return;
1025
1026	lockdep_assert_held(&n->list_lock);
1027	list_del(&page->slab_list);
1028}
1029
1030/* Tracking of the number of slabs for debugging purposes */
1031static inline unsigned long slabs_node(struct kmem_cache *s, int node)
1032{
1033	struct kmem_cache_node *n = get_node(s, node);
1034
1035	return atomic_long_read(&n->nr_slabs);
1036}
1037
1038static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1039{
1040	return atomic_long_read(&n->nr_slabs);
1041}
1042
1043static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
1044{
1045	struct kmem_cache_node *n = get_node(s, node);
1046
1047	/*
1048	 * May be called early in order to allocate a slab for the
1049	 * kmem_cache_node structure. Solve the chicken-egg
1050	 * dilemma by deferring the increment of the count during
1051	 * bootstrap (see early_kmem_cache_node_alloc).
1052	 */
1053	if (likely(n)) {
1054		atomic_long_inc(&n->nr_slabs);
1055		atomic_long_add(objects, &n->total_objects);
1056	}
1057}
1058static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
1059{
1060	struct kmem_cache_node *n = get_node(s, node);
1061
1062	atomic_long_dec(&n->nr_slabs);
1063	atomic_long_sub(objects, &n->total_objects);
1064}
1065
1066/* Object debug checks for alloc/free paths */
1067static void setup_object_debug(struct kmem_cache *s, struct page *page,
1068								void *object)
1069{
1070	if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
1071		return;
1072
1073	init_object(s, object, SLUB_RED_INACTIVE);
1074	init_tracking(s, object);
1075}
1076
1077static
1078void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr)
1079{
1080	if (!(s->flags & SLAB_POISON))
1081		return;
1082
1083	metadata_access_enable();
1084	memset(addr, POISON_INUSE, page_size(page));
1085	metadata_access_disable();
1086}
1087
1088static inline int alloc_consistency_checks(struct kmem_cache *s,
1089					struct page *page, void *object)
1090{
1091	if (!check_slab(s, page))
1092		return 0;
1093
1094	if (!check_valid_pointer(s, page, object)) {
1095		object_err(s, page, object, "Freelist Pointer check fails");
1096		return 0;
1097	}
1098
1099	if (!check_object(s, page, object, SLUB_RED_INACTIVE))
1100		return 0;
1101
1102	return 1;
1103}
1104
1105static noinline int alloc_debug_processing(struct kmem_cache *s,
1106					struct page *page,
1107					void *object, unsigned long addr)
1108{
1109	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1110		if (!alloc_consistency_checks(s, page, object))
1111			goto bad;
1112	}
1113
 1114	/* Success. Perform special debug activities for allocs */
1115	if (s->flags & SLAB_STORE_USER)
1116		set_track(s, object, TRACK_ALLOC, addr);
1117	trace(s, page, object, 1);
1118	init_object(s, object, SLUB_RED_ACTIVE);
1119	return 1;
1120
1121bad:
1122	if (PageSlab(page)) {
1123		/*
1124		 * If this is a slab page then lets do the best we can
1125		 * to avoid issues in the future. Marking all objects
1126		 * as used avoids touching the remaining objects.
1127		 */
1128		slab_fix(s, "Marking all objects used");
1129		page->inuse = page->objects;
1130		page->freelist = NULL;
1131	}
1132	return 0;
1133}
1134
1135static inline int free_consistency_checks(struct kmem_cache *s,
1136		struct page *page, void *object, unsigned long addr)
1137{
1138	if (!check_valid_pointer(s, page, object)) {
1139		slab_err(s, page, "Invalid object pointer 0x%p", object);
1140		return 0;
1141	}
1142
1143	if (on_freelist(s, page, object)) {
1144		object_err(s, page, object, "Object already free");
1145		return 0;
1146	}
1147
1148	if (!check_object(s, page, object, SLUB_RED_ACTIVE))
1149		return 0;
1150
1151	if (unlikely(s != page->slab_cache)) {
1152		if (!PageSlab(page)) {
1153			slab_err(s, page, "Attempt to free object(0x%p) outside of slab",
1154				 object);
1155		} else if (!page->slab_cache) {
1156			pr_err("SLUB <none>: no slab for object 0x%p.\n",
1157			       object);
1158			dump_stack();
1159		} else
1160			object_err(s, page, object,
1161					"page slab pointer corrupt.");
1162		return 0;
1163	}
1164	return 1;
1165}
1166
1167/* Supports checking bulk free of a constructed freelist */
1168static noinline int free_debug_processing(
1169	struct kmem_cache *s, struct page *page,
1170	void *head, void *tail, int bulk_cnt,
1171	unsigned long addr)
1172{
1173	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1174	void *object = head;
1175	int cnt = 0;
1176	unsigned long uninitialized_var(flags);
1177	int ret = 0;
1178
1179	spin_lock_irqsave(&n->list_lock, flags);
1180	slab_lock(page);
1181
1182	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1183		if (!check_slab(s, page))
1184			goto out;
1185	}
1186
1187next_object:
1188	cnt++;
1189
1190	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1191		if (!free_consistency_checks(s, page, object, addr))
1192			goto out;
1193	}
1194
1195	if (s->flags & SLAB_STORE_USER)
1196		set_track(s, object, TRACK_FREE, addr);
1197	trace(s, page, object, 0);
1198	/* Freepointer not overwritten by init_object(), SLAB_POISON moved it */
1199	init_object(s, object, SLUB_RED_INACTIVE);
1200
1201	/* Reached end of constructed freelist yet? */
1202	if (object != tail) {
1203		object = get_freepointer(s, object);
1204		goto next_object;
1205	}
1206	ret = 1;
1207
1208out:
1209	if (cnt != bulk_cnt)
1210		slab_err(s, page, "Bulk freelist count(%d) invalid(%d)\n",
1211			 bulk_cnt, cnt);
1212
1213	slab_unlock(page);
1214	spin_unlock_irqrestore(&n->list_lock, flags);
1215	if (!ret)
1216		slab_fix(s, "Object at 0x%p not freed", object);
1217	return ret;
1218}
1219
1220static int __init setup_slub_debug(char *str)
1221{
1222	slub_debug = DEBUG_DEFAULT_FLAGS;
1223	if (*str++ != '=' || !*str)
1224		/*
1225		 * No options specified. Switch on full debugging.
1226		 */
1227		goto out;
1228
1229	if (*str == ',')
1230		/*
1231		 * No options but restriction on slabs. This means full
1232		 * debugging for slabs matching a pattern.
1233		 */
1234		goto check_slabs;
1235
1236	slub_debug = 0;
1237	if (*str == '-')
1238		/*
1239		 * Switch off all debugging measures.
1240		 */
1241		goto out;
1242
1243	/*
1244	 * Determine which debug features should be switched on
1245	 */
1246	for (; *str && *str != ','; str++) {
1247		switch (tolower(*str)) {
1248		case 'f':
1249			slub_debug |= SLAB_CONSISTENCY_CHECKS;
1250			break;
1251		case 'z':
1252			slub_debug |= SLAB_RED_ZONE;
1253			break;
1254		case 'p':
1255			slub_debug |= SLAB_POISON;
1256			break;
1257		case 'u':
1258			slub_debug |= SLAB_STORE_USER;
1259			break;
1260		case 't':
1261			slub_debug |= SLAB_TRACE;
1262			break;
1263		case 'a':
1264			slub_debug |= SLAB_FAILSLAB;
1265			break;
1266		case 'o':
1267			/*
 1268			 * Avoid enabling debugging on caches if their minimum
1269			 * order would increase as a result.
1270			 */
1271			disable_higher_order_debug = 1;
1272			break;
1273		default:
1274			pr_err("slub_debug option '%c' unknown. skipped\n",
1275			       *str);
1276		}
1277	}
1278
1279check_slabs:
1280	if (*str == ',')
1281		slub_debug_slabs = str + 1;
1282out:
1283	if ((static_branch_unlikely(&init_on_alloc) ||
1284	     static_branch_unlikely(&init_on_free)) &&
1285	    (slub_debug & SLAB_POISON))
1286		pr_info("mem auto-init: SLAB_POISON will take precedence over init_on_alloc/init_on_free\n");
1287	return 1;
1288}
1289
1290__setup("slub_debug", setup_slub_debug);
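/*
 * Examples of how setup_slub_debug() above interprets the boot option:
 *
 *	slub_debug		all DEBUG_DEFAULT_FLAGS for every cache
 *	slub_debug=,dentry	full debugging, but only for slabs matching "dentry"
 *	slub_debug=zp		red zoning and poisoning for every cache
 *	slub_debug=-		switch all debugging off
 *	slub_debug=o		do not enable debugging for caches whose minimum
 *				order would increase because of the debug metadata
 */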
1291
1292/*
1293 * kmem_cache_flags - apply debugging options to the cache
1294 * @object_size:	the size of an object without meta data
1295 * @flags:		flags to set
1296 * @name:		name of the cache
1297 * @ctor:		constructor function
1298 *
1299 * Debug option(s) are applied to @flags. In addition to the debug
1300 * option(s), if a slab name (or multiple) is specified i.e.
1301 * slub_debug=<Debug-Options>,<slab name1>,<slab name2> ...
1302 * then only the select slabs will receive the debug option(s).
1303 */
1304slab_flags_t kmem_cache_flags(unsigned int object_size,
1305	slab_flags_t flags, const char *name,
1306	void (*ctor)(void *))
1307{
1308	char *iter;
1309	size_t len;
1310
1311	/* If slub_debug = 0, it folds into the if conditional. */
1312	if (!slub_debug_slabs)
1313		return flags | slub_debug;
1314
1315	len = strlen(name);
1316	iter = slub_debug_slabs;
1317	while (*iter) {
1318		char *end, *glob;
1319		size_t cmplen;
1320
1321		end = strchrnul(iter, ',');
1322
1323		glob = strnchr(iter, end - iter, '*');
1324		if (glob)
1325			cmplen = glob - iter;
1326		else
1327			cmplen = max_t(size_t, len, (end - iter));
1328
1329		if (!strncmp(name, iter, cmplen)) {
1330			flags |= slub_debug;
1331			break;
1332		}
1333
1334		if (!*end)
1335			break;
1336		iter = end + 1;
1337	}
1338
1339	return flags;
1340}
1341#else /* !CONFIG_SLUB_DEBUG */
1342static inline void setup_object_debug(struct kmem_cache *s,
1343			struct page *page, void *object) {}
1344static inline
1345void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr) {}
1346
1347static inline int alloc_debug_processing(struct kmem_cache *s,
1348	struct page *page, void *object, unsigned long addr) { return 0; }
1349
1350static inline int free_debug_processing(
1351	struct kmem_cache *s, struct page *page,
1352	void *head, void *tail, int bulk_cnt,
1353	unsigned long addr) { return 0; }
1354
1355static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
1356			{ return 1; }
1357static inline int check_object(struct kmem_cache *s, struct page *page,
1358			void *object, u8 val) { return 1; }
1359static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
1360					struct page *page) {}
1361static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
1362					struct page *page) {}
1363slab_flags_t kmem_cache_flags(unsigned int object_size,
1364	slab_flags_t flags, const char *name,
1365	void (*ctor)(void *))
1366{
1367	return flags;
1368}
1369#define slub_debug 0
1370
1371#define disable_higher_order_debug 0
1372
1373static inline unsigned long slabs_node(struct kmem_cache *s, int node)
1374							{ return 0; }
1375static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1376							{ return 0; }
1377static inline void inc_slabs_node(struct kmem_cache *s, int node,
1378							int objects) {}
1379static inline void dec_slabs_node(struct kmem_cache *s, int node,
1380							int objects) {}
1381
1382#endif /* CONFIG_SLUB_DEBUG */
1383
1384/*
1385 * Hooks for other subsystems that check memory allocations. In a typical
1386 * production configuration these hooks all should produce no code at all.
1387 */
1388static inline void *kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
1389{
1390	ptr = kasan_kmalloc_large(ptr, size, flags);
1391	/* As ptr might get tagged, call kmemleak hook after KASAN. */
1392	kmemleak_alloc(ptr, size, 1, flags);
1393	return ptr;
1394}
1395
1396static __always_inline void kfree_hook(void *x)
1397{
1398	kmemleak_free(x);
1399	kasan_kfree_large(x, _RET_IP_);
1400}
1401
1402static __always_inline bool slab_free_hook(struct kmem_cache *s, void *x)
1403{
1404	kmemleak_free_recursive(x, s->flags);
1405
1406	/*
1407	 * Trouble is that we may no longer disable interrupts in the fast path
1408	 * So in order to make the debug calls that expect irqs to be
1409	 * disabled we need to disable interrupts temporarily.
1410	 */
1411#ifdef CONFIG_LOCKDEP
1412	{
1413		unsigned long flags;
1414
1415		local_irq_save(flags);
1416		debug_check_no_locks_freed(x, s->object_size);
1417		local_irq_restore(flags);
1418	}
1419#endif
1420	if (!(s->flags & SLAB_DEBUG_OBJECTS))
1421		debug_check_no_obj_freed(x, s->object_size);
1422
1423	/* KASAN might put x into memory quarantine, delaying its reuse */
1424	return kasan_slab_free(s, x, _RET_IP_);
1425}
1426
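/*
 * Walk the detached freelist between *head and *tail, run the free hooks
 * (and the init_on_free clearing) on every object, and rebuild the list so
 * that it only contains objects whose reuse is not delayed, e.g. by the
 * KASAN quarantine. Returns true if at least one object is left for the
 * caller to actually free.
 */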
1427static inline bool slab_free_freelist_hook(struct kmem_cache *s,
1428					   void **head, void **tail)
1429{
1430
1431	void *object;
1432	void *next = *head;
1433	void *old_tail = *tail ? *tail : *head;
1434	int rsize;
1435
1436	/* Head and tail of the reconstructed freelist */
1437	*head = NULL;
1438	*tail = NULL;
1439
1440	do {
1441		object = next;
1442		next = get_freepointer(s, object);
1443
1444		if (slab_want_init_on_free(s)) {
1445			/*
1446			 * Clear the object and the metadata, but don't touch
1447			 * the redzone.
1448			 */
1449			memset(object, 0, s->object_size);
1450			rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad
1451							   : 0;
1452			memset((char *)object + s->inuse, 0,
1453			       s->size - s->inuse - rsize);
1454
1455		}
1456		/* If object's reuse doesn't have to be delayed */
1457		if (!slab_free_hook(s, object)) {
1458			/* Move object to the new freelist */
1459			set_freepointer(s, object, *head);
1460			*head = object;
1461			if (!*tail)
1462				*tail = object;
1463		}
1464	} while (object != old_tail);
1465
1466	if (*head == *tail)
1467		*tail = NULL;
1468
1469	return *head != NULL;
1470}
1471
1472static void *setup_object(struct kmem_cache *s, struct page *page,
1473				void *object)
1474{
1475	setup_object_debug(s, page, object);
1476	object = kasan_init_slab_obj(s, object);
1477	if (unlikely(s->ctor)) {
1478		kasan_unpoison_object_data(s, object);
1479		s->ctor(object);
1480		kasan_poison_object_data(s, object);
1481	}
1482	return object;
1483}
1484
1485/*
1486 * Slab allocation and freeing
1487 */
1488static inline struct page *alloc_slab_page(struct kmem_cache *s,
1489		gfp_t flags, int node, struct kmem_cache_order_objects oo)
1490{
1491	struct page *page;
1492	unsigned int order = oo_order(oo);
1493
1494	if (node == NUMA_NO_NODE)
1495		page = alloc_pages(flags, order);
1496	else
1497		page = __alloc_pages_node(node, flags, order);
1498
1499	if (page && charge_slab_page(page, flags, order, s)) {
1500		__free_pages(page, order);
1501		page = NULL;
1502	}
1503
1504	return page;
1505}
1506
1507#ifdef CONFIG_SLAB_FREELIST_RANDOM
1508/* Pre-initialize the random sequence cache */
1509static int init_cache_random_seq(struct kmem_cache *s)
1510{
1511	unsigned int count = oo_objects(s->oo);
1512	int err;
1513
1514	/* Bailout if already initialised */
1515	if (s->random_seq)
1516		return 0;
1517
1518	err = cache_random_seq_create(s, count, GFP_KERNEL);
1519	if (err) {
1520		pr_err("SLUB: Unable to initialize free list for %s\n",
1521			s->name);
1522		return err;
1523	}
1524
1525	/* Transform to an offset on the set of pages */
1526	if (s->random_seq) {
1527		unsigned int i;
1528
1529		for (i = 0; i < count; i++)
1530			s->random_seq[i] *= s->size;
1531	}
1532	return 0;
1533}
1534
1535/* Initialize each random sequence freelist per cache */
1536static void __init init_freelist_randomization(void)
1537{
1538	struct kmem_cache *s;
1539
1540	mutex_lock(&slab_mutex);
1541
1542	list_for_each_entry(s, &slab_caches, list)
1543		init_cache_random_seq(s);
1544
1545	mutex_unlock(&slab_mutex);
1546}
1547
1548/* Get the next entry on the pre-computed freelist randomized */
1549static void *next_freelist_entry(struct kmem_cache *s, struct page *page,
1550				unsigned long *pos, void *start,
1551				unsigned long page_limit,
1552				unsigned long freelist_count)
1553{
1554	unsigned int idx;
1555
1556	/*
1557	 * If the target page allocation failed, the number of objects on the
1558	 * page might be smaller than the usual size defined by the cache.
1559	 */
1560	do {
1561		idx = s->random_seq[*pos];
1562		*pos += 1;
1563		if (*pos >= freelist_count)
1564			*pos = 0;
1565	} while (unlikely(idx >= page_limit));
1566
1567	return (char *)start + idx;
1568}
1569
1570/* Shuffle the single linked freelist based on a random pre-computed sequence */
1571static bool shuffle_freelist(struct kmem_cache *s, struct page *page)
1572{
1573	void *start;
1574	void *cur;
1575	void *next;
1576	unsigned long idx, pos, page_limit, freelist_count;
1577
1578	if (page->objects < 2 || !s->random_seq)
1579		return false;
1580
1581	freelist_count = oo_objects(s->oo);
1582	pos = get_random_int() % freelist_count;
1583
1584	page_limit = page->objects * s->size;
1585	start = fixup_red_left(s, page_address(page));
1586
1587	/* First entry is used as the base of the freelist */
1588	cur = next_freelist_entry(s, page, &pos, start, page_limit,
1589				freelist_count);
1590	cur = setup_object(s, page, cur);
1591	page->freelist = cur;
1592
1593	for (idx = 1; idx < page->objects; idx++) {
1594		next = next_freelist_entry(s, page, &pos, start, page_limit,
1595			freelist_count);
1596		next = setup_object(s, page, next);
1597		set_freepointer(s, cur, next);
1598		cur = next;
1599	}
1600	set_freepointer(s, cur, NULL);
1601
1602	return true;
1603}
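/*
 * Note that init_cache_random_seq() pre-multiplies every entry of
 * s->random_seq by s->size, so next_freelist_entry() only has to add the
 * precomputed offset to the slab base address. Offsets at or beyond
 * page_limit are skipped, which keeps the shuffle correct when the slab was
 * allocated with a smaller order than s->oo and therefore holds fewer
 * objects.
 */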
1604#else
1605static inline int init_cache_random_seq(struct kmem_cache *s)
1606{
1607	return 0;
1608}
1609static inline void init_freelist_randomization(void) { }
1610static inline bool shuffle_freelist(struct kmem_cache *s, struct page *page)
1611{
1612	return false;
1613}
1614#endif /* CONFIG_SLAB_FREELIST_RANDOM */
1615
1616static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
1617{
1618	struct page *page;
1619	struct kmem_cache_order_objects oo = s->oo;
1620	gfp_t alloc_gfp;
1621	void *start, *p, *next;
1622	int idx;
1623	bool shuffle;
1624
1625	flags &= gfp_allowed_mask;
1626
1627	if (gfpflags_allow_blocking(flags))
1628		local_irq_enable();
1629
1630	flags |= s->allocflags;
1631
1632	/*
1633	 * Let the initial higher-order allocation fail under memory pressure
 1634	 * so we fall back to the minimum order allocation.
1635	 */
1636	alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
1637	if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min))
1638		alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~(__GFP_RECLAIM|__GFP_NOFAIL);
1639
1640	page = alloc_slab_page(s, alloc_gfp, node, oo);
1641	if (unlikely(!page)) {
1642		oo = s->min;
1643		alloc_gfp = flags;
1644		/*
1645		 * Allocation may have failed due to fragmentation.
1646		 * Try a lower order alloc if possible
1647		 */
1648		page = alloc_slab_page(s, alloc_gfp, node, oo);
1649		if (unlikely(!page))
1650			goto out;
1651		stat(s, ORDER_FALLBACK);
1652	}
1653
1654	page->objects = oo_objects(oo);
1655
1656	page->slab_cache = s;
1657	__SetPageSlab(page);
1658	if (page_is_pfmemalloc(page))
1659		SetPageSlabPfmemalloc(page);
1660
1661	kasan_poison_slab(page);
1662
1663	start = page_address(page);
1664
1665	setup_page_debug(s, page, start);
1666
1667	shuffle = shuffle_freelist(s, page);
1668
1669	if (!shuffle) {
1670		start = fixup_red_left(s, start);
1671		start = setup_object(s, page, start);
1672		page->freelist = start;
1673		for (idx = 0, p = start; idx < page->objects - 1; idx++) {
1674			next = p + s->size;
1675			next = setup_object(s, page, next);
1676			set_freepointer(s, p, next);
1677			p = next;
1678		}
1679		set_freepointer(s, p, NULL);
1680	}
1681
1682	page->inuse = page->objects;
1683	page->frozen = 1;
1684
1685out:
1686	if (gfpflags_allow_blocking(flags))
1687		local_irq_disable();
1688	if (!page)
1689		return NULL;
1690
1691	inc_slabs_node(s, page_to_nid(page), page->objects);
1692
1693	return page;
1694}
1695
1696static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
1697{
1698	if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
1699		gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
1700		flags &= ~GFP_SLAB_BUG_MASK;
1701		pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
1702				invalid_mask, &invalid_mask, flags, &flags);
1703		dump_stack();
1704	}
1705
1706	return allocate_slab(s,
1707		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
1708}
1709
1710static void __free_slab(struct kmem_cache *s, struct page *page)
1711{
1712	int order = compound_order(page);
1713	int pages = 1 << order;
1714
1715	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1716		void *p;
1717
1718		slab_pad_check(s, page);
1719		for_each_object(p, s, page_address(page),
1720						page->objects)
1721			check_object(s, page, p, SLUB_RED_INACTIVE);
1722	}
1723
1724	__ClearPageSlabPfmemalloc(page);
1725	__ClearPageSlab(page);
1726
1727	page->mapping = NULL;
1728	if (current->reclaim_state)
1729		current->reclaim_state->reclaimed_slab += pages;
1730	uncharge_slab_page(page, order, s);
1731	__free_pages(page, order);
1732}
1733
1734static void rcu_free_slab(struct rcu_head *h)
1735{
1736	struct page *page = container_of(h, struct page, rcu_head);
1737
1738	__free_slab(page->slab_cache, page);
1739}
1740
1741static void free_slab(struct kmem_cache *s, struct page *page)
1742{
1743	if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) {
1744		call_rcu(&page->rcu_head, rcu_free_slab);
1745	} else
1746		__free_slab(s, page);
1747}
1748
1749static void discard_slab(struct kmem_cache *s, struct page *page)
1750{
1751	dec_slabs_node(s, page_to_nid(page), page->objects);
1752	free_slab(s, page);
1753}
1754
1755/*
1756 * Management of partially allocated slabs.
1757 */
1758static inline void
1759__add_partial(struct kmem_cache_node *n, struct page *page, int tail)
1760{
1761	n->nr_partial++;
1762	if (tail == DEACTIVATE_TO_TAIL)
1763		list_add_tail(&page->slab_list, &n->partial);
1764	else
1765		list_add(&page->slab_list, &n->partial);
1766}
1767
1768static inline void add_partial(struct kmem_cache_node *n,
1769				struct page *page, int tail)
1770{
1771	lockdep_assert_held(&n->list_lock);
1772	__add_partial(n, page, tail);
1773}
1774
1775static inline void remove_partial(struct kmem_cache_node *n,
1776					struct page *page)
1777{
1778	lockdep_assert_held(&n->list_lock);
1779	list_del(&page->slab_list);
1780	n->nr_partial--;
1781}
1782
1783/*
1784 * Remove slab from the partial list, freeze it and
1785 * return the pointer to the freelist.
1786 *
1787 * Returns a list of objects or NULL if it fails.
1788 */
1789static inline void *acquire_slab(struct kmem_cache *s,
1790		struct kmem_cache_node *n, struct page *page,
1791		int mode, int *objects)
1792{
1793	void *freelist;
1794	unsigned long counters;
1795	struct page new;
1796
1797	lockdep_assert_held(&n->list_lock);
1798
1799	/*
1800	 * Zap the freelist and set the frozen bit.
1801	 * The old freelist is the list of objects for the
1802	 * per cpu allocation list.
1803	 */
1804	freelist = page->freelist;
1805	counters = page->counters;
1806	new.counters = counters;
1807	*objects = new.objects - new.inuse;
1808	if (mode) {
1809		new.inuse = page->objects;
1810		new.freelist = NULL;
1811	} else {
1812		new.freelist = freelist;
1813	}
1814
1815	VM_BUG_ON(new.frozen);
1816	new.frozen = 1;
1817
1818	if (!__cmpxchg_double_slab(s, page,
1819			freelist, counters,
1820			new.freelist, new.counters,
1821			"acquire_slab"))
1822		return NULL;
1823
1824	remove_partial(n, page);
1825	WARN_ON(!freelist);
1826	return freelist;
1827}
1828
1829static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
1830static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
1831
1832/*
1833 * Try to allocate a partial slab from a specific node.
1834 */
1835static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
1836				struct kmem_cache_cpu *c, gfp_t flags)
1837{
1838	struct page *page, *page2;
1839	void *object = NULL;
1840	unsigned int available = 0;
1841	int objects;
1842
1843	/*
1844	 * Racy check. If we mistakenly see no partial slabs then we
1845	 * just allocate an empty slab. If we mistakenly try to get a
1846	 * partial slab and there is none available then get_partials()
1847	 * will return NULL.
1848	 */
1849	if (!n || !n->nr_partial)
1850		return NULL;
1851
1852	spin_lock(&n->list_lock);
1853	list_for_each_entry_safe(page, page2, &n->partial, slab_list) {
1854		void *t;
1855
1856		if (!pfmemalloc_match(page, flags))
1857			continue;
1858
1859		t = acquire_slab(s, n, page, object == NULL, &objects);
1860		if (!t)
1861			break;
1862
1863		available += objects;
1864		if (!object) {
1865			c->page = page;
1866			stat(s, ALLOC_FROM_PARTIAL);
1867			object = t;
1868		} else {
1869			put_cpu_partial(s, page, 0);
1870			stat(s, CPU_PARTIAL_NODE);
1871		}
1872		if (!kmem_cache_has_cpu_partial(s)
1873			|| available > slub_cpu_partial(s) / 2)
1874			break;
1875
1876	}
1877	spin_unlock(&n->list_lock);
1878	return object;
1879}
1880
1881/*
1882 * Get a page from somewhere. Search in increasing NUMA distances.
1883 */
1884static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
1885		struct kmem_cache_cpu *c)
1886{
1887#ifdef CONFIG_NUMA
1888	struct zonelist *zonelist;
1889	struct zoneref *z;
1890	struct zone *zone;
1891	enum zone_type high_zoneidx = gfp_zone(flags);
1892	void *object;
1893	unsigned int cpuset_mems_cookie;
1894
1895	/*
1896	 * The defrag ratio allows a configuration of the tradeoffs between
1897	 * inter node defragmentation and node local allocations. A lower
1898	 * defrag_ratio increases the tendency to do local allocations
1899	 * instead of attempting to obtain partial slabs from other nodes.
1900	 *
1901	 * If the defrag_ratio is set to 0 then kmalloc() always
1902	 * returns node local objects. If the ratio is higher then kmalloc()
1903	 * may return off node objects because partial slabs are obtained
1904	 * from other nodes and filled up.
1905	 *
1906	 * If /sys/kernel/slab/xx/remote_node_defrag_ratio is set to 100
1907	 * (which makes defrag_ratio = 1000) then every (well almost)
1908	 * allocation will first attempt to defrag slab caches on other nodes.
1909	 * This means scanning over all nodes to look for partial slabs which
1910	 * may be expensive if we do it every time we are trying to find a slab
1911	 * with available objects.
1912	 */
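	/*
	 * Editor's illustration (not part of the original source): with the
	 * default internal ratio of 1000 (sysfs value 100), the check below
	 * bails out for only ~24 of every 1024 calls, so nearly every slowpath
	 * refill may scan remote nodes.  Writing 10 to the sysfs file named
	 * above makes the internal ratio 100, and only roughly 100/1024
	 * (about 10%) of the calls pass the get_cycles() % 1024 test and
	 * look off-node.
	 */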
1913	if (!s->remote_node_defrag_ratio ||
1914			get_cycles() % 1024 > s->remote_node_defrag_ratio)
1915		return NULL;
1916
1917	do {
1918		cpuset_mems_cookie = read_mems_allowed_begin();
1919		zonelist = node_zonelist(mempolicy_slab_node(), flags);
1920		for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
1921			struct kmem_cache_node *n;
1922
1923			n = get_node(s, zone_to_nid(zone));
1924
1925			if (n && cpuset_zone_allowed(zone, flags) &&
1926					n->nr_partial > s->min_partial) {
1927				object = get_partial_node(s, n, c, flags);
1928				if (object) {
1929					/*
1930					 * Don't check read_mems_allowed_retry()
1931					 * here - if mems_allowed was updated in
1932					 * parallel, that was a harmless race
1933					 * between allocation and the cpuset
1934					 * update
1935					 */
1936					return object;
1937				}
1938			}
1939		}
1940	} while (read_mems_allowed_retry(cpuset_mems_cookie));
1941#endif	/* CONFIG_NUMA */
1942	return NULL;
1943}
1944
1945/*
1946 * Get a partial page, lock it and return it.
1947 */
1948static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
1949		struct kmem_cache_cpu *c)
1950{
1951	void *object;
1952	int searchnode = node;
1953
1954	if (node == NUMA_NO_NODE)
1955		searchnode = numa_mem_id();
1956	else if (!node_present_pages(node))
1957		searchnode = node_to_mem_node(node);
1958
1959	object = get_partial_node(s, get_node(s, searchnode), c, flags);
1960	if (object || node != NUMA_NO_NODE)
1961		return object;
1962
1963	return get_any_partial(s, flags, c);
1964}
1965
1966#ifdef CONFIG_PREEMPT
1967/*
1968 * Calculate the next globally unique transaction for disambiguation
1969 * during cmpxchg. The transactions start with the cpu number and are then
1970 * incremented by CONFIG_NR_CPUS.
1971 */
1972#define TID_STEP  roundup_pow_of_two(CONFIG_NR_CPUS)
1973#else
1974/*
1975 * No preemption supported therefore also no need to check for
1976 * different cpus.
1977 */
1978#define TID_STEP 1
1979#endif
1980
1981static inline unsigned long next_tid(unsigned long tid)
1982{
1983	return tid + TID_STEP;
1984}
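/*
 * Editor's sketch (not in the original source): on a kernel built with
 * CONFIG_NR_CPUS=4 and CONFIG_PREEMPT, TID_STEP is 4, so the tid sequence
 * seen on cpu 1 is 1, 5, 9, 13, ...  tid % TID_STEP then recovers the cpu
 * number and tid / TID_STEP the per-cpu event count, which is exactly what
 * tid_to_cpu() and tid_to_event() below compute.
 */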
1985
1986#ifdef SLUB_DEBUG_CMPXCHG
1987static inline unsigned int tid_to_cpu(unsigned long tid)
1988{
1989	return tid % TID_STEP;
1990}
1991
1992static inline unsigned long tid_to_event(unsigned long tid)
1993{
1994	return tid / TID_STEP;
1995}
1996#endif
1997
1998static inline unsigned int init_tid(int cpu)
1999{
2000	return cpu;
2001}
2002
2003static inline void note_cmpxchg_failure(const char *n,
2004		const struct kmem_cache *s, unsigned long tid)
2005{
2006#ifdef SLUB_DEBUG_CMPXCHG
2007	unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid);
2008
2009	pr_info("%s %s: cmpxchg redo ", n, s->name);
2010
2011#ifdef CONFIG_PREEMPT
2012	if (tid_to_cpu(tid) != tid_to_cpu(actual_tid))
2013		pr_warn("due to cpu change %d -> %d\n",
2014			tid_to_cpu(tid), tid_to_cpu(actual_tid));
2015	else
2016#endif
2017	if (tid_to_event(tid) != tid_to_event(actual_tid))
2018		pr_warn("due to cpu running other code. Event %ld->%ld\n",
2019			tid_to_event(tid), tid_to_event(actual_tid));
2020	else
2021		pr_warn("for unknown reason: actual=%lx was=%lx target=%lx\n",
2022			actual_tid, tid, next_tid(tid));
2023#endif
2024	stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
2025}
2026
2027static void init_kmem_cache_cpus(struct kmem_cache *s)
2028{
2029	int cpu;
2030
2031	for_each_possible_cpu(cpu)
2032		per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu);
2033}
2034
2035/*
2036 * Remove the cpu slab
2037 */
2038static void deactivate_slab(struct kmem_cache *s, struct page *page,
2039				void *freelist, struct kmem_cache_cpu *c)
2040{
2041	enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
2042	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
2043	int lock = 0;
2044	enum slab_modes l = M_NONE, m = M_NONE;
2045	void *nextfree;
2046	int tail = DEACTIVATE_TO_HEAD;
2047	struct page new;
2048	struct page old;
2049
2050	if (page->freelist) {
2051		stat(s, DEACTIVATE_REMOTE_FREES);
2052		tail = DEACTIVATE_TO_TAIL;
2053	}
2054
2055	/*
2056	 * Stage one: Free all available per cpu objects back
2057	 * to the page freelist while it is still frozen. Leave the
2058	 * last one.
2059	 *
2060	 * There is no need to take the list_lock because the page
2061	 * is still frozen.
2062	 */
2063	while (freelist && (nextfree = get_freepointer(s, freelist))) {
2064		void *prior;
2065		unsigned long counters;
2066
2067		do {
2068			prior = page->freelist;
2069			counters = page->counters;
2070			set_freepointer(s, freelist, prior);
2071			new.counters = counters;
2072			new.inuse--;
2073			VM_BUG_ON(!new.frozen);
2074
2075		} while (!__cmpxchg_double_slab(s, page,
2076			prior, counters,
2077			freelist, new.counters,
2078			"drain percpu freelist"));
2079
2080		freelist = nextfree;
2081	}
2082
2083	/*
2084	 * Stage two: Ensure that the page is unfrozen while the
2085	 * list presence reflects the actual number of objects
2086	 * during unfreeze.
2087	 *
2088	 * We set up the list membership and then perform a cmpxchg
2089	 * with the count. If there is a mismatch then the page
2090	 * is not unfrozen but the page is on the wrong list.
2091	 *
2092	 * Then we restart the process which may have to remove
2093	 * the page from the list that we just put it on again
2094	 * because the number of objects in the slab may have
2095	 * changed.
2096	 */
2097redo:
2098
2099	old.freelist = page->freelist;
2100	old.counters = page->counters;
2101	VM_BUG_ON(!old.frozen);
2102
2103	/* Determine target state of the slab */
2104	new.counters = old.counters;
2105	if (freelist) {
2106		new.inuse--;
2107		set_freepointer(s, freelist, old.freelist);
2108		new.freelist = freelist;
2109	} else
2110		new.freelist = old.freelist;
2111
2112	new.frozen = 0;
2113
2114	if (!new.inuse && n->nr_partial >= s->min_partial)
2115		m = M_FREE;
2116	else if (new.freelist) {
2117		m = M_PARTIAL;
2118		if (!lock) {
2119			lock = 1;
2120			/*
2121			 * Taking the spinlock removes the possibility
2122			 * that acquire_slab() will see a slab page that
2123			 * is frozen
2124			 */
2125			spin_lock(&n->list_lock);
2126		}
2127	} else {
2128		m = M_FULL;
2129		if (kmem_cache_debug(s) && !lock) {
2130			lock = 1;
2131			/*
2132			 * This also ensures that the scanning of full
2133			 * slabs from diagnostic functions will not see
2134			 * any frozen slabs.
2135			 */
2136			spin_lock(&n->list_lock);
2137		}
2138	}
2139
2140	if (l != m) {
2141		if (l == M_PARTIAL)
2142			remove_partial(n, page);
2143		else if (l == M_FULL)
2144			remove_full(s, n, page);
2145
2146		if (m == M_PARTIAL)
2147			add_partial(n, page, tail);
2148		else if (m == M_FULL)
2149			add_full(s, n, page);
2150	}
2151
2152	l = m;
2153	if (!__cmpxchg_double_slab(s, page,
2154				old.freelist, old.counters,
2155				new.freelist, new.counters,
2156				"unfreezing slab"))
2157		goto redo;
2158
2159	if (lock)
2160		spin_unlock(&n->list_lock);
2161
2162	if (m == M_PARTIAL)
2163		stat(s, tail);
2164	else if (m == M_FULL)
2165		stat(s, DEACTIVATE_FULL);
2166	else if (m == M_FREE) {
2167		stat(s, DEACTIVATE_EMPTY);
2168		discard_slab(s, page);
2169		stat(s, FREE_SLAB);
2170	}
2171
2172	c->page = NULL;
2173	c->freelist = NULL;
2174}
2175
2176/*
2177 * Unfreeze all the cpu partial slabs.
2178 *
2179 * This function must be called with interrupts disabled
2180 * for the cpu using c (or some other guarantee must be in place
2181 * to prevent concurrent accesses).
2182 */
2183static void unfreeze_partials(struct kmem_cache *s,
2184		struct kmem_cache_cpu *c)
2185{
2186#ifdef CONFIG_SLUB_CPU_PARTIAL
2187	struct kmem_cache_node *n = NULL, *n2 = NULL;
2188	struct page *page, *discard_page = NULL;
2189
2190	while ((page = c->partial)) {
2191		struct page new;
2192		struct page old;
2193
2194		c->partial = page->next;
2195
2196		n2 = get_node(s, page_to_nid(page));
2197		if (n != n2) {
2198			if (n)
2199				spin_unlock(&n->list_lock);
2200
2201			n = n2;
2202			spin_lock(&n->list_lock);
2203		}
2204
2205		do {
2206
2207			old.freelist = page->freelist;
2208			old.counters = page->counters;
2209			VM_BUG_ON(!old.frozen);
2210
2211			new.counters = old.counters;
2212			new.freelist = old.freelist;
2213
2214			new.frozen = 0;
2215
2216		} while (!__cmpxchg_double_slab(s, page,
2217				old.freelist, old.counters,
2218				new.freelist, new.counters,
2219				"unfreezing slab"));
2220
2221		if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) {
2222			page->next = discard_page;
2223			discard_page = page;
2224		} else {
2225			add_partial(n, page, DEACTIVATE_TO_TAIL);
2226			stat(s, FREE_ADD_PARTIAL);
2227		}
2228	}
2229
2230	if (n)
2231		spin_unlock(&n->list_lock);
2232
2233	while (discard_page) {
2234		page = discard_page;
2235		discard_page = discard_page->next;
2236
2237		stat(s, DEACTIVATE_EMPTY);
2238		discard_slab(s, page);
2239		stat(s, FREE_SLAB);
2240	}
2241#endif	/* CONFIG_SLUB_CPU_PARTIAL */
2242}
2243
2244/*
2245 * Put a page that was just frozen (in __slab_free|get_partial_node) into a
2246 * partial page slot if available.
2247 *
2248 * If we did not find a slot then simply move all the partials to the
2249 * per node partial list.
2250 */
2251static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
2252{
2253#ifdef CONFIG_SLUB_CPU_PARTIAL
2254	struct page *oldpage;
2255	int pages;
2256	int pobjects;
2257
2258	preempt_disable();
2259	do {
2260		pages = 0;
2261		pobjects = 0;
2262		oldpage = this_cpu_read(s->cpu_slab->partial);
2263
2264		if (oldpage) {
2265			pobjects = oldpage->pobjects;
2266			pages = oldpage->pages;
2267			if (drain && pobjects > s->cpu_partial) {
2268				unsigned long flags;
2269				/*
2270				 * partial array is full. Move the existing
2271				 * set to the per node partial list.
2272				 */
2273				local_irq_save(flags);
2274				unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
2275				local_irq_restore(flags);
2276				oldpage = NULL;
2277				pobjects = 0;
2278				pages = 0;
2279				stat(s, CPU_PARTIAL_DRAIN);
2280			}
2281		}
2282
2283		pages++;
2284		pobjects += page->objects - page->inuse;
2285
2286		page->pages = pages;
2287		page->pobjects = pobjects;
2288		page->next = oldpage;
2289
2290	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page)
2291								!= oldpage);
2292	if (unlikely(!s->cpu_partial)) {
2293		unsigned long flags;
2294
2295		local_irq_save(flags);
2296		unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
2297		local_irq_restore(flags);
2298	}
2299	preempt_enable();
2300#endif	/* CONFIG_SLUB_CPU_PARTIAL */
2301}
2302
2303static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
2304{
2305	stat(s, CPUSLAB_FLUSH);
2306	deactivate_slab(s, c->page, c->freelist, c);
2307
2308	c->tid = next_tid(c->tid);
2309}
2310
2311/*
2312 * Flush cpu slab.
2313 *
2314 * Called from IPI handler with interrupts disabled.
2315 */
2316static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
2317{
2318	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
2319
2320	if (c->page)
2321		flush_slab(s, c);
2322
2323	unfreeze_partials(s, c);
2324}
2325
2326static void flush_cpu_slab(void *d)
2327{
2328	struct kmem_cache *s = d;
2329
2330	__flush_cpu_slab(s, smp_processor_id());
2331}
2332
2333static bool has_cpu_slab(int cpu, void *info)
2334{
2335	struct kmem_cache *s = info;
2336	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
2337
2338	return c->page || slub_percpu_partial(c);
2339}
2340
2341static void flush_all(struct kmem_cache *s)
2342{
2343	on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
2344}
2345
2346/*
2347 * Use the cpu notifier to ensure that the cpu slabs are flushed when
2348 * necessary.
2349 */
2350static int slub_cpu_dead(unsigned int cpu)
2351{
2352	struct kmem_cache *s;
2353	unsigned long flags;
2354
2355	mutex_lock(&slab_mutex);
2356	list_for_each_entry(s, &slab_caches, list) {
2357		local_irq_save(flags);
2358		__flush_cpu_slab(s, cpu);
2359		local_irq_restore(flags);
2360	}
2361	mutex_unlock(&slab_mutex);
2362	return 0;
2363}
2364
2365/*
2366 * Check if the objects in a per cpu structure fit numa
2367 * locality expectations.
2368 */
2369static inline int node_match(struct page *page, int node)
2370{
2371#ifdef CONFIG_NUMA
2372	if (node != NUMA_NO_NODE && page_to_nid(page) != node)
2373		return 0;
2374#endif
2375	return 1;
2376}
2377
2378#ifdef CONFIG_SLUB_DEBUG
2379static int count_free(struct page *page)
2380{
2381	return page->objects - page->inuse;
2382}
2383
2384static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
2385{
2386	return atomic_long_read(&n->total_objects);
2387}
2388#endif /* CONFIG_SLUB_DEBUG */
2389
2390#if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SYSFS)
2391static unsigned long count_partial(struct kmem_cache_node *n,
2392					int (*get_count)(struct page *))
2393{
2394	unsigned long flags;
2395	unsigned long x = 0;
2396	struct page *page;
2397
2398	spin_lock_irqsave(&n->list_lock, flags);
2399	list_for_each_entry(page, &n->partial, slab_list)
2400		x += get_count(page);
2401	spin_unlock_irqrestore(&n->list_lock, flags);
2402	return x;
2403}
2404#endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
2405
2406static noinline void
2407slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
2408{
2409#ifdef CONFIG_SLUB_DEBUG
2410	static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
2411				      DEFAULT_RATELIMIT_BURST);
2412	int node;
2413	struct kmem_cache_node *n;
2414
2415	if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs))
2416		return;
2417
2418	pr_warn("SLUB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
2419		nid, gfpflags, &gfpflags);
2420	pr_warn("  cache: %s, object size: %u, buffer size: %u, default order: %u, min order: %u\n",
2421		s->name, s->object_size, s->size, oo_order(s->oo),
2422		oo_order(s->min));
2423
2424	if (oo_order(s->min) > get_order(s->object_size))
2425		pr_warn("  %s debugging increased min order, use slub_debug=O to disable.\n",
2426			s->name);
2427
2428	for_each_kmem_cache_node(s, node, n) {
2429		unsigned long nr_slabs;
2430		unsigned long nr_objs;
2431		unsigned long nr_free;
2432
2433		nr_free  = count_partial(n, count_free);
2434		nr_slabs = node_nr_slabs(n);
2435		nr_objs  = node_nr_objs(n);
2436
2437		pr_warn("  node %d: slabs: %ld, objs: %ld, free: %ld\n",
2438			node, nr_slabs, nr_objs, nr_free);
2439	}
2440#endif
2441}
2442
2443static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
2444			int node, struct kmem_cache_cpu **pc)
2445{
2446	void *freelist;
2447	struct kmem_cache_cpu *c = *pc;
2448	struct page *page;
2449
2450	WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO));
2451
2452	freelist = get_partial(s, flags, node, c);
2453
2454	if (freelist)
2455		return freelist;
2456
2457	page = new_slab(s, flags, node);
2458	if (page) {
2459		c = raw_cpu_ptr(s->cpu_slab);
2460		if (c->page)
2461			flush_slab(s, c);
2462
2463		/*
2464		 * No other reference to the page yet so we can
2465		 * muck around with it freely without cmpxchg
2466		 */
2467		freelist = page->freelist;
2468		page->freelist = NULL;
2469
2470		stat(s, ALLOC_SLAB);
2471		c->page = page;
2472		*pc = c;
2473	}
2474
2475	return freelist;
2476}
2477
2478static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
2479{
2480	if (unlikely(PageSlabPfmemalloc(page)))
2481		return gfp_pfmemalloc_allowed(gfpflags);
2482
2483	return true;
2484}
2485
2486/*
2487 * Check the page->freelist of a page and either transfer the freelist to the
2488 * per cpu freelist or deactivate the page.
2489 *
2490 * The page is still frozen if the return value is not NULL.
2491 *
2492 * If this function returns NULL then the page has been unfrozen.
2493 *
2494 * This function must be called with interrupts disabled.
2495 */
2496static inline void *get_freelist(struct kmem_cache *s, struct page *page)
2497{
2498	struct page new;
2499	unsigned long counters;
2500	void *freelist;
2501
2502	do {
2503		freelist = page->freelist;
2504		counters = page->counters;
2505
2506		new.counters = counters;
2507		VM_BUG_ON(!new.frozen);
2508
2509		new.inuse = page->objects;
2510		new.frozen = freelist != NULL;
2511
2512	} while (!__cmpxchg_double_slab(s, page,
2513		freelist, counters,
2514		NULL, new.counters,
2515		"get_freelist"));
2516
2517	return freelist;
2518}
2519
2520/*
2521 * Slow path. The lockless freelist is empty or we need to perform
2522 * debugging duties.
2523 *
2524 * Processing is still very fast if new objects have been freed to the
2525 * regular freelist. In that case we simply take over the regular freelist
2526 * as the lockless freelist and zap the regular freelist.
2527 *
2528 * If that is not working then we fall back to the partial lists. We take the
2529 * first element of the freelist as the object to allocate now and move the
2530 * rest of the freelist to the lockless freelist.
2531 *
2532 * And if we were unable to get a new slab from the partial slab lists then
2533 * we need to allocate a new slab. This is the slowest path since it involves
2534 * a call to the page allocator and the setup of a new slab.
2535 *
2536 * Version of __slab_alloc to use when we know that interrupts are
2537 * already disabled (which is the case for bulk allocation).
2538 */
2539static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
2540			  unsigned long addr, struct kmem_cache_cpu *c)
2541{
2542	void *freelist;
2543	struct page *page;
2544
2545	page = c->page;
2546	if (!page)
2547		goto new_slab;
2548redo:
2549
2550	if (unlikely(!node_match(page, node))) {
2551		int searchnode = node;
2552
2553		if (node != NUMA_NO_NODE && !node_present_pages(node))
2554			searchnode = node_to_mem_node(node);
2555
2556		if (unlikely(!node_match(page, searchnode))) {
2557			stat(s, ALLOC_NODE_MISMATCH);
2558			deactivate_slab(s, page, c->freelist, c);
2559			goto new_slab;
2560		}
2561	}
2562
2563	/*
2564	 * By rights, we should be searching for a slab page that was
2565	 * PFMEMALLOC but right now, we are losing the pfmemalloc
2566	 * information when the page leaves the per-cpu allocator
2567	 */
2568	if (unlikely(!pfmemalloc_match(page, gfpflags))) {
2569		deactivate_slab(s, page, c->freelist, c);
2570		goto new_slab;
2571	}
2572
2573	/* must check again c->freelist in case of cpu migration or IRQ */
2574	freelist = c->freelist;
2575	if (freelist)
2576		goto load_freelist;
2577
2578	freelist = get_freelist(s, page);
2579
2580	if (!freelist) {
2581		c->page = NULL;
2582		stat(s, DEACTIVATE_BYPASS);
2583		goto new_slab;
2584	}
2585
2586	stat(s, ALLOC_REFILL);
2587
2588load_freelist:
2589	/*
2590	 * freelist is pointing to the list of objects to be used.
2591	 * page is pointing to the page from which the objects are obtained.
2592	 * That page must be frozen for per cpu allocations to work.
2593	 */
2594	VM_BUG_ON(!c->page->frozen);
2595	c->freelist = get_freepointer(s, freelist);
2596	c->tid = next_tid(c->tid);
2597	return freelist;
2598
2599new_slab:
2600
2601	if (slub_percpu_partial(c)) {
2602		page = c->page = slub_percpu_partial(c);
2603		slub_set_percpu_partial(c, page);
2604		stat(s, CPU_PARTIAL_ALLOC);
2605		goto redo;
2606	}
2607
2608	freelist = new_slab_objects(s, gfpflags, node, &c);
2609
2610	if (unlikely(!freelist)) {
2611		slab_out_of_memory(s, gfpflags, node);
2612		return NULL;
2613	}
2614
2615	page = c->page;
2616	if (likely(!kmem_cache_debug(s) && pfmemalloc_match(page, gfpflags)))
2617		goto load_freelist;
2618
2619	/* Only entered in the debug case */
2620	if (kmem_cache_debug(s) &&
2621			!alloc_debug_processing(s, page, freelist, addr))
2622		goto new_slab;	/* Slab failed checks. Next slab needed */
2623
2624	deactivate_slab(s, page, get_freepointer(s, freelist), c);
2625	return freelist;
2626}
2627
2628/*
2629 * Another one that disables interrupts and compensates for possible
2630 * cpu changes by refetching the per cpu area pointer.
2631 */
2632static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
2633			  unsigned long addr, struct kmem_cache_cpu *c)
2634{
2635	void *p;
2636	unsigned long flags;
2637
2638	local_irq_save(flags);
2639#ifdef CONFIG_PREEMPT
2640	/*
2641	 * We may have been preempted and rescheduled on a different
2642	 * cpu before disabling interrupts. Need to reload cpu area
2643	 * pointer.
2644	 */
2645	c = this_cpu_ptr(s->cpu_slab);
2646#endif
2647
2648	p = ___slab_alloc(s, gfpflags, node, addr, c);
2649	local_irq_restore(flags);
2650	return p;
2651}
2652
2653/*
2654 * If the object has been wiped upon free, make sure it's fully initialized by
2655 * zeroing out the freelist pointer.
2656 */
2657static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
2658						   void *obj)
2659{
2660	if (unlikely(slab_want_init_on_free(s)) && obj)
2661		memset((void *)((char *)obj + s->offset), 0, sizeof(void *));
2662}
2663
2664/*
2665 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
2666 * have the fastpath folded into their functions. So no function call
2667 * overhead for requests that can be satisfied on the fastpath.
2668 *
2669 * The fastpath works by first checking if the lockless freelist can be used.
2670 * If not then __slab_alloc is called for slow processing.
2671 *
2672 * Otherwise we can simply pick the next object from the lockless free list.
2673 */
2674static __always_inline void *slab_alloc_node(struct kmem_cache *s,
2675		gfp_t gfpflags, int node, unsigned long addr)
2676{
2677	void *object;
2678	struct kmem_cache_cpu *c;
2679	struct page *page;
2680	unsigned long tid;
2681
2682	s = slab_pre_alloc_hook(s, gfpflags);
2683	if (!s)
2684		return NULL;
2685redo:
2686	/*
2687	 * Must read kmem_cache cpu data via this cpu ptr. Preemption is
2688	 * enabled. We may switch back and forth between cpus while
2689	 * reading from one cpu area. That does not matter as long
2690	 * as we end up on the original cpu again when doing the cmpxchg.
2691	 *
2692	 * We should guarantee that tid and kmem_cache are retrieved on
2693	 * the same cpu. They could differ if CONFIG_PREEMPT is enabled, so we
2694	 * need to check whether they match.
2695	 */
2696	do {
2697		tid = this_cpu_read(s->cpu_slab->tid);
2698		c = raw_cpu_ptr(s->cpu_slab);
2699	} while (IS_ENABLED(CONFIG_PREEMPT) &&
2700		 unlikely(tid != READ_ONCE(c->tid)));
2701
2702	/*
2703	 * The irqless object alloc/free algorithm used here depends on the
2704	 * sequence of fetching cpu_slab's data. tid must be fetched before
2705	 * anything else on c, to guarantee that the object and page associated
2706	 * with the previous tid won't be used with the current tid. If tid were
2707	 * fetched first, the object and page could belong to the next tid and
2708	 * the alloc/free request would fail; in that case we simply retry.
2709	 */
2710	barrier();
2711
2712	/*
2713	 * The transaction ids are globally unique per cpu and per operation on
2714	 * a per cpu queue. Thus they can guarantee that the cmpxchg_double
2715	 * occurs on the right processor and that there was no operation on the
2716	 * linked list in between.
2717	 */
2718
2719	object = c->freelist;
2720	page = c->page;
2721	if (unlikely(!object || !node_match(page, node))) {
2722		object = __slab_alloc(s, gfpflags, node, addr, c);
2723		stat(s, ALLOC_SLOWPATH);
2724	} else {
2725		void *next_object = get_freepointer_safe(s, object);
2726
2727		/*
2728		 * The cmpxchg will only match if there was no additional
2729		 * operation and if we are on the right processor.
2730		 *
2731		 * The cmpxchg does the following atomically (without lock
2732		 * semantics!)
2733		 * 1. Relocate first pointer to the current per cpu area.
2734		 * 2. Verify that tid and freelist have not been changed
2735		 * 3. If they were not changed replace tid and freelist
2736		 *
2737		 * Since this is without lock semantics the protection is only
2738		 * against code executing on this cpu *not* from access by
2739		 * other cpus.
2740		 */
2741		if (unlikely(!this_cpu_cmpxchg_double(
2742				s->cpu_slab->freelist, s->cpu_slab->tid,
2743				object, tid,
2744				next_object, next_tid(tid)))) {
2745
2746			note_cmpxchg_failure("slab_alloc", s, tid);
2747			goto redo;
2748		}
2749		prefetch_freepointer(s, next_object);
2750		stat(s, ALLOC_FASTPATH);
2751	}
2752
2753	maybe_wipe_obj_freeptr(s, object);
2754
2755	if (unlikely(slab_want_init_on_alloc(gfpflags, s)) && object)
2756		memset(object, 0, s->object_size);
2757
2758	slab_post_alloc_hook(s, gfpflags, 1, &object);
2759
2760	return object;
2761}
2762
2763static __always_inline void *slab_alloc(struct kmem_cache *s,
2764		gfp_t gfpflags, unsigned long addr)
2765{
2766	return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr);
2767}
2768
2769void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
2770{
2771	void *ret = slab_alloc(s, gfpflags, _RET_IP_);
2772
2773	trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size,
2774				s->size, gfpflags);
2775
2776	return ret;
2777}
2778EXPORT_SYMBOL(kmem_cache_alloc);
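/*
 * Editor's usage sketch (not part of the original file): a minimal user of
 * the allocation API exported above.  The cache name, object layout and the
 * function names are hypothetical.
 */
#if 0
struct example_obj {
	int id;
	char tag[16];
};

static struct kmem_cache *example_cachep;

static int example_use_cache(void)
{
	struct example_obj *obj;

	example_cachep = kmem_cache_create("example_obj",
					   sizeof(struct example_obj), 0,
					   SLAB_HWCACHE_ALIGN, NULL);
	if (!example_cachep)
		return -ENOMEM;

	obj = kmem_cache_alloc(example_cachep, GFP_KERNEL);
	if (obj) {
		obj->id = 1;
		kmem_cache_free(example_cachep, obj);
	}

	kmem_cache_destroy(example_cachep);
	return obj ? 0 : -ENOMEM;
}
#endif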
2779
2780#ifdef CONFIG_TRACING
2781void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
2782{
2783	void *ret = slab_alloc(s, gfpflags, _RET_IP_);
2784	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
2785	ret = kasan_kmalloc(s, ret, size, gfpflags);
2786	return ret;
2787}
2788EXPORT_SYMBOL(kmem_cache_alloc_trace);
2789#endif
2790
2791#ifdef CONFIG_NUMA
2792void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
2793{
2794	void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
2795
2796	trace_kmem_cache_alloc_node(_RET_IP_, ret,
2797				    s->object_size, s->size, gfpflags, node);
2798
2799	return ret;
2800}
2801EXPORT_SYMBOL(kmem_cache_alloc_node);
2802
2803#ifdef CONFIG_TRACING
2804void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
2805				    gfp_t gfpflags,
2806				    int node, size_t size)
2807{
2808	void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
2809
2810	trace_kmalloc_node(_RET_IP_, ret,
2811			   size, s->size, gfpflags, node);
2812
2813	ret = kasan_kmalloc(s, ret, size, gfpflags);
2814	return ret;
2815}
2816EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
2817#endif
2818#endif	/* CONFIG_NUMA */
2819
2820/*
2821 * Slow path handling. This may still be called frequently since objects
2822 * have a longer lifetime than the cpu slabs in most processing loads.
2823 *
2824 * So we still attempt to reduce cache line usage. Just take the slab
2825 * lock and free the item. If there is no additional partial page
2826 * handling required then we can return immediately.
2827 */
2828static void __slab_free(struct kmem_cache *s, struct page *page,
2829			void *head, void *tail, int cnt,
2830			unsigned long addr)
2831
2832{
2833	void *prior;
2834	int was_frozen;
2835	struct page new;
2836	unsigned long counters;
2837	struct kmem_cache_node *n = NULL;
2838	unsigned long uninitialized_var(flags);
2839
2840	stat(s, FREE_SLOWPATH);
2841
2842	if (kmem_cache_debug(s) &&
2843	    !free_debug_processing(s, page, head, tail, cnt, addr))
2844		return;
2845
2846	do {
2847		if (unlikely(n)) {
2848			spin_unlock_irqrestore(&n->list_lock, flags);
2849			n = NULL;
2850		}
2851		prior = page->freelist;
2852		counters = page->counters;
2853		set_freepointer(s, tail, prior);
2854		new.counters = counters;
2855		was_frozen = new.frozen;
2856		new.inuse -= cnt;
2857		if ((!new.inuse || !prior) && !was_frozen) {
2858
2859			if (kmem_cache_has_cpu_partial(s) && !prior) {
2860
2861				/*
2862				 * Slab was on no list before and will be
2863				 * partially empty
2864				 * We can defer the list move and instead
2865				 * freeze it.
2866				 */
2867				new.frozen = 1;
2868
2869			} else { /* Needs to be taken off a list */
2870
2871				n = get_node(s, page_to_nid(page));
2872				/*
2873				 * Speculatively acquire the list_lock.
2874				 * If the cmpxchg does not succeed then we may
2875				 * drop the list_lock without any processing.
2876				 *
2877				 * Otherwise the list_lock will synchronize with
2878				 * other processors updating the list of slabs.
2879				 */
2880				spin_lock_irqsave(&n->list_lock, flags);
2881
2882			}
2883		}
2884
2885	} while (!cmpxchg_double_slab(s, page,
2886		prior, counters,
2887		head, new.counters,
2888		"__slab_free"));
2889
2890	if (likely(!n)) {
2891
2892		/*
2893		 * If we just froze the page then put it onto the
2894		 * per cpu partial list.
2895		 */
2896		if (new.frozen && !was_frozen) {
2897			put_cpu_partial(s, page, 1);
2898			stat(s, CPU_PARTIAL_FREE);
2899		}
2900		/*
2901		 * The list lock was not taken therefore no list
2902		 * activity can be necessary.
2903		 */
2904		if (was_frozen)
2905			stat(s, FREE_FROZEN);
2906		return;
2907	}
2908
2909	if (unlikely(!new.inuse && n->nr_partial >= s->min_partial))
2910		goto slab_empty;
2911
2912	/*
2913	 * Objects left in the slab. If it was not on the partial list before
2914	 * then add it.
2915	 */
2916	if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
2917		remove_full(s, n, page);
2918		add_partial(n, page, DEACTIVATE_TO_TAIL);
2919		stat(s, FREE_ADD_PARTIAL);
2920	}
2921	spin_unlock_irqrestore(&n->list_lock, flags);
2922	return;
2923
2924slab_empty:
2925	if (prior) {
2926		/*
2927		 * Slab on the partial list.
2928		 */
2929		remove_partial(n, page);
2930		stat(s, FREE_REMOVE_PARTIAL);
2931	} else {
2932		/* Slab must be on the full list */
2933		remove_full(s, n, page);
2934	}
2935
2936	spin_unlock_irqrestore(&n->list_lock, flags);
2937	stat(s, FREE_SLAB);
2938	discard_slab(s, page);
2939}
2940
2941/*
2942 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
2943 * can perform fastpath freeing without additional function calls.
2944 *
2945 * The fastpath is only possible if we are freeing to the current cpu slab
2946 * of this processor. This is typically the case if we have just allocated
2947 * the item before.
2948 *
2949 * If fastpath is not possible then fall back to __slab_free where we deal
2950 * with all sorts of special processing.
2951 *
2952 * Bulk free of a freelist with several objects (all pointing to the
2953 * same page) is possible by specifying head and tail pointers, plus an
2954 * object count (cnt). Bulk free is indicated by the tail pointer being set.
2955 */
2956static __always_inline void do_slab_free(struct kmem_cache *s,
2957				struct page *page, void *head, void *tail,
2958				int cnt, unsigned long addr)
2959{
2960	void *tail_obj = tail ? : head;
2961	struct kmem_cache_cpu *c;
2962	unsigned long tid;
2963redo:
2964	/*
2965	 * Determine the current cpu's per cpu slab.
2966	 * The cpu may change afterward. However that does not matter since
2967	 * data is retrieved via this pointer. If we are on the same cpu
2968	 * during the cmpxchg then the free will succeed.
2969	 */
2970	do {
2971		tid = this_cpu_read(s->cpu_slab->tid);
2972		c = raw_cpu_ptr(s->cpu_slab);
2973	} while (IS_ENABLED(CONFIG_PREEMPT) &&
2974		 unlikely(tid != READ_ONCE(c->tid)));
2975
2976	/* Same with comment on barrier() in slab_alloc_node() */
2977	barrier();
2978
2979	if (likely(page == c->page)) {
2980		set_freepointer(s, tail_obj, c->freelist);
2981
2982		if (unlikely(!this_cpu_cmpxchg_double(
2983				s->cpu_slab->freelist, s->cpu_slab->tid,
2984				c->freelist, tid,
2985				head, next_tid(tid)))) {
2986
2987			note_cmpxchg_failure("slab_free", s, tid);
2988			goto redo;
2989		}
2990		stat(s, FREE_FASTPATH);
2991	} else
2992		__slab_free(s, page, head, tail_obj, cnt, addr);
2993
2994}
2995
2996static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
2997				      void *head, void *tail, int cnt,
2998				      unsigned long addr)
2999{
3000	/*
3001	 * With KASAN enabled slab_free_freelist_hook modifies the freelist
3002	 * to remove objects whose reuse must be delayed.
3003	 */
3004	if (slab_free_freelist_hook(s, &head, &tail))
3005		do_slab_free(s, page, head, tail, cnt, addr);
3006}
3007
3008#ifdef CONFIG_KASAN_GENERIC
3009void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
3010{
3011	do_slab_free(cache, virt_to_head_page(x), x, NULL, 1, addr);
3012}
3013#endif
3014
3015void kmem_cache_free(struct kmem_cache *s, void *x)
3016{
3017	s = cache_from_obj(s, x);
3018	if (!s)
3019		return;
3020	slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_);
3021	trace_kmem_cache_free(_RET_IP_, x);
3022}
3023EXPORT_SYMBOL(kmem_cache_free);
3024
3025struct detached_freelist {
3026	struct page *page;
3027	void *tail;
3028	void *freelist;
3029	int cnt;
3030	struct kmem_cache *s;
3031};
3032
3033/*
3034 * This function progressively scans the array of free objects (with
3035 * a limited look ahead) and extracts objects belonging to the same
3036 * page.  It builds a detached freelist directly within the given
3037 * page/objects.  This can happen without any need for
3038 * synchronization, because the objects are owned by the running process.
3039 * The freelist is built up as a singly linked list in the objects.
3040 * The idea is that this detached freelist can then be bulk
3041 * transferred to the real freelist(s), while only requiring a single
3042 * synchronization primitive.  Look ahead in the array is limited for
3043 * performance reasons.
3044 */
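/*
 * Editor's worked example (not in the original source): for a hypothetical
 * array p = { a1, b1, a2 } where a1 and a2 sit on page X and b1 on page Y,
 * the first call scans from the tail, splices a2 and a1 into one freelist
 * for page X (NULLing their slots) and returns 2.  kmem_cache_free_bulk()
 * then calls again with size 2, which picks up b1 for page Y and returns 0,
 * so the whole array is freed with a single freelist splice per page.
 */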
3045static inline
3046int build_detached_freelist(struct kmem_cache *s, size_t size,
3047			    void **p, struct detached_freelist *df)
3048{
3049	size_t first_skipped_index = 0;
3050	int lookahead = 3;
3051	void *object;
3052	struct page *page;
3053
3054	/* Always re-init detached_freelist */
3055	df->page = NULL;
3056
3057	do {
3058		object = p[--size];
3059		/* Do we need !ZERO_OR_NULL_PTR(object) here? (for kfree) */
3060	} while (!object && size);
3061
3062	if (!object)
3063		return 0;
3064
3065	page = virt_to_head_page(object);
3066	if (!s) {
3067		/* Handle kmalloc'ed objects */
3068		if (unlikely(!PageSlab(page))) {
3069			BUG_ON(!PageCompound(page));
3070			kfree_hook(object);
3071			__free_pages(page, compound_order(page));
3072			p[size] = NULL; /* mark object processed */
3073			return size;
3074		}
3075		/* Derive kmem_cache from object */
3076		df->s = page->slab_cache;
3077	} else {
3078		df->s = cache_from_obj(s, object); /* Support for memcg */
3079	}
3080
3081	/* Start new detached freelist */
3082	df->page = page;
3083	set_freepointer(df->s, object, NULL);
3084	df->tail = object;
3085	df->freelist = object;
3086	p[size] = NULL; /* mark object processed */
3087	df->cnt = 1;
3088
3089	while (size) {
3090		object = p[--size];
3091		if (!object)
3092			continue; /* Skip processed objects */
3093
3094		/* df->page is always set at this point */
3095		if (df->page == virt_to_head_page(object)) {
3096			/* Opportunistically build the freelist */
3097			set_freepointer(df->s, object, df->freelist);
3098			df->freelist = object;
3099			df->cnt++;
3100			p[size] = NULL; /* mark object processed */
3101
3102			continue;
3103		}
3104
3105		/* Limit look ahead search */
3106		if (!--lookahead)
3107			break;
3108
3109		if (!first_skipped_index)
3110			first_skipped_index = size + 1;
3111	}
3112
3113	return first_skipped_index;
3114}
3115
3116/* Note that interrupts must be enabled when calling this function. */
3117void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
3118{
3119	if (WARN_ON(!size))
3120		return;
3121
3122	do {
3123		struct detached_freelist df;
3124
3125		size = build_detached_freelist(s, size, p, &df);
3126		if (!df.page)
3127			continue;
3128
3129		slab_free(df.s, df.page, df.freelist, df.tail, df.cnt,_RET_IP_);
3130	} while (likely(size));
3131}
3132EXPORT_SYMBOL(kmem_cache_free_bulk);
3133
3134/* Note that interrupts must be enabled when calling this function. */
3135int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
3136			  void **p)
3137{
3138	struct kmem_cache_cpu *c;
3139	int i;
3140
3141	/* memcg and kmem_cache debug support */
3142	s = slab_pre_alloc_hook(s, flags);
3143	if (unlikely(!s))
3144		return false;
3145	/*
3146	 * Drain objects in the per cpu slab, while disabling local
3147	 * IRQs, which protects against PREEMPT and interrupts
3148	 * handlers invoking normal fastpath.
3149	 */
3150	local_irq_disable();
3151	c = this_cpu_ptr(s->cpu_slab);
3152
3153	for (i = 0; i < size; i++) {
3154		void *object = c->freelist;
3155
3156		if (unlikely(!object)) {
3157			/*
3158			 * Invoking the slow path likely has the side-effect
3159			 * of re-populating the per CPU c->freelist
3160			 */
3161			p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
3162					    _RET_IP_, c);
3163			if (unlikely(!p[i]))
3164				goto error;
3165
3166			c = this_cpu_ptr(s->cpu_slab);
3167			maybe_wipe_obj_freeptr(s, p[i]);
3168
3169			continue; /* goto for-loop */
3170		}
3171		c->freelist = get_freepointer(s, object);
3172		p[i] = object;
3173		maybe_wipe_obj_freeptr(s, p[i]);
3174	}
3175	c->tid = next_tid(c->tid);
3176	local_irq_enable();
3177
3178	/* Clear memory outside IRQ disabled fastpath loop */
3179	if (unlikely(slab_want_init_on_alloc(flags, s))) {
3180		int j;
3181
3182		for (j = 0; j < i; j++)
3183			memset(p[j], 0, s->object_size);
3184	}
3185
3186	/* memcg and kmem_cache debug support */
3187	slab_post_alloc_hook(s, flags, size, p);
3188	return i;
3189error:
3190	local_irq_enable();
3191	slab_post_alloc_hook(s, flags, i, p);
3192	__kmem_cache_free_bulk(s, i, p);
3193	return 0;
3194}
3195EXPORT_SYMBOL(kmem_cache_alloc_bulk);
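/*
 * Editor's usage sketch (not part of the original file): allocating and
 * freeing a small batch with one call in each direction.  "example_cachep"
 * is assumed to have been created elsewhere with kmem_cache_create().
 */
#if 0
static int example_bulk(struct kmem_cache *example_cachep)
{
	void *objs[16];
	int got;

	/* Interrupts must be enabled here, as noted above. */
	got = kmem_cache_alloc_bulk(example_cachep, GFP_KERNEL,
				    ARRAY_SIZE(objs), objs);
	if (!got)
		return -ENOMEM;		/* 0 means the bulk allocation failed */

	/* ... use the objects ... */

	kmem_cache_free_bulk(example_cachep, got, objs);
	return 0;
}
#endif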
3196
3197
3198/*
3199 * Object placement in a slab is made very easy because we always start at
3200 * offset 0. If we tune the size of the object to the alignment then we can
3201 * get the required alignment by putting one properly sized object after
3202 * another.
3203 *
3204 * Notice that the allocation order determines the sizes of the per cpu
3205 * caches. Each processor always has one slab available for allocations.
3206 * Increasing the allocation order reduces the number of times that slabs
3207 * must be moved on and off the partial lists and is therefore a factor in
3208 * locking overhead.
3209 */
3210
3211/*
3212 * Minimum / Maximum order of slab pages. This influences locking overhead
3213 * and slab fragmentation. A higher order reduces the number of partial slabs
3214 * and increases the number of allocations possible without having to
3215 * take the list_lock.
3216 */
3217static unsigned int slub_min_order;
3218static unsigned int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
3219static unsigned int slub_min_objects;
3220
3221/*
3222 * Calculate the order of allocation given a slab object size.
3223 *
3224 * The order of allocation has significant impact on performance and other
3225 * system components. Generally order 0 allocations should be preferred since
3226 * order 0 does not cause fragmentation in the page allocator. Larger objects
3227 * can be problematic to put into order 0 slabs because there may be too much
3228 * unused space left. We go to a higher order if more than 1/16th of the slab
3229 * would be wasted.
3230 *
3231 * In order to reach satisfactory performance we must ensure that a minimum
3232 * number of objects is in one slab. Otherwise we may generate too much
3233 * activity on the partial lists which requires taking the list_lock. This is
3234 * less of a concern for large slabs though, which are rarely used.
3235 *
3236 * slub_max_order specifies the order where we begin to stop considering the
3237 * number of objects in a slab as critical. If we reach slub_max_order then
3238 * we try to keep the page order as low as possible. So we accept more waste
3239 * of space in favor of a small page order.
3240 *
3241 * Higher order allocations also allow the placement of more objects in a
3242 * slab and thereby reduce object handling overhead. If the user has
3243 * requested a higher minimum order then we start with that one instead of
3244 * the smallest order which will fit the object.
3245 */
3246static inline unsigned int slab_order(unsigned int size,
3247		unsigned int min_objects, unsigned int max_order,
3248		unsigned int fract_leftover)
3249{
3250	unsigned int min_order = slub_min_order;
3251	unsigned int order;
3252
3253	if (order_objects(min_order, size) > MAX_OBJS_PER_PAGE)
3254		return get_order(size * MAX_OBJS_PER_PAGE) - 1;
3255
3256	for (order = max(min_order, (unsigned int)get_order(min_objects * size));
3257			order <= max_order; order++) {
3258
3259		unsigned int slab_size = (unsigned int)PAGE_SIZE << order;
3260		unsigned int rem;
3261
3262		rem = slab_size % size;
3263
3264		if (rem <= slab_size / fract_leftover)
3265			break;
3266	}
3267
3268	return order;
3269}
3270
3271static inline int calculate_order(unsigned int size)
3272{
3273	unsigned int order;
3274	unsigned int min_objects;
3275	unsigned int max_objects;
3276
3277	/*
3278	 * Attempt to find the best configuration for a slab. This
3279	 * works by first attempting to generate a layout with
3280	 * the best configuration and backing off gradually.
3281	 *
3282	 * First we increase the acceptable waste in a slab. Then
3283	 * we reduce the minimum objects required in a slab.
3284	 */
3285	min_objects = slub_min_objects;
3286	if (!min_objects)
3287		min_objects = 4 * (fls(nr_cpu_ids) + 1);
3288	max_objects = order_objects(slub_max_order, size);
3289	min_objects = min(min_objects, max_objects);
3290
3291	while (min_objects > 1) {
3292		unsigned int fraction;
3293
3294		fraction = 16;
3295		while (fraction >= 4) {
3296			order = slab_order(size, min_objects,
3297					slub_max_order, fraction);
3298			if (order <= slub_max_order)
3299				return order;
3300			fraction /= 2;
3301		}
3302		min_objects--;
3303	}
3304
3305	/*
3306	 * We were unable to place multiple objects in a slab. Now
3307	 * let's see if we can place a single object there.
3308	 */
3309	order = slab_order(size, 1, slub_max_order, 1);
3310	if (order <= slub_max_order)
3311		return order;
3312
3313	/*
3314	 * Doh this slab cannot be placed using slub_max_order.
3315	 */
3316	order = slab_order(size, 1, MAX_ORDER, 1);
3317	if (order < MAX_ORDER)
3318		return order;
3319	return -ENOSYS;
3320}
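/*
 * Editor's worked example (assuming 4 possible CPUs, the default
 * slub_max_order of 3 and no boot-time overrides): for a 700 byte object,
 * min_objects starts at 4 * (fls(4) + 1) = 16, so slab_order() begins at
 * get_order(16 * 700) = 2.  An order-2 slab (16384 bytes) holds 23 objects
 * and wastes only 284 bytes, well under 1/16th of the slab, so
 * calculate_order() returns 2.
 */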
3321
3322static void
3323init_kmem_cache_node(struct kmem_cache_node *n)
3324{
3325	n->nr_partial = 0;
3326	spin_lock_init(&n->list_lock);
3327	INIT_LIST_HEAD(&n->partial);
3328#ifdef CONFIG_SLUB_DEBUG
3329	atomic_long_set(&n->nr_slabs, 0);
3330	atomic_long_set(&n->total_objects, 0);
3331	INIT_LIST_HEAD(&n->full);
3332#endif
3333}
3334
3335static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
3336{
3337	BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
3338			KMALLOC_SHIFT_HIGH * sizeof(struct kmem_cache_cpu));
3339
3340	/*
3341	 * Must align to double word boundary for the double cmpxchg
3342	 * instructions to work; see __pcpu_double_call_return_bool().
3343	 */
3344	s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
3345				     2 * sizeof(void *));
3346
3347	if (!s->cpu_slab)
3348		return 0;
3349
3350	init_kmem_cache_cpus(s);
3351
3352	return 1;
3353}
3354
3355static struct kmem_cache *kmem_cache_node;
3356
3357/*
3358 * No kmalloc_node yet so do it by hand. We know that this is the first
3359 * slab on the node for this slabcache. There are no concurrent accesses
3360 * possible.
3361 *
3362 * Note that this function only works on the kmem_cache_node
3363 * when allocating for the kmem_cache_node. This is used for bootstrapping
3364 * memory on a fresh node that has no slab structures yet.
3365 */
3366static void early_kmem_cache_node_alloc(int node)
3367{
3368	struct page *page;
3369	struct kmem_cache_node *n;
3370
3371	BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
3372
3373	page = new_slab(kmem_cache_node, GFP_NOWAIT, node);
3374
3375	BUG_ON(!page);
3376	if (page_to_nid(page) != node) {
3377		pr_err("SLUB: Unable to allocate memory from node %d\n", node);
3378		pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n");
3379	}
3380
3381	n = page->freelist;
3382	BUG_ON(!n);
3383#ifdef CONFIG_SLUB_DEBUG
3384	init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
3385	init_tracking(kmem_cache_node, n);
3386#endif
3387	n = kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node),
3388		      GFP_KERNEL);
3389	page->freelist = get_freepointer(kmem_cache_node, n);
3390	page->inuse = 1;
3391	page->frozen = 0;
3392	kmem_cache_node->node[node] = n;
3393	init_kmem_cache_node(n);
3394	inc_slabs_node(kmem_cache_node, node, page->objects);
3395
3396	/*
3397	 * No locks need to be taken here as it has just been
3398	 * initialized and there is no concurrent access.
3399	 */
3400	__add_partial(n, page, DEACTIVATE_TO_HEAD);
3401}
3402
3403static void free_kmem_cache_nodes(struct kmem_cache *s)
3404{
3405	int node;
3406	struct kmem_cache_node *n;
3407
3408	for_each_kmem_cache_node(s, node, n) {
3409		s->node[node] = NULL;
3410		kmem_cache_free(kmem_cache_node, n);
3411	}
3412}
3413
3414void __kmem_cache_release(struct kmem_cache *s)
3415{
3416	cache_random_seq_destroy(s);
3417	free_percpu(s->cpu_slab);
3418	free_kmem_cache_nodes(s);
3419}
3420
3421static int init_kmem_cache_nodes(struct kmem_cache *s)
3422{
3423	int node;
3424
3425	for_each_node_state(node, N_NORMAL_MEMORY) {
3426		struct kmem_cache_node *n;
3427
3428		if (slab_state == DOWN) {
3429			early_kmem_cache_node_alloc(node);
3430			continue;
3431		}
3432		n = kmem_cache_alloc_node(kmem_cache_node,
3433						GFP_KERNEL, node);
3434
3435		if (!n) {
3436			free_kmem_cache_nodes(s);
3437			return 0;
3438		}
3439
3440		init_kmem_cache_node(n);
3441		s->node[node] = n;
3442	}
3443	return 1;
3444}
3445
3446static void set_min_partial(struct kmem_cache *s, unsigned long min)
3447{
3448	if (min < MIN_PARTIAL)
3449		min = MIN_PARTIAL;
3450	else if (min > MAX_PARTIAL)
3451		min = MAX_PARTIAL;
3452	s->min_partial = min;
3453}
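/*
 * Editor's note (illustration only): kmem_cache_open() below calls
 * set_min_partial(s, ilog2(s->size) / 2), so a 4096 byte object asks for
 * ilog2(4096) / 2 = 6 partial slabs per node, while a 32 byte object asks
 * for 2 and is raised to the MIN_PARTIAL floor by the clamping above.
 */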
3454
3455static void set_cpu_partial(struct kmem_cache *s)
3456{
3457#ifdef CONFIG_SLUB_CPU_PARTIAL
3458	/*
3459	 * cpu_partial determines the maximum number of objects kept in the
3460	 * per cpu partial lists of a processor.
3461	 *
3462	 * Per cpu partial lists mainly contain slabs that just have one
3463	 * object freed. If they are used for allocation then they can be
3464	 * filled up again with minimal effort. The slab will never hit the
3465	 * per node partial lists and therefore no locking will be required.
3466	 *
3467	 * This setting also determines
3468	 *
3469	 * A) The number of objects from per cpu partial slabs dumped to the
3470	 *    per node list when we reach the limit.
3471	 * B) The number of objects in cpu partial slabs to extract from the
3472	 *    per node list when we run out of per cpu objects. We only fetch
3473	 *    50% to keep some capacity around for frees.
3474	 */
3475	if (!kmem_cache_has_cpu_partial(s))
3476		s->cpu_partial = 0;
3477	else if (s->size >= PAGE_SIZE)
3478		s->cpu_partial = 2;
3479	else if (s->size >= 1024)
3480		s->cpu_partial = 6;
3481	else if (s->size >= 256)
3482		s->cpu_partial = 13;
3483	else
3484		s->cpu_partial = 30;
3485#endif
3486}
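/*
 * Editor's illustration (not part of the original source): a cache whose
 * size falls in the 256..1023 byte range keeps up to 13 free objects on each
 * cpu's partial list before draining them to the node.  The limit can be
 * inspected or tuned at runtime through /sys/kernel/slab/<cache>/cpu_partial,
 * e.g. writing 0 there disables per-cpu partial slabs for that cache.
 */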
3487
3488/*
3489 * calculate_sizes() determines the order and the distribution of data within
3490 * a slab object.
3491 */
3492static int calculate_sizes(struct kmem_cache *s, int forced_order)
3493{
3494	slab_flags_t flags = s->flags;
3495	unsigned int size = s->object_size;
3496	unsigned int order;
3497
3498	/*
3499	 * Round up object size to the next word boundary. We can only
3500	 * place the free pointer at word boundaries and this determines
3501	 * the possible location of the free pointer.
3502	 */
3503	size = ALIGN(size, sizeof(void *));
3504
3505#ifdef CONFIG_SLUB_DEBUG
3506	/*
3507	 * Determine if we can poison the object itself. If the user of
3508	 * the slab may touch the object after free or before allocation
3509	 * then we should never poison the object itself.
3510	 */
3511	if ((flags & SLAB_POISON) && !(flags & SLAB_TYPESAFE_BY_RCU) &&
3512			!s->ctor)
3513		s->flags |= __OBJECT_POISON;
3514	else
3515		s->flags &= ~__OBJECT_POISON;
3516
3517
3518	/*
3519	 * If we are Redzoning then check if there is some space between the
3520	 * end of the object and the free pointer. If not then add an
3521	 * additional word to have some bytes to store Redzone information.
3522	 */
3523	if ((flags & SLAB_RED_ZONE) && size == s->object_size)
3524		size += sizeof(void *);
3525#endif
3526
3527	/*
3528	 * With that we have determined the number of bytes in actual use
3529	 * by the object. This is the potential offset to the free pointer.
3530	 */
3531	s->inuse = size;
3532
3533	if (((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
3534		s->ctor)) {
3535		/*
3536		 * Relocate free pointer after the object if it is not
3537		 * permitted to overwrite the first word of the object on
3538		 * kmem_cache_free.
3539		 *
3540		 * This is the case if we do RCU, have a constructor or
3541		 * destructor or are poisoning the objects.
3542		 */
3543		s->offset = size;
3544		size += sizeof(void *);
3545	}
3546
3547#ifdef CONFIG_SLUB_DEBUG
3548	if (flags & SLAB_STORE_USER)
3549		/*
3550		 * Need to store information about allocs and frees after
3551		 * the object.
3552		 */
3553		size += 2 * sizeof(struct track);
3554#endif
3555
3556	kasan_cache_create(s, &size, &s->flags);
3557#ifdef CONFIG_SLUB_DEBUG
3558	if (flags & SLAB_RED_ZONE) {
3559		/*
3560		 * Add some empty padding so that we can catch
3561		 * overwrites from earlier objects rather than let
3562		 * tracking information or the free pointer be
3563		 * corrupted if a user writes before the start
3564		 * of the object.
3565		 */
3566		size += sizeof(void *);
3567
3568		s->red_left_pad = sizeof(void *);
3569		s->red_left_pad = ALIGN(s->red_left_pad, s->align);
3570		size += s->red_left_pad;
3571	}
3572#endif
3573
3574	/*
3575	 * SLUB stores one object immediately after another beginning from
3576	 * offset 0. In order to align the objects we have to simply size
3577	 * each object to conform to the alignment.
3578	 */
3579	size = ALIGN(size, s->align);
3580	s->size = size;
3581	if (forced_order >= 0)
3582		order = forced_order;
3583	else
3584		order = calculate_order(size);
3585
3586	if ((int)order < 0)
3587		return 0;
3588
3589	s->allocflags = 0;
3590	if (order)
3591		s->allocflags |= __GFP_COMP;
3592
3593	if (s->flags & SLAB_CACHE_DMA)
3594		s->allocflags |= GFP_DMA;
3595
3596	if (s->flags & SLAB_CACHE_DMA32)
3597		s->allocflags |= GFP_DMA32;
3598
3599	if (s->flags & SLAB_RECLAIM_ACCOUNT)
3600		s->allocflags |= __GFP_RECLAIMABLE;
3601
3602	/*
3603	 * Determine the number of objects per slab
3604	 */
3605	s->oo = oo_make(order, size);
3606	s->min = oo_make(get_order(size), size);
3607	if (oo_objects(s->oo) > oo_objects(s->max))
3608		s->max = s->oo;
3609
3610	return !!oo_objects(s->oo);
3611}
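/*
 * Editor's worked example (hypothetical cache: no debug flags, no ctor,
 * 8 byte alignment): a 52 byte object is rounded up to size = 56, the free
 * pointer overlays offset 0 of each free object (s->offset stays 0), and an
 * order-0 slab then holds 4096 / 56 = 73 objects with 8 bytes left over.
 */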
3612
3613static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
3614{
3615	s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor);
3616#ifdef CONFIG_SLAB_FREELIST_HARDENED
3617	s->random = get_random_long();
3618#endif
3619
3620	if (!calculate_sizes(s, -1))
3621		goto error;
3622	if (disable_higher_order_debug) {
3623		/*
3624		 * Disable debugging flags that store metadata if the min slab
3625		 * order increased.
3626		 */
3627		if (get_order(s->size) > get_order(s->object_size)) {
3628			s->flags &= ~DEBUG_METADATA_FLAGS;
3629			s->offset = 0;
3630			if (!calculate_sizes(s, -1))
3631				goto error;
3632		}
3633	}
3634
3635#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
3636    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
3637	if (system_has_cmpxchg_double() && (s->flags & SLAB_NO_CMPXCHG) == 0)
3638		/* Enable fast mode */
3639		s->flags |= __CMPXCHG_DOUBLE;
3640#endif
3641
3642	/*
3643	 * The larger the object size is, the more pages we want on the partial
3644	 * list to avoid pounding the page allocator excessively.
3645	 */
3646	set_min_partial(s, ilog2(s->size) / 2);
3647
3648	set_cpu_partial(s);
3649
3650#ifdef CONFIG_NUMA
3651	s->remote_node_defrag_ratio = 1000;
3652#endif
3653
3654	/* Initialize the pre-computed randomized freelist if slab is up */
3655	if (slab_state >= UP) {
3656		if (init_cache_random_seq(s))
3657			goto error;
3658	}
3659
3660	if (!init_kmem_cache_nodes(s))
3661		goto error;
3662
3663	if (alloc_kmem_cache_cpus(s))
3664		return 0;
3665
3666	free_kmem_cache_nodes(s);
3667error:
3668	return -EINVAL;
3669}
3670
3671static void list_slab_objects(struct kmem_cache *s, struct page *page,
3672							const char *text)
3673{
3674#ifdef CONFIG_SLUB_DEBUG
3675	void *addr = page_address(page);
3676	void *p;
3677	unsigned long *map = bitmap_zalloc(page->objects, GFP_ATOMIC);
3678	if (!map)
3679		return;
3680	slab_err(s, page, text, s->name);
3681	slab_lock(page);
3682
3683	get_map(s, page, map);
3684	for_each_object(p, s, addr, page->objects) {
3685
3686		if (!test_bit(slab_index(p, s, addr), map)) {
3687			pr_err("INFO: Object 0x%p @offset=%tu\n", p, p - addr);
3688			print_tracking(s, p);
3689		}
3690	}
3691	slab_unlock(page);
3692	bitmap_free(map);
3693#endif
3694}
3695
3696/*
3697 * Attempt to free all partial slabs on a node.
3698 * This is called from __kmem_cache_shutdown(). We must take list_lock
3699 * because sysfs files might still access the partial list after shutdown.
3700 */
3701static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
3702{
3703	LIST_HEAD(discard);
3704	struct page *page, *h;
3705
3706	BUG_ON(irqs_disabled());
3707	spin_lock_irq(&n->list_lock);
3708	list_for_each_entry_safe(page, h, &n->partial, slab_list) {
3709		if (!page->inuse) {
3710			remove_partial(n, page);
3711			list_add(&page->slab_list, &discard);
3712		} else {
3713			list_slab_objects(s, page,
3714			"Objects remaining in %s on __kmem_cache_shutdown()");
3715		}
3716	}
3717	spin_unlock_irq(&n->list_lock);
3718
3719	list_for_each_entry_safe(page, h, &discard, slab_list)
3720		discard_slab(s, page);
3721}
3722
3723bool __kmem_cache_empty(struct kmem_cache *s)
3724{
3725	int node;
3726	struct kmem_cache_node *n;
3727
3728	for_each_kmem_cache_node(s, node, n)
3729		if (n->nr_partial || slabs_node(s, node))
3730			return false;
3731	return true;
3732}
3733
3734/*
3735 * Release all resources used by a slab cache.
3736 */
3737int __kmem_cache_shutdown(struct kmem_cache *s)
3738{
3739	int node;
3740	struct kmem_cache_node *n;
3741
3742	flush_all(s);
3743	/* Attempt to free all objects */
3744	for_each_kmem_cache_node(s, node, n) {
3745		free_partial(s, n);
3746		if (n->nr_partial || slabs_node(s, node))
3747			return 1;
3748	}
3749	sysfs_slab_remove(s);
3750	return 0;
3751}
3752
3753/********************************************************************
3754 *		Kmalloc subsystem
3755 *******************************************************************/
3756
3757static int __init setup_slub_min_order(char *str)
3758{
3759	get_option(&str, (int *)&slub_min_order);
3760
3761	return 1;
3762}
3763
3764__setup("slub_min_order=", setup_slub_min_order);
3765
3766static int __init setup_slub_max_order(char *str)
3767{
3768	get_option(&str, (int *)&slub_max_order);
3769	slub_max_order = min(slub_max_order, (unsigned int)MAX_ORDER - 1);
3770
3771	return 1;
3772}
3773
3774__setup("slub_max_order=", setup_slub_max_order);
3775
3776static int __init setup_slub_min_objects(char *str)
3777{
3778	get_option(&str, (int *)&slub_min_objects);
3779
3780	return 1;
3781}
3782
3783__setup("slub_min_objects=", setup_slub_min_objects);
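/*
 * Editor's note (illustrative use of the documented boot parameters parsed
 * above): appending "slub_max_order=1 slub_min_objects=16" to the kernel
 * command line asks SLUB to keep slabs at order 0 or 1 while still trying
 * to fit at least 16 objects into each slab.
 */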
3784
3785void *__kmalloc(size_t size, gfp_t flags)
3786{
3787	struct kmem_cache *s;
3788	void *ret;
3789
3790	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
3791		return kmalloc_large(size, flags);
3792
3793	s = kmalloc_slab(size, flags);
3794
3795	if (unlikely(ZERO_OR_NULL_PTR(s)))
3796		return s;
3797
3798	ret = slab_alloc(s, flags, _RET_IP_);
3799
3800	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
3801
3802	ret = kasan_kmalloc(s, ret, size, flags);
3803
3804	return ret;
3805}
3806EXPORT_SYMBOL(__kmalloc);
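/*
 * Editor's usage sketch (not part of the original file): __kmalloc() is
 * normally reached through kmalloc(); requests above KMALLOC_MAX_CACHE_SIZE
 * bypass the kmalloc caches via kmalloc_large() as shown above.
 */
#if 0
static void *example_buffer(size_t len)
{
	void *buf = kmalloc(len, GFP_KERNEL);	/* may return NULL */

	/* ... fill the buffer ... */
	return buf;				/* caller frees with kfree() */
}
#endif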
3807
3808#ifdef CONFIG_NUMA
3809static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
3810{
3811	struct page *page;
3812	void *ptr = NULL;
3813	unsigned int order = get_order(size);
3814
3815	flags |= __GFP_COMP;
3816	page = alloc_pages_node(node, flags, order);
3817	if (page) {
3818		ptr = page_address(page);
3819		mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
3820				    1 << order);
3821	}
3822
3823	return kmalloc_large_node_hook(ptr, size, flags);
3824}
3825
3826void *__kmalloc_node(size_t size, gfp_t flags, int node)
3827{
3828	struct kmem_cache *s;
3829	void *ret;
3830
3831	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
3832		ret = kmalloc_large_node(size, flags, node);
3833
3834		trace_kmalloc_node(_RET_IP_, ret,
3835				   size, PAGE_SIZE << get_order(size),
3836				   flags, node);
3837
3838		return ret;
3839	}
3840
3841	s = kmalloc_slab(size, flags);
3842
3843	if (unlikely(ZERO_OR_NULL_PTR(s)))
3844		return s;
3845
3846	ret = slab_alloc_node(s, flags, node, _RET_IP_);
3847
3848	trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
3849
3850	ret = kasan_kmalloc(s, ret, size, flags);
3851
3852	return ret;
3853}
3854EXPORT_SYMBOL(__kmalloc_node);
3855#endif	/* CONFIG_NUMA */
3856
3857#ifdef CONFIG_HARDENED_USERCOPY
3858/*
3859 * Rejects incorrectly sized objects and objects that are to be copied
3860 * to/from userspace but do not fall entirely within the containing slab
3861 * cache's usercopy region.
3862 *
3863 * Returns NULL if check passes, otherwise const char * to name of cache
3864 * to indicate an error.
3865 */
3866void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
3867			 bool to_user)
3868{
3869	struct kmem_cache *s;
3870	unsigned int offset;
3871	size_t object_size;
3872
3873	ptr = kasan_reset_tag(ptr);
3874
3875	/* Find object and usable object size. */
3876	s = page->slab_cache;
3877
3878	/* Reject impossible pointers. */
3879	if (ptr < page_address(page))
3880		usercopy_abort("SLUB object not in SLUB page?!", NULL,
3881			       to_user, 0, n);
3882
3883	/* Find offset within object. */
3884	offset = (ptr - page_address(page)) % s->size;
3885
3886	/* Adjust for redzone and reject if within the redzone. */
3887	if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE) {
3888		if (offset < s->red_left_pad)
3889			usercopy_abort("SLUB object in left red zone",
3890				       s->name, to_user, offset, n);
3891		offset -= s->red_left_pad;
3892	}
3893
3894	/* Allow address range falling entirely within usercopy region. */
3895	if (offset >= s->useroffset &&
3896	    offset - s->useroffset <= s->usersize &&
3897	    n <= s->useroffset - offset + s->usersize)
3898		return;
3899
3900	/*
3901	 * If the copy is still within the allocated object, produce
3902	 * a warning instead of rejecting the copy. This is intended
3903	 * to be a temporary method to find any missing usercopy
3904	 * whitelists.
3905	 */
3906	object_size = slab_ksize(s);
3907	if (usercopy_fallback &&
3908	    offset <= object_size && n <= object_size - offset) {
3909		usercopy_warn("SLUB object", s->name, to_user, offset, n);
3910		return;
3911	}
3912
3913	usercopy_abort("SLUB object", s->name, to_user, offset, n);
3914}
3915#endif /* CONFIG_HARDENED_USERCOPY */
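/*
 * Illustrative example (not part of the original source): a cache created
 * with kmem_cache_create_usercopy("foo", 256, 0, 0, 16, 64, NULL) has
 * useroffset = 16 and usersize = 64, so __check_heap_object() only accepts
 * user copies that fall entirely within bytes [16, 80) of an object; copies
 * elsewhere in the object warn (with usercopy_fallback) or abort.
 */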
3916
3917size_t __ksize(const void *object)
3918{
3919	struct page *page;
3920
3921	if (unlikely(object == ZERO_SIZE_PTR))
3922		return 0;
3923
3924	page = virt_to_head_page(object);
3925
3926	if (unlikely(!PageSlab(page))) {
3927		WARN_ON(!PageCompound(page));
3928		return page_size(page);
3929	}
3930
3931	return slab_ksize(page->slab_cache);
3932}
3933EXPORT_SYMBOL(__ksize);
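/*
 * Illustrative example (not part of the original source): since kmalloc
 * rounds requests up to the nearest cache size, __ksize() reports the usable
 * size of the underlying object, e.g. ksize(kmalloc(100, GFP_KERNEL))
 * typically returns 128 rather than 100.
 */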
3934
3935void kfree(const void *x)
3936{
3937	struct page *page;
3938	void *object = (void *)x;
3939
3940	trace_kfree(_RET_IP_, x);
3941
3942	if (unlikely(ZERO_OR_NULL_PTR(x)))
3943		return;
3944
3945	page = virt_to_head_page(x);
3946	if (unlikely(!PageSlab(page))) {
3947		unsigned int order = compound_order(page);
3948
3949		BUG_ON(!PageCompound(page));
3950		kfree_hook(object);
3951		mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
3952				    -(1 << order));
3953		__free_pages(page, order);
3954		return;
3955	}
3956	slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
3957}
3958EXPORT_SYMBOL(kfree);
3959
3960#define SHRINK_PROMOTE_MAX 32
3961
3962/*
3963 * kmem_cache_shrink discards empty slabs and promotes the slabs filled
3964 * up most to the head of the partial lists. New allocations will then
3965 * fill those up and thus they can be removed from the partial lists.
3966 *
3967 * The slabs with the least items are placed last. This results in them
3968 * being allocated from last, increasing the chance that their remaining
3969 * objects are freed and the now-empty slabs can then be discarded.
3970 */
3971int __kmem_cache_shrink(struct kmem_cache *s)
3972{
3973	int node;
3974	int i;
3975	struct kmem_cache_node *n;
3976	struct page *page;
3977	struct page *t;
3978	struct list_head discard;
3979	struct list_head promote[SHRINK_PROMOTE_MAX];
3980	unsigned long flags;
3981	int ret = 0;
3982
3983	flush_all(s);
3984	for_each_kmem_cache_node(s, node, n) {
3985		INIT_LIST_HEAD(&discard);
3986		for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
3987			INIT_LIST_HEAD(promote + i);
3988
3989		spin_lock_irqsave(&n->list_lock, flags);
3990
3991		/*
3992		 * Build lists of slabs to discard or promote.
3993		 *
3994		 * Note that concurrent frees may occur while we hold the
3995		 * list_lock. page->inuse here is the upper limit.
3996		 */
3997		list_for_each_entry_safe(page, t, &n->partial, slab_list) {
3998			int free = page->objects - page->inuse;
3999
4000			/* Do not reread page->inuse */
4001			barrier();
4002
4003			/* We do not keep full slabs on the list */
4004			BUG_ON(free <= 0);
4005
4006			if (free == page->objects) {
4007				list_move(&page->slab_list, &discard);
4008				n->nr_partial--;
4009			} else if (free <= SHRINK_PROMOTE_MAX)
4010				list_move(&page->slab_list, promote + free - 1);
4011		}
4012
4013		/*
4014		 * Promote the slabs filled up most to the head of the
4015		 * partial list.
4016		 */
4017		for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
4018			list_splice(promote + i, &n->partial);
4019
4020		spin_unlock_irqrestore(&n->list_lock, flags);
4021
4022		/* Release empty slabs */
4023		list_for_each_entry_safe(page, t, &discard, slab_list)
4024			discard_slab(s, page);
4025
4026		if (slabs_node(s, node))
4027			ret = 1;
4028	}
4029
4030	return ret;
4031}
4032
4033#ifdef CONFIG_MEMCG
4034void __kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s)
4035{
4036	/*
4037	 * Called with all the locks held after a sched RCU grace period.
4038	 * Even if @s becomes empty after shrinking, we can't know that @s
4039	 * doesn't have allocations already in-flight and thus can't
4040	 * destroy @s until the associated memcg is released.
4041	 *
4042	 * However, let's remove the sysfs files for empty caches here.
4043	 * Each cache has a lot of interface files which aren't
4044	 * particularly useful for empty, draining caches; otherwise, we can
4045	 * easily end up with millions of unnecessary sysfs files on
4046	 * systems which have a lot of memory and transient cgroups.
4047	 */
4048	if (!__kmem_cache_shrink(s))
4049		sysfs_slab_remove(s);
4050}
4051
4052void __kmemcg_cache_deactivate(struct kmem_cache *s)
4053{
4054	/*
4055	 * Disable empty slabs caching. Used to avoid pinning offline
4056	 * memory cgroups by kmem pages that can be freed.
4057	 */
4058	slub_set_cpu_partial(s, 0);
4059	s->min_partial = 0;
4060}
4061#endif	/* CONFIG_MEMCG */
4062
4063static int slab_mem_going_offline_callback(void *arg)
4064{
4065	struct kmem_cache *s;
4066
4067	mutex_lock(&slab_mutex);
4068	list_for_each_entry(s, &slab_caches, list)
4069		__kmem_cache_shrink(s);
4070	mutex_unlock(&slab_mutex);
4071
4072	return 0;
4073}
4074
4075static void slab_mem_offline_callback(void *arg)
4076{
4077	struct kmem_cache_node *n;
4078	struct kmem_cache *s;
4079	struct memory_notify *marg = arg;
4080	int offline_node;
4081
4082	offline_node = marg->status_change_nid_normal;
4083
4084	/*
4085	 * If the node still has available memory, its kmem_cache_node is
4086	 * still needed, so there is nothing to do here.
4087	 */
4088	if (offline_node < 0)
4089		return;
4090
4091	mutex_lock(&slab_mutex);
4092	list_for_each_entry(s, &slab_caches, list) {
4093		n = get_node(s, offline_node);
4094		if (n) {
4095			/*
4096			 * if n->nr_slabs > 0, slabs still exist on the node
4097			 * that is going down. We were unable to free them,
4098			 * and offline_pages() function shouldn't call this
4099			 * callback. So, we must fail.
4100			 */
4101			BUG_ON(slabs_node(s, offline_node));
4102
4103			s->node[offline_node] = NULL;
4104			kmem_cache_free(kmem_cache_node, n);
4105		}
4106	}
4107	mutex_unlock(&slab_mutex);
4108}
4109
4110static int slab_mem_going_online_callback(void *arg)
4111{
4112	struct kmem_cache_node *n;
4113	struct kmem_cache *s;
4114	struct memory_notify *marg = arg;
4115	int nid = marg->status_change_nid_normal;
4116	int ret = 0;
4117
4118	/*
4119	 * If the node's memory is already available, then kmem_cache_node is
4120	 * already created. Nothing to do.
4121	 */
4122	if (nid < 0)
4123		return 0;
4124
4125	/*
4126	 * We are bringing a node online. No memory is available yet. We must
4127	 * allocate a kmem_cache_node structure in order to bring the node
4128	 * online.
4129	 */
4130	mutex_lock(&slab_mutex);
4131	list_for_each_entry(s, &slab_caches, list) {
4132		/*
4133		 * XXX: kmem_cache_alloc_node will fallback to other nodes
4134		 *      since memory is not yet available from the node that
4135		 *      is brought up.
4136		 */
4137		n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL);
4138		if (!n) {
4139			ret = -ENOMEM;
4140			goto out;
4141		}
4142		init_kmem_cache_node(n);
4143		s->node[nid] = n;
4144	}
4145out:
4146	mutex_unlock(&slab_mutex);
4147	return ret;
4148}
4149
4150static int slab_memory_callback(struct notifier_block *self,
4151				unsigned long action, void *arg)
4152{
4153	int ret = 0;
4154
4155	switch (action) {
4156	case MEM_GOING_ONLINE:
4157		ret = slab_mem_going_online_callback(arg);
4158		break;
4159	case MEM_GOING_OFFLINE:
4160		ret = slab_mem_going_offline_callback(arg);
4161		break;
4162	case MEM_OFFLINE:
4163	case MEM_CANCEL_ONLINE:
4164		slab_mem_offline_callback(arg);
4165		break;
4166	case MEM_ONLINE:
4167	case MEM_CANCEL_OFFLINE:
4168		break;
4169	}
4170	if (ret)
4171		ret = notifier_from_errno(ret);
4172	else
4173		ret = NOTIFY_OK;
4174	return ret;
4175}
4176
4177static struct notifier_block slab_memory_callback_nb = {
4178	.notifier_call = slab_memory_callback,
4179	.priority = SLAB_CALLBACK_PRI,
4180};
4181
4182/********************************************************************
4183 *			Basic setup of slabs
4184 *******************************************************************/
4185
4186/*
4187 * Used for early kmem_cache structures that were allocated using
4188 * the page allocator. Allocate them properly then fix up the pointers
4189 * that may be pointing to the wrong kmem_cache structure.
4190 */
4191
4192static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
4193{
4194	int node;
4195	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
4196	struct kmem_cache_node *n;
4197
4198	memcpy(s, static_cache, kmem_cache->object_size);
4199
4200	/*
4201	 * This runs very early, and only the boot processor is supposed to be
4202	 * up.  Even if it weren't true, IRQs are not up so we couldn't fire
4203	 * IPIs around.
4204	 */
4205	__flush_cpu_slab(s, smp_processor_id());
4206	for_each_kmem_cache_node(s, node, n) {
4207		struct page *p;
4208
4209		list_for_each_entry(p, &n->partial, slab_list)
4210			p->slab_cache = s;
4211
4212#ifdef CONFIG_SLUB_DEBUG
4213		list_for_each_entry(p, &n->full, slab_list)
4214			p->slab_cache = s;
4215#endif
4216	}
4217	slab_init_memcg_params(s);
4218	list_add(&s->list, &slab_caches);
4219	memcg_link_cache(s, NULL);
4220	return s;
4221}
4222
4223void __init kmem_cache_init(void)
4224{
4225	static __initdata struct kmem_cache boot_kmem_cache,
4226		boot_kmem_cache_node;
4227
4228	if (debug_guardpage_minorder())
4229		slub_max_order = 0;
4230
4231	kmem_cache_node = &boot_kmem_cache_node;
4232	kmem_cache = &boot_kmem_cache;
4233
4234	create_boot_cache(kmem_cache_node, "kmem_cache_node",
4235		sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN, 0, 0);
4236
4237	register_hotmemory_notifier(&slab_memory_callback_nb);
4238
4239	/* Able to allocate the per node structures */
4240	slab_state = PARTIAL;
4241
4242	create_boot_cache(kmem_cache, "kmem_cache",
4243			offsetof(struct kmem_cache, node) +
4244				nr_node_ids * sizeof(struct kmem_cache_node *),
4245		       SLAB_HWCACHE_ALIGN, 0, 0);
4246
4247	kmem_cache = bootstrap(&boot_kmem_cache);
4248	kmem_cache_node = bootstrap(&boot_kmem_cache_node);
4249
4250	/* Now we can use the kmem_cache to allocate kmalloc slabs */
4251	setup_kmalloc_cache_index_table();
4252	create_kmalloc_caches(0);
4253
4254	/* Setup random freelists for each cache */
4255	init_freelist_randomization();
4256
4257	cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL,
4258				  slub_cpu_dead);
4259
4260	pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%u\n",
4261		cache_line_size(),
4262		slub_min_order, slub_max_order, slub_min_objects,
4263		nr_cpu_ids, nr_node_ids);
4264}
4265
4266void __init kmem_cache_init_late(void)
4267{
4268}
4269
4270struct kmem_cache *
4271__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
4272		   slab_flags_t flags, void (*ctor)(void *))
4273{
4274	struct kmem_cache *s, *c;
4275
4276	s = find_mergeable(size, align, flags, name, ctor);
4277	if (s) {
4278		s->refcount++;
4279
4280		/*
4281		 * Adjust the object sizes so that we clear
4282		 * the complete object on kzalloc.
4283		 */
4284		s->object_size = max(s->object_size, size);
4285		s->inuse = max(s->inuse, ALIGN(size, sizeof(void *)));
4286
4287		for_each_memcg_cache(c, s) {
4288			c->object_size = s->object_size;
4289			c->inuse = max(c->inuse, ALIGN(size, sizeof(void *)));
4290		}
4291
4292		if (sysfs_slab_alias(s, name)) {
4293			s->refcount--;
4294			s = NULL;
4295		}
4296	}
4297
4298	return s;
4299}
4300
4301int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
4302{
4303	int err;
4304
4305	err = kmem_cache_open(s, flags);
4306	if (err)
4307		return err;
4308
4309	/* Mutex is not taken during early boot */
4310	if (slab_state <= UP)
4311		return 0;
4312
4313	memcg_propagate_slab_attrs(s);
4314	err = sysfs_slab_add(s);
4315	if (err)
4316		__kmem_cache_release(s);
4317
4318	return err;
4319}
4320
4321void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
4322{
4323	struct kmem_cache *s;
4324	void *ret;
4325
4326	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
4327		return kmalloc_large(size, gfpflags);
4328
4329	s = kmalloc_slab(size, gfpflags);
4330
4331	if (unlikely(ZERO_OR_NULL_PTR(s)))
4332		return s;
4333
4334	ret = slab_alloc(s, gfpflags, caller);
4335
4336	/* Honor the call site pointer we received. */
4337	trace_kmalloc(caller, ret, size, s->size, gfpflags);
4338
4339	return ret;
4340}
4341
4342#ifdef CONFIG_NUMA
4343void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
4344					int node, unsigned long caller)
4345{
4346	struct kmem_cache *s;
4347	void *ret;
4348
4349	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
4350		ret = kmalloc_large_node(size, gfpflags, node);
4351
4352		trace_kmalloc_node(caller, ret,
4353				   size, PAGE_SIZE << get_order(size),
4354				   gfpflags, node);
4355
4356		return ret;
4357	}
4358
4359	s = kmalloc_slab(size, gfpflags);
4360
4361	if (unlikely(ZERO_OR_NULL_PTR(s)))
4362		return s;
4363
4364	ret = slab_alloc_node(s, gfpflags, node, caller);
4365
4366	/* Honor the call site pointer we received. */
4367	trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
4368
4369	return ret;
4370}
4371#endif
4372
4373#ifdef CONFIG_SYSFS
4374static int count_inuse(struct page *page)
4375{
4376	return page->inuse;
4377}
4378
4379static int count_total(struct page *page)
4380{
4381	return page->objects;
4382}
4383#endif
4384
4385#ifdef CONFIG_SLUB_DEBUG
4386static int validate_slab(struct kmem_cache *s, struct page *page,
4387						unsigned long *map)
4388{
4389	void *p;
4390	void *addr = page_address(page);
4391
4392	if (!check_slab(s, page) ||
4393			!on_freelist(s, page, NULL))
4394		return 0;
4395
4396	/* Now we know that a valid freelist exists */
4397	bitmap_zero(map, page->objects);
4398
4399	get_map(s, page, map);
4400	for_each_object(p, s, addr, page->objects) {
4401		if (test_bit(slab_index(p, s, addr), map))
4402			if (!check_object(s, page, p, SLUB_RED_INACTIVE))
4403				return 0;
4404	}
4405
4406	for_each_object(p, s, addr, page->objects)
4407		if (!test_bit(slab_index(p, s, addr), map))
4408			if (!check_object(s, page, p, SLUB_RED_ACTIVE))
4409				return 0;
4410	return 1;
4411}
4412
4413static void validate_slab_slab(struct kmem_cache *s, struct page *page,
4414						unsigned long *map)
4415{
4416	slab_lock(page);
4417	validate_slab(s, page, map);
4418	slab_unlock(page);
4419}
4420
4421static int validate_slab_node(struct kmem_cache *s,
4422		struct kmem_cache_node *n, unsigned long *map)
4423{
4424	unsigned long count = 0;
4425	struct page *page;
4426	unsigned long flags;
4427
4428	spin_lock_irqsave(&n->list_lock, flags);
4429
4430	list_for_each_entry(page, &n->partial, slab_list) {
4431		validate_slab_slab(s, page, map);
4432		count++;
4433	}
4434	if (count != n->nr_partial)
4435		pr_err("SLUB %s: %ld partial slabs counted but counter=%ld\n",
4436		       s->name, count, n->nr_partial);
4437
4438	if (!(s->flags & SLAB_STORE_USER))
4439		goto out;
4440
4441	list_for_each_entry(page, &n->full, slab_list) {
4442		validate_slab_slab(s, page, map);
4443		count++;
4444	}
4445	if (count != atomic_long_read(&n->nr_slabs))
4446		pr_err("SLUB: %s %ld slabs counted but counter=%ld\n",
4447		       s->name, count, atomic_long_read(&n->nr_slabs));
4448
4449out:
4450	spin_unlock_irqrestore(&n->list_lock, flags);
4451	return count;
4452}
4453
4454static long validate_slab_cache(struct kmem_cache *s)
4455{
4456	int node;
4457	unsigned long count = 0;
4458	struct kmem_cache_node *n;
4459	unsigned long *map = bitmap_alloc(oo_objects(s->max), GFP_KERNEL);
4460
4461	if (!map)
4462		return -ENOMEM;
4463
4464	flush_all(s);
4465	for_each_kmem_cache_node(s, node, n)
4466		count += validate_slab_node(s, n, map);
4467	bitmap_free(map);
4468	return count;
4469}
4470/*
4471 * Generate lists of code addresses where slabcache objects are allocated
4472 * and freed.
4473 */
4474
4475struct location {
4476	unsigned long count;
4477	unsigned long addr;
4478	long long sum_time;
4479	long min_time;
4480	long max_time;
4481	long min_pid;
4482	long max_pid;
4483	DECLARE_BITMAP(cpus, NR_CPUS);
4484	nodemask_t nodes;
4485};
4486
4487struct loc_track {
4488	unsigned long max;
4489	unsigned long count;
4490	struct location *loc;
4491};
4492
4493static void free_loc_track(struct loc_track *t)
4494{
4495	if (t->max)
4496		free_pages((unsigned long)t->loc,
4497			get_order(sizeof(struct location) * t->max));
4498}
4499
4500static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
4501{
4502	struct location *l;
4503	int order;
4504
4505	order = get_order(sizeof(struct location) * max);
4506
4507	l = (void *)__get_free_pages(flags, order);
4508	if (!l)
4509		return 0;
4510
4511	if (t->count) {
4512		memcpy(l, t->loc, sizeof(struct location) * t->count);
4513		free_loc_track(t);
4514	}
4515	t->max = max;
4516	t->loc = l;
4517	return 1;
4518}
4519
4520static int add_location(struct loc_track *t, struct kmem_cache *s,
4521				const struct track *track)
4522{
4523	long start, end, pos;
4524	struct location *l;
4525	unsigned long caddr;
4526	unsigned long age = jiffies - track->when;
4527
4528	start = -1;
4529	end = t->count;
4530
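	/*
	 * Binary search: t->loc[] is kept sorted by caller address, so an
	 * existing entry (or the insertion point for a new one) is found in
	 * O(log n) steps.
	 */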
4531	for ( ; ; ) {
4532		pos = start + (end - start + 1) / 2;
4533
4534		/*
4535		 * There is nothing at "end". If we end up there
4536		 * we need to insert the new element before "end".
4537		 */
4538		if (pos == end)
4539			break;
4540
4541		caddr = t->loc[pos].addr;
4542		if (track->addr == caddr) {
4543
4544			l = &t->loc[pos];
4545			l->count++;
4546			if (track->when) {
4547				l->sum_time += age;
4548				if (age < l->min_time)
4549					l->min_time = age;
4550				if (age > l->max_time)
4551					l->max_time = age;
4552
4553				if (track->pid < l->min_pid)
4554					l->min_pid = track->pid;
4555				if (track->pid > l->max_pid)
4556					l->max_pid = track->pid;
4557
4558				cpumask_set_cpu(track->cpu,
4559						to_cpumask(l->cpus));
4560			}
4561			node_set(page_to_nid(virt_to_page(track)), l->nodes);
4562			return 1;
4563		}
4564
4565		if (track->addr < caddr)
4566			end = pos;
4567		else
4568			start = pos;
4569	}
4570
4571	/*
4572	 * Not found. Insert new tracking element.
4573	 */
4574	if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
4575		return 0;
4576
4577	l = t->loc + pos;
4578	if (pos < t->count)
4579		memmove(l + 1, l,
4580			(t->count - pos) * sizeof(struct location));
4581	t->count++;
4582	l->count = 1;
4583	l->addr = track->addr;
4584	l->sum_time = age;
4585	l->min_time = age;
4586	l->max_time = age;
4587	l->min_pid = track->pid;
4588	l->max_pid = track->pid;
4589	cpumask_clear(to_cpumask(l->cpus));
4590	cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
4591	nodes_clear(l->nodes);
4592	node_set(page_to_nid(virt_to_page(track)), l->nodes);
4593	return 1;
4594}
4595
4596static void process_slab(struct loc_track *t, struct kmem_cache *s,
4597		struct page *page, enum track_item alloc,
4598		unsigned long *map)
4599{
4600	void *addr = page_address(page);
4601	void *p;
4602
4603	bitmap_zero(map, page->objects);
4604	get_map(s, page, map);
4605
4606	for_each_object(p, s, addr, page->objects)
4607		if (!test_bit(slab_index(p, s, addr), map))
4608			add_location(t, s, get_track(s, p, alloc));
4609}
4610
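/*
 * Each line produced below has the form (derived from the sprintf() calls
 * in this function):
 *
 *   <count> <call site> [age=<min>/<avg>/<max>] [pid=<min>[-<max>]]
 *           [cpus=<mask>] [nodes=<mask>]
 */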
4611static int list_locations(struct kmem_cache *s, char *buf,
4612					enum track_item alloc)
4613{
4614	int len = 0;
4615	unsigned long i;
4616	struct loc_track t = { 0, 0, NULL };
4617	int node;
4618	struct kmem_cache_node *n;
4619	unsigned long *map = bitmap_alloc(oo_objects(s->max), GFP_KERNEL);
4620
4621	if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
4622				     GFP_KERNEL)) {
4623		bitmap_free(map);
4624		return sprintf(buf, "Out of memory\n");
4625	}
4626	/* Push back cpu slabs */
4627	flush_all(s);
4628
4629	for_each_kmem_cache_node(s, node, n) {
4630		unsigned long flags;
4631		struct page *page;
4632
4633		if (!atomic_long_read(&n->nr_slabs))
4634			continue;
4635
4636		spin_lock_irqsave(&n->list_lock, flags);
4637		list_for_each_entry(page, &n->partial, slab_list)
4638			process_slab(&t, s, page, alloc, map);
4639		list_for_each_entry(page, &n->full, slab_list)
4640			process_slab(&t, s, page, alloc, map);
4641		spin_unlock_irqrestore(&n->list_lock, flags);
4642	}
4643
4644	for (i = 0; i < t.count; i++) {
4645		struct location *l = &t.loc[i];
4646
4647		if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100)
4648			break;
4649		len += sprintf(buf + len, "%7ld ", l->count);
4650
4651		if (l->addr)
4652			len += sprintf(buf + len, "%pS", (void *)l->addr);
4653		else
4654			len += sprintf(buf + len, "<not-available>");
4655
4656		if (l->sum_time != l->min_time) {
4657			len += sprintf(buf + len, " age=%ld/%ld/%ld",
4658				l->min_time,
4659				(long)div_u64(l->sum_time, l->count),
4660				l->max_time);
4661		} else
4662			len += sprintf(buf + len, " age=%ld",
4663				l->min_time);
4664
4665		if (l->min_pid != l->max_pid)
4666			len += sprintf(buf + len, " pid=%ld-%ld",
4667				l->min_pid, l->max_pid);
4668		else
4669			len += sprintf(buf + len, " pid=%ld",
4670				l->min_pid);
4671
4672		if (num_online_cpus() > 1 &&
4673				!cpumask_empty(to_cpumask(l->cpus)) &&
4674				len < PAGE_SIZE - 60)
4675			len += scnprintf(buf + len, PAGE_SIZE - len - 50,
4676					 " cpus=%*pbl",
4677					 cpumask_pr_args(to_cpumask(l->cpus)));
4678
4679		if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
4680				len < PAGE_SIZE - 60)
4681			len += scnprintf(buf + len, PAGE_SIZE - len - 50,
4682					 " nodes=%*pbl",
4683					 nodemask_pr_args(&l->nodes));
4684
4685		len += sprintf(buf + len, "\n");
4686	}
4687
4688	free_loc_track(&t);
4689	bitmap_free(map);
4690	if (!t.count)
4691		len += sprintf(buf, "No data\n");
4692	return len;
4693}
4694#endif	/* CONFIG_SLUB_DEBUG */
4695
4696#ifdef SLUB_RESILIENCY_TEST
4697static void __init resiliency_test(void)
4698{
4699	u8 *p;
4700	int type = KMALLOC_NORMAL;
4701
4702	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || KMALLOC_SHIFT_HIGH < 10);
4703
4704	pr_err("SLUB resiliency testing\n");
4705	pr_err("-----------------------\n");
4706	pr_err("A. Corruption after allocation\n");
4707
4708	p = kzalloc(16, GFP_KERNEL);
4709	p[16] = 0x12;
4710	pr_err("\n1. kmalloc-16: Clobber Redzone/next pointer 0x12->0x%p\n\n",
4711	       p + 16);
4712
4713	validate_slab_cache(kmalloc_caches[type][4]);
4714
4715	/* Hmmm... The next two are dangerous */
4716	p = kzalloc(32, GFP_KERNEL);
4717	p[32 + sizeof(void *)] = 0x34;
4718	pr_err("\n2. kmalloc-32: Clobber next pointer/next slab 0x34 -> 0x%p\n",
4719	       p);
4720	pr_err("If allocated object is overwritten then not detectable\n\n");
4721
4722	validate_slab_cache(kmalloc_caches[type][5]);
4723	p = kzalloc(64, GFP_KERNEL);
4724	p += 64 + (get_cycles() & 0xff) * sizeof(void *);
4725	*p = 0x56;
4726	pr_err("\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
4727	       p);
4728	pr_err("If allocated object is overwritten then not detectable\n\n");
4729	validate_slab_cache(kmalloc_caches[type][6]);
4730
4731	pr_err("\nB. Corruption after free\n");
4732	p = kzalloc(128, GFP_KERNEL);
4733	kfree(p);
4734	*p = 0x78;
4735	pr_err("1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
4736	validate_slab_cache(kmalloc_caches[type][7]);
4737
4738	p = kzalloc(256, GFP_KERNEL);
4739	kfree(p);
4740	p[50] = 0x9a;
4741	pr_err("\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", p);
4742	validate_slab_cache(kmalloc_caches[type][8]);
4743
4744	p = kzalloc(512, GFP_KERNEL);
4745	kfree(p);
4746	p[512] = 0xab;
4747	pr_err("\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
4748	validate_slab_cache(kmalloc_caches[type][9]);
4749}
4750#else
4751#ifdef CONFIG_SYSFS
4752static void resiliency_test(void) {};
4753#endif
4754#endif	/* SLUB_RESILIENCY_TEST */
4755
4756#ifdef CONFIG_SYSFS
4757enum slab_stat_type {
4758	SL_ALL,			/* All slabs */
4759	SL_PARTIAL,		/* Only partially allocated slabs */
4760	SL_CPU,			/* Only slabs used for cpu caches */
4761	SL_OBJECTS,		/* Determine allocated objects not slabs */
4762	SL_TOTAL		/* Determine object capacity not slabs */
4763};
4764
4765#define SO_ALL		(1 << SL_ALL)
4766#define SO_PARTIAL	(1 << SL_PARTIAL)
4767#define SO_CPU		(1 << SL_CPU)
4768#define SO_OBJECTS	(1 << SL_OBJECTS)
4769#define SO_TOTAL	(1 << SL_TOTAL)
4770
4771#ifdef CONFIG_MEMCG
4772static bool memcg_sysfs_enabled = IS_ENABLED(CONFIG_SLUB_MEMCG_SYSFS_ON);
4773
4774static int __init setup_slub_memcg_sysfs(char *str)
4775{
4776	int v;
4777
4778	if (get_option(&str, &v) > 0)
4779		memcg_sysfs_enabled = v;
4780
4781	return 1;
4782}
4783
4784__setup("slub_memcg_sysfs=", setup_slub_memcg_sysfs);
4785#endif
4786
4787static ssize_t show_slab_objects(struct kmem_cache *s,
4788			    char *buf, unsigned long flags)
4789{
4790	unsigned long total = 0;
4791	int node;
4792	int x;
4793	unsigned long *nodes;
4794
4795	nodes = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL);
4796	if (!nodes)
4797		return -ENOMEM;
4798
4799	if (flags & SO_CPU) {
4800		int cpu;
4801
4802		for_each_possible_cpu(cpu) {
4803			struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab,
4804							       cpu);
4805			int node;
4806			struct page *page;
4807
4808			page = READ_ONCE(c->page);
4809			if (!page)
4810				continue;
4811
4812			node = page_to_nid(page);
4813			if (flags & SO_TOTAL)
4814				x = page->objects;
4815			else if (flags & SO_OBJECTS)
4816				x = page->inuse;
4817			else
4818				x = 1;
4819
4820			total += x;
4821			nodes[node] += x;
4822
4823			page = slub_percpu_partial_read_once(c);
4824			if (page) {
4825				node = page_to_nid(page);
4826				if (flags & SO_TOTAL)
4827					WARN_ON_ONCE(1);
4828				else if (flags & SO_OBJECTS)
4829					WARN_ON_ONCE(1);
4830				else
4831					x = page->pages;
4832				total += x;
4833				nodes[node] += x;
4834			}
4835		}
4836	}
4837
4838	/*
4839	 * It is impossible to take "mem_hotplug_lock" here with "kernfs_mutex"
4840	 * already held which will conflict with an existing lock order:
4841	 *
4842	 * mem_hotplug_lock->slab_mutex->kernfs_mutex
4843	 *
4844	 * We don't really need mem_hotplug_lock (to hold off
4845	 * slab_mem_going_offline_callback) here because slab's memory hot
4846	 * unplug code doesn't destroy the kmem_cache->node[] data.
4847	 */
4848
4849#ifdef CONFIG_SLUB_DEBUG
4850	if (flags & SO_ALL) {
4851		struct kmem_cache_node *n;
4852
4853		for_each_kmem_cache_node(s, node, n) {
4854
4855			if (flags & SO_TOTAL)
4856				x = atomic_long_read(&n->total_objects);
4857			else if (flags & SO_OBJECTS)
4858				x = atomic_long_read(&n->total_objects) -
4859					count_partial(n, count_free);
4860			else
4861				x = atomic_long_read(&n->nr_slabs);
4862			total += x;
4863			nodes[node] += x;
4864		}
4865
4866	} else
4867#endif
4868	if (flags & SO_PARTIAL) {
4869		struct kmem_cache_node *n;
4870
4871		for_each_kmem_cache_node(s, node, n) {
4872			if (flags & SO_TOTAL)
4873				x = count_partial(n, count_total);
4874			else if (flags & SO_OBJECTS)
4875				x = count_partial(n, count_inuse);
4876			else
4877				x = n->nr_partial;
4878			total += x;
4879			nodes[node] += x;
4880		}
4881	}
4882	x = sprintf(buf, "%lu", total);
4883#ifdef CONFIG_NUMA
4884	for (node = 0; node < nr_node_ids; node++)
4885		if (nodes[node])
4886			x += sprintf(buf + x, " N%d=%lu",
4887					node, nodes[node]);
4888#endif
4889	kfree(nodes);
4890	return x + sprintf(buf + x, "\n");
4891}
4892
4893#ifdef CONFIG_SLUB_DEBUG
4894static int any_slab_objects(struct kmem_cache *s)
4895{
4896	int node;
4897	struct kmem_cache_node *n;
4898
4899	for_each_kmem_cache_node(s, node, n)
4900		if (atomic_long_read(&n->total_objects))
4901			return 1;
4902
4903	return 0;
4904}
4905#endif
4906
4907#define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
4908#define to_slab(n) container_of(n, struct kmem_cache, kobj)
4909
4910struct slab_attribute {
4911	struct attribute attr;
4912	ssize_t (*show)(struct kmem_cache *s, char *buf);
4913	ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
4914};
4915
4916#define SLAB_ATTR_RO(_name) \
4917	static struct slab_attribute _name##_attr = \
4918	__ATTR(_name, 0400, _name##_show, NULL)
4919
4920#define SLAB_ATTR(_name) \
4921	static struct slab_attribute _name##_attr =  \
4922	__ATTR(_name, 0600, _name##_show, _name##_store)
4923
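/*
 * For example, SLAB_ATTR_RO(slab_size) below creates a read-only sysfs file
 * /sys/kernel/slab/<cache>/slab_size backed by slab_size_show().
 */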
4924static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
4925{
4926	return sprintf(buf, "%u\n", s->size);
4927}
4928SLAB_ATTR_RO(slab_size);
4929
4930static ssize_t align_show(struct kmem_cache *s, char *buf)
4931{
4932	return sprintf(buf, "%u\n", s->align);
4933}
4934SLAB_ATTR_RO(align);
4935
4936static ssize_t object_size_show(struct kmem_cache *s, char *buf)
4937{
4938	return sprintf(buf, "%u\n", s->object_size);
4939}
4940SLAB_ATTR_RO(object_size);
4941
4942static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
4943{
4944	return sprintf(buf, "%u\n", oo_objects(s->oo));
4945}
4946SLAB_ATTR_RO(objs_per_slab);
4947
4948static ssize_t order_store(struct kmem_cache *s,
4949				const char *buf, size_t length)
4950{
4951	unsigned int order;
4952	int err;
4953
4954	err = kstrtouint(buf, 10, &order);
4955	if (err)
4956		return err;
4957
4958	if (order > slub_max_order || order < slub_min_order)
4959		return -EINVAL;
4960
4961	calculate_sizes(s, order);
4962	return length;
4963}
4964
4965static ssize_t order_show(struct kmem_cache *s, char *buf)
4966{
4967	return sprintf(buf, "%u\n", oo_order(s->oo));
4968}
4969SLAB_ATTR(order);
4970
4971static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
4972{
4973	return sprintf(buf, "%lu\n", s->min_partial);
4974}
4975
4976static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
4977				 size_t length)
4978{
4979	unsigned long min;
4980	int err;
4981
4982	err = kstrtoul(buf, 10, &min);
4983	if (err)
4984		return err;
4985
4986	set_min_partial(s, min);
4987	return length;
4988}
4989SLAB_ATTR(min_partial);
4990
4991static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
4992{
4993	return sprintf(buf, "%u\n", slub_cpu_partial(s));
4994}
4995
4996static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
4997				 size_t length)
4998{
4999	unsigned int objects;
5000	int err;
5001
5002	err = kstrtouint(buf, 10, &objects);
5003	if (err)
5004		return err;
5005	if (objects && !kmem_cache_has_cpu_partial(s))
5006		return -EINVAL;
5007
5008	slub_set_cpu_partial(s, objects);
5009	flush_all(s);
5010	return length;
5011}
5012SLAB_ATTR(cpu_partial);
5013
5014static ssize_t ctor_show(struct kmem_cache *s, char *buf)
5015{
5016	if (!s->ctor)
5017		return 0;
5018	return sprintf(buf, "%pS\n", s->ctor);
5019}
5020SLAB_ATTR_RO(ctor);
5021
5022static ssize_t aliases_show(struct kmem_cache *s, char *buf)
5023{
5024	return sprintf(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
5025}
5026SLAB_ATTR_RO(aliases);
5027
5028static ssize_t partial_show(struct kmem_cache *s, char *buf)
5029{
5030	return show_slab_objects(s, buf, SO_PARTIAL);
5031}
5032SLAB_ATTR_RO(partial);
5033
5034static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
5035{
5036	return show_slab_objects(s, buf, SO_CPU);
5037}
5038SLAB_ATTR_RO(cpu_slabs);
5039
5040static ssize_t objects_show(struct kmem_cache *s, char *buf)
5041{
5042	return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
5043}
5044SLAB_ATTR_RO(objects);
5045
5046static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
5047{
5048	return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
5049}
5050SLAB_ATTR_RO(objects_partial);
5051
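/*
 * The file below reports "<objects>(<pages>)" summed over all cpus' partial
 * lists, followed on SMP by per-cpu " C<cpu>=<objects>(<pages>)" entries
 * (derived from the sprintf() calls in the function).
 */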
5052static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
5053{
5054	int objects = 0;
5055	int pages = 0;
5056	int cpu;
5057	int len;
5058
5059	for_each_online_cpu(cpu) {
5060		struct page *page;
5061
5062		page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
5063
5064		if (page) {
5065			pages += page->pages;
5066			objects += page->pobjects;
5067		}
5068	}
5069
5070	len = sprintf(buf, "%d(%d)", objects, pages);
5071
5072#ifdef CONFIG_SMP
5073	for_each_online_cpu(cpu) {
5074		struct page *page;
5075
5076		page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
5077
5078		if (page && len < PAGE_SIZE - 20)
5079			len += sprintf(buf + len, " C%d=%d(%d)", cpu,
5080				page->pobjects, page->pages);
5081	}
5082#endif
5083	return len + sprintf(buf + len, "\n");
5084}
5085SLAB_ATTR_RO(slabs_cpu_partial);
5086
5087static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
5088{
5089	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
5090}
5091
5092static ssize_t reclaim_account_store(struct kmem_cache *s,
5093				const char *buf, size_t length)
5094{
5095	s->flags &= ~SLAB_RECLAIM_ACCOUNT;
5096	if (buf[0] == '1')
5097		s->flags |= SLAB_RECLAIM_ACCOUNT;
5098	return length;
5099}
5100SLAB_ATTR(reclaim_account);
5101
5102static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
5103{
5104	return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
5105}
5106SLAB_ATTR_RO(hwcache_align);
5107
5108#ifdef CONFIG_ZONE_DMA
5109static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
5110{
5111	return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
5112}
5113SLAB_ATTR_RO(cache_dma);
5114#endif
5115
5116static ssize_t usersize_show(struct kmem_cache *s, char *buf)
5117{
5118	return sprintf(buf, "%u\n", s->usersize);
5119}
5120SLAB_ATTR_RO(usersize);
5121
5122static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
5123{
5124	return sprintf(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU));
5125}
5126SLAB_ATTR_RO(destroy_by_rcu);
5127
5128#ifdef CONFIG_SLUB_DEBUG
5129static ssize_t slabs_show(struct kmem_cache *s, char *buf)
5130{
5131	return show_slab_objects(s, buf, SO_ALL);
5132}
5133SLAB_ATTR_RO(slabs);
5134
5135static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
5136{
5137	return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
5138}
5139SLAB_ATTR_RO(total_objects);
5140
5141static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
5142{
5143	return sprintf(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS));
5144}
5145
5146static ssize_t sanity_checks_store(struct kmem_cache *s,
5147				const char *buf, size_t length)
5148{
5149	s->flags &= ~SLAB_CONSISTENCY_CHECKS;
5150	if (buf[0] == '1') {
5151		s->flags &= ~__CMPXCHG_DOUBLE;
5152		s->flags |= SLAB_CONSISTENCY_CHECKS;
5153	}
5154	return length;
5155}
5156SLAB_ATTR(sanity_checks);
5157
5158static ssize_t trace_show(struct kmem_cache *s, char *buf)
5159{
5160	return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
5161}
5162
5163static ssize_t trace_store(struct kmem_cache *s, const char *buf,
5164							size_t length)
5165{
5166	/*
5167	 * Tracing a merged cache is going to give confusing results
5168	 * as well as cause other issues like converting a mergeable
5169	 * cache into an umergeable one.
5170	 * cache into an unmergeable one.
5171	if (s->refcount > 1)
5172		return -EINVAL;
5173
5174	s->flags &= ~SLAB_TRACE;
5175	if (buf[0] == '1') {
5176		s->flags &= ~__CMPXCHG_DOUBLE;
5177		s->flags |= SLAB_TRACE;
5178	}
5179	return length;
5180}
5181SLAB_ATTR(trace);
5182
5183static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
5184{
5185	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
5186}
5187
5188static ssize_t red_zone_store(struct kmem_cache *s,
5189				const char *buf, size_t length)
5190{
5191	if (any_slab_objects(s))
5192		return -EBUSY;
5193
5194	s->flags &= ~SLAB_RED_ZONE;
5195	if (buf[0] == '1') {
5196		s->flags |= SLAB_RED_ZONE;
5197	}
5198	calculate_sizes(s, -1);
5199	return length;
5200}
5201SLAB_ATTR(red_zone);
5202
5203static ssize_t poison_show(struct kmem_cache *s, char *buf)
5204{
5205	return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON));
5206}
5207
5208static ssize_t poison_store(struct kmem_cache *s,
5209				const char *buf, size_t length)
5210{
5211	if (any_slab_objects(s))
5212		return -EBUSY;
5213
5214	s->flags &= ~SLAB_POISON;
5215	if (buf[0] == '1') {
5216		s->flags |= SLAB_POISON;
5217	}
5218	calculate_sizes(s, -1);
5219	return length;
5220}
5221SLAB_ATTR(poison);
5222
5223static ssize_t store_user_show(struct kmem_cache *s, char *buf)
5224{
5225	return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
5226}
5227
5228static ssize_t store_user_store(struct kmem_cache *s,
5229				const char *buf, size_t length)
5230{
5231	if (any_slab_objects(s))
5232		return -EBUSY;
5233
5234	s->flags &= ~SLAB_STORE_USER;
5235	if (buf[0] == '1') {
5236		s->flags &= ~__CMPXCHG_DOUBLE;
5237		s->flags |= SLAB_STORE_USER;
5238	}
5239	calculate_sizes(s, -1);
5240	return length;
5241}
5242SLAB_ATTR(store_user);
5243
5244static ssize_t validate_show(struct kmem_cache *s, char *buf)
5245{
5246	return 0;
5247}
5248
5249static ssize_t validate_store(struct kmem_cache *s,
5250			const char *buf, size_t length)
5251{
5252	int ret = -EINVAL;
5253
5254	if (buf[0] == '1') {
5255		ret = validate_slab_cache(s);
5256		if (ret >= 0)
5257			ret = length;
5258	}
5259	return ret;
5260}
5261SLAB_ATTR(validate);
5262
5263static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
5264{
5265	if (!(s->flags & SLAB_STORE_USER))
5266		return -ENOSYS;
5267	return list_locations(s, buf, TRACK_ALLOC);
5268}
5269SLAB_ATTR_RO(alloc_calls);
5270
5271static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
5272{
5273	if (!(s->flags & SLAB_STORE_USER))
5274		return -ENOSYS;
5275	return list_locations(s, buf, TRACK_FREE);
5276}
5277SLAB_ATTR_RO(free_calls);
5278#endif /* CONFIG_SLUB_DEBUG */
5279
5280#ifdef CONFIG_FAILSLAB
5281static ssize_t failslab_show(struct kmem_cache *s, char *buf)
5282{
5283	return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
5284}
5285
5286static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
5287							size_t length)
5288{
5289	if (s->refcount > 1)
5290		return -EINVAL;
5291
5292	s->flags &= ~SLAB_FAILSLAB;
5293	if (buf[0] == '1')
5294		s->flags |= SLAB_FAILSLAB;
5295	return length;
5296}
5297SLAB_ATTR(failslab);
5298#endif
5299
5300static ssize_t shrink_show(struct kmem_cache *s, char *buf)
5301{
5302	return 0;
5303}
5304
5305static ssize_t shrink_store(struct kmem_cache *s,
5306			const char *buf, size_t length)
5307{
5308	if (buf[0] == '1')
5309		kmem_cache_shrink_all(s);
5310	else
5311		return -EINVAL;
5312	return length;
5313}
5314SLAB_ATTR(shrink);
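/*
 * For example, "echo 1 > /sys/kernel/slab/<cache>/shrink" invokes
 * kmem_cache_shrink_all(), which discards empty slabs for this cache (and,
 * for a root cache, also for its memcg child caches).
 */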
5315
5316#ifdef CONFIG_NUMA
5317static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
5318{
5319	return sprintf(buf, "%u\n", s->remote_node_defrag_ratio / 10);
5320}
5321
5322static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
5323				const char *buf, size_t length)
5324{
5325	unsigned int ratio;
5326	int err;
5327
5328	err = kstrtouint(buf, 10, &ratio);
5329	if (err)
5330		return err;
5331	if (ratio > 100)
5332		return -ERANGE;
5333
5334	s->remote_node_defrag_ratio = ratio * 10;
5335
5336	return length;
5337}
5338SLAB_ATTR(remote_node_defrag_ratio);
5339#endif
5340
5341#ifdef CONFIG_SLUB_STATS
5342static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
5343{
5344	unsigned long sum  = 0;
5345	int cpu;
5346	int len;
5347	int *data = kmalloc_array(nr_cpu_ids, sizeof(int), GFP_KERNEL);
5348
5349	if (!data)
5350		return -ENOMEM;
5351
5352	for_each_online_cpu(cpu) {
5353		unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si];
5354
5355		data[cpu] = x;
5356		sum += x;
5357	}
5358
5359	len = sprintf(buf, "%lu", sum);
5360
5361#ifdef CONFIG_SMP
5362	for_each_online_cpu(cpu) {
5363		if (data[cpu] && len < PAGE_SIZE - 20)
5364			len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]);
5365	}
5366#endif
5367	kfree(data);
5368	return len + sprintf(buf + len, "\n");
5369}
5370
5371static void clear_stat(struct kmem_cache *s, enum stat_item si)
5372{
5373	int cpu;
5374
5375	for_each_online_cpu(cpu)
5376		per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0;
5377}
5378
5379#define STAT_ATTR(si, text) 					\
5380static ssize_t text##_show(struct kmem_cache *s, char *buf)	\
5381{								\
5382	return show_stat(s, buf, si);				\
5383}								\
5384static ssize_t text##_store(struct kmem_cache *s,		\
5385				const char *buf, size_t length)	\
5386{								\
5387	if (buf[0] != '0')					\
5388		return -EINVAL;					\
5389	clear_stat(s, si);					\
5390	return length;						\
5391}								\
5392SLAB_ATTR(text);						\
5393
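/*
 * Each STAT_ATTR() below creates a read-write sysfs file: reading it shows
 * the summed per-cpu counter (plus per-cpu values on SMP), and writing "0"
 * clears the counter; any other value is rejected with -EINVAL.
 */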
5394STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
5395STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
5396STAT_ATTR(FREE_FASTPATH, free_fastpath);
5397STAT_ATTR(FREE_SLOWPATH, free_slowpath);
5398STAT_ATTR(FREE_FROZEN, free_frozen);
5399STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
5400STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
5401STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
5402STAT_ATTR(ALLOC_SLAB, alloc_slab);
5403STAT_ATTR(ALLOC_REFILL, alloc_refill);
5404STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch);
5405STAT_ATTR(FREE_SLAB, free_slab);
5406STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
5407STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
5408STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
5409STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
5410STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
5411STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
5412STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass);
5413STAT_ATTR(ORDER_FALLBACK, order_fallback);
5414STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
5415STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
5416STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc);
5417STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free);
5418STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
5419STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain);
5420#endif	/* CONFIG_SLUB_STATS */
5421
5422static struct attribute *slab_attrs[] = {
5423	&slab_size_attr.attr,
5424	&object_size_attr.attr,
5425	&objs_per_slab_attr.attr,
5426	&order_attr.attr,
5427	&min_partial_attr.attr,
5428	&cpu_partial_attr.attr,
5429	&objects_attr.attr,
5430	&objects_partial_attr.attr,
5431	&partial_attr.attr,
5432	&cpu_slabs_attr.attr,
5433	&ctor_attr.attr,
5434	&aliases_attr.attr,
5435	&align_attr.attr,
5436	&hwcache_align_attr.attr,
5437	&reclaim_account_attr.attr,
5438	&destroy_by_rcu_attr.attr,
5439	&shrink_attr.attr,
5440	&slabs_cpu_partial_attr.attr,
5441#ifdef CONFIG_SLUB_DEBUG
5442	&total_objects_attr.attr,
5443	&slabs_attr.attr,
5444	&sanity_checks_attr.attr,
5445	&trace_attr.attr,
5446	&red_zone_attr.attr,
5447	&poison_attr.attr,
5448	&store_user_attr.attr,
5449	&validate_attr.attr,
5450	&alloc_calls_attr.attr,
5451	&free_calls_attr.attr,
5452#endif
5453#ifdef CONFIG_ZONE_DMA
5454	&cache_dma_attr.attr,
5455#endif
5456#ifdef CONFIG_NUMA
5457	&remote_node_defrag_ratio_attr.attr,
5458#endif
5459#ifdef CONFIG_SLUB_STATS
5460	&alloc_fastpath_attr.attr,
5461	&alloc_slowpath_attr.attr,
5462	&free_fastpath_attr.attr,
5463	&free_slowpath_attr.attr,
5464	&free_frozen_attr.attr,
5465	&free_add_partial_attr.attr,
5466	&free_remove_partial_attr.attr,
5467	&alloc_from_partial_attr.attr,
5468	&alloc_slab_attr.attr,
5469	&alloc_refill_attr.attr,
5470	&alloc_node_mismatch_attr.attr,
5471	&free_slab_attr.attr,
5472	&cpuslab_flush_attr.attr,
5473	&deactivate_full_attr.attr,
5474	&deactivate_empty_attr.attr,
5475	&deactivate_to_head_attr.attr,
5476	&deactivate_to_tail_attr.attr,
5477	&deactivate_remote_frees_attr.attr,
5478	&deactivate_bypass_attr.attr,
5479	&order_fallback_attr.attr,
5480	&cmpxchg_double_fail_attr.attr,
5481	&cmpxchg_double_cpu_fail_attr.attr,
5482	&cpu_partial_alloc_attr.attr,
5483	&cpu_partial_free_attr.attr,
5484	&cpu_partial_node_attr.attr,
5485	&cpu_partial_drain_attr.attr,
5486#endif
5487#ifdef CONFIG_FAILSLAB
5488	&failslab_attr.attr,
5489#endif
5490	&usersize_attr.attr,
5491
5492	NULL
5493};
5494
5495static const struct attribute_group slab_attr_group = {
5496	.attrs = slab_attrs,
5497};
5498
5499static ssize_t slab_attr_show(struct kobject *kobj,
5500				struct attribute *attr,
5501				char *buf)
5502{
5503	struct slab_attribute *attribute;
5504	struct kmem_cache *s;
5505	int err;
5506
5507	attribute = to_slab_attr(attr);
5508	s = to_slab(kobj);
5509
5510	if (!attribute->show)
5511		return -EIO;
5512
5513	err = attribute->show(s, buf);
5514
5515	return err;
5516}
5517
5518static ssize_t slab_attr_store(struct kobject *kobj,
5519				struct attribute *attr,
5520				const char *buf, size_t len)
5521{
5522	struct slab_attribute *attribute;
5523	struct kmem_cache *s;
5524	int err;
5525
5526	attribute = to_slab_attr(attr);
5527	s = to_slab(kobj);
5528
5529	if (!attribute->store)
5530		return -EIO;
5531
5532	err = attribute->store(s, buf, len);
5533#ifdef CONFIG_MEMCG
5534	if (slab_state >= FULL && err >= 0 && is_root_cache(s)) {
5535		struct kmem_cache *c;
5536
5537		mutex_lock(&slab_mutex);
5538		if (s->max_attr_size < len)
5539			s->max_attr_size = len;
5540
5541		/*
5542		 * This is a best effort propagation, so this function's return
5543		 * value will be determined by the parent cache only. This is
5544		 * basically because not all attributes will have a well
5545		 * defined semantics for rollbacks - most of the actions will
5546		 * have permanent effects.
5547		 *
5548		 * Returning the error value of any of the children that fail
5549		 * is not 100 % defined, in the sense that users seeing the
5550		 * error code won't be able to know anything about the state of
5551		 * the cache.
5552		 *
5553		 * Only returning the error code for the parent cache at least
5554		 * has well defined semantics. The cache being written to
5555		 * directly either failed or succeeded, in which case we loop
5556		 * through the descendants with best-effort propagation.
5557		 */
5558		for_each_memcg_cache(c, s)
5559			attribute->store(c, buf, len);
5560		mutex_unlock(&slab_mutex);
5561	}
5562#endif
5563	return err;
5564}
5565
5566static void memcg_propagate_slab_attrs(struct kmem_cache *s)
5567{
5568#ifdef CONFIG_MEMCG
5569	int i;
5570	char *buffer = NULL;
5571	struct kmem_cache *root_cache;
5572
5573	if (is_root_cache(s))
5574		return;
5575
5576	root_cache = s->memcg_params.root_cache;
5577
5578	/*
5579	 * This means this cache had no attribute written. Therefore, there is
5580	 * no point in copying default values around.
5581	 */
5582	if (!root_cache->max_attr_size)
5583		return;
5584
5585	for (i = 0; i < ARRAY_SIZE(slab_attrs); i++) {
5586		char mbuf[64];
5587		char *buf;
5588		struct slab_attribute *attr = to_slab_attr(slab_attrs[i]);
5589		ssize_t len;
5590
5591		if (!attr || !attr->store || !attr->show)
5592			continue;
5593
5594		/*
5595		 * It is really bad that we have to allocate here, so we will
5596		 * do it only as a fallback. If we actually allocate, though,
5597		 * we can just use the allocated buffer until the end.
5598		 *
5599		 * Most of the slub attributes will tend to be very small in
5600		 * size, but sysfs allows buffers up to a page, so they can
5601		 * size, but sysfs allows buffers up to a page, so larger ones can
5602		 * theoretically happen.
5603		if (buffer)
5604			buf = buffer;
5605		else if (root_cache->max_attr_size < ARRAY_SIZE(mbuf))
5606			buf = mbuf;
5607		else {
5608			buffer = (char *) get_zeroed_page(GFP_KERNEL);
5609			if (WARN_ON(!buffer))
5610				continue;
5611			buf = buffer;
5612		}
5613
5614		len = attr->show(root_cache, buf);
5615		if (len > 0)
5616			attr->store(s, buf, len);
5617	}
5618
5619	if (buffer)
5620		free_page((unsigned long)buffer);
5621#endif	/* CONFIG_MEMCG */
5622}
5623
5624static void kmem_cache_release(struct kobject *k)
5625{
5626	slab_kmem_cache_release(to_slab(k));
5627}
5628
5629static const struct sysfs_ops slab_sysfs_ops = {
5630	.show = slab_attr_show,
5631	.store = slab_attr_store,
5632};
5633
5634static struct kobj_type slab_ktype = {
5635	.sysfs_ops = &slab_sysfs_ops,
5636	.release = kmem_cache_release,
5637};
5638
5639static int uevent_filter(struct kset *kset, struct kobject *kobj)
5640{
5641	struct kobj_type *ktype = get_ktype(kobj);
5642
5643	if (ktype == &slab_ktype)
5644		return 1;
5645	return 0;
5646}
5647
5648static const struct kset_uevent_ops slab_uevent_ops = {
5649	.filter = uevent_filter,
5650};
5651
5652static struct kset *slab_kset;
5653
5654static inline struct kset *cache_kset(struct kmem_cache *s)
5655{
5656#ifdef CONFIG_MEMCG
5657	if (!is_root_cache(s))
5658		return s->memcg_params.root_cache->memcg_kset;
5659#endif
5660	return slab_kset;
5661}
5662
5663#define ID_STR_LENGTH 64
5664
5665/* Create a unique string id for a slab cache:
5666 *
5667 * Format	:[flags-]size
5668 */
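/*
 * Illustrative example (not part of the original source): a
 * SLAB_RECLAIM_ACCOUNT cache with s->size == 192 gets the id ":a-0000192".
 */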
5669static char *create_unique_id(struct kmem_cache *s)
5670{
5671	char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
5672	char *p = name;
5673
5674	BUG_ON(!name);
5675
5676	*p++ = ':';
5677	/*
5678	 * First flags affecting slabcache operations. We will only
5679	 * get here for aliasable slabs so we do not need to support
5680	 * too many flags. The flags here must cover all flags that
5681	 * are matched during merging to guarantee that the id is
5682	 * unique.
5683	 */
5684	if (s->flags & SLAB_CACHE_DMA)
5685		*p++ = 'd';
5686	if (s->flags & SLAB_CACHE_DMA32)
5687		*p++ = 'D';
5688	if (s->flags & SLAB_RECLAIM_ACCOUNT)
5689		*p++ = 'a';
5690	if (s->flags & SLAB_CONSISTENCY_CHECKS)
5691		*p++ = 'F';
5692	if (s->flags & SLAB_ACCOUNT)
5693		*p++ = 'A';
5694	if (p != name + 1)
5695		*p++ = '-';
5696	p += sprintf(p, "%07u", s->size);
5697
5698	BUG_ON(p > name + ID_STR_LENGTH - 1);
5699	return name;
5700}
5701
5702static void sysfs_slab_remove_workfn(struct work_struct *work)
5703{
5704	struct kmem_cache *s =
5705		container_of(work, struct kmem_cache, kobj_remove_work);
5706
5707	if (!s->kobj.state_in_sysfs)
5708		/*
5709		 * For a memcg cache, this may be called during
5710		 * deactivation and again on shutdown.  Remove only once.
5711		 * A cache is never shut down before deactivation is
5712		 * complete, so no need to worry about synchronization.
5713		 */
5714		goto out;
5715
5716#ifdef CONFIG_MEMCG
5717	kset_unregister(s->memcg_kset);
5718#endif
5719	kobject_uevent(&s->kobj, KOBJ_REMOVE);
5720out:
5721	kobject_put(&s->kobj);
5722}
5723
5724static int sysfs_slab_add(struct kmem_cache *s)
5725{
5726	int err;
5727	const char *name;
5728	struct kset *kset = cache_kset(s);
5729	int unmergeable = slab_unmergeable(s);
5730
5731	INIT_WORK(&s->kobj_remove_work, sysfs_slab_remove_workfn);
5732
5733	if (!kset) {
5734		kobject_init(&s->kobj, &slab_ktype);
5735		return 0;
5736	}
5737
5738	if (!unmergeable && disable_higher_order_debug &&
5739			(slub_debug & DEBUG_METADATA_FLAGS))
5740		unmergeable = 1;
5741
5742	if (unmergeable) {
5743		/*
5744		 * Slabcache can never be merged so we can use the name proper.
5745		 * This is typically the case for debug situations. In that
5746		 * case we can catch duplicate names easily.
5747		 */
5748		sysfs_remove_link(&slab_kset->kobj, s->name);
5749		name = s->name;
5750	} else {
5751		/*
5752		 * Create a unique name for the slab as a target
5753		 * for the symlinks.
5754		 */
5755		name = create_unique_id(s);
5756	}
5757
5758	s->kobj.kset = kset;
5759	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
5760	if (err)
5761		goto out;
5762
5763	err = sysfs_create_group(&s->kobj, &slab_attr_group);
5764	if (err)
5765		goto out_del_kobj;
5766
5767#ifdef CONFIG_MEMCG
5768	if (is_root_cache(s) && memcg_sysfs_enabled) {
5769		s->memcg_kset = kset_create_and_add("cgroup", NULL, &s->kobj);
5770		if (!s->memcg_kset) {
5771			err = -ENOMEM;
5772			goto out_del_kobj;
5773		}
5774	}
5775#endif
5776
5777	kobject_uevent(&s->kobj, KOBJ_ADD);
5778	if (!unmergeable) {
5779		/* Setup first alias */
5780		sysfs_slab_alias(s, s->name);
5781	}
5782out:
5783	if (!unmergeable)
5784		kfree(name);
5785	return err;
5786out_del_kobj:
5787	kobject_del(&s->kobj);
5788	goto out;
5789}
5790
5791static void sysfs_slab_remove(struct kmem_cache *s)
5792{
5793	if (slab_state < FULL)
5794		/*
5795		 * Sysfs has not been setup yet so no need to remove the
5796		 * cache from sysfs.
5797		 */
5798		return;
5799
5800	kobject_get(&s->kobj);
5801	schedule_work(&s->kobj_remove_work);
5802}
5803
5804void sysfs_slab_unlink(struct kmem_cache *s)
5805{
5806	if (slab_state >= FULL)
5807		kobject_del(&s->kobj);
5808}
5809
5810void sysfs_slab_release(struct kmem_cache *s)
5811{
5812	if (slab_state >= FULL)
5813		kobject_put(&s->kobj);
5814}
5815
5816/*
5817 * Need to buffer aliases during bootup until sysfs becomes
5818 * available lest we lose that information.
5819 */
5820struct saved_alias {
5821	struct kmem_cache *s;
5822	const char *name;
5823	struct saved_alias *next;
5824};
5825
5826static struct saved_alias *alias_list;
5827
5828static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
5829{
5830	struct saved_alias *al;
5831
5832	if (slab_state == FULL) {
5833		/*
5834		 * If we have a leftover link then remove it.
5835		 */
5836		sysfs_remove_link(&slab_kset->kobj, name);
5837		return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
5838	}
5839
5840	al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
5841	if (!al)
5842		return -ENOMEM;
5843
5844	al->s = s;
5845	al->name = name;
5846	al->next = alias_list;
5847	alias_list = al;
5848	return 0;
5849}
5850
5851static int __init slab_sysfs_init(void)
5852{
5853	struct kmem_cache *s;
5854	int err;
5855
5856	mutex_lock(&slab_mutex);
5857
5858	slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
5859	if (!slab_kset) {
5860		mutex_unlock(&slab_mutex);
5861		pr_err("Cannot register slab subsystem.\n");
5862		return -ENOSYS;
5863	}
5864
5865	slab_state = FULL;
5866
5867	list_for_each_entry(s, &slab_caches, list) {
5868		err = sysfs_slab_add(s);
5869		if (err)
5870			pr_err("SLUB: Unable to add boot slab %s to sysfs\n",
5871			       s->name);
5872	}
5873
5874	while (alias_list) {
5875		struct saved_alias *al = alias_list;
5876
5877		alias_list = alias_list->next;
5878		err = sysfs_slab_alias(al->s, al->name);
5879		if (err)
5880			pr_err("SLUB: Unable to add boot slab alias %s to sysfs\n",
5881			       al->name);
5882		kfree(al);
5883	}
5884
5885	mutex_unlock(&slab_mutex);
5886	resiliency_test();
5887	return 0;
5888}
5889
5890__initcall(slab_sysfs_init);
5891#endif /* CONFIG_SYSFS */
5892
5893/*
5894 * The /proc/slabinfo ABI
5895 */
5896#ifdef CONFIG_SLUB_DEBUG
5897void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
5898{
5899	unsigned long nr_slabs = 0;
5900	unsigned long nr_objs = 0;
5901	unsigned long nr_free = 0;
5902	int node;
5903	struct kmem_cache_node *n;
5904
5905	for_each_kmem_cache_node(s, node, n) {
5906		nr_slabs += node_nr_slabs(n);
5907		nr_objs += node_nr_objs(n);
5908		nr_free += count_partial(n, count_free);
5909	}
5910
5911	sinfo->active_objs = nr_objs - nr_free;
5912	sinfo->num_objs = nr_objs;
5913	sinfo->active_slabs = nr_slabs;
5914	sinfo->num_slabs = nr_slabs;
5915	sinfo->objects_per_slab = oo_objects(s->oo);
5916	sinfo->cache_order = oo_order(s->oo);
5917}
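/*
 * Note added for clarity (not in the upstream source): count_partial() above
 * only sees free objects sitting on each node's partial lists, so free slots
 * held in per-cpu (frozen) slabs are still reported as active objects by
 * /proc/slabinfo.
 */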
5918
5919void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s)
5920{
5921}
5922
5923ssize_t slabinfo_write(struct file *file, const char __user *buffer,
5924		       size_t count, loff_t *ppos)
5925{
5926	return -EIO;
5927}
5928#endif /* CONFIG_SLUB_DEBUG */
v4.17
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * SLUB: A slab allocator that limits cache line use instead of queuing
   4 * objects in per cpu and per node lists.
   5 *
   6 * The allocator synchronizes using per slab locks or atomic operations
   7 * and only uses a centralized lock to manage a pool of partial slabs.
   8 *
   9 * (C) 2007 SGI, Christoph Lameter
  10 * (C) 2011 Linux Foundation, Christoph Lameter
  11 */
  12
  13#include <linux/mm.h>
  14#include <linux/swap.h> /* struct reclaim_state */
  15#include <linux/module.h>
  16#include <linux/bit_spinlock.h>
  17#include <linux/interrupt.h>
  18#include <linux/bitops.h>
  19#include <linux/slab.h>
  20#include "slab.h"
  21#include <linux/proc_fs.h>
  22#include <linux/notifier.h>
  23#include <linux/seq_file.h>
  24#include <linux/kasan.h>
  25#include <linux/cpu.h>
  26#include <linux/cpuset.h>
  27#include <linux/mempolicy.h>
  28#include <linux/ctype.h>
  29#include <linux/debugobjects.h>
  30#include <linux/kallsyms.h>
  31#include <linux/memory.h>
  32#include <linux/math64.h>
  33#include <linux/fault-inject.h>
  34#include <linux/stacktrace.h>
  35#include <linux/prefetch.h>
  36#include <linux/memcontrol.h>
  37#include <linux/random.h>
  38
  39#include <trace/events/kmem.h>
  40
  41#include "internal.h"
  42
  43/*
  44 * Lock order:
  45 *   1. slab_mutex (Global Mutex)
  46 *   2. node->list_lock
  47 *   3. slab_lock(page) (Only on some arches and for debugging)
  48 *
  49 *   slab_mutex
  50 *
  51 *   The role of the slab_mutex is to protect the list of all the slabs
  52 *   and to synchronize major metadata changes to slab cache structures.
  53 *
  54 *   The slab_lock is only used for debugging and on arches that do not
  55 *   have the ability to do a cmpxchg_double. It only protects the second
  56 *   double word in the page struct. Meaning
  57 *	A. page->freelist	-> List of object free in a page
  58 *	B. page->counters	-> Counters of objects
  59 *	C. page->frozen		-> frozen state
  60 *
  61 *   If a slab is frozen then it is exempt from list management. It is not
  62 *   on any list. The processor that froze the slab is the one who can
  63 *   perform list operations on the page. Other processors may put objects
  64 *   onto the freelist but the processor that froze the slab is the only
  65 *   one that can retrieve the objects from the page's freelist.
  66 *
  67 *   The list_lock protects the partial and full list on each node and
  68 *   the partial slab counter. If taken then no new slabs may be added or
  69 *   removed from the lists nor make the number of partial slabs be modified.
  70 *   (Note that the total number of slabs is an atomic value that may be
  71 *   modified without taking the list lock).
  72 *
  73 *   The list_lock is a centralized lock and thus we avoid taking it as
  74 *   much as possible. As long as SLUB does not have to handle partial
  75 *   slabs, operations can continue without any centralized lock. F.e.
  76 *   allocating a long series of objects that fill up slabs does not require
  77 *   the list lock.
  78 *   Interrupts are disabled during allocation and deallocation in order to
  79 *   make the slab allocator safe to use in the context of an irq. In addition
  80 *   interrupts are disabled to ensure that the processor does not change
  81 *   while handling per_cpu slabs, due to kernel preemption.
  82 *
  83 * SLUB assigns one slab for allocation to each processor.
  84 * Allocations only occur from these slabs called cpu slabs.
  85 *
  86 * Slabs with free elements are kept on a partial list and during regular
  87 * operations no list for full slabs is used. If an object in a full slab is
  88 * freed then the slab will show up again on the partial lists.
  89 * We track full slabs for debugging purposes though because otherwise we
  90 * cannot scan all objects.
  91 *
  92 * Slabs are freed when they become empty. Teardown and setup is
   93 * minimal so we rely on the page allocator's per cpu caches for
  94 * fast frees and allocs.
  95 *
  96 * Overloading of page flags that are otherwise used for LRU management.
  97 *
  98 * PageActive 		The slab is frozen and exempt from list processing.
  99 * 			This means that the slab is dedicated to a purpose
 100 * 			such as satisfying allocations for a specific
 101 * 			processor. Objects may be freed in the slab while
 102 * 			it is frozen but slab_free will then skip the usual
 103 * 			list operations. It is up to the processor holding
 104 * 			the slab to integrate the slab into the slab lists
 105 * 			when the slab is no longer needed.
 106 *
 107 * 			One use of this flag is to mark slabs that are
 108 * 			used for allocations. Then such a slab becomes a cpu
 109 * 			slab. The cpu slab may be equipped with an additional
 110 * 			freelist that allows lockless access to
 111 * 			free objects in addition to the regular freelist
 112 * 			that requires the slab lock.
 113 *
 114 * PageError		Slab requires special handling due to debug
 115 * 			options set. This moves	slab handling out of
 116 * 			the fast path and disables lockless freelists.
 117 */
 118
 119static inline int kmem_cache_debug(struct kmem_cache *s)
 120{
 121#ifdef CONFIG_SLUB_DEBUG
 122	return unlikely(s->flags & SLAB_DEBUG_FLAGS);
 123#else
 124	return 0;
 125#endif
 126}
 127
 128void *fixup_red_left(struct kmem_cache *s, void *p)
 129{
 130	if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE)
 131		p += s->red_left_pad;
 132
 133	return p;
 134}
 135
 136static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
 137{
 138#ifdef CONFIG_SLUB_CPU_PARTIAL
 139	return !kmem_cache_debug(s);
 140#else
 141	return false;
 142#endif
 143}
 144
 145/*
 146 * Issues still to be resolved:
 147 *
 148 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
 149 *
 150 * - Variable sizing of the per node arrays
 151 */
 152
 153/* Enable to test recovery from slab corruption on boot */
 154#undef SLUB_RESILIENCY_TEST
 155
 156/* Enable to log cmpxchg failures */
 157#undef SLUB_DEBUG_CMPXCHG
 158
 159/*
  160 * Minimum number of partial slabs. These will be left on the partial
 161 * lists even if they are empty. kmem_cache_shrink may reclaim them.
 162 */
 163#define MIN_PARTIAL 5
 164
 165/*
 166 * Maximum number of desirable partial slabs.
 167 * The existence of more partial slabs makes kmem_cache_shrink
 168 * sort the partial list by the number of objects in use.
 169 */
 170#define MAX_PARTIAL 10
 171
 172#define DEBUG_DEFAULT_FLAGS (SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \
 173				SLAB_POISON | SLAB_STORE_USER)
 174
 175/*
 176 * These debug flags cannot use CMPXCHG because there might be consistency
 177 * issues when checking or reading debug information
 178 */
 179#define SLAB_NO_CMPXCHG (SLAB_CONSISTENCY_CHECKS | SLAB_STORE_USER | \
 180				SLAB_TRACE)
 181
 182
 183/*
 184 * Debugging flags that require metadata to be stored in the slab.  These get
 185 * disabled when slub_debug=O is used and a cache's min order increases with
 186 * metadata.
 187 */
 188#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
 189
 190#define OO_SHIFT	16
 191#define OO_MASK		((1 << OO_SHIFT) - 1)
 192#define MAX_OBJS_PER_PAGE	32767 /* since page.objects is u15 */
 193
 194/* Internal SLUB flags */
 195/* Poison object */
 196#define __OBJECT_POISON		((slab_flags_t __force)0x80000000U)
 197/* Use cmpxchg_double */
 198#define __CMPXCHG_DOUBLE	((slab_flags_t __force)0x40000000U)
 199
 200/*
 201 * Tracking user of a slab.
 202 */
 203#define TRACK_ADDRS_COUNT 16
 204struct track {
 205	unsigned long addr;	/* Called from address */
 206#ifdef CONFIG_STACKTRACE
 207	unsigned long addrs[TRACK_ADDRS_COUNT];	/* Called from address */
 208#endif
 209	int cpu;		/* Was running on cpu */
 210	int pid;		/* Pid context */
 211	unsigned long when;	/* When did the operation occur */
 212};
 213
 214enum track_item { TRACK_ALLOC, TRACK_FREE };
 215
 216#ifdef CONFIG_SYSFS
 217static int sysfs_slab_add(struct kmem_cache *);
 218static int sysfs_slab_alias(struct kmem_cache *, const char *);
 219static void memcg_propagate_slab_attrs(struct kmem_cache *s);
 220static void sysfs_slab_remove(struct kmem_cache *s);
 221#else
 222static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
 223static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
 224							{ return 0; }
 225static inline void memcg_propagate_slab_attrs(struct kmem_cache *s) { }
 226static inline void sysfs_slab_remove(struct kmem_cache *s) { }
 227#endif
 228
 229static inline void stat(const struct kmem_cache *s, enum stat_item si)
 230{
 231#ifdef CONFIG_SLUB_STATS
 232	/*
 233	 * The rmw is racy on a preemptible kernel but this is acceptable, so
 234	 * avoid this_cpu_add()'s irq-disable overhead.
 235	 */
 236	raw_cpu_inc(s->cpu_slab->stat[si]);
 237#endif
 238}
 239
 240/********************************************************************
 241 * 			Core slab cache functions
 242 *******************************************************************/
 243
 244/*
 245 * Returns freelist pointer (ptr). With hardening, this is obfuscated
 246 * with an XOR of the address where the pointer is held and a per-cache
 247 * random number.
 248 */
 249static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
 250				 unsigned long ptr_addr)
 251{
 252#ifdef CONFIG_SLAB_FREELIST_HARDENED
 253	return (void *)((unsigned long)ptr ^ s->random ^ ptr_addr);
 254#else
 255	return ptr;
 256#endif
 257}
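/*
 * Worked example, for illustration only: with CONFIG_SLAB_FREELIST_HARDENED
 * the value stored in a freelist slot is
 *
 *	stored = ptr ^ s->random ^ slot_address
 *
 * Because XOR is its own inverse, passing the stored value back through
 * freelist_ptr() with the same slot address recovers the original pointer:
 *
 *	stored ^ s->random ^ slot_address == ptr
 *
 * so an information leak of a freelist slot exposes an obfuscated value
 * rather than a raw kernel address.
 */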
 258
 259/* Returns the freelist pointer recorded at location ptr_addr. */
 260static inline void *freelist_dereference(const struct kmem_cache *s,
 261					 void *ptr_addr)
 262{
 263	return freelist_ptr(s, (void *)*(unsigned long *)(ptr_addr),
 264			    (unsigned long)ptr_addr);
 265}
 266
 267static inline void *get_freepointer(struct kmem_cache *s, void *object)
 268{
 269	return freelist_dereference(s, object + s->offset);
 270}
 271
 272static void prefetch_freepointer(const struct kmem_cache *s, void *object)
 273{
 274	if (object)
 275		prefetch(freelist_dereference(s, object + s->offset));
 276}
 277
 278static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
 279{
 280	unsigned long freepointer_addr;
 281	void *p;
 282
 283	if (!debug_pagealloc_enabled())
 284		return get_freepointer(s, object);
 285
 286	freepointer_addr = (unsigned long)object + s->offset;
 287	probe_kernel_read(&p, (void **)freepointer_addr, sizeof(p));
 288	return freelist_ptr(s, p, freepointer_addr);
 289}
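/*
 * Note added for clarity (an assumption about intent, not upstream text):
 * probe_kernel_read() is used because with debug_pagealloc the page holding
 * the freepointer may already have been unmapped by the time this lockless
 * read happens; the safe copy then yields a stale value instead of faulting.
 */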
 290
 291static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
 292{
 293	unsigned long freeptr_addr = (unsigned long)object + s->offset;
 294
 295#ifdef CONFIG_SLAB_FREELIST_HARDENED
 296	BUG_ON(object == fp); /* naive detection of double free or corruption */
 297#endif
 298
 299	*(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr);
 300}
 301
 302/* Loop over all objects in a slab */
 303#define for_each_object(__p, __s, __addr, __objects) \
 304	for (__p = fixup_red_left(__s, __addr); \
 305		__p < (__addr) + (__objects) * (__s)->size; \
 306		__p += (__s)->size)
 307
 308#define for_each_object_idx(__p, __idx, __s, __addr, __objects) \
 309	for (__p = fixup_red_left(__s, __addr), __idx = 1; \
 310		__idx <= __objects; \
 311		__p += (__s)->size, __idx++)
 312
 313/* Determine object index from a given position */
 314static inline unsigned int slab_index(void *p, struct kmem_cache *s, void *addr)
 315{
 316	return (p - addr) / s->size;
 317}
 318
 319static inline unsigned int order_objects(unsigned int order, unsigned int size, unsigned int reserved)
 320{
 321	return (((unsigned int)PAGE_SIZE << order) - reserved) / size;
 322}
 323
 324static inline struct kmem_cache_order_objects oo_make(unsigned int order,
 325		unsigned int size, unsigned int reserved)
 326{
 327	struct kmem_cache_order_objects x = {
 328		(order << OO_SHIFT) + order_objects(order, size, reserved)
 329	};
 330
 331	return x;
 332}
 333
 334static inline unsigned int oo_order(struct kmem_cache_order_objects x)
 335{
 336	return x.x >> OO_SHIFT;
 337}
 338
 339static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
 340{
 341	return x.x & OO_MASK;
 342}
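/*
 * Worked example, for illustration only, assuming 4K pages: a cache with
 * size = 256 and reserved = 0 packs order 1 as
 *
 *	order_objects(1, 256, 0) = (4096 << 1) / 256 = 32
 *	x.x = (1 << OO_SHIFT) + 32 = 0x10020
 *	oo_order(x)   = 0x10020 >> 16     = 1
 *	oo_objects(x) = 0x10020 & OO_MASK = 32
 *
 * i.e. page order in the high bits, objects per slab in the low 16 bits.
 */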
 343
 344/*
 345 * Per slab locking using the pagelock
 346 */
 347static __always_inline void slab_lock(struct page *page)
 348{
 349	VM_BUG_ON_PAGE(PageTail(page), page);
 350	bit_spin_lock(PG_locked, &page->flags);
 351}
 352
 353static __always_inline void slab_unlock(struct page *page)
 354{
 355	VM_BUG_ON_PAGE(PageTail(page), page);
 356	__bit_spin_unlock(PG_locked, &page->flags);
 357}
 358
 359static inline void set_page_slub_counters(struct page *page, unsigned long counters_new)
 360{
 361	struct page tmp;
 362	tmp.counters = counters_new;
 363	/*
 364	 * page->counters can cover frozen/inuse/objects as well
 365	 * as page->_refcount.  If we assign to ->counters directly
 366	 * we run the risk of losing updates to page->_refcount, so
 367	 * be careful and only assign to the fields we need.
 368	 */
 369	page->frozen  = tmp.frozen;
 370	page->inuse   = tmp.inuse;
 371	page->objects = tmp.objects;
 372}
 373
 374/* Interrupts must be disabled (for the fallback code to work right) */
 375static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
 376		void *freelist_old, unsigned long counters_old,
 377		void *freelist_new, unsigned long counters_new,
 378		const char *n)
 379{
 380	VM_BUG_ON(!irqs_disabled());
 381#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
 382    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
 383	if (s->flags & __CMPXCHG_DOUBLE) {
 384		if (cmpxchg_double(&page->freelist, &page->counters,
 385				   freelist_old, counters_old,
 386				   freelist_new, counters_new))
 387			return true;
 388	} else
 389#endif
 390	{
 391		slab_lock(page);
 392		if (page->freelist == freelist_old &&
 393					page->counters == counters_old) {
 394			page->freelist = freelist_new;
 395			set_page_slub_counters(page, counters_new);
 396			slab_unlock(page);
 397			return true;
 398		}
 399		slab_unlock(page);
 400	}
 401
 402	cpu_relax();
 403	stat(s, CMPXCHG_DOUBLE_FAIL);
 404
 405#ifdef SLUB_DEBUG_CMPXCHG
 406	pr_info("%s %s: cmpxchg double redo ", n, s->name);
 407#endif
 408
 409	return false;
 410}
 411
 412static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
 413		void *freelist_old, unsigned long counters_old,
 414		void *freelist_new, unsigned long counters_new,
 415		const char *n)
 416{
 417#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
 418    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
 419	if (s->flags & __CMPXCHG_DOUBLE) {
 420		if (cmpxchg_double(&page->freelist, &page->counters,
 421				   freelist_old, counters_old,
 422				   freelist_new, counters_new))
 423			return true;
 424	} else
 425#endif
 426	{
 427		unsigned long flags;
 428
 429		local_irq_save(flags);
 430		slab_lock(page);
 431		if (page->freelist == freelist_old &&
 432					page->counters == counters_old) {
 433			page->freelist = freelist_new;
 434			set_page_slub_counters(page, counters_new);
 435			slab_unlock(page);
 436			local_irq_restore(flags);
 437			return true;
 438		}
 439		slab_unlock(page);
 440		local_irq_restore(flags);
 441	}
 442
 443	cpu_relax();
 444	stat(s, CMPXCHG_DOUBLE_FAIL);
 445
 446#ifdef SLUB_DEBUG_CMPXCHG
 447	pr_info("%s %s: cmpxchg double redo ", n, s->name);
 448#endif
 449
 450	return false;
 451}
 452
 453#ifdef CONFIG_SLUB_DEBUG
 454/*
  455 * Determine a map of objects in use on a page.
 456 *
 457 * Node listlock must be held to guarantee that the page does
 458 * not vanish from under us.
 459 */
 460static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
 461{
 462	void *p;
 463	void *addr = page_address(page);
 464
 465	for (p = page->freelist; p; p = get_freepointer(s, p))
 466		set_bit(slab_index(p, s, addr), map);
 467}
 468
 469static inline unsigned int size_from_object(struct kmem_cache *s)
 470{
 471	if (s->flags & SLAB_RED_ZONE)
 472		return s->size - s->red_left_pad;
 473
 474	return s->size;
 475}
 476
 477static inline void *restore_red_left(struct kmem_cache *s, void *p)
 478{
 479	if (s->flags & SLAB_RED_ZONE)
 480		p -= s->red_left_pad;
 481
 482	return p;
 483}
 484
 485/*
 486 * Debug settings:
 487 */
 488#if defined(CONFIG_SLUB_DEBUG_ON)
 489static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS;
 490#else
 491static slab_flags_t slub_debug;
 492#endif
 493
 494static char *slub_debug_slabs;
 495static int disable_higher_order_debug;
 496
 497/*
 498 * slub is about to manipulate internal object metadata.  This memory lies
 499 * outside the range of the allocated object, so accessing it would normally
 500 * be reported by kasan as a bounds error.  metadata_access_enable() is used
 501 * to tell kasan that these accesses are OK.
 502 */
 503static inline void metadata_access_enable(void)
 504{
 505	kasan_disable_current();
 506}
 507
 508static inline void metadata_access_disable(void)
 509{
 510	kasan_enable_current();
 511}
 512
 513/*
 514 * Object debugging
 515 */
 516
 517/* Verify that a pointer has an address that is valid within a slab page */
 518static inline int check_valid_pointer(struct kmem_cache *s,
 519				struct page *page, void *object)
 520{
 521	void *base;
 522
 523	if (!object)
 524		return 1;
 525
 526	base = page_address(page);
 
 527	object = restore_red_left(s, object);
 528	if (object < base || object >= base + page->objects * s->size ||
 529		(object - base) % s->size) {
 530		return 0;
 531	}
 532
 533	return 1;
 534}
 535
 536static void print_section(char *level, char *text, u8 *addr,
 537			  unsigned int length)
 538{
 539	metadata_access_enable();
 540	print_hex_dump(level, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
 541			length, 1);
 542	metadata_access_disable();
 543}
 544
 545static struct track *get_track(struct kmem_cache *s, void *object,
 546	enum track_item alloc)
 547{
 548	struct track *p;
 549
 550	if (s->offset)
 551		p = object + s->offset + sizeof(void *);
 552	else
 553		p = object + s->inuse;
 554
 555	return p + alloc;
 556}
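/*
 * Layout note added for clarity (not in the upstream source): when the
 * freepointer lives outside the object (s->offset != 0) the two struct
 * track records follow that freepointer; otherwise they start at s->inuse,
 * directly behind the object. Index 0 is the TRACK_ALLOC record and index 1
 * the TRACK_FREE record, matching enum track_item.
 */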
 557
 558static void set_track(struct kmem_cache *s, void *object,
 559			enum track_item alloc, unsigned long addr)
 560{
 561	struct track *p = get_track(s, object, alloc);
 562
 563	if (addr) {
 564#ifdef CONFIG_STACKTRACE
 565		struct stack_trace trace;
 566		int i;
 567
 568		trace.nr_entries = 0;
 569		trace.max_entries = TRACK_ADDRS_COUNT;
 570		trace.entries = p->addrs;
 571		trace.skip = 3;
 572		metadata_access_enable();
 573		save_stack_trace(&trace);
 574		metadata_access_disable();
 575
 576		/* See rant in lockdep.c */
 577		if (trace.nr_entries != 0 &&
 578		    trace.entries[trace.nr_entries - 1] == ULONG_MAX)
 579			trace.nr_entries--;
 580
 581		for (i = trace.nr_entries; i < TRACK_ADDRS_COUNT; i++)
 582			p->addrs[i] = 0;
 583#endif
 584		p->addr = addr;
 585		p->cpu = smp_processor_id();
 586		p->pid = current->pid;
 587		p->when = jiffies;
 588	} else
 589		memset(p, 0, sizeof(struct track));
 
 590}
 591
 592static void init_tracking(struct kmem_cache *s, void *object)
 593{
 594	if (!(s->flags & SLAB_STORE_USER))
 595		return;
 596
 597	set_track(s, object, TRACK_FREE, 0UL);
 598	set_track(s, object, TRACK_ALLOC, 0UL);
 599}
 600
 601static void print_track(const char *s, struct track *t, unsigned long pr_time)
 602{
 603	if (!t->addr)
 604		return;
 605
 606	pr_err("INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
 607	       s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid);
 608#ifdef CONFIG_STACKTRACE
 609	{
 610		int i;
 611		for (i = 0; i < TRACK_ADDRS_COUNT; i++)
 612			if (t->addrs[i])
 613				pr_err("\t%pS\n", (void *)t->addrs[i]);
 614			else
 615				break;
 616	}
 617#endif
 618}
 619
 620static void print_tracking(struct kmem_cache *s, void *object)
 621{
 622	unsigned long pr_time = jiffies;
 623	if (!(s->flags & SLAB_STORE_USER))
 624		return;
 625
 626	print_track("Allocated", get_track(s, object, TRACK_ALLOC), pr_time);
 627	print_track("Freed", get_track(s, object, TRACK_FREE), pr_time);
 628}
 629
 630static void print_page_info(struct page *page)
 631{
 632	pr_err("INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
 633	       page, page->objects, page->inuse, page->freelist, page->flags);
 634
 635}
 636
 637static void slab_bug(struct kmem_cache *s, char *fmt, ...)
 638{
 639	struct va_format vaf;
 640	va_list args;
 641
 642	va_start(args, fmt);
 643	vaf.fmt = fmt;
 644	vaf.va = &args;
 645	pr_err("=============================================================================\n");
 646	pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf);
 647	pr_err("-----------------------------------------------------------------------------\n\n");
 648
 649	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
 650	va_end(args);
 651}
 652
 653static void slab_fix(struct kmem_cache *s, char *fmt, ...)
 654{
 655	struct va_format vaf;
 656	va_list args;
 657
 658	va_start(args, fmt);
 659	vaf.fmt = fmt;
 660	vaf.va = &args;
 661	pr_err("FIX %s: %pV\n", s->name, &vaf);
 662	va_end(args);
 663}
 664
 665static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
 666{
 667	unsigned int off;	/* Offset of last byte */
 668	u8 *addr = page_address(page);
 669
 670	print_tracking(s, p);
 671
 672	print_page_info(page);
 673
 674	pr_err("INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
 675	       p, p - addr, get_freepointer(s, p));
 676
 677	if (s->flags & SLAB_RED_ZONE)
 678		print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
 679			      s->red_left_pad);
 680	else if (p > addr + 16)
 681		print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
 682
 683	print_section(KERN_ERR, "Object ", p,
 684		      min_t(unsigned int, s->object_size, PAGE_SIZE));
 685	if (s->flags & SLAB_RED_ZONE)
 686		print_section(KERN_ERR, "Redzone ", p + s->object_size,
 687			s->inuse - s->object_size);
 688
 689	if (s->offset)
 690		off = s->offset + sizeof(void *);
 691	else
 692		off = s->inuse;
 693
 694	if (s->flags & SLAB_STORE_USER)
 695		off += 2 * sizeof(struct track);
 696
 697	off += kasan_metadata_size(s);
 698
 699	if (off != size_from_object(s))
 700		/* Beginning of the filler is the free pointer */
 701		print_section(KERN_ERR, "Padding ", p + off,
 702			      size_from_object(s) - off);
 703
 704	dump_stack();
 705}
 706
 707void object_err(struct kmem_cache *s, struct page *page,
 708			u8 *object, char *reason)
 709{
 710	slab_bug(s, "%s", reason);
 711	print_trailer(s, page, object);
 712}
 713
 714static void slab_err(struct kmem_cache *s, struct page *page,
 715			const char *fmt, ...)
 716{
 717	va_list args;
 718	char buf[100];
 719
 720	va_start(args, fmt);
 721	vsnprintf(buf, sizeof(buf), fmt, args);
 722	va_end(args);
 723	slab_bug(s, "%s", buf);
 724	print_page_info(page);
 725	dump_stack();
 726}
 727
 728static void init_object(struct kmem_cache *s, void *object, u8 val)
 729{
 730	u8 *p = object;
 731
 732	if (s->flags & SLAB_RED_ZONE)
 733		memset(p - s->red_left_pad, val, s->red_left_pad);
 734
 735	if (s->flags & __OBJECT_POISON) {
 736		memset(p, POISON_FREE, s->object_size - 1);
 737		p[s->object_size - 1] = POISON_END;
 738	}
 739
 740	if (s->flags & SLAB_RED_ZONE)
 741		memset(p + s->object_size, val, s->inuse - s->object_size);
 742}
 743
 744static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
 745						void *from, void *to)
 746{
 747	slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
 748	memset(from, data, to - from);
 749}
 750
 751static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
 752			u8 *object, char *what,
 753			u8 *start, unsigned int value, unsigned int bytes)
 754{
 755	u8 *fault;
 756	u8 *end;
 757
 758	metadata_access_enable();
 759	fault = memchr_inv(start, value, bytes);
 760	metadata_access_disable();
 761	if (!fault)
 762		return 1;
 763
 764	end = start + bytes;
 765	while (end > fault && end[-1] == value)
 766		end--;
 767
 768	slab_bug(s, "%s overwritten", what);
 769	pr_err("INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
 770					fault, end - 1, fault[0], value);
 771	print_trailer(s, page, object);
 772
 773	restore_bytes(s, what, value, fault, end);
 774	return 0;
 775}
 776
 777/*
 778 * Object layout:
 779 *
 780 * object address
 781 * 	Bytes of the object to be managed.
 782 * 	If the freepointer may overlay the object then the free
 783 * 	pointer is the first word of the object.
 784 *
 785 * 	Poisoning uses 0x6b (POISON_FREE) and the last byte is
 786 * 	0xa5 (POISON_END)
 787 *
 788 * object + s->object_size
 789 * 	Padding to reach word boundary. This is also used for Redzoning.
 790 * 	Padding is extended by another word if Redzoning is enabled and
 791 * 	object_size == inuse.
 792 *
 793 * 	We fill with 0xbb (RED_INACTIVE) for inactive objects and with
 794 * 	0xcc (RED_ACTIVE) for objects in use.
 795 *
 796 * object + s->inuse
 797 * 	Meta data starts here.
 798 *
 799 * 	A. Free pointer (if we cannot overwrite object on free)
 800 * 	B. Tracking data for SLAB_STORE_USER
  801 * 	C. Padding to reach required alignment boundary or at minimum
 802 * 		one word if debugging is on to be able to detect writes
 803 * 		before the word boundary.
 804 *
 805 *	Padding is done using 0x5a (POISON_INUSE)
 806 *
 807 * object + s->size
 808 * 	Nothing is used beyond s->size.
 809 *
 810 * If slabcaches are merged then the object_size and inuse boundaries are mostly
 811 * ignored. And therefore no slab options that rely on these boundaries
 812 * may be used with merged slabcaches.
 813 */
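/*
 * Compact sketch of the layout described above, for illustration only
 * (each area exists only when the corresponding debug flags are enabled):
 *
 *	object - red_left_pad:	left redzone
 *	object:			payload (object_size bytes)
 *	object + object_size:	right redzone / alignment padding
 *	object + inuse:		freepointer (when kept outside the object),
 *				tracking data, trailing padding
 *	object + size:		end of the per-object area
 */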
 814
 815static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
 816{
 817	unsigned long off = s->inuse;	/* The end of info */
 818
 819	if (s->offset)
 820		/* Freepointer is placed after the object. */
 821		off += sizeof(void *);
 822
 823	if (s->flags & SLAB_STORE_USER)
 824		/* We also have user information there */
 825		off += 2 * sizeof(struct track);
 826
 827	off += kasan_metadata_size(s);
 828
 829	if (size_from_object(s) == off)
 830		return 1;
 831
 832	return check_bytes_and_report(s, page, p, "Object padding",
 833			p + off, POISON_INUSE, size_from_object(s) - off);
 834}
 835
 836/* Check the pad bytes at the end of a slab page */
 837static int slab_pad_check(struct kmem_cache *s, struct page *page)
 838{
 839	u8 *start;
 840	u8 *fault;
 841	u8 *end;
 842	u8 *pad;
 843	int length;
 844	int remainder;
 845
 846	if (!(s->flags & SLAB_POISON))
 847		return 1;
 848
 849	start = page_address(page);
 850	length = (PAGE_SIZE << compound_order(page)) - s->reserved;
 851	end = start + length;
 852	remainder = length % s->size;
 853	if (!remainder)
 854		return 1;
 855
 856	pad = end - remainder;
 857	metadata_access_enable();
 858	fault = memchr_inv(pad, POISON_INUSE, remainder);
 859	metadata_access_disable();
 860	if (!fault)
 861		return 1;
 862	while (end > fault && end[-1] == POISON_INUSE)
 863		end--;
 864
 865	slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
 866	print_section(KERN_ERR, "Padding ", pad, remainder);
 867
 868	restore_bytes(s, "slab padding", POISON_INUSE, fault, end);
 869	return 0;
 870}
 871
 872static int check_object(struct kmem_cache *s, struct page *page,
 873					void *object, u8 val)
 874{
 875	u8 *p = object;
 876	u8 *endobject = object + s->object_size;
 877
 878	if (s->flags & SLAB_RED_ZONE) {
 879		if (!check_bytes_and_report(s, page, object, "Redzone",
 880			object - s->red_left_pad, val, s->red_left_pad))
 881			return 0;
 882
 883		if (!check_bytes_and_report(s, page, object, "Redzone",
 884			endobject, val, s->inuse - s->object_size))
 885			return 0;
 886	} else {
 887		if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
 888			check_bytes_and_report(s, page, p, "Alignment padding",
 889				endobject, POISON_INUSE,
 890				s->inuse - s->object_size);
 891		}
 892	}
 893
 894	if (s->flags & SLAB_POISON) {
 895		if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
 896			(!check_bytes_and_report(s, page, p, "Poison", p,
 897					POISON_FREE, s->object_size - 1) ||
 898			 !check_bytes_and_report(s, page, p, "Poison",
 899				p + s->object_size - 1, POISON_END, 1)))
 900			return 0;
 901		/*
 902		 * check_pad_bytes cleans up on its own.
 903		 */
 904		check_pad_bytes(s, page, p);
 905	}
 906
 907	if (!s->offset && val == SLUB_RED_ACTIVE)
 908		/*
 909		 * Object and freepointer overlap. Cannot check
 910		 * freepointer while object is allocated.
 911		 */
 912		return 1;
 913
 914	/* Check free pointer validity */
 915	if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
 916		object_err(s, page, p, "Freepointer corrupt");
 917		/*
 918		 * No choice but to zap it and thus lose the remainder
 919		 * of the free objects in this slab. May cause
 920		 * another error because the object count is now wrong.
 921		 */
 922		set_freepointer(s, p, NULL);
 923		return 0;
 924	}
 925	return 1;
 926}
 927
 928static int check_slab(struct kmem_cache *s, struct page *page)
 929{
 930	int maxobj;
 931
 932	VM_BUG_ON(!irqs_disabled());
 933
 934	if (!PageSlab(page)) {
 935		slab_err(s, page, "Not a valid slab page");
 936		return 0;
 937	}
 938
 939	maxobj = order_objects(compound_order(page), s->size, s->reserved);
 940	if (page->objects > maxobj) {
 941		slab_err(s, page, "objects %u > max %u",
 942			page->objects, maxobj);
 943		return 0;
 944	}
 945	if (page->inuse > page->objects) {
 946		slab_err(s, page, "inuse %u > max %u",
 947			page->inuse, page->objects);
 948		return 0;
 949	}
 950	/* Slab_pad_check fixes things up after itself */
 951	slab_pad_check(s, page);
 952	return 1;
 953}
 954
 955/*
 956 * Determine if a certain object on a page is on the freelist. Must hold the
 957 * slab lock to guarantee that the chains are in a consistent state.
 958 */
 959static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
 960{
 961	int nr = 0;
 962	void *fp;
 963	void *object = NULL;
 964	int max_objects;
 965
 966	fp = page->freelist;
 967	while (fp && nr <= page->objects) {
 968		if (fp == search)
 969			return 1;
 970		if (!check_valid_pointer(s, page, fp)) {
 971			if (object) {
 972				object_err(s, page, object,
 973					"Freechain corrupt");
 974				set_freepointer(s, object, NULL);
 975			} else {
 976				slab_err(s, page, "Freepointer corrupt");
 977				page->freelist = NULL;
 978				page->inuse = page->objects;
 979				slab_fix(s, "Freelist cleared");
 980				return 0;
 981			}
 982			break;
 983		}
 984		object = fp;
 985		fp = get_freepointer(s, object);
 986		nr++;
 987	}
 988
 989	max_objects = order_objects(compound_order(page), s->size, s->reserved);
 990	if (max_objects > MAX_OBJS_PER_PAGE)
 991		max_objects = MAX_OBJS_PER_PAGE;
 992
 993	if (page->objects != max_objects) {
 994		slab_err(s, page, "Wrong number of objects. Found %d but should be %d",
 995			 page->objects, max_objects);
 996		page->objects = max_objects;
 997		slab_fix(s, "Number of objects adjusted.");
 998	}
 999	if (page->inuse != page->objects - nr) {
1000		slab_err(s, page, "Wrong object count. Counter is %d but counted were %d",
1001			 page->inuse, page->objects - nr);
1002		page->inuse = page->objects - nr;
1003		slab_fix(s, "Object count adjusted.");
1004	}
1005	return search == NULL;
1006}
1007
1008static void trace(struct kmem_cache *s, struct page *page, void *object,
1009								int alloc)
1010{
1011	if (s->flags & SLAB_TRACE) {
1012		pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
1013			s->name,
1014			alloc ? "alloc" : "free",
1015			object, page->inuse,
1016			page->freelist);
1017
1018		if (!alloc)
1019			print_section(KERN_INFO, "Object ", (void *)object,
1020					s->object_size);
1021
1022		dump_stack();
1023	}
1024}
1025
1026/*
1027 * Tracking of fully allocated slabs for debugging purposes.
1028 */
1029static void add_full(struct kmem_cache *s,
1030	struct kmem_cache_node *n, struct page *page)
1031{
1032	if (!(s->flags & SLAB_STORE_USER))
1033		return;
1034
1035	lockdep_assert_held(&n->list_lock);
1036	list_add(&page->lru, &n->full);
1037}
1038
1039static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
1040{
1041	if (!(s->flags & SLAB_STORE_USER))
1042		return;
1043
1044	lockdep_assert_held(&n->list_lock);
1045	list_del(&page->lru);
1046}
1047
1048/* Tracking of the number of slabs for debugging purposes */
1049static inline unsigned long slabs_node(struct kmem_cache *s, int node)
1050{
1051	struct kmem_cache_node *n = get_node(s, node);
1052
1053	return atomic_long_read(&n->nr_slabs);
1054}
1055
1056static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1057{
1058	return atomic_long_read(&n->nr_slabs);
1059}
1060
1061static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
1062{
1063	struct kmem_cache_node *n = get_node(s, node);
1064
1065	/*
1066	 * May be called early in order to allocate a slab for the
1067	 * kmem_cache_node structure. Solve the chicken-egg
1068	 * dilemma by deferring the increment of the count during
1069	 * bootstrap (see early_kmem_cache_node_alloc).
1070	 */
1071	if (likely(n)) {
1072		atomic_long_inc(&n->nr_slabs);
1073		atomic_long_add(objects, &n->total_objects);
1074	}
1075}
1076static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
1077{
1078	struct kmem_cache_node *n = get_node(s, node);
1079
1080	atomic_long_dec(&n->nr_slabs);
1081	atomic_long_sub(objects, &n->total_objects);
1082}
1083
1084/* Object debug checks for alloc/free paths */
1085static void setup_object_debug(struct kmem_cache *s, struct page *page,
1086								void *object)
1087{
1088	if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
1089		return;
1090
1091	init_object(s, object, SLUB_RED_INACTIVE);
1092	init_tracking(s, object);
1093}
1094
1095static inline int alloc_consistency_checks(struct kmem_cache *s,
1096					struct page *page,
1097					void *object, unsigned long addr)
1098{
1099	if (!check_slab(s, page))
1100		return 0;
1101
1102	if (!check_valid_pointer(s, page, object)) {
1103		object_err(s, page, object, "Freelist Pointer check fails");
1104		return 0;
1105	}
1106
1107	if (!check_object(s, page, object, SLUB_RED_INACTIVE))
1108		return 0;
1109
1110	return 1;
1111}
1112
1113static noinline int alloc_debug_processing(struct kmem_cache *s,
1114					struct page *page,
1115					void *object, unsigned long addr)
1116{
1117	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1118		if (!alloc_consistency_checks(s, page, object, addr))
1119			goto bad;
1120	}
1121
 1122	/* Success. Perform special debug activities for allocs */
1123	if (s->flags & SLAB_STORE_USER)
1124		set_track(s, object, TRACK_ALLOC, addr);
1125	trace(s, page, object, 1);
1126	init_object(s, object, SLUB_RED_ACTIVE);
1127	return 1;
1128
1129bad:
1130	if (PageSlab(page)) {
1131		/*
1132		 * If this is a slab page then lets do the best we can
1133		 * to avoid issues in the future. Marking all objects
1134		 * as used avoids touching the remaining objects.
1135		 */
1136		slab_fix(s, "Marking all objects used");
1137		page->inuse = page->objects;
1138		page->freelist = NULL;
1139	}
1140	return 0;
1141}
1142
1143static inline int free_consistency_checks(struct kmem_cache *s,
1144		struct page *page, void *object, unsigned long addr)
1145{
1146	if (!check_valid_pointer(s, page, object)) {
1147		slab_err(s, page, "Invalid object pointer 0x%p", object);
1148		return 0;
1149	}
1150
1151	if (on_freelist(s, page, object)) {
1152		object_err(s, page, object, "Object already free");
1153		return 0;
1154	}
1155
1156	if (!check_object(s, page, object, SLUB_RED_ACTIVE))
1157		return 0;
1158
1159	if (unlikely(s != page->slab_cache)) {
1160		if (!PageSlab(page)) {
1161			slab_err(s, page, "Attempt to free object(0x%p) outside of slab",
1162				 object);
1163		} else if (!page->slab_cache) {
1164			pr_err("SLUB <none>: no slab for object 0x%p.\n",
1165			       object);
1166			dump_stack();
1167		} else
1168			object_err(s, page, object,
1169					"page slab pointer corrupt.");
1170		return 0;
1171	}
1172	return 1;
1173}
1174
1175/* Supports checking bulk free of a constructed freelist */
1176static noinline int free_debug_processing(
1177	struct kmem_cache *s, struct page *page,
1178	void *head, void *tail, int bulk_cnt,
1179	unsigned long addr)
1180{
1181	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1182	void *object = head;
1183	int cnt = 0;
1184	unsigned long uninitialized_var(flags);
1185	int ret = 0;
1186
1187	spin_lock_irqsave(&n->list_lock, flags);
1188	slab_lock(page);
1189
1190	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1191		if (!check_slab(s, page))
1192			goto out;
1193	}
1194
1195next_object:
1196	cnt++;
1197
1198	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1199		if (!free_consistency_checks(s, page, object, addr))
1200			goto out;
1201	}
1202
1203	if (s->flags & SLAB_STORE_USER)
1204		set_track(s, object, TRACK_FREE, addr);
1205	trace(s, page, object, 0);
1206	/* Freepointer not overwritten by init_object(), SLAB_POISON moved it */
1207	init_object(s, object, SLUB_RED_INACTIVE);
1208
1209	/* Reached end of constructed freelist yet? */
1210	if (object != tail) {
1211		object = get_freepointer(s, object);
1212		goto next_object;
1213	}
1214	ret = 1;
1215
1216out:
1217	if (cnt != bulk_cnt)
1218		slab_err(s, page, "Bulk freelist count(%d) invalid(%d)\n",
1219			 bulk_cnt, cnt);
1220
1221	slab_unlock(page);
1222	spin_unlock_irqrestore(&n->list_lock, flags);
1223	if (!ret)
1224		slab_fix(s, "Object at 0x%p not freed", object);
1225	return ret;
1226}
1227
1228static int __init setup_slub_debug(char *str)
1229{
1230	slub_debug = DEBUG_DEFAULT_FLAGS;
1231	if (*str++ != '=' || !*str)
1232		/*
1233		 * No options specified. Switch on full debugging.
1234		 */
1235		goto out;
1236
1237	if (*str == ',')
1238		/*
1239		 * No options but restriction on slabs. This means full
1240		 * debugging for slabs matching a pattern.
1241		 */
1242		goto check_slabs;
1243
1244	slub_debug = 0;
1245	if (*str == '-')
1246		/*
1247		 * Switch off all debugging measures.
1248		 */
1249		goto out;
1250
1251	/*
1252	 * Determine which debug features should be switched on
1253	 */
1254	for (; *str && *str != ','; str++) {
1255		switch (tolower(*str)) {
1256		case 'f':
1257			slub_debug |= SLAB_CONSISTENCY_CHECKS;
1258			break;
1259		case 'z':
1260			slub_debug |= SLAB_RED_ZONE;
1261			break;
1262		case 'p':
1263			slub_debug |= SLAB_POISON;
1264			break;
1265		case 'u':
1266			slub_debug |= SLAB_STORE_USER;
1267			break;
1268		case 't':
1269			slub_debug |= SLAB_TRACE;
1270			break;
1271		case 'a':
1272			slub_debug |= SLAB_FAILSLAB;
1273			break;
1274		case 'o':
1275			/*
 1276			 * Avoid enabling debugging on caches if their minimum
1277			 * order would increase as a result.
1278			 */
1279			disable_higher_order_debug = 1;
1280			break;
1281		default:
1282			pr_err("slub_debug option '%c' unknown. skipped\n",
1283			       *str);
1284		}
1285	}
1286
1287check_slabs:
1288	if (*str == ',')
1289		slub_debug_slabs = str + 1;
1290out:
1291	return 1;
1292}
1293
1294__setup("slub_debug", setup_slub_debug);
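/*
 * Usage sketch, for illustration only (the letters map to the switch cases
 * handled above):
 *
 *	slub_debug		full debugging for all caches
 *	slub_debug=FZP		consistency checks, red zoning and poisoning
 *	slub_debug=,dentry	full debugging only for caches whose name
 *				starts with "dentry"
 *	slub_debug=-		switch all debugging off
 */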
1295
1296slab_flags_t kmem_cache_flags(unsigned int object_size,
1297	slab_flags_t flags, const char *name,
1298	void (*ctor)(void *))
1299{
1300	/*
1301	 * Enable debugging if selected on the kernel commandline.
1302	 */
1303	if (slub_debug && (!slub_debug_slabs || (name &&
1304		!strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)))))
1305		flags |= slub_debug;
1306
1307	return flags;
1308}
1309#else /* !CONFIG_SLUB_DEBUG */
1310static inline void setup_object_debug(struct kmem_cache *s,
1311			struct page *page, void *object) {}
1312
1313static inline int alloc_debug_processing(struct kmem_cache *s,
1314	struct page *page, void *object, unsigned long addr) { return 0; }
1315
1316static inline int free_debug_processing(
1317	struct kmem_cache *s, struct page *page,
1318	void *head, void *tail, int bulk_cnt,
1319	unsigned long addr) { return 0; }
1320
1321static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
1322			{ return 1; }
1323static inline int check_object(struct kmem_cache *s, struct page *page,
1324			void *object, u8 val) { return 1; }
1325static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
1326					struct page *page) {}
1327static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
1328					struct page *page) {}
1329slab_flags_t kmem_cache_flags(unsigned int object_size,
1330	slab_flags_t flags, const char *name,
1331	void (*ctor)(void *))
1332{
1333	return flags;
1334}
1335#define slub_debug 0
1336
1337#define disable_higher_order_debug 0
1338
1339static inline unsigned long slabs_node(struct kmem_cache *s, int node)
1340							{ return 0; }
1341static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1342							{ return 0; }
1343static inline void inc_slabs_node(struct kmem_cache *s, int node,
1344							int objects) {}
1345static inline void dec_slabs_node(struct kmem_cache *s, int node,
1346							int objects) {}
1347
1348#endif /* CONFIG_SLUB_DEBUG */
1349
1350/*
1351 * Hooks for other subsystems that check memory allocations. In a typical
1352 * production configuration these hooks all should produce no code at all.
1353 */
1354static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
1355{
1356	kmemleak_alloc(ptr, size, 1, flags);
1357	kasan_kmalloc_large(ptr, size, flags);
1358}
1359
1360static __always_inline void kfree_hook(void *x)
1361{
1362	kmemleak_free(x);
1363	kasan_kfree_large(x, _RET_IP_);
1364}
1365
1366static __always_inline bool slab_free_hook(struct kmem_cache *s, void *x)
1367{
1368	kmemleak_free_recursive(x, s->flags);
1369
1370	/*
 1371	 * Trouble is that we may no longer disable interrupts in the fast path.
 1372	 * So in order to make the debug calls that expect irqs to be
 1373	 * disabled, we need to disable interrupts temporarily.
1374	 */
1375#ifdef CONFIG_LOCKDEP
1376	{
1377		unsigned long flags;
1378
1379		local_irq_save(flags);
1380		debug_check_no_locks_freed(x, s->object_size);
1381		local_irq_restore(flags);
1382	}
1383#endif
1384	if (!(s->flags & SLAB_DEBUG_OBJECTS))
1385		debug_check_no_obj_freed(x, s->object_size);
1386
1387	/* KASAN might put x into memory quarantine, delaying its reuse */
1388	return kasan_slab_free(s, x, _RET_IP_);
1389}
1390
1391static inline bool slab_free_freelist_hook(struct kmem_cache *s,
1392					   void **head, void **tail)
1393{
1394/*
1395 * Compiler cannot detect this function can be removed if slab_free_hook()
1396 * evaluates to nothing.  Thus, catch all relevant config debug options here.
1397 */
1398#if defined(CONFIG_LOCKDEP)	||		\
1399	defined(CONFIG_DEBUG_KMEMLEAK) ||	\
1400	defined(CONFIG_DEBUG_OBJECTS_FREE) ||	\
1401	defined(CONFIG_KASAN)
1402
1403	void *object;
1404	void *next = *head;
1405	void *old_tail = *tail ? *tail : *head;
 
1406
1407	/* Head and tail of the reconstructed freelist */
1408	*head = NULL;
1409	*tail = NULL;
1410
1411	do {
1412		object = next;
1413		next = get_freepointer(s, object);
1414		/* If object's reuse doesn't have to be delayed */
1415		if (!slab_free_hook(s, object)) {
1416			/* Move object to the new freelist */
1417			set_freepointer(s, object, *head);
1418			*head = object;
1419			if (!*tail)
1420				*tail = object;
1421		}
1422	} while (object != old_tail);
1423
1424	if (*head == *tail)
1425		*tail = NULL;
1426
1427	return *head != NULL;
1428#else
1429	return true;
1430#endif
1431}
1432
1433static void setup_object(struct kmem_cache *s, struct page *page,
1434				void *object)
1435{
1436	setup_object_debug(s, page, object);
1437	kasan_init_slab_obj(s, object);
1438	if (unlikely(s->ctor)) {
1439		kasan_unpoison_object_data(s, object);
1440		s->ctor(object);
1441		kasan_poison_object_data(s, object);
1442	}
 
1443}
1444
1445/*
1446 * Slab allocation and freeing
1447 */
1448static inline struct page *alloc_slab_page(struct kmem_cache *s,
1449		gfp_t flags, int node, struct kmem_cache_order_objects oo)
1450{
1451	struct page *page;
1452	unsigned int order = oo_order(oo);
1453
1454	if (node == NUMA_NO_NODE)
1455		page = alloc_pages(flags, order);
1456	else
1457		page = __alloc_pages_node(node, flags, order);
1458
1459	if (page && memcg_charge_slab(page, flags, order, s)) {
1460		__free_pages(page, order);
1461		page = NULL;
1462	}
1463
1464	return page;
1465}
1466
1467#ifdef CONFIG_SLAB_FREELIST_RANDOM
1468/* Pre-initialize the random sequence cache */
1469static int init_cache_random_seq(struct kmem_cache *s)
1470{
1471	unsigned int count = oo_objects(s->oo);
1472	int err;
1473
1474	/* Bailout if already initialised */
1475	if (s->random_seq)
1476		return 0;
1477
1478	err = cache_random_seq_create(s, count, GFP_KERNEL);
1479	if (err) {
1480		pr_err("SLUB: Unable to initialize free list for %s\n",
1481			s->name);
1482		return err;
1483	}
1484
1485	/* Transform to an offset on the set of pages */
1486	if (s->random_seq) {
1487		unsigned int i;
1488
1489		for (i = 0; i < count; i++)
1490			s->random_seq[i] *= s->size;
1491	}
1492	return 0;
1493}
1494
1495/* Initialize each random sequence freelist per cache */
1496static void __init init_freelist_randomization(void)
1497{
1498	struct kmem_cache *s;
1499
1500	mutex_lock(&slab_mutex);
1501
1502	list_for_each_entry(s, &slab_caches, list)
1503		init_cache_random_seq(s);
1504
1505	mutex_unlock(&slab_mutex);
1506}
1507
1508/* Get the next entry on the pre-computed freelist randomized */
1509static void *next_freelist_entry(struct kmem_cache *s, struct page *page,
1510				unsigned long *pos, void *start,
1511				unsigned long page_limit,
1512				unsigned long freelist_count)
1513{
1514	unsigned int idx;
1515
1516	/*
1517	 * If the target page allocation failed, the number of objects on the
1518	 * page might be smaller than the usual size defined by the cache.
1519	 */
1520	do {
1521		idx = s->random_seq[*pos];
1522		*pos += 1;
1523		if (*pos >= freelist_count)
1524			*pos = 0;
1525	} while (unlikely(idx >= page_limit));
1526
1527	return (char *)start + idx;
1528}
1529
1530/* Shuffle the single linked freelist based on a random pre-computed sequence */
1531static bool shuffle_freelist(struct kmem_cache *s, struct page *page)
1532{
1533	void *start;
1534	void *cur;
1535	void *next;
1536	unsigned long idx, pos, page_limit, freelist_count;
1537
1538	if (page->objects < 2 || !s->random_seq)
1539		return false;
1540
1541	freelist_count = oo_objects(s->oo);
1542	pos = get_random_int() % freelist_count;
1543
1544	page_limit = page->objects * s->size;
1545	start = fixup_red_left(s, page_address(page));
1546
1547	/* First entry is used as the base of the freelist */
1548	cur = next_freelist_entry(s, page, &pos, start, page_limit,
1549				freelist_count);
 
1550	page->freelist = cur;
1551
1552	for (idx = 1; idx < page->objects; idx++) {
1553		setup_object(s, page, cur);
1554		next = next_freelist_entry(s, page, &pos, start, page_limit,
1555			freelist_count);
 
1556		set_freepointer(s, cur, next);
1557		cur = next;
1558	}
1559	setup_object(s, page, cur);
1560	set_freepointer(s, cur, NULL);
1561
1562	return true;
1563}
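/*
 * Illustrative example (not in the upstream source): for a slab holding 4
 * objects with a precomputed s->random_seq equivalent to the index order
 * {2, 0, 3, 1} and a starting pos of 0, the loop above links the freelist
 * as obj2 -> obj0 -> obj3 -> obj1, so allocation order no longer matches
 * the objects' order in memory.
 */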
1564#else
1565static inline int init_cache_random_seq(struct kmem_cache *s)
1566{
1567	return 0;
1568}
1569static inline void init_freelist_randomization(void) { }
1570static inline bool shuffle_freelist(struct kmem_cache *s, struct page *page)
1571{
1572	return false;
1573}
1574#endif /* CONFIG_SLAB_FREELIST_RANDOM */
1575
1576static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
1577{
1578	struct page *page;
1579	struct kmem_cache_order_objects oo = s->oo;
1580	gfp_t alloc_gfp;
1581	void *start, *p;
1582	int idx, order;
1583	bool shuffle;
1584
1585	flags &= gfp_allowed_mask;
1586
1587	if (gfpflags_allow_blocking(flags))
1588		local_irq_enable();
1589
1590	flags |= s->allocflags;
1591
1592	/*
1593	 * Let the initial higher-order allocation fail under memory pressure
 1594	 * so we fall back to the minimum order allocation.
1595	 */
1596	alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
1597	if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min))
1598		alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~(__GFP_RECLAIM|__GFP_NOFAIL);
1599
1600	page = alloc_slab_page(s, alloc_gfp, node, oo);
1601	if (unlikely(!page)) {
1602		oo = s->min;
1603		alloc_gfp = flags;
1604		/*
1605		 * Allocation may have failed due to fragmentation.
1606		 * Try a lower order alloc if possible
1607		 */
1608		page = alloc_slab_page(s, alloc_gfp, node, oo);
1609		if (unlikely(!page))
1610			goto out;
1611		stat(s, ORDER_FALLBACK);
1612	}
1613
1614	page->objects = oo_objects(oo);
1615
1616	order = compound_order(page);
1617	page->slab_cache = s;
1618	__SetPageSlab(page);
1619	if (page_is_pfmemalloc(page))
1620		SetPageSlabPfmemalloc(page);
1621
1622	start = page_address(page);
1623
1624	if (unlikely(s->flags & SLAB_POISON))
1625		memset(start, POISON_INUSE, PAGE_SIZE << order);
1626
1627	kasan_poison_slab(page);
1628
1629	shuffle = shuffle_freelist(s, page);
1630
1631	if (!shuffle) {
1632		for_each_object_idx(p, idx, s, start, page->objects) {
1633			setup_object(s, page, p);
1634			if (likely(idx < page->objects))
1635				set_freepointer(s, p, p + s->size);
1636			else
1637				set_freepointer(s, p, NULL);
1638		}
1639		page->freelist = fixup_red_left(s, start);
1640	}
1641
1642	page->inuse = page->objects;
1643	page->frozen = 1;
1644
1645out:
1646	if (gfpflags_allow_blocking(flags))
1647		local_irq_disable();
1648	if (!page)
1649		return NULL;
1650
1651	mod_lruvec_page_state(page,
1652		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
1653		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
1654		1 << oo_order(oo));
1655
1656	inc_slabs_node(s, page_to_nid(page), page->objects);
1657
1658	return page;
1659}
1660
1661static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
1662{
1663	if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
1664		gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
1665		flags &= ~GFP_SLAB_BUG_MASK;
1666		pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
1667				invalid_mask, &invalid_mask, flags, &flags);
1668		dump_stack();
1669	}
1670
1671	return allocate_slab(s,
1672		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
1673}
1674
1675static void __free_slab(struct kmem_cache *s, struct page *page)
1676{
1677	int order = compound_order(page);
1678	int pages = 1 << order;
1679
1680	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1681		void *p;
1682
1683		slab_pad_check(s, page);
1684		for_each_object(p, s, page_address(page),
1685						page->objects)
1686			check_object(s, page, p, SLUB_RED_INACTIVE);
1687	}
1688
1689	mod_lruvec_page_state(page,
1690		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
1691		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
1692		-pages);
1693
1694	__ClearPageSlabPfmemalloc(page);
1695	__ClearPageSlab(page);
1696
1697	page_mapcount_reset(page);
1698	if (current->reclaim_state)
1699		current->reclaim_state->reclaimed_slab += pages;
1700	memcg_uncharge_slab(page, order, s);
1701	__free_pages(page, order);
1702}
1703
1704#define need_reserve_slab_rcu						\
1705	(sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
1706
1707static void rcu_free_slab(struct rcu_head *h)
1708{
1709	struct page *page;
1710
1711	if (need_reserve_slab_rcu)
1712		page = virt_to_head_page(h);
1713	else
1714		page = container_of((struct list_head *)h, struct page, lru);
1715
1716	__free_slab(page->slab_cache, page);
1717}
1718
1719static void free_slab(struct kmem_cache *s, struct page *page)
1720{
1721	if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) {
1722		struct rcu_head *head;
1723
1724		if (need_reserve_slab_rcu) {
1725			int order = compound_order(page);
1726			int offset = (PAGE_SIZE << order) - s->reserved;
1727
1728			VM_BUG_ON(s->reserved != sizeof(*head));
1729			head = page_address(page) + offset;
1730		} else {
1731			head = &page->rcu_head;
1732		}
1733
1734		call_rcu(head, rcu_free_slab);
1735	} else
1736		__free_slab(s, page);
1737}
1738
1739static void discard_slab(struct kmem_cache *s, struct page *page)
1740{
1741	dec_slabs_node(s, page_to_nid(page), page->objects);
1742	free_slab(s, page);
1743}
1744
1745/*
1746 * Management of partially allocated slabs.
1747 */
1748static inline void
1749__add_partial(struct kmem_cache_node *n, struct page *page, int tail)
1750{
1751	n->nr_partial++;
1752	if (tail == DEACTIVATE_TO_TAIL)
1753		list_add_tail(&page->lru, &n->partial);
1754	else
1755		list_add(&page->lru, &n->partial);
1756}
1757
1758static inline void add_partial(struct kmem_cache_node *n,
1759				struct page *page, int tail)
1760{
1761	lockdep_assert_held(&n->list_lock);
1762	__add_partial(n, page, tail);
1763}
1764
1765static inline void remove_partial(struct kmem_cache_node *n,
1766					struct page *page)
1767{
1768	lockdep_assert_held(&n->list_lock);
1769	list_del(&page->lru);
1770	n->nr_partial--;
1771}
1772
1773/*
1774 * Remove slab from the partial list, freeze it and
1775 * return the pointer to the freelist.
1776 *
1777 * Returns a list of objects or NULL if it fails.
1778 */
1779static inline void *acquire_slab(struct kmem_cache *s,
1780		struct kmem_cache_node *n, struct page *page,
1781		int mode, int *objects)
1782{
1783	void *freelist;
1784	unsigned long counters;
1785	struct page new;
1786
1787	lockdep_assert_held(&n->list_lock);
1788
1789	/*
1790	 * Zap the freelist and set the frozen bit.
1791	 * The old freelist is the list of objects for the
1792	 * per cpu allocation list.
1793	 */
1794	freelist = page->freelist;
1795	counters = page->counters;
1796	new.counters = counters;
1797	*objects = new.objects - new.inuse;
1798	if (mode) {
1799		new.inuse = page->objects;
1800		new.freelist = NULL;
1801	} else {
1802		new.freelist = freelist;
1803	}
1804
1805	VM_BUG_ON(new.frozen);
1806	new.frozen = 1;
1807
1808	if (!__cmpxchg_double_slab(s, page,
1809			freelist, counters,
1810			new.freelist, new.counters,
1811			"acquire_slab"))
1812		return NULL;
1813
1814	remove_partial(n, page);
1815	WARN_ON(!freelist);
1816	return freelist;
1817}
1818
1819static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
1820static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
1821
1822/*
1823 * Try to allocate a partial slab from a specific node.
1824 */
1825static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
1826				struct kmem_cache_cpu *c, gfp_t flags)
1827{
1828	struct page *page, *page2;
1829	void *object = NULL;
1830	unsigned int available = 0;
1831	int objects;
1832
1833	/*
1834	 * Racy check. If we mistakenly see no partial slabs then we
1835	 * just allocate an empty slab. If we mistakenly try to get a
 1836	 * partial slab and there is none available then this function
1837	 * will return NULL.
1838	 */
1839	if (!n || !n->nr_partial)
1840		return NULL;
1841
1842	spin_lock(&n->list_lock);
1843	list_for_each_entry_safe(page, page2, &n->partial, lru) {
1844		void *t;
1845
1846		if (!pfmemalloc_match(page, flags))
1847			continue;
1848
1849		t = acquire_slab(s, n, page, object == NULL, &objects);
1850		if (!t)
1851			break;
1852
1853		available += objects;
1854		if (!object) {
1855			c->page = page;
1856			stat(s, ALLOC_FROM_PARTIAL);
1857			object = t;
1858		} else {
1859			put_cpu_partial(s, page, 0);
1860			stat(s, CPU_PARTIAL_NODE);
1861		}
1862		if (!kmem_cache_has_cpu_partial(s)
1863			|| available > slub_cpu_partial(s) / 2)
1864			break;
1865
1866	}
1867	spin_unlock(&n->list_lock);
1868	return object;
1869}
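/*
 * Note added for clarity (not in the upstream source): the first slab
 * acquired becomes the new cpu slab (c->page); further partial slabs are
 * parked on the per-cpu partial list via put_cpu_partial() until roughly
 * half of the configured cpu_partial budget is buffered or the node's
 * partial list is exhausted.
 */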
1870
1871/*
1872 * Get a page from somewhere. Search in increasing NUMA distances.
1873 */
1874static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
1875		struct kmem_cache_cpu *c)
1876{
1877#ifdef CONFIG_NUMA
1878	struct zonelist *zonelist;
1879	struct zoneref *z;
1880	struct zone *zone;
1881	enum zone_type high_zoneidx = gfp_zone(flags);
1882	void *object;
1883	unsigned int cpuset_mems_cookie;
1884
1885	/*
1886	 * The defrag ratio allows a configuration of the tradeoffs between
1887	 * inter node defragmentation and node local allocations. A lower
1888	 * defrag_ratio increases the tendency to do local allocations
1889	 * instead of attempting to obtain partial slabs from other nodes.
1890	 *
1891	 * If the defrag_ratio is set to 0 then kmalloc() always
1892	 * returns node local objects. If the ratio is higher then kmalloc()
1893	 * may return off node objects because partial slabs are obtained
1894	 * from other nodes and filled up.
1895	 *
1896	 * If /sys/kernel/slab/xx/remote_node_defrag_ratio is set to 100
1897	 * (which makes defrag_ratio = 1000) then every (well almost)
1898	 * allocation will first attempt to defrag slab caches on other nodes.
1899	 * This means scanning over all nodes to look for partial slabs which
1900	 * may be expensive if we do it every time we are trying to find a slab
1901	 * with available objects.
1902	 */
1903	if (!s->remote_node_defrag_ratio ||
1904			get_cycles() % 1024 > s->remote_node_defrag_ratio)
1905		return NULL;
1906
1907	do {
1908		cpuset_mems_cookie = read_mems_allowed_begin();
1909		zonelist = node_zonelist(mempolicy_slab_node(), flags);
1910		for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
1911			struct kmem_cache_node *n;
1912
1913			n = get_node(s, zone_to_nid(zone));
1914
1915			if (n && cpuset_zone_allowed(zone, flags) &&
1916					n->nr_partial > s->min_partial) {
1917				object = get_partial_node(s, n, c, flags);
1918				if (object) {
1919					/*
1920					 * Don't check read_mems_allowed_retry()
1921					 * here - if mems_allowed was updated in
1922					 * parallel, that was a harmless race
1923					 * between allocation and the cpuset
1924					 * update
1925					 */
1926					return object;
1927				}
1928			}
1929		}
1930	} while (read_mems_allowed_retry(cpuset_mems_cookie));
1931#endif
1932	return NULL;
1933}
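/*
 * Worked example of the ratio check above (default tunables assumed):
 * the sysfs file stores ten times the written value, so writing 50 to
 * /sys/kernel/slab/<cache>/remote_node_defrag_ratio yields
 * remote_node_defrag_ratio == 500.  get_cycles() % 1024 is a cheap
 * pseudo-random value in [0, 1023], so the remote scan is skipped
 * whenever that value exceeds 500, i.e. roughly half of the calls.
 * With the default of 1000 set in kmem_cache_open() only the values
 * 1001..1023 skip the scan, i.e. about 2% of the calls.
 */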
1934
1935/*
1936 * Get a partial page, freeze it and return it.
1937 */
1938static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
1939		struct kmem_cache_cpu *c)
1940{
1941	void *object;
1942	int searchnode = node;
1943
1944	if (node == NUMA_NO_NODE)
1945		searchnode = numa_mem_id();
1946	else if (!node_present_pages(node))
1947		searchnode = node_to_mem_node(node);
1948
1949	object = get_partial_node(s, get_node(s, searchnode), c, flags);
1950	if (object || node != NUMA_NO_NODE)
1951		return object;
1952
1953	return get_any_partial(s, flags, c);
1954}
1955
1956#ifdef CONFIG_PREEMPT
1957/*
1958 * Calculate the next globally unique transaction for disambiguation
1959 * during cmpxchg. The transactions start with the cpu number and are then
1960 * incremented by CONFIG_NR_CPUS.
1961 */
1962#define TID_STEP  roundup_pow_of_two(CONFIG_NR_CPUS)
1963#else
1964/*
1965 * No preemption is supported, therefore there is also no need to check
1966 * for different cpus.
1967 */
1968#define TID_STEP 1
1969#endif
1970
1971static inline unsigned long next_tid(unsigned long tid)
1972{
1973	return tid + TID_STEP;
1974}
1975
 
1976static inline unsigned int tid_to_cpu(unsigned long tid)
1977{
1978	return tid % TID_STEP;
1979}
1980
1981static inline unsigned long tid_to_event(unsigned long tid)
1982{
1983	return tid / TID_STEP;
1984}
 
1985
1986static inline unsigned int init_tid(int cpu)
1987{
1988	return cpu;
1989}
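/*
 * Worked example of the tid encoding (illustrative values): with
 * CONFIG_PREEMPT and CONFIG_NR_CPUS=64, TID_STEP is 64.  CPU 3 starts
 * at tid 3 (init_tid) and every successful operation advances it by 64:
 * 3, 67, 131, ...  tid_to_cpu(67) == 67 % 64 == 3 recovers the owning
 * cpu and tid_to_event(67) == 1 counts the operations performed, so a
 * tid sampled on one cpu can never equal the current tid of another.
 */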
1990
1991static inline void note_cmpxchg_failure(const char *n,
1992		const struct kmem_cache *s, unsigned long tid)
1993{
1994#ifdef SLUB_DEBUG_CMPXCHG
1995	unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid);
1996
1997	pr_info("%s %s: cmpxchg redo ", n, s->name);
1998
1999#ifdef CONFIG_PREEMPT
2000	if (tid_to_cpu(tid) != tid_to_cpu(actual_tid))
2001		pr_warn("due to cpu change %d -> %d\n",
2002			tid_to_cpu(tid), tid_to_cpu(actual_tid));
2003	else
2004#endif
2005	if (tid_to_event(tid) != tid_to_event(actual_tid))
2006		pr_warn("due to cpu running other code. Event %ld->%ld\n",
2007			tid_to_event(tid), tid_to_event(actual_tid));
2008	else
2009		pr_warn("for unknown reason: actual=%lx was=%lx target=%lx\n",
2010			actual_tid, tid, next_tid(tid));
2011#endif
2012	stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
2013}
2014
2015static void init_kmem_cache_cpus(struct kmem_cache *s)
2016{
2017	int cpu;
2018
2019	for_each_possible_cpu(cpu)
2020		per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu);
2021}
2022
2023/*
2024 * Remove the cpu slab
2025 */
2026static void deactivate_slab(struct kmem_cache *s, struct page *page,
2027				void *freelist, struct kmem_cache_cpu *c)
2028{
2029	enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
2030	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
2031	int lock = 0;
2032	enum slab_modes l = M_NONE, m = M_NONE;
2033	void *nextfree;
2034	int tail = DEACTIVATE_TO_HEAD;
2035	struct page new;
2036	struct page old;
2037
2038	if (page->freelist) {
2039		stat(s, DEACTIVATE_REMOTE_FREES);
2040		tail = DEACTIVATE_TO_TAIL;
2041	}
2042
2043	/*
2044	 * Stage one: Free all available per cpu objects back
2045	 * to the page freelist while it is still frozen. Leave the
2046	 * last one.
2047	 *
2048	 * There is no need to take the list_lock because the page
2049	 * is still frozen.
2050	 */
2051	while (freelist && (nextfree = get_freepointer(s, freelist))) {
2052		void *prior;
2053		unsigned long counters;
2054
2055		do {
2056			prior = page->freelist;
2057			counters = page->counters;
2058			set_freepointer(s, freelist, prior);
2059			new.counters = counters;
2060			new.inuse--;
2061			VM_BUG_ON(!new.frozen);
2062
2063		} while (!__cmpxchg_double_slab(s, page,
2064			prior, counters,
2065			freelist, new.counters,
2066			"drain percpu freelist"));
2067
2068		freelist = nextfree;
2069	}
2070
2071	/*
2072	 * Stage two: Ensure that the page is unfrozen while the
2073	 * list presence reflects the actual number of objects
2074	 * during unfreeze.
2075	 *
2076	 * We set up the list membership and then perform a cmpxchg
2077	 * with the count. If there is a mismatch then the page
2078	 * was not unfrozen and may now be on the wrong list.
2079	 *
2080	 * Then we restart the process which may have to remove
2081	 * the page from the list that we just put it on again
2082	 * because the number of objects in the slab may have
2083	 * changed.
2084	 */
2085redo:
2086
2087	old.freelist = page->freelist;
2088	old.counters = page->counters;
2089	VM_BUG_ON(!old.frozen);
2090
2091	/* Determine target state of the slab */
2092	new.counters = old.counters;
2093	if (freelist) {
2094		new.inuse--;
2095		set_freepointer(s, freelist, old.freelist);
2096		new.freelist = freelist;
2097	} else
2098		new.freelist = old.freelist;
2099
2100	new.frozen = 0;
2101
2102	if (!new.inuse && n->nr_partial >= s->min_partial)
2103		m = M_FREE;
2104	else if (new.freelist) {
2105		m = M_PARTIAL;
2106		if (!lock) {
2107			lock = 1;
2108			/*
2109			 * Taking the spinlock removes the possibility
2110			 * that acquire_slab() will see a slab page that
2111			 * is frozen
2112			 */
2113			spin_lock(&n->list_lock);
2114		}
2115	} else {
2116		m = M_FULL;
2117		if (kmem_cache_debug(s) && !lock) {
2118			lock = 1;
2119			/*
2120			 * This also ensures that the scanning of full
2121			 * slabs from diagnostic functions will not see
2122			 * any frozen slabs.
2123			 */
2124			spin_lock(&n->list_lock);
2125		}
2126	}
2127
2128	if (l != m) {
2129
2130		if (l == M_PARTIAL)
2131
2132			remove_partial(n, page);
2133
2134		else if (l == M_FULL)
2135
2136			remove_full(s, n, page);
2137
2138		if (m == M_PARTIAL) {
2139
2140			add_partial(n, page, tail);
2141			stat(s, tail);
2142
2143		} else if (m == M_FULL) {
2144
2145			stat(s, DEACTIVATE_FULL);
2146			add_full(s, n, page);
2147
2148		}
2149	}
2150
2151	l = m;
2152	if (!__cmpxchg_double_slab(s, page,
2153				old.freelist, old.counters,
2154				new.freelist, new.counters,
2155				"unfreezing slab"))
2156		goto redo;
2157
2158	if (lock)
2159		spin_unlock(&n->list_lock);
2160
2161	if (m == M_FREE) {
2162		stat(s, DEACTIVATE_EMPTY);
2163		discard_slab(s, page);
2164		stat(s, FREE_SLAB);
2165	}
2166
2167	c->page = NULL;
2168	c->freelist = NULL;
2169}
2170
2171/*
2172 * Unfreeze all the cpu partial slabs.
2173 *
2174 * This function must be called with interrupts disabled
2175 * for the cpu using c (or some other guarantee must be in place
2176 * to prevent concurrent accesses).
2177 */
2178static void unfreeze_partials(struct kmem_cache *s,
2179		struct kmem_cache_cpu *c)
2180{
2181#ifdef CONFIG_SLUB_CPU_PARTIAL
2182	struct kmem_cache_node *n = NULL, *n2 = NULL;
2183	struct page *page, *discard_page = NULL;
2184
2185	while ((page = c->partial)) {
2186		struct page new;
2187		struct page old;
2188
2189		c->partial = page->next;
2190
2191		n2 = get_node(s, page_to_nid(page));
2192		if (n != n2) {
2193			if (n)
2194				spin_unlock(&n->list_lock);
2195
2196			n = n2;
2197			spin_lock(&n->list_lock);
2198		}
2199
2200		do {
2201
2202			old.freelist = page->freelist;
2203			old.counters = page->counters;
2204			VM_BUG_ON(!old.frozen);
2205
2206			new.counters = old.counters;
2207			new.freelist = old.freelist;
2208
2209			new.frozen = 0;
2210
2211		} while (!__cmpxchg_double_slab(s, page,
2212				old.freelist, old.counters,
2213				new.freelist, new.counters,
2214				"unfreezing slab"));
2215
2216		if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) {
2217			page->next = discard_page;
2218			discard_page = page;
2219		} else {
2220			add_partial(n, page, DEACTIVATE_TO_TAIL);
2221			stat(s, FREE_ADD_PARTIAL);
2222		}
2223	}
2224
2225	if (n)
2226		spin_unlock(&n->list_lock);
2227
2228	while (discard_page) {
2229		page = discard_page;
2230		discard_page = discard_page->next;
2231
2232		stat(s, DEACTIVATE_EMPTY);
2233		discard_slab(s, page);
2234		stat(s, FREE_SLAB);
2235	}
2236#endif
2237}
2238
2239/*
2240 * Put a page that was just frozen (in __slab_free) into a partial page
2241 * slot if available.
2242 *
2243 * If we did not find a slot then simply move all the partials to the
2244 * per node partial list.
2245 */
2246static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
2247{
2248#ifdef CONFIG_SLUB_CPU_PARTIAL
2249	struct page *oldpage;
2250	int pages;
2251	int pobjects;
2252
2253	preempt_disable();
2254	do {
2255		pages = 0;
2256		pobjects = 0;
2257		oldpage = this_cpu_read(s->cpu_slab->partial);
2258
2259		if (oldpage) {
2260			pobjects = oldpage->pobjects;
2261			pages = oldpage->pages;
2262			if (drain && pobjects > s->cpu_partial) {
2263				unsigned long flags;
2264				/*
2265				 * partial array is full. Move the existing
2266				 * set to the per node partial list.
2267				 */
2268				local_irq_save(flags);
2269				unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
2270				local_irq_restore(flags);
2271				oldpage = NULL;
2272				pobjects = 0;
2273				pages = 0;
2274				stat(s, CPU_PARTIAL_DRAIN);
2275			}
2276		}
2277
2278		pages++;
2279		pobjects += page->objects - page->inuse;
2280
2281		page->pages = pages;
2282		page->pobjects = pobjects;
2283		page->next = oldpage;
2284
2285	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page)
2286								!= oldpage);
2287	if (unlikely(!s->cpu_partial)) {
2288		unsigned long flags;
2289
2290		local_irq_save(flags);
2291		unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
2292		local_irq_restore(flags);
2293	}
2294	preempt_enable();
2295#endif
2296}
2297
2298static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
2299{
2300	stat(s, CPUSLAB_FLUSH);
2301	deactivate_slab(s, c->page, c->freelist, c);
2302
2303	c->tid = next_tid(c->tid);
2304}
2305
2306/*
2307 * Flush cpu slab.
2308 *
2309 * Called from IPI handler with interrupts disabled.
2310 */
2311static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
2312{
2313	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
2314
2315	if (likely(c)) {
2316		if (c->page)
2317			flush_slab(s, c);
2318
2319		unfreeze_partials(s, c);
2320	}
2321}
2322
2323static void flush_cpu_slab(void *d)
2324{
2325	struct kmem_cache *s = d;
2326
2327	__flush_cpu_slab(s, smp_processor_id());
2328}
2329
2330static bool has_cpu_slab(int cpu, void *info)
2331{
2332	struct kmem_cache *s = info;
2333	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
2334
2335	return c->page || slub_percpu_partial(c);
2336}
2337
2338static void flush_all(struct kmem_cache *s)
2339{
2340	on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
2341}
2342
2343/*
2344 * Use the cpu notifier to ensure that the cpu slabs are flushed when
2345 * necessary.
2346 */
2347static int slub_cpu_dead(unsigned int cpu)
2348{
2349	struct kmem_cache *s;
2350	unsigned long flags;
2351
2352	mutex_lock(&slab_mutex);
2353	list_for_each_entry(s, &slab_caches, list) {
2354		local_irq_save(flags);
2355		__flush_cpu_slab(s, cpu);
2356		local_irq_restore(flags);
2357	}
2358	mutex_unlock(&slab_mutex);
2359	return 0;
2360}
2361
2362/*
2363 * Check if the objects in a per cpu structure fit numa
2364 * locality expectations.
2365 */
2366static inline int node_match(struct page *page, int node)
2367{
2368#ifdef CONFIG_NUMA
2369	if (!page || (node != NUMA_NO_NODE && page_to_nid(page) != node))
2370		return 0;
2371#endif
2372	return 1;
2373}
2374
2375#ifdef CONFIG_SLUB_DEBUG
2376static int count_free(struct page *page)
2377{
2378	return page->objects - page->inuse;
2379}
2380
2381static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
2382{
2383	return atomic_long_read(&n->total_objects);
2384}
2385#endif /* CONFIG_SLUB_DEBUG */
2386
2387#if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SYSFS)
2388static unsigned long count_partial(struct kmem_cache_node *n,
2389					int (*get_count)(struct page *))
2390{
2391	unsigned long flags;
2392	unsigned long x = 0;
2393	struct page *page;
2394
2395	spin_lock_irqsave(&n->list_lock, flags);
2396	list_for_each_entry(page, &n->partial, lru)
2397		x += get_count(page);
2398	spin_unlock_irqrestore(&n->list_lock, flags);
2399	return x;
2400}
2401#endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
2402
2403static noinline void
2404slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
2405{
2406#ifdef CONFIG_SLUB_DEBUG
2407	static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
2408				      DEFAULT_RATELIMIT_BURST);
2409	int node;
2410	struct kmem_cache_node *n;
2411
2412	if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs))
2413		return;
2414
2415	pr_warn("SLUB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
2416		nid, gfpflags, &gfpflags);
2417	pr_warn("  cache: %s, object size: %u, buffer size: %u, default order: %u, min order: %u\n",
2418		s->name, s->object_size, s->size, oo_order(s->oo),
2419		oo_order(s->min));
2420
2421	if (oo_order(s->min) > get_order(s->object_size))
2422		pr_warn("  %s debugging increased min order, use slub_debug=O to disable.\n",
2423			s->name);
2424
2425	for_each_kmem_cache_node(s, node, n) {
2426		unsigned long nr_slabs;
2427		unsigned long nr_objs;
2428		unsigned long nr_free;
2429
2430		nr_free  = count_partial(n, count_free);
2431		nr_slabs = node_nr_slabs(n);
2432		nr_objs  = node_nr_objs(n);
2433
2434		pr_warn("  node %d: slabs: %ld, objs: %ld, free: %ld\n",
2435			node, nr_slabs, nr_objs, nr_free);
2436	}
2437#endif
2438}
2439
2440static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
2441			int node, struct kmem_cache_cpu **pc)
2442{
2443	void *freelist;
2444	struct kmem_cache_cpu *c = *pc;
2445	struct page *page;
2446
 
 
2447	freelist = get_partial(s, flags, node, c);
2448
2449	if (freelist)
2450		return freelist;
2451
2452	page = new_slab(s, flags, node);
2453	if (page) {
2454		c = raw_cpu_ptr(s->cpu_slab);
2455		if (c->page)
2456			flush_slab(s, c);
2457
2458		/*
2459		 * No other reference to the page yet so we can
2460		 * muck around with it freely without cmpxchg
2461		 */
2462		freelist = page->freelist;
2463		page->freelist = NULL;
2464
2465		stat(s, ALLOC_SLAB);
2466		c->page = page;
2467		*pc = c;
2468	} else
2469		freelist = NULL;
2470
2471	return freelist;
2472}
2473
2474static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
2475{
2476	if (unlikely(PageSlabPfmemalloc(page)))
2477		return gfp_pfmemalloc_allowed(gfpflags);
2478
2479	return true;
2480}
2481
2482/*
2483 * Check the page->freelist of a page and either transfer the freelist to the
2484 * per cpu freelist or deactivate the page.
2485 *
2486 * The page is still frozen if the return value is not NULL.
2487 *
2488 * If this function returns NULL then the page has been unfrozen.
2489 *
2490 * This function must be called with interrupts disabled.
2491 */
2492static inline void *get_freelist(struct kmem_cache *s, struct page *page)
2493{
2494	struct page new;
2495	unsigned long counters;
2496	void *freelist;
2497
2498	do {
2499		freelist = page->freelist;
2500		counters = page->counters;
2501
2502		new.counters = counters;
2503		VM_BUG_ON(!new.frozen);
2504
2505		new.inuse = page->objects;
2506		new.frozen = freelist != NULL;
2507
2508	} while (!__cmpxchg_double_slab(s, page,
2509		freelist, counters,
2510		NULL, new.counters,
2511		"get_freelist"));
2512
2513	return freelist;
2514}
2515
2516/*
2517 * Slow path. The lockless freelist is empty or we need to perform
2518 * debugging duties.
2519 *
2520 * Processing is still very fast if new objects have been freed to the
2521 * regular freelist. In that case we simply take over the regular freelist
2522 * as the lockless freelist and zap the regular freelist.
2523 *
2524 * If that is not working then we fall back to the partial lists. We take the
2525 * first element of the freelist as the object to allocate now and move the
2526 * rest of the freelist to the lockless freelist.
2527 *
2528 * And if we were unable to get a new slab from the partial slab lists then
2529 * we need to allocate a new slab. This is the slowest path since it involves
2530 * a call to the page allocator and the setup of a new slab.
2531 *
2532 * Version of __slab_alloc to use when we know that interrupts are
2533 * already disabled (which is the case for bulk allocation).
2534 */
2535static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
2536			  unsigned long addr, struct kmem_cache_cpu *c)
2537{
2538	void *freelist;
2539	struct page *page;
2540
2541	page = c->page;
2542	if (!page)
2543		goto new_slab;
2544redo:
2545
2546	if (unlikely(!node_match(page, node))) {
2547		int searchnode = node;
2548
2549		if (node != NUMA_NO_NODE && !node_present_pages(node))
2550			searchnode = node_to_mem_node(node);
2551
2552		if (unlikely(!node_match(page, searchnode))) {
2553			stat(s, ALLOC_NODE_MISMATCH);
2554			deactivate_slab(s, page, c->freelist, c);
2555			goto new_slab;
2556		}
2557	}
2558
2559	/*
2560	 * By rights, we should be searching for a slab page that was
2561	 * PFMEMALLOC but right now, we are losing the pfmemalloc
2562	 * information when the page leaves the per-cpu allocator
2563	 */
2564	if (unlikely(!pfmemalloc_match(page, gfpflags))) {
2565		deactivate_slab(s, page, c->freelist, c);
2566		goto new_slab;
2567	}
2568
2569	/* must check again c->freelist in case of cpu migration or IRQ */
2570	freelist = c->freelist;
2571	if (freelist)
2572		goto load_freelist;
2573
2574	freelist = get_freelist(s, page);
2575
2576	if (!freelist) {
2577		c->page = NULL;
2578		stat(s, DEACTIVATE_BYPASS);
2579		goto new_slab;
2580	}
2581
2582	stat(s, ALLOC_REFILL);
2583
2584load_freelist:
2585	/*
2586	 * freelist is pointing to the list of objects to be used.
2587	 * page is pointing to the page from which the objects are obtained.
2588	 * That page must be frozen for per cpu allocations to work.
2589	 */
2590	VM_BUG_ON(!c->page->frozen);
2591	c->freelist = get_freepointer(s, freelist);
2592	c->tid = next_tid(c->tid);
2593	return freelist;
2594
2595new_slab:
2596
2597	if (slub_percpu_partial(c)) {
2598		page = c->page = slub_percpu_partial(c);
2599		slub_set_percpu_partial(c, page);
2600		stat(s, CPU_PARTIAL_ALLOC);
2601		goto redo;
2602	}
2603
2604	freelist = new_slab_objects(s, gfpflags, node, &c);
2605
2606	if (unlikely(!freelist)) {
2607		slab_out_of_memory(s, gfpflags, node);
2608		return NULL;
2609	}
2610
2611	page = c->page;
2612	if (likely(!kmem_cache_debug(s) && pfmemalloc_match(page, gfpflags)))
2613		goto load_freelist;
2614
2615	/* Only entered in the debug case */
2616	if (kmem_cache_debug(s) &&
2617			!alloc_debug_processing(s, page, freelist, addr))
2618		goto new_slab;	/* Slab failed checks. Next slab needed */
2619
2620	deactivate_slab(s, page, get_freepointer(s, freelist), c);
2621	return freelist;
2622}
2623
2624/*
2625 * Another one that disables interrupts and compensates for possible
2626 * cpu changes by refetching the per cpu area pointer.
2627 */
2628static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
2629			  unsigned long addr, struct kmem_cache_cpu *c)
2630{
2631	void *p;
2632	unsigned long flags;
2633
2634	local_irq_save(flags);
2635#ifdef CONFIG_PREEMPT
2636	/*
2637	 * We may have been preempted and rescheduled on a different
2638	 * cpu before disabling interrupts. Need to reload cpu area
2639	 * pointer.
2640	 */
2641	c = this_cpu_ptr(s->cpu_slab);
2642#endif
2643
2644	p = ___slab_alloc(s, gfpflags, node, addr, c);
2645	local_irq_restore(flags);
2646	return p;
2647}
2648
2649/*
2650 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
2651 * have the fastpath folded into their functions. So no function call
2652 * overhead for requests that can be satisfied on the fastpath.
2653 *
2654 * The fastpath works by first checking if the lockless freelist can be used.
2655 * If not then __slab_alloc is called for slow processing.
2656 *
2657 * Otherwise we can simply pick the next object from the lockless free list.
2658 */
2659static __always_inline void *slab_alloc_node(struct kmem_cache *s,
2660		gfp_t gfpflags, int node, unsigned long addr)
2661{
2662	void *object;
2663	struct kmem_cache_cpu *c;
2664	struct page *page;
2665	unsigned long tid;
2666
2667	s = slab_pre_alloc_hook(s, gfpflags);
2668	if (!s)
2669		return NULL;
2670redo:
2671	/*
2672	 * Must read kmem_cache cpu data via this cpu ptr. Preemption is
2673	 * enabled. We may switch back and forth between cpus while
2674	 * reading from one cpu area. That does not matter as long
2675	 * as we end up on the original cpu again when doing the cmpxchg.
2676	 *
2677	 * We should guarantee that tid and kmem_cache are retrieved on
2678	 * the same cpu. It could be different if CONFIG_PREEMPT so we need
2679	 * to check if it is matched or not.
2680	 */
2681	do {
2682		tid = this_cpu_read(s->cpu_slab->tid);
2683		c = raw_cpu_ptr(s->cpu_slab);
2684	} while (IS_ENABLED(CONFIG_PREEMPT) &&
2685		 unlikely(tid != READ_ONCE(c->tid)));
2686
2687	/*
2688	 * Irqless object alloc/free algorithm used here depends on sequence
2689	 * of fetching cpu_slab's data. tid should be fetched before anything
2690	 * on c to guarantee that object and page associated with previous tid
2691	 * won't be used with the current tid. If we fetch tid first, object and
2692	 * page could be ones associated with the next tid and our alloc/free
2693	 * request will fail. In this case, we will retry. So, no problem.
2694	 */
2695	barrier();
2696
2697	/*
2698	 * The transaction ids are globally unique per cpu and per operation on
2699	 * a per cpu queue. Thus we can guarantee that the cmpxchg_double
2700	 * occurs on the right processor and that there was no operation on the
2701	 * linked list in between.
2702	 */
2703
2704	object = c->freelist;
2705	page = c->page;
2706	if (unlikely(!object || !node_match(page, node))) {
2707		object = __slab_alloc(s, gfpflags, node, addr, c);
2708		stat(s, ALLOC_SLOWPATH);
2709	} else {
2710		void *next_object = get_freepointer_safe(s, object);
2711
2712		/*
2713		 * The cmpxchg will only match if there was no additional
2714		 * operation and if we are on the right processor.
2715		 *
2716		 * The cmpxchg does the following atomically (without lock
2717		 * semantics!)
2718		 * 1. Relocate first pointer to the current per cpu area.
2719		 * 2. Verify that tid and freelist have not been changed
2720		 * 3. If they were not changed replace tid and freelist
2721		 *
2722		 * Since this is without lock semantics the protection is only
2723		 * against code executing on this cpu *not* from access by
2724		 * other cpus.
2725		 */
2726		if (unlikely(!this_cpu_cmpxchg_double(
2727				s->cpu_slab->freelist, s->cpu_slab->tid,
2728				object, tid,
2729				next_object, next_tid(tid)))) {
2730
2731			note_cmpxchg_failure("slab_alloc", s, tid);
2732			goto redo;
2733		}
2734		prefetch_freepointer(s, next_object);
2735		stat(s, ALLOC_FASTPATH);
2736	}
2737
2738	if (unlikely(gfpflags & __GFP_ZERO) && object)
 
 
2739		memset(object, 0, s->object_size);
2740
2741	slab_post_alloc_hook(s, gfpflags, 1, &object);
2742
2743	return object;
2744}
2745
2746static __always_inline void *slab_alloc(struct kmem_cache *s,
2747		gfp_t gfpflags, unsigned long addr)
2748{
2749	return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr);
2750}
2751
2752void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
2753{
2754	void *ret = slab_alloc(s, gfpflags, _RET_IP_);
2755
2756	trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size,
2757				s->size, gfpflags);
2758
2759	return ret;
2760}
2761EXPORT_SYMBOL(kmem_cache_alloc);
2762
2763#ifdef CONFIG_TRACING
2764void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
2765{
2766	void *ret = slab_alloc(s, gfpflags, _RET_IP_);
2767	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
2768	kasan_kmalloc(s, ret, size, gfpflags);
2769	return ret;
2770}
2771EXPORT_SYMBOL(kmem_cache_alloc_trace);
2772#endif
2773
2774#ifdef CONFIG_NUMA
2775void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
2776{
2777	void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
2778
2779	trace_kmem_cache_alloc_node(_RET_IP_, ret,
2780				    s->object_size, s->size, gfpflags, node);
2781
2782	return ret;
2783}
2784EXPORT_SYMBOL(kmem_cache_alloc_node);
2785
2786#ifdef CONFIG_TRACING
2787void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
2788				    gfp_t gfpflags,
2789				    int node, size_t size)
2790{
2791	void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
2792
2793	trace_kmalloc_node(_RET_IP_, ret,
2794			   size, s->size, gfpflags, node);
2795
2796	kasan_kmalloc(s, ret, size, gfpflags);
2797	return ret;
2798}
2799EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
2800#endif
2801#endif
2802
2803/*
2804 * Slow path handling. This may still be called frequently since objects
2805 * have a longer lifetime than the cpu slabs in most processing loads.
2806 *
2807 * So we still attempt to reduce cache line usage. Just take the slab
2808 * lock and free the item. If there is no additional partial page
2809 * handling required then we can return immediately.
2810 */
2811static void __slab_free(struct kmem_cache *s, struct page *page,
2812			void *head, void *tail, int cnt,
2813			unsigned long addr)
2814
2815{
2816	void *prior;
2817	int was_frozen;
2818	struct page new;
2819	unsigned long counters;
2820	struct kmem_cache_node *n = NULL;
2821	unsigned long uninitialized_var(flags);
2822
2823	stat(s, FREE_SLOWPATH);
2824
2825	if (kmem_cache_debug(s) &&
2826	    !free_debug_processing(s, page, head, tail, cnt, addr))
2827		return;
2828
2829	do {
2830		if (unlikely(n)) {
2831			spin_unlock_irqrestore(&n->list_lock, flags);
2832			n = NULL;
2833		}
2834		prior = page->freelist;
2835		counters = page->counters;
2836		set_freepointer(s, tail, prior);
2837		new.counters = counters;
2838		was_frozen = new.frozen;
2839		new.inuse -= cnt;
2840		if ((!new.inuse || !prior) && !was_frozen) {
2841
2842			if (kmem_cache_has_cpu_partial(s) && !prior) {
2843
2844				/*
2845				 * Slab was on no list before and will be
2846				 * partially empty
2847				 * We can defer the list move and instead
2848				 * freeze it.
2849				 */
2850				new.frozen = 1;
2851
2852			} else { /* Needs to be taken off a list */
2853
2854				n = get_node(s, page_to_nid(page));
2855				/*
2856				 * Speculatively acquire the list_lock.
2857				 * If the cmpxchg does not succeed then we may
2858				 * drop the list_lock without any processing.
2859				 *
2860				 * Otherwise the list_lock will synchronize with
2861				 * other processors updating the list of slabs.
2862				 */
2863				spin_lock_irqsave(&n->list_lock, flags);
2864
2865			}
2866		}
2867
2868	} while (!cmpxchg_double_slab(s, page,
2869		prior, counters,
2870		head, new.counters,
2871		"__slab_free"));
2872
2873	if (likely(!n)) {
2874
2875		/*
2876		 * If we just froze the page then put it onto the
2877		 * per cpu partial list.
2878		 */
2879		if (new.frozen && !was_frozen) {
2880			put_cpu_partial(s, page, 1);
2881			stat(s, CPU_PARTIAL_FREE);
2882		}
2883		/*
2884		 * The list lock was not taken therefore no list
2885		 * activity can be necessary.
2886		 */
2887		if (was_frozen)
2888			stat(s, FREE_FROZEN);
2889		return;
2890	}
2891
2892	if (unlikely(!new.inuse && n->nr_partial >= s->min_partial))
2893		goto slab_empty;
2894
2895	/*
2896	 * Objects left in the slab. If it was not on the partial list before
2897	 * then add it.
2898	 */
2899	if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
2900		if (kmem_cache_debug(s))
2901			remove_full(s, n, page);
2902		add_partial(n, page, DEACTIVATE_TO_TAIL);
2903		stat(s, FREE_ADD_PARTIAL);
2904	}
2905	spin_unlock_irqrestore(&n->list_lock, flags);
2906	return;
2907
2908slab_empty:
2909	if (prior) {
2910		/*
2911		 * Slab on the partial list.
2912		 */
2913		remove_partial(n, page);
2914		stat(s, FREE_REMOVE_PARTIAL);
2915	} else {
2916		/* Slab must be on the full list */
2917		remove_full(s, n, page);
2918	}
2919
2920	spin_unlock_irqrestore(&n->list_lock, flags);
2921	stat(s, FREE_SLAB);
2922	discard_slab(s, page);
2923}
2924
2925/*
2926 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
2927 * can perform fastpath freeing without additional function calls.
2928 *
2929 * The fastpath is only possible if we are freeing to the current cpu slab
2930 * of this processor. This is typically the case if we have just allocated
2931 * the item before.
2932 *
2933 * If fastpath is not possible then fall back to __slab_free where we deal
2934 * with all sorts of special processing.
2935 *
2936 * Bulk free of a freelist with several objects (all pointing to the
2937 * same page) is possible by specifying head and tail pointers, plus an object
2938 * count (cnt). Bulk free is indicated by the tail pointer being set.
2939 */
2940static __always_inline void do_slab_free(struct kmem_cache *s,
2941				struct page *page, void *head, void *tail,
2942				int cnt, unsigned long addr)
2943{
2944	void *tail_obj = tail ? : head;
2945	struct kmem_cache_cpu *c;
2946	unsigned long tid;
2947redo:
2948	/*
2949	 * Determine the current cpu's per cpu slab.
2950	 * The cpu may change afterward. However that does not matter since
2951	 * data is retrieved via this pointer. If we are on the same cpu
2952	 * during the cmpxchg then the free will succeed.
2953	 */
2954	do {
2955		tid = this_cpu_read(s->cpu_slab->tid);
2956		c = raw_cpu_ptr(s->cpu_slab);
2957	} while (IS_ENABLED(CONFIG_PREEMPT) &&
2958		 unlikely(tid != READ_ONCE(c->tid)));
2959
2960	/* Same with comment on barrier() in slab_alloc_node() */
2961	barrier();
2962
2963	if (likely(page == c->page)) {
2964		set_freepointer(s, tail_obj, c->freelist);
2965
2966		if (unlikely(!this_cpu_cmpxchg_double(
2967				s->cpu_slab->freelist, s->cpu_slab->tid,
2968				c->freelist, tid,
2969				head, next_tid(tid)))) {
2970
2971			note_cmpxchg_failure("slab_free", s, tid);
2972			goto redo;
2973		}
2974		stat(s, FREE_FASTPATH);
2975	} else
2976		__slab_free(s, page, head, tail_obj, cnt, addr);
2977
2978}
2979
2980static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
2981				      void *head, void *tail, int cnt,
2982				      unsigned long addr)
2983{
2984	/*
2985	 * With KASAN enabled slab_free_freelist_hook modifies the freelist
2986	 * to remove objects, whose reuse must be delayed.
2987	 */
2988	if (slab_free_freelist_hook(s, &head, &tail))
2989		do_slab_free(s, page, head, tail, cnt, addr);
2990}
2991
2992#ifdef CONFIG_KASAN
2993void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
2994{
2995	do_slab_free(cache, virt_to_head_page(x), x, NULL, 1, addr);
2996}
2997#endif
2998
2999void kmem_cache_free(struct kmem_cache *s, void *x)
3000{
3001	s = cache_from_obj(s, x);
3002	if (!s)
3003		return;
3004	slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_);
3005	trace_kmem_cache_free(_RET_IP_, x);
3006}
3007EXPORT_SYMBOL(kmem_cache_free);
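/*
 * Minimal usage sketch for kmem_cache_alloc()/kmem_cache_free() above.
 * The cache pointer, struct and function names are hypothetical and
 * not part of this file.
 */
#if 0	/* illustrative example */
struct foo {
	unsigned long id;
	char name[32];
};

static struct kmem_cache *foo_cachep;

static int __init foo_cache_init(void)
{
	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
				       SLAB_HWCACHE_ALIGN, NULL);
	return foo_cachep ? 0 : -ENOMEM;
}

static void foo_demo(void)
{
	struct foo *f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);

	if (!f)
		return;
	f->id = 1;
	kmem_cache_free(foo_cachep, f);
}
#endif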
3008
3009struct detached_freelist {
3010	struct page *page;
3011	void *tail;
3012	void *freelist;
3013	int cnt;
3014	struct kmem_cache *s;
3015};
3016
3017/*
3018 * This function progressively scans the array of free objects (with
3019 * a limited look ahead) and extracts objects belonging to the same
3020 * page.  It builds a detached freelist directly within the given
3021 * page/objects.  This can happen without any need for
3022 * synchronization, because the objects are owned by the running process.
3023 * The freelist is built up as a single linked list in the objects.
3024 * The idea is that this detached freelist can then be bulk
3025 * transferred to the real freelist(s), requiring only a single
3026 * synchronization primitive.  Look ahead in the array is limited for
3027 * performance reasons.
3028 */
3029static inline
3030int build_detached_freelist(struct kmem_cache *s, size_t size,
3031			    void **p, struct detached_freelist *df)
3032{
3033	size_t first_skipped_index = 0;
3034	int lookahead = 3;
3035	void *object;
3036	struct page *page;
3037
3038	/* Always re-init detached_freelist */
3039	df->page = NULL;
3040
3041	do {
3042		object = p[--size];
3043		/* Do we need !ZERO_OR_NULL_PTR(object) here? (for kfree) */
3044	} while (!object && size);
3045
3046	if (!object)
3047		return 0;
3048
3049	page = virt_to_head_page(object);
3050	if (!s) {
3051		/* Handle kmalloc'ed objects */
3052		if (unlikely(!PageSlab(page))) {
3053			BUG_ON(!PageCompound(page));
3054			kfree_hook(object);
3055			__free_pages(page, compound_order(page));
3056			p[size] = NULL; /* mark object processed */
3057			return size;
3058		}
3059		/* Derive kmem_cache from object */
3060		df->s = page->slab_cache;
3061	} else {
3062		df->s = cache_from_obj(s, object); /* Support for memcg */
3063	}
3064
3065	/* Start new detached freelist */
3066	df->page = page;
3067	set_freepointer(df->s, object, NULL);
3068	df->tail = object;
3069	df->freelist = object;
3070	p[size] = NULL; /* mark object processed */
3071	df->cnt = 1;
3072
3073	while (size) {
3074		object = p[--size];
3075		if (!object)
3076			continue; /* Skip processed objects */
3077
3078		/* df->page is always set at this point */
3079		if (df->page == virt_to_head_page(object)) {
3080			/* Opportunistically build the freelist */
3081			set_freepointer(df->s, object, df->freelist);
3082			df->freelist = object;
3083			df->cnt++;
3084			p[size] = NULL; /* mark object processed */
3085
3086			continue;
3087		}
3088
3089		/* Limit look ahead search */
3090		if (!--lookahead)
3091			break;
3092
3093		if (!first_skipped_index)
3094			first_skipped_index = size + 1;
3095	}
3096
3097	return first_skipped_index;
3098}
3099
3100/* Note that interrupts must be enabled when calling this function. */
3101void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
3102{
3103	if (WARN_ON(!size))
3104		return;
3105
3106	do {
3107		struct detached_freelist df;
3108
3109		size = build_detached_freelist(s, size, p, &df);
3110		if (!df.page)
3111			continue;
3112
3113		slab_free(df.s, df.page, df.freelist, df.tail, df.cnt,_RET_IP_);
3114	} while (likely(size));
3115}
3116EXPORT_SYMBOL(kmem_cache_free_bulk);
3117
3118/* Note that interrupts must be enabled when calling this function. */
3119int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
3120			  void **p)
3121{
3122	struct kmem_cache_cpu *c;
3123	int i;
3124
3125	/* memcg and kmem_cache debug support */
3126	s = slab_pre_alloc_hook(s, flags);
3127	if (unlikely(!s))
3128		return false;
3129	/*
3130	 * Drain objects in the per cpu slab, while disabling local
3131	 * IRQs, which protects against PREEMPT and interrupt
3132	 * handlers invoking the normal fastpath.
3133	 */
3134	local_irq_disable();
3135	c = this_cpu_ptr(s->cpu_slab);
3136
3137	for (i = 0; i < size; i++) {
3138		void *object = c->freelist;
3139
3140		if (unlikely(!object)) {
3141			/*
3142			 * Invoking the slow path likely has the side-effect
3143			 * of re-populating the per CPU c->freelist
3144			 */
3145			p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
3146					    _RET_IP_, c);
3147			if (unlikely(!p[i]))
3148				goto error;
3149
3150			c = this_cpu_ptr(s->cpu_slab);
 
 
3151			continue; /* goto for-loop */
3152		}
3153		c->freelist = get_freepointer(s, object);
3154		p[i] = object;
 
3155	}
3156	c->tid = next_tid(c->tid);
3157	local_irq_enable();
3158
3159	/* Clear memory outside IRQ disabled fastpath loop */
3160	if (unlikely(flags & __GFP_ZERO)) {
3161		int j;
3162
3163		for (j = 0; j < i; j++)
3164			memset(p[j], 0, s->object_size);
3165	}
3166
3167	/* memcg and kmem_cache debug support */
3168	slab_post_alloc_hook(s, flags, size, p);
3169	return i;
3170error:
3171	local_irq_enable();
3172	slab_post_alloc_hook(s, flags, i, p);
3173	__kmem_cache_free_bulk(s, i, p);
3174	return 0;
3175}
3176EXPORT_SYMBOL(kmem_cache_alloc_bulk);
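/*
 * Usage sketch for the two bulk interfaces above (hypothetical cache
 * and array size, not part of this file): kmem_cache_alloc_bulk()
 * returns the number of objects allocated (0 on failure), and the same
 * array can be handed back to kmem_cache_free_bulk() in one call.
 */
#if 0	/* illustrative example */
static void foo_bulk_demo(struct kmem_cache *cachep)
{
	void *objs[16];
	int nr;

	nr = kmem_cache_alloc_bulk(cachep, GFP_KERNEL, ARRAY_SIZE(objs), objs);
	if (!nr)
		return;

	/* ... use objs[0] .. objs[nr - 1] ... */

	kmem_cache_free_bulk(cachep, nr, objs);
}
#endif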
3177
3178
3179/*
3180 * Object placement in a slab is made very easy because we always start at
3181 * offset 0. If we tune the size of the object to the alignment then we can
3182 * get the required alignment by putting one properly sized object after
3183 * another.
3184 *
3185 * Notice that the allocation order determines the sizes of the per cpu
3186 * caches. Each processor always has one slab available for allocations.
3187 * Increasing the allocation order reduces the number of times that slabs
3188 * must be moved on and off the partial lists and is therefore a factor in
3189 * locking overhead.
3190 */
3191
3192/*
3193 * Minimum / Maximum order of slab pages. This influences locking overhead
3194 * and slab fragmentation. A higher order reduces the number of partial slabs
3195 * and increases the number of allocations possible without having to
3196 * take the list_lock.
3197 */
3198static unsigned int slub_min_order;
3199static unsigned int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
3200static unsigned int slub_min_objects;
3201
3202/*
3203 * Calculate the order of allocation given a slab object size.
3204 *
3205 * The order of allocation has significant impact on performance and other
3206 * system components. Generally order 0 allocations should be preferred since
3207 * order 0 does not cause fragmentation in the page allocator. Larger objects
3208 * can be problematic to put into order 0 slabs because there may be too much
3209 * unused space left. We go to a higher order if more than 1/16th of the slab
3210 * would be wasted.
3211 *
3212 * In order to reach satisfactory performance we must ensure that a minimum
3213 * number of objects is in one slab. Otherwise we may generate too much
3214 * activity on the partial lists which requires taking the list_lock. This is
3215 * less a concern for large slabs though which are rarely used.
3216 *
3217 * slub_max_order specifies the order where we begin to stop considering the
3218 * number of objects in a slab as critical. If we reach slub_max_order then
3219 * we try to keep the page order as low as possible. So we accept more waste
3220 * of space in favor of a small page order.
3221 *
3222 * Higher order allocations also allow the placement of more objects in a
3223 * slab and thereby reduce object handling overhead. If the user has
3224 * requested a higher minimum order then we start with that one instead of
3225 * the smallest order which will fit the object.
3226 */
3227static inline unsigned int slab_order(unsigned int size,
3228		unsigned int min_objects, unsigned int max_order,
3229		unsigned int fract_leftover, unsigned int reserved)
3230{
3231	unsigned int min_order = slub_min_order;
3232	unsigned int order;
3233
3234	if (order_objects(min_order, size, reserved) > MAX_OBJS_PER_PAGE)
3235		return get_order(size * MAX_OBJS_PER_PAGE) - 1;
3236
3237	for (order = max(min_order, (unsigned int)get_order(min_objects * size + reserved));
3238			order <= max_order; order++) {
3239
3240		unsigned int slab_size = (unsigned int)PAGE_SIZE << order;
3241		unsigned int rem;
3242
3243		rem = (slab_size - reserved) % size;
3244
3245		if (rem <= slab_size / fract_leftover)
3246			break;
3247	}
3248
3249	return order;
3250}
3251
3252static inline int calculate_order(unsigned int size, unsigned int reserved)
3253{
3254	unsigned int order;
3255	unsigned int min_objects;
3256	unsigned int max_objects;
3257
3258	/*
3259	 * Attempt to find best configuration for a slab. This
3260	 * works by first attempting to generate a layout with
3261	 * the best configuration and backing off gradually.
3262	 *
3263	 * First we increase the acceptable waste in a slab. Then
3264	 * we reduce the minimum objects required in a slab.
3265	 */
3266	min_objects = slub_min_objects;
3267	if (!min_objects)
3268		min_objects = 4 * (fls(nr_cpu_ids) + 1);
3269	max_objects = order_objects(slub_max_order, size, reserved);
3270	min_objects = min(min_objects, max_objects);
3271
3272	while (min_objects > 1) {
3273		unsigned int fraction;
3274
3275		fraction = 16;
3276		while (fraction >= 4) {
3277			order = slab_order(size, min_objects,
3278					slub_max_order, fraction, reserved);
3279			if (order <= slub_max_order)
3280				return order;
3281			fraction /= 2;
3282		}
3283		min_objects--;
3284	}
3285
3286	/*
3287	 * We were unable to place multiple objects in a slab. Now
3288	 * let's see if we can place a single object there.
3289	 */
3290	order = slab_order(size, 1, slub_max_order, 1, reserved);
3291	if (order <= slub_max_order)
3292		return order;
3293
3294	/*
3295	 * Doh this slab cannot be placed using slub_max_order.
3296	 */
3297	order = slab_order(size, 1, MAX_ORDER, 1, reserved);
3298	if (order < MAX_ORDER)
3299		return order;
3300	return -ENOSYS;
3301}
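/*
 * Worked example (default tunables, 4K pages, reserved == 0): for a
 * hypothetical 700 byte object on a 4-cpu machine, min_objects starts
 * at 4 * (fls(4) + 1) = 16.  The first pass uses fraction = 16 and
 * slab_order() starts scanning at get_order(16 * 700) = 2.  An order-2
 * slab is 16384 bytes, holds 23 objects and wastes 16384 % 700 = 284
 * bytes, which is within the allowed 16384 / 16 = 1024, so order 2 is
 * accepted and calculate_order() returns it.
 */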
3302
3303static void
3304init_kmem_cache_node(struct kmem_cache_node *n)
3305{
3306	n->nr_partial = 0;
3307	spin_lock_init(&n->list_lock);
3308	INIT_LIST_HEAD(&n->partial);
3309#ifdef CONFIG_SLUB_DEBUG
3310	atomic_long_set(&n->nr_slabs, 0);
3311	atomic_long_set(&n->total_objects, 0);
3312	INIT_LIST_HEAD(&n->full);
3313#endif
3314}
3315
3316static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
3317{
3318	BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
3319			KMALLOC_SHIFT_HIGH * sizeof(struct kmem_cache_cpu));
3320
3321	/*
3322	 * Must align to double word boundary for the double cmpxchg
3323	 * instructions to work; see __pcpu_double_call_return_bool().
3324	 */
3325	s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
3326				     2 * sizeof(void *));
3327
3328	if (!s->cpu_slab)
3329		return 0;
3330
3331	init_kmem_cache_cpus(s);
3332
3333	return 1;
3334}
3335
3336static struct kmem_cache *kmem_cache_node;
3337
3338/*
3339 * No kmalloc_node yet so do it by hand. We know that this is the first
3340 * slab on the node for this slabcache. There are no concurrent accesses
3341 * possible.
3342 *
3343 * Note that this function only works on the kmem_cache_node
3344 * when allocating for the kmem_cache_node. This is used for bootstrapping
3345 * memory on a fresh node that has no slab structures yet.
3346 */
3347static void early_kmem_cache_node_alloc(int node)
3348{
3349	struct page *page;
3350	struct kmem_cache_node *n;
3351
3352	BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
3353
3354	page = new_slab(kmem_cache_node, GFP_NOWAIT, node);
3355
3356	BUG_ON(!page);
3357	if (page_to_nid(page) != node) {
3358		pr_err("SLUB: Unable to allocate memory from node %d\n", node);
3359		pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n");
3360	}
3361
3362	n = page->freelist;
3363	BUG_ON(!n);
3364	page->freelist = get_freepointer(kmem_cache_node, n);
3365	page->inuse = 1;
3366	page->frozen = 0;
3367	kmem_cache_node->node[node] = n;
3368#ifdef CONFIG_SLUB_DEBUG
3369	init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
3370	init_tracking(kmem_cache_node, n);
3371#endif
3372	kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node),
3373		      GFP_KERNEL);
3374	init_kmem_cache_node(n);
3375	inc_slabs_node(kmem_cache_node, node, page->objects);
3376
3377	/*
3378	 * No locks need to be taken here as it has just been
3379	 * initialized and there is no concurrent access.
3380	 */
3381	__add_partial(n, page, DEACTIVATE_TO_HEAD);
3382}
3383
3384static void free_kmem_cache_nodes(struct kmem_cache *s)
3385{
3386	int node;
3387	struct kmem_cache_node *n;
3388
3389	for_each_kmem_cache_node(s, node, n) {
3390		s->node[node] = NULL;
3391		kmem_cache_free(kmem_cache_node, n);
3392	}
3393}
3394
3395void __kmem_cache_release(struct kmem_cache *s)
3396{
3397	cache_random_seq_destroy(s);
3398	free_percpu(s->cpu_slab);
3399	free_kmem_cache_nodes(s);
3400}
3401
3402static int init_kmem_cache_nodes(struct kmem_cache *s)
3403{
3404	int node;
3405
3406	for_each_node_state(node, N_NORMAL_MEMORY) {
3407		struct kmem_cache_node *n;
3408
3409		if (slab_state == DOWN) {
3410			early_kmem_cache_node_alloc(node);
3411			continue;
3412		}
3413		n = kmem_cache_alloc_node(kmem_cache_node,
3414						GFP_KERNEL, node);
3415
3416		if (!n) {
3417			free_kmem_cache_nodes(s);
3418			return 0;
3419		}
3420
3421		init_kmem_cache_node(n);
3422		s->node[node] = n;
3423	}
3424	return 1;
3425}
3426
3427static void set_min_partial(struct kmem_cache *s, unsigned long min)
3428{
3429	if (min < MIN_PARTIAL)
3430		min = MIN_PARTIAL;
3431	else if (min > MAX_PARTIAL)
3432		min = MAX_PARTIAL;
3433	s->min_partial = min;
3434}
3435
3436static void set_cpu_partial(struct kmem_cache *s)
3437{
3438#ifdef CONFIG_SLUB_CPU_PARTIAL
3439	/*
3440	 * cpu_partial determines the maximum number of objects kept in the
3441	 * per cpu partial lists of a processor.
3442	 *
3443	 * Per cpu partial lists mainly contain slabs that just have one
3444	 * object freed. If they are used for allocation then they can be
3445	 * filled up again with minimal effort. The slab will never hit the
3446	 * per node partial lists and therefore no locking will be required.
3447	 *
3448	 * This setting also determines
3449	 *
3450	 * A) The number of objects from per cpu partial slabs dumped to the
3451	 *    per node list when we reach the limit.
3452	 * B) The number of objects in cpu partial slabs to extract from the
3453	 *    per node list when we run out of per cpu objects. We only fetch
3454	 *    50% to keep some capacity around for frees.
3455	 */
3456	if (!kmem_cache_has_cpu_partial(s))
3457		s->cpu_partial = 0;
3458	else if (s->size >= PAGE_SIZE)
3459		s->cpu_partial = 2;
3460	else if (s->size >= 1024)
3461		s->cpu_partial = 6;
3462	else if (s->size >= 256)
3463		s->cpu_partial = 13;
3464	else
3465		s->cpu_partial = 30;
3466#endif
3467}
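/*
 * Example (illustrative): a cache with s->size == 512 falls into the
 * ">= 256" bucket above, so s->cpu_partial == 13.  put_cpu_partial()
 * then drains the per cpu partial list to the node list once more than
 * 13 free objects have accumulated, and get_partial_node() stops
 * refilling it once it holds more than 13 / 2 objects.
 */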
3468
3469/*
3470 * calculate_sizes() determines the order and the distribution of data within
3471 * a slab object.
3472 */
3473static int calculate_sizes(struct kmem_cache *s, int forced_order)
3474{
3475	slab_flags_t flags = s->flags;
3476	unsigned int size = s->object_size;
3477	unsigned int order;
3478
3479	/*
3480	 * Round up object size to the next word boundary. We can only
3481	 * place the free pointer at word boundaries and this determines
3482	 * the possible location of the free pointer.
3483	 */
3484	size = ALIGN(size, sizeof(void *));
3485
3486#ifdef CONFIG_SLUB_DEBUG
3487	/*
3488	 * Determine if we can poison the object itself. If the user of
3489	 * the slab may touch the object after free or before allocation
3490	 * then we should never poison the object itself.
3491	 */
3492	if ((flags & SLAB_POISON) && !(flags & SLAB_TYPESAFE_BY_RCU) &&
3493			!s->ctor)
3494		s->flags |= __OBJECT_POISON;
3495	else
3496		s->flags &= ~__OBJECT_POISON;
3497
3498
3499	/*
3500	 * If we are Redzoning then check if there is some space between the
3501	 * end of the object and the free pointer. If not then add an
3502	 * additional word to have some bytes to store Redzone information.
3503	 */
3504	if ((flags & SLAB_RED_ZONE) && size == s->object_size)
3505		size += sizeof(void *);
3506#endif
3507
3508	/*
3509	 * With that we have determined the number of bytes in actual use
3510	 * by the object. This is the potential offset to the free pointer.
3511	 */
3512	s->inuse = size;
3513
3514	if (((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
3515		s->ctor)) {
3516		/*
3517		 * Relocate free pointer after the object if it is not
3518		 * permitted to overwrite the first word of the object on
3519		 * kmem_cache_free.
3520		 *
3521		 * This is the case if we do RCU, have a constructor or
3522		 * destructor or are poisoning the objects.
3523		 */
3524		s->offset = size;
3525		size += sizeof(void *);
3526	}
3527
3528#ifdef CONFIG_SLUB_DEBUG
3529	if (flags & SLAB_STORE_USER)
3530		/*
3531		 * Need to store information about allocs and frees after
3532		 * the object.
3533		 */
3534		size += 2 * sizeof(struct track);
3535#endif
3536
3537	kasan_cache_create(s, &size, &s->flags);
3538#ifdef CONFIG_SLUB_DEBUG
3539	if (flags & SLAB_RED_ZONE) {
3540		/*
3541		 * Add some empty padding so that we can catch
3542		 * overwrites from earlier objects rather than let
3543		 * tracking information or the free pointer be
3544		 * corrupted if a user writes before the start
3545		 * of the object.
3546		 */
3547		size += sizeof(void *);
3548
3549		s->red_left_pad = sizeof(void *);
3550		s->red_left_pad = ALIGN(s->red_left_pad, s->align);
3551		size += s->red_left_pad;
3552	}
3553#endif
3554
3555	/*
3556	 * SLUB stores one object immediately after another beginning from
3557	 * offset 0. In order to align the objects we have to simply size
3558	 * each object to conform to the alignment.
3559	 */
3560	size = ALIGN(size, s->align);
3561	s->size = size;
3562	if (forced_order >= 0)
3563		order = forced_order;
3564	else
3565		order = calculate_order(size, s->reserved);
3566
3567	if ((int)order < 0)
3568		return 0;
3569
3570	s->allocflags = 0;
3571	if (order)
3572		s->allocflags |= __GFP_COMP;
3573
3574	if (s->flags & SLAB_CACHE_DMA)
3575		s->allocflags |= GFP_DMA;
3576
3577	if (s->flags & SLAB_RECLAIM_ACCOUNT)
3578		s->allocflags |= __GFP_RECLAIMABLE;
3579
3580	/*
3581	 * Determine the number of objects per slab
3582	 */
3583	s->oo = oo_make(order, size, s->reserved);
3584	s->min = oo_make(get_order(size), size, s->reserved);
3585	if (oo_objects(s->oo) > oo_objects(s->max))
3586		s->max = s->oo;
3587
3588	return !!oo_objects(s->oo);
3589}
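/*
 * Worked layout example (illustrative, no debug options): for a cache
 * with object_size == 24, no constructor and no SLAB_TYPESAFE_BY_RCU,
 * the size is already word aligned, the free pointer overlays the
 * first word of a free object (s->offset == 0) and no red zone or
 * tracking space is added.  With s->align == 8 the final s->size stays
 * 24, so an order-0 slab packs 4096 / 24 = 170 objects.
 */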
3590
3591static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
3592{
3593	s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor);
3594	s->reserved = 0;
3595#ifdef CONFIG_SLAB_FREELIST_HARDENED
3596	s->random = get_random_long();
3597#endif
3598
3599	if (need_reserve_slab_rcu && (s->flags & SLAB_TYPESAFE_BY_RCU))
3600		s->reserved = sizeof(struct rcu_head);
3601
3602	if (!calculate_sizes(s, -1))
3603		goto error;
3604	if (disable_higher_order_debug) {
3605		/*
3606		 * Disable debugging flags that store metadata if the min slab
3607		 * order increased.
3608		 */
3609		if (get_order(s->size) > get_order(s->object_size)) {
3610			s->flags &= ~DEBUG_METADATA_FLAGS;
3611			s->offset = 0;
3612			if (!calculate_sizes(s, -1))
3613				goto error;
3614		}
3615	}
3616
3617#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
3618    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
3619	if (system_has_cmpxchg_double() && (s->flags & SLAB_NO_CMPXCHG) == 0)
3620		/* Enable fast mode */
3621		s->flags |= __CMPXCHG_DOUBLE;
3622#endif
3623
3624	/*
3625	 * The larger the object size is, the more pages we want on the partial
3626	 * list to avoid pounding the page allocator excessively.
3627	 */
3628	set_min_partial(s, ilog2(s->size) / 2);
3629
3630	set_cpu_partial(s);
3631
3632#ifdef CONFIG_NUMA
3633	s->remote_node_defrag_ratio = 1000;
3634#endif
3635
3636	/* Initialize the pre-computed randomized freelist if slab is up */
3637	if (slab_state >= UP) {
3638		if (init_cache_random_seq(s))
3639			goto error;
3640	}
3641
3642	if (!init_kmem_cache_nodes(s))
3643		goto error;
3644
3645	if (alloc_kmem_cache_cpus(s))
3646		return 0;
3647
3648	free_kmem_cache_nodes(s);
3649error:
3650	if (flags & SLAB_PANIC)
3651		panic("Cannot create slab %s size=%u realsize=%u order=%u offset=%u flags=%lx\n",
3652		      s->name, s->size, s->size,
3653		      oo_order(s->oo), s->offset, (unsigned long)flags);
3654	return -EINVAL;
3655}
3656
3657static void list_slab_objects(struct kmem_cache *s, struct page *page,
3658							const char *text)
3659{
3660#ifdef CONFIG_SLUB_DEBUG
3661	void *addr = page_address(page);
3662	void *p;
3663	unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) *
3664				     sizeof(long), GFP_ATOMIC);
3665	if (!map)
3666		return;
3667	slab_err(s, page, text, s->name);
3668	slab_lock(page);
3669
3670	get_map(s, page, map);
3671	for_each_object(p, s, addr, page->objects) {
3672
3673		if (!test_bit(slab_index(p, s, addr), map)) {
3674			pr_err("INFO: Object 0x%p @offset=%tu\n", p, p - addr);
3675			print_tracking(s, p);
3676		}
3677	}
3678	slab_unlock(page);
3679	kfree(map);
3680#endif
3681}
3682
3683/*
3684 * Attempt to free all partial slabs on a node.
3685 * This is called from __kmem_cache_shutdown(). We must take list_lock
3686 * because sysfs file might still access partial list after the shutdowning.
3687 */
3688static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
3689{
3690	LIST_HEAD(discard);
3691	struct page *page, *h;
3692
3693	BUG_ON(irqs_disabled());
3694	spin_lock_irq(&n->list_lock);
3695	list_for_each_entry_safe(page, h, &n->partial, lru) {
3696		if (!page->inuse) {
3697			remove_partial(n, page);
3698			list_add(&page->lru, &discard);
3699		} else {
3700			list_slab_objects(s, page,
3701			"Objects remaining in %s on __kmem_cache_shutdown()");
3702		}
3703	}
3704	spin_unlock_irq(&n->list_lock);
3705
3706	list_for_each_entry_safe(page, h, &discard, lru)
3707		discard_slab(s, page);
3708}
3709
3710bool __kmem_cache_empty(struct kmem_cache *s)
3711{
3712	int node;
3713	struct kmem_cache_node *n;
3714
3715	for_each_kmem_cache_node(s, node, n)
3716		if (n->nr_partial || slabs_node(s, node))
3717			return false;
3718	return true;
3719}
3720
3721/*
3722 * Release all resources used by a slab cache.
3723 */
3724int __kmem_cache_shutdown(struct kmem_cache *s)
3725{
3726	int node;
3727	struct kmem_cache_node *n;
3728
3729	flush_all(s);
3730	/* Attempt to free all objects */
3731	for_each_kmem_cache_node(s, node, n) {
3732		free_partial(s, n);
3733		if (n->nr_partial || slabs_node(s, node))
3734			return 1;
3735	}
3736	sysfs_slab_remove(s);
3737	return 0;
3738}
3739
3740/********************************************************************
3741 *		Kmalloc subsystem
3742 *******************************************************************/
3743
3744static int __init setup_slub_min_order(char *str)
3745{
3746	get_option(&str, (int *)&slub_min_order);
3747
3748	return 1;
3749}
3750
3751__setup("slub_min_order=", setup_slub_min_order);
3752
3753static int __init setup_slub_max_order(char *str)
3754{
3755	get_option(&str, (int *)&slub_max_order);
3756	slub_max_order = min(slub_max_order, (unsigned int)MAX_ORDER - 1);
3757
3758	return 1;
3759}
3760
3761__setup("slub_max_order=", setup_slub_max_order);
3762
3763static int __init setup_slub_min_objects(char *str)
3764{
3765	get_option(&str, (int *)&slub_min_objects);
3766
3767	return 1;
3768}
3769
3770__setup("slub_min_objects=", setup_slub_min_objects);
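/*
 * Example (illustrative): the three knobs above are kernel command
 * line parameters.  Booting with
 *
 *	slub_min_objects=8 slub_max_order=1
 *
 * asks calculate_order() to aim for at least 8 objects per slab while
 * preferring slab pages no larger than order 1 (8K with 4K pages).
 */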
3771
3772void *__kmalloc(size_t size, gfp_t flags)
3773{
3774	struct kmem_cache *s;
3775	void *ret;
3776
3777	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
3778		return kmalloc_large(size, flags);
3779
3780	s = kmalloc_slab(size, flags);
3781
3782	if (unlikely(ZERO_OR_NULL_PTR(s)))
3783		return s;
3784
3785	ret = slab_alloc(s, flags, _RET_IP_);
3786
3787	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
3788
3789	kasan_kmalloc(s, ret, size, flags);
3790
3791	return ret;
3792}
3793EXPORT_SYMBOL(__kmalloc);
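/*
 * Usage sketch (hypothetical helper, not part of this file): requests
 * up to KMALLOC_MAX_CACHE_SIZE are served from the kmalloc caches via
 * slab_alloc() above; larger requests go straight to the page
 * allocator through kmalloc_large().
 */
#if 0	/* illustrative example */
static void *foo_buf_alloc(size_t len)
{
	/* kzalloc() is kmalloc() plus __GFP_ZERO */
	return kzalloc(len, GFP_KERNEL);
}
#endif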
3794
3795#ifdef CONFIG_NUMA
3796static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
3797{
3798	struct page *page;
3799	void *ptr = NULL;
 
3800
3801	flags |= __GFP_COMP;
3802	page = alloc_pages_node(node, flags, get_order(size));
3803	if (page)
3804		ptr = page_address(page);
3805
3806	kmalloc_large_node_hook(ptr, size, flags);
3807	return ptr;
3808}
3809
3810void *__kmalloc_node(size_t size, gfp_t flags, int node)
3811{
3812	struct kmem_cache *s;
3813	void *ret;
3814
3815	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
3816		ret = kmalloc_large_node(size, flags, node);
3817
3818		trace_kmalloc_node(_RET_IP_, ret,
3819				   size, PAGE_SIZE << get_order(size),
3820				   flags, node);
3821
3822		return ret;
3823	}
3824
3825	s = kmalloc_slab(size, flags);
3826
3827	if (unlikely(ZERO_OR_NULL_PTR(s)))
3828		return s;
3829
3830	ret = slab_alloc_node(s, flags, node, _RET_IP_);
3831
3832	trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
3833
3834	kasan_kmalloc(s, ret, size, flags);
3835
3836	return ret;
3837}
3838EXPORT_SYMBOL(__kmalloc_node);
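/*
 * Illustrative sketch (not part of this file): callers normally reach
 * __kmalloc_node() through kmalloc_node(). A hypothetical per-CPU buffer
 * allocated on the CPU's local node might look like:
 *
 *	struct foo *f = kmalloc_node(sizeof(*f), GFP_KERNEL,
 *				     cpu_to_node(cpu));
 *	if (!f)
 *		return -ENOMEM;
 *
 * "struct foo" and "cpu" are assumed to exist in the caller.
 */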
3839#endif
3840
3841#ifdef CONFIG_HARDENED_USERCOPY
3842/*
3843 * Rejects incorrectly sized objects and objects that are to be copied
3844 * to/from userspace but do not fall entirely within the containing slab
3845 * cache's usercopy region.
3846 *
3847 * Either returns normally when the copy is within bounds, or calls
3848 * usercopy_warn()/usercopy_abort() to report the offending cache.
3849 */
3850void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
3851			 bool to_user)
3852{
3853	struct kmem_cache *s;
3854	unsigned int offset;
3855	size_t object_size;
3856
3857	/* Find object and usable object size. */
3858	s = page->slab_cache;
3859
3860	/* Reject impossible pointers. */
3861	if (ptr < page_address(page))
3862		usercopy_abort("SLUB object not in SLUB page?!", NULL,
3863			       to_user, 0, n);
3864
3865	/* Find offset within object. */
3866	offset = (ptr - page_address(page)) % s->size;
3867
3868	/* Adjust for redzone and reject if within the redzone. */
3869	if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE) {
3870		if (offset < s->red_left_pad)
3871			usercopy_abort("SLUB object in left red zone",
3872				       s->name, to_user, offset, n);
3873		offset -= s->red_left_pad;
3874	}
3875
3876	/* Allow address range falling entirely within usercopy region. */
3877	if (offset >= s->useroffset &&
3878	    offset - s->useroffset <= s->usersize &&
3879	    n <= s->useroffset - offset + s->usersize)
3880		return;
3881
3882	/*
3883	 * If the copy is still within the allocated object, produce
3884	 * a warning instead of rejecting the copy. This is intended
3885	 * to be a temporary method to find any missing usercopy
3886	 * whitelists.
3887	 */
3888	object_size = slab_ksize(s);
3889	if (usercopy_fallback &&
3890	    offset <= object_size && n <= object_size - offset) {
3891		usercopy_warn("SLUB object", s->name, to_user, offset, n);
3892		return;
3893	}
3894
3895	usercopy_abort("SLUB object", s->name, to_user, offset, n);
3896}
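/*
 * Worked example of the checks above (hypothetical numbers): with
 * s->size = 256, s->useroffset = 32 and s->usersize = 64, a copy of
 * n = 48 bytes starting at offset 40 within the object is allowed because
 * 40 >= 32, 40 - 32 <= 64 and 48 <= 32 - 40 + 64 = 56. A copy of n = 64
 * bytes from the same offset falls outside the whitelisted region and is
 * warned about or rejected, depending on usercopy_fallback.
 */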
3897#endif /* CONFIG_HARDENED_USERCOPY */
3898
3899static size_t __ksize(const void *object)
3900{
3901	struct page *page;
3902
3903	if (unlikely(object == ZERO_SIZE_PTR))
3904		return 0;
3905
3906	page = virt_to_head_page(object);
3907
3908	if (unlikely(!PageSlab(page))) {
3909		WARN_ON(!PageCompound(page));
3910		return PAGE_SIZE << compound_order(page);
3911	}
3912
3913	return slab_ksize(page->slab_cache);
3914}
3915
3916size_t ksize(const void *object)
3917{
3918	size_t size = __ksize(object);
3919	/* We assume that ksize callers may use the whole allocated area,
3920	 * so we need to unpoison it.
3921	 */
3922	kasan_unpoison_shadow(object, size);
3923	return size;
3924}
3925EXPORT_SYMBOL(ksize);
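/*
 * Illustrative example (not from this file): ksize() reports the usable size
 * of the backing object, which may exceed the requested size. A
 * kmalloc(100, GFP_KERNEL) allocation is typically served from the
 * kmalloc-128 cache, so ksize() on it would return 128.
 */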
3926
3927void kfree(const void *x)
3928{
3929	struct page *page;
3930	void *object = (void *)x;
3931
3932	trace_kfree(_RET_IP_, x);
3933
3934	if (unlikely(ZERO_OR_NULL_PTR(x)))
3935		return;
3936
3937	page = virt_to_head_page(x);
3938	if (unlikely(!PageSlab(page))) {
3939		BUG_ON(!PageCompound(page));
3940		kfree_hook(object);
3941		__free_pages(page, compound_order(page));
3942		return;
3943	}
3944	slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
3945}
3946EXPORT_SYMBOL(kfree);
3947
3948#define SHRINK_PROMOTE_MAX 32
3949
3950/*
3951 * kmem_cache_shrink discards empty slabs and promotes the slabs filled
3952 * up most to the head of the partial lists. New allocations will then
3953 * fill those up and thus they can be removed from the partial lists.
3954 *
3955 * The slabs with the fewest objects in use are placed last. This means they
3956 * are allocated from last, increasing the chance that their remaining
3957 * objects are freed and the slabs can eventually be discarded.
3958 */
3959int __kmem_cache_shrink(struct kmem_cache *s)
3960{
3961	int node;
3962	int i;
3963	struct kmem_cache_node *n;
3964	struct page *page;
3965	struct page *t;
3966	struct list_head discard;
3967	struct list_head promote[SHRINK_PROMOTE_MAX];
3968	unsigned long flags;
3969	int ret = 0;
3970
3971	flush_all(s);
3972	for_each_kmem_cache_node(s, node, n) {
3973		INIT_LIST_HEAD(&discard);
3974		for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
3975			INIT_LIST_HEAD(promote + i);
3976
3977		spin_lock_irqsave(&n->list_lock, flags);
3978
3979		/*
3980		 * Build lists of slabs to discard or promote.
3981		 *
3982		 * Note that concurrent frees may occur while we hold the
3983		 * list_lock. page->inuse here is the upper limit.
3984		 */
3985		list_for_each_entry_safe(page, t, &n->partial, lru) {
3986			int free = page->objects - page->inuse;
3987
3988			/* Do not reread page->inuse */
3989			barrier();
3990
3991			/* We do not keep full slabs on the list */
3992			BUG_ON(free <= 0);
3993
3994			if (free == page->objects) {
3995				list_move(&page->lru, &discard);
3996				n->nr_partial--;
3997			} else if (free <= SHRINK_PROMOTE_MAX)
3998				list_move(&page->lru, promote + free - 1);
3999		}
4000
4001		/*
4002		 * Promote the slabs filled up most to the head of the
4003		 * partial list.
4004		 */
4005		for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
4006			list_splice(promote + i, &n->partial);
4007
4008		spin_unlock_irqrestore(&n->list_lock, flags);
4009
4010		/* Release empty slabs */
4011		list_for_each_entry_safe(page, t, &discard, lru)
4012			discard_slab(s, page);
4013
4014		if (slabs_node(s, node))
4015			ret = 1;
4016	}
4017
4018	return ret;
4019}
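/*
 * Illustrative sketch (not part of this file): __kmem_cache_shrink() is
 * normally reached via kmem_cache_shrink(), e.g. after a subsystem drops a
 * large number of objects:
 *
 *	kmem_cache_shrink(my_cache);
 *
 * where "my_cache" is a hypothetical struct kmem_cache pointer. User space
 * can trigger the same path through the sysfs "shrink" attribute defined
 * later in this file.
 */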
4020
4021#ifdef CONFIG_MEMCG
4022static void kmemcg_cache_deact_after_rcu(struct kmem_cache *s)
4023{
4024	/*
4025	 * Called with all the locks held after a sched RCU grace period.
4026	 * Even if @s becomes empty after shrinking, we can't know that @s
4027	 * doesn't have allocations already in-flight and thus can't
4028	 * destroy @s until the associated memcg is released.
4029	 *
4030	 * However, let's remove the sysfs files for empty caches here.
4031	 * Each cache has a lot of interface files which aren't
4032	 * particularly useful for empty, draining caches; without this we can
4033	 * easily end up with millions of unnecessary sysfs files on
4034	 * systems which have a lot of memory and transient cgroups.
4035	 */
4036	if (!__kmem_cache_shrink(s))
4037		sysfs_slab_remove(s);
4038}
4039
4040void __kmemcg_cache_deactivate(struct kmem_cache *s)
4041{
4042	/*
4043	 * Disable empty slabs caching. Used to avoid pinning offline
4044	 * memory cgroups by kmem pages that can be freed.
4045	 */
4046	slub_set_cpu_partial(s, 0);
4047	s->min_partial = 0;
4048
4049	/*
4050	 * s->cpu_partial is checked locklessly (see put_cpu_partial), so
4051	 * we have to make sure the change is visible before shrinking.
4052	 */
4053	slab_deactivate_memcg_cache_rcu_sched(s, kmemcg_cache_deact_after_rcu);
4054}
4055#endif
4056
4057static int slab_mem_going_offline_callback(void *arg)
4058{
4059	struct kmem_cache *s;
4060
4061	mutex_lock(&slab_mutex);
4062	list_for_each_entry(s, &slab_caches, list)
4063		__kmem_cache_shrink(s);
4064	mutex_unlock(&slab_mutex);
4065
4066	return 0;
4067}
4068
4069static void slab_mem_offline_callback(void *arg)
4070{
4071	struct kmem_cache_node *n;
4072	struct kmem_cache *s;
4073	struct memory_notify *marg = arg;
4074	int offline_node;
4075
4076	offline_node = marg->status_change_nid_normal;
4077
4078	/*
4079	 * If the node still has available memory, we still need its
4080	 * kmem_cache_node structure, so there is nothing to tear down.
4081	 */
4082	if (offline_node < 0)
4083		return;
4084
4085	mutex_lock(&slab_mutex);
4086	list_for_each_entry(s, &slab_caches, list) {
4087		n = get_node(s, offline_node);
4088		if (n) {
4089			/*
4090			 * if n->nr_slabs > 0, slabs still exist on the node
4091			 * that is going down. We were unable to free them,
4092			 * and offline_pages() function shouldn't call this
4093			 * callback. So, we must fail.
4094			 */
4095			BUG_ON(slabs_node(s, offline_node));
4096
4097			s->node[offline_node] = NULL;
4098			kmem_cache_free(kmem_cache_node, n);
4099		}
4100	}
4101	mutex_unlock(&slab_mutex);
4102}
4103
4104static int slab_mem_going_online_callback(void *arg)
4105{
4106	struct kmem_cache_node *n;
4107	struct kmem_cache *s;
4108	struct memory_notify *marg = arg;
4109	int nid = marg->status_change_nid_normal;
4110	int ret = 0;
4111
4112	/*
4113	 * If the node's memory is already available, then kmem_cache_node is
4114	 * already created. Nothing to do.
4115	 */
4116	if (nid < 0)
4117		return 0;
4118
4119	/*
4120	 * We are bringing a node online. No memory is available yet. We must
4121	 * allocate a kmem_cache_node structure in order to bring the node
4122	 * online.
4123	 */
4124	mutex_lock(&slab_mutex);
4125	list_for_each_entry(s, &slab_caches, list) {
4126		/*
4127		 * XXX: kmem_cache_alloc_node will fallback to other nodes
4128		 *      since memory is not yet available from the node that
4129		 *      is brought up.
4130		 */
4131		n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL);
4132		if (!n) {
4133			ret = -ENOMEM;
4134			goto out;
4135		}
4136		init_kmem_cache_node(n);
4137		s->node[nid] = n;
4138	}
4139out:
4140	mutex_unlock(&slab_mutex);
4141	return ret;
4142}
4143
4144static int slab_memory_callback(struct notifier_block *self,
4145				unsigned long action, void *arg)
4146{
4147	int ret = 0;
4148
4149	switch (action) {
4150	case MEM_GOING_ONLINE:
4151		ret = slab_mem_going_online_callback(arg);
4152		break;
4153	case MEM_GOING_OFFLINE:
4154		ret = slab_mem_going_offline_callback(arg);
4155		break;
4156	case MEM_OFFLINE:
4157	case MEM_CANCEL_ONLINE:
4158		slab_mem_offline_callback(arg);
4159		break;
4160	case MEM_ONLINE:
4161	case MEM_CANCEL_OFFLINE:
4162		break;
4163	}
4164	if (ret)
4165		ret = notifier_from_errno(ret);
4166	else
4167		ret = NOTIFY_OK;
4168	return ret;
4169}
4170
4171static struct notifier_block slab_memory_callback_nb = {
4172	.notifier_call = slab_memory_callback,
4173	.priority = SLAB_CALLBACK_PRI,
4174};
4175
4176/********************************************************************
4177 *			Basic setup of slabs
4178 *******************************************************************/
4179
4180/*
4181 * Used for early kmem_cache structures that were allocated using
4182 * the page allocator. Allocate them properly then fix up the pointers
4183 * that may be pointing to the wrong kmem_cache structure.
4184 */
4185
4186static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
4187{
4188	int node;
4189	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
4190	struct kmem_cache_node *n;
4191
4192	memcpy(s, static_cache, kmem_cache->object_size);
4193
4194	/*
4195	 * This runs very early, and only the boot processor is supposed to be
4196	 * up.  Even if that were not the case, IRQs are not yet enabled, so we
4197	 * could not send IPIs anyway.
4198	 */
4199	__flush_cpu_slab(s, smp_processor_id());
4200	for_each_kmem_cache_node(s, node, n) {
4201		struct page *p;
4202
4203		list_for_each_entry(p, &n->partial, lru)
4204			p->slab_cache = s;
4205
4206#ifdef CONFIG_SLUB_DEBUG
4207		list_for_each_entry(p, &n->full, lru)
4208			p->slab_cache = s;
4209#endif
4210	}
4211	slab_init_memcg_params(s);
4212	list_add(&s->list, &slab_caches);
4213	memcg_link_cache(s);
4214	return s;
4215}
4216
4217void __init kmem_cache_init(void)
4218{
4219	static __initdata struct kmem_cache boot_kmem_cache,
4220		boot_kmem_cache_node;
4221
4222	if (debug_guardpage_minorder())
4223		slub_max_order = 0;
4224
4225	kmem_cache_node = &boot_kmem_cache_node;
4226	kmem_cache = &boot_kmem_cache;
4227
4228	create_boot_cache(kmem_cache_node, "kmem_cache_node",
4229		sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN, 0, 0);
4230
4231	register_hotmemory_notifier(&slab_memory_callback_nb);
4232
4233	/* Able to allocate the per node structures */
4234	slab_state = PARTIAL;
4235
4236	create_boot_cache(kmem_cache, "kmem_cache",
4237			offsetof(struct kmem_cache, node) +
4238				nr_node_ids * sizeof(struct kmem_cache_node *),
4239		       SLAB_HWCACHE_ALIGN, 0, 0);
4240
4241	kmem_cache = bootstrap(&boot_kmem_cache);
4242
4243	/*
4244	 * Allocate kmem_cache_node properly from the kmem_cache slab.
4245	 * kmem_cache_node is separately allocated so no need to
4246	 * update any list pointers.
4247	 */
4248	kmem_cache_node = bootstrap(&boot_kmem_cache_node);
4249
4250	/* Now we can use the kmem_cache to allocate kmalloc slabs */
4251	setup_kmalloc_cache_index_table();
4252	create_kmalloc_caches(0);
4253
4254	/* Setup random freelists for each cache */
4255	init_freelist_randomization();
4256
4257	cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL,
4258				  slub_cpu_dead);
4259
4260	pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%d\n",
4261		cache_line_size(),
4262		slub_min_order, slub_max_order, slub_min_objects,
4263		nr_cpu_ids, nr_node_ids);
4264}
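/*
 * Illustrative example (not from this file): the pr_info() above produces a
 * boot line of the form
 *
 *	SLUB: HWalign=64, Order=0-3, MinObjects=0, CPUs=8, Nodes=1
 *
 * where the concrete numbers depend on the machine and the command line; the
 * values shown are hypothetical.
 */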
4265
4266void __init kmem_cache_init_late(void)
4267{
4268}
4269
4270struct kmem_cache *
4271__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
4272		   slab_flags_t flags, void (*ctor)(void *))
4273{
4274	struct kmem_cache *s, *c;
4275
4276	s = find_mergeable(size, align, flags, name, ctor);
4277	if (s) {
4278		s->refcount++;
4279
4280		/*
4281		 * Adjust the object sizes so that we clear
4282		 * the complete object on kzalloc.
4283		 */
4284		s->object_size = max(s->object_size, size);
4285		s->inuse = max(s->inuse, ALIGN(size, sizeof(void *)));
4286
4287		for_each_memcg_cache(c, s) {
4288			c->object_size = s->object_size;
4289			c->inuse = max(c->inuse, ALIGN(size, sizeof(void *)));
4290		}
4291
4292		if (sysfs_slab_alias(s, name)) {
4293			s->refcount--;
4294			s = NULL;
4295		}
4296	}
4297
4298	return s;
4299}
4300
4301int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
4302{
4303	int err;
4304
4305	err = kmem_cache_open(s, flags);
4306	if (err)
4307		return err;
4308
4309	/* Mutex is not taken during early boot */
4310	if (slab_state <= UP)
4311		return 0;
4312
4313	memcg_propagate_slab_attrs(s);
4314	err = sysfs_slab_add(s);
4315	if (err)
4316		__kmem_cache_release(s);
4317
4318	return err;
4319}
4320
4321void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
4322{
4323	struct kmem_cache *s;
4324	void *ret;
4325
4326	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
4327		return kmalloc_large(size, gfpflags);
4328
4329	s = kmalloc_slab(size, gfpflags);
4330
4331	if (unlikely(ZERO_OR_NULL_PTR(s)))
4332		return s;
4333
4334	ret = slab_alloc(s, gfpflags, caller);
4335
4336	/* Honor the call site pointer we received. */
4337	trace_kmalloc(caller, ret, size, s->size, gfpflags);
4338
4339	return ret;
4340}
4341
4342#ifdef CONFIG_NUMA
4343void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
4344					int node, unsigned long caller)
4345{
4346	struct kmem_cache *s;
4347	void *ret;
4348
4349	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
4350		ret = kmalloc_large_node(size, gfpflags, node);
4351
4352		trace_kmalloc_node(caller, ret,
4353				   size, PAGE_SIZE << get_order(size),
4354				   gfpflags, node);
4355
4356		return ret;
4357	}
4358
4359	s = kmalloc_slab(size, gfpflags);
4360
4361	if (unlikely(ZERO_OR_NULL_PTR(s)))
4362		return s;
4363
4364	ret = slab_alloc_node(s, gfpflags, node, caller);
4365
4366	/* Honor the call site pointer we received. */
4367	trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
4368
4369	return ret;
4370}
4371#endif
4372
4373#ifdef CONFIG_SYSFS
4374static int count_inuse(struct page *page)
4375{
4376	return page->inuse;
4377}
4378
4379static int count_total(struct page *page)
4380{
4381	return page->objects;
4382}
4383#endif
4384
4385#ifdef CONFIG_SLUB_DEBUG
4386static int validate_slab(struct kmem_cache *s, struct page *page,
4387						unsigned long *map)
4388{
4389	void *p;
4390	void *addr = page_address(page);
4391
4392	if (!check_slab(s, page) ||
4393			!on_freelist(s, page, NULL))
4394		return 0;
4395
4396	/* Now we know that a valid freelist exists */
4397	bitmap_zero(map, page->objects);
4398
4399	get_map(s, page, map);
4400	for_each_object(p, s, addr, page->objects) {
4401		if (test_bit(slab_index(p, s, addr), map))
4402			if (!check_object(s, page, p, SLUB_RED_INACTIVE))
4403				return 0;
4404	}
4405
4406	for_each_object(p, s, addr, page->objects)
4407		if (!test_bit(slab_index(p, s, addr), map))
4408			if (!check_object(s, page, p, SLUB_RED_ACTIVE))
4409				return 0;
4410	return 1;
4411}
4412
4413static void validate_slab_slab(struct kmem_cache *s, struct page *page,
4414						unsigned long *map)
4415{
4416	slab_lock(page);
4417	validate_slab(s, page, map);
4418	slab_unlock(page);
4419}
4420
4421static int validate_slab_node(struct kmem_cache *s,
4422		struct kmem_cache_node *n, unsigned long *map)
4423{
4424	unsigned long count = 0;
4425	struct page *page;
4426	unsigned long flags;
4427
4428	spin_lock_irqsave(&n->list_lock, flags);
4429
4430	list_for_each_entry(page, &n->partial, lru) {
4431		validate_slab_slab(s, page, map);
4432		count++;
4433	}
4434	if (count != n->nr_partial)
4435		pr_err("SLUB %s: %ld partial slabs counted but counter=%ld\n",
4436		       s->name, count, n->nr_partial);
4437
4438	if (!(s->flags & SLAB_STORE_USER))
4439		goto out;
4440
4441	list_for_each_entry(page, &n->full, lru) {
4442		validate_slab_slab(s, page, map);
4443		count++;
4444	}
4445	if (count != atomic_long_read(&n->nr_slabs))
4446		pr_err("SLUB: %s %ld slabs counted but counter=%ld\n",
4447		       s->name, count, atomic_long_read(&n->nr_slabs));
4448
4449out:
4450	spin_unlock_irqrestore(&n->list_lock, flags);
4451	return count;
4452}
4453
4454static long validate_slab_cache(struct kmem_cache *s)
4455{
4456	int node;
4457	unsigned long count = 0;
4458	unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
4459				sizeof(unsigned long), GFP_KERNEL);
4460	struct kmem_cache_node *n;
4461
4462	if (!map)
4463		return -ENOMEM;
4464
4465	flush_all(s);
4466	for_each_kmem_cache_node(s, node, n)
4467		count += validate_slab_node(s, n, map);
4468	kfree(map);
4469	return count;
4470}
4471/*
4472 * Generate lists of code addresses where slabcache objects are allocated
4473 * and freed.
4474 */
4475
4476struct location {
4477	unsigned long count;
4478	unsigned long addr;
4479	long long sum_time;
4480	long min_time;
4481	long max_time;
4482	long min_pid;
4483	long max_pid;
4484	DECLARE_BITMAP(cpus, NR_CPUS);
4485	nodemask_t nodes;
4486};
4487
4488struct loc_track {
4489	unsigned long max;
4490	unsigned long count;
4491	struct location *loc;
4492};
4493
4494static void free_loc_track(struct loc_track *t)
4495{
4496	if (t->max)
4497		free_pages((unsigned long)t->loc,
4498			get_order(sizeof(struct location) * t->max));
4499}
4500
4501static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
4502{
4503	struct location *l;
4504	int order;
4505
4506	order = get_order(sizeof(struct location) * max);
4507
4508	l = (void *)__get_free_pages(flags, order);
4509	if (!l)
4510		return 0;
4511
4512	if (t->count) {
4513		memcpy(l, t->loc, sizeof(struct location) * t->count);
4514		free_loc_track(t);
4515	}
4516	t->max = max;
4517	t->loc = l;
4518	return 1;
4519}
4520
4521static int add_location(struct loc_track *t, struct kmem_cache *s,
4522				const struct track *track)
4523{
4524	long start, end, pos;
4525	struct location *l;
4526	unsigned long caddr;
4527	unsigned long age = jiffies - track->when;
4528
4529	start = -1;
4530	end = t->count;
4531
4532	for ( ; ; ) {
4533		pos = start + (end - start + 1) / 2;
4534
4535		/*
4536		 * There is nothing at "end". If we end up there,
4537		 * the new element must be inserted before "end".
4538		 */
4539		if (pos == end)
4540			break;
4541
4542		caddr = t->loc[pos].addr;
4543		if (track->addr == caddr) {
4544
4545			l = &t->loc[pos];
4546			l->count++;
4547			if (track->when) {
4548				l->sum_time += age;
4549				if (age < l->min_time)
4550					l->min_time = age;
4551				if (age > l->max_time)
4552					l->max_time = age;
4553
4554				if (track->pid < l->min_pid)
4555					l->min_pid = track->pid;
4556				if (track->pid > l->max_pid)
4557					l->max_pid = track->pid;
4558
4559				cpumask_set_cpu(track->cpu,
4560						to_cpumask(l->cpus));
4561			}
4562			node_set(page_to_nid(virt_to_page(track)), l->nodes);
4563			return 1;
4564		}
4565
4566		if (track->addr < caddr)
4567			end = pos;
4568		else
4569			start = pos;
4570	}
4571
4572	/*
4573	 * Not found. Insert new tracking element.
4574	 */
4575	if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
4576		return 0;
4577
4578	l = t->loc + pos;
4579	if (pos < t->count)
4580		memmove(l + 1, l,
4581			(t->count - pos) * sizeof(struct location));
4582	t->count++;
4583	l->count = 1;
4584	l->addr = track->addr;
4585	l->sum_time = age;
4586	l->min_time = age;
4587	l->max_time = age;
4588	l->min_pid = track->pid;
4589	l->max_pid = track->pid;
4590	cpumask_clear(to_cpumask(l->cpus));
4591	cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
4592	nodes_clear(l->nodes);
4593	node_set(page_to_nid(virt_to_page(track)), l->nodes);
4594	return 1;
4595}
4596
4597static void process_slab(struct loc_track *t, struct kmem_cache *s,
4598		struct page *page, enum track_item alloc,
4599		unsigned long *map)
4600{
4601	void *addr = page_address(page);
4602	void *p;
4603
4604	bitmap_zero(map, page->objects);
4605	get_map(s, page, map);
4606
4607	for_each_object(p, s, addr, page->objects)
4608		if (!test_bit(slab_index(p, s, addr), map))
4609			add_location(t, s, get_track(s, p, alloc));
4610}
4611
4612static int list_locations(struct kmem_cache *s, char *buf,
4613					enum track_item alloc)
4614{
4615	int len = 0;
4616	unsigned long i;
4617	struct loc_track t = { 0, 0, NULL };
4618	int node;
4619	unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
4620				     sizeof(unsigned long), GFP_KERNEL);
4621	struct kmem_cache_node *n;
4622
4623	if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
4624				     GFP_KERNEL)) {
4625		kfree(map);
4626		return sprintf(buf, "Out of memory\n");
4627	}
4628	/* Push back cpu slabs */
4629	flush_all(s);
4630
4631	for_each_kmem_cache_node(s, node, n) {
4632		unsigned long flags;
4633		struct page *page;
4634
4635		if (!atomic_long_read(&n->nr_slabs))
4636			continue;
4637
4638		spin_lock_irqsave(&n->list_lock, flags);
4639		list_for_each_entry(page, &n->partial, lru)
4640			process_slab(&t, s, page, alloc, map);
4641		list_for_each_entry(page, &n->full, lru)
4642			process_slab(&t, s, page, alloc, map);
4643		spin_unlock_irqrestore(&n->list_lock, flags);
4644	}
4645
4646	for (i = 0; i < t.count; i++) {
4647		struct location *l = &t.loc[i];
4648
4649		if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100)
4650			break;
4651		len += sprintf(buf + len, "%7ld ", l->count);
4652
4653		if (l->addr)
4654			len += sprintf(buf + len, "%pS", (void *)l->addr);
4655		else
4656			len += sprintf(buf + len, "<not-available>");
4657
4658		if (l->sum_time != l->min_time) {
4659			len += sprintf(buf + len, " age=%ld/%ld/%ld",
4660				l->min_time,
4661				(long)div_u64(l->sum_time, l->count),
4662				l->max_time);
4663		} else
4664			len += sprintf(buf + len, " age=%ld",
4665				l->min_time);
4666
4667		if (l->min_pid != l->max_pid)
4668			len += sprintf(buf + len, " pid=%ld-%ld",
4669				l->min_pid, l->max_pid);
4670		else
4671			len += sprintf(buf + len, " pid=%ld",
4672				l->min_pid);
4673
4674		if (num_online_cpus() > 1 &&
4675				!cpumask_empty(to_cpumask(l->cpus)) &&
4676				len < PAGE_SIZE - 60)
4677			len += scnprintf(buf + len, PAGE_SIZE - len - 50,
4678					 " cpus=%*pbl",
4679					 cpumask_pr_args(to_cpumask(l->cpus)));
4680
4681		if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
4682				len < PAGE_SIZE - 60)
4683			len += scnprintf(buf + len, PAGE_SIZE - len - 50,
4684					 " nodes=%*pbl",
4685					 nodemask_pr_args(&l->nodes));
4686
4687		len += sprintf(buf + len, "\n");
4688	}
4689
4690	free_loc_track(&t);
4691	kfree(map);
4692	if (!t.count)
4693		len += sprintf(buf, "No data\n");
4694	return len;
4695}
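/*
 * Illustrative example (not from this file): each line emitted above for the
 * alloc_calls/free_calls files has the form
 *
 *	    142 alloc_inode+0x4f/0x90 age=3/2876/10964 pid=1-832 cpus=0-3 nodes=0
 *
 * i.e. a call count, the call site, the min/avg/max object age in jiffies,
 * the pid range and the cpu/node masks seen. The sample values are
 * hypothetical.
 */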
4696#endif
4697
4698#ifdef SLUB_RESILIENCY_TEST
4699static void __init resiliency_test(void)
4700{
4701	u8 *p;
4702
4703	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || KMALLOC_SHIFT_HIGH < 10);
4704
4705	pr_err("SLUB resiliency testing\n");
4706	pr_err("-----------------------\n");
4707	pr_err("A. Corruption after allocation\n");
4708
4709	p = kzalloc(16, GFP_KERNEL);
4710	p[16] = 0x12;
4711	pr_err("\n1. kmalloc-16: Clobber Redzone/next pointer 0x12->0x%p\n\n",
4712	       p + 16);
4713
4714	validate_slab_cache(kmalloc_caches[4]);
4715
4716	/* Hmmm... The next two are dangerous */
4717	p = kzalloc(32, GFP_KERNEL);
4718	p[32 + sizeof(void *)] = 0x34;
4719	pr_err("\n2. kmalloc-32: Clobber next pointer/next slab 0x34 -> -0x%p\n",
4720	       p);
4721	pr_err("If allocated object is overwritten then not detectable\n\n");
4722
4723	validate_slab_cache(kmalloc_caches[5]);
4724	p = kzalloc(64, GFP_KERNEL);
4725	p += 64 + (get_cycles() & 0xff) * sizeof(void *);
4726	*p = 0x56;
4727	pr_err("\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
4728	       p);
4729	pr_err("If allocated object is overwritten then not detectable\n\n");
4730	validate_slab_cache(kmalloc_caches[6]);
4731
4732	pr_err("\nB. Corruption after free\n");
4733	p = kzalloc(128, GFP_KERNEL);
4734	kfree(p);
4735	*p = 0x78;
4736	pr_err("1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
4737	validate_slab_cache(kmalloc_caches[7]);
4738
4739	p = kzalloc(256, GFP_KERNEL);
4740	kfree(p);
4741	p[50] = 0x9a;
4742	pr_err("\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", p);
4743	validate_slab_cache(kmalloc_caches[8]);
4744
4745	p = kzalloc(512, GFP_KERNEL);
4746	kfree(p);
4747	p[512] = 0xab;
4748	pr_err("\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
4749	validate_slab_cache(kmalloc_caches[9]);
4750}
4751#else
4752#ifdef CONFIG_SYSFS
4753static void resiliency_test(void) {};
4754#endif
4755#endif
4756
4757#ifdef CONFIG_SYSFS
4758enum slab_stat_type {
4759	SL_ALL,			/* All slabs */
4760	SL_PARTIAL,		/* Only partially allocated slabs */
4761	SL_CPU,			/* Only slabs used for cpu caches */
4762	SL_OBJECTS,		/* Determine allocated objects not slabs */
4763	SL_TOTAL		/* Determine object capacity not slabs */
4764};
4765
4766#define SO_ALL		(1 << SL_ALL)
4767#define SO_PARTIAL	(1 << SL_PARTIAL)
4768#define SO_CPU		(1 << SL_CPU)
4769#define SO_OBJECTS	(1 << SL_OBJECTS)
4770#define SO_TOTAL	(1 << SL_TOTAL)
4771
4772#ifdef CONFIG_MEMCG
4773static bool memcg_sysfs_enabled = IS_ENABLED(CONFIG_SLUB_MEMCG_SYSFS_ON);
4774
4775static int __init setup_slub_memcg_sysfs(char *str)
4776{
4777	int v;
4778
4779	if (get_option(&str, &v) > 0)
4780		memcg_sysfs_enabled = v;
4781
4782	return 1;
4783}
4784
4785__setup("slub_memcg_sysfs=", setup_slub_memcg_sysfs);
4786#endif
4787
4788static ssize_t show_slab_objects(struct kmem_cache *s,
4789			    char *buf, unsigned long flags)
4790{
4791	unsigned long total = 0;
4792	int node;
4793	int x;
4794	unsigned long *nodes;
4795
4796	nodes = kzalloc(sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
4797	if (!nodes)
4798		return -ENOMEM;
4799
4800	if (flags & SO_CPU) {
4801		int cpu;
4802
4803		for_each_possible_cpu(cpu) {
4804			struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab,
4805							       cpu);
4806			int node;
4807			struct page *page;
4808
4809			page = READ_ONCE(c->page);
4810			if (!page)
4811				continue;
4812
4813			node = page_to_nid(page);
4814			if (flags & SO_TOTAL)
4815				x = page->objects;
4816			else if (flags & SO_OBJECTS)
4817				x = page->inuse;
4818			else
4819				x = 1;
4820
4821			total += x;
4822			nodes[node] += x;
4823
4824			page = slub_percpu_partial_read_once(c);
4825			if (page) {
4826				node = page_to_nid(page);
4827				if (flags & SO_TOTAL)
4828					WARN_ON_ONCE(1);
4829				else if (flags & SO_OBJECTS)
4830					WARN_ON_ONCE(1);
4831				else
4832					x = page->pages;
4833				total += x;
4834				nodes[node] += x;
4835			}
4836		}
4837	}
4838
4839	get_online_mems();
4840#ifdef CONFIG_SLUB_DEBUG
4841	if (flags & SO_ALL) {
4842		struct kmem_cache_node *n;
4843
4844		for_each_kmem_cache_node(s, node, n) {
4845
4846			if (flags & SO_TOTAL)
4847				x = atomic_long_read(&n->total_objects);
4848			else if (flags & SO_OBJECTS)
4849				x = atomic_long_read(&n->total_objects) -
4850					count_partial(n, count_free);
4851			else
4852				x = atomic_long_read(&n->nr_slabs);
4853			total += x;
4854			nodes[node] += x;
4855		}
4856
4857	} else
4858#endif
4859	if (flags & SO_PARTIAL) {
4860		struct kmem_cache_node *n;
4861
4862		for_each_kmem_cache_node(s, node, n) {
4863			if (flags & SO_TOTAL)
4864				x = count_partial(n, count_total);
4865			else if (flags & SO_OBJECTS)
4866				x = count_partial(n, count_inuse);
4867			else
4868				x = n->nr_partial;
4869			total += x;
4870			nodes[node] += x;
4871		}
4872	}
4873	x = sprintf(buf, "%lu", total);
4874#ifdef CONFIG_NUMA
4875	for (node = 0; node < nr_node_ids; node++)
4876		if (nodes[node])
4877			x += sprintf(buf + x, " N%d=%lu",
4878					node, nodes[node]);
4879#endif
4880	put_online_mems();
4881	kfree(nodes);
4882	return x + sprintf(buf + x, "\n");
4883}
4884
4885#ifdef CONFIG_SLUB_DEBUG
4886static int any_slab_objects(struct kmem_cache *s)
4887{
4888	int node;
4889	struct kmem_cache_node *n;
4890
4891	for_each_kmem_cache_node(s, node, n)
4892		if (atomic_long_read(&n->total_objects))
4893			return 1;
4894
4895	return 0;
4896}
4897#endif
4898
4899#define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
4900#define to_slab(n) container_of(n, struct kmem_cache, kobj)
4901
4902struct slab_attribute {
4903	struct attribute attr;
4904	ssize_t (*show)(struct kmem_cache *s, char *buf);
4905	ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
4906};
4907
4908#define SLAB_ATTR_RO(_name) \
4909	static struct slab_attribute _name##_attr = \
4910	__ATTR(_name, 0400, _name##_show, NULL)
4911
4912#define SLAB_ATTR(_name) \
4913	static struct slab_attribute _name##_attr =  \
4914	__ATTR(_name, 0600, _name##_show, _name##_store)
4915
4916static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
4917{
4918	return sprintf(buf, "%u\n", s->size);
4919}
4920SLAB_ATTR_RO(slab_size);
4921
4922static ssize_t align_show(struct kmem_cache *s, char *buf)
4923{
4924	return sprintf(buf, "%u\n", s->align);
4925}
4926SLAB_ATTR_RO(align);
4927
4928static ssize_t object_size_show(struct kmem_cache *s, char *buf)
4929{
4930	return sprintf(buf, "%u\n", s->object_size);
4931}
4932SLAB_ATTR_RO(object_size);
4933
4934static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
4935{
4936	return sprintf(buf, "%u\n", oo_objects(s->oo));
4937}
4938SLAB_ATTR_RO(objs_per_slab);
4939
4940static ssize_t order_store(struct kmem_cache *s,
4941				const char *buf, size_t length)
4942{
4943	unsigned int order;
4944	int err;
4945
4946	err = kstrtouint(buf, 10, &order);
4947	if (err)
4948		return err;
4949
4950	if (order > slub_max_order || order < slub_min_order)
4951		return -EINVAL;
4952
4953	calculate_sizes(s, order);
4954	return length;
4955}
4956
4957static ssize_t order_show(struct kmem_cache *s, char *buf)
4958{
4959	return sprintf(buf, "%u\n", oo_order(s->oo));
4960}
4961SLAB_ATTR(order);
4962
4963static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
4964{
4965	return sprintf(buf, "%lu\n", s->min_partial);
4966}
4967
4968static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
4969				 size_t length)
4970{
4971	unsigned long min;
4972	int err;
4973
4974	err = kstrtoul(buf, 10, &min);
4975	if (err)
4976		return err;
4977
4978	set_min_partial(s, min);
4979	return length;
4980}
4981SLAB_ATTR(min_partial);
4982
4983static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
4984{
4985	return sprintf(buf, "%u\n", slub_cpu_partial(s));
4986}
4987
4988static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
4989				 size_t length)
4990{
4991	unsigned int objects;
4992	int err;
4993
4994	err = kstrtouint(buf, 10, &objects);
4995	if (err)
4996		return err;
4997	if (objects && !kmem_cache_has_cpu_partial(s))
4998		return -EINVAL;
4999
5000	slub_set_cpu_partial(s, objects);
5001	flush_all(s);
5002	return length;
5003}
5004SLAB_ATTR(cpu_partial);
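/*
 * Illustrative example (not from this file): the cpu_partial attribute can be
 * tuned from user space, e.g.
 *
 *	echo 30 > /sys/kernel/slab/kmalloc-64/cpu_partial
 *
 * which bounds how many objects may sit on a per-cpu partial list; the value
 * 30 and the cache name are hypothetical.
 */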
5005
5006static ssize_t ctor_show(struct kmem_cache *s, char *buf)
5007{
5008	if (!s->ctor)
5009		return 0;
5010	return sprintf(buf, "%pS\n", s->ctor);
5011}
5012SLAB_ATTR_RO(ctor);
5013
5014static ssize_t aliases_show(struct kmem_cache *s, char *buf)
5015{
5016	return sprintf(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
5017}
5018SLAB_ATTR_RO(aliases);
5019
5020static ssize_t partial_show(struct kmem_cache *s, char *buf)
5021{
5022	return show_slab_objects(s, buf, SO_PARTIAL);
5023}
5024SLAB_ATTR_RO(partial);
5025
5026static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
5027{
5028	return show_slab_objects(s, buf, SO_CPU);
5029}
5030SLAB_ATTR_RO(cpu_slabs);
5031
5032static ssize_t objects_show(struct kmem_cache *s, char *buf)
5033{
5034	return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
5035}
5036SLAB_ATTR_RO(objects);
5037
5038static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
5039{
5040	return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
5041}
5042SLAB_ATTR_RO(objects_partial);
5043
5044static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
5045{
5046	int objects = 0;
5047	int pages = 0;
5048	int cpu;
5049	int len;
5050
5051	for_each_online_cpu(cpu) {
5052		struct page *page;
5053
5054		page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
5055
5056		if (page) {
5057			pages += page->pages;
5058			objects += page->pobjects;
5059		}
5060	}
5061
5062	len = sprintf(buf, "%d(%d)", objects, pages);
5063
5064#ifdef CONFIG_SMP
5065	for_each_online_cpu(cpu) {
5066		struct page *page;
5067
5068		page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
5069
5070		if (page && len < PAGE_SIZE - 20)
5071			len += sprintf(buf + len, " C%d=%d(%d)", cpu,
5072				page->pobjects, page->pages);
5073	}
5074#endif
5075	return len + sprintf(buf + len, "\n");
5076}
5077SLAB_ATTR_RO(slabs_cpu_partial);
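/*
 * Illustrative example (not from this file): the slabs_cpu_partial file reads
 * as, e.g.,
 *
 *	69(15) C0=12(3) C1=27(6) C2=18(4) C3=12(2)
 *
 * i.e. total objects(pages) on the per-cpu partial lists followed by the
 * per-cpu breakdown. The numbers are hypothetical.
 */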
5078
5079static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
5080{
5081	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
5082}
5083
5084static ssize_t reclaim_account_store(struct kmem_cache *s,
5085				const char *buf, size_t length)
5086{
5087	s->flags &= ~SLAB_RECLAIM_ACCOUNT;
5088	if (buf[0] == '1')
5089		s->flags |= SLAB_RECLAIM_ACCOUNT;
5090	return length;
5091}
5092SLAB_ATTR(reclaim_account);
5093
5094static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
5095{
5096	return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
5097}
5098SLAB_ATTR_RO(hwcache_align);
5099
5100#ifdef CONFIG_ZONE_DMA
5101static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
5102{
5103	return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
5104}
5105SLAB_ATTR_RO(cache_dma);
5106#endif
5107
5108static ssize_t usersize_show(struct kmem_cache *s, char *buf)
5109{
5110	return sprintf(buf, "%u\n", s->usersize);
5111}
5112SLAB_ATTR_RO(usersize);
5113
5114static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
5115{
5116	return sprintf(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU));
5117}
5118SLAB_ATTR_RO(destroy_by_rcu);
5119
5120static ssize_t reserved_show(struct kmem_cache *s, char *buf)
5121{
5122	return sprintf(buf, "%u\n", s->reserved);
5123}
5124SLAB_ATTR_RO(reserved);
5125
5126#ifdef CONFIG_SLUB_DEBUG
5127static ssize_t slabs_show(struct kmem_cache *s, char *buf)
5128{
5129	return show_slab_objects(s, buf, SO_ALL);
5130}
5131SLAB_ATTR_RO(slabs);
5132
5133static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
5134{
5135	return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
5136}
5137SLAB_ATTR_RO(total_objects);
5138
5139static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
5140{
5141	return sprintf(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS));
5142}
5143
5144static ssize_t sanity_checks_store(struct kmem_cache *s,
5145				const char *buf, size_t length)
5146{
5147	s->flags &= ~SLAB_CONSISTENCY_CHECKS;
5148	if (buf[0] == '1') {
5149		s->flags &= ~__CMPXCHG_DOUBLE;
5150		s->flags |= SLAB_CONSISTENCY_CHECKS;
5151	}
5152	return length;
5153}
5154SLAB_ATTR(sanity_checks);
5155
5156static ssize_t trace_show(struct kmem_cache *s, char *buf)
5157{
5158	return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
5159}
5160
5161static ssize_t trace_store(struct kmem_cache *s, const char *buf,
5162							size_t length)
5163{
5164	/*
5165	 * Tracing a merged cache is going to give confusing results
5166	 * as well as cause other issues like converting a mergeable
5167	 * cache into an unmergeable one.
5168	 */
5169	if (s->refcount > 1)
5170		return -EINVAL;
5171
5172	s->flags &= ~SLAB_TRACE;
5173	if (buf[0] == '1') {
5174		s->flags &= ~__CMPXCHG_DOUBLE;
5175		s->flags |= SLAB_TRACE;
5176	}
5177	return length;
5178}
5179SLAB_ATTR(trace);
5180
5181static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
5182{
5183	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
5184}
5185
5186static ssize_t red_zone_store(struct kmem_cache *s,
5187				const char *buf, size_t length)
5188{
5189	if (any_slab_objects(s))
5190		return -EBUSY;
5191
5192	s->flags &= ~SLAB_RED_ZONE;
5193	if (buf[0] == '1') {
5194		s->flags |= SLAB_RED_ZONE;
5195	}
5196	calculate_sizes(s, -1);
5197	return length;
5198}
5199SLAB_ATTR(red_zone);
5200
5201static ssize_t poison_show(struct kmem_cache *s, char *buf)
5202{
5203	return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON));
5204}
5205
5206static ssize_t poison_store(struct kmem_cache *s,
5207				const char *buf, size_t length)
5208{
5209	if (any_slab_objects(s))
5210		return -EBUSY;
5211
5212	s->flags &= ~SLAB_POISON;
5213	if (buf[0] == '1') {
5214		s->flags |= SLAB_POISON;
5215	}
5216	calculate_sizes(s, -1);
5217	return length;
5218}
5219SLAB_ATTR(poison);
5220
5221static ssize_t store_user_show(struct kmem_cache *s, char *buf)
5222{
5223	return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
5224}
5225
5226static ssize_t store_user_store(struct kmem_cache *s,
5227				const char *buf, size_t length)
5228{
5229	if (any_slab_objects(s))
5230		return -EBUSY;
5231
5232	s->flags &= ~SLAB_STORE_USER;
5233	if (buf[0] == '1') {
5234		s->flags &= ~__CMPXCHG_DOUBLE;
5235		s->flags |= SLAB_STORE_USER;
5236	}
5237	calculate_sizes(s, -1);
5238	return length;
5239}
5240SLAB_ATTR(store_user);
5241
5242static ssize_t validate_show(struct kmem_cache *s, char *buf)
5243{
5244	return 0;
5245}
5246
5247static ssize_t validate_store(struct kmem_cache *s,
5248			const char *buf, size_t length)
5249{
5250	int ret = -EINVAL;
5251
5252	if (buf[0] == '1') {
5253		ret = validate_slab_cache(s);
5254		if (ret >= 0)
5255			ret = length;
5256	}
5257	return ret;
5258}
5259SLAB_ATTR(validate);
5260
5261static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
5262{
5263	if (!(s->flags & SLAB_STORE_USER))
5264		return -ENOSYS;
5265	return list_locations(s, buf, TRACK_ALLOC);
5266}
5267SLAB_ATTR_RO(alloc_calls);
5268
5269static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
5270{
5271	if (!(s->flags & SLAB_STORE_USER))
5272		return -ENOSYS;
5273	return list_locations(s, buf, TRACK_FREE);
5274}
5275SLAB_ATTR_RO(free_calls);
5276#endif /* CONFIG_SLUB_DEBUG */
5277
5278#ifdef CONFIG_FAILSLAB
5279static ssize_t failslab_show(struct kmem_cache *s, char *buf)
5280{
5281	return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
5282}
5283
5284static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
5285							size_t length)
5286{
5287	if (s->refcount > 1)
5288		return -EINVAL;
5289
5290	s->flags &= ~SLAB_FAILSLAB;
5291	if (buf[0] == '1')
5292		s->flags |= SLAB_FAILSLAB;
5293	return length;
5294}
5295SLAB_ATTR(failslab);
5296#endif
5297
5298static ssize_t shrink_show(struct kmem_cache *s, char *buf)
5299{
5300	return 0;
5301}
5302
5303static ssize_t shrink_store(struct kmem_cache *s,
5304			const char *buf, size_t length)
5305{
5306	if (buf[0] == '1')
5307		kmem_cache_shrink(s);
5308	else
5309		return -EINVAL;
5310	return length;
5311}
5312SLAB_ATTR(shrink);
5313
5314#ifdef CONFIG_NUMA
5315static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
5316{
5317	return sprintf(buf, "%u\n", s->remote_node_defrag_ratio / 10);
5318}
5319
5320static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
5321				const char *buf, size_t length)
5322{
5323	unsigned int ratio;
5324	int err;
5325
5326	err = kstrtouint(buf, 10, &ratio);
5327	if (err)
5328		return err;
5329	if (ratio > 100)
5330		return -ERANGE;
5331
5332	s->remote_node_defrag_ratio = ratio * 10;
5333
5334	return length;
5335}
5336SLAB_ATTR(remote_node_defrag_ratio);
5337#endif
5338
5339#ifdef CONFIG_SLUB_STATS
5340static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
5341{
5342	unsigned long sum  = 0;
5343	int cpu;
5344	int len;
5345	int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);
5346
5347	if (!data)
5348		return -ENOMEM;
5349
5350	for_each_online_cpu(cpu) {
5351		unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si];
5352
5353		data[cpu] = x;
5354		sum += x;
5355	}
5356
5357	len = sprintf(buf, "%lu", sum);
5358
5359#ifdef CONFIG_SMP
5360	for_each_online_cpu(cpu) {
5361		if (data[cpu] && len < PAGE_SIZE - 20)
5362			len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]);
5363	}
5364#endif
5365	kfree(data);
5366	return len + sprintf(buf + len, "\n");
5367}
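/*
 * Illustrative example (not from this file): each statistics file produced by
 * show_stat() reads as, e.g.,
 *
 *	4162941 C0=1039052 C1=1041210 C2=1040371 C3=1042308
 *
 * i.e. the total event count followed by the per-cpu counts; writing '0' to
 * the file (see the STAT_ATTR store below) clears the counters. The numbers
 * are hypothetical.
 */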
5368
5369static void clear_stat(struct kmem_cache *s, enum stat_item si)
5370{
5371	int cpu;
5372
5373	for_each_online_cpu(cpu)
5374		per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0;
5375}
5376
5377#define STAT_ATTR(si, text) 					\
5378static ssize_t text##_show(struct kmem_cache *s, char *buf)	\
5379{								\
5380	return show_stat(s, buf, si);				\
5381}								\
5382static ssize_t text##_store(struct kmem_cache *s,		\
5383				const char *buf, size_t length)	\
5384{								\
5385	if (buf[0] != '0')					\
5386		return -EINVAL;					\
5387	clear_stat(s, si);					\
5388	return length;						\
5389}								\
5390SLAB_ATTR(text);						\
5391
5392STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
5393STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
5394STAT_ATTR(FREE_FASTPATH, free_fastpath);
5395STAT_ATTR(FREE_SLOWPATH, free_slowpath);
5396STAT_ATTR(FREE_FROZEN, free_frozen);
5397STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
5398STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
5399STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
5400STAT_ATTR(ALLOC_SLAB, alloc_slab);
5401STAT_ATTR(ALLOC_REFILL, alloc_refill);
5402STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch);
5403STAT_ATTR(FREE_SLAB, free_slab);
5404STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
5405STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
5406STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
5407STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
5408STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
5409STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
5410STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass);
5411STAT_ATTR(ORDER_FALLBACK, order_fallback);
5412STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
5413STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
5414STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc);
5415STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free);
5416STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
5417STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain);
5418#endif
5419
5420static struct attribute *slab_attrs[] = {
5421	&slab_size_attr.attr,
5422	&object_size_attr.attr,
5423	&objs_per_slab_attr.attr,
5424	&order_attr.attr,
5425	&min_partial_attr.attr,
5426	&cpu_partial_attr.attr,
5427	&objects_attr.attr,
5428	&objects_partial_attr.attr,
5429	&partial_attr.attr,
5430	&cpu_slabs_attr.attr,
5431	&ctor_attr.attr,
5432	&aliases_attr.attr,
5433	&align_attr.attr,
5434	&hwcache_align_attr.attr,
5435	&reclaim_account_attr.attr,
5436	&destroy_by_rcu_attr.attr,
5437	&shrink_attr.attr,
5438	&reserved_attr.attr,
5439	&slabs_cpu_partial_attr.attr,
5440#ifdef CONFIG_SLUB_DEBUG
5441	&total_objects_attr.attr,
5442	&slabs_attr.attr,
5443	&sanity_checks_attr.attr,
5444	&trace_attr.attr,
5445	&red_zone_attr.attr,
5446	&poison_attr.attr,
5447	&store_user_attr.attr,
5448	&validate_attr.attr,
5449	&alloc_calls_attr.attr,
5450	&free_calls_attr.attr,
5451#endif
5452#ifdef CONFIG_ZONE_DMA
5453	&cache_dma_attr.attr,
5454#endif
5455#ifdef CONFIG_NUMA
5456	&remote_node_defrag_ratio_attr.attr,
5457#endif
5458#ifdef CONFIG_SLUB_STATS
5459	&alloc_fastpath_attr.attr,
5460	&alloc_slowpath_attr.attr,
5461	&free_fastpath_attr.attr,
5462	&free_slowpath_attr.attr,
5463	&free_frozen_attr.attr,
5464	&free_add_partial_attr.attr,
5465	&free_remove_partial_attr.attr,
5466	&alloc_from_partial_attr.attr,
5467	&alloc_slab_attr.attr,
5468	&alloc_refill_attr.attr,
5469	&alloc_node_mismatch_attr.attr,
5470	&free_slab_attr.attr,
5471	&cpuslab_flush_attr.attr,
5472	&deactivate_full_attr.attr,
5473	&deactivate_empty_attr.attr,
5474	&deactivate_to_head_attr.attr,
5475	&deactivate_to_tail_attr.attr,
5476	&deactivate_remote_frees_attr.attr,
5477	&deactivate_bypass_attr.attr,
5478	&order_fallback_attr.attr,
5479	&cmpxchg_double_fail_attr.attr,
5480	&cmpxchg_double_cpu_fail_attr.attr,
5481	&cpu_partial_alloc_attr.attr,
5482	&cpu_partial_free_attr.attr,
5483	&cpu_partial_node_attr.attr,
5484	&cpu_partial_drain_attr.attr,
5485#endif
5486#ifdef CONFIG_FAILSLAB
5487	&failslab_attr.attr,
5488#endif
5489	&usersize_attr.attr,
5490
5491	NULL
5492};
5493
5494static const struct attribute_group slab_attr_group = {
5495	.attrs = slab_attrs,
5496};
5497
5498static ssize_t slab_attr_show(struct kobject *kobj,
5499				struct attribute *attr,
5500				char *buf)
5501{
5502	struct slab_attribute *attribute;
5503	struct kmem_cache *s;
5504	int err;
5505
5506	attribute = to_slab_attr(attr);
5507	s = to_slab(kobj);
5508
5509	if (!attribute->show)
5510		return -EIO;
5511
5512	err = attribute->show(s, buf);
5513
5514	return err;
5515}
5516
5517static ssize_t slab_attr_store(struct kobject *kobj,
5518				struct attribute *attr,
5519				const char *buf, size_t len)
5520{
5521	struct slab_attribute *attribute;
5522	struct kmem_cache *s;
5523	int err;
5524
5525	attribute = to_slab_attr(attr);
5526	s = to_slab(kobj);
5527
5528	if (!attribute->store)
5529		return -EIO;
5530
5531	err = attribute->store(s, buf, len);
5532#ifdef CONFIG_MEMCG
5533	if (slab_state >= FULL && err >= 0 && is_root_cache(s)) {
5534		struct kmem_cache *c;
5535
5536		mutex_lock(&slab_mutex);
5537		if (s->max_attr_size < len)
5538			s->max_attr_size = len;
5539
5540		/*
5541		 * This is a best effort propagation, so this function's return
5542		 * value will be determined by the parent cache only. This is
5543		 * basically because not all attributes have well-defined
5544		 * semantics for rollbacks - most of the actions have
5545		 * permanent effects.
5546		 *
5547		 * Returning the error value of any child that fails is not
5548		 * well defined either, in the sense that users seeing the
5549		 * error code would not be able to tell anything about the
5550		 * state of the cache.
5551		 *
5552		 * Only returning the error code for the parent cache at least
5553		 * has well-defined semantics: the cache written to directly
5554		 * either failed or succeeded, and if it succeeded we loop
5555		 * through the descendants with best-effort propagation.
5556		 */
5557		for_each_memcg_cache(c, s)
5558			attribute->store(c, buf, len);
5559		mutex_unlock(&slab_mutex);
5560	}
5561#endif
5562	return err;
5563}
5564
5565static void memcg_propagate_slab_attrs(struct kmem_cache *s)
5566{
5567#ifdef CONFIG_MEMCG
5568	int i;
5569	char *buffer = NULL;
5570	struct kmem_cache *root_cache;
5571
5572	if (is_root_cache(s))
5573		return;
5574
5575	root_cache = s->memcg_params.root_cache;
5576
5577	/*
5578	 * This means no attribute was ever written to this cache, so there is
5579	 * no point in copying default values around.
5580	 */
5581	if (!root_cache->max_attr_size)
5582		return;
5583
5584	for (i = 0; i < ARRAY_SIZE(slab_attrs); i++) {
5585		char mbuf[64];
5586		char *buf;
5587		struct slab_attribute *attr = to_slab_attr(slab_attrs[i]);
5588		ssize_t len;
5589
5590		if (!attr || !attr->store || !attr->show)
5591			continue;
5592
5593		/*
5594		 * It is really bad that we have to allocate here, so we will
5595		 * do it only as a fallback. If we actually allocate, though,
5596		 * we can just use the allocated buffer until the end.
5597		 *
5598		 * Most of the slub attributes tend to be very small, but sysfs
5599		 * allows buffers up to a page, so page-sized values can
5600		 * theoretically occur.
5601		 */
5602		if (buffer)
5603			buf = buffer;
5604		else if (root_cache->max_attr_size < ARRAY_SIZE(mbuf))
5605			buf = mbuf;
5606		else {
5607			buffer = (char *) get_zeroed_page(GFP_KERNEL);
5608			if (WARN_ON(!buffer))
5609				continue;
5610			buf = buffer;
5611		}
5612
5613		len = attr->show(root_cache, buf);
5614		if (len > 0)
5615			attr->store(s, buf, len);
5616	}
5617
5618	if (buffer)
5619		free_page((unsigned long)buffer);
5620#endif
5621}
5622
5623static void kmem_cache_release(struct kobject *k)
5624{
5625	slab_kmem_cache_release(to_slab(k));
5626}
5627
5628static const struct sysfs_ops slab_sysfs_ops = {
5629	.show = slab_attr_show,
5630	.store = slab_attr_store,
5631};
5632
5633static struct kobj_type slab_ktype = {
5634	.sysfs_ops = &slab_sysfs_ops,
5635	.release = kmem_cache_release,
5636};
5637
5638static int uevent_filter(struct kset *kset, struct kobject *kobj)
5639{
5640	struct kobj_type *ktype = get_ktype(kobj);
5641
5642	if (ktype == &slab_ktype)
5643		return 1;
5644	return 0;
5645}
5646
5647static const struct kset_uevent_ops slab_uevent_ops = {
5648	.filter = uevent_filter,
5649};
5650
5651static struct kset *slab_kset;
5652
5653static inline struct kset *cache_kset(struct kmem_cache *s)
5654{
5655#ifdef CONFIG_MEMCG
5656	if (!is_root_cache(s))
5657		return s->memcg_params.root_cache->memcg_kset;
5658#endif
5659	return slab_kset;
5660}
5661
5662#define ID_STR_LENGTH 64
5663
5664/* Create a unique string id for a slab cache:
5665 *
5666 * Format	:[flags-]size
5667 */
5668static char *create_unique_id(struct kmem_cache *s)
5669{
5670	char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
5671	char *p = name;
5672
5673	BUG_ON(!name);
5674
5675	*p++ = ':';
5676	/*
5677	 * First come the flags affecting slabcache operations. We only
5678	 * get here for aliasable slabs so we do not need to support
5679	 * too many flags. The flags here must cover all flags that
5680	 * are matched during merging to guarantee that the id is
5681	 * unique.
5682	 */
5683	if (s->flags & SLAB_CACHE_DMA)
5684		*p++ = 'd';
5685	if (s->flags & SLAB_RECLAIM_ACCOUNT)
5686		*p++ = 'a';
5687	if (s->flags & SLAB_CONSISTENCY_CHECKS)
5688		*p++ = 'F';
5689	if (s->flags & SLAB_ACCOUNT)
5690		*p++ = 'A';
5691	if (p != name + 1)
5692		*p++ = '-';
5693	p += sprintf(p, "%07u", s->size);
5694
5695	BUG_ON(p > name + ID_STR_LENGTH - 1);
5696	return name;
5697}
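/*
 * Illustrative example (not from this file): for a hypothetical 192 byte
 * SLAB_ACCOUNT cache, create_unique_id() above would produce ":A-0000192";
 * a cache with none of the matched flags would get ":0000192".
 */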
5698
5699static void sysfs_slab_remove_workfn(struct work_struct *work)
5700{
5701	struct kmem_cache *s =
5702		container_of(work, struct kmem_cache, kobj_remove_work);
5703
5704	if (!s->kobj.state_in_sysfs)
5705		/*
5706		 * For a memcg cache, this may be called during
5707		 * deactivation and again on shutdown.  Remove only once.
5708		 * A cache is never shut down before deactivation is
5709		 * complete, so no need to worry about synchronization.
5710		 */
5711		goto out;
5712
5713#ifdef CONFIG_MEMCG
5714	kset_unregister(s->memcg_kset);
5715#endif
5716	kobject_uevent(&s->kobj, KOBJ_REMOVE);
5717	kobject_del(&s->kobj);
5718out:
5719	kobject_put(&s->kobj);
5720}
5721
5722static int sysfs_slab_add(struct kmem_cache *s)
5723{
5724	int err;
5725	const char *name;
5726	struct kset *kset = cache_kset(s);
5727	int unmergeable = slab_unmergeable(s);
5728
5729	INIT_WORK(&s->kobj_remove_work, sysfs_slab_remove_workfn);
5730
5731	if (!kset) {
5732		kobject_init(&s->kobj, &slab_ktype);
5733		return 0;
5734	}
5735
5736	if (!unmergeable && disable_higher_order_debug &&
5737			(slub_debug & DEBUG_METADATA_FLAGS))
5738		unmergeable = 1;
5739
5740	if (unmergeable) {
5741		/*
5742		 * Slabcache can never be merged so we can use the name proper.
5743		 * This is typically the case for debug situations. In that
5744		 * case we can catch duplicate names easily.
5745		 */
5746		sysfs_remove_link(&slab_kset->kobj, s->name);
5747		name = s->name;
5748	} else {
5749		/*
5750		 * Create a unique name for the slab as a target
5751		 * for the symlinks.
5752		 */
5753		name = create_unique_id(s);
5754	}
5755
5756	s->kobj.kset = kset;
5757	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
5758	if (err)
5759		goto out;
5760
5761	err = sysfs_create_group(&s->kobj, &slab_attr_group);
5762	if (err)
5763		goto out_del_kobj;
5764
5765#ifdef CONFIG_MEMCG
5766	if (is_root_cache(s) && memcg_sysfs_enabled) {
5767		s->memcg_kset = kset_create_and_add("cgroup", NULL, &s->kobj);
5768		if (!s->memcg_kset) {
5769			err = -ENOMEM;
5770			goto out_del_kobj;
5771		}
5772	}
5773#endif
5774
5775	kobject_uevent(&s->kobj, KOBJ_ADD);
5776	if (!unmergeable) {
5777		/* Setup first alias */
5778		sysfs_slab_alias(s, s->name);
5779	}
5780out:
5781	if (!unmergeable)
5782		kfree(name);
5783	return err;
5784out_del_kobj:
5785	kobject_del(&s->kobj);
5786	goto out;
5787}
5788
5789static void sysfs_slab_remove(struct kmem_cache *s)
5790{
5791	if (slab_state < FULL)
5792		/*
5793		 * Sysfs has not been setup yet so no need to remove the
5794		 * cache from sysfs.
5795		 */
5796		return;
5797
5798	kobject_get(&s->kobj);
5799	schedule_work(&s->kobj_remove_work);
5800}
5801
5802void sysfs_slab_release(struct kmem_cache *s)
5803{
5804	if (slab_state >= FULL)
5805		kobject_put(&s->kobj);
5806}
5807
5808/*
5809 * Need to buffer aliases during bootup until sysfs becomes
5810 * available lest we lose that information.
5811 */
5812struct saved_alias {
5813	struct kmem_cache *s;
5814	const char *name;
5815	struct saved_alias *next;
5816};
5817
5818static struct saved_alias *alias_list;
5819
5820static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
5821{
5822	struct saved_alias *al;
5823
5824	if (slab_state == FULL) {
5825		/*
5826		 * If we have a leftover link then remove it.
5827		 */
5828		sysfs_remove_link(&slab_kset->kobj, name);
5829		return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
5830	}
5831
5832	al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
5833	if (!al)
5834		return -ENOMEM;
5835
5836	al->s = s;
5837	al->name = name;
5838	al->next = alias_list;
5839	alias_list = al;
5840	return 0;
5841}
5842
5843static int __init slab_sysfs_init(void)
5844{
5845	struct kmem_cache *s;
5846	int err;
5847
5848	mutex_lock(&slab_mutex);
5849
5850	slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
5851	if (!slab_kset) {
5852		mutex_unlock(&slab_mutex);
5853		pr_err("Cannot register slab subsystem.\n");
5854		return -ENOSYS;
5855	}
5856
5857	slab_state = FULL;
5858
5859	list_for_each_entry(s, &slab_caches, list) {
5860		err = sysfs_slab_add(s);
5861		if (err)
5862			pr_err("SLUB: Unable to add boot slab %s to sysfs\n",
5863			       s->name);
5864	}
5865
5866	while (alias_list) {
5867		struct saved_alias *al = alias_list;
5868
5869		alias_list = alias_list->next;
5870		err = sysfs_slab_alias(al->s, al->name);
5871		if (err)
5872			pr_err("SLUB: Unable to add boot slab alias %s to sysfs\n",
5873			       al->name);
5874		kfree(al);
5875	}
5876
5877	mutex_unlock(&slab_mutex);
5878	resiliency_test();
5879	return 0;
5880}
5881
5882__initcall(slab_sysfs_init);
5883#endif /* CONFIG_SYSFS */
5884
5885/*
5886 * The /proc/slabinfo ABI
5887 */
5888#ifdef CONFIG_SLUB_DEBUG
5889void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
5890{
5891	unsigned long nr_slabs = 0;
5892	unsigned long nr_objs = 0;
5893	unsigned long nr_free = 0;
5894	int node;
5895	struct kmem_cache_node *n;
5896
5897	for_each_kmem_cache_node(s, node, n) {
5898		nr_slabs += node_nr_slabs(n);
5899		nr_objs += node_nr_objs(n);
5900		nr_free += count_partial(n, count_free);
5901	}
5902
5903	sinfo->active_objs = nr_objs - nr_free;
5904	sinfo->num_objs = nr_objs;
5905	sinfo->active_slabs = nr_slabs;
5906	sinfo->num_slabs = nr_slabs;
5907	sinfo->objects_per_slab = oo_objects(s->oo);
5908	sinfo->cache_order = oo_order(s->oo);
5909}
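/*
 * Illustrative example (not from this file): the fields filled in above end
 * up in /proc/slabinfo lines such as
 *
 *	kmalloc-64  36864  38400   64   64    1 : tunables 0 0 0 : slabdata 600 600 0
 *
 * i.e. active objects, total objects, object size, objects per slab and
 * pages per slab; SLUB leaves the tunables at zero. The numbers shown are
 * hypothetical.
 */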
5910
5911void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s)
5912{
5913}
5914
5915ssize_t slabinfo_write(struct file *file, const char __user *buffer,
5916		       size_t count, loff_t *ppos)
5917{
5918	return -EIO;
5919}
5920#endif /* CONFIG_SLUB_DEBUG */