Linux Audio

Check our new training course

Loading...
v4.17
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * SLUB: A slab allocator that limits cache line use instead of queuing
   4 * objects in per cpu and per node lists.
   5 *
   6 * The allocator synchronizes using per slab locks or atomic operatios
   7 * and only uses a centralized lock to manage a pool of partial slabs.
   8 *
   9 * (C) 2007 SGI, Christoph Lameter
  10 * (C) 2011 Linux Foundation, Christoph Lameter
  11 */
  12
  13#include <linux/mm.h>
  14#include <linux/swap.h> /* struct reclaim_state */
  15#include <linux/module.h>
  16#include <linux/bit_spinlock.h>
  17#include <linux/interrupt.h>
 
  18#include <linux/bitops.h>
  19#include <linux/slab.h>
  20#include "slab.h"
  21#include <linux/proc_fs.h>
  22#include <linux/notifier.h>
  23#include <linux/seq_file.h>
  24#include <linux/kasan.h>
  25#include <linux/cpu.h>
  26#include <linux/cpuset.h>
  27#include <linux/mempolicy.h>
  28#include <linux/ctype.h>
  29#include <linux/debugobjects.h>
  30#include <linux/kallsyms.h>
 
  31#include <linux/memory.h>
  32#include <linux/math64.h>
  33#include <linux/fault-inject.h>
  34#include <linux/stacktrace.h>
  35#include <linux/prefetch.h>
  36#include <linux/memcontrol.h>
  37#include <linux/random.h>
 
  38
 
  39#include <trace/events/kmem.h>
  40
  41#include "internal.h"
  42
  43/*
  44 * Lock order:
  45 *   1. slab_mutex (Global Mutex)
  46 *   2. node->list_lock
  47 *   3. slab_lock(page) (Only on some arches and for debugging)
  48 *
  49 *   slab_mutex
  50 *
  51 *   The role of the slab_mutex is to protect the list of all the slabs
  52 *   and to synchronize major metadata changes to slab cache structures.
  53 *
  54 *   The slab_lock is only used for debugging and on arches that do not
  55 *   have the ability to do a cmpxchg_double. It only protects the second
  56 *   double word in the page struct. Meaning
  57 *	A. page->freelist	-> List of object free in a page
  58 *	B. page->counters	-> Counters of objects
  59 *	C. page->frozen		-> frozen state
 
  60 *
  61 *   If a slab is frozen then it is exempt from list management. It is not
  62 *   on any list. The processor that froze the slab is the one who can
  63 *   perform list operations on the page. Other processors may put objects
  64 *   onto the freelist but the processor that froze the slab is the only
  65 *   one that can retrieve the objects from the page's freelist.
 
  66 *
  67 *   The list_lock protects the partial and full list on each node and
  68 *   the partial slab counter. If taken then no new slabs may be added or
  69 *   removed from the lists nor make the number of partial slabs be modified.
  70 *   (Note that the total number of slabs is an atomic value that may be
  71 *   modified without taking the list lock).
  72 *
  73 *   The list_lock is a centralized lock and thus we avoid taking it as
  74 *   much as possible. As long as SLUB does not have to handle partial
  75 *   slabs, operations can continue without any centralized lock. F.e.
  76 *   allocating a long series of objects that fill up slabs does not require
  77 *   the list lock.
  78 *   Interrupts are disabled during allocation and deallocation in order to
  79 *   make the slab allocator safe to use in the context of an irq. In addition
  80 *   interrupts are disabled to ensure that the processor does not change
  81 *   while handling per_cpu slabs, due to kernel preemption.
  82 *
  83 * SLUB assigns one slab for allocation to each processor.
  84 * Allocations only occur from these slabs called cpu slabs.
  85 *
  86 * Slabs with free elements are kept on a partial list and during regular
  87 * operations no list for full slabs is used. If an object in a full slab is
  88 * freed then the slab will show up again on the partial lists.
  89 * We track full slabs for debugging purposes though because otherwise we
  90 * cannot scan all objects.
  91 *
  92 * Slabs are freed when they become empty. Teardown and setup is
  93 * minimal so we rely on the page allocators per cpu caches for
  94 * fast frees and allocs.
  95 *
  96 * Overloading of page flags that are otherwise used for LRU management.
  97 *
  98 * PageActive 		The slab is frozen and exempt from list processing.
  99 * 			This means that the slab is dedicated to a purpose
 100 * 			such as satisfying allocations for a specific
 101 * 			processor. Objects may be freed in the slab while
 102 * 			it is frozen but slab_free will then skip the usual
 103 * 			list operations. It is up to the processor holding
 104 * 			the slab to integrate the slab into the slab lists
 105 * 			when the slab is no longer needed.
 106 *
 107 * 			One use of this flag is to mark slabs that are
 108 * 			used for allocations. Then such a slab becomes a cpu
 109 * 			slab. The cpu slab may be equipped with an additional
 110 * 			freelist that allows lockless access to
 111 * 			free objects in addition to the regular freelist
 112 * 			that requires the slab lock.
 113 *
 114 * PageError		Slab requires special handling due to debug
 115 * 			options set. This moves	slab handling out of
 116 * 			the fast path and disables lockless freelists.
 117 */
 118
 119static inline int kmem_cache_debug(struct kmem_cache *s)
 120{
 121#ifdef CONFIG_SLUB_DEBUG
 122	return unlikely(s->flags & SLAB_DEBUG_FLAGS);
 
 123#else
 124	return 0;
 125#endif
 
 
 
 
 
 126}
 127
 128void *fixup_red_left(struct kmem_cache *s, void *p)
 129{
 130	if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE)
 131		p += s->red_left_pad;
 132
 133	return p;
 134}
 135
 136static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
 137{
 138#ifdef CONFIG_SLUB_CPU_PARTIAL
 139	return !kmem_cache_debug(s);
 140#else
 141	return false;
 142#endif
 143}
 144
 145/*
 146 * Issues still to be resolved:
 147 *
 148 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
 149 *
 150 * - Variable sizing of the per node arrays
 151 */
 152
 153/* Enable to test recovery from slab corruption on boot */
 154#undef SLUB_RESILIENCY_TEST
 155
 156/* Enable to log cmpxchg failures */
 157#undef SLUB_DEBUG_CMPXCHG
 158
 159/*
 160 * Mininum number of partial slabs. These will be left on the partial
 161 * lists even if they are empty. kmem_cache_shrink may reclaim them.
 162 */
 163#define MIN_PARTIAL 5
 164
 165/*
 166 * Maximum number of desirable partial slabs.
 167 * The existence of more partial slabs makes kmem_cache_shrink
 168 * sort the partial list by the number of objects in use.
 169 */
 170#define MAX_PARTIAL 10
 171
 172#define DEBUG_DEFAULT_FLAGS (SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \
 173				SLAB_POISON | SLAB_STORE_USER)
 174
 175/*
 176 * These debug flags cannot use CMPXCHG because there might be consistency
 177 * issues when checking or reading debug information
 178 */
 179#define SLAB_NO_CMPXCHG (SLAB_CONSISTENCY_CHECKS | SLAB_STORE_USER | \
 180				SLAB_TRACE)
 181
 182
 183/*
 184 * Debugging flags that require metadata to be stored in the slab.  These get
 185 * disabled when slub_debug=O is used and a cache's min order increases with
 186 * metadata.
 187 */
 188#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
 189
 190#define OO_SHIFT	16
 191#define OO_MASK		((1 << OO_SHIFT) - 1)
 192#define MAX_OBJS_PER_PAGE	32767 /* since page.objects is u15 */
 193
 194/* Internal SLUB flags */
 195/* Poison object */
 196#define __OBJECT_POISON		((slab_flags_t __force)0x80000000U)
 197/* Use cmpxchg_double */
 198#define __CMPXCHG_DOUBLE	((slab_flags_t __force)0x40000000U)
 199
 200/*
 201 * Tracking user of a slab.
 202 */
 203#define TRACK_ADDRS_COUNT 16
 204struct track {
 205	unsigned long addr;	/* Called from address */
 206#ifdef CONFIG_STACKTRACE
 207	unsigned long addrs[TRACK_ADDRS_COUNT];	/* Called from address */
 208#endif
 209	int cpu;		/* Was running on cpu */
 210	int pid;		/* Pid context */
 211	unsigned long when;	/* When did the operation occur */
 212};
 213
 214enum track_item { TRACK_ALLOC, TRACK_FREE };
 215
 216#ifdef CONFIG_SYSFS
 217static int sysfs_slab_add(struct kmem_cache *);
 218static int sysfs_slab_alias(struct kmem_cache *, const char *);
 219static void memcg_propagate_slab_attrs(struct kmem_cache *s);
 220static void sysfs_slab_remove(struct kmem_cache *s);
 221#else
 222static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
 223static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
 224							{ return 0; }
 225static inline void memcg_propagate_slab_attrs(struct kmem_cache *s) { }
 226static inline void sysfs_slab_remove(struct kmem_cache *s) { }
 
 
 
 
 227#endif
 228
 229static inline void stat(const struct kmem_cache *s, enum stat_item si)
 230{
 231#ifdef CONFIG_SLUB_STATS
 232	/*
 233	 * The rmw is racy on a preemptible kernel but this is acceptable, so
 234	 * avoid this_cpu_add()'s irq-disable overhead.
 235	 */
 236	raw_cpu_inc(s->cpu_slab->stat[si]);
 237#endif
 238}
 239
 
 
 
 
 
 
 
 
 240/********************************************************************
 241 * 			Core slab cache functions
 242 *******************************************************************/
 243
 244/*
 245 * Returns freelist pointer (ptr). With hardening, this is obfuscated
 246 * with an XOR of the address where the pointer is held and a per-cache
 247 * random number.
 248 */
 249static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
 250				 unsigned long ptr_addr)
 251{
 252#ifdef CONFIG_SLAB_FREELIST_HARDENED
 253	return (void *)((unsigned long)ptr ^ s->random ^ ptr_addr);
 
 
 
 
 
 
 
 
 
 
 
 254#else
 255	return ptr;
 256#endif
 257}
 258
 259/* Returns the freelist pointer recorded at location ptr_addr. */
 260static inline void *freelist_dereference(const struct kmem_cache *s,
 261					 void *ptr_addr)
 262{
 263	return freelist_ptr(s, (void *)*(unsigned long *)(ptr_addr),
 264			    (unsigned long)ptr_addr);
 265}
 266
 267static inline void *get_freepointer(struct kmem_cache *s, void *object)
 268{
 
 269	return freelist_dereference(s, object + s->offset);
 270}
 271
 272static void prefetch_freepointer(const struct kmem_cache *s, void *object)
 273{
 274	if (object)
 275		prefetch(freelist_dereference(s, object + s->offset));
 276}
 277
 278static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
 279{
 280	unsigned long freepointer_addr;
 281	void *p;
 282
 283	if (!debug_pagealloc_enabled())
 284		return get_freepointer(s, object);
 285
 
 286	freepointer_addr = (unsigned long)object + s->offset;
 287	probe_kernel_read(&p, (void **)freepointer_addr, sizeof(p));
 288	return freelist_ptr(s, p, freepointer_addr);
 289}
 290
 291static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
 292{
 293	unsigned long freeptr_addr = (unsigned long)object + s->offset;
 294
 295#ifdef CONFIG_SLAB_FREELIST_HARDENED
 296	BUG_ON(object == fp); /* naive detection of double free or corruption */
 297#endif
 298
 
 299	*(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr);
 300}
 301
 302/* Loop over all objects in a slab */
 303#define for_each_object(__p, __s, __addr, __objects) \
 304	for (__p = fixup_red_left(__s, __addr); \
 305		__p < (__addr) + (__objects) * (__s)->size; \
 306		__p += (__s)->size)
 307
 308#define for_each_object_idx(__p, __idx, __s, __addr, __objects) \
 309	for (__p = fixup_red_left(__s, __addr), __idx = 1; \
 310		__idx <= __objects; \
 311		__p += (__s)->size, __idx++)
 312
 313/* Determine object index from a given position */
 314static inline unsigned int slab_index(void *p, struct kmem_cache *s, void *addr)
 315{
 316	return (p - addr) / s->size;
 317}
 318
 319static inline unsigned int order_objects(unsigned int order, unsigned int size, unsigned int reserved)
 320{
 321	return (((unsigned int)PAGE_SIZE << order) - reserved) / size;
 322}
 323
 324static inline struct kmem_cache_order_objects oo_make(unsigned int order,
 325		unsigned int size, unsigned int reserved)
 326{
 327	struct kmem_cache_order_objects x = {
 328		(order << OO_SHIFT) + order_objects(order, size, reserved)
 329	};
 330
 331	return x;
 332}
 333
 334static inline unsigned int oo_order(struct kmem_cache_order_objects x)
 335{
 336	return x.x >> OO_SHIFT;
 337}
 338
 339static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
 340{
 341	return x.x & OO_MASK;
 342}
 343
 344/*
 345 * Per slab locking using the pagelock
 346 */
 347static __always_inline void slab_lock(struct page *page)
 348{
 349	VM_BUG_ON_PAGE(PageTail(page), page);
 350	bit_spin_lock(PG_locked, &page->flags);
 351}
 352
 353static __always_inline void slab_unlock(struct page *page)
 354{
 355	VM_BUG_ON_PAGE(PageTail(page), page);
 356	__bit_spin_unlock(PG_locked, &page->flags);
 357}
 358
 359static inline void set_page_slub_counters(struct page *page, unsigned long counters_new)
 360{
 361	struct page tmp;
 362	tmp.counters = counters_new;
 363	/*
 364	 * page->counters can cover frozen/inuse/objects as well
 365	 * as page->_refcount.  If we assign to ->counters directly
 366	 * we run the risk of losing updates to page->_refcount, so
 367	 * be careful and only assign to the fields we need.
 368	 */
 369	page->frozen  = tmp.frozen;
 370	page->inuse   = tmp.inuse;
 371	page->objects = tmp.objects;
 372}
 373
 374/* Interrupts must be disabled (for the fallback code to work right) */
 375static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
 376		void *freelist_old, unsigned long counters_old,
 377		void *freelist_new, unsigned long counters_new,
 378		const char *n)
 379{
 380	VM_BUG_ON(!irqs_disabled());
 381#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
 382    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
 383	if (s->flags & __CMPXCHG_DOUBLE) {
 384		if (cmpxchg_double(&page->freelist, &page->counters,
 385				   freelist_old, counters_old,
 386				   freelist_new, counters_new))
 387			return true;
 388	} else
 389#endif
 390	{
 391		slab_lock(page);
 392		if (page->freelist == freelist_old &&
 393					page->counters == counters_old) {
 394			page->freelist = freelist_new;
 395			set_page_slub_counters(page, counters_new);
 396			slab_unlock(page);
 397			return true;
 398		}
 399		slab_unlock(page);
 400	}
 401
 402	cpu_relax();
 403	stat(s, CMPXCHG_DOUBLE_FAIL);
 404
 405#ifdef SLUB_DEBUG_CMPXCHG
 406	pr_info("%s %s: cmpxchg double redo ", n, s->name);
 407#endif
 408
 409	return false;
 410}
 411
 412static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
 413		void *freelist_old, unsigned long counters_old,
 414		void *freelist_new, unsigned long counters_new,
 415		const char *n)
 416{
 417#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
 418    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
 419	if (s->flags & __CMPXCHG_DOUBLE) {
 420		if (cmpxchg_double(&page->freelist, &page->counters,
 421				   freelist_old, counters_old,
 422				   freelist_new, counters_new))
 423			return true;
 424	} else
 425#endif
 426	{
 427		unsigned long flags;
 428
 429		local_irq_save(flags);
 430		slab_lock(page);
 431		if (page->freelist == freelist_old &&
 432					page->counters == counters_old) {
 433			page->freelist = freelist_new;
 434			set_page_slub_counters(page, counters_new);
 435			slab_unlock(page);
 436			local_irq_restore(flags);
 437			return true;
 438		}
 439		slab_unlock(page);
 440		local_irq_restore(flags);
 441	}
 442
 443	cpu_relax();
 444	stat(s, CMPXCHG_DOUBLE_FAIL);
 445
 446#ifdef SLUB_DEBUG_CMPXCHG
 447	pr_info("%s %s: cmpxchg double redo ", n, s->name);
 448#endif
 449
 450	return false;
 451}
 452
 453#ifdef CONFIG_SLUB_DEBUG
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 454/*
 455 * Determine a map of object in use on a page.
 456 *
 457 * Node listlock must be held to guarantee that the page does
 458 * not vanish from under us.
 459 */
 460static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
 
 461{
 462	void *p;
 463	void *addr = page_address(page);
 464
 
 
 
 
 
 
 465	for (p = page->freelist; p; p = get_freepointer(s, p))
 466		set_bit(slab_index(p, s, addr), map);
 
 
 
 
 
 
 
 
 467}
 468
 469static inline unsigned int size_from_object(struct kmem_cache *s)
 470{
 471	if (s->flags & SLAB_RED_ZONE)
 472		return s->size - s->red_left_pad;
 473
 474	return s->size;
 475}
 476
 477static inline void *restore_red_left(struct kmem_cache *s, void *p)
 478{
 479	if (s->flags & SLAB_RED_ZONE)
 480		p -= s->red_left_pad;
 481
 482	return p;
 483}
 484
 485/*
 486 * Debug settings:
 487 */
 488#if defined(CONFIG_SLUB_DEBUG_ON)
 489static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS;
 490#else
 491static slab_flags_t slub_debug;
 492#endif
 493
 494static char *slub_debug_slabs;
 495static int disable_higher_order_debug;
 496
 497/*
 498 * slub is about to manipulate internal object metadata.  This memory lies
 499 * outside the range of the allocated object, so accessing it would normally
 500 * be reported by kasan as a bounds error.  metadata_access_enable() is used
 501 * to tell kasan that these accesses are OK.
 502 */
 503static inline void metadata_access_enable(void)
 504{
 505	kasan_disable_current();
 506}
 507
 508static inline void metadata_access_disable(void)
 509{
 510	kasan_enable_current();
 511}
 512
 513/*
 514 * Object debugging
 515 */
 516
 517/* Verify that a pointer has an address that is valid within a slab page */
 518static inline int check_valid_pointer(struct kmem_cache *s,
 519				struct page *page, void *object)
 520{
 521	void *base;
 522
 523	if (!object)
 524		return 1;
 525
 526	base = page_address(page);
 
 527	object = restore_red_left(s, object);
 528	if (object < base || object >= base + page->objects * s->size ||
 529		(object - base) % s->size) {
 530		return 0;
 531	}
 532
 533	return 1;
 534}
 535
 536static void print_section(char *level, char *text, u8 *addr,
 537			  unsigned int length)
 538{
 539	metadata_access_enable();
 540	print_hex_dump(level, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
 541			length, 1);
 542	metadata_access_disable();
 543}
 544
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 545static struct track *get_track(struct kmem_cache *s, void *object,
 546	enum track_item alloc)
 547{
 548	struct track *p;
 549
 550	if (s->offset)
 551		p = object + s->offset + sizeof(void *);
 552	else
 553		p = object + s->inuse;
 554
 555	return p + alloc;
 556}
 557
 558static void set_track(struct kmem_cache *s, void *object,
 559			enum track_item alloc, unsigned long addr)
 560{
 561	struct track *p = get_track(s, object, alloc);
 562
 563	if (addr) {
 564#ifdef CONFIG_STACKTRACE
 565		struct stack_trace trace;
 566		int i;
 567
 568		trace.nr_entries = 0;
 569		trace.max_entries = TRACK_ADDRS_COUNT;
 570		trace.entries = p->addrs;
 571		trace.skip = 3;
 572		metadata_access_enable();
 573		save_stack_trace(&trace);
 
 574		metadata_access_disable();
 575
 576		/* See rant in lockdep.c */
 577		if (trace.nr_entries != 0 &&
 578		    trace.entries[trace.nr_entries - 1] == ULONG_MAX)
 579			trace.nr_entries--;
 580
 581		for (i = trace.nr_entries; i < TRACK_ADDRS_COUNT; i++)
 582			p->addrs[i] = 0;
 583#endif
 584		p->addr = addr;
 585		p->cpu = smp_processor_id();
 586		p->pid = current->pid;
 587		p->when = jiffies;
 588	} else
 589		memset(p, 0, sizeof(struct track));
 
 590}
 591
 592static void init_tracking(struct kmem_cache *s, void *object)
 593{
 594	if (!(s->flags & SLAB_STORE_USER))
 595		return;
 596
 597	set_track(s, object, TRACK_FREE, 0UL);
 598	set_track(s, object, TRACK_ALLOC, 0UL);
 599}
 600
 601static void print_track(const char *s, struct track *t, unsigned long pr_time)
 602{
 603	if (!t->addr)
 604		return;
 605
 606	pr_err("INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
 607	       s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid);
 608#ifdef CONFIG_STACKTRACE
 609	{
 610		int i;
 611		for (i = 0; i < TRACK_ADDRS_COUNT; i++)
 612			if (t->addrs[i])
 613				pr_err("\t%pS\n", (void *)t->addrs[i]);
 614			else
 615				break;
 616	}
 617#endif
 618}
 619
 620static void print_tracking(struct kmem_cache *s, void *object)
 621{
 622	unsigned long pr_time = jiffies;
 623	if (!(s->flags & SLAB_STORE_USER))
 624		return;
 625
 626	print_track("Allocated", get_track(s, object, TRACK_ALLOC), pr_time);
 627	print_track("Freed", get_track(s, object, TRACK_FREE), pr_time);
 628}
 629
 630static void print_page_info(struct page *page)
 631{
 632	pr_err("INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
 633	       page, page->objects, page->inuse, page->freelist, page->flags);
 
 634
 635}
 636
 637static void slab_bug(struct kmem_cache *s, char *fmt, ...)
 638{
 639	struct va_format vaf;
 640	va_list args;
 641
 642	va_start(args, fmt);
 643	vaf.fmt = fmt;
 644	vaf.va = &args;
 645	pr_err("=============================================================================\n");
 646	pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf);
 647	pr_err("-----------------------------------------------------------------------------\n\n");
 648
 649	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
 650	va_end(args);
 651}
 652
 
 653static void slab_fix(struct kmem_cache *s, char *fmt, ...)
 654{
 655	struct va_format vaf;
 656	va_list args;
 657
 
 
 
 658	va_start(args, fmt);
 659	vaf.fmt = fmt;
 660	vaf.va = &args;
 661	pr_err("FIX %s: %pV\n", s->name, &vaf);
 662	va_end(args);
 663}
 664
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 665static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
 666{
 667	unsigned int off;	/* Offset of last byte */
 668	u8 *addr = page_address(page);
 669
 670	print_tracking(s, p);
 671
 672	print_page_info(page);
 673
 674	pr_err("INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
 675	       p, p - addr, get_freepointer(s, p));
 676
 677	if (s->flags & SLAB_RED_ZONE)
 678		print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
 679			      s->red_left_pad);
 680	else if (p > addr + 16)
 681		print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
 682
 683	print_section(KERN_ERR, "Object ", p,
 684		      min_t(unsigned int, s->object_size, PAGE_SIZE));
 685	if (s->flags & SLAB_RED_ZONE)
 686		print_section(KERN_ERR, "Redzone ", p + s->object_size,
 687			s->inuse - s->object_size);
 688
 689	if (s->offset)
 690		off = s->offset + sizeof(void *);
 691	else
 692		off = s->inuse;
 693
 694	if (s->flags & SLAB_STORE_USER)
 695		off += 2 * sizeof(struct track);
 696
 697	off += kasan_metadata_size(s);
 698
 699	if (off != size_from_object(s))
 700		/* Beginning of the filler is the free pointer */
 701		print_section(KERN_ERR, "Padding ", p + off,
 702			      size_from_object(s) - off);
 703
 704	dump_stack();
 705}
 706
 707void object_err(struct kmem_cache *s, struct page *page,
 708			u8 *object, char *reason)
 709{
 
 
 
 710	slab_bug(s, "%s", reason);
 711	print_trailer(s, page, object);
 
 712}
 713
 714static void slab_err(struct kmem_cache *s, struct page *page,
 715			const char *fmt, ...)
 716{
 717	va_list args;
 718	char buf[100];
 719
 
 
 
 720	va_start(args, fmt);
 721	vsnprintf(buf, sizeof(buf), fmt, args);
 722	va_end(args);
 723	slab_bug(s, "%s", buf);
 724	print_page_info(page);
 725	dump_stack();
 
 726}
 727
 728static void init_object(struct kmem_cache *s, void *object, u8 val)
 729{
 730	u8 *p = object;
 731
 732	if (s->flags & SLAB_RED_ZONE)
 733		memset(p - s->red_left_pad, val, s->red_left_pad);
 734
 735	if (s->flags & __OBJECT_POISON) {
 736		memset(p, POISON_FREE, s->object_size - 1);
 737		p[s->object_size - 1] = POISON_END;
 738	}
 739
 740	if (s->flags & SLAB_RED_ZONE)
 741		memset(p + s->object_size, val, s->inuse - s->object_size);
 742}
 743
 744static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
 745						void *from, void *to)
 746{
 747	slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
 748	memset(from, data, to - from);
 749}
 750
 751static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
 752			u8 *object, char *what,
 753			u8 *start, unsigned int value, unsigned int bytes)
 754{
 755	u8 *fault;
 756	u8 *end;
 
 757
 758	metadata_access_enable();
 759	fault = memchr_inv(start, value, bytes);
 760	metadata_access_disable();
 761	if (!fault)
 762		return 1;
 763
 764	end = start + bytes;
 765	while (end > fault && end[-1] == value)
 766		end--;
 767
 
 
 
 768	slab_bug(s, "%s overwritten", what);
 769	pr_err("INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
 770					fault, end - 1, fault[0], value);
 
 771	print_trailer(s, page, object);
 
 772
 
 773	restore_bytes(s, what, value, fault, end);
 774	return 0;
 775}
 776
 777/*
 778 * Object layout:
 779 *
 780 * object address
 781 * 	Bytes of the object to be managed.
 782 * 	If the freepointer may overlay the object then the free
 783 * 	pointer is the first word of the object.
 784 *
 785 * 	Poisoning uses 0x6b (POISON_FREE) and the last byte is
 786 * 	0xa5 (POISON_END)
 787 *
 788 * object + s->object_size
 789 * 	Padding to reach word boundary. This is also used for Redzoning.
 790 * 	Padding is extended by another word if Redzoning is enabled and
 791 * 	object_size == inuse.
 792 *
 793 * 	We fill with 0xbb (RED_INACTIVE) for inactive objects and with
 794 * 	0xcc (RED_ACTIVE) for objects in use.
 795 *
 796 * object + s->inuse
 797 * 	Meta data starts here.
 798 *
 799 * 	A. Free pointer (if we cannot overwrite object on free)
 800 * 	B. Tracking data for SLAB_STORE_USER
 801 * 	C. Padding to reach required alignment boundary or at mininum
 802 * 		one word if debugging is on to be able to detect writes
 803 * 		before the word boundary.
 804 *
 805 *	Padding is done using 0x5a (POISON_INUSE)
 806 *
 807 * object + s->size
 808 * 	Nothing is used beyond s->size.
 809 *
 810 * If slabcaches are merged then the object_size and inuse boundaries are mostly
 811 * ignored. And therefore no slab options that rely on these boundaries
 812 * may be used with merged slabcaches.
 813 */
 814
 815static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
 816{
 817	unsigned long off = s->inuse;	/* The end of info */
 818
 819	if (s->offset)
 820		/* Freepointer is placed after the object. */
 821		off += sizeof(void *);
 822
 823	if (s->flags & SLAB_STORE_USER)
 824		/* We also have user information there */
 825		off += 2 * sizeof(struct track);
 826
 827	off += kasan_metadata_size(s);
 828
 829	if (size_from_object(s) == off)
 830		return 1;
 831
 832	return check_bytes_and_report(s, page, p, "Object padding",
 833			p + off, POISON_INUSE, size_from_object(s) - off);
 834}
 835
 836/* Check the pad bytes at the end of a slab page */
 837static int slab_pad_check(struct kmem_cache *s, struct page *page)
 838{
 839	u8 *start;
 840	u8 *fault;
 841	u8 *end;
 842	u8 *pad;
 843	int length;
 844	int remainder;
 845
 846	if (!(s->flags & SLAB_POISON))
 847		return 1;
 848
 849	start = page_address(page);
 850	length = (PAGE_SIZE << compound_order(page)) - s->reserved;
 851	end = start + length;
 852	remainder = length % s->size;
 853	if (!remainder)
 854		return 1;
 855
 856	pad = end - remainder;
 857	metadata_access_enable();
 858	fault = memchr_inv(pad, POISON_INUSE, remainder);
 859	metadata_access_disable();
 860	if (!fault)
 861		return 1;
 862	while (end > fault && end[-1] == POISON_INUSE)
 863		end--;
 864
 865	slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
 
 866	print_section(KERN_ERR, "Padding ", pad, remainder);
 867
 868	restore_bytes(s, "slab padding", POISON_INUSE, fault, end);
 869	return 0;
 870}
 871
 872static int check_object(struct kmem_cache *s, struct page *page,
 873					void *object, u8 val)
 874{
 875	u8 *p = object;
 876	u8 *endobject = object + s->object_size;
 877
 878	if (s->flags & SLAB_RED_ZONE) {
 879		if (!check_bytes_and_report(s, page, object, "Redzone",
 880			object - s->red_left_pad, val, s->red_left_pad))
 881			return 0;
 882
 883		if (!check_bytes_and_report(s, page, object, "Redzone",
 884			endobject, val, s->inuse - s->object_size))
 885			return 0;
 886	} else {
 887		if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
 888			check_bytes_and_report(s, page, p, "Alignment padding",
 889				endobject, POISON_INUSE,
 890				s->inuse - s->object_size);
 891		}
 892	}
 893
 894	if (s->flags & SLAB_POISON) {
 895		if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
 896			(!check_bytes_and_report(s, page, p, "Poison", p,
 897					POISON_FREE, s->object_size - 1) ||
 898			 !check_bytes_and_report(s, page, p, "Poison",
 899				p + s->object_size - 1, POISON_END, 1)))
 900			return 0;
 901		/*
 902		 * check_pad_bytes cleans up on its own.
 903		 */
 904		check_pad_bytes(s, page, p);
 905	}
 906
 907	if (!s->offset && val == SLUB_RED_ACTIVE)
 908		/*
 909		 * Object and freepointer overlap. Cannot check
 910		 * freepointer while object is allocated.
 911		 */
 912		return 1;
 913
 914	/* Check free pointer validity */
 915	if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
 916		object_err(s, page, p, "Freepointer corrupt");
 917		/*
 918		 * No choice but to zap it and thus lose the remainder
 919		 * of the free objects in this slab. May cause
 920		 * another error because the object count is now wrong.
 921		 */
 922		set_freepointer(s, p, NULL);
 923		return 0;
 924	}
 925	return 1;
 926}
 927
 928static int check_slab(struct kmem_cache *s, struct page *page)
 929{
 930	int maxobj;
 931
 932	VM_BUG_ON(!irqs_disabled());
 933
 934	if (!PageSlab(page)) {
 935		slab_err(s, page, "Not a valid slab page");
 936		return 0;
 937	}
 938
 939	maxobj = order_objects(compound_order(page), s->size, s->reserved);
 940	if (page->objects > maxobj) {
 941		slab_err(s, page, "objects %u > max %u",
 942			page->objects, maxobj);
 943		return 0;
 944	}
 945	if (page->inuse > page->objects) {
 946		slab_err(s, page, "inuse %u > max %u",
 947			page->inuse, page->objects);
 948		return 0;
 949	}
 950	/* Slab_pad_check fixes things up after itself */
 951	slab_pad_check(s, page);
 952	return 1;
 953}
 954
 955/*
 956 * Determine if a certain object on a page is on the freelist. Must hold the
 957 * slab lock to guarantee that the chains are in a consistent state.
 958 */
 959static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
 960{
 961	int nr = 0;
 962	void *fp;
 963	void *object = NULL;
 964	int max_objects;
 965
 966	fp = page->freelist;
 967	while (fp && nr <= page->objects) {
 968		if (fp == search)
 969			return 1;
 970		if (!check_valid_pointer(s, page, fp)) {
 971			if (object) {
 972				object_err(s, page, object,
 973					"Freechain corrupt");
 974				set_freepointer(s, object, NULL);
 975			} else {
 976				slab_err(s, page, "Freepointer corrupt");
 977				page->freelist = NULL;
 978				page->inuse = page->objects;
 979				slab_fix(s, "Freelist cleared");
 980				return 0;
 981			}
 982			break;
 983		}
 984		object = fp;
 985		fp = get_freepointer(s, object);
 986		nr++;
 987	}
 988
 989	max_objects = order_objects(compound_order(page), s->size, s->reserved);
 990	if (max_objects > MAX_OBJS_PER_PAGE)
 991		max_objects = MAX_OBJS_PER_PAGE;
 992
 993	if (page->objects != max_objects) {
 994		slab_err(s, page, "Wrong number of objects. Found %d but should be %d",
 995			 page->objects, max_objects);
 996		page->objects = max_objects;
 997		slab_fix(s, "Number of objects adjusted.");
 998	}
 999	if (page->inuse != page->objects - nr) {
1000		slab_err(s, page, "Wrong object count. Counter is %d but counted were %d",
1001			 page->inuse, page->objects - nr);
1002		page->inuse = page->objects - nr;
1003		slab_fix(s, "Object count adjusted.");
1004	}
1005	return search == NULL;
1006}
1007
1008static void trace(struct kmem_cache *s, struct page *page, void *object,
1009								int alloc)
1010{
1011	if (s->flags & SLAB_TRACE) {
1012		pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
1013			s->name,
1014			alloc ? "alloc" : "free",
1015			object, page->inuse,
1016			page->freelist);
1017
1018		if (!alloc)
1019			print_section(KERN_INFO, "Object ", (void *)object,
1020					s->object_size);
1021
1022		dump_stack();
1023	}
1024}
1025
1026/*
1027 * Tracking of fully allocated slabs for debugging purposes.
1028 */
1029static void add_full(struct kmem_cache *s,
1030	struct kmem_cache_node *n, struct page *page)
1031{
1032	if (!(s->flags & SLAB_STORE_USER))
1033		return;
1034
1035	lockdep_assert_held(&n->list_lock);
1036	list_add(&page->lru, &n->full);
1037}
1038
1039static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
1040{
1041	if (!(s->flags & SLAB_STORE_USER))
1042		return;
1043
1044	lockdep_assert_held(&n->list_lock);
1045	list_del(&page->lru);
1046}
1047
1048/* Tracking of the number of slabs for debugging purposes */
1049static inline unsigned long slabs_node(struct kmem_cache *s, int node)
1050{
1051	struct kmem_cache_node *n = get_node(s, node);
1052
1053	return atomic_long_read(&n->nr_slabs);
1054}
1055
1056static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1057{
1058	return atomic_long_read(&n->nr_slabs);
1059}
1060
1061static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
1062{
1063	struct kmem_cache_node *n = get_node(s, node);
1064
1065	/*
1066	 * May be called early in order to allocate a slab for the
1067	 * kmem_cache_node structure. Solve the chicken-egg
1068	 * dilemma by deferring the increment of the count during
1069	 * bootstrap (see early_kmem_cache_node_alloc).
1070	 */
1071	if (likely(n)) {
1072		atomic_long_inc(&n->nr_slabs);
1073		atomic_long_add(objects, &n->total_objects);
1074	}
1075}
1076static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
1077{
1078	struct kmem_cache_node *n = get_node(s, node);
1079
1080	atomic_long_dec(&n->nr_slabs);
1081	atomic_long_sub(objects, &n->total_objects);
1082}
1083
1084/* Object debug checks for alloc/free paths */
1085static void setup_object_debug(struct kmem_cache *s, struct page *page,
1086								void *object)
1087{
1088	if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
1089		return;
1090
1091	init_object(s, object, SLUB_RED_INACTIVE);
1092	init_tracking(s, object);
1093}
1094
 
 
 
 
 
 
 
 
 
 
 
1095static inline int alloc_consistency_checks(struct kmem_cache *s,
1096					struct page *page,
1097					void *object, unsigned long addr)
1098{
1099	if (!check_slab(s, page))
1100		return 0;
1101
1102	if (!check_valid_pointer(s, page, object)) {
1103		object_err(s, page, object, "Freelist Pointer check fails");
1104		return 0;
1105	}
1106
1107	if (!check_object(s, page, object, SLUB_RED_INACTIVE))
1108		return 0;
1109
1110	return 1;
1111}
1112
1113static noinline int alloc_debug_processing(struct kmem_cache *s,
1114					struct page *page,
1115					void *object, unsigned long addr)
1116{
1117	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1118		if (!alloc_consistency_checks(s, page, object, addr))
1119			goto bad;
1120	}
1121
1122	/* Success perform special debug activities for allocs */
1123	if (s->flags & SLAB_STORE_USER)
1124		set_track(s, object, TRACK_ALLOC, addr);
1125	trace(s, page, object, 1);
1126	init_object(s, object, SLUB_RED_ACTIVE);
1127	return 1;
1128
1129bad:
1130	if (PageSlab(page)) {
1131		/*
1132		 * If this is a slab page then lets do the best we can
1133		 * to avoid issues in the future. Marking all objects
1134		 * as used avoids touching the remaining objects.
1135		 */
1136		slab_fix(s, "Marking all objects used");
1137		page->inuse = page->objects;
1138		page->freelist = NULL;
1139	}
1140	return 0;
1141}
1142
1143static inline int free_consistency_checks(struct kmem_cache *s,
1144		struct page *page, void *object, unsigned long addr)
1145{
1146	if (!check_valid_pointer(s, page, object)) {
1147		slab_err(s, page, "Invalid object pointer 0x%p", object);
1148		return 0;
1149	}
1150
1151	if (on_freelist(s, page, object)) {
1152		object_err(s, page, object, "Object already free");
1153		return 0;
1154	}
1155
1156	if (!check_object(s, page, object, SLUB_RED_ACTIVE))
1157		return 0;
1158
1159	if (unlikely(s != page->slab_cache)) {
1160		if (!PageSlab(page)) {
1161			slab_err(s, page, "Attempt to free object(0x%p) outside of slab",
1162				 object);
1163		} else if (!page->slab_cache) {
1164			pr_err("SLUB <none>: no slab for object 0x%p.\n",
1165			       object);
1166			dump_stack();
1167		} else
1168			object_err(s, page, object,
1169					"page slab pointer corrupt.");
1170		return 0;
1171	}
1172	return 1;
1173}
1174
1175/* Supports checking bulk free of a constructed freelist */
1176static noinline int free_debug_processing(
1177	struct kmem_cache *s, struct page *page,
1178	void *head, void *tail, int bulk_cnt,
1179	unsigned long addr)
1180{
1181	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1182	void *object = head;
1183	int cnt = 0;
1184	unsigned long uninitialized_var(flags);
1185	int ret = 0;
1186
1187	spin_lock_irqsave(&n->list_lock, flags);
1188	slab_lock(page);
1189
1190	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1191		if (!check_slab(s, page))
1192			goto out;
1193	}
1194
1195next_object:
1196	cnt++;
1197
1198	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1199		if (!free_consistency_checks(s, page, object, addr))
1200			goto out;
1201	}
1202
1203	if (s->flags & SLAB_STORE_USER)
1204		set_track(s, object, TRACK_FREE, addr);
1205	trace(s, page, object, 0);
1206	/* Freepointer not overwritten by init_object(), SLAB_POISON moved it */
1207	init_object(s, object, SLUB_RED_INACTIVE);
1208
1209	/* Reached end of constructed freelist yet? */
1210	if (object != tail) {
1211		object = get_freepointer(s, object);
1212		goto next_object;
1213	}
1214	ret = 1;
1215
1216out:
1217	if (cnt != bulk_cnt)
1218		slab_err(s, page, "Bulk freelist count(%d) invalid(%d)\n",
1219			 bulk_cnt, cnt);
1220
1221	slab_unlock(page);
1222	spin_unlock_irqrestore(&n->list_lock, flags);
1223	if (!ret)
1224		slab_fix(s, "Object at 0x%p not freed", object);
1225	return ret;
1226}
1227
1228static int __init setup_slub_debug(char *str)
 
 
 
 
 
 
 
 
 
 
 
1229{
1230	slub_debug = DEBUG_DEFAULT_FLAGS;
1231	if (*str++ != '=' || !*str)
1232		/*
1233		 * No options specified. Switch on full debugging.
1234		 */
1235		goto out;
1236
1237	if (*str == ',')
 
 
 
 
1238		/*
1239		 * No options but restriction on slabs. This means full
1240		 * debugging for slabs matching a pattern.
1241		 */
 
1242		goto check_slabs;
 
 
1243
1244	slub_debug = 0;
1245	if (*str == '-')
1246		/*
1247		 * Switch off all debugging measures.
1248		 */
1249		goto out;
1250
1251	/*
1252	 * Determine which debug features should be switched on
1253	 */
1254	for (; *str && *str != ','; str++) {
1255		switch (tolower(*str)) {
 
 
 
1256		case 'f':
1257			slub_debug |= SLAB_CONSISTENCY_CHECKS;
1258			break;
1259		case 'z':
1260			slub_debug |= SLAB_RED_ZONE;
1261			break;
1262		case 'p':
1263			slub_debug |= SLAB_POISON;
1264			break;
1265		case 'u':
1266			slub_debug |= SLAB_STORE_USER;
1267			break;
1268		case 't':
1269			slub_debug |= SLAB_TRACE;
1270			break;
1271		case 'a':
1272			slub_debug |= SLAB_FAILSLAB;
1273			break;
1274		case 'o':
1275			/*
1276			 * Avoid enabling debugging on caches if its minimum
1277			 * order would increase as a result.
1278			 */
1279			disable_higher_order_debug = 1;
1280			break;
1281		default:
1282			pr_err("slub_debug option '%c' unknown. skipped\n",
1283			       *str);
1284		}
1285	}
1286
1287check_slabs:
1288	if (*str == ',')
1289		slub_debug_slabs = str + 1;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1290out:
 
 
 
 
 
 
 
 
 
1291	return 1;
1292}
1293
1294__setup("slub_debug", setup_slub_debug);
1295
 
 
 
 
 
 
 
 
 
 
 
1296slab_flags_t kmem_cache_flags(unsigned int object_size,
1297	slab_flags_t flags, const char *name,
1298	void (*ctor)(void *))
1299{
1300	/*
1301	 * Enable debugging if selected on the kernel commandline.
1302	 */
1303	if (slub_debug && (!slub_debug_slabs || (name &&
1304		!strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)))))
1305		flags |= slub_debug;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1306
1307	return flags;
 
 
 
 
 
 
 
 
 
 
 
1308}
1309#else /* !CONFIG_SLUB_DEBUG */
1310static inline void setup_object_debug(struct kmem_cache *s,
1311			struct page *page, void *object) {}
 
 
1312
1313static inline int alloc_debug_processing(struct kmem_cache *s,
1314	struct page *page, void *object, unsigned long addr) { return 0; }
1315
1316static inline int free_debug_processing(
1317	struct kmem_cache *s, struct page *page,
1318	void *head, void *tail, int bulk_cnt,
1319	unsigned long addr) { return 0; }
1320
1321static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
1322			{ return 1; }
1323static inline int check_object(struct kmem_cache *s, struct page *page,
1324			void *object, u8 val) { return 1; }
1325static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
1326					struct page *page) {}
1327static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
1328					struct page *page) {}
1329slab_flags_t kmem_cache_flags(unsigned int object_size,
1330	slab_flags_t flags, const char *name,
1331	void (*ctor)(void *))
1332{
1333	return flags;
1334}
1335#define slub_debug 0
1336
1337#define disable_higher_order_debug 0
1338
1339static inline unsigned long slabs_node(struct kmem_cache *s, int node)
1340							{ return 0; }
1341static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1342							{ return 0; }
1343static inline void inc_slabs_node(struct kmem_cache *s, int node,
1344							int objects) {}
1345static inline void dec_slabs_node(struct kmem_cache *s, int node,
1346							int objects) {}
1347
 
 
 
 
 
1348#endif /* CONFIG_SLUB_DEBUG */
1349
1350/*
1351 * Hooks for other subsystems that check memory allocations. In a typical
1352 * production configuration these hooks all should produce no code at all.
1353 */
1354static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
1355{
 
 
1356	kmemleak_alloc(ptr, size, 1, flags);
1357	kasan_kmalloc_large(ptr, size, flags);
1358}
1359
1360static __always_inline void kfree_hook(void *x)
1361{
1362	kmemleak_free(x);
1363	kasan_kfree_large(x, _RET_IP_);
1364}
1365
1366static __always_inline bool slab_free_hook(struct kmem_cache *s, void *x)
 
1367{
1368	kmemleak_free_recursive(x, s->flags);
1369
1370	/*
1371	 * Trouble is that we may no longer disable interrupts in the fast path
1372	 * So in order to make the debug calls that expect irqs to be
1373	 * disabled we need to disable interrupts temporarily.
1374	 */
1375#ifdef CONFIG_LOCKDEP
1376	{
1377		unsigned long flags;
1378
1379		local_irq_save(flags);
1380		debug_check_no_locks_freed(x, s->object_size);
1381		local_irq_restore(flags);
1382	}
1383#endif
1384	if (!(s->flags & SLAB_DEBUG_OBJECTS))
1385		debug_check_no_obj_freed(x, s->object_size);
1386
1387	/* KASAN might put x into memory quarantine, delaying its reuse */
1388	return kasan_slab_free(s, x, _RET_IP_);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1389}
1390
1391static inline bool slab_free_freelist_hook(struct kmem_cache *s,
1392					   void **head, void **tail)
 
1393{
1394/*
1395 * Compiler cannot detect this function can be removed if slab_free_hook()
1396 * evaluates to nothing.  Thus, catch all relevant config debug options here.
1397 */
1398#if defined(CONFIG_LOCKDEP)	||		\
1399	defined(CONFIG_DEBUG_KMEMLEAK) ||	\
1400	defined(CONFIG_DEBUG_OBJECTS_FREE) ||	\
1401	defined(CONFIG_KASAN)
1402
1403	void *object;
1404	void *next = *head;
1405	void *old_tail = *tail ? *tail : *head;
1406
 
 
 
 
 
1407	/* Head and tail of the reconstructed freelist */
1408	*head = NULL;
1409	*tail = NULL;
1410
1411	do {
1412		object = next;
1413		next = get_freepointer(s, object);
 
1414		/* If object's reuse doesn't have to be delayed */
1415		if (!slab_free_hook(s, object)) {
1416			/* Move object to the new freelist */
1417			set_freepointer(s, object, *head);
1418			*head = object;
1419			if (!*tail)
1420				*tail = object;
 
 
 
 
 
 
1421		}
1422	} while (object != old_tail);
1423
1424	if (*head == *tail)
1425		*tail = NULL;
1426
1427	return *head != NULL;
1428#else
1429	return true;
1430#endif
1431}
1432
1433static void setup_object(struct kmem_cache *s, struct page *page,
1434				void *object)
1435{
1436	setup_object_debug(s, page, object);
1437	kasan_init_slab_obj(s, object);
1438	if (unlikely(s->ctor)) {
1439		kasan_unpoison_object_data(s, object);
1440		s->ctor(object);
1441		kasan_poison_object_data(s, object);
1442	}
 
1443}
1444
1445/*
1446 * Slab allocation and freeing
1447 */
1448static inline struct page *alloc_slab_page(struct kmem_cache *s,
1449		gfp_t flags, int node, struct kmem_cache_order_objects oo)
1450{
1451	struct page *page;
1452	unsigned int order = oo_order(oo);
1453
1454	if (node == NUMA_NO_NODE)
1455		page = alloc_pages(flags, order);
1456	else
1457		page = __alloc_pages_node(node, flags, order);
1458
1459	if (page && memcg_charge_slab(page, flags, order, s)) {
1460		__free_pages(page, order);
1461		page = NULL;
1462	}
1463
1464	return page;
1465}
1466
1467#ifdef CONFIG_SLAB_FREELIST_RANDOM
1468/* Pre-initialize the random sequence cache */
1469static int init_cache_random_seq(struct kmem_cache *s)
1470{
1471	unsigned int count = oo_objects(s->oo);
1472	int err;
1473
1474	/* Bailout if already initialised */
1475	if (s->random_seq)
1476		return 0;
1477
1478	err = cache_random_seq_create(s, count, GFP_KERNEL);
1479	if (err) {
1480		pr_err("SLUB: Unable to initialize free list for %s\n",
1481			s->name);
1482		return err;
1483	}
1484
1485	/* Transform to an offset on the set of pages */
1486	if (s->random_seq) {
1487		unsigned int i;
1488
1489		for (i = 0; i < count; i++)
1490			s->random_seq[i] *= s->size;
1491	}
1492	return 0;
1493}
1494
1495/* Initialize each random sequence freelist per cache */
1496static void __init init_freelist_randomization(void)
1497{
1498	struct kmem_cache *s;
1499
1500	mutex_lock(&slab_mutex);
1501
1502	list_for_each_entry(s, &slab_caches, list)
1503		init_cache_random_seq(s);
1504
1505	mutex_unlock(&slab_mutex);
1506}
1507
1508/* Get the next entry on the pre-computed freelist randomized */
1509static void *next_freelist_entry(struct kmem_cache *s, struct page *page,
1510				unsigned long *pos, void *start,
1511				unsigned long page_limit,
1512				unsigned long freelist_count)
1513{
1514	unsigned int idx;
1515
1516	/*
1517	 * If the target page allocation failed, the number of objects on the
1518	 * page might be smaller than the usual size defined by the cache.
1519	 */
1520	do {
1521		idx = s->random_seq[*pos];
1522		*pos += 1;
1523		if (*pos >= freelist_count)
1524			*pos = 0;
1525	} while (unlikely(idx >= page_limit));
1526
1527	return (char *)start + idx;
1528}
1529
1530/* Shuffle the single linked freelist based on a random pre-computed sequence */
1531static bool shuffle_freelist(struct kmem_cache *s, struct page *page)
1532{
1533	void *start;
1534	void *cur;
1535	void *next;
1536	unsigned long idx, pos, page_limit, freelist_count;
1537
1538	if (page->objects < 2 || !s->random_seq)
1539		return false;
1540
1541	freelist_count = oo_objects(s->oo);
1542	pos = get_random_int() % freelist_count;
1543
1544	page_limit = page->objects * s->size;
1545	start = fixup_red_left(s, page_address(page));
1546
1547	/* First entry is used as the base of the freelist */
1548	cur = next_freelist_entry(s, page, &pos, start, page_limit,
1549				freelist_count);
 
1550	page->freelist = cur;
1551
1552	for (idx = 1; idx < page->objects; idx++) {
1553		setup_object(s, page, cur);
1554		next = next_freelist_entry(s, page, &pos, start, page_limit,
1555			freelist_count);
 
1556		set_freepointer(s, cur, next);
1557		cur = next;
1558	}
1559	setup_object(s, page, cur);
1560	set_freepointer(s, cur, NULL);
1561
1562	return true;
1563}
1564#else
1565static inline int init_cache_random_seq(struct kmem_cache *s)
1566{
1567	return 0;
1568}
1569static inline void init_freelist_randomization(void) { }
1570static inline bool shuffle_freelist(struct kmem_cache *s, struct page *page)
1571{
1572	return false;
1573}
1574#endif /* CONFIG_SLAB_FREELIST_RANDOM */
1575
1576static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
1577{
1578	struct page *page;
1579	struct kmem_cache_order_objects oo = s->oo;
1580	gfp_t alloc_gfp;
1581	void *start, *p;
1582	int idx, order;
1583	bool shuffle;
1584
1585	flags &= gfp_allowed_mask;
1586
1587	if (gfpflags_allow_blocking(flags))
1588		local_irq_enable();
1589
1590	flags |= s->allocflags;
1591
1592	/*
1593	 * Let the initial higher-order allocation fail under memory pressure
1594	 * so we fall-back to the minimum order allocation.
1595	 */
1596	alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
1597	if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min))
1598		alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~(__GFP_RECLAIM|__GFP_NOFAIL);
1599
1600	page = alloc_slab_page(s, alloc_gfp, node, oo);
1601	if (unlikely(!page)) {
1602		oo = s->min;
1603		alloc_gfp = flags;
1604		/*
1605		 * Allocation may have failed due to fragmentation.
1606		 * Try a lower order alloc if possible
1607		 */
1608		page = alloc_slab_page(s, alloc_gfp, node, oo);
1609		if (unlikely(!page))
1610			goto out;
1611		stat(s, ORDER_FALLBACK);
1612	}
1613
1614	page->objects = oo_objects(oo);
1615
1616	order = compound_order(page);
 
1617	page->slab_cache = s;
1618	__SetPageSlab(page);
1619	if (page_is_pfmemalloc(page))
1620		SetPageSlabPfmemalloc(page);
1621
1622	start = page_address(page);
1623
1624	if (unlikely(s->flags & SLAB_POISON))
1625		memset(start, POISON_INUSE, PAGE_SIZE << order);
1626
1627	kasan_poison_slab(page);
1628
1629	shuffle = shuffle_freelist(s, page);
1630
1631	if (!shuffle) {
1632		for_each_object_idx(p, idx, s, start, page->objects) {
1633			setup_object(s, page, p);
1634			if (likely(idx < page->objects))
1635				set_freepointer(s, p, p + s->size);
1636			else
1637				set_freepointer(s, p, NULL);
 
 
1638		}
1639		page->freelist = fixup_red_left(s, start);
1640	}
1641
1642	page->inuse = page->objects;
1643	page->frozen = 1;
1644
1645out:
1646	if (gfpflags_allow_blocking(flags))
1647		local_irq_disable();
1648	if (!page)
1649		return NULL;
1650
1651	mod_lruvec_page_state(page,
1652		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
1653		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
1654		1 << oo_order(oo));
1655
1656	inc_slabs_node(s, page_to_nid(page), page->objects);
1657
1658	return page;
1659}
1660
1661static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
1662{
1663	if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
1664		gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
1665		flags &= ~GFP_SLAB_BUG_MASK;
1666		pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
1667				invalid_mask, &invalid_mask, flags, &flags);
1668		dump_stack();
1669	}
1670
1671	return allocate_slab(s,
1672		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
1673}
1674
1675static void __free_slab(struct kmem_cache *s, struct page *page)
1676{
1677	int order = compound_order(page);
1678	int pages = 1 << order;
1679
1680	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1681		void *p;
1682
1683		slab_pad_check(s, page);
1684		for_each_object(p, s, page_address(page),
1685						page->objects)
1686			check_object(s, page, p, SLUB_RED_INACTIVE);
1687	}
1688
1689	mod_lruvec_page_state(page,
1690		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
1691		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
1692		-pages);
1693
1694	__ClearPageSlabPfmemalloc(page);
1695	__ClearPageSlab(page);
1696
1697	page_mapcount_reset(page);
1698	if (current->reclaim_state)
1699		current->reclaim_state->reclaimed_slab += pages;
1700	memcg_uncharge_slab(page, order, s);
1701	__free_pages(page, order);
1702}
1703
1704#define need_reserve_slab_rcu						\
1705	(sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
1706
1707static void rcu_free_slab(struct rcu_head *h)
1708{
1709	struct page *page;
1710
1711	if (need_reserve_slab_rcu)
1712		page = virt_to_head_page(h);
1713	else
1714		page = container_of((struct list_head *)h, struct page, lru);
1715
1716	__free_slab(page->slab_cache, page);
1717}
1718
1719static void free_slab(struct kmem_cache *s, struct page *page)
1720{
1721	if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) {
1722		struct rcu_head *head;
1723
1724		if (need_reserve_slab_rcu) {
1725			int order = compound_order(page);
1726			int offset = (PAGE_SIZE << order) - s->reserved;
1727
1728			VM_BUG_ON(s->reserved != sizeof(*head));
1729			head = page_address(page) + offset;
1730		} else {
1731			head = &page->rcu_head;
1732		}
1733
1734		call_rcu(head, rcu_free_slab);
1735	} else
1736		__free_slab(s, page);
1737}
1738
1739static void discard_slab(struct kmem_cache *s, struct page *page)
1740{
1741	dec_slabs_node(s, page_to_nid(page), page->objects);
1742	free_slab(s, page);
1743}
1744
1745/*
1746 * Management of partially allocated slabs.
1747 */
1748static inline void
1749__add_partial(struct kmem_cache_node *n, struct page *page, int tail)
1750{
1751	n->nr_partial++;
1752	if (tail == DEACTIVATE_TO_TAIL)
1753		list_add_tail(&page->lru, &n->partial);
1754	else
1755		list_add(&page->lru, &n->partial);
1756}
1757
1758static inline void add_partial(struct kmem_cache_node *n,
1759				struct page *page, int tail)
1760{
1761	lockdep_assert_held(&n->list_lock);
1762	__add_partial(n, page, tail);
1763}
1764
1765static inline void remove_partial(struct kmem_cache_node *n,
1766					struct page *page)
1767{
1768	lockdep_assert_held(&n->list_lock);
1769	list_del(&page->lru);
1770	n->nr_partial--;
1771}
1772
1773/*
1774 * Remove slab from the partial list, freeze it and
1775 * return the pointer to the freelist.
1776 *
1777 * Returns a list of objects or NULL if it fails.
1778 */
1779static inline void *acquire_slab(struct kmem_cache *s,
1780		struct kmem_cache_node *n, struct page *page,
1781		int mode, int *objects)
1782{
1783	void *freelist;
1784	unsigned long counters;
1785	struct page new;
1786
1787	lockdep_assert_held(&n->list_lock);
1788
1789	/*
1790	 * Zap the freelist and set the frozen bit.
1791	 * The old freelist is the list of objects for the
1792	 * per cpu allocation list.
1793	 */
1794	freelist = page->freelist;
1795	counters = page->counters;
1796	new.counters = counters;
1797	*objects = new.objects - new.inuse;
1798	if (mode) {
1799		new.inuse = page->objects;
1800		new.freelist = NULL;
1801	} else {
1802		new.freelist = freelist;
1803	}
1804
1805	VM_BUG_ON(new.frozen);
1806	new.frozen = 1;
1807
1808	if (!__cmpxchg_double_slab(s, page,
1809			freelist, counters,
1810			new.freelist, new.counters,
1811			"acquire_slab"))
1812		return NULL;
1813
1814	remove_partial(n, page);
1815	WARN_ON(!freelist);
1816	return freelist;
1817}
1818
1819static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
1820static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
1821
1822/*
1823 * Try to allocate a partial slab from a specific node.
1824 */
1825static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
1826				struct kmem_cache_cpu *c, gfp_t flags)
1827{
1828	struct page *page, *page2;
1829	void *object = NULL;
1830	unsigned int available = 0;
1831	int objects;
1832
1833	/*
1834	 * Racy check. If we mistakenly see no partial slabs then we
1835	 * just allocate an empty slab. If we mistakenly try to get a
1836	 * partial slab and there is none available then get_partials()
1837	 * will return NULL.
1838	 */
1839	if (!n || !n->nr_partial)
1840		return NULL;
1841
1842	spin_lock(&n->list_lock);
1843	list_for_each_entry_safe(page, page2, &n->partial, lru) {
1844		void *t;
1845
1846		if (!pfmemalloc_match(page, flags))
1847			continue;
1848
1849		t = acquire_slab(s, n, page, object == NULL, &objects);
1850		if (!t)
1851			break;
1852
1853		available += objects;
1854		if (!object) {
1855			c->page = page;
1856			stat(s, ALLOC_FROM_PARTIAL);
1857			object = t;
1858		} else {
1859			put_cpu_partial(s, page, 0);
1860			stat(s, CPU_PARTIAL_NODE);
1861		}
1862		if (!kmem_cache_has_cpu_partial(s)
1863			|| available > slub_cpu_partial(s) / 2)
1864			break;
1865
1866	}
1867	spin_unlock(&n->list_lock);
1868	return object;
1869}
1870
1871/*
1872 * Get a page from somewhere. Search in increasing NUMA distances.
1873 */
1874static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
1875		struct kmem_cache_cpu *c)
1876{
1877#ifdef CONFIG_NUMA
1878	struct zonelist *zonelist;
1879	struct zoneref *z;
1880	struct zone *zone;
1881	enum zone_type high_zoneidx = gfp_zone(flags);
1882	void *object;
1883	unsigned int cpuset_mems_cookie;
1884
1885	/*
1886	 * The defrag ratio allows a configuration of the tradeoffs between
1887	 * inter node defragmentation and node local allocations. A lower
1888	 * defrag_ratio increases the tendency to do local allocations
1889	 * instead of attempting to obtain partial slabs from other nodes.
1890	 *
1891	 * If the defrag_ratio is set to 0 then kmalloc() always
1892	 * returns node local objects. If the ratio is higher then kmalloc()
1893	 * may return off node objects because partial slabs are obtained
1894	 * from other nodes and filled up.
1895	 *
1896	 * If /sys/kernel/slab/xx/remote_node_defrag_ratio is set to 100
1897	 * (which makes defrag_ratio = 1000) then every (well almost)
1898	 * allocation will first attempt to defrag slab caches on other nodes.
1899	 * This means scanning over all nodes to look for partial slabs which
1900	 * may be expensive if we do it every time we are trying to find a slab
1901	 * with available objects.
1902	 */
1903	if (!s->remote_node_defrag_ratio ||
1904			get_cycles() % 1024 > s->remote_node_defrag_ratio)
1905		return NULL;
1906
1907	do {
1908		cpuset_mems_cookie = read_mems_allowed_begin();
1909		zonelist = node_zonelist(mempolicy_slab_node(), flags);
1910		for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
1911			struct kmem_cache_node *n;
1912
1913			n = get_node(s, zone_to_nid(zone));
1914
1915			if (n && cpuset_zone_allowed(zone, flags) &&
1916					n->nr_partial > s->min_partial) {
1917				object = get_partial_node(s, n, c, flags);
1918				if (object) {
1919					/*
1920					 * Don't check read_mems_allowed_retry()
1921					 * here - if mems_allowed was updated in
1922					 * parallel, that was a harmless race
1923					 * between allocation and the cpuset
1924					 * update
1925					 */
1926					return object;
1927				}
1928			}
1929		}
1930	} while (read_mems_allowed_retry(cpuset_mems_cookie));
1931#endif
1932	return NULL;
1933}
1934
1935/*
1936 * Get a partial page, lock it and return it.
1937 */
1938static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
1939		struct kmem_cache_cpu *c)
1940{
1941	void *object;
1942	int searchnode = node;
1943
1944	if (node == NUMA_NO_NODE)
1945		searchnode = numa_mem_id();
1946	else if (!node_present_pages(node))
1947		searchnode = node_to_mem_node(node);
1948
1949	object = get_partial_node(s, get_node(s, searchnode), c, flags);
1950	if (object || node != NUMA_NO_NODE)
1951		return object;
1952
1953	return get_any_partial(s, flags, c);
1954}
1955
1956#ifdef CONFIG_PREEMPT
1957/*
1958 * Calculate the next globally unique transaction for disambiguiation
1959 * during cmpxchg. The transactions start with the cpu number and are then
1960 * incremented by CONFIG_NR_CPUS.
1961 */
1962#define TID_STEP  roundup_pow_of_two(CONFIG_NR_CPUS)
1963#else
1964/*
1965 * No preemption supported therefore also no need to check for
1966 * different cpus.
1967 */
1968#define TID_STEP 1
1969#endif
1970
1971static inline unsigned long next_tid(unsigned long tid)
1972{
1973	return tid + TID_STEP;
1974}
1975
 
1976static inline unsigned int tid_to_cpu(unsigned long tid)
1977{
1978	return tid % TID_STEP;
1979}
1980
1981static inline unsigned long tid_to_event(unsigned long tid)
1982{
1983	return tid / TID_STEP;
1984}
 
1985
1986static inline unsigned int init_tid(int cpu)
1987{
1988	return cpu;
1989}
1990
1991static inline void note_cmpxchg_failure(const char *n,
1992		const struct kmem_cache *s, unsigned long tid)
1993{
1994#ifdef SLUB_DEBUG_CMPXCHG
1995	unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid);
1996
1997	pr_info("%s %s: cmpxchg redo ", n, s->name);
1998
1999#ifdef CONFIG_PREEMPT
2000	if (tid_to_cpu(tid) != tid_to_cpu(actual_tid))
2001		pr_warn("due to cpu change %d -> %d\n",
2002			tid_to_cpu(tid), tid_to_cpu(actual_tid));
2003	else
2004#endif
2005	if (tid_to_event(tid) != tid_to_event(actual_tid))
2006		pr_warn("due to cpu running other code. Event %ld->%ld\n",
2007			tid_to_event(tid), tid_to_event(actual_tid));
2008	else
2009		pr_warn("for unknown reason: actual=%lx was=%lx target=%lx\n",
2010			actual_tid, tid, next_tid(tid));
2011#endif
2012	stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
2013}
2014
2015static void init_kmem_cache_cpus(struct kmem_cache *s)
2016{
2017	int cpu;
2018
2019	for_each_possible_cpu(cpu)
2020		per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu);
2021}
2022
2023/*
2024 * Remove the cpu slab
2025 */
2026static void deactivate_slab(struct kmem_cache *s, struct page *page,
2027				void *freelist, struct kmem_cache_cpu *c)
2028{
2029	enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
2030	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
2031	int lock = 0;
2032	enum slab_modes l = M_NONE, m = M_NONE;
2033	void *nextfree;
2034	int tail = DEACTIVATE_TO_HEAD;
2035	struct page new;
2036	struct page old;
2037
2038	if (page->freelist) {
2039		stat(s, DEACTIVATE_REMOTE_FREES);
2040		tail = DEACTIVATE_TO_TAIL;
2041	}
2042
2043	/*
2044	 * Stage one: Free all available per cpu objects back
2045	 * to the page freelist while it is still frozen. Leave the
2046	 * last one.
2047	 *
2048	 * There is no need to take the list->lock because the page
2049	 * is still frozen.
2050	 */
2051	while (freelist && (nextfree = get_freepointer(s, freelist))) {
2052		void *prior;
2053		unsigned long counters;
 
2054
2055		do {
2056			prior = page->freelist;
2057			counters = page->counters;
2058			set_freepointer(s, freelist, prior);
2059			new.counters = counters;
2060			new.inuse--;
2061			VM_BUG_ON(!new.frozen);
2062
2063		} while (!__cmpxchg_double_slab(s, page,
2064			prior, counters,
2065			freelist, new.counters,
2066			"drain percpu freelist"));
2067
2068		freelist = nextfree;
2069	}
2070
2071	/*
2072	 * Stage two: Ensure that the page is unfrozen while the
2073	 * list presence reflects the actual number of objects
2074	 * during unfreeze.
 
 
2075	 *
2076	 * We setup the list membership and then perform a cmpxchg
2077	 * with the count. If there is a mismatch then the page
2078	 * is not unfrozen but the page is on the wrong list.
2079	 *
2080	 * Then we restart the process which may have to remove
2081	 * the page from the list that we just put it on again
2082	 * because the number of objects in the slab may have
2083	 * changed.
2084	 */
2085redo:
2086
2087	old.freelist = page->freelist;
2088	old.counters = page->counters;
2089	VM_BUG_ON(!old.frozen);
2090
2091	/* Determine target state of the slab */
2092	new.counters = old.counters;
2093	if (freelist) {
2094		new.inuse--;
2095		set_freepointer(s, freelist, old.freelist);
2096		new.freelist = freelist;
2097	} else
2098		new.freelist = old.freelist;
2099
2100	new.frozen = 0;
2101
2102	if (!new.inuse && n->nr_partial >= s->min_partial)
2103		m = M_FREE;
2104	else if (new.freelist) {
2105		m = M_PARTIAL;
2106		if (!lock) {
2107			lock = 1;
2108			/*
2109			 * Taking the spinlock removes the possiblity
2110			 * that acquire_slab() will see a slab page that
2111			 * is frozen
2112			 */
2113			spin_lock(&n->list_lock);
2114		}
2115	} else {
2116		m = M_FULL;
2117		if (kmem_cache_debug(s) && !lock) {
2118			lock = 1;
2119			/*
2120			 * This also ensures that the scanning of full
2121			 * slabs from diagnostic functions will not see
2122			 * any frozen slabs.
2123			 */
2124			spin_lock(&n->list_lock);
2125		}
2126	}
2127
2128	if (l != m) {
2129
2130		if (l == M_PARTIAL)
2131
2132			remove_partial(n, page);
2133
2134		else if (l == M_FULL)
2135
2136			remove_full(s, n, page);
2137
2138		if (m == M_PARTIAL) {
2139
2140			add_partial(n, page, tail);
2141			stat(s, tail);
2142
2143		} else if (m == M_FULL) {
2144
2145			stat(s, DEACTIVATE_FULL);
2146			add_full(s, n, page);
2147
2148		}
2149	}
2150
2151	l = m;
2152	if (!__cmpxchg_double_slab(s, page,
2153				old.freelist, old.counters,
2154				new.freelist, new.counters,
2155				"unfreezing slab"))
2156		goto redo;
2157
2158	if (lock)
2159		spin_unlock(&n->list_lock);
2160
2161	if (m == M_FREE) {
 
 
 
 
2162		stat(s, DEACTIVATE_EMPTY);
2163		discard_slab(s, page);
2164		stat(s, FREE_SLAB);
2165	}
2166
2167	c->page = NULL;
2168	c->freelist = NULL;
2169}
2170
2171/*
2172 * Unfreeze all the cpu partial slabs.
2173 *
2174 * This function must be called with interrupts disabled
2175 * for the cpu using c (or some other guarantee must be there
2176 * to guarantee no concurrent accesses).
2177 */
2178static void unfreeze_partials(struct kmem_cache *s,
2179		struct kmem_cache_cpu *c)
2180{
2181#ifdef CONFIG_SLUB_CPU_PARTIAL
2182	struct kmem_cache_node *n = NULL, *n2 = NULL;
2183	struct page *page, *discard_page = NULL;
2184
2185	while ((page = c->partial)) {
2186		struct page new;
2187		struct page old;
2188
2189		c->partial = page->next;
2190
2191		n2 = get_node(s, page_to_nid(page));
2192		if (n != n2) {
2193			if (n)
2194				spin_unlock(&n->list_lock);
2195
2196			n = n2;
2197			spin_lock(&n->list_lock);
2198		}
2199
2200		do {
2201
2202			old.freelist = page->freelist;
2203			old.counters = page->counters;
2204			VM_BUG_ON(!old.frozen);
2205
2206			new.counters = old.counters;
2207			new.freelist = old.freelist;
2208
2209			new.frozen = 0;
2210
2211		} while (!__cmpxchg_double_slab(s, page,
2212				old.freelist, old.counters,
2213				new.freelist, new.counters,
2214				"unfreezing slab"));
2215
2216		if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) {
2217			page->next = discard_page;
2218			discard_page = page;
2219		} else {
2220			add_partial(n, page, DEACTIVATE_TO_TAIL);
2221			stat(s, FREE_ADD_PARTIAL);
2222		}
2223	}
2224
2225	if (n)
2226		spin_unlock(&n->list_lock);
2227
2228	while (discard_page) {
2229		page = discard_page;
2230		discard_page = discard_page->next;
2231
2232		stat(s, DEACTIVATE_EMPTY);
2233		discard_slab(s, page);
2234		stat(s, FREE_SLAB);
2235	}
2236#endif
2237}
2238
2239/*
2240 * Put a page that was just frozen (in __slab_free) into a partial page
2241 * slot if available.
2242 *
2243 * If we did not find a slot then simply move all the partials to the
2244 * per node partial list.
2245 */
2246static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
2247{
2248#ifdef CONFIG_SLUB_CPU_PARTIAL
2249	struct page *oldpage;
2250	int pages;
2251	int pobjects;
2252
2253	preempt_disable();
2254	do {
2255		pages = 0;
2256		pobjects = 0;
2257		oldpage = this_cpu_read(s->cpu_slab->partial);
2258
2259		if (oldpage) {
2260			pobjects = oldpage->pobjects;
2261			pages = oldpage->pages;
2262			if (drain && pobjects > s->cpu_partial) {
2263				unsigned long flags;
2264				/*
2265				 * partial array is full. Move the existing
2266				 * set to the per node partial list.
2267				 */
2268				local_irq_save(flags);
2269				unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
2270				local_irq_restore(flags);
2271				oldpage = NULL;
2272				pobjects = 0;
2273				pages = 0;
2274				stat(s, CPU_PARTIAL_DRAIN);
2275			}
2276		}
2277
2278		pages++;
2279		pobjects += page->objects - page->inuse;
2280
2281		page->pages = pages;
2282		page->pobjects = pobjects;
2283		page->next = oldpage;
2284
2285	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page)
2286								!= oldpage);
2287	if (unlikely(!s->cpu_partial)) {
2288		unsigned long flags;
2289
2290		local_irq_save(flags);
2291		unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
2292		local_irq_restore(flags);
2293	}
2294	preempt_enable();
2295#endif
2296}
2297
2298static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
2299{
2300	stat(s, CPUSLAB_FLUSH);
2301	deactivate_slab(s, c->page, c->freelist, c);
2302
2303	c->tid = next_tid(c->tid);
2304}
2305
2306/*
2307 * Flush cpu slab.
2308 *
2309 * Called from IPI handler with interrupts disabled.
2310 */
2311static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
2312{
2313	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
2314
2315	if (likely(c)) {
2316		if (c->page)
2317			flush_slab(s, c);
2318
2319		unfreeze_partials(s, c);
2320	}
2321}
2322
2323static void flush_cpu_slab(void *d)
2324{
2325	struct kmem_cache *s = d;
2326
2327	__flush_cpu_slab(s, smp_processor_id());
2328}
2329
2330static bool has_cpu_slab(int cpu, void *info)
2331{
2332	struct kmem_cache *s = info;
2333	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
2334
2335	return c->page || slub_percpu_partial(c);
2336}
2337
2338static void flush_all(struct kmem_cache *s)
2339{
2340	on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
2341}
2342
2343/*
2344 * Use the cpu notifier to insure that the cpu slabs are flushed when
2345 * necessary.
2346 */
2347static int slub_cpu_dead(unsigned int cpu)
2348{
2349	struct kmem_cache *s;
2350	unsigned long flags;
2351
2352	mutex_lock(&slab_mutex);
2353	list_for_each_entry(s, &slab_caches, list) {
2354		local_irq_save(flags);
2355		__flush_cpu_slab(s, cpu);
2356		local_irq_restore(flags);
2357	}
2358	mutex_unlock(&slab_mutex);
2359	return 0;
2360}
2361
2362/*
2363 * Check if the objects in a per cpu structure fit numa
2364 * locality expectations.
2365 */
2366static inline int node_match(struct page *page, int node)
2367{
2368#ifdef CONFIG_NUMA
2369	if (!page || (node != NUMA_NO_NODE && page_to_nid(page) != node))
2370		return 0;
2371#endif
2372	return 1;
2373}
2374
2375#ifdef CONFIG_SLUB_DEBUG
2376static int count_free(struct page *page)
2377{
2378	return page->objects - page->inuse;
2379}
2380
2381static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
2382{
2383	return atomic_long_read(&n->total_objects);
2384}
2385#endif /* CONFIG_SLUB_DEBUG */
2386
2387#if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SYSFS)
2388static unsigned long count_partial(struct kmem_cache_node *n,
2389					int (*get_count)(struct page *))
2390{
2391	unsigned long flags;
2392	unsigned long x = 0;
2393	struct page *page;
2394
2395	spin_lock_irqsave(&n->list_lock, flags);
2396	list_for_each_entry(page, &n->partial, lru)
2397		x += get_count(page);
2398	spin_unlock_irqrestore(&n->list_lock, flags);
2399	return x;
2400}
2401#endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
2402
2403static noinline void
2404slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
2405{
2406#ifdef CONFIG_SLUB_DEBUG
2407	static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
2408				      DEFAULT_RATELIMIT_BURST);
2409	int node;
2410	struct kmem_cache_node *n;
2411
2412	if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs))
2413		return;
2414
2415	pr_warn("SLUB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
2416		nid, gfpflags, &gfpflags);
2417	pr_warn("  cache: %s, object size: %u, buffer size: %u, default order: %u, min order: %u\n",
2418		s->name, s->object_size, s->size, oo_order(s->oo),
2419		oo_order(s->min));
2420
2421	if (oo_order(s->min) > get_order(s->object_size))
2422		pr_warn("  %s debugging increased min order, use slub_debug=O to disable.\n",
2423			s->name);
2424
2425	for_each_kmem_cache_node(s, node, n) {
2426		unsigned long nr_slabs;
2427		unsigned long nr_objs;
2428		unsigned long nr_free;
2429
2430		nr_free  = count_partial(n, count_free);
2431		nr_slabs = node_nr_slabs(n);
2432		nr_objs  = node_nr_objs(n);
2433
2434		pr_warn("  node %d: slabs: %ld, objs: %ld, free: %ld\n",
2435			node, nr_slabs, nr_objs, nr_free);
2436	}
2437#endif
2438}
2439
2440static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
2441			int node, struct kmem_cache_cpu **pc)
2442{
2443	void *freelist;
2444	struct kmem_cache_cpu *c = *pc;
2445	struct page *page;
2446
 
 
2447	freelist = get_partial(s, flags, node, c);
2448
2449	if (freelist)
2450		return freelist;
2451
2452	page = new_slab(s, flags, node);
2453	if (page) {
2454		c = raw_cpu_ptr(s->cpu_slab);
2455		if (c->page)
2456			flush_slab(s, c);
2457
2458		/*
2459		 * No other reference to the page yet so we can
2460		 * muck around with it freely without cmpxchg
2461		 */
2462		freelist = page->freelist;
2463		page->freelist = NULL;
2464
2465		stat(s, ALLOC_SLAB);
2466		c->page = page;
2467		*pc = c;
2468	} else
2469		freelist = NULL;
2470
2471	return freelist;
2472}
2473
2474static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
2475{
2476	if (unlikely(PageSlabPfmemalloc(page)))
2477		return gfp_pfmemalloc_allowed(gfpflags);
2478
2479	return true;
2480}
2481
2482/*
2483 * Check the page->freelist of a page and either transfer the freelist to the
2484 * per cpu freelist or deactivate the page.
2485 *
2486 * The page is still frozen if the return value is not NULL.
2487 *
2488 * If this function returns NULL then the page has been unfrozen.
2489 *
2490 * This function must be called with interrupt disabled.
2491 */
2492static inline void *get_freelist(struct kmem_cache *s, struct page *page)
2493{
2494	struct page new;
2495	unsigned long counters;
2496	void *freelist;
2497
2498	do {
2499		freelist = page->freelist;
2500		counters = page->counters;
2501
2502		new.counters = counters;
2503		VM_BUG_ON(!new.frozen);
2504
2505		new.inuse = page->objects;
2506		new.frozen = freelist != NULL;
2507
2508	} while (!__cmpxchg_double_slab(s, page,
2509		freelist, counters,
2510		NULL, new.counters,
2511		"get_freelist"));
2512
2513	return freelist;
2514}
2515
2516/*
2517 * Slow path. The lockless freelist is empty or we need to perform
2518 * debugging duties.
2519 *
2520 * Processing is still very fast if new objects have been freed to the
2521 * regular freelist. In that case we simply take over the regular freelist
2522 * as the lockless freelist and zap the regular freelist.
2523 *
2524 * If that is not working then we fall back to the partial lists. We take the
2525 * first element of the freelist as the object to allocate now and move the
2526 * rest of the freelist to the lockless freelist.
2527 *
2528 * And if we were unable to get a new slab from the partial slab lists then
2529 * we need to allocate a new slab. This is the slowest path since it involves
2530 * a call to the page allocator and the setup of a new slab.
2531 *
2532 * Version of __slab_alloc to use when we know that interrupts are
2533 * already disabled (which is the case for bulk allocation).
2534 */
2535static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
2536			  unsigned long addr, struct kmem_cache_cpu *c)
2537{
2538	void *freelist;
2539	struct page *page;
2540
 
 
2541	page = c->page;
2542	if (!page)
 
 
 
 
 
 
 
2543		goto new_slab;
 
2544redo:
2545
2546	if (unlikely(!node_match(page, node))) {
2547		int searchnode = node;
2548
2549		if (node != NUMA_NO_NODE && !node_present_pages(node))
2550			searchnode = node_to_mem_node(node);
2551
2552		if (unlikely(!node_match(page, searchnode))) {
 
 
2553			stat(s, ALLOC_NODE_MISMATCH);
2554			deactivate_slab(s, page, c->freelist, c);
2555			goto new_slab;
2556		}
2557	}
2558
2559	/*
2560	 * By rights, we should be searching for a slab page that was
2561	 * PFMEMALLOC but right now, we are losing the pfmemalloc
2562	 * information when the page leaves the per-cpu allocator
2563	 */
2564	if (unlikely(!pfmemalloc_match(page, gfpflags))) {
2565		deactivate_slab(s, page, c->freelist, c);
2566		goto new_slab;
2567	}
2568
2569	/* must check again c->freelist in case of cpu migration or IRQ */
2570	freelist = c->freelist;
2571	if (freelist)
2572		goto load_freelist;
2573
2574	freelist = get_freelist(s, page);
2575
2576	if (!freelist) {
2577		c->page = NULL;
2578		stat(s, DEACTIVATE_BYPASS);
2579		goto new_slab;
2580	}
2581
2582	stat(s, ALLOC_REFILL);
2583
2584load_freelist:
2585	/*
2586	 * freelist is pointing to the list of objects to be used.
2587	 * page is pointing to the page from which the objects are obtained.
2588	 * That page must be frozen for per cpu allocations to work.
2589	 */
2590	VM_BUG_ON(!c->page->frozen);
2591	c->freelist = get_freepointer(s, freelist);
2592	c->tid = next_tid(c->tid);
2593	return freelist;
2594
2595new_slab:
2596
2597	if (slub_percpu_partial(c)) {
2598		page = c->page = slub_percpu_partial(c);
2599		slub_set_percpu_partial(c, page);
2600		stat(s, CPU_PARTIAL_ALLOC);
2601		goto redo;
2602	}
2603
2604	freelist = new_slab_objects(s, gfpflags, node, &c);
2605
2606	if (unlikely(!freelist)) {
2607		slab_out_of_memory(s, gfpflags, node);
2608		return NULL;
2609	}
2610
2611	page = c->page;
2612	if (likely(!kmem_cache_debug(s) && pfmemalloc_match(page, gfpflags)))
2613		goto load_freelist;
2614
2615	/* Only entered in the debug case */
2616	if (kmem_cache_debug(s) &&
2617			!alloc_debug_processing(s, page, freelist, addr))
2618		goto new_slab;	/* Slab failed checks. Next slab needed */
2619
2620	deactivate_slab(s, page, get_freepointer(s, freelist), c);
2621	return freelist;
2622}
2623
2624/*
2625 * Another one that disabled interrupt and compensates for possible
2626 * cpu changes by refetching the per cpu area pointer.
2627 */
2628static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
2629			  unsigned long addr, struct kmem_cache_cpu *c)
2630{
2631	void *p;
2632	unsigned long flags;
2633
2634	local_irq_save(flags);
2635#ifdef CONFIG_PREEMPT
2636	/*
2637	 * We may have been preempted and rescheduled on a different
2638	 * cpu before disabling interrupts. Need to reload cpu area
2639	 * pointer.
2640	 */
2641	c = this_cpu_ptr(s->cpu_slab);
2642#endif
2643
2644	p = ___slab_alloc(s, gfpflags, node, addr, c);
2645	local_irq_restore(flags);
2646	return p;
2647}
2648
2649/*
 
 
 
 
 
 
 
 
 
 
 
 
2650 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
2651 * have the fastpath folded into their functions. So no function call
2652 * overhead for requests that can be satisfied on the fastpath.
2653 *
2654 * The fastpath works by first checking if the lockless freelist can be used.
2655 * If not then __slab_alloc is called for slow processing.
2656 *
2657 * Otherwise we can simply pick the next object from the lockless free list.
2658 */
2659static __always_inline void *slab_alloc_node(struct kmem_cache *s,
2660		gfp_t gfpflags, int node, unsigned long addr)
2661{
2662	void *object;
2663	struct kmem_cache_cpu *c;
2664	struct page *page;
2665	unsigned long tid;
 
 
2666
2667	s = slab_pre_alloc_hook(s, gfpflags);
2668	if (!s)
2669		return NULL;
 
 
 
 
 
2670redo:
2671	/*
2672	 * Must read kmem_cache cpu data via this cpu ptr. Preemption is
2673	 * enabled. We may switch back and forth between cpus while
2674	 * reading from one cpu area. That does not matter as long
2675	 * as we end up on the original cpu again when doing the cmpxchg.
2676	 *
2677	 * We should guarantee that tid and kmem_cache are retrieved on
2678	 * the same cpu. It could be different if CONFIG_PREEMPT so we need
2679	 * to check if it is matched or not.
2680	 */
2681	do {
2682		tid = this_cpu_read(s->cpu_slab->tid);
2683		c = raw_cpu_ptr(s->cpu_slab);
2684	} while (IS_ENABLED(CONFIG_PREEMPT) &&
2685		 unlikely(tid != READ_ONCE(c->tid)));
2686
2687	/*
2688	 * Irqless object alloc/free algorithm used here depends on sequence
2689	 * of fetching cpu_slab's data. tid should be fetched before anything
2690	 * on c to guarantee that object and page associated with previous tid
2691	 * won't be used with current tid. If we fetch tid first, object and
2692	 * page could be one associated with next tid and our alloc/free
2693	 * request will be failed. In this case, we will retry. So, no problem.
2694	 */
2695	barrier();
2696
2697	/*
2698	 * The transaction ids are globally unique per cpu and per operation on
2699	 * a per cpu queue. Thus they can be guarantee that the cmpxchg_double
2700	 * occurs on the right processor and that there was no operation on the
2701	 * linked list in between.
2702	 */
2703
2704	object = c->freelist;
2705	page = c->page;
2706	if (unlikely(!object || !node_match(page, node))) {
2707		object = __slab_alloc(s, gfpflags, node, addr, c);
2708		stat(s, ALLOC_SLOWPATH);
2709	} else {
2710		void *next_object = get_freepointer_safe(s, object);
2711
2712		/*
2713		 * The cmpxchg will only match if there was no additional
2714		 * operation and if we are on the right processor.
2715		 *
2716		 * The cmpxchg does the following atomically (without lock
2717		 * semantics!)
2718		 * 1. Relocate first pointer to the current per cpu area.
2719		 * 2. Verify that tid and freelist have not been changed
2720		 * 3. If they were not changed replace tid and freelist
2721		 *
2722		 * Since this is without lock semantics the protection is only
2723		 * against code executing on this cpu *not* from access by
2724		 * other cpus.
2725		 */
2726		if (unlikely(!this_cpu_cmpxchg_double(
2727				s->cpu_slab->freelist, s->cpu_slab->tid,
2728				object, tid,
2729				next_object, next_tid(tid)))) {
2730
2731			note_cmpxchg_failure("slab_alloc", s, tid);
2732			goto redo;
2733		}
2734		prefetch_freepointer(s, next_object);
2735		stat(s, ALLOC_FASTPATH);
2736	}
2737
2738	if (unlikely(gfpflags & __GFP_ZERO) && object)
2739		memset(object, 0, s->object_size);
2740
2741	slab_post_alloc_hook(s, gfpflags, 1, &object);
 
2742
2743	return object;
2744}
2745
2746static __always_inline void *slab_alloc(struct kmem_cache *s,
2747		gfp_t gfpflags, unsigned long addr)
2748{
2749	return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr);
2750}
2751
2752void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
2753{
2754	void *ret = slab_alloc(s, gfpflags, _RET_IP_);
2755
2756	trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size,
2757				s->size, gfpflags);
2758
2759	return ret;
2760}
2761EXPORT_SYMBOL(kmem_cache_alloc);
2762
2763#ifdef CONFIG_TRACING
2764void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
2765{
2766	void *ret = slab_alloc(s, gfpflags, _RET_IP_);
2767	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
2768	kasan_kmalloc(s, ret, size, gfpflags);
2769	return ret;
2770}
2771EXPORT_SYMBOL(kmem_cache_alloc_trace);
2772#endif
2773
2774#ifdef CONFIG_NUMA
2775void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
2776{
2777	void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
2778
2779	trace_kmem_cache_alloc_node(_RET_IP_, ret,
2780				    s->object_size, s->size, gfpflags, node);
2781
2782	return ret;
2783}
2784EXPORT_SYMBOL(kmem_cache_alloc_node);
2785
2786#ifdef CONFIG_TRACING
2787void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
2788				    gfp_t gfpflags,
2789				    int node, size_t size)
2790{
2791	void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
2792
2793	trace_kmalloc_node(_RET_IP_, ret,
2794			   size, s->size, gfpflags, node);
2795
2796	kasan_kmalloc(s, ret, size, gfpflags);
2797	return ret;
2798}
2799EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
2800#endif
2801#endif
2802
2803/*
2804 * Slow path handling. This may still be called frequently since objects
2805 * have a longer lifetime than the cpu slabs in most processing loads.
2806 *
2807 * So we still attempt to reduce cache line usage. Just take the slab
2808 * lock and free the item. If there is no additional partial page
2809 * handling required then we can return immediately.
2810 */
2811static void __slab_free(struct kmem_cache *s, struct page *page,
2812			void *head, void *tail, int cnt,
2813			unsigned long addr)
2814
2815{
2816	void *prior;
2817	int was_frozen;
2818	struct page new;
2819	unsigned long counters;
2820	struct kmem_cache_node *n = NULL;
2821	unsigned long uninitialized_var(flags);
2822
2823	stat(s, FREE_SLOWPATH);
2824
 
 
 
2825	if (kmem_cache_debug(s) &&
2826	    !free_debug_processing(s, page, head, tail, cnt, addr))
2827		return;
2828
2829	do {
2830		if (unlikely(n)) {
2831			spin_unlock_irqrestore(&n->list_lock, flags);
2832			n = NULL;
2833		}
2834		prior = page->freelist;
2835		counters = page->counters;
2836		set_freepointer(s, tail, prior);
2837		new.counters = counters;
2838		was_frozen = new.frozen;
2839		new.inuse -= cnt;
2840		if ((!new.inuse || !prior) && !was_frozen) {
2841
2842			if (kmem_cache_has_cpu_partial(s) && !prior) {
2843
2844				/*
2845				 * Slab was on no list before and will be
2846				 * partially empty
2847				 * We can defer the list move and instead
2848				 * freeze it.
2849				 */
2850				new.frozen = 1;
2851
2852			} else { /* Needs to be taken off a list */
2853
2854				n = get_node(s, page_to_nid(page));
2855				/*
2856				 * Speculatively acquire the list_lock.
2857				 * If the cmpxchg does not succeed then we may
2858				 * drop the list_lock without any processing.
2859				 *
2860				 * Otherwise the list_lock will synchronize with
2861				 * other processors updating the list of slabs.
2862				 */
2863				spin_lock_irqsave(&n->list_lock, flags);
2864
2865			}
2866		}
2867
2868	} while (!cmpxchg_double_slab(s, page,
2869		prior, counters,
2870		head, new.counters,
2871		"__slab_free"));
2872
2873	if (likely(!n)) {
2874
2875		/*
2876		 * If we just froze the page then put it onto the
2877		 * per cpu partial list.
2878		 */
2879		if (new.frozen && !was_frozen) {
 
 
 
 
 
 
2880			put_cpu_partial(s, page, 1);
2881			stat(s, CPU_PARTIAL_FREE);
2882		}
2883		/*
2884		 * The list lock was not taken therefore no list
2885		 * activity can be necessary.
2886		 */
2887		if (was_frozen)
2888			stat(s, FREE_FROZEN);
2889		return;
2890	}
2891
2892	if (unlikely(!new.inuse && n->nr_partial >= s->min_partial))
2893		goto slab_empty;
2894
2895	/*
2896	 * Objects left in the slab. If it was not on the partial list before
2897	 * then add it.
2898	 */
2899	if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
2900		if (kmem_cache_debug(s))
2901			remove_full(s, n, page);
2902		add_partial(n, page, DEACTIVATE_TO_TAIL);
2903		stat(s, FREE_ADD_PARTIAL);
2904	}
2905	spin_unlock_irqrestore(&n->list_lock, flags);
2906	return;
2907
2908slab_empty:
2909	if (prior) {
2910		/*
2911		 * Slab on the partial list.
2912		 */
2913		remove_partial(n, page);
2914		stat(s, FREE_REMOVE_PARTIAL);
2915	} else {
2916		/* Slab must be on the full list */
2917		remove_full(s, n, page);
2918	}
2919
2920	spin_unlock_irqrestore(&n->list_lock, flags);
2921	stat(s, FREE_SLAB);
2922	discard_slab(s, page);
2923}
2924
2925/*
2926 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
2927 * can perform fastpath freeing without additional function calls.
2928 *
2929 * The fastpath is only possible if we are freeing to the current cpu slab
2930 * of this processor. This typically the case if we have just allocated
2931 * the item before.
2932 *
2933 * If fastpath is not possible then fall back to __slab_free where we deal
2934 * with all sorts of special processing.
2935 *
2936 * Bulk free of a freelist with several objects (all pointing to the
2937 * same page) possible by specifying head and tail ptr, plus objects
2938 * count (cnt). Bulk free indicated by tail pointer being set.
2939 */
2940static __always_inline void do_slab_free(struct kmem_cache *s,
2941				struct page *page, void *head, void *tail,
2942				int cnt, unsigned long addr)
2943{
2944	void *tail_obj = tail ? : head;
2945	struct kmem_cache_cpu *c;
2946	unsigned long tid;
 
 
 
 
2947redo:
2948	/*
2949	 * Determine the currently cpus per cpu slab.
2950	 * The cpu may change afterward. However that does not matter since
2951	 * data is retrieved via this pointer. If we are on the same cpu
2952	 * during the cmpxchg then the free will succeed.
2953	 */
2954	do {
2955		tid = this_cpu_read(s->cpu_slab->tid);
2956		c = raw_cpu_ptr(s->cpu_slab);
2957	} while (IS_ENABLED(CONFIG_PREEMPT) &&
2958		 unlikely(tid != READ_ONCE(c->tid)));
2959
2960	/* Same with comment on barrier() in slab_alloc_node() */
2961	barrier();
2962
2963	if (likely(page == c->page)) {
2964		set_freepointer(s, tail_obj, c->freelist);
 
 
2965
2966		if (unlikely(!this_cpu_cmpxchg_double(
2967				s->cpu_slab->freelist, s->cpu_slab->tid,
2968				c->freelist, tid,
2969				head, next_tid(tid)))) {
2970
2971			note_cmpxchg_failure("slab_free", s, tid);
2972			goto redo;
2973		}
2974		stat(s, FREE_FASTPATH);
2975	} else
2976		__slab_free(s, page, head, tail_obj, cnt, addr);
2977
2978}
2979
2980static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
2981				      void *head, void *tail, int cnt,
2982				      unsigned long addr)
2983{
2984	/*
2985	 * With KASAN enabled slab_free_freelist_hook modifies the freelist
2986	 * to remove objects, whose reuse must be delayed.
2987	 */
2988	if (slab_free_freelist_hook(s, &head, &tail))
2989		do_slab_free(s, page, head, tail, cnt, addr);
2990}
2991
2992#ifdef CONFIG_KASAN
2993void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
2994{
2995	do_slab_free(cache, virt_to_head_page(x), x, NULL, 1, addr);
2996}
2997#endif
2998
2999void kmem_cache_free(struct kmem_cache *s, void *x)
3000{
3001	s = cache_from_obj(s, x);
3002	if (!s)
3003		return;
3004	slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_);
3005	trace_kmem_cache_free(_RET_IP_, x);
3006}
3007EXPORT_SYMBOL(kmem_cache_free);
3008
3009struct detached_freelist {
3010	struct page *page;
3011	void *tail;
3012	void *freelist;
3013	int cnt;
3014	struct kmem_cache *s;
3015};
3016
 
 
 
 
 
 
 
 
 
 
3017/*
3018 * This function progressively scans the array with free objects (with
3019 * a limited look ahead) and extract objects belonging to the same
3020 * page.  It builds a detached freelist directly within the given
3021 * page/objects.  This can happen without any need for
3022 * synchronization, because the objects are owned by running process.
3023 * The freelist is build up as a single linked list in the objects.
3024 * The idea is, that this detached freelist can then be bulk
3025 * transferred to the real freelist(s), but only requiring a single
3026 * synchronization primitive.  Look ahead in the array is limited due
3027 * to performance reasons.
3028 */
3029static inline
3030int build_detached_freelist(struct kmem_cache *s, size_t size,
3031			    void **p, struct detached_freelist *df)
3032{
3033	size_t first_skipped_index = 0;
3034	int lookahead = 3;
3035	void *object;
3036	struct page *page;
3037
3038	/* Always re-init detached_freelist */
3039	df->page = NULL;
3040
3041	do {
3042		object = p[--size];
3043		/* Do we need !ZERO_OR_NULL_PTR(object) here? (for kfree) */
3044	} while (!object && size);
3045
3046	if (!object)
3047		return 0;
3048
3049	page = virt_to_head_page(object);
3050	if (!s) {
3051		/* Handle kalloc'ed objects */
3052		if (unlikely(!PageSlab(page))) {
3053			BUG_ON(!PageCompound(page));
3054			kfree_hook(object);
3055			__free_pages(page, compound_order(page));
3056			p[size] = NULL; /* mark object processed */
3057			return size;
3058		}
3059		/* Derive kmem_cache from object */
3060		df->s = page->slab_cache;
3061	} else {
3062		df->s = cache_from_obj(s, object); /* Support for memcg */
3063	}
3064
 
 
 
 
 
 
 
3065	/* Start new detached freelist */
3066	df->page = page;
3067	set_freepointer(df->s, object, NULL);
3068	df->tail = object;
3069	df->freelist = object;
3070	p[size] = NULL; /* mark object processed */
3071	df->cnt = 1;
3072
3073	while (size) {
3074		object = p[--size];
3075		if (!object)
3076			continue; /* Skip processed objects */
3077
3078		/* df->page is always set at this point */
3079		if (df->page == virt_to_head_page(object)) {
3080			/* Opportunity build freelist */
3081			set_freepointer(df->s, object, df->freelist);
3082			df->freelist = object;
3083			df->cnt++;
3084			p[size] = NULL; /* mark object processed */
3085
3086			continue;
3087		}
3088
3089		/* Limit look ahead search */
3090		if (!--lookahead)
3091			break;
3092
3093		if (!first_skipped_index)
3094			first_skipped_index = size + 1;
3095	}
3096
3097	return first_skipped_index;
3098}
3099
3100/* Note that interrupts must be enabled when calling this function. */
3101void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
3102{
3103	if (WARN_ON(!size))
3104		return;
3105
 
3106	do {
3107		struct detached_freelist df;
3108
3109		size = build_detached_freelist(s, size, p, &df);
3110		if (!df.page)
3111			continue;
3112
3113		slab_free(df.s, df.page, df.freelist, df.tail, df.cnt,_RET_IP_);
3114	} while (likely(size));
3115}
3116EXPORT_SYMBOL(kmem_cache_free_bulk);
3117
3118/* Note that interrupts must be enabled when calling this function. */
3119int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
3120			  void **p)
3121{
3122	struct kmem_cache_cpu *c;
3123	int i;
 
3124
3125	/* memcg and kmem_cache debug support */
3126	s = slab_pre_alloc_hook(s, flags);
3127	if (unlikely(!s))
3128		return false;
3129	/*
3130	 * Drain objects in the per cpu slab, while disabling local
3131	 * IRQs, which protects against PREEMPT and interrupts
3132	 * handlers invoking normal fastpath.
3133	 */
3134	local_irq_disable();
3135	c = this_cpu_ptr(s->cpu_slab);
3136
3137	for (i = 0; i < size; i++) {
3138		void *object = c->freelist;
 
 
 
 
 
3139
 
3140		if (unlikely(!object)) {
3141			/*
 
 
 
 
 
 
 
 
 
3142			 * Invoking slow path likely have side-effect
3143			 * of re-populating per CPU c->freelist
3144			 */
3145			p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
3146					    _RET_IP_, c);
3147			if (unlikely(!p[i]))
3148				goto error;
3149
3150			c = this_cpu_ptr(s->cpu_slab);
 
 
3151			continue; /* goto for-loop */
3152		}
3153		c->freelist = get_freepointer(s, object);
3154		p[i] = object;
 
3155	}
3156	c->tid = next_tid(c->tid);
3157	local_irq_enable();
3158
3159	/* Clear memory outside IRQ disabled fastpath loop */
3160	if (unlikely(flags & __GFP_ZERO)) {
3161		int j;
3162
3163		for (j = 0; j < i; j++)
3164			memset(p[j], 0, s->object_size);
3165	}
3166
3167	/* memcg and kmem_cache debug support */
3168	slab_post_alloc_hook(s, flags, size, p);
3169	return i;
3170error:
3171	local_irq_enable();
3172	slab_post_alloc_hook(s, flags, i, p);
3173	__kmem_cache_free_bulk(s, i, p);
3174	return 0;
3175}
3176EXPORT_SYMBOL(kmem_cache_alloc_bulk);
3177
3178
3179/*
3180 * Object placement in a slab is made very easy because we always start at
3181 * offset 0. If we tune the size of the object to the alignment then we can
3182 * get the required alignment by putting one properly sized object after
3183 * another.
3184 *
3185 * Notice that the allocation order determines the sizes of the per cpu
3186 * caches. Each processor has always one slab available for allocations.
3187 * Increasing the allocation order reduces the number of times that slabs
3188 * must be moved on and off the partial lists and is therefore a factor in
3189 * locking overhead.
3190 */
3191
3192/*
3193 * Mininum / Maximum order of slab pages. This influences locking overhead
3194 * and slab fragmentation. A higher order reduces the number of partial slabs
3195 * and increases the number of allocations possible without having to
3196 * take the list_lock.
3197 */
3198static unsigned int slub_min_order;
3199static unsigned int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
3200static unsigned int slub_min_objects;
3201
3202/*
3203 * Calculate the order of allocation given an slab object size.
3204 *
3205 * The order of allocation has significant impact on performance and other
3206 * system components. Generally order 0 allocations should be preferred since
3207 * order 0 does not cause fragmentation in the page allocator. Larger objects
3208 * be problematic to put into order 0 slabs because there may be too much
3209 * unused space left. We go to a higher order if more than 1/16th of the slab
3210 * would be wasted.
3211 *
3212 * In order to reach satisfactory performance we must ensure that a minimum
3213 * number of objects is in one slab. Otherwise we may generate too much
3214 * activity on the partial lists which requires taking the list_lock. This is
3215 * less a concern for large slabs though which are rarely used.
3216 *
3217 * slub_max_order specifies the order where we begin to stop considering the
3218 * number of objects in a slab as critical. If we reach slub_max_order then
3219 * we try to keep the page order as low as possible. So we accept more waste
3220 * of space in favor of a small page order.
3221 *
3222 * Higher order allocations also allow the placement of more objects in a
3223 * slab and thereby reduce object handling overhead. If the user has
3224 * requested a higher mininum order then we start with that one instead of
3225 * the smallest order which will fit the object.
3226 */
3227static inline unsigned int slab_order(unsigned int size,
3228		unsigned int min_objects, unsigned int max_order,
3229		unsigned int fract_leftover, unsigned int reserved)
3230{
3231	unsigned int min_order = slub_min_order;
3232	unsigned int order;
3233
3234	if (order_objects(min_order, size, reserved) > MAX_OBJS_PER_PAGE)
3235		return get_order(size * MAX_OBJS_PER_PAGE) - 1;
3236
3237	for (order = max(min_order, (unsigned int)get_order(min_objects * size + reserved));
3238			order <= max_order; order++) {
3239
3240		unsigned int slab_size = (unsigned int)PAGE_SIZE << order;
3241		unsigned int rem;
3242
3243		rem = (slab_size - reserved) % size;
3244
3245		if (rem <= slab_size / fract_leftover)
3246			break;
3247	}
3248
3249	return order;
3250}
3251
3252static inline int calculate_order(unsigned int size, unsigned int reserved)
3253{
3254	unsigned int order;
3255	unsigned int min_objects;
3256	unsigned int max_objects;
 
3257
3258	/*
3259	 * Attempt to find best configuration for a slab. This
3260	 * works by first attempting to generate a layout with
3261	 * the best configuration and backing off gradually.
3262	 *
3263	 * First we increase the acceptable waste in a slab. Then
3264	 * we reduce the minimum objects required in a slab.
3265	 */
3266	min_objects = slub_min_objects;
3267	if (!min_objects)
3268		min_objects = 4 * (fls(nr_cpu_ids) + 1);
3269	max_objects = order_objects(slub_max_order, size, reserved);
 
 
 
 
 
 
 
 
 
 
 
 
 
3270	min_objects = min(min_objects, max_objects);
3271
3272	while (min_objects > 1) {
3273		unsigned int fraction;
3274
3275		fraction = 16;
3276		while (fraction >= 4) {
3277			order = slab_order(size, min_objects,
3278					slub_max_order, fraction, reserved);
3279			if (order <= slub_max_order)
3280				return order;
3281			fraction /= 2;
3282		}
3283		min_objects--;
3284	}
3285
3286	/*
3287	 * We were unable to place multiple objects in a slab. Now
3288	 * lets see if we can place a single object there.
3289	 */
3290	order = slab_order(size, 1, slub_max_order, 1, reserved);
3291	if (order <= slub_max_order)
3292		return order;
3293
3294	/*
3295	 * Doh this slab cannot be placed using slub_max_order.
3296	 */
3297	order = slab_order(size, 1, MAX_ORDER, 1, reserved);
3298	if (order < MAX_ORDER)
3299		return order;
3300	return -ENOSYS;
3301}
3302
3303static void
3304init_kmem_cache_node(struct kmem_cache_node *n)
3305{
3306	n->nr_partial = 0;
3307	spin_lock_init(&n->list_lock);
3308	INIT_LIST_HEAD(&n->partial);
3309#ifdef CONFIG_SLUB_DEBUG
3310	atomic_long_set(&n->nr_slabs, 0);
3311	atomic_long_set(&n->total_objects, 0);
3312	INIT_LIST_HEAD(&n->full);
3313#endif
3314}
3315
3316static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
3317{
3318	BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
3319			KMALLOC_SHIFT_HIGH * sizeof(struct kmem_cache_cpu));
3320
3321	/*
3322	 * Must align to double word boundary for the double cmpxchg
3323	 * instructions to work; see __pcpu_double_call_return_bool().
3324	 */
3325	s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
3326				     2 * sizeof(void *));
3327
3328	if (!s->cpu_slab)
3329		return 0;
3330
3331	init_kmem_cache_cpus(s);
3332
3333	return 1;
3334}
3335
3336static struct kmem_cache *kmem_cache_node;
3337
3338/*
3339 * No kmalloc_node yet so do it by hand. We know that this is the first
3340 * slab on the node for this slabcache. There are no concurrent accesses
3341 * possible.
3342 *
3343 * Note that this function only works on the kmem_cache_node
3344 * when allocating for the kmem_cache_node. This is used for bootstrapping
3345 * memory on a fresh node that has no slab structures yet.
3346 */
3347static void early_kmem_cache_node_alloc(int node)
3348{
3349	struct page *page;
3350	struct kmem_cache_node *n;
3351
3352	BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
3353
3354	page = new_slab(kmem_cache_node, GFP_NOWAIT, node);
3355
3356	BUG_ON(!page);
3357	if (page_to_nid(page) != node) {
3358		pr_err("SLUB: Unable to allocate memory from node %d\n", node);
3359		pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n");
3360	}
3361
3362	n = page->freelist;
3363	BUG_ON(!n);
3364	page->freelist = get_freepointer(kmem_cache_node, n);
3365	page->inuse = 1;
3366	page->frozen = 0;
3367	kmem_cache_node->node[node] = n;
3368#ifdef CONFIG_SLUB_DEBUG
3369	init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
3370	init_tracking(kmem_cache_node, n);
3371#endif
3372	kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node),
3373		      GFP_KERNEL);
 
 
 
3374	init_kmem_cache_node(n);
3375	inc_slabs_node(kmem_cache_node, node, page->objects);
3376
3377	/*
3378	 * No locks need to be taken here as it has just been
3379	 * initialized and there is no concurrent access.
3380	 */
3381	__add_partial(n, page, DEACTIVATE_TO_HEAD);
3382}
3383
3384static void free_kmem_cache_nodes(struct kmem_cache *s)
3385{
3386	int node;
3387	struct kmem_cache_node *n;
3388
3389	for_each_kmem_cache_node(s, node, n) {
3390		s->node[node] = NULL;
3391		kmem_cache_free(kmem_cache_node, n);
3392	}
3393}
3394
3395void __kmem_cache_release(struct kmem_cache *s)
3396{
3397	cache_random_seq_destroy(s);
3398	free_percpu(s->cpu_slab);
3399	free_kmem_cache_nodes(s);
3400}
3401
3402static int init_kmem_cache_nodes(struct kmem_cache *s)
3403{
3404	int node;
3405
3406	for_each_node_state(node, N_NORMAL_MEMORY) {
3407		struct kmem_cache_node *n;
3408
3409		if (slab_state == DOWN) {
3410			early_kmem_cache_node_alloc(node);
3411			continue;
3412		}
3413		n = kmem_cache_alloc_node(kmem_cache_node,
3414						GFP_KERNEL, node);
3415
3416		if (!n) {
3417			free_kmem_cache_nodes(s);
3418			return 0;
3419		}
3420
3421		init_kmem_cache_node(n);
3422		s->node[node] = n;
3423	}
3424	return 1;
3425}
3426
3427static void set_min_partial(struct kmem_cache *s, unsigned long min)
3428{
3429	if (min < MIN_PARTIAL)
3430		min = MIN_PARTIAL;
3431	else if (min > MAX_PARTIAL)
3432		min = MAX_PARTIAL;
3433	s->min_partial = min;
3434}
3435
3436static void set_cpu_partial(struct kmem_cache *s)
3437{
3438#ifdef CONFIG_SLUB_CPU_PARTIAL
3439	/*
3440	 * cpu_partial determined the maximum number of objects kept in the
3441	 * per cpu partial lists of a processor.
3442	 *
3443	 * Per cpu partial lists mainly contain slabs that just have one
3444	 * object freed. If they are used for allocation then they can be
3445	 * filled up again with minimal effort. The slab will never hit the
3446	 * per node partial lists and therefore no locking will be required.
3447	 *
3448	 * This setting also determines
3449	 *
3450	 * A) The number of objects from per cpu partial slabs dumped to the
3451	 *    per node list when we reach the limit.
3452	 * B) The number of objects in cpu partial slabs to extract from the
3453	 *    per node list when we run out of per cpu objects. We only fetch
3454	 *    50% to keep some capacity around for frees.
3455	 */
3456	if (!kmem_cache_has_cpu_partial(s))
3457		s->cpu_partial = 0;
3458	else if (s->size >= PAGE_SIZE)
3459		s->cpu_partial = 2;
3460	else if (s->size >= 1024)
3461		s->cpu_partial = 6;
3462	else if (s->size >= 256)
3463		s->cpu_partial = 13;
3464	else
3465		s->cpu_partial = 30;
3466#endif
3467}
3468
3469/*
3470 * calculate_sizes() determines the order and the distribution of data within
3471 * a slab object.
3472 */
3473static int calculate_sizes(struct kmem_cache *s, int forced_order)
3474{
3475	slab_flags_t flags = s->flags;
3476	unsigned int size = s->object_size;
3477	unsigned int order;
3478
3479	/*
3480	 * Round up object size to the next word boundary. We can only
3481	 * place the free pointer at word boundaries and this determines
3482	 * the possible location of the free pointer.
3483	 */
3484	size = ALIGN(size, sizeof(void *));
3485
3486#ifdef CONFIG_SLUB_DEBUG
3487	/*
3488	 * Determine if we can poison the object itself. If the user of
3489	 * the slab may touch the object after free or before allocation
3490	 * then we should never poison the object itself.
3491	 */
3492	if ((flags & SLAB_POISON) && !(flags & SLAB_TYPESAFE_BY_RCU) &&
3493			!s->ctor)
3494		s->flags |= __OBJECT_POISON;
3495	else
3496		s->flags &= ~__OBJECT_POISON;
3497
3498
3499	/*
3500	 * If we are Redzoning then check if there is some space between the
3501	 * end of the object and the free pointer. If not then add an
3502	 * additional word to have some bytes to store Redzone information.
3503	 */
3504	if ((flags & SLAB_RED_ZONE) && size == s->object_size)
3505		size += sizeof(void *);
3506#endif
3507
3508	/*
3509	 * With that we have determined the number of bytes in actual use
3510	 * by the object. This is the potential offset to the free pointer.
3511	 */
3512	s->inuse = size;
3513
3514	if (((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
3515		s->ctor)) {
 
3516		/*
3517		 * Relocate free pointer after the object if it is not
3518		 * permitted to overwrite the first word of the object on
3519		 * kmem_cache_free.
3520		 *
3521		 * This is the case if we do RCU, have a constructor or
3522		 * destructor or are poisoning the objects.
 
 
 
 
 
 
3523		 */
3524		s->offset = size;
3525		size += sizeof(void *);
 
 
 
 
 
 
 
3526	}
3527
3528#ifdef CONFIG_SLUB_DEBUG
3529	if (flags & SLAB_STORE_USER)
3530		/*
3531		 * Need to store information about allocs and frees after
3532		 * the object.
3533		 */
3534		size += 2 * sizeof(struct track);
3535#endif
3536
3537	kasan_cache_create(s, &size, &s->flags);
3538#ifdef CONFIG_SLUB_DEBUG
3539	if (flags & SLAB_RED_ZONE) {
3540		/*
3541		 * Add some empty padding so that we can catch
3542		 * overwrites from earlier objects rather than let
3543		 * tracking information or the free pointer be
3544		 * corrupted if a user writes before the start
3545		 * of the object.
3546		 */
3547		size += sizeof(void *);
3548
3549		s->red_left_pad = sizeof(void *);
3550		s->red_left_pad = ALIGN(s->red_left_pad, s->align);
3551		size += s->red_left_pad;
3552	}
3553#endif
3554
3555	/*
3556	 * SLUB stores one object immediately after another beginning from
3557	 * offset 0. In order to align the objects we have to simply size
3558	 * each object to conform to the alignment.
3559	 */
3560	size = ALIGN(size, s->align);
3561	s->size = size;
 
3562	if (forced_order >= 0)
3563		order = forced_order;
3564	else
3565		order = calculate_order(size, s->reserved);
3566
3567	if ((int)order < 0)
3568		return 0;
3569
3570	s->allocflags = 0;
3571	if (order)
3572		s->allocflags |= __GFP_COMP;
3573
3574	if (s->flags & SLAB_CACHE_DMA)
3575		s->allocflags |= GFP_DMA;
3576
 
 
 
3577	if (s->flags & SLAB_RECLAIM_ACCOUNT)
3578		s->allocflags |= __GFP_RECLAIMABLE;
3579
3580	/*
3581	 * Determine the number of objects per slab
3582	 */
3583	s->oo = oo_make(order, size, s->reserved);
3584	s->min = oo_make(get_order(size), size, s->reserved);
3585	if (oo_objects(s->oo) > oo_objects(s->max))
3586		s->max = s->oo;
3587
3588	return !!oo_objects(s->oo);
3589}
3590
3591static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
3592{
3593	s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor);
3594	s->reserved = 0;
3595#ifdef CONFIG_SLAB_FREELIST_HARDENED
3596	s->random = get_random_long();
3597#endif
3598
3599	if (need_reserve_slab_rcu && (s->flags & SLAB_TYPESAFE_BY_RCU))
3600		s->reserved = sizeof(struct rcu_head);
3601
3602	if (!calculate_sizes(s, -1))
3603		goto error;
3604	if (disable_higher_order_debug) {
3605		/*
3606		 * Disable debugging flags that store metadata if the min slab
3607		 * order increased.
3608		 */
3609		if (get_order(s->size) > get_order(s->object_size)) {
3610			s->flags &= ~DEBUG_METADATA_FLAGS;
3611			s->offset = 0;
3612			if (!calculate_sizes(s, -1))
3613				goto error;
3614		}
3615	}
3616
3617#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
3618    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
3619	if (system_has_cmpxchg_double() && (s->flags & SLAB_NO_CMPXCHG) == 0)
3620		/* Enable fast mode */
3621		s->flags |= __CMPXCHG_DOUBLE;
3622#endif
3623
3624	/*
3625	 * The larger the object size is, the more pages we want on the partial
3626	 * list to avoid pounding the page allocator excessively.
3627	 */
3628	set_min_partial(s, ilog2(s->size) / 2);
3629
3630	set_cpu_partial(s);
3631
3632#ifdef CONFIG_NUMA
3633	s->remote_node_defrag_ratio = 1000;
3634#endif
3635
3636	/* Initialize the pre-computed randomized freelist if slab is up */
3637	if (slab_state >= UP) {
3638		if (init_cache_random_seq(s))
3639			goto error;
3640	}
3641
3642	if (!init_kmem_cache_nodes(s))
3643		goto error;
3644
3645	if (alloc_kmem_cache_cpus(s))
3646		return 0;
3647
3648	free_kmem_cache_nodes(s);
3649error:
3650	if (flags & SLAB_PANIC)
3651		panic("Cannot create slab %s size=%u realsize=%u order=%u offset=%u flags=%lx\n",
3652		      s->name, s->size, s->size,
3653		      oo_order(s->oo), s->offset, (unsigned long)flags);
3654	return -EINVAL;
3655}
3656
3657static void list_slab_objects(struct kmem_cache *s, struct page *page,
3658							const char *text)
3659{
3660#ifdef CONFIG_SLUB_DEBUG
3661	void *addr = page_address(page);
 
3662	void *p;
3663	unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) *
3664				     sizeof(long), GFP_ATOMIC);
3665	if (!map)
3666		return;
3667	slab_err(s, page, text, s->name);
3668	slab_lock(page);
3669
3670	get_map(s, page, map);
3671	for_each_object(p, s, addr, page->objects) {
3672
3673		if (!test_bit(slab_index(p, s, addr), map)) {
3674			pr_err("INFO: Object 0x%p @offset=%tu\n", p, p - addr);
3675			print_tracking(s, p);
3676		}
3677	}
 
3678	slab_unlock(page);
3679	kfree(map);
3680#endif
3681}
3682
3683/*
3684 * Attempt to free all partial slabs on a node.
3685 * This is called from __kmem_cache_shutdown(). We must take list_lock
3686 * because sysfs file might still access partial list after the shutdowning.
3687 */
3688static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
3689{
3690	LIST_HEAD(discard);
3691	struct page *page, *h;
3692
3693	BUG_ON(irqs_disabled());
3694	spin_lock_irq(&n->list_lock);
3695	list_for_each_entry_safe(page, h, &n->partial, lru) {
3696		if (!page->inuse) {
3697			remove_partial(n, page);
3698			list_add(&page->lru, &discard);
3699		} else {
3700			list_slab_objects(s, page,
3701			"Objects remaining in %s on __kmem_cache_shutdown()");
3702		}
3703	}
3704	spin_unlock_irq(&n->list_lock);
3705
3706	list_for_each_entry_safe(page, h, &discard, lru)
3707		discard_slab(s, page);
3708}
3709
3710bool __kmem_cache_empty(struct kmem_cache *s)
3711{
3712	int node;
3713	struct kmem_cache_node *n;
3714
3715	for_each_kmem_cache_node(s, node, n)
3716		if (n->nr_partial || slabs_node(s, node))
3717			return false;
3718	return true;
3719}
3720
3721/*
3722 * Release all resources used by a slab cache.
3723 */
3724int __kmem_cache_shutdown(struct kmem_cache *s)
3725{
3726	int node;
3727	struct kmem_cache_node *n;
3728
3729	flush_all(s);
3730	/* Attempt to free all objects */
3731	for_each_kmem_cache_node(s, node, n) {
3732		free_partial(s, n);
3733		if (n->nr_partial || slabs_node(s, node))
3734			return 1;
3735	}
3736	sysfs_slab_remove(s);
3737	return 0;
3738}
3739
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3740/********************************************************************
3741 *		Kmalloc subsystem
3742 *******************************************************************/
3743
3744static int __init setup_slub_min_order(char *str)
3745{
3746	get_option(&str, (int *)&slub_min_order);
3747
3748	return 1;
3749}
3750
3751__setup("slub_min_order=", setup_slub_min_order);
3752
3753static int __init setup_slub_max_order(char *str)
3754{
3755	get_option(&str, (int *)&slub_max_order);
3756	slub_max_order = min(slub_max_order, (unsigned int)MAX_ORDER - 1);
3757
3758	return 1;
3759}
3760
3761__setup("slub_max_order=", setup_slub_max_order);
3762
3763static int __init setup_slub_min_objects(char *str)
3764{
3765	get_option(&str, (int *)&slub_min_objects);
3766
3767	return 1;
3768}
3769
3770__setup("slub_min_objects=", setup_slub_min_objects);
3771
3772void *__kmalloc(size_t size, gfp_t flags)
3773{
3774	struct kmem_cache *s;
3775	void *ret;
3776
3777	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
3778		return kmalloc_large(size, flags);
3779
3780	s = kmalloc_slab(size, flags);
3781
3782	if (unlikely(ZERO_OR_NULL_PTR(s)))
3783		return s;
3784
3785	ret = slab_alloc(s, flags, _RET_IP_);
3786
3787	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
3788
3789	kasan_kmalloc(s, ret, size, flags);
3790
3791	return ret;
3792}
3793EXPORT_SYMBOL(__kmalloc);
3794
3795#ifdef CONFIG_NUMA
3796static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
3797{
3798	struct page *page;
3799	void *ptr = NULL;
 
3800
3801	flags |= __GFP_COMP;
3802	page = alloc_pages_node(node, flags, get_order(size));
3803	if (page)
3804		ptr = page_address(page);
 
 
 
3805
3806	kmalloc_large_node_hook(ptr, size, flags);
3807	return ptr;
3808}
3809
3810void *__kmalloc_node(size_t size, gfp_t flags, int node)
3811{
3812	struct kmem_cache *s;
3813	void *ret;
3814
3815	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
3816		ret = kmalloc_large_node(size, flags, node);
3817
3818		trace_kmalloc_node(_RET_IP_, ret,
3819				   size, PAGE_SIZE << get_order(size),
3820				   flags, node);
3821
3822		return ret;
3823	}
3824
3825	s = kmalloc_slab(size, flags);
3826
3827	if (unlikely(ZERO_OR_NULL_PTR(s)))
3828		return s;
3829
3830	ret = slab_alloc_node(s, flags, node, _RET_IP_);
3831
3832	trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
3833
3834	kasan_kmalloc(s, ret, size, flags);
3835
3836	return ret;
3837}
3838EXPORT_SYMBOL(__kmalloc_node);
3839#endif
3840
3841#ifdef CONFIG_HARDENED_USERCOPY
3842/*
3843 * Rejects incorrectly sized objects and objects that are to be copied
3844 * to/from userspace but do not fall entirely within the containing slab
3845 * cache's usercopy region.
3846 *
3847 * Returns NULL if check passes, otherwise const char * to name of cache
3848 * to indicate an error.
3849 */
3850void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
3851			 bool to_user)
3852{
3853	struct kmem_cache *s;
3854	unsigned int offset;
3855	size_t object_size;
 
 
 
3856
3857	/* Find object and usable object size. */
3858	s = page->slab_cache;
3859
3860	/* Reject impossible pointers. */
3861	if (ptr < page_address(page))
3862		usercopy_abort("SLUB object not in SLUB page?!", NULL,
3863			       to_user, 0, n);
3864
3865	/* Find offset within object. */
3866	offset = (ptr - page_address(page)) % s->size;
 
 
 
3867
3868	/* Adjust for redzone and reject if within the redzone. */
3869	if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE) {
3870		if (offset < s->red_left_pad)
3871			usercopy_abort("SLUB object in left red zone",
3872				       s->name, to_user, offset, n);
3873		offset -= s->red_left_pad;
3874	}
3875
3876	/* Allow address range falling entirely within usercopy region. */
3877	if (offset >= s->useroffset &&
3878	    offset - s->useroffset <= s->usersize &&
3879	    n <= s->useroffset - offset + s->usersize)
3880		return;
3881
3882	/*
3883	 * If the copy is still within the allocated object, produce
3884	 * a warning instead of rejecting the copy. This is intended
3885	 * to be a temporary method to find any missing usercopy
3886	 * whitelists.
3887	 */
3888	object_size = slab_ksize(s);
3889	if (usercopy_fallback &&
3890	    offset <= object_size && n <= object_size - offset) {
3891		usercopy_warn("SLUB object", s->name, to_user, offset, n);
3892		return;
3893	}
3894
3895	usercopy_abort("SLUB object", s->name, to_user, offset, n);
3896}
3897#endif /* CONFIG_HARDENED_USERCOPY */
3898
3899static size_t __ksize(const void *object)
3900{
3901	struct page *page;
3902
3903	if (unlikely(object == ZERO_SIZE_PTR))
3904		return 0;
3905
3906	page = virt_to_head_page(object);
3907
3908	if (unlikely(!PageSlab(page))) {
3909		WARN_ON(!PageCompound(page));
3910		return PAGE_SIZE << compound_order(page);
3911	}
3912
3913	return slab_ksize(page->slab_cache);
3914}
3915
3916size_t ksize(const void *object)
3917{
3918	size_t size = __ksize(object);
3919	/* We assume that ksize callers could use whole allocated area,
3920	 * so we need to unpoison this area.
3921	 */
3922	kasan_unpoison_shadow(object, size);
3923	return size;
3924}
3925EXPORT_SYMBOL(ksize);
3926
3927void kfree(const void *x)
3928{
3929	struct page *page;
3930	void *object = (void *)x;
3931
3932	trace_kfree(_RET_IP_, x);
3933
3934	if (unlikely(ZERO_OR_NULL_PTR(x)))
3935		return;
3936
3937	page = virt_to_head_page(x);
3938	if (unlikely(!PageSlab(page))) {
3939		BUG_ON(!PageCompound(page));
3940		kfree_hook(object);
3941		__free_pages(page, compound_order(page));
3942		return;
3943	}
3944	slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
3945}
3946EXPORT_SYMBOL(kfree);
3947
3948#define SHRINK_PROMOTE_MAX 32
3949
3950/*
3951 * kmem_cache_shrink discards empty slabs and promotes the slabs filled
3952 * up most to the head of the partial lists. New allocations will then
3953 * fill those up and thus they can be removed from the partial lists.
3954 *
3955 * The slabs with the least items are placed last. This results in them
3956 * being allocated from last increasing the chance that the last objects
3957 * are freed in them.
3958 */
3959int __kmem_cache_shrink(struct kmem_cache *s)
3960{
3961	int node;
3962	int i;
3963	struct kmem_cache_node *n;
3964	struct page *page;
3965	struct page *t;
3966	struct list_head discard;
3967	struct list_head promote[SHRINK_PROMOTE_MAX];
3968	unsigned long flags;
3969	int ret = 0;
3970
3971	flush_all(s);
3972	for_each_kmem_cache_node(s, node, n) {
3973		INIT_LIST_HEAD(&discard);
3974		for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
3975			INIT_LIST_HEAD(promote + i);
3976
3977		spin_lock_irqsave(&n->list_lock, flags);
3978
3979		/*
3980		 * Build lists of slabs to discard or promote.
3981		 *
3982		 * Note that concurrent frees may occur while we hold the
3983		 * list_lock. page->inuse here is the upper limit.
3984		 */
3985		list_for_each_entry_safe(page, t, &n->partial, lru) {
3986			int free = page->objects - page->inuse;
3987
3988			/* Do not reread page->inuse */
3989			barrier();
3990
3991			/* We do not keep full slabs on the list */
3992			BUG_ON(free <= 0);
3993
3994			if (free == page->objects) {
3995				list_move(&page->lru, &discard);
3996				n->nr_partial--;
3997			} else if (free <= SHRINK_PROMOTE_MAX)
3998				list_move(&page->lru, promote + free - 1);
3999		}
4000
4001		/*
4002		 * Promote the slabs filled up most to the head of the
4003		 * partial list.
4004		 */
4005		for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
4006			list_splice(promote + i, &n->partial);
4007
4008		spin_unlock_irqrestore(&n->list_lock, flags);
4009
4010		/* Release empty slabs */
4011		list_for_each_entry_safe(page, t, &discard, lru)
4012			discard_slab(s, page);
4013
4014		if (slabs_node(s, node))
4015			ret = 1;
4016	}
4017
4018	return ret;
4019}
4020
4021#ifdef CONFIG_MEMCG
4022static void kmemcg_cache_deact_after_rcu(struct kmem_cache *s)
4023{
4024	/*
4025	 * Called with all the locks held after a sched RCU grace period.
4026	 * Even if @s becomes empty after shrinking, we can't know that @s
4027	 * doesn't have allocations already in-flight and thus can't
4028	 * destroy @s until the associated memcg is released.
4029	 *
4030	 * However, let's remove the sysfs files for empty caches here.
4031	 * Each cache has a lot of interface files which aren't
4032	 * particularly useful for empty draining caches; otherwise, we can
4033	 * easily end up with millions of unnecessary sysfs files on
4034	 * systems which have a lot of memory and transient cgroups.
4035	 */
4036	if (!__kmem_cache_shrink(s))
4037		sysfs_slab_remove(s);
4038}
4039
4040void __kmemcg_cache_deactivate(struct kmem_cache *s)
4041{
4042	/*
4043	 * Disable empty slabs caching. Used to avoid pinning offline
4044	 * memory cgroups by kmem pages that can be freed.
4045	 */
4046	slub_set_cpu_partial(s, 0);
4047	s->min_partial = 0;
4048
4049	/*
4050	 * s->cpu_partial is checked locklessly (see put_cpu_partial), so
4051	 * we have to make sure the change is visible before shrinking.
4052	 */
4053	slab_deactivate_memcg_cache_rcu_sched(s, kmemcg_cache_deact_after_rcu);
4054}
4055#endif
4056
4057static int slab_mem_going_offline_callback(void *arg)
4058{
4059	struct kmem_cache *s;
4060
4061	mutex_lock(&slab_mutex);
4062	list_for_each_entry(s, &slab_caches, list)
4063		__kmem_cache_shrink(s);
4064	mutex_unlock(&slab_mutex);
4065
4066	return 0;
4067}
4068
4069static void slab_mem_offline_callback(void *arg)
4070{
4071	struct kmem_cache_node *n;
4072	struct kmem_cache *s;
4073	struct memory_notify *marg = arg;
4074	int offline_node;
4075
4076	offline_node = marg->status_change_nid_normal;
4077
4078	/*
4079	 * If the node still has available memory. we need kmem_cache_node
4080	 * for it yet.
4081	 */
4082	if (offline_node < 0)
4083		return;
4084
4085	mutex_lock(&slab_mutex);
4086	list_for_each_entry(s, &slab_caches, list) {
4087		n = get_node(s, offline_node);
4088		if (n) {
4089			/*
4090			 * if n->nr_slabs > 0, slabs still exist on the node
4091			 * that is going down. We were unable to free them,
4092			 * and offline_pages() function shouldn't call this
4093			 * callback. So, we must fail.
4094			 */
4095			BUG_ON(slabs_node(s, offline_node));
4096
4097			s->node[offline_node] = NULL;
4098			kmem_cache_free(kmem_cache_node, n);
4099		}
4100	}
4101	mutex_unlock(&slab_mutex);
4102}
4103
4104static int slab_mem_going_online_callback(void *arg)
4105{
4106	struct kmem_cache_node *n;
4107	struct kmem_cache *s;
4108	struct memory_notify *marg = arg;
4109	int nid = marg->status_change_nid_normal;
4110	int ret = 0;
4111
4112	/*
4113	 * If the node's memory is already available, then kmem_cache_node is
4114	 * already created. Nothing to do.
4115	 */
4116	if (nid < 0)
4117		return 0;
4118
4119	/*
4120	 * We are bringing a node online. No memory is available yet. We must
4121	 * allocate a kmem_cache_node structure in order to bring the node
4122	 * online.
4123	 */
4124	mutex_lock(&slab_mutex);
4125	list_for_each_entry(s, &slab_caches, list) {
4126		/*
 
 
 
 
 
 
4127		 * XXX: kmem_cache_alloc_node will fallback to other nodes
4128		 *      since memory is not yet available from the node that
4129		 *      is brought up.
4130		 */
4131		n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL);
4132		if (!n) {
4133			ret = -ENOMEM;
4134			goto out;
4135		}
4136		init_kmem_cache_node(n);
4137		s->node[nid] = n;
4138	}
 
 
 
 
 
4139out:
4140	mutex_unlock(&slab_mutex);
4141	return ret;
4142}
4143
4144static int slab_memory_callback(struct notifier_block *self,
4145				unsigned long action, void *arg)
4146{
4147	int ret = 0;
4148
4149	switch (action) {
4150	case MEM_GOING_ONLINE:
4151		ret = slab_mem_going_online_callback(arg);
4152		break;
4153	case MEM_GOING_OFFLINE:
4154		ret = slab_mem_going_offline_callback(arg);
4155		break;
4156	case MEM_OFFLINE:
4157	case MEM_CANCEL_ONLINE:
4158		slab_mem_offline_callback(arg);
4159		break;
4160	case MEM_ONLINE:
4161	case MEM_CANCEL_OFFLINE:
4162		break;
4163	}
4164	if (ret)
4165		ret = notifier_from_errno(ret);
4166	else
4167		ret = NOTIFY_OK;
4168	return ret;
4169}
4170
4171static struct notifier_block slab_memory_callback_nb = {
4172	.notifier_call = slab_memory_callback,
4173	.priority = SLAB_CALLBACK_PRI,
4174};
4175
4176/********************************************************************
4177 *			Basic setup of slabs
4178 *******************************************************************/
4179
4180/*
4181 * Used for early kmem_cache structures that were allocated using
4182 * the page allocator. Allocate them properly then fix up the pointers
4183 * that may be pointing to the wrong kmem_cache structure.
4184 */
4185
4186static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
4187{
4188	int node;
4189	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
4190	struct kmem_cache_node *n;
4191
4192	memcpy(s, static_cache, kmem_cache->object_size);
4193
4194	/*
4195	 * This runs very early, and only the boot processor is supposed to be
4196	 * up.  Even if it weren't true, IRQs are not up so we couldn't fire
4197	 * IPIs around.
4198	 */
4199	__flush_cpu_slab(s, smp_processor_id());
4200	for_each_kmem_cache_node(s, node, n) {
4201		struct page *p;
4202
4203		list_for_each_entry(p, &n->partial, lru)
4204			p->slab_cache = s;
4205
4206#ifdef CONFIG_SLUB_DEBUG
4207		list_for_each_entry(p, &n->full, lru)
4208			p->slab_cache = s;
4209#endif
4210	}
4211	slab_init_memcg_params(s);
4212	list_add(&s->list, &slab_caches);
4213	memcg_link_cache(s);
4214	return s;
4215}
4216
4217void __init kmem_cache_init(void)
4218{
4219	static __initdata struct kmem_cache boot_kmem_cache,
4220		boot_kmem_cache_node;
 
4221
4222	if (debug_guardpage_minorder())
4223		slub_max_order = 0;
4224
 
 
 
 
4225	kmem_cache_node = &boot_kmem_cache_node;
4226	kmem_cache = &boot_kmem_cache;
4227
 
 
 
 
 
 
 
4228	create_boot_cache(kmem_cache_node, "kmem_cache_node",
4229		sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN, 0, 0);
4230
4231	register_hotmemory_notifier(&slab_memory_callback_nb);
4232
4233	/* Able to allocate the per node structures */
4234	slab_state = PARTIAL;
4235
4236	create_boot_cache(kmem_cache, "kmem_cache",
4237			offsetof(struct kmem_cache, node) +
4238				nr_node_ids * sizeof(struct kmem_cache_node *),
4239		       SLAB_HWCACHE_ALIGN, 0, 0);
4240
4241	kmem_cache = bootstrap(&boot_kmem_cache);
4242
4243	/*
4244	 * Allocate kmem_cache_node properly from the kmem_cache slab.
4245	 * kmem_cache_node is separately allocated so no need to
4246	 * update any list pointers.
4247	 */
4248	kmem_cache_node = bootstrap(&boot_kmem_cache_node);
4249
4250	/* Now we can use the kmem_cache to allocate kmalloc slabs */
4251	setup_kmalloc_cache_index_table();
4252	create_kmalloc_caches(0);
4253
4254	/* Setup random freelists for each cache */
4255	init_freelist_randomization();
4256
4257	cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL,
4258				  slub_cpu_dead);
4259
4260	pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%d\n",
4261		cache_line_size(),
4262		slub_min_order, slub_max_order, slub_min_objects,
4263		nr_cpu_ids, nr_node_ids);
4264}
4265
4266void __init kmem_cache_init_late(void)
4267{
4268}
4269
4270struct kmem_cache *
4271__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
4272		   slab_flags_t flags, void (*ctor)(void *))
4273{
4274	struct kmem_cache *s, *c;
4275
4276	s = find_mergeable(size, align, flags, name, ctor);
4277	if (s) {
4278		s->refcount++;
4279
4280		/*
4281		 * Adjust the object sizes so that we clear
4282		 * the complete object on kzalloc.
4283		 */
4284		s->object_size = max(s->object_size, size);
4285		s->inuse = max(s->inuse, ALIGN(size, sizeof(void *)));
4286
4287		for_each_memcg_cache(c, s) {
4288			c->object_size = s->object_size;
4289			c->inuse = max(c->inuse, ALIGN(size, sizeof(void *)));
4290		}
4291
4292		if (sysfs_slab_alias(s, name)) {
4293			s->refcount--;
4294			s = NULL;
4295		}
4296	}
4297
4298	return s;
4299}
4300
4301int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
4302{
4303	int err;
4304
4305	err = kmem_cache_open(s, flags);
4306	if (err)
4307		return err;
4308
4309	/* Mutex is not taken during early boot */
4310	if (slab_state <= UP)
4311		return 0;
4312
4313	memcg_propagate_slab_attrs(s);
4314	err = sysfs_slab_add(s);
4315	if (err)
4316		__kmem_cache_release(s);
 
 
4317
4318	return err;
 
 
 
4319}
4320
4321void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
4322{
4323	struct kmem_cache *s;
4324	void *ret;
4325
4326	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
4327		return kmalloc_large(size, gfpflags);
4328
4329	s = kmalloc_slab(size, gfpflags);
4330
4331	if (unlikely(ZERO_OR_NULL_PTR(s)))
4332		return s;
4333
4334	ret = slab_alloc(s, gfpflags, caller);
4335
4336	/* Honor the call site pointer we received. */
4337	trace_kmalloc(caller, ret, size, s->size, gfpflags);
4338
4339	return ret;
4340}
 
4341
4342#ifdef CONFIG_NUMA
4343void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
4344					int node, unsigned long caller)
4345{
4346	struct kmem_cache *s;
4347	void *ret;
4348
4349	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
4350		ret = kmalloc_large_node(size, gfpflags, node);
4351
4352		trace_kmalloc_node(caller, ret,
4353				   size, PAGE_SIZE << get_order(size),
4354				   gfpflags, node);
4355
4356		return ret;
4357	}
4358
4359	s = kmalloc_slab(size, gfpflags);
4360
4361	if (unlikely(ZERO_OR_NULL_PTR(s)))
4362		return s;
4363
4364	ret = slab_alloc_node(s, gfpflags, node, caller);
4365
4366	/* Honor the call site pointer we received. */
4367	trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
4368
4369	return ret;
4370}
 
4371#endif
4372
4373#ifdef CONFIG_SYSFS
4374static int count_inuse(struct page *page)
4375{
4376	return page->inuse;
4377}
4378
4379static int count_total(struct page *page)
4380{
4381	return page->objects;
4382}
4383#endif
4384
4385#ifdef CONFIG_SLUB_DEBUG
4386static int validate_slab(struct kmem_cache *s, struct page *page,
4387						unsigned long *map)
4388{
4389	void *p;
4390	void *addr = page_address(page);
 
4391
4392	if (!check_slab(s, page) ||
4393			!on_freelist(s, page, NULL))
4394		return 0;
4395
4396	/* Now we know that a valid freelist exists */
4397	bitmap_zero(map, page->objects);
4398
4399	get_map(s, page, map);
 
4400	for_each_object(p, s, addr, page->objects) {
4401		if (test_bit(slab_index(p, s, addr), map))
4402			if (!check_object(s, page, p, SLUB_RED_INACTIVE))
4403				return 0;
4404	}
4405
4406	for_each_object(p, s, addr, page->objects)
4407		if (!test_bit(slab_index(p, s, addr), map))
4408			if (!check_object(s, page, p, SLUB_RED_ACTIVE))
4409				return 0;
4410	return 1;
4411}
4412
4413static void validate_slab_slab(struct kmem_cache *s, struct page *page,
4414						unsigned long *map)
4415{
4416	slab_lock(page);
4417	validate_slab(s, page, map);
4418	slab_unlock(page);
4419}
4420
4421static int validate_slab_node(struct kmem_cache *s,
4422		struct kmem_cache_node *n, unsigned long *map)
4423{
4424	unsigned long count = 0;
4425	struct page *page;
4426	unsigned long flags;
4427
4428	spin_lock_irqsave(&n->list_lock, flags);
4429
4430	list_for_each_entry(page, &n->partial, lru) {
4431		validate_slab_slab(s, page, map);
4432		count++;
4433	}
4434	if (count != n->nr_partial)
4435		pr_err("SLUB %s: %ld partial slabs counted but counter=%ld\n",
4436		       s->name, count, n->nr_partial);
 
 
4437
4438	if (!(s->flags & SLAB_STORE_USER))
4439		goto out;
4440
4441	list_for_each_entry(page, &n->full, lru) {
4442		validate_slab_slab(s, page, map);
4443		count++;
4444	}
4445	if (count != atomic_long_read(&n->nr_slabs))
4446		pr_err("SLUB: %s %ld slabs counted but counter=%ld\n",
4447		       s->name, count, atomic_long_read(&n->nr_slabs));
 
 
4448
4449out:
4450	spin_unlock_irqrestore(&n->list_lock, flags);
4451	return count;
4452}
4453
4454static long validate_slab_cache(struct kmem_cache *s)
4455{
4456	int node;
4457	unsigned long count = 0;
4458	unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
4459				sizeof(unsigned long), GFP_KERNEL);
4460	struct kmem_cache_node *n;
4461
4462	if (!map)
4463		return -ENOMEM;
4464
4465	flush_all(s);
4466	for_each_kmem_cache_node(s, node, n)
4467		count += validate_slab_node(s, n, map);
4468	kfree(map);
4469	return count;
4470}
 
 
 
4471/*
4472 * Generate lists of code addresses where slabcache objects are allocated
4473 * and freed.
4474 */
4475
4476struct location {
4477	unsigned long count;
4478	unsigned long addr;
4479	long long sum_time;
4480	long min_time;
4481	long max_time;
4482	long min_pid;
4483	long max_pid;
4484	DECLARE_BITMAP(cpus, NR_CPUS);
4485	nodemask_t nodes;
4486};
4487
4488struct loc_track {
4489	unsigned long max;
4490	unsigned long count;
4491	struct location *loc;
4492};
4493
 
 
4494static void free_loc_track(struct loc_track *t)
4495{
4496	if (t->max)
4497		free_pages((unsigned long)t->loc,
4498			get_order(sizeof(struct location) * t->max));
4499}
4500
4501static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
4502{
4503	struct location *l;
4504	int order;
4505
4506	order = get_order(sizeof(struct location) * max);
4507
4508	l = (void *)__get_free_pages(flags, order);
4509	if (!l)
4510		return 0;
4511
4512	if (t->count) {
4513		memcpy(l, t->loc, sizeof(struct location) * t->count);
4514		free_loc_track(t);
4515	}
4516	t->max = max;
4517	t->loc = l;
4518	return 1;
4519}
4520
4521static int add_location(struct loc_track *t, struct kmem_cache *s,
4522				const struct track *track)
4523{
4524	long start, end, pos;
4525	struct location *l;
4526	unsigned long caddr;
4527	unsigned long age = jiffies - track->when;
4528
4529	start = -1;
4530	end = t->count;
4531
4532	for ( ; ; ) {
4533		pos = start + (end - start + 1) / 2;
4534
4535		/*
4536		 * There is nothing at "end". If we end up there
4537		 * we need to add something to before end.
4538		 */
4539		if (pos == end)
4540			break;
4541
4542		caddr = t->loc[pos].addr;
4543		if (track->addr == caddr) {
4544
4545			l = &t->loc[pos];
4546			l->count++;
4547			if (track->when) {
4548				l->sum_time += age;
4549				if (age < l->min_time)
4550					l->min_time = age;
4551				if (age > l->max_time)
4552					l->max_time = age;
4553
4554				if (track->pid < l->min_pid)
4555					l->min_pid = track->pid;
4556				if (track->pid > l->max_pid)
4557					l->max_pid = track->pid;
4558
4559				cpumask_set_cpu(track->cpu,
4560						to_cpumask(l->cpus));
4561			}
4562			node_set(page_to_nid(virt_to_page(track)), l->nodes);
4563			return 1;
4564		}
4565
4566		if (track->addr < caddr)
4567			end = pos;
4568		else
4569			start = pos;
4570	}
4571
4572	/*
4573	 * Not found. Insert new tracking element.
4574	 */
4575	if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
4576		return 0;
4577
4578	l = t->loc + pos;
4579	if (pos < t->count)
4580		memmove(l + 1, l,
4581			(t->count - pos) * sizeof(struct location));
4582	t->count++;
4583	l->count = 1;
4584	l->addr = track->addr;
4585	l->sum_time = age;
4586	l->min_time = age;
4587	l->max_time = age;
4588	l->min_pid = track->pid;
4589	l->max_pid = track->pid;
4590	cpumask_clear(to_cpumask(l->cpus));
4591	cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
4592	nodes_clear(l->nodes);
4593	node_set(page_to_nid(virt_to_page(track)), l->nodes);
4594	return 1;
4595}
4596
4597static void process_slab(struct loc_track *t, struct kmem_cache *s,
4598		struct page *page, enum track_item alloc,
4599		unsigned long *map)
4600{
4601	void *addr = page_address(page);
4602	void *p;
 
4603
4604	bitmap_zero(map, page->objects);
4605	get_map(s, page, map);
4606
4607	for_each_object(p, s, addr, page->objects)
4608		if (!test_bit(slab_index(p, s, addr), map))
4609			add_location(t, s, get_track(s, p, alloc));
 
4610}
4611
4612static int list_locations(struct kmem_cache *s, char *buf,
4613					enum track_item alloc)
4614{
4615	int len = 0;
4616	unsigned long i;
4617	struct loc_track t = { 0, 0, NULL };
4618	int node;
4619	unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
4620				     sizeof(unsigned long), GFP_KERNEL);
4621	struct kmem_cache_node *n;
4622
4623	if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
4624				     GFP_KERNEL)) {
4625		kfree(map);
4626		return sprintf(buf, "Out of memory\n");
4627	}
4628	/* Push back cpu slabs */
4629	flush_all(s);
4630
4631	for_each_kmem_cache_node(s, node, n) {
4632		unsigned long flags;
4633		struct page *page;
4634
4635		if (!atomic_long_read(&n->nr_slabs))
4636			continue;
4637
4638		spin_lock_irqsave(&n->list_lock, flags);
4639		list_for_each_entry(page, &n->partial, lru)
4640			process_slab(&t, s, page, alloc, map);
4641		list_for_each_entry(page, &n->full, lru)
4642			process_slab(&t, s, page, alloc, map);
4643		spin_unlock_irqrestore(&n->list_lock, flags);
4644	}
4645
4646	for (i = 0; i < t.count; i++) {
4647		struct location *l = &t.loc[i];
4648
4649		if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100)
4650			break;
4651		len += sprintf(buf + len, "%7ld ", l->count);
4652
4653		if (l->addr)
4654			len += sprintf(buf + len, "%pS", (void *)l->addr);
4655		else
4656			len += sprintf(buf + len, "<not-available>");
4657
4658		if (l->sum_time != l->min_time) {
4659			len += sprintf(buf + len, " age=%ld/%ld/%ld",
4660				l->min_time,
4661				(long)div_u64(l->sum_time, l->count),
4662				l->max_time);
4663		} else
4664			len += sprintf(buf + len, " age=%ld",
4665				l->min_time);
4666
4667		if (l->min_pid != l->max_pid)
4668			len += sprintf(buf + len, " pid=%ld-%ld",
4669				l->min_pid, l->max_pid);
4670		else
4671			len += sprintf(buf + len, " pid=%ld",
4672				l->min_pid);
4673
4674		if (num_online_cpus() > 1 &&
4675				!cpumask_empty(to_cpumask(l->cpus)) &&
4676				len < PAGE_SIZE - 60)
4677			len += scnprintf(buf + len, PAGE_SIZE - len - 50,
4678					 " cpus=%*pbl",
4679					 cpumask_pr_args(to_cpumask(l->cpus)));
4680
4681		if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
4682				len < PAGE_SIZE - 60)
4683			len += scnprintf(buf + len, PAGE_SIZE - len - 50,
4684					 " nodes=%*pbl",
4685					 nodemask_pr_args(&l->nodes));
4686
4687		len += sprintf(buf + len, "\n");
4688	}
4689
4690	free_loc_track(&t);
4691	kfree(map);
4692	if (!t.count)
4693		len += sprintf(buf, "No data\n");
4694	return len;
4695}
4696#endif
4697
4698#ifdef SLUB_RESILIENCY_TEST
4699static void __init resiliency_test(void)
4700{
4701	u8 *p;
4702
4703	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || KMALLOC_SHIFT_HIGH < 10);
4704
4705	pr_err("SLUB resiliency testing\n");
4706	pr_err("-----------------------\n");
4707	pr_err("A. Corruption after allocation\n");
4708
4709	p = kzalloc(16, GFP_KERNEL);
4710	p[16] = 0x12;
4711	pr_err("\n1. kmalloc-16: Clobber Redzone/next pointer 0x12->0x%p\n\n",
4712	       p + 16);
4713
4714	validate_slab_cache(kmalloc_caches[4]);
4715
4716	/* Hmmm... The next two are dangerous */
4717	p = kzalloc(32, GFP_KERNEL);
4718	p[32 + sizeof(void *)] = 0x34;
4719	pr_err("\n2. kmalloc-32: Clobber next pointer/next slab 0x34 -> -0x%p\n",
4720	       p);
4721	pr_err("If allocated object is overwritten then not detectable\n\n");
4722
4723	validate_slab_cache(kmalloc_caches[5]);
4724	p = kzalloc(64, GFP_KERNEL);
4725	p += 64 + (get_cycles() & 0xff) * sizeof(void *);
4726	*p = 0x56;
4727	pr_err("\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
4728	       p);
4729	pr_err("If allocated object is overwritten then not detectable\n\n");
4730	validate_slab_cache(kmalloc_caches[6]);
4731
4732	pr_err("\nB. Corruption after free\n");
4733	p = kzalloc(128, GFP_KERNEL);
4734	kfree(p);
4735	*p = 0x78;
4736	pr_err("1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
4737	validate_slab_cache(kmalloc_caches[7]);
4738
4739	p = kzalloc(256, GFP_KERNEL);
4740	kfree(p);
4741	p[50] = 0x9a;
4742	pr_err("\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", p);
4743	validate_slab_cache(kmalloc_caches[8]);
4744
4745	p = kzalloc(512, GFP_KERNEL);
4746	kfree(p);
4747	p[512] = 0xab;
4748	pr_err("\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
4749	validate_slab_cache(kmalloc_caches[9]);
4750}
4751#else
4752#ifdef CONFIG_SYSFS
4753static void resiliency_test(void) {};
4754#endif
4755#endif
4756
4757#ifdef CONFIG_SYSFS
4758enum slab_stat_type {
4759	SL_ALL,			/* All slabs */
4760	SL_PARTIAL,		/* Only partially allocated slabs */
4761	SL_CPU,			/* Only slabs used for cpu caches */
4762	SL_OBJECTS,		/* Determine allocated objects not slabs */
4763	SL_TOTAL		/* Determine object capacity not slabs */
4764};
4765
4766#define SO_ALL		(1 << SL_ALL)
4767#define SO_PARTIAL	(1 << SL_PARTIAL)
4768#define SO_CPU		(1 << SL_CPU)
4769#define SO_OBJECTS	(1 << SL_OBJECTS)
4770#define SO_TOTAL	(1 << SL_TOTAL)
4771
4772#ifdef CONFIG_MEMCG
4773static bool memcg_sysfs_enabled = IS_ENABLED(CONFIG_SLUB_MEMCG_SYSFS_ON);
4774
4775static int __init setup_slub_memcg_sysfs(char *str)
4776{
4777	int v;
4778
4779	if (get_option(&str, &v) > 0)
4780		memcg_sysfs_enabled = v;
4781
4782	return 1;
4783}
4784
4785__setup("slub_memcg_sysfs=", setup_slub_memcg_sysfs);
4786#endif
4787
4788static ssize_t show_slab_objects(struct kmem_cache *s,
4789			    char *buf, unsigned long flags)
4790{
4791	unsigned long total = 0;
4792	int node;
4793	int x;
4794	unsigned long *nodes;
 
4795
4796	nodes = kzalloc(sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
4797	if (!nodes)
4798		return -ENOMEM;
4799
4800	if (flags & SO_CPU) {
4801		int cpu;
4802
4803		for_each_possible_cpu(cpu) {
4804			struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab,
4805							       cpu);
4806			int node;
4807			struct page *page;
4808
4809			page = READ_ONCE(c->page);
4810			if (!page)
4811				continue;
4812
4813			node = page_to_nid(page);
4814			if (flags & SO_TOTAL)
4815				x = page->objects;
4816			else if (flags & SO_OBJECTS)
4817				x = page->inuse;
4818			else
4819				x = 1;
4820
4821			total += x;
4822			nodes[node] += x;
4823
4824			page = slub_percpu_partial_read_once(c);
4825			if (page) {
4826				node = page_to_nid(page);
4827				if (flags & SO_TOTAL)
4828					WARN_ON_ONCE(1);
4829				else if (flags & SO_OBJECTS)
4830					WARN_ON_ONCE(1);
4831				else
4832					x = page->pages;
4833				total += x;
4834				nodes[node] += x;
4835			}
4836		}
4837	}
4838
4839	get_online_mems();
 
 
 
 
 
 
 
 
 
 
4840#ifdef CONFIG_SLUB_DEBUG
4841	if (flags & SO_ALL) {
4842		struct kmem_cache_node *n;
4843
4844		for_each_kmem_cache_node(s, node, n) {
4845
4846			if (flags & SO_TOTAL)
4847				x = atomic_long_read(&n->total_objects);
4848			else if (flags & SO_OBJECTS)
4849				x = atomic_long_read(&n->total_objects) -
4850					count_partial(n, count_free);
4851			else
4852				x = atomic_long_read(&n->nr_slabs);
4853			total += x;
4854			nodes[node] += x;
4855		}
4856
4857	} else
4858#endif
4859	if (flags & SO_PARTIAL) {
4860		struct kmem_cache_node *n;
4861
4862		for_each_kmem_cache_node(s, node, n) {
4863			if (flags & SO_TOTAL)
4864				x = count_partial(n, count_total);
4865			else if (flags & SO_OBJECTS)
4866				x = count_partial(n, count_inuse);
4867			else
4868				x = n->nr_partial;
4869			total += x;
4870			nodes[node] += x;
4871		}
4872	}
4873	x = sprintf(buf, "%lu", total);
 
4874#ifdef CONFIG_NUMA
4875	for (node = 0; node < nr_node_ids; node++)
4876		if (nodes[node])
4877			x += sprintf(buf + x, " N%d=%lu",
4878					node, nodes[node]);
 
4879#endif
4880	put_online_mems();
4881	kfree(nodes);
4882	return x + sprintf(buf + x, "\n");
4883}
4884
4885#ifdef CONFIG_SLUB_DEBUG
4886static int any_slab_objects(struct kmem_cache *s)
4887{
4888	int node;
4889	struct kmem_cache_node *n;
4890
4891	for_each_kmem_cache_node(s, node, n)
4892		if (atomic_long_read(&n->total_objects))
4893			return 1;
4894
4895	return 0;
4896}
4897#endif
4898
4899#define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
4900#define to_slab(n) container_of(n, struct kmem_cache, kobj)
4901
4902struct slab_attribute {
4903	struct attribute attr;
4904	ssize_t (*show)(struct kmem_cache *s, char *buf);
4905	ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
4906};
4907
4908#define SLAB_ATTR_RO(_name) \
4909	static struct slab_attribute _name##_attr = \
4910	__ATTR(_name, 0400, _name##_show, NULL)
4911
4912#define SLAB_ATTR(_name) \
4913	static struct slab_attribute _name##_attr =  \
4914	__ATTR(_name, 0600, _name##_show, _name##_store)
4915
4916static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
4917{
4918	return sprintf(buf, "%u\n", s->size);
4919}
4920SLAB_ATTR_RO(slab_size);
4921
4922static ssize_t align_show(struct kmem_cache *s, char *buf)
4923{
4924	return sprintf(buf, "%u\n", s->align);
4925}
4926SLAB_ATTR_RO(align);
4927
4928static ssize_t object_size_show(struct kmem_cache *s, char *buf)
4929{
4930	return sprintf(buf, "%u\n", s->object_size);
4931}
4932SLAB_ATTR_RO(object_size);
4933
4934static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
4935{
4936	return sprintf(buf, "%u\n", oo_objects(s->oo));
4937}
4938SLAB_ATTR_RO(objs_per_slab);
4939
4940static ssize_t order_store(struct kmem_cache *s,
4941				const char *buf, size_t length)
4942{
4943	unsigned int order;
4944	int err;
4945
4946	err = kstrtouint(buf, 10, &order);
4947	if (err)
4948		return err;
4949
4950	if (order > slub_max_order || order < slub_min_order)
4951		return -EINVAL;
4952
4953	calculate_sizes(s, order);
4954	return length;
4955}
4956
4957static ssize_t order_show(struct kmem_cache *s, char *buf)
4958{
4959	return sprintf(buf, "%u\n", oo_order(s->oo));
4960}
4961SLAB_ATTR(order);
4962
4963static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
4964{
4965	return sprintf(buf, "%lu\n", s->min_partial);
4966}
4967
4968static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
4969				 size_t length)
4970{
4971	unsigned long min;
4972	int err;
4973
4974	err = kstrtoul(buf, 10, &min);
4975	if (err)
4976		return err;
4977
4978	set_min_partial(s, min);
4979	return length;
4980}
4981SLAB_ATTR(min_partial);
4982
4983static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
4984{
4985	return sprintf(buf, "%u\n", slub_cpu_partial(s));
4986}
4987
4988static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
4989				 size_t length)
4990{
4991	unsigned int objects;
4992	int err;
4993
4994	err = kstrtouint(buf, 10, &objects);
4995	if (err)
4996		return err;
4997	if (objects && !kmem_cache_has_cpu_partial(s))
4998		return -EINVAL;
4999
5000	slub_set_cpu_partial(s, objects);
5001	flush_all(s);
5002	return length;
5003}
5004SLAB_ATTR(cpu_partial);
5005
5006static ssize_t ctor_show(struct kmem_cache *s, char *buf)
5007{
5008	if (!s->ctor)
5009		return 0;
5010	return sprintf(buf, "%pS\n", s->ctor);
5011}
5012SLAB_ATTR_RO(ctor);
5013
5014static ssize_t aliases_show(struct kmem_cache *s, char *buf)
5015{
5016	return sprintf(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
5017}
5018SLAB_ATTR_RO(aliases);
5019
5020static ssize_t partial_show(struct kmem_cache *s, char *buf)
5021{
5022	return show_slab_objects(s, buf, SO_PARTIAL);
5023}
5024SLAB_ATTR_RO(partial);
5025
5026static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
5027{
5028	return show_slab_objects(s, buf, SO_CPU);
5029}
5030SLAB_ATTR_RO(cpu_slabs);
5031
5032static ssize_t objects_show(struct kmem_cache *s, char *buf)
5033{
5034	return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
5035}
5036SLAB_ATTR_RO(objects);
5037
5038static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
5039{
5040	return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
5041}
5042SLAB_ATTR_RO(objects_partial);
5043
5044static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
5045{
5046	int objects = 0;
5047	int pages = 0;
5048	int cpu;
5049	int len;
5050
5051	for_each_online_cpu(cpu) {
5052		struct page *page;
5053
5054		page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
5055
5056		if (page) {
5057			pages += page->pages;
5058			objects += page->pobjects;
5059		}
5060	}
5061
5062	len = sprintf(buf, "%d(%d)", objects, pages);
5063
5064#ifdef CONFIG_SMP
5065	for_each_online_cpu(cpu) {
5066		struct page *page;
5067
5068		page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
5069
5070		if (page && len < PAGE_SIZE - 20)
5071			len += sprintf(buf + len, " C%d=%d(%d)", cpu,
5072				page->pobjects, page->pages);
5073	}
5074#endif
5075	return len + sprintf(buf + len, "\n");
 
 
5076}
5077SLAB_ATTR_RO(slabs_cpu_partial);
5078
5079static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
5080{
5081	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
5082}
5083
5084static ssize_t reclaim_account_store(struct kmem_cache *s,
5085				const char *buf, size_t length)
5086{
5087	s->flags &= ~SLAB_RECLAIM_ACCOUNT;
5088	if (buf[0] == '1')
5089		s->flags |= SLAB_RECLAIM_ACCOUNT;
5090	return length;
5091}
5092SLAB_ATTR(reclaim_account);
5093
5094static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
5095{
5096	return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
5097}
5098SLAB_ATTR_RO(hwcache_align);
5099
5100#ifdef CONFIG_ZONE_DMA
5101static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
5102{
5103	return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
5104}
5105SLAB_ATTR_RO(cache_dma);
5106#endif
5107
5108static ssize_t usersize_show(struct kmem_cache *s, char *buf)
5109{
5110	return sprintf(buf, "%u\n", s->usersize);
5111}
5112SLAB_ATTR_RO(usersize);
5113
5114static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
5115{
5116	return sprintf(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU));
5117}
5118SLAB_ATTR_RO(destroy_by_rcu);
5119
5120static ssize_t reserved_show(struct kmem_cache *s, char *buf)
5121{
5122	return sprintf(buf, "%u\n", s->reserved);
5123}
5124SLAB_ATTR_RO(reserved);
5125
5126#ifdef CONFIG_SLUB_DEBUG
5127static ssize_t slabs_show(struct kmem_cache *s, char *buf)
5128{
5129	return show_slab_objects(s, buf, SO_ALL);
5130}
5131SLAB_ATTR_RO(slabs);
5132
5133static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
5134{
5135	return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
5136}
5137SLAB_ATTR_RO(total_objects);
5138
5139static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
5140{
5141	return sprintf(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS));
5142}
5143
5144static ssize_t sanity_checks_store(struct kmem_cache *s,
5145				const char *buf, size_t length)
5146{
5147	s->flags &= ~SLAB_CONSISTENCY_CHECKS;
5148	if (buf[0] == '1') {
5149		s->flags &= ~__CMPXCHG_DOUBLE;
5150		s->flags |= SLAB_CONSISTENCY_CHECKS;
5151	}
5152	return length;
5153}
5154SLAB_ATTR(sanity_checks);
5155
5156static ssize_t trace_show(struct kmem_cache *s, char *buf)
5157{
5158	return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
5159}
5160
5161static ssize_t trace_store(struct kmem_cache *s, const char *buf,
5162							size_t length)
5163{
5164	/*
5165	 * Tracing a merged cache is going to give confusing results
5166	 * as well as cause other issues like converting a mergeable
5167	 * cache into an umergeable one.
5168	 */
5169	if (s->refcount > 1)
5170		return -EINVAL;
5171
5172	s->flags &= ~SLAB_TRACE;
5173	if (buf[0] == '1') {
5174		s->flags &= ~__CMPXCHG_DOUBLE;
5175		s->flags |= SLAB_TRACE;
5176	}
5177	return length;
5178}
5179SLAB_ATTR(trace);
5180
5181static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
5182{
5183	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
5184}
5185
5186static ssize_t red_zone_store(struct kmem_cache *s,
5187				const char *buf, size_t length)
5188{
5189	if (any_slab_objects(s))
5190		return -EBUSY;
5191
5192	s->flags &= ~SLAB_RED_ZONE;
5193	if (buf[0] == '1') {
5194		s->flags |= SLAB_RED_ZONE;
5195	}
5196	calculate_sizes(s, -1);
5197	return length;
5198}
5199SLAB_ATTR(red_zone);
5200
5201static ssize_t poison_show(struct kmem_cache *s, char *buf)
5202{
5203	return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON));
5204}
5205
5206static ssize_t poison_store(struct kmem_cache *s,
5207				const char *buf, size_t length)
5208{
5209	if (any_slab_objects(s))
5210		return -EBUSY;
5211
5212	s->flags &= ~SLAB_POISON;
5213	if (buf[0] == '1') {
5214		s->flags |= SLAB_POISON;
5215	}
5216	calculate_sizes(s, -1);
5217	return length;
5218}
5219SLAB_ATTR(poison);
5220
5221static ssize_t store_user_show(struct kmem_cache *s, char *buf)
5222{
5223	return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
5224}
5225
5226static ssize_t store_user_store(struct kmem_cache *s,
5227				const char *buf, size_t length)
5228{
5229	if (any_slab_objects(s))
5230		return -EBUSY;
5231
5232	s->flags &= ~SLAB_STORE_USER;
5233	if (buf[0] == '1') {
5234		s->flags &= ~__CMPXCHG_DOUBLE;
5235		s->flags |= SLAB_STORE_USER;
5236	}
5237	calculate_sizes(s, -1);
5238	return length;
5239}
5240SLAB_ATTR(store_user);
5241
5242static ssize_t validate_show(struct kmem_cache *s, char *buf)
5243{
5244	return 0;
5245}
5246
5247static ssize_t validate_store(struct kmem_cache *s,
5248			const char *buf, size_t length)
5249{
5250	int ret = -EINVAL;
5251
5252	if (buf[0] == '1') {
5253		ret = validate_slab_cache(s);
5254		if (ret >= 0)
5255			ret = length;
5256	}
5257	return ret;
5258}
5259SLAB_ATTR(validate);
5260
5261static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
5262{
5263	if (!(s->flags & SLAB_STORE_USER))
5264		return -ENOSYS;
5265	return list_locations(s, buf, TRACK_ALLOC);
5266}
5267SLAB_ATTR_RO(alloc_calls);
5268
5269static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
5270{
5271	if (!(s->flags & SLAB_STORE_USER))
5272		return -ENOSYS;
5273	return list_locations(s, buf, TRACK_FREE);
5274}
5275SLAB_ATTR_RO(free_calls);
5276#endif /* CONFIG_SLUB_DEBUG */
5277
5278#ifdef CONFIG_FAILSLAB
5279static ssize_t failslab_show(struct kmem_cache *s, char *buf)
5280{
5281	return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
5282}
5283
5284static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
5285							size_t length)
5286{
5287	if (s->refcount > 1)
5288		return -EINVAL;
5289
5290	s->flags &= ~SLAB_FAILSLAB;
5291	if (buf[0] == '1')
5292		s->flags |= SLAB_FAILSLAB;
5293	return length;
5294}
5295SLAB_ATTR(failslab);
5296#endif
5297
5298static ssize_t shrink_show(struct kmem_cache *s, char *buf)
5299{
5300	return 0;
5301}
5302
5303static ssize_t shrink_store(struct kmem_cache *s,
5304			const char *buf, size_t length)
5305{
5306	if (buf[0] == '1')
5307		kmem_cache_shrink(s);
5308	else
5309		return -EINVAL;
5310	return length;
5311}
5312SLAB_ATTR(shrink);
5313
5314#ifdef CONFIG_NUMA
5315static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
5316{
5317	return sprintf(buf, "%u\n", s->remote_node_defrag_ratio / 10);
5318}
5319
5320static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
5321				const char *buf, size_t length)
5322{
5323	unsigned int ratio;
5324	int err;
5325
5326	err = kstrtouint(buf, 10, &ratio);
5327	if (err)
5328		return err;
5329	if (ratio > 100)
5330		return -ERANGE;
5331
5332	s->remote_node_defrag_ratio = ratio * 10;
5333
5334	return length;
5335}
5336SLAB_ATTR(remote_node_defrag_ratio);
5337#endif
5338
5339#ifdef CONFIG_SLUB_STATS
5340static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
5341{
5342	unsigned long sum  = 0;
5343	int cpu;
5344	int len;
5345	int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);
5346
5347	if (!data)
5348		return -ENOMEM;
5349
5350	for_each_online_cpu(cpu) {
5351		unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si];
5352
5353		data[cpu] = x;
5354		sum += x;
5355	}
5356
5357	len = sprintf(buf, "%lu", sum);
5358
5359#ifdef CONFIG_SMP
5360	for_each_online_cpu(cpu) {
5361		if (data[cpu] && len < PAGE_SIZE - 20)
5362			len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]);
 
5363	}
5364#endif
5365	kfree(data);
5366	return len + sprintf(buf + len, "\n");
 
 
5367}
5368
5369static void clear_stat(struct kmem_cache *s, enum stat_item si)
5370{
5371	int cpu;
5372
5373	for_each_online_cpu(cpu)
5374		per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0;
5375}
5376
5377#define STAT_ATTR(si, text) 					\
5378static ssize_t text##_show(struct kmem_cache *s, char *buf)	\
5379{								\
5380	return show_stat(s, buf, si);				\
5381}								\
5382static ssize_t text##_store(struct kmem_cache *s,		\
5383				const char *buf, size_t length)	\
5384{								\
5385	if (buf[0] != '0')					\
5386		return -EINVAL;					\
5387	clear_stat(s, si);					\
5388	return length;						\
5389}								\
5390SLAB_ATTR(text);						\
5391
5392STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
5393STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
5394STAT_ATTR(FREE_FASTPATH, free_fastpath);
5395STAT_ATTR(FREE_SLOWPATH, free_slowpath);
5396STAT_ATTR(FREE_FROZEN, free_frozen);
5397STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
5398STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
5399STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
5400STAT_ATTR(ALLOC_SLAB, alloc_slab);
5401STAT_ATTR(ALLOC_REFILL, alloc_refill);
5402STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch);
5403STAT_ATTR(FREE_SLAB, free_slab);
5404STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
5405STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
5406STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
5407STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
5408STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
5409STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
5410STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass);
5411STAT_ATTR(ORDER_FALLBACK, order_fallback);
5412STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
5413STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
5414STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc);
5415STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free);
5416STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
5417STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain);
5418#endif
5419
5420static struct attribute *slab_attrs[] = {
5421	&slab_size_attr.attr,
5422	&object_size_attr.attr,
5423	&objs_per_slab_attr.attr,
5424	&order_attr.attr,
5425	&min_partial_attr.attr,
5426	&cpu_partial_attr.attr,
5427	&objects_attr.attr,
5428	&objects_partial_attr.attr,
5429	&partial_attr.attr,
5430	&cpu_slabs_attr.attr,
5431	&ctor_attr.attr,
5432	&aliases_attr.attr,
5433	&align_attr.attr,
5434	&hwcache_align_attr.attr,
5435	&reclaim_account_attr.attr,
5436	&destroy_by_rcu_attr.attr,
5437	&shrink_attr.attr,
5438	&reserved_attr.attr,
5439	&slabs_cpu_partial_attr.attr,
5440#ifdef CONFIG_SLUB_DEBUG
5441	&total_objects_attr.attr,
5442	&slabs_attr.attr,
5443	&sanity_checks_attr.attr,
5444	&trace_attr.attr,
5445	&red_zone_attr.attr,
5446	&poison_attr.attr,
5447	&store_user_attr.attr,
5448	&validate_attr.attr,
5449	&alloc_calls_attr.attr,
5450	&free_calls_attr.attr,
5451#endif
5452#ifdef CONFIG_ZONE_DMA
5453	&cache_dma_attr.attr,
5454#endif
5455#ifdef CONFIG_NUMA
5456	&remote_node_defrag_ratio_attr.attr,
5457#endif
5458#ifdef CONFIG_SLUB_STATS
5459	&alloc_fastpath_attr.attr,
5460	&alloc_slowpath_attr.attr,
5461	&free_fastpath_attr.attr,
5462	&free_slowpath_attr.attr,
5463	&free_frozen_attr.attr,
5464	&free_add_partial_attr.attr,
5465	&free_remove_partial_attr.attr,
5466	&alloc_from_partial_attr.attr,
5467	&alloc_slab_attr.attr,
5468	&alloc_refill_attr.attr,
5469	&alloc_node_mismatch_attr.attr,
5470	&free_slab_attr.attr,
5471	&cpuslab_flush_attr.attr,
5472	&deactivate_full_attr.attr,
5473	&deactivate_empty_attr.attr,
5474	&deactivate_to_head_attr.attr,
5475	&deactivate_to_tail_attr.attr,
5476	&deactivate_remote_frees_attr.attr,
5477	&deactivate_bypass_attr.attr,
5478	&order_fallback_attr.attr,
5479	&cmpxchg_double_fail_attr.attr,
5480	&cmpxchg_double_cpu_fail_attr.attr,
5481	&cpu_partial_alloc_attr.attr,
5482	&cpu_partial_free_attr.attr,
5483	&cpu_partial_node_attr.attr,
5484	&cpu_partial_drain_attr.attr,
5485#endif
5486#ifdef CONFIG_FAILSLAB
5487	&failslab_attr.attr,
5488#endif
5489	&usersize_attr.attr,
5490
5491	NULL
5492};
5493
5494static const struct attribute_group slab_attr_group = {
5495	.attrs = slab_attrs,
5496};
5497
5498static ssize_t slab_attr_show(struct kobject *kobj,
5499				struct attribute *attr,
5500				char *buf)
5501{
5502	struct slab_attribute *attribute;
5503	struct kmem_cache *s;
5504	int err;
5505
5506	attribute = to_slab_attr(attr);
5507	s = to_slab(kobj);
5508
5509	if (!attribute->show)
5510		return -EIO;
5511
5512	err = attribute->show(s, buf);
5513
5514	return err;
5515}
5516
5517static ssize_t slab_attr_store(struct kobject *kobj,
5518				struct attribute *attr,
5519				const char *buf, size_t len)
5520{
5521	struct slab_attribute *attribute;
5522	struct kmem_cache *s;
5523	int err;
5524
5525	attribute = to_slab_attr(attr);
5526	s = to_slab(kobj);
5527
5528	if (!attribute->store)
5529		return -EIO;
5530
5531	err = attribute->store(s, buf, len);
5532#ifdef CONFIG_MEMCG
5533	if (slab_state >= FULL && err >= 0 && is_root_cache(s)) {
5534		struct kmem_cache *c;
5535
5536		mutex_lock(&slab_mutex);
5537		if (s->max_attr_size < len)
5538			s->max_attr_size = len;
5539
5540		/*
5541		 * This is a best effort propagation, so this function's return
5542		 * value will be determined by the parent cache only. This is
5543		 * basically because not all attributes will have a well
5544		 * defined semantics for rollbacks - most of the actions will
5545		 * have permanent effects.
5546		 *
5547		 * Returning the error value of any of the children that fail
5548		 * is not 100 % defined, in the sense that users seeing the
5549		 * error code won't be able to know anything about the state of
5550		 * the cache.
5551		 *
5552		 * Only returning the error code for the parent cache at least
5553		 * has well defined semantics. The cache being written to
5554		 * directly either failed or succeeded, in which case we loop
5555		 * through the descendants with best-effort propagation.
5556		 */
5557		for_each_memcg_cache(c, s)
5558			attribute->store(c, buf, len);
5559		mutex_unlock(&slab_mutex);
5560	}
5561#endif
5562	return err;
5563}
5564
5565static void memcg_propagate_slab_attrs(struct kmem_cache *s)
5566{
5567#ifdef CONFIG_MEMCG
5568	int i;
5569	char *buffer = NULL;
5570	struct kmem_cache *root_cache;
5571
5572	if (is_root_cache(s))
5573		return;
5574
5575	root_cache = s->memcg_params.root_cache;
5576
5577	/*
5578	 * This mean this cache had no attribute written. Therefore, no point
5579	 * in copying default values around
5580	 */
5581	if (!root_cache->max_attr_size)
5582		return;
5583
5584	for (i = 0; i < ARRAY_SIZE(slab_attrs); i++) {
5585		char mbuf[64];
5586		char *buf;
5587		struct slab_attribute *attr = to_slab_attr(slab_attrs[i]);
5588		ssize_t len;
5589
5590		if (!attr || !attr->store || !attr->show)
5591			continue;
5592
5593		/*
5594		 * It is really bad that we have to allocate here, so we will
5595		 * do it only as a fallback. If we actually allocate, though,
5596		 * we can just use the allocated buffer until the end.
5597		 *
5598		 * Most of the slub attributes will tend to be very small in
5599		 * size, but sysfs allows buffers up to a page, so they can
5600		 * theoretically happen.
5601		 */
5602		if (buffer)
5603			buf = buffer;
5604		else if (root_cache->max_attr_size < ARRAY_SIZE(mbuf))
5605			buf = mbuf;
5606		else {
5607			buffer = (char *) get_zeroed_page(GFP_KERNEL);
5608			if (WARN_ON(!buffer))
5609				continue;
5610			buf = buffer;
5611		}
5612
5613		len = attr->show(root_cache, buf);
5614		if (len > 0)
5615			attr->store(s, buf, len);
5616	}
5617
5618	if (buffer)
5619		free_page((unsigned long)buffer);
5620#endif
5621}
5622
5623static void kmem_cache_release(struct kobject *k)
5624{
5625	slab_kmem_cache_release(to_slab(k));
5626}
5627
5628static const struct sysfs_ops slab_sysfs_ops = {
5629	.show = slab_attr_show,
5630	.store = slab_attr_store,
5631};
5632
5633static struct kobj_type slab_ktype = {
5634	.sysfs_ops = &slab_sysfs_ops,
5635	.release = kmem_cache_release,
5636};
5637
5638static int uevent_filter(struct kset *kset, struct kobject *kobj)
5639{
5640	struct kobj_type *ktype = get_ktype(kobj);
5641
5642	if (ktype == &slab_ktype)
5643		return 1;
5644	return 0;
5645}
5646
5647static const struct kset_uevent_ops slab_uevent_ops = {
5648	.filter = uevent_filter,
5649};
5650
5651static struct kset *slab_kset;
5652
5653static inline struct kset *cache_kset(struct kmem_cache *s)
5654{
5655#ifdef CONFIG_MEMCG
5656	if (!is_root_cache(s))
5657		return s->memcg_params.root_cache->memcg_kset;
5658#endif
5659	return slab_kset;
5660}
5661
5662#define ID_STR_LENGTH 64
5663
5664/* Create a unique string id for a slab cache:
5665 *
5666 * Format	:[flags-]size
5667 */
5668static char *create_unique_id(struct kmem_cache *s)
5669{
5670	char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
5671	char *p = name;
5672
5673	BUG_ON(!name);
5674
5675	*p++ = ':';
5676	/*
5677	 * First flags affecting slabcache operations. We will only
5678	 * get here for aliasable slabs so we do not need to support
5679	 * too many flags. The flags here must cover all flags that
5680	 * are matched during merging to guarantee that the id is
5681	 * unique.
5682	 */
5683	if (s->flags & SLAB_CACHE_DMA)
5684		*p++ = 'd';
 
 
5685	if (s->flags & SLAB_RECLAIM_ACCOUNT)
5686		*p++ = 'a';
5687	if (s->flags & SLAB_CONSISTENCY_CHECKS)
5688		*p++ = 'F';
5689	if (s->flags & SLAB_ACCOUNT)
5690		*p++ = 'A';
5691	if (p != name + 1)
5692		*p++ = '-';
5693	p += sprintf(p, "%07u", s->size);
5694
5695	BUG_ON(p > name + ID_STR_LENGTH - 1);
5696	return name;
5697}
5698
5699static void sysfs_slab_remove_workfn(struct work_struct *work)
5700{
5701	struct kmem_cache *s =
5702		container_of(work, struct kmem_cache, kobj_remove_work);
5703
5704	if (!s->kobj.state_in_sysfs)
5705		/*
5706		 * For a memcg cache, this may be called during
5707		 * deactivation and again on shutdown.  Remove only once.
5708		 * A cache is never shut down before deactivation is
5709		 * complete, so no need to worry about synchronization.
5710		 */
5711		goto out;
5712
5713#ifdef CONFIG_MEMCG
5714	kset_unregister(s->memcg_kset);
5715#endif
5716	kobject_uevent(&s->kobj, KOBJ_REMOVE);
5717	kobject_del(&s->kobj);
5718out:
5719	kobject_put(&s->kobj);
5720}
5721
5722static int sysfs_slab_add(struct kmem_cache *s)
5723{
5724	int err;
5725	const char *name;
5726	struct kset *kset = cache_kset(s);
5727	int unmergeable = slab_unmergeable(s);
5728
5729	INIT_WORK(&s->kobj_remove_work, sysfs_slab_remove_workfn);
5730
5731	if (!kset) {
5732		kobject_init(&s->kobj, &slab_ktype);
5733		return 0;
5734	}
5735
5736	if (!unmergeable && disable_higher_order_debug &&
5737			(slub_debug & DEBUG_METADATA_FLAGS))
5738		unmergeable = 1;
5739
5740	if (unmergeable) {
5741		/*
5742		 * Slabcache can never be merged so we can use the name proper.
5743		 * This is typically the case for debug situations. In that
5744		 * case we can catch duplicate names easily.
5745		 */
5746		sysfs_remove_link(&slab_kset->kobj, s->name);
5747		name = s->name;
5748	} else {
5749		/*
5750		 * Create a unique name for the slab as a target
5751		 * for the symlinks.
5752		 */
5753		name = create_unique_id(s);
5754	}
5755
5756	s->kobj.kset = kset;
5757	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
5758	if (err)
5759		goto out;
5760
5761	err = sysfs_create_group(&s->kobj, &slab_attr_group);
5762	if (err)
5763		goto out_del_kobj;
5764
5765#ifdef CONFIG_MEMCG
5766	if (is_root_cache(s) && memcg_sysfs_enabled) {
5767		s->memcg_kset = kset_create_and_add("cgroup", NULL, &s->kobj);
5768		if (!s->memcg_kset) {
5769			err = -ENOMEM;
5770			goto out_del_kobj;
5771		}
5772	}
5773#endif
5774
5775	kobject_uevent(&s->kobj, KOBJ_ADD);
5776	if (!unmergeable) {
5777		/* Setup first alias */
5778		sysfs_slab_alias(s, s->name);
5779	}
5780out:
5781	if (!unmergeable)
5782		kfree(name);
5783	return err;
5784out_del_kobj:
5785	kobject_del(&s->kobj);
5786	goto out;
5787}
5788
5789static void sysfs_slab_remove(struct kmem_cache *s)
5790{
5791	if (slab_state < FULL)
5792		/*
5793		 * Sysfs has not been setup yet so no need to remove the
5794		 * cache from sysfs.
5795		 */
5796		return;
5797
5798	kobject_get(&s->kobj);
5799	schedule_work(&s->kobj_remove_work);
5800}
5801
5802void sysfs_slab_release(struct kmem_cache *s)
5803{
5804	if (slab_state >= FULL)
5805		kobject_put(&s->kobj);
5806}
5807
5808/*
5809 * Need to buffer aliases during bootup until sysfs becomes
5810 * available lest we lose that information.
5811 */
5812struct saved_alias {
5813	struct kmem_cache *s;
5814	const char *name;
5815	struct saved_alias *next;
5816};
5817
5818static struct saved_alias *alias_list;
5819
5820static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
5821{
5822	struct saved_alias *al;
5823
5824	if (slab_state == FULL) {
5825		/*
5826		 * If we have a leftover link then remove it.
5827		 */
5828		sysfs_remove_link(&slab_kset->kobj, name);
5829		return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
5830	}
5831
5832	al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
5833	if (!al)
5834		return -ENOMEM;
5835
5836	al->s = s;
5837	al->name = name;
5838	al->next = alias_list;
5839	alias_list = al;
5840	return 0;
5841}
5842
5843static int __init slab_sysfs_init(void)
5844{
5845	struct kmem_cache *s;
5846	int err;
5847
5848	mutex_lock(&slab_mutex);
5849
5850	slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
5851	if (!slab_kset) {
5852		mutex_unlock(&slab_mutex);
5853		pr_err("Cannot register slab subsystem.\n");
5854		return -ENOSYS;
5855	}
5856
5857	slab_state = FULL;
5858
5859	list_for_each_entry(s, &slab_caches, list) {
5860		err = sysfs_slab_add(s);
5861		if (err)
5862			pr_err("SLUB: Unable to add boot slab %s to sysfs\n",
5863			       s->name);
5864	}
5865
5866	while (alias_list) {
5867		struct saved_alias *al = alias_list;
5868
5869		alias_list = alias_list->next;
5870		err = sysfs_slab_alias(al->s, al->name);
5871		if (err)
5872			pr_err("SLUB: Unable to add boot slab alias %s to sysfs\n",
5873			       al->name);
5874		kfree(al);
5875	}
5876
5877	mutex_unlock(&slab_mutex);
5878	resiliency_test();
5879	return 0;
5880}
5881
5882__initcall(slab_sysfs_init);
5883#endif /* CONFIG_SYSFS */
5884
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5885/*
5886 * The /proc/slabinfo ABI
5887 */
5888#ifdef CONFIG_SLUB_DEBUG
5889void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
5890{
5891	unsigned long nr_slabs = 0;
5892	unsigned long nr_objs = 0;
5893	unsigned long nr_free = 0;
5894	int node;
5895	struct kmem_cache_node *n;
5896
5897	for_each_kmem_cache_node(s, node, n) {
5898		nr_slabs += node_nr_slabs(n);
5899		nr_objs += node_nr_objs(n);
5900		nr_free += count_partial(n, count_free);
5901	}
5902
5903	sinfo->active_objs = nr_objs - nr_free;
5904	sinfo->num_objs = nr_objs;
5905	sinfo->active_slabs = nr_slabs;
5906	sinfo->num_slabs = nr_slabs;
5907	sinfo->objects_per_slab = oo_objects(s->oo);
5908	sinfo->cache_order = oo_order(s->oo);
5909}
5910
5911void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s)
5912{
5913}
5914
5915ssize_t slabinfo_write(struct file *file, const char __user *buffer,
5916		       size_t count, loff_t *ppos)
5917{
5918	return -EIO;
5919}
5920#endif /* CONFIG_SLUB_DEBUG */
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * SLUB: A slab allocator that limits cache line use instead of queuing
   4 * objects in per cpu and per node lists.
   5 *
   6 * The allocator synchronizes using per slab locks or atomic operations
   7 * and only uses a centralized lock to manage a pool of partial slabs.
   8 *
   9 * (C) 2007 SGI, Christoph Lameter
  10 * (C) 2011 Linux Foundation, Christoph Lameter
  11 */
  12
  13#include <linux/mm.h>
  14#include <linux/swap.h> /* struct reclaim_state */
  15#include <linux/module.h>
  16#include <linux/bit_spinlock.h>
  17#include <linux/interrupt.h>
  18#include <linux/swab.h>
  19#include <linux/bitops.h>
  20#include <linux/slab.h>
  21#include "slab.h"
  22#include <linux/proc_fs.h>
 
  23#include <linux/seq_file.h>
  24#include <linux/kasan.h>
  25#include <linux/cpu.h>
  26#include <linux/cpuset.h>
  27#include <linux/mempolicy.h>
  28#include <linux/ctype.h>
  29#include <linux/debugobjects.h>
  30#include <linux/kallsyms.h>
  31#include <linux/kfence.h>
  32#include <linux/memory.h>
  33#include <linux/math64.h>
  34#include <linux/fault-inject.h>
  35#include <linux/stacktrace.h>
  36#include <linux/prefetch.h>
  37#include <linux/memcontrol.h>
  38#include <linux/random.h>
  39#include <kunit/test.h>
  40
  41#include <linux/debugfs.h>
  42#include <trace/events/kmem.h>
  43
  44#include "internal.h"
  45
  46/*
  47 * Lock order:
  48 *   1. slab_mutex (Global Mutex)
  49 *   2. node->list_lock
  50 *   3. slab_lock(page) (Only on some arches and for debugging)
  51 *
  52 *   slab_mutex
  53 *
  54 *   The role of the slab_mutex is to protect the list of all the slabs
  55 *   and to synchronize major metadata changes to slab cache structures.
  56 *
  57 *   The slab_lock is only used for debugging and on arches that do not
  58 *   have the ability to do a cmpxchg_double. It only protects:
 
  59 *	A. page->freelist	-> List of object free in a page
  60 *	B. page->inuse		-> Number of objects in use
  61 *	C. page->objects	-> Number of objects in page
  62 *	D. page->frozen		-> frozen state
  63 *
  64 *   If a slab is frozen then it is exempt from list management. It is not
  65 *   on any list except per cpu partial list. The processor that froze the
  66 *   slab is the one who can perform list operations on the page. Other
  67 *   processors may put objects onto the freelist but the processor that
  68 *   froze the slab is the only one that can retrieve the objects from the
  69 *   page's freelist.
  70 *
  71 *   The list_lock protects the partial and full list on each node and
  72 *   the partial slab counter. If taken then no new slabs may be added or
  73 *   removed from the lists nor make the number of partial slabs be modified.
  74 *   (Note that the total number of slabs is an atomic value that may be
  75 *   modified without taking the list lock).
  76 *
  77 *   The list_lock is a centralized lock and thus we avoid taking it as
  78 *   much as possible. As long as SLUB does not have to handle partial
  79 *   slabs, operations can continue without any centralized lock. F.e.
  80 *   allocating a long series of objects that fill up slabs does not require
  81 *   the list lock.
  82 *   Interrupts are disabled during allocation and deallocation in order to
  83 *   make the slab allocator safe to use in the context of an irq. In addition
  84 *   interrupts are disabled to ensure that the processor does not change
  85 *   while handling per_cpu slabs, due to kernel preemption.
  86 *
  87 * SLUB assigns one slab for allocation to each processor.
  88 * Allocations only occur from these slabs called cpu slabs.
  89 *
  90 * Slabs with free elements are kept on a partial list and during regular
  91 * operations no list for full slabs is used. If an object in a full slab is
  92 * freed then the slab will show up again on the partial lists.
  93 * We track full slabs for debugging purposes though because otherwise we
  94 * cannot scan all objects.
  95 *
  96 * Slabs are freed when they become empty. Teardown and setup is
  97 * minimal so we rely on the page allocators per cpu caches for
  98 * fast frees and allocs.
  99 *
 100 * page->frozen		The slab is frozen and exempt from list processing.
 
 
 101 * 			This means that the slab is dedicated to a purpose
 102 * 			such as satisfying allocations for a specific
 103 * 			processor. Objects may be freed in the slab while
 104 * 			it is frozen but slab_free will then skip the usual
 105 * 			list operations. It is up to the processor holding
 106 * 			the slab to integrate the slab into the slab lists
 107 * 			when the slab is no longer needed.
 108 *
 109 * 			One use of this flag is to mark slabs that are
 110 * 			used for allocations. Then such a slab becomes a cpu
 111 * 			slab. The cpu slab may be equipped with an additional
 112 * 			freelist that allows lockless access to
 113 * 			free objects in addition to the regular freelist
 114 * 			that requires the slab lock.
 115 *
 116 * SLAB_DEBUG_FLAGS	Slab requires special handling due to debug
 117 * 			options set. This moves	slab handling out of
 118 * 			the fast path and disables lockless freelists.
 119 */
 120
 
 
 121#ifdef CONFIG_SLUB_DEBUG
 122#ifdef CONFIG_SLUB_DEBUG_ON
 123DEFINE_STATIC_KEY_TRUE(slub_debug_enabled);
 124#else
 125DEFINE_STATIC_KEY_FALSE(slub_debug_enabled);
 126#endif
 127#endif		/* CONFIG_SLUB_DEBUG */
 128
 129static inline bool kmem_cache_debug(struct kmem_cache *s)
 130{
 131	return kmem_cache_debug_flags(s, SLAB_DEBUG_FLAGS);
 132}
 133
 134void *fixup_red_left(struct kmem_cache *s, void *p)
 135{
 136	if (kmem_cache_debug_flags(s, SLAB_RED_ZONE))
 137		p += s->red_left_pad;
 138
 139	return p;
 140}
 141
 142static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
 143{
 144#ifdef CONFIG_SLUB_CPU_PARTIAL
 145	return !kmem_cache_debug(s);
 146#else
 147	return false;
 148#endif
 149}
 150
 151/*
 152 * Issues still to be resolved:
 153 *
 154 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
 155 *
 156 * - Variable sizing of the per node arrays
 157 */
 158
 
 
 
 159/* Enable to log cmpxchg failures */
 160#undef SLUB_DEBUG_CMPXCHG
 161
 162/*
 163 * Minimum number of partial slabs. These will be left on the partial
 164 * lists even if they are empty. kmem_cache_shrink may reclaim them.
 165 */
 166#define MIN_PARTIAL 5
 167
 168/*
 169 * Maximum number of desirable partial slabs.
 170 * The existence of more partial slabs makes kmem_cache_shrink
 171 * sort the partial list by the number of objects in use.
 172 */
 173#define MAX_PARTIAL 10
 174
 175#define DEBUG_DEFAULT_FLAGS (SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \
 176				SLAB_POISON | SLAB_STORE_USER)
 177
 178/*
 179 * These debug flags cannot use CMPXCHG because there might be consistency
 180 * issues when checking or reading debug information
 181 */
 182#define SLAB_NO_CMPXCHG (SLAB_CONSISTENCY_CHECKS | SLAB_STORE_USER | \
 183				SLAB_TRACE)
 184
 185
 186/*
 187 * Debugging flags that require metadata to be stored in the slab.  These get
 188 * disabled when slub_debug=O is used and a cache's min order increases with
 189 * metadata.
 190 */
 191#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
 192
 193#define OO_SHIFT	16
 194#define OO_MASK		((1 << OO_SHIFT) - 1)
 195#define MAX_OBJS_PER_PAGE	32767 /* since page.objects is u15 */
 196
 197/* Internal SLUB flags */
 198/* Poison object */
 199#define __OBJECT_POISON		((slab_flags_t __force)0x80000000U)
 200/* Use cmpxchg_double */
 201#define __CMPXCHG_DOUBLE	((slab_flags_t __force)0x40000000U)
 202
 203/*
 204 * Tracking user of a slab.
 205 */
 206#define TRACK_ADDRS_COUNT 16
 207struct track {
 208	unsigned long addr;	/* Called from address */
 209#ifdef CONFIG_STACKTRACE
 210	unsigned long addrs[TRACK_ADDRS_COUNT];	/* Called from address */
 211#endif
 212	int cpu;		/* Was running on cpu */
 213	int pid;		/* Pid context */
 214	unsigned long when;	/* When did the operation occur */
 215};
 216
 217enum track_item { TRACK_ALLOC, TRACK_FREE };
 218
 219#ifdef CONFIG_SYSFS
 220static int sysfs_slab_add(struct kmem_cache *);
 221static int sysfs_slab_alias(struct kmem_cache *, const char *);
 
 
 222#else
 223static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
 224static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
 225							{ return 0; }
 226#endif
 227
 228#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
 229static void debugfs_slab_add(struct kmem_cache *);
 230#else
 231static inline void debugfs_slab_add(struct kmem_cache *s) { }
 232#endif
 233
 234static inline void stat(const struct kmem_cache *s, enum stat_item si)
 235{
 236#ifdef CONFIG_SLUB_STATS
 237	/*
 238	 * The rmw is racy on a preemptible kernel but this is acceptable, so
 239	 * avoid this_cpu_add()'s irq-disable overhead.
 240	 */
 241	raw_cpu_inc(s->cpu_slab->stat[si]);
 242#endif
 243}
 244
 245/*
 246 * Tracks for which NUMA nodes we have kmem_cache_nodes allocated.
 247 * Corresponds to node_state[N_NORMAL_MEMORY], but can temporarily
 248 * differ during memory hotplug/hotremove operations.
 249 * Protected by slab_mutex.
 250 */
 251static nodemask_t slab_nodes;
 252
 253/********************************************************************
 254 * 			Core slab cache functions
 255 *******************************************************************/
 256
 257/*
 258 * Returns freelist pointer (ptr). With hardening, this is obfuscated
 259 * with an XOR of the address where the pointer is held and a per-cache
 260 * random number.
 261 */
 262static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
 263				 unsigned long ptr_addr)
 264{
 265#ifdef CONFIG_SLAB_FREELIST_HARDENED
 266	/*
 267	 * When CONFIG_KASAN_SW/HW_TAGS is enabled, ptr_addr might be tagged.
 268	 * Normally, this doesn't cause any issues, as both set_freepointer()
 269	 * and get_freepointer() are called with a pointer with the same tag.
 270	 * However, there are some issues with CONFIG_SLUB_DEBUG code. For
 271	 * example, when __free_slub() iterates over objects in a cache, it
 272	 * passes untagged pointers to check_object(). check_object() in turns
 273	 * calls get_freepointer() with an untagged pointer, which causes the
 274	 * freepointer to be restored incorrectly.
 275	 */
 276	return (void *)((unsigned long)ptr ^ s->random ^
 277			swab((unsigned long)kasan_reset_tag((void *)ptr_addr)));
 278#else
 279	return ptr;
 280#endif
 281}
 282
 283/* Returns the freelist pointer recorded at location ptr_addr. */
 284static inline void *freelist_dereference(const struct kmem_cache *s,
 285					 void *ptr_addr)
 286{
 287	return freelist_ptr(s, (void *)*(unsigned long *)(ptr_addr),
 288			    (unsigned long)ptr_addr);
 289}
 290
 291static inline void *get_freepointer(struct kmem_cache *s, void *object)
 292{
 293	object = kasan_reset_tag(object);
 294	return freelist_dereference(s, object + s->offset);
 295}
 296
 297static void prefetch_freepointer(const struct kmem_cache *s, void *object)
 298{
 299	prefetch(object + s->offset);
 
 300}
 301
 302static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
 303{
 304	unsigned long freepointer_addr;
 305	void *p;
 306
 307	if (!debug_pagealloc_enabled_static())
 308		return get_freepointer(s, object);
 309
 310	object = kasan_reset_tag(object);
 311	freepointer_addr = (unsigned long)object + s->offset;
 312	copy_from_kernel_nofault(&p, (void **)freepointer_addr, sizeof(p));
 313	return freelist_ptr(s, p, freepointer_addr);
 314}
 315
 316static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
 317{
 318	unsigned long freeptr_addr = (unsigned long)object + s->offset;
 319
 320#ifdef CONFIG_SLAB_FREELIST_HARDENED
 321	BUG_ON(object == fp); /* naive detection of double free or corruption */
 322#endif
 323
 324	freeptr_addr = (unsigned long)kasan_reset_tag((void *)freeptr_addr);
 325	*(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr);
 326}
 327
 328/* Loop over all objects in a slab */
 329#define for_each_object(__p, __s, __addr, __objects) \
 330	for (__p = fixup_red_left(__s, __addr); \
 331		__p < (__addr) + (__objects) * (__s)->size; \
 332		__p += (__s)->size)
 333
 334static inline unsigned int order_objects(unsigned int order, unsigned int size)
 
 
 
 
 
 
 
 
 
 
 
 335{
 336	return ((unsigned int)PAGE_SIZE << order) / size;
 337}
 338
 339static inline struct kmem_cache_order_objects oo_make(unsigned int order,
 340		unsigned int size)
 341{
 342	struct kmem_cache_order_objects x = {
 343		(order << OO_SHIFT) + order_objects(order, size)
 344	};
 345
 346	return x;
 347}
 348
 349static inline unsigned int oo_order(struct kmem_cache_order_objects x)
 350{
 351	return x.x >> OO_SHIFT;
 352}
 353
 354static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
 355{
 356	return x.x & OO_MASK;
 357}
 358
 359/*
 360 * Per slab locking using the pagelock
 361 */
 362static __always_inline void slab_lock(struct page *page)
 363{
 364	VM_BUG_ON_PAGE(PageTail(page), page);
 365	bit_spin_lock(PG_locked, &page->flags);
 366}
 367
 368static __always_inline void slab_unlock(struct page *page)
 369{
 370	VM_BUG_ON_PAGE(PageTail(page), page);
 371	__bit_spin_unlock(PG_locked, &page->flags);
 372}
 373
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 374/* Interrupts must be disabled (for the fallback code to work right) */
 375static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
 376		void *freelist_old, unsigned long counters_old,
 377		void *freelist_new, unsigned long counters_new,
 378		const char *n)
 379{
 380	VM_BUG_ON(!irqs_disabled());
 381#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
 382    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
 383	if (s->flags & __CMPXCHG_DOUBLE) {
 384		if (cmpxchg_double(&page->freelist, &page->counters,
 385				   freelist_old, counters_old,
 386				   freelist_new, counters_new))
 387			return true;
 388	} else
 389#endif
 390	{
 391		slab_lock(page);
 392		if (page->freelist == freelist_old &&
 393					page->counters == counters_old) {
 394			page->freelist = freelist_new;
 395			page->counters = counters_new;
 396			slab_unlock(page);
 397			return true;
 398		}
 399		slab_unlock(page);
 400	}
 401
 402	cpu_relax();
 403	stat(s, CMPXCHG_DOUBLE_FAIL);
 404
 405#ifdef SLUB_DEBUG_CMPXCHG
 406	pr_info("%s %s: cmpxchg double redo ", n, s->name);
 407#endif
 408
 409	return false;
 410}
 411
 412static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
 413		void *freelist_old, unsigned long counters_old,
 414		void *freelist_new, unsigned long counters_new,
 415		const char *n)
 416{
 417#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
 418    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
 419	if (s->flags & __CMPXCHG_DOUBLE) {
 420		if (cmpxchg_double(&page->freelist, &page->counters,
 421				   freelist_old, counters_old,
 422				   freelist_new, counters_new))
 423			return true;
 424	} else
 425#endif
 426	{
 427		unsigned long flags;
 428
 429		local_irq_save(flags);
 430		slab_lock(page);
 431		if (page->freelist == freelist_old &&
 432					page->counters == counters_old) {
 433			page->freelist = freelist_new;
 434			page->counters = counters_new;
 435			slab_unlock(page);
 436			local_irq_restore(flags);
 437			return true;
 438		}
 439		slab_unlock(page);
 440		local_irq_restore(flags);
 441	}
 442
 443	cpu_relax();
 444	stat(s, CMPXCHG_DOUBLE_FAIL);
 445
 446#ifdef SLUB_DEBUG_CMPXCHG
 447	pr_info("%s %s: cmpxchg double redo ", n, s->name);
 448#endif
 449
 450	return false;
 451}
 452
 453#ifdef CONFIG_SLUB_DEBUG
 454static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
 455static DEFINE_SPINLOCK(object_map_lock);
 456
 457#if IS_ENABLED(CONFIG_KUNIT)
 458static bool slab_add_kunit_errors(void)
 459{
 460	struct kunit_resource *resource;
 461
 462	if (likely(!current->kunit_test))
 463		return false;
 464
 465	resource = kunit_find_named_resource(current->kunit_test, "slab_errors");
 466	if (!resource)
 467		return false;
 468
 469	(*(int *)resource->data)++;
 470	kunit_put_resource(resource);
 471	return true;
 472}
 473#else
 474static inline bool slab_add_kunit_errors(void) { return false; }
 475#endif
 476
 477/*
 478 * Determine a map of object in use on a page.
 479 *
 480 * Node listlock must be held to guarantee that the page does
 481 * not vanish from under us.
 482 */
 483static unsigned long *get_map(struct kmem_cache *s, struct page *page)
 484	__acquires(&object_map_lock)
 485{
 486	void *p;
 487	void *addr = page_address(page);
 488
 489	VM_BUG_ON(!irqs_disabled());
 490
 491	spin_lock(&object_map_lock);
 492
 493	bitmap_zero(object_map, page->objects);
 494
 495	for (p = page->freelist; p; p = get_freepointer(s, p))
 496		set_bit(__obj_to_index(s, addr, p), object_map);
 497
 498	return object_map;
 499}
 500
 501static void put_map(unsigned long *map) __releases(&object_map_lock)
 502{
 503	VM_BUG_ON(map != object_map);
 504	spin_unlock(&object_map_lock);
 505}
 506
 507static inline unsigned int size_from_object(struct kmem_cache *s)
 508{
 509	if (s->flags & SLAB_RED_ZONE)
 510		return s->size - s->red_left_pad;
 511
 512	return s->size;
 513}
 514
 515static inline void *restore_red_left(struct kmem_cache *s, void *p)
 516{
 517	if (s->flags & SLAB_RED_ZONE)
 518		p -= s->red_left_pad;
 519
 520	return p;
 521}
 522
 523/*
 524 * Debug settings:
 525 */
 526#if defined(CONFIG_SLUB_DEBUG_ON)
 527static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS;
 528#else
 529static slab_flags_t slub_debug;
 530#endif
 531
 532static char *slub_debug_string;
 533static int disable_higher_order_debug;
 534
 535/*
 536 * slub is about to manipulate internal object metadata.  This memory lies
 537 * outside the range of the allocated object, so accessing it would normally
 538 * be reported by kasan as a bounds error.  metadata_access_enable() is used
 539 * to tell kasan that these accesses are OK.
 540 */
 541static inline void metadata_access_enable(void)
 542{
 543	kasan_disable_current();
 544}
 545
 546static inline void metadata_access_disable(void)
 547{
 548	kasan_enable_current();
 549}
 550
 551/*
 552 * Object debugging
 553 */
 554
 555/* Verify that a pointer has an address that is valid within a slab page */
 556static inline int check_valid_pointer(struct kmem_cache *s,
 557				struct page *page, void *object)
 558{
 559	void *base;
 560
 561	if (!object)
 562		return 1;
 563
 564	base = page_address(page);
 565	object = kasan_reset_tag(object);
 566	object = restore_red_left(s, object);
 567	if (object < base || object >= base + page->objects * s->size ||
 568		(object - base) % s->size) {
 569		return 0;
 570	}
 571
 572	return 1;
 573}
 574
 575static void print_section(char *level, char *text, u8 *addr,
 576			  unsigned int length)
 577{
 578	metadata_access_enable();
 579	print_hex_dump(level, text, DUMP_PREFIX_ADDRESS,
 580			16, 1, kasan_reset_tag((void *)addr), length, 1);
 581	metadata_access_disable();
 582}
 583
 584/*
 585 * See comment in calculate_sizes().
 586 */
 587static inline bool freeptr_outside_object(struct kmem_cache *s)
 588{
 589	return s->offset >= s->inuse;
 590}
 591
 592/*
 593 * Return offset of the end of info block which is inuse + free pointer if
 594 * not overlapping with object.
 595 */
 596static inline unsigned int get_info_end(struct kmem_cache *s)
 597{
 598	if (freeptr_outside_object(s))
 599		return s->inuse + sizeof(void *);
 600	else
 601		return s->inuse;
 602}
 603
 604static struct track *get_track(struct kmem_cache *s, void *object,
 605	enum track_item alloc)
 606{
 607	struct track *p;
 608
 609	p = object + get_info_end(s);
 
 
 
 610
 611	return kasan_reset_tag(p + alloc);
 612}
 613
 614static void set_track(struct kmem_cache *s, void *object,
 615			enum track_item alloc, unsigned long addr)
 616{
 617	struct track *p = get_track(s, object, alloc);
 618
 619	if (addr) {
 620#ifdef CONFIG_STACKTRACE
 621		unsigned int nr_entries;
 
 622
 
 
 
 
 623		metadata_access_enable();
 624		nr_entries = stack_trace_save(kasan_reset_tag(p->addrs),
 625					      TRACK_ADDRS_COUNT, 3);
 626		metadata_access_disable();
 627
 628		if (nr_entries < TRACK_ADDRS_COUNT)
 629			p->addrs[nr_entries] = 0;
 
 
 
 
 
 630#endif
 631		p->addr = addr;
 632		p->cpu = smp_processor_id();
 633		p->pid = current->pid;
 634		p->when = jiffies;
 635	} else {
 636		memset(p, 0, sizeof(struct track));
 637	}
 638}
 639
 640static void init_tracking(struct kmem_cache *s, void *object)
 641{
 642	if (!(s->flags & SLAB_STORE_USER))
 643		return;
 644
 645	set_track(s, object, TRACK_FREE, 0UL);
 646	set_track(s, object, TRACK_ALLOC, 0UL);
 647}
 648
 649static void print_track(const char *s, struct track *t, unsigned long pr_time)
 650{
 651	if (!t->addr)
 652		return;
 653
 654	pr_err("%s in %pS age=%lu cpu=%u pid=%d\n",
 655	       s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid);
 656#ifdef CONFIG_STACKTRACE
 657	{
 658		int i;
 659		for (i = 0; i < TRACK_ADDRS_COUNT; i++)
 660			if (t->addrs[i])
 661				pr_err("\t%pS\n", (void *)t->addrs[i]);
 662			else
 663				break;
 664	}
 665#endif
 666}
 667
 668void print_tracking(struct kmem_cache *s, void *object)
 669{
 670	unsigned long pr_time = jiffies;
 671	if (!(s->flags & SLAB_STORE_USER))
 672		return;
 673
 674	print_track("Allocated", get_track(s, object, TRACK_ALLOC), pr_time);
 675	print_track("Freed", get_track(s, object, TRACK_FREE), pr_time);
 676}
 677
 678static void print_page_info(struct page *page)
 679{
 680	pr_err("Slab 0x%p objects=%u used=%u fp=0x%p flags=%#lx(%pGp)\n",
 681	       page, page->objects, page->inuse, page->freelist,
 682	       page->flags, &page->flags);
 683
 684}
 685
 686static void slab_bug(struct kmem_cache *s, char *fmt, ...)
 687{
 688	struct va_format vaf;
 689	va_list args;
 690
 691	va_start(args, fmt);
 692	vaf.fmt = fmt;
 693	vaf.va = &args;
 694	pr_err("=============================================================================\n");
 695	pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf);
 696	pr_err("-----------------------------------------------------------------------------\n\n");
 
 
 697	va_end(args);
 698}
 699
 700__printf(2, 3)
 701static void slab_fix(struct kmem_cache *s, char *fmt, ...)
 702{
 703	struct va_format vaf;
 704	va_list args;
 705
 706	if (slab_add_kunit_errors())
 707		return;
 708
 709	va_start(args, fmt);
 710	vaf.fmt = fmt;
 711	vaf.va = &args;
 712	pr_err("FIX %s: %pV\n", s->name, &vaf);
 713	va_end(args);
 714}
 715
 716static bool freelist_corrupted(struct kmem_cache *s, struct page *page,
 717			       void **freelist, void *nextfree)
 718{
 719	if ((s->flags & SLAB_CONSISTENCY_CHECKS) &&
 720	    !check_valid_pointer(s, page, nextfree) && freelist) {
 721		object_err(s, page, *freelist, "Freechain corrupt");
 722		*freelist = NULL;
 723		slab_fix(s, "Isolate corrupted freechain");
 724		return true;
 725	}
 726
 727	return false;
 728}
 729
 730static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
 731{
 732	unsigned int off;	/* Offset of last byte */
 733	u8 *addr = page_address(page);
 734
 735	print_tracking(s, p);
 736
 737	print_page_info(page);
 738
 739	pr_err("Object 0x%p @offset=%tu fp=0x%p\n\n",
 740	       p, p - addr, get_freepointer(s, p));
 741
 742	if (s->flags & SLAB_RED_ZONE)
 743		print_section(KERN_ERR, "Redzone  ", p - s->red_left_pad,
 744			      s->red_left_pad);
 745	else if (p > addr + 16)
 746		print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
 747
 748	print_section(KERN_ERR,         "Object   ", p,
 749		      min_t(unsigned int, s->object_size, PAGE_SIZE));
 750	if (s->flags & SLAB_RED_ZONE)
 751		print_section(KERN_ERR, "Redzone  ", p + s->object_size,
 752			s->inuse - s->object_size);
 753
 754	off = get_info_end(s);
 
 
 
 755
 756	if (s->flags & SLAB_STORE_USER)
 757		off += 2 * sizeof(struct track);
 758
 759	off += kasan_metadata_size(s);
 760
 761	if (off != size_from_object(s))
 762		/* Beginning of the filler is the free pointer */
 763		print_section(KERN_ERR, "Padding  ", p + off,
 764			      size_from_object(s) - off);
 765
 766	dump_stack();
 767}
 768
 769void object_err(struct kmem_cache *s, struct page *page,
 770			u8 *object, char *reason)
 771{
 772	if (slab_add_kunit_errors())
 773		return;
 774
 775	slab_bug(s, "%s", reason);
 776	print_trailer(s, page, object);
 777	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
 778}
 779
 780static __printf(3, 4) void slab_err(struct kmem_cache *s, struct page *page,
 781			const char *fmt, ...)
 782{
 783	va_list args;
 784	char buf[100];
 785
 786	if (slab_add_kunit_errors())
 787		return;
 788
 789	va_start(args, fmt);
 790	vsnprintf(buf, sizeof(buf), fmt, args);
 791	va_end(args);
 792	slab_bug(s, "%s", buf);
 793	print_page_info(page);
 794	dump_stack();
 795	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
 796}
 797
 798static void init_object(struct kmem_cache *s, void *object, u8 val)
 799{
 800	u8 *p = kasan_reset_tag(object);
 801
 802	if (s->flags & SLAB_RED_ZONE)
 803		memset(p - s->red_left_pad, val, s->red_left_pad);
 804
 805	if (s->flags & __OBJECT_POISON) {
 806		memset(p, POISON_FREE, s->object_size - 1);
 807		p[s->object_size - 1] = POISON_END;
 808	}
 809
 810	if (s->flags & SLAB_RED_ZONE)
 811		memset(p + s->object_size, val, s->inuse - s->object_size);
 812}
 813
 814static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
 815						void *from, void *to)
 816{
 817	slab_fix(s, "Restoring %s 0x%p-0x%p=0x%x", message, from, to - 1, data);
 818	memset(from, data, to - from);
 819}
 820
 821static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
 822			u8 *object, char *what,
 823			u8 *start, unsigned int value, unsigned int bytes)
 824{
 825	u8 *fault;
 826	u8 *end;
 827	u8 *addr = page_address(page);
 828
 829	metadata_access_enable();
 830	fault = memchr_inv(kasan_reset_tag(start), value, bytes);
 831	metadata_access_disable();
 832	if (!fault)
 833		return 1;
 834
 835	end = start + bytes;
 836	while (end > fault && end[-1] == value)
 837		end--;
 838
 839	if (slab_add_kunit_errors())
 840		goto skip_bug_print;
 841
 842	slab_bug(s, "%s overwritten", what);
 843	pr_err("0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n",
 844					fault, end - 1, fault - addr,
 845					fault[0], value);
 846	print_trailer(s, page, object);
 847	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
 848
 849skip_bug_print:
 850	restore_bytes(s, what, value, fault, end);
 851	return 0;
 852}
 853
 854/*
 855 * Object layout:
 856 *
 857 * object address
 858 * 	Bytes of the object to be managed.
 859 * 	If the freepointer may overlay the object then the free
 860 *	pointer is at the middle of the object.
 861 *
 862 * 	Poisoning uses 0x6b (POISON_FREE) and the last byte is
 863 * 	0xa5 (POISON_END)
 864 *
 865 * object + s->object_size
 866 * 	Padding to reach word boundary. This is also used for Redzoning.
 867 * 	Padding is extended by another word if Redzoning is enabled and
 868 * 	object_size == inuse.
 869 *
 870 * 	We fill with 0xbb (RED_INACTIVE) for inactive objects and with
 871 * 	0xcc (RED_ACTIVE) for objects in use.
 872 *
 873 * object + s->inuse
 874 * 	Meta data starts here.
 875 *
 876 * 	A. Free pointer (if we cannot overwrite object on free)
 877 * 	B. Tracking data for SLAB_STORE_USER
 878 *	C. Padding to reach required alignment boundary or at minimum
 879 * 		one word if debugging is on to be able to detect writes
 880 * 		before the word boundary.
 881 *
 882 *	Padding is done using 0x5a (POISON_INUSE)
 883 *
 884 * object + s->size
 885 * 	Nothing is used beyond s->size.
 886 *
 887 * If slabcaches are merged then the object_size and inuse boundaries are mostly
 888 * ignored. And therefore no slab options that rely on these boundaries
 889 * may be used with merged slabcaches.
 890 */
 891
 892static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
 893{
 894	unsigned long off = get_info_end(s);	/* The end of info */
 
 
 
 
 895
 896	if (s->flags & SLAB_STORE_USER)
 897		/* We also have user information there */
 898		off += 2 * sizeof(struct track);
 899
 900	off += kasan_metadata_size(s);
 901
 902	if (size_from_object(s) == off)
 903		return 1;
 904
 905	return check_bytes_and_report(s, page, p, "Object padding",
 906			p + off, POISON_INUSE, size_from_object(s) - off);
 907}
 908
 909/* Check the pad bytes at the end of a slab page */
 910static int slab_pad_check(struct kmem_cache *s, struct page *page)
 911{
 912	u8 *start;
 913	u8 *fault;
 914	u8 *end;
 915	u8 *pad;
 916	int length;
 917	int remainder;
 918
 919	if (!(s->flags & SLAB_POISON))
 920		return 1;
 921
 922	start = page_address(page);
 923	length = page_size(page);
 924	end = start + length;
 925	remainder = length % s->size;
 926	if (!remainder)
 927		return 1;
 928
 929	pad = end - remainder;
 930	metadata_access_enable();
 931	fault = memchr_inv(kasan_reset_tag(pad), POISON_INUSE, remainder);
 932	metadata_access_disable();
 933	if (!fault)
 934		return 1;
 935	while (end > fault && end[-1] == POISON_INUSE)
 936		end--;
 937
 938	slab_err(s, page, "Padding overwritten. 0x%p-0x%p @offset=%tu",
 939			fault, end - 1, fault - start);
 940	print_section(KERN_ERR, "Padding ", pad, remainder);
 941
 942	restore_bytes(s, "slab padding", POISON_INUSE, fault, end);
 943	return 0;
 944}
 945
 946static int check_object(struct kmem_cache *s, struct page *page,
 947					void *object, u8 val)
 948{
 949	u8 *p = object;
 950	u8 *endobject = object + s->object_size;
 951
 952	if (s->flags & SLAB_RED_ZONE) {
 953		if (!check_bytes_and_report(s, page, object, "Left Redzone",
 954			object - s->red_left_pad, val, s->red_left_pad))
 955			return 0;
 956
 957		if (!check_bytes_and_report(s, page, object, "Right Redzone",
 958			endobject, val, s->inuse - s->object_size))
 959			return 0;
 960	} else {
 961		if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
 962			check_bytes_and_report(s, page, p, "Alignment padding",
 963				endobject, POISON_INUSE,
 964				s->inuse - s->object_size);
 965		}
 966	}
 967
 968	if (s->flags & SLAB_POISON) {
 969		if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
 970			(!check_bytes_and_report(s, page, p, "Poison", p,
 971					POISON_FREE, s->object_size - 1) ||
 972			 !check_bytes_and_report(s, page, p, "End Poison",
 973				p + s->object_size - 1, POISON_END, 1)))
 974			return 0;
 975		/*
 976		 * check_pad_bytes cleans up on its own.
 977		 */
 978		check_pad_bytes(s, page, p);
 979	}
 980
 981	if (!freeptr_outside_object(s) && val == SLUB_RED_ACTIVE)
 982		/*
 983		 * Object and freepointer overlap. Cannot check
 984		 * freepointer while object is allocated.
 985		 */
 986		return 1;
 987
 988	/* Check free pointer validity */
 989	if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
 990		object_err(s, page, p, "Freepointer corrupt");
 991		/*
 992		 * No choice but to zap it and thus lose the remainder
 993		 * of the free objects in this slab. May cause
 994		 * another error because the object count is now wrong.
 995		 */
 996		set_freepointer(s, p, NULL);
 997		return 0;
 998	}
 999	return 1;
1000}
1001
1002static int check_slab(struct kmem_cache *s, struct page *page)
1003{
1004	int maxobj;
1005
1006	VM_BUG_ON(!irqs_disabled());
1007
1008	if (!PageSlab(page)) {
1009		slab_err(s, page, "Not a valid slab page");
1010		return 0;
1011	}
1012
1013	maxobj = order_objects(compound_order(page), s->size);
1014	if (page->objects > maxobj) {
1015		slab_err(s, page, "objects %u > max %u",
1016			page->objects, maxobj);
1017		return 0;
1018	}
1019	if (page->inuse > page->objects) {
1020		slab_err(s, page, "inuse %u > max %u",
1021			page->inuse, page->objects);
1022		return 0;
1023	}
1024	/* Slab_pad_check fixes things up after itself */
1025	slab_pad_check(s, page);
1026	return 1;
1027}
1028
1029/*
1030 * Determine if a certain object on a page is on the freelist. Must hold the
1031 * slab lock to guarantee that the chains are in a consistent state.
1032 */
1033static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
1034{
1035	int nr = 0;
1036	void *fp;
1037	void *object = NULL;
1038	int max_objects;
1039
1040	fp = page->freelist;
1041	while (fp && nr <= page->objects) {
1042		if (fp == search)
1043			return 1;
1044		if (!check_valid_pointer(s, page, fp)) {
1045			if (object) {
1046				object_err(s, page, object,
1047					"Freechain corrupt");
1048				set_freepointer(s, object, NULL);
1049			} else {
1050				slab_err(s, page, "Freepointer corrupt");
1051				page->freelist = NULL;
1052				page->inuse = page->objects;
1053				slab_fix(s, "Freelist cleared");
1054				return 0;
1055			}
1056			break;
1057		}
1058		object = fp;
1059		fp = get_freepointer(s, object);
1060		nr++;
1061	}
1062
1063	max_objects = order_objects(compound_order(page), s->size);
1064	if (max_objects > MAX_OBJS_PER_PAGE)
1065		max_objects = MAX_OBJS_PER_PAGE;
1066
1067	if (page->objects != max_objects) {
1068		slab_err(s, page, "Wrong number of objects. Found %d but should be %d",
1069			 page->objects, max_objects);
1070		page->objects = max_objects;
1071		slab_fix(s, "Number of objects adjusted");
1072	}
1073	if (page->inuse != page->objects - nr) {
1074		slab_err(s, page, "Wrong object count. Counter is %d but counted were %d",
1075			 page->inuse, page->objects - nr);
1076		page->inuse = page->objects - nr;
1077		slab_fix(s, "Object count adjusted");
1078	}
1079	return search == NULL;
1080}
1081
1082static void trace(struct kmem_cache *s, struct page *page, void *object,
1083								int alloc)
1084{
1085	if (s->flags & SLAB_TRACE) {
1086		pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
1087			s->name,
1088			alloc ? "alloc" : "free",
1089			object, page->inuse,
1090			page->freelist);
1091
1092		if (!alloc)
1093			print_section(KERN_INFO, "Object ", (void *)object,
1094					s->object_size);
1095
1096		dump_stack();
1097	}
1098}
1099
1100/*
1101 * Tracking of fully allocated slabs for debugging purposes.
1102 */
1103static void add_full(struct kmem_cache *s,
1104	struct kmem_cache_node *n, struct page *page)
1105{
1106	if (!(s->flags & SLAB_STORE_USER))
1107		return;
1108
1109	lockdep_assert_held(&n->list_lock);
1110	list_add(&page->slab_list, &n->full);
1111}
1112
1113static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
1114{
1115	if (!(s->flags & SLAB_STORE_USER))
1116		return;
1117
1118	lockdep_assert_held(&n->list_lock);
1119	list_del(&page->slab_list);
1120}
1121
1122/* Tracking of the number of slabs for debugging purposes */
1123static inline unsigned long slabs_node(struct kmem_cache *s, int node)
1124{
1125	struct kmem_cache_node *n = get_node(s, node);
1126
1127	return atomic_long_read(&n->nr_slabs);
1128}
1129
1130static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1131{
1132	return atomic_long_read(&n->nr_slabs);
1133}
1134
1135static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
1136{
1137	struct kmem_cache_node *n = get_node(s, node);
1138
1139	/*
1140	 * May be called early in order to allocate a slab for the
1141	 * kmem_cache_node structure. Solve the chicken-egg
1142	 * dilemma by deferring the increment of the count during
1143	 * bootstrap (see early_kmem_cache_node_alloc).
1144	 */
1145	if (likely(n)) {
1146		atomic_long_inc(&n->nr_slabs);
1147		atomic_long_add(objects, &n->total_objects);
1148	}
1149}
1150static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
1151{
1152	struct kmem_cache_node *n = get_node(s, node);
1153
1154	atomic_long_dec(&n->nr_slabs);
1155	atomic_long_sub(objects, &n->total_objects);
1156}
1157
1158/* Object debug checks for alloc/free paths */
1159static void setup_object_debug(struct kmem_cache *s, struct page *page,
1160								void *object)
1161{
1162	if (!kmem_cache_debug_flags(s, SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON))
1163		return;
1164
1165	init_object(s, object, SLUB_RED_INACTIVE);
1166	init_tracking(s, object);
1167}
1168
1169static
1170void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr)
1171{
1172	if (!kmem_cache_debug_flags(s, SLAB_POISON))
1173		return;
1174
1175	metadata_access_enable();
1176	memset(kasan_reset_tag(addr), POISON_INUSE, page_size(page));
1177	metadata_access_disable();
1178}
1179
1180static inline int alloc_consistency_checks(struct kmem_cache *s,
1181					struct page *page, void *object)
 
1182{
1183	if (!check_slab(s, page))
1184		return 0;
1185
1186	if (!check_valid_pointer(s, page, object)) {
1187		object_err(s, page, object, "Freelist Pointer check fails");
1188		return 0;
1189	}
1190
1191	if (!check_object(s, page, object, SLUB_RED_INACTIVE))
1192		return 0;
1193
1194	return 1;
1195}
1196
1197static noinline int alloc_debug_processing(struct kmem_cache *s,
1198					struct page *page,
1199					void *object, unsigned long addr)
1200{
1201	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1202		if (!alloc_consistency_checks(s, page, object))
1203			goto bad;
1204	}
1205
1206	/* Success perform special debug activities for allocs */
1207	if (s->flags & SLAB_STORE_USER)
1208		set_track(s, object, TRACK_ALLOC, addr);
1209	trace(s, page, object, 1);
1210	init_object(s, object, SLUB_RED_ACTIVE);
1211	return 1;
1212
1213bad:
1214	if (PageSlab(page)) {
1215		/*
1216		 * If this is a slab page then lets do the best we can
1217		 * to avoid issues in the future. Marking all objects
1218		 * as used avoids touching the remaining objects.
1219		 */
1220		slab_fix(s, "Marking all objects used");
1221		page->inuse = page->objects;
1222		page->freelist = NULL;
1223	}
1224	return 0;
1225}
1226
1227static inline int free_consistency_checks(struct kmem_cache *s,
1228		struct page *page, void *object, unsigned long addr)
1229{
1230	if (!check_valid_pointer(s, page, object)) {
1231		slab_err(s, page, "Invalid object pointer 0x%p", object);
1232		return 0;
1233	}
1234
1235	if (on_freelist(s, page, object)) {
1236		object_err(s, page, object, "Object already free");
1237		return 0;
1238	}
1239
1240	if (!check_object(s, page, object, SLUB_RED_ACTIVE))
1241		return 0;
1242
1243	if (unlikely(s != page->slab_cache)) {
1244		if (!PageSlab(page)) {
1245			slab_err(s, page, "Attempt to free object(0x%p) outside of slab",
1246				 object);
1247		} else if (!page->slab_cache) {
1248			pr_err("SLUB <none>: no slab for object 0x%p.\n",
1249			       object);
1250			dump_stack();
1251		} else
1252			object_err(s, page, object,
1253					"page slab pointer corrupt.");
1254		return 0;
1255	}
1256	return 1;
1257}
1258
1259/* Supports checking bulk free of a constructed freelist */
1260static noinline int free_debug_processing(
1261	struct kmem_cache *s, struct page *page,
1262	void *head, void *tail, int bulk_cnt,
1263	unsigned long addr)
1264{
1265	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1266	void *object = head;
1267	int cnt = 0;
1268	unsigned long flags;
1269	int ret = 0;
1270
1271	spin_lock_irqsave(&n->list_lock, flags);
1272	slab_lock(page);
1273
1274	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1275		if (!check_slab(s, page))
1276			goto out;
1277	}
1278
1279next_object:
1280	cnt++;
1281
1282	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1283		if (!free_consistency_checks(s, page, object, addr))
1284			goto out;
1285	}
1286
1287	if (s->flags & SLAB_STORE_USER)
1288		set_track(s, object, TRACK_FREE, addr);
1289	trace(s, page, object, 0);
1290	/* Freepointer not overwritten by init_object(), SLAB_POISON moved it */
1291	init_object(s, object, SLUB_RED_INACTIVE);
1292
1293	/* Reached end of constructed freelist yet? */
1294	if (object != tail) {
1295		object = get_freepointer(s, object);
1296		goto next_object;
1297	}
1298	ret = 1;
1299
1300out:
1301	if (cnt != bulk_cnt)
1302		slab_err(s, page, "Bulk freelist count(%d) invalid(%d)\n",
1303			 bulk_cnt, cnt);
1304
1305	slab_unlock(page);
1306	spin_unlock_irqrestore(&n->list_lock, flags);
1307	if (!ret)
1308		slab_fix(s, "Object at 0x%p not freed", object);
1309	return ret;
1310}
1311
1312/*
1313 * Parse a block of slub_debug options. Blocks are delimited by ';'
1314 *
1315 * @str:    start of block
1316 * @flags:  returns parsed flags, or DEBUG_DEFAULT_FLAGS if none specified
1317 * @slabs:  return start of list of slabs, or NULL when there's no list
1318 * @init:   assume this is initial parsing and not per-kmem-create parsing
1319 *
1320 * returns the start of next block if there's any, or NULL
1321 */
1322static char *
1323parse_slub_debug_flags(char *str, slab_flags_t *flags, char **slabs, bool init)
1324{
1325	bool higher_order_disable = false;
 
 
 
 
 
1326
1327	/* Skip any completely empty blocks */
1328	while (*str && *str == ';')
1329		str++;
1330
1331	if (*str == ',') {
1332		/*
1333		 * No options but restriction on slabs. This means full
1334		 * debugging for slabs matching a pattern.
1335		 */
1336		*flags = DEBUG_DEFAULT_FLAGS;
1337		goto check_slabs;
1338	}
1339	*flags = 0;
1340
1341	/* Determine which debug features should be switched on */
1342	for (; *str && *str != ',' && *str != ';'; str++) {
 
 
 
 
 
 
 
 
 
1343		switch (tolower(*str)) {
1344		case '-':
1345			*flags = 0;
1346			break;
1347		case 'f':
1348			*flags |= SLAB_CONSISTENCY_CHECKS;
1349			break;
1350		case 'z':
1351			*flags |= SLAB_RED_ZONE;
1352			break;
1353		case 'p':
1354			*flags |= SLAB_POISON;
1355			break;
1356		case 'u':
1357			*flags |= SLAB_STORE_USER;
1358			break;
1359		case 't':
1360			*flags |= SLAB_TRACE;
1361			break;
1362		case 'a':
1363			*flags |= SLAB_FAILSLAB;
1364			break;
1365		case 'o':
1366			/*
1367			 * Avoid enabling debugging on caches if its minimum
1368			 * order would increase as a result.
1369			 */
1370			higher_order_disable = true;
1371			break;
1372		default:
1373			if (init)
1374				pr_err("slub_debug option '%c' unknown. skipped\n", *str);
1375		}
1376	}
 
1377check_slabs:
1378	if (*str == ',')
1379		*slabs = ++str;
1380	else
1381		*slabs = NULL;
1382
1383	/* Skip over the slab list */
1384	while (*str && *str != ';')
1385		str++;
1386
1387	/* Skip any completely empty blocks */
1388	while (*str && *str == ';')
1389		str++;
1390
1391	if (init && higher_order_disable)
1392		disable_higher_order_debug = 1;
1393
1394	if (*str)
1395		return str;
1396	else
1397		return NULL;
1398}
1399
1400static int __init setup_slub_debug(char *str)
1401{
1402	slab_flags_t flags;
1403	slab_flags_t global_flags;
1404	char *saved_str;
1405	char *slab_list;
1406	bool global_slub_debug_changed = false;
1407	bool slab_list_specified = false;
1408
1409	global_flags = DEBUG_DEFAULT_FLAGS;
1410	if (*str++ != '=' || !*str)
1411		/*
1412		 * No options specified. Switch on full debugging.
1413		 */
1414		goto out;
1415
1416	saved_str = str;
1417	while (str) {
1418		str = parse_slub_debug_flags(str, &flags, &slab_list, true);
1419
1420		if (!slab_list) {
1421			global_flags = flags;
1422			global_slub_debug_changed = true;
1423		} else {
1424			slab_list_specified = true;
1425		}
1426	}
1427
1428	/*
1429	 * For backwards compatibility, a single list of flags with list of
1430	 * slabs means debugging is only changed for those slabs, so the global
1431	 * slub_debug should be unchanged (0 or DEBUG_DEFAULT_FLAGS, depending
1432	 * on CONFIG_SLUB_DEBUG_ON). We can extended that to multiple lists as
1433	 * long as there is no option specifying flags without a slab list.
1434	 */
1435	if (slab_list_specified) {
1436		if (!global_slub_debug_changed)
1437			global_flags = slub_debug;
1438		slub_debug_string = saved_str;
1439	}
1440out:
1441	slub_debug = global_flags;
1442	if (slub_debug != 0 || slub_debug_string)
1443		static_branch_enable(&slub_debug_enabled);
1444	else
1445		static_branch_disable(&slub_debug_enabled);
1446	if ((static_branch_unlikely(&init_on_alloc) ||
1447	     static_branch_unlikely(&init_on_free)) &&
1448	    (slub_debug & SLAB_POISON))
1449		pr_info("mem auto-init: SLAB_POISON will take precedence over init_on_alloc/init_on_free\n");
1450	return 1;
1451}
1452
1453__setup("slub_debug", setup_slub_debug);
1454
1455/*
1456 * kmem_cache_flags - apply debugging options to the cache
1457 * @object_size:	the size of an object without meta data
1458 * @flags:		flags to set
1459 * @name:		name of the cache
1460 *
1461 * Debug option(s) are applied to @flags. In addition to the debug
1462 * option(s), if a slab name (or multiple) is specified i.e.
1463 * slub_debug=<Debug-Options>,<slab name1>,<slab name2> ...
1464 * then only the select slabs will receive the debug option(s).
1465 */
1466slab_flags_t kmem_cache_flags(unsigned int object_size,
1467	slab_flags_t flags, const char *name)
 
1468{
1469	char *iter;
1470	size_t len;
1471	char *next_block;
1472	slab_flags_t block_flags;
1473	slab_flags_t slub_debug_local = slub_debug;
1474
1475	/*
1476	 * If the slab cache is for debugging (e.g. kmemleak) then
1477	 * don't store user (stack trace) information by default,
1478	 * but let the user enable it via the command line below.
1479	 */
1480	if (flags & SLAB_NOLEAKTRACE)
1481		slub_debug_local &= ~SLAB_STORE_USER;
1482
1483	len = strlen(name);
1484	next_block = slub_debug_string;
1485	/* Go through all blocks of debug options, see if any matches our slab's name */
1486	while (next_block) {
1487		next_block = parse_slub_debug_flags(next_block, &block_flags, &iter, false);
1488		if (!iter)
1489			continue;
1490		/* Found a block that has a slab list, search it */
1491		while (*iter) {
1492			char *end, *glob;
1493			size_t cmplen;
1494
1495			end = strchrnul(iter, ',');
1496			if (next_block && next_block < end)
1497				end = next_block - 1;
1498
1499			glob = strnchr(iter, end - iter, '*');
1500			if (glob)
1501				cmplen = glob - iter;
1502			else
1503				cmplen = max_t(size_t, len, (end - iter));
1504
1505			if (!strncmp(name, iter, cmplen)) {
1506				flags |= block_flags;
1507				return flags;
1508			}
1509
1510			if (!*end || *end == ';')
1511				break;
1512			iter = end + 1;
1513		}
1514	}
1515
1516	return flags | slub_debug_local;
1517}
1518#else /* !CONFIG_SLUB_DEBUG */
1519static inline void setup_object_debug(struct kmem_cache *s,
1520			struct page *page, void *object) {}
1521static inline
1522void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr) {}
1523
1524static inline int alloc_debug_processing(struct kmem_cache *s,
1525	struct page *page, void *object, unsigned long addr) { return 0; }
1526
1527static inline int free_debug_processing(
1528	struct kmem_cache *s, struct page *page,
1529	void *head, void *tail, int bulk_cnt,
1530	unsigned long addr) { return 0; }
1531
1532static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
1533			{ return 1; }
1534static inline int check_object(struct kmem_cache *s, struct page *page,
1535			void *object, u8 val) { return 1; }
1536static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
1537					struct page *page) {}
1538static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
1539					struct page *page) {}
1540slab_flags_t kmem_cache_flags(unsigned int object_size,
1541	slab_flags_t flags, const char *name)
 
1542{
1543	return flags;
1544}
1545#define slub_debug 0
1546
1547#define disable_higher_order_debug 0
1548
1549static inline unsigned long slabs_node(struct kmem_cache *s, int node)
1550							{ return 0; }
1551static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1552							{ return 0; }
1553static inline void inc_slabs_node(struct kmem_cache *s, int node,
1554							int objects) {}
1555static inline void dec_slabs_node(struct kmem_cache *s, int node,
1556							int objects) {}
1557
1558static bool freelist_corrupted(struct kmem_cache *s, struct page *page,
1559			       void **freelist, void *nextfree)
1560{
1561	return false;
1562}
1563#endif /* CONFIG_SLUB_DEBUG */
1564
1565/*
1566 * Hooks for other subsystems that check memory allocations. In a typical
1567 * production configuration these hooks all should produce no code at all.
1568 */
1569static inline void *kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
1570{
1571	ptr = kasan_kmalloc_large(ptr, size, flags);
1572	/* As ptr might get tagged, call kmemleak hook after KASAN. */
1573	kmemleak_alloc(ptr, size, 1, flags);
1574	return ptr;
1575}
1576
1577static __always_inline void kfree_hook(void *x)
1578{
1579	kmemleak_free(x);
1580	kasan_kfree_large(x);
1581}
1582
1583static __always_inline bool slab_free_hook(struct kmem_cache *s,
1584						void *x, bool init)
1585{
1586	kmemleak_free_recursive(x, s->flags);
1587
1588	/*
1589	 * Trouble is that we may no longer disable interrupts in the fast path
1590	 * So in order to make the debug calls that expect irqs to be
1591	 * disabled we need to disable interrupts temporarily.
1592	 */
1593#ifdef CONFIG_LOCKDEP
1594	{
1595		unsigned long flags;
1596
1597		local_irq_save(flags);
1598		debug_check_no_locks_freed(x, s->object_size);
1599		local_irq_restore(flags);
1600	}
1601#endif
1602	if (!(s->flags & SLAB_DEBUG_OBJECTS))
1603		debug_check_no_obj_freed(x, s->object_size);
1604
1605	/* Use KCSAN to help debug racy use-after-free. */
1606	if (!(s->flags & SLAB_TYPESAFE_BY_RCU))
1607		__kcsan_check_access(x, s->object_size,
1608				     KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT);
1609
1610	/*
1611	 * As memory initialization might be integrated into KASAN,
1612	 * kasan_slab_free and initialization memset's must be
1613	 * kept together to avoid discrepancies in behavior.
1614	 *
1615	 * The initialization memset's clear the object and the metadata,
1616	 * but don't touch the SLAB redzone.
1617	 */
1618	if (init) {
1619		int rsize;
1620
1621		if (!kasan_has_integrated_init())
1622			memset(kasan_reset_tag(x), 0, s->object_size);
1623		rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad : 0;
1624		memset((char *)kasan_reset_tag(x) + s->inuse, 0,
1625		       s->size - s->inuse - rsize);
1626	}
1627	/* KASAN might put x into memory quarantine, delaying its reuse. */
1628	return kasan_slab_free(s, x, init);
1629}
1630
1631static inline bool slab_free_freelist_hook(struct kmem_cache *s,
1632					   void **head, void **tail,
1633					   int *cnt)
1634{
 
 
 
 
 
 
 
 
1635
1636	void *object;
1637	void *next = *head;
1638	void *old_tail = *tail ? *tail : *head;
1639
1640	if (is_kfence_address(next)) {
1641		slab_free_hook(s, next, false);
1642		return true;
1643	}
1644
1645	/* Head and tail of the reconstructed freelist */
1646	*head = NULL;
1647	*tail = NULL;
1648
1649	do {
1650		object = next;
1651		next = get_freepointer(s, object);
1652
1653		/* If object's reuse doesn't have to be delayed */
1654		if (!slab_free_hook(s, object, slab_want_init_on_free(s))) {
1655			/* Move object to the new freelist */
1656			set_freepointer(s, object, *head);
1657			*head = object;
1658			if (!*tail)
1659				*tail = object;
1660		} else {
1661			/*
1662			 * Adjust the reconstructed freelist depth
1663			 * accordingly if object's reuse is delayed.
1664			 */
1665			--(*cnt);
1666		}
1667	} while (object != old_tail);
1668
1669	if (*head == *tail)
1670		*tail = NULL;
1671
1672	return *head != NULL;
 
 
 
1673}
1674
1675static void *setup_object(struct kmem_cache *s, struct page *page,
1676				void *object)
1677{
1678	setup_object_debug(s, page, object);
1679	object = kasan_init_slab_obj(s, object);
1680	if (unlikely(s->ctor)) {
1681		kasan_unpoison_object_data(s, object);
1682		s->ctor(object);
1683		kasan_poison_object_data(s, object);
1684	}
1685	return object;
1686}
1687
1688/*
1689 * Slab allocation and freeing
1690 */
1691static inline struct page *alloc_slab_page(struct kmem_cache *s,
1692		gfp_t flags, int node, struct kmem_cache_order_objects oo)
1693{
1694	struct page *page;
1695	unsigned int order = oo_order(oo);
1696
1697	if (node == NUMA_NO_NODE)
1698		page = alloc_pages(flags, order);
1699	else
1700		page = __alloc_pages_node(node, flags, order);
1701
 
 
 
 
 
1702	return page;
1703}
1704
1705#ifdef CONFIG_SLAB_FREELIST_RANDOM
1706/* Pre-initialize the random sequence cache */
1707static int init_cache_random_seq(struct kmem_cache *s)
1708{
1709	unsigned int count = oo_objects(s->oo);
1710	int err;
1711
1712	/* Bailout if already initialised */
1713	if (s->random_seq)
1714		return 0;
1715
1716	err = cache_random_seq_create(s, count, GFP_KERNEL);
1717	if (err) {
1718		pr_err("SLUB: Unable to initialize free list for %s\n",
1719			s->name);
1720		return err;
1721	}
1722
1723	/* Transform to an offset on the set of pages */
1724	if (s->random_seq) {
1725		unsigned int i;
1726
1727		for (i = 0; i < count; i++)
1728			s->random_seq[i] *= s->size;
1729	}
1730	return 0;
1731}
1732
1733/* Initialize each random sequence freelist per cache */
1734static void __init init_freelist_randomization(void)
1735{
1736	struct kmem_cache *s;
1737
1738	mutex_lock(&slab_mutex);
1739
1740	list_for_each_entry(s, &slab_caches, list)
1741		init_cache_random_seq(s);
1742
1743	mutex_unlock(&slab_mutex);
1744}
1745
1746/* Get the next entry on the pre-computed freelist randomized */
1747static void *next_freelist_entry(struct kmem_cache *s, struct page *page,
1748				unsigned long *pos, void *start,
1749				unsigned long page_limit,
1750				unsigned long freelist_count)
1751{
1752	unsigned int idx;
1753
1754	/*
1755	 * If the target page allocation failed, the number of objects on the
1756	 * page might be smaller than the usual size defined by the cache.
1757	 */
1758	do {
1759		idx = s->random_seq[*pos];
1760		*pos += 1;
1761		if (*pos >= freelist_count)
1762			*pos = 0;
1763	} while (unlikely(idx >= page_limit));
1764
1765	return (char *)start + idx;
1766}
1767
1768/* Shuffle the single linked freelist based on a random pre-computed sequence */
1769static bool shuffle_freelist(struct kmem_cache *s, struct page *page)
1770{
1771	void *start;
1772	void *cur;
1773	void *next;
1774	unsigned long idx, pos, page_limit, freelist_count;
1775
1776	if (page->objects < 2 || !s->random_seq)
1777		return false;
1778
1779	freelist_count = oo_objects(s->oo);
1780	pos = get_random_int() % freelist_count;
1781
1782	page_limit = page->objects * s->size;
1783	start = fixup_red_left(s, page_address(page));
1784
1785	/* First entry is used as the base of the freelist */
1786	cur = next_freelist_entry(s, page, &pos, start, page_limit,
1787				freelist_count);
1788	cur = setup_object(s, page, cur);
1789	page->freelist = cur;
1790
1791	for (idx = 1; idx < page->objects; idx++) {
 
1792		next = next_freelist_entry(s, page, &pos, start, page_limit,
1793			freelist_count);
1794		next = setup_object(s, page, next);
1795		set_freepointer(s, cur, next);
1796		cur = next;
1797	}
 
1798	set_freepointer(s, cur, NULL);
1799
1800	return true;
1801}
1802#else
1803static inline int init_cache_random_seq(struct kmem_cache *s)
1804{
1805	return 0;
1806}
1807static inline void init_freelist_randomization(void) { }
1808static inline bool shuffle_freelist(struct kmem_cache *s, struct page *page)
1809{
1810	return false;
1811}
1812#endif /* CONFIG_SLAB_FREELIST_RANDOM */
1813
1814static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
1815{
1816	struct page *page;
1817	struct kmem_cache_order_objects oo = s->oo;
1818	gfp_t alloc_gfp;
1819	void *start, *p, *next;
1820	int idx;
1821	bool shuffle;
1822
1823	flags &= gfp_allowed_mask;
1824
1825	if (gfpflags_allow_blocking(flags))
1826		local_irq_enable();
1827
1828	flags |= s->allocflags;
1829
1830	/*
1831	 * Let the initial higher-order allocation fail under memory pressure
1832	 * so we fall-back to the minimum order allocation.
1833	 */
1834	alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
1835	if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min))
1836		alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~(__GFP_RECLAIM|__GFP_NOFAIL);
1837
1838	page = alloc_slab_page(s, alloc_gfp, node, oo);
1839	if (unlikely(!page)) {
1840		oo = s->min;
1841		alloc_gfp = flags;
1842		/*
1843		 * Allocation may have failed due to fragmentation.
1844		 * Try a lower order alloc if possible
1845		 */
1846		page = alloc_slab_page(s, alloc_gfp, node, oo);
1847		if (unlikely(!page))
1848			goto out;
1849		stat(s, ORDER_FALLBACK);
1850	}
1851
1852	page->objects = oo_objects(oo);
1853
1854	account_slab_page(page, oo_order(oo), s, flags);
1855
1856	page->slab_cache = s;
1857	__SetPageSlab(page);
1858	if (page_is_pfmemalloc(page))
1859		SetPageSlabPfmemalloc(page);
1860
1861	kasan_poison_slab(page);
1862
1863	start = page_address(page);
 
1864
1865	setup_page_debug(s, page, start);
1866
1867	shuffle = shuffle_freelist(s, page);
1868
1869	if (!shuffle) {
1870		start = fixup_red_left(s, start);
1871		start = setup_object(s, page, start);
1872		page->freelist = start;
1873		for (idx = 0, p = start; idx < page->objects - 1; idx++) {
1874			next = p + s->size;
1875			next = setup_object(s, page, next);
1876			set_freepointer(s, p, next);
1877			p = next;
1878		}
1879		set_freepointer(s, p, NULL);
1880	}
1881
1882	page->inuse = page->objects;
1883	page->frozen = 1;
1884
1885out:
1886	if (gfpflags_allow_blocking(flags))
1887		local_irq_disable();
1888	if (!page)
1889		return NULL;
1890
 
 
 
 
 
1891	inc_slabs_node(s, page_to_nid(page), page->objects);
1892
1893	return page;
1894}
1895
1896static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
1897{
1898	if (unlikely(flags & GFP_SLAB_BUG_MASK))
1899		flags = kmalloc_fix_flags(flags);
 
 
 
 
 
1900
1901	return allocate_slab(s,
1902		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
1903}
1904
1905static void __free_slab(struct kmem_cache *s, struct page *page)
1906{
1907	int order = compound_order(page);
1908	int pages = 1 << order;
1909
1910	if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) {
1911		void *p;
1912
1913		slab_pad_check(s, page);
1914		for_each_object(p, s, page_address(page),
1915						page->objects)
1916			check_object(s, page, p, SLUB_RED_INACTIVE);
1917	}
1918
 
 
 
 
 
1919	__ClearPageSlabPfmemalloc(page);
1920	__ClearPageSlab(page);
1921	/* In union with page->mapping where page allocator expects NULL */
1922	page->slab_cache = NULL;
1923	if (current->reclaim_state)
1924		current->reclaim_state->reclaimed_slab += pages;
1925	unaccount_slab_page(page, order, s);
1926	__free_pages(page, order);
1927}
1928
 
 
 
1929static void rcu_free_slab(struct rcu_head *h)
1930{
1931	struct page *page = container_of(h, struct page, rcu_head);
 
 
 
 
 
1932
1933	__free_slab(page->slab_cache, page);
1934}
1935
1936static void free_slab(struct kmem_cache *s, struct page *page)
1937{
1938	if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) {
1939		call_rcu(&page->rcu_head, rcu_free_slab);
 
 
 
 
 
 
 
 
 
 
 
 
1940	} else
1941		__free_slab(s, page);
1942}
1943
1944static void discard_slab(struct kmem_cache *s, struct page *page)
1945{
1946	dec_slabs_node(s, page_to_nid(page), page->objects);
1947	free_slab(s, page);
1948}
1949
1950/*
1951 * Management of partially allocated slabs.
1952 */
1953static inline void
1954__add_partial(struct kmem_cache_node *n, struct page *page, int tail)
1955{
1956	n->nr_partial++;
1957	if (tail == DEACTIVATE_TO_TAIL)
1958		list_add_tail(&page->slab_list, &n->partial);
1959	else
1960		list_add(&page->slab_list, &n->partial);
1961}
1962
1963static inline void add_partial(struct kmem_cache_node *n,
1964				struct page *page, int tail)
1965{
1966	lockdep_assert_held(&n->list_lock);
1967	__add_partial(n, page, tail);
1968}
1969
1970static inline void remove_partial(struct kmem_cache_node *n,
1971					struct page *page)
1972{
1973	lockdep_assert_held(&n->list_lock);
1974	list_del(&page->slab_list);
1975	n->nr_partial--;
1976}
1977
1978/*
1979 * Remove slab from the partial list, freeze it and
1980 * return the pointer to the freelist.
1981 *
1982 * Returns a list of objects or NULL if it fails.
1983 */
1984static inline void *acquire_slab(struct kmem_cache *s,
1985		struct kmem_cache_node *n, struct page *page,
1986		int mode, int *objects)
1987{
1988	void *freelist;
1989	unsigned long counters;
1990	struct page new;
1991
1992	lockdep_assert_held(&n->list_lock);
1993
1994	/*
1995	 * Zap the freelist and set the frozen bit.
1996	 * The old freelist is the list of objects for the
1997	 * per cpu allocation list.
1998	 */
1999	freelist = page->freelist;
2000	counters = page->counters;
2001	new.counters = counters;
2002	*objects = new.objects - new.inuse;
2003	if (mode) {
2004		new.inuse = page->objects;
2005		new.freelist = NULL;
2006	} else {
2007		new.freelist = freelist;
2008	}
2009
2010	VM_BUG_ON(new.frozen);
2011	new.frozen = 1;
2012
2013	if (!__cmpxchg_double_slab(s, page,
2014			freelist, counters,
2015			new.freelist, new.counters,
2016			"acquire_slab"))
2017		return NULL;
2018
2019	remove_partial(n, page);
2020	WARN_ON(!freelist);
2021	return freelist;
2022}
2023
2024static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
2025static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
2026
2027/*
2028 * Try to allocate a partial slab from a specific node.
2029 */
2030static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
2031				struct kmem_cache_cpu *c, gfp_t flags)
2032{
2033	struct page *page, *page2;
2034	void *object = NULL;
2035	unsigned int available = 0;
2036	int objects;
2037
2038	/*
2039	 * Racy check. If we mistakenly see no partial slabs then we
2040	 * just allocate an empty slab. If we mistakenly try to get a
2041	 * partial slab and there is none available then get_partial()
2042	 * will return NULL.
2043	 */
2044	if (!n || !n->nr_partial)
2045		return NULL;
2046
2047	spin_lock(&n->list_lock);
2048	list_for_each_entry_safe(page, page2, &n->partial, slab_list) {
2049		void *t;
2050
2051		if (!pfmemalloc_match(page, flags))
2052			continue;
2053
2054		t = acquire_slab(s, n, page, object == NULL, &objects);
2055		if (!t)
2056			break;
2057
2058		available += objects;
2059		if (!object) {
2060			c->page = page;
2061			stat(s, ALLOC_FROM_PARTIAL);
2062			object = t;
2063		} else {
2064			put_cpu_partial(s, page, 0);
2065			stat(s, CPU_PARTIAL_NODE);
2066		}
2067		if (!kmem_cache_has_cpu_partial(s)
2068			|| available > slub_cpu_partial(s) / 2)
2069			break;
2070
2071	}
2072	spin_unlock(&n->list_lock);
2073	return object;
2074}
2075
2076/*
2077 * Get a page from somewhere. Search in increasing NUMA distances.
2078 */
2079static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
2080		struct kmem_cache_cpu *c)
2081{
2082#ifdef CONFIG_NUMA
2083	struct zonelist *zonelist;
2084	struct zoneref *z;
2085	struct zone *zone;
2086	enum zone_type highest_zoneidx = gfp_zone(flags);
2087	void *object;
2088	unsigned int cpuset_mems_cookie;
2089
2090	/*
2091	 * The defrag ratio allows a configuration of the tradeoffs between
2092	 * inter node defragmentation and node local allocations. A lower
2093	 * defrag_ratio increases the tendency to do local allocations
2094	 * instead of attempting to obtain partial slabs from other nodes.
2095	 *
2096	 * If the defrag_ratio is set to 0 then kmalloc() always
2097	 * returns node local objects. If the ratio is higher then kmalloc()
2098	 * may return off node objects because partial slabs are obtained
2099	 * from other nodes and filled up.
2100	 *
2101	 * If /sys/kernel/slab/xx/remote_node_defrag_ratio is set to 100
2102	 * (which makes defrag_ratio = 1000) then every (well almost)
2103	 * allocation will first attempt to defrag slab caches on other nodes.
2104	 * This means scanning over all nodes to look for partial slabs which
2105	 * may be expensive if we do it every time we are trying to find a slab
2106	 * with available objects.
2107	 */
2108	if (!s->remote_node_defrag_ratio ||
2109			get_cycles() % 1024 > s->remote_node_defrag_ratio)
2110		return NULL;
2111
2112	do {
2113		cpuset_mems_cookie = read_mems_allowed_begin();
2114		zonelist = node_zonelist(mempolicy_slab_node(), flags);
2115		for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {
2116			struct kmem_cache_node *n;
2117
2118			n = get_node(s, zone_to_nid(zone));
2119
2120			if (n && cpuset_zone_allowed(zone, flags) &&
2121					n->nr_partial > s->min_partial) {
2122				object = get_partial_node(s, n, c, flags);
2123				if (object) {
2124					/*
2125					 * Don't check read_mems_allowed_retry()
2126					 * here - if mems_allowed was updated in
2127					 * parallel, that was a harmless race
2128					 * between allocation and the cpuset
2129					 * update
2130					 */
2131					return object;
2132				}
2133			}
2134		}
2135	} while (read_mems_allowed_retry(cpuset_mems_cookie));
2136#endif	/* CONFIG_NUMA */
2137	return NULL;
2138}
2139
2140/*
2141 * Get a partial page, lock it and return it.
2142 */
2143static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
2144		struct kmem_cache_cpu *c)
2145{
2146	void *object;
2147	int searchnode = node;
2148
2149	if (node == NUMA_NO_NODE)
2150		searchnode = numa_mem_id();
 
 
2151
2152	object = get_partial_node(s, get_node(s, searchnode), c, flags);
2153	if (object || node != NUMA_NO_NODE)
2154		return object;
2155
2156	return get_any_partial(s, flags, c);
2157}
2158
2159#ifdef CONFIG_PREEMPTION
2160/*
2161 * Calculate the next globally unique transaction for disambiguation
2162 * during cmpxchg. The transactions start with the cpu number and are then
2163 * incremented by CONFIG_NR_CPUS.
2164 */
2165#define TID_STEP  roundup_pow_of_two(CONFIG_NR_CPUS)
2166#else
2167/*
2168 * No preemption supported therefore also no need to check for
2169 * different cpus.
2170 */
2171#define TID_STEP 1
2172#endif
2173
2174static inline unsigned long next_tid(unsigned long tid)
2175{
2176	return tid + TID_STEP;
2177}
2178
2179#ifdef SLUB_DEBUG_CMPXCHG
2180static inline unsigned int tid_to_cpu(unsigned long tid)
2181{
2182	return tid % TID_STEP;
2183}
2184
2185static inline unsigned long tid_to_event(unsigned long tid)
2186{
2187	return tid / TID_STEP;
2188}
2189#endif
2190
2191static inline unsigned int init_tid(int cpu)
2192{
2193	return cpu;
2194}
2195
2196static inline void note_cmpxchg_failure(const char *n,
2197		const struct kmem_cache *s, unsigned long tid)
2198{
2199#ifdef SLUB_DEBUG_CMPXCHG
2200	unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid);
2201
2202	pr_info("%s %s: cmpxchg redo ", n, s->name);
2203
2204#ifdef CONFIG_PREEMPTION
2205	if (tid_to_cpu(tid) != tid_to_cpu(actual_tid))
2206		pr_warn("due to cpu change %d -> %d\n",
2207			tid_to_cpu(tid), tid_to_cpu(actual_tid));
2208	else
2209#endif
2210	if (tid_to_event(tid) != tid_to_event(actual_tid))
2211		pr_warn("due to cpu running other code. Event %ld->%ld\n",
2212			tid_to_event(tid), tid_to_event(actual_tid));
2213	else
2214		pr_warn("for unknown reason: actual=%lx was=%lx target=%lx\n",
2215			actual_tid, tid, next_tid(tid));
2216#endif
2217	stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
2218}
2219
2220static void init_kmem_cache_cpus(struct kmem_cache *s)
2221{
2222	int cpu;
2223
2224	for_each_possible_cpu(cpu)
2225		per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu);
2226}
2227
2228/*
2229 * Remove the cpu slab
2230 */
2231static void deactivate_slab(struct kmem_cache *s, struct page *page,
2232				void *freelist, struct kmem_cache_cpu *c)
2233{
2234	enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
2235	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
2236	int lock = 0, free_delta = 0;
2237	enum slab_modes l = M_NONE, m = M_NONE;
2238	void *nextfree, *freelist_iter, *freelist_tail;
2239	int tail = DEACTIVATE_TO_HEAD;
2240	struct page new;
2241	struct page old;
2242
2243	if (page->freelist) {
2244		stat(s, DEACTIVATE_REMOTE_FREES);
2245		tail = DEACTIVATE_TO_TAIL;
2246	}
2247
2248	/*
2249	 * Stage one: Count the objects on cpu's freelist as free_delta and
2250	 * remember the last object in freelist_tail for later splicing.
 
 
 
 
2251	 */
2252	freelist_tail = NULL;
2253	freelist_iter = freelist;
2254	while (freelist_iter) {
2255		nextfree = get_freepointer(s, freelist_iter);
2256
2257		/*
2258		 * If 'nextfree' is invalid, it is possible that the object at
2259		 * 'freelist_iter' is already corrupted.  So isolate all objects
2260		 * starting at 'freelist_iter' by skipping them.
2261		 */
2262		if (freelist_corrupted(s, page, &freelist_iter, nextfree))
2263			break;
2264
2265		freelist_tail = freelist_iter;
2266		free_delta++;
 
 
2267
2268		freelist_iter = nextfree;
2269	}
2270
2271	/*
2272	 * Stage two: Unfreeze the page while splicing the per-cpu
2273	 * freelist to the head of page's freelist.
2274	 *
2275	 * Ensure that the page is unfrozen while the list presence
2276	 * reflects the actual number of objects during unfreeze.
2277	 *
2278	 * We setup the list membership and then perform a cmpxchg
2279	 * with the count. If there is a mismatch then the page
2280	 * is not unfrozen but the page is on the wrong list.
2281	 *
2282	 * Then we restart the process which may have to remove
2283	 * the page from the list that we just put it on again
2284	 * because the number of objects in the slab may have
2285	 * changed.
2286	 */
2287redo:
2288
2289	old.freelist = READ_ONCE(page->freelist);
2290	old.counters = READ_ONCE(page->counters);
2291	VM_BUG_ON(!old.frozen);
2292
2293	/* Determine target state of the slab */
2294	new.counters = old.counters;
2295	if (freelist_tail) {
2296		new.inuse -= free_delta;
2297		set_freepointer(s, freelist_tail, old.freelist);
2298		new.freelist = freelist;
2299	} else
2300		new.freelist = old.freelist;
2301
2302	new.frozen = 0;
2303
2304	if (!new.inuse && n->nr_partial >= s->min_partial)
2305		m = M_FREE;
2306	else if (new.freelist) {
2307		m = M_PARTIAL;
2308		if (!lock) {
2309			lock = 1;
2310			/*
2311			 * Taking the spinlock removes the possibility
2312			 * that acquire_slab() will see a slab page that
2313			 * is frozen
2314			 */
2315			spin_lock(&n->list_lock);
2316		}
2317	} else {
2318		m = M_FULL;
2319		if (kmem_cache_debug_flags(s, SLAB_STORE_USER) && !lock) {
2320			lock = 1;
2321			/*
2322			 * This also ensures that the scanning of full
2323			 * slabs from diagnostic functions will not see
2324			 * any frozen slabs.
2325			 */
2326			spin_lock(&n->list_lock);
2327		}
2328	}
2329
2330	if (l != m) {
 
2331		if (l == M_PARTIAL)
 
2332			remove_partial(n, page);
 
2333		else if (l == M_FULL)
 
2334			remove_full(s, n, page);
2335
2336		if (m == M_PARTIAL)
 
2337			add_partial(n, page, tail);
2338		else if (m == M_FULL)
 
 
 
 
2339			add_full(s, n, page);
 
 
2340	}
2341
2342	l = m;
2343	if (!__cmpxchg_double_slab(s, page,
2344				old.freelist, old.counters,
2345				new.freelist, new.counters,
2346				"unfreezing slab"))
2347		goto redo;
2348
2349	if (lock)
2350		spin_unlock(&n->list_lock);
2351
2352	if (m == M_PARTIAL)
2353		stat(s, tail);
2354	else if (m == M_FULL)
2355		stat(s, DEACTIVATE_FULL);
2356	else if (m == M_FREE) {
2357		stat(s, DEACTIVATE_EMPTY);
2358		discard_slab(s, page);
2359		stat(s, FREE_SLAB);
2360	}
2361
2362	c->page = NULL;
2363	c->freelist = NULL;
2364}
2365
2366/*
2367 * Unfreeze all the cpu partial slabs.
2368 *
2369 * This function must be called with interrupts disabled
2370 * for the cpu using c (or some other guarantee must be there
2371 * to guarantee no concurrent accesses).
2372 */
2373static void unfreeze_partials(struct kmem_cache *s,
2374		struct kmem_cache_cpu *c)
2375{
2376#ifdef CONFIG_SLUB_CPU_PARTIAL
2377	struct kmem_cache_node *n = NULL, *n2 = NULL;
2378	struct page *page, *discard_page = NULL;
2379
2380	while ((page = slub_percpu_partial(c))) {
2381		struct page new;
2382		struct page old;
2383
2384		slub_set_percpu_partial(c, page);
2385
2386		n2 = get_node(s, page_to_nid(page));
2387		if (n != n2) {
2388			if (n)
2389				spin_unlock(&n->list_lock);
2390
2391			n = n2;
2392			spin_lock(&n->list_lock);
2393		}
2394
2395		do {
2396
2397			old.freelist = page->freelist;
2398			old.counters = page->counters;
2399			VM_BUG_ON(!old.frozen);
2400
2401			new.counters = old.counters;
2402			new.freelist = old.freelist;
2403
2404			new.frozen = 0;
2405
2406		} while (!__cmpxchg_double_slab(s, page,
2407				old.freelist, old.counters,
2408				new.freelist, new.counters,
2409				"unfreezing slab"));
2410
2411		if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) {
2412			page->next = discard_page;
2413			discard_page = page;
2414		} else {
2415			add_partial(n, page, DEACTIVATE_TO_TAIL);
2416			stat(s, FREE_ADD_PARTIAL);
2417		}
2418	}
2419
2420	if (n)
2421		spin_unlock(&n->list_lock);
2422
2423	while (discard_page) {
2424		page = discard_page;
2425		discard_page = discard_page->next;
2426
2427		stat(s, DEACTIVATE_EMPTY);
2428		discard_slab(s, page);
2429		stat(s, FREE_SLAB);
2430	}
2431#endif	/* CONFIG_SLUB_CPU_PARTIAL */
2432}
2433
2434/*
2435 * Put a page that was just frozen (in __slab_free|get_partial_node) into a
2436 * partial page slot if available.
2437 *
2438 * If we did not find a slot then simply move all the partials to the
2439 * per node partial list.
2440 */
2441static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
2442{
2443#ifdef CONFIG_SLUB_CPU_PARTIAL
2444	struct page *oldpage;
2445	int pages;
2446	int pobjects;
2447
2448	preempt_disable();
2449	do {
2450		pages = 0;
2451		pobjects = 0;
2452		oldpage = this_cpu_read(s->cpu_slab->partial);
2453
2454		if (oldpage) {
2455			pobjects = oldpage->pobjects;
2456			pages = oldpage->pages;
2457			if (drain && pobjects > slub_cpu_partial(s)) {
2458				unsigned long flags;
2459				/*
2460				 * partial array is full. Move the existing
2461				 * set to the per node partial list.
2462				 */
2463				local_irq_save(flags);
2464				unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
2465				local_irq_restore(flags);
2466				oldpage = NULL;
2467				pobjects = 0;
2468				pages = 0;
2469				stat(s, CPU_PARTIAL_DRAIN);
2470			}
2471		}
2472
2473		pages++;
2474		pobjects += page->objects - page->inuse;
2475
2476		page->pages = pages;
2477		page->pobjects = pobjects;
2478		page->next = oldpage;
2479
2480	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page)
2481								!= oldpage);
2482	if (unlikely(!slub_cpu_partial(s))) {
2483		unsigned long flags;
2484
2485		local_irq_save(flags);
2486		unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
2487		local_irq_restore(flags);
2488	}
2489	preempt_enable();
2490#endif	/* CONFIG_SLUB_CPU_PARTIAL */
2491}
2492
2493static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
2494{
2495	stat(s, CPUSLAB_FLUSH);
2496	deactivate_slab(s, c->page, c->freelist, c);
2497
2498	c->tid = next_tid(c->tid);
2499}
2500
2501/*
2502 * Flush cpu slab.
2503 *
2504 * Called from IPI handler with interrupts disabled.
2505 */
2506static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
2507{
2508	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
2509
2510	if (c->page)
2511		flush_slab(s, c);
 
2512
2513	unfreeze_partials(s, c);
 
2514}
2515
2516static void flush_cpu_slab(void *d)
2517{
2518	struct kmem_cache *s = d;
2519
2520	__flush_cpu_slab(s, smp_processor_id());
2521}
2522
2523static bool has_cpu_slab(int cpu, void *info)
2524{
2525	struct kmem_cache *s = info;
2526	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
2527
2528	return c->page || slub_percpu_partial(c);
2529}
2530
2531static void flush_all(struct kmem_cache *s)
2532{
2533	on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1);
2534}
2535
2536/*
2537 * Use the cpu notifier to insure that the cpu slabs are flushed when
2538 * necessary.
2539 */
2540static int slub_cpu_dead(unsigned int cpu)
2541{
2542	struct kmem_cache *s;
2543	unsigned long flags;
2544
2545	mutex_lock(&slab_mutex);
2546	list_for_each_entry(s, &slab_caches, list) {
2547		local_irq_save(flags);
2548		__flush_cpu_slab(s, cpu);
2549		local_irq_restore(flags);
2550	}
2551	mutex_unlock(&slab_mutex);
2552	return 0;
2553}
2554
2555/*
2556 * Check if the objects in a per cpu structure fit numa
2557 * locality expectations.
2558 */
2559static inline int node_match(struct page *page, int node)
2560{
2561#ifdef CONFIG_NUMA
2562	if (node != NUMA_NO_NODE && page_to_nid(page) != node)
2563		return 0;
2564#endif
2565	return 1;
2566}
2567
2568#ifdef CONFIG_SLUB_DEBUG
2569static int count_free(struct page *page)
2570{
2571	return page->objects - page->inuse;
2572}
2573
2574static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
2575{
2576	return atomic_long_read(&n->total_objects);
2577}
2578#endif /* CONFIG_SLUB_DEBUG */
2579
2580#if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SYSFS)
2581static unsigned long count_partial(struct kmem_cache_node *n,
2582					int (*get_count)(struct page *))
2583{
2584	unsigned long flags;
2585	unsigned long x = 0;
2586	struct page *page;
2587
2588	spin_lock_irqsave(&n->list_lock, flags);
2589	list_for_each_entry(page, &n->partial, slab_list)
2590		x += get_count(page);
2591	spin_unlock_irqrestore(&n->list_lock, flags);
2592	return x;
2593}
2594#endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
2595
2596static noinline void
2597slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
2598{
2599#ifdef CONFIG_SLUB_DEBUG
2600	static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
2601				      DEFAULT_RATELIMIT_BURST);
2602	int node;
2603	struct kmem_cache_node *n;
2604
2605	if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs))
2606		return;
2607
2608	pr_warn("SLUB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
2609		nid, gfpflags, &gfpflags);
2610	pr_warn("  cache: %s, object size: %u, buffer size: %u, default order: %u, min order: %u\n",
2611		s->name, s->object_size, s->size, oo_order(s->oo),
2612		oo_order(s->min));
2613
2614	if (oo_order(s->min) > get_order(s->object_size))
2615		pr_warn("  %s debugging increased min order, use slub_debug=O to disable.\n",
2616			s->name);
2617
2618	for_each_kmem_cache_node(s, node, n) {
2619		unsigned long nr_slabs;
2620		unsigned long nr_objs;
2621		unsigned long nr_free;
2622
2623		nr_free  = count_partial(n, count_free);
2624		nr_slabs = node_nr_slabs(n);
2625		nr_objs  = node_nr_objs(n);
2626
2627		pr_warn("  node %d: slabs: %ld, objs: %ld, free: %ld\n",
2628			node, nr_slabs, nr_objs, nr_free);
2629	}
2630#endif
2631}
2632
2633static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
2634			int node, struct kmem_cache_cpu **pc)
2635{
2636	void *freelist;
2637	struct kmem_cache_cpu *c = *pc;
2638	struct page *page;
2639
2640	WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO));
2641
2642	freelist = get_partial(s, flags, node, c);
2643
2644	if (freelist)
2645		return freelist;
2646
2647	page = new_slab(s, flags, node);
2648	if (page) {
2649		c = raw_cpu_ptr(s->cpu_slab);
2650		if (c->page)
2651			flush_slab(s, c);
2652
2653		/*
2654		 * No other reference to the page yet so we can
2655		 * muck around with it freely without cmpxchg
2656		 */
2657		freelist = page->freelist;
2658		page->freelist = NULL;
2659
2660		stat(s, ALLOC_SLAB);
2661		c->page = page;
2662		*pc = c;
2663	}
 
2664
2665	return freelist;
2666}
2667
2668static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
2669{
2670	if (unlikely(PageSlabPfmemalloc(page)))
2671		return gfp_pfmemalloc_allowed(gfpflags);
2672
2673	return true;
2674}
2675
2676/*
2677 * Check the page->freelist of a page and either transfer the freelist to the
2678 * per cpu freelist or deactivate the page.
2679 *
2680 * The page is still frozen if the return value is not NULL.
2681 *
2682 * If this function returns NULL then the page has been unfrozen.
2683 *
2684 * This function must be called with interrupt disabled.
2685 */
2686static inline void *get_freelist(struct kmem_cache *s, struct page *page)
2687{
2688	struct page new;
2689	unsigned long counters;
2690	void *freelist;
2691
2692	do {
2693		freelist = page->freelist;
2694		counters = page->counters;
2695
2696		new.counters = counters;
2697		VM_BUG_ON(!new.frozen);
2698
2699		new.inuse = page->objects;
2700		new.frozen = freelist != NULL;
2701
2702	} while (!__cmpxchg_double_slab(s, page,
2703		freelist, counters,
2704		NULL, new.counters,
2705		"get_freelist"));
2706
2707	return freelist;
2708}
2709
2710/*
2711 * Slow path. The lockless freelist is empty or we need to perform
2712 * debugging duties.
2713 *
2714 * Processing is still very fast if new objects have been freed to the
2715 * regular freelist. In that case we simply take over the regular freelist
2716 * as the lockless freelist and zap the regular freelist.
2717 *
2718 * If that is not working then we fall back to the partial lists. We take the
2719 * first element of the freelist as the object to allocate now and move the
2720 * rest of the freelist to the lockless freelist.
2721 *
2722 * And if we were unable to get a new slab from the partial slab lists then
2723 * we need to allocate a new slab. This is the slowest path since it involves
2724 * a call to the page allocator and the setup of a new slab.
2725 *
2726 * Version of __slab_alloc to use when we know that interrupts are
2727 * already disabled (which is the case for bulk allocation).
2728 */
2729static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
2730			  unsigned long addr, struct kmem_cache_cpu *c)
2731{
2732	void *freelist;
2733	struct page *page;
2734
2735	stat(s, ALLOC_SLOWPATH);
2736
2737	page = c->page;
2738	if (!page) {
2739		/*
2740		 * if the node is not online or has no normal memory, just
2741		 * ignore the node constraint
2742		 */
2743		if (unlikely(node != NUMA_NO_NODE &&
2744			     !node_isset(node, slab_nodes)))
2745			node = NUMA_NO_NODE;
2746		goto new_slab;
2747	}
2748redo:
2749
2750	if (unlikely(!node_match(page, node))) {
2751		/*
2752		 * same as above but node_match() being false already
2753		 * implies node != NUMA_NO_NODE
2754		 */
2755		if (!node_isset(node, slab_nodes)) {
2756			node = NUMA_NO_NODE;
2757			goto redo;
2758		} else {
2759			stat(s, ALLOC_NODE_MISMATCH);
2760			deactivate_slab(s, page, c->freelist, c);
2761			goto new_slab;
2762		}
2763	}
2764
2765	/*
2766	 * By rights, we should be searching for a slab page that was
2767	 * PFMEMALLOC but right now, we are losing the pfmemalloc
2768	 * information when the page leaves the per-cpu allocator
2769	 */
2770	if (unlikely(!pfmemalloc_match(page, gfpflags))) {
2771		deactivate_slab(s, page, c->freelist, c);
2772		goto new_slab;
2773	}
2774
2775	/* must check again c->freelist in case of cpu migration or IRQ */
2776	freelist = c->freelist;
2777	if (freelist)
2778		goto load_freelist;
2779
2780	freelist = get_freelist(s, page);
2781
2782	if (!freelist) {
2783		c->page = NULL;
2784		stat(s, DEACTIVATE_BYPASS);
2785		goto new_slab;
2786	}
2787
2788	stat(s, ALLOC_REFILL);
2789
2790load_freelist:
2791	/*
2792	 * freelist is pointing to the list of objects to be used.
2793	 * page is pointing to the page from which the objects are obtained.
2794	 * That page must be frozen for per cpu allocations to work.
2795	 */
2796	VM_BUG_ON(!c->page->frozen);
2797	c->freelist = get_freepointer(s, freelist);
2798	c->tid = next_tid(c->tid);
2799	return freelist;
2800
2801new_slab:
2802
2803	if (slub_percpu_partial(c)) {
2804		page = c->page = slub_percpu_partial(c);
2805		slub_set_percpu_partial(c, page);
2806		stat(s, CPU_PARTIAL_ALLOC);
2807		goto redo;
2808	}
2809
2810	freelist = new_slab_objects(s, gfpflags, node, &c);
2811
2812	if (unlikely(!freelist)) {
2813		slab_out_of_memory(s, gfpflags, node);
2814		return NULL;
2815	}
2816
2817	page = c->page;
2818	if (likely(!kmem_cache_debug(s) && pfmemalloc_match(page, gfpflags)))
2819		goto load_freelist;
2820
2821	/* Only entered in the debug case */
2822	if (kmem_cache_debug(s) &&
2823			!alloc_debug_processing(s, page, freelist, addr))
2824		goto new_slab;	/* Slab failed checks. Next slab needed */
2825
2826	deactivate_slab(s, page, get_freepointer(s, freelist), c);
2827	return freelist;
2828}
2829
2830/*
2831 * Another one that disabled interrupt and compensates for possible
2832 * cpu changes by refetching the per cpu area pointer.
2833 */
2834static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
2835			  unsigned long addr, struct kmem_cache_cpu *c)
2836{
2837	void *p;
2838	unsigned long flags;
2839
2840	local_irq_save(flags);
2841#ifdef CONFIG_PREEMPTION
2842	/*
2843	 * We may have been preempted and rescheduled on a different
2844	 * cpu before disabling interrupts. Need to reload cpu area
2845	 * pointer.
2846	 */
2847	c = this_cpu_ptr(s->cpu_slab);
2848#endif
2849
2850	p = ___slab_alloc(s, gfpflags, node, addr, c);
2851	local_irq_restore(flags);
2852	return p;
2853}
2854
2855/*
2856 * If the object has been wiped upon free, make sure it's fully initialized by
2857 * zeroing out freelist pointer.
2858 */
2859static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
2860						   void *obj)
2861{
2862	if (unlikely(slab_want_init_on_free(s)) && obj)
2863		memset((void *)((char *)kasan_reset_tag(obj) + s->offset),
2864			0, sizeof(void *));
2865}
2866
2867/*
2868 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
2869 * have the fastpath folded into their functions. So no function call
2870 * overhead for requests that can be satisfied on the fastpath.
2871 *
2872 * The fastpath works by first checking if the lockless freelist can be used.
2873 * If not then __slab_alloc is called for slow processing.
2874 *
2875 * Otherwise we can simply pick the next object from the lockless free list.
2876 */
2877static __always_inline void *slab_alloc_node(struct kmem_cache *s,
2878		gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
2879{
2880	void *object;
2881	struct kmem_cache_cpu *c;
2882	struct page *page;
2883	unsigned long tid;
2884	struct obj_cgroup *objcg = NULL;
2885	bool init = false;
2886
2887	s = slab_pre_alloc_hook(s, &objcg, 1, gfpflags);
2888	if (!s)
2889		return NULL;
2890
2891	object = kfence_alloc(s, orig_size, gfpflags);
2892	if (unlikely(object))
2893		goto out;
2894
2895redo:
2896	/*
2897	 * Must read kmem_cache cpu data via this cpu ptr. Preemption is
2898	 * enabled. We may switch back and forth between cpus while
2899	 * reading from one cpu area. That does not matter as long
2900	 * as we end up on the original cpu again when doing the cmpxchg.
2901	 *
2902	 * We should guarantee that tid and kmem_cache are retrieved on
2903	 * the same cpu. It could be different if CONFIG_PREEMPTION so we need
2904	 * to check if it is matched or not.
2905	 */
2906	do {
2907		tid = this_cpu_read(s->cpu_slab->tid);
2908		c = raw_cpu_ptr(s->cpu_slab);
2909	} while (IS_ENABLED(CONFIG_PREEMPTION) &&
2910		 unlikely(tid != READ_ONCE(c->tid)));
2911
2912	/*
2913	 * Irqless object alloc/free algorithm used here depends on sequence
2914	 * of fetching cpu_slab's data. tid should be fetched before anything
2915	 * on c to guarantee that object and page associated with previous tid
2916	 * won't be used with current tid. If we fetch tid first, object and
2917	 * page could be one associated with next tid and our alloc/free
2918	 * request will be failed. In this case, we will retry. So, no problem.
2919	 */
2920	barrier();
2921
2922	/*
2923	 * The transaction ids are globally unique per cpu and per operation on
2924	 * a per cpu queue. Thus they can be guarantee that the cmpxchg_double
2925	 * occurs on the right processor and that there was no operation on the
2926	 * linked list in between.
2927	 */
2928
2929	object = c->freelist;
2930	page = c->page;
2931	if (unlikely(!object || !page || !node_match(page, node))) {
2932		object = __slab_alloc(s, gfpflags, node, addr, c);
 
2933	} else {
2934		void *next_object = get_freepointer_safe(s, object);
2935
2936		/*
2937		 * The cmpxchg will only match if there was no additional
2938		 * operation and if we are on the right processor.
2939		 *
2940		 * The cmpxchg does the following atomically (without lock
2941		 * semantics!)
2942		 * 1. Relocate first pointer to the current per cpu area.
2943		 * 2. Verify that tid and freelist have not been changed
2944		 * 3. If they were not changed replace tid and freelist
2945		 *
2946		 * Since this is without lock semantics the protection is only
2947		 * against code executing on this cpu *not* from access by
2948		 * other cpus.
2949		 */
2950		if (unlikely(!this_cpu_cmpxchg_double(
2951				s->cpu_slab->freelist, s->cpu_slab->tid,
2952				object, tid,
2953				next_object, next_tid(tid)))) {
2954
2955			note_cmpxchg_failure("slab_alloc", s, tid);
2956			goto redo;
2957		}
2958		prefetch_freepointer(s, next_object);
2959		stat(s, ALLOC_FASTPATH);
2960	}
2961
2962	maybe_wipe_obj_freeptr(s, object);
2963	init = slab_want_init_on_alloc(gfpflags, s);
2964
2965out:
2966	slab_post_alloc_hook(s, objcg, gfpflags, 1, &object, init);
2967
2968	return object;
2969}
2970
2971static __always_inline void *slab_alloc(struct kmem_cache *s,
2972		gfp_t gfpflags, unsigned long addr, size_t orig_size)
2973{
2974	return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr, orig_size);
2975}
2976
2977void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
2978{
2979	void *ret = slab_alloc(s, gfpflags, _RET_IP_, s->object_size);
2980
2981	trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size,
2982				s->size, gfpflags);
2983
2984	return ret;
2985}
2986EXPORT_SYMBOL(kmem_cache_alloc);
2987
2988#ifdef CONFIG_TRACING
2989void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
2990{
2991	void *ret = slab_alloc(s, gfpflags, _RET_IP_, size);
2992	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
2993	ret = kasan_kmalloc(s, ret, size, gfpflags);
2994	return ret;
2995}
2996EXPORT_SYMBOL(kmem_cache_alloc_trace);
2997#endif
2998
2999#ifdef CONFIG_NUMA
3000void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
3001{
3002	void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_, s->object_size);
3003
3004	trace_kmem_cache_alloc_node(_RET_IP_, ret,
3005				    s->object_size, s->size, gfpflags, node);
3006
3007	return ret;
3008}
3009EXPORT_SYMBOL(kmem_cache_alloc_node);
3010
3011#ifdef CONFIG_TRACING
3012void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
3013				    gfp_t gfpflags,
3014				    int node, size_t size)
3015{
3016	void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_, size);
3017
3018	trace_kmalloc_node(_RET_IP_, ret,
3019			   size, s->size, gfpflags, node);
3020
3021	ret = kasan_kmalloc(s, ret, size, gfpflags);
3022	return ret;
3023}
3024EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
3025#endif
3026#endif	/* CONFIG_NUMA */
3027
3028/*
3029 * Slow path handling. This may still be called frequently since objects
3030 * have a longer lifetime than the cpu slabs in most processing loads.
3031 *
3032 * So we still attempt to reduce cache line usage. Just take the slab
3033 * lock and free the item. If there is no additional partial page
3034 * handling required then we can return immediately.
3035 */
3036static void __slab_free(struct kmem_cache *s, struct page *page,
3037			void *head, void *tail, int cnt,
3038			unsigned long addr)
3039
3040{
3041	void *prior;
3042	int was_frozen;
3043	struct page new;
3044	unsigned long counters;
3045	struct kmem_cache_node *n = NULL;
3046	unsigned long flags;
3047
3048	stat(s, FREE_SLOWPATH);
3049
3050	if (kfence_free(head))
3051		return;
3052
3053	if (kmem_cache_debug(s) &&
3054	    !free_debug_processing(s, page, head, tail, cnt, addr))
3055		return;
3056
3057	do {
3058		if (unlikely(n)) {
3059			spin_unlock_irqrestore(&n->list_lock, flags);
3060			n = NULL;
3061		}
3062		prior = page->freelist;
3063		counters = page->counters;
3064		set_freepointer(s, tail, prior);
3065		new.counters = counters;
3066		was_frozen = new.frozen;
3067		new.inuse -= cnt;
3068		if ((!new.inuse || !prior) && !was_frozen) {
3069
3070			if (kmem_cache_has_cpu_partial(s) && !prior) {
3071
3072				/*
3073				 * Slab was on no list before and will be
3074				 * partially empty
3075				 * We can defer the list move and instead
3076				 * freeze it.
3077				 */
3078				new.frozen = 1;
3079
3080			} else { /* Needs to be taken off a list */
3081
3082				n = get_node(s, page_to_nid(page));
3083				/*
3084				 * Speculatively acquire the list_lock.
3085				 * If the cmpxchg does not succeed then we may
3086				 * drop the list_lock without any processing.
3087				 *
3088				 * Otherwise the list_lock will synchronize with
3089				 * other processors updating the list of slabs.
3090				 */
3091				spin_lock_irqsave(&n->list_lock, flags);
3092
3093			}
3094		}
3095
3096	} while (!cmpxchg_double_slab(s, page,
3097		prior, counters,
3098		head, new.counters,
3099		"__slab_free"));
3100
3101	if (likely(!n)) {
3102
3103		if (likely(was_frozen)) {
3104			/*
3105			 * The list lock was not taken therefore no list
3106			 * activity can be necessary.
3107			 */
3108			stat(s, FREE_FROZEN);
3109		} else if (new.frozen) {
3110			/*
3111			 * If we just froze the page then put it onto the
3112			 * per cpu partial list.
3113			 */
3114			put_cpu_partial(s, page, 1);
3115			stat(s, CPU_PARTIAL_FREE);
3116		}
3117
 
 
 
 
 
3118		return;
3119	}
3120
3121	if (unlikely(!new.inuse && n->nr_partial >= s->min_partial))
3122		goto slab_empty;
3123
3124	/*
3125	 * Objects left in the slab. If it was not on the partial list before
3126	 * then add it.
3127	 */
3128	if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
3129		remove_full(s, n, page);
 
3130		add_partial(n, page, DEACTIVATE_TO_TAIL);
3131		stat(s, FREE_ADD_PARTIAL);
3132	}
3133	spin_unlock_irqrestore(&n->list_lock, flags);
3134	return;
3135
3136slab_empty:
3137	if (prior) {
3138		/*
3139		 * Slab on the partial list.
3140		 */
3141		remove_partial(n, page);
3142		stat(s, FREE_REMOVE_PARTIAL);
3143	} else {
3144		/* Slab must be on the full list */
3145		remove_full(s, n, page);
3146	}
3147
3148	spin_unlock_irqrestore(&n->list_lock, flags);
3149	stat(s, FREE_SLAB);
3150	discard_slab(s, page);
3151}
3152
3153/*
3154 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
3155 * can perform fastpath freeing without additional function calls.
3156 *
3157 * The fastpath is only possible if we are freeing to the current cpu slab
3158 * of this processor. This typically the case if we have just allocated
3159 * the item before.
3160 *
3161 * If fastpath is not possible then fall back to __slab_free where we deal
3162 * with all sorts of special processing.
3163 *
3164 * Bulk free of a freelist with several objects (all pointing to the
3165 * same page) possible by specifying head and tail ptr, plus objects
3166 * count (cnt). Bulk free indicated by tail pointer being set.
3167 */
3168static __always_inline void do_slab_free(struct kmem_cache *s,
3169				struct page *page, void *head, void *tail,
3170				int cnt, unsigned long addr)
3171{
3172	void *tail_obj = tail ? : head;
3173	struct kmem_cache_cpu *c;
3174	unsigned long tid;
3175
3176	/* memcg_slab_free_hook() is already called for bulk free. */
3177	if (!tail)
3178		memcg_slab_free_hook(s, &head, 1);
3179redo:
3180	/*
3181	 * Determine the currently cpus per cpu slab.
3182	 * The cpu may change afterward. However that does not matter since
3183	 * data is retrieved via this pointer. If we are on the same cpu
3184	 * during the cmpxchg then the free will succeed.
3185	 */
3186	do {
3187		tid = this_cpu_read(s->cpu_slab->tid);
3188		c = raw_cpu_ptr(s->cpu_slab);
3189	} while (IS_ENABLED(CONFIG_PREEMPTION) &&
3190		 unlikely(tid != READ_ONCE(c->tid)));
3191
3192	/* Same with comment on barrier() in slab_alloc_node() */
3193	barrier();
3194
3195	if (likely(page == c->page)) {
3196		void **freelist = READ_ONCE(c->freelist);
3197
3198		set_freepointer(s, tail_obj, freelist);
3199
3200		if (unlikely(!this_cpu_cmpxchg_double(
3201				s->cpu_slab->freelist, s->cpu_slab->tid,
3202				freelist, tid,
3203				head, next_tid(tid)))) {
3204
3205			note_cmpxchg_failure("slab_free", s, tid);
3206			goto redo;
3207		}
3208		stat(s, FREE_FASTPATH);
3209	} else
3210		__slab_free(s, page, head, tail_obj, cnt, addr);
3211
3212}
3213
3214static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
3215				      void *head, void *tail, int cnt,
3216				      unsigned long addr)
3217{
3218	/*
3219	 * With KASAN enabled slab_free_freelist_hook modifies the freelist
3220	 * to remove objects, whose reuse must be delayed.
3221	 */
3222	if (slab_free_freelist_hook(s, &head, &tail, &cnt))
3223		do_slab_free(s, page, head, tail, cnt, addr);
3224}
3225
3226#ifdef CONFIG_KASAN_GENERIC
3227void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
3228{
3229	do_slab_free(cache, virt_to_head_page(x), x, NULL, 1, addr);
3230}
3231#endif
3232
3233void kmem_cache_free(struct kmem_cache *s, void *x)
3234{
3235	s = cache_from_obj(s, x);
3236	if (!s)
3237		return;
3238	slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_);
3239	trace_kmem_cache_free(_RET_IP_, x, s->name);
3240}
3241EXPORT_SYMBOL(kmem_cache_free);
3242
3243struct detached_freelist {
3244	struct page *page;
3245	void *tail;
3246	void *freelist;
3247	int cnt;
3248	struct kmem_cache *s;
3249};
3250
3251static inline void free_nonslab_page(struct page *page, void *object)
3252{
3253	unsigned int order = compound_order(page);
3254
3255	VM_BUG_ON_PAGE(!PageCompound(page), page);
3256	kfree_hook(object);
3257	mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B, -(PAGE_SIZE << order));
3258	__free_pages(page, order);
3259}
3260
3261/*
3262 * This function progressively scans the array with free objects (with
3263 * a limited look ahead) and extract objects belonging to the same
3264 * page.  It builds a detached freelist directly within the given
3265 * page/objects.  This can happen without any need for
3266 * synchronization, because the objects are owned by running process.
3267 * The freelist is build up as a single linked list in the objects.
3268 * The idea is, that this detached freelist can then be bulk
3269 * transferred to the real freelist(s), but only requiring a single
3270 * synchronization primitive.  Look ahead in the array is limited due
3271 * to performance reasons.
3272 */
3273static inline
3274int build_detached_freelist(struct kmem_cache *s, size_t size,
3275			    void **p, struct detached_freelist *df)
3276{
3277	size_t first_skipped_index = 0;
3278	int lookahead = 3;
3279	void *object;
3280	struct page *page;
3281
3282	/* Always re-init detached_freelist */
3283	df->page = NULL;
3284
3285	do {
3286		object = p[--size];
3287		/* Do we need !ZERO_OR_NULL_PTR(object) here? (for kfree) */
3288	} while (!object && size);
3289
3290	if (!object)
3291		return 0;
3292
3293	page = virt_to_head_page(object);
3294	if (!s) {
3295		/* Handle kalloc'ed objects */
3296		if (unlikely(!PageSlab(page))) {
3297			free_nonslab_page(page, object);
 
 
3298			p[size] = NULL; /* mark object processed */
3299			return size;
3300		}
3301		/* Derive kmem_cache from object */
3302		df->s = page->slab_cache;
3303	} else {
3304		df->s = cache_from_obj(s, object); /* Support for memcg */
3305	}
3306
3307	if (is_kfence_address(object)) {
3308		slab_free_hook(df->s, object, false);
3309		__kfence_free(object);
3310		p[size] = NULL; /* mark object processed */
3311		return size;
3312	}
3313
3314	/* Start new detached freelist */
3315	df->page = page;
3316	set_freepointer(df->s, object, NULL);
3317	df->tail = object;
3318	df->freelist = object;
3319	p[size] = NULL; /* mark object processed */
3320	df->cnt = 1;
3321
3322	while (size) {
3323		object = p[--size];
3324		if (!object)
3325			continue; /* Skip processed objects */
3326
3327		/* df->page is always set at this point */
3328		if (df->page == virt_to_head_page(object)) {
3329			/* Opportunity build freelist */
3330			set_freepointer(df->s, object, df->freelist);
3331			df->freelist = object;
3332			df->cnt++;
3333			p[size] = NULL; /* mark object processed */
3334
3335			continue;
3336		}
3337
3338		/* Limit look ahead search */
3339		if (!--lookahead)
3340			break;
3341
3342		if (!first_skipped_index)
3343			first_skipped_index = size + 1;
3344	}
3345
3346	return first_skipped_index;
3347}
3348
3349/* Note that interrupts must be enabled when calling this function. */
3350void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
3351{
3352	if (WARN_ON(!size))
3353		return;
3354
3355	memcg_slab_free_hook(s, p, size);
3356	do {
3357		struct detached_freelist df;
3358
3359		size = build_detached_freelist(s, size, p, &df);
3360		if (!df.page)
3361			continue;
3362
3363		slab_free(df.s, df.page, df.freelist, df.tail, df.cnt, _RET_IP_);
3364	} while (likely(size));
3365}
3366EXPORT_SYMBOL(kmem_cache_free_bulk);
3367
3368/* Note that interrupts must be enabled when calling this function. */
3369int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
3370			  void **p)
3371{
3372	struct kmem_cache_cpu *c;
3373	int i;
3374	struct obj_cgroup *objcg = NULL;
3375
3376	/* memcg and kmem_cache debug support */
3377	s = slab_pre_alloc_hook(s, &objcg, size, flags);
3378	if (unlikely(!s))
3379		return false;
3380	/*
3381	 * Drain objects in the per cpu slab, while disabling local
3382	 * IRQs, which protects against PREEMPT and interrupts
3383	 * handlers invoking normal fastpath.
3384	 */
3385	local_irq_disable();
3386	c = this_cpu_ptr(s->cpu_slab);
3387
3388	for (i = 0; i < size; i++) {
3389		void *object = kfence_alloc(s, s->object_size, flags);
3390
3391		if (unlikely(object)) {
3392			p[i] = object;
3393			continue;
3394		}
3395
3396		object = c->freelist;
3397		if (unlikely(!object)) {
3398			/*
3399			 * We may have removed an object from c->freelist using
3400			 * the fastpath in the previous iteration; in that case,
3401			 * c->tid has not been bumped yet.
3402			 * Since ___slab_alloc() may reenable interrupts while
3403			 * allocating memory, we should bump c->tid now.
3404			 */
3405			c->tid = next_tid(c->tid);
3406
3407			/*
3408			 * Invoking slow path likely have side-effect
3409			 * of re-populating per CPU c->freelist
3410			 */
3411			p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
3412					    _RET_IP_, c);
3413			if (unlikely(!p[i]))
3414				goto error;
3415
3416			c = this_cpu_ptr(s->cpu_slab);
3417			maybe_wipe_obj_freeptr(s, p[i]);
3418
3419			continue; /* goto for-loop */
3420		}
3421		c->freelist = get_freepointer(s, object);
3422		p[i] = object;
3423		maybe_wipe_obj_freeptr(s, p[i]);
3424	}
3425	c->tid = next_tid(c->tid);
3426	local_irq_enable();
3427
3428	/*
3429	 * memcg and kmem_cache debug support and memory initialization.
3430	 * Done outside of the IRQ disabled fastpath loop.
3431	 */
3432	slab_post_alloc_hook(s, objcg, flags, size, p,
3433				slab_want_init_on_alloc(flags, s));
 
 
 
 
3434	return i;
3435error:
3436	local_irq_enable();
3437	slab_post_alloc_hook(s, objcg, flags, i, p, false);
3438	__kmem_cache_free_bulk(s, i, p);
3439	return 0;
3440}
3441EXPORT_SYMBOL(kmem_cache_alloc_bulk);
3442
3443
3444/*
3445 * Object placement in a slab is made very easy because we always start at
3446 * offset 0. If we tune the size of the object to the alignment then we can
3447 * get the required alignment by putting one properly sized object after
3448 * another.
3449 *
3450 * Notice that the allocation order determines the sizes of the per cpu
3451 * caches. Each processor has always one slab available for allocations.
3452 * Increasing the allocation order reduces the number of times that slabs
3453 * must be moved on and off the partial lists and is therefore a factor in
3454 * locking overhead.
3455 */
3456
3457/*
3458 * Minimum / Maximum order of slab pages. This influences locking overhead
3459 * and slab fragmentation. A higher order reduces the number of partial slabs
3460 * and increases the number of allocations possible without having to
3461 * take the list_lock.
3462 */
3463static unsigned int slub_min_order;
3464static unsigned int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
3465static unsigned int slub_min_objects;
3466
3467/*
3468 * Calculate the order of allocation given an slab object size.
3469 *
3470 * The order of allocation has significant impact on performance and other
3471 * system components. Generally order 0 allocations should be preferred since
3472 * order 0 does not cause fragmentation in the page allocator. Larger objects
3473 * be problematic to put into order 0 slabs because there may be too much
3474 * unused space left. We go to a higher order if more than 1/16th of the slab
3475 * would be wasted.
3476 *
3477 * In order to reach satisfactory performance we must ensure that a minimum
3478 * number of objects is in one slab. Otherwise we may generate too much
3479 * activity on the partial lists which requires taking the list_lock. This is
3480 * less a concern for large slabs though which are rarely used.
3481 *
3482 * slub_max_order specifies the order where we begin to stop considering the
3483 * number of objects in a slab as critical. If we reach slub_max_order then
3484 * we try to keep the page order as low as possible. So we accept more waste
3485 * of space in favor of a small page order.
3486 *
3487 * Higher order allocations also allow the placement of more objects in a
3488 * slab and thereby reduce object handling overhead. If the user has
3489 * requested a higher minimum order then we start with that one instead of
3490 * the smallest order which will fit the object.
3491 */
3492static inline unsigned int slab_order(unsigned int size,
3493		unsigned int min_objects, unsigned int max_order,
3494		unsigned int fract_leftover)
3495{
3496	unsigned int min_order = slub_min_order;
3497	unsigned int order;
3498
3499	if (order_objects(min_order, size) > MAX_OBJS_PER_PAGE)
3500		return get_order(size * MAX_OBJS_PER_PAGE) - 1;
3501
3502	for (order = max(min_order, (unsigned int)get_order(min_objects * size));
3503			order <= max_order; order++) {
3504
3505		unsigned int slab_size = (unsigned int)PAGE_SIZE << order;
3506		unsigned int rem;
3507
3508		rem = slab_size % size;
3509
3510		if (rem <= slab_size / fract_leftover)
3511			break;
3512	}
3513
3514	return order;
3515}
3516
3517static inline int calculate_order(unsigned int size)
3518{
3519	unsigned int order;
3520	unsigned int min_objects;
3521	unsigned int max_objects;
3522	unsigned int nr_cpus;
3523
3524	/*
3525	 * Attempt to find best configuration for a slab. This
3526	 * works by first attempting to generate a layout with
3527	 * the best configuration and backing off gradually.
3528	 *
3529	 * First we increase the acceptable waste in a slab. Then
3530	 * we reduce the minimum objects required in a slab.
3531	 */
3532	min_objects = slub_min_objects;
3533	if (!min_objects) {
3534		/*
3535		 * Some architectures will only update present cpus when
3536		 * onlining them, so don't trust the number if it's just 1. But
3537		 * we also don't want to use nr_cpu_ids always, as on some other
3538		 * architectures, there can be many possible cpus, but never
3539		 * onlined. Here we compromise between trying to avoid too high
3540		 * order on systems that appear larger than they are, and too
3541		 * low order on systems that appear smaller than they are.
3542		 */
3543		nr_cpus = num_present_cpus();
3544		if (nr_cpus <= 1)
3545			nr_cpus = nr_cpu_ids;
3546		min_objects = 4 * (fls(nr_cpus) + 1);
3547	}
3548	max_objects = order_objects(slub_max_order, size);
3549	min_objects = min(min_objects, max_objects);
3550
3551	while (min_objects > 1) {
3552		unsigned int fraction;
3553
3554		fraction = 16;
3555		while (fraction >= 4) {
3556			order = slab_order(size, min_objects,
3557					slub_max_order, fraction);
3558			if (order <= slub_max_order)
3559				return order;
3560			fraction /= 2;
3561		}
3562		min_objects--;
3563	}
3564
3565	/*
3566	 * We were unable to place multiple objects in a slab. Now
3567	 * lets see if we can place a single object there.
3568	 */
3569	order = slab_order(size, 1, slub_max_order, 1);
3570	if (order <= slub_max_order)
3571		return order;
3572
3573	/*
3574	 * Doh this slab cannot be placed using slub_max_order.
3575	 */
3576	order = slab_order(size, 1, MAX_ORDER, 1);
3577	if (order < MAX_ORDER)
3578		return order;
3579	return -ENOSYS;
3580}
3581
3582static void
3583init_kmem_cache_node(struct kmem_cache_node *n)
3584{
3585	n->nr_partial = 0;
3586	spin_lock_init(&n->list_lock);
3587	INIT_LIST_HEAD(&n->partial);
3588#ifdef CONFIG_SLUB_DEBUG
3589	atomic_long_set(&n->nr_slabs, 0);
3590	atomic_long_set(&n->total_objects, 0);
3591	INIT_LIST_HEAD(&n->full);
3592#endif
3593}
3594
3595static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
3596{
3597	BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
3598			KMALLOC_SHIFT_HIGH * sizeof(struct kmem_cache_cpu));
3599
3600	/*
3601	 * Must align to double word boundary for the double cmpxchg
3602	 * instructions to work; see __pcpu_double_call_return_bool().
3603	 */
3604	s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
3605				     2 * sizeof(void *));
3606
3607	if (!s->cpu_slab)
3608		return 0;
3609
3610	init_kmem_cache_cpus(s);
3611
3612	return 1;
3613}
3614
3615static struct kmem_cache *kmem_cache_node;
3616
3617/*
3618 * No kmalloc_node yet so do it by hand. We know that this is the first
3619 * slab on the node for this slabcache. There are no concurrent accesses
3620 * possible.
3621 *
3622 * Note that this function only works on the kmem_cache_node
3623 * when allocating for the kmem_cache_node. This is used for bootstrapping
3624 * memory on a fresh node that has no slab structures yet.
3625 */
3626static void early_kmem_cache_node_alloc(int node)
3627{
3628	struct page *page;
3629	struct kmem_cache_node *n;
3630
3631	BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
3632
3633	page = new_slab(kmem_cache_node, GFP_NOWAIT, node);
3634
3635	BUG_ON(!page);
3636	if (page_to_nid(page) != node) {
3637		pr_err("SLUB: Unable to allocate memory from node %d\n", node);
3638		pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n");
3639	}
3640
3641	n = page->freelist;
3642	BUG_ON(!n);
 
 
 
 
3643#ifdef CONFIG_SLUB_DEBUG
3644	init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
3645	init_tracking(kmem_cache_node, n);
3646#endif
3647	n = kasan_slab_alloc(kmem_cache_node, n, GFP_KERNEL, false);
3648	page->freelist = get_freepointer(kmem_cache_node, n);
3649	page->inuse = 1;
3650	page->frozen = 0;
3651	kmem_cache_node->node[node] = n;
3652	init_kmem_cache_node(n);
3653	inc_slabs_node(kmem_cache_node, node, page->objects);
3654
3655	/*
3656	 * No locks need to be taken here as it has just been
3657	 * initialized and there is no concurrent access.
3658	 */
3659	__add_partial(n, page, DEACTIVATE_TO_HEAD);
3660}
3661
3662static void free_kmem_cache_nodes(struct kmem_cache *s)
3663{
3664	int node;
3665	struct kmem_cache_node *n;
3666
3667	for_each_kmem_cache_node(s, node, n) {
3668		s->node[node] = NULL;
3669		kmem_cache_free(kmem_cache_node, n);
3670	}
3671}
3672
3673void __kmem_cache_release(struct kmem_cache *s)
3674{
3675	cache_random_seq_destroy(s);
3676	free_percpu(s->cpu_slab);
3677	free_kmem_cache_nodes(s);
3678}
3679
3680static int init_kmem_cache_nodes(struct kmem_cache *s)
3681{
3682	int node;
3683
3684	for_each_node_mask(node, slab_nodes) {
3685		struct kmem_cache_node *n;
3686
3687		if (slab_state == DOWN) {
3688			early_kmem_cache_node_alloc(node);
3689			continue;
3690		}
3691		n = kmem_cache_alloc_node(kmem_cache_node,
3692						GFP_KERNEL, node);
3693
3694		if (!n) {
3695			free_kmem_cache_nodes(s);
3696			return 0;
3697		}
3698
3699		init_kmem_cache_node(n);
3700		s->node[node] = n;
3701	}
3702	return 1;
3703}
3704
3705static void set_min_partial(struct kmem_cache *s, unsigned long min)
3706{
3707	if (min < MIN_PARTIAL)
3708		min = MIN_PARTIAL;
3709	else if (min > MAX_PARTIAL)
3710		min = MAX_PARTIAL;
3711	s->min_partial = min;
3712}
3713
3714static void set_cpu_partial(struct kmem_cache *s)
3715{
3716#ifdef CONFIG_SLUB_CPU_PARTIAL
3717	/*
3718	 * cpu_partial determined the maximum number of objects kept in the
3719	 * per cpu partial lists of a processor.
3720	 *
3721	 * Per cpu partial lists mainly contain slabs that just have one
3722	 * object freed. If they are used for allocation then they can be
3723	 * filled up again with minimal effort. The slab will never hit the
3724	 * per node partial lists and therefore no locking will be required.
3725	 *
3726	 * This setting also determines
3727	 *
3728	 * A) The number of objects from per cpu partial slabs dumped to the
3729	 *    per node list when we reach the limit.
3730	 * B) The number of objects in cpu partial slabs to extract from the
3731	 *    per node list when we run out of per cpu objects. We only fetch
3732	 *    50% to keep some capacity around for frees.
3733	 */
3734	if (!kmem_cache_has_cpu_partial(s))
3735		slub_set_cpu_partial(s, 0);
3736	else if (s->size >= PAGE_SIZE)
3737		slub_set_cpu_partial(s, 2);
3738	else if (s->size >= 1024)
3739		slub_set_cpu_partial(s, 6);
3740	else if (s->size >= 256)
3741		slub_set_cpu_partial(s, 13);
3742	else
3743		slub_set_cpu_partial(s, 30);
3744#endif
3745}
3746
3747/*
3748 * calculate_sizes() determines the order and the distribution of data within
3749 * a slab object.
3750 */
3751static int calculate_sizes(struct kmem_cache *s, int forced_order)
3752{
3753	slab_flags_t flags = s->flags;
3754	unsigned int size = s->object_size;
3755	unsigned int order;
3756
3757	/*
3758	 * Round up object size to the next word boundary. We can only
3759	 * place the free pointer at word boundaries and this determines
3760	 * the possible location of the free pointer.
3761	 */
3762	size = ALIGN(size, sizeof(void *));
3763
3764#ifdef CONFIG_SLUB_DEBUG
3765	/*
3766	 * Determine if we can poison the object itself. If the user of
3767	 * the slab may touch the object after free or before allocation
3768	 * then we should never poison the object itself.
3769	 */
3770	if ((flags & SLAB_POISON) && !(flags & SLAB_TYPESAFE_BY_RCU) &&
3771			!s->ctor)
3772		s->flags |= __OBJECT_POISON;
3773	else
3774		s->flags &= ~__OBJECT_POISON;
3775
3776
3777	/*
3778	 * If we are Redzoning then check if there is some space between the
3779	 * end of the object and the free pointer. If not then add an
3780	 * additional word to have some bytes to store Redzone information.
3781	 */
3782	if ((flags & SLAB_RED_ZONE) && size == s->object_size)
3783		size += sizeof(void *);
3784#endif
3785
3786	/*
3787	 * With that we have determined the number of bytes in actual use
3788	 * by the object and redzoning.
3789	 */
3790	s->inuse = size;
3791
3792	if ((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
3793	    ((flags & SLAB_RED_ZONE) && s->object_size < sizeof(void *)) ||
3794	    s->ctor) {
3795		/*
3796		 * Relocate free pointer after the object if it is not
3797		 * permitted to overwrite the first word of the object on
3798		 * kmem_cache_free.
3799		 *
3800		 * This is the case if we do RCU, have a constructor or
3801		 * destructor, are poisoning the objects, or are
3802		 * redzoning an object smaller than sizeof(void *).
3803		 *
3804		 * The assumption that s->offset >= s->inuse means free
3805		 * pointer is outside of the object is used in the
3806		 * freeptr_outside_object() function. If that is no
3807		 * longer true, the function needs to be modified.
3808		 */
3809		s->offset = size;
3810		size += sizeof(void *);
3811	} else {
3812		/*
3813		 * Store freelist pointer near middle of object to keep
3814		 * it away from the edges of the object to avoid small
3815		 * sized over/underflows from neighboring allocations.
3816		 */
3817		s->offset = ALIGN_DOWN(s->object_size / 2, sizeof(void *));
3818	}
3819
3820#ifdef CONFIG_SLUB_DEBUG
3821	if (flags & SLAB_STORE_USER)
3822		/*
3823		 * Need to store information about allocs and frees after
3824		 * the object.
3825		 */
3826		size += 2 * sizeof(struct track);
3827#endif
3828
3829	kasan_cache_create(s, &size, &s->flags);
3830#ifdef CONFIG_SLUB_DEBUG
3831	if (flags & SLAB_RED_ZONE) {
3832		/*
3833		 * Add some empty padding so that we can catch
3834		 * overwrites from earlier objects rather than let
3835		 * tracking information or the free pointer be
3836		 * corrupted if a user writes before the start
3837		 * of the object.
3838		 */
3839		size += sizeof(void *);
3840
3841		s->red_left_pad = sizeof(void *);
3842		s->red_left_pad = ALIGN(s->red_left_pad, s->align);
3843		size += s->red_left_pad;
3844	}
3845#endif
3846
3847	/*
3848	 * SLUB stores one object immediately after another beginning from
3849	 * offset 0. In order to align the objects we have to simply size
3850	 * each object to conform to the alignment.
3851	 */
3852	size = ALIGN(size, s->align);
3853	s->size = size;
3854	s->reciprocal_size = reciprocal_value(size);
3855	if (forced_order >= 0)
3856		order = forced_order;
3857	else
3858		order = calculate_order(size);
3859
3860	if ((int)order < 0)
3861		return 0;
3862
3863	s->allocflags = 0;
3864	if (order)
3865		s->allocflags |= __GFP_COMP;
3866
3867	if (s->flags & SLAB_CACHE_DMA)
3868		s->allocflags |= GFP_DMA;
3869
3870	if (s->flags & SLAB_CACHE_DMA32)
3871		s->allocflags |= GFP_DMA32;
3872
3873	if (s->flags & SLAB_RECLAIM_ACCOUNT)
3874		s->allocflags |= __GFP_RECLAIMABLE;
3875
3876	/*
3877	 * Determine the number of objects per slab
3878	 */
3879	s->oo = oo_make(order, size);
3880	s->min = oo_make(get_order(size), size);
3881	if (oo_objects(s->oo) > oo_objects(s->max))
3882		s->max = s->oo;
3883
3884	return !!oo_objects(s->oo);
3885}
3886
3887static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
3888{
3889	s->flags = kmem_cache_flags(s->size, flags, s->name);
 
3890#ifdef CONFIG_SLAB_FREELIST_HARDENED
3891	s->random = get_random_long();
3892#endif
3893
 
 
 
3894	if (!calculate_sizes(s, -1))
3895		goto error;
3896	if (disable_higher_order_debug) {
3897		/*
3898		 * Disable debugging flags that store metadata if the min slab
3899		 * order increased.
3900		 */
3901		if (get_order(s->size) > get_order(s->object_size)) {
3902			s->flags &= ~DEBUG_METADATA_FLAGS;
3903			s->offset = 0;
3904			if (!calculate_sizes(s, -1))
3905				goto error;
3906		}
3907	}
3908
3909#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
3910    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
3911	if (system_has_cmpxchg_double() && (s->flags & SLAB_NO_CMPXCHG) == 0)
3912		/* Enable fast mode */
3913		s->flags |= __CMPXCHG_DOUBLE;
3914#endif
3915
3916	/*
3917	 * The larger the object size is, the more pages we want on the partial
3918	 * list to avoid pounding the page allocator excessively.
3919	 */
3920	set_min_partial(s, ilog2(s->size) / 2);
3921
3922	set_cpu_partial(s);
3923
3924#ifdef CONFIG_NUMA
3925	s->remote_node_defrag_ratio = 1000;
3926#endif
3927
3928	/* Initialize the pre-computed randomized freelist if slab is up */
3929	if (slab_state >= UP) {
3930		if (init_cache_random_seq(s))
3931			goto error;
3932	}
3933
3934	if (!init_kmem_cache_nodes(s))
3935		goto error;
3936
3937	if (alloc_kmem_cache_cpus(s))
3938		return 0;
3939
 
3940error:
3941	__kmem_cache_release(s);
 
 
 
3942	return -EINVAL;
3943}
3944
3945static void list_slab_objects(struct kmem_cache *s, struct page *page,
3946			      const char *text)
3947{
3948#ifdef CONFIG_SLUB_DEBUG
3949	void *addr = page_address(page);
3950	unsigned long *map;
3951	void *p;
3952
 
 
 
3953	slab_err(s, page, text, s->name);
3954	slab_lock(page);
3955
3956	map = get_map(s, page);
3957	for_each_object(p, s, addr, page->objects) {
3958
3959		if (!test_bit(__obj_to_index(s, addr, p), map)) {
3960			pr_err("Object 0x%p @offset=%tu\n", p, p - addr);
3961			print_tracking(s, p);
3962		}
3963	}
3964	put_map(map);
3965	slab_unlock(page);
 
3966#endif
3967}
3968
3969/*
3970 * Attempt to free all partial slabs on a node.
3971 * This is called from __kmem_cache_shutdown(). We must take list_lock
3972 * because sysfs file might still access partial list after the shutdowning.
3973 */
3974static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
3975{
3976	LIST_HEAD(discard);
3977	struct page *page, *h;
3978
3979	BUG_ON(irqs_disabled());
3980	spin_lock_irq(&n->list_lock);
3981	list_for_each_entry_safe(page, h, &n->partial, slab_list) {
3982		if (!page->inuse) {
3983			remove_partial(n, page);
3984			list_add(&page->slab_list, &discard);
3985		} else {
3986			list_slab_objects(s, page,
3987			  "Objects remaining in %s on __kmem_cache_shutdown()");
3988		}
3989	}
3990	spin_unlock_irq(&n->list_lock);
3991
3992	list_for_each_entry_safe(page, h, &discard, slab_list)
3993		discard_slab(s, page);
3994}
3995
3996bool __kmem_cache_empty(struct kmem_cache *s)
3997{
3998	int node;
3999	struct kmem_cache_node *n;
4000
4001	for_each_kmem_cache_node(s, node, n)
4002		if (n->nr_partial || slabs_node(s, node))
4003			return false;
4004	return true;
4005}
4006
4007/*
4008 * Release all resources used by a slab cache.
4009 */
4010int __kmem_cache_shutdown(struct kmem_cache *s)
4011{
4012	int node;
4013	struct kmem_cache_node *n;
4014
4015	flush_all(s);
4016	/* Attempt to free all objects */
4017	for_each_kmem_cache_node(s, node, n) {
4018		free_partial(s, n);
4019		if (n->nr_partial || slabs_node(s, node))
4020			return 1;
4021	}
 
4022	return 0;
4023}
4024
4025#ifdef CONFIG_PRINTK
4026void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page)
4027{
4028	void *base;
4029	int __maybe_unused i;
4030	unsigned int objnr;
4031	void *objp;
4032	void *objp0;
4033	struct kmem_cache *s = page->slab_cache;
4034	struct track __maybe_unused *trackp;
4035
4036	kpp->kp_ptr = object;
4037	kpp->kp_page = page;
4038	kpp->kp_slab_cache = s;
4039	base = page_address(page);
4040	objp0 = kasan_reset_tag(object);
4041#ifdef CONFIG_SLUB_DEBUG
4042	objp = restore_red_left(s, objp0);
4043#else
4044	objp = objp0;
4045#endif
4046	objnr = obj_to_index(s, page, objp);
4047	kpp->kp_data_offset = (unsigned long)((char *)objp0 - (char *)objp);
4048	objp = base + s->size * objnr;
4049	kpp->kp_objp = objp;
4050	if (WARN_ON_ONCE(objp < base || objp >= base + page->objects * s->size || (objp - base) % s->size) ||
4051	    !(s->flags & SLAB_STORE_USER))
4052		return;
4053#ifdef CONFIG_SLUB_DEBUG
4054	objp = fixup_red_left(s, objp);
4055	trackp = get_track(s, objp, TRACK_ALLOC);
4056	kpp->kp_ret = (void *)trackp->addr;
4057#ifdef CONFIG_STACKTRACE
4058	for (i = 0; i < KS_ADDRS_COUNT && i < TRACK_ADDRS_COUNT; i++) {
4059		kpp->kp_stack[i] = (void *)trackp->addrs[i];
4060		if (!kpp->kp_stack[i])
4061			break;
4062	}
4063
4064	trackp = get_track(s, objp, TRACK_FREE);
4065	for (i = 0; i < KS_ADDRS_COUNT && i < TRACK_ADDRS_COUNT; i++) {
4066		kpp->kp_free_stack[i] = (void *)trackp->addrs[i];
4067		if (!kpp->kp_free_stack[i])
4068			break;
4069	}
4070#endif
4071#endif
4072}
4073#endif
4074
4075/********************************************************************
4076 *		Kmalloc subsystem
4077 *******************************************************************/
4078
4079static int __init setup_slub_min_order(char *str)
4080{
4081	get_option(&str, (int *)&slub_min_order);
4082
4083	return 1;
4084}
4085
4086__setup("slub_min_order=", setup_slub_min_order);
4087
4088static int __init setup_slub_max_order(char *str)
4089{
4090	get_option(&str, (int *)&slub_max_order);
4091	slub_max_order = min(slub_max_order, (unsigned int)MAX_ORDER - 1);
4092
4093	return 1;
4094}
4095
4096__setup("slub_max_order=", setup_slub_max_order);
4097
4098static int __init setup_slub_min_objects(char *str)
4099{
4100	get_option(&str, (int *)&slub_min_objects);
4101
4102	return 1;
4103}
4104
4105__setup("slub_min_objects=", setup_slub_min_objects);
4106
4107void *__kmalloc(size_t size, gfp_t flags)
4108{
4109	struct kmem_cache *s;
4110	void *ret;
4111
4112	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
4113		return kmalloc_large(size, flags);
4114
4115	s = kmalloc_slab(size, flags);
4116
4117	if (unlikely(ZERO_OR_NULL_PTR(s)))
4118		return s;
4119
4120	ret = slab_alloc(s, flags, _RET_IP_, size);
4121
4122	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
4123
4124	ret = kasan_kmalloc(s, ret, size, flags);
4125
4126	return ret;
4127}
4128EXPORT_SYMBOL(__kmalloc);
4129
4130#ifdef CONFIG_NUMA
4131static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
4132{
4133	struct page *page;
4134	void *ptr = NULL;
4135	unsigned int order = get_order(size);
4136
4137	flags |= __GFP_COMP;
4138	page = alloc_pages_node(node, flags, order);
4139	if (page) {
4140		ptr = page_address(page);
4141		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
4142				      PAGE_SIZE << order);
4143	}
4144
4145	return kmalloc_large_node_hook(ptr, size, flags);
 
4146}
4147
4148void *__kmalloc_node(size_t size, gfp_t flags, int node)
4149{
4150	struct kmem_cache *s;
4151	void *ret;
4152
4153	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
4154		ret = kmalloc_large_node(size, flags, node);
4155
4156		trace_kmalloc_node(_RET_IP_, ret,
4157				   size, PAGE_SIZE << get_order(size),
4158				   flags, node);
4159
4160		return ret;
4161	}
4162
4163	s = kmalloc_slab(size, flags);
4164
4165	if (unlikely(ZERO_OR_NULL_PTR(s)))
4166		return s;
4167
4168	ret = slab_alloc_node(s, flags, node, _RET_IP_, size);
4169
4170	trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
4171
4172	ret = kasan_kmalloc(s, ret, size, flags);
4173
4174	return ret;
4175}
4176EXPORT_SYMBOL(__kmalloc_node);
4177#endif	/* CONFIG_NUMA */
4178
4179#ifdef CONFIG_HARDENED_USERCOPY
4180/*
4181 * Rejects incorrectly sized objects and objects that are to be copied
4182 * to/from userspace but do not fall entirely within the containing slab
4183 * cache's usercopy region.
4184 *
4185 * Returns NULL if check passes, otherwise const char * to name of cache
4186 * to indicate an error.
4187 */
4188void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
4189			 bool to_user)
4190{
4191	struct kmem_cache *s;
4192	unsigned int offset;
4193	size_t object_size;
4194	bool is_kfence = is_kfence_address(ptr);
4195
4196	ptr = kasan_reset_tag(ptr);
4197
4198	/* Find object and usable object size. */
4199	s = page->slab_cache;
4200
4201	/* Reject impossible pointers. */
4202	if (ptr < page_address(page))
4203		usercopy_abort("SLUB object not in SLUB page?!", NULL,
4204			       to_user, 0, n);
4205
4206	/* Find offset within object. */
4207	if (is_kfence)
4208		offset = ptr - kfence_object_start(ptr);
4209	else
4210		offset = (ptr - page_address(page)) % s->size;
4211
4212	/* Adjust for redzone and reject if within the redzone. */
4213	if (!is_kfence && kmem_cache_debug_flags(s, SLAB_RED_ZONE)) {
4214		if (offset < s->red_left_pad)
4215			usercopy_abort("SLUB object in left red zone",
4216				       s->name, to_user, offset, n);
4217		offset -= s->red_left_pad;
4218	}
4219
4220	/* Allow address range falling entirely within usercopy region. */
4221	if (offset >= s->useroffset &&
4222	    offset - s->useroffset <= s->usersize &&
4223	    n <= s->useroffset - offset + s->usersize)
4224		return;
4225
4226	/*
4227	 * If the copy is still within the allocated object, produce
4228	 * a warning instead of rejecting the copy. This is intended
4229	 * to be a temporary method to find any missing usercopy
4230	 * whitelists.
4231	 */
4232	object_size = slab_ksize(s);
4233	if (usercopy_fallback &&
4234	    offset <= object_size && n <= object_size - offset) {
4235		usercopy_warn("SLUB object", s->name, to_user, offset, n);
4236		return;
4237	}
4238
4239	usercopy_abort("SLUB object", s->name, to_user, offset, n);
4240}
4241#endif /* CONFIG_HARDENED_USERCOPY */
4242
4243size_t __ksize(const void *object)
4244{
4245	struct page *page;
4246
4247	if (unlikely(object == ZERO_SIZE_PTR))
4248		return 0;
4249
4250	page = virt_to_head_page(object);
4251
4252	if (unlikely(!PageSlab(page))) {
4253		WARN_ON(!PageCompound(page));
4254		return page_size(page);
4255	}
4256
4257	return slab_ksize(page->slab_cache);
4258}
4259EXPORT_SYMBOL(__ksize);
 
 
 
 
 
 
 
 
 
 
4260
4261void kfree(const void *x)
4262{
4263	struct page *page;
4264	void *object = (void *)x;
4265
4266	trace_kfree(_RET_IP_, x);
4267
4268	if (unlikely(ZERO_OR_NULL_PTR(x)))
4269		return;
4270
4271	page = virt_to_head_page(x);
4272	if (unlikely(!PageSlab(page))) {
4273		free_nonslab_page(page, object);
 
 
4274		return;
4275	}
4276	slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
4277}
4278EXPORT_SYMBOL(kfree);
4279
4280#define SHRINK_PROMOTE_MAX 32
4281
4282/*
4283 * kmem_cache_shrink discards empty slabs and promotes the slabs filled
4284 * up most to the head of the partial lists. New allocations will then
4285 * fill those up and thus they can be removed from the partial lists.
4286 *
4287 * The slabs with the least items are placed last. This results in them
4288 * being allocated from last increasing the chance that the last objects
4289 * are freed in them.
4290 */
4291int __kmem_cache_shrink(struct kmem_cache *s)
4292{
4293	int node;
4294	int i;
4295	struct kmem_cache_node *n;
4296	struct page *page;
4297	struct page *t;
4298	struct list_head discard;
4299	struct list_head promote[SHRINK_PROMOTE_MAX];
4300	unsigned long flags;
4301	int ret = 0;
4302
4303	flush_all(s);
4304	for_each_kmem_cache_node(s, node, n) {
4305		INIT_LIST_HEAD(&discard);
4306		for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
4307			INIT_LIST_HEAD(promote + i);
4308
4309		spin_lock_irqsave(&n->list_lock, flags);
4310
4311		/*
4312		 * Build lists of slabs to discard or promote.
4313		 *
4314		 * Note that concurrent frees may occur while we hold the
4315		 * list_lock. page->inuse here is the upper limit.
4316		 */
4317		list_for_each_entry_safe(page, t, &n->partial, slab_list) {
4318			int free = page->objects - page->inuse;
4319
4320			/* Do not reread page->inuse */
4321			barrier();
4322
4323			/* We do not keep full slabs on the list */
4324			BUG_ON(free <= 0);
4325
4326			if (free == page->objects) {
4327				list_move(&page->slab_list, &discard);
4328				n->nr_partial--;
4329			} else if (free <= SHRINK_PROMOTE_MAX)
4330				list_move(&page->slab_list, promote + free - 1);
4331		}
4332
4333		/*
4334		 * Promote the slabs filled up most to the head of the
4335		 * partial list.
4336		 */
4337		for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
4338			list_splice(promote + i, &n->partial);
4339
4340		spin_unlock_irqrestore(&n->list_lock, flags);
4341
4342		/* Release empty slabs */
4343		list_for_each_entry_safe(page, t, &discard, slab_list)
4344			discard_slab(s, page);
4345
4346		if (slabs_node(s, node))
4347			ret = 1;
4348	}
4349
4350	return ret;
4351}
4352
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4353static int slab_mem_going_offline_callback(void *arg)
4354{
4355	struct kmem_cache *s;
4356
4357	mutex_lock(&slab_mutex);
4358	list_for_each_entry(s, &slab_caches, list)
4359		__kmem_cache_shrink(s);
4360	mutex_unlock(&slab_mutex);
4361
4362	return 0;
4363}
4364
4365static void slab_mem_offline_callback(void *arg)
4366{
 
 
4367	struct memory_notify *marg = arg;
4368	int offline_node;
4369
4370	offline_node = marg->status_change_nid_normal;
4371
4372	/*
4373	 * If the node still has available memory. we need kmem_cache_node
4374	 * for it yet.
4375	 */
4376	if (offline_node < 0)
4377		return;
4378
4379	mutex_lock(&slab_mutex);
4380	node_clear(offline_node, slab_nodes);
4381	/*
4382	 * We no longer free kmem_cache_node structures here, as it would be
4383	 * racy with all get_node() users, and infeasible to protect them with
4384	 * slab_mutex.
4385	 */
 
 
 
 
 
 
 
 
 
4386	mutex_unlock(&slab_mutex);
4387}
4388
4389static int slab_mem_going_online_callback(void *arg)
4390{
4391	struct kmem_cache_node *n;
4392	struct kmem_cache *s;
4393	struct memory_notify *marg = arg;
4394	int nid = marg->status_change_nid_normal;
4395	int ret = 0;
4396
4397	/*
4398	 * If the node's memory is already available, then kmem_cache_node is
4399	 * already created. Nothing to do.
4400	 */
4401	if (nid < 0)
4402		return 0;
4403
4404	/*
4405	 * We are bringing a node online. No memory is available yet. We must
4406	 * allocate a kmem_cache_node structure in order to bring the node
4407	 * online.
4408	 */
4409	mutex_lock(&slab_mutex);
4410	list_for_each_entry(s, &slab_caches, list) {
4411		/*
4412		 * The structure may already exist if the node was previously
4413		 * onlined and offlined.
4414		 */
4415		if (get_node(s, nid))
4416			continue;
4417		/*
4418		 * XXX: kmem_cache_alloc_node will fallback to other nodes
4419		 *      since memory is not yet available from the node that
4420		 *      is brought up.
4421		 */
4422		n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL);
4423		if (!n) {
4424			ret = -ENOMEM;
4425			goto out;
4426		}
4427		init_kmem_cache_node(n);
4428		s->node[nid] = n;
4429	}
4430	/*
4431	 * Any cache created after this point will also have kmem_cache_node
4432	 * initialized for the new node.
4433	 */
4434	node_set(nid, slab_nodes);
4435out:
4436	mutex_unlock(&slab_mutex);
4437	return ret;
4438}
4439
4440static int slab_memory_callback(struct notifier_block *self,
4441				unsigned long action, void *arg)
4442{
4443	int ret = 0;
4444
4445	switch (action) {
4446	case MEM_GOING_ONLINE:
4447		ret = slab_mem_going_online_callback(arg);
4448		break;
4449	case MEM_GOING_OFFLINE:
4450		ret = slab_mem_going_offline_callback(arg);
4451		break;
4452	case MEM_OFFLINE:
4453	case MEM_CANCEL_ONLINE:
4454		slab_mem_offline_callback(arg);
4455		break;
4456	case MEM_ONLINE:
4457	case MEM_CANCEL_OFFLINE:
4458		break;
4459	}
4460	if (ret)
4461		ret = notifier_from_errno(ret);
4462	else
4463		ret = NOTIFY_OK;
4464	return ret;
4465}
4466
4467static struct notifier_block slab_memory_callback_nb = {
4468	.notifier_call = slab_memory_callback,
4469	.priority = SLAB_CALLBACK_PRI,
4470};
4471
4472/********************************************************************
4473 *			Basic setup of slabs
4474 *******************************************************************/
4475
4476/*
4477 * Used for early kmem_cache structures that were allocated using
4478 * the page allocator. Allocate them properly then fix up the pointers
4479 * that may be pointing to the wrong kmem_cache structure.
4480 */
4481
4482static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
4483{
4484	int node;
4485	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
4486	struct kmem_cache_node *n;
4487
4488	memcpy(s, static_cache, kmem_cache->object_size);
4489
4490	/*
4491	 * This runs very early, and only the boot processor is supposed to be
4492	 * up.  Even if it weren't true, IRQs are not up so we couldn't fire
4493	 * IPIs around.
4494	 */
4495	__flush_cpu_slab(s, smp_processor_id());
4496	for_each_kmem_cache_node(s, node, n) {
4497		struct page *p;
4498
4499		list_for_each_entry(p, &n->partial, slab_list)
4500			p->slab_cache = s;
4501
4502#ifdef CONFIG_SLUB_DEBUG
4503		list_for_each_entry(p, &n->full, slab_list)
4504			p->slab_cache = s;
4505#endif
4506	}
 
4507	list_add(&s->list, &slab_caches);
 
4508	return s;
4509}
4510
4511void __init kmem_cache_init(void)
4512{
4513	static __initdata struct kmem_cache boot_kmem_cache,
4514		boot_kmem_cache_node;
4515	int node;
4516
4517	if (debug_guardpage_minorder())
4518		slub_max_order = 0;
4519
4520	/* Print slub debugging pointers without hashing */
4521	if (__slub_debug_enabled())
4522		no_hash_pointers_enable(NULL);
4523
4524	kmem_cache_node = &boot_kmem_cache_node;
4525	kmem_cache = &boot_kmem_cache;
4526
4527	/*
4528	 * Initialize the nodemask for which we will allocate per node
4529	 * structures. Here we don't need taking slab_mutex yet.
4530	 */
4531	for_each_node_state(node, N_NORMAL_MEMORY)
4532		node_set(node, slab_nodes);
4533
4534	create_boot_cache(kmem_cache_node, "kmem_cache_node",
4535		sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN, 0, 0);
4536
4537	register_hotmemory_notifier(&slab_memory_callback_nb);
4538
4539	/* Able to allocate the per node structures */
4540	slab_state = PARTIAL;
4541
4542	create_boot_cache(kmem_cache, "kmem_cache",
4543			offsetof(struct kmem_cache, node) +
4544				nr_node_ids * sizeof(struct kmem_cache_node *),
4545		       SLAB_HWCACHE_ALIGN, 0, 0);
4546
4547	kmem_cache = bootstrap(&boot_kmem_cache);
 
 
 
 
 
 
4548	kmem_cache_node = bootstrap(&boot_kmem_cache_node);
4549
4550	/* Now we can use the kmem_cache to allocate kmalloc slabs */
4551	setup_kmalloc_cache_index_table();
4552	create_kmalloc_caches(0);
4553
4554	/* Setup random freelists for each cache */
4555	init_freelist_randomization();
4556
4557	cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL,
4558				  slub_cpu_dead);
4559
4560	pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%u\n",
4561		cache_line_size(),
4562		slub_min_order, slub_max_order, slub_min_objects,
4563		nr_cpu_ids, nr_node_ids);
4564}
4565
4566void __init kmem_cache_init_late(void)
4567{
4568}
4569
4570struct kmem_cache *
4571__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
4572		   slab_flags_t flags, void (*ctor)(void *))
4573{
4574	struct kmem_cache *s;
4575
4576	s = find_mergeable(size, align, flags, name, ctor);
4577	if (s) {
4578		s->refcount++;
4579
4580		/*
4581		 * Adjust the object sizes so that we clear
4582		 * the complete object on kzalloc.
4583		 */
4584		s->object_size = max(s->object_size, size);
4585		s->inuse = max(s->inuse, ALIGN(size, sizeof(void *)));
4586
 
 
 
 
 
4587		if (sysfs_slab_alias(s, name)) {
4588			s->refcount--;
4589			s = NULL;
4590		}
4591	}
4592
4593	return s;
4594}
4595
4596int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
4597{
4598	int err;
4599
4600	err = kmem_cache_open(s, flags);
4601	if (err)
4602		return err;
4603
4604	/* Mutex is not taken during early boot */
4605	if (slab_state <= UP)
4606		return 0;
4607
 
4608	err = sysfs_slab_add(s);
4609	if (err) {
4610		__kmem_cache_release(s);
4611		return err;
4612	}
4613
4614	if (s->flags & SLAB_STORE_USER)
4615		debugfs_slab_add(s);
4616
4617	return 0;
4618}
4619
4620void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
4621{
4622	struct kmem_cache *s;
4623	void *ret;
4624
4625	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
4626		return kmalloc_large(size, gfpflags);
4627
4628	s = kmalloc_slab(size, gfpflags);
4629
4630	if (unlikely(ZERO_OR_NULL_PTR(s)))
4631		return s;
4632
4633	ret = slab_alloc(s, gfpflags, caller, size);
4634
4635	/* Honor the call site pointer we received. */
4636	trace_kmalloc(caller, ret, size, s->size, gfpflags);
4637
4638	return ret;
4639}
4640EXPORT_SYMBOL(__kmalloc_track_caller);
4641
4642#ifdef CONFIG_NUMA
4643void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
4644					int node, unsigned long caller)
4645{
4646	struct kmem_cache *s;
4647	void *ret;
4648
4649	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
4650		ret = kmalloc_large_node(size, gfpflags, node);
4651
4652		trace_kmalloc_node(caller, ret,
4653				   size, PAGE_SIZE << get_order(size),
4654				   gfpflags, node);
4655
4656		return ret;
4657	}
4658
4659	s = kmalloc_slab(size, gfpflags);
4660
4661	if (unlikely(ZERO_OR_NULL_PTR(s)))
4662		return s;
4663
4664	ret = slab_alloc_node(s, gfpflags, node, caller, size);
4665
4666	/* Honor the call site pointer we received. */
4667	trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
4668
4669	return ret;
4670}
4671EXPORT_SYMBOL(__kmalloc_node_track_caller);
4672#endif
4673
4674#ifdef CONFIG_SYSFS
4675static int count_inuse(struct page *page)
4676{
4677	return page->inuse;
4678}
4679
4680static int count_total(struct page *page)
4681{
4682	return page->objects;
4683}
4684#endif
4685
4686#ifdef CONFIG_SLUB_DEBUG
4687static void validate_slab(struct kmem_cache *s, struct page *page)
 
4688{
4689	void *p;
4690	void *addr = page_address(page);
4691	unsigned long *map;
4692
4693	slab_lock(page);
 
 
4694
4695	if (!check_slab(s, page) || !on_freelist(s, page, NULL))
4696		goto unlock;
4697
4698	/* Now we know that a valid freelist exists */
4699	map = get_map(s, page);
4700	for_each_object(p, s, addr, page->objects) {
4701		u8 val = test_bit(__obj_to_index(s, addr, p), map) ?
4702			 SLUB_RED_INACTIVE : SLUB_RED_ACTIVE;
 
 
 
 
 
 
 
 
 
4703
4704		if (!check_object(s, page, p, val))
4705			break;
4706	}
4707	put_map(map);
4708unlock:
4709	slab_unlock(page);
4710}
4711
4712static int validate_slab_node(struct kmem_cache *s,
4713		struct kmem_cache_node *n)
4714{
4715	unsigned long count = 0;
4716	struct page *page;
4717	unsigned long flags;
4718
4719	spin_lock_irqsave(&n->list_lock, flags);
4720
4721	list_for_each_entry(page, &n->partial, slab_list) {
4722		validate_slab(s, page);
4723		count++;
4724	}
4725	if (count != n->nr_partial) {
4726		pr_err("SLUB %s: %ld partial slabs counted but counter=%ld\n",
4727		       s->name, count, n->nr_partial);
4728		slab_add_kunit_errors();
4729	}
4730
4731	if (!(s->flags & SLAB_STORE_USER))
4732		goto out;
4733
4734	list_for_each_entry(page, &n->full, slab_list) {
4735		validate_slab(s, page);
4736		count++;
4737	}
4738	if (count != atomic_long_read(&n->nr_slabs)) {
4739		pr_err("SLUB: %s %ld slabs counted but counter=%ld\n",
4740		       s->name, count, atomic_long_read(&n->nr_slabs));
4741		slab_add_kunit_errors();
4742	}
4743
4744out:
4745	spin_unlock_irqrestore(&n->list_lock, flags);
4746	return count;
4747}
4748
4749long validate_slab_cache(struct kmem_cache *s)
4750{
4751	int node;
4752	unsigned long count = 0;
 
 
4753	struct kmem_cache_node *n;
4754
 
 
 
4755	flush_all(s);
4756	for_each_kmem_cache_node(s, node, n)
4757		count += validate_slab_node(s, n);
4758
4759	return count;
4760}
4761EXPORT_SYMBOL(validate_slab_cache);
4762
4763#ifdef CONFIG_DEBUG_FS
4764/*
4765 * Generate lists of code addresses where slabcache objects are allocated
4766 * and freed.
4767 */
4768
4769struct location {
4770	unsigned long count;
4771	unsigned long addr;
4772	long long sum_time;
4773	long min_time;
4774	long max_time;
4775	long min_pid;
4776	long max_pid;
4777	DECLARE_BITMAP(cpus, NR_CPUS);
4778	nodemask_t nodes;
4779};
4780
4781struct loc_track {
4782	unsigned long max;
4783	unsigned long count;
4784	struct location *loc;
4785};
4786
4787static struct dentry *slab_debugfs_root;
4788
4789static void free_loc_track(struct loc_track *t)
4790{
4791	if (t->max)
4792		free_pages((unsigned long)t->loc,
4793			get_order(sizeof(struct location) * t->max));
4794}
4795
4796static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
4797{
4798	struct location *l;
4799	int order;
4800
4801	order = get_order(sizeof(struct location) * max);
4802
4803	l = (void *)__get_free_pages(flags, order);
4804	if (!l)
4805		return 0;
4806
4807	if (t->count) {
4808		memcpy(l, t->loc, sizeof(struct location) * t->count);
4809		free_loc_track(t);
4810	}
4811	t->max = max;
4812	t->loc = l;
4813	return 1;
4814}
4815
4816static int add_location(struct loc_track *t, struct kmem_cache *s,
4817				const struct track *track)
4818{
4819	long start, end, pos;
4820	struct location *l;
4821	unsigned long caddr;
4822	unsigned long age = jiffies - track->when;
4823
4824	start = -1;
4825	end = t->count;
4826
4827	for ( ; ; ) {
4828		pos = start + (end - start + 1) / 2;
4829
4830		/*
4831		 * There is nothing at "end". If we end up there
4832		 * we need to add something to before end.
4833		 */
4834		if (pos == end)
4835			break;
4836
4837		caddr = t->loc[pos].addr;
4838		if (track->addr == caddr) {
4839
4840			l = &t->loc[pos];
4841			l->count++;
4842			if (track->when) {
4843				l->sum_time += age;
4844				if (age < l->min_time)
4845					l->min_time = age;
4846				if (age > l->max_time)
4847					l->max_time = age;
4848
4849				if (track->pid < l->min_pid)
4850					l->min_pid = track->pid;
4851				if (track->pid > l->max_pid)
4852					l->max_pid = track->pid;
4853
4854				cpumask_set_cpu(track->cpu,
4855						to_cpumask(l->cpus));
4856			}
4857			node_set(page_to_nid(virt_to_page(track)), l->nodes);
4858			return 1;
4859		}
4860
4861		if (track->addr < caddr)
4862			end = pos;
4863		else
4864			start = pos;
4865	}
4866
4867	/*
4868	 * Not found. Insert new tracking element.
4869	 */
4870	if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
4871		return 0;
4872
4873	l = t->loc + pos;
4874	if (pos < t->count)
4875		memmove(l + 1, l,
4876			(t->count - pos) * sizeof(struct location));
4877	t->count++;
4878	l->count = 1;
4879	l->addr = track->addr;
4880	l->sum_time = age;
4881	l->min_time = age;
4882	l->max_time = age;
4883	l->min_pid = track->pid;
4884	l->max_pid = track->pid;
4885	cpumask_clear(to_cpumask(l->cpus));
4886	cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
4887	nodes_clear(l->nodes);
4888	node_set(page_to_nid(virt_to_page(track)), l->nodes);
4889	return 1;
4890}
4891
4892static void process_slab(struct loc_track *t, struct kmem_cache *s,
4893		struct page *page, enum track_item alloc)
 
4894{
4895	void *addr = page_address(page);
4896	void *p;
4897	unsigned long *map;
4898
4899	map = get_map(s, page);
 
 
4900	for_each_object(p, s, addr, page->objects)
4901		if (!test_bit(__obj_to_index(s, addr, p), map))
4902			add_location(t, s, get_track(s, p, alloc));
4903	put_map(map);
4904}
4905#endif  /* CONFIG_DEBUG_FS   */
4906#endif	/* CONFIG_SLUB_DEBUG */
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4907
4908#ifdef CONFIG_SYSFS
4909enum slab_stat_type {
4910	SL_ALL,			/* All slabs */
4911	SL_PARTIAL,		/* Only partially allocated slabs */
4912	SL_CPU,			/* Only slabs used for cpu caches */
4913	SL_OBJECTS,		/* Determine allocated objects not slabs */
4914	SL_TOTAL		/* Determine object capacity not slabs */
4915};
4916
4917#define SO_ALL		(1 << SL_ALL)
4918#define SO_PARTIAL	(1 << SL_PARTIAL)
4919#define SO_CPU		(1 << SL_CPU)
4920#define SO_OBJECTS	(1 << SL_OBJECTS)
4921#define SO_TOTAL	(1 << SL_TOTAL)
4922
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4923static ssize_t show_slab_objects(struct kmem_cache *s,
4924				 char *buf, unsigned long flags)
4925{
4926	unsigned long total = 0;
4927	int node;
4928	int x;
4929	unsigned long *nodes;
4930	int len = 0;
4931
4932	nodes = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL);
4933	if (!nodes)
4934		return -ENOMEM;
4935
4936	if (flags & SO_CPU) {
4937		int cpu;
4938
4939		for_each_possible_cpu(cpu) {
4940			struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab,
4941							       cpu);
4942			int node;
4943			struct page *page;
4944
4945			page = READ_ONCE(c->page);
4946			if (!page)
4947				continue;
4948
4949			node = page_to_nid(page);
4950			if (flags & SO_TOTAL)
4951				x = page->objects;
4952			else if (flags & SO_OBJECTS)
4953				x = page->inuse;
4954			else
4955				x = 1;
4956
4957			total += x;
4958			nodes[node] += x;
4959
4960			page = slub_percpu_partial_read_once(c);
4961			if (page) {
4962				node = page_to_nid(page);
4963				if (flags & SO_TOTAL)
4964					WARN_ON_ONCE(1);
4965				else if (flags & SO_OBJECTS)
4966					WARN_ON_ONCE(1);
4967				else
4968					x = page->pages;
4969				total += x;
4970				nodes[node] += x;
4971			}
4972		}
4973	}
4974
4975	/*
4976	 * It is impossible to take "mem_hotplug_lock" here with "kernfs_mutex"
4977	 * already held which will conflict with an existing lock order:
4978	 *
4979	 * mem_hotplug_lock->slab_mutex->kernfs_mutex
4980	 *
4981	 * We don't really need mem_hotplug_lock (to hold off
4982	 * slab_mem_going_offline_callback) here because slab's memory hot
4983	 * unplug code doesn't destroy the kmem_cache->node[] data.
4984	 */
4985
4986#ifdef CONFIG_SLUB_DEBUG
4987	if (flags & SO_ALL) {
4988		struct kmem_cache_node *n;
4989
4990		for_each_kmem_cache_node(s, node, n) {
4991
4992			if (flags & SO_TOTAL)
4993				x = atomic_long_read(&n->total_objects);
4994			else if (flags & SO_OBJECTS)
4995				x = atomic_long_read(&n->total_objects) -
4996					count_partial(n, count_free);
4997			else
4998				x = atomic_long_read(&n->nr_slabs);
4999			total += x;
5000			nodes[node] += x;
5001		}
5002
5003	} else
5004#endif
5005	if (flags & SO_PARTIAL) {
5006		struct kmem_cache_node *n;
5007
5008		for_each_kmem_cache_node(s, node, n) {
5009			if (flags & SO_TOTAL)
5010				x = count_partial(n, count_total);
5011			else if (flags & SO_OBJECTS)
5012				x = count_partial(n, count_inuse);
5013			else
5014				x = n->nr_partial;
5015			total += x;
5016			nodes[node] += x;
5017		}
5018	}
5019
5020	len += sysfs_emit_at(buf, len, "%lu", total);
5021#ifdef CONFIG_NUMA
5022	for (node = 0; node < nr_node_ids; node++) {
5023		if (nodes[node])
5024			len += sysfs_emit_at(buf, len, " N%d=%lu",
5025					     node, nodes[node]);
5026	}
5027#endif
5028	len += sysfs_emit_at(buf, len, "\n");
5029	kfree(nodes);
 
 
5030
5031	return len;
 
 
 
 
 
 
 
 
 
 
5032}
 
5033
5034#define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
5035#define to_slab(n) container_of(n, struct kmem_cache, kobj)
5036
5037struct slab_attribute {
5038	struct attribute attr;
5039	ssize_t (*show)(struct kmem_cache *s, char *buf);
5040	ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
5041};
5042
5043#define SLAB_ATTR_RO(_name) \
5044	static struct slab_attribute _name##_attr = \
5045	__ATTR(_name, 0400, _name##_show, NULL)
5046
5047#define SLAB_ATTR(_name) \
5048	static struct slab_attribute _name##_attr =  \
5049	__ATTR(_name, 0600, _name##_show, _name##_store)
5050
5051static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
5052{
5053	return sysfs_emit(buf, "%u\n", s->size);
5054}
5055SLAB_ATTR_RO(slab_size);
5056
5057static ssize_t align_show(struct kmem_cache *s, char *buf)
5058{
5059	return sysfs_emit(buf, "%u\n", s->align);
5060}
5061SLAB_ATTR_RO(align);
5062
5063static ssize_t object_size_show(struct kmem_cache *s, char *buf)
5064{
5065	return sysfs_emit(buf, "%u\n", s->object_size);
5066}
5067SLAB_ATTR_RO(object_size);
5068
5069static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
5070{
5071	return sysfs_emit(buf, "%u\n", oo_objects(s->oo));
5072}
5073SLAB_ATTR_RO(objs_per_slab);
5074
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5075static ssize_t order_show(struct kmem_cache *s, char *buf)
5076{
5077	return sysfs_emit(buf, "%u\n", oo_order(s->oo));
5078}
5079SLAB_ATTR_RO(order);
5080
5081static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
5082{
5083	return sysfs_emit(buf, "%lu\n", s->min_partial);
5084}
5085
5086static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
5087				 size_t length)
5088{
5089	unsigned long min;
5090	int err;
5091
5092	err = kstrtoul(buf, 10, &min);
5093	if (err)
5094		return err;
5095
5096	set_min_partial(s, min);
5097	return length;
5098}
5099SLAB_ATTR(min_partial);
5100
5101static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
5102{
5103	return sysfs_emit(buf, "%u\n", slub_cpu_partial(s));
5104}
5105
5106static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
5107				 size_t length)
5108{
5109	unsigned int objects;
5110	int err;
5111
5112	err = kstrtouint(buf, 10, &objects);
5113	if (err)
5114		return err;
5115	if (objects && !kmem_cache_has_cpu_partial(s))
5116		return -EINVAL;
5117
5118	slub_set_cpu_partial(s, objects);
5119	flush_all(s);
5120	return length;
5121}
5122SLAB_ATTR(cpu_partial);
5123
5124static ssize_t ctor_show(struct kmem_cache *s, char *buf)
5125{
5126	if (!s->ctor)
5127		return 0;
5128	return sysfs_emit(buf, "%pS\n", s->ctor);
5129}
5130SLAB_ATTR_RO(ctor);
5131
5132static ssize_t aliases_show(struct kmem_cache *s, char *buf)
5133{
5134	return sysfs_emit(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
5135}
5136SLAB_ATTR_RO(aliases);
5137
5138static ssize_t partial_show(struct kmem_cache *s, char *buf)
5139{
5140	return show_slab_objects(s, buf, SO_PARTIAL);
5141}
5142SLAB_ATTR_RO(partial);
5143
5144static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
5145{
5146	return show_slab_objects(s, buf, SO_CPU);
5147}
5148SLAB_ATTR_RO(cpu_slabs);
5149
5150static ssize_t objects_show(struct kmem_cache *s, char *buf)
5151{
5152	return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
5153}
5154SLAB_ATTR_RO(objects);
5155
5156static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
5157{
5158	return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
5159}
5160SLAB_ATTR_RO(objects_partial);
5161
5162static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
5163{
5164	int objects = 0;
5165	int pages = 0;
5166	int cpu;
5167	int len = 0;
5168
5169	for_each_online_cpu(cpu) {
5170		struct page *page;
5171
5172		page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
5173
5174		if (page) {
5175			pages += page->pages;
5176			objects += page->pobjects;
5177		}
5178	}
5179
5180	len += sysfs_emit_at(buf, len, "%d(%d)", objects, pages);
5181
5182#ifdef CONFIG_SMP
5183	for_each_online_cpu(cpu) {
5184		struct page *page;
5185
5186		page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
5187		if (page)
5188			len += sysfs_emit_at(buf, len, " C%d=%d(%d)",
5189					     cpu, page->pobjects, page->pages);
 
5190	}
5191#endif
5192	len += sysfs_emit_at(buf, len, "\n");
5193
5194	return len;
5195}
5196SLAB_ATTR_RO(slabs_cpu_partial);
5197
5198static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
5199{
5200	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
 
 
 
 
 
 
 
 
 
5201}
5202SLAB_ATTR_RO(reclaim_account);
5203
5204static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
5205{
5206	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
5207}
5208SLAB_ATTR_RO(hwcache_align);
5209
5210#ifdef CONFIG_ZONE_DMA
5211static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
5212{
5213	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
5214}
5215SLAB_ATTR_RO(cache_dma);
5216#endif
5217
5218static ssize_t usersize_show(struct kmem_cache *s, char *buf)
5219{
5220	return sysfs_emit(buf, "%u\n", s->usersize);
5221}
5222SLAB_ATTR_RO(usersize);
5223
5224static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
5225{
5226	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU));
5227}
5228SLAB_ATTR_RO(destroy_by_rcu);
5229
 
 
 
 
 
 
5230#ifdef CONFIG_SLUB_DEBUG
5231static ssize_t slabs_show(struct kmem_cache *s, char *buf)
5232{
5233	return show_slab_objects(s, buf, SO_ALL);
5234}
5235SLAB_ATTR_RO(slabs);
5236
5237static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
5238{
5239	return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
5240}
5241SLAB_ATTR_RO(total_objects);
5242
5243static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
5244{
5245	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS));
5246}
5247SLAB_ATTR_RO(sanity_checks);
 
 
 
 
 
 
 
 
 
 
 
5248
5249static ssize_t trace_show(struct kmem_cache *s, char *buf)
5250{
5251	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TRACE));
5252}
5253SLAB_ATTR_RO(trace);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5254
5255static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
5256{
5257	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
5258}
5259
5260SLAB_ATTR_RO(red_zone);
 
 
 
 
 
 
 
 
 
 
 
 
 
5261
5262static ssize_t poison_show(struct kmem_cache *s, char *buf)
5263{
5264	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_POISON));
5265}
5266
5267SLAB_ATTR_RO(poison);
 
 
 
 
 
 
 
 
 
 
 
 
 
5268
5269static ssize_t store_user_show(struct kmem_cache *s, char *buf)
5270{
5271	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
5272}
5273
5274SLAB_ATTR_RO(store_user);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5275
5276static ssize_t validate_show(struct kmem_cache *s, char *buf)
5277{
5278	return 0;
5279}
5280
5281static ssize_t validate_store(struct kmem_cache *s,
5282			const char *buf, size_t length)
5283{
5284	int ret = -EINVAL;
5285
5286	if (buf[0] == '1') {
5287		ret = validate_slab_cache(s);
5288		if (ret >= 0)
5289			ret = length;
5290	}
5291	return ret;
5292}
5293SLAB_ATTR(validate);
5294
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5295#endif /* CONFIG_SLUB_DEBUG */
5296
5297#ifdef CONFIG_FAILSLAB
5298static ssize_t failslab_show(struct kmem_cache *s, char *buf)
5299{
5300	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
5301}
5302SLAB_ATTR_RO(failslab);
 
 
 
 
 
 
 
 
 
 
 
 
5303#endif
5304
5305static ssize_t shrink_show(struct kmem_cache *s, char *buf)
5306{
5307	return 0;
5308}
5309
5310static ssize_t shrink_store(struct kmem_cache *s,
5311			const char *buf, size_t length)
5312{
5313	if (buf[0] == '1')
5314		kmem_cache_shrink(s);
5315	else
5316		return -EINVAL;
5317	return length;
5318}
5319SLAB_ATTR(shrink);
5320
5321#ifdef CONFIG_NUMA
5322static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
5323{
5324	return sysfs_emit(buf, "%u\n", s->remote_node_defrag_ratio / 10);
5325}
5326
5327static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
5328				const char *buf, size_t length)
5329{
5330	unsigned int ratio;
5331	int err;
5332
5333	err = kstrtouint(buf, 10, &ratio);
5334	if (err)
5335		return err;
5336	if (ratio > 100)
5337		return -ERANGE;
5338
5339	s->remote_node_defrag_ratio = ratio * 10;
5340
5341	return length;
5342}
5343SLAB_ATTR(remote_node_defrag_ratio);
5344#endif
5345
5346#ifdef CONFIG_SLUB_STATS
5347static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
5348{
5349	unsigned long sum  = 0;
5350	int cpu;
5351	int len = 0;
5352	int *data = kmalloc_array(nr_cpu_ids, sizeof(int), GFP_KERNEL);
5353
5354	if (!data)
5355		return -ENOMEM;
5356
5357	for_each_online_cpu(cpu) {
5358		unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si];
5359
5360		data[cpu] = x;
5361		sum += x;
5362	}
5363
5364	len += sysfs_emit_at(buf, len, "%lu", sum);
5365
5366#ifdef CONFIG_SMP
5367	for_each_online_cpu(cpu) {
5368		if (data[cpu])
5369			len += sysfs_emit_at(buf, len, " C%d=%u",
5370					     cpu, data[cpu]);
5371	}
5372#endif
5373	kfree(data);
5374	len += sysfs_emit_at(buf, len, "\n");
5375
5376	return len;
5377}
5378
5379static void clear_stat(struct kmem_cache *s, enum stat_item si)
5380{
5381	int cpu;
5382
5383	for_each_online_cpu(cpu)
5384		per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0;
5385}
5386
5387#define STAT_ATTR(si, text) 					\
5388static ssize_t text##_show(struct kmem_cache *s, char *buf)	\
5389{								\
5390	return show_stat(s, buf, si);				\
5391}								\
5392static ssize_t text##_store(struct kmem_cache *s,		\
5393				const char *buf, size_t length)	\
5394{								\
5395	if (buf[0] != '0')					\
5396		return -EINVAL;					\
5397	clear_stat(s, si);					\
5398	return length;						\
5399}								\
5400SLAB_ATTR(text);						\
5401
5402STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
5403STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
5404STAT_ATTR(FREE_FASTPATH, free_fastpath);
5405STAT_ATTR(FREE_SLOWPATH, free_slowpath);
5406STAT_ATTR(FREE_FROZEN, free_frozen);
5407STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
5408STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
5409STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
5410STAT_ATTR(ALLOC_SLAB, alloc_slab);
5411STAT_ATTR(ALLOC_REFILL, alloc_refill);
5412STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch);
5413STAT_ATTR(FREE_SLAB, free_slab);
5414STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
5415STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
5416STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
5417STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
5418STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
5419STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
5420STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass);
5421STAT_ATTR(ORDER_FALLBACK, order_fallback);
5422STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
5423STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
5424STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc);
5425STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free);
5426STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
5427STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain);
5428#endif	/* CONFIG_SLUB_STATS */
5429
5430static struct attribute *slab_attrs[] = {
5431	&slab_size_attr.attr,
5432	&object_size_attr.attr,
5433	&objs_per_slab_attr.attr,
5434	&order_attr.attr,
5435	&min_partial_attr.attr,
5436	&cpu_partial_attr.attr,
5437	&objects_attr.attr,
5438	&objects_partial_attr.attr,
5439	&partial_attr.attr,
5440	&cpu_slabs_attr.attr,
5441	&ctor_attr.attr,
5442	&aliases_attr.attr,
5443	&align_attr.attr,
5444	&hwcache_align_attr.attr,
5445	&reclaim_account_attr.attr,
5446	&destroy_by_rcu_attr.attr,
5447	&shrink_attr.attr,
 
5448	&slabs_cpu_partial_attr.attr,
5449#ifdef CONFIG_SLUB_DEBUG
5450	&total_objects_attr.attr,
5451	&slabs_attr.attr,
5452	&sanity_checks_attr.attr,
5453	&trace_attr.attr,
5454	&red_zone_attr.attr,
5455	&poison_attr.attr,
5456	&store_user_attr.attr,
5457	&validate_attr.attr,
 
 
5458#endif
5459#ifdef CONFIG_ZONE_DMA
5460	&cache_dma_attr.attr,
5461#endif
5462#ifdef CONFIG_NUMA
5463	&remote_node_defrag_ratio_attr.attr,
5464#endif
5465#ifdef CONFIG_SLUB_STATS
5466	&alloc_fastpath_attr.attr,
5467	&alloc_slowpath_attr.attr,
5468	&free_fastpath_attr.attr,
5469	&free_slowpath_attr.attr,
5470	&free_frozen_attr.attr,
5471	&free_add_partial_attr.attr,
5472	&free_remove_partial_attr.attr,
5473	&alloc_from_partial_attr.attr,
5474	&alloc_slab_attr.attr,
5475	&alloc_refill_attr.attr,
5476	&alloc_node_mismatch_attr.attr,
5477	&free_slab_attr.attr,
5478	&cpuslab_flush_attr.attr,
5479	&deactivate_full_attr.attr,
5480	&deactivate_empty_attr.attr,
5481	&deactivate_to_head_attr.attr,
5482	&deactivate_to_tail_attr.attr,
5483	&deactivate_remote_frees_attr.attr,
5484	&deactivate_bypass_attr.attr,
5485	&order_fallback_attr.attr,
5486	&cmpxchg_double_fail_attr.attr,
5487	&cmpxchg_double_cpu_fail_attr.attr,
5488	&cpu_partial_alloc_attr.attr,
5489	&cpu_partial_free_attr.attr,
5490	&cpu_partial_node_attr.attr,
5491	&cpu_partial_drain_attr.attr,
5492#endif
5493#ifdef CONFIG_FAILSLAB
5494	&failslab_attr.attr,
5495#endif
5496	&usersize_attr.attr,
5497
5498	NULL
5499};
5500
5501static const struct attribute_group slab_attr_group = {
5502	.attrs = slab_attrs,
5503};
5504
5505static ssize_t slab_attr_show(struct kobject *kobj,
5506				struct attribute *attr,
5507				char *buf)
5508{
5509	struct slab_attribute *attribute;
5510	struct kmem_cache *s;
5511	int err;
5512
5513	attribute = to_slab_attr(attr);
5514	s = to_slab(kobj);
5515
5516	if (!attribute->show)
5517		return -EIO;
5518
5519	err = attribute->show(s, buf);
5520
5521	return err;
5522}
5523
5524static ssize_t slab_attr_store(struct kobject *kobj,
5525				struct attribute *attr,
5526				const char *buf, size_t len)
5527{
5528	struct slab_attribute *attribute;
5529	struct kmem_cache *s;
5530	int err;
5531
5532	attribute = to_slab_attr(attr);
5533	s = to_slab(kobj);
5534
5535	if (!attribute->store)
5536		return -EIO;
5537
5538	err = attribute->store(s, buf, len);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5539	return err;
5540}
5541
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5542static void kmem_cache_release(struct kobject *k)
5543{
5544	slab_kmem_cache_release(to_slab(k));
5545}
5546
5547static const struct sysfs_ops slab_sysfs_ops = {
5548	.show = slab_attr_show,
5549	.store = slab_attr_store,
5550};
5551
5552static struct kobj_type slab_ktype = {
5553	.sysfs_ops = &slab_sysfs_ops,
5554	.release = kmem_cache_release,
5555};
5556
 
 
 
 
 
 
 
 
 
 
 
 
 
5557static struct kset *slab_kset;
5558
5559static inline struct kset *cache_kset(struct kmem_cache *s)
5560{
 
 
 
 
5561	return slab_kset;
5562}
5563
5564#define ID_STR_LENGTH 64
5565
5566/* Create a unique string id for a slab cache:
5567 *
5568 * Format	:[flags-]size
5569 */
5570static char *create_unique_id(struct kmem_cache *s)
5571{
5572	char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
5573	char *p = name;
5574
5575	BUG_ON(!name);
5576
5577	*p++ = ':';
5578	/*
5579	 * First flags affecting slabcache operations. We will only
5580	 * get here for aliasable slabs so we do not need to support
5581	 * too many flags. The flags here must cover all flags that
5582	 * are matched during merging to guarantee that the id is
5583	 * unique.
5584	 */
5585	if (s->flags & SLAB_CACHE_DMA)
5586		*p++ = 'd';
5587	if (s->flags & SLAB_CACHE_DMA32)
5588		*p++ = 'D';
5589	if (s->flags & SLAB_RECLAIM_ACCOUNT)
5590		*p++ = 'a';
5591	if (s->flags & SLAB_CONSISTENCY_CHECKS)
5592		*p++ = 'F';
5593	if (s->flags & SLAB_ACCOUNT)
5594		*p++ = 'A';
5595	if (p != name + 1)
5596		*p++ = '-';
5597	p += sprintf(p, "%07u", s->size);
5598
5599	BUG_ON(p > name + ID_STR_LENGTH - 1);
5600	return name;
5601}
5602
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5603static int sysfs_slab_add(struct kmem_cache *s)
5604{
5605	int err;
5606	const char *name;
5607	struct kset *kset = cache_kset(s);
5608	int unmergeable = slab_unmergeable(s);
5609
 
 
5610	if (!kset) {
5611		kobject_init(&s->kobj, &slab_ktype);
5612		return 0;
5613	}
5614
5615	if (!unmergeable && disable_higher_order_debug &&
5616			(slub_debug & DEBUG_METADATA_FLAGS))
5617		unmergeable = 1;
5618
5619	if (unmergeable) {
5620		/*
5621		 * Slabcache can never be merged so we can use the name proper.
5622		 * This is typically the case for debug situations. In that
5623		 * case we can catch duplicate names easily.
5624		 */
5625		sysfs_remove_link(&slab_kset->kobj, s->name);
5626		name = s->name;
5627	} else {
5628		/*
5629		 * Create a unique name for the slab as a target
5630		 * for the symlinks.
5631		 */
5632		name = create_unique_id(s);
5633	}
5634
5635	s->kobj.kset = kset;
5636	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
5637	if (err)
5638		goto out;
5639
5640	err = sysfs_create_group(&s->kobj, &slab_attr_group);
5641	if (err)
5642		goto out_del_kobj;
5643
 
 
 
 
 
 
 
 
 
 
 
5644	if (!unmergeable) {
5645		/* Setup first alias */
5646		sysfs_slab_alias(s, s->name);
5647	}
5648out:
5649	if (!unmergeable)
5650		kfree(name);
5651	return err;
5652out_del_kobj:
5653	kobject_del(&s->kobj);
5654	goto out;
5655}
5656
5657void sysfs_slab_unlink(struct kmem_cache *s)
5658{
5659	if (slab_state >= FULL)
5660		kobject_del(&s->kobj);
 
 
 
 
 
 
 
5661}
5662
5663void sysfs_slab_release(struct kmem_cache *s)
5664{
5665	if (slab_state >= FULL)
5666		kobject_put(&s->kobj);
5667}
5668
5669/*
5670 * Need to buffer aliases during bootup until sysfs becomes
5671 * available lest we lose that information.
5672 */
5673struct saved_alias {
5674	struct kmem_cache *s;
5675	const char *name;
5676	struct saved_alias *next;
5677};
5678
5679static struct saved_alias *alias_list;
5680
5681static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
5682{
5683	struct saved_alias *al;
5684
5685	if (slab_state == FULL) {
5686		/*
5687		 * If we have a leftover link then remove it.
5688		 */
5689		sysfs_remove_link(&slab_kset->kobj, name);
5690		return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
5691	}
5692
5693	al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
5694	if (!al)
5695		return -ENOMEM;
5696
5697	al->s = s;
5698	al->name = name;
5699	al->next = alias_list;
5700	alias_list = al;
5701	return 0;
5702}
5703
5704static int __init slab_sysfs_init(void)
5705{
5706	struct kmem_cache *s;
5707	int err;
5708
5709	mutex_lock(&slab_mutex);
5710
5711	slab_kset = kset_create_and_add("slab", NULL, kernel_kobj);
5712	if (!slab_kset) {
5713		mutex_unlock(&slab_mutex);
5714		pr_err("Cannot register slab subsystem.\n");
5715		return -ENOSYS;
5716	}
5717
5718	slab_state = FULL;
5719
5720	list_for_each_entry(s, &slab_caches, list) {
5721		err = sysfs_slab_add(s);
5722		if (err)
5723			pr_err("SLUB: Unable to add boot slab %s to sysfs\n",
5724			       s->name);
5725	}
5726
5727	while (alias_list) {
5728		struct saved_alias *al = alias_list;
5729
5730		alias_list = alias_list->next;
5731		err = sysfs_slab_alias(al->s, al->name);
5732		if (err)
5733			pr_err("SLUB: Unable to add boot slab alias %s to sysfs\n",
5734			       al->name);
5735		kfree(al);
5736	}
5737
5738	mutex_unlock(&slab_mutex);
 
5739	return 0;
5740}
5741
5742__initcall(slab_sysfs_init);
5743#endif /* CONFIG_SYSFS */
5744
5745#if defined(CONFIG_SLUB_DEBUG) && defined(CONFIG_DEBUG_FS)
5746static int slab_debugfs_show(struct seq_file *seq, void *v)
5747{
5748
5749	struct location *l;
5750	unsigned int idx = *(unsigned int *)v;
5751	struct loc_track *t = seq->private;
5752
5753	if (idx < t->count) {
5754		l = &t->loc[idx];
5755
5756		seq_printf(seq, "%7ld ", l->count);
5757
5758		if (l->addr)
5759			seq_printf(seq, "%pS", (void *)l->addr);
5760		else
5761			seq_puts(seq, "<not-available>");
5762
5763		if (l->sum_time != l->min_time) {
5764			seq_printf(seq, " age=%ld/%llu/%ld",
5765				l->min_time, div_u64(l->sum_time, l->count),
5766				l->max_time);
5767		} else
5768			seq_printf(seq, " age=%ld", l->min_time);
5769
5770		if (l->min_pid != l->max_pid)
5771			seq_printf(seq, " pid=%ld-%ld", l->min_pid, l->max_pid);
5772		else
5773			seq_printf(seq, " pid=%ld",
5774				l->min_pid);
5775
5776		if (num_online_cpus() > 1 && !cpumask_empty(to_cpumask(l->cpus)))
5777			seq_printf(seq, " cpus=%*pbl",
5778				 cpumask_pr_args(to_cpumask(l->cpus)));
5779
5780		if (nr_online_nodes > 1 && !nodes_empty(l->nodes))
5781			seq_printf(seq, " nodes=%*pbl",
5782				 nodemask_pr_args(&l->nodes));
5783
5784		seq_puts(seq, "\n");
5785	}
5786
5787	if (!idx && !t->count)
5788		seq_puts(seq, "No data\n");
5789
5790	return 0;
5791}
5792
5793static void slab_debugfs_stop(struct seq_file *seq, void *v)
5794{
5795}
5796
5797static void *slab_debugfs_next(struct seq_file *seq, void *v, loff_t *ppos)
5798{
5799	struct loc_track *t = seq->private;
5800
5801	v = ppos;
5802	++*ppos;
5803	if (*ppos <= t->count)
5804		return v;
5805
5806	return NULL;
5807}
5808
5809static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos)
5810{
5811	return ppos;
5812}
5813
5814static const struct seq_operations slab_debugfs_sops = {
5815	.start  = slab_debugfs_start,
5816	.next   = slab_debugfs_next,
5817	.stop   = slab_debugfs_stop,
5818	.show   = slab_debugfs_show,
5819};
5820
5821static int slab_debug_trace_open(struct inode *inode, struct file *filep)
5822{
5823
5824	struct kmem_cache_node *n;
5825	enum track_item alloc;
5826	int node;
5827	struct loc_track *t = __seq_open_private(filep, &slab_debugfs_sops,
5828						sizeof(struct loc_track));
5829	struct kmem_cache *s = file_inode(filep)->i_private;
5830
5831	if (strcmp(filep->f_path.dentry->d_name.name, "alloc_traces") == 0)
5832		alloc = TRACK_ALLOC;
5833	else
5834		alloc = TRACK_FREE;
5835
5836	if (!alloc_loc_track(t, PAGE_SIZE / sizeof(struct location), GFP_KERNEL))
5837		return -ENOMEM;
5838
5839	/* Push back cpu slabs */
5840	flush_all(s);
5841
5842	for_each_kmem_cache_node(s, node, n) {
5843		unsigned long flags;
5844		struct page *page;
5845
5846		if (!atomic_long_read(&n->nr_slabs))
5847			continue;
5848
5849		spin_lock_irqsave(&n->list_lock, flags);
5850		list_for_each_entry(page, &n->partial, slab_list)
5851			process_slab(t, s, page, alloc);
5852		list_for_each_entry(page, &n->full, slab_list)
5853			process_slab(t, s, page, alloc);
5854		spin_unlock_irqrestore(&n->list_lock, flags);
5855	}
5856
5857	return 0;
5858}
5859
5860static int slab_debug_trace_release(struct inode *inode, struct file *file)
5861{
5862	struct seq_file *seq = file->private_data;
5863	struct loc_track *t = seq->private;
5864
5865	free_loc_track(t);
5866	return seq_release_private(inode, file);
5867}
5868
5869static const struct file_operations slab_debugfs_fops = {
5870	.open    = slab_debug_trace_open,
5871	.read    = seq_read,
5872	.llseek  = seq_lseek,
5873	.release = slab_debug_trace_release,
5874};
5875
5876static void debugfs_slab_add(struct kmem_cache *s)
5877{
5878	struct dentry *slab_cache_dir;
5879
5880	if (unlikely(!slab_debugfs_root))
5881		return;
5882
5883	slab_cache_dir = debugfs_create_dir(s->name, slab_debugfs_root);
5884
5885	debugfs_create_file("alloc_traces", 0400,
5886		slab_cache_dir, s, &slab_debugfs_fops);
5887
5888	debugfs_create_file("free_traces", 0400,
5889		slab_cache_dir, s, &slab_debugfs_fops);
5890}
5891
5892void debugfs_slab_release(struct kmem_cache *s)
5893{
5894	debugfs_remove_recursive(debugfs_lookup(s->name, slab_debugfs_root));
5895}
5896
5897static int __init slab_debugfs_init(void)
5898{
5899	struct kmem_cache *s;
5900
5901	slab_debugfs_root = debugfs_create_dir("slab", NULL);
5902
5903	list_for_each_entry(s, &slab_caches, list)
5904		if (s->flags & SLAB_STORE_USER)
5905			debugfs_slab_add(s);
5906
5907	return 0;
5908
5909}
5910__initcall(slab_debugfs_init);
5911#endif
5912/*
5913 * The /proc/slabinfo ABI
5914 */
5915#ifdef CONFIG_SLUB_DEBUG
5916void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
5917{
5918	unsigned long nr_slabs = 0;
5919	unsigned long nr_objs = 0;
5920	unsigned long nr_free = 0;
5921	int node;
5922	struct kmem_cache_node *n;
5923
5924	for_each_kmem_cache_node(s, node, n) {
5925		nr_slabs += node_nr_slabs(n);
5926		nr_objs += node_nr_objs(n);
5927		nr_free += count_partial(n, count_free);
5928	}
5929
5930	sinfo->active_objs = nr_objs - nr_free;
5931	sinfo->num_objs = nr_objs;
5932	sinfo->active_slabs = nr_slabs;
5933	sinfo->num_slabs = nr_slabs;
5934	sinfo->objects_per_slab = oo_objects(s->oo);
5935	sinfo->cache_order = oo_order(s->oo);
5936}
5937
5938void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s)
5939{
5940}
5941
5942ssize_t slabinfo_write(struct file *file, const char __user *buffer,
5943		       size_t count, loff_t *ppos)
5944{
5945	return -EIO;
5946}
5947#endif /* CONFIG_SLUB_DEBUG */