1/*
2 * SLUB: A slab allocator that limits cache line use instead of queuing
3 * objects in per cpu and per node lists.
4 *
5 * The allocator synchronizes using per slab locks or atomic operations
6 * and only uses a centralized lock to manage a pool of partial slabs.
7 *
8 * (C) 2007 SGI, Christoph Lameter
9 * (C) 2011 Linux Foundation, Christoph Lameter
10 */
11
12#include <linux/mm.h>
13#include <linux/swap.h> /* struct reclaim_state */
14#include <linux/module.h>
15#include <linux/bit_spinlock.h>
16#include <linux/interrupt.h>
17#include <linux/bitops.h>
18#include <linux/slab.h>
19#include <linux/proc_fs.h>
20#include <linux/seq_file.h>
21#include <linux/kmemcheck.h>
22#include <linux/cpu.h>
23#include <linux/cpuset.h>
24#include <linux/mempolicy.h>
25#include <linux/ctype.h>
26#include <linux/debugobjects.h>
27#include <linux/kallsyms.h>
28#include <linux/memory.h>
29#include <linux/math64.h>
30#include <linux/fault-inject.h>
31#include <linux/stacktrace.h>
32#include <linux/prefetch.h>
33
34#include <trace/events/kmem.h>
35
36/*
37 * Lock order:
38 * 1. slub_lock (Global Semaphore)
39 * 2. node->list_lock
40 * 3. slab_lock(page) (Only on some arches and for debugging)
41 *
42 * slub_lock
43 *
44 * The role of the slub_lock is to protect the list of all the slabs
45 * and to synchronize major metadata changes to slab cache structures.
46 *
47 * The slab_lock is only used for debugging and on arches that do not
48 * have the ability to do a cmpxchg_double. It only protects the second
49 * double word in the page struct. Meaning
50 * A. page->freelist -> List of free objects in a page
51 * B. page->counters -> Counters of objects
52 * C. page->frozen -> frozen state
53 *
54 * If a slab is frozen then it is exempt from list management. It is not
55 * on any list. The processor that froze the slab is the one who can
56 * perform list operations on the page. Other processors may put objects
57 * onto the freelist but the processor that froze the slab is the only
58 * one that can retrieve the objects from the page's freelist.
59 *
60 * The list_lock protects the partial and full list on each node and
61 * the partial slab counter. If taken then no new slabs may be added or
62 * removed from the lists, nor may the number of partial slabs be modified.
63 * (Note that the total number of slabs is an atomic value that may be
64 * modified without taking the list lock).
65 *
66 * The list_lock is a centralized lock and thus we avoid taking it as
67 * much as possible. As long as SLUB does not have to handle partial
68 * slabs, operations can continue without any centralized lock. F.e.
69 * allocating a long series of objects that fill up slabs does not require
70 * the list lock.
71 * Interrupts are disabled during allocation and deallocation in order to
72 * make the slab allocator safe to use in the context of an irq. In addition
73 * interrupts are disabled to ensure that the processor does not change
74 * while handling per_cpu slabs, due to kernel preemption.
75 *
76 * SLUB assigns one slab for allocation to each processor.
77 * Allocations only occur from these slabs called cpu slabs.
78 *
79 * Slabs with free elements are kept on a partial list and during regular
80 * operations no list for full slabs is used. If an object in a full slab is
81 * freed then the slab will show up again on the partial lists.
82 * We track full slabs for debugging purposes though because otherwise we
83 * cannot scan all objects.
84 *
85 * Slabs are freed when they become empty. Teardown and setup is
86 * minimal so we rely on the page allocators per cpu caches for
87 * fast frees and allocs.
88 *
89 * Overloading of page flags that are otherwise used for LRU management.
90 *
91 * PageActive The slab is frozen and exempt from list processing.
92 * This means that the slab is dedicated to a purpose
93 * such as satisfying allocations for a specific
94 * processor. Objects may be freed in the slab while
95 * it is frozen but slab_free will then skip the usual
96 * list operations. It is up to the processor holding
97 * the slab to integrate the slab into the slab lists
98 * when the slab is no longer needed.
99 *
100 * One use of this flag is to mark slabs that are
101 * used for allocations. Then such a slab becomes a cpu
102 * slab. The cpu slab may be equipped with an additional
103 * freelist that allows lockless access to
104 * free objects in addition to the regular freelist
105 * that requires the slab lock.
106 *
107 * PageError Slab requires special handling due to debug
108 * options set. This moves slab handling out of
109 * the fast path and disables lockless freelists.
110 */
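/*
 * Hedged usage sketch (illustrative addition, not part of the original
 * file): how a typical consumer drives the slab interface implemented
 * below. The cache and structure names are made up for the example.
 *
 *	struct kmem_cache *example_cache;
 *
 *	example_cache = kmem_cache_create("example_cache",
 *				sizeof(struct example_obj), 0,
 *				SLAB_HWCACHE_ALIGN, NULL);
 *	obj = kmem_cache_alloc(example_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(example_cache, obj);
 *	kmem_cache_destroy(example_cache);
 */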
111
112#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
113 SLAB_TRACE | SLAB_DEBUG_FREE)
114
115static inline int kmem_cache_debug(struct kmem_cache *s)
116{
117#ifdef CONFIG_SLUB_DEBUG
118 return unlikely(s->flags & SLAB_DEBUG_FLAGS);
119#else
120 return 0;
121#endif
122}
123
124/*
125 * Issues still to be resolved:
126 *
127 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
128 *
129 * - Variable sizing of the per node arrays
130 */
131
132/* Enable to test recovery from slab corruption on boot */
133#undef SLUB_RESILIENCY_TEST
134
135/* Enable to log cmpxchg failures */
136#undef SLUB_DEBUG_CMPXCHG
137
138/*
139 * Minimum number of partial slabs. These will be left on the partial
140 * lists even if they are empty. kmem_cache_shrink may reclaim them.
141 */
142#define MIN_PARTIAL 5
143
144/*
145 * Maximum number of desirable partial slabs.
146 * The existence of more partial slabs makes kmem_cache_shrink
147 * sort the partial list by the number of objects in them.
148 */
149#define MAX_PARTIAL 10
150
151#define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
152 SLAB_POISON | SLAB_STORE_USER)
153
154/*
155 * Debugging flags that require metadata to be stored in the slab. These get
156 * disabled when slub_debug=O is used and a cache's min order increases with
157 * metadata.
158 */
159#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
160
161/*
162 * Set of flags that will prevent slab merging
163 */
164#define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
165 SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
166 SLAB_FAILSLAB)
167
168#define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
169 SLAB_CACHE_DMA | SLAB_NOTRACK)
170
171#define OO_SHIFT 16
172#define OO_MASK ((1 << OO_SHIFT) - 1)
173#define MAX_OBJS_PER_PAGE 32767 /* since page.objects is u15 */
174
175/* Internal SLUB flags */
176#define __OBJECT_POISON 0x80000000UL /* Poison object */
177#define __CMPXCHG_DOUBLE 0x40000000UL /* Use cmpxchg_double */
178
179static int kmem_size = sizeof(struct kmem_cache);
180
181#ifdef CONFIG_SMP
182static struct notifier_block slab_notifier;
183#endif
184
185static enum {
186 DOWN, /* No slab functionality available */
187 PARTIAL, /* Kmem_cache_node works */
188 UP, /* Everything works but does not show up in sysfs */
189 SYSFS /* Sysfs up */
190} slab_state = DOWN;
191
192/* A list of all slab caches on the system */
193static DECLARE_RWSEM(slub_lock);
194static LIST_HEAD(slab_caches);
195
196/*
197 * Tracking user of a slab.
198 */
199#define TRACK_ADDRS_COUNT 16
200struct track {
201 unsigned long addr; /* Called from address */
202#ifdef CONFIG_STACKTRACE
203 unsigned long addrs[TRACK_ADDRS_COUNT]; /* Called from address */
204#endif
205 int cpu; /* Was running on cpu */
206 int pid; /* Pid context */
207 unsigned long when; /* When did the operation occur */
208};
209
210enum track_item { TRACK_ALLOC, TRACK_FREE };
211
212#ifdef CONFIG_SYSFS
213static int sysfs_slab_add(struct kmem_cache *);
214static int sysfs_slab_alias(struct kmem_cache *, const char *);
215static void sysfs_slab_remove(struct kmem_cache *);
216
217#else
218static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
219static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
220 { return 0; }
221static inline void sysfs_slab_remove(struct kmem_cache *s)
222{
223 kfree(s->name);
224 kfree(s);
225}
226
227#endif
228
229static inline void stat(const struct kmem_cache *s, enum stat_item si)
230{
231#ifdef CONFIG_SLUB_STATS
232 __this_cpu_inc(s->cpu_slab->stat[si]);
233#endif
234}
235
236/********************************************************************
237 * Core slab cache functions
238 *******************************************************************/
239
240int slab_is_available(void)
241{
242 return slab_state >= UP;
243}
244
245static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
246{
247 return s->node[node];
248}
249
250/* Verify that a pointer has an address that is valid within a slab page */
251static inline int check_valid_pointer(struct kmem_cache *s,
252 struct page *page, const void *object)
253{
254 void *base;
255
256 if (!object)
257 return 1;
258
259 base = page_address(page);
260 if (object < base || object >= base + page->objects * s->size ||
261 (object - base) % s->size) {
262 return 0;
263 }
264
265 return 1;
266}
267
268static inline void *get_freepointer(struct kmem_cache *s, void *object)
269{
270 return *(void **)(object + s->offset);
271}
272
273static void prefetch_freepointer(const struct kmem_cache *s, void *object)
274{
275 prefetch(object + s->offset);
276}
277
278static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
279{
280 void *p;
281
282#ifdef CONFIG_DEBUG_PAGEALLOC
283 probe_kernel_read(&p, (void **)(object + s->offset), sizeof(p));
284#else
285 p = get_freepointer(s, object);
286#endif
287 return p;
288}
289
290static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
291{
292 *(void **)(object + s->offset) = fp;
293}
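/*
 * Illustrative note (not in the original source): the free pointer lives at
 * offset s->offset inside each free object, so a slab's freelist is a singly
 * linked chain threaded through the free objects themselves:
 *
 *	page->freelist -> objA -> objB -> NULL
 *
 * where get_freepointer(s, objA) == objB and set_freepointer() rewrites
 * that link.
 */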
294
295/* Loop over all objects in a slab */
296#define for_each_object(__p, __s, __addr, __objects) \
297 for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\
298 __p += (__s)->size)
299
300/* Determine object index from a given position */
301static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
302{
303 return (p - addr) / s->size;
304}
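/*
 * Hypothetical sketch (not part of the original file): walk every object in
 * a slab page with for_each_object() and recover each object's index with
 * slab_index(). The helper name is made up for illustration.
 */
static inline void walk_objects_example(struct kmem_cache *s, struct page *page)
{
	void *addr = page_address(page);
	void *p;

	for_each_object(p, s, addr, page->objects)
		pr_debug("object %d at %p\n", slab_index(p, s, addr), p);
}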
305
306static inline size_t slab_ksize(const struct kmem_cache *s)
307{
308#ifdef CONFIG_SLUB_DEBUG
309 /*
310 * Debugging requires use of the padding between object
311 * and whatever may come after it.
312 */
313 if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
314 return s->objsize;
315
316#endif
317 /*
318 * If we have the need to store the freelist pointer
319 * back there or track user information then we can
320 * only use the space before that information.
321 */
322 if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
323 return s->inuse;
324 /*
325 * Else we can use all the padding etc for the allocation
326 */
327 return s->size;
328}
329
330static inline int order_objects(int order, unsigned long size, int reserved)
331{
332 return ((PAGE_SIZE << order) - reserved) / size;
333}
334
335static inline struct kmem_cache_order_objects oo_make(int order,
336 unsigned long size, int reserved)
337{
338 struct kmem_cache_order_objects x = {
339 (order << OO_SHIFT) + order_objects(order, size, reserved)
340 };
341
342 return x;
343}
344
345static inline int oo_order(struct kmem_cache_order_objects x)
346{
347 return x.x >> OO_SHIFT;
348}
349
350static inline int oo_objects(struct kmem_cache_order_objects x)
351{
352 return x.x & OO_MASK;
353}
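/*
 * Worked example (illustrative, not in the original source), assuming 4KiB
 * pages: an order-1 slab (8KiB) of 64-byte objects with no reserved bytes
 * gives order_objects(1, 64, 0) == 128, so oo_make(1, 64, 0) stores
 * (1 << OO_SHIFT) + 128 == 0x10080; oo_order() then recovers 1 and
 * oo_objects() recovers 128.
 */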
354
355/*
356 * Per slab locking using the pagelock
357 */
358static __always_inline void slab_lock(struct page *page)
359{
360 bit_spin_lock(PG_locked, &page->flags);
361}
362
363static __always_inline void slab_unlock(struct page *page)
364{
365 __bit_spin_unlock(PG_locked, &page->flags);
366}
367
368/* Interrupts must be disabled (for the fallback code to work right) */
369static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
370 void *freelist_old, unsigned long counters_old,
371 void *freelist_new, unsigned long counters_new,
372 const char *n)
373{
374 VM_BUG_ON(!irqs_disabled());
375#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
376 defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
377 if (s->flags & __CMPXCHG_DOUBLE) {
378 if (cmpxchg_double(&page->freelist, &page->counters,
379 freelist_old, counters_old,
380 freelist_new, counters_new))
381 return 1;
382 } else
383#endif
384 {
385 slab_lock(page);
386 if (page->freelist == freelist_old && page->counters == counters_old) {
387 page->freelist = freelist_new;
388 page->counters = counters_new;
389 slab_unlock(page);
390 return 1;
391 }
392 slab_unlock(page);
393 }
394
395 cpu_relax();
396 stat(s, CMPXCHG_DOUBLE_FAIL);
397
398#ifdef SLUB_DEBUG_CMPXCHG
399 printk(KERN_INFO "%s %s: cmpxchg double redo ", n, s->name);
400#endif
401
402 return 0;
403}
404
405static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
406 void *freelist_old, unsigned long counters_old,
407 void *freelist_new, unsigned long counters_new,
408 const char *n)
409{
410#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
411 defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
412 if (s->flags & __CMPXCHG_DOUBLE) {
413 if (cmpxchg_double(&page->freelist, &page->counters,
414 freelist_old, counters_old,
415 freelist_new, counters_new))
416 return 1;
417 } else
418#endif
419 {
420 unsigned long flags;
421
422 local_irq_save(flags);
423 slab_lock(page);
424 if (page->freelist == freelist_old && page->counters == counters_old) {
425 page->freelist = freelist_new;
426 page->counters = counters_new;
427 slab_unlock(page);
428 local_irq_restore(flags);
429 return 1;
430 }
431 slab_unlock(page);
432 local_irq_restore(flags);
433 }
434
435 cpu_relax();
436 stat(s, CMPXCHG_DOUBLE_FAIL);
437
438#ifdef SLUB_DEBUG_CMPXCHG
439 printk(KERN_INFO "%s %s: cmpxchg double redo ", n, s->name);
440#endif
441
442 return 0;
443}
444
445#ifdef CONFIG_SLUB_DEBUG
446/*
447 * Determine a map of objects in use on a page.
448 *
449 * Node listlock must be held to guarantee that the page does
450 * not vanish from under us.
451 */
452static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
453{
454 void *p;
455 void *addr = page_address(page);
456
457 for (p = page->freelist; p; p = get_freepointer(s, p))
458 set_bit(slab_index(p, s, addr), map);
459}
460
461/*
462 * Debug settings:
463 */
464#ifdef CONFIG_SLUB_DEBUG_ON
465static int slub_debug = DEBUG_DEFAULT_FLAGS;
466#else
467static int slub_debug;
468#endif
469
470static char *slub_debug_slabs;
471static int disable_higher_order_debug;
472
473/*
474 * Object debugging
475 */
476static void print_section(char *text, u8 *addr, unsigned int length)
477{
478 print_hex_dump(KERN_ERR, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
479 length, 1);
480}
481
482static struct track *get_track(struct kmem_cache *s, void *object,
483 enum track_item alloc)
484{
485 struct track *p;
486
487 if (s->offset)
488 p = object + s->offset + sizeof(void *);
489 else
490 p = object + s->inuse;
491
492 return p + alloc;
493}
494
495static void set_track(struct kmem_cache *s, void *object,
496 enum track_item alloc, unsigned long addr)
497{
498 struct track *p = get_track(s, object, alloc);
499
500 if (addr) {
501#ifdef CONFIG_STACKTRACE
502 struct stack_trace trace;
503 int i;
504
505 trace.nr_entries = 0;
506 trace.max_entries = TRACK_ADDRS_COUNT;
507 trace.entries = p->addrs;
508 trace.skip = 3;
509 save_stack_trace(&trace);
510
511 /* See rant in lockdep.c */
512 if (trace.nr_entries != 0 &&
513 trace.entries[trace.nr_entries - 1] == ULONG_MAX)
514 trace.nr_entries--;
515
516 for (i = trace.nr_entries; i < TRACK_ADDRS_COUNT; i++)
517 p->addrs[i] = 0;
518#endif
519 p->addr = addr;
520 p->cpu = smp_processor_id();
521 p->pid = current->pid;
522 p->when = jiffies;
523 } else
524 memset(p, 0, sizeof(struct track));
525}
526
527static void init_tracking(struct kmem_cache *s, void *object)
528{
529 if (!(s->flags & SLAB_STORE_USER))
530 return;
531
532 set_track(s, object, TRACK_FREE, 0UL);
533 set_track(s, object, TRACK_ALLOC, 0UL);
534}
535
536static void print_track(const char *s, struct track *t)
537{
538 if (!t->addr)
539 return;
540
541 printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
542 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
543#ifdef CONFIG_STACKTRACE
544 {
545 int i;
546 for (i = 0; i < TRACK_ADDRS_COUNT; i++)
547 if (t->addrs[i])
548 printk(KERN_ERR "\t%pS\n", (void *)t->addrs[i]);
549 else
550 break;
551 }
552#endif
553}
554
555static void print_tracking(struct kmem_cache *s, void *object)
556{
557 if (!(s->flags & SLAB_STORE_USER))
558 return;
559
560 print_track("Allocated", get_track(s, object, TRACK_ALLOC));
561 print_track("Freed", get_track(s, object, TRACK_FREE));
562}
563
564static void print_page_info(struct page *page)
565{
566 printk(KERN_ERR "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
567 page, page->objects, page->inuse, page->freelist, page->flags);
568
569}
570
571static void slab_bug(struct kmem_cache *s, char *fmt, ...)
572{
573 va_list args;
574 char buf[100];
575
576 va_start(args, fmt);
577 vsnprintf(buf, sizeof(buf), fmt, args);
578 va_end(args);
579 printk(KERN_ERR "========================================"
580 "=====================================\n");
581 printk(KERN_ERR "BUG %s (%s): %s\n", s->name, print_tainted(), buf);
582 printk(KERN_ERR "----------------------------------------"
583 "-------------------------------------\n\n");
584}
585
586static void slab_fix(struct kmem_cache *s, char *fmt, ...)
587{
588 va_list args;
589 char buf[100];
590
591 va_start(args, fmt);
592 vsnprintf(buf, sizeof(buf), fmt, args);
593 va_end(args);
594 printk(KERN_ERR "FIX %s: %s\n", s->name, buf);
595}
596
597static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
598{
599 unsigned int off; /* Offset of last byte */
600 u8 *addr = page_address(page);
601
602 print_tracking(s, p);
603
604 print_page_info(page);
605
606 printk(KERN_ERR "INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
607 p, p - addr, get_freepointer(s, p));
608
609 if (p > addr + 16)
610 print_section("Bytes b4 ", p - 16, 16);
611
612 print_section("Object ", p, min_t(unsigned long, s->objsize,
613 PAGE_SIZE));
614 if (s->flags & SLAB_RED_ZONE)
615 print_section("Redzone ", p + s->objsize,
616 s->inuse - s->objsize);
617
618 if (s->offset)
619 off = s->offset + sizeof(void *);
620 else
621 off = s->inuse;
622
623 if (s->flags & SLAB_STORE_USER)
624 off += 2 * sizeof(struct track);
625
626 if (off != s->size)
627 /* Beginning of the filler is the free pointer */
628 print_section("Padding ", p + off, s->size - off);
629
630 dump_stack();
631}
632
633static void object_err(struct kmem_cache *s, struct page *page,
634 u8 *object, char *reason)
635{
636 slab_bug(s, "%s", reason);
637 print_trailer(s, page, object);
638}
639
640static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...)
641{
642 va_list args;
643 char buf[100];
644
645 va_start(args, fmt);
646 vsnprintf(buf, sizeof(buf), fmt, args);
647 va_end(args);
648 slab_bug(s, "%s", buf);
649 print_page_info(page);
650 dump_stack();
651}
652
653static void init_object(struct kmem_cache *s, void *object, u8 val)
654{
655 u8 *p = object;
656
657 if (s->flags & __OBJECT_POISON) {
658 memset(p, POISON_FREE, s->objsize - 1);
659 p[s->objsize - 1] = POISON_END;
660 }
661
662 if (s->flags & SLAB_RED_ZONE)
663 memset(p + s->objsize, val, s->inuse - s->objsize);
664}
665
666static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
667 void *from, void *to)
668{
669 slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
670 memset(from, data, to - from);
671}
672
673static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
674 u8 *object, char *what,
675 u8 *start, unsigned int value, unsigned int bytes)
676{
677 u8 *fault;
678 u8 *end;
679
680 fault = memchr_inv(start, value, bytes);
681 if (!fault)
682 return 1;
683
684 end = start + bytes;
685 while (end > fault && end[-1] == value)
686 end--;
687
688 slab_bug(s, "%s overwritten", what);
689 printk(KERN_ERR "INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
690 fault, end - 1, fault[0], value);
691 print_trailer(s, page, object);
692
693 restore_bytes(s, what, value, fault, end);
694 return 0;
695}
696
697/*
698 * Object layout:
699 *
700 * object address
701 * Bytes of the object to be managed.
702 * If the freepointer may overlay the object then the free
703 * pointer is the first word of the object.
704 *
705 * Poisoning uses 0x6b (POISON_FREE) and the last byte is
706 * 0xa5 (POISON_END)
707 *
708 * object + s->objsize
709 * Padding to reach word boundary. This is also used for Redzoning.
710 * Padding is extended by another word if Redzoning is enabled and
711 * objsize == inuse.
712 *
713 * We fill with 0xbb (RED_INACTIVE) for inactive objects and with
714 * 0xcc (RED_ACTIVE) for objects in use.
715 *
716 * object + s->inuse
717 * Meta data starts here.
718 *
719 * A. Free pointer (if we cannot overwrite object on free)
720 * B. Tracking data for SLAB_STORE_USER
721 * C. Padding to reach required alignment boundary or at minimum
722 * one word if debugging is on to be able to detect writes
723 * before the word boundary.
724 *
725 * Padding is done using 0x5a (POISON_INUSE)
726 *
727 * object + s->size
728 * Nothing is used beyond s->size.
729 *
730 * If slabcaches are merged then the objsize and inuse boundaries are mostly
731 * ignored. And therefore no slab options that rely on these boundaries
732 * may be used with merged slabcaches.
733 */
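/*
 * Illustrative diagram (not in the original source) of the layout described
 * above for a cache with red zoning, an out-of-object free pointer and
 * SLAB_STORE_USER enabled:
 *
 *	0            objsize        inuse                          size
 *	|<- payload ->|<- red zone ->|<- fp ->|<- 2x track ->|<- pad ->|
 *
 * With no debug options enabled the free pointer simply overlays the start
 * of a free object and no metadata follows the payload.
 */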
734
735static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
736{
737 unsigned long off = s->inuse; /* The end of info */
738
739 if (s->offset)
740 /* Freepointer is placed after the object. */
741 off += sizeof(void *);
742
743 if (s->flags & SLAB_STORE_USER)
744 /* We also have user information there */
745 off += 2 * sizeof(struct track);
746
747 if (s->size == off)
748 return 1;
749
750 return check_bytes_and_report(s, page, p, "Object padding",
751 p + off, POISON_INUSE, s->size - off);
752}
753
754/* Check the pad bytes at the end of a slab page */
755static int slab_pad_check(struct kmem_cache *s, struct page *page)
756{
757 u8 *start;
758 u8 *fault;
759 u8 *end;
760 int length;
761 int remainder;
762
763 if (!(s->flags & SLAB_POISON))
764 return 1;
765
766 start = page_address(page);
767 length = (PAGE_SIZE << compound_order(page)) - s->reserved;
768 end = start + length;
769 remainder = length % s->size;
770 if (!remainder)
771 return 1;
772
773 fault = memchr_inv(end - remainder, POISON_INUSE, remainder);
774 if (!fault)
775 return 1;
776 while (end > fault && end[-1] == POISON_INUSE)
777 end--;
778
779 slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
780 print_section("Padding ", end - remainder, remainder);
781
782 restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
783 return 0;
784}
785
786static int check_object(struct kmem_cache *s, struct page *page,
787 void *object, u8 val)
788{
789 u8 *p = object;
790 u8 *endobject = object + s->objsize;
791
792 if (s->flags & SLAB_RED_ZONE) {
793 if (!check_bytes_and_report(s, page, object, "Redzone",
794 endobject, val, s->inuse - s->objsize))
795 return 0;
796 } else {
797 if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) {
798 check_bytes_and_report(s, page, p, "Alignment padding",
799 endobject, POISON_INUSE, s->inuse - s->objsize);
800 }
801 }
802
803 if (s->flags & SLAB_POISON) {
804 if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
805 (!check_bytes_and_report(s, page, p, "Poison", p,
806 POISON_FREE, s->objsize - 1) ||
807 !check_bytes_and_report(s, page, p, "Poison",
808 p + s->objsize - 1, POISON_END, 1)))
809 return 0;
810 /*
811 * check_pad_bytes cleans up on its own.
812 */
813 check_pad_bytes(s, page, p);
814 }
815
816 if (!s->offset && val == SLUB_RED_ACTIVE)
817 /*
818 * Object and freepointer overlap. Cannot check
819 * freepointer while object is allocated.
820 */
821 return 1;
822
823 /* Check free pointer validity */
824 if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
825 object_err(s, page, p, "Freepointer corrupt");
826 /*
827 * No choice but to zap it and thus lose the remainder
828 * of the free objects in this slab. May cause
829 * another error because the object count is now wrong.
830 */
831 set_freepointer(s, p, NULL);
832 return 0;
833 }
834 return 1;
835}
836
837static int check_slab(struct kmem_cache *s, struct page *page)
838{
839 int maxobj;
840
841 VM_BUG_ON(!irqs_disabled());
842
843 if (!PageSlab(page)) {
844 slab_err(s, page, "Not a valid slab page");
845 return 0;
846 }
847
848 maxobj = order_objects(compound_order(page), s->size, s->reserved);
849 if (page->objects > maxobj) {
850 slab_err(s, page, "objects %u > max %u",
851 page->objects, maxobj);
852 return 0;
853 }
854 if (page->inuse > page->objects) {
855 slab_err(s, page, "inuse %u > max %u",
856 page->inuse, page->objects);
857 return 0;
858 }
859 /* Slab_pad_check fixes things up after itself */
860 slab_pad_check(s, page);
861 return 1;
862}
863
864/*
865 * Determine if a certain object on a page is on the freelist. Must hold the
866 * slab lock to guarantee that the chains are in a consistent state.
867 */
868static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
869{
870 int nr = 0;
871 void *fp;
872 void *object = NULL;
873 unsigned long max_objects;
874
875 fp = page->freelist;
876 while (fp && nr <= page->objects) {
877 if (fp == search)
878 return 1;
879 if (!check_valid_pointer(s, page, fp)) {
880 if (object) {
881 object_err(s, page, object,
882 "Freechain corrupt");
883 set_freepointer(s, object, NULL);
884 break;
885 } else {
886 slab_err(s, page, "Freepointer corrupt");
887 page->freelist = NULL;
888 page->inuse = page->objects;
889 slab_fix(s, "Freelist cleared");
890 return 0;
891 }
892 break;
893 }
894 object = fp;
895 fp = get_freepointer(s, object);
896 nr++;
897 }
898
899 max_objects = order_objects(compound_order(page), s->size, s->reserved);
900 if (max_objects > MAX_OBJS_PER_PAGE)
901 max_objects = MAX_OBJS_PER_PAGE;
902
903 if (page->objects != max_objects) {
904 slab_err(s, page, "Wrong number of objects. Found %d but "
905 "should be %d", page->objects, max_objects);
906 page->objects = max_objects;
907 slab_fix(s, "Number of objects adjusted.");
908 }
909 if (page->inuse != page->objects - nr) {
910 slab_err(s, page, "Wrong object count. Counter is %d but "
911 "counted were %d", page->inuse, page->objects - nr);
912 page->inuse = page->objects - nr;
913 slab_fix(s, "Object count adjusted.");
914 }
915 return search == NULL;
916}
917
918static void trace(struct kmem_cache *s, struct page *page, void *object,
919 int alloc)
920{
921 if (s->flags & SLAB_TRACE) {
922 printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
923 s->name,
924 alloc ? "alloc" : "free",
925 object, page->inuse,
926 page->freelist);
927
928 if (!alloc)
929 print_section("Object ", (void *)object, s->objsize);
930
931 dump_stack();
932 }
933}
934
935/*
936 * Hooks for other subsystems that check memory allocations. In a typical
937 * production configuration these hooks all should produce no code at all.
938 */
939static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
940{
941 flags &= gfp_allowed_mask;
942 lockdep_trace_alloc(flags);
943 might_sleep_if(flags & __GFP_WAIT);
944
945 return should_failslab(s->objsize, flags, s->flags);
946}
947
948static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object)
949{
950 flags &= gfp_allowed_mask;
951 kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
952 kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, flags);
953}
954
955static inline void slab_free_hook(struct kmem_cache *s, void *x)
956{
957 kmemleak_free_recursive(x, s->flags);
958
959 /*
960 * Trouble is that we may no longer disable interrupts in the fast path
961 * So in order to make the debug calls that expect irqs to be
962 * disabled we need to disable interrupts temporarily.
963 */
964#if defined(CONFIG_KMEMCHECK) || defined(CONFIG_LOCKDEP)
965 {
966 unsigned long flags;
967
968 local_irq_save(flags);
969 kmemcheck_slab_free(s, x, s->objsize);
970 debug_check_no_locks_freed(x, s->objsize);
971 local_irq_restore(flags);
972 }
973#endif
974 if (!(s->flags & SLAB_DEBUG_OBJECTS))
975 debug_check_no_obj_freed(x, s->objsize);
976}
977
978/*
979 * Tracking of fully allocated slabs for debugging purposes.
980 *
981 * list_lock must be held.
982 */
983static void add_full(struct kmem_cache *s,
984 struct kmem_cache_node *n, struct page *page)
985{
986 if (!(s->flags & SLAB_STORE_USER))
987 return;
988
989 list_add(&page->lru, &n->full);
990}
991
992/*
993 * list_lock must be held.
994 */
995static void remove_full(struct kmem_cache *s, struct page *page)
996{
997 if (!(s->flags & SLAB_STORE_USER))
998 return;
999
1000 list_del(&page->lru);
1001}
1002
1003/* Tracking of the number of slabs for debugging purposes */
1004static inline unsigned long slabs_node(struct kmem_cache *s, int node)
1005{
1006 struct kmem_cache_node *n = get_node(s, node);
1007
1008 return atomic_long_read(&n->nr_slabs);
1009}
1010
1011static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1012{
1013 return atomic_long_read(&n->nr_slabs);
1014}
1015
1016static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
1017{
1018 struct kmem_cache_node *n = get_node(s, node);
1019
1020 /*
1021 * May be called early in order to allocate a slab for the
1022 * kmem_cache_node structure. Solve the chicken-egg
1023 * dilemma by deferring the increment of the count during
1024 * bootstrap (see early_kmem_cache_node_alloc).
1025 */
1026 if (n) {
1027 atomic_long_inc(&n->nr_slabs);
1028 atomic_long_add(objects, &n->total_objects);
1029 }
1030}
1031static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
1032{
1033 struct kmem_cache_node *n = get_node(s, node);
1034
1035 atomic_long_dec(&n->nr_slabs);
1036 atomic_long_sub(objects, &n->total_objects);
1037}
1038
1039/* Object debug checks for alloc/free paths */
1040static void setup_object_debug(struct kmem_cache *s, struct page *page,
1041 void *object)
1042{
1043 if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
1044 return;
1045
1046 init_object(s, object, SLUB_RED_INACTIVE);
1047 init_tracking(s, object);
1048}
1049
1050static noinline int alloc_debug_processing(struct kmem_cache *s, struct page *page,
1051 void *object, unsigned long addr)
1052{
1053 if (!check_slab(s, page))
1054 goto bad;
1055
1056 if (!check_valid_pointer(s, page, object)) {
1057 object_err(s, page, object, "Freelist Pointer check fails");
1058 goto bad;
1059 }
1060
1061 if (!check_object(s, page, object, SLUB_RED_INACTIVE))
1062 goto bad;
1063
1064 /* Success. Perform special debug activities for allocs */
1065 if (s->flags & SLAB_STORE_USER)
1066 set_track(s, object, TRACK_ALLOC, addr);
1067 trace(s, page, object, 1);
1068 init_object(s, object, SLUB_RED_ACTIVE);
1069 return 1;
1070
1071bad:
1072 if (PageSlab(page)) {
1073 /*
1074 * If this is a slab page then let's do the best we can
1075 * to avoid issues in the future. Marking all objects
1076 * as used avoids touching the remaining objects.
1077 */
1078 slab_fix(s, "Marking all objects used");
1079 page->inuse = page->objects;
1080 page->freelist = NULL;
1081 }
1082 return 0;
1083}
1084
1085static noinline int free_debug_processing(struct kmem_cache *s,
1086 struct page *page, void *object, unsigned long addr)
1087{
1088 unsigned long flags;
1089 int rc = 0;
1090
1091 local_irq_save(flags);
1092 slab_lock(page);
1093
1094 if (!check_slab(s, page))
1095 goto fail;
1096
1097 if (!check_valid_pointer(s, page, object)) {
1098 slab_err(s, page, "Invalid object pointer 0x%p", object);
1099 goto fail;
1100 }
1101
1102 if (on_freelist(s, page, object)) {
1103 object_err(s, page, object, "Object already free");
1104 goto fail;
1105 }
1106
1107 if (!check_object(s, page, object, SLUB_RED_ACTIVE))
1108 goto out;
1109
1110 if (unlikely(s != page->slab)) {
1111 if (!PageSlab(page)) {
1112 slab_err(s, page, "Attempt to free object(0x%p) "
1113 "outside of slab", object);
1114 } else if (!page->slab) {
1115 printk(KERN_ERR
1116 "SLUB <none>: no slab for object 0x%p.\n",
1117 object);
1118 dump_stack();
1119 } else
1120 object_err(s, page, object,
1121 "page slab pointer corrupt.");
1122 goto fail;
1123 }
1124
1125 if (s->flags & SLAB_STORE_USER)
1126 set_track(s, object, TRACK_FREE, addr);
1127 trace(s, page, object, 0);
1128 init_object(s, object, SLUB_RED_INACTIVE);
1129 rc = 1;
1130out:
1131 slab_unlock(page);
1132 local_irq_restore(flags);
1133 return rc;
1134
1135fail:
1136 slab_fix(s, "Object at 0x%p not freed", object);
1137 goto out;
1138}
1139
1140static int __init setup_slub_debug(char *str)
1141{
1142 slub_debug = DEBUG_DEFAULT_FLAGS;
1143 if (*str++ != '=' || !*str)
1144 /*
1145 * No options specified. Switch on full debugging.
1146 */
1147 goto out;
1148
1149 if (*str == ',')
1150 /*
1151 * No options but restriction on slabs. This means full
1152 * debugging for slabs matching a pattern.
1153 */
1154 goto check_slabs;
1155
1156 if (tolower(*str) == 'o') {
1157 /*
1158 * Avoid enabling debugging on caches if their minimum order
1159 * would increase as a result.
1160 */
1161 disable_higher_order_debug = 1;
1162 goto out;
1163 }
1164
1165 slub_debug = 0;
1166 if (*str == '-')
1167 /*
1168 * Switch off all debugging measures.
1169 */
1170 goto out;
1171
1172 /*
1173 * Determine which debug features should be switched on
1174 */
1175 for (; *str && *str != ','; str++) {
1176 switch (tolower(*str)) {
1177 case 'f':
1178 slub_debug |= SLAB_DEBUG_FREE;
1179 break;
1180 case 'z':
1181 slub_debug |= SLAB_RED_ZONE;
1182 break;
1183 case 'p':
1184 slub_debug |= SLAB_POISON;
1185 break;
1186 case 'u':
1187 slub_debug |= SLAB_STORE_USER;
1188 break;
1189 case 't':
1190 slub_debug |= SLAB_TRACE;
1191 break;
1192 case 'a':
1193 slub_debug |= SLAB_FAILSLAB;
1194 break;
1195 default:
1196 printk(KERN_ERR "slub_debug option '%c' "
1197 "unknown. skipped\n", *str);
1198 }
1199 }
1200
1201check_slabs:
1202 if (*str == ',')
1203 slub_debug_slabs = str + 1;
1204out:
1205 return 1;
1206}
1207
1208__setup("slub_debug", setup_slub_debug);
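/*
 * Boot-time examples (illustrative addition, not in the original file) for
 * the parser above:
 *
 *	slub_debug		enable all debug options for every cache
 *	slub_debug=ZP		red zoning and poisoning for every cache
 *	slub_debug=F,dentry	sanity checks, but only for the dentry cache
 *	slub_debug=O		full debugging, skipped on caches whose
 *				minimum order would grow (see 'o' above)
 */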
1209
1210static unsigned long kmem_cache_flags(unsigned long objsize,
1211 unsigned long flags, const char *name,
1212 void (*ctor)(void *))
1213{
1214 /*
1215 * Enable debugging if selected on the kernel commandline.
1216 */
1217 if (slub_debug && (!slub_debug_slabs ||
1218 !strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs))))
1219 flags |= slub_debug;
1220
1221 return flags;
1222}
1223#else
1224static inline void setup_object_debug(struct kmem_cache *s,
1225 struct page *page, void *object) {}
1226
1227static inline int alloc_debug_processing(struct kmem_cache *s,
1228 struct page *page, void *object, unsigned long addr) { return 0; }
1229
1230static inline int free_debug_processing(struct kmem_cache *s,
1231 struct page *page, void *object, unsigned long addr) { return 0; }
1232
1233static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
1234 { return 1; }
1235static inline int check_object(struct kmem_cache *s, struct page *page,
1236 void *object, u8 val) { return 1; }
1237static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
1238 struct page *page) {}
1239static inline void remove_full(struct kmem_cache *s, struct page *page) {}
1240static inline unsigned long kmem_cache_flags(unsigned long objsize,
1241 unsigned long flags, const char *name,
1242 void (*ctor)(void *))
1243{
1244 return flags;
1245}
1246#define slub_debug 0
1247
1248#define disable_higher_order_debug 0
1249
1250static inline unsigned long slabs_node(struct kmem_cache *s, int node)
1251 { return 0; }
1252static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1253 { return 0; }
1254static inline void inc_slabs_node(struct kmem_cache *s, int node,
1255 int objects) {}
1256static inline void dec_slabs_node(struct kmem_cache *s, int node,
1257 int objects) {}
1258
1259static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
1260 { return 0; }
1261
1262static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
1263 void *object) {}
1264
1265static inline void slab_free_hook(struct kmem_cache *s, void *x) {}
1266
1267#endif /* CONFIG_SLUB_DEBUG */
1268
1269/*
1270 * Slab allocation and freeing
1271 */
1272static inline struct page *alloc_slab_page(gfp_t flags, int node,
1273 struct kmem_cache_order_objects oo)
1274{
1275 int order = oo_order(oo);
1276
1277 flags |= __GFP_NOTRACK;
1278
1279 if (node == NUMA_NO_NODE)
1280 return alloc_pages(flags, order);
1281 else
1282 return alloc_pages_exact_node(node, flags, order);
1283}
1284
1285static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
1286{
1287 struct page *page;
1288 struct kmem_cache_order_objects oo = s->oo;
1289 gfp_t alloc_gfp;
1290
1291 flags &= gfp_allowed_mask;
1292
1293 if (flags & __GFP_WAIT)
1294 local_irq_enable();
1295
1296 flags |= s->allocflags;
1297
1298 /*
1299 * Let the initial higher-order allocation fail under memory pressure
1300 * so we fall-back to the minimum order allocation.
1301 */
1302 alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
1303
1304 page = alloc_slab_page(alloc_gfp, node, oo);
1305 if (unlikely(!page)) {
1306 oo = s->min;
1307 /*
1308 * Allocation may have failed due to fragmentation.
1309 * Try a lower order alloc if possible
1310 */
1311 page = alloc_slab_page(flags, node, oo);
1312
1313 if (page)
1314 stat(s, ORDER_FALLBACK);
1315 }
1316
1317 if (flags & __GFP_WAIT)
1318 local_irq_disable();
1319
1320 if (!page)
1321 return NULL;
1322
1323 if (kmemcheck_enabled
1324 && !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
1325 int pages = 1 << oo_order(oo);
1326
1327 kmemcheck_alloc_shadow(page, oo_order(oo), flags, node);
1328
1329 /*
1330 * Objects from caches that have a constructor don't get
1331 * cleared when they're allocated, so we need to do it here.
1332 */
1333 if (s->ctor)
1334 kmemcheck_mark_uninitialized_pages(page, pages);
1335 else
1336 kmemcheck_mark_unallocated_pages(page, pages);
1337 }
1338
1339 page->objects = oo_objects(oo);
1340 mod_zone_page_state(page_zone(page),
1341 (s->flags & SLAB_RECLAIM_ACCOUNT) ?
1342 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
1343 1 << oo_order(oo));
1344
1345 return page;
1346}
1347
1348static void setup_object(struct kmem_cache *s, struct page *page,
1349 void *object)
1350{
1351 setup_object_debug(s, page, object);
1352 if (unlikely(s->ctor))
1353 s->ctor(object);
1354}
1355
1356static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
1357{
1358 struct page *page;
1359 void *start;
1360 void *last;
1361 void *p;
1362
1363 BUG_ON(flags & GFP_SLAB_BUG_MASK);
1364
1365 page = allocate_slab(s,
1366 flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
1367 if (!page)
1368 goto out;
1369
1370 inc_slabs_node(s, page_to_nid(page), page->objects);
1371 page->slab = s;
1372 __SetPageSlab(page);
1373
1374 start = page_address(page);
1375
1376 if (unlikely(s->flags & SLAB_POISON))
1377 memset(start, POISON_INUSE, PAGE_SIZE << compound_order(page));
1378
1379 last = start;
1380 for_each_object(p, s, start, page->objects) {
1381 setup_object(s, page, last);
1382 set_freepointer(s, last, p);
1383 last = p;
1384 }
1385 setup_object(s, page, last);
1386 set_freepointer(s, last, NULL);
1387
1388 page->freelist = start;
1389 page->inuse = page->objects;
1390 page->frozen = 1;
1391out:
1392 return page;
1393}
1394
1395static void __free_slab(struct kmem_cache *s, struct page *page)
1396{
1397 int order = compound_order(page);
1398 int pages = 1 << order;
1399
1400 if (kmem_cache_debug(s)) {
1401 void *p;
1402
1403 slab_pad_check(s, page);
1404 for_each_object(p, s, page_address(page),
1405 page->objects)
1406 check_object(s, page, p, SLUB_RED_INACTIVE);
1407 }
1408
1409 kmemcheck_free_shadow(page, compound_order(page));
1410
1411 mod_zone_page_state(page_zone(page),
1412 (s->flags & SLAB_RECLAIM_ACCOUNT) ?
1413 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
1414 -pages);
1415
1416 __ClearPageSlab(page);
1417 reset_page_mapcount(page);
1418 if (current->reclaim_state)
1419 current->reclaim_state->reclaimed_slab += pages;
1420 __free_pages(page, order);
1421}
1422
1423#define need_reserve_slab_rcu \
1424 (sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
1425
1426static void rcu_free_slab(struct rcu_head *h)
1427{
1428 struct page *page;
1429
1430 if (need_reserve_slab_rcu)
1431 page = virt_to_head_page(h);
1432 else
1433 page = container_of((struct list_head *)h, struct page, lru);
1434
1435 __free_slab(page->slab, page);
1436}
1437
1438static void free_slab(struct kmem_cache *s, struct page *page)
1439{
1440 if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
1441 struct rcu_head *head;
1442
1443 if (need_reserve_slab_rcu) {
1444 int order = compound_order(page);
1445 int offset = (PAGE_SIZE << order) - s->reserved;
1446
1447 VM_BUG_ON(s->reserved != sizeof(*head));
1448 head = page_address(page) + offset;
1449 } else {
1450 /*
1451 * RCU free overloads the RCU head over the LRU
1452 */
1453 head = (void *)&page->lru;
1454 }
1455
1456 call_rcu(head, rcu_free_slab);
1457 } else
1458 __free_slab(s, page);
1459}
1460
1461static void discard_slab(struct kmem_cache *s, struct page *page)
1462{
1463 dec_slabs_node(s, page_to_nid(page), page->objects);
1464 free_slab(s, page);
1465}
1466
1467/*
1468 * Management of partially allocated slabs.
1469 *
1470 * list_lock must be held.
1471 */
1472static inline void add_partial(struct kmem_cache_node *n,
1473 struct page *page, int tail)
1474{
1475 n->nr_partial++;
1476 if (tail == DEACTIVATE_TO_TAIL)
1477 list_add_tail(&page->lru, &n->partial);
1478 else
1479 list_add(&page->lru, &n->partial);
1480}
1481
1482/*
1483 * list_lock must be held.
1484 */
1485static inline void remove_partial(struct kmem_cache_node *n,
1486 struct page *page)
1487{
1488 list_del(&page->lru);
1489 n->nr_partial--;
1490}
1491
1492/*
1493 * Lock slab, remove from the partial list and put the object into the
1494 * per cpu freelist.
1495 *
1496 * Returns a list of objects or NULL if it fails.
1497 *
1498 * Must hold list_lock.
1499 */
1500static inline void *acquire_slab(struct kmem_cache *s,
1501 struct kmem_cache_node *n, struct page *page,
1502 int mode)
1503{
1504 void *freelist;
1505 unsigned long counters;
1506 struct page new;
1507
1508 /*
1509 * Zap the freelist and set the frozen bit.
1510 * The old freelist is the list of objects for the
1511 * per cpu allocation list.
1512 */
1513 do {
1514 freelist = page->freelist;
1515 counters = page->counters;
1516 new.counters = counters;
1517 if (mode) {
1518 new.inuse = page->objects;
1519 new.freelist = NULL;
1520 } else {
1521 new.freelist = freelist;
1522 }
1523
1524 VM_BUG_ON(new.frozen);
1525 new.frozen = 1;
1526
1527 } while (!__cmpxchg_double_slab(s, page,
1528 freelist, counters,
1529 new.freelist, new.counters,
1530 "lock and freeze"));
1531
1532 remove_partial(n, page);
1533 return freelist;
1534}
1535
1536static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
1537
1538/*
1539 * Try to allocate a partial slab from a specific node.
1540 */
1541static void *get_partial_node(struct kmem_cache *s,
1542 struct kmem_cache_node *n, struct kmem_cache_cpu *c)
1543{
1544 struct page *page, *page2;
1545 void *object = NULL;
1546
1547 /*
1548 * Racy check. If we mistakenly see no partial slabs then we
1549 * just allocate an empty slab. If we mistakenly try to get a
1550 * partial slab and there is none available then get_partial()
1551 * will return NULL.
1552 */
1553 if (!n || !n->nr_partial)
1554 return NULL;
1555
1556 spin_lock(&n->list_lock);
1557 list_for_each_entry_safe(page, page2, &n->partial, lru) {
1558 void *t = acquire_slab(s, n, page, object == NULL);
1559 int available;
1560
1561 if (!t)
1562 break;
1563
1564 if (!object) {
1565 c->page = page;
1566 c->node = page_to_nid(page);
1567 stat(s, ALLOC_FROM_PARTIAL);
1568 object = t;
1569 available = page->objects - page->inuse;
1570 } else {
1571 available = put_cpu_partial(s, page, 0);
1572 stat(s, CPU_PARTIAL_NODE);
1573 }
1574 if (kmem_cache_debug(s) || available > s->cpu_partial / 2)
1575 break;
1576
1577 }
1578 spin_unlock(&n->list_lock);
1579 return object;
1580}
1581
1582/*
1583 * Get a page from somewhere. Search in increasing NUMA distances.
1584 */
1585static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
1586 struct kmem_cache_cpu *c)
1587{
1588#ifdef CONFIG_NUMA
1589 struct zonelist *zonelist;
1590 struct zoneref *z;
1591 struct zone *zone;
1592 enum zone_type high_zoneidx = gfp_zone(flags);
1593 void *object;
1594 unsigned int cpuset_mems_cookie;
1595
1596 /*
1597 * The defrag ratio allows a configuration of the tradeoffs between
1598 * inter node defragmentation and node local allocations. A lower
1599 * defrag_ratio increases the tendency to do local allocations
1600 * instead of attempting to obtain partial slabs from other nodes.
1601 *
1602 * If the defrag_ratio is set to 0 then kmalloc() always
1603 * returns node local objects. If the ratio is higher then kmalloc()
1604 * may return off node objects because partial slabs are obtained
1605 * from other nodes and filled up.
1606 *
1607 * If /sys/kernel/slab/xx/defrag_ratio is set to 100 (which makes
1608 * defrag_ratio = 1000) then every (well almost) allocation will
1609 * first attempt to defrag slab caches on other nodes. This means
1610 * scanning over all nodes to look for partial slabs which may be
1611 * expensive if we do it every time we are trying to find a slab
1612 * with available objects.
1613 */
1614 if (!s->remote_node_defrag_ratio ||
1615 get_cycles() % 1024 > s->remote_node_defrag_ratio)
1616 return NULL;
1617
1618 do {
1619 cpuset_mems_cookie = get_mems_allowed();
1620 zonelist = node_zonelist(slab_node(current->mempolicy), flags);
1621 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
1622 struct kmem_cache_node *n;
1623
1624 n = get_node(s, zone_to_nid(zone));
1625
1626 if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
1627 n->nr_partial > s->min_partial) {
1628 object = get_partial_node(s, n, c);
1629 if (object) {
1630 /*
1631 * Return the object even if
1632 * put_mems_allowed indicated that
1633 * the cpuset mems_allowed was
1634 * updated in parallel. It's a
1635 * harmless race between the alloc
1636 * and the cpuset update.
1637 */
1638 put_mems_allowed(cpuset_mems_cookie);
1639 return object;
1640 }
1641 }
1642 }
1643 } while (!put_mems_allowed(cpuset_mems_cookie));
1644#endif
1645 return NULL;
1646}
1647
1648/*
1649 * Get a partial page, lock it and return it.
1650 */
1651static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
1652 struct kmem_cache_cpu *c)
1653{
1654 void *object;
1655 int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node;
1656
1657 object = get_partial_node(s, get_node(s, searchnode), c);
1658 if (object || node != NUMA_NO_NODE)
1659 return object;
1660
1661 return get_any_partial(s, flags, c);
1662}
1663
1664#ifdef CONFIG_PREEMPT
1665/*
1666 * Calculate the next globally unique transaction for disambiguation
1667 * during cmpxchg. The transactions start with the cpu number and are then
1668 * incremented by CONFIG_NR_CPUS.
1669 */
1670#define TID_STEP roundup_pow_of_two(CONFIG_NR_CPUS)
1671#else
1672/*
1673 * No preemption supported therefore also no need to check for
1674 * different cpus.
1675 */
1676#define TID_STEP 1
1677#endif
1678
1679static inline unsigned long next_tid(unsigned long tid)
1680{
1681 return tid + TID_STEP;
1682}
1683
1684static inline unsigned int tid_to_cpu(unsigned long tid)
1685{
1686 return tid % TID_STEP;
1687}
1688
1689static inline unsigned long tid_to_event(unsigned long tid)
1690{
1691 return tid / TID_STEP;
1692}
1693
1694static inline unsigned int init_tid(int cpu)
1695{
1696 return cpu;
1697}
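/*
 * Worked example (illustrative, not in the original source), assuming
 * CONFIG_PREEMPT with CONFIG_NR_CPUS rounding up to 256: cpu 3 starts at
 * tid 3 and successive transactions yield 259, 515, ... so tid_to_cpu()
 * always returns 3 while tid_to_event() counts 0, 1, 2, ...
 */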
1698
1699static inline void note_cmpxchg_failure(const char *n,
1700 const struct kmem_cache *s, unsigned long tid)
1701{
1702#ifdef SLUB_DEBUG_CMPXCHG
1703 unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid);
1704
1705 printk(KERN_INFO "%s %s: cmpxchg redo ", n, s->name);
1706
1707#ifdef CONFIG_PREEMPT
1708 if (tid_to_cpu(tid) != tid_to_cpu(actual_tid))
1709 printk("due to cpu change %d -> %d\n",
1710 tid_to_cpu(tid), tid_to_cpu(actual_tid));
1711 else
1712#endif
1713 if (tid_to_event(tid) != tid_to_event(actual_tid))
1714 printk("due to cpu running other code. Event %ld->%ld\n",
1715 tid_to_event(tid), tid_to_event(actual_tid));
1716 else
1717 printk("for unknown reason: actual=%lx was=%lx target=%lx\n",
1718 actual_tid, tid, next_tid(tid));
1719#endif
1720 stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
1721}
1722
1723void init_kmem_cache_cpus(struct kmem_cache *s)
1724{
1725 int cpu;
1726
1727 for_each_possible_cpu(cpu)
1728 per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu);
1729}
1730
1731/*
1732 * Remove the cpu slab
1733 */
1734static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
1735{
1736 enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
1737 struct page *page = c->page;
1738 struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1739 int lock = 0;
1740 enum slab_modes l = M_NONE, m = M_NONE;
1741 void *freelist;
1742 void *nextfree;
1743 int tail = DEACTIVATE_TO_HEAD;
1744 struct page new;
1745 struct page old;
1746
1747 if (page->freelist) {
1748 stat(s, DEACTIVATE_REMOTE_FREES);
1749 tail = DEACTIVATE_TO_TAIL;
1750 }
1751
1752 c->tid = next_tid(c->tid);
1753 c->page = NULL;
1754 freelist = c->freelist;
1755 c->freelist = NULL;
1756
1757 /*
1758 * Stage one: Free all available per cpu objects back
1759 * to the page freelist while it is still frozen. Leave the
1760 * last one.
1761 *
1762 * There is no need to take the list->lock because the page
1763 * is still frozen.
1764 */
1765 while (freelist && (nextfree = get_freepointer(s, freelist))) {
1766 void *prior;
1767 unsigned long counters;
1768
1769 do {
1770 prior = page->freelist;
1771 counters = page->counters;
1772 set_freepointer(s, freelist, prior);
1773 new.counters = counters;
1774 new.inuse--;
1775 VM_BUG_ON(!new.frozen);
1776
1777 } while (!__cmpxchg_double_slab(s, page,
1778 prior, counters,
1779 freelist, new.counters,
1780 "drain percpu freelist"));
1781
1782 freelist = nextfree;
1783 }
1784
1785 /*
1786 * Stage two: Ensure that the page is unfrozen while the
1787 * list presence reflects the actual number of objects
1788 * during unfreeze.
1789 *
1790 * We setup the list membership and then perform a cmpxchg
1791 * with the count. If there is a mismatch then the page
1792 * is not unfrozen but the page is on the wrong list.
1793 *
1794 * Then we restart the process which may have to remove
1795 * the page from the list that we just put it on again
1796 * because the number of objects in the slab may have
1797 * changed.
1798 */
1799redo:
1800
1801 old.freelist = page->freelist;
1802 old.counters = page->counters;
1803 VM_BUG_ON(!old.frozen);
1804
1805 /* Determine target state of the slab */
1806 new.counters = old.counters;
1807 if (freelist) {
1808 new.inuse--;
1809 set_freepointer(s, freelist, old.freelist);
1810 new.freelist = freelist;
1811 } else
1812 new.freelist = old.freelist;
1813
1814 new.frozen = 0;
1815
1816 if (!new.inuse && n->nr_partial > s->min_partial)
1817 m = M_FREE;
1818 else if (new.freelist) {
1819 m = M_PARTIAL;
1820 if (!lock) {
1821 lock = 1;
1822 /*
1823 * Taking the spinlock removes the possibility
1824 * that acquire_slab() will see a slab page that
1825 * is frozen
1826 */
1827 spin_lock(&n->list_lock);
1828 }
1829 } else {
1830 m = M_FULL;
1831 if (kmem_cache_debug(s) && !lock) {
1832 lock = 1;
1833 /*
1834 * This also ensures that the scanning of full
1835 * slabs from diagnostic functions will not see
1836 * any frozen slabs.
1837 */
1838 spin_lock(&n->list_lock);
1839 }
1840 }
1841
1842 if (l != m) {
1843
1844 if (l == M_PARTIAL)
1845
1846 remove_partial(n, page);
1847
1848 else if (l == M_FULL)
1849
1850 remove_full(s, page);
1851
1852 if (m == M_PARTIAL) {
1853
1854 add_partial(n, page, tail);
1855 stat(s, tail);
1856
1857 } else if (m == M_FULL) {
1858
1859 stat(s, DEACTIVATE_FULL);
1860 add_full(s, n, page);
1861
1862 }
1863 }
1864
1865 l = m;
1866 if (!__cmpxchg_double_slab(s, page,
1867 old.freelist, old.counters,
1868 new.freelist, new.counters,
1869 "unfreezing slab"))
1870 goto redo;
1871
1872 if (lock)
1873 spin_unlock(&n->list_lock);
1874
1875 if (m == M_FREE) {
1876 stat(s, DEACTIVATE_EMPTY);
1877 discard_slab(s, page);
1878 stat(s, FREE_SLAB);
1879 }
1880}
1881
1882/* Unfreeze all the cpu partial slabs */
1883static void unfreeze_partials(struct kmem_cache *s)
1884{
1885 struct kmem_cache_node *n = NULL;
1886 struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab);
1887 struct page *page, *discard_page = NULL;
1888
1889 while ((page = c->partial)) {
1890 enum slab_modes { M_PARTIAL, M_FREE };
1891 enum slab_modes l, m;
1892 struct page new;
1893 struct page old;
1894
1895 c->partial = page->next;
1896 l = M_FREE;
1897
1898 do {
1899
1900 old.freelist = page->freelist;
1901 old.counters = page->counters;
1902 VM_BUG_ON(!old.frozen);
1903
1904 new.counters = old.counters;
1905 new.freelist = old.freelist;
1906
1907 new.frozen = 0;
1908
1909 if (!new.inuse && (!n || n->nr_partial > s->min_partial))
1910 m = M_FREE;
1911 else {
1912 struct kmem_cache_node *n2 = get_node(s,
1913 page_to_nid(page));
1914
1915 m = M_PARTIAL;
1916 if (n != n2) {
1917 if (n)
1918 spin_unlock(&n->list_lock);
1919
1920 n = n2;
1921 spin_lock(&n->list_lock);
1922 }
1923 }
1924
1925 if (l != m) {
1926 if (l == M_PARTIAL) {
1927 remove_partial(n, page);
1928 stat(s, FREE_REMOVE_PARTIAL);
1929 } else {
1930 add_partial(n, page,
1931 DEACTIVATE_TO_TAIL);
1932 stat(s, FREE_ADD_PARTIAL);
1933 }
1934
1935 l = m;
1936 }
1937
1938 } while (!cmpxchg_double_slab(s, page,
1939 old.freelist, old.counters,
1940 new.freelist, new.counters,
1941 "unfreezing slab"));
1942
1943 if (m == M_FREE) {
1944 page->next = discard_page;
1945 discard_page = page;
1946 }
1947 }
1948
1949 if (n)
1950 spin_unlock(&n->list_lock);
1951
1952 while (discard_page) {
1953 page = discard_page;
1954 discard_page = discard_page->next;
1955
1956 stat(s, DEACTIVATE_EMPTY);
1957 discard_slab(s, page);
1958 stat(s, FREE_SLAB);
1959 }
1960}
1961
1962/*
1963 * Put a page that was just frozen (in __slab_free) into a partial page
1964 * slot if available. This is done without interrupts disabled and without
1965 * preemption disabled. The cmpxchg is racy and may put the partial page
1966 * onto a random cpu's partial slot.
1967 *
1968 * If we did not find a slot then simply move all the partials to the
1969 * per node partial list.
1970 */
1971int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
1972{
1973 struct page *oldpage;
1974 int pages;
1975 int pobjects;
1976
1977 do {
1978 pages = 0;
1979 pobjects = 0;
1980 oldpage = this_cpu_read(s->cpu_slab->partial);
1981
1982 if (oldpage) {
1983 pobjects = oldpage->pobjects;
1984 pages = oldpage->pages;
1985 if (drain && pobjects > s->cpu_partial) {
1986 unsigned long flags;
1987 /*
1988 * partial array is full. Move the existing
1989 * set to the per node partial list.
1990 */
1991 local_irq_save(flags);
1992 unfreeze_partials(s);
1993 local_irq_restore(flags);
1994 pobjects = 0;
1995 pages = 0;
1996 stat(s, CPU_PARTIAL_DRAIN);
1997 }
1998 }
1999
2000 pages++;
2001 pobjects += page->objects - page->inuse;
2002
2003 page->pages = pages;
2004 page->pobjects = pobjects;
2005 page->next = oldpage;
2006
2007 } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
2008 return pobjects;
2009}
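/*
 * Worked example (illustrative, not in the original source): with
 * s->cpu_partial == 30 and a freshly frozen page holding 32 objects of which
 * 2 are in use, put_cpu_partial() adds 30 free objects to the per cpu
 * pobjects count; once the accumulated pobjects exceeds s->cpu_partial, a
 * draining call first moves the existing set to the per node partial lists
 * via unfreeze_partials() and then starts over with the new page.
 */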
2010
2011static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
2012{
2013 stat(s, CPUSLAB_FLUSH);
2014 deactivate_slab(s, c);
2015}
2016
2017/*
2018 * Flush cpu slab.
2019 *
2020 * Called from IPI handler with interrupts disabled.
2021 */
2022static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
2023{
2024 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
2025
2026 if (likely(c)) {
2027 if (c->page)
2028 flush_slab(s, c);
2029
2030 unfreeze_partials(s);
2031 }
2032}
2033
2034static void flush_cpu_slab(void *d)
2035{
2036 struct kmem_cache *s = d;
2037
2038 __flush_cpu_slab(s, smp_processor_id());
2039}
2040
2041static bool has_cpu_slab(int cpu, void *info)
2042{
2043 struct kmem_cache *s = info;
2044 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
2045
2046 return c->page || c->partial;
2047}
2048
2049static void flush_all(struct kmem_cache *s)
2050{
2051 on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
2052}
2053
2054/*
2055 * Check if the objects in a per cpu structure fit numa
2056 * locality expectations.
2057 */
2058static inline int node_match(struct kmem_cache_cpu *c, int node)
2059{
2060#ifdef CONFIG_NUMA
2061 if (node != NUMA_NO_NODE && c->node != node)
2062 return 0;
2063#endif
2064 return 1;
2065}
2066
2067static int count_free(struct page *page)
2068{
2069 return page->objects - page->inuse;
2070}
2071
2072static unsigned long count_partial(struct kmem_cache_node *n,
2073 int (*get_count)(struct page *))
2074{
2075 unsigned long flags;
2076 unsigned long x = 0;
2077 struct page *page;
2078
2079 spin_lock_irqsave(&n->list_lock, flags);
2080 list_for_each_entry(page, &n->partial, lru)
2081 x += get_count(page);
2082 spin_unlock_irqrestore(&n->list_lock, flags);
2083 return x;
2084}
2085
2086static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
2087{
2088#ifdef CONFIG_SLUB_DEBUG
2089 return atomic_long_read(&n->total_objects);
2090#else
2091 return 0;
2092#endif
2093}
2094
2095static noinline void
2096slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
2097{
2098 int node;
2099
2100 printk(KERN_WARNING
2101 "SLUB: Unable to allocate memory on node %d (gfp=0x%x)\n",
2102 nid, gfpflags);
2103 printk(KERN_WARNING " cache: %s, object size: %d, buffer size: %d, "
2104 "default order: %d, min order: %d\n", s->name, s->objsize,
2105 s->size, oo_order(s->oo), oo_order(s->min));
2106
2107 if (oo_order(s->min) > get_order(s->objsize))
2108 printk(KERN_WARNING " %s debugging increased min order, use "
2109 "slub_debug=O to disable.\n", s->name);
2110
2111 for_each_online_node(node) {
2112 struct kmem_cache_node *n = get_node(s, node);
2113 unsigned long nr_slabs;
2114 unsigned long nr_objs;
2115 unsigned long nr_free;
2116
2117 if (!n)
2118 continue;
2119
2120 nr_free = count_partial(n, count_free);
2121 nr_slabs = node_nr_slabs(n);
2122 nr_objs = node_nr_objs(n);
2123
2124 printk(KERN_WARNING
2125 " node %d: slabs: %ld, objs: %ld, free: %ld\n",
2126 node, nr_slabs, nr_objs, nr_free);
2127 }
2128}
2129
2130static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
2131 int node, struct kmem_cache_cpu **pc)
2132{
2133 void *object;
2134 struct kmem_cache_cpu *c;
2135 struct page *page = new_slab(s, flags, node);
2136
2137 if (page) {
2138 c = __this_cpu_ptr(s->cpu_slab);
2139 if (c->page)
2140 flush_slab(s, c);
2141
2142 /*
2143 * No other reference to the page yet so we can
2144 * muck around with it freely without cmpxchg
2145 */
2146 object = page->freelist;
2147 page->freelist = NULL;
2148
2149 stat(s, ALLOC_SLAB);
2150 c->node = page_to_nid(page);
2151 c->page = page;
2152 *pc = c;
2153 } else
2154 object = NULL;
2155
2156 return object;
2157}
2158
2159/*
2160 * Check the page->freelist of a page and either transfer the freelist
2161 * to the per cpu freelist or deactivate the page.
2162 *
2163 * The page is still frozen if the return value is not NULL.
2164 *
2165 * If this function returns NULL then the page has been unfrozen.
2166 */
2167static inline void *get_freelist(struct kmem_cache *s, struct page *page)
2168{
2169 struct page new;
2170 unsigned long counters;
2171 void *freelist;
2172
2173 do {
2174 freelist = page->freelist;
2175 counters = page->counters;
2176 new.counters = counters;
2177 VM_BUG_ON(!new.frozen);
2178
2179 new.inuse = page->objects;
2180 new.frozen = freelist != NULL;
2181
2182 } while (!cmpxchg_double_slab(s, page,
2183 freelist, counters,
2184 NULL, new.counters,
2185 "get_freelist"));
2186
2187 return freelist;
2188}
2189
2190/*
2191 * Slow path. The lockless freelist is empty or we need to perform
2192 * debugging duties.
2193 *
2194 * Processing is still very fast if new objects have been freed to the
2195 * regular freelist. In that case we simply take over the regular freelist
2196 * as the lockless freelist and zap the regular freelist.
2197 *
2198 * If that is not working then we fall back to the partial lists. We take the
2199 * first element of the freelist as the object to allocate now and move the
2200 * rest of the freelist to the lockless freelist.
2201 *
2202 * And if we were unable to get a new slab from the partial slab lists then
2203 * we need to allocate a new slab. This is the slowest path since it involves
2204 * a call to the page allocator and the setup of a new slab.
2205 */
2206static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
2207 unsigned long addr, struct kmem_cache_cpu *c)
2208{
2209 void **object;
2210 unsigned long flags;
2211
2212 local_irq_save(flags);
2213#ifdef CONFIG_PREEMPT
2214 /*
2215 * We may have been preempted and rescheduled on a different
2216 * cpu before disabling interrupts. Need to reload cpu area
2217 * pointer.
2218 */
2219 c = this_cpu_ptr(s->cpu_slab);
2220#endif
2221
2222 if (!c->page)
2223 goto new_slab;
2224redo:
2225 if (unlikely(!node_match(c, node))) {
2226 stat(s, ALLOC_NODE_MISMATCH);
2227 deactivate_slab(s, c);
2228 goto new_slab;
2229 }
2230
2231 /* must check again c->freelist in case of cpu migration or IRQ */
2232 object = c->freelist;
2233 if (object)
2234 goto load_freelist;
2235
2236 stat(s, ALLOC_SLOWPATH);
2237
2238 object = get_freelist(s, c->page);
2239
2240 if (!object) {
2241 c->page = NULL;
2242 stat(s, DEACTIVATE_BYPASS);
2243 goto new_slab;
2244 }
2245
2246 stat(s, ALLOC_REFILL);
2247
2248load_freelist:
2249 c->freelist = get_freepointer(s, object);
2250 c->tid = next_tid(c->tid);
2251 local_irq_restore(flags);
2252 return object;
2253
2254new_slab:
2255
2256 if (c->partial) {
2257 c->page = c->partial;
2258 c->partial = c->page->next;
2259 c->node = page_to_nid(c->page);
2260 stat(s, CPU_PARTIAL_ALLOC);
2261 c->freelist = NULL;
2262 goto redo;
2263 }
2264
2265 /* Then do expensive stuff like retrieving pages from the partial lists */
2266 object = get_partial(s, gfpflags, node, c);
2267
2268 if (unlikely(!object)) {
2269
2270 object = new_slab_objects(s, gfpflags, node, &c);
2271
2272 if (unlikely(!object)) {
2273 if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
2274 slab_out_of_memory(s, gfpflags, node);
2275
2276 local_irq_restore(flags);
2277 return NULL;
2278 }
2279 }
2280
2281 if (likely(!kmem_cache_debug(s)))
2282 goto load_freelist;
2283
2284 /* Only entered in the debug case */
2285 if (!alloc_debug_processing(s, c->page, object, addr))
2286 goto new_slab; /* Slab failed checks. Next slab needed */
2287
2288 c->freelist = get_freepointer(s, object);
2289 deactivate_slab(s, c);
2290 c->node = NUMA_NO_NODE;
2291 local_irq_restore(flags);
2292 return object;
2293}
2294
2295/*
2296 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
2297 * have the fastpath folded into their functions. So no function call
2298 * overhead for requests that can be satisfied on the fastpath.
2299 *
2300 * The fastpath works by first checking if the lockless freelist can be used.
2301 * If not then __slab_alloc is called for slow processing.
2302 *
2303 * Otherwise we can simply pick the next object from the lockless free list.
2304 */
2305static __always_inline void *slab_alloc(struct kmem_cache *s,
2306 gfp_t gfpflags, int node, unsigned long addr)
2307{
2308 void **object;
2309 struct kmem_cache_cpu *c;
2310 unsigned long tid;
2311
2312 if (slab_pre_alloc_hook(s, gfpflags))
2313 return NULL;
2314
2315redo:
2316
2317 /*
2318 * Must read kmem_cache cpu data via this cpu ptr. Preemption is
2319 * enabled. We may switch back and forth between cpus while
2320 * reading from one cpu area. That does not matter as long
2321 * as we end up on the original cpu again when doing the cmpxchg.
2322 */
2323 c = __this_cpu_ptr(s->cpu_slab);
2324
2325 /*
2326 * The transaction ids are globally unique per cpu and per operation on
2327	 * a per cpu queue. Thus they guarantee that the cmpxchg_double
2328 * occurs on the right processor and that there was no operation on the
2329 * linked list in between.
2330 */
2331 tid = c->tid;
2332 barrier();
2333
2334 object = c->freelist;
2335 if (unlikely(!object || !node_match(c, node)))
2336
2337 object = __slab_alloc(s, gfpflags, node, addr, c);
2338
2339 else {
2340 void *next_object = get_freepointer_safe(s, object);
2341
2342 /*
2343 * The cmpxchg will only match if there was no additional
2344 * operation and if we are on the right processor.
2345 *
2346 * The cmpxchg does the following atomically (without lock semantics!)
2347 * 1. Relocate first pointer to the current per cpu area.
2348 * 2. Verify that tid and freelist have not been changed
2349 * 3. If they were not changed replace tid and freelist
2350 *
2351 * Since this is without lock semantics the protection is only against
2352 * code executing on this cpu *not* from access by other cpus.
2353 */
2354 if (unlikely(!this_cpu_cmpxchg_double(
2355 s->cpu_slab->freelist, s->cpu_slab->tid,
2356 object, tid,
2357 next_object, next_tid(tid)))) {
2358
2359 note_cmpxchg_failure("slab_alloc", s, tid);
2360 goto redo;
2361 }
2362 prefetch_freepointer(s, next_object);
2363 stat(s, ALLOC_FASTPATH);
2364 }
2365
2366 if (unlikely(gfpflags & __GFP_ZERO) && object)
2367 memset(object, 0, s->objsize);
2368
2369 slab_post_alloc_hook(s, gfpflags, object);
2370
2371 return object;
2372}
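
/*
 * A minimal sketch of the fastpath above, with the slowpath fallback and
 * hooks omitted: the freelist head and the transaction id are replaced in
 * one cmpxchg_double, so any intervening operation on this cpu's freelist,
 * or a migration to another cpu since tid was read, leaves a stale tid and
 * forces a retry.
 *
 *	object = c->freelist;
 *	next_object = get_freepointer_safe(s, object);
 *	if (this_cpu_cmpxchg_double(s->cpu_slab->freelist, s->cpu_slab->tid,
 *				    object, tid,
 *				    next_object, next_tid(tid)))
 *		return object;
 */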
2373
2374void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
2375{
2376 void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
2377
2378 trace_kmem_cache_alloc(_RET_IP_, ret, s->objsize, s->size, gfpflags);
2379
2380 return ret;
2381}
2382EXPORT_SYMBOL(kmem_cache_alloc);
2383
2384#ifdef CONFIG_TRACING
2385void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
2386{
2387 void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
2388 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
2389 return ret;
2390}
2391EXPORT_SYMBOL(kmem_cache_alloc_trace);
2392
2393void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
2394{
2395 void *ret = kmalloc_order(size, flags, order);
2396 trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
2397 return ret;
2398}
2399EXPORT_SYMBOL(kmalloc_order_trace);
2400#endif
2401
2402#ifdef CONFIG_NUMA
2403void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
2404{
2405 void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
2406
2407 trace_kmem_cache_alloc_node(_RET_IP_, ret,
2408 s->objsize, s->size, gfpflags, node);
2409
2410 return ret;
2411}
2412EXPORT_SYMBOL(kmem_cache_alloc_node);
2413
2414#ifdef CONFIG_TRACING
2415void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
2416 gfp_t gfpflags,
2417 int node, size_t size)
2418{
2419 void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
2420
2421 trace_kmalloc_node(_RET_IP_, ret,
2422 size, s->size, gfpflags, node);
2423 return ret;
2424}
2425EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
2426#endif
2427#endif
2428
2429/*
2430 * Slow path handling. This may still be called frequently since objects
2431 * have a longer lifetime than the cpu slabs in most processing loads.
2432 *
2433 * So we still attempt to reduce cache line usage. Just take the slab
2434 * lock and free the item. If there is no additional partial page
2435 * handling required then we can return immediately.
2436 */
2437static void __slab_free(struct kmem_cache *s, struct page *page,
2438 void *x, unsigned long addr)
2439{
2440 void *prior;
2441 void **object = (void *)x;
2442 int was_frozen;
2443 int inuse;
2444 struct page new;
2445 unsigned long counters;
2446 struct kmem_cache_node *n = NULL;
2447 unsigned long uninitialized_var(flags);
2448
2449 stat(s, FREE_SLOWPATH);
2450
2451 if (kmem_cache_debug(s) && !free_debug_processing(s, page, x, addr))
2452 return;
2453
2454 do {
2455 prior = page->freelist;
2456 counters = page->counters;
2457 set_freepointer(s, object, prior);
2458 new.counters = counters;
2459 was_frozen = new.frozen;
2460 new.inuse--;
2461 if ((!new.inuse || !prior) && !was_frozen && !n) {
2462
2463 if (!kmem_cache_debug(s) && !prior)
2464
2465 /*
2466 * Slab was on no list before and will be partially empty
2467 * We can defer the list move and instead freeze it.
2468 */
2469 new.frozen = 1;
2470
2471 else { /* Needs to be taken off a list */
2472
2473 n = get_node(s, page_to_nid(page));
2474 /*
2475 * Speculatively acquire the list_lock.
2476 * If the cmpxchg does not succeed then we may
2477 * drop the list_lock without any processing.
2478 *
2479 * Otherwise the list_lock will synchronize with
2480 * other processors updating the list of slabs.
2481 */
2482 spin_lock_irqsave(&n->list_lock, flags);
2483
2484 }
2485 }
2486 inuse = new.inuse;
2487
2488 } while (!cmpxchg_double_slab(s, page,
2489 prior, counters,
2490 object, new.counters,
2491 "__slab_free"));
2492
2493 if (likely(!n)) {
2494
2495 /*
2496 * If we just froze the page then put it onto the
2497 * per cpu partial list.
2498 */
2499 if (new.frozen && !was_frozen) {
2500 put_cpu_partial(s, page, 1);
2501 stat(s, CPU_PARTIAL_FREE);
2502 }
2503 /*
2504 * The list lock was not taken therefore no list
2505 * activity can be necessary.
2506 */
2507 if (was_frozen)
2508 stat(s, FREE_FROZEN);
2509 return;
2510 }
2511
2512 /*
2513 * was_frozen may have been set after we acquired the list_lock in
2514 * an earlier loop. So we need to check it here again.
2515 */
2516 if (was_frozen)
2517 stat(s, FREE_FROZEN);
2518 else {
2519 if (unlikely(!inuse && n->nr_partial > s->min_partial))
2520 goto slab_empty;
2521
2522 /*
2523 * Objects left in the slab. If it was not on the partial list before
2524 * then add it.
2525 */
2526 if (unlikely(!prior)) {
2527 remove_full(s, page);
2528 add_partial(n, page, DEACTIVATE_TO_TAIL);
2529 stat(s, FREE_ADD_PARTIAL);
2530 }
2531 }
2532 spin_unlock_irqrestore(&n->list_lock, flags);
2533 return;
2534
2535slab_empty:
2536 if (prior) {
2537 /*
2538 * Slab on the partial list.
2539 */
2540 remove_partial(n, page);
2541 stat(s, FREE_REMOVE_PARTIAL);
2542 } else
2543 /* Slab must be on the full list */
2544 remove_full(s, page);
2545
2546 spin_unlock_irqrestore(&n->list_lock, flags);
2547 stat(s, FREE_SLAB);
2548 discard_slab(s, page);
2549}
2550
2551/*
2552 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
2553 * can perform fastpath freeing without additional function calls.
2554 *
2555 * The fastpath is only possible if we are freeing to the current cpu slab
2556 * of this processor. This is typically the case if we have just allocated
2557 * the item before.
2558 *
2559 * If fastpath is not possible then fall back to __slab_free where we deal
2560 * with all sorts of special processing.
2561 */
2562static __always_inline void slab_free(struct kmem_cache *s,
2563 struct page *page, void *x, unsigned long addr)
2564{
2565 void **object = (void *)x;
2566 struct kmem_cache_cpu *c;
2567 unsigned long tid;
2568
2569 slab_free_hook(s, x);
2570
2571redo:
2572 /*
2573	 * Determine the current cpu's per cpu slab.
2574 * The cpu may change afterward. However that does not matter since
2575 * data is retrieved via this pointer. If we are on the same cpu
2576	 * during the cmpxchg then the free will succeed.
2577 */
2578 c = __this_cpu_ptr(s->cpu_slab);
2579
2580 tid = c->tid;
2581 barrier();
2582
2583 if (likely(page == c->page)) {
2584 set_freepointer(s, object, c->freelist);
2585
2586 if (unlikely(!this_cpu_cmpxchg_double(
2587 s->cpu_slab->freelist, s->cpu_slab->tid,
2588 c->freelist, tid,
2589 object, next_tid(tid)))) {
2590
2591 note_cmpxchg_failure("slab_free", s, tid);
2592 goto redo;
2593 }
2594 stat(s, FREE_FASTPATH);
2595 } else
2596 __slab_free(s, page, x, addr);
2597
2598}
2599
2600void kmem_cache_free(struct kmem_cache *s, void *x)
2601{
2602 struct page *page;
2603
2604 page = virt_to_head_page(x);
2605
2606 slab_free(s, page, x, _RET_IP_);
2607
2608 trace_kmem_cache_free(_RET_IP_, x);
2609}
2610EXPORT_SYMBOL(kmem_cache_free);
2611
2612/*
2613 * Object placement in a slab is made very easy because we always start at
2614 * offset 0. If we tune the size of the object to the alignment then we can
2615 * get the required alignment by putting one properly sized object after
2616 * another.
2617 *
2618 * Notice that the allocation order determines the sizes of the per cpu
2619 * caches. Each processor has always one slab available for allocations.
2620 * Increasing the allocation order reduces the number of times that slabs
2621 * must be moved on and off the partial lists and is therefore a factor in
2622 * locking overhead.
2623 */
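
/*
 * Worked example with hypothetical sizes: a cache with 24 byte objects and
 * 8 byte alignment packs objects at offsets 0, 24, 48, ... so every object
 * is naturally aligned; a 4096 byte order-0 slab then holds 4096 / 24 = 170
 * objects with 16 bytes left over.
 */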
2624
2625/*
2626 * Minimum / Maximum order of slab pages. This influences locking overhead
2627 * and slab fragmentation. A higher order reduces the number of partial slabs
2628 * and increases the number of allocations possible without having to
2629 * take the list_lock.
2630 */
2631static int slub_min_order;
2632static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
2633static int slub_min_objects;
2634
2635/*
2636 * Merge control. If this is set then no merging of slab caches will occur.
2637 * (Could be removed. This was introduced to pacify the merge skeptics.)
2638 */
2639static int slub_nomerge;
2640
2641/*
2642 * Calculate the order of allocation given a slab object size.
2643 *
2644 * The order of allocation has significant impact on performance and other
2645 * system components. Generally order 0 allocations should be preferred since
2646 * order 0 does not cause fragmentation in the page allocator. Larger objects
2647 * be problematic to put into order 0 slabs because there may be too much
2648 * can be problematic to put into order 0 slabs because there may be too much
2649 * would be wasted.
2650 *
2651 * In order to reach satisfactory performance we must ensure that a minimum
2652 * number of objects is in one slab. Otherwise we may generate too much
2653 * activity on the partial lists which requires taking the list_lock. This is
2654 * less a concern for large slabs though which are rarely used.
2655 *
2656 * slub_max_order specifies the order where we begin to stop considering the
2657 * number of objects in a slab as critical. If we reach slub_max_order then
2658 * we try to keep the page order as low as possible. So we accept more waste
2659 * of space in favor of a small page order.
2660 *
2661 * Higher order allocations also allow the placement of more objects in a
2662 * slab and thereby reduce object handling overhead. If the user has
2663 * requested a higher minimum order then we start with that one instead of
2664 * the smallest order which will fit the object.
2665 */
2666static inline int slab_order(int size, int min_objects,
2667 int max_order, int fract_leftover, int reserved)
2668{
2669 int order;
2670 int rem;
2671 int min_order = slub_min_order;
2672
2673 if (order_objects(min_order, size, reserved) > MAX_OBJS_PER_PAGE)
2674 return get_order(size * MAX_OBJS_PER_PAGE) - 1;
2675
2676 for (order = max(min_order,
2677 fls(min_objects * size - 1) - PAGE_SHIFT);
2678 order <= max_order; order++) {
2679
2680 unsigned long slab_size = PAGE_SIZE << order;
2681
2682 if (slab_size < min_objects * size + reserved)
2683 continue;
2684
2685 rem = (slab_size - reserved) % size;
2686
2687 if (rem <= slab_size / fract_leftover)
2688 break;
2689
2690 }
2691
2692 return order;
2693}
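
/*
 * Worked example with hypothetical inputs and 4K pages: size = 700,
 * min_objects = 8, reserved = 0, fract_leftover = 16. The loop starts at
 * order = fls(8 * 700 - 1) - PAGE_SHIFT = 13 - 12 = 1. An order-1 slab is
 * 8192 bytes, holds 11 such objects and wastes 8192 % 700 = 492 bytes,
 * which is below 8192 / 16 = 512, so order 1 is chosen.
 */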
2694
2695static inline int calculate_order(int size, int reserved)
2696{
2697 int order;
2698 int min_objects;
2699 int fraction;
2700 int max_objects;
2701
2702 /*
2703 * Attempt to find best configuration for a slab. This
2704 * works by first attempting to generate a layout with
2705 * the best configuration and backing off gradually.
2706 *
2707 * First we reduce the acceptable waste in a slab. Then
2708 * we reduce the minimum objects required in a slab.
2709 */
2710 min_objects = slub_min_objects;
2711 if (!min_objects)
2712 min_objects = 4 * (fls(nr_cpu_ids) + 1);
2713 max_objects = order_objects(slub_max_order, size, reserved);
2714 min_objects = min(min_objects, max_objects);
2715
2716 while (min_objects > 1) {
2717 fraction = 16;
2718 while (fraction >= 4) {
2719 order = slab_order(size, min_objects,
2720 slub_max_order, fraction, reserved);
2721 if (order <= slub_max_order)
2722 return order;
2723 fraction /= 2;
2724 }
2725 min_objects--;
2726 }
2727
2728 /*
2729 * We were unable to place multiple objects in a slab. Now
2730	 * let's see if we can place a single object there.
2731 */
2732 order = slab_order(size, 1, slub_max_order, 1, reserved);
2733 if (order <= slub_max_order)
2734 return order;
2735
2736 /*
2737 * Doh this slab cannot be placed using slub_max_order.
2738 */
2739 order = slab_order(size, 1, MAX_ORDER, 1, reserved);
2740 if (order < MAX_ORDER)
2741 return order;
2742 return -ENOSYS;
2743}
2744
2745/*
2746 * Figure out what the alignment of the objects will be.
2747 */
2748static unsigned long calculate_alignment(unsigned long flags,
2749 unsigned long align, unsigned long size)
2750{
2751 /*
2752 * If the user wants hardware cache aligned objects then follow that
2753 * suggestion if the object is sufficiently large.
2754 *
2755 * The hardware cache alignment cannot override the specified
2756 * alignment though. If that is greater then use it.
2757 */
2758 if (flags & SLAB_HWCACHE_ALIGN) {
2759 unsigned long ralign = cache_line_size();
2760 while (size <= ralign / 2)
2761 ralign /= 2;
2762 align = max(align, ralign);
2763 }
2764
2765 if (align < ARCH_SLAB_MINALIGN)
2766 align = ARCH_SLAB_MINALIGN;
2767
2768 return ALIGN(align, sizeof(void *));
2769}
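
/*
 * Worked example with a hypothetical object size: with SLAB_HWCACHE_ALIGN
 * and a 64 byte cache line, a 20 byte object only gets 32 byte alignment
 * (assuming the caller did not request more), because the loop above halves
 * ralign while the object still fits in half of it (20 <= 32, but 20 > 16).
 * A 40 byte object would keep the full 64 byte alignment.
 */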
2770
2771static void
2772init_kmem_cache_node(struct kmem_cache_node *n)
2773{
2774 n->nr_partial = 0;
2775 spin_lock_init(&n->list_lock);
2776 INIT_LIST_HEAD(&n->partial);
2777#ifdef CONFIG_SLUB_DEBUG
2778 atomic_long_set(&n->nr_slabs, 0);
2779 atomic_long_set(&n->total_objects, 0);
2780 INIT_LIST_HEAD(&n->full);
2781#endif
2782}
2783
2784static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
2785{
2786 BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
2787 SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu));
2788
2789 /*
2790 * Must align to double word boundary for the double cmpxchg
2791 * instructions to work; see __pcpu_double_call_return_bool().
2792 */
2793 s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
2794 2 * sizeof(void *));
2795
2796 if (!s->cpu_slab)
2797 return 0;
2798
2799 init_kmem_cache_cpus(s);
2800
2801 return 1;
2802}
2803
2804static struct kmem_cache *kmem_cache_node;
2805
2806/*
2807 * No kmalloc_node yet so do it by hand. We know that this is the first
2808 * slab on the node for this slabcache. There are no concurrent accesses
2809 * possible.
2810 *
2811 * Note that this function only works on the kmalloc_node_cache
2812 * when allocating for the kmalloc_node_cache. This is used for bootstrapping
2813 * memory on a fresh node that has no slab structures yet.
2814 */
2815static void early_kmem_cache_node_alloc(int node)
2816{
2817 struct page *page;
2818 struct kmem_cache_node *n;
2819
2820 BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
2821
2822 page = new_slab(kmem_cache_node, GFP_NOWAIT, node);
2823
2824 BUG_ON(!page);
2825 if (page_to_nid(page) != node) {
2826 printk(KERN_ERR "SLUB: Unable to allocate memory from "
2827 "node %d\n", node);
2828 printk(KERN_ERR "SLUB: Allocating a useless per node structure "
2829 "in order to be able to continue\n");
2830 }
2831
2832 n = page->freelist;
2833 BUG_ON(!n);
2834 page->freelist = get_freepointer(kmem_cache_node, n);
2835 page->inuse = 1;
2836 page->frozen = 0;
2837 kmem_cache_node->node[node] = n;
2838#ifdef CONFIG_SLUB_DEBUG
2839 init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
2840 init_tracking(kmem_cache_node, n);
2841#endif
2842 init_kmem_cache_node(n);
2843 inc_slabs_node(kmem_cache_node, node, page->objects);
2844
2845 add_partial(n, page, DEACTIVATE_TO_HEAD);
2846}
2847
2848static void free_kmem_cache_nodes(struct kmem_cache *s)
2849{
2850 int node;
2851
2852 for_each_node_state(node, N_NORMAL_MEMORY) {
2853 struct kmem_cache_node *n = s->node[node];
2854
2855 if (n)
2856 kmem_cache_free(kmem_cache_node, n);
2857
2858 s->node[node] = NULL;
2859 }
2860}
2861
2862static int init_kmem_cache_nodes(struct kmem_cache *s)
2863{
2864 int node;
2865
2866 for_each_node_state(node, N_NORMAL_MEMORY) {
2867 struct kmem_cache_node *n;
2868
2869 if (slab_state == DOWN) {
2870 early_kmem_cache_node_alloc(node);
2871 continue;
2872 }
2873 n = kmem_cache_alloc_node(kmem_cache_node,
2874 GFP_KERNEL, node);
2875
2876 if (!n) {
2877 free_kmem_cache_nodes(s);
2878 return 0;
2879 }
2880
2881 s->node[node] = n;
2882 init_kmem_cache_node(n);
2883 }
2884 return 1;
2885}
2886
2887static void set_min_partial(struct kmem_cache *s, unsigned long min)
2888{
2889 if (min < MIN_PARTIAL)
2890 min = MIN_PARTIAL;
2891 else if (min > MAX_PARTIAL)
2892 min = MAX_PARTIAL;
2893 s->min_partial = min;
2894}
2895
2896/*
2897 * calculate_sizes() determines the order and the distribution of data within
2898 * a slab object.
2899 */
2900static int calculate_sizes(struct kmem_cache *s, int forced_order)
2901{
2902 unsigned long flags = s->flags;
2903 unsigned long size = s->objsize;
2904 unsigned long align = s->align;
2905 int order;
2906
2907 /*
2908 * Round up object size to the next word boundary. We can only
2909 * place the free pointer at word boundaries and this determines
2910 * the possible location of the free pointer.
2911 */
2912 size = ALIGN(size, sizeof(void *));
2913
2914#ifdef CONFIG_SLUB_DEBUG
2915 /*
2916 * Determine if we can poison the object itself. If the user of
2917 * the slab may touch the object after free or before allocation
2918 * then we should never poison the object itself.
2919 */
2920 if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
2921 !s->ctor)
2922 s->flags |= __OBJECT_POISON;
2923 else
2924 s->flags &= ~__OBJECT_POISON;
2925
2926
2927 /*
2928 * If we are Redzoning then check if there is some space between the
2929 * end of the object and the free pointer. If not then add an
2930 * additional word to have some bytes to store Redzone information.
2931 */
2932 if ((flags & SLAB_RED_ZONE) && size == s->objsize)
2933 size += sizeof(void *);
2934#endif
2935
2936 /*
2937 * With that we have determined the number of bytes in actual use
2938 * by the object. This is the potential offset to the free pointer.
2939 */
2940 s->inuse = size;
2941
2942 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
2943 s->ctor)) {
2944 /*
2945 * Relocate free pointer after the object if it is not
2946 * permitted to overwrite the first word of the object on
2947 * kmem_cache_free.
2948 *
2949 * This is the case if we do RCU, have a constructor or
2950 * destructor or are poisoning the objects.
2951 */
2952 s->offset = size;
2953 size += sizeof(void *);
2954 }
2955
2956#ifdef CONFIG_SLUB_DEBUG
2957 if (flags & SLAB_STORE_USER)
2958 /*
2959 * Need to store information about allocs and frees after
2960 * the object.
2961 */
2962 size += 2 * sizeof(struct track);
2963
2964 if (flags & SLAB_RED_ZONE)
2965 /*
2966 * Add some empty padding so that we can catch
2967 * overwrites from earlier objects rather than let
2968 * tracking information or the free pointer be
2969 * corrupted if a user writes before the start
2970 * of the object.
2971 */
2972 size += sizeof(void *);
2973#endif
2974
2975 /*
2976 * Determine the alignment based on various parameters that the
2977 * user specified and the dynamic determination of cache line size
2978 * on bootup.
2979 */
2980 align = calculate_alignment(flags, align, s->objsize);
2981 s->align = align;
2982
2983 /*
2984 * SLUB stores one object immediately after another beginning from
2985 * offset 0. In order to align the objects we have to simply size
2986 * each object to conform to the alignment.
2987 */
2988 size = ALIGN(size, align);
2989 s->size = size;
2990 if (forced_order >= 0)
2991 order = forced_order;
2992 else
2993 order = calculate_order(size, s->reserved);
2994
2995 if (order < 0)
2996 return 0;
2997
2998 s->allocflags = 0;
2999 if (order)
3000 s->allocflags |= __GFP_COMP;
3001
3002 if (s->flags & SLAB_CACHE_DMA)
3003 s->allocflags |= SLUB_DMA;
3004
3005 if (s->flags & SLAB_RECLAIM_ACCOUNT)
3006 s->allocflags |= __GFP_RECLAIMABLE;
3007
3008 /*
3009 * Determine the number of objects per slab
3010 */
3011 s->oo = oo_make(order, size, s->reserved);
3012 s->min = oo_make(get_order(size), size, s->reserved);
3013 if (oo_objects(s->oo) > oo_objects(s->max))
3014 s->max = s->oo;
3015
3016 return !!oo_objects(s->oo);
3017
3018}
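
/*
 * A sketch of the per object layout derived from the steps above; the debug
 * fields appear only when the corresponding flags enable them:
 *
 *	object data (objsize, rounded up to a word)
 *	red zone word		(SLAB_RED_ZONE, only if no slack after object)
 *	free pointer		(only when relocated for RCU / poison / ctor,
 *				 otherwise it overlays the object at offset 0)
 *	struct track x 2	(SLAB_STORE_USER, alloc and free records)
 *	red zone padding word	(SLAB_RED_ZONE)
 *
 * The total is finally rounded up to the computed alignment and becomes
 * s->size, the distance from one object to the next within a slab.
 */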
3019
3020static int kmem_cache_open(struct kmem_cache *s,
3021 const char *name, size_t size,
3022 size_t align, unsigned long flags,
3023 void (*ctor)(void *))
3024{
3025 memset(s, 0, kmem_size);
3026 s->name = name;
3027 s->ctor = ctor;
3028 s->objsize = size;
3029 s->align = align;
3030 s->flags = kmem_cache_flags(size, flags, name, ctor);
3031 s->reserved = 0;
3032
3033 if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU))
3034 s->reserved = sizeof(struct rcu_head);
3035
3036 if (!calculate_sizes(s, -1))
3037 goto error;
3038 if (disable_higher_order_debug) {
3039 /*
3040 * Disable debugging flags that store metadata if the min slab
3041 * order increased.
3042 */
3043 if (get_order(s->size) > get_order(s->objsize)) {
3044 s->flags &= ~DEBUG_METADATA_FLAGS;
3045 s->offset = 0;
3046 if (!calculate_sizes(s, -1))
3047 goto error;
3048 }
3049 }
3050
3051#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
3052 defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
3053 if (system_has_cmpxchg_double() && (s->flags & SLAB_DEBUG_FLAGS) == 0)
3054 /* Enable fast mode */
3055 s->flags |= __CMPXCHG_DOUBLE;
3056#endif
3057
3058 /*
3059 * The larger the object size is, the more pages we want on the partial
3060 * list to avoid pounding the page allocator excessively.
3061 */
3062 set_min_partial(s, ilog2(s->size) / 2);
3063
3064 /*
3065	 * cpu_partial determines the maximum number of objects kept in the
3066 * per cpu partial lists of a processor.
3067 *
3068 * Per cpu partial lists mainly contain slabs that just have one
3069 * object freed. If they are used for allocation then they can be
3070 * filled up again with minimal effort. The slab will never hit the
3071 * per node partial lists and therefore no locking will be required.
3072 *
3073 * This setting also determines
3074 *
3075 * A) The number of objects from per cpu partial slabs dumped to the
3076 * per node list when we reach the limit.
3077 * B) The number of objects in cpu partial slabs to extract from the
3078 * per node list when we run out of per cpu objects. We only fetch 50%
3079 * to keep some capacity around for frees.
3080 */
3081 if (kmem_cache_debug(s))
3082 s->cpu_partial = 0;
3083 else if (s->size >= PAGE_SIZE)
3084 s->cpu_partial = 2;
3085 else if (s->size >= 1024)
3086 s->cpu_partial = 6;
3087 else if (s->size >= 256)
3088 s->cpu_partial = 13;
3089 else
3090 s->cpu_partial = 30;
3091
3092 s->refcount = 1;
3093#ifdef CONFIG_NUMA
3094 s->remote_node_defrag_ratio = 1000;
3095#endif
3096 if (!init_kmem_cache_nodes(s))
3097 goto error;
3098
3099 if (alloc_kmem_cache_cpus(s))
3100 return 1;
3101
3102 free_kmem_cache_nodes(s);
3103error:
3104 if (flags & SLAB_PANIC)
3105 panic("Cannot create slab %s size=%lu realsize=%u "
3106 "order=%u offset=%u flags=%lx\n",
3107 s->name, (unsigned long)size, s->size, oo_order(s->oo),
3108 s->offset, flags);
3109 return 0;
3110}
3111
3112/*
3113 * Determine the size of a slab object
3114 */
3115unsigned int kmem_cache_size(struct kmem_cache *s)
3116{
3117 return s->objsize;
3118}
3119EXPORT_SYMBOL(kmem_cache_size);
3120
3121static void list_slab_objects(struct kmem_cache *s, struct page *page,
3122 const char *text)
3123{
3124#ifdef CONFIG_SLUB_DEBUG
3125 void *addr = page_address(page);
3126 void *p;
3127 unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) *
3128 sizeof(long), GFP_ATOMIC);
3129 if (!map)
3130 return;
3131 slab_err(s, page, "%s", text);
3132 slab_lock(page);
3133
3134 get_map(s, page, map);
3135 for_each_object(p, s, addr, page->objects) {
3136
3137 if (!test_bit(slab_index(p, s, addr), map)) {
3138 printk(KERN_ERR "INFO: Object 0x%p @offset=%tu\n",
3139 p, p - addr);
3140 print_tracking(s, p);
3141 }
3142 }
3143 slab_unlock(page);
3144 kfree(map);
3145#endif
3146}
3147
3148/*
3149 * Attempt to free all partial slabs on a node.
3150 * This is called from kmem_cache_close(). We must be the last thread
3151 * using the cache and therefore we do not need to lock anymore.
3152 */
3153static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
3154{
3155 struct page *page, *h;
3156
3157 list_for_each_entry_safe(page, h, &n->partial, lru) {
3158 if (!page->inuse) {
3159 remove_partial(n, page);
3160 discard_slab(s, page);
3161 } else {
3162 list_slab_objects(s, page,
3163 "Objects remaining on kmem_cache_close()");
3164 }
3165 }
3166}
3167
3168/*
3169 * Release all resources used by a slab cache.
3170 */
3171static inline int kmem_cache_close(struct kmem_cache *s)
3172{
3173 int node;
3174
3175 flush_all(s);
3176 free_percpu(s->cpu_slab);
3177 /* Attempt to free all objects */
3178 for_each_node_state(node, N_NORMAL_MEMORY) {
3179 struct kmem_cache_node *n = get_node(s, node);
3180
3181 free_partial(s, n);
3182 if (n->nr_partial || slabs_node(s, node))
3183 return 1;
3184 }
3185 free_kmem_cache_nodes(s);
3186 return 0;
3187}
3188
3189/*
3190 * Close a cache and release the kmem_cache structure
3191 * (must be used for caches created using kmem_cache_create)
3192 */
3193void kmem_cache_destroy(struct kmem_cache *s)
3194{
3195 down_write(&slub_lock);
3196 s->refcount--;
3197 if (!s->refcount) {
3198 list_del(&s->list);
3199 up_write(&slub_lock);
3200 if (kmem_cache_close(s)) {
3201 printk(KERN_ERR "SLUB %s: %s called for cache that "
3202 "still has objects.\n", s->name, __func__);
3203 dump_stack();
3204 }
3205 if (s->flags & SLAB_DESTROY_BY_RCU)
3206 rcu_barrier();
3207 sysfs_slab_remove(s);
3208 } else
3209 up_write(&slub_lock);
3210}
3211EXPORT_SYMBOL(kmem_cache_destroy);
3212
3213/********************************************************************
3214 * Kmalloc subsystem
3215 *******************************************************************/
3216
3217struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
3218EXPORT_SYMBOL(kmalloc_caches);
3219
3220static struct kmem_cache *kmem_cache;
3221
3222#ifdef CONFIG_ZONE_DMA
3223static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
3224#endif
3225
3226static int __init setup_slub_min_order(char *str)
3227{
3228 get_option(&str, &slub_min_order);
3229
3230 return 1;
3231}
3232
3233__setup("slub_min_order=", setup_slub_min_order);
3234
3235static int __init setup_slub_max_order(char *str)
3236{
3237 get_option(&str, &slub_max_order);
3238 slub_max_order = min(slub_max_order, MAX_ORDER - 1);
3239
3240 return 1;
3241}
3242
3243__setup("slub_max_order=", setup_slub_max_order);
3244
3245static int __init setup_slub_min_objects(char *str)
3246{
3247 get_option(&str, &slub_min_objects);
3248
3249 return 1;
3250}
3251
3252__setup("slub_min_objects=", setup_slub_min_objects);
3253
3254static int __init setup_slub_nomerge(char *str)
3255{
3256 slub_nomerge = 1;
3257 return 1;
3258}
3259
3260__setup("slub_nomerge", setup_slub_nomerge);
3261
3262static struct kmem_cache *__init create_kmalloc_cache(const char *name,
3263 int size, unsigned int flags)
3264{
3265 struct kmem_cache *s;
3266
3267 s = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
3268
3269 /*
3270 * This function is called with IRQs disabled during early-boot on
3271 * single CPU so there's no need to take slub_lock here.
3272 */
3273 if (!kmem_cache_open(s, name, size, ARCH_KMALLOC_MINALIGN,
3274 flags, NULL))
3275 goto panic;
3276
3277 list_add(&s->list, &slab_caches);
3278 return s;
3279
3280panic:
3281 panic("Creation of kmalloc slab %s size=%d failed.\n", name, size);
3282 return NULL;
3283}
3284
3285/*
3286 * Conversion table for small slab sizes / 8 to the index in the
3287 * kmalloc array. This is necessary for slabs < 192 since we have non power
3288 * of two cache sizes there. The size of larger slabs can be determined using
3289 * fls.
3290 */
3291static s8 size_index[24] = {
3292 3, /* 8 */
3293 4, /* 16 */
3294 5, /* 24 */
3295 5, /* 32 */
3296 6, /* 40 */
3297 6, /* 48 */
3298 6, /* 56 */
3299 6, /* 64 */
3300 1, /* 72 */
3301 1, /* 80 */
3302 1, /* 88 */
3303 1, /* 96 */
3304 7, /* 104 */
3305 7, /* 112 */
3306 7, /* 120 */
3307 7, /* 128 */
3308 2, /* 136 */
3309 2, /* 144 */
3310 2, /* 152 */
3311 2, /* 160 */
3312 2, /* 168 */
3313 2, /* 176 */
3314 2, /* 184 */
3315 2 /* 192 */
3316};
3317
3318static inline int size_index_elem(size_t bytes)
3319{
3320 return (bytes - 1) / 8;
3321}
3322
3323static struct kmem_cache *get_slab(size_t size, gfp_t flags)
3324{
3325 int index;
3326
3327 if (size <= 192) {
3328 if (!size)
3329 return ZERO_SIZE_PTR;
3330
3331 index = size_index[size_index_elem(size)];
3332 } else
3333 index = fls(size - 1);
3334
3335#ifdef CONFIG_ZONE_DMA
3336 if (unlikely((flags & SLUB_DMA)))
3337 return kmalloc_dma_caches[index];
3338
3339#endif
3340 return kmalloc_caches[index];
3341}
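
/*
 * Worked examples with hypothetical request sizes, assuming the size_index
 * table has not been patched for a larger ARCH_KMALLOC_MINALIGN:
 * kmalloc(100) computes size_index_elem(100) = (100 - 1) / 8 = 12 and
 * size_index[12] = 7, so the request is served from kmalloc-128.
 * kmalloc(300) is above 192, so the index is fls(300 - 1) = 9, i.e. the
 * kmalloc-512 cache.
 */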
3342
3343void *__kmalloc(size_t size, gfp_t flags)
3344{
3345 struct kmem_cache *s;
3346 void *ret;
3347
3348 if (unlikely(size > SLUB_MAX_SIZE))
3349 return kmalloc_large(size, flags);
3350
3351 s = get_slab(size, flags);
3352
3353 if (unlikely(ZERO_OR_NULL_PTR(s)))
3354 return s;
3355
3356 ret = slab_alloc(s, flags, NUMA_NO_NODE, _RET_IP_);
3357
3358 trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
3359
3360 return ret;
3361}
3362EXPORT_SYMBOL(__kmalloc);
3363
3364#ifdef CONFIG_NUMA
3365static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
3366{
3367 struct page *page;
3368 void *ptr = NULL;
3369
3370 flags |= __GFP_COMP | __GFP_NOTRACK;
3371 page = alloc_pages_node(node, flags, get_order(size));
3372 if (page)
3373 ptr = page_address(page);
3374
3375 kmemleak_alloc(ptr, size, 1, flags);
3376 return ptr;
3377}
3378
3379void *__kmalloc_node(size_t size, gfp_t flags, int node)
3380{
3381 struct kmem_cache *s;
3382 void *ret;
3383
3384 if (unlikely(size > SLUB_MAX_SIZE)) {
3385 ret = kmalloc_large_node(size, flags, node);
3386
3387 trace_kmalloc_node(_RET_IP_, ret,
3388 size, PAGE_SIZE << get_order(size),
3389 flags, node);
3390
3391 return ret;
3392 }
3393
3394 s = get_slab(size, flags);
3395
3396 if (unlikely(ZERO_OR_NULL_PTR(s)))
3397 return s;
3398
3399 ret = slab_alloc(s, flags, node, _RET_IP_);
3400
3401 trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
3402
3403 return ret;
3404}
3405EXPORT_SYMBOL(__kmalloc_node);
3406#endif
3407
3408size_t ksize(const void *object)
3409{
3410 struct page *page;
3411
3412 if (unlikely(object == ZERO_SIZE_PTR))
3413 return 0;
3414
3415 page = virt_to_head_page(object);
3416
3417 if (unlikely(!PageSlab(page))) {
3418 WARN_ON(!PageCompound(page));
3419 return PAGE_SIZE << compound_order(page);
3420 }
3421
3422 return slab_ksize(page->slab);
3423}
3424EXPORT_SYMBOL(ksize);
3425
3426#ifdef CONFIG_SLUB_DEBUG
3427bool verify_mem_not_deleted(const void *x)
3428{
3429 struct page *page;
3430 void *object = (void *)x;
3431 unsigned long flags;
3432 bool rv;
3433
3434 if (unlikely(ZERO_OR_NULL_PTR(x)))
3435 return false;
3436
3437 local_irq_save(flags);
3438
3439 page = virt_to_head_page(x);
3440 if (unlikely(!PageSlab(page))) {
3441 /* maybe it was from stack? */
3442 rv = true;
3443 goto out_unlock;
3444 }
3445
3446 slab_lock(page);
3447 if (on_freelist(page->slab, page, object)) {
3448 object_err(page->slab, page, object, "Object is on free-list");
3449 rv = false;
3450 } else {
3451 rv = true;
3452 }
3453 slab_unlock(page);
3454
3455out_unlock:
3456 local_irq_restore(flags);
3457 return rv;
3458}
3459EXPORT_SYMBOL(verify_mem_not_deleted);
3460#endif
3461
3462void kfree(const void *x)
3463{
3464 struct page *page;
3465 void *object = (void *)x;
3466
3467 trace_kfree(_RET_IP_, x);
3468
3469 if (unlikely(ZERO_OR_NULL_PTR(x)))
3470 return;
3471
3472 page = virt_to_head_page(x);
3473 if (unlikely(!PageSlab(page))) {
3474 BUG_ON(!PageCompound(page));
3475 kmemleak_free(x);
3476 put_page(page);
3477 return;
3478 }
3479 slab_free(page->slab, page, object, _RET_IP_);
3480}
3481EXPORT_SYMBOL(kfree);
3482
3483/*
3484 * kmem_cache_shrink removes empty slabs from the partial lists and sorts
3485 * the remaining slabs by the number of items in use. The slabs with the
3486 * most items in use come first. New allocations will then fill those up
3487 * and thus they can be removed from the partial lists.
3488 *
3489 * The slabs with the least items are placed last. This results in them
3490 * being allocated from last, increasing the chance that the remaining
3491 * objects in them are freed and the slabs can eventually be discarded.
3492 */
3493int kmem_cache_shrink(struct kmem_cache *s)
3494{
3495 int node;
3496 int i;
3497 struct kmem_cache_node *n;
3498 struct page *page;
3499 struct page *t;
3500 int objects = oo_objects(s->max);
3501 struct list_head *slabs_by_inuse =
3502 kmalloc(sizeof(struct list_head) * objects, GFP_KERNEL);
3503 unsigned long flags;
3504
3505 if (!slabs_by_inuse)
3506 return -ENOMEM;
3507
3508 flush_all(s);
3509 for_each_node_state(node, N_NORMAL_MEMORY) {
3510 n = get_node(s, node);
3511
3512 if (!n->nr_partial)
3513 continue;
3514
3515 for (i = 0; i < objects; i++)
3516 INIT_LIST_HEAD(slabs_by_inuse + i);
3517
3518 spin_lock_irqsave(&n->list_lock, flags);
3519
3520 /*
3521 * Build lists indexed by the items in use in each slab.
3522 *
3523 * Note that concurrent frees may occur while we hold the
3524 * list_lock. page->inuse here is the upper limit.
3525 */
3526 list_for_each_entry_safe(page, t, &n->partial, lru) {
3527 list_move(&page->lru, slabs_by_inuse + page->inuse);
3528 if (!page->inuse)
3529 n->nr_partial--;
3530 }
3531
3532 /*
3533 * Rebuild the partial list with the slabs filled up most
3534 * first and the least used slabs at the end.
3535 */
3536 for (i = objects - 1; i > 0; i--)
3537 list_splice(slabs_by_inuse + i, n->partial.prev);
3538
3539 spin_unlock_irqrestore(&n->list_lock, flags);
3540
3541 /* Release empty slabs */
3542 list_for_each_entry_safe(page, t, slabs_by_inuse, lru)
3543 discard_slab(s, page);
3544 }
3545
3546 kfree(slabs_by_inuse);
3547 return 0;
3548}
3549EXPORT_SYMBOL(kmem_cache_shrink);
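
/*
 * Illustrative example with hypothetical inuse counts: given partial slabs
 * whose inuse counts are 3, 0, 5 and 1, the loop above buckets them by
 * inuse, the splice rebuilds the partial list as 5, 3, 1 (fullest first),
 * and the empty slab stays in slabs_by_inuse[0] and is discarded at the end.
 */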
3550
3551#if defined(CONFIG_MEMORY_HOTPLUG)
3552static int slab_mem_going_offline_callback(void *arg)
3553{
3554 struct kmem_cache *s;
3555
3556 down_read(&slub_lock);
3557 list_for_each_entry(s, &slab_caches, list)
3558 kmem_cache_shrink(s);
3559 up_read(&slub_lock);
3560
3561 return 0;
3562}
3563
3564static void slab_mem_offline_callback(void *arg)
3565{
3566 struct kmem_cache_node *n;
3567 struct kmem_cache *s;
3568 struct memory_notify *marg = arg;
3569 int offline_node;
3570
3571 offline_node = marg->status_change_nid;
3572
3573 /*
3574	 * If the node still has available memory, we still need its
3575	 * kmem_cache_node structure, so there is nothing to do.
3576 */
3577 if (offline_node < 0)
3578 return;
3579
3580 down_read(&slub_lock);
3581 list_for_each_entry(s, &slab_caches, list) {
3582 n = get_node(s, offline_node);
3583 if (n) {
3584 /*
3585 * if n->nr_slabs > 0, slabs still exist on the node
3586 * that is going down. We were unable to free them,
3587 * and offline_pages() function shouldn't call this
3588 * callback. So, we must fail.
3589 */
3590 BUG_ON(slabs_node(s, offline_node));
3591
3592 s->node[offline_node] = NULL;
3593 kmem_cache_free(kmem_cache_node, n);
3594 }
3595 }
3596 up_read(&slub_lock);
3597}
3598
3599static int slab_mem_going_online_callback(void *arg)
3600{
3601 struct kmem_cache_node *n;
3602 struct kmem_cache *s;
3603 struct memory_notify *marg = arg;
3604 int nid = marg->status_change_nid;
3605 int ret = 0;
3606
3607 /*
3608 * If the node's memory is already available, then kmem_cache_node is
3609 * already created. Nothing to do.
3610 */
3611 if (nid < 0)
3612 return 0;
3613
3614 /*
3615 * We are bringing a node online. No memory is available yet. We must
3616 * allocate a kmem_cache_node structure in order to bring the node
3617 * online.
3618 */
3619 down_read(&slub_lock);
3620 list_for_each_entry(s, &slab_caches, list) {
3621 /*
3622 * XXX: kmem_cache_alloc_node will fallback to other nodes
3623 * since memory is not yet available from the node that
3624 * is brought up.
3625 */
3626 n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL);
3627 if (!n) {
3628 ret = -ENOMEM;
3629 goto out;
3630 }
3631 init_kmem_cache_node(n);
3632 s->node[nid] = n;
3633 }
3634out:
3635 up_read(&slub_lock);
3636 return ret;
3637}
3638
3639static int slab_memory_callback(struct notifier_block *self,
3640 unsigned long action, void *arg)
3641{
3642 int ret = 0;
3643
3644 switch (action) {
3645 case MEM_GOING_ONLINE:
3646 ret = slab_mem_going_online_callback(arg);
3647 break;
3648 case MEM_GOING_OFFLINE:
3649 ret = slab_mem_going_offline_callback(arg);
3650 break;
3651 case MEM_OFFLINE:
3652 case MEM_CANCEL_ONLINE:
3653 slab_mem_offline_callback(arg);
3654 break;
3655 case MEM_ONLINE:
3656 case MEM_CANCEL_OFFLINE:
3657 break;
3658 }
3659 if (ret)
3660 ret = notifier_from_errno(ret);
3661 else
3662 ret = NOTIFY_OK;
3663 return ret;
3664}
3665
3666#endif /* CONFIG_MEMORY_HOTPLUG */
3667
3668/********************************************************************
3669 * Basic setup of slabs
3670 *******************************************************************/
3671
3672/*
3673 * Used for early kmem_cache structures that were allocated using
3674 * the page allocator
3675 */
3676
3677static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
3678{
3679 int node;
3680
3681 list_add(&s->list, &slab_caches);
3682 s->refcount = -1;
3683
3684 for_each_node_state(node, N_NORMAL_MEMORY) {
3685 struct kmem_cache_node *n = get_node(s, node);
3686 struct page *p;
3687
3688 if (n) {
3689 list_for_each_entry(p, &n->partial, lru)
3690 p->slab = s;
3691
3692#ifdef CONFIG_SLUB_DEBUG
3693 list_for_each_entry(p, &n->full, lru)
3694 p->slab = s;
3695#endif
3696 }
3697 }
3698}
3699
3700void __init kmem_cache_init(void)
3701{
3702 int i;
3703 int caches = 0;
3704 struct kmem_cache *temp_kmem_cache;
3705 int order;
3706 struct kmem_cache *temp_kmem_cache_node;
3707 unsigned long kmalloc_size;
3708
3709 if (debug_guardpage_minorder())
3710 slub_max_order = 0;
3711
3712 kmem_size = offsetof(struct kmem_cache, node) +
3713 nr_node_ids * sizeof(struct kmem_cache_node *);
3714
3715 /* Allocate two kmem_caches from the page allocator */
3716 kmalloc_size = ALIGN(kmem_size, cache_line_size());
3717 order = get_order(2 * kmalloc_size);
3718 kmem_cache = (void *)__get_free_pages(GFP_NOWAIT, order);
3719
3720 /*
3721 * Must first have the slab cache available for the allocations of the
3722 * struct kmem_cache_node's. There is special bootstrap code in
3723 * kmem_cache_open for slab_state == DOWN.
3724 */
3725 kmem_cache_node = (void *)kmem_cache + kmalloc_size;
3726
3727 kmem_cache_open(kmem_cache_node, "kmem_cache_node",
3728 sizeof(struct kmem_cache_node),
3729 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
3730
3731 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
3732
3733 /* Able to allocate the per node structures */
3734 slab_state = PARTIAL;
3735
3736 temp_kmem_cache = kmem_cache;
3737 kmem_cache_open(kmem_cache, "kmem_cache", kmem_size,
3738 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
3739 kmem_cache = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
3740 memcpy(kmem_cache, temp_kmem_cache, kmem_size);
3741
3742 /*
3743 * Allocate kmem_cache_node properly from the kmem_cache slab.
3744 * kmem_cache_node is separately allocated so no need to
3745 * update any list pointers.
3746 */
3747 temp_kmem_cache_node = kmem_cache_node;
3748
3749 kmem_cache_node = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
3750 memcpy(kmem_cache_node, temp_kmem_cache_node, kmem_size);
3751
3752 kmem_cache_bootstrap_fixup(kmem_cache_node);
3753
3754 caches++;
3755 kmem_cache_bootstrap_fixup(kmem_cache);
3756 caches++;
3757 /* Free temporary boot structure */
3758 free_pages((unsigned long)temp_kmem_cache, order);
3759
3760 /* Now we can use the kmem_cache to allocate kmalloc slabs */
3761
3762 /*
3763 * Patch up the size_index table if we have strange large alignment
3764 * requirements for the kmalloc array. This is only the case for
3765 * MIPS it seems. The standard arches will not generate any code here.
3766 *
3767 * Largest permitted alignment is 256 bytes due to the way we
3768 * handle the index determination for the smaller caches.
3769 *
3770 * Make sure that nothing crazy happens if someone starts tinkering
3771 * around with ARCH_KMALLOC_MINALIGN
3772 */
3773 BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
3774 (KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
3775
3776 for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
3777 int elem = size_index_elem(i);
3778 if (elem >= ARRAY_SIZE(size_index))
3779 break;
3780 size_index[elem] = KMALLOC_SHIFT_LOW;
3781 }
3782
3783 if (KMALLOC_MIN_SIZE == 64) {
3784 /*
3785 * The 96 byte size cache is not used if the alignment
3786 * is 64 byte.
3787		 * is 64 bytes.
3788 for (i = 64 + 8; i <= 96; i += 8)
3789 size_index[size_index_elem(i)] = 7;
3790 } else if (KMALLOC_MIN_SIZE == 128) {
3791 /*
3792 * The 192 byte sized cache is not used if the alignment
3793		 * is 128 bytes. Redirect kmalloc to use the 256 byte cache
3794 * instead.
3795 */
3796 for (i = 128 + 8; i <= 192; i += 8)
3797 size_index[size_index_elem(i)] = 8;
3798 }
3799
3800 /* Caches that are not of the two-to-the-power-of size */
3801 if (KMALLOC_MIN_SIZE <= 32) {
3802 kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
3803 caches++;
3804 }
3805
3806 if (KMALLOC_MIN_SIZE <= 64) {
3807 kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
3808 caches++;
3809 }
3810
3811 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
3812 kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
3813 caches++;
3814 }
3815
3816 slab_state = UP;
3817
3818 /* Provide the correct kmalloc names now that the caches are up */
3819 if (KMALLOC_MIN_SIZE <= 32) {
3820 kmalloc_caches[1]->name = kstrdup(kmalloc_caches[1]->name, GFP_NOWAIT);
3821 BUG_ON(!kmalloc_caches[1]->name);
3822 }
3823
3824 if (KMALLOC_MIN_SIZE <= 64) {
3825 kmalloc_caches[2]->name = kstrdup(kmalloc_caches[2]->name, GFP_NOWAIT);
3826 BUG_ON(!kmalloc_caches[2]->name);
3827 }
3828
3829 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
3830 char *s = kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i);
3831
3832 BUG_ON(!s);
3833 kmalloc_caches[i]->name = s;
3834 }
3835
3836#ifdef CONFIG_SMP
3837 register_cpu_notifier(&slab_notifier);
3838#endif
3839
3840#ifdef CONFIG_ZONE_DMA
3841 for (i = 0; i < SLUB_PAGE_SHIFT; i++) {
3842 struct kmem_cache *s = kmalloc_caches[i];
3843
3844 if (s && s->size) {
3845 char *name = kasprintf(GFP_NOWAIT,
3846 "dma-kmalloc-%d", s->objsize);
3847
3848 BUG_ON(!name);
3849 kmalloc_dma_caches[i] = create_kmalloc_cache(name,
3850 s->objsize, SLAB_CACHE_DMA);
3851 }
3852 }
3853#endif
3854 printk(KERN_INFO
3855 "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
3856 " CPUs=%d, Nodes=%d\n",
3857 caches, cache_line_size(),
3858 slub_min_order, slub_max_order, slub_min_objects,
3859 nr_cpu_ids, nr_node_ids);
3860}
3861
3862void __init kmem_cache_init_late(void)
3863{
3864}
3865
3866/*
3867 * Find a mergeable slab cache
3868 */
3869static int slab_unmergeable(struct kmem_cache *s)
3870{
3871 if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
3872 return 1;
3873
3874 if (s->ctor)
3875 return 1;
3876
3877 /*
3878 * We may have set a slab to be unmergeable during bootstrap.
3879 */
3880 if (s->refcount < 0)
3881 return 1;
3882
3883 return 0;
3884}
3885
3886static struct kmem_cache *find_mergeable(size_t size,
3887 size_t align, unsigned long flags, const char *name,
3888 void (*ctor)(void *))
3889{
3890 struct kmem_cache *s;
3891
3892 if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
3893 return NULL;
3894
3895 if (ctor)
3896 return NULL;
3897
3898 size = ALIGN(size, sizeof(void *));
3899 align = calculate_alignment(flags, align, size);
3900 size = ALIGN(size, align);
3901 flags = kmem_cache_flags(size, flags, name, NULL);
3902
3903 list_for_each_entry(s, &slab_caches, list) {
3904 if (slab_unmergeable(s))
3905 continue;
3906
3907 if (size > s->size)
3908 continue;
3909
3910 if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME))
3911 continue;
3912 /*
3913 * Check if alignment is compatible.
3914 * Courtesy of Adrian Drzewiecki
3915 */
3916 if ((s->size & ~(align - 1)) != s->size)
3917 continue;
3918
3919 if (s->size - size >= sizeof(void *))
3920 continue;
3921
3922 return s;
3923 }
3924 return NULL;
3925}
3926
3927struct kmem_cache *kmem_cache_create(const char *name, size_t size,
3928 size_t align, unsigned long flags, void (*ctor)(void *))
3929{
3930 struct kmem_cache *s;
3931 char *n;
3932
3933 if (WARN_ON(!name))
3934 return NULL;
3935
3936 down_write(&slub_lock);
3937 s = find_mergeable(size, align, flags, name, ctor);
3938 if (s) {
3939 s->refcount++;
3940 /*
3941 * Adjust the object sizes so that we clear
3942 * the complete object on kzalloc.
3943 */
3944 s->objsize = max(s->objsize, (int)size);
3945 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
3946
3947 if (sysfs_slab_alias(s, name)) {
3948 s->refcount--;
3949 goto err;
3950 }
3951 up_write(&slub_lock);
3952 return s;
3953 }
3954
3955 n = kstrdup(name, GFP_KERNEL);
3956 if (!n)
3957 goto err;
3958
3959 s = kmalloc(kmem_size, GFP_KERNEL);
3960 if (s) {
3961 if (kmem_cache_open(s, n,
3962 size, align, flags, ctor)) {
3963 list_add(&s->list, &slab_caches);
3964 up_write(&slub_lock);
3965 if (sysfs_slab_add(s)) {
3966 down_write(&slub_lock);
3967 list_del(&s->list);
3968 kfree(n);
3969 kfree(s);
3970 goto err;
3971 }
3972 return s;
3973 }
3974 kfree(s);
3975 }
3976 kfree(n);
3977err:
3978 up_write(&slub_lock);
3979
3980 if (flags & SLAB_PANIC)
3981 panic("Cannot create slabcache %s\n", name);
3982 else
3983 s = NULL;
3984 return s;
3985}
3986EXPORT_SYMBOL(kmem_cache_create);
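
/*
 * Typical usage from a subsystem (illustrative only; "foo", struct foo and
 * foo_cache are hypothetical names):
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, NULL);
 *	p = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, p);
 *	kmem_cache_destroy(foo_cache);
 */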
3987
3988#ifdef CONFIG_SMP
3989/*
3990 * Use the cpu notifier to ensure that the cpu slabs are flushed when
3991 * necessary.
3992 */
3993static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
3994 unsigned long action, void *hcpu)
3995{
3996 long cpu = (long)hcpu;
3997 struct kmem_cache *s;
3998 unsigned long flags;
3999
4000 switch (action) {
4001 case CPU_UP_CANCELED:
4002 case CPU_UP_CANCELED_FROZEN:
4003 case CPU_DEAD:
4004 case CPU_DEAD_FROZEN:
4005 down_read(&slub_lock);
4006 list_for_each_entry(s, &slab_caches, list) {
4007 local_irq_save(flags);
4008 __flush_cpu_slab(s, cpu);
4009 local_irq_restore(flags);
4010 }
4011 up_read(&slub_lock);
4012 break;
4013 default:
4014 break;
4015 }
4016 return NOTIFY_OK;
4017}
4018
4019static struct notifier_block __cpuinitdata slab_notifier = {
4020 .notifier_call = slab_cpuup_callback
4021};
4022
4023#endif
4024
4025void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
4026{
4027 struct kmem_cache *s;
4028 void *ret;
4029
4030 if (unlikely(size > SLUB_MAX_SIZE))
4031 return kmalloc_large(size, gfpflags);
4032
4033 s = get_slab(size, gfpflags);
4034
4035 if (unlikely(ZERO_OR_NULL_PTR(s)))
4036 return s;
4037
4038 ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, caller);
4039
4040 /* Honor the call site pointer we received. */
4041 trace_kmalloc(caller, ret, size, s->size, gfpflags);
4042
4043 return ret;
4044}
4045
4046#ifdef CONFIG_NUMA
4047void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
4048 int node, unsigned long caller)
4049{
4050 struct kmem_cache *s;
4051 void *ret;
4052
4053 if (unlikely(size > SLUB_MAX_SIZE)) {
4054 ret = kmalloc_large_node(size, gfpflags, node);
4055
4056 trace_kmalloc_node(caller, ret,
4057 size, PAGE_SIZE << get_order(size),
4058 gfpflags, node);
4059
4060 return ret;
4061 }
4062
4063 s = get_slab(size, gfpflags);
4064
4065 if (unlikely(ZERO_OR_NULL_PTR(s)))
4066 return s;
4067
4068 ret = slab_alloc(s, gfpflags, node, caller);
4069
4070 /* Honor the call site pointer we received. */
4071 trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
4072
4073 return ret;
4074}
4075#endif
4076
4077#ifdef CONFIG_SYSFS
4078static int count_inuse(struct page *page)
4079{
4080 return page->inuse;
4081}
4082
4083static int count_total(struct page *page)
4084{
4085 return page->objects;
4086}
4087#endif
4088
4089#ifdef CONFIG_SLUB_DEBUG
4090static int validate_slab(struct kmem_cache *s, struct page *page,
4091 unsigned long *map)
4092{
4093 void *p;
4094 void *addr = page_address(page);
4095
4096 if (!check_slab(s, page) ||
4097 !on_freelist(s, page, NULL))
4098 return 0;
4099
4100 /* Now we know that a valid freelist exists */
4101 bitmap_zero(map, page->objects);
4102
4103 get_map(s, page, map);
4104 for_each_object(p, s, addr, page->objects) {
4105 if (test_bit(slab_index(p, s, addr), map))
4106 if (!check_object(s, page, p, SLUB_RED_INACTIVE))
4107 return 0;
4108 }
4109
4110 for_each_object(p, s, addr, page->objects)
4111 if (!test_bit(slab_index(p, s, addr), map))
4112 if (!check_object(s, page, p, SLUB_RED_ACTIVE))
4113 return 0;
4114 return 1;
4115}
4116
4117static void validate_slab_slab(struct kmem_cache *s, struct page *page,
4118 unsigned long *map)
4119{
4120 slab_lock(page);
4121 validate_slab(s, page, map);
4122 slab_unlock(page);
4123}
4124
4125static int validate_slab_node(struct kmem_cache *s,
4126 struct kmem_cache_node *n, unsigned long *map)
4127{
4128 unsigned long count = 0;
4129 struct page *page;
4130 unsigned long flags;
4131
4132 spin_lock_irqsave(&n->list_lock, flags);
4133
4134 list_for_each_entry(page, &n->partial, lru) {
4135 validate_slab_slab(s, page, map);
4136 count++;
4137 }
4138 if (count != n->nr_partial)
4139 printk(KERN_ERR "SLUB %s: %ld partial slabs counted but "
4140 "counter=%ld\n", s->name, count, n->nr_partial);
4141
4142 if (!(s->flags & SLAB_STORE_USER))
4143 goto out;
4144
4145 list_for_each_entry(page, &n->full, lru) {
4146 validate_slab_slab(s, page, map);
4147 count++;
4148 }
4149 if (count != atomic_long_read(&n->nr_slabs))
4150 printk(KERN_ERR "SLUB: %s %ld slabs counted but "
4151 "counter=%ld\n", s->name, count,
4152 atomic_long_read(&n->nr_slabs));
4153
4154out:
4155 spin_unlock_irqrestore(&n->list_lock, flags);
4156 return count;
4157}
4158
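/*
 * Walk every slab (partial and, when SLAB_STORE_USER keeps a full list,
 * full) on all nodes after flushing the cpu slabs, checking freelist
 * consistency and the expected red zone/poison state of every object.
 * Returns the number of slabs examined, or -ENOMEM.
 */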
4159static long validate_slab_cache(struct kmem_cache *s)
4160{
4161 int node;
4162 unsigned long count = 0;
4163 unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
4164 sizeof(unsigned long), GFP_KERNEL);
4165
4166 if (!map)
4167 return -ENOMEM;
4168
4169 flush_all(s);
4170 for_each_node_state(node, N_NORMAL_MEMORY) {
4171 struct kmem_cache_node *n = get_node(s, node);
4172
4173 count += validate_slab_node(s, n, map);
4174 }
4175 kfree(map);
4176 return count;
4177}
4178/*
4179 * Generate lists of code addresses where slabcache objects are allocated
4180 * and freed.
4181 */
4182
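/*
 * One entry per call site: how many tracked operations hit that address,
 * age statistics (jiffies since the operation), the pid range, the cpus
 * the operations ran on and the nodes the objects came from.
 */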
4183struct location {
4184 unsigned long count;
4185 unsigned long addr;
4186 long long sum_time;
4187 long min_time;
4188 long max_time;
4189 long min_pid;
4190 long max_pid;
4191 DECLARE_BITMAP(cpus, NR_CPUS);
4192 nodemask_t nodes;
4193};
4194
4195struct loc_track {
4196 unsigned long max;
4197 unsigned long count;
4198 struct location *loc;
4199};
4200
4201static void free_loc_track(struct loc_track *t)
4202{
4203 if (t->max)
4204 free_pages((unsigned long)t->loc,
4205 get_order(sizeof(struct location) * t->max));
4206}
4207
4208static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
4209{
4210 struct location *l;
4211 int order;
4212
4213 order = get_order(sizeof(struct location) * max);
4214
4215 l = (void *)__get_free_pages(flags, order);
4216 if (!l)
4217 return 0;
4218
4219 if (t->count) {
4220 memcpy(l, t->loc, sizeof(struct location) * t->count);
4221 free_loc_track(t);
4222 }
4223 t->max = max;
4224 t->loc = l;
4225 return 1;
4226}
4227
4228static int add_location(struct loc_track *t, struct kmem_cache *s,
4229 const struct track *track)
4230{
4231 long start, end, pos;
4232 struct location *l;
4233 unsigned long caddr;
4234 unsigned long age = jiffies - track->when;
4235
4236 start = -1;
4237 end = t->count;
4238
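	/*
	 * t->loc is kept sorted by address: binary search for an existing
	 * entry, or for the position where a new one must be inserted.
	 */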
4239 for ( ; ; ) {
4240 pos = start + (end - start + 1) / 2;
4241
4242 /*
4243 * There is nothing at "end". If we end up there
4244		 * we need to insert the new entry before "end".
4245 */
4246 if (pos == end)
4247 break;
4248
4249 caddr = t->loc[pos].addr;
4250 if (track->addr == caddr) {
4251
4252 l = &t->loc[pos];
4253 l->count++;
4254 if (track->when) {
4255 l->sum_time += age;
4256 if (age < l->min_time)
4257 l->min_time = age;
4258 if (age > l->max_time)
4259 l->max_time = age;
4260
4261 if (track->pid < l->min_pid)
4262 l->min_pid = track->pid;
4263 if (track->pid > l->max_pid)
4264 l->max_pid = track->pid;
4265
4266 cpumask_set_cpu(track->cpu,
4267 to_cpumask(l->cpus));
4268 }
4269 node_set(page_to_nid(virt_to_page(track)), l->nodes);
4270 return 1;
4271 }
4272
4273 if (track->addr < caddr)
4274 end = pos;
4275 else
4276 start = pos;
4277 }
4278
4279 /*
4280 * Not found. Insert new tracking element.
4281 */
4282 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
4283 return 0;
4284
4285 l = t->loc + pos;
4286 if (pos < t->count)
4287 memmove(l + 1, l,
4288 (t->count - pos) * sizeof(struct location));
4289 t->count++;
4290 l->count = 1;
4291 l->addr = track->addr;
4292 l->sum_time = age;
4293 l->min_time = age;
4294 l->max_time = age;
4295 l->min_pid = track->pid;
4296 l->max_pid = track->pid;
4297 cpumask_clear(to_cpumask(l->cpus));
4298 cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
4299 nodes_clear(l->nodes);
4300 node_set(page_to_nid(virt_to_page(track)), l->nodes);
4301 return 1;
4302}
4303
4304static void process_slab(struct loc_track *t, struct kmem_cache *s,
4305 struct page *page, enum track_item alloc,
4306 unsigned long *map)
4307{
4308 void *addr = page_address(page);
4309 void *p;
4310
4311 bitmap_zero(map, page->objects);
4312 get_map(s, page, map);
4313
4314 for_each_object(p, s, addr, page->objects)
4315 if (!test_bit(slab_index(p, s, addr), map))
4316 add_location(t, s, get_track(s, p, alloc));
4317}
4318
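/*
 * Produce the alloc_calls/free_calls output: one line per call site with
 * its hit count, symbol, age and pid ranges, and the cpus and nodes
 * involved.
 */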
4319static int list_locations(struct kmem_cache *s, char *buf,
4320 enum track_item alloc)
4321{
4322 int len = 0;
4323 unsigned long i;
4324 struct loc_track t = { 0, 0, NULL };
4325 int node;
4326 unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
4327 sizeof(unsigned long), GFP_KERNEL);
4328
4329 if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
4330 GFP_TEMPORARY)) {
4331 kfree(map);
4332 return sprintf(buf, "Out of memory\n");
4333 }
4334 /* Push back cpu slabs */
4335 flush_all(s);
4336
4337 for_each_node_state(node, N_NORMAL_MEMORY) {
4338 struct kmem_cache_node *n = get_node(s, node);
4339 unsigned long flags;
4340 struct page *page;
4341
4342 if (!atomic_long_read(&n->nr_slabs))
4343 continue;
4344
4345 spin_lock_irqsave(&n->list_lock, flags);
4346 list_for_each_entry(page, &n->partial, lru)
4347 process_slab(&t, s, page, alloc, map);
4348 list_for_each_entry(page, &n->full, lru)
4349 process_slab(&t, s, page, alloc, map);
4350 spin_unlock_irqrestore(&n->list_lock, flags);
4351 }
4352
4353 for (i = 0; i < t.count; i++) {
4354 struct location *l = &t.loc[i];
4355
4356 if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100)
4357 break;
4358 len += sprintf(buf + len, "%7ld ", l->count);
4359
4360 if (l->addr)
4361 len += sprintf(buf + len, "%pS", (void *)l->addr);
4362 else
4363 len += sprintf(buf + len, "<not-available>");
4364
4365 if (l->sum_time != l->min_time) {
4366 len += sprintf(buf + len, " age=%ld/%ld/%ld",
4367 l->min_time,
4368 (long)div_u64(l->sum_time, l->count),
4369 l->max_time);
4370 } else
4371 len += sprintf(buf + len, " age=%ld",
4372 l->min_time);
4373
4374 if (l->min_pid != l->max_pid)
4375 len += sprintf(buf + len, " pid=%ld-%ld",
4376 l->min_pid, l->max_pid);
4377 else
4378 len += sprintf(buf + len, " pid=%ld",
4379 l->min_pid);
4380
4381 if (num_online_cpus() > 1 &&
4382 !cpumask_empty(to_cpumask(l->cpus)) &&
4383 len < PAGE_SIZE - 60) {
4384 len += sprintf(buf + len, " cpus=");
4385 len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50,
4386 to_cpumask(l->cpus));
4387 }
4388
4389 if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
4390 len < PAGE_SIZE - 60) {
4391 len += sprintf(buf + len, " nodes=");
4392 len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50,
4393 l->nodes);
4394 }
4395
4396 len += sprintf(buf + len, "\n");
4397 }
4398
4399 free_loc_track(&t);
4400 kfree(map);
4401 if (!t.count)
4402 len += sprintf(buf, "No data\n");
4403 return len;
4404}
4405#endif
4406
4407#ifdef SLUB_RESILIENCY_TEST
4408static void resiliency_test(void)
4409{
4410 u8 *p;
4411
4412 BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || SLUB_PAGE_SHIFT < 10);
4413
4414 printk(KERN_ERR "SLUB resiliency testing\n");
4415 printk(KERN_ERR "-----------------------\n");
4416 printk(KERN_ERR "A. Corruption after allocation\n");
4417
4418 p = kzalloc(16, GFP_KERNEL);
4419 p[16] = 0x12;
4420 printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer"
4421 " 0x12->0x%p\n\n", p + 16);
4422
4423 validate_slab_cache(kmalloc_caches[4]);
4424
4425 /* Hmmm... The next two are dangerous */
4426 p = kzalloc(32, GFP_KERNEL);
4427 p[32 + sizeof(void *)] = 0x34;
4428 printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab"
4429			" 0x34 -> 0x%p\n", p);
4430 printk(KERN_ERR
4431 "If allocated object is overwritten then not detectable\n\n");
4432
4433 validate_slab_cache(kmalloc_caches[5]);
4434 p = kzalloc(64, GFP_KERNEL);
4435 p += 64 + (get_cycles() & 0xff) * sizeof(void *);
4436 *p = 0x56;
4437 printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
4438 p);
4439 printk(KERN_ERR
4440 "If allocated object is overwritten then not detectable\n\n");
4441 validate_slab_cache(kmalloc_caches[6]);
4442
4443 printk(KERN_ERR "\nB. Corruption after free\n");
4444 p = kzalloc(128, GFP_KERNEL);
4445 kfree(p);
4446 *p = 0x78;
4447 printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
4448 validate_slab_cache(kmalloc_caches[7]);
4449
4450 p = kzalloc(256, GFP_KERNEL);
4451 kfree(p);
4452 p[50] = 0x9a;
4453 printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n",
4454 p);
4455 validate_slab_cache(kmalloc_caches[8]);
4456
4457 p = kzalloc(512, GFP_KERNEL);
4458 kfree(p);
4459 p[512] = 0xab;
4460 printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
4461 validate_slab_cache(kmalloc_caches[9]);
4462}
4463#else
4464#ifdef CONFIG_SYSFS
4465static void resiliency_test(void) {}
4466#endif
4467#endif
4468
4469#ifdef CONFIG_SYSFS
4470enum slab_stat_type {
4471 SL_ALL, /* All slabs */
4472 SL_PARTIAL, /* Only partially allocated slabs */
4473 SL_CPU, /* Only slabs used for cpu caches */
4474 SL_OBJECTS, /* Determine allocated objects not slabs */
4475 SL_TOTAL /* Determine object capacity not slabs */
4476};
4477
4478#define SO_ALL (1 << SL_ALL)
4479#define SO_PARTIAL (1 << SL_PARTIAL)
4480#define SO_CPU (1 << SL_CPU)
4481#define SO_OBJECTS (1 << SL_OBJECTS)
4482#define SO_TOTAL (1 << SL_TOTAL)
4483
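/*
 * Back end for the object/slab counting sysfs files.  Emits the requested
 * total followed, on NUMA, by a per node breakdown such as
 * "4096 N0=2048 N1=2048".
 */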
4484static ssize_t show_slab_objects(struct kmem_cache *s,
4485 char *buf, unsigned long flags)
4486{
4487 unsigned long total = 0;
4488 int node;
4489 int x;
4490 unsigned long *nodes;
4491 unsigned long *per_cpu;
4492
4493 nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
4494 if (!nodes)
4495 return -ENOMEM;
4496 per_cpu = nodes + nr_node_ids;
4497
4498 if (flags & SO_CPU) {
4499 int cpu;
4500
4501 for_each_possible_cpu(cpu) {
4502 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
4503 int node = ACCESS_ONCE(c->node);
4504 struct page *page;
4505
4506 if (node < 0)
4507 continue;
4508 page = ACCESS_ONCE(c->page);
4509 if (page) {
4510 if (flags & SO_TOTAL)
4511 x = page->objects;
4512 else if (flags & SO_OBJECTS)
4513 x = page->inuse;
4514 else
4515 x = 1;
4516
4517 total += x;
4518 nodes[node] += x;
4519 }
4520 page = c->partial;
4521
4522 if (page) {
4523 x = page->pobjects;
4524 total += x;
4525 nodes[node] += x;
4526 }
4527 per_cpu[node]++;
4528 }
4529 }
4530
4531 lock_memory_hotplug();
4532#ifdef CONFIG_SLUB_DEBUG
4533 if (flags & SO_ALL) {
4534 for_each_node_state(node, N_NORMAL_MEMORY) {
4535 struct kmem_cache_node *n = get_node(s, node);
4536
4537 if (flags & SO_TOTAL)
4538 x = atomic_long_read(&n->total_objects);
4539 else if (flags & SO_OBJECTS)
4540 x = atomic_long_read(&n->total_objects) -
4541 count_partial(n, count_free);
4542
4543 else
4544 x = atomic_long_read(&n->nr_slabs);
4545 total += x;
4546 nodes[node] += x;
4547 }
4548
4549 } else
4550#endif
4551 if (flags & SO_PARTIAL) {
4552 for_each_node_state(node, N_NORMAL_MEMORY) {
4553 struct kmem_cache_node *n = get_node(s, node);
4554
4555 if (flags & SO_TOTAL)
4556 x = count_partial(n, count_total);
4557 else if (flags & SO_OBJECTS)
4558 x = count_partial(n, count_inuse);
4559 else
4560 x = n->nr_partial;
4561 total += x;
4562 nodes[node] += x;
4563 }
4564 }
4565 x = sprintf(buf, "%lu", total);
4566#ifdef CONFIG_NUMA
4567 for_each_node_state(node, N_NORMAL_MEMORY)
4568 if (nodes[node])
4569 x += sprintf(buf + x, " N%d=%lu",
4570 node, nodes[node]);
4571#endif
4572 unlock_memory_hotplug();
4573 kfree(nodes);
4574 return x + sprintf(buf + x, "\n");
4575}
4576
4577#ifdef CONFIG_SLUB_DEBUG
4578static int any_slab_objects(struct kmem_cache *s)
4579{
4580 int node;
4581
4582 for_each_online_node(node) {
4583 struct kmem_cache_node *n = get_node(s, node);
4584
4585 if (!n)
4586 continue;
4587
4588 if (atomic_long_read(&n->total_objects))
4589 return 1;
4590 }
4591 return 0;
4592}
4593#endif
4594
4595#define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
4596#define to_slab(n) container_of(n, struct kmem_cache, kobj)
4597
4598struct slab_attribute {
4599 struct attribute attr;
4600 ssize_t (*show)(struct kmem_cache *s, char *buf);
4601 ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
4602};
4603
4604#define SLAB_ATTR_RO(_name) \
4605 static struct slab_attribute _name##_attr = \
4606 __ATTR(_name, 0400, _name##_show, NULL)
4607
4608#define SLAB_ATTR(_name) \
4609 static struct slab_attribute _name##_attr = \
4610 __ATTR(_name, 0600, _name##_show, _name##_store)
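/*
 * Each attribute defined below becomes a file, typically under
 * /sys/kernel/slab/<cache>/: read-only (0400) for SLAB_ATTR_RO and
 * read-write (0600) for SLAB_ATTR.
 */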
4611
4612static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
4613{
4614 return sprintf(buf, "%d\n", s->size);
4615}
4616SLAB_ATTR_RO(slab_size);
4617
4618static ssize_t align_show(struct kmem_cache *s, char *buf)
4619{
4620 return sprintf(buf, "%d\n", s->align);
4621}
4622SLAB_ATTR_RO(align);
4623
4624static ssize_t object_size_show(struct kmem_cache *s, char *buf)
4625{
4626 return sprintf(buf, "%d\n", s->objsize);
4627}
4628SLAB_ATTR_RO(object_size);
4629
4630static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
4631{
4632 return sprintf(buf, "%d\n", oo_objects(s->oo));
4633}
4634SLAB_ATTR_RO(objs_per_slab);
4635
4636static ssize_t order_store(struct kmem_cache *s,
4637 const char *buf, size_t length)
4638{
4639 unsigned long order;
4640 int err;
4641
4642 err = strict_strtoul(buf, 10, &order);
4643 if (err)
4644 return err;
4645
4646 if (order > slub_max_order || order < slub_min_order)
4647 return -EINVAL;
4648
4649 calculate_sizes(s, order);
4650 return length;
4651}
4652
4653static ssize_t order_show(struct kmem_cache *s, char *buf)
4654{
4655 return sprintf(buf, "%d\n", oo_order(s->oo));
4656}
4657SLAB_ATTR(order);
4658
4659static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
4660{
4661 return sprintf(buf, "%lu\n", s->min_partial);
4662}
4663
4664static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
4665 size_t length)
4666{
4667 unsigned long min;
4668 int err;
4669
4670 err = strict_strtoul(buf, 10, &min);
4671 if (err)
4672 return err;
4673
4674 set_min_partial(s, min);
4675 return length;
4676}
4677SLAB_ATTR(min_partial);
4678
4679static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
4680{
4681 return sprintf(buf, "%u\n", s->cpu_partial);
4682}
4683
4684static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
4685 size_t length)
4686{
4687 unsigned long objects;
4688 int err;
4689
4690 err = strict_strtoul(buf, 10, &objects);
4691 if (err)
4692 return err;
4693 if (objects && kmem_cache_debug(s))
4694 return -EINVAL;
4695
4696 s->cpu_partial = objects;
4697 flush_all(s);
4698 return length;
4699}
4700SLAB_ATTR(cpu_partial);
4701
4702static ssize_t ctor_show(struct kmem_cache *s, char *buf)
4703{
4704 if (!s->ctor)
4705 return 0;
4706 return sprintf(buf, "%pS\n", s->ctor);
4707}
4708SLAB_ATTR_RO(ctor);
4709
4710static ssize_t aliases_show(struct kmem_cache *s, char *buf)
4711{
4712 return sprintf(buf, "%d\n", s->refcount - 1);
4713}
4714SLAB_ATTR_RO(aliases);
4715
4716static ssize_t partial_show(struct kmem_cache *s, char *buf)
4717{
4718 return show_slab_objects(s, buf, SO_PARTIAL);
4719}
4720SLAB_ATTR_RO(partial);
4721
4722static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
4723{
4724 return show_slab_objects(s, buf, SO_CPU);
4725}
4726SLAB_ATTR_RO(cpu_slabs);
4727
4728static ssize_t objects_show(struct kmem_cache *s, char *buf)
4729{
4730 return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
4731}
4732SLAB_ATTR_RO(objects);
4733
4734static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
4735{
4736 return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
4737}
4738SLAB_ATTR_RO(objects_partial);
4739
4740static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
4741{
4742 int objects = 0;
4743 int pages = 0;
4744 int cpu;
4745 int len;
4746
4747 for_each_online_cpu(cpu) {
4748 struct page *page = per_cpu_ptr(s->cpu_slab, cpu)->partial;
4749
4750 if (page) {
4751 pages += page->pages;
4752 objects += page->pobjects;
4753 }
4754 }
4755
4756 len = sprintf(buf, "%d(%d)", objects, pages);
4757
4758#ifdef CONFIG_SMP
4759 for_each_online_cpu(cpu) {
4760		struct page *page = per_cpu_ptr(s->cpu_slab, cpu)->partial;
4761
4762 if (page && len < PAGE_SIZE - 20)
4763 len += sprintf(buf + len, " C%d=%d(%d)", cpu,
4764 page->pobjects, page->pages);
4765 }
4766#endif
4767 return len + sprintf(buf + len, "\n");
4768}
4769SLAB_ATTR_RO(slabs_cpu_partial);
4770
4771static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
4772{
4773 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
4774}
4775
4776static ssize_t reclaim_account_store(struct kmem_cache *s,
4777 const char *buf, size_t length)
4778{
4779 s->flags &= ~SLAB_RECLAIM_ACCOUNT;
4780 if (buf[0] == '1')
4781 s->flags |= SLAB_RECLAIM_ACCOUNT;
4782 return length;
4783}
4784SLAB_ATTR(reclaim_account);
4785
4786static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
4787{
4788 return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
4789}
4790SLAB_ATTR_RO(hwcache_align);
4791
4792#ifdef CONFIG_ZONE_DMA
4793static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
4794{
4795 return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
4796}
4797SLAB_ATTR_RO(cache_dma);
4798#endif
4799
4800static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
4801{
4802 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
4803}
4804SLAB_ATTR_RO(destroy_by_rcu);
4805
4806static ssize_t reserved_show(struct kmem_cache *s, char *buf)
4807{
4808 return sprintf(buf, "%d\n", s->reserved);
4809}
4810SLAB_ATTR_RO(reserved);
4811
4812#ifdef CONFIG_SLUB_DEBUG
4813static ssize_t slabs_show(struct kmem_cache *s, char *buf)
4814{
4815 return show_slab_objects(s, buf, SO_ALL);
4816}
4817SLAB_ATTR_RO(slabs);
4818
4819static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
4820{
4821 return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
4822}
4823SLAB_ATTR_RO(total_objects);
4824
4825static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
4826{
4827 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE));
4828}
4829
4830static ssize_t sanity_checks_store(struct kmem_cache *s,
4831 const char *buf, size_t length)
4832{
4833 s->flags &= ~SLAB_DEBUG_FREE;
4834 if (buf[0] == '1') {
4835 s->flags &= ~__CMPXCHG_DOUBLE;
4836 s->flags |= SLAB_DEBUG_FREE;
4837 }
4838 return length;
4839}
4840SLAB_ATTR(sanity_checks);
4841
4842static ssize_t trace_show(struct kmem_cache *s, char *buf)
4843{
4844 return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
4845}
4846
4847static ssize_t trace_store(struct kmem_cache *s, const char *buf,
4848 size_t length)
4849{
4850 s->flags &= ~SLAB_TRACE;
4851 if (buf[0] == '1') {
4852 s->flags &= ~__CMPXCHG_DOUBLE;
4853 s->flags |= SLAB_TRACE;
4854 }
4855 return length;
4856}
4857SLAB_ATTR(trace);
4858
4859static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
4860{
4861 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
4862}
4863
4864static ssize_t red_zone_store(struct kmem_cache *s,
4865 const char *buf, size_t length)
4866{
4867 if (any_slab_objects(s))
4868 return -EBUSY;
4869
4870 s->flags &= ~SLAB_RED_ZONE;
4871 if (buf[0] == '1') {
4872 s->flags &= ~__CMPXCHG_DOUBLE;
4873 s->flags |= SLAB_RED_ZONE;
4874 }
4875 calculate_sizes(s, -1);
4876 return length;
4877}
4878SLAB_ATTR(red_zone);
4879
4880static ssize_t poison_show(struct kmem_cache *s, char *buf)
4881{
4882 return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON));
4883}
4884
4885static ssize_t poison_store(struct kmem_cache *s,
4886 const char *buf, size_t length)
4887{
4888 if (any_slab_objects(s))
4889 return -EBUSY;
4890
4891 s->flags &= ~SLAB_POISON;
4892 if (buf[0] == '1') {
4893 s->flags &= ~__CMPXCHG_DOUBLE;
4894 s->flags |= SLAB_POISON;
4895 }
4896 calculate_sizes(s, -1);
4897 return length;
4898}
4899SLAB_ATTR(poison);
4900
4901static ssize_t store_user_show(struct kmem_cache *s, char *buf)
4902{
4903 return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
4904}
4905
4906static ssize_t store_user_store(struct kmem_cache *s,
4907 const char *buf, size_t length)
4908{
4909 if (any_slab_objects(s))
4910 return -EBUSY;
4911
4912 s->flags &= ~SLAB_STORE_USER;
4913 if (buf[0] == '1') {
4914 s->flags &= ~__CMPXCHG_DOUBLE;
4915 s->flags |= SLAB_STORE_USER;
4916 }
4917 calculate_sizes(s, -1);
4918 return length;
4919}
4920SLAB_ATTR(store_user);
4921
4922static ssize_t validate_show(struct kmem_cache *s, char *buf)
4923{
4924 return 0;
4925}
4926
4927static ssize_t validate_store(struct kmem_cache *s,
4928 const char *buf, size_t length)
4929{
4930 int ret = -EINVAL;
4931
4932 if (buf[0] == '1') {
4933 ret = validate_slab_cache(s);
4934 if (ret >= 0)
4935 ret = length;
4936 }
4937 return ret;
4938}
4939SLAB_ATTR(validate);
4940
4941static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
4942{
4943 if (!(s->flags & SLAB_STORE_USER))
4944 return -ENOSYS;
4945 return list_locations(s, buf, TRACK_ALLOC);
4946}
4947SLAB_ATTR_RO(alloc_calls);
4948
4949static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
4950{
4951 if (!(s->flags & SLAB_STORE_USER))
4952 return -ENOSYS;
4953 return list_locations(s, buf, TRACK_FREE);
4954}
4955SLAB_ATTR_RO(free_calls);
4956#endif /* CONFIG_SLUB_DEBUG */
4957
4958#ifdef CONFIG_FAILSLAB
4959static ssize_t failslab_show(struct kmem_cache *s, char *buf)
4960{
4961 return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
4962}
4963
4964static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
4965 size_t length)
4966{
4967 s->flags &= ~SLAB_FAILSLAB;
4968 if (buf[0] == '1')
4969 s->flags |= SLAB_FAILSLAB;
4970 return length;
4971}
4972SLAB_ATTR(failslab);
4973#endif
4974
4975static ssize_t shrink_show(struct kmem_cache *s, char *buf)
4976{
4977 return 0;
4978}
4979
4980static ssize_t shrink_store(struct kmem_cache *s,
4981 const char *buf, size_t length)
4982{
4983 if (buf[0] == '1') {
4984 int rc = kmem_cache_shrink(s);
4985
4986 if (rc)
4987 return rc;
4988 } else
4989 return -EINVAL;
4990 return length;
4991}
4992SLAB_ATTR(shrink);
4993
4994#ifdef CONFIG_NUMA
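/*
 * The defrag ratio is written and read as a percentage but kept internally
 * in tenths of a percent: writing "20" stores 200 and reads back as 20.
 */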
4995static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
4996{
4997 return sprintf(buf, "%d\n", s->remote_node_defrag_ratio / 10);
4998}
4999
5000static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
5001 const char *buf, size_t length)
5002{
5003 unsigned long ratio;
5004 int err;
5005
5006 err = strict_strtoul(buf, 10, &ratio);
5007 if (err)
5008 return err;
5009
5010 if (ratio <= 100)
5011 s->remote_node_defrag_ratio = ratio * 10;
5012
5013 return length;
5014}
5015SLAB_ATTR(remote_node_defrag_ratio);
5016#endif
5017
5018#ifdef CONFIG_SLUB_STATS
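/*
 * Sum one per cpu statistics counter and, on SMP, append a per cpu
 * breakdown such as "1024 C0=512 C1=512".  Writing '0' to the sysfs file
 * clears the counter on all online cpus (see STAT_ATTR below).
 */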
5019static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
5020{
5021 unsigned long sum = 0;
5022 int cpu;
5023 int len;
5024 int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);
5025
5026 if (!data)
5027 return -ENOMEM;
5028
5029 for_each_online_cpu(cpu) {
5030 unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si];
5031
5032 data[cpu] = x;
5033 sum += x;
5034 }
5035
5036 len = sprintf(buf, "%lu", sum);
5037
5038#ifdef CONFIG_SMP
5039 for_each_online_cpu(cpu) {
5040 if (data[cpu] && len < PAGE_SIZE - 20)
5041 len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]);
5042 }
5043#endif
5044 kfree(data);
5045 return len + sprintf(buf + len, "\n");
5046}
5047
5048static void clear_stat(struct kmem_cache *s, enum stat_item si)
5049{
5050 int cpu;
5051
5052 for_each_online_cpu(cpu)
5053 per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0;
5054}
5055
5056#define STAT_ATTR(si, text) \
5057static ssize_t text##_show(struct kmem_cache *s, char *buf) \
5058{ \
5059 return show_stat(s, buf, si); \
5060} \
5061static ssize_t text##_store(struct kmem_cache *s, \
5062 const char *buf, size_t length) \
5063{ \
5064 if (buf[0] != '0') \
5065 return -EINVAL; \
5066 clear_stat(s, si); \
5067 return length; \
5068} \
5069SLAB_ATTR(text); \
5070
5071STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
5072STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
5073STAT_ATTR(FREE_FASTPATH, free_fastpath);
5074STAT_ATTR(FREE_SLOWPATH, free_slowpath);
5075STAT_ATTR(FREE_FROZEN, free_frozen);
5076STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
5077STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
5078STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
5079STAT_ATTR(ALLOC_SLAB, alloc_slab);
5080STAT_ATTR(ALLOC_REFILL, alloc_refill);
5081STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch);
5082STAT_ATTR(FREE_SLAB, free_slab);
5083STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
5084STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
5085STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
5086STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
5087STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
5088STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
5089STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass);
5090STAT_ATTR(ORDER_FALLBACK, order_fallback);
5091STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
5092STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
5093STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc);
5094STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free);
5095STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
5096STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain);
5097#endif
5098
5099static struct attribute *slab_attrs[] = {
5100 &slab_size_attr.attr,
5101 &object_size_attr.attr,
5102 &objs_per_slab_attr.attr,
5103 &order_attr.attr,
5104 &min_partial_attr.attr,
5105 &cpu_partial_attr.attr,
5106 &objects_attr.attr,
5107 &objects_partial_attr.attr,
5108 &partial_attr.attr,
5109 &cpu_slabs_attr.attr,
5110 &ctor_attr.attr,
5111 &aliases_attr.attr,
5112 &align_attr.attr,
5113 &hwcache_align_attr.attr,
5114 &reclaim_account_attr.attr,
5115 &destroy_by_rcu_attr.attr,
5116 &shrink_attr.attr,
5117 &reserved_attr.attr,
5118 &slabs_cpu_partial_attr.attr,
5119#ifdef CONFIG_SLUB_DEBUG
5120 &total_objects_attr.attr,
5121 &slabs_attr.attr,
5122 &sanity_checks_attr.attr,
5123 &trace_attr.attr,
5124 &red_zone_attr.attr,
5125 &poison_attr.attr,
5126 &store_user_attr.attr,
5127 &validate_attr.attr,
5128 &alloc_calls_attr.attr,
5129 &free_calls_attr.attr,
5130#endif
5131#ifdef CONFIG_ZONE_DMA
5132 &cache_dma_attr.attr,
5133#endif
5134#ifdef CONFIG_NUMA
5135 &remote_node_defrag_ratio_attr.attr,
5136#endif
5137#ifdef CONFIG_SLUB_STATS
5138 &alloc_fastpath_attr.attr,
5139 &alloc_slowpath_attr.attr,
5140 &free_fastpath_attr.attr,
5141 &free_slowpath_attr.attr,
5142 &free_frozen_attr.attr,
5143 &free_add_partial_attr.attr,
5144 &free_remove_partial_attr.attr,
5145 &alloc_from_partial_attr.attr,
5146 &alloc_slab_attr.attr,
5147 &alloc_refill_attr.attr,
5148 &alloc_node_mismatch_attr.attr,
5149 &free_slab_attr.attr,
5150 &cpuslab_flush_attr.attr,
5151 &deactivate_full_attr.attr,
5152 &deactivate_empty_attr.attr,
5153 &deactivate_to_head_attr.attr,
5154 &deactivate_to_tail_attr.attr,
5155 &deactivate_remote_frees_attr.attr,
5156 &deactivate_bypass_attr.attr,
5157 &order_fallback_attr.attr,
5158 &cmpxchg_double_fail_attr.attr,
5159 &cmpxchg_double_cpu_fail_attr.attr,
5160 &cpu_partial_alloc_attr.attr,
5161 &cpu_partial_free_attr.attr,
5162 &cpu_partial_node_attr.attr,
5163 &cpu_partial_drain_attr.attr,
5164#endif
5165#ifdef CONFIG_FAILSLAB
5166 &failslab_attr.attr,
5167#endif
5168
5169 NULL
5170};
5171
5172static struct attribute_group slab_attr_group = {
5173 .attrs = slab_attrs,
5174};
5175
5176static ssize_t slab_attr_show(struct kobject *kobj,
5177 struct attribute *attr,
5178 char *buf)
5179{
5180 struct slab_attribute *attribute;
5181 struct kmem_cache *s;
5182 int err;
5183
5184 attribute = to_slab_attr(attr);
5185 s = to_slab(kobj);
5186
5187 if (!attribute->show)
5188 return -EIO;
5189
5190 err = attribute->show(s, buf);
5191
5192 return err;
5193}
5194
5195static ssize_t slab_attr_store(struct kobject *kobj,
5196 struct attribute *attr,
5197 const char *buf, size_t len)
5198{
5199 struct slab_attribute *attribute;
5200 struct kmem_cache *s;
5201 int err;
5202
5203 attribute = to_slab_attr(attr);
5204 s = to_slab(kobj);
5205
5206 if (!attribute->store)
5207 return -EIO;
5208
5209 err = attribute->store(s, buf, len);
5210
5211 return err;
5212}
5213
5214static void kmem_cache_release(struct kobject *kobj)
5215{
5216 struct kmem_cache *s = to_slab(kobj);
5217
5218 kfree(s->name);
5219 kfree(s);
5220}
5221
5222static const struct sysfs_ops slab_sysfs_ops = {
5223 .show = slab_attr_show,
5224 .store = slab_attr_store,
5225};
5226
5227static struct kobj_type slab_ktype = {
5228 .sysfs_ops = &slab_sysfs_ops,
5229 .release = kmem_cache_release
5230};
5231
5232static int uevent_filter(struct kset *kset, struct kobject *kobj)
5233{
5234 struct kobj_type *ktype = get_ktype(kobj);
5235
5236 if (ktype == &slab_ktype)
5237 return 1;
5238 return 0;
5239}
5240
5241static const struct kset_uevent_ops slab_uevent_ops = {
5242 .filter = uevent_filter,
5243};
5244
5245static struct kset *slab_kset;
5246
5247#define ID_STR_LENGTH 64
5248
5249/* Create a unique string id for a slab cache:
5250 *
5251 * Format :[flags-]size
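 * e.g. a tracked DMA cache with object size 192 is named ":dt-0000192"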
5252 */
5253static char *create_unique_id(struct kmem_cache *s)
5254{
5255 char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
5256 char *p = name;
5257
5258 BUG_ON(!name);
5259
5260 *p++ = ':';
5261 /*
5262 * First flags affecting slabcache operations. We will only
5263 * get here for aliasable slabs so we do not need to support
5264 * too many flags. The flags here must cover all flags that
5265 * are matched during merging to guarantee that the id is
5266 * unique.
5267 */
5268 if (s->flags & SLAB_CACHE_DMA)
5269 *p++ = 'd';
5270 if (s->flags & SLAB_RECLAIM_ACCOUNT)
5271 *p++ = 'a';
5272 if (s->flags & SLAB_DEBUG_FREE)
5273 *p++ = 'F';
5274 if (!(s->flags & SLAB_NOTRACK))
5275 *p++ = 't';
5276 if (p != name + 1)
5277 *p++ = '-';
5278 p += sprintf(p, "%07d", s->size);
5279 BUG_ON(p > name + ID_STR_LENGTH - 1);
5280 return name;
5281}
5282
5283static int sysfs_slab_add(struct kmem_cache *s)
5284{
5285 int err;
5286 const char *name;
5287 int unmergeable;
5288
5289 if (slab_state < SYSFS)
5290 /* Defer until later */
5291 return 0;
5292
5293 unmergeable = slab_unmergeable(s);
5294 if (unmergeable) {
5295 /*
5296 * Slabcache can never be merged so we can use the name proper.
5297 * This is typically the case for debug situations. In that
5298 * case we can catch duplicate names easily.
5299 */
5300 sysfs_remove_link(&slab_kset->kobj, s->name);
5301 name = s->name;
5302 } else {
5303 /*
5304 * Create a unique name for the slab as a target
5305 * for the symlinks.
5306 */
5307 name = create_unique_id(s);
5308 }
5309
5310 s->kobj.kset = slab_kset;
5311 err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
5312 if (err) {
5313 kobject_put(&s->kobj);
5314 return err;
5315 }
5316
5317 err = sysfs_create_group(&s->kobj, &slab_attr_group);
5318 if (err) {
5319 kobject_del(&s->kobj);
5320 kobject_put(&s->kobj);
5321 return err;
5322 }
5323 kobject_uevent(&s->kobj, KOBJ_ADD);
5324 if (!unmergeable) {
5325 /* Setup first alias */
5326 sysfs_slab_alias(s, s->name);
5327 kfree(name);
5328 }
5329 return 0;
5330}
5331
5332static void sysfs_slab_remove(struct kmem_cache *s)
5333{
5334 if (slab_state < SYSFS)
5335 /*
5336 * Sysfs has not been setup yet so no need to remove the
5337 * cache from sysfs.
5338 */
5339 return;
5340
5341 kobject_uevent(&s->kobj, KOBJ_REMOVE);
5342 kobject_del(&s->kobj);
5343 kobject_put(&s->kobj);
5344}
5345
5346/*
5347 * Need to buffer aliases during bootup until sysfs becomes
5348 * available lest we lose that information.
5349 */
5350struct saved_alias {
5351 struct kmem_cache *s;
5352 const char *name;
5353 struct saved_alias *next;
5354};
5355
5356static struct saved_alias *alias_list;
5357
5358static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
5359{
5360 struct saved_alias *al;
5361
5362 if (slab_state == SYSFS) {
5363 /*
5364 * If we have a leftover link then remove it.
5365 */
5366 sysfs_remove_link(&slab_kset->kobj, name);
5367 return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
5368 }
5369
5370 al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
5371 if (!al)
5372 return -ENOMEM;
5373
5374 al->s = s;
5375 al->name = name;
5376 al->next = alias_list;
5377 alias_list = al;
5378 return 0;
5379}
5380
5381static int __init slab_sysfs_init(void)
5382{
5383 struct kmem_cache *s;
5384 int err;
5385
5386 down_write(&slub_lock);
5387
5388 slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
5389 if (!slab_kset) {
5390 up_write(&slub_lock);
5391 printk(KERN_ERR "Cannot register slab subsystem.\n");
5392 return -ENOSYS;
5393 }
5394
5395 slab_state = SYSFS;
5396
5397 list_for_each_entry(s, &slab_caches, list) {
5398 err = sysfs_slab_add(s);
5399 if (err)
5400 printk(KERN_ERR "SLUB: Unable to add boot slab %s"
5401 " to sysfs\n", s->name);
5402 }
5403
5404 while (alias_list) {
5405 struct saved_alias *al = alias_list;
5406
5407 alias_list = alias_list->next;
5408 err = sysfs_slab_alias(al->s, al->name);
5409 if (err)
5410 printk(KERN_ERR "SLUB: Unable to add boot slab alias"
5411 " %s to sysfs\n", s->name);
5412 kfree(al);
5413 }
5414
5415 up_write(&slub_lock);
5416 resiliency_test();
5417 return 0;
5418}
5419
5420__initcall(slab_sysfs_init);
5421#endif /* CONFIG_SYSFS */
5422
5423/*
5424 * The /proc/slabinfo ABI
5425 */
5426#ifdef CONFIG_SLABINFO
5427static void print_slabinfo_header(struct seq_file *m)
5428{
5429 seq_puts(m, "slabinfo - version: 2.1\n");
5430 seq_puts(m, "# name <active_objs> <num_objs> <objsize> "
5431 "<objperslab> <pagesperslab>");
5432 seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
5433 seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
5434 seq_putc(m, '\n');
5435}
5436
5437static void *s_start(struct seq_file *m, loff_t *pos)
5438{
5439 loff_t n = *pos;
5440
5441 down_read(&slub_lock);
5442 if (!n)
5443 print_slabinfo_header(m);
5444
5445 return seq_list_start(&slab_caches, *pos);
5446}
5447
5448static void *s_next(struct seq_file *m, void *p, loff_t *pos)
5449{
5450 return seq_list_next(p, &slab_caches, pos);
5451}
5452
5453static void s_stop(struct seq_file *m, void *p)
5454{
5455 up_read(&slub_lock);
5456}
5457
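/*
 * Emit one line per cache in the slabinfo 2.1 format, e.g.
 * "kmalloc-64 12800 12800 64 64 1 : tunables 0 0 0 : slabdata 200 200 0".
 * The tunables are always reported as zero and active_slabs equals
 * num_slabs since SLUB does not use those concepts.
 */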
5458static int s_show(struct seq_file *m, void *p)
5459{
5460 unsigned long nr_partials = 0;
5461 unsigned long nr_slabs = 0;
5462 unsigned long nr_inuse = 0;
5463 unsigned long nr_objs = 0;
5464 unsigned long nr_free = 0;
5465 struct kmem_cache *s;
5466 int node;
5467
5468 s = list_entry(p, struct kmem_cache, list);
5469
5470 for_each_online_node(node) {
5471 struct kmem_cache_node *n = get_node(s, node);
5472
5473 if (!n)
5474 continue;
5475
5476 nr_partials += n->nr_partial;
5477 nr_slabs += atomic_long_read(&n->nr_slabs);
5478 nr_objs += atomic_long_read(&n->total_objects);
5479 nr_free += count_partial(n, count_free);
5480 }
5481
5482 nr_inuse = nr_objs - nr_free;
5483
5484 seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", s->name, nr_inuse,
5485 nr_objs, s->size, oo_objects(s->oo),
5486 (1 << oo_order(s->oo)));
5487 seq_printf(m, " : tunables %4u %4u %4u", 0, 0, 0);
5488 seq_printf(m, " : slabdata %6lu %6lu %6lu", nr_slabs, nr_slabs,
5489 0UL);
5490 seq_putc(m, '\n');
5491 return 0;
5492}
5493
5494static const struct seq_operations slabinfo_op = {
5495 .start = s_start,
5496 .next = s_next,
5497 .stop = s_stop,
5498 .show = s_show,
5499};
5500
5501static int slabinfo_open(struct inode *inode, struct file *file)
5502{
5503 return seq_open(file, &slabinfo_op);
5504}
5505
5506static const struct file_operations proc_slabinfo_operations = {
5507 .open = slabinfo_open,
5508 .read = seq_read,
5509 .llseek = seq_lseek,
5510 .release = seq_release,
5511};
5512
5513static int __init slab_proc_init(void)
5514{
5515 proc_create("slabinfo", S_IRUSR, NULL, &proc_slabinfo_operations);
5516 return 0;
5517}
5518module_init(slab_proc_init);
5519#endif /* CONFIG_SLABINFO */
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * SLUB: A slab allocator that limits cache line use instead of queuing
4 * objects in per cpu and per node lists.
5 *
6 * The allocator synchronizes using per slab locks or atomic operations
7 * and only uses a centralized lock to manage a pool of partial slabs.
8 *
9 * (C) 2007 SGI, Christoph Lameter
10 * (C) 2011 Linux Foundation, Christoph Lameter
11 */
12
13#include <linux/mm.h>
14#include <linux/swap.h> /* mm_account_reclaimed_pages() */
15#include <linux/module.h>
16#include <linux/bit_spinlock.h>
17#include <linux/interrupt.h>
18#include <linux/swab.h>
19#include <linux/bitops.h>
20#include <linux/slab.h>
21#include "slab.h"
22#include <linux/proc_fs.h>
23#include <linux/seq_file.h>
24#include <linux/kasan.h>
25#include <linux/kmsan.h>
26#include <linux/cpu.h>
27#include <linux/cpuset.h>
28#include <linux/mempolicy.h>
29#include <linux/ctype.h>
30#include <linux/stackdepot.h>
31#include <linux/debugobjects.h>
32#include <linux/kallsyms.h>
33#include <linux/kfence.h>
34#include <linux/memory.h>
35#include <linux/math64.h>
36#include <linux/fault-inject.h>
37#include <linux/kmemleak.h>
38#include <linux/stacktrace.h>
39#include <linux/prefetch.h>
40#include <linux/memcontrol.h>
41#include <linux/random.h>
42#include <kunit/test.h>
43#include <kunit/test-bug.h>
44#include <linux/sort.h>
45
46#include <linux/debugfs.h>
47#include <trace/events/kmem.h>
48
49#include "internal.h"
50
51/*
52 * Lock order:
53 * 1. slab_mutex (Global Mutex)
54 * 2. node->list_lock (Spinlock)
55 * 3. kmem_cache->cpu_slab->lock (Local lock)
56 * 4. slab_lock(slab) (Only on some arches)
57 * 5. object_map_lock (Only for debugging)
58 *
59 * slab_mutex
60 *
61 * The role of the slab_mutex is to protect the list of all the slabs
62 * and to synchronize major metadata changes to slab cache structures.
63 * Also synchronizes memory hotplug callbacks.
64 *
65 * slab_lock
66 *
67 * The slab_lock is a wrapper around the page lock, thus it is a bit
68 * spinlock.
69 *
70 * The slab_lock is only used on arches that do not have the ability
71 * to do a cmpxchg_double. It only protects:
72 *
73 * A. slab->freelist -> List of free objects in a slab
74 * B. slab->inuse -> Number of objects in use
75 * C. slab->objects -> Number of objects in slab
76 * D. slab->frozen -> frozen state
77 *
78 * Frozen slabs
79 *
80 * If a slab is frozen then it is exempt from list management. It is
81 * the cpu slab which is actively allocated from by the processor that
82 * froze it and it is not on any list. The processor that froze the
83 * slab is the one who can perform list operations on the slab. Other
84 * processors may put objects onto the freelist but the processor that
85 * froze the slab is the only one that can retrieve the objects from the
86 * slab's freelist.
87 *
88 * CPU partial slabs
89 *
90 * The partially empty slabs cached on the CPU partial list are used
91 * for performance reasons: they speed up the allocation process.
92 * These slabs are not frozen, but are also exempt from list management,
93 * by clearing the PG_workingset flag when moving out of the node
94 * partial list. Please see __slab_free() for more details.
95 *
96 * To sum up, the current scheme is:
97 * - node partial slab: PG_workingset && !frozen
98 * - cpu partial slab: !PG_workingset && !frozen
99 * - cpu slab: !PG_workingset && frozen
100 * - full slab: !PG_workingset && !frozen
101 *
102 * list_lock
103 *
104 * The list_lock protects the partial and full list on each node and
105 * the partial slab counter. If taken then no new slabs may be added or
106 * removed from the lists nor make the number of partial slabs be modified.
107 * (Note that the total number of slabs is an atomic value that may be
108 * modified without taking the list lock).
109 *
110 * The list_lock is a centralized lock and thus we avoid taking it as
111 * much as possible. As long as SLUB does not have to handle partial
112 * slabs, operations can continue without any centralized lock. F.e.
113 * allocating a long series of objects that fill up slabs does not require
114 * the list lock.
115 *
116 * For debug caches, all allocations are forced to go through a list_lock
117 * protected region to serialize against concurrent validation.
118 *
119 * cpu_slab->lock local lock
120 *
121 * This lock protects slowpath manipulation of all kmem_cache_cpu fields
122 * except the stat counters. This is a percpu structure manipulated only by
123 * the local cpu, so the lock protects against being preempted or interrupted
124 * by an irq. Fast path operations rely on lockless operations instead.
125 *
126 * On PREEMPT_RT, the local lock neither disables interrupts nor preemption
127 * which means the lockless fastpath cannot be used as it might interfere with
128 * an in-progress slow path operation. In this case the local lock is always
129 * taken but it still utilizes the freelist for the common operations.
130 *
131 * lockless fastpaths
132 *
133 * The fast path allocation (slab_alloc_node()) and freeing (do_slab_free())
134 * are fully lockless when satisfied from the percpu slab (and when
135 * cmpxchg_double is possible to use, otherwise slab_lock is taken).
136 * They also don't disable preemption or migration or irqs. They rely on
137 * the transaction id (tid) field to detect being preempted or moved to
138 * another cpu.
139 *
140 * irq, preemption, migration considerations
141 *
142 * Interrupts are disabled as part of list_lock or local_lock operations, or
143 * around the slab_lock operation, in order to make the slab allocator safe
144 * to use in the context of an irq.
145 *
146 * In addition, preemption (or migration on PREEMPT_RT) is disabled in the
147 * allocation slowpath, bulk allocation, and put_cpu_partial(), so that the
148 * local cpu doesn't change in the process and e.g. the kmem_cache_cpu pointer
149 * doesn't have to be revalidated in each section protected by the local lock.
150 *
151 * SLUB assigns one slab for allocation to each processor.
152 * Allocations only occur from these slabs called cpu slabs.
153 *
154 * Slabs with free elements are kept on a partial list and during regular
155 * operations no list for full slabs is used. If an object in a full slab is
156 * freed then the slab will show up again on the partial lists.
157 * We track full slabs for debugging purposes though because otherwise we
158 * cannot scan all objects.
159 *
160 * Slabs are freed when they become empty. Teardown and setup is
161 * minimal so we rely on the page allocators per cpu caches for
162 * fast frees and allocs.
163 *
164 * slab->frozen The slab is frozen and exempt from list processing.
165 * This means that the slab is dedicated to a purpose
166 * such as satisfying allocations for a specific
167 * processor. Objects may be freed in the slab while
168 * it is frozen but slab_free will then skip the usual
169 * list operations. It is up to the processor holding
170 * the slab to integrate the slab into the slab lists
171 * when the slab is no longer needed.
172 *
173 * One use of this flag is to mark slabs that are
174 * used for allocations. Then such a slab becomes a cpu
175 * slab. The cpu slab may be equipped with an additional
176 * freelist that allows lockless access to
177 * free objects in addition to the regular freelist
178 * that requires the slab lock.
179 *
180 * SLAB_DEBUG_FLAGS Slab requires special handling due to debug
181 * options set. This moves slab handling out of
182 * the fast path and disables lockless freelists.
183 */
184
185/*
186 * We could simply use migrate_disable()/enable() but as long as it's a
187 * function call even on !PREEMPT_RT, use inline preempt_disable() there.
188 */
189#ifndef CONFIG_PREEMPT_RT
190#define slub_get_cpu_ptr(var) get_cpu_ptr(var)
191#define slub_put_cpu_ptr(var) put_cpu_ptr(var)
192#define USE_LOCKLESS_FAST_PATH() (true)
193#else
194#define slub_get_cpu_ptr(var) \
195({ \
196 migrate_disable(); \
197 this_cpu_ptr(var); \
198})
199#define slub_put_cpu_ptr(var) \
200do { \
201 (void)(var); \
202 migrate_enable(); \
203} while (0)
204#define USE_LOCKLESS_FAST_PATH() (false)
205#endif
206
207#ifndef CONFIG_SLUB_TINY
208#define __fastpath_inline __always_inline
209#else
210#define __fastpath_inline
211#endif
212
213#ifdef CONFIG_SLUB_DEBUG
214#ifdef CONFIG_SLUB_DEBUG_ON
215DEFINE_STATIC_KEY_TRUE(slub_debug_enabled);
216#else
217DEFINE_STATIC_KEY_FALSE(slub_debug_enabled);
218#endif
219#endif /* CONFIG_SLUB_DEBUG */
220
221#ifdef CONFIG_NUMA
222static DEFINE_STATIC_KEY_FALSE(strict_numa);
223#endif
224
225/* Structure holding parameters for get_partial() call chain */
226struct partial_context {
227 gfp_t flags;
228 unsigned int orig_size;
229 void *object;
230};
231
232static inline bool kmem_cache_debug(struct kmem_cache *s)
233{
234 return kmem_cache_debug_flags(s, SLAB_DEBUG_FLAGS);
235}
236
237void *fixup_red_left(struct kmem_cache *s, void *p)
238{
239 if (kmem_cache_debug_flags(s, SLAB_RED_ZONE))
240 p += s->red_left_pad;
241
242 return p;
243}
244
245static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
246{
247#ifdef CONFIG_SLUB_CPU_PARTIAL
248 return !kmem_cache_debug(s);
249#else
250 return false;
251#endif
252}
253
254/*
255 * Issues still to be resolved:
256 *
257 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
258 *
259 * - Variable sizing of the per node arrays
260 */
261
262/* Enable to log cmpxchg failures */
263#undef SLUB_DEBUG_CMPXCHG
264
265#ifndef CONFIG_SLUB_TINY
266/*
267 * Minimum number of partial slabs. These will be left on the partial
268 * lists even if they are empty. kmem_cache_shrink may reclaim them.
269 */
270#define MIN_PARTIAL 5
271
272/*
273 * Maximum number of desirable partial slabs.
274 * The existence of more partial slabs makes kmem_cache_shrink
275 * sort the partial list by the number of objects in use.
276 */
277#define MAX_PARTIAL 10
278#else
279#define MIN_PARTIAL 0
280#define MAX_PARTIAL 0
281#endif
282
283#define DEBUG_DEFAULT_FLAGS (SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \
284 SLAB_POISON | SLAB_STORE_USER)
285
286/*
287 * These debug flags cannot use CMPXCHG because there might be consistency
288 * issues when checking or reading debug information
289 */
290#define SLAB_NO_CMPXCHG (SLAB_CONSISTENCY_CHECKS | SLAB_STORE_USER | \
291 SLAB_TRACE)
292
293
294/*
295 * Debugging flags that require metadata to be stored in the slab. These get
296 * disabled when slab_debug=O is used and a cache's min order increases with
297 * metadata.
298 */
299#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
300
301#define OO_SHIFT 16
302#define OO_MASK ((1 << OO_SHIFT) - 1)
303#define MAX_OBJS_PER_PAGE 32767 /* since slab.objects is u15 */
304
305/* Internal SLUB flags */
306/* Poison object */
307#define __OBJECT_POISON __SLAB_FLAG_BIT(_SLAB_OBJECT_POISON)
308/* Use cmpxchg_double */
309
310#ifdef system_has_freelist_aba
311#define __CMPXCHG_DOUBLE __SLAB_FLAG_BIT(_SLAB_CMPXCHG_DOUBLE)
312#else
313#define __CMPXCHG_DOUBLE __SLAB_FLAG_UNUSED
314#endif
315
316/*
317 * Tracking user of a slab.
318 */
319#define TRACK_ADDRS_COUNT 16
320struct track {
321 unsigned long addr; /* Called from address */
322#ifdef CONFIG_STACKDEPOT
323 depot_stack_handle_t handle;
324#endif
325 int cpu; /* Was running on cpu */
326 int pid; /* Pid context */
327 unsigned long when; /* When did the operation occur */
328};
329
330enum track_item { TRACK_ALLOC, TRACK_FREE };
331
332#ifdef SLAB_SUPPORTS_SYSFS
333static int sysfs_slab_add(struct kmem_cache *);
334static int sysfs_slab_alias(struct kmem_cache *, const char *);
335#else
336static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
337static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
338 { return 0; }
339#endif
340
341#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
342static void debugfs_slab_add(struct kmem_cache *);
343#else
344static inline void debugfs_slab_add(struct kmem_cache *s) { }
345#endif
346
347enum stat_item {
348 ALLOC_FASTPATH, /* Allocation from cpu slab */
349 ALLOC_SLOWPATH, /* Allocation by getting a new cpu slab */
350 FREE_FASTPATH, /* Free to cpu slab */
351 FREE_SLOWPATH, /* Freeing not to cpu slab */
352 FREE_FROZEN, /* Freeing to frozen slab */
353 FREE_ADD_PARTIAL, /* Freeing moves slab to partial list */
354 FREE_REMOVE_PARTIAL, /* Freeing removes last object */
355 ALLOC_FROM_PARTIAL, /* Cpu slab acquired from node partial list */
356 ALLOC_SLAB, /* Cpu slab acquired from page allocator */
357 ALLOC_REFILL, /* Refill cpu slab from slab freelist */
358 ALLOC_NODE_MISMATCH, /* Switching cpu slab */
359 FREE_SLAB, /* Slab freed to the page allocator */
360 CPUSLAB_FLUSH, /* Abandoning of the cpu slab */
361 DEACTIVATE_FULL, /* Cpu slab was full when deactivated */
362 DEACTIVATE_EMPTY, /* Cpu slab was empty when deactivated */
363 DEACTIVATE_TO_HEAD, /* Cpu slab was moved to the head of partials */
364 DEACTIVATE_TO_TAIL, /* Cpu slab was moved to the tail of partials */
365 DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
366 DEACTIVATE_BYPASS, /* Implicit deactivation */
367 ORDER_FALLBACK, /* Number of times fallback was necessary */
368 CMPXCHG_DOUBLE_CPU_FAIL,/* Failures of this_cpu_cmpxchg_double */
369 CMPXCHG_DOUBLE_FAIL, /* Failures of slab freelist update */
370 CPU_PARTIAL_ALLOC, /* Used cpu partial on alloc */
371 CPU_PARTIAL_FREE, /* Refill cpu partial on free */
372 CPU_PARTIAL_NODE, /* Refill cpu partial from node partial */
373 CPU_PARTIAL_DRAIN, /* Drain cpu partial to node partial */
374 NR_SLUB_STAT_ITEMS
375};
376
377#ifndef CONFIG_SLUB_TINY
378/*
379 * When changing the layout, make sure freelist and tid are still compatible
380 * with this_cpu_cmpxchg_double() alignment requirements.
381 */
382struct kmem_cache_cpu {
383 union {
384 struct {
385 void **freelist; /* Pointer to next available object */
386 unsigned long tid; /* Globally unique transaction id */
387 };
388 freelist_aba_t freelist_tid;
389 };
390 struct slab *slab; /* The slab from which we are allocating */
391#ifdef CONFIG_SLUB_CPU_PARTIAL
392 struct slab *partial; /* Partially allocated slabs */
393#endif
394 local_lock_t lock; /* Protects the fields above */
395#ifdef CONFIG_SLUB_STATS
396 unsigned int stat[NR_SLUB_STAT_ITEMS];
397#endif
398};
399#endif /* CONFIG_SLUB_TINY */
400
401static inline void stat(const struct kmem_cache *s, enum stat_item si)
402{
403#ifdef CONFIG_SLUB_STATS
404 /*
405 * The rmw is racy on a preemptible kernel but this is acceptable, so
406 * avoid this_cpu_add()'s irq-disable overhead.
407 */
408 raw_cpu_inc(s->cpu_slab->stat[si]);
409#endif
410}
411
412static inline
413void stat_add(const struct kmem_cache *s, enum stat_item si, int v)
414{
415#ifdef CONFIG_SLUB_STATS
416 raw_cpu_add(s->cpu_slab->stat[si], v);
417#endif
418}
419
420/*
421 * The slab lists for all objects.
422 */
423struct kmem_cache_node {
424 spinlock_t list_lock;
425 unsigned long nr_partial;
426 struct list_head partial;
427#ifdef CONFIG_SLUB_DEBUG
428 atomic_long_t nr_slabs;
429 atomic_long_t total_objects;
430 struct list_head full;
431#endif
432};
433
434static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
435{
436 return s->node[node];
437}
438
439/*
440 * Iterator over all nodes. The body will be executed for each node that has
441 * a kmem_cache_node structure allocated (which is true for all online nodes)
442 */
443#define for_each_kmem_cache_node(__s, __node, __n) \
444 for (__node = 0; __node < nr_node_ids; __node++) \
445 if ((__n = get_node(__s, __node)))
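/*
 * Typical usage (a sketch):
 *
 *	struct kmem_cache_node *n;
 *	int node;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		total += n->nr_partial;
 */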
446
447/*
448 * Tracks for which NUMA nodes we have kmem_cache_nodes allocated.
449 * Corresponds to node_state[N_NORMAL_MEMORY], but can temporarily
450 * differ during memory hotplug/hotremove operations.
451 * Protected by slab_mutex.
452 */
453static nodemask_t slab_nodes;
454
455#ifndef CONFIG_SLUB_TINY
456/*
457 * Workqueue used for flush_cpu_slab().
458 */
459static struct workqueue_struct *flushwq;
460#endif
461
462/********************************************************************
463 * Core slab cache functions
464 *******************************************************************/
465
466/*
467 * Returns freelist pointer (ptr). With hardening, this is obfuscated
468 * with an XOR of the address where the pointer is held and a per-cache
469 * random number.
470 */
471static inline freeptr_t freelist_ptr_encode(const struct kmem_cache *s,
472 void *ptr, unsigned long ptr_addr)
473{
474 unsigned long encoded;
475
476#ifdef CONFIG_SLAB_FREELIST_HARDENED
477 encoded = (unsigned long)ptr ^ s->random ^ swab(ptr_addr);
478#else
479 encoded = (unsigned long)ptr;
480#endif
481 return (freeptr_t){.v = encoded};
482}
483
484static inline void *freelist_ptr_decode(const struct kmem_cache *s,
485 freeptr_t ptr, unsigned long ptr_addr)
486{
487 void *decoded;
488
489#ifdef CONFIG_SLAB_FREELIST_HARDENED
490 decoded = (void *)(ptr.v ^ s->random ^ swab(ptr_addr));
491#else
492 decoded = (void *)ptr.v;
493#endif
494 return decoded;
495}
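/*
 * Decoding XORs with the same per-cache random value and swab()bed address,
 * so for a given storage location it exactly undoes freelist_ptr_encode();
 * without CONFIG_SLAB_FREELIST_HARDENED both helpers are plain casts.
 */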
496
497static inline void *get_freepointer(struct kmem_cache *s, void *object)
498{
499 unsigned long ptr_addr;
500 freeptr_t p;
501
502 object = kasan_reset_tag(object);
503 ptr_addr = (unsigned long)object + s->offset;
504 p = *(freeptr_t *)(ptr_addr);
505 return freelist_ptr_decode(s, p, ptr_addr);
506}
507
508#ifndef CONFIG_SLUB_TINY
509static void prefetch_freepointer(const struct kmem_cache *s, void *object)
510{
511 prefetchw(object + s->offset);
512}
513#endif
514
515/*
516 * When running under KMSAN, get_freepointer_safe() may return an uninitialized
517 * pointer value in the case the current thread loses the race for the next
518 * memory chunk in the freelist. In that case this_cpu_cmpxchg_double() in
519 * slab_alloc_node() will fail, so the uninitialized value won't be used, but
520 * KMSAN will still check all arguments of cmpxchg because of imperfect
521 * handling of inline assembly.
522 * To work around this problem, we apply __no_kmsan_checks to ensure that
523 * get_freepointer_safe() returns initialized memory.
524 */
525__no_kmsan_checks
526static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
527{
528 unsigned long freepointer_addr;
529 freeptr_t p;
530
531 if (!debug_pagealloc_enabled_static())
532 return get_freepointer(s, object);
533
534 object = kasan_reset_tag(object);
535 freepointer_addr = (unsigned long)object + s->offset;
536 copy_from_kernel_nofault(&p, (freeptr_t *)freepointer_addr, sizeof(p));
537 return freelist_ptr_decode(s, p, freepointer_addr);
538}
539
540static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
541{
542 unsigned long freeptr_addr = (unsigned long)object + s->offset;
543
544#ifdef CONFIG_SLAB_FREELIST_HARDENED
545 BUG_ON(object == fp); /* naive detection of double free or corruption */
546#endif
547
548 freeptr_addr = (unsigned long)kasan_reset_tag((void *)freeptr_addr);
549 *(freeptr_t *)freeptr_addr = freelist_ptr_encode(s, fp, freeptr_addr);
550}
551
552/*
553 * See comment in calculate_sizes().
554 */
555static inline bool freeptr_outside_object(struct kmem_cache *s)
556{
557 return s->offset >= s->inuse;
558}
559
560/*
561 * Return offset of the end of info block which is inuse + free pointer if
562 * not overlapping with object.
563 */
564static inline unsigned int get_info_end(struct kmem_cache *s)
565{
566 if (freeptr_outside_object(s))
567 return s->inuse + sizeof(void *);
568 else
569 return s->inuse;
570}
571
572/* Loop over all objects in a slab */
573#define for_each_object(__p, __s, __addr, __objects) \
574 for (__p = fixup_red_left(__s, __addr); \
575 __p < (__addr) + (__objects) * (__s)->size; \
576 __p += (__s)->size)
577
578static inline unsigned int order_objects(unsigned int order, unsigned int size)
579{
580 return ((unsigned int)PAGE_SIZE << order) / size;
581}
582
583static inline struct kmem_cache_order_objects oo_make(unsigned int order,
584 unsigned int size)
585{
586 struct kmem_cache_order_objects x = {
587 (order << OO_SHIFT) + order_objects(order, size)
588 };
589
590 return x;
591}
592
593static inline unsigned int oo_order(struct kmem_cache_order_objects x)
594{
595 return x.x >> OO_SHIFT;
596}
597
598static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
599{
600 return x.x & OO_MASK;
601}
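/*
 * Worked example, assuming 4K pages: order = 1 and size = 192 gives
 * order_objects() = 8192 / 192 = 42, so oo.x = (1 << OO_SHIFT) + 42 and
 * oo_order() / oo_objects() recover 1 and 42 again.
 */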
602
603#ifdef CONFIG_SLUB_CPU_PARTIAL
604static void slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects)
605{
606 unsigned int nr_slabs;
607
608 s->cpu_partial = nr_objects;
609
610 /*
611 * We take the number of objects but actually limit the number of
612 * slabs on the per cpu partial list, in order to limit excessive
613 * growth of the list. For simplicity we assume that the slabs will
614 * be half-full.
615 */
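	/*
	 * Worked example (illustrative): nr_objects = 30 with
	 * oo_objects(s->oo) = 16 gives nr_slabs = DIV_ROUND_UP(60, 16) = 4.
	 */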
616 nr_slabs = DIV_ROUND_UP(nr_objects * 2, oo_objects(s->oo));
617 s->cpu_partial_slabs = nr_slabs;
618}
619
620static inline unsigned int slub_get_cpu_partial(struct kmem_cache *s)
621{
622 return s->cpu_partial_slabs;
623}
624#else
625static inline void
626slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects)
627{
628}
629
630static inline unsigned int slub_get_cpu_partial(struct kmem_cache *s)
631{
632 return 0;
633}
634#endif /* CONFIG_SLUB_CPU_PARTIAL */
635
636/*
637 * Per slab locking using the pagelock
638 */
639static __always_inline void slab_lock(struct slab *slab)
640{
641 bit_spin_lock(PG_locked, &slab->__page_flags);
642}
643
644static __always_inline void slab_unlock(struct slab *slab)
645{
646 bit_spin_unlock(PG_locked, &slab->__page_flags);
647}
648
649static inline bool
650__update_freelist_fast(struct slab *slab,
651 void *freelist_old, unsigned long counters_old,
652 void *freelist_new, unsigned long counters_new)
653{
654#ifdef system_has_freelist_aba
655 freelist_aba_t old = { .freelist = freelist_old, .counter = counters_old };
656 freelist_aba_t new = { .freelist = freelist_new, .counter = counters_new };
657
658 return try_cmpxchg_freelist(&slab->freelist_counter.full, &old.full, new.full);
659#else
660 return false;
661#endif
662}
663
664static inline bool
665__update_freelist_slow(struct slab *slab,
666 void *freelist_old, unsigned long counters_old,
667 void *freelist_new, unsigned long counters_new)
668{
669 bool ret = false;
670
671 slab_lock(slab);
672 if (slab->freelist == freelist_old &&
673 slab->counters == counters_old) {
674 slab->freelist = freelist_new;
675 slab->counters = counters_new;
676 ret = true;
677 }
678 slab_unlock(slab);
679
680 return ret;
681}
682
683/*
684 * Interrupts must be disabled (for the fallback code to work right), typically
685 * by an _irqsave() lock variant. On PREEMPT_RT the preempt_disable(), which is
686 * part of bit_spin_lock(), is sufficient because the policy is not to allow any
687 * allocation/free operation in hardirq context. Therefore nothing can
688 * interrupt the operation.
689 */
690static inline bool __slab_update_freelist(struct kmem_cache *s, struct slab *slab,
691 void *freelist_old, unsigned long counters_old,
692 void *freelist_new, unsigned long counters_new,
693 const char *n)
694{
695 bool ret;
696
697 if (USE_LOCKLESS_FAST_PATH())
698 lockdep_assert_irqs_disabled();
699
700 if (s->flags & __CMPXCHG_DOUBLE) {
701 ret = __update_freelist_fast(slab, freelist_old, counters_old,
702 freelist_new, counters_new);
703 } else {
704 ret = __update_freelist_slow(slab, freelist_old, counters_old,
705 freelist_new, counters_new);
706 }
707 if (likely(ret))
708 return true;
709
710 cpu_relax();
711 stat(s, CMPXCHG_DOUBLE_FAIL);
712
713#ifdef SLUB_DEBUG_CMPXCHG
714 pr_info("%s %s: cmpxchg double redo ", n, s->name);
715#endif
716
717 return false;
718}
719
720static inline bool slab_update_freelist(struct kmem_cache *s, struct slab *slab,
721 void *freelist_old, unsigned long counters_old,
722 void *freelist_new, unsigned long counters_new,
723 const char *n)
724{
725 bool ret;
726
727 if (s->flags & __CMPXCHG_DOUBLE) {
728 ret = __update_freelist_fast(slab, freelist_old, counters_old,
729 freelist_new, counters_new);
730 } else {
731 unsigned long flags;
732
733 local_irq_save(flags);
734 ret = __update_freelist_slow(slab, freelist_old, counters_old,
735 freelist_new, counters_new);
736 local_irq_restore(flags);
737 }
738 if (likely(ret))
739 return true;
740
741 cpu_relax();
742 stat(s, CMPXCHG_DOUBLE_FAIL);
743
744#ifdef SLUB_DEBUG_CMPXCHG
745 pr_info("%s %s: cmpxchg double redo ", n, s->name);
746#endif
747
748 return false;
749}
750
751/*
752 * kmalloc caches have fixed sizes (mostly powers of 2), and the kmalloc()
753 * API family rounds the requested size up to one of them, so the allocated
754 * area can be larger than what was requested. Save the original request
755 * size in the metadata area, for better debugging and sanity checking.
756 */
757static inline void set_orig_size(struct kmem_cache *s,
758 void *object, unsigned int orig_size)
759{
760 void *p = kasan_reset_tag(object);
761
762 if (!slub_debug_orig_size(s))
763 return;
764
765 p += get_info_end(s);
766 p += sizeof(struct track) * 2;
767
768 *(unsigned int *)p = orig_size;
769}
770
771static inline unsigned int get_orig_size(struct kmem_cache *s, void *object)
772{
773 void *p = kasan_reset_tag(object);
774
775 if (is_kfence_address(object))
776 return kfence_ksize(object);
777
778 if (!slub_debug_orig_size(s))
779 return s->object_size;
780
781 p += get_info_end(s);
782 p += sizeof(struct track) * 2;
783
784 return *(unsigned int *)p;
785}
786
787#ifdef CONFIG_SLUB_DEBUG
788static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
789static DEFINE_SPINLOCK(object_map_lock);
790
791static void __fill_map(unsigned long *obj_map, struct kmem_cache *s,
792 struct slab *slab)
793{
794 void *addr = slab_address(slab);
795 void *p;
796
797 bitmap_zero(obj_map, slab->objects);
798
799 for (p = slab->freelist; p; p = get_freepointer(s, p))
800 set_bit(__obj_to_index(s, addr, p), obj_map);
801}
802
803#if IS_ENABLED(CONFIG_KUNIT)
804static bool slab_add_kunit_errors(void)
805{
806 struct kunit_resource *resource;
807
808 if (!kunit_get_current_test())
809 return false;
810
811 resource = kunit_find_named_resource(current->kunit_test, "slab_errors");
812 if (!resource)
813 return false;
814
815 (*(int *)resource->data)++;
816 kunit_put_resource(resource);
817 return true;
818}
819
820bool slab_in_kunit_test(void)
821{
822 struct kunit_resource *resource;
823
824 if (!kunit_get_current_test())
825 return false;
826
827 resource = kunit_find_named_resource(current->kunit_test, "slab_errors");
828 if (!resource)
829 return false;
830
831 kunit_put_resource(resource);
832 return true;
833}
834#else
835static inline bool slab_add_kunit_errors(void) { return false; }
836#endif
837
838static inline unsigned int size_from_object(struct kmem_cache *s)
839{
840 if (s->flags & SLAB_RED_ZONE)
841 return s->size - s->red_left_pad;
842
843 return s->size;
844}
845
846static inline void *restore_red_left(struct kmem_cache *s, void *p)
847{
848 if (s->flags & SLAB_RED_ZONE)
849 p -= s->red_left_pad;
850
851 return p;
852}
853
854/*
855 * Debug settings:
856 */
857#if defined(CONFIG_SLUB_DEBUG_ON)
858static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS;
859#else
860static slab_flags_t slub_debug;
861#endif
862
863static char *slub_debug_string;
864static int disable_higher_order_debug;
865
866/*
867 * slub is about to manipulate internal object metadata. This memory lies
868 * outside the range of the allocated object, so accessing it would normally
869 * be reported by kasan as a bounds error. metadata_access_enable() is used
870 * to tell kasan that these accesses are OK.
871 */
872static inline void metadata_access_enable(void)
873{
874 kasan_disable_current();
875 kmsan_disable_current();
876}
877
878static inline void metadata_access_disable(void)
879{
880 kmsan_enable_current();
881 kasan_enable_current();
882}
883
884/*
885 * Object debugging
886 */
887
888/* Verify that a pointer has an address that is valid within a slab page */
889static inline int check_valid_pointer(struct kmem_cache *s,
890 struct slab *slab, void *object)
891{
892 void *base;
893
894 if (!object)
895 return 1;
896
897 base = slab_address(slab);
898 object = kasan_reset_tag(object);
899 object = restore_red_left(s, object);
900 if (object < base || object >= base + slab->objects * s->size ||
901 (object - base) % s->size) {
902 return 0;
903 }
904
905 return 1;
906}
907
908static void print_section(char *level, char *text, u8 *addr,
909 unsigned int length)
910{
911 metadata_access_enable();
912 print_hex_dump(level, text, DUMP_PREFIX_ADDRESS,
913 16, 1, kasan_reset_tag((void *)addr), length, 1);
914 metadata_access_disable();
915}
916
917static struct track *get_track(struct kmem_cache *s, void *object,
918 enum track_item alloc)
919{
920 struct track *p;
921
922 p = object + get_info_end(s);
923
924 return kasan_reset_tag(p + alloc);
925}
926
927#ifdef CONFIG_STACKDEPOT
928static noinline depot_stack_handle_t set_track_prepare(void)
929{
930 depot_stack_handle_t handle;
931 unsigned long entries[TRACK_ADDRS_COUNT];
932 unsigned int nr_entries;
933
934 nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3);
935 handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);
936
937 return handle;
938}
939#else
940static inline depot_stack_handle_t set_track_prepare(void)
941{
942 return 0;
943}
944#endif
945
946static void set_track_update(struct kmem_cache *s, void *object,
947 enum track_item alloc, unsigned long addr,
948 depot_stack_handle_t handle)
949{
950 struct track *p = get_track(s, object, alloc);
951
952#ifdef CONFIG_STACKDEPOT
953 p->handle = handle;
954#endif
955 p->addr = addr;
956 p->cpu = smp_processor_id();
957 p->pid = current->pid;
958 p->when = jiffies;
959}
960
961static __always_inline void set_track(struct kmem_cache *s, void *object,
962 enum track_item alloc, unsigned long addr)
963{
964 depot_stack_handle_t handle = set_track_prepare();
965
966 set_track_update(s, object, alloc, addr, handle);
967}
968
969static void init_tracking(struct kmem_cache *s, void *object)
970{
971 struct track *p;
972
973 if (!(s->flags & SLAB_STORE_USER))
974 return;
975
976 p = get_track(s, object, TRACK_ALLOC);
977 memset(p, 0, 2*sizeof(struct track));
978}
979
980static void print_track(const char *s, struct track *t, unsigned long pr_time)
981{
982 depot_stack_handle_t handle __maybe_unused;
983
984 if (!t->addr)
985 return;
986
987 pr_err("%s in %pS age=%lu cpu=%u pid=%d\n",
988 s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid);
989#ifdef CONFIG_STACKDEPOT
990 handle = READ_ONCE(t->handle);
991 if (handle)
992 stack_depot_print(handle);
993 else
994 pr_err("object allocation/free stack trace missing\n");
995#endif
996}
997
998void print_tracking(struct kmem_cache *s, void *object)
999{
1000 unsigned long pr_time = jiffies;
1001 if (!(s->flags & SLAB_STORE_USER))
1002 return;
1003
1004 print_track("Allocated", get_track(s, object, TRACK_ALLOC), pr_time);
1005 print_track("Freed", get_track(s, object, TRACK_FREE), pr_time);
1006}
1007
1008static void print_slab_info(const struct slab *slab)
1009{
1010 pr_err("Slab 0x%p objects=%u used=%u fp=0x%p flags=%pGp\n",
1011 slab, slab->objects, slab->inuse, slab->freelist,
1012 &slab->__page_flags);
1013}
1014
1015void skip_orig_size_check(struct kmem_cache *s, const void *object)
1016{
1017 set_orig_size(s, (void *)object, s->object_size);
1018}
1019
1020static void slab_bug(struct kmem_cache *s, char *fmt, ...)
1021{
1022 struct va_format vaf;
1023 va_list args;
1024
1025 va_start(args, fmt);
1026 vaf.fmt = fmt;
1027 vaf.va = &args;
1028 pr_err("=============================================================================\n");
1029 pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf);
1030 pr_err("-----------------------------------------------------------------------------\n\n");
1031 va_end(args);
1032}
1033
1034__printf(2, 3)
1035static void slab_fix(struct kmem_cache *s, char *fmt, ...)
1036{
1037 struct va_format vaf;
1038 va_list args;
1039
1040 if (slab_add_kunit_errors())
1041 return;
1042
1043 va_start(args, fmt);
1044 vaf.fmt = fmt;
1045 vaf.va = &args;
1046 pr_err("FIX %s: %pV\n", s->name, &vaf);
1047 va_end(args);
1048}
1049
1050static void print_trailer(struct kmem_cache *s, struct slab *slab, u8 *p)
1051{
1052 unsigned int off; /* Offset of last byte */
1053 u8 *addr = slab_address(slab);
1054
1055 print_tracking(s, p);
1056
1057 print_slab_info(slab);
1058
1059 pr_err("Object 0x%p @offset=%tu fp=0x%p\n\n",
1060 p, p - addr, get_freepointer(s, p));
1061
1062 if (s->flags & SLAB_RED_ZONE)
1063 print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
1064 s->red_left_pad);
1065 else if (p > addr + 16)
1066 print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
1067
1068 print_section(KERN_ERR, "Object ", p,
1069 min_t(unsigned int, s->object_size, PAGE_SIZE));
1070 if (s->flags & SLAB_RED_ZONE)
1071 print_section(KERN_ERR, "Redzone ", p + s->object_size,
1072 s->inuse - s->object_size);
1073
1074 off = get_info_end(s);
1075
1076 if (s->flags & SLAB_STORE_USER)
1077 off += 2 * sizeof(struct track);
1078
1079 if (slub_debug_orig_size(s))
1080 off += sizeof(unsigned int);
1081
1082 off += kasan_metadata_size(s, false);
1083
1084 if (off != size_from_object(s))
1085 /* Beginning of the filler is the free pointer */
1086 print_section(KERN_ERR, "Padding ", p + off,
1087 size_from_object(s) - off);
1088
1089 dump_stack();
1090}
1091
1092static void object_err(struct kmem_cache *s, struct slab *slab,
1093 u8 *object, char *reason)
1094{
1095 if (slab_add_kunit_errors())
1096 return;
1097
1098 slab_bug(s, "%s", reason);
1099 print_trailer(s, slab, object);
1100 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
1101}
1102
1103static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab,
1104 void **freelist, void *nextfree)
1105{
1106 if ((s->flags & SLAB_CONSISTENCY_CHECKS) &&
1107 !check_valid_pointer(s, slab, nextfree) && freelist) {
1108 object_err(s, slab, *freelist, "Freechain corrupt");
1109 *freelist = NULL;
1110 slab_fix(s, "Isolate corrupted freechain");
1111 return true;
1112 }
1113
1114 return false;
1115}
1116
1117static __printf(3, 4) void slab_err(struct kmem_cache *s, struct slab *slab,
1118 const char *fmt, ...)
1119{
1120 va_list args;
1121 char buf[100];
1122
1123 if (slab_add_kunit_errors())
1124 return;
1125
1126 va_start(args, fmt);
1127 vsnprintf(buf, sizeof(buf), fmt, args);
1128 va_end(args);
1129 slab_bug(s, "%s", buf);
1130 print_slab_info(slab);
1131 dump_stack();
1132 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
1133}
1134
1135static void init_object(struct kmem_cache *s, void *object, u8 val)
1136{
1137 u8 *p = kasan_reset_tag(object);
1138 unsigned int poison_size = s->object_size;
1139
1140 if (s->flags & SLAB_RED_ZONE) {
1141 /*
1142 * Here and below, avoid overwriting the KMSAN shadow. Keeping
1143 * the shadow makes it possible to distinguish uninit-value
1144 * from use-after-free.
1145 */
1146 memset_no_sanitize_memory(p - s->red_left_pad, val,
1147 s->red_left_pad);
1148
1149 if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
1150 /*
1151 * Redzone the extra space that kmalloc allocated beyond the
1152 * requested size, and limit the poison size to the original
1153 * request size accordingly.
1154 */
1155 poison_size = get_orig_size(s, object);
1156 }
1157 }
1158
1159 if (s->flags & __OBJECT_POISON) {
1160 memset_no_sanitize_memory(p, POISON_FREE, poison_size - 1);
1161 memset_no_sanitize_memory(p + poison_size - 1, POISON_END, 1);
1162 }
1163
1164 if (s->flags & SLAB_RED_ZONE)
1165 memset_no_sanitize_memory(p + poison_size, val,
1166 s->inuse - poison_size);
1167}
1168
1169static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
1170 void *from, void *to)
1171{
1172 slab_fix(s, "Restoring %s 0x%p-0x%p=0x%x", message, from, to - 1, data);
1173 memset(from, data, to - from);
1174}
1175
1176#ifdef CONFIG_KMSAN
1177#define pad_check_attributes noinline __no_kmsan_checks
1178#else
1179#define pad_check_attributes
1180#endif
1181
1182static pad_check_attributes int
1183check_bytes_and_report(struct kmem_cache *s, struct slab *slab,
1184 u8 *object, char *what,
1185 u8 *start, unsigned int value, unsigned int bytes)
1186{
1187 u8 *fault;
1188 u8 *end;
1189 u8 *addr = slab_address(slab);
1190
1191 metadata_access_enable();
1192 fault = memchr_inv(kasan_reset_tag(start), value, bytes);
1193 metadata_access_disable();
1194 if (!fault)
1195 return 1;
1196
1197 end = start + bytes;
1198 while (end > fault && end[-1] == value)
1199 end--;
1200
1201 if (slab_add_kunit_errors())
1202 goto skip_bug_print;
1203
1204 slab_bug(s, "%s overwritten", what);
1205 pr_err("0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n",
1206 fault, end - 1, fault - addr,
1207 fault[0], value);
1208
1209skip_bug_print:
1210 restore_bytes(s, what, value, fault, end);
1211 return 0;
1212}
1213
1214/*
1215 * Object layout:
1216 *
1217 * object address
1218 * Bytes of the object to be managed.
1219 * If the freepointer may overlay the object then the free
1220 * pointer is placed in the middle of the object.
1221 *
1222 * Poisoning uses 0x6b (POISON_FREE) and the last byte is
1223 * 0xa5 (POISON_END)
1224 *
1225 * object + s->object_size
1226 * Padding to reach word boundary. This is also used for Redzoning.
1227 * Padding is extended by another word if Redzoning is enabled and
1228 * object_size == inuse.
1229 *
1230 * We fill with 0xbb (SLUB_RED_INACTIVE) for inactive objects and with
1231 * 0xcc (SLUB_RED_ACTIVE) for objects in use.
1232 *
1233 * object + s->inuse
1234 * Metadata starts here.
1235 *
1236 * A. Free pointer (if we cannot overwrite object on free)
1237 * B. Tracking data for SLAB_STORE_USER
1238 * C. Original request size for kmalloc object (SLAB_STORE_USER enabled)
1239 * D. Padding to reach required alignment boundary or at minimum
1240 * one word if debugging is on to be able to detect writes
1241 * before the word boundary.
1242 *
1243 * Padding is done using 0x5a (POISON_INUSE)
1244 *
1245 * object + s->size
1246 * Nothing is used beyond s->size.
1247 *
1248 * If slabcaches are merged then the object_size and inuse boundaries are
1249 * mostly ignored. Therefore no slab options that rely on these boundaries
1250 * may be used with merged slabcaches.
1251 */
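/*
 * Illustrative example (exact offsets depend on configuration and alignment):
 * with SLAB_RED_ZONE and SLAB_STORE_USER on a 64-bit kernel, a 64-byte object
 * is laid out roughly as: left redzone (red_left_pad bytes), 64 object bytes,
 * right redzone up to s->inuse, the free pointer (when it cannot live inside
 * the object), two struct track records, and padding up to s->size.
 */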
1252
1253static int check_pad_bytes(struct kmem_cache *s, struct slab *slab, u8 *p)
1254{
1255 unsigned long off = get_info_end(s); /* The end of info */
1256
1257 if (s->flags & SLAB_STORE_USER) {
1258 /* We also have user information there */
1259 off += 2 * sizeof(struct track);
1260
1261 if (s->flags & SLAB_KMALLOC)
1262 off += sizeof(unsigned int);
1263 }
1264
1265 off += kasan_metadata_size(s, false);
1266
1267 if (size_from_object(s) == off)
1268 return 1;
1269
1270 return check_bytes_and_report(s, slab, p, "Object padding",
1271 p + off, POISON_INUSE, size_from_object(s) - off);
1272}
1273
1274/* Check the pad bytes at the end of a slab page */
1275static pad_check_attributes void
1276slab_pad_check(struct kmem_cache *s, struct slab *slab)
1277{
1278 u8 *start;
1279 u8 *fault;
1280 u8 *end;
1281 u8 *pad;
1282 int length;
1283 int remainder;
1284
1285 if (!(s->flags & SLAB_POISON))
1286 return;
1287
1288 start = slab_address(slab);
1289 length = slab_size(slab);
1290 end = start + length;
1291 remainder = length % s->size;
1292 if (!remainder)
1293 return;
1294
1295 pad = end - remainder;
1296 metadata_access_enable();
1297 fault = memchr_inv(kasan_reset_tag(pad), POISON_INUSE, remainder);
1298 metadata_access_disable();
1299 if (!fault)
1300 return;
1301 while (end > fault && end[-1] == POISON_INUSE)
1302 end--;
1303
1304 slab_err(s, slab, "Padding overwritten. 0x%p-0x%p @offset=%tu",
1305 fault, end - 1, fault - start);
1306 print_section(KERN_ERR, "Padding ", pad, remainder);
1307
1308 restore_bytes(s, "slab padding", POISON_INUSE, fault, end);
1309}
1310
1311static int check_object(struct kmem_cache *s, struct slab *slab,
1312 void *object, u8 val)
1313{
1314 u8 *p = object;
1315 u8 *endobject = object + s->object_size;
1316 unsigned int orig_size, kasan_meta_size;
1317 int ret = 1;
1318
1319 if (s->flags & SLAB_RED_ZONE) {
1320 if (!check_bytes_and_report(s, slab, object, "Left Redzone",
1321 object - s->red_left_pad, val, s->red_left_pad))
1322 ret = 0;
1323
1324 if (!check_bytes_and_report(s, slab, object, "Right Redzone",
1325 endobject, val, s->inuse - s->object_size))
1326 ret = 0;
1327
1328 if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
1329 orig_size = get_orig_size(s, object);
1330
1331 if (s->object_size > orig_size &&
1332 !check_bytes_and_report(s, slab, object,
1333 "kmalloc Redzone", p + orig_size,
1334 val, s->object_size - orig_size)) {
1335 ret = 0;
1336 }
1337 }
1338 } else {
1339 if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
1340 if (!check_bytes_and_report(s, slab, p, "Alignment padding",
1341 endobject, POISON_INUSE,
1342 s->inuse - s->object_size))
1343 ret = 0;
1344 }
1345 }
1346
1347 if (s->flags & SLAB_POISON) {
1348 if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON)) {
1349 /*
1350 * KASAN can save its free metadata inside the
1351 * object at offset 0. Thus, skip checking the part of
1352 * the redzone that overlaps with the metadata.
1353 */
1354 kasan_meta_size = kasan_metadata_size(s, true);
1355 if (kasan_meta_size < s->object_size - 1 &&
1356 !check_bytes_and_report(s, slab, p, "Poison",
1357 p + kasan_meta_size, POISON_FREE,
1358 s->object_size - kasan_meta_size - 1))
1359 ret = 0;
1360 if (kasan_meta_size < s->object_size &&
1361 !check_bytes_and_report(s, slab, p, "End Poison",
1362 p + s->object_size - 1, POISON_END, 1))
1363 ret = 0;
1364 }
1365 /*
1366 * check_pad_bytes cleans up on its own.
1367 */
1368 if (!check_pad_bytes(s, slab, p))
1369 ret = 0;
1370 }
1371
1372 /*
1373 * Cannot check freepointer while object is allocated if
1374 * object and freepointer overlap.
1375 */
1376 if ((freeptr_outside_object(s) || val != SLUB_RED_ACTIVE) &&
1377 !check_valid_pointer(s, slab, get_freepointer(s, p))) {
1378 object_err(s, slab, p, "Freepointer corrupt");
1379 /*
1380 * No choice but to zap it and thus lose the remainder
1381 * of the free objects in this slab. May cause
1382 * another error because the object count is now wrong.
1383 */
1384 set_freepointer(s, p, NULL);
1385 ret = 0;
1386 }
1387
1388 if (!ret && !slab_in_kunit_test()) {
1389 print_trailer(s, slab, object);
1390 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
1391 }
1392
1393 return ret;
1394}
1395
1396static int check_slab(struct kmem_cache *s, struct slab *slab)
1397{
1398 int maxobj;
1399
1400 if (!folio_test_slab(slab_folio(slab))) {
1401 slab_err(s, slab, "Not a valid slab page");
1402 return 0;
1403 }
1404
1405 maxobj = order_objects(slab_order(slab), s->size);
1406 if (slab->objects > maxobj) {
1407 slab_err(s, slab, "objects %u > max %u",
1408 slab->objects, maxobj);
1409 return 0;
1410 }
1411 if (slab->inuse > slab->objects) {
1412 slab_err(s, slab, "inuse %u > max %u",
1413 slab->inuse, slab->objects);
1414 return 0;
1415 }
1416 if (slab->frozen) {
1417 slab_err(s, slab, "Slab disabled since SLUB metadata consistency check failed");
1418 return 0;
1419 }
1420
1421 /* slab_pad_check() fixes things up after itself */
1422 slab_pad_check(s, slab);
1423 return 1;
1424}
1425
1426/*
1427 * Determine if a certain object in a slab is on the freelist. Must hold the
1428 * slab lock to guarantee that the chains are in a consistent state.
1429 */
1430static int on_freelist(struct kmem_cache *s, struct slab *slab, void *search)
1431{
1432 int nr = 0;
1433 void *fp;
1434 void *object = NULL;
1435 int max_objects;
1436
1437 fp = slab->freelist;
1438 while (fp && nr <= slab->objects) {
1439 if (fp == search)
1440 return 1;
1441 if (!check_valid_pointer(s, slab, fp)) {
1442 if (object) {
1443 object_err(s, slab, object,
1444 "Freechain corrupt");
1445 set_freepointer(s, object, NULL);
1446 } else {
1447 slab_err(s, slab, "Freepointer corrupt");
1448 slab->freelist = NULL;
1449 slab->inuse = slab->objects;
1450 slab_fix(s, "Freelist cleared");
1451 return 0;
1452 }
1453 break;
1454 }
1455 object = fp;
1456 fp = get_freepointer(s, object);
1457 nr++;
1458 }
1459
1460 max_objects = order_objects(slab_order(slab), s->size);
1461 if (max_objects > MAX_OBJS_PER_PAGE)
1462 max_objects = MAX_OBJS_PER_PAGE;
1463
1464 if (slab->objects != max_objects) {
1465 slab_err(s, slab, "Wrong number of objects. Found %d but should be %d",
1466 slab->objects, max_objects);
1467 slab->objects = max_objects;
1468 slab_fix(s, "Number of objects adjusted");
1469 }
1470 if (slab->inuse != slab->objects - nr) {
1471 slab_err(s, slab, "Wrong object count. Counter is %d but counted were %d",
1472 slab->inuse, slab->objects - nr);
1473 slab->inuse = slab->objects - nr;
1474 slab_fix(s, "Object count adjusted");
1475 }
1476 return search == NULL;
1477}
1478
1479static void trace(struct kmem_cache *s, struct slab *slab, void *object,
1480 int alloc)
1481{
1482 if (s->flags & SLAB_TRACE) {
1483 pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
1484 s->name,
1485 alloc ? "alloc" : "free",
1486 object, slab->inuse,
1487 slab->freelist);
1488
1489 if (!alloc)
1490 print_section(KERN_INFO, "Object ", (void *)object,
1491 s->object_size);
1492
1493 dump_stack();
1494 }
1495}
1496
1497/*
1498 * Tracking of fully allocated slabs for debugging purposes.
1499 */
1500static void add_full(struct kmem_cache *s,
1501 struct kmem_cache_node *n, struct slab *slab)
1502{
1503 if (!(s->flags & SLAB_STORE_USER))
1504 return;
1505
1506 lockdep_assert_held(&n->list_lock);
1507 list_add(&slab->slab_list, &n->full);
1508}
1509
1510static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct slab *slab)
1511{
1512 if (!(s->flags & SLAB_STORE_USER))
1513 return;
1514
1515 lockdep_assert_held(&n->list_lock);
1516 list_del(&slab->slab_list);
1517}
1518
1519static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1520{
1521 return atomic_long_read(&n->nr_slabs);
1522}
1523
1524static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
1525{
1526 struct kmem_cache_node *n = get_node(s, node);
1527
1528 atomic_long_inc(&n->nr_slabs);
1529 atomic_long_add(objects, &n->total_objects);
1530}
1531static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
1532{
1533 struct kmem_cache_node *n = get_node(s, node);
1534
1535 atomic_long_dec(&n->nr_slabs);
1536 atomic_long_sub(objects, &n->total_objects);
1537}
1538
1539/* Object debug checks for alloc/free paths */
1540static void setup_object_debug(struct kmem_cache *s, void *object)
1541{
1542 if (!kmem_cache_debug_flags(s, SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON))
1543 return;
1544
1545 init_object(s, object, SLUB_RED_INACTIVE);
1546 init_tracking(s, object);
1547}
1548
1549static
1550void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr)
1551{
1552 if (!kmem_cache_debug_flags(s, SLAB_POISON))
1553 return;
1554
1555 metadata_access_enable();
1556 memset(kasan_reset_tag(addr), POISON_INUSE, slab_size(slab));
1557 metadata_access_disable();
1558}
1559
1560static inline int alloc_consistency_checks(struct kmem_cache *s,
1561 struct slab *slab, void *object)
1562{
1563 if (!check_slab(s, slab))
1564 return 0;
1565
1566 if (!check_valid_pointer(s, slab, object)) {
1567 object_err(s, slab, object, "Freelist Pointer check fails");
1568 return 0;
1569 }
1570
1571 if (!check_object(s, slab, object, SLUB_RED_INACTIVE))
1572 return 0;
1573
1574 return 1;
1575}
1576
1577static noinline bool alloc_debug_processing(struct kmem_cache *s,
1578 struct slab *slab, void *object, int orig_size)
1579{
1580 if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1581 if (!alloc_consistency_checks(s, slab, object))
1582 goto bad;
1583 }
1584
1585 /* Success. Perform special debug activities for allocs */
1586 trace(s, slab, object, 1);
1587 set_orig_size(s, object, orig_size);
1588 init_object(s, object, SLUB_RED_ACTIVE);
1589 return true;
1590
1591bad:
1592 if (folio_test_slab(slab_folio(slab))) {
1593 /*
1594 * If this is a slab page then let's do the best we can
1595 * to avoid issues in the future. Marking all objects
1596 * as used avoids touching the remaining objects.
1597 */
1598 slab_fix(s, "Marking all objects used");
1599 slab->inuse = slab->objects;
1600 slab->freelist = NULL;
1601 slab->frozen = 1; /* mark consistency-failed slab as frozen */
1602 }
1603 return false;
1604}
1605
1606static inline int free_consistency_checks(struct kmem_cache *s,
1607 struct slab *slab, void *object, unsigned long addr)
1608{
1609 if (!check_valid_pointer(s, slab, object)) {
1610 slab_err(s, slab, "Invalid object pointer 0x%p", object);
1611 return 0;
1612 }
1613
1614 if (on_freelist(s, slab, object)) {
1615 object_err(s, slab, object, "Object already free");
1616 return 0;
1617 }
1618
1619 if (!check_object(s, slab, object, SLUB_RED_ACTIVE))
1620 return 0;
1621
1622 if (unlikely(s != slab->slab_cache)) {
1623 if (!folio_test_slab(slab_folio(slab))) {
1624 slab_err(s, slab, "Attempt to free object(0x%p) outside of slab",
1625 object);
1626 } else if (!slab->slab_cache) {
1627 pr_err("SLUB <none>: no slab for object 0x%p.\n",
1628 object);
1629 dump_stack();
1630 } else
1631 object_err(s, slab, object,
1632 "page slab pointer corrupt.");
1633 return 0;
1634 }
1635 return 1;
1636}
1637
1638/*
1639 * Parse a block of slab_debug options. Blocks are delimited by ';'
1640 *
1641 * @str: start of block
1642 * @flags: returns parsed flags, or DEBUG_DEFAULT_FLAGS if none specified
1643 * @slabs: return start of list of slabs, or NULL when there's no list
1644 * @init: assume this is initial parsing and not per-kmem-create parsing
1645 *
1646 * returns the start of the next block if there is any, or NULL
1647 */
1648static char *
1649parse_slub_debug_flags(char *str, slab_flags_t *flags, char **slabs, bool init)
1650{
1651 bool higher_order_disable = false;
1652
1653 /* Skip any completely empty blocks */
1654 while (*str && *str == ';')
1655 str++;
1656
1657 if (*str == ',') {
1658 /*
1659 * No options but restriction on slabs. This means full
1660 * debugging for slabs matching a pattern.
1661 */
1662 *flags = DEBUG_DEFAULT_FLAGS;
1663 goto check_slabs;
1664 }
1665 *flags = 0;
1666
1667 /* Determine which debug features should be switched on */
1668 for (; *str && *str != ',' && *str != ';'; str++) {
1669 switch (tolower(*str)) {
1670 case '-':
1671 *flags = 0;
1672 break;
1673 case 'f':
1674 *flags |= SLAB_CONSISTENCY_CHECKS;
1675 break;
1676 case 'z':
1677 *flags |= SLAB_RED_ZONE;
1678 break;
1679 case 'p':
1680 *flags |= SLAB_POISON;
1681 break;
1682 case 'u':
1683 *flags |= SLAB_STORE_USER;
1684 break;
1685 case 't':
1686 *flags |= SLAB_TRACE;
1687 break;
1688 case 'a':
1689 *flags |= SLAB_FAILSLAB;
1690 break;
1691 case 'o':
1692 /*
1693 * Avoid enabling debugging on caches whose minimum
1694 * order would increase as a result.
1695 */
1696 higher_order_disable = true;
1697 break;
1698 default:
1699 if (init)
1700 pr_err("slab_debug option '%c' unknown. skipped\n", *str);
1701 }
1702 }
1703check_slabs:
1704 if (*str == ',')
1705 *slabs = ++str;
1706 else
1707 *slabs = NULL;
1708
1709 /* Skip over the slab list */
1710 while (*str && *str != ';')
1711 str++;
1712
1713 /* Skip any completely empty blocks */
1714 while (*str && *str == ';')
1715 str++;
1716
1717 if (init && higher_order_disable)
1718 disable_higher_order_debug = 1;
1719
1720 if (*str)
1721 return str;
1722 else
1723 return NULL;
1724}
1725
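/*
 * Illustrative example: booting with "slab_debug=FZ;P,kmalloc-*" contains two
 * blocks. The first, "FZ", has no slab list and sets consistency checks plus
 * red zoning as the global default; the second, "P,kmalloc-*", overrides that
 * for caches whose names match "kmalloc-*", which then get only poisoning
 * from the boot parameter.
 */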
1726static int __init setup_slub_debug(char *str)
1727{
1728 slab_flags_t flags;
1729 slab_flags_t global_flags;
1730 char *saved_str;
1731 char *slab_list;
1732 bool global_slub_debug_changed = false;
1733 bool slab_list_specified = false;
1734
1735 global_flags = DEBUG_DEFAULT_FLAGS;
1736 if (*str++ != '=' || !*str)
1737 /*
1738 * No options specified. Switch on full debugging.
1739 */
1740 goto out;
1741
1742 saved_str = str;
1743 while (str) {
1744 str = parse_slub_debug_flags(str, &flags, &slab_list, true);
1745
1746 if (!slab_list) {
1747 global_flags = flags;
1748 global_slub_debug_changed = true;
1749 } else {
1750 slab_list_specified = true;
1751 if (flags & SLAB_STORE_USER)
1752 stack_depot_request_early_init();
1753 }
1754 }
1755
1756 /*
1757 * For backwards compatibility, a single list of flags with a list of
1758 * slabs means debugging is only changed for those slabs, so the global
1759 * slab_debug should be unchanged (0 or DEBUG_DEFAULT_FLAGS, depending
1760 * on CONFIG_SLUB_DEBUG_ON). We can extend that to multiple lists as
1761 * long as there is no option specifying flags without a slab list.
1762 */
1763 if (slab_list_specified) {
1764 if (!global_slub_debug_changed)
1765 global_flags = slub_debug;
1766 slub_debug_string = saved_str;
1767 }
1768out:
1769 slub_debug = global_flags;
1770 if (slub_debug & SLAB_STORE_USER)
1771 stack_depot_request_early_init();
1772 if (slub_debug != 0 || slub_debug_string)
1773 static_branch_enable(&slub_debug_enabled);
1774 else
1775 static_branch_disable(&slub_debug_enabled);
1776 if ((static_branch_unlikely(&init_on_alloc) ||
1777 static_branch_unlikely(&init_on_free)) &&
1778 (slub_debug & SLAB_POISON))
1779 pr_info("mem auto-init: SLAB_POISON will take precedence over init_on_alloc/init_on_free\n");
1780 return 1;
1781}
1782
1783__setup("slab_debug", setup_slub_debug);
1784__setup_param("slub_debug", slub_debug, setup_slub_debug, 0);
1785
1786/*
1787 * kmem_cache_flags - apply debugging options to the cache
1788 * @flags: flags to set
1789 * @name: name of the cache
1790 *
1791 * Debug option(s) are applied to @flags. In addition to the debug
1792 * option(s), if a slab name (or multiple) is specified i.e.
1793 * slab_debug=<Debug-Options>,<slab name1>,<slab name2> ...
1794 * then only the selected slabs will receive the debug option(s).
1795 */
1796slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name)
1797{
1798 char *iter;
1799 size_t len;
1800 char *next_block;
1801 slab_flags_t block_flags;
1802 slab_flags_t slub_debug_local = slub_debug;
1803
1804 if (flags & SLAB_NO_USER_FLAGS)
1805 return flags;
1806
1807 /*
1808 * If the slab cache is for debugging (e.g. kmemleak) then
1809 * don't store user (stack trace) information by default,
1810 * but let the user enable it via the command line below.
1811 */
1812 if (flags & SLAB_NOLEAKTRACE)
1813 slub_debug_local &= ~SLAB_STORE_USER;
1814
1815 len = strlen(name);
1816 next_block = slub_debug_string;
1817 /* Go through all blocks of debug options, see if any matches our slab's name */
1818 while (next_block) {
1819 next_block = parse_slub_debug_flags(next_block, &block_flags, &iter, false);
1820 if (!iter)
1821 continue;
1822 /* Found a block that has a slab list, search it */
1823 while (*iter) {
1824 char *end, *glob;
1825 size_t cmplen;
1826
1827 end = strchrnul(iter, ',');
1828 if (next_block && next_block < end)
1829 end = next_block - 1;
1830
1831 glob = strnchr(iter, end - iter, '*');
1832 if (glob)
1833 cmplen = glob - iter;
1834 else
1835 cmplen = max_t(size_t, len, (end - iter));
1836
1837 if (!strncmp(name, iter, cmplen)) {
1838 flags |= block_flags;
1839 return flags;
1840 }
1841
1842 if (!*end || *end == ';')
1843 break;
1844 iter = end + 1;
1845 }
1846 }
1847
1848 return flags | slub_debug_local;
1849}
1850#else /* !CONFIG_SLUB_DEBUG */
1851static inline void setup_object_debug(struct kmem_cache *s, void *object) {}
1852static inline
1853void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr) {}
1854
1855static inline bool alloc_debug_processing(struct kmem_cache *s,
1856 struct slab *slab, void *object, int orig_size) { return true; }
1857
1858static inline bool free_debug_processing(struct kmem_cache *s,
1859 struct slab *slab, void *head, void *tail, int *bulk_cnt,
1860 unsigned long addr, depot_stack_handle_t handle) { return true; }
1861
1862static inline void slab_pad_check(struct kmem_cache *s, struct slab *slab) {}
1863static inline int check_object(struct kmem_cache *s, struct slab *slab,
1864 void *object, u8 val) { return 1; }
1865static inline depot_stack_handle_t set_track_prepare(void) { return 0; }
1866static inline void set_track(struct kmem_cache *s, void *object,
1867 enum track_item alloc, unsigned long addr) {}
1868static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
1869 struct slab *slab) {}
1870static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
1871 struct slab *slab) {}
1872slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name)
1873{
1874 return flags;
1875}
1876#define slub_debug 0
1877
1878#define disable_higher_order_debug 0
1879
1880static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1881 { return 0; }
1882static inline void inc_slabs_node(struct kmem_cache *s, int node,
1883 int objects) {}
1884static inline void dec_slabs_node(struct kmem_cache *s, int node,
1885 int objects) {}
1886#ifndef CONFIG_SLUB_TINY
1887static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab,
1888 void **freelist, void *nextfree)
1889{
1890 return false;
1891}
1892#endif
1893#endif /* CONFIG_SLUB_DEBUG */
1894
1895#ifdef CONFIG_SLAB_OBJ_EXT
1896
1897#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
1898
1899static inline void mark_objexts_empty(struct slabobj_ext *obj_exts)
1900{
1901 struct slabobj_ext *slab_exts;
1902 struct slab *obj_exts_slab;
1903
1904 obj_exts_slab = virt_to_slab(obj_exts);
1905 slab_exts = slab_obj_exts(obj_exts_slab);
1906 if (slab_exts) {
1907 unsigned int offs = obj_to_index(obj_exts_slab->slab_cache,
1908 obj_exts_slab, obj_exts);
1909 /* codetag should be NULL */
1910 WARN_ON(slab_exts[offs].ref.ct);
1911 set_codetag_empty(&slab_exts[offs].ref);
1912 }
1913}
1914
1915static inline void mark_failed_objexts_alloc(struct slab *slab)
1916{
1917 slab->obj_exts = OBJEXTS_ALLOC_FAIL;
1918}
1919
1920static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
1921 struct slabobj_ext *vec, unsigned int objects)
1922{
1923 /*
1924 * If the vector previously failed to allocate then we have live
1925 * objects with no tag reference. Mark all references in this
1926 * vector as empty to avoid warnings later on.
1927 */
1928 if (obj_exts & OBJEXTS_ALLOC_FAIL) {
1929 unsigned int i;
1930
1931 for (i = 0; i < objects; i++)
1932 set_codetag_empty(&vec[i].ref);
1933 }
1934}
1935
1936#else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
1937
1938static inline void mark_objexts_empty(struct slabobj_ext *obj_exts) {}
1939static inline void mark_failed_objexts_alloc(struct slab *slab) {}
1940static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
1941 struct slabobj_ext *vec, unsigned int objects) {}
1942
1943#endif /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
1944
1945/*
1946 * The allocated objcg pointers array is not accounted directly.
1947 * Moreover, it should not come from a DMA buffer and is not readily
1948 * reclaimable. So those GFP bits should be masked off.
1949 */
1950#define OBJCGS_CLEAR_MASK (__GFP_DMA | __GFP_RECLAIMABLE | \
1951 __GFP_ACCOUNT | __GFP_NOFAIL)
1952
1953int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
1954 gfp_t gfp, bool new_slab)
1955{
1956 unsigned int objects = objs_per_slab(s, slab);
1957 unsigned long new_exts;
1958 unsigned long old_exts;
1959 struct slabobj_ext *vec;
1960
1961 gfp &= ~OBJCGS_CLEAR_MASK;
1962 /* Prevent recursive extension vector allocation */
1963 gfp |= __GFP_NO_OBJ_EXT;
1964 vec = kcalloc_node(objects, sizeof(struct slabobj_ext), gfp,
1965 slab_nid(slab));
1966 if (!vec) {
1967 /* Mark vectors which failed to allocate */
1968 if (new_slab)
1969 mark_failed_objexts_alloc(slab);
1970
1971 return -ENOMEM;
1972 }
1973
1974 new_exts = (unsigned long)vec;
1975#ifdef CONFIG_MEMCG
1976 new_exts |= MEMCG_DATA_OBJEXTS;
1977#endif
1978 old_exts = READ_ONCE(slab->obj_exts);
1979 handle_failed_objexts_alloc(old_exts, vec, objects);
1980 if (new_slab) {
1981 /*
1982 * If the slab is brand new and nobody can yet access its
1983 * obj_exts, no synchronization is required and obj_exts can
1984 * be simply assigned.
1985 */
1986 slab->obj_exts = new_exts;
1987 } else if ((old_exts & ~OBJEXTS_FLAGS_MASK) ||
1988 cmpxchg(&slab->obj_exts, old_exts, new_exts) != old_exts) {
1989 /*
1990 * If the slab is already in use, somebody can allocate and
1991 * assign slabobj_exts in parallel. In this case the existing
1992 * objcg vector should be reused.
1993 */
1994 mark_objexts_empty(vec);
1995 kfree(vec);
1996 return 0;
1997 }
1998
1999 kmemleak_not_leak(vec);
2000 return 0;
2001}
2002
2003static inline void free_slab_obj_exts(struct slab *slab)
2004{
2005 struct slabobj_ext *obj_exts;
2006
2007 obj_exts = slab_obj_exts(slab);
2008 if (!obj_exts)
2009 return;
2010
2011 /*
2012 * obj_exts was created with the __GFP_NO_OBJ_EXT flag, so its
2013 * corresponding extension will be NULL. alloc_tag_sub() will warn
2014 * if the slab has extensions but the extension of an object is
2015 * NULL, therefore replace NULL with CODETAG_EMPTY to indicate that
2016 * the extension for obj_exts is expected to be NULL.
2017 */
2018 mark_objexts_empty(obj_exts);
2019 kfree(obj_exts);
2020 slab->obj_exts = 0;
2021}
2022
2023static inline bool need_slab_obj_ext(void)
2024{
2025 if (mem_alloc_profiling_enabled())
2026 return true;
2027
2028 /*
2029 * CONFIG_MEMCG creates a vector of obj_cgroup objects conditionally
2030 * inside memcg_slab_post_alloc_hook. No other users for now.
2031 */
2032 return false;
2033}
2034
2035#else /* CONFIG_SLAB_OBJ_EXT */
2036
2037static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
2038 gfp_t gfp, bool new_slab)
2039{
2040 return 0;
2041}
2042
2043static inline void free_slab_obj_exts(struct slab *slab)
2044{
2045}
2046
2047static inline bool need_slab_obj_ext(void)
2048{
2049 return false;
2050}
2051
2052#endif /* CONFIG_SLAB_OBJ_EXT */
2053
2054#ifdef CONFIG_MEM_ALLOC_PROFILING
2055
2056static inline struct slabobj_ext *
2057prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
2058{
2059 struct slab *slab;
2060
2061 if (!p)
2062 return NULL;
2063
2064 if (s->flags & (SLAB_NO_OBJ_EXT | SLAB_NOLEAKTRACE))
2065 return NULL;
2066
2067 if (flags & __GFP_NO_OBJ_EXT)
2068 return NULL;
2069
2070 slab = virt_to_slab(p);
2071 if (!slab_obj_exts(slab) &&
2072 WARN(alloc_slab_obj_exts(slab, s, flags, false),
2073 "%s, %s: Failed to create slab extension vector!\n",
2074 __func__, s->name))
2075 return NULL;
2076
2077 return slab_obj_exts(slab) + obj_to_index(s, slab, p);
2078}
2079
2080static inline void
2081alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
2082{
2083 if (need_slab_obj_ext()) {
2084 struct slabobj_ext *obj_exts;
2085
2086 obj_exts = prepare_slab_obj_exts_hook(s, flags, object);
2087 /*
2088 * Currently obj_exts is used only for allocation profiling.
2089 * If other users appear then a mem_alloc_profiling_enabled()
2090 * check should be added before alloc_tag_add().
2091 */
2092 if (likely(obj_exts))
2093 alloc_tag_add(&obj_exts->ref, current->alloc_tag, s->size);
2094 }
2095}
2096
2097static inline void
2098alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
2099 int objects)
2100{
2101 struct slabobj_ext *obj_exts;
2102 int i;
2103
2104 if (!mem_alloc_profiling_enabled())
2105 return;
2106
2107 /* slab->obj_exts might not be NULL if it was created for MEMCG accounting. */
2108 if (s->flags & (SLAB_NO_OBJ_EXT | SLAB_NOLEAKTRACE))
2109 return;
2110
2111 obj_exts = slab_obj_exts(slab);
2112 if (!obj_exts)
2113 return;
2114
2115 for (i = 0; i < objects; i++) {
2116 unsigned int off = obj_to_index(s, slab, p[i]);
2117
2118 alloc_tag_sub(&obj_exts[off].ref, s->size);
2119 }
2120}
2121
2122#else /* CONFIG_MEM_ALLOC_PROFILING */
2123
2124static inline void
2125alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
2126{
2127}
2128
2129static inline void
2130alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
2131 int objects)
2132{
2133}
2134
2135#endif /* CONFIG_MEM_ALLOC_PROFILING */
2136
2137
2138#ifdef CONFIG_MEMCG
2139
2140static void memcg_alloc_abort_single(struct kmem_cache *s, void *object);
2141
2142static __fastpath_inline
2143bool memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
2144 gfp_t flags, size_t size, void **p)
2145{
2146 if (likely(!memcg_kmem_online()))
2147 return true;
2148
2149 if (likely(!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT)))
2150 return true;
2151
2152 if (likely(__memcg_slab_post_alloc_hook(s, lru, flags, size, p)))
2153 return true;
2154
2155 if (likely(size == 1)) {
2156 memcg_alloc_abort_single(s, *p);
2157 *p = NULL;
2158 } else {
2159 kmem_cache_free_bulk(s, size, p);
2160 }
2161
2162 return false;
2163}
2164
2165static __fastpath_inline
2166void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
2167 int objects)
2168{
2169 struct slabobj_ext *obj_exts;
2170
2171 if (!memcg_kmem_online())
2172 return;
2173
2174 obj_exts = slab_obj_exts(slab);
2175 if (likely(!obj_exts))
2176 return;
2177
2178 __memcg_slab_free_hook(s, slab, p, objects, obj_exts);
2179}
2180
2181static __fastpath_inline
2182bool memcg_slab_post_charge(void *p, gfp_t flags)
2183{
2184 struct slabobj_ext *slab_exts;
2185 struct kmem_cache *s;
2186 struct folio *folio;
2187 struct slab *slab;
2188 unsigned long off;
2189
2190 folio = virt_to_folio(p);
2191 if (!folio_test_slab(folio)) {
2192 int size;
2193
2194 if (folio_memcg_kmem(folio))
2195 return true;
2196
2197 if (__memcg_kmem_charge_page(folio_page(folio, 0), flags,
2198 folio_order(folio)))
2199 return false;
2200
2201 /*
2202 * This folio has already been accounted in the global stats but
2203 * not in the memcg stats. So, subtract from the global and use
2204 * the interface which adds to both global and memcg stats.
2205 */
2206 size = folio_size(folio);
2207 node_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B, -size);
2208 lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B, size);
2209 return true;
2210 }
2211
2212 slab = folio_slab(folio);
2213 s = slab->slab_cache;
2214
2215 /*
2216 * Ignore KMALLOC_NORMAL caches to avoid a possible circular dependency
2217 * where slab_obj_exts is allocated from the same slab, which would make
2218 * the slab effectively unfreeable.
2219 */
2220 if (is_kmalloc_normal(s))
2221 return true;
2222
2223 /* Ignore already charged objects. */
2224 slab_exts = slab_obj_exts(slab);
2225 if (slab_exts) {
2226 off = obj_to_index(s, slab, p);
2227 if (unlikely(slab_exts[off].objcg))
2228 return true;
2229 }
2230
2231 return __memcg_slab_post_alloc_hook(s, NULL, flags, 1, &p);
2232}
2233
2234#else /* CONFIG_MEMCG */
2235static inline bool memcg_slab_post_alloc_hook(struct kmem_cache *s,
2236 struct list_lru *lru,
2237 gfp_t flags, size_t size,
2238 void **p)
2239{
2240 return true;
2241}
2242
2243static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
2244 void **p, int objects)
2245{
2246}
2247
2248static inline bool memcg_slab_post_charge(void *p, gfp_t flags)
2249{
2250 return true;
2251}
2252#endif /* CONFIG_MEMCG */
2253
2254#ifdef CONFIG_SLUB_RCU_DEBUG
2255static void slab_free_after_rcu_debug(struct rcu_head *rcu_head);
2256
2257struct rcu_delayed_free {
2258 struct rcu_head head;
2259 void *object;
2260};
2261#endif
2262
2263/*
2264 * Hooks for other subsystems that check memory allocations. In a typical
2265 * production configuration all of these hooks should produce no code at all.
2266 *
2267 * Returns true if freeing of the object can proceed, false if its reuse
2268 * was delayed by CONFIG_SLUB_RCU_DEBUG or KASAN quarantine, or it was returned
2269 * to KFENCE.
2270 */
2271static __always_inline
2272bool slab_free_hook(struct kmem_cache *s, void *x, bool init,
2273 bool after_rcu_delay)
2274{
2275 /* Are the object contents still accessible? */
2276 bool still_accessible = (s->flags & SLAB_TYPESAFE_BY_RCU) && !after_rcu_delay;
2277
2278 kmemleak_free_recursive(x, s->flags);
2279 kmsan_slab_free(s, x);
2280
2281 debug_check_no_locks_freed(x, s->object_size);
2282
2283 if (!(s->flags & SLAB_DEBUG_OBJECTS))
2284 debug_check_no_obj_freed(x, s->object_size);
2285
2286 /* Use KCSAN to help debug racy use-after-free. */
2287 if (!still_accessible)
2288 __kcsan_check_access(x, s->object_size,
2289 KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT);
2290
2291 if (kfence_free(x))
2292 return false;
2293
2294 /*
2295 * Give KASAN a chance to notice an invalid free operation before we
2296 * modify the object.
2297 */
2298 if (kasan_slab_pre_free(s, x))
2299 return false;
2300
2301#ifdef CONFIG_SLUB_RCU_DEBUG
2302 if (still_accessible) {
2303 struct rcu_delayed_free *delayed_free;
2304
2305 delayed_free = kmalloc(sizeof(*delayed_free), GFP_NOWAIT);
2306 if (delayed_free) {
2307 /*
2308 * Let KASAN track our call stack as a "related work
2309 * creation", just like if the object had been freed
2310 * normally via kfree_rcu().
2311 * We have to do this manually because the rcu_head is
2312 * not located inside the object.
2313 */
2314 kasan_record_aux_stack_noalloc(x);
2315
2316 delayed_free->object = x;
2317 call_rcu(&delayed_free->head, slab_free_after_rcu_debug);
2318 return false;
2319 }
2320 }
2321#endif /* CONFIG_SLUB_RCU_DEBUG */
2322
2323 /*
2324 * As memory initialization might be integrated into KASAN,
2325 * kasan_slab_free and initialization memsets must be
2326 * kept together to avoid discrepancies in behavior.
2327 *
2328 * The initialization memsets clear the object and the metadata,
2329 * but don't touch the SLAB redzone.
2330 *
2331 * The object's freepointer is also avoided if stored outside the
2332 * object.
2333 */
2334 if (unlikely(init)) {
2335 int rsize;
2336 unsigned int inuse, orig_size;
2337
2338 inuse = get_info_end(s);
2339 orig_size = get_orig_size(s, x);
2340 if (!kasan_has_integrated_init())
2341 memset(kasan_reset_tag(x), 0, orig_size);
2342 rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad : 0;
2343 memset((char *)kasan_reset_tag(x) + inuse, 0,
2344 s->size - inuse - rsize);
2345 /*
2346 * Restore orig_size, otherwise a kmalloc redzone overwrite
2347 * would be reported.
2348 */
2349 set_orig_size(s, x, orig_size);
2350
2351 }
2352 /* KASAN might put x into memory quarantine, delaying its reuse. */
2353 return !kasan_slab_free(s, x, init, still_accessible);
2354}
2355
2356static __fastpath_inline
2357bool slab_free_freelist_hook(struct kmem_cache *s, void **head, void **tail,
2358 int *cnt)
2359{
2360
2361 void *object;
2362 void *next = *head;
2363 void *old_tail = *tail;
2364 bool init;
2365
2366 if (is_kfence_address(next)) {
2367 slab_free_hook(s, next, false, false);
2368 return false;
2369 }
2370
2371 /* Head and tail of the reconstructed freelist */
2372 *head = NULL;
2373 *tail = NULL;
2374
2375 init = slab_want_init_on_free(s);
2376
2377 do {
2378 object = next;
2379 next = get_freepointer(s, object);
2380
2381 /* If object's reuse doesn't have to be delayed */
2382 if (likely(slab_free_hook(s, object, init, false))) {
2383 /* Move object to the new freelist */
2384 set_freepointer(s, object, *head);
2385 *head = object;
2386 if (!*tail)
2387 *tail = object;
2388 } else {
2389 /*
2390 * Adjust the reconstructed freelist depth
2391 * accordingly if object's reuse is delayed.
2392 */
2393 --(*cnt);
2394 }
2395 } while (object != old_tail);
2396
2397 return *head != NULL;
2398}
2399
2400static void *setup_object(struct kmem_cache *s, void *object)
2401{
2402 setup_object_debug(s, object);
2403 object = kasan_init_slab_obj(s, object);
2404 if (unlikely(s->ctor)) {
2405 kasan_unpoison_new_object(s, object);
2406 s->ctor(object);
2407 kasan_poison_new_object(s, object);
2408 }
2409 return object;
2410}
2411
2412/*
2413 * Slab allocation and freeing
2414 */
2415static inline struct slab *alloc_slab_page(gfp_t flags, int node,
2416 struct kmem_cache_order_objects oo)
2417{
2418 struct folio *folio;
2419 struct slab *slab;
2420 unsigned int order = oo_order(oo);
2421
2422 if (node == NUMA_NO_NODE)
2423 folio = (struct folio *)alloc_pages(flags, order);
2424 else
2425 folio = (struct folio *)__alloc_pages_node(node, flags, order);
2426
2427 if (!folio)
2428 return NULL;
2429
2430 slab = folio_slab(folio);
2431 __folio_set_slab(folio);
2432 /* Make the flag visible before any changes to folio->mapping */
2433 smp_wmb();
2434 if (folio_is_pfmemalloc(folio))
2435 slab_set_pfmemalloc(slab);
2436
2437 return slab;
2438}
2439
2440#ifdef CONFIG_SLAB_FREELIST_RANDOM
2441/* Pre-initialize the random sequence cache */
2442static int init_cache_random_seq(struct kmem_cache *s)
2443{
2444 unsigned int count = oo_objects(s->oo);
2445 int err;
2446
2447 /* Bailout if already initialised */
2448 if (s->random_seq)
2449 return 0;
2450
2451 err = cache_random_seq_create(s, count, GFP_KERNEL);
2452 if (err) {
2453 pr_err("SLUB: Unable to initialize free list for %s\n",
2454 s->name);
2455 return err;
2456 }
2457
2458 /* Transform to an offset on the set of pages */
2459 if (s->random_seq) {
2460 unsigned int i;
2461
2462 for (i = 0; i < count; i++)
2463 s->random_seq[i] *= s->size;
2464 }
2465 return 0;
2466}
2467
2468/* Initialize each random sequence freelist per cache */
2469static void __init init_freelist_randomization(void)
2470{
2471 struct kmem_cache *s;
2472
2473 mutex_lock(&slab_mutex);
2474
2475 list_for_each_entry(s, &slab_caches, list)
2476 init_cache_random_seq(s);
2477
2478 mutex_unlock(&slab_mutex);
2479}
2480
2481 /* Get the next entry from the pre-computed randomized freelist */
2482static void *next_freelist_entry(struct kmem_cache *s,
2483 unsigned long *pos, void *start,
2484 unsigned long page_limit,
2485 unsigned long freelist_count)
2486{
2487 unsigned int idx;
2488
2489 /*
2490 * If the target page allocation failed, the number of objects on the
2491 * page might be smaller than the usual size defined by the cache.
2492 */
2493 do {
2494 idx = s->random_seq[*pos];
2495 *pos += 1;
2496 if (*pos >= freelist_count)
2497 *pos = 0;
2498 } while (unlikely(idx >= page_limit));
2499
2500 return (char *)start + idx;
2501}
2502
2503 /* Shuffle the singly linked freelist based on a random pre-computed sequence */
2504static bool shuffle_freelist(struct kmem_cache *s, struct slab *slab)
2505{
2506 void *start;
2507 void *cur;
2508 void *next;
2509 unsigned long idx, pos, page_limit, freelist_count;
2510
2511 if (slab->objects < 2 || !s->random_seq)
2512 return false;
2513
2514 freelist_count = oo_objects(s->oo);
2515 pos = get_random_u32_below(freelist_count);
2516
2517 page_limit = slab->objects * s->size;
2518 start = fixup_red_left(s, slab_address(slab));
2519
2520 /* First entry is used as the base of the freelist */
2521 cur = next_freelist_entry(s, &pos, start, page_limit, freelist_count);
2522 cur = setup_object(s, cur);
2523 slab->freelist = cur;
2524
2525 for (idx = 1; idx < slab->objects; idx++) {
2526 next = next_freelist_entry(s, &pos, start, page_limit,
2527 freelist_count);
2528 next = setup_object(s, next);
2529 set_freepointer(s, cur, next);
2530 cur = next;
2531 }
2532 set_freepointer(s, cur, NULL);
2533
2534 return true;
2535}
2536#else
2537static inline int init_cache_random_seq(struct kmem_cache *s)
2538{
2539 return 0;
2540}
2541static inline void init_freelist_randomization(void) { }
2542static inline bool shuffle_freelist(struct kmem_cache *s, struct slab *slab)
2543{
2544 return false;
2545}
2546#endif /* CONFIG_SLAB_FREELIST_RANDOM */
2547
2548static __always_inline void account_slab(struct slab *slab, int order,
2549 struct kmem_cache *s, gfp_t gfp)
2550{
2551 if (memcg_kmem_online() && (s->flags & SLAB_ACCOUNT))
2552 alloc_slab_obj_exts(slab, s, gfp, true);
2553
2554 mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
2555 PAGE_SIZE << order);
2556}
2557
2558static __always_inline void unaccount_slab(struct slab *slab, int order,
2559 struct kmem_cache *s)
2560{
2561 if (memcg_kmem_online() || need_slab_obj_ext())
2562 free_slab_obj_exts(slab);
2563
2564 mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
2565 -(PAGE_SIZE << order));
2566}
2567
2568static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
2569{
2570 struct slab *slab;
2571 struct kmem_cache_order_objects oo = s->oo;
2572 gfp_t alloc_gfp;
2573 void *start, *p, *next;
2574 int idx;
2575 bool shuffle;
2576
2577 flags &= gfp_allowed_mask;
2578
2579 flags |= s->allocflags;
2580
2581 /*
2582 * Let the initial higher-order allocation fail under memory pressure
2583 * so we fall back to the minimum order allocation.
2584 */
2585 alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
2586 if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min))
2587 alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~__GFP_RECLAIM;
2588
2589 slab = alloc_slab_page(alloc_gfp, node, oo);
2590 if (unlikely(!slab)) {
2591 oo = s->min;
2592 alloc_gfp = flags;
2593 /*
2594 * Allocation may have failed due to fragmentation.
2595 * Try a lower-order allocation if possible.
2596 */
2597 slab = alloc_slab_page(alloc_gfp, node, oo);
2598 if (unlikely(!slab))
2599 return NULL;
2600 stat(s, ORDER_FALLBACK);
2601 }
2602
2603 slab->objects = oo_objects(oo);
2604 slab->inuse = 0;
2605 slab->frozen = 0;
2606
2607 account_slab(slab, oo_order(oo), s, flags);
2608
2609 slab->slab_cache = s;
2610
2611 kasan_poison_slab(slab);
2612
2613 start = slab_address(slab);
2614
2615 setup_slab_debug(s, slab, start);
2616
2617 shuffle = shuffle_freelist(s, slab);
2618
2619 if (!shuffle) {
2620 start = fixup_red_left(s, start);
2621 start = setup_object(s, start);
2622 slab->freelist = start;
2623 for (idx = 0, p = start; idx < slab->objects - 1; idx++) {
2624 next = p + s->size;
2625 next = setup_object(s, next);
2626 set_freepointer(s, p, next);
2627 p = next;
2628 }
2629 set_freepointer(s, p, NULL);
2630 }
2631
2632 return slab;
2633}
2634
2635static struct slab *new_slab(struct kmem_cache *s, gfp_t flags, int node)
2636{
2637 if (unlikely(flags & GFP_SLAB_BUG_MASK))
2638 flags = kmalloc_fix_flags(flags);
2639
2640 WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO));
2641
2642 return allocate_slab(s,
2643 flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
2644}
2645
2646static void __free_slab(struct kmem_cache *s, struct slab *slab)
2647{
2648 struct folio *folio = slab_folio(slab);
2649 int order = folio_order(folio);
2650 int pages = 1 << order;
2651
2652 __slab_clear_pfmemalloc(slab);
2653 folio->mapping = NULL;
2654 /* Make the mapping reset visible before clearing the flag */
2655 smp_wmb();
2656 __folio_clear_slab(folio);
2657 mm_account_reclaimed_pages(pages);
2658 unaccount_slab(slab, order, s);
2659 __free_pages(&folio->page, order);
2660}
2661
2662static void rcu_free_slab(struct rcu_head *h)
2663{
2664 struct slab *slab = container_of(h, struct slab, rcu_head);
2665
2666 __free_slab(slab->slab_cache, slab);
2667}
2668
2669static void free_slab(struct kmem_cache *s, struct slab *slab)
2670{
2671 if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) {
2672 void *p;
2673
2674 slab_pad_check(s, slab);
2675 for_each_object(p, s, slab_address(slab), slab->objects)
2676 check_object(s, slab, p, SLUB_RED_INACTIVE);
2677 }
2678
2679 if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU))
2680 call_rcu(&slab->rcu_head, rcu_free_slab);
2681 else
2682 __free_slab(s, slab);
2683}
2684
2685static void discard_slab(struct kmem_cache *s, struct slab *slab)
2686{
2687 dec_slabs_node(s, slab_nid(slab), slab->objects);
2688 free_slab(s, slab);
2689}
2690
2691/*
2692 * SLUB reuses the PG_workingset bit to keep track of whether a slab is on
2693 * the per-node partial list.
2694 */
2695static inline bool slab_test_node_partial(const struct slab *slab)
2696{
2697 return folio_test_workingset(slab_folio(slab));
2698}
2699
2700static inline void slab_set_node_partial(struct slab *slab)
2701{
2702 set_bit(PG_workingset, folio_flags(slab_folio(slab), 0));
2703}
2704
2705static inline void slab_clear_node_partial(struct slab *slab)
2706{
2707 clear_bit(PG_workingset, folio_flags(slab_folio(slab), 0));
2708}
2709
2710/*
2711 * Management of partially allocated slabs.
2712 */
2713static inline void
2714__add_partial(struct kmem_cache_node *n, struct slab *slab, int tail)
2715{
2716 n->nr_partial++;
2717 if (tail == DEACTIVATE_TO_TAIL)
2718 list_add_tail(&slab->slab_list, &n->partial);
2719 else
2720 list_add(&slab->slab_list, &n->partial);
2721 slab_set_node_partial(slab);
2722}
2723
2724static inline void add_partial(struct kmem_cache_node *n,
2725 struct slab *slab, int tail)
2726{
2727 lockdep_assert_held(&n->list_lock);
2728 __add_partial(n, slab, tail);
2729}
2730
2731static inline void remove_partial(struct kmem_cache_node *n,
2732 struct slab *slab)
2733{
2734 lockdep_assert_held(&n->list_lock);
2735 list_del(&slab->slab_list);
2736 slab_clear_node_partial(slab);
2737 n->nr_partial--;
2738}
2739
2740/*
2741 * Called only for kmem_cache_debug() caches instead of remove_partial(), with a
2742 * slab from the n->partial list. Remove only a single object from the slab, do
2743 * the alloc_debug_processing() checks and leave the slab on the list, or move
2744 * it to the full list if it was the last free object.
2745 */
2746static void *alloc_single_from_partial(struct kmem_cache *s,
2747 struct kmem_cache_node *n, struct slab *slab, int orig_size)
2748{
2749 void *object;
2750
2751 lockdep_assert_held(&n->list_lock);
2752
2753 object = slab->freelist;
2754 slab->freelist = get_freepointer(s, object);
2755 slab->inuse++;
2756
2757 if (!alloc_debug_processing(s, slab, object, orig_size)) {
2758 if (folio_test_slab(slab_folio(slab)))
2759 remove_partial(n, slab);
2760 return NULL;
2761 }
2762
2763 if (slab->inuse == slab->objects) {
2764 remove_partial(n, slab);
2765 add_full(s, n, slab);
2766 }
2767
2768 return object;
2769}
2770
2771/*
2772 * Called only for kmem_cache_debug() caches to allocate from a freshly
2773 * allocated slab. Allocate a single object instead of the whole freelist
2774 * and put the slab on the partial (or full) list.
2775 */
2776static void *alloc_single_from_new_slab(struct kmem_cache *s,
2777 struct slab *slab, int orig_size)
2778{
2779 int nid = slab_nid(slab);
2780 struct kmem_cache_node *n = get_node(s, nid);
2781 unsigned long flags;
2782 void *object;
2783
2785 object = slab->freelist;
2786 slab->freelist = get_freepointer(s, object);
2787 slab->inuse = 1;
2788
2789 if (!alloc_debug_processing(s, slab, object, orig_size))
2790 /*
2791 * It's not really expected that this would fail on a
2792 * freshly allocated slab, but a concurrent memory
2793 * corruption in theory could cause that.
2794 */
2795 return NULL;
2796
2797 spin_lock_irqsave(&n->list_lock, flags);
2798
2799 if (slab->inuse == slab->objects)
2800 add_full(s, n, slab);
2801 else
2802 add_partial(n, slab, DEACTIVATE_TO_HEAD);
2803
2804 inc_slabs_node(s, nid, slab->objects);
2805 spin_unlock_irqrestore(&n->list_lock, flags);
2806
2807 return object;
2808}
2809
2810#ifdef CONFIG_SLUB_CPU_PARTIAL
2811static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain);
2812#else
2813static inline void put_cpu_partial(struct kmem_cache *s, struct slab *slab,
2814 int drain) { }
2815#endif
2816static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags);
2817
2818/*
2819 * Try to allocate a partial slab from a specific node.
2820 */
2821static struct slab *get_partial_node(struct kmem_cache *s,
2822 struct kmem_cache_node *n,
2823 struct partial_context *pc)
2824{
2825 struct slab *slab, *slab2, *partial = NULL;
2826 unsigned long flags;
2827 unsigned int partial_slabs = 0;
2828
2829 /*
2830 * Racy check. If we mistakenly see no partial slabs then we
2831 * just allocate an empty slab. If we mistakenly try to get a
2832 * partial slab and there is none available then get_partial()
2833 * will return NULL.
2834 */
2835 if (!n || !n->nr_partial)
2836 return NULL;
2837
2838 spin_lock_irqsave(&n->list_lock, flags);
2839 list_for_each_entry_safe(slab, slab2, &n->partial, slab_list) {
2840 if (!pfmemalloc_match(slab, pc->flags))
2841 continue;
2842
2843 if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) {
2844 void *object = alloc_single_from_partial(s, n, slab,
2845 pc->orig_size);
2846 if (object) {
2847 partial = slab;
2848 pc->object = object;
2849 break;
2850 }
2851 continue;
2852 }
2853
2854 remove_partial(n, slab);
2855
2856 if (!partial) {
2857 partial = slab;
2858 stat(s, ALLOC_FROM_PARTIAL);
2859
2860 if ((slub_get_cpu_partial(s) == 0)) {
2861 break;
2862 }
2863 } else {
2864 put_cpu_partial(s, slab, 0);
2865 stat(s, CPU_PARTIAL_NODE);
2866
2867 if (++partial_slabs > slub_get_cpu_partial(s) / 2) {
2868 break;
2869 }
2870 }
2871 }
2872 spin_unlock_irqrestore(&n->list_lock, flags);
2873 return partial;
2874}
2875
2876/*
2877 * Get a slab from somewhere. Search in increasing NUMA distances.
2878 */
2879static struct slab *get_any_partial(struct kmem_cache *s,
2880 struct partial_context *pc)
2881{
2882#ifdef CONFIG_NUMA
2883 struct zonelist *zonelist;
2884 struct zoneref *z;
2885 struct zone *zone;
2886 enum zone_type highest_zoneidx = gfp_zone(pc->flags);
2887 struct slab *slab;
2888 unsigned int cpuset_mems_cookie;
2889
2890 /*
2891 * The defrag ratio allows a configuration of the tradeoffs between
2892 * inter node defragmentation and node local allocations. A lower
2893 * defrag_ratio increases the tendency to do local allocations
2894 * instead of attempting to obtain partial slabs from other nodes.
2895 *
2896 * If the defrag_ratio is set to 0 then kmalloc() always
2897 * returns node local objects. If the ratio is higher then kmalloc()
2898 * may return off node objects because partial slabs are obtained
2899 * from other nodes and filled up.
2900 *
2901 * If /sys/kernel/slab/xx/remote_node_defrag_ratio is set to 100
2902 * (which makes defrag_ratio = 1000) then every (well almost)
2903 * allocation will first attempt to defrag slab caches on other nodes.
2904 * This means scanning over all nodes to look for partial slabs which
2905 * may be expensive if we do it every time we are trying to find a slab
2906 * with available objects.
2907 */
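	/*
	 * Worked example (illustrative): writing 98 to
	 * /sys/kernel/slab/<cache>/remote_node_defrag_ratio stores
	 * s->remote_node_defrag_ratio == 980, so the get_cycles() % 1024 test
	 * below skips the remote search only when the sample exceeds 980,
	 * i.e. roughly 4% of the time; about 96% of slowpath allocations will
	 * still scan other nodes for partial slabs.
	 */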
2908 if (!s->remote_node_defrag_ratio ||
2909 get_cycles() % 1024 > s->remote_node_defrag_ratio)
2910 return NULL;
2911
2912 do {
2913 cpuset_mems_cookie = read_mems_allowed_begin();
2914 zonelist = node_zonelist(mempolicy_slab_node(), pc->flags);
2915 for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {
2916 struct kmem_cache_node *n;
2917
2918 n = get_node(s, zone_to_nid(zone));
2919
2920 if (n && cpuset_zone_allowed(zone, pc->flags) &&
2921 n->nr_partial > s->min_partial) {
2922 slab = get_partial_node(s, n, pc);
2923 if (slab) {
2924 /*
2925 * Don't check read_mems_allowed_retry()
2926 * here - if mems_allowed was updated in
2927 * parallel, that was a harmless race
2928 * between allocation and the cpuset
2929 * update
2930 */
2931 return slab;
2932 }
2933 }
2934 }
2935 } while (read_mems_allowed_retry(cpuset_mems_cookie));
2936#endif /* CONFIG_NUMA */
2937 return NULL;
2938}
2939
2940/*
2941 * Get a partial slab from the preferred node or, failing that, any node.
2942 */
2943static struct slab *get_partial(struct kmem_cache *s, int node,
2944 struct partial_context *pc)
2945{
2946 struct slab *slab;
2947 int searchnode = node;
2948
2949 if (node == NUMA_NO_NODE)
2950 searchnode = numa_mem_id();
2951
2952 slab = get_partial_node(s, get_node(s, searchnode), pc);
2953 if (slab || (node != NUMA_NO_NODE && (pc->flags & __GFP_THISNODE)))
2954 return slab;
2955
2956 return get_any_partial(s, pc);
2957}
2958
2959#ifndef CONFIG_SLUB_TINY
2960
2961#ifdef CONFIG_PREEMPTION
2962/*
2963 * Calculate the next globally unique transaction for disambiguation
2964 * during cmpxchg. The transactions start with the cpu number and are then
2965 * incremented by TID_STEP (CONFIG_NR_CPUS rounded up to a power of two).
2966 */
2967#define TID_STEP roundup_pow_of_two(CONFIG_NR_CPUS)
2968#else
2969/*
2970 * No preemption supported therefore also no need to check for
2971 * different cpus.
2972 */
2973#define TID_STEP 1
2974#endif /* CONFIG_PREEMPTION */
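/*
 * Worked example (hypothetical 6-CPU preemptible kernel): TID_STEP becomes
 * roundup_pow_of_two(6) == 8, init_tid(3) == 3 and successive operations on
 * CPU 3 use tids 3, 11, 19, ...  With SLUB_DEBUG_CMPXCHG, tid % TID_STEP
 * recovers the cpu and tid / TID_STEP counts the operations, which is what
 * tid_to_cpu() and tid_to_event() below rely on.
 */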
2975
2976static inline unsigned long next_tid(unsigned long tid)
2977{
2978 return tid + TID_STEP;
2979}
2980
2981#ifdef SLUB_DEBUG_CMPXCHG
2982static inline unsigned int tid_to_cpu(unsigned long tid)
2983{
2984 return tid % TID_STEP;
2985}
2986
2987static inline unsigned long tid_to_event(unsigned long tid)
2988{
2989 return tid / TID_STEP;
2990}
2991#endif
2992
2993static inline unsigned int init_tid(int cpu)
2994{
2995 return cpu;
2996}
2997
2998static inline void note_cmpxchg_failure(const char *n,
2999 const struct kmem_cache *s, unsigned long tid)
3000{
3001#ifdef SLUB_DEBUG_CMPXCHG
3002 unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid);
3003
3004 pr_info("%s %s: cmpxchg redo ", n, s->name);
3005
3006#ifdef CONFIG_PREEMPTION
3007 if (tid_to_cpu(tid) != tid_to_cpu(actual_tid))
3008 pr_warn("due to cpu change %d -> %d\n",
3009 tid_to_cpu(tid), tid_to_cpu(actual_tid));
3010 else
3011#endif
3012 if (tid_to_event(tid) != tid_to_event(actual_tid))
3013 pr_warn("due to cpu running other code. Event %ld->%ld\n",
3014 tid_to_event(tid), tid_to_event(actual_tid));
3015 else
3016 pr_warn("for unknown reason: actual=%lx was=%lx target=%lx\n",
3017 actual_tid, tid, next_tid(tid));
3018#endif
3019 stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
3020}
3021
3022static void init_kmem_cache_cpus(struct kmem_cache *s)
3023{
3024 int cpu;
3025 struct kmem_cache_cpu *c;
3026
3027 for_each_possible_cpu(cpu) {
3028 c = per_cpu_ptr(s->cpu_slab, cpu);
3029 local_lock_init(&c->lock);
3030 c->tid = init_tid(cpu);
3031 }
3032}
3033
3034/*
3035 * Finishes removing the cpu slab. Merges cpu's freelist with slab's freelist,
3036 * unfreezes the slab and puts it on the proper list.
3037 * Assumes the slab has already been safely taken away from kmem_cache_cpu
3038 * by the caller.
3039 */
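/*
 * A small illustration of the splice performed below (hypothetical state):
 * with a cpu freelist A -> B -> NULL (free_delta == 2, freelist_tail == B)
 * and slab->freelist X -> NULL, stage two produces a slab freelist of
 * A -> B -> X -> NULL, drops inuse by 2 and clears the frozen bit; stage
 * three then discards the slab, adds it to the partial list or leaves it
 * alone depending on the resulting inuse/freelist state.
 */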
3040static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
3041 void *freelist)
3042{
3043 struct kmem_cache_node *n = get_node(s, slab_nid(slab));
3044 int free_delta = 0;
3045 void *nextfree, *freelist_iter, *freelist_tail;
3046 int tail = DEACTIVATE_TO_HEAD;
3047 unsigned long flags = 0;
3048 struct slab new;
3049 struct slab old;
3050
3051 if (READ_ONCE(slab->freelist)) {
3052 stat(s, DEACTIVATE_REMOTE_FREES);
3053 tail = DEACTIVATE_TO_TAIL;
3054 }
3055
3056 /*
3057 * Stage one: Count the objects on cpu's freelist as free_delta and
3058 * remember the last object in freelist_tail for later splicing.
3059 */
3060 freelist_tail = NULL;
3061 freelist_iter = freelist;
3062 while (freelist_iter) {
3063 nextfree = get_freepointer(s, freelist_iter);
3064
3065 /*
3066 * If 'nextfree' is invalid, it is possible that the object at
3067 * 'freelist_iter' is already corrupted. So isolate all objects
3068 * starting at 'freelist_iter' by skipping them.
3069 */
3070 if (freelist_corrupted(s, slab, &freelist_iter, nextfree))
3071 break;
3072
3073 freelist_tail = freelist_iter;
3074 free_delta++;
3075
3076 freelist_iter = nextfree;
3077 }
3078
3079 /*
3080 * Stage two: Unfreeze the slab while splicing the per-cpu
3081 * freelist to the head of slab's freelist.
3082 */
3083 do {
3084 old.freelist = READ_ONCE(slab->freelist);
3085 old.counters = READ_ONCE(slab->counters);
3086 VM_BUG_ON(!old.frozen);
3087
3088 /* Determine target state of the slab */
3089 new.counters = old.counters;
3090 new.frozen = 0;
3091 if (freelist_tail) {
3092 new.inuse -= free_delta;
3093 set_freepointer(s, freelist_tail, old.freelist);
3094 new.freelist = freelist;
3095 } else {
3096 new.freelist = old.freelist;
3097 }
3098 } while (!slab_update_freelist(s, slab,
3099 old.freelist, old.counters,
3100 new.freelist, new.counters,
3101 "unfreezing slab"));
3102
3103 /*
3104 * Stage three: Manipulate the slab list based on the updated state.
3105 */
3106 if (!new.inuse && n->nr_partial >= s->min_partial) {
3107 stat(s, DEACTIVATE_EMPTY);
3108 discard_slab(s, slab);
3109 stat(s, FREE_SLAB);
3110 } else if (new.freelist) {
3111 spin_lock_irqsave(&n->list_lock, flags);
3112 add_partial(n, slab, tail);
3113 spin_unlock_irqrestore(&n->list_lock, flags);
3114 stat(s, tail);
3115 } else {
3116 stat(s, DEACTIVATE_FULL);
3117 }
3118}
3119
3120#ifdef CONFIG_SLUB_CPU_PARTIAL
3121static void __put_partials(struct kmem_cache *s, struct slab *partial_slab)
3122{
3123 struct kmem_cache_node *n = NULL, *n2 = NULL;
3124 struct slab *slab, *slab_to_discard = NULL;
3125 unsigned long flags = 0;
3126
3127 while (partial_slab) {
3128 slab = partial_slab;
3129 partial_slab = slab->next;
3130
3131 n2 = get_node(s, slab_nid(slab));
3132 if (n != n2) {
3133 if (n)
3134 spin_unlock_irqrestore(&n->list_lock, flags);
3135
3136 n = n2;
3137 spin_lock_irqsave(&n->list_lock, flags);
3138 }
3139
3140 if (unlikely(!slab->inuse && n->nr_partial >= s->min_partial)) {
3141 slab->next = slab_to_discard;
3142 slab_to_discard = slab;
3143 } else {
3144 add_partial(n, slab, DEACTIVATE_TO_TAIL);
3145 stat(s, FREE_ADD_PARTIAL);
3146 }
3147 }
3148
3149 if (n)
3150 spin_unlock_irqrestore(&n->list_lock, flags);
3151
3152 while (slab_to_discard) {
3153 slab = slab_to_discard;
3154 slab_to_discard = slab_to_discard->next;
3155
3156 stat(s, DEACTIVATE_EMPTY);
3157 discard_slab(s, slab);
3158 stat(s, FREE_SLAB);
3159 }
3160}
3161
3162/*
3163 * Put all the cpu partial slabs to the node partial list.
3164 */
3165static void put_partials(struct kmem_cache *s)
3166{
3167 struct slab *partial_slab;
3168 unsigned long flags;
3169
3170 local_lock_irqsave(&s->cpu_slab->lock, flags);
3171 partial_slab = this_cpu_read(s->cpu_slab->partial);
3172 this_cpu_write(s->cpu_slab->partial, NULL);
3173 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3174
3175 if (partial_slab)
3176 __put_partials(s, partial_slab);
3177}
3178
3179static void put_partials_cpu(struct kmem_cache *s,
3180 struct kmem_cache_cpu *c)
3181{
3182 struct slab *partial_slab;
3183
3184 partial_slab = slub_percpu_partial(c);
3185 c->partial = NULL;
3186
3187 if (partial_slab)
3188 __put_partials(s, partial_slab);
3189}
3190
3191/*
3192 * Put a slab into a partial slab slot if available.
3193 *
3194 * If we did not find a slot then simply move all the partials to the
3195 * per node partial list.
3196 */
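/*
 * Example (illustrative): with s->cpu_partial_slabs == 4, the first four
 * slabs handed to put_cpu_partial() with drain set are chained on the
 * per-cpu partial list; the fifth call finds oldslab->slabs >= 4, moves the
 * existing chain to the node lists via __put_partials() and starts a new
 * per-cpu chain with just the incoming slab.
 */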
3197static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain)
3198{
3199 struct slab *oldslab;
3200 struct slab *slab_to_put = NULL;
3201 unsigned long flags;
3202 int slabs = 0;
3203
3204 local_lock_irqsave(&s->cpu_slab->lock, flags);
3205
3206 oldslab = this_cpu_read(s->cpu_slab->partial);
3207
3208 if (oldslab) {
3209 if (drain && oldslab->slabs >= s->cpu_partial_slabs) {
3210 /*
3211 * Partial array is full. Move the existing set to the
3212 * per node partial list. Postpone the actual unfreezing
3213 * outside of the critical section.
3214 */
3215 slab_to_put = oldslab;
3216 oldslab = NULL;
3217 } else {
3218 slabs = oldslab->slabs;
3219 }
3220 }
3221
3222 slabs++;
3223
3224 slab->slabs = slabs;
3225 slab->next = oldslab;
3226
3227 this_cpu_write(s->cpu_slab->partial, slab);
3228
3229 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3230
3231 if (slab_to_put) {
3232 __put_partials(s, slab_to_put);
3233 stat(s, CPU_PARTIAL_DRAIN);
3234 }
3235}
3236
3237#else /* CONFIG_SLUB_CPU_PARTIAL */
3238
3239static inline void put_partials(struct kmem_cache *s) { }
3240static inline void put_partials_cpu(struct kmem_cache *s,
3241 struct kmem_cache_cpu *c) { }
3242
3243#endif /* CONFIG_SLUB_CPU_PARTIAL */
3244
3245static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
3246{
3247 unsigned long flags;
3248 struct slab *slab;
3249 void *freelist;
3250
3251 local_lock_irqsave(&s->cpu_slab->lock, flags);
3252
3253 slab = c->slab;
3254 freelist = c->freelist;
3255
3256 c->slab = NULL;
3257 c->freelist = NULL;
3258 c->tid = next_tid(c->tid);
3259
3260 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3261
3262 if (slab) {
3263 deactivate_slab(s, slab, freelist);
3264 stat(s, CPUSLAB_FLUSH);
3265 }
3266}
3267
3268static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
3269{
3270 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
3271 void *freelist = c->freelist;
3272 struct slab *slab = c->slab;
3273
3274 c->slab = NULL;
3275 c->freelist = NULL;
3276 c->tid = next_tid(c->tid);
3277
3278 if (slab) {
3279 deactivate_slab(s, slab, freelist);
3280 stat(s, CPUSLAB_FLUSH);
3281 }
3282
3283 put_partials_cpu(s, c);
3284}
3285
3286struct slub_flush_work {
3287 struct work_struct work;
3288 struct kmem_cache *s;
3289 bool skip;
3290};
3291
3292/*
3293 * Flush cpu slab.
3294 *
3295 * Called from CPU work handler with migration disabled.
3296 */
3297static void flush_cpu_slab(struct work_struct *w)
3298{
3299 struct kmem_cache *s;
3300 struct kmem_cache_cpu *c;
3301 struct slub_flush_work *sfw;
3302
3303 sfw = container_of(w, struct slub_flush_work, work);
3304
3305 s = sfw->s;
3306 c = this_cpu_ptr(s->cpu_slab);
3307
3308 if (c->slab)
3309 flush_slab(s, c);
3310
3311 put_partials(s);
3312}
3313
3314static bool has_cpu_slab(int cpu, struct kmem_cache *s)
3315{
3316 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
3317
3318 return c->slab || slub_percpu_partial(c);
3319}
3320
3321static DEFINE_MUTEX(flush_lock);
3322static DEFINE_PER_CPU(struct slub_flush_work, slub_flush);
3323
3324static void flush_all_cpus_locked(struct kmem_cache *s)
3325{
3326 struct slub_flush_work *sfw;
3327 unsigned int cpu;
3328
3329 lockdep_assert_cpus_held();
3330 mutex_lock(&flush_lock);
3331
3332 for_each_online_cpu(cpu) {
3333 sfw = &per_cpu(slub_flush, cpu);
3334 if (!has_cpu_slab(cpu, s)) {
3335 sfw->skip = true;
3336 continue;
3337 }
3338 INIT_WORK(&sfw->work, flush_cpu_slab);
3339 sfw->skip = false;
3340 sfw->s = s;
3341 queue_work_on(cpu, flushwq, &sfw->work);
3342 }
3343
3344 for_each_online_cpu(cpu) {
3345 sfw = &per_cpu(slub_flush, cpu);
3346 if (sfw->skip)
3347 continue;
3348 flush_work(&sfw->work);
3349 }
3350
3351 mutex_unlock(&flush_lock);
3352}
3353
3354static void flush_all(struct kmem_cache *s)
3355{
3356 cpus_read_lock();
3357 flush_all_cpus_locked(s);
3358 cpus_read_unlock();
3359}
3360
3361/*
3362 * Use the cpu notifier to ensure that the cpu slabs are flushed when
3363 * necessary.
3364 */
3365static int slub_cpu_dead(unsigned int cpu)
3366{
3367 struct kmem_cache *s;
3368
3369 mutex_lock(&slab_mutex);
3370 list_for_each_entry(s, &slab_caches, list)
3371 __flush_cpu_slab(s, cpu);
3372 mutex_unlock(&slab_mutex);
3373 return 0;
3374}
3375
3376#else /* CONFIG_SLUB_TINY */
3377static inline void flush_all_cpus_locked(struct kmem_cache *s) { }
3378static inline void flush_all(struct kmem_cache *s) { }
3379static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) { }
3380static inline int slub_cpu_dead(unsigned int cpu) { return 0; }
3381#endif /* CONFIG_SLUB_TINY */
3382
3383/*
3384 * Check if the objects in a per cpu structure fit NUMA
3385 * locality expectations.
3386 */
3387static inline int node_match(struct slab *slab, int node)
3388{
3389#ifdef CONFIG_NUMA
3390 if (node != NUMA_NO_NODE && slab_nid(slab) != node)
3391 return 0;
3392#endif
3393 return 1;
3394}
3395
3396#ifdef CONFIG_SLUB_DEBUG
3397static int count_free(struct slab *slab)
3398{
3399 return slab->objects - slab->inuse;
3400}
3401
3402static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
3403{
3404 return atomic_long_read(&n->total_objects);
3405}
3406
3407/* Supports checking bulk free of a constructed freelist */
3408static inline bool free_debug_processing(struct kmem_cache *s,
3409 struct slab *slab, void *head, void *tail, int *bulk_cnt,
3410 unsigned long addr, depot_stack_handle_t handle)
3411{
3412 bool checks_ok = false;
3413 void *object = head;
3414 int cnt = 0;
3415
3416 if (s->flags & SLAB_CONSISTENCY_CHECKS) {
3417 if (!check_slab(s, slab))
3418 goto out;
3419 }
3420
3421 if (slab->inuse < *bulk_cnt) {
3422 slab_err(s, slab, "Slab has %d allocated objects but %d are to be freed\n",
3423 slab->inuse, *bulk_cnt);
3424 goto out;
3425 }
3426
3427next_object:
3428
3429 if (++cnt > *bulk_cnt)
3430 goto out_cnt;
3431
3432 if (s->flags & SLAB_CONSISTENCY_CHECKS) {
3433 if (!free_consistency_checks(s, slab, object, addr))
3434 goto out;
3435 }
3436
3437 if (s->flags & SLAB_STORE_USER)
3438 set_track_update(s, object, TRACK_FREE, addr, handle);
3439 trace(s, slab, object, 0);
3440 /* Freepointer not overwritten by init_object(), SLAB_POISON moved it */
3441 init_object(s, object, SLUB_RED_INACTIVE);
3442
3443 /* Reached end of constructed freelist yet? */
3444 if (object != tail) {
3445 object = get_freepointer(s, object);
3446 goto next_object;
3447 }
3448 checks_ok = true;
3449
3450out_cnt:
3451 if (cnt != *bulk_cnt) {
3452 slab_err(s, slab, "Bulk free expected %d objects but found %d\n",
3453 *bulk_cnt, cnt);
3454 *bulk_cnt = cnt;
3455 }
3456
3457out:
3458
3459 if (!checks_ok)
3460 slab_fix(s, "Object at 0x%p not freed", object);
3461
3462 return checks_ok;
3463}
3464#endif /* CONFIG_SLUB_DEBUG */
3465
3466#if defined(CONFIG_SLUB_DEBUG) || defined(SLAB_SUPPORTS_SYSFS)
3467static unsigned long count_partial(struct kmem_cache_node *n,
3468 int (*get_count)(struct slab *))
3469{
3470 unsigned long flags;
3471 unsigned long x = 0;
3472 struct slab *slab;
3473
3474 spin_lock_irqsave(&n->list_lock, flags);
3475 list_for_each_entry(slab, &n->partial, slab_list)
3476 x += get_count(slab);
3477 spin_unlock_irqrestore(&n->list_lock, flags);
3478 return x;
3479}
3480#endif /* CONFIG_SLUB_DEBUG || SLAB_SUPPORTS_SYSFS */
3481
3482#ifdef CONFIG_SLUB_DEBUG
3483#define MAX_PARTIAL_TO_SCAN 10000
3484
3485static unsigned long count_partial_free_approx(struct kmem_cache_node *n)
3486{
3487 unsigned long flags;
3488 unsigned long x = 0;
3489 struct slab *slab;
3490
3491 spin_lock_irqsave(&n->list_lock, flags);
3492 if (n->nr_partial <= MAX_PARTIAL_TO_SCAN) {
3493 list_for_each_entry(slab, &n->partial, slab_list)
3494 x += slab->objects - slab->inuse;
3495 } else {
3496 /*
3497 * For a long list, approximate the total count of objects in
3498 * it to meet the limit on the number of slabs to scan.
3499 * Scan from both the list's head and tail for better accuracy.
3500 */
3501 unsigned long scanned = 0;
3502
3503 list_for_each_entry(slab, &n->partial, slab_list) {
3504 x += slab->objects - slab->inuse;
3505 if (++scanned == MAX_PARTIAL_TO_SCAN / 2)
3506 break;
3507 }
3508 list_for_each_entry_reverse(slab, &n->partial, slab_list) {
3509 x += slab->objects - slab->inuse;
3510 if (++scanned == MAX_PARTIAL_TO_SCAN)
3511 break;
3512 }
3513 x = mult_frac(x, n->nr_partial, scanned);
3514 x = min(x, node_nr_objs(n));
3515 }
3516 spin_unlock_irqrestore(&n->list_lock, flags);
3517 return x;
3518}
3519
3520static noinline void
3521slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
3522{
3523 static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
3524 DEFAULT_RATELIMIT_BURST);
3525 int cpu = raw_smp_processor_id();
3526 int node;
3527 struct kmem_cache_node *n;
3528
3529 if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs))
3530 return;
3531
3532 pr_warn("SLUB: Unable to allocate memory on CPU %u (of node %d) on node %d, gfp=%#x(%pGg)\n",
3533 cpu, cpu_to_node(cpu), nid, gfpflags, &gfpflags);
3534 pr_warn(" cache: %s, object size: %u, buffer size: %u, default order: %u, min order: %u\n",
3535 s->name, s->object_size, s->size, oo_order(s->oo),
3536 oo_order(s->min));
3537
3538 if (oo_order(s->min) > get_order(s->object_size))
3539 pr_warn(" %s debugging increased min order, use slab_debug=O to disable.\n",
3540 s->name);
3541
3542 for_each_kmem_cache_node(s, node, n) {
3543 unsigned long nr_slabs;
3544 unsigned long nr_objs;
3545 unsigned long nr_free;
3546
3547 nr_free = count_partial_free_approx(n);
3548 nr_slabs = node_nr_slabs(n);
3549 nr_objs = node_nr_objs(n);
3550
3551 pr_warn(" node %d: slabs: %ld, objs: %ld, free: %ld\n",
3552 node, nr_slabs, nr_objs, nr_free);
3553 }
3554}
3555#else /* CONFIG_SLUB_DEBUG */
3556static inline void
3557slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) { }
3558#endif
3559
3560static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags)
3561{
3562 if (unlikely(slab_test_pfmemalloc(slab)))
3563 return gfp_pfmemalloc_allowed(gfpflags);
3564
3565 return true;
3566}
3567
3568#ifndef CONFIG_SLUB_TINY
3569static inline bool
3570__update_cpu_freelist_fast(struct kmem_cache *s,
3571 void *freelist_old, void *freelist_new,
3572 unsigned long tid)
3573{
3574 freelist_aba_t old = { .freelist = freelist_old, .counter = tid };
3575 freelist_aba_t new = { .freelist = freelist_new, .counter = next_tid(tid) };
3576
3577 return this_cpu_try_cmpxchg_freelist(s->cpu_slab->freelist_tid.full,
3578 &old.full, new.full);
3579}
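/*
 * Illustration (hypothetical values): the fastpath packs {freelist, tid}
 * into one freelist_aba_t and replaces e.g. {A, 42} with {B, 42 + TID_STEP}
 * in a single this_cpu cmpxchg; if another allocation, free or cpu migration
 * bumped the tid or swapped the freelist in the meantime, the compare fails
 * and the caller retries or falls back to the slow path.
 */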
3580
3581/*
3582 * Check the slab->freelist and either transfer the freelist to the
3583 * per cpu freelist or deactivate the slab.
3584 *
3585 * The slab is still frozen if the return value is not NULL.
3586 *
3587 * If this function returns NULL then the slab has been unfrozen.
3588 */
3589static inline void *get_freelist(struct kmem_cache *s, struct slab *slab)
3590{
3591 struct slab new;
3592 unsigned long counters;
3593 void *freelist;
3594
3595 lockdep_assert_held(this_cpu_ptr(&s->cpu_slab->lock));
3596
3597 do {
3598 freelist = slab->freelist;
3599 counters = slab->counters;
3600
3601 new.counters = counters;
3602
3603 new.inuse = slab->objects;
3604 new.frozen = freelist != NULL;
3605
3606 } while (!__slab_update_freelist(s, slab,
3607 freelist, counters,
3608 NULL, new.counters,
3609 "get_freelist"));
3610
3611 return freelist;
3612}
3613
3614/*
3615 * Freeze the partial slab and return the pointer to the freelist.
3616 */
3617static inline void *freeze_slab(struct kmem_cache *s, struct slab *slab)
3618{
3619 struct slab new;
3620 unsigned long counters;
3621 void *freelist;
3622
3623 do {
3624 freelist = slab->freelist;
3625 counters = slab->counters;
3626
3627 new.counters = counters;
3628 VM_BUG_ON(new.frozen);
3629
3630 new.inuse = slab->objects;
3631 new.frozen = 1;
3632
3633 } while (!slab_update_freelist(s, slab,
3634 freelist, counters,
3635 NULL, new.counters,
3636 "freeze_slab"));
3637
3638 return freelist;
3639}
3640
3641/*
3642 * Slow path. The lockless freelist is empty or we need to perform
3643 * debugging duties.
3644 *
3645 * Processing is still very fast if new objects have been freed to the
3646 * regular freelist. In that case we simply take over the regular freelist
3647 * as the lockless freelist and zap the regular freelist.
3648 *
3649 * If that is not working then we fall back to the partial lists. We take the
3650 * first element of the freelist as the object to allocate now and move the
3651 * rest of the freelist to the lockless freelist.
3652 *
3653 * And if we were unable to get a new slab from the partial slab lists then
3654 * we need to allocate a new slab. This is the slowest path since it involves
3655 * a call to the page allocator and the setup of a new slab.
3656 *
3657 * Version of __slab_alloc to use when we know that preemption is
3658 * already disabled (which is the case for bulk allocation).
3659 */
3660static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
3661 unsigned long addr, struct kmem_cache_cpu *c, unsigned int orig_size)
3662{
3663 void *freelist;
3664 struct slab *slab;
3665 unsigned long flags;
3666 struct partial_context pc;
3667 bool try_thisnode = true;
3668
3669 stat(s, ALLOC_SLOWPATH);
3670
3671reread_slab:
3672
3673 slab = READ_ONCE(c->slab);
3674 if (!slab) {
3675 /*
3676 * if the node is not online or has no normal memory, just
3677 * ignore the node constraint
3678 */
3679 if (unlikely(node != NUMA_NO_NODE &&
3680 !node_isset(node, slab_nodes)))
3681 node = NUMA_NO_NODE;
3682 goto new_slab;
3683 }
3684
3685 if (unlikely(!node_match(slab, node))) {
3686 /*
3687 * same as above but node_match() being false already
3688 * implies node != NUMA_NO_NODE
3689 */
3690 if (!node_isset(node, slab_nodes)) {
3691 node = NUMA_NO_NODE;
3692 } else {
3693 stat(s, ALLOC_NODE_MISMATCH);
3694 goto deactivate_slab;
3695 }
3696 }
3697
3698 /*
3699 * By rights, we should be searching for a slab page that was
3700 * PFMEMALLOC but right now, we are losing the pfmemalloc
3701 * information when the page leaves the per-cpu allocator
3702 */
3703 if (unlikely(!pfmemalloc_match(slab, gfpflags)))
3704 goto deactivate_slab;
3705
3706 /* must check again c->slab in case we got preempted and it changed */
3707 local_lock_irqsave(&s->cpu_slab->lock, flags);
3708 if (unlikely(slab != c->slab)) {
3709 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3710 goto reread_slab;
3711 }
3712 freelist = c->freelist;
3713 if (freelist)
3714 goto load_freelist;
3715
3716 freelist = get_freelist(s, slab);
3717
3718 if (!freelist) {
3719 c->slab = NULL;
3720 c->tid = next_tid(c->tid);
3721 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3722 stat(s, DEACTIVATE_BYPASS);
3723 goto new_slab;
3724 }
3725
3726 stat(s, ALLOC_REFILL);
3727
3728load_freelist:
3729
3730 lockdep_assert_held(this_cpu_ptr(&s->cpu_slab->lock));
3731
3732 /*
3733 * freelist is pointing to the list of objects to be used.
3734 * slab is pointing to the slab from which the objects are obtained.
3735 * That slab must be frozen for per cpu allocations to work.
3736 */
3737 VM_BUG_ON(!c->slab->frozen);
3738 c->freelist = get_freepointer(s, freelist);
3739 c->tid = next_tid(c->tid);
3740 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3741 return freelist;
3742
3743deactivate_slab:
3744
3745 local_lock_irqsave(&s->cpu_slab->lock, flags);
3746 if (slab != c->slab) {
3747 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3748 goto reread_slab;
3749 }
3750 freelist = c->freelist;
3751 c->slab = NULL;
3752 c->freelist = NULL;
3753 c->tid = next_tid(c->tid);
3754 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3755 deactivate_slab(s, slab, freelist);
3756
3757new_slab:
3758
3759#ifdef CONFIG_SLUB_CPU_PARTIAL
3760 while (slub_percpu_partial(c)) {
3761 local_lock_irqsave(&s->cpu_slab->lock, flags);
3762 if (unlikely(c->slab)) {
3763 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3764 goto reread_slab;
3765 }
3766 if (unlikely(!slub_percpu_partial(c))) {
3767 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3768 /* we were preempted and partial list got empty */
3769 goto new_objects;
3770 }
3771
3772 slab = slub_percpu_partial(c);
3773 slub_set_percpu_partial(c, slab);
3774
3775 if (likely(node_match(slab, node) &&
3776 pfmemalloc_match(slab, gfpflags))) {
3777 c->slab = slab;
3778 freelist = get_freelist(s, slab);
3779 VM_BUG_ON(!freelist);
3780 stat(s, CPU_PARTIAL_ALLOC);
3781 goto load_freelist;
3782 }
3783
3784 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3785
3786 slab->next = NULL;
3787 __put_partials(s, slab);
3788 }
3789#endif
3790
3791new_objects:
3792
3793 pc.flags = gfpflags;
3794 /*
3795 * When a preferred node is indicated but __GFP_THISNODE is not set:
3796 *
3797 * 1) try to get a partial slab from target node only by having
3798 * __GFP_THISNODE in pc.flags for get_partial()
3799 * 2) if 1) failed, try to allocate a new slab from target node with
3800 * GFP_NOWAIT | __GFP_THISNODE opportunistically
3801 * 3) if 2) failed, retry with original gfpflags which will allow
3802 * get_partial() to try partial lists of other nodes before potentially
3803 * allocating a new page from other nodes
3804 */
3805 if (unlikely(node != NUMA_NO_NODE && !(gfpflags & __GFP_THISNODE)
3806 && try_thisnode))
3807 pc.flags = GFP_NOWAIT | __GFP_THISNODE;
3808
3809 pc.orig_size = orig_size;
3810 slab = get_partial(s, node, &pc);
3811 if (slab) {
3812 if (kmem_cache_debug(s)) {
3813 freelist = pc.object;
3814 /*
3815 * For debug caches here we had to go through
3816 * alloc_single_from_partial() so just store the
3817 * tracking info and return the object.
3818 */
3819 if (s->flags & SLAB_STORE_USER)
3820 set_track(s, freelist, TRACK_ALLOC, addr);
3821
3822 return freelist;
3823 }
3824
3825 freelist = freeze_slab(s, slab);
3826 goto retry_load_slab;
3827 }
3828
3829 slub_put_cpu_ptr(s->cpu_slab);
3830 slab = new_slab(s, pc.flags, node);
3831 c = slub_get_cpu_ptr(s->cpu_slab);
3832
3833 if (unlikely(!slab)) {
3834 if (node != NUMA_NO_NODE && !(gfpflags & __GFP_THISNODE)
3835 && try_thisnode) {
3836 try_thisnode = false;
3837 goto new_objects;
3838 }
3839 slab_out_of_memory(s, gfpflags, node);
3840 return NULL;
3841 }
3842
3843 stat(s, ALLOC_SLAB);
3844
3845 if (kmem_cache_debug(s)) {
3846 freelist = alloc_single_from_new_slab(s, slab, orig_size);
3847
3848 if (unlikely(!freelist))
3849 goto new_objects;
3850
3851 if (s->flags & SLAB_STORE_USER)
3852 set_track(s, freelist, TRACK_ALLOC, addr);
3853
3854 return freelist;
3855 }
3856
3857 /*
3858 * No other reference to the slab yet so we can
3859 * muck around with it freely without cmpxchg
3860 */
3861 freelist = slab->freelist;
3862 slab->freelist = NULL;
3863 slab->inuse = slab->objects;
3864 slab->frozen = 1;
3865
3866 inc_slabs_node(s, slab_nid(slab), slab->objects);
3867
3868 if (unlikely(!pfmemalloc_match(slab, gfpflags))) {
3869 /*
3870 * For !pfmemalloc_match() case we don't load freelist so that
3871 * we don't make further mismatched allocations easier.
3872 */
3873 deactivate_slab(s, slab, get_freepointer(s, freelist));
3874 return freelist;
3875 }
3876
3877retry_load_slab:
3878
3879 local_lock_irqsave(&s->cpu_slab->lock, flags);
3880 if (unlikely(c->slab)) {
3881 void *flush_freelist = c->freelist;
3882 struct slab *flush_slab = c->slab;
3883
3884 c->slab = NULL;
3885 c->freelist = NULL;
3886 c->tid = next_tid(c->tid);
3887
3888 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3889
3890 deactivate_slab(s, flush_slab, flush_freelist);
3891
3892 stat(s, CPUSLAB_FLUSH);
3893
3894 goto retry_load_slab;
3895 }
3896 c->slab = slab;
3897
3898 goto load_freelist;
3899}
3900
3901/*
3902 * A wrapper for ___slab_alloc() for contexts where preemption is not yet
3903 * disabled. Compensates for possible cpu changes by refetching the per cpu area
3904 * pointer.
3905 */
3906static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
3907 unsigned long addr, struct kmem_cache_cpu *c, unsigned int orig_size)
3908{
3909 void *p;
3910
3911#ifdef CONFIG_PREEMPT_COUNT
3912 /*
3913 * We may have been preempted and rescheduled on a different
3914 * cpu before disabling preemption. Need to reload cpu area
3915 * pointer.
3916 */
3917 c = slub_get_cpu_ptr(s->cpu_slab);
3918#endif
3919
3920 p = ___slab_alloc(s, gfpflags, node, addr, c, orig_size);
3921#ifdef CONFIG_PREEMPT_COUNT
3922 slub_put_cpu_ptr(s->cpu_slab);
3923#endif
3924 return p;
3925}
3926
3927static __always_inline void *__slab_alloc_node(struct kmem_cache *s,
3928 gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
3929{
3930 struct kmem_cache_cpu *c;
3931 struct slab *slab;
3932 unsigned long tid;
3933 void *object;
3934
3935redo:
3936 /*
3937 * Must read kmem_cache cpu data via this cpu ptr. Preemption is
3938 * enabled. We may switch back and forth between cpus while
3939 * reading from one cpu area. That does not matter as long
3940 * as we end up on the original cpu again when doing the cmpxchg.
3941 *
3942 * We must guarantee that tid and kmem_cache_cpu are retrieved on the
3943 * same cpu. We read first the kmem_cache_cpu pointer and use it to read
3944 * the tid. If we are preempted and switched to another cpu between the
3945 * two reads, it's OK as the two are still associated with the same cpu
3946 * and cmpxchg later will validate the cpu.
3947 */
3948 c = raw_cpu_ptr(s->cpu_slab);
3949 tid = READ_ONCE(c->tid);
3950
3951 /*
3952 * Irqless object alloc/free algorithm used here depends on sequence
3953 * of fetching cpu_slab's data. tid should be fetched before anything
3954 * on c to guarantee that object and slab associated with previous tid
3955 * won't be used with current tid. If we fetch tid first, object and
3956 * slab could be one associated with next tid and our alloc/free
3957 * request will fail. In this case we will retry, so this is not a problem.
3958 */
3959 barrier();
3960
3961 /*
3962 * The transaction ids are globally unique per cpu and per operation on
3963 * a per cpu queue. Thus they guarantee that the cmpxchg_double
3964 * occurs on the right processor and that there was no operation on the
3965 * linked list in between.
3966 */
3967
3968 object = c->freelist;
3969 slab = c->slab;
3970
3971#ifdef CONFIG_NUMA
3972 if (static_branch_unlikely(&strict_numa) &&
3973 node == NUMA_NO_NODE) {
3974
3975 struct mempolicy *mpol = current->mempolicy;
3976
3977 if (mpol) {
3978 /*
3979 * Special BIND rule support. If existing slab
3980 * is in permitted set then do not redirect
3981 * to a particular node.
3982 * Otherwise we apply the memory policy to get
3983 * the node we need to allocate on.
3984 */
3985 if (mpol->mode != MPOL_BIND || !slab ||
3986 !node_isset(slab_nid(slab), mpol->nodes))
3987
3988 node = mempolicy_slab_node();
3989 }
3990 }
3991#endif
3992
3993 if (!USE_LOCKLESS_FAST_PATH() ||
3994 unlikely(!object || !slab || !node_match(slab, node))) {
3995 object = __slab_alloc(s, gfpflags, node, addr, c, orig_size);
3996 } else {
3997 void *next_object = get_freepointer_safe(s, object);
3998
3999 /*
4000 * The cmpxchg will only match if there was no additional
4001 * operation and if we are on the right processor.
4002 *
4003 * The cmpxchg does the following atomically (without lock
4004 * semantics!)
4005 * 1. Relocate first pointer to the current per cpu area.
4006 * 2. Verify that tid and freelist have not been changed
4007 * 3. If they were not changed replace tid and freelist
4008 *
4009 * Since this is without lock semantics the protection is only
4010 * against code executing on this cpu *not* from access by
4011 * other cpus.
4012 */
4013 if (unlikely(!__update_cpu_freelist_fast(s, object, next_object, tid))) {
4014 note_cmpxchg_failure("slab_alloc", s, tid);
4015 goto redo;
4016 }
4017 prefetch_freepointer(s, next_object);
4018 stat(s, ALLOC_FASTPATH);
4019 }
4020
4021 return object;
4022}
4023#else /* CONFIG_SLUB_TINY */
4024static void *__slab_alloc_node(struct kmem_cache *s,
4025 gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
4026{
4027 struct partial_context pc;
4028 struct slab *slab;
4029 void *object;
4030
4031 pc.flags = gfpflags;
4032 pc.orig_size = orig_size;
4033 slab = get_partial(s, node, &pc);
4034
4035 if (slab)
4036 return pc.object;
4037
4038 slab = new_slab(s, gfpflags, node);
4039 if (unlikely(!slab)) {
4040 slab_out_of_memory(s, gfpflags, node);
4041 return NULL;
4042 }
4043
4044 object = alloc_single_from_new_slab(s, slab, orig_size);
4045
4046 return object;
4047}
4048#endif /* CONFIG_SLUB_TINY */
4049
4050/*
4051 * If the object has been wiped upon free, make sure it's fully initialized by
4052 * zeroing out the freelist pointer.
4053 *
4054 * Note that we also wipe custom freelist pointers.
4055 */
4056static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
4057 void *obj)
4058{
4059 if (unlikely(slab_want_init_on_free(s)) && obj &&
4060 !freeptr_outside_object(s))
4061 memset((void *)((char *)kasan_reset_tag(obj) + s->offset),
4062 0, sizeof(void *));
4063}
4064
4065static __fastpath_inline
4066struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
4067{
4068 flags &= gfp_allowed_mask;
4069
4070 might_alloc(flags);
4071
4072 if (unlikely(should_failslab(s, flags)))
4073 return NULL;
4074
4075 return s;
4076}
4077
4078static __fastpath_inline
4079bool slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
4080 gfp_t flags, size_t size, void **p, bool init,
4081 unsigned int orig_size)
4082{
4083 unsigned int zero_size = s->object_size;
4084 bool kasan_init = init;
4085 size_t i;
4086 gfp_t init_flags = flags & gfp_allowed_mask;
4087
4088 /*
4089 * For a kmalloc object, the allocated memory size (object_size) is likely
4090 * larger than the requested size (orig_size). If redzone checking is
4091 * enabled for the extra space, don't zero it, as it will be redzoned
4092 * soon. The redzone operation for this extra space can be seen as a
4093 * replacement for the current poisoning under certain debug options, and
4094 * won't break other sanity checks.
4095 */
4096 if (kmem_cache_debug_flags(s, SLAB_STORE_USER | SLAB_RED_ZONE) &&
4097 (s->flags & SLAB_KMALLOC))
4098 zero_size = orig_size;
4099
4100 /*
4101 * When slab_debug is enabled, avoid memory initialization integrated
4102 * into KASAN and instead zero out the memory via the memset below with
4103 * the proper size. Otherwise, KASAN might overwrite SLUB redzones and
4104 * cause false-positive reports. This does not lead to a performance
4105 * penalty on production builds, as slab_debug is not intended to be
4106 * enabled there.
4107 */
4108 if (__slub_debug_enabled())
4109 kasan_init = false;
4110
4111 /*
4112 * As memory initialization might be integrated into KASAN,
4113 * kasan_slab_alloc and initialization memset must be
4114 * kept together to avoid discrepancies in behavior.
4115 *
4116 * As p[i] might get tagged, memset and kmemleak hook come after KASAN.
4117 */
4118 for (i = 0; i < size; i++) {
4119 p[i] = kasan_slab_alloc(s, p[i], init_flags, kasan_init);
4120 if (p[i] && init && (!kasan_init ||
4121 !kasan_has_integrated_init()))
4122 memset(p[i], 0, zero_size);
4123 kmemleak_alloc_recursive(p[i], s->object_size, 1,
4124 s->flags, init_flags);
4125 kmsan_slab_alloc(s, p[i], init_flags);
4126 alloc_tagging_slab_alloc_hook(s, p[i], flags);
4127 }
4128
4129 return memcg_slab_post_alloc_hook(s, lru, flags, size, p);
4130}
4131
4132/*
4133 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
4134 * have the fastpath folded into their functions. So no function call
4135 * overhead for requests that can be satisfied on the fastpath.
4136 *
4137 * The fastpath works by first checking if the lockless freelist can be used.
4138 * If not then __slab_alloc is called for slow processing.
4139 *
4140 * Otherwise we can simply pick the next object from the lockless free list.
4141 */
4142static __fastpath_inline void *slab_alloc_node(struct kmem_cache *s, struct list_lru *lru,
4143 gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
4144{
4145 void *object;
4146 bool init = false;
4147
4148 s = slab_pre_alloc_hook(s, gfpflags);
4149 if (unlikely(!s))
4150 return NULL;
4151
4152 object = kfence_alloc(s, orig_size, gfpflags);
4153 if (unlikely(object))
4154 goto out;
4155
4156 object = __slab_alloc_node(s, gfpflags, node, addr, orig_size);
4157
4158 maybe_wipe_obj_freeptr(s, object);
4159 init = slab_want_init_on_alloc(gfpflags, s);
4160
4161out:
4162 /*
4163 * When init equals 'true', like for kzalloc() family, only
4164 * @orig_size bytes might be zeroed instead of s->object_size.
4165 * In case this fails due to memcg_slab_post_alloc_hook(),
4166 * object is set to NULL.
4167 */
4168 slab_post_alloc_hook(s, lru, gfpflags, 1, &object, init, orig_size);
4169
4170 return object;
4171}
4172
4173void *kmem_cache_alloc_noprof(struct kmem_cache *s, gfp_t gfpflags)
4174{
4175 void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE, _RET_IP_,
4176 s->object_size);
4177
4178 trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, NUMA_NO_NODE);
4179
4180 return ret;
4181}
4182EXPORT_SYMBOL(kmem_cache_alloc_noprof);
4183
4184void *kmem_cache_alloc_lru_noprof(struct kmem_cache *s, struct list_lru *lru,
4185 gfp_t gfpflags)
4186{
4187 void *ret = slab_alloc_node(s, lru, gfpflags, NUMA_NO_NODE, _RET_IP_,
4188 s->object_size);
4189
4190 trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, NUMA_NO_NODE);
4191
4192 return ret;
4193}
4194EXPORT_SYMBOL(kmem_cache_alloc_lru_noprof);
4195
4196bool kmem_cache_charge(void *objp, gfp_t gfpflags)
4197{
4198 if (!memcg_kmem_online())
4199 return true;
4200
4201 return memcg_slab_post_charge(objp, gfpflags);
4202}
4203EXPORT_SYMBOL(kmem_cache_charge);
4204
4205/**
4206 * kmem_cache_alloc_node - Allocate an object on the specified node
4207 * @s: The cache to allocate from.
4208 * @gfpflags: See kmalloc().
4209 * @node: node number of the target node.
4210 *
4211 * Identical to kmem_cache_alloc but it will allocate memory on the given
4212 * node, which can improve the performance for cpu bound structures.
4213 *
4214 * Fallback to other node is possible if __GFP_THISNODE is not set.
4215 *
4216 * Return: pointer to the new object or %NULL in case of error
4217 */
4218void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t gfpflags, int node)
4219{
4220 void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, s->object_size);
4221
4222 trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, node);
4223
4224 return ret;
4225}
4226EXPORT_SYMBOL(kmem_cache_alloc_node_noprof);
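/*
 * Usage sketch (hypothetical caller and cache, not part of SLUB itself),
 * going through the kmem_cache_alloc_node() wrapper that resolves to the
 * function above:
 *
 *	struct foo *f = kmem_cache_alloc_node(foo_cache, GFP_KERNEL, nid);
 *	if (!f)
 *		return -ENOMEM;
 *
 * Unless __GFP_THISNODE is passed, the object may still come from another
 * node when 'nid' has no free memory.
 */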
4227
4228/*
4229 * To avoid unnecessary overhead, we pass through large allocation requests
4230 * directly to the page allocator. We use __GFP_COMP, because we will need to
4231 * know the allocation order to free the pages properly in kfree.
4232 */
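/*
 * For instance (assuming 4K pages): a 3 MB request gives get_order() == 10,
 * so a 4 MB compound page is allocated and accounted to
 * NR_SLAB_UNRECLAIMABLE_B below; kfree() later reads the folio order to
 * return all of it to the page allocator.
 */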
4233static void *___kmalloc_large_node(size_t size, gfp_t flags, int node)
4234{
4235 struct folio *folio;
4236 void *ptr = NULL;
4237 unsigned int order = get_order(size);
4238
4239 if (unlikely(flags & GFP_SLAB_BUG_MASK))
4240 flags = kmalloc_fix_flags(flags);
4241
4242 flags |= __GFP_COMP;
4243 folio = (struct folio *)alloc_pages_node_noprof(node, flags, order);
4244 if (folio) {
4245 ptr = folio_address(folio);
4246 lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B,
4247 PAGE_SIZE << order);
4248 }
4249
4250 ptr = kasan_kmalloc_large(ptr, size, flags);
4251 /* As ptr might get tagged, call kmemleak hook after KASAN. */
4252 kmemleak_alloc(ptr, size, 1, flags);
4253 kmsan_kmalloc_large(ptr, size, flags);
4254
4255 return ptr;
4256}
4257
4258void *__kmalloc_large_noprof(size_t size, gfp_t flags)
4259{
4260 void *ret = ___kmalloc_large_node(size, flags, NUMA_NO_NODE);
4261
4262 trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
4263 flags, NUMA_NO_NODE);
4264 return ret;
4265}
4266EXPORT_SYMBOL(__kmalloc_large_noprof);
4267
4268void *__kmalloc_large_node_noprof(size_t size, gfp_t flags, int node)
4269{
4270 void *ret = ___kmalloc_large_node(size, flags, node);
4271
4272 trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
4273 flags, node);
4274 return ret;
4275}
4276EXPORT_SYMBOL(__kmalloc_large_node_noprof);
4277
4278static __always_inline
4279void *__do_kmalloc_node(size_t size, kmem_buckets *b, gfp_t flags, int node,
4280 unsigned long caller)
4281{
4282 struct kmem_cache *s;
4283 void *ret;
4284
4285 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
4286 ret = __kmalloc_large_node_noprof(size, flags, node);
4287 trace_kmalloc(caller, ret, size,
4288 PAGE_SIZE << get_order(size), flags, node);
4289 return ret;
4290 }
4291
4292 if (unlikely(!size))
4293 return ZERO_SIZE_PTR;
4294
4295 s = kmalloc_slab(size, b, flags, caller);
4296
4297 ret = slab_alloc_node(s, NULL, flags, node, caller, size);
4298 ret = kasan_kmalloc(s, ret, size, flags);
4299 trace_kmalloc(caller, ret, size, s->size, flags, node);
4300 return ret;
4301}
4302void *__kmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node)
4303{
4304 return __do_kmalloc_node(size, PASS_BUCKET_PARAM(b), flags, node, _RET_IP_);
4305}
4306EXPORT_SYMBOL(__kmalloc_node_noprof);
4307
4308void *__kmalloc_noprof(size_t size, gfp_t flags)
4309{
4310 return __do_kmalloc_node(size, NULL, flags, NUMA_NO_NODE, _RET_IP_);
4311}
4312EXPORT_SYMBOL(__kmalloc_noprof);
4313
4314void *__kmalloc_node_track_caller_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags,
4315 int node, unsigned long caller)
4316{
4317 return __do_kmalloc_node(size, PASS_BUCKET_PARAM(b), flags, node, caller);
4318
4319}
4320EXPORT_SYMBOL(__kmalloc_node_track_caller_noprof);
4321
4322void *__kmalloc_cache_noprof(struct kmem_cache *s, gfp_t gfpflags, size_t size)
4323{
4324 void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE,
4325 _RET_IP_, size);
4326
4327 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, NUMA_NO_NODE);
4328
4329 ret = kasan_kmalloc(s, ret, size, gfpflags);
4330 return ret;
4331}
4332EXPORT_SYMBOL(__kmalloc_cache_noprof);
4333
4334void *__kmalloc_cache_node_noprof(struct kmem_cache *s, gfp_t gfpflags,
4335 int node, size_t size)
4336{
4337 void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, size);
4338
4339 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, node);
4340
4341 ret = kasan_kmalloc(s, ret, size, gfpflags);
4342 return ret;
4343}
4344EXPORT_SYMBOL(__kmalloc_cache_node_noprof);
4345
4346static noinline void free_to_partial_list(
4347 struct kmem_cache *s, struct slab *slab,
4348 void *head, void *tail, int bulk_cnt,
4349 unsigned long addr)
4350{
4351 struct kmem_cache_node *n = get_node(s, slab_nid(slab));
4352 struct slab *slab_free = NULL;
4353 int cnt = bulk_cnt;
4354 unsigned long flags;
4355 depot_stack_handle_t handle = 0;
4356
4357 if (s->flags & SLAB_STORE_USER)
4358 handle = set_track_prepare();
4359
4360 spin_lock_irqsave(&n->list_lock, flags);
4361
4362 if (free_debug_processing(s, slab, head, tail, &cnt, addr, handle)) {
4363 void *prior = slab->freelist;
4364
4365 /* Perform the actual freeing while we still hold the locks */
4366 slab->inuse -= cnt;
4367 set_freepointer(s, tail, prior);
4368 slab->freelist = head;
4369
4370 /*
4371 * If the slab is empty and the node's partial list is full,
4372 * it should be discarded regardless of whether it is on the
4373 * full or the partial list.
4374 */
4375 if (slab->inuse == 0 && n->nr_partial >= s->min_partial)
4376 slab_free = slab;
4377
4378 if (!prior) {
4379 /* was on full list */
4380 remove_full(s, n, slab);
4381 if (!slab_free) {
4382 add_partial(n, slab, DEACTIVATE_TO_TAIL);
4383 stat(s, FREE_ADD_PARTIAL);
4384 }
4385 } else if (slab_free) {
4386 remove_partial(n, slab);
4387 stat(s, FREE_REMOVE_PARTIAL);
4388 }
4389 }
4390
4391 if (slab_free) {
4392 /*
4393 * Update the counters while still holding n->list_lock to
4394 * prevent spurious validation warnings
4395 */
4396 dec_slabs_node(s, slab_nid(slab_free), slab_free->objects);
4397 }
4398
4399 spin_unlock_irqrestore(&n->list_lock, flags);
4400
4401 if (slab_free) {
4402 stat(s, FREE_SLAB);
4403 free_slab(s, slab_free);
4404 }
4405}
4406
4407/*
4408 * Slow path handling. This may still be called frequently since objects
4409 * have a longer lifetime than the cpu slabs in most processing loads.
4410 *
4411 * So we still attempt to reduce cache line usage. Just take the slab
4412 * lock and free the item. If there is no additional partial slab
4413 * handling required then we can return immediately.
4414 */
4415static void __slab_free(struct kmem_cache *s, struct slab *slab,
4416 void *head, void *tail, int cnt,
4417 unsigned long addr)
4418
4419{
4420 void *prior;
4421 int was_frozen;
4422 struct slab new;
4423 unsigned long counters;
4424 struct kmem_cache_node *n = NULL;
4425 unsigned long flags;
4426 bool on_node_partial;
4427
4428 stat(s, FREE_SLOWPATH);
4429
4430 if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) {
4431 free_to_partial_list(s, slab, head, tail, cnt, addr);
4432 return;
4433 }
4434
4435 do {
4436 if (unlikely(n)) {
4437 spin_unlock_irqrestore(&n->list_lock, flags);
4438 n = NULL;
4439 }
4440 prior = slab->freelist;
4441 counters = slab->counters;
4442 set_freepointer(s, tail, prior);
4443 new.counters = counters;
4444 was_frozen = new.frozen;
4445 new.inuse -= cnt;
4446 if ((!new.inuse || !prior) && !was_frozen) {
4447 /* Needs to be taken off a list */
4448 if (!kmem_cache_has_cpu_partial(s) || prior) {
4449
4450 n = get_node(s, slab_nid(slab));
4451 /*
4452 * Speculatively acquire the list_lock.
4453 * If the cmpxchg does not succeed then we may
4454 * drop the list_lock without any processing.
4455 *
4456 * Otherwise the list_lock will synchronize with
4457 * other processors updating the list of slabs.
4458 */
4459 spin_lock_irqsave(&n->list_lock, flags);
4460
4461 on_node_partial = slab_test_node_partial(slab);
4462 }
4463 }
4464
4465 } while (!slab_update_freelist(s, slab,
4466 prior, counters,
4467 head, new.counters,
4468 "__slab_free"));
4469
4470 if (likely(!n)) {
4471
4472 if (likely(was_frozen)) {
4473 /*
4474 * The list lock was not taken therefore no list
4475 * activity can be necessary.
4476 */
4477 stat(s, FREE_FROZEN);
4478 } else if (kmem_cache_has_cpu_partial(s) && !prior) {
4479 /*
4480 * If we started with a full slab then put it onto the
4481 * per cpu partial list.
4482 */
4483 put_cpu_partial(s, slab, 1);
4484 stat(s, CPU_PARTIAL_FREE);
4485 }
4486
4487 return;
4488 }
4489
4490 /*
4491 * This slab was partially empty but not on the per-node partial list,
4492 * in which case we shouldn't manipulate its list, just return.
4493 */
4494 if (prior && !on_node_partial) {
4495 spin_unlock_irqrestore(&n->list_lock, flags);
4496 return;
4497 }
4498
4499 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial))
4500 goto slab_empty;
4501
4502 /*
4503 * Objects left in the slab. If it was not on the partial list before
4504 * then add it.
4505 */
4506 if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
4507 add_partial(n, slab, DEACTIVATE_TO_TAIL);
4508 stat(s, FREE_ADD_PARTIAL);
4509 }
4510 spin_unlock_irqrestore(&n->list_lock, flags);
4511 return;
4512
4513slab_empty:
4514 if (prior) {
4515 /*
4516 * Slab on the partial list.
4517 */
4518 remove_partial(n, slab);
4519 stat(s, FREE_REMOVE_PARTIAL);
4520 }
4521
4522 spin_unlock_irqrestore(&n->list_lock, flags);
4523 stat(s, FREE_SLAB);
4524 discard_slab(s, slab);
4525}
4526
4527#ifndef CONFIG_SLUB_TINY
4528/*
4529 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
4530 * can perform fastpath freeing without additional function calls.
4531 *
4532 * The fastpath is only possible if we are freeing to the current cpu slab
4533 * of this processor. This is typically the case if we have just allocated
4534 * the item before.
4535 *
4536 * If fastpath is not possible then fall back to __slab_free where we deal
4537 * with all sorts of special processing.
4538 *
4539 * Bulk free of a freelist with several objects (all pointing to the
4540 * same slab) is possible by specifying head and tail pointers, plus the
4541 * object count (cnt). A bulk free is indicated by the tail pointer being set.
4542 */
4543static __always_inline void do_slab_free(struct kmem_cache *s,
4544 struct slab *slab, void *head, void *tail,
4545 int cnt, unsigned long addr)
4546{
4547 struct kmem_cache_cpu *c;
4548 unsigned long tid;
4549 void **freelist;
4550
4551redo:
4552 /*
4553	 * Determine the current cpu's per cpu slab.
4554	 * The cpu may change afterward. However, that does not matter since
4555 * data is retrieved via this pointer. If we are on the same cpu
4556 * during the cmpxchg then the free will succeed.
4557 */
4558 c = raw_cpu_ptr(s->cpu_slab);
4559 tid = READ_ONCE(c->tid);
4560
4561	/* Same as the comment on barrier() in __slab_alloc_node() */
4562 barrier();
4563
4564 if (unlikely(slab != c->slab)) {
4565 __slab_free(s, slab, head, tail, cnt, addr);
4566 return;
4567 }
4568
4569 if (USE_LOCKLESS_FAST_PATH()) {
4570 freelist = READ_ONCE(c->freelist);
4571
4572 set_freepointer(s, tail, freelist);
4573
4574 if (unlikely(!__update_cpu_freelist_fast(s, freelist, head, tid))) {
4575 note_cmpxchg_failure("slab_free", s, tid);
4576 goto redo;
4577 }
4578 } else {
4579 /* Update the free list under the local lock */
4580 local_lock(&s->cpu_slab->lock);
4581 c = this_cpu_ptr(s->cpu_slab);
4582 if (unlikely(slab != c->slab)) {
4583 local_unlock(&s->cpu_slab->lock);
4584 goto redo;
4585 }
4586 tid = c->tid;
4587 freelist = c->freelist;
4588
4589 set_freepointer(s, tail, freelist);
4590 c->freelist = head;
4591 c->tid = next_tid(tid);
4592
4593 local_unlock(&s->cpu_slab->lock);
4594 }
4595 stat_add(s, FREE_FASTPATH, cnt);
4596}
4597#else /* CONFIG_SLUB_TINY */
4598static void do_slab_free(struct kmem_cache *s,
4599 struct slab *slab, void *head, void *tail,
4600 int cnt, unsigned long addr)
4601{
4602 __slab_free(s, slab, head, tail, cnt, addr);
4603}
4604#endif /* CONFIG_SLUB_TINY */
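
/*
 * Illustrative sketch (editor's example, not compiled code): the head/tail/cnt
 * convention consumed by do_slab_free() above. A single free passes
 * head == tail and cnt == 1; a bulk free passes a chain of objects from the
 * same slab, linked through their free pointers:
 *
 *	void *head = obj_a;			(first object of the chain)
 *	set_freepointer(s, obj_a, obj_b);	(obj_a -> obj_b)
 *	set_freepointer(s, obj_b, obj_c);	(obj_b -> obj_c)
 *	void *tail = obj_c;			(last object; its free pointer
 *						 is spliced onto the target
 *						 freelist by do_slab_free())
 *	do_slab_free(s, slab, head, tail, 3, _RET_IP_);
 *
 * build_detached_freelist() below constructs such chains from an arbitrary
 * array of objects.
 */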
4605
4606static __fastpath_inline
4607void slab_free(struct kmem_cache *s, struct slab *slab, void *object,
4608 unsigned long addr)
4609{
4610 memcg_slab_free_hook(s, slab, &object, 1);
4611 alloc_tagging_slab_free_hook(s, slab, &object, 1);
4612
4613 if (likely(slab_free_hook(s, object, slab_want_init_on_free(s), false)))
4614 do_slab_free(s, slab, object, object, 1, addr);
4615}
4616
4617#ifdef CONFIG_MEMCG
4618/* Do not inline the rare memcg charging failed path into the allocation path */
4619static noinline
4620void memcg_alloc_abort_single(struct kmem_cache *s, void *object)
4621{
4622 if (likely(slab_free_hook(s, object, slab_want_init_on_free(s), false)))
4623 do_slab_free(s, virt_to_slab(object), object, object, 1, _RET_IP_);
4624}
4625#endif
4626
4627static __fastpath_inline
4628void slab_free_bulk(struct kmem_cache *s, struct slab *slab, void *head,
4629 void *tail, void **p, int cnt, unsigned long addr)
4630{
4631 memcg_slab_free_hook(s, slab, p, cnt);
4632 alloc_tagging_slab_free_hook(s, slab, p, cnt);
4633 /*
4634	 * With KASAN enabled, slab_free_freelist_hook() modifies the freelist
4635	 * to remove objects whose reuse must be delayed.
4636 */
4637 if (likely(slab_free_freelist_hook(s, &head, &tail, &cnt)))
4638 do_slab_free(s, slab, head, tail, cnt, addr);
4639}
4640
4641#ifdef CONFIG_SLUB_RCU_DEBUG
4642static void slab_free_after_rcu_debug(struct rcu_head *rcu_head)
4643{
4644 struct rcu_delayed_free *delayed_free =
4645 container_of(rcu_head, struct rcu_delayed_free, head);
4646 void *object = delayed_free->object;
4647 struct slab *slab = virt_to_slab(object);
4648 struct kmem_cache *s;
4649
4650 kfree(delayed_free);
4651
4652 if (WARN_ON(is_kfence_address(object)))
4653 return;
4654
4655 /* find the object and the cache again */
4656 if (WARN_ON(!slab))
4657 return;
4658 s = slab->slab_cache;
4659 if (WARN_ON(!(s->flags & SLAB_TYPESAFE_BY_RCU)))
4660 return;
4661
4662 /* resume freeing */
4663 if (slab_free_hook(s, object, slab_want_init_on_free(s), true))
4664 do_slab_free(s, slab, object, object, 1, _THIS_IP_);
4665}
4666#endif /* CONFIG_SLUB_RCU_DEBUG */
4667
4668#ifdef CONFIG_KASAN_GENERIC
4669void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
4670{
4671 do_slab_free(cache, virt_to_slab(x), x, x, 1, addr);
4672}
4673#endif
4674
4675static inline struct kmem_cache *virt_to_cache(const void *obj)
4676{
4677 struct slab *slab;
4678
4679 slab = virt_to_slab(obj);
4680 if (WARN_ONCE(!slab, "%s: Object is not a Slab page!\n", __func__))
4681 return NULL;
4682 return slab->slab_cache;
4683}
4684
4685static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
4686{
4687 struct kmem_cache *cachep;
4688
4689 if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
4690 !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
4691 return s;
4692
4693 cachep = virt_to_cache(x);
4694 if (WARN(cachep && cachep != s,
4695 "%s: Wrong slab cache. %s but object is from %s\n",
4696 __func__, s->name, cachep->name))
4697 print_tracking(cachep, x);
4698 return cachep;
4699}
4700
4701/**
4702 * kmem_cache_free - Deallocate an object
4703 * @s: The cache the allocation was from.
4704 * @x: The previously allocated object.
4705 *
4706 * Free an object which was previously allocated from this
4707 * cache.
4708 */
4709void kmem_cache_free(struct kmem_cache *s, void *x)
4710{
4711 s = cache_from_obj(s, x);
4712 if (!s)
4713 return;
4714 trace_kmem_cache_free(_RET_IP_, x, s);
4715 slab_free(s, virt_to_slab(x), x, _RET_IP_);
4716}
4717EXPORT_SYMBOL(kmem_cache_free);
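
/*
 * Usage sketch (editor's example; "struct foo" and foo_cache are hypothetical
 * caller-side names):
 *
 *	struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, NULL);
 *	...
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	if (f) {
 *		... use the object ...
 *		kmem_cache_free(foo_cache, f);
 *	}
 *
 * Freeing to the wrong cache is only detected (via cache_from_obj() above)
 * when CONFIG_SLAB_FREELIST_HARDENED or consistency checks are enabled.
 */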
4718
4719static void free_large_kmalloc(struct folio *folio, void *object)
4720{
4721 unsigned int order = folio_order(folio);
4722
4723 if (WARN_ON_ONCE(order == 0))
4724 pr_warn_once("object pointer: 0x%p\n", object);
4725
4726 kmemleak_free(object);
4727 kasan_kfree_large(object);
4728 kmsan_kfree_large(object);
4729
4730 lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B,
4731 -(PAGE_SIZE << order));
4732 folio_put(folio);
4733}
4734
4735/**
4736 * kfree - free previously allocated memory
4737 * @object: pointer returned by kmalloc() or kmem_cache_alloc()
4738 *
4739 * If @object is NULL, no operation is performed.
4740 */
4741void kfree(const void *object)
4742{
4743 struct folio *folio;
4744 struct slab *slab;
4745 struct kmem_cache *s;
4746 void *x = (void *)object;
4747
4748 trace_kfree(_RET_IP_, object);
4749
4750 if (unlikely(ZERO_OR_NULL_PTR(object)))
4751 return;
4752
4753 folio = virt_to_folio(object);
4754 if (unlikely(!folio_test_slab(folio))) {
4755 free_large_kmalloc(folio, (void *)object);
4756 return;
4757 }
4758
4759 slab = folio_slab(folio);
4760 s = slab->slab_cache;
4761 slab_free(s, slab, x, _RET_IP_);
4762}
4763EXPORT_SYMBOL(kfree);
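
/*
 * Illustrative sketch (editor's example) of the two kfree() paths above:
 * allocations up to KMALLOC_MAX_CACHE_SIZE come from kmalloc slab caches,
 * larger ones straight from the page allocator.
 *
 *	void *small = kmalloc(64, GFP_KERNEL);		(slab backed)
 *	void *large = kmalloc(SZ_1M, GFP_KERNEL);	(page allocator backed)
 *
 *	kfree(small);	(folio_test_slab() true  -> slab_free())
 *	kfree(large);	(folio_test_slab() false -> free_large_kmalloc())
 */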
4764
4765static __always_inline __realloc_size(2) void *
4766__do_krealloc(const void *p, size_t new_size, gfp_t flags)
4767{
4768 void *ret;
4769 size_t ks = 0;
4770 int orig_size = 0;
4771 struct kmem_cache *s = NULL;
4772
4773 if (unlikely(ZERO_OR_NULL_PTR(p)))
4774 goto alloc_new;
4775
4776 /* Check for double-free. */
4777 if (!kasan_check_byte(p))
4778 return NULL;
4779
4780 if (is_kfence_address(p)) {
4781 ks = orig_size = kfence_ksize(p);
4782 } else {
4783 struct folio *folio;
4784
4785 folio = virt_to_folio(p);
4786 if (unlikely(!folio_test_slab(folio))) {
4787 /* Big kmalloc object */
4788 WARN_ON(folio_size(folio) <= KMALLOC_MAX_CACHE_SIZE);
4789 WARN_ON(p != folio_address(folio));
4790 ks = folio_size(folio);
4791 } else {
4792 s = folio_slab(folio)->slab_cache;
4793 orig_size = get_orig_size(s, (void *)p);
4794 ks = s->object_size;
4795 }
4796 }
4797
4798 /* If the old object doesn't fit, allocate a bigger one */
4799 if (new_size > ks)
4800 goto alloc_new;
4801
4802 /* Zero out spare memory. */
4803 if (want_init_on_alloc(flags)) {
4804 kasan_disable_current();
4805 if (orig_size && orig_size < new_size)
4806 memset(kasan_reset_tag(p) + orig_size, 0, new_size - orig_size);
4807 else
4808 memset(kasan_reset_tag(p) + new_size, 0, ks - new_size);
4809 kasan_enable_current();
4810 }
4811
4812 /* Setup kmalloc redzone when needed */
4813 if (s && slub_debug_orig_size(s)) {
4814 set_orig_size(s, (void *)p, new_size);
4815 if (s->flags & SLAB_RED_ZONE && new_size < ks)
4816 memset_no_sanitize_memory(kasan_reset_tag(p) + new_size,
4817 SLUB_RED_ACTIVE, ks - new_size);
4818 }
4819
4820 p = kasan_krealloc(p, new_size, flags);
4821 return (void *)p;
4822
4823alloc_new:
4824 ret = kmalloc_node_track_caller_noprof(new_size, flags, NUMA_NO_NODE, _RET_IP_);
4825 if (ret && p) {
4826 /* Disable KASAN checks as the object's redzone is accessed. */
4827 kasan_disable_current();
4828 memcpy(ret, kasan_reset_tag(p), orig_size ?: ks);
4829 kasan_enable_current();
4830 }
4831
4832 return ret;
4833}
4834
4835/**
4836 * krealloc - reallocate memory. The contents will remain unchanged.
4837 * @p: object to reallocate memory for.
4838 * @new_size: how many bytes of memory are required.
4839 * @flags: the type of memory to allocate.
4840 *
4841 * If @p is %NULL, krealloc() behaves exactly like kmalloc(). If @new_size
4842 * is 0 and @p is not a %NULL pointer, the object pointed to is freed.
4843 *
4844 * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
4845 * initial memory allocation, every subsequent call to this API for the same
4846 * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
4847 * __GFP_ZERO is not fully honored by this API.
4848 *
4849 * When slub_debug_orig_size() is off, krealloc() only knows about the bucket
4850 * size of an allocation (but not the exact size it was allocated with) and
4851 * hence implements the following semantics for shrinking and growing buffers
4852 * with __GFP_ZERO.
4853 *
4854 * new bucket
4855 * 0 size size
4856 * |--------|----------------|
4857 * | keep | zero |
4858 *
4859 * Otherwise, the original allocation size 'orig_size' could be used to
4860 * precisely clear the requested size, and the new size will also be stored
4861 * as the new 'orig_size'.
4862 *
4863 * In any case, the contents of the object pointed to are preserved up to the
4864 * lesser of the new and old sizes.
4865 *
4866 * Return: pointer to the allocated memory or %NULL in case of error
4867 */
4868void *krealloc_noprof(const void *p, size_t new_size, gfp_t flags)
4869{
4870 void *ret;
4871
4872 if (unlikely(!new_size)) {
4873 kfree(p);
4874 return ZERO_SIZE_PTR;
4875 }
4876
4877 ret = __do_krealloc(p, new_size, flags);
4878 if (ret && kasan_reset_tag(p) != kasan_reset_tag(ret))
4879 kfree(p);
4880
4881 return ret;
4882}
4883EXPORT_SYMBOL(krealloc_noprof);
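
/*
 * Usage sketch (editor's example) for the semantics documented above:
 *
 *	size_t n = 32;
 *	u8 *buf = kzalloc(n, GFP_KERNEL);
 *	...
 *	(Growing: keep passing __GFP_ZERO so newly exposed bytes are cleared
 *	 even when the existing bucket is simply reused.)
 *	u8 *tmp = krealloc(buf, 2 * n, GFP_KERNEL | __GFP_ZERO);
 *	if (!tmp)
 *		goto err;	(buf is untouched and still owned here)
 *	buf = tmp;
 *	...
 *	(Shrinking to zero frees the buffer and returns ZERO_SIZE_PTR.)
 *	buf = krealloc(buf, 0, GFP_KERNEL);
 */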
4884
4885struct detached_freelist {
4886 struct slab *slab;
4887 void *tail;
4888 void *freelist;
4889 int cnt;
4890 struct kmem_cache *s;
4891};
4892
4893/*
4894 * This function progressively scans the array of free objects (with a
4895 * limited look ahead) and extracts objects belonging to the same slab.
4896 * It builds a detached freelist directly within the given objects. This
4897 * can happen without any need for synchronization, because the objects
4898 * are owned by the running process. The freelist is built up as a singly
4899 * linked list in the objects themselves. The idea is that this detached
4900 * freelist can then be bulk transferred to the real freelist(s), requiring
4901 * only a single synchronization primitive. Look ahead in the array is
4902 * limited for performance reasons.
4904 */
4905static inline
4906int build_detached_freelist(struct kmem_cache *s, size_t size,
4907 void **p, struct detached_freelist *df)
4908{
4909 int lookahead = 3;
4910 void *object;
4911 struct folio *folio;
4912 size_t same;
4913
4914 object = p[--size];
4915 folio = virt_to_folio(object);
4916 if (!s) {
4917		/* Handle kmalloc'ed objects */
4918 if (unlikely(!folio_test_slab(folio))) {
4919 free_large_kmalloc(folio, object);
4920 df->slab = NULL;
4921 return size;
4922 }
4923 /* Derive kmem_cache from object */
4924 df->slab = folio_slab(folio);
4925 df->s = df->slab->slab_cache;
4926 } else {
4927 df->slab = folio_slab(folio);
4928 df->s = cache_from_obj(s, object); /* Support for memcg */
4929 }
4930
4931 /* Start new detached freelist */
4932 df->tail = object;
4933 df->freelist = object;
4934 df->cnt = 1;
4935
4936 if (is_kfence_address(object))
4937 return size;
4938
4939 set_freepointer(df->s, object, NULL);
4940
4941 same = size;
4942 while (size) {
4943 object = p[--size];
4944 /* df->slab is always set at this point */
4945 if (df->slab == virt_to_slab(object)) {
4946			/* Opportunistically build the freelist */
4947 set_freepointer(df->s, object, df->freelist);
4948 df->freelist = object;
4949 df->cnt++;
4950 same--;
4951 if (size != same)
4952 swap(p[size], p[same]);
4953 continue;
4954 }
4955
4956 /* Limit look ahead search */
4957 if (!--lookahead)
4958 break;
4959 }
4960
4961 return same;
4962}
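
/*
 * Worked example (editor's illustration with hypothetical objects): given
 * p[] = { A1, B1, A2 }, where A1/A2 belong to slab A and B1 to slab B,
 * build_detached_freelist() starts from A2, links A1 into the detached
 * freelist (cnt = 2) and swaps it with B1 so that the objects still to be
 * freed end up compacted at the start of the array. It returns 1; the caller
 * frees the A chain with a single locked operation and the next do/while
 * iteration handles the remaining { B1 }. The lookahead of 3 bounds how far
 * the scan continues past objects from other slabs.
 */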
4963
4964/*
4965 * Internal bulk free of objects that were not initialised by the post alloc
4966 * hooks and thus should not be processed by the free hooks
4967 */
4968static void __kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
4969{
4970 if (!size)
4971 return;
4972
4973 do {
4974 struct detached_freelist df;
4975
4976 size = build_detached_freelist(s, size, p, &df);
4977 if (!df.slab)
4978 continue;
4979
4980 if (kfence_free(df.freelist))
4981 continue;
4982
4983 do_slab_free(df.s, df.slab, df.freelist, df.tail, df.cnt,
4984 _RET_IP_);
4985 } while (likely(size));
4986}
4987
4988/* Note that interrupts must be enabled when calling this function. */
4989void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
4990{
4991 if (!size)
4992 return;
4993
4994 do {
4995 struct detached_freelist df;
4996
4997 size = build_detached_freelist(s, size, p, &df);
4998 if (!df.slab)
4999 continue;
5000
5001 slab_free_bulk(df.s, df.slab, df.freelist, df.tail, &p[size],
5002 df.cnt, _RET_IP_);
5003 } while (likely(size));
5004}
5005EXPORT_SYMBOL(kmem_cache_free_bulk);
5006
5007#ifndef CONFIG_SLUB_TINY
5008static inline
5009int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
5010 void **p)
5011{
5012 struct kmem_cache_cpu *c;
5013 unsigned long irqflags;
5014 int i;
5015
5016 /*
5017 * Drain objects in the per cpu slab, while disabling local
5018	 * IRQs, which protects against PREEMPT and interrupt
5019	 * handlers invoking the normal fastpath.
5020 */
5021 c = slub_get_cpu_ptr(s->cpu_slab);
5022 local_lock_irqsave(&s->cpu_slab->lock, irqflags);
5023
5024 for (i = 0; i < size; i++) {
5025 void *object = kfence_alloc(s, s->object_size, flags);
5026
5027 if (unlikely(object)) {
5028 p[i] = object;
5029 continue;
5030 }
5031
5032 object = c->freelist;
5033 if (unlikely(!object)) {
5034 /*
5035 * We may have removed an object from c->freelist using
5036 * the fastpath in the previous iteration; in that case,
5037 * c->tid has not been bumped yet.
5038 * Since ___slab_alloc() may reenable interrupts while
5039 * allocating memory, we should bump c->tid now.
5040 */
5041 c->tid = next_tid(c->tid);
5042
5043 local_unlock_irqrestore(&s->cpu_slab->lock, irqflags);
5044
5045 /*
5046			 * Invoking the slow path likely has the side effect
5047			 * of re-populating the per CPU c->freelist
5048 */
5049 p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
5050 _RET_IP_, c, s->object_size);
5051 if (unlikely(!p[i]))
5052 goto error;
5053
5054 c = this_cpu_ptr(s->cpu_slab);
5055 maybe_wipe_obj_freeptr(s, p[i]);
5056
5057 local_lock_irqsave(&s->cpu_slab->lock, irqflags);
5058
5059 continue; /* goto for-loop */
5060 }
5061 c->freelist = get_freepointer(s, object);
5062 p[i] = object;
5063 maybe_wipe_obj_freeptr(s, p[i]);
5064 stat(s, ALLOC_FASTPATH);
5065 }
5066 c->tid = next_tid(c->tid);
5067 local_unlock_irqrestore(&s->cpu_slab->lock, irqflags);
5068 slub_put_cpu_ptr(s->cpu_slab);
5069
5070 return i;
5071
5072error:
5073 slub_put_cpu_ptr(s->cpu_slab);
5074 __kmem_cache_free_bulk(s, i, p);
5075 return 0;
5076
5077}
5078#else /* CONFIG_SLUB_TINY */
5079static int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
5080 size_t size, void **p)
5081{
5082 int i;
5083
5084 for (i = 0; i < size; i++) {
5085 void *object = kfence_alloc(s, s->object_size, flags);
5086
5087 if (unlikely(object)) {
5088 p[i] = object;
5089 continue;
5090 }
5091
5092 p[i] = __slab_alloc_node(s, flags, NUMA_NO_NODE,
5093 _RET_IP_, s->object_size);
5094 if (unlikely(!p[i]))
5095 goto error;
5096
5097 maybe_wipe_obj_freeptr(s, p[i]);
5098 }
5099
5100 return i;
5101
5102error:
5103 __kmem_cache_free_bulk(s, i, p);
5104 return 0;
5105}
5106#endif /* CONFIG_SLUB_TINY */
5107
5108/* Note that interrupts must be enabled when calling this function. */
5109int kmem_cache_alloc_bulk_noprof(struct kmem_cache *s, gfp_t flags, size_t size,
5110 void **p)
5111{
5112 int i;
5113
5114 if (!size)
5115 return 0;
5116
5117 s = slab_pre_alloc_hook(s, flags);
5118 if (unlikely(!s))
5119 return 0;
5120
5121 i = __kmem_cache_alloc_bulk(s, flags, size, p);
5122 if (unlikely(i == 0))
5123 return 0;
5124
5125 /*
5126 * memcg and kmem_cache debug support and memory initialization.
5127 * Done outside of the IRQ disabled fastpath loop.
5128 */
5129 if (unlikely(!slab_post_alloc_hook(s, NULL, flags, size, p,
5130 slab_want_init_on_alloc(flags, s), s->object_size))) {
5131 return 0;
5132 }
5133 return i;
5134}
5135EXPORT_SYMBOL(kmem_cache_alloc_bulk_noprof);
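
/*
 * Usage sketch (editor's example; foo_cache as in the earlier sketch). Both
 * calls require interrupts to be enabled. On failure kmem_cache_alloc_bulk()
 * returns 0 and the caller owns no objects, so no partial cleanup is needed.
 *
 *	void *objs[16];
 *	int nr;
 *
 *	nr = kmem_cache_alloc_bulk(foo_cache, GFP_KERNEL,
 *				   ARRAY_SIZE(objs), objs);
 *	if (!nr)
 *		return -ENOMEM;
 *	... use objs[0..nr-1] ...
 *	kmem_cache_free_bulk(foo_cache, nr, objs);
 */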
5136
5137
5138/*
5139 * Object placement in a slab is made very easy because we always start at
5140 * offset 0. If we tune the size of the object to the alignment then we can
5141 * get the required alignment by putting one properly sized object after
5142 * another.
5143 *
5144 * Notice that the allocation order determines the sizes of the per cpu
5145 * caches. Each processor always has one slab available for allocations.
5146 * Increasing the allocation order reduces the number of times that slabs
5147 * must be moved on and off the partial lists and is therefore a factor in
5148 * locking overhead.
5149 */
5150
5151/*
5152 * Minimum / Maximum order of slab pages. This influences locking overhead
5153 * and slab fragmentation. A higher order reduces the number of partial slabs
5154 * and increases the number of allocations possible without having to
5155 * take the list_lock.
5156 */
5157static unsigned int slub_min_order;
5158static unsigned int slub_max_order =
5159 IS_ENABLED(CONFIG_SLUB_TINY) ? 1 : PAGE_ALLOC_COSTLY_ORDER;
5160static unsigned int slub_min_objects;
5161
5162/*
5163 * Calculate the order of allocation given a slab object size.
5164 *
5165 * The order of allocation has a significant impact on performance and other
5166 * system components. Generally order 0 allocations should be preferred since
5167 * order 0 does not cause fragmentation in the page allocator. Larger objects
5168 * can be problematic to put into order 0 slabs because there may be too much
5169 * unused space left. We go to a higher order if more than 1/16th of the slab
5170 * would be wasted.
5171 *
5172 * In order to reach satisfactory performance we must ensure that a minimum
5173 * number of objects is in one slab. Otherwise we may generate too much
5174 * activity on the partial lists which requires taking the list_lock. This is
5175 * less of a concern for large slabs, though, which are rarely used.
5176 *
5177 * slab_max_order specifies the order where we begin to stop considering the
5178 * number of objects in a slab as critical. If we reach slab_max_order then
5179 * we try to keep the page order as low as possible. So we accept more waste
5180 * of space in favor of a small page order.
5181 *
5182 * Higher order allocations also allow the placement of more objects in a
5183 * slab and thereby reduce object handling overhead. If the user has
5184 * requested a higher minimum order then we start with that one instead of
5185 * the smallest order which will fit the object.
5186 */
5187static inline unsigned int calc_slab_order(unsigned int size,
5188 unsigned int min_order, unsigned int max_order,
5189 unsigned int fract_leftover)
5190{
5191 unsigned int order;
5192
5193 for (order = min_order; order <= max_order; order++) {
5194
5195 unsigned int slab_size = (unsigned int)PAGE_SIZE << order;
5196 unsigned int rem;
5197
5198 rem = slab_size % size;
5199
5200 if (rem <= slab_size / fract_leftover)
5201 break;
5202 }
5203
5204 return order;
5205}
5206
5207static inline int calculate_order(unsigned int size)
5208{
5209 unsigned int order;
5210 unsigned int min_objects;
5211 unsigned int max_objects;
5212 unsigned int min_order;
5213
5214 min_objects = slub_min_objects;
5215 if (!min_objects) {
5216 /*
5217 * Some architectures will only update present cpus when
5218 * onlining them, so don't trust the number if it's just 1. But
5219 * we also don't want to use nr_cpu_ids always, as on some other
5220 * architectures, there can be many possible cpus, but never
5221 * onlined. Here we compromise between trying to avoid too high
5222 * order on systems that appear larger than they are, and too
5223 * low order on systems that appear smaller than they are.
5224 */
5225 unsigned int nr_cpus = num_present_cpus();
5226 if (nr_cpus <= 1)
5227 nr_cpus = nr_cpu_ids;
5228 min_objects = 4 * (fls(nr_cpus) + 1);
5229 }
5230 /* min_objects can't be 0 because get_order(0) is undefined */
5231 max_objects = max(order_objects(slub_max_order, size), 1U);
5232 min_objects = min(min_objects, max_objects);
5233
5234 min_order = max_t(unsigned int, slub_min_order,
5235 get_order(min_objects * size));
5236 if (order_objects(min_order, size) > MAX_OBJS_PER_PAGE)
5237 return get_order(size * MAX_OBJS_PER_PAGE) - 1;
5238
5239 /*
5240 * Attempt to find best configuration for a slab. This works by first
5241 * attempting to generate a layout with the best possible configuration
5242 * and backing off gradually.
5243 *
5244 * We start with accepting at most 1/16 waste and try to find the
5245 * smallest order from min_objects-derived/slab_min_order up to
5246 * slab_max_order that will satisfy the constraint. Note that increasing
5247 * the order can only result in same or less fractional waste, not more.
5248 *
5249 * If that fails, we increase the acceptable fraction of waste and try
5250 * again. The last iteration with fraction of 1/2 would effectively
5251 * accept any waste and give us the order determined by min_objects, as
5252	 * long as at least a single object fits within slab_max_order.
5253 */
5254 for (unsigned int fraction = 16; fraction > 1; fraction /= 2) {
5255 order = calc_slab_order(size, min_order, slub_max_order,
5256 fraction);
5257 if (order <= slub_max_order)
5258 return order;
5259 }
5260
5261 /*
5262 * Doh this slab cannot be placed using slab_max_order.
5263 */
5264 order = get_order(size);
5265 if (order <= MAX_PAGE_ORDER)
5266 return order;
5267 return -ENOSYS;
5268}
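
/*
 * Worked example (editor's arithmetic, assuming PAGE_SIZE == 4096 and the
 * initial 1/16 waste fraction): for a 700-byte object an order-0 slab wastes
 * 4096 % 700 = 596 bytes, more than 4096 / 16 = 256, so calc_slab_order()
 * keeps going; an order-1 slab wastes 8192 % 700 = 492 bytes, within
 * 8192 / 16 = 512, so order 1 is returned (unless the min_order derived from
 * min_objects above already starts the search at a higher order).
 */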
5269
5270static void
5271init_kmem_cache_node(struct kmem_cache_node *n)
5272{
5273 n->nr_partial = 0;
5274 spin_lock_init(&n->list_lock);
5275 INIT_LIST_HEAD(&n->partial);
5276#ifdef CONFIG_SLUB_DEBUG
5277 atomic_long_set(&n->nr_slabs, 0);
5278 atomic_long_set(&n->total_objects, 0);
5279 INIT_LIST_HEAD(&n->full);
5280#endif
5281}
5282
5283#ifndef CONFIG_SLUB_TINY
5284static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
5285{
5286 BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
5287 NR_KMALLOC_TYPES * KMALLOC_SHIFT_HIGH *
5288 sizeof(struct kmem_cache_cpu));
5289
5290 /*
5291 * Must align to double word boundary for the double cmpxchg
5292 * instructions to work; see __pcpu_double_call_return_bool().
5293 */
5294 s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
5295 2 * sizeof(void *));
5296
5297 if (!s->cpu_slab)
5298 return 0;
5299
5300 init_kmem_cache_cpus(s);
5301
5302 return 1;
5303}
5304#else
5305static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
5306{
5307 return 1;
5308}
5309#endif /* CONFIG_SLUB_TINY */
5310
5311static struct kmem_cache *kmem_cache_node;
5312
5313/*
5314 * No kmalloc_node yet so do it by hand. We know that this is the first
5315 * slab on the node for this slabcache. There are no concurrent accesses
5316 * possible.
5317 *
5318 * Note that this function only works on the kmem_cache_node
5319 * when allocating for the kmem_cache_node. This is used for bootstrapping
5320 * memory on a fresh node that has no slab structures yet.
5321 */
5322static void early_kmem_cache_node_alloc(int node)
5323{
5324 struct slab *slab;
5325 struct kmem_cache_node *n;
5326
5327 BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
5328
5329 slab = new_slab(kmem_cache_node, GFP_NOWAIT, node);
5330
5331 BUG_ON(!slab);
5332 if (slab_nid(slab) != node) {
5333 pr_err("SLUB: Unable to allocate memory from node %d\n", node);
5334 pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n");
5335 }
5336
5337 n = slab->freelist;
5338 BUG_ON(!n);
5339#ifdef CONFIG_SLUB_DEBUG
5340 init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
5341#endif
5342 n = kasan_slab_alloc(kmem_cache_node, n, GFP_KERNEL, false);
5343 slab->freelist = get_freepointer(kmem_cache_node, n);
5344 slab->inuse = 1;
5345 kmem_cache_node->node[node] = n;
5346 init_kmem_cache_node(n);
5347 inc_slabs_node(kmem_cache_node, node, slab->objects);
5348
5349 /*
5350 * No locks need to be taken here as it has just been
5351 * initialized and there is no concurrent access.
5352 */
5353 __add_partial(n, slab, DEACTIVATE_TO_HEAD);
5354}
5355
5356static void free_kmem_cache_nodes(struct kmem_cache *s)
5357{
5358 int node;
5359 struct kmem_cache_node *n;
5360
5361 for_each_kmem_cache_node(s, node, n) {
5362 s->node[node] = NULL;
5363 kmem_cache_free(kmem_cache_node, n);
5364 }
5365}
5366
5367void __kmem_cache_release(struct kmem_cache *s)
5368{
5369 cache_random_seq_destroy(s);
5370#ifndef CONFIG_SLUB_TINY
5371 free_percpu(s->cpu_slab);
5372#endif
5373 free_kmem_cache_nodes(s);
5374}
5375
5376static int init_kmem_cache_nodes(struct kmem_cache *s)
5377{
5378 int node;
5379
5380 for_each_node_mask(node, slab_nodes) {
5381 struct kmem_cache_node *n;
5382
5383 if (slab_state == DOWN) {
5384 early_kmem_cache_node_alloc(node);
5385 continue;
5386 }
5387 n = kmem_cache_alloc_node(kmem_cache_node,
5388 GFP_KERNEL, node);
5389
5390 if (!n) {
5391 free_kmem_cache_nodes(s);
5392 return 0;
5393 }
5394
5395 init_kmem_cache_node(n);
5396 s->node[node] = n;
5397 }
5398 return 1;
5399}
5400
5401static void set_cpu_partial(struct kmem_cache *s)
5402{
5403#ifdef CONFIG_SLUB_CPU_PARTIAL
5404 unsigned int nr_objects;
5405
5406 /*
5407	 * cpu_partial determines the maximum number of objects kept in the
5408 * per cpu partial lists of a processor.
5409 *
5410 * Per cpu partial lists mainly contain slabs that just have one
5411 * object freed. If they are used for allocation then they can be
5412 * filled up again with minimal effort. The slab will never hit the
5413 * per node partial lists and therefore no locking will be required.
5414 *
5415	 * For backwards compatibility reasons, this is specified as a number
5416	 * of objects, even though we now limit the maximum number of pages; see
5417	 * slub_set_cpu_partial().
5418 */
5419 if (!kmem_cache_has_cpu_partial(s))
5420 nr_objects = 0;
5421 else if (s->size >= PAGE_SIZE)
5422 nr_objects = 6;
5423 else if (s->size >= 1024)
5424 nr_objects = 24;
5425 else if (s->size >= 256)
5426 nr_objects = 52;
5427 else
5428 nr_objects = 120;
5429
5430 slub_set_cpu_partial(s, nr_objects);
5431#endif
5432}
5433
5434/*
5435 * calculate_sizes() determines the order and the distribution of data within
5436 * a slab object.
5437 */
5438static int calculate_sizes(struct kmem_cache_args *args, struct kmem_cache *s)
5439{
5440 slab_flags_t flags = s->flags;
5441 unsigned int size = s->object_size;
5442 unsigned int order;
5443
5444 /*
5445 * Round up object size to the next word boundary. We can only
5446 * place the free pointer at word boundaries and this determines
5447 * the possible location of the free pointer.
5448 */
5449 size = ALIGN(size, sizeof(void *));
5450
5451#ifdef CONFIG_SLUB_DEBUG
5452 /*
5453 * Determine if we can poison the object itself. If the user of
5454 * the slab may touch the object after free or before allocation
5455 * then we should never poison the object itself.
5456 */
5457 if ((flags & SLAB_POISON) && !(flags & SLAB_TYPESAFE_BY_RCU) &&
5458 !s->ctor)
5459 s->flags |= __OBJECT_POISON;
5460 else
5461 s->flags &= ~__OBJECT_POISON;
5462
5463
5464 /*
5465 * If we are Redzoning then check if there is some space between the
5466 * end of the object and the free pointer. If not then add an
5467 * additional word to have some bytes to store Redzone information.
5468 */
5469 if ((flags & SLAB_RED_ZONE) && size == s->object_size)
5470 size += sizeof(void *);
5471#endif
5472
5473 /*
5474 * With that we have determined the number of bytes in actual use
5475 * by the object and redzoning.
5476 */
5477 s->inuse = size;
5478
5479 if (((flags & SLAB_TYPESAFE_BY_RCU) && !args->use_freeptr_offset) ||
5480 (flags & SLAB_POISON) || s->ctor ||
5481 ((flags & SLAB_RED_ZONE) &&
5482 (s->object_size < sizeof(void *) || slub_debug_orig_size(s)))) {
5483 /*
5484 * Relocate free pointer after the object if it is not
5485 * permitted to overwrite the first word of the object on
5486 * kmem_cache_free.
5487 *
5488 * This is the case if we do RCU, have a constructor or
5489 * destructor, are poisoning the objects, or are
5490 * redzoning an object smaller than sizeof(void *) or are
5491 * redzoning an object with slub_debug_orig_size() enabled,
5492 * in which case the right redzone may be extended.
5493 *
5494 * The assumption that s->offset >= s->inuse means free
5495 * pointer is outside of the object is used in the
5496 * freeptr_outside_object() function. If that is no
5497 * longer true, the function needs to be modified.
5498 */
5499 s->offset = size;
5500 size += sizeof(void *);
5501 } else if ((flags & SLAB_TYPESAFE_BY_RCU) && args->use_freeptr_offset) {
5502 s->offset = args->freeptr_offset;
5503 } else {
5504 /*
5505 * Store freelist pointer near middle of object to keep
5506 * it away from the edges of the object to avoid small
5507 * sized over/underflows from neighboring allocations.
5508 */
5509 s->offset = ALIGN_DOWN(s->object_size / 2, sizeof(void *));
5510 }
5511
5512#ifdef CONFIG_SLUB_DEBUG
5513 if (flags & SLAB_STORE_USER) {
5514 /*
5515 * Need to store information about allocs and frees after
5516 * the object.
5517 */
5518 size += 2 * sizeof(struct track);
5519
5520 /* Save the original kmalloc request size */
5521 if (flags & SLAB_KMALLOC)
5522 size += sizeof(unsigned int);
5523 }
5524#endif
5525
5526 kasan_cache_create(s, &size, &s->flags);
5527#ifdef CONFIG_SLUB_DEBUG
5528 if (flags & SLAB_RED_ZONE) {
5529 /*
5530 * Add some empty padding so that we can catch
5531 * overwrites from earlier objects rather than let
5532 * tracking information or the free pointer be
5533 * corrupted if a user writes before the start
5534 * of the object.
5535 */
5536 size += sizeof(void *);
5537
5538 s->red_left_pad = sizeof(void *);
5539 s->red_left_pad = ALIGN(s->red_left_pad, s->align);
5540 size += s->red_left_pad;
5541 }
5542#endif
5543
5544 /*
5545 * SLUB stores one object immediately after another beginning from
5546 * offset 0. In order to align the objects we have to simply size
5547 * each object to conform to the alignment.
5548 */
5549 size = ALIGN(size, s->align);
5550 s->size = size;
5551 s->reciprocal_size = reciprocal_value(size);
5552 order = calculate_order(size);
5553
5554 if ((int)order < 0)
5555 return 0;
5556
5557 s->allocflags = __GFP_COMP;
5558
5559 if (s->flags & SLAB_CACHE_DMA)
5560 s->allocflags |= GFP_DMA;
5561
5562 if (s->flags & SLAB_CACHE_DMA32)
5563 s->allocflags |= GFP_DMA32;
5564
5565 if (s->flags & SLAB_RECLAIM_ACCOUNT)
5566 s->allocflags |= __GFP_RECLAIMABLE;
5567
5568 /*
5569 * Determine the number of objects per slab
5570 */
5571 s->oo = oo_make(order, size);
5572 s->min = oo_make(get_order(size), size);
5573
5574 return !!oo_objects(s->oo);
5575}
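
/*
 * Worked layout example (editor's illustration, 64-bit, no debug flags or
 * KASAN, align == sizeof(void *)): a 24-byte object stays 24 bytes and the
 * free pointer is stored inside it at offset ALIGN_DOWN(24 / 2, 8) = 8, so
 * s->size == 24 and an order-0 slab holds 4096 / 24 = 170 objects. With
 * poisoning or a constructor the free pointer must not overlap the object,
 * so s->offset becomes 24, s->size grows to 32 and the count drops to 128.
 */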
5576
5577static void list_slab_objects(struct kmem_cache *s, struct slab *slab,
5578 const char *text)
5579{
5580#ifdef CONFIG_SLUB_DEBUG
5581 void *addr = slab_address(slab);
5582 void *p;
5583
5584 slab_err(s, slab, text, s->name);
5585
5586 spin_lock(&object_map_lock);
5587 __fill_map(object_map, s, slab);
5588
5589 for_each_object(p, s, addr, slab->objects) {
5590
5591 if (!test_bit(__obj_to_index(s, addr, p), object_map)) {
5592 if (slab_add_kunit_errors())
5593 continue;
5594 pr_err("Object 0x%p @offset=%tu\n", p, p - addr);
5595 print_tracking(s, p);
5596 }
5597 }
5598 spin_unlock(&object_map_lock);
5599#endif
5600}
5601
5602/*
5603 * Attempt to free all partial slabs on a node.
5604 * This is called from __kmem_cache_shutdown(). We must take list_lock
5605 * because a sysfs file might still access the partial list during shutdown.
5606 */
5607static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
5608{
5609 LIST_HEAD(discard);
5610 struct slab *slab, *h;
5611
5612 BUG_ON(irqs_disabled());
5613 spin_lock_irq(&n->list_lock);
5614 list_for_each_entry_safe(slab, h, &n->partial, slab_list) {
5615 if (!slab->inuse) {
5616 remove_partial(n, slab);
5617 list_add(&slab->slab_list, &discard);
5618 } else {
5619 list_slab_objects(s, slab,
5620 "Objects remaining in %s on __kmem_cache_shutdown()");
5621 }
5622 }
5623 spin_unlock_irq(&n->list_lock);
5624
5625 list_for_each_entry_safe(slab, h, &discard, slab_list)
5626 discard_slab(s, slab);
5627}
5628
5629bool __kmem_cache_empty(struct kmem_cache *s)
5630{
5631 int node;
5632 struct kmem_cache_node *n;
5633
5634 for_each_kmem_cache_node(s, node, n)
5635 if (n->nr_partial || node_nr_slabs(n))
5636 return false;
5637 return true;
5638}
5639
5640/*
5641 * Release all resources used by a slab cache.
5642 */
5643int __kmem_cache_shutdown(struct kmem_cache *s)
5644{
5645 int node;
5646 struct kmem_cache_node *n;
5647
5648 flush_all_cpus_locked(s);
5649 /* Attempt to free all objects */
5650 for_each_kmem_cache_node(s, node, n) {
5651 free_partial(s, n);
5652 if (n->nr_partial || node_nr_slabs(n))
5653 return 1;
5654 }
5655 return 0;
5656}
5657
5658#ifdef CONFIG_PRINTK
5659void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
5660{
5661 void *base;
5662 int __maybe_unused i;
5663 unsigned int objnr;
5664 void *objp;
5665 void *objp0;
5666 struct kmem_cache *s = slab->slab_cache;
5667 struct track __maybe_unused *trackp;
5668
5669 kpp->kp_ptr = object;
5670 kpp->kp_slab = slab;
5671 kpp->kp_slab_cache = s;
5672 base = slab_address(slab);
5673 objp0 = kasan_reset_tag(object);
5674#ifdef CONFIG_SLUB_DEBUG
5675 objp = restore_red_left(s, objp0);
5676#else
5677 objp = objp0;
5678#endif
5679 objnr = obj_to_index(s, slab, objp);
5680 kpp->kp_data_offset = (unsigned long)((char *)objp0 - (char *)objp);
5681 objp = base + s->size * objnr;
5682 kpp->kp_objp = objp;
5683 if (WARN_ON_ONCE(objp < base || objp >= base + slab->objects * s->size
5684 || (objp - base) % s->size) ||
5685 !(s->flags & SLAB_STORE_USER))
5686 return;
5687#ifdef CONFIG_SLUB_DEBUG
5688 objp = fixup_red_left(s, objp);
5689 trackp = get_track(s, objp, TRACK_ALLOC);
5690 kpp->kp_ret = (void *)trackp->addr;
5691#ifdef CONFIG_STACKDEPOT
5692 {
5693 depot_stack_handle_t handle;
5694 unsigned long *entries;
5695 unsigned int nr_entries;
5696
5697 handle = READ_ONCE(trackp->handle);
5698 if (handle) {
5699 nr_entries = stack_depot_fetch(handle, &entries);
5700 for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++)
5701 kpp->kp_stack[i] = (void *)entries[i];
5702 }
5703
5704 trackp = get_track(s, objp, TRACK_FREE);
5705 handle = READ_ONCE(trackp->handle);
5706 if (handle) {
5707 nr_entries = stack_depot_fetch(handle, &entries);
5708 for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++)
5709 kpp->kp_free_stack[i] = (void *)entries[i];
5710 }
5711 }
5712#endif
5713#endif
5714}
5715#endif
5716
5717/********************************************************************
5718 * Kmalloc subsystem
5719 *******************************************************************/
5720
5721static int __init setup_slub_min_order(char *str)
5722{
5723 get_option(&str, (int *)&slub_min_order);
5724
5725 if (slub_min_order > slub_max_order)
5726 slub_max_order = slub_min_order;
5727
5728 return 1;
5729}
5730
5731__setup("slab_min_order=", setup_slub_min_order);
5732__setup_param("slub_min_order=", slub_min_order, setup_slub_min_order, 0);
5733
5734
5735static int __init setup_slub_max_order(char *str)
5736{
5737 get_option(&str, (int *)&slub_max_order);
5738 slub_max_order = min_t(unsigned int, slub_max_order, MAX_PAGE_ORDER);
5739
5740 if (slub_min_order > slub_max_order)
5741 slub_min_order = slub_max_order;
5742
5743 return 1;
5744}
5745
5746__setup("slab_max_order=", setup_slub_max_order);
5747__setup_param("slub_max_order=", slub_max_order, setup_slub_max_order, 0);
5748
5749static int __init setup_slub_min_objects(char *str)
5750{
5751 get_option(&str, (int *)&slub_min_objects);
5752
5753 return 1;
5754}
5755
5756__setup("slab_min_objects=", setup_slub_min_objects);
5757__setup_param("slub_min_objects=", slub_min_objects, setup_slub_min_objects, 0);
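
/*
 * Example (editor's note): the tunables above are set on the kernel command
 * line; both the legacy "slub_" and the current "slab_" prefixes are
 * accepted, e.g.:
 *
 *	slab_min_order=1 slab_max_order=3 slab_min_objects=16
 */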
5758
5759#ifdef CONFIG_NUMA
5760static int __init setup_slab_strict_numa(char *str)
5761{
5762 if (nr_node_ids > 1) {
5763 static_branch_enable(&strict_numa);
5764 pr_info("SLUB: Strict NUMA enabled.\n");
5765 } else {
5766 pr_warn("slab_strict_numa parameter set on non NUMA system.\n");
5767 }
5768
5769 return 1;
5770}
5771
5772__setup("slab_strict_numa", setup_slab_strict_numa);
5773#endif
5774
5775
5776#ifdef CONFIG_HARDENED_USERCOPY
5777/*
5778 * Rejects incorrectly sized objects and objects that are to be copied
5779 * to/from userspace but do not fall entirely within the containing slab
5780 * cache's usercopy region.
5781 *
5782 * Returns normally if the check passes; otherwise the copy is rejected
5783 * via usercopy_abort().
5784 */
5785void __check_heap_object(const void *ptr, unsigned long n,
5786 const struct slab *slab, bool to_user)
5787{
5788 struct kmem_cache *s;
5789 unsigned int offset;
5790 bool is_kfence = is_kfence_address(ptr);
5791
5792 ptr = kasan_reset_tag(ptr);
5793
5794 /* Find object and usable object size. */
5795 s = slab->slab_cache;
5796
5797 /* Reject impossible pointers. */
5798 if (ptr < slab_address(slab))
5799 usercopy_abort("SLUB object not in SLUB page?!", NULL,
5800 to_user, 0, n);
5801
5802 /* Find offset within object. */
5803 if (is_kfence)
5804 offset = ptr - kfence_object_start(ptr);
5805 else
5806 offset = (ptr - slab_address(slab)) % s->size;
5807
5808 /* Adjust for redzone and reject if within the redzone. */
5809 if (!is_kfence && kmem_cache_debug_flags(s, SLAB_RED_ZONE)) {
5810 if (offset < s->red_left_pad)
5811 usercopy_abort("SLUB object in left red zone",
5812 s->name, to_user, offset, n);
5813 offset -= s->red_left_pad;
5814 }
5815
5816 /* Allow address range falling entirely within usercopy region. */
5817 if (offset >= s->useroffset &&
5818 offset - s->useroffset <= s->usersize &&
5819 n <= s->useroffset - offset + s->usersize)
5820 return;
5821
5822 usercopy_abort("SLUB object", s->name, to_user, offset, n);
5823}
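
/*
 * Example (editor's sketch using the kmem_cache_args fields consumed by
 * do_kmem_cache_create() below; "struct foo" is hypothetical): a cache that
 * permits copy_to/from_user() only on the first 32 bytes of each object.
 *
 *	struct kmem_cache_args args = {
 *		.useroffset = 0,
 *		.usersize   = 32,
 *	};
 *	struct kmem_cache *c = kmem_cache_create("foo_user",
 *						 sizeof(struct foo),
 *						 &args, SLAB_HWCACHE_ALIGN);
 *
 * Copies that would touch bytes outside [useroffset, useroffset + usersize)
 * are rejected by the check above.
 */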
5824#endif /* CONFIG_HARDENED_USERCOPY */
5825
5826#define SHRINK_PROMOTE_MAX 32
5827
5828/*
5829 * kmem_cache_shrink discards empty slabs and promotes the slabs filled
5830 * up most to the head of the partial lists. New allocations will then
5831 * fill those up and thus they can be removed from the partial lists.
5832 *
5833 * The slabs with the least items are placed last. This results in them
5834 * being allocated from last, increasing the chance that the last objects
5835 * are freed in them.
5836 */
5837static int __kmem_cache_do_shrink(struct kmem_cache *s)
5838{
5839 int node;
5840 int i;
5841 struct kmem_cache_node *n;
5842 struct slab *slab;
5843 struct slab *t;
5844 struct list_head discard;
5845 struct list_head promote[SHRINK_PROMOTE_MAX];
5846 unsigned long flags;
5847 int ret = 0;
5848
5849 for_each_kmem_cache_node(s, node, n) {
5850 INIT_LIST_HEAD(&discard);
5851 for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
5852 INIT_LIST_HEAD(promote + i);
5853
5854 spin_lock_irqsave(&n->list_lock, flags);
5855
5856 /*
5857 * Build lists of slabs to discard or promote.
5858 *
5859 * Note that concurrent frees may occur while we hold the
5860 * list_lock. slab->inuse here is the upper limit.
5861 */
5862 list_for_each_entry_safe(slab, t, &n->partial, slab_list) {
5863 int free = slab->objects - slab->inuse;
5864
5865 /* Do not reread slab->inuse */
5866 barrier();
5867
5868 /* We do not keep full slabs on the list */
5869 BUG_ON(free <= 0);
5870
5871 if (free == slab->objects) {
5872 list_move(&slab->slab_list, &discard);
5873 slab_clear_node_partial(slab);
5874 n->nr_partial--;
5875 dec_slabs_node(s, node, slab->objects);
5876 } else if (free <= SHRINK_PROMOTE_MAX)
5877 list_move(&slab->slab_list, promote + free - 1);
5878 }
5879
5880 /*
5881 * Promote the slabs filled up most to the head of the
5882 * partial list.
5883 */
5884 for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
5885 list_splice(promote + i, &n->partial);
5886
5887 spin_unlock_irqrestore(&n->list_lock, flags);
5888
5889 /* Release empty slabs */
5890 list_for_each_entry_safe(slab, t, &discard, slab_list)
5891 free_slab(s, slab);
5892
5893 if (node_nr_slabs(n))
5894 ret = 1;
5895 }
5896
5897 return ret;
5898}
5899
5900int __kmem_cache_shrink(struct kmem_cache *s)
5901{
5902 flush_all(s);
5903 return __kmem_cache_do_shrink(s);
5904}
5905
5906static int slab_mem_going_offline_callback(void *arg)
5907{
5908 struct kmem_cache *s;
5909
5910 mutex_lock(&slab_mutex);
5911 list_for_each_entry(s, &slab_caches, list) {
5912 flush_all_cpus_locked(s);
5913 __kmem_cache_do_shrink(s);
5914 }
5915 mutex_unlock(&slab_mutex);
5916
5917 return 0;
5918}
5919
5920static void slab_mem_offline_callback(void *arg)
5921{
5922 struct memory_notify *marg = arg;
5923 int offline_node;
5924
5925 offline_node = marg->status_change_nid_normal;
5926
5927 /*
5928	 * If the node still has available memory, we still need its
5929	 * kmem_cache_node structures, so there is nothing to do here.
5930 */
5931 if (offline_node < 0)
5932 return;
5933
5934 mutex_lock(&slab_mutex);
5935 node_clear(offline_node, slab_nodes);
5936 /*
5937 * We no longer free kmem_cache_node structures here, as it would be
5938 * racy with all get_node() users, and infeasible to protect them with
5939 * slab_mutex.
5940 */
5941 mutex_unlock(&slab_mutex);
5942}
5943
5944static int slab_mem_going_online_callback(void *arg)
5945{
5946 struct kmem_cache_node *n;
5947 struct kmem_cache *s;
5948 struct memory_notify *marg = arg;
5949 int nid = marg->status_change_nid_normal;
5950 int ret = 0;
5951
5952 /*
5953 * If the node's memory is already available, then kmem_cache_node is
5954 * already created. Nothing to do.
5955 */
5956 if (nid < 0)
5957 return 0;
5958
5959 /*
5960 * We are bringing a node online. No memory is available yet. We must
5961 * allocate a kmem_cache_node structure in order to bring the node
5962 * online.
5963 */
5964 mutex_lock(&slab_mutex);
5965 list_for_each_entry(s, &slab_caches, list) {
5966 /*
5967 * The structure may already exist if the node was previously
5968 * onlined and offlined.
5969 */
5970 if (get_node(s, nid))
5971 continue;
5972 /*
5973		 * XXX: kmem_cache_alloc_node will fall back to other nodes
5974 * since memory is not yet available from the node that
5975 * is brought up.
5976 */
5977 n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL);
5978 if (!n) {
5979 ret = -ENOMEM;
5980 goto out;
5981 }
5982 init_kmem_cache_node(n);
5983 s->node[nid] = n;
5984 }
5985 /*
5986 * Any cache created after this point will also have kmem_cache_node
5987 * initialized for the new node.
5988 */
5989 node_set(nid, slab_nodes);
5990out:
5991 mutex_unlock(&slab_mutex);
5992 return ret;
5993}
5994
5995static int slab_memory_callback(struct notifier_block *self,
5996 unsigned long action, void *arg)
5997{
5998 int ret = 0;
5999
6000 switch (action) {
6001 case MEM_GOING_ONLINE:
6002 ret = slab_mem_going_online_callback(arg);
6003 break;
6004 case MEM_GOING_OFFLINE:
6005 ret = slab_mem_going_offline_callback(arg);
6006 break;
6007 case MEM_OFFLINE:
6008 case MEM_CANCEL_ONLINE:
6009 slab_mem_offline_callback(arg);
6010 break;
6011 case MEM_ONLINE:
6012 case MEM_CANCEL_OFFLINE:
6013 break;
6014 }
6015 if (ret)
6016 ret = notifier_from_errno(ret);
6017 else
6018 ret = NOTIFY_OK;
6019 return ret;
6020}
6021
6022/********************************************************************
6023 * Basic setup of slabs
6024 *******************************************************************/
6025
6026/*
6027 * Used for early kmem_cache structures that were allocated using
6028 * the page allocator. Allocate them properly then fix up the pointers
6029 * that may be pointing to the wrong kmem_cache structure.
6030 */
6031
6032static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
6033{
6034 int node;
6035 struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
6036 struct kmem_cache_node *n;
6037
6038 memcpy(s, static_cache, kmem_cache->object_size);
6039
6040 /*
6041 * This runs very early, and only the boot processor is supposed to be
6042 * up. Even if it weren't true, IRQs are not up so we couldn't fire
6043 * IPIs around.
6044 */
6045 __flush_cpu_slab(s, smp_processor_id());
6046 for_each_kmem_cache_node(s, node, n) {
6047 struct slab *p;
6048
6049 list_for_each_entry(p, &n->partial, slab_list)
6050 p->slab_cache = s;
6051
6052#ifdef CONFIG_SLUB_DEBUG
6053 list_for_each_entry(p, &n->full, slab_list)
6054 p->slab_cache = s;
6055#endif
6056 }
6057 list_add(&s->list, &slab_caches);
6058 return s;
6059}
6060
6061void __init kmem_cache_init(void)
6062{
6063 static __initdata struct kmem_cache boot_kmem_cache,
6064 boot_kmem_cache_node;
6065 int node;
6066
6067 if (debug_guardpage_minorder())
6068 slub_max_order = 0;
6069
6070 /* Print slub debugging pointers without hashing */
6071 if (__slub_debug_enabled())
6072 no_hash_pointers_enable(NULL);
6073
6074 kmem_cache_node = &boot_kmem_cache_node;
6075 kmem_cache = &boot_kmem_cache;
6076
6077 /*
6078 * Initialize the nodemask for which we will allocate per node
6079	 * structures. Here we don't need to take slab_mutex yet.
6080 */
6081 for_each_node_state(node, N_NORMAL_MEMORY)
6082 node_set(node, slab_nodes);
6083
6084 create_boot_cache(kmem_cache_node, "kmem_cache_node",
6085 sizeof(struct kmem_cache_node),
6086 SLAB_HWCACHE_ALIGN | SLAB_NO_OBJ_EXT, 0, 0);
6087
6088 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
6089
6090 /* Able to allocate the per node structures */
6091 slab_state = PARTIAL;
6092
6093 create_boot_cache(kmem_cache, "kmem_cache",
6094 offsetof(struct kmem_cache, node) +
6095 nr_node_ids * sizeof(struct kmem_cache_node *),
6096 SLAB_HWCACHE_ALIGN | SLAB_NO_OBJ_EXT, 0, 0);
6097
6098 kmem_cache = bootstrap(&boot_kmem_cache);
6099 kmem_cache_node = bootstrap(&boot_kmem_cache_node);
6100
6101 /* Now we can use the kmem_cache to allocate kmalloc slabs */
6102 setup_kmalloc_cache_index_table();
6103 create_kmalloc_caches();
6104
6105 /* Setup random freelists for each cache */
6106 init_freelist_randomization();
6107
6108 cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL,
6109 slub_cpu_dead);
6110
6111 pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%u\n",
6112 cache_line_size(),
6113 slub_min_order, slub_max_order, slub_min_objects,
6114 nr_cpu_ids, nr_node_ids);
6115}
6116
6117void __init kmem_cache_init_late(void)
6118{
6119#ifndef CONFIG_SLUB_TINY
6120 flushwq = alloc_workqueue("slub_flushwq", WQ_MEM_RECLAIM, 0);
6121 WARN_ON(!flushwq);
6122#endif
6123}
6124
6125struct kmem_cache *
6126__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
6127 slab_flags_t flags, void (*ctor)(void *))
6128{
6129 struct kmem_cache *s;
6130
6131 s = find_mergeable(size, align, flags, name, ctor);
6132 if (s) {
6133 if (sysfs_slab_alias(s, name))
6134 pr_err("SLUB: Unable to add cache alias %s to sysfs\n",
6135 name);
6136
6137 s->refcount++;
6138
6139 /*
6140 * Adjust the object sizes so that we clear
6141 * the complete object on kzalloc.
6142 */
6143 s->object_size = max(s->object_size, size);
6144 s->inuse = max(s->inuse, ALIGN(size, sizeof(void *)));
6145 }
6146
6147 return s;
6148}
6149
6150int do_kmem_cache_create(struct kmem_cache *s, const char *name,
6151 unsigned int size, struct kmem_cache_args *args,
6152 slab_flags_t flags)
6153{
6154 int err = -EINVAL;
6155
6156 s->name = name;
6157 s->size = s->object_size = size;
6158
6159 s->flags = kmem_cache_flags(flags, s->name);
6160#ifdef CONFIG_SLAB_FREELIST_HARDENED
6161 s->random = get_random_long();
6162#endif
6163 s->align = args->align;
6164 s->ctor = args->ctor;
6165#ifdef CONFIG_HARDENED_USERCOPY
6166 s->useroffset = args->useroffset;
6167 s->usersize = args->usersize;
6168#endif
6169
6170 if (!calculate_sizes(args, s))
6171 goto out;
6172 if (disable_higher_order_debug) {
6173 /*
6174 * Disable debugging flags that store metadata if the min slab
6175 * order increased.
6176 */
6177 if (get_order(s->size) > get_order(s->object_size)) {
6178 s->flags &= ~DEBUG_METADATA_FLAGS;
6179 s->offset = 0;
6180 if (!calculate_sizes(args, s))
6181 goto out;
6182 }
6183 }
6184
6185#ifdef system_has_freelist_aba
6186 if (system_has_freelist_aba() && !(s->flags & SLAB_NO_CMPXCHG)) {
6187 /* Enable fast mode */
6188 s->flags |= __CMPXCHG_DOUBLE;
6189 }
6190#endif
6191
6192 /*
6193 * The larger the object size is, the more slabs we want on the partial
6194 * list to avoid pounding the page allocator excessively.
6195 */
6196 s->min_partial = min_t(unsigned long, MAX_PARTIAL, ilog2(s->size) / 2);
6197 s->min_partial = max_t(unsigned long, MIN_PARTIAL, s->min_partial);
6198
6199 set_cpu_partial(s);
6200
6201#ifdef CONFIG_NUMA
6202 s->remote_node_defrag_ratio = 1000;
6203#endif
6204
6205 /* Initialize the pre-computed randomized freelist if slab is up */
6206 if (slab_state >= UP) {
6207 if (init_cache_random_seq(s))
6208 goto out;
6209 }
6210
6211 if (!init_kmem_cache_nodes(s))
6212 goto out;
6213
6214 if (!alloc_kmem_cache_cpus(s))
6215 goto out;
6216
6217 err = 0;
6218
6219 /* Mutex is not taken during early boot */
6220 if (slab_state <= UP)
6221 goto out;
6222
6223 /*
6224 * Failing to create sysfs files is not critical to SLUB functionality.
6225 * If it fails, proceed with cache creation without these files.
6226 */
6227 if (sysfs_slab_add(s))
6228 pr_err("SLUB: Unable to add cache %s to sysfs\n", s->name);
6229
6230 if (s->flags & SLAB_STORE_USER)
6231 debugfs_slab_add(s);
6232
6233out:
6234 if (err)
6235 __kmem_cache_release(s);
6236 return err;
6237}
6238
6239#ifdef SLAB_SUPPORTS_SYSFS
6240static int count_inuse(struct slab *slab)
6241{
6242 return slab->inuse;
6243}
6244
6245static int count_total(struct slab *slab)
6246{
6247 return slab->objects;
6248}
6249#endif
6250
6251#ifdef CONFIG_SLUB_DEBUG
6252static void validate_slab(struct kmem_cache *s, struct slab *slab,
6253 unsigned long *obj_map)
6254{
6255 void *p;
6256 void *addr = slab_address(slab);
6257
6258 if (!check_slab(s, slab) || !on_freelist(s, slab, NULL))
6259 return;
6260
6261 /* Now we know that a valid freelist exists */
6262 __fill_map(obj_map, s, slab);
6263 for_each_object(p, s, addr, slab->objects) {
6264 u8 val = test_bit(__obj_to_index(s, addr, p), obj_map) ?
6265 SLUB_RED_INACTIVE : SLUB_RED_ACTIVE;
6266
6267 if (!check_object(s, slab, p, val))
6268 break;
6269 }
6270}
6271
6272static int validate_slab_node(struct kmem_cache *s,
6273 struct kmem_cache_node *n, unsigned long *obj_map)
6274{
6275 unsigned long count = 0;
6276 struct slab *slab;
6277 unsigned long flags;
6278
6279 spin_lock_irqsave(&n->list_lock, flags);
6280
6281 list_for_each_entry(slab, &n->partial, slab_list) {
6282 validate_slab(s, slab, obj_map);
6283 count++;
6284 }
6285 if (count != n->nr_partial) {
6286 pr_err("SLUB %s: %ld partial slabs counted but counter=%ld\n",
6287 s->name, count, n->nr_partial);
6288 slab_add_kunit_errors();
6289 }
6290
6291 if (!(s->flags & SLAB_STORE_USER))
6292 goto out;
6293
6294 list_for_each_entry(slab, &n->full, slab_list) {
6295 validate_slab(s, slab, obj_map);
6296 count++;
6297 }
6298 if (count != node_nr_slabs(n)) {
6299 pr_err("SLUB: %s %ld slabs counted but counter=%ld\n",
6300 s->name, count, node_nr_slabs(n));
6301 slab_add_kunit_errors();
6302 }
6303
6304out:
6305 spin_unlock_irqrestore(&n->list_lock, flags);
6306 return count;
6307}
6308
6309long validate_slab_cache(struct kmem_cache *s)
6310{
6311 int node;
6312 unsigned long count = 0;
6313 struct kmem_cache_node *n;
6314 unsigned long *obj_map;
6315
6316 obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL);
6317 if (!obj_map)
6318 return -ENOMEM;
6319
6320 flush_all(s);
6321 for_each_kmem_cache_node(s, node, n)
6322 count += validate_slab_node(s, n, obj_map);
6323
6324 bitmap_free(obj_map);
6325
6326 return count;
6327}
6328EXPORT_SYMBOL(validate_slab_cache);
6329
6330#ifdef CONFIG_DEBUG_FS
6331/*
6332 * Generate lists of code addresses where slabcache objects are allocated
6333 * and freed.
6334 */
6335
6336struct location {
6337 depot_stack_handle_t handle;
6338 unsigned long count;
6339 unsigned long addr;
6340 unsigned long waste;
6341 long long sum_time;
6342 long min_time;
6343 long max_time;
6344 long min_pid;
6345 long max_pid;
6346 DECLARE_BITMAP(cpus, NR_CPUS);
6347 nodemask_t nodes;
6348};
6349
6350struct loc_track {
6351 unsigned long max;
6352 unsigned long count;
6353 struct location *loc;
6354 loff_t idx;
6355};
6356
6357static struct dentry *slab_debugfs_root;
6358
6359static void free_loc_track(struct loc_track *t)
6360{
6361 if (t->max)
6362 free_pages((unsigned long)t->loc,
6363 get_order(sizeof(struct location) * t->max));
6364}
6365
6366static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
6367{
6368 struct location *l;
6369 int order;
6370
6371 order = get_order(sizeof(struct location) * max);
6372
6373 l = (void *)__get_free_pages(flags, order);
6374 if (!l)
6375 return 0;
6376
6377 if (t->count) {
6378 memcpy(l, t->loc, sizeof(struct location) * t->count);
6379 free_loc_track(t);
6380 }
6381 t->max = max;
6382 t->loc = l;
6383 return 1;
6384}
6385
6386static int add_location(struct loc_track *t, struct kmem_cache *s,
6387 const struct track *track,
6388 unsigned int orig_size)
6389{
6390 long start, end, pos;
6391 struct location *l;
6392 unsigned long caddr, chandle, cwaste;
6393 unsigned long age = jiffies - track->when;
6394 depot_stack_handle_t handle = 0;
6395 unsigned int waste = s->object_size - orig_size;
6396
6397#ifdef CONFIG_STACKDEPOT
6398 handle = READ_ONCE(track->handle);
6399#endif
6400 start = -1;
6401 end = t->count;
6402
6403 for ( ; ; ) {
6404 pos = start + (end - start + 1) / 2;
6405
6406 /*
6407 * There is nothing at "end". If we end up there
6408		 * we need to insert the new element before end.
6409 */
6410 if (pos == end)
6411 break;
6412
6413 l = &t->loc[pos];
6414 caddr = l->addr;
6415 chandle = l->handle;
6416 cwaste = l->waste;
6417 if ((track->addr == caddr) && (handle == chandle) &&
6418 (waste == cwaste)) {
6419
6420 l->count++;
6421 if (track->when) {
6422 l->sum_time += age;
6423 if (age < l->min_time)
6424 l->min_time = age;
6425 if (age > l->max_time)
6426 l->max_time = age;
6427
6428 if (track->pid < l->min_pid)
6429 l->min_pid = track->pid;
6430 if (track->pid > l->max_pid)
6431 l->max_pid = track->pid;
6432
6433 cpumask_set_cpu(track->cpu,
6434 to_cpumask(l->cpus));
6435 }
6436 node_set(page_to_nid(virt_to_page(track)), l->nodes);
6437 return 1;
6438 }
6439
6440 if (track->addr < caddr)
6441 end = pos;
6442 else if (track->addr == caddr && handle < chandle)
6443 end = pos;
6444 else if (track->addr == caddr && handle == chandle &&
6445 waste < cwaste)
6446 end = pos;
6447 else
6448 start = pos;
6449 }
6450
6451 /*
6452 * Not found. Insert new tracking element.
6453 */
6454 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
6455 return 0;
6456
6457 l = t->loc + pos;
6458 if (pos < t->count)
6459 memmove(l + 1, l,
6460 (t->count - pos) * sizeof(struct location));
6461 t->count++;
6462 l->count = 1;
6463 l->addr = track->addr;
6464 l->sum_time = age;
6465 l->min_time = age;
6466 l->max_time = age;
6467 l->min_pid = track->pid;
6468 l->max_pid = track->pid;
6469 l->handle = handle;
6470 l->waste = waste;
6471 cpumask_clear(to_cpumask(l->cpus));
6472 cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
6473 nodes_clear(l->nodes);
6474 node_set(page_to_nid(virt_to_page(track)), l->nodes);
6475 return 1;
6476}
6477
6478static void process_slab(struct loc_track *t, struct kmem_cache *s,
6479 struct slab *slab, enum track_item alloc,
6480 unsigned long *obj_map)
6481{
6482 void *addr = slab_address(slab);
6483 bool is_alloc = (alloc == TRACK_ALLOC);
6484 void *p;
6485
6486 __fill_map(obj_map, s, slab);
6487
6488 for_each_object(p, s, addr, slab->objects)
6489 if (!test_bit(__obj_to_index(s, addr, p), obj_map))
6490 add_location(t, s, get_track(s, p, alloc),
6491 is_alloc ? get_orig_size(s, p) :
6492 s->object_size);
6493}
6494#endif /* CONFIG_DEBUG_FS */
6495#endif /* CONFIG_SLUB_DEBUG */
6496
6497#ifdef SLAB_SUPPORTS_SYSFS
6498enum slab_stat_type {
6499 SL_ALL, /* All slabs */
6500 SL_PARTIAL, /* Only partially allocated slabs */
6501 SL_CPU, /* Only slabs used for cpu caches */
6502 SL_OBJECTS, /* Determine allocated objects not slabs */
6503 SL_TOTAL /* Determine object capacity not slabs */
6504};
6505
6506#define SO_ALL (1 << SL_ALL)
6507#define SO_PARTIAL (1 << SL_PARTIAL)
6508#define SO_CPU (1 << SL_CPU)
6509#define SO_OBJECTS (1 << SL_OBJECTS)
6510#define SO_TOTAL (1 << SL_TOTAL)
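
/*
 * These masks are combined by the sysfs handlers further down, e.g.
 * SO_ALL|SO_TOTAL for "total_objects", SO_ALL|SO_OBJECTS for "objects",
 * SO_PARTIAL|SO_OBJECTS for "objects_partial" and SO_CPU for "cpu_slabs".
 */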
6511
6512static ssize_t show_slab_objects(struct kmem_cache *s,
6513 char *buf, unsigned long flags)
6514{
6515 unsigned long total = 0;
6516 int node;
6517 int x;
6518 unsigned long *nodes;
6519 int len = 0;
6520
6521 nodes = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL);
6522 if (!nodes)
6523 return -ENOMEM;
6524
6525 if (flags & SO_CPU) {
6526 int cpu;
6527
6528 for_each_possible_cpu(cpu) {
6529 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab,
6530 cpu);
6531 int node;
6532 struct slab *slab;
6533
6534 slab = READ_ONCE(c->slab);
6535 if (!slab)
6536 continue;
6537
6538 node = slab_nid(slab);
6539 if (flags & SO_TOTAL)
6540 x = slab->objects;
6541 else if (flags & SO_OBJECTS)
6542 x = slab->inuse;
6543 else
6544 x = 1;
6545
6546 total += x;
6547 nodes[node] += x;
6548
6549#ifdef CONFIG_SLUB_CPU_PARTIAL
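			/*
			 * The per-cpu partial list only tracks an approximate
			 * number of slabs in slab->slabs; per-object counts
			 * are not available here, hence the WARN_ON_ONCE()
			 * below for the SO_TOTAL and SO_OBJECTS cases.
			 */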
6550 slab = slub_percpu_partial_read_once(c);
6551 if (slab) {
6552 node = slab_nid(slab);
6553 if (flags & SO_TOTAL)
6554 WARN_ON_ONCE(1);
6555 else if (flags & SO_OBJECTS)
6556 WARN_ON_ONCE(1);
6557 else
6558 x = data_race(slab->slabs);
6559 total += x;
6560 nodes[node] += x;
6561 }
6562#endif
6563 }
6564 }
6565
6566 /*
6567 * We cannot take "mem_hotplug_lock" here with "kernfs_mutex" already
6568 * held, as that would conflict with the existing lock order:
6569 *
6570 * mem_hotplug_lock->slab_mutex->kernfs_mutex
6571 *
6572 * We don't really need mem_hotplug_lock (to hold off
6573 * slab_mem_going_offline_callback) here because slab's memory hot
6574 * unplug code doesn't destroy the kmem_cache->node[] data.
6575 */
6576
6577#ifdef CONFIG_SLUB_DEBUG
6578 if (flags & SO_ALL) {
6579 struct kmem_cache_node *n;
6580
6581 for_each_kmem_cache_node(s, node, n) {
6582
6583 if (flags & SO_TOTAL)
6584 x = node_nr_objs(n);
6585 else if (flags & SO_OBJECTS)
6586 x = node_nr_objs(n) - count_partial(n, count_free);
6587 else
6588 x = node_nr_slabs(n);
6589 total += x;
6590 nodes[node] += x;
6591 }
6592
6593 } else
6594#endif
6595 if (flags & SO_PARTIAL) {
6596 struct kmem_cache_node *n;
6597
6598 for_each_kmem_cache_node(s, node, n) {
6599 if (flags & SO_TOTAL)
6600 x = count_partial(n, count_total);
6601 else if (flags & SO_OBJECTS)
6602 x = count_partial(n, count_inuse);
6603 else
6604 x = n->nr_partial;
6605 total += x;
6606 nodes[node] += x;
6607 }
6608 }
6609
6610 len += sysfs_emit_at(buf, len, "%lu", total);
6611#ifdef CONFIG_NUMA
6612 for (node = 0; node < nr_node_ids; node++) {
6613 if (nodes[node])
6614 len += sysfs_emit_at(buf, len, " N%d=%lu",
6615 node, nodes[node]);
6616 }
6617#endif
6618 len += sysfs_emit_at(buf, len, "\n");
6619 kfree(nodes);
6620
6621 return len;
6622}
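
/*
 * The buffer built above is a total followed by optional per-node counts,
 * so a read of e.g. "objects" might return something along the lines of
 * "4026 N0=2085 N1=1941" on a two-node machine (illustrative numbers).
 */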
6623
6624#define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
6625#define to_slab(n) container_of(n, struct kmem_cache, kobj)
6626
6627struct slab_attribute {
6628 struct attribute attr;
6629 ssize_t (*show)(struct kmem_cache *s, char *buf);
6630 ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
6631};
6632
6633#define SLAB_ATTR_RO(_name) \
6634 static struct slab_attribute _name##_attr = __ATTR_RO_MODE(_name, 0400)
6635
6636#define SLAB_ATTR(_name) \
6637 static struct slab_attribute _name##_attr = __ATTR_RW_MODE(_name, 0600)
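
/*
 * Every attribute defined with these macros shows up as a file under
 * /sys/kernel/slab/<cache>/: read-only (0400) for SLAB_ATTR_RO and
 * root-writable (0600) for SLAB_ATTR.
 */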
6638
6639static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
6640{
6641 return sysfs_emit(buf, "%u\n", s->size);
6642}
6643SLAB_ATTR_RO(slab_size);
6644
6645static ssize_t align_show(struct kmem_cache *s, char *buf)
6646{
6647 return sysfs_emit(buf, "%u\n", s->align);
6648}
6649SLAB_ATTR_RO(align);
6650
6651static ssize_t object_size_show(struct kmem_cache *s, char *buf)
6652{
6653 return sysfs_emit(buf, "%u\n", s->object_size);
6654}
6655SLAB_ATTR_RO(object_size);
6656
6657static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
6658{
6659 return sysfs_emit(buf, "%u\n", oo_objects(s->oo));
6660}
6661SLAB_ATTR_RO(objs_per_slab);
6662
6663static ssize_t order_show(struct kmem_cache *s, char *buf)
6664{
6665 return sysfs_emit(buf, "%u\n", oo_order(s->oo));
6666}
6667SLAB_ATTR_RO(order);
6668
6669static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
6670{
6671 return sysfs_emit(buf, "%lu\n", s->min_partial);
6672}
6673
6674static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
6675 size_t length)
6676{
6677 unsigned long min;
6678 int err;
6679
6680 err = kstrtoul(buf, 10, &min);
6681 if (err)
6682 return err;
6683
6684 s->min_partial = min;
6685 return length;
6686}
6687SLAB_ATTR(min_partial);
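
/*
 * Illustrative use only: to keep at least 20 partial slabs per node before
 * empty slabs are discarded, one could do
 *
 *	echo 20 > /sys/kernel/slab/<cache>/min_partial
 */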
6688
6689static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
6690{
6691 unsigned int nr_partial = 0;
6692#ifdef CONFIG_SLUB_CPU_PARTIAL
6693 nr_partial = s->cpu_partial;
6694#endif
6695
6696 return sysfs_emit(buf, "%u\n", nr_partial);
6697}
6698
6699static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
6700 size_t length)
6701{
6702 unsigned int objects;
6703 int err;
6704
6705 err = kstrtouint(buf, 10, &objects);
6706 if (err)
6707 return err;
6708 if (objects && !kmem_cache_has_cpu_partial(s))
6709 return -EINVAL;
6710
6711 slub_set_cpu_partial(s, objects);
6712 flush_all(s);
6713 return length;
6714}
6715SLAB_ATTR(cpu_partial);
6716
6717static ssize_t ctor_show(struct kmem_cache *s, char *buf)
6718{
6719 if (!s->ctor)
6720 return 0;
6721 return sysfs_emit(buf, "%pS\n", s->ctor);
6722}
6723SLAB_ATTR_RO(ctor);
6724
6725static ssize_t aliases_show(struct kmem_cache *s, char *buf)
6726{
6727 return sysfs_emit(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
6728}
6729SLAB_ATTR_RO(aliases);
6730
6731static ssize_t partial_show(struct kmem_cache *s, char *buf)
6732{
6733 return show_slab_objects(s, buf, SO_PARTIAL);
6734}
6735SLAB_ATTR_RO(partial);
6736
6737static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
6738{
6739 return show_slab_objects(s, buf, SO_CPU);
6740}
6741SLAB_ATTR_RO(cpu_slabs);
6742
6743static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
6744{
6745 return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
6746}
6747SLAB_ATTR_RO(objects_partial);
6748
6749static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
6750{
6751 int objects = 0;
6752 int slabs = 0;
6753 int cpu __maybe_unused;
6754 int len = 0;
6755
6756#ifdef CONFIG_SLUB_CPU_PARTIAL
6757 for_each_online_cpu(cpu) {
6758 struct slab *slab;
6759
6760 slab = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
6761
6762 if (slab)
6763 slabs += data_race(slab->slabs);
6764 }
6765#endif
6766
6767 /* Approximate half-full slabs, see slub_set_cpu_partial() */
6768 objects = (slabs * oo_objects(s->oo)) / 2;
6769 len += sysfs_emit_at(buf, len, "%d(%d)", objects, slabs);
6770
6771#ifdef CONFIG_SLUB_CPU_PARTIAL
6772 for_each_online_cpu(cpu) {
6773 struct slab *slab;
6774
6775 slab = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
6776 if (slab) {
6777 slabs = data_race(slab->slabs);
6778 objects = (slabs * oo_objects(s->oo)) / 2;
6779 len += sysfs_emit_at(buf, len, " C%d=%d(%d)",
6780 cpu, objects, slabs);
6781 }
6782 }
6783#endif
6784 len += sysfs_emit_at(buf, len, "\n");
6785
6786 return len;
6787}
6788SLAB_ATTR_RO(slabs_cpu_partial);
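
/*
 * The resulting line is "objects(slabs)" followed by per-cpu entries and
 * might look roughly like "120(6) C0=40(2) C3=80(4)"; the object numbers
 * are the half-full estimate computed above, not exact counts.
 */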
6789
6790static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
6791{
6792 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
6793}
6794SLAB_ATTR_RO(reclaim_account);
6795
6796static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
6797{
6798 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
6799}
6800SLAB_ATTR_RO(hwcache_align);
6801
6802#ifdef CONFIG_ZONE_DMA
6803static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
6804{
6805 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
6806}
6807SLAB_ATTR_RO(cache_dma);
6808#endif
6809
6810#ifdef CONFIG_HARDENED_USERCOPY
6811static ssize_t usersize_show(struct kmem_cache *s, char *buf)
6812{
6813 return sysfs_emit(buf, "%u\n", s->usersize);
6814}
6815SLAB_ATTR_RO(usersize);
6816#endif
6817
6818static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
6819{
6820 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU));
6821}
6822SLAB_ATTR_RO(destroy_by_rcu);
6823
6824#ifdef CONFIG_SLUB_DEBUG
6825static ssize_t slabs_show(struct kmem_cache *s, char *buf)
6826{
6827 return show_slab_objects(s, buf, SO_ALL);
6828}
6829SLAB_ATTR_RO(slabs);
6830
6831static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
6832{
6833 return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
6834}
6835SLAB_ATTR_RO(total_objects);
6836
6837static ssize_t objects_show(struct kmem_cache *s, char *buf)
6838{
6839 return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
6840}
6841SLAB_ATTR_RO(objects);
6842
6843static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
6844{
6845 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS));
6846}
6847SLAB_ATTR_RO(sanity_checks);
6848
6849static ssize_t trace_show(struct kmem_cache *s, char *buf)
6850{
6851 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TRACE));
6852}
6853SLAB_ATTR_RO(trace);
6854
6855static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
6856{
6857 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
6858}
6859
6860SLAB_ATTR_RO(red_zone);
6861
6862static ssize_t poison_show(struct kmem_cache *s, char *buf)
6863{
6864 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_POISON));
6865}
6866
6867SLAB_ATTR_RO(poison);
6868
6869static ssize_t store_user_show(struct kmem_cache *s, char *buf)
6870{
6871 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
6872}
6873
6874SLAB_ATTR_RO(store_user);
6875
6876static ssize_t validate_show(struct kmem_cache *s, char *buf)
6877{
6878 return 0;
6879}
6880
6881static ssize_t validate_store(struct kmem_cache *s,
6882 const char *buf, size_t length)
6883{
6884 int ret = -EINVAL;
6885
6886 if (buf[0] == '1' && kmem_cache_debug(s)) {
6887 ret = validate_slab_cache(s);
6888 if (ret >= 0)
6889 ret = length;
6890 }
6891 return ret;
6892}
6893SLAB_ATTR(validate);
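
/*
 * Writing '1' runs a full consistency check over all slabs of the cache,
 * e.g. (illustrative):
 *
 *	echo 1 > /sys/kernel/slab/<cache>/validate
 *
 * The write is rejected unless debugging is enabled for the cache.
 */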
6894
6895#endif /* CONFIG_SLUB_DEBUG */
6896
6897#ifdef CONFIG_FAILSLAB
6898static ssize_t failslab_show(struct kmem_cache *s, char *buf)
6899{
6900 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
6901}
6902
6903static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
6904 size_t length)
6905{
6906 if (s->refcount > 1)
6907 return -EINVAL;
6908
6909 if (buf[0] == '1')
6910 WRITE_ONCE(s->flags, s->flags | SLAB_FAILSLAB);
6911 else
6912 WRITE_ONCE(s->flags, s->flags & ~SLAB_FAILSLAB);
6913
6914 return length;
6915}
6916SLAB_ATTR(failslab);
6917#endif
6918
6919static ssize_t shrink_show(struct kmem_cache *s, char *buf)
6920{
6921 return 0;
6922}
6923
6924static ssize_t shrink_store(struct kmem_cache *s,
6925 const char *buf, size_t length)
6926{
6927 if (buf[0] == '1')
6928 kmem_cache_shrink(s);
6929 else
6930 return -EINVAL;
6931 return length;
6932}
6933SLAB_ATTR(shrink);
6934
6935#ifdef CONFIG_NUMA
6936static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
6937{
6938 return sysfs_emit(buf, "%u\n", s->remote_node_defrag_ratio / 10);
6939}
6940
6941static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
6942 const char *buf, size_t length)
6943{
6944 unsigned int ratio;
6945 int err;
6946
6947 err = kstrtouint(buf, 10, &ratio);
6948 if (err)
6949 return err;
6950 if (ratio > 100)
6951 return -ERANGE;
6952
6953 s->remote_node_defrag_ratio = ratio * 10;
6954
6955 return length;
6956}
6957SLAB_ATTR(remote_node_defrag_ratio);
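
/*
 * The ratio is presented as a percentage but stored scaled by ten, so a
 * write of "20" is kept internally as 200 and read back as 20.
 */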
6958#endif
6959
6960#ifdef CONFIG_SLUB_STATS
6961static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
6962{
6963 unsigned long sum = 0;
6964 int cpu;
6965 int len = 0;
6966 int *data = kmalloc_array(nr_cpu_ids, sizeof(int), GFP_KERNEL);
6967
6968 if (!data)
6969 return -ENOMEM;
6970
6971 for_each_online_cpu(cpu) {
6972 unsigned int x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si];
6973
6974 data[cpu] = x;
6975 sum += x;
6976 }
6977
6978 len += sysfs_emit_at(buf, len, "%lu", sum);
6979
6980#ifdef CONFIG_SMP
6981 for_each_online_cpu(cpu) {
6982 if (data[cpu])
6983 len += sysfs_emit_at(buf, len, " C%d=%u",
6984 cpu, data[cpu]);
6985 }
6986#endif
6987 kfree(data);
6988 len += sysfs_emit_at(buf, len, "\n");
6989
6990 return len;
6991}
6992
6993static void clear_stat(struct kmem_cache *s, enum stat_item si)
6994{
6995 int cpu;
6996
6997 for_each_online_cpu(cpu)
6998 per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0;
6999}
7000
7001#define STAT_ATTR(si, text) \
7002static ssize_t text##_show(struct kmem_cache *s, char *buf) \
7003{ \
7004 return show_stat(s, buf, si); \
7005} \
7006static ssize_t text##_store(struct kmem_cache *s, \
7007 const char *buf, size_t length) \
7008{ \
7009 if (buf[0] != '0') \
7010 return -EINVAL; \
7011 clear_stat(s, si); \
7012 return length; \
7013} \
7014SLAB_ATTR(text);
7015
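/*
 * Each STAT_ATTR() below becomes a per-cache statistics file. Reading one
 * yields a total plus a per-cpu breakdown, e.g. roughly
 * "54632 C0=12011 C1=42621", and writing '0' clears the counter on all
 * online cpus.
 */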
7016STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
7017STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
7018STAT_ATTR(FREE_FASTPATH, free_fastpath);
7019STAT_ATTR(FREE_SLOWPATH, free_slowpath);
7020STAT_ATTR(FREE_FROZEN, free_frozen);
7021STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
7022STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
7023STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
7024STAT_ATTR(ALLOC_SLAB, alloc_slab);
7025STAT_ATTR(ALLOC_REFILL, alloc_refill);
7026STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch);
7027STAT_ATTR(FREE_SLAB, free_slab);
7028STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
7029STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
7030STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
7031STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
7032STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
7033STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
7034STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass);
7035STAT_ATTR(ORDER_FALLBACK, order_fallback);
7036STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
7037STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
7038STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc);
7039STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free);
7040STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
7041STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain);
7042#endif /* CONFIG_SLUB_STATS */
7043
7044#ifdef CONFIG_KFENCE
7045static ssize_t skip_kfence_show(struct kmem_cache *s, char *buf)
7046{
7047 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_SKIP_KFENCE));
7048}
7049
7050static ssize_t skip_kfence_store(struct kmem_cache *s,
7051 const char *buf, size_t length)
7052{
7053 int ret = length;
7054
7055 if (buf[0] == '0')
7056 s->flags &= ~SLAB_SKIP_KFENCE;
7057 else if (buf[0] == '1')
7058 s->flags |= SLAB_SKIP_KFENCE;
7059 else
7060 ret = -EINVAL;
7061
7062 return ret;
7063}
7064SLAB_ATTR(skip_kfence);
7065#endif
7066
7067static struct attribute *slab_attrs[] = {
7068 &slab_size_attr.attr,
7069 &object_size_attr.attr,
7070 &objs_per_slab_attr.attr,
7071 &order_attr.attr,
7072 &min_partial_attr.attr,
7073 &cpu_partial_attr.attr,
7074 &objects_partial_attr.attr,
7075 &partial_attr.attr,
7076 &cpu_slabs_attr.attr,
7077 &ctor_attr.attr,
7078 &aliases_attr.attr,
7079 &align_attr.attr,
7080 &hwcache_align_attr.attr,
7081 &reclaim_account_attr.attr,
7082 &destroy_by_rcu_attr.attr,
7083 &shrink_attr.attr,
7084 &slabs_cpu_partial_attr.attr,
7085#ifdef CONFIG_SLUB_DEBUG
7086 &total_objects_attr.attr,
7087 &objects_attr.attr,
7088 &slabs_attr.attr,
7089 &sanity_checks_attr.attr,
7090 &trace_attr.attr,
7091 &red_zone_attr.attr,
7092 &poison_attr.attr,
7093 &store_user_attr.attr,
7094 &validate_attr.attr,
7095#endif
7096#ifdef CONFIG_ZONE_DMA
7097 &cache_dma_attr.attr,
7098#endif
7099#ifdef CONFIG_NUMA
7100 &remote_node_defrag_ratio_attr.attr,
7101#endif
7102#ifdef CONFIG_SLUB_STATS
7103 &alloc_fastpath_attr.attr,
7104 &alloc_slowpath_attr.attr,
7105 &free_fastpath_attr.attr,
7106 &free_slowpath_attr.attr,
7107 &free_frozen_attr.attr,
7108 &free_add_partial_attr.attr,
7109 &free_remove_partial_attr.attr,
7110 &alloc_from_partial_attr.attr,
7111 &alloc_slab_attr.attr,
7112 &alloc_refill_attr.attr,
7113 &alloc_node_mismatch_attr.attr,
7114 &free_slab_attr.attr,
7115 &cpuslab_flush_attr.attr,
7116 &deactivate_full_attr.attr,
7117 &deactivate_empty_attr.attr,
7118 &deactivate_to_head_attr.attr,
7119 &deactivate_to_tail_attr.attr,
7120 &deactivate_remote_frees_attr.attr,
7121 &deactivate_bypass_attr.attr,
7122 &order_fallback_attr.attr,
7123 &cmpxchg_double_fail_attr.attr,
7124 &cmpxchg_double_cpu_fail_attr.attr,
7125 &cpu_partial_alloc_attr.attr,
7126 &cpu_partial_free_attr.attr,
7127 &cpu_partial_node_attr.attr,
7128 &cpu_partial_drain_attr.attr,
7129#endif
7130#ifdef CONFIG_FAILSLAB
7131 &failslab_attr.attr,
7132#endif
7133#ifdef CONFIG_HARDENED_USERCOPY
7134 &usersize_attr.attr,
7135#endif
7136#ifdef CONFIG_KFENCE
7137 &skip_kfence_attr.attr,
7138#endif
7139
7140 NULL
7141};
7142
7143static const struct attribute_group slab_attr_group = {
7144 .attrs = slab_attrs,
7145};
7146
7147static ssize_t slab_attr_show(struct kobject *kobj,
7148 struct attribute *attr,
7149 char *buf)
7150{
7151 struct slab_attribute *attribute;
7152 struct kmem_cache *s;
7153
7154 attribute = to_slab_attr(attr);
7155 s = to_slab(kobj);
7156
7157 if (!attribute->show)
7158 return -EIO;
7159
7160 return attribute->show(s, buf);
7161}
7162
7163static ssize_t slab_attr_store(struct kobject *kobj,
7164 struct attribute *attr,
7165 const char *buf, size_t len)
7166{
7167 struct slab_attribute *attribute;
7168 struct kmem_cache *s;
7169
7170 attribute = to_slab_attr(attr);
7171 s = to_slab(kobj);
7172
7173 if (!attribute->store)
7174 return -EIO;
7175
7176 return attribute->store(s, buf, len);
7177}
7178
7179static void kmem_cache_release(struct kobject *k)
7180{
7181 slab_kmem_cache_release(to_slab(k));
7182}
7183
7184static const struct sysfs_ops slab_sysfs_ops = {
7185 .show = slab_attr_show,
7186 .store = slab_attr_store,
7187};
7188
7189static const struct kobj_type slab_ktype = {
7190 .sysfs_ops = &slab_sysfs_ops,
7191 .release = kmem_cache_release,
7192};
7193
7194static struct kset *slab_kset;
7195
7196static inline struct kset *cache_kset(struct kmem_cache *s)
7197{
7198 return slab_kset;
7199}
7200
7201#define ID_STR_LENGTH 32
7202
7203/* Create a unique string id for a slab cache.
7204 *
7205 * Format: ":[flags-]size"
7206 */
7207static char *create_unique_id(struct kmem_cache *s)
7208{
7209 char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
7210 char *p = name;
7211
7212 if (!name)
7213 return ERR_PTR(-ENOMEM);
7214
7215 *p++ = ':';
7216 /*
7217 * First come the flags affecting slab cache operations. We only
7218 * get here for aliasable slabs, so we do not need to support
7219 * too many flags. The flags here must cover all flags that
7220 * are matched during merging to guarantee that the id is
7221 * unique.
7222 */
7223 if (s->flags & SLAB_CACHE_DMA)
7224 *p++ = 'd';
7225 if (s->flags & SLAB_CACHE_DMA32)
7226 *p++ = 'D';
7227 if (s->flags & SLAB_RECLAIM_ACCOUNT)
7228 *p++ = 'a';
7229 if (s->flags & SLAB_CONSISTENCY_CHECKS)
7230 *p++ = 'F';
7231 if (s->flags & SLAB_ACCOUNT)
7232 *p++ = 'A';
7233 if (p != name + 1)
7234 *p++ = '-';
7235 p += snprintf(p, ID_STR_LENGTH - (p - name), "%07u", s->size);
7236
7237 if (WARN_ON(p > name + ID_STR_LENGTH - 1)) {
7238 kfree(name);
7239 return ERR_PTR(-EINVAL);
7240 }
7241 kmsan_unpoison_memory(name, p - name);
7242 return name;
7243}
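
/*
 * As an illustration, a SLAB_CACHE_DMA | SLAB_ACCOUNT cache with a size of
 * 192 bytes would get an id along the lines of ":dA-0000192".
 */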
7244
7245static int sysfs_slab_add(struct kmem_cache *s)
7246{
7247 int err;
7248 const char *name;
7249 struct kset *kset = cache_kset(s);
7250 int unmergeable = slab_unmergeable(s);
7251
7252 if (!unmergeable && disable_higher_order_debug &&
7253 (slub_debug & DEBUG_METADATA_FLAGS))
7254 unmergeable = 1;
7255
7256 if (unmergeable) {
7257 /*
7258 * The slab cache can never be merged, so we can use its name
7259 * directly. This is typically the case for debug builds, where
7260 * it also lets us catch duplicate names easily.
7261 */
7262 sysfs_remove_link(&slab_kset->kobj, s->name);
7263 name = s->name;
7264 } else {
7265 /*
7266 * Create a unique name for the slab as a target
7267 * for the symlinks.
7268 */
7269 name = create_unique_id(s);
7270 if (IS_ERR(name))
7271 return PTR_ERR(name);
7272 }
7273
7274 s->kobj.kset = kset;
7275 err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
7276 if (err)
7277 goto out;
7278
7279 err = sysfs_create_group(&s->kobj, &slab_attr_group);
7280 if (err)
7281 goto out_del_kobj;
7282
7283 if (!unmergeable) {
7284 /* Setup first alias */
7285 sysfs_slab_alias(s, s->name);
7286 }
7287out:
7288 if (!unmergeable)
7289 kfree(name);
7290 return err;
7291out_del_kobj:
7292 kobject_del(&s->kobj);
7293 goto out;
7294}
7295
7296void sysfs_slab_unlink(struct kmem_cache *s)
7297{
7298 if (s->kobj.state_in_sysfs)
7299 kobject_del(&s->kobj);
7300}
7301
7302void sysfs_slab_release(struct kmem_cache *s)
7303{
7304 kobject_put(&s->kobj);
7305}
7306
7307/*
7308 * Aliases need to be buffered during bootup, until sysfs becomes
7309 * available, lest we lose that information.
7310 */
7311struct saved_alias {
7312 struct kmem_cache *s;
7313 const char *name;
7314 struct saved_alias *next;
7315};
7316
7317static struct saved_alias *alias_list;
7318
7319static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
7320{
7321 struct saved_alias *al;
7322
7323 if (slab_state == FULL) {
7324 /*
7325 * If we have a leftover link then remove it.
7326 */
7327 sysfs_remove_link(&slab_kset->kobj, name);
7328 /*
7329 * The original cache may have failed to create its sysfs files.
7330 * In that case, sysfs_create_link() returns -ENOENT and
7331 * symbolic link creation is skipped.
7332 */
7333 return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
7334 }
7335
7336 al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
7337 if (!al)
7338 return -ENOMEM;
7339
7340 al->s = s;
7341 al->name = name;
7342 al->next = alias_list;
7343 alias_list = al;
7344 kmsan_unpoison_memory(al, sizeof(*al));
7345 return 0;
7346}
7347
7348static int __init slab_sysfs_init(void)
7349{
7350 struct kmem_cache *s;
7351 int err;
7352
7353 mutex_lock(&slab_mutex);
7354
7355 slab_kset = kset_create_and_add("slab", NULL, kernel_kobj);
7356 if (!slab_kset) {
7357 mutex_unlock(&slab_mutex);
7358 pr_err("Cannot register slab subsystem.\n");
7359 return -ENOMEM;
7360 }
7361
7362 slab_state = FULL;
7363
7364 list_for_each_entry(s, &slab_caches, list) {
7365 err = sysfs_slab_add(s);
7366 if (err)
7367 pr_err("SLUB: Unable to add boot slab %s to sysfs\n",
7368 s->name);
7369 }
7370
7371 while (alias_list) {
7372 struct saved_alias *al = alias_list;
7373
7374 alias_list = alias_list->next;
7375 err = sysfs_slab_alias(al->s, al->name);
7376 if (err)
7377 pr_err("SLUB: Unable to add boot slab alias %s to sysfs\n",
7378 al->name);
7379 kfree(al);
7380 }
7381
7382 mutex_unlock(&slab_mutex);
7383 return 0;
7384}
7385late_initcall(slab_sysfs_init);
7386#endif /* SLAB_SUPPORTS_SYSFS */
7387
7388#if defined(CONFIG_SLUB_DEBUG) && defined(CONFIG_DEBUG_FS)
7389static int slab_debugfs_show(struct seq_file *seq, void *v)
7390{
7391 struct loc_track *t = seq->private;
7392 struct location *l;
7393 unsigned long idx;
7394
7395 idx = (unsigned long) t->idx;
7396 if (idx < t->count) {
7397 l = &t->loc[idx];
7398
7399 seq_printf(seq, "%7ld ", l->count);
7400
7401 if (l->addr)
7402 seq_printf(seq, "%pS", (void *)l->addr);
7403 else
7404 seq_puts(seq, "<not-available>");
7405
7406 if (l->waste)
7407 seq_printf(seq, " waste=%lu/%lu",
7408 l->count * l->waste, l->waste);
7409
7410 if (l->sum_time != l->min_time) {
7411 seq_printf(seq, " age=%ld/%llu/%ld",
7412 l->min_time, div_u64(l->sum_time, l->count),
7413 l->max_time);
7414 } else
7415 seq_printf(seq, " age=%ld", l->min_time);
7416
7417 if (l->min_pid != l->max_pid)
7418 seq_printf(seq, " pid=%ld-%ld", l->min_pid, l->max_pid);
7419 else
7420 seq_printf(seq, " pid=%ld",
7421 l->min_pid);
7422
7423 if (num_online_cpus() > 1 && !cpumask_empty(to_cpumask(l->cpus)))
7424 seq_printf(seq, " cpus=%*pbl",
7425 cpumask_pr_args(to_cpumask(l->cpus)));
7426
7427 if (nr_online_nodes > 1 && !nodes_empty(l->nodes))
7428 seq_printf(seq, " nodes=%*pbl",
7429 nodemask_pr_args(&l->nodes));
7430
7431#ifdef CONFIG_STACKDEPOT
7432 {
7433 depot_stack_handle_t handle;
7434 unsigned long *entries;
7435 unsigned int nr_entries, j;
7436
7437 handle = READ_ONCE(l->handle);
7438 if (handle) {
7439 nr_entries = stack_depot_fetch(handle, &entries);
7440 seq_puts(seq, "\n");
7441 for (j = 0; j < nr_entries; j++)
7442 seq_printf(seq, " %pS\n", (void *)entries[j]);
7443 }
7444 }
7445#endif
7446 seq_puts(seq, "\n");
7447 }
7448
7449 if (!idx && !t->count)
7450 seq_puts(seq, "No data\n");
7451
7452 return 0;
7453}
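
/*
 * A single record produced above might look roughly like (illustrative
 * values):
 *
 *	1335 kmem_cache_alloc+0x9a/0x1a0 age=1200/3400/5700 pid=1-214 cpus=0-3
 *
 * optionally followed by the saved stack trace when stack depot is enabled.
 */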
7454
7455static void slab_debugfs_stop(struct seq_file *seq, void *v)
7456{
7457}
7458
7459static void *slab_debugfs_next(struct seq_file *seq, void *v, loff_t *ppos)
7460{
7461 struct loc_track *t = seq->private;
7462
7463 t->idx = ++(*ppos);
7464 if (*ppos <= t->count)
7465 return ppos;
7466
7467 return NULL;
7468}
7469
7470static int cmp_loc_by_count(const void *a, const void *b, const void *data)
7471{
7472 struct location *loc1 = (struct location *)a;
7473 struct location *loc2 = (struct location *)b;
7474
7475 if (loc1->count > loc2->count)
7476 return -1;
7477 else
7478 return 1;
7479}
7480
7481static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos)
7482{
7483 struct loc_track *t = seq->private;
7484
7485 t->idx = *ppos;
7486 return ppos;
7487}
7488
7489static const struct seq_operations slab_debugfs_sops = {
7490 .start = slab_debugfs_start,
7491 .next = slab_debugfs_next,
7492 .stop = slab_debugfs_stop,
7493 .show = slab_debugfs_show,
7494};
7495
7496static int slab_debug_trace_open(struct inode *inode, struct file *filep)
7497{
7498
7499 struct kmem_cache_node *n;
7500 enum track_item alloc;
7501 int node;
7502 struct loc_track *t = __seq_open_private(filep, &slab_debugfs_sops,
7503 sizeof(struct loc_track));
7504 struct kmem_cache *s = file_inode(filep)->i_private;
7505 unsigned long *obj_map;
7506
7507 if (!t)
7508 return -ENOMEM;
7509
7510 obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL);
7511 if (!obj_map) {
7512 seq_release_private(inode, filep);
7513 return -ENOMEM;
7514 }
7515
7516 if (strcmp(filep->f_path.dentry->d_name.name, "alloc_traces") == 0)
7517 alloc = TRACK_ALLOC;
7518 else
7519 alloc = TRACK_FREE;
7520
7521 if (!alloc_loc_track(t, PAGE_SIZE / sizeof(struct location), GFP_KERNEL)) {
7522 bitmap_free(obj_map);
7523 seq_release_private(inode, filep);
7524 return -ENOMEM;
7525 }
7526
7527 for_each_kmem_cache_node(s, node, n) {
7528 unsigned long flags;
7529 struct slab *slab;
7530
7531 if (!node_nr_slabs(n))
7532 continue;
7533
7534 spin_lock_irqsave(&n->list_lock, flags);
7535 list_for_each_entry(slab, &n->partial, slab_list)
7536 process_slab(t, s, slab, alloc, obj_map);
7537 list_for_each_entry(slab, &n->full, slab_list)
7538 process_slab(t, s, slab, alloc, obj_map);
7539 spin_unlock_irqrestore(&n->list_lock, flags);
7540 }
7541
7542 /* Sort locations by count */
7543 sort_r(t->loc, t->count, sizeof(struct location),
7544 cmp_loc_by_count, NULL, NULL);
7545
7546 bitmap_free(obj_map);
7547 return 0;
7548}
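
/*
 * The records are consumed through debugfs, e.g. (illustrative path):
 *
 *	cat /sys/kernel/debug/slab/<cache>/alloc_traces
 *
 * and only contain data for caches created with SLAB_STORE_USER, e.g. via
 * the slub_debug=U boot parameter.
 */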
7549
7550static int slab_debug_trace_release(struct inode *inode, struct file *file)
7551{
7552 struct seq_file *seq = file->private_data;
7553 struct loc_track *t = seq->private;
7554
7555 free_loc_track(t);
7556 return seq_release_private(inode, file);
7557}
7558
7559static const struct file_operations slab_debugfs_fops = {
7560 .open = slab_debug_trace_open,
7561 .read = seq_read,
7562 .llseek = seq_lseek,
7563 .release = slab_debug_trace_release,
7564};
7565
7566static void debugfs_slab_add(struct kmem_cache *s)
7567{
7568 struct dentry *slab_cache_dir;
7569
7570 if (unlikely(!slab_debugfs_root))
7571 return;
7572
7573 slab_cache_dir = debugfs_create_dir(s->name, slab_debugfs_root);
7574
7575 debugfs_create_file("alloc_traces", 0400,
7576 slab_cache_dir, s, &slab_debugfs_fops);
7577
7578 debugfs_create_file("free_traces", 0400,
7579 slab_cache_dir, s, &slab_debugfs_fops);
7580}
7581
7582void debugfs_slab_release(struct kmem_cache *s)
7583{
7584 debugfs_lookup_and_remove(s->name, slab_debugfs_root);
7585}
7586
7587static int __init slab_debugfs_init(void)
7588{
7589 struct kmem_cache *s;
7590
7591 slab_debugfs_root = debugfs_create_dir("slab", NULL);
7592
7593 list_for_each_entry(s, &slab_caches, list)
7594 if (s->flags & SLAB_STORE_USER)
7595 debugfs_slab_add(s);
7596
7597 return 0;
7598
7599}
7600__initcall(slab_debugfs_init);
7601#endif
7602/*
7603 * The /proc/slabinfo ABI
7604 */
7605#ifdef CONFIG_SLUB_DEBUG
7606void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
7607{
7608 unsigned long nr_slabs = 0;
7609 unsigned long nr_objs = 0;
7610 unsigned long nr_free = 0;
7611 int node;
7612 struct kmem_cache_node *n;
7613
7614 for_each_kmem_cache_node(s, node, n) {
7615 nr_slabs += node_nr_slabs(n);
7616 nr_objs += node_nr_objs(n);
7617 nr_free += count_partial_free_approx(n);
7618 }
7619
7620 sinfo->active_objs = nr_objs - nr_free;
7621 sinfo->num_objs = nr_objs;
7622 sinfo->active_slabs = nr_slabs;
7623 sinfo->num_slabs = nr_slabs;
7624 sinfo->objects_per_slab = oo_objects(s->oo);
7625 sinfo->cache_order = oo_order(s->oo);
7626}
7627#endif /* CONFIG_SLUB_DEBUG */