1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef MM_SLAB_H
3#define MM_SLAB_H
4/*
5 * Internal slab definitions
6 */
7
8#ifdef CONFIG_SLOB
9/*
10 * Common fields provided in kmem_cache by all slab allocators
11 * This struct is either used directly by the allocator (SLOB)
12 * or the allocator must include definitions for all fields
13 * provided in kmem_cache_common in their definition of kmem_cache.
14 *
15 * Once we can use anonymous structs (C11 standard) we could put an
16 * anonymous struct definition in these allocators so that the
17 * separate definitions in the kmem_cache structures of SLAB and
18 * SLUB are no longer needed.
19 */
20struct kmem_cache {
21 unsigned int object_size;/* The original size of the object */
22 unsigned int size; /* The aligned/padded/added on size */
23 unsigned int align; /* Alignment as calculated */
24 slab_flags_t flags; /* Active flags on the slab */
25 unsigned int useroffset;/* Usercopy region offset */
26 unsigned int usersize; /* Usercopy region size */
27 const char *name; /* Slab name for sysfs */
28 int refcount; /* Use counter */
29 void (*ctor)(void *); /* Called on object slot creation */
30 struct list_head list; /* List of all slab caches on the system */
31};
32
33#endif /* CONFIG_SLOB */
34
35#ifdef CONFIG_SLAB
36#include <linux/slab_def.h>
37#endif
38
39#ifdef CONFIG_SLUB
40#include <linux/slub_def.h>
41#endif
42
43#include <linux/memcontrol.h>
44#include <linux/fault-inject.h>
45#include <linux/kasan.h>
46#include <linux/kmemleak.h>
47#include <linux/random.h>
48#include <linux/sched/mm.h>
50
51/*
52 * State of the slab allocator.
53 *
54 * This is used to describe the states of the allocator during bootup.
55 * Allocators use this to gradually bootstrap themselves. Most allocators
56 * have the problem that the structures used for managing slab caches are
57 * allocated from slab caches themselves.
58 */
59enum slab_state {
60 DOWN, /* No slab functionality yet */
61 PARTIAL, /* SLUB: kmem_cache_node available */
62 PARTIAL_NODE, /* SLAB: kmalloc size for node struct available */
63 UP, /* Slab caches usable but not all extras yet */
64 FULL /* Everything is working */
65};
66
67extern enum slab_state slab_state;
68
69/* The slab cache mutex protects the management structures during changes */
70extern struct mutex slab_mutex;
71
72/* The list of all slab caches on the system */
73extern struct list_head slab_caches;
74
75/* The slab cache that manages slab cache information */
76extern struct kmem_cache *kmem_cache;
77
78/* A table of kmalloc cache names and sizes */
79extern const struct kmalloc_info_struct {
80 const char *name[NR_KMALLOC_TYPES];
81 unsigned int size;
82} kmalloc_info[];
83
84#ifndef CONFIG_SLOB
85/* Kmalloc array related functions */
86void setup_kmalloc_cache_index_table(void);
87void create_kmalloc_caches(slab_flags_t);
88
89/* Find the kmalloc slab corresponding to a certain size */
90struct kmem_cache *kmalloc_slab(size_t, gfp_t);
91#endif
92
93gfp_t kmalloc_fix_flags(gfp_t flags);
94
95/* Functions provided by the slab allocators */
96int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);
97
98struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
99 slab_flags_t flags, unsigned int useroffset,
100 unsigned int usersize);
101extern void create_boot_cache(struct kmem_cache *, const char *name,
102 unsigned int size, slab_flags_t flags,
103 unsigned int useroffset, unsigned int usersize);
104
105int slab_unmergeable(struct kmem_cache *s);
106struct kmem_cache *find_mergeable(unsigned size, unsigned align,
107 slab_flags_t flags, const char *name, void (*ctor)(void *));
108#ifndef CONFIG_SLOB
109struct kmem_cache *
110__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
111 slab_flags_t flags, void (*ctor)(void *));
112
113slab_flags_t kmem_cache_flags(unsigned int object_size,
114 slab_flags_t flags, const char *name,
115 void (*ctor)(void *));
116#else
117static inline struct kmem_cache *
118__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
119 slab_flags_t flags, void (*ctor)(void *))
120{ return NULL; }
121
122static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
123 slab_flags_t flags, const char *name,
124 void (*ctor)(void *))
125{
126 return flags;
127}
128#endif
129
130
131/* Legal flag mask for kmem_cache_create(), for various configurations */
132#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
133 SLAB_CACHE_DMA32 | SLAB_PANIC | \
134 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS )
135
136#if defined(CONFIG_DEBUG_SLAB)
137#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
138#elif defined(CONFIG_SLUB_DEBUG)
139#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
140 SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
141#else
142#define SLAB_DEBUG_FLAGS (0)
143#endif
144
145#if defined(CONFIG_SLAB)
146#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
147 SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
148 SLAB_ACCOUNT)
149#elif defined(CONFIG_SLUB)
150#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
151 SLAB_TEMPORARY | SLAB_ACCOUNT)
152#else
153#define SLAB_CACHE_FLAGS (0)
154#endif
155
156/* Common flags available with current configuration */
157#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
158
159/* Common flags permitted for kmem_cache_create */
160#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
161 SLAB_RED_ZONE | \
162 SLAB_POISON | \
163 SLAB_STORE_USER | \
164 SLAB_TRACE | \
165 SLAB_CONSISTENCY_CHECKS | \
166 SLAB_MEM_SPREAD | \
167 SLAB_NOLEAKTRACE | \
168 SLAB_RECLAIM_ACCOUNT | \
169 SLAB_TEMPORARY | \
170 SLAB_ACCOUNT)
171
172bool __kmem_cache_empty(struct kmem_cache *);
173int __kmem_cache_shutdown(struct kmem_cache *);
174void __kmem_cache_release(struct kmem_cache *);
175int __kmem_cache_shrink(struct kmem_cache *);
176void slab_kmem_cache_release(struct kmem_cache *);
177
178struct seq_file;
179struct file;
180
181struct slabinfo {
182 unsigned long active_objs;
183 unsigned long num_objs;
184 unsigned long active_slabs;
185 unsigned long num_slabs;
186 unsigned long shared_avail;
187 unsigned int limit;
188 unsigned int batchcount;
189 unsigned int shared;
190 unsigned int objects_per_slab;
191 unsigned int cache_order;
192};
193
194void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
195void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
196ssize_t slabinfo_write(struct file *file, const char __user *buffer,
197 size_t count, loff_t *ppos);
198
199/*
200 * Generic implementation of bulk operations
201 * These are useful for situations in which the allocator cannot
202 * perform optimizations. In that case segments of the object list
203 * may be allocated or freed using these operations.
204 */
205void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
206int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
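/*
 * Illustrative sketch (not part of this header) of what the generic
 * fallback free path can look like: it simply walks the array and frees
 * each object on its own, e.g.
 *
 *	static void example_free_bulk(struct kmem_cache *s, size_t nr, void **p)
 *	{
 *		size_t i;
 *
 *		for (i = 0; i < nr; i++)
 *			kmem_cache_free(s, p[i]);
 *	}
 *
 * Allocators provide their own versions only when they can batch the work
 * more efficiently, e.g. by refilling several objects from one cpu slab.
 */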
207
208static inline int cache_vmstat_idx(struct kmem_cache *s)
209{
210 return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
211 NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
212}
213
214#ifdef CONFIG_SLUB_DEBUG
215#ifdef CONFIG_SLUB_DEBUG_ON
216DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
217#else
218DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
219#endif
220extern void print_tracking(struct kmem_cache *s, void *object);
221#else
222static inline void print_tracking(struct kmem_cache *s, void *object)
223{
224}
225#endif
226
227/*
228 * Returns true if any of the specified slub_debug flags is enabled for the
229 * cache. Use only for flags parsed by setup_slub_debug() as it also enables
230 * the static key.
231 */
232static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
233{
234#ifdef CONFIG_SLUB_DEBUG
235 VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
236 if (static_branch_unlikely(&slub_debug_enabled))
237 return s->flags & flags;
238#endif
239 return false;
240}
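/*
 * Usage example (illustrative only; do_extra_checks() is a hypothetical
 * helper): take a debug-only path when redzoning is active for this cache:
 *
 *	if (kmem_cache_debug_flags(s, SLAB_RED_ZONE))
 *		do_extra_checks(s);
 *
 * Passing a flag outside SLAB_DEBUG_FLAGS trips the VM_WARN_ON_ONCE()
 * above.
 */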
241
242#ifdef CONFIG_MEMCG_KMEM
243static inline struct obj_cgroup **page_obj_cgroups(struct page *page)
244{
245 /*
246 * page->mem_cgroup and page->obj_cgroups are sharing the same
247 * space. To distinguish between them in case we don't know for sure
248 * that the page is a slab page (e.g. page_cgroup_ino()), let's
249 * always set the lowest bit of obj_cgroups.
250 */
251 return (struct obj_cgroup **)
252 ((unsigned long)page->obj_cgroups & ~0x1UL);
253}
254
255static inline bool page_has_obj_cgroups(struct page *page)
256{
257 return ((unsigned long)page->obj_cgroups & 0x1UL);
258}
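/*
 * For illustration only: the store side (implemented outside this header)
 * is expected to tag the vector pointer with bit 0 when installing it,
 * along the lines of
 *
 *	void *vec = kcalloc_node(objs, sizeof(struct obj_cgroup *),
 *				 gfp, page_to_nid(page));
 *	page->obj_cgroups = (struct obj_cgroup **)((unsigned long)vec | 0x1UL);
 *
 * so that page_obj_cgroups() can mask the bit back off and
 * page_has_obj_cgroups() can test for it.
 */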
259
260int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
261 gfp_t gfp);
262
263static inline void memcg_free_page_obj_cgroups(struct page *page)
264{
265 kfree(page_obj_cgroups(page));
266 page->obj_cgroups = NULL;
267}
268
269static inline size_t obj_full_size(struct kmem_cache *s)
270{
271 /*
272 * For each accounted object there is an extra space which is used
273 * to store obj_cgroup membership. Charge it too.
274 */
275 return s->size + sizeof(struct obj_cgroup *);
276}
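/*
 * Worked example (illustrative numbers): for a cache with s->size == 64 on
 * a 64-bit kernel, each accounted object is charged
 * 64 + sizeof(struct obj_cgroup *) == 72 bytes.
 */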
277
278static inline struct obj_cgroup *memcg_slab_pre_alloc_hook(struct kmem_cache *s,
279 size_t objects,
280 gfp_t flags)
281{
282 struct obj_cgroup *objcg;
283
284 if (memcg_kmem_bypass())
285 return NULL;
286
287 objcg = get_obj_cgroup_from_current();
288 if (!objcg)
289 return NULL;
290
291 if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s))) {
292 obj_cgroup_put(objcg);
293 return NULL;
294 }
295
296 return objcg;
297}
298
299static inline void mod_objcg_state(struct obj_cgroup *objcg,
300 struct pglist_data *pgdat,
301 int idx, int nr)
302{
303 struct mem_cgroup *memcg;
304 struct lruvec *lruvec;
305
306 rcu_read_lock();
307 memcg = obj_cgroup_memcg(objcg);
308 lruvec = mem_cgroup_lruvec(memcg, pgdat);
309 mod_memcg_lruvec_state(lruvec, idx, nr);
310 rcu_read_unlock();
311}
312
313static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
314 struct obj_cgroup *objcg,
315 gfp_t flags, size_t size,
316 void **p)
317{
318 struct page *page;
319 unsigned long off;
320 size_t i;
321
322 if (!objcg)
323 return;
324
325 flags &= ~__GFP_ACCOUNT;
326 for (i = 0; i < size; i++) {
327 if (likely(p[i])) {
328 page = virt_to_head_page(p[i]);
329
330 if (!page_has_obj_cgroups(page) &&
331 memcg_alloc_page_obj_cgroups(page, s, flags)) {
332 obj_cgroup_uncharge(objcg, obj_full_size(s));
333 continue;
334 }
335
336 off = obj_to_index(s, page, p[i]);
337 obj_cgroup_get(objcg);
338 page_obj_cgroups(page)[off] = objcg;
339 mod_objcg_state(objcg, page_pgdat(page),
340 cache_vmstat_idx(s), obj_full_size(s));
341 } else {
342 obj_cgroup_uncharge(objcg, obj_full_size(s));
343 }
344 }
345 obj_cgroup_put(objcg);
346}
347
348static inline void memcg_slab_free_hook(struct kmem_cache *s, struct page *page,
349 void *p)
350{
351 struct obj_cgroup *objcg;
352 unsigned int off;
353
354 if (!memcg_kmem_enabled())
355 return;
356
357 if (!page_has_obj_cgroups(page))
358 return;
359
360 off = obj_to_index(s, page, p);
361 objcg = page_obj_cgroups(page)[off];
362 page_obj_cgroups(page)[off] = NULL;
363
364 if (!objcg)
365 return;
366
367 obj_cgroup_uncharge(objcg, obj_full_size(s));
368 mod_objcg_state(objcg, page_pgdat(page), cache_vmstat_idx(s),
369 -obj_full_size(s));
370
371 obj_cgroup_put(objcg);
372}
373
374#else /* CONFIG_MEMCG_KMEM */
375static inline bool page_has_obj_cgroups(struct page *page)
376{
377 return false;
378}
379
380static inline struct mem_cgroup *memcg_from_slab_obj(void *ptr)
381{
382 return NULL;
383}
384
385static inline int memcg_alloc_page_obj_cgroups(struct page *page,
386 struct kmem_cache *s, gfp_t gfp)
387{
388 return 0;
389}
390
391static inline void memcg_free_page_obj_cgroups(struct page *page)
392{
393}
394
395static inline struct obj_cgroup *memcg_slab_pre_alloc_hook(struct kmem_cache *s,
396 size_t objects,
397 gfp_t flags)
398{
399 return NULL;
400}
401
402static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
403 struct obj_cgroup *objcg,
404 gfp_t flags, size_t size,
405 void **p)
406{
407}
408
409static inline void memcg_slab_free_hook(struct kmem_cache *s, struct page *page,
410 void *p)
411{
412}
413#endif /* CONFIG_MEMCG_KMEM */
414
415static inline struct kmem_cache *virt_to_cache(const void *obj)
416{
417 struct page *page;
418
419 page = virt_to_head_page(obj);
420 if (WARN_ONCE(!PageSlab(page), "%s: Object is not a Slab page!\n",
421 __func__))
422 return NULL;
423 return page->slab_cache;
424}
425
426static __always_inline void account_slab_page(struct page *page, int order,
427 struct kmem_cache *s)
428{
429 mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
430 PAGE_SIZE << order);
431}
432
433static __always_inline void unaccount_slab_page(struct page *page, int order,
434 struct kmem_cache *s)
435{
436 if (memcg_kmem_enabled())
437 memcg_free_page_obj_cgroups(page);
438
439 mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
440 -(PAGE_SIZE << order));
441}
442
443static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
444{
445 struct kmem_cache *cachep;
446
447 if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
448 !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
449 return s;
450
451 cachep = virt_to_cache(x);
452 if (WARN(cachep && cachep != s,
453 "%s: Wrong slab cache. %s but object is from %s\n",
454 __func__, s->name, cachep->name))
455 print_tracking(cachep, x);
456 return cachep;
457}
458
459static inline size_t slab_ksize(const struct kmem_cache *s)
460{
461#ifndef CONFIG_SLUB
462 return s->object_size;
463
464#else /* CONFIG_SLUB */
465# ifdef CONFIG_SLUB_DEBUG
466 /*
467 * Debugging requires use of the padding between object
468 * and whatever may come after it.
469 */
470 if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
471 return s->object_size;
472# endif
473 if (s->flags & SLAB_KASAN)
474 return s->object_size;
475 /*
476	 * If we need to store the freelist pointer
477 * back there or track user information then we can
478 * only use the space before that information.
479 */
480 if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
481 return s->inuse;
482 /*
483 * Else we can use all the padding etc for the allocation
484 */
485 return s->size;
486#endif
487}
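/*
 * Worked example (hypothetical SLUB cache): for a 40-byte object with
 * SLAB_STORE_USER set, s->object_size is 40, s->inuse is the object area
 * rounded up to a word boundary (still 40 on 64-bit), and s->size also
 * covers the out-of-object freelist pointer, tracking data and alignment
 * padding. slab_ksize() then returns s->inuse, so ksize() does not report
 * the metadata space as usable.
 */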
488
489static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
490 struct obj_cgroup **objcgp,
491 size_t size, gfp_t flags)
492{
493 flags &= gfp_allowed_mask;
494
495 fs_reclaim_acquire(flags);
496 fs_reclaim_release(flags);
497
498 might_sleep_if(gfpflags_allow_blocking(flags));
499
500 if (should_failslab(s, flags))
501 return NULL;
502
503 if (memcg_kmem_enabled() &&
504 ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
505 *objcgp = memcg_slab_pre_alloc_hook(s, size, flags);
506
507 return s;
508}
509
510static inline void slab_post_alloc_hook(struct kmem_cache *s,
511 struct obj_cgroup *objcg,
512 gfp_t flags, size_t size, void **p)
513{
514 size_t i;
515
516 flags &= gfp_allowed_mask;
517 for (i = 0; i < size; i++) {
518 p[i] = kasan_slab_alloc(s, p[i], flags);
519 /* As p[i] might get tagged, call kmemleak hook after KASAN. */
520 kmemleak_alloc_recursive(p[i], s->object_size, 1,
521 s->flags, flags);
522 }
523
524 if (memcg_kmem_enabled())
525 memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
526}
527
528#ifndef CONFIG_SLOB
529/*
530 * The slab lists for all objects.
531 */
532struct kmem_cache_node {
533 spinlock_t list_lock;
534
535#ifdef CONFIG_SLAB
536 struct list_head slabs_partial; /* partial list first, better asm code */
537 struct list_head slabs_full;
538 struct list_head slabs_free;
539 unsigned long total_slabs; /* length of all slab lists */
540 unsigned long free_slabs; /* length of free slab list only */
541 unsigned long free_objects;
542 unsigned int free_limit;
543 unsigned int colour_next; /* Per-node cache coloring */
544 struct array_cache *shared; /* shared per node */
545 struct alien_cache **alien; /* on other nodes */
546 unsigned long next_reap; /* updated without locking */
547 int free_touched; /* updated without locking */
548#endif
549
550#ifdef CONFIG_SLUB
551 unsigned long nr_partial;
552 struct list_head partial;
553#ifdef CONFIG_SLUB_DEBUG
554 atomic_long_t nr_slabs;
555 atomic_long_t total_objects;
556 struct list_head full;
557#endif
558#endif
559
560};
561
562static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
563{
564 return s->node[node];
565}
566
567/*
568 * Iterator over all nodes. The body will be executed for each node that has
569 * a kmem_cache_node structure allocated (which is true for all online nodes)
570 */
571#define for_each_kmem_cache_node(__s, __node, __n) \
572 for (__node = 0; __node < nr_node_ids; __node++) \
573 if ((__n = get_node(__s, __node)))
574
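/*
 * Usage sketch: counting partial slabs across all nodes of a SLUB cache
 * (callers must ensure the cache cannot go away, e.g. via slab_mutex):
 *
 *	struct kmem_cache_node *n;
 *	unsigned long count = 0;
 *	int node;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		count += n->nr_partial;
 */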
575#endif
576
577void *slab_start(struct seq_file *m, loff_t *pos);
578void *slab_next(struct seq_file *m, void *p, loff_t *pos);
579void slab_stop(struct seq_file *m, void *p);
580int memcg_slab_show(struct seq_file *m, void *p);
581
582#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
583void dump_unreclaimable_slab(void);
584#else
585static inline void dump_unreclaimable_slab(void)
586{
587}
588#endif
589
590void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);
591
592#ifdef CONFIG_SLAB_FREELIST_RANDOM
593int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
594 gfp_t gfp);
595void cache_random_seq_destroy(struct kmem_cache *cachep);
596#else
597static inline int cache_random_seq_create(struct kmem_cache *cachep,
598 unsigned int count, gfp_t gfp)
599{
600 return 0;
601}
602static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
603#endif /* CONFIG_SLAB_FREELIST_RANDOM */
604
605static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
606{
607 if (static_branch_unlikely(&init_on_alloc)) {
608 if (c->ctor)
609 return false;
610 if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
611 return flags & __GFP_ZERO;
612 return true;
613 }
614 return flags & __GFP_ZERO;
615}
616
617static inline bool slab_want_init_on_free(struct kmem_cache *c)
618{
619 if (static_branch_unlikely(&init_on_free))
620 return !(c->ctor ||
621 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
622 return false;
623}
624
625#endif /* MM_SLAB_H */
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef MM_SLAB_H
3#define MM_SLAB_H
4
5#include <linux/reciprocal_div.h>
6#include <linux/list_lru.h>
7#include <linux/local_lock.h>
8#include <linux/random.h>
9#include <linux/kobject.h>
10#include <linux/sched/mm.h>
11#include <linux/memcontrol.h>
12#include <linux/kfence.h>
13#include <linux/kasan.h>
14
15/*
16 * Internal slab definitions
17 */
18
19#ifdef CONFIG_64BIT
20# ifdef system_has_cmpxchg128
21# define system_has_freelist_aba() system_has_cmpxchg128()
22# define try_cmpxchg_freelist try_cmpxchg128
23# endif
24#define this_cpu_try_cmpxchg_freelist this_cpu_try_cmpxchg128
25typedef u128 freelist_full_t;
26#else /* CONFIG_64BIT */
27# ifdef system_has_cmpxchg64
28# define system_has_freelist_aba() system_has_cmpxchg64()
29# define try_cmpxchg_freelist try_cmpxchg64
30# endif
31#define this_cpu_try_cmpxchg_freelist this_cpu_try_cmpxchg64
32typedef u64 freelist_full_t;
33#endif /* CONFIG_64BIT */
34
35#if defined(system_has_freelist_aba) && !defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
36#undef system_has_freelist_aba
37#endif
38
39/*
40 * Freelist pointer and counter to cmpxchg together, avoids the typical ABA
41 * problems with cmpxchg of just a pointer.
42 */
43typedef union {
44 struct {
45 void *freelist;
46 unsigned long counter;
47 };
48 freelist_full_t full;
49} freelist_aba_t;
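/*
 * Sketch of the intended use (not a verbatim copy of slub.c): read the
 * freelist together with its companion word, then publish the update with
 * one wide cmpxchg so that a concurrent free/alloc recycling the same
 * object is caught by the changed counter:
 *
 *	freelist_aba_t old, new;
 *
 *	old.full = slab_freelist_counter.full;
 *	new.freelist = next_object;
 *	new.counter = old.counter + 1;
 *	if (!try_cmpxchg_freelist(&slab_freelist_counter.full,
 *				  &old.full, new.full))
 *		goto retry;
 *
 * Here slab_freelist_counter stands in for the real per-slab counters or
 * per-cpu transaction id that slub.c pairs with the freelist pointer.
 */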
50
51/* Reuses the bits in struct page */
52struct slab {
53 unsigned long __page_flags;
54
55 struct kmem_cache *slab_cache;
56 union {
57 struct {
58 union {
59 struct list_head slab_list;
60#ifdef CONFIG_SLUB_CPU_PARTIAL
61 struct {
62 struct slab *next;
63 int slabs; /* Nr of slabs left */
64 };
65#endif
66 };
67 /* Double-word boundary */
68 union {
69 struct {
70 void *freelist; /* first free object */
71 union {
72 unsigned long counters;
73 struct {
74 unsigned inuse:16;
75 unsigned objects:15;
76 /*
77 * If slab debugging is enabled then the
78 * frozen bit can be reused to indicate
79 * that the slab was corrupted
80 */
81 unsigned frozen:1;
82 };
83 };
84 };
85#ifdef system_has_freelist_aba
86 freelist_aba_t freelist_counter;
87#endif
88 };
89 };
90 struct rcu_head rcu_head;
91 };
92
93 unsigned int __page_type;
94 atomic_t __page_refcount;
95#ifdef CONFIG_SLAB_OBJ_EXT
96 unsigned long obj_exts;
97#endif
98};
99
100#define SLAB_MATCH(pg, sl) \
101 static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
102SLAB_MATCH(flags, __page_flags);
103SLAB_MATCH(compound_head, slab_cache); /* Ensure bit 0 is clear */
104SLAB_MATCH(_refcount, __page_refcount);
105#ifdef CONFIG_MEMCG
106SLAB_MATCH(memcg_data, obj_exts);
107#elif defined(CONFIG_SLAB_OBJ_EXT)
108SLAB_MATCH(_unused_slab_obj_exts, obj_exts);
109#endif
110#undef SLAB_MATCH
111static_assert(sizeof(struct slab) <= sizeof(struct page));
112#if defined(system_has_freelist_aba)
113static_assert(IS_ALIGNED(offsetof(struct slab, freelist), sizeof(freelist_aba_t)));
114#endif
115
116/**
117 * folio_slab - Converts from folio to slab.
118 * @folio: The folio.
119 *
120 * Currently struct slab is a different representation of a folio where
121 * folio_test_slab() is true.
122 *
123 * Return: The slab which contains this folio.
124 */
125#define folio_slab(folio) (_Generic((folio), \
126 const struct folio *: (const struct slab *)(folio), \
127 struct folio *: (struct slab *)(folio)))
128
129/**
130 * slab_folio - The folio allocated for a slab
131 * @slab: The slab.
132 *
133 * Slabs are allocated as folios that contain the individual objects and are
134 * using some fields in the first struct page of the folio - those fields are
135 * now accessed by struct slab. It is occasionally necessary to convert back to
136 * a folio in order to communicate with the rest of the mm. Please use this
137 * helper function instead of casting yourself, as the implementation may change
138 * in the future.
139 */
140#define slab_folio(s) (_Generic((s), \
141 const struct slab *: (const struct folio *)s, \
142 struct slab *: (struct folio *)s))
143
144/**
145 * page_slab - Converts from first struct page to slab.
146 * @p: The first (either head of compound or single) page of slab.
147 *
148 * A temporary wrapper to convert struct page to struct slab in situations where
149 * we know the page is the compound head, or a single order-0 page.
150 *
151 * Long-term ideally everything would work with struct slab directly or go
152 * through folio to struct slab.
153 *
154 * Return: The slab which contains this page
155 */
156#define page_slab(p) (_Generic((p), \
157 const struct page *: (const struct slab *)(p), \
158 struct page *: (struct slab *)(p)))
159
160/**
161 * slab_page - The first struct page allocated for a slab
162 * @slab: The slab.
163 *
164 * A convenience wrapper for converting slab to the first struct page of the
165 * underlying folio, to communicate with code not yet converted to folio or
166 * struct slab.
167 */
168#define slab_page(s) folio_page(slab_folio(s), 0)
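/*
 * Illustrative round trip (assumes addr points into a slab page):
 *
 *	struct folio *folio = virt_to_folio(addr);
 *	struct slab *slab = folio_slab(folio);
 *	struct page *page = slab_page(slab);
 *
 * New code should stay in struct slab / struct folio terms where possible
 * and only drop down to struct page for not-yet-converted interfaces.
 */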
169
170/*
171 * If network-based swap is enabled, sl*b must keep track of whether pages
172 * were allocated from pfmemalloc reserves.
173 */
174static inline bool slab_test_pfmemalloc(const struct slab *slab)
175{
176 return folio_test_active(slab_folio(slab));
177}
178
179static inline void slab_set_pfmemalloc(struct slab *slab)
180{
181 folio_set_active(slab_folio(slab));
182}
183
184static inline void slab_clear_pfmemalloc(struct slab *slab)
185{
186 folio_clear_active(slab_folio(slab));
187}
188
189static inline void __slab_clear_pfmemalloc(struct slab *slab)
190{
191 __folio_clear_active(slab_folio(slab));
192}
193
194static inline void *slab_address(const struct slab *slab)
195{
196 return folio_address(slab_folio(slab));
197}
198
199static inline int slab_nid(const struct slab *slab)
200{
201 return folio_nid(slab_folio(slab));
202}
203
204static inline pg_data_t *slab_pgdat(const struct slab *slab)
205{
206 return folio_pgdat(slab_folio(slab));
207}
208
209static inline struct slab *virt_to_slab(const void *addr)
210{
211 struct folio *folio = virt_to_folio(addr);
212
213 if (!folio_test_slab(folio))
214 return NULL;
215
216 return folio_slab(folio);
217}
218
219static inline int slab_order(const struct slab *slab)
220{
221 return folio_order(slab_folio(slab));
222}
223
224static inline size_t slab_size(const struct slab *slab)
225{
226 return PAGE_SIZE << slab_order(slab);
227}
228
229#ifdef CONFIG_SLUB_CPU_PARTIAL
230#define slub_percpu_partial(c) ((c)->partial)
231
232#define slub_set_percpu_partial(c, p) \
233({ \
234 slub_percpu_partial(c) = (p)->next; \
235})
236
237#define slub_percpu_partial_read_once(c) READ_ONCE(slub_percpu_partial(c))
238#else
239#define slub_percpu_partial(c) NULL
240
241#define slub_set_percpu_partial(c, p)
242
243#define slub_percpu_partial_read_once(c) NULL
244#endif // CONFIG_SLUB_CPU_PARTIAL
245
246/*
247 * Word size structure that can be atomically updated or read and that
248 * contains both the order and the number of objects that a slab of the
249 * given order would contain.
250 */
251struct kmem_cache_order_objects {
252 unsigned int x;
253};
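/*
 * The encoding itself lives in slub.c; as a sketch, order and object count
 * are packed into the single word roughly as
 *
 *	x = (order << 16) | nr_objects;
 *
 * so that both values are read and updated together, e.g. when the slab
 * allocation path falls back from s->oo to the smaller s->min order under
 * memory pressure.
 */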
254
255/*
256 * Slab cache management.
257 */
258struct kmem_cache {
259#ifndef CONFIG_SLUB_TINY
260 struct kmem_cache_cpu __percpu *cpu_slab;
261#endif
262 /* Used for retrieving partial slabs, etc. */
263 slab_flags_t flags;
264 unsigned long min_partial;
265 unsigned int size; /* Object size including metadata */
266 unsigned int object_size; /* Object size without metadata */
267 struct reciprocal_value reciprocal_size;
268 unsigned int offset; /* Free pointer offset */
269#ifdef CONFIG_SLUB_CPU_PARTIAL
270 /* Number of per cpu partial objects to keep around */
271 unsigned int cpu_partial;
272 /* Number of per cpu partial slabs to keep around */
273 unsigned int cpu_partial_slabs;
274#endif
275 struct kmem_cache_order_objects oo;
276
277 /* Allocation and freeing of slabs */
278 struct kmem_cache_order_objects min;
279 gfp_t allocflags; /* gfp flags to use on each alloc */
280 int refcount; /* Refcount for slab cache destroy */
281 void (*ctor)(void *object); /* Object constructor */
282 unsigned int inuse; /* Offset to metadata */
283 unsigned int align; /* Alignment */
284 unsigned int red_left_pad; /* Left redzone padding size */
285 const char *name; /* Name (only for display!) */
286 struct list_head list; /* List of slab caches */
287#ifdef CONFIG_SYSFS
288 struct kobject kobj; /* For sysfs */
289#endif
290#ifdef CONFIG_SLAB_FREELIST_HARDENED
291 unsigned long random;
292#endif
293
294#ifdef CONFIG_NUMA
295 /*
296 * Defragmentation by allocating from a remote node.
297 */
298 unsigned int remote_node_defrag_ratio;
299#endif
300
301#ifdef CONFIG_SLAB_FREELIST_RANDOM
302 unsigned int *random_seq;
303#endif
304
305#ifdef CONFIG_KASAN_GENERIC
306 struct kasan_cache kasan_info;
307#endif
308
309#ifdef CONFIG_HARDENED_USERCOPY
310 unsigned int useroffset; /* Usercopy region offset */
311 unsigned int usersize; /* Usercopy region size */
312#endif
313
314 struct kmem_cache_node *node[MAX_NUMNODES];
315};
316
317#if defined(CONFIG_SYSFS) && !defined(CONFIG_SLUB_TINY)
318#define SLAB_SUPPORTS_SYSFS 1
319void sysfs_slab_unlink(struct kmem_cache *s);
320void sysfs_slab_release(struct kmem_cache *s);
321#else
322static inline void sysfs_slab_unlink(struct kmem_cache *s) { }
323static inline void sysfs_slab_release(struct kmem_cache *s) { }
324#endif
325
326void *fixup_red_left(struct kmem_cache *s, void *p);
327
328static inline void *nearest_obj(struct kmem_cache *cache,
329 const struct slab *slab, void *x)
330{
331 void *object = x - (x - slab_address(slab)) % cache->size;
332 void *last_object = slab_address(slab) +
333 (slab->objects - 1) * cache->size;
334 void *result = (unlikely(object > last_object)) ? last_object : object;
335
336 result = fixup_red_left(cache, result);
337 return result;
338}
339
340/* Determine object index from a given position */
341static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
342 void *addr, void *obj)
343{
344 return reciprocal_divide(kasan_reset_tag(obj) - addr,
345 cache->reciprocal_size);
346}
347
348static inline unsigned int obj_to_index(const struct kmem_cache *cache,
349 const struct slab *slab, void *obj)
350{
351 if (is_kfence_address(obj))
352 return 0;
353 return __obj_to_index(cache, slab_address(slab), obj);
354}
355
356static inline int objs_per_slab(const struct kmem_cache *cache,
357 const struct slab *slab)
358{
359 return slab->objects;
360}
361
362/*
363 * State of the slab allocator.
364 *
365 * This is used to describe the states of the allocator during bootup.
366 * Allocators use this to gradually bootstrap themselves. Most allocators
367 * have the problem that the structures used for managing slab caches are
368 * allocated from slab caches themselves.
369 */
370enum slab_state {
371 DOWN, /* No slab functionality yet */
372 PARTIAL, /* SLUB: kmem_cache_node available */
373 UP, /* Slab caches usable but not all extras yet */
374 FULL /* Everything is working */
375};
376
377extern enum slab_state slab_state;
378
379/* The slab cache mutex protects the management structures during changes */
380extern struct mutex slab_mutex;
381
382/* The list of all slab caches on the system */
383extern struct list_head slab_caches;
384
385/* The slab cache that manages slab cache information */
386extern struct kmem_cache *kmem_cache;
387
388/* A table of kmalloc cache names and sizes */
389extern const struct kmalloc_info_struct {
390 const char *name[NR_KMALLOC_TYPES];
391 unsigned int size;
392} kmalloc_info[];
393
394/* Kmalloc array related functions */
395void setup_kmalloc_cache_index_table(void);
396void create_kmalloc_caches(void);
397
398extern u8 kmalloc_size_index[24];
399
400static inline unsigned int size_index_elem(unsigned int bytes)
401{
402 return (bytes - 1) / 8;
403}
404
405/*
406 * Find the kmem_cache structure that serves a given size of
407 * allocation
408 *
409 * This assumes size is larger than zero and not larger than
410 * KMALLOC_MAX_CACHE_SIZE; the caller must check that.
411 */
412static inline struct kmem_cache *
413kmalloc_slab(size_t size, kmem_buckets *b, gfp_t flags, unsigned long caller)
414{
415 unsigned int index;
416
417 if (!b)
418 b = &kmalloc_caches[kmalloc_type(flags, caller)];
419 if (size <= 192)
420 index = kmalloc_size_index[size_index_elem(size)];
421 else
422 index = fls(size - 1);
423
424 return (*b)[index];
425}
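/*
 * Worked example (illustrative sizes): a 100-byte request is <= 192 and is
 * looked up via kmalloc_size_index[], landing in the kmalloc-128 bucket of
 * the chosen type; a 500-byte request takes the fls() path, fls(499) == 9,
 * i.e. the kmalloc-512 bucket.
 */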
426
427gfp_t kmalloc_fix_flags(gfp_t flags);
428
429/* Functions provided by the slab allocators */
430int do_kmem_cache_create(struct kmem_cache *s, const char *name,
431 unsigned int size, struct kmem_cache_args *args,
432 slab_flags_t flags);
433
434void __init kmem_cache_init(void);
435extern void create_boot_cache(struct kmem_cache *, const char *name,
436 unsigned int size, slab_flags_t flags,
437 unsigned int useroffset, unsigned int usersize);
438
439int slab_unmergeable(struct kmem_cache *s);
440struct kmem_cache *find_mergeable(unsigned size, unsigned align,
441 slab_flags_t flags, const char *name, void (*ctor)(void *));
442struct kmem_cache *
443__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
444 slab_flags_t flags, void (*ctor)(void *));
445
446slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name);
447
448static inline bool is_kmalloc_cache(struct kmem_cache *s)
449{
450 return (s->flags & SLAB_KMALLOC);
451}
452
453static inline bool is_kmalloc_normal(struct kmem_cache *s)
454{
455 if (!is_kmalloc_cache(s))
456 return false;
457 return !(s->flags & (SLAB_CACHE_DMA|SLAB_ACCOUNT|SLAB_RECLAIM_ACCOUNT));
458}
459
460/* Legal flag mask for kmem_cache_create(), for various configurations */
461#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
462 SLAB_CACHE_DMA32 | SLAB_PANIC | \
463 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS )
464
465#ifdef CONFIG_SLUB_DEBUG
466#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
467 SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
468#else
469#define SLAB_DEBUG_FLAGS (0)
470#endif
471
472#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
473 SLAB_TEMPORARY | SLAB_ACCOUNT | \
474 SLAB_NO_USER_FLAGS | SLAB_KMALLOC | SLAB_NO_MERGE)
475
476/* Common flags available with current configuration */
477#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
478
479/* Common flags permitted for kmem_cache_create */
480#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
481 SLAB_RED_ZONE | \
482 SLAB_POISON | \
483 SLAB_STORE_USER | \
484 SLAB_TRACE | \
485 SLAB_CONSISTENCY_CHECKS | \
486 SLAB_NOLEAKTRACE | \
487 SLAB_RECLAIM_ACCOUNT | \
488 SLAB_TEMPORARY | \
489 SLAB_ACCOUNT | \
490 SLAB_KMALLOC | \
491 SLAB_NO_MERGE | \
492 SLAB_NO_USER_FLAGS)
493
494bool __kmem_cache_empty(struct kmem_cache *);
495int __kmem_cache_shutdown(struct kmem_cache *);
496void __kmem_cache_release(struct kmem_cache *);
497int __kmem_cache_shrink(struct kmem_cache *);
498void slab_kmem_cache_release(struct kmem_cache *);
499
500struct seq_file;
501struct file;
502
503struct slabinfo {
504 unsigned long active_objs;
505 unsigned long num_objs;
506 unsigned long active_slabs;
507 unsigned long num_slabs;
508 unsigned long shared_avail;
509 unsigned int limit;
510 unsigned int batchcount;
511 unsigned int shared;
512 unsigned int objects_per_slab;
513 unsigned int cache_order;
514};
515
516void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
517
518#ifdef CONFIG_SLUB_DEBUG
519#ifdef CONFIG_SLUB_DEBUG_ON
520DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
521#else
522DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
523#endif
524extern void print_tracking(struct kmem_cache *s, void *object);
525long validate_slab_cache(struct kmem_cache *s);
526static inline bool __slub_debug_enabled(void)
527{
528 return static_branch_unlikely(&slub_debug_enabled);
529}
530#else
531static inline void print_tracking(struct kmem_cache *s, void *object)
532{
533}
534static inline bool __slub_debug_enabled(void)
535{
536 return false;
537}
538#endif
539
540/*
541 * Returns true if any of the specified slab_debug flags is enabled for the
542 * cache. Use only for flags parsed by setup_slub_debug() as it also enables
543 * the static key.
544 */
545static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
546{
547 if (IS_ENABLED(CONFIG_SLUB_DEBUG))
548 VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
549 if (__slub_debug_enabled())
550 return s->flags & flags;
551 return false;
552}
553
554#if IS_ENABLED(CONFIG_SLUB_DEBUG) && IS_ENABLED(CONFIG_KUNIT)
555bool slab_in_kunit_test(void);
556#else
557static inline bool slab_in_kunit_test(void) { return false; }
558#endif
559
560#ifdef CONFIG_SLAB_OBJ_EXT
561
562/*
563 * slab_obj_exts - get the pointer to the slab object extension vector
564 * associated with a slab.
565 * @slab: a pointer to the slab struct
566 *
567 * Returns a pointer to the object extension vector associated with the slab,
568 * or NULL if no such vector has been associated yet.
569 */
570static inline struct slabobj_ext *slab_obj_exts(struct slab *slab)
571{
572 unsigned long obj_exts = READ_ONCE(slab->obj_exts);
573
574#ifdef CONFIG_MEMCG
575 VM_BUG_ON_PAGE(obj_exts && !(obj_exts & MEMCG_DATA_OBJEXTS),
576 slab_page(slab));
577 VM_BUG_ON_PAGE(obj_exts & MEMCG_DATA_KMEM, slab_page(slab));
578#endif
579 return (struct slabobj_ext *)(obj_exts & ~OBJEXTS_FLAGS_MASK);
580}
581
582int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
583 gfp_t gfp, bool new_slab);
584
585#else /* CONFIG_SLAB_OBJ_EXT */
586
587static inline struct slabobj_ext *slab_obj_exts(struct slab *slab)
588{
589 return NULL;
590}
591
592#endif /* CONFIG_SLAB_OBJ_EXT */
593
594static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
595{
596 return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
597 NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
598}
599
600#ifdef CONFIG_MEMCG
601bool __memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
602 gfp_t flags, size_t size, void **p);
603void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
604 void **p, int objects, struct slabobj_ext *obj_exts);
605#endif
606
607size_t __ksize(const void *objp);
608
609static inline size_t slab_ksize(const struct kmem_cache *s)
610{
611#ifdef CONFIG_SLUB_DEBUG
612 /*
613 * Debugging requires use of the padding between object
614 * and whatever may come after it.
615 */
616 if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
617 return s->object_size;
618#endif
619 if (s->flags & SLAB_KASAN)
620 return s->object_size;
621 /*
622	 * If we need to store the freelist pointer
623 * back there or track user information then we can
624 * only use the space before that information.
625 */
626 if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
627 return s->inuse;
628 /*
629 * Else we can use all the padding etc for the allocation
630 */
631 return s->size;
632}
633
634#ifdef CONFIG_SLUB_DEBUG
635void dump_unreclaimable_slab(void);
636#else
637static inline void dump_unreclaimable_slab(void)
638{
639}
640#endif
641
642void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);
643
644#ifdef CONFIG_SLAB_FREELIST_RANDOM
645int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
646 gfp_t gfp);
647void cache_random_seq_destroy(struct kmem_cache *cachep);
648#else
649static inline int cache_random_seq_create(struct kmem_cache *cachep,
650 unsigned int count, gfp_t gfp)
651{
652 return 0;
653}
654static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
655#endif /* CONFIG_SLAB_FREELIST_RANDOM */
656
657static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
658{
659 if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
660 &init_on_alloc)) {
661 if (c->ctor)
662 return false;
663 if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
664 return flags & __GFP_ZERO;
665 return true;
666 }
667 return flags & __GFP_ZERO;
668}
669
670static inline bool slab_want_init_on_free(struct kmem_cache *c)
671{
672 if (static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
673 &init_on_free))
674 return !(c->ctor ||
675 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
676 return false;
677}
678
679#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
680void debugfs_slab_release(struct kmem_cache *);
681#else
682static inline void debugfs_slab_release(struct kmem_cache *s) { }
683#endif
684
685#ifdef CONFIG_PRINTK
686#define KS_ADDRS_COUNT 16
687struct kmem_obj_info {
688 void *kp_ptr;
689 struct slab *kp_slab;
690 void *kp_objp;
691 unsigned long kp_data_offset;
692 struct kmem_cache *kp_slab_cache;
693 void *kp_ret;
694 void *kp_stack[KS_ADDRS_COUNT];
695 void *kp_free_stack[KS_ADDRS_COUNT];
696};
697void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
698#endif
699
700void __check_heap_object(const void *ptr, unsigned long n,
701 const struct slab *slab, bool to_user);
702
703static inline bool slub_debug_orig_size(struct kmem_cache *s)
704{
705 return (kmem_cache_debug_flags(s, SLAB_STORE_USER) &&
706 (s->flags & SLAB_KMALLOC));
707}
708
709#ifdef CONFIG_SLUB_DEBUG
710void skip_orig_size_check(struct kmem_cache *s, const void *object);
711#endif
712
713#endif /* MM_SLAB_H */