1/*
2 * Slab allocator functions that are independent of the allocator strategy
3 *
4 * (C) 2012 Christoph Lameter <cl@linux.com>
5 */
6#include <linux/slab.h>
7
8#include <linux/mm.h>
9#include <linux/poison.h>
10#include <linux/interrupt.h>
11#include <linux/memory.h>
12#include <linux/compiler.h>
13#include <linux/module.h>
14#include <linux/cpu.h>
15#include <linux/uaccess.h>
16#include <linux/seq_file.h>
17#include <linux/proc_fs.h>
18#include <asm/cacheflush.h>
19#include <asm/tlbflush.h>
20#include <asm/page.h>
21#include <linux/memcontrol.h>
22
23#define CREATE_TRACE_POINTS
24#include <trace/events/kmem.h>
25
26#include "slab.h"
27
28enum slab_state slab_state;
29LIST_HEAD(slab_caches);
30DEFINE_MUTEX(slab_mutex);
31struct kmem_cache *kmem_cache;
32
33/*
34 * Set of flags that will prevent slab merging
35 */
36#define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
37 SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
38 SLAB_FAILSLAB | SLAB_KASAN)
39
40#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
41 SLAB_NOTRACK | SLAB_ACCOUNT)
42
43/*
44 * Merge control. If this is set then no merging of slab caches will occur.
45 * (Could be removed. This was introduced to pacify the merge skeptics.)
46 */
47static int slab_nomerge;
48
49static int __init setup_slab_nomerge(char *str)
50{
51 slab_nomerge = 1;
52 return 1;
53}
54
55#ifdef CONFIG_SLUB
56__setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
57#endif
58
59__setup("slab_nomerge", setup_slab_nomerge);
60
61/*
62 * Determine the size of a slab object
63 */
64unsigned int kmem_cache_size(struct kmem_cache *s)
65{
66 return s->object_size;
67}
68EXPORT_SYMBOL(kmem_cache_size);
69
70#ifdef CONFIG_DEBUG_VM
71static int kmem_cache_sanity_check(const char *name, size_t size)
72{
73 struct kmem_cache *s = NULL;
74
75 if (!name || in_interrupt() || size < sizeof(void *) ||
76 size > KMALLOC_MAX_SIZE) {
77 pr_err("kmem_cache_create(%s) integrity check failed\n", name);
78 return -EINVAL;
79 }
80
81 list_for_each_entry(s, &slab_caches, list) {
82 char tmp;
83 int res;
84
85 /*
86 * This happens when the module gets unloaded and doesn't
87 * destroy its slab cache and no-one else reuses the vmalloc
88 * area of the module. Print a warning.
89 */
90 res = probe_kernel_address(s->name, tmp);
91 if (res) {
92 pr_err("Slab cache with size %d has lost its name\n",
93 s->object_size);
94 continue;
95 }
96 }
97
98 WARN_ON(strchr(name, ' ')); /* It confuses parsers */
99 return 0;
100}
101#else
102static inline int kmem_cache_sanity_check(const char *name, size_t size)
103{
104 return 0;
105}
106#endif
107
108void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
109{
110 size_t i;
111
112 for (i = 0; i < nr; i++) {
113 if (s)
114 kmem_cache_free(s, p[i]);
115 else
116 kfree(p[i]);
117 }
118}
119
120int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
121 void **p)
122{
123 size_t i;
124
125 for (i = 0; i < nr; i++) {
126 void *x = p[i] = kmem_cache_alloc(s, flags);
127 if (!x) {
128 __kmem_cache_free_bulk(s, i, p);
129 return 0;
130 }
131 }
132 return i;
133}
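/*
 * Illustrative usage sketch (not part of this file): the public wrappers
 * kmem_cache_alloc_bulk()/kmem_cache_free_bulk() fall back to the loops
 * above when an allocator does not provide a faster batched path.  A
 * caller-side batch allocation looks roughly like:
 *
 *	void *objs[16];
 *
 *	if (!kmem_cache_alloc_bulk(cachep, GFP_KERNEL, ARRAY_SIZE(objs), objs))
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free_bulk(cachep, ARRAY_SIZE(objs), objs);
 */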
134
135#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
136void slab_init_memcg_params(struct kmem_cache *s)
137{
138 s->memcg_params.is_root_cache = true;
139 INIT_LIST_HEAD(&s->memcg_params.list);
140 RCU_INIT_POINTER(s->memcg_params.memcg_caches, NULL);
141}
142
143static int init_memcg_params(struct kmem_cache *s,
144 struct mem_cgroup *memcg, struct kmem_cache *root_cache)
145{
146 struct memcg_cache_array *arr;
147
148 if (memcg) {
149 s->memcg_params.is_root_cache = false;
150 s->memcg_params.memcg = memcg;
151 s->memcg_params.root_cache = root_cache;
152 return 0;
153 }
154
155 slab_init_memcg_params(s);
156
157 if (!memcg_nr_cache_ids)
158 return 0;
159
160 arr = kzalloc(sizeof(struct memcg_cache_array) +
161 memcg_nr_cache_ids * sizeof(void *),
162 GFP_KERNEL);
163 if (!arr)
164 return -ENOMEM;
165
166 RCU_INIT_POINTER(s->memcg_params.memcg_caches, arr);
167 return 0;
168}
169
170static void destroy_memcg_params(struct kmem_cache *s)
171{
172 if (is_root_cache(s))
173 kfree(rcu_access_pointer(s->memcg_params.memcg_caches));
174}
175
176static int update_memcg_params(struct kmem_cache *s, int new_array_size)
177{
178 struct memcg_cache_array *old, *new;
179
180 if (!is_root_cache(s))
181 return 0;
182
183 new = kzalloc(sizeof(struct memcg_cache_array) +
184 new_array_size * sizeof(void *), GFP_KERNEL);
185 if (!new)
186 return -ENOMEM;
187
188 old = rcu_dereference_protected(s->memcg_params.memcg_caches,
189 lockdep_is_held(&slab_mutex));
190 if (old)
191 memcpy(new->entries, old->entries,
192 memcg_nr_cache_ids * sizeof(void *));
193
194 rcu_assign_pointer(s->memcg_params.memcg_caches, new);
195 if (old)
196 kfree_rcu(old, rcu);
197 return 0;
198}
199
200int memcg_update_all_caches(int num_memcgs)
201{
202 struct kmem_cache *s;
203 int ret = 0;
204
205 mutex_lock(&slab_mutex);
206 list_for_each_entry(s, &slab_caches, list) {
207 ret = update_memcg_params(s, num_memcgs);
208 /*
209 * Instead of freeing the memory, we'll just leave the caches
210 * up to this point in an updated state.
211 */
212 if (ret)
213 break;
214 }
215 mutex_unlock(&slab_mutex);
216 return ret;
217}
218#else
219static inline int init_memcg_params(struct kmem_cache *s,
220 struct mem_cgroup *memcg, struct kmem_cache *root_cache)
221{
222 return 0;
223}
224
225static inline void destroy_memcg_params(struct kmem_cache *s)
226{
227}
228#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
229
230/*
231 * Find a mergeable slab cache
232 */
233int slab_unmergeable(struct kmem_cache *s)
234{
235 if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
236 return 1;
237
238 if (!is_root_cache(s))
239 return 1;
240
241 if (s->ctor)
242 return 1;
243
244 /*
245 * We may have set a slab to be unmergeable during bootstrap.
246 */
247 if (s->refcount < 0)
248 return 1;
249
250 return 0;
251}
252
253struct kmem_cache *find_mergeable(size_t size, size_t align,
254 unsigned long flags, const char *name, void (*ctor)(void *))
255{
256 struct kmem_cache *s;
257
258 if (slab_nomerge || (flags & SLAB_NEVER_MERGE))
259 return NULL;
260
261 if (ctor)
262 return NULL;
263
264 size = ALIGN(size, sizeof(void *));
265 align = calculate_alignment(flags, align, size);
266 size = ALIGN(size, align);
267 flags = kmem_cache_flags(size, flags, name, NULL);
268
269 list_for_each_entry_reverse(s, &slab_caches, list) {
270 if (slab_unmergeable(s))
271 continue;
272
273 if (size > s->size)
274 continue;
275
276 if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
277 continue;
278 /*
279 * Check if alignment is compatible.
280 * Courtesy of Adrian Drzewiecki
281 */
282 if ((s->size & ~(align - 1)) != s->size)
283 continue;
284
285 if (s->size - size >= sizeof(void *))
286 continue;
287
288 if (IS_ENABLED(CONFIG_SLAB) && align &&
289 (align > s->align || s->align % align))
290 continue;
291
292 return s;
293 }
294 return NULL;
295}
296
297/*
298 * Figure out what the alignment of the objects will be given a set of
299 * flags, a user specified alignment and the size of the objects.
300 */
301unsigned long calculate_alignment(unsigned long flags,
302 unsigned long align, unsigned long size)
303{
304 /*
305 * If the user wants hardware cache aligned objects then follow that
306 * suggestion if the object is sufficiently large.
307 *
308 * The hardware cache alignment cannot override the specified
309 * alignment though. If that is greater, then use it.
310 */
311 if (flags & SLAB_HWCACHE_ALIGN) {
312 unsigned long ralign = cache_line_size();
313 while (size <= ralign / 2)
314 ralign /= 2;
315 align = max(align, ralign);
316 }
317
318 if (align < ARCH_SLAB_MINALIGN)
319 align = ARCH_SLAB_MINALIGN;
320
321 return ALIGN(align, sizeof(void *));
322}
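/*
 * Worked example (illustrative): with SLAB_HWCACHE_ALIGN, a 20-byte object
 * and a 64-byte cache line, ralign is halved while size <= ralign / 2:
 * 64 -> 32 (20 <= 32 holds), then the loop stops because 20 > 16, so the
 * effective alignment becomes 32 bytes (assuming the caller-supplied align
 * and ARCH_SLAB_MINALIGN are smaller).
 */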
323
324static struct kmem_cache *create_cache(const char *name,
325 size_t object_size, size_t size, size_t align,
326 unsigned long flags, void (*ctor)(void *),
327 struct mem_cgroup *memcg, struct kmem_cache *root_cache)
328{
329 struct kmem_cache *s;
330 int err;
331
332 err = -ENOMEM;
333 s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
334 if (!s)
335 goto out;
336
337 s->name = name;
338 s->object_size = object_size;
339 s->size = size;
340 s->align = align;
341 s->ctor = ctor;
342
343 err = init_memcg_params(s, memcg, root_cache);
344 if (err)
345 goto out_free_cache;
346
347 err = __kmem_cache_create(s, flags);
348 if (err)
349 goto out_free_cache;
350
351 s->refcount = 1;
352 list_add(&s->list, &slab_caches);
353out:
354 if (err)
355 return ERR_PTR(err);
356 return s;
357
358out_free_cache:
359 destroy_memcg_params(s);
360 kmem_cache_free(kmem_cache, s);
361 goto out;
362}
363
364/*
365 * kmem_cache_create - Create a cache.
366 * @name: A string which is used in /proc/slabinfo to identify this cache.
367 * @size: The size of objects to be created in this cache.
368 * @align: The required alignment for the objects.
369 * @flags: SLAB flags
370 * @ctor: A constructor for the objects.
371 *
372 * Returns a ptr to the cache on success, NULL on failure.
373 * Cannot be called within an interrupt, but can be interrupted.
374 * The @ctor is run when new pages are allocated by the cache.
375 *
376 * The flags are
377 *
378 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
379 * to catch references to uninitialised memory.
380 *
381 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
382 * for buffer overruns.
383 *
384 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
385 * cacheline. This can be beneficial if you're counting cycles as closely
386 * as davem.
387 */
388struct kmem_cache *
389kmem_cache_create(const char *name, size_t size, size_t align,
390 unsigned long flags, void (*ctor)(void *))
391{
392 struct kmem_cache *s = NULL;
393 const char *cache_name;
394 int err;
395
396 get_online_cpus();
397 get_online_mems();
398 memcg_get_cache_ids();
399
400 mutex_lock(&slab_mutex);
401
402 err = kmem_cache_sanity_check(name, size);
403 if (err) {
404 goto out_unlock;
405 }
406
407 /*
408 * Some allocators will constrain the set of valid flags to a subset
409 * of all flags. We expect them to define CACHE_CREATE_MASK in this
410 * case, and we'll just provide them with a sanitized version of the
411 * passed flags.
412 */
413 flags &= CACHE_CREATE_MASK;
414
415 s = __kmem_cache_alias(name, size, align, flags, ctor);
416 if (s)
417 goto out_unlock;
418
419 cache_name = kstrdup_const(name, GFP_KERNEL);
420 if (!cache_name) {
421 err = -ENOMEM;
422 goto out_unlock;
423 }
424
425 s = create_cache(cache_name, size, size,
426 calculate_alignment(flags, align, size),
427 flags, ctor, NULL, NULL);
428 if (IS_ERR(s)) {
429 err = PTR_ERR(s);
430 kfree_const(cache_name);
431 }
432
433out_unlock:
434 mutex_unlock(&slab_mutex);
435
436 memcg_put_cache_ids();
437 put_online_mems();
438 put_online_cpus();
439
440 if (err) {
441 if (flags & SLAB_PANIC)
442 panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
443 name, err);
444 else {
445 pr_warn("kmem_cache_create(%s) failed with error %d\n",
446 name, err);
447 dump_stack();
448 }
449 return NULL;
450 }
451 return s;
452}
453EXPORT_SYMBOL(kmem_cache_create);
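/*
 * Illustrative usage sketch (struct foo and foo_ctor are assumptions, not
 * part of this file):
 *
 *	static struct kmem_cache *foo_cachep;
 *
 *	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				       SLAB_HWCACHE_ALIGN, foo_ctor);
 *	if (!foo_cachep)
 *		return -ENOMEM;
 *
 *	obj = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cachep, obj);
 *	kmem_cache_destroy(foo_cachep);
 */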
454
455static int shutdown_cache(struct kmem_cache *s,
456 struct list_head *release, bool *need_rcu_barrier)
457{
458 if (__kmem_cache_shutdown(s) != 0)
459 return -EBUSY;
460
461 if (s->flags & SLAB_DESTROY_BY_RCU)
462 *need_rcu_barrier = true;
463
464 list_move(&s->list, release);
465 return 0;
466}
467
468static void release_caches(struct list_head *release, bool need_rcu_barrier)
469{
470 struct kmem_cache *s, *s2;
471
472 if (need_rcu_barrier)
473 rcu_barrier();
474
475 list_for_each_entry_safe(s, s2, release, list) {
476#ifdef SLAB_SUPPORTS_SYSFS
477 sysfs_slab_remove(s);
478#else
479 slab_kmem_cache_release(s);
480#endif
481 }
482}
483
484#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
485/*
486 * memcg_create_kmem_cache - Create a cache for a memory cgroup.
487 * @memcg: The memory cgroup the new cache is for.
488 * @root_cache: The parent of the new cache.
489 *
490 * This function attempts to create a kmem cache that will serve allocation
491 * requests going from @memcg to @root_cache. The new cache inherits properties
492 * from its parent.
493 */
494void memcg_create_kmem_cache(struct mem_cgroup *memcg,
495 struct kmem_cache *root_cache)
496{
497 static char memcg_name_buf[NAME_MAX + 1]; /* protected by slab_mutex */
498 struct cgroup_subsys_state *css = &memcg->css;
499 struct memcg_cache_array *arr;
500 struct kmem_cache *s = NULL;
501 char *cache_name;
502 int idx;
503
504 get_online_cpus();
505 get_online_mems();
506
507 mutex_lock(&slab_mutex);
508
509 /*
510 * The memory cgroup could have been offlined while the cache
511 * creation work was pending.
512 */
513 if (memcg->kmem_state != KMEM_ONLINE)
514 goto out_unlock;
515
516 idx = memcg_cache_id(memcg);
517 arr = rcu_dereference_protected(root_cache->memcg_params.memcg_caches,
518 lockdep_is_held(&slab_mutex));
519
520 /*
521 * Since per-memcg caches are created asynchronously on first
522 * allocation (see memcg_kmem_get_cache()), several threads can try to
523 * create the same cache, but only one of them may succeed.
524 */
525 if (arr->entries[idx])
526 goto out_unlock;
527
528 cgroup_name(css->cgroup, memcg_name_buf, sizeof(memcg_name_buf));
529 cache_name = kasprintf(GFP_KERNEL, "%s(%d:%s)", root_cache->name,
530 css->id, memcg_name_buf);
531 if (!cache_name)
532 goto out_unlock;
533
534 s = create_cache(cache_name, root_cache->object_size,
535 root_cache->size, root_cache->align,
536 root_cache->flags, root_cache->ctor,
537 memcg, root_cache);
538 /*
539 * If we could not create a memcg cache, do not complain, because
540 * that's not critical at all as we can always proceed with the root
541 * cache.
542 */
543 if (IS_ERR(s)) {
544 kfree(cache_name);
545 goto out_unlock;
546 }
547
548 list_add(&s->memcg_params.list, &root_cache->memcg_params.list);
549
550 /*
551 * Since readers won't lock (see cache_from_memcg_idx()), we need a
552 * barrier here to ensure nobody will see the kmem_cache partially
553 * initialized.
554 */
555 smp_wmb();
556 arr->entries[idx] = s;
557
558out_unlock:
559 mutex_unlock(&slab_mutex);
560
561 put_online_mems();
562 put_online_cpus();
563}
564
565void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)
566{
567 int idx;
568 struct memcg_cache_array *arr;
569 struct kmem_cache *s, *c;
570
571 idx = memcg_cache_id(memcg);
572
573 get_online_cpus();
574 get_online_mems();
575
576 mutex_lock(&slab_mutex);
577 list_for_each_entry(s, &slab_caches, list) {
578 if (!is_root_cache(s))
579 continue;
580
581 arr = rcu_dereference_protected(s->memcg_params.memcg_caches,
582 lockdep_is_held(&slab_mutex));
583 c = arr->entries[idx];
584 if (!c)
585 continue;
586
587 __kmem_cache_shrink(c, true);
588 arr->entries[idx] = NULL;
589 }
590 mutex_unlock(&slab_mutex);
591
592 put_online_mems();
593 put_online_cpus();
594}
595
596static int __shutdown_memcg_cache(struct kmem_cache *s,
597 struct list_head *release, bool *need_rcu_barrier)
598{
599 BUG_ON(is_root_cache(s));
600
601 if (shutdown_cache(s, release, need_rcu_barrier))
602 return -EBUSY;
603
604 list_del(&s->memcg_params.list);
605 return 0;
606}
607
608void memcg_destroy_kmem_caches(struct mem_cgroup *memcg)
609{
610 LIST_HEAD(release);
611 bool need_rcu_barrier = false;
612 struct kmem_cache *s, *s2;
613
614 get_online_cpus();
615 get_online_mems();
616
617 mutex_lock(&slab_mutex);
618 list_for_each_entry_safe(s, s2, &slab_caches, list) {
619 if (is_root_cache(s) || s->memcg_params.memcg != memcg)
620 continue;
621 /*
622 * The cgroup is about to be freed and therefore has no charges
623 * left. Hence, all its caches must be empty by now.
624 */
625 BUG_ON(__shutdown_memcg_cache(s, &release, &need_rcu_barrier));
626 }
627 mutex_unlock(&slab_mutex);
628
629 put_online_mems();
630 put_online_cpus();
631
632 release_caches(&release, need_rcu_barrier);
633}
634
635static int shutdown_memcg_caches(struct kmem_cache *s,
636 struct list_head *release, bool *need_rcu_barrier)
637{
638 struct memcg_cache_array *arr;
639 struct kmem_cache *c, *c2;
640 LIST_HEAD(busy);
641 int i;
642
643 BUG_ON(!is_root_cache(s));
644
645 /*
646 * First, shutdown active caches, i.e. caches that belong to online
647 * memory cgroups.
648 */
649 arr = rcu_dereference_protected(s->memcg_params.memcg_caches,
650 lockdep_is_held(&slab_mutex));
651 for_each_memcg_cache_index(i) {
652 c = arr->entries[i];
653 if (!c)
654 continue;
655 if (__shutdown_memcg_cache(c, release, need_rcu_barrier))
656 /*
657 * The cache still has objects. Move it to a temporary
658 * list so as not to try to destroy it for a second
659 * time while iterating over inactive caches below.
660 */
661 list_move(&c->memcg_params.list, &busy);
662 else
663 /*
664 * The cache is empty and will be destroyed soon. Clear
665 * the pointer to it in the memcg_caches array so that
666 * it will never be accessed even if the root cache
667 * stays alive.
668 */
669 arr->entries[i] = NULL;
670 }
671
672 /*
673 * Second, shutdown all caches left from memory cgroups that are now
674 * offline.
675 */
676 list_for_each_entry_safe(c, c2, &s->memcg_params.list,
677 memcg_params.list)
678 __shutdown_memcg_cache(c, release, need_rcu_barrier);
679
680 list_splice(&busy, &s->memcg_params.list);
681
682 /*
683 * A cache being destroyed must be empty. In particular, this means
684 * that all per memcg caches attached to it must be empty too.
685 */
686 if (!list_empty(&s->memcg_params.list))
687 return -EBUSY;
688 return 0;
689}
690#else
691static inline int shutdown_memcg_caches(struct kmem_cache *s,
692 struct list_head *release, bool *need_rcu_barrier)
693{
694 return 0;
695}
696#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
697
698void slab_kmem_cache_release(struct kmem_cache *s)
699{
700 __kmem_cache_release(s);
701 destroy_memcg_params(s);
702 kfree_const(s->name);
703 kmem_cache_free(kmem_cache, s);
704}
705
706void kmem_cache_destroy(struct kmem_cache *s)
707{
708 LIST_HEAD(release);
709 bool need_rcu_barrier = false;
710 int err;
711
712 if (unlikely(!s))
713 return;
714
715 get_online_cpus();
716 get_online_mems();
717
718 mutex_lock(&slab_mutex);
719
720 s->refcount--;
721 if (s->refcount)
722 goto out_unlock;
723
724 err = shutdown_memcg_caches(s, &release, &need_rcu_barrier);
725 if (!err)
726 err = shutdown_cache(s, &release, &need_rcu_barrier);
727
728 if (err) {
729 pr_err("kmem_cache_destroy %s: Slab cache still has objects\n",
730 s->name);
731 dump_stack();
732 }
733out_unlock:
734 mutex_unlock(&slab_mutex);
735
736 put_online_mems();
737 put_online_cpus();
738
739 release_caches(&release, need_rcu_barrier);
740}
741EXPORT_SYMBOL(kmem_cache_destroy);
742
743/**
744 * kmem_cache_shrink - Shrink a cache.
745 * @cachep: The cache to shrink.
746 *
747 * Releases as many slabs as possible for a cache.
748 * To help debugging, a zero exit status indicates all slabs were released.
749 */
750int kmem_cache_shrink(struct kmem_cache *cachep)
751{
752 int ret;
753
754 get_online_cpus();
755 get_online_mems();
756 ret = __kmem_cache_shrink(cachep, false);
757 put_online_mems();
758 put_online_cpus();
759 return ret;
760}
761EXPORT_SYMBOL(kmem_cache_shrink);
762
763bool slab_is_available(void)
764{
765 return slab_state >= UP;
766}
767
768#ifndef CONFIG_SLOB
769/* Create a cache during boot when no slab services are available yet */
770void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t size,
771 unsigned long flags)
772{
773 int err;
774
775 s->name = name;
776 s->size = s->object_size = size;
777 s->align = calculate_alignment(flags, ARCH_KMALLOC_MINALIGN, size);
778
779 slab_init_memcg_params(s);
780
781 err = __kmem_cache_create(s, flags);
782
783 if (err)
784 panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
785 name, size, err);
786
787 s->refcount = -1; /* Exempt from merging for now */
788}
789
790struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
791 unsigned long flags)
792{
793 struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
794
795 if (!s)
796 panic("Out of memory when creating slab %s\n", name);
797
798 create_boot_cache(s, name, size, flags);
799 list_add(&s->list, &slab_caches);
800 s->refcount = 1;
801 return s;
802}
803
804struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
805EXPORT_SYMBOL(kmalloc_caches);
806
807#ifdef CONFIG_ZONE_DMA
808struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
809EXPORT_SYMBOL(kmalloc_dma_caches);
810#endif
811
812/*
813 * Conversion table for small slab sizes / 8 to the index in the
814 * kmalloc array. This is necessary for slabs < 192 since we have non power
815 * of two cache sizes there. The size of larger slabs can be determined using
816 * fls.
817 */
818static s8 size_index[24] = {
819 3, /* 8 */
820 4, /* 16 */
821 5, /* 24 */
822 5, /* 32 */
823 6, /* 40 */
824 6, /* 48 */
825 6, /* 56 */
826 6, /* 64 */
827 1, /* 72 */
828 1, /* 80 */
829 1, /* 88 */
830 1, /* 96 */
831 7, /* 104 */
832 7, /* 112 */
833 7, /* 120 */
834 7, /* 128 */
835 2, /* 136 */
836 2, /* 144 */
837 2, /* 152 */
838 2, /* 160 */
839 2, /* 168 */
840 2, /* 176 */
841 2, /* 184 */
842 2 /* 192 */
843};
844
845static inline int size_index_elem(size_t bytes)
846{
847 return (bytes - 1) / 8;
848}
849
850/*
851 * Find the kmem_cache structure that serves a given size of
852 * allocation
853 */
854struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
855{
856 int index;
857
858 if (unlikely(size > KMALLOC_MAX_SIZE)) {
859 WARN_ON_ONCE(!(flags & __GFP_NOWARN));
860 return NULL;
861 }
862
863 if (size <= 192) {
864 if (!size)
865 return ZERO_SIZE_PTR;
866
867 index = size_index[size_index_elem(size)];
868 } else
869 index = fls(size - 1);
870
871#ifdef CONFIG_ZONE_DMA
872 if (unlikely((flags & GFP_DMA)))
873 return kmalloc_dma_caches[index];
874
875#endif
876 return kmalloc_caches[index];
877}
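/*
 * Worked example (illustrative): a kmalloc(100, GFP_KERNEL) request has
 * size_index_elem(100) == 12 and size_index[12] == 7, so it is served by
 * kmalloc-128; a 300-byte request takes the fls() path, fls(299) == 9,
 * and is served by kmalloc-512.
 */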
878
879/*
880 * kmalloc_info[] is to make slub_debug=,kmalloc-xx option work at boot time.
881 * kmalloc_index() supports up to 2^26=64MB, so the final entry of the table is
882 * kmalloc-67108864.
883 */
884static struct {
885 const char *name;
886 unsigned long size;
887} const kmalloc_info[] __initconst = {
888 {NULL, 0}, {"kmalloc-96", 96},
889 {"kmalloc-192", 192}, {"kmalloc-8", 8},
890 {"kmalloc-16", 16}, {"kmalloc-32", 32},
891 {"kmalloc-64", 64}, {"kmalloc-128", 128},
892 {"kmalloc-256", 256}, {"kmalloc-512", 512},
893 {"kmalloc-1024", 1024}, {"kmalloc-2048", 2048},
894 {"kmalloc-4096", 4096}, {"kmalloc-8192", 8192},
895 {"kmalloc-16384", 16384}, {"kmalloc-32768", 32768},
896 {"kmalloc-65536", 65536}, {"kmalloc-131072", 131072},
897 {"kmalloc-262144", 262144}, {"kmalloc-524288", 524288},
898 {"kmalloc-1048576", 1048576}, {"kmalloc-2097152", 2097152},
899 {"kmalloc-4194304", 4194304}, {"kmalloc-8388608", 8388608},
900 {"kmalloc-16777216", 16777216}, {"kmalloc-33554432", 33554432},
901 {"kmalloc-67108864", 67108864}
902};
903
904/*
905 * Patch up the size_index table if we have strange large alignment
906 * requirements for the kmalloc array. This is only the case for
907 * MIPS it seems. The standard arches will not generate any code here.
908 *
909 * Largest permitted alignment is 256 bytes due to the way we
910 * handle the index determination for the smaller caches.
911 *
912 * Make sure that nothing crazy happens if someone starts tinkering
913 * around with ARCH_KMALLOC_MINALIGN
914 */
915void __init setup_kmalloc_cache_index_table(void)
916{
917 int i;
918
919 BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
920 (KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
921
922 for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
923 int elem = size_index_elem(i);
924
925 if (elem >= ARRAY_SIZE(size_index))
926 break;
927 size_index[elem] = KMALLOC_SHIFT_LOW;
928 }
929
930 if (KMALLOC_MIN_SIZE >= 64) {
931 /*
932 * The 96 byte sized cache is not used if the alignment
933 * is 64 bytes.
934 */
935 for (i = 64 + 8; i <= 96; i += 8)
936 size_index[size_index_elem(i)] = 7;
937
938 }
939
940 if (KMALLOC_MIN_SIZE >= 128) {
941 /*
942 * The 192 byte sized cache is not used if the alignment
943 * is 128 byte. Redirect kmalloc to use the 256 byte cache
944 * instead.
945 */
946 for (i = 128 + 8; i <= 192; i += 8)
947 size_index[size_index_elem(i)] = 8;
948 }
949}
950
951static void __init new_kmalloc_cache(int idx, unsigned long flags)
952{
953 kmalloc_caches[idx] = create_kmalloc_cache(kmalloc_info[idx].name,
954 kmalloc_info[idx].size, flags);
955}
956
957/*
958 * Create the kmalloc array. Some of the regular kmalloc arrays
959 * may already have been created because they were needed to
960 * enable allocations for slab creation.
961 */
962void __init create_kmalloc_caches(unsigned long flags)
963{
964 int i;
965
966 for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
967 if (!kmalloc_caches[i])
968 new_kmalloc_cache(i, flags);
969
970 /*
971 * Caches that are not a power-of-two size.
972 * These have to be created immediately after the
973 * earlier power of two caches
974 */
975 if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
976 new_kmalloc_cache(1, flags);
977 if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
978 new_kmalloc_cache(2, flags);
979 }
980
981 /* Kmalloc array is now usable */
982 slab_state = UP;
983
984#ifdef CONFIG_ZONE_DMA
985 for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
986 struct kmem_cache *s = kmalloc_caches[i];
987
988 if (s) {
989 int size = kmalloc_size(i);
990 char *n = kasprintf(GFP_NOWAIT,
991 "dma-kmalloc-%d", size);
992
993 BUG_ON(!n);
994 kmalloc_dma_caches[i] = create_kmalloc_cache(n,
995 size, SLAB_CACHE_DMA | flags);
996 }
997 }
998#endif
999}
1000#endif /* !CONFIG_SLOB */
1001
1002/*
1003 * To avoid unnecessary overhead, we pass through large allocation requests
1004 * directly to the page allocator. We use __GFP_COMP, because we will need to
1005 * know the allocation order to free the pages properly in kfree.
1006 */
1007void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
1008{
1009 void *ret;
1010 struct page *page;
1011
1012 flags |= __GFP_COMP;
1013 page = alloc_kmem_pages(flags, order);
1014 ret = page ? page_address(page) : NULL;
1015 kmemleak_alloc(ret, size, 1, flags);
1016 kasan_kmalloc_large(ret, size, flags);
1017 return ret;
1018}
1019EXPORT_SYMBOL(kmalloc_order);
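/*
 * Illustrative note (based on the SLUB kmalloc() fast path): requests above
 * KMALLOC_MAX_CACHE_SIZE typically reach this function via kmalloc_large()
 * with order = get_order(size), e.g. a 128 KB allocation becomes a single
 * order-5 compound page on 4 KB-page systems, and kfree() later derives
 * that order from the compound page head.
 */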
1020
1021#ifdef CONFIG_TRACING
1022void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
1023{
1024 void *ret = kmalloc_order(size, flags, order);
1025 trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
1026 return ret;
1027}
1028EXPORT_SYMBOL(kmalloc_order_trace);
1029#endif
1030
1031#ifdef CONFIG_SLABINFO
1032
1033#ifdef CONFIG_SLAB
1034#define SLABINFO_RIGHTS (S_IWUSR | S_IRUSR)
1035#else
1036#define SLABINFO_RIGHTS S_IRUSR
1037#endif
1038
1039static void print_slabinfo_header(struct seq_file *m)
1040{
1041 /*
1042 * Output format version, so at least we can change it
1043 * without _too_ many complaints.
1044 */
1045#ifdef CONFIG_DEBUG_SLAB
1046 seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
1047#else
1048 seq_puts(m, "slabinfo - version: 2.1\n");
1049#endif
1050 seq_puts(m, "# name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
1051 seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
1052 seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
1053#ifdef CONFIG_DEBUG_SLAB
1054 seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> <error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
1055 seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
1056#endif
1057 seq_putc(m, '\n');
1058}
1059
1060void *slab_start(struct seq_file *m, loff_t *pos)
1061{
1062 mutex_lock(&slab_mutex);
1063 return seq_list_start(&slab_caches, *pos);
1064}
1065
1066void *slab_next(struct seq_file *m, void *p, loff_t *pos)
1067{
1068 return seq_list_next(p, &slab_caches, pos);
1069}
1070
1071void slab_stop(struct seq_file *m, void *p)
1072{
1073 mutex_unlock(&slab_mutex);
1074}
1075
1076static void
1077memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info)
1078{
1079 struct kmem_cache *c;
1080 struct slabinfo sinfo;
1081
1082 if (!is_root_cache(s))
1083 return;
1084
1085 for_each_memcg_cache(c, s) {
1086 memset(&sinfo, 0, sizeof(sinfo));
1087 get_slabinfo(c, &sinfo);
1088
1089 info->active_slabs += sinfo.active_slabs;
1090 info->num_slabs += sinfo.num_slabs;
1091 info->shared_avail += sinfo.shared_avail;
1092 info->active_objs += sinfo.active_objs;
1093 info->num_objs += sinfo.num_objs;
1094 }
1095}
1096
1097static void cache_show(struct kmem_cache *s, struct seq_file *m)
1098{
1099 struct slabinfo sinfo;
1100
1101 memset(&sinfo, 0, sizeof(sinfo));
1102 get_slabinfo(s, &sinfo);
1103
1104 memcg_accumulate_slabinfo(s, &sinfo);
1105
1106 seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
1107 cache_name(s), sinfo.active_objs, sinfo.num_objs, s->size,
1108 sinfo.objects_per_slab, (1 << sinfo.cache_order));
1109
1110 seq_printf(m, " : tunables %4u %4u %4u",
1111 sinfo.limit, sinfo.batchcount, sinfo.shared);
1112 seq_printf(m, " : slabdata %6lu %6lu %6lu",
1113 sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
1114 slabinfo_show_stats(m, s);
1115 seq_putc(m, '\n');
1116}
1117
1118static int slab_show(struct seq_file *m, void *p)
1119{
1120 struct kmem_cache *s = list_entry(p, struct kmem_cache, list);
1121
1122 if (p == slab_caches.next)
1123 print_slabinfo_header(m);
1124 if (is_root_cache(s))
1125 cache_show(s, m);
1126 return 0;
1127}
1128
1129#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
1130int memcg_slab_show(struct seq_file *m, void *p)
1131{
1132 struct kmem_cache *s = list_entry(p, struct kmem_cache, list);
1133 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
1134
1135 if (p == slab_caches.next)
1136 print_slabinfo_header(m);
1137 if (!is_root_cache(s) && s->memcg_params.memcg == memcg)
1138 cache_show(s, m);
1139 return 0;
1140}
1141#endif
1142
1143/*
1144 * slabinfo_op - iterator that generates /proc/slabinfo
1145 *
1146 * Output layout:
1147 * cache-name
1148 * num-active-objs
1149 * total-objs
1150 * object size
1151 * num-active-slabs
1152 * total-slabs
1153 * num-pages-per-slab
1154 * + further values on SMP and with statistics enabled
1155 */
1156static const struct seq_operations slabinfo_op = {
1157 .start = slab_start,
1158 .next = slab_next,
1159 .stop = slab_stop,
1160 .show = slab_show,
1161};
1162
1163static int slabinfo_open(struct inode *inode, struct file *file)
1164{
1165 return seq_open(file, &slabinfo_op);
1166}
1167
1168static const struct file_operations proc_slabinfo_operations = {
1169 .open = slabinfo_open,
1170 .read = seq_read,
1171 .write = slabinfo_write,
1172 .llseek = seq_lseek,
1173 .release = seq_release,
1174};
1175
1176static int __init slab_proc_init(void)
1177{
1178 proc_create("slabinfo", SLABINFO_RIGHTS, NULL,
1179 &proc_slabinfo_operations);
1180 return 0;
1181}
1182module_init(slab_proc_init);
1183#endif /* CONFIG_SLABINFO */
1184
1185static __always_inline void *__do_krealloc(const void *p, size_t new_size,
1186 gfp_t flags)
1187{
1188 void *ret;
1189 size_t ks = 0;
1190
1191 if (p)
1192 ks = ksize(p);
1193
1194 if (ks >= new_size) {
1195 kasan_krealloc((void *)p, new_size, flags);
1196 return (void *)p;
1197 }
1198
1199 ret = kmalloc_track_caller(new_size, flags);
1200 if (ret && p)
1201 memcpy(ret, p, ks);
1202
1203 return ret;
1204}
1205
1206/**
1207 * __krealloc - like krealloc() but don't free @p.
1208 * @p: object to reallocate memory for.
1209 * @new_size: how many bytes of memory are required.
1210 * @flags: the type of memory to allocate.
1211 *
1212 * This function is like krealloc() except it never frees the originally
1213 * allocated buffer. Use this if you don't want to free the buffer immediately
1214 * like, for example, with RCU.
1215 */
1216void *__krealloc(const void *p, size_t new_size, gfp_t flags)
1217{
1218 if (unlikely(!new_size))
1219 return ZERO_SIZE_PTR;
1220
1221 return __do_krealloc(p, new_size, flags);
1222
1223}
1224EXPORT_SYMBOL(__krealloc);
1225
1226/**
1227 * krealloc - reallocate memory. The contents will remain unchanged.
1228 * @p: object to reallocate memory for.
1229 * @new_size: how many bytes of memory are required.
1230 * @flags: the type of memory to allocate.
1231 *
1232 * The contents of the object pointed to are preserved up to the
1233 * lesser of the new and old sizes. If @p is %NULL, krealloc()
1234 * behaves exactly like kmalloc(). If @new_size is 0 and @p is not a
1235 * %NULL pointer, the object pointed to is freed.
1236 */
1237void *krealloc(const void *p, size_t new_size, gfp_t flags)
1238{
1239 void *ret;
1240
1241 if (unlikely(!new_size)) {
1242 kfree(p);
1243 return ZERO_SIZE_PTR;
1244 }
1245
1246 ret = __do_krealloc(p, new_size, flags);
1247 if (ret && p != ret)
1248 kfree(p);
1249
1250 return ret;
1251}
1252EXPORT_SYMBOL(krealloc);
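/*
 * Illustrative usage sketch: grow a buffer while keeping its contents.
 *
 *	new = krealloc(buf, 128, GFP_KERNEL);
 *	if (!new)
 *		kfree(buf);	(the old buffer is untouched on failure)
 *	else
 *		buf = new;
 */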
1253
1254/**
1255 * kzfree - like kfree but zero memory
1256 * @p: object to free memory of
1257 *
1258 * The memory of the object @p points to is zeroed before freed.
1259 * If @p is %NULL, kzfree() does nothing.
1260 *
1261 * Note: this function zeroes the whole allocated buffer which can be a good
1262 * deal bigger than the requested buffer size passed to kmalloc(). So be
1263 * careful when using this function in performance sensitive code.
1264 */
1265void kzfree(const void *p)
1266{
1267 size_t ks;
1268 void *mem = (void *)p;
1269
1270 if (unlikely(ZERO_OR_NULL_PTR(mem)))
1271 return;
1272 ks = ksize(mem);
1273 memset(mem, 0, ks);
1274 kfree(mem);
1275}
1276EXPORT_SYMBOL(kzfree);
1277
1278/* Tracepoints definitions. */
1279EXPORT_TRACEPOINT_SYMBOL(kmalloc);
1280EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
1281EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
1282EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
1283EXPORT_TRACEPOINT_SYMBOL(kfree);
1284EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Slab allocator functions that are independent of the allocator strategy
4 *
5 * (C) 2012 Christoph Lameter <cl@linux.com>
6 */
7#include <linux/slab.h>
8
9#include <linux/mm.h>
10#include <linux/poison.h>
11#include <linux/interrupt.h>
12#include <linux/memory.h>
13#include <linux/cache.h>
14#include <linux/compiler.h>
15#include <linux/kfence.h>
16#include <linux/module.h>
17#include <linux/cpu.h>
18#include <linux/uaccess.h>
19#include <linux/seq_file.h>
20#include <linux/dma-mapping.h>
21#include <linux/swiotlb.h>
22#include <linux/proc_fs.h>
23#include <linux/debugfs.h>
24#include <linux/kmemleak.h>
25#include <linux/kasan.h>
26#include <asm/cacheflush.h>
27#include <asm/tlbflush.h>
28#include <asm/page.h>
29#include <linux/memcontrol.h>
30#include <linux/stackdepot.h>
31
32#include "internal.h"
33#include "slab.h"
34
35#define CREATE_TRACE_POINTS
36#include <trace/events/kmem.h>
37
38enum slab_state slab_state;
39LIST_HEAD(slab_caches);
40DEFINE_MUTEX(slab_mutex);
41struct kmem_cache *kmem_cache;
42
43static LIST_HEAD(slab_caches_to_rcu_destroy);
44static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work);
45static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
46 slab_caches_to_rcu_destroy_workfn);
47
48/*
49 * Set of flags that will prevent slab merging
50 */
51#define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
52 SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \
53 SLAB_FAILSLAB | SLAB_NO_MERGE)
54
55#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
56 SLAB_CACHE_DMA32 | SLAB_ACCOUNT)
57
58/*
59 * Merge control. If this is set then no merging of slab caches will occur.
60 */
61static bool slab_nomerge = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);
62
63static int __init setup_slab_nomerge(char *str)
64{
65 slab_nomerge = true;
66 return 1;
67}
68
69static int __init setup_slab_merge(char *str)
70{
71 slab_nomerge = false;
72 return 1;
73}
74
75__setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
76__setup_param("slub_merge", slub_merge, setup_slab_merge, 0);
77
78__setup("slab_nomerge", setup_slab_nomerge);
79__setup("slab_merge", setup_slab_merge);
80
81/*
82 * Determine the size of a slab object
83 */
84unsigned int kmem_cache_size(struct kmem_cache *s)
85{
86 return s->object_size;
87}
88EXPORT_SYMBOL(kmem_cache_size);
89
90#ifdef CONFIG_DEBUG_VM
91static int kmem_cache_sanity_check(const char *name, unsigned int size)
92{
93 if (!name || in_interrupt() || size > KMALLOC_MAX_SIZE) {
94 pr_err("kmem_cache_create(%s) integrity check failed\n", name);
95 return -EINVAL;
96 }
97
98 WARN_ON(strchr(name, ' ')); /* It confuses parsers */
99 return 0;
100}
101#else
102static inline int kmem_cache_sanity_check(const char *name, unsigned int size)
103{
104 return 0;
105}
106#endif
107
108/*
109 * Figure out what the alignment of the objects will be given a set of
110 * flags, a user specified alignment and the size of the objects.
111 */
112static unsigned int calculate_alignment(slab_flags_t flags,
113 unsigned int align, unsigned int size)
114{
115 /*
116 * If the user wants hardware cache aligned objects then follow that
117 * suggestion if the object is sufficiently large.
118 *
119 * The hardware cache alignment cannot override the specified
120 * alignment though. If that is greater, then use it.
121 */
122 if (flags & SLAB_HWCACHE_ALIGN) {
123 unsigned int ralign;
124
125 ralign = cache_line_size();
126 while (size <= ralign / 2)
127 ralign /= 2;
128 align = max(align, ralign);
129 }
130
131 align = max(align, arch_slab_minalign());
132
133 return ALIGN(align, sizeof(void *));
134}
135
136/*
137 * Find a mergeable slab cache
138 */
139int slab_unmergeable(struct kmem_cache *s)
140{
141 if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
142 return 1;
143
144 if (s->ctor)
145 return 1;
146
147#ifdef CONFIG_HARDENED_USERCOPY
148 if (s->usersize)
149 return 1;
150#endif
151
152 /*
153 * We may have set a slab to be unmergeable during bootstrap.
154 */
155 if (s->refcount < 0)
156 return 1;
157
158 return 0;
159}
160
161struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
162 slab_flags_t flags, const char *name, void (*ctor)(void *))
163{
164 struct kmem_cache *s;
165
166 if (slab_nomerge)
167 return NULL;
168
169 if (ctor)
170 return NULL;
171
172 size = ALIGN(size, sizeof(void *));
173 align = calculate_alignment(flags, align, size);
174 size = ALIGN(size, align);
175 flags = kmem_cache_flags(flags, name);
176
177 if (flags & SLAB_NEVER_MERGE)
178 return NULL;
179
180 list_for_each_entry_reverse(s, &slab_caches, list) {
181 if (slab_unmergeable(s))
182 continue;
183
184 if (size > s->size)
185 continue;
186
187 if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
188 continue;
189 /*
190 * Check if alignment is compatible.
191 * Courtesy of Adrian Drzewiecki
192 */
193 if ((s->size & ~(align - 1)) != s->size)
194 continue;
195
196 if (s->size - size >= sizeof(void *))
197 continue;
198
199 return s;
200 }
201 return NULL;
202}
203
204static struct kmem_cache *create_cache(const char *name,
205 unsigned int object_size, unsigned int align,
206 slab_flags_t flags, unsigned int useroffset,
207 unsigned int usersize, void (*ctor)(void *),
208 struct kmem_cache *root_cache)
209{
210 struct kmem_cache *s;
211 int err;
212
213 if (WARN_ON(useroffset + usersize > object_size))
214 useroffset = usersize = 0;
215
216 err = -ENOMEM;
217 s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
218 if (!s)
219 goto out;
220
221 s->name = name;
222 s->size = s->object_size = object_size;
223 s->align = align;
224 s->ctor = ctor;
225#ifdef CONFIG_HARDENED_USERCOPY
226 s->useroffset = useroffset;
227 s->usersize = usersize;
228#endif
229
230 err = __kmem_cache_create(s, flags);
231 if (err)
232 goto out_free_cache;
233
234 s->refcount = 1;
235 list_add(&s->list, &slab_caches);
236 return s;
237
238out_free_cache:
239 kmem_cache_free(kmem_cache, s);
240out:
241 return ERR_PTR(err);
242}
243
244/**
245 * kmem_cache_create_usercopy - Create a cache with a region suitable
246 * for copying to userspace
247 * @name: A string which is used in /proc/slabinfo to identify this cache.
248 * @size: The size of objects to be created in this cache.
249 * @align: The required alignment for the objects.
250 * @flags: SLAB flags
251 * @useroffset: Usercopy region offset
252 * @usersize: Usercopy region size
253 * @ctor: A constructor for the objects.
254 *
255 * Cannot be called within an interrupt, but can be interrupted.
256 * The @ctor is run when new pages are allocated by the cache.
257 *
258 * The flags are
259 *
260 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
261 * to catch references to uninitialised memory.
262 *
263 * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
264 * for buffer overruns.
265 *
266 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
267 * cacheline. This can be beneficial if you're counting cycles as closely
268 * as davem.
269 *
270 * Return: a pointer to the cache on success, NULL on failure.
271 */
272struct kmem_cache *
273kmem_cache_create_usercopy(const char *name,
274 unsigned int size, unsigned int align,
275 slab_flags_t flags,
276 unsigned int useroffset, unsigned int usersize,
277 void (*ctor)(void *))
278{
279 struct kmem_cache *s = NULL;
280 const char *cache_name;
281 int err;
282
283#ifdef CONFIG_SLUB_DEBUG
284 /*
285 * If no slab_debug was enabled globally, the static key is not yet
286 * enabled by setup_slub_debug(). Enable it if the cache is being
287 * created with any of the debugging flags passed explicitly.
288 * It's also possible that this is the first cache created with
289 * SLAB_STORE_USER and we should init stack_depot for it.
290 */
291 if (flags & SLAB_DEBUG_FLAGS)
292 static_branch_enable(&slub_debug_enabled);
293 if (flags & SLAB_STORE_USER)
294 stack_depot_init();
295#endif
296
297 mutex_lock(&slab_mutex);
298
299 err = kmem_cache_sanity_check(name, size);
300 if (err) {
301 goto out_unlock;
302 }
303
304 /* Refuse requests with allocator specific flags */
305 if (flags & ~SLAB_FLAGS_PERMITTED) {
306 err = -EINVAL;
307 goto out_unlock;
308 }
309
310 /*
311 * Some allocators will constrain the set of valid flags to a subset
312 * of all flags. We expect them to define CACHE_CREATE_MASK in this
313 * case, and we'll just provide them with a sanitized version of the
314 * passed flags.
315 */
316 flags &= CACHE_CREATE_MASK;
317
318 /* Fail closed on bad usersize or useroffset values. */
319 if (!IS_ENABLED(CONFIG_HARDENED_USERCOPY) ||
320 WARN_ON(!usersize && useroffset) ||
321 WARN_ON(size < usersize || size - usersize < useroffset))
322 usersize = useroffset = 0;
323
324 if (!usersize)
325 s = __kmem_cache_alias(name, size, align, flags, ctor);
326 if (s)
327 goto out_unlock;
328
329 cache_name = kstrdup_const(name, GFP_KERNEL);
330 if (!cache_name) {
331 err = -ENOMEM;
332 goto out_unlock;
333 }
334
335 s = create_cache(cache_name, size,
336 calculate_alignment(flags, align, size),
337 flags, useroffset, usersize, ctor, NULL);
338 if (IS_ERR(s)) {
339 err = PTR_ERR(s);
340 kfree_const(cache_name);
341 }
342
343out_unlock:
344 mutex_unlock(&slab_mutex);
345
346 if (err) {
347 if (flags & SLAB_PANIC)
348 panic("%s: Failed to create slab '%s'. Error %d\n",
349 __func__, name, err);
350 else {
351 pr_warn("%s(%s) failed with error %d\n",
352 __func__, name, err);
353 dump_stack();
354 }
355 return NULL;
356 }
357 return s;
358}
359EXPORT_SYMBOL(kmem_cache_create_usercopy);
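/*
 * Illustrative usage sketch (struct foo and its "data" member are
 * assumptions): expose only one field of the object to usercopy checks:
 *
 *	foo_cachep = kmem_cache_create_usercopy("foo", sizeof(struct foo),
 *				0, SLAB_HWCACHE_ALIGN,
 *				offsetof(struct foo, data),
 *				sizeof_field(struct foo, data), NULL);
 */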
360
361/**
362 * kmem_cache_create - Create a cache.
363 * @name: A string which is used in /proc/slabinfo to identify this cache.
364 * @size: The size of objects to be created in this cache.
365 * @align: The required alignment for the objects.
366 * @flags: SLAB flags
367 * @ctor: A constructor for the objects.
368 *
369 * Cannot be called within an interrupt, but can be interrupted.
370 * The @ctor is run when new pages are allocated by the cache.
371 *
372 * The flags are
373 *
374 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
375 * to catch references to uninitialised memory.
376 *
377 * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
378 * for buffer overruns.
379 *
380 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
381 * cacheline. This can be beneficial if you're counting cycles as closely
382 * as davem.
383 *
384 * Return: a pointer to the cache on success, NULL on failure.
385 */
386struct kmem_cache *
387kmem_cache_create(const char *name, unsigned int size, unsigned int align,
388 slab_flags_t flags, void (*ctor)(void *))
389{
390 return kmem_cache_create_usercopy(name, size, align, flags, 0, 0,
391 ctor);
392}
393EXPORT_SYMBOL(kmem_cache_create);
394
395#ifdef SLAB_SUPPORTS_SYSFS
396/*
397 * For a given kmem_cache, kmem_cache_destroy() should only be called
398 * once or there will be a use-after-free problem. The actual deletion
399 * and release of the kobject does not need slab_mutex or cpu_hotplug_lock
400 * protection. So they are now done without holding those locks.
401 *
402 * Note that there will be a slight delay in the deletion of sysfs files
403 * if kmem_cache_release() is called indirectly from a work function.
404 */
405static void kmem_cache_release(struct kmem_cache *s)
406{
407 if (slab_state >= FULL) {
408 sysfs_slab_unlink(s);
409 sysfs_slab_release(s);
410 } else {
411 slab_kmem_cache_release(s);
412 }
413}
414#else
415static void kmem_cache_release(struct kmem_cache *s)
416{
417 slab_kmem_cache_release(s);
418}
419#endif
420
421static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
422{
423 LIST_HEAD(to_destroy);
424 struct kmem_cache *s, *s2;
425
426 /*
427 * On destruction, SLAB_TYPESAFE_BY_RCU kmem_caches are put on the
428 * @slab_caches_to_rcu_destroy list. The slab pages are freed
429 * through RCU and the associated kmem_cache are dereferenced
430 * while freeing the pages, so the kmem_caches should be freed only
431 * after the pending RCU operations are finished. As rcu_barrier()
432 * is a pretty slow operation, we batch all pending destructions
433 * asynchronously.
434 */
435 mutex_lock(&slab_mutex);
436 list_splice_init(&slab_caches_to_rcu_destroy, &to_destroy);
437 mutex_unlock(&slab_mutex);
438
439 if (list_empty(&to_destroy))
440 return;
441
442 rcu_barrier();
443
444 list_for_each_entry_safe(s, s2, &to_destroy, list) {
445 debugfs_slab_release(s);
446 kfence_shutdown_cache(s);
447 kmem_cache_release(s);
448 }
449}
450
451static int shutdown_cache(struct kmem_cache *s)
452{
453 /* free asan quarantined objects */
454 kasan_cache_shutdown(s);
455
456 if (__kmem_cache_shutdown(s) != 0)
457 return -EBUSY;
458
459 list_del(&s->list);
460
461 if (s->flags & SLAB_TYPESAFE_BY_RCU) {
462 list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
463 schedule_work(&slab_caches_to_rcu_destroy_work);
464 } else {
465 kfence_shutdown_cache(s);
466 debugfs_slab_release(s);
467 }
468
469 return 0;
470}
471
472void slab_kmem_cache_release(struct kmem_cache *s)
473{
474 __kmem_cache_release(s);
475 kfree_const(s->name);
476 kmem_cache_free(kmem_cache, s);
477}
478
479void kmem_cache_destroy(struct kmem_cache *s)
480{
481 int err = -EBUSY;
482 bool rcu_set;
483
484 if (unlikely(!s) || !kasan_check_byte(s))
485 return;
486
487 cpus_read_lock();
488 mutex_lock(&slab_mutex);
489
490 rcu_set = s->flags & SLAB_TYPESAFE_BY_RCU;
491
492 s->refcount--;
493 if (s->refcount)
494 goto out_unlock;
495
496 err = shutdown_cache(s);
497 WARN(err, "%s %s: Slab cache still has objects when called from %pS",
498 __func__, s->name, (void *)_RET_IP_);
499out_unlock:
500 mutex_unlock(&slab_mutex);
501 cpus_read_unlock();
502 if (!err && !rcu_set)
503 kmem_cache_release(s);
504}
505EXPORT_SYMBOL(kmem_cache_destroy);
506
507/**
508 * kmem_cache_shrink - Shrink a cache.
509 * @cachep: The cache to shrink.
510 *
511 * Releases as many slabs as possible for a cache.
512 * To help debugging, a zero exit status indicates all slabs were released.
513 *
514 * Return: %0 if all slabs were released, non-zero otherwise
515 */
516int kmem_cache_shrink(struct kmem_cache *cachep)
517{
518 kasan_cache_shrink(cachep);
519
520 return __kmem_cache_shrink(cachep);
521}
522EXPORT_SYMBOL(kmem_cache_shrink);
523
524bool slab_is_available(void)
525{
526 return slab_state >= UP;
527}
528
529#ifdef CONFIG_PRINTK
530static void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
531{
532 if (__kfence_obj_info(kpp, object, slab))
533 return;
534 __kmem_obj_info(kpp, object, slab);
535}
536
537/**
538 * kmem_dump_obj - Print available slab provenance information
539 * @object: slab object for which to find provenance information.
540 *
541 * This function uses pr_cont(), so that the caller is expected to have
542 * printed out whatever preamble is appropriate. The provenance information
543 * depends on the type of object and on how much debugging is enabled.
544 * For a slab-cache object, the fact that it is a slab object is printed,
545 * and, if available, the slab name, return address, and stack trace from
546 * the allocation and last free path of that object.
547 *
548 * Return: %true if the pointer is to a not-yet-freed object from
549 * kmalloc() or kmem_cache_alloc(), either %true or %false if the pointer
550 * is to an already-freed object, and %false otherwise.
551 */
552bool kmem_dump_obj(void *object)
553{
554 char *cp = IS_ENABLED(CONFIG_MMU) ? "" : "/vmalloc";
555 int i;
556 struct slab *slab;
557 unsigned long ptroffset;
558 struct kmem_obj_info kp = { };
559
560 /* Some arches consider ZERO_SIZE_PTR to be a valid address. */
561 if (object < (void *)PAGE_SIZE || !virt_addr_valid(object))
562 return false;
563 slab = virt_to_slab(object);
564 if (!slab)
565 return false;
566
567 kmem_obj_info(&kp, object, slab);
568 if (kp.kp_slab_cache)
569 pr_cont(" slab%s %s", cp, kp.kp_slab_cache->name);
570 else
571 pr_cont(" slab%s", cp);
572 if (is_kfence_address(object))
573 pr_cont(" (kfence)");
574 if (kp.kp_objp)
575 pr_cont(" start %px", kp.kp_objp);
576 if (kp.kp_data_offset)
577 pr_cont(" data offset %lu", kp.kp_data_offset);
578 if (kp.kp_objp) {
579 ptroffset = ((char *)object - (char *)kp.kp_objp) - kp.kp_data_offset;
580 pr_cont(" pointer offset %lu", ptroffset);
581 }
582 if (kp.kp_slab_cache && kp.kp_slab_cache->object_size)
583 pr_cont(" size %u", kp.kp_slab_cache->object_size);
584 if (kp.kp_ret)
585 pr_cont(" allocated at %pS\n", kp.kp_ret);
586 else
587 pr_cont("\n");
588 for (i = 0; i < ARRAY_SIZE(kp.kp_stack); i++) {
589 if (!kp.kp_stack[i])
590 break;
591 pr_info(" %pS\n", kp.kp_stack[i]);
592 }
593
594 if (kp.kp_free_stack[0])
595 pr_cont(" Free path:\n");
596
597 for (i = 0; i < ARRAY_SIZE(kp.kp_free_stack); i++) {
598 if (!kp.kp_free_stack[i])
599 break;
600 pr_info(" %pS\n", kp.kp_free_stack[i]);
601 }
602
603 return true;
604}
605EXPORT_SYMBOL_GPL(kmem_dump_obj);
606#endif
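/*
 * Illustrative usage sketch: kmem_dump_obj() prints with pr_cont(), so a
 * debugging caller provides the preamble:
 *
 *	pr_info("suspect pointer %px:", ptr);
 *	if (!kmem_dump_obj(ptr))
 *		pr_cont(" not a slab-backed address\n");
 */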
607
608/* Create a cache during boot when no slab services are available yet */
609void __init create_boot_cache(struct kmem_cache *s, const char *name,
610 unsigned int size, slab_flags_t flags,
611 unsigned int useroffset, unsigned int usersize)
612{
613 int err;
614 unsigned int align = ARCH_KMALLOC_MINALIGN;
615
616 s->name = name;
617 s->size = s->object_size = size;
618
619 /*
620 * For power of two sizes, guarantee natural alignment for kmalloc
621 * caches, regardless of SL*B debugging options.
622 */
623 if (is_power_of_2(size))
624 align = max(align, size);
625 s->align = calculate_alignment(flags, align, size);
626
627#ifdef CONFIG_HARDENED_USERCOPY
628 s->useroffset = useroffset;
629 s->usersize = usersize;
630#endif
631
632 err = __kmem_cache_create(s, flags);
633
634 if (err)
635 panic("Creation of kmalloc slab %s size=%u failed. Reason %d\n",
636 name, size, err);
637
638 s->refcount = -1; /* Exempt from merging for now */
639}
640
641static struct kmem_cache *__init create_kmalloc_cache(const char *name,
642 unsigned int size,
643 slab_flags_t flags)
644{
645 struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
646
647 if (!s)
648 panic("Out of memory when creating slab %s\n", name);
649
650 create_boot_cache(s, name, size, flags | SLAB_KMALLOC, 0, size);
651 list_add(&s->list, &slab_caches);
652 s->refcount = 1;
653 return s;
654}
655
656struct kmem_cache *
657kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1] __ro_after_init =
658{ /* initialization for https://llvm.org/pr42570 */ };
659EXPORT_SYMBOL(kmalloc_caches);
660
661#ifdef CONFIG_RANDOM_KMALLOC_CACHES
662unsigned long random_kmalloc_seed __ro_after_init;
663EXPORT_SYMBOL(random_kmalloc_seed);
664#endif
665
666/*
667 * Conversion table for small slab sizes / 8 to the index in the
668 * kmalloc array. This is necessary for slabs < 192 since we have non power
669 * of two cache sizes there. The size of larger slabs can be determined using
670 * fls.
671 */
672u8 kmalloc_size_index[24] __ro_after_init = {
673 3, /* 8 */
674 4, /* 16 */
675 5, /* 24 */
676 5, /* 32 */
677 6, /* 40 */
678 6, /* 48 */
679 6, /* 56 */
680 6, /* 64 */
681 1, /* 72 */
682 1, /* 80 */
683 1, /* 88 */
684 1, /* 96 */
685 7, /* 104 */
686 7, /* 112 */
687 7, /* 120 */
688 7, /* 128 */
689 2, /* 136 */
690 2, /* 144 */
691 2, /* 152 */
692 2, /* 160 */
693 2, /* 168 */
694 2, /* 176 */
695 2, /* 184 */
696 2 /* 192 */
697};
698
699size_t kmalloc_size_roundup(size_t size)
700{
701 if (size && size <= KMALLOC_MAX_CACHE_SIZE) {
702 /*
703 * The flags don't matter since size_index is common to all.
704 * Neither does the caller for just getting ->object_size.
705 */
706 return kmalloc_slab(size, GFP_KERNEL, 0)->object_size;
707 }
708
709 /* Above the smaller buckets, size is a multiple of page size. */
710 if (size && size <= KMALLOC_MAX_SIZE)
711 return PAGE_SIZE << get_order(size);
712
713 /*
714 * Return 'size' for 0 - kmalloc() returns ZERO_SIZE_PTR
715 * and very large size - kmalloc() may fail.
716 */
717 return size;
718
719}
720EXPORT_SYMBOL(kmalloc_size_roundup);
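/*
 * Illustrative example: on a default configuration kmalloc_size_roundup(100)
 * returns 128, so a caller that wants to use all of the slack can request
 * the rounded size up front:
 *
 *	size_t alloc = kmalloc_size_roundup(len);
 *	buf = kmalloc(alloc, GFP_KERNEL);
 */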
721
722#ifdef CONFIG_ZONE_DMA
723#define KMALLOC_DMA_NAME(sz) .name[KMALLOC_DMA] = "dma-kmalloc-" #sz,
724#else
725#define KMALLOC_DMA_NAME(sz)
726#endif
727
728#ifdef CONFIG_MEMCG_KMEM
729#define KMALLOC_CGROUP_NAME(sz) .name[KMALLOC_CGROUP] = "kmalloc-cg-" #sz,
730#else
731#define KMALLOC_CGROUP_NAME(sz)
732#endif
733
734#ifndef CONFIG_SLUB_TINY
735#define KMALLOC_RCL_NAME(sz) .name[KMALLOC_RECLAIM] = "kmalloc-rcl-" #sz,
736#else
737#define KMALLOC_RCL_NAME(sz)
738#endif
739
740#ifdef CONFIG_RANDOM_KMALLOC_CACHES
741#define __KMALLOC_RANDOM_CONCAT(a, b) a ## b
742#define KMALLOC_RANDOM_NAME(N, sz) __KMALLOC_RANDOM_CONCAT(KMA_RAND_, N)(sz)
743#define KMA_RAND_1(sz) .name[KMALLOC_RANDOM_START + 1] = "kmalloc-rnd-01-" #sz,
744#define KMA_RAND_2(sz) KMA_RAND_1(sz) .name[KMALLOC_RANDOM_START + 2] = "kmalloc-rnd-02-" #sz,
745#define KMA_RAND_3(sz) KMA_RAND_2(sz) .name[KMALLOC_RANDOM_START + 3] = "kmalloc-rnd-03-" #sz,
746#define KMA_RAND_4(sz) KMA_RAND_3(sz) .name[KMALLOC_RANDOM_START + 4] = "kmalloc-rnd-04-" #sz,
747#define KMA_RAND_5(sz) KMA_RAND_4(sz) .name[KMALLOC_RANDOM_START + 5] = "kmalloc-rnd-05-" #sz,
748#define KMA_RAND_6(sz) KMA_RAND_5(sz) .name[KMALLOC_RANDOM_START + 6] = "kmalloc-rnd-06-" #sz,
749#define KMA_RAND_7(sz) KMA_RAND_6(sz) .name[KMALLOC_RANDOM_START + 7] = "kmalloc-rnd-07-" #sz,
750#define KMA_RAND_8(sz) KMA_RAND_7(sz) .name[KMALLOC_RANDOM_START + 8] = "kmalloc-rnd-08-" #sz,
751#define KMA_RAND_9(sz) KMA_RAND_8(sz) .name[KMALLOC_RANDOM_START + 9] = "kmalloc-rnd-09-" #sz,
752#define KMA_RAND_10(sz) KMA_RAND_9(sz) .name[KMALLOC_RANDOM_START + 10] = "kmalloc-rnd-10-" #sz,
753#define KMA_RAND_11(sz) KMA_RAND_10(sz) .name[KMALLOC_RANDOM_START + 11] = "kmalloc-rnd-11-" #sz,
754#define KMA_RAND_12(sz) KMA_RAND_11(sz) .name[KMALLOC_RANDOM_START + 12] = "kmalloc-rnd-12-" #sz,
755#define KMA_RAND_13(sz) KMA_RAND_12(sz) .name[KMALLOC_RANDOM_START + 13] = "kmalloc-rnd-13-" #sz,
756#define KMA_RAND_14(sz) KMA_RAND_13(sz) .name[KMALLOC_RANDOM_START + 14] = "kmalloc-rnd-14-" #sz,
757#define KMA_RAND_15(sz) KMA_RAND_14(sz) .name[KMALLOC_RANDOM_START + 15] = "kmalloc-rnd-15-" #sz,
758#else // CONFIG_RANDOM_KMALLOC_CACHES
759#define KMALLOC_RANDOM_NAME(N, sz)
760#endif
761
762#define INIT_KMALLOC_INFO(__size, __short_size) \
763{ \
764 .name[KMALLOC_NORMAL] = "kmalloc-" #__short_size, \
765 KMALLOC_RCL_NAME(__short_size) \
766 KMALLOC_CGROUP_NAME(__short_size) \
767 KMALLOC_DMA_NAME(__short_size) \
768 KMALLOC_RANDOM_NAME(RANDOM_KMALLOC_CACHES_NR, __short_size) \
769 .size = __size, \
770}
771
772/*
773 * kmalloc_info[] is to make slab_debug=,kmalloc-xx option work at boot time.
774 * kmalloc_index() supports up to 2^21=2MB, so the final entry of the table is
775 * kmalloc-2M.
776 */
777const struct kmalloc_info_struct kmalloc_info[] __initconst = {
778 INIT_KMALLOC_INFO(0, 0),
779 INIT_KMALLOC_INFO(96, 96),
780 INIT_KMALLOC_INFO(192, 192),
781 INIT_KMALLOC_INFO(8, 8),
782 INIT_KMALLOC_INFO(16, 16),
783 INIT_KMALLOC_INFO(32, 32),
784 INIT_KMALLOC_INFO(64, 64),
785 INIT_KMALLOC_INFO(128, 128),
786 INIT_KMALLOC_INFO(256, 256),
787 INIT_KMALLOC_INFO(512, 512),
788 INIT_KMALLOC_INFO(1024, 1k),
789 INIT_KMALLOC_INFO(2048, 2k),
790 INIT_KMALLOC_INFO(4096, 4k),
791 INIT_KMALLOC_INFO(8192, 8k),
792 INIT_KMALLOC_INFO(16384, 16k),
793 INIT_KMALLOC_INFO(32768, 32k),
794 INIT_KMALLOC_INFO(65536, 64k),
795 INIT_KMALLOC_INFO(131072, 128k),
796 INIT_KMALLOC_INFO(262144, 256k),
797 INIT_KMALLOC_INFO(524288, 512k),
798 INIT_KMALLOC_INFO(1048576, 1M),
799 INIT_KMALLOC_INFO(2097152, 2M)
800};
801
802/*
803 * Patch up the size_index table if we have strange large alignment
804 * requirements for the kmalloc array. This seems to be the case
805 * only for MIPS. The standard arches will not generate any code here.
806 *
807 * Largest permitted alignment is 256 bytes due to the way we
808 * handle the index determination for the smaller caches.
809 *
810 * Make sure that nothing crazy happens if someone starts tinkering
811 * around with ARCH_KMALLOC_MINALIGN.
812 */
813void __init setup_kmalloc_cache_index_table(void)
814{
815 unsigned int i;
816
817 BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
818 !is_power_of_2(KMALLOC_MIN_SIZE));
819
820 for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
821 unsigned int elem = size_index_elem(i);
822
823 if (elem >= ARRAY_SIZE(kmalloc_size_index))
824 break;
825 kmalloc_size_index[elem] = KMALLOC_SHIFT_LOW;
826 }
827
828 if (KMALLOC_MIN_SIZE >= 64) {
829 /*
830 * The 96 byte sized cache is not used if the alignment
831		 * is 64 bytes.
832 */
833 for (i = 64 + 8; i <= 96; i += 8)
834 kmalloc_size_index[size_index_elem(i)] = 7;
835
836 }
837
838 if (KMALLOC_MIN_SIZE >= 128) {
839 /*
840 * The 192 byte sized cache is not used if the alignment
841		 * is 128 bytes. Redirect kmalloc to use the 256 byte cache
842 * instead.
843 */
844 for (i = 128 + 8; i <= 192; i += 8)
845 kmalloc_size_index[size_index_elem(i)] = 8;
846 }
847}
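/*
 * A worked example (a sketch, not covering every configuration): with
 * KMALLOC_MIN_SIZE == 64 the first loop maps the 8..56 byte entries to
 * KMALLOC_SHIFT_LOW (the 64 byte cache), and the second loop maps the
 * 72..96 byte entries to index 7 (the 128 byte cache), so e.g.
 * kmalloc(72, GFP_KERNEL) is served from kmalloc-128 rather than the
 * unused kmalloc-96.
 */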
848
849static unsigned int __kmalloc_minalign(void)
850{
851 unsigned int minalign = dma_get_cache_alignment();
852
853 if (IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC) &&
854 is_swiotlb_allocated())
855 minalign = ARCH_KMALLOC_MINALIGN;
856
857 return max(minalign, arch_slab_minalign());
858}
859
860static void __init
861new_kmalloc_cache(int idx, enum kmalloc_cache_type type)
862{
863 slab_flags_t flags = 0;
864 unsigned int minalign = __kmalloc_minalign();
865 unsigned int aligned_size = kmalloc_info[idx].size;
866 int aligned_idx = idx;
867
868 if ((KMALLOC_RECLAIM != KMALLOC_NORMAL) && (type == KMALLOC_RECLAIM)) {
869 flags |= SLAB_RECLAIM_ACCOUNT;
870 } else if (IS_ENABLED(CONFIG_MEMCG_KMEM) && (type == KMALLOC_CGROUP)) {
871 if (mem_cgroup_kmem_disabled()) {
872 kmalloc_caches[type][idx] = kmalloc_caches[KMALLOC_NORMAL][idx];
873 return;
874 }
875 flags |= SLAB_ACCOUNT;
876 } else if (IS_ENABLED(CONFIG_ZONE_DMA) && (type == KMALLOC_DMA)) {
877 flags |= SLAB_CACHE_DMA;
878 }
879
880#ifdef CONFIG_RANDOM_KMALLOC_CACHES
881 if (type >= KMALLOC_RANDOM_START && type <= KMALLOC_RANDOM_END)
882 flags |= SLAB_NO_MERGE;
883#endif
884
885 /*
886 * If CONFIG_MEMCG_KMEM is enabled, disable cache merging for
887 * KMALLOC_NORMAL caches.
888 */
889 if (IS_ENABLED(CONFIG_MEMCG_KMEM) && (type == KMALLOC_NORMAL))
890 flags |= SLAB_NO_MERGE;
891
892 if (minalign > ARCH_KMALLOC_MINALIGN) {
893 aligned_size = ALIGN(aligned_size, minalign);
894 aligned_idx = __kmalloc_index(aligned_size, false);
895 }
896
897 if (!kmalloc_caches[type][aligned_idx])
898 kmalloc_caches[type][aligned_idx] = create_kmalloc_cache(
899 kmalloc_info[aligned_idx].name[type],
900 aligned_size, flags);
901 if (idx != aligned_idx)
902 kmalloc_caches[type][idx] = kmalloc_caches[type][aligned_idx];
903}
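/*
 * For example (hypothetical values): if __kmalloc_minalign() returns 128 on
 * a system with non-coherent DMA, a request to create kmalloc-96 is rounded
 * up to aligned_size == 128 and aligned_idx == 7, so the index for 96 bytes
 * simply aliases the kmalloc-128 cache instead of creating a separate,
 * insufficiently aligned one.
 */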
904
905/*
906 * Create the kmalloc array. Some of the regular kmalloc arrays
907 * may already have been created because they were needed to
908 * enable allocations for slab creation.
909 */
910void __init create_kmalloc_caches(void)
911{
912 int i;
913 enum kmalloc_cache_type type;
914
915 /*
916	 * This includes KMALLOC_CGROUP when CONFIG_MEMCG_KMEM is defined.
917 */
918 for (type = KMALLOC_NORMAL; type < NR_KMALLOC_TYPES; type++) {
919 for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
920 if (!kmalloc_caches[type][i])
921 new_kmalloc_cache(i, type);
922
923 /*
924			 * Caches that are not a power-of-two size
925			 * (96 and 192 bytes). These have to be created
926			 * immediately after the earlier power of two caches.
927 */
928 if (KMALLOC_MIN_SIZE <= 32 && i == 6 &&
929 !kmalloc_caches[type][1])
930 new_kmalloc_cache(1, type);
931 if (KMALLOC_MIN_SIZE <= 64 && i == 7 &&
932 !kmalloc_caches[type][2])
933 new_kmalloc_cache(2, type);
934 }
935 }
936#ifdef CONFIG_RANDOM_KMALLOC_CACHES
937 random_kmalloc_seed = get_random_u64();
938#endif
939
940 /* Kmalloc array is now usable */
941 slab_state = UP;
942}
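/*
 * A concrete illustration (assuming a typical 4K page configuration with
 * KMALLOC_SHIFT_LOW == 3 and KMALLOC_SHIFT_HIGH == 13): the loop above
 * creates kmalloc-8 through kmalloc-8k, and the i == 6 / i == 7 steps fill
 * in the odd-sized kmalloc-96 (index 1) and kmalloc-192 (index 2) right
 * after kmalloc-64 and kmalloc-128 respectively.
 */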
943
944/**
945 * __ksize -- Report full size of underlying allocation
946 * @object: pointer to the object
947 *
948 * This should only be used internally to query the true size of allocations.
949 * It is not meant to be a way to discover the usable size of an allocation
950 * after the fact. Instead, use kmalloc_size_roundup(). Using memory beyond
951 * the originally requested allocation size may trigger KASAN, UBSAN_BOUNDS,
952 * and/or FORTIFY_SOURCE.
953 *
954 * Return: size of the actual memory used by @object in bytes
955 */
956size_t __ksize(const void *object)
957{
958 struct folio *folio;
959
960 if (unlikely(object == ZERO_SIZE_PTR))
961 return 0;
962
963 folio = virt_to_folio(object);
964
965 if (unlikely(!folio_test_slab(folio))) {
966 if (WARN_ON(folio_size(folio) <= KMALLOC_MAX_CACHE_SIZE))
967 return 0;
968 if (WARN_ON(object != folio_address(folio)))
969 return 0;
970 return folio_size(folio);
971 }
972
973#ifdef CONFIG_SLUB_DEBUG
974 skip_orig_size_check(folio_slab(folio)->slab_cache, object);
975#endif
976
977 return slab_ksize(folio_slab(folio)->slab_cache);
978}
979
980gfp_t kmalloc_fix_flags(gfp_t flags)
981{
982 gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
983
984 flags &= ~GFP_SLAB_BUG_MASK;
985 pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
986 invalid_mask, &invalid_mask, flags, &flags);
987 dump_stack();
988
989 return flags;
990}
991
992#ifdef CONFIG_SLAB_FREELIST_RANDOM
993/* Randomize a generic freelist */
994static void freelist_randomize(unsigned int *list,
995 unsigned int count)
996{
997 unsigned int rand;
998 unsigned int i;
999
1000 for (i = 0; i < count; i++)
1001 list[i] = i;
1002
1003 /* Fisher-Yates shuffle */
1004 for (i = count - 1; i > 0; i--) {
1005 rand = get_random_u32_below(i + 1);
1006 swap(list[i], list[rand]);
1007 }
1008}
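/*
 * An illustrative trace with made-up random draws: for count == 4 the list
 * starts as {0, 1, 2, 3}. If get_random_u32_below() returns 1, 2 and 0 on
 * the three iterations, the swaps yield {0, 3, 2, 1}, then {0, 3, 2, 1}
 * (a self-swap), then {3, 0, 2, 1}; every permutation is equally likely.
 */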
1009
1010/* Create a random sequence per cache */
1011int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
1012 gfp_t gfp)
1013{
1014
1015 if (count < 2 || cachep->random_seq)
1016 return 0;
1017
1018 cachep->random_seq = kcalloc(count, sizeof(unsigned int), gfp);
1019 if (!cachep->random_seq)
1020 return -ENOMEM;
1021
1022 freelist_randomize(cachep->random_seq, count);
1023 return 0;
1024}
1025
1026/* Destroy the per-cache random freelist sequence */
1027void cache_random_seq_destroy(struct kmem_cache *cachep)
1028{
1029 kfree(cachep->random_seq);
1030 cachep->random_seq = NULL;
1031}
1032#endif /* CONFIG_SLAB_FREELIST_RANDOM */
1033
1034#ifdef CONFIG_SLUB_DEBUG
1035#define SLABINFO_RIGHTS (0400)
1036
1037static void print_slabinfo_header(struct seq_file *m)
1038{
1039 /*
1040 * Output format version, so at least we can change it
1041 * without _too_ many complaints.
1042 */
1043 seq_puts(m, "slabinfo - version: 2.1\n");
1044 seq_puts(m, "# name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
1045 seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
1046 seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
1047 seq_putc(m, '\n');
1048}
1049
1050static void *slab_start(struct seq_file *m, loff_t *pos)
1051{
1052 mutex_lock(&slab_mutex);
1053 return seq_list_start(&slab_caches, *pos);
1054}
1055
1056static void *slab_next(struct seq_file *m, void *p, loff_t *pos)
1057{
1058 return seq_list_next(p, &slab_caches, pos);
1059}
1060
1061static void slab_stop(struct seq_file *m, void *p)
1062{
1063 mutex_unlock(&slab_mutex);
1064}
1065
1066static void cache_show(struct kmem_cache *s, struct seq_file *m)
1067{
1068 struct slabinfo sinfo;
1069
1070 memset(&sinfo, 0, sizeof(sinfo));
1071 get_slabinfo(s, &sinfo);
1072
1073 seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
1074 s->name, sinfo.active_objs, sinfo.num_objs, s->size,
1075 sinfo.objects_per_slab, (1 << sinfo.cache_order));
1076
1077 seq_printf(m, " : tunables %4u %4u %4u",
1078 sinfo.limit, sinfo.batchcount, sinfo.shared);
1079 seq_printf(m, " : slabdata %6lu %6lu %6lu",
1080 sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
1081 slabinfo_show_stats(m, s);
1082 seq_putc(m, '\n');
1083}
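/*
 * For orientation, a line produced by the format above might look like this
 * (the values are made up):
 *
 * kmalloc-128         1024   1024    128   32    1 : tunables    0    0    0 : slabdata     32     32      0
 */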
1084
1085static int slab_show(struct seq_file *m, void *p)
1086{
1087 struct kmem_cache *s = list_entry(p, struct kmem_cache, list);
1088
1089 if (p == slab_caches.next)
1090 print_slabinfo_header(m);
1091 cache_show(s, m);
1092 return 0;
1093}
1094
1095void dump_unreclaimable_slab(void)
1096{
1097 struct kmem_cache *s;
1098 struct slabinfo sinfo;
1099
1100 /*
1101	 * Acquiring slab_mutex here is risky since we don't want to sleep
1102	 * in the oom path. But without holding the mutex we risk a crash
1103	 * while traversing the list.
1104	 * Use mutex_trylock to protect the list traversal, and dump nothing
1105	 * if the mutex cannot be acquired.
1106 */
1107 if (!mutex_trylock(&slab_mutex)) {
1108 pr_warn("excessive unreclaimable slab but cannot dump stats\n");
1109 return;
1110 }
1111
1112 pr_info("Unreclaimable slab info:\n");
1113 pr_info("Name Used Total\n");
1114
1115 list_for_each_entry(s, &slab_caches, list) {
1116 if (s->flags & SLAB_RECLAIM_ACCOUNT)
1117 continue;
1118
1119 get_slabinfo(s, &sinfo);
1120
1121 if (sinfo.num_objs > 0)
1122 pr_info("%-17s %10luKB %10luKB\n", s->name,
1123 (sinfo.active_objs * s->size) / 1024,
1124 (sinfo.num_objs * s->size) / 1024);
1125 }
1126 mutex_unlock(&slab_mutex);
1127}
1128
1129/*
1130 * slabinfo_op - iterator that generates /proc/slabinfo
1131 *
1132 * Output layout:
1133 * cache-name
1134 * num-active-objs
1135 * total-objs
1136 * object size
1137 * num-active-slabs
1138 * total-slabs
1139 * num-pages-per-slab
1140 * + further values on SMP and with statistics enabled
1141 */
1142static const struct seq_operations slabinfo_op = {
1143 .start = slab_start,
1144 .next = slab_next,
1145 .stop = slab_stop,
1146 .show = slab_show,
1147};
1148
1149static int slabinfo_open(struct inode *inode, struct file *file)
1150{
1151 return seq_open(file, &slabinfo_op);
1152}
1153
1154static const struct proc_ops slabinfo_proc_ops = {
1155 .proc_flags = PROC_ENTRY_PERMANENT,
1156 .proc_open = slabinfo_open,
1157 .proc_read = seq_read,
1158 .proc_write = slabinfo_write,
1159 .proc_lseek = seq_lseek,
1160 .proc_release = seq_release,
1161};
1162
1163static int __init slab_proc_init(void)
1164{
1165 proc_create("slabinfo", SLABINFO_RIGHTS, NULL, &slabinfo_proc_ops);
1166 return 0;
1167}
1168module_init(slab_proc_init);
1169
1170#endif /* CONFIG_SLUB_DEBUG */
1171
1172static __always_inline __realloc_size(2) void *
1173__do_krealloc(const void *p, size_t new_size, gfp_t flags)
1174{
1175 void *ret;
1176 size_t ks;
1177
1178 /* Check for double-free before calling ksize. */
1179 if (likely(!ZERO_OR_NULL_PTR(p))) {
1180 if (!kasan_check_byte(p))
1181 return NULL;
1182 ks = ksize(p);
1183 } else
1184 ks = 0;
1185
1186 /* If the object still fits, repoison it precisely. */
1187 if (ks >= new_size) {
1188 p = kasan_krealloc((void *)p, new_size, flags);
1189 return (void *)p;
1190 }
1191
1192 ret = kmalloc_track_caller(new_size, flags);
1193 if (ret && p) {
1194 /* Disable KASAN checks as the object's redzone is accessed. */
1195 kasan_disable_current();
1196 memcpy(ret, kasan_reset_tag(p), ks);
1197 kasan_enable_current();
1198 }
1199
1200 return ret;
1201}
1202
1203/**
1204 * krealloc - reallocate memory. The contents will remain unchanged.
1205 * @p: object to reallocate memory for.
1206 * @new_size: how many bytes of memory are required.
1207 * @flags: the type of memory to allocate.
1208 *
1209 * The contents of the object pointed to are preserved up to the
1210 * lesser of the new and old sizes (__GFP_ZERO flag is effectively ignored).
1211 * If @p is %NULL, krealloc() behaves exactly like kmalloc(). If @new_size
1212 * is 0 and @p is not a %NULL pointer, the object pointed to is freed.
1213 *
1214 * Return: pointer to the allocated memory or %NULL in case of error
1215 */
1216void *krealloc(const void *p, size_t new_size, gfp_t flags)
1217{
1218 void *ret;
1219
1220 if (unlikely(!new_size)) {
1221 kfree(p);
1222 return ZERO_SIZE_PTR;
1223 }
1224
1225 ret = __do_krealloc(p, new_size, flags);
1226 if (ret && kasan_reset_tag(p) != kasan_reset_tag(ret))
1227 kfree(p);
1228
1229 return ret;
1230}
1231EXPORT_SYMBOL(krealloc);
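/*
 * A minimal usage sketch (hypothetical caller): on failure the original
 * allocation is left intact, so the old pointer must not be overwritten
 * before the result has been checked.
 *
 *	void *tmp = krealloc(buf, new_len, GFP_KERNEL);
 *
 *	if (!tmp) {
 *		kfree(buf);	// old buffer is still valid and still ours to free
 *		return -ENOMEM;
 *	}
 *	buf = tmp;
 */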
1232
1233/**
1234 * kfree_sensitive - Clear sensitive information in memory before freeing
1235 * @p: object to free memory of
1236 *
1237 * The memory of the object @p points to is zeroed before it is freed.
1238 * If @p is %NULL, kfree_sensitive() does nothing.
1239 *
1240 * Note: this function zeroes the whole allocated buffer, which can be a
1241 * good deal bigger than the requested buffer size passed to kmalloc(). So
1242 * be careful when using this function in performance-sensitive code.
1243 */
1244void kfree_sensitive(const void *p)
1245{
1246 size_t ks;
1247 void *mem = (void *)p;
1248
1249 ks = ksize(mem);
1250 if (ks) {
1251 kasan_unpoison_range(mem, ks);
1252 memzero_explicit(mem, ks);
1253 }
1254 kfree(mem);
1255}
1256EXPORT_SYMBOL(kfree_sensitive);
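/*
 * A minimal usage sketch (hypothetical key-handling code): the whole
 * underlying allocation is wiped, which makes this the natural way to drop
 * buffers holding key material or other secrets.
 *
 *	u8 *key = kmalloc(key_len, GFP_KERNEL);
 *	...
 *	kfree_sensitive(key);	// zeroes before freeing; NULL is a no-op
 */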
1257
1258size_t ksize(const void *objp)
1259{
1260 /*
1261 * We need to first check that the pointer to the object is valid.
1262	 * The KASAN report printed from ksize() is more useful than one
1263	 * printed later, when the behaviour could be undefined due to
1264 * a potential use-after-free or double-free.
1265 *
1266 * We use kasan_check_byte(), which is supported for the hardware
1267 * tag-based KASAN mode, unlike kasan_check_read/write().
1268 *
1269 * If the pointed to memory is invalid, we return 0 to avoid users of
1270 * ksize() writing to and potentially corrupting the memory region.
1271 *
1272 * We want to perform the check before __ksize(), to avoid potentially
1273 * crashing in __ksize() due to accessing invalid metadata.
1274 */
1275 if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
1276 return 0;
1277
1278 return kfence_ksize(objp) ?: __ksize(objp);
1279}
1280EXPORT_SYMBOL(ksize);
1281
1282/* Tracepoints definitions. */
1283EXPORT_TRACEPOINT_SYMBOL(kmalloc);
1284EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
1285EXPORT_TRACEPOINT_SYMBOL(kfree);
1286EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);
1287