v4.6
  1#ifndef MM_SLAB_H
  2#define MM_SLAB_H
  3/*
  4 * Internal slab definitions
  5 */
  6
  7#ifdef CONFIG_SLOB
  8/*
  9 * Common fields provided in kmem_cache by all slab allocators
 10 * This struct is either used directly by the allocator (SLOB)
 11 * or the allocator must include definitions for all fields
 12 * provided in kmem_cache_common in their definition of kmem_cache.
 13 *
 14 * Once we can do anonymous structs (C11 standard) we could put an
 15 * anonymous struct definition in these allocators so that the
 16 * separate allocations in the kmem_cache structure of SLAB and
 17 * SLUB are no longer needed.
 18 */
 19struct kmem_cache {
 20	unsigned int object_size;/* The original size of the object */
 21	unsigned int size;	/* The aligned/padded/added on size  */
 22	unsigned int align;	/* Alignment as calculated */
 23	unsigned long flags;	/* Active flags on the slab */
 24	const char *name;	/* Slab name for sysfs */
 25	int refcount;		/* Use counter */
 26	void (*ctor)(void *);	/* Called on object slot creation */
 27	struct list_head list;	/* List of all slab caches on the system */
 28};
 29
 30#endif /* CONFIG_SLOB */
 31
 32#ifdef CONFIG_SLAB
 33#include <linux/slab_def.h>
 34#endif
 35
 36#ifdef CONFIG_SLUB
 37#include <linux/slub_def.h>
 38#endif
 39
 40#include <linux/memcontrol.h>
 41#include <linux/fault-inject.h>
 42#include <linux/kmemcheck.h>
 43#include <linux/kasan.h>
 44#include <linux/kmemleak.h>
 45
 46/*
 47 * State of the slab allocator.
 48 *
 49 * This is used to describe the states of the allocator during bootup.
 50 * Allocators use this to gradually bootstrap themselves. Most allocators
 51 * have the problem that the structures used for managing slab caches are
 52 * allocated from slab caches themselves.
 53 */
 54enum slab_state {
 55	DOWN,			/* No slab functionality yet */
 56	PARTIAL,		/* SLUB: kmem_cache_node available */
 57	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
 58	UP,			/* Slab caches usable but not all extras yet */
 59	FULL			/* Everything is working */
 60};
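The comment above describes the bootstrap problem this enum solves. As a brief, illustrative sketch of how late-init code typically gates on this state: slab_is_available() (declared in include/linux/slab.h) is essentially a slab_state >= UP check; the function below is hypothetical, not code from this header.

/* Hypothetical early-init helper: defer work until kmalloc is usable. */
static int __init example_late_setup(void)
{
	if (!slab_is_available())	/* i.e. slab_state < UP */
		return -EBUSY;
	/* safe to kmalloc() management structures from here on */
	return 0;
}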
 61
 62extern enum slab_state slab_state;
 63
 64/* The slab cache mutex protects the management structures during changes */
 65extern struct mutex slab_mutex;
 66
 67/* The list of all slab caches on the system */
 68extern struct list_head slab_caches;
 69
 70/* The slab cache that manages slab cache information */
 71extern struct kmem_cache *kmem_cache;
 72
 73unsigned long calculate_alignment(unsigned long flags,
 74		unsigned long align, unsigned long size);
 75
 76#ifndef CONFIG_SLOB
 77/* Kmalloc array related functions */
 78void setup_kmalloc_cache_index_table(void);
 79void create_kmalloc_caches(unsigned long);
 80
 81/* Find the kmalloc slab corresponding to a given size */
 82struct kmem_cache *kmalloc_slab(size_t, gfp_t);
 83#endif
 84
 85
 86/* Functions provided by the slab allocators */
 87extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);
 88
 89extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
 90			unsigned long flags);
 91extern void create_boot_cache(struct kmem_cache *, const char *name,
 92			size_t size, unsigned long flags);
 93
 94int slab_unmergeable(struct kmem_cache *s);
 95struct kmem_cache *find_mergeable(size_t size, size_t align,
 96		unsigned long flags, const char *name, void (*ctor)(void *));
 97#ifndef CONFIG_SLOB
 98struct kmem_cache *
 99__kmem_cache_alias(const char *name, size_t size, size_t align,
100		   unsigned long flags, void (*ctor)(void *));
101
102unsigned long kmem_cache_flags(unsigned long object_size,
103	unsigned long flags, const char *name,
104	void (*ctor)(void *));
105#else
106static inline struct kmem_cache *
107__kmem_cache_alias(const char *name, size_t size, size_t align,
108		   unsigned long flags, void (*ctor)(void *))
109{ return NULL; }
110
111static inline unsigned long kmem_cache_flags(unsigned long object_size,
112	unsigned long flags, const char *name,
113	void (*ctor)(void *))
114{
115	return flags;
116}
117#endif
118
119
120/* Legal flag mask for kmem_cache_create(), for various configurations */
121#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
122			 SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
123
124#if defined(CONFIG_DEBUG_SLAB)
125#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
126#elif defined(CONFIG_SLUB_DEBUG)
127#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
128			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
129#else
130#define SLAB_DEBUG_FLAGS (0)
131#endif
132
133#if defined(CONFIG_SLAB)
134#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
135			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
136			  SLAB_NOTRACK | SLAB_ACCOUNT)
137#elif defined(CONFIG_SLUB)
138#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
139			  SLAB_TEMPORARY | SLAB_NOTRACK | SLAB_ACCOUNT)
140#else
141#define SLAB_CACHE_FLAGS (0)
142#endif
143
144#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
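As a rough sketch of how this mask is applied: kmem_cache_create() in mm/slab_common.c does something along these lines before creating the cache (the snippet below is a simplification, not code from this header).

	/* Simplified sketch: flags outside CACHE_CREATE_MASK are dropped,
	 * so only bits the current configuration supports survive. */
	flags &= CACHE_CREATE_MASK;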
145
146int __kmem_cache_shutdown(struct kmem_cache *);
147void __kmem_cache_release(struct kmem_cache *);
148int __kmem_cache_shrink(struct kmem_cache *, bool);
149void slab_kmem_cache_release(struct kmem_cache *);
150
151struct seq_file;
152struct file;
153
154struct slabinfo {
155	unsigned long active_objs;
156	unsigned long num_objs;
157	unsigned long active_slabs;
158	unsigned long num_slabs;
159	unsigned long shared_avail;
160	unsigned int limit;
161	unsigned int batchcount;
162	unsigned int shared;
163	unsigned int objects_per_slab;
164	unsigned int cache_order;
165};
166
167void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
168void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
169ssize_t slabinfo_write(struct file *file, const char __user *buffer,
170		       size_t count, loff_t *ppos);
171
172/*
173 * Generic implementation of bulk operations
174 * These are useful for situations in which the allocator cannot
175 * perform optimizations. In that case segments of the object listed
176 * may be allocated or freed using these operations.
177 */
178void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
179int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
180
181#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
182/*
183 * Iterate over all memcg caches of the given root cache. The caller must hold
184 * slab_mutex.
185 */
186#define for_each_memcg_cache(iter, root) \
187	list_for_each_entry(iter, &(root)->memcg_params.list, \
188			    memcg_params.list)
189
190static inline bool is_root_cache(struct kmem_cache *s)
191{
192	return s->memcg_params.is_root_cache;
193}
194
195static inline bool slab_equal_or_root(struct kmem_cache *s,
196				      struct kmem_cache *p)
197{
198	return p == s || p == s->memcg_params.root_cache;
199}
200
201/*
202 * We use suffixes to the name in memcg because we can't have caches
203 * created in the system with the same name. But when we print them
204 * locally, better refer to them with the base name
205 */
206static inline const char *cache_name(struct kmem_cache *s)
207{
208	if (!is_root_cache(s))
209		s = s->memcg_params.root_cache;
210	return s->name;
211}
212
213/*
214 * Note, we protect with RCU only the memcg_caches array, not per-memcg caches.
215 * That said the caller must assure the memcg's cache won't go away by either
216 * taking a css reference to the owner cgroup, or holding the slab_mutex.
217 */
218static inline struct kmem_cache *
219cache_from_memcg_idx(struct kmem_cache *s, int idx)
220{
221	struct kmem_cache *cachep;
222	struct memcg_cache_array *arr;
223
224	rcu_read_lock();
225	arr = rcu_dereference(s->memcg_params.memcg_caches);
226
227	/*
228	 * Make sure we will access the up-to-date value. The code updating
229	 * memcg_caches issues a write barrier to match this (see
230	 * memcg_create_kmem_cache()).
231	 */
232	cachep = lockless_dereference(arr->entries[idx]);
233	rcu_read_unlock();
234
235	return cachep;
236}
237
238static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
239{
240	if (is_root_cache(s))
241		return s;
242	return s->memcg_params.root_cache;
243}
244
245static __always_inline int memcg_charge_slab(struct page *page,
246					     gfp_t gfp, int order,
247					     struct kmem_cache *s)
248{
249	int ret;
250
251	if (!memcg_kmem_enabled())
252		return 0;
253	if (is_root_cache(s))
254		return 0;
255
256	ret = __memcg_kmem_charge_memcg(page, gfp, order,
257					s->memcg_params.memcg);
258	if (ret)
259		return ret;
260
261	memcg_kmem_update_page_stat(page,
262			(s->flags & SLAB_RECLAIM_ACCOUNT) ?
263			MEMCG_SLAB_RECLAIMABLE : MEMCG_SLAB_UNRECLAIMABLE,
264			1 << order);
265	return 0;
266}
267
268static __always_inline void memcg_uncharge_slab(struct page *page, int order,
269						struct kmem_cache *s)
270{
271	memcg_kmem_update_page_stat(page,
272			(s->flags & SLAB_RECLAIM_ACCOUNT) ?
273			MEMCG_SLAB_RECLAIMABLE : MEMCG_SLAB_UNRECLAIMABLE,
274			-(1 << order));
275	memcg_kmem_uncharge(page, order);
276}
277
278extern void slab_init_memcg_params(struct kmem_cache *);
279
280#else /* CONFIG_MEMCG && !CONFIG_SLOB */
281
282#define for_each_memcg_cache(iter, root) \
283	for ((void)(iter), (void)(root); 0; )
284
285static inline bool is_root_cache(struct kmem_cache *s)
286{
287	return true;
288}
289
290static inline bool slab_equal_or_root(struct kmem_cache *s,
291				      struct kmem_cache *p)
292{
293	return true;
294}
295
296static inline const char *cache_name(struct kmem_cache *s)
297{
298	return s->name;
299}
300
301static inline struct kmem_cache *
302cache_from_memcg_idx(struct kmem_cache *s, int idx)
303{
304	return NULL;
305}
306
307static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
308{
309	return s;
310}
311
312static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
313				    struct kmem_cache *s)
314{
315	return 0;
316}
317
318static inline void memcg_uncharge_slab(struct page *page, int order,
319				       struct kmem_cache *s)
320{
321}
322
323static inline void slab_init_memcg_params(struct kmem_cache *s)
324{
325}
326#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
327
328static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
329{
330	struct kmem_cache *cachep;
331	struct page *page;
332
333	/*
334	 * When kmemcg is not being used, both assignments should return the
335	 * same value, but we don't want to pay the assignment price in that
336	 * case. If it is not compiled in, the compiler should be smart enough
337	 * to not do even the assignment. In that case, slab_equal_or_root
338	 * will also be a constant.
339	 */
340	if (!memcg_kmem_enabled() &&
341	    !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
342		return s;
343
344	page = virt_to_head_page(x);
345	cachep = page->slab_cache;
346	if (slab_equal_or_root(cachep, s))
347		return cachep;
348
349	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
350	       __func__, s->name, cachep->name);
351	WARN_ON_ONCE(1);
352	return s;
353}
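A hedged sketch of the intended call pattern on the free path: the callers in mm/slab.c and mm/slub.c do roughly this, but the function name below is illustrative only.

/* Hypothetical free-path wrapper showing where cache_from_obj() fits. */
void example_kmem_cache_free(struct kmem_cache *s, void *objp)
{
	/* Re-resolve the cache: with kmemcg or consistency checks enabled,
	 * the object may belong to a per-memcg child cache rather than the
	 * cache the caller passed in. */
	s = cache_from_obj(s, objp);
	/* ... hand the object back to the per-cpu / per-node free lists ... */
}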
354
355static inline size_t slab_ksize(const struct kmem_cache *s)
356{
357#ifndef CONFIG_SLUB
358	return s->object_size;
359
360#else /* CONFIG_SLUB */
361# ifdef CONFIG_SLUB_DEBUG
362	/*
363	 * Debugging requires use of the padding between object
364	 * and whatever may come after it.
365	 */
366	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
367		return s->object_size;
368# endif
369	/*
370	 * If we have the need to store the freelist pointer
371	 * back there or track user information then we can
372	 * only use the space before that information.
373	 */
374	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
375		return s->inuse;
376	/*
377	 * Else we can use all the padding etc for the allocation
378	 */
379	return s->size;
380#endif
381}
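In other words, the usable size reported here sits between the requested object size and the full per-object footprint. The concrete numbers below are assumptions for illustration, not values taken from this file.

	/* Illustrative relation for SLUB:
	 *   s->object_size <= slab_ksize(s) <= s->size
	 * A cache created for 100-byte objects with SLAB_HWCACHE_ALIGN on a
	 * machine with 64-byte cachelines typically ends up with
	 * object_size = 100 and size = 128; slab_ksize() then reports the
	 * 128 usable bytes that ksize() ultimately returns for such an
	 * object. */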
382
383static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
384						     gfp_t flags)
385{
386	flags &= gfp_allowed_mask;
387	lockdep_trace_alloc(flags);
388	might_sleep_if(gfpflags_allow_blocking(flags));
389
390	if (should_failslab(s, flags))
391		return NULL;
392
393	return memcg_kmem_get_cache(s, flags);
394}
395
396static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
397					size_t size, void **p)
398{
399	size_t i;
400
401	flags &= gfp_allowed_mask;
402	for (i = 0; i < size; i++) {
403		void *object = p[i];
404
405		kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
406		kmemleak_alloc_recursive(object, s->object_size, 1,
407					 s->flags, flags);
408		kasan_slab_alloc(s, object, flags);
409	}
410	memcg_kmem_put_cache(s);
411}
412
413#ifndef CONFIG_SLOB
414/*
415 * The slab lists for all objects.
416 */
417struct kmem_cache_node {
418	spinlock_t list_lock;
419
420#ifdef CONFIG_SLAB
421	struct list_head slabs_partial;	/* partial list first, better asm code */
422	struct list_head slabs_full;
423	struct list_head slabs_free;
424	unsigned long free_objects;
425	unsigned int free_limit;
426	unsigned int colour_next;	/* Per-node cache coloring */
427	struct array_cache *shared;	/* shared per node */
428	struct alien_cache **alien;	/* on other nodes */
429	unsigned long next_reap;	/* updated without locking */
430	int free_touched;		/* updated without locking */
431#endif
432
433#ifdef CONFIG_SLUB
434	unsigned long nr_partial;
435	struct list_head partial;
436#ifdef CONFIG_SLUB_DEBUG
437	atomic_long_t nr_slabs;
438	atomic_long_t total_objects;
439	struct list_head full;
440#endif
441#endif
442
443};
444
445static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
446{
447	return s->node[node];
448}
449
450/*
451 * Iterator over all nodes. The body will be executed for each node that has
452 * a kmem_cache_node structure allocated (which is true for all online nodes)
453 */
454#define for_each_kmem_cache_node(__s, __node, __n) \
455	for (__node = 0; __node < nr_node_ids; __node++) \
456		 if ((__n = get_node(__s, __node)))
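A minimal usage sketch of this iterator (the pattern mirrors callers in mm/slub.c; the helper function below is hypothetical):

/* Hypothetical helper: count partial slabs across all nodes of a cache. */
static unsigned long example_count_partial_slabs(struct kmem_cache *s)
{
	struct kmem_cache_node *n;
	unsigned long total = 0;
	int node;

	for_each_kmem_cache_node(s, node, n)
		total += n->nr_partial;	/* SLUB-only field, defined above */
	return total;
}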
457
458#endif
459
460void *slab_start(struct seq_file *m, loff_t *pos);
461void *slab_next(struct seq_file *m, void *p, loff_t *pos);
462void slab_stop(struct seq_file *m, void *p);
463int memcg_slab_show(struct seq_file *m, void *p);
464
465#endif /* MM_SLAB_H */
v5.14.15
  1/* SPDX-License-Identifier: GPL-2.0 */
  2#ifndef MM_SLAB_H
  3#define MM_SLAB_H
  4/*
  5 * Internal slab definitions
  6 */
  7
  8#ifdef CONFIG_SLOB
  9/*
 10 * Common fields provided in kmem_cache by all slab allocators
 11 * This struct is either used directly by the allocator (SLOB)
 12 * or the allocator must include definitions for all fields
 13 * provided in kmem_cache_common in their definition of kmem_cache.
 14 *
 15 * Once we can do anonymous structs (C11 standard) we could put an
 16 * anonymous struct definition in these allocators so that the
 17 * separate allocations in the kmem_cache structure of SLAB and
 18 * SLUB are no longer needed.
 19 */
 20struct kmem_cache {
 21	unsigned int object_size;/* The original size of the object */
 22	unsigned int size;	/* The aligned/padded/added on size  */
 23	unsigned int align;	/* Alignment as calculated */
 24	slab_flags_t flags;	/* Active flags on the slab */
 25	unsigned int useroffset;/* Usercopy region offset */
 26	unsigned int usersize;	/* Usercopy region size */
 27	const char *name;	/* Slab name for sysfs */
 28	int refcount;		/* Use counter */
 29	void (*ctor)(void *);	/* Called on object slot creation */
 30	struct list_head list;	/* List of all slab caches on the system */
 31};
 32
 33#endif /* CONFIG_SLOB */
 34
 35#ifdef CONFIG_SLAB
 36#include <linux/slab_def.h>
 37#endif
 38
 39#ifdef CONFIG_SLUB
 40#include <linux/slub_def.h>
 41#endif
 42
 43#include <linux/memcontrol.h>
 44#include <linux/fault-inject.h>
 45#include <linux/kasan.h>
 46#include <linux/kmemleak.h>
 47#include <linux/random.h>
 48#include <linux/sched/mm.h>
 49
 50/*
 51 * State of the slab allocator.
 52 *
 53 * This is used to describe the states of the allocator during bootup.
 54 * Allocators use this to gradually bootstrap themselves. Most allocators
 55 * have the problem that the structures used for managing slab caches are
 56 * allocated from slab caches themselves.
 57 */
 58enum slab_state {
 59	DOWN,			/* No slab functionality yet */
 60	PARTIAL,		/* SLUB: kmem_cache_node available */
 61	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
 62	UP,			/* Slab caches usable but not all extras yet */
 63	FULL			/* Everything is working */
 64};
 65
 66extern enum slab_state slab_state;
 67
 68/* The slab cache mutex protects the management structures during changes */
 69extern struct mutex slab_mutex;
 70
 71/* The list of all slab caches on the system */
 72extern struct list_head slab_caches;
 73
 74/* The slab cache that manages slab cache information */
 75extern struct kmem_cache *kmem_cache;
 76
 77/* A table of kmalloc cache names and sizes */
 78extern const struct kmalloc_info_struct {
 79	const char *name[NR_KMALLOC_TYPES];
 80	unsigned int size;
 81} kmalloc_info[];
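A sketch of how this table is typically consumed when the fixed kmalloc caches are created (the real loop lives in create_kmalloc_caches() / new_kmalloc_cache() in mm/slab_common.c; the function below is a hypothetical, simplified walk):

/* Hypothetical: print one name/size pair per kmalloc size class. */
static void __init example_list_kmalloc_sizes(void)
{
	unsigned int i;

	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
		pr_info("%s: %u bytes\n",
			kmalloc_info[i].name[KMALLOC_NORMAL],
			kmalloc_info[i].size);
}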
 82
 83#ifndef CONFIG_SLOB
 84/* Kmalloc array related functions */
 85void setup_kmalloc_cache_index_table(void);
 86void create_kmalloc_caches(slab_flags_t);
 87
 88/* Find the kmalloc slab corresponding to a given size */
 89struct kmem_cache *kmalloc_slab(size_t, gfp_t);
 90#endif
 91
 92gfp_t kmalloc_fix_flags(gfp_t flags);
 93
 94/* Functions provided by the slab allocators */
 95int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);
 96
 97struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
 98			slab_flags_t flags, unsigned int useroffset,
 99			unsigned int usersize);
100extern void create_boot_cache(struct kmem_cache *, const char *name,
101			unsigned int size, slab_flags_t flags,
102			unsigned int useroffset, unsigned int usersize);
103
104int slab_unmergeable(struct kmem_cache *s);
105struct kmem_cache *find_mergeable(unsigned size, unsigned align,
106		slab_flags_t flags, const char *name, void (*ctor)(void *));
107#ifndef CONFIG_SLOB
108struct kmem_cache *
109__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
110		   slab_flags_t flags, void (*ctor)(void *));
111
112slab_flags_t kmem_cache_flags(unsigned int object_size,
113	slab_flags_t flags, const char *name);
114#else
115static inline struct kmem_cache *
116__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
117		   slab_flags_t flags, void (*ctor)(void *))
118{ return NULL; }
119
120static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
121	slab_flags_t flags, const char *name)
122{
123	return flags;
124}
125#endif
126
127
128/* Legal flag mask for kmem_cache_create(), for various configurations */
129#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
130			 SLAB_CACHE_DMA32 | SLAB_PANIC | \
131			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS )
132
133#if defined(CONFIG_DEBUG_SLAB)
134#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
135#elif defined(CONFIG_SLUB_DEBUG)
136#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
137			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
138#else
139#define SLAB_DEBUG_FLAGS (0)
140#endif
141
142#if defined(CONFIG_SLAB)
143#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
144			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
145			  SLAB_ACCOUNT)
146#elif defined(CONFIG_SLUB)
147#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
148			  SLAB_TEMPORARY | SLAB_ACCOUNT)
149#else
150#define SLAB_CACHE_FLAGS (0)
151#endif
152
153/* Common flags available with current configuration */
154#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
155
156/* Common flags permitted for kmem_cache_create */
157#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
158			      SLAB_RED_ZONE | \
159			      SLAB_POISON | \
160			      SLAB_STORE_USER | \
161			      SLAB_TRACE | \
162			      SLAB_CONSISTENCY_CHECKS | \
163			      SLAB_MEM_SPREAD | \
164			      SLAB_NOLEAKTRACE | \
165			      SLAB_RECLAIM_ACCOUNT | \
166			      SLAB_TEMPORARY | \
167			      SLAB_ACCOUNT)
168
169bool __kmem_cache_empty(struct kmem_cache *);
170int __kmem_cache_shutdown(struct kmem_cache *);
171void __kmem_cache_release(struct kmem_cache *);
172int __kmem_cache_shrink(struct kmem_cache *);
173void slab_kmem_cache_release(struct kmem_cache *);
174
175struct seq_file;
176struct file;
177
178struct slabinfo {
179	unsigned long active_objs;
180	unsigned long num_objs;
181	unsigned long active_slabs;
182	unsigned long num_slabs;
183	unsigned long shared_avail;
184	unsigned int limit;
185	unsigned int batchcount;
186	unsigned int shared;
187	unsigned int objects_per_slab;
188	unsigned int cache_order;
189};
190
191void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
192void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
193ssize_t slabinfo_write(struct file *file, const char __user *buffer,
194		       size_t count, loff_t *ppos);
195
196/*
197 * Generic implementation of bulk operations
198 * These are useful for situations in which the allocator cannot
199 * perform optimizations. In that case segments of the object listed
200 * may be allocated or freed using these operations.
201 */
202void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
203int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
204
205static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
206{
207	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
208		NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
209}
210
211#ifdef CONFIG_SLUB_DEBUG
212#ifdef CONFIG_SLUB_DEBUG_ON
213DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
214#else
215DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
216#endif
217extern void print_tracking(struct kmem_cache *s, void *object);
218long validate_slab_cache(struct kmem_cache *s);
219static inline bool __slub_debug_enabled(void)
220{
221	return static_branch_unlikely(&slub_debug_enabled);
222}
223#else
224static inline void print_tracking(struct kmem_cache *s, void *object)
225{
226}
227static inline bool __slub_debug_enabled(void)
228{
229	return false;
230}
231#endif
232
233/*
234 * Returns true if any of the specified slub_debug flags is enabled for the
235 * cache. Use only for flags parsed by setup_slub_debug() as it also enables
236 * the static key.
237 */
238static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
239{
240	if (IS_ENABLED(CONFIG_SLUB_DEBUG))
241		VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
242	if (__slub_debug_enabled())
243		return s->flags & flags;
244	return false;
245}
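Typical call pattern, as a sketch (it mirrors how mm/slub.c uses this helper; the snippet is illustrative, not code from this header):

	/* Only enter expensive debug paths when the relevant slub_debug
	 * flag is actually enabled for this cache. */
	if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) {
		/* ... run the consistency checks ... */
	}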
246
247#ifdef CONFIG_MEMCG_KMEM
248int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
249				 gfp_t gfp, bool new_page);
250void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
251		     enum node_stat_item idx, int nr);
252
253static inline void memcg_free_page_obj_cgroups(struct page *page)
254{
255	kfree(page_objcgs(page));
256	page->memcg_data = 0;
257}
258
259static inline size_t obj_full_size(struct kmem_cache *s)
260{
261	/*
262	 * For each accounted object there is an extra space which is used
263	 * to store obj_cgroup membership. Charge it too.
264	 */
265	return s->size + sizeof(struct obj_cgroup *);
266}
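A worked example of the accounting math (the pointer size is an assumption for a 64-bit build):

	/* For a cache with s->size == 64 on a 64-bit kernel:
	 *   obj_full_size(s) == 64 + sizeof(struct obj_cgroup *) == 72
	 * so each accounted allocation charges 72 bytes to the cgroup,
	 * covering both the object and its obj_cgroup membership slot. */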
267
268/*
269 * Returns false if the allocation should fail.
270 */
271static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
272					     struct obj_cgroup **objcgp,
273					     size_t objects, gfp_t flags)
274{
275	struct obj_cgroup *objcg;
276
277	if (!memcg_kmem_enabled())
278		return true;
279
280	if (!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT))
281		return true;
282
283	objcg = get_obj_cgroup_from_current();
284	if (!objcg)
285		return true;
286
287	if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s))) {
288		obj_cgroup_put(objcg);
289		return false;
290	}
291
292	*objcgp = objcg;
293	return true;
294}
295
296static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
297					      struct obj_cgroup *objcg,
298					      gfp_t flags, size_t size,
299					      void **p)
300{
301	struct page *page;
302	unsigned long off;
303	size_t i;
304
305	if (!memcg_kmem_enabled() || !objcg)
306		return;
307
308	for (i = 0; i < size; i++) {
309		if (likely(p[i])) {
310			page = virt_to_head_page(p[i]);
311
312			if (!page_objcgs(page) &&
313			    memcg_alloc_page_obj_cgroups(page, s, flags,
314							 false)) {
315				obj_cgroup_uncharge(objcg, obj_full_size(s));
316				continue;
317			}
318
319			off = obj_to_index(s, page, p[i]);
320			obj_cgroup_get(objcg);
321			page_objcgs(page)[off] = objcg;
322			mod_objcg_state(objcg, page_pgdat(page),
323					cache_vmstat_idx(s), obj_full_size(s));
324		} else {
325			obj_cgroup_uncharge(objcg, obj_full_size(s));
326		}
327	}
328	obj_cgroup_put(objcg);
329}
330
331static inline void memcg_slab_free_hook(struct kmem_cache *s_orig,
332					void **p, int objects)
333{
334	struct kmem_cache *s;
335	struct obj_cgroup **objcgs;
336	struct obj_cgroup *objcg;
337	struct page *page;
338	unsigned int off;
339	int i;
340
341	if (!memcg_kmem_enabled())
342		return;
343
344	for (i = 0; i < objects; i++) {
345		if (unlikely(!p[i]))
346			continue;
347
348		page = virt_to_head_page(p[i]);
349		objcgs = page_objcgs_check(page);
350		if (!objcgs)
351			continue;
352
353		if (!s_orig)
354			s = page->slab_cache;
355		else
356			s = s_orig;
357
358		off = obj_to_index(s, page, p[i]);
359		objcg = objcgs[off];
360		if (!objcg)
361			continue;
362
363		objcgs[off] = NULL;
364		obj_cgroup_uncharge(objcg, obj_full_size(s));
365		mod_objcg_state(objcg, page_pgdat(page), cache_vmstat_idx(s),
366				-obj_full_size(s));
367		obj_cgroup_put(objcg);
368	}
369}
370
371#else /* CONFIG_MEMCG_KMEM */
372static inline struct mem_cgroup *memcg_from_slab_obj(void *ptr)
373{
374	return NULL;
375}
376
377static inline int memcg_alloc_page_obj_cgroups(struct page *page,
378					       struct kmem_cache *s, gfp_t gfp,
379					       bool new_page)
380{
381	return 0;
382}
383
384static inline void memcg_free_page_obj_cgroups(struct page *page)
385{
386}
387
388static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
389					     struct obj_cgroup **objcgp,
390					     size_t objects, gfp_t flags)
391{
392	return true;
393}
394
395static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
396					      struct obj_cgroup *objcg,
397					      gfp_t flags, size_t size,
398					      void **p)
399{
400}
401
402static inline void memcg_slab_free_hook(struct kmem_cache *s,
403					void **p, int objects)
404{
405}
406#endif /* CONFIG_MEMCG_KMEM */
407
408static inline struct kmem_cache *virt_to_cache(const void *obj)
409{
410	struct page *page;
411
412	page = virt_to_head_page(obj);
413	if (WARN_ONCE(!PageSlab(page), "%s: Object is not a Slab page!\n",
414					__func__))
415		return NULL;
416	return page->slab_cache;
417}
418
419static __always_inline void account_slab_page(struct page *page, int order,
420					      struct kmem_cache *s,
421					      gfp_t gfp)
422{
423	if (memcg_kmem_enabled() && (s->flags & SLAB_ACCOUNT))
424		memcg_alloc_page_obj_cgroups(page, s, gfp, true);
425
426	mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
427			    PAGE_SIZE << order);
428}
429
430static __always_inline void unaccount_slab_page(struct page *page, int order,
431						struct kmem_cache *s)
432{
433	if (memcg_kmem_enabled())
434		memcg_free_page_obj_cgroups(page);
435
436	mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
437			    -(PAGE_SIZE << order));
438}
439
440static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
441{
442	struct kmem_cache *cachep;
443
444	if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
445	    !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
446		return s;
447
448	cachep = virt_to_cache(x);
449	if (WARN(cachep && cachep != s,
450		  "%s: Wrong slab cache. %s but object is from %s\n",
451		  __func__, s->name, cachep->name))
452		print_tracking(cachep, x);
453	return cachep;
454}
455
456static inline size_t slab_ksize(const struct kmem_cache *s)
457{
458#ifndef CONFIG_SLUB
459	return s->object_size;
460
461#else /* CONFIG_SLUB */
462# ifdef CONFIG_SLUB_DEBUG
463	/*
464	 * Debugging requires use of the padding between object
465	 * and whatever may come after it.
466	 */
467	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
468		return s->object_size;
469# endif
470	if (s->flags & SLAB_KASAN)
471		return s->object_size;
472	/*
473	 * If we have the need to store the freelist pointer
474	 * back there or track user information then we can
475	 * only use the space before that information.
476	 */
477	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
478		return s->inuse;
479	/*
480	 * Else we can use all the padding etc for the allocation
481	 */
482	return s->size;
483#endif
484}
485
486static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
487						     struct obj_cgroup **objcgp,
488						     size_t size, gfp_t flags)
489{
490	flags &= gfp_allowed_mask;
491
492	might_alloc(flags);
493
494	if (should_failslab(s, flags))
495		return NULL;
496
497	if (!memcg_slab_pre_alloc_hook(s, objcgp, size, flags))
498		return NULL;
499
500	return s;
501}
502
503static inline void slab_post_alloc_hook(struct kmem_cache *s,
504					struct obj_cgroup *objcg, gfp_t flags,
505					size_t size, void **p, bool init)
506{
507	size_t i;
508
509	flags &= gfp_allowed_mask;
510
511	/*
512	 * As memory initialization might be integrated into KASAN,
513	 * kasan_slab_alloc and initialization memset must be
514	 * kept together to avoid discrepancies in behavior.
515	 *
516	 * As p[i] might get tagged, memset and kmemleak hook come after KASAN.
517	 */
518	for (i = 0; i < size; i++) {
519		p[i] = kasan_slab_alloc(s, p[i], flags, init);
520		if (p[i] && init && !kasan_has_integrated_init())
521			memset(p[i], 0, s->object_size);
522		kmemleak_alloc_recursive(p[i], s->object_size, 1,
523					 s->flags, flags);
524	}
525
526	memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
527}
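For context, a sketch of how the allocator fast path is expected to wire these hooks together. This follows the pattern of slab_alloc_node() in mm/slub.c, simplified and with the actual freelist handling elided:

	struct obj_cgroup *objcg = NULL;
	void *object;

	s = slab_pre_alloc_hook(s, &objcg, 1, gfpflags);
	if (unlikely(!s))
		return NULL;		/* failslab hit or memcg charge refused */

	/* ... take one object from the per-cpu freelist into 'object' ... */

	slab_post_alloc_hook(s, objcg, gfpflags, 1, &object,
			     slab_want_init_on_alloc(gfpflags, s));
	return object;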
528
529#ifndef CONFIG_SLOB
530/*
531 * The slab lists for all objects.
532 */
533struct kmem_cache_node {
534	spinlock_t list_lock;
535
536#ifdef CONFIG_SLAB
537	struct list_head slabs_partial;	/* partial list first, better asm code */
538	struct list_head slabs_full;
539	struct list_head slabs_free;
540	unsigned long total_slabs;	/* length of all slab lists */
541	unsigned long free_slabs;	/* length of free slab list only */
542	unsigned long free_objects;
543	unsigned int free_limit;
544	unsigned int colour_next;	/* Per-node cache coloring */
545	struct array_cache *shared;	/* shared per node */
546	struct alien_cache **alien;	/* on other nodes */
547	unsigned long next_reap;	/* updated without locking */
548	int free_touched;		/* updated without locking */
549#endif
550
551#ifdef CONFIG_SLUB
552	unsigned long nr_partial;
553	struct list_head partial;
554#ifdef CONFIG_SLUB_DEBUG
555	atomic_long_t nr_slabs;
556	atomic_long_t total_objects;
557	struct list_head full;
558#endif
559#endif
560
561};
562
563static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
564{
565	return s->node[node];
566}
567
568/*
569 * Iterator over all nodes. The body will be executed for each node that has
570 * a kmem_cache_node structure allocated (which is true for all online nodes)
571 */
572#define for_each_kmem_cache_node(__s, __node, __n) \
573	for (__node = 0; __node < nr_node_ids; __node++) \
574		 if ((__n = get_node(__s, __node)))
575
576#endif
577
578void *slab_start(struct seq_file *m, loff_t *pos);
579void *slab_next(struct seq_file *m, void *p, loff_t *pos);
580void slab_stop(struct seq_file *m, void *p);
581int memcg_slab_show(struct seq_file *m, void *p);
582
583#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
584void dump_unreclaimable_slab(void);
585#else
586static inline void dump_unreclaimable_slab(void)
587{
588}
589#endif
590
591void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);
592
593#ifdef CONFIG_SLAB_FREELIST_RANDOM
594int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
595			gfp_t gfp);
596void cache_random_seq_destroy(struct kmem_cache *cachep);
597#else
598static inline int cache_random_seq_create(struct kmem_cache *cachep,
599					unsigned int count, gfp_t gfp)
600{
601	return 0;
602}
603static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
604#endif /* CONFIG_SLAB_FREELIST_RANDOM */
605
606static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
607{
608	if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
609				&init_on_alloc)) {
610		if (c->ctor)
611			return false;
612		if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
613			return flags & __GFP_ZERO;
614		return true;
615	}
616	return flags & __GFP_ZERO;
617}
618
619static inline bool slab_want_init_on_free(struct kmem_cache *c)
620{
621	if (static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
622				&init_on_free))
623		return !(c->ctor ||
624			 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
625	return false;
626}
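A sketch of the behaviour these two predicates give the allocator; the memsets shown are a simplification of what the alloc and free hooks actually do (KASAN may take over the initialisation), so treat them as illustrative only.

	/* Allocation side: zero the object unless the cache opted out
	 * (constructor, SLAB_TYPESAFE_BY_RCU or poisoned caches). */
	if (slab_want_init_on_alloc(gfpflags, s))
		memset(object, 0, s->object_size);

	/* Free side: scrub the object before it goes back on the freelist. */
	if (slab_want_init_on_free(s))
		memset(object, 0, s->object_size);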
627
628#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
629void debugfs_slab_release(struct kmem_cache *);
630#else
631static inline void debugfs_slab_release(struct kmem_cache *s) { }
632#endif
633
634#ifdef CONFIG_PRINTK
635#define KS_ADDRS_COUNT 16
636struct kmem_obj_info {
637	void *kp_ptr;
638	struct page *kp_page;
639	void *kp_objp;
640	unsigned long kp_data_offset;
641	struct kmem_cache *kp_slab_cache;
642	void *kp_ret;
643	void *kp_stack[KS_ADDRS_COUNT];
644	void *kp_free_stack[KS_ADDRS_COUNT];
645};
646void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page);
647#endif
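kmem_obj_info() is the slab-side backend used when the kernel prints provenance for an arbitrary pointer; a minimal usage sketch (mem_dump_obj() is the public entry point in mm/util.c, which reaches this helper via kmem_dump_obj() for slab memory):

	/* Illustrative: report where a suspicious slab object came from. */
	mem_dump_obj(ptr);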
648
649#endif /* MM_SLAB_H */