mm/slab.h (Linux v4.6)
 
  1#ifndef MM_SLAB_H
  2#define MM_SLAB_H
  3/*
  4 * Internal slab definitions
  5 */
  6
  7#ifdef CONFIG_SLOB
  8/*
  9 * Common fields provided in kmem_cache by all slab allocators
 10 * This struct is either used directly by the allocator (SLOB)
 11 * or the allocator must include definitions for all fields
 12 * provided in kmem_cache_common in their definition of kmem_cache.
 13 *
 14 * Once we can do anonymous structs (C11 standard) we could put an
 15 * anonymous struct definition in these allocators so that the
 16 * separate allocations in the kmem_cache structure of SLAB and
 17 * SLUB are no longer needed.
 18 */
 19struct kmem_cache {
 20	unsigned int object_size;/* The original size of the object */
 21	unsigned int size;	/* The aligned/padded/added on size  */
 22	unsigned int align;	/* Alignment as calculated */
 23	unsigned long flags;	/* Active flags on the slab */
 24	const char *name;	/* Slab name for sysfs */
 25	int refcount;		/* Use counter */
 26	void (*ctor)(void *);	/* Called on object slot creation */
 27	struct list_head list;	/* List of all slab caches on the system */
 28};
 29
 30#endif /* CONFIG_SLOB */
 31
 32#ifdef CONFIG_SLAB
 33#include <linux/slab_def.h>
 34#endif
 35
 36#ifdef CONFIG_SLUB
 37#include <linux/slub_def.h>
 38#endif
 39
 40#include <linux/memcontrol.h>
 41#include <linux/fault-inject.h>
 42#include <linux/kmemcheck.h>
 43#include <linux/kasan.h>
 44#include <linux/kmemleak.h>
 45
 46/*
 47 * State of the slab allocator.
 48 *
 49 * This is used to describe the states of the allocator during bootup.
 50 * Allocators use this to gradually bootstrap themselves. Most allocators
 51 * have the problem that the structures used for managing slab caches are
 52 * allocated from slab caches themselves.
 53 */
 54enum slab_state {
 55	DOWN,			/* No slab functionality yet */
 56	PARTIAL,		/* SLUB: kmem_cache_node available */
 57	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
 58	UP,			/* Slab caches usable but not all extras yet */
 59	FULL			/* Everything is working */
 60};
 61
 62extern enum slab_state slab_state;
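
For illustration, a hedged sketch of how this bootstrap state is consumed outside this header: mm/slab_common.c builds slab_is_available() on top of slab_state, and early-boot callers use it to choose between the slab and the boot-time allocator. The example_early_alloc() helper below is a made-up name showing the common pattern, not kernel code.

/* Sketch of the real helper in mm/slab_common.c (not verbatim): */
bool slab_is_available(void)
{
	return slab_state >= UP;
}

/* Hypothetical early-boot caller; the alloc_bootmem() fallback is illustrative. */
static void *example_early_alloc(size_t size)
{
	if (slab_is_available())
		return kzalloc(size, GFP_KERNEL);
	return alloc_bootmem(size);
}
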
 63
 64/* The slab cache mutex protects the management structures during changes */
 65extern struct mutex slab_mutex;
 66
 67/* The list of all slab caches on the system */
 68extern struct list_head slab_caches;
 69
 70/* The slab cache that manages slab cache information */
 71extern struct kmem_cache *kmem_cache;
 72
 73unsigned long calculate_alignment(unsigned long flags,
 74		unsigned long align, unsigned long size);
 75
 76#ifndef CONFIG_SLOB
 77/* Kmalloc array related functions */
 78void setup_kmalloc_cache_index_table(void);
 79void create_kmalloc_caches(unsigned long);
 80
 81/* Find the kmalloc slab corresponding to a certain size */
 82struct kmem_cache *kmalloc_slab(size_t, gfp_t);
 83#endif
 84
 85
 86/* Functions provided by the slab allocators */
 87extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);
 88
 89extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
 90			unsigned long flags);
 91extern void create_boot_cache(struct kmem_cache *, const char *name,
 92			size_t size, unsigned long flags);
 93
 94int slab_unmergeable(struct kmem_cache *s);
 95struct kmem_cache *find_mergeable(size_t size, size_t align,
 96		unsigned long flags, const char *name, void (*ctor)(void *));
 97#ifndef CONFIG_SLOB
 98struct kmem_cache *
 99__kmem_cache_alias(const char *name, size_t size, size_t align,
100		   unsigned long flags, void (*ctor)(void *));
101
102unsigned long kmem_cache_flags(unsigned long object_size,
103	unsigned long flags, const char *name,
104	void (*ctor)(void *));
105#else
106static inline struct kmem_cache *
107__kmem_cache_alias(const char *name, size_t size, size_t align,
108		   unsigned long flags, void (*ctor)(void *))
109{ return NULL; }
110
111static inline unsigned long kmem_cache_flags(unsigned long object_size,
112	unsigned long flags, const char *name,
113	void (*ctor)(void *))
114{
115	return flags;
116}
117#endif
118
119
120/* Legal flag mask for kmem_cache_create(), for various configurations */
121#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
122			 SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
123
124#if defined(CONFIG_DEBUG_SLAB)
125#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
126#elif defined(CONFIG_SLUB_DEBUG)
127#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
128			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
129#else
130#define SLAB_DEBUG_FLAGS (0)
131#endif
132
133#if defined(CONFIG_SLAB)
134#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
135			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
136			  SLAB_NOTRACK | SLAB_ACCOUNT)
137#elif defined(CONFIG_SLUB)
138#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
139			  SLAB_TEMPORARY | SLAB_NOTRACK | SLAB_ACCOUNT)
140#else
141#define SLAB_CACHE_FLAGS (0)
142#endif
143
144#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
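
For context, a hedged sketch of how this mask and __kmem_cache_alias() are used by the cache-creation path in mm/slab_common.c: flag bits the current configuration cannot honour are masked off, an existing compatible cache is reused when the allocator allows merging, and only otherwise is a fresh descriptor set up. The function below is a simplification (name duplication, sysfs and memcg wiring omitted), not the real kmem_cache_create().

static struct kmem_cache *example_cache_create(const char *name, size_t size,
		size_t align, unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *s;

	mutex_lock(&slab_mutex);
	flags &= CACHE_CREATE_MASK;	/* silently drop unsupported flag bits */

	s = __kmem_cache_alias(name, size, align, flags, ctor);
	if (!s) {
		s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
		if (s) {
			s->name = name;	/* the real code kstrdup()s the name */
			s->object_size = s->size = size;
			s->align = calculate_alignment(flags, align, size);
			s->ctor = ctor;
			if (__kmem_cache_create(s, flags) == 0) {
				list_add(&s->list, &slab_caches);
			} else {
				kmem_cache_free(kmem_cache, s);
				s = NULL;
			}
		}
	}
	mutex_unlock(&slab_mutex);
	return s;
}
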
145
146int __kmem_cache_shutdown(struct kmem_cache *);
147void __kmem_cache_release(struct kmem_cache *);
148int __kmem_cache_shrink(struct kmem_cache *, bool);
149void slab_kmem_cache_release(struct kmem_cache *);
150
151struct seq_file;
152struct file;
153
154struct slabinfo {
155	unsigned long active_objs;
156	unsigned long num_objs;
157	unsigned long active_slabs;
158	unsigned long num_slabs;
159	unsigned long shared_avail;
160	unsigned int limit;
161	unsigned int batchcount;
162	unsigned int shared;
163	unsigned int objects_per_slab;
164	unsigned int cache_order;
165};
166
167void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
168void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
169ssize_t slabinfo_write(struct file *file, const char __user *buffer,
170		       size_t count, loff_t *ppos);
171
172/*
173 * Generic implementation of bulk operations
174 * These are useful for situations in which the allocator cannot
175 * perform optimizations. In that case segments of the object listed
176 * may be allocated or freed using these operations.
177 */
178void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
179int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
180
181#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
182/*
183 * Iterate over all memcg caches of the given root cache. The caller must hold
184 * slab_mutex.
185 */
186#define for_each_memcg_cache(iter, root) \
187	list_for_each_entry(iter, &(root)->memcg_params.list, \
188			    memcg_params.list)
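
A hedged usage sketch: walking every per-memcg child of a root cache, which, per the comment above, is only valid while slab_mutex is held. Shrinking each child is just an illustrative loop body.

static void example_shrink_children(struct kmem_cache *root)
{
	struct kmem_cache *c;

	mutex_lock(&slab_mutex);
	for_each_memcg_cache(c, root)
		__kmem_cache_shrink(c, false);	/* illustrative only */
	mutex_unlock(&slab_mutex);
}
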
189
190static inline bool is_root_cache(struct kmem_cache *s)
191{
192	return s->memcg_params.is_root_cache;
193}
194
195static inline bool slab_equal_or_root(struct kmem_cache *s,
196				      struct kmem_cache *p)
197{
198	return p == s || p == s->memcg_params.root_cache;
199}
200
201/*
202 * We use suffixes to the name in memcg because we can't have caches
203 * created in the system with the same name. But when we print them
204 * locally, better refer to them with the base name
205 */
206static inline const char *cache_name(struct kmem_cache *s)
207{
208	if (!is_root_cache(s))
209		s = s->memcg_params.root_cache;
210	return s->name;
211}
212
213/*
214 * Note, we protect with RCU only the memcg_caches array, not per-memcg caches.
215 * That said the caller must assure the memcg's cache won't go away by either
216 * taking a css reference to the owner cgroup, or holding the slab_mutex.
217 */
218static inline struct kmem_cache *
219cache_from_memcg_idx(struct kmem_cache *s, int idx)
220{
221	struct kmem_cache *cachep;
222	struct memcg_cache_array *arr;
223
224	rcu_read_lock();
225	arr = rcu_dereference(s->memcg_params.memcg_caches);
226
227	/*
228	 * Make sure we will access the up-to-date value. The code updating
229	 * memcg_caches issues a write barrier to match this (see
230	 * memcg_create_kmem_cache()).
231	 */
232	cachep = lockless_dereference(arr->entries[idx]);
233	rcu_read_unlock();
234
235	return cachep;
236}
237
238static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
239{
240	if (is_root_cache(s))
241		return s;
242	return s->memcg_params.root_cache;
243}
244
245static __always_inline int memcg_charge_slab(struct page *page,
246					     gfp_t gfp, int order,
247					     struct kmem_cache *s)
248{
249	int ret;
250
251	if (!memcg_kmem_enabled())
252		return 0;
253	if (is_root_cache(s))
254		return 0;
255
256	ret = __memcg_kmem_charge_memcg(page, gfp, order,
257					s->memcg_params.memcg);
258	if (ret)
259		return ret;
260
261	memcg_kmem_update_page_stat(page,
262			(s->flags & SLAB_RECLAIM_ACCOUNT) ?
263			MEMCG_SLAB_RECLAIMABLE : MEMCG_SLAB_UNRECLAIMABLE,
264			1 << order);
265	return 0;
266}
267
268static __always_inline void memcg_uncharge_slab(struct page *page, int order,
269						struct kmem_cache *s)
270{
271	memcg_kmem_update_page_stat(page,
272			(s->flags & SLAB_RECLAIM_ACCOUNT) ?
273			MEMCG_SLAB_RECLAIMABLE : MEMCG_SLAB_UNRECLAIMABLE,
274			-(1 << order));
275	memcg_kmem_uncharge(page, order);
276}
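
For illustration, a hedged sketch of where these helpers sit in an allocator: the page backing a new slab is charged to the cache's memcg right after it is allocated and uncharged before it is returned to the page allocator. This roughly mirrors the SLUB page-allocation path; the function names below are made up.

static struct page *example_alloc_slab_page(struct kmem_cache *s,
					    gfp_t flags, int order)
{
	struct page *page = alloc_pages(flags, order);

	if (page && memcg_charge_slab(page, flags, order, s)) {
		__free_pages(page, order);	/* charge failed, give the page back */
		page = NULL;
	}
	return page;
}

static void example_free_slab_page(struct kmem_cache *s,
				   struct page *page, int order)
{
	memcg_uncharge_slab(page, order, s);
	__free_pages(page, order);
}
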
277
278extern void slab_init_memcg_params(struct kmem_cache *);
279
280#else /* CONFIG_MEMCG && !CONFIG_SLOB */
281
282#define for_each_memcg_cache(iter, root) \
283	for ((void)(iter), (void)(root); 0; )
284
285static inline bool is_root_cache(struct kmem_cache *s)
286{
287	return true;
288}
289
290static inline bool slab_equal_or_root(struct kmem_cache *s,
291				      struct kmem_cache *p)
292{
293	return true;
294}
295
296static inline const char *cache_name(struct kmem_cache *s)
297{
298	return s->name;
299}
300
301static inline struct kmem_cache *
302cache_from_memcg_idx(struct kmem_cache *s, int idx)
303{
304	return NULL;
305}
306
307static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
308{
309	return s;
310}
311
312static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
313				    struct kmem_cache *s)
314{
315	return 0;
316}
317
318static inline void memcg_uncharge_slab(struct page *page, int order,
319				       struct kmem_cache *s)
320{
321}
322
323static inline void slab_init_memcg_params(struct kmem_cache *s)
324{
325}
326#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
327
328static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
329{
330	struct kmem_cache *cachep;
331	struct page *page;
332
333	/*
334	 * When kmemcg is not being used, both assignments should return the
335	 * same value, but we don't want to pay the assignment price in that
336	 * case. If it is not compiled in, the compiler should be smart enough
337	 * to not do even the assignment. In that case, slab_equal_or_root
338	 * will also be a constant.
339	 */
340	if (!memcg_kmem_enabled() &&
341	    !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
342		return s;
343
344	page = virt_to_head_page(x);
345	cachep = page->slab_cache;
346	if (slab_equal_or_root(cachep, s))
347		return cachep;
348
349	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
350	       __func__, s->name, cachep->name);
351	WARN_ON_ONCE(1);
352	return s;
353}
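
A hedged sketch of the caller: the allocators run an object through cache_from_obj() at the top of kmem_cache_free(), so a free issued against a root cache (or simply the wrong cache) is redirected to the cache the object actually came from.

static void example_kmem_cache_free(struct kmem_cache *s, void *x)
{
	/* Redirect to the object's real cache, warning on a genuine mismatch. */
	s = cache_from_obj(s, x);

	/* ...the allocator-specific free path would now take (s, x)... */
	(void)s;
}
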
354
355static inline size_t slab_ksize(const struct kmem_cache *s)
356{
357#ifndef CONFIG_SLUB
358	return s->object_size;
359
360#else /* CONFIG_SLUB */
361# ifdef CONFIG_SLUB_DEBUG
362	/*
363	 * Debugging requires use of the padding between object
364	 * and whatever may come after it.
365	 */
366	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
367		return s->object_size;
368# endif
369	/*
370	 * If we have the need to store the freelist pointer
371	 * back there or track user information then we can
372	 * only use the space before that information.
373	 */
374	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
375		return s->inuse;
376	/*
377	 * Else we can use all the padding etc for the allocation
378	 */
379	return s->size;
380#endif
381}
382
383static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
384						     gfp_t flags)
385{
386	flags &= gfp_allowed_mask;
387	lockdep_trace_alloc(flags);
388	might_sleep_if(gfpflags_allow_blocking(flags));
389
390	if (should_failslab(s, flags))
391		return NULL;
392
393	return memcg_kmem_get_cache(s, flags);
394}
395
396static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
397					size_t size, void **p)
398{
399	size_t i;
400
401	flags &= gfp_allowed_mask;
402	for (i = 0; i < size; i++) {
403		void *object = p[i];
404
405		kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
406		kmemleak_alloc_recursive(object, s->object_size, 1,
407					 s->flags, flags);
408		kasan_slab_alloc(s, object, flags);
409	}
410	memcg_kmem_put_cache(s);
411}
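
A hedged sketch of how an allocation path is expected to bracket its fast path with these two hooks: the pre-hook handles gfp masking, might_sleep checking, failslab injection and memcg cache selection, and the post-hook runs the per-object kmemcheck/kmemleak/kasan annotations. take_object_fast() is a stand-in for the allocator-specific fast path, not a real function.

extern void *take_object_fast(struct kmem_cache *s, gfp_t flags);	/* stand-in */

static void *example_alloc(struct kmem_cache *s, gfp_t gfpflags)
{
	void *object;

	s = slab_pre_alloc_hook(s, gfpflags);	/* may return a memcg child cache */
	if (!s)
		return NULL;			/* failslab asked us to fail */

	object = take_object_fast(s, gfpflags);

	slab_post_alloc_hook(s, gfpflags, 1, &object);
	return object;
}
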
412
413#ifndef CONFIG_SLOB
414/*
415 * The slab lists for all objects.
416 */
417struct kmem_cache_node {
418	spinlock_t list_lock;
419
420#ifdef CONFIG_SLAB
421	struct list_head slabs_partial;	/* partial list first, better asm code */
422	struct list_head slabs_full;
423	struct list_head slabs_free;
424	unsigned long free_objects;
425	unsigned int free_limit;
426	unsigned int colour_next;	/* Per-node cache coloring */
427	struct array_cache *shared;	/* shared per node */
428	struct alien_cache **alien;	/* on other nodes */
429	unsigned long next_reap;	/* updated without locking */
430	int free_touched;		/* updated without locking */
431#endif
432
433#ifdef CONFIG_SLUB
434	unsigned long nr_partial;
435	struct list_head partial;
436#ifdef CONFIG_SLUB_DEBUG
437	atomic_long_t nr_slabs;
438	atomic_long_t total_objects;
439	struct list_head full;
440#endif
441#endif
442
443};
444
445static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
446{
447	return s->node[node];
448}
449
450/*
451 * Iterator over all nodes. The body will be executed for each node that has
452 * a kmem_cache_node structure allocated (which is true for all online nodes)
453 */
454#define for_each_kmem_cache_node(__s, __node, __n) \
455	for (__node = 0; __node < nr_node_ids; __node++) \
456		 if ((__n = get_node(__s, __node)))
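
A hedged usage sketch: summing a per-node statistic across every node that has a kmem_cache_node allocated. The field read here, nr_partial, exists only in the SLUB branch of the struct above.

static unsigned long example_count_partial(struct kmem_cache *s)
{
	int node;
	struct kmem_cache_node *n;
	unsigned long total = 0;

	for_each_kmem_cache_node(s, node, n)
		total += n->nr_partial;

	return total;
}
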
457
458#endif
459
460void *slab_start(struct seq_file *m, loff_t *pos);
461void *slab_next(struct seq_file *m, void *p, loff_t *pos);
462void slab_stop(struct seq_file *m, void *p);
463int memcg_slab_show(struct seq_file *m, void *p);
464
465#endif /* MM_SLAB_H */
mm/slab.h (Linux v4.17)
  1/* SPDX-License-Identifier: GPL-2.0 */
  2#ifndef MM_SLAB_H
  3#define MM_SLAB_H
  4/*
  5 * Internal slab definitions
  6 */
  7
  8#ifdef CONFIG_SLOB
  9/*
 10 * Common fields provided in kmem_cache by all slab allocators
 11 * This struct is either used directly by the allocator (SLOB)
 12 * or the allocator must include definitions for all fields
 13 * provided in kmem_cache_common in their definition of kmem_cache.
 14 *
 15 * Once we can do anonymous structs (C11 standard) we could put an
 16 * anonymous struct definition in these allocators so that the
 17 * separate allocations in the kmem_cache structure of SLAB and
 18 * SLUB are no longer needed.
 19 */
 20struct kmem_cache {
 21	unsigned int object_size;/* The original size of the object */
 22	unsigned int size;	/* The aligned/padded/added on size  */
 23	unsigned int align;	/* Alignment as calculated */
 24	slab_flags_t flags;	/* Active flags on the slab */
 25	unsigned int useroffset;/* Usercopy region offset */
 26	unsigned int usersize;	/* Usercopy region size */
 27	const char *name;	/* Slab name for sysfs */
 28	int refcount;		/* Use counter */
 29	void (*ctor)(void *);	/* Called on object slot creation */
 30	struct list_head list;	/* List of all slab caches on the system */
 31};
 32
 33#endif /* CONFIG_SLOB */
 34
 35#ifdef CONFIG_SLAB
 36#include <linux/slab_def.h>
 37#endif
 38
 39#ifdef CONFIG_SLUB
 40#include <linux/slub_def.h>
 41#endif
 42
 43#include <linux/memcontrol.h>
 44#include <linux/fault-inject.h>
 45#include <linux/kasan.h>
 46#include <linux/kmemleak.h>
 47#include <linux/random.h>
 48#include <linux/sched/mm.h>
 49
 50/*
 51 * State of the slab allocator.
 52 *
 53 * This is used to describe the states of the allocator during bootup.
 54 * Allocators use this to gradually bootstrap themselves. Most allocators
 55 * have the problem that the structures used for managing slab caches are
 56 * allocated from slab caches themselves.
 57 */
 58enum slab_state {
 59	DOWN,			/* No slab functionality yet */
 60	PARTIAL,		/* SLUB: kmem_cache_node available */
 61	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
 62	UP,			/* Slab caches usable but not all extras yet */
 63	FULL			/* Everything is working */
 64};
 65
 66extern enum slab_state slab_state;
 67
 68/* The slab cache mutex protects the management structures during changes */
 69extern struct mutex slab_mutex;
 70
 71/* The list of all slab caches on the system */
 72extern struct list_head slab_caches;
 73
 74/* The slab cache that manages slab cache information */
 75extern struct kmem_cache *kmem_cache;
 76
 77/* A table of kmalloc cache names and sizes */
 78extern const struct kmalloc_info_struct {
 79	const char *name;
 80	unsigned int size;
 81} kmalloc_info[];
 82
 83#ifndef CONFIG_SLOB
 84/* Kmalloc array related functions */
 85void setup_kmalloc_cache_index_table(void);
 86void create_kmalloc_caches(slab_flags_t);
 87
 88/* Find the kmalloc slab corresponding to a certain size */
 89struct kmem_cache *kmalloc_slab(size_t, gfp_t);
 90#endif
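
For illustration, a hedged sketch of how these pieces fit together: at boot, create_kmalloc_caches() walks kmalloc_info[] to populate the kmalloc cache array, and kmalloc_slab() is the size-to-cache lookup used on the kmalloc() slow path. Both functions below are simplifications (the real loop special-cases the 96 and 192 byte entries and the DMA caches), not the kernel implementation.

static void example_create_kmalloc_caches(slab_flags_t flags)
{
	int i;

	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
		kmalloc_caches[i] = create_kmalloc_cache(kmalloc_info[i].name,
							 kmalloc_info[i].size,
							 flags, 0,
							 kmalloc_info[i].size);
}

static void *example_kmalloc(size_t size, gfp_t flags)
{
	struct kmem_cache *c = kmalloc_slab(size, flags);

	if (ZERO_OR_NULL_PTR(c))
		return c;	/* size 0, or too big for the kmalloc array */
	return kmem_cache_alloc(c, flags);
}
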
 91
 92
 93/* Functions provided by the slab allocators */
 94int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);
 95
 96struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
 97			slab_flags_t flags, unsigned int useroffset,
 98			unsigned int usersize);
 99extern void create_boot_cache(struct kmem_cache *, const char *name,
100			unsigned int size, slab_flags_t flags,
101			unsigned int useroffset, unsigned int usersize);
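
The useroffset/usersize pair threaded through these constructors is the usercopy whitelist added in this era: hardened usercopy only allows that window of each object to be copied to or from user space. A hedged caller-side sketch follows; struct example_obj and its fields are made up, and kmem_cache_create_usercopy() is the public wrapper around this machinery.

struct example_obj {
	spinlock_t lock;	/* never exposed to user space */
	char buf[64];		/* the only region user copies may touch */
};

static struct kmem_cache *example_cachep;

static int __init example_init(void)
{
	example_cachep = kmem_cache_create_usercopy("example_obj",
			sizeof(struct example_obj), 0, SLAB_HWCACHE_ALIGN,
			offsetof(struct example_obj, buf),
			FIELD_SIZEOF(struct example_obj, buf), NULL);
	return example_cachep ? 0 : -ENOMEM;
}
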
102
103int slab_unmergeable(struct kmem_cache *s);
104struct kmem_cache *find_mergeable(unsigned size, unsigned align,
105		slab_flags_t flags, const char *name, void (*ctor)(void *));
106#ifndef CONFIG_SLOB
107struct kmem_cache *
108__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
109		   slab_flags_t flags, void (*ctor)(void *));
110
111slab_flags_t kmem_cache_flags(unsigned int object_size,
112	slab_flags_t flags, const char *name,
113	void (*ctor)(void *));
114#else
115static inline struct kmem_cache *
116__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
117		   slab_flags_t flags, void (*ctor)(void *))
118{ return NULL; }
119
120static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
121	slab_flags_t flags, const char *name,
122	void (*ctor)(void *))
123{
124	return flags;
125}
126#endif
127
128
129/* Legal flag mask for kmem_cache_create(), for various configurations */
130#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
131			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS )
132
133#if defined(CONFIG_DEBUG_SLAB)
134#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
135#elif defined(CONFIG_SLUB_DEBUG)
136#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
137			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
138#else
139#define SLAB_DEBUG_FLAGS (0)
140#endif
141
142#if defined(CONFIG_SLAB)
143#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
144			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
145			  SLAB_ACCOUNT)
146#elif defined(CONFIG_SLUB)
147#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
148			  SLAB_TEMPORARY | SLAB_ACCOUNT)
149#else
150#define SLAB_CACHE_FLAGS (0)
151#endif
152
153/* Common flags available with current configuration */
154#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
155
156/* Common flags permitted for kmem_cache_create */
157#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
158			      SLAB_RED_ZONE | \
159			      SLAB_POISON | \
160			      SLAB_STORE_USER | \
161			      SLAB_TRACE | \
162			      SLAB_CONSISTENCY_CHECKS | \
163			      SLAB_MEM_SPREAD | \
164			      SLAB_NOLEAKTRACE | \
165			      SLAB_RECLAIM_ACCOUNT | \
166			      SLAB_TEMPORARY | \
167			      SLAB_ACCOUNT)
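
A hedged note on how this mask differs from CACHE_CREATE_MASK in use: flags outside SLAB_FLAGS_PERMITTED make cache creation fail outright rather than being silently dropped. A simplified sketch of that check (not the exact code in mm/slab_common.c):

static int example_validate_flags(slab_flags_t flags)
{
	if (flags & ~SLAB_FLAGS_PERMITTED)
		return -EINVAL;	/* caller passed a flag no configuration accepts */
	return 0;
}
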
168
169bool __kmem_cache_empty(struct kmem_cache *);
170int __kmem_cache_shutdown(struct kmem_cache *);
171void __kmem_cache_release(struct kmem_cache *);
172int __kmem_cache_shrink(struct kmem_cache *);
173void __kmemcg_cache_deactivate(struct kmem_cache *s);
174void slab_kmem_cache_release(struct kmem_cache *);
175
176struct seq_file;
177struct file;
178
179struct slabinfo {
180	unsigned long active_objs;
181	unsigned long num_objs;
182	unsigned long active_slabs;
183	unsigned long num_slabs;
184	unsigned long shared_avail;
185	unsigned int limit;
186	unsigned int batchcount;
187	unsigned int shared;
188	unsigned int objects_per_slab;
189	unsigned int cache_order;
190};
191
192void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
193void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
194ssize_t slabinfo_write(struct file *file, const char __user *buffer,
195		       size_t count, loff_t *ppos);
196
197/*
198 * Generic implementation of bulk operations
199 * These are useful for situations in which the allocator cannot
200 * perform optimizations. In that case segments of the object listed
201 * may be allocated or freed using these operations.
202 */
203void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
204int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
205
206#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
207
208/* List of all root caches. */
209extern struct list_head		slab_root_caches;
210#define root_caches_node	memcg_params.__root_caches_node
211
212/*
213 * Iterate over all memcg caches of the given root cache. The caller must hold
214 * slab_mutex.
215 */
216#define for_each_memcg_cache(iter, root) \
217	list_for_each_entry(iter, &(root)->memcg_params.children, \
218			    memcg_params.children_node)
219
220static inline bool is_root_cache(struct kmem_cache *s)
221{
222	return !s->memcg_params.root_cache;
223}
224
225static inline bool slab_equal_or_root(struct kmem_cache *s,
226				      struct kmem_cache *p)
227{
228	return p == s || p == s->memcg_params.root_cache;
229}
230
231/*
232 * We use suffixes to the name in memcg because we can't have caches
233 * created in the system with the same name. But when we print them
234 * locally, better refer to them with the base name
235 */
236static inline const char *cache_name(struct kmem_cache *s)
237{
238	if (!is_root_cache(s))
239		s = s->memcg_params.root_cache;
240	return s->name;
241}
242
243/*
244 * Note, we protect with RCU only the memcg_caches array, not per-memcg caches.
245 * That said the caller must assure the memcg's cache won't go away by either
246 * taking a css reference to the owner cgroup, or holding the slab_mutex.
247 */
248static inline struct kmem_cache *
249cache_from_memcg_idx(struct kmem_cache *s, int idx)
250{
251	struct kmem_cache *cachep;
252	struct memcg_cache_array *arr;
253
254	rcu_read_lock();
255	arr = rcu_dereference(s->memcg_params.memcg_caches);
256
257	/*
258	 * Make sure we will access the up-to-date value. The code updating
259	 * memcg_caches issues a write barrier to match this (see
260	 * memcg_create_kmem_cache()).
261	 */
262	cachep = READ_ONCE(arr->entries[idx]);
263	rcu_read_unlock();
264
265	return cachep;
266}
267
268static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
269{
270	if (is_root_cache(s))
271		return s;
272	return s->memcg_params.root_cache;
273}
274
275static __always_inline int memcg_charge_slab(struct page *page,
276					     gfp_t gfp, int order,
277					     struct kmem_cache *s)
278{
279	if (!memcg_kmem_enabled())
280		return 0;
281	if (is_root_cache(s))
282		return 0;
283	return memcg_kmem_charge_memcg(page, gfp, order, s->memcg_params.memcg);
284}
285
286static __always_inline void memcg_uncharge_slab(struct page *page, int order,
287						struct kmem_cache *s)
288{
289	if (!memcg_kmem_enabled())
290		return;
291	memcg_kmem_uncharge(page, order);
292}
293
294extern void slab_init_memcg_params(struct kmem_cache *);
295extern void memcg_link_cache(struct kmem_cache *s);
296extern void slab_deactivate_memcg_cache_rcu_sched(struct kmem_cache *s,
297				void (*deact_fn)(struct kmem_cache *));
298
299#else /* CONFIG_MEMCG && !CONFIG_SLOB */
300
301/* If !memcg, all caches are root. */
302#define slab_root_caches	slab_caches
303#define root_caches_node	list
304
305#define for_each_memcg_cache(iter, root) \
306	for ((void)(iter), (void)(root); 0; )
307
308static inline bool is_root_cache(struct kmem_cache *s)
309{
310	return true;
311}
312
313static inline bool slab_equal_or_root(struct kmem_cache *s,
314				      struct kmem_cache *p)
315{
316	return true;
317}
318
319static inline const char *cache_name(struct kmem_cache *s)
320{
321	return s->name;
322}
323
324static inline struct kmem_cache *
325cache_from_memcg_idx(struct kmem_cache *s, int idx)
326{
327	return NULL;
328}
329
330static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
331{
332	return s;
333}
334
335static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
336				    struct kmem_cache *s)
337{
338	return 0;
339}
340
341static inline void memcg_uncharge_slab(struct page *page, int order,
342				       struct kmem_cache *s)
343{
344}
345
346static inline void slab_init_memcg_params(struct kmem_cache *s)
347{
348}
349
350static inline void memcg_link_cache(struct kmem_cache *s)
351{
352}
353
354#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
355
356static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
357{
358	struct kmem_cache *cachep;
359	struct page *page;
360
361	/*
362	 * When kmemcg is not being used, both assignments should return the
363	 * same value, but we don't want to pay the assignment price in that
364	 * case. If it is not compiled in, the compiler should be smart enough
365	 * to not do even the assignment. In that case, slab_equal_or_root
366	 * will also be a constant.
367	 */
368	if (!memcg_kmem_enabled() &&
369	    !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
370		return s;
371
372	page = virt_to_head_page(x);
373	cachep = page->slab_cache;
374	if (slab_equal_or_root(cachep, s))
375		return cachep;
376
377	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
378	       __func__, s->name, cachep->name);
379	WARN_ON_ONCE(1);
380	return s;
381}
382
383static inline size_t slab_ksize(const struct kmem_cache *s)
384{
385#ifndef CONFIG_SLUB
386	return s->object_size;
387
388#else /* CONFIG_SLUB */
389# ifdef CONFIG_SLUB_DEBUG
390	/*
391	 * Debugging requires use of the padding between object
392	 * and whatever may come after it.
393	 */
394	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
395		return s->object_size;
396# endif
397	if (s->flags & SLAB_KASAN)
398		return s->object_size;
399	/*
400	 * If we have the need to store the freelist pointer
401	 * back there or track user information then we can
402	 * only use the space before that information.
403	 */
404	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
405		return s->inuse;
406	/*
407	 * Else we can use all the padding etc for the allocation
408	 */
409	return s->size;
410#endif
411}
412
413static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
414						     gfp_t flags)
415{
416	flags &= gfp_allowed_mask;
417
418	fs_reclaim_acquire(flags);
419	fs_reclaim_release(flags);
420
421	might_sleep_if(gfpflags_allow_blocking(flags));
422
423	if (should_failslab(s, flags))
424		return NULL;
425
426	if (memcg_kmem_enabled() &&
427	    ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
428		return memcg_kmem_get_cache(s);
429
430	return s;
431}
432
433static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
434					size_t size, void **p)
435{
436	size_t i;
437
438	flags &= gfp_allowed_mask;
439	for (i = 0; i < size; i++) {
440		void *object = p[i];
441
442		kmemleak_alloc_recursive(object, s->object_size, 1,
443					 s->flags, flags);
444		kasan_slab_alloc(s, object, flags);
445	}
446
447	if (memcg_kmem_enabled())
448		memcg_kmem_put_cache(s);
449}
450
451#ifndef CONFIG_SLOB
452/*
453 * The slab lists for all objects.
454 */
455struct kmem_cache_node {
456	spinlock_t list_lock;
457
458#ifdef CONFIG_SLAB
459	struct list_head slabs_partial;	/* partial list first, better asm code */
460	struct list_head slabs_full;
461	struct list_head slabs_free;
462	unsigned long total_slabs;	/* length of all slab lists */
463	unsigned long free_slabs;	/* length of free slab list only */
464	unsigned long free_objects;
465	unsigned int free_limit;
466	unsigned int colour_next;	/* Per-node cache coloring */
467	struct array_cache *shared;	/* shared per node */
468	struct alien_cache **alien;	/* on other nodes */
469	unsigned long next_reap;	/* updated without locking */
470	int free_touched;		/* updated without locking */
471#endif
472
473#ifdef CONFIG_SLUB
474	unsigned long nr_partial;
475	struct list_head partial;
476#ifdef CONFIG_SLUB_DEBUG
477	atomic_long_t nr_slabs;
478	atomic_long_t total_objects;
479	struct list_head full;
480#endif
481#endif
482
483};
484
485static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
486{
487	return s->node[node];
488}
489
490/*
491 * Iterator over all nodes. The body will be executed for each node that has
492 * a kmem_cache_node structure allocated (which is true for all online nodes)
493 */
494#define for_each_kmem_cache_node(__s, __node, __n) \
495	for (__node = 0; __node < nr_node_ids; __node++) \
496		 if ((__n = get_node(__s, __node)))
497
498#endif
499
500void *slab_start(struct seq_file *m, loff_t *pos);
501void *slab_next(struct seq_file *m, void *p, loff_t *pos);
502void slab_stop(struct seq_file *m, void *p);
503void *memcg_slab_start(struct seq_file *m, loff_t *pos);
504void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos);
505void memcg_slab_stop(struct seq_file *m, void *p);
506int memcg_slab_show(struct seq_file *m, void *p);
507
508#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
509void dump_unreclaimable_slab(void);
510#else
511static inline void dump_unreclaimable_slab(void)
512{
513}
514#endif
515
516void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);
517
518#ifdef CONFIG_SLAB_FREELIST_RANDOM
519int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
520			gfp_t gfp);
521void cache_random_seq_destroy(struct kmem_cache *cachep);
522#else
523static inline int cache_random_seq_create(struct kmem_cache *cachep,
524					unsigned int count, gfp_t gfp)
525{
526	return 0;
527}
528static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
529#endif /* CONFIG_SLAB_FREELIST_RANDOM */
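
A hedged sketch of the intended call pattern for CONFIG_SLAB_FREELIST_RANDOM: when a cache is set up, the allocator precomputes a shuffled freelist order sized to the number of objects per slab, and tears it down when the cache is released. With the option disabled, the stubs above make both calls no-ops, so callers need no #ifdefs. The function names below are made up.

static int example_init_random_seq(struct kmem_cache *cachep,
				   unsigned int objects_per_slab)
{
	return cache_random_seq_create(cachep, objects_per_slab, GFP_KERNEL);
}

static void example_release_cache(struct kmem_cache *cachep)
{
	cache_random_seq_destroy(cachep);
}
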
530
531#endif /* MM_SLAB_H */