v5.14.15
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */
#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1

#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <uapi/linux/mempolicy.h>

struct mm_struct;

#ifdef CONFIG_NUMA

/*
 * Describe a memory policy.
 *
 * A mempolicy can be either associated with a process or with a VMA.
 * For VMA related allocations the VMA policy is preferred, otherwise
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state. All vma manipulation is somewhat protected by a down_read on
 * mmap_lock.
 *
 * Freeing policy:
 * Mempolicy objects are reference counted.  A mempolicy will be freed when
 * mpol_put() decrements the reference count to zero.
 *
 * Duplicating policy objects:
 * mpol_dup() allocates a new mempolicy and copies the specified mempolicy
 * to the new storage.  The reference count of the new object is initialized
 * to 1, representing the caller of mpol_dup().
 */
struct mempolicy {
	atomic_t refcnt;
	unsigned short mode; 	/* See MPOL_* above */
	unsigned short flags;	/* See set_mempolicy() MPOL_F_* above */
	nodemask_t nodes;	/* interleave/bind/prefer */

	union {
		nodemask_t cpuset_mems_allowed;	/* relative to these nodes */
		nodemask_t user_nodemask;	/* nodemask passed by user */
	} w;
};
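
A short sketch of the lifecycle described in the comment above. This is illustration only, not part of the header: example_install_policy() is a hypothetical caller, and the error handling assumes __mpol_dup() reports allocation failure via ERR_PTR(), as mm/mempolicy.c does.

/*
 * Illustration only (hypothetical caller).  mpol_dup() hands back a copy
 * whose refcnt is 1, owned by the caller; ownership is then transferred
 * to the vma, and the final mpol_put() on teardown frees the object.
 */
static int example_install_policy(struct vm_area_struct *vma,
				  struct mempolicy *src)
{
	struct mempolicy *new = mpol_dup(src);	/* refcnt == 1, ours */

	if (IS_ERR(new))
		return PTR_ERR(new);
	mpol_put(vma->vm_policy);		/* drop any old policy */
	vma->vm_policy = new;			/* vma now owns the reference */
	return 0;
}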

/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */

extern void __mpol_put(struct mempolicy *pol);
static inline void mpol_put(struct mempolicy *pol)
{
	if (pol)
		__mpol_put(pol);
}

/*
 * Does mempolicy pol need explicit unref after use?
 * Currently only needed for shared policies.
 */
static inline int mpol_needs_cond_ref(struct mempolicy *pol)
{
	return (pol && (pol->flags & MPOL_F_SHARED));
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
	if (mpol_needs_cond_ref(pol))
		__mpol_put(pol);
}

extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
{
	if (pol)
		pol = __mpol_dup(pol);
	return pol;
}

#define vma_policy(vma) ((vma)->vm_policy)

static inline void mpol_get(struct mempolicy *pol)
{
	if (pol)
		atomic_inc(&pol->refcnt);
}

extern bool __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (a == b)
		return true;
	return __mpol_equal(a, b);
}

/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas. The vmas
 * carry the policy. As a special twist the pseudo mm is indexed in pages, not
 * bytes, so that we can work with shared memory segments bigger than
 * unsigned long.
 */

struct sp_node {
	struct rb_node nd;
	unsigned long start, end;
	struct mempolicy *policy;
};

struct shared_policy {
	struct rb_root root;
	rwlock_t lock;
};

int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst);
void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
int mpol_set_shared_policy(struct shared_policy *info,
				struct vm_area_struct *vma,
				struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
					    unsigned long idx);
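
The page indexing described in the comment above can be shown with a tiny sketch (hypothetical helper; shmem uses this pattern with a file's pgoff). The idx argument is a page number within the shared segment, not a byte offset.

/*
 * Illustration only: look up the policy covering one page of a shared
 * segment, keyed by page index.  The returned policy (if any) is
 * referenced and flagged MPOL_F_SHARED, so release it with
 * mpol_cond_put() when done.
 */
static struct mempolicy *example_shared_lookup(struct shared_policy *sp,
					       pgoff_t pgoff)
{
	return mpol_shared_policy_lookup(sp, pgoff);
}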

struct mempolicy *get_task_policy(struct task_struct *p);
struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
		unsigned long addr);
bool vma_policy_mof(struct vm_area_struct *vma);
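
A usage sketch tying __get_vma_policy() to the conditional-put helpers earlier in this header (hypothetical caller): a shared policy returned by the lookup carries an extra reference that mpol_cond_put() drops, while plain task and vma policies pass through it untouched.

/*
 * Illustration only: balance a lookup that may return a shared
 * (MPOL_F_SHARED) policy with mpol_cond_put(), which is a no-op for
 * non-shared policies.
 */
static unsigned short example_policy_mode(struct vm_area_struct *vma,
					  unsigned long addr)
{
	struct mempolicy *pol = __get_vma_policy(vma, addr);
	unsigned short mode = pol ? pol->mode : MPOL_DEFAULT;

	mpol_cond_put(pol);	/* drops the shared-policy reference, if any */
	return mode;
}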

extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);

extern int huge_node(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask);
extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
extern bool mempolicy_in_oom_domain(struct task_struct *tsk,
				const nodemask_t *mask);
extern nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy);

static inline nodemask_t *policy_nodemask_current(gfp_t gfp)
{
	struct mempolicy *mpol = get_task_policy(current);

	return policy_nodemask(gfp, mpol);
}

extern unsigned int mempolicy_slab_node(void);

extern enum zone_type policy_zone;

static inline void check_highest_zone(enum zone_type k)
{
	if (k > policy_zone && k != ZONE_MOVABLE)
		policy_zone = k;
}

int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags);
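
A rough usage sketch for do_migrate_pages() (hypothetical helper, loosely modeled on the migrate_pages(2) syscall path after its permission checks; MPOL_MF_MOVE and nodemask_of_node() come from headers this file already pulls in).

/*
 * Illustration only: move an mm's pages from one node to another.
 * MPOL_MF_MOVE limits the move to pages mapped only by this mm; the
 * return value is the number of pages that could not be moved, or a
 * negative errno.
 */
static int example_migrate_node(struct mm_struct *mm, int from_node,
				int to_node)
{
	nodemask_t from = nodemask_of_node(from_node);
	nodemask_t to = nodemask_of_node(to_node);

	return do_migrate_pages(mm, &from, &to, MPOL_MF_MOVE);
}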


#ifdef CONFIG_TMPFS
extern int mpol_parse_str(char *str, struct mempolicy **mpol);
#endif

extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);

/* Check if a vma is migratable */
extern bool vma_migratable(struct vm_area_struct *vma);

extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
extern void mpol_put_task_policy(struct task_struct *);

#else

struct mempolicy {};

static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	return true;
}

static inline void mpol_put(struct mempolicy *p)
{
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
}

static inline void mpol_get(struct mempolicy *pol)
{
}

struct shared_policy {};

static inline void mpol_shared_policy_init(struct shared_policy *sp,
						struct mempolicy *mpol)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

static inline struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
	return NULL;
}

#define vma_policy(vma) NULL

static inline int
vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
{
	return 0;
}

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void mpol_rebind_task(struct task_struct *tsk,
				const nodemask_t *new)
{
}

static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
}

static inline int huge_node(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask)
{
	*mpol = NULL;
	*nodemask = NULL;
	return 0;
}

static inline bool init_nodemask_of_mempolicy(nodemask_t *m)
{
	return false;
}

static inline int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
				   const nodemask_t *to, int flags)
{
	return 0;
}

static inline void check_highest_zone(int k)
{
}

#ifdef CONFIG_TMPFS
static inline int mpol_parse_str(char *str, struct mempolicy **mpol)
{
	return 1;	/* error */
}
#endif

static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
				 unsigned long address)
{
	return -1; /* no node preference */
}

static inline void mpol_put_task_policy(struct task_struct *task)
{
}

static inline nodemask_t *policy_nodemask_current(gfp_t gfp)
{
	return NULL;
}
#endif /* CONFIG_NUMA */
#endif
v5.4
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */
#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1


#include <linux/mmzone.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <uapi/linux/mempolicy.h>

struct mm_struct;

#ifdef CONFIG_NUMA

/*
 * Describe a memory policy.
 *
 * A mempolicy can be either associated with a process or with a VMA.
 * For VMA related allocations the VMA policy is preferred, otherwise
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state. All vma manipulation is somewhat protected by a down_read on
 * mmap_sem.
 *
 * Freeing policy:
 * Mempolicy objects are reference counted.  A mempolicy will be freed when
 * mpol_put() decrements the reference count to zero.
 *
 * Duplicating policy objects:
 * mpol_dup() allocates a new mempolicy and copies the specified mempolicy
 * to the new storage.  The reference count of the new object is initialized
 * to 1, representing the caller of mpol_dup().
 */
struct mempolicy {
	atomic_t refcnt;
	unsigned short mode; 	/* See MPOL_* above */
	unsigned short flags;	/* See set_mempolicy() MPOL_F_* above */
	union {
		short 		 preferred_node; /* preferred */
		nodemask_t	 nodes;		/* interleave/bind */
		/* undefined for default */
	} v;
	union {
		nodemask_t cpuset_mems_allowed;	/* relative to these nodes */
		nodemask_t user_nodemask;	/* nodemask passed by user */
	} w;
};
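
A small sketch of how the v union above is read (hypothetical helper mirroring the mode-dependent interpretation in mm/mempolicy.c; first_node() comes from <linux/nodemask.h> and NUMA_NO_NODE from <linux/numa.h>).

/*
 * Illustration only: which union member is valid depends on mode.
 * MPOL_PREFERRED uses v.preferred_node, MPOL_INTERLEAVE and MPOL_BIND
 * use v.nodes, and for MPOL_DEFAULT v is undefined.
 */
static int example_first_node(struct mempolicy *pol)
{
	switch (pol->mode) {
	case MPOL_PREFERRED:
		return pol->v.preferred_node;
	case MPOL_INTERLEAVE:
	case MPOL_BIND:
		return first_node(pol->v.nodes);
	default:
		return NUMA_NO_NODE;	/* no single node to report */
	}
}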

/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */

extern void __mpol_put(struct mempolicy *pol);
static inline void mpol_put(struct mempolicy *pol)
{
	if (pol)
		__mpol_put(pol);
}

/*
 * Does mempolicy pol need explicit unref after use?
 * Currently only needed for shared policies.
 */
static inline int mpol_needs_cond_ref(struct mempolicy *pol)
{
	return (pol && (pol->flags & MPOL_F_SHARED));
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
	if (mpol_needs_cond_ref(pol))
		__mpol_put(pol);
}

extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
{
	if (pol)
		pol = __mpol_dup(pol);
	return pol;
}

#define vma_policy(vma) ((vma)->vm_policy)

static inline void mpol_get(struct mempolicy *pol)
{
	if (pol)
		atomic_inc(&pol->refcnt);
}

extern bool __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (a == b)
		return true;
	return __mpol_equal(a, b);
}

/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas. The vmas
 * carry the policy. As a special twist the pseudo mm is indexed in pages, not
 * bytes, so that we can work with shared memory segments bigger than
 * unsigned long.
 */

struct sp_node {
	struct rb_node nd;
	unsigned long start, end;
	struct mempolicy *policy;
};

struct shared_policy {
	struct rb_root root;
	rwlock_t lock;
};

int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst);
void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
int mpol_set_shared_policy(struct shared_policy *info,
				struct vm_area_struct *vma,
				struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
					    unsigned long idx);

struct mempolicy *get_task_policy(struct task_struct *p);
struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
		unsigned long addr);
bool vma_policy_mof(struct vm_area_struct *vma);

extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);

extern int huge_node(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask);
extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
extern bool mempolicy_nodemask_intersects(struct task_struct *tsk,
				const nodemask_t *mask);
extern unsigned int mempolicy_slab_node(void);

extern enum zone_type policy_zone;

static inline void check_highest_zone(enum zone_type k)
{
	if (k > policy_zone && k != ZONE_MOVABLE)
		policy_zone = k;
}

int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags);


#ifdef CONFIG_TMPFS
extern int mpol_parse_str(char *str, struct mempolicy **mpol);
#endif

extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);

/* Check if a vma is migratable */
static inline bool vma_migratable(struct vm_area_struct *vma)
{
	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
		return false;

	/*
	 * DAX device mappings require predictable access latency, so avoid
	 * incurring periodic faults.
	 */
	if (vma_is_dax(vma))
		return false;

#ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
	if (vma->vm_flags & VM_HUGETLB)
		return false;
#endif

	/*
	 * Migration allocates pages in the highest zone. If we cannot
	 * do so then migration (at least from node to node) is not
	 * possible.
	 */
	if (vma->vm_file &&
		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
								< policy_zone)
			return false;
	return true;
}
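
A short usage sketch for this predicate (hypothetical helper; NUMA balancing applies a similar per-VMA filter before queueing pages for migration). It assumes mmap_sem is held for read so the VMA list is stable; mm->mmap/vm_next is the v5.4-era linked list.

/*
 * Illustration only: skip VMAs that can never be migrated before doing
 * any per-page work.  Caller must hold mmap_sem for read.
 */
static unsigned long example_count_migratable(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	unsigned long n = 0;

	for (vma = mm->mmap; vma; vma = vma->vm_next)
		if (vma_migratable(vma))
			n++;
	return n;
}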

extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
extern void mpol_put_task_policy(struct task_struct *);

#else

struct mempolicy {};

static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	return true;
}

static inline void mpol_put(struct mempolicy *p)
{
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
}

static inline void mpol_get(struct mempolicy *pol)
{
}

struct shared_policy {};

static inline void mpol_shared_policy_init(struct shared_policy *sp,
						struct mempolicy *mpol)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

static inline struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
	return NULL;
}

#define vma_policy(vma) NULL

static inline int
vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
{
	return 0;
}

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void mpol_rebind_task(struct task_struct *tsk,
				const nodemask_t *new)
{
}

static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
}

static inline int huge_node(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask)
{
	*mpol = NULL;
	*nodemask = NULL;
	return 0;
}

static inline bool init_nodemask_of_mempolicy(nodemask_t *m)
{
	return false;
}

static inline int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
				   const nodemask_t *to, int flags)
{
	return 0;
}

static inline void check_highest_zone(int k)
{
}

#ifdef CONFIG_TMPFS
static inline int mpol_parse_str(char *str, struct mempolicy **mpol)
{
	return 1;	/* error */
}
#endif

static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
				 unsigned long address)
{
	return -1; /* no node preference */
}

static inline void mpol_put_task_policy(struct task_struct *task)
{
}
#endif /* CONFIG_NUMA */
#endif