/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */
#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1


#include <linux/mmzone.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <uapi/linux/mempolicy.h>

struct mm_struct;

#ifdef CONFIG_NUMA

/*
 * Describe a memory policy.
 *
 * A mempolicy can be either associated with a process or with a VMA.
 * For VMA related allocations the VMA policy is preferred, otherwise
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state. All vma manipulation is somewhat protected by a down_read on
 * mmap_sem.
 *
 * Freeing policy:
 * Mempolicy objects are reference counted. A mempolicy will be freed when
 * mpol_put() decrements the reference count to zero.
 *
 * Duplicating policy objects:
 * mpol_dup() allocates a new mempolicy and copies the specified mempolicy
 * to the new storage. The reference count of the new object is initialized
 * to 1, representing the caller of mpol_dup().
 */
struct mempolicy {
	atomic_t refcnt;
	unsigned short mode;	/* See MPOL_* above */
	unsigned short flags;	/* See set_mempolicy() MPOL_F_* above */
	union {
		short            preferred_node; /* preferred */
		nodemask_t       nodes;          /* interleave/bind */
		/* undefined for default */
	} v;
	union {
		nodemask_t cpuset_mems_allowed; /* relative to these nodes */
		nodemask_t user_nodemask;       /* nodemask passed by user */
	} w;
};
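
/*
 * Illustrative sketch, not part of this header: how the 'v' union is read
 * depends on 'mode'. example_policy_node() is a hypothetical helper showing
 * that convention; it assumes the MPOL_* mode values from
 * <uapi/linux/mempolicy.h> and first_node() from <linux/nodemask.h>.
 */
static inline int example_policy_node(struct mempolicy *pol)
{
	switch (pol->mode) {
	case MPOL_PREFERRED:
		/* a single preferred node (may be negative for "local") */
		return pol->v.preferred_node;
	case MPOL_INTERLEAVE:
	case MPOL_BIND:
		/* a set of nodes; report the first one in the mask */
		return first_node(pol->v.nodes);
	default:
		/* MPOL_DEFAULT: nothing stored in the union */
		return -1;
	}
}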

/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */

extern void __mpol_put(struct mempolicy *pol);
static inline void mpol_put(struct mempolicy *pol)
{
	if (pol)
		__mpol_put(pol);
}

/*
 * Does mempolicy pol need explicit unref after use?
 * Currently only needed for shared policies.
 */
static inline int mpol_needs_cond_ref(struct mempolicy *pol)
{
	return (pol && (pol->flags & MPOL_F_SHARED));
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
	if (mpol_needs_cond_ref(pol))
		__mpol_put(pol);
}

extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
{
	if (pol)
		pol = __mpol_dup(pol);
	return pol;
}
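
/*
 * Duplication sketch (illustrative, not part of this header): __mpol_dup()
 * conventionally reports allocation failure via ERR_PTR(), so callers of
 * mpol_dup() check with IS_ERR() before using the copy. This assumes
 * <linux/err.h> is visible to includers; example_fork_policy() is a
 * hypothetical caller.
 */
static inline struct mempolicy *example_fork_policy(struct mempolicy *old)
{
	struct mempolicy *new = mpol_dup(old);	/* NULL stays NULL */

	if (IS_ERR(new))
		return NULL;	/* caller falls back to the default policy */
	/* 'new' starts with refcnt == 1, owned by this caller */
	return new;
}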

#define vma_policy(vma) ((vma)->vm_policy)

static inline void mpol_get(struct mempolicy *pol)
{
	if (pol)
		atomic_inc(&pol->refcnt);
}
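
/*
 * Reference-counting sketch (illustrative): a caller that stores a policy
 * in a long-lived slot takes a reference with mpol_get() and drops the old
 * occupant with mpol_put(). example_store_policy() is hypothetical.
 */
static inline void example_store_policy(struct mempolicy **slot,
					struct mempolicy *pol)
{
	mpol_get(pol);		/* new reference held by *slot */
	mpol_put(*slot);	/* release the old occupant, if any */
	*slot = pol;
}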

extern bool __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (a == b)
		return true;
	return __mpol_equal(a, b);
}

/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas. The vmas
 * carry the policy. As a special twist the pseudo mm is indexed in pages, not
 * bytes, so that we can work with shared memory segments bigger than
 * unsigned long.
 */

struct sp_node {
	struct rb_node nd;
	unsigned long start, end;
	struct mempolicy *policy;
};

struct shared_policy {
	struct rb_root root;
	rwlock_t lock;
};

int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst);
void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
int mpol_set_shared_policy(struct shared_policy *info,
				struct vm_area_struct *vma,
				struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
					    unsigned long idx);
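
/*
 * Lookup sketch (illustrative, not part of this header): the shared tree
 * is indexed in pages, so a file-backed caller converts a fault address to
 * a page index first. Policies returned by mpol_shared_policy_lookup()
 * carry MPOL_F_SHARED plus an extra reference and are released with
 * mpol_cond_put(). example_shared_lookup() is a hypothetical caller.
 */
static inline int example_shared_lookup(struct shared_policy *sp,
					struct vm_area_struct *vma,
					unsigned long addr)
{
	/* page index within the backing object, not a byte offset */
	unsigned long idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
				vma->vm_pgoff;
	struct mempolicy *pol = mpol_shared_policy_lookup(sp, idx);
	int node = pol ? example_policy_node(pol) : -1;

	mpol_cond_put(pol);	/* drops the reference taken by the lookup */
	return node;
}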

struct mempolicy *get_task_policy(struct task_struct *p);
struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
		unsigned long addr);
bool vma_policy_mof(struct vm_area_struct *vma);

extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
				enum mpol_rebind_step step);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);

extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask);
extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
extern bool mempolicy_nodemask_intersects(struct task_struct *tsk,
				const nodemask_t *mask);
extern unsigned int mempolicy_slab_node(void);

extern enum zone_type policy_zone;

static inline void check_highest_zone(enum zone_type k)
{
	if (k > policy_zone && k != ZONE_MOVABLE)
		policy_zone = k;
}

int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags);


#ifdef CONFIG_TMPFS
extern int mpol_parse_str(char *str, struct mempolicy **mpol);
#endif

extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);
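
#ifdef CONFIG_TMPFS
/*
 * Parsing sketch (illustrative): mpol_parse_str() conventionally accepts
 * tmpfs-style strings such as "interleave:0-3" and returns 0 on success,
 * nonzero on error (as the !CONFIG_NUMA stub below suggests). The buffer
 * is modified in place, so it must be writable. Hypothetical round trip:
 */
static inline void example_parse_policy(void)
{
	char str[] = "interleave:0-3";	/* writable copy, not a literal */
	char out[64];
	struct mempolicy *pol;

	if (!mpol_parse_str(str, &pol)) {
		mpol_to_str(out, sizeof(out), pol);	/* format it back */
		mpol_put(pol);	/* drop the reference from the parse */
	}
}
#endif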

/* Check if a vma is migratable */
static inline bool vma_migratable(struct vm_area_struct *vma)
{
	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
		return false;

	/*
	 * DAX device mappings require predictable access latency, so avoid
	 * incurring periodic faults.
	 */
	if (vma_is_dax(vma))
		return false;

#ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
	if (vma->vm_flags & VM_HUGETLB)
		return false;
#endif

	/*
	 * Migration allocates pages in the highest zone. If we cannot
	 * do so then migration (at least from node to node) is not
	 * possible.
	 */
	if (vma->vm_file &&
		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
								< policy_zone)
		return false;
	return true;
}
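
/*
 * Migration sketch (illustrative): a caller walking an address space would
 * skip VMAs that vma_migratable() rejects before moving pages between
 * nodes. example_count_migratable() is hypothetical and assumes the
 * classic mm->mmap linked list of VMAs.
 */
static inline unsigned long example_count_migratable(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	unsigned long nr = 0;

	/* callers would hold mmap_sem for reading around this walk */
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		if (vma_migratable(vma))
			nr++;
	return nr;
}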

extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
extern void mpol_put_task_policy(struct task_struct *);

#else

struct mempolicy {};

static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	return true;
}

static inline void mpol_put(struct mempolicy *p)
{
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
}

static inline void mpol_get(struct mempolicy *pol)
{
}

struct shared_policy {};

static inline void mpol_shared_policy_init(struct shared_policy *sp,
						struct mempolicy *mpol)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

static inline struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
	return NULL;
}

#define vma_policy(vma) NULL

static inline int
vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
{
	return 0;
}

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void mpol_rebind_task(struct task_struct *tsk,
				const nodemask_t *new,
				enum mpol_rebind_step step)
{
}

static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
}

static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask)
{
	*mpol = NULL;
	*nodemask = NULL;
	return node_zonelist(0, gfp_flags);
}

static inline bool init_nodemask_of_mempolicy(nodemask_t *m)
{
	return false;
}

static inline int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
				   const nodemask_t *to, int flags)
{
	return 0;
}

static inline void check_highest_zone(int k)
{
}

#ifdef CONFIG_TMPFS
static inline int mpol_parse_str(char *str, struct mempolicy **mpol)
{
	return 1; /* error */
}
#endif

static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
				 unsigned long address)
{
	return -1; /* no node preference */
}

static inline void mpol_put_task_policy(struct task_struct *task)
{
}
#endif /* CONFIG_NUMA */
#endif