1/*
2 * Simple NUMA memory policy for the Linux kernel.
3 *
4 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
5 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
6 * Subject to the GNU Public License, version 2.
7 *
8 * NUMA policy allows the user to give hints in which node(s) memory should
9 * be allocated.
10 *
11 * Support four policies per VMA and per process:
12 *
13 * The VMA policy has priority over the process policy for a page fault.
14 *
15 * interleave Allocate memory interleaved over a set of nodes,
16 * with normal fallback if it fails.
17 * For VMA based allocations this interleaves based on the
18 * offset into the backing object or offset into the mapping
19 * for anonymous memory. For process policy a process counter
20 * is used.
21 *
22 * bind Only allocate memory on a specific set of nodes,
23 * no fallback.
24 * FIXME: memory is allocated starting with the first node
25 * to the last. It would be better if bind would truly restrict
26 * the allocation to memory nodes instead
27 *
28 * preferred Try a specific node first before normal fallback.
29 * As a special case NUMA_NO_NODE here means do the allocation
30 * on the local CPU. This is normally identical to default,
31 * but useful to set in a VMA when you have a non default
32 * process policy.
33 *
34 * default Allocate on the local node first, or when on a VMA
35 * use the process policy. This is what Linux always did
36 * in a NUMA aware kernel and still does by, ahem, default.
37 *
38 * The process policy is applied for most non-interrupt memory allocations
39 * in that process' context. Interrupts ignore the policies and always
40 * try to allocate on the local CPU. The VMA policy is only applied for memory
41 * allocations for a VMA in the VM.
42 *
43 * Currently there are a few corner cases in swapping where the policy
44 * is not applied, but the majority should be handled. When process policy
45 * is used it is not remembered over swap outs/swap ins.
46 *
47 * Only the highest zone in the zone hierarchy gets policied. Allocations
48 * requesting a lower zone just use default policy. This implies that
49 * on systems with highmem, kernel lowmem allocations don't get policied.
50 * Same with GFP_DMA allocations.
51 *
52 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
53 * all users and remembered even when nobody has memory mapped.
54 */
55
56/* Notebook:
57 fix mmap readahead to honour policy and enable policy for any page cache
58 object
59 statistics for bigpages
60 global policy for page cache? currently it uses process policy. Requires
61 first item above.
62 handle mremap for shared memory (currently ignored for the policy)
63 grows down?
64 make bind policy root only? It can trigger oom much faster and the
65 kernel is not always grateful with that.
66*/
67
68#include <linux/mempolicy.h>
69#include <linux/mm.h>
70#include <linux/highmem.h>
71#include <linux/hugetlb.h>
72#include <linux/kernel.h>
73#include <linux/sched.h>
74#include <linux/nodemask.h>
75#include <linux/cpuset.h>
76#include <linux/slab.h>
77#include <linux/string.h>
78#include <linux/export.h>
79#include <linux/nsproxy.h>
80#include <linux/interrupt.h>
81#include <linux/init.h>
82#include <linux/compat.h>
83#include <linux/swap.h>
84#include <linux/seq_file.h>
85#include <linux/proc_fs.h>
86#include <linux/migrate.h>
87#include <linux/ksm.h>
88#include <linux/rmap.h>
89#include <linux/security.h>
90#include <linux/syscalls.h>
91#include <linux/ctype.h>
92#include <linux/mm_inline.h>
93#include <linux/mmu_notifier.h>
94
95#include <asm/tlbflush.h>
96#include <asm/uaccess.h>
97#include <linux/random.h>
98
99#include "internal.h"
100
101/* Internal flags */
102#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0) /* Skip checks for continuous vmas */
103#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */
104
105static struct kmem_cache *policy_cache;
106static struct kmem_cache *sn_cache;
107
108/* Highest zone. A specific allocation for a zone below that is not
109 policied. */
110enum zone_type policy_zone = 0;
111
112/*
113 * run-time system-wide default policy => local allocation
114 */
115static struct mempolicy default_policy = {
116 .refcnt = ATOMIC_INIT(1), /* never free it */
117 .mode = MPOL_PREFERRED,
118 .flags = MPOL_F_LOCAL,
119};
120
121static struct mempolicy preferred_node_policy[MAX_NUMNODES];
122
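/*
 * Return the effective task policy: the task's own mempolicy if it has
 * one, otherwise the boot-time preferred_node_policy entry for the local
 * node. May return NULL very early in boot, before preferred_node_policy
 * has been initialised (pol->mode is still zero then).
 */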
123static struct mempolicy *get_task_policy(struct task_struct *p)
124{
125 struct mempolicy *pol = p->mempolicy;
126
127 if (!pol) {
128 int node = numa_node_id();
129
130 if (node != NUMA_NO_NODE) {
131 pol = &preferred_node_policy[node];
132 /*
133 * preferred_node_policy is not initialised early in
134 * boot
135 */
136 if (!pol->mode)
137 pol = NULL;
138 }
139 }
140
141 return pol;
142}
143
144static const struct mempolicy_operations {
145 int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
146 /*
147 * If the read-side task has no lock to protect task->mempolicy, the
148 * write-side task will rebind task->mempolicy in two steps. The first
149 * step sets all the newly allowed nodes, and the second step clears
150 * all the disallowed nodes. This way we avoid being left with no
151 * node to allocate pages from.
152 * If we have a lock to protect task->mempolicy on the read side, we
153 * rebind directly.
154 *
155 * step:
156 * MPOL_REBIND_ONCE - do rebind work at once
157 * MPOL_REBIND_STEP1 - set all the newly nodes
158 * MPOL_REBIND_STEP2 - clean all the disallowed nodes
159 */
160 void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
161 enum mpol_rebind_step step);
162} mpol_ops[MPOL_MAX];
163
164/* Check that the nodemask contains at least one populated zone */
165static int is_valid_nodemask(const nodemask_t *nodemask)
166{
167 return nodes_intersects(*nodemask, node_states[N_MEMORY]);
168}
169
170static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
171{
172 return pol->flags & MPOL_MODE_FLAGS;
173}
174
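/*
 * Fold the user's relative nodemask @orig onto the allowed set @rel:
 * @orig is first folded to the weight of @rel and then mapped onto the
 * bits actually set in @rel, producing the effective nodemask @ret.
 */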
175static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
176 const nodemask_t *rel)
177{
178 nodemask_t tmp;
179 nodes_fold(tmp, *orig, nodes_weight(*rel));
180 nodes_onto(*ret, tmp, *rel);
181}
182
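/*
 * Per-mode constructors, invoked through mpol_ops[].create() from
 * mpol_set_nodemask(): they validate the cpuset-contextualized nodemask
 * passed in and record it in the policy. For MPOL_PREFERRED a NULL
 * nodemask means explicit local allocation.
 */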
183static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
184{
185 if (nodes_empty(*nodes))
186 return -EINVAL;
187 pol->v.nodes = *nodes;
188 return 0;
189}
190
191static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
192{
193 if (!nodes)
194 pol->flags |= MPOL_F_LOCAL; /* local allocation */
195 else if (nodes_empty(*nodes))
196 return -EINVAL; /* no allowed nodes */
197 else
198 pol->v.preferred_node = first_node(*nodes);
199 return 0;
200}
201
202static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
203{
204 if (!is_valid_nodemask(nodes))
205 return -EINVAL;
206 pol->v.nodes = *nodes;
207 return 0;
208}
209
210/*
211 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
212 * any, for the new policy. mpol_new() has already validated the nodes
213 * parameter with respect to the policy mode and flags. But, we need to
214 * handle an empty nodemask with MPOL_PREFERRED here.
215 *
216 * Must be called holding task's alloc_lock to protect task's mems_allowed
217 * and mempolicy. May also be called holding the mmap_semaphore for write.
218 */
219static int mpol_set_nodemask(struct mempolicy *pol,
220 const nodemask_t *nodes, struct nodemask_scratch *nsc)
221{
222 int ret;
223
224 /* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
225 if (pol == NULL)
226 return 0;
227 /* Check N_MEMORY */
228 nodes_and(nsc->mask1,
229 cpuset_current_mems_allowed, node_states[N_MEMORY]);
230
231 VM_BUG_ON(!nodes);
232 if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
233 nodes = NULL; /* explicit local allocation */
234 else {
235 if (pol->flags & MPOL_F_RELATIVE_NODES)
236 mpol_relative_nodemask(&nsc->mask2, nodes,&nsc->mask1);
237 else
238 nodes_and(nsc->mask2, *nodes, nsc->mask1);
239
240 if (mpol_store_user_nodemask(pol))
241 pol->w.user_nodemask = *nodes;
242 else
243 pol->w.cpuset_mems_allowed =
244 cpuset_current_mems_allowed;
245 }
246
247 if (nodes)
248 ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
249 else
250 ret = mpol_ops[pol->mode].create(pol, NULL);
251 return ret;
252}
253
254/*
255 * This function just creates a new policy, does some checks and simple
256 * initialization. You must invoke mpol_set_nodemask() to set nodes.
257 */
258static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
259 nodemask_t *nodes)
260{
261 struct mempolicy *policy;
262
263 pr_debug("setting mode %d flags %d nodes[0] %lx\n",
264 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
265
266 if (mode == MPOL_DEFAULT) {
267 if (nodes && !nodes_empty(*nodes))
268 return ERR_PTR(-EINVAL);
269 return NULL;
270 }
271 VM_BUG_ON(!nodes);
272
273 /*
274 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
275 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
276 * All other modes require a valid pointer to a non-empty nodemask.
277 */
278 if (mode == MPOL_PREFERRED) {
279 if (nodes_empty(*nodes)) {
280 if (((flags & MPOL_F_STATIC_NODES) ||
281 (flags & MPOL_F_RELATIVE_NODES)))
282 return ERR_PTR(-EINVAL);
283 }
284 } else if (mode == MPOL_LOCAL) {
285 if (!nodes_empty(*nodes))
286 return ERR_PTR(-EINVAL);
287 mode = MPOL_PREFERRED;
288 } else if (nodes_empty(*nodes))
289 return ERR_PTR(-EINVAL);
290 policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
291 if (!policy)
292 return ERR_PTR(-ENOMEM);
293 atomic_set(&policy->refcnt, 1);
294 policy->mode = mode;
295 policy->flags = flags;
296
297 return policy;
298}
299
300/* Slow path of a mpol destructor. */
301void __mpol_put(struct mempolicy *p)
302{
303 if (!atomic_dec_and_test(&p->refcnt))
304 return;
305 kmem_cache_free(policy_cache, p);
306}
307
308static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes,
309 enum mpol_rebind_step step)
310{
311}
312
313/*
314 * step:
315 * MPOL_REBIND_ONCE - do rebind work at once
316 * MPOL_REBIND_STEP1 - set all the newly nodes
317 * MPOL_REBIND_STEP2 - clean all the disallowed nodes
318 */
319static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
320 enum mpol_rebind_step step)
321{
322 nodemask_t tmp;
323
324 if (pol->flags & MPOL_F_STATIC_NODES)
325 nodes_and(tmp, pol->w.user_nodemask, *nodes);
326 else if (pol->flags & MPOL_F_RELATIVE_NODES)
327 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
328 else {
329 /*
330 * if step == 1, we use ->w.cpuset_mems_allowed to cache the
331 * result
332 */
333 if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) {
334 nodes_remap(tmp, pol->v.nodes,
335 pol->w.cpuset_mems_allowed, *nodes);
336 pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
337 } else if (step == MPOL_REBIND_STEP2) {
338 tmp = pol->w.cpuset_mems_allowed;
339 pol->w.cpuset_mems_allowed = *nodes;
340 } else
341 BUG();
342 }
343
344 if (nodes_empty(tmp))
345 tmp = *nodes;
346
347 if (step == MPOL_REBIND_STEP1)
348 nodes_or(pol->v.nodes, pol->v.nodes, tmp);
349 else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2)
350 pol->v.nodes = tmp;
351 else
352 BUG();
353
354 if (!node_isset(current->il_next, tmp)) {
355 current->il_next = next_node(current->il_next, tmp);
356 if (current->il_next >= MAX_NUMNODES)
357 current->il_next = first_node(tmp);
358 if (current->il_next >= MAX_NUMNODES)
359 current->il_next = numa_node_id();
360 }
361}
362
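/*
 * Rebind an MPOL_PREFERRED policy to a new set of allowed nodes,
 * honouring MPOL_F_STATIC_NODES (fall back to local allocation if the
 * preferred node is no longer allowed) and MPOL_F_RELATIVE_NODES
 * (remap the user's relative nodemask onto the new allowed set).
 */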
363static void mpol_rebind_preferred(struct mempolicy *pol,
364 const nodemask_t *nodes,
365 enum mpol_rebind_step step)
366{
367 nodemask_t tmp;
368
369 if (pol->flags & MPOL_F_STATIC_NODES) {
370 int node = first_node(pol->w.user_nodemask);
371
372 if (node_isset(node, *nodes)) {
373 pol->v.preferred_node = node;
374 pol->flags &= ~MPOL_F_LOCAL;
375 } else
376 pol->flags |= MPOL_F_LOCAL;
377 } else if (pol->flags & MPOL_F_RELATIVE_NODES) {
378 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
379 pol->v.preferred_node = first_node(tmp);
380 } else if (!(pol->flags & MPOL_F_LOCAL)) {
381 pol->v.preferred_node = node_remap(pol->v.preferred_node,
382 pol->w.cpuset_mems_allowed,
383 *nodes);
384 pol->w.cpuset_mems_allowed = *nodes;
385 }
386}
387
388/*
389 * mpol_rebind_policy - Migrate a policy to a different set of nodes
390 *
391 * If the read-side task has no lock to protect task->mempolicy, the
392 * write-side task will rebind task->mempolicy in two steps. The first
393 * step sets all the newly allowed nodes, and the second step clears
394 * all the disallowed nodes. This way we avoid being left with no
395 * node to allocate pages from.
396 * If we have a lock to protect task->mempolicy on the read side, we
397 * rebind directly.
398 *
399 * step:
400 * MPOL_REBIND_ONCE - do rebind work at once
401 * MPOL_REBIND_STEP1 - set all the newly nodes
402 * MPOL_REBIND_STEP2 - clean all the disallowed nodes
403 */
404static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
405 enum mpol_rebind_step step)
406{
407 if (!pol)
408 return;
409 if (!mpol_store_user_nodemask(pol) && step == MPOL_REBIND_ONCE &&
410 nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
411 return;
412
413 if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING))
414 return;
415
416 if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING))
417 BUG();
418
419 if (step == MPOL_REBIND_STEP1)
420 pol->flags |= MPOL_F_REBINDING;
421 else if (step == MPOL_REBIND_STEP2)
422 pol->flags &= ~MPOL_F_REBINDING;
423 else if (step >= MPOL_REBIND_NSTEP)
424 BUG();
425
426 mpol_ops[pol->mode].rebind(pol, newmask, step);
427}
428
429/*
430 * Wrapper for mpol_rebind_policy() that just requires task
431 * pointer, and updates task mempolicy.
432 *
433 * Called with task's alloc_lock held.
434 */
435
436void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
437 enum mpol_rebind_step step)
438{
439 mpol_rebind_policy(tsk->mempolicy, new, step);
440}
441
442/*
443 * Rebind each vma in mm to new nodemask.
444 *
445 * Call holding a reference to mm. Takes mm->mmap_sem during call.
446 */
447
448void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
449{
450 struct vm_area_struct *vma;
451
452 down_write(&mm->mmap_sem);
453 for (vma = mm->mmap; vma; vma = vma->vm_next)
454 mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
455 up_write(&mm->mmap_sem);
456}
457
458static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
459 [MPOL_DEFAULT] = {
460 .rebind = mpol_rebind_default,
461 },
462 [MPOL_INTERLEAVE] = {
463 .create = mpol_new_interleave,
464 .rebind = mpol_rebind_nodemask,
465 },
466 [MPOL_PREFERRED] = {
467 .create = mpol_new_preferred,
468 .rebind = mpol_rebind_preferred,
469 },
470 [MPOL_BIND] = {
471 .create = mpol_new_bind,
472 .rebind = mpol_rebind_nodemask,
473 },
474};
475
476static void migrate_page_add(struct page *page, struct list_head *pagelist,
477 unsigned long flags);
478
479/*
480 * Scan through pages checking if pages follow certain conditions,
481 * and move them to the pagelist if they do.
482 */
483static int queue_pages_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
484 unsigned long addr, unsigned long end,
485 const nodemask_t *nodes, unsigned long flags,
486 void *private)
487{
488 pte_t *orig_pte;
489 pte_t *pte;
490 spinlock_t *ptl;
491
492 orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
493 do {
494 struct page *page;
495 int nid;
496
497 if (!pte_present(*pte))
498 continue;
499 page = vm_normal_page(vma, addr, *pte);
500 if (!page)
501 continue;
502 /*
503 * vm_normal_page() filters out zero pages, but there might
504 * still be PageReserved pages to skip, perhaps in a VDSO.
505 */
506 if (PageReserved(page))
507 continue;
508 nid = page_to_nid(page);
509 if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
510 continue;
511
512 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
513 migrate_page_add(page, private, flags);
514 else
515 break;
516 } while (pte++, addr += PAGE_SIZE, addr != end);
517 pte_unmap_unlock(orig_pte, ptl);
518 return addr != end;
519}
520
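/*
 * Hugetlb analogue of queue_pages_pte_range(): check the single huge
 * page mapped by @pmd against @nodes/@flags and, for MPOL_MF_MOVE /
 * MPOL_MF_MOVE_ALL, isolate it onto the pagelist passed via @private.
 */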
521static void queue_pages_hugetlb_pmd_range(struct vm_area_struct *vma,
522 pmd_t *pmd, const nodemask_t *nodes, unsigned long flags,
523 void *private)
524{
525#ifdef CONFIG_HUGETLB_PAGE
526 int nid;
527 struct page *page;
528 spinlock_t *ptl;
529 pte_t entry;
530
531 ptl = huge_pte_lock(hstate_vma(vma), vma->vm_mm, (pte_t *)pmd);
532 entry = huge_ptep_get((pte_t *)pmd);
533 if (!pte_present(entry))
534 goto unlock;
535 page = pte_page(entry);
536 nid = page_to_nid(page);
537 if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
538 goto unlock;
539 /* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
540 if (flags & (MPOL_MF_MOVE_ALL) ||
541 (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
542 isolate_huge_page(page, private);
543unlock:
544 spin_unlock(ptl);
545#else
546 BUG();
547#endif
548}
549
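/*
 * Walk the pmd level: hand hugetlb mappings to
 * queue_pages_hugetlb_pmd_range(), split transparent huge pages, and
 * descend into queue_pages_pte_range() for the rest. Returns -EIO when
 * the pte-level walk stopped early on a page that fails the node check
 * and cannot be queued for migration.
 */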
550static inline int queue_pages_pmd_range(struct vm_area_struct *vma, pud_t *pud,
551 unsigned long addr, unsigned long end,
552 const nodemask_t *nodes, unsigned long flags,
553 void *private)
554{
555 pmd_t *pmd;
556 unsigned long next;
557
558 pmd = pmd_offset(pud, addr);
559 do {
560 next = pmd_addr_end(addr, end);
561 if (!pmd_present(*pmd))
562 continue;
563 if (pmd_huge(*pmd) && is_vm_hugetlb_page(vma)) {
564 queue_pages_hugetlb_pmd_range(vma, pmd, nodes,
565 flags, private);
566 continue;
567 }
568 split_huge_page_pmd(vma, addr, pmd);
569 if (pmd_none_or_trans_huge_or_clear_bad(pmd))
570 continue;
571 if (queue_pages_pte_range(vma, pmd, addr, next, nodes,
572 flags, private))
573 return -EIO;
574 } while (pmd++, addr = next, addr != end);
575 return 0;
576}
577
578static inline int queue_pages_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
579 unsigned long addr, unsigned long end,
580 const nodemask_t *nodes, unsigned long flags,
581 void *private)
582{
583 pud_t *pud;
584 unsigned long next;
585
586 pud = pud_offset(pgd, addr);
587 do {
588 next = pud_addr_end(addr, end);
589 if (pud_huge(*pud) && is_vm_hugetlb_page(vma))
590 continue;
591 if (pud_none_or_clear_bad(pud))
592 continue;
593 if (queue_pages_pmd_range(vma, pud, addr, next, nodes,
594 flags, private))
595 return -EIO;
596 } while (pud++, addr = next, addr != end);
597 return 0;
598}
599
600static inline int queue_pages_pgd_range(struct vm_area_struct *vma,
601 unsigned long addr, unsigned long end,
602 const nodemask_t *nodes, unsigned long flags,
603 void *private)
604{
605 pgd_t *pgd;
606 unsigned long next;
607
608 pgd = pgd_offset(vma->vm_mm, addr);
609 do {
610 next = pgd_addr_end(addr, end);
611 if (pgd_none_or_clear_bad(pgd))
612 continue;
613 if (queue_pages_pud_range(vma, pgd, addr, next, nodes,
614 flags, private))
615 return -EIO;
616 } while (pgd++, addr = next, addr != end);
617 return 0;
618}
619
620#ifdef CONFIG_NUMA_BALANCING
621/*
622 * This is used to mark a range of virtual addresses to be inaccessible.
623 * These are later cleared by a NUMA hinting fault. Depending on these
624 * faults, pages may be migrated for better NUMA placement.
625 *
626 * This is assuming that NUMA faults are handled using PROT_NONE. If
627 * an architecture makes a different choice, it will need further
628 * changes to the core.
629 */
630unsigned long change_prot_numa(struct vm_area_struct *vma,
631 unsigned long addr, unsigned long end)
632{
633 int nr_updated;
634
635 nr_updated = change_protection(vma, addr, end, vma->vm_page_prot, 0, 1);
636 if (nr_updated)
637 count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
638
639 return nr_updated;
640}
641#else
642static unsigned long change_prot_numa(struct vm_area_struct *vma,
643 unsigned long addr, unsigned long end)
644{
645 return 0;
646}
647#endif /* CONFIG_NUMA_BALANCING */
648
649/*
650 * Walk through page tables and collect pages to be migrated.
651 *
652 * If pages found in a given range are on a set of nodes (determined by
653 * @nodes and @flags), they are isolated and queued to the pagelist
654 * passed via @private.
655 */
656static struct vm_area_struct *
657queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
658 const nodemask_t *nodes, unsigned long flags, void *private)
659{
660 int err;
661 struct vm_area_struct *first, *vma, *prev;
662
663
664 first = find_vma(mm, start);
665 if (!first)
666 return ERR_PTR(-EFAULT);
667 prev = NULL;
668 for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
669 unsigned long endvma = vma->vm_end;
670
671 if (endvma > end)
672 endvma = end;
673 if (vma->vm_start > start)
674 start = vma->vm_start;
675
676 if (!(flags & MPOL_MF_DISCONTIG_OK)) {
677 if (!vma->vm_next && vma->vm_end < end)
678 return ERR_PTR(-EFAULT);
679 if (prev && prev->vm_end < vma->vm_start)
680 return ERR_PTR(-EFAULT);
681 }
682
683 if (flags & MPOL_MF_LAZY) {
684 change_prot_numa(vma, start, endvma);
685 goto next;
686 }
687
688 if ((flags & MPOL_MF_STRICT) ||
689 ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
690 vma_migratable(vma))) {
691
692 err = queue_pages_pgd_range(vma, start, endvma, nodes,
693 flags, private);
694 if (err) {
695 first = ERR_PTR(err);
696 break;
697 }
698 }
699next:
700 prev = vma;
701 }
702 return first;
703}
704
705/*
706 * Apply policy to a single VMA
707 * This must be called with the mmap_sem held for writing.
708 */
709static int vma_replace_policy(struct vm_area_struct *vma,
710 struct mempolicy *pol)
711{
712 int err;
713 struct mempolicy *old;
714 struct mempolicy *new;
715
716 pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
717 vma->vm_start, vma->vm_end, vma->vm_pgoff,
718 vma->vm_ops, vma->vm_file,
719 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
720
721 new = mpol_dup(pol);
722 if (IS_ERR(new))
723 return PTR_ERR(new);
724
725 if (vma->vm_ops && vma->vm_ops->set_policy) {
726 err = vma->vm_ops->set_policy(vma, new);
727 if (err)
728 goto err_out;
729 }
730
731 old = vma->vm_policy;
732 vma->vm_policy = new; /* protected by mmap_sem */
733 mpol_put(old);
734
735 return 0;
736 err_out:
737 mpol_put(new);
738 return err;
739}
740
741/* Step 2: apply policy to a range and do splits. */
742static int mbind_range(struct mm_struct *mm, unsigned long start,
743 unsigned long end, struct mempolicy *new_pol)
744{
745 struct vm_area_struct *next;
746 struct vm_area_struct *prev;
747 struct vm_area_struct *vma;
748 int err = 0;
749 pgoff_t pgoff;
750 unsigned long vmstart;
751 unsigned long vmend;
752
753 vma = find_vma(mm, start);
754 if (!vma || vma->vm_start > start)
755 return -EFAULT;
756
757 prev = vma->vm_prev;
758 if (start > vma->vm_start)
759 prev = vma;
760
761 for (; vma && vma->vm_start < end; prev = vma, vma = next) {
762 next = vma->vm_next;
763 vmstart = max(start, vma->vm_start);
764 vmend = min(end, vma->vm_end);
765
766 if (mpol_equal(vma_policy(vma), new_pol))
767 continue;
768
769 pgoff = vma->vm_pgoff +
770 ((vmstart - vma->vm_start) >> PAGE_SHIFT);
771 prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
772 vma->anon_vma, vma->vm_file, pgoff,
773 new_pol);
774 if (prev) {
775 vma = prev;
776 next = vma->vm_next;
777 if (mpol_equal(vma_policy(vma), new_pol))
778 continue;
779 /* vma_merge() joined vma && vma->next, case 8 */
780 goto replace;
781 }
782 if (vma->vm_start != vmstart) {
783 err = split_vma(vma->vm_mm, vma, vmstart, 1);
784 if (err)
785 goto out;
786 }
787 if (vma->vm_end != vmend) {
788 err = split_vma(vma->vm_mm, vma, vmend, 0);
789 if (err)
790 goto out;
791 }
792 replace:
793 err = vma_replace_policy(vma, new_pol);
794 if (err)
795 goto out;
796 }
797
798 out:
799 return err;
800}
801
802/* Set the process memory policy */
803static long do_set_mempolicy(unsigned short mode, unsigned short flags,
804 nodemask_t *nodes)
805{
806 struct mempolicy *new, *old;
807 struct mm_struct *mm = current->mm;
808 NODEMASK_SCRATCH(scratch);
809 int ret;
810
811 if (!scratch)
812 return -ENOMEM;
813
814 new = mpol_new(mode, flags, nodes);
815 if (IS_ERR(new)) {
816 ret = PTR_ERR(new);
817 goto out;
818 }
819 /*
820 * prevent changing our mempolicy while show_numa_maps()
821 * is using it.
822 * Note: do_set_mempolicy() can be called at init time
823 * with no 'mm'.
824 */
825 if (mm)
826 down_write(&mm->mmap_sem);
827 task_lock(current);
828 ret = mpol_set_nodemask(new, nodes, scratch);
829 if (ret) {
830 task_unlock(current);
831 if (mm)
832 up_write(&mm->mmap_sem);
833 mpol_put(new);
834 goto out;
835 }
836 old = current->mempolicy;
837 current->mempolicy = new;
838 if (new && new->mode == MPOL_INTERLEAVE &&
839 nodes_weight(new->v.nodes))
840 current->il_next = first_node(new->v.nodes);
841 task_unlock(current);
842 if (mm)
843 up_write(&mm->mmap_sem);
844
845 mpol_put(old);
846 ret = 0;
847out:
848 NODEMASK_SCRATCH_FREE(scratch);
849 return ret;
850}
851
852/*
853 * Return nodemask for policy for get_mempolicy() query
854 *
855 * Called with task's alloc_lock held
856 */
857static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
858{
859 nodes_clear(*nodes);
860 if (p == &default_policy)
861 return;
862
863 switch (p->mode) {
864 case MPOL_BIND:
865 /* Fall through */
866 case MPOL_INTERLEAVE:
867 *nodes = p->v.nodes;
868 break;
869 case MPOL_PREFERRED:
870 if (!(p->flags & MPOL_F_LOCAL))
871 node_set(p->v.preferred_node, *nodes);
872 /* else return empty node mask for local allocation */
873 break;
874 default:
875 BUG();
876 }
877}
878
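/*
 * Fault in the page backing @addr via get_user_pages() and return the
 * node id it is currently allocated on (or a negative error).
 */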
879static int lookup_node(struct mm_struct *mm, unsigned long addr)
880{
881 struct page *p;
882 int err;
883
884 err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
885 if (err >= 0) {
886 err = page_to_nid(p);
887 put_page(p);
888 }
889 return err;
890}
891
892/* Retrieve NUMA policy */
893static long do_get_mempolicy(int *policy, nodemask_t *nmask,
894 unsigned long addr, unsigned long flags)
895{
896 int err;
897 struct mm_struct *mm = current->mm;
898 struct vm_area_struct *vma = NULL;
899 struct mempolicy *pol = current->mempolicy;
900
901 if (flags &
902 ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
903 return -EINVAL;
904
905 if (flags & MPOL_F_MEMS_ALLOWED) {
906 if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
907 return -EINVAL;
908 *policy = 0; /* just so it's initialized */
909 task_lock(current);
910 *nmask = cpuset_current_mems_allowed;
911 task_unlock(current);
912 return 0;
913 }
914
915 if (flags & MPOL_F_ADDR) {
916 /*
917 * Do NOT fall back to task policy if the
918 * vma/shared policy at addr is NULL. We
919 * want to return MPOL_DEFAULT in this case.
920 */
921 down_read(&mm->mmap_sem);
922 vma = find_vma_intersection(mm, addr, addr+1);
923 if (!vma) {
924 up_read(&mm->mmap_sem);
925 return -EFAULT;
926 }
927 if (vma->vm_ops && vma->vm_ops->get_policy)
928 pol = vma->vm_ops->get_policy(vma, addr);
929 else
930 pol = vma->vm_policy;
931 } else if (addr)
932 return -EINVAL;
933
934 if (!pol)
935 pol = &default_policy; /* indicates default behavior */
936
937 if (flags & MPOL_F_NODE) {
938 if (flags & MPOL_F_ADDR) {
939 err = lookup_node(mm, addr);
940 if (err < 0)
941 goto out;
942 *policy = err;
943 } else if (pol == current->mempolicy &&
944 pol->mode == MPOL_INTERLEAVE) {
945 *policy = current->il_next;
946 } else {
947 err = -EINVAL;
948 goto out;
949 }
950 } else {
951 *policy = pol == &default_policy ? MPOL_DEFAULT :
952 pol->mode;
953 /*
954 * Internal mempolicy flags must be masked off before exposing
955 * the policy to userspace.
956 */
957 *policy |= (pol->flags & MPOL_MODE_FLAGS);
958 }
959
960 if (vma) {
961		up_read(&current->mm->mmap_sem);
962 vma = NULL;
963 }
964
965 err = 0;
966 if (nmask) {
967 if (mpol_store_user_nodemask(pol)) {
968 *nmask = pol->w.user_nodemask;
969 } else {
970 task_lock(current);
971 get_policy_nodemask(pol, nmask);
972 task_unlock(current);
973 }
974 }
975
976 out:
977 mpol_cond_put(pol);
978 if (vma)
979		up_read(&current->mm->mmap_sem);
980 return err;
981}
982
983#ifdef CONFIG_MIGRATION
984/*
985 * page migration
986 */
987static void migrate_page_add(struct page *page, struct list_head *pagelist,
988 unsigned long flags)
989{
990 /*
991 * Avoid migrating a page that is shared with others.
992 */
993 if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
994 if (!isolate_lru_page(page)) {
995 list_add_tail(&page->lru, pagelist);
996 inc_zone_page_state(page, NR_ISOLATED_ANON +
997 page_is_file_cache(page));
998 }
999 }
1000}
1001
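/*
 * Allocation callback for migrate_pages(): allocate the replacement
 * page on the target @node, using the hugetlb allocator for huge pages.
 */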
1002static struct page *new_node_page(struct page *page, unsigned long node, int **x)
1003{
1004 if (PageHuge(page))
1005 return alloc_huge_page_node(page_hstate(compound_head(page)),
1006 node);
1007 else
1008 return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
1009}
1010
1011/*
1012 * Migrate pages from one node to a target node.
1013 * Returns error or the number of pages not migrated.
1014 */
1015static int migrate_to_node(struct mm_struct *mm, int source, int dest,
1016 int flags)
1017{
1018 nodemask_t nmask;
1019 LIST_HEAD(pagelist);
1020 int err = 0;
1021
1022 nodes_clear(nmask);
1023 node_set(source, nmask);
1024
1025 /*
1026 * This does not "check" the range but isolates all pages that
1027 * need migration. Between passing in the full user address
1028 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
1029 */
1030 VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
1031 queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
1032 flags | MPOL_MF_DISCONTIG_OK, &pagelist);
1033
1034 if (!list_empty(&pagelist)) {
1035 err = migrate_pages(&pagelist, new_node_page, dest,
1036 MIGRATE_SYNC, MR_SYSCALL);
1037 if (err)
1038 putback_movable_pages(&pagelist);
1039 }
1040
1041 return err;
1042}
1043
1044/*
1045 * Move pages between the two nodesets so as to preserve the physical
1046 * layout as much as possible.
1047 *
1048 * Returns the number of pages that could not be moved.
1049 */
1050int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1051 const nodemask_t *to, int flags)
1052{
1053 int busy = 0;
1054 int err;
1055 nodemask_t tmp;
1056
1057 err = migrate_prep();
1058 if (err)
1059 return err;
1060
1061 down_read(&mm->mmap_sem);
1062
1063 err = migrate_vmas(mm, from, to, flags);
1064 if (err)
1065 goto out;
1066
1067 /*
1068 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
1069 * bit in 'to' is not also set in 'tmp'. Clear the found 'source'
1070 * bit in 'tmp', and return that <source, dest> pair for migration.
1071 * The pair of nodemasks 'to' and 'from' define the map.
1072 *
1073 * If no pair of bits is found that way, fallback to picking some
1074 * pair of 'source' and 'dest' bits that are not the same. If the
1075 * 'source' and 'dest' bits are the same, this represents a node
1076 * that will be migrating to itself, so no pages need move.
1077 *
1078 * If no bits are left in 'tmp', or if all remaining bits left
1079 * in 'tmp' correspond to the same bit in 'to', return false
1080 * (nothing left to migrate).
1081 *
1082 * This lets us pick a pair of nodes to migrate between, such that
1083 * if possible the dest node is not already occupied by some other
1084 * source node, minimizing the risk of overloading the memory on a
1085 * node that would happen if we migrated incoming memory to a node
1086 * before migrating outgoing memory source that same node.
1087 *
1088 * A single scan of tmp is sufficient. As we go, we remember the
1089 * most recent <s, d> pair that moved (s != d). If we find a pair
1090 * that not only moved, but what's better, moved to an empty slot
1091 * (d is not set in tmp), then we break out immediately with that pair.
1092 * Otherwise when we finish scanning from_tmp, we at least have the
1093 * most recent <s, d> pair that moved. If we get all the way through
1094 * the scan of tmp without finding any node that moved, much less
1095 * moved to an empty node, then there is nothing left worth migrating.
1096 */
1097
1098 tmp = *from;
1099 while (!nodes_empty(tmp)) {
1100 int s,d;
1101 int source = NUMA_NO_NODE;
1102 int dest = 0;
1103
1104 for_each_node_mask(s, tmp) {
1105
1106 /*
1107 * do_migrate_pages() tries to maintain the relative
1108 * node relationship of the pages established between
1109 * threads and memory areas.
1110 *
1111 * However if the number of source nodes is not equal to
1112 * the number of destination nodes we can not preserve
1113 * this node relative relationship. In that case, skip
1114 * copying memory from a node that is in the destination
1115 * mask.
1116 *
1117 * Example: [2,3,4] -> [3,4,5] moves everything.
1118 * [0-7] -> [3,4,5] moves only 0,1,2,6,7.
1119 */
1120
1121 if ((nodes_weight(*from) != nodes_weight(*to)) &&
1122 (node_isset(s, *to)))
1123 continue;
1124
1125 d = node_remap(s, *from, *to);
1126 if (s == d)
1127 continue;
1128
1129 source = s; /* Node moved. Memorize */
1130 dest = d;
1131
1132 /* dest not in remaining from nodes? */
1133 if (!node_isset(dest, tmp))
1134 break;
1135 }
1136 if (source == NUMA_NO_NODE)
1137 break;
1138
1139 node_clear(source, tmp);
1140 err = migrate_to_node(mm, source, dest, flags);
1141 if (err > 0)
1142 busy += err;
1143 if (err < 0)
1144 break;
1145 }
1146out:
1147 up_read(&mm->mmap_sem);
1148 if (err < 0)
1149 return err;
1150 return busy;
1151
1152}
1153
1154/*
1155 * Allocate a new page for page migration based on vma policy.
1156 * Start assuming that page is mapped by vma pointed to by @private.
1157 * Search forward from there, if not. N.B., this assumes that the
1158 * list of pages handed to migrate_pages()--which is how we get here--
1159 * is in virtual address order.
1160 */
1161static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
1162{
1163 struct vm_area_struct *vma = (struct vm_area_struct *)private;
1164 unsigned long uninitialized_var(address);
1165
1166 while (vma) {
1167 address = page_address_in_vma(page, vma);
1168 if (address != -EFAULT)
1169 break;
1170 vma = vma->vm_next;
1171 }
1172
1173 if (PageHuge(page)) {
1174 BUG_ON(!vma);
1175 return alloc_huge_page_noerr(vma, address, 1);
1176 }
1177 /*
1178 * if !vma, alloc_page_vma() will use task or system default policy
1179 */
1180 return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
1181}
1182#else
1183
1184static void migrate_page_add(struct page *page, struct list_head *pagelist,
1185 unsigned long flags)
1186{
1187}
1188
1189int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1190 const nodemask_t *to, int flags)
1191{
1192 return -ENOSYS;
1193}
1194
1195static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
1196{
1197 return NULL;
1198}
1199#endif
1200
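/*
 * Backend for the mbind(2) system call: validate the arguments, build
 * the new policy, queue any misplaced pages found in [start, start+len),
 * apply the policy to the range with mbind_range(), and finally migrate
 * the queued pages (reporting -EIO under MPOL_MF_STRICT if some could
 * not be moved).
 */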
1201static long do_mbind(unsigned long start, unsigned long len,
1202 unsigned short mode, unsigned short mode_flags,
1203 nodemask_t *nmask, unsigned long flags)
1204{
1205 struct vm_area_struct *vma;
1206 struct mm_struct *mm = current->mm;
1207 struct mempolicy *new;
1208 unsigned long end;
1209 int err;
1210 LIST_HEAD(pagelist);
1211
1212 if (flags & ~(unsigned long)MPOL_MF_VALID)
1213 return -EINVAL;
1214 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1215 return -EPERM;
1216
1217 if (start & ~PAGE_MASK)
1218 return -EINVAL;
1219
1220 if (mode == MPOL_DEFAULT)
1221 flags &= ~MPOL_MF_STRICT;
1222
1223 len = (len + PAGE_SIZE - 1) & PAGE_MASK;
1224 end = start + len;
1225
1226 if (end < start)
1227 return -EINVAL;
1228 if (end == start)
1229 return 0;
1230
1231 new = mpol_new(mode, mode_flags, nmask);
1232 if (IS_ERR(new))
1233 return PTR_ERR(new);
1234
1235 if (flags & MPOL_MF_LAZY)
1236 new->flags |= MPOL_F_MOF;
1237
1238 /*
1239 * If we are using the default policy then operation
1240 * on discontinuous address spaces is okay after all
1241 */
1242 if (!new)
1243 flags |= MPOL_MF_DISCONTIG_OK;
1244
1245 pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1246 start, start + len, mode, mode_flags,
1247 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
1248
1249 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1250
1251 err = migrate_prep();
1252 if (err)
1253 goto mpol_out;
1254 }
1255 {
1256 NODEMASK_SCRATCH(scratch);
1257 if (scratch) {
1258 down_write(&mm->mmap_sem);
1259 task_lock(current);
1260 err = mpol_set_nodemask(new, nmask, scratch);
1261 task_unlock(current);
1262 if (err)
1263 up_write(&mm->mmap_sem);
1264 } else
1265 err = -ENOMEM;
1266 NODEMASK_SCRATCH_FREE(scratch);
1267 }
1268 if (err)
1269 goto mpol_out;
1270
1271 vma = queue_pages_range(mm, start, end, nmask,
1272 flags | MPOL_MF_INVERT, &pagelist);
1273
1274 err = PTR_ERR(vma); /* maybe ... */
1275 if (!IS_ERR(vma))
1276 err = mbind_range(mm, start, end, new);
1277
1278 if (!err) {
1279 int nr_failed = 0;
1280
1281 if (!list_empty(&pagelist)) {
1282 WARN_ON_ONCE(flags & MPOL_MF_LAZY);
1283 nr_failed = migrate_pages(&pagelist, new_vma_page,
1284 (unsigned long)vma,
1285 MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
1286 if (nr_failed)
1287 putback_movable_pages(&pagelist);
1288 }
1289
1290 if (nr_failed && (flags & MPOL_MF_STRICT))
1291 err = -EIO;
1292 } else
1293 putback_movable_pages(&pagelist);
1294
1295 up_write(&mm->mmap_sem);
1296 mpol_out:
1297 mpol_put(new);
1298 return err;
1299}
1300
1301/*
1302 * User space interface with variable sized bitmaps for nodelists.
1303 */
1304
1305/* Copy a node mask from user space. */
1306static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
1307 unsigned long maxnode)
1308{
1309 unsigned long k;
1310 unsigned long nlongs;
1311 unsigned long endmask;
1312
1313 --maxnode;
1314 nodes_clear(*nodes);
1315 if (maxnode == 0 || !nmask)
1316 return 0;
1317 if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1318 return -EINVAL;
1319
1320 nlongs = BITS_TO_LONGS(maxnode);
1321 if ((maxnode % BITS_PER_LONG) == 0)
1322 endmask = ~0UL;
1323 else
1324 endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
1325
1326 /* When the user specified more nodes than supported, just check
1327 that the unsupported part is all zero. */
1328 if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
1329 if (nlongs > PAGE_SIZE/sizeof(long))
1330 return -EINVAL;
1331 for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
1332 unsigned long t;
1333 if (get_user(t, nmask + k))
1334 return -EFAULT;
1335 if (k == nlongs - 1) {
1336 if (t & endmask)
1337 return -EINVAL;
1338 } else if (t)
1339 return -EINVAL;
1340 }
1341 nlongs = BITS_TO_LONGS(MAX_NUMNODES);
1342 endmask = ~0UL;
1343 }
1344
1345 if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
1346 return -EFAULT;
1347 nodes_addr(*nodes)[nlongs-1] &= endmask;
1348 return 0;
1349}
1350
1351/* Copy a kernel node mask to user space */
1352static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1353 nodemask_t *nodes)
1354{
1355 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1356 const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
1357
1358 if (copy > nbytes) {
1359 if (copy > PAGE_SIZE)
1360 return -EINVAL;
1361 if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1362 return -EFAULT;
1363 copy = nbytes;
1364 }
1365 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1366}
1367
1368SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1369 unsigned long, mode, unsigned long __user *, nmask,
1370 unsigned long, maxnode, unsigned, flags)
1371{
1372 nodemask_t nodes;
1373 int err;
1374 unsigned short mode_flags;
1375
1376 mode_flags = mode & MPOL_MODE_FLAGS;
1377 mode &= ~MPOL_MODE_FLAGS;
1378 if (mode >= MPOL_MAX)
1379 return -EINVAL;
1380 if ((mode_flags & MPOL_F_STATIC_NODES) &&
1381 (mode_flags & MPOL_F_RELATIVE_NODES))
1382 return -EINVAL;
1383 err = get_nodes(&nodes, nmask, maxnode);
1384 if (err)
1385 return err;
1386 return do_mbind(start, len, mode, mode_flags, &nodes, flags);
1387}
1388
1389/* Set the process memory policy */
1390SYSCALL_DEFINE3(set_mempolicy, int, mode, unsigned long __user *, nmask,
1391 unsigned long, maxnode)
1392{
1393 int err;
1394 nodemask_t nodes;
1395 unsigned short flags;
1396
1397 flags = mode & MPOL_MODE_FLAGS;
1398 mode &= ~MPOL_MODE_FLAGS;
1399 if ((unsigned int)mode >= MPOL_MAX)
1400 return -EINVAL;
1401 if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
1402 return -EINVAL;
1403 err = get_nodes(&nodes, nmask, maxnode);
1404 if (err)
1405 return err;
1406 return do_set_mempolicy(mode, flags, &nodes);
1407}
1408
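/*
 * migrate_pages(2): move all pages of the target process from one set
 * of nodes to another. Performs credential, cpuset and security checks
 * before handing off to do_migrate_pages().
 */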
1409SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1410 const unsigned long __user *, old_nodes,
1411 const unsigned long __user *, new_nodes)
1412{
1413 const struct cred *cred = current_cred(), *tcred;
1414 struct mm_struct *mm = NULL;
1415 struct task_struct *task;
1416 nodemask_t task_nodes;
1417 int err;
1418 nodemask_t *old;
1419 nodemask_t *new;
1420 NODEMASK_SCRATCH(scratch);
1421
1422 if (!scratch)
1423 return -ENOMEM;
1424
1425 old = &scratch->mask1;
1426 new = &scratch->mask2;
1427
1428 err = get_nodes(old, old_nodes, maxnode);
1429 if (err)
1430 goto out;
1431
1432 err = get_nodes(new, new_nodes, maxnode);
1433 if (err)
1434 goto out;
1435
1436 /* Find the mm_struct */
1437 rcu_read_lock();
1438 task = pid ? find_task_by_vpid(pid) : current;
1439 if (!task) {
1440 rcu_read_unlock();
1441 err = -ESRCH;
1442 goto out;
1443 }
1444 get_task_struct(task);
1445
1446 err = -EINVAL;
1447
1448 /*
1449 * Check if this process has the right to modify the specified
1450 * process. The right exists if the process has administrative
1451 * capabilities, superuser privileges or the same
1452 * userid as the target process.
1453 */
1454 tcred = __task_cred(task);
1455 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
1456 !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
1457 !capable(CAP_SYS_NICE)) {
1458 rcu_read_unlock();
1459 err = -EPERM;
1460 goto out_put;
1461 }
1462 rcu_read_unlock();
1463
1464 task_nodes = cpuset_mems_allowed(task);
1465 /* Is the user allowed to access the target nodes? */
1466 if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
1467 err = -EPERM;
1468 goto out_put;
1469 }
1470
1471 if (!nodes_subset(*new, node_states[N_MEMORY])) {
1472 err = -EINVAL;
1473 goto out_put;
1474 }
1475
1476 err = security_task_movememory(task);
1477 if (err)
1478 goto out_put;
1479
1480 mm = get_task_mm(task);
1481 put_task_struct(task);
1482
1483 if (!mm) {
1484 err = -EINVAL;
1485 goto out;
1486 }
1487
1488 err = do_migrate_pages(mm, old, new,
1489 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
1490
1491 mmput(mm);
1492out:
1493 NODEMASK_SCRATCH_FREE(scratch);
1494
1495 return err;
1496
1497out_put:
1498 put_task_struct(task);
1499 goto out;
1500
1501}
1502
1503
1504/* Retrieve NUMA policy */
1505SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1506 unsigned long __user *, nmask, unsigned long, maxnode,
1507 unsigned long, addr, unsigned long, flags)
1508{
1509 int err;
1510 int uninitialized_var(pval);
1511 nodemask_t nodes;
1512
1513 if (nmask != NULL && maxnode < MAX_NUMNODES)
1514 return -EINVAL;
1515
1516 err = do_get_mempolicy(&pval, &nodes, addr, flags);
1517
1518 if (err)
1519 return err;
1520
1521 if (policy && put_user(pval, policy))
1522 return -EFAULT;
1523
1524 if (nmask)
1525 err = copy_nodes_to_user(nmask, maxnode, &nodes);
1526
1527 return err;
1528}
1529
1530#ifdef CONFIG_COMPAT
1531
1532COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1533 compat_ulong_t __user *, nmask,
1534 compat_ulong_t, maxnode,
1535 compat_ulong_t, addr, compat_ulong_t, flags)
1536{
1537 long err;
1538 unsigned long __user *nm = NULL;
1539 unsigned long nr_bits, alloc_size;
1540 DECLARE_BITMAP(bm, MAX_NUMNODES);
1541
1542 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1543 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1544
1545 if (nmask)
1546 nm = compat_alloc_user_space(alloc_size);
1547
1548 err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1549
1550 if (!err && nmask) {
1551 unsigned long copy_size;
1552 copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
1553 err = copy_from_user(bm, nm, copy_size);
1554 /* ensure entire bitmap is zeroed */
1555 err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1556 err |= compat_put_bitmap(nmask, bm, nr_bits);
1557 }
1558
1559 return err;
1560}
1561
1562COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
1563 compat_ulong_t, maxnode)
1564{
1565 long err = 0;
1566 unsigned long __user *nm = NULL;
1567 unsigned long nr_bits, alloc_size;
1568 DECLARE_BITMAP(bm, MAX_NUMNODES);
1569
1570 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1571 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1572
1573 if (nmask) {
1574 err = compat_get_bitmap(bm, nmask, nr_bits);
1575 nm = compat_alloc_user_space(alloc_size);
1576 err |= copy_to_user(nm, bm, alloc_size);
1577 }
1578
1579 if (err)
1580 return -EFAULT;
1581
1582 return sys_set_mempolicy(mode, nm, nr_bits+1);
1583}
1584
1585COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
1586 compat_ulong_t, mode, compat_ulong_t __user *, nmask,
1587 compat_ulong_t, maxnode, compat_ulong_t, flags)
1588{
1589 long err = 0;
1590 unsigned long __user *nm = NULL;
1591 unsigned long nr_bits, alloc_size;
1592 nodemask_t bm;
1593
1594 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1595 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1596
1597 if (nmask) {
1598 err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
1599 nm = compat_alloc_user_space(alloc_size);
1600 err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
1601 }
1602
1603 if (err)
1604 return -EFAULT;
1605
1606 return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
1607}
1608
1609#endif
1610
1611/*
1612 * get_vma_policy(@task, @vma, @addr)
1613 * @task - task for fallback if vma policy == default
1614 * @vma - virtual memory area whose policy is sought
1615 * @addr - address in @vma for shared policy lookup
1616 *
1617 * Returns effective policy for a VMA at specified address.
1618 * Falls back to @task or system default policy, as necessary.
1619 * Current or other task's task mempolicy and non-shared vma policies must be
1620 * protected by task_lock(task) by the caller.
1621 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1622 * count--added by the get_policy() vm_op, as appropriate--to protect against
1623 * freeing by another task. It is the caller's responsibility to free the
1624 * extra reference for shared policies.
1625 */
1626struct mempolicy *get_vma_policy(struct task_struct *task,
1627 struct vm_area_struct *vma, unsigned long addr)
1628{
1629 struct mempolicy *pol = get_task_policy(task);
1630
1631 if (vma) {
1632 if (vma->vm_ops && vma->vm_ops->get_policy) {
1633 struct mempolicy *vpol = vma->vm_ops->get_policy(vma,
1634 addr);
1635 if (vpol)
1636 pol = vpol;
1637 } else if (vma->vm_policy) {
1638 pol = vma->vm_policy;
1639
1640 /*
1641 * shmem_alloc_page() passes MPOL_F_SHARED policy with
1642 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
1643 * count on these policies which will be dropped by
1644 * mpol_cond_put() later
1645 */
1646 if (mpol_needs_cond_ref(pol))
1647 mpol_get(pol);
1648 }
1649 }
1650 if (!pol)
1651 pol = &default_policy;
1652 return pol;
1653}
1654
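/*
 * Return true if the policy in effect for @vma (or, failing that, the
 * task or system default policy) has MPOL_F_MOF set, i.e. pages may be
 * migrated on fault.
 */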
1655bool vma_policy_mof(struct task_struct *task, struct vm_area_struct *vma)
1656{
1657 struct mempolicy *pol = get_task_policy(task);
1658 if (vma) {
1659 if (vma->vm_ops && vma->vm_ops->get_policy) {
1660 bool ret = false;
1661
1662 pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1663 if (pol && (pol->flags & MPOL_F_MOF))
1664 ret = true;
1665 mpol_cond_put(pol);
1666
1667 return ret;
1668 } else if (vma->vm_policy) {
1669 pol = vma->vm_policy;
1670 }
1671 }
1672
1673 if (!pol)
1674 return default_policy.flags & MPOL_F_MOF;
1675
1676 return pol->flags & MPOL_F_MOF;
1677}
1678
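/*
 * Decide whether the MPOL_BIND nodemask should be applied to an
 * allocation for @zone. If the policy's nodes contain movable memory
 * only, the policy is honoured only for ZONE_MOVABLE requests;
 * otherwise anything at or above policy_zone is policied.
 */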
1679static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1680{
1681 enum zone_type dynamic_policy_zone = policy_zone;
1682
1683 BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1684
1685 /*
1686 * if policy->v.nodes has movable memory only,
1687 * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only.
1688 *
1689 * policy->v.nodes intersects with node_states[N_MEMORY],
1690 * so if the following test fails, it implies
1691 * policy->v.nodes has movable memory only.
1692 */
1693 if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
1694 dynamic_policy_zone = ZONE_MOVABLE;
1695
1696 return zone >= dynamic_policy_zone;
1697}
1698
1699/*
1700 * Return a nodemask representing a mempolicy for filtering nodes for
1701 * page allocation
1702 */
1703static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
1704{
1705 /* Lower zones don't get a nodemask applied for MPOL_BIND */
1706 if (unlikely(policy->mode == MPOL_BIND) &&
1707 apply_policy_zone(policy, gfp_zone(gfp)) &&
1708 cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1709 return &policy->v.nodes;
1710
1711 return NULL;
1712}
1713
1714/* Return a zonelist indicated by gfp for node representing a mempolicy */
1715static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
1716 int nd)
1717{
1718 switch (policy->mode) {
1719 case MPOL_PREFERRED:
1720 if (!(policy->flags & MPOL_F_LOCAL))
1721 nd = policy->v.preferred_node;
1722 break;
1723 case MPOL_BIND:
1724 /*
1725 * Normally, MPOL_BIND allocations are node-local within the
1726 * allowed nodemask. However, if __GFP_THISNODE is set and the
1727 * current node isn't part of the mask, we use the zonelist for
1728 * the first node in the mask instead.
1729 */
1730 if (unlikely(gfp & __GFP_THISNODE) &&
1731 unlikely(!node_isset(nd, policy->v.nodes)))
1732 nd = first_node(policy->v.nodes);
1733 break;
1734 default:
1735 BUG();
1736 }
1737 return node_zonelist(nd, gfp);
1738}
1739
1740/* Do dynamic interleaving for a process */
1741static unsigned interleave_nodes(struct mempolicy *policy)
1742{
1743 unsigned nid, next;
1744 struct task_struct *me = current;
1745
1746 nid = me->il_next;
1747 next = next_node(nid, policy->v.nodes);
1748 if (next >= MAX_NUMNODES)
1749 next = first_node(policy->v.nodes);
1750 if (next < MAX_NUMNODES)
1751 me->il_next = next;
1752 return nid;
1753}
1754
1755/*
1756 * Depending on the memory policy provide a node from which to allocate the
1757 * next slab entry.
1758 */
1759unsigned int mempolicy_slab_node(void)
1760{
1761 struct mempolicy *policy;
1762 int node = numa_mem_id();
1763
1764 if (in_interrupt())
1765 return node;
1766
1767 policy = current->mempolicy;
1768 if (!policy || policy->flags & MPOL_F_LOCAL)
1769 return node;
1770
1771 switch (policy->mode) {
1772 case MPOL_PREFERRED:
1773 /*
1774 * handled MPOL_F_LOCAL above
1775 */
1776 return policy->v.preferred_node;
1777
1778 case MPOL_INTERLEAVE:
1779 return interleave_nodes(policy);
1780
1781 case MPOL_BIND: {
1782 /*
1783 * Follow bind policy behavior and start allocation at the
1784 * first node.
1785 */
1786 struct zonelist *zonelist;
1787 struct zone *zone;
1788 enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1789 zonelist = &NODE_DATA(node)->node_zonelists[0];
1790 (void)first_zones_zonelist(zonelist, highest_zoneidx,
1791 &policy->v.nodes,
1792 &zone);
1793 return zone ? zone->node : node;
1794 }
1795
1796 default:
1797 BUG();
1798 }
1799}
1800
1801/* Do static interleaving for a VMA with known offset. */
1802static unsigned offset_il_node(struct mempolicy *pol,
1803 struct vm_area_struct *vma, unsigned long off)
1804{
1805 unsigned nnodes = nodes_weight(pol->v.nodes);
1806 unsigned target;
1807 int c;
1808 int nid = NUMA_NO_NODE;
1809
1810 if (!nnodes)
1811 return numa_node_id();
1812 target = (unsigned int)off % nnodes;
1813 c = 0;
1814 do {
1815 nid = next_node(nid, pol->v.nodes);
1816 c++;
1817 } while (c <= target);
1818 return nid;
1819}
1820
1821/* Determine a node number for interleave */
1822static inline unsigned interleave_nid(struct mempolicy *pol,
1823 struct vm_area_struct *vma, unsigned long addr, int shift)
1824{
1825 if (vma) {
1826 unsigned long off;
1827
1828 /*
1829 * for small pages, there is no difference between
1830 * shift and PAGE_SHIFT, so the bit-shift is safe.
1831 * for huge pages, since vm_pgoff is in units of small
1832 * pages, we need to shift off the always 0 bits to get
1833 * a useful offset.
1834 */
1835 BUG_ON(shift < PAGE_SHIFT);
1836 off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
1837 off += (addr - vma->vm_start) >> shift;
1838 return offset_il_node(pol, vma, off);
1839 } else
1840 return interleave_nodes(pol);
1841}
1842
1843/*
1844 * Return the bit number of a random bit set in the nodemask.
1845 * (returns NUMA_NO_NODE if nodemask is empty)
1846 */
1847int node_random(const nodemask_t *maskp)
1848{
1849 int w, bit = NUMA_NO_NODE;
1850
1851 w = nodes_weight(*maskp);
1852 if (w)
1853 bit = bitmap_ord_to_pos(maskp->bits,
1854 get_random_int() % w, MAX_NUMNODES);
1855 return bit;
1856}
1857
1858#ifdef CONFIG_HUGETLBFS
1859/*
1860 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
1861 * @vma = virtual memory area whose policy is sought
1862 * @addr = address in @vma for shared policy lookup and interleave policy
1863 * @gfp_flags = for requested zone
1864 * @mpol = pointer to mempolicy pointer for reference counted mempolicy
1865 * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask
1866 *
1867 * Returns a zonelist suitable for a huge page allocation and a pointer
1868 * to the struct mempolicy for conditional unref after allocation.
1869 * If the effective policy is 'BIND', returns a pointer to the mempolicy's
1870 * @nodemask for filtering the zonelist.
1871 *
1872 * Must be protected by read_mems_allowed_begin()
1873 */
1874struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
1875 gfp_t gfp_flags, struct mempolicy **mpol,
1876 nodemask_t **nodemask)
1877{
1878 struct zonelist *zl;
1879
1880 *mpol = get_vma_policy(current, vma, addr);
1881 *nodemask = NULL; /* assume !MPOL_BIND */
1882
1883 if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
1884 zl = node_zonelist(interleave_nid(*mpol, vma, addr,
1885 huge_page_shift(hstate_vma(vma))), gfp_flags);
1886 } else {
1887 zl = policy_zonelist(gfp_flags, *mpol, numa_node_id());
1888 if ((*mpol)->mode == MPOL_BIND)
1889 *nodemask = &(*mpol)->v.nodes;
1890 }
1891 return zl;
1892}
1893
1894/*
1895 * init_nodemask_of_mempolicy
1896 *
1897 * If the current task's mempolicy is "default" [NULL], return 'false'
1898 * to indicate default policy. Otherwise, extract the policy nodemask
1899 * for 'bind' or 'interleave' policy into the argument nodemask, or
1900 * initialize the argument nodemask to contain the single node for
1901 * 'preferred' or 'local' policy and return 'true' to indicate presence
1902 * of non-default mempolicy.
1903 *
1904 * We don't bother with reference counting the mempolicy [mpol_get/put]
1905 * because the current task is examining its own mempolicy and a task's
1906 * mempolicy is only ever changed by the task itself.
1907 *
1908 * N.B., it is the caller's responsibility to free a returned nodemask.
1909 */
1910bool init_nodemask_of_mempolicy(nodemask_t *mask)
1911{
1912 struct mempolicy *mempolicy;
1913 int nid;
1914
1915 if (!(mask && current->mempolicy))
1916 return false;
1917
1918 task_lock(current);
1919 mempolicy = current->mempolicy;
1920 switch (mempolicy->mode) {
1921 case MPOL_PREFERRED:
1922 if (mempolicy->flags & MPOL_F_LOCAL)
1923 nid = numa_node_id();
1924 else
1925 nid = mempolicy->v.preferred_node;
1926 init_nodemask_of_node(mask, nid);
1927 break;
1928
1929 case MPOL_BIND:
1930 /* Fall through */
1931 case MPOL_INTERLEAVE:
1932 *mask = mempolicy->v.nodes;
1933 break;
1934
1935 default:
1936 BUG();
1937 }
1938 task_unlock(current);
1939
1940 return true;
1941}
1942#endif
1943
1944/*
1945 * mempolicy_nodemask_intersects
1946 *
1947 * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
1948 * policy. Otherwise, check for intersection between mask and the policy
1949 * nodemask for 'bind' or 'interleave' policy. For 'preferred' or 'local'
1950 * policy, always return true since it may allocate elsewhere on fallback.
1951 *
1952 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
1953 */
1954bool mempolicy_nodemask_intersects(struct task_struct *tsk,
1955 const nodemask_t *mask)
1956{
1957 struct mempolicy *mempolicy;
1958 bool ret = true;
1959
1960 if (!mask)
1961 return ret;
1962 task_lock(tsk);
1963 mempolicy = tsk->mempolicy;
1964 if (!mempolicy)
1965 goto out;
1966
1967 switch (mempolicy->mode) {
1968 case MPOL_PREFERRED:
1969 /*
1970 * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to
1971 * allocate from; they may fall back to other nodes when OOM.
1972 * Thus, it's possible for tsk to have allocated memory from
1973 * nodes in mask.
1974 */
1975 break;
1976 case MPOL_BIND:
1977 case MPOL_INTERLEAVE:
1978 ret = nodes_intersects(mempolicy->v.nodes, *mask);
1979 break;
1980 default:
1981 BUG();
1982 }
1983out:
1984 task_unlock(tsk);
1985 return ret;
1986}
1987
1988/* Allocate a page in interleaved policy.
1989 Own path because it needs to do special accounting. */
1990static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1991 unsigned nid)
1992{
1993 struct zonelist *zl;
1994 struct page *page;
1995
1996 zl = node_zonelist(nid, gfp);
1997 page = __alloc_pages(gfp, order, zl);
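	/*
	 * Count an interleave hit only when the page really came from the
	 * first (preferred) zone of the requested node; allocations that
	 * fell back elsewhere are not counted.
	 */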
1998 if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
1999 inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
2000 return page;
2001}
2002
2003/**
2004 * alloc_pages_vma - Allocate a page for a VMA.
2005 *
2006 * @gfp:
2007 * %GFP_USER user allocation.
2008 * %GFP_KERNEL kernel allocations,
2009 * %GFP_HIGHMEM highmem/user allocations,
2010 * %GFP_FS allocation should not call back into a file system.
2011 * %GFP_ATOMIC don't sleep.
2012 *
2013 * @order: Order of the GFP allocation.
2014 * @vma: Pointer to VMA or NULL if not available.
2015 * @addr: Virtual Address of the allocation. Must be inside the VMA.
2016 *
2017 * This function allocates a page from the kernel page pool and applies
2018 * a NUMA policy associated with the VMA or the current process.
2019 * When VMA is not NULL, the caller must hold down_read on the mmap_sem of the
2020 * mm_struct of the VMA to prevent it from going away. Should be used for
2021 * all allocations for pages that will be mapped into
2022 * user space. Returns NULL when no page can be allocated.
2023 *
2024 * Should be called with the vma's mmap_sem held.
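 *
 * Illustrative call (hypothetical caller, for documentation only):
 *
 *	page = alloc_pages_vma(GFP_HIGHUSER_MOVABLE, 0, vma, addr,
 *			       numa_node_id());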
2025 */
2026struct page *
2027alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
2028 unsigned long addr, int node)
2029{
2030 struct mempolicy *pol;
2031 struct page *page;
2032 unsigned int cpuset_mems_cookie;
2033
2034retry_cpuset:
2035 pol = get_vma_policy(current, vma, addr);
2036 cpuset_mems_cookie = read_mems_allowed_begin();
2037
2038 if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
2039 unsigned nid;
2040
2041 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
2042 mpol_cond_put(pol);
2043 page = alloc_page_interleave(gfp, order, nid);
2044 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
2045 goto retry_cpuset;
2046
2047 return page;
2048 }
2049 page = __alloc_pages_nodemask(gfp, order,
2050 policy_zonelist(gfp, pol, node),
2051 policy_nodemask(gfp, pol));
2052 if (unlikely(mpol_needs_cond_ref(pol)))
2053 __mpol_put(pol);
2054 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
2055 goto retry_cpuset;
2056 return page;
2057}
2058
2059/**
2060 * alloc_pages_current - Allocate pages.
2061 *
2062 * @gfp:
2063 * %GFP_USER user allocation,
2064 * %GFP_KERNEL kernel allocation,
2065 * %GFP_HIGHMEM highmem allocation,
2066 * %GFP_FS don't call back into a file system.
2067 * %GFP_ATOMIC don't sleep.
2068 * @order: Power of two of allocation size in pages. 0 is a single page.
2069 *
2070 * Allocate a page from the kernel page pool. When not in
2071 * interrupt context, apply the current process' NUMA policy.
2072 * Returns NULL when no page can be allocated.
2073 *
2074 * Don't call cpuset_update_task_memory_state() unless
2075 * 1) it's ok to take cpuset_sem (can WAIT), and
2076 * 2) allocating for current task (not interrupt).
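 *
 * Illustrative note: on NUMA kernels the generic alloc_pages(gfp, order)
 * helper is expected to resolve to this function, so e.g.
 * alloc_pages(GFP_KERNEL, 0) allocates one page under the caller's policy.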
2077 */
2078struct page *alloc_pages_current(gfp_t gfp, unsigned order)
2079{
2080 struct mempolicy *pol = get_task_policy(current);
2081 struct page *page;
2082 unsigned int cpuset_mems_cookie;
2083
2084 if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
2085 pol = &default_policy;
2086
2087retry_cpuset:
2088 cpuset_mems_cookie = read_mems_allowed_begin();
2089
2090 /*
2091 * No reference counting needed for current->mempolicy
2092 * nor system default_policy
2093 */
2094 if (pol->mode == MPOL_INTERLEAVE)
2095 page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2096 else
2097 page = __alloc_pages_nodemask(gfp, order,
2098 policy_zonelist(gfp, pol, numa_node_id()),
2099 policy_nodemask(gfp, pol));
2100
2101 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
2102 goto retry_cpuset;
2103
2104 return page;
2105}
2106EXPORT_SYMBOL(alloc_pages_current);
2107
2108int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2109{
2110 struct mempolicy *pol = mpol_dup(vma_policy(src));
2111
2112 if (IS_ERR(pol))
2113 return PTR_ERR(pol);
2114 dst->vm_policy = pol;
2115 return 0;
2116}
2117
2118/*
2119 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
2120 * rebinds the mempolicy it's copying by calling mpol_rebind_policy()
2121 * with the mems_allowed returned by cpuset_mems_allowed(). This
2122 * keeps mempolicies cpuset relative after its cpuset moves. See
2123 * further kernel/cpuset.c update_nodemask().
2124 *
2125 * current's mempolicy may be rebound by the other task (the task that changes
2126 * cpuset's mems), so we needn't do rebind work for current task.
2127 */
2128
2129/* Slow path of a mempolicy duplicate */
2130struct mempolicy *__mpol_dup(struct mempolicy *old)
2131{
2132 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2133
2134 if (!new)
2135 return ERR_PTR(-ENOMEM);
2136
2137 /* task's mempolicy is protected by alloc_lock */
2138 if (old == current->mempolicy) {
2139 task_lock(current);
2140 *new = *old;
2141 task_unlock(current);
2142 } else
2143 *new = *old;
2144
2145 rcu_read_lock();
2146 if (current_cpuset_is_being_rebound()) {
2147 nodemask_t mems = cpuset_mems_allowed(current);
2148 if (new->flags & MPOL_F_REBINDING)
2149 mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2);
2150 else
2151 mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
2152 }
2153 rcu_read_unlock();
2154 atomic_set(&new->refcnt, 1);
2155 return new;
2156}
2157
2158/* Slow path of a mempolicy comparison */
2159bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
2160{
2161 if (!a || !b)
2162 return false;
2163 if (a->mode != b->mode)
2164 return false;
2165 if (a->flags != b->flags)
2166 return false;
2167 if (mpol_store_user_nodemask(a))
2168 if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2169 return false;
2170
2171 switch (a->mode) {
2172 case MPOL_BIND:
2173 /* Fall through */
2174 case MPOL_INTERLEAVE:
2175 return !!nodes_equal(a->v.nodes, b->v.nodes);
2176 case MPOL_PREFERRED:
2177 return a->v.preferred_node == b->v.preferred_node;
2178 default:
2179 BUG();
2180 return false;
2181 }
2182}
2183
2184/*
2185 * Shared memory backing store policy support.
2186 *
2187 * Remember policies even when nobody has shared memory mapped.
2188 * The policies are kept in Red-Black tree linked from the inode.
2189 * They are protected by the sp->lock spinlock, which should be held
2190 * for any accesses to the tree.
2191 */
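/*
 * Each sp_node describes the half-open page-offset range [start, end) of
 * the backing object; e.g. a policy covering file pages 4..7 is stored as
 * a node with start == 4 and end == 8 (illustrative).
 */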
2192
2193/* lookup first element intersecting start-end */
2194/* Caller holds sp->lock */
2195static struct sp_node *
2196sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2197{
2198 struct rb_node *n = sp->root.rb_node;
2199
2200 while (n) {
2201 struct sp_node *p = rb_entry(n, struct sp_node, nd);
2202
2203 if (start >= p->end)
2204 n = n->rb_right;
2205 else if (end <= p->start)
2206 n = n->rb_left;
2207 else
2208 break;
2209 }
2210 if (!n)
2211 return NULL;
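	/*
	 * We found some node overlapping [start, end); walk back towards
	 * lower offsets so the first (lowest-starting) overlapping node is
	 * returned.
	 */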
2212 for (;;) {
2213 struct sp_node *w = NULL;
2214 struct rb_node *prev = rb_prev(n);
2215 if (!prev)
2216 break;
2217 w = rb_entry(prev, struct sp_node, nd);
2218 if (w->end <= start)
2219 break;
2220 n = prev;
2221 }
2222 return rb_entry(n, struct sp_node, nd);
2223}
2224
2225/* Insert a new shared policy into the list. */
2226/* Caller holds sp->lock */
2227static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2228{
2229 struct rb_node **p = &sp->root.rb_node;
2230 struct rb_node *parent = NULL;
2231 struct sp_node *nd;
2232
2233 while (*p) {
2234 parent = *p;
2235 nd = rb_entry(parent, struct sp_node, nd);
2236 if (new->start < nd->start)
2237 p = &(*p)->rb_left;
2238 else if (new->end > nd->end)
2239 p = &(*p)->rb_right;
2240 else
2241 BUG();
2242 }
2243 rb_link_node(&new->nd, parent, p);
2244 rb_insert_color(&new->nd, &sp->root);
2245 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
2246 new->policy ? new->policy->mode : 0);
2247}
2248
2249/* Find shared policy intersecting idx */
2250struct mempolicy *
2251mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2252{
2253 struct mempolicy *pol = NULL;
2254 struct sp_node *sn;
2255
2256 if (!sp->root.rb_node)
2257 return NULL;
2258 spin_lock(&sp->lock);
2259 sn = sp_lookup(sp, idx, idx+1);
2260 if (sn) {
2261 mpol_get(sn->policy);
2262 pol = sn->policy;
2263 }
2264 spin_unlock(&sp->lock);
2265 return pol;
2266}
2267
2268static void sp_free(struct sp_node *n)
2269{
2270 mpol_put(n->policy);
2271 kmem_cache_free(sn_cache, n);
2272}
2273
2274/**
2275 * mpol_misplaced - check whether current page node is valid in policy
2276 *
2277 * @page - page to be checked
2278 * @vma - vm area where page mapped
2279 * @addr - virtual address where page mapped
2280 *
2281 * Look up the current policy node id for vma,addr and compare it to the
2282 * page's node id.
2283 *
2284 * Returns:
2285 * -1 - not misplaced, page is in the right node
2286 * node - node id where the page should be
2287 *
2288 * Policy determination "mimics" alloc_page_vma().
2289 * Called from fault path where we know the vma and faulting address.
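 *
 * Illustrative use from a NUMA hinting fault handler (hypothetical sketch):
 *
 *	target_nid = mpol_misplaced(page, vma, addr);
 *	if (target_nid != -1)
 *		... try to migrate @page to @target_nid ...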
2290 */
2291int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2292{
2293 struct mempolicy *pol;
2294 struct zone *zone;
2295 int curnid = page_to_nid(page);
2296 unsigned long pgoff;
2297 int thiscpu = raw_smp_processor_id();
2298 int thisnid = cpu_to_node(thiscpu);
2299 int polnid = -1;
2300 int ret = -1;
2301
2302 BUG_ON(!vma);
2303
2304 pol = get_vma_policy(current, vma, addr);
2305 if (!(pol->flags & MPOL_F_MOF))
2306 goto out;
2307
2308 switch (pol->mode) {
2309 case MPOL_INTERLEAVE:
2310 BUG_ON(addr >= vma->vm_end);
2311 BUG_ON(addr < vma->vm_start);
2312
2313 pgoff = vma->vm_pgoff;
2314 pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
2315 polnid = offset_il_node(pol, vma, pgoff);
2316 break;
2317
2318 case MPOL_PREFERRED:
2319 if (pol->flags & MPOL_F_LOCAL)
2320 polnid = numa_node_id();
2321 else
2322 polnid = pol->v.preferred_node;
2323 break;
2324
2325 case MPOL_BIND:
2326 /*
2327 * MPOL_BIND allows binding to multiple nodes.
2328 * Use the current page if it is in the policy nodemask,
2329 * else select the nearest allowed node, if any.
2330 * If no allowed nodes, use current [!misplaced].
2331 */
2332 if (node_isset(curnid, pol->v.nodes))
2333 goto out;
2334 (void)first_zones_zonelist(
2335 node_zonelist(numa_node_id(), GFP_HIGHUSER),
2336 gfp_zone(GFP_HIGHUSER),
2337 &pol->v.nodes, &zone);
2338 polnid = zone->node;
2339 break;
2340
2341 default:
2342 BUG();
2343 }
2344
2345 /* Migrate the page towards the node whose CPU is referencing it */
2346 if (pol->flags & MPOL_F_MORON) {
2347 polnid = thisnid;
2348
2349 if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
2350 goto out;
2351 }
2352
2353 if (curnid != polnid)
2354 ret = polnid;
2355out:
2356 mpol_cond_put(pol);
2357
2358 return ret;
2359}
2360
2361static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2362{
2363 pr_debug("deleting %lx-%lx\n", n->start, n->end);
2364 rb_erase(&n->nd, &sp->root);
2365 sp_free(n);
2366}
2367
2368static void sp_node_init(struct sp_node *node, unsigned long start,
2369 unsigned long end, struct mempolicy *pol)
2370{
2371 node->start = start;
2372 node->end = end;
2373 node->policy = pol;
2374}
2375
2376static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2377 struct mempolicy *pol)
2378{
2379 struct sp_node *n;
2380 struct mempolicy *newpol;
2381
2382 n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2383 if (!n)
2384 return NULL;
2385
2386 newpol = mpol_dup(pol);
2387 if (IS_ERR(newpol)) {
2388 kmem_cache_free(sn_cache, n);
2389 return NULL;
2390 }
2391 newpol->flags |= MPOL_F_SHARED;
2392 sp_node_init(n, start, end, newpol);
2393
2394 return n;
2395}
2396
2397/* Replace a policy range. */
2398static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2399 unsigned long end, struct sp_node *new)
2400{
2401 struct sp_node *n;
2402 struct sp_node *n_new = NULL;
2403 struct mempolicy *mpol_new = NULL;
2404 int ret = 0;
2405
2406restart:
2407 spin_lock(&sp->lock);
2408 n = sp_lookup(sp, start, end);
2409 /* Take care of old policies in the same range. */
2410 while (n && n->start < end) {
2411 struct rb_node *next = rb_next(&n->nd);
2412 if (n->start >= start) {
2413 if (n->end <= end)
2414 sp_delete(sp, n);
2415 else
2416 n->start = end;
2417 } else {
2418 /* Old policy spanning whole new range. */
2419 if (n->end > end) {
2420 if (!n_new)
2421 goto alloc_new;
2422
2423 *mpol_new = *n->policy;
2424 atomic_set(&mpol_new->refcnt, 1);
2425 sp_node_init(n_new, end, n->end, mpol_new);
2426 n->end = start;
2427 sp_insert(sp, n_new);
2428 n_new = NULL;
2429 mpol_new = NULL;
2430 break;
2431 } else
2432 n->end = start;
2433 }
2434 if (!next)
2435 break;
2436 n = rb_entry(next, struct sp_node, nd);
2437 }
2438 if (new)
2439 sp_insert(sp, new);
2440 spin_unlock(&sp->lock);
2441 ret = 0;
2442
2443err_out:
2444 if (mpol_new)
2445 mpol_put(mpol_new);
2446 if (n_new)
2447 kmem_cache_free(sn_cache, n_new);
2448
2449 return ret;
2450
2451alloc_new:
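	/*
	 * The allocations below may sleep, so drop sp->lock, allocate with
	 * GFP_KERNEL and then restart the lookup: the tree may have changed
	 * while the lock was released.
	 */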
2452 spin_unlock(&sp->lock);
2453 ret = -ENOMEM;
2454 n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2455 if (!n_new)
2456 goto err_out;
2457 mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2458 if (!mpol_new)
2459 goto err_out;
2460 goto restart;
2461}
2462
2463/**
2464 * mpol_shared_policy_init - initialize shared policy for inode
2465 * @sp: pointer to inode shared policy
2466 * @mpol: struct mempolicy to install
2467 *
2468 * Install non-NULL @mpol in inode's shared policy rb-tree.
2469 * On entry, the current task has a reference on a non-NULL @mpol.
2470 * This must be released on exit.
2471 * This is called at get_inode() time, so we can use GFP_KERNEL.
2472 */
2473void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2474{
2475 int ret;
2476
2477 sp->root = RB_ROOT; /* empty tree == default mempolicy */
2478 spin_lock_init(&sp->lock);
2479
2480 if (mpol) {
2481 struct vm_area_struct pvma;
2482 struct mempolicy *new;
2483 NODEMASK_SCRATCH(scratch);
2484
2485 if (!scratch)
2486 goto put_mpol;
2487 /* contextualize the tmpfs mount point mempolicy */
2488 new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
2489 if (IS_ERR(new))
2490 goto free_scratch; /* no valid nodemask intersection */
2491
2492 task_lock(current);
2493 ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
2494 task_unlock(current);
2495 if (ret)
2496 goto put_new;
2497
2498 /* Create pseudo-vma that contains just the policy */
2499 memset(&pvma, 0, sizeof(struct vm_area_struct));
2500 pvma.vm_end = TASK_SIZE; /* policy covers entire file */
2501 mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
2502
2503put_new:
2504 mpol_put(new); /* drop initial ref */
2505free_scratch:
2506 NODEMASK_SCRATCH_FREE(scratch);
2507put_mpol:
2508 mpol_put(mpol); /* drop our incoming ref on sb mpol */
2509 }
2510}
2511
2512int mpol_set_shared_policy(struct shared_policy *info,
2513 struct vm_area_struct *vma, struct mempolicy *npol)
2514{
2515 int err;
2516 struct sp_node *new = NULL;
2517 unsigned long sz = vma_pages(vma);
2518
2519 pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
2520 vma->vm_pgoff,
2521 sz, npol ? npol->mode : -1,
2522 npol ? npol->flags : -1,
2523 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
2524
2525 if (npol) {
2526 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2527 if (!new)
2528 return -ENOMEM;
2529 }
2530 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2531 if (err && new)
2532 sp_free(new);
2533 return err;
2534}
2535
2536/* Free a backing policy store on inode delete. */
2537void mpol_free_shared_policy(struct shared_policy *p)
2538{
2539 struct sp_node *n;
2540 struct rb_node *next;
2541
2542 if (!p->root.rb_node)
2543 return;
2544 spin_lock(&p->lock);
2545 next = rb_first(&p->root);
2546 while (next) {
2547 n = rb_entry(next, struct sp_node, nd);
2548 next = rb_next(&n->nd);
2549 sp_delete(p, n);
2550 }
2551 spin_unlock(&p->lock);
2552}
2553
2554#ifdef CONFIG_NUMA_BALANCING
2555static int __initdata numabalancing_override;
2556
2557static void __init check_numabalancing_enable(void)
2558{
2559 bool numabalancing_default = false;
2560
2561 if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
2562 numabalancing_default = true;
2563
2564 /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2565 if (numabalancing_override)
2566 set_numabalancing_state(numabalancing_override == 1);
2567
2568 if (nr_node_ids > 1 && !numabalancing_override) {
2569 pr_info("%s automatic NUMA balancing. "
2570 "Configure with numa_balancing= or the "
2571 "kernel.numa_balancing sysctl\n",
2572 numabalancing_default ? "Enabling" : "Disabling");
2573 set_numabalancing_state(numabalancing_default);
2574 }
2575}
2576
2577static int __init setup_numabalancing(char *str)
2578{
2579 int ret = 0;
2580 if (!str)
2581 goto out;
2582
2583 if (!strcmp(str, "enable")) {
2584 numabalancing_override = 1;
2585 ret = 1;
2586 } else if (!strcmp(str, "disable")) {
2587 numabalancing_override = -1;
2588 ret = 1;
2589 }
2590out:
2591 if (!ret)
2592 pr_warn("Unable to parse numa_balancing=\n");
2593
2594 return ret;
2595}
2596__setup("numa_balancing=", setup_numabalancing);
2597#else
2598static inline void __init check_numabalancing_enable(void)
2599{
2600}
2601#endif /* CONFIG_NUMA_BALANCING */
2602
2603/* assumes fs == KERNEL_DS */
2604void __init numa_policy_init(void)
2605{
2606 nodemask_t interleave_nodes;
2607 unsigned long largest = 0;
2608 int nid, prefer = 0;
2609
2610 policy_cache = kmem_cache_create("numa_policy",
2611 sizeof(struct mempolicy),
2612 0, SLAB_PANIC, NULL);
2613
2614 sn_cache = kmem_cache_create("shared_policy_node",
2615 sizeof(struct sp_node),
2616 0, SLAB_PANIC, NULL);
2617
2618 for_each_node(nid) {
2619 preferred_node_policy[nid] = (struct mempolicy) {
2620 .refcnt = ATOMIC_INIT(1),
2621 .mode = MPOL_PREFERRED,
2622 .flags = MPOL_F_MOF | MPOL_F_MORON,
2623 .v = { .preferred_node = nid, },
2624 };
2625 }
2626
2627 /*
2628 * Set interleaving policy for system init. Interleaving is only
2629 * enabled across suitably sized nodes (default is >= 16MB), or
2630 * fall back to the largest node if they're all smaller.
2631 */
2632 nodes_clear(interleave_nodes);
2633 for_each_node_state(nid, N_MEMORY) {
2634 unsigned long total_pages = node_present_pages(nid);
2635
2636 /* Preserve the largest node */
2637 if (largest < total_pages) {
2638 largest = total_pages;
2639 prefer = nid;
2640 }
2641
2642 /* Interleave this node? */
2643 if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2644 node_set(nid, interleave_nodes);
2645 }
2646
2647 /* All too small, use the largest */
2648 if (unlikely(nodes_empty(interleave_nodes)))
2649 node_set(prefer, interleave_nodes);
2650
2651 if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2652 pr_err("numa_policy_init: interleaving failed\n");
2653
2654 check_numabalancing_enable();
2655}
2656
2657/* Reset policy of current process to default */
2658void numa_default_policy(void)
2659{
2660 do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
2661}
2662
2663/*
2664 * Parse and format mempolicy from/to strings
2665 */
2666
2667/*
2668 * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag.
2669 */
2670static const char * const policy_modes[] =
2671{
2672 [MPOL_DEFAULT] = "default",
2673 [MPOL_PREFERRED] = "prefer",
2674 [MPOL_BIND] = "bind",
2675 [MPOL_INTERLEAVE] = "interleave",
2676 [MPOL_LOCAL] = "local",
2677};
2678
2679
2680#ifdef CONFIG_TMPFS
2681/**
2682 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
2683 * @str: string containing mempolicy to parse
2684 * @mpol: pointer to struct mempolicy pointer, returned on success.
2685 *
2686 * Format of input:
2687 * <mode>[=<flags>][:<nodelist>]
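 *
 * Illustrative examples (as used for the tmpfs "mpol=" mount option):
 *	"interleave:0-3"
 *	"bind=static:1,3"
 *	"prefer:2"
 *	"local"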
2688 *
2689 * On success, returns 0, else 1
2690 */
2691int mpol_parse_str(char *str, struct mempolicy **mpol)
2692{
2693 struct mempolicy *new = NULL;
2694 unsigned short mode;
2695 unsigned short mode_flags;
2696 nodemask_t nodes;
2697 char *nodelist = strchr(str, ':');
2698 char *flags = strchr(str, '=');
2699 int err = 1;
2700
2701 if (nodelist) {
2702 /* NUL-terminate mode or flags string */
2703 *nodelist++ = '\0';
2704 if (nodelist_parse(nodelist, nodes))
2705 goto out;
2706 if (!nodes_subset(nodes, node_states[N_MEMORY]))
2707 goto out;
2708 } else
2709 nodes_clear(nodes);
2710
2711 if (flags)
2712 *flags++ = '\0'; /* terminate mode string */
2713
2714 for (mode = 0; mode < MPOL_MAX; mode++) {
2715 if (!strcmp(str, policy_modes[mode])) {
2716 break;
2717 }
2718 }
2719 if (mode >= MPOL_MAX)
2720 goto out;
2721
2722 switch (mode) {
2723 case MPOL_PREFERRED:
2724 /*
2725 * Insist on a nodelist of one node only
2726 */
2727 if (nodelist) {
2728 char *rest = nodelist;
2729 while (isdigit(*rest))
2730 rest++;
2731 if (*rest)
2732 goto out;
2733 }
2734 break;
2735 case MPOL_INTERLEAVE:
2736 /*
2737 * Default to online nodes with memory if no nodelist
2738 */
2739 if (!nodelist)
2740 nodes = node_states[N_MEMORY];
2741 break;
2742 case MPOL_LOCAL:
2743 /*
2744 * Don't allow a nodelist; mpol_new() checks flags
2745 */
2746 if (nodelist)
2747 goto out;
2748 mode = MPOL_PREFERRED;
2749 break;
2750 case MPOL_DEFAULT:
2751 /*
2752 * Insist on an empty nodelist
2753 */
2754 if (!nodelist)
2755 err = 0;
2756 goto out;
2757 case MPOL_BIND:
2758 /*
2759 * Insist on a nodelist
2760 */
2761 if (!nodelist)
2762 goto out;
2763 }
2764
2765 mode_flags = 0;
2766 if (flags) {
2767 /*
2768 * Currently, we only support two mutually exclusive
2769 * mode flags.
2770 */
2771 if (!strcmp(flags, "static"))
2772 mode_flags |= MPOL_F_STATIC_NODES;
2773 else if (!strcmp(flags, "relative"))
2774 mode_flags |= MPOL_F_RELATIVE_NODES;
2775 else
2776 goto out;
2777 }
2778
2779 new = mpol_new(mode, mode_flags, &nodes);
2780 if (IS_ERR(new))
2781 goto out;
2782
2783 /*
2784 * Save nodes for mpol_to_str() to show the tmpfs mount options
2785 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2786 */
2787 if (mode != MPOL_PREFERRED)
2788 new->v.nodes = nodes;
2789 else if (nodelist)
2790 new->v.preferred_node = first_node(nodes);
2791 else
2792 new->flags |= MPOL_F_LOCAL;
2793
2794 /*
2795 * Save nodes for contextualization: this will be used to "clone"
2796 * the mempolicy in a specific context [cpuset] at a later time.
2797 */
2798 new->w.user_nodemask = nodes;
2799
2800 err = 0;
2801
2802out:
2803 /* Restore string for error message */
2804 if (nodelist)
2805 *--nodelist = ':';
2806 if (flags)
2807 *--flags = '=';
2808 if (!err)
2809 *mpol = new;
2810 return err;
2811}
2812#endif /* CONFIG_TMPFS */
2813
2814/**
2815 * mpol_to_str - format a mempolicy structure for printing
2816 * @buffer: to contain formatted mempolicy string
2817 * @maxlen: length of @buffer
2818 * @pol: pointer to mempolicy to be formatted
2819 *
2820 * Convert @pol into a string. If @buffer is too short, truncate the string.
2821 * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
2822 * longest flag, "relative", and to display at least a few node ids.
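 *
 * Example outputs (illustrative): "default", "prefer:2", "local",
 * "bind=static:1,3", "interleave:0-3".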
2823 */
2824void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
2825{
2826 char *p = buffer;
2827 nodemask_t nodes = NODE_MASK_NONE;
2828 unsigned short mode = MPOL_DEFAULT;
2829 unsigned short flags = 0;
2830
2831 if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
2832 mode = pol->mode;
2833 flags = pol->flags;
2834 }
2835
2836 switch (mode) {
2837 case MPOL_DEFAULT:
2838 break;
2839 case MPOL_PREFERRED:
2840 if (flags & MPOL_F_LOCAL)
2841 mode = MPOL_LOCAL;
2842 else
2843 node_set(pol->v.preferred_node, nodes);
2844 break;
2845 case MPOL_BIND:
2846 case MPOL_INTERLEAVE:
2847 nodes = pol->v.nodes;
2848 break;
2849 default:
2850 WARN_ON_ONCE(1);
2851 snprintf(p, maxlen, "unknown");
2852 return;
2853 }
2854
2855 p += snprintf(p, maxlen, "%s", policy_modes[mode]);
2856
2857 if (flags & MPOL_MODE_FLAGS) {
2858 p += snprintf(p, buffer + maxlen - p, "=");
2859
2860 /*
2861 * Currently, the only defined flags are mutually exclusive
2862 */
2863 if (flags & MPOL_F_STATIC_NODES)
2864 p += snprintf(p, buffer + maxlen - p, "static");
2865 else if (flags & MPOL_F_RELATIVE_NODES)
2866 p += snprintf(p, buffer + maxlen - p, "relative");
2867 }
2868
2869 if (!nodes_empty(nodes)) {
2870 p += snprintf(p, buffer + maxlen - p, ":");
2871 p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
2872 }
2873}
1// SPDX-License-Identifier: GPL-2.0-only
67
68#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
69
70#include <linux/mempolicy.h>
71#include <linux/pagewalk.h>
72#include <linux/highmem.h>
73#include <linux/hugetlb.h>
74#include <linux/kernel.h>
75#include <linux/sched.h>
76#include <linux/sched/mm.h>
77#include <linux/sched/numa_balancing.h>
78#include <linux/sched/task.h>
79#include <linux/nodemask.h>
80#include <linux/cpuset.h>
81#include <linux/slab.h>
82#include <linux/string.h>
83#include <linux/export.h>
84#include <linux/nsproxy.h>
85#include <linux/interrupt.h>
86#include <linux/init.h>
87#include <linux/compat.h>
88#include <linux/ptrace.h>
89#include <linux/swap.h>
90#include <linux/seq_file.h>
91#include <linux/proc_fs.h>
92#include <linux/migrate.h>
93#include <linux/ksm.h>
94#include <linux/rmap.h>
95#include <linux/security.h>
96#include <linux/syscalls.h>
97#include <linux/ctype.h>
98#include <linux/mm_inline.h>
99#include <linux/mmu_notifier.h>
100#include <linux/printk.h>
101#include <linux/swapops.h>
102
103#include <asm/tlbflush.h>
104#include <linux/uaccess.h>
105
106#include "internal.h"
107
108/* Internal flags */
109#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0) /* Skip checks for continuous vmas */
110#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */
111
112static struct kmem_cache *policy_cache;
113static struct kmem_cache *sn_cache;
114
115/* Highest zone. A specific allocation for a zone below that is not
116 policied. */
117enum zone_type policy_zone = 0;
118
119/*
120 * run-time system-wide default policy => local allocation
121 */
122static struct mempolicy default_policy = {
123 .refcnt = ATOMIC_INIT(1), /* never free it */
124 .mode = MPOL_LOCAL,
125};
126
127static struct mempolicy preferred_node_policy[MAX_NUMNODES];
128
129/**
130 * numa_map_to_online_node - Find closest online node
131 * @node: Node id to start the search
132 *
133 * Look up the next closest node by distance if @node is not online.
134 */
135int numa_map_to_online_node(int node)
136{
137 int min_dist = INT_MAX, dist, n, min_node;
138
139 if (node == NUMA_NO_NODE || node_online(node))
140 return node;
141
142 min_node = node;
143 for_each_online_node(n) {
144 dist = node_distance(node, n);
145 if (dist < min_dist) {
146 min_dist = dist;
147 min_node = n;
148 }
149 }
150
151 return min_node;
152}
153EXPORT_SYMBOL_GPL(numa_map_to_online_node);
154
155struct mempolicy *get_task_policy(struct task_struct *p)
156{
157 struct mempolicy *pol = p->mempolicy;
158 int node;
159
160 if (pol)
161 return pol;
162
163 node = numa_node_id();
164 if (node != NUMA_NO_NODE) {
165 pol = &preferred_node_policy[node];
166 /* preferred_node_policy is not initialised early in boot */
167 if (pol->mode)
168 return pol;
169 }
170
171 return &default_policy;
172}
173
174static const struct mempolicy_operations {
175 int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
176 void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
177} mpol_ops[MPOL_MAX];
178
179static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
180{
181 return pol->flags & MPOL_MODE_FLAGS;
182}
183
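/*
 * Map a user-supplied "relative" nodemask onto the currently allowed nodes.
 * Illustrative example: orig = {0,2} relative to rel = {4,5,6}: the mask is
 * first folded modulo nodes_weight(rel) == 3 (still {0,2}) and the remaining
 * bit positions are then mapped onto the set bits of rel, yielding {4,6}.
 */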
184static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
185 const nodemask_t *rel)
186{
187 nodemask_t tmp;
188 nodes_fold(tmp, *orig, nodes_weight(*rel));
189 nodes_onto(*ret, tmp, *rel);
190}
191
192static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
193{
194 if (nodes_empty(*nodes))
195 return -EINVAL;
196 pol->nodes = *nodes;
197 return 0;
198}
199
200static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
201{
202 if (nodes_empty(*nodes))
203 return -EINVAL;
204
205 nodes_clear(pol->nodes);
206 node_set(first_node(*nodes), pol->nodes);
207 return 0;
208}
209
210static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
211{
212 if (nodes_empty(*nodes))
213 return -EINVAL;
214 pol->nodes = *nodes;
215 return 0;
216}
217
218/*
219 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
220 * any, for the new policy. mpol_new() has already validated the nodes
221 * parameter with respect to the policy mode and flags.
222 *
223 * Must be called holding task's alloc_lock to protect task's mems_allowed
224 * and mempolicy. May also be called holding the mmap_lock for write.
225 */
226static int mpol_set_nodemask(struct mempolicy *pol,
227 const nodemask_t *nodes, struct nodemask_scratch *nsc)
228{
229 int ret;
230
231 /*
232 * Default (pol==NULL) and local memory policies are not subject
233 * to any remapping. They also do not need any special
234 * constructor.
235 */
236 if (!pol || pol->mode == MPOL_LOCAL)
237 return 0;
238
239 /* Check N_MEMORY */
240 nodes_and(nsc->mask1,
241 cpuset_current_mems_allowed, node_states[N_MEMORY]);
242
243 VM_BUG_ON(!nodes);
244
245 if (pol->flags & MPOL_F_RELATIVE_NODES)
246 mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
247 else
248 nodes_and(nsc->mask2, *nodes, nsc->mask1);
249
250 if (mpol_store_user_nodemask(pol))
251 pol->w.user_nodemask = *nodes;
252 else
253 pol->w.cpuset_mems_allowed = cpuset_current_mems_allowed;
254
255 ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
256 return ret;
257}
258
259/*
260 * This function just creates a new policy, does some checks and simple
261 * initialization. You must invoke mpol_set_nodemask() to set nodes.
262 */
263static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
264 nodemask_t *nodes)
265{
266 struct mempolicy *policy;
267
268 pr_debug("setting mode %d flags %d nodes[0] %lx\n",
269 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
270
271 if (mode == MPOL_DEFAULT) {
272 if (nodes && !nodes_empty(*nodes))
273 return ERR_PTR(-EINVAL);
274 return NULL;
275 }
276 VM_BUG_ON(!nodes);
277
278 /*
279 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
280 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
281 * All other modes require a valid pointer to a non-empty nodemask.
282 */
283 if (mode == MPOL_PREFERRED) {
284 if (nodes_empty(*nodes)) {
285 if (((flags & MPOL_F_STATIC_NODES) ||
286 (flags & MPOL_F_RELATIVE_NODES)))
287 return ERR_PTR(-EINVAL);
288
289 mode = MPOL_LOCAL;
290 }
291 } else if (mode == MPOL_LOCAL) {
292 if (!nodes_empty(*nodes) ||
293 (flags & MPOL_F_STATIC_NODES) ||
294 (flags & MPOL_F_RELATIVE_NODES))
295 return ERR_PTR(-EINVAL);
296 } else if (nodes_empty(*nodes))
297 return ERR_PTR(-EINVAL);
298 policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
299 if (!policy)
300 return ERR_PTR(-ENOMEM);
301 atomic_set(&policy->refcnt, 1);
302 policy->mode = mode;
303 policy->flags = flags;
304
305 return policy;
306}
307
308/* Slow path of a mpol destructor. */
309void __mpol_put(struct mempolicy *p)
310{
311 if (!atomic_dec_and_test(&p->refcnt))
312 return;
313 kmem_cache_free(policy_cache, p);
314}
315
316static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
317{
318}
319
320static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
321{
322 nodemask_t tmp;
323
324 if (pol->flags & MPOL_F_STATIC_NODES)
325 nodes_and(tmp, pol->w.user_nodemask, *nodes);
326 else if (pol->flags & MPOL_F_RELATIVE_NODES)
327 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
328 else {
329 nodes_remap(tmp, pol->nodes, pol->w.cpuset_mems_allowed,
330 *nodes);
331 pol->w.cpuset_mems_allowed = *nodes;
332 }
333
334 if (nodes_empty(tmp))
335 tmp = *nodes;
336
337 pol->nodes = tmp;
338}
339
340static void mpol_rebind_preferred(struct mempolicy *pol,
341 const nodemask_t *nodes)
342{
343 pol->w.cpuset_mems_allowed = *nodes;
344}
345
346/*
347 * mpol_rebind_policy - Migrate a policy to a different set of nodes
348 *
349 * Per-vma policies are protected by mmap_lock. Allocations using per-task
350 * policies are protected by task->mems_allowed_seq to prevent a premature
351 * OOM/allocation failure due to parallel nodemask modification.
352 */
353static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
354{
355 if (!pol)
356 return;
357 if (!mpol_store_user_nodemask(pol) &&
358 nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
359 return;
360
361 mpol_ops[pol->mode].rebind(pol, newmask);
362}
363
364/*
365 * Wrapper for mpol_rebind_policy() that just requires task
366 * pointer, and updates task mempolicy.
367 *
368 * Called with task's alloc_lock held.
369 */
370
371void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
372{
373 mpol_rebind_policy(tsk->mempolicy, new);
374}
375
376/*
377 * Rebind each vma in mm to new nodemask.
378 *
379 * Call holding a reference to mm. Takes mm->mmap_lock during call.
380 */
381
382void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
383{
384 struct vm_area_struct *vma;
385
386 mmap_write_lock(mm);
387 for (vma = mm->mmap; vma; vma = vma->vm_next)
388 mpol_rebind_policy(vma->vm_policy, new);
389 mmap_write_unlock(mm);
390}
391
392static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
393 [MPOL_DEFAULT] = {
394 .rebind = mpol_rebind_default,
395 },
396 [MPOL_INTERLEAVE] = {
397 .create = mpol_new_interleave,
398 .rebind = mpol_rebind_nodemask,
399 },
400 [MPOL_PREFERRED] = {
401 .create = mpol_new_preferred,
402 .rebind = mpol_rebind_preferred,
403 },
404 [MPOL_BIND] = {
405 .create = mpol_new_bind,
406 .rebind = mpol_rebind_nodemask,
407 },
408 [MPOL_LOCAL] = {
409 .rebind = mpol_rebind_default,
410 },
411};
412
413static int migrate_page_add(struct page *page, struct list_head *pagelist,
414 unsigned long flags);
415
416struct queue_pages {
417 struct list_head *pagelist;
418 unsigned long flags;
419 nodemask_t *nmask;
420 unsigned long start;
421 unsigned long end;
422 struct vm_area_struct *first;
423};
424
425/*
426 * Check if the page's nid is in qp->nmask.
427 *
428 * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
429 * in the invert of qp->nmask.
430 */
431static inline bool queue_pages_required(struct page *page,
432 struct queue_pages *qp)
433{
434 int nid = page_to_nid(page);
435 unsigned long flags = qp->flags;
436
437 return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
438}
439
440/*
441 * queue_pages_pmd() has four possible return values:
442 * 0 - pages are placed on the right node or queued successfully, or
443 * special page is met, i.e. huge zero page.
444 * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
445 * specified.
446 * 2 - THP was split.
447 * -EIO - is migration entry or only MPOL_MF_STRICT was specified and an
448 * existing page was already on a node that does not follow the
449 * policy.
450 */
451static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
452 unsigned long end, struct mm_walk *walk)
453 __releases(ptl)
454{
455 int ret = 0;
456 struct page *page;
457 struct queue_pages *qp = walk->private;
458 unsigned long flags;
459
460 if (unlikely(is_pmd_migration_entry(*pmd))) {
461 ret = -EIO;
462 goto unlock;
463 }
464 page = pmd_page(*pmd);
465 if (is_huge_zero_page(page)) {
466 spin_unlock(ptl);
467 walk->action = ACTION_CONTINUE;
468 goto out;
469 }
470 if (!queue_pages_required(page, qp))
471 goto unlock;
472
473 flags = qp->flags;
474 /* go to thp migration */
475 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
476 if (!vma_migratable(walk->vma) ||
477 migrate_page_add(page, qp->pagelist, flags)) {
478 ret = 1;
479 goto unlock;
480 }
481 } else
482 ret = -EIO;
483unlock:
484 spin_unlock(ptl);
485out:
486 return ret;
487}
488
489/*
490 * Scan through pages checking if pages follow certain conditions,
491 * and move them to the pagelist if they do.
492 *
493 * queue_pages_pte_range() has three possible return values:
494 * 0 - pages are placed on the right node or queued successfully, or
495 * special page is met, i.e. zero page.
496 * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
497 * specified.
498 * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
499 * on a node that does not follow the policy.
500 */
501static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
502 unsigned long end, struct mm_walk *walk)
503{
504 struct vm_area_struct *vma = walk->vma;
505 struct page *page;
506 struct queue_pages *qp = walk->private;
507 unsigned long flags = qp->flags;
508 int ret;
509 bool has_unmovable = false;
510 pte_t *pte, *mapped_pte;
511 spinlock_t *ptl;
512
513 ptl = pmd_trans_huge_lock(pmd, vma);
514 if (ptl) {
515 ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
516 if (ret != 2)
517 return ret;
518 }
519 /* THP was split, fall through to pte walk */
520
521 if (pmd_trans_unstable(pmd))
522 return 0;
523
524 mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
525 for (; addr != end; pte++, addr += PAGE_SIZE) {
526 if (!pte_present(*pte))
527 continue;
528 page = vm_normal_page(vma, addr, *pte);
529 if (!page)
530 continue;
531 /*
532 * vm_normal_page() filters out zero pages, but there might
533 * still be PageReserved pages to skip, perhaps in a VDSO.
534 */
535 if (PageReserved(page))
536 continue;
537 if (!queue_pages_required(page, qp))
538 continue;
539 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
540 /* MPOL_MF_STRICT must be specified if we get here */
541 if (!vma_migratable(vma)) {
542 has_unmovable = true;
543 break;
544 }
545
546 /*
547 * Do not abort immediately since there may be
548 * temporary off LRU pages in the range. Still
549 * need to migrate other LRU pages.
550 */
551 if (migrate_page_add(page, qp->pagelist, flags))
552 has_unmovable = true;
553 } else
554 break;
555 }
556 pte_unmap_unlock(mapped_pte, ptl);
557 cond_resched();
558
559 if (has_unmovable)
560 return 1;
561
562 return addr != end ? -EIO : 0;
563}
564
565static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
566 unsigned long addr, unsigned long end,
567 struct mm_walk *walk)
568{
569 int ret = 0;
570#ifdef CONFIG_HUGETLB_PAGE
571 struct queue_pages *qp = walk->private;
572 unsigned long flags = (qp->flags & MPOL_MF_VALID);
573 struct page *page;
574 spinlock_t *ptl;
575 pte_t entry;
576
577 ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
578 entry = huge_ptep_get(pte);
579 if (!pte_present(entry))
580 goto unlock;
581 page = pte_page(entry);
582 if (!queue_pages_required(page, qp))
583 goto unlock;
584
585 if (flags == MPOL_MF_STRICT) {
586 /*
587 * STRICT alone means only detecting misplaced pages; there is no
588 * need to check other vmas further.
589 */
590 ret = -EIO;
591 goto unlock;
592 }
593
594 if (!vma_migratable(walk->vma)) {
595 /*
596 * Must be STRICT with MOVE*, otherwise .test_walk() would have
597 * stopped walking the current vma.
598 * Detect the misplaced page but allow migrating pages which
599 * have been queued.
600 */
601 ret = 1;
602 goto unlock;
603 }
604
605 /* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
606 if (flags & (MPOL_MF_MOVE_ALL) ||
607 (flags & MPOL_MF_MOVE && page_mapcount(page) == 1)) {
608 if (!isolate_huge_page(page, qp->pagelist) &&
609 (flags & MPOL_MF_STRICT))
610 /*
611 * Failed to isolate page but allow migrating pages
612 * which have been queued.
613 */
614 ret = 1;
615 }
616unlock:
617 spin_unlock(ptl);
618#else
619 BUG();
620#endif
621 return ret;
622}
623
624#ifdef CONFIG_NUMA_BALANCING
625/*
626 * This is used to mark a range of virtual addresses to be inaccessible.
627 * These are later cleared by a NUMA hinting fault. Depending on these
628 * faults, pages may be migrated for better NUMA placement.
629 *
630 * This is assuming that NUMA faults are handled using PROT_NONE. If
631 * an architecture makes a different choice, it will need further
632 * changes to the core.
633 */
634unsigned long change_prot_numa(struct vm_area_struct *vma,
635 unsigned long addr, unsigned long end)
636{
637 int nr_updated;
638
639 nr_updated = change_protection(vma, addr, end, PAGE_NONE, MM_CP_PROT_NUMA);
640 if (nr_updated)
641 count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
642
643 return nr_updated;
644}
645#else
646static unsigned long change_prot_numa(struct vm_area_struct *vma,
647 unsigned long addr, unsigned long end)
648{
649 return 0;
650}
651#endif /* CONFIG_NUMA_BALANCING */
652
653static int queue_pages_test_walk(unsigned long start, unsigned long end,
654 struct mm_walk *walk)
655{
656 struct vm_area_struct *vma = walk->vma;
657 struct queue_pages *qp = walk->private;
658 unsigned long endvma = vma->vm_end;
659 unsigned long flags = qp->flags;
660
661 /* range check first */
662 VM_BUG_ON_VMA(!range_in_vma(vma, start, end), vma);
663
664 if (!qp->first) {
665 qp->first = vma;
666 if (!(flags & MPOL_MF_DISCONTIG_OK) &&
667 (qp->start < vma->vm_start))
668 /* hole at head side of range */
669 return -EFAULT;
670 }
671 if (!(flags & MPOL_MF_DISCONTIG_OK) &&
672 ((vma->vm_end < qp->end) &&
673 (!vma->vm_next || vma->vm_end < vma->vm_next->vm_start)))
674 /* hole at middle or tail of range */
675 return -EFAULT;
676
677 /*
678 * Need to check MPOL_MF_STRICT to return -EIO if possible
679 * regardless of vma_migratable
680 */
681 if (!vma_migratable(vma) &&
682 !(flags & MPOL_MF_STRICT))
683 return 1;
684
685 if (endvma > end)
686 endvma = end;
687
688 if (flags & MPOL_MF_LAZY) {
689 /* Similar to task_numa_work, skip inaccessible VMAs */
690 if (!is_vm_hugetlb_page(vma) && vma_is_accessible(vma) &&
691 !(vma->vm_flags & VM_MIXEDMAP))
692 change_prot_numa(vma, start, endvma);
693 return 1;
694 }
695
696 /* queue pages from current vma */
697 if (flags & MPOL_MF_VALID)
698 return 0;
699 return 1;
700}
701
702static const struct mm_walk_ops queue_pages_walk_ops = {
703 .hugetlb_entry = queue_pages_hugetlb,
704 .pmd_entry = queue_pages_pte_range,
705 .test_walk = queue_pages_test_walk,
706};
707
708/*
709 * Walk through page tables and collect pages to be migrated.
710 *
711 * If pages found in a given range are on a set of nodes (determined by
712 * @nodes and @flags), they are isolated and queued to the pagelist which
713 * is passed via @private.
714 *
715 * queue_pages_range() has three possible return values:
716 * 1 - there is unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were
717 * specified.
718 * 0 - queue pages successfully or no misplaced page.
719 * errno - i.e. misplaced pages with MPOL_MF_STRICT specified (-EIO) or
720 * memory range specified by nodemask and maxnode points outside
721 * your accessible address space (-EFAULT)
722 */
723static int
724queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
725 nodemask_t *nodes, unsigned long flags,
726 struct list_head *pagelist)
727{
728 int err;
729 struct queue_pages qp = {
730 .pagelist = pagelist,
731 .flags = flags,
732 .nmask = nodes,
733 .start = start,
734 .end = end,
735 .first = NULL,
736 };
737
738 err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp);
739
740 if (!qp.first)
741 /* whole range in hole */
742 err = -EFAULT;
743
744 return err;
745}
746
747/*
748 * Apply policy to a single VMA
749 * This must be called with the mmap_lock held for writing.
750 */
751static int vma_replace_policy(struct vm_area_struct *vma,
752 struct mempolicy *pol)
753{
754 int err;
755 struct mempolicy *old;
756 struct mempolicy *new;
757
758 pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
759 vma->vm_start, vma->vm_end, vma->vm_pgoff,
760 vma->vm_ops, vma->vm_file,
761 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
762
763 new = mpol_dup(pol);
764 if (IS_ERR(new))
765 return PTR_ERR(new);
766
767 if (vma->vm_ops && vma->vm_ops->set_policy) {
768 err = vma->vm_ops->set_policy(vma, new);
769 if (err)
770 goto err_out;
771 }
772
773 old = vma->vm_policy;
774 vma->vm_policy = new; /* protected by mmap_lock */
775 mpol_put(old);
776
777 return 0;
778 err_out:
779 mpol_put(new);
780 return err;
781}
782
783/* Step 2: apply policy to a range and do splits. */
784static int mbind_range(struct mm_struct *mm, unsigned long start,
785 unsigned long end, struct mempolicy *new_pol)
786{
787 struct vm_area_struct *next;
788 struct vm_area_struct *prev;
789 struct vm_area_struct *vma;
790 int err = 0;
791 pgoff_t pgoff;
792 unsigned long vmstart;
793 unsigned long vmend;
794
795 vma = find_vma(mm, start);
796 VM_BUG_ON(!vma);
797
798 prev = vma->vm_prev;
799 if (start > vma->vm_start)
800 prev = vma;
801
802 for (; vma && vma->vm_start < end; prev = vma, vma = next) {
803 next = vma->vm_next;
804 vmstart = max(start, vma->vm_start);
805 vmend = min(end, vma->vm_end);
806
807 if (mpol_equal(vma_policy(vma), new_pol))
808 continue;
809
810 pgoff = vma->vm_pgoff +
811 ((vmstart - vma->vm_start) >> PAGE_SHIFT);
812 prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
813 vma->anon_vma, vma->vm_file, pgoff,
814 new_pol, vma->vm_userfaultfd_ctx);
815 if (prev) {
816 vma = prev;
817 next = vma->vm_next;
818 if (mpol_equal(vma_policy(vma), new_pol))
819 continue;
820 /* vma_merge() joined vma && vma->next, case 8 */
821 goto replace;
822 }
823 if (vma->vm_start != vmstart) {
824 err = split_vma(vma->vm_mm, vma, vmstart, 1);
825 if (err)
826 goto out;
827 }
828 if (vma->vm_end != vmend) {
829 err = split_vma(vma->vm_mm, vma, vmend, 0);
830 if (err)
831 goto out;
832 }
833 replace:
834 err = vma_replace_policy(vma, new_pol);
835 if (err)
836 goto out;
837 }
838
839 out:
840 return err;
841}
842
843/* Set the process memory policy */
844static long do_set_mempolicy(unsigned short mode, unsigned short flags,
845 nodemask_t *nodes)
846{
847 struct mempolicy *new, *old;
848 NODEMASK_SCRATCH(scratch);
849 int ret;
850
851 if (!scratch)
852 return -ENOMEM;
853
854 new = mpol_new(mode, flags, nodes);
855 if (IS_ERR(new)) {
856 ret = PTR_ERR(new);
857 goto out;
858 }
859
860 ret = mpol_set_nodemask(new, nodes, scratch);
861 if (ret) {
862 mpol_put(new);
863 goto out;
864 }
865 task_lock(current);
866 old = current->mempolicy;
867 current->mempolicy = new;
868 if (new && new->mode == MPOL_INTERLEAVE)
869 current->il_prev = MAX_NUMNODES-1;
870 task_unlock(current);
871 mpol_put(old);
872 ret = 0;
873out:
874 NODEMASK_SCRATCH_FREE(scratch);
875 return ret;
876}
877
878/*
879 * Return nodemask for policy for get_mempolicy() query
880 *
881 * Called with task's alloc_lock held
882 */
883static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
884{
885 nodes_clear(*nodes);
886 if (p == &default_policy)
887 return;
888
889 switch (p->mode) {
890 case MPOL_BIND:
891 case MPOL_INTERLEAVE:
892 case MPOL_PREFERRED:
893 *nodes = p->nodes;
894 break;
895 case MPOL_LOCAL:
896 /* return empty node mask for local allocation */
897 break;
898 default:
899 BUG();
900 }
901}
902
903static int lookup_node(struct mm_struct *mm, unsigned long addr)
904{
905 struct page *p = NULL;
906 int err;
907
908 int locked = 1;
909 err = get_user_pages_locked(addr & PAGE_MASK, 1, 0, &p, &locked);
910 if (err > 0) {
911 err = page_to_nid(p);
912 put_page(p);
913 }
914 if (locked)
915 mmap_read_unlock(mm);
916 return err;
917}
918
919/* Retrieve NUMA policy */
920static long do_get_mempolicy(int *policy, nodemask_t *nmask,
921 unsigned long addr, unsigned long flags)
922{
923 int err;
924 struct mm_struct *mm = current->mm;
925 struct vm_area_struct *vma = NULL;
926 struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;
927
928 if (flags &
929 ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
930 return -EINVAL;
931
932 if (flags & MPOL_F_MEMS_ALLOWED) {
933 if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
934 return -EINVAL;
935 *policy = 0; /* just so it's initialized */
936 task_lock(current);
937 *nmask = cpuset_current_mems_allowed;
938 task_unlock(current);
939 return 0;
940 }
941
942 if (flags & MPOL_F_ADDR) {
943 /*
944 * Do NOT fall back to task policy if the
945 * vma/shared policy at addr is NULL. We
946 * want to return MPOL_DEFAULT in this case.
947 */
948 mmap_read_lock(mm);
949 vma = vma_lookup(mm, addr);
950 if (!vma) {
951 mmap_read_unlock(mm);
952 return -EFAULT;
953 }
954 if (vma->vm_ops && vma->vm_ops->get_policy)
955 pol = vma->vm_ops->get_policy(vma, addr);
956 else
957 pol = vma->vm_policy;
958 } else if (addr)
959 return -EINVAL;
960
961 if (!pol)
962 pol = &default_policy; /* indicates default behavior */
963
964 if (flags & MPOL_F_NODE) {
965 if (flags & MPOL_F_ADDR) {
966 /*
967 * Take a refcount on the mpol, lookup_node()
968 * will drop the mmap_lock, so after calling
969 * lookup_node() only "pol" remains valid, "vma"
970 * is stale.
971 */
972 pol_refcount = pol;
973 vma = NULL;
974 mpol_get(pol);
975 err = lookup_node(mm, addr);
976 if (err < 0)
977 goto out;
978 *policy = err;
979 } else if (pol == current->mempolicy &&
980 pol->mode == MPOL_INTERLEAVE) {
981 *policy = next_node_in(current->il_prev, pol->nodes);
982 } else {
983 err = -EINVAL;
984 goto out;
985 }
986 } else {
987 *policy = pol == &default_policy ? MPOL_DEFAULT :
988 pol->mode;
989 /*
990 * Internal mempolicy flags must be masked off before exposing
991 * the policy to userspace.
992 */
993 *policy |= (pol->flags & MPOL_MODE_FLAGS);
994 }
995
996 err = 0;
997 if (nmask) {
998 if (mpol_store_user_nodemask(pol)) {
999 *nmask = pol->w.user_nodemask;
1000 } else {
1001 task_lock(current);
1002 get_policy_nodemask(pol, nmask);
1003 task_unlock(current);
1004 }
1005 }
1006
1007 out:
1008 mpol_cond_put(pol);
1009 if (vma)
1010 mmap_read_unlock(mm);
1011 if (pol_refcount)
1012 mpol_put(pol_refcount);
1013 return err;
1014}
1015
1016#ifdef CONFIG_MIGRATION
1017/*
1018 * page migration, thp tail pages can be passed.
1019 */
1020static int migrate_page_add(struct page *page, struct list_head *pagelist,
1021 unsigned long flags)
1022{
1023 struct page *head = compound_head(page);
1024 /*
1025 * Avoid migrating a page that is shared with others.
1026 */
1027 if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
1028 if (!isolate_lru_page(head)) {
1029 list_add_tail(&head->lru, pagelist);
1030 mod_node_page_state(page_pgdat(head),
1031 NR_ISOLATED_ANON + page_is_file_lru(head),
1032 thp_nr_pages(head));
1033 } else if (flags & MPOL_MF_STRICT) {
1034 /*
1035 * Non-movable page may reach here. And, there may be
1036 * temporary off LRU pages or non-LRU movable pages.
1037 * Treat them as unmovable pages since they can't be
1038 * isolated, so they can't be moved at the moment. It
1039 * should return -EIO for this case too.
1040 */
1041 return -EIO;
1042 }
1043 }
1044
1045 return 0;
1046}
1047
1048/*
1049 * Migrate pages from one node to a target node.
1050 * Returns error or the number of pages not migrated.
1051 */
1052static int migrate_to_node(struct mm_struct *mm, int source, int dest,
1053 int flags)
1054{
1055 nodemask_t nmask;
1056 LIST_HEAD(pagelist);
1057 int err = 0;
1058 struct migration_target_control mtc = {
1059 .nid = dest,
1060 .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
1061 };
1062
1063 nodes_clear(nmask);
1064 node_set(source, nmask);
1065
1066 /*
1067 * This does not "check" the range but isolates all pages that
1068 * need migration. Between passing in the full user address
1069 * space range and MPOL_MF_DISCONTIG_OK, this call cannot fail.
1070 */
1071 VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
1072 queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
1073 flags | MPOL_MF_DISCONTIG_OK, &pagelist);
1074
1075 if (!list_empty(&pagelist)) {
1076 err = migrate_pages(&pagelist, alloc_migration_target, NULL,
1077 (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL);
1078 if (err)
1079 putback_movable_pages(&pagelist);
1080 }
1081
1082 return err;
1083}
1084
1085/*
1086 * Move pages between the two nodesets so as to preserve the physical
1087 * layout as much as possible.
1088 *
1089 * Returns the number of pages that could not be moved.
1090 */
1091int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1092 const nodemask_t *to, int flags)
1093{
1094 int busy = 0;
1095 int err = 0;
1096 nodemask_t tmp;
1097
1098 lru_cache_disable();
1099
1100 mmap_read_lock(mm);
1101
1102 /*
1103 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
1104 * bit in 'to' is not also set in 'tmp'. Clear the found 'source'
1105 * bit in 'tmp', and return that <source, dest> pair for migration.
1106 * The pair of nodemasks 'to' and 'from' define the map.
1107 *
1108 * If no pair of bits is found that way, fall back to picking some
1109 * pair of 'source' and 'dest' bits that are not the same. If the
1110 * 'source' and 'dest' bits are the same, this represents a node
1111 * that will be migrating to itself, so no pages need move.
1112 *
1113 * If no bits are left in 'tmp', or if all remaining bits left
1114 * in 'tmp' correspond to the same bit in 'to', return false
1115 * (nothing left to migrate).
1116 *
1117 * This lets us pick a pair of nodes to migrate between, such that
1118 * if possible the dest node is not already occupied by some other
1119 * source node, minimizing the risk of overloading the memory on a
1120 * node that would happen if we migrated incoming memory to a node
1121 * before migrating outgoing memory source that same node.
1122 *
1123 * A single scan of tmp is sufficient. As we go, we remember the
1124 * most recent <s, d> pair that moved (s != d). If we find a pair
1125 * that not only moved, but what's better, moved to an empty slot
1126 * (d is not set in tmp), then we break out then, with that pair.
1127 * Otherwise when we finish scanning 'tmp', we at least have the
1128 * most recent <s, d> pair that moved. If we get all the way through
1129 * the scan of tmp without finding any node that moved, much less
1130 * moved to an empty node, then there is nothing left worth migrating.
1131 */
1132
1133 tmp = *from;
1134 while (!nodes_empty(tmp)) {
1135 int s, d;
1136 int source = NUMA_NO_NODE;
1137 int dest = 0;
1138
1139 for_each_node_mask(s, tmp) {
1140
1141 /*
1142 * do_migrate_pages() tries to maintain the relative
1143 * node relationship of the pages established between
1144 * threads and memory areas.
1145 *
1146			 * However, if the number of source nodes is not equal to
1147			 * the number of destination nodes, we cannot preserve
1148			 * this relative node relationship. In that case, skip
1149 * copying memory from a node that is in the destination
1150 * mask.
1151 *
1152 * Example: [2,3,4] -> [3,4,5] moves everything.
1153			 * Example: [2,3,4] -> [3,4,5] moves everything.
1154 */
1155
1156 if ((nodes_weight(*from) != nodes_weight(*to)) &&
1157 (node_isset(s, *to)))
1158 continue;
1159
1160 d = node_remap(s, *from, *to);
1161 if (s == d)
1162 continue;
1163
1164 source = s; /* Node moved. Memorize */
1165 dest = d;
1166
1167 /* dest not in remaining from nodes? */
1168 if (!node_isset(dest, tmp))
1169 break;
1170 }
1171 if (source == NUMA_NO_NODE)
1172 break;
1173
1174 node_clear(source, tmp);
1175 err = migrate_to_node(mm, source, dest, flags);
1176 if (err > 0)
1177 busy += err;
1178 if (err < 0)
1179 break;
1180 }
1181 mmap_read_unlock(mm);
1182
1183 lru_cache_enable();
1184 if (err < 0)
1185 return err;
1186 return busy;
1187
1188}
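
/*
 * A worked example of the pair selection above: for from = {0,1} and
 * to = {1,2}, the first scan remembers <0,1> but keeps going because
 * node 1 is still a pending source, then breaks out at <1,2> since
 * node 2 is not in the remaining source set.  Node 1 is therefore
 * drained to node 2 first, and only then is node 0 migrated to node 1,
 * so node 1 does not receive incoming pages before its outgoing pages
 * have been moved away.
 */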
1189
1190/*
1191 * Allocate a new page for page migration based on vma policy.
1192 * Start by assuming the page is mapped by the same vma that contains @start.
1193 * If not, search forward from there. N.B., this assumes that the
1194 * list of pages handed to migrate_pages()--which is how we get here--
1195 * is in virtual address order.
1196 */
1197static struct page *new_page(struct page *page, unsigned long start)
1198{
1199 struct vm_area_struct *vma;
1200 unsigned long address;
1201
1202 vma = find_vma(current->mm, start);
1203 while (vma) {
1204 address = page_address_in_vma(page, vma);
1205 if (address != -EFAULT)
1206 break;
1207 vma = vma->vm_next;
1208 }
1209
1210 if (PageHuge(page)) {
1211 return alloc_huge_page_vma(page_hstate(compound_head(page)),
1212 vma, address);
1213 } else if (PageTransHuge(page)) {
1214 struct page *thp;
1215
1216 thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
1217 HPAGE_PMD_ORDER);
1218 if (!thp)
1219 return NULL;
1220 prep_transhuge_page(thp);
1221 return thp;
1222 }
1223 /*
1224 * if !vma, alloc_page_vma() will use task or system default policy
1225 */
1226 return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL,
1227 vma, address);
1228}
1229#else
1230
1231static int migrate_page_add(struct page *page, struct list_head *pagelist,
1232 unsigned long flags)
1233{
1234 return -EIO;
1235}
1236
1237int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1238 const nodemask_t *to, int flags)
1239{
1240 return -ENOSYS;
1241}
1242
1243static struct page *new_page(struct page *page, unsigned long start)
1244{
1245 return NULL;
1246}
1247#endif
1248
1249static long do_mbind(unsigned long start, unsigned long len,
1250 unsigned short mode, unsigned short mode_flags,
1251 nodemask_t *nmask, unsigned long flags)
1252{
1253 struct mm_struct *mm = current->mm;
1254 struct mempolicy *new;
1255 unsigned long end;
1256 int err;
1257 int ret;
1258 LIST_HEAD(pagelist);
1259
1260 if (flags & ~(unsigned long)MPOL_MF_VALID)
1261 return -EINVAL;
1262 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1263 return -EPERM;
1264
1265 if (start & ~PAGE_MASK)
1266 return -EINVAL;
1267
1268 if (mode == MPOL_DEFAULT)
1269 flags &= ~MPOL_MF_STRICT;
1270
1271 len = (len + PAGE_SIZE - 1) & PAGE_MASK;
1272 end = start + len;
1273
1274 if (end < start)
1275 return -EINVAL;
1276 if (end == start)
1277 return 0;
1278
1279 new = mpol_new(mode, mode_flags, nmask);
1280 if (IS_ERR(new))
1281 return PTR_ERR(new);
1282
1283 if (flags & MPOL_MF_LAZY)
1284 new->flags |= MPOL_F_MOF;
1285
1286 /*
1287 * If we are using the default policy then operation
1288 * on discontinuous address spaces is okay after all
1289 */
1290 if (!new)
1291 flags |= MPOL_MF_DISCONTIG_OK;
1292
1293 pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1294 start, start + len, mode, mode_flags,
1295 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
1296
1297 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1298
1299 lru_cache_disable();
1300 }
1301 {
1302 NODEMASK_SCRATCH(scratch);
1303 if (scratch) {
1304 mmap_write_lock(mm);
1305 err = mpol_set_nodemask(new, nmask, scratch);
1306 if (err)
1307 mmap_write_unlock(mm);
1308 } else
1309 err = -ENOMEM;
1310 NODEMASK_SCRATCH_FREE(scratch);
1311 }
1312 if (err)
1313 goto mpol_out;
1314
1315 ret = queue_pages_range(mm, start, end, nmask,
1316 flags | MPOL_MF_INVERT, &pagelist);
1317
1318 if (ret < 0) {
1319 err = ret;
1320 goto up_out;
1321 }
1322
1323 err = mbind_range(mm, start, end, new);
1324
1325 if (!err) {
1326 int nr_failed = 0;
1327
1328 if (!list_empty(&pagelist)) {
1329 WARN_ON_ONCE(flags & MPOL_MF_LAZY);
1330 nr_failed = migrate_pages(&pagelist, new_page, NULL,
1331 start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
1332 if (nr_failed)
1333 putback_movable_pages(&pagelist);
1334 }
1335
1336 if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
1337 err = -EIO;
1338 } else {
1339up_out:
1340 if (!list_empty(&pagelist))
1341 putback_movable_pages(&pagelist);
1342 }
1343
1344 mmap_write_unlock(mm);
1345mpol_out:
1346 mpol_put(new);
1347 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
1348 lru_cache_enable();
1349 return err;
1350}
1351
1352/*
1353 * User space interface with variable sized bitmaps for nodelists.
1354 */
1355
1356/* Copy a node mask from user space. */
1357static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
1358 unsigned long maxnode)
1359{
1360 unsigned long k;
1361 unsigned long t;
1362 unsigned long nlongs;
1363 unsigned long endmask;
1364
1365 --maxnode;
1366 nodes_clear(*nodes);
1367 if (maxnode == 0 || !nmask)
1368 return 0;
1369 if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1370 return -EINVAL;
1371
1372 nlongs = BITS_TO_LONGS(maxnode);
1373 if ((maxnode % BITS_PER_LONG) == 0)
1374 endmask = ~0UL;
1375 else
1376 endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
1377
1378 /*
1379	 * When the user specifies more nodes than supported, just check
1380	 * that the unsupported part is all zero.
1381	 *
1382	 * If maxnode covers more longs than MAX_NUMNODES, check
1383	 * the bits in that area first, and then go on to check the
1384	 * remaining bits at or above MAX_NUMNODES.
1385 * Otherwise, just check bits [MAX_NUMNODES, maxnode).
1386 */
1387 if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
1388 for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
1389 if (get_user(t, nmask + k))
1390 return -EFAULT;
1391 if (k == nlongs - 1) {
1392 if (t & endmask)
1393 return -EINVAL;
1394 } else if (t)
1395 return -EINVAL;
1396 }
1397 nlongs = BITS_TO_LONGS(MAX_NUMNODES);
1398 endmask = ~0UL;
1399 }
1400
1401 if (maxnode > MAX_NUMNODES && MAX_NUMNODES % BITS_PER_LONG != 0) {
1402 unsigned long valid_mask = endmask;
1403
1404 valid_mask &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
1405 if (get_user(t, nmask + nlongs - 1))
1406 return -EFAULT;
1407 if (t & valid_mask)
1408 return -EINVAL;
1409 }
1410
1411 if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
1412 return -EFAULT;
1413 nodes_addr(*nodes)[nlongs-1] &= endmask;
1414 return 0;
1415}
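
/*
 * A worked example of the bit arithmetic above, assuming 64-bit longs:
 * for a caller-supplied maxnode of 71, the function operates on
 * maxnode = 70 after the decrement, so nlongs = BITS_TO_LONGS(70) = 2
 * and endmask = (1UL << (70 % 64)) - 1 = 0x3f, i.e. only the low six
 * bits of the second long are accepted from user space.
 */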
1416
1417/* Copy a kernel node mask to user space */
1418static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1419 nodemask_t *nodes)
1420{
1421 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1422 unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
1423
1424 if (copy > nbytes) {
1425 if (copy > PAGE_SIZE)
1426 return -EINVAL;
1427 if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1428 return -EFAULT;
1429 copy = nbytes;
1430 }
1431 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1432}
1433
1434/* Basic parameter sanity check used by both mbind() and set_mempolicy() */
1435static inline int sanitize_mpol_flags(int *mode, unsigned short *flags)
1436{
1437 *flags = *mode & MPOL_MODE_FLAGS;
1438 *mode &= ~MPOL_MODE_FLAGS;
1439 if ((unsigned int)(*mode) >= MPOL_MAX)
1440 return -EINVAL;
1441 if ((*flags & MPOL_F_STATIC_NODES) && (*flags & MPOL_F_RELATIVE_NODES))
1442 return -EINVAL;
1443 if (*flags & MPOL_F_NUMA_BALANCING) {
1444 if (*mode != MPOL_BIND)
1445 return -EINVAL;
1446 *flags |= (MPOL_F_MOF | MPOL_F_MORON);
1447 }
1448 return 0;
1449}
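
/*
 * For illustration: a caller-supplied mode of
 * (MPOL_BIND | MPOL_F_STATIC_NODES) is split by the helper above into
 * *mode == MPOL_BIND and *flags == MPOL_F_STATIC_NODES, while passing
 * both MPOL_F_STATIC_NODES and MPOL_F_RELATIVE_NODES is rejected with
 * -EINVAL.
 */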
1450
1451static long kernel_mbind(unsigned long start, unsigned long len,
1452 unsigned long mode, const unsigned long __user *nmask,
1453 unsigned long maxnode, unsigned int flags)
1454{
1455 unsigned short mode_flags;
1456 nodemask_t nodes;
1457 int lmode = mode;
1458 int err;
1459
1460 start = untagged_addr(start);
1461 err = sanitize_mpol_flags(&lmode, &mode_flags);
1462 if (err)
1463 return err;
1464
1465 err = get_nodes(&nodes, nmask, maxnode);
1466 if (err)
1467 return err;
1468
1469 return do_mbind(start, len, lmode, mode_flags, &nodes, flags);
1470}
1471
1472SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1473 unsigned long, mode, const unsigned long __user *, nmask,
1474 unsigned long, maxnode, unsigned int, flags)
1475{
1476 return kernel_mbind(start, len, mode, nmask, maxnode, flags);
1477}
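
/*
 * An illustrative user-space sketch (not part of this file), assuming
 * the mbind() wrapper declared in libnuma's <numaif.h>: bind an
 * anonymous mapping to node 0 and ask the kernel to move any pages
 * already faulted in.  With MPOL_MF_STRICT, a failure indicates that
 * some existing pages could not be made to conform to the policy.
 *
 *	#include <numaif.h>
 *	#include <sys/mman.h>
 *	#include <stdio.h>
 *
 *	size_t len = 1 << 20;
 *	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	unsigned long nodemask = 1UL << 0;	   (node 0 only)
 *	if (mbind(buf, len, MPOL_BIND, &nodemask,
 *		  8 * sizeof(nodemask), MPOL_MF_MOVE | MPOL_MF_STRICT))
 *		perror("mbind");
 */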
1478
1479/* Set the process memory policy */
1480static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
1481 unsigned long maxnode)
1482{
1483 unsigned short mode_flags;
1484 nodemask_t nodes;
1485 int lmode = mode;
1486 int err;
1487
1488 err = sanitize_mpol_flags(&lmode, &mode_flags);
1489 if (err)
1490 return err;
1491
1492 err = get_nodes(&nodes, nmask, maxnode);
1493 if (err)
1494 return err;
1495
1496 return do_set_mempolicy(lmode, mode_flags, &nodes);
1497}
1498
1499SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1500 unsigned long, maxnode)
1501{
1502 return kernel_set_mempolicy(mode, nmask, maxnode);
1503}
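
/*
 * An illustrative user-space sketch (not part of this file), assuming
 * libnuma's <numaif.h> wrapper: interleave the calling task's policy
 * across nodes 0 and 1, so pages it faults in afterwards are spread
 * over the two nodes.
 *
 *	#include <numaif.h>
 *	#include <stdio.h>
 *
 *	unsigned long nodemask = (1UL << 0) | (1UL << 1);
 *	if (set_mempolicy(MPOL_INTERLEAVE, &nodemask,
 *			  8 * sizeof(nodemask)))
 *		perror("set_mempolicy");
 */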
1504
1505static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
1506 const unsigned long __user *old_nodes,
1507 const unsigned long __user *new_nodes)
1508{
1509 struct mm_struct *mm = NULL;
1510 struct task_struct *task;
1511 nodemask_t task_nodes;
1512 int err;
1513 nodemask_t *old;
1514 nodemask_t *new;
1515 NODEMASK_SCRATCH(scratch);
1516
1517 if (!scratch)
1518 return -ENOMEM;
1519
1520 old = &scratch->mask1;
1521 new = &scratch->mask2;
1522
1523 err = get_nodes(old, old_nodes, maxnode);
1524 if (err)
1525 goto out;
1526
1527 err = get_nodes(new, new_nodes, maxnode);
1528 if (err)
1529 goto out;
1530
1531 /* Find the mm_struct */
1532 rcu_read_lock();
1533 task = pid ? find_task_by_vpid(pid) : current;
1534 if (!task) {
1535 rcu_read_unlock();
1536 err = -ESRCH;
1537 goto out;
1538 }
1539 get_task_struct(task);
1540
1541 err = -EINVAL;
1542
1543 /*
1544 * Check if this process has the right to modify the specified process.
1545 * Use the regular "ptrace_may_access()" checks.
1546 */
1547 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
1548 rcu_read_unlock();
1549 err = -EPERM;
1550 goto out_put;
1551 }
1552 rcu_read_unlock();
1553
1554 task_nodes = cpuset_mems_allowed(task);
1555 /* Is the user allowed to access the target nodes? */
1556 if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
1557 err = -EPERM;
1558 goto out_put;
1559 }
1560
1561 task_nodes = cpuset_mems_allowed(current);
1562 nodes_and(*new, *new, task_nodes);
1563 if (nodes_empty(*new))
1564 goto out_put;
1565
1566 err = security_task_movememory(task);
1567 if (err)
1568 goto out_put;
1569
1570 mm = get_task_mm(task);
1571 put_task_struct(task);
1572
1573 if (!mm) {
1574 err = -EINVAL;
1575 goto out;
1576 }
1577
1578 err = do_migrate_pages(mm, old, new,
1579 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
1580
1581 mmput(mm);
1582out:
1583 NODEMASK_SCRATCH_FREE(scratch);
1584
1585 return err;
1586
1587out_put:
1588 put_task_struct(task);
1589 goto out;
1590
1591}
1592
1593SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1594 const unsigned long __user *, old_nodes,
1595 const unsigned long __user *, new_nodes)
1596{
1597 return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
1598}
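
/*
 * An illustrative user-space sketch (not part of this file), assuming
 * libnuma's <numaif.h> wrapper and a caller-chosen target_pid: move the
 * target task's pages from node 0 to node 1.  On success the return
 * value is the number of pages that could not be moved.
 *
 *	#include <numaif.h>
 *	#include <stdio.h>
 *
 *	unsigned long old_nodes = 1UL << 0;
 *	unsigned long new_nodes = 1UL << 1;
 *	long left = migrate_pages(target_pid, 8 * sizeof(old_nodes),
 *				  &old_nodes, &new_nodes);
 *	if (left < 0)
 *		perror("migrate_pages");
 */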
1599
1600
1601/* Retrieve NUMA policy */
1602static int kernel_get_mempolicy(int __user *policy,
1603 unsigned long __user *nmask,
1604 unsigned long maxnode,
1605 unsigned long addr,
1606 unsigned long flags)
1607{
1608 int err;
1609 int pval;
1610 nodemask_t nodes;
1611
1612 if (nmask != NULL && maxnode < nr_node_ids)
1613 return -EINVAL;
1614
1615 addr = untagged_addr(addr);
1616
1617 err = do_get_mempolicy(&pval, &nodes, addr, flags);
1618
1619 if (err)
1620 return err;
1621
1622 if (policy && put_user(pval, policy))
1623 return -EFAULT;
1624
1625 if (nmask)
1626 err = copy_nodes_to_user(nmask, maxnode, &nodes);
1627
1628 return err;
1629}
1630
1631SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1632 unsigned long __user *, nmask, unsigned long, maxnode,
1633 unsigned long, addr, unsigned long, flags)
1634{
1635 return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
1636}
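
/*
 * An illustrative user-space sketch (not part of this file), assuming
 * libnuma's <numaif.h> wrapper and an address addr inside a mapping of
 * interest: query which policy governs that address.
 *
 *	#include <numaif.h>
 *	#include <stdio.h>
 *
 *	int mode;
 *	unsigned long nodemask = 0;
 *	if (get_mempolicy(&mode, &nodemask, 8 * sizeof(nodemask),
 *			  addr, MPOL_F_ADDR))
 *		perror("get_mempolicy");
 */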
1637
1638#ifdef CONFIG_COMPAT
1639
1640COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1641 compat_ulong_t __user *, nmask,
1642 compat_ulong_t, maxnode,
1643 compat_ulong_t, addr, compat_ulong_t, flags)
1644{
1645 long err;
1646 unsigned long __user *nm = NULL;
1647 unsigned long nr_bits, alloc_size;
1648 DECLARE_BITMAP(bm, MAX_NUMNODES);
1649
1650 nr_bits = min_t(unsigned long, maxnode-1, nr_node_ids);
1651 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1652
1653 if (nmask)
1654 nm = compat_alloc_user_space(alloc_size);
1655
1656 err = kernel_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1657
1658 if (!err && nmask) {
1659 unsigned long copy_size;
1660 copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
1661 err = copy_from_user(bm, nm, copy_size);
1662 /* ensure entire bitmap is zeroed */
1663 err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1664 err |= compat_put_bitmap(nmask, bm, nr_bits);
1665 }
1666
1667 return err;
1668}
1669
1670COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
1671 compat_ulong_t, maxnode)
1672{
1673 unsigned long __user *nm = NULL;
1674 unsigned long nr_bits, alloc_size;
1675 DECLARE_BITMAP(bm, MAX_NUMNODES);
1676
1677 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1678 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1679
1680 if (nmask) {
1681 if (compat_get_bitmap(bm, nmask, nr_bits))
1682 return -EFAULT;
1683 nm = compat_alloc_user_space(alloc_size);
1684 if (copy_to_user(nm, bm, alloc_size))
1685 return -EFAULT;
1686 }
1687
1688 return kernel_set_mempolicy(mode, nm, nr_bits+1);
1689}
1690
1691COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
1692 compat_ulong_t, mode, compat_ulong_t __user *, nmask,
1693 compat_ulong_t, maxnode, compat_ulong_t, flags)
1694{
1695 unsigned long __user *nm = NULL;
1696 unsigned long nr_bits, alloc_size;
1697 nodemask_t bm;
1698
1699 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1700 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1701
1702 if (nmask) {
1703 if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
1704 return -EFAULT;
1705 nm = compat_alloc_user_space(alloc_size);
1706 if (copy_to_user(nm, nodes_addr(bm), alloc_size))
1707 return -EFAULT;
1708 }
1709
1710 return kernel_mbind(start, len, mode, nm, nr_bits+1, flags);
1711}
1712
1713COMPAT_SYSCALL_DEFINE4(migrate_pages, compat_pid_t, pid,
1714 compat_ulong_t, maxnode,
1715 const compat_ulong_t __user *, old_nodes,
1716 const compat_ulong_t __user *, new_nodes)
1717{
1718 unsigned long __user *old = NULL;
1719 unsigned long __user *new = NULL;
1720 nodemask_t tmp_mask;
1721 unsigned long nr_bits;
1722 unsigned long size;
1723
1724 nr_bits = min_t(unsigned long, maxnode - 1, MAX_NUMNODES);
1725 size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1726 if (old_nodes) {
1727 if (compat_get_bitmap(nodes_addr(tmp_mask), old_nodes, nr_bits))
1728 return -EFAULT;
1729 old = compat_alloc_user_space(new_nodes ? size * 2 : size);
1730 if (new_nodes)
1731 new = old + size / sizeof(unsigned long);
1732 if (copy_to_user(old, nodes_addr(tmp_mask), size))
1733 return -EFAULT;
1734 }
1735 if (new_nodes) {
1736 if (compat_get_bitmap(nodes_addr(tmp_mask), new_nodes, nr_bits))
1737 return -EFAULT;
1738 if (new == NULL)
1739 new = compat_alloc_user_space(size);
1740 if (copy_to_user(new, nodes_addr(tmp_mask), size))
1741 return -EFAULT;
1742 }
1743 return kernel_migrate_pages(pid, nr_bits + 1, old, new);
1744}
1745
1746#endif /* CONFIG_COMPAT */
1747
1748bool vma_migratable(struct vm_area_struct *vma)
1749{
1750 if (vma->vm_flags & (VM_IO | VM_PFNMAP))
1751 return false;
1752
1753 /*
1754 * DAX device mappings require predictable access latency, so avoid
1755 * incurring periodic faults.
1756 */
1757 if (vma_is_dax(vma))
1758 return false;
1759
1760 if (is_vm_hugetlb_page(vma) &&
1761 !hugepage_migration_supported(hstate_vma(vma)))
1762 return false;
1763
1764 /*
1765 * Migration allocates pages in the highest zone. If we cannot
1766 * do so then migration (at least from node to node) is not
1767 * possible.
1768 */
1769 if (vma->vm_file &&
1770 gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
1771 < policy_zone)
1772 return false;
1773 return true;
1774}
1775
1776struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
1777 unsigned long addr)
1778{
1779 struct mempolicy *pol = NULL;
1780
1781 if (vma) {
1782 if (vma->vm_ops && vma->vm_ops->get_policy) {
1783 pol = vma->vm_ops->get_policy(vma, addr);
1784 } else if (vma->vm_policy) {
1785 pol = vma->vm_policy;
1786
1787 /*
1788 * shmem_alloc_page() passes MPOL_F_SHARED policy with
1789 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
1790 * count on these policies which will be dropped by
1791 * mpol_cond_put() later
1792 */
1793 if (mpol_needs_cond_ref(pol))
1794 mpol_get(pol);
1795 }
1796 }
1797
1798 return pol;
1799}
1800
1801/*
1802 * get_vma_policy(@vma, @addr)
1803 * @vma: virtual memory area whose policy is sought
1804 * @addr: address in @vma for shared policy lookup
1805 *
1806 * Returns effective policy for a VMA at specified address.
1807 * Falls back to current->mempolicy or system default policy, as necessary.
1808 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1809 * count--added by the get_policy() vm_op, as appropriate--to protect against
1810 * freeing by another task. It is the caller's responsibility to free the
1811 * extra reference for shared policies.
1812 */
1813static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1814 unsigned long addr)
1815{
1816 struct mempolicy *pol = __get_vma_policy(vma, addr);
1817
1818 if (!pol)
1819 pol = get_task_policy(current);
1820
1821 return pol;
1822}
1823
1824bool vma_policy_mof(struct vm_area_struct *vma)
1825{
1826 struct mempolicy *pol;
1827
1828 if (vma->vm_ops && vma->vm_ops->get_policy) {
1829 bool ret = false;
1830
1831 pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1832 if (pol && (pol->flags & MPOL_F_MOF))
1833 ret = true;
1834 mpol_cond_put(pol);
1835
1836 return ret;
1837 }
1838
1839 pol = vma->vm_policy;
1840 if (!pol)
1841 pol = get_task_policy(current);
1842
1843 return pol->flags & MPOL_F_MOF;
1844}
1845
1846static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1847{
1848 enum zone_type dynamic_policy_zone = policy_zone;
1849
1850 BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1851
1852 /*
1853 * If policy->nodes has movable memory only,
1854 * we apply the policy only when gfp_zone(gfp) == ZONE_MOVABLE.
1855 *
1856 * policy->nodes is intersected with node_states[N_MEMORY],
1857 * so if the following test fails, it implies that
1858 * policy->nodes has movable memory only.
1859 */
1860 if (!nodes_intersects(policy->nodes, node_states[N_HIGH_MEMORY]))
1861 dynamic_policy_zone = ZONE_MOVABLE;
1862
1863 return zone >= dynamic_policy_zone;
1864}
1865
1866/*
1867 * Return a nodemask representing a mempolicy for filtering nodes for
1868 * page allocation
1869 */
1870nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
1871{
1872 /* Lower zones don't get a nodemask applied for MPOL_BIND */
1873 if (unlikely(policy->mode == MPOL_BIND) &&
1874 apply_policy_zone(policy, gfp_zone(gfp)) &&
1875 cpuset_nodemask_valid_mems_allowed(&policy->nodes))
1876 return &policy->nodes;
1877
1878 return NULL;
1879}
1880
1881/* Return the node id preferred by the given mempolicy, or the given id */
1882static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd)
1883{
1884 if (policy->mode == MPOL_PREFERRED) {
1885 nd = first_node(policy->nodes);
1886 } else {
1887 /*
1888 * __GFP_THISNODE shouldn't even be used with the bind policy
1889 * because we might easily break the expectation to stay on the
1890 * requested node and not break the policy.
1891 */
1892 WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
1893 }
1894
1895 return nd;
1896}
1897
1898/* Do dynamic interleaving for a process */
1899static unsigned interleave_nodes(struct mempolicy *policy)
1900{
1901 unsigned next;
1902 struct task_struct *me = current;
1903
1904 next = next_node_in(me->il_prev, policy->nodes);
1905 if (next < MAX_NUMNODES)
1906 me->il_prev = next;
1907 return next;
1908}
1909
1910/*
1911 * Depending on the memory policy provide a node from which to allocate the
1912 * next slab entry.
1913 */
1914unsigned int mempolicy_slab_node(void)
1915{
1916 struct mempolicy *policy;
1917 int node = numa_mem_id();
1918
1919 if (in_interrupt())
1920 return node;
1921
1922 policy = current->mempolicy;
1923 if (!policy)
1924 return node;
1925
1926 switch (policy->mode) {
1927 case MPOL_PREFERRED:
1928 return first_node(policy->nodes);
1929
1930 case MPOL_INTERLEAVE:
1931 return interleave_nodes(policy);
1932
1933 case MPOL_BIND: {
1934 struct zoneref *z;
1935
1936 /*
1937 * Follow bind policy behavior and start allocation at the
1938 * first node.
1939 */
1940 struct zonelist *zonelist;
1941 enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1942 zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
1943 z = first_zones_zonelist(zonelist, highest_zoneidx,
1944 &policy->nodes);
1945 return z->zone ? zone_to_nid(z->zone) : node;
1946 }
1947 case MPOL_LOCAL:
1948 return node;
1949
1950 default:
1951 BUG();
1952 }
1953}
1954
1955/*
1956 * Do static interleaving for a VMA with known offset @n. Returns the n'th
1957 * node in pol->nodes (starting from n=0), wrapping around if n exceeds the
1958 * number of present nodes.
1959 */
1960static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
1961{
1962 nodemask_t nodemask = pol->nodes;
1963 unsigned int target, nnodes;
1964 int i;
1965 int nid;
1966 /*
1967 * The barrier will stabilize the nodemask in a register or on
1968 * the stack so that it will stop changing under the code.
1969 *
1970 * Between first_node() and next_node(), pol->nodes could be changed
1971	 * by other threads. So we take a local copy of pol->nodes on the stack.
1972 */
1973 barrier();
1974
1975 nnodes = nodes_weight(nodemask);
1976 if (!nnodes)
1977 return numa_node_id();
1978 target = (unsigned int)n % nnodes;
1979 nid = first_node(nodemask);
1980 for (i = 0; i < target; i++)
1981 nid = next_node(nid, nodemask);
1982 return nid;
1983}
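
/*
 * A worked example of the static interleave above: with
 * pol->nodes = {0,2,4} and offset n = 7, nnodes = 3 and
 * target = 7 % 3 = 1, so the walk starting at first_node()
 * returns node 2, the second set node in the mask.
 */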
1984
1985/* Determine a node number for interleave */
1986static inline unsigned interleave_nid(struct mempolicy *pol,
1987 struct vm_area_struct *vma, unsigned long addr, int shift)
1988{
1989 if (vma) {
1990 unsigned long off;
1991
1992 /*
1993 * for small pages, there is no difference between
1994 * shift and PAGE_SHIFT, so the bit-shift is safe.
1995 * for huge pages, since vm_pgoff is in units of small
1996 * pages, we need to shift off the always 0 bits to get
1997 * a useful offset.
1998 */
1999 BUG_ON(shift < PAGE_SHIFT);
2000 off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
2001 off += (addr - vma->vm_start) >> shift;
2002 return offset_il_node(pol, off);
2003 } else
2004 return interleave_nodes(pol);
2005}
2006
2007#ifdef CONFIG_HUGETLBFS
2008/*
2009 * huge_node(@vma, @addr, @gfp_flags, @mpol)
2010 * @vma: virtual memory area whose policy is sought
2011 * @addr: address in @vma for shared policy lookup and interleave policy
2012 * @gfp_flags: for requested zone
2013 * @mpol: pointer to mempolicy pointer for reference counted mempolicy
2014 * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
2015 *
2016 * Returns a nid suitable for a huge page allocation and a pointer
2017 * to the struct mempolicy for conditional unref after allocation.
2018 * If the effective policy is MPOL_BIND, returns a pointer to the mempolicy's
2019 * @nodemask for filtering the zonelist.
2020 *
2021 * Must be protected by read_mems_allowed_begin()
2022 */
2023int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
2024 struct mempolicy **mpol, nodemask_t **nodemask)
2025{
2026 int nid;
2027
2028 *mpol = get_vma_policy(vma, addr);
2029 *nodemask = NULL; /* assume !MPOL_BIND */
2030
2031 if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
2032 nid = interleave_nid(*mpol, vma, addr,
2033 huge_page_shift(hstate_vma(vma)));
2034 } else {
2035 nid = policy_node(gfp_flags, *mpol, numa_node_id());
2036 if ((*mpol)->mode == MPOL_BIND)
2037 *nodemask = &(*mpol)->nodes;
2038 }
2039 return nid;
2040}
2041
2042/*
2043 * init_nodemask_of_mempolicy
2044 *
2045 * If the current task's mempolicy is "default" [NULL], return 'false'
2046 * to indicate default policy. Otherwise, extract the policy nodemask
2047 * for 'bind' or 'interleave' policy into the argument nodemask, or
2048 * initialize the argument nodemask to contain the single node for
2049 * 'preferred' or 'local' policy and return 'true' to indicate presence
2050 * of non-default mempolicy.
2051 *
2052 * We don't bother with reference counting the mempolicy [mpol_get/put]
2053 * because the current task is examining its own mempolicy and a task's
2054 * mempolicy is only ever changed by the task itself.
2055 *
2056 * N.B., it is the caller's responsibility to free a returned nodemask.
2057 */
2058bool init_nodemask_of_mempolicy(nodemask_t *mask)
2059{
2060 struct mempolicy *mempolicy;
2061
2062 if (!(mask && current->mempolicy))
2063 return false;
2064
2065 task_lock(current);
2066 mempolicy = current->mempolicy;
2067 switch (mempolicy->mode) {
2068 case MPOL_PREFERRED:
2069 case MPOL_BIND:
2070 case MPOL_INTERLEAVE:
2071 *mask = mempolicy->nodes;
2072 break;
2073
2074 case MPOL_LOCAL:
2075 init_nodemask_of_node(mask, numa_node_id());
2076 break;
2077
2078 default:
2079 BUG();
2080 }
2081 task_unlock(current);
2082
2083 return true;
2084}
2085#endif
2086
2087/*
2088 * mempolicy_in_oom_domain
2089 *
2090 * If tsk's mempolicy is "bind", check for intersection between mask and
2091 * the policy nodemask. Otherwise, return true for all other policies
2092 * including "interleave", as a tsk with "interleave" policy may have
2093 * memory allocated from all nodes in system.
2094 *
2095 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
2096 */
2097bool mempolicy_in_oom_domain(struct task_struct *tsk,
2098 const nodemask_t *mask)
2099{
2100 struct mempolicy *mempolicy;
2101 bool ret = true;
2102
2103 if (!mask)
2104 return ret;
2105
2106 task_lock(tsk);
2107 mempolicy = tsk->mempolicy;
2108 if (mempolicy && mempolicy->mode == MPOL_BIND)
2109 ret = nodes_intersects(mempolicy->nodes, *mask);
2110 task_unlock(tsk);
2111
2112 return ret;
2113}
2114
2115/* Allocate a page in interleaved policy.
2116 Own path because it needs to do special accounting. */
2117static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
2118 unsigned nid)
2119{
2120 struct page *page;
2121
2122 page = __alloc_pages(gfp, order, nid, NULL);
2123 /* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
2124 if (!static_branch_likely(&vm_numa_stat_key))
2125 return page;
2126 if (page && page_to_nid(page) == nid) {
2127 preempt_disable();
2128 __count_numa_event(page_zone(page), NUMA_INTERLEAVE_HIT);
2129 preempt_enable();
2130 }
2131 return page;
2132}
2133
2134/**
2135 * alloc_pages_vma - Allocate a page for a VMA.
2136 * @gfp: GFP flags.
2137 * @order: Order of the GFP allocation.
2138 * @vma: Pointer to VMA or NULL if not available.
2139 * @addr: Virtual address of the allocation. Must be inside @vma.
2140 * @node: Which node to prefer for allocation (modulo policy).
2141 * @hugepage: For hugepages try only the preferred node if possible.
2142 *
2143 * Allocate a page for a specific address in @vma, using the appropriate
2144 * NUMA policy. When @vma is not NULL the caller must hold the mmap_lock
2145 * of the mm_struct of the VMA to prevent it from going away. Should be
2146 * used for all allocations for pages that will be mapped into user space.
2147 *
2148 * Return: The page on success or NULL if allocation fails.
2149 */
2150struct page *alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
2151 unsigned long addr, int node, bool hugepage)
2152{
2153 struct mempolicy *pol;
2154 struct page *page;
2155 int preferred_nid;
2156 nodemask_t *nmask;
2157
2158 pol = get_vma_policy(vma, addr);
2159
2160 if (pol->mode == MPOL_INTERLEAVE) {
2161 unsigned nid;
2162
2163 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
2164 mpol_cond_put(pol);
2165 page = alloc_page_interleave(gfp, order, nid);
2166 goto out;
2167 }
2168
2169 if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
2170 int hpage_node = node;
2171
2172 /*
2173 * For hugepage allocation and non-interleave policy which
2174 * allows the current node (or other explicitly preferred
2175 * node) we only try to allocate from the current/preferred
2176 * node and don't fall back to other nodes, as the cost of
2177 * remote accesses would likely offset THP benefits.
2178 *
2179 * If the policy is interleave, or does not allow the current
2180 * node in its nodemask, we allocate the standard way.
2181 */
2182 if (pol->mode == MPOL_PREFERRED)
2183 hpage_node = first_node(pol->nodes);
2184
2185 nmask = policy_nodemask(gfp, pol);
2186 if (!nmask || node_isset(hpage_node, *nmask)) {
2187 mpol_cond_put(pol);
2188 /*
2189 * First, try to allocate THP only on local node, but
2190 * don't reclaim unnecessarily, just compact.
2191 */
2192 page = __alloc_pages_node(hpage_node,
2193 gfp | __GFP_THISNODE | __GFP_NORETRY, order);
2194
2195 /*
2196 * If hugepage allocations are configured to always
2197 * synchronous compact or the vma has been madvised
2198 * to prefer hugepage backing, retry allowing remote
2199 * memory with both reclaim and compact as well.
2200 */
2201 if (!page && (gfp & __GFP_DIRECT_RECLAIM))
2202 page = __alloc_pages_node(hpage_node,
2203 gfp, order);
2204
2205 goto out;
2206 }
2207 }
2208
2209 nmask = policy_nodemask(gfp, pol);
2210 preferred_nid = policy_node(gfp, pol, node);
2211 page = __alloc_pages(gfp, order, preferred_nid, nmask);
2212 mpol_cond_put(pol);
2213out:
2214 return page;
2215}
2216EXPORT_SYMBOL(alloc_pages_vma);
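
/*
 * An illustrative sketch of a typical fault-path style caller (not part
 * of this file); vma and fault_addr stand in for values a real caller
 * would already hold, with mmap_lock taken:
 *
 *	struct page *page;
 *
 *	page = alloc_pages_vma(GFP_HIGHUSER_MOVABLE, 0, vma, fault_addr,
 *			       numa_node_id(), false);
 *	if (!page)
 *		return VM_FAULT_OOM;
 */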
2217
2218/**
2219 * alloc_pages - Allocate pages.
2220 * @gfp: GFP flags.
2221 * @order: Power of two of number of pages to allocate.
2222 *
2223 * Allocate 1 << @order contiguous pages. The physical address of the
2224 * first page is naturally aligned (eg an order-3 allocation will be aligned
2225 * to a multiple of 8 * PAGE_SIZE bytes). The NUMA policy of the current
2226 * process is honoured when in process context.
2227 *
2228 * Context: Can be called from any context, providing the appropriate GFP
2229 * flags are used.
2230 * Return: The page on success or NULL if allocation fails.
2231 */
2232struct page *alloc_pages(gfp_t gfp, unsigned order)
2233{
2234 struct mempolicy *pol = &default_policy;
2235 struct page *page;
2236
2237 if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2238 pol = get_task_policy(current);
2239
2240 /*
2241 * No reference counting needed for current->mempolicy
2242 * nor system default_policy
2243 */
2244 if (pol->mode == MPOL_INTERLEAVE)
2245 page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2246 else
2247 page = __alloc_pages(gfp, order,
2248 policy_node(gfp, pol, numa_node_id()),
2249 policy_nodemask(gfp, pol));
2250
2251 return page;
2252}
2253EXPORT_SYMBOL(alloc_pages);
2254
2255int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2256{
2257 struct mempolicy *pol = mpol_dup(vma_policy(src));
2258
2259 if (IS_ERR(pol))
2260 return PTR_ERR(pol);
2261 dst->vm_policy = pol;
2262 return 0;
2263}
2264
2265/*
2266 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
2267 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
2268 * with the mems_allowed returned by cpuset_mems_allowed(). This
2269 * keeps mempolicies cpuset relative after its cpuset moves. See
2270 * further kernel/cpuset.c update_nodemask().
2271 *
2272 * current's mempolicy may be rebound by another task (the task that changes
2273 * the cpuset's mems), so we needn't do rebind work for the current task.
2274 */
2275
2276/* Slow path of a mempolicy duplicate */
2277struct mempolicy *__mpol_dup(struct mempolicy *old)
2278{
2279 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2280
2281 if (!new)
2282 return ERR_PTR(-ENOMEM);
2283
2284 /* task's mempolicy is protected by alloc_lock */
2285 if (old == current->mempolicy) {
2286 task_lock(current);
2287 *new = *old;
2288 task_unlock(current);
2289 } else
2290 *new = *old;
2291
2292 if (current_cpuset_is_being_rebound()) {
2293 nodemask_t mems = cpuset_mems_allowed(current);
2294 mpol_rebind_policy(new, &mems);
2295 }
2296 atomic_set(&new->refcnt, 1);
2297 return new;
2298}
2299
2300/* Slow path of a mempolicy comparison */
2301bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
2302{
2303 if (!a || !b)
2304 return false;
2305 if (a->mode != b->mode)
2306 return false;
2307 if (a->flags != b->flags)
2308 return false;
2309 if (mpol_store_user_nodemask(a))
2310 if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2311 return false;
2312
2313 switch (a->mode) {
2314 case MPOL_BIND:
2315 case MPOL_INTERLEAVE:
2316 case MPOL_PREFERRED:
2317 return !!nodes_equal(a->nodes, b->nodes);
2318 case MPOL_LOCAL:
2319 return true;
2320 default:
2321 BUG();
2322 return false;
2323 }
2324}
2325
2326/*
2327 * Shared memory backing store policy support.
2328 *
2329 * Remember policies even when nobody has shared memory mapped.
2330 * The policies are kept in Red-Black tree linked from the inode.
2331 * They are protected by the sp->lock rwlock, which should be held
2332 * for any accesses to the tree.
2333 */
2334
2335/*
2336 * Look up the first element intersecting start-end. Caller holds sp->lock for
2337 * reading or for writing
2338 */
2339static struct sp_node *
2340sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2341{
2342 struct rb_node *n = sp->root.rb_node;
2343
2344 while (n) {
2345 struct sp_node *p = rb_entry(n, struct sp_node, nd);
2346
2347 if (start >= p->end)
2348 n = n->rb_right;
2349 else if (end <= p->start)
2350 n = n->rb_left;
2351 else
2352 break;
2353 }
2354 if (!n)
2355 return NULL;
2356 for (;;) {
2357 struct sp_node *w = NULL;
2358 struct rb_node *prev = rb_prev(n);
2359 if (!prev)
2360 break;
2361 w = rb_entry(prev, struct sp_node, nd);
2362 if (w->end <= start)
2363 break;
2364 n = prev;
2365 }
2366 return rb_entry(n, struct sp_node, nd);
2367}
2368
2369/*
2370 * Insert a new shared policy into the list. Caller holds sp->lock for
2371 * writing.
2372 */
2373static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2374{
2375 struct rb_node **p = &sp->root.rb_node;
2376 struct rb_node *parent = NULL;
2377 struct sp_node *nd;
2378
2379 while (*p) {
2380 parent = *p;
2381 nd = rb_entry(parent, struct sp_node, nd);
2382 if (new->start < nd->start)
2383 p = &(*p)->rb_left;
2384 else if (new->end > nd->end)
2385 p = &(*p)->rb_right;
2386 else
2387 BUG();
2388 }
2389 rb_link_node(&new->nd, parent, p);
2390 rb_insert_color(&new->nd, &sp->root);
2391 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
2392 new->policy ? new->policy->mode : 0);
2393}
2394
2395/* Find shared policy intersecting idx */
2396struct mempolicy *
2397mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2398{
2399 struct mempolicy *pol = NULL;
2400 struct sp_node *sn;
2401
2402 if (!sp->root.rb_node)
2403 return NULL;
2404 read_lock(&sp->lock);
2405 sn = sp_lookup(sp, idx, idx+1);
2406 if (sn) {
2407 mpol_get(sn->policy);
2408 pol = sn->policy;
2409 }
2410 read_unlock(&sp->lock);
2411 return pol;
2412}
2413
2414static void sp_free(struct sp_node *n)
2415{
2416 mpol_put(n->policy);
2417 kmem_cache_free(sn_cache, n);
2418}
2419
2420/**
2421 * mpol_misplaced - check whether current page node is valid in policy
2422 *
2423 * @page: page to be checked
2424 * @vma: vm area where page mapped
2425 * @addr: virtual address where page mapped
2426 *
2427 * Look up the current policy node id for vma,addr and "compare to" page's
2428 * node id. Policy determination "mimics" alloc_page_vma().
2429 * Called from fault path where we know the vma and faulting address.
2430 *
2431 * Return: -1 if the page is in a node that is valid for this policy, or a
2432 * suitable node ID to allocate a replacement page from.
2433 */
2434int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2435{
2436 struct mempolicy *pol;
2437 struct zoneref *z;
2438 int curnid = page_to_nid(page);
2439 unsigned long pgoff;
2440 int thiscpu = raw_smp_processor_id();
2441 int thisnid = cpu_to_node(thiscpu);
2442 int polnid = NUMA_NO_NODE;
2443 int ret = -1;
2444
2445 pol = get_vma_policy(vma, addr);
2446 if (!(pol->flags & MPOL_F_MOF))
2447 goto out;
2448
2449 switch (pol->mode) {
2450 case MPOL_INTERLEAVE:
2451 pgoff = vma->vm_pgoff;
2452 pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
2453 polnid = offset_il_node(pol, pgoff);
2454 break;
2455
2456 case MPOL_PREFERRED:
2457 polnid = first_node(pol->nodes);
2458 break;
2459
2460 case MPOL_LOCAL:
2461 polnid = numa_node_id();
2462 break;
2463
2464 case MPOL_BIND:
2465 /* Optimize placement among multiple nodes via NUMA balancing */
2466 if (pol->flags & MPOL_F_MORON) {
2467 if (node_isset(thisnid, pol->nodes))
2468 break;
2469 goto out;
2470 }
2471
2472		 * MPOL_BIND allows binding to multiple nodes.
2473		 * Use the current page's node if it is in the policy nodemask,
2474		 * else select the nearest allowed node, if any.
2475		 * If there are no allowed nodes, use the current node [!misplaced].
2476 * If no allowed nodes, use current [!misplaced].
2477 */
2478 if (node_isset(curnid, pol->nodes))
2479 goto out;
2480 z = first_zones_zonelist(
2481 node_zonelist(numa_node_id(), GFP_HIGHUSER),
2482 gfp_zone(GFP_HIGHUSER),
2483 &pol->nodes);
2484 polnid = zone_to_nid(z->zone);
2485 break;
2486
2487 default:
2488 BUG();
2489 }
2490
2491 /* Migrate the page towards the node whose CPU is referencing it */
2492 if (pol->flags & MPOL_F_MORON) {
2493 polnid = thisnid;
2494
2495 if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
2496 goto out;
2497 }
2498
2499 if (curnid != polnid)
2500 ret = polnid;
2501out:
2502 mpol_cond_put(pol);
2503
2504 return ret;
2505}
2506
2507/*
2508 * Drop the (possibly final) reference to task->mempolicy. It needs to be
2509 * dropped after task->mempolicy is set to NULL so that any allocation done as
2510 * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2511 * policy.
2512 */
2513void mpol_put_task_policy(struct task_struct *task)
2514{
2515 struct mempolicy *pol;
2516
2517 task_lock(task);
2518 pol = task->mempolicy;
2519 task->mempolicy = NULL;
2520 task_unlock(task);
2521 mpol_put(pol);
2522}
2523
2524static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2525{
2526	pr_debug("deleting %lx-%lx\n", n->start, n->end);
2527 rb_erase(&n->nd, &sp->root);
2528 sp_free(n);
2529}
2530
2531static void sp_node_init(struct sp_node *node, unsigned long start,
2532 unsigned long end, struct mempolicy *pol)
2533{
2534 node->start = start;
2535 node->end = end;
2536 node->policy = pol;
2537}
2538
2539static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2540 struct mempolicy *pol)
2541{
2542 struct sp_node *n;
2543 struct mempolicy *newpol;
2544
2545 n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2546 if (!n)
2547 return NULL;
2548
2549 newpol = mpol_dup(pol);
2550 if (IS_ERR(newpol)) {
2551 kmem_cache_free(sn_cache, n);
2552 return NULL;
2553 }
2554 newpol->flags |= MPOL_F_SHARED;
2555 sp_node_init(n, start, end, newpol);
2556
2557 return n;
2558}
2559
2560/* Replace a policy range. */
2561static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2562 unsigned long end, struct sp_node *new)
2563{
2564 struct sp_node *n;
2565 struct sp_node *n_new = NULL;
2566 struct mempolicy *mpol_new = NULL;
2567 int ret = 0;
2568
2569restart:
2570 write_lock(&sp->lock);
2571 n = sp_lookup(sp, start, end);
2572 /* Take care of old policies in the same range. */
2573 while (n && n->start < end) {
2574 struct rb_node *next = rb_next(&n->nd);
2575 if (n->start >= start) {
2576 if (n->end <= end)
2577 sp_delete(sp, n);
2578 else
2579 n->start = end;
2580 } else {
2581 /* Old policy spanning whole new range. */
2582 if (n->end > end) {
2583 if (!n_new)
2584 goto alloc_new;
2585
2586 *mpol_new = *n->policy;
2587 atomic_set(&mpol_new->refcnt, 1);
2588 sp_node_init(n_new, end, n->end, mpol_new);
2589 n->end = start;
2590 sp_insert(sp, n_new);
2591 n_new = NULL;
2592 mpol_new = NULL;
2593 break;
2594 } else
2595 n->end = start;
2596 }
2597 if (!next)
2598 break;
2599 n = rb_entry(next, struct sp_node, nd);
2600 }
2601 if (new)
2602 sp_insert(sp, new);
2603 write_unlock(&sp->lock);
2604 ret = 0;
2605
2606err_out:
2607 if (mpol_new)
2608 mpol_put(mpol_new);
2609 if (n_new)
2610 kmem_cache_free(sn_cache, n_new);
2611
2612 return ret;
2613
2614alloc_new:
2615 write_unlock(&sp->lock);
2616 ret = -ENOMEM;
2617 n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2618 if (!n_new)
2619 goto err_out;
2620 mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2621 if (!mpol_new)
2622 goto err_out;
2623 goto restart;
2624}
2625
2626/**
2627 * mpol_shared_policy_init - initialize shared policy for inode
2628 * @sp: pointer to inode shared policy
2629 * @mpol: struct mempolicy to install
2630 *
2631 * Install non-NULL @mpol in inode's shared policy rb-tree.
2632 * On entry, the current task has a reference on a non-NULL @mpol.
2633 * This must be released on exit.
2634 * This is called during get_inode() and we can use GFP_KERNEL.
2635 */
2636void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2637{
2638 int ret;
2639
2640 sp->root = RB_ROOT; /* empty tree == default mempolicy */
2641 rwlock_init(&sp->lock);
2642
2643 if (mpol) {
2644 struct vm_area_struct pvma;
2645 struct mempolicy *new;
2646 NODEMASK_SCRATCH(scratch);
2647
2648 if (!scratch)
2649 goto put_mpol;
2650 /* contextualize the tmpfs mount point mempolicy */
2651 new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
2652 if (IS_ERR(new))
2653 goto free_scratch; /* no valid nodemask intersection */
2654
2655 task_lock(current);
2656 ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
2657 task_unlock(current);
2658 if (ret)
2659 goto put_new;
2660
2661 /* Create pseudo-vma that contains just the policy */
2662 vma_init(&pvma, NULL);
2663 pvma.vm_end = TASK_SIZE; /* policy covers entire file */
2664 mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
2665
2666put_new:
2667 mpol_put(new); /* drop initial ref */
2668free_scratch:
2669 NODEMASK_SCRATCH_FREE(scratch);
2670put_mpol:
2671 mpol_put(mpol); /* drop our incoming ref on sb mpol */
2672 }
2673}
2674
2675int mpol_set_shared_policy(struct shared_policy *info,
2676 struct vm_area_struct *vma, struct mempolicy *npol)
2677{
2678 int err;
2679 struct sp_node *new = NULL;
2680 unsigned long sz = vma_pages(vma);
2681
2682 pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
2683 vma->vm_pgoff,
2684 sz, npol ? npol->mode : -1,
2685 npol ? npol->flags : -1,
2686 npol ? nodes_addr(npol->nodes)[0] : NUMA_NO_NODE);
2687
2688 if (npol) {
2689 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2690 if (!new)
2691 return -ENOMEM;
2692 }
2693 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2694 if (err && new)
2695 sp_free(new);
2696 return err;
2697}
2698
2699/* Free a backing policy store on inode delete. */
2700void mpol_free_shared_policy(struct shared_policy *p)
2701{
2702 struct sp_node *n;
2703 struct rb_node *next;
2704
2705 if (!p->root.rb_node)
2706 return;
2707 write_lock(&p->lock);
2708 next = rb_first(&p->root);
2709 while (next) {
2710 n = rb_entry(next, struct sp_node, nd);
2711 next = rb_next(&n->nd);
2712 sp_delete(p, n);
2713 }
2714 write_unlock(&p->lock);
2715}
2716
2717#ifdef CONFIG_NUMA_BALANCING
2718static int __initdata numabalancing_override;
2719
2720static void __init check_numabalancing_enable(void)
2721{
2722 bool numabalancing_default = false;
2723
2724 if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
2725 numabalancing_default = true;
2726
2727 /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2728 if (numabalancing_override)
2729 set_numabalancing_state(numabalancing_override == 1);
2730
2731 if (num_online_nodes() > 1 && !numabalancing_override) {
2732 pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
2733 numabalancing_default ? "Enabling" : "Disabling");
2734 set_numabalancing_state(numabalancing_default);
2735 }
2736}
2737
2738static int __init setup_numabalancing(char *str)
2739{
2740 int ret = 0;
2741 if (!str)
2742 goto out;
2743
2744 if (!strcmp(str, "enable")) {
2745 numabalancing_override = 1;
2746 ret = 1;
2747 } else if (!strcmp(str, "disable")) {
2748 numabalancing_override = -1;
2749 ret = 1;
2750 }
2751out:
2752 if (!ret)
2753 pr_warn("Unable to parse numa_balancing=\n");
2754
2755 return ret;
2756}
2757__setup("numa_balancing=", setup_numabalancing);
2758#else
2759static inline void __init check_numabalancing_enable(void)
2760{
2761}
2762#endif /* CONFIG_NUMA_BALANCING */
2763
2764/* assumes fs == KERNEL_DS */
2765void __init numa_policy_init(void)
2766{
2767 nodemask_t interleave_nodes;
2768 unsigned long largest = 0;
2769 int nid, prefer = 0;
2770
2771 policy_cache = kmem_cache_create("numa_policy",
2772 sizeof(struct mempolicy),
2773 0, SLAB_PANIC, NULL);
2774
2775 sn_cache = kmem_cache_create("shared_policy_node",
2776 sizeof(struct sp_node),
2777 0, SLAB_PANIC, NULL);
2778
2779 for_each_node(nid) {
2780 preferred_node_policy[nid] = (struct mempolicy) {
2781 .refcnt = ATOMIC_INIT(1),
2782 .mode = MPOL_PREFERRED,
2783 .flags = MPOL_F_MOF | MPOL_F_MORON,
2784 .nodes = nodemask_of_node(nid),
2785 };
2786 }
2787
2788 /*
2789 * Set interleaving policy for system init. Interleaving is only
2790	 * enabled across suitably sized nodes (default is >= 16MB), falling
2791	 * back to the largest node if they're all smaller.
2792 */
2793 nodes_clear(interleave_nodes);
2794 for_each_node_state(nid, N_MEMORY) {
2795 unsigned long total_pages = node_present_pages(nid);
2796
2797 /* Preserve the largest node */
2798 if (largest < total_pages) {
2799 largest = total_pages;
2800 prefer = nid;
2801 }
2802
2803 /* Interleave this node? */
2804 if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2805 node_set(nid, interleave_nodes);
2806 }
2807
2808 /* All too small, use the largest */
2809 if (unlikely(nodes_empty(interleave_nodes)))
2810 node_set(prefer, interleave_nodes);
2811
2812 if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2813 pr_err("%s: interleaving failed\n", __func__);
2814
2815 check_numabalancing_enable();
2816}
2817
2818/* Reset policy of current process to default */
2819void numa_default_policy(void)
2820{
2821 do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
2822}
2823
2824/*
2825 * Parse and format mempolicy from/to strings
2826 */
2827
2828static const char * const policy_modes[] =
2829{
2830 [MPOL_DEFAULT] = "default",
2831 [MPOL_PREFERRED] = "prefer",
2832 [MPOL_BIND] = "bind",
2833 [MPOL_INTERLEAVE] = "interleave",
2834 [MPOL_LOCAL] = "local",
2835};
2836
2837
2838#ifdef CONFIG_TMPFS
2839/**
2840 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
2841 * @str: string containing mempolicy to parse
2842 * @mpol: pointer to struct mempolicy pointer, returned on success.
2843 *
2844 * Format of input:
2845 * <mode>[=<flags>][:<nodelist>]
2846 *
2847 * On success, returns 0, else 1
2848 */
2849int mpol_parse_str(char *str, struct mempolicy **mpol)
2850{
2851 struct mempolicy *new = NULL;
2852 unsigned short mode_flags;
2853 nodemask_t nodes;
2854 char *nodelist = strchr(str, ':');
2855 char *flags = strchr(str, '=');
2856 int err = 1, mode;
2857
2858 if (flags)
2859 *flags++ = '\0'; /* terminate mode string */
2860
2861 if (nodelist) {
2862 /* NUL-terminate mode or flags string */
2863 *nodelist++ = '\0';
2864 if (nodelist_parse(nodelist, nodes))
2865 goto out;
2866 if (!nodes_subset(nodes, node_states[N_MEMORY]))
2867 goto out;
2868 } else
2869 nodes_clear(nodes);
2870
2871 mode = match_string(policy_modes, MPOL_MAX, str);
2872 if (mode < 0)
2873 goto out;
2874
2875 switch (mode) {
2876 case MPOL_PREFERRED:
2877 /*
2878		 * Insist on a nodelist of one node only; later we use
2879		 * first_node(nodes) to grab the single node, so here the
2880		 * nodelist (or nodes) cannot be empty.
2881 */
2882 if (nodelist) {
2883 char *rest = nodelist;
2884 while (isdigit(*rest))
2885 rest++;
2886 if (*rest)
2887 goto out;
2888 if (nodes_empty(nodes))
2889 goto out;
2890 }
2891 break;
2892 case MPOL_INTERLEAVE:
2893 /*
2894 * Default to online nodes with memory if no nodelist
2895 */
2896 if (!nodelist)
2897 nodes = node_states[N_MEMORY];
2898 break;
2899 case MPOL_LOCAL:
2900 /*
2901 * Don't allow a nodelist; mpol_new() checks flags
2902 */
2903 if (nodelist)
2904 goto out;
2905 break;
2906 case MPOL_DEFAULT:
2907 /*
2908		 * Insist on an empty nodelist
2909 */
2910 if (!nodelist)
2911 err = 0;
2912 goto out;
2913 case MPOL_BIND:
2914 /*
2915 * Insist on a nodelist
2916 */
2917 if (!nodelist)
2918 goto out;
2919 }
2920
2921 mode_flags = 0;
2922 if (flags) {
2923 /*
2924 * Currently, we only support two mutually exclusive
2925 * mode flags.
2926 */
2927 if (!strcmp(flags, "static"))
2928 mode_flags |= MPOL_F_STATIC_NODES;
2929 else if (!strcmp(flags, "relative"))
2930 mode_flags |= MPOL_F_RELATIVE_NODES;
2931 else
2932 goto out;
2933 }
2934
2935 new = mpol_new(mode, mode_flags, &nodes);
2936 if (IS_ERR(new))
2937 goto out;
2938
2939 /*
2940 * Save nodes for mpol_to_str() to show the tmpfs mount options
2941 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2942 */
2943 if (mode != MPOL_PREFERRED) {
2944 new->nodes = nodes;
2945 } else if (nodelist) {
2946 nodes_clear(new->nodes);
2947 node_set(first_node(nodes), new->nodes);
2948 } else {
2949 new->mode = MPOL_LOCAL;
2950 }
2951
2952 /*
2953 * Save nodes for contextualization: this will be used to "clone"
2954 * the mempolicy in a specific context [cpuset] at a later time.
2955 */
2956 new->w.user_nodemask = nodes;
2957
2958 err = 0;
2959
2960out:
2961 /* Restore string for error message */
2962 if (nodelist)
2963 *--nodelist = ':';
2964 if (flags)
2965 *--flags = '=';
2966 if (!err)
2967 *mpol = new;
2968 return err;
2969}
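
/*
 * Illustrative tmpfs mount options whose value is handed to the parser
 * above, following the <mode>[=<flags>][:<nodelist>] format:
 *
 *	mpol=interleave:0-3
 *	mpol=bind=static:0,2
 *	mpol=prefer:1
 *	mpol=local
 */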
2970#endif /* CONFIG_TMPFS */
2971
2972/**
2973 * mpol_to_str - format a mempolicy structure for printing
2974 * @buffer: to contain formatted mempolicy string
2975 * @maxlen: length of @buffer
2976 * @pol: pointer to mempolicy to be formatted
2977 *
2978 * Convert @pol into a string. If @buffer is too short, truncate the string.
2979 * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
2980 * longest flag, "relative", and to display at least a few node ids.
2981 */
2982void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
2983{
2984 char *p = buffer;
2985 nodemask_t nodes = NODE_MASK_NONE;
2986 unsigned short mode = MPOL_DEFAULT;
2987 unsigned short flags = 0;
2988
2989 if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
2990 mode = pol->mode;
2991 flags = pol->flags;
2992 }
2993
2994 switch (mode) {
2995 case MPOL_DEFAULT:
2996 case MPOL_LOCAL:
2997 break;
2998 case MPOL_PREFERRED:
2999 case MPOL_BIND:
3000 case MPOL_INTERLEAVE:
3001 nodes = pol->nodes;
3002 break;
3003 default:
3004 WARN_ON_ONCE(1);
3005 snprintf(p, maxlen, "unknown");
3006 return;
3007 }
3008
3009 p += snprintf(p, maxlen, "%s", policy_modes[mode]);
3010
3011 if (flags & MPOL_MODE_FLAGS) {
3012 p += snprintf(p, buffer + maxlen - p, "=");
3013
3014 /*
3015 * Currently, the only defined flags are mutually exclusive
3016 */
3017 if (flags & MPOL_F_STATIC_NODES)
3018 p += snprintf(p, buffer + maxlen - p, "static");
3019 else if (flags & MPOL_F_RELATIVE_NODES)
3020 p += snprintf(p, buffer + maxlen - p, "relative");
3021 }
3022
3023 if (!nodes_empty(nodes))
3024 p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
3025 nodemask_pr_args(&nodes));
3026}
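
/*
 * For illustration, mpol_to_str() emits strings such as "default",
 * "prefer:1", "bind:0-3" and "interleave=static:0,2", i.e. the mode,
 * an optional flag and an optional nodelist.
 */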