1/*
2 * Simple NUMA memory policy for the Linux kernel.
3 *
4 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
5 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
6 * Subject to the GNU Public License, version 2.
7 *
8 * NUMA policy allows the user to give hints about which node(s) memory
9 * should be allocated on.
10 *
11 * Support four policies per VMA and per process:
12 *
13 * The VMA policy has priority over the process policy for a page fault.
14 *
15 * interleave Allocate memory interleaved over a set of nodes,
16 * with normal fallback if it fails.
17 * For VMA based allocations this interleaves based on the
18 * offset into the backing object or offset into the mapping
19 * for anonymous memory. For process policy a process counter
20 * is used.
21 *
22 * bind Only allocate memory on a specific set of nodes,
23 * no fallback.
24 * FIXME: memory is allocated starting with the first node
25 * to the last. It would be better if bind would truly restrict
26 * the allocation to memory nodes instead
27 *
28 * preferred Try a specific node first before normal fallback.
29 * As a special case NUMA_NO_NODE here means do the allocation
30 * on the local CPU. This is normally identical to default,
31 * but useful to set in a VMA when you have a non default
32 * process policy.
33 *
34 * default Allocate on the local node first, or when on a VMA
35 * use the process policy. This is what Linux always did
36 * in a NUMA aware kernel and still does by, ahem, default.
37 *
38 * The process policy is applied for most non interrupt memory allocations
39 * in that process' context. Interrupts ignore the policies and always
40 * try to allocate on the local CPU. The VMA policy is only applied for memory
41 * allocations for a VMA in the VM.
42 *
43 * Currently there are a few corner cases in swapping where the policy
44 * is not applied, but the majority should be handled. When process policy
45 * is used it is not remembered over swap outs/swap ins.
46 *
47 * Only the highest zone in the zone hierarchy gets policied. Allocations
48 * requesting a lower zone just use default policy. This implies that
49 * on systems with highmem, kernel lowmem allocations don't get policied.
50 * Same with GFP_DMA allocations.
51 *
52 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
53 * all users and remembered even when nobody has memory mapped.
54 */
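/*
 * Illustrative only: a hedged userspace sketch (not part of this file) of
 * how the policies above are typically requested, using set_mempolicy(2)
 * and mbind(2) as declared in <numaif.h>:
 *
 *	unsigned long mask = (1UL << 0) | (1UL << 1);	// nodes 0 and 1
 *
 *	// Process policy: interleave this task's allocations over nodes 0,1.
 *	set_mempolicy(MPOL_INTERLEAVE, &mask, sizeof(mask) * 8);
 *
 *	// VMA policy: bind one mapping to node 0 only, with no fallback.
 *	unsigned long node0 = 1UL << 0;
 *	mbind(buf, len, MPOL_BIND, &node0, sizeof(node0) * 8, MPOL_MF_STRICT);
 */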
55
56/* Notebook:
57 fix mmap readahead to honour policy and enable policy for any page cache
58 object
59 statistics for bigpages
60 global policy for page cache? currently it uses process policy. Requires
61 first item above.
62 handle mremap for shared memory (currently ignored for the policy)
63 grows down?
64 make bind policy root only? It can trigger oom much faster and the
65 kernel is not always graceful about that.
66*/
67
68#include <linux/mempolicy.h>
69#include <linux/mm.h>
70#include <linux/highmem.h>
71#include <linux/hugetlb.h>
72#include <linux/kernel.h>
73#include <linux/sched.h>
74#include <linux/nodemask.h>
75#include <linux/cpuset.h>
76#include <linux/slab.h>
77#include <linux/string.h>
78#include <linux/export.h>
79#include <linux/nsproxy.h>
80#include <linux/interrupt.h>
81#include <linux/init.h>
82#include <linux/compat.h>
83#include <linux/swap.h>
84#include <linux/seq_file.h>
85#include <linux/proc_fs.h>
86#include <linux/migrate.h>
87#include <linux/ksm.h>
88#include <linux/rmap.h>
89#include <linux/security.h>
90#include <linux/syscalls.h>
91#include <linux/ctype.h>
92#include <linux/mm_inline.h>
93#include <linux/mmu_notifier.h>
94
95#include <asm/tlbflush.h>
96#include <asm/uaccess.h>
97#include <linux/random.h>
98
99#include "internal.h"
100
101/* Internal flags */
102#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0) /* Skip checks for contiguous vmas */
103#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */
104
105static struct kmem_cache *policy_cache;
106static struct kmem_cache *sn_cache;
107
108/* Highest zone. A specific allocation for a zone below that is not
109 policied. */
110enum zone_type policy_zone = 0;
111
112/*
113 * run-time system-wide default policy => local allocation
114 */
115static struct mempolicy default_policy = {
116 .refcnt = ATOMIC_INIT(1), /* never free it */
117 .mode = MPOL_PREFERRED,
118 .flags = MPOL_F_LOCAL,
119};
120
121static struct mempolicy preferred_node_policy[MAX_NUMNODES];
122
123static struct mempolicy *get_task_policy(struct task_struct *p)
124{
125 struct mempolicy *pol = p->mempolicy;
126
127 if (!pol) {
128 int node = numa_node_id();
129
130 if (node != NUMA_NO_NODE) {
131 pol = &preferred_node_policy[node];
132 /*
133 * preferred_node_policy is not initialised early in
134 * boot
135 */
136 if (!pol->mode)
137 pol = NULL;
138 }
139 }
140
141 return pol;
142}
143
144static const struct mempolicy_operations {
145 int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
146 /*
147 * If the read-side task has no lock to protect task->mempolicy, the
148 * write-side task rebinds task->mempolicy in two steps. The first step
149 * sets all the newly allowed nodes, and the second step clears all the
150 * disallowed nodes. This way we avoid ever being left with no node from
151 * which to allocate a page.
152 * If we hold a lock to protect task->mempolicy on the read side, we
153 * rebind directly.
154 *
155 * step:
156 * MPOL_REBIND_ONCE - do rebind work at once
157 * MPOL_REBIND_STEP1 - set all the newly allowed nodes
158 * MPOL_REBIND_STEP2 - clear all the disallowed nodes
159 */
160 void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
161 enum mpol_rebind_step step);
162} mpol_ops[MPOL_MAX];
163
164/* Check that the nodemask contains at least one populated zone */
165static int is_valid_nodemask(const nodemask_t *nodemask)
166{
167 return nodes_intersects(*nodemask, node_states[N_MEMORY]);
168}
169
170static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
171{
172 return pol->flags & MPOL_MODE_FLAGS;
173}
174
175static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
176 const nodemask_t *rel)
177{
178 nodemask_t tmp;
179 nodes_fold(tmp, *orig, nodes_weight(*rel));
180 nodes_onto(*ret, tmp, *rel);
181}
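/*
 * Worked example (illustrative, hypothetical masks): with a user-relative
 * request orig = {0,2,5} and an allowed mask rel = {4,6,8} (weight 3),
 * nodes_fold() reduces orig modulo 3 to {0,2}, and nodes_onto() maps those
 * positions onto rel's set bits, so the caller gets ret = {4,8}.
 */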
182
183static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
184{
185 if (nodes_empty(*nodes))
186 return -EINVAL;
187 pol->v.nodes = *nodes;
188 return 0;
189}
190
191static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
192{
193 if (!nodes)
194 pol->flags |= MPOL_F_LOCAL; /* local allocation */
195 else if (nodes_empty(*nodes))
196 return -EINVAL; /* no allowed nodes */
197 else
198 pol->v.preferred_node = first_node(*nodes);
199 return 0;
200}
201
202static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
203{
204 if (!is_valid_nodemask(nodes))
205 return -EINVAL;
206 pol->v.nodes = *nodes;
207 return 0;
208}
209
210/*
211 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
212 * any, for the new policy. mpol_new() has already validated the nodes
213 * parameter with respect to the policy mode and flags. But, we need to
214 * handle an empty nodemask with MPOL_PREFERRED here.
215 *
216 * Must be called holding task's alloc_lock to protect task's mems_allowed
217 * and mempolicy. May also be called holding the mmap_semaphore for write.
218 */
219static int mpol_set_nodemask(struct mempolicy *pol,
220 const nodemask_t *nodes, struct nodemask_scratch *nsc)
221{
222 int ret;
223
224 /* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
225 if (pol == NULL)
226 return 0;
227 /* Check N_MEMORY */
228 nodes_and(nsc->mask1,
229 cpuset_current_mems_allowed, node_states[N_MEMORY]);
230
231 VM_BUG_ON(!nodes);
232 if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
233 nodes = NULL; /* explicit local allocation */
234 else {
235 if (pol->flags & MPOL_F_RELATIVE_NODES)
236 mpol_relative_nodemask(&nsc->mask2, nodes,&nsc->mask1);
237 else
238 nodes_and(nsc->mask2, *nodes, nsc->mask1);
239
240 if (mpol_store_user_nodemask(pol))
241 pol->w.user_nodemask = *nodes;
242 else
243 pol->w.cpuset_mems_allowed =
244 cpuset_current_mems_allowed;
245 }
246
247 if (nodes)
248 ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
249 else
250 ret = mpol_ops[pol->mode].create(pol, NULL);
251 return ret;
252}
253
254/*
255 * This function just creates a new policy, does some checks and simple
256 * initialization. You must invoke mpol_set_nodemask() to set nodes.
257 */
258static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
259 nodemask_t *nodes)
260{
261 struct mempolicy *policy;
262
263 pr_debug("setting mode %d flags %d nodes[0] %lx\n",
264 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
265
266 if (mode == MPOL_DEFAULT) {
267 if (nodes && !nodes_empty(*nodes))
268 return ERR_PTR(-EINVAL);
269 return NULL;
270 }
271 VM_BUG_ON(!nodes);
272
273 /*
274 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
275 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
276 * All other modes require a valid pointer to a non-empty nodemask.
277 */
278 if (mode == MPOL_PREFERRED) {
279 if (nodes_empty(*nodes)) {
280 if (((flags & MPOL_F_STATIC_NODES) ||
281 (flags & MPOL_F_RELATIVE_NODES)))
282 return ERR_PTR(-EINVAL);
283 }
284 } else if (mode == MPOL_LOCAL) {
285 if (!nodes_empty(*nodes))
286 return ERR_PTR(-EINVAL);
287 mode = MPOL_PREFERRED;
288 } else if (nodes_empty(*nodes))
289 return ERR_PTR(-EINVAL);
290 policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
291 if (!policy)
292 return ERR_PTR(-ENOMEM);
293 atomic_set(&policy->refcnt, 1);
294 policy->mode = mode;
295 policy->flags = flags;
296
297 return policy;
298}
299
300/* Slow path of a mpol destructor. */
301void __mpol_put(struct mempolicy *p)
302{
303 if (!atomic_dec_and_test(&p->refcnt))
304 return;
305 kmem_cache_free(policy_cache, p);
306}
307
308static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes,
309 enum mpol_rebind_step step)
310{
311}
312
313/*
314 * step:
315 * MPOL_REBIND_ONCE - do rebind work at once
316 * MPOL_REBIND_STEP1 - set all the newly allowed nodes
317 * MPOL_REBIND_STEP2 - clear all the disallowed nodes
318 */
319static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
320 enum mpol_rebind_step step)
321{
322 nodemask_t tmp;
323
324 if (pol->flags & MPOL_F_STATIC_NODES)
325 nodes_and(tmp, pol->w.user_nodemask, *nodes);
326 else if (pol->flags & MPOL_F_RELATIVE_NODES)
327 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
328 else {
329 /*
330 * if step == 1, we use ->w.cpuset_mems_allowed to cache the
331 * result
332 */
333 if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) {
334 nodes_remap(tmp, pol->v.nodes,
335 pol->w.cpuset_mems_allowed, *nodes);
336 pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
337 } else if (step == MPOL_REBIND_STEP2) {
338 tmp = pol->w.cpuset_mems_allowed;
339 pol->w.cpuset_mems_allowed = *nodes;
340 } else
341 BUG();
342 }
343
344 if (nodes_empty(tmp))
345 tmp = *nodes;
346
347 if (step == MPOL_REBIND_STEP1)
348 nodes_or(pol->v.nodes, pol->v.nodes, tmp);
349 else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2)
350 pol->v.nodes = tmp;
351 else
352 BUG();
353
354 if (!node_isset(current->il_next, tmp)) {
355 current->il_next = next_node(current->il_next, tmp);
356 if (current->il_next >= MAX_NUMNODES)
357 current->il_next = first_node(tmp);
358 if (current->il_next >= MAX_NUMNODES)
359 current->il_next = numa_node_id();
360 }
361}
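/*
 * Worked example of the two-step rebind above (illustrative masks): a
 * default MPOL_INTERLEAVE policy with v.nodes = {0,1} whose cpuset moves
 * from mems {0,1} to {2,3}.  STEP1 remaps {0,1} to tmp = {2,3}, caches it
 * in w.cpuset_mems_allowed and ORs it in, leaving v.nodes = {0,1,2,3} so
 * allocations always find a node.  STEP2 then installs the cached {2,3}
 * as the final v.nodes.
 */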
362
363static void mpol_rebind_preferred(struct mempolicy *pol,
364 const nodemask_t *nodes,
365 enum mpol_rebind_step step)
366{
367 nodemask_t tmp;
368
369 if (pol->flags & MPOL_F_STATIC_NODES) {
370 int node = first_node(pol->w.user_nodemask);
371
372 if (node_isset(node, *nodes)) {
373 pol->v.preferred_node = node;
374 pol->flags &= ~MPOL_F_LOCAL;
375 } else
376 pol->flags |= MPOL_F_LOCAL;
377 } else if (pol->flags & MPOL_F_RELATIVE_NODES) {
378 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
379 pol->v.preferred_node = first_node(tmp);
380 } else if (!(pol->flags & MPOL_F_LOCAL)) {
381 pol->v.preferred_node = node_remap(pol->v.preferred_node,
382 pol->w.cpuset_mems_allowed,
383 *nodes);
384 pol->w.cpuset_mems_allowed = *nodes;
385 }
386}
387
388/*
389 * mpol_rebind_policy - Migrate a policy to a different set of nodes
390 *
391 * If the read-side task has no lock to protect task->mempolicy, the
392 * write-side task rebinds task->mempolicy in two steps. The first step
393 * sets all the newly allowed nodes, and the second step clears all the
394 * disallowed nodes. This way we avoid ever being left with no node from
395 * which to allocate a page.
396 * If we hold a lock to protect task->mempolicy on the read side, we
397 * rebind directly.
398 *
399 * step:
400 * MPOL_REBIND_ONCE - do rebind work at once
401 * MPOL_REBIND_STEP1 - set all the newly allowed nodes
402 * MPOL_REBIND_STEP2 - clear all the disallowed nodes
403 */
404static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
405 enum mpol_rebind_step step)
406{
407 if (!pol)
408 return;
409 if (!mpol_store_user_nodemask(pol) && step == MPOL_REBIND_ONCE &&
410 nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
411 return;
412
413 if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING))
414 return;
415
416 if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING))
417 BUG();
418
419 if (step == MPOL_REBIND_STEP1)
420 pol->flags |= MPOL_F_REBINDING;
421 else if (step == MPOL_REBIND_STEP2)
422 pol->flags &= ~MPOL_F_REBINDING;
423 else if (step >= MPOL_REBIND_NSTEP)
424 BUG();
425
426 mpol_ops[pol->mode].rebind(pol, newmask, step);
427}
428
429/*
430 * Wrapper for mpol_rebind_policy() that just requires task
431 * pointer, and updates task mempolicy.
432 *
433 * Called with task's alloc_lock held.
434 */
435
436void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
437 enum mpol_rebind_step step)
438{
439 mpol_rebind_policy(tsk->mempolicy, new, step);
440}
441
442/*
443 * Rebind each vma in mm to new nodemask.
444 *
445 * Call holding a reference to mm. Takes mm->mmap_sem during call.
446 */
447
448void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
449{
450 struct vm_area_struct *vma;
451
452 down_write(&mm->mmap_sem);
453 for (vma = mm->mmap; vma; vma = vma->vm_next)
454 mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
455 up_write(&mm->mmap_sem);
456}
457
458static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
459 [MPOL_DEFAULT] = {
460 .rebind = mpol_rebind_default,
461 },
462 [MPOL_INTERLEAVE] = {
463 .create = mpol_new_interleave,
464 .rebind = mpol_rebind_nodemask,
465 },
466 [MPOL_PREFERRED] = {
467 .create = mpol_new_preferred,
468 .rebind = mpol_rebind_preferred,
469 },
470 [MPOL_BIND] = {
471 .create = mpol_new_bind,
472 .rebind = mpol_rebind_nodemask,
473 },
474};
475
476static void migrate_page_add(struct page *page, struct list_head *pagelist,
477 unsigned long flags);
478
479/*
480 * Scan through pages checking if pages follow certain conditions,
481 * and move them to the pagelist if they do.
482 */
483static int queue_pages_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
484 unsigned long addr, unsigned long end,
485 const nodemask_t *nodes, unsigned long flags,
486 void *private)
487{
488 pte_t *orig_pte;
489 pte_t *pte;
490 spinlock_t *ptl;
491
492 orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
493 do {
494 struct page *page;
495 int nid;
496
497 if (!pte_present(*pte))
498 continue;
499 page = vm_normal_page(vma, addr, *pte);
500 if (!page)
501 continue;
502 /*
503 * vm_normal_page() filters out zero pages, but there might
504 * still be PageReserved pages to skip, perhaps in a VDSO.
505 */
506 if (PageReserved(page))
507 continue;
508 nid = page_to_nid(page);
509 if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
510 continue;
511
512 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
513 migrate_page_add(page, private, flags);
514 else
515 break;
516 } while (pte++, addr += PAGE_SIZE, addr != end);
517 pte_unmap_unlock(orig_pte, ptl);
518 return addr != end;
519}
520
521static void queue_pages_hugetlb_pmd_range(struct vm_area_struct *vma,
522 pmd_t *pmd, const nodemask_t *nodes, unsigned long flags,
523 void *private)
524{
525#ifdef CONFIG_HUGETLB_PAGE
526 int nid;
527 struct page *page;
528 spinlock_t *ptl;
529 pte_t entry;
530
531 ptl = huge_pte_lock(hstate_vma(vma), vma->vm_mm, (pte_t *)pmd);
532 entry = huge_ptep_get((pte_t *)pmd);
533 if (!pte_present(entry))
534 goto unlock;
535 page = pte_page(entry);
536 nid = page_to_nid(page);
537 if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
538 goto unlock;
539 /* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
540 if (flags & (MPOL_MF_MOVE_ALL) ||
541 (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
542 isolate_huge_page(page, private);
543unlock:
544 spin_unlock(ptl);
545#else
546 BUG();
547#endif
548}
549
550static inline int queue_pages_pmd_range(struct vm_area_struct *vma, pud_t *pud,
551 unsigned long addr, unsigned long end,
552 const nodemask_t *nodes, unsigned long flags,
553 void *private)
554{
555 pmd_t *pmd;
556 unsigned long next;
557
558 pmd = pmd_offset(pud, addr);
559 do {
560 next = pmd_addr_end(addr, end);
561 if (!pmd_present(*pmd))
562 continue;
563 if (pmd_huge(*pmd) && is_vm_hugetlb_page(vma)) {
564 queue_pages_hugetlb_pmd_range(vma, pmd, nodes,
565 flags, private);
566 continue;
567 }
568 split_huge_page_pmd(vma, addr, pmd);
569 if (pmd_none_or_trans_huge_or_clear_bad(pmd))
570 continue;
571 if (queue_pages_pte_range(vma, pmd, addr, next, nodes,
572 flags, private))
573 return -EIO;
574 } while (pmd++, addr = next, addr != end);
575 return 0;
576}
577
578static inline int queue_pages_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
579 unsigned long addr, unsigned long end,
580 const nodemask_t *nodes, unsigned long flags,
581 void *private)
582{
583 pud_t *pud;
584 unsigned long next;
585
586 pud = pud_offset(pgd, addr);
587 do {
588 next = pud_addr_end(addr, end);
589 if (pud_huge(*pud) && is_vm_hugetlb_page(vma))
590 continue;
591 if (pud_none_or_clear_bad(pud))
592 continue;
593 if (queue_pages_pmd_range(vma, pud, addr, next, nodes,
594 flags, private))
595 return -EIO;
596 } while (pud++, addr = next, addr != end);
597 return 0;
598}
599
600static inline int queue_pages_pgd_range(struct vm_area_struct *vma,
601 unsigned long addr, unsigned long end,
602 const nodemask_t *nodes, unsigned long flags,
603 void *private)
604{
605 pgd_t *pgd;
606 unsigned long next;
607
608 pgd = pgd_offset(vma->vm_mm, addr);
609 do {
610 next = pgd_addr_end(addr, end);
611 if (pgd_none_or_clear_bad(pgd))
612 continue;
613 if (queue_pages_pud_range(vma, pgd, addr, next, nodes,
614 flags, private))
615 return -EIO;
616 } while (pgd++, addr = next, addr != end);
617 return 0;
618}
619
620#ifdef CONFIG_NUMA_BALANCING
621/*
622 * This is used to mark a range of virtual addresses to be inaccessible.
623 * These are later cleared by a NUMA hinting fault. Depending on these
624 * faults, pages may be migrated for better NUMA placement.
625 *
626 * This is assuming that NUMA faults are handled using PROT_NONE. If
627 * an architecture makes a different choice, it will need further
628 * changes to the core.
629 */
630unsigned long change_prot_numa(struct vm_area_struct *vma,
631 unsigned long addr, unsigned long end)
632{
633 int nr_updated;
634
635 nr_updated = change_protection(vma, addr, end, vma->vm_page_prot, 0, 1);
636 if (nr_updated)
637 count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
638
639 return nr_updated;
640}
641#else
642static unsigned long change_prot_numa(struct vm_area_struct *vma,
643 unsigned long addr, unsigned long end)
644{
645 return 0;
646}
647#endif /* CONFIG_NUMA_BALANCING */
648
649/*
650 * Walk through page tables and collect pages to be migrated.
651 *
652 * If pages found in a given range are on a set of nodes (determined by
653 * @nodes and @flags), they are isolated and queued to the pagelist
654 * passed via @private.
655 */
656static struct vm_area_struct *
657queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
658 const nodemask_t *nodes, unsigned long flags, void *private)
659{
660 int err;
661 struct vm_area_struct *first, *vma, *prev;
662
663
664 first = find_vma(mm, start);
665 if (!first)
666 return ERR_PTR(-EFAULT);
667 prev = NULL;
668 for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
669 unsigned long endvma = vma->vm_end;
670
671 if (endvma > end)
672 endvma = end;
673 if (vma->vm_start > start)
674 start = vma->vm_start;
675
676 if (!(flags & MPOL_MF_DISCONTIG_OK)) {
677 if (!vma->vm_next && vma->vm_end < end)
678 return ERR_PTR(-EFAULT);
679 if (prev && prev->vm_end < vma->vm_start)
680 return ERR_PTR(-EFAULT);
681 }
682
683 if (flags & MPOL_MF_LAZY) {
684 change_prot_numa(vma, start, endvma);
685 goto next;
686 }
687
688 if ((flags & MPOL_MF_STRICT) ||
689 ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
690 vma_migratable(vma))) {
691
692 err = queue_pages_pgd_range(vma, start, endvma, nodes,
693 flags, private);
694 if (err) {
695 first = ERR_PTR(err);
696 break;
697 }
698 }
699next:
700 prev = vma;
701 }
702 return first;
703}
704
705/*
706 * Apply policy to a single VMA
707 * This must be called with the mmap_sem held for writing.
708 */
709static int vma_replace_policy(struct vm_area_struct *vma,
710 struct mempolicy *pol)
711{
712 int err;
713 struct mempolicy *old;
714 struct mempolicy *new;
715
716 pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
717 vma->vm_start, vma->vm_end, vma->vm_pgoff,
718 vma->vm_ops, vma->vm_file,
719 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
720
721 new = mpol_dup(pol);
722 if (IS_ERR(new))
723 return PTR_ERR(new);
724
725 if (vma->vm_ops && vma->vm_ops->set_policy) {
726 err = vma->vm_ops->set_policy(vma, new);
727 if (err)
728 goto err_out;
729 }
730
731 old = vma->vm_policy;
732 vma->vm_policy = new; /* protected by mmap_sem */
733 mpol_put(old);
734
735 return 0;
736 err_out:
737 mpol_put(new);
738 return err;
739}
740
741/* Step 2: apply policy to a range and do splits. */
742static int mbind_range(struct mm_struct *mm, unsigned long start,
743 unsigned long end, struct mempolicy *new_pol)
744{
745 struct vm_area_struct *next;
746 struct vm_area_struct *prev;
747 struct vm_area_struct *vma;
748 int err = 0;
749 pgoff_t pgoff;
750 unsigned long vmstart;
751 unsigned long vmend;
752
753 vma = find_vma(mm, start);
754 if (!vma || vma->vm_start > start)
755 return -EFAULT;
756
757 prev = vma->vm_prev;
758 if (start > vma->vm_start)
759 prev = vma;
760
761 for (; vma && vma->vm_start < end; prev = vma, vma = next) {
762 next = vma->vm_next;
763 vmstart = max(start, vma->vm_start);
764 vmend = min(end, vma->vm_end);
765
766 if (mpol_equal(vma_policy(vma), new_pol))
767 continue;
768
769 pgoff = vma->vm_pgoff +
770 ((vmstart - vma->vm_start) >> PAGE_SHIFT);
771 prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
772 vma->anon_vma, vma->vm_file, pgoff,
773 new_pol);
774 if (prev) {
775 vma = prev;
776 next = vma->vm_next;
777 if (mpol_equal(vma_policy(vma), new_pol))
778 continue;
779 /* vma_merge() joined vma && vma->next, case 8 */
780 goto replace;
781 }
782 if (vma->vm_start != vmstart) {
783 err = split_vma(vma->vm_mm, vma, vmstart, 1);
784 if (err)
785 goto out;
786 }
787 if (vma->vm_end != vmend) {
788 err = split_vma(vma->vm_mm, vma, vmend, 0);
789 if (err)
790 goto out;
791 }
792 replace:
793 err = vma_replace_policy(vma, new_pol);
794 if (err)
795 goto out;
796 }
797
798 out:
799 return err;
800}
801
802/* Set the process memory policy */
803static long do_set_mempolicy(unsigned short mode, unsigned short flags,
804 nodemask_t *nodes)
805{
806 struct mempolicy *new, *old;
807 struct mm_struct *mm = current->mm;
808 NODEMASK_SCRATCH(scratch);
809 int ret;
810
811 if (!scratch)
812 return -ENOMEM;
813
814 new = mpol_new(mode, flags, nodes);
815 if (IS_ERR(new)) {
816 ret = PTR_ERR(new);
817 goto out;
818 }
819 /*
820 * prevent changing our mempolicy while show_numa_maps()
821 * is using it.
822 * Note: do_set_mempolicy() can be called at init time
823 * with no 'mm'.
824 */
825 if (mm)
826 down_write(&mm->mmap_sem);
827 task_lock(current);
828 ret = mpol_set_nodemask(new, nodes, scratch);
829 if (ret) {
830 task_unlock(current);
831 if (mm)
832 up_write(&mm->mmap_sem);
833 mpol_put(new);
834 goto out;
835 }
836 old = current->mempolicy;
837 current->mempolicy = new;
838 if (new && new->mode == MPOL_INTERLEAVE &&
839 nodes_weight(new->v.nodes))
840 current->il_next = first_node(new->v.nodes);
841 task_unlock(current);
842 if (mm)
843 up_write(&mm->mmap_sem);
844
845 mpol_put(old);
846 ret = 0;
847out:
848 NODEMASK_SCRATCH_FREE(scratch);
849 return ret;
850}
851
852/*
853 * Return nodemask for policy for get_mempolicy() query
854 *
855 * Called with task's alloc_lock held
856 */
857static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
858{
859 nodes_clear(*nodes);
860 if (p == &default_policy)
861 return;
862
863 switch (p->mode) {
864 case MPOL_BIND:
865 /* Fall through */
866 case MPOL_INTERLEAVE:
867 *nodes = p->v.nodes;
868 break;
869 case MPOL_PREFERRED:
870 if (!(p->flags & MPOL_F_LOCAL))
871 node_set(p->v.preferred_node, *nodes);
872 /* else return empty node mask for local allocation */
873 break;
874 default:
875 BUG();
876 }
877}
878
879static int lookup_node(struct mm_struct *mm, unsigned long addr)
880{
881 struct page *p;
882 int err;
883
884 err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
885 if (err >= 0) {
886 err = page_to_nid(p);
887 put_page(p);
888 }
889 return err;
890}
891
892/* Retrieve NUMA policy */
893static long do_get_mempolicy(int *policy, nodemask_t *nmask,
894 unsigned long addr, unsigned long flags)
895{
896 int err;
897 struct mm_struct *mm = current->mm;
898 struct vm_area_struct *vma = NULL;
899 struct mempolicy *pol = current->mempolicy;
900
901 if (flags &
902 ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
903 return -EINVAL;
904
905 if (flags & MPOL_F_MEMS_ALLOWED) {
906 if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
907 return -EINVAL;
908 *policy = 0; /* just so it's initialized */
909 task_lock(current);
910 *nmask = cpuset_current_mems_allowed;
911 task_unlock(current);
912 return 0;
913 }
914
915 if (flags & MPOL_F_ADDR) {
916 /*
917 * Do NOT fall back to task policy if the
918 * vma/shared policy at addr is NULL. We
919 * want to return MPOL_DEFAULT in this case.
920 */
921 down_read(&mm->mmap_sem);
922 vma = find_vma_intersection(mm, addr, addr+1);
923 if (!vma) {
924 up_read(&mm->mmap_sem);
925 return -EFAULT;
926 }
927 if (vma->vm_ops && vma->vm_ops->get_policy)
928 pol = vma->vm_ops->get_policy(vma, addr);
929 else
930 pol = vma->vm_policy;
931 } else if (addr)
932 return -EINVAL;
933
934 if (!pol)
935 pol = &default_policy; /* indicates default behavior */
936
937 if (flags & MPOL_F_NODE) {
938 if (flags & MPOL_F_ADDR) {
939 err = lookup_node(mm, addr);
940 if (err < 0)
941 goto out;
942 *policy = err;
943 } else if (pol == current->mempolicy &&
944 pol->mode == MPOL_INTERLEAVE) {
945 *policy = current->il_next;
946 } else {
947 err = -EINVAL;
948 goto out;
949 }
950 } else {
951 *policy = pol == &default_policy ? MPOL_DEFAULT :
952 pol->mode;
953 /*
954 * Internal mempolicy flags must be masked off before exposing
955 * the policy to userspace.
956 */
957 *policy |= (pol->flags & MPOL_MODE_FLAGS);
958 }
959
960 if (vma) {
961 up_read(&current->mm->mmap_sem);
962 vma = NULL;
963 }
964
965 err = 0;
966 if (nmask) {
967 if (mpol_store_user_nodemask(pol)) {
968 *nmask = pol->w.user_nodemask;
969 } else {
970 task_lock(current);
971 get_policy_nodemask(pol, nmask);
972 task_unlock(current);
973 }
974 }
975
976 out:
977 mpol_cond_put(pol);
978 if (vma)
979 up_read(&current->mm->mmap_sem);
980 return err;
981}
982
983#ifdef CONFIG_MIGRATION
984/*
985 * page migration
986 */
987static void migrate_page_add(struct page *page, struct list_head *pagelist,
988 unsigned long flags)
989{
990 /*
991 * Avoid migrating a page that is shared with others.
992 */
993 if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
994 if (!isolate_lru_page(page)) {
995 list_add_tail(&page->lru, pagelist);
996 inc_zone_page_state(page, NR_ISOLATED_ANON +
997 page_is_file_cache(page));
998 }
999 }
1000}
1001
1002static struct page *new_node_page(struct page *page, unsigned long node, int **x)
1003{
1004 if (PageHuge(page))
1005 return alloc_huge_page_node(page_hstate(compound_head(page)),
1006 node);
1007 else
1008 return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
1009}
1010
1011/*
1012 * Migrate pages from one node to a target node.
1013 * Returns error or the number of pages not migrated.
1014 */
1015static int migrate_to_node(struct mm_struct *mm, int source, int dest,
1016 int flags)
1017{
1018 nodemask_t nmask;
1019 LIST_HEAD(pagelist);
1020 int err = 0;
1021
1022 nodes_clear(nmask);
1023 node_set(source, nmask);
1024
1025 /*
1026 * This does not "check" the range but isolates all pages that
1027 * need migration. Between passing in the full user address
1028 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
1029 */
1030 VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
1031 queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
1032 flags | MPOL_MF_DISCONTIG_OK, &pagelist);
1033
1034 if (!list_empty(&pagelist)) {
1035 err = migrate_pages(&pagelist, new_node_page, dest,
1036 MIGRATE_SYNC, MR_SYSCALL);
1037 if (err)
1038 putback_movable_pages(&pagelist);
1039 }
1040
1041 return err;
1042}
1043
1044/*
1045 * Move pages between the two nodesets so as to preserve the physical
1046 * layout as much as possible.
1047 *
1048 * Returns the number of pages that could not be moved.
1049 */
1050int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1051 const nodemask_t *to, int flags)
1052{
1053 int busy = 0;
1054 int err;
1055 nodemask_t tmp;
1056
1057 err = migrate_prep();
1058 if (err)
1059 return err;
1060
1061 down_read(&mm->mmap_sem);
1062
1063 err = migrate_vmas(mm, from, to, flags);
1064 if (err)
1065 goto out;
1066
1067 /*
1068 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
1069 * bit in 'to' is not also set in 'tmp'. Clear the found 'source'
1070 * bit in 'tmp', and return that <source, dest> pair for migration.
1071 * The pair of nodemasks 'to' and 'from' define the map.
1072 *
1073 * If no pair of bits is found that way, fallback to picking some
1074 * pair of 'source' and 'dest' bits that are not the same. If the
1075 * 'source' and 'dest' bits are the same, this represents a node
1076 * that will be migrating to itself, so no pages need move.
1077 *
1078 * If no bits are left in 'tmp', or if all remaining bits left
1079 * in 'tmp' correspond to the same bit in 'to', return false
1080 * (nothing left to migrate).
1081 *
1082 * This lets us pick a pair of nodes to migrate between, such that
1083 * if possible the dest node is not already occupied by some other
1084 * source node, minimizing the risk of overloading the memory on a
1085 * node that would happen if we migrated incoming memory to a node
1086 * before migrating outgoing memory source that same node.
1087 *
1088 * A single scan of tmp is sufficient. As we go, we remember the
1089 * most recent <s, d> pair that moved (s != d). If we find a pair
1090 * that not only moved, but what's better, moved to an empty slot
1091 * (d is not set in tmp), then we break out then, with that pair.
1092 * Otherwise when we finish scanning from_tmp, we at least have the
1093 * most recent <s, d> pair that moved. If we get all the way through
1094 * the scan of tmp without finding any node that moved, much less
1095 * moved to an empty node, then there is nothing left worth migrating.
1096 */
1097
1098 tmp = *from;
1099 while (!nodes_empty(tmp)) {
1100 int s,d;
1101 int source = NUMA_NO_NODE;
1102 int dest = 0;
1103
1104 for_each_node_mask(s, tmp) {
1105
1106 /*
1107 * do_migrate_pages() tries to maintain the relative
1108 * node relationship of the pages established between
1109 * threads and memory areas.
1110 *
1111 * However if the number of source nodes is not equal to
1112 * the number of destination nodes we can not preserve
1113 * this node relative relationship. In that case, skip
1114 * copying memory from a node that is in the destination
1115 * mask.
1116 *
1117 * Example: [2,3,4] -> [3,4,5] moves everything.
1118 * [0-7] - > [3,4,5] moves only 0,1,2,6,7.
1119 */
1120
1121 if ((nodes_weight(*from) != nodes_weight(*to)) &&
1122 (node_isset(s, *to)))
1123 continue;
1124
1125 d = node_remap(s, *from, *to);
1126 if (s == d)
1127 continue;
1128
1129 source = s; /* Node moved. Memorize */
1130 dest = d;
1131
1132 /* dest not in remaining from nodes? */
1133 if (!node_isset(dest, tmp))
1134 break;
1135 }
1136 if (source == NUMA_NO_NODE)
1137 break;
1138
1139 node_clear(source, tmp);
1140 err = migrate_to_node(mm, source, dest, flags);
1141 if (err > 0)
1142 busy += err;
1143 if (err < 0)
1144 break;
1145 }
1146out:
1147 up_read(&mm->mmap_sem);
1148 if (err < 0)
1149 return err;
1150 return busy;
1151
1152}
1153
1154/*
1155 * Allocate a new page for page migration based on vma policy.
1156 * Start assuming that page is mapped by vma pointed to by @private.
1157 * Search forward from there, if not. N.B., this assumes that the
1158 * list of pages handed to migrate_pages()--which is how we get here--
1159 * is in virtual address order.
1160 */
1161static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
1162{
1163 struct vm_area_struct *vma = (struct vm_area_struct *)private;
1164 unsigned long uninitialized_var(address);
1165
1166 while (vma) {
1167 address = page_address_in_vma(page, vma);
1168 if (address != -EFAULT)
1169 break;
1170 vma = vma->vm_next;
1171 }
1172
1173 if (PageHuge(page)) {
1174 BUG_ON(!vma);
1175 return alloc_huge_page_noerr(vma, address, 1);
1176 }
1177 /*
1178 * if !vma, alloc_page_vma() will use task or system default policy
1179 */
1180 return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
1181}
1182#else
1183
1184static void migrate_page_add(struct page *page, struct list_head *pagelist,
1185 unsigned long flags)
1186{
1187}
1188
1189int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1190 const nodemask_t *to, int flags)
1191{
1192 return -ENOSYS;
1193}
1194
1195static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
1196{
1197 return NULL;
1198}
1199#endif
1200
1201static long do_mbind(unsigned long start, unsigned long len,
1202 unsigned short mode, unsigned short mode_flags,
1203 nodemask_t *nmask, unsigned long flags)
1204{
1205 struct vm_area_struct *vma;
1206 struct mm_struct *mm = current->mm;
1207 struct mempolicy *new;
1208 unsigned long end;
1209 int err;
1210 LIST_HEAD(pagelist);
1211
1212 if (flags & ~(unsigned long)MPOL_MF_VALID)
1213 return -EINVAL;
1214 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1215 return -EPERM;
1216
1217 if (start & ~PAGE_MASK)
1218 return -EINVAL;
1219
1220 if (mode == MPOL_DEFAULT)
1221 flags &= ~MPOL_MF_STRICT;
1222
1223 len = (len + PAGE_SIZE - 1) & PAGE_MASK;
1224 end = start + len;
1225
1226 if (end < start)
1227 return -EINVAL;
1228 if (end == start)
1229 return 0;
1230
1231 new = mpol_new(mode, mode_flags, nmask);
1232 if (IS_ERR(new))
1233 return PTR_ERR(new);
1234
1235 if (flags & MPOL_MF_LAZY)
1236 new->flags |= MPOL_F_MOF;
1237
1238 /*
1239 * If we are using the default policy then operation
1240 * on discontinuous address spaces is okay after all
1241 */
1242 if (!new)
1243 flags |= MPOL_MF_DISCONTIG_OK;
1244
1245 pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1246 start, start + len, mode, mode_flags,
1247 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
1248
1249 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1250
1251 err = migrate_prep();
1252 if (err)
1253 goto mpol_out;
1254 }
1255 {
1256 NODEMASK_SCRATCH(scratch);
1257 if (scratch) {
1258 down_write(&mm->mmap_sem);
1259 task_lock(current);
1260 err = mpol_set_nodemask(new, nmask, scratch);
1261 task_unlock(current);
1262 if (err)
1263 up_write(&mm->mmap_sem);
1264 } else
1265 err = -ENOMEM;
1266 NODEMASK_SCRATCH_FREE(scratch);
1267 }
1268 if (err)
1269 goto mpol_out;
1270
1271 vma = queue_pages_range(mm, start, end, nmask,
1272 flags | MPOL_MF_INVERT, &pagelist);
1273
1274 err = PTR_ERR(vma); /* maybe ... */
1275 if (!IS_ERR(vma))
1276 err = mbind_range(mm, start, end, new);
1277
1278 if (!err) {
1279 int nr_failed = 0;
1280
1281 if (!list_empty(&pagelist)) {
1282 WARN_ON_ONCE(flags & MPOL_MF_LAZY);
1283 nr_failed = migrate_pages(&pagelist, new_vma_page,
1284 (unsigned long)vma,
1285 MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
1286 if (nr_failed)
1287 putback_movable_pages(&pagelist);
1288 }
1289
1290 if (nr_failed && (flags & MPOL_MF_STRICT))
1291 err = -EIO;
1292 } else
1293 putback_movable_pages(&pagelist);
1294
1295 up_write(&mm->mmap_sem);
1296 mpol_out:
1297 mpol_put(new);
1298 return err;
1299}
1300
1301/*
1302 * User space interface with variable sized bitmaps for nodelists.
1303 */
1304
1305/* Copy a node mask from user space. */
1306static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
1307 unsigned long maxnode)
1308{
1309 unsigned long k;
1310 unsigned long nlongs;
1311 unsigned long endmask;
1312
1313 --maxnode;
1314 nodes_clear(*nodes);
1315 if (maxnode == 0 || !nmask)
1316 return 0;
1317 if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1318 return -EINVAL;
1319
1320 nlongs = BITS_TO_LONGS(maxnode);
1321 if ((maxnode % BITS_PER_LONG) == 0)
1322 endmask = ~0UL;
1323 else
1324 endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
1325
1326 /* When the user specified more nodes than supported just check
1327 if the unsupported part is all zero. */
1328 if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
1329 if (nlongs > PAGE_SIZE/sizeof(long))
1330 return -EINVAL;
1331 for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
1332 unsigned long t;
1333 if (get_user(t, nmask + k))
1334 return -EFAULT;
1335 if (k == nlongs - 1) {
1336 if (t & endmask)
1337 return -EINVAL;
1338 } else if (t)
1339 return -EINVAL;
1340 }
1341 nlongs = BITS_TO_LONGS(MAX_NUMNODES);
1342 endmask = ~0UL;
1343 }
1344
1345 if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
1346 return -EFAULT;
1347 nodes_addr(*nodes)[nlongs-1] &= endmask;
1348 return 0;
1349}
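/*
 * Example (illustrative values, 64-bit kernel): a caller passing
 * maxnode = 65 describes a full 64-bit mask; after --maxnode, nlongs = 1
 * and endmask = ~0UL.  A caller passing maxnode = 33 describes 32 valid
 * bits, so endmask keeps only bits 0..31 of the single long copied from
 * user space.
 */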
1350
1351/* Copy a kernel node mask to user space */
1352static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1353 nodemask_t *nodes)
1354{
1355 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1356 const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
1357
1358 if (copy > nbytes) {
1359 if (copy > PAGE_SIZE)
1360 return -EINVAL;
1361 if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1362 return -EFAULT;
1363 copy = nbytes;
1364 }
1365 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1366}
1367
1368SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1369 unsigned long, mode, unsigned long __user *, nmask,
1370 unsigned long, maxnode, unsigned, flags)
1371{
1372 nodemask_t nodes;
1373 int err;
1374 unsigned short mode_flags;
1375
1376 mode_flags = mode & MPOL_MODE_FLAGS;
1377 mode &= ~MPOL_MODE_FLAGS;
1378 if (mode >= MPOL_MAX)
1379 return -EINVAL;
1380 if ((mode_flags & MPOL_F_STATIC_NODES) &&
1381 (mode_flags & MPOL_F_RELATIVE_NODES))
1382 return -EINVAL;
1383 err = get_nodes(&nodes, nmask, maxnode);
1384 if (err)
1385 return err;
1386 return do_mbind(start, len, mode, mode_flags, &nodes, flags);
1387}
1388
1389/* Set the process memory policy */
1390SYSCALL_DEFINE3(set_mempolicy, int, mode, unsigned long __user *, nmask,
1391 unsigned long, maxnode)
1392{
1393 int err;
1394 nodemask_t nodes;
1395 unsigned short flags;
1396
1397 flags = mode & MPOL_MODE_FLAGS;
1398 mode &= ~MPOL_MODE_FLAGS;
1399 if ((unsigned int)mode >= MPOL_MAX)
1400 return -EINVAL;
1401 if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
1402 return -EINVAL;
1403 err = get_nodes(&nodes, nmask, maxnode);
1404 if (err)
1405 return err;
1406 return do_set_mempolicy(mode, flags, &nodes);
1407}
1408
1409SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1410 const unsigned long __user *, old_nodes,
1411 const unsigned long __user *, new_nodes)
1412{
1413 const struct cred *cred = current_cred(), *tcred;
1414 struct mm_struct *mm = NULL;
1415 struct task_struct *task;
1416 nodemask_t task_nodes;
1417 int err;
1418 nodemask_t *old;
1419 nodemask_t *new;
1420 NODEMASK_SCRATCH(scratch);
1421
1422 if (!scratch)
1423 return -ENOMEM;
1424
1425 old = &scratch->mask1;
1426 new = &scratch->mask2;
1427
1428 err = get_nodes(old, old_nodes, maxnode);
1429 if (err)
1430 goto out;
1431
1432 err = get_nodes(new, new_nodes, maxnode);
1433 if (err)
1434 goto out;
1435
1436 /* Find the mm_struct */
1437 rcu_read_lock();
1438 task = pid ? find_task_by_vpid(pid) : current;
1439 if (!task) {
1440 rcu_read_unlock();
1441 err = -ESRCH;
1442 goto out;
1443 }
1444 get_task_struct(task);
1445
1446 err = -EINVAL;
1447
1448 /*
1449 * Check if this process has the right to modify the specified
1450 * process. The right exists if the process has administrative
1451 * capabilities, superuser privileges or the same
1452 * userid as the target process.
1453 */
1454 tcred = __task_cred(task);
1455 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
1456 !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
1457 !capable(CAP_SYS_NICE)) {
1458 rcu_read_unlock();
1459 err = -EPERM;
1460 goto out_put;
1461 }
1462 rcu_read_unlock();
1463
1464 task_nodes = cpuset_mems_allowed(task);
1465 /* Is the user allowed to access the target nodes? */
1466 if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
1467 err = -EPERM;
1468 goto out_put;
1469 }
1470
1471 if (!nodes_subset(*new, node_states[N_MEMORY])) {
1472 err = -EINVAL;
1473 goto out_put;
1474 }
1475
1476 err = security_task_movememory(task);
1477 if (err)
1478 goto out_put;
1479
1480 mm = get_task_mm(task);
1481 put_task_struct(task);
1482
1483 if (!mm) {
1484 err = -EINVAL;
1485 goto out;
1486 }
1487
1488 err = do_migrate_pages(mm, old, new,
1489 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
1490
1491 mmput(mm);
1492out:
1493 NODEMASK_SCRATCH_FREE(scratch);
1494
1495 return err;
1496
1497out_put:
1498 put_task_struct(task);
1499 goto out;
1500
1501}
1502
1503
1504/* Retrieve NUMA policy */
1505SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1506 unsigned long __user *, nmask, unsigned long, maxnode,
1507 unsigned long, addr, unsigned long, flags)
1508{
1509 int err;
1510 int uninitialized_var(pval);
1511 nodemask_t nodes;
1512
1513 if (nmask != NULL && maxnode < MAX_NUMNODES)
1514 return -EINVAL;
1515
1516 err = do_get_mempolicy(&pval, &nodes, addr, flags);
1517
1518 if (err)
1519 return err;
1520
1521 if (policy && put_user(pval, policy))
1522 return -EFAULT;
1523
1524 if (nmask)
1525 err = copy_nodes_to_user(nmask, maxnode, &nodes);
1526
1527 return err;
1528}
1529
1530#ifdef CONFIG_COMPAT
1531
1532COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1533 compat_ulong_t __user *, nmask,
1534 compat_ulong_t, maxnode,
1535 compat_ulong_t, addr, compat_ulong_t, flags)
1536{
1537 long err;
1538 unsigned long __user *nm = NULL;
1539 unsigned long nr_bits, alloc_size;
1540 DECLARE_BITMAP(bm, MAX_NUMNODES);
1541
1542 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1543 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1544
1545 if (nmask)
1546 nm = compat_alloc_user_space(alloc_size);
1547
1548 err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1549
1550 if (!err && nmask) {
1551 unsigned long copy_size;
1552 copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
1553 err = copy_from_user(bm, nm, copy_size);
1554 /* ensure entire bitmap is zeroed */
1555 err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1556 err |= compat_put_bitmap(nmask, bm, nr_bits);
1557 }
1558
1559 return err;
1560}
1561
1562COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
1563 compat_ulong_t, maxnode)
1564{
1565 long err = 0;
1566 unsigned long __user *nm = NULL;
1567 unsigned long nr_bits, alloc_size;
1568 DECLARE_BITMAP(bm, MAX_NUMNODES);
1569
1570 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1571 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1572
1573 if (nmask) {
1574 err = compat_get_bitmap(bm, nmask, nr_bits);
1575 nm = compat_alloc_user_space(alloc_size);
1576 err |= copy_to_user(nm, bm, alloc_size);
1577 }
1578
1579 if (err)
1580 return -EFAULT;
1581
1582 return sys_set_mempolicy(mode, nm, nr_bits+1);
1583}
1584
1585COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
1586 compat_ulong_t, mode, compat_ulong_t __user *, nmask,
1587 compat_ulong_t, maxnode, compat_ulong_t, flags)
1588{
1589 long err = 0;
1590 unsigned long __user *nm = NULL;
1591 unsigned long nr_bits, alloc_size;
1592 nodemask_t bm;
1593
1594 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1595 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1596
1597 if (nmask) {
1598 err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
1599 nm = compat_alloc_user_space(alloc_size);
1600 err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
1601 }
1602
1603 if (err)
1604 return -EFAULT;
1605
1606 return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
1607}
1608
1609#endif
1610
1611/*
1612 * get_vma_policy(@task, @vma, @addr)
1613 * @task - task for fallback if vma policy == default
1614 * @vma - virtual memory area whose policy is sought
1615 * @addr - address in @vma for shared policy lookup
1616 *
1617 * Returns effective policy for a VMA at specified address.
1618 * Falls back to @task or system default policy, as necessary.
1619 * Current or other task's task mempolicy and non-shared vma policies must be
1620 * protected by task_lock(task) by the caller.
1621 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1622 * count--added by the get_policy() vm_op, as appropriate--to protect against
1623 * freeing by another task. It is the caller's responsibility to free the
1624 * extra reference for shared policies.
1625 */
1626struct mempolicy *get_vma_policy(struct task_struct *task,
1627 struct vm_area_struct *vma, unsigned long addr)
1628{
1629 struct mempolicy *pol = get_task_policy(task);
1630
1631 if (vma) {
1632 if (vma->vm_ops && vma->vm_ops->get_policy) {
1633 struct mempolicy *vpol = vma->vm_ops->get_policy(vma,
1634 addr);
1635 if (vpol)
1636 pol = vpol;
1637 } else if (vma->vm_policy) {
1638 pol = vma->vm_policy;
1639
1640 /*
1641 * shmem_alloc_page() passes MPOL_F_SHARED policy with
1642 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
1643 * count on these policies which will be dropped by
1644 * mpol_cond_put() later
1645 */
1646 if (mpol_needs_cond_ref(pol))
1647 mpol_get(pol);
1648 }
1649 }
1650 if (!pol)
1651 pol = &default_policy;
1652 return pol;
1653}
1654
1655bool vma_policy_mof(struct task_struct *task, struct vm_area_struct *vma)
1656{
1657 struct mempolicy *pol = get_task_policy(task);
1658 if (vma) {
1659 if (vma->vm_ops && vma->vm_ops->get_policy) {
1660 bool ret = false;
1661
1662 pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1663 if (pol && (pol->flags & MPOL_F_MOF))
1664 ret = true;
1665 mpol_cond_put(pol);
1666
1667 return ret;
1668 } else if (vma->vm_policy) {
1669 pol = vma->vm_policy;
1670 }
1671 }
1672
1673 if (!pol)
1674 return default_policy.flags & MPOL_F_MOF;
1675
1676 return pol->flags & MPOL_F_MOF;
1677}
1678
1679static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1680{
1681 enum zone_type dynamic_policy_zone = policy_zone;
1682
1683 BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1684
1685 /*
1686 * If policy->v.nodes has movable memory only, we apply the policy
1687 * only when gfp_zone(gfp) == ZONE_MOVABLE.
1688 *
1689 * policy->v.nodes is intersected with node_states[N_MEMORY], so if
1690 * the following test fails, it implies that policy->v.nodes has
1691 * movable memory only.
1692 */
1693 if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
1694 dynamic_policy_zone = ZONE_MOVABLE;
1695
1696 return zone >= dynamic_policy_zone;
1697}
1698
1699/*
1700 * Return a nodemask representing a mempolicy for filtering nodes for
1701 * page allocation
1702 */
1703static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
1704{
1705 /* Lower zones don't get a nodemask applied for MPOL_BIND */
1706 if (unlikely(policy->mode == MPOL_BIND) &&
1707 apply_policy_zone(policy, gfp_zone(gfp)) &&
1708 cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1709 return &policy->v.nodes;
1710
1711 return NULL;
1712}
1713
1714/* Return a zonelist indicated by gfp for node representing a mempolicy */
1715static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
1716 int nd)
1717{
1718 switch (policy->mode) {
1719 case MPOL_PREFERRED:
1720 if (!(policy->flags & MPOL_F_LOCAL))
1721 nd = policy->v.preferred_node;
1722 break;
1723 case MPOL_BIND:
1724 /*
1725 * Normally, MPOL_BIND allocations are node-local within the
1726 * allowed nodemask. However, if __GFP_THISNODE is set and the
1727 * current node isn't part of the mask, we use the zonelist for
1728 * the first node in the mask instead.
1729 */
1730 if (unlikely(gfp & __GFP_THISNODE) &&
1731 unlikely(!node_isset(nd, policy->v.nodes)))
1732 nd = first_node(policy->v.nodes);
1733 break;
1734 default:
1735 BUG();
1736 }
1737 return node_zonelist(nd, gfp);
1738}
1739
1740/* Do dynamic interleaving for a process */
1741static unsigned interleave_nodes(struct mempolicy *policy)
1742{
1743 unsigned nid, next;
1744 struct task_struct *me = current;
1745
1746 nid = me->il_next;
1747 next = next_node(nid, policy->v.nodes);
1748 if (next >= MAX_NUMNODES)
1749 next = first_node(policy->v.nodes);
1750 if (next < MAX_NUMNODES)
1751 me->il_next = next;
1752 return nid;
1753}
1754
1755/*
1756 * Depending on the memory policy provide a node from which to allocate the
1757 * next slab entry.
1758 */
1759unsigned int mempolicy_slab_node(void)
1760{
1761 struct mempolicy *policy;
1762 int node = numa_mem_id();
1763
1764 if (in_interrupt())
1765 return node;
1766
1767 policy = current->mempolicy;
1768 if (!policy || policy->flags & MPOL_F_LOCAL)
1769 return node;
1770
1771 switch (policy->mode) {
1772 case MPOL_PREFERRED:
1773 /*
1774 * handled MPOL_F_LOCAL above
1775 */
1776 return policy->v.preferred_node;
1777
1778 case MPOL_INTERLEAVE:
1779 return interleave_nodes(policy);
1780
1781 case MPOL_BIND: {
1782 /*
1783 * Follow bind policy behavior and start allocation at the
1784 * first node.
1785 */
1786 struct zonelist *zonelist;
1787 struct zone *zone;
1788 enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1789 zonelist = &NODE_DATA(node)->node_zonelists[0];
1790 (void)first_zones_zonelist(zonelist, highest_zoneidx,
1791 &policy->v.nodes,
1792 &zone);
1793 return zone ? zone->node : node;
1794 }
1795
1796 default:
1797 BUG();
1798 }
1799}
1800
1801/* Do static interleaving for a VMA with known offset. */
1802static unsigned offset_il_node(struct mempolicy *pol,
1803 struct vm_area_struct *vma, unsigned long off)
1804{
1805 unsigned nnodes = nodes_weight(pol->v.nodes);
1806 unsigned target;
1807 int c;
1808 int nid = NUMA_NO_NODE;
1809
1810 if (!nnodes)
1811 return numa_node_id();
1812 target = (unsigned int)off % nnodes;
1813 c = 0;
1814 do {
1815 nid = next_node(nid, pol->v.nodes);
1816 c++;
1817 } while (c <= target);
1818 return nid;
1819}
1820
1821/* Determine a node number for interleave */
1822static inline unsigned interleave_nid(struct mempolicy *pol,
1823 struct vm_area_struct *vma, unsigned long addr, int shift)
1824{
1825 if (vma) {
1826 unsigned long off;
1827
1828 /*
1829 * for small pages, there is no difference between
1830 * shift and PAGE_SHIFT, so the bit-shift is safe.
1831 * for huge pages, since vm_pgoff is in units of small
1832 * pages, we need to shift off the always 0 bits to get
1833 * a useful offset.
1834 */
1835 BUG_ON(shift < PAGE_SHIFT);
1836 off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
1837 off += (addr - vma->vm_start) >> shift;
1838 return offset_il_node(pol, vma, off);
1839 } else
1840 return interleave_nodes(pol);
1841}
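/*
 * Worked example (illustrative numbers): a 2MB hugetlb VMA has shift = 21
 * and PAGE_SHIFT = 12.  With vm_pgoff = 1024 (4MB expressed in small
 * pages) and a fault 6MB into the VMA, off = (1024 >> 9) + (6MB >> 21)
 * = 2 + 3 = 5.  For pol->v.nodes = {0,2,4}, offset_il_node() picks
 * 5 % 3 = 2, i.e. the third set node, node 4.
 */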
1842
1843/*
1844 * Return the bit number of a random bit set in the nodemask.
1845 * (returns NUMA_NO_NODE if nodemask is empty)
1846 */
1847int node_random(const nodemask_t *maskp)
1848{
1849 int w, bit = NUMA_NO_NODE;
1850
1851 w = nodes_weight(*maskp);
1852 if (w)
1853 bit = bitmap_ord_to_pos(maskp->bits,
1854 get_random_int() % w, MAX_NUMNODES);
1855 return bit;
1856}
1857
1858#ifdef CONFIG_HUGETLBFS
1859/*
1860 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
1861 * @vma = virtual memory area whose policy is sought
1862 * @addr = address in @vma for shared policy lookup and interleave policy
1863 * @gfp_flags = for requested zone
1864 * @mpol = pointer to mempolicy pointer for reference counted mempolicy
1865 * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask
1866 *
1867 * Returns a zonelist suitable for a huge page allocation and a pointer
1868 * to the struct mempolicy for conditional unref after allocation.
1869 * If the effective policy is 'BIND, returns a pointer to the mempolicy's
1870 * @nodemask for filtering the zonelist.
1871 *
1872 * Must be protected by read_mems_allowed_begin()
1873 */
1874struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
1875 gfp_t gfp_flags, struct mempolicy **mpol,
1876 nodemask_t **nodemask)
1877{
1878 struct zonelist *zl;
1879
1880 *mpol = get_vma_policy(current, vma, addr);
1881 *nodemask = NULL; /* assume !MPOL_BIND */
1882
1883 if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
1884 zl = node_zonelist(interleave_nid(*mpol, vma, addr,
1885 huge_page_shift(hstate_vma(vma))), gfp_flags);
1886 } else {
1887 zl = policy_zonelist(gfp_flags, *mpol, numa_node_id());
1888 if ((*mpol)->mode == MPOL_BIND)
1889 *nodemask = &(*mpol)->v.nodes;
1890 }
1891 return zl;
1892}
1893
1894/*
1895 * init_nodemask_of_mempolicy
1896 *
1897 * If the current task's mempolicy is "default" [NULL], return 'false'
1898 * to indicate default policy. Otherwise, extract the policy nodemask
1899 * for 'bind' or 'interleave' policy into the argument nodemask, or
1900 * initialize the argument nodemask to contain the single node for
1901 * 'preferred' or 'local' policy and return 'true' to indicate presence
1902 * of non-default mempolicy.
1903 *
1904 * We don't bother with reference counting the mempolicy [mpol_get/put]
1905 * because the current task is examining its own mempolicy and a task's
1906 * mempolicy is only ever changed by the task itself.
1907 *
1908 * N.B., it is the caller's responsibility to free a returned nodemask.
1909 */
1910bool init_nodemask_of_mempolicy(nodemask_t *mask)
1911{
1912 struct mempolicy *mempolicy;
1913 int nid;
1914
1915 if (!(mask && current->mempolicy))
1916 return false;
1917
1918 task_lock(current);
1919 mempolicy = current->mempolicy;
1920 switch (mempolicy->mode) {
1921 case MPOL_PREFERRED:
1922 if (mempolicy->flags & MPOL_F_LOCAL)
1923 nid = numa_node_id();
1924 else
1925 nid = mempolicy->v.preferred_node;
1926 init_nodemask_of_node(mask, nid);
1927 break;
1928
1929 case MPOL_BIND:
1930 /* Fall through */
1931 case MPOL_INTERLEAVE:
1932 *mask = mempolicy->v.nodes;
1933 break;
1934
1935 default:
1936 BUG();
1937 }
1938 task_unlock(current);
1939
1940 return true;
1941}
1942#endif
1943
1944/*
1945 * mempolicy_nodemask_intersects
1946 *
1947 * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
1948 * policy. Otherwise, check for intersection between mask and the policy
1949 * nodemask for 'bind' or 'interleave' policy. For 'preferred' or 'local'
1950 * policy, always return true since it may allocate elsewhere on fallback.
1951 *
1952 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
1953 */
1954bool mempolicy_nodemask_intersects(struct task_struct *tsk,
1955 const nodemask_t *mask)
1956{
1957 struct mempolicy *mempolicy;
1958 bool ret = true;
1959
1960 if (!mask)
1961 return ret;
1962 task_lock(tsk);
1963 mempolicy = tsk->mempolicy;
1964 if (!mempolicy)
1965 goto out;
1966
1967 switch (mempolicy->mode) {
1968 case MPOL_PREFERRED:
1969 /*
1970 * MPOL_PREFERRED and MPOL_F_LOCAL only express preferred nodes to
1971 * allocate from; the task may fall back to other nodes when OOM.
1972 * Thus, it's possible for tsk to have allocated memory from
1973 * nodes in mask.
1974 */
1975 break;
1976 case MPOL_BIND:
1977 case MPOL_INTERLEAVE:
1978 ret = nodes_intersects(mempolicy->v.nodes, *mask);
1979 break;
1980 default:
1981 BUG();
1982 }
1983out:
1984 task_unlock(tsk);
1985 return ret;
1986}
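
/*
 * Illustrative sketch (added for exposition, not part of the original file):
 * an OOM-killer style filter that skips tasks whose mempolicy cannot have
 * placed memory on the nodes under pressure.  The wrapper name is
 * hypothetical; mempolicy_nodemask_intersects() is the helper above.
 */
#if 0	/* example only, not compiled */
static bool example_may_have_memory_on(struct task_struct *tsk,
				       const nodemask_t *constrained_nodes)
{
	/*
	 * true for default/preferred/local policies (allocations may spill
	 * anywhere), and for bind/interleave only when the masks intersect.
	 */
	return mempolicy_nodemask_intersects(tsk, constrained_nodes);
}
#endif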
1987
1988/* Allocate a page in interleaved policy.
1989 Own path because it needs to do special accounting. */
1990static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1991 unsigned nid)
1992{
1993 struct zonelist *zl;
1994 struct page *page;
1995
1996 zl = node_zonelist(nid, gfp);
1997 page = __alloc_pages(gfp, order, zl);
1998 if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
1999 inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
2000 return page;
2001}
2002
2003/**
2004 * alloc_pages_vma - Allocate a page for a VMA.
2005 *
2006 * @gfp:
2007 * %GFP_USER user allocation.
2008 * %GFP_KERNEL kernel allocations,
2009 * %GFP_HIGHMEM highmem/user allocations,
2010 * %GFP_FS allocation should not call back into a file system.
2011 * %GFP_ATOMIC don't sleep.
2012 *
2013 * @order: Order of the GFP allocation.
2014 * @vma: Pointer to VMA or NULL if not available.
2015 * @addr: Virtual address of the allocation. Must be inside the VMA.
2016 *
2017 * This function allocates a page from the kernel page pool and applies
2018 * a NUMA policy associated with the VMA or the current process.
2019 * When VMA is not NULL, the caller must hold down_read on the mmap_sem of
2020 * the mm_struct of the VMA to prevent it from going away. Should be used
2021 * for all allocations for pages that will be mapped into user space.
2022 * Returns NULL when no page can be allocated.
2023 *
2024 * Should be called with the mmap_sem of the vma held.
2025 */
2026struct page *
2027alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
2028 unsigned long addr, int node)
2029{
2030 struct mempolicy *pol;
2031 struct page *page;
2032 unsigned int cpuset_mems_cookie;
2033
2034retry_cpuset:
2035 pol = get_vma_policy(current, vma, addr);
2036 cpuset_mems_cookie = read_mems_allowed_begin();
2037
2038 if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
2039 unsigned nid;
2040
2041 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
2042 mpol_cond_put(pol);
2043 page = alloc_page_interleave(gfp, order, nid);
2044 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
2045 goto retry_cpuset;
2046
2047 return page;
2048 }
2049 page = __alloc_pages_nodemask(gfp, order,
2050 policy_zonelist(gfp, pol, node),
2051 policy_nodemask(gfp, pol));
2052 if (unlikely(mpol_needs_cond_ref(pol)))
2053 __mpol_put(pol);
2054 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
2055 goto retry_cpuset;
2056 return page;
2057}
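
/*
 * Illustrative sketch (added for exposition, not part of the original file):
 * allocating an order-0 page for an anonymous fault so that the VMA (or
 * process) NUMA policy is honoured.  The function name is hypothetical;
 * alloc_pages_vma() above is the real interface.
 */
#if 0	/* example only, not compiled */
static struct page *example_anon_fault_page(struct vm_area_struct *vma,
					    unsigned long addr)
{
	/* called with mmap_sem held for read, as required above */
	return alloc_pages_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0,
			       vma, addr, numa_node_id());
}
#endif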
2058
2059/**
2060 * alloc_pages_current - Allocate pages.
2061 *
2062 * @gfp:
2063 * %GFP_USER user allocation,
2064 * %GFP_KERNEL kernel allocation,
2065 * %GFP_HIGHMEM highmem allocation,
2066 * %GFP_FS don't call back into a file system.
2067 * %GFP_ATOMIC don't sleep.
2068 * @order: Power of two of allocation size in pages. 0 is a single page.
2069 *
2070 * Allocate a page from the kernel page pool. When not in
2071 * interrupt context, apply the current process NUMA policy.
2072 * Returns NULL when no page can be allocated.
2073 *
2074 * Don't call cpuset_update_task_memory_state() unless
2075 * 1) it's ok to take cpuset_sem (can WAIT), and
2076 * 2) allocating for current task (not interrupt).
2077 */
2078struct page *alloc_pages_current(gfp_t gfp, unsigned order)
2079{
2080 struct mempolicy *pol = get_task_policy(current);
2081 struct page *page;
2082 unsigned int cpuset_mems_cookie;
2083
2084 if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
2085 pol = &default_policy;
2086
2087retry_cpuset:
2088 cpuset_mems_cookie = read_mems_allowed_begin();
2089
2090 /*
2091 * No reference counting needed for current->mempolicy
2092 * nor system default_policy
2093 */
2094 if (pol->mode == MPOL_INTERLEAVE)
2095 page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2096 else
2097 page = __alloc_pages_nodemask(gfp, order,
2098 policy_zonelist(gfp, pol, numa_node_id()),
2099 policy_nodemask(gfp, pol));
2100
2101 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
2102 goto retry_cpuset;
2103
2104 return page;
2105}
2106EXPORT_SYMBOL(alloc_pages_current);
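
/*
 * Illustrative sketch (added for exposition, not part of the original file):
 * on CONFIG_NUMA kernels the generic alloc_pages() wrapper resolves to
 * alloc_pages_current(), so ordinary kernel allocations pick up the
 * caller's process policy automatically.  The helper name is hypothetical.
 */
#if 0	/* example only, not compiled */
static void *example_alloc_buffer(unsigned int order)
{
	struct page *page = alloc_pages(GFP_KERNEL, order);

	return page ? page_address(page) : NULL;
}
#endif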
2107
2108int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2109{
2110 struct mempolicy *pol = mpol_dup(vma_policy(src));
2111
2112 if (IS_ERR(pol))
2113 return PTR_ERR(pol);
2114 dst->vm_policy = pol;
2115 return 0;
2116}
2117
2118/*
2119 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
2120 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
2121 * with the mems_allowed returned by cpuset_mems_allowed(). This
2122 * keeps mempolicies cpuset relative after its cpuset moves. See
2123 * further kernel/cpuset.c update_nodemask().
2124 *
2125 * current's mempolicy may be rebound by another task (the task that changes
2126 * cpuset's mems), so we needn't do rebind work for the current task.
2127 */
2128
2129/* Slow path of a mempolicy duplicate */
2130struct mempolicy *__mpol_dup(struct mempolicy *old)
2131{
2132 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2133
2134 if (!new)
2135 return ERR_PTR(-ENOMEM);
2136
2137 /* task's mempolicy is protected by alloc_lock */
2138 if (old == current->mempolicy) {
2139 task_lock(current);
2140 *new = *old;
2141 task_unlock(current);
2142 } else
2143 *new = *old;
2144
2145 rcu_read_lock();
2146 if (current_cpuset_is_being_rebound()) {
2147 nodemask_t mems = cpuset_mems_allowed(current);
2148 if (new->flags & MPOL_F_REBINDING)
2149 mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2);
2150 else
2151 mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
2152 }
2153 rcu_read_unlock();
2154 atomic_set(&new->refcnt, 1);
2155 return new;
2156}
2157
2158/* Slow path of a mempolicy comparison */
2159bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
2160{
2161 if (!a || !b)
2162 return false;
2163 if (a->mode != b->mode)
2164 return false;
2165 if (a->flags != b->flags)
2166 return false;
2167 if (mpol_store_user_nodemask(a))
2168 if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2169 return false;
2170
2171 switch (a->mode) {
2172 case MPOL_BIND:
2173 /* Fall through */
2174 case MPOL_INTERLEAVE:
2175 return !!nodes_equal(a->v.nodes, b->v.nodes);
2176 case MPOL_PREFERRED:
2177 return a->v.preferred_node == b->v.preferred_node;
2178 default:
2179 BUG();
2180 return false;
2181 }
2182}
2183
2184/*
2185 * Shared memory backing store policy support.
2186 *
2187 * Remember policies even when nobody has shared memory mapped.
2188 * The policies are kept in Red-Black tree linked from the inode.
2189 * They are protected by the sp->lock spinlock, which should be held
2190 * for any accesses to the tree.
2191 */
2192
2193/* lookup first element intersecting start-end */
2194/* Caller holds sp->lock */
2195static struct sp_node *
2196sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2197{
2198 struct rb_node *n = sp->root.rb_node;
2199
2200 while (n) {
2201 struct sp_node *p = rb_entry(n, struct sp_node, nd);
2202
2203 if (start >= p->end)
2204 n = n->rb_right;
2205 else if (end <= p->start)
2206 n = n->rb_left;
2207 else
2208 break;
2209 }
2210 if (!n)
2211 return NULL;
2212 for (;;) {
2213 struct sp_node *w = NULL;
2214 struct rb_node *prev = rb_prev(n);
2215 if (!prev)
2216 break;
2217 w = rb_entry(prev, struct sp_node, nd);
2218 if (w->end <= start)
2219 break;
2220 n = prev;
2221 }
2222 return rb_entry(n, struct sp_node, nd);
2223}
2224
2225/* Insert a new shared policy into the list. */
2226/* Caller holds sp->lock */
2227static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2228{
2229 struct rb_node **p = &sp->root.rb_node;
2230 struct rb_node *parent = NULL;
2231 struct sp_node *nd;
2232
2233 while (*p) {
2234 parent = *p;
2235 nd = rb_entry(parent, struct sp_node, nd);
2236 if (new->start < nd->start)
2237 p = &(*p)->rb_left;
2238 else if (new->end > nd->end)
2239 p = &(*p)->rb_right;
2240 else
2241 BUG();
2242 }
2243 rb_link_node(&new->nd, parent, p);
2244 rb_insert_color(&new->nd, &sp->root);
2245 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
2246 new->policy ? new->policy->mode : 0);
2247}
2248
2249/* Find shared policy intersecting idx */
2250struct mempolicy *
2251mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2252{
2253 struct mempolicy *pol = NULL;
2254 struct sp_node *sn;
2255
2256 if (!sp->root.rb_node)
2257 return NULL;
2258 spin_lock(&sp->lock);
2259 sn = sp_lookup(sp, idx, idx+1);
2260 if (sn) {
2261 mpol_get(sn->policy);
2262 pol = sn->policy;
2263 }
2264 spin_unlock(&sp->lock);
2265 return pol;
2266}
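
/*
 * Illustrative sketch (added for exposition, not part of the original file):
 * how a tmpfs-like filesystem can resolve the shared policy for a page
 * index, e.g. from its ->get_policy() vm operation.  The function name is
 * hypothetical; mpol_shared_policy_lookup() takes a reference under
 * sp->lock, so the caller must drop it.
 */
#if 0	/* example only, not compiled */
static int example_policy_mode(struct shared_policy *info, pgoff_t index)
{
	struct mempolicy *pol = mpol_shared_policy_lookup(info, index);
	int mode = pol ? pol->mode : MPOL_DEFAULT;

	mpol_put(pol);		/* drop the reference taken by the lookup */
	return mode;
}
#endif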
2267
2268static void sp_free(struct sp_node *n)
2269{
2270 mpol_put(n->policy);
2271 kmem_cache_free(sn_cache, n);
2272}
2273
2274/**
2275 * mpol_misplaced - check whether current page node is valid in policy
2276 *
2277 * @page: page to be checked
2278 * @vma: vm area where page mapped
2279 * @addr: virtual address where page mapped
2280 *
2281 * Lookup current policy node id for vma,addr and "compare to" page's
2282 * node id.
2283 *
2284 * Returns:
2285 * -1 - not misplaced, page is in the right node
2286 * node - node id where the page should be
2287 *
2288 * Policy determination "mimics" alloc_page_vma().
2289 * Called from fault path where we know the vma and faulting address.
2290 */
2291int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2292{
2293 struct mempolicy *pol;
2294 struct zone *zone;
2295 int curnid = page_to_nid(page);
2296 unsigned long pgoff;
2297 int thiscpu = raw_smp_processor_id();
2298 int thisnid = cpu_to_node(thiscpu);
2299 int polnid = -1;
2300 int ret = -1;
2301
2302 BUG_ON(!vma);
2303
2304 pol = get_vma_policy(current, vma, addr);
2305 if (!(pol->flags & MPOL_F_MOF))
2306 goto out;
2307
2308 switch (pol->mode) {
2309 case MPOL_INTERLEAVE:
2310 BUG_ON(addr >= vma->vm_end);
2311 BUG_ON(addr < vma->vm_start);
2312
2313 pgoff = vma->vm_pgoff;
2314 pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
2315 polnid = offset_il_node(pol, vma, pgoff);
2316 break;
2317
2318 case MPOL_PREFERRED:
2319 if (pol->flags & MPOL_F_LOCAL)
2320 polnid = numa_node_id();
2321 else
2322 polnid = pol->v.preferred_node;
2323 break;
2324
2325 case MPOL_BIND:
2326 /*
2327 * allows binding to multiple nodes.
2328 * use current page if in policy nodemask,
2329 * else select nearest allowed node, if any.
2330 * If no allowed nodes, use current [!misplaced].
2331 */
2332 if (node_isset(curnid, pol->v.nodes))
2333 goto out;
2334 (void)first_zones_zonelist(
2335 node_zonelist(numa_node_id(), GFP_HIGHUSER),
2336 gfp_zone(GFP_HIGHUSER),
2337 &pol->v.nodes, &zone);
2338 polnid = zone->node;
2339 break;
2340
2341 default:
2342 BUG();
2343 }
2344
2345 /* Migrate the page towards the node whose CPU is referencing it */
2346 if (pol->flags & MPOL_F_MORON) {
2347 polnid = thisnid;
2348
2349 if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
2350 goto out;
2351 }
2352
2353 if (curnid != polnid)
2354 ret = polnid;
2355out:
2356 mpol_cond_put(pol);
2357
2358 return ret;
2359}
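
/*
 * Illustrative sketch (added for exposition, not part of the original file):
 * the shape of a NUMA hinting fault handler built on mpol_misplaced().
 * The wrapper name is hypothetical and error handling is elided;
 * migrate_misplaced_page() is the mm/migrate.c helper (assumed signature).
 */
#if 0	/* example only, not compiled */
static void example_numa_hinting_fault(struct page *page,
				       struct vm_area_struct *vma,
				       unsigned long addr)
{
	int target_nid = mpol_misplaced(page, vma, addr);

	if (target_nid == -1)
		return;		/* not misplaced, nothing to do */

	/* best effort: migration may fail and be retried on a later fault */
	migrate_misplaced_page(page, vma, target_nid);
}
#endif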
2360
2361static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2362{
2363	pr_debug("deleting %lx-%lx\n", n->start, n->end);
2364 rb_erase(&n->nd, &sp->root);
2365 sp_free(n);
2366}
2367
2368static void sp_node_init(struct sp_node *node, unsigned long start,
2369 unsigned long end, struct mempolicy *pol)
2370{
2371 node->start = start;
2372 node->end = end;
2373 node->policy = pol;
2374}
2375
2376static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2377 struct mempolicy *pol)
2378{
2379 struct sp_node *n;
2380 struct mempolicy *newpol;
2381
2382 n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2383 if (!n)
2384 return NULL;
2385
2386 newpol = mpol_dup(pol);
2387 if (IS_ERR(newpol)) {
2388 kmem_cache_free(sn_cache, n);
2389 return NULL;
2390 }
2391 newpol->flags |= MPOL_F_SHARED;
2392 sp_node_init(n, start, end, newpol);
2393
2394 return n;
2395}
2396
2397/* Replace a policy range. */
2398static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2399 unsigned long end, struct sp_node *new)
2400{
2401 struct sp_node *n;
2402 struct sp_node *n_new = NULL;
2403 struct mempolicy *mpol_new = NULL;
2404 int ret = 0;
2405
2406restart:
2407 spin_lock(&sp->lock);
2408 n = sp_lookup(sp, start, end);
2409 /* Take care of old policies in the same range. */
2410 while (n && n->start < end) {
2411 struct rb_node *next = rb_next(&n->nd);
2412 if (n->start >= start) {
2413 if (n->end <= end)
2414 sp_delete(sp, n);
2415 else
2416 n->start = end;
2417 } else {
2418 /* Old policy spanning whole new range. */
2419 if (n->end > end) {
2420 if (!n_new)
2421 goto alloc_new;
2422
2423 *mpol_new = *n->policy;
2424 atomic_set(&mpol_new->refcnt, 1);
2425 sp_node_init(n_new, end, n->end, mpol_new);
2426 n->end = start;
2427 sp_insert(sp, n_new);
2428 n_new = NULL;
2429 mpol_new = NULL;
2430 break;
2431 } else
2432 n->end = start;
2433 }
2434 if (!next)
2435 break;
2436 n = rb_entry(next, struct sp_node, nd);
2437 }
2438 if (new)
2439 sp_insert(sp, new);
2440 spin_unlock(&sp->lock);
2441 ret = 0;
2442
2443err_out:
2444 if (mpol_new)
2445 mpol_put(mpol_new);
2446 if (n_new)
2447 kmem_cache_free(sn_cache, n_new);
2448
2449 return ret;
2450
2451alloc_new:
2452 spin_unlock(&sp->lock);
2453 ret = -ENOMEM;
2454 n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2455 if (!n_new)
2456 goto err_out;
2457 mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2458 if (!mpol_new)
2459 goto err_out;
2460 goto restart;
2461}
2462
2463/**
2464 * mpol_shared_policy_init - initialize shared policy for inode
2465 * @sp: pointer to inode shared policy
2466 * @mpol: struct mempolicy to install
2467 *
2468 * Install non-NULL @mpol in inode's shared policy rb-tree.
2469 * On entry, the current task has a reference on a non-NULL @mpol.
2470 * This must be released on exit.
2471 * This is called when the inode is instantiated (get_inode()), so GFP_KERNEL can be used.
2472 */
2473void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2474{
2475 int ret;
2476
2477 sp->root = RB_ROOT; /* empty tree == default mempolicy */
2478 spin_lock_init(&sp->lock);
2479
2480 if (mpol) {
2481 struct vm_area_struct pvma;
2482 struct mempolicy *new;
2483 NODEMASK_SCRATCH(scratch);
2484
2485 if (!scratch)
2486 goto put_mpol;
2487 /* contextualize the tmpfs mount point mempolicy */
2488 new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
2489 if (IS_ERR(new))
2490 goto free_scratch; /* no valid nodemask intersection */
2491
2492 task_lock(current);
2493 ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
2494 task_unlock(current);
2495 if (ret)
2496 goto put_new;
2497
2498 /* Create pseudo-vma that contains just the policy */
2499 memset(&pvma, 0, sizeof(struct vm_area_struct));
2500 pvma.vm_end = TASK_SIZE; /* policy covers entire file */
2501 mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
2502
2503put_new:
2504 mpol_put(new); /* drop initial ref */
2505free_scratch:
2506 NODEMASK_SCRATCH_FREE(scratch);
2507put_mpol:
2508 mpol_put(mpol); /* drop our incoming ref on sb mpol */
2509 }
2510}
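
/*
 * Illustrative sketch (added for exposition, not part of the original file):
 * a tmpfs-like filesystem installing its mount-point mempolicy on a newly
 * instantiated inode.  example_sb_mpol() is a hypothetical helper that
 * returns a referenced copy of the superblock's mpol (tmpfs does this in
 * shmem_get_sbmpol()); the reference is consumed by mpol_shared_policy_init()
 * even on failure.
 */
#if 0	/* example only, not compiled */
static void example_inode_init_policy(struct shared_policy *sp,
				      struct super_block *sb)
{
	mpol_shared_policy_init(sp, example_sb_mpol(sb));	/* hypothetical helper */
}
#endif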
2511
2512int mpol_set_shared_policy(struct shared_policy *info,
2513 struct vm_area_struct *vma, struct mempolicy *npol)
2514{
2515 int err;
2516 struct sp_node *new = NULL;
2517 unsigned long sz = vma_pages(vma);
2518
2519 pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
2520 vma->vm_pgoff,
2521 sz, npol ? npol->mode : -1,
2522 npol ? npol->flags : -1,
2523 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
2524
2525 if (npol) {
2526 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2527 if (!new)
2528 return -ENOMEM;
2529 }
2530 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2531 if (err && new)
2532 sp_free(new);
2533 return err;
2534}
2535
2536/* Free a backing policy store on inode delete. */
2537void mpol_free_shared_policy(struct shared_policy *p)
2538{
2539 struct sp_node *n;
2540 struct rb_node *next;
2541
2542 if (!p->root.rb_node)
2543 return;
2544 spin_lock(&p->lock);
2545 next = rb_first(&p->root);
2546 while (next) {
2547 n = rb_entry(next, struct sp_node, nd);
2548 next = rb_next(&n->nd);
2549 sp_delete(p, n);
2550 }
2551 spin_unlock(&p->lock);
2552}
2553
2554#ifdef CONFIG_NUMA_BALANCING
2555static int __initdata numabalancing_override;
2556
2557static void __init check_numabalancing_enable(void)
2558{
2559 bool numabalancing_default = false;
2560
2561 if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
2562 numabalancing_default = true;
2563
2564 /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2565 if (numabalancing_override)
2566 set_numabalancing_state(numabalancing_override == 1);
2567
2568 if (nr_node_ids > 1 && !numabalancing_override) {
2569 pr_info("%s automatic NUMA balancing. "
2570 "Configure with numa_balancing= or the "
2571			"kernel.numa_balancing sysctl\n",
2572 numabalancing_default ? "Enabling" : "Disabling");
2573 set_numabalancing_state(numabalancing_default);
2574 }
2575}
2576
2577static int __init setup_numabalancing(char *str)
2578{
2579 int ret = 0;
2580 if (!str)
2581 goto out;
2582
2583 if (!strcmp(str, "enable")) {
2584 numabalancing_override = 1;
2585 ret = 1;
2586 } else if (!strcmp(str, "disable")) {
2587 numabalancing_override = -1;
2588 ret = 1;
2589 }
2590out:
2591 if (!ret)
2592 pr_warn("Unable to parse numa_balancing=\n");
2593
2594 return ret;
2595}
2596__setup("numa_balancing=", setup_numabalancing);
2597#else
2598static inline void __init check_numabalancing_enable(void)
2599{
2600}
2601#endif /* CONFIG_NUMA_BALANCING */
2602
2603/* assumes fs == KERNEL_DS */
2604void __init numa_policy_init(void)
2605{
2606 nodemask_t interleave_nodes;
2607 unsigned long largest = 0;
2608 int nid, prefer = 0;
2609
2610 policy_cache = kmem_cache_create("numa_policy",
2611 sizeof(struct mempolicy),
2612 0, SLAB_PANIC, NULL);
2613
2614 sn_cache = kmem_cache_create("shared_policy_node",
2615 sizeof(struct sp_node),
2616 0, SLAB_PANIC, NULL);
2617
2618 for_each_node(nid) {
2619 preferred_node_policy[nid] = (struct mempolicy) {
2620 .refcnt = ATOMIC_INIT(1),
2621 .mode = MPOL_PREFERRED,
2622 .flags = MPOL_F_MOF | MPOL_F_MORON,
2623 .v = { .preferred_node = nid, },
2624 };
2625 }
2626
2627 /*
2628 * Set interleaving policy for system init. Interleaving is only
2629 * enabled across suitably sized nodes (default is >= 16MB), or
2630 * fall back to the largest node if they're all smaller.
2631 */
2632 nodes_clear(interleave_nodes);
2633 for_each_node_state(nid, N_MEMORY) {
2634 unsigned long total_pages = node_present_pages(nid);
2635
2636 /* Preserve the largest node */
2637 if (largest < total_pages) {
2638 largest = total_pages;
2639 prefer = nid;
2640 }
2641
2642 /* Interleave this node? */
2643 if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2644 node_set(nid, interleave_nodes);
2645 }
2646
2647 /* All too small, use the largest */
2648 if (unlikely(nodes_empty(interleave_nodes)))
2649 node_set(prefer, interleave_nodes);
2650
2651 if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2652		pr_err("numa_policy_init: interleaving failed\n");
2653
2654 check_numabalancing_enable();
2655}
2656
2657/* Reset policy of current process to default */
2658void numa_default_policy(void)
2659{
2660 do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
2661}
2662
2663/*
2664 * Parse and format mempolicy from/to strings
2665 */
2666
2667/*
2668 * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag.
2669 */
2670static const char * const policy_modes[] =
2671{
2672 [MPOL_DEFAULT] = "default",
2673 [MPOL_PREFERRED] = "prefer",
2674 [MPOL_BIND] = "bind",
2675 [MPOL_INTERLEAVE] = "interleave",
2676 [MPOL_LOCAL] = "local",
2677};
2678
2679
2680#ifdef CONFIG_TMPFS
2681/**
2682 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
2683 * @str: string containing mempolicy to parse
2684 * @mpol: pointer to struct mempolicy pointer, returned on success.
2685 *
2686 * Format of input:
2687 * <mode>[=<flags>][:<nodelist>]
2688 *
2689 * On success, returns 0, else 1
2690 */
2691int mpol_parse_str(char *str, struct mempolicy **mpol)
2692{
2693 struct mempolicy *new = NULL;
2694 unsigned short mode;
2695 unsigned short mode_flags;
2696 nodemask_t nodes;
2697 char *nodelist = strchr(str, ':');
2698 char *flags = strchr(str, '=');
2699 int err = 1;
2700
2701 if (nodelist) {
2702 /* NUL-terminate mode or flags string */
2703 *nodelist++ = '\0';
2704 if (nodelist_parse(nodelist, nodes))
2705 goto out;
2706 if (!nodes_subset(nodes, node_states[N_MEMORY]))
2707 goto out;
2708 } else
2709 nodes_clear(nodes);
2710
2711 if (flags)
2712 *flags++ = '\0'; /* terminate mode string */
2713
2714	for (mode = 0; mode < MPOL_MAX; mode++) {
2715		if (!strcmp(str, policy_modes[mode]))
2716			break;
2717	}
2719 if (mode >= MPOL_MAX)
2720 goto out;
2721
2722 switch (mode) {
2723 case MPOL_PREFERRED:
2724 /*
2725 * Insist on a nodelist of one node only
2726 */
2727 if (nodelist) {
2728 char *rest = nodelist;
2729 while (isdigit(*rest))
2730 rest++;
2731 if (*rest)
2732 goto out;
2733 }
2734 break;
2735 case MPOL_INTERLEAVE:
2736 /*
2737 * Default to online nodes with memory if no nodelist
2738 */
2739 if (!nodelist)
2740 nodes = node_states[N_MEMORY];
2741 break;
2742 case MPOL_LOCAL:
2743 /*
2744 * Don't allow a nodelist; mpol_new() checks flags
2745 */
2746 if (nodelist)
2747 goto out;
2748 mode = MPOL_PREFERRED;
2749 break;
2750 case MPOL_DEFAULT:
2751 /*
2752		 * Insist on an empty nodelist
2753 */
2754 if (!nodelist)
2755 err = 0;
2756 goto out;
2757 case MPOL_BIND:
2758 /*
2759 * Insist on a nodelist
2760 */
2761 if (!nodelist)
2762 goto out;
2763 }
2764
2765 mode_flags = 0;
2766 if (flags) {
2767 /*
2768 * Currently, we only support two mutually exclusive
2769 * mode flags.
2770 */
2771 if (!strcmp(flags, "static"))
2772 mode_flags |= MPOL_F_STATIC_NODES;
2773 else if (!strcmp(flags, "relative"))
2774 mode_flags |= MPOL_F_RELATIVE_NODES;
2775 else
2776 goto out;
2777 }
2778
2779 new = mpol_new(mode, mode_flags, &nodes);
2780 if (IS_ERR(new))
2781 goto out;
2782
2783 /*
2784 * Save nodes for mpol_to_str() to show the tmpfs mount options
2785 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2786 */
2787 if (mode != MPOL_PREFERRED)
2788 new->v.nodes = nodes;
2789 else if (nodelist)
2790 new->v.preferred_node = first_node(nodes);
2791 else
2792 new->flags |= MPOL_F_LOCAL;
2793
2794 /*
2795 * Save nodes for contextualization: this will be used to "clone"
2796 * the mempolicy in a specific context [cpuset] at a later time.
2797 */
2798 new->w.user_nodemask = nodes;
2799
2800 err = 0;
2801
2802out:
2803 /* Restore string for error message */
2804 if (nodelist)
2805 *--nodelist = ':';
2806 if (flags)
2807 *--flags = '=';
2808 if (!err)
2809 *mpol = new;
2810 return err;
2811}
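
/*
 * Illustrative sketch (added for exposition, not part of the original file):
 * parsing a tmpfs-style "mpol=" mount option with the
 * <mode>[=<flags>][:<nodelist>] format documented above, e.g.
 * "interleave:0-3" or "prefer=static:1".  The function name and buffer
 * handling are hypothetical; mpol_parse_str() modifies the string in place,
 * hence the copy.
 */
#if 0	/* example only, not compiled */
static struct mempolicy *example_parse_mount_option(const char *opt)
{
	struct mempolicy *mpol = NULL;
	char buf[64];

	strlcpy(buf, opt, sizeof(buf));
	if (mpol_parse_str(buf, &mpol))
		return NULL;	/* non-zero return means the string was invalid */
	return mpol;
}
#endif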
2812#endif /* CONFIG_TMPFS */
2813
2814/**
2815 * mpol_to_str - format a mempolicy structure for printing
2816 * @buffer: to contain formatted mempolicy string
2817 * @maxlen: length of @buffer
2818 * @pol: pointer to mempolicy to be formatted
2819 *
2820 * Convert @pol into a string. If @buffer is too short, truncate the string.
2821 * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
2822 * longest flag, "relative", and to display at least a few node ids.
2823 */
2824void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
2825{
2826 char *p = buffer;
2827 nodemask_t nodes = NODE_MASK_NONE;
2828 unsigned short mode = MPOL_DEFAULT;
2829 unsigned short flags = 0;
2830
2831 if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
2832 mode = pol->mode;
2833 flags = pol->flags;
2834 }
2835
2836 switch (mode) {
2837 case MPOL_DEFAULT:
2838 break;
2839 case MPOL_PREFERRED:
2840 if (flags & MPOL_F_LOCAL)
2841 mode = MPOL_LOCAL;
2842 else
2843 node_set(pol->v.preferred_node, nodes);
2844 break;
2845 case MPOL_BIND:
2846 case MPOL_INTERLEAVE:
2847 nodes = pol->v.nodes;
2848 break;
2849 default:
2850 WARN_ON_ONCE(1);
2851 snprintf(p, maxlen, "unknown");
2852 return;
2853 }
2854
2855 p += snprintf(p, maxlen, "%s", policy_modes[mode]);
2856
2857 if (flags & MPOL_MODE_FLAGS) {
2858 p += snprintf(p, buffer + maxlen - p, "=");
2859
2860 /*
2861 * Currently, the only defined flags are mutually exclusive
2862 */
2863 if (flags & MPOL_F_STATIC_NODES)
2864 p += snprintf(p, buffer + maxlen - p, "static");
2865 else if (flags & MPOL_F_RELATIVE_NODES)
2866 p += snprintf(p, buffer + maxlen - p, "relative");
2867 }
2868
2869 if (!nodes_empty(nodes)) {
2870 p += snprintf(p, buffer + maxlen - p, ":");
2871 p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
2872 }
2873}
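
/*
 * Illustrative sketch (added for exposition, not part of the original file):
 * formatting a policy the way tmpfs show_options or /proc display it.  The
 * function name is hypothetical; a buffer of 32+ bytes follows the
 * recommendation above.
 */
#if 0	/* example only, not compiled */
static void example_show_policy(struct seq_file *m, struct mempolicy *pol)
{
	char buffer[64];

	mpol_to_str(buffer, sizeof(buffer), pol);
	seq_printf(m, ",mpol=%s", buffer);	/* e.g. ",mpol=interleave:0-3" */
}
#endif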
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Simple NUMA memory policy for the Linux kernel.
4 *
5 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
6 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
7 *
8 * NUMA policy allows the user to give hints in which node(s) memory should
9 * be allocated.
10 *
11 * Support four policies per VMA and per process:
12 *
13 * The VMA policy has priority over the process policy for a page fault.
14 *
15 * interleave Allocate memory interleaved over a set of nodes,
16 * with normal fallback if it fails.
17 * For VMA based allocations this interleaves based on the
18 * offset into the backing object or offset into the mapping
19 * for anonymous memory. For process policy an process counter
20 * is used.
21 *
22 * bind Only allocate memory on a specific set of nodes,
23 * no fallback.
24 * FIXME: memory is allocated starting with the first node
25 * to the last. It would be better if bind would truly restrict
26 * the allocation to memory nodes instead
27 *
28 * preferred Try a specific node first before normal fallback.
29 * As a special case NUMA_NO_NODE here means do the allocation
30 * on the local CPU. This is normally identical to default,
31 * but useful to set in a VMA when you have a non default
32 * process policy.
33 *
34 * preferred many Try a set of nodes first before normal fallback. This is
35 * similar to preferred without the special case.
36 *
37 * default Allocate on the local node first, or when on a VMA
38 * use the process policy. This is what Linux always did
39 * in a NUMA aware kernel and still does by, ahem, default.
40 *
41 * The process policy is applied for most non interrupt memory allocations
42 * in that process' context. Interrupts ignore the policies and always
43 * try to allocate on the local CPU. The VMA policy is only applied for memory
44 * allocations for a VMA in the VM.
45 *
46 * Currently there are a few corner cases in swapping where the policy
47 * is not applied, but the majority should be handled. When process policy
48 * is used it is not remembered over swap outs/swap ins.
49 *
50 * Only the highest zone in the zone hierarchy gets policied. Allocations
51 * requesting a lower zone just use default policy. This implies that
52 * on systems with highmem kernel lowmem allocation don't get policied.
53 * Same with GFP_DMA allocations.
54 *
55 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
56 * all users and remembered even when nobody has memory mapped.
57 */
58
59/* Notebook:
60 fix mmap readahead to honour policy and enable policy for any page cache
61 object
62 statistics for bigpages
63 global policy for page cache? currently it uses process policy. Requires
64 first item above.
65 handle mremap for shared memory (currently ignored for the policy)
66 grows down?
67 make bind policy root only? It can trigger oom much faster and the
68 kernel is not always grateful with that.
69*/
70
71#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
72
73#include <linux/mempolicy.h>
74#include <linux/pagewalk.h>
75#include <linux/highmem.h>
76#include <linux/hugetlb.h>
77#include <linux/kernel.h>
78#include <linux/sched.h>
79#include <linux/sched/mm.h>
80#include <linux/sched/numa_balancing.h>
81#include <linux/sched/task.h>
82#include <linux/nodemask.h>
83#include <linux/cpuset.h>
84#include <linux/slab.h>
85#include <linux/string.h>
86#include <linux/export.h>
87#include <linux/nsproxy.h>
88#include <linux/interrupt.h>
89#include <linux/init.h>
90#include <linux/compat.h>
91#include <linux/ptrace.h>
92#include <linux/swap.h>
93#include <linux/seq_file.h>
94#include <linux/proc_fs.h>
95#include <linux/migrate.h>
96#include <linux/ksm.h>
97#include <linux/rmap.h>
98#include <linux/security.h>
99#include <linux/syscalls.h>
100#include <linux/ctype.h>
101#include <linux/mm_inline.h>
102#include <linux/mmu_notifier.h>
103#include <linux/printk.h>
104#include <linux/swapops.h>
105
106#include <asm/tlbflush.h>
107#include <asm/tlb.h>
108#include <linux/uaccess.h>
109
110#include "internal.h"
111
112/* Internal flags */
113#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0) /* Skip checks for continuous vmas */
114#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */
115
116static struct kmem_cache *policy_cache;
117static struct kmem_cache *sn_cache;
118
119/* Highest zone. An specific allocation for a zone below that is not
120 policied. */
121enum zone_type policy_zone = 0;
122
123/*
124 * run-time system-wide default policy => local allocation
125 */
126static struct mempolicy default_policy = {
127 .refcnt = ATOMIC_INIT(1), /* never free it */
128 .mode = MPOL_LOCAL,
129};
130
131static struct mempolicy preferred_node_policy[MAX_NUMNODES];
132
133/**
134 * numa_map_to_online_node - Find closest online node
135 * @node: Node id to start the search
136 *
137 * Lookup the next closest node by distance if @nid is not online.
138 *
139 * Return: this @node if it is online, otherwise the closest node by distance
140 */
141int numa_map_to_online_node(int node)
142{
143 int min_dist = INT_MAX, dist, n, min_node;
144
145 if (node == NUMA_NO_NODE || node_online(node))
146 return node;
147
148 min_node = node;
149 for_each_online_node(n) {
150 dist = node_distance(node, n);
151 if (dist < min_dist) {
152 min_dist = dist;
153 min_node = n;
154 }
155 }
156
157 return min_node;
158}
159EXPORT_SYMBOL_GPL(numa_map_to_online_node);
160
161struct mempolicy *get_task_policy(struct task_struct *p)
162{
163 struct mempolicy *pol = p->mempolicy;
164 int node;
165
166 if (pol)
167 return pol;
168
169 node = numa_node_id();
170 if (node != NUMA_NO_NODE) {
171 pol = &preferred_node_policy[node];
172 /* preferred_node_policy is not initialised early in boot */
173 if (pol->mode)
174 return pol;
175 }
176
177 return &default_policy;
178}
179
180static const struct mempolicy_operations {
181 int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
182 void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
183} mpol_ops[MPOL_MAX];
184
185static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
186{
187 return pol->flags & MPOL_MODE_FLAGS;
188}
189
190static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
191 const nodemask_t *rel)
192{
193 nodemask_t tmp;
194 nodes_fold(tmp, *orig, nodes_weight(*rel));
195 nodes_onto(*ret, tmp, *rel);
196}
197
198static int mpol_new_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
199{
200 if (nodes_empty(*nodes))
201 return -EINVAL;
202 pol->nodes = *nodes;
203 return 0;
204}
205
206static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
207{
208 if (nodes_empty(*nodes))
209 return -EINVAL;
210
211 nodes_clear(pol->nodes);
212 node_set(first_node(*nodes), pol->nodes);
213 return 0;
214}
215
216/*
217 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
218 * any, for the new policy. mpol_new() has already validated the nodes
219 * parameter with respect to the policy mode and flags.
220 *
221 * Must be called holding task's alloc_lock to protect task's mems_allowed
222 * and mempolicy. May also be called holding the mmap_lock for write.
223 */
224static int mpol_set_nodemask(struct mempolicy *pol,
225 const nodemask_t *nodes, struct nodemask_scratch *nsc)
226{
227 int ret;
228
229 /*
230 * Default (pol==NULL) resp. local memory policies are not a
231 * subject of any remapping. They also do not need any special
232 * constructor.
233 */
234 if (!pol || pol->mode == MPOL_LOCAL)
235 return 0;
236
237 /* Check N_MEMORY */
238 nodes_and(nsc->mask1,
239 cpuset_current_mems_allowed, node_states[N_MEMORY]);
240
241 VM_BUG_ON(!nodes);
242
243 if (pol->flags & MPOL_F_RELATIVE_NODES)
244 mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
245 else
246 nodes_and(nsc->mask2, *nodes, nsc->mask1);
247
248 if (mpol_store_user_nodemask(pol))
249 pol->w.user_nodemask = *nodes;
250 else
251 pol->w.cpuset_mems_allowed = cpuset_current_mems_allowed;
252
253 ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
254 return ret;
255}
256
257/*
258 * This function just creates a new policy, does some check and simple
259 * initialization. You must invoke mpol_set_nodemask() to set nodes.
260 */
261static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
262 nodemask_t *nodes)
263{
264 struct mempolicy *policy;
265
266 pr_debug("setting mode %d flags %d nodes[0] %lx\n",
267 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
268
269 if (mode == MPOL_DEFAULT) {
270 if (nodes && !nodes_empty(*nodes))
271 return ERR_PTR(-EINVAL);
272 return NULL;
273 }
274 VM_BUG_ON(!nodes);
275
276 /*
277 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
278 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
279 * All other modes require a valid pointer to a non-empty nodemask.
280 */
281 if (mode == MPOL_PREFERRED) {
282 if (nodes_empty(*nodes)) {
283 if (((flags & MPOL_F_STATIC_NODES) ||
284 (flags & MPOL_F_RELATIVE_NODES)))
285 return ERR_PTR(-EINVAL);
286
287 mode = MPOL_LOCAL;
288 }
289 } else if (mode == MPOL_LOCAL) {
290 if (!nodes_empty(*nodes) ||
291 (flags & MPOL_F_STATIC_NODES) ||
292 (flags & MPOL_F_RELATIVE_NODES))
293 return ERR_PTR(-EINVAL);
294 } else if (nodes_empty(*nodes))
295 return ERR_PTR(-EINVAL);
296 policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
297 if (!policy)
298 return ERR_PTR(-ENOMEM);
299 atomic_set(&policy->refcnt, 1);
300 policy->mode = mode;
301 policy->flags = flags;
302 policy->home_node = NUMA_NO_NODE;
303
304 return policy;
305}
306
307/* Slow path of a mpol destructor. */
308void __mpol_put(struct mempolicy *p)
309{
310 if (!atomic_dec_and_test(&p->refcnt))
311 return;
312 kmem_cache_free(policy_cache, p);
313}
314
315static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
316{
317}
318
319static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
320{
321 nodemask_t tmp;
322
323 if (pol->flags & MPOL_F_STATIC_NODES)
324 nodes_and(tmp, pol->w.user_nodemask, *nodes);
325 else if (pol->flags & MPOL_F_RELATIVE_NODES)
326 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
327 else {
328 nodes_remap(tmp, pol->nodes, pol->w.cpuset_mems_allowed,
329 *nodes);
330 pol->w.cpuset_mems_allowed = *nodes;
331 }
332
333 if (nodes_empty(tmp))
334 tmp = *nodes;
335
336 pol->nodes = tmp;
337}
338
339static void mpol_rebind_preferred(struct mempolicy *pol,
340 const nodemask_t *nodes)
341{
342 pol->w.cpuset_mems_allowed = *nodes;
343}
344
345/*
346 * mpol_rebind_policy - Migrate a policy to a different set of nodes
347 *
348 * Per-vma policies are protected by mmap_lock. Allocations using per-task
349 * policies are protected by task->mems_allowed_seq to prevent a premature
350 * OOM/allocation failure due to parallel nodemask modification.
351 */
352static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
353{
354 if (!pol || pol->mode == MPOL_LOCAL)
355 return;
356 if (!mpol_store_user_nodemask(pol) &&
357 nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
358 return;
359
360 mpol_ops[pol->mode].rebind(pol, newmask);
361}
362
363/*
364 * Wrapper for mpol_rebind_policy() that just requires task
365 * pointer, and updates task mempolicy.
366 *
367 * Called with task's alloc_lock held.
368 */
369
370void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
371{
372 mpol_rebind_policy(tsk->mempolicy, new);
373}
374
375/*
376 * Rebind each vma in mm to new nodemask.
377 *
378 * Call holding a reference to mm. Takes mm->mmap_lock during call.
379 */
380
381void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
382{
383 struct vm_area_struct *vma;
384 VMA_ITERATOR(vmi, mm, 0);
385
386 mmap_write_lock(mm);
387 for_each_vma(vmi, vma)
388 mpol_rebind_policy(vma->vm_policy, new);
389 mmap_write_unlock(mm);
390}
391
392static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
393 [MPOL_DEFAULT] = {
394 .rebind = mpol_rebind_default,
395 },
396 [MPOL_INTERLEAVE] = {
397 .create = mpol_new_nodemask,
398 .rebind = mpol_rebind_nodemask,
399 },
400 [MPOL_PREFERRED] = {
401 .create = mpol_new_preferred,
402 .rebind = mpol_rebind_preferred,
403 },
404 [MPOL_BIND] = {
405 .create = mpol_new_nodemask,
406 .rebind = mpol_rebind_nodemask,
407 },
408 [MPOL_LOCAL] = {
409 .rebind = mpol_rebind_default,
410 },
411 [MPOL_PREFERRED_MANY] = {
412 .create = mpol_new_nodemask,
413 .rebind = mpol_rebind_preferred,
414 },
415};
416
417static int migrate_page_add(struct page *page, struct list_head *pagelist,
418 unsigned long flags);
419
420struct queue_pages {
421 struct list_head *pagelist;
422 unsigned long flags;
423 nodemask_t *nmask;
424 unsigned long start;
425 unsigned long end;
426 struct vm_area_struct *first;
427};
428
429/*
430 * Check if the page's nid is in qp->nmask.
431 *
432 * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
433 * in the invert of qp->nmask.
434 */
435static inline bool queue_pages_required(struct page *page,
436 struct queue_pages *qp)
437{
438 int nid = page_to_nid(page);
439 unsigned long flags = qp->flags;
440
441 return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
442}
443
444/*
445 * queue_pages_pmd() has three possible return values:
446 * 0 - pages are placed on the right node or queued successfully, or
447 * special page is met, i.e. huge zero page.
448 * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
449 * specified.
450 * -EIO - is migration entry or only MPOL_MF_STRICT was specified and an
451 * existing page was already on a node that does not follow the
452 * policy.
453 */
454static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
455 unsigned long end, struct mm_walk *walk)
456 __releases(ptl)
457{
458 int ret = 0;
459 struct page *page;
460 struct queue_pages *qp = walk->private;
461 unsigned long flags;
462
463 if (unlikely(is_pmd_migration_entry(*pmd))) {
464 ret = -EIO;
465 goto unlock;
466 }
467 page = pmd_page(*pmd);
468 if (is_huge_zero_page(page)) {
469 walk->action = ACTION_CONTINUE;
470 goto unlock;
471 }
472 if (!queue_pages_required(page, qp))
473 goto unlock;
474
475 flags = qp->flags;
476 /* go to thp migration */
477 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
478 if (!vma_migratable(walk->vma) ||
479 migrate_page_add(page, qp->pagelist, flags)) {
480 ret = 1;
481 goto unlock;
482 }
483 } else
484 ret = -EIO;
485unlock:
486 spin_unlock(ptl);
487 return ret;
488}
489
490/*
491 * Scan through pages checking if pages follow certain conditions,
492 * and move them to the pagelist if they do.
493 *
494 * queue_pages_pte_range() has three possible return values:
495 * 0 - pages are placed on the right node or queued successfully, or
496 * special page is met, i.e. zero page.
497 * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
498 * specified.
499 * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
500 * on a node that does not follow the policy.
501 */
502static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
503 unsigned long end, struct mm_walk *walk)
504{
505 struct vm_area_struct *vma = walk->vma;
506 struct page *page;
507 struct queue_pages *qp = walk->private;
508 unsigned long flags = qp->flags;
509 bool has_unmovable = false;
510 pte_t *pte, *mapped_pte;
511 spinlock_t *ptl;
512
513 ptl = pmd_trans_huge_lock(pmd, vma);
514 if (ptl)
515 return queue_pages_pmd(pmd, ptl, addr, end, walk);
516
517 if (pmd_trans_unstable(pmd))
518 return 0;
519
520 mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
521 for (; addr != end; pte++, addr += PAGE_SIZE) {
522 if (!pte_present(*pte))
523 continue;
524 page = vm_normal_page(vma, addr, *pte);
525 if (!page || is_zone_device_page(page))
526 continue;
527 /*
528 * vm_normal_page() filters out zero pages, but there might
529 * still be PageReserved pages to skip, perhaps in a VDSO.
530 */
531 if (PageReserved(page))
532 continue;
533 if (!queue_pages_required(page, qp))
534 continue;
535 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
536 /* MPOL_MF_STRICT must be specified if we get here */
537 if (!vma_migratable(vma)) {
538 has_unmovable = true;
539 break;
540 }
541
542 /*
543 * Do not abort immediately since there may be
544 * temporary off LRU pages in the range. Still
545 * need migrate other LRU pages.
546 */
547 if (migrate_page_add(page, qp->pagelist, flags))
548 has_unmovable = true;
549 } else
550 break;
551 }
552 pte_unmap_unlock(mapped_pte, ptl);
553 cond_resched();
554
555 if (has_unmovable)
556 return 1;
557
558 return addr != end ? -EIO : 0;
559}
560
561static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
562 unsigned long addr, unsigned long end,
563 struct mm_walk *walk)
564{
565 int ret = 0;
566#ifdef CONFIG_HUGETLB_PAGE
567 struct queue_pages *qp = walk->private;
568 unsigned long flags = (qp->flags & MPOL_MF_VALID);
569 struct page *page;
570 spinlock_t *ptl;
571 pte_t entry;
572
573 ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
574 entry = huge_ptep_get(pte);
575 if (!pte_present(entry))
576 goto unlock;
577 page = pte_page(entry);
578 if (!queue_pages_required(page, qp))
579 goto unlock;
580
581 if (flags == MPOL_MF_STRICT) {
582 /*
583 * STRICT alone means only detecting misplaced page and no
584 * need to further check other vma.
585 */
586 ret = -EIO;
587 goto unlock;
588 }
589
590 if (!vma_migratable(walk->vma)) {
591 /*
592 * Must be STRICT with MOVE*, otherwise .test_walk() have
593 * stopped walking current vma.
594 * Detecting misplaced page but allow migrating pages which
595 * have been queued.
596 */
597 ret = 1;
598 goto unlock;
599 }
600
601 /* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
602 if (flags & (MPOL_MF_MOVE_ALL) ||
603 (flags & MPOL_MF_MOVE && page_mapcount(page) == 1 &&
604 !hugetlb_pmd_shared(pte))) {
605 if (isolate_hugetlb(page, qp->pagelist) &&
606 (flags & MPOL_MF_STRICT))
607 /*
608 * Failed to isolate page but allow migrating pages
609 * which have been queued.
610 */
611 ret = 1;
612 }
613unlock:
614 spin_unlock(ptl);
615#else
616 BUG();
617#endif
618 return ret;
619}
620
621#ifdef CONFIG_NUMA_BALANCING
622/*
623 * This is used to mark a range of virtual addresses to be inaccessible.
624 * These are later cleared by a NUMA hinting fault. Depending on these
625 * faults, pages may be migrated for better NUMA placement.
626 *
627 * This is assuming that NUMA faults are handled using PROT_NONE. If
628 * an architecture makes a different choice, it will need further
629 * changes to the core.
630 */
631unsigned long change_prot_numa(struct vm_area_struct *vma,
632 unsigned long addr, unsigned long end)
633{
634 struct mmu_gather tlb;
635 int nr_updated;
636
637 tlb_gather_mmu(&tlb, vma->vm_mm);
638
639 nr_updated = change_protection(&tlb, vma, addr, end, PAGE_NONE,
640 MM_CP_PROT_NUMA);
641 if (nr_updated)
642 count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
643
644 tlb_finish_mmu(&tlb);
645
646 return nr_updated;
647}
648#else
649static unsigned long change_prot_numa(struct vm_area_struct *vma,
650 unsigned long addr, unsigned long end)
651{
652 return 0;
653}
654#endif /* CONFIG_NUMA_BALANCING */
655
656static int queue_pages_test_walk(unsigned long start, unsigned long end,
657 struct mm_walk *walk)
658{
659 struct vm_area_struct *next, *vma = walk->vma;
660 struct queue_pages *qp = walk->private;
661 unsigned long endvma = vma->vm_end;
662 unsigned long flags = qp->flags;
663
664 /* range check first */
665 VM_BUG_ON_VMA(!range_in_vma(vma, start, end), vma);
666
667 if (!qp->first) {
668 qp->first = vma;
669 if (!(flags & MPOL_MF_DISCONTIG_OK) &&
670 (qp->start < vma->vm_start))
671 /* hole at head side of range */
672 return -EFAULT;
673 }
674 next = find_vma(vma->vm_mm, vma->vm_end);
675 if (!(flags & MPOL_MF_DISCONTIG_OK) &&
676 ((vma->vm_end < qp->end) &&
677 (!next || vma->vm_end < next->vm_start)))
678 /* hole at middle or tail of range */
679 return -EFAULT;
680
681 /*
682 * Need check MPOL_MF_STRICT to return -EIO if possible
683 * regardless of vma_migratable
684 */
685 if (!vma_migratable(vma) &&
686 !(flags & MPOL_MF_STRICT))
687 return 1;
688
689 if (endvma > end)
690 endvma = end;
691
692 if (flags & MPOL_MF_LAZY) {
693 /* Similar to task_numa_work, skip inaccessible VMAs */
694 if (!is_vm_hugetlb_page(vma) && vma_is_accessible(vma) &&
695 !(vma->vm_flags & VM_MIXEDMAP))
696 change_prot_numa(vma, start, endvma);
697 return 1;
698 }
699
700 /* queue pages from current vma */
701 if (flags & MPOL_MF_VALID)
702 return 0;
703 return 1;
704}
705
706static const struct mm_walk_ops queue_pages_walk_ops = {
707 .hugetlb_entry = queue_pages_hugetlb,
708 .pmd_entry = queue_pages_pte_range,
709 .test_walk = queue_pages_test_walk,
710};
711
712/*
713 * Walk through page tables and collect pages to be migrated.
714 *
715 * If pages found in a given range are on a set of nodes (determined by
716 * @nodes and @flags,) it's isolated and queued to the pagelist which is
717 * passed via @private.
718 *
719 * queue_pages_range() has three possible return values:
720 * 1 - there is unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were
721 * specified.
722 * 0 - queue pages successfully or no misplaced page.
723 * errno - i.e. misplaced pages with MPOL_MF_STRICT specified (-EIO) or
724 * memory range specified by nodemask and maxnode points outside
725 * your accessible address space (-EFAULT)
726 */
727static int
728queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
729 nodemask_t *nodes, unsigned long flags,
730 struct list_head *pagelist)
731{
732 int err;
733 struct queue_pages qp = {
734 .pagelist = pagelist,
735 .flags = flags,
736 .nmask = nodes,
737 .start = start,
738 .end = end,
739 .first = NULL,
740 };
741
742 err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp);
743
744 if (!qp.first)
745 /* whole range in hole */
746 err = -EFAULT;
747
748 return err;
749}
750
751/*
752 * Apply policy to a single VMA
753 * This must be called with the mmap_lock held for writing.
754 */
755static int vma_replace_policy(struct vm_area_struct *vma,
756 struct mempolicy *pol)
757{
758 int err;
759 struct mempolicy *old;
760 struct mempolicy *new;
761
762 pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
763 vma->vm_start, vma->vm_end, vma->vm_pgoff,
764 vma->vm_ops, vma->vm_file,
765 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
766
767 new = mpol_dup(pol);
768 if (IS_ERR(new))
769 return PTR_ERR(new);
770
771 if (vma->vm_ops && vma->vm_ops->set_policy) {
772 err = vma->vm_ops->set_policy(vma, new);
773 if (err)
774 goto err_out;
775 }
776
777 old = vma->vm_policy;
778 vma->vm_policy = new; /* protected by mmap_lock */
779 mpol_put(old);
780
781 return 0;
782 err_out:
783 mpol_put(new);
784 return err;
785}
786
787/* Step 2: apply policy to a range and do splits. */
788static int mbind_range(struct mm_struct *mm, unsigned long start,
789 unsigned long end, struct mempolicy *new_pol)
790{
791 MA_STATE(mas, &mm->mm_mt, start, start);
792 struct vm_area_struct *prev;
793 struct vm_area_struct *vma;
794 int err = 0;
795 pgoff_t pgoff;
796
797 prev = mas_prev(&mas, 0);
798 if (unlikely(!prev))
799 mas_set(&mas, start);
800
801 vma = mas_find(&mas, end - 1);
802 if (WARN_ON(!vma))
803 return 0;
804
805 if (start > vma->vm_start)
806 prev = vma;
807
808 for (; vma; vma = mas_next(&mas, end - 1)) {
809 unsigned long vmstart = max(start, vma->vm_start);
810 unsigned long vmend = min(end, vma->vm_end);
811
812 if (mpol_equal(vma_policy(vma), new_pol))
813 goto next;
814
815 pgoff = vma->vm_pgoff +
816 ((vmstart - vma->vm_start) >> PAGE_SHIFT);
817 prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
818 vma->anon_vma, vma->vm_file, pgoff,
819 new_pol, vma->vm_userfaultfd_ctx,
820 anon_vma_name(vma));
821 if (prev) {
822 /* vma_merge() invalidated the mas */
823 mas_pause(&mas);
824 vma = prev;
825 goto replace;
826 }
827 if (vma->vm_start != vmstart) {
828 err = split_vma(vma->vm_mm, vma, vmstart, 1);
829 if (err)
830 goto out;
831 /* split_vma() invalidated the mas */
832 mas_pause(&mas);
833 }
834 if (vma->vm_end != vmend) {
835 err = split_vma(vma->vm_mm, vma, vmend, 0);
836 if (err)
837 goto out;
838 /* split_vma() invalidated the mas */
839 mas_pause(&mas);
840 }
841replace:
842 err = vma_replace_policy(vma, new_pol);
843 if (err)
844 goto out;
845next:
846 prev = vma;
847 }
848
849out:
850 return err;
851}
852
853/* Set the process memory policy */
854static long do_set_mempolicy(unsigned short mode, unsigned short flags,
855 nodemask_t *nodes)
856{
857 struct mempolicy *new, *old;
858 NODEMASK_SCRATCH(scratch);
859 int ret;
860
861 if (!scratch)
862 return -ENOMEM;
863
864 new = mpol_new(mode, flags, nodes);
865 if (IS_ERR(new)) {
866 ret = PTR_ERR(new);
867 goto out;
868 }
869
870 task_lock(current);
871 ret = mpol_set_nodemask(new, nodes, scratch);
872 if (ret) {
873 task_unlock(current);
874 mpol_put(new);
875 goto out;
876 }
877
878 old = current->mempolicy;
879 current->mempolicy = new;
880 if (new && new->mode == MPOL_INTERLEAVE)
881 current->il_prev = MAX_NUMNODES-1;
882 task_unlock(current);
883 mpol_put(old);
884 ret = 0;
885out:
886 NODEMASK_SCRATCH_FREE(scratch);
887 return ret;
888}
889
890/*
891 * Return nodemask for policy for get_mempolicy() query
892 *
893 * Called with task's alloc_lock held
894 */
895static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
896{
897 nodes_clear(*nodes);
898 if (p == &default_policy)
899 return;
900
901 switch (p->mode) {
902 case MPOL_BIND:
903 case MPOL_INTERLEAVE:
904 case MPOL_PREFERRED:
905 case MPOL_PREFERRED_MANY:
906 *nodes = p->nodes;
907 break;
908 case MPOL_LOCAL:
909 /* return empty node mask for local allocation */
910 break;
911 default:
912 BUG();
913 }
914}
915
916static int lookup_node(struct mm_struct *mm, unsigned long addr)
917{
918 struct page *p = NULL;
919 int ret;
920
921 ret = get_user_pages_fast(addr & PAGE_MASK, 1, 0, &p);
922 if (ret > 0) {
923 ret = page_to_nid(p);
924 put_page(p);
925 }
926 return ret;
927}
928
929/* Retrieve NUMA policy */
930static long do_get_mempolicy(int *policy, nodemask_t *nmask,
931 unsigned long addr, unsigned long flags)
932{
933 int err;
934 struct mm_struct *mm = current->mm;
935 struct vm_area_struct *vma = NULL;
936 struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;
937
938 if (flags &
939 ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
940 return -EINVAL;
941
942 if (flags & MPOL_F_MEMS_ALLOWED) {
943 if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
944 return -EINVAL;
945 *policy = 0; /* just so it's initialized */
946 task_lock(current);
947 *nmask = cpuset_current_mems_allowed;
948 task_unlock(current);
949 return 0;
950 }
951
952 if (flags & MPOL_F_ADDR) {
953 /*
954 * Do NOT fall back to task policy if the
955 * vma/shared policy at addr is NULL. We
956 * want to return MPOL_DEFAULT in this case.
957 */
958 mmap_read_lock(mm);
959 vma = vma_lookup(mm, addr);
960 if (!vma) {
961 mmap_read_unlock(mm);
962 return -EFAULT;
963 }
964 if (vma->vm_ops && vma->vm_ops->get_policy)
965 pol = vma->vm_ops->get_policy(vma, addr);
966 else
967 pol = vma->vm_policy;
968 } else if (addr)
969 return -EINVAL;
970
971 if (!pol)
972 pol = &default_policy; /* indicates default behavior */
973
974 if (flags & MPOL_F_NODE) {
975 if (flags & MPOL_F_ADDR) {
976 /*
977 * Take a refcount on the mpol, because we are about to
978 * drop the mmap_lock, after which only "pol" remains
979 * valid, "vma" is stale.
980 */
981 pol_refcount = pol;
982 vma = NULL;
983 mpol_get(pol);
984 mmap_read_unlock(mm);
985 err = lookup_node(mm, addr);
986 if (err < 0)
987 goto out;
988 *policy = err;
989 } else if (pol == current->mempolicy &&
990 pol->mode == MPOL_INTERLEAVE) {
991 *policy = next_node_in(current->il_prev, pol->nodes);
992 } else {
993 err = -EINVAL;
994 goto out;
995 }
996 } else {
997 *policy = pol == &default_policy ? MPOL_DEFAULT :
998 pol->mode;
999 /*
1000 * Internal mempolicy flags must be masked off before exposing
1001 * the policy to userspace.
1002 */
1003 *policy |= (pol->flags & MPOL_MODE_FLAGS);
1004 }
1005
1006 err = 0;
1007 if (nmask) {
1008 if (mpol_store_user_nodemask(pol)) {
1009 *nmask = pol->w.user_nodemask;
1010 } else {
1011 task_lock(current);
1012 get_policy_nodemask(pol, nmask);
1013 task_unlock(current);
1014 }
1015 }
1016
1017 out:
1018 mpol_cond_put(pol);
1019 if (vma)
1020 mmap_read_unlock(mm);
1021 if (pol_refcount)
1022 mpol_put(pol_refcount);
1023 return err;
1024}
1025
1026#ifdef CONFIG_MIGRATION
1027/*
1028 * page migration, thp tail pages can be passed.
1029 */
1030static int migrate_page_add(struct page *page, struct list_head *pagelist,
1031 unsigned long flags)
1032{
1033 struct page *head = compound_head(page);
1034 /*
1035 * Avoid migrating a page that is shared with others.
1036 */
1037 if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
1038 if (!isolate_lru_page(head)) {
1039 list_add_tail(&head->lru, pagelist);
1040 mod_node_page_state(page_pgdat(head),
1041 NR_ISOLATED_ANON + page_is_file_lru(head),
1042 thp_nr_pages(head));
1043 } else if (flags & MPOL_MF_STRICT) {
1044 /*
1045 * Non-movable page may reach here. And, there may be
1046 * temporary off LRU pages or non-LRU movable pages.
1047 * Treat them as unmovable pages since they can't be
1048 * isolated, so they can't be moved at the moment. It
1049 * should return -EIO for this case too.
1050 */
1051 return -EIO;
1052 }
1053 }
1054
1055 return 0;
1056}
1057
1058/*
1059 * Migrate pages from one node to a target node.
1060 * Returns error or the number of pages not migrated.
1061 */
1062static int migrate_to_node(struct mm_struct *mm, int source, int dest,
1063 int flags)
1064{
1065 nodemask_t nmask;
1066 struct vm_area_struct *vma;
1067 LIST_HEAD(pagelist);
1068 int err = 0;
1069 struct migration_target_control mtc = {
1070 .nid = dest,
1071 .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
1072 };
1073
1074 nodes_clear(nmask);
1075 node_set(source, nmask);
1076
1077 /*
1078 * This does not "check" the range but isolates all pages that
1079 * need migration. Between passing in the full user address
1080 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
1081 */
1082 vma = find_vma(mm, 0);
1083 VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
1084 queue_pages_range(mm, vma->vm_start, mm->task_size, &nmask,
1085 flags | MPOL_MF_DISCONTIG_OK, &pagelist);
1086
1087 if (!list_empty(&pagelist)) {
1088 err = migrate_pages(&pagelist, alloc_migration_target, NULL,
1089 (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
1090 if (err)
1091 putback_movable_pages(&pagelist);
1092 }
1093
1094 return err;
1095}
1096
1097/*
1098 * Move pages between the two nodesets so as to preserve the physical
1099 * layout as much as possible.
1100 *
1101 * Returns the number of page that could not be moved.
1102 */
1103int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1104 const nodemask_t *to, int flags)
1105{
1106 int busy = 0;
1107 int err = 0;
1108 nodemask_t tmp;
1109
1110 lru_cache_disable();
1111
1112 mmap_read_lock(mm);
1113
1114 /*
1115 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
1116 * bit in 'to' is not also set in 'tmp'. Clear the found 'source'
1117 * bit in 'tmp', and return that <source, dest> pair for migration.
1118 * The pair of nodemasks 'to' and 'from' define the map.
1119 *
1120 * If no pair of bits is found that way, fallback to picking some
1121 * pair of 'source' and 'dest' bits that are not the same. If the
1122 * 'source' and 'dest' bits are the same, this represents a node
1123 * that will be migrating to itself, so no pages need move.
1124 *
1125 * If no bits are left in 'tmp', or if all remaining bits left
1126 * in 'tmp' correspond to the same bit in 'to', return false
1127 * (nothing left to migrate).
1128 *
1129 * This lets us pick a pair of nodes to migrate between, such that
1130 * if possible the dest node is not already occupied by some other
1131 * source node, minimizing the risk of overloading the memory on a
1132 * node that would happen if we migrated incoming memory to a node
1133	 * before migrating outgoing memory sourced from that same node.
1134 *
1135 * A single scan of tmp is sufficient. As we go, we remember the
1136 * most recent <s, d> pair that moved (s != d). If we find a pair
1137 * that not only moved, but what's better, moved to an empty slot
1138 * (d is not set in tmp), then we break out then, with that pair.
1139	 * Otherwise, when we finish scanning tmp, we at least have the
1140 * most recent <s, d> pair that moved. If we get all the way through
1141 * the scan of tmp without finding any node that moved, much less
1142 * moved to an empty node, then there is nothing left worth migrating.
1143 */
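	/*
	 * For example (illustration added for clarity, not part of the
	 * original comment): with from = {0,1} and to = {1,2}, the first
	 * scan finds <0,1> but keeps looking because node 1 is still a
	 * pending source; it then finds <1,2>, whose destination is not
	 * a pending source, and migrates node 1 to node 2 first. The
	 * next iteration migrates node 0 to node 1.
	 */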
1144
1145 tmp = *from;
1146 while (!nodes_empty(tmp)) {
1147 int s, d;
1148 int source = NUMA_NO_NODE;
1149 int dest = 0;
1150
1151 for_each_node_mask(s, tmp) {
1152
1153 /*
1154 * do_migrate_pages() tries to maintain the relative
1155 * node relationship of the pages established between
1156 * threads and memory areas.
1157 *
1158			 * However, if the number of source nodes is not equal to
1159			 * the number of destination nodes, we cannot preserve
1160			 * this relative node relationship. In that case, skip
1161			 * copying memory from a node that is also in the destination
1162			 * mask.
1163 *
1164 * Example: [2,3,4] -> [3,4,5] moves everything.
1165			 * [0-7] -> [3,4,5] moves only 0,1,2,6,7.
1166 */
1167
1168 if ((nodes_weight(*from) != nodes_weight(*to)) &&
1169 (node_isset(s, *to)))
1170 continue;
1171
1172 d = node_remap(s, *from, *to);
1173 if (s == d)
1174 continue;
1175
1176 source = s; /* Node moved. Memorize */
1177 dest = d;
1178
1179 /* dest not in remaining from nodes? */
1180 if (!node_isset(dest, tmp))
1181 break;
1182 }
1183 if (source == NUMA_NO_NODE)
1184 break;
1185
1186 node_clear(source, tmp);
1187 err = migrate_to_node(mm, source, dest, flags);
1188 if (err > 0)
1189 busy += err;
1190 if (err < 0)
1191 break;
1192 }
1193 mmap_read_unlock(mm);
1194
1195 lru_cache_enable();
1196 if (err < 0)
1197 return err;
1198 return busy;
1199
1200}
1201
1202/*
1203 * Allocate a new page for page migration based on vma policy.
1204 * Start by assuming the page is mapped by the same vma as contains @start.
1205 * Search forward from there, if not. N.B., this assumes that the
1206 * list of pages handed to migrate_pages()--which is how we get here--
1207 * is in virtual address order.
1208 */
1209static struct page *new_page(struct page *page, unsigned long start)
1210{
1211 struct folio *dst, *src = page_folio(page);
1212 struct vm_area_struct *vma;
1213 unsigned long address;
1214 VMA_ITERATOR(vmi, current->mm, start);
1215 gfp_t gfp = GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL;
1216
1217 for_each_vma(vmi, vma) {
1218 address = page_address_in_vma(page, vma);
1219 if (address != -EFAULT)
1220 break;
1221 }
1222
1223 if (folio_test_hugetlb(src))
1224 return alloc_huge_page_vma(page_hstate(&src->page),
1225 vma, address);
1226
1227 if (folio_test_large(src))
1228 gfp = GFP_TRANSHUGE;
1229
1230 /*
1231 * if !vma, vma_alloc_folio() will use task or system default policy
1232 */
1233 dst = vma_alloc_folio(gfp, folio_order(src), vma, address,
1234 folio_test_large(src));
1235 return &dst->page;
1236}
1237#else
1238
1239static int migrate_page_add(struct page *page, struct list_head *pagelist,
1240 unsigned long flags)
1241{
1242 return -EIO;
1243}
1244
1245int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1246 const nodemask_t *to, int flags)
1247{
1248 return -ENOSYS;
1249}
1250
1251static struct page *new_page(struct page *page, unsigned long start)
1252{
1253 return NULL;
1254}
1255#endif
1256
1257static long do_mbind(unsigned long start, unsigned long len,
1258 unsigned short mode, unsigned short mode_flags,
1259 nodemask_t *nmask, unsigned long flags)
1260{
1261 struct mm_struct *mm = current->mm;
1262 struct mempolicy *new;
1263 unsigned long end;
1264 int err;
1265 int ret;
1266 LIST_HEAD(pagelist);
1267
1268 if (flags & ~(unsigned long)MPOL_MF_VALID)
1269 return -EINVAL;
1270 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1271 return -EPERM;
1272
1273 if (start & ~PAGE_MASK)
1274 return -EINVAL;
1275
1276 if (mode == MPOL_DEFAULT)
1277 flags &= ~MPOL_MF_STRICT;
1278
1279 len = PAGE_ALIGN(len);
1280 end = start + len;
1281
1282 if (end < start)
1283 return -EINVAL;
1284 if (end == start)
1285 return 0;
1286
1287 new = mpol_new(mode, mode_flags, nmask);
1288 if (IS_ERR(new))
1289 return PTR_ERR(new);
1290
1291 if (flags & MPOL_MF_LAZY)
1292 new->flags |= MPOL_F_MOF;
1293
1294 /*
1295 * If we are using the default policy then operation
1296 * on discontinuous address spaces is okay after all
1297 */
1298 if (!new)
1299 flags |= MPOL_MF_DISCONTIG_OK;
1300
1301 pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1302 start, start + len, mode, mode_flags,
1303 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
1304
1305 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1306
1307 lru_cache_disable();
1308 }
1309 {
1310 NODEMASK_SCRATCH(scratch);
1311 if (scratch) {
1312 mmap_write_lock(mm);
1313 err = mpol_set_nodemask(new, nmask, scratch);
1314 if (err)
1315 mmap_write_unlock(mm);
1316 } else
1317 err = -ENOMEM;
1318 NODEMASK_SCRATCH_FREE(scratch);
1319 }
1320 if (err)
1321 goto mpol_out;
1322
1323 ret = queue_pages_range(mm, start, end, nmask,
1324 flags | MPOL_MF_INVERT, &pagelist);
1325
1326 if (ret < 0) {
1327 err = ret;
1328 goto up_out;
1329 }
1330
1331 err = mbind_range(mm, start, end, new);
1332
1333 if (!err) {
1334 int nr_failed = 0;
1335
1336 if (!list_empty(&pagelist)) {
1337 WARN_ON_ONCE(flags & MPOL_MF_LAZY);
1338 nr_failed = migrate_pages(&pagelist, new_page, NULL,
1339 start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND, NULL);
1340 if (nr_failed)
1341 putback_movable_pages(&pagelist);
1342 }
1343
1344 if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
1345 err = -EIO;
1346 } else {
1347up_out:
1348 if (!list_empty(&pagelist))
1349 putback_movable_pages(&pagelist);
1350 }
1351
1352 mmap_write_unlock(mm);
1353mpol_out:
1354 mpol_put(new);
1355 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
1356 lru_cache_enable();
1357 return err;
1358}
1359
1360/*
1361 * User space interface with variable sized bitmaps for nodelists.
1362 */
1363static int get_bitmap(unsigned long *mask, const unsigned long __user *nmask,
1364 unsigned long maxnode)
1365{
1366 unsigned long nlongs = BITS_TO_LONGS(maxnode);
1367 int ret;
1368
1369 if (in_compat_syscall())
1370 ret = compat_get_bitmap(mask,
1371 (const compat_ulong_t __user *)nmask,
1372 maxnode);
1373 else
1374 ret = copy_from_user(mask, nmask,
1375 nlongs * sizeof(unsigned long));
1376
1377 if (ret)
1378 return -EFAULT;
1379
1380 if (maxnode % BITS_PER_LONG)
1381 mask[nlongs - 1] &= (1UL << (maxnode % BITS_PER_LONG)) - 1;
1382
1383 return 0;
1384}
1385
1386/* Copy a node mask from user space. */
1387static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
1388 unsigned long maxnode)
1389{
1390 --maxnode;
1391 nodes_clear(*nodes);
1392 if (maxnode == 0 || !nmask)
1393 return 0;
1394 if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1395 return -EINVAL;
1396
1397 /*
1398	 * When the user specifies more nodes than supported, just check
1399	 * that the unsupported part is all zero, one word at a time,
1400	 * starting at the end.
1401 */
1402 while (maxnode > MAX_NUMNODES) {
1403 unsigned long bits = min_t(unsigned long, maxnode, BITS_PER_LONG);
1404 unsigned long t;
1405
1406 if (get_bitmap(&t, &nmask[(maxnode - 1) / BITS_PER_LONG], bits))
1407 return -EFAULT;
1408
1409 if (maxnode - bits >= MAX_NUMNODES) {
1410 maxnode -= bits;
1411 } else {
1412 maxnode = MAX_NUMNODES;
1413 t &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
1414 }
1415 if (t)
1416 return -EINVAL;
1417 }
1418
1419 return get_bitmap(nodes_addr(*nodes), nmask, maxnode);
1420}
1421
1422/* Copy a kernel node mask to user space */
1423static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1424 nodemask_t *nodes)
1425{
1426 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1427 unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
1428 bool compat = in_compat_syscall();
1429
1430 if (compat)
1431 nbytes = BITS_TO_COMPAT_LONGS(nr_node_ids) * sizeof(compat_long_t);
1432
1433 if (copy > nbytes) {
1434 if (copy > PAGE_SIZE)
1435 return -EINVAL;
1436 if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1437 return -EFAULT;
1438 copy = nbytes;
1439 maxnode = nr_node_ids;
1440 }
1441
1442 if (compat)
1443 return compat_put_bitmap((compat_ulong_t __user *)mask,
1444 nodes_addr(*nodes), maxnode);
1445
1446 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1447}
1448
1449/* Basic parameter sanity check used by both mbind() and set_mempolicy() */
1450static inline int sanitize_mpol_flags(int *mode, unsigned short *flags)
1451{
1452 *flags = *mode & MPOL_MODE_FLAGS;
1453 *mode &= ~MPOL_MODE_FLAGS;
1454
1455 if ((unsigned int)(*mode) >= MPOL_MAX)
1456 return -EINVAL;
1457 if ((*flags & MPOL_F_STATIC_NODES) && (*flags & MPOL_F_RELATIVE_NODES))
1458 return -EINVAL;
1459 if (*flags & MPOL_F_NUMA_BALANCING) {
1460 if (*mode != MPOL_BIND)
1461 return -EINVAL;
1462 *flags |= (MPOL_F_MOF | MPOL_F_MORON);
1463 }
1464 return 0;
1465}
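/*
 * For illustration (comment added, not in the original source): mbind() and
 * set_mempolicy() accept the mode and the optional mode flags OR'ed together
 * in a single value, e.g. MPOL_BIND | MPOL_F_STATIC_NODES. The helper above
 * splits that into *mode == MPOL_BIND and *flags == MPOL_F_STATIC_NODES
 * before the mode is range-checked against MPOL_MAX.
 */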
1466
1467static long kernel_mbind(unsigned long start, unsigned long len,
1468 unsigned long mode, const unsigned long __user *nmask,
1469 unsigned long maxnode, unsigned int flags)
1470{
1471 unsigned short mode_flags;
1472 nodemask_t nodes;
1473 int lmode = mode;
1474 int err;
1475
1476 start = untagged_addr(start);
1477 err = sanitize_mpol_flags(&lmode, &mode_flags);
1478 if (err)
1479 return err;
1480
1481 err = get_nodes(&nodes, nmask, maxnode);
1482 if (err)
1483 return err;
1484
1485 return do_mbind(start, len, lmode, mode_flags, &nodes, flags);
1486}
1487
1488SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, len,
1489 unsigned long, home_node, unsigned long, flags)
1490{
1491 struct mm_struct *mm = current->mm;
1492 struct vm_area_struct *vma;
1493 struct mempolicy *new;
1494 unsigned long vmstart;
1495 unsigned long vmend;
1496 unsigned long end;
1497 int err = -ENOENT;
1498 VMA_ITERATOR(vmi, mm, start);
1499
1500 start = untagged_addr(start);
1501 if (start & ~PAGE_MASK)
1502 return -EINVAL;
1503 /*
1504	 * flags is reserved for future extensions.
1505 */
1506 if (flags != 0)
1507 return -EINVAL;
1508
1509 /*
1510 * Check home_node is online to avoid accessing uninitialized
1511 * NODE_DATA.
1512 */
1513 if (home_node >= MAX_NUMNODES || !node_online(home_node))
1514 return -EINVAL;
1515
1516 len = PAGE_ALIGN(len);
1517 end = start + len;
1518
1519 if (end < start)
1520 return -EINVAL;
1521 if (end == start)
1522 return 0;
1523 mmap_write_lock(mm);
1524 for_each_vma_range(vmi, vma, end) {
1525 vmstart = max(start, vma->vm_start);
1526 vmend = min(end, vma->vm_end);
1527 new = mpol_dup(vma_policy(vma));
1528 if (IS_ERR(new)) {
1529 err = PTR_ERR(new);
1530 break;
1531 }
1532 /*
1533 * Only update home node if there is an existing vma policy
1534 */
1535 if (!new)
1536 continue;
1537
1538 /*
1539		 * If any vma in the range has a policy other than MPOL_BIND
1540		 * or MPOL_PREFERRED_MANY, return an error. We don't reset
1541 * the home node for vmas we already updated before.
1542 */
1543 if (new->mode != MPOL_BIND && new->mode != MPOL_PREFERRED_MANY) {
1544 mpol_put(new);
1545 err = -EOPNOTSUPP;
1546 break;
1547 }
1548
1549 new->home_node = home_node;
1550 err = mbind_range(mm, vmstart, vmend, new);
1551 mpol_put(new);
1552 if (err)
1553 break;
1554 }
1555 mmap_write_unlock(mm);
1556 return err;
1557}
1558
1559SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1560 unsigned long, mode, const unsigned long __user *, nmask,
1561 unsigned long, maxnode, unsigned int, flags)
1562{
1563 return kernel_mbind(start, len, mode, nmask, maxnode, flags);
1564}
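/*
 * Illustrative userspace sketch (added comment, not kernel code; assumes
 * libnuma's <numaif.h> mbind(2) wrapper plus <sys/mman.h>): bind an anonymous
 * mapping to node 0 and move any pages already allocated elsewhere.
 *
 *	unsigned long nodemask = 1UL << 0;
 *	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (mbind(buf, len, MPOL_BIND, &nodemask,
 *		  sizeof(nodemask) * 8, MPOL_MF_MOVE))
 *		perror("mbind");
 */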
1565
1566/* Set the process memory policy */
1567static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
1568 unsigned long maxnode)
1569{
1570 unsigned short mode_flags;
1571 nodemask_t nodes;
1572 int lmode = mode;
1573 int err;
1574
1575 err = sanitize_mpol_flags(&lmode, &mode_flags);
1576 if (err)
1577 return err;
1578
1579 err = get_nodes(&nodes, nmask, maxnode);
1580 if (err)
1581 return err;
1582
1583 return do_set_mempolicy(lmode, mode_flags, &nodes);
1584}
1585
1586SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1587 unsigned long, maxnode)
1588{
1589 return kernel_set_mempolicy(mode, nmask, maxnode);
1590}
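/*
 * Illustrative userspace sketch (added comment, not kernel code; assumes
 * libnuma's <numaif.h> wrapper): interleave all future allocations of the
 * calling task across nodes 0 and 1.
 *
 *	unsigned long nodemask = (1UL << 0) | (1UL << 1);
 *	if (set_mempolicy(MPOL_INTERLEAVE, &nodemask, sizeof(nodemask) * 8))
 *		perror("set_mempolicy");
 */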
1591
1592static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
1593 const unsigned long __user *old_nodes,
1594 const unsigned long __user *new_nodes)
1595{
1596 struct mm_struct *mm = NULL;
1597 struct task_struct *task;
1598 nodemask_t task_nodes;
1599 int err;
1600 nodemask_t *old;
1601 nodemask_t *new;
1602 NODEMASK_SCRATCH(scratch);
1603
1604 if (!scratch)
1605 return -ENOMEM;
1606
1607 old = &scratch->mask1;
1608 new = &scratch->mask2;
1609
1610 err = get_nodes(old, old_nodes, maxnode);
1611 if (err)
1612 goto out;
1613
1614 err = get_nodes(new, new_nodes, maxnode);
1615 if (err)
1616 goto out;
1617
1618 /* Find the mm_struct */
1619 rcu_read_lock();
1620 task = pid ? find_task_by_vpid(pid) : current;
1621 if (!task) {
1622 rcu_read_unlock();
1623 err = -ESRCH;
1624 goto out;
1625 }
1626 get_task_struct(task);
1627
1628 err = -EINVAL;
1629
1630 /*
1631 * Check if this process has the right to modify the specified process.
1632 * Use the regular "ptrace_may_access()" checks.
1633 */
1634 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
1635 rcu_read_unlock();
1636 err = -EPERM;
1637 goto out_put;
1638 }
1639 rcu_read_unlock();
1640
1641 task_nodes = cpuset_mems_allowed(task);
1642 /* Is the user allowed to access the target nodes? */
1643 if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
1644 err = -EPERM;
1645 goto out_put;
1646 }
1647
1648 task_nodes = cpuset_mems_allowed(current);
1649 nodes_and(*new, *new, task_nodes);
1650 if (nodes_empty(*new))
1651 goto out_put;
1652
1653 err = security_task_movememory(task);
1654 if (err)
1655 goto out_put;
1656
1657 mm = get_task_mm(task);
1658 put_task_struct(task);
1659
1660 if (!mm) {
1661 err = -EINVAL;
1662 goto out;
1663 }
1664
1665 err = do_migrate_pages(mm, old, new,
1666 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
1667
1668 mmput(mm);
1669out:
1670 NODEMASK_SCRATCH_FREE(scratch);
1671
1672 return err;
1673
1674out_put:
1675 put_task_struct(task);
1676 goto out;
1677
1678}
1679
1680SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1681 const unsigned long __user *, old_nodes,
1682 const unsigned long __user *, new_nodes)
1683{
1684 return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
1685}
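/*
 * Illustrative userspace sketch (added comment, not kernel code; assumes
 * libnuma's <numaif.h> migrate_pages(2) wrapper): ask the kernel to move the
 * pages of process @pid from node 0 to node 1.
 *
 *	unsigned long old = 1UL << 0, new = 1UL << 1;
 *	long ret = migrate_pages(pid, sizeof(old) * 8, &old, &new);
 *	if (ret < 0)
 *		perror("migrate_pages");
 */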
1686
1687
1688/* Retrieve NUMA policy */
1689static int kernel_get_mempolicy(int __user *policy,
1690 unsigned long __user *nmask,
1691 unsigned long maxnode,
1692 unsigned long addr,
1693 unsigned long flags)
1694{
1695 int err;
1696 int pval;
1697 nodemask_t nodes;
1698
1699 if (nmask != NULL && maxnode < nr_node_ids)
1700 return -EINVAL;
1701
1702 addr = untagged_addr(addr);
1703
1704 err = do_get_mempolicy(&pval, &nodes, addr, flags);
1705
1706 if (err)
1707 return err;
1708
1709 if (policy && put_user(pval, policy))
1710 return -EFAULT;
1711
1712 if (nmask)
1713 err = copy_nodes_to_user(nmask, maxnode, &nodes);
1714
1715 return err;
1716}
1717
1718SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1719 unsigned long __user *, nmask, unsigned long, maxnode,
1720 unsigned long, addr, unsigned long, flags)
1721{
1722 return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
1723}
1724
1725bool vma_migratable(struct vm_area_struct *vma)
1726{
1727 if (vma->vm_flags & (VM_IO | VM_PFNMAP))
1728 return false;
1729
1730 /*
1731 * DAX device mappings require predictable access latency, so avoid
1732 * incurring periodic faults.
1733 */
1734 if (vma_is_dax(vma))
1735 return false;
1736
1737 if (is_vm_hugetlb_page(vma) &&
1738 !hugepage_migration_supported(hstate_vma(vma)))
1739 return false;
1740
1741 /*
1742 * Migration allocates pages in the highest zone. If we cannot
1743 * do so then migration (at least from node to node) is not
1744 * possible.
1745 */
1746 if (vma->vm_file &&
1747 gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
1748 < policy_zone)
1749 return false;
1750 return true;
1751}
1752
1753struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
1754 unsigned long addr)
1755{
1756 struct mempolicy *pol = NULL;
1757
1758 if (vma) {
1759 if (vma->vm_ops && vma->vm_ops->get_policy) {
1760 pol = vma->vm_ops->get_policy(vma, addr);
1761 } else if (vma->vm_policy) {
1762 pol = vma->vm_policy;
1763
1764 /*
1765 * shmem_alloc_page() passes MPOL_F_SHARED policy with
1766 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
1767 * count on these policies which will be dropped by
1768 * mpol_cond_put() later
1769 */
1770 if (mpol_needs_cond_ref(pol))
1771 mpol_get(pol);
1772 }
1773 }
1774
1775 return pol;
1776}
1777
1778/*
1779 * get_vma_policy(@vma, @addr)
1780 * @vma: virtual memory area whose policy is sought
1781 * @addr: address in @vma for shared policy lookup
1782 *
1783 * Returns effective policy for a VMA at specified address.
1784 * Falls back to current->mempolicy or system default policy, as necessary.
1785 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1786 * count--added by the get_policy() vm_op, as appropriate--to protect against
1787 * freeing by another task. It is the caller's responsibility to free the
1788 * extra reference for shared policies.
1789 */
1790static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1791 unsigned long addr)
1792{
1793 struct mempolicy *pol = __get_vma_policy(vma, addr);
1794
1795 if (!pol)
1796 pol = get_task_policy(current);
1797
1798 return pol;
1799}
1800
1801bool vma_policy_mof(struct vm_area_struct *vma)
1802{
1803 struct mempolicy *pol;
1804
1805 if (vma->vm_ops && vma->vm_ops->get_policy) {
1806 bool ret = false;
1807
1808 pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1809 if (pol && (pol->flags & MPOL_F_MOF))
1810 ret = true;
1811 mpol_cond_put(pol);
1812
1813 return ret;
1814 }
1815
1816 pol = vma->vm_policy;
1817 if (!pol)
1818 pol = get_task_policy(current);
1819
1820 return pol->flags & MPOL_F_MOF;
1821}
1822
1823bool apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1824{
1825 enum zone_type dynamic_policy_zone = policy_zone;
1826
1827 BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1828
1829 /*
1830	 * If policy->nodes has movable memory only, apply the policy
1831	 * only when gfp_zone(gfp) == ZONE_MOVABLE.
1832	 *
1833	 * policy->nodes is intersected with node_states[N_MEMORY], so
1834	 * if the following test fails, it implies that policy->nodes
1835	 * has movable memory only.
1836 */
1837 if (!nodes_intersects(policy->nodes, node_states[N_HIGH_MEMORY]))
1838 dynamic_policy_zone = ZONE_MOVABLE;
1839
1840 return zone >= dynamic_policy_zone;
1841}
1842
1843/*
1844 * Return a nodemask representing a mempolicy for filtering nodes for
1845 * page allocation
1846 */
1847nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
1848{
1849 int mode = policy->mode;
1850
1851 /* Lower zones don't get a nodemask applied for MPOL_BIND */
1852 if (unlikely(mode == MPOL_BIND) &&
1853 apply_policy_zone(policy, gfp_zone(gfp)) &&
1854 cpuset_nodemask_valid_mems_allowed(&policy->nodes))
1855 return &policy->nodes;
1856
1857 if (mode == MPOL_PREFERRED_MANY)
1858 return &policy->nodes;
1859
1860 return NULL;
1861}
1862
1863/*
1864 * Return the preferred node id for 'prefer' mempolicy, and return
1865 * the given id for all other policies.
1866 *
1867 * policy_node() is always coupled with policy_nodemask(), which
1868 * secures the nodemask limit for 'bind' and 'prefer-many' policy.
1869 */
1870static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd)
1871{
1872 if (policy->mode == MPOL_PREFERRED) {
1873 nd = first_node(policy->nodes);
1874 } else {
1875 /*
1876 * __GFP_THISNODE shouldn't even be used with the bind policy
1877		 * because it could easily break the expectation of staying on
1878		 * the requested node and thus violate the policy.
1879 */
1880 WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
1881 }
1882
1883 if ((policy->mode == MPOL_BIND ||
1884 policy->mode == MPOL_PREFERRED_MANY) &&
1885 policy->home_node != NUMA_NO_NODE)
1886 return policy->home_node;
1887
1888 return nd;
1889}
1890
1891/* Do dynamic interleaving for a process */
1892static unsigned interleave_nodes(struct mempolicy *policy)
1893{
1894 unsigned next;
1895 struct task_struct *me = current;
1896
1897 next = next_node_in(me->il_prev, policy->nodes);
1898 if (next < MAX_NUMNODES)
1899 me->il_prev = next;
1900 return next;
1901}
1902
1903/*
1904 * Depending on the memory policy provide a node from which to allocate the
1905 * next slab entry.
1906 */
1907unsigned int mempolicy_slab_node(void)
1908{
1909 struct mempolicy *policy;
1910 int node = numa_mem_id();
1911
1912 if (!in_task())
1913 return node;
1914
1915 policy = current->mempolicy;
1916 if (!policy)
1917 return node;
1918
1919 switch (policy->mode) {
1920 case MPOL_PREFERRED:
1921 return first_node(policy->nodes);
1922
1923 case MPOL_INTERLEAVE:
1924 return interleave_nodes(policy);
1925
1926 case MPOL_BIND:
1927 case MPOL_PREFERRED_MANY:
1928 {
1929 struct zoneref *z;
1930
1931 /*
1932 * Follow bind policy behavior and start allocation at the
1933 * first node.
1934 */
1935 struct zonelist *zonelist;
1936 enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1937 zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
1938 z = first_zones_zonelist(zonelist, highest_zoneidx,
1939 &policy->nodes);
1940 return z->zone ? zone_to_nid(z->zone) : node;
1941 }
1942 case MPOL_LOCAL:
1943 return node;
1944
1945 default:
1946 BUG();
1947 }
1948}
1949
1950/*
1951 * Do static interleaving for a VMA with known offset @n. Returns the n'th
1952 * node in pol->nodes (starting from n=0), wrapping around if n exceeds the
1953 * number of present nodes.
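 *
 * For example (illustration added): with pol->nodes = {0,2,3} and n = 4,
 * nnodes = 3, target = 4 % 3 = 1, and the walk below returns node 2.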
1954 */
1955static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
1956{
1957 nodemask_t nodemask = pol->nodes;
1958 unsigned int target, nnodes;
1959 int i;
1960 int nid;
1961 /*
1962 * The barrier will stabilize the nodemask in a register or on
1963 * the stack so that it will stop changing under the code.
1964 *
1965 * Between first_node() and next_node(), pol->nodes could be changed
1966	 * by other threads, so we copy pol->nodes into a local variable.
1967 */
1968 barrier();
1969
1970 nnodes = nodes_weight(nodemask);
1971 if (!nnodes)
1972 return numa_node_id();
1973 target = (unsigned int)n % nnodes;
1974 nid = first_node(nodemask);
1975 for (i = 0; i < target; i++)
1976 nid = next_node(nid, nodemask);
1977 return nid;
1978}
1979
1980/* Determine a node number for interleave */
1981static inline unsigned interleave_nid(struct mempolicy *pol,
1982 struct vm_area_struct *vma, unsigned long addr, int shift)
1983{
1984 if (vma) {
1985 unsigned long off;
1986
1987 /*
1988 * for small pages, there is no difference between
1989 * shift and PAGE_SHIFT, so the bit-shift is safe.
1990 * for huge pages, since vm_pgoff is in units of small
1991 * pages, we need to shift off the always 0 bits to get
1992 * a useful offset.
1993 */
1994 BUG_ON(shift < PAGE_SHIFT);
1995 off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
1996 off += (addr - vma->vm_start) >> shift;
1997 return offset_il_node(pol, off);
1998 } else
1999 return interleave_nodes(pol);
2000}
2001
2002#ifdef CONFIG_HUGETLBFS
2003/*
2004 * huge_node(@vma, @addr, @gfp_flags, @mpol)
2005 * @vma: virtual memory area whose policy is sought
2006 * @addr: address in @vma for shared policy lookup and interleave policy
2007 * @gfp_flags: for requested zone
2008 * @mpol: pointer to mempolicy pointer for reference counted mempolicy
2009 * @nodemask: pointer to nodemask pointer for 'bind' and 'prefer-many' policy
2010 *
2011 * Returns a nid suitable for a huge page allocation and a pointer
2012 * to the struct mempolicy for conditional unref after allocation.
2013 * If the effective policy is 'bind' or 'prefer-many', returns a pointer
2014 * to the mempolicy's @nodemask for filtering the zonelist.
2015 *
2016 * Must be protected by read_mems_allowed_begin()
2017 */
2018int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
2019 struct mempolicy **mpol, nodemask_t **nodemask)
2020{
2021 int nid;
2022 int mode;
2023
2024 *mpol = get_vma_policy(vma, addr);
2025 *nodemask = NULL;
2026 mode = (*mpol)->mode;
2027
2028 if (unlikely(mode == MPOL_INTERLEAVE)) {
2029 nid = interleave_nid(*mpol, vma, addr,
2030 huge_page_shift(hstate_vma(vma)));
2031 } else {
2032 nid = policy_node(gfp_flags, *mpol, numa_node_id());
2033 if (mode == MPOL_BIND || mode == MPOL_PREFERRED_MANY)
2034 *nodemask = &(*mpol)->nodes;
2035 }
2036 return nid;
2037}
2038
2039/*
2040 * init_nodemask_of_mempolicy
2041 *
2042 * If the current task's mempolicy is "default" [NULL], return 'false'
2043 * to indicate default policy. Otherwise, extract the policy nodemask
2044 * for 'bind' or 'interleave' policy into the argument nodemask, or
2045 * initialize the argument nodemask to contain the single node for
2046 * 'preferred' or 'local' policy and return 'true' to indicate presence
2047 * of non-default mempolicy.
2048 *
2049 * We don't bother with reference counting the mempolicy [mpol_get/put]
2050 * because the current task is examining its own mempolicy and a task's
2051 * mempolicy is only ever changed by the task itself.
2052 *
2053 * N.B., it is the caller's responsibility to free a returned nodemask.
2054 */
2055bool init_nodemask_of_mempolicy(nodemask_t *mask)
2056{
2057 struct mempolicy *mempolicy;
2058
2059 if (!(mask && current->mempolicy))
2060 return false;
2061
2062 task_lock(current);
2063 mempolicy = current->mempolicy;
2064 switch (mempolicy->mode) {
2065 case MPOL_PREFERRED:
2066 case MPOL_PREFERRED_MANY:
2067 case MPOL_BIND:
2068 case MPOL_INTERLEAVE:
2069 *mask = mempolicy->nodes;
2070 break;
2071
2072 case MPOL_LOCAL:
2073 init_nodemask_of_node(mask, numa_node_id());
2074 break;
2075
2076 default:
2077 BUG();
2078 }
2079 task_unlock(current);
2080
2081 return true;
2082}
2083#endif
2084
2085/*
2086 * mempolicy_in_oom_domain
2087 *
2088 * If tsk's mempolicy is "bind", check for intersection between mask and
2089 * the policy nodemask. Otherwise, return true for all other policies
2090 * including "interleave", as a tsk with "interleave" policy may have
2091 * memory allocated from all nodes in the system.
2092 *
2093 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
2094 */
2095bool mempolicy_in_oom_domain(struct task_struct *tsk,
2096 const nodemask_t *mask)
2097{
2098 struct mempolicy *mempolicy;
2099 bool ret = true;
2100
2101 if (!mask)
2102 return ret;
2103
2104 task_lock(tsk);
2105 mempolicy = tsk->mempolicy;
2106 if (mempolicy && mempolicy->mode == MPOL_BIND)
2107 ret = nodes_intersects(mempolicy->nodes, *mask);
2108 task_unlock(tsk);
2109
2110 return ret;
2111}
2112
2113/* Allocate a page in interleaved policy.
2114 Own path because it needs to do special accounting. */
2115static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
2116 unsigned nid)
2117{
2118 struct page *page;
2119
2120 page = __alloc_pages(gfp, order, nid, NULL);
2121 /* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
2122 if (!static_branch_likely(&vm_numa_stat_key))
2123 return page;
2124 if (page && page_to_nid(page) == nid) {
2125 preempt_disable();
2126 __count_numa_event(page_zone(page), NUMA_INTERLEAVE_HIT);
2127 preempt_enable();
2128 }
2129 return page;
2130}
2131
2132static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order,
2133 int nid, struct mempolicy *pol)
2134{
2135 struct page *page;
2136 gfp_t preferred_gfp;
2137
2138 /*
2139	 * This is a two-pass approach. The first pass only tries the
2140	 * preferred nodes but skips direct reclaim and allows the
2141	 * allocation to fail, while the second pass tries all the
2142	 * nodes in the system.
2143 */
2144 preferred_gfp = gfp | __GFP_NOWARN;
2145 preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2146 page = __alloc_pages(preferred_gfp, order, nid, &pol->nodes);
2147 if (!page)
2148 page = __alloc_pages(gfp, order, nid, NULL);
2149
2150 return page;
2151}
2152
2153/**
2154 * vma_alloc_folio - Allocate a folio for a VMA.
2155 * @gfp: GFP flags.
2156 * @order: Order of the folio.
2157 * @vma: Pointer to VMA or NULL if not available.
2158 * @addr: Virtual address of the allocation. Must be inside @vma.
2159 * @hugepage: For hugepages try only the preferred node if possible.
2160 *
2161 * Allocate a folio for a specific address in @vma, using the appropriate
2162 * NUMA policy. When @vma is not NULL the caller must hold the mmap_lock
2163 * of the mm_struct of the VMA to prevent it from going away. Should be
2164 * used for all allocations for folios that will be mapped into user space.
2165 *
2166 * Return: The folio on success or NULL if allocation fails.
2167 */
2168struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
2169 unsigned long addr, bool hugepage)
2170{
2171 struct mempolicy *pol;
2172 int node = numa_node_id();
2173 struct folio *folio;
2174 int preferred_nid;
2175 nodemask_t *nmask;
2176
2177 pol = get_vma_policy(vma, addr);
2178
2179 if (pol->mode == MPOL_INTERLEAVE) {
2180 struct page *page;
2181 unsigned nid;
2182
2183 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
2184 mpol_cond_put(pol);
2185 gfp |= __GFP_COMP;
2186 page = alloc_page_interleave(gfp, order, nid);
2187 if (page && order > 1)
2188 prep_transhuge_page(page);
2189 folio = (struct folio *)page;
2190 goto out;
2191 }
2192
2193 if (pol->mode == MPOL_PREFERRED_MANY) {
2194 struct page *page;
2195
2196 node = policy_node(gfp, pol, node);
2197 gfp |= __GFP_COMP;
2198 page = alloc_pages_preferred_many(gfp, order, node, pol);
2199 mpol_cond_put(pol);
2200 if (page && order > 1)
2201 prep_transhuge_page(page);
2202 folio = (struct folio *)page;
2203 goto out;
2204 }
2205
2206 if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
2207 int hpage_node = node;
2208
2209 /*
2210 * For hugepage allocation and non-interleave policy which
2211 * allows the current node (or other explicitly preferred
2212 * node) we only try to allocate from the current/preferred
2213 * node and don't fall back to other nodes, as the cost of
2214 * remote accesses would likely offset THP benefits.
2215 *
2216 * If the policy is interleave or does not allow the current
2217 * node in its nodemask, we allocate the standard way.
2218 */
2219 if (pol->mode == MPOL_PREFERRED)
2220 hpage_node = first_node(pol->nodes);
2221
2222 nmask = policy_nodemask(gfp, pol);
2223 if (!nmask || node_isset(hpage_node, *nmask)) {
2224 mpol_cond_put(pol);
2225 /*
2226 * First, try to allocate THP only on local node, but
2227 * don't reclaim unnecessarily, just compact.
2228 */
2229 folio = __folio_alloc_node(gfp | __GFP_THISNODE |
2230 __GFP_NORETRY, order, hpage_node);
2231
2232 /*
2233			 * If hugepage allocations are configured to always
2234			 * compact synchronously, or the vma has been madvised
2235 * to prefer hugepage backing, retry allowing remote
2236 * memory with both reclaim and compact as well.
2237 */
2238 if (!folio && (gfp & __GFP_DIRECT_RECLAIM))
2239 folio = __folio_alloc(gfp, order, hpage_node,
2240 nmask);
2241
2242 goto out;
2243 }
2244 }
2245
2246 nmask = policy_nodemask(gfp, pol);
2247 preferred_nid = policy_node(gfp, pol, node);
2248 folio = __folio_alloc(gfp, order, preferred_nid, nmask);
2249 mpol_cond_put(pol);
2250out:
2251 return folio;
2252}
2253EXPORT_SYMBOL(vma_alloc_folio);
2254
2255/**
2256 * alloc_pages - Allocate pages.
2257 * @gfp: GFP flags.
2258 * @order: Power of two of number of pages to allocate.
2259 *
2260 * Allocate 1 << @order contiguous pages. The physical address of the
2261 * first page is naturally aligned (eg an order-3 allocation will be aligned
2262 * to a multiple of 8 * PAGE_SIZE bytes). The NUMA policy of the current
2263 * process is honoured when in process context.
2264 *
2265 * Context: Can be called from any context, providing the appropriate GFP
2266 * flags are used.
2267 * Return: The page on success or NULL if allocation fails.
2268 */
2269struct page *alloc_pages(gfp_t gfp, unsigned order)
2270{
2271 struct mempolicy *pol = &default_policy;
2272 struct page *page;
2273
2274 if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2275 pol = get_task_policy(current);
2276
2277 /*
2278 * No reference counting needed for current->mempolicy
2279 * nor system default_policy
2280 */
2281 if (pol->mode == MPOL_INTERLEAVE)
2282 page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2283 else if (pol->mode == MPOL_PREFERRED_MANY)
2284 page = alloc_pages_preferred_many(gfp, order,
2285 policy_node(gfp, pol, numa_node_id()), pol);
2286 else
2287 page = __alloc_pages(gfp, order,
2288 policy_node(gfp, pol, numa_node_id()),
2289 policy_nodemask(gfp, pol));
2290
2291 return page;
2292}
2293EXPORT_SYMBOL(alloc_pages);
2294
2295struct folio *folio_alloc(gfp_t gfp, unsigned order)
2296{
2297 struct page *page = alloc_pages(gfp | __GFP_COMP, order);
2298
2299 if (page && order > 1)
2300 prep_transhuge_page(page);
2301 return (struct folio *)page;
2302}
2303EXPORT_SYMBOL(folio_alloc);
2304
2305static unsigned long alloc_pages_bulk_array_interleave(gfp_t gfp,
2306 struct mempolicy *pol, unsigned long nr_pages,
2307 struct page **page_array)
2308{
2309 int nodes;
2310 unsigned long nr_pages_per_node;
2311 int delta;
2312 int i;
2313 unsigned long nr_allocated;
2314 unsigned long total_allocated = 0;
2315
2316 nodes = nodes_weight(pol->nodes);
2317 nr_pages_per_node = nr_pages / nodes;
2318 delta = nr_pages - nodes * nr_pages_per_node;
2319
2320 for (i = 0; i < nodes; i++) {
2321 if (delta) {
2322 nr_allocated = __alloc_pages_bulk(gfp,
2323 interleave_nodes(pol), NULL,
2324 nr_pages_per_node + 1, NULL,
2325 page_array);
2326 delta--;
2327 } else {
2328 nr_allocated = __alloc_pages_bulk(gfp,
2329 interleave_nodes(pol), NULL,
2330 nr_pages_per_node, NULL, page_array);
2331 }
2332
2333 page_array += nr_allocated;
2334 total_allocated += nr_allocated;
2335 }
2336
2337 return total_allocated;
2338}
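/*
 * Worked example (comment added for illustration): with
 * nodes_weight(pol->nodes) == 3 and nr_pages == 10, nr_pages_per_node is 3
 * and delta is 1, so the first interleave node receives 4 pages and the
 * remaining two nodes receive 3 pages each.
 */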
2339
2340static unsigned long alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid,
2341 struct mempolicy *pol, unsigned long nr_pages,
2342 struct page **page_array)
2343{
2344 gfp_t preferred_gfp;
2345 unsigned long nr_allocated = 0;
2346
2347 preferred_gfp = gfp | __GFP_NOWARN;
2348 preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2349
2350 nr_allocated = __alloc_pages_bulk(preferred_gfp, nid, &pol->nodes,
2351 nr_pages, NULL, page_array);
2352
2353 if (nr_allocated < nr_pages)
2354 nr_allocated += __alloc_pages_bulk(gfp, numa_node_id(), NULL,
2355 nr_pages - nr_allocated, NULL,
2356 page_array + nr_allocated);
2357 return nr_allocated;
2358}
2359
2360/* Bulk page allocation and mempolicy should be considered at the
2361 * same time in some situations, such as vmalloc.
2362 *
2363 * It can accelerate memory allocation, especially for interleaved
2364 * allocations.
2365 */
2366unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp,
2367 unsigned long nr_pages, struct page **page_array)
2368{
2369 struct mempolicy *pol = &default_policy;
2370
2371 if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2372 pol = get_task_policy(current);
2373
2374 if (pol->mode == MPOL_INTERLEAVE)
2375 return alloc_pages_bulk_array_interleave(gfp, pol,
2376 nr_pages, page_array);
2377
2378 if (pol->mode == MPOL_PREFERRED_MANY)
2379 return alloc_pages_bulk_array_preferred_many(gfp,
2380 numa_node_id(), pol, nr_pages, page_array);
2381
2382 return __alloc_pages_bulk(gfp, policy_node(gfp, pol, numa_node_id()),
2383 policy_nodemask(gfp, pol), nr_pages, NULL,
2384 page_array);
2385}
2386
2387int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2388{
2389 struct mempolicy *pol = mpol_dup(vma_policy(src));
2390
2391 if (IS_ERR(pol))
2392 return PTR_ERR(pol);
2393 dst->vm_policy = pol;
2394 return 0;
2395}
2396
2397/*
2398 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
2399 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
2400 * with the mems_allowed returned by cpuset_mems_allowed(). This
2401 * keeps mempolicies cpuset relative after its cpuset moves. See
2402 * also kernel/cpuset.c update_nodemask().
2403 *
2404 * current's mempolicy may be rebound by another task (the task that changes
2405 * the cpuset's mems), so we need not do the rebind work for the current task.
2406 */
2407
2408/* Slow path of a mempolicy duplicate */
2409struct mempolicy *__mpol_dup(struct mempolicy *old)
2410{
2411 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2412
2413 if (!new)
2414 return ERR_PTR(-ENOMEM);
2415
2416 /* task's mempolicy is protected by alloc_lock */
2417 if (old == current->mempolicy) {
2418 task_lock(current);
2419 *new = *old;
2420 task_unlock(current);
2421 } else
2422 *new = *old;
2423
2424 if (current_cpuset_is_being_rebound()) {
2425 nodemask_t mems = cpuset_mems_allowed(current);
2426 mpol_rebind_policy(new, &mems);
2427 }
2428 atomic_set(&new->refcnt, 1);
2429 return new;
2430}
2431
2432/* Slow path of a mempolicy comparison */
2433bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
2434{
2435 if (!a || !b)
2436 return false;
2437 if (a->mode != b->mode)
2438 return false;
2439 if (a->flags != b->flags)
2440 return false;
2441 if (a->home_node != b->home_node)
2442 return false;
2443 if (mpol_store_user_nodemask(a))
2444 if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2445 return false;
2446
2447 switch (a->mode) {
2448 case MPOL_BIND:
2449 case MPOL_INTERLEAVE:
2450 case MPOL_PREFERRED:
2451 case MPOL_PREFERRED_MANY:
2452 return !!nodes_equal(a->nodes, b->nodes);
2453 case MPOL_LOCAL:
2454 return true;
2455 default:
2456 BUG();
2457 return false;
2458 }
2459}
2460
2461/*
2462 * Shared memory backing store policy support.
2463 *
2464 * Remember policies even when nobody has shared memory mapped.
2465 * The policies are kept in Red-Black tree linked from the inode.
2466 * They are protected by the sp->lock rwlock, which should be held
2467 * for any accesses to the tree.
2468 */
2469
2470/*
2471 * Look up the first element intersecting start-end. Caller holds sp->lock for
2472 * reading or for writing
2473 */
2474static struct sp_node *
2475sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2476{
2477 struct rb_node *n = sp->root.rb_node;
2478
2479 while (n) {
2480 struct sp_node *p = rb_entry(n, struct sp_node, nd);
2481
2482 if (start >= p->end)
2483 n = n->rb_right;
2484 else if (end <= p->start)
2485 n = n->rb_left;
2486 else
2487 break;
2488 }
2489 if (!n)
2490 return NULL;
2491 for (;;) {
2492 struct sp_node *w = NULL;
2493 struct rb_node *prev = rb_prev(n);
2494 if (!prev)
2495 break;
2496 w = rb_entry(prev, struct sp_node, nd);
2497 if (w->end <= start)
2498 break;
2499 n = prev;
2500 }
2501 return rb_entry(n, struct sp_node, nd);
2502}
2503
2504/*
2505 * Insert a new shared policy into the list. Caller holds sp->lock for
2506 * writing.
2507 */
2508static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2509{
2510 struct rb_node **p = &sp->root.rb_node;
2511 struct rb_node *parent = NULL;
2512 struct sp_node *nd;
2513
2514 while (*p) {
2515 parent = *p;
2516 nd = rb_entry(parent, struct sp_node, nd);
2517 if (new->start < nd->start)
2518 p = &(*p)->rb_left;
2519 else if (new->end > nd->end)
2520 p = &(*p)->rb_right;
2521 else
2522 BUG();
2523 }
2524 rb_link_node(&new->nd, parent, p);
2525 rb_insert_color(&new->nd, &sp->root);
2526 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
2527 new->policy ? new->policy->mode : 0);
2528}
2529
2530/* Find shared policy intersecting idx */
2531struct mempolicy *
2532mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2533{
2534 struct mempolicy *pol = NULL;
2535 struct sp_node *sn;
2536
2537 if (!sp->root.rb_node)
2538 return NULL;
2539 read_lock(&sp->lock);
2540 sn = sp_lookup(sp, idx, idx+1);
2541 if (sn) {
2542 mpol_get(sn->policy);
2543 pol = sn->policy;
2544 }
2545 read_unlock(&sp->lock);
2546 return pol;
2547}
2548
2549static void sp_free(struct sp_node *n)
2550{
2551 mpol_put(n->policy);
2552 kmem_cache_free(sn_cache, n);
2553}
2554
2555/**
2556 * mpol_misplaced - check whether current page node is valid in policy
2557 *
2558 * @page: page to be checked
2559 * @vma: vm area where page mapped
2560 * @addr: virtual address where page mapped
2561 *
2562 * Look up the current policy node id for vma, addr and "compare" it to the page's
2563 * node id. Policy determination "mimics" alloc_page_vma().
2564 * Called from fault path where we know the vma and faulting address.
2565 *
2566 * Return: NUMA_NO_NODE if the page is in a node that is valid for this
2567 * policy, or a suitable node ID to allocate a replacement page from.
2568 */
2569int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2570{
2571 struct mempolicy *pol;
2572 struct zoneref *z;
2573 int curnid = page_to_nid(page);
2574 unsigned long pgoff;
2575 int thiscpu = raw_smp_processor_id();
2576 int thisnid = cpu_to_node(thiscpu);
2577 int polnid = NUMA_NO_NODE;
2578 int ret = NUMA_NO_NODE;
2579
2580 pol = get_vma_policy(vma, addr);
2581 if (!(pol->flags & MPOL_F_MOF))
2582 goto out;
2583
2584 switch (pol->mode) {
2585 case MPOL_INTERLEAVE:
2586 pgoff = vma->vm_pgoff;
2587 pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
2588 polnid = offset_il_node(pol, pgoff);
2589 break;
2590
2591 case MPOL_PREFERRED:
2592 if (node_isset(curnid, pol->nodes))
2593 goto out;
2594 polnid = first_node(pol->nodes);
2595 break;
2596
2597 case MPOL_LOCAL:
2598 polnid = numa_node_id();
2599 break;
2600
2601 case MPOL_BIND:
2602 /* Optimize placement among multiple nodes via NUMA balancing */
2603 if (pol->flags & MPOL_F_MORON) {
2604 if (node_isset(thisnid, pol->nodes))
2605 break;
2606 goto out;
2607 }
2608 fallthrough;
2609
2610 case MPOL_PREFERRED_MANY:
2611 /*
2612 * use current page if in policy nodemask,
2613 * else select nearest allowed node, if any.
2614 * If no allowed nodes, use current [!misplaced].
2615 */
2616 if (node_isset(curnid, pol->nodes))
2617 goto out;
2618 z = first_zones_zonelist(
2619 node_zonelist(numa_node_id(), GFP_HIGHUSER),
2620 gfp_zone(GFP_HIGHUSER),
2621 &pol->nodes);
2622 polnid = zone_to_nid(z->zone);
2623 break;
2624
2625 default:
2626 BUG();
2627 }
2628
2629 /* Migrate the page towards the node whose CPU is referencing it */
2630 if (pol->flags & MPOL_F_MORON) {
2631 polnid = thisnid;
2632
2633 if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
2634 goto out;
2635 }
2636
2637 if (curnid != polnid)
2638 ret = polnid;
2639out:
2640 mpol_cond_put(pol);
2641
2642 return ret;
2643}
2644
2645/*
2646 * Drop the (possibly final) reference to task->mempolicy. It needs to be
2647 * dropped after task->mempolicy is set to NULL so that any allocation done as
2648 * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2649 * policy.
2650 */
2651void mpol_put_task_policy(struct task_struct *task)
2652{
2653 struct mempolicy *pol;
2654
2655 task_lock(task);
2656 pol = task->mempolicy;
2657 task->mempolicy = NULL;
2658 task_unlock(task);
2659 mpol_put(pol);
2660}
2661
2662static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2663{
2664	pr_debug("deleting %lx-%lx\n", n->start, n->end);
2665 rb_erase(&n->nd, &sp->root);
2666 sp_free(n);
2667}
2668
2669static void sp_node_init(struct sp_node *node, unsigned long start,
2670 unsigned long end, struct mempolicy *pol)
2671{
2672 node->start = start;
2673 node->end = end;
2674 node->policy = pol;
2675}
2676
2677static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2678 struct mempolicy *pol)
2679{
2680 struct sp_node *n;
2681 struct mempolicy *newpol;
2682
2683 n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2684 if (!n)
2685 return NULL;
2686
2687 newpol = mpol_dup(pol);
2688 if (IS_ERR(newpol)) {
2689 kmem_cache_free(sn_cache, n);
2690 return NULL;
2691 }
2692 newpol->flags |= MPOL_F_SHARED;
2693 sp_node_init(n, start, end, newpol);
2694
2695 return n;
2696}
2697
2698/* Replace a policy range. */
2699static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2700 unsigned long end, struct sp_node *new)
2701{
2702 struct sp_node *n;
2703 struct sp_node *n_new = NULL;
2704 struct mempolicy *mpol_new = NULL;
2705 int ret = 0;
2706
2707restart:
2708 write_lock(&sp->lock);
2709 n = sp_lookup(sp, start, end);
2710 /* Take care of old policies in the same range. */
2711 while (n && n->start < end) {
2712 struct rb_node *next = rb_next(&n->nd);
2713 if (n->start >= start) {
2714 if (n->end <= end)
2715 sp_delete(sp, n);
2716 else
2717 n->start = end;
2718 } else {
2719 /* Old policy spanning whole new range. */
2720 if (n->end > end) {
2721 if (!n_new)
2722 goto alloc_new;
2723
2724 *mpol_new = *n->policy;
2725 atomic_set(&mpol_new->refcnt, 1);
2726 sp_node_init(n_new, end, n->end, mpol_new);
2727 n->end = start;
2728 sp_insert(sp, n_new);
2729 n_new = NULL;
2730 mpol_new = NULL;
2731 break;
2732 } else
2733 n->end = start;
2734 }
2735 if (!next)
2736 break;
2737 n = rb_entry(next, struct sp_node, nd);
2738 }
2739 if (new)
2740 sp_insert(sp, new);
2741 write_unlock(&sp->lock);
2742 ret = 0;
2743
2744err_out:
2745 if (mpol_new)
2746 mpol_put(mpol_new);
2747 if (n_new)
2748 kmem_cache_free(sn_cache, n_new);
2749
2750 return ret;
2751
2752alloc_new:
2753 write_unlock(&sp->lock);
2754 ret = -ENOMEM;
2755 n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2756 if (!n_new)
2757 goto err_out;
2758 mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2759 if (!mpol_new)
2760 goto err_out;
2761 atomic_set(&mpol_new->refcnt, 1);
2762 goto restart;
2763}
2764
2765/**
2766 * mpol_shared_policy_init - initialize shared policy for inode
2767 * @sp: pointer to inode shared policy
2768 * @mpol: struct mempolicy to install
2769 *
2770 * Install non-NULL @mpol in inode's shared policy rb-tree.
2771 * On entry, the current task has a reference on a non-NULL @mpol.
2772 * This must be released on exit.
2773 * This is called from get_inode() calls, so we can use GFP_KERNEL.
2774 */
2775void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2776{
2777 int ret;
2778
2779 sp->root = RB_ROOT; /* empty tree == default mempolicy */
2780 rwlock_init(&sp->lock);
2781
2782 if (mpol) {
2783 struct vm_area_struct pvma;
2784 struct mempolicy *new;
2785 NODEMASK_SCRATCH(scratch);
2786
2787 if (!scratch)
2788 goto put_mpol;
2789 /* contextualize the tmpfs mount point mempolicy */
2790 new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
2791 if (IS_ERR(new))
2792 goto free_scratch; /* no valid nodemask intersection */
2793
2794 task_lock(current);
2795 ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
2796 task_unlock(current);
2797 if (ret)
2798 goto put_new;
2799
2800 /* Create pseudo-vma that contains just the policy */
2801 vma_init(&pvma, NULL);
2802 pvma.vm_end = TASK_SIZE; /* policy covers entire file */
2803 mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
2804
2805put_new:
2806 mpol_put(new); /* drop initial ref */
2807free_scratch:
2808 NODEMASK_SCRATCH_FREE(scratch);
2809put_mpol:
2810 mpol_put(mpol); /* drop our incoming ref on sb mpol */
2811 }
2812}
2813
2814int mpol_set_shared_policy(struct shared_policy *info,
2815 struct vm_area_struct *vma, struct mempolicy *npol)
2816{
2817 int err;
2818 struct sp_node *new = NULL;
2819 unsigned long sz = vma_pages(vma);
2820
2821 pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
2822 vma->vm_pgoff,
2823 sz, npol ? npol->mode : -1,
2824 npol ? npol->flags : -1,
2825 npol ? nodes_addr(npol->nodes)[0] : NUMA_NO_NODE);
2826
2827 if (npol) {
2828 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2829 if (!new)
2830 return -ENOMEM;
2831 }
2832 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2833 if (err && new)
2834 sp_free(new);
2835 return err;
2836}
2837
2838/* Free a backing policy store on inode delete. */
2839void mpol_free_shared_policy(struct shared_policy *p)
2840{
2841 struct sp_node *n;
2842 struct rb_node *next;
2843
2844 if (!p->root.rb_node)
2845 return;
2846 write_lock(&p->lock);
2847 next = rb_first(&p->root);
2848 while (next) {
2849 n = rb_entry(next, struct sp_node, nd);
2850 next = rb_next(&n->nd);
2851 sp_delete(p, n);
2852 }
2853 write_unlock(&p->lock);
2854}
2855
2856#ifdef CONFIG_NUMA_BALANCING
2857static int __initdata numabalancing_override;
2858
2859static void __init check_numabalancing_enable(void)
2860{
2861 bool numabalancing_default = false;
2862
2863 if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
2864 numabalancing_default = true;
2865
2866 /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2867 if (numabalancing_override)
2868 set_numabalancing_state(numabalancing_override == 1);
2869
2870 if (num_online_nodes() > 1 && !numabalancing_override) {
2871 pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
2872 numabalancing_default ? "Enabling" : "Disabling");
2873 set_numabalancing_state(numabalancing_default);
2874 }
2875}
2876
2877static int __init setup_numabalancing(char *str)
2878{
2879 int ret = 0;
2880 if (!str)
2881 goto out;
2882
2883 if (!strcmp(str, "enable")) {
2884 numabalancing_override = 1;
2885 ret = 1;
2886 } else if (!strcmp(str, "disable")) {
2887 numabalancing_override = -1;
2888 ret = 1;
2889 }
2890out:
2891 if (!ret)
2892 pr_warn("Unable to parse numa_balancing=\n");
2893
2894 return ret;
2895}
2896__setup("numa_balancing=", setup_numabalancing);
2897#else
2898static inline void __init check_numabalancing_enable(void)
2899{
2900}
2901#endif /* CONFIG_NUMA_BALANCING */
2902
2903/* assumes fs == KERNEL_DS */
2904void __init numa_policy_init(void)
2905{
2906 nodemask_t interleave_nodes;
2907 unsigned long largest = 0;
2908 int nid, prefer = 0;
2909
2910 policy_cache = kmem_cache_create("numa_policy",
2911 sizeof(struct mempolicy),
2912 0, SLAB_PANIC, NULL);
2913
2914 sn_cache = kmem_cache_create("shared_policy_node",
2915 sizeof(struct sp_node),
2916 0, SLAB_PANIC, NULL);
2917
2918 for_each_node(nid) {
2919 preferred_node_policy[nid] = (struct mempolicy) {
2920 .refcnt = ATOMIC_INIT(1),
2921 .mode = MPOL_PREFERRED,
2922 .flags = MPOL_F_MOF | MPOL_F_MORON,
2923 .nodes = nodemask_of_node(nid),
2924 };
2925 }
2926
2927 /*
2928 * Set interleaving policy for system init. Interleaving is only
2929 * enabled across suitably sized nodes (default is >= 16MB), or
2930 * fall back to the largest node if they're all smaller.
2931 */
2932 nodes_clear(interleave_nodes);
2933 for_each_node_state(nid, N_MEMORY) {
2934 unsigned long total_pages = node_present_pages(nid);
2935
2936 /* Preserve the largest node */
2937 if (largest < total_pages) {
2938 largest = total_pages;
2939 prefer = nid;
2940 }
2941
2942 /* Interleave this node? */
2943 if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2944 node_set(nid, interleave_nodes);
2945 }
2946
2947 /* All too small, use the largest */
2948 if (unlikely(nodes_empty(interleave_nodes)))
2949 node_set(prefer, interleave_nodes);
2950
2951 if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2952 pr_err("%s: interleaving failed\n", __func__);
2953
2954 check_numabalancing_enable();
2955}
2956
2957/* Reset policy of current process to default */
2958void numa_default_policy(void)
2959{
2960 do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
2961}
2962
2963/*
2964 * Parse and format mempolicy from/to strings
2965 */
2966
2967static const char * const policy_modes[] =
2968{
2969 [MPOL_DEFAULT] = "default",
2970 [MPOL_PREFERRED] = "prefer",
2971 [MPOL_BIND] = "bind",
2972 [MPOL_INTERLEAVE] = "interleave",
2973 [MPOL_LOCAL] = "local",
2974 [MPOL_PREFERRED_MANY] = "prefer (many)",
2975};
2976
2977
2978#ifdef CONFIG_TMPFS
2979/**
2980 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
2981 * @str: string containing mempolicy to parse
2982 * @mpol: pointer to struct mempolicy pointer, returned on success.
2983 *
2984 * Format of input:
2985 * <mode>[=<flags>][:<nodelist>]
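 *
 * For example (illustrative; see Documentation/filesystems/tmpfs.rst for the
 * authoritative list): "interleave:0-3", "bind=static:0,2" or "local", as in
 * mount -t tmpfs -o mpol=interleave:0-3 tmpfs /mnt.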
2986 *
2987 * Return: %0 on success, else %1
2988 */
2989int mpol_parse_str(char *str, struct mempolicy **mpol)
2990{
2991 struct mempolicy *new = NULL;
2992 unsigned short mode_flags;
2993 nodemask_t nodes;
2994 char *nodelist = strchr(str, ':');
2995 char *flags = strchr(str, '=');
2996 int err = 1, mode;
2997
2998 if (flags)
2999 *flags++ = '\0'; /* terminate mode string */
3000
3001 if (nodelist) {
3002 /* NUL-terminate mode or flags string */
3003 *nodelist++ = '\0';
3004 if (nodelist_parse(nodelist, nodes))
3005 goto out;
3006 if (!nodes_subset(nodes, node_states[N_MEMORY]))
3007 goto out;
3008 } else
3009 nodes_clear(nodes);
3010
3011 mode = match_string(policy_modes, MPOL_MAX, str);
3012 if (mode < 0)
3013 goto out;
3014
3015 switch (mode) {
3016 case MPOL_PREFERRED:
3017 /*
3018 * Insist on a nodelist of one node only, although later
3019 * we use first_node(nodes) to grab a single node, so here
3020 * nodelist (or nodes) cannot be empty.
3021 */
3022 if (nodelist) {
3023 char *rest = nodelist;
3024 while (isdigit(*rest))
3025 rest++;
3026 if (*rest)
3027 goto out;
3028 if (nodes_empty(nodes))
3029 goto out;
3030 }
3031 break;
3032 case MPOL_INTERLEAVE:
3033 /*
3034 * Default to online nodes with memory if no nodelist
3035 */
3036 if (!nodelist)
3037 nodes = node_states[N_MEMORY];
3038 break;
3039 case MPOL_LOCAL:
3040 /*
3041 * Don't allow a nodelist; mpol_new() checks flags
3042 */
3043 if (nodelist)
3044 goto out;
3045 break;
3046 case MPOL_DEFAULT:
3047 /*
3048		 * Insist on an empty nodelist
3049 */
3050 if (!nodelist)
3051 err = 0;
3052 goto out;
3053 case MPOL_PREFERRED_MANY:
3054 case MPOL_BIND:
3055 /*
3056 * Insist on a nodelist
3057 */
3058 if (!nodelist)
3059 goto out;
3060 }
3061
3062 mode_flags = 0;
3063 if (flags) {
3064 /*
3065 * Currently, we only support two mutually exclusive
3066 * mode flags.
3067 */
3068 if (!strcmp(flags, "static"))
3069 mode_flags |= MPOL_F_STATIC_NODES;
3070 else if (!strcmp(flags, "relative"))
3071 mode_flags |= MPOL_F_RELATIVE_NODES;
3072 else
3073 goto out;
3074 }
3075
3076 new = mpol_new(mode, mode_flags, &nodes);
3077 if (IS_ERR(new))
3078 goto out;
3079
3080 /*
3081 * Save nodes for mpol_to_str() to show the tmpfs mount options
3082 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
3083 */
3084 if (mode != MPOL_PREFERRED) {
3085 new->nodes = nodes;
3086 } else if (nodelist) {
3087 nodes_clear(new->nodes);
3088 node_set(first_node(nodes), new->nodes);
3089 } else {
3090 new->mode = MPOL_LOCAL;
3091 }
3092
3093 /*
3094 * Save nodes for contextualization: this will be used to "clone"
3095 * the mempolicy in a specific context [cpuset] at a later time.
3096 */
3097 new->w.user_nodemask = nodes;
3098
3099 err = 0;
3100
3101out:
3102 /* Restore string for error message */
3103 if (nodelist)
3104 *--nodelist = ':';
3105 if (flags)
3106 *--flags = '=';
3107 if (!err)
3108 *mpol = new;
3109 return err;
3110}
3111#endif /* CONFIG_TMPFS */
3112
3113/**
3114 * mpol_to_str - format a mempolicy structure for printing
3115 * @buffer: to contain formatted mempolicy string
3116 * @maxlen: length of @buffer
3117 * @pol: pointer to mempolicy to be formatted
3118 *
3119 * Convert @pol into a string. If @buffer is too short, truncate the string.
3120 * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
3121 * longest flag, "relative", and to display at least a few node ids.
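 *
 * Example outputs (illustrative): "default", "prefer:2", "interleave:0-3",
 * "bind=static:0,2".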
3122 */
3123void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
3124{
3125 char *p = buffer;
3126 nodemask_t nodes = NODE_MASK_NONE;
3127 unsigned short mode = MPOL_DEFAULT;
3128 unsigned short flags = 0;
3129
3130 if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
3131 mode = pol->mode;
3132 flags = pol->flags;
3133 }
3134
3135 switch (mode) {
3136 case MPOL_DEFAULT:
3137 case MPOL_LOCAL:
3138 break;
3139 case MPOL_PREFERRED:
3140 case MPOL_PREFERRED_MANY:
3141 case MPOL_BIND:
3142 case MPOL_INTERLEAVE:
3143 nodes = pol->nodes;
3144 break;
3145 default:
3146 WARN_ON_ONCE(1);
3147 snprintf(p, maxlen, "unknown");
3148 return;
3149 }
3150
3151 p += snprintf(p, maxlen, "%s", policy_modes[mode]);
3152
3153 if (flags & MPOL_MODE_FLAGS) {
3154 p += snprintf(p, buffer + maxlen - p, "=");
3155
3156 /*
3157 * Currently, the only defined flags are mutually exclusive
3158 */
3159 if (flags & MPOL_F_STATIC_NODES)
3160 p += snprintf(p, buffer + maxlen - p, "static");
3161 else if (flags & MPOL_F_RELATIVE_NODES)
3162 p += snprintf(p, buffer + maxlen - p, "relative");
3163 }
3164
3165 if (!nodes_empty(nodes))
3166 p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
3167 nodemask_pr_args(&nodes));
3168}