v4.17
 
   1/*
   2 * Simple NUMA memory policy for the Linux kernel.
   3 *
   4 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
   5 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
   6 * Subject to the GNU Public License, version 2.
   7 *
   8 * NUMA policy allows the user to give hints about the node(s) on which
   9 * memory should be allocated.
  10 *
  11 * Support four policies per VMA and per process:
  12 *
  13 * The VMA policy has priority over the process policy for a page fault.
  14 *
  15 * interleave     Allocate memory interleaved over a set of nodes,
  16 *                with normal fallback if it fails.
  17 *                For VMA based allocations this interleaves based on the
  18 *                offset into the backing object or offset into the mapping
  19 *                for anonymous memory. For process policy a process counter
  20 *                is used.
  21 *
  22 * bind           Only allocate memory on a specific set of nodes,
  23 *                no fallback.
  24 *                FIXME: memory is allocated starting with the first node
  25 *                to the last. It would be better if bind would truly restrict
  26 *                the allocation to memory nodes instead
  27 *
  28 * preferred       Try a specific node first before normal fallback.
  29 *                As a special case NUMA_NO_NODE here means do the allocation
  30 *                on the local CPU. This is normally identical to default,
  31 *                but useful to set in a VMA when you have a non default
  32 *                process policy.
  33 *
  34 * default        Allocate on the local node first, or when on a VMA
  35 *                use the process policy. This is what Linux always did
  36 *		  in a NUMA aware kernel and still does by, ahem, default.
  37 *
  38 * The process policy is applied for most non interrupt memory allocations
  39 * in that process' context. Interrupts ignore the policies and always
  40 * try to allocate on the local CPU. The VMA policy is only applied for memory
  41 * allocations for a VMA in the VM.
  42 *
  43 * Currently there are a few corner cases in swapping where the policy
  44 * is not applied, but the majority should be handled. When process policy
  45 * is used it is not remembered over swap outs/swap ins.
  46 *
  47 * Only the highest zone in the zone hierarchy gets policied. Allocations
  48 * requesting a lower zone just use default policy. This implies that
  49 * on systems with highmem, kernel lowmem allocations don't get policied.
  50 * Same with GFP_DMA allocations.
  51 *
  52 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
  53 * all users and remembered even when nobody has memory mapped.
  54 */
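/*
 * Example (illustrative only, userspace): the modes above are selected
 * through the set_mempolicy(2) and mbind(2) system calls declared in
 * <numaif.h>.  "buf" and "len" below stand for a hypothetical existing
 * mapping, and nodes 0 and 1 are assumed to exist and have memory.
 *
 *	unsigned long mask = (1UL << 0) | (1UL << 1);
 *
 *	interleave this task's future allocations over nodes 0 and 1:
 *	set_mempolicy(MPOL_INTERLEAVE, &mask, sizeof(mask) * 8 + 1);
 *
 *	restrict an existing mapping to node 0 and migrate its pages there:
 *	unsigned long node0 = 1UL << 0;
 *	mbind(buf, len, MPOL_BIND, &node0, sizeof(node0) * 8 + 1, MPOL_MF_MOVE);
 */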
  55
  56/* Notebook:
  57   fix mmap readahead to honour policy and enable policy for any page cache
  58   object
  59   statistics for bigpages
  60   global policy for page cache? currently it uses process policy. Requires
  61   first item above.
  62   handle mremap for shared memory (currently ignored for the policy)
  63   grows down?
  64   make bind policy root only? It can trigger oom much faster and the
  65   kernel is not always graceful about that.
  66*/
  67
  68#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  69
  70#include <linux/mempolicy.h>
  71#include <linux/mm.h>
  72#include <linux/highmem.h>
  73#include <linux/hugetlb.h>
  74#include <linux/kernel.h>
  75#include <linux/sched.h>
  76#include <linux/sched/mm.h>
  77#include <linux/sched/numa_balancing.h>
  78#include <linux/sched/task.h>
  79#include <linux/nodemask.h>
  80#include <linux/cpuset.h>
  81#include <linux/slab.h>
  82#include <linux/string.h>
  83#include <linux/export.h>
  84#include <linux/nsproxy.h>
  85#include <linux/interrupt.h>
  86#include <linux/init.h>
  87#include <linux/compat.h>
  88#include <linux/ptrace.h>
  89#include <linux/swap.h>
  90#include <linux/seq_file.h>
  91#include <linux/proc_fs.h>
  92#include <linux/migrate.h>
  93#include <linux/ksm.h>
  94#include <linux/rmap.h>
  95#include <linux/security.h>
  96#include <linux/syscalls.h>
  97#include <linux/ctype.h>
  98#include <linux/mm_inline.h>
  99#include <linux/mmu_notifier.h>
 100#include <linux/printk.h>
 101#include <linux/swapops.h>
 102
 103#include <asm/tlbflush.h>
 
 104#include <linux/uaccess.h>
 105
 106#include "internal.h"
 107
 108/* Internal flags */
 109#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
 110#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
 111
 112static struct kmem_cache *policy_cache;
 113static struct kmem_cache *sn_cache;
 114
  115/* Highest zone. A specific allocation for a zone below that is not
 116   policied. */
 117enum zone_type policy_zone = 0;
 118
 119/*
 120 * run-time system-wide default policy => local allocation
 121 */
 122static struct mempolicy default_policy = {
 123	.refcnt = ATOMIC_INIT(1), /* never free it */
 124	.mode = MPOL_PREFERRED,
 125	.flags = MPOL_F_LOCAL,
 126};
 127
 128static struct mempolicy preferred_node_policy[MAX_NUMNODES];
 129
 130struct mempolicy *get_task_policy(struct task_struct *p)
 131{
 132	struct mempolicy *pol = p->mempolicy;
 133	int node;
 134
 135	if (pol)
 136		return pol;
 137
 138	node = numa_node_id();
 139	if (node != NUMA_NO_NODE) {
 140		pol = &preferred_node_policy[node];
 141		/* preferred_node_policy is not initialised early in boot */
 142		if (pol->mode)
 143			return pol;
 144	}
 145
 146	return &default_policy;
 147}
 148
 149static const struct mempolicy_operations {
 150	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
 151	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
 152} mpol_ops[MPOL_MAX];
 153
 154static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
 155{
 156	return pol->flags & MPOL_MODE_FLAGS;
 157}
 158
 159static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
 160				   const nodemask_t *rel)
 161{
 162	nodemask_t tmp;
 163	nodes_fold(tmp, *orig, nodes_weight(*rel));
 164	nodes_onto(*ret, tmp, *rel);
 165}
 166
 167static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
 168{
 169	if (nodes_empty(*nodes))
 170		return -EINVAL;
 171	pol->v.nodes = *nodes;
 172	return 0;
 173}
 174
 175static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
 176{
 177	if (!nodes)
 178		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
 179	else if (nodes_empty(*nodes))
 180		return -EINVAL;			/*  no allowed nodes */
 181	else
 182		pol->v.preferred_node = first_node(*nodes);
 183	return 0;
 184}
 185
 186static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
 187{
 188	if (nodes_empty(*nodes))
 189		return -EINVAL;
 190	pol->v.nodes = *nodes;
 191	return 0;
 192}
 193
 194/*
 195 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 196 * any, for the new policy.  mpol_new() has already validated the nodes
 197 * parameter with respect to the policy mode and flags.  But, we need to
 198 * handle an empty nodemask with MPOL_PREFERRED here.
 199 *
 200 * Must be called holding task's alloc_lock to protect task's mems_allowed
 201 * and mempolicy.  May also be called holding the mmap_semaphore for write.
 202 */
 203static int mpol_set_nodemask(struct mempolicy *pol,
 204		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
 205{
 206	int ret;
 207
 208	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
 209	if (pol == NULL)
 210		return 0;
 
 211	/* Check N_MEMORY */
 212	nodes_and(nsc->mask1,
 213		  cpuset_current_mems_allowed, node_states[N_MEMORY]);
 214
 215	VM_BUG_ON(!nodes);
 216	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
 217		nodes = NULL;	/* explicit local allocation */
 218	else {
 219		if (pol->flags & MPOL_F_RELATIVE_NODES)
 220			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
 221		else
 222			nodes_and(nsc->mask2, *nodes, nsc->mask1);
 223
 224		if (mpol_store_user_nodemask(pol))
 225			pol->w.user_nodemask = *nodes;
 226		else
 227			pol->w.cpuset_mems_allowed =
 228						cpuset_current_mems_allowed;
 229	}
 230
 231	if (nodes)
 232		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
 233	else
 234		ret = mpol_ops[pol->mode].create(pol, NULL);
 235	return ret;
 236}
 237
 238/*
  239 * This function just creates a new policy, does some checks and simple
 240 * initialization. You must invoke mpol_set_nodemask() to set nodes.
 241 */
 242static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
 243				  nodemask_t *nodes)
 244{
 245	struct mempolicy *policy;
 246
 247	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
 248		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
 249
 250	if (mode == MPOL_DEFAULT) {
 251		if (nodes && !nodes_empty(*nodes))
 252			return ERR_PTR(-EINVAL);
 253		return NULL;
 254	}
 255	VM_BUG_ON(!nodes);
 256
 257	/*
 258	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
 259	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
 260	 * All other modes require a valid pointer to a non-empty nodemask.
 261	 */
 262	if (mode == MPOL_PREFERRED) {
 263		if (nodes_empty(*nodes)) {
 264			if (((flags & MPOL_F_STATIC_NODES) ||
 265			     (flags & MPOL_F_RELATIVE_NODES)))
 266				return ERR_PTR(-EINVAL);
 267		}
 268	} else if (mode == MPOL_LOCAL) {
 269		if (!nodes_empty(*nodes) ||
 270		    (flags & MPOL_F_STATIC_NODES) ||
 271		    (flags & MPOL_F_RELATIVE_NODES))
 272			return ERR_PTR(-EINVAL);
 273		mode = MPOL_PREFERRED;
 274	} else if (nodes_empty(*nodes))
 275		return ERR_PTR(-EINVAL);
 276	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
 277	if (!policy)
 278		return ERR_PTR(-ENOMEM);
 279	atomic_set(&policy->refcnt, 1);
 280	policy->mode = mode;
 281	policy->flags = flags;
 
 282
 283	return policy;
 284}
 285
 286/* Slow path of a mpol destructor. */
 287void __mpol_put(struct mempolicy *p)
 288{
 289	if (!atomic_dec_and_test(&p->refcnt))
 290		return;
 291	kmem_cache_free(policy_cache, p);
 292}
 293
 294static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
 295{
 296}
 297
 298static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
 299{
 300	nodemask_t tmp;
 301
 302	if (pol->flags & MPOL_F_STATIC_NODES)
 303		nodes_and(tmp, pol->w.user_nodemask, *nodes);
 304	else if (pol->flags & MPOL_F_RELATIVE_NODES)
 305		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
 306	else {
  307		nodes_remap(tmp, pol->v.nodes, pol->w.cpuset_mems_allowed,
 308								*nodes);
 309		pol->w.cpuset_mems_allowed = tmp;
 310	}
 311
 312	if (nodes_empty(tmp))
 313		tmp = *nodes;
 314
 315	pol->v.nodes = tmp;
 316}
 317
 318static void mpol_rebind_preferred(struct mempolicy *pol,
 319						const nodemask_t *nodes)
 320{
 321	nodemask_t tmp;
 322
 323	if (pol->flags & MPOL_F_STATIC_NODES) {
 324		int node = first_node(pol->w.user_nodemask);
 325
 326		if (node_isset(node, *nodes)) {
 327			pol->v.preferred_node = node;
 328			pol->flags &= ~MPOL_F_LOCAL;
 329		} else
 330			pol->flags |= MPOL_F_LOCAL;
 331	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
 332		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
 333		pol->v.preferred_node = first_node(tmp);
 334	} else if (!(pol->flags & MPOL_F_LOCAL)) {
 335		pol->v.preferred_node = node_remap(pol->v.preferred_node,
 336						   pol->w.cpuset_mems_allowed,
 337						   *nodes);
 338		pol->w.cpuset_mems_allowed = *nodes;
 339	}
 340}
 341
 342/*
 343 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 344 *
 345 * Per-vma policies are protected by mmap_sem. Allocations using per-task
 346 * policies are protected by task->mems_allowed_seq to prevent a premature
 347 * OOM/allocation failure due to parallel nodemask modification.
 348 */
 349static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
 350{
 351	if (!pol)
 352		return;
 353	if (!mpol_store_user_nodemask(pol) &&
 354	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
 355		return;
 356
 357	mpol_ops[pol->mode].rebind(pol, newmask);
 358}
 359
 360/*
 361 * Wrapper for mpol_rebind_policy() that just requires task
 362 * pointer, and updates task mempolicy.
 363 *
 364 * Called with task's alloc_lock held.
 365 */
 366
 367void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
 368{
 369	mpol_rebind_policy(tsk->mempolicy, new);
 370}
 371
 372/*
 373 * Rebind each vma in mm to new nodemask.
 374 *
 375 * Call holding a reference to mm.  Takes mm->mmap_sem during call.
 376 */
 377
 378void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
 379{
 380	struct vm_area_struct *vma;
 
 381
 382	down_write(&mm->mmap_sem);
 383	for (vma = mm->mmap; vma; vma = vma->vm_next)
 384		mpol_rebind_policy(vma->vm_policy, new);
 385	up_write(&mm->mmap_sem);
 386}
 387
 388static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
 389	[MPOL_DEFAULT] = {
 390		.rebind = mpol_rebind_default,
 391	},
 392	[MPOL_INTERLEAVE] = {
 393		.create = mpol_new_interleave,
 394		.rebind = mpol_rebind_nodemask,
 395	},
 396	[MPOL_PREFERRED] = {
 397		.create = mpol_new_preferred,
 398		.rebind = mpol_rebind_preferred,
 399	},
 400	[MPOL_BIND] = {
 401		.create = mpol_new_bind,
 402		.rebind = mpol_rebind_nodemask,
 403	},
 404};
 405
 406static void migrate_page_add(struct page *page, struct list_head *pagelist,
 407				unsigned long flags);
 408
 409struct queue_pages {
 410	struct list_head *pagelist;
 411	unsigned long flags;
 412	nodemask_t *nmask;
 413	struct vm_area_struct *prev;
 414};
 415
 416/*
 417 * Check if the page's nid is in qp->nmask.
 418 *
 419 * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
  420 * in the inverse of qp->nmask.
 421 */
 422static inline bool queue_pages_required(struct page *page,
 423					struct queue_pages *qp)
 424{
 425	int nid = page_to_nid(page);
 426	unsigned long flags = qp->flags;
 427
 428	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
 429}
 430
 431static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
 432				unsigned long end, struct mm_walk *walk)
 
 433{
 434	int ret = 0;
 435	struct page *page;
 436	struct queue_pages *qp = walk->private;
 437	unsigned long flags;
 438
 439	if (unlikely(is_pmd_migration_entry(*pmd))) {
 440		ret = 1;
 441		goto unlock;
 442	}
 443	page = pmd_page(*pmd);
 444	if (is_huge_zero_page(page)) {
 445		spin_unlock(ptl);
 446		__split_huge_pmd(walk->vma, pmd, addr, false, NULL);
 447		goto out;
 448	}
 449	if (!queue_pages_required(page, qp)) {
 450		ret = 1;
 451		goto unlock;
 452	}
 453
 454	ret = 1;
 455	flags = qp->flags;
 456	/* go to thp migration */
 457	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
 458		migrate_page_add(page, qp->pagelist, flags);
 459unlock:
 460	spin_unlock(ptl);
 461out:
 462	return ret;
 463}
 464
 465/*
  466 * Scan through the pages, checking whether they meet certain conditions,
 467 * and move them to the pagelist if they do.
 468 */
 469static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
 470			unsigned long end, struct mm_walk *walk)
 471{
 472	struct vm_area_struct *vma = walk->vma;
 473	struct page *page;
 474	struct queue_pages *qp = walk->private;
 475	unsigned long flags = qp->flags;
 476	int ret;
 477	pte_t *pte;
 478	spinlock_t *ptl;
 479
 480	ptl = pmd_trans_huge_lock(pmd, vma);
 481	if (ptl) {
 482		ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
 483		if (ret)
 484			return 0;
 485	}
 486
 487	if (pmd_trans_unstable(pmd))
 488		return 0;
 489
 490	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
 491	for (; addr != end; pte++, addr += PAGE_SIZE) {
 492		if (!pte_present(*pte))
 493			continue;
 494		page = vm_normal_page(vma, addr, *pte);
 495		if (!page)
 496			continue;
 497		/*
 498		 * vm_normal_page() filters out zero pages, but there might
 499		 * still be PageReserved pages to skip, perhaps in a VDSO.
 500		 */
 501		if (PageReserved(page))
 502			continue;
 503		if (!queue_pages_required(page, qp))
 504			continue;
 505		migrate_page_add(page, qp->pagelist, flags);
 506	}
 507	pte_unmap_unlock(pte - 1, ptl);
 508	cond_resched();
 509	return 0;
 510}
 511
 512static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
 513			       unsigned long addr, unsigned long end,
 514			       struct mm_walk *walk)
 515{
 
 516#ifdef CONFIG_HUGETLB_PAGE
 517	struct queue_pages *qp = walk->private;
 518	unsigned long flags = qp->flags;
 519	struct page *page;
 520	spinlock_t *ptl;
 521	pte_t entry;
 522
 523	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
 524	entry = huge_ptep_get(pte);
 525	if (!pte_present(entry))
 526		goto unlock;
 527	page = pte_page(entry);
 528	if (!queue_pages_required(page, qp))
 529		goto unlock;
 530	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
 531	if (flags & (MPOL_MF_MOVE_ALL) ||
 532	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
 533		isolate_huge_page(page, qp->pagelist);
 534unlock:
 535	spin_unlock(ptl);
 536#else
 537	BUG();
 538#endif
 539	return 0;
 540}
 541
 542#ifdef CONFIG_NUMA_BALANCING
 543/*
 544 * This is used to mark a range of virtual addresses to be inaccessible.
 545 * These are later cleared by a NUMA hinting fault. Depending on these
 546 * faults, pages may be migrated for better NUMA placement.
 547 *
 548 * This is assuming that NUMA faults are handled using PROT_NONE. If
 549 * an architecture makes a different choice, it will need further
 550 * changes to the core.
 551 */
 552unsigned long change_prot_numa(struct vm_area_struct *vma,
 553			unsigned long addr, unsigned long end)
 554{
 
 555	int nr_updated;
 556
 557	nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);
 558	if (nr_updated)
 559		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
 560
 561	return nr_updated;
 562}
 563#else
 564static unsigned long change_prot_numa(struct vm_area_struct *vma,
 565			unsigned long addr, unsigned long end)
 566{
 567	return 0;
 568}
 569#endif /* CONFIG_NUMA_BALANCING */
 570
 571static int queue_pages_test_walk(unsigned long start, unsigned long end,
 572				struct mm_walk *walk)
 573{
 574	struct vm_area_struct *vma = walk->vma;
 575	struct queue_pages *qp = walk->private;
 576	unsigned long endvma = vma->vm_end;
 577	unsigned long flags = qp->flags;
 578
 579	if (!vma_migratable(vma))
 580		return 1;
 581
 582	if (endvma > end)
 583		endvma = end;
 584	if (vma->vm_start > start)
 585		start = vma->vm_start;
 586
 587	if (!(flags & MPOL_MF_DISCONTIG_OK)) {
 588		if (!vma->vm_next && vma->vm_end < end)
 589			return -EFAULT;
 590		if (qp->prev && qp->prev->vm_end < vma->vm_start)
 591			return -EFAULT;
 592	}
 593
 594	qp->prev = vma;
 595
 596	if (flags & MPOL_MF_LAZY) {
 597		/* Similar to task_numa_work, skip inaccessible VMAs */
 598		if (!is_vm_hugetlb_page(vma) &&
 599			(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) &&
 600			!(vma->vm_flags & VM_MIXEDMAP))
 601			change_prot_numa(vma, start, endvma);
 602		return 1;
 603	}
 604
 605	/* queue pages from current vma */
 606	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
 607		return 0;
 608	return 1;
 609}
 610
 611/*
 612 * Walk through page tables and collect pages to be migrated.
 613 *
  614 * If pages found in a given range are on a set of nodes (determined by
  615 * @nodes and @flags), they are isolated and queued to the pagelist, which
  616 * is passed via @private.
 617 */
 618static int
 619queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
 620		nodemask_t *nodes, unsigned long flags,
 621		struct list_head *pagelist)
 622{
 
 623	struct queue_pages qp = {
 624		.pagelist = pagelist,
 625		.flags = flags,
 626		.nmask = nodes,
 627		.prev = NULL,
 628	};
 629	struct mm_walk queue_pages_walk = {
 630		.hugetlb_entry = queue_pages_hugetlb,
 631		.pmd_entry = queue_pages_pte_range,
 632		.test_walk = queue_pages_test_walk,
 633		.mm = mm,
 634		.private = &qp,
 635	};
 636
 637	return walk_page_range(start, end, &queue_pages_walk);
 638}
 639
 640/*
 641 * Apply policy to a single VMA
 642 * This must be called with the mmap_sem held for writing.
 643 */
 644static int vma_replace_policy(struct vm_area_struct *vma,
 645						struct mempolicy *pol)
 646{
 647	int err;
 648	struct mempolicy *old;
 649	struct mempolicy *new;
 650
 651	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
 652		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
 653		 vma->vm_ops, vma->vm_file,
 654		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
 655
 656	new = mpol_dup(pol);
 657	if (IS_ERR(new))
 658		return PTR_ERR(new);
 659
 660	if (vma->vm_ops && vma->vm_ops->set_policy) {
 661		err = vma->vm_ops->set_policy(vma, new);
 662		if (err)
 663			goto err_out;
 664	}
 665
 666	old = vma->vm_policy;
 667	vma->vm_policy = new; /* protected by mmap_sem */
 668	mpol_put(old);
 669
 670	return 0;
 671 err_out:
 672	mpol_put(new);
 673	return err;
 674}
 675
 676/* Step 2: apply policy to a range and do splits. */
 677static int mbind_range(struct mm_struct *mm, unsigned long start,
 678		       unsigned long end, struct mempolicy *new_pol)
 679{
 680	struct vm_area_struct *next;
 681	struct vm_area_struct *prev;
 682	struct vm_area_struct *vma;
 683	int err = 0;
 684	pgoff_t pgoff;
 685	unsigned long vmstart;
 686	unsigned long vmend;
 687
 688	vma = find_vma(mm, start);
 689	if (!vma || vma->vm_start > start)
 690		return -EFAULT;
 691
 692	prev = vma->vm_prev;
 693	if (start > vma->vm_start)
 694		prev = vma;
 695
 696	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
 697		next = vma->vm_next;
 698		vmstart = max(start, vma->vm_start);
 699		vmend   = min(end, vma->vm_end);
 700
 701		if (mpol_equal(vma_policy(vma), new_pol))
 702			continue;
 703
 704		pgoff = vma->vm_pgoff +
 705			((vmstart - vma->vm_start) >> PAGE_SHIFT);
 706		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
 707				 vma->anon_vma, vma->vm_file, pgoff,
 708				 new_pol, vma->vm_userfaultfd_ctx);
 
 709		if (prev) {
 710			vma = prev;
 711			next = vma->vm_next;
 712			if (mpol_equal(vma_policy(vma), new_pol))
 713				continue;
 714			/* vma_merge() joined vma && vma->next, case 8 */
 715			goto replace;
 716		}
 717		if (vma->vm_start != vmstart) {
 718			err = split_vma(vma->vm_mm, vma, vmstart, 1);
 719			if (err)
 720				goto out;
 721		}
 722		if (vma->vm_end != vmend) {
 723			err = split_vma(vma->vm_mm, vma, vmend, 0);
 724			if (err)
 725				goto out;
 726		}
 727 replace:
 728		err = vma_replace_policy(vma, new_pol);
 729		if (err)
 730			goto out;
 731	}
 732
 733 out:
 734	return err;
 735}
 736
 737/* Set the process memory policy */
 738static long do_set_mempolicy(unsigned short mode, unsigned short flags,
 739			     nodemask_t *nodes)
 740{
 741	struct mempolicy *new, *old;
 742	NODEMASK_SCRATCH(scratch);
 743	int ret;
 744
 745	if (!scratch)
 746		return -ENOMEM;
 747
 748	new = mpol_new(mode, flags, nodes);
 749	if (IS_ERR(new)) {
 750		ret = PTR_ERR(new);
 751		goto out;
 752	}
 753
 754	task_lock(current);
 755	ret = mpol_set_nodemask(new, nodes, scratch);
 756	if (ret) {
 757		task_unlock(current);
 758		mpol_put(new);
 759		goto out;
 760	}
 
 761	old = current->mempolicy;
 762	current->mempolicy = new;
 763	if (new && new->mode == MPOL_INTERLEAVE)
 764		current->il_prev = MAX_NUMNODES-1;
 765	task_unlock(current);
 766	mpol_put(old);
 767	ret = 0;
 768out:
 769	NODEMASK_SCRATCH_FREE(scratch);
 770	return ret;
 771}
 772
 773/*
 774 * Return nodemask for policy for get_mempolicy() query
 775 *
 776 * Called with task's alloc_lock held
 777 */
 778static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
 779{
 780	nodes_clear(*nodes);
 781	if (p == &default_policy)
 782		return;
 783
 784	switch (p->mode) {
 785	case MPOL_BIND:
 786		/* Fall through */
 787	case MPOL_INTERLEAVE:
 788		*nodes = p->v.nodes;
 789		break;
 790	case MPOL_PREFERRED:
 791		if (!(p->flags & MPOL_F_LOCAL))
 792			node_set(p->v.preferred_node, *nodes);
 793		/* else return empty node mask for local allocation */
 794		break;
 795	default:
 796		BUG();
 797	}
 798}
 799
 800static int lookup_node(unsigned long addr)
 801{
 802	struct page *p;
 803	int err;
 804
 805	err = get_user_pages(addr & PAGE_MASK, 1, 0, &p, NULL);
 806	if (err >= 0) {
 807		err = page_to_nid(p);
 808		put_page(p);
 809	}
 810	return err;
 811}
 812
 813/* Retrieve NUMA policy */
 814static long do_get_mempolicy(int *policy, nodemask_t *nmask,
 815			     unsigned long addr, unsigned long flags)
 816{
 817	int err;
 818	struct mm_struct *mm = current->mm;
 819	struct vm_area_struct *vma = NULL;
 820	struct mempolicy *pol = current->mempolicy;
 821
 822	if (flags &
 823		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
 824		return -EINVAL;
 825
 826	if (flags & MPOL_F_MEMS_ALLOWED) {
 827		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
 828			return -EINVAL;
 829		*policy = 0;	/* just so it's initialized */
 830		task_lock(current);
 831		*nmask  = cpuset_current_mems_allowed;
 832		task_unlock(current);
 833		return 0;
 834	}
 835
 836	if (flags & MPOL_F_ADDR) {
 837		/*
 838		 * Do NOT fall back to task policy if the
 839		 * vma/shared policy at addr is NULL.  We
 840		 * want to return MPOL_DEFAULT in this case.
 841		 */
 842		down_read(&mm->mmap_sem);
 843		vma = find_vma_intersection(mm, addr, addr+1);
 844		if (!vma) {
 845			up_read(&mm->mmap_sem);
 846			return -EFAULT;
 847		}
 848		if (vma->vm_ops && vma->vm_ops->get_policy)
 849			pol = vma->vm_ops->get_policy(vma, addr);
 850		else
 851			pol = vma->vm_policy;
 852	} else if (addr)
 853		return -EINVAL;
 854
 855	if (!pol)
 856		pol = &default_policy;	/* indicates default behavior */
 857
 858	if (flags & MPOL_F_NODE) {
 859		if (flags & MPOL_F_ADDR) {
 860			err = lookup_node(addr);
 861			if (err < 0)
 862				goto out;
 863			*policy = err;
 864		} else if (pol == current->mempolicy &&
 865				pol->mode == MPOL_INTERLEAVE) {
 866			*policy = next_node_in(current->il_prev, pol->v.nodes);
 867		} else {
 868			err = -EINVAL;
 869			goto out;
 870		}
 871	} else {
 872		*policy = pol == &default_policy ? MPOL_DEFAULT :
 873						pol->mode;
 874		/*
 875		 * Internal mempolicy flags must be masked off before exposing
 876		 * the policy to userspace.
 877		 */
 878		*policy |= (pol->flags & MPOL_MODE_FLAGS);
 879	}
 880
 881	err = 0;
 882	if (nmask) {
 883		if (mpol_store_user_nodemask(pol)) {
 884			*nmask = pol->w.user_nodemask;
 885		} else {
 886			task_lock(current);
 887			get_policy_nodemask(pol, nmask);
 888			task_unlock(current);
 889		}
 890	}
 891
 892 out:
 893	mpol_cond_put(pol);
 894	if (vma)
 895		up_read(&current->mm->mmap_sem);
 896	return err;
 897}
 898
 899#ifdef CONFIG_MIGRATION
 900/*
  901 * page migration; THP tail pages can be passed.
 902 */
 903static void migrate_page_add(struct page *page, struct list_head *pagelist,
 904				unsigned long flags)
 905{
 906	struct page *head = compound_head(page);
 907	/*
 908	 * Avoid migrating a page that is shared with others.
 909	 */
 910	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
 911		if (!isolate_lru_page(head)) {
 912			list_add_tail(&head->lru, pagelist);
 913			mod_node_page_state(page_pgdat(head),
 914				NR_ISOLATED_ANON + page_is_file_cache(head),
 915				hpage_nr_pages(head));
 916		}
 917	}
 918}
 919
 920/* page allocation callback for NUMA node migration */
 921struct page *alloc_new_node_page(struct page *page, unsigned long node)
 922{
 923	if (PageHuge(page))
 924		return alloc_huge_page_node(page_hstate(compound_head(page)),
 925					node);
 926	else if (PageTransHuge(page)) {
 927		struct page *thp;
 928
 929		thp = alloc_pages_node(node,
 930			(GFP_TRANSHUGE | __GFP_THISNODE),
 931			HPAGE_PMD_ORDER);
 932		if (!thp)
 933			return NULL;
 934		prep_transhuge_page(thp);
 935		return thp;
 936	} else
 937		return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
 938						    __GFP_THISNODE, 0);
 939}
 940
 941/*
 942 * Migrate pages from one node to a target node.
 943 * Returns error or the number of pages not migrated.
 944 */
 945static int migrate_to_node(struct mm_struct *mm, int source, int dest,
 946			   int flags)
 947{
 948	nodemask_t nmask;
 
 949	LIST_HEAD(pagelist);
 950	int err = 0;
 951
 952	nodes_clear(nmask);
 953	node_set(source, nmask);
 954
 955	/*
 956	 * This does not "check" the range but isolates all pages that
 957	 * need migration.  Between passing in the full user address
 958	 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
 959	 */
 
 960	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
 961	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
 962			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
 963
 964	if (!list_empty(&pagelist)) {
 965		err = migrate_pages(&pagelist, alloc_new_node_page, NULL, dest,
 966					MIGRATE_SYNC, MR_SYSCALL);
 967		if (err)
 968			putback_movable_pages(&pagelist);
 969	}
 970
 971	return err;
 972}
 973
 974/*
 975 * Move pages between the two nodesets so as to preserve the physical
 976 * layout as much as possible.
 977 *
  978 * Returns the number of pages that could not be moved.
 979 */
 980int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
 981		     const nodemask_t *to, int flags)
 982{
 983	int busy = 0;
 984	int err;
 985	nodemask_t tmp;
 986
 987	err = migrate_prep();
 988	if (err)
 989		return err;
 990
 991	down_read(&mm->mmap_sem);
 992
 993	/*
 994	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
 995	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
 996	 * bit in 'tmp', and return that <source, dest> pair for migration.
 997	 * The pair of nodemasks 'to' and 'from' define the map.
 998	 *
 999	 * If no pair of bits is found that way, fallback to picking some
1000	 * pair of 'source' and 'dest' bits that are not the same.  If the
1001	 * 'source' and 'dest' bits are the same, this represents a node
1002	 * that will be migrating to itself, so no pages need move.
1003	 *
1004	 * If no bits are left in 'tmp', or if all remaining bits left
1005	 * in 'tmp' correspond to the same bit in 'to', return false
1006	 * (nothing left to migrate).
1007	 *
1008	 * This lets us pick a pair of nodes to migrate between, such that
1009	 * if possible the dest node is not already occupied by some other
1010	 * source node, minimizing the risk of overloading the memory on a
1011	 * node that would happen if we migrated incoming memory to a node
 1012	 * before migrating outgoing memory off that same node.
1013	 *
1014	 * A single scan of tmp is sufficient.  As we go, we remember the
1015	 * most recent <s, d> pair that moved (s != d).  If we find a pair
1016	 * that not only moved, but what's better, moved to an empty slot
1017	 * (d is not set in tmp), then we break out then, with that pair.
 1018	 * Otherwise when we finish scanning tmp, we at least have the
1019	 * most recent <s, d> pair that moved.  If we get all the way through
1020	 * the scan of tmp without finding any node that moved, much less
1021	 * moved to an empty node, then there is nothing left worth migrating.
1022	 */
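	/*
	 * Worked example (illustrative): with from = {0,1} and to = {2,3},
	 * the first scan picks s=0, d=2; node 2 is not in tmp, so we break
	 * out and migrate 0 -> 2.  tmp is then {1}; the next pass picks
	 * s=1, d=3 and migrates 1 -> 3, after which tmp is empty and the
	 * loop ends.
	 */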
1023
1024	tmp = *from;
1025	while (!nodes_empty(tmp)) {
1026		int s,d;
1027		int source = NUMA_NO_NODE;
1028		int dest = 0;
1029
1030		for_each_node_mask(s, tmp) {
1031
1032			/*
1033			 * do_migrate_pages() tries to maintain the relative
1034			 * node relationship of the pages established between
1035			 * threads and memory areas.
1036                         *
1037			 * However if the number of source nodes is not equal to
1038			 * the number of destination nodes we can not preserve
1039			 * this node relative relationship.  In that case, skip
1040			 * copying memory from a node that is in the destination
1041			 * mask.
1042			 *
1043			 * Example: [2,3,4] -> [3,4,5] moves everything.
 1044			 *          [0-7] -> [3,4,5] moves only 0,1,2,6,7.
1045			 */
1046
1047			if ((nodes_weight(*from) != nodes_weight(*to)) &&
1048						(node_isset(s, *to)))
1049				continue;
1050
1051			d = node_remap(s, *from, *to);
1052			if (s == d)
1053				continue;
1054
1055			source = s;	/* Node moved. Memorize */
1056			dest = d;
1057
1058			/* dest not in remaining from nodes? */
1059			if (!node_isset(dest, tmp))
1060				break;
1061		}
1062		if (source == NUMA_NO_NODE)
1063			break;
1064
1065		node_clear(source, tmp);
1066		err = migrate_to_node(mm, source, dest, flags);
1067		if (err > 0)
1068			busy += err;
1069		if (err < 0)
1070			break;
1071	}
1072	up_read(&mm->mmap_sem);
1073	if (err < 0)
1074		return err;
1075	return busy;
1076
1077}
1078
1079/*
1080 * Allocate a new page for page migration based on vma policy.
1081 * Start by assuming the page is mapped by the same vma as contains @start.
1082 * Search forward from there, if not.  N.B., this assumes that the
1083 * list of pages handed to migrate_pages()--which is how we get here--
1084 * is in virtual address order.
1085 */
1086static struct page *new_page(struct page *page, unsigned long start)
1087{
 
1088	struct vm_area_struct *vma;
1089	unsigned long uninitialized_var(address);
1090
1091	vma = find_vma(current->mm, start);
1092	while (vma) {
1093		address = page_address_in_vma(page, vma);
1094		if (address != -EFAULT)
1095			break;
1096		vma = vma->vm_next;
1097	}
1098
1099	if (PageHuge(page)) {
1100		return alloc_huge_page_vma(page_hstate(compound_head(page)),
1101				vma, address);
1102	} else if (PageTransHuge(page)) {
1103		struct page *thp;
1104
1105		thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
1106					 HPAGE_PMD_ORDER);
1107		if (!thp)
1108			return NULL;
1109		prep_transhuge_page(thp);
1110		return thp;
1111	}
1112	/*
1113	 * if !vma, alloc_page_vma() will use task or system default policy
1114	 */
1115	return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL,
1116			vma, address);
 
1117}
1118#else
1119
1120static void migrate_page_add(struct page *page, struct list_head *pagelist,
1121				unsigned long flags)
1122{
 
1123}
1124
1125int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1126		     const nodemask_t *to, int flags)
1127{
1128	return -ENOSYS;
1129}
1130
1131static struct page *new_page(struct page *page, unsigned long start)
1132{
1133	return NULL;
1134}
1135#endif
1136
1137static long do_mbind(unsigned long start, unsigned long len,
1138		     unsigned short mode, unsigned short mode_flags,
1139		     nodemask_t *nmask, unsigned long flags)
1140{
1141	struct mm_struct *mm = current->mm;
1142	struct mempolicy *new;
1143	unsigned long end;
1144	int err;
 
1145	LIST_HEAD(pagelist);
1146
1147	if (flags & ~(unsigned long)MPOL_MF_VALID)
1148		return -EINVAL;
1149	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1150		return -EPERM;
1151
1152	if (start & ~PAGE_MASK)
1153		return -EINVAL;
1154
1155	if (mode == MPOL_DEFAULT)
1156		flags &= ~MPOL_MF_STRICT;
1157
1158	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
1159	end = start + len;
1160
1161	if (end < start)
1162		return -EINVAL;
1163	if (end == start)
1164		return 0;
1165
1166	new = mpol_new(mode, mode_flags, nmask);
1167	if (IS_ERR(new))
1168		return PTR_ERR(new);
1169
1170	if (flags & MPOL_MF_LAZY)
1171		new->flags |= MPOL_F_MOF;
1172
1173	/*
1174	 * If we are using the default policy then operation
1175	 * on discontinuous address spaces is okay after all
1176	 */
1177	if (!new)
1178		flags |= MPOL_MF_DISCONTIG_OK;
1179
1180	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1181		 start, start + len, mode, mode_flags,
1182		 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
1183
1184	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1185
1186		err = migrate_prep();
1187		if (err)
1188			goto mpol_out;
1189	}
1190	{
1191		NODEMASK_SCRATCH(scratch);
1192		if (scratch) {
1193			down_write(&mm->mmap_sem);
1194			task_lock(current);
1195			err = mpol_set_nodemask(new, nmask, scratch);
1196			task_unlock(current);
1197			if (err)
1198				up_write(&mm->mmap_sem);
1199		} else
1200			err = -ENOMEM;
1201		NODEMASK_SCRATCH_FREE(scratch);
1202	}
1203	if (err)
1204		goto mpol_out;
1205
1206	err = queue_pages_range(mm, start, end, nmask,
1207			  flags | MPOL_MF_INVERT, &pagelist);
1208	if (!err)
1209		err = mbind_range(mm, start, end, new);
1210
1211	if (!err) {
1212		int nr_failed = 0;
1213
1214		if (!list_empty(&pagelist)) {
1215			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
1216			nr_failed = migrate_pages(&pagelist, new_page, NULL,
1217				start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
1218			if (nr_failed)
1219				putback_movable_pages(&pagelist);
1220		}
1221
1222		if (nr_failed && (flags & MPOL_MF_STRICT))
1223			err = -EIO;
1224	} else
1225		putback_movable_pages(&pagelist);
1226
1227	up_write(&mm->mmap_sem);
1228 mpol_out:
1229	mpol_put(new);
1230	return err;
1231}
1232
1233/*
1234 * User space interface with variable sized bitmaps for nodelists.
1235 */
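/*
 * Example (illustrative): a caller hands in an array of unsigned longs plus
 * a bit count, e.g.
 *
 *	unsigned long nodes[2] = { 0 };		(room for 128 node bits)
 *	nodes[0] |= 1UL << 3;			(allow node 3 only)
 *	set_mempolicy(MPOL_BIND, nodes, 128);	(maxnode counts bits, not longs)
 *
 * get_nodes() below copies and validates such a mask into the kernel's
 * fixed-size nodemask_t, and copy_nodes_to_user() does the reverse.
 */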
1236
1237/* Copy a node mask from user space. */
1238static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
1239		     unsigned long maxnode)
1240{
1241	unsigned long k;
1242	unsigned long t;
1243	unsigned long nlongs;
1244	unsigned long endmask;
1245
1246	--maxnode;
1247	nodes_clear(*nodes);
1248	if (maxnode == 0 || !nmask)
1249		return 0;
1250	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1251		return -EINVAL;
1252
1253	nlongs = BITS_TO_LONGS(maxnode);
1254	if ((maxnode % BITS_PER_LONG) == 0)
1255		endmask = ~0UL;
1256	else
1257		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
1258
1259	/*
 1260	 * When the user specified more nodes than supported, just check
 1261	 * that the unsupported part is all zero.
 1262	 *
 1263	 * If maxnode has more longs than MAX_NUMNODES, check
 1264	 * the bits in that area first, and then go through the
 1265	 * remaining bits, which are equal to or bigger than MAX_NUMNODES.
1266	 * Otherwise, just check bits [MAX_NUMNODES, maxnode).
1267	 */
1268	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
1269		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
1270			if (get_user(t, nmask + k))
1271				return -EFAULT;
1272			if (k == nlongs - 1) {
1273				if (t & endmask)
1274					return -EINVAL;
1275			} else if (t)
1276				return -EINVAL;
1277		}
1278		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
1279		endmask = ~0UL;
1280	}
1281
1282	if (maxnode > MAX_NUMNODES && MAX_NUMNODES % BITS_PER_LONG != 0) {
1283		unsigned long valid_mask = endmask;
1284
1285		valid_mask &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
1286		if (get_user(t, nmask + nlongs - 1))
1287			return -EFAULT;
1288		if (t & valid_mask)
1289			return -EINVAL;
1290	}
1291
1292	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
1293		return -EFAULT;
1294	nodes_addr(*nodes)[nlongs-1] &= endmask;
1295	return 0;
1296}
1297
1298/* Copy a kernel node mask to user space */
1299static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1300			      nodemask_t *nodes)
1301{
1302	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1303	const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
1304
1305	if (copy > nbytes) {
1306		if (copy > PAGE_SIZE)
1307			return -EINVAL;
1308		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1309			return -EFAULT;
1310		copy = nbytes;
 
1311	}
1312	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1313}
1314
1315static long kernel_mbind(unsigned long start, unsigned long len,
1316			 unsigned long mode, const unsigned long __user *nmask,
1317			 unsigned long maxnode, unsigned int flags)
1318{
 
1319	nodemask_t nodes;
 
1320	int err;
1321	unsigned short mode_flags;
1322
1323	mode_flags = mode & MPOL_MODE_FLAGS;
1324	mode &= ~MPOL_MODE_FLAGS;
1325	if (mode >= MPOL_MAX)
1326		return -EINVAL;
1327	if ((mode_flags & MPOL_F_STATIC_NODES) &&
1328	    (mode_flags & MPOL_F_RELATIVE_NODES))
1329		return -EINVAL;
1330	err = get_nodes(&nodes, nmask, maxnode);
1331	if (err)
1332		return err;
1333	return do_mbind(start, len, mode, mode_flags, &nodes, flags);
1334}
1335
1336SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1337		unsigned long, mode, const unsigned long __user *, nmask,
1338		unsigned long, maxnode, unsigned int, flags)
1339{
1340	return kernel_mbind(start, len, mode, nmask, maxnode, flags);
1341}
1342
1343/* Set the process memory policy */
1344static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
1345				 unsigned long maxnode)
1346{
1347	int err;
1348	nodemask_t nodes;
1349	unsigned short flags;
1350
1351	flags = mode & MPOL_MODE_FLAGS;
1352	mode &= ~MPOL_MODE_FLAGS;
1353	if ((unsigned int)mode >= MPOL_MAX)
1354		return -EINVAL;
1355	if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
1356		return -EINVAL;
1357	err = get_nodes(&nodes, nmask, maxnode);
1358	if (err)
1359		return err;
1360	return do_set_mempolicy(mode, flags, &nodes);
 
1361}
1362
1363SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1364		unsigned long, maxnode)
1365{
1366	return kernel_set_mempolicy(mode, nmask, maxnode);
1367}
1368
1369static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
1370				const unsigned long __user *old_nodes,
1371				const unsigned long __user *new_nodes)
1372{
1373	struct mm_struct *mm = NULL;
1374	struct task_struct *task;
1375	nodemask_t task_nodes;
1376	int err;
1377	nodemask_t *old;
1378	nodemask_t *new;
1379	NODEMASK_SCRATCH(scratch);
1380
1381	if (!scratch)
1382		return -ENOMEM;
1383
1384	old = &scratch->mask1;
1385	new = &scratch->mask2;
1386
1387	err = get_nodes(old, old_nodes, maxnode);
1388	if (err)
1389		goto out;
1390
1391	err = get_nodes(new, new_nodes, maxnode);
1392	if (err)
1393		goto out;
1394
1395	/* Find the mm_struct */
1396	rcu_read_lock();
1397	task = pid ? find_task_by_vpid(pid) : current;
1398	if (!task) {
1399		rcu_read_unlock();
1400		err = -ESRCH;
1401		goto out;
1402	}
1403	get_task_struct(task);
1404
1405	err = -EINVAL;
1406
1407	/*
1408	 * Check if this process has the right to modify the specified process.
1409	 * Use the regular "ptrace_may_access()" checks.
1410	 */
1411	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
1412		rcu_read_unlock();
1413		err = -EPERM;
1414		goto out_put;
1415	}
1416	rcu_read_unlock();
1417
1418	task_nodes = cpuset_mems_allowed(task);
1419	/* Is the user allowed to access the target nodes? */
1420	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
1421		err = -EPERM;
1422		goto out_put;
1423	}
1424
1425	task_nodes = cpuset_mems_allowed(current);
1426	nodes_and(*new, *new, task_nodes);
1427	if (nodes_empty(*new))
1428		goto out_put;
1429
1430	nodes_and(*new, *new, node_states[N_MEMORY]);
1431	if (nodes_empty(*new))
1432		goto out_put;
1433
1434	err = security_task_movememory(task);
1435	if (err)
1436		goto out_put;
1437
1438	mm = get_task_mm(task);
1439	put_task_struct(task);
1440
1441	if (!mm) {
1442		err = -EINVAL;
1443		goto out;
1444	}
1445
1446	err = do_migrate_pages(mm, old, new,
1447		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
1448
1449	mmput(mm);
1450out:
1451	NODEMASK_SCRATCH_FREE(scratch);
1452
1453	return err;
1454
1455out_put:
1456	put_task_struct(task);
1457	goto out;
1458
1459}
1460
1461SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1462		const unsigned long __user *, old_nodes,
1463		const unsigned long __user *, new_nodes)
1464{
1465	return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
1466}
1467
1468
1469/* Retrieve NUMA policy */
1470static int kernel_get_mempolicy(int __user *policy,
1471				unsigned long __user *nmask,
1472				unsigned long maxnode,
1473				unsigned long addr,
1474				unsigned long flags)
1475{
1476	int err;
1477	int uninitialized_var(pval);
1478	nodemask_t nodes;
1479
1480	if (nmask != NULL && maxnode < MAX_NUMNODES)
1481		return -EINVAL;
1482
1483	err = do_get_mempolicy(&pval, &nodes, addr, flags);
1484
1485	if (err)
1486		return err;
1487
1488	if (policy && put_user(pval, policy))
1489		return -EFAULT;
1490
1491	if (nmask)
1492		err = copy_nodes_to_user(nmask, maxnode, &nodes);
1493
1494	return err;
1495}
1496
1497SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1498		unsigned long __user *, nmask, unsigned long, maxnode,
1499		unsigned long, addr, unsigned long, flags)
1500{
1501	return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
1502}
1503
1504#ifdef CONFIG_COMPAT
1505
1506COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1507		       compat_ulong_t __user *, nmask,
1508		       compat_ulong_t, maxnode,
1509		       compat_ulong_t, addr, compat_ulong_t, flags)
1510{
1511	long err;
1512	unsigned long __user *nm = NULL;
1513	unsigned long nr_bits, alloc_size;
1514	DECLARE_BITMAP(bm, MAX_NUMNODES);
1515
1516	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1517	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1518
1519	if (nmask)
1520		nm = compat_alloc_user_space(alloc_size);
1521
1522	err = kernel_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1523
1524	if (!err && nmask) {
1525		unsigned long copy_size;
1526		copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
1527		err = copy_from_user(bm, nm, copy_size);
1528		/* ensure entire bitmap is zeroed */
1529		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1530		err |= compat_put_bitmap(nmask, bm, nr_bits);
1531	}
1532
1533	return err;
1534}
1535
1536COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
1537		       compat_ulong_t, maxnode)
1538{
1539	unsigned long __user *nm = NULL;
1540	unsigned long nr_bits, alloc_size;
1541	DECLARE_BITMAP(bm, MAX_NUMNODES);
1542
1543	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1544	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1545
1546	if (nmask) {
1547		if (compat_get_bitmap(bm, nmask, nr_bits))
1548			return -EFAULT;
1549		nm = compat_alloc_user_space(alloc_size);
1550		if (copy_to_user(nm, bm, alloc_size))
1551			return -EFAULT;
1552	}
1553
1554	return kernel_set_mempolicy(mode, nm, nr_bits+1);
1555}
1556
1557COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
1558		       compat_ulong_t, mode, compat_ulong_t __user *, nmask,
1559		       compat_ulong_t, maxnode, compat_ulong_t, flags)
1560{
1561	unsigned long __user *nm = NULL;
1562	unsigned long nr_bits, alloc_size;
1563	nodemask_t bm;
1564
1565	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1566	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1567
1568	if (nmask) {
1569		if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
1570			return -EFAULT;
1571		nm = compat_alloc_user_space(alloc_size);
1572		if (copy_to_user(nm, nodes_addr(bm), alloc_size))
1573			return -EFAULT;
1574	}
1575
1576	return kernel_mbind(start, len, mode, nm, nr_bits+1, flags);
1577}
 
1578
1579COMPAT_SYSCALL_DEFINE4(migrate_pages, compat_pid_t, pid,
1580		       compat_ulong_t, maxnode,
1581		       const compat_ulong_t __user *, old_nodes,
1582		       const compat_ulong_t __user *, new_nodes)
1583{
1584	unsigned long __user *old = NULL;
1585	unsigned long __user *new = NULL;
1586	nodemask_t tmp_mask;
1587	unsigned long nr_bits;
1588	unsigned long size;
1589
1590	nr_bits = min_t(unsigned long, maxnode - 1, MAX_NUMNODES);
1591	size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1592	if (old_nodes) {
1593		if (compat_get_bitmap(nodes_addr(tmp_mask), old_nodes, nr_bits))
1594			return -EFAULT;
1595		old = compat_alloc_user_space(new_nodes ? size * 2 : size);
1596		if (new_nodes)
1597			new = old + size / sizeof(unsigned long);
1598		if (copy_to_user(old, nodes_addr(tmp_mask), size))
1599			return -EFAULT;
1600	}
1601	if (new_nodes) {
1602		if (compat_get_bitmap(nodes_addr(tmp_mask), new_nodes, nr_bits))
1603			return -EFAULT;
1604		if (new == NULL)
1605			new = compat_alloc_user_space(size);
1606		if (copy_to_user(new, nodes_addr(tmp_mask), size))
1607			return -EFAULT;
1608	}
1609	return kernel_migrate_pages(pid, nr_bits + 1, old, new);
1610}
1611
1612#endif /* CONFIG_COMPAT */
1613
1614struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
1615						unsigned long addr)
1616{
1617	struct mempolicy *pol = NULL;
1618
1619	if (vma) {
1620		if (vma->vm_ops && vma->vm_ops->get_policy) {
1621			pol = vma->vm_ops->get_policy(vma, addr);
1622		} else if (vma->vm_policy) {
1623			pol = vma->vm_policy;
1624
1625			/*
1626			 * shmem_alloc_page() passes MPOL_F_SHARED policy with
1627			 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
1628			 * count on these policies which will be dropped by
1629			 * mpol_cond_put() later
1630			 */
1631			if (mpol_needs_cond_ref(pol))
1632				mpol_get(pol);
1633		}
1634	}
1635
1636	return pol;
1637}
1638
1639/*
1640 * get_vma_policy(@vma, @addr)
1641 * @vma: virtual memory area whose policy is sought
1642 * @addr: address in @vma for shared policy lookup
1643 *
1644 * Returns effective policy for a VMA at specified address.
1645 * Falls back to current->mempolicy or system default policy, as necessary.
1646 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1647 * count--added by the get_policy() vm_op, as appropriate--to protect against
1648 * freeing by another task.  It is the caller's responsibility to free the
1649 * extra reference for shared policies.
1650 */
1651static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1652						unsigned long addr)
1653{
1654	struct mempolicy *pol = __get_vma_policy(vma, addr);
1655
1656	if (!pol)
1657		pol = get_task_policy(current);
1658
1659	return pol;
1660}
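/*
 * Typical call pattern (illustrative): callers in this file pair the lookup
 * with mpol_cond_put(), which drops the reference only for MPOL_F_SHARED
 * policies:
 *
 *	struct mempolicy *pol = get_vma_policy(vma, addr);
 *	... use pol to choose a node or nodemask for the allocation ...
 *	mpol_cond_put(pol);
 */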
1661
1662bool vma_policy_mof(struct vm_area_struct *vma)
1663{
1664	struct mempolicy *pol;
1665
1666	if (vma->vm_ops && vma->vm_ops->get_policy) {
1667		bool ret = false;
1668
1669		pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1670		if (pol && (pol->flags & MPOL_F_MOF))
1671			ret = true;
1672		mpol_cond_put(pol);
1673
1674		return ret;
1675	}
1676
1677	pol = vma->vm_policy;
1678	if (!pol)
1679		pol = get_task_policy(current);
1680
1681	return pol->flags & MPOL_F_MOF;
1682}
1683
1684static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1685{
1686	enum zone_type dynamic_policy_zone = policy_zone;
1687
1688	BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1689
1690	/*
1691	 * if policy->v.nodes has movable memory only,
1692	 * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only.
1693	 *
 1694	 * policy->v.nodes is intersected with node_states[N_MEMORY],
 1695	 * so if the following test fails, it implies
1696	 * policy->v.nodes has movable memory only.
1697	 */
1698	if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
1699		dynamic_policy_zone = ZONE_MOVABLE;
1700
1701	return zone >= dynamic_policy_zone;
1702}
1703
1704/*
1705 * Return a nodemask representing a mempolicy for filtering nodes for
1706 * page allocation
1707 */
1708static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
1709{
1710	/* Lower zones don't get a nodemask applied for MPOL_BIND */
1711	if (unlikely(policy->mode == MPOL_BIND) &&
1712			apply_policy_zone(policy, gfp_zone(gfp)) &&
1713			cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1714		return &policy->v.nodes;
1715
1716	return NULL;
1717}
1718
1719/* Return the node id preferred by the given mempolicy, or the given id */
1720static int policy_node(gfp_t gfp, struct mempolicy *policy,
1721								int nd)
1722{
1723	if (policy->mode == MPOL_PREFERRED && !(policy->flags & MPOL_F_LOCAL))
1724		nd = policy->v.preferred_node;
1725	else {
1726		/*
1727		 * __GFP_THISNODE shouldn't even be used with the bind policy
1728		 * because we might easily break the expectation to stay on the
1729		 * requested node and not break the policy.
1730		 */
1731		WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
1732	}
1733
1734	return nd;
1735}
1736
1737/* Do dynamic interleaving for a process */
1738static unsigned interleave_nodes(struct mempolicy *policy)
1739{
1740	unsigned next;
1741	struct task_struct *me = current;
1742
1743	next = next_node_in(me->il_prev, policy->v.nodes);
1744	if (next < MAX_NUMNODES)
1745		me->il_prev = next;
1746	return next;
1747}
1748
1749/*
1750 * Depending on the memory policy provide a node from which to allocate the
1751 * next slab entry.
1752 */
1753unsigned int mempolicy_slab_node(void)
1754{
1755	struct mempolicy *policy;
1756	int node = numa_mem_id();
1757
1758	if (in_interrupt())
1759		return node;
1760
1761	policy = current->mempolicy;
1762	if (!policy || policy->flags & MPOL_F_LOCAL)
1763		return node;
1764
1765	switch (policy->mode) {
1766	case MPOL_PREFERRED:
1767		/*
1768		 * handled MPOL_F_LOCAL above
1769		 */
1770		return policy->v.preferred_node;
1771
1772	case MPOL_INTERLEAVE:
1773		return interleave_nodes(policy);
1774
1775	case MPOL_BIND: {
1776		struct zoneref *z;
1777
1778		/*
1779		 * Follow bind policy behavior and start allocation at the
1780		 * first node.
1781		 */
1782		struct zonelist *zonelist;
1783		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1784		zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
1785		z = first_zones_zonelist(zonelist, highest_zoneidx,
1786							&policy->v.nodes);
1787		return z->zone ? z->zone->node : node;
1788	}
1789
1790	default:
1791		BUG();
1792	}
1793}
1794
1795/*
1796 * Do static interleaving for a VMA with known offset @n.  Returns the n'th
1797 * node in pol->v.nodes (starting from n=0), wrapping around if n exceeds the
1798 * number of present nodes.
1799 */
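/*
 * Example (illustrative): with pol->v.nodes = {0,2,5} and n = 4, nnodes is 3
 * and target = 4 % 3 = 1, so the walk below returns the second node, node 2.
 */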
1800static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
1801{
1802	unsigned nnodes = nodes_weight(pol->v.nodes);
1803	unsigned target;
1804	int i;
1805	int nid;
1806
 
1807	if (!nnodes)
1808		return numa_node_id();
1809	target = (unsigned int)n % nnodes;
1810	nid = first_node(pol->v.nodes);
1811	for (i = 0; i < target; i++)
1812		nid = next_node(nid, pol->v.nodes);
1813	return nid;
1814}
1815
1816/* Determine a node number for interleave */
1817static inline unsigned interleave_nid(struct mempolicy *pol,
1818		 struct vm_area_struct *vma, unsigned long addr, int shift)
1819{
1820	if (vma) {
1821		unsigned long off;
1822
1823		/*
1824		 * for small pages, there is no difference between
1825		 * shift and PAGE_SHIFT, so the bit-shift is safe.
1826		 * for huge pages, since vm_pgoff is in units of small
1827		 * pages, we need to shift off the always 0 bits to get
1828		 * a useful offset.
1829		 */
1830		BUG_ON(shift < PAGE_SHIFT);
1831		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
1832		off += (addr - vma->vm_start) >> shift;
1833		return offset_il_node(pol, off);
1834	} else
1835		return interleave_nodes(pol);
1836}
1837
1838#ifdef CONFIG_HUGETLBFS
1839/*
1840 * huge_node(@vma, @addr, @gfp_flags, @mpol)
1841 * @vma: virtual memory area whose policy is sought
1842 * @addr: address in @vma for shared policy lookup and interleave policy
1843 * @gfp_flags: for requested zone
1844 * @mpol: pointer to mempolicy pointer for reference counted mempolicy
1845 * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
1846 *
1847 * Returns a nid suitable for a huge page allocation and a pointer
1848 * to the struct mempolicy for conditional unref after allocation.
 1849 * If the effective policy is 'BIND', returns a pointer to the mempolicy's
1850 * @nodemask for filtering the zonelist.
1851 *
1852 * Must be protected by read_mems_allowed_begin()
1853 */
1854int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
1855				struct mempolicy **mpol, nodemask_t **nodemask)
1856{
1857	int nid;
 
1858
1859	*mpol = get_vma_policy(vma, addr);
1860	*nodemask = NULL;	/* assume !MPOL_BIND */
 
1861
1862	if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
1863		nid = interleave_nid(*mpol, vma, addr,
1864					huge_page_shift(hstate_vma(vma)));
1865	} else {
1866		nid = policy_node(gfp_flags, *mpol, numa_node_id());
1867		if ((*mpol)->mode == MPOL_BIND)
1868			*nodemask = &(*mpol)->v.nodes;
1869	}
1870	return nid;
1871}
1872
1873/*
1874 * init_nodemask_of_mempolicy
1875 *
1876 * If the current task's mempolicy is "default" [NULL], return 'false'
1877 * to indicate default policy.  Otherwise, extract the policy nodemask
1878 * for 'bind' or 'interleave' policy into the argument nodemask, or
1879 * initialize the argument nodemask to contain the single node for
1880 * 'preferred' or 'local' policy and return 'true' to indicate presence
1881 * of non-default mempolicy.
1882 *
1883 * We don't bother with reference counting the mempolicy [mpol_get/put]
1884 * because the current task is examining its own mempolicy and a task's
1885 * mempolicy is only ever changed by the task itself.
1886 *
1887 * N.B., it is the caller's responsibility to free a returned nodemask.
1888 */
1889bool init_nodemask_of_mempolicy(nodemask_t *mask)
1890{
1891	struct mempolicy *mempolicy;
1892	int nid;
1893
1894	if (!(mask && current->mempolicy))
1895		return false;
1896
1897	task_lock(current);
1898	mempolicy = current->mempolicy;
1899	switch (mempolicy->mode) {
1900	case MPOL_PREFERRED:
1901		if (mempolicy->flags & MPOL_F_LOCAL)
1902			nid = numa_node_id();
1903		else
1904			nid = mempolicy->v.preferred_node;
1905		init_nodemask_of_node(mask, nid);
1906		break;
1907
1908	case MPOL_BIND:
1909		/* Fall through */
1910	case MPOL_INTERLEAVE:
1911		*mask =  mempolicy->v.nodes;
 
 
 
 
1912		break;
1913
1914	default:
1915		BUG();
1916	}
1917	task_unlock(current);
1918
1919	return true;
1920}
1921#endif
1922
1923/*
1924 * mempolicy_nodemask_intersects
1925 *
1926 * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
1927 * policy.  Otherwise, check for intersection between mask and the policy
1928 * nodemask for 'bind' or 'interleave' policy.  For 'preferred' or 'local'
1929 * policy, always return true since it may allocate elsewhere on fallback.
1930 *
1931 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
1932 */
1933bool mempolicy_nodemask_intersects(struct task_struct *tsk,
1934					const nodemask_t *mask)
1935{
1936	struct mempolicy *mempolicy;
1937	bool ret = true;
1938
1939	if (!mask)
1940		return ret;
 
1941	task_lock(tsk);
1942	mempolicy = tsk->mempolicy;
1943	if (!mempolicy)
1944		goto out;
1945
1946	switch (mempolicy->mode) {
1947	case MPOL_PREFERRED:
1948		/*
1949		 * MPOL_PREFERRED and MPOL_F_LOCAL only express preferred nodes to
1950		 * allocate from; they may fall back to other nodes when oom.
1951		 * Thus, it's possible for tsk to have allocated memory from
1952		 * nodes in mask.
1953		 */
1954		break;
1955	case MPOL_BIND:
1956	case MPOL_INTERLEAVE:
1957		ret = nodes_intersects(mempolicy->v.nodes, *mask);
1958		break;
1959	default:
1960		BUG();
1961	}
1962out:
1963	task_unlock(tsk);
 
1964	return ret;
1965}
1966
1967/* Allocate a page in interleaved policy.
1968   Own path because it needs to do special accounting. */
1969static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1970					unsigned nid)
1971{
1972	struct page *page;
1973
1974	page = __alloc_pages(gfp, order, nid);
1975	/* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
1976	if (!static_branch_likely(&vm_numa_stat_key))
1977		return page;
1978	if (page && page_to_nid(page) == nid) {
1979		preempt_disable();
1980		__inc_numa_state(page_zone(page), NUMA_INTERLEAVE_HIT);
1981		preempt_enable();
1982	}
1983	return page;
1984}
1985
1986/**
1987 * 	alloc_pages_vma	- Allocate a page for a VMA.
1988 *
1989 * 	@gfp:
1990 *      %GFP_USER    user allocation.
1991 *      %GFP_KERNEL  kernel allocations,
1992 *      %GFP_HIGHMEM highmem/user allocations,
1993 *      %GFP_FS      allocation should not call back into a file system.
1994 *      %GFP_ATOMIC  don't sleep.
1995 *
1996 *	@order: Order of the GFP allocation.
1997 * 	@vma:  Pointer to VMA or NULL if not available.
1998 *	@addr: Virtual Address of the allocation. Must be inside the VMA.
1999 *	@node: Which node to prefer for allocation (modulo policy).
2000 *	@hugepage: for hugepages try only the preferred node if possible
2001 *
2002 * 	This function allocates a page from the kernel page pool and applies
2003 *	a NUMA policy associated with the VMA or the current process.
2004 *	When VMA is not NULL caller must hold down_read on the mmap_sem of the
2005 *	mm_struct of the VMA to prevent it from going away. Should be used for
2006 *	all allocations for pages that will be mapped into user space. Returns
2007 *	NULL when no page can be allocated.
2008 */
2009struct page *
2010alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
2011		unsigned long addr, int node, bool hugepage)
2012{
2013	struct mempolicy *pol;
2014	struct page *page;
 
2015	int preferred_nid;
2016	nodemask_t *nmask;
2017
2018	pol = get_vma_policy(vma, addr);
2019
2020	if (pol->mode == MPOL_INTERLEAVE) {
 
2021		unsigned nid;
2022
2023		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
2024		mpol_cond_put(pol);
 
2025		page = alloc_page_interleave(gfp, order, nid);
2026		goto out;
2027	}
2028
2029	if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
2030		int hpage_node = node;
2031
2032		/*
2033		 * For hugepage allocation and non-interleave policy which
2034		 * allows the current node (or other explicitly preferred
2035		 * node) we only try to allocate from the current/preferred
2036		 * node and don't fall back to other nodes, as the cost of
2037		 * remote accesses would likely offset THP benefits.
2038		 *
2039		 * If the policy is interleave, or does not allow the current
2040		 * node in its nodemask, we allocate the standard way.
2041		 */
2042		if (pol->mode == MPOL_PREFERRED &&
2043						!(pol->flags & MPOL_F_LOCAL))
2044			hpage_node = pol->v.preferred_node;
2045
2046		nmask = policy_nodemask(gfp, pol);
2047		if (!nmask || node_isset(hpage_node, *nmask)) {
2048			mpol_cond_put(pol);
2049			page = __alloc_pages_node(hpage_node,
2050						gfp | __GFP_THISNODE, order);
2051			goto out;
2052		}
2053	}
2054
2055	nmask = policy_nodemask(gfp, pol);
2056	preferred_nid = policy_node(gfp, pol, node);
2057	page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
2058	mpol_cond_put(pol);
2059out:
2060	return page;
2061}
 
2062
2063/**
2064 * 	alloc_pages_current - Allocate pages.
2065 *
2066 *	@gfp:
2067 *		%GFP_USER   user allocation,
2068 *      	%GFP_KERNEL kernel allocation,
2069 *      	%GFP_HIGHMEM highmem allocation,
2070 *      	%GFP_FS     don't call back into a file system.
2071 *      	%GFP_ATOMIC don't sleep.
2072 *	@order: Power of two of allocation size in pages. 0 is a single page.
2073 *
2074 *	Allocate a page from the kernel page pool.  When not in
2075 *	interrupt context, apply the current process' NUMA policy.
2076 *	Returns NULL when no page can be allocated.
2077 */
2078struct page *alloc_pages_current(gfp_t gfp, unsigned order)
2079{
2080	struct mempolicy *pol = &default_policy;
2081	struct page *page;
2082
2083	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2084		pol = get_task_policy(current);
2085
2086	/*
2087	 * No reference counting needed for current->mempolicy
2088	 * nor system default_policy
2089	 */
2090	if (pol->mode == MPOL_INTERLEAVE)
2091		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
 
 
 
2092	else
2093		page = __alloc_pages_nodemask(gfp, order,
2094				policy_node(gfp, pol, numa_node_id()),
2095				policy_nodemask(gfp, pol));
2096
2097	return page;
2098}
2099EXPORT_SYMBOL(alloc_pages_current);
2100
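/*
 * Editorial example (user space, not part of this file): the task policy
 * consulted by alloc_pages_current() above is what set_mempolicy(2)
 * installs.  A minimal sketch using the <numaif.h> wrappers from libnuma,
 * assuming nodes 0 and 1 exist; the memset faults the buffer in so pages
 * are actually allocated, interleaved over the two nodes.
 *
 *	#include <numaif.h>
 *	#include <stdlib.h>
 *	#include <string.h>
 *
 *	int main(void)
 *	{
 *		unsigned long mask = (1UL << 0) | (1UL << 1);
 *		char *buf;
 *
 *		if (set_mempolicy(MPOL_INTERLEAVE, &mask, 8 * sizeof(mask)))
 *			return 1;
 *		buf = malloc(1 << 20);
 *		if (buf)
 *			memset(buf, 0, 1 << 20);
 *		return 0;
 *	}
 */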
2101int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2102{
2103	struct mempolicy *pol = mpol_dup(vma_policy(src));
2104
2105	if (IS_ERR(pol))
2106		return PTR_ERR(pol);
2107	dst->vm_policy = pol;
2108	return 0;
2109}
2110
2111/*
2112 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
2113 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
2114 * with the mems_allowed returned by cpuset_mems_allowed().  This
2115 * keeps mempolicies cpuset relative after its cpuset moves.  See
2116 * further kernel/cpuset.c update_nodemask().
2117 *
2118 * current's mempolicy may be rebound by the other task (the task that changes
2119 * the cpuset's mems), so we need not do rebind work for the current task.
2120 */
2121
2122/* Slow path of a mempolicy duplicate */
2123struct mempolicy *__mpol_dup(struct mempolicy *old)
2124{
2125	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2126
2127	if (!new)
2128		return ERR_PTR(-ENOMEM);
2129
2130	/* task's mempolicy is protected by alloc_lock */
2131	if (old == current->mempolicy) {
2132		task_lock(current);
2133		*new = *old;
2134		task_unlock(current);
2135	} else
2136		*new = *old;
2137
2138	if (current_cpuset_is_being_rebound()) {
2139		nodemask_t mems = cpuset_mems_allowed(current);
2140		mpol_rebind_policy(new, &mems);
2141	}
2142	atomic_set(&new->refcnt, 1);
2143	return new;
2144}
2145
2146/* Slow path of a mempolicy comparison */
2147bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
2148{
2149	if (!a || !b)
2150		return false;
2151	if (a->mode != b->mode)
2152		return false;
2153	if (a->flags != b->flags)
2154		return false;
 
 
2155	if (mpol_store_user_nodemask(a))
2156		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2157			return false;
2158
2159	switch (a->mode) {
2160	case MPOL_BIND:
2161		/* Fall through */
2162	case MPOL_INTERLEAVE:
2163		return !!nodes_equal(a->v.nodes, b->v.nodes);
2164	case MPOL_PREFERRED:
2165		/* a's ->flags is the same as b's */
2166		if (a->flags & MPOL_F_LOCAL)
2167			return true;
2168		return a->v.preferred_node == b->v.preferred_node;
2169	default:
2170		BUG();
2171		return false;
2172	}
2173}
2174
2175/*
2176 * Shared memory backing store policy support.
2177 *
2178 * Remember policies even when nobody has shared memory mapped.
2179 * The policies are kept in Red-Black tree linked from the inode.
2180 * They are protected by the sp->lock rwlock, which should be held
2181 * for any accesses to the tree.
2182 */
2183
2184/*
2185 * lookup first element intersecting start-end.  Caller holds sp->lock for
2186 * reading or for writing
2187 */
2188static struct sp_node *
2189sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2190{
2191	struct rb_node *n = sp->root.rb_node;
2192
2193	while (n) {
2194		struct sp_node *p = rb_entry(n, struct sp_node, nd);
2195
2196		if (start >= p->end)
2197			n = n->rb_right;
2198		else if (end <= p->start)
2199			n = n->rb_left;
2200		else
2201			break;
2202	}
2203	if (!n)
2204		return NULL;
2205	for (;;) {
2206		struct sp_node *w = NULL;
2207		struct rb_node *prev = rb_prev(n);
2208		if (!prev)
2209			break;
2210		w = rb_entry(prev, struct sp_node, nd);
2211		if (w->end <= start)
2212			break;
2213		n = prev;
2214	}
2215	return rb_entry(n, struct sp_node, nd);
2216}
2217
2218/*
2219 * Insert a new shared policy into the list.  Caller holds sp->lock for
2220 * writing.
2221 */
2222static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2223{
2224	struct rb_node **p = &sp->root.rb_node;
2225	struct rb_node *parent = NULL;
2226	struct sp_node *nd;
2227
2228	while (*p) {
2229		parent = *p;
2230		nd = rb_entry(parent, struct sp_node, nd);
2231		if (new->start < nd->start)
2232			p = &(*p)->rb_left;
2233		else if (new->end > nd->end)
2234			p = &(*p)->rb_right;
2235		else
2236			BUG();
2237	}
2238	rb_link_node(&new->nd, parent, p);
2239	rb_insert_color(&new->nd, &sp->root);
2240	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
2241		 new->policy ? new->policy->mode : 0);
2242}
2243
2244/* Find shared policy intersecting idx */
2245struct mempolicy *
2246mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2247{
2248	struct mempolicy *pol = NULL;
2249	struct sp_node *sn;
2250
2251	if (!sp->root.rb_node)
2252		return NULL;
2253	read_lock(&sp->lock);
2254	sn = sp_lookup(sp, idx, idx+1);
2255	if (sn) {
2256		mpol_get(sn->policy);
2257		pol = sn->policy;
2258	}
2259	read_unlock(&sp->lock);
2260	return pol;
2261}
2262
2263static void sp_free(struct sp_node *n)
2264{
2265	mpol_put(n->policy);
2266	kmem_cache_free(sn_cache, n);
2267}
2268
2269/**
2270 * mpol_misplaced - check whether current page node is valid in policy
2271 *
2272 * @page: page to be checked
2273 * @vma: vm area where page mapped
2274 * @addr: virtual address where page mapped
2275 *
2276 * Lookup current policy node id for vma,addr and "compare to" page's
2277 * node id.
2278 *
2279 * Returns:
2280 *	-1	- not misplaced, page is in the right node
2281 *	node	- node id where the page should be
2282 *
2283 * Policy determination "mimics" alloc_page_vma().
2284 * Called from fault path where we know the vma and faulting address.
 
 
 
2285 */
2286int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2287{
2288	struct mempolicy *pol;
2289	struct zoneref *z;
2290	int curnid = page_to_nid(page);
2291	unsigned long pgoff;
2292	int thiscpu = raw_smp_processor_id();
2293	int thisnid = cpu_to_node(thiscpu);
2294	int polnid = -1;
2295	int ret = -1;
2296
2297	pol = get_vma_policy(vma, addr);
2298	if (!(pol->flags & MPOL_F_MOF))
2299		goto out;
2300
2301	switch (pol->mode) {
2302	case MPOL_INTERLEAVE:
2303		pgoff = vma->vm_pgoff;
2304		pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
2305		polnid = offset_il_node(pol, pgoff);
2306		break;
2307
2308	case MPOL_PREFERRED:
2309		if (pol->flags & MPOL_F_LOCAL)
2310			polnid = numa_node_id();
2311		else
2312			polnid = pol->v.preferred_node;
 
 
 
2313		break;
2314
2315	case MPOL_BIND:
2316
 
2317		/*
2318		 * allows binding to multiple nodes.
2319		 * use current page if in policy nodemask,
2320		 * else select nearest allowed node, if any.
2321		 * If no allowed nodes, use current [!misplaced].
2322		 */
2323		if (node_isset(curnid, pol->v.nodes))
2324			goto out;
2325		z = first_zones_zonelist(
2326				node_zonelist(numa_node_id(), GFP_HIGHUSER),
2327				gfp_zone(GFP_HIGHUSER),
2328				&pol->v.nodes);
2329		polnid = z->zone->node;
2330		break;
2331
2332	default:
2333		BUG();
2334	}
2335
2336	/* Migrate the page towards the node whose CPU is referencing it */
2337	if (pol->flags & MPOL_F_MORON) {
2338		polnid = thisnid;
2339
2340		if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
2341			goto out;
2342	}
2343
2344	if (curnid != polnid)
2345		ret = polnid;
2346out:
2347	mpol_cond_put(pol);
2348
2349	return ret;
2350}
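/*
 * Editorial example: with a VMA policy of MPOL_BIND over {2,3} and
 * MPOL_F_MOF set, a fault on a page currently resident on node 0 makes
 * curnid (0) differ from polnid, so mpol_misplaced() returns a target
 * node from the bind nodemask and the caller may migrate the page; with
 * MPOL_F_MORON the target is the faulting CPU's node instead, gated by
 * should_numa_migrate_memory().
 */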
2351
2352/*
2353 * Drop the (possibly final) reference to task->mempolicy.  It needs to be
2354 * dropped after task->mempolicy is set to NULL so that any allocation done as
2355 * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2356 * policy.
2357 */
2358void mpol_put_task_policy(struct task_struct *task)
2359{
2360	struct mempolicy *pol;
2361
2362	task_lock(task);
2363	pol = task->mempolicy;
2364	task->mempolicy = NULL;
2365	task_unlock(task);
2366	mpol_put(pol);
2367}
2368
2369static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2370{
2371	pr_debug("deleting %lx-%lx\n", n->start, n->end);
2372	rb_erase(&n->nd, &sp->root);
2373	sp_free(n);
2374}
2375
2376static void sp_node_init(struct sp_node *node, unsigned long start,
2377			unsigned long end, struct mempolicy *pol)
2378{
2379	node->start = start;
2380	node->end = end;
2381	node->policy = pol;
2382}
2383
2384static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2385				struct mempolicy *pol)
2386{
2387	struct sp_node *n;
2388	struct mempolicy *newpol;
2389
2390	n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2391	if (!n)
2392		return NULL;
2393
2394	newpol = mpol_dup(pol);
2395	if (IS_ERR(newpol)) {
2396		kmem_cache_free(sn_cache, n);
2397		return NULL;
2398	}
2399	newpol->flags |= MPOL_F_SHARED;
2400	sp_node_init(n, start, end, newpol);
2401
2402	return n;
2403}
2404
2405/* Replace a policy range. */
2406static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2407				 unsigned long end, struct sp_node *new)
2408{
2409	struct sp_node *n;
2410	struct sp_node *n_new = NULL;
2411	struct mempolicy *mpol_new = NULL;
2412	int ret = 0;
2413
2414restart:
2415	write_lock(&sp->lock);
2416	n = sp_lookup(sp, start, end);
2417	/* Take care of old policies in the same range. */
2418	while (n && n->start < end) {
2419		struct rb_node *next = rb_next(&n->nd);
2420		if (n->start >= start) {
2421			if (n->end <= end)
2422				sp_delete(sp, n);
2423			else
2424				n->start = end;
2425		} else {
2426			/* Old policy spanning whole new range. */
2427			if (n->end > end) {
2428				if (!n_new)
2429					goto alloc_new;
2430
2431				*mpol_new = *n->policy;
2432				atomic_set(&mpol_new->refcnt, 1);
2433				sp_node_init(n_new, end, n->end, mpol_new);
2434				n->end = start;
2435				sp_insert(sp, n_new);
2436				n_new = NULL;
2437				mpol_new = NULL;
2438				break;
2439			} else
2440				n->end = start;
2441		}
2442		if (!next)
2443			break;
2444		n = rb_entry(next, struct sp_node, nd);
2445	}
2446	if (new)
2447		sp_insert(sp, new);
2448	write_unlock(&sp->lock);
2449	ret = 0;
2450
2451err_out:
2452	if (mpol_new)
2453		mpol_put(mpol_new);
2454	if (n_new)
2455		kmem_cache_free(sn_cache, n_new);
2456
2457	return ret;
2458
2459alloc_new:
2460	write_unlock(&sp->lock);
2461	ret = -ENOMEM;
2462	n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2463	if (!n_new)
2464		goto err_out;
2465	mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2466	if (!mpol_new)
2467		goto err_out;
 
2468	goto restart;
2469}
2470
2471/**
2472 * mpol_shared_policy_init - initialize shared policy for inode
2473 * @sp: pointer to inode shared policy
2474 * @mpol:  struct mempolicy to install
2475 *
2476 * Install non-NULL @mpol in inode's shared policy rb-tree.
2477 * On entry, the current task has a reference on a non-NULL @mpol.
2478 * This must be released on exit.
2479 * This is called at get_inode() time, so we can use GFP_KERNEL.
2480 */
2481void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2482{
2483	int ret;
2484
2485	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
2486	rwlock_init(&sp->lock);
2487
2488	if (mpol) {
2489		struct vm_area_struct pvma;
2490		struct mempolicy *new;
2491		NODEMASK_SCRATCH(scratch);
2492
2493		if (!scratch)
2494			goto put_mpol;
2495		/* contextualize the tmpfs mount point mempolicy */
2496		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
2497		if (IS_ERR(new))
2498			goto free_scratch; /* no valid nodemask intersection */
2499
2500		task_lock(current);
2501		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
2502		task_unlock(current);
2503		if (ret)
2504			goto put_new;
2505
2506		/* Create pseudo-vma that contains just the policy */
2507		memset(&pvma, 0, sizeof(struct vm_area_struct));
2508		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
2509		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
2510
2511put_new:
2512		mpol_put(new);			/* drop initial ref */
2513free_scratch:
2514		NODEMASK_SCRATCH_FREE(scratch);
2515put_mpol:
2516		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
2517	}
2518}
2519
2520int mpol_set_shared_policy(struct shared_policy *info,
2521			struct vm_area_struct *vma, struct mempolicy *npol)
2522{
2523	int err;
2524	struct sp_node *new = NULL;
2525	unsigned long sz = vma_pages(vma);
2526
2527	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
2528		 vma->vm_pgoff,
2529		 sz, npol ? npol->mode : -1,
2530		 npol ? npol->flags : -1,
2531		 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
2532
2533	if (npol) {
2534		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2535		if (!new)
2536			return -ENOMEM;
2537	}
2538	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2539	if (err && new)
2540		sp_free(new);
2541	return err;
2542}
2543
2544/* Free a backing policy store on inode delete. */
2545void mpol_free_shared_policy(struct shared_policy *p)
2546{
2547	struct sp_node *n;
2548	struct rb_node *next;
2549
2550	if (!p->root.rb_node)
2551		return;
2552	write_lock(&p->lock);
2553	next = rb_first(&p->root);
2554	while (next) {
2555		n = rb_entry(next, struct sp_node, nd);
2556		next = rb_next(&n->nd);
2557		sp_delete(p, n);
2558	}
2559	write_unlock(&p->lock);
2560}
2561
2562#ifdef CONFIG_NUMA_BALANCING
2563static int __initdata numabalancing_override;
2564
2565static void __init check_numabalancing_enable(void)
2566{
2567	bool numabalancing_default = false;
2568
2569	if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
2570		numabalancing_default = true;
2571
2572	/* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2573	if (numabalancing_override)
2574		set_numabalancing_state(numabalancing_override == 1);
2575
2576	if (num_online_nodes() > 1 && !numabalancing_override) {
2577		pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
2578			numabalancing_default ? "Enabling" : "Disabling");
2579		set_numabalancing_state(numabalancing_default);
2580	}
2581}
2582
2583static int __init setup_numabalancing(char *str)
2584{
2585	int ret = 0;
2586	if (!str)
2587		goto out;
2588
2589	if (!strcmp(str, "enable")) {
2590		numabalancing_override = 1;
2591		ret = 1;
2592	} else if (!strcmp(str, "disable")) {
2593		numabalancing_override = -1;
2594		ret = 1;
2595	}
2596out:
2597	if (!ret)
2598		pr_warn("Unable to parse numa_balancing=\n");
2599
2600	return ret;
2601}
2602__setup("numa_balancing=", setup_numabalancing);
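/*
 * Editorial example: the knob parsed above can be set at boot or at run
 * time, e.g.:
 *
 *	numa_balancing=disable                    (kernel command line)
 *	sysctl kernel.numa_balancing=1            (run time)
 *	echo 0 > /proc/sys/kernel/numa_balancing  (run time)
 */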
2603#else
2604static inline void __init check_numabalancing_enable(void)
2605{
2606}
2607#endif /* CONFIG_NUMA_BALANCING */
2608
2609/* assumes fs == KERNEL_DS */
2610void __init numa_policy_init(void)
2611{
2612	nodemask_t interleave_nodes;
2613	unsigned long largest = 0;
2614	int nid, prefer = 0;
2615
2616	policy_cache = kmem_cache_create("numa_policy",
2617					 sizeof(struct mempolicy),
2618					 0, SLAB_PANIC, NULL);
2619
2620	sn_cache = kmem_cache_create("shared_policy_node",
2621				     sizeof(struct sp_node),
2622				     0, SLAB_PANIC, NULL);
2623
2624	for_each_node(nid) {
2625		preferred_node_policy[nid] = (struct mempolicy) {
2626			.refcnt = ATOMIC_INIT(1),
2627			.mode = MPOL_PREFERRED,
2628			.flags = MPOL_F_MOF | MPOL_F_MORON,
2629			.v = { .preferred_node = nid, },
2630		};
2631	}
2632
2633	/*
2634	 * Set interleaving policy for system init. Interleaving is only
2635	 * enabled across suitably sized nodes (default is >= 16MB); otherwise
2636	 * we fall back to the largest node if they're all smaller.
2637	 */
2638	nodes_clear(interleave_nodes);
2639	for_each_node_state(nid, N_MEMORY) {
2640		unsigned long total_pages = node_present_pages(nid);
2641
2642		/* Preserve the largest node */
2643		if (largest < total_pages) {
2644			largest = total_pages;
2645			prefer = nid;
2646		}
2647
2648		/* Interleave this node? */
2649		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2650			node_set(nid, interleave_nodes);
2651	}
2652
2653	/* All too small, use the largest */
2654	if (unlikely(nodes_empty(interleave_nodes)))
2655		node_set(prefer, interleave_nodes);
2656
2657	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2658		pr_err("%s: interleaving failed\n", __func__);
2659
2660	check_numabalancing_enable();
2661}
2662
2663/* Reset policy of current process to default */
2664void numa_default_policy(void)
2665{
2666	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
2667}
2668
2669/*
2670 * Parse and format mempolicy from/to strings
2671 */
2672
2673/*
2674 * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag.
2675 */
2676static const char * const policy_modes[] =
2677{
2678	[MPOL_DEFAULT]    = "default",
2679	[MPOL_PREFERRED]  = "prefer",
2680	[MPOL_BIND]       = "bind",
2681	[MPOL_INTERLEAVE] = "interleave",
2682	[MPOL_LOCAL]      = "local",
 
2683};
2684
2685
2686#ifdef CONFIG_TMPFS
2687/**
2688 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
2689 * @str:  string containing mempolicy to parse
2690 * @mpol:  pointer to struct mempolicy pointer, returned on success.
2691 *
2692 * Format of input:
2693 *	<mode>[=<flags>][:<nodelist>]
2694 *
2695 * On success, returns 0, else 1
2696 */
2697int mpol_parse_str(char *str, struct mempolicy **mpol)
2698{
2699	struct mempolicy *new = NULL;
2700	unsigned short mode;
2701	unsigned short mode_flags;
2702	nodemask_t nodes;
2703	char *nodelist = strchr(str, ':');
2704	char *flags = strchr(str, '=');
2705	int err = 1;
 
 
 
2706
2707	if (nodelist) {
2708		/* NUL-terminate mode or flags string */
2709		*nodelist++ = '\0';
2710		if (nodelist_parse(nodelist, nodes))
2711			goto out;
2712		if (!nodes_subset(nodes, node_states[N_MEMORY]))
2713			goto out;
2714	} else
2715		nodes_clear(nodes);
2716
2717	if (flags)
2718		*flags++ = '\0';	/* terminate mode string */
2719
2720	for (mode = 0; mode < MPOL_MAX; mode++) {
2721		if (!strcmp(str, policy_modes[mode])) {
2722			break;
2723		}
2724	}
2725	if (mode >= MPOL_MAX)
2726		goto out;
2727
2728	switch (mode) {
2729	case MPOL_PREFERRED:
2730		/*
2731		 * Insist on a nodelist of one node only
 
 
2732		 */
2733		if (nodelist) {
2734			char *rest = nodelist;
2735			while (isdigit(*rest))
2736				rest++;
2737			if (*rest)
2738				goto out;
 
 
2739		}
2740		break;
2741	case MPOL_INTERLEAVE:
2742		/*
2743		 * Default to online nodes with memory if no nodelist
2744		 */
2745		if (!nodelist)
2746			nodes = node_states[N_MEMORY];
2747		break;
2748	case MPOL_LOCAL:
2749		/*
2750		 * Don't allow a nodelist;  mpol_new() checks flags
2751		 */
2752		if (nodelist)
2753			goto out;
2754		mode = MPOL_PREFERRED;
2755		break;
2756	case MPOL_DEFAULT:
2757		/*
2758		 * Insist on an empty nodelist
2759		 */
2760		if (!nodelist)
2761			err = 0;
2762		goto out;
 
2763	case MPOL_BIND:
2764		/*
2765		 * Insist on a nodelist
2766		 */
2767		if (!nodelist)
2768			goto out;
2769	}
2770
2771	mode_flags = 0;
2772	if (flags) {
2773		/*
2774		 * Currently, we only support two mutually exclusive
2775		 * mode flags.
2776		 */
2777		if (!strcmp(flags, "static"))
2778			mode_flags |= MPOL_F_STATIC_NODES;
2779		else if (!strcmp(flags, "relative"))
2780			mode_flags |= MPOL_F_RELATIVE_NODES;
2781		else
2782			goto out;
2783	}
2784
2785	new = mpol_new(mode, mode_flags, &nodes);
2786	if (IS_ERR(new))
2787		goto out;
2788
2789	/*
2790	 * Save nodes for mpol_to_str() to show the tmpfs mount options
2791	 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2792	 */
2793	if (mode != MPOL_PREFERRED)
2794		new->v.nodes = nodes;
2795	else if (nodelist)
2796		new->v.preferred_node = first_node(nodes);
2797	else
2798		new->flags |= MPOL_F_LOCAL;
 
 
2799
2800	/*
2801	 * Save nodes for contextualization: this will be used to "clone"
2802	 * the mempolicy in a specific context [cpuset] at a later time.
2803	 */
2804	new->w.user_nodemask = nodes;
2805
2806	err = 0;
2807
2808out:
2809	/* Restore string for error message */
2810	if (nodelist)
2811		*--nodelist = ':';
2812	if (flags)
2813		*--flags = '=';
2814	if (!err)
2815		*mpol = new;
2816	return err;
2817}
2818#endif /* CONFIG_TMPFS */
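/*
 * Editorial example: the strings accepted by mpol_parse_str() are the
 * tmpfs "mpol=" mount options, in the <mode>[=<flags>][:<nodelist>]
 * format documented above, e.g.:
 *
 *	mount -t tmpfs -o mpol=interleave:0-3 tmpfs /mnt
 *	mount -t tmpfs -o mpol=bind=static:0,2 tmpfs /mnt
 *	mount -t tmpfs -o mpol=prefer:1 tmpfs /mnt
 *	mount -t tmpfs -o mpol=local tmpfs /mnt
 */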
2819
2820/**
2821 * mpol_to_str - format a mempolicy structure for printing
2822 * @buffer:  to contain formatted mempolicy string
2823 * @maxlen:  length of @buffer
2824 * @pol:  pointer to mempolicy to be formatted
2825 *
2826 * Convert @pol into a string.  If @buffer is too short, truncate the string.
2827 * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
2828 * longest flag, "relative", and to display at least a few node ids.
2829 */
2830void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
2831{
2832	char *p = buffer;
2833	nodemask_t nodes = NODE_MASK_NONE;
2834	unsigned short mode = MPOL_DEFAULT;
2835	unsigned short flags = 0;
2836
2837	if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
2838		mode = pol->mode;
2839		flags = pol->flags;
2840	}
2841
2842	switch (mode) {
2843	case MPOL_DEFAULT:
 
2844		break;
2845	case MPOL_PREFERRED:
2846		if (flags & MPOL_F_LOCAL)
2847			mode = MPOL_LOCAL;
2848		else
2849			node_set(pol->v.preferred_node, nodes);
2850		break;
2851	case MPOL_BIND:
2852	case MPOL_INTERLEAVE:
2853		nodes = pol->v.nodes;
2854		break;
2855	default:
2856		WARN_ON_ONCE(1);
2857		snprintf(p, maxlen, "unknown");
2858		return;
2859	}
2860
2861	p += snprintf(p, maxlen, "%s", policy_modes[mode]);
2862
2863	if (flags & MPOL_MODE_FLAGS) {
2864		p += snprintf(p, buffer + maxlen - p, "=");
2865
2866		/*
2867		 * Currently, the only defined flags are mutually exclusive
2868		 */
2869		if (flags & MPOL_F_STATIC_NODES)
2870			p += snprintf(p, buffer + maxlen - p, "static");
2871		else if (flags & MPOL_F_RELATIVE_NODES)
2872			p += snprintf(p, buffer + maxlen - p, "relative");
2873	}
2874
2875	if (!nodes_empty(nodes))
2876		p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
2877			       nodemask_pr_args(&nodes));
2878}
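/*
 * Editorial example: typical output of mpol_to_str(), as shown in the
 * mpol= field of /proc/mounts for a tmpfs mount:
 *
 *	default
 *	prefer:1
 *	bind=static:0,2
 *	interleave:0-3
 *	local
 */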
v6.2
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Simple NUMA memory policy for the Linux kernel.
   4 *
   5 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
   6 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 
   7 *
   8 * NUMA policy allows the user to give hints in which node(s) memory should
   9 * be allocated.
  10 *
  11 * Support four policies per VMA and per process:
  12 *
  13 * The VMA policy has priority over the process policy for a page fault.
  14 *
  15 * interleave     Allocate memory interleaved over a set of nodes,
  16 *                with normal fallback if it fails.
  17 *                For VMA based allocations this interleaves based on the
  18 *                offset into the backing object or offset into the mapping
  19 *                for anonymous memory. For process policy an process counter
  20 *                is used.
  21 *
  22 * bind           Only allocate memory on a specific set of nodes,
  23 *                no fallback.
  24 *                FIXME: memory is allocated starting with the first node
  25 *                to the last. It would be better if bind would truly restrict
  26 *                the allocation to memory nodes instead
  27 *
  28 * preferred       Try a specific node first before normal fallback.
  29 *                As a special case NUMA_NO_NODE here means do the allocation
  30 *                on the local CPU. This is normally identical to default,
  31 *                but useful to set in a VMA when you have a non default
  32 *                process policy.
  33 *
  34 * preferred many Try a set of nodes first before normal fallback. This is
  35 *                similar to preferred without the special case.
  36 *
  37 * default        Allocate on the local node first, or when on a VMA
  38 *                use the process policy. This is what Linux always did
  39 *		  in a NUMA aware kernel and still does by, ahem, default.
  40 *
  41 * The process policy is applied for most non interrupt memory allocations
  42 * in that process' context. Interrupts ignore the policies and always
  43 * try to allocate on the local CPU. The VMA policy is only applied for memory
  44 * allocations for a VMA in the VM.
  45 *
  46 * Currently there are a few corner cases in swapping where the policy
  47 * is not applied, but the majority should be handled. When process policy
  48 * is used it is not remembered over swap outs/swap ins.
  49 *
  50 * Only the highest zone in the zone hierarchy gets policied. Allocations
  51 * requesting a lower zone just use default policy. This implies that
  52 * on systems with highmem kernel lowmem allocation don't get policied.
  53 * Same with GFP_DMA allocations.
  54 *
  55 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
  56 * all users and remembered even when nobody has memory mapped.
  57 */
  58
  59/* Notebook:
  60   fix mmap readahead to honour policy and enable policy for any page cache
  61   object
  62   statistics for bigpages
  63   global policy for page cache? currently it uses process policy. Requires
  64   first item above.
  65   handle mremap for shared memory (currently ignored for the policy)
  66   grows down?
  67   make bind policy root only? It can trigger oom much faster and the
  68   kernel is not always grateful with that.
  69*/
  70
  71#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  72
  73#include <linux/mempolicy.h>
  74#include <linux/pagewalk.h>
  75#include <linux/highmem.h>
  76#include <linux/hugetlb.h>
  77#include <linux/kernel.h>
  78#include <linux/sched.h>
  79#include <linux/sched/mm.h>
  80#include <linux/sched/numa_balancing.h>
  81#include <linux/sched/task.h>
  82#include <linux/nodemask.h>
  83#include <linux/cpuset.h>
  84#include <linux/slab.h>
  85#include <linux/string.h>
  86#include <linux/export.h>
  87#include <linux/nsproxy.h>
  88#include <linux/interrupt.h>
  89#include <linux/init.h>
  90#include <linux/compat.h>
  91#include <linux/ptrace.h>
  92#include <linux/swap.h>
  93#include <linux/seq_file.h>
  94#include <linux/proc_fs.h>
  95#include <linux/migrate.h>
  96#include <linux/ksm.h>
  97#include <linux/rmap.h>
  98#include <linux/security.h>
  99#include <linux/syscalls.h>
 100#include <linux/ctype.h>
 101#include <linux/mm_inline.h>
 102#include <linux/mmu_notifier.h>
 103#include <linux/printk.h>
 104#include <linux/swapops.h>
 105
 106#include <asm/tlbflush.h>
 107#include <asm/tlb.h>
 108#include <linux/uaccess.h>
 109
 110#include "internal.h"
 111
 112/* Internal flags */
 113#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
 114#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
 115
 116static struct kmem_cache *policy_cache;
 117static struct kmem_cache *sn_cache;
 118
  119/* Highest zone. A specific allocation for a zone below that is not
 120   policied. */
 121enum zone_type policy_zone = 0;
 122
 123/*
 124 * run-time system-wide default policy => local allocation
 125 */
 126static struct mempolicy default_policy = {
 127	.refcnt = ATOMIC_INIT(1), /* never free it */
 128	.mode = MPOL_LOCAL,
 
 129};
 130
 131static struct mempolicy preferred_node_policy[MAX_NUMNODES];
 132
 133/**
 134 * numa_map_to_online_node - Find closest online node
 135 * @node: Node id to start the search
 136 *
  137 * Lookup the next closest node by distance if @node is not online.
 138 *
 139 * Return: this @node if it is online, otherwise the closest node by distance
 140 */
 141int numa_map_to_online_node(int node)
 142{
 143	int min_dist = INT_MAX, dist, n, min_node;
 144
 145	if (node == NUMA_NO_NODE || node_online(node))
 146		return node;
 147
 148	min_node = node;
 149	for_each_online_node(n) {
 150		dist = node_distance(node, n);
 151		if (dist < min_dist) {
 152			min_dist = dist;
 153			min_node = n;
 154		}
 155	}
 156
 157	return min_node;
 158}
 159EXPORT_SYMBOL_GPL(numa_map_to_online_node);
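/*
 * Editorial example: if node 2 has gone offline, numa_map_to_online_node(2)
 * scans the online nodes and returns the one with the smallest
 * node_distance(2, n); an online node id (or NUMA_NO_NODE) is returned
 * unchanged.
 */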
 160
 161struct mempolicy *get_task_policy(struct task_struct *p)
 162{
 163	struct mempolicy *pol = p->mempolicy;
 164	int node;
 165
 166	if (pol)
 167		return pol;
 168
 169	node = numa_node_id();
 170	if (node != NUMA_NO_NODE) {
 171		pol = &preferred_node_policy[node];
 172		/* preferred_node_policy is not initialised early in boot */
 173		if (pol->mode)
 174			return pol;
 175	}
 176
 177	return &default_policy;
 178}
 179
 180static const struct mempolicy_operations {
 181	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
 182	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
 183} mpol_ops[MPOL_MAX];
 184
 185static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
 186{
 187	return pol->flags & MPOL_MODE_FLAGS;
 188}
 189
 190static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
 191				   const nodemask_t *rel)
 192{
 193	nodemask_t tmp;
 194	nodes_fold(tmp, *orig, nodes_weight(*rel));
 195	nodes_onto(*ret, tmp, *rel);
 196}
 197
 198static int mpol_new_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
 199{
 200	if (nodes_empty(*nodes))
 201		return -EINVAL;
 202	pol->nodes = *nodes;
 203	return 0;
 204}
 205
 206static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
  207{
 208	if (nodes_empty(*nodes))
 209		return -EINVAL;
 210
 211	nodes_clear(pol->nodes);
 212	node_set(first_node(*nodes), pol->nodes);
 213	return 0;
 214}
 215
 216/*
 217 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 218 * any, for the new policy.  mpol_new() has already validated the nodes
 219 * parameter with respect to the policy mode and flags.
 
 220 *
 221 * Must be called holding task's alloc_lock to protect task's mems_allowed
 222 * and mempolicy.  May also be called holding the mmap_lock for write.
 223 */
 224static int mpol_set_nodemask(struct mempolicy *pol,
 225		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
 226{
 227	int ret;
 228
 229	/*
  230	 * Default (pol==NULL) and local memory policies are not
  231	 * subject to any remapping. They also do not need any special
 232	 * constructor.
 233	 */
 234	if (!pol || pol->mode == MPOL_LOCAL)
 235		return 0;
 236
 237	/* Check N_MEMORY */
 238	nodes_and(nsc->mask1,
 239		  cpuset_current_mems_allowed, node_states[N_MEMORY]);
 240
  241	VM_BUG_ON(!nodes);
 242
 243	if (pol->flags & MPOL_F_RELATIVE_NODES)
 244		mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
 245	else
 246		nodes_and(nsc->mask2, *nodes, nsc->mask1);
 
 
 247
 248	if (mpol_store_user_nodemask(pol))
 249		pol->w.user_nodemask = *nodes;
 250	else
 251		pol->w.cpuset_mems_allowed = cpuset_current_mems_allowed;
 252
 253	ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
 254	return ret;
 255}
 256
 257/*
  258 * This function just creates a new policy, does some checks and simple
 259 * initialization. You must invoke mpol_set_nodemask() to set nodes.
 260 */
 261static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
 262				  nodemask_t *nodes)
 263{
 264	struct mempolicy *policy;
 265
 266	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
 267		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
 268
 269	if (mode == MPOL_DEFAULT) {
 270		if (nodes && !nodes_empty(*nodes))
 271			return ERR_PTR(-EINVAL);
 272		return NULL;
 273	}
 274	VM_BUG_ON(!nodes);
 275
 276	/*
 277	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
 278	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
 279	 * All other modes require a valid pointer to a non-empty nodemask.
 280	 */
 281	if (mode == MPOL_PREFERRED) {
 282		if (nodes_empty(*nodes)) {
 283			if (((flags & MPOL_F_STATIC_NODES) ||
 284			     (flags & MPOL_F_RELATIVE_NODES)))
 285				return ERR_PTR(-EINVAL);
 286
 287			mode = MPOL_LOCAL;
 288		}
 289	} else if (mode == MPOL_LOCAL) {
 290		if (!nodes_empty(*nodes) ||
 291		    (flags & MPOL_F_STATIC_NODES) ||
 292		    (flags & MPOL_F_RELATIVE_NODES))
 293			return ERR_PTR(-EINVAL);
 
 294	} else if (nodes_empty(*nodes))
 295		return ERR_PTR(-EINVAL);
 296	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
 297	if (!policy)
 298		return ERR_PTR(-ENOMEM);
 299	atomic_set(&policy->refcnt, 1);
 300	policy->mode = mode;
 301	policy->flags = flags;
 302	policy->home_node = NUMA_NO_NODE;
 303
 304	return policy;
 305}
 306
 307/* Slow path of a mpol destructor. */
 308void __mpol_put(struct mempolicy *p)
 309{
 310	if (!atomic_dec_and_test(&p->refcnt))
 311		return;
 312	kmem_cache_free(policy_cache, p);
 313}
 314
 315static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
 316{
 317}
 318
 319static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
 320{
 321	nodemask_t tmp;
 322
 323	if (pol->flags & MPOL_F_STATIC_NODES)
 324		nodes_and(tmp, pol->w.user_nodemask, *nodes);
 325	else if (pol->flags & MPOL_F_RELATIVE_NODES)
 326		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
 327	else {
 328		nodes_remap(tmp, pol->nodes, pol->w.cpuset_mems_allowed,
 329								*nodes);
 330		pol->w.cpuset_mems_allowed = *nodes;
 331	}
 332
 333	if (nodes_empty(tmp))
 334		tmp = *nodes;
 335
 336	pol->nodes = tmp;
 337}
 338
 339static void mpol_rebind_preferred(struct mempolicy *pol,
 340						const nodemask_t *nodes)
 341{
  342	pol->w.cpuset_mems_allowed = *nodes;
 343}
 344
 345/*
 346 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 347 *
 348 * Per-vma policies are protected by mmap_lock. Allocations using per-task
 349 * policies are protected by task->mems_allowed_seq to prevent a premature
 350 * OOM/allocation failure due to parallel nodemask modification.
 351 */
 352static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
 353{
 354	if (!pol || pol->mode == MPOL_LOCAL)
 355		return;
 356	if (!mpol_store_user_nodemask(pol) &&
 357	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
 358		return;
 359
 360	mpol_ops[pol->mode].rebind(pol, newmask);
 361}
 362
 363/*
 364 * Wrapper for mpol_rebind_policy() that just requires task
 365 * pointer, and updates task mempolicy.
 366 *
 367 * Called with task's alloc_lock held.
 368 */
 369
 370void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
 371{
 372	mpol_rebind_policy(tsk->mempolicy, new);
 373}
 374
 375/*
 376 * Rebind each vma in mm to new nodemask.
 377 *
 378 * Call holding a reference to mm.  Takes mm->mmap_lock during call.
 379 */
 380
 381void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
 382{
 383	struct vm_area_struct *vma;
 384	VMA_ITERATOR(vmi, mm, 0);
 385
 386	mmap_write_lock(mm);
 387	for_each_vma(vmi, vma)
 388		mpol_rebind_policy(vma->vm_policy, new);
 389	mmap_write_unlock(mm);
 390}
 391
 392static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
 393	[MPOL_DEFAULT] = {
 394		.rebind = mpol_rebind_default,
 395	},
 396	[MPOL_INTERLEAVE] = {
 397		.create = mpol_new_nodemask,
 398		.rebind = mpol_rebind_nodemask,
 399	},
 400	[MPOL_PREFERRED] = {
 401		.create = mpol_new_preferred,
 402		.rebind = mpol_rebind_preferred,
 403	},
 404	[MPOL_BIND] = {
 405		.create = mpol_new_nodemask,
 406		.rebind = mpol_rebind_nodemask,
 407	},
 408	[MPOL_LOCAL] = {
 409		.rebind = mpol_rebind_default,
 410	},
 411	[MPOL_PREFERRED_MANY] = {
 412		.create = mpol_new_nodemask,
 413		.rebind = mpol_rebind_preferred,
 414	},
 415};
 416
 417static int migrate_page_add(struct page *page, struct list_head *pagelist,
 418				unsigned long flags);
 419
 420struct queue_pages {
 421	struct list_head *pagelist;
 422	unsigned long flags;
 423	nodemask_t *nmask;
 424	unsigned long start;
 425	unsigned long end;
 426	struct vm_area_struct *first;
 427};
 428
 429/*
 430 * Check if the page's nid is in qp->nmask.
 431 *
 432 * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
 433 * in the invert of qp->nmask.
 434 */
 435static inline bool queue_pages_required(struct page *page,
 436					struct queue_pages *qp)
 437{
 438	int nid = page_to_nid(page);
 439	unsigned long flags = qp->flags;
 440
 441	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
 442}
 443
 444/*
 445 * queue_pages_pmd() has three possible return values:
 446 * 0 - pages are placed on the right node or queued successfully, or
  447 *     a special page is met, e.g. the huge zero page.
  448 * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 449 *     specified.
 450 * -EIO - is migration entry or only MPOL_MF_STRICT was specified and an
 451 *        existing page was already on a node that does not follow the
 452 *        policy.
 453 */
 454static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
 455				unsigned long end, struct mm_walk *walk)
 456	__releases(ptl)
 457{
 458	int ret = 0;
 459	struct page *page;
 460	struct queue_pages *qp = walk->private;
 461	unsigned long flags;
 462
 463	if (unlikely(is_pmd_migration_entry(*pmd))) {
 464		ret = -EIO;
 465		goto unlock;
 466	}
 467	page = pmd_page(*pmd);
 468	if (is_huge_zero_page(page)) {
  469		walk->action = ACTION_CONTINUE;
 470		goto unlock;
 471	}
 472	if (!queue_pages_required(page, qp))
 473		goto unlock;
 474
 
 475	flags = qp->flags;
 476	/* go to thp migration */
 477	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
 478		if (!vma_migratable(walk->vma) ||
 479		    migrate_page_add(page, qp->pagelist, flags)) {
 480			ret = 1;
 481			goto unlock;
 482		}
 483	} else
 484		ret = -EIO;
 485unlock:
 486	spin_unlock(ptl);
 
 487	return ret;
 488}
 489
 490/*
 491 * Scan through pages checking if pages follow certain conditions,
 492 * and move them to the pagelist if they do.
 493 *
 494 * queue_pages_pte_range() has three possible return values:
 495 * 0 - pages are placed on the right node or queued successfully, or
  496 *     a special page is met, e.g. the zero page.
  497 * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 498 *     specified.
 499 * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
 500 *        on a node that does not follow the policy.
 501 */
 502static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
 503			unsigned long end, struct mm_walk *walk)
 504{
 505	struct vm_area_struct *vma = walk->vma;
 506	struct page *page;
 507	struct queue_pages *qp = walk->private;
 508	unsigned long flags = qp->flags;
 509	bool has_unmovable = false;
 510	pte_t *pte, *mapped_pte;
 511	spinlock_t *ptl;
 512
 513	ptl = pmd_trans_huge_lock(pmd, vma);
 514	if (ptl)
 515		return queue_pages_pmd(pmd, ptl, addr, end, walk);
 
 
 
 516
 517	if (pmd_trans_unstable(pmd))
 518		return 0;
 519
 520	mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
 521	for (; addr != end; pte++, addr += PAGE_SIZE) {
 522		if (!pte_present(*pte))
 523			continue;
 524		page = vm_normal_page(vma, addr, *pte);
 525		if (!page || is_zone_device_page(page))
 526			continue;
 527		/*
 528		 * vm_normal_page() filters out zero pages, but there might
 529		 * still be PageReserved pages to skip, perhaps in a VDSO.
 530		 */
 531		if (PageReserved(page))
 532			continue;
 533		if (!queue_pages_required(page, qp))
 534			continue;
 535		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
 536			/* MPOL_MF_STRICT must be specified if we get here */
 537			if (!vma_migratable(vma)) {
 538				has_unmovable = true;
 539				break;
 540			}
 541
 542			/*
 543			 * Do not abort immediately since there may be
  544			 * temporarily off-LRU pages in the range.  Still
  545			 * need to migrate other LRU pages.
 546			 */
 547			if (migrate_page_add(page, qp->pagelist, flags))
 548				has_unmovable = true;
 549		} else
 550			break;
 551	}
 552	pte_unmap_unlock(mapped_pte, ptl);
 553	cond_resched();
 554
 555	if (has_unmovable)
 556		return 1;
 557
 558	return addr != end ? -EIO : 0;
 559}
 560
 561static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
 562			       unsigned long addr, unsigned long end,
 563			       struct mm_walk *walk)
 564{
 565	int ret = 0;
 566#ifdef CONFIG_HUGETLB_PAGE
 567	struct queue_pages *qp = walk->private;
 568	unsigned long flags = (qp->flags & MPOL_MF_VALID);
 569	struct page *page;
 570	spinlock_t *ptl;
 571	pte_t entry;
 572
 573	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
 574	entry = huge_ptep_get(pte);
 575	if (!pte_present(entry))
 576		goto unlock;
 577	page = pte_page(entry);
 578	if (!queue_pages_required(page, qp))
 579		goto unlock;
 580
 581	if (flags == MPOL_MF_STRICT) {
 582		/*
  583		 * STRICT alone means only detecting misplaced pages and no
  584		 * need to further check other vmas.
 585		 */
 586		ret = -EIO;
 587		goto unlock;
 588	}
 589
 590	if (!vma_migratable(walk->vma)) {
 591		/*
  592		 * Must be STRICT with MOVE*, otherwise .test_walk() would have
  593		 * stopped walking the current vma.
  594		 * Detect the misplaced page but allow migrating pages which
 595		 * have been queued.
 596		 */
 597		ret = 1;
 598		goto unlock;
 599	}
 600
 601	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
 602	if (flags & (MPOL_MF_MOVE_ALL) ||
 603	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1 &&
 604	     !hugetlb_pmd_shared(pte))) {
 605		if (isolate_hugetlb(page, qp->pagelist) &&
 606			(flags & MPOL_MF_STRICT))
 607			/*
 608			 * Failed to isolate page but allow migrating pages
 609			 * which have been queued.
 610			 */
 611			ret = 1;
 612	}
 613unlock:
 614	spin_unlock(ptl);
 615#else
 616	BUG();
 617#endif
 618	return ret;
 619}
 620
 621#ifdef CONFIG_NUMA_BALANCING
 622/*
 623 * This is used to mark a range of virtual addresses to be inaccessible.
 624 * These are later cleared by a NUMA hinting fault. Depending on these
 625 * faults, pages may be migrated for better NUMA placement.
 626 *
 627 * This is assuming that NUMA faults are handled using PROT_NONE. If
 628 * an architecture makes a different choice, it will need further
 629 * changes to the core.
 630 */
 631unsigned long change_prot_numa(struct vm_area_struct *vma,
 632			unsigned long addr, unsigned long end)
 633{
 634	struct mmu_gather tlb;
 635	int nr_updated;
 636
 637	tlb_gather_mmu(&tlb, vma->vm_mm);
 638
 639	nr_updated = change_protection(&tlb, vma, addr, end, PAGE_NONE,
 640				       MM_CP_PROT_NUMA);
 641	if (nr_updated)
 642		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
 643
 644	tlb_finish_mmu(&tlb);
 645
 646	return nr_updated;
 647}
 648#else
 649static unsigned long change_prot_numa(struct vm_area_struct *vma,
 650			unsigned long addr, unsigned long end)
 651{
 652	return 0;
 653}
 654#endif /* CONFIG_NUMA_BALANCING */
 655
 656static int queue_pages_test_walk(unsigned long start, unsigned long end,
 657				struct mm_walk *walk)
 658{
 659	struct vm_area_struct *next, *vma = walk->vma;
 660	struct queue_pages *qp = walk->private;
 661	unsigned long endvma = vma->vm_end;
 662	unsigned long flags = qp->flags;
 663
 664	/* range check first */
 665	VM_BUG_ON_VMA(!range_in_vma(vma, start, end), vma);
 666
 667	if (!qp->first) {
 668		qp->first = vma;
 669		if (!(flags & MPOL_MF_DISCONTIG_OK) &&
 670			(qp->start < vma->vm_start))
 671			/* hole at head side of range */
 
 
 
 
 672			return -EFAULT;
 673	}
 674	next = find_vma(vma->vm_mm, vma->vm_end);
 675	if (!(flags & MPOL_MF_DISCONTIG_OK) &&
 676		((vma->vm_end < qp->end) &&
 677		(!next || vma->vm_end < next->vm_start)))
 678		/* hole at middle or tail of range */
 679		return -EFAULT;
 680
 681	/*
  682	 * Need to check MPOL_MF_STRICT to return -EIO if possible
 683	 * regardless of vma_migratable
 684	 */
 685	if (!vma_migratable(vma) &&
 686	    !(flags & MPOL_MF_STRICT))
 687		return 1;
 688
 689	if (endvma > end)
 690		endvma = end;
 691
 692	if (flags & MPOL_MF_LAZY) {
 693		/* Similar to task_numa_work, skip inaccessible VMAs */
 694		if (!is_vm_hugetlb_page(vma) && vma_is_accessible(vma) &&
 
 695			!(vma->vm_flags & VM_MIXEDMAP))
 696			change_prot_numa(vma, start, endvma);
 697		return 1;
 698	}
 699
 700	/* queue pages from current vma */
 701	if (flags & MPOL_MF_VALID)
 702		return 0;
 703	return 1;
 704}
 705
 706static const struct mm_walk_ops queue_pages_walk_ops = {
 707	.hugetlb_entry		= queue_pages_hugetlb,
 708	.pmd_entry		= queue_pages_pte_range,
 709	.test_walk		= queue_pages_test_walk,
 710};
 711
 712/*
 713 * Walk through page tables and collect pages to be migrated.
 714 *
 715 * If pages found in a given range are on a set of nodes (determined by
  716 * @nodes and @flags), they are isolated and queued to the pagelist which is
 717 * passed via @private.
 718 *
 719 * queue_pages_range() has three possible return values:
  720 * 1 - there is an unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were
 721 *     specified.
 722 * 0 - queue pages successfully or no misplaced page.
 723 * errno - i.e. misplaced pages with MPOL_MF_STRICT specified (-EIO) or
 724 *         memory range specified by nodemask and maxnode points outside
 725 *         your accessible address space (-EFAULT)
 726 */
 727static int
 728queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
 729		nodemask_t *nodes, unsigned long flags,
 730		struct list_head *pagelist)
 731{
 732	int err;
 733	struct queue_pages qp = {
 734		.pagelist = pagelist,
 735		.flags = flags,
 736		.nmask = nodes,
 737		.start = start,
 738		.end = end,
  739		.first = NULL,
 740	};
 741
 742	err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp);
 743
 744	if (!qp.first)
 745		/* whole range in hole */
 746		err = -EFAULT;
 747
 748	return err;
 749}
 750
 751/*
 752 * Apply policy to a single VMA
 753 * This must be called with the mmap_lock held for writing.
 754 */
 755static int vma_replace_policy(struct vm_area_struct *vma,
 756						struct mempolicy *pol)
 757{
 758	int err;
 759	struct mempolicy *old;
 760	struct mempolicy *new;
 761
 762	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
 763		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
 764		 vma->vm_ops, vma->vm_file,
 765		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
 766
 767	new = mpol_dup(pol);
 768	if (IS_ERR(new))
 769		return PTR_ERR(new);
 770
 771	if (vma->vm_ops && vma->vm_ops->set_policy) {
 772		err = vma->vm_ops->set_policy(vma, new);
 773		if (err)
 774			goto err_out;
 775	}
 776
 777	old = vma->vm_policy;
 778	vma->vm_policy = new; /* protected by mmap_lock */
 779	mpol_put(old);
 780
 781	return 0;
 782 err_out:
 783	mpol_put(new);
 784	return err;
 785}
 786
 787/* Step 2: apply policy to a range and do splits. */
 788static int mbind_range(struct mm_struct *mm, unsigned long start,
 789		       unsigned long end, struct mempolicy *new_pol)
 790{
 791	MA_STATE(mas, &mm->mm_mt, start, start);
 792	struct vm_area_struct *prev;
 793	struct vm_area_struct *vma;
 794	int err = 0;
 795	pgoff_t pgoff;
 
 
 796
 797	prev = mas_prev(&mas, 0);
 798	if (unlikely(!prev))
 799		mas_set(&mas, start);
 800
 801	vma = mas_find(&mas, end - 1);
 802	if (WARN_ON(!vma))
 803		return 0;
 804
 
 805	if (start > vma->vm_start)
 806		prev = vma;
 807
 808	for (; vma; vma = mas_next(&mas, end - 1)) {
 809		unsigned long vmstart = max(start, vma->vm_start);
 810		unsigned long vmend = min(end, vma->vm_end);
 
 811
 812		if (mpol_equal(vma_policy(vma), new_pol))
 813			goto next;
 814
 815		pgoff = vma->vm_pgoff +
 816			((vmstart - vma->vm_start) >> PAGE_SHIFT);
 817		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
 818				 vma->anon_vma, vma->vm_file, pgoff,
 819				 new_pol, vma->vm_userfaultfd_ctx,
 820				 anon_vma_name(vma));
 821		if (prev) {
 822			/* vma_merge() invalidated the mas */
 823			mas_pause(&mas);
 824			vma = prev;
 
 
 
 
 825			goto replace;
 826		}
 827		if (vma->vm_start != vmstart) {
 828			err = split_vma(vma->vm_mm, vma, vmstart, 1);
 829			if (err)
 830				goto out;
 831			/* split_vma() invalidated the mas */
 832			mas_pause(&mas);
 833		}
 834		if (vma->vm_end != vmend) {
 835			err = split_vma(vma->vm_mm, vma, vmend, 0);
 836			if (err)
 837				goto out;
 838			/* split_vma() invalidated the mas */
 839			mas_pause(&mas);
 840		}
 841replace:
 842		err = vma_replace_policy(vma, new_pol);
 843		if (err)
 844			goto out;
 845next:
 846		prev = vma;
 847	}
 848
 849out:
 850	return err;
 851}
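/*
 * Editorial example (user space, not part of this file): mbind(2) is the
 * path that reaches mbind_range() above.  A minimal sketch that binds one
 * anonymous mapping to node 0, assuming node 0 exists and <numaif.h> from
 * libnuma is available:
 *
 *	#include <numaif.h>
 *	#include <sys/mman.h>
 *
 *	size_t len = 2UL << 20;
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	unsigned long mask = 1UL << 0;
 *
 *	if (p != MAP_FAILED)
 *		mbind(p, len, MPOL_BIND, &mask, 8 * sizeof(mask),
 *		      MPOL_MF_MOVE);
 */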
 852
 853/* Set the process memory policy */
 854static long do_set_mempolicy(unsigned short mode, unsigned short flags,
 855			     nodemask_t *nodes)
 856{
 857	struct mempolicy *new, *old;
 858	NODEMASK_SCRATCH(scratch);
 859	int ret;
 860
 861	if (!scratch)
 862		return -ENOMEM;
 863
 864	new = mpol_new(mode, flags, nodes);
 865	if (IS_ERR(new)) {
 866		ret = PTR_ERR(new);
 867		goto out;
 868	}
 869
 870	task_lock(current);
 871	ret = mpol_set_nodemask(new, nodes, scratch);
 872	if (ret) {
 873		task_unlock(current);
 874		mpol_put(new);
 875		goto out;
 876	}
 877
 878	old = current->mempolicy;
 879	current->mempolicy = new;
 880	if (new && new->mode == MPOL_INTERLEAVE)
 881		current->il_prev = MAX_NUMNODES-1;
 882	task_unlock(current);
 883	mpol_put(old);
 884	ret = 0;
 885out:
 886	NODEMASK_SCRATCH_FREE(scratch);
 887	return ret;
 888}
 889
 890/*
 891 * Return nodemask for policy for get_mempolicy() query
 892 *
 893 * Called with task's alloc_lock held
 894 */
 895static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
 896{
 897	nodes_clear(*nodes);
 898	if (p == &default_policy)
 899		return;
 900
 901	switch (p->mode) {
 902	case MPOL_BIND:
 
 903	case MPOL_INTERLEAVE:
 
 
 904	case MPOL_PREFERRED:
 905	case MPOL_PREFERRED_MANY:
 906		*nodes = p->nodes;
 907		break;
 908	case MPOL_LOCAL:
 909		/* return empty node mask for local allocation */
 910		break;
 911	default:
 912		BUG();
 913	}
 914}
 915
 916static int lookup_node(struct mm_struct *mm, unsigned long addr)
 917{
 918	struct page *p = NULL;
 919	int ret;
 920
 921	ret = get_user_pages_fast(addr & PAGE_MASK, 1, 0, &p);
 922	if (ret > 0) {
 923		ret = page_to_nid(p);
 924		put_page(p);
 925	}
 926	return ret;
 927}
 928
 929/* Retrieve NUMA policy */
 930static long do_get_mempolicy(int *policy, nodemask_t *nmask,
 931			     unsigned long addr, unsigned long flags)
 932{
 933	int err;
 934	struct mm_struct *mm = current->mm;
 935	struct vm_area_struct *vma = NULL;
 936	struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;
 937
 938	if (flags &
 939		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
 940		return -EINVAL;
 941
 942	if (flags & MPOL_F_MEMS_ALLOWED) {
 943		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
 944			return -EINVAL;
 945		*policy = 0;	/* just so it's initialized */
 946		task_lock(current);
 947		*nmask  = cpuset_current_mems_allowed;
 948		task_unlock(current);
 949		return 0;
 950	}
 951
 952	if (flags & MPOL_F_ADDR) {
 953		/*
 954		 * Do NOT fall back to task policy if the
 955		 * vma/shared policy at addr is NULL.  We
 956		 * want to return MPOL_DEFAULT in this case.
 957		 */
 958		mmap_read_lock(mm);
 959		vma = vma_lookup(mm, addr);
 960		if (!vma) {
 961			mmap_read_unlock(mm);
 962			return -EFAULT;
 963		}
 964		if (vma->vm_ops && vma->vm_ops->get_policy)
 965			pol = vma->vm_ops->get_policy(vma, addr);
 966		else
 967			pol = vma->vm_policy;
 968	} else if (addr)
 969		return -EINVAL;
 970
 971	if (!pol)
 972		pol = &default_policy;	/* indicates default behavior */
 973
 974	if (flags & MPOL_F_NODE) {
 975		if (flags & MPOL_F_ADDR) {
 976			/*
 977			 * Take a refcount on the mpol, because we are about to
 978			 * drop the mmap_lock, after which only "pol" remains
 979			 * valid, "vma" is stale.
 980			 */
 981			pol_refcount = pol;
 982			vma = NULL;
 983			mpol_get(pol);
 984			mmap_read_unlock(mm);
 985			err = lookup_node(mm, addr);
 986			if (err < 0)
 987				goto out;
 988			*policy = err;
 989		} else if (pol == current->mempolicy &&
 990				pol->mode == MPOL_INTERLEAVE) {
 991			*policy = next_node_in(current->il_prev, pol->nodes);
 992		} else {
 993			err = -EINVAL;
 994			goto out;
 995		}
 996	} else {
 997		*policy = pol == &default_policy ? MPOL_DEFAULT :
 998						pol->mode;
 999		/*
1000		 * Internal mempolicy flags must be masked off before exposing
1001		 * the policy to userspace.
1002		 */
1003		*policy |= (pol->flags & MPOL_MODE_FLAGS);
1004	}
1005
1006	err = 0;
1007	if (nmask) {
1008		if (mpol_store_user_nodemask(pol)) {
1009			*nmask = pol->w.user_nodemask;
1010		} else {
1011			task_lock(current);
1012			get_policy_nodemask(pol, nmask);
1013			task_unlock(current);
1014		}
1015	}
1016
1017 out:
1018	mpol_cond_put(pol);
1019	if (vma)
1020		mmap_read_unlock(mm);
1021	if (pol_refcount)
1022		mpol_put(pol_refcount);
1023	return err;
1024}
1025
1026#ifdef CONFIG_MIGRATION
1027/*
1028 * page migration, thp tail pages can be passed.
1029 */
1030static int migrate_page_add(struct page *page, struct list_head *pagelist,
1031				unsigned long flags)
1032{
1033	struct page *head = compound_head(page);
1034	/*
1035	 * Avoid migrating a page that is shared with others.
1036	 */
1037	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
1038		if (!isolate_lru_page(head)) {
1039			list_add_tail(&head->lru, pagelist);
1040			mod_node_page_state(page_pgdat(head),
1041				NR_ISOLATED_ANON + page_is_file_lru(head),
1042				thp_nr_pages(head));
1043		} else if (flags & MPOL_MF_STRICT) {
1044			/*
1045			 * Non-movable page may reach here.  And, there may be
1046			 * temporary off LRU pages or non-LRU movable pages.
1047			 * Treat them as unmovable pages since they can't be
1048			 * isolated, so they can't be moved at the moment.  It
1049			 * should return -EIO for this case too.
1050			 */
1051			return -EIO;
1052		}
1053	}
1054
1055	return 0;
1056}
1057
1058/*
1059 * Migrate pages from one node to a target node.
1060 * Returns error or the number of pages not migrated.
1061 */
1062static int migrate_to_node(struct mm_struct *mm, int source, int dest,
1063			   int flags)
1064{
1065	nodemask_t nmask;
1066	struct vm_area_struct *vma;
1067	LIST_HEAD(pagelist);
1068	int err = 0;
1069	struct migration_target_control mtc = {
1070		.nid = dest,
1071		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
1072	};
1073
1074	nodes_clear(nmask);
1075	node_set(source, nmask);
1076
1077	/*
1078	 * This does not "check" the range but isolates all pages that
1079	 * need migration.  Between passing in the full user address
1080	 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
1081	 */
1082	vma = find_vma(mm, 0);
1083	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
1084	queue_pages_range(mm, vma->vm_start, mm->task_size, &nmask,
1085			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
1086
1087	if (!list_empty(&pagelist)) {
1088		err = migrate_pages(&pagelist, alloc_migration_target, NULL,
1089				(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
1090		if (err)
1091			putback_movable_pages(&pagelist);
1092	}
1093
1094	return err;
1095}
1096
1097/*
1098 * Move pages between the two nodesets so as to preserve the physical
1099 * layout as much as possible.
1100 *
 1101 * Returns the number of pages that could not be moved.
1102 */
1103int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1104		     const nodemask_t *to, int flags)
1105{
1106	int busy = 0;
1107	int err = 0;
1108	nodemask_t tmp;
1109
1110	lru_cache_disable();
1111
1112	mmap_read_lock(mm);
1113
1114	/*
1115	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
1116	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
1117	 * bit in 'tmp', and return that <source, dest> pair for migration.
1118	 * The pair of nodemasks 'to' and 'from' define the map.
1119	 *
1120	 * If no pair of bits is found that way, fallback to picking some
1121	 * pair of 'source' and 'dest' bits that are not the same.  If the
1122	 * 'source' and 'dest' bits are the same, this represents a node
1123	 * that will be migrating to itself, so no pages need move.
1124	 *
1125	 * If no bits are left in 'tmp', or if all remaining bits left
1126	 * in 'tmp' correspond to the same bit in 'to', return false
1127	 * (nothing left to migrate).
1128	 *
1129	 * This lets us pick a pair of nodes to migrate between, such that
1130	 * if possible the dest node is not already occupied by some other
1131	 * source node, minimizing the risk of overloading the memory on a
1132	 * node that would happen if we migrated incoming memory to a node
 1133	 * before migrating outgoing memory from that same node.
1134	 *
1135	 * A single scan of tmp is sufficient.  As we go, we remember the
1136	 * most recent <s, d> pair that moved (s != d).  If we find a pair
1137	 * that not only moved, but what's better, moved to an empty slot
1138	 * (d is not set in tmp), then we break out then, with that pair.
 1139	 * Otherwise when we finish scanning tmp, we at least have the
1140	 * most recent <s, d> pair that moved.  If we get all the way through
1141	 * the scan of tmp without finding any node that moved, much less
1142	 * moved to an empty node, then there is nothing left worth migrating.
1143	 */
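	/*
	 * Illustrative walk-through, with assumed nodemasks: for
	 * from = {0,1} and to = {2,3}, the first scan pairs s=0 with d=2
	 * via node_remap(); node 2 is not in the remaining source set tmp,
	 * so the inner loop breaks early and 0 -> 2 is migrated.  Node 0 is
	 * then cleared from tmp, the next scan pairs 1 -> 3, and the outer
	 * loop ends once tmp is empty.
	 */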
1144
1145	tmp = *from;
1146	while (!nodes_empty(tmp)) {
1147		int s, d;
1148		int source = NUMA_NO_NODE;
1149		int dest = 0;
1150
1151		for_each_node_mask(s, tmp) {
1152
1153			/*
1154			 * do_migrate_pages() tries to maintain the relative
1155			 * node relationship of the pages established between
1156			 * threads and memory areas.
 1157			 *
1158			 * However if the number of source nodes is not equal to
1159			 * the number of destination nodes we can not preserve
1160			 * this node relative relationship.  In that case, skip
1161			 * copying memory from a node that is in the destination
1162			 * mask.
1163			 *
1164			 * Example: [2,3,4] -> [3,4,5] moves everything.
 1165	 *          [0-7] -> [3,4,5] moves only 0,1,2,6,7.
1166			 */
1167
1168			if ((nodes_weight(*from) != nodes_weight(*to)) &&
1169						(node_isset(s, *to)))
1170				continue;
1171
1172			d = node_remap(s, *from, *to);
1173			if (s == d)
1174				continue;
1175
1176			source = s;	/* Node moved. Memorize */
1177			dest = d;
1178
1179			/* dest not in remaining from nodes? */
1180			if (!node_isset(dest, tmp))
1181				break;
1182		}
1183		if (source == NUMA_NO_NODE)
1184			break;
1185
1186		node_clear(source, tmp);
1187		err = migrate_to_node(mm, source, dest, flags);
1188		if (err > 0)
1189			busy += err;
1190		if (err < 0)
1191			break;
1192	}
1193	mmap_read_unlock(mm);
1194
1195	lru_cache_enable();
1196	if (err < 0)
1197		return err;
1198	return busy;
1199
1200}
1201
1202/*
1203 * Allocate a new page for page migration based on vma policy.
 1204 * Start by assuming the page is mapped by the same vma that contains @start.
1205 * Search forward from there, if not.  N.B., this assumes that the
1206 * list of pages handed to migrate_pages()--which is how we get here--
1207 * is in virtual address order.
1208 */
1209static struct page *new_page(struct page *page, unsigned long start)
1210{
1211	struct folio *dst, *src = page_folio(page);
1212	struct vm_area_struct *vma;
1213	unsigned long address;
1214	VMA_ITERATOR(vmi, current->mm, start);
1215	gfp_t gfp = GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL;
1216
1217	for_each_vma(vmi, vma) {
1218		address = page_address_in_vma(page, vma);
1219		if (address != -EFAULT)
1220			break;
1221	}
1222
1223	if (folio_test_hugetlb(src))
1224		return alloc_huge_page_vma(page_hstate(&src->page),
1225				vma, address);
1226
1227	if (folio_test_large(src))
1228		gfp = GFP_TRANSHUGE;
1229
1230	/*
1231	 * if !vma, vma_alloc_folio() will use task or system default policy
1232	 */
1233	dst = vma_alloc_folio(gfp, folio_order(src), vma, address,
1234			folio_test_large(src));
1235	return &dst->page;
1236}
1237#else
1238
1239static int migrate_page_add(struct page *page, struct list_head *pagelist,
1240				unsigned long flags)
1241{
1242	return -EIO;
1243}
1244
1245int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1246		     const nodemask_t *to, int flags)
1247{
1248	return -ENOSYS;
1249}
1250
1251static struct page *new_page(struct page *page, unsigned long start)
1252{
1253	return NULL;
1254}
1255#endif
1256
1257static long do_mbind(unsigned long start, unsigned long len,
1258		     unsigned short mode, unsigned short mode_flags,
1259		     nodemask_t *nmask, unsigned long flags)
1260{
1261	struct mm_struct *mm = current->mm;
1262	struct mempolicy *new;
1263	unsigned long end;
1264	int err;
1265	int ret;
1266	LIST_HEAD(pagelist);
1267
1268	if (flags & ~(unsigned long)MPOL_MF_VALID)
1269		return -EINVAL;
1270	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1271		return -EPERM;
1272
1273	if (start & ~PAGE_MASK)
1274		return -EINVAL;
1275
1276	if (mode == MPOL_DEFAULT)
1277		flags &= ~MPOL_MF_STRICT;
1278
1279	len = PAGE_ALIGN(len);
1280	end = start + len;
1281
1282	if (end < start)
1283		return -EINVAL;
1284	if (end == start)
1285		return 0;
1286
1287	new = mpol_new(mode, mode_flags, nmask);
1288	if (IS_ERR(new))
1289		return PTR_ERR(new);
1290
1291	if (flags & MPOL_MF_LAZY)
1292		new->flags |= MPOL_F_MOF;
1293
1294	/*
 1295	 * If we are using the default policy then operations
 1296	 * on discontiguous address spaces are okay after all
1297	 */
1298	if (!new)
1299		flags |= MPOL_MF_DISCONTIG_OK;
1300
1301	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1302		 start, start + len, mode, mode_flags,
1303		 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
1304
1305	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1306
1307		lru_cache_disable();
1308	}
1309	{
1310		NODEMASK_SCRATCH(scratch);
1311		if (scratch) {
1312			mmap_write_lock(mm);
1313			err = mpol_set_nodemask(new, nmask, scratch);
1314			if (err)
1315				mmap_write_unlock(mm);
1316		} else
1317			err = -ENOMEM;
1318		NODEMASK_SCRATCH_FREE(scratch);
1319	}
1320	if (err)
1321		goto mpol_out;
1322
1323	ret = queue_pages_range(mm, start, end, nmask,
1324			  flags | MPOL_MF_INVERT, &pagelist);
1325
1326	if (ret < 0) {
1327		err = ret;
1328		goto up_out;
1329	}
1330
1331	err = mbind_range(mm, start, end, new);
1332
1333	if (!err) {
1334		int nr_failed = 0;
1335
1336		if (!list_empty(&pagelist)) {
1337			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
1338			nr_failed = migrate_pages(&pagelist, new_page, NULL,
1339				start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND, NULL);
1340			if (nr_failed)
1341				putback_movable_pages(&pagelist);
1342		}
1343
1344		if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
1345			err = -EIO;
1346	} else {
1347up_out:
1348		if (!list_empty(&pagelist))
1349			putback_movable_pages(&pagelist);
1350	}
1351
1352	mmap_write_unlock(mm);
1353mpol_out:
1354	mpol_put(new);
1355	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
1356		lru_cache_enable();
1357	return err;
1358}
1359
1360/*
1361 * User space interface with variable sized bitmaps for nodelists.
1362 */
1363static int get_bitmap(unsigned long *mask, const unsigned long __user *nmask,
1364		      unsigned long maxnode)
1365{
1366	unsigned long nlongs = BITS_TO_LONGS(maxnode);
1367	int ret;
1368
1369	if (in_compat_syscall())
1370		ret = compat_get_bitmap(mask,
1371					(const compat_ulong_t __user *)nmask,
1372					maxnode);
1373	else
1374		ret = copy_from_user(mask, nmask,
1375				     nlongs * sizeof(unsigned long));
1376
1377	if (ret)
1378		return -EFAULT;
1379
1380	if (maxnode % BITS_PER_LONG)
1381		mask[nlongs - 1] &= (1UL << (maxnode % BITS_PER_LONG)) - 1;
1382
1383	return 0;
1384}
1385
1386/* Copy a node mask from user space. */
1387static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
1388		     unsigned long maxnode)
1389{
1390	--maxnode;
1391	nodes_clear(*nodes);
1392	if (maxnode == 0 || !nmask)
1393		return 0;
1394	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1395		return -EINVAL;
1396
1397	/*
1398	 * When the user specified more nodes than supported just check
1399	 * if the non supported part is all zero, one word at a time,
1400	 * starting at the end.
1401	 */
1402	while (maxnode > MAX_NUMNODES) {
1403		unsigned long bits = min_t(unsigned long, maxnode, BITS_PER_LONG);
1404		unsigned long t;
1405
1406		if (get_bitmap(&t, &nmask[(maxnode - 1) / BITS_PER_LONG], bits))
1407			return -EFAULT;
1408
1409		if (maxnode - bits >= MAX_NUMNODES) {
1410			maxnode -= bits;
1411		} else {
1412			maxnode = MAX_NUMNODES;
1413			t &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
1414		}
1415		if (t)
1416			return -EINVAL;
1417	}
1418
1419	return get_bitmap(nodes_addr(*nodes), nmask, maxnode);
1420}
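/*
 * Illustrative sketch with assumed values: on a kernel built with
 * MAX_NUMNODES == 1024, a caller passing maxnode == 4096 only succeeds if
 * the mask words covering bits 1024 and above are all zero; the loop above
 * checks those words from the top down before the final get_bitmap() copies
 * the supported low part of the mask.
 */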
1421
1422/* Copy a kernel node mask to user space */
1423static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1424			      nodemask_t *nodes)
1425{
1426	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1427	unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
1428	bool compat = in_compat_syscall();
1429
1430	if (compat)
1431		nbytes = BITS_TO_COMPAT_LONGS(nr_node_ids) * sizeof(compat_long_t);
1432
1433	if (copy > nbytes) {
1434		if (copy > PAGE_SIZE)
1435			return -EINVAL;
1436		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1437			return -EFAULT;
1438		copy = nbytes;
1439		maxnode = nr_node_ids;
1440	}
1441
1442	if (compat)
1443		return compat_put_bitmap((compat_ulong_t __user *)mask,
1444					 nodes_addr(*nodes), maxnode);
1445
1446	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1447}
1448
1449/* Basic parameter sanity check used by both mbind() and set_mempolicy() */
1450static inline int sanitize_mpol_flags(int *mode, unsigned short *flags)
1451{
1452	*flags = *mode & MPOL_MODE_FLAGS;
1453	*mode &= ~MPOL_MODE_FLAGS;
1454
1455	if ((unsigned int)(*mode) >=  MPOL_MAX)
1456		return -EINVAL;
1457	if ((*flags & MPOL_F_STATIC_NODES) && (*flags & MPOL_F_RELATIVE_NODES))
1458		return -EINVAL;
1459	if (*flags & MPOL_F_NUMA_BALANCING) {
1460		if (*mode != MPOL_BIND)
1461			return -EINVAL;
1462		*flags |= (MPOL_F_MOF | MPOL_F_MORON);
1463	}
1464	return 0;
1465}
1466
1467static long kernel_mbind(unsigned long start, unsigned long len,
1468			 unsigned long mode, const unsigned long __user *nmask,
1469			 unsigned long maxnode, unsigned int flags)
1470{
1471	unsigned short mode_flags;
1472	nodemask_t nodes;
1473	int lmode = mode;
1474	int err;
1475
1476	start = untagged_addr(start);
1477	err = sanitize_mpol_flags(&lmode, &mode_flags);
1478	if (err)
1479		return err;
1480
1481	err = get_nodes(&nodes, nmask, maxnode);
1482	if (err)
1483		return err;
1484
1485	return do_mbind(start, len, lmode, mode_flags, &nodes, flags);
1486}
1487
1488SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, len,
1489		unsigned long, home_node, unsigned long, flags)
1490{
1491	struct mm_struct *mm = current->mm;
1492	struct vm_area_struct *vma;
1493	struct mempolicy *new;
1494	unsigned long vmstart;
1495	unsigned long vmend;
1496	unsigned long end;
1497	int err = -ENOENT;
1498	VMA_ITERATOR(vmi, mm, start);
1499
1500	start = untagged_addr(start);
1501	if (start & ~PAGE_MASK)
1502		return -EINVAL;
1503	/*
 1504	 * flags is reserved for future extensions.
1505	 */
1506	if (flags != 0)
1507		return -EINVAL;
1508
1509	/*
1510	 * Check home_node is online to avoid accessing uninitialized
1511	 * NODE_DATA.
1512	 */
1513	if (home_node >= MAX_NUMNODES || !node_online(home_node))
1514		return -EINVAL;
1515
1516	len = PAGE_ALIGN(len);
1517	end = start + len;
1518
1519	if (end < start)
1520		return -EINVAL;
1521	if (end == start)
1522		return 0;
1523	mmap_write_lock(mm);
1524	for_each_vma_range(vmi, vma, end) {
1525		vmstart = max(start, vma->vm_start);
1526		vmend   = min(end, vma->vm_end);
1527		new = mpol_dup(vma_policy(vma));
1528		if (IS_ERR(new)) {
1529			err = PTR_ERR(new);
1530			break;
1531		}
1532		/*
1533		 * Only update home node if there is an existing vma policy
1534		 */
1535		if (!new)
1536			continue;
1537
1538		/*
 1539		 * If any vma in the range has a policy other than MPOL_BIND
 1540		 * or MPOL_PREFERRED_MANY, we return an error. We don't reset
 1541		 * the home node for vmas we already updated before.
1542		 */
1543		if (new->mode != MPOL_BIND && new->mode != MPOL_PREFERRED_MANY) {
1544			mpol_put(new);
1545			err = -EOPNOTSUPP;
1546			break;
1547		}
1548
1549		new->home_node = home_node;
1550		err = mbind_range(mm, vmstart, vmend, new);
1551		mpol_put(new);
1552		if (err)
1553			break;
1554	}
1555	mmap_write_unlock(mm);
1556	return err;
1557}
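/*
 * Userspace usage sketch (hedged: there is no libc wrapper, so a raw
 * syscall is assumed, and home node 2 is just an example value):
 *
 *	syscall(__NR_set_mempolicy_home_node, addr, len, 2, 0);
 *
 * As checked above, this only succeeds for ranges whose VMAs already carry
 * an MPOL_BIND or MPOL_PREFERRED_MANY policy.
 */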
1558
1559SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1560		unsigned long, mode, const unsigned long __user *, nmask,
1561		unsigned long, maxnode, unsigned int, flags)
1562{
1563	return kernel_mbind(start, len, mode, nmask, maxnode, flags);
1564}
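/*
 * Userspace usage sketch (hedged example via the <numaif.h> wrapper; addr,
 * len and the node numbers are assumed values):
 *
 *	unsigned long mask = (1UL << 0) | (1UL << 1);	// nodes 0 and 1
 *	mbind(addr, len, MPOL_BIND, &mask, sizeof(mask) * 8, MPOL_MF_MOVE);
 *
 * maxnode is the bit width of the mask; a full unsigned long is plenty for
 * low node numbers.
 */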
1565
1566/* Set the process memory policy */
1567static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
1568				 unsigned long maxnode)
1569{
1570	unsigned short mode_flags;
1571	nodemask_t nodes;
1572	int lmode = mode;
1573	int err;
1574
1575	err = sanitize_mpol_flags(&lmode, &mode_flags);
1576	if (err)
1577		return err;
1578
1579	err = get_nodes(&nodes, nmask, maxnode);
1580	if (err)
1581		return err;
1582
1583	return do_set_mempolicy(lmode, mode_flags, &nodes);
1584}
1585
1586SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1587		unsigned long, maxnode)
1588{
1589	return kernel_set_mempolicy(mode, nmask, maxnode);
1590}
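/*
 * Userspace usage sketch (hedged example via the <numaif.h> wrapper; the
 * node numbers are assumed values):
 *
 *	unsigned long mask = (1UL << 0) | (1UL << 2);	// interleave 0 and 2
 *	set_mempolicy(MPOL_INTERLEAVE, &mask, sizeof(mask) * 8);
 */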
1591
1592static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
1593				const unsigned long __user *old_nodes,
1594				const unsigned long __user *new_nodes)
1595{
1596	struct mm_struct *mm = NULL;
1597	struct task_struct *task;
1598	nodemask_t task_nodes;
1599	int err;
1600	nodemask_t *old;
1601	nodemask_t *new;
1602	NODEMASK_SCRATCH(scratch);
1603
1604	if (!scratch)
1605		return -ENOMEM;
1606
1607	old = &scratch->mask1;
1608	new = &scratch->mask2;
1609
1610	err = get_nodes(old, old_nodes, maxnode);
1611	if (err)
1612		goto out;
1613
1614	err = get_nodes(new, new_nodes, maxnode);
1615	if (err)
1616		goto out;
1617
1618	/* Find the mm_struct */
1619	rcu_read_lock();
1620	task = pid ? find_task_by_vpid(pid) : current;
1621	if (!task) {
1622		rcu_read_unlock();
1623		err = -ESRCH;
1624		goto out;
1625	}
1626	get_task_struct(task);
1627
1628	err = -EINVAL;
1629
1630	/*
1631	 * Check if this process has the right to modify the specified process.
1632	 * Use the regular "ptrace_may_access()" checks.
1633	 */
1634	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
1635		rcu_read_unlock();
1636		err = -EPERM;
1637		goto out_put;
1638	}
1639	rcu_read_unlock();
1640
1641	task_nodes = cpuset_mems_allowed(task);
1642	/* Is the user allowed to access the target nodes? */
1643	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
1644		err = -EPERM;
1645		goto out_put;
1646	}
1647
1648	task_nodes = cpuset_mems_allowed(current);
1649	nodes_and(*new, *new, task_nodes);
1650	if (nodes_empty(*new))
1651		goto out_put;
1652
1653	err = security_task_movememory(task);
1654	if (err)
1655		goto out_put;
1656
1657	mm = get_task_mm(task);
1658	put_task_struct(task);
1659
1660	if (!mm) {
1661		err = -EINVAL;
1662		goto out;
1663	}
1664
1665	err = do_migrate_pages(mm, old, new,
1666		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
1667
1668	mmput(mm);
1669out:
1670	NODEMASK_SCRATCH_FREE(scratch);
1671
1672	return err;
1673
1674out_put:
1675	put_task_struct(task);
1676	goto out;
1677
1678}
1679
1680SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1681		const unsigned long __user *, old_nodes,
1682		const unsigned long __user *, new_nodes)
1683{
1684	return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
1685}
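/*
 * Userspace usage sketch (hedged example via the <numaif.h> wrapper; pid and
 * the node numbers are assumed values):
 *
 *	unsigned long old = 1UL << 0, new = 1UL << 1;
 *	migrate_pages(pid, sizeof(old) * 8, &old, &new);	// move node 0 -> 1
 */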
1686
1687
1688/* Retrieve NUMA policy */
1689static int kernel_get_mempolicy(int __user *policy,
1690				unsigned long __user *nmask,
1691				unsigned long maxnode,
1692				unsigned long addr,
1693				unsigned long flags)
1694{
1695	int err;
1696	int pval;
1697	nodemask_t nodes;
1698
1699	if (nmask != NULL && maxnode < nr_node_ids)
1700		return -EINVAL;
1701
1702	addr = untagged_addr(addr);
1703
1704	err = do_get_mempolicy(&pval, &nodes, addr, flags);
1705
1706	if (err)
1707		return err;
1708
1709	if (policy && put_user(pval, policy))
1710		return -EFAULT;
1711
1712	if (nmask)
1713		err = copy_nodes_to_user(nmask, maxnode, &nodes);
1714
1715	return err;
1716}
1717
1718SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1719		unsigned long __user *, nmask, unsigned long, maxnode,
1720		unsigned long, addr, unsigned long, flags)
1721{
1722	return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
1723}
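/*
 * Userspace usage sketch (hedged example via the <numaif.h> wrapper; addr is
 * an assumed mapped address, and a single unsigned long only suffices while
 * nr_node_ids <= BITS_PER_LONG):
 *
 *	int mode;
 *	unsigned long mask = 0;
 *	get_mempolicy(&mode, &mask, sizeof(mask) * 8, addr, MPOL_F_ADDR);
 */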
1724
1725bool vma_migratable(struct vm_area_struct *vma)
1726{
1727	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
1728		return false;
1729
1730	/*
1731	 * DAX device mappings require predictable access latency, so avoid
1732	 * incurring periodic faults.
1733	 */
1734	if (vma_is_dax(vma))
1735		return false;
1736
1737	if (is_vm_hugetlb_page(vma) &&
1738		!hugepage_migration_supported(hstate_vma(vma)))
1739		return false;
1740
1741	/*
1742	 * Migration allocates pages in the highest zone. If we cannot
1743	 * do so then migration (at least from node to node) is not
1744	 * possible.
1745	 */
1746	if (vma->vm_file &&
1747		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
1748			< policy_zone)
1749		return false;
1750	return true;
1751}
1752
1753struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
1754						unsigned long addr)
1755{
1756	struct mempolicy *pol = NULL;
1757
1758	if (vma) {
1759		if (vma->vm_ops && vma->vm_ops->get_policy) {
1760			pol = vma->vm_ops->get_policy(vma, addr);
1761		} else if (vma->vm_policy) {
1762			pol = vma->vm_policy;
1763
1764			/*
1765			 * shmem_alloc_page() passes MPOL_F_SHARED policy with
1766			 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
1767			 * count on these policies which will be dropped by
1768			 * mpol_cond_put() later
1769			 */
1770			if (mpol_needs_cond_ref(pol))
1771				mpol_get(pol);
1772		}
1773	}
1774
1775	return pol;
1776}
1777
1778/*
1779 * get_vma_policy(@vma, @addr)
1780 * @vma: virtual memory area whose policy is sought
1781 * @addr: address in @vma for shared policy lookup
1782 *
1783 * Returns effective policy for a VMA at specified address.
1784 * Falls back to current->mempolicy or system default policy, as necessary.
1785 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1786 * count--added by the get_policy() vm_op, as appropriate--to protect against
1787 * freeing by another task.  It is the caller's responsibility to free the
1788 * extra reference for shared policies.
1789 */
1790static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1791						unsigned long addr)
1792{
1793	struct mempolicy *pol = __get_vma_policy(vma, addr);
1794
1795	if (!pol)
1796		pol = get_task_policy(current);
1797
1798	return pol;
1799}
1800
1801bool vma_policy_mof(struct vm_area_struct *vma)
1802{
1803	struct mempolicy *pol;
1804
1805	if (vma->vm_ops && vma->vm_ops->get_policy) {
1806		bool ret = false;
1807
1808		pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1809		if (pol && (pol->flags & MPOL_F_MOF))
1810			ret = true;
1811		mpol_cond_put(pol);
1812
1813		return ret;
1814	}
1815
1816	pol = vma->vm_policy;
1817	if (!pol)
1818		pol = get_task_policy(current);
1819
1820	return pol->flags & MPOL_F_MOF;
1821}
1822
1823bool apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1824{
1825	enum zone_type dynamic_policy_zone = policy_zone;
1826
1827	BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1828
1829	/*
 1830	 * If policy->nodes has movable memory only,
 1831	 * we apply the policy only when gfp_zone(gfp) == ZONE_MOVABLE.
 1832	 *
 1833	 * policy->nodes intersects with node_states[N_MEMORY], so if
 1834	 * the following test fails, it implies
 1835	 * policy->nodes has movable memory only.
1836	 */
1837	if (!nodes_intersects(policy->nodes, node_states[N_HIGH_MEMORY]))
1838		dynamic_policy_zone = ZONE_MOVABLE;
1839
1840	return zone >= dynamic_policy_zone;
1841}
1842
1843/*
1844 * Return a nodemask representing a mempolicy for filtering nodes for
1845 * page allocation
1846 */
1847nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
1848{
1849	int mode = policy->mode;
1850
1851	/* Lower zones don't get a nodemask applied for MPOL_BIND */
1852	if (unlikely(mode == MPOL_BIND) &&
1853		apply_policy_zone(policy, gfp_zone(gfp)) &&
1854		cpuset_nodemask_valid_mems_allowed(&policy->nodes))
1855		return &policy->nodes;
1856
1857	if (mode == MPOL_PREFERRED_MANY)
1858		return &policy->nodes;
1859
1860	return NULL;
1861}
1862
1863/*
 1864 * Return the preferred node id for 'prefer' mempolicy, and return
1865 * the given id for all other policies.
1866 *
1867 * policy_node() is always coupled with policy_nodemask(), which
1868 * secures the nodemask limit for 'bind' and 'prefer-many' policy.
1869 */
1870static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd)
1871{
1872	if (policy->mode == MPOL_PREFERRED) {
1873		nd = first_node(policy->nodes);
1874	} else {
1875		/*
1876		 * __GFP_THISNODE shouldn't even be used with the bind policy
1877		 * because we might easily break the expectation to stay on the
1878		 * requested node and not break the policy.
1879		 */
1880		WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
1881	}
1882
1883	if ((policy->mode == MPOL_BIND ||
1884	     policy->mode == MPOL_PREFERRED_MANY) &&
1885	    policy->home_node != NUMA_NO_NODE)
1886		return policy->home_node;
1887
1888	return nd;
1889}
1890
1891/* Do dynamic interleaving for a process */
1892static unsigned interleave_nodes(struct mempolicy *policy)
1893{
1894	unsigned next;
1895	struct task_struct *me = current;
1896
1897	next = next_node_in(me->il_prev, policy->nodes);
1898	if (next < MAX_NUMNODES)
1899		me->il_prev = next;
1900	return next;
1901}
1902
1903/*
1904 * Depending on the memory policy provide a node from which to allocate the
1905 * next slab entry.
1906 */
1907unsigned int mempolicy_slab_node(void)
1908{
1909	struct mempolicy *policy;
1910	int node = numa_mem_id();
1911
1912	if (!in_task())
1913		return node;
1914
1915	policy = current->mempolicy;
1916	if (!policy)
1917		return node;
1918
1919	switch (policy->mode) {
1920	case MPOL_PREFERRED:
1921		return first_node(policy->nodes);
1922
1923	case MPOL_INTERLEAVE:
1924		return interleave_nodes(policy);
1925
1926	case MPOL_BIND:
1927	case MPOL_PREFERRED_MANY:
1928	{
1929		struct zoneref *z;
1930
1931		/*
1932		 * Follow bind policy behavior and start allocation at the
1933		 * first node.
1934		 */
1935		struct zonelist *zonelist;
1936		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1937		zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
1938		z = first_zones_zonelist(zonelist, highest_zoneidx,
1939							&policy->nodes);
1940		return z->zone ? zone_to_nid(z->zone) : node;
1941	}
1942	case MPOL_LOCAL:
1943		return node;
1944
1945	default:
1946		BUG();
1947	}
1948}
1949
1950/*
1951 * Do static interleaving for a VMA with known offset @n.  Returns the n'th
1952 * node in pol->nodes (starting from n=0), wrapping around if n exceeds the
1953 * number of present nodes.
1954 */
1955static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
1956{
1957	nodemask_t nodemask = pol->nodes;
1958	unsigned int target, nnodes;
1959	int i;
1960	int nid;
1961	/*
1962	 * The barrier will stabilize the nodemask in a register or on
1963	 * the stack so that it will stop changing under the code.
1964	 *
1965	 * Between first_node() and next_node(), pol->nodes could be changed
1966	 * by other threads. So we put pol->nodes in a local stack.
1967	 */
1968	barrier();
1969
1970	nnodes = nodes_weight(nodemask);
1971	if (!nnodes)
1972		return numa_node_id();
1973	target = (unsigned int)n % nnodes;
1974	nid = first_node(nodemask);
1975	for (i = 0; i < target; i++)
1976		nid = next_node(nid, nodemask);
1977	return nid;
1978}
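/*
 * Worked example with assumed values: for pol->nodes = {0,2,5} and n = 4,
 * nnodes is 3 and target is 4 % 3 = 1, so the walk starts at node 0 and
 * takes one next_node() step, returning node 2.
 */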
1979
1980/* Determine a node number for interleave */
1981static inline unsigned interleave_nid(struct mempolicy *pol,
1982		 struct vm_area_struct *vma, unsigned long addr, int shift)
1983{
1984	if (vma) {
1985		unsigned long off;
1986
1987		/*
1988		 * for small pages, there is no difference between
1989		 * shift and PAGE_SHIFT, so the bit-shift is safe.
1990		 * for huge pages, since vm_pgoff is in units of small
1991		 * pages, we need to shift off the always 0 bits to get
1992		 * a useful offset.
1993		 */
1994		BUG_ON(shift < PAGE_SHIFT);
1995		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
1996		off += (addr - vma->vm_start) >> shift;
1997		return offset_il_node(pol, off);
1998	} else
1999		return interleave_nodes(pol);
2000}
2001
2002#ifdef CONFIG_HUGETLBFS
2003/*
2004 * huge_node(@vma, @addr, @gfp_flags, @mpol)
2005 * @vma: virtual memory area whose policy is sought
2006 * @addr: address in @vma for shared policy lookup and interleave policy
2007 * @gfp_flags: for requested zone
2008 * @mpol: pointer to mempolicy pointer for reference counted mempolicy
2009 * @nodemask: pointer to nodemask pointer for 'bind' and 'prefer-many' policy
2010 *
2011 * Returns a nid suitable for a huge page allocation and a pointer
2012 * to the struct mempolicy for conditional unref after allocation.
2013 * If the effective policy is 'bind' or 'prefer-many', returns a pointer
2014 * to the mempolicy's @nodemask for filtering the zonelist.
2015 *
2016 * Must be protected by read_mems_allowed_begin()
2017 */
2018int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
2019				struct mempolicy **mpol, nodemask_t **nodemask)
2020{
2021	int nid;
2022	int mode;
2023
2024	*mpol = get_vma_policy(vma, addr);
2025	*nodemask = NULL;
2026	mode = (*mpol)->mode;
2027
2028	if (unlikely(mode == MPOL_INTERLEAVE)) {
2029		nid = interleave_nid(*mpol, vma, addr,
2030					huge_page_shift(hstate_vma(vma)));
2031	} else {
2032		nid = policy_node(gfp_flags, *mpol, numa_node_id());
2033		if (mode == MPOL_BIND || mode == MPOL_PREFERRED_MANY)
2034			*nodemask = &(*mpol)->nodes;
2035	}
2036	return nid;
2037}
2038
2039/*
2040 * init_nodemask_of_mempolicy
2041 *
2042 * If the current task's mempolicy is "default" [NULL], return 'false'
2043 * to indicate default policy.  Otherwise, extract the policy nodemask
2044 * for 'bind' or 'interleave' policy into the argument nodemask, or
2045 * initialize the argument nodemask to contain the single node for
2046 * 'preferred' or 'local' policy and return 'true' to indicate presence
2047 * of non-default mempolicy.
2048 *
2049 * We don't bother with reference counting the mempolicy [mpol_get/put]
 2050 * because the current task is examining its own mempolicy and a task's
2051 * mempolicy is only ever changed by the task itself.
2052 *
2053 * N.B., it is the caller's responsibility to free a returned nodemask.
2054 */
2055bool init_nodemask_of_mempolicy(nodemask_t *mask)
2056{
2057	struct mempolicy *mempolicy;
2058
2059	if (!(mask && current->mempolicy))
2060		return false;
2061
2062	task_lock(current);
2063	mempolicy = current->mempolicy;
2064	switch (mempolicy->mode) {
2065	case MPOL_PREFERRED:
2066	case MPOL_PREFERRED_MANY:
2067	case MPOL_BIND:
2068	case MPOL_INTERLEAVE:
2069		*mask = mempolicy->nodes;
2070		break;
2071
2072	case MPOL_LOCAL:
2073		init_nodemask_of_node(mask, numa_node_id());
2074		break;
2075
2076	default:
2077		BUG();
2078	}
2079	task_unlock(current);
2080
2081	return true;
2082}
2083#endif
2084
2085/*
2086 * mempolicy_in_oom_domain
2087 *
2088 * If tsk's mempolicy is "bind", check for intersection between mask and
2089 * the policy nodemask. Otherwise, return true for all other policies
2090 * including "interleave", as a tsk with "interleave" policy may have
2091 * memory allocated from all nodes in system.
2092 *
2093 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
2094 */
2095bool mempolicy_in_oom_domain(struct task_struct *tsk,
2096					const nodemask_t *mask)
2097{
2098	struct mempolicy *mempolicy;
2099	bool ret = true;
2100
2101	if (!mask)
2102		return ret;
2103
2104	task_lock(tsk);
2105	mempolicy = tsk->mempolicy;
2106	if (mempolicy && mempolicy->mode == MPOL_BIND)
2107		ret = nodes_intersects(mempolicy->nodes, *mask);
2108	task_unlock(tsk);
2109
2110	return ret;
2111}
2112
2113/* Allocate a page in interleaved policy.
2114   Own path because it needs to do special accounting. */
2115static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
2116					unsigned nid)
2117{
2118	struct page *page;
2119
2120	page = __alloc_pages(gfp, order, nid, NULL);
2121	/* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
2122	if (!static_branch_likely(&vm_numa_stat_key))
2123		return page;
2124	if (page && page_to_nid(page) == nid) {
2125		preempt_disable();
2126		__count_numa_event(page_zone(page), NUMA_INTERLEAVE_HIT);
2127		preempt_enable();
2128	}
2129	return page;
2130}
2131
2132static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order,
2133						int nid, struct mempolicy *pol)
2134{
2135	struct page *page;
2136	gfp_t preferred_gfp;
2137
2138	/*
2139	 * This is a two pass approach. The first pass will only try the
2140	 * preferred nodes but skip the direct reclaim and allow the
2141	 * allocation to fail, while the second pass will try all the
2142	 * nodes in system.
 2143	 * nodes in the system.
2144	preferred_gfp = gfp | __GFP_NOWARN;
2145	preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2146	page = __alloc_pages(preferred_gfp, order, nid, &pol->nodes);
2147	if (!page)
2148		page = __alloc_pages(gfp, order, nid, NULL);
2149
2150	return page;
2151}
2152
2153/**
2154 * vma_alloc_folio - Allocate a folio for a VMA.
2155 * @gfp: GFP flags.
2156 * @order: Order of the folio.
2157 * @vma: Pointer to VMA or NULL if not available.
2158 * @addr: Virtual address of the allocation.  Must be inside @vma.
2159 * @hugepage: For hugepages try only the preferred node if possible.
2160 *
2161 * Allocate a folio for a specific address in @vma, using the appropriate
2162 * NUMA policy.  When @vma is not NULL the caller must hold the mmap_lock
2163 * of the mm_struct of the VMA to prevent it from going away.  Should be
2164 * used for all allocations for folios that will be mapped into user space.
2165 *
2166 * Return: The folio on success or NULL if allocation fails.
2167 */
2168struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
2169		unsigned long addr, bool hugepage)
2170{
2171	struct mempolicy *pol;
2172	int node = numa_node_id();
2173	struct folio *folio;
2174	int preferred_nid;
2175	nodemask_t *nmask;
2176
2177	pol = get_vma_policy(vma, addr);
2178
2179	if (pol->mode == MPOL_INTERLEAVE) {
2180		struct page *page;
2181		unsigned nid;
2182
2183		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
2184		mpol_cond_put(pol);
2185		gfp |= __GFP_COMP;
2186		page = alloc_page_interleave(gfp, order, nid);
2187		if (page && order > 1)
2188			prep_transhuge_page(page);
2189		folio = (struct folio *)page;
2190		goto out;
2191	}
2192
2193	if (pol->mode == MPOL_PREFERRED_MANY) {
2194		struct page *page;
2195
2196		node = policy_node(gfp, pol, node);
2197		gfp |= __GFP_COMP;
2198		page = alloc_pages_preferred_many(gfp, order, node, pol);
2199		mpol_cond_put(pol);
2200		if (page && order > 1)
2201			prep_transhuge_page(page);
2202		folio = (struct folio *)page;
2203		goto out;
2204	}
2205
2206	if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
2207		int hpage_node = node;
2208
2209		/*
2210		 * For hugepage allocation and non-interleave policy which
2211		 * allows the current node (or other explicitly preferred
2212		 * node) we only try to allocate from the current/preferred
2213		 * node and don't fall back to other nodes, as the cost of
2214		 * remote accesses would likely offset THP benefits.
2215		 *
2216		 * If the policy is interleave or does not allow the current
2217		 * node in its nodemask, we allocate the standard way.
2218		 */
2219		if (pol->mode == MPOL_PREFERRED)
2220			hpage_node = first_node(pol->nodes);
2221
2222		nmask = policy_nodemask(gfp, pol);
2223		if (!nmask || node_isset(hpage_node, *nmask)) {
2224			mpol_cond_put(pol);
2225			/*
2226			 * First, try to allocate THP only on local node, but
2227			 * don't reclaim unnecessarily, just compact.
2228			 */
2229			folio = __folio_alloc_node(gfp | __GFP_THISNODE |
2230					__GFP_NORETRY, order, hpage_node);
2231
2232			/*
 2233			 * If hugepage allocations are configured to always use
 2234			 * synchronous compaction or the vma has been madvised
2235			 * to prefer hugepage backing, retry allowing remote
2236			 * memory with both reclaim and compact as well.
2237			 */
2238			if (!folio && (gfp & __GFP_DIRECT_RECLAIM))
2239				folio = __folio_alloc(gfp, order, hpage_node,
2240						      nmask);
2241
2242			goto out;
2243		}
2244	}
2245
2246	nmask = policy_nodemask(gfp, pol);
2247	preferred_nid = policy_node(gfp, pol, node);
2248	folio = __folio_alloc(gfp, order, preferred_nid, nmask);
2249	mpol_cond_put(pol);
2250out:
2251	return folio;
2252}
2253EXPORT_SYMBOL(vma_alloc_folio);
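/*
 * In-kernel usage sketch (hedged; gfp, vma and addr are assumed to come from
 * a fault path that holds the mmap_lock):
 *
 *	struct folio *folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0,
 *					      vma, addr, false);
 */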
2254
2255/**
2256 * alloc_pages - Allocate pages.
2257 * @gfp: GFP flags.
2258 * @order: Power of two of number of pages to allocate.
2259 *
2260 * Allocate 1 << @order contiguous pages.  The physical address of the
2261 * first page is naturally aligned (eg an order-3 allocation will be aligned
 2262 * first page is naturally aligned (e.g. an order-3 allocation will be aligned
2263 * process is honoured when in process context.
2264 *
2265 * Context: Can be called from any context, providing the appropriate GFP
2266 * flags are used.
2267 * Return: The page on success or NULL if allocation fails.
2268 */
2269struct page *alloc_pages(gfp_t gfp, unsigned order)
2270{
2271	struct mempolicy *pol = &default_policy;
2272	struct page *page;
2273
2274	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2275		pol = get_task_policy(current);
2276
2277	/*
2278	 * No reference counting needed for current->mempolicy
2279	 * nor system default_policy
2280	 */
2281	if (pol->mode == MPOL_INTERLEAVE)
2282		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2283	else if (pol->mode == MPOL_PREFERRED_MANY)
2284		page = alloc_pages_preferred_many(gfp, order,
2285				  policy_node(gfp, pol, numa_node_id()), pol);
2286	else
2287		page = __alloc_pages(gfp, order,
2288				policy_node(gfp, pol, numa_node_id()),
2289				policy_nodemask(gfp, pol));
2290
2291	return page;
2292}
2293EXPORT_SYMBOL(alloc_pages);
2294
2295struct folio *folio_alloc(gfp_t gfp, unsigned order)
2296{
2297	struct page *page = alloc_pages(gfp | __GFP_COMP, order);
2298
2299	if (page && order > 1)
2300		prep_transhuge_page(page);
2301	return (struct folio *)page;
2302}
2303EXPORT_SYMBOL(folio_alloc);
2304
2305static unsigned long alloc_pages_bulk_array_interleave(gfp_t gfp,
2306		struct mempolicy *pol, unsigned long nr_pages,
2307		struct page **page_array)
2308{
2309	int nodes;
2310	unsigned long nr_pages_per_node;
2311	int delta;
2312	int i;
2313	unsigned long nr_allocated;
2314	unsigned long total_allocated = 0;
2315
2316	nodes = nodes_weight(pol->nodes);
2317	nr_pages_per_node = nr_pages / nodes;
2318	delta = nr_pages - nodes * nr_pages_per_node;
2319
2320	for (i = 0; i < nodes; i++) {
2321		if (delta) {
2322			nr_allocated = __alloc_pages_bulk(gfp,
2323					interleave_nodes(pol), NULL,
2324					nr_pages_per_node + 1, NULL,
2325					page_array);
2326			delta--;
2327		} else {
2328			nr_allocated = __alloc_pages_bulk(gfp,
2329					interleave_nodes(pol), NULL,
2330					nr_pages_per_node, NULL, page_array);
2331		}
2332
2333		page_array += nr_allocated;
2334		total_allocated += nr_allocated;
2335	}
2336
2337	return total_allocated;
2338}
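/*
 * Worked example with assumed values: bulk-allocating nr_pages = 10 over a
 * 3-node interleave mask gives nr_pages_per_node = 3 and delta = 1, so the
 * first node returned by interleave_nodes() serves 4 pages and the other two
 * nodes serve 3 pages each.
 */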
2339
2340static unsigned long alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid,
2341		struct mempolicy *pol, unsigned long nr_pages,
2342		struct page **page_array)
2343{
2344	gfp_t preferred_gfp;
2345	unsigned long nr_allocated = 0;
2346
2347	preferred_gfp = gfp | __GFP_NOWARN;
2348	preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2349
2350	nr_allocated  = __alloc_pages_bulk(preferred_gfp, nid, &pol->nodes,
2351					   nr_pages, NULL, page_array);
2352
2353	if (nr_allocated < nr_pages)
2354		nr_allocated += __alloc_pages_bulk(gfp, numa_node_id(), NULL,
2355				nr_pages - nr_allocated, NULL,
2356				page_array + nr_allocated);
2357	return nr_allocated;
2358}
2359
 2360/* Bulk page allocation and the mempolicy should be considered at the
 2361 * same time in some situations, such as vmalloc.
 2362 *
 2363 * It can accelerate memory allocation, especially for the
 2364 * interleave policy.
2365 */
2366unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp,
2367		unsigned long nr_pages, struct page **page_array)
2368{
2369	struct mempolicy *pol = &default_policy;
2370
2371	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2372		pol = get_task_policy(current);
2373
2374	if (pol->mode == MPOL_INTERLEAVE)
2375		return alloc_pages_bulk_array_interleave(gfp, pol,
2376							 nr_pages, page_array);
2377
2378	if (pol->mode == MPOL_PREFERRED_MANY)
2379		return alloc_pages_bulk_array_preferred_many(gfp,
2380				numa_node_id(), pol, nr_pages, page_array);
2381
2382	return __alloc_pages_bulk(gfp, policy_node(gfp, pol, numa_node_id()),
2383				  policy_nodemask(gfp, pol), nr_pages, NULL,
2384				  page_array);
2385}
2386
2387int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2388{
2389	struct mempolicy *pol = mpol_dup(vma_policy(src));
2390
2391	if (IS_ERR(pol))
2392		return PTR_ERR(pol);
2393	dst->vm_policy = pol;
2394	return 0;
2395}
2396
2397/*
2398 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
 2399 * rebinds the mempolicy it's copying by calling mpol_rebind_policy()
2400 * with the mems_allowed returned by cpuset_mems_allowed().  This
2401 * keeps mempolicies cpuset relative after its cpuset moves.  See
2402 * further kernel/cpuset.c update_nodemask().
2403 *
 2404 * current's mempolicy may be rebound by another task (the task that changes
 2405 * the cpuset's mems), so we needn't do rebind work for the current task.
2406 */
2407
2408/* Slow path of a mempolicy duplicate */
2409struct mempolicy *__mpol_dup(struct mempolicy *old)
2410{
2411	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2412
2413	if (!new)
2414		return ERR_PTR(-ENOMEM);
2415
2416	/* task's mempolicy is protected by alloc_lock */
2417	if (old == current->mempolicy) {
2418		task_lock(current);
2419		*new = *old;
2420		task_unlock(current);
2421	} else
2422		*new = *old;
2423
2424	if (current_cpuset_is_being_rebound()) {
2425		nodemask_t mems = cpuset_mems_allowed(current);
2426		mpol_rebind_policy(new, &mems);
2427	}
2428	atomic_set(&new->refcnt, 1);
2429	return new;
2430}
2431
2432/* Slow path of a mempolicy comparison */
2433bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
2434{
2435	if (!a || !b)
2436		return false;
2437	if (a->mode != b->mode)
2438		return false;
2439	if (a->flags != b->flags)
2440		return false;
2441	if (a->home_node != b->home_node)
2442		return false;
2443	if (mpol_store_user_nodemask(a))
2444		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2445			return false;
2446
2447	switch (a->mode) {
2448	case MPOL_BIND:
2449	case MPOL_INTERLEAVE:
2450	case MPOL_PREFERRED:
2451	case MPOL_PREFERRED_MANY:
2452		return !!nodes_equal(a->nodes, b->nodes);
2453	case MPOL_LOCAL:
2454		return true;
2455	default:
2456		BUG();
2457		return false;
2458	}
2459}
2460
2461/*
2462 * Shared memory backing store policy support.
2463 *
2464 * Remember policies even when nobody has shared memory mapped.
2465 * The policies are kept in Red-Black tree linked from the inode.
2466 * They are protected by the sp->lock rwlock, which should be held
2467 * for any accesses to the tree.
2468 */
2469
2470/*
2471 * lookup first element intersecting start-end.  Caller holds sp->lock for
2472 * reading or for writing
2473 */
2474static struct sp_node *
2475sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2476{
2477	struct rb_node *n = sp->root.rb_node;
2478
2479	while (n) {
2480		struct sp_node *p = rb_entry(n, struct sp_node, nd);
2481
2482		if (start >= p->end)
2483			n = n->rb_right;
2484		else if (end <= p->start)
2485			n = n->rb_left;
2486		else
2487			break;
2488	}
2489	if (!n)
2490		return NULL;
2491	for (;;) {
2492		struct sp_node *w = NULL;
2493		struct rb_node *prev = rb_prev(n);
2494		if (!prev)
2495			break;
2496		w = rb_entry(prev, struct sp_node, nd);
2497		if (w->end <= start)
2498			break;
2499		n = prev;
2500	}
2501	return rb_entry(n, struct sp_node, nd);
2502}
2503
2504/*
2505 * Insert a new shared policy into the list.  Caller holds sp->lock for
2506 * writing.
2507 */
2508static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2509{
2510	struct rb_node **p = &sp->root.rb_node;
2511	struct rb_node *parent = NULL;
2512	struct sp_node *nd;
2513
2514	while (*p) {
2515		parent = *p;
2516		nd = rb_entry(parent, struct sp_node, nd);
2517		if (new->start < nd->start)
2518			p = &(*p)->rb_left;
2519		else if (new->end > nd->end)
2520			p = &(*p)->rb_right;
2521		else
2522			BUG();
2523	}
2524	rb_link_node(&new->nd, parent, p);
2525	rb_insert_color(&new->nd, &sp->root);
2526	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
2527		 new->policy ? new->policy->mode : 0);
2528}
2529
2530/* Find shared policy intersecting idx */
2531struct mempolicy *
2532mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2533{
2534	struct mempolicy *pol = NULL;
2535	struct sp_node *sn;
2536
2537	if (!sp->root.rb_node)
2538		return NULL;
2539	read_lock(&sp->lock);
2540	sn = sp_lookup(sp, idx, idx+1);
2541	if (sn) {
2542		mpol_get(sn->policy);
2543		pol = sn->policy;
2544	}
2545	read_unlock(&sp->lock);
2546	return pol;
2547}
2548
2549static void sp_free(struct sp_node *n)
2550{
2551	mpol_put(n->policy);
2552	kmem_cache_free(sn_cache, n);
2553}
2554
2555/**
2556 * mpol_misplaced - check whether current page node is valid in policy
2557 *
2558 * @page: page to be checked
2559 * @vma: vm area where page mapped
2560 * @addr: virtual address where page mapped
2561 *
2562 * Lookup current policy node id for vma,addr and "compare to" page's
2563 * node id.  Policy determination "mimics" alloc_page_vma().
2564 * Called from fault path where we know the vma and faulting address.
2565 *
2566 * Return: NUMA_NO_NODE if the page is in a node that is valid for this
2567 * policy, or a suitable node ID to allocate a replacement page from.
2568 */
2569int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2570{
2571	struct mempolicy *pol;
2572	struct zoneref *z;
2573	int curnid = page_to_nid(page);
2574	unsigned long pgoff;
2575	int thiscpu = raw_smp_processor_id();
2576	int thisnid = cpu_to_node(thiscpu);
2577	int polnid = NUMA_NO_NODE;
2578	int ret = NUMA_NO_NODE;
2579
2580	pol = get_vma_policy(vma, addr);
2581	if (!(pol->flags & MPOL_F_MOF))
2582		goto out;
2583
2584	switch (pol->mode) {
2585	case MPOL_INTERLEAVE:
2586		pgoff = vma->vm_pgoff;
2587		pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
2588		polnid = offset_il_node(pol, pgoff);
2589		break;
2590
2591	case MPOL_PREFERRED:
2592		if (node_isset(curnid, pol->nodes))
2593			goto out;
2594		polnid = first_node(pol->nodes);
2595		break;
2596
2597	case MPOL_LOCAL:
2598		polnid = numa_node_id();
2599		break;
2600
2601	case MPOL_BIND:
2602		/* Optimize placement among multiple nodes via NUMA balancing */
2603		if (pol->flags & MPOL_F_MORON) {
2604			if (node_isset(thisnid, pol->nodes))
2605				break;
2606			goto out;
2607		}
2608		fallthrough;
2609
2610	case MPOL_PREFERRED_MANY:
2611		/*
2612		 * use current page if in policy nodemask,
2613		 * else select nearest allowed node, if any.
2614		 * If no allowed nodes, use current [!misplaced].
2615		 */
2616		if (node_isset(curnid, pol->nodes))
2617			goto out;
2618		z = first_zones_zonelist(
2619				node_zonelist(numa_node_id(), GFP_HIGHUSER),
2620				gfp_zone(GFP_HIGHUSER),
2621				&pol->nodes);
2622		polnid = zone_to_nid(z->zone);
2623		break;
2624
2625	default:
2626		BUG();
2627	}
2628
2629	/* Migrate the page towards the node whose CPU is referencing it */
2630	if (pol->flags & MPOL_F_MORON) {
2631		polnid = thisnid;
2632
2633		if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
2634			goto out;
2635	}
2636
2637	if (curnid != polnid)
2638		ret = polnid;
2639out:
2640	mpol_cond_put(pol);
2641
2642	return ret;
2643}
2644
2645/*
2646 * Drop the (possibly final) reference to task->mempolicy.  It needs to be
2647 * dropped after task->mempolicy is set to NULL so that any allocation done as
2648 * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2649 * policy.
2650 */
2651void mpol_put_task_policy(struct task_struct *task)
2652{
2653	struct mempolicy *pol;
2654
2655	task_lock(task);
2656	pol = task->mempolicy;
2657	task->mempolicy = NULL;
2658	task_unlock(task);
2659	mpol_put(pol);
2660}
2661
2662static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2663{
 2664	pr_debug("deleting %lx-%lx\n", n->start, n->end);
2665	rb_erase(&n->nd, &sp->root);
2666	sp_free(n);
2667}
2668
2669static void sp_node_init(struct sp_node *node, unsigned long start,
2670			unsigned long end, struct mempolicy *pol)
2671{
2672	node->start = start;
2673	node->end = end;
2674	node->policy = pol;
2675}
2676
2677static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2678				struct mempolicy *pol)
2679{
2680	struct sp_node *n;
2681	struct mempolicy *newpol;
2682
2683	n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2684	if (!n)
2685		return NULL;
2686
2687	newpol = mpol_dup(pol);
2688	if (IS_ERR(newpol)) {
2689		kmem_cache_free(sn_cache, n);
2690		return NULL;
2691	}
2692	newpol->flags |= MPOL_F_SHARED;
2693	sp_node_init(n, start, end, newpol);
2694
2695	return n;
2696}
2697
2698/* Replace a policy range. */
2699static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2700				 unsigned long end, struct sp_node *new)
2701{
2702	struct sp_node *n;
2703	struct sp_node *n_new = NULL;
2704	struct mempolicy *mpol_new = NULL;
2705	int ret = 0;
2706
2707restart:
2708	write_lock(&sp->lock);
2709	n = sp_lookup(sp, start, end);
2710	/* Take care of old policies in the same range. */
2711	while (n && n->start < end) {
2712		struct rb_node *next = rb_next(&n->nd);
2713		if (n->start >= start) {
2714			if (n->end <= end)
2715				sp_delete(sp, n);
2716			else
2717				n->start = end;
2718		} else {
2719			/* Old policy spanning whole new range. */
2720			if (n->end > end) {
2721				if (!n_new)
2722					goto alloc_new;
2723
2724				*mpol_new = *n->policy;
2725				atomic_set(&mpol_new->refcnt, 1);
2726				sp_node_init(n_new, end, n->end, mpol_new);
2727				n->end = start;
2728				sp_insert(sp, n_new);
2729				n_new = NULL;
2730				mpol_new = NULL;
2731				break;
2732			} else
2733				n->end = start;
2734		}
2735		if (!next)
2736			break;
2737		n = rb_entry(next, struct sp_node, nd);
2738	}
2739	if (new)
2740		sp_insert(sp, new);
2741	write_unlock(&sp->lock);
2742	ret = 0;
2743
2744err_out:
2745	if (mpol_new)
2746		mpol_put(mpol_new);
2747	if (n_new)
2748		kmem_cache_free(sn_cache, n_new);
2749
2750	return ret;
2751
2752alloc_new:
2753	write_unlock(&sp->lock);
2754	ret = -ENOMEM;
2755	n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2756	if (!n_new)
2757		goto err_out;
2758	mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2759	if (!mpol_new)
2760		goto err_out;
2761	atomic_set(&mpol_new->refcnt, 1);
2762	goto restart;
2763}
2764
2765/**
2766 * mpol_shared_policy_init - initialize shared policy for inode
2767 * @sp: pointer to inode shared policy
2768 * @mpol:  struct mempolicy to install
2769 *
2770 * Install non-NULL @mpol in inode's shared policy rb-tree.
2771 * On entry, the current task has a reference on a non-NULL @mpol.
2772 * This must be released on exit.
 2773 * This is called from get_inode() calls, so we can use GFP_KERNEL.
2774 */
2775void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2776{
2777	int ret;
2778
2779	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
2780	rwlock_init(&sp->lock);
2781
2782	if (mpol) {
2783		struct vm_area_struct pvma;
2784		struct mempolicy *new;
2785		NODEMASK_SCRATCH(scratch);
2786
2787		if (!scratch)
2788			goto put_mpol;
2789		/* contextualize the tmpfs mount point mempolicy */
2790		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
2791		if (IS_ERR(new))
2792			goto free_scratch; /* no valid nodemask intersection */
2793
2794		task_lock(current);
2795		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
2796		task_unlock(current);
2797		if (ret)
2798			goto put_new;
2799
2800		/* Create pseudo-vma that contains just the policy */
2801		vma_init(&pvma, NULL);
2802		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
2803		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
2804
2805put_new:
2806		mpol_put(new);			/* drop initial ref */
2807free_scratch:
2808		NODEMASK_SCRATCH_FREE(scratch);
2809put_mpol:
2810		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
2811	}
2812}
2813
2814int mpol_set_shared_policy(struct shared_policy *info,
2815			struct vm_area_struct *vma, struct mempolicy *npol)
2816{
2817	int err;
2818	struct sp_node *new = NULL;
2819	unsigned long sz = vma_pages(vma);
2820
2821	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
2822		 vma->vm_pgoff,
2823		 sz, npol ? npol->mode : -1,
2824		 npol ? npol->flags : -1,
2825		 npol ? nodes_addr(npol->nodes)[0] : NUMA_NO_NODE);
2826
2827	if (npol) {
2828		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2829		if (!new)
2830			return -ENOMEM;
2831	}
2832	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2833	if (err && new)
2834		sp_free(new);
2835	return err;
2836}
2837
2838/* Free a backing policy store on inode delete. */
2839void mpol_free_shared_policy(struct shared_policy *p)
2840{
2841	struct sp_node *n;
2842	struct rb_node *next;
2843
2844	if (!p->root.rb_node)
2845		return;
2846	write_lock(&p->lock);
2847	next = rb_first(&p->root);
2848	while (next) {
2849		n = rb_entry(next, struct sp_node, nd);
2850		next = rb_next(&n->nd);
2851		sp_delete(p, n);
2852	}
2853	write_unlock(&p->lock);
2854}
2855
2856#ifdef CONFIG_NUMA_BALANCING
2857static int __initdata numabalancing_override;
2858
2859static void __init check_numabalancing_enable(void)
2860{
2861	bool numabalancing_default = false;
2862
2863	if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
2864		numabalancing_default = true;
2865
2866	/* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2867	if (numabalancing_override)
2868		set_numabalancing_state(numabalancing_override == 1);
2869
2870	if (num_online_nodes() > 1 && !numabalancing_override) {
2871		pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
2872			numabalancing_default ? "Enabling" : "Disabling");
2873		set_numabalancing_state(numabalancing_default);
2874	}
2875}
2876
2877static int __init setup_numabalancing(char *str)
2878{
2879	int ret = 0;
2880	if (!str)
2881		goto out;
2882
2883	if (!strcmp(str, "enable")) {
2884		numabalancing_override = 1;
2885		ret = 1;
2886	} else if (!strcmp(str, "disable")) {
2887		numabalancing_override = -1;
2888		ret = 1;
2889	}
2890out:
2891	if (!ret)
2892		pr_warn("Unable to parse numa_balancing=\n");
2893
2894	return ret;
2895}
2896__setup("numa_balancing=", setup_numabalancing);
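/*
 * Example (kernel command line): "numa_balancing=disable" forces automatic
 * NUMA balancing off at boot, "numa_balancing=enable" forces it on,
 * overriding CONFIG_NUMA_BALANCING_DEFAULT_ENABLED.
 */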
2897#else
2898static inline void __init check_numabalancing_enable(void)
2899{
2900}
2901#endif /* CONFIG_NUMA_BALANCING */
2902
2903/* assumes fs == KERNEL_DS */
2904void __init numa_policy_init(void)
2905{
2906	nodemask_t interleave_nodes;
2907	unsigned long largest = 0;
2908	int nid, prefer = 0;
2909
2910	policy_cache = kmem_cache_create("numa_policy",
2911					 sizeof(struct mempolicy),
2912					 0, SLAB_PANIC, NULL);
2913
2914	sn_cache = kmem_cache_create("shared_policy_node",
2915				     sizeof(struct sp_node),
2916				     0, SLAB_PANIC, NULL);
2917
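	/*
	 * Per-node MPOL_PREFERRED policies; MPOL_F_MOF | MPOL_F_MORON lets
	 * automatic NUMA balancing migrate misplaced pages on fault.
	 */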
2918	for_each_node(nid) {
2919		preferred_node_policy[nid] = (struct mempolicy) {
2920			.refcnt = ATOMIC_INIT(1),
2921			.mode = MPOL_PREFERRED,
2922			.flags = MPOL_F_MOF | MPOL_F_MORON,
2923			.nodes = nodemask_of_node(nid),
2924		};
2925	}
2926
2927	/*
2928	 * Set interleaving policy for system init. Interleaving is only
2929	 * enabled across suitably sized nodes (default is >= 16MB), or
2930	 * fall back to the largest node if they're all smaller.
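	 * (With 4 KiB pages, the 16 MB cutoff is 4096 present pages per node.)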
2931	 */
2932	nodes_clear(interleave_nodes);
2933	for_each_node_state(nid, N_MEMORY) {
2934		unsigned long total_pages = node_present_pages(nid);
2935
2936		/* Preserve the largest node */
2937		if (largest < total_pages) {
2938			largest = total_pages;
2939			prefer = nid;
2940		}
2941
2942		/* Interleave this node? */
2943		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2944			node_set(nid, interleave_nodes);
2945	}
2946
2947	/* All too small, use the largest */
2948	if (unlikely(nodes_empty(interleave_nodes)))
2949		node_set(prefer, interleave_nodes);
2950
2951	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2952		pr_err("%s: interleaving failed\n", __func__);
2953
2954	check_numabalancing_enable();
2955}
2956
2957/* Reset policy of current process to default */
2958void numa_default_policy(void)
2959{
2960	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
2961}
2962
2963/*
2964 * Parse and format mempolicy from/to strings
2965 */
2966
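/* Mode names accepted by mpol_parse_str() and emitted by mpol_to_str(). */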
2967static const char * const policy_modes[] =
2968{
2969	[MPOL_DEFAULT]    = "default",
2970	[MPOL_PREFERRED]  = "prefer",
2971	[MPOL_BIND]       = "bind",
2972	[MPOL_INTERLEAVE] = "interleave",
2973	[MPOL_LOCAL]      = "local",
2974	[MPOL_PREFERRED_MANY]  = "prefer (many)",
2975};
2976
2977
2978#ifdef CONFIG_TMPFS
2979/**
2980 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
2981 * @str:  string containing mempolicy to parse
2982 * @mpol:  pointer to struct mempolicy pointer, returned on success.
2983 *
2984 * Format of input:
2985 *	<mode>[=<flags>][:<nodelist>]
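 *	e.g. (illustrative): "interleave:0-3", "bind=static:1,3", "prefer:2", "local"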
2986 *
2987 * Return: %0 on success, else %1
2988 */
2989int mpol_parse_str(char *str, struct mempolicy **mpol)
2990{
2991	struct mempolicy *new = NULL;
2992	unsigned short mode_flags;
2993	nodemask_t nodes;
2994	char *nodelist = strchr(str, ':');
2995	char *flags = strchr(str, '=');
2996	int err = 1, mode;
2997
2998	if (flags)
2999		*flags++ = '\0';	/* terminate mode string */
3000
3001	if (nodelist) {
3002		/* NUL-terminate mode or flags string */
3003		*nodelist++ = '\0';
3004		if (nodelist_parse(nodelist, nodes))
3005			goto out;
3006		if (!nodes_subset(nodes, node_states[N_MEMORY]))
3007			goto out;
3008	} else
3009		nodes_clear(nodes);
3010
3011	mode = match_string(policy_modes, MPOL_MAX, str);
3012	if (mode < 0)
3013		goto out;
3014
3015	switch (mode) {
3016	case MPOL_PREFERRED:
3017		/*
3018		 * Insist on a nodelist containing exactly one node: the loop
3019		 * below rejects anything but a single node id, and we later use
3020		 * first_node(nodes), so the nodelist (and nodes) cannot be empty.
3021		 */
3022		if (nodelist) {
3023			char *rest = nodelist;
3024			while (isdigit(*rest))
3025				rest++;
3026			if (*rest)
3027				goto out;
3028			if (nodes_empty(nodes))
3029				goto out;
3030		}
3031		break;
3032	case MPOL_INTERLEAVE:
3033		/*
3034		 * Default to online nodes with memory if no nodelist
3035		 */
3036		if (!nodelist)
3037			nodes = node_states[N_MEMORY];
3038		break;
3039	case MPOL_LOCAL:
3040		/*
3041		 * Don't allow a nodelist;  mpol_new() checks flags
3042		 */
3043		if (nodelist)
3044			goto out;
3045		break;
3046	case MPOL_DEFAULT:
3047		/*
3048		 * Insist on an empty nodelist
3049		 */
3050		if (!nodelist)
3051			err = 0;
3052		goto out;
3053	case MPOL_PREFERRED_MANY:
3054	case MPOL_BIND:
3055		/*
3056		 * Insist on a nodelist
3057		 */
3058		if (!nodelist)
3059			goto out;
3060	}
3061
3062	mode_flags = 0;
3063	if (flags) {
3064		/*
3065		 * Currently, we only support two mutually exclusive
3066		 * mode flags.
3067		 */
3068		if (!strcmp(flags, "static"))
3069			mode_flags |= MPOL_F_STATIC_NODES;
3070		else if (!strcmp(flags, "relative"))
3071			mode_flags |= MPOL_F_RELATIVE_NODES;
3072		else
3073			goto out;
3074	}
3075
3076	new = mpol_new(mode, mode_flags, &nodes);
3077	if (IS_ERR(new))
3078		goto out;
3079
3080	/*
3081	 * Save nodes for mpol_to_str() to show the tmpfs mount options
3082	 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
3083	 */
3084	if (mode != MPOL_PREFERRED) {
3085		new->nodes = nodes;
3086	} else if (nodelist) {
3087		nodes_clear(new->nodes);
3088		node_set(first_node(nodes), new->nodes);
3089	} else {
3090		new->mode = MPOL_LOCAL;
3091	}
3092
3093	/*
3094	 * Save nodes for contextualization: this will be used to "clone"
3095	 * the mempolicy in a specific context [cpuset] at a later time.
3096	 */
3097	new->w.user_nodemask = nodes;
3098
3099	err = 0;
3100
3101out:
3102	/* Restore string for error message */
3103	if (nodelist)
3104		*--nodelist = ':';
3105	if (flags)
3106		*--flags = '=';
3107	if (!err)
3108		*mpol = new;
3109	return err;
3110}
3111#endif /* CONFIG_TMPFS */
3112
3113/**
3114 * mpol_to_str - format a mempolicy structure for printing
3115 * @buffer:  to contain formatted mempolicy string
3116 * @maxlen:  length of @buffer
3117 * @pol:  pointer to mempolicy to be formatted
3118 *
3119 * Convert @pol into a string.  If @buffer is too short, truncate the string.
3120 * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
3121 * longest flag, "relative", and to display at least a few node ids.
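 * Example output (illustrative): "interleave:0-3", "bind=static:1,3".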
3122 */
3123void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
3124{
3125	char *p = buffer;
3126	nodemask_t nodes = NODE_MASK_NONE;
3127	unsigned short mode = MPOL_DEFAULT;
3128	unsigned short flags = 0;
3129
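	/*
	 * The per-node NUMA-balancing policies (MPOL_F_MORON) are an
	 * implementation detail; report them as "default".
	 */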
3130	if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
3131		mode = pol->mode;
3132		flags = pol->flags;
3133	}
3134
3135	switch (mode) {
3136	case MPOL_DEFAULT:
3137	case MPOL_LOCAL:
3138		break;
3139	case MPOL_PREFERRED:
3140	case MPOL_PREFERRED_MANY:
3141	case MPOL_BIND:
3142	case MPOL_INTERLEAVE:
3143		nodes = pol->nodes;
3144		break;
3145	default:
3146		WARN_ON_ONCE(1);
3147		snprintf(p, maxlen, "unknown");
3148		return;
3149	}
3150
3151	p += snprintf(p, maxlen, "%s", policy_modes[mode]);
3152
3153	if (flags & MPOL_MODE_FLAGS) {
3154		p += snprintf(p, buffer + maxlen - p, "=");
3155
3156		/*
3157		 * Currently, the only defined flags are mutually exclusive
3158		 */
3159		if (flags & MPOL_F_STATIC_NODES)
3160			p += snprintf(p, buffer + maxlen - p, "static");
3161		else if (flags & MPOL_F_RELATIVE_NODES)
3162			p += snprintf(p, buffer + maxlen - p, "relative");
3163	}
3164
3165	if (!nodes_empty(nodes))
3166		p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
3167			       nodemask_pr_args(&nodes));
3168}