   1/*
   2 * Simple NUMA memory policy for the Linux kernel.
   3 *
   4 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
   5 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
   6 * Subject to the GNU Public License, version 2.
   7 *
   8 * NUMA policy allows the user to give hints about the node(s) on which
   9 * memory should be allocated.
  10 *
  11 * Support four policies per VMA and per process:
  12 *
  13 * The VMA policy has priority over the process policy for a page fault.
  14 *
  15 * interleave     Allocate memory interleaved over a set of nodes,
  16 *                with normal fallback if it fails.
  17 *                For VMA based allocations this interleaves based on the
  18 *                offset into the backing object or offset into the mapping
  19 *                for anonymous memory. For process policy a process counter
  20 *                is used.
  21 *
  22 * bind           Only allocate memory on a specific set of nodes,
  23 *                no fallback.
  24 *                FIXME: memory is allocated starting with the first node
  25 *                to the last. It would be better if bind would truly restrict
  26 *                the allocation to memory nodes instead
  27 *
  28 * preferred       Try a specific node first before normal fallback.
  29 *                As a special case node -1 here means do the allocation
  30 *                on the local CPU. This is normally identical to default,
  31 *                but useful to set in a VMA when you have a non default
  32 *                process policy.
  33 *
  34 * default        Allocate on the local node first, or when on a VMA
  35 *                use the process policy. This is what Linux always did
  36 *		  in a NUMA aware kernel and still does by, ahem, default.
  37 *
  38 * The process policy is applied for most non interrupt memory allocations
  39 * in that process' context. Interrupts ignore the policies and always
  40 * try to allocate on the local CPU. The VMA policy is only applied for memory
  41 * allocations for a VMA in the VM.
  42 *
  43 * Currently there are a few corner cases in swapping where the policy
  44 * is not applied, but the majority should be handled. When process policy
  45 * is used it is not remembered over swap outs/swap ins.
  46 *
  47 * Only the highest zone in the zone hierarchy gets policied. Allocations
  48 * requesting a lower zone just use default policy. This implies that
  49 * on systems with highmem, kernel lowmem allocations don't get policied.
  50 * Same with GFP_DMA allocations.
  51 *
  52 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
  53 * all users and remembered even when nobody has memory mapped.
  54 */
  55
  56/* Notebook:
  57   fix mmap readahead to honour policy and enable policy for any page cache
  58   object
  59   statistics for bigpages
  60   global policy for page cache? currently it uses process policy. Requires
  61   first item above.
  62   handle mremap for shared memory (currently ignored for the policy)
  63   grows down?
  64   make bind policy root only? It can trigger oom much faster and the
  65   kernel does not always handle that gracefully.
  66*/
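/*
 * Editor's note: an illustrative user-space sketch (not part of this file)
 * of the four policies described above, using the set_mempolicy(2) and
 * mbind(2) wrappers from libnuma's <numaif.h> (link with -lnuma).
 * Assumes a machine with at least two nodes; error handling omitted.
 */
#if 0	/* illustration only */
#include <numaif.h>
#include <sys/mman.h>

static void numa_policy_example(void)
{
	unsigned long nodes = 0x3;		/* nodes 0 and 1 */
	void *buf;

	/* process policy: interleave new allocations across nodes 0-1 */
	set_mempolicy(MPOL_INTERLEAVE, &nodes, 8 * sizeof(nodes));

	/* VMA policy: restrict one mapping to node 0 only */
	buf = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	nodes = 0x1;
	mbind(buf, 1 << 20, MPOL_BIND, &nodes, 8 * sizeof(nodes), 0);

	/* preferred: try node 1 first, fall back elsewhere if it is full */
	nodes = 0x2;
	set_mempolicy(MPOL_PREFERRED, &nodes, 8 * sizeof(nodes));

	/* back to the default local-allocation policy */
	set_mempolicy(MPOL_DEFAULT, NULL, 0);
}
#endif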
  67
  68#include <linux/mempolicy.h>
  69#include <linux/mm.h>
  70#include <linux/highmem.h>
  71#include <linux/hugetlb.h>
  72#include <linux/kernel.h>
  73#include <linux/sched.h>
  74#include <linux/nodemask.h>
  75#include <linux/cpuset.h>
  76#include <linux/slab.h>
  77#include <linux/string.h>
  78#include <linux/module.h>
  79#include <linux/nsproxy.h>
  80#include <linux/interrupt.h>
  81#include <linux/init.h>
  82#include <linux/compat.h>
  83#include <linux/swap.h>
  84#include <linux/seq_file.h>
  85#include <linux/proc_fs.h>
  86#include <linux/migrate.h>
  87#include <linux/ksm.h>
  88#include <linux/rmap.h>
  89#include <linux/security.h>
  90#include <linux/syscalls.h>
  91#include <linux/ctype.h>
  92#include <linux/mm_inline.h>
  93
  94#include <asm/tlbflush.h>
  95#include <asm/uaccess.h>
  96#include <linux/random.h>
  97
  98#include "internal.h"
  99
 100/* Internal flags */
 101#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
 102#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
 103
 104static struct kmem_cache *policy_cache;
 105static struct kmem_cache *sn_cache;
 106
 107/* Highest zone. A specific allocation for a zone below that is not
 108   policied. */
 109enum zone_type policy_zone = 0;
 110
 111/*
 112 * run-time system-wide default policy => local allocation
 113 */
 114struct mempolicy default_policy = {
 115	.refcnt = ATOMIC_INIT(1), /* never free it */
 116	.mode = MPOL_PREFERRED,
 117	.flags = MPOL_F_LOCAL,
 118};
 119
 120static const struct mempolicy_operations {
 121	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
 122	/*
 123	 * If read-side task has no lock to protect task->mempolicy, write-side
 124	 * task will rebind the task->mempolicy in two steps. The first step is
 125	 * setting all the newly allowed nodes, and the second step is clearing
 126	 * all the disallowed nodes. In this way, we never end up with no node
 127	 * from which to allocate a page.
 128	 * If we have a lock to protect task->mempolicy in read-side, we do
 129	 * rebind directly.
 130	 *
 131	 * step:
 132	 * 	MPOL_REBIND_ONCE - do rebind work at once
 133	 * 	MPOL_REBIND_STEP1 - set all the newly allowed nodes
 134	 * 	MPOL_REBIND_STEP2 - clean all the disallowed nodes
 135	 */
 136	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
 137			enum mpol_rebind_step step);
 138} mpol_ops[MPOL_MAX];
 139
 140/* Check that the nodemask contains at least one populated zone */
 141static int is_valid_nodemask(const nodemask_t *nodemask)
 142{
 143	int nd, k;
 144
 145	for_each_node_mask(nd, *nodemask) {
 146		struct zone *z;
 147
 148		for (k = 0; k <= policy_zone; k++) {
 149			z = &NODE_DATA(nd)->node_zones[k];
 150			if (z->present_pages > 0)
 151				return 1;
 152		}
 153	}
 154
 155	return 0;
 156}
 157
 158static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
 159{
 160	return pol->flags & MPOL_MODE_FLAGS;
 161}
 162
 163static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
 164				   const nodemask_t *rel)
 165{
 166	nodemask_t tmp;
 167	nodes_fold(tmp, *orig, nodes_weight(*rel));
 168	nodes_onto(*ret, tmp, *rel);
 169}
 170
 171static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
 172{
 173	if (nodes_empty(*nodes))
 174		return -EINVAL;
 175	pol->v.nodes = *nodes;
 176	return 0;
 177}
 178
 179static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
 180{
 181	if (!nodes)
 182		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
 183	else if (nodes_empty(*nodes))
 184		return -EINVAL;			/*  no allowed nodes */
 185	else
 186		pol->v.preferred_node = first_node(*nodes);
 187	return 0;
 188}
 189
 190static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
 191{
 192	if (!is_valid_nodemask(nodes))
 193		return -EINVAL;
 194	pol->v.nodes = *nodes;
 195	return 0;
 196}
 197
 198/*
 199 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 200 * any, for the new policy.  mpol_new() has already validated the nodes
 201 * parameter with respect to the policy mode and flags.  But, we need to
 202 * handle an empty nodemask with MPOL_PREFERRED here.
 203 *
 204 * Must be called holding task's alloc_lock to protect task's mems_allowed
 205 * and mempolicy.  May also be called holding the mmap_semaphore for write.
 206 */
 207static int mpol_set_nodemask(struct mempolicy *pol,
 208		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
 209{
 210	int ret;
 211
 212	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
 213	if (pol == NULL)
 214		return 0;
 215	/* Check N_HIGH_MEMORY */
 216	nodes_and(nsc->mask1,
 217		  cpuset_current_mems_allowed, node_states[N_HIGH_MEMORY]);
 218
 219	VM_BUG_ON(!nodes);
 220	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
 221		nodes = NULL;	/* explicit local allocation */
 222	else {
 223		if (pol->flags & MPOL_F_RELATIVE_NODES)
 224			mpol_relative_nodemask(&nsc->mask2, nodes,&nsc->mask1);
 225		else
 226			nodes_and(nsc->mask2, *nodes, nsc->mask1);
 227
 228		if (mpol_store_user_nodemask(pol))
 229			pol->w.user_nodemask = *nodes;
 230		else
 231			pol->w.cpuset_mems_allowed =
 232						cpuset_current_mems_allowed;
 233	}
 234
 235	if (nodes)
 236		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
 237	else
 238		ret = mpol_ops[pol->mode].create(pol, NULL);
 239	return ret;
 240}
 241
 242/*
 243 * This function just creates a new policy, does some checks and simple
 244 * initialization. You must invoke mpol_set_nodemask() to set nodes.
 245 */
 246static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
 247				  nodemask_t *nodes)
 248{
 249	struct mempolicy *policy;
 250
 251	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
 252		 mode, flags, nodes ? nodes_addr(*nodes)[0] : -1);
 253
 254	if (mode == MPOL_DEFAULT) {
 255		if (nodes && !nodes_empty(*nodes))
 256			return ERR_PTR(-EINVAL);
 257		return NULL;	/* simply delete any existing policy */
 258	}
 259	VM_BUG_ON(!nodes);
 260
 261	/*
 262	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
 263	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
 264	 * All other modes require a valid pointer to a non-empty nodemask.
 265	 */
 266	if (mode == MPOL_PREFERRED) {
 267		if (nodes_empty(*nodes)) {
 268			if (((flags & MPOL_F_STATIC_NODES) ||
 269			     (flags & MPOL_F_RELATIVE_NODES)))
 270				return ERR_PTR(-EINVAL);
 271		}
 272	} else if (nodes_empty(*nodes))
 273		return ERR_PTR(-EINVAL);
 274	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
 275	if (!policy)
 276		return ERR_PTR(-ENOMEM);
 277	atomic_set(&policy->refcnt, 1);
 278	policy->mode = mode;
 279	policy->flags = flags;
 280
 281	return policy;
 282}
 283
 284/* Slow path of a mpol destructor. */
 285void __mpol_put(struct mempolicy *p)
 286{
 287	if (!atomic_dec_and_test(&p->refcnt))
 288		return;
 289	kmem_cache_free(policy_cache, p);
 290}
 291
 292static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes,
 293				enum mpol_rebind_step step)
 294{
 295}
 296
 297/*
 298 * step:
 299 * 	MPOL_REBIND_ONCE  - do rebind work at once
 300 * 	MPOL_REBIND_STEP1 - set all the newly allowed nodes
 301 * 	MPOL_REBIND_STEP2 - clean all the disallowed nodes
 302 */
 303static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
 304				 enum mpol_rebind_step step)
 305{
 306	nodemask_t tmp;
 307
 308	if (pol->flags & MPOL_F_STATIC_NODES)
 309		nodes_and(tmp, pol->w.user_nodemask, *nodes);
 310	else if (pol->flags & MPOL_F_RELATIVE_NODES)
 311		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
 312	else {
 313		/*
 314		 * if step == 1, we use ->w.cpuset_mems_allowed to cache the
 315		 * result
 316		 */
 317		if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) {
 318			nodes_remap(tmp, pol->v.nodes,
 319					pol->w.cpuset_mems_allowed, *nodes);
 320			pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
 321		} else if (step == MPOL_REBIND_STEP2) {
 322			tmp = pol->w.cpuset_mems_allowed;
 323			pol->w.cpuset_mems_allowed = *nodes;
 324		} else
 325			BUG();
 326	}
 327
 328	if (nodes_empty(tmp))
 329		tmp = *nodes;
 330
 331	if (step == MPOL_REBIND_STEP1)
 332		nodes_or(pol->v.nodes, pol->v.nodes, tmp);
 333	else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2)
 334		pol->v.nodes = tmp;
 335	else
 336		BUG();
 337
 338	if (!node_isset(current->il_next, tmp)) {
 339		current->il_next = next_node(current->il_next, tmp);
 340		if (current->il_next >= MAX_NUMNODES)
 341			current->il_next = first_node(tmp);
 342		if (current->il_next >= MAX_NUMNODES)
 343			current->il_next = numa_node_id();
 344	}
 345}
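/*
 * Editor's note: a worked example of the two-step rebind above, assuming
 * an MPOL_INTERLEAVE policy without static/relative flags whose cpuset
 * changes its allowed nodes from {0,1} to {2,3}:
 *
 *	initial:	v.nodes = {0,1}, w.cpuset_mems_allowed = {0,1}
 *	STEP1:		tmp = remap({0,1}: {0,1} -> {2,3}) = {2,3}
 *			v.nodes |= tmp            =>  v.nodes = {0,1,2,3}
 *	STEP2:		v.nodes  = tmp            =>  v.nodes = {2,3}
 *
 * Between the steps the policy covers the union of old and new nodes, so
 * a concurrent, lockless reader never sees an empty nodemask.
 */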
 346
 347static void mpol_rebind_preferred(struct mempolicy *pol,
 348				  const nodemask_t *nodes,
 349				  enum mpol_rebind_step step)
 350{
 351	nodemask_t tmp;
 352
 353	if (pol->flags & MPOL_F_STATIC_NODES) {
 354		int node = first_node(pol->w.user_nodemask);
 355
 356		if (node_isset(node, *nodes)) {
 357			pol->v.preferred_node = node;
 358			pol->flags &= ~MPOL_F_LOCAL;
 359		} else
 360			pol->flags |= MPOL_F_LOCAL;
 361	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
 362		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
 363		pol->v.preferred_node = first_node(tmp);
 364	} else if (!(pol->flags & MPOL_F_LOCAL)) {
 365		pol->v.preferred_node = node_remap(pol->v.preferred_node,
 366						   pol->w.cpuset_mems_allowed,
 367						   *nodes);
 368		pol->w.cpuset_mems_allowed = *nodes;
 369	}
 370}
 371
 372/*
 373 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 374 *
 375 * If read-side task has no lock to protect task->mempolicy, write-side
 376 * task will rebind the task->mempolicy in two steps. The first step is
 377 * setting all the newly allowed nodes, and the second step is clearing
 378 * all the disallowed nodes. In this way, we never end up with no node
 379 * from which to allocate a page.
 380 * If we have a lock to protect task->mempolicy in read-side, we do
 381 * rebind directly.
 382 *
 383 * step:
 384 * 	MPOL_REBIND_ONCE  - do rebind work at once
 385 * 	MPOL_REBIND_STEP1 - set all the newly allowed nodes
 386 * 	MPOL_REBIND_STEP2 - clean all the disallowed nodes
 387 */
 388static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
 389				enum mpol_rebind_step step)
 390{
 391	if (!pol)
 392		return;
 393	if (!mpol_store_user_nodemask(pol) && step == 0 &&
 394	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
 395		return;
 396
 397	if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING))
 398		return;
 399
 400	if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING))
 401		BUG();
 402
 403	if (step == MPOL_REBIND_STEP1)
 404		pol->flags |= MPOL_F_REBINDING;
 405	else if (step == MPOL_REBIND_STEP2)
 406		pol->flags &= ~MPOL_F_REBINDING;
 407	else if (step >= MPOL_REBIND_NSTEP)
 408		BUG();
 409
 410	mpol_ops[pol->mode].rebind(pol, newmask, step);
 411}
 412
 413/*
 414 * Wrapper for mpol_rebind_policy() that just requires task
 415 * pointer, and updates task mempolicy.
 416 *
 417 * Called with task's alloc_lock held.
 418 */
 419
 420void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
 421			enum mpol_rebind_step step)
 422{
 423	mpol_rebind_policy(tsk->mempolicy, new, step);
 424}
 425
 426/*
 427 * Rebind each vma in mm to new nodemask.
 428 *
 429 * Call holding a reference to mm.  Takes mm->mmap_sem during call.
 430 */
 431
 432void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
 433{
 434	struct vm_area_struct *vma;
 435
 436	down_write(&mm->mmap_sem);
 437	for (vma = mm->mmap; vma; vma = vma->vm_next)
 438		mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
 439	up_write(&mm->mmap_sem);
 440}
 441
 442static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
 443	[MPOL_DEFAULT] = {
 444		.rebind = mpol_rebind_default,
 445	},
 446	[MPOL_INTERLEAVE] = {
 447		.create = mpol_new_interleave,
 448		.rebind = mpol_rebind_nodemask,
 449	},
 450	[MPOL_PREFERRED] = {
 451		.create = mpol_new_preferred,
 452		.rebind = mpol_rebind_preferred,
 453	},
 454	[MPOL_BIND] = {
 455		.create = mpol_new_bind,
 456		.rebind = mpol_rebind_nodemask,
 457	},
 458};
 459
 460static void migrate_page_add(struct page *page, struct list_head *pagelist,
 461				unsigned long flags);
 462
 463/* Scan through pages checking if pages follow certain conditions. */
 464static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 465		unsigned long addr, unsigned long end,
 466		const nodemask_t *nodes, unsigned long flags,
 467		void *private)
 468{
 469	pte_t *orig_pte;
 470	pte_t *pte;
 471	spinlock_t *ptl;
 472
 473	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 474	do {
 475		struct page *page;
 476		int nid;
 477
 478		if (!pte_present(*pte))
 479			continue;
 480		page = vm_normal_page(vma, addr, *pte);
 481		if (!page)
 482			continue;
 483		/*
 484		 * vm_normal_page() filters out zero pages, but there might
 485		 * still be PageReserved pages to skip, perhaps in a VDSO.
 486		 * And we cannot move PageKsm pages sensibly or safely yet.
 487		 */
 488		if (PageReserved(page) || PageKsm(page))
 489			continue;
 490		nid = page_to_nid(page);
 491		if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
 492			continue;
 493
 494		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
 495			migrate_page_add(page, private, flags);
 496		else
 497			break;
 498	} while (pte++, addr += PAGE_SIZE, addr != end);
 499	pte_unmap_unlock(orig_pte, ptl);
 500	return addr != end;
 501}
 502
 503static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
 504		unsigned long addr, unsigned long end,
 505		const nodemask_t *nodes, unsigned long flags,
 506		void *private)
 507{
 508	pmd_t *pmd;
 509	unsigned long next;
 510
 511	pmd = pmd_offset(pud, addr);
 512	do {
 513		next = pmd_addr_end(addr, end);
 514		split_huge_page_pmd(vma->vm_mm, pmd);
 515		if (pmd_none_or_clear_bad(pmd))
 516			continue;
 517		if (check_pte_range(vma, pmd, addr, next, nodes,
 518				    flags, private))
 519			return -EIO;
 520	} while (pmd++, addr = next, addr != end);
 521	return 0;
 522}
 523
 524static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
 525		unsigned long addr, unsigned long end,
 526		const nodemask_t *nodes, unsigned long flags,
 527		void *private)
 528{
 529	pud_t *pud;
 530	unsigned long next;
 531
 532	pud = pud_offset(pgd, addr);
 533	do {
 534		next = pud_addr_end(addr, end);
 535		if (pud_none_or_clear_bad(pud))
 536			continue;
 537		if (check_pmd_range(vma, pud, addr, next, nodes,
 538				    flags, private))
 539			return -EIO;
 540	} while (pud++, addr = next, addr != end);
 541	return 0;
 542}
 543
 544static inline int check_pgd_range(struct vm_area_struct *vma,
 545		unsigned long addr, unsigned long end,
 546		const nodemask_t *nodes, unsigned long flags,
 547		void *private)
 548{
 549	pgd_t *pgd;
 550	unsigned long next;
 551
 552	pgd = pgd_offset(vma->vm_mm, addr);
 553	do {
 554		next = pgd_addr_end(addr, end);
 555		if (pgd_none_or_clear_bad(pgd))
 556			continue;
 557		if (check_pud_range(vma, pgd, addr, next, nodes,
 558				    flags, private))
 559			return -EIO;
 560	} while (pgd++, addr = next, addr != end);
 561	return 0;
 562}
 563
 564/*
 565 * Check if all pages in a range are on a set of nodes.
 566 * If pagelist != NULL then isolate pages from the LRU and
 567 * put them on the pagelist.
 568 */
 569static struct vm_area_struct *
 570check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
 571		const nodemask_t *nodes, unsigned long flags, void *private)
 572{
 573	int err;
 574	struct vm_area_struct *first, *vma, *prev;
 575
 576
 577	first = find_vma(mm, start);
 578	if (!first)
 579		return ERR_PTR(-EFAULT);
 580	prev = NULL;
 581	for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
 582		if (!(flags & MPOL_MF_DISCONTIG_OK)) {
 583			if (!vma->vm_next && vma->vm_end < end)
 584				return ERR_PTR(-EFAULT);
 585			if (prev && prev->vm_end < vma->vm_start)
 586				return ERR_PTR(-EFAULT);
 587		}
 588		if (!is_vm_hugetlb_page(vma) &&
 589		    ((flags & MPOL_MF_STRICT) ||
 590		     ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
 591				vma_migratable(vma)))) {
 592			unsigned long endvma = vma->vm_end;
 593
 594			if (endvma > end)
 595				endvma = end;
 596			if (vma->vm_start > start)
 597				start = vma->vm_start;
 598			err = check_pgd_range(vma, start, endvma, nodes,
 599						flags, private);
 600			if (err) {
 601				first = ERR_PTR(err);
 602				break;
 603			}
 604		}
 605		prev = vma;
 606	}
 607	return first;
 608}
 609
 610/* Apply policy to a single VMA */
 611static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
 612{
 613	int err = 0;
 614	struct mempolicy *old = vma->vm_policy;
 615
 616	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
 617		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
 618		 vma->vm_ops, vma->vm_file,
 619		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
 620
 621	if (vma->vm_ops && vma->vm_ops->set_policy)
 622		err = vma->vm_ops->set_policy(vma, new);
 623	if (!err) {
 624		mpol_get(new);
 625		vma->vm_policy = new;
 626		mpol_put(old);
 627	}
 628	return err;
 629}
 630
 631/* Step 2: apply policy to a range and do splits. */
 632static int mbind_range(struct mm_struct *mm, unsigned long start,
 633		       unsigned long end, struct mempolicy *new_pol)
 634{
 635	struct vm_area_struct *next;
 636	struct vm_area_struct *prev;
 637	struct vm_area_struct *vma;
 638	int err = 0;
 639	unsigned long vmstart;
 640	unsigned long vmend;
 641
 642	vma = find_vma_prev(mm, start, &prev);
 643	if (!vma || vma->vm_start > start)
 644		return -EFAULT;
 645
 646	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
 647		next = vma->vm_next;
 648		vmstart = max(start, vma->vm_start);
 649		vmend   = min(end, vma->vm_end);
 650
 651		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
 652				  vma->anon_vma, vma->vm_file, vma->vm_pgoff,
 653				  new_pol);
 654		if (prev) {
 655			vma = prev;
 656			next = vma->vm_next;
 657			continue;
 658		}
 659		if (vma->vm_start != vmstart) {
 660			err = split_vma(vma->vm_mm, vma, vmstart, 1);
 661			if (err)
 662				goto out;
 663		}
 664		if (vma->vm_end != vmend) {
 665			err = split_vma(vma->vm_mm, vma, vmend, 0);
 666			if (err)
 667				goto out;
 668		}
 669		err = policy_vma(vma, new_pol);
 670		if (err)
 671			goto out;
 672	}
 673
 674 out:
 675	return err;
 676}
 677
 678/*
 679 * Update task->flags PF_MEMPOLICY bit: set iff non-default
 680 * mempolicy.  Allows more rapid checking of this (combined perhaps
 681 * with other PF_* flag bits) on memory allocation hot code paths.
 682 *
 683 * If called from outside this file, the task 'p' should -only- be
 684 * a newly forked child not yet visible on the task list, because
 685 * manipulating the task flags of a visible task is not safe.
 686 *
 687 * The above limitation is why this routine has the funny name
 688 * mpol_fix_fork_child_flag().
 689 *
 690 * It is also safe to call this with a task pointer of current,
 691 * which the static wrapper mpol_set_task_struct_flag() does,
 692 * for use within this file.
 693 */
 694
 695void mpol_fix_fork_child_flag(struct task_struct *p)
 696{
 697	if (p->mempolicy)
 698		p->flags |= PF_MEMPOLICY;
 699	else
 700		p->flags &= ~PF_MEMPOLICY;
 701}
 702
 703static void mpol_set_task_struct_flag(void)
 704{
 705	mpol_fix_fork_child_flag(current);
 706}
 707
 708/* Set the process memory policy */
 709static long do_set_mempolicy(unsigned short mode, unsigned short flags,
 710			     nodemask_t *nodes)
 711{
 712	struct mempolicy *new, *old;
 713	struct mm_struct *mm = current->mm;
 714	NODEMASK_SCRATCH(scratch);
 715	int ret;
 716
 717	if (!scratch)
 718		return -ENOMEM;
 719
 720	new = mpol_new(mode, flags, nodes);
 721	if (IS_ERR(new)) {
 722		ret = PTR_ERR(new);
 723		goto out;
 724	}
 725	/*
 726	 * prevent changing our mempolicy while show_numa_maps()
 727	 * is using it.
 728	 * Note:  do_set_mempolicy() can be called at init time
 729	 * with no 'mm'.
 730	 */
 731	if (mm)
 732		down_write(&mm->mmap_sem);
 733	task_lock(current);
 734	ret = mpol_set_nodemask(new, nodes, scratch);
 735	if (ret) {
 736		task_unlock(current);
 737		if (mm)
 738			up_write(&mm->mmap_sem);
 739		mpol_put(new);
 740		goto out;
 741	}
 742	old = current->mempolicy;
 743	current->mempolicy = new;
 744	mpol_set_task_struct_flag();
 745	if (new && new->mode == MPOL_INTERLEAVE &&
 746	    nodes_weight(new->v.nodes))
 747		current->il_next = first_node(new->v.nodes);
 748	task_unlock(current);
 749	if (mm)
 750		up_write(&mm->mmap_sem);
 751
 752	mpol_put(old);
 753	ret = 0;
 754out:
 755	NODEMASK_SCRATCH_FREE(scratch);
 756	return ret;
 757}
 758
 759/*
 760 * Return nodemask for policy for get_mempolicy() query
 761 *
 762 * Called with task's alloc_lock held
 763 */
 764static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
 765{
 766	nodes_clear(*nodes);
 767	if (p == &default_policy)
 768		return;
 769
 770	switch (p->mode) {
 771	case MPOL_BIND:
 772		/* Fall through */
 773	case MPOL_INTERLEAVE:
 774		*nodes = p->v.nodes;
 775		break;
 776	case MPOL_PREFERRED:
 777		if (!(p->flags & MPOL_F_LOCAL))
 778			node_set(p->v.preferred_node, *nodes);
 779		/* else return empty node mask for local allocation */
 780		break;
 781	default:
 782		BUG();
 783	}
 784}
 785
 786static int lookup_node(struct mm_struct *mm, unsigned long addr)
 787{
 788	struct page *p;
 789	int err;
 790
 791	err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
 792	if (err >= 0) {
 793		err = page_to_nid(p);
 794		put_page(p);
 795	}
 796	return err;
 797}
 798
 799/* Retrieve NUMA policy */
 800static long do_get_mempolicy(int *policy, nodemask_t *nmask,
 801			     unsigned long addr, unsigned long flags)
 802{
 803	int err;
 804	struct mm_struct *mm = current->mm;
 805	struct vm_area_struct *vma = NULL;
 806	struct mempolicy *pol = current->mempolicy;
 807
 808	if (flags &
 809		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
 810		return -EINVAL;
 811
 812	if (flags & MPOL_F_MEMS_ALLOWED) {
 813		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
 814			return -EINVAL;
 815		*policy = 0;	/* just so it's initialized */
 816		task_lock(current);
 817		*nmask  = cpuset_current_mems_allowed;
 818		task_unlock(current);
 819		return 0;
 820	}
 821
 822	if (flags & MPOL_F_ADDR) {
 823		/*
 824		 * Do NOT fall back to task policy if the
 825		 * vma/shared policy at addr is NULL.  We
 826		 * want to return MPOL_DEFAULT in this case.
 827		 */
 828		down_read(&mm->mmap_sem);
 829		vma = find_vma_intersection(mm, addr, addr+1);
 830		if (!vma) {
 831			up_read(&mm->mmap_sem);
 832			return -EFAULT;
 833		}
 834		if (vma->vm_ops && vma->vm_ops->get_policy)
 835			pol = vma->vm_ops->get_policy(vma, addr);
 836		else
 837			pol = vma->vm_policy;
 838	} else if (addr)
 839		return -EINVAL;
 840
 841	if (!pol)
 842		pol = &default_policy;	/* indicates default behavior */
 843
 844	if (flags & MPOL_F_NODE) {
 845		if (flags & MPOL_F_ADDR) {
 846			err = lookup_node(mm, addr);
 847			if (err < 0)
 848				goto out;
 849			*policy = err;
 850		} else if (pol == current->mempolicy &&
 851				pol->mode == MPOL_INTERLEAVE) {
 852			*policy = current->il_next;
 853		} else {
 854			err = -EINVAL;
 855			goto out;
 856		}
 857	} else {
 858		*policy = pol == &default_policy ? MPOL_DEFAULT :
 859						pol->mode;
 860		/*
 861		 * Internal mempolicy flags must be masked off before exposing
 862		 * the policy to userspace.
 863		 */
 864		*policy |= (pol->flags & MPOL_MODE_FLAGS);
 865	}
 866
 867	if (vma) {
 868		up_read(&current->mm->mmap_sem);
 869		vma = NULL;
 870	}
 871
 872	err = 0;
 873	if (nmask) {
 874		if (mpol_store_user_nodemask(pol)) {
 875			*nmask = pol->w.user_nodemask;
 876		} else {
 877			task_lock(current);
 878			get_policy_nodemask(pol, nmask);
 879			task_unlock(current);
 880		}
 881	}
 882
 883 out:
 884	mpol_cond_put(pol);
 885	if (vma)
 886		up_read(&current->mm->mmap_sem);
 887	return err;
 888}
 889
 890#ifdef CONFIG_MIGRATION
 891/*
 892 * page migration
 893 */
 894static void migrate_page_add(struct page *page, struct list_head *pagelist,
 895				unsigned long flags)
 896{
 897	/*
 898	 * Avoid migrating a page that is shared with others.
 899	 */
 900	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
 901		if (!isolate_lru_page(page)) {
 902			list_add_tail(&page->lru, pagelist);
 903			inc_zone_page_state(page, NR_ISOLATED_ANON +
 904					    page_is_file_cache(page));
 905		}
 906	}
 907}
 908
 909static struct page *new_node_page(struct page *page, unsigned long node, int **x)
 910{
 911	return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
 912}
 913
 914/*
 915 * Migrate pages from one node to a target node.
 916 * Returns error or the number of pages not migrated.
 917 */
 918static int migrate_to_node(struct mm_struct *mm, int source, int dest,
 919			   int flags)
 920{
 921	nodemask_t nmask;
 922	LIST_HEAD(pagelist);
 923	int err = 0;
 924	struct vm_area_struct *vma;
 925
 926	nodes_clear(nmask);
 927	node_set(source, nmask);
 928
 929	vma = check_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
 930			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
 931	if (IS_ERR(vma))
 932		return PTR_ERR(vma);
 933
 934	if (!list_empty(&pagelist)) {
 935		err = migrate_pages(&pagelist, new_node_page, dest,
 936								false, true);
 937		if (err)
 938			putback_lru_pages(&pagelist);
 939	}
 940
 941	return err;
 942}
 943
 944/*
 945 * Move pages between the two nodesets so as to preserve the physical
 946 * layout as much as possible.
 947 *
 948 * Returns the number of pages that could not be moved.
 949 */
 950int do_migrate_pages(struct mm_struct *mm,
 951	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
 952{
 953	int busy = 0;
 954	int err;
 955	nodemask_t tmp;
 956
 957	err = migrate_prep();
 958	if (err)
 959		return err;
 960
 961	down_read(&mm->mmap_sem);
 962
 963	err = migrate_vmas(mm, from_nodes, to_nodes, flags);
 964	if (err)
 965		goto out;
 966
 967	/*
 968	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
 969	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
 970	 * bit in 'tmp', and return that <source, dest> pair for migration.
 971	 * The pair of nodemasks 'to' and 'from' define the map.
 972	 *
 973	 * If no pair of bits is found that way, fallback to picking some
 974	 * pair of 'source' and 'dest' bits that are not the same.  If the
 975	 * 'source' and 'dest' bits are the same, this represents a node
 976	 * that will be migrating to itself, so no pages need move.
 977	 *
 978	 * If no bits are left in 'tmp', or if all remaining bits left
 979	 * in 'tmp' correspond to the same bit in 'to', return false
 980	 * (nothing left to migrate).
 981	 *
 982	 * This lets us pick a pair of nodes to migrate between, such that
 983	 * if possible the dest node is not already occupied by some other
 984	 * source node, minimizing the risk of overloading the memory on a
 985	 * node that would happen if we migrated incoming memory to a node
 986	 * before migrating outgoing memory source that same node.
 987	 *
 988	 * A single scan of tmp is sufficient.  As we go, we remember the
 989	 * most recent <s, d> pair that moved (s != d).  If we find a pair
 990	 * that not only moved, but what's better, moved to an empty slot
 991	 * (d is not set in tmp), then we break out then, with that pair.
 992	 * Otherwise when we finish scanning from_tmp, we at least have the
 993	 * most recent <s, d> pair that moved.  If we get all the way through
 994	 * the scan of tmp without finding any node that moved, much less
 995	 * moved to an empty node, then there is nothing left worth migrating.
 996	 */
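/*
 * Editor's note: an illustrative run of the pair-picking loop below.
 * With from_nodes = {0,1} and to_nodes = {1,2}: the scan first finds
 * <s=0, d=1>, but d=1 is still set in tmp, so it keeps scanning and
 * finds <s=1, d=2> with d=2 not in tmp, and migrates 1 -> 2 first.
 * Node 1 is then cleared from tmp, and the next pass migrates 0 -> 1
 * into the space just vacated.
 */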
 997
 998	tmp = *from_nodes;
 999	while (!nodes_empty(tmp)) {
1000		int s,d;
1001		int source = -1;
1002		int dest = 0;
1003
1004		for_each_node_mask(s, tmp) {
1005			d = node_remap(s, *from_nodes, *to_nodes);
1006			if (s == d)
1007				continue;
1008
1009			source = s;	/* Node moved. Memorize */
1010			dest = d;
1011
1012			/* dest not in remaining from nodes? */
1013			if (!node_isset(dest, tmp))
1014				break;
1015		}
1016		if (source == -1)
1017			break;
1018
1019		node_clear(source, tmp);
1020		err = migrate_to_node(mm, source, dest, flags);
1021		if (err > 0)
1022			busy += err;
1023		if (err < 0)
1024			break;
1025	}
1026out:
1027	up_read(&mm->mmap_sem);
1028	if (err < 0)
1029		return err;
1030	return busy;
1031
1032}
1033
1034/*
1035 * Allocate a new page for page migration based on vma policy.
1036 * Start assuming that page is mapped by vma pointed to by @private.
1037 * Search forward from there, if not.  N.B., this assumes that the
1038 * list of pages handed to migrate_pages()--which is how we get here--
1039 * is in virtual address order.
1040 */
1041static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
1042{
1043	struct vm_area_struct *vma = (struct vm_area_struct *)private;
1044	unsigned long uninitialized_var(address);
1045
1046	while (vma) {
1047		address = page_address_in_vma(page, vma);
1048		if (address != -EFAULT)
1049			break;
1050		vma = vma->vm_next;
1051	}
1052
1053	/*
1054	 * if !vma, alloc_page_vma() will use task or system default policy
1055	 */
1056	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
1057}
1058#else
1059
1060static void migrate_page_add(struct page *page, struct list_head *pagelist,
1061				unsigned long flags)
1062{
1063}
1064
1065int do_migrate_pages(struct mm_struct *mm,
1066	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
1067{
1068	return -ENOSYS;
1069}
1070
1071static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
1072{
1073	return NULL;
1074}
1075#endif
1076
1077static long do_mbind(unsigned long start, unsigned long len,
1078		     unsigned short mode, unsigned short mode_flags,
1079		     nodemask_t *nmask, unsigned long flags)
1080{
1081	struct vm_area_struct *vma;
1082	struct mm_struct *mm = current->mm;
1083	struct mempolicy *new;
1084	unsigned long end;
1085	int err;
1086	LIST_HEAD(pagelist);
1087
1088	if (flags & ~(unsigned long)(MPOL_MF_STRICT |
1089				     MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
1090		return -EINVAL;
1091	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1092		return -EPERM;
1093
1094	if (start & ~PAGE_MASK)
1095		return -EINVAL;
1096
1097	if (mode == MPOL_DEFAULT)
1098		flags &= ~MPOL_MF_STRICT;
1099
1100	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
1101	end = start + len;
1102
1103	if (end < start)
1104		return -EINVAL;
1105	if (end == start)
1106		return 0;
1107
1108	new = mpol_new(mode, mode_flags, nmask);
1109	if (IS_ERR(new))
1110		return PTR_ERR(new);
1111
1112	/*
1113	 * If we are using the default policy then operation
1114	 * on discontinuous address spaces is okay after all
1115	 */
1116	if (!new)
1117		flags |= MPOL_MF_DISCONTIG_OK;
1118
1119	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1120		 start, start + len, mode, mode_flags,
1121		 nmask ? nodes_addr(*nmask)[0] : -1);
1122
1123	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1124
1125		err = migrate_prep();
1126		if (err)
1127			goto mpol_out;
1128	}
1129	{
1130		NODEMASK_SCRATCH(scratch);
1131		if (scratch) {
1132			down_write(&mm->mmap_sem);
1133			task_lock(current);
1134			err = mpol_set_nodemask(new, nmask, scratch);
1135			task_unlock(current);
1136			if (err)
1137				up_write(&mm->mmap_sem);
1138		} else
1139			err = -ENOMEM;
1140		NODEMASK_SCRATCH_FREE(scratch);
1141	}
1142	if (err)
1143		goto mpol_out;
1144
1145	vma = check_range(mm, start, end, nmask,
1146			  flags | MPOL_MF_INVERT, &pagelist);
1147
1148	err = PTR_ERR(vma);
1149	if (!IS_ERR(vma)) {
1150		int nr_failed = 0;
1151
1152		err = mbind_range(mm, start, end, new);
1153
1154		if (!list_empty(&pagelist)) {
1155			nr_failed = migrate_pages(&pagelist, new_vma_page,
1156						(unsigned long)vma,
1157						false, true);
1158			if (nr_failed)
1159				putback_lru_pages(&pagelist);
1160		}
1161
1162		if (!err && nr_failed && (flags & MPOL_MF_STRICT))
1163			err = -EIO;
1164	} else
1165		putback_lru_pages(&pagelist);
1166
1167	up_write(&mm->mmap_sem);
1168 mpol_out:
1169	mpol_put(new);
1170	return err;
1171}
1172
1173/*
1174 * User space interface with variable sized bitmaps for nodelists.
1175 */
1176
1177/* Copy a node mask from user space. */
1178static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
1179		     unsigned long maxnode)
1180{
1181	unsigned long k;
1182	unsigned long nlongs;
1183	unsigned long endmask;
1184
1185	--maxnode;
1186	nodes_clear(*nodes);
1187	if (maxnode == 0 || !nmask)
1188		return 0;
1189	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1190		return -EINVAL;
1191
1192	nlongs = BITS_TO_LONGS(maxnode);
1193	if ((maxnode % BITS_PER_LONG) == 0)
1194		endmask = ~0UL;
1195	else
1196		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
1197
1198	/* When the user specified more nodes than supported just check
1199	   if the non supported part is all zero. */
1200	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
1201		if (nlongs > PAGE_SIZE/sizeof(long))
1202			return -EINVAL;
1203		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
1204			unsigned long t;
1205			if (get_user(t, nmask + k))
1206				return -EFAULT;
1207			if (k == nlongs - 1) {
1208				if (t & endmask)
1209					return -EINVAL;
1210			} else if (t)
1211				return -EINVAL;
1212		}
1213		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
1214		endmask = ~0UL;
1215	}
1216
1217	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
1218		return -EFAULT;
1219	nodes_addr(*nodes)[nlongs-1] &= endmask;
1220	return 0;
1221}
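/*
 * Editor's note: illustrative user-space view of the bitmap convention
 * handled above (a sketch; assumes MAX_NUMNODES covers the nodes named).
 * A caller binding to nodes 0 and 2 would typically pass one word and a
 * maxnode covering it:
 *
 *	unsigned long mask = (1UL << 0) | (1UL << 2);
 *	set_mempolicy(MPOL_BIND, &mask, 8 * sizeof(mask));
 *
 * Note that get_nodes() decrements maxnode first, so only maxnode - 1
 * bits of the user bitmap are actually examined.
 */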
1222
1223/* Copy a kernel node mask to user space */
1224static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1225			      nodemask_t *nodes)
1226{
1227	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1228	const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
1229
1230	if (copy > nbytes) {
1231		if (copy > PAGE_SIZE)
1232			return -EINVAL;
1233		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1234			return -EFAULT;
1235		copy = nbytes;
1236	}
1237	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1238}
1239
1240SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1241		unsigned long, mode, unsigned long __user *, nmask,
1242		unsigned long, maxnode, unsigned, flags)
1243{
1244	nodemask_t nodes;
1245	int err;
1246	unsigned short mode_flags;
1247
1248	mode_flags = mode & MPOL_MODE_FLAGS;
1249	mode &= ~MPOL_MODE_FLAGS;
1250	if (mode >= MPOL_MAX)
1251		return -EINVAL;
1252	if ((mode_flags & MPOL_F_STATIC_NODES) &&
1253	    (mode_flags & MPOL_F_RELATIVE_NODES))
1254		return -EINVAL;
1255	err = get_nodes(&nodes, nmask, maxnode);
1256	if (err)
1257		return err;
1258	return do_mbind(start, len, mode, mode_flags, &nodes, flags);
1259}
1260
1261/* Set the process memory policy */
1262SYSCALL_DEFINE3(set_mempolicy, int, mode, unsigned long __user *, nmask,
1263		unsigned long, maxnode)
1264{
1265	int err;
1266	nodemask_t nodes;
1267	unsigned short flags;
1268
1269	flags = mode & MPOL_MODE_FLAGS;
1270	mode &= ~MPOL_MODE_FLAGS;
1271	if ((unsigned int)mode >= MPOL_MAX)
1272		return -EINVAL;
1273	if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
1274		return -EINVAL;
1275	err = get_nodes(&nodes, nmask, maxnode);
1276	if (err)
1277		return err;
1278	return do_set_mempolicy(mode, flags, &nodes);
1279}
1280
1281SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1282		const unsigned long __user *, old_nodes,
1283		const unsigned long __user *, new_nodes)
1284{
1285	const struct cred *cred = current_cred(), *tcred;
1286	struct mm_struct *mm = NULL;
1287	struct task_struct *task;
1288	nodemask_t task_nodes;
1289	int err;
1290	nodemask_t *old;
1291	nodemask_t *new;
1292	NODEMASK_SCRATCH(scratch);
1293
1294	if (!scratch)
1295		return -ENOMEM;
1296
1297	old = &scratch->mask1;
1298	new = &scratch->mask2;
1299
1300	err = get_nodes(old, old_nodes, maxnode);
1301	if (err)
1302		goto out;
1303
1304	err = get_nodes(new, new_nodes, maxnode);
1305	if (err)
1306		goto out;
1307
1308	/* Find the mm_struct */
1309	rcu_read_lock();
1310	task = pid ? find_task_by_vpid(pid) : current;
1311	if (!task) {
1312		rcu_read_unlock();
1313		err = -ESRCH;
1314		goto out;
1315	}
1316	mm = get_task_mm(task);
1317	rcu_read_unlock();
1318
1319	err = -EINVAL;
1320	if (!mm)
1321		goto out;
1322
1323	/*
1324	 * Check if this process has the right to modify the specified
1325	 * process. The right exists if the process has administrative
1326	 * capabilities, superuser privileges or the same
1327	 * userid as the target process.
1328	 */
1329	rcu_read_lock();
1330	tcred = __task_cred(task);
1331	if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
1332	    cred->uid  != tcred->suid && cred->uid  != tcred->uid &&
1333	    !capable(CAP_SYS_NICE)) {
1334		rcu_read_unlock();
1335		err = -EPERM;
1336		goto out;
1337	}
1338	rcu_read_unlock();
1339
1340	task_nodes = cpuset_mems_allowed(task);
1341	/* Is the user allowed to access the target nodes? */
1342	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
1343		err = -EPERM;
1344		goto out;
1345	}
1346
1347	if (!nodes_subset(*new, node_states[N_HIGH_MEMORY])) {
1348		err = -EINVAL;
1349		goto out;
1350	}
1351
1352	err = security_task_movememory(task);
1353	if (err)
1354		goto out;
1355
1356	err = do_migrate_pages(mm, old, new,
1357		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
1358out:
1359	if (mm)
1360		mmput(mm);
1361	NODEMASK_SCRATCH_FREE(scratch);
1362
1363	return err;
1364}
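/*
 * Editor's note: the user-space counterpart of this syscall, as an
 * illustrative sketch using the migrate_pages() wrapper from libnuma's
 * <numaif.h> (link with -lnuma): move every page of process `pid` that
 * currently resides on node 0 over to node 1.
 */
#if 0	/* illustration only */
#include <numaif.h>

static long move_node0_to_node1(int pid)
{
	unsigned long from = 1UL << 0;
	unsigned long to   = 1UL << 1;

	/* number of pages that could not be moved, or negative on error */
	return migrate_pages(pid, 8 * sizeof(from), &from, &to);
}
#endif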
1365
1366
1367/* Retrieve NUMA policy */
1368SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1369		unsigned long __user *, nmask, unsigned long, maxnode,
1370		unsigned long, addr, unsigned long, flags)
1371{
1372	int err;
1373	int uninitialized_var(pval);
1374	nodemask_t nodes;
1375
1376	if (nmask != NULL && maxnode < MAX_NUMNODES)
1377		return -EINVAL;
1378
1379	err = do_get_mempolicy(&pval, &nodes, addr, flags);
1380
1381	if (err)
1382		return err;
1383
1384	if (policy && put_user(pval, policy))
1385		return -EFAULT;
1386
1387	if (nmask)
1388		err = copy_nodes_to_user(nmask, maxnode, &nodes);
1389
1390	return err;
1391}
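/*
 * Editor's note: an illustrative user-space query using this syscall
 * (a sketch; get_mempolicy() wrapper from libnuma's <numaif.h>).
 * MPOL_F_NODE | MPOL_F_ADDR makes do_get_mempolicy() touch the page at
 * `addr` via lookup_node() and report the node that backs it.
 */
#if 0	/* illustration only */
#include <numaif.h>
#include <stdio.h>

static void report_backing_node(void *addr)
{
	int node = -1;

	if (get_mempolicy(&node, NULL, 0, addr,
			  MPOL_F_NODE | MPOL_F_ADDR) == 0)
		printf("page at %p is on node %d\n", addr, node);
}
#endif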
1392
1393#ifdef CONFIG_COMPAT
1394
1395asmlinkage long compat_sys_get_mempolicy(int __user *policy,
1396				     compat_ulong_t __user *nmask,
1397				     compat_ulong_t maxnode,
1398				     compat_ulong_t addr, compat_ulong_t flags)
1399{
1400	long err;
1401	unsigned long __user *nm = NULL;
1402	unsigned long nr_bits, alloc_size;
1403	DECLARE_BITMAP(bm, MAX_NUMNODES);
1404
1405	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1406	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1407
1408	if (nmask)
1409		nm = compat_alloc_user_space(alloc_size);
1410
1411	err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1412
1413	if (!err && nmask) {
1414		unsigned long copy_size;
1415		copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
1416		err = copy_from_user(bm, nm, copy_size);
1417		/* ensure entire bitmap is zeroed */
1418		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1419		err |= compat_put_bitmap(nmask, bm, nr_bits);
1420	}
1421
1422	return err;
1423}
1424
1425asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
1426				     compat_ulong_t maxnode)
1427{
1428	long err = 0;
1429	unsigned long __user *nm = NULL;
1430	unsigned long nr_bits, alloc_size;
1431	DECLARE_BITMAP(bm, MAX_NUMNODES);
1432
1433	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1434	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1435
1436	if (nmask) {
1437		err = compat_get_bitmap(bm, nmask, nr_bits);
1438		nm = compat_alloc_user_space(alloc_size);
1439		err |= copy_to_user(nm, bm, alloc_size);
1440	}
1441
1442	if (err)
1443		return -EFAULT;
1444
1445	return sys_set_mempolicy(mode, nm, nr_bits+1);
1446}
1447
1448asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
1449			     compat_ulong_t mode, compat_ulong_t __user *nmask,
1450			     compat_ulong_t maxnode, compat_ulong_t flags)
1451{
1452	long err = 0;
1453	unsigned long __user *nm = NULL;
1454	unsigned long nr_bits, alloc_size;
1455	nodemask_t bm;
1456
1457	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1458	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1459
1460	if (nmask) {
1461		err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
1462		nm = compat_alloc_user_space(alloc_size);
1463		err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
1464	}
1465
1466	if (err)
1467		return -EFAULT;
1468
1469	return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
1470}
1471
1472#endif
1473
1474/*
1475 * get_vma_policy(@task, @vma, @addr)
1476 * @task - task for fallback if vma policy == default
1477 * @vma   - virtual memory area whose policy is sought
1478 * @addr  - address in @vma for shared policy lookup
1479 *
1480 * Returns effective policy for a VMA at specified address.
1481 * Falls back to @task or system default policy, as necessary.
1482 * Current or other task's task mempolicy and non-shared vma policies
1483 * are protected by the task's mmap_sem, which must be held for read by
1484 * the caller.
1485 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1486 * count--added by the get_policy() vm_op, as appropriate--to protect against
1487 * freeing by another task.  It is the caller's responsibility to free the
1488 * extra reference for shared policies.
1489 */
1490struct mempolicy *get_vma_policy(struct task_struct *task,
1491		struct vm_area_struct *vma, unsigned long addr)
1492{
1493	struct mempolicy *pol = task->mempolicy;
1494
1495	if (vma) {
1496		if (vma->vm_ops && vma->vm_ops->get_policy) {
1497			struct mempolicy *vpol = vma->vm_ops->get_policy(vma,
1498									addr);
1499			if (vpol)
1500				pol = vpol;
1501		} else if (vma->vm_policy)
1502			pol = vma->vm_policy;
1503	}
1504	if (!pol)
1505		pol = &default_policy;
1506	return pol;
1507}
1508
1509/*
1510 * Return a nodemask representing a mempolicy for filtering nodes for
1511 * page allocation
1512 */
1513static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
1514{
1515	/* Lower zones don't get a nodemask applied for MPOL_BIND */
1516	if (unlikely(policy->mode == MPOL_BIND) &&
1517			gfp_zone(gfp) >= policy_zone &&
1518			cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1519		return &policy->v.nodes;
1520
1521	return NULL;
1522}
1523
1524/* Return a zonelist indicated by gfp for node representing a mempolicy */
1525static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
1526	int nd)
1527{
1528	switch (policy->mode) {
1529	case MPOL_PREFERRED:
1530		if (!(policy->flags & MPOL_F_LOCAL))
1531			nd = policy->v.preferred_node;
1532		break;
1533	case MPOL_BIND:
1534		/*
1535		 * Normally, MPOL_BIND allocations are node-local within the
1536		 * allowed nodemask.  However, if __GFP_THISNODE is set and the
1537		 * current node isn't part of the mask, we use the zonelist for
1538		 * the first node in the mask instead.
1539		 */
1540		if (unlikely(gfp & __GFP_THISNODE) &&
1541				unlikely(!node_isset(nd, policy->v.nodes)))
1542			nd = first_node(policy->v.nodes);
1543		break;
1544	default:
1545		BUG();
1546	}
1547	return node_zonelist(nd, gfp);
1548}
1549
1550/* Do dynamic interleaving for a process */
1551static unsigned interleave_nodes(struct mempolicy *policy)
1552{
1553	unsigned nid, next;
1554	struct task_struct *me = current;
1555
1556	nid = me->il_next;
1557	next = next_node(nid, policy->v.nodes);
1558	if (next >= MAX_NUMNODES)
1559		next = first_node(policy->v.nodes);
1560	if (next < MAX_NUMNODES)
1561		me->il_next = next;
1562	return nid;
1563}
1564
1565/*
1566 * Depending on the memory policy provide a node from which to allocate the
1567 * next slab entry.
1568 * @policy must be protected from freeing by the caller.  If @policy is
1569 * the current task's mempolicy, this protection is implicit, as only the
1570 * task can change its policy.  The system default policy requires no
1571 * such protection.
1572 */
1573unsigned slab_node(struct mempolicy *policy)
1574{
1575	if (!policy || policy->flags & MPOL_F_LOCAL)
1576		return numa_node_id();
1577
1578	switch (policy->mode) {
1579	case MPOL_PREFERRED:
1580		/*
1581		 * handled MPOL_F_LOCAL above
1582		 */
1583		return policy->v.preferred_node;
1584
1585	case MPOL_INTERLEAVE:
1586		return interleave_nodes(policy);
1587
1588	case MPOL_BIND: {
1589		/*
1590		 * Follow bind policy behavior and start allocation at the
1591		 * first node.
1592		 */
1593		struct zonelist *zonelist;
1594		struct zone *zone;
1595		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1596		zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0];
1597		(void)first_zones_zonelist(zonelist, highest_zoneidx,
1598							&policy->v.nodes,
1599							&zone);
1600		return zone ? zone->node : numa_node_id();
1601	}
1602
1603	default:
1604		BUG();
1605	}
1606}
1607
1608/* Do static interleaving for a VMA with known offset. */
1609static unsigned offset_il_node(struct mempolicy *pol,
1610		struct vm_area_struct *vma, unsigned long off)
1611{
1612	unsigned nnodes = nodes_weight(pol->v.nodes);
1613	unsigned target;
1614	int c;
1615	int nid = -1;
1616
1617	if (!nnodes)
1618		return numa_node_id();
1619	target = (unsigned int)off % nnodes;
1620	c = 0;
1621	do {
1622		nid = next_node(nid, pol->v.nodes);
1623		c++;
1624	} while (c <= target);
1625	return nid;
1626}
1627
1628/* Determine a node number for interleave */
1629static inline unsigned interleave_nid(struct mempolicy *pol,
1630		 struct vm_area_struct *vma, unsigned long addr, int shift)
1631{
1632	if (vma) {
1633		unsigned long off;
1634
1635		/*
1636		 * for small pages, there is no difference between
1637		 * shift and PAGE_SHIFT, so the bit-shift is safe.
1638		 * for huge pages, since vm_pgoff is in units of small
1639		 * pages, we need to shift off the always 0 bits to get
1640		 * a useful offset.
1641		 */
1642		BUG_ON(shift < PAGE_SHIFT);
1643		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
1644		off += (addr - vma->vm_start) >> shift;
1645		return offset_il_node(pol, vma, off);
1646	} else
1647		return interleave_nodes(pol);
1648}
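/*
 * Editor's note: a worked example of the shift arithmetic above
 * (illustrative figures).  For a 2MB hugetlb VMA on x86-64, shift = 21
 * and PAGE_SHIFT = 12.  A fault at vma->vm_start + 6MB in a VMA with
 * vm_pgoff = 0 gives
 *
 *	off = (0 >> (21 - 12)) + (6MB >> 21) = 3
 *
 * so this huge page is interleaved onto the (3 % nnodes)-th set node of
 * the policy's nodemask, as computed by offset_il_node().
 */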
1649
1650/*
1651 * Return the bit number of a random bit set in the nodemask.
1652 * (returns -1 if nodemask is empty)
1653 */
1654int node_random(const nodemask_t *maskp)
1655{
1656	int w, bit = -1;
1657
1658	w = nodes_weight(*maskp);
1659	if (w)
1660		bit = bitmap_ord_to_pos(maskp->bits,
1661			get_random_int() % w, MAX_NUMNODES);
1662	return bit;
1663}
1664
1665#ifdef CONFIG_HUGETLBFS
1666/*
1667 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
1668 * @vma = virtual memory area whose policy is sought
1669 * @addr = address in @vma for shared policy lookup and interleave policy
1670 * @gfp_flags = for requested zone
1671 * @mpol = pointer to mempolicy pointer for reference counted mempolicy
1672 * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask
1673 *
1674 * Returns a zonelist suitable for a huge page allocation and a pointer
1675 * to the struct mempolicy for conditional unref after allocation.
1676 * If the effective policy is 'BIND, returns a pointer to the mempolicy's
1677 * @nodemask for filtering the zonelist.
1678 *
1679 * Must be protected by get_mems_allowed()
1680 */
1681struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
1682				gfp_t gfp_flags, struct mempolicy **mpol,
1683				nodemask_t **nodemask)
1684{
1685	struct zonelist *zl;
1686
1687	*mpol = get_vma_policy(current, vma, addr);
1688	*nodemask = NULL;	/* assume !MPOL_BIND */
1689
1690	if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
1691		zl = node_zonelist(interleave_nid(*mpol, vma, addr,
1692				huge_page_shift(hstate_vma(vma))), gfp_flags);
1693	} else {
1694		zl = policy_zonelist(gfp_flags, *mpol, numa_node_id());
1695		if ((*mpol)->mode == MPOL_BIND)
1696			*nodemask = &(*mpol)->v.nodes;
1697	}
1698	return zl;
1699}
1700
1701/*
1702 * init_nodemask_of_mempolicy
1703 *
1704 * If the current task's mempolicy is "default" [NULL], return 'false'
1705 * to indicate default policy.  Otherwise, extract the policy nodemask
1706 * for 'bind' or 'interleave' policy into the argument nodemask, or
1707 * initialize the argument nodemask to contain the single node for
1708 * 'preferred' or 'local' policy and return 'true' to indicate presence
1709 * of non-default mempolicy.
1710 *
1711 * We don't bother with reference counting the mempolicy [mpol_get/put]
1712 * because the current task is examining its own mempolicy and a task's
1713 * mempolicy is only ever changed by the task itself.
1714 *
1715 * N.B., it is the caller's responsibility to free a returned nodemask.
1716 */
1717bool init_nodemask_of_mempolicy(nodemask_t *mask)
1718{
1719	struct mempolicy *mempolicy;
1720	int nid;
1721
1722	if (!(mask && current->mempolicy))
1723		return false;
1724
1725	task_lock(current);
1726	mempolicy = current->mempolicy;
1727	switch (mempolicy->mode) {
1728	case MPOL_PREFERRED:
1729		if (mempolicy->flags & MPOL_F_LOCAL)
1730			nid = numa_node_id();
1731		else
1732			nid = mempolicy->v.preferred_node;
1733		init_nodemask_of_node(mask, nid);
1734		break;
1735
1736	case MPOL_BIND:
1737		/* Fall through */
1738	case MPOL_INTERLEAVE:
1739		*mask =  mempolicy->v.nodes;
1740		break;
1741
1742	default:
1743		BUG();
1744	}
1745	task_unlock(current);
1746
1747	return true;
1748}
1749#endif
1750
1751/*
1752 * mempolicy_nodemask_intersects
1753 *
1754 * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
1755 * policy.  Otherwise, check for intersection between mask and the policy
1756 * nodemask for 'bind' or 'interleave' policy.  For 'preferred' or 'local'
1757 * policy, always return true since it may allocate elsewhere on fallback.
1758 *
1759 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
1760 */
1761bool mempolicy_nodemask_intersects(struct task_struct *tsk,
1762					const nodemask_t *mask)
1763{
1764	struct mempolicy *mempolicy;
1765	bool ret = true;
1766
1767	if (!mask)
1768		return ret;
1769	task_lock(tsk);
1770	mempolicy = tsk->mempolicy;
1771	if (!mempolicy)
1772		goto out;
1773
1774	switch (mempolicy->mode) {
1775	case MPOL_PREFERRED:
1776		/*
1777		 * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to
1778		 * allocate from; they may fall back to other nodes when oom.
1779		 * Thus, it's possible for tsk to have allocated memory from
1780		 * nodes in mask.
1781		 */
1782		break;
1783	case MPOL_BIND:
1784	case MPOL_INTERLEAVE:
1785		ret = nodes_intersects(mempolicy->v.nodes, *mask);
1786		break;
1787	default:
1788		BUG();
1789	}
1790out:
1791	task_unlock(tsk);
1792	return ret;
1793}
1794
1795/* Allocate a page in interleaved policy.
1796   Own path because it needs to do special accounting. */
1797static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1798					unsigned nid)
1799{
1800	struct zonelist *zl;
1801	struct page *page;
1802
1803	zl = node_zonelist(nid, gfp);
1804	page = __alloc_pages(gfp, order, zl);
1805	if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
1806		inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
1807	return page;
1808}
1809
1810/**
1811 * 	alloc_pages_vma	- Allocate a page for a VMA.
1812 *
1813 * 	@gfp:
1814 *      %GFP_USER    user allocation.
1815 *      %GFP_KERNEL  kernel allocations,
1816 *      %GFP_HIGHMEM highmem/user allocations,
1817 *      %GFP_FS      allocation should not call back into a file system.
1818 *      %GFP_ATOMIC  don't sleep.
1819 *
1820 *	@order:Order of the GFP allocation.
1821 * 	@vma:  Pointer to VMA or NULL if not available.
1822 *	@addr: Virtual Address of the allocation. Must be inside the VMA.
1823 *
1824 * 	This function allocates a page from the kernel page pool and applies
1825 *	a NUMA policy associated with the VMA or the current process.
1826 *	When VMA is not NULL caller must hold down_read on the mmap_sem of the
1827 *	mm_struct of the VMA to prevent it from going away. Should be used for
1828 *	all allocations for pages that will be mapped into
1829 * 	user space. Returns NULL when no page can be allocated.
1830 *
1831 *	Should be called with the mmap_sem of the vma held.
1832 */
1833struct page *
1834alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
1835		unsigned long addr, int node)
1836{
1837	struct mempolicy *pol = get_vma_policy(current, vma, addr);
1838	struct zonelist *zl;
1839	struct page *page;
1840
1841	get_mems_allowed();
1842	if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
1843		unsigned nid;
1844
1845		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
1846		mpol_cond_put(pol);
1847		page = alloc_page_interleave(gfp, order, nid);
1848		put_mems_allowed();
1849		return page;
1850	}
1851	zl = policy_zonelist(gfp, pol, node);
1852	if (unlikely(mpol_needs_cond_ref(pol))) {
1853		/*
1854		 * slow path: ref counted shared policy
1855		 */
1856		struct page *page =  __alloc_pages_nodemask(gfp, order,
1857						zl, policy_nodemask(gfp, pol));
1858		__mpol_put(pol);
1859		put_mems_allowed();
1860		return page;
1861	}
1862	/*
1863	 * fast path:  default or task policy
1864	 */
1865	page = __alloc_pages_nodemask(gfp, order, zl,
1866				      policy_nodemask(gfp, pol));
1867	put_mems_allowed();
1868	return page;
1869}
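/*
 * A minimal usage sketch (hypothetical helper; assumes the caller
 * holds down_read on the VMA's mmap_sem, as required above): allocate
 * one movable user page for a fault at 'addr', honouring the VMA or
 * task policy.
 */
#if 0
static struct page *example_fault_page(struct vm_area_struct *vma,
				       unsigned long addr)
{
	return alloc_pages_vma(GFP_HIGHUSER_MOVABLE, 0, vma, addr,
			       numa_node_id());
}
#endif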
1870
1871/**
1872 * 	alloc_pages_current - Allocate pages.
1873 *
1874 *	@gfp:
1875 *		%GFP_USER   user allocation,
1876 *      	%GFP_KERNEL kernel allocation,
1877 *      	%GFP_HIGHMEM highmem allocation,
1878 *      	%GFP_FS     don't call back into a file system.
1879 *      	%GFP_ATOMIC don't sleep.
1880 *	@order: Power of two of allocation size in pages. 0 is a single page.
1881 *
1882 *	Allocate a page from the kernel page pool.  When not in
1883 *	interrupt context, apply the current process' NUMA policy.
1884 *	Returns NULL when no page can be allocated.
1885 *
1886 *	Don't call cpuset_update_task_memory_state() unless
1887 *	1) it's ok to take cpuset_sem (can WAIT), and
1888 *	2) allocating for current task (not interrupt).
1889 */
1890struct page *alloc_pages_current(gfp_t gfp, unsigned order)
 
1891{
1892	struct mempolicy *pol = current->mempolicy;
 
1893	struct page *page;
1894
1895	if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
1896		pol = &default_policy;
1897
1898	get_mems_allowed();
1899	/*
1900	 * No reference counting needed for current->mempolicy
1901	 * nor system default_policy
1902	 */
1903	if (pol->mode == MPOL_INTERLEAVE)
1904		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
1905	else
1906		page = __alloc_pages_nodemask(gfp, order,
1907				policy_zonelist(gfp, pol, numa_node_id()),
1908				policy_nodemask(gfp, pol));
1909	put_mems_allowed();
1910	return page;
1911}
1912EXPORT_SYMBOL(alloc_pages_current);
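/*
 * A minimal usage sketch: on CONFIG_NUMA kernels plain alloc_pages()
 * resolves to alloc_pages_current(), so an ordinary allocation like
 * this already follows the caller's task policy (interleave, bind,
 * preferred) with no extra code.  The helper name is hypothetical.
 */
#if 0
static void *example_policy_aware_buffer(void)
{
	struct page *page = alloc_pages_current(GFP_KERNEL, 2);	/* 4 pages */

	return page ? page_address(page) : NULL;
}
#endif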
1913
1914/*
1915 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
1916 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
1917 * with the mems_allowed returned by cpuset_mems_allowed().  This
1918 * keeps mempolicies cpuset relative after its cpuset moves.  See
1919 * further kernel/cpuset.c update_nodemask().
1920 *
1921 * current's mempolicy may be rebound by another task (the task that changes
1922 * the cpuset's mems), so we needn't do the rebind work for the current task.
1923 */
1924
1925/* Slow path of a mempolicy duplicate */
1926struct mempolicy *__mpol_dup(struct mempolicy *old)
1927{
1928	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
1929
1930	if (!new)
1931		return ERR_PTR(-ENOMEM);
1932
1933	/* task's mempolicy is protected by alloc_lock */
1934	if (old == current->mempolicy) {
1935		task_lock(current);
1936		*new = *old;
1937		task_unlock(current);
1938	} else
1939		*new = *old;
1940
1941	rcu_read_lock();
1942	if (current_cpuset_is_being_rebound()) {
1943		nodemask_t mems = cpuset_mems_allowed(current);
1944		if (new->flags & MPOL_F_REBINDING)
1945			mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2);
1946		else
1947			mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
1948	}
1949	rcu_read_unlock();
1950	atomic_set(&new->refcnt, 1);
1951	return new;
1952}
1953
1954/*
1955 * If *frompol needs [has] an extra ref, copy *frompol to *tompol,
1956 * eliminate the MPOL_F_* flags that require conditional ref and
1957 * [NOTE!!!] drop the extra ref.  Not safe to reference *frompol directly
1958 * after return.  Use the returned value.
1959 *
1960 * Allows use of a mempolicy for, e.g., multiple allocations with a single
1961 * policy lookup, even if the policy needs/has extra ref on lookup.
1962 * shmem_readahead needs this.
1963 */
1964struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
1965						struct mempolicy *frompol)
1966{
1967	if (!mpol_needs_cond_ref(frompol))
1968		return frompol;
1969
1970	*tompol = *frompol;
1971	tompol->flags &= ~MPOL_F_SHARED;	/* copy doesn't need unref */
1972	__mpol_put(frompol);
1973	return tompol;
1974}
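/*
 * A minimal sketch of the pattern described above (hypothetical helper,
 * loop body elided): look the shared policy up once, stabilise it in an
 * on-stack copy so the conditional ref is dropped immediately, and then
 * reuse it for several consecutive allocations.
 */
#if 0
static void example_readahead(struct shared_policy *sp, pgoff_t idx)
{
	struct mempolicy mpol;
	struct mempolicy *pol;
	int i;

	pol = __mpol_cond_copy(&mpol, mpol_shared_policy_lookup(sp, idx));
	for (i = 0; i < 4; i++) {
		/* ... allocate page idx + i using 'pol' ... */
	}
}
#endif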
1975
1976/* Slow path of a mempolicy comparison */
1977int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
1978{
1979	if (!a || !b)
1980		return 0;
1981	if (a->mode != b->mode)
1982		return 0;
1983	if (a->flags != b->flags)
1984		return 0;
 
 
1985	if (mpol_store_user_nodemask(a))
1986		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
1987			return 0;
1988
1989	switch (a->mode) {
1990	case MPOL_BIND:
1991		/* Fall through */
1992	case MPOL_INTERLEAVE:
1993		return nodes_equal(a->v.nodes, b->v.nodes);
1994	case MPOL_PREFERRED:
1995		return a->v.preferred_node == b->v.preferred_node;
 
 
 
1996	default:
1997		BUG();
1998		return 0;
1999	}
2000}
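/*
 * A minimal sketch of the typical caller (hypothetical helper): the
 * mpol_equal() wrapper short-circuits on pointer equality and only
 * falls into __mpol_equal() above when the pointers differ, e.g. when
 * deciding whether two adjacent VMAs carry the same policy and may be
 * merged.
 */
#if 0
static int example_same_policy(struct vm_area_struct *a,
			       struct vm_area_struct *b)
{
	return mpol_equal(vma_policy(a), vma_policy(b));
}
#endif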
2001
2002/*
2003 * Shared memory backing store policy support.
2004 *
2005 * Remember policies even when nobody has shared memory mapped.
2006 * The policies are kept in Red-Black tree linked from the inode.
2007 * They are protected by the sp->lock spinlock, which should be held
2008 * for any accesses to the tree.
2009 */
2010
2011/* lookup first element intersecting start-end */
2012/* Caller holds sp->lock */
2013static struct sp_node *
2014sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
 
 
2015{
2016	struct rb_node *n = sp->root.rb_node;
2017
2018	while (n) {
2019		struct sp_node *p = rb_entry(n, struct sp_node, nd);
2020
2021		if (start >= p->end)
2022			n = n->rb_right;
2023		else if (end <= p->start)
2024			n = n->rb_left;
2025		else
2026			break;
2027	}
2028	if (!n)
2029		return NULL;
2030	for (;;) {
2031		struct sp_node *w = NULL;
2032		struct rb_node *prev = rb_prev(n);
2033		if (!prev)
2034			break;
2035		w = rb_entry(prev, struct sp_node, nd);
2036		if (w->end <= start)
2037			break;
2038		n = prev;
2039	}
2040	return rb_entry(n, struct sp_node, nd);
2041}
2042
2043/* Insert a new shared policy into the list. */
2044/* Caller holds sp->lock */
 
 
2045static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2046{
2047	struct rb_node **p = &sp->root.rb_node;
2048	struct rb_node *parent = NULL;
2049	struct sp_node *nd;
2050
2051	while (*p) {
2052		parent = *p;
2053		nd = rb_entry(parent, struct sp_node, nd);
2054		if (new->start < nd->start)
2055			p = &(*p)->rb_left;
2056		else if (new->end > nd->end)
2057			p = &(*p)->rb_right;
2058		else
2059			BUG();
2060	}
2061	rb_link_node(&new->nd, parent, p);
2062	rb_insert_color(&new->nd, &sp->root);
2063	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
2064		 new->policy ? new->policy->mode : 0);
2065}
2066
2067/* Find shared policy intersecting idx */
2068struct mempolicy *
2069mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2070{
2071	struct mempolicy *pol = NULL;
2072	struct sp_node *sn;
2073
2074	if (!sp->root.rb_node)
2075		return NULL;
2076	spin_lock(&sp->lock);
2077	sn = sp_lookup(sp, idx, idx+1);
2078	if (sn) {
2079		mpol_get(sn->policy);
2080		pol = sn->policy;
2081	}
2082	spin_unlock(&sp->lock);
2083	return pol;
2084}
2085
2086static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2087{
2088	pr_debug("deleting %lx-%lx\n", n->start, n->end);
2089	rb_erase(&n->nd, &sp->root);
2090	mpol_put(n->policy);
2091	kmem_cache_free(sn_cache, n);
2092}
2093
2094static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2095				struct mempolicy *pol)
2096{
2097	struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
 
2098
 
2099	if (!n)
2100		return NULL;
2101	n->start = start;
2102	n->end = end;
2103	mpol_get(pol);
2104	pol->flags |= MPOL_F_SHARED;	/* for unref */
2105	n->policy = pol;
 
 
 
 
2106	return n;
2107}
2108
2109/* Replace a policy range. */
2110static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2111				 unsigned long end, struct sp_node *new)
2112{
2113	struct sp_node *n, *new2 = NULL;
 
 
 
2114
2115restart:
2116	spin_lock(&sp->lock);
2117	n = sp_lookup(sp, start, end);
2118	/* Take care of old policies in the same range. */
2119	while (n && n->start < end) {
2120		struct rb_node *next = rb_next(&n->nd);
2121		if (n->start >= start) {
2122			if (n->end <= end)
2123				sp_delete(sp, n);
2124			else
2125				n->start = end;
2126		} else {
2127			/* Old policy spanning whole new range. */
2128			if (n->end > end) {
2129				if (!new2) {
2130					spin_unlock(&sp->lock);
2131					new2 = sp_alloc(end, n->end, n->policy);
2132					if (!new2)
2133						return -ENOMEM;
2134					goto restart;
2135				}
2136				n->end = start;
2137				sp_insert(sp, new2);
2138				new2 = NULL;
 
2139				break;
2140			} else
2141				n->end = start;
2142		}
2143		if (!next)
2144			break;
2145		n = rb_entry(next, struct sp_node, nd);
2146	}
2147	if (new)
2148		sp_insert(sp, new);
2149	spin_unlock(&sp->lock);
2150	if (new2) {
2151		mpol_put(new2->policy);
2152		kmem_cache_free(sn_cache, new2);
2153	}
2154	return 0;
2155}
2156
2157/**
2158 * mpol_shared_policy_init - initialize shared policy for inode
2159 * @sp: pointer to inode shared policy
2160 * @mpol:  struct mempolicy to install
2161 *
2162 * Install non-NULL @mpol in inode's shared policy rb-tree.
2163 * On entry, the current task has a reference on a non-NULL @mpol.
2164 * This must be released on exit.
2165 * This is called at get_inode() time, so we can use GFP_KERNEL.
2166 */
2167void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2168{
2169	int ret;
2170
2171	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
2172	spin_lock_init(&sp->lock);
2173
2174	if (mpol) {
2175		struct vm_area_struct pvma;
2176		struct mempolicy *new;
2177		NODEMASK_SCRATCH(scratch);
2178
2179		if (!scratch)
2180			goto put_mpol;
2181		/* contextualize the tmpfs mount point mempolicy */
2182		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
2183		if (IS_ERR(new))
 
2184			goto free_scratch; /* no valid nodemask intersection */
2185
2186		task_lock(current);
2187		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
2188		task_unlock(current);
2189		if (ret)
2190			goto put_new;
2191
2192		/* Create pseudo-vma that contains just the policy */
2193		memset(&pvma, 0, sizeof(struct vm_area_struct));
2194		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
2195		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
2196
2197put_new:
2198		mpol_put(new);			/* drop initial ref */
 
 
 
 
2199free_scratch:
2200		NODEMASK_SCRATCH_FREE(scratch);
2201put_mpol:
2202		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
2203	}
2204}
2205
2206int mpol_set_shared_policy(struct shared_policy *info,
2207			struct vm_area_struct *vma, struct mempolicy *npol)
2208{
2209	int err;
2210	struct sp_node *new = NULL;
2211	unsigned long sz = vma_pages(vma);
2212
2213	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
2214		 vma->vm_pgoff,
2215		 sz, npol ? npol->mode : -1,
2216		 npol ? npol->flags : -1,
2217		 npol ? nodes_addr(npol->v.nodes)[0] : -1);
2218
2219	if (npol) {
2220		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2221		if (!new)
2222			return -ENOMEM;
2223	}
2224	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2225	if (err && new)
2226		kmem_cache_free(sn_cache, new);
2227	return err;
2228}
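/*
 * A minimal sketch of the expected caller (the struct and helper names
 * are hypothetical; tmpfs does the equivalent from its set_policy vm
 * operation): a filesystem that supports per-file NUMA policy keeps a
 * shared_policy in its inode info and installs new VMA policies here.
 */
#if 0
struct example_inode_info {
	struct shared_policy	policy;
};

static int example_set_policy(struct example_inode_info *info,
			      struct vm_area_struct *vma,
			      struct mempolicy *mpol)
{
	return mpol_set_shared_policy(&info->policy, vma, mpol);
}
#endif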
2229
2230/* Free a backing policy store on inode delete. */
2231void mpol_free_shared_policy(struct shared_policy *p)
2232{
2233	struct sp_node *n;
2234	struct rb_node *next;
2235
2236	if (!p->root.rb_node)
2237		return;
2238	spin_lock(&p->lock);
2239	next = rb_first(&p->root);
2240	while (next) {
2241		n = rb_entry(next, struct sp_node, nd);
2242		next = rb_next(&n->nd);
2243		rb_erase(&n->nd, &p->root);
2244		mpol_put(n->policy);
2245		kmem_cache_free(sn_cache, n);
2246	}
2247	spin_unlock(&p->lock);
2248}
 
2249
2250/* assumes fs == KERNEL_DS */
2251void __init numa_policy_init(void)
2252{
2253	nodemask_t interleave_nodes;
2254	unsigned long largest = 0;
2255	int nid, prefer = 0;
2256
2257	policy_cache = kmem_cache_create("numa_policy",
2258					 sizeof(struct mempolicy),
2259					 0, SLAB_PANIC, NULL);
2260
2261	sn_cache = kmem_cache_create("shared_policy_node",
2262				     sizeof(struct sp_node),
2263				     0, SLAB_PANIC, NULL);
2264
2265	/*
2266	 * Set interleaving policy for system init. Interleaving is only
2267	 * enabled across suitably sized nodes (default is >= 16MB), or
2268	 * fall back to the largest node if they're all smaller.
2269	 */
2270	nodes_clear(interleave_nodes);
2271	for_each_node_state(nid, N_HIGH_MEMORY) {
2272		unsigned long total_pages = node_present_pages(nid);
2273
2274		/* Preserve the largest node */
2275		if (largest < total_pages) {
2276			largest = total_pages;
2277			prefer = nid;
2278		}
2279
2280		/* Interleave this node? */
2281		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2282			node_set(nid, interleave_nodes);
2283	}
2284
2285	/* All too small, use the largest */
2286	if (unlikely(nodes_empty(interleave_nodes)))
2287		node_set(prefer, interleave_nodes);
2288
2289	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2290		printk("numa_policy_init: interleaving failed\n");
 
 
2291}
2292
2293/* Reset policy of current process to default */
2294void numa_default_policy(void)
2295{
2296	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
2297}
2298
2299/*
2300 * Parse and format mempolicy from/to strings
2301 */
2302
2303/*
2304 * "local" is pseudo-policy:  MPOL_PREFERRED with MPOL_F_LOCAL flag
2305 * Used only for mpol_parse_str() and mpol_to_str()
2306 */
2307#define MPOL_LOCAL MPOL_MAX
2308static const char * const policy_modes[] =
2309{
2310	[MPOL_DEFAULT]    = "default",
2311	[MPOL_PREFERRED]  = "prefer",
2312	[MPOL_BIND]       = "bind",
2313	[MPOL_INTERLEAVE] = "interleave",
2314	[MPOL_LOCAL]      = "local"
 
2315};
2316
2317
2318#ifdef CONFIG_TMPFS
2319/**
2320 * mpol_parse_str - parse string to mempolicy
2321 * @str:  string containing mempolicy to parse
2322 * @mpol:  pointer to struct mempolicy pointer, returned on success.
2323 * @no_context:  flag whether to "contextualize" the mempolicy
2324 *
2325 * Format of input:
2326 *	<mode>[=<flags>][:<nodelist>]
2327 *
2328 * if @no_context is true, save the input nodemask in w.user_nodemask in
2329 * the returned mempolicy.  This will be used to "clone" the mempolicy in
2330 * a specific context [cpuset] at a later time.  Used to parse tmpfs mpol
2331 * mount option.  Note that if 'static' or 'relative' mode flags were
2332 * specified, the input nodemask will already have been saved.  Saving
2333 * it again is redundant, but safe.
2334 *
2335 * On success, returns 0, else 1
2336 */
2337int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
2338{
2339	struct mempolicy *new = NULL;
2340	unsigned short mode;
2341	unsigned short uninitialized_var(mode_flags);
2342	nodemask_t nodes;
2343	char *nodelist = strchr(str, ':');
2344	char *flags = strchr(str, '=');
2345	int err = 1;
 
 
 
2346
2347	if (nodelist) {
2348		/* NUL-terminate mode or flags string */
2349		*nodelist++ = '\0';
2350		if (nodelist_parse(nodelist, nodes))
2351			goto out;
2352		if (!nodes_subset(nodes, node_states[N_HIGH_MEMORY]))
2353			goto out;
2354	} else
2355		nodes_clear(nodes);
2356
2357	if (flags)
2358		*flags++ = '\0';	/* terminate mode string */
2359
2360	for (mode = 0; mode <= MPOL_LOCAL; mode++) {
2361		if (!strcmp(str, policy_modes[mode])) {
2362			break;
2363		}
2364	}
2365	if (mode > MPOL_LOCAL)
2366		goto out;
2367
2368	switch (mode) {
2369	case MPOL_PREFERRED:
2370		/*
2371		 * Insist on a nodelist of one node only
 
 
2372		 */
2373		if (nodelist) {
2374			char *rest = nodelist;
2375			while (isdigit(*rest))
2376				rest++;
2377			if (*rest)
2378				goto out;
 
 
2379		}
2380		break;
2381	case MPOL_INTERLEAVE:
2382		/*
2383		 * Default to online nodes with memory if no nodelist
2384		 */
2385		if (!nodelist)
2386			nodes = node_states[N_HIGH_MEMORY];
2387		break;
2388	case MPOL_LOCAL:
2389		/*
2390		 * Don't allow a nodelist;  mpol_new() checks flags
2391		 */
2392		if (nodelist)
2393			goto out;
2394		mode = MPOL_PREFERRED;
2395		break;
2396	case MPOL_DEFAULT:
2397		/*
2398		 * Insist on an empty nodelist
2399		 */
2400		if (!nodelist)
2401			err = 0;
2402		goto out;
 
2403	case MPOL_BIND:
2404		/*
2405		 * Insist on a nodelist
2406		 */
2407		if (!nodelist)
2408			goto out;
2409	}
2410
2411	mode_flags = 0;
2412	if (flags) {
2413		/*
2414		 * Currently, we only support two mutually exclusive
2415		 * mode flags.
2416		 */
2417		if (!strcmp(flags, "static"))
2418			mode_flags |= MPOL_F_STATIC_NODES;
2419		else if (!strcmp(flags, "relative"))
2420			mode_flags |= MPOL_F_RELATIVE_NODES;
2421		else
2422			goto out;
2423	}
2424
2425	new = mpol_new(mode, mode_flags, &nodes);
2426	if (IS_ERR(new))
2427		goto out;
2428
2429	if (no_context) {
2430		/* save for contextualization */
2431		new->w.user_nodemask = nodes;
 
 
 
 
 
 
2432	} else {
2433		int ret;
2434		NODEMASK_SCRATCH(scratch);
2435		if (scratch) {
2436			task_lock(current);
2437			ret = mpol_set_nodemask(new, &nodes, scratch);
2438			task_unlock(current);
2439		} else
2440			ret = -ENOMEM;
2441		NODEMASK_SCRATCH_FREE(scratch);
2442		if (ret) {
2443			mpol_put(new);
2444			goto out;
2445		}
2446	}
2447	err = 0;
2448
2449out:
2450	/* Restore string for error message */
2451	if (nodelist)
2452		*--nodelist = ':';
2453	if (flags)
2454		*--flags = '=';
2455	if (!err)
2456		*mpol = new;
2457	return err;
2458}
2459#endif /* CONFIG_TMPFS */
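/*
 * A minimal usage sketch (assumes CONFIG_TMPFS and that nodes 0-3 have
 * memory; helper name hypothetical): parse a typical tmpfs "mpol="
 * mount option.  The buffer must be writable because parsing
 * temporarily NUL-terminates the mode and flags substrings.
 */
#if 0
static struct mempolicy *example_parse_mount_option(void)
{
	char str[] = "interleave=static:0-3";
	struct mempolicy *mpol = NULL;

	if (mpol_parse_str(str, &mpol, 1))	/* no_context: defer to cpuset */
		return NULL;			/* parse error */
	return mpol;
}
#endif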
2460
2461/**
2462 * mpol_to_str - format a mempolicy structure for printing
2463 * @buffer:  to contain formatted mempolicy string
2464 * @maxlen:  length of @buffer
2465 * @pol:  pointer to mempolicy to be formatted
2466 * @no_context:  "context free" mempolicy - use nodemask in w.user_nodemask
2467 *
2468 * Convert a mempolicy into a string.
2469 * Returns the number of characters in buffer (if positive)
2470 * or an error (negative)
2471 */
2472int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
2473{
2474	char *p = buffer;
2475	int l;
2476	nodemask_t nodes;
2477	unsigned short mode;
2478	unsigned short flags = pol ? pol->flags : 0;
2479
2480	/*
2481	 * Sanity check:  room for longest mode, flag and some nodes
2482	 */
2483	VM_BUG_ON(maxlen < strlen("interleave") + strlen("relative") + 16);
2484
2485	if (!pol || pol == &default_policy)
2486		mode = MPOL_DEFAULT;
2487	else
2488		mode = pol->mode;
 
 
2489
2490	switch (mode) {
2491	case MPOL_DEFAULT:
2492		nodes_clear(nodes);
2493		break;
2494
2495	case MPOL_PREFERRED:
2496		nodes_clear(nodes);
2497		if (flags & MPOL_F_LOCAL)
2498			mode = MPOL_LOCAL;	/* pseudo-policy */
2499		else
2500			node_set(pol->v.preferred_node, nodes);
2501		break;
2502
2503	case MPOL_BIND:
2504		/* Fall through */
2505	case MPOL_INTERLEAVE:
2506		if (no_context)
2507			nodes = pol->w.user_nodemask;
2508		else
2509			nodes = pol->v.nodes;
2510		break;
2511
2512	default:
2513		BUG();
 
 
2514	}
2515
2516	l = strlen(policy_modes[mode]);
2517	if (buffer + maxlen < p + l + 1)
2518		return -ENOSPC;
2519
2520	strcpy(p, policy_modes[mode]);
2521	p += l;
2522
2523	if (flags & MPOL_MODE_FLAGS) {
2524		if (buffer + maxlen < p + 2)
2525			return -ENOSPC;
2526		*p++ = '=';
2527
2528		/*
2529		 * Currently, the only defined flags are mutually exclusive
2530		 */
2531		if (flags & MPOL_F_STATIC_NODES)
2532			p += snprintf(p, buffer + maxlen - p, "static");
2533		else if (flags & MPOL_F_RELATIVE_NODES)
2534			p += snprintf(p, buffer + maxlen - p, "relative");
2535	}
2536
2537	if (!nodes_empty(nodes)) {
2538		if (buffer + maxlen < p + 2)
2539			return -ENOSPC;
2540		*p++ = ':';
2541	 	p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
2542	}
2543	return p - buffer;
2544}
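/*
 * A minimal usage sketch (hypothetical helper): format a policy the
 * way /proc/<pid>/numa_maps and the tmpfs show_options code do, using
 * the in-kernel (no_context == 0) nodemask.
 */
#if 0
static void example_show_policy(struct seq_file *m, struct mempolicy *pol)
{
	char buffer[64];

	if (mpol_to_str(buffer, sizeof(buffer), pol, 0) >= 0)
		seq_printf(m, "mpol=%s\n", buffer);
}
#endif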
v6.8
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Simple NUMA memory policy for the Linux kernel.
   4 *
   5 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
   6 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 
   7 *
   8 * NUMA policy allows the user to give hints in which node(s) memory should
   9 * be allocated.
  10 *
  11 * Support four policies per VMA and per process:
  12 *
  13 * The VMA policy has priority over the process policy for a page fault.
  14 *
  15 * interleave     Allocate memory interleaved over a set of nodes,
  16 *                with normal fallback if it fails.
  17 *                For VMA based allocations this interleaves based on the
  18 *                offset into the backing object or offset into the mapping
  19 *                for anonymous memory. For process policy a process counter
  20 *                is used.
  21 *
  22 * bind           Only allocate memory on a specific set of nodes,
  23 *                no fallback.
  24 *                FIXME: memory is allocated starting with the first node
  25 *                to the last. It would be better if bind would truly restrict
  26 *                the allocation to memory nodes instead
  27 *
  28 * preferred      Try a specific node first before normal fallback.
  29 *                As a special case NUMA_NO_NODE here means do the allocation
  30 *                on the local CPU. This is normally identical to default,
  31 *                but useful to set in a VMA when you have a non default
  32 *                process policy.
  33 *
  34 * preferred many Try a set of nodes first before normal fallback. This is
  35 *                similar to preferred without the special case.
  36 *
  37 * default        Allocate on the local node first, or when on a VMA
  38 *                use the process policy. This is what Linux always did
  39 *		  in a NUMA aware kernel and still does by, ahem, default.
  40 *
  41 * The process policy is applied for most non interrupt memory allocations
  42 * in that process' context. Interrupts ignore the policies and always
  43 * try to allocate on the local CPU. The VMA policy is only applied for memory
  44 * allocations for a VMA in the VM.
  45 *
  46 * Currently there are a few corner cases in swapping where the policy
  47 * is not applied, but the majority should be handled. When process policy
  48 * is used it is not remembered over swap outs/swap ins.
  49 *
  50 * Only the highest zone in the zone hierarchy gets policied. Allocations
  51 * requesting a lower zone just use default policy. This implies that
  52 * on systems with highmem kernel lowmem allocation don't get policied.
  53 * Same with GFP_DMA allocations.
  54 *
  55 * For shmem/tmpfs shared memory the policy is shared between
  56 * all users and remembered even when nobody has memory mapped.
  57 */
  58
  59/* Notebook:
  60   fix mmap readahead to honour policy and enable policy for any page cache
  61   object
  62   statistics for bigpages
  63   global policy for page cache? currently it uses process policy. Requires
  64   first item above.
  65   handle mremap for shared memory (currently ignored for the policy)
  66   grows down?
  67   make bind policy root only? It can trigger oom much faster and the
  68   kernel is not always grateful with that.
  69*/
  70
  71#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  72
  73#include <linux/mempolicy.h>
  74#include <linux/pagewalk.h>
  75#include <linux/highmem.h>
  76#include <linux/hugetlb.h>
  77#include <linux/kernel.h>
  78#include <linux/sched.h>
  79#include <linux/sched/mm.h>
  80#include <linux/sched/numa_balancing.h>
  81#include <linux/sched/task.h>
  82#include <linux/nodemask.h>
  83#include <linux/cpuset.h>
  84#include <linux/slab.h>
  85#include <linux/string.h>
  86#include <linux/export.h>
  87#include <linux/nsproxy.h>
  88#include <linux/interrupt.h>
  89#include <linux/init.h>
  90#include <linux/compat.h>
  91#include <linux/ptrace.h>
  92#include <linux/swap.h>
  93#include <linux/seq_file.h>
  94#include <linux/proc_fs.h>
  95#include <linux/migrate.h>
  96#include <linux/ksm.h>
  97#include <linux/rmap.h>
  98#include <linux/security.h>
  99#include <linux/syscalls.h>
 100#include <linux/ctype.h>
 101#include <linux/mm_inline.h>
 102#include <linux/mmu_notifier.h>
 103#include <linux/printk.h>
 104#include <linux/swapops.h>
 105
 106#include <asm/tlbflush.h>
 107#include <asm/tlb.h>
 108#include <linux/uaccess.h>
 109
 110#include "internal.h"
 111
 112/* Internal flags */
 113#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
 114#define MPOL_MF_INVERT       (MPOL_MF_INTERNAL << 1)	/* Invert check for nodemask */
 115#define MPOL_MF_WRLOCK       (MPOL_MF_INTERNAL << 2)	/* Write-lock walked vmas */
 116
 117static struct kmem_cache *policy_cache;
 118static struct kmem_cache *sn_cache;
 119
 120/* Highest zone. A specific allocation for a zone below that is not
 121   policied. */
 122enum zone_type policy_zone = 0;
 123
 124/*
 125 * run-time system-wide default policy => local allocation
 126 */
 127static struct mempolicy default_policy = {
 128	.refcnt = ATOMIC_INIT(1), /* never free it */
 129	.mode = MPOL_LOCAL,
 
 130};
 131
 132static struct mempolicy preferred_node_policy[MAX_NUMNODES];
 133
 134/**
 135 * numa_nearest_node - Find nearest node by state
 136 * @node: Node id to start the search
 137 * @state: State to filter the search
 138 *
 139 * Lookup the closest node by distance if @nid is not in state.
 140 *
 141 * Return: this @node if it is in state, otherwise the closest node by distance
 142 */
 143int numa_nearest_node(int node, unsigned int state)
 144{
 145	int min_dist = INT_MAX, dist, n, min_node;
 146
 147	if (state >= NR_NODE_STATES)
 148		return -EINVAL;
 149
 150	if (node == NUMA_NO_NODE || node_state(node, state))
 151		return node;
 152
 153	min_node = node;
 154	for_each_node_state(n, state) {
 155		dist = node_distance(node, n);
 156		if (dist < min_dist) {
 157			min_dist = dist;
 158			min_node = n;
 159		}
 160	}
 161
 162	return min_node;
 163}
 164EXPORT_SYMBOL_GPL(numa_nearest_node);
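/*
 * A minimal usage sketch (hypothetical helper): code that wants a node
 * with memory, but might be running on a memoryless or offline node,
 * asks for the nearest node in the wanted state.
 */
#if 0
static int example_pick_memory_node(void)
{
	return numa_nearest_node(numa_node_id(), N_MEMORY);
}
#endif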
 165
 166struct mempolicy *get_task_policy(struct task_struct *p)
 167{
 168	struct mempolicy *pol = p->mempolicy;
 169	int node;
 170
 171	if (pol)
 172		return pol;
 173
 174	node = numa_node_id();
 175	if (node != NUMA_NO_NODE) {
 176		pol = &preferred_node_policy[node];
 177		/* preferred_node_policy is not initialised early in boot */
 178		if (pol->mode)
 179			return pol;
 180	}
 181
 182	return &default_policy;
 183}
 184
 185static const struct mempolicy_operations {
 186	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
 187	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
 188} mpol_ops[MPOL_MAX];
 189
 190static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
 191{
 192	return pol->flags & MPOL_MODE_FLAGS;
 193}
 194
 195static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
 196				   const nodemask_t *rel)
 197{
 198	nodemask_t tmp;
 199	nodes_fold(tmp, *orig, nodes_weight(*rel));
 200	nodes_onto(*ret, tmp, *rel);
 201}
 202
 203static int mpol_new_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
 204{
 205	if (nodes_empty(*nodes))
 206		return -EINVAL;
 207	pol->nodes = *nodes;
 208	return 0;
 209}
 210
 211static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
 212{
 213	if (nodes_empty(*nodes))
 214		return -EINVAL;
 215
 216	nodes_clear(pol->nodes);
 217	node_set(first_node(*nodes), pol->nodes);
 218	return 0;
 219}
 220
 221/*
 222 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 223 * any, for the new policy.  mpol_new() has already validated the nodes
 224 * parameter with respect to the policy mode and flags.
 
 225 *
 226 * Must be called holding task's alloc_lock to protect task's mems_allowed
 227 * and mempolicy.  May also be called holding the mmap_lock for write.
 228 */
 229static int mpol_set_nodemask(struct mempolicy *pol,
 230		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
 231{
 232	int ret;
 233
 234	/*
 235	 * Default (pol==NULL) and local memory policies are not
 236	 * subject to any remapping. They also do not need any special
 237	 * constructor.
 238	 */
 239	if (!pol || pol->mode == MPOL_LOCAL)
 240		return 0;
 241
 242	/* Check N_MEMORY */
 243	nodes_and(nsc->mask1,
 244		  cpuset_current_mems_allowed, node_states[N_MEMORY]);
 245
 246	VM_BUG_ON(!nodes);
 
 
 
 
 
 
 
 247
 248	if (pol->flags & MPOL_F_RELATIVE_NODES)
 249		mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
 250	else
 251		nodes_and(nsc->mask2, *nodes, nsc->mask1);
 
 
 252
 253	if (mpol_store_user_nodemask(pol))
 254		pol->w.user_nodemask = *nodes;
 255	else
 256		pol->w.cpuset_mems_allowed = cpuset_current_mems_allowed;
 257
 258	ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
 259	return ret;
 260}
 261
 262/*
 263 * This function just creates a new policy, does some check and simple
 264 * initialization. You must invoke mpol_set_nodemask() to set nodes.
 265 */
 266static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
 267				  nodemask_t *nodes)
 268{
 269	struct mempolicy *policy;
 270
 
 
 
 271	if (mode == MPOL_DEFAULT) {
 272		if (nodes && !nodes_empty(*nodes))
 273			return ERR_PTR(-EINVAL);
 274		return NULL;
 275	}
 276	VM_BUG_ON(!nodes);
 277
 278	/*
 279	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
 280	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
 281	 * All other modes require a valid pointer to a non-empty nodemask.
 282	 */
 283	if (mode == MPOL_PREFERRED) {
 284		if (nodes_empty(*nodes)) {
 285			if (((flags & MPOL_F_STATIC_NODES) ||
 286			     (flags & MPOL_F_RELATIVE_NODES)))
 287				return ERR_PTR(-EINVAL);
 288
 289			mode = MPOL_LOCAL;
 290		}
 291	} else if (mode == MPOL_LOCAL) {
 292		if (!nodes_empty(*nodes) ||
 293		    (flags & MPOL_F_STATIC_NODES) ||
 294		    (flags & MPOL_F_RELATIVE_NODES))
 295			return ERR_PTR(-EINVAL);
 296	} else if (nodes_empty(*nodes))
 297		return ERR_PTR(-EINVAL);
 298
 299	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
 300	if (!policy)
 301		return ERR_PTR(-ENOMEM);
 302	atomic_set(&policy->refcnt, 1);
 303	policy->mode = mode;
 304	policy->flags = flags;
 305	policy->home_node = NUMA_NO_NODE;
 306
 307	return policy;
 308}
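/*
 * A minimal sketch of the two-step construction described above
 * (hypothetical helper): mpol_new() validates mode/flags and allocates,
 * then mpol_set_nodemask() contextualises the nodemask under the
 * caller's cpuset, with task_lock held and a scratch nodemask supplied.
 * This mirrors what do_set_mempolicy() below does.
 */
#if 0
static struct mempolicy *example_build_policy(unsigned short mode,
					      nodemask_t *nodes)
{
	NODEMASK_SCRATCH(scratch);
	struct mempolicy *new;
	int err;

	if (!scratch)
		return ERR_PTR(-ENOMEM);

	new = mpol_new(mode, 0, nodes);
	if (!IS_ERR(new)) {
		task_lock(current);
		err = mpol_set_nodemask(new, nodes, scratch);
		task_unlock(current);
		if (err) {
			mpol_put(new);
			new = ERR_PTR(err);
		}
	}
	NODEMASK_SCRATCH_FREE(scratch);
	return new;
}
#endif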
 309
 310/* Slow path of a mpol destructor. */
 311void __mpol_put(struct mempolicy *pol)
 312{
 313	if (!atomic_dec_and_test(&pol->refcnt))
 314		return;
 315	kmem_cache_free(policy_cache, pol);
 316}
 317
 318static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
 
 319{
 320}
 321
 322static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
 323{
 324	nodemask_t tmp;
 325
 326	if (pol->flags & MPOL_F_STATIC_NODES)
 327		nodes_and(tmp, pol->w.user_nodemask, *nodes);
 328	else if (pol->flags & MPOL_F_RELATIVE_NODES)
 329		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
 330	else {
 331		nodes_remap(tmp, pol->nodes, pol->w.cpuset_mems_allowed,
 332								*nodes);
 333		pol->w.cpuset_mems_allowed = *nodes;
 334	}
 335
 336	if (nodes_empty(tmp))
 337		tmp = *nodes;
 338
 339	pol->nodes = tmp;
 340}
 341
 342static void mpol_rebind_preferred(struct mempolicy *pol,
 343						const nodemask_t *nodes)
 
 344{
 345	pol->w.cpuset_mems_allowed = *nodes;
 346}
 347
 348/*
 349 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 350 *
 351 * Per-vma policies are protected by mmap_lock. Allocations using per-task
 352 * policies are protected by task->mems_allowed_seq to prevent a premature
 353 * OOM/allocation failure due to parallel nodemask modification.
 354 */
 355static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
 
 356{
 357	if (!pol || pol->mode == MPOL_LOCAL)
 358		return;
 359	if (!mpol_store_user_nodemask(pol) &&
 360	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
 361		return;
 362
 363	mpol_ops[pol->mode].rebind(pol, newmask);
 364}
 365
 366/*
 367 * Wrapper for mpol_rebind_policy() that just requires task
 368 * pointer, and updates task mempolicy.
 369 *
 370 * Called with task's alloc_lock held.
 371 */
 372void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
 
 
 373{
 374	mpol_rebind_policy(tsk->mempolicy, new);
 375}
 376
 377/*
 378 * Rebind each vma in mm to new nodemask.
 379 *
 380 * Call holding a reference to mm.  Takes mm->mmap_lock during call.
 381 */
 
 382void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
 383{
 384	struct vm_area_struct *vma;
 385	VMA_ITERATOR(vmi, mm, 0);
 386
 387	mmap_write_lock(mm);
 388	for_each_vma(vmi, vma) {
 389		vma_start_write(vma);
 390		mpol_rebind_policy(vma->vm_policy, new);
 391	}
 392	mmap_write_unlock(mm);
 393}
 394
 395static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
 396	[MPOL_DEFAULT] = {
 397		.rebind = mpol_rebind_default,
 398	},
 399	[MPOL_INTERLEAVE] = {
 400		.create = mpol_new_nodemask,
 401		.rebind = mpol_rebind_nodemask,
 402	},
 403	[MPOL_PREFERRED] = {
 404		.create = mpol_new_preferred,
 405		.rebind = mpol_rebind_preferred,
 406	},
 407	[MPOL_BIND] = {
 408		.create = mpol_new_nodemask,
 409		.rebind = mpol_rebind_nodemask,
 410	},
 411	[MPOL_LOCAL] = {
 412		.rebind = mpol_rebind_default,
 413	},
 414	[MPOL_PREFERRED_MANY] = {
 415		.create = mpol_new_nodemask,
 416		.rebind = mpol_rebind_preferred,
 417	},
 418};
 419
 420static bool migrate_folio_add(struct folio *folio, struct list_head *foliolist,
 421				unsigned long flags);
 422static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *pol,
 423				pgoff_t ilx, int *nid);
 424
 425static bool strictly_unmovable(unsigned long flags)
 426{
 427	/*
 428	 * STRICT without MOVE flags lets do_mbind() fail immediately with -EIO
 429	 * if any misplaced page is found.
 430	 */
 431	return (flags & (MPOL_MF_STRICT | MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) ==
 432			 MPOL_MF_STRICT;
 433}
 434
 435struct migration_mpol {		/* for alloc_migration_target_by_mpol() */
 436	struct mempolicy *pol;
 437	pgoff_t ilx;
 438};
 439
 440struct queue_pages {
 441	struct list_head *pagelist;
 442	unsigned long flags;
 443	nodemask_t *nmask;
 444	unsigned long start;
 445	unsigned long end;
 446	struct vm_area_struct *first;
 447	struct folio *large;		/* note last large folio encountered */
 448	long nr_failed;			/* could not be isolated at this time */
 449};
 450
 451/*
 452 * Check if the folio's nid is in qp->nmask.
 453 *
 454 * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
 455 * in the invert of qp->nmask.
 456 */
 457static inline bool queue_folio_required(struct folio *folio,
 458					struct queue_pages *qp)
 459{
 460	int nid = folio_nid(folio);
 461	unsigned long flags = qp->flags;
 462
 463	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
 464}
 465
 466static void queue_folios_pmd(pmd_t *pmd, struct mm_walk *walk)
 
 
 
 
 467{
 468	struct folio *folio;
 469	struct queue_pages *qp = walk->private;
 470
 471	if (unlikely(is_pmd_migration_entry(*pmd))) {
 472		qp->nr_failed++;
 473		return;
 474	}
 475	folio = pfn_folio(pmd_pfn(*pmd));
 476	if (is_huge_zero_page(&folio->page)) {
 477		walk->action = ACTION_CONTINUE;
 478		return;
 479	}
 480	if (!queue_folio_required(folio, qp))
 481		return;
 482	if (!(qp->flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) ||
 483	    !vma_migratable(walk->vma) ||
 484	    !migrate_folio_add(folio, qp->pagelist, qp->flags))
 485		qp->nr_failed++;
 486}
 487
 488/*
 489 * Scan through folios, checking if they satisfy the required conditions,
 490 * moving them from LRU to local pagelist for migration if they do (or not).
 491 *
 492 * queue_folios_pte_range() has two possible return values:
 493 * 0 - continue walking to scan for more, even if an existing folio on the
 494 *     wrong node could not be isolated and queued for migration.
 495 * -EIO - only MPOL_MF_STRICT was specified, without MPOL_MF_MOVE or ..._ALL,
 496 *        and an existing folio was on a node that does not follow the policy.
 497 */
 498static int queue_folios_pte_range(pmd_t *pmd, unsigned long addr,
 499			unsigned long end, struct mm_walk *walk)
 500{
 501	struct vm_area_struct *vma = walk->vma;
 502	struct folio *folio;
 503	struct queue_pages *qp = walk->private;
 504	unsigned long flags = qp->flags;
 505	pte_t *pte, *mapped_pte;
 506	pte_t ptent;
 507	spinlock_t *ptl;
 508
 509	ptl = pmd_trans_huge_lock(pmd, vma);
 510	if (ptl) {
 511		queue_folios_pmd(pmd, walk);
 512		spin_unlock(ptl);
 513		goto out;
 514	}
 515
 516	mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
 517	if (!pte) {
 518		walk->action = ACTION_AGAIN;
 519		return 0;
 520	}
 521	for (; addr != end; pte++, addr += PAGE_SIZE) {
 522		ptent = ptep_get(pte);
 523		if (pte_none(ptent))
 524			continue;
 525		if (!pte_present(ptent)) {
 526			if (is_migration_entry(pte_to_swp_entry(ptent)))
 527				qp->nr_failed++;
 528			continue;
 529		}
 530		folio = vm_normal_folio(vma, addr, ptent);
 531		if (!folio || folio_is_zone_device(folio))
 532			continue;
 533		/*
 534		 * vm_normal_folio() filters out zero pages, but there might
 535		 * still be reserved folios to skip, perhaps in a VDSO.
 
 536		 */
 537		if (folio_test_reserved(folio))
 538			continue;
 539		if (!queue_folio_required(folio, qp))
 540			continue;
 541		if (folio_test_large(folio)) {
 542			/*
 543			 * A large folio can only be isolated from LRU once,
 544			 * but may be mapped by many PTEs (and Copy-On-Write may
 545			 * intersperse PTEs of other, order 0, folios).  This is
 546			 * a common case, so don't mistake it for failure (but
 547			 * there can be other cases of multi-mapped pages which
 548			 * this quick check does not help to filter out - and a
 549			 * search of the pagelist might grow to be prohibitive).
 550			 *
 551			 * migrate_pages(&pagelist) returns nr_failed folios, so
 552			 * check "large" now so that queue_pages_range() returns
 553			 * a comparable nr_failed folios.  This does imply that
 554			 * if folio could not be isolated for some racy reason
 555			 * at its first PTE, later PTEs will not give it another
 556			 * chance of isolation; but keeps the accounting simple.
 557			 */
 558			if (folio == qp->large)
 559				continue;
 560			qp->large = folio;
 561		}
 562		if (!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) ||
 563		    !vma_migratable(vma) ||
 564		    !migrate_folio_add(folio, qp->pagelist, flags)) {
 565			qp->nr_failed++;
 566			if (strictly_unmovable(flags))
 567				break;
 568		}
 569	}
 570	pte_unmap_unlock(mapped_pte, ptl);
 571	cond_resched();
 572out:
 573	if (qp->nr_failed && strictly_unmovable(flags))
 574		return -EIO;
 575	return 0;
 576}
 577
 578static int queue_folios_hugetlb(pte_t *pte, unsigned long hmask,
 579			       unsigned long addr, unsigned long end,
 580			       struct mm_walk *walk)
 581{
 582#ifdef CONFIG_HUGETLB_PAGE
 583	struct queue_pages *qp = walk->private;
 584	unsigned long flags = qp->flags;
 585	struct folio *folio;
 586	spinlock_t *ptl;
 587	pte_t entry;
 588
 589	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
 590	entry = huge_ptep_get(pte);
 591	if (!pte_present(entry)) {
 592		if (unlikely(is_hugetlb_entry_migration(entry)))
 593			qp->nr_failed++;
 594		goto unlock;
 595	}
 596	folio = pfn_folio(pte_pfn(entry));
 597	if (!queue_folio_required(folio, qp))
 598		goto unlock;
 599	if (!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) ||
 600	    !vma_migratable(walk->vma)) {
 601		qp->nr_failed++;
 602		goto unlock;
 603	}
 604	/*
 605	 * Unless MPOL_MF_MOVE_ALL, we try to avoid migrating a shared folio.
 606	 * Choosing not to migrate a shared folio is not counted as a failure.
 607	 *
 608	 * To check if the folio is shared, ideally we want to make sure
 609	 * every page is mapped to the same process. Doing that is very
 610	 * expensive, so check the estimated sharers of the folio instead.
 611	 */
 612	if ((flags & MPOL_MF_MOVE_ALL) ||
 613	    (folio_estimated_sharers(folio) == 1 && !hugetlb_pmd_shared(pte)))
 614		if (!isolate_hugetlb(folio, qp->pagelist))
 615			qp->nr_failed++;
 616unlock:
 617	spin_unlock(ptl);
 618	if (qp->nr_failed && strictly_unmovable(flags))
 619		return -EIO;
 620#endif
 621	return 0;
 622}
 623
 624#ifdef CONFIG_NUMA_BALANCING
 625/*
 626 * This is used to mark a range of virtual addresses to be inaccessible.
 627 * These are later cleared by a NUMA hinting fault. Depending on these
 628 * faults, pages may be migrated for better NUMA placement.
 629 *
 630 * This is assuming that NUMA faults are handled using PROT_NONE. If
 631 * an architecture makes a different choice, it will need further
 632 * changes to the core.
 633 */
 634unsigned long change_prot_numa(struct vm_area_struct *vma,
 635			unsigned long addr, unsigned long end)
 636{
 637	struct mmu_gather tlb;
 638	long nr_updated;
 639
 640	tlb_gather_mmu(&tlb, vma->vm_mm);
 641
 642	nr_updated = change_protection(&tlb, vma, addr, end, MM_CP_PROT_NUMA);
 643	if (nr_updated > 0)
 644		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
 645
 646	tlb_finish_mmu(&tlb);
 647
 648	return nr_updated;
 649}
 650#endif /* CONFIG_NUMA_BALANCING */
 651
 652static int queue_pages_test_walk(unsigned long start, unsigned long end,
 653				struct mm_walk *walk)
 654{
 655	struct vm_area_struct *next, *vma = walk->vma;
 656	struct queue_pages *qp = walk->private;
 657	unsigned long endvma = vma->vm_end;
 658	unsigned long flags = qp->flags;
 659
 660	/* range check first */
 661	VM_BUG_ON_VMA(!range_in_vma(vma, start, end), vma);
 
 
 662
 663	if (!qp->first) {
 664		qp->first = vma;
 665		if (!(flags & MPOL_MF_DISCONTIG_OK) &&
 666			(qp->start < vma->vm_start))
 667			/* hole at head side of range */
 668			return -EFAULT;
 669	}
 670	next = find_vma(vma->vm_mm, vma->vm_end);
 671	if (!(flags & MPOL_MF_DISCONTIG_OK) &&
 672		((vma->vm_end < qp->end) &&
 673		(!next || vma->vm_end < next->vm_start)))
 674		/* hole at middle or tail of range */
 675		return -EFAULT;
 676
 677	/*
 678	 * Need check MPOL_MF_STRICT to return -EIO if possible
 679	 * regardless of vma_migratable
 680	 */
 681	if (!vma_migratable(vma) &&
 682	    !(flags & MPOL_MF_STRICT))
 683		return 1;
 684
 685	if (endvma > end)
 686		endvma = end;
 687
 688	/*
 689	 * Check page nodes, and queue pages to move, in the current vma.
 690	 * But if no moving, and no strict checking, the scan can be skipped.
 691	 */
 692	if (flags & (MPOL_MF_STRICT | MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
 693		return 0;
 694	return 1;
 695}
 696
 697static const struct mm_walk_ops queue_pages_walk_ops = {
 698	.hugetlb_entry		= queue_folios_hugetlb,
 699	.pmd_entry		= queue_folios_pte_range,
 700	.test_walk		= queue_pages_test_walk,
 701	.walk_lock		= PGWALK_RDLOCK,
 702};
 703
 704static const struct mm_walk_ops queue_pages_lock_vma_walk_ops = {
 705	.hugetlb_entry		= queue_folios_hugetlb,
 706	.pmd_entry		= queue_folios_pte_range,
 707	.test_walk		= queue_pages_test_walk,
 708	.walk_lock		= PGWALK_WRLOCK,
 709};
 710
 711/*
 712 * Walk through page tables and collect pages to be migrated.
 713 *
 714 * If pages found in a given range are not on the required set of @nodes,
 715 * and migration is allowed, they are isolated and queued to @pagelist.
 716 *
 717 * queue_pages_range() may return:
 718 * 0 - all pages already on the right node, or successfully queued for moving
 719 *     (or neither strict checking nor moving requested: only range checking).
 720 * >0 - this number of misplaced folios could not be queued for moving
 721 *      (a hugetlbfs page or a transparent huge page being counted as 1).
 722 * -EIO - a misplaced page found, when MPOL_MF_STRICT specified without MOVEs.
 723 * -EFAULT - a hole in the memory range, when MPOL_MF_DISCONTIG_OK unspecified.
 724 */
 725static long
 726queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
 727		nodemask_t *nodes, unsigned long flags,
 728		struct list_head *pagelist)
 729{
 730	int err;
 731	struct queue_pages qp = {
 732		.pagelist = pagelist,
 733		.flags = flags,
 734		.nmask = nodes,
 735		.start = start,
 736		.end = end,
 737		.first = NULL,
 738	};
 739	const struct mm_walk_ops *ops = (flags & MPOL_MF_WRLOCK) ?
 740			&queue_pages_lock_vma_walk_ops : &queue_pages_walk_ops;
 741
 742	err = walk_page_range(mm, start, end, ops, &qp);
 
 
 743
 744	if (!qp.first)
 745		/* whole range in hole */
 746		err = -EFAULT;
 747
 748	return err ? : qp.nr_failed;
 
 749}
 750
 751/*
 752 * Apply policy to a single VMA
 753 * This must be called with the mmap_lock held for writing.
 754 */
 755static int vma_replace_policy(struct vm_area_struct *vma,
 756				struct mempolicy *pol)
 757{
 758	int err;
 759	struct mempolicy *old;
 760	struct mempolicy *new;
 761
 762	vma_assert_write_locked(vma);
 763
 764	new = mpol_dup(pol);
 765	if (IS_ERR(new))
 766		return PTR_ERR(new);
 767
 768	if (vma->vm_ops && vma->vm_ops->set_policy) {
 769		err = vma->vm_ops->set_policy(vma, new);
 770		if (err)
 771			goto err_out;
 772	}
 773
 774	old = vma->vm_policy;
 775	vma->vm_policy = new; /* protected by mmap_lock */
 776	mpol_put(old);
 777
 778	return 0;
 779 err_out:
 780	mpol_put(new);
 781	return err;
 782}
 783
 784/* Split or merge the VMA (if required) and apply the new policy */
 785static int mbind_range(struct vma_iterator *vmi, struct vm_area_struct *vma,
 786		struct vm_area_struct **prev, unsigned long start,
 787		unsigned long end, struct mempolicy *new_pol)
 788{
 789	unsigned long vmstart, vmend;
 790
 791	vmend = min(end, vma->vm_end);
 792	if (start > vma->vm_start) {
 793		*prev = vma;
 794		vmstart = start;
 795	} else {
 796		vmstart = vma->vm_start;
 797	}
 798
 799	if (mpol_equal(vma->vm_policy, new_pol)) {
 800		*prev = vma;
 801		return 0;
 802	}
 803
 804	vma =  vma_modify_policy(vmi, *prev, vma, vmstart, vmend, new_pol);
 805	if (IS_ERR(vma))
 806		return PTR_ERR(vma);
 807
 808	*prev = vma;
 809	return vma_replace_policy(vma, new_pol);
 810}
 811
 812/* Set the process memory policy */
 813static long do_set_mempolicy(unsigned short mode, unsigned short flags,
 814			     nodemask_t *nodes)
 815{
 816	struct mempolicy *new, *old;
 
 817	NODEMASK_SCRATCH(scratch);
 818	int ret;
 819
 820	if (!scratch)
 821		return -ENOMEM;
 822
 823	new = mpol_new(mode, flags, nodes);
 824	if (IS_ERR(new)) {
 825		ret = PTR_ERR(new);
 826		goto out;
 827	}
 828
 829	task_lock(current);
 830	ret = mpol_set_nodemask(new, nodes, scratch);
 831	if (ret) {
 832		task_unlock(current);
 
 
 833		mpol_put(new);
 834		goto out;
 835	}
 836
 837	old = current->mempolicy;
 838	current->mempolicy = new;
 839	if (new && new->mode == MPOL_INTERLEAVE)
 840		current->il_prev = MAX_NUMNODES-1;
 
 
 841	task_unlock(current);
 
 
 
 842	mpol_put(old);
 843	ret = 0;
 844out:
 845	NODEMASK_SCRATCH_FREE(scratch);
 846	return ret;
 847}
 848
 849/*
 850 * Return nodemask for policy for get_mempolicy() query
 851 *
 852 * Called with task's alloc_lock held
 853 */
 854static void get_policy_nodemask(struct mempolicy *pol, nodemask_t *nodes)
 855{
 856	nodes_clear(*nodes);
 857	if (pol == &default_policy)
 858		return;
 859
 860	switch (pol->mode) {
 861	case MPOL_BIND:
 
 862	case MPOL_INTERLEAVE:
 
 
 863	case MPOL_PREFERRED:
 864	case MPOL_PREFERRED_MANY:
 865		*nodes = pol->nodes;
 866		break;
 867	case MPOL_LOCAL:
 868		/* return empty node mask for local allocation */
 869		break;
 870	default:
 871		BUG();
 872	}
 873}
 874
 875static int lookup_node(struct mm_struct *mm, unsigned long addr)
 876{
 877	struct page *p = NULL;
 878	int ret;
 879
 880	ret = get_user_pages_fast(addr & PAGE_MASK, 1, 0, &p);
 881	if (ret > 0) {
 882		ret = page_to_nid(p);
 883		put_page(p);
 884	}
 885	return ret;
 886}
 887
 888/* Retrieve NUMA policy */
 889static long do_get_mempolicy(int *policy, nodemask_t *nmask,
 890			     unsigned long addr, unsigned long flags)
 891{
 892	int err;
 893	struct mm_struct *mm = current->mm;
 894	struct vm_area_struct *vma = NULL;
 895	struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;
 896
 897	if (flags &
 898		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
 899		return -EINVAL;
 900
 901	if (flags & MPOL_F_MEMS_ALLOWED) {
 902		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
 903			return -EINVAL;
 904		*policy = 0;	/* just so it's initialized */
 905		task_lock(current);
 906		*nmask  = cpuset_current_mems_allowed;
 907		task_unlock(current);
 908		return 0;
 909	}
 910
 911	if (flags & MPOL_F_ADDR) {
 912		pgoff_t ilx;		/* ignored here */
 913		/*
 914		 * Do NOT fall back to task policy if the
 915		 * vma/shared policy at addr is NULL.  We
 916		 * want to return MPOL_DEFAULT in this case.
 917		 */
 918		mmap_read_lock(mm);
 919		vma = vma_lookup(mm, addr);
 920		if (!vma) {
 921			mmap_read_unlock(mm);
 922			return -EFAULT;
 923		}
 924		pol = __get_vma_policy(vma, addr, &ilx);
 
 
 
 925	} else if (addr)
 926		return -EINVAL;
 927
 928	if (!pol)
 929		pol = &default_policy;	/* indicates default behavior */
 930
 931	if (flags & MPOL_F_NODE) {
 932		if (flags & MPOL_F_ADDR) {
 933			/*
 934			 * Take a refcount on the mpol, because we are about to
 935			 * drop the mmap_lock, after which only "pol" remains
 936			 * valid, "vma" is stale.
 937			 */
 938			pol_refcount = pol;
 939			vma = NULL;
 940			mpol_get(pol);
 941			mmap_read_unlock(mm);
 942			err = lookup_node(mm, addr);
 943			if (err < 0)
 944				goto out;
 945			*policy = err;
 946		} else if (pol == current->mempolicy &&
 947				pol->mode == MPOL_INTERLEAVE) {
 948			*policy = next_node_in(current->il_prev, pol->nodes);
 949		} else {
 950			err = -EINVAL;
 951			goto out;
 952		}
 953	} else {
 954		*policy = pol == &default_policy ? MPOL_DEFAULT :
 955						pol->mode;
 956		/*
 957		 * Internal mempolicy flags must be masked off before exposing
 958		 * the policy to userspace.
 959		 */
 960		*policy |= (pol->flags & MPOL_MODE_FLAGS);
 961	}
 962
 
 
 
 
 
 963	err = 0;
 964	if (nmask) {
 965		if (mpol_store_user_nodemask(pol)) {
 966			*nmask = pol->w.user_nodemask;
 967		} else {
 968			task_lock(current);
 969			get_policy_nodemask(pol, nmask);
 970			task_unlock(current);
 971		}
 972	}
 973
 974 out:
 975	mpol_cond_put(pol);
 976	if (vma)
 977		mmap_read_unlock(mm);
 978	if (pol_refcount)
 979		mpol_put(pol_refcount);
 980	return err;
 981}
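/*
 * Userspace view of the flag handling above, as a sketch (assumes
 * libnuma's <numaif.h> prototype; not kernel code, helper name is
 * hypothetical): MPOL_F_NODE together with MPOL_F_ADDR asks which node
 * currently backs the page at 'addr'.
 */
#if 0
#include <numaif.h>

static int example_node_of_addr(void *addr)
{
	int node = -1;

	if (get_mempolicy(&node, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR))
		return -1;
	return node;
}
#endif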
 982
 983#ifdef CONFIG_MIGRATION
 984static bool migrate_folio_add(struct folio *folio, struct list_head *foliolist,
 
 
 
 985				unsigned long flags)
 986{
 987	/*
 988	 * Unless MPOL_MF_MOVE_ALL, we try to avoid migrating a shared folio.
 989	 * Choosing not to migrate a shared folio is not counted as a failure.
 990	 *
 991	 * To check if the folio is shared, ideally we want to make sure
 992	 * every page is mapped to the same process. Doing that is very
 993	 * expensive, so check the estimated sharers of the folio instead.
 994	 */
 995	if ((flags & MPOL_MF_MOVE_ALL) || folio_estimated_sharers(folio) == 1) {
 996		if (folio_isolate_lru(folio)) {
 997			list_add_tail(&folio->lru, foliolist);
 998			node_stat_mod_folio(folio,
 999				NR_ISOLATED_ANON + folio_is_file_lru(folio),
1000				folio_nr_pages(folio));
1001		} else {
1002			/*
1003			 * Non-movable folio may reach here.  And, there may be
1004			 * temporary off LRU folios or non-LRU movable folios.
1005			 * Treat them as unmovable folios since they can't be
1006			 * isolated, so they can't be moved at the moment.
1007			 */
1008			return false;
1009		}
1010	}
1011	return true;
 
 
 
 
1012}
1013
1014/*
1015 * Migrate pages from one node to a target node.
1016 * Returns error or the number of pages not migrated.
1017 */
1018static long migrate_to_node(struct mm_struct *mm, int source, int dest,
1019			    int flags)
1020{
1021	nodemask_t nmask;
 
 
1022	struct vm_area_struct *vma;
1023	LIST_HEAD(pagelist);
1024	long nr_failed;
1025	long err = 0;
1026	struct migration_target_control mtc = {
1027		.nid = dest,
1028		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
1029	};
1030
1031	nodes_clear(nmask);
1032	node_set(source, nmask);
1033
1034	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
1035
1036	mmap_read_lock(mm);
1037	vma = find_vma(mm, 0);
1038
1039	/*
1040	 * This does not migrate the range, but isolates all pages that
1041	 * need migration.  Between passing in the full user address
1042	 * space range and MPOL_MF_DISCONTIG_OK, this call cannot fail,
1043	 * but passes back the count of pages which could not be isolated.
1044	 */
1045	nr_failed = queue_pages_range(mm, vma->vm_start, mm->task_size, &nmask,
1046				      flags | MPOL_MF_DISCONTIG_OK, &pagelist);
1047	mmap_read_unlock(mm);
1048
1049	if (!list_empty(&pagelist)) {
1050		err = migrate_pages(&pagelist, alloc_migration_target, NULL,
1051			(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
1052		if (err)
1053			putback_movable_pages(&pagelist);
1054	}
1055
1056	if (err >= 0)
1057		err += nr_failed;
1058	return err;
1059}
1060
1061/*
1062 * Move pages between the two nodesets so as to preserve the physical
1063 * layout as much as possible.
1064 *
1065 * Returns the number of pages that could not be moved.
1066 */
1067int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1068		     const nodemask_t *to, int flags)
1069{
1070	long nr_failed = 0;
1071	long err = 0;
1072	nodemask_t tmp;
1073
1074	lru_cache_disable();
1075
1076	/*
1077	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
1078	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
1079	 * bit in 'tmp', and return that <source, dest> pair for migration.
1080	 * The pair of nodemasks 'to' and 'from' define the map.
1081	 *
1082	 * If no pair of bits is found that way, fallback to picking some
1083	 * pair of 'source' and 'dest' bits that are not the same.  If the
1084	 * 'source' and 'dest' bits are the same, this represents a node
1085	 * that will be migrating to itself, so no pages need move.
1086	 *
1087	 * If no bits are left in 'tmp', or if all remaining bits left
1088	 * in 'tmp' correspond to the same bit in 'to', return false
1089	 * (nothing left to migrate).
1090	 *
1091	 * This lets us pick a pair of nodes to migrate between, such that
1092	 * if possible the dest node is not already occupied by some other
1093	 * source node, minimizing the risk of overloading the memory on a
1094	 * node that would happen if we migrated incoming memory to a node
1095	 * before migrating outgoing memory sourced from that same node.
1096	 *
1097	 * A single scan of tmp is sufficient.  As we go, we remember the
1098	 * most recent <s, d> pair that moved (s != d).  If we find a pair
1099	 * that not only moved, but what's better, moved to an empty slot
1100	 * (d is not set in tmp), then we break out then, with that pair.
1101	 * Otherwise when we finish scanning from_tmp, we at least have the
1102	 * most recent <s, d> pair that moved.  If we get all the way through
1103	 * the scan of tmp without finding any node that moved, much less
1104	 * moved to an empty node, then there is nothing left worth migrating.
1105	 */
1106
1107	tmp = *from;
1108	while (!nodes_empty(tmp)) {
1109		int s, d;
1110		int source = NUMA_NO_NODE;
1111		int dest = 0;
1112
1113		for_each_node_mask(s, tmp) {
1114
1115			/*
1116			 * do_migrate_pages() tries to maintain the relative
1117			 * node relationship of the pages established between
1118			 * threads and memory areas.
1119			 *
1120			 * However if the number of source nodes is not equal to
1121			 * the number of destination nodes we can not preserve
1122			 * this node relative relationship.  In that case, skip
1123			 * copying memory from a node that is in the destination
1124			 * mask.
1125			 *
1126			 * Example: [2,3,4] -> [3,4,5] moves everything.
1127			 *          [0-7] -> [3,4,5] moves only 0,1,2,6,7.
1128			 */
1129
1130			if ((nodes_weight(*from) != nodes_weight(*to)) &&
1131						(node_isset(s, *to)))
1132				continue;
1133
1134			d = node_remap(s, *from, *to);
1135			if (s == d)
1136				continue;
1137
1138			source = s;	/* Node moved. Memorize */
1139			dest = d;
1140
1141			/* dest not in remaining from nodes? */
1142			if (!node_isset(dest, tmp))
1143				break;
1144		}
1145		if (source == NUMA_NO_NODE)
1146			break;
1147
1148		node_clear(source, tmp);
1149		err = migrate_to_node(mm, source, dest, flags);
1150		if (err > 0)
1151			nr_failed += err;
1152		if (err < 0)
1153			break;
1154	}
1155
1156	lru_cache_enable();
1157	if (err < 0)
1158		return err;
1159	return (nr_failed < INT_MAX) ? nr_failed : INT_MAX;
 
1160}
1161
1162/*
1163 * Allocate a new folio for page migration, according to NUMA mempolicy.
1164 */
1165static struct folio *alloc_migration_target_by_mpol(struct folio *src,
1166						    unsigned long private)
1167{
1168	struct migration_mpol *mmpol = (struct migration_mpol *)private;
1169	struct mempolicy *pol = mmpol->pol;
1170	pgoff_t ilx = mmpol->ilx;
1171	struct page *page;
1172	unsigned int order;
1173	int nid = numa_node_id();
1174	gfp_t gfp;
1175
1176	order = folio_order(src);
1177	ilx += src->index >> order;
1178
1179	if (folio_test_hugetlb(src)) {
1180		nodemask_t *nodemask;
1181		struct hstate *h;
1182
1183		h = folio_hstate(src);
1184		gfp = htlb_alloc_mask(h);
1185		nodemask = policy_nodemask(gfp, pol, ilx, &nid);
1186		return alloc_hugetlb_folio_nodemask(h, nid, nodemask, gfp);
1187	}
1188
1189	if (folio_test_large(src))
1190		gfp = GFP_TRANSHUGE;
1191	else
1192		gfp = GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL | __GFP_COMP;
1193
1194	page = alloc_pages_mpol(gfp, order, pol, ilx, nid);
1195	return page_rmappable_folio(page);
1196}
1197#else
1198
1199static bool migrate_folio_add(struct folio *folio, struct list_head *foliolist,
1200				unsigned long flags)
1201{
1202	return false;
1203}
1204
1205int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1206		     const nodemask_t *to, int flags)
1207{
1208	return -ENOSYS;
1209}
1210
1211static struct folio *alloc_migration_target_by_mpol(struct folio *src,
1212						    unsigned long private)
1213{
1214	return NULL;
1215}
1216#endif
1217
1218static long do_mbind(unsigned long start, unsigned long len,
1219		     unsigned short mode, unsigned short mode_flags,
1220		     nodemask_t *nmask, unsigned long flags)
1221{
 
1222	struct mm_struct *mm = current->mm;
1223	struct vm_area_struct *vma, *prev;
1224	struct vma_iterator vmi;
1225	struct migration_mpol mmpol;
1226	struct mempolicy *new;
1227	unsigned long end;
1228	long err;
1229	long nr_failed;
1230	LIST_HEAD(pagelist);
1231
1232	if (flags & ~(unsigned long)MPOL_MF_VALID)
 
1233		return -EINVAL;
1234	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1235		return -EPERM;
1236
1237	if (start & ~PAGE_MASK)
1238		return -EINVAL;
1239
1240	if (mode == MPOL_DEFAULT)
1241		flags &= ~MPOL_MF_STRICT;
1242
1243	len = PAGE_ALIGN(len);
1244	end = start + len;
1245
1246	if (end < start)
1247		return -EINVAL;
1248	if (end == start)
1249		return 0;
1250
1251	new = mpol_new(mode, mode_flags, nmask);
1252	if (IS_ERR(new))
1253		return PTR_ERR(new);
1254
1255	/*
1256	 * If we are using the default policy, then operation
1257	 * on discontinuous address spaces is okay after all
1258	 */
1259	if (!new)
1260		flags |= MPOL_MF_DISCONTIG_OK;
1261
1262	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
1263		lru_cache_disable();
1264	{
1265		NODEMASK_SCRATCH(scratch);
1266		if (scratch) {
1267			mmap_write_lock(mm);
 
1268			err = mpol_set_nodemask(new, nmask, scratch);
 
1269			if (err)
1270				mmap_write_unlock(mm);
1271		} else
1272			err = -ENOMEM;
1273		NODEMASK_SCRATCH_FREE(scratch);
1274	}
1275	if (err)
1276		goto mpol_out;
1277
1278	/*
1279	 * Lock the VMAs before scanning for pages to migrate,
1280	 * to ensure we don't miss a concurrently inserted page.
1281	 */
1282	nr_failed = queue_pages_range(mm, start, end, nmask,
1283			flags | MPOL_MF_INVERT | MPOL_MF_WRLOCK, &pagelist);
1284
1285	if (nr_failed < 0) {
1286		err = nr_failed;
1287		nr_failed = 0;
1288	} else {
1289		vma_iter_init(&vmi, mm, start);
1290		prev = vma_prev(&vmi);
1291		for_each_vma_range(vmi, vma, end) {
1292			err = mbind_range(&vmi, vma, &prev, start, end, new);
1293			if (err)
1294				break;
1295		}
1296	}
1297
1298	if (!err && !list_empty(&pagelist)) {
1299		/* Convert MPOL_DEFAULT's NULL to task or default policy */
1300		if (!new) {
1301			new = get_task_policy(current);
1302			mpol_get(new);
1303		}
1304		mmpol.pol = new;
1305		mmpol.ilx = 0;
1306
1307		/*
1308		 * In the interleaved case, attempt to allocate on exactly the
1309		 * targeted nodes, for the first VMA to be migrated; for later
1310		 * VMAs, the nodes will still be interleaved from the targeted
1311		 * nodemask, but one by one may be selected differently.
1312		 */
1313		if (new->mode == MPOL_INTERLEAVE) {
1314			struct page *page;
1315			unsigned int order;
1316			unsigned long addr = -EFAULT;
1317
1318			list_for_each_entry(page, &pagelist, lru) {
1319				if (!PageKsm(page))
1320					break;
1321			}
1322			if (!list_entry_is_head(page, &pagelist, lru)) {
1323				vma_iter_init(&vmi, mm, start);
1324				for_each_vma_range(vmi, vma, end) {
1325					addr = page_address_in_vma(page, vma);
1326					if (addr != -EFAULT)
1327						break;
1328				}
1329			}
1330			if (addr != -EFAULT) {
1331				order = compound_order(page);
1332				/* We already know the pol, but not the ilx */
1333				mpol_cond_put(get_vma_policy(vma, addr, order,
1334							     &mmpol.ilx));
1335				/* Set base from which to increment by index */
1336				mmpol.ilx -= page->index >> order;
1337			}
1338		}
1339	}
1340
1341	mmap_write_unlock(mm);
1342
1343	if (!err && !list_empty(&pagelist)) {
1344		nr_failed |= migrate_pages(&pagelist,
1345				alloc_migration_target_by_mpol, NULL,
1346				(unsigned long)&mmpol, MIGRATE_SYNC,
1347				MR_MEMPOLICY_MBIND, NULL);
1348	}
1349
1350	if (nr_failed && (flags & MPOL_MF_STRICT))
1351		err = -EIO;
1352	if (!list_empty(&pagelist))
1353		putback_movable_pages(&pagelist);
1354mpol_out:
1355	mpol_put(new);
1356	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
1357		lru_cache_enable();
1358	return err;
1359}
1360
1361/*
1362 * User space interface with variable sized bitmaps for nodelists.
1363 */
1364static int get_bitmap(unsigned long *mask, const unsigned long __user *nmask,
1365		      unsigned long maxnode)
1366{
1367	unsigned long nlongs = BITS_TO_LONGS(maxnode);
1368	int ret;
1369
1370	if (in_compat_syscall())
1371		ret = compat_get_bitmap(mask,
1372					(const compat_ulong_t __user *)nmask,
1373					maxnode);
1374	else
1375		ret = copy_from_user(mask, nmask,
1376				     nlongs * sizeof(unsigned long));
1377
1378	if (ret)
1379		return -EFAULT;
1380
1381	if (maxnode % BITS_PER_LONG)
1382		mask[nlongs - 1] &= (1UL << (maxnode % BITS_PER_LONG)) - 1;
1383
1384	return 0;
1385}
1386
1387/* Copy a node mask from user space. */
1388static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
1389		     unsigned long maxnode)
1390{
1391	--maxnode;
1392	nodes_clear(*nodes);
1393	if (maxnode == 0 || !nmask)
1394		return 0;
1395	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1396		return -EINVAL;
1397
1398	/*
1399	 * When the user specifies more nodes than supported, just check
1400	 * that the unsupported part is all zero, one word at a time,
1401	 * starting at the end.
1402	 */
1403	while (maxnode > MAX_NUMNODES) {
1404		unsigned long bits = min_t(unsigned long, maxnode, BITS_PER_LONG);
1405		unsigned long t;
1406
1407		if (get_bitmap(&t, &nmask[(maxnode - 1) / BITS_PER_LONG], bits))
1408			return -EFAULT;
1409
1410		if (maxnode - bits >= MAX_NUMNODES) {
1411			maxnode -= bits;
1412		} else {
1413			maxnode = MAX_NUMNODES;
1414			t &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
1415		}
1416		if (t)
1417			return -EINVAL;
1418	}
1419
1420	return get_bitmap(nodes_addr(*nodes), nmask, maxnode);
1421}
1422
1423/* Copy a kernel node mask to user space */
1424static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1425			      nodemask_t *nodes)
1426{
1427	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1428	unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
1429	bool compat = in_compat_syscall();
1430
1431	if (compat)
1432		nbytes = BITS_TO_COMPAT_LONGS(nr_node_ids) * sizeof(compat_long_t);
1433
1434	if (copy > nbytes) {
1435		if (copy > PAGE_SIZE)
1436			return -EINVAL;
1437		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1438			return -EFAULT;
1439		copy = nbytes;
1440		maxnode = nr_node_ids;
1441	}
1442
1443	if (compat)
1444		return compat_put_bitmap((compat_ulong_t __user *)mask,
1445					 nodes_addr(*nodes), maxnode);
1446
1447	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1448}
1449
1450/* Basic parameter sanity check used by both mbind() and set_mempolicy() */
1451static inline int sanitize_mpol_flags(int *mode, unsigned short *flags)
 
1452{
1453	*flags = *mode & MPOL_MODE_FLAGS;
1454	*mode &= ~MPOL_MODE_FLAGS;
 
1455
1456	if ((unsigned int)(*mode) >=  MPOL_MAX)
 
 
1457		return -EINVAL;
1458	if ((*flags & MPOL_F_STATIC_NODES) && (*flags & MPOL_F_RELATIVE_NODES))
 
1459		return -EINVAL;
1460	if (*flags & MPOL_F_NUMA_BALANCING) {
1461		if (*mode != MPOL_BIND)
1462			return -EINVAL;
1463		*flags |= (MPOL_F_MOF | MPOL_F_MORON);
1464	}
1465	return 0;
1466}
1467
1468static long kernel_mbind(unsigned long start, unsigned long len,
1469			 unsigned long mode, const unsigned long __user *nmask,
1470			 unsigned long maxnode, unsigned int flags)
1471{
1472	unsigned short mode_flags;
1473	nodemask_t nodes;
1474	int lmode = mode;
1475	int err;
1476
1477	start = untagged_addr(start);
1478	err = sanitize_mpol_flags(&lmode, &mode_flags);
1479	if (err)
1480		return err;
1481
1482	err = get_nodes(&nodes, nmask, maxnode);
1483	if (err)
1484		return err;
1485
1486	return do_mbind(start, len, lmode, mode_flags, &nodes, flags);
1487}
1488
1489SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, len,
1490		unsigned long, home_node, unsigned long, flags)
 
1491{
1492	struct mm_struct *mm = current->mm;
1493	struct vm_area_struct *vma, *prev;
1494	struct mempolicy *new, *old;
1495	unsigned long end;
1496	int err = -ENOENT;
1497	VMA_ITERATOR(vmi, mm, start);
1498
1499	start = untagged_addr(start);
1500	if (start & ~PAGE_MASK)
1501		return -EINVAL;
1502	/*
1503	 * flags is reserved for future extensions, if any.
1504	 */
1505	if (flags != 0)
1506		return -EINVAL;
1507
1508	/*
1509	 * Check home_node is online to avoid accessing uninitialized
1510	 * NODE_DATA.
1511	 */
1512	if (home_node >= MAX_NUMNODES || !node_online(home_node))
1513		return -EINVAL;
1514
1515	len = PAGE_ALIGN(len);
1516	end = start + len;
1517
1518	if (end < start)
1519		return -EINVAL;
1520	if (end == start)
1521		return 0;
1522	mmap_write_lock(mm);
1523	prev = vma_prev(&vmi);
1524	for_each_vma_range(vmi, vma, end) {
1525		/*
1526		 * If any vma in the range got policy other than MPOL_BIND
1527		 * or MPOL_PREFERRED_MANY we return error. We don't reset
1528		 * the home node for vmas we already updated before.
1529		 */
1530		old = vma_policy(vma);
1531		if (!old) {
1532			prev = vma;
1533			continue;
1534		}
1535		if (old->mode != MPOL_BIND && old->mode != MPOL_PREFERRED_MANY) {
1536			err = -EOPNOTSUPP;
1537			break;
1538		}
1539		new = mpol_dup(old);
1540		if (IS_ERR(new)) {
1541			err = PTR_ERR(new);
1542			break;
1543		}
1544
1545		vma_start_write(vma);
1546		new->home_node = home_node;
1547		err = mbind_range(&vmi, vma, &prev, start, end, new);
1548		mpol_put(new);
1549		if (err)
1550			break;
1551	}
1552	mmap_write_unlock(mm);
1553	return err;
1554}
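/*
 * Editorial example, not part of the kernel source: a minimal userspace
 * sketch of invoking the syscall above.  Glibc may not provide a wrapper,
 * so a raw syscall() is assumed here; __NR_set_mempolicy_home_node must be
 * available in the installed headers, the addr/len/node values are purely
 * illustrative, and the range must already carry an MPOL_BIND or
 * MPOL_PREFERRED_MANY policy for the call to succeed.
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	// prefer node 1 for faults in [addr, addr + len), policy permitting
 *	if (syscall(__NR_set_mempolicy_home_node,
 *		    (unsigned long)addr, len, 1UL, 0UL) == -1)
 *		perror("set_mempolicy_home_node");
 */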
1555
1556SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1557		unsigned long, mode, const unsigned long __user *, nmask,
1558		unsigned long, maxnode, unsigned int, flags)
1559{
1560	return kernel_mbind(start, len, mode, nmask, maxnode, flags);
1561}
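/*
 * Editorial example, not part of the kernel source: a hedged userspace
 * sketch of mbind(2) using the mbind() wrapper from libnuma's <numaif.h>.
 * The mapping size, the node chosen and the maxnode convention (number of
 * bits in the mask) are illustrative only.
 *
 *	#include <numaif.h>
 *	#include <sys/mman.h>
 *
 *	size_t len = 4UL << 20;
 *	unsigned long nodemask = 1UL << 0;	// node 0 only
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (mbind(p, len, MPOL_BIND, &nodemask, 8 * sizeof(nodemask),
 *		  MPOL_MF_MOVE | MPOL_MF_STRICT) == -1)
 *		perror("mbind");
 */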
1562
1563/* Set the process memory policy */
1564static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
1565				 unsigned long maxnode)
1566{
1567	unsigned short mode_flags;
1568	nodemask_t nodes;
1569	int lmode = mode;
1570	int err;
1571
1572	err = sanitize_mpol_flags(&lmode, &mode_flags);
1573	if (err)
1574		return err;
1575
1576	err = get_nodes(&nodes, nmask, maxnode);
1577	if (err)
1578		return err;
1579
1580	return do_set_mempolicy(lmode, mode_flags, &nodes);
1581}
1582
1583SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1584		unsigned long, maxnode)
1585{
1586	return kernel_set_mempolicy(mode, nmask, maxnode);
1587}
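/*
 * Editorial example, not part of the kernel source: a minimal sketch of
 * set_mempolicy(2) via libnuma's <numaif.h>, interleaving all future
 * allocations of the calling task over nodes 0 and 1 (the node numbers
 * are illustrative).
 *
 *	#include <numaif.h>
 *
 *	unsigned long nodemask = (1UL << 0) | (1UL << 1);
 *	if (set_mempolicy(MPOL_INTERLEAVE, &nodemask,
 *			  8 * sizeof(nodemask)) == -1)
 *		perror("set_mempolicy");
 */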
1588
1589static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
1590				const unsigned long __user *old_nodes,
1591				const unsigned long __user *new_nodes)
1592{
 
1593	struct mm_struct *mm = NULL;
1594	struct task_struct *task;
1595	nodemask_t task_nodes;
1596	int err;
1597	nodemask_t *old;
1598	nodemask_t *new;
1599	NODEMASK_SCRATCH(scratch);
1600
1601	if (!scratch)
1602		return -ENOMEM;
1603
1604	old = &scratch->mask1;
1605	new = &scratch->mask2;
1606
1607	err = get_nodes(old, old_nodes, maxnode);
1608	if (err)
1609		goto out;
1610
1611	err = get_nodes(new, new_nodes, maxnode);
1612	if (err)
1613		goto out;
1614
1615	/* Find the mm_struct */
1616	rcu_read_lock();
1617	task = pid ? find_task_by_vpid(pid) : current;
1618	if (!task) {
1619		rcu_read_unlock();
1620		err = -ESRCH;
1621		goto out;
1622	}
1623	get_task_struct(task);
 
1624
1625	err = -EINVAL;
 
 
1626
1627	/*
1628	 * Check if this process has the right to modify the specified process.
1629	 * Use the regular "ptrace_may_access()" checks.
 
 
1630	 */
1631	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
1632		rcu_read_unlock();
1633		err = -EPERM;
1634		goto out_put;
1635	}
1636	rcu_read_unlock();
1637
1638	task_nodes = cpuset_mems_allowed(task);
1639	/* Is the user allowed to access the target nodes? */
1640	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
1641		err = -EPERM;
1642		goto out_put;
1643	}
1644
1645	task_nodes = cpuset_mems_allowed(current);
1646	nodes_and(*new, *new, task_nodes);
1647	if (nodes_empty(*new))
1648		goto out_put;
1649
1650	err = security_task_movememory(task);
1651	if (err)
1652		goto out_put;
1653
1654	mm = get_task_mm(task);
1655	put_task_struct(task);
1656
1657	if (!mm) {
1658		err = -EINVAL;
1659		goto out;
1660	}
1661
1662	err = do_migrate_pages(mm, old, new,
1663		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
1664
1665	mmput(mm);
1666out:
 
 
1667	NODEMASK_SCRATCH_FREE(scratch);
1668
1669	return err;
1670
1671out_put:
1672	put_task_struct(task);
1673	goto out;
1674}
1675
1676SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1677		const unsigned long __user *, old_nodes,
1678		const unsigned long __user *, new_nodes)
1679{
1680	return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
1681}
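/*
 * Editorial example, not part of the kernel source: a sketch of
 * migrate_pages(2) via libnuma's <numaif.h>, moving the pages of an
 * (illustrative) target_pid from node 0 to node 1.  A non-negative return
 * value is the number of pages that could not be moved.
 *
 *	#include <numaif.h>
 *
 *	unsigned long old_mask = 1UL << 0;
 *	unsigned long new_mask = 1UL << 1;
 *	long left = migrate_pages(target_pid, 8 * sizeof(old_mask),
 *				  &old_mask, &new_mask);
 *	if (left == -1)
 *		perror("migrate_pages");
 */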
1682
1683/* Retrieve NUMA policy */
1684static int kernel_get_mempolicy(int __user *policy,
1685				unsigned long __user *nmask,
1686				unsigned long maxnode,
1687				unsigned long addr,
1688				unsigned long flags)
1689{
1690	int err;
1691	int pval;
1692	nodemask_t nodes;
1693
1694	if (nmask != NULL && maxnode < nr_node_ids)
1695		return -EINVAL;
1696
1697	addr = untagged_addr(addr);
1698
1699	err = do_get_mempolicy(&pval, &nodes, addr, flags);
1700
1701	if (err)
1702		return err;
1703
1704	if (policy && put_user(pval, policy))
1705		return -EFAULT;
1706
1707	if (nmask)
1708		err = copy_nodes_to_user(nmask, maxnode, &nodes);
1709
1710	return err;
1711}
1712
1713SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1714		unsigned long __user *, nmask, unsigned long, maxnode,
1715		unsigned long, addr, unsigned long, flags)
1716{
1717	return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
1718}
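/*
 * Editorial example, not part of the kernel source: a sketch of
 * get_mempolicy(2) via libnuma's <numaif.h>, querying which policy and
 * nodemask govern an (illustrative) address addr in the calling task.
 *
 *	#include <numaif.h>
 *
 *	int mode;
 *	unsigned long nodemask = 0;
 *	if (get_mempolicy(&mode, &nodemask, 8 * sizeof(nodemask),
 *			  addr, MPOL_F_ADDR) == -1)
 *		perror("get_mempolicy");
 */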
1719
1720bool vma_migratable(struct vm_area_struct *vma)
 
1721{
1722	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
1723		return false;
1724
1725	/*
1726	 * DAX device mappings require predictable access latency, so avoid
1727	 * incurring periodic faults.
1728	 */
1729	if (vma_is_dax(vma))
1730		return false;
1731
1732	if (is_vm_hugetlb_page(vma) &&
1733		!hugepage_migration_supported(hstate_vma(vma)))
1734		return false;
1735
1736	/*
1737	 * Migration allocates pages in the highest zone. If we cannot
1738	 * do so then migration (at least from node to node) is not
1739	 * possible.
1740	 */
1741	if (vma->vm_file &&
1742		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
1743			< policy_zone)
1744		return false;
1745	return true;
1746}
1747
1748struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
1749				   unsigned long addr, pgoff_t *ilx)
 
1750{
1751	*ilx = 0;
1752	return (vma->vm_ops && vma->vm_ops->get_policy) ?
1753		vma->vm_ops->get_policy(vma, addr, ilx) : vma->vm_policy;
1754}
1755
 
 
1756/*
1757 * get_vma_policy(@vma, @addr, @order, @ilx)
1758 * @vma: virtual memory area whose policy is sought
1759 * @addr: address in @vma for shared policy lookup
1760 * @order: 0, or appropriate huge_page_order for interleaving
1761 * @ilx: interleave index (output), for use only when MPOL_INTERLEAVE
1762 *
1763 * Returns effective policy for a VMA at specified address.
1764 * Falls back to current->mempolicy or system default policy, as necessary.
1765 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1766 * count--added by the get_policy() vm_op, as appropriate--to protect against
1767 * freeing by another task.  It is the caller's responsibility to free the
1768 * extra reference for shared policies.
1769 */
1770struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1771				 unsigned long addr, int order, pgoff_t *ilx)
1772{
1773	struct mempolicy *pol;
1774
1775	pol = __get_vma_policy(vma, addr, ilx);
1776	if (!pol)
1777		pol = get_task_policy(current);
1778	if (pol->mode == MPOL_INTERLEAVE) {
1779		*ilx += vma->vm_pgoff >> order;
1780		*ilx += (addr - vma->vm_start) >> (PAGE_SHIFT + order);
1781	}
1782	return pol;
1783}
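/*
 * Worked example of the interleave index computed above (editorial note):
 * when the policy is MPOL_INTERLEAVE, with order == 0, vma->vm_pgoff == 16
 * and a fault 3 pages past vma->vm_start, *ilx ends up as 16 + 3 == 19, and
 * interleave_nid() later picks the (19 % nr-of-policy-nodes)'th node of
 * pol->nodes.  The concrete numbers are illustrative.
 */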
1784
1785bool vma_policy_mof(struct vm_area_struct *vma)
1786{
1787	struct mempolicy *pol;
1788
1789	if (vma->vm_ops && vma->vm_ops->get_policy) {
1790		bool ret = false;
1791		pgoff_t ilx;		/* ignored here */
1792
1793		pol = vma->vm_ops->get_policy(vma, vma->vm_start, &ilx);
1794		if (pol && (pol->flags & MPOL_F_MOF))
1795			ret = true;
1796		mpol_cond_put(pol);
1797
1798		return ret;
1799	}
1800
1801	pol = vma->vm_policy;
1802	if (!pol)
1803		pol = get_task_policy(current);
1804
1805	return pol->flags & MPOL_F_MOF;
1806}
1807
1808bool apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
 
 
1809{
1810	enum zone_type dynamic_policy_zone = policy_zone;
1811
1812	BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1813
1814	/*
1815	 * if policy->nodes has movable memory only,
1816	 * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only.
1817	 *
1818	 * policy->nodes is intersect with node_states[N_MEMORY].
1819	 * so if the following test fails, it implies
1820	 * policy->nodes has movable memory only.
1821	 */
1822	if (!nodes_intersects(policy->nodes, node_states[N_HIGH_MEMORY]))
1823		dynamic_policy_zone = ZONE_MOVABLE;
1824
1825	return zone >= dynamic_policy_zone;
1826}
1827
1828/* Do dynamic interleaving for a process */
1829static unsigned int interleave_nodes(struct mempolicy *policy)
1830{
1831	unsigned int nid;
 
1832
1833	nid = next_node_in(current->il_prev, policy->nodes);
1834	if (nid < MAX_NUMNODES)
1835		current->il_prev = nid;
1836	return nid;
1837}
1838
1839/*
1840 * Depending on the memory policy, provide a node from which to allocate the
1841 * next slab entry.
1842 */
1843unsigned int mempolicy_slab_node(void)
1844{
1845	struct mempolicy *policy;
1846	int node = numa_mem_id();
1847
1848	if (!in_task())
1849		return node;
1850
1851	policy = current->mempolicy;
1852	if (!policy)
1853		return node;
1854
1855	switch (policy->mode) {
1856	case MPOL_PREFERRED:
1857		return first_node(policy->nodes);
1858
1859	case MPOL_INTERLEAVE:
1860		return interleave_nodes(policy);
1861
1862	case MPOL_BIND:
1863	case MPOL_PREFERRED_MANY:
1864	{
1865		struct zoneref *z;
1866
1867		/*
1868		 * Follow bind policy behavior and start allocation at the
1869		 * first node.
1870		 */
1871		struct zonelist *zonelist;
 
1872		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1873		zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
1874		z = first_zones_zonelist(zonelist, highest_zoneidx,
1875							&policy->nodes);
1876		return z->zone ? zone_to_nid(z->zone) : node;
 
1877	}
1878	case MPOL_LOCAL:
1879		return node;
1880
1881	default:
1882		BUG();
1883	}
1884}
1885
1886/*
1887 * Do static interleaving for interleave index @ilx.  Returns the ilx'th
1888 * node in pol->nodes (starting from ilx=0), wrapping around if ilx
1889 * exceeds the number of present nodes.
1890 */
1891static unsigned int interleave_nid(struct mempolicy *pol, pgoff_t ilx)
1892{
1893	nodemask_t nodemask = pol->nodes;
1894	unsigned int target, nnodes;
1895	int i;
1896	int nid;
1897	/*
1898	 * The barrier will stabilize the nodemask in a register or on
1899	 * the stack so that it will stop changing under the code.
1900	 *
1901	 * Between first_node() and next_node(), pol->nodes could be changed
1902	 * by other threads, so we take a copy of pol->nodes on the local stack.
1903	 */
1904	barrier();
1905
1906	nnodes = nodes_weight(nodemask);
1907	if (!nnodes)
1908		return numa_node_id();
1909	target = ilx % nnodes;
1910	nid = first_node(nodemask);
1911	for (i = 0; i < target; i++)
1912		nid = next_node(nid, nodemask);
 
 
1913	return nid;
1914}
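/*
 * Worked example (editorial note): with pol->nodes = {0,2,5} and ilx = 7,
 * nnodes = 3 and target = 7 % 3 = 1, so the walk starts at node 0 and
 * advances once, returning node 2.
 */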
1915
1916/*
1917 * Return a nodemask representing a mempolicy for filtering nodes for
1918 * page allocation, together with preferred node id (or the input node id).
1919 */
1920static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *pol,
1921				   pgoff_t ilx, int *nid)
1922{
1923	nodemask_t *nodemask = NULL;
 
1924
1925	switch (pol->mode) {
1926	case MPOL_PREFERRED:
1927		/* Override input node id */
1928		*nid = first_node(pol->nodes);
1929		break;
1930	case MPOL_PREFERRED_MANY:
1931		nodemask = &pol->nodes;
1932		if (pol->home_node != NUMA_NO_NODE)
1933			*nid = pol->home_node;
1934		break;
1935	case MPOL_BIND:
1936		/* Restrict to nodemask (but not on lower zones) */
1937		if (apply_policy_zone(pol, gfp_zone(gfp)) &&
1938		    cpuset_nodemask_valid_mems_allowed(&pol->nodes))
1939			nodemask = &pol->nodes;
1940		if (pol->home_node != NUMA_NO_NODE)
1941			*nid = pol->home_node;
1942		/*
1943		 * __GFP_THISNODE shouldn't even be used with the bind policy
1944		 * because we might easily break the expectation to stay on the
1945		 * requested node and not break the policy.
 
 
1946		 */
1947		WARN_ON_ONCE(gfp & __GFP_THISNODE);
1948		break;
1949	case MPOL_INTERLEAVE:
1950		/* Override input node id */
1951		*nid = (ilx == NO_INTERLEAVE_INDEX) ?
1952			interleave_nodes(pol) : interleave_nid(pol, ilx);
1953		break;
1954	}
1955
1956	return nodemask;
1957}
1958
1959#ifdef CONFIG_HUGETLBFS
1960/*
1961 * huge_node(@vma, @addr, @gfp_flags, @mpol, @nodemask)
1962 * @vma: virtual memory area whose policy is sought
1963 * @addr: address in @vma for shared policy lookup and interleave policy
1964 * @gfp_flags: for requested zone
1965 * @mpol: pointer to mempolicy pointer for reference counted mempolicy
1966 * @nodemask: pointer to nodemask pointer for 'bind' and 'prefer-many' policy
1967 *
1968 * Returns a nid suitable for a huge page allocation and a pointer
1969 * to the struct mempolicy for conditional unref after allocation.
1970 * If the effective policy is 'bind' or 'prefer-many', returns a pointer
1971 * to the mempolicy's @nodemask for filtering the zonelist.
 
 
1972 */
1973int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
1974		struct mempolicy **mpol, nodemask_t **nodemask)
 
1975{
1976	pgoff_t ilx;
1977	int nid;
 
 
1978
1979	nid = numa_node_id();
1980	*mpol = get_vma_policy(vma, addr, hstate_vma(vma)->order, &ilx);
1981	*nodemask = policy_nodemask(gfp_flags, *mpol, ilx, &nid);
1982	return nid;
1983}
1984
1985/*
1986 * init_nodemask_of_mempolicy
1987 *
1988 * If the current task's mempolicy is "default" [NULL], return 'false'
1989 * to indicate default policy.  Otherwise, extract the policy nodemask
1990 * for 'bind' or 'interleave' policy into the argument nodemask, or
1991 * initialize the argument nodemask to contain the single node for
1992 * 'preferred' or 'local' policy and return 'true' to indicate presence
1993 * of non-default mempolicy.
1994 *
1995 * We don't bother with reference counting the mempolicy [mpol_get/put]
1996 * because the current task is examining its own mempolicy and a task's
1997 * mempolicy is only ever changed by the task itself.
1998 *
1999 * N.B., it is the caller's responsibility to free a returned nodemask.
2000 */
2001bool init_nodemask_of_mempolicy(nodemask_t *mask)
2002{
2003	struct mempolicy *mempolicy;
 
2004
2005	if (!(mask && current->mempolicy))
2006		return false;
2007
2008	task_lock(current);
2009	mempolicy = current->mempolicy;
2010	switch (mempolicy->mode) {
2011	case MPOL_PREFERRED:
2012	case MPOL_PREFERRED_MANY:
2013	case MPOL_BIND:
 
2014	case MPOL_INTERLEAVE:
2015		*mask = mempolicy->nodes;
2016		break;
2017
2018	case MPOL_LOCAL:
2019		init_nodemask_of_node(mask, numa_node_id());
2020		break;
2021
2022	default:
2023		BUG();
2024	}
2025	task_unlock(current);
2026
2027	return true;
2028}
2029#endif
2030
2031/*
2032 * mempolicy_in_oom_domain
2033 *
2034 * If tsk's mempolicy is "bind", check for intersection between mask and
2035 * the policy nodemask. Otherwise, return true for all other policies
2036 * including "interleave", as a tsk with "interleave" policy may have
2037 * memory allocated from all nodes in the system.
2038 *
2039 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
2040 */
2041bool mempolicy_in_oom_domain(struct task_struct *tsk,
2042					const nodemask_t *mask)
2043{
2044	struct mempolicy *mempolicy;
2045	bool ret = true;
2046
2047	if (!mask)
2048		return ret;
2049
2050	task_lock(tsk);
2051	mempolicy = tsk->mempolicy;
2052	if (mempolicy && mempolicy->mode == MPOL_BIND)
2053		ret = nodes_intersects(mempolicy->nodes, *mask);
2054	task_unlock(tsk);
2055
2056	return ret;
2057}
2058
2059static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order,
2060						int nid, nodemask_t *nodemask)
 
 
2061{
 
2062	struct page *page;
2063	gfp_t preferred_gfp;
2064
2065	/*
2066	 * This is a two pass approach. The first pass will only try the
2067	 * preferred nodes but skip the direct reclaim and allow the
2068	 * allocation to fail, while the second pass will try all the
2069	 * nodes in the system.
2070	 */
2071	preferred_gfp = gfp | __GFP_NOWARN;
2072	preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2073	page = __alloc_pages(preferred_gfp, order, nid, nodemask);
2074	if (!page)
2075		page = __alloc_pages(gfp, order, nid, NULL);
2076
2077	return page;
2078}
2079
2080/**
2081 * alloc_pages_mpol - Allocate pages according to NUMA mempolicy.
2082 * @gfp: GFP flags.
2083 * @order: Order of the page allocation.
2084 * @pol: Pointer to the NUMA mempolicy.
2085 * @ilx: Index for interleave mempolicy (also distinguishes alloc_pages()).
2086 * @nid: Preferred node (usually numa_node_id() but @mpol may override it).
2087 *
2088 * Return: The page on success or NULL if allocation fails.
2089 */
2090struct page *alloc_pages_mpol(gfp_t gfp, unsigned int order,
2091		struct mempolicy *pol, pgoff_t ilx, int nid)
2092{
2093	nodemask_t *nodemask;
 
2094	struct page *page;
2095
2096	nodemask = policy_nodemask(gfp, pol, ilx, &nid);
 
 
2097
2098	if (pol->mode == MPOL_PREFERRED_MANY)
2099		return alloc_pages_preferred_many(gfp, order, nid, nodemask);
2100
2101	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
2102	    /* filter "hugepage" allocation, unless from alloc_pages() */
2103	    order == HPAGE_PMD_ORDER && ilx != NO_INTERLEAVE_INDEX) {
 
 
2104		/*
2105		 * For hugepage allocation and non-interleave policy which
2106		 * allows the current node (or other explicitly preferred
2107		 * node) we only try to allocate from the current/preferred
2108		 * node and don't fall back to other nodes, as the cost of
2109		 * remote accesses would likely offset THP benefits.
2110		 *
2111		 * If the policy is interleave or does not allow the current
2112		 * node in its nodemask, we allocate the standard way.
2113		 */
2114		if (pol->mode != MPOL_INTERLEAVE &&
2115		    (!nodemask || node_isset(nid, *nodemask))) {
2116			/*
2117			 * First, try to allocate THP only on local node, but
2118			 * don't reclaim unnecessarily, just compact.
2119			 */
2120			page = __alloc_pages_node(nid,
2121				gfp | __GFP_THISNODE | __GFP_NORETRY, order);
2122			if (page || !(gfp & __GFP_DIRECT_RECLAIM))
2123				return page;
2124			/*
2125			 * If hugepage allocations are configured to always
2126			 * synchronous compact or the vma has been madvised
2127			 * to prefer hugepage backing, retry allowing remote
2128			 * memory with both reclaim and compact as well.
2129			 */
2130		}
2131	}
2132
2133	page = __alloc_pages(gfp, order, nid, nodemask);
2134
2135	if (unlikely(pol->mode == MPOL_INTERLEAVE) && page) {
2136		/* skip NUMA_INTERLEAVE_HIT update if numa stats is disabled */
2137		if (static_branch_likely(&vm_numa_stat_key) &&
2138		    page_to_nid(page) == nid) {
2139			preempt_disable();
2140			__count_numa_event(page_zone(page), NUMA_INTERLEAVE_HIT);
2141			preempt_enable();
2142		}
2143	}
2144
2145	return page;
2146}
2147
2148/**
2149 * vma_alloc_folio - Allocate a folio for a VMA.
2150 * @gfp: GFP flags.
2151 * @order: Order of the folio.
2152 * @vma: Pointer to VMA.
2153 * @addr: Virtual address of the allocation.  Must be inside @vma.
2154 * @hugepage: Unused (was: For hugepages try only preferred node if possible).
2155 *
2156 * Allocate a folio for a specific address in @vma, using the appropriate
2157 * NUMA policy.  The caller must hold the mmap_lock of the mm_struct of the
2158 * VMA to prevent it from going away.  Should be used for all allocations
2159 * for folios that will be mapped into user space, excepting hugetlbfs, and
2160 * excepting where direct use of alloc_pages_mpol() is more appropriate.
2161 *
2162 * Return: The folio on success or NULL if allocation fails.
2163 */
2164struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
2165		unsigned long addr, bool hugepage)
2166{
2167	struct mempolicy *pol;
2168	pgoff_t ilx;
2169	struct page *page;
2170
2171	pol = get_vma_policy(vma, addr, order, &ilx);
2172	page = alloc_pages_mpol(gfp | __GFP_COMP, order,
2173				pol, ilx, numa_node_id());
2174	mpol_cond_put(pol);
2175	return page_rmappable_folio(page);
2176}
2177EXPORT_SYMBOL(vma_alloc_folio);
2178
2179/**
2180 * alloc_pages - Allocate pages.
2181 * @gfp: GFP flags.
2182 * @order: Power of two of number of pages to allocate.
2183 *
2184 * Allocate 1 << @order contiguous pages.  The physical address of the
2185 * first page is naturally aligned (eg an order-3 allocation will be aligned
2186 * to a multiple of 8 * PAGE_SIZE bytes).  The NUMA policy of the current
2187 * process is honoured when in process context.
2188 *
2189 * Context: Can be called from any context, providing the appropriate GFP
2190 * flags are used.
2191 * Return: The page on success or NULL if allocation fails.
2192 */
2193struct page *alloc_pages(gfp_t gfp, unsigned int order)
2194{
2195	struct mempolicy *pol = &default_policy;
2196
 
2197	/*
2198	 * No reference counting needed for current->mempolicy
2199	 * nor system default_policy
2200	 */
2201	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2202		pol = get_task_policy(current);
2203
2204	return alloc_pages_mpol(gfp, order,
2205				pol, NO_INTERLEAVE_INDEX, numa_node_id());
2206}
2207EXPORT_SYMBOL(alloc_pages);
2208
2209struct folio *folio_alloc(gfp_t gfp, unsigned int order)
2210{
2211	return page_rmappable_folio(alloc_pages(gfp | __GFP_COMP, order));
2212}
2213EXPORT_SYMBOL(folio_alloc);
2214
2215static unsigned long alloc_pages_bulk_array_interleave(gfp_t gfp,
2216		struct mempolicy *pol, unsigned long nr_pages,
2217		struct page **page_array)
2218{
2219	int nodes;
2220	unsigned long nr_pages_per_node;
2221	int delta;
2222	int i;
2223	unsigned long nr_allocated;
2224	unsigned long total_allocated = 0;
2225
2226	nodes = nodes_weight(pol->nodes);
2227	nr_pages_per_node = nr_pages / nodes;
2228	delta = nr_pages - nodes * nr_pages_per_node;
2229
2230	for (i = 0; i < nodes; i++) {
2231		if (delta) {
2232			nr_allocated = __alloc_pages_bulk(gfp,
2233					interleave_nodes(pol), NULL,
2234					nr_pages_per_node + 1, NULL,
2235					page_array);
2236			delta--;
2237		} else {
2238			nr_allocated = __alloc_pages_bulk(gfp,
2239					interleave_nodes(pol), NULL,
2240					nr_pages_per_node, NULL, page_array);
2241		}
2242
2243		page_array += nr_allocated;
2244		total_allocated += nr_allocated;
2245	}
2246
2247	return total_allocated;
2248}
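/*
 * Worked example of the split above (editorial note): nr_pages = 10 over
 * 3 interleave nodes gives nr_pages_per_node = 3 and delta = 1, so the
 * first node visited is asked for 4 pages and the other two for 3 each
 * (less any shortfall returned by __alloc_pages_bulk()).
 */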
2249
2250static unsigned long alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid,
2251		struct mempolicy *pol, unsigned long nr_pages,
2252		struct page **page_array)
2253{
2254	gfp_t preferred_gfp;
2255	unsigned long nr_allocated = 0;
2256
2257	preferred_gfp = gfp | __GFP_NOWARN;
2258	preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2259
2260	nr_allocated  = __alloc_pages_bulk(preferred_gfp, nid, &pol->nodes,
2261					   nr_pages, NULL, page_array);
2262
2263	if (nr_allocated < nr_pages)
2264		nr_allocated += __alloc_pages_bulk(gfp, numa_node_id(), NULL,
2265				nr_pages - nr_allocated, NULL,
2266				page_array + nr_allocated);
2267	return nr_allocated;
2268}
2269
2270/* Bulk page allocation and the mempolicy should be considered at the
2271 * same time in some situations, such as vmalloc.
2272 *
2273 * It can speed up memory allocation, especially for interleaved
2274 * allocations.
2275 */
2276unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp,
2277		unsigned long nr_pages, struct page **page_array)
2278{
2279	struct mempolicy *pol = &default_policy;
2280	nodemask_t *nodemask;
2281	int nid;
2282
2283	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2284		pol = get_task_policy(current);
2285
2286	if (pol->mode == MPOL_INTERLEAVE)
2287		return alloc_pages_bulk_array_interleave(gfp, pol,
2288							 nr_pages, page_array);
2289
2290	if (pol->mode == MPOL_PREFERRED_MANY)
2291		return alloc_pages_bulk_array_preferred_many(gfp,
2292				numa_node_id(), pol, nr_pages, page_array);
2293
2294	nid = numa_node_id();
2295	nodemask = policy_nodemask(gfp, pol, NO_INTERLEAVE_INDEX, &nid);
2296	return __alloc_pages_bulk(gfp, nid, nodemask,
2297				  nr_pages, NULL, page_array);
2298}
2299
2300int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2301{
2302	struct mempolicy *pol = mpol_dup(src->vm_policy);
2303
2304	if (IS_ERR(pol))
2305		return PTR_ERR(pol);
2306	dst->vm_policy = pol;
2307	return 0;
2308}
 
2309
2310/*
2311 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
2312 * rebinds the mempolicy it's copying by calling mpol_rebind_policy()
2313 * with the mems_allowed returned by cpuset_mems_allowed().  This
2314 * keeps mempolicies cpuset relative after its cpuset moves.  See
2315 * further kernel/cpuset.c update_nodemask().
2316 *
2317 * current's mempolicy may be rebound by another task (the task that changes
2318 * the cpuset's mems), so we needn't do the rebind work for the current task.
2319 */
2320
2321/* Slow path of a mempolicy duplicate */
2322struct mempolicy *__mpol_dup(struct mempolicy *old)
2323{
2324	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2325
2326	if (!new)
2327		return ERR_PTR(-ENOMEM);
2328
2329	/* task's mempolicy is protected by alloc_lock */
2330	if (old == current->mempolicy) {
2331		task_lock(current);
2332		*new = *old;
2333		task_unlock(current);
2334	} else
2335		*new = *old;
2336
 
2337	if (current_cpuset_is_being_rebound()) {
2338		nodemask_t mems = cpuset_mems_allowed(current);
2339		mpol_rebind_policy(new, &mems);
2340	}
 
2341	atomic_set(&new->refcnt, 1);
2342	return new;
2343}
2344
2345/* Slow path of a mempolicy comparison */
2346bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
2347{
2348	if (!a || !b)
2349		return false;
2350	if (a->mode != b->mode)
2351		return false;
2352	if (a->flags != b->flags)
2353		return false;
2354	if (a->home_node != b->home_node)
2355		return false;
2356	if (mpol_store_user_nodemask(a))
2357		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2358			return false;
2359
2360	switch (a->mode) {
2361	case MPOL_BIND:
 
2362	case MPOL_INTERLEAVE:
 
2363	case MPOL_PREFERRED:
2364	case MPOL_PREFERRED_MANY:
2365		return !!nodes_equal(a->nodes, b->nodes);
2366	case MPOL_LOCAL:
2367		return true;
2368	default:
2369		BUG();
2370		return false;
2371	}
2372}
2373
2374/*
2375 * Shared memory backing store policy support.
2376 *
2377 * Remember policies even when nobody has shared memory mapped.
2378 * The policies are kept in Red-Black tree linked from the inode.
2379 * They are protected by the sp->lock rwlock, which should be held
2380 * for any accesses to the tree.
2381 */
2382
2383/*
2384 * lookup first element intersecting start-end.  Caller holds sp->lock for
2385 * reading or for writing
2386 */
2387static struct sp_node *sp_lookup(struct shared_policy *sp,
2388					pgoff_t start, pgoff_t end)
2389{
2390	struct rb_node *n = sp->root.rb_node;
2391
2392	while (n) {
2393		struct sp_node *p = rb_entry(n, struct sp_node, nd);
2394
2395		if (start >= p->end)
2396			n = n->rb_right;
2397		else if (end <= p->start)
2398			n = n->rb_left;
2399		else
2400			break;
2401	}
2402	if (!n)
2403		return NULL;
2404	for (;;) {
2405		struct sp_node *w = NULL;
2406		struct rb_node *prev = rb_prev(n);
2407		if (!prev)
2408			break;
2409		w = rb_entry(prev, struct sp_node, nd);
2410		if (w->end <= start)
2411			break;
2412		n = prev;
2413	}
2414	return rb_entry(n, struct sp_node, nd);
2415}
2416
2417/*
2418 * Insert a new shared policy into the list.  Caller holds sp->lock for
2419 * writing.
2420 */
2421static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2422{
2423	struct rb_node **p = &sp->root.rb_node;
2424	struct rb_node *parent = NULL;
2425	struct sp_node *nd;
2426
2427	while (*p) {
2428		parent = *p;
2429		nd = rb_entry(parent, struct sp_node, nd);
2430		if (new->start < nd->start)
2431			p = &(*p)->rb_left;
2432		else if (new->end > nd->end)
2433			p = &(*p)->rb_right;
2434		else
2435			BUG();
2436	}
2437	rb_link_node(&new->nd, parent, p);
2438	rb_insert_color(&new->nd, &sp->root);
 
 
2439}
2440
2441/* Find shared policy intersecting idx */
2442struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
2443						pgoff_t idx)
2444{
2445	struct mempolicy *pol = NULL;
2446	struct sp_node *sn;
2447
2448	if (!sp->root.rb_node)
2449		return NULL;
2450	read_lock(&sp->lock);
2451	sn = sp_lookup(sp, idx, idx+1);
2452	if (sn) {
2453		mpol_get(sn->policy);
2454		pol = sn->policy;
2455	}
2456	read_unlock(&sp->lock);
2457	return pol;
2458}
2459
2460static void sp_free(struct sp_node *n)
2461{
 
 
2462	mpol_put(n->policy);
2463	kmem_cache_free(sn_cache, n);
2464}
2465
2466/**
2467 * mpol_misplaced - check whether current folio node is valid in policy
2468 *
2469 * @folio: folio to be checked
2470 * @vma: vm area where folio mapped
2471 * @addr: virtual address in @vma for shared policy lookup and interleave policy
2472 *
2473 * Lookup current policy node id for vma,addr and "compare to" folio's
2474 * node id.  Policy determination "mimics" alloc_page_vma().
2475 * Called from fault path where we know the vma and faulting address.
2476 *
2477 * Return: NUMA_NO_NODE if the page is in a node that is valid for this
2478 * policy, or a suitable node ID to allocate a replacement folio from.
2479 */
2480int mpol_misplaced(struct folio *folio, struct vm_area_struct *vma,
2481		   unsigned long addr)
2482{
2483	struct mempolicy *pol;
2484	pgoff_t ilx;
2485	struct zoneref *z;
2486	int curnid = folio_nid(folio);
2487	int thiscpu = raw_smp_processor_id();
2488	int thisnid = cpu_to_node(thiscpu);
2489	int polnid = NUMA_NO_NODE;
2490	int ret = NUMA_NO_NODE;
2491
2492	pol = get_vma_policy(vma, addr, folio_order(folio), &ilx);
2493	if (!(pol->flags & MPOL_F_MOF))
2494		goto out;
2495
2496	switch (pol->mode) {
2497	case MPOL_INTERLEAVE:
2498		polnid = interleave_nid(pol, ilx);
2499		break;
2500
2501	case MPOL_PREFERRED:
2502		if (node_isset(curnid, pol->nodes))
2503			goto out;
2504		polnid = first_node(pol->nodes);
2505		break;
2506
2507	case MPOL_LOCAL:
2508		polnid = numa_node_id();
2509		break;
2510
2511	case MPOL_BIND:
2512		/* Optimize placement among multiple nodes via NUMA balancing */
2513		if (pol->flags & MPOL_F_MORON) {
2514			if (node_isset(thisnid, pol->nodes))
2515				break;
2516			goto out;
2517		}
2518		fallthrough;
2519
2520	case MPOL_PREFERRED_MANY:
2521		/*
2522		 * use current page if in policy nodemask,
2523		 * else select nearest allowed node, if any.
2524		 * If no allowed nodes, use current [!misplaced].
2525		 */
2526		if (node_isset(curnid, pol->nodes))
2527			goto out;
2528		z = first_zones_zonelist(
2529				node_zonelist(numa_node_id(), GFP_HIGHUSER),
2530				gfp_zone(GFP_HIGHUSER),
2531				&pol->nodes);
2532		polnid = zone_to_nid(z->zone);
2533		break;
2534
2535	default:
2536		BUG();
2537	}
2538
2539	/* Migrate the folio towards the node whose CPU is referencing it */
2540	if (pol->flags & MPOL_F_MORON) {
2541		polnid = thisnid;
2542
2543		if (!should_numa_migrate_memory(current, folio, curnid,
2544						thiscpu))
2545			goto out;
2546	}
2547
2548	if (curnid != polnid)
2549		ret = polnid;
2550out:
2551	mpol_cond_put(pol);
2552
2553	return ret;
2554}
2555
2556/*
2557 * Drop the (possibly final) reference to task->mempolicy.  It needs to be
2558 * dropped after task->mempolicy is set to NULL so that any allocation done as
2559 * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2560 * policy.
2561 */
2562void mpol_put_task_policy(struct task_struct *task)
2563{
2564	struct mempolicy *pol;
2565
2566	task_lock(task);
2567	pol = task->mempolicy;
2568	task->mempolicy = NULL;
2569	task_unlock(task);
2570	mpol_put(pol);
2571}
2572
2573static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2574{
2575	rb_erase(&n->nd, &sp->root);
2576	sp_free(n);
2577}
2578
2579static void sp_node_init(struct sp_node *node, unsigned long start,
2580			unsigned long end, struct mempolicy *pol)
2581{
2582	node->start = start;
2583	node->end = end;
2584	node->policy = pol;
2585}
2586
2587static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2588				struct mempolicy *pol)
2589{
2590	struct sp_node *n;
2591	struct mempolicy *newpol;
2592
2593	n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2594	if (!n)
2595		return NULL;
2596
2597	newpol = mpol_dup(pol);
2598	if (IS_ERR(newpol)) {
2599		kmem_cache_free(sn_cache, n);
2600		return NULL;
2601	}
2602	newpol->flags |= MPOL_F_SHARED;
2603	sp_node_init(n, start, end, newpol);
2604
2605	return n;
2606}
2607
2608/* Replace a policy range. */
2609static int shared_policy_replace(struct shared_policy *sp, pgoff_t start,
2610				 pgoff_t end, struct sp_node *new)
2611{
2612	struct sp_node *n;
2613	struct sp_node *n_new = NULL;
2614	struct mempolicy *mpol_new = NULL;
2615	int ret = 0;
2616
2617restart:
2618	write_lock(&sp->lock);
2619	n = sp_lookup(sp, start, end);
2620	/* Take care of old policies in the same range. */
2621	while (n && n->start < end) {
2622		struct rb_node *next = rb_next(&n->nd);
2623		if (n->start >= start) {
2624			if (n->end <= end)
2625				sp_delete(sp, n);
2626			else
2627				n->start = end;
2628		} else {
2629			/* Old policy spanning whole new range. */
2630			if (n->end > end) {
2631				if (!n_new)
2632					goto alloc_new;
2633
2634				*mpol_new = *n->policy;
2635				atomic_set(&mpol_new->refcnt, 1);
2636				sp_node_init(n_new, end, n->end, mpol_new);
 
2637				n->end = start;
2638				sp_insert(sp, n_new);
2639				n_new = NULL;
2640				mpol_new = NULL;
2641				break;
2642			} else
2643				n->end = start;
2644		}
2645		if (!next)
2646			break;
2647		n = rb_entry(next, struct sp_node, nd);
2648	}
2649	if (new)
2650		sp_insert(sp, new);
2651	write_unlock(&sp->lock);
2652	ret = 0;
2653
2654err_out:
2655	if (mpol_new)
2656		mpol_put(mpol_new);
2657	if (n_new)
2658		kmem_cache_free(sn_cache, n_new);
2659
2660	return ret;
2661
2662alloc_new:
2663	write_unlock(&sp->lock);
2664	ret = -ENOMEM;
2665	n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2666	if (!n_new)
2667		goto err_out;
2668	mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2669	if (!mpol_new)
2670		goto err_out;
2671	atomic_set(&mpol_new->refcnt, 1);
2672	goto restart;
2673}
2674
2675/**
2676 * mpol_shared_policy_init - initialize shared policy for inode
2677 * @sp: pointer to inode shared policy
2678 * @mpol:  struct mempolicy to install
2679 *
2680 * Install non-NULL @mpol in inode's shared policy rb-tree.
2681 * On entry, the current task has a reference on a non-NULL @mpol.
2682 * This must be released on exit.
2683 * This is called from get_inode() context, so we can use GFP_KERNEL.
2684 */
2685void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2686{
2687	int ret;
2688
2689	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
2690	rwlock_init(&sp->lock);
2691
2692	if (mpol) {
2693		struct sp_node *sn;
2694		struct mempolicy *npol;
2695		NODEMASK_SCRATCH(scratch);
2696
2697		if (!scratch)
2698			goto put_mpol;
2699
2700		/* contextualize the tmpfs mount point mempolicy to this file */
2701		npol = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
2702		if (IS_ERR(npol))
2703			goto free_scratch; /* no valid nodemask intersection */
2704
2705		task_lock(current);
2706		ret = mpol_set_nodemask(npol, &mpol->w.user_nodemask, scratch);
2707		task_unlock(current);
2708		if (ret)
2709			goto put_npol;
2710
2711		/* alloc node covering entire file; adds ref to file's npol */
2712		sn = sp_alloc(0, MAX_LFS_FILESIZE >> PAGE_SHIFT, npol);
2713		if (sn)
2714			sp_insert(sp, sn);
2715put_npol:
2716		mpol_put(npol);	/* drop initial ref on file's npol */
2717free_scratch:
2718		NODEMASK_SCRATCH_FREE(scratch);
2719put_mpol:
2720		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
2721	}
2722}
2723
2724int mpol_set_shared_policy(struct shared_policy *sp,
2725			struct vm_area_struct *vma, struct mempolicy *pol)
2726{
2727	int err;
2728	struct sp_node *new = NULL;
2729	unsigned long sz = vma_pages(vma);
2730
2731	if (pol) {
2732		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, pol);
2733		if (!new)
2734			return -ENOMEM;
2735	}
2736	err = shared_policy_replace(sp, vma->vm_pgoff, vma->vm_pgoff + sz, new);
2737	if (err && new)
2738		sp_free(new);
2739	return err;
2740}
2741
2742/* Free a backing policy store on inode delete. */
2743void mpol_free_shared_policy(struct shared_policy *sp)
2744{
2745	struct sp_node *n;
2746	struct rb_node *next;
2747
2748	if (!sp->root.rb_node)
2749		return;
2750	write_lock(&sp->lock);
2751	next = rb_first(&sp->root);
2752	while (next) {
2753		n = rb_entry(next, struct sp_node, nd);
2754		next = rb_next(&n->nd);
2755		sp_delete(sp, n);
2756	}
2757	write_unlock(&sp->lock);
2758}
2759
2760#ifdef CONFIG_NUMA_BALANCING
2761static int __initdata numabalancing_override;
2762
2763static void __init check_numabalancing_enable(void)
2764{
2765	bool numabalancing_default = false;
2766
2767	if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
2768		numabalancing_default = true;
2769
2770	/* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2771	if (numabalancing_override)
2772		set_numabalancing_state(numabalancing_override == 1);
2773
2774	if (num_online_nodes() > 1 && !numabalancing_override) {
2775		pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
2776			numabalancing_default ? "Enabling" : "Disabling");
2777		set_numabalancing_state(numabalancing_default);
2778	}
2779}
2780
2781static int __init setup_numabalancing(char *str)
2782{
2783	int ret = 0;
2784	if (!str)
2785		goto out;
2786
2787	if (!strcmp(str, "enable")) {
2788		numabalancing_override = 1;
2789		ret = 1;
2790	} else if (!strcmp(str, "disable")) {
2791		numabalancing_override = -1;
2792		ret = 1;
2793	}
2794out:
2795	if (!ret)
2796		pr_warn("Unable to parse numa_balancing=\n");
2797
2798	return ret;
2799}
2800__setup("numa_balancing=", setup_numabalancing);
2801#else
2802static inline void __init check_numabalancing_enable(void)
2803{
2804}
2805#endif /* CONFIG_NUMA_BALANCING */
2806
 
2807void __init numa_policy_init(void)
2808{
2809	nodemask_t interleave_nodes;
2810	unsigned long largest = 0;
2811	int nid, prefer = 0;
2812
2813	policy_cache = kmem_cache_create("numa_policy",
2814					 sizeof(struct mempolicy),
2815					 0, SLAB_PANIC, NULL);
2816
2817	sn_cache = kmem_cache_create("shared_policy_node",
2818				     sizeof(struct sp_node),
2819				     0, SLAB_PANIC, NULL);
2820
2821	for_each_node(nid) {
2822		preferred_node_policy[nid] = (struct mempolicy) {
2823			.refcnt = ATOMIC_INIT(1),
2824			.mode = MPOL_PREFERRED,
2825			.flags = MPOL_F_MOF | MPOL_F_MORON,
2826			.nodes = nodemask_of_node(nid),
2827		};
2828	}
2829
2830	/*
2831	 * Set interleaving policy for system init. Interleaving is only
2832	 * enabled across suitably sized nodes (default is >= 16MB), or
2833	 * fall back to the largest node if they're all smaller.
2834	 */
2835	nodes_clear(interleave_nodes);
2836	for_each_node_state(nid, N_MEMORY) {
2837		unsigned long total_pages = node_present_pages(nid);
2838
2839		/* Preserve the largest node */
2840		if (largest < total_pages) {
2841			largest = total_pages;
2842			prefer = nid;
2843		}
2844
2845		/* Interleave this node? */
2846		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2847			node_set(nid, interleave_nodes);
2848	}
2849
2850	/* All too small, use the largest */
2851	if (unlikely(nodes_empty(interleave_nodes)))
2852		node_set(prefer, interleave_nodes);
2853
2854	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2855		pr_err("%s: interleaving failed\n", __func__);
2856
2857	check_numabalancing_enable();
2858}
2859
2860/* Reset policy of current process to default */
2861void numa_default_policy(void)
2862{
2863	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
2864}
2865
2866/*
2867 * Parse and format mempolicy from/to strings
2868 */
2869static const char * const policy_modes[] =
2870{
2871	[MPOL_DEFAULT]    = "default",
2872	[MPOL_PREFERRED]  = "prefer",
2873	[MPOL_BIND]       = "bind",
2874	[MPOL_INTERLEAVE] = "interleave",
2875	[MPOL_LOCAL]      = "local",
2876	[MPOL_PREFERRED_MANY]  = "prefer (many)",
2877};
2878
 
2879#ifdef CONFIG_TMPFS
2880/**
2881 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
2882 * @str:  string containing mempolicy to parse
2883 * @mpol:  pointer to struct mempolicy pointer, returned on success.
 
2884 *
2885 * Format of input:
2886 *	<mode>[=<flags>][:<nodelist>]
2887 *
2888 * Return: %0 on success, else %1
2889 */
2890int mpol_parse_str(char *str, struct mempolicy **mpol)
2891{
2892	struct mempolicy *new = NULL;
2893	unsigned short mode_flags;
 
2894	nodemask_t nodes;
2895	char *nodelist = strchr(str, ':');
2896	char *flags = strchr(str, '=');
2897	int err = 1, mode;
2898
2899	if (flags)
2900		*flags++ = '\0';	/* terminate mode string */
2901
2902	if (nodelist) {
2903		/* NUL-terminate mode or flags string */
2904		*nodelist++ = '\0';
2905		if (nodelist_parse(nodelist, nodes))
2906			goto out;
2907		if (!nodes_subset(nodes, node_states[N_MEMORY]))
2908			goto out;
2909	} else
2910		nodes_clear(nodes);
2911
2912	mode = match_string(policy_modes, MPOL_MAX, str);
2913	if (mode < 0)
2914		goto out;
2915
2916	switch (mode) {
2917	case MPOL_PREFERRED:
2918		/*
2919		 * Insist on a nodelist of one node only, although later
2920		 * we use first_node(nodes) to grab a single node, so here
2921		 * nodelist (or nodes) cannot be empty.
2922		 */
2923		if (nodelist) {
2924			char *rest = nodelist;
2925			while (isdigit(*rest))
2926				rest++;
2927			if (*rest)
2928				goto out;
2929			if (nodes_empty(nodes))
2930				goto out;
2931		}
2932		break;
2933	case MPOL_INTERLEAVE:
2934		/*
2935		 * Default to online nodes with memory if no nodelist
2936		 */
2937		if (!nodelist)
2938			nodes = node_states[N_MEMORY];
2939		break;
2940	case MPOL_LOCAL:
2941		/*
2942		 * Don't allow a nodelist;  mpol_new() checks flags
2943		 */
2944		if (nodelist)
2945			goto out;
 
2946		break;
2947	case MPOL_DEFAULT:
2948		/*
2949		 * Insist on an empty nodelist
2950		 */
2951		if (!nodelist)
2952			err = 0;
2953		goto out;
2954	case MPOL_PREFERRED_MANY:
2955	case MPOL_BIND:
2956		/*
2957		 * Insist on a nodelist
2958		 */
2959		if (!nodelist)
2960			goto out;
2961	}
2962
2963	mode_flags = 0;
2964	if (flags) {
2965		/*
2966		 * Currently, we only support two mutually exclusive
2967		 * mode flags.
2968		 */
2969		if (!strcmp(flags, "static"))
2970			mode_flags |= MPOL_F_STATIC_NODES;
2971		else if (!strcmp(flags, "relative"))
2972			mode_flags |= MPOL_F_RELATIVE_NODES;
2973		else
2974			goto out;
2975	}
2976
2977	new = mpol_new(mode, mode_flags, &nodes);
2978	if (IS_ERR(new))
2979		goto out;
2980
2981	/*
2982	 * Save nodes for mpol_to_str() to show the tmpfs mount options
2983	 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2984	 */
2985	if (mode != MPOL_PREFERRED) {
2986		new->nodes = nodes;
2987	} else if (nodelist) {
2988		nodes_clear(new->nodes);
2989		node_set(first_node(nodes), new->nodes);
2990	} else {
2991		new->mode = MPOL_LOCAL;
2992	}
2993
2994	/*
2995	 * Save nodes for contextualization: this will be used to "clone"
2996	 * the mempolicy in a specific context [cpuset] at a later time.
2997	 */
2998	new->w.user_nodemask = nodes;
2999
3000	err = 0;
3001
3002out:
3003	/* Restore string for error message */
3004	if (nodelist)
3005		*--nodelist = ':';
3006	if (flags)
3007		*--flags = '=';
3008	if (!err)
3009		*mpol = new;
3010	return err;
3011}
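/*
 * Editorial note: illustrative tmpfs mount option strings accepted by
 * mpol_parse_str() above (the node numbers are examples only and must
 * name nodes that have memory):
 *
 *	mpol=interleave:0-3
 *	mpol=bind=static:0,2
 *	mpol=prefer:1
 *	mpol=local
 */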
3012#endif /* CONFIG_TMPFS */
3013
3014/**
3015 * mpol_to_str - format a mempolicy structure for printing
3016 * @buffer:  to contain formatted mempolicy string
3017 * @maxlen:  length of @buffer
3018 * @pol:  pointer to mempolicy to be formatted
 
3019 *
3020 * Convert @pol into a string.  If @buffer is too short, truncate the string.
3021 * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
3022 * longest flag, "relative", and to display at least a few node ids.
3023 */
3024void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
3025{
3026	char *p = buffer;
3027	nodemask_t nodes = NODE_MASK_NONE;
3028	unsigned short mode = MPOL_DEFAULT;
3029	unsigned short flags = 0;
3030
3031	if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
 
 
3032		mode = pol->mode;
3033		flags = pol->flags;
3034	}
3035
3036	switch (mode) {
3037	case MPOL_DEFAULT:
3038	case MPOL_LOCAL:
3039		break;
 
3040	case MPOL_PREFERRED:
3041	case MPOL_PREFERRED_MANY:
3042	case MPOL_BIND:
 
3043	case MPOL_INTERLEAVE:
3044		nodes = pol->nodes;
3045		break;
 
3046	default:
3047		WARN_ON_ONCE(1);
3048		snprintf(p, maxlen, "unknown");
3049		return;
3050	}
3051
3052	p += snprintf(p, maxlen, "%s", policy_modes[mode]);
3053
3054	if (flags & MPOL_MODE_FLAGS) {
3055		p += snprintf(p, buffer + maxlen - p, "=");
 
 
3056
3057		/*
3058		 * Currently, the only defined flags are mutually exclusive
3059		 */
3060		if (flags & MPOL_F_STATIC_NODES)
3061			p += snprintf(p, buffer + maxlen - p, "static");
3062		else if (flags & MPOL_F_RELATIVE_NODES)
3063			p += snprintf(p, buffer + maxlen - p, "relative");
3064	}
3065
3066	if (!nodes_empty(nodes))
3067		p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
3068			       nodemask_pr_args(&nodes));
3069}
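/*
 * Editorial note: illustrative outputs of mpol_to_str() above, following
 * the <mode>[=<flags>][:<nodelist>] shape it emits (node lists are
 * examples only):
 *
 *	"default", "local", "prefer:2", "interleave:0-3",
 *	"bind=static:0,2", "prefer (many):1-2,5"
 */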