   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Simple NUMA memory policy for the Linux kernel.
   4 *
   5 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
   6 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
   7 *
   8 * NUMA policy allows the user to give hints in which node(s) memory should
   9 * be allocated.
  10 *
  11 * Support four policies per VMA and per process:
  12 *
  13 * The VMA policy has priority over the process policy for a page fault.
  14 *
  15 * interleave     Allocate memory interleaved over a set of nodes,
  16 *                with normal fallback if it fails.
  17 *                For VMA based allocations this interleaves based on the
  18 *                offset into the backing object or offset into the mapping
   19 *                for anonymous memory. For process policy a process counter
  20 *                is used.
  21 *
  22 * bind           Only allocate memory on a specific set of nodes,
  23 *                no fallback.
  24 *                FIXME: memory is allocated starting with the first node
  25 *                to the last. It would be better if bind would truly restrict
  26 *                the allocation to memory nodes instead
  27 *
   28 * preferred      Try a specific node first before normal fallback.
  29 *                As a special case NUMA_NO_NODE here means do the allocation
  30 *                on the local CPU. This is normally identical to default,
  31 *                but useful to set in a VMA when you have a non default
  32 *                process policy.
  33 *
  34 * default        Allocate on the local node first, or when on a VMA
  35 *                use the process policy. This is what Linux always did
  36 *		  in a NUMA aware kernel and still does by, ahem, default.
  37 *
   38 * The process policy is applied for most non-interrupt memory allocations
  39 * in that process' context. Interrupts ignore the policies and always
  40 * try to allocate on the local CPU. The VMA policy is only applied for memory
  41 * allocations for a VMA in the VM.
  42 *
  43 * Currently there are a few corner cases in swapping where the policy
  44 * is not applied, but the majority should be handled. When process policy
  45 * is used it is not remembered over swap outs/swap ins.
  46 *
  47 * Only the highest zone in the zone hierarchy gets policied. Allocations
  48 * requesting a lower zone just use default policy. This implies that
   49 * on systems with highmem, kernel lowmem allocations don't get policied.
  50 * Same with GFP_DMA allocations.
  51 *
  52 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
  53 * all users and remembered even when nobody has memory mapped.
  54 */
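/*
 * For illustration only: a task could ask for the policies above from user
 * space with the set_mempolicy()/mbind() system calls implemented later in
 * this file (the node mask value below is just an example, and real programs
 * usually go through libnuma rather than raw syscalls):
 *
 *	unsigned long mask = 0x3;                                 nodes 0 and 1
 *	set_mempolicy(MPOL_INTERLEAVE, &mask, 8 * sizeof(mask));  process policy
 *	mbind(addr, len, MPOL_BIND, &mask, 8 * sizeof(mask),
 *	      MPOL_MF_MOVE);                                      VMA policy
 */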
  55
  56/* Notebook:
  57   fix mmap readahead to honour policy and enable policy for any page cache
  58   object
  59   statistics for bigpages
  60   global policy for page cache? currently it uses process policy. Requires
  61   first item above.
  62   handle mremap for shared memory (currently ignored for the policy)
  63   grows down?
  64   make bind policy root only? It can trigger oom much faster and the
  65   kernel is not always grateful with that.
  66*/
  67
  68#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  69
  70#include <linux/mempolicy.h>
  71#include <linux/pagewalk.h>
  72#include <linux/highmem.h>
  73#include <linux/hugetlb.h>
  74#include <linux/kernel.h>
  75#include <linux/sched.h>
  76#include <linux/sched/mm.h>
  77#include <linux/sched/numa_balancing.h>
  78#include <linux/sched/task.h>
  79#include <linux/nodemask.h>
  80#include <linux/cpuset.h>
  81#include <linux/slab.h>
  82#include <linux/string.h>
  83#include <linux/export.h>
  84#include <linux/nsproxy.h>
  85#include <linux/interrupt.h>
  86#include <linux/init.h>
  87#include <linux/compat.h>
  88#include <linux/ptrace.h>
  89#include <linux/swap.h>
  90#include <linux/seq_file.h>
  91#include <linux/proc_fs.h>
  92#include <linux/migrate.h>
  93#include <linux/ksm.h>
  94#include <linux/rmap.h>
  95#include <linux/security.h>
  96#include <linux/syscalls.h>
  97#include <linux/ctype.h>
  98#include <linux/mm_inline.h>
  99#include <linux/mmu_notifier.h>
 100#include <linux/printk.h>
 101#include <linux/swapops.h>
 102
 103#include <asm/tlbflush.h>
 104#include <linux/uaccess.h>
 105
 106#include "internal.h"
 107
 108/* Internal flags */
 109#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
 110#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
 111
 112static struct kmem_cache *policy_cache;
 113static struct kmem_cache *sn_cache;
 114
  115/* Highest zone. A specific allocation for a zone below that is not
 116   policied. */
 117enum zone_type policy_zone = 0;
 118
 119/*
 120 * run-time system-wide default policy => local allocation
 121 */
 122static struct mempolicy default_policy = {
 123	.refcnt = ATOMIC_INIT(1), /* never free it */
 124	.mode = MPOL_LOCAL,
 125};
 126
 127static struct mempolicy preferred_node_policy[MAX_NUMNODES];
 128
 129/**
 130 * numa_map_to_online_node - Find closest online node
 131 * @node: Node id to start the search
 132 *
 133 * Lookup the next closest node by distance if @nid is not online.
 134 */
 135int numa_map_to_online_node(int node)
 136{
 137	int min_dist = INT_MAX, dist, n, min_node;
 138
 139	if (node == NUMA_NO_NODE || node_online(node))
 140		return node;
 141
 142	min_node = node;
 143	for_each_online_node(n) {
 144		dist = node_distance(node, n);
 145		if (dist < min_dist) {
 146			min_dist = dist;
 147			min_node = n;
 148		}
 149	}
 150
 151	return min_node;
 152}
 153EXPORT_SYMBOL_GPL(numa_map_to_online_node);
 154
 155struct mempolicy *get_task_policy(struct task_struct *p)
 156{
 157	struct mempolicy *pol = p->mempolicy;
 158	int node;
 159
 160	if (pol)
 161		return pol;
 162
 163	node = numa_node_id();
 164	if (node != NUMA_NO_NODE) {
 165		pol = &preferred_node_policy[node];
 166		/* preferred_node_policy is not initialised early in boot */
 167		if (pol->mode)
 168			return pol;
 169	}
 170
 171	return &default_policy;
 172}
 173
 174static const struct mempolicy_operations {
 175	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
 176	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
 177} mpol_ops[MPOL_MAX];
 178
 179static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
 180{
 181	return pol->flags & MPOL_MODE_FLAGS;
 182}
 183
 184static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
 185				   const nodemask_t *rel)
 186{
 187	nodemask_t tmp;
 188	nodes_fold(tmp, *orig, nodes_weight(*rel));
 189	nodes_onto(*ret, tmp, *rel);
 190}
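/*
 * Illustrative example of the relative remap above: with a cpuset mask
 * *rel = {4,5,6,7} and a user-supplied *orig = {0,2}, nodes_fold() wraps
 * orig into the 4-node space (still {0,2}) and nodes_onto() maps those
 * positions onto the set bits of rel, yielding *ret = {4,6}.
 */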
 191
 192static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
 193{
 194	if (nodes_empty(*nodes))
 195		return -EINVAL;
 196	pol->nodes = *nodes;
 197	return 0;
 198}
 199
 200static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
 201{
 202	if (nodes_empty(*nodes))
 203		return -EINVAL;
 204
 205	nodes_clear(pol->nodes);
 206	node_set(first_node(*nodes), pol->nodes);
 207	return 0;
 208}
 209
 210static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
 211{
 212	if (nodes_empty(*nodes))
 213		return -EINVAL;
 214	pol->nodes = *nodes;
 215	return 0;
 216}
 217
 218/*
 219 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 220 * any, for the new policy.  mpol_new() has already validated the nodes
 221 * parameter with respect to the policy mode and flags.
 222 *
 223 * Must be called holding task's alloc_lock to protect task's mems_allowed
 224 * and mempolicy.  May also be called holding the mmap_lock for write.
 225 */
 226static int mpol_set_nodemask(struct mempolicy *pol,
 227		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
 228{
 229	int ret;
 230
 231	/*
 232	 * Default (pol==NULL) resp. local memory policies are not a
 233	 * subject of any remapping. They also do not need any special
 234	 * constructor.
 235	 */
 236	if (!pol || pol->mode == MPOL_LOCAL)
 237		return 0;
 238
 239	/* Check N_MEMORY */
 240	nodes_and(nsc->mask1,
 241		  cpuset_current_mems_allowed, node_states[N_MEMORY]);
 242
 243	VM_BUG_ON(!nodes);
 244
 245	if (pol->flags & MPOL_F_RELATIVE_NODES)
 246		mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
 247	else
 248		nodes_and(nsc->mask2, *nodes, nsc->mask1);
 249
 250	if (mpol_store_user_nodemask(pol))
 251		pol->w.user_nodemask = *nodes;
 252	else
 253		pol->w.cpuset_mems_allowed = cpuset_current_mems_allowed;
 254
 255	ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
 256	return ret;
 257}
 258
 259/*
  260 * This function just creates a new policy, does some checks and simple
 261 * initialization. You must invoke mpol_set_nodemask() to set nodes.
 262 */
 263static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
 264				  nodemask_t *nodes)
 265{
 266	struct mempolicy *policy;
 267
 268	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
 269		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
 270
 271	if (mode == MPOL_DEFAULT) {
 272		if (nodes && !nodes_empty(*nodes))
 273			return ERR_PTR(-EINVAL);
 274		return NULL;
 275	}
 276	VM_BUG_ON(!nodes);
 277
 278	/*
 279	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
 280	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
 281	 * All other modes require a valid pointer to a non-empty nodemask.
 282	 */
 283	if (mode == MPOL_PREFERRED) {
 284		if (nodes_empty(*nodes)) {
 285			if (((flags & MPOL_F_STATIC_NODES) ||
 286			     (flags & MPOL_F_RELATIVE_NODES)))
 287				return ERR_PTR(-EINVAL);
 288
 289			mode = MPOL_LOCAL;
 290		}
 291	} else if (mode == MPOL_LOCAL) {
 292		if (!nodes_empty(*nodes) ||
 293		    (flags & MPOL_F_STATIC_NODES) ||
 294		    (flags & MPOL_F_RELATIVE_NODES))
 295			return ERR_PTR(-EINVAL);
 296	} else if (nodes_empty(*nodes))
 297		return ERR_PTR(-EINVAL);
 298	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
 299	if (!policy)
 300		return ERR_PTR(-ENOMEM);
 301	atomic_set(&policy->refcnt, 1);
 302	policy->mode = mode;
 303	policy->flags = flags;
 304
 305	return policy;
 306}
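/*
 * Typical construction sequence, as used by do_set_mempolicy() and
 * do_mbind() later in this file:
 *
 *	new = mpol_new(mode, flags, nodes);
 *	if (IS_ERR(new))
 *		return PTR_ERR(new);
 *	ret = mpol_set_nodemask(new, nodes, scratch);    scratch from NODEMASK_SCRATCH()
 */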
 307
 308/* Slow path of a mpol destructor. */
 309void __mpol_put(struct mempolicy *p)
 310{
 311	if (!atomic_dec_and_test(&p->refcnt))
 312		return;
 313	kmem_cache_free(policy_cache, p);
 314}
 315
 316static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
 317{
 318}
 319
 320static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
 321{
 322	nodemask_t tmp;
 323
 324	if (pol->flags & MPOL_F_STATIC_NODES)
 325		nodes_and(tmp, pol->w.user_nodemask, *nodes);
 326	else if (pol->flags & MPOL_F_RELATIVE_NODES)
 327		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
 328	else {
 329		nodes_remap(tmp, pol->nodes, pol->w.cpuset_mems_allowed,
 330								*nodes);
 331		pol->w.cpuset_mems_allowed = *nodes;
 332	}
 333
 334	if (nodes_empty(tmp))
 335		tmp = *nodes;
 336
 337	pol->nodes = tmp;
 338}
 339
 340static void mpol_rebind_preferred(struct mempolicy *pol,
 341						const nodemask_t *nodes)
 342{
 343	pol->w.cpuset_mems_allowed = *nodes;
 344}
 345
 346/*
 347 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 348 *
 349 * Per-vma policies are protected by mmap_lock. Allocations using per-task
 350 * policies are protected by task->mems_allowed_seq to prevent a premature
 351 * OOM/allocation failure due to parallel nodemask modification.
 352 */
 353static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
 354{
 355	if (!pol)
 356		return;
 357	if (!mpol_store_user_nodemask(pol) &&
 358	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
 359		return;
 360
 361	mpol_ops[pol->mode].rebind(pol, newmask);
 362}
 363
 364/*
 365 * Wrapper for mpol_rebind_policy() that just requires task
 366 * pointer, and updates task mempolicy.
 367 *
 368 * Called with task's alloc_lock held.
 369 */
 370
 371void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
 372{
 373	mpol_rebind_policy(tsk->mempolicy, new);
 374}
 375
 376/*
 377 * Rebind each vma in mm to new nodemask.
 378 *
 379 * Call holding a reference to mm.  Takes mm->mmap_lock during call.
 380 */
 381
 382void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
 383{
 384	struct vm_area_struct *vma;
 385
 386	mmap_write_lock(mm);
 387	for (vma = mm->mmap; vma; vma = vma->vm_next)
 388		mpol_rebind_policy(vma->vm_policy, new);
 389	mmap_write_unlock(mm);
 390}
 391
 392static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
 393	[MPOL_DEFAULT] = {
 394		.rebind = mpol_rebind_default,
 395	},
 396	[MPOL_INTERLEAVE] = {
 397		.create = mpol_new_interleave,
 398		.rebind = mpol_rebind_nodemask,
 399	},
 400	[MPOL_PREFERRED] = {
 401		.create = mpol_new_preferred,
 402		.rebind = mpol_rebind_preferred,
 403	},
 404	[MPOL_BIND] = {
 405		.create = mpol_new_bind,
 406		.rebind = mpol_rebind_nodemask,
 407	},
 408	[MPOL_LOCAL] = {
 409		.rebind = mpol_rebind_default,
 410	},
 411};
 412
 413static int migrate_page_add(struct page *page, struct list_head *pagelist,
 414				unsigned long flags);
 415
 416struct queue_pages {
 417	struct list_head *pagelist;
 418	unsigned long flags;
 419	nodemask_t *nmask;
 420	unsigned long start;
 421	unsigned long end;
 422	struct vm_area_struct *first;
 423};
 424
 425/*
 426 * Check if the page's nid is in qp->nmask.
 427 *
 428 * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
 429 * in the invert of qp->nmask.
 430 */
 431static inline bool queue_pages_required(struct page *page,
 432					struct queue_pages *qp)
 433{
 434	int nid = page_to_nid(page);
 435	unsigned long flags = qp->flags;
 436
 437	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
 438}
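/*
 * Note that do_mbind() below passes MPOL_MF_INVERT, so in that path this
 * test selects pages that are *not* on the requested nodes, i.e. the
 * misplaced pages that need to be queued for migration.
 */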
 439
 440/*
 441 * queue_pages_pmd() has four possible return values:
 442 * 0 - pages are placed on the right node or queued successfully, or
 443 *     special page is met, i.e. huge zero page.
  444 * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 445 *     specified.
 446 * 2 - THP was split.
 447 * -EIO - is migration entry or only MPOL_MF_STRICT was specified and an
 448 *        existing page was already on a node that does not follow the
 449 *        policy.
 450 */
 451static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
 452				unsigned long end, struct mm_walk *walk)
 453	__releases(ptl)
 454{
 455	int ret = 0;
 456	struct page *page;
 457	struct queue_pages *qp = walk->private;
 458	unsigned long flags;
 459
 460	if (unlikely(is_pmd_migration_entry(*pmd))) {
 461		ret = -EIO;
 462		goto unlock;
 463	}
 464	page = pmd_page(*pmd);
 465	if (is_huge_zero_page(page)) {
 466		spin_unlock(ptl);
 467		walk->action = ACTION_CONTINUE;
 468		goto out;
 469	}
 470	if (!queue_pages_required(page, qp))
 471		goto unlock;
 472
 473	flags = qp->flags;
 474	/* go to thp migration */
 475	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
 476		if (!vma_migratable(walk->vma) ||
 477		    migrate_page_add(page, qp->pagelist, flags)) {
 478			ret = 1;
 479			goto unlock;
 480		}
 481	} else
 482		ret = -EIO;
 483unlock:
 484	spin_unlock(ptl);
 485out:
 486	return ret;
 487}
 488
 489/*
 490 * Scan through pages checking if pages follow certain conditions,
 491 * and move them to the pagelist if they do.
 492 *
 493 * queue_pages_pte_range() has three possible return values:
 494 * 0 - pages are placed on the right node or queued successfully, or
 495 *     special page is met, i.e. zero page.
  496 * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 497 *     specified.
 498 * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
 499 *        on a node that does not follow the policy.
 500 */
 501static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
 502			unsigned long end, struct mm_walk *walk)
 503{
 504	struct vm_area_struct *vma = walk->vma;
 505	struct page *page;
 506	struct queue_pages *qp = walk->private;
 507	unsigned long flags = qp->flags;
 508	int ret;
 509	bool has_unmovable = false;
 510	pte_t *pte, *mapped_pte;
 511	spinlock_t *ptl;
 512
 513	ptl = pmd_trans_huge_lock(pmd, vma);
 514	if (ptl) {
 515		ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
 516		if (ret != 2)
 517			return ret;
 518	}
 519	/* THP was split, fall through to pte walk */
 520
 521	if (pmd_trans_unstable(pmd))
 522		return 0;
 523
 524	mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
 525	for (; addr != end; pte++, addr += PAGE_SIZE) {
 526		if (!pte_present(*pte))
 527			continue;
 528		page = vm_normal_page(vma, addr, *pte);
 529		if (!page)
 530			continue;
 531		/*
 532		 * vm_normal_page() filters out zero pages, but there might
 533		 * still be PageReserved pages to skip, perhaps in a VDSO.
 534		 */
 535		if (PageReserved(page))
 536			continue;
 537		if (!queue_pages_required(page, qp))
 538			continue;
 539		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
 540			/* MPOL_MF_STRICT must be specified if we get here */
 541			if (!vma_migratable(vma)) {
 542				has_unmovable = true;
 543				break;
 544			}
 545
 546			/*
 547			 * Do not abort immediately since there may be
  548			 * temporarily off-LRU pages in the range.  Still
  549			 * need to migrate other LRU pages.
 550			 */
 551			if (migrate_page_add(page, qp->pagelist, flags))
 552				has_unmovable = true;
 553		} else
 554			break;
 555	}
 556	pte_unmap_unlock(mapped_pte, ptl);
 557	cond_resched();
 558
 559	if (has_unmovable)
 560		return 1;
 561
 562	return addr != end ? -EIO : 0;
 563}
 564
 565static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
 566			       unsigned long addr, unsigned long end,
 567			       struct mm_walk *walk)
 568{
 569	int ret = 0;
 570#ifdef CONFIG_HUGETLB_PAGE
 571	struct queue_pages *qp = walk->private;
 572	unsigned long flags = (qp->flags & MPOL_MF_VALID);
 573	struct page *page;
 574	spinlock_t *ptl;
 575	pte_t entry;
 576
 577	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
 578	entry = huge_ptep_get(pte);
 579	if (!pte_present(entry))
 580		goto unlock;
 581	page = pte_page(entry);
 582	if (!queue_pages_required(page, qp))
 583		goto unlock;
 584
 585	if (flags == MPOL_MF_STRICT) {
 586		/*
 587		 * STRICT alone means only detecting misplaced page and no
 588		 * need to further check other vma.
 589		 */
 590		ret = -EIO;
 591		goto unlock;
 592	}
 593
 594	if (!vma_migratable(walk->vma)) {
 595		/*
  596		 * Must be STRICT with MOVE*, otherwise .test_walk() would have
  597		 * stopped walking the current vma.
 598		 * Detecting misplaced page but allow migrating pages which
 599		 * have been queued.
 600		 */
 601		ret = 1;
 602		goto unlock;
 603	}
 604
 605	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
 606	if (flags & (MPOL_MF_MOVE_ALL) ||
 607	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1)) {
 608		if (!isolate_huge_page(page, qp->pagelist) &&
 609			(flags & MPOL_MF_STRICT))
 610			/*
 611			 * Failed to isolate page but allow migrating pages
 612			 * which have been queued.
 613			 */
 614			ret = 1;
 615	}
 616unlock:
 617	spin_unlock(ptl);
 618#else
 619	BUG();
 620#endif
 621	return ret;
 622}
 623
 624#ifdef CONFIG_NUMA_BALANCING
 625/*
 626 * This is used to mark a range of virtual addresses to be inaccessible.
 627 * These are later cleared by a NUMA hinting fault. Depending on these
 628 * faults, pages may be migrated for better NUMA placement.
 629 *
 630 * This is assuming that NUMA faults are handled using PROT_NONE. If
 631 * an architecture makes a different choice, it will need further
 632 * changes to the core.
 633 */
 634unsigned long change_prot_numa(struct vm_area_struct *vma,
 635			unsigned long addr, unsigned long end)
 636{
 637	int nr_updated;
 638
 639	nr_updated = change_protection(vma, addr, end, PAGE_NONE, MM_CP_PROT_NUMA);
 640	if (nr_updated)
 641		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
 642
 643	return nr_updated;
 644}
 645#else
 646static unsigned long change_prot_numa(struct vm_area_struct *vma,
 647			unsigned long addr, unsigned long end)
 648{
 649	return 0;
 650}
 651#endif /* CONFIG_NUMA_BALANCING */
 652
 653static int queue_pages_test_walk(unsigned long start, unsigned long end,
 654				struct mm_walk *walk)
 655{
 656	struct vm_area_struct *vma = walk->vma;
 657	struct queue_pages *qp = walk->private;
 658	unsigned long endvma = vma->vm_end;
 659	unsigned long flags = qp->flags;
 660
 661	/* range check first */
 662	VM_BUG_ON_VMA(!range_in_vma(vma, start, end), vma);
 663
 664	if (!qp->first) {
 665		qp->first = vma;
 666		if (!(flags & MPOL_MF_DISCONTIG_OK) &&
 667			(qp->start < vma->vm_start))
 668			/* hole at head side of range */
 669			return -EFAULT;
 670	}
 671	if (!(flags & MPOL_MF_DISCONTIG_OK) &&
 672		((vma->vm_end < qp->end) &&
 673		(!vma->vm_next || vma->vm_end < vma->vm_next->vm_start)))
 674		/* hole at middle or tail of range */
 675		return -EFAULT;
 676
 677	/*
 678	 * Need check MPOL_MF_STRICT to return -EIO if possible
 679	 * regardless of vma_migratable
 680	 */
 681	if (!vma_migratable(vma) &&
 682	    !(flags & MPOL_MF_STRICT))
 683		return 1;
 684
 685	if (endvma > end)
 686		endvma = end;
 687
 688	if (flags & MPOL_MF_LAZY) {
 689		/* Similar to task_numa_work, skip inaccessible VMAs */
 690		if (!is_vm_hugetlb_page(vma) && vma_is_accessible(vma) &&
 691			!(vma->vm_flags & VM_MIXEDMAP))
 692			change_prot_numa(vma, start, endvma);
 693		return 1;
 694	}
 695
 696	/* queue pages from current vma */
 697	if (flags & MPOL_MF_VALID)
 698		return 0;
 699	return 1;
 700}
 701
 702static const struct mm_walk_ops queue_pages_walk_ops = {
 703	.hugetlb_entry		= queue_pages_hugetlb,
 704	.pmd_entry		= queue_pages_pte_range,
 705	.test_walk		= queue_pages_test_walk,
 706};
 707
 708/*
 709 * Walk through page tables and collect pages to be migrated.
 710 *
 711 * If pages found in a given range are on a set of nodes (determined by
  712 * @nodes and @flags), they are isolated and queued to the pagelist which is
 713 * passed via @private.
 714 *
 715 * queue_pages_range() has three possible return values:
  716 * 1 - there is an unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were
 717 *     specified.
 718 * 0 - queue pages successfully or no misplaced page.
 719 * errno - i.e. misplaced pages with MPOL_MF_STRICT specified (-EIO) or
 720 *         memory range specified by nodemask and maxnode points outside
 721 *         your accessible address space (-EFAULT)
 722 */
 723static int
 724queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
 725		nodemask_t *nodes, unsigned long flags,
 726		struct list_head *pagelist)
 727{
 728	int err;
 729	struct queue_pages qp = {
 730		.pagelist = pagelist,
 731		.flags = flags,
 732		.nmask = nodes,
 733		.start = start,
 734		.end = end,
 735		.first = NULL,
 736	};
 737
 738	err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp);
 739
 740	if (!qp.first)
 741		/* whole range in hole */
 742		err = -EFAULT;
 743
 744	return err;
 745}
 746
 747/*
 748 * Apply policy to a single VMA
 749 * This must be called with the mmap_lock held for writing.
 750 */
 751static int vma_replace_policy(struct vm_area_struct *vma,
 752						struct mempolicy *pol)
 753{
 754	int err;
 755	struct mempolicy *old;
 756	struct mempolicy *new;
 757
 758	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
 759		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
 760		 vma->vm_ops, vma->vm_file,
 761		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
 762
 763	new = mpol_dup(pol);
 764	if (IS_ERR(new))
 765		return PTR_ERR(new);
 766
 767	if (vma->vm_ops && vma->vm_ops->set_policy) {
 768		err = vma->vm_ops->set_policy(vma, new);
 769		if (err)
 770			goto err_out;
 771	}
 772
 773	old = vma->vm_policy;
 774	vma->vm_policy = new; /* protected by mmap_lock */
 775	mpol_put(old);
 776
 777	return 0;
 778 err_out:
 779	mpol_put(new);
 780	return err;
 781}
 782
 783/* Step 2: apply policy to a range and do splits. */
 784static int mbind_range(struct mm_struct *mm, unsigned long start,
 785		       unsigned long end, struct mempolicy *new_pol)
 786{
 787	struct vm_area_struct *next;
 788	struct vm_area_struct *prev;
 789	struct vm_area_struct *vma;
 790	int err = 0;
 791	pgoff_t pgoff;
 792	unsigned long vmstart;
 793	unsigned long vmend;
 794
 795	vma = find_vma(mm, start);
 796	VM_BUG_ON(!vma);
 797
 798	prev = vma->vm_prev;
 799	if (start > vma->vm_start)
 800		prev = vma;
 801
 802	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
 803		next = vma->vm_next;
 804		vmstart = max(start, vma->vm_start);
 805		vmend   = min(end, vma->vm_end);
 806
 807		if (mpol_equal(vma_policy(vma), new_pol))
 808			continue;
 809
 810		pgoff = vma->vm_pgoff +
 811			((vmstart - vma->vm_start) >> PAGE_SHIFT);
 812		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
 813				 vma->anon_vma, vma->vm_file, pgoff,
 814				 new_pol, vma->vm_userfaultfd_ctx);
 815		if (prev) {
 816			vma = prev;
 817			next = vma->vm_next;
 818			if (mpol_equal(vma_policy(vma), new_pol))
 819				continue;
 820			/* vma_merge() joined vma && vma->next, case 8 */
 821			goto replace;
 822		}
 823		if (vma->vm_start != vmstart) {
 824			err = split_vma(vma->vm_mm, vma, vmstart, 1);
 825			if (err)
 826				goto out;
 827		}
 828		if (vma->vm_end != vmend) {
 829			err = split_vma(vma->vm_mm, vma, vmend, 0);
 830			if (err)
 831				goto out;
 832		}
 833 replace:
 834		err = vma_replace_policy(vma, new_pol);
 835		if (err)
 836			goto out;
 837	}
 838
 839 out:
 840	return err;
 841}
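/*
 * In the loop above, each vma overlapping [start, end) is either merged
 * with its predecessor by vma_merge() (which already carries new_pol) or
 * split at vmstart/vmend, so that exactly the requested range receives
 * the new policy via vma_replace_policy().
 */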
 842
 843/* Set the process memory policy */
 844static long do_set_mempolicy(unsigned short mode, unsigned short flags,
 845			     nodemask_t *nodes)
 846{
 847	struct mempolicy *new, *old;
 848	NODEMASK_SCRATCH(scratch);
 849	int ret;
 850
 851	if (!scratch)
 852		return -ENOMEM;
 853
 854	new = mpol_new(mode, flags, nodes);
 855	if (IS_ERR(new)) {
 856		ret = PTR_ERR(new);
 857		goto out;
 858	}
 859
 860	ret = mpol_set_nodemask(new, nodes, scratch);
 861	if (ret) {
 862		mpol_put(new);
 863		goto out;
 864	}
 865	task_lock(current);
 866	old = current->mempolicy;
 867	current->mempolicy = new;
 868	if (new && new->mode == MPOL_INTERLEAVE)
 869		current->il_prev = MAX_NUMNODES-1;
 870	task_unlock(current);
 871	mpol_put(old);
 872	ret = 0;
 873out:
 874	NODEMASK_SCRATCH_FREE(scratch);
 875	return ret;
 876}
 877
 878/*
 879 * Return nodemask for policy for get_mempolicy() query
 880 *
 881 * Called with task's alloc_lock held
 882 */
 883static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
 884{
 885	nodes_clear(*nodes);
 886	if (p == &default_policy)
 887		return;
 888
 889	switch (p->mode) {
 890	case MPOL_BIND:
 891	case MPOL_INTERLEAVE:
 892	case MPOL_PREFERRED:
 893		*nodes = p->nodes;
 894		break;
 895	case MPOL_LOCAL:
 896		/* return empty node mask for local allocation */
 897		break;
 898	default:
 899		BUG();
 900	}
 901}
 902
 903static int lookup_node(struct mm_struct *mm, unsigned long addr)
 904{
 905	struct page *p = NULL;
 906	int err;
 907
 908	int locked = 1;
 909	err = get_user_pages_locked(addr & PAGE_MASK, 1, 0, &p, &locked);
 910	if (err > 0) {
 911		err = page_to_nid(p);
 912		put_page(p);
 913	}
 914	if (locked)
 915		mmap_read_unlock(mm);
 916	return err;
 917}
 918
 919/* Retrieve NUMA policy */
 920static long do_get_mempolicy(int *policy, nodemask_t *nmask,
 921			     unsigned long addr, unsigned long flags)
 922{
 923	int err;
 924	struct mm_struct *mm = current->mm;
 925	struct vm_area_struct *vma = NULL;
 926	struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;
 927
 928	if (flags &
 929		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
 930		return -EINVAL;
 931
 932	if (flags & MPOL_F_MEMS_ALLOWED) {
 933		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
 934			return -EINVAL;
 935		*policy = 0;	/* just so it's initialized */
 936		task_lock(current);
 937		*nmask  = cpuset_current_mems_allowed;
 938		task_unlock(current);
 939		return 0;
 940	}
 941
 942	if (flags & MPOL_F_ADDR) {
 943		/*
 944		 * Do NOT fall back to task policy if the
 945		 * vma/shared policy at addr is NULL.  We
 946		 * want to return MPOL_DEFAULT in this case.
 947		 */
 948		mmap_read_lock(mm);
 949		vma = vma_lookup(mm, addr);
 950		if (!vma) {
 951			mmap_read_unlock(mm);
 952			return -EFAULT;
 953		}
 954		if (vma->vm_ops && vma->vm_ops->get_policy)
 955			pol = vma->vm_ops->get_policy(vma, addr);
 956		else
 957			pol = vma->vm_policy;
 958	} else if (addr)
 959		return -EINVAL;
 960
 961	if (!pol)
 962		pol = &default_policy;	/* indicates default behavior */
 963
 964	if (flags & MPOL_F_NODE) {
 965		if (flags & MPOL_F_ADDR) {
 966			/*
 967			 * Take a refcount on the mpol, lookup_node()
 968			 * will drop the mmap_lock, so after calling
 969			 * lookup_node() only "pol" remains valid, "vma"
 970			 * is stale.
 971			 */
 972			pol_refcount = pol;
 973			vma = NULL;
 974			mpol_get(pol);
 975			err = lookup_node(mm, addr);
 976			if (err < 0)
 977				goto out;
 978			*policy = err;
 979		} else if (pol == current->mempolicy &&
 980				pol->mode == MPOL_INTERLEAVE) {
 981			*policy = next_node_in(current->il_prev, pol->nodes);
 982		} else {
 983			err = -EINVAL;
 984			goto out;
 985		}
 986	} else {
 987		*policy = pol == &default_policy ? MPOL_DEFAULT :
 988						pol->mode;
 989		/*
 990		 * Internal mempolicy flags must be masked off before exposing
 991		 * the policy to userspace.
 992		 */
 993		*policy |= (pol->flags & MPOL_MODE_FLAGS);
 994	}
 995
 996	err = 0;
 997	if (nmask) {
 998		if (mpol_store_user_nodemask(pol)) {
 999			*nmask = pol->w.user_nodemask;
1000		} else {
1001			task_lock(current);
1002			get_policy_nodemask(pol, nmask);
1003			task_unlock(current);
1004		}
1005	}
1006
1007 out:
1008	mpol_cond_put(pol);
1009	if (vma)
1010		mmap_read_unlock(mm);
1011	if (pol_refcount)
1012		mpol_put(pol_refcount);
1013	return err;
1014}
1015
1016#ifdef CONFIG_MIGRATION
1017/*
1018 * page migration, thp tail pages can be passed.
1019 */
1020static int migrate_page_add(struct page *page, struct list_head *pagelist,
1021				unsigned long flags)
1022{
1023	struct page *head = compound_head(page);
1024	/*
1025	 * Avoid migrating a page that is shared with others.
1026	 */
1027	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
1028		if (!isolate_lru_page(head)) {
1029			list_add_tail(&head->lru, pagelist);
1030			mod_node_page_state(page_pgdat(head),
1031				NR_ISOLATED_ANON + page_is_file_lru(head),
1032				thp_nr_pages(head));
1033		} else if (flags & MPOL_MF_STRICT) {
1034			/*
1035			 * Non-movable page may reach here.  And, there may be
1036			 * temporary off LRU pages or non-LRU movable pages.
1037			 * Treat them as unmovable pages since they can't be
1038			 * isolated, so they can't be moved at the moment.  It
1039			 * should return -EIO for this case too.
1040			 */
1041			return -EIO;
1042		}
1043	}
1044
1045	return 0;
1046}
1047
1048/*
1049 * Migrate pages from one node to a target node.
1050 * Returns error or the number of pages not migrated.
1051 */
1052static int migrate_to_node(struct mm_struct *mm, int source, int dest,
1053			   int flags)
1054{
1055	nodemask_t nmask;
1056	LIST_HEAD(pagelist);
1057	int err = 0;
1058	struct migration_target_control mtc = {
1059		.nid = dest,
1060		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
1061	};
1062
1063	nodes_clear(nmask);
1064	node_set(source, nmask);
1065
1066	/*
1067	 * This does not "check" the range but isolates all pages that
1068	 * need migration.  Between passing in the full user address
1069	 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
1070	 */
1071	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
1072	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
1073			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
1074
1075	if (!list_empty(&pagelist)) {
1076		err = migrate_pages(&pagelist, alloc_migration_target, NULL,
1077				(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL);
1078		if (err)
1079			putback_movable_pages(&pagelist);
1080	}
1081
1082	return err;
1083}
1084
1085/*
1086 * Move pages between the two nodesets so as to preserve the physical
1087 * layout as much as possible.
1088 *
 1089 * Returns the number of pages that could not be moved.
1090 */
1091int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1092		     const nodemask_t *to, int flags)
1093{
1094	int busy = 0;
1095	int err = 0;
1096	nodemask_t tmp;
1097
1098	lru_cache_disable();
1099
1100	mmap_read_lock(mm);
1101
1102	/*
1103	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
1104	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
1105	 * bit in 'tmp', and return that <source, dest> pair for migration.
1106	 * The pair of nodemasks 'to' and 'from' define the map.
1107	 *
1108	 * If no pair of bits is found that way, fallback to picking some
1109	 * pair of 'source' and 'dest' bits that are not the same.  If the
1110	 * 'source' and 'dest' bits are the same, this represents a node
1111	 * that will be migrating to itself, so no pages need move.
1112	 *
1113	 * If no bits are left in 'tmp', or if all remaining bits left
1114	 * in 'tmp' correspond to the same bit in 'to', return false
1115	 * (nothing left to migrate).
1116	 *
1117	 * This lets us pick a pair of nodes to migrate between, such that
1118	 * if possible the dest node is not already occupied by some other
1119	 * source node, minimizing the risk of overloading the memory on a
1120	 * node that would happen if we migrated incoming memory to a node
 1121	 * before migrating outgoing memory from that same node.
1122	 *
1123	 * A single scan of tmp is sufficient.  As we go, we remember the
1124	 * most recent <s, d> pair that moved (s != d).  If we find a pair
1125	 * that not only moved, but what's better, moved to an empty slot
1126	 * (d is not set in tmp), then we break out then, with that pair.
1127	 * Otherwise when we finish scanning from_tmp, we at least have the
1128	 * most recent <s, d> pair that moved.  If we get all the way through
1129	 * the scan of tmp without finding any node that moved, much less
1130	 * moved to an empty node, then there is nothing left worth migrating.
1131	 */
1132
1133	tmp = *from;
1134	while (!nodes_empty(tmp)) {
1135		int s, d;
1136		int source = NUMA_NO_NODE;
1137		int dest = 0;
1138
1139		for_each_node_mask(s, tmp) {
1140
1141			/*
1142			 * do_migrate_pages() tries to maintain the relative
1143			 * node relationship of the pages established between
1144			 * threads and memory areas.
 1145			 *
1146			 * However if the number of source nodes is not equal to
1147			 * the number of destination nodes we can not preserve
1148			 * this node relative relationship.  In that case, skip
1149			 * copying memory from a node that is in the destination
1150			 * mask.
1151			 *
1152			 * Example: [2,3,4] -> [3,4,5] moves everything.
1153			 *          [0-7] - > [3,4,5] moves only 0,1,2,6,7.
1154			 */
1155
1156			if ((nodes_weight(*from) != nodes_weight(*to)) &&
1157						(node_isset(s, *to)))
1158				continue;
1159
1160			d = node_remap(s, *from, *to);
1161			if (s == d)
1162				continue;
1163
1164			source = s;	/* Node moved. Memorize */
1165			dest = d;
1166
1167			/* dest not in remaining from nodes? */
1168			if (!node_isset(dest, tmp))
1169				break;
1170		}
1171		if (source == NUMA_NO_NODE)
1172			break;
1173
1174		node_clear(source, tmp);
1175		err = migrate_to_node(mm, source, dest, flags);
1176		if (err > 0)
1177			busy += err;
1178		if (err < 0)
1179			break;
1180	}
1181	mmap_read_unlock(mm);
1182
1183	lru_cache_enable();
1184	if (err < 0)
1185		return err;
1186	return busy;
1187
1188}
1189
1190/*
1191 * Allocate a new page for page migration based on vma policy.
1192 * Start by assuming the page is mapped by the same vma as contains @start.
1193 * Search forward from there, if not.  N.B., this assumes that the
1194 * list of pages handed to migrate_pages()--which is how we get here--
1195 * is in virtual address order.
1196 */
1197static struct page *new_page(struct page *page, unsigned long start)
1198{
1199	struct vm_area_struct *vma;
1200	unsigned long address;
1201
1202	vma = find_vma(current->mm, start);
1203	while (vma) {
1204		address = page_address_in_vma(page, vma);
1205		if (address != -EFAULT)
1206			break;
1207		vma = vma->vm_next;
1208	}
1209
1210	if (PageHuge(page)) {
1211		return alloc_huge_page_vma(page_hstate(compound_head(page)),
1212				vma, address);
1213	} else if (PageTransHuge(page)) {
1214		struct page *thp;
1215
1216		thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
1217					 HPAGE_PMD_ORDER);
1218		if (!thp)
1219			return NULL;
1220		prep_transhuge_page(thp);
1221		return thp;
1222	}
1223	/*
1224	 * if !vma, alloc_page_vma() will use task or system default policy
1225	 */
1226	return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL,
1227			vma, address);
1228}
1229#else
1230
1231static int migrate_page_add(struct page *page, struct list_head *pagelist,
1232				unsigned long flags)
1233{
1234	return -EIO;
1235}
1236
1237int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1238		     const nodemask_t *to, int flags)
1239{
1240	return -ENOSYS;
1241}
1242
1243static struct page *new_page(struct page *page, unsigned long start)
1244{
1245	return NULL;
1246}
1247#endif
1248
1249static long do_mbind(unsigned long start, unsigned long len,
1250		     unsigned short mode, unsigned short mode_flags,
1251		     nodemask_t *nmask, unsigned long flags)
1252{
1253	struct mm_struct *mm = current->mm;
1254	struct mempolicy *new;
1255	unsigned long end;
1256	int err;
1257	int ret;
1258	LIST_HEAD(pagelist);
1259
1260	if (flags & ~(unsigned long)MPOL_MF_VALID)
1261		return -EINVAL;
1262	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1263		return -EPERM;
1264
1265	if (start & ~PAGE_MASK)
1266		return -EINVAL;
1267
1268	if (mode == MPOL_DEFAULT)
1269		flags &= ~MPOL_MF_STRICT;
1270
1271	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
1272	end = start + len;
1273
1274	if (end < start)
1275		return -EINVAL;
1276	if (end == start)
1277		return 0;
1278
1279	new = mpol_new(mode, mode_flags, nmask);
1280	if (IS_ERR(new))
1281		return PTR_ERR(new);
1282
1283	if (flags & MPOL_MF_LAZY)
1284		new->flags |= MPOL_F_MOF;
1285
1286	/*
1287	 * If we are using the default policy then operation
1288	 * on discontinuous address spaces is okay after all
1289	 */
1290	if (!new)
1291		flags |= MPOL_MF_DISCONTIG_OK;
1292
1293	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1294		 start, start + len, mode, mode_flags,
1295		 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
1296
1297	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1298
1299		lru_cache_disable();
1300	}
1301	{
1302		NODEMASK_SCRATCH(scratch);
1303		if (scratch) {
1304			mmap_write_lock(mm);
1305			err = mpol_set_nodemask(new, nmask, scratch);
1306			if (err)
1307				mmap_write_unlock(mm);
1308		} else
1309			err = -ENOMEM;
1310		NODEMASK_SCRATCH_FREE(scratch);
1311	}
1312	if (err)
1313		goto mpol_out;
1314
1315	ret = queue_pages_range(mm, start, end, nmask,
1316			  flags | MPOL_MF_INVERT, &pagelist);
1317
1318	if (ret < 0) {
1319		err = ret;
1320		goto up_out;
1321	}
1322
1323	err = mbind_range(mm, start, end, new);
1324
1325	if (!err) {
1326		int nr_failed = 0;
1327
1328		if (!list_empty(&pagelist)) {
1329			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
1330			nr_failed = migrate_pages(&pagelist, new_page, NULL,
1331				start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
1332			if (nr_failed)
1333				putback_movable_pages(&pagelist);
1334		}
1335
1336		if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
1337			err = -EIO;
1338	} else {
1339up_out:
1340		if (!list_empty(&pagelist))
1341			putback_movable_pages(&pagelist);
1342	}
1343
1344	mmap_write_unlock(mm);
1345mpol_out:
1346	mpol_put(new);
1347	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
1348		lru_cache_enable();
1349	return err;
1350}
1351
1352/*
1353 * User space interface with variable sized bitmaps for nodelists.
1354 */
1355
1356/* Copy a node mask from user space. */
1357static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
1358		     unsigned long maxnode)
1359{
1360	unsigned long k;
1361	unsigned long t;
1362	unsigned long nlongs;
1363	unsigned long endmask;
1364
1365	--maxnode;
1366	nodes_clear(*nodes);
1367	if (maxnode == 0 || !nmask)
1368		return 0;
1369	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1370		return -EINVAL;
1371
1372	nlongs = BITS_TO_LONGS(maxnode);
1373	if ((maxnode % BITS_PER_LONG) == 0)
1374		endmask = ~0UL;
1375	else
1376		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
1377
1378	/*
 1379	 * When the user specifies more nodes than supported, just check
 1380	 * that the unsupported part is all zero.
 1381	 *
 1382	 * If maxnode covers more longs than MAX_NUMNODES, check
 1383	 * the bits in that area first, and then go through to
 1384	 * check the remaining bits, which are equal to or bigger than MAX_NUMNODES.
1385	 * Otherwise, just check bits [MAX_NUMNODES, maxnode).
1386	 */
1387	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
1388		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
1389			if (get_user(t, nmask + k))
1390				return -EFAULT;
1391			if (k == nlongs - 1) {
1392				if (t & endmask)
1393					return -EINVAL;
1394			} else if (t)
1395				return -EINVAL;
1396		}
1397		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
1398		endmask = ~0UL;
1399	}
1400
1401	if (maxnode > MAX_NUMNODES && MAX_NUMNODES % BITS_PER_LONG != 0) {
1402		unsigned long valid_mask = endmask;
1403
1404		valid_mask &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
1405		if (get_user(t, nmask + nlongs - 1))
1406			return -EFAULT;
1407		if (t & valid_mask)
1408			return -EINVAL;
1409	}
1410
1411	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
1412		return -EFAULT;
1413	nodes_addr(*nodes)[nlongs-1] &= endmask;
1414	return 0;
1415}
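/*
 * Worked example (64-bit longs assumed): a caller passing maxnode == 36
 * is left with 35 significant bits after the initial decrement, i.e.
 * node IDs 0..34.  A single long is copied from user space and masked
 * with endmask == (1UL << 35) - 1, discarding any stray higher bits.
 */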
1416
1417/* Copy a kernel node mask to user space */
1418static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1419			      nodemask_t *nodes)
1420{
1421	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1422	unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
1423
1424	if (copy > nbytes) {
1425		if (copy > PAGE_SIZE)
1426			return -EINVAL;
1427		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1428			return -EFAULT;
1429		copy = nbytes;
1430	}
1431	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1432}
1433
1434/* Basic parameter sanity check used by both mbind() and set_mempolicy() */
1435static inline int sanitize_mpol_flags(int *mode, unsigned short *flags)
1436{
1437	*flags = *mode & MPOL_MODE_FLAGS;
1438	*mode &= ~MPOL_MODE_FLAGS;
1439	if ((unsigned int)(*mode) >= MPOL_MAX)
1440		return -EINVAL;
1441	if ((*flags & MPOL_F_STATIC_NODES) && (*flags & MPOL_F_RELATIVE_NODES))
1442		return -EINVAL;
1443	if (*flags & MPOL_F_NUMA_BALANCING) {
1444		if (*mode != MPOL_BIND)
1445			return -EINVAL;
1446		*flags |= (MPOL_F_MOF | MPOL_F_MORON);
1447	}
1448	return 0;
1449}
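/*
 * Example: user space may pass mode == (MPOL_INTERLEAVE | MPOL_F_STATIC_NODES).
 * The helper above splits that into *mode == MPOL_INTERLEAVE and
 * *flags == MPOL_F_STATIC_NODES, and rejects MPOL_F_STATIC_NODES combined
 * with MPOL_F_RELATIVE_NODES.
 */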
1450
1451static long kernel_mbind(unsigned long start, unsigned long len,
1452			 unsigned long mode, const unsigned long __user *nmask,
1453			 unsigned long maxnode, unsigned int flags)
1454{
1455	unsigned short mode_flags;
1456	nodemask_t nodes;
1457	int lmode = mode;
1458	int err;
1459
1460	start = untagged_addr(start);
1461	err = sanitize_mpol_flags(&lmode, &mode_flags);
1462	if (err)
1463		return err;
1464
1465	err = get_nodes(&nodes, nmask, maxnode);
1466	if (err)
1467		return err;
1468
1469	return do_mbind(start, len, lmode, mode_flags, &nodes, flags);
1470}
1471
1472SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1473		unsigned long, mode, const unsigned long __user *, nmask,
1474		unsigned long, maxnode, unsigned int, flags)
1475{
1476	return kernel_mbind(start, len, mode, nmask, maxnode, flags);
1477}
1478
1479/* Set the process memory policy */
1480static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
1481				 unsigned long maxnode)
1482{
1483	unsigned short mode_flags;
1484	nodemask_t nodes;
1485	int lmode = mode;
1486	int err;
1487
1488	err = sanitize_mpol_flags(&lmode, &mode_flags);
1489	if (err)
1490		return err;
1491
1492	err = get_nodes(&nodes, nmask, maxnode);
1493	if (err)
1494		return err;
1495
1496	return do_set_mempolicy(lmode, mode_flags, &nodes);
1497}
1498
1499SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1500		unsigned long, maxnode)
1501{
1502	return kernel_set_mempolicy(mode, nmask, maxnode);
1503}
1504
1505static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
1506				const unsigned long __user *old_nodes,
1507				const unsigned long __user *new_nodes)
1508{
1509	struct mm_struct *mm = NULL;
1510	struct task_struct *task;
1511	nodemask_t task_nodes;
1512	int err;
1513	nodemask_t *old;
1514	nodemask_t *new;
1515	NODEMASK_SCRATCH(scratch);
1516
1517	if (!scratch)
1518		return -ENOMEM;
1519
1520	old = &scratch->mask1;
1521	new = &scratch->mask2;
1522
1523	err = get_nodes(old, old_nodes, maxnode);
1524	if (err)
1525		goto out;
1526
1527	err = get_nodes(new, new_nodes, maxnode);
1528	if (err)
1529		goto out;
1530
1531	/* Find the mm_struct */
1532	rcu_read_lock();
1533	task = pid ? find_task_by_vpid(pid) : current;
1534	if (!task) {
1535		rcu_read_unlock();
1536		err = -ESRCH;
1537		goto out;
1538	}
1539	get_task_struct(task);
1540
1541	err = -EINVAL;
1542
1543	/*
1544	 * Check if this process has the right to modify the specified process.
1545	 * Use the regular "ptrace_may_access()" checks.
1546	 */
1547	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
1548		rcu_read_unlock();
1549		err = -EPERM;
1550		goto out_put;
1551	}
1552	rcu_read_unlock();
1553
1554	task_nodes = cpuset_mems_allowed(task);
1555	/* Is the user allowed to access the target nodes? */
1556	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
1557		err = -EPERM;
1558		goto out_put;
1559	}
1560
1561	task_nodes = cpuset_mems_allowed(current);
1562	nodes_and(*new, *new, task_nodes);
1563	if (nodes_empty(*new))
1564		goto out_put;
1565
1566	err = security_task_movememory(task);
1567	if (err)
1568		goto out_put;
1569
1570	mm = get_task_mm(task);
1571	put_task_struct(task);
1572
1573	if (!mm) {
1574		err = -EINVAL;
1575		goto out;
1576	}
1577
1578	err = do_migrate_pages(mm, old, new,
1579		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
1580
1581	mmput(mm);
1582out:
1583	NODEMASK_SCRATCH_FREE(scratch);
1584
1585	return err;
1586
1587out_put:
1588	put_task_struct(task);
1589	goto out;
1590
1591}
1592
1593SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1594		const unsigned long __user *, old_nodes,
1595		const unsigned long __user *, new_nodes)
1596{
1597	return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
1598}
1599
1600
1601/* Retrieve NUMA policy */
1602static int kernel_get_mempolicy(int __user *policy,
1603				unsigned long __user *nmask,
1604				unsigned long maxnode,
1605				unsigned long addr,
1606				unsigned long flags)
1607{
1608	int err;
1609	int pval;
1610	nodemask_t nodes;
1611
1612	if (nmask != NULL && maxnode < nr_node_ids)
1613		return -EINVAL;
1614
1615	addr = untagged_addr(addr);
1616
1617	err = do_get_mempolicy(&pval, &nodes, addr, flags);
1618
1619	if (err)
1620		return err;
1621
1622	if (policy && put_user(pval, policy))
1623		return -EFAULT;
1624
1625	if (nmask)
1626		err = copy_nodes_to_user(nmask, maxnode, &nodes);
1627
1628	return err;
1629}
1630
1631SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1632		unsigned long __user *, nmask, unsigned long, maxnode,
1633		unsigned long, addr, unsigned long, flags)
1634{
1635	return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
1636}
1637
1638#ifdef CONFIG_COMPAT
1639
1640COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1641		       compat_ulong_t __user *, nmask,
1642		       compat_ulong_t, maxnode,
1643		       compat_ulong_t, addr, compat_ulong_t, flags)
1644{
1645	long err;
1646	unsigned long __user *nm = NULL;
1647	unsigned long nr_bits, alloc_size;
1648	DECLARE_BITMAP(bm, MAX_NUMNODES);
1649
1650	nr_bits = min_t(unsigned long, maxnode-1, nr_node_ids);
1651	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1652
1653	if (nmask)
1654		nm = compat_alloc_user_space(alloc_size);
1655
1656	err = kernel_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1657
1658	if (!err && nmask) {
1659		unsigned long copy_size;
1660		copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
1661		err = copy_from_user(bm, nm, copy_size);
1662		/* ensure entire bitmap is zeroed */
1663		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1664		err |= compat_put_bitmap(nmask, bm, nr_bits);
1665	}
1666
1667	return err;
1668}
1669
1670COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
1671		       compat_ulong_t, maxnode)
1672{
1673	unsigned long __user *nm = NULL;
1674	unsigned long nr_bits, alloc_size;
1675	DECLARE_BITMAP(bm, MAX_NUMNODES);
1676
1677	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1678	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1679
1680	if (nmask) {
1681		if (compat_get_bitmap(bm, nmask, nr_bits))
1682			return -EFAULT;
1683		nm = compat_alloc_user_space(alloc_size);
1684		if (copy_to_user(nm, bm, alloc_size))
1685			return -EFAULT;
1686	}
1687
1688	return kernel_set_mempolicy(mode, nm, nr_bits+1);
1689}
1690
1691COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
1692		       compat_ulong_t, mode, compat_ulong_t __user *, nmask,
1693		       compat_ulong_t, maxnode, compat_ulong_t, flags)
1694{
1695	unsigned long __user *nm = NULL;
1696	unsigned long nr_bits, alloc_size;
1697	nodemask_t bm;
1698
1699	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1700	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1701
1702	if (nmask) {
1703		if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
1704			return -EFAULT;
1705		nm = compat_alloc_user_space(alloc_size);
1706		if (copy_to_user(nm, nodes_addr(bm), alloc_size))
1707			return -EFAULT;
1708	}
1709
1710	return kernel_mbind(start, len, mode, nm, nr_bits+1, flags);
1711}
1712
1713COMPAT_SYSCALL_DEFINE4(migrate_pages, compat_pid_t, pid,
1714		       compat_ulong_t, maxnode,
1715		       const compat_ulong_t __user *, old_nodes,
1716		       const compat_ulong_t __user *, new_nodes)
1717{
1718	unsigned long __user *old = NULL;
1719	unsigned long __user *new = NULL;
1720	nodemask_t tmp_mask;
1721	unsigned long nr_bits;
1722	unsigned long size;
1723
1724	nr_bits = min_t(unsigned long, maxnode - 1, MAX_NUMNODES);
1725	size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1726	if (old_nodes) {
1727		if (compat_get_bitmap(nodes_addr(tmp_mask), old_nodes, nr_bits))
1728			return -EFAULT;
1729		old = compat_alloc_user_space(new_nodes ? size * 2 : size);
1730		if (new_nodes)
1731			new = old + size / sizeof(unsigned long);
1732		if (copy_to_user(old, nodes_addr(tmp_mask), size))
1733			return -EFAULT;
1734	}
1735	if (new_nodes) {
1736		if (compat_get_bitmap(nodes_addr(tmp_mask), new_nodes, nr_bits))
1737			return -EFAULT;
1738		if (new == NULL)
1739			new = compat_alloc_user_space(size);
1740		if (copy_to_user(new, nodes_addr(tmp_mask), size))
1741			return -EFAULT;
1742	}
1743	return kernel_migrate_pages(pid, nr_bits + 1, old, new);
1744}
1745
1746#endif /* CONFIG_COMPAT */
1747
1748bool vma_migratable(struct vm_area_struct *vma)
1749{
1750	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
1751		return false;
1752
1753	/*
1754	 * DAX device mappings require predictable access latency, so avoid
1755	 * incurring periodic faults.
1756	 */
1757	if (vma_is_dax(vma))
1758		return false;
1759
1760	if (is_vm_hugetlb_page(vma) &&
1761		!hugepage_migration_supported(hstate_vma(vma)))
1762		return false;
1763
1764	/*
1765	 * Migration allocates pages in the highest zone. If we cannot
1766	 * do so then migration (at least from node to node) is not
1767	 * possible.
1768	 */
1769	if (vma->vm_file &&
1770		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
1771			< policy_zone)
1772		return false;
1773	return true;
1774}
1775
1776struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
1777						unsigned long addr)
1778{
1779	struct mempolicy *pol = NULL;
1780
1781	if (vma) {
1782		if (vma->vm_ops && vma->vm_ops->get_policy) {
1783			pol = vma->vm_ops->get_policy(vma, addr);
1784		} else if (vma->vm_policy) {
1785			pol = vma->vm_policy;
1786
1787			/*
1788			 * shmem_alloc_page() passes MPOL_F_SHARED policy with
1789			 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
1790			 * count on these policies which will be dropped by
1791			 * mpol_cond_put() later
1792			 */
1793			if (mpol_needs_cond_ref(pol))
1794				mpol_get(pol);
1795		}
1796	}
1797
1798	return pol;
1799}
1800
1801/*
1802 * get_vma_policy(@vma, @addr)
1803 * @vma: virtual memory area whose policy is sought
1804 * @addr: address in @vma for shared policy lookup
1805 *
1806 * Returns effective policy for a VMA at specified address.
1807 * Falls back to current->mempolicy or system default policy, as necessary.
1808 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1809 * count--added by the get_policy() vm_op, as appropriate--to protect against
1810 * freeing by another task.  It is the caller's responsibility to free the
1811 * extra reference for shared policies.
1812 */
1813static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1814						unsigned long addr)
1815{
1816	struct mempolicy *pol = __get_vma_policy(vma, addr);
1817
1818	if (!pol)
1819		pol = get_task_policy(current);
1820
1821	return pol;
1822}
1823
1824bool vma_policy_mof(struct vm_area_struct *vma)
1825{
1826	struct mempolicy *pol;
1827
1828	if (vma->vm_ops && vma->vm_ops->get_policy) {
1829		bool ret = false;
1830
1831		pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1832		if (pol && (pol->flags & MPOL_F_MOF))
1833			ret = true;
1834		mpol_cond_put(pol);
1835
1836		return ret;
1837	}
1838
1839	pol = vma->vm_policy;
1840	if (!pol)
1841		pol = get_task_policy(current);
1842
1843	return pol->flags & MPOL_F_MOF;
1844}
1845
1846static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1847{
1848	enum zone_type dynamic_policy_zone = policy_zone;
1849
1850	BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1851
1852	/*
1853	 * if policy->nodes has movable memory only,
1854	 * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only.
1855	 *
 1856	 * policy->nodes is intersected with node_states[N_MEMORY].
1857	 * so if the following test fails, it implies
1858	 * policy->nodes has movable memory only.
1859	 */
1860	if (!nodes_intersects(policy->nodes, node_states[N_HIGH_MEMORY]))
1861		dynamic_policy_zone = ZONE_MOVABLE;
1862
1863	return zone >= dynamic_policy_zone;
1864}
1865
1866/*
1867 * Return a nodemask representing a mempolicy for filtering nodes for
1868 * page allocation
1869 */
1870nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
1871{
1872	/* Lower zones don't get a nodemask applied for MPOL_BIND */
1873	if (unlikely(policy->mode == MPOL_BIND) &&
1874			apply_policy_zone(policy, gfp_zone(gfp)) &&
1875			cpuset_nodemask_valid_mems_allowed(&policy->nodes))
1876		return &policy->nodes;
1877
1878	return NULL;
1879}
1880
1881/* Return the node id preferred by the given mempolicy, or the given id */
1882static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd)
1883{
1884	if (policy->mode == MPOL_PREFERRED) {
1885		nd = first_node(policy->nodes);
1886	} else {
1887		/*
1888		 * __GFP_THISNODE shouldn't even be used with the bind policy
1889		 * because we might easily break the expectation to stay on the
1890		 * requested node and not break the policy.
1891		 */
1892		WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
1893	}
1894
1895	return nd;
1896}
1897
1898/* Do dynamic interleaving for a process */
1899static unsigned interleave_nodes(struct mempolicy *policy)
1900{
1901	unsigned next;
1902	struct task_struct *me = current;
1903
1904	next = next_node_in(me->il_prev, policy->nodes);
1905	if (next < MAX_NUMNODES)
1906		me->il_prev = next;
1907	return next;
1908}
1909
1910/*
1911 * Depending on the memory policy, provide a node from which to allocate the
1912 * next slab entry.
1913 */
1914unsigned int mempolicy_slab_node(void)
1915{
1916	struct mempolicy *policy;
1917	int node = numa_mem_id();
1918
1919	if (in_interrupt())
1920		return node;
1921
1922	policy = current->mempolicy;
1923	if (!policy)
1924		return node;
1925
1926	switch (policy->mode) {
1927	case MPOL_PREFERRED:
1928		return first_node(policy->nodes);
1929
1930	case MPOL_INTERLEAVE:
1931		return interleave_nodes(policy);
1932
1933	case MPOL_BIND: {
1934		struct zoneref *z;
1935
1936		/*
1937		 * Follow bind policy behavior and start allocation at the
1938		 * first node.
1939		 */
1940		struct zonelist *zonelist;
 
1941		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1942		zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
1943		z = first_zones_zonelist(zonelist, highest_zoneidx,
1944							&policy->nodes);
1945		return z->zone ? zone_to_nid(z->zone) : node;
 
1946	}
1947	case MPOL_LOCAL:
1948		return node;
1949
1950	default:
1951		BUG();
1952	}
1953}
1954
1955/*
1956 * Do static interleaving for a VMA with known offset @n.  Returns the n'th
1957 * node in pol->nodes (starting from n=0), wrapping around if n exceeds the
1958 * number of present nodes.
1959 */
1960static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
1961{
1962	nodemask_t nodemask = pol->nodes;
1963	unsigned int target, nnodes;
1964	int i;
1965	int nid;
1966	/*
1967	 * The barrier will stabilize the nodemask in a register or on
1968	 * the stack so that it will stop changing under the code.
1969	 *
1970	 * Between first_node() and next_node(), pol->nodes could be changed
1971	 * by other threads, so we copy pol->nodes into a local variable on the stack.
1972	 */
1973	barrier();
1974
1975	nnodes = nodes_weight(nodemask);
1976	if (!nnodes)
1977		return numa_node_id();
1978	target = (unsigned int)n % nnodes;
1979	nid = first_node(nodemask);
1980	for (i = 0; i < target; i++)
1981		nid = next_node(nid, nodemask);
 
 
1982	return nid;
1983}
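/*
 * Editor's note -- worked example, not part of the original source.
 * With pol->nodes = {0,2,3} and n = 5: nnodes = 3, target = 5 % 3 = 2,
 * so we start at node 0 and take two next_node() steps, 0 -> 2 -> 3,
 * and offset_il_node() returns node 3.
 */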
1984
1985/* Determine a node number for interleave */
1986static inline unsigned interleave_nid(struct mempolicy *pol,
1987		 struct vm_area_struct *vma, unsigned long addr, int shift)
1988{
1989	if (vma) {
1990		unsigned long off;
1991
1992		/*
1993		 * for small pages, there is no difference between
1994		 * shift and PAGE_SHIFT, so the bit-shift is safe.
1995		 * for huge pages, since vm_pgoff is in units of small
1996		 * pages, we need to shift off the always 0 bits to get
1997		 * a useful offset.
1998		 */
1999		BUG_ON(shift < PAGE_SHIFT);
2000		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
2001		off += (addr - vma->vm_start) >> shift;
2002		return offset_il_node(pol, off);
2003	} else
2004		return interleave_nodes(pol);
2005}
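/*
 * Editor's note -- worked example, not part of the original source.
 * For a 2MB huge page mapping, shift = 21 and PAGE_SHIFT = 12, so:
 *
 *	off  = vma->vm_pgoff >> 9;		   vm_pgoff is in 4KB units
 *	off += (addr - vma->vm_start) >> 21;	   whole huge pages into the VMA
 *
 * i.e. @off counts huge pages from the start of the backing object, which
 * is then handed to offset_il_node() for static interleaving.
 */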
2006
2007#ifdef CONFIG_HUGETLBFS
2008/*
2009 * huge_node(@vma, @addr, @gfp_flags, @mpol)
2010 * @vma: virtual memory area whose policy is sought
2011 * @addr: address in @vma for shared policy lookup and interleave policy
2012 * @gfp_flags: for requested zone
2013 * @mpol: pointer to mempolicy pointer for reference counted mempolicy
2014 * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
2015 *
2016 * Returns a nid suitable for a huge page allocation and a pointer
2017 * to the struct mempolicy for conditional unref after allocation.
2018 * If the effective policy is 'BIND', returns a pointer to the mempolicy's
2019 * @nodemask for filtering the zonelist.
2020 *
2021 * Must be protected by read_mems_allowed_begin()
2022 */
2023int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
2024				struct mempolicy **mpol, nodemask_t **nodemask)
 
2025{
2026	int nid;
2027
2028	*mpol = get_vma_policy(vma, addr);
2029	*nodemask = NULL;	/* assume !MPOL_BIND */
2030
2031	if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
2032		nid = interleave_nid(*mpol, vma, addr,
2033					huge_page_shift(hstate_vma(vma)));
2034	} else {
2035		nid = policy_node(gfp_flags, *mpol, numa_node_id());
2036		if ((*mpol)->mode == MPOL_BIND)
2037			*nodemask = &(*mpol)->nodes;
2038	}
2039	return nid;
2040}
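/*
 * Editor's note -- illustrative sketch, not part of the original source and
 * only loosely modeled on the hugetlb fault path in mm/hugetlb.c.  A caller
 * is expected to bracket huge_node() with the mems_allowed seqcount and to
 * drop the conditional reference afterwards:
 *
 *	struct mempolicy *mpol;
 *	nodemask_t *nodemask;
 *	unsigned int cpuset_mems_cookie;
 *	int nid;
 *
 * retry_cpuset:
 *	cpuset_mems_cookie = read_mems_allowed_begin();
 *	nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
 *	... allocate a huge page from nid, filtered by nodemask ...
 *	mpol_cond_put(mpol);
 *	if (!page && read_mems_allowed_retry(cpuset_mems_cookie))
 *		goto retry_cpuset;
 */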
2041
2042/*
2043 * init_nodemask_of_mempolicy
2044 *
2045 * If the current task's mempolicy is "default" [NULL], return 'false'
2046 * to indicate default policy.  Otherwise, extract the policy nodemask
2047 * for 'bind' or 'interleave' policy into the argument nodemask, or
2048 * initialize the argument nodemask to contain the single node for
2049 * 'preferred' or 'local' policy and return 'true' to indicate presence
2050 * of non-default mempolicy.
2051 *
2052 * We don't bother with reference counting the mempolicy [mpol_get/put]
2053 * because the current task is examining its own mempolicy and a task's
2054 * mempolicy is only ever changed by the task itself.
2055 *
2056 * N.B., it is the caller's responsibility to free a returned nodemask.
2057 */
2058bool init_nodemask_of_mempolicy(nodemask_t *mask)
2059{
2060	struct mempolicy *mempolicy;
 
2061
2062	if (!(mask && current->mempolicy))
2063		return false;
2064
2065	task_lock(current);
2066	mempolicy = current->mempolicy;
2067	switch (mempolicy->mode) {
2068	case MPOL_PREFERRED:
2069	case MPOL_BIND:
2070	case MPOL_INTERLEAVE:
2071		*mask = mempolicy->nodes;
 
 
2072		break;
2073
2074	case MPOL_LOCAL:
2075		init_nodemask_of_node(mask, numa_node_id());
 
 
2076		break;
2077
2078	default:
2079		BUG();
2080	}
2081	task_unlock(current);
2082
2083	return true;
2084}
2085#endif
2086
2087/*
2088 * mempolicy_in_oom_domain
2089 *
2090 * If tsk's mempolicy is "bind", check for intersection between mask and
2091 * the policy nodemask. Otherwise, return true for all other policies
2092 * including "interleave", as a tsk with "interleave" policy may have
2093 * memory allocated from all nodes in system.
2094 *
2095 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
2096 */
2097bool mempolicy_in_oom_domain(struct task_struct *tsk,
2098					const nodemask_t *mask)
2099{
2100	struct mempolicy *mempolicy;
2101	bool ret = true;
2102
2103	if (!mask)
2104		return ret;
2105
2106	task_lock(tsk);
2107	mempolicy = tsk->mempolicy;
2108	if (mempolicy && mempolicy->mode == MPOL_BIND)
2109		ret = nodes_intersects(mempolicy->nodes, *mask);
2110	task_unlock(tsk);
2111
2112	return ret;
2113}
2114
2115/* Allocate a page in interleaved policy.
2116   Own path because it needs to do special accounting. */
2117static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
2118					unsigned nid)
2119{
 
2120	struct page *page;
2121
2122	page = __alloc_pages(gfp, order, nid, NULL);
2123	/* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
2124	if (!static_branch_likely(&vm_numa_stat_key))
2125		return page;
2126	if (page && page_to_nid(page) == nid) {
2127		preempt_disable();
2128		__count_numa_event(page_zone(page), NUMA_INTERLEAVE_HIT);
2129		preempt_enable();
2130	}
2131	return page;
2132}
2133
2134/**
2135 * alloc_pages_vma - Allocate a page for a VMA.
2136 * @gfp: GFP flags.
2137 * @order: Order of the GFP allocation.
2138 * @vma: Pointer to VMA or NULL if not available.
2139 * @addr: Virtual address of the allocation.  Must be inside @vma.
2140 * @node: Which node to prefer for allocation (modulo policy).
2141 * @hugepage: For hugepages try only the preferred node if possible.
2142 *
2143 * Allocate a page for a specific address in @vma, using the appropriate
2144 * NUMA policy.  When @vma is not NULL the caller must hold the mmap_lock
2145 * of the mm_struct of the VMA to prevent it from going away.  Should be
2146 * used for all allocations for pages that will be mapped into user space.
2147 *
2148 * Return: The page on success or NULL if allocation fails.
2149 */
2150struct page *alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
2151		unsigned long addr, int node, bool hugepage)
2152{
2153	struct mempolicy *pol;
 
2154	struct page *page;
2155	int preferred_nid;
2156	nodemask_t *nmask;
2157
2158	pol = get_vma_policy(vma, addr);
 
 
2159
2160	if (pol->mode == MPOL_INTERLEAVE) {
2161		unsigned nid;
2162
2163		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
2164		mpol_cond_put(pol);
2165		page = alloc_page_interleave(gfp, order, nid);
2166		goto out;
2167	}
2168
2169	if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
2170		int hpage_node = node;
2171
2172		/*
2173		 * For hugepage allocation and non-interleave policy which
2174		 * allows the current node (or other explicitly preferred
2175		 * node) we only try to allocate from the current/preferred
2176		 * node and don't fall back to other nodes, as the cost of
2177		 * remote accesses would likely offset THP benefits.
2178		 *
2179		 * If the policy is interleave, or does not allow the current
2180		 * node in its nodemask, we allocate the standard way.
2181		 */
2182		if (pol->mode == MPOL_PREFERRED)
2183			hpage_node = first_node(pol->nodes);
2184
2185		nmask = policy_nodemask(gfp, pol);
2186		if (!nmask || node_isset(hpage_node, *nmask)) {
2187			mpol_cond_put(pol);
2188			/*
2189			 * First, try to allocate THP only on local node, but
2190			 * don't reclaim unnecessarily, just compact.
2191			 */
2192			page = __alloc_pages_node(hpage_node,
2193				gfp | __GFP_THISNODE | __GFP_NORETRY, order);
2194
2195			/*
2196			 * If hugepage allocations are configured to always use
2197			 * synchronous compaction or the vma has been madvised
2198			 * to prefer hugepage backing, retry allowing remote
2199			 * memory with both reclaim and compact as well.
2200			 */
2201			if (!page && (gfp & __GFP_DIRECT_RECLAIM))
2202				page = __alloc_pages_node(hpage_node,
2203								gfp, order);
2204
2205			goto out;
2206		}
2207	}
2208
2209	nmask = policy_nodemask(gfp, pol);
2210	preferred_nid = policy_node(gfp, pol, node);
2211	page = __alloc_pages(gfp, order, preferred_nid, nmask);
2212	mpol_cond_put(pol);
2213out:
 
2214	return page;
2215}
2216EXPORT_SYMBOL(alloc_pages_vma);
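/*
 * Editor's note -- illustrative sketch, not part of the original source.
 * Most users reach this through the alloc_page_vma() wrapper in
 * <linux/gfp.h>; e.g. an anonymous-fault style allocation looks roughly
 * like the following (with the mmap_lock already held by the fault path):
 *
 *	struct page *page;
 *
 *	page = alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, addr);
 *	if (!page)
 *		return VM_FAULT_OOM;
 */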
2217
2218/**
2219 * alloc_pages - Allocate pages.
2220 * @gfp: GFP flags.
2221 * @order: Power of two of number of pages to allocate.
2222 *
2223 * Allocate 1 << @order contiguous pages.  The physical address of the
2224 * first page is naturally aligned (e.g. an order-3 allocation will be aligned
2225 * to a multiple of 8 * PAGE_SIZE bytes).  The NUMA policy of the current
2226 * process is honoured when in process context.
2227 *
2228 * Context: Can be called from any context, providing the appropriate GFP
2229 * flags are used.
2230 * Return: The page on success or NULL if allocation fails.
2231 */
2232struct page *alloc_pages(gfp_t gfp, unsigned order)
2233{
2234	struct mempolicy *pol = &default_policy;
2235	struct page *page;
2236
2237	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2238		pol = get_task_policy(current);
2239
2240	/*
2241	 * No reference counting needed for current->mempolicy
2242	 * nor system default_policy
2243	 */
2244	if (pol->mode == MPOL_INTERLEAVE)
2245		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2246	else
2247		page = __alloc_pages(gfp, order,
2248				policy_node(gfp, pol, numa_node_id()),
2249				policy_nodemask(gfp, pol));
2250
2251	return page;
2252}
2253EXPORT_SYMBOL(alloc_pages);
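/*
 * Editor's note -- illustrative sketch, not part of the original source.
 * A minimal usage pattern for the interface documented above:
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);	   4 contiguous pages
 *
 *	if (page) {
 *		void *buf = page_address(page);
 *		... use the 4 * PAGE_SIZE buffer at buf ...
 *		__free_pages(page, 2);
 *	}
 */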
2254
2255int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2256{
2257	struct mempolicy *pol = mpol_dup(vma_policy(src));
2258
2259	if (IS_ERR(pol))
2260		return PTR_ERR(pol);
2261	dst->vm_policy = pol;
2262	return 0;
2263}
 
2264
2265/*
2266 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
2267 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
2268 * with the mems_allowed returned by cpuset_mems_allowed().  This
2269 * keeps mempolicies cpuset relative after its cpuset moves.  See
2270 * further kernel/cpuset.c update_nodemask().
2271 *
2272 * current's mempolicy may be rebound by another task (the task that changes
2273 * the cpuset's mems), so we needn't do rebind work for the current task.
2274 */
2275
2276/* Slow path of a mempolicy duplicate */
2277struct mempolicy *__mpol_dup(struct mempolicy *old)
2278{
2279	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2280
2281	if (!new)
2282		return ERR_PTR(-ENOMEM);
2283
2284	/* task's mempolicy is protected by alloc_lock */
2285	if (old == current->mempolicy) {
2286		task_lock(current);
2287		*new = *old;
2288		task_unlock(current);
2289	} else
2290		*new = *old;
2291
2292	if (current_cpuset_is_being_rebound()) {
2293		nodemask_t mems = cpuset_mems_allowed(current);
2294		mpol_rebind_policy(new, &mems);
2295	}
2296	atomic_set(&new->refcnt, 1);
2297	return new;
2298}
2299
2300/* Slow path of a mempolicy comparison */
2301bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
2302{
2303	if (!a || !b)
2304		return false;
2305	if (a->mode != b->mode)
2306		return false;
2307	if (a->flags != b->flags)
2308		return false;
2309	if (mpol_store_user_nodemask(a))
2310		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2311			return false;
2312
2313	switch (a->mode) {
2314	case MPOL_BIND:
 
2315	case MPOL_INTERLEAVE:
 
2316	case MPOL_PREFERRED:
2317		return !!nodes_equal(a->nodes, b->nodes);
2318	case MPOL_LOCAL:
2319		return true;
2320	default:
2321		BUG();
2322		return false;
2323	}
2324}
2325
2326/*
2327 * Shared memory backing store policy support.
2328 *
2329 * Remember policies even when nobody has shared memory mapped.
2330 * The policies are kept in a Red-Black tree linked from the inode.
2331 * They are protected by the sp->lock rwlock, which should be held
2332 * for any accesses to the tree.
2333 */
2334
2335/*
2336 * lookup first element intersecting start-end.  Caller holds sp->lock for
2337 * reading or for writing
2338 */
2339static struct sp_node *
2340sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2341{
2342	struct rb_node *n = sp->root.rb_node;
2343
2344	while (n) {
2345		struct sp_node *p = rb_entry(n, struct sp_node, nd);
2346
2347		if (start >= p->end)
2348			n = n->rb_right;
2349		else if (end <= p->start)
2350			n = n->rb_left;
2351		else
2352			break;
2353	}
2354	if (!n)
2355		return NULL;
2356	for (;;) {
2357		struct sp_node *w = NULL;
2358		struct rb_node *prev = rb_prev(n);
2359		if (!prev)
2360			break;
2361		w = rb_entry(prev, struct sp_node, nd);
2362		if (w->end <= start)
2363			break;
2364		n = prev;
2365	}
2366	return rb_entry(n, struct sp_node, nd);
2367}
2368
2369/*
2370 * Insert a new shared policy into the list.  Caller holds sp->lock for
2371 * writing.
2372 */
2373static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2374{
2375	struct rb_node **p = &sp->root.rb_node;
2376	struct rb_node *parent = NULL;
2377	struct sp_node *nd;
2378
2379	while (*p) {
2380		parent = *p;
2381		nd = rb_entry(parent, struct sp_node, nd);
2382		if (new->start < nd->start)
2383			p = &(*p)->rb_left;
2384		else if (new->end > nd->end)
2385			p = &(*p)->rb_right;
2386		else
2387			BUG();
2388	}
2389	rb_link_node(&new->nd, parent, p);
2390	rb_insert_color(&new->nd, &sp->root);
2391	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
2392		 new->policy ? new->policy->mode : 0);
2393}
2394
2395/* Find shared policy intersecting idx */
2396struct mempolicy *
2397mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2398{
2399	struct mempolicy *pol = NULL;
2400	struct sp_node *sn;
2401
2402	if (!sp->root.rb_node)
2403		return NULL;
2404	read_lock(&sp->lock);
2405	sn = sp_lookup(sp, idx, idx+1);
2406	if (sn) {
2407		mpol_get(sn->policy);
2408		pol = sn->policy;
2409	}
2410	read_unlock(&sp->lock);
2411	return pol;
2412}
2413
2414static void sp_free(struct sp_node *n)
2415{
2416	mpol_put(n->policy);
2417	kmem_cache_free(sn_cache, n);
2418}
2419
2420/**
2421 * mpol_misplaced - check whether current page node is valid in policy
2422 *
2423 * @page: page to be checked
2424 * @vma: vm area where page mapped
2425 * @addr: virtual address where page mapped
2426 *
2427 * Lookup current policy node id for vma,addr and "compare to" page's
2428 * node id.  Policy determination "mimics" alloc_page_vma().
2429 * Called from fault path where we know the vma and faulting address.
2430 *
2431 * Return: -1 if the page is in a node that is valid for this policy, or a
2432 * suitable node ID to allocate a replacement page from.
2433 */
2434int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2435{
2436	struct mempolicy *pol;
2437	struct zoneref *z;
2438	int curnid = page_to_nid(page);
2439	unsigned long pgoff;
2440	int thiscpu = raw_smp_processor_id();
2441	int thisnid = cpu_to_node(thiscpu);
2442	int polnid = NUMA_NO_NODE;
2443	int ret = -1;
2444
2445	pol = get_vma_policy(vma, addr);
2446	if (!(pol->flags & MPOL_F_MOF))
2447		goto out;
2448
2449	switch (pol->mode) {
2450	case MPOL_INTERLEAVE:
2451		pgoff = vma->vm_pgoff;
2452		pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
2453		polnid = offset_il_node(pol, pgoff);
2454		break;
2455
2456	case MPOL_PREFERRED:
2457		polnid = first_node(pol->nodes);
2458		break;
2459
2460	case MPOL_LOCAL:
2461		polnid = numa_node_id();
2462		break;
2463
2464	case MPOL_BIND:
2465		/* Optimize placement among multiple nodes via NUMA balancing */
2466		if (pol->flags & MPOL_F_MORON) {
2467			if (node_isset(thisnid, pol->nodes))
2468				break;
2469			goto out;
2470		}
2471
2472		/*
2473		 * allows binding to multiple nodes.
2474		 * use current page if in policy nodemask,
2475		 * else select nearest allowed node, if any.
2476		 * If no allowed nodes, use current [!misplaced].
2477		 */
2478		if (node_isset(curnid, pol->nodes))
2479			goto out;
2480		z = first_zones_zonelist(
2481				node_zonelist(numa_node_id(), GFP_HIGHUSER),
2482				gfp_zone(GFP_HIGHUSER),
2483				&pol->nodes);
2484		polnid = zone_to_nid(z->zone);
2485		break;
2486
2487	default:
2488		BUG();
2489	}
2490
2491	/* Migrate the page towards the node whose CPU is referencing it */
2492	if (pol->flags & MPOL_F_MORON) {
2493		polnid = thisnid;
2494
2495		if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
2496			goto out;
2497	}
2498
2499	if (curnid != polnid)
2500		ret = polnid;
2501out:
2502	mpol_cond_put(pol);
2503
2504	return ret;
2505}
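/*
 * Editor's note -- illustrative sketch, not part of the original source and
 * only loosely modeled on the NUMA hinting fault path in mm/memory.c.  The
 * caller compares the returned node with the page's current node and tries
 * to migrate when they differ:
 *
 *	int target_nid = mpol_misplaced(page, vma, addr);
 *
 *	if (target_nid != -1)
 *		migrated = migrate_misplaced_page(page, vma, target_nid);
 */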
2506
2507/*
2508 * Drop the (possibly final) reference to task->mempolicy.  It needs to be
2509 * dropped after task->mempolicy is set to NULL so that any allocation done as
2510 * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2511 * policy.
2512 */
2513void mpol_put_task_policy(struct task_struct *task)
2514{
2515	struct mempolicy *pol;
2516
2517	task_lock(task);
2518	pol = task->mempolicy;
2519	task->mempolicy = NULL;
2520	task_unlock(task);
2521	mpol_put(pol);
2522}
2523
2524static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2525{
2526	pr_debug("deleting %lx-%lx\n", n->start, n->end);
2527	rb_erase(&n->nd, &sp->root);
2528	sp_free(n);
2529}
2530
2531static void sp_node_init(struct sp_node *node, unsigned long start,
2532			unsigned long end, struct mempolicy *pol)
2533{
2534	node->start = start;
2535	node->end = end;
2536	node->policy = pol;
2537}
2538
2539static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2540				struct mempolicy *pol)
2541{
2542	struct sp_node *n;
2543	struct mempolicy *newpol;
2544
2545	n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2546	if (!n)
2547		return NULL;
2548
2549	newpol = mpol_dup(pol);
2550	if (IS_ERR(newpol)) {
2551		kmem_cache_free(sn_cache, n);
2552		return NULL;
2553	}
2554	newpol->flags |= MPOL_F_SHARED;
2555	sp_node_init(n, start, end, newpol);
2556
2557	return n;
2558}
2559
2560/* Replace a policy range. */
2561static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2562				 unsigned long end, struct sp_node *new)
2563{
2564	struct sp_node *n;
2565	struct sp_node *n_new = NULL;
2566	struct mempolicy *mpol_new = NULL;
2567	int ret = 0;
2568
2569restart:
2570	write_lock(&sp->lock);
2571	n = sp_lookup(sp, start, end);
2572	/* Take care of old policies in the same range. */
2573	while (n && n->start < end) {
2574		struct rb_node *next = rb_next(&n->nd);
2575		if (n->start >= start) {
2576			if (n->end <= end)
2577				sp_delete(sp, n);
2578			else
2579				n->start = end;
2580		} else {
2581			/* Old policy spanning whole new range. */
2582			if (n->end > end) {
2583				if (!n_new)
2584					goto alloc_new;
2585
2586				*mpol_new = *n->policy;
2587				atomic_set(&mpol_new->refcnt, 1);
2588				sp_node_init(n_new, end, n->end, mpol_new);
 
2589				n->end = start;
2590				sp_insert(sp, n_new);
2591				n_new = NULL;
2592				mpol_new = NULL;
2593				break;
2594			} else
2595				n->end = start;
2596		}
2597		if (!next)
2598			break;
2599		n = rb_entry(next, struct sp_node, nd);
2600	}
2601	if (new)
2602		sp_insert(sp, new);
2603	write_unlock(&sp->lock);
2604	ret = 0;
2605
2606err_out:
2607	if (mpol_new)
2608		mpol_put(mpol_new);
2609	if (n_new)
2610		kmem_cache_free(sn_cache, n_new);
2611
2612	return ret;
2613
2614alloc_new:
2615	write_unlock(&sp->lock);
2616	ret = -ENOMEM;
2617	n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2618	if (!n_new)
2619		goto err_out;
2620	mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2621	if (!mpol_new)
2622		goto err_out;
2623	goto restart;
2624}
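/*
 * Editor's note -- worked example, not part of the original source.
 * Suppose the tree holds one node [0, 16) with policy A and the caller
 * replaces [4, 8) with policy B.  The loop above hits the "old policy
 * spanning whole new range" case: the old node is trimmed to [0, 4), a
 * duplicate of A is inserted for [8, 16) via n_new/mpol_new, and finally
 * the new [4, 8) node carrying B is inserted, leaving:
 *
 *	[0, 4) -> A,  [4, 8) -> B,  [8, 16) -> copy of A
 */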
2625
2626/**
2627 * mpol_shared_policy_init - initialize shared policy for inode
2628 * @sp: pointer to inode shared policy
2629 * @mpol:  struct mempolicy to install
2630 *
2631 * Install non-NULL @mpol in inode's shared policy rb-tree.
2632 * On entry, the current task has a reference on a non-NULL @mpol.
2633 * This must be released on exit.
2634 * This is called during get_inode() calls, so we can use GFP_KERNEL.
2635 */
2636void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2637{
2638	int ret;
2639
2640	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
2641	rwlock_init(&sp->lock);
2642
2643	if (mpol) {
2644		struct vm_area_struct pvma;
2645		struct mempolicy *new;
2646		NODEMASK_SCRATCH(scratch);
2647
2648		if (!scratch)
2649			goto put_mpol;
2650		/* contextualize the tmpfs mount point mempolicy */
2651		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
2652		if (IS_ERR(new))
2653			goto free_scratch; /* no valid nodemask intersection */
2654
2655		task_lock(current);
2656		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
2657		task_unlock(current);
2658		if (ret)
2659			goto put_new;
2660
2661		/* Create pseudo-vma that contains just the policy */
2662		vma_init(&pvma, NULL);
2663		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
2664		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
2665
2666put_new:
2667		mpol_put(new);			/* drop initial ref */
2668free_scratch:
2669		NODEMASK_SCRATCH_FREE(scratch);
2670put_mpol:
2671		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
2672	}
2673}
2674
2675int mpol_set_shared_policy(struct shared_policy *info,
2676			struct vm_area_struct *vma, struct mempolicy *npol)
2677{
2678	int err;
2679	struct sp_node *new = NULL;
2680	unsigned long sz = vma_pages(vma);
2681
2682	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
2683		 vma->vm_pgoff,
2684		 sz, npol ? npol->mode : -1,
2685		 npol ? npol->flags : -1,
2686		 npol ? nodes_addr(npol->nodes)[0] : NUMA_NO_NODE);
2687
2688	if (npol) {
2689		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2690		if (!new)
2691			return -ENOMEM;
2692	}
2693	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2694	if (err && new)
2695		sp_free(new);
2696	return err;
2697}
2698
2699/* Free a backing policy store on inode delete. */
2700void mpol_free_shared_policy(struct shared_policy *p)
2701{
2702	struct sp_node *n;
2703	struct rb_node *next;
2704
2705	if (!p->root.rb_node)
2706		return;
2707	write_lock(&p->lock);
2708	next = rb_first(&p->root);
2709	while (next) {
2710		n = rb_entry(next, struct sp_node, nd);
2711		next = rb_next(&n->nd);
2712		sp_delete(p, n);
2713	}
2714	write_unlock(&p->lock);
2715}
2716
2717#ifdef CONFIG_NUMA_BALANCING
2718static int __initdata numabalancing_override;
2719
2720static void __init check_numabalancing_enable(void)
2721{
2722	bool numabalancing_default = false;
2723
2724	if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
2725		numabalancing_default = true;
2726
2727	/* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2728	if (numabalancing_override)
2729		set_numabalancing_state(numabalancing_override == 1);
2730
2731	if (num_online_nodes() > 1 && !numabalancing_override) {
2732		pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
2733			numabalancing_default ? "Enabling" : "Disabling");
2734		set_numabalancing_state(numabalancing_default);
2735	}
2736}
2737
2738static int __init setup_numabalancing(char *str)
2739{
2740	int ret = 0;
2741	if (!str)
2742		goto out;
2743
2744	if (!strcmp(str, "enable")) {
2745		numabalancing_override = 1;
2746		ret = 1;
2747	} else if (!strcmp(str, "disable")) {
2748		numabalancing_override = -1;
2749		ret = 1;
2750	}
2751out:
2752	if (!ret)
2753		pr_warn("Unable to parse numa_balancing=\n");
2754
2755	return ret;
2756}
2757__setup("numa_balancing=", setup_numabalancing);
2758#else
2759static inline void __init check_numabalancing_enable(void)
2760{
2761}
2762#endif /* CONFIG_NUMA_BALANCING */
2763
2764/* assumes fs == KERNEL_DS */
2765void __init numa_policy_init(void)
2766{
2767	nodemask_t interleave_nodes;
2768	unsigned long largest = 0;
2769	int nid, prefer = 0;
2770
2771	policy_cache = kmem_cache_create("numa_policy",
2772					 sizeof(struct mempolicy),
2773					 0, SLAB_PANIC, NULL);
2774
2775	sn_cache = kmem_cache_create("shared_policy_node",
2776				     sizeof(struct sp_node),
2777				     0, SLAB_PANIC, NULL);
2778
2779	for_each_node(nid) {
2780		preferred_node_policy[nid] = (struct mempolicy) {
2781			.refcnt = ATOMIC_INIT(1),
2782			.mode = MPOL_PREFERRED,
2783			.flags = MPOL_F_MOF | MPOL_F_MORON,
2784			.nodes = nodemask_of_node(nid),
2785		};
2786	}
2787
2788	/*
2789	 * Set interleaving policy for system init. Interleaving is only
2790	 * enabled across suitably sized nodes (default is >= 16MB), or
2791	 * fall back to the largest node if they're all smaller.
2792	 */
2793	nodes_clear(interleave_nodes);
2794	for_each_node_state(nid, N_MEMORY) {
2795		unsigned long total_pages = node_present_pages(nid);
2796
2797		/* Preserve the largest node */
2798		if (largest < total_pages) {
2799			largest = total_pages;
2800			prefer = nid;
2801		}
2802
2803		/* Interleave this node? */
2804		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2805			node_set(nid, interleave_nodes);
2806	}
2807
2808	/* All too small, use the largest */
2809	if (unlikely(nodes_empty(interleave_nodes)))
2810		node_set(prefer, interleave_nodes);
2811
2812	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2813		pr_err("%s: interleaving failed\n", __func__);
2814
2815	check_numabalancing_enable();
2816}
2817
2818/* Reset policy of current process to default */
2819void numa_default_policy(void)
2820{
2821	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
2822}
2823
2824/*
2825 * Parse and format mempolicy from/to strings
2826 */
2827
2828static const char * const policy_modes[] =
2829{
2830	[MPOL_DEFAULT]    = "default",
2831	[MPOL_PREFERRED]  = "prefer",
2832	[MPOL_BIND]       = "bind",
2833	[MPOL_INTERLEAVE] = "interleave",
2834	[MPOL_LOCAL]      = "local",
2835};
2836
2837
2838#ifdef CONFIG_TMPFS
2839/**
2840 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
2841 * @str:  string containing mempolicy to parse
2842 * @mpol:  pointer to struct mempolicy pointer, returned on success.
 
2843 *
2844 * Format of input:
2845 *	<mode>[=<flags>][:<nodelist>]
2846 *
2847 * On success, returns 0, else 1
2848 */
2849int mpol_parse_str(char *str, struct mempolicy **mpol)
2850{
2851	struct mempolicy *new = NULL;
2852	unsigned short mode_flags;
 
2853	nodemask_t nodes;
2854	char *nodelist = strchr(str, ':');
2855	char *flags = strchr(str, '=');
2856	int err = 1, mode;
2857
2858	if (flags)
2859		*flags++ = '\0';	/* terminate mode string */
2860
2861	if (nodelist) {
2862		/* NUL-terminate mode or flags string */
2863		*nodelist++ = '\0';
2864		if (nodelist_parse(nodelist, nodes))
2865			goto out;
2866		if (!nodes_subset(nodes, node_states[N_MEMORY]))
2867			goto out;
2868	} else
2869		nodes_clear(nodes);
2870
2871	mode = match_string(policy_modes, MPOL_MAX, str);
2872	if (mode < 0)
2873		goto out;
2874
2875	switch (mode) {
2876	case MPOL_PREFERRED:
2877		/*
2878		 * Insist on a nodelist of one node only, although later
2879		 * we use first_node(nodes) to grab a single node, so here
2880		 * nodelist (or nodes) cannot be empty.
2881		 */
2882		if (nodelist) {
2883			char *rest = nodelist;
2884			while (isdigit(*rest))
2885				rest++;
2886			if (*rest)
2887				goto out;
2888			if (nodes_empty(nodes))
2889				goto out;
2890		}
2891		break;
2892	case MPOL_INTERLEAVE:
2893		/*
2894		 * Default to online nodes with memory if no nodelist
2895		 */
2896		if (!nodelist)
2897			nodes = node_states[N_MEMORY];
2898		break;
2899	case MPOL_LOCAL:
2900		/*
2901		 * Don't allow a nodelist;  mpol_new() checks flags
2902		 */
2903		if (nodelist)
2904			goto out;
 
2905		break;
2906	case MPOL_DEFAULT:
2907		/*
2908		 * Insist on an empty nodelist
2909		 */
2910		if (!nodelist)
2911			err = 0;
2912		goto out;
2913	case MPOL_BIND:
2914		/*
2915		 * Insist on a nodelist
2916		 */
2917		if (!nodelist)
2918			goto out;
2919	}
2920
2921	mode_flags = 0;
2922	if (flags) {
2923		/*
2924		 * Currently, we only support two mutually exclusive
2925		 * mode flags.
2926		 */
2927		if (!strcmp(flags, "static"))
2928			mode_flags |= MPOL_F_STATIC_NODES;
2929		else if (!strcmp(flags, "relative"))
2930			mode_flags |= MPOL_F_RELATIVE_NODES;
2931		else
2932			goto out;
2933	}
2934
2935	new = mpol_new(mode, mode_flags, &nodes);
2936	if (IS_ERR(new))
2937		goto out;
2938
2939	/*
2940	 * Save nodes for mpol_to_str() to show the tmpfs mount options
2941	 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2942	 */
2943	if (mode != MPOL_PREFERRED) {
2944		new->nodes = nodes;
2945	} else if (nodelist) {
2946		nodes_clear(new->nodes);
2947		node_set(first_node(nodes), new->nodes);
2948	} else {
2949		new->mode = MPOL_LOCAL;
2950	}
2951
2952	/*
2953	 * Save nodes for contextualization: this will be used to "clone"
2954	 * the mempolicy in a specific context [cpuset] at a later time.
2955	 */
2956	new->w.user_nodemask = nodes;
2957
2958	err = 0;
2959
2960out:
2961	/* Restore string for error message */
2962	if (nodelist)
2963		*--nodelist = ':';
2964	if (flags)
2965		*--flags = '=';
2966	if (!err)
2967		*mpol = new;
2968	return err;
2969}
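/*
 * Editor's note -- illustrative examples, not part of the original source.
 * Strings accepted by mpol_parse_str() (the tmpfs "mpol=" mount option),
 * assuming the listed nodes have memory:
 *
 *	default
 *	local
 *	prefer:1
 *	bind:0,2
 *	interleave:0-3
 *	interleave=relative:0-3
 *
 * e.g.	mount -t tmpfs -o mpol=interleave:0-3 tmpfs /mnt
 */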
2970#endif /* CONFIG_TMPFS */
2971
2972/**
2973 * mpol_to_str - format a mempolicy structure for printing
2974 * @buffer:  to contain formatted mempolicy string
2975 * @maxlen:  length of @buffer
2976 * @pol:  pointer to mempolicy to be formatted
 
2977 *
2978 * Convert @pol into a string.  If @buffer is too short, truncate the string.
2979 * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
2980 * longest flag, "relative", and to display at least a few node ids.
2981 */
2982void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
2983{
2984	char *p = buffer;
2985	nodemask_t nodes = NODE_MASK_NONE;
2986	unsigned short mode = MPOL_DEFAULT;
2987	unsigned short flags = 0;
2988
2989	if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
 
 
2990		mode = pol->mode;
2991		flags = pol->flags;
2992	}
2993
2994	switch (mode) {
2995	case MPOL_DEFAULT:
2996	case MPOL_LOCAL:
2997		break;
2998	case MPOL_PREFERRED:
2999	case MPOL_BIND:
3000	case MPOL_INTERLEAVE:
3001		nodes = pol->nodes;
3002		break;
3003	default:
3004		WARN_ON_ONCE(1);
3005		snprintf(p, maxlen, "unknown");
3006		return;
3007	}
3008
3009	p += snprintf(p, maxlen, "%s", policy_modes[mode]);
3010
3011	if (flags & MPOL_MODE_FLAGS) {
3012		p += snprintf(p, buffer + maxlen - p, "=");
 
 
3013
3014		/*
3015		 * Currently, the only defined flags are mutually exclusive
3016		 */
3017		if (flags & MPOL_F_STATIC_NODES)
3018			p += snprintf(p, buffer + maxlen - p, "static");
3019		else if (flags & MPOL_F_RELATIVE_NODES)
3020			p += snprintf(p, buffer + maxlen - p, "relative");
3021	}
3022
3023	if (!nodes_empty(nodes))
3024		p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
3025			       nodemask_pr_args(&nodes));
3026}
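/*
 * Editor's note -- illustrative examples, not part of the original source.
 * Typical strings produced by mpol_to_str() for the corresponding policies:
 *
 *	"default"			MPOL_DEFAULT
 *	"local"				MPOL_LOCAL
 *	"prefer:3"			MPOL_PREFERRED on node 3
 *	"bind:0-1"			MPOL_BIND on nodes 0-1
 *	"interleave=static:0-3"		MPOL_INTERLEAVE with MPOL_F_STATIC_NODES
 */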
v3.5.6
 
   1/*
   2 * Simple NUMA memory policy for the Linux kernel.
   3 *
   4 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
   5 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
   6 * Subject to the GNU Public License, version 2.
   7 *
   8 * NUMA policy allows the user to give hints in which node(s) memory should
   9 * be allocated.
  10 *
  11 * Support four policies per VMA and per process:
  12 *
  13 * The VMA policy has priority over the process policy for a page fault.
  14 *
  15 * interleave     Allocate memory interleaved over a set of nodes,
  16 *                with normal fallback if it fails.
  17 *                For VMA based allocations this interleaves based on the
  18 *                offset into the backing object or offset into the mapping
  19 *                for anonymous memory. For process policy an process counter
  20 *                is used.
  21 *
  22 * bind           Only allocate memory on a specific set of nodes,
  23 *                no fallback.
  24 *                FIXME: memory is allocated starting with the first node
  25 *                to the last. It would be better if bind would truly restrict
  26 *                the allocation to memory nodes instead
  27 *
  28 * preferred       Try a specific node first before normal fallback.
  29 *                As a special case node -1 here means do the allocation
  30 *                on the local CPU. This is normally identical to default,
  31 *                but useful to set in a VMA when you have a non default
  32 *                process policy.
  33 *
  34 * default        Allocate on the local node first, or when on a VMA
  35 *                use the process policy. This is what Linux always did
  36 *		  in a NUMA aware kernel and still does by, ahem, default.
  37 *
  38 * The process policy is applied for most non interrupt memory allocations
  39 * in that process' context. Interrupts ignore the policies and always
  40 * try to allocate on the local CPU. The VMA policy is only applied for memory
  41 * allocations for a VMA in the VM.
  42 *
  43 * Currently there are a few corner cases in swapping where the policy
  44 * is not applied, but the majority should be handled. When process policy
  45 * is used it is not remembered over swap outs/swap ins.
  46 *
  47 * Only the highest zone in the zone hierarchy gets policied. Allocations
  48 * requesting a lower zone just use default policy. This implies that
  49 * on systems with highmem kernel lowmem allocation don't get policied.
  50 * Same with GFP_DMA allocations.
  51 *
  52 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
  53 * all users and remembered even when nobody has memory mapped.
  54 */
  55
  56/* Notebook:
  57   fix mmap readahead to honour policy and enable policy for any page cache
  58   object
  59   statistics for bigpages
  60   global policy for page cache? currently it uses process policy. Requires
  61   first item above.
  62   handle mremap for shared memory (currently ignored for the policy)
  63   grows down?
  64   make bind policy root only? It can trigger oom much faster and the
  65   kernel is not always grateful with that.
  66*/
  67
 
 
  68#include <linux/mempolicy.h>
  69#include <linux/mm.h>
  70#include <linux/highmem.h>
  71#include <linux/hugetlb.h>
  72#include <linux/kernel.h>
  73#include <linux/sched.h>
 
 
 
  74#include <linux/nodemask.h>
  75#include <linux/cpuset.h>
  76#include <linux/slab.h>
  77#include <linux/string.h>
  78#include <linux/export.h>
  79#include <linux/nsproxy.h>
  80#include <linux/interrupt.h>
  81#include <linux/init.h>
  82#include <linux/compat.h>
 
  83#include <linux/swap.h>
  84#include <linux/seq_file.h>
  85#include <linux/proc_fs.h>
  86#include <linux/migrate.h>
  87#include <linux/ksm.h>
  88#include <linux/rmap.h>
  89#include <linux/security.h>
  90#include <linux/syscalls.h>
  91#include <linux/ctype.h>
  92#include <linux/mm_inline.h>
 
 
 
  93
  94#include <asm/tlbflush.h>
  95#include <asm/uaccess.h>
  96#include <linux/random.h>
  97
  98#include "internal.h"
  99
 100/* Internal flags */
 101#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
 102#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
 103
 104static struct kmem_cache *policy_cache;
 105static struct kmem_cache *sn_cache;
 106
 107/* Highest zone. An specific allocation for a zone below that is not
 108   policied. */
 109enum zone_type policy_zone = 0;
 110
 111/*
 112 * run-time system-wide default policy => local allocation
 113 */
 114static struct mempolicy default_policy = {
 115	.refcnt = ATOMIC_INIT(1), /* never free it */
 116	.mode = MPOL_PREFERRED,
 117	.flags = MPOL_F_LOCAL,
 118};
 119
 120static const struct mempolicy_operations {
 121	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
 122	/*
 123	 * If read-side task has no lock to protect task->mempolicy, write-side
 124	 * task will rebind the task->mempolicy by two step. The first step is
 125	 * setting all the newly nodes, and the second step is cleaning all the
 126	 * disallowed nodes. In this way, we can avoid finding no node to alloc
 127	 * page.
 128	 * If we have a lock to protect task->mempolicy in read-side, we do
 129	 * rebind directly.
 130	 *
 131	 * step:
 132	 * 	MPOL_REBIND_ONCE - do rebind work at once
 133	 * 	MPOL_REBIND_STEP1 - set all the newly nodes
 134	 * 	MPOL_REBIND_STEP2 - clean all the disallowed nodes
 135	 */
 136	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
 137			enum mpol_rebind_step step);
 138} mpol_ops[MPOL_MAX];
 139
 140/* Check that the nodemask contains at least one populated zone */
 141static int is_valid_nodemask(const nodemask_t *nodemask)
 
 
 
 
 
 142{
 143	int nd, k;
 144
 145	for_each_node_mask(nd, *nodemask) {
 146		struct zone *z;
 147
 148		for (k = 0; k <= policy_zone; k++) {
 149			z = &NODE_DATA(nd)->node_zones[k];
 150			if (z->present_pages > 0)
 151				return 1;
 
 
 152		}
 153	}
 154
 155	return 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 156}
 157
 
 
 
 
 
 158static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
 159{
 160	return pol->flags & MPOL_MODE_FLAGS;
 161}
 162
 163static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
 164				   const nodemask_t *rel)
 165{
 166	nodemask_t tmp;
 167	nodes_fold(tmp, *orig, nodes_weight(*rel));
 168	nodes_onto(*ret, tmp, *rel);
 169}
 170
 171static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
 172{
 173	if (nodes_empty(*nodes))
 174		return -EINVAL;
 175	pol->v.nodes = *nodes;
 176	return 0;
 177}
 178
 179static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
 180{
 181	if (!nodes)
 182		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
 183	else if (nodes_empty(*nodes))
 184		return -EINVAL;			/*  no allowed nodes */
 185	else
 186		pol->v.preferred_node = first_node(*nodes);
 187	return 0;
 188}
 189
 190static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
 191{
 192	if (!is_valid_nodemask(nodes))
 193		return -EINVAL;
 194	pol->v.nodes = *nodes;
 195	return 0;
 196}
 197
 198/*
 199 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 200 * any, for the new policy.  mpol_new() has already validated the nodes
 201 * parameter with respect to the policy mode and flags.  But, we need to
 202 * handle an empty nodemask with MPOL_PREFERRED here.
 203 *
 204 * Must be called holding task's alloc_lock to protect task's mems_allowed
 205 * and mempolicy.  May also be called holding the mmap_semaphore for write.
 206 */
 207static int mpol_set_nodemask(struct mempolicy *pol,
 208		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
 209{
 210	int ret;
 211
 212	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
 213	if (pol == NULL)
 
 
 
 
 214		return 0;
 215	/* Check N_HIGH_MEMORY */
 
 216	nodes_and(nsc->mask1,
 217		  cpuset_current_mems_allowed, node_states[N_HIGH_MEMORY]);
 218
 219	VM_BUG_ON(!nodes);
 220	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
 221		nodes = NULL;	/* explicit local allocation */
 222	else {
 223		if (pol->flags & MPOL_F_RELATIVE_NODES)
 224			mpol_relative_nodemask(&nsc->mask2, nodes,&nsc->mask1);
 225		else
 226			nodes_and(nsc->mask2, *nodes, nsc->mask1);
 227
 228		if (mpol_store_user_nodemask(pol))
 229			pol->w.user_nodemask = *nodes;
 230		else
 231			pol->w.cpuset_mems_allowed =
 232						cpuset_current_mems_allowed;
 233	}
 234
 235	if (nodes)
 236		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
 237	else
 238		ret = mpol_ops[pol->mode].create(pol, NULL);
 
 
 239	return ret;
 240}
 241
 242/*
 243 * This function just creates a new policy, does some check and simple
 244 * initialization. You must invoke mpol_set_nodemask() to set nodes.
 245 */
 246static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
 247				  nodemask_t *nodes)
 248{
 249	struct mempolicy *policy;
 250
 251	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
 252		 mode, flags, nodes ? nodes_addr(*nodes)[0] : -1);
 253
 254	if (mode == MPOL_DEFAULT) {
 255		if (nodes && !nodes_empty(*nodes))
 256			return ERR_PTR(-EINVAL);
 257		return NULL;	/* simply delete any existing policy */
 258	}
 259	VM_BUG_ON(!nodes);
 260
 261	/*
 262	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
 263	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
 264	 * All other modes require a valid pointer to a non-empty nodemask.
 265	 */
 266	if (mode == MPOL_PREFERRED) {
 267		if (nodes_empty(*nodes)) {
 268			if (((flags & MPOL_F_STATIC_NODES) ||
 269			     (flags & MPOL_F_RELATIVE_NODES)))
 270				return ERR_PTR(-EINVAL);
 
 
 271		}
 
 
 
 
 
 272	} else if (nodes_empty(*nodes))
 273		return ERR_PTR(-EINVAL);
 274	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
 275	if (!policy)
 276		return ERR_PTR(-ENOMEM);
 277	atomic_set(&policy->refcnt, 1);
 278	policy->mode = mode;
 279	policy->flags = flags;
 280
 281	return policy;
 282}
 283
 284/* Slow path of a mpol destructor. */
 285void __mpol_put(struct mempolicy *p)
 286{
 287	if (!atomic_dec_and_test(&p->refcnt))
 288		return;
 289	kmem_cache_free(policy_cache, p);
 290}
 291
 292static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes,
 293				enum mpol_rebind_step step)
 294{
 295}
 296
 297/*
 298 * step:
 299 * 	MPOL_REBIND_ONCE  - do rebind work at once
 300 * 	MPOL_REBIND_STEP1 - set all the newly nodes
 301 * 	MPOL_REBIND_STEP2 - clean all the disallowed nodes
 302 */
 303static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
 304				 enum mpol_rebind_step step)
 305{
 306	nodemask_t tmp;
 307
 308	if (pol->flags & MPOL_F_STATIC_NODES)
 309		nodes_and(tmp, pol->w.user_nodemask, *nodes);
 310	else if (pol->flags & MPOL_F_RELATIVE_NODES)
 311		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
 312	else {
 313		/*
 314		 * if step == 1, we use ->w.cpuset_mems_allowed to cache the
 315		 * result
 316		 */
 317		if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) {
 318			nodes_remap(tmp, pol->v.nodes,
 319					pol->w.cpuset_mems_allowed, *nodes);
 320			pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
 321		} else if (step == MPOL_REBIND_STEP2) {
 322			tmp = pol->w.cpuset_mems_allowed;
 323			pol->w.cpuset_mems_allowed = *nodes;
 324		} else
 325			BUG();
 326	}
 327
 328	if (nodes_empty(tmp))
 329		tmp = *nodes;
 330
 331	if (step == MPOL_REBIND_STEP1)
 332		nodes_or(pol->v.nodes, pol->v.nodes, tmp);
 333	else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2)
 334		pol->v.nodes = tmp;
 335	else
 336		BUG();
 337
 338	if (!node_isset(current->il_next, tmp)) {
 339		current->il_next = next_node(current->il_next, tmp);
 340		if (current->il_next >= MAX_NUMNODES)
 341			current->il_next = first_node(tmp);
 342		if (current->il_next >= MAX_NUMNODES)
 343			current->il_next = numa_node_id();
 344	}
 345}
 346
 347static void mpol_rebind_preferred(struct mempolicy *pol,
 348				  const nodemask_t *nodes,
 349				  enum mpol_rebind_step step)
 350{
 351	nodemask_t tmp;
 352
 353	if (pol->flags & MPOL_F_STATIC_NODES) {
 354		int node = first_node(pol->w.user_nodemask);
 355
 356		if (node_isset(node, *nodes)) {
 357			pol->v.preferred_node = node;
 358			pol->flags &= ~MPOL_F_LOCAL;
 359		} else
 360			pol->flags |= MPOL_F_LOCAL;
 361	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
 362		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
 363		pol->v.preferred_node = first_node(tmp);
 364	} else if (!(pol->flags & MPOL_F_LOCAL)) {
 365		pol->v.preferred_node = node_remap(pol->v.preferred_node,
 366						   pol->w.cpuset_mems_allowed,
 367						   *nodes);
 368		pol->w.cpuset_mems_allowed = *nodes;
 369	}
 370}
 371
 372/*
 373 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 374 *
 375 * If read-side task has no lock to protect task->mempolicy, write-side
 376 * task will rebind the task->mempolicy by two step. The first step is
 377 * setting all the newly nodes, and the second step is cleaning all the
 378 * disallowed nodes. In this way, we can avoid finding no node to alloc
 379 * page.
 380 * If we have a lock to protect task->mempolicy in read-side, we do
 381 * rebind directly.
 382 *
 383 * step:
 384 * 	MPOL_REBIND_ONCE  - do rebind work at once
 385 * 	MPOL_REBIND_STEP1 - set all the newly nodes
 386 * 	MPOL_REBIND_STEP2 - clean all the disallowed nodes
 387 */
 388static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
 389				enum mpol_rebind_step step)
 390{
 391	if (!pol)
 392		return;
 393	if (!mpol_store_user_nodemask(pol) && step == MPOL_REBIND_ONCE &&
 394	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
 395		return;
 396
 397	if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING))
 398		return;
 399
 400	if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING))
 401		BUG();
 402
 403	if (step == MPOL_REBIND_STEP1)
 404		pol->flags |= MPOL_F_REBINDING;
 405	else if (step == MPOL_REBIND_STEP2)
 406		pol->flags &= ~MPOL_F_REBINDING;
 407	else if (step >= MPOL_REBIND_NSTEP)
 408		BUG();
 409
 410	mpol_ops[pol->mode].rebind(pol, newmask, step);
 411}
 412
 413/*
 414 * Wrapper for mpol_rebind_policy() that just requires task
 415 * pointer, and updates task mempolicy.
 416 *
 417 * Called with task's alloc_lock held.
 418 */
 419
 420void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
 421			enum mpol_rebind_step step)
 422{
 423	mpol_rebind_policy(tsk->mempolicy, new, step);
 424}
 425
 426/*
 427 * Rebind each vma in mm to new nodemask.
 428 *
 429 * Call holding a reference to mm.  Takes mm->mmap_sem during call.
 430 */
 431
 432void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
 433{
 434	struct vm_area_struct *vma;
 435
 436	down_write(&mm->mmap_sem);
 437	for (vma = mm->mmap; vma; vma = vma->vm_next)
 438		mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
 439	up_write(&mm->mmap_sem);
 440}
 441
 442static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
 443	[MPOL_DEFAULT] = {
 444		.rebind = mpol_rebind_default,
 445	},
 446	[MPOL_INTERLEAVE] = {
 447		.create = mpol_new_interleave,
 448		.rebind = mpol_rebind_nodemask,
 449	},
 450	[MPOL_PREFERRED] = {
 451		.create = mpol_new_preferred,
 452		.rebind = mpol_rebind_preferred,
 453	},
 454	[MPOL_BIND] = {
 455		.create = mpol_new_bind,
 456		.rebind = mpol_rebind_nodemask,
 457	},
 
 
 
 458};
 459
 460static void migrate_page_add(struct page *page, struct list_head *pagelist,
 461				unsigned long flags);
 462
 463/* Scan through pages checking if pages follow certain conditions. */
 464static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 465		unsigned long addr, unsigned long end,
 466		const nodemask_t *nodes, unsigned long flags,
 467		void *private)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 468{
 469	pte_t *orig_pte;
 470	pte_t *pte;
 
 
 
 
 
 471	spinlock_t *ptl;
 472
 473	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 474	do {
 475		struct page *page;
 476		int nid;
 
 
 
 
 
 
 477
 
 
 478		if (!pte_present(*pte))
 479			continue;
 480		page = vm_normal_page(vma, addr, *pte);
 481		if (!page)
 482			continue;
 483		/*
 484		 * vm_normal_page() filters out zero pages, but there might
 485		 * still be PageReserved pages to skip, perhaps in a VDSO.
 486		 * And we cannot move PageKsm pages sensibly or safely yet.
 487		 */
 488		if (PageReserved(page) || PageKsm(page))
 489			continue;
 490		nid = page_to_nid(page);
 491		if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
 492			continue;
 
 
 
 
 
 
 493
 494		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
 495			migrate_page_add(page, private, flags);
 496		else
 
 
 
 
 
 497			break;
 498	} while (pte++, addr += PAGE_SIZE, addr != end);
 499	pte_unmap_unlock(orig_pte, ptl);
 500	return addr != end;
 501}
 502
 503static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
 504		unsigned long addr, unsigned long end,
 505		const nodemask_t *nodes, unsigned long flags,
 506		void *private)
 507{
 508	pmd_t *pmd;
 509	unsigned long next;
 510
 511	pmd = pmd_offset(pud, addr);
 512	do {
 513		next = pmd_addr_end(addr, end);
 514		split_huge_page_pmd(vma->vm_mm, pmd);
 515		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
 516			continue;
 517		if (check_pte_range(vma, pmd, addr, next, nodes,
 518				    flags, private))
 519			return -EIO;
 520	} while (pmd++, addr = next, addr != end);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 521	return 0;
 522}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 523
 524static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
 525		unsigned long addr, unsigned long end,
 526		const nodemask_t *nodes, unsigned long flags,
 527		void *private)
 528{
 529	pud_t *pud;
 530	unsigned long next;
 531
 532	pud = pud_offset(pgd, addr);
 533	do {
 534		next = pud_addr_end(addr, end);
 535		if (pud_none_or_clear_bad(pud))
 536			continue;
 537		if (check_pmd_range(vma, pud, addr, next, nodes,
 538				    flags, private))
 539			return -EIO;
 540	} while (pud++, addr = next, addr != end);
 541	return 0;
 542}
 543
 544static inline int check_pgd_range(struct vm_area_struct *vma,
 545		unsigned long addr, unsigned long end,
 546		const nodemask_t *nodes, unsigned long flags,
 547		void *private)
 548{
 549	pgd_t *pgd;
 550	unsigned long next;
 551
 552	pgd = pgd_offset(vma->vm_mm, addr);
 553	do {
 554		next = pgd_addr_end(addr, end);
 555		if (pgd_none_or_clear_bad(pgd))
 556			continue;
 557		if (check_pud_range(vma, pgd, addr, next, nodes,
 558				    flags, private))
 559			return -EIO;
 560	} while (pgd++, addr = next, addr != end);
 561	return 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 562}
 563
 564/*
 565 * Check if all pages in a range are on a set of nodes.
 566 * If pagelist != NULL then isolate pages from the LRU and
 567 * put them on the pagelist.
 568 */
 569static struct vm_area_struct *
 570check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
 571		const nodemask_t *nodes, unsigned long flags, void *private)
 572{
 573	int err;
 574	struct vm_area_struct *first, *vma, *prev;
 
 
 
 
 
 
 575
 
 
 
 576
 577	first = find_vma(mm, start);
 578	if (!first)
 579		return ERR_PTR(-EFAULT);
 580	prev = NULL;
 581	for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
 582		if (!(flags & MPOL_MF_DISCONTIG_OK)) {
 583			if (!vma->vm_next && vma->vm_end < end)
 584				return ERR_PTR(-EFAULT);
 585			if (prev && prev->vm_end < vma->vm_start)
 586				return ERR_PTR(-EFAULT);
 587		}
 588		if (!is_vm_hugetlb_page(vma) &&
 589		    ((flags & MPOL_MF_STRICT) ||
 590		     ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
 591				vma_migratable(vma)))) {
 592			unsigned long endvma = vma->vm_end;
 593
 594			if (endvma > end)
 595				endvma = end;
 596			if (vma->vm_start > start)
 597				start = vma->vm_start;
 598			err = check_pgd_range(vma, start, endvma, nodes,
 599						flags, private);
 600			if (err) {
 601				first = ERR_PTR(err);
 602				break;
 603			}
 604		}
 605		prev = vma;
 606	}
 607	return first;
 
 
 
 
 
 
 
 
 608}
 609
 610/* Step 2: apply policy to a range and do splits. */
 611static int mbind_range(struct mm_struct *mm, unsigned long start,
 612		       unsigned long end, struct mempolicy *new_pol)
 613{
 614	struct vm_area_struct *next;
 615	struct vm_area_struct *prev;
 616	struct vm_area_struct *vma;
 617	int err = 0;
 618	pgoff_t pgoff;
 619	unsigned long vmstart;
 620	unsigned long vmend;
 621
 622	vma = find_vma(mm, start);
 623	if (!vma || vma->vm_start > start)
 624		return -EFAULT;
 625
 626	prev = vma->vm_prev;
 627	if (start > vma->vm_start)
 628		prev = vma;
 629
 630	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
 631		next = vma->vm_next;
 632		vmstart = max(start, vma->vm_start);
 633		vmend   = min(end, vma->vm_end);
 634
 635		if (mpol_equal(vma_policy(vma), new_pol))
 636			continue;
 637
 638		pgoff = vma->vm_pgoff +
 639			((vmstart - vma->vm_start) >> PAGE_SHIFT);
 640		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
 641				  vma->anon_vma, vma->vm_file, pgoff,
 642				  new_pol);
 643		if (prev) {
 644			vma = prev;
 645			next = vma->vm_next;
 646			continue;
 
 
 
 647		}
 648		if (vma->vm_start != vmstart) {
 649			err = split_vma(vma->vm_mm, vma, vmstart, 1);
 650			if (err)
 651				goto out;
 652		}
 653		if (vma->vm_end != vmend) {
 654			err = split_vma(vma->vm_mm, vma, vmend, 0);
 655			if (err)
 656				goto out;
 657		}
 658
 659		/*
 660		 * Apply policy to a single VMA. The reference counting of
 661		 * policy for vma_policy linkages has already been handled by
 662		 * vma_merge and split_vma as necessary. If this is a shared
 663		 * policy then ->set_policy will increment the reference count
 664		 * for an sp node.
 665		 */
 666		pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
 667			vma->vm_start, vma->vm_end, vma->vm_pgoff,
 668			vma->vm_ops, vma->vm_file,
 669			vma->vm_ops ? vma->vm_ops->set_policy : NULL);
 670		if (vma->vm_ops && vma->vm_ops->set_policy) {
 671			err = vma->vm_ops->set_policy(vma, new_pol);
 672			if (err)
 673				goto out;
 674		}
 675	}
 676
 677 out:
 678	return err;
 679}
 680
 681/*
 682 * Update task->flags PF_MEMPOLICY bit: set iff non-default
 683 * mempolicy.  Allows more rapid checking of this (combined perhaps
 684 * with other PF_* flag bits) on memory allocation hot code paths.
 685 *
 686 * If called from outside this file, the task 'p' should -only- be
 687 * a newly forked child not yet visible on the task list, because
 688 * manipulating the task flags of a visible task is not safe.
 689 *
 690 * The above limitation is why this routine has the funny name
 691 * mpol_fix_fork_child_flag().
 692 *
 693 * It is also safe to call this with a task pointer of current,
 694 * which the static wrapper mpol_set_task_struct_flag() does,
 695 * for use within this file.
 696 */
 697
 698void mpol_fix_fork_child_flag(struct task_struct *p)
 699{
 700	if (p->mempolicy)
 701		p->flags |= PF_MEMPOLICY;
 702	else
 703		p->flags &= ~PF_MEMPOLICY;
 704}
 705
 706static void mpol_set_task_struct_flag(void)
 707{
 708	mpol_fix_fork_child_flag(current);
 709}
 710
 711/* Set the process memory policy */
 712static long do_set_mempolicy(unsigned short mode, unsigned short flags,
 713			     nodemask_t *nodes)
 714{
 715	struct mempolicy *new, *old;
 716	struct mm_struct *mm = current->mm;
 717	NODEMASK_SCRATCH(scratch);
 718	int ret;
 719
 720	if (!scratch)
 721		return -ENOMEM;
 722
 723	new = mpol_new(mode, flags, nodes);
 724	if (IS_ERR(new)) {
 725		ret = PTR_ERR(new);
 726		goto out;
 727	}
 728	/*
 729	 * prevent changing our mempolicy while show_numa_maps()
 730	 * is using it.
 731	 * Note:  do_set_mempolicy() can be called at init time
 732	 * with no 'mm'.
 733	 */
 734	if (mm)
 735		down_write(&mm->mmap_sem);
 736	task_lock(current);
 737	ret = mpol_set_nodemask(new, nodes, scratch);
 738	if (ret) {
 739		task_unlock(current);
 740		if (mm)
 741			up_write(&mm->mmap_sem);
 742		mpol_put(new);
 743		goto out;
 744	}
 745	old = current->mempolicy;
 746	current->mempolicy = new;
 747	mpol_set_task_struct_flag();
 748	if (new && new->mode == MPOL_INTERLEAVE &&
 749	    nodes_weight(new->v.nodes))
 750		current->il_next = first_node(new->v.nodes);
 751	task_unlock(current);
 752	if (mm)
 753		up_write(&mm->mmap_sem);
 754
 755	mpol_put(old);
 756	ret = 0;
 757out:
 758	NODEMASK_SCRATCH_FREE(scratch);
 759	return ret;
 760}
 761
 762/*
 763 * Return nodemask for policy for get_mempolicy() query
 764 *
 765 * Called with task's alloc_lock held
 766 */
 767static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
 768{
 769	nodes_clear(*nodes);
 770	if (p == &default_policy)
 771		return;
 772
 773	switch (p->mode) {
 774	case MPOL_BIND:
 775		/* Fall through */
 776	case MPOL_INTERLEAVE:
 777		*nodes = p->v.nodes;
 778		break;
 779	case MPOL_PREFERRED:
 780		if (!(p->flags & MPOL_F_LOCAL))
 781			node_set(p->v.preferred_node, *nodes);
 782		/* else return empty node mask for local allocation */
 783		break;
 784	default:
 785		BUG();
 786	}
 787}
 788
 789static int lookup_node(struct mm_struct *mm, unsigned long addr)
 790{
 791	struct page *p;
 792	int err;
 793
 794	err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
 795	if (err >= 0) {
 796		err = page_to_nid(p);
 797		put_page(p);
 798	}
 799	return err;
 800}
 801
 802/* Retrieve NUMA policy */
 803static long do_get_mempolicy(int *policy, nodemask_t *nmask,
 804			     unsigned long addr, unsigned long flags)
 805{
 806	int err;
 807	struct mm_struct *mm = current->mm;
 808	struct vm_area_struct *vma = NULL;
 809	struct mempolicy *pol = current->mempolicy;
 810
 811	if (flags &
 812		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
 813		return -EINVAL;
 814
 815	if (flags & MPOL_F_MEMS_ALLOWED) {
 816		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
 817			return -EINVAL;
 818		*policy = 0;	/* just so it's initialized */
 819		task_lock(current);
 820		*nmask  = cpuset_current_mems_allowed;
 821		task_unlock(current);
 822		return 0;
 823	}
 824
 825	if (flags & MPOL_F_ADDR) {
 826		/*
 827		 * Do NOT fall back to task policy if the
 828		 * vma/shared policy at addr is NULL.  We
 829		 * want to return MPOL_DEFAULT in this case.
 830		 */
 831		down_read(&mm->mmap_sem);
 832		vma = find_vma_intersection(mm, addr, addr+1);
 833		if (!vma) {
 834			up_read(&mm->mmap_sem);
 835			return -EFAULT;
 836		}
 837		if (vma->vm_ops && vma->vm_ops->get_policy)
 838			pol = vma->vm_ops->get_policy(vma, addr);
 839		else
 840			pol = vma->vm_policy;
 841	} else if (addr)
 842		return -EINVAL;
 843
 844	if (!pol)
 845		pol = &default_policy;	/* indicates default behavior */
 846
 847	if (flags & MPOL_F_NODE) {
 848		if (flags & MPOL_F_ADDR) {
 849			err = lookup_node(mm, addr);
 850			if (err < 0)
 851				goto out;
 852			*policy = err;
 853		} else if (pol == current->mempolicy &&
 854				pol->mode == MPOL_INTERLEAVE) {
 855			*policy = current->il_next;
 856		} else {
 857			err = -EINVAL;
 858			goto out;
 859		}
 860	} else {
 861		*policy = pol == &default_policy ? MPOL_DEFAULT :
 862						pol->mode;
 863		/*
 864		 * Internal mempolicy flags must be masked off before exposing
 865		 * the policy to userspace.
 866		 */
 867		*policy |= (pol->flags & MPOL_MODE_FLAGS);
 868	}
 869
 870	if (vma) {
 871		up_read(&current->mm->mmap_sem);
 872		vma = NULL;
 873	}
 874
 875	err = 0;
 876	if (nmask) {
 877		if (mpol_store_user_nodemask(pol)) {
 878			*nmask = pol->w.user_nodemask;
 879		} else {
 880			task_lock(current);
 881			get_policy_nodemask(pol, nmask);
 882			task_unlock(current);
 883		}
 884	}
 885
 886 out:
 887	mpol_cond_put(pol);
 888	if (vma)
 889		up_read(&current->mm->mmap_sem);
 890	return err;
 891}
 892
 893#ifdef CONFIG_MIGRATION
 894/*
 895 * page migration
 896 */
 897static void migrate_page_add(struct page *page, struct list_head *pagelist,
 898				unsigned long flags)
 899{
 900	/*
 901	 * Avoid migrating a page that is shared with others.
 902	 */
 903	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
 904		if (!isolate_lru_page(page)) {
 905			list_add_tail(&page->lru, pagelist);
 906			inc_zone_page_state(page, NR_ISOLATED_ANON +
 907					    page_is_file_cache(page));
 908		}
 909	}
 910}
 911
 912static struct page *new_node_page(struct page *page, unsigned long node, int **x)
 913{
 914	return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
 915}
 916
 917/*
 918 * Migrate pages from one node to a target node.
 919 * Returns error or the number of pages not migrated.
 920 */
 921static int migrate_to_node(struct mm_struct *mm, int source, int dest,
 922			   int flags)
 923{
 924	nodemask_t nmask;
 925	LIST_HEAD(pagelist);
 926	int err = 0;
 927	struct vm_area_struct *vma;
 928
 929	nodes_clear(nmask);
 930	node_set(source, nmask);
 931
 932	vma = check_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
 933			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
 934	if (IS_ERR(vma))
 935		return PTR_ERR(vma);
 936
 937	if (!list_empty(&pagelist)) {
 938		err = migrate_pages(&pagelist, new_node_page, dest,
 939							false, MIGRATE_SYNC);
 940		if (err)
 941			putback_lru_pages(&pagelist);
 942	}
 943
 944	return err;
 945}
 946
 947/*
 948 * Move pages between the two nodesets so as to preserve the physical
 949 * layout as much as possible.
 950 *
951 * Returns the number of pages that could not be moved.
 952 */
 953int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
 954		     const nodemask_t *to, int flags)
 955{
 956	int busy = 0;
 957	int err;
 958	nodemask_t tmp;
 959
 960	err = migrate_prep();
 961	if (err)
 962		return err;
 963
 964	down_read(&mm->mmap_sem);
 965
 966	err = migrate_vmas(mm, from, to, flags);
 967	if (err)
 968		goto out;
 969
 970	/*
 971	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
 972	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
 973	 * bit in 'tmp', and return that <source, dest> pair for migration.
 974	 * The pair of nodemasks 'to' and 'from' define the map.
 975	 *
976	 * If no pair of bits is found that way, fall back to picking some
 977	 * pair of 'source' and 'dest' bits that are not the same.  If the
 978	 * 'source' and 'dest' bits are the same, this represents a node
 979	 * that will be migrating to itself, so no pages need move.
 980	 *
 981	 * If no bits are left in 'tmp', or if all remaining bits left
 982	 * in 'tmp' correspond to the same bit in 'to', return false
 983	 * (nothing left to migrate).
 984	 *
 985	 * This lets us pick a pair of nodes to migrate between, such that
 986	 * if possible the dest node is not already occupied by some other
 987	 * source node, minimizing the risk of overloading the memory on a
 988	 * node that would happen if we migrated incoming memory to a node
 989	 * before migrating outgoing memory source that same node.
 990	 *
 991	 * A single scan of tmp is sufficient.  As we go, we remember the
 992	 * most recent <s, d> pair that moved (s != d).  If we find a pair
 993	 * that not only moved, but what's better, moved to an empty slot
994	 * (d is not set in tmp), then we break out with that pair.
995	 * Otherwise when we finish scanning tmp, we at least have the
 996	 * most recent <s, d> pair that moved.  If we get all the way through
 997	 * the scan of tmp without finding any node that moved, much less
 998	 * moved to an empty node, then there is nothing left worth migrating.
 999	 */
1000
1001	tmp = *from;
1002	while (!nodes_empty(tmp)) {
1003		int s, d;
1004		int source = -1;
1005		int dest = 0;
1006
1007		for_each_node_mask(s, tmp) {
1008
1009			/*
1010			 * do_migrate_pages() tries to maintain the relative
1011			 * node relationship of the pages established between
1012			 * threads and memory areas.
1013			 *
1014			 * However if the number of source nodes is not equal to
1015			 * the number of destination nodes we can not preserve
1016			 * this node relative relationship.  In that case, skip
1017			 * copying memory from a node that is in the destination
1018			 * mask.
1019			 *
1020			 * Example: [2,3,4] -> [3,4,5] moves everything.
1021			 *          [0-7] - > [3,4,5] moves only 0,1,2,6,7.
1022			 */
1023
1024			if ((nodes_weight(*from) != nodes_weight(*to)) &&
1025						(node_isset(s, *to)))
1026				continue;
1027
1028			d = node_remap(s, *from, *to);
1029			if (s == d)
1030				continue;
1031
1032			source = s;	/* Node moved. Memorize */
1033			dest = d;
1034
1035			/* dest not in remaining from nodes? */
1036			if (!node_isset(dest, tmp))
1037				break;
1038		}
1039		if (source == -1)
1040			break;
1041
1042		node_clear(source, tmp);
1043		err = migrate_to_node(mm, source, dest, flags);
1044		if (err > 0)
1045			busy += err;
1046		if (err < 0)
1047			break;
1048	}
1049out:
1050	up_read(&mm->mmap_sem);
1051	if (err < 0)
1052		return err;
1053	return busy;
1054
1055}
1056
1057/*
1058 * Allocate a new page for page migration based on vma policy.
1059 * Start assuming that page is mapped by vma pointed to by @private.
1060 * Search forward from there, if not.  N.B., this assumes that the
1061 * list of pages handed to migrate_pages()--which is how we get here--
1062 * is in virtual address order.
1063 */
1064static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
1065{
1066	struct vm_area_struct *vma = (struct vm_area_struct *)private;
1067	unsigned long uninitialized_var(address);
1068
1069	while (vma) {
1070		address = page_address_in_vma(page, vma);
1071		if (address != -EFAULT)
1072			break;
1073		vma = vma->vm_next;
1074	}
1075
1076	/*
1077	 * if !vma, alloc_page_vma() will use task or system default policy
1078	 */
1079	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
1080}
1081#else
1082
1083static void migrate_page_add(struct page *page, struct list_head *pagelist,
1084				unsigned long flags)
1085{
1086}
1087
1088int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1089		     const nodemask_t *to, int flags)
1090{
1091	return -ENOSYS;
1092}
1093
1094static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
1095{
1096	return NULL;
1097}
1098#endif
1099
1100static long do_mbind(unsigned long start, unsigned long len,
1101		     unsigned short mode, unsigned short mode_flags,
1102		     nodemask_t *nmask, unsigned long flags)
1103{
1104	struct vm_area_struct *vma;
1105	struct mm_struct *mm = current->mm;
1106	struct mempolicy *new;
1107	unsigned long end;
1108	int err;
1109	LIST_HEAD(pagelist);
1110
1111	if (flags & ~(unsigned long)(MPOL_MF_STRICT |
1112				     MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
1113		return -EINVAL;
1114	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1115		return -EPERM;
1116
1117	if (start & ~PAGE_MASK)
1118		return -EINVAL;
1119
1120	if (mode == MPOL_DEFAULT)
1121		flags &= ~MPOL_MF_STRICT;
1122
1123	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
1124	end = start + len;
1125
1126	if (end < start)
1127		return -EINVAL;
1128	if (end == start)
1129		return 0;
1130
1131	new = mpol_new(mode, mode_flags, nmask);
1132	if (IS_ERR(new))
1133		return PTR_ERR(new);
1134
1135	/*
1136	 * If we are using the default policy then operation
1137	 * on discontinuous address spaces is okay after all
1138	 */
1139	if (!new)
1140		flags |= MPOL_MF_DISCONTIG_OK;
1141
1142	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1143		 start, start + len, mode, mode_flags,
1144		 nmask ? nodes_addr(*nmask)[0] : -1);
1145
1146	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1147
1148		err = migrate_prep();
1149		if (err)
1150			goto mpol_out;
1151	}
1152	{
1153		NODEMASK_SCRATCH(scratch);
1154		if (scratch) {
1155			down_write(&mm->mmap_sem);
1156			task_lock(current);
1157			err = mpol_set_nodemask(new, nmask, scratch);
1158			task_unlock(current);
1159			if (err)
1160				up_write(&mm->mmap_sem);
1161		} else
1162			err = -ENOMEM;
1163		NODEMASK_SCRATCH_FREE(scratch);
1164	}
1165	if (err)
1166		goto mpol_out;
1167
1168	vma = check_range(mm, start, end, nmask,
1169			  flags | MPOL_MF_INVERT, &pagelist);
1170
1171	err = PTR_ERR(vma);
1172	if (!IS_ERR(vma)) {
1173		int nr_failed = 0;
1174
1175		err = mbind_range(mm, start, end, new);
1176
1177		if (!list_empty(&pagelist)) {
1178			nr_failed = migrate_pages(&pagelist, new_vma_page,
1179						(unsigned long)vma,
1180						false, MIGRATE_SYNC);
1181			if (nr_failed)
1182				putback_lru_pages(&pagelist);
1183		}
1184
1185		if (!err && nr_failed && (flags & MPOL_MF_STRICT))
1186			err = -EIO;
1187	} else
1188		putback_lru_pages(&pagelist);
1189
1190	up_write(&mm->mmap_sem);
1191 mpol_out:
1192	mpol_put(new);
1193	return err;
1194}
1195
1196/*
1197 * User space interface with variable sized bitmaps for nodelists.
1198 */
1199
1200/* Copy a node mask from user space. */
1201static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
1202		     unsigned long maxnode)
1203{
1204	unsigned long k;
1205	unsigned long nlongs;
1206	unsigned long endmask;
1207
1208	--maxnode;
1209	nodes_clear(*nodes);
1210	if (maxnode == 0 || !nmask)
1211		return 0;
1212	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1213		return -EINVAL;
1214
1215	nlongs = BITS_TO_LONGS(maxnode);
1216	if ((maxnode % BITS_PER_LONG) == 0)
1217		endmask = ~0UL;
1218	else
1219		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
1220
1221	/* When the user specified more nodes than supported, just check
1222	   that the unsupported part is all zero. */
1223	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
1224		if (nlongs > PAGE_SIZE/sizeof(long))
1225			return -EINVAL;
1226		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
1227			unsigned long t;
1228			if (get_user(t, nmask + k))
1229				return -EFAULT;
1230			if (k == nlongs - 1) {
1231				if (t & endmask)
1232					return -EINVAL;
1233			} else if (t)
1234				return -EINVAL;
1235		}
1236		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
1237		endmask = ~0UL;
1238	}
1239
1240	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
1241		return -EFAULT;
1242	nodes_addr(*nodes)[nlongs-1] &= endmask;
1243	return 0;
1244}
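/*
 * Worked example (illustrative, 64-bit): a caller passing maxnode == 9
 * describes node bits 0..7.  After --maxnode, nlongs == 1 and
 * endmask == 0xff, so a single long is copied from user space and any
 * bits at position 8 or above in it are silently cleared.
 */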
1245
1246/* Copy a kernel node mask to user space */
1247static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1248			      nodemask_t *nodes)
1249{
1250	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1251	const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
1252
1253	if (copy > nbytes) {
1254		if (copy > PAGE_SIZE)
1255			return -EINVAL;
1256		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1257			return -EFAULT;
1258		copy = nbytes;
1259	}
1260	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1261}
1262
1263SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1264		unsigned long, mode, unsigned long __user *, nmask,
1265		unsigned long, maxnode, unsigned, flags)
1266{
1267	nodemask_t nodes;
1268	int err;
1269	unsigned short mode_flags;
1270
1271	mode_flags = mode & MPOL_MODE_FLAGS;
1272	mode &= ~MPOL_MODE_FLAGS;
1273	if (mode >= MPOL_MAX)
1274		return -EINVAL;
1275	if ((mode_flags & MPOL_F_STATIC_NODES) &&
1276	    (mode_flags & MPOL_F_RELATIVE_NODES))
1277		return -EINVAL;
1278	err = get_nodes(&nodes, nmask, maxnode);
1279	if (err)
1280		return err;
1281	return do_mbind(start, len, mode, mode_flags, &nodes, flags);
1282}
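/*
 * A minimal userspace sketch of driving the mbind() entry point above,
 * assuming the mbind() wrapper and MPOL_* constants from libnuma's
 * <numaif.h>; names and sizes here are illustrative only.
 */
#if 0	/* example only, not part of this file's build */
#include <numaif.h>
#include <sys/mman.h>
#include <stdio.h>

static int bind_region_to_node0(void)
{
	size_t len = 4UL << 20;			/* 4 MiB */
	unsigned long nodemask = 1UL << 0;	/* node 0 only */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return -1;
	/* maxnode: how many bits of nodemask the kernel should read */
	if (mbind(p, len, MPOL_BIND, &nodemask,
		  8 * sizeof(nodemask), MPOL_MF_STRICT)) {
		perror("mbind");
		return -1;
	}
	return 0;
}
#endif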
1283
1284/* Set the process memory policy */
1285SYSCALL_DEFINE3(set_mempolicy, int, mode, unsigned long __user *, nmask,
1286		unsigned long, maxnode)
1287{
1288	int err;
1289	nodemask_t nodes;
1290	unsigned short flags;
1291
1292	flags = mode & MPOL_MODE_FLAGS;
1293	mode &= ~MPOL_MODE_FLAGS;
1294	if ((unsigned int)mode >= MPOL_MAX)
1295		return -EINVAL;
1296	if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
1297		return -EINVAL;
1298	err = get_nodes(&nodes, nmask, maxnode);
1299	if (err)
1300		return err;
1301	return do_set_mempolicy(mode, flags, &nodes);
1302}
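/*
 * A minimal userspace sketch for the set_mempolicy() entry point above,
 * again assuming libnuma's <numaif.h> wrapper: interleave all further
 * allocations of the calling task across nodes 0 and 1.
 */
#if 0	/* example only */
#include <numaif.h>

static long interleave_self(void)
{
	unsigned long nodes = (1UL << 0) | (1UL << 1);

	return set_mempolicy(MPOL_INTERLEAVE, &nodes, 8 * sizeof(nodes));
}
#endif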
1303
1304SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1305		const unsigned long __user *, old_nodes,
1306		const unsigned long __user *, new_nodes)
1307{
1308	const struct cred *cred = current_cred(), *tcred;
1309	struct mm_struct *mm = NULL;
1310	struct task_struct *task;
1311	nodemask_t task_nodes;
1312	int err;
1313	nodemask_t *old;
1314	nodemask_t *new;
1315	NODEMASK_SCRATCH(scratch);
1316
1317	if (!scratch)
1318		return -ENOMEM;
1319
1320	old = &scratch->mask1;
1321	new = &scratch->mask2;
1322
1323	err = get_nodes(old, old_nodes, maxnode);
1324	if (err)
1325		goto out;
1326
1327	err = get_nodes(new, new_nodes, maxnode);
1328	if (err)
1329		goto out;
1330
1331	/* Find the mm_struct */
1332	rcu_read_lock();
1333	task = pid ? find_task_by_vpid(pid) : current;
1334	if (!task) {
1335		rcu_read_unlock();
1336		err = -ESRCH;
1337		goto out;
1338	}
1339	get_task_struct(task);
1340
1341	err = -EINVAL;
1342
1343	/*
1344	 * Check if this process has the right to modify the specified
1345	 * process. The right exists if the process has administrative
1346	 * capabilities, superuser privileges or the same
1347	 * userid as the target process.
1348	 */
1349	tcred = __task_cred(task);
1350	if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
1351	    !uid_eq(cred->uid,  tcred->suid) && !uid_eq(cred->uid,  tcred->uid) &&
1352	    !capable(CAP_SYS_NICE)) {
1353		rcu_read_unlock();
1354		err = -EPERM;
1355		goto out_put;
1356	}
1357	rcu_read_unlock();
1358
1359	task_nodes = cpuset_mems_allowed(task);
1360	/* Is the user allowed to access the target nodes? */
1361	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
1362		err = -EPERM;
1363		goto out_put;
1364	}
1365
1366	if (!nodes_subset(*new, node_states[N_HIGH_MEMORY])) {
1367		err = -EINVAL;
1368		goto out_put;
1369	}
1370
1371	err = security_task_movememory(task);
1372	if (err)
1373		goto out_put;
1374
1375	mm = get_task_mm(task);
1376	put_task_struct(task);
1377
1378	if (!mm) {
1379		err = -EINVAL;
1380		goto out;
1381	}
1382
1383	err = do_migrate_pages(mm, old, new,
1384		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
1385
1386	mmput(mm);
1387out:
1388	NODEMASK_SCRATCH_FREE(scratch);
1389
1390	return err;
1391
1392out_put:
1393	put_task_struct(task);
1394	goto out;
1395
1396}
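/*
 * A minimal userspace sketch for the migrate_pages() entry point above,
 * assuming libnuma's <numaif.h> wrapper: ask the kernel to move the
 * calling process's pages from node 0 to node 1.  The return value is
 * the number of pages that could not be moved, or -1 on error.
 */
#if 0	/* example only */
#include <numaif.h>

static long move_self_node0_to_node1(void)
{
	unsigned long old_nodes = 1UL << 0;
	unsigned long new_nodes = 1UL << 1;

	return migrate_pages(0 /* self */, 8 * sizeof(unsigned long),
			     &old_nodes, &new_nodes);
}
#endif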
1397
1398
1399/* Retrieve NUMA policy */
1400SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1401		unsigned long __user *, nmask, unsigned long, maxnode,
1402		unsigned long, addr, unsigned long, flags)
1403{
1404	int err;
1405	int uninitialized_var(pval);
1406	nodemask_t nodes;
1407
1408	if (nmask != NULL && maxnode < MAX_NUMNODES)
1409		return -EINVAL;
1410
1411	err = do_get_mempolicy(&pval, &nodes, addr, flags);
1412
1413	if (err)
1414		return err;
1415
1416	if (policy && put_user(pval, policy))
1417		return -EFAULT;
1418
1419	if (nmask)
1420		err = copy_nodes_to_user(nmask, maxnode, &nodes);
1421
1422	return err;
1423}
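/*
 * A minimal userspace sketch for the get_mempolicy() entry point above,
 * assuming libnuma's <numaif.h> wrapper: ask which policy governs a
 * given address.  Note the check above: when nmask is non-NULL,
 * maxnode must cover at least MAX_NUMNODES bits, hence the generously
 * sized mask (1024 bits covers common configurations).
 */
#if 0	/* example only */
#include <numaif.h>
#include <stdio.h>

static void show_policy_at(void *addr)
{
	int mode;
	unsigned long mask[16] = { 0 };		/* 1024 bits on 64-bit */

	if (get_mempolicy(&mode, mask, 8 * sizeof(mask), addr,
			  MPOL_F_ADDR) == 0)
		printf("mode %d, first mask word 0x%lx\n", mode, mask[0]);
}
#endif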
1424
1425#ifdef CONFIG_COMPAT
1426
1427asmlinkage long compat_sys_get_mempolicy(int __user *policy,
1428				     compat_ulong_t __user *nmask,
1429				     compat_ulong_t maxnode,
1430				     compat_ulong_t addr, compat_ulong_t flags)
1431{
1432	long err;
1433	unsigned long __user *nm = NULL;
1434	unsigned long nr_bits, alloc_size;
1435	DECLARE_BITMAP(bm, MAX_NUMNODES);
1436
1437	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1438	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1439
1440	if (nmask)
1441		nm = compat_alloc_user_space(alloc_size);
1442
1443	err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1444
1445	if (!err && nmask) {
1446		unsigned long copy_size;
1447		copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
1448		err = copy_from_user(bm, nm, copy_size);
1449		/* ensure entire bitmap is zeroed */
1450		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1451		err |= compat_put_bitmap(nmask, bm, nr_bits);
1452	}
1453
1454	return err;
1455}
1456
1457asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
1458				     compat_ulong_t maxnode)
1459{
1460	long err = 0;
1461	unsigned long __user *nm = NULL;
1462	unsigned long nr_bits, alloc_size;
1463	DECLARE_BITMAP(bm, MAX_NUMNODES);
1464
1465	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1466	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1467
1468	if (nmask) {
1469		err = compat_get_bitmap(bm, nmask, nr_bits);
1470		nm = compat_alloc_user_space(alloc_size);
1471		err |= copy_to_user(nm, bm, alloc_size);
1472	}
1473
1474	if (err)
1475		return -EFAULT;
1476
1477	return sys_set_mempolicy(mode, nm, nr_bits+1);
1478}
1479
1480asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
1481			     compat_ulong_t mode, compat_ulong_t __user *nmask,
1482			     compat_ulong_t maxnode, compat_ulong_t flags)
1483{
1484	long err = 0;
1485	unsigned long __user *nm = NULL;
1486	unsigned long nr_bits, alloc_size;
1487	nodemask_t bm;
1488
1489	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1490	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1491
1492	if (nmask) {
1493		err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
1494		nm = compat_alloc_user_space(alloc_size);
1495		err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
1496	}
1497
1498	if (err)
1499		return -EFAULT;
1500
1501	return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
1502}
1503
1504#endif
1505
1506/*
1507 * get_vma_policy(@task, @vma, @addr)
1508 * @task - task for fallback if vma policy == default
1509 * @vma   - virtual memory area whose policy is sought
1510 * @addr  - address in @vma for shared policy lookup
1511 *
1512 * Returns effective policy for a VMA at specified address.
1513 * Falls back to @task or system default policy, as necessary.
1514 * Current or other task's task mempolicy and non-shared vma policies
1515 * are protected by the task's mmap_sem, which must be held for read by
1516 * the caller.
1517 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1518 * count--added by the get_policy() vm_op, as appropriate--to protect against
1519 * freeing by another task.  It is the caller's responsibility to free the
1520 * extra reference for shared policies.
1521 */
1522struct mempolicy *get_vma_policy(struct task_struct *task,
1523		struct vm_area_struct *vma, unsigned long addr)
1524{
1525	struct mempolicy *pol = task->mempolicy;
1526
1527	if (vma) {
1528		if (vma->vm_ops && vma->vm_ops->get_policy) {
1529			struct mempolicy *vpol = vma->vm_ops->get_policy(vma,
1530									addr);
1531			if (vpol)
1532				pol = vpol;
1533		} else if (vma->vm_policy)
1534			pol = vma->vm_policy;
1535	}
1536	if (!pol)
1537		pol = &default_policy;
1538	return pol;
1539}
1540
1541/*
1542 * Return a nodemask representing a mempolicy for filtering nodes for
1543 * page allocation
1544 */
1545static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
1546{
1547	/* Lower zones don't get a nodemask applied for MPOL_BIND */
1548	if (unlikely(policy->mode == MPOL_BIND) &&
1549			gfp_zone(gfp) >= policy_zone &&
1550			cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1551		return &policy->v.nodes;
1552
1553	return NULL;
1554}
1555
1556/* Return a zonelist indicated by gfp for node representing a mempolicy */
1557static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
1558	int nd)
1559{
1560	switch (policy->mode) {
1561	case MPOL_PREFERRED:
1562		if (!(policy->flags & MPOL_F_LOCAL))
1563			nd = policy->v.preferred_node;
1564		break;
1565	case MPOL_BIND:
1566		/*
1567		 * Normally, MPOL_BIND allocations are node-local within the
1568		 * allowed nodemask.  However, if __GFP_THISNODE is set and the
1569		 * current node isn't part of the mask, we use the zonelist for
1570		 * the first node in the mask instead.
1571		 */
1572		if (unlikely(gfp & __GFP_THISNODE) &&
1573				unlikely(!node_isset(nd, policy->v.nodes)))
1574			nd = first_node(policy->v.nodes);
1575		break;
1576	default:
1577		BUG();
1578	}
1579	return node_zonelist(nd, gfp);
1580}
1581
1582/* Do dynamic interleaving for a process */
1583static unsigned interleave_nodes(struct mempolicy *policy)
1584{
1585	unsigned nid, next;
1586	struct task_struct *me = current;
1587
1588	nid = me->il_next;
1589	next = next_node(nid, policy->v.nodes);
1590	if (next >= MAX_NUMNODES)
1591		next = first_node(policy->v.nodes);
1592	if (next < MAX_NUMNODES)
1593		me->il_next = next;
1594	return nid;
1595}
1596
1597/*
1598 * Depending on the memory policy provide a node from which to allocate the
1599 * next slab entry.
1600 * @policy must be protected from freeing by the caller.  If @policy is
1601 * the current task's mempolicy, this protection is implicit, as only the
1602 * task can change its policy.  The system default policy requires no
1603 * such protection.
1604 */
1605unsigned slab_node(struct mempolicy *policy)
1606{
1607	if (!policy || policy->flags & MPOL_F_LOCAL)
1608		return numa_node_id();
1609
1610	switch (policy->mode) {
1611	case MPOL_PREFERRED:
1612		/*
1613		 * handled MPOL_F_LOCAL above
1614		 */
1615		return policy->v.preferred_node;
1616
1617	case MPOL_INTERLEAVE:
1618		return interleave_nodes(policy);
1619
1620	case MPOL_BIND: {
1621		/*
1622		 * Follow bind policy behavior and start allocation at the
1623		 * first node.
1624		 */
1625		struct zonelist *zonelist;
1626		struct zone *zone;
1627		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1628		zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0];
1629		(void)first_zones_zonelist(zonelist, highest_zoneidx,
1630							&policy->v.nodes,
1631							&zone);
1632		return zone ? zone->node : numa_node_id();
1633	}
1634
1635	default:
1636		BUG();
1637	}
1638}
1639
1640/* Do static interleaving for a VMA with known offset. */
1641static unsigned offset_il_node(struct mempolicy *pol,
1642		struct vm_area_struct *vma, unsigned long off)
1643{
1644	unsigned nnodes = nodes_weight(pol->v.nodes);
1645	unsigned target;
1646	int c;
1647	int nid = -1;
1648
1649	if (!nnodes)
1650		return numa_node_id();
1651	target = (unsigned int)off % nnodes;
1652	c = 0;
1653	do {
1654		nid = next_node(nid, pol->v.nodes);
1655		c++;
1656	} while (c <= target);
1657	return nid;
1658}
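/*
 * Worked example (illustrative): with pol->v.nodes == {0,2,5}
 * (nnodes == 3) and off == 7, target == 7 % 3 == 1, so the loop steps
 * past node 0 and returns node 2: the second node of the mask, chosen
 * purely by the offset into the mapping.
 */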
1659
1660/* Determine a node number for interleave */
1661static inline unsigned interleave_nid(struct mempolicy *pol,
1662		 struct vm_area_struct *vma, unsigned long addr, int shift)
1663{
1664	if (vma) {
1665		unsigned long off;
1666
1667		/*
1668		 * for small pages, there is no difference between
1669		 * shift and PAGE_SHIFT, so the bit-shift is safe.
1670		 * for huge pages, since vm_pgoff is in units of small
1671		 * pages, we need to shift off the always 0 bits to get
1672		 * a useful offset.
1673		 */
1674		BUG_ON(shift < PAGE_SHIFT);
1675		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
1676		off += (addr - vma->vm_start) >> shift;
1677		return offset_il_node(pol, vma, off);
1678	} else
1679		return interleave_nodes(pol);
1680}
1681
1682/*
1683 * Return the bit number of a random bit set in the nodemask.
1684 * (returns -1 if nodemask is empty)
1685 */
1686int node_random(const nodemask_t *maskp)
1687{
1688	int w, bit = -1;
1689
1690	w = nodes_weight(*maskp);
1691	if (w)
1692		bit = bitmap_ord_to_pos(maskp->bits,
1693			get_random_int() % w, MAX_NUMNODES);
1694	return bit;
1695}
1696
1697#ifdef CONFIG_HUGETLBFS
1698/*
1699 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
1700 * @vma = virtual memory area whose policy is sought
1701 * @addr = address in @vma for shared policy lookup and interleave policy
1702 * @gfp_flags = for requested zone
1703 * @mpol = pointer to mempolicy pointer for reference counted mempolicy
1704 * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask
1705 *
1706 * Returns a zonelist suitable for a huge page allocation and a pointer
1707 * to the struct mempolicy for conditional unref after allocation.
1708 * If the effective policy is 'BIND', returns a pointer to the mempolicy's
1709 * @nodemask for filtering the zonelist.
1710 *
1711 * Must be protected by get_mems_allowed()
1712 */
1713struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
1714				gfp_t gfp_flags, struct mempolicy **mpol,
1715				nodemask_t **nodemask)
1716{
1717	struct zonelist *zl;
1718
1719	*mpol = get_vma_policy(current, vma, addr);
1720	*nodemask = NULL;	/* assume !MPOL_BIND */
1721
1722	if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
1723		zl = node_zonelist(interleave_nid(*mpol, vma, addr,
1724				huge_page_shift(hstate_vma(vma))), gfp_flags);
1725	} else {
1726		zl = policy_zonelist(gfp_flags, *mpol, numa_node_id());
1727		if ((*mpol)->mode == MPOL_BIND)
1728			*nodemask = &(*mpol)->v.nodes;
1729	}
1730	return zl;
1731}
1732
1733/*
1734 * init_nodemask_of_mempolicy
1735 *
1736 * If the current task's mempolicy is "default" [NULL], return 'false'
1737 * to indicate default policy.  Otherwise, extract the policy nodemask
1738 * for 'bind' or 'interleave' policy into the argument nodemask, or
1739 * initialize the argument nodemask to contain the single node for
1740 * 'preferred' or 'local' policy and return 'true' to indicate presence
1741 * of non-default mempolicy.
1742 *
1743 * We don't bother with reference counting the mempolicy [mpol_get/put]
1744 * because the current task is examining its own mempolicy and a task's
1745 * mempolicy is only ever changed by the task itself.
1746 *
1747 * N.B., it is the caller's responsibility to free a returned nodemask.
1748 */
1749bool init_nodemask_of_mempolicy(nodemask_t *mask)
1750{
1751	struct mempolicy *mempolicy;
1752	int nid;
1753
1754	if (!(mask && current->mempolicy))
1755		return false;
1756
1757	task_lock(current);
1758	mempolicy = current->mempolicy;
1759	switch (mempolicy->mode) {
1760	case MPOL_PREFERRED:
1761		if (mempolicy->flags & MPOL_F_LOCAL)
1762			nid = numa_node_id();
1763		else
1764			nid = mempolicy->v.preferred_node;
1765		init_nodemask_of_node(mask, nid);
1766		break;
1767
1768	case MPOL_BIND:
1769		/* Fall through */
1770	case MPOL_INTERLEAVE:
1771		*mask =  mempolicy->v.nodes;
1772		break;
1773
1774	default:
1775		BUG();
1776	}
1777	task_unlock(current);
1778
1779	return true;
1780}
1781#endif
1782
1783/*
1784 * mempolicy_nodemask_intersects
1785 *
1786 * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
1787 * policy.  Otherwise, check for intersection between mask and the policy
1788 * nodemask for 'bind' or 'interleave' policy.  For 'preferred' or 'local'
1789 * policy, always return true since it may allocate elsewhere on fallback.
1790 *
1791 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
1792 */
1793bool mempolicy_nodemask_intersects(struct task_struct *tsk,
1794					const nodemask_t *mask)
1795{
1796	struct mempolicy *mempolicy;
1797	bool ret = true;
1798
1799	if (!mask)
1800		return ret;
1801	task_lock(tsk);
1802	mempolicy = tsk->mempolicy;
1803	if (!mempolicy)
1804		goto out;
1805
1806	switch (mempolicy->mode) {
1807	case MPOL_PREFERRED:
1808		/*
1809		 * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to
1810		 * allocate from; they may fall back to other nodes when OOM.
1811		 * Thus, it's possible for tsk to have allocated memory from
1812		 * nodes in mask.
1813		 */
1814		break;
1815	case MPOL_BIND:
1816	case MPOL_INTERLEAVE:
1817		ret = nodes_intersects(mempolicy->v.nodes, *mask);
1818		break;
1819	default:
1820		BUG();
1821	}
1822out:
1823	task_unlock(tsk);
1824	return ret;
1825}
1826
1827/* Allocate a page in interleaved policy.
1828   Own path because it needs to do special accounting. */
1829static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1830					unsigned nid)
1831{
1832	struct zonelist *zl;
1833	struct page *page;
1834
1835	zl = node_zonelist(nid, gfp);
1836	page = __alloc_pages(gfp, order, zl);
1837	if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
1838		inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
1839	return page;
1840}
1841
1842/**
1843 * 	alloc_pages_vma	- Allocate a page for a VMA.
1844 *
1845 * 	@gfp:
1846 *      %GFP_USER    user allocation.
1847 *      %GFP_KERNEL  kernel allocations,
1848 *      %GFP_HIGHMEM highmem/user allocations,
1849 *      %GFP_FS      allocation should not call back into a file system.
1850 *      %GFP_ATOMIC  don't sleep.
1851 *
1852 *	@order:Order of the GFP allocation.
1853 * 	@vma:  Pointer to VMA or NULL if not available.
1854 *	@addr: Virtual Address of the allocation. Must be inside the VMA.
1855 *
1856 * 	This function allocates a page from the kernel page pool and applies
1857 *	a NUMA policy associated with the VMA or the current process.
1858 *	When VMA is not NULL caller must hold down_read on the mmap_sem of the
1859 *	mm_struct of the VMA to prevent it from going away. Should be used for
1860 *	all allocations for pages that will be mapped into
1861 * 	user space. Returns NULL when no page can be allocated.
1862 *
1863 *	Should be called with the mmap_sem of the vma held.
1864 */
1865struct page *
1866alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
1867		unsigned long addr, int node)
1868{
1869	struct mempolicy *pol;
1870	struct zonelist *zl;
1871	struct page *page;
1872	unsigned int cpuset_mems_cookie;
1873
1874retry_cpuset:
1875	pol = get_vma_policy(current, vma, addr);
1876	cpuset_mems_cookie = get_mems_allowed();
1877
1878	if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
1879		unsigned nid;
1880
1881		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
1882		mpol_cond_put(pol);
1883		page = alloc_page_interleave(gfp, order, nid);
1884		if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
1885			goto retry_cpuset;
1886
1887		return page;
1888	}
1889	zl = policy_zonelist(gfp, pol, node);
1890	if (unlikely(mpol_needs_cond_ref(pol))) {
1891		/*
1892		 * slow path: ref counted shared policy
1893		 */
1894		struct page *page =  __alloc_pages_nodemask(gfp, order,
1895						zl, policy_nodemask(gfp, pol));
1896		__mpol_put(pol);
1897		if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
1898			goto retry_cpuset;
1899		return page;
1900	}
1901	/*
1902	 * fast path:  default or task policy
1903	 */
1904	page = __alloc_pages_nodemask(gfp, order, zl,
1905				      policy_nodemask(gfp, pol));
1906	if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
1907		goto retry_cpuset;
1908	return page;
1909}
1910
1911/**
1912 * 	alloc_pages_current - Allocate pages.
1913 *
1914 *	@gfp:
1915 *		%GFP_USER   user allocation,
1916 *      	%GFP_KERNEL kernel allocation,
1917 *      	%GFP_HIGHMEM highmem allocation,
1918 *      	%GFP_FS     don't call back into a file system.
1919 *      	%GFP_ATOMIC don't sleep.
1920 *	@order: Power of two of allocation size in pages. 0 is a single page.
1921 *
1922 *	Allocate a page from the kernel page pool.  When not in
1923 *	interrupt context, apply the current process' NUMA policy.
1924 *	Returns NULL when no page can be allocated.
1925 *
1926 *	Don't call cpuset_update_task_memory_state() unless
1927 *	1) it's ok to take cpuset_sem (can WAIT), and
1928 *	2) allocating for current task (not interrupt).
1929 */
1930struct page *alloc_pages_current(gfp_t gfp, unsigned order)
1931{
1932	struct mempolicy *pol = current->mempolicy;
1933	struct page *page;
1934	unsigned int cpuset_mems_cookie;
1935
1936	if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
1937		pol = &default_policy;
1938
1939retry_cpuset:
1940	cpuset_mems_cookie = get_mems_allowed();
1941
1942	/*
1943	 * No reference counting needed for current->mempolicy
1944	 * nor system default_policy
1945	 */
1946	if (pol->mode == MPOL_INTERLEAVE)
1947		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
1948	else
1949		page = __alloc_pages_nodemask(gfp, order,
1950				policy_zonelist(gfp, pol, numa_node_id()),
1951				policy_nodemask(gfp, pol));
1952
1953	if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
1954		goto retry_cpuset;
1955
1956	return page;
1957}
1958EXPORT_SYMBOL(alloc_pages_current);
1959
1960/*
1961 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
1962 * rebinds the mempolicy it's copying by calling mpol_rebind_policy()
1963 * with the mems_allowed returned by cpuset_mems_allowed().  This
1964 * keeps mempolicies cpuset relative after its cpuset moves.  See
1965 * further kernel/cpuset.c update_nodemask().
1966 *
1967 * current's mempolicy may be rebound by the other task (the task that changes
1968 * cpuset's mems), so we needn't do rebind work for the current task.
1969 */
1970
1971/* Slow path of a mempolicy duplicate */
1972struct mempolicy *__mpol_dup(struct mempolicy *old)
1973{
1974	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
1975
1976	if (!new)
1977		return ERR_PTR(-ENOMEM);
1978
1979	/* task's mempolicy is protected by alloc_lock */
1980	if (old == current->mempolicy) {
1981		task_lock(current);
1982		*new = *old;
1983		task_unlock(current);
1984	} else
1985		*new = *old;
1986
1987	rcu_read_lock();
1988	if (current_cpuset_is_being_rebound()) {
1989		nodemask_t mems = cpuset_mems_allowed(current);
1990		if (new->flags & MPOL_F_REBINDING)
1991			mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2);
1992		else
1993			mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
1994	}
1995	rcu_read_unlock();
1996	atomic_set(&new->refcnt, 1);
1997	return new;
1998}
1999
2000/*
2001 * If *frompol needs [has] an extra ref, copy *frompol to *tompol,
2002 * eliminate the MPOL_F_* flags that require conditional ref and
2003 * [NOTE!!!] drop the extra ref.  Not safe to reference *frompol directly
2004 * after return.  Use the returned value.
2005 *
2006 * Allows use of a mempolicy for, e.g., multiple allocations with a single
2007 * policy lookup, even if the policy needs/has extra ref on lookup.
2008 * shmem_readahead needs this.
2009 */
2010struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
2011						struct mempolicy *frompol)
2012{
2013	if (!mpol_needs_cond_ref(frompol))
2014		return frompol;
2015
2016	*tompol = *frompol;
2017	tompol->flags &= ~MPOL_F_SHARED;	/* copy doesn't need unref */
2018	__mpol_put(frompol);
2019	return tompol;
2020}
2021
2022/* Slow path of a mempolicy comparison */
2023bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
2024{
2025	if (!a || !b)
2026		return false;
2027	if (a->mode != b->mode)
2028		return false;
2029	if (a->flags != b->flags)
2030		return false;
2031	if (mpol_store_user_nodemask(a))
2032		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2033			return false;
2034
2035	switch (a->mode) {
2036	case MPOL_BIND:
2037		/* Fall through */
2038	case MPOL_INTERLEAVE:
2039		return !!nodes_equal(a->v.nodes, b->v.nodes);
2040	case MPOL_PREFERRED:
2041		return a->v.preferred_node == b->v.preferred_node;
2042	default:
2043		BUG();
2044		return false;
2045	}
2046}
2047
2048/*
2049 * Shared memory backing store policy support.
2050 *
2051 * Remember policies even when nobody has shared memory mapped.
2052 * The policies are kept in Red-Black tree linked from the inode.
2053 * They are protected by the sp->lock spinlock, which should be held
2054 * for any accesses to the tree.
2055 */
2056
2057/* lookup first element intersecting start-end */
2058/* Caller holds sp->lock */
2059static struct sp_node *
2060sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2061{
2062	struct rb_node *n = sp->root.rb_node;
2063
2064	while (n) {
2065		struct sp_node *p = rb_entry(n, struct sp_node, nd);
2066
2067		if (start >= p->end)
2068			n = n->rb_right;
2069		else if (end <= p->start)
2070			n = n->rb_left;
2071		else
2072			break;
2073	}
2074	if (!n)
2075		return NULL;
2076	for (;;) {
2077		struct sp_node *w = NULL;
2078		struct rb_node *prev = rb_prev(n);
2079		if (!prev)
2080			break;
2081		w = rb_entry(prev, struct sp_node, nd);
2082		if (w->end <= start)
2083			break;
2084		n = prev;
2085	}
2086	return rb_entry(n, struct sp_node, nd);
2087}
2088
2089/* Insert a new shared policy into the list. */
2090/* Caller holds sp->lock */
2091static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2092{
2093	struct rb_node **p = &sp->root.rb_node;
2094	struct rb_node *parent = NULL;
2095	struct sp_node *nd;
2096
2097	while (*p) {
2098		parent = *p;
2099		nd = rb_entry(parent, struct sp_node, nd);
2100		if (new->start < nd->start)
2101			p = &(*p)->rb_left;
2102		else if (new->end > nd->end)
2103			p = &(*p)->rb_right;
2104		else
2105			BUG();
2106	}
2107	rb_link_node(&new->nd, parent, p);
2108	rb_insert_color(&new->nd, &sp->root);
2109	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
2110		 new->policy ? new->policy->mode : 0);
2111}
2112
2113/* Find shared policy intersecting idx */
2114struct mempolicy *
2115mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2116{
2117	struct mempolicy *pol = NULL;
2118	struct sp_node *sn;
2119
2120	if (!sp->root.rb_node)
2121		return NULL;
2122	spin_lock(&sp->lock);
2123	sn = sp_lookup(sp, idx, idx+1);
2124	if (sn) {
2125		mpol_get(sn->policy);
2126		pol = sn->policy;
2127	}
2128	spin_unlock(&sp->lock);
2129	return pol;
2130}
2131
2132static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2133{
2134	pr_debug("deleting %lx-%lx\n", n->start, n->end);
2135	rb_erase(&n->nd, &sp->root);
2136	mpol_put(n->policy);
2137	kmem_cache_free(sn_cache, n);
2138}
2139
2140static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2141				struct mempolicy *pol)
2142{
2143	struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2144
2145	if (!n)
2146		return NULL;
2147	n->start = start;
2148	n->end = end;
2149	mpol_get(pol);
2150	pol->flags |= MPOL_F_SHARED;	/* for unref */
2151	n->policy = pol;
2152	return n;
2153}
2154
2155/* Replace a policy range. */
2156static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2157				 unsigned long end, struct sp_node *new)
2158{
2159	struct sp_node *n, *new2 = NULL;
2160
2161restart:
2162	spin_lock(&sp->lock);
2163	n = sp_lookup(sp, start, end);
2164	/* Take care of old policies in the same range. */
2165	while (n && n->start < end) {
2166		struct rb_node *next = rb_next(&n->nd);
2167		if (n->start >= start) {
2168			if (n->end <= end)
2169				sp_delete(sp, n);
2170			else
2171				n->start = end;
2172		} else {
2173			/* Old policy spanning whole new range. */
2174			if (n->end > end) {
2175				if (!new2) {
2176					spin_unlock(&sp->lock);
2177					new2 = sp_alloc(end, n->end, n->policy);
2178					if (!new2)
2179						return -ENOMEM;
2180					goto restart;
2181				}
2182				n->end = start;
2183				sp_insert(sp, new2);
2184				new2 = NULL;
2185				break;
2186			} else
2187				n->end = start;
2188		}
2189		if (!next)
2190			break;
2191		n = rb_entry(next, struct sp_node, nd);
2192	}
2193	if (new)
2194		sp_insert(sp, new);
2195	spin_unlock(&sp->lock);
2196	if (new2) {
2197		mpol_put(new2->policy);
2198		kmem_cache_free(sn_cache, new2);
2199	}
2200	return 0;
2201}
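/*
 * Worked example (illustrative): if an existing sp_node spans pages
 * [2, 10) and a new policy is installed for [4, 6), the old node is
 * trimmed to [2, 4), the freshly allocated new2 keeps the old policy
 * for [6, 10), and the new node is inserted for [4, 6).
 */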
2202
2203/**
2204 * mpol_shared_policy_init - initialize shared policy for inode
2205 * @sp: pointer to inode shared policy
2206 * @mpol:  struct mempolicy to install
2207 *
2208 * Install non-NULL @mpol in inode's shared policy rb-tree.
2209 * On entry, the current task has a reference on a non-NULL @mpol.
2210 * This must be released on exit.
2211 * This is called during get_inode() calls, so we can use GFP_KERNEL.
2212 */
2213void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2214{
2215	int ret;
2216
2217	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
2218	spin_lock_init(&sp->lock);
2219
2220	if (mpol) {
2221		struct vm_area_struct pvma;
2222		struct mempolicy *new;
2223		NODEMASK_SCRATCH(scratch);
2224
2225		if (!scratch)
2226			goto put_mpol;
2227		/* contextualize the tmpfs mount point mempolicy */
2228		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
2229		if (IS_ERR(new))
2230			goto free_scratch; /* no valid nodemask intersection */
2231
2232		task_lock(current);
2233		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
2234		task_unlock(current);
2235		if (ret)
2236			goto put_new;
2237
2238		/* Create pseudo-vma that contains just the policy */
2239		memset(&pvma, 0, sizeof(struct vm_area_struct));
2240		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
2241		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
2242
2243put_new:
2244		mpol_put(new);			/* drop initial ref */
2245free_scratch:
2246		NODEMASK_SCRATCH_FREE(scratch);
2247put_mpol:
2248		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
2249	}
2250}
2251
2252int mpol_set_shared_policy(struct shared_policy *info,
2253			struct vm_area_struct *vma, struct mempolicy *npol)
2254{
2255	int err;
2256	struct sp_node *new = NULL;
2257	unsigned long sz = vma_pages(vma);
2258
2259	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
2260		 vma->vm_pgoff,
2261		 sz, npol ? npol->mode : -1,
2262		 npol ? npol->flags : -1,
2263		 npol ? nodes_addr(npol->v.nodes)[0] : -1);
2264
2265	if (npol) {
2266		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2267		if (!new)
2268			return -ENOMEM;
2269	}
2270	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2271	if (err && new)
2272		kmem_cache_free(sn_cache, new);
2273	return err;
2274}
2275
2276/* Free a backing policy store on inode delete. */
2277void mpol_free_shared_policy(struct shared_policy *p)
2278{
2279	struct sp_node *n;
2280	struct rb_node *next;
2281
2282	if (!p->root.rb_node)
2283		return;
2284	spin_lock(&p->lock);
2285	next = rb_first(&p->root);
2286	while (next) {
2287		n = rb_entry(next, struct sp_node, nd);
2288		next = rb_next(&n->nd);
2289		rb_erase(&n->nd, &p->root);
2290		mpol_put(n->policy);
2291		kmem_cache_free(sn_cache, n);
2292	}
2293	spin_unlock(&p->lock);
2294}
2295
2296/* assumes fs == KERNEL_DS */
2297void __init numa_policy_init(void)
2298{
2299	nodemask_t interleave_nodes;
2300	unsigned long largest = 0;
2301	int nid, prefer = 0;
2302
2303	policy_cache = kmem_cache_create("numa_policy",
2304					 sizeof(struct mempolicy),
2305					 0, SLAB_PANIC, NULL);
2306
2307	sn_cache = kmem_cache_create("shared_policy_node",
2308				     sizeof(struct sp_node),
2309				     0, SLAB_PANIC, NULL);
2310
2311	/*
2312	 * Set interleaving policy for system init. Interleaving is only
2313	 * enabled across suitably sized nodes (default is >= 16MB), or
2314	 * fall back to the largest node if they're all smaller.
2315	 */
2316	nodes_clear(interleave_nodes);
2317	for_each_node_state(nid, N_HIGH_MEMORY) {
2318		unsigned long total_pages = node_present_pages(nid);
2319
2320		/* Preserve the largest node */
2321		if (largest < total_pages) {
2322			largest = total_pages;
2323			prefer = nid;
2324		}
2325
2326		/* Interleave this node? */
2327		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2328			node_set(nid, interleave_nodes);
2329	}
2330
2331	/* All too small, use the largest */
2332	if (unlikely(nodes_empty(interleave_nodes)))
2333		node_set(prefer, interleave_nodes);
2334
2335	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2336		printk("numa_policy_init: interleaving failed\n");
2337}
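/*
 * Worked example (illustrative): on a machine whose nodes hold 8 MB,
 * 64 MB and 512 MB, only the latter two pass the 16 MB threshold, so
 * init runs with MPOL_INTERLEAVE over those two nodes; if every node
 * were below 16 MB, the single largest node would be used instead.
 */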
2338
2339/* Reset policy of current process to default */
2340void numa_default_policy(void)
2341{
2342	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
2343}
2344
2345/*
2346 * Parse and format mempolicy from/to strings
2347 */
2348
2349/*
2350 * "local" is pseudo-policy:  MPOL_PREFERRED with MPOL_F_LOCAL flag
2351 * Used only for mpol_parse_str() and mpol_to_str()
2352 */
2353#define MPOL_LOCAL MPOL_MAX
2354static const char * const policy_modes[] =
2355{
2356	[MPOL_DEFAULT]    = "default",
2357	[MPOL_PREFERRED]  = "prefer",
2358	[MPOL_BIND]       = "bind",
2359	[MPOL_INTERLEAVE] = "interleave",
2360	[MPOL_LOCAL]      = "local"
2361};
2362
2363
2364#ifdef CONFIG_TMPFS
2365/**
2366 * mpol_parse_str - parse string to mempolicy
2367 * @str:  string containing mempolicy to parse
2368 * @mpol:  pointer to struct mempolicy pointer, returned on success.
2369 * @no_context:  flag whether to "contextualize" the mempolicy
2370 *
2371 * Format of input:
2372 *	<mode>[=<flags>][:<nodelist>]
2373 *
2374 * if @no_context is true, save the input nodemask in w.user_nodemask in
2375 * the returned mempolicy.  This will be used to "clone" the mempolicy in
2376 * a specific context [cpuset] at a later time.  Used to parse tmpfs mpol
2377 * mount option.  Note that if 'static' or 'relative' mode flags were
2378 * specified, the input nodemask will already have been saved.  Saving
2379 * it again is redundant, but safe.
2380 *
2381 * On success, returns 0, else 1
2382 */
2383int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
2384{
2385	struct mempolicy *new = NULL;
2386	unsigned short mode;
2387	unsigned short uninitialized_var(mode_flags);
2388	nodemask_t nodes;
2389	char *nodelist = strchr(str, ':');
2390	char *flags = strchr(str, '=');
2391	int err = 1;
2392
2393	if (nodelist) {
2394		/* NUL-terminate mode or flags string */
2395		*nodelist++ = '\0';
2396		if (nodelist_parse(nodelist, nodes))
2397			goto out;
2398		if (!nodes_subset(nodes, node_states[N_HIGH_MEMORY]))
2399			goto out;
2400	} else
2401		nodes_clear(nodes);
2402
2403	if (flags)
2404		*flags++ = '\0';	/* terminate mode string */
2405
2406	for (mode = 0; mode <= MPOL_LOCAL; mode++) {
2407		if (!strcmp(str, policy_modes[mode])) {
2408			break;
2409		}
2410	}
2411	if (mode > MPOL_LOCAL)
2412		goto out;
2413
2414	switch (mode) {
2415	case MPOL_PREFERRED:
2416		/*
2417		 * Insist on a nodelist of one node only
2418		 */
2419		if (nodelist) {
2420			char *rest = nodelist;
2421			while (isdigit(*rest))
2422				rest++;
2423			if (*rest)
2424				goto out;
2425		}
2426		break;
2427	case MPOL_INTERLEAVE:
2428		/*
2429		 * Default to online nodes with memory if no nodelist
2430		 */
2431		if (!nodelist)
2432			nodes = node_states[N_HIGH_MEMORY];
2433		break;
2434	case MPOL_LOCAL:
2435		/*
2436		 * Don't allow a nodelist;  mpol_new() checks flags
2437		 */
2438		if (nodelist)
2439			goto out;
2440		mode = MPOL_PREFERRED;
2441		break;
2442	case MPOL_DEFAULT:
2443		/*
2444		 * Insist on an empty nodelist
2445		 */
2446		if (!nodelist)
2447			err = 0;
2448		goto out;
2449	case MPOL_BIND:
2450		/*
2451		 * Insist on a nodelist
2452		 */
2453		if (!nodelist)
2454			goto out;
2455	}
2456
2457	mode_flags = 0;
2458	if (flags) {
2459		/*
2460		 * Currently, we only support two mutually exclusive
2461		 * mode flags.
2462		 */
2463		if (!strcmp(flags, "static"))
2464			mode_flags |= MPOL_F_STATIC_NODES;
2465		else if (!strcmp(flags, "relative"))
2466			mode_flags |= MPOL_F_RELATIVE_NODES;
2467		else
2468			goto out;
2469	}
2470
2471	new = mpol_new(mode, mode_flags, &nodes);
2472	if (IS_ERR(new))
2473		goto out;
2474
2475	if (no_context) {
2476		/* save for contextualization */
2477		new->w.user_nodemask = nodes;
2478	} else {
2479		int ret;
2480		NODEMASK_SCRATCH(scratch);
2481		if (scratch) {
2482			task_lock(current);
2483			ret = mpol_set_nodemask(new, &nodes, scratch);
2484			task_unlock(current);
2485		} else
2486			ret = -ENOMEM;
2487		NODEMASK_SCRATCH_FREE(scratch);
2488		if (ret) {
2489			mpol_put(new);
2490			goto out;
2491		}
2492	}
2493	err = 0;
2494
2495out:
2496	/* Restore string for error message */
2497	if (nodelist)
2498		*--nodelist = ':';
2499	if (flags)
2500		*--flags = '=';
2501	if (!err)
2502		*mpol = new;
2503	return err;
2504}
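/*
 * Examples of strings accepted by mpol_parse_str() above, as seen in
 * tmpfs "mpol=" mount options (illustrative):
 *
 *	interleave:0-3		interleave across nodes 0-3
 *	prefer=static:1		prefer node 1, MPOL_F_STATIC_NODES
 *	bind=relative:0,2	bind to cpuset-relative nodes 0 and 2
 *	local			allocate on the node of the faulting CPU
 *	default			fall back to the process/system policy
 */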
2505#endif /* CONFIG_TMPFS */
2506
2507/**
2508 * mpol_to_str - format a mempolicy structure for printing
2509 * @buffer:  to contain formatted mempolicy string
2510 * @maxlen:  length of @buffer
2511 * @pol:  pointer to mempolicy to be formatted
2512 * @no_context:  "context free" mempolicy - use nodemask in w.user_nodemask
2513 *
2514 * Convert a mempolicy into a string.
2515 * Returns the number of characters in buffer (if positive)
2516 * or an error (negative)
2517 */
2518int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
2519{
2520	char *p = buffer;
2521	int l;
2522	nodemask_t nodes;
2523	unsigned short mode;
2524	unsigned short flags = pol ? pol->flags : 0;
2525
2526	/*
2527	 * Sanity check:  room for longest mode, flag and some nodes
2528	 */
2529	VM_BUG_ON(maxlen < strlen("interleave") + strlen("relative") + 16);
2530
2531	if (!pol || pol == &default_policy)
2532		mode = MPOL_DEFAULT;
2533	else
2534		mode = pol->mode;
2535
2536	switch (mode) {
2537	case MPOL_DEFAULT:
2538		nodes_clear(nodes);
2539		break;
2540
2541	case MPOL_PREFERRED:
2542		nodes_clear(nodes);
2543		if (flags & MPOL_F_LOCAL)
2544			mode = MPOL_LOCAL;	/* pseudo-policy */
2545		else
2546			node_set(pol->v.preferred_node, nodes);
2547		break;
2548
2549	case MPOL_BIND:
2550		/* Fall through */
2551	case MPOL_INTERLEAVE:
2552		if (no_context)
2553			nodes = pol->w.user_nodemask;
2554		else
2555			nodes = pol->v.nodes;
2556		break;
2557
2558	default:
2559		return -EINVAL;
2560	}
2561
2562	l = strlen(policy_modes[mode]);
2563	if (buffer + maxlen < p + l + 1)
2564		return -ENOSPC;
2565
2566	strcpy(p, policy_modes[mode]);
2567	p += l;
2568
2569	if (flags & MPOL_MODE_FLAGS) {
2570		if (buffer + maxlen < p + 2)
2571			return -ENOSPC;
2572		*p++ = '=';
2573
2574		/*
2575		 * Currently, the only defined flags are mutually exclusive
2576		 */
2577		if (flags & MPOL_F_STATIC_NODES)
2578			p += snprintf(p, buffer + maxlen - p, "static");
2579		else if (flags & MPOL_F_RELATIVE_NODES)
2580			p += snprintf(p, buffer + maxlen - p, "relative");
2581	}
2582
2583	if (!nodes_empty(nodes)) {
2584		if (buffer + maxlen < p + 2)
2585			return -ENOSPC;
2586		*p++ = ':';
2587	 	p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
2588	}
2589	return p - buffer;
2590}