/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins 2003, 2004
 */

/*
 * Lock ordering in mm:
 *
 * inode->i_mutex	(while writing or truncating, not reading or faulting)
 *   mm->mmap_sem
 *     page->flags PG_locked (lock_page)
 *       mapping->i_mmap_mutex
 *         anon_vma->mutex
 *           mm->page_table_lock or pte_lock
 *             zone->lru_lock (in mark_page_accessed, isolate_lru_page)
 *             swap_lock (in swap_duplicate, swap_info_get)
 *               mmlist_lock (in mmput, drain_mmlist and others)
 *               mapping->private_lock (in __set_page_dirty_buffers)
 *               inode->i_lock (in set_page_dirty's __mark_inode_dirty)
 *               bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
 *                 sb_lock (within inode_lock in fs/fs-writeback.c)
 *                 mapping->tree_lock (widely used, in set_page_dirty,
 *                           in arch-dependent flush_dcache_mmap_lock,
 *                           within bdi.wb->list_lock in __sync_single_inode)
 *
 * anon_vma->mutex,mapping->i_mutex      (memory_failure, collect_procs_anon)
 *   ->tasklist_lock
 *     pte map lock
 */
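
/*
 * Illustrative sketch (not part of the original file): taking the outer
 * locks in the documented order for a file page.  Hypothetical function,
 * compiled out; real paths such as truncation follow this nesting.
 */
#if 0
static void example_lock_order(struct vm_area_struct *vma, struct page *page)
{
	struct address_space *mapping = page->mapping;

	down_read(&vma->vm_mm->mmap_sem);	/* mm->mmap_sem */
	lock_page(page);			/* page->flags PG_locked */
	mutex_lock(&mapping->i_mmap_mutex);	/* mapping->i_mmap_mutex */
	/* ... the pte lock would nest inside, per the list above ... */
	mutex_unlock(&mapping->i_mmap_mutex);
	unlock_page(page);
	up_read(&vma->vm_mm->mmap_sem);
}
#endif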

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/module.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>

#include <asm/tlbflush.h>

#include "internal.h"

static struct kmem_cache *anon_vma_cachep;
static struct kmem_cache *anon_vma_chain_cachep;

static inline struct anon_vma *anon_vma_alloc(void)
{
	struct anon_vma *anon_vma;

	anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
	if (anon_vma) {
		atomic_set(&anon_vma->refcount, 1);
		/*
		 * Initialise the anon_vma root to point to itself. If called
		 * from fork, the root will be reset to the parent's anon_vma.
		 */
		anon_vma->root = anon_vma;
	}

	return anon_vma;
}

static inline void anon_vma_free(struct anon_vma *anon_vma)
{
	VM_BUG_ON(atomic_read(&anon_vma->refcount));

	/*
	 * Synchronize against page_lock_anon_vma() such that
	 * we can safely hold the lock without the anon_vma getting
	 * freed.
	 *
	 * Relies on the full mb implied by the atomic_dec_and_test() from
	 * put_anon_vma() against the acquire barrier implied by
	 * mutex_trylock() from page_lock_anon_vma(). This orders:
	 *
	 * page_lock_anon_vma()		VS	put_anon_vma()
	 *   mutex_trylock()			  atomic_dec_and_test()
	 *   LOCK				  MB
	 *   atomic_read()			  mutex_is_locked()
	 *
	 * LOCK should suffice since the actual taking of the lock must
	 * happen _before_ what follows.
	 */
	if (mutex_is_locked(&anon_vma->root->mutex)) {
		anon_vma_lock(anon_vma);
		anon_vma_unlock(anon_vma);
	}

	kmem_cache_free(anon_vma_cachep, anon_vma);
}
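
/*
 * For reference, the put_anon_vma() side of the pairing described above
 * lives in include/linux/rmap.h in this era and is, roughly (sketch, not
 * part of this file):
 */
#if 0
static inline void put_anon_vma(struct anon_vma *anon_vma)
{
	/*
	 * The atomic_dec_and_test() supplies the full barrier that
	 * anon_vma_free() above pairs against via mutex_is_locked().
	 */
	if (atomic_dec_and_test(&anon_vma->refcount))
		__put_anon_vma(anon_vma);
}
#endif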

static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
{
	return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
}

static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
{
	kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
}

/**
 * anon_vma_prepare - attach an anon_vma to a memory region
 * @vma: the memory region in question
 *
 * This makes sure the memory mapping described by 'vma' has
 * an 'anon_vma' attached to it, so that we can associate the
 * anonymous pages mapped into it with that anon_vma.
 *
 * The common case will be that we already have one, but if
 * not we either need to find an adjacent mapping that we
 * can re-use the anon_vma from (very common when the only
 * reason for splitting a vma has been mprotect()), or we
 * allocate a new one.
 *
 * Anon-vma allocations are very subtle, because we may have
 * optimistically looked up an anon_vma in page_lock_anon_vma()
 * and that may actually touch the spinlock even in the newly
 * allocated vma (it depends on RCU to make sure that the
 * anon_vma isn't actually destroyed).
 *
 * As a result, we need to do proper anon_vma locking even
 * for the new allocation. At the same time, we do not want
 * to do any locking for the common case of already having
 * an anon_vma.
 *
 * This must be called with the mmap_sem held for reading.
 */
int anon_vma_prepare(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	struct anon_vma_chain *avc;

	might_sleep();
	if (unlikely(!anon_vma)) {
		struct mm_struct *mm = vma->vm_mm;
		struct anon_vma *allocated;

		avc = anon_vma_chain_alloc(GFP_KERNEL);
		if (!avc)
			goto out_enomem;

		anon_vma = find_mergeable_anon_vma(vma);
		allocated = NULL;
		if (!anon_vma) {
			anon_vma = anon_vma_alloc();
			if (unlikely(!anon_vma))
				goto out_enomem_free_avc;
			allocated = anon_vma;
		}

		anon_vma_lock(anon_vma);
		/* page_table_lock to protect against threads */
		spin_lock(&mm->page_table_lock);
		if (likely(!vma->anon_vma)) {
			vma->anon_vma = anon_vma;
			avc->anon_vma = anon_vma;
			avc->vma = vma;
			list_add(&avc->same_vma, &vma->anon_vma_chain);
			list_add_tail(&avc->same_anon_vma, &anon_vma->head);
			allocated = NULL;
			avc = NULL;
		}
		spin_unlock(&mm->page_table_lock);
		anon_vma_unlock(anon_vma);

		if (unlikely(allocated))
			put_anon_vma(allocated);
		if (unlikely(avc))
			anon_vma_chain_free(avc);
	}
	return 0;

 out_enomem_free_avc:
	anon_vma_chain_free(avc);
 out_enomem:
	return -ENOMEM;
}
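
/*
 * Usage sketch (hypothetical caller, modelled on the anonymous-fault path
 * in mm/memory.c): anon_vma_prepare() must run, with mmap_sem held for
 * reading, before the first anonymous page is mapped into the vma.
 */
#if 0
static int example_anon_fault(struct vm_area_struct *vma,
			      unsigned long address, struct page *page)
{
	if (unlikely(anon_vma_prepare(vma)))
		return VM_FAULT_OOM;
	/* ... allocate and zero the page, take the pte lock ... */
	page_add_new_anon_rmap(page, vma, address);
	return 0;
}
#endif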

/*
 * This is a useful helper function for locking the anon_vma root as
 * we traverse the vma->anon_vma_chain, looping over anon_vmas that
 * have the same vma.
 *
 * Such anon_vmas should have the same root, so you'd expect to see
 * just a single mutex_lock for the whole traversal.
 */
static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
{
	struct anon_vma *new_root = anon_vma->root;
	if (new_root != root) {
		if (WARN_ON_ONCE(root))
			mutex_unlock(&root->mutex);
		root = new_root;
		mutex_lock(&root->mutex);
	}
	return root;
}

static inline void unlock_anon_vma_root(struct anon_vma *root)
{
	if (root)
		mutex_unlock(&root->mutex);
}

static void anon_vma_chain_link(struct vm_area_struct *vma,
				struct anon_vma_chain *avc,
				struct anon_vma *anon_vma)
{
	avc->vma = vma;
	avc->anon_vma = anon_vma;
	list_add(&avc->same_vma, &vma->anon_vma_chain);

	/*
	 * It's critical to add new vmas to the tail of the anon_vma,
	 * see comment in huge_memory.c:__split_huge_page().
	 */
	list_add_tail(&avc->same_anon_vma, &anon_vma->head);
}

/*
 * Attach the anon_vmas from src to dst.
 * Returns 0 on success, -ENOMEM on failure.
 */
int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
	struct anon_vma_chain *avc, *pavc;
	struct anon_vma *root = NULL;

	list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma;

		avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
		if (unlikely(!avc)) {
			unlock_anon_vma_root(root);
			root = NULL;
			avc = anon_vma_chain_alloc(GFP_KERNEL);
			if (!avc)
				goto enomem_failure;
		}
		anon_vma = pavc->anon_vma;
		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_chain_link(dst, avc, anon_vma);
	}
	unlock_anon_vma_root(root);
	return 0;

 enomem_failure:
	unlink_anon_vmas(dst);
	return -ENOMEM;
}

/*
 * Attach vma to its own anon_vma, as well as to the anon_vmas that
 * the corresponding VMA in the parent process is attached to.
 * Returns 0 on success, non-zero on failure.
 */
int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
{
	struct anon_vma_chain *avc;
	struct anon_vma *anon_vma;

	/* Don't bother if the parent process has no anon_vma here. */
	if (!pvma->anon_vma)
		return 0;

	/*
	 * First, attach the new VMA to the parent VMA's anon_vmas,
	 * so rmap can find non-COWed pages in child processes.
	 */
	if (anon_vma_clone(vma, pvma))
		return -ENOMEM;

	/* Then add our own anon_vma. */
	anon_vma = anon_vma_alloc();
	if (!anon_vma)
		goto out_error;
	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc)
		goto out_error_free_anon_vma;

	/*
	 * The root anon_vma's spinlock is the lock actually used when we
	 * lock any of the anon_vmas in this anon_vma tree.
	 */
	anon_vma->root = pvma->anon_vma->root;
	/*
	 * With refcounts, an anon_vma can stay around longer than the
	 * process it belongs to. The root anon_vma needs to be pinned until
	 * this anon_vma is freed, because the lock lives in the root.
	 */
	get_anon_vma(anon_vma->root);
	/* Mark this anon_vma as the one where our new (COWed) pages go. */
	vma->anon_vma = anon_vma;
	anon_vma_lock(anon_vma);
	anon_vma_chain_link(vma, avc, anon_vma);
	anon_vma_unlock(anon_vma);

	return 0;

 out_error_free_anon_vma:
	put_anon_vma(anon_vma);
 out_error:
	unlink_anon_vmas(vma);
	return -ENOMEM;
}
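
/*
 * Usage sketch: on fork, kernel/fork.c's dup_mmap() walks the parent's
 * vmas and calls anon_vma_fork() for each copied vma, roughly like this
 * (hypothetical, simplified helper):
 */
#if 0
static int example_dup_vma_anon(struct vm_area_struct *new_vma,
				struct vm_area_struct *parent_vma)
{
	/* Links new_vma to the parent's anon_vmas and gives it its own. */
	if (anon_vma_fork(new_vma, parent_vma))
		return -ENOMEM;
	return 0;
}
#endif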

void unlink_anon_vmas(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc, *next;
	struct anon_vma *root = NULL;

	/*
	 * Unlink each anon_vma chained to the VMA.  This list is ordered
	 * from newest to oldest, ensuring the root anon_vma gets freed last.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		root = lock_anon_vma_root(root, anon_vma);
		list_del(&avc->same_anon_vma);

		/*
		 * Leave empty anon_vmas on the list - we'll need
		 * to free them outside the lock.
		 */
		if (list_empty(&anon_vma->head))
			continue;

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
	unlock_anon_vma_root(root);

	/*
	 * Iterate the list once more, it now only contains empty and unlinked
	 * anon_vmas, destroy them. Could not do before due to __put_anon_vma()
	 * needing to acquire the anon_vma->root->mutex.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		put_anon_vma(anon_vma);

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
}

static void anon_vma_ctor(void *data)
{
	struct anon_vma *anon_vma = data;

	mutex_init(&anon_vma->mutex);
	atomic_set(&anon_vma->refcount, 0);
	INIT_LIST_HEAD(&anon_vma->head);
}

void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
}

/*
 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
 *
 * Since there is no serialization whatsoever against page_remove_rmap()
 * the best this function can do is return a locked anon_vma that might
 * have been relevant to this page.
 *
 * The page might have been remapped to a different anon_vma or the anon_vma
 * returned may already be freed (and even reused).
 *
 * In case it was remapped to a different anon_vma, the new anon_vma will be a
 * child of the old anon_vma, and the anon_vma lifetime rules will therefore
 * ensure that any anon_vma obtained from the page will still be valid for as
 * long as we observe page_mapped() [ hence all those page_mapped() tests ].
 *
 * All users of this function must be very careful when walking the anon_vma
 * chain and verify that the page in question is indeed mapped in it
 * [ something equivalent to page_mapped_in_vma() ].
 *
 * Since anon_vma's slab is DESTROY_BY_RCU and we know from page_remove_rmap()
 * that the anon_vma pointer from page->mapping is valid if there is a
 * mapcount, we can dereference the anon_vma after observing those.
 */
struct anon_vma *page_get_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	/*
	 * If this page is still mapped, then its anon_vma cannot have been
	 * freed.  But if it has been unmapped, we have no guarantee against
	 * the anon_vma structure being freed and reused (for another anon_vma:
	 * SLAB_DESTROY_BY_RCU guarantees that - so the atomic_inc_not_zero()
	 * above cannot corrupt).
	 */
	if (!page_mapped(page)) {
		put_anon_vma(anon_vma);
		anon_vma = NULL;
	}
out:
	rcu_read_unlock();

	return anon_vma;
}
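
/*
 * Usage sketch: a caller such as the page-migration code pins the anon_vma
 * with page_get_anon_vma() and drops it with put_anon_vma() once its rmap
 * walk is done (hypothetical, simplified):
 */
#if 0
static void example_pin_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma = page_get_anon_vma(page);

	if (!anon_vma)
		return;		/* not anon, or already fully unmapped */
	/* ... walk anon_vma->head under anon_vma_lock() ... */
	put_anon_vma(anon_vma);
}
#endif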

/*
 * Similar to page_get_anon_vma() except it locks the anon_vma.
 *
 * It's a little more complex as it tries to keep the fast path to a single
 * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
 * reference like with page_get_anon_vma() and then block on the mutex.
 */
struct anon_vma *page_lock_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	struct anon_vma *root_anon_vma;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	root_anon_vma = ACCESS_ONCE(anon_vma->root);
	if (mutex_trylock(&root_anon_vma->mutex)) {
		/*
		 * If the page is still mapped, then this anon_vma is still
		 * its anon_vma, and holding the mutex ensures that it will
		 * not go away, see anon_vma_free().
		 */
		if (!page_mapped(page)) {
			mutex_unlock(&root_anon_vma->mutex);
			anon_vma = NULL;
		}
		goto out;
	}

	/* trylock failed, we need to sleep */
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	if (!page_mapped(page)) {
		put_anon_vma(anon_vma);
		anon_vma = NULL;
		goto out;
	}

	/* we pinned the anon_vma, it's safe to sleep */
	rcu_read_unlock();
	anon_vma_lock(anon_vma);

	if (atomic_dec_and_test(&anon_vma->refcount)) {
		/*
		 * Oops, we held the last refcount, release the lock
		 * and bail -- can't simply use put_anon_vma() because
		 * we'll deadlock on the anon_vma_lock() recursion.
		 */
		anon_vma_unlock(anon_vma);
		__put_anon_vma(anon_vma);
		anon_vma = NULL;
	}

	return anon_vma;

out:
	rcu_read_unlock();
	return anon_vma;
}

void page_unlock_anon_vma(struct anon_vma *anon_vma)
{
	anon_vma_unlock(anon_vma);
}

/*
 * At what user virtual address is page expected in @vma?
 * Returns virtual address or -EFAULT if page's index/offset is not
 * within the range mapped by the @vma.
 */
inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	unsigned long address;

	if (unlikely(is_vm_hugetlb_page(vma)))
		pgoff = page->index << huge_page_order(page_hstate(page));
	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
		/* page should be within @vma mapping range */
		return -EFAULT;
	}
	return address;
}
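
/*
 * Worked example (assumed values, not from the original file): with
 * PAGE_SHIFT == 12, a vma with vm_start == 0x400000 and vm_pgoff == 0x10,
 * and a page with index 0x18, the computation above yields
 * address = 0x400000 + ((0x18 - 0x10) << 12) = 0x408000, which must then
 * fall inside [vm_start, vm_end) or -EFAULT is returned.
 */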

/*
 * At what user virtual address is page expected in vma?
 * Caller should check the page is actually part of the vma.
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	if (PageAnon(page)) {
		struct anon_vma *page__anon_vma = page_anon_vma(page);
		/*
		 * Note: swapoff's unuse_vma() is more efficient with this
		 * check, and needs it to match anon_vma when KSM is active.
		 */
		if (!vma->anon_vma || !page__anon_vma ||
		    vma->anon_vma->root != page__anon_vma->root)
			return -EFAULT;
	} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
		if (!vma->vm_file ||
		    vma->vm_file->f_mapping != page->mapping)
			return -EFAULT;
	} else
		return -EFAULT;
	return vma_address(page, vma);
}

/*
 * Check that @page is mapped at @address into @mm.
 *
 * If @sync is false, page_check_address may perform a racy check to avoid
 * the page table lock when the pte is not present (helpful when reclaiming
 * highly shared pages).
 *
 * On success returns with pte mapped and locked.
 */
pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
			  unsigned long address, spinlock_t **ptlp, int sync)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	if (unlikely(PageHuge(page))) {
		pte = huge_pte_offset(mm, address);
		ptl = &mm->page_table_lock;
		goto check;
	}

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return NULL;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return NULL;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return NULL;
	if (pmd_trans_huge(*pmd))
		return NULL;

	pte = pte_offset_map(pmd, address);
	/* Make a quick check before getting the lock */
	if (!sync && !pte_present(*pte)) {
		pte_unmap(pte);
		return NULL;
	}

	ptl = pte_lockptr(mm, pmd);
check:
	spin_lock(ptl);
	if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
		*ptlp = ptl;
		return pte;
	}
	pte_unmap_unlock(pte, ptl);
	return NULL;
}
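
/*
 * For reference, callers below use the page_check_address() wrapper from
 * include/linux/rmap.h, which (modulo sparse locking annotations) boils
 * down to this sketch (not part of this file):
 */
#if 0
static inline pte_t *page_check_address(struct page *page, struct mm_struct *mm,
					unsigned long address,
					spinlock_t **ptlp, int sync)
{
	/* Returns the mapped and locked pte on success, NULL otherwise. */
	return __page_check_address(page, mm, address, ptlp, sync);
}
#endif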

/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA.  Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;

	address = vma_address(page, vma);
	if (address == -EFAULT)		/* out of vma range */
		return 0;
	pte = page_check_address(page, vma->vm_mm, address, &ptl, 1);
	if (!pte)			/* the page is not in this mm */
		return 0;
	pte_unmap_unlock(pte, ptl);

	return 1;
}

/*
 * Subfunctions of page_referenced: page_referenced_one called
 * repeatedly from either page_referenced_anon or page_referenced_file.
 */
int page_referenced_one(struct page *page, struct vm_area_struct *vma,
			unsigned long address, unsigned int *mapcount,
			unsigned long *vm_flags)
{
	struct mm_struct *mm = vma->vm_mm;
	int referenced = 0;

	if (unlikely(PageTransHuge(page))) {
		pmd_t *pmd;

		spin_lock(&mm->page_table_lock);
		/*
		 * rmap might return false positives; we must filter
		 * these out using page_check_address_pmd().
		 */
		pmd = page_check_address_pmd(page, mm, address,
					     PAGE_CHECK_ADDRESS_PMD_FLAG);
		if (!pmd) {
			spin_unlock(&mm->page_table_lock);
			goto out;
		}

		if (vma->vm_flags & VM_LOCKED) {
			spin_unlock(&mm->page_table_lock);
			*mapcount = 0;	/* break early from loop */
			*vm_flags |= VM_LOCKED;
			goto out;
		}

		/* go ahead even if the pmd is pmd_trans_splitting() */
		if (pmdp_clear_flush_young_notify(vma, address, pmd))
			referenced++;
		spin_unlock(&mm->page_table_lock);
	} else {
		pte_t *pte;
		spinlock_t *ptl;

		/*
		 * rmap might return false positives; we must filter
		 * these out using page_check_address().
		 */
		pte = page_check_address(page, mm, address, &ptl, 0);
		if (!pte)
			goto out;

		if (vma->vm_flags & VM_LOCKED) {
			pte_unmap_unlock(pte, ptl);
			*mapcount = 0;	/* break early from loop */
			*vm_flags |= VM_LOCKED;
			goto out;
		}

		if (ptep_clear_flush_young_notify(vma, address, pte)) {
			/*
			 * Don't treat a reference through a sequentially read
			 * mapping as such.  If the page has been used in
			 * another mapping, we will catch it; if this other
			 * mapping is already gone, the unmap path will have
			 * set PG_referenced or activated the page.
			 */
			if (likely(!VM_SequentialReadHint(vma)))
				referenced++;
		}
		pte_unmap_unlock(pte, ptl);
	}

	/* Pretend the page is referenced if the task has the
	   swap token and is in the middle of a page fault. */
	if (mm != current->mm && has_swap_token(mm) &&
			rwsem_is_locked(&mm->mmap_sem))
		referenced++;

	(*mapcount)--;

	if (referenced)
		*vm_flags |= vma->vm_flags;
out:
	return referenced;
}

static int page_referenced_anon(struct page *page,
				struct mem_cgroup *mem_cont,
				unsigned long *vm_flags)
{
	unsigned int mapcount;
	struct anon_vma *anon_vma;
	struct anon_vma_chain *avc;
	int referenced = 0;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return referenced;

	mapcount = page_mapcount(page);
	list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long address = vma_address(page, vma);
		if (address == -EFAULT)
			continue;
		/*
		 * If we are reclaiming on behalf of a cgroup, skip
		 * counting on behalf of references from different
		 * cgroups
		 */
		if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
			continue;
		referenced += page_referenced_one(page, vma, address,
						  &mapcount, vm_flags);
		if (!mapcount)
			break;
	}

	page_unlock_anon_vma(anon_vma);
	return referenced;
}

/**
 * page_referenced_file - referenced check for object-based rmap
 * @page: the page we're checking references on.
 * @mem_cont: target memory controller
 * @vm_flags: collect the vma->vm_flags of the vmas that actually referenced the page
 *
 * For an object-based mapped page, find all the places it is mapped and
 * check/clear the referenced flag.  This is done by following the page->mapping
 * pointer, then walking the chain of vmas it holds.  It returns the number
 * of references it found.
 *
 * This function is only called from page_referenced for object-based pages.
 */
static int page_referenced_file(struct page *page,
				struct mem_cgroup *mem_cont,
				unsigned long *vm_flags)
{
	unsigned int mapcount;
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int referenced = 0;

	/*
	 * The caller's checks on page->mapping and !PageAnon have made
	 * sure that this is a file page: the check for page->mapping
	 * excludes the case just before it gets set on an anon page.
	 */
	BUG_ON(PageAnon(page));

	/*
	 * The page lock not only makes sure that page->mapping cannot
	 * suddenly be NULLified by truncation, it makes sure that the
	 * structure at mapping cannot be freed and reused yet,
	 * so we can safely take mapping->i_mmap_mutex.
	 */
	BUG_ON(!PageLocked(page));

	mutex_lock(&mapping->i_mmap_mutex);

	/*
	 * i_mmap_mutex does not stabilize mapcount at all, but mapcount
	 * is more likely to be accurate if we note it after spinning.
	 */
	mapcount = page_mapcount(page);

	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long address = vma_address(page, vma);
		if (address == -EFAULT)
			continue;
		/*
		 * If we are reclaiming on behalf of a cgroup, skip
		 * counting on behalf of references from different
		 * cgroups
		 */
		if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
			continue;
		referenced += page_referenced_one(page, vma, address,
						  &mapcount, vm_flags);
		if (!mapcount)
			break;
	}

	mutex_unlock(&mapping->i_mmap_mutex);
	return referenced;
}

/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 * @mem_cont: target memory controller
 * @vm_flags: collect the vma->vm_flags of the vmas that actually referenced the page
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */
int page_referenced(struct page *page,
		    int is_locked,
		    struct mem_cgroup *mem_cont,
		    unsigned long *vm_flags)
{
	int referenced = 0;
	int we_locked = 0;

	*vm_flags = 0;
	if (page_mapped(page) && page_rmapping(page)) {
		if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
			we_locked = trylock_page(page);
			if (!we_locked) {
				referenced++;
				goto out;
			}
		}
		if (unlikely(PageKsm(page)))
			referenced += page_referenced_ksm(page, mem_cont,
								vm_flags);
		else if (PageAnon(page))
			referenced += page_referenced_anon(page, mem_cont,
								vm_flags);
		else if (page->mapping)
			referenced += page_referenced_file(page, mem_cont,
								vm_flags);
		if (we_locked)
			unlock_page(page);

		if (page_test_and_clear_young(page_to_pfn(page)))
			referenced++;
	}
out:
	return referenced;
}
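
/*
 * Usage sketch, modelled loosely on reclaim in mm/vmscan.c (hypothetical,
 * simplified decision logic): the caller acts on both the returned count
 * and the collected vm_flags.
 */
#if 0
static int example_keep_page(struct page *page, struct mem_cgroup *memcg)
{
	unsigned long vm_flags;
	int referenced = page_referenced(page, 0, memcg, &vm_flags);

	if (vm_flags & VM_LOCKED)
		return 1;		/* mlocked: never reclaim */
	return referenced != 0;		/* referenced: give it another round */
}
#endif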

static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
			    unsigned long address)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte;
	spinlock_t *ptl;
	int ret = 0;

	pte = page_check_address(page, mm, address, &ptl, 1);
	if (!pte)
		goto out;

	if (pte_dirty(*pte) || pte_write(*pte)) {
		pte_t entry;

		flush_cache_page(vma, address, pte_pfn(*pte));
		entry = ptep_clear_flush_notify(vma, address, pte);
		entry = pte_wrprotect(entry);
		entry = pte_mkclean(entry);
		set_pte_at(mm, address, pte, entry);
		ret = 1;
	}

	pte_unmap_unlock(pte, ptl);
out:
	return ret;
}

static int page_mkclean_file(struct address_space *mapping, struct page *page)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = 0;

	BUG_ON(PageAnon(page));

	mutex_lock(&mapping->i_mmap_mutex);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		if (vma->vm_flags & VM_SHARED) {
			unsigned long address = vma_address(page, vma);
			if (address == -EFAULT)
				continue;
			ret += page_mkclean_one(page, vma, address);
		}
	}
	mutex_unlock(&mapping->i_mmap_mutex);
	return ret;
}

int page_mkclean(struct page *page)
{
	int ret = 0;

	BUG_ON(!PageLocked(page));

	if (page_mapped(page)) {
		struct address_space *mapping = page_mapping(page);
		if (mapping) {
			ret = page_mkclean_file(mapping, page);
			if (page_test_and_clear_dirty(page_to_pfn(page), 1))
				ret = 1;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(page_mkclean);
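
/*
 * Usage sketch: callers on the writeback side write-protect all mappings
 * before starting I/O, so that a later store refaults and redirties the
 * page (hypothetical, simplified; the real logic lives in
 * mm/page-writeback.c):
 */
#if 0
static void example_clean_for_io(struct page *page)
{
	lock_page(page);		/* page_mkclean() requires PG_locked */
	if (page_mkclean(page))
		set_page_dirty(page);	/* transfer pte dirt to the page */
	unlock_page(page);
}
#endif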

/**
 * page_move_anon_rmap - move a page to our anon_vma
 * @page:	the page to move to our anon_vma
 * @vma:	the vma the page belongs to
 * @address:	the user virtual address mapped
 *
 * When a page belongs exclusively to one process after a COW event,
 * that page can be moved into the anon_vma that belongs to just that
 * process, so the rmap code will not search the parent or sibling
 * processes.
 */
void page_move_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!anon_vma);
	VM_BUG_ON(page->index != linear_page_index(vma, address));

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
}

/**
 * __page_set_anon_rmap - set up new anonymous rmap
 * @page:	Page to add to rmap
 * @vma:	VM area to add page to.
 * @address:	User virtual address of the mapping
 * @exclusive:	the page is exclusively owned by the current process
 */
static void __page_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);

	if (PageAnon(page))
		return;

	/*
	 * If the page isn't exclusively mapped into this vma,
	 * we must use the _oldest_ possible anon_vma for the
	 * page mapping!
	 */
	if (!exclusive)
		anon_vma = anon_vma->root;

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
	page->index = linear_page_index(vma, address);
}
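
/*
 * For reference, the inverse decode of the mapping field set above is what
 * page_anon_vma() does (defined outside this file); a minimal sketch,
 * mirroring the open-coded decode in page_get_anon_vma() earlier:
 */
#if 0
static struct anon_vma *example_decode_anon_vma(struct page *page)
{
	unsigned long mapping = (unsigned long) page->mapping;

	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;	/* file page, or KSM page */
	return (struct anon_vma *) (mapping - PAGE_MAPPING_ANON);
}
#endif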

/**
 * __page_check_anon_rmap - sanity check anonymous rmap addition
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 */
static void __page_check_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
#ifdef CONFIG_DEBUG_VM
	/*
	 * The page's anon-rmap details (mapping and index) are guaranteed to
	 * be set up correctly at this point.
	 *
	 * We have exclusion against page_add_anon_rmap because the caller
	 * always holds the page locked, except if called from page_dup_rmap,
	 * in which case the page is already known to be set up.
	 *
	 * We have exclusion against page_add_new_anon_rmap because those pages
	 * are initially only visible via the pagetables, and the pte is locked
	 * over the call to page_add_new_anon_rmap.
	 */
	BUG_ON(page_anon_vma(page)->root != vma->anon_vma->root);
	BUG_ON(page->index != linear_page_index(vma, address));
#endif
}

/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 *
 * The caller needs to hold the pte lock, and the page must be locked in
 * the anon_vma case: to serialize mapping, index checking after setting,
 * and to ensure that PageAnon is not being upgraded racily to PageKsm
 * (but PageKsm is never downgraded to PageAnon).
 */
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	do_page_add_anon_rmap(page, vma, address, 0);
}

/*
 * Special version of the above for do_swap_page, which often runs
 * into pages that are exclusively owned by the current process.
 * Everybody else should continue to use page_add_anon_rmap above.
 */
void do_page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	int first = atomic_inc_and_test(&page->_mapcount);
	if (first) {
		if (!PageTransHuge(page))
			__inc_zone_page_state(page, NR_ANON_PAGES);
		else
			__inc_zone_page_state(page,
					      NR_ANON_TRANSPARENT_HUGEPAGES);
	}
	if (unlikely(PageKsm(page)))
		return;

	VM_BUG_ON(!PageLocked(page));
	/* address might be in next vma when migration races vma_adjust */
	if (first)
		__page_set_anon_rmap(page, vma, address, exclusive);
	else
		__page_check_anon_rmap(page, vma, address);
}

/**
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
 * Page does not have to be locked.
 */
void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	SetPageSwapBacked(page);
	atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
	if (!PageTransHuge(page))
		__inc_zone_page_state(page, NR_ANON_PAGES);
	else
		__inc_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
	__page_set_anon_rmap(page, vma, address, 1);
	if (page_evictable(page, vma))
		lru_cache_add_lru(page, LRU_ACTIVE_ANON);
	else
		add_page_to_unevictable_list(page);
}

/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 *
 * The caller needs to hold the pte lock.
 */
void page_add_file_rmap(struct page *page)
{
	if (atomic_inc_and_test(&page->_mapcount)) {
		__inc_zone_page_state(page, NR_FILE_MAPPED);
		mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
	}
}

/**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 *
 * The caller needs to hold the pte lock.
 */
void page_remove_rmap(struct page *page)
{
	/* page still mapped by someone else? */
	if (!atomic_add_negative(-1, &page->_mapcount))
		return;

	/*
	 * Now that the last pte has gone, s390 must transfer dirty
	 * flag from storage key to struct page.  We can usually skip
	 * this if the page is anon, so about to be freed; but perhaps
	 * not if it's in swapcache - there might be another pte slot
	 * containing the swap entry, but page not yet written to swap.
	 */
	if ((!PageAnon(page) || PageSwapCache(page)) &&
	    page_test_and_clear_dirty(page_to_pfn(page), 1))
		set_page_dirty(page);
	/*
	 * Hugepages are not counted in NR_ANON_PAGES nor NR_FILE_MAPPED
	 * and not charged by memcg for now.
	 */
	if (unlikely(PageHuge(page)))
		return;
	if (PageAnon(page)) {
		mem_cgroup_uncharge_page(page);
		if (!PageTransHuge(page))
			__dec_zone_page_state(page, NR_ANON_PAGES);
		else
			__dec_zone_page_state(page,
					      NR_ANON_TRANSPARENT_HUGEPAGES);
	} else {
		__dec_zone_page_state(page, NR_FILE_MAPPED);
		mem_cgroup_dec_page_stat(page, MEMCG_NR_FILE_MAPPED);
	}
	/*
	 * It would be tidy to reset the PageAnon mapping here,
	 * but that might overwrite a racing page_add_anon_rmap
	 * which increments mapcount after us but sets mapping
	 * before us: so leave the reset to free_hot_cold_page,
	 * and remember that it's only reliable while mapped.
	 * Leaving it set also helps swapoff to reinstate ptes
	 * faster for those pages still in swapcache.
	 */
}
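
/*
 * Usage sketch, modelled on the pte-teardown path in mm/memory.c
 * (hypothetical, simplified): the pte is cleared under the pte lock,
 * then the reverse mapping and the page reference are dropped.
 */
#if 0
static void example_zap_pte(struct vm_area_struct *vma, unsigned long address,
			    pte_t *pte, struct page *page)
{
	pte_t pteval = ptep_get_and_clear(vma->vm_mm, address, pte);

	if (pte_dirty(pteval))
		set_page_dirty(page);
	page_remove_rmap(page);		/* undoes page_add_*_rmap() */
	page_cache_release(page);	/* drop the mapping's reference */
}
#endif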

/*
 * Subfunctions of try_to_unmap: try_to_unmap_one called
 * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
 */
int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
		     unsigned long address, enum ttu_flags flags)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	int ret = SWAP_AGAIN;

	pte = page_check_address(page, mm, address, &ptl, 0);
	if (!pte)
		goto out;

	/*
	 * If the page is mlock()d, we cannot swap it out.
	 * If it's recently referenced (perhaps page_referenced
	 * skipped over this mm) then we should reactivate it.
	 */
	if (!(flags & TTU_IGNORE_MLOCK)) {
		if (vma->vm_flags & VM_LOCKED)
			goto out_mlock;

		if (TTU_ACTION(flags) == TTU_MUNLOCK)
			goto out_unmap;
	}
	if (!(flags & TTU_IGNORE_ACCESS)) {
		if (ptep_clear_flush_young_notify(vma, address, pte)) {
			ret = SWAP_FAIL;
			goto out_unmap;
		}
	}

	/* Nuke the page table entry. */
	flush_cache_page(vma, address, page_to_pfn(page));
	pteval = ptep_clear_flush_notify(vma, address, pte);

	/* Move the dirty bit to the physical page now the pte is gone. */
	if (pte_dirty(pteval))
		set_page_dirty(page);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
		if (PageAnon(page))
			dec_mm_counter(mm, MM_ANONPAGES);
		else
			dec_mm_counter(mm, MM_FILEPAGES);
		set_pte_at(mm, address, pte,
				swp_entry_to_pte(make_hwpoison_entry(page)));
	} else if (PageAnon(page)) {
		swp_entry_t entry = { .val = page_private(page) };

		if (PageSwapCache(page)) {
			/*
			 * Store the swap location in the pte.
			 * See handle_pte_fault() ...
			 */
			if (swap_duplicate(entry) < 0) {
				set_pte_at(mm, address, pte, pteval);
				ret = SWAP_FAIL;
				goto out_unmap;
			}
			if (list_empty(&mm->mmlist)) {
				spin_lock(&mmlist_lock);
				if (list_empty(&mm->mmlist))
					list_add(&mm->mmlist, &init_mm.mmlist);
				spin_unlock(&mmlist_lock);
			}
			dec_mm_counter(mm, MM_ANONPAGES);
			inc_mm_counter(mm, MM_SWAPENTS);
		} else if (PAGE_MIGRATION) {
			/*
			 * Store the pfn of the page in a special migration
			 * pte. do_swap_page() will wait until the migration
			 * pte is removed and then restart fault handling.
			 */
			BUG_ON(TTU_ACTION(flags) != TTU_MIGRATION);
			entry = make_migration_entry(page, pte_write(pteval));
		}
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
		BUG_ON(pte_file(*pte));
	} else if (PAGE_MIGRATION && (TTU_ACTION(flags) == TTU_MIGRATION)) {
		/* Establish migration entry for a file page */
		swp_entry_t entry;
		entry = make_migration_entry(page, pte_write(pteval));
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
	} else
		dec_mm_counter(mm, MM_FILEPAGES);

	page_remove_rmap(page);
	page_cache_release(page);

out_unmap:
	pte_unmap_unlock(pte, ptl);
out:
	return ret;

out_mlock:
	pte_unmap_unlock(pte, ptl);

	/*
	 * We need mmap_sem locking here: otherwise the VM_LOCKED check is
	 * racy and gives an unstable result. We also cannot wait for the
	 * semaphore, because we now hold anon_vma->mutex or
	 * mapping->i_mmap_mutex. If the trylock fails, the page remains on
	 * the evictable lru, and vmscan may later retry moving it to the
	 * unevictable lru if it is actually mlocked.
	 */
	if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
		if (vma->vm_flags & VM_LOCKED) {
			mlock_vma_page(page);
			ret = SWAP_MLOCK;
		}
		up_read(&vma->vm_mm->mmap_sem);
	}
	return ret;
}

/*
 * objrmap doesn't work for nonlinear VMAs because the assumption that
 * offset-into-file correlates with offset-into-virtual-addresses does not hold.
 * Consequently, given a particular page and its ->index, we cannot locate the
 * ptes which are mapping that page without an exhaustive linear search.
 *
 * So what this code does is a mini "virtual scan" of each nonlinear VMA which
 * maps the file to which the target page belongs.  The ->vm_private_data field
 * holds the current cursor into that scan.  Successive searches will circulate
 * around the vma's virtual address space.
 *
 * So as more replacement pressure is applied to the pages in a nonlinear VMA,
 * more scanning pressure is placed against them as well.   Eventually pages
 * will become fully unmapped and are eligible for eviction.
 *
 * For very sparsely populated VMAs this is a little inefficient - chances are
 * there won't be many ptes located within the scan cluster.  In this case
 * maybe we could scan further - to the end of the pte page, perhaps.
 *
 * Mlocked pages:  check VM_LOCKED under mmap_sem held for read, if we can
 * acquire it without blocking.  If vma locked, mlock the pages in the cluster,
 * rather than unmapping them.  If we encounter the "check_page" that vmscan is
 * trying to unmap, return SWAP_MLOCK, else default SWAP_AGAIN.
 */
#define CLUSTER_SIZE	min(32*PAGE_SIZE, PMD_SIZE)
#define CLUSTER_MASK	(~(CLUSTER_SIZE - 1))
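
/*
 * Worked example (assuming 4K pages and 2M pmds, as on x86):
 * CLUSTER_SIZE = min(32 * 4096, 2M) = 128K, i.e. 32 ptes per scan, and
 * CLUSTER_MASK rounds an address down to a 128K boundary.
 */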

static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
		struct vm_area_struct *vma, struct page *check_page)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	struct page *page;
	unsigned long address;
	unsigned long end;
	int ret = SWAP_AGAIN;
	int locked_vma = 0;

	address = (vma->vm_start + cursor) & CLUSTER_MASK;
	end = address + CLUSTER_SIZE;
	if (address < vma->vm_start)
		address = vma->vm_start;
	if (end > vma->vm_end)
		end = vma->vm_end;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return ret;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return ret;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return ret;

	/*
	 * If we can acquire the mmap_sem for read, and vma is VM_LOCKED,
	 * keep the sem while scanning the cluster for mlocking pages.
	 */
	if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
		locked_vma = (vma->vm_flags & VM_LOCKED);
		if (!locked_vma)
			up_read(&vma->vm_mm->mmap_sem); /* don't need it */
	}

	pte = pte_offset_map_lock(mm, pmd, address, &ptl);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	for (; address < end; pte++, address += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, address, *pte);
		BUG_ON(!page || PageAnon(page));

		if (locked_vma) {
			mlock_vma_page(page);   /* no-op if already mlocked */
			if (page == check_page)
				ret = SWAP_MLOCK;
			continue;	/* don't unmap */
		}

		if (ptep_clear_flush_young_notify(vma, address, pte))
			continue;

		/* Nuke the page table entry. */
		flush_cache_page(vma, address, pte_pfn(*pte));
		pteval = ptep_clear_flush_notify(vma, address, pte);

		/* If nonlinear, store the file page offset in the pte. */
		if (page->index != linear_page_index(vma, address))
			set_pte_at(mm, address, pte, pgoff_to_pte(page->index));

		/* Move the dirty bit to the physical page now the pte is gone. */
		if (pte_dirty(pteval))
			set_page_dirty(page);

		page_remove_rmap(page);
		page_cache_release(page);
		dec_mm_counter(mm, MM_FILEPAGES);
		(*mapcount)--;
	}
	pte_unmap_unlock(pte - 1, ptl);
	if (locked_vma)
		up_read(&vma->vm_mm->mmap_sem);
	return ret;
}

bool is_vma_temporary_stack(struct vm_area_struct *vma)
{
	int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);

	if (!maybe_stack)
		return false;

	if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
						VM_STACK_INCOMPLETE_SETUP)
		return true;

	return false;
}

/**
 * try_to_unmap_anon - unmap or unlock anonymous page using the anon_vma-based
 * rmap method
 * @page: the page to unmap/unlock
 * @flags: action and flags
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the anon_vma struct it points to.
 *
 * This function is only called from try_to_unmap/try_to_munlock for
 * anonymous pages.
 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
 * where the page was found will be held for write.  So, we won't recheck
 * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
 * VM_LOCKED.
 */
static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
{
	struct anon_vma *anon_vma;
	struct anon_vma_chain *avc;
	int ret = SWAP_AGAIN;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return ret;

	list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long address;

		/*
		 * During exec, a temporary VMA is set up and later moved.
		 * The VMA is moved under the anon_vma lock but not the
		 * page tables, leading to a race where migration cannot
		 * find the migration ptes. Rather than increasing the
		 * locking requirements of exec(), migration skips
		 * temporary VMAs until after exec() completes.
		 */
		if (PAGE_MIGRATION && (flags & TTU_MIGRATION) &&
				is_vma_temporary_stack(vma))
			continue;

		address = vma_address(page, vma);
		if (address == -EFAULT)
			continue;
		ret = try_to_unmap_one(page, vma, address, flags);
		if (ret != SWAP_AGAIN || !page_mapped(page))
			break;
	}

	page_unlock_anon_vma(anon_vma);
	return ret;
}

/**
 * try_to_unmap_file - unmap/unlock file page using the object-based rmap method
 * @page: the page to unmap/unlock
 * @flags: action and flags
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the address_space struct it points to.
 *
 * This function is only called from try_to_unmap/try_to_munlock for
 * object-based pages.
 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
 * where the page was found will be held for write.  So, we won't recheck
 * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
 * VM_LOCKED.
 */
static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
{
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = SWAP_AGAIN;
	unsigned long cursor;
	unsigned long max_nl_cursor = 0;
	unsigned long max_nl_size = 0;
	unsigned int mapcount;

	mutex_lock(&mapping->i_mmap_mutex);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long address = vma_address(page, vma);
		if (address == -EFAULT)
			continue;
		ret = try_to_unmap_one(page, vma, address, flags);
		if (ret != SWAP_AGAIN || !page_mapped(page))
			goto out;
	}

	if (list_empty(&mapping->i_mmap_nonlinear))
		goto out;

	/*
	 * We don't bother to try to find the munlocked page in nonlinears.
	 * It's costly. Instead, later, page reclaim logic may call
	 * try_to_unmap(TTU_MUNLOCK) and recover PG_mlocked lazily.
	 */
	if (TTU_ACTION(flags) == TTU_MUNLOCK)
		goto out;

	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
		cursor = (unsigned long) vma->vm_private_data;
		if (cursor > max_nl_cursor)
			max_nl_cursor = cursor;
		cursor = vma->vm_end - vma->vm_start;
		if (cursor > max_nl_size)
			max_nl_size = cursor;
	}

	if (max_nl_size == 0) {	/* all nonlinears locked or reserved? */
		ret = SWAP_FAIL;
		goto out;
	}

	/*
	 * We don't try to search for this page in the nonlinear vmas,
	 * and page_referenced wouldn't have found it anyway.  Instead
	 * just walk the nonlinear vmas trying to age and unmap some.
	 * The mapcount of the page we came in with is irrelevant,
	 * but even so use it as a guide to how hard we should try?
	 */
	mapcount = page_mapcount(page);
	if (!mapcount)
		goto out;
	cond_resched();

	max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
	if (max_nl_cursor == 0)
		max_nl_cursor = CLUSTER_SIZE;

	do {
		list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
			cursor = (unsigned long) vma->vm_private_data;
			while (cursor < max_nl_cursor &&
				cursor < vma->vm_end - vma->vm_start) {
				if (try_to_unmap_cluster(cursor, &mapcount,
						vma, page) == SWAP_MLOCK)
					ret = SWAP_MLOCK;
				cursor += CLUSTER_SIZE;
				vma->vm_private_data = (void *) cursor;
				if ((int)mapcount <= 0)
					goto out;
			}
			vma->vm_private_data = (void *) max_nl_cursor;
		}
		cond_resched();
		max_nl_cursor += CLUSTER_SIZE;
	} while (max_nl_cursor <= max_nl_size);

	/*
	 * Don't loop forever (perhaps all the remaining pages are
	 * in locked vmas).  Reset cursor on all unreserved nonlinear
	 * vmas, now forgetting on which ones it had fallen behind.
	 */
	list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
		vma->vm_private_data = NULL;
out:
	mutex_unlock(&mapping->i_mmap_mutex);
	return ret;
}

/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 * @flags: action and flags
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path.  Caller must hold the page lock.
 * Return values are:
 *
 * SWAP_SUCCESS	- we succeeded in removing all mappings
 * SWAP_AGAIN	- we missed a mapping, try again later
 * SWAP_FAIL	- the page is unswappable
 * SWAP_MLOCK	- page is mlocked.
 */
int try_to_unmap(struct page *page, enum ttu_flags flags)
{
	int ret;

	BUG_ON(!PageLocked(page));
	VM_BUG_ON(!PageHuge(page) && PageTransHuge(page));

	if (unlikely(PageKsm(page)))
		ret = try_to_unmap_ksm(page, flags);
	else if (PageAnon(page))
		ret = try_to_unmap_anon(page, flags);
	else
		ret = try_to_unmap_file(page, flags);
	if (ret != SWAP_MLOCK && !page_mapped(page))
		ret = SWAP_SUCCESS;
	return ret;
}
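
/*
 * Usage sketch, loosely following the pageout path in mm/vmscan.c
 * (hypothetical, simplified): the caller acts on the return value.
 */
#if 0
static int example_unmap_for_reclaim(struct page *page)
{
	switch (try_to_unmap(page, TTU_UNMAP)) {
	case SWAP_SUCCESS:
		return 1;	/* all ptes gone, page can be paged out */
	case SWAP_MLOCK:
		return 0;	/* mlocked: move to the unevictable list */
	case SWAP_FAIL:
	case SWAP_AGAIN:
	default:
		return 0;	/* keep the page for now */
	}
}
#endif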

/**
 * try_to_munlock - try to munlock a page
 * @page: the page to be munlocked
 *
 * Called from munlock code.  Checks all of the VMAs mapping the page
 * to make sure nobody else has this page mlocked. The page will be
 * returned with PG_mlocked cleared if no other vmas have it mlocked.
 *
 * Return values are:
 *
 * SWAP_AGAIN	- no vma is holding page mlocked, or,
 * SWAP_AGAIN	- page mapped in mlocked vma -- couldn't acquire mmap sem
 * SWAP_FAIL	- page cannot be located at present
 * SWAP_MLOCK	- page is now mlocked.
 */
int try_to_munlock(struct page *page)
{
	VM_BUG_ON(!PageLocked(page) || PageLRU(page));

	if (unlikely(PageKsm(page)))
		return try_to_unmap_ksm(page, TTU_MUNLOCK);
	else if (PageAnon(page))
		return try_to_unmap_anon(page, TTU_MUNLOCK);
	else
		return try_to_unmap_file(page, TTU_MUNLOCK);
}

void __put_anon_vma(struct anon_vma *anon_vma)
{
	struct anon_vma *root = anon_vma->root;

	if (root != anon_vma && atomic_dec_and_test(&root->refcount))
		anon_vma_free(root);

	anon_vma_free(anon_vma);
}

#ifdef CONFIG_MIGRATION
/*
 * rmap_walk() and its helpers rmap_walk_anon() and rmap_walk_file():
 * Called by migrate.c to remove migration ptes, but might be used more later.
 */
static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
		struct vm_area_struct *, unsigned long, void *), void *arg)
{
	struct anon_vma *anon_vma;
	struct anon_vma_chain *avc;
	int ret = SWAP_AGAIN;

	/*
	 * Note: remove_migration_ptes() cannot use page_lock_anon_vma()
	 * because that depends on page_mapped(); but not all its usages
	 * are holding mmap_sem. Users without mmap_sem are required to
	 * take a reference count to prevent the anon_vma disappearing.
	 */
	anon_vma = page_anon_vma(page);
	if (!anon_vma)
		return ret;
	anon_vma_lock(anon_vma);
	list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long address = vma_address(page, vma);
		if (address == -EFAULT)
			continue;
		ret = rmap_one(page, vma, address, arg);
		if (ret != SWAP_AGAIN)
			break;
	}
	anon_vma_unlock(anon_vma);
	return ret;
}

static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,
		struct vm_area_struct *, unsigned long, void *), void *arg)
{
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = SWAP_AGAIN;

	if (!mapping)
		return ret;
	mutex_lock(&mapping->i_mmap_mutex);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long address = vma_address(page, vma);
		if (address == -EFAULT)
			continue;
		ret = rmap_one(page, vma, address, arg);
		if (ret != SWAP_AGAIN)
			break;
	}
	/*
	 * No nonlinear handling: being always shared, nonlinear vmas
	 * never contain migration ptes.  Decide what to do about this
	 * limitation to linear when we need rmap_walk() on nonlinear.
	 */
	mutex_unlock(&mapping->i_mmap_mutex);
	return ret;
}

int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
		struct vm_area_struct *, unsigned long, void *), void *arg)
{
	VM_BUG_ON(!PageLocked(page));

	if (unlikely(PageKsm(page)))
		return rmap_walk_ksm(page, rmap_one, arg);
	else if (PageAnon(page))
		return rmap_walk_anon(page, rmap_one, arg);
	else
		return rmap_walk_file(page, rmap_one, arg);
}
1729#endif /* CONFIG_MIGRATION */
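
A usage sketch (an editor's illustration, not part of the listing; modelled
on how mm/migrate.c of this era drives rmap_walk()): migration removes its
special ptes by passing a callback that rmap_walk() invokes once per
(vma, address) at which the page may be mapped.

	/* callback with the signature rmap_walk() requires */
	static int remove_migration_pte(struct page *new,
			struct vm_area_struct *vma, unsigned long addr,
			void *old)
	{
		/* find the migration pte for 'old' at addr and rewrite it
		 * to map 'new'; returning SWAP_AGAIN keeps the walk going */
		return SWAP_AGAIN;
	}

	static void remove_migration_ptes(struct page *old, struct page *new)
	{
		rmap_walk(new, remove_migration_pte, old);
	}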
1730
1731#ifdef CONFIG_HUGETLB_PAGE
1732/*
1733 * The following three functions are for anonymous (private mapped) hugepages.
1734 * Unlike common anonymous pages, anonymous hugepages have no accounting code
1735 * and no lru code, because we handle hugepages differently from common pages.
1736 */
1737static void __hugepage_set_anon_rmap(struct page *page,
1738	struct vm_area_struct *vma, unsigned long address, int exclusive)
1739{
1740	struct anon_vma *anon_vma = vma->anon_vma;
1741
1742	BUG_ON(!anon_vma);
1743
1744	if (PageAnon(page))
1745		return;
1746	if (!exclusive)
1747		anon_vma = anon_vma->root;
1748
1749	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
1750	page->mapping = (struct address_space *) anon_vma;
1751	page->index = linear_page_index(vma, address);
1752}
1753
1754void hugepage_add_anon_rmap(struct page *page,
1755			    struct vm_area_struct *vma, unsigned long address)
1756{
1757	struct anon_vma *anon_vma = vma->anon_vma;
1758	int first;
1759
1760	BUG_ON(!PageLocked(page));
1761	BUG_ON(!anon_vma);
1762	/* address might be in next vma when migration races vma_adjust */
1763	first = atomic_inc_and_test(&page->_mapcount);
1764	if (first)
1765		__hugepage_set_anon_rmap(page, vma, address, 0);
1766}
1767
1768void hugepage_add_new_anon_rmap(struct page *page,
1769			struct vm_area_struct *vma, unsigned long address)
1770{
1771	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
1772	atomic_set(&page->_mapcount, 0);
1773	__hugepage_set_anon_rmap(page, vma, address, 1);
1774}
1775#endif /* CONFIG_HUGETLB_PAGE */
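
A usage sketch (editor's illustration, loosely after mm/hugetlb.c's COW
path; make_huge_pte() and ptep are hugetlb.c internals named here only for
context): a freshly allocated private hugepage is published to the anon
rmap with hugepage_add_new_anon_rmap(), while a hugepage re-mapped by
migration into an existing mapping would use hugepage_add_anon_rmap().

	/* after copying into new_page and revalidating under the lock: */
	set_huge_pte_at(mm, address, ptep,
			make_huge_pte(vma, new_page, 1));
	hugepage_add_new_anon_rmap(new_page, vma, address);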
The same file, mm/rmap.c, at v3.5.6:
   1/*
   2 * mm/rmap.c - physical to virtual reverse mappings
   3 *
   4 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
   5 * Released under the General Public License (GPL).
   6 *
   7 * Simple, low overhead reverse mapping scheme.
   8 * Please try to keep this thing as modular as possible.
   9 *
  10 * Provides methods for unmapping each kind of mapped page:
  11 * the anon methods track anonymous pages, and
  12 * the file methods track pages belonging to an inode.
  13 *
  14 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
  15 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
  16 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
  17 * Contributions by Hugh Dickins 2003, 2004
  18 */
  19
  20/*
  21 * Lock ordering in mm:
  22 *
  23 * inode->i_mutex	(while writing or truncating, not reading or faulting)
  24 *   mm->mmap_sem
  25 *     page->flags PG_locked (lock_page)
  26 *       mapping->i_mmap_mutex
  27 *         anon_vma->mutex
  28 *           mm->page_table_lock or pte_lock
  29 *             zone->lru_lock (in mark_page_accessed, isolate_lru_page)
  30 *             swap_lock (in swap_duplicate, swap_info_get)
  31 *               mmlist_lock (in mmput, drain_mmlist and others)
  32 *               mapping->private_lock (in __set_page_dirty_buffers)
  33 *               inode->i_lock (in set_page_dirty's __mark_inode_dirty)
  34 *               bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
  35 *                 sb_lock (within inode_lock in fs/fs-writeback.c)
  36 *                 mapping->tree_lock (widely used, in set_page_dirty,
  37 *                           in arch-dependent flush_dcache_mmap_lock,
  38 *                           within bdi.wb->list_lock in __sync_single_inode)
  39 *
  40 * anon_vma->mutex,mapping->i_mutex      (memory_failure, collect_procs_anon)
  41 *   ->tasklist_lock
  42 *     pte map lock
  43 */
  44
  45#include <linux/mm.h>
  46#include <linux/pagemap.h>
  47#include <linux/swap.h>
  48#include <linux/swapops.h>
  49#include <linux/slab.h>
  50#include <linux/init.h>
  51#include <linux/ksm.h>
  52#include <linux/rmap.h>
  53#include <linux/rcupdate.h>
  54#include <linux/export.h>
  55#include <linux/memcontrol.h>
  56#include <linux/mmu_notifier.h>
  57#include <linux/migrate.h>
  58#include <linux/hugetlb.h>
  59
  60#include <asm/tlbflush.h>
  61
  62#include "internal.h"
  63
  64static struct kmem_cache *anon_vma_cachep;
  65static struct kmem_cache *anon_vma_chain_cachep;
  66
  67static inline struct anon_vma *anon_vma_alloc(void)
  68{
  69	struct anon_vma *anon_vma;
  70
  71	anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
  72	if (anon_vma) {
  73		atomic_set(&anon_vma->refcount, 1);
  74		/*
  75		 * Initialise the anon_vma root to point to itself. If called
  76		 * from fork, the root will be reset to the parent's anon_vma.
  77		 */
  78		anon_vma->root = anon_vma;
  79	}
  80
  81	return anon_vma;
  82}
  83
  84static inline void anon_vma_free(struct anon_vma *anon_vma)
  85{
  86	VM_BUG_ON(atomic_read(&anon_vma->refcount));
  87
  88	/*
  89	 * Synchronize against page_lock_anon_vma() such that
  90	 * we can safely hold the lock without the anon_vma getting
  91	 * freed.
  92	 *
  93	 * Relies on the full mb implied by the atomic_dec_and_test() from
  94	 * put_anon_vma() against the acquire barrier implied by
  95	 * mutex_trylock() from page_lock_anon_vma(). This orders:
  96	 *
  97	 * page_lock_anon_vma()		VS	put_anon_vma()
  98	 *   mutex_trylock()			  atomic_dec_and_test()
  99	 *   LOCK				  MB
 100	 *   atomic_read()			  mutex_is_locked()
 101	 *
 102	 * LOCK should suffice since the actual taking of the lock must
 103	 * happen _before_ what follows.
 104	 */
 105	if (mutex_is_locked(&anon_vma->root->mutex)) {
 106		anon_vma_lock(anon_vma);
 107		anon_vma_unlock(anon_vma);
 108	}
 109
 110	kmem_cache_free(anon_vma_cachep, anon_vma);
 111}
 112
 113static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
 114{
 115	return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
 116}
 117
 118static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
 119{
 120	kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
 121}
 122
 123static void anon_vma_chain_link(struct vm_area_struct *vma,
 124				struct anon_vma_chain *avc,
 125				struct anon_vma *anon_vma)
 126{
 127	avc->vma = vma;
 128	avc->anon_vma = anon_vma;
 129	list_add(&avc->same_vma, &vma->anon_vma_chain);
 130
 131	/*
 132	 * It's critical to add new vmas to the tail of the anon_vma,
 133	 * see comment in huge_memory.c:__split_huge_page().
 134	 */
 135	list_add_tail(&avc->same_anon_vma, &anon_vma->head);
 136}
 137
 138/**
 139 * anon_vma_prepare - attach an anon_vma to a memory region
 140 * @vma: the memory region in question
 141 *
 142 * This makes sure the memory mapping described by 'vma' has
 143 * an 'anon_vma' attached to it, so that we can associate the
 144 * anonymous pages mapped into it with that anon_vma.
 145 *
 146 * The common case will be that we already have one, but if
 147 * not we either need to find an adjacent mapping that we
 148 * can re-use the anon_vma from (very common when the only
 149 * reason for splitting a vma has been mprotect()), or we
 150 * allocate a new one.
 151 *
 152 * Anon-vma allocations are very subtle, because we may have
 153 * optimistically looked up an anon_vma in page_lock_anon_vma()
 154 * and that may actually touch the spinlock even in the newly
 155 * allocated vma (it depends on RCU to make sure that the
 156 * anon_vma isn't actually destroyed).
 157 *
 158 * As a result, we need to do proper anon_vma locking even
 159 * for the new allocation. At the same time, we do not want
 160 * to do any locking for the common case of already having
 161 * an anon_vma.
 162 *
 163 * This must be called with the mmap_sem held for reading.
 164 */
 165int anon_vma_prepare(struct vm_area_struct *vma)
 166{
 167	struct anon_vma *anon_vma = vma->anon_vma;
 168	struct anon_vma_chain *avc;
 169
 170	might_sleep();
 171	if (unlikely(!anon_vma)) {
 172		struct mm_struct *mm = vma->vm_mm;
 173		struct anon_vma *allocated;
 174
 175		avc = anon_vma_chain_alloc(GFP_KERNEL);
 176		if (!avc)
 177			goto out_enomem;
 178
 179		anon_vma = find_mergeable_anon_vma(vma);
 180		allocated = NULL;
 181		if (!anon_vma) {
 182			anon_vma = anon_vma_alloc();
 183			if (unlikely(!anon_vma))
 184				goto out_enomem_free_avc;
 185			allocated = anon_vma;
 186		}
 187
 188		anon_vma_lock(anon_vma);
 189		/* page_table_lock to protect against threads */
 190		spin_lock(&mm->page_table_lock);
 191		if (likely(!vma->anon_vma)) {
 192			vma->anon_vma = anon_vma;
 193			anon_vma_chain_link(vma, avc, anon_vma);
 194			allocated = NULL;
 195			avc = NULL;
 196		}
 197		spin_unlock(&mm->page_table_lock);
 198		anon_vma_unlock(anon_vma);
 199
 200		if (unlikely(allocated))
 201			put_anon_vma(allocated);
 202		if (unlikely(avc))
 203			anon_vma_chain_free(avc);
 204	}
 205	return 0;
 206
 207 out_enomem_free_avc:
 208	anon_vma_chain_free(avc);
 209 out_enomem:
 210	return -ENOMEM;
 211}
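
A calling-convention sketch (editor's illustration; anon_fault_prologue()
is a hypothetical name, the pattern follows mm/memory.c's anonymous fault
paths): the fault handler runs under down_read(&mm->mmap_sem), exactly the
locking anon_vma_prepare() documents above.

	static int anon_fault_prologue(struct vm_area_struct *vma)
	{
		/* mmap_sem is held for reading here, as required */
		if (unlikely(anon_vma_prepare(vma)))
			return VM_FAULT_OOM;	/* avc/anon_vma allocation failed */
		return 0;			/* vma->anon_vma is now set */
	}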
 212
 213/*
 214 * This is a useful helper function for locking the anon_vma root as
 215 * we traverse the vma->anon_vma_chain, looping over anon_vma's that
 216 * have the same vma.
 217 *
 218 * Such anon_vma's should have the same root, so you'd expect to see
 219 * just a single mutex_lock for the whole traversal.
 220 */
 221static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
 222{
 223	struct anon_vma *new_root = anon_vma->root;
 224	if (new_root != root) {
 225		if (WARN_ON_ONCE(root))
 226			mutex_unlock(&root->mutex);
 227		root = new_root;
 228		mutex_lock(&root->mutex);
 229	}
 230	return root;
 231}
 232
 233static inline void unlock_anon_vma_root(struct anon_vma *root)
 234{
 235	if (root)
 236		mutex_unlock(&root->mutex);
 237}
 238
 239/*
 240 * Attach the anon_vmas from src to dst.
 241 * Returns 0 on success, -ENOMEM on failure.
 242 */
 243int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
 244{
 245	struct anon_vma_chain *avc, *pavc;
 246	struct anon_vma *root = NULL;
 247
 248	list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
 249		struct anon_vma *anon_vma;
 250
 251		avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
 252		if (unlikely(!avc)) {
 253			unlock_anon_vma_root(root);
 254			root = NULL;
 255			avc = anon_vma_chain_alloc(GFP_KERNEL);
 256			if (!avc)
 257				goto enomem_failure;
 258		}
 259		anon_vma = pavc->anon_vma;
 260		root = lock_anon_vma_root(root, anon_vma);
 261		anon_vma_chain_link(dst, avc, anon_vma);
 262	}
 263	unlock_anon_vma_root(root);
 264	return 0;
 265
 266 enomem_failure:
 267	unlink_anon_vmas(dst);
 268	return -ENOMEM;
 269}
 270
 271/*
 272 * Some rmap walks need to find all ptes/hugepmds without false
 273 * negatives (like migrate and split_huge_page) while running
 274 * concurrently with operations that copy or move pagetables (like
 275 * mremap() and fork()). They depend on the anon_vma "same_anon_vma"
 276 * list to be in a certain order: the dst_vma must be placed after the
 277 * src_vma in the list. This is always guaranteed by fork() but
 278 * mremap() needs to call this function to enforce it in case the
 279 * dst_vma isn't newly allocated and chained with the anon_vma_clone()
 280 * function but just an extension of a pre-existing vma through
 281 * vma_merge.
 282 *
 283 * NOTE: the same_anon_vma list can still be changed by other
 284 * processes while mremap runs because mremap doesn't hold the
 285 * anon_vma mutex to prevent modifications to the list while it
 286 * runs. All we need to enforce is that the relative order of this
 287 * process's vmas isn't changing (we don't care about the order of
 288 * other vmas). Each vma corresponds to an anon_vma_chain structure so
 289 * there's no risk that other processes calling anon_vma_moveto_tail()
 290 * and changing the same_anon_vma list under mremap() will screw with
 291 * the relative order of this process's vmas in the list, because
 292 * they can't alter the order of any vma that belongs to this
 293 * process. And there can't be another anon_vma_moveto_tail() running
 294 * concurrently with mremap() coming from this process because we hold
 295 * the mmap_sem for the whole mremap(). fork() ordering dependency
 296 * also shouldn't be affected because fork() only cares that the
 297 * parent vmas are placed in the list before the child vmas and
 298 * anon_vma_moveto_tail() won't reorder vmas from either the fork()
 299 * parent or child.
 300 */
 301void anon_vma_moveto_tail(struct vm_area_struct *dst)
 302{
 303	struct anon_vma_chain *pavc;
 304	struct anon_vma *root = NULL;
 305
 306	list_for_each_entry_reverse(pavc, &dst->anon_vma_chain, same_vma) {
 307		struct anon_vma *anon_vma = pavc->anon_vma;
 308		VM_BUG_ON(pavc->vma != dst);
 309		root = lock_anon_vma_root(root, anon_vma);
 310		list_del(&pavc->same_anon_vma);
 311		list_add_tail(&pavc->same_anon_vma, &anon_vma->head);
 312	}
 313	unlock_anon_vma_root(root);
 314}
 315
 316/*
 317 * Attach vma to its own anon_vma, as well as to the anon_vmas that
 318 * the corresponding VMA in the parent process is attached to.
 319 * Returns 0 on success, non-zero on failure.
 320 */
 321int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
 322{
 323	struct anon_vma_chain *avc;
 324	struct anon_vma *anon_vma;
 325
 326	/* Don't bother if the parent process has no anon_vma here. */
 327	if (!pvma->anon_vma)
 328		return 0;
 329
 330	/*
 331	 * First, attach the new VMA to the parent VMA's anon_vmas,
 332	 * so rmap can find non-COWed pages in child processes.
 333	 */
 334	if (anon_vma_clone(vma, pvma))
 335		return -ENOMEM;
 336
 337	/* Then add our own anon_vma. */
 338	anon_vma = anon_vma_alloc();
 339	if (!anon_vma)
 340		goto out_error;
 341	avc = anon_vma_chain_alloc(GFP_KERNEL);
 342	if (!avc)
 343		goto out_error_free_anon_vma;
 344
 345	/*
 346	 * The root anon_vma's spinlock is the lock actually used when we
 347	 * lock any of the anon_vmas in this anon_vma tree.
 348	 */
 349	anon_vma->root = pvma->anon_vma->root;
 350	/*
 351	 * With refcounts, an anon_vma can stay around longer than the
 352	 * process it belongs to. The root anon_vma needs to be pinned until
 353	 * this anon_vma is freed, because the lock lives in the root.
 354	 */
 355	get_anon_vma(anon_vma->root);
 356	/* Mark this anon_vma as the one where our new (COWed) pages go. */
 357	vma->anon_vma = anon_vma;
 358	anon_vma_lock(anon_vma);
 359	anon_vma_chain_link(vma, avc, anon_vma);
 360	anon_vma_unlock(anon_vma);
 361
 362	return 0;
 363
 364 out_error_free_anon_vma:
 365	put_anon_vma(anon_vma);
 366 out_error:
 367	unlink_anon_vmas(vma);
 368	return -ENOMEM;
 369}
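
A usage sketch (editor's illustration, after kernel/fork.c's dup_mmap(),
where 'mpnt' is the parent vma and 'tmp' its half-initialised copy): each
copied vma is chained to the parent's anon_vmas and given its own anon_vma
for pages COWed in the child.

	INIT_LIST_HEAD(&tmp->anon_vma_chain);
	if (anon_vma_fork(tmp, mpnt))
		goto fail_nomem_anon_vma_fork;	/* unwinds the partial copy */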
 370
 371void unlink_anon_vmas(struct vm_area_struct *vma)
 372{
 373	struct anon_vma_chain *avc, *next;
 374	struct anon_vma *root = NULL;
 375
 376	/*
 377	 * Unlink each anon_vma chained to the VMA.  This list is ordered
 378	 * from newest to oldest, ensuring the root anon_vma gets freed last.
 379	 */
 380	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
 381		struct anon_vma *anon_vma = avc->anon_vma;
 382
 383		root = lock_anon_vma_root(root, anon_vma);
 384		list_del(&avc->same_anon_vma);
 385
 386		/*
 387		 * Leave empty anon_vmas on the list - we'll need
 388		 * to free them outside the lock.
 389		 */
 390		if (list_empty(&anon_vma->head))
 391			continue;
 392
 393		list_del(&avc->same_vma);
 394		anon_vma_chain_free(avc);
 395	}
 396	unlock_anon_vma_root(root);
 397
 398	/*
 399	 * Iterate the list once more, it now only contains empty and unlinked
 400	 * anon_vmas, destroy them. Could not do before due to __put_anon_vma()
 401	 * needing to acquire the anon_vma->root->mutex.
 402	 */
 403	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
 404		struct anon_vma *anon_vma = avc->anon_vma;
 405
 406		put_anon_vma(anon_vma);
 407
 408		list_del(&avc->same_vma);
 409		anon_vma_chain_free(avc);
 410	}
 411}
 412
 413static void anon_vma_ctor(void *data)
 414{
 415	struct anon_vma *anon_vma = data;
 416
 417	mutex_init(&anon_vma->mutex);
 418	atomic_set(&anon_vma->refcount, 0);
 419	INIT_LIST_HEAD(&anon_vma->head);
 420}
 421
 422void __init anon_vma_init(void)
 423{
 424	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
 425			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
 426	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
 427}
 428
 429/*
 430 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
 431 *
 432 * Since there is no serialization whatsoever against page_remove_rmap()
 433 * the best this function can do is return a locked anon_vma that might
 434 * have been relevant to this page.
 435 *
 436 * The page might have been remapped to a different anon_vma or the anon_vma
 437 * returned may already be freed (and even reused).
 438 *
 439 * In case it was remapped to a different anon_vma, the new anon_vma will be a
 440 * child of the old anon_vma, and the anon_vma lifetime rules will therefore
 441 * ensure that any anon_vma obtained from the page will still be valid for as
 442 * long as we observe page_mapped() [ hence all those page_mapped() tests ].
 443 *
 444 * All users of this function must be very careful when walking the anon_vma
 445 * chain and verify that the page in question is indeed mapped in it
 446 * [ something equivalent to page_mapped_in_vma() ].
 447 *
 448 * Since anon_vma's slab is DESTROY_BY_RCU and we know from page_remove_rmap()
 449 * that the anon_vma pointer from page->mapping is valid if there is a
 450 * mapcount, we can dereference the anon_vma after observing those.
 451 */
 452struct anon_vma *page_get_anon_vma(struct page *page)
 453{
 454	struct anon_vma *anon_vma = NULL;
 455	unsigned long anon_mapping;
 456
 457	rcu_read_lock();
 458	anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping);
 459	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
 460		goto out;
 461	if (!page_mapped(page))
 462		goto out;
 463
 464	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
 465	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
 466		anon_vma = NULL;
 467		goto out;
 468	}
 469
 470	/*
 471	 * If this page is still mapped, then its anon_vma cannot have been
 472	 * freed.  But if it has been unmapped, we have no security against the
 473	 * anon_vma structure being freed and reused (for another anon_vma:
 474	 * SLAB_DESTROY_BY_RCU guarantees that - so the atomic_inc_not_zero()
 475	 * above cannot corrupt).
 476	 */
 477	if (!page_mapped(page)) {
 478		put_anon_vma(anon_vma);
 479		anon_vma = NULL;
 480	}
 481out:
 482	rcu_read_unlock();
 483
 484	return anon_vma;
 485}
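
A usage sketch (editor's illustration, after mm/migrate.c's
unmap_and_move()): a caller that cannot keep the page lock or mmap_sem for
its whole operation pins the anon_vma by refcount instead.

	struct anon_vma *anon_vma = NULL;

	if (PageAnon(page))
		anon_vma = page_get_anon_vma(page);	/* may return NULL */

	/* unmap, copy and remap: the anon_vma cannot be freed meanwhile */

	if (anon_vma)
		put_anon_vma(anon_vma);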
 486
 487/*
 488 * Similar to page_get_anon_vma() except it locks the anon_vma.
 489 *
 490 * It's a little more complex as it tries to keep the fast path to a single
 491 * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
 492 * reference like with page_get_anon_vma() and then block on the mutex.
 493 */
 494struct anon_vma *page_lock_anon_vma(struct page *page)
 495{
 496	struct anon_vma *anon_vma = NULL;
 497	struct anon_vma *root_anon_vma;
 498	unsigned long anon_mapping;
 499
 500	rcu_read_lock();
 501	anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping);
 502	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
 503		goto out;
 504	if (!page_mapped(page))
 505		goto out;
 506
 507	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
 508	root_anon_vma = ACCESS_ONCE(anon_vma->root);
 509	if (mutex_trylock(&root_anon_vma->mutex)) {
 510		/*
 511		 * If the page is still mapped, then this anon_vma is still
 512		 * its anon_vma, and holding the mutex ensures that it will
 513		 * not go away, see anon_vma_free().
 514		 */
 515		if (!page_mapped(page)) {
 516			mutex_unlock(&root_anon_vma->mutex);
 517			anon_vma = NULL;
 518		}
 519		goto out;
 520	}
 521
 522	/* trylock failed, we have to sleep */
 523	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
 524		anon_vma = NULL;
 525		goto out;
 526	}
 527
 528	if (!page_mapped(page)) {
 529		put_anon_vma(anon_vma);
 530		anon_vma = NULL;
 531		goto out;
 532	}
 533
 534	/* we pinned the anon_vma, it's safe to sleep */
 535	rcu_read_unlock();
 536	anon_vma_lock(anon_vma);
 537
 538	if (atomic_dec_and_test(&anon_vma->refcount)) {
 539		/*
 540		 * Oops, we held the last refcount, release the lock
 541		 * and bail -- can't simply use put_anon_vma() because
 542		 * we'll deadlock on the anon_vma_lock() recursion.
 543		 */
 544		anon_vma_unlock(anon_vma);
 545		__put_anon_vma(anon_vma);
 546		anon_vma = NULL;
 547	}
 548
 549	return anon_vma;
 550
 551out:
 552	rcu_read_unlock();
 553	return anon_vma;
 554}
 555
 556void page_unlock_anon_vma(struct anon_vma *anon_vma)
 557{
 558	anon_vma_unlock(anon_vma);
 559}
 560
 561/*
 562 * At what user virtual address is page expected in @vma?
 563 * Returns virtual address or -EFAULT if page's index/offset is not
 564 * within the range mapped by the @vma.
 565 */
 566inline unsigned long
 567vma_address(struct page *page, struct vm_area_struct *vma)
 568{
 569	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
 570	unsigned long address;
 571
 572	if (unlikely(is_vm_hugetlb_page(vma)))
 573		pgoff = page->index << huge_page_order(page_hstate(page));
 574	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
 575	if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
 576		/* page should be within @vma mapping range */
 577		return -EFAULT;
 578	}
 579	return address;
 580}
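
A worked example (editor's illustration with hypothetical numbers): for a
vma with vm_start = 0x700000000000 and vm_pgoff = 0x10, a page with
page->index = 0x12 is expected at

	address = 0x700000000000 + ((0x12 - 0x10) << PAGE_SHIFT)
	        = 0x700000002000			/* with 4K pages */

An index below vm_pgoff, or far enough above it to land at or beyond
vm_end, fails the range check and yields -EFAULT.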
 581
 582/*
 583 * At what user virtual address is page expected in vma?
 584 * Caller should check the page is actually part of the vma.
 585 */
 586unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 587{
 588	if (PageAnon(page)) {
 589		struct anon_vma *page__anon_vma = page_anon_vma(page);
 590		/*
 591		 * Note: swapoff's unuse_vma() is more efficient with this
 592		 * check, and needs it to match anon_vma when KSM is active.
 593		 */
 594		if (!vma->anon_vma || !page__anon_vma ||
 595		    vma->anon_vma->root != page__anon_vma->root)
 596			return -EFAULT;
 597	} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
 598		if (!vma->vm_file ||
 599		    vma->vm_file->f_mapping != page->mapping)
 600			return -EFAULT;
 601	} else
 602		return -EFAULT;
 603	return vma_address(page, vma);
 604}
 605
 606/*
 607 * Check that @page is mapped at @address into @mm.
 608 *
 609 * If @sync is false, page_check_address may perform a racy check to avoid
 610 * the page table lock when the pte is not present (helpful when reclaiming
 611 * highly shared pages).
 612 *
 613 * On success returns with pte mapped and locked.
 614 */
 615pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
 616			  unsigned long address, spinlock_t **ptlp, int sync)
 617{
 618	pgd_t *pgd;
 619	pud_t *pud;
 620	pmd_t *pmd;
 621	pte_t *pte;
 622	spinlock_t *ptl;
 623
 624	if (unlikely(PageHuge(page))) {
 625		pte = huge_pte_offset(mm, address);
 626		ptl = &mm->page_table_lock;
 627		goto check;
 628	}
 629
 630	pgd = pgd_offset(mm, address);
 631	if (!pgd_present(*pgd))
 632		return NULL;
 633
 634	pud = pud_offset(pgd, address);
 635	if (!pud_present(*pud))
 636		return NULL;
 637
 638	pmd = pmd_offset(pud, address);
 639	if (!pmd_present(*pmd))
 640		return NULL;
 641	if (pmd_trans_huge(*pmd))
 642		return NULL;
 643
 644	pte = pte_offset_map(pmd, address);
 645	/* Make a quick check before getting the lock */
 646	if (!sync && !pte_present(*pte)) {
 647		pte_unmap(pte);
 648		return NULL;
 649	}
 650
 651	ptl = pte_lockptr(mm, pmd);
 652check:
 653	spin_lock(ptl);
 654	if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
 655		*ptlp = ptl;
 656		return pte;
 657	}
 658	pte_unmap_unlock(pte, ptl);
 659	return NULL;
 660}
 661
 662/**
 663 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 664 * @page: the page to test
 665 * @vma: the VMA to test
 666 *
 667 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 668 * if the page is not mapped into the page tables of this VMA.  Only
 669 * valid for normal file or anonymous VMAs.
 670 */
 671int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
 672{
 673	unsigned long address;
 674	pte_t *pte;
 675	spinlock_t *ptl;
 676
 677	address = vma_address(page, vma);
 678	if (address == -EFAULT)		/* out of vma range */
 679		return 0;
 680	pte = page_check_address(page, vma->vm_mm, address, &ptl, 1);
 681	if (!pte)			/* the page is not in this mm */
 682		return 0;
 683	pte_unmap_unlock(pte, ptl);
 684
 685	return 1;
 686}
 687
 688/*
 689 * Subfunctions of page_referenced: page_referenced_one called
 690 * repeatedly from either page_referenced_anon or page_referenced_file.
 691 */
 692int page_referenced_one(struct page *page, struct vm_area_struct *vma,
 693			unsigned long address, unsigned int *mapcount,
 694			unsigned long *vm_flags)
 695{
 696	struct mm_struct *mm = vma->vm_mm;
 697	int referenced = 0;
 698
 699	if (unlikely(PageTransHuge(page))) {
 700		pmd_t *pmd;
 701
 702		spin_lock(&mm->page_table_lock);
 703		/*
 704		 * rmap might return false positives; we must filter
 705		 * these out using page_check_address_pmd().
 706		 */
 707		pmd = page_check_address_pmd(page, mm, address,
 708					     PAGE_CHECK_ADDRESS_PMD_FLAG);
 709		if (!pmd) {
 710			spin_unlock(&mm->page_table_lock);
 711			goto out;
 712		}
 713
 714		if (vma->vm_flags & VM_LOCKED) {
 715			spin_unlock(&mm->page_table_lock);
 716			*mapcount = 0;	/* break early from loop */
 717			*vm_flags |= VM_LOCKED;
 718			goto out;
 719		}
 720
 721		/* go ahead even if the pmd is pmd_trans_splitting() */
 722		if (pmdp_clear_flush_young_notify(vma, address, pmd))
 723			referenced++;
 724		spin_unlock(&mm->page_table_lock);
 725	} else {
 726		pte_t *pte;
 727		spinlock_t *ptl;
 728
 729		/*
 730		 * rmap might return false positives; we must filter
 731		 * these out using page_check_address().
 732		 */
 733		pte = page_check_address(page, mm, address, &ptl, 0);
 734		if (!pte)
 735			goto out;
 736
 737		if (vma->vm_flags & VM_LOCKED) {
 738			pte_unmap_unlock(pte, ptl);
 739			*mapcount = 0;	/* break early from loop */
 740			*vm_flags |= VM_LOCKED;
 741			goto out;
 742		}
 743
 744		if (ptep_clear_flush_young_notify(vma, address, pte)) {
 745			/*
 746			 * Don't treat a reference through a sequentially read
 747			 * mapping as such.  If the page has been used in
 748			 * another mapping, we will catch it; if this other
 749			 * mapping is already gone, the unmap path will have
 750			 * set PG_referenced or activated the page.
 751			 */
 752			if (likely(!VM_SequentialReadHint(vma)))
 753				referenced++;
 754		}
 755		pte_unmap_unlock(pte, ptl);
 756	}
 757
 758	(*mapcount)--;
 759
 760	if (referenced)
 761		*vm_flags |= vma->vm_flags;
 762out:
 763	return referenced;
 764}
 765
 766static int page_referenced_anon(struct page *page,
 767				struct mem_cgroup *memcg,
 768				unsigned long *vm_flags)
 769{
 770	unsigned int mapcount;
 771	struct anon_vma *anon_vma;
 772	struct anon_vma_chain *avc;
 773	int referenced = 0;
 774
 775	anon_vma = page_lock_anon_vma(page);
 776	if (!anon_vma)
 777		return referenced;
 778
 779	mapcount = page_mapcount(page);
 780	list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
 781		struct vm_area_struct *vma = avc->vma;
 782		unsigned long address = vma_address(page, vma);
 783		if (address == -EFAULT)
 784			continue;
 785		/*
 786		 * If we are reclaiming on behalf of a cgroup, skip
 787		 * counting on behalf of references from different
 788		 * cgroups
 789		 */
 790		if (memcg && !mm_match_cgroup(vma->vm_mm, memcg))
 791			continue;
 792		referenced += page_referenced_one(page, vma, address,
 793						  &mapcount, vm_flags);
 794		if (!mapcount)
 795			break;
 796	}
 797
 798	page_unlock_anon_vma(anon_vma);
 799	return referenced;
 800}
 801
 802/**
 803 * page_referenced_file - referenced check for object-based rmap
 804 * @page: the page we're checking references on.
 805 * @memcg: target memory control group
 806 * @vm_flags: collect the vma->vm_flags of those vmas which actually referenced the page
 807 *
 808 * For an object-based mapped page, find all the places it is mapped and
 809 * check/clear the referenced flag.  This is done by following the page->mapping
 810 * pointer, then walking the chain of vmas it holds.  It returns the number
 811 * of references it found.
 812 *
 813 * This function is only called from page_referenced for object-based pages.
 814 */
 815static int page_referenced_file(struct page *page,
 816				struct mem_cgroup *memcg,
 817				unsigned long *vm_flags)
 818{
 819	unsigned int mapcount;
 820	struct address_space *mapping = page->mapping;
 821	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
 822	struct vm_area_struct *vma;
 823	struct prio_tree_iter iter;
 824	int referenced = 0;
 825
 826	/*
 827	 * The caller's checks on page->mapping and !PageAnon have made
 828	 * sure that this is a file page: the check for page->mapping
 829	 * excludes the case just before it gets set on an anon page.
 830	 */
 831	BUG_ON(PageAnon(page));
 832
 833	/*
 834	 * The page lock not only makes sure that page->mapping cannot
 835	 * suddenly be NULLified by truncation, it makes sure that the
 836	 * structure at mapping cannot be freed and reused yet,
 837	 * so we can safely take mapping->i_mmap_mutex.
 838	 */
 839	BUG_ON(!PageLocked(page));
 840
 841	mutex_lock(&mapping->i_mmap_mutex);
 842
 843	/*
 844	 * i_mmap_mutex does not stabilize mapcount at all, but mapcount
 845	 * is more likely to be accurate if we note it after spinning.
 846	 */
 847	mapcount = page_mapcount(page);
 848
 849	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
 850		unsigned long address = vma_address(page, vma);
 851		if (address == -EFAULT)
 852			continue;
 853		/*
 854		 * If we are reclaiming on behalf of a cgroup, skip
 855		 * counting on behalf of references from different
 856		 * cgroups
 857		 */
 858		if (memcg && !mm_match_cgroup(vma->vm_mm, memcg))
 859			continue;
 860		referenced += page_referenced_one(page, vma, address,
 861						  &mapcount, vm_flags);
 862		if (!mapcount)
 863			break;
 864	}
 865
 866	mutex_unlock(&mapping->i_mmap_mutex);
 867	return referenced;
 868}
 869
 870/**
 871 * page_referenced - test if the page was referenced
 872 * @page: the page to test
 873 * @is_locked: caller holds lock on the page
 874 * @memcg: target memory cgroup
 875 * @vm_flags: collect the vma->vm_flags of those vmas which actually referenced the page
 876 *
 877 * Quick test_and_clear_referenced for all mappings to a page,
 878 * returns the number of ptes which referenced the page.
 879 */
 880int page_referenced(struct page *page,
 881		    int is_locked,
 882		    struct mem_cgroup *memcg,
 883		    unsigned long *vm_flags)
 884{
 885	int referenced = 0;
 886	int we_locked = 0;
 887
 888	*vm_flags = 0;
 889	if (page_mapped(page) && page_rmapping(page)) {
 890		if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
 891			we_locked = trylock_page(page);
 892			if (!we_locked) {
 893				referenced++;
 894				goto out;
 895			}
 896		}
 897		if (unlikely(PageKsm(page)))
 898			referenced += page_referenced_ksm(page, memcg,
 899								vm_flags);
 900		else if (PageAnon(page))
 901			referenced += page_referenced_anon(page, memcg,
 902								vm_flags);
 903		else if (page->mapping)
 904			referenced += page_referenced_file(page, memcg,
 905								vm_flags);
 906		if (we_locked)
 907			unlock_page(page);
 908
 909		if (page_test_and_clear_young(page_to_pfn(page)))
 910			referenced++;
 911	}
 912out:
 913	return referenced;
 914}
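
A usage sketch (editor's illustration; page_ref_decision() is a
hypothetical name, the PAGEREF_* values follow mm/vmscan.c's enum
page_references, and the real reclaim logic is more involved): vmscan
consults page_referenced() when deciding a page's fate.

	static int page_ref_decision(struct page *page, struct mem_cgroup *memcg)
	{
		unsigned long vm_flags;
		int referenced = page_referenced(page, 1, memcg, &vm_flags);

		if (vm_flags & VM_LOCKED)	/* an mlocked vma maps it */
			return PAGEREF_RECLAIM;	/* culled to unevictable later */
		return referenced ? PAGEREF_ACTIVATE : PAGEREF_RECLAIM;
	}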
 915
 916static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 917			    unsigned long address)
 918{
 919	struct mm_struct *mm = vma->vm_mm;
 920	pte_t *pte;
 921	spinlock_t *ptl;
 922	int ret = 0;
 923
 924	pte = page_check_address(page, mm, address, &ptl, 1);
 925	if (!pte)
 926		goto out;
 927
 928	if (pte_dirty(*pte) || pte_write(*pte)) {
 929		pte_t entry;
 930
 931		flush_cache_page(vma, address, pte_pfn(*pte));
 932		entry = ptep_clear_flush_notify(vma, address, pte);
 933		entry = pte_wrprotect(entry);
 934		entry = pte_mkclean(entry);
 935		set_pte_at(mm, address, pte, entry);
 936		ret = 1;
 937	}
 938
 939	pte_unmap_unlock(pte, ptl);
 940out:
 941	return ret;
 942}
 943
 944static int page_mkclean_file(struct address_space *mapping, struct page *page)
 945{
 946	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
 947	struct vm_area_struct *vma;
 948	struct prio_tree_iter iter;
 949	int ret = 0;
 950
 951	BUG_ON(PageAnon(page));
 952
 953	mutex_lock(&mapping->i_mmap_mutex);
 954	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
 955		if (vma->vm_flags & VM_SHARED) {
 956			unsigned long address = vma_address(page, vma);
 957			if (address == -EFAULT)
 958				continue;
 959			ret += page_mkclean_one(page, vma, address);
 960		}
 961	}
 962	mutex_unlock(&mapping->i_mmap_mutex);
 963	return ret;
 964}
 965
 966int page_mkclean(struct page *page)
 967{
 968	int ret = 0;
 969
 970	BUG_ON(!PageLocked(page));
 971
 972	if (page_mapped(page)) {
 973		struct address_space *mapping = page_mapping(page);
 974		if (mapping) {
 975			ret = page_mkclean_file(mapping, page);
 976			if (page_test_and_clear_dirty(page_to_pfn(page), 1))
 977				ret = 1;
 978		}
 979	}
 980
 981	return ret;
 982}
 983EXPORT_SYMBOL_GPL(page_mkclean);
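
A usage sketch (editor's illustration, after mm/page-writeback.c's
clear_page_dirty_for_io()): before starting writeback, every pte mapping
the page is write-protected so that a later store refaults and redirties
it; any dirtiness found in the ptes is transferred back to the struct page.

	if (page_mkclean(page))
		set_page_dirty(page);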
 984
 985/**
 986 * page_move_anon_rmap - move a page to our anon_vma
 987 * @page:	the page to move to our anon_vma
 988 * @vma:	the vma the page belongs to
 989 * @address:	the user virtual address mapped
 990 *
 991 * When a page belongs exclusively to one process after a COW event,
 992 * that page can be moved into the anon_vma that belongs to just that
 993 * process, so the rmap code will not search the parent or sibling
 994 * processes.
 995 */
 996void page_move_anon_rmap(struct page *page,
 997	struct vm_area_struct *vma, unsigned long address)
 998{
 999	struct anon_vma *anon_vma = vma->anon_vma;
1000
1001	VM_BUG_ON(!PageLocked(page));
1002	VM_BUG_ON(!anon_vma);
1003	VM_BUG_ON(page->index != linear_page_index(vma, address));
1004
1005	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
1006	page->mapping = (struct address_space *) anon_vma;
1007}
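
A usage sketch (editor's illustration, after mm/memory.c's do_wp_page()):
on a write fault, an anonymous page that turns out to be exclusively ours
is retargeted rather than copied; the page lock protects the move against
the rmap code.

	if (PageAnon(old_page) && !PageKsm(old_page) &&
	    reuse_swap_page(old_page))
		page_move_anon_rmap(old_page, vma, address);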
1008
1009/**
1010 * __page_set_anon_rmap - set up new anonymous rmap
1011 * @page:	Page to add to rmap	
1012 * @vma:	VM area to add page to.
1013 * @address:	User virtual address of the mapping	
1014 * @exclusive:	the page is exclusively owned by the current process
1015 */
1016static void __page_set_anon_rmap(struct page *page,
1017	struct vm_area_struct *vma, unsigned long address, int exclusive)
1018{
1019	struct anon_vma *anon_vma = vma->anon_vma;
1020
1021	BUG_ON(!anon_vma);
1022
1023	if (PageAnon(page))
1024		return;
1025
1026	/*
1027	 * If the page isn't exclusively mapped into this vma,
1028	 * we must use the _oldest_ possible anon_vma for the
1029	 * page mapping!
1030	 */
1031	if (!exclusive)
1032		anon_vma = anon_vma->root;
1033
1034	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
1035	page->mapping = (struct address_space *) anon_vma;
1036	page->index = linear_page_index(vma, address);
1037}
1038
1039/**
1040 * __page_check_anon_rmap - sanity check anonymous rmap addition
1041 * @page:	the page to add the mapping to
1042 * @vma:	the vm area in which the mapping is added
1043 * @address:	the user virtual address mapped
1044 */
1045static void __page_check_anon_rmap(struct page *page,
1046	struct vm_area_struct *vma, unsigned long address)
1047{
1048#ifdef CONFIG_DEBUG_VM
1049	/*
1050	 * The page's anon-rmap details (mapping and index) are guaranteed to
1051	 * be set up correctly at this point.
1052	 *
1053	 * We have exclusion against page_add_anon_rmap because the caller
1054	 * always holds the page locked, except if called from page_dup_rmap,
1055	 * in which case the page is already known to be setup.
1056	 *
1057	 * We have exclusion against page_add_new_anon_rmap because those pages
1058	 * are initially only visible via the pagetables, and the pte is locked
1059	 * over the call to page_add_new_anon_rmap.
1060	 */
1061	BUG_ON(page_anon_vma(page)->root != vma->anon_vma->root);
1062	BUG_ON(page->index != linear_page_index(vma, address));
1063#endif
1064}
1065
1066/**
1067 * page_add_anon_rmap - add pte mapping to an anonymous page
1068 * @page:	the page to add the mapping to
1069 * @vma:	the vm area in which the mapping is added
1070 * @address:	the user virtual address mapped
1071 *
1072 * The caller needs to hold the pte lock, and the page must be locked in
1073 * the anon_vma case: to serialize mapping,index checking after setting,
1074 * and to ensure that PageAnon is not being upgraded racily to PageKsm
1075 * (but PageKsm is never downgraded to PageAnon).
1076 */
1077void page_add_anon_rmap(struct page *page,
1078	struct vm_area_struct *vma, unsigned long address)
1079{
1080	do_page_add_anon_rmap(page, vma, address, 0);
1081}
1082
1083/*
1084 * Special version of the above for do_swap_page, which often runs
1085 * into pages that are exclusively owned by the current process.
1086 * Everybody else should continue to use page_add_anon_rmap above.
1087 */
1088void do_page_add_anon_rmap(struct page *page,
1089	struct vm_area_struct *vma, unsigned long address, int exclusive)
1090{
1091	int first = atomic_inc_and_test(&page->_mapcount);
1092	if (first) {
1093		if (!PageTransHuge(page))
1094			__inc_zone_page_state(page, NR_ANON_PAGES);
1095		else
1096			__inc_zone_page_state(page,
1097					      NR_ANON_TRANSPARENT_HUGEPAGES);
1098	}
1099	if (unlikely(PageKsm(page)))
1100		return;
1101
1102	VM_BUG_ON(!PageLocked(page));
1103	/* address might be in next vma when migration races vma_adjust */
1104	if (first)
1105		__page_set_anon_rmap(page, vma, address, exclusive);
1106	else
1107		__page_check_anon_rmap(page, vma, address);
1108}
1109
1110/**
1111 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
1112 * @page:	the page to add the mapping to
1113 * @vma:	the vm area in which the mapping is added
1114 * @address:	the user virtual address mapped
1115 *
1116 * Same as page_add_anon_rmap but must only be called on *new* pages.
1117 * This means the inc-and-test can be bypassed.
1118 * Page does not have to be locked.
1119 */
1120void page_add_new_anon_rmap(struct page *page,
1121	struct vm_area_struct *vma, unsigned long address)
1122{
1123	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
1124	SetPageSwapBacked(page);
1125	atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
1126	if (!PageTransHuge(page))
1127		__inc_zone_page_state(page, NR_ANON_PAGES);
1128	else
1129		__inc_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
1130	__page_set_anon_rmap(page, vma, address, 1);
1131	if (page_evictable(page, vma))
1132		lru_cache_add_lru(page, LRU_ACTIVE_ANON);
1133	else
1134		add_page_to_unevictable_list(page);
1135}
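
A sequence sketch (editor's illustration, after mm/memory.c's
do_anonymous_page(); the pte_none recheck and error paths are omitted):
the new page is counted, added to the rmap and the lru, and its pte set,
all under the pte lock, which is why no page lock is needed here.

	page = alloc_zeroed_user_highpage_movable(vma, address);
	__SetPageUptodate(page);
	entry = mk_pte(page, vma->vm_page_prot);
	entry = pte_mkwrite(pte_mkdirty(entry));

	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
	inc_mm_counter(mm, MM_ANONPAGES);
	page_add_new_anon_rmap(page, vma, address);	/* also adds to lru */
	set_pte_at(mm, address, page_table, entry);
	pte_unmap_unlock(page_table, ptl);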
1136
1137/**
1138 * page_add_file_rmap - add pte mapping to a file page
1139 * @page: the page to add the mapping to
1140 *
1141 * The caller needs to hold the pte lock.
1142 */
1143void page_add_file_rmap(struct page *page)
1144{
1145	bool locked;
1146	unsigned long flags;
1147
1148	mem_cgroup_begin_update_page_stat(page, &locked, &flags);
1149	if (atomic_inc_and_test(&page->_mapcount)) {
1150		__inc_zone_page_state(page, NR_FILE_MAPPED);
1151		mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
1152	}
1153	mem_cgroup_end_update_page_stat(page, &locked, &flags);
1154}
1155
1156/**
1157 * page_remove_rmap - take down pte mapping from a page
1158 * @page: page to remove mapping from
1159 *
1160 * The caller needs to hold the pte lock.
1161 */
1162void page_remove_rmap(struct page *page)
1163{
1164	bool anon = PageAnon(page);
1165	bool locked;
1166	unsigned long flags;
1167
1168	/*
1169	 * The anon case has no mem_cgroup page_stat to update; but it may
1170	 * call mem_cgroup_uncharge_page() below, where lock ordering could
1171	 * deadlock against a page_stat move: so avoid taking the lock on anon.
1172	 */
1173	if (!anon)
1174		mem_cgroup_begin_update_page_stat(page, &locked, &flags);
1175
1176	/* page still mapped by someone else? */
1177	if (!atomic_add_negative(-1, &page->_mapcount))
1178		goto out;
1179
1180	/*
1181	 * Now that the last pte has gone, s390 must transfer dirty
1182	 * flag from storage key to struct page.  We can usually skip
1183	 * this if the page is anon, so about to be freed; but perhaps
1184	 * not if it's in swapcache - there might be another pte slot
1185	 * containing the swap entry, but page not yet written to swap.
1186	 */
1187	if ((!anon || PageSwapCache(page)) &&
1188	    page_test_and_clear_dirty(page_to_pfn(page), 1))
1189		set_page_dirty(page);
1190	/*
1191	 * Hugepages are not counted in NR_ANON_PAGES nor NR_FILE_MAPPED
1192	 * and not charged by memcg for now.
1193	 */
1194	if (unlikely(PageHuge(page)))
1195		goto out;
1196	if (anon) {
1197		mem_cgroup_uncharge_page(page);
1198		if (!PageTransHuge(page))
1199			__dec_zone_page_state(page, NR_ANON_PAGES);
1200		else
1201			__dec_zone_page_state(page,
1202					      NR_ANON_TRANSPARENT_HUGEPAGES);
1203	} else {
1204		__dec_zone_page_state(page, NR_FILE_MAPPED);
1205		mem_cgroup_dec_page_stat(page, MEMCG_NR_FILE_MAPPED);
1206	}
1207	/*
1208	 * It would be tidy to reset the PageAnon mapping here,
1209	 * but that might overwrite a racing page_add_anon_rmap
1210	 * which increments mapcount after us but sets mapping
1211	 * before us: so leave the reset to free_hot_cold_page,
1212	 * and remember that it's only reliable while mapped.
1213	 * Leaving it set also helps swapoff to reinstate ptes
1214	 * faster for those pages still in swapcache.
1215	 */
1216out:
1217	if (!anon)
1218		mem_cgroup_end_update_page_stat(page, &locked, &flags);
1219}
1220
1221/*
1222 * Subfunctions of try_to_unmap: try_to_unmap_one called
1223 * repeatedly from try_to_unmap_ksm, try_to_unmap_anon or try_to_unmap_file.
1224 */
1225int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1226		     unsigned long address, enum ttu_flags flags)
1227{
1228	struct mm_struct *mm = vma->vm_mm;
1229	pte_t *pte;
1230	pte_t pteval;
1231	spinlock_t *ptl;
1232	int ret = SWAP_AGAIN;
1233
1234	pte = page_check_address(page, mm, address, &ptl, 0);
1235	if (!pte)
1236		goto out;
1237
1238	/*
1239	 * If the page is mlock()d, we cannot swap it out.
1240	 * If it's recently referenced (perhaps page_referenced
1241	 * skipped over this mm) then we should reactivate it.
1242	 */
1243	if (!(flags & TTU_IGNORE_MLOCK)) {
1244		if (vma->vm_flags & VM_LOCKED)
1245			goto out_mlock;
1246
1247		if (TTU_ACTION(flags) == TTU_MUNLOCK)
1248			goto out_unmap;
1249	}
1250	if (!(flags & TTU_IGNORE_ACCESS)) {
1251		if (ptep_clear_flush_young_notify(vma, address, pte)) {
1252			ret = SWAP_FAIL;
1253			goto out_unmap;
1254		}
1255  	}
1256
1257	/* Nuke the page table entry. */
1258	flush_cache_page(vma, address, page_to_pfn(page));
1259	pteval = ptep_clear_flush_notify(vma, address, pte);
1260
1261	/* Move the dirty bit to the physical page now the pte is gone. */
1262	if (pte_dirty(pteval))
1263		set_page_dirty(page);
1264
1265	/* Update high watermark before we lower rss */
1266	update_hiwater_rss(mm);
1267
1268	if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
1269		if (PageAnon(page))
1270			dec_mm_counter(mm, MM_ANONPAGES);
1271		else
1272			dec_mm_counter(mm, MM_FILEPAGES);
1273		set_pte_at(mm, address, pte,
1274				swp_entry_to_pte(make_hwpoison_entry(page)));
1275	} else if (PageAnon(page)) {
1276		swp_entry_t entry = { .val = page_private(page) };
1277
1278		if (PageSwapCache(page)) {
1279			/*
1280			 * Store the swap location in the pte.
1281			 * See handle_pte_fault() ...
1282			 */
1283			if (swap_duplicate(entry) < 0) {
1284				set_pte_at(mm, address, pte, pteval);
1285				ret = SWAP_FAIL;
1286				goto out_unmap;
1287			}
1288			if (list_empty(&mm->mmlist)) {
1289				spin_lock(&mmlist_lock);
1290				if (list_empty(&mm->mmlist))
1291					list_add(&mm->mmlist, &init_mm.mmlist);
1292				spin_unlock(&mmlist_lock);
1293			}
1294			dec_mm_counter(mm, MM_ANONPAGES);
1295			inc_mm_counter(mm, MM_SWAPENTS);
1296		} else if (IS_ENABLED(CONFIG_MIGRATION)) {
1297			/*
1298			 * Store the pfn of the page in a special migration
1299			 * pte. do_swap_page() will wait until the migration
1300			 * pte is removed and then restart fault handling.
1301			 */
1302			BUG_ON(TTU_ACTION(flags) != TTU_MIGRATION);
1303			entry = make_migration_entry(page, pte_write(pteval));
1304		}
1305		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
1306		BUG_ON(pte_file(*pte));
1307	} else if (IS_ENABLED(CONFIG_MIGRATION) &&
1308		   (TTU_ACTION(flags) == TTU_MIGRATION)) {
1309		/* Establish migration entry for a file page */
1310		swp_entry_t entry;
1311		entry = make_migration_entry(page, pte_write(pteval));
1312		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
1313	} else
1314		dec_mm_counter(mm, MM_FILEPAGES);
1315
1316	page_remove_rmap(page);
1317	page_cache_release(page);
1318
1319out_unmap:
1320	pte_unmap_unlock(pte, ptl);
1321out:
1322	return ret;
1323
1324out_mlock:
1325	pte_unmap_unlock(pte, ptl);
1326
1327
1328	/*
1329	 * We need mmap_sem locking: otherwise the VM_LOCKED check is
1330	 * racy and gives an unstable result. We also can't wait here,
1331	 * because we already hold anon_vma->mutex or mapping->i_mmap_mutex.
1332	 * If the trylock fails, the page remains on the evictable lru and
1333	 * vmscan may later retry moving it to the unevictable lru, if the
1334	 * page is actually mlocked.
1335	 */
1336	if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
1337		if (vma->vm_flags & VM_LOCKED) {
1338			mlock_vma_page(page);
1339			ret = SWAP_MLOCK;
1340		}
1341		up_read(&vma->vm_mm->mmap_sem);
1342	}
1343	return ret;
1344}
1345
1346/*
1347 * objrmap doesn't work for nonlinear VMAs because the assumption that
1348 * offset-into-file correlates with offset-into-virtual-addresses does not hold.
1349 * Consequently, given a particular page and its ->index, we cannot locate the
1350 * ptes which are mapping that page without an exhaustive linear search.
1351 *
1352 * So what this code does is a mini "virtual scan" of each nonlinear VMA which
1353 * maps the file to which the target page belongs.  The ->vm_private_data field
1354 * holds the current cursor into that scan.  Successive searches will circulate
1355 * around the vma's virtual address space.
1356 *
1357 * So as more replacement pressure is applied to the pages in a nonlinear VMA,
1358 * more scanning pressure is placed against them as well.  Eventually pages
1359 * will become fully unmapped and eligible for eviction.
1360 *
1361 * For very sparsely populated VMAs this is a little inefficient - chances are
1362 * there won't be many ptes located within the scan cluster.  In this case
1363 * maybe we could scan further - to the end of the pte page, perhaps.
1364 *
1365 * Mlocked pages:  check VM_LOCKED under mmap_sem held for read, if we can
1366 * acquire it without blocking.  If vma locked, mlock the pages in the cluster,
1367 * rather than unmapping them.  If we encounter the "check_page" that vmscan is
1368 * trying to unmap, return SWAP_MLOCK, else default SWAP_AGAIN.
1369 */
1370#define CLUSTER_SIZE	min(32*PAGE_SIZE, PMD_SIZE)
1371#define CLUSTER_MASK	(~(CLUSTER_SIZE - 1))
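
A worked example (editor's illustration, assuming 4K pages and 2M pmds):
CLUSTER_SIZE = min(32 * 4K, 2M) = 128K and CLUSTER_MASK = ~0x1ffff, so for
a vma starting at 0x700000000000 a cursor of 0x21000 gives

	address = (0x700000000000 + 0x21000) & ~0x1ffff = 0x700000020000
	end     = address + 0x20000

i.e. each call below scans one aligned 128K window of up to 32 ptes.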
1372
1373static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
1374		struct vm_area_struct *vma, struct page *check_page)
1375{
1376	struct mm_struct *mm = vma->vm_mm;
1377	pgd_t *pgd;
1378	pud_t *pud;
1379	pmd_t *pmd;
1380	pte_t *pte;
1381	pte_t pteval;
1382	spinlock_t *ptl;
1383	struct page *page;
1384	unsigned long address;
1385	unsigned long end;
1386	int ret = SWAP_AGAIN;
1387	int locked_vma = 0;
1388
1389	address = (vma->vm_start + cursor) & CLUSTER_MASK;
1390	end = address + CLUSTER_SIZE;
1391	if (address < vma->vm_start)
1392		address = vma->vm_start;
1393	if (end > vma->vm_end)
1394		end = vma->vm_end;
1395
1396	pgd = pgd_offset(mm, address);
1397	if (!pgd_present(*pgd))
1398		return ret;
1399
1400	pud = pud_offset(pgd, address);
1401	if (!pud_present(*pud))
1402		return ret;
1403
1404	pmd = pmd_offset(pud, address);
1405	if (!pmd_present(*pmd))
1406		return ret;
1407
1408	/*
1409	 * If we can acquire the mmap_sem for read, and vma is VM_LOCKED,
1410	 * keep the sem while scanning the cluster for mlocking pages.
1411	 */
1412	if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
1413		locked_vma = (vma->vm_flags & VM_LOCKED);
1414		if (!locked_vma)
1415			up_read(&vma->vm_mm->mmap_sem); /* don't need it */
1416	}
1417
1418	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
1419
1420	/* Update high watermark before we lower rss */
1421	update_hiwater_rss(mm);
1422
1423	for (; address < end; pte++, address += PAGE_SIZE) {
1424		if (!pte_present(*pte))
1425			continue;
1426		page = vm_normal_page(vma, address, *pte);
1427		BUG_ON(!page || PageAnon(page));
1428
1429		if (locked_vma) {
1430			mlock_vma_page(page);   /* no-op if already mlocked */
1431			if (page == check_page)
1432				ret = SWAP_MLOCK;
1433			continue;	/* don't unmap */
1434		}
1435
1436		if (ptep_clear_flush_young_notify(vma, address, pte))
1437			continue;
1438
1439		/* Nuke the page table entry. */
1440		flush_cache_page(vma, address, pte_pfn(*pte));
1441		pteval = ptep_clear_flush_notify(vma, address, pte);
1442
1443		/* If nonlinear, store the file page offset in the pte. */
1444		if (page->index != linear_page_index(vma, address))
1445			set_pte_at(mm, address, pte, pgoff_to_pte(page->index));
1446
1447		/* Move the dirty bit to the physical page now the pte is gone. */
1448		if (pte_dirty(pteval))
1449			set_page_dirty(page);
1450
1451		page_remove_rmap(page);
1452		page_cache_release(page);
1453		dec_mm_counter(mm, MM_FILEPAGES);
1454		(*mapcount)--;
1455	}
1456	pte_unmap_unlock(pte - 1, ptl);
1457	if (locked_vma)
1458		up_read(&vma->vm_mm->mmap_sem);
1459	return ret;
1460}
1461
1462bool is_vma_temporary_stack(struct vm_area_struct *vma)
1463{
1464	int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
1465
1466	if (!maybe_stack)
1467		return false;
1468
1469	if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
1470						VM_STACK_INCOMPLETE_SETUP)
1471		return true;
1472
1473	return false;
1474}
1475
1476/**
1477 * try_to_unmap_anon - unmap or unlock anonymous page using the anon_vma-based
1478 * rmap method
1479 * @page: the page to unmap/unlock
1480 * @flags: action and flags
1481 *
1482 * Find all the mappings of a page using the mapping pointer and the vma chains
1483 * contained in the anon_vma struct it points to.
1484 *
1485 * This function is only called from try_to_unmap/try_to_munlock for
1486 * anonymous pages.
1487 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
1488 * where the page was found will be held for write.  So, we won't recheck
1489 * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
1490 * VM_LOCKED.
1491 */
1492static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
1493{
1494	struct anon_vma *anon_vma;
1495	struct anon_vma_chain *avc;
1496	int ret = SWAP_AGAIN;
1497
1498	anon_vma = page_lock_anon_vma(page);
1499	if (!anon_vma)
1500		return ret;
1501
1502	list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
1503		struct vm_area_struct *vma = avc->vma;
1504		unsigned long address;
1505
1506		/*
1507		 * During exec, a temporary VMA is setup and later moved.
1508		 * The VMA is moved under the anon_vma lock but not the
1509		 * page tables leading to a race where migration cannot
1510		 * find the migration ptes. Rather than increasing the
1511		 * locking requirements of exec(), migration skips
1512		 * temporary VMAs until after exec() completes.
1513		 */
1514		if (IS_ENABLED(CONFIG_MIGRATION) && (flags & TTU_MIGRATION) &&
1515				is_vma_temporary_stack(vma))
1516			continue;
1517
1518		address = vma_address(page, vma);
1519		if (address == -EFAULT)
1520			continue;
1521		ret = try_to_unmap_one(page, vma, address, flags);
1522		if (ret != SWAP_AGAIN || !page_mapped(page))
1523			break;
1524	}
1525
1526	page_unlock_anon_vma(anon_vma);
1527	return ret;
1528}
1529
1530/**
1531 * try_to_unmap_file - unmap/unlock file page using the object-based rmap method
1532 * @page: the page to unmap/unlock
1533 * @flags: action and flags
1534 *
1535 * Find all the mappings of a page using the mapping pointer and the vma chains
1536 * contained in the address_space struct it points to.
1537 *
1538 * This function is only called from try_to_unmap/try_to_munlock for
1539 * object-based pages.
1540 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
1541 * where the page was found will be held for write.  So, we won't recheck
1542 * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
1543 * VM_LOCKED.
1544 */
1545static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
1546{
1547	struct address_space *mapping = page->mapping;
1548	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
1549	struct vm_area_struct *vma;
1550	struct prio_tree_iter iter;
1551	int ret = SWAP_AGAIN;
1552	unsigned long cursor;
1553	unsigned long max_nl_cursor = 0;
1554	unsigned long max_nl_size = 0;
1555	unsigned int mapcount;
1556
1557	mutex_lock(&mapping->i_mmap_mutex);
1558	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
1559		unsigned long address = vma_address(page, vma);
1560		if (address == -EFAULT)
1561			continue;
1562		ret = try_to_unmap_one(page, vma, address, flags);
1563		if (ret != SWAP_AGAIN || !page_mapped(page))
1564			goto out;
1565	}
1566
1567	if (list_empty(&mapping->i_mmap_nonlinear))
1568		goto out;
1569
1570	/*
1571	 * We don't bother to try to find the munlocked page in nonlinears.
1572	 * It's costly. Instead, later, page reclaim logic may call
1573	 * try_to_unmap(TTU_MUNLOCK) and recover PG_mlocked lazily.
1574	 */
1575	if (TTU_ACTION(flags) == TTU_MUNLOCK)
1576		goto out;
1577
1578	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
1579						shared.vm_set.list) {
1580		cursor = (unsigned long) vma->vm_private_data;
1581		if (cursor > max_nl_cursor)
1582			max_nl_cursor = cursor;
1583		cursor = vma->vm_end - vma->vm_start;
1584		if (cursor > max_nl_size)
1585			max_nl_size = cursor;
1586	}
1587
1588	if (max_nl_size == 0) {	/* all nonlinears locked or reserved ? */
1589		ret = SWAP_FAIL;
1590		goto out;
1591	}
1592
1593	/*
1594	 * We don't try to search for this page in the nonlinear vmas,
1595	 * and page_referenced wouldn't have found it anyway.  Instead
1596	 * just walk the nonlinear vmas trying to age and unmap some.
1597	 * The mapcount of the page we came in with is irrelevant,
1598	 * but even so use it as a guide to how hard we should try?
1599	 */
1600	mapcount = page_mapcount(page);
1601	if (!mapcount)
1602		goto out;
1603	cond_resched();
1604
1605	max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
1606	if (max_nl_cursor == 0)
1607		max_nl_cursor = CLUSTER_SIZE;
1608
1609	do {
1610		list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
1611						shared.vm_set.list) {
1612			cursor = (unsigned long) vma->vm_private_data;
1613			while ( cursor < max_nl_cursor &&
1614				cursor < vma->vm_end - vma->vm_start) {
1615				if (try_to_unmap_cluster(cursor, &mapcount,
1616						vma, page) == SWAP_MLOCK)
1617					ret = SWAP_MLOCK;
1618				cursor += CLUSTER_SIZE;
1619				vma->vm_private_data = (void *) cursor;
1620				if ((int)mapcount <= 0)
1621					goto out;
1622			}
1623			vma->vm_private_data = (void *) max_nl_cursor;
1624		}
1625		cond_resched();
1626		max_nl_cursor += CLUSTER_SIZE;
1627	} while (max_nl_cursor <= max_nl_size);
1628
1629	/*
1630	 * Don't loop forever (perhaps all the remaining pages are
1631	 * in locked vmas).  Reset cursor on all unreserved nonlinear
1632	 * vmas, now forgetting on which ones it had fallen behind.
1633	 */
1634	list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
1635		vma->vm_private_data = NULL;
1636out:
1637	mutex_unlock(&mapping->i_mmap_mutex);
1638	return ret;
1639}
1640
/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 * @flags: action and flags
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path.  Caller must hold the page lock.
 * Return values are:
 *
 * SWAP_SUCCESS	- we succeeded in removing all mappings
 * SWAP_AGAIN	- we missed a mapping, try again later
 * SWAP_FAIL	- the page is unswappable
 * SWAP_MLOCK	- page is mlocked
 */
int try_to_unmap(struct page *page, enum ttu_flags flags)
{
	int ret;

	BUG_ON(!PageLocked(page));
	VM_BUG_ON(!PageHuge(page) && PageTransHuge(page));

	if (unlikely(PageKsm(page)))
		ret = try_to_unmap_ksm(page, flags);
	else if (PageAnon(page))
		ret = try_to_unmap_anon(page, flags);
	else
		ret = try_to_unmap_file(page, flags);
	if (ret != SWAP_MLOCK && !page_mapped(page))
		ret = SWAP_SUCCESS;
	return ret;
}
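
/*
 * Example (illustrative sketch, not from this file): the main consumer is
 * page reclaim.  shrink_page_list() in mm/vmscan.c holds the page lock and
 * dispatches on the return value roughly like this:
 *
 *	if (page_mapped(page) && mapping) {
 *		switch (try_to_unmap(page, TTU_UNMAP)) {
 *		case SWAP_FAIL:
 *			goto activate_locked;
 *		case SWAP_AGAIN:
 *			goto keep_locked;
 *		case SWAP_MLOCK:
 *			goto cull_mlocked;
 *		case SWAP_SUCCESS:
 *			break;		(page is unmapped, try to free it)
 *		}
 *	}
 */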

/**
 * try_to_munlock - try to munlock a page
 * @page: the page to be munlocked
 *
 * Called from munlock code.  Checks all of the vmas mapping the page
 * to make sure nobody else has this page mlocked.  The page will be
 * returned with PG_mlocked cleared if no other vmas have it mlocked.
 *
 * Return values are:
 *
 * SWAP_AGAIN	- no vma is holding page mlocked, or,
 * SWAP_AGAIN	- page mapped in mlocked vma -- couldn't acquire mmap sem
 * SWAP_FAIL	- page cannot be located at present
 * SWAP_MLOCK	- page is now mlocked
 */
int try_to_munlock(struct page *page)
{
	VM_BUG_ON(!PageLocked(page) || PageLRU(page));

	if (unlikely(PageKsm(page)))
		return try_to_unmap_ksm(page, TTU_MUNLOCK);
	else if (PageAnon(page))
		return try_to_unmap_anon(page, TTU_MUNLOCK);
	else
		return try_to_unmap_file(page, TTU_MUNLOCK);
}
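
/*
 * Example (illustrative sketch of munlock_vma_page() in mm/mlock.c, trimmed
 * and not verbatim): the munlock path clears PG_mlocked first, then uses
 * try_to_munlock() to discover whether another vma still needs the page
 * mlocked (in which case try_to_unmap_one() has already re-set the flag):
 *
 *	if (TestClearPageMlocked(page)) {
 *		dec_zone_page_state(page, NR_MLOCK);
 *		if (!isolate_lru_page(page)) {
 *			int ret = try_to_munlock(page);
 *			if (ret != SWAP_MLOCK)
 *				count_vm_event(UNEVICTABLE_PGMUNLOCKED);
 *			putback_lru_page(page);
 *		}
 *	}
 */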

/*
 * Drop the final reference on an anon_vma: release the reference it
 * holds on its root (freeing the root too if that was its last one),
 * then free the anon_vma itself.
 */
void __put_anon_vma(struct anon_vma *anon_vma)
{
	struct anon_vma *root = anon_vma->root;

	if (root != anon_vma && atomic_dec_and_test(&root->refcount))
		anon_vma_free(root);

	anon_vma_free(anon_vma);
}
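
/*
 * Example (illustrative): the matching fast path is put_anon_vma() in
 * include/linux/rmap.h, so __put_anon_vma() only runs for the final
 * reference, roughly:
 *
 *	static inline void put_anon_vma(struct anon_vma *anon_vma)
 *	{
 *		if (atomic_dec_and_test(&anon_vma->refcount))
 *			__put_anon_vma(anon_vma);
 *	}
 */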

#ifdef CONFIG_MIGRATION
/*
 * rmap_walk() and its helpers rmap_walk_anon() and rmap_walk_file():
 * Called by migrate.c to remove migration ptes, but might be used more later.
 */
static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
		struct vm_area_struct *, unsigned long, void *), void *arg)
{
	struct anon_vma *anon_vma;
	struct anon_vma_chain *avc;
	int ret = SWAP_AGAIN;

	/*
	 * Note: remove_migration_ptes() cannot use page_lock_anon_vma()
	 * because that depends on page_mapped(); but not all its usages
	 * are holding mmap_sem.  Users without mmap_sem are required to
	 * hold a reference count to prevent the anon_vma from disappearing.
	 */
	anon_vma = page_anon_vma(page);
	if (!anon_vma)
		return ret;
	anon_vma_lock(anon_vma);
	list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long address = vma_address(page, vma);
		if (address == -EFAULT)
			continue;
		ret = rmap_one(page, vma, address, arg);
		if (ret != SWAP_AGAIN)
			break;
	}
	anon_vma_unlock(anon_vma);
	return ret;
}

static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,
		struct vm_area_struct *, unsigned long, void *), void *arg)
{
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = SWAP_AGAIN;

	if (!mapping)
		return ret;
	mutex_lock(&mapping->i_mmap_mutex);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long address = vma_address(page, vma);
		if (address == -EFAULT)
			continue;
		ret = rmap_one(page, vma, address, arg);
		if (ret != SWAP_AGAIN)
			break;
	}
	/*
	 * No nonlinear handling: being always shared, nonlinear vmas
	 * never contain migration ptes.  Decide what to do about this
	 * limitation to linear when we need rmap_walk() on nonlinear.
	 */
	mutex_unlock(&mapping->i_mmap_mutex);
	return ret;
}

int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
		struct vm_area_struct *, unsigned long, void *), void *arg)
{
	VM_BUG_ON(!PageLocked(page));

	if (unlikely(PageKsm(page)))
		return rmap_walk_ksm(page, rmap_one, arg);
	else if (PageAnon(page))
		return rmap_walk_anon(page, rmap_one, arg);
	else
		return rmap_walk_file(page, rmap_one, arg);
}
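
/*
 * Example (illustrative): how mm/migrate.c uses this walk to remove
 * migration ptes -- a sketch, not the exact code:
 *
 *	static int remove_migration_pte(struct page *new,
 *			struct vm_area_struct *vma,
 *			unsigned long addr, void *old)
 *	{
 *		(replace the migration entry found at addr with a
 *		 pte pointing at "new", then return SWAP_AGAIN so
 *		 that the walk continues over the remaining vmas)
 *	}
 *
 *	rmap_walk(new, remove_migration_pte, old);
 */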
#endif /* CONFIG_MIGRATION */

#ifdef CONFIG_HUGETLB_PAGE
/*
 * The following three functions are for anonymous (privately mapped)
 * hugepages.  Unlike common anonymous pages, anonymous hugepages have no
 * accounting code and no lru code, because we handle hugepages differently
 * from common pages.
 */
static void __hugepage_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);

	if (PageAnon(page))
		return;
	if (!exclusive)
		anon_vma = anon_vma->root;

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
	page->index = linear_page_index(vma, address);
}
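
/*
 * Example (illustrative): the tagged mapping pointer set above is what
 * PageAnon() tests and page_anon_vma() decodes, roughly:
 *
 *	if ((unsigned long)page->mapping & PAGE_MAPPING_ANON)
 *		anon_vma = (struct anon_vma *)
 *			((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
 */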

void hugepage_add_anon_rmap(struct page *page,
			    struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	int first;

	BUG_ON(!PageLocked(page));
	BUG_ON(!anon_vma);
	/* address might be in next vma when migration races vma_adjust */
	/* _mapcount starts at -1, so the first increment brings it to 0 */
	first = atomic_inc_and_test(&page->_mapcount);
	if (first)
		__hugepage_set_anon_rmap(page, vma, address, 0);
}

void hugepage_add_new_anon_rmap(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	/* brand new page: its first and only mapping, so set _mapcount to 0 */
	atomic_set(&page->_mapcount, 0);
	__hugepage_set_anon_rmap(page, vma, address, 1);
}
#endif /* CONFIG_HUGETLB_PAGE */