v4.17
   1/*
   2 * mm/rmap.c - physical to virtual reverse mappings
   3 *
   4 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
   5 * Released under the General Public License (GPL).
   6 *
   7 * Simple, low overhead reverse mapping scheme.
   8 * Please try to keep this thing as modular as possible.
   9 *
  10 * Provides methods for unmapping each kind of mapped page:
  11 * the anon methods track anonymous pages, and
  12 * the file methods track pages belonging to an inode.
  13 *
  14 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
  15 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
  16 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
  17 * Contributions by Hugh Dickins 2003, 2004
  18 */
  19
  20/*
  21 * Lock ordering in mm:
  22 *
  23 * inode->i_mutex	(while writing or truncating, not reading or faulting)
  24 *   mm->mmap_sem
  25 *     page->flags PG_locked (lock_page)
  26 *       hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share)
  27 *         mapping->i_mmap_rwsem
  28 *           anon_vma->rwsem
  29 *             mm->page_table_lock or pte_lock
  30 *               zone_lru_lock (in mark_page_accessed, isolate_lru_page)
  31 *               swap_lock (in swap_duplicate, swap_info_get)
  32 *                 mmlist_lock (in mmput, drain_mmlist and others)
  33 *                 mapping->private_lock (in __set_page_dirty_buffers)
  34 *                   mem_cgroup_{begin,end}_page_stat (memcg->move_lock)
  35 *                     i_pages lock (widely used)
  36 *                 inode->i_lock (in set_page_dirty's __mark_inode_dirty)
  37 *                 bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
  38 *                   sb_lock (within inode_lock in fs/fs-writeback.c)
  39 *                   i_pages lock (widely used, in set_page_dirty,
  40 *                             in arch-dependent flush_dcache_mmap_lock,
  41 *                             within bdi.wb->list_lock in __sync_single_inode)
  42 *
  43 * anon_vma->rwsem,mapping->i_mutex      (memory_failure, collect_procs_anon)
  44 *   ->tasklist_lock
  45 *     pte map lock
  46 */
  47
  48#include <linux/mm.h>
  49#include <linux/sched/mm.h>
  50#include <linux/sched/task.h>
  51#include <linux/pagemap.h>
  52#include <linux/swap.h>
  53#include <linux/swapops.h>
  54#include <linux/slab.h>
  55#include <linux/init.h>
  56#include <linux/ksm.h>
  57#include <linux/rmap.h>
  58#include <linux/rcupdate.h>
  59#include <linux/export.h>
  60#include <linux/memcontrol.h>
  61#include <linux/mmu_notifier.h>
  62#include <linux/migrate.h>
  63#include <linux/hugetlb.h>
  64#include <linux/backing-dev.h>
  65#include <linux/page_idle.h>
  66#include <linux/memremap.h>
  67
  68#include <asm/tlbflush.h>
  69
  70#include <trace/events/tlb.h>
  71
  72#include "internal.h"
  73
  74static struct kmem_cache *anon_vma_cachep;
  75static struct kmem_cache *anon_vma_chain_cachep;
  76
  77static inline struct anon_vma *anon_vma_alloc(void)
  78{
  79	struct anon_vma *anon_vma;
  80
  81	anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
  82	if (anon_vma) {
  83		atomic_set(&anon_vma->refcount, 1);
  84		anon_vma->degree = 1;	/* Reference for first vma */
  85		anon_vma->parent = anon_vma;
  86		/*
  87		 * Initialise the anon_vma root to point to itself. If called
  88		 * from fork, the root will be reset to the parent's anon_vma.
  89		 */
  90		anon_vma->root = anon_vma;
  91	}
  92
  93	return anon_vma;
  94}
  95
  96static inline void anon_vma_free(struct anon_vma *anon_vma)
  97{
  98	VM_BUG_ON(atomic_read(&anon_vma->refcount));
  99
 100	/*
 101	 * Synchronize against page_lock_anon_vma_read() such that
 102	 * we can safely hold the lock without the anon_vma getting
 103	 * freed.
 104	 *
 105	 * Relies on the full mb implied by the atomic_dec_and_test() from
 106	 * put_anon_vma() against the acquire barrier implied by
 107	 * down_read_trylock() from page_lock_anon_vma_read(). This orders:
 108	 *
 109	 * page_lock_anon_vma_read()	VS	put_anon_vma()
 110	 *   down_read_trylock()		  atomic_dec_and_test()
 111	 *   LOCK				  MB
 112	 *   atomic_read()			  rwsem_is_locked()
 113	 *
 114	 * LOCK should suffice since the actual taking of the lock must
 115	 * happen _before_ what follows.
 116	 */
 117	might_sleep();
 118	if (rwsem_is_locked(&anon_vma->root->rwsem)) {
 119		anon_vma_lock_write(anon_vma);
 120		anon_vma_unlock_write(anon_vma);
 121	}
 122
 123	kmem_cache_free(anon_vma_cachep, anon_vma);
 124}
 125
 126static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
 127{
 128	return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
 129}
 130
 131static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
 132{
 133	kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
 134}
 135
 136static void anon_vma_chain_link(struct vm_area_struct *vma,
 137				struct anon_vma_chain *avc,
 138				struct anon_vma *anon_vma)
 139{
 140	avc->vma = vma;
 141	avc->anon_vma = anon_vma;
 142	list_add(&avc->same_vma, &vma->anon_vma_chain);
 143	anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
 144}
 145
 146/**
 147 * __anon_vma_prepare - attach an anon_vma to a memory region
 148 * @vma: the memory region in question
 149 *
 150 * This makes sure the memory mapping described by 'vma' has
 151 * an 'anon_vma' attached to it, so that we can associate the
 152 * anonymous pages mapped into it with that anon_vma.
 153 *
 154 * The common case will be that we already have one, which
 155 * is handled inline by anon_vma_prepare(). But if
 156 * not we either need to find an adjacent mapping that we
 157 * can re-use the anon_vma from (very common when the only
 158 * reason for splitting a vma has been mprotect()), or we
 159 * allocate a new one.
 160 *
 161 * Anon-vma allocations are very subtle, because we may have
 162 * optimistically looked up an anon_vma in page_lock_anon_vma_read()
 163 * and that may actually touch the spinlock even in the newly
 164 * allocated vma (it depends on RCU to make sure that the
 165 * anon_vma isn't actually destroyed).
 166 *
 167 * As a result, we need to do proper anon_vma locking even
 168 * for the new allocation. At the same time, we do not want
 169 * to do any locking for the common case of already having
 170 * an anon_vma.
 171 *
 172 * This must be called with the mmap_sem held for reading.
 173 */
 174int __anon_vma_prepare(struct vm_area_struct *vma)
 175{
 176	struct mm_struct *mm = vma->vm_mm;
 177	struct anon_vma *anon_vma, *allocated;
 178	struct anon_vma_chain *avc;
 179
 180	might_sleep();
 181
 182	avc = anon_vma_chain_alloc(GFP_KERNEL);
 183	if (!avc)
 184		goto out_enomem;
 185
 186	anon_vma = find_mergeable_anon_vma(vma);
 187	allocated = NULL;
 188	if (!anon_vma) {
 189		anon_vma = anon_vma_alloc();
 190		if (unlikely(!anon_vma))
 191			goto out_enomem_free_avc;
 192		allocated = anon_vma;
 193	}
 194
 195	anon_vma_lock_write(anon_vma);
 196	/* page_table_lock to protect against threads */
 197	spin_lock(&mm->page_table_lock);
 198	if (likely(!vma->anon_vma)) {
 199		vma->anon_vma = anon_vma;
 200		anon_vma_chain_link(vma, avc, anon_vma);
 201		/* vma reference or self-parent link for new root */
 202		anon_vma->degree++;
 203		allocated = NULL;
 204		avc = NULL;
 205	}
 206	spin_unlock(&mm->page_table_lock);
 207	anon_vma_unlock_write(anon_vma);
 208
 209	if (unlikely(allocated))
 210		put_anon_vma(allocated);
 211	if (unlikely(avc))
 212		anon_vma_chain_free(avc);
 213
 214	return 0;
 215
 216 out_enomem_free_avc:
 217	anon_vma_chain_free(avc);
 218 out_enomem:
 219	return -ENOMEM;
 220}
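/*
 * For reference: the fast path that pairs with __anon_vma_prepare() is the
 * inline anon_vma_prepare() in include/linux/rmap.h. A minimal sketch of
 * that wrapper (reproduced here for reading convenience, not part of this
 * file):
 *
 *	static inline int anon_vma_prepare(struct vm_area_struct *vma)
 *	{
 *		if (likely(vma->anon_vma))
 *			return 0;
 *
 *		return __anon_vma_prepare(vma);
 *	}
 */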
 221
 222/*
 223 * This is a useful helper function for locking the anon_vma root as
 224 * we traverse the vma->anon_vma_chain, looping over anon_vma's that
 225 * have the same vma.
 226 *
 227 * Such anon_vmas should have the same root, so you'd expect to see
 228 * just a single lock on the root's rwsem for the whole traversal.
 229 */
 230static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
 231{
 232	struct anon_vma *new_root = anon_vma->root;
 233	if (new_root != root) {
 234		if (WARN_ON_ONCE(root))
 235			up_write(&root->rwsem);
 236		root = new_root;
 237		down_write(&root->rwsem);
 238	}
 239	return root;
 240}
 241
 242static inline void unlock_anon_vma_root(struct anon_vma *root)
 243{
 244	if (root)
 245		up_write(&root->rwsem);
 246}
 247
 248/*
 249 * Attach the anon_vmas from src to dst.
 250 * Returns 0 on success, -ENOMEM on failure.
 251 *
 252 * If dst->anon_vma is NULL this function tries to find and reuse an existing
 253 * anon_vma which has no vmas and only one child anon_vma. This prevents
 254 * degradation of the anon_vma hierarchy into an endless linear chain in the
 255 * case of a constantly forking task. On the other hand, an anon_vma with
 256 * more than one child isn't reused even if there are no live vmas, so the
 257 * rmap walker has a good chance of avoiding a scan of the whole hierarchy
 258 * when it searches for where a page is mapped.
 259 */
 260int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
 261{
 262	struct anon_vma_chain *avc, *pavc;
 263	struct anon_vma *root = NULL;
 264
 265	list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
 266		struct anon_vma *anon_vma;
 267
 268		avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
 269		if (unlikely(!avc)) {
 270			unlock_anon_vma_root(root);
 271			root = NULL;
 272			avc = anon_vma_chain_alloc(GFP_KERNEL);
 273			if (!avc)
 274				goto enomem_failure;
 275		}
 276		anon_vma = pavc->anon_vma;
 277		root = lock_anon_vma_root(root, anon_vma);
 278		anon_vma_chain_link(dst, avc, anon_vma);
 279
 280		/*
 281		 * Reuse existing anon_vma if its degree is lower than two,
 282		 * that means it has no vma and only one anon_vma child.
 283		 *
 284		 * Do not choose the parent anon_vma, otherwise the first child
 285		 * will always reuse it. The root anon_vma is never reused:
 286		 * it has a self-parent reference and at least one child.
 287		 */
 288		if (!dst->anon_vma && anon_vma != src->anon_vma &&
 289				anon_vma->degree < 2)
 290			dst->anon_vma = anon_vma;
 291	}
 292	if (dst->anon_vma)
 293		dst->anon_vma->degree++;
 294	unlock_anon_vma_root(root);
 295	return 0;
 296
 297 enomem_failure:
 298	/*
 299	 * dst->anon_vma is dropped here otherwise its degree can be incorrectly
 300	 * decremented in unlink_anon_vmas().
 301	 * We can safely do this because callers of anon_vma_clone() don't care
 302	 * about dst->anon_vma if anon_vma_clone() failed.
 303	 */
 304	dst->anon_vma = NULL;
 305	unlink_anon_vmas(dst);
 306	return -ENOMEM;
 307}
 308
 309/*
 310 * Attach vma to its own anon_vma, as well as to the anon_vmas that
 311 * the corresponding VMA in the parent process is attached to.
 312 * Returns 0 on success, non-zero on failure.
 313 */
 314int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
 315{
 316	struct anon_vma_chain *avc;
 317	struct anon_vma *anon_vma;
 318	int error;
 319
 320	/* Don't bother if the parent process has no anon_vma here. */
 321	if (!pvma->anon_vma)
 322		return 0;
 323
 324	/* Drop inherited anon_vma, we'll reuse existing or allocate new. */
 325	vma->anon_vma = NULL;
 326
 327	/*
 328	 * First, attach the new VMA to the parent VMA's anon_vmas,
 329	 * so rmap can find non-COWed pages in child processes.
 330	 */
 331	error = anon_vma_clone(vma, pvma);
 332	if (error)
 333		return error;
 334
 335	/* An existing anon_vma has been reused, all done then. */
 336	if (vma->anon_vma)
 337		return 0;
 338
 339	/* Then add our own anon_vma. */
 340	anon_vma = anon_vma_alloc();
 341	if (!anon_vma)
 342			goto out_error;
 343	avc = anon_vma_chain_alloc(GFP_KERNEL);
 344	if (!avc)
 345		goto out_error_free_anon_vma;
 346
 347	/*
 348	 * The root anon_vma's spinlock is the lock actually used when we
 349	 * lock any of the anon_vmas in this anon_vma tree.
 350	 */
 351	anon_vma->root = pvma->anon_vma->root;
 352	anon_vma->parent = pvma->anon_vma;
 353	/*
 354	 * With refcounts, an anon_vma can stay around longer than the
 355	 * process it belongs to. The root anon_vma needs to be pinned until
 356	 * this anon_vma is freed, because the lock lives in the root.
 357	 */
 358	get_anon_vma(anon_vma->root);
 359	/* Mark this anon_vma as the one where our new (COWed) pages go. */
 360	vma->anon_vma = anon_vma;
 361	anon_vma_lock_write(anon_vma);
 362	anon_vma_chain_link(vma, avc, anon_vma);
 363	anon_vma->parent->degree++;
 364	anon_vma_unlock_write(anon_vma);
 365
 366	return 0;
 367
 368 out_error_free_anon_vma:
 369	put_anon_vma(anon_vma);
 370 out_error:
 371	unlink_anon_vmas(vma);
 372	return -ENOMEM;
 373}
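/*
 * A hedged sketch of the fork-time caller (the function name below is
 * hypothetical; the real caller is dup_mmap() in kernel/fork.c): each VMA
 * copied into the child is attached to the parent's anon_vmas first.
 */
static int example_fork_one_vma(struct vm_area_struct *tmp,
				struct vm_area_struct *mpnt)
{
	/* tmp is the child's copy of the parent vma mpnt */
	if (anon_vma_fork(tmp, mpnt))
		return -ENOMEM;

	return 0;
}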
 374
 375void unlink_anon_vmas(struct vm_area_struct *vma)
 376{
 377	struct anon_vma_chain *avc, *next;
 378	struct anon_vma *root = NULL;
 379
 380	/*
 381	 * Unlink each anon_vma chained to the VMA.  This list is ordered
 382	 * from newest to oldest, ensuring the root anon_vma gets freed last.
 383	 */
 384	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
 385		struct anon_vma *anon_vma = avc->anon_vma;
 386
 387		root = lock_anon_vma_root(root, anon_vma);
 388		anon_vma_interval_tree_remove(avc, &anon_vma->rb_root);
 389
 390		/*
 391		 * Leave empty anon_vmas on the list - we'll need
 392		 * to free them outside the lock.
 393		 */
 394		if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) {
 395			anon_vma->parent->degree--;
 396			continue;
 397		}
 398
 399		list_del(&avc->same_vma);
 400		anon_vma_chain_free(avc);
 401	}
 402	if (vma->anon_vma)
 403		vma->anon_vma->degree--;
 404	unlock_anon_vma_root(root);
 405
 406	/*
 407	 * Iterate the list once more; it now only contains empty and unlinked
 408	 * anon_vmas. Destroy them: this could not be done earlier because
 409	 * __put_anon_vma() may need to write-acquire the anon_vma->root->rwsem.
 410	 */
 411	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
 412		struct anon_vma *anon_vma = avc->anon_vma;
 413
 414		VM_WARN_ON(anon_vma->degree);
 415		put_anon_vma(anon_vma);
 416
 417		list_del(&avc->same_vma);
 418		anon_vma_chain_free(avc);
 419	}
 420}
 421
 422static void anon_vma_ctor(void *data)
 423{
 424	struct anon_vma *anon_vma = data;
 425
 426	init_rwsem(&anon_vma->rwsem);
 427	atomic_set(&anon_vma->refcount, 0);
 428	anon_vma->rb_root = RB_ROOT_CACHED;
 429}
 430
 431void __init anon_vma_init(void)
 432{
 433	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
 434			0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
 435			anon_vma_ctor);
 436	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
 437			SLAB_PANIC|SLAB_ACCOUNT);
 438}
 439
 440/*
 441 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
 442 *
 443 * Since there is no serialization whatsoever against page_remove_rmap()
 444 * the best this function can do is return a locked anon_vma that might
 445 * have been relevant to this page.
 446 *
 447 * The page might have been remapped to a different anon_vma or the anon_vma
 448 * returned may already be freed (and even reused).
 449 *
 450 * In case it was remapped to a different anon_vma, the new anon_vma will be a
 451 * child of the old anon_vma, and the anon_vma lifetime rules will therefore
 452 * ensure that any anon_vma obtained from the page will still be valid for as
 453 * long as we observe page_mapped() [ hence all those page_mapped() tests ].
 454 *
 455 * All users of this function must be very careful when walking the anon_vma
 456 * chain and verify that the page in question is indeed mapped in it
 457 * [ something equivalent to page_mapped_in_vma() ].
 458 *
 459 * Since the anon_vma's slab is SLAB_TYPESAFE_BY_RCU and we know from
 460 * page_remove_rmap() that the anon_vma pointer from page->mapping is valid
 461 * if there is a mapcount, we can dereference the anon_vma after observing those.
 462 */
 463struct anon_vma *page_get_anon_vma(struct page *page)
 464{
 465	struct anon_vma *anon_vma = NULL;
 466	unsigned long anon_mapping;
 467
 468	rcu_read_lock();
 469	anon_mapping = (unsigned long)READ_ONCE(page->mapping);
 470	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
 471		goto out;
 472	if (!page_mapped(page))
 473		goto out;
 474
 475	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
 476	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
 477		anon_vma = NULL;
 478		goto out;
 479	}
 480
 481	/*
 482	 * If this page is still mapped, then its anon_vma cannot have been
 483	 * freed.  But if it has been unmapped, we have no security against the
 484	 * anon_vma structure being freed and reused (for another anon_vma:
 485	 * SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero()
 486	 * above cannot corrupt).
 487	 */
 488	if (!page_mapped(page)) {
 489		rcu_read_unlock();
 490		put_anon_vma(anon_vma);
 491		return NULL;
 492	}
 493out:
 494	rcu_read_unlock();
 495
 496	return anon_vma;
 497}
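/*
 * A hedged usage sketch (hypothetical caller): pin the anon_vma with
 * page_get_anon_vma(), inspect it, then drop the reference with
 * put_anon_vma().
 */
static void example_peek_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma = page_get_anon_vma(page);

	if (!anon_vma)
		return;		/* not anon, or no longer mapped */

	/* ... anon_vma cannot be freed while we hold the reference ... */

	put_anon_vma(anon_vma);
}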
 498
 499/*
 500 * Similar to page_get_anon_vma() except it locks the anon_vma.
 501 *
 502 * It's a little more complex as it tries to keep the fast path to a single
 503 * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
 504 * reference like with page_get_anon_vma() and then block on the rwsem.
 505 */
 506struct anon_vma *page_lock_anon_vma_read(struct page *page)
 507{
 508	struct anon_vma *anon_vma = NULL;
 509	struct anon_vma *root_anon_vma;
 510	unsigned long anon_mapping;
 511
 512	rcu_read_lock();
 513	anon_mapping = (unsigned long)READ_ONCE(page->mapping);
 514	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
 515		goto out;
 516	if (!page_mapped(page))
 517		goto out;
 518
 519	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
 520	root_anon_vma = READ_ONCE(anon_vma->root);
 521	if (down_read_trylock(&root_anon_vma->rwsem)) {
 522		/*
 523		 * If the page is still mapped, then this anon_vma is still
 524		 * its anon_vma, and holding the rwsem ensures that it will
 525		 * not go away, see anon_vma_free().
 526		 */
 527		if (!page_mapped(page)) {
 528			up_read(&root_anon_vma->rwsem);
 529			anon_vma = NULL;
 530		}
 531		goto out;
 532	}
 533
 534	/* trylock failed, we got to sleep */
 535	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
 536		anon_vma = NULL;
 537		goto out;
 538	}
 539
 540	if (!page_mapped(page)) {
 541		rcu_read_unlock();
 542		put_anon_vma(anon_vma);
 543		return NULL;
 544	}
 545
 546	/* we pinned the anon_vma, it's safe to sleep */
 547	rcu_read_unlock();
 548	anon_vma_lock_read(anon_vma);
 549
 550	if (atomic_dec_and_test(&anon_vma->refcount)) {
 551		/*
 552		 * Oops, we held the last refcount, release the lock
 553		 * and bail -- can't simply use put_anon_vma() because
 554		 * we'll deadlock on the anon_vma_lock_write() recursion.
 555		 */
 556		anon_vma_unlock_read(anon_vma);
 557		__put_anon_vma(anon_vma);
 558		anon_vma = NULL;
 559	}
 560
 561	return anon_vma;
 562
 563out:
 564	rcu_read_unlock();
 565	return anon_vma;
 566}
 567
 568void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
 569{
 570	anon_vma_unlock_read(anon_vma);
 571}
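/*
 * A hedged pairing sketch (hypothetical caller): page_lock_anon_vma_read()
 * and page_unlock_anon_vma_read() bracket a walk of the anon_vma tree.
 */
static void example_walk_anon_locked(struct page *page)
{
	struct anon_vma *anon_vma = page_lock_anon_vma_read(page);

	if (!anon_vma)
		return;		/* page was unmapped under us */

	/* ... walk anon_vma->rb_root under the read-held root rwsem ... */

	page_unlock_anon_vma_read(anon_vma);
}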
 572
 573#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
 574/*
 575 * Flush TLB entries for recently unmapped pages from remote CPUs. If a PTE
 576 * was dirty when it was unmapped, it is important that it be flushed before
 577 * any IO is initiated on the page, to prevent lost writes. Similarly, it
 578 * must be flushed before freeing to prevent data leakage.
 579 */
 580void try_to_unmap_flush(void)
 581{
 582	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
 583
 584	if (!tlb_ubc->flush_required)
 585		return;
 586
 587	arch_tlbbatch_flush(&tlb_ubc->arch);
 588	tlb_ubc->flush_required = false;
 589	tlb_ubc->writable = false;
 590}
 591
 592/* Flush iff there are potentially writable TLB entries that can race with IO */
 593void try_to_unmap_flush_dirty(void)
 594{
 595	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
 596
 597	if (tlb_ubc->writable)
 598		try_to_unmap_flush();
 599}
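/*
 * A hedged ordering sketch (hypothetical caller, simplified from the
 * reclaim path): unmap with flushes deferred into the batch, then drain
 * the batch before any IO is started on the page.
 */
static void example_unmap_then_start_io(struct page *page,
					enum ttu_flags flags)
{
	/* PTEs are cleared; remote TLB flushes may be deferred */
	try_to_unmap(page, flags | TTU_BATCH_FLUSH);

	/* flush potentially writable stale TLB entries before pageout */
	try_to_unmap_flush_dirty();

	/* ... writeback of the page would be started here ... */
}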
 600
 601static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
 602{
 603	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
 604
 605	arch_tlbbatch_add_mm(&tlb_ubc->arch, mm);
 606	tlb_ubc->flush_required = true;
 607
 608	/*
 609	 * Ensure compiler does not re-order the setting of tlb_flush_batched
 610	 * before the PTE is cleared.
 611	 */
 612	barrier();
 613	mm->tlb_flush_batched = true;
 614
 615	/*
 616	 * If the PTE was dirty then it's best to assume it's writable. The
 617	 * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
 618	 * before the page is queued for IO.
 619	 */
 620	if (writable)
 621		tlb_ubc->writable = true;
 622}
 623
 624/*
 625 * Returns true if the TLB flush should be deferred to the end of a batch of
 626 * unmap operations to reduce IPIs.
 627 */
 628static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
 629{
 630	bool should_defer = false;
 631
 632	if (!(flags & TTU_BATCH_FLUSH))
 633		return false;
 634
 635	/* If remote CPUs need to be flushed then defer the flush as a batch */
 636	if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids)
 637		should_defer = true;
 638	put_cpu();
 639
 640	return should_defer;
 641}
 642
 643/*
 644 * Reclaim unmaps pages under the PTL but does not flush the TLB prior to
 645 * releasing the PTL if TLB flushes are batched. It's possible for a parallel
 646 * operation such as mprotect or munmap to race between reclaim unmapping
 647 * the page and flushing the page. If this race occurs, it potentially allows
 648 * access to data via a stale TLB entry. Tracking all mm's that have TLB
 649 * batching in flight would be expensive during reclaim so instead track
 650 * whether TLB batching occurred in the past and if so then do a flush here
 651 * if required. This will cost one additional flush per reclaim cycle paid
 652 * by the first operation at risk such as mprotect and munmap.
 653 *
 654 * This must be called under the PTL so that an access to tlb_flush_batched
 655 * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise
 656 * via the PTL.
 657 */
 658void flush_tlb_batched_pending(struct mm_struct *mm)
 659{
 660	if (mm->tlb_flush_batched) {
 661		flush_tlb_mm(mm);
 662
 663		/*
 664		 * Do not allow the compiler to re-order the clearing of
 665		 * tlb_flush_batched before the tlb is flushed.
 666		 */
 667		barrier();
 668		mm->tlb_flush_batched = false;
 669	}
 670}
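/*
 * A hedged sketch of an at-risk caller (hypothetical; the real callers are
 * the mprotect/munmap-style paths): the pending batch is flushed under the
 * PTL before PTEs are modified.
 */
static void example_modify_ptes(struct mm_struct *mm, spinlock_t *ptl)
{
	spin_lock(ptl);			/* page table lock */
	flush_tlb_batched_pending(mm);	/* close the reclaim race above */
	/* ... rewrite PTEs under the PTL ... */
	spin_unlock(ptl);
}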
 671#else
 672static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
 673{
 674}
 675
 676static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
 677{
 678	return false;
 679}
 680#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
 681
 682/*
 683 * At what user virtual address is page expected in vma?
 684 * Caller should check the page is actually part of the vma.
 685 */
 686unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 687{
 688	unsigned long address;
 689	if (PageAnon(page)) {
 690		struct anon_vma *page__anon_vma = page_anon_vma(page);
 691		/*
 692		 * Note: swapoff's unuse_vma() is more efficient with this
 693		 * check, and needs it to match anon_vma when KSM is active.
 694		 */
 695		if (!vma->anon_vma || !page__anon_vma ||
 696		    vma->anon_vma->root != page__anon_vma->root)
 697			return -EFAULT;
 698	} else if (page->mapping) {
 699		if (!vma->vm_file || vma->vm_file->f_mapping != page->mapping)
 700			return -EFAULT;
 701	} else
 702		return -EFAULT;
 703	address = __vma_address(page, vma);
 704	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
 705		return -EFAULT;
 706	return address;
 707}
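/*
 * For reference, a minimal sketch of the arithmetic behind __vma_address()
 * (the real helper lives in mm/internal.h; the name below is hypothetical):
 */
static inline unsigned long example_vma_address(struct page *page,
						struct vm_area_struct *vma)
{
	pgoff_t pgoff = page_to_pgoff(page);

	/* linear mapping: vma->vm_start corresponds to vma->vm_pgoff */
	return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
}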
 708
 709pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
 710{
 711	pgd_t *pgd;
 712	p4d_t *p4d;
 713	pud_t *pud;
 714	pmd_t *pmd = NULL;
 715	pmd_t pmde;
 716
 717	pgd = pgd_offset(mm, address);
 718	if (!pgd_present(*pgd))
 719		goto out;
 720
 721	p4d = p4d_offset(pgd, address);
 722	if (!p4d_present(*p4d))
 723		goto out;
 724
 725	pud = pud_offset(p4d, address);
 726	if (!pud_present(*pud))
 727		goto out;
 728
 729	pmd = pmd_offset(pud, address);
 730	/*
 731	 * Some THP functions use the sequence pmdp_huge_clear_flush(), set_pmd_at()
 732	 * without holding anon_vma lock for write.  So when looking for a
 733	 * genuine pmde (in which to find pte), test present and !THP together.
 734	 */
 735	pmde = *pmd;
 736	barrier();
 737	if (!pmd_present(pmde) || pmd_trans_huge(pmde))
 738		pmd = NULL;
 739out:
 740	return pmd;
 741}
 742
 743struct page_referenced_arg {
 744	int mapcount;
 745	int referenced;
 746	unsigned long vm_flags;
 747	struct mem_cgroup *memcg;
 748};
 749/*
 750 * arg: page_referenced_arg will be passed
 751 */
 752static bool page_referenced_one(struct page *page, struct vm_area_struct *vma,
 753			unsigned long address, void *arg)
 754{
 755	struct page_referenced_arg *pra = arg;
 756	struct page_vma_mapped_walk pvmw = {
 757		.page = page,
 758		.vma = vma,
 759		.address = address,
 760	};
 761	int referenced = 0;
 762
 763	while (page_vma_mapped_walk(&pvmw)) {
 764		address = pvmw.address;
 765
 766		if (vma->vm_flags & VM_LOCKED) {
 767			page_vma_mapped_walk_done(&pvmw);
 768			pra->vm_flags |= VM_LOCKED;
 769			return false; /* To break the loop */
 770		}
 771
 772		if (pvmw.pte) {
 773			if (ptep_clear_flush_young_notify(vma, address,
 774						pvmw.pte)) {
 775				/*
 776				 * Don't treat a reference through
 777				 * a sequentially read mapping as such.
 778				 * If the page has been used in another mapping,
 779				 * we will catch it; if this other mapping is
 780				 * already gone, the unmap path will have set
 781				 * PG_referenced or activated the page.
 782				 */
 783				if (likely(!(vma->vm_flags & VM_SEQ_READ)))
 784					referenced++;
 785			}
 786		} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
 787			if (pmdp_clear_flush_young_notify(vma, address,
 788						pvmw.pmd))
 789				referenced++;
 790		} else {
 791			/* unexpected pmd-mapped page? */
 792			WARN_ON_ONCE(1);
 793		}
 794
 795		pra->mapcount--;
 796	}
 797
 798	if (referenced)
 799		clear_page_idle(page);
 800	if (test_and_clear_page_young(page))
 801		referenced++;
 802
 803	if (referenced) {
 804		pra->referenced++;
 805		pra->vm_flags |= vma->vm_flags;
 806	}
 807
 808	if (!pra->mapcount)
 809		return false; /* To break the loop */
 810
 811	return true;
 812}
 813
 814static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg)
 815{
 816	struct page_referenced_arg *pra = arg;
 817	struct mem_cgroup *memcg = pra->memcg;
 818
 819	if (!mm_match_cgroup(vma->vm_mm, memcg))
 820		return true;
 821
 822	return false;
 823}
 824
 825/**
 826 * page_referenced - test if the page was referenced
 827 * @page: the page to test
 828 * @is_locked: caller holds lock on the page
 829 * @memcg: target memory cgroup
 830 * @vm_flags: collect the vm_flags of vmas which actually referenced the page
 831 *
 832 * Quick test_and_clear_referenced for all mappings to a page,
 833 * returns the number of ptes which referenced the page.
 834 */
 835int page_referenced(struct page *page,
 836		    int is_locked,
 837		    struct mem_cgroup *memcg,
 838		    unsigned long *vm_flags)
 839{
 840	int we_locked = 0;
 841	struct page_referenced_arg pra = {
 842		.mapcount = total_mapcount(page),
 843		.memcg = memcg,
 844	};
 845	struct rmap_walk_control rwc = {
 846		.rmap_one = page_referenced_one,
 847		.arg = (void *)&pra,
 848		.anon_lock = page_lock_anon_vma_read,
 849	};
 850
 851	*vm_flags = 0;
 852	if (!page_mapped(page))
 853		return 0;
 854
 855	if (!page_rmapping(page))
 856		return 0;
 857
 858	if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
 859		we_locked = trylock_page(page);
 860		if (!we_locked)
 861			return 1;
 862	}
 863
 864	/*
 865	 * If we are reclaiming on behalf of a cgroup, skip
 866	 * counting on behalf of references from different
 867	 * cgroups
 868	 */
 869	if (memcg) {
 870		rwc.invalid_vma = invalid_page_referenced_vma;
 871	}
 872
 873	rmap_walk(page, &rwc);
 874	*vm_flags = pra.vm_flags;
 875
 876	if (we_locked)
 877		unlock_page(page);
 878
 879	return pra.referenced;
 880}
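/*
 * A hedged sketch of a reclaim-side caller (hypothetical, simplified from
 * how vmscan consumes the result; assumes the caller holds the page lock,
 * hence is_locked == 1):
 */
static int example_page_check_references(struct page *page,
					 struct mem_cgroup *memcg)
{
	unsigned long vm_flags;
	int referenced;

	referenced = page_referenced(page, 1, memcg, &vm_flags);

	if (vm_flags & VM_LOCKED)
		return -1;	/* mlocked somewhere: do not reclaim */

	return referenced;	/* number of ptes that referenced the page */
}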
 881
 882static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 883			    unsigned long address, void *arg)
 884{
 885	struct page_vma_mapped_walk pvmw = {
 886		.page = page,
 887		.vma = vma,
 888		.address = address,
 889		.flags = PVMW_SYNC,
 890	};
 891	unsigned long start = address, end;
 892	int *cleaned = arg;
 893
 894	/*
 895	 * We have to assume the worst case, i.e. a pmd mapping, for invalidation.
 896	 * Note that the page cannot be freed from this function.
 897	 */
 898	end = min(vma->vm_end, start + (PAGE_SIZE << compound_order(page)));
 899	mmu_notifier_invalidate_range_start(vma->vm_mm, start, end);
 900
 901	while (page_vma_mapped_walk(&pvmw)) {
 902		unsigned long cstart;
 903		int ret = 0;
 904
 905		cstart = address = pvmw.address;
 906		if (pvmw.pte) {
 907			pte_t entry;
 908			pte_t *pte = pvmw.pte;
 909
 910			if (!pte_dirty(*pte) && !pte_write(*pte))
 911				continue;
 912
 913			flush_cache_page(vma, address, pte_pfn(*pte));
 914			entry = ptep_clear_flush(vma, address, pte);
 915			entry = pte_wrprotect(entry);
 916			entry = pte_mkclean(entry);
 917			set_pte_at(vma->vm_mm, address, pte, entry);
 918			ret = 1;
 919		} else {
 920#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
 921			pmd_t *pmd = pvmw.pmd;
 922			pmd_t entry;
 923
 924			if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
 925				continue;
 926
 927			flush_cache_page(vma, address, page_to_pfn(page));
 928			entry = pmdp_huge_clear_flush(vma, address, pmd);
 929			entry = pmd_wrprotect(entry);
 930			entry = pmd_mkclean(entry);
 931			set_pmd_at(vma->vm_mm, address, pmd, entry);
 932			cstart &= PMD_MASK;
 933			ret = 1;
 934#else
 935			/* unexpected pmd-mapped page? */
 936			WARN_ON_ONCE(1);
 937#endif
 938		}
 939
 940		/*
 941		 * No need to call mmu_notifier_invalidate_range() as we are
 942		 * downgrading page table protection not changing it to point
 943		 * to a new page.
 944		 *
 945		 * See Documentation/vm/mmu_notifier.txt
 946		 */
 947		if (ret)
 948			(*cleaned)++;
 949	}
 950
 951	mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
 952
 953	return true;
 954}
 955
 956static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
 957{
 958	if (vma->vm_flags & VM_SHARED)
 959		return false;
 960
 961	return true;
 962}
 963
 964int page_mkclean(struct page *page)
 965{
 966	int cleaned = 0;
 967	struct address_space *mapping;
 968	struct rmap_walk_control rwc = {
 969		.arg = (void *)&cleaned,
 970		.rmap_one = page_mkclean_one,
 971		.invalid_vma = invalid_mkclean_vma,
 972	};
 973
 974	BUG_ON(!PageLocked(page));
 975
 976	if (!page_mapped(page))
 977		return 0;
 978
 979	mapping = page_mapping(page);
 980	if (!mapping)
 981		return 0;
 982
 983	rmap_walk(page, &rwc);
 984
 985	return cleaned;
 986}
 987EXPORT_SYMBOL_GPL(page_mkclean);
 988
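/*
 * A hedged usage sketch (hypothetical helper, in the spirit of
 * clear_page_dirty_for_io()): write-protect and clean every pte mapping a
 * pagecache page before deciding whether writeback is needed.
 */
static bool example_clean_before_io(struct page *page)
{
	int cleaned;

	lock_page(page);	/* page_mkclean() requires PageLocked */
	cleaned = page_mkclean(page);
	unlock_page(page);

	return cleaned > 0;	/* true if any pte was write-protected */
}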
 989/**
 990 * page_move_anon_rmap - move a page to our anon_vma
 991 * @page:	the page to move to our anon_vma
 992 * @vma:	the vma the page belongs to
 993 *
 994 * When a page belongs exclusively to one process after a COW event,
 995 * that page can be moved into the anon_vma that belongs to just that
 996 * process, so the rmap code will not search the parent or sibling
 997 * processes.
 998 */
 999void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
1000{
1001	struct anon_vma *anon_vma = vma->anon_vma;
1002
1003	page = compound_head(page);
1004
1005	VM_BUG_ON_PAGE(!PageLocked(page), page);
1006	VM_BUG_ON_VMA(!anon_vma, vma);
1007
1008	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
1009	/*
1010	 * Ensure that anon_vma and the PAGE_MAPPING_ANON bit are written
1011	 * simultaneously, so a concurrent reader (eg page_referenced()'s
1012	 * PageAnon()) will not see one without the other.
1013	 */
1014	WRITE_ONCE(page->mapping, (struct address_space *) anon_vma);
1015}
1016
1017/**
1018 * __page_set_anon_rmap - set up new anonymous rmap
1019 * @page:	Page to add to rmap	
1020 * @vma:	VM area to add page to.
1021 * @address:	User virtual address of the mapping	
1022 * @exclusive:	the page is exclusively owned by the current process
1023 */
1024static void __page_set_anon_rmap(struct page *page,
1025	struct vm_area_struct *vma, unsigned long address, int exclusive)
1026{
1027	struct anon_vma *anon_vma = vma->anon_vma;
1028
1029	BUG_ON(!anon_vma);
1030
1031	if (PageAnon(page))
1032		return;
1033
1034	/*
1035	 * If the page isn't exclusively mapped into this vma,
1036	 * we must use the _oldest_ possible anon_vma for the
1037	 * page mapping!
1038	 */
1039	if (!exclusive)
1040		anon_vma = anon_vma->root;
1041
1042	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
1043	page->mapping = (struct address_space *) anon_vma;
1044	page->index = linear_page_index(vma, address);
1045}
1046
1047/**
1048 * __page_check_anon_rmap - sanity check anonymous rmap addition
1049 * @page:	the page to add the mapping to
1050 * @vma:	the vm area in which the mapping is added
1051 * @address:	the user virtual address mapped
1052 */
1053static void __page_check_anon_rmap(struct page *page,
1054	struct vm_area_struct *vma, unsigned long address)
1055{
1056#ifdef CONFIG_DEBUG_VM
1057	/*
1058	 * The page's anon-rmap details (mapping and index) are guaranteed to
1059	 * be set up correctly at this point.
1060	 *
1061	 * We have exclusion against page_add_anon_rmap because the caller
1062	 * always holds the page locked, except if called from page_dup_rmap,
1063	 * in which case the page is already known to be setup.
1064	 *
1065	 * We have exclusion against page_add_new_anon_rmap because those pages
1066	 * are initially only visible via the pagetables, and the pte is locked
1067	 * over the call to page_add_new_anon_rmap.
1068	 */
1069	BUG_ON(page_anon_vma(page)->root != vma->anon_vma->root);
1070	BUG_ON(page_to_pgoff(page) != linear_page_index(vma, address));
1071#endif
1072}
1073
1074/**
1075 * page_add_anon_rmap - add pte mapping to an anonymous page
1076 * @page:	the page to add the mapping to
1077 * @vma:	the vm area in which the mapping is added
1078 * @address:	the user virtual address mapped
1079 * @compound:	charge the page as compound or small page
1080 *
1081 * The caller needs to hold the pte lock, and the page must be locked in
1082 * the anon_vma case: to serialize mapping,index checking after setting,
1083 * and to ensure that PageAnon is not being upgraded racily to PageKsm
1084 * (but PageKsm is never downgraded to PageAnon).
1085 */
1086void page_add_anon_rmap(struct page *page,
1087	struct vm_area_struct *vma, unsigned long address, bool compound)
1088{
1089	do_page_add_anon_rmap(page, vma, address, compound ? RMAP_COMPOUND : 0);
1090}
1091
1092/*
1093 * Special version of the above for do_swap_page, which often runs
1094 * into pages that are exclusively owned by the current process.
1095 * Everybody else should continue to use page_add_anon_rmap above.
1096 */
1097void do_page_add_anon_rmap(struct page *page,
1098	struct vm_area_struct *vma, unsigned long address, int flags)
1099{
1100	bool compound = flags & RMAP_COMPOUND;
1101	bool first;
1102
1103	if (compound) {
1104		atomic_t *mapcount;
1105		VM_BUG_ON_PAGE(!PageLocked(page), page);
1106		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
1107		mapcount = compound_mapcount_ptr(page);
1108		first = atomic_inc_and_test(mapcount);
1109	} else {
1110		first = atomic_inc_and_test(&page->_mapcount);
1111	}
1112
1113	if (first) {
1114		int nr = compound ? hpage_nr_pages(page) : 1;
1115		/*
1116		 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
1117		 * these counters are not modified in interrupt context, and
1118		 * pte lock(a spinlock) is held, which implies preemption
1119		 * disabled.
1120		 */
1121		if (compound)
1122			__inc_node_page_state(page, NR_ANON_THPS);
1123		__mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr);
1124	}
1125	if (unlikely(PageKsm(page)))
1126		return;
1127
1128	VM_BUG_ON_PAGE(!PageLocked(page), page);
1129
1130	/* address might be in next vma when migration races vma_adjust */
1131	if (first)
1132		__page_set_anon_rmap(page, vma, address,
1133				flags & RMAP_EXCLUSIVE);
1134	else
1135		__page_check_anon_rmap(page, vma, address);
1136}
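/*
 * A hedged sketch of the swap-in call site (hypothetical, simplified from
 * do_swap_page()): a page known to be exclusive to this process passes
 * RMAP_EXCLUSIVE so __page_set_anon_rmap() can keep the vma's own anon_vma.
 */
static void example_swapin_add_rmap(struct page *page,
				    struct vm_area_struct *vma,
				    unsigned long address, bool exclusive)
{
	do_page_add_anon_rmap(page, vma, address,
			      exclusive ? RMAP_EXCLUSIVE : 0);
}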
1137
1138/**
1139 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
1140 * @page:	the page to add the mapping to
1141 * @vma:	the vm area in which the mapping is added
1142 * @address:	the user virtual address mapped
1143 * @compound:	charge the page as compound or small page
1144 *
1145 * Same as page_add_anon_rmap but must only be called on *new* pages.
1146 * This means the inc-and-test can be bypassed.
1147 * Page does not have to be locked.
1148 */
1149void page_add_new_anon_rmap(struct page *page,
1150	struct vm_area_struct *vma, unsigned long address, bool compound)
1151{
1152	int nr = compound ? hpage_nr_pages(page) : 1;
1153
1154	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
1155	__SetPageSwapBacked(page);
1156	if (compound) {
1157		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
1158		/* increment count (starts at -1) */
1159		atomic_set(compound_mapcount_ptr(page), 0);
1160		__inc_node_page_state(page, NR_ANON_THPS);
1161	} else {
1162		/* Anon THP always mapped first with PMD */
1163		VM_BUG_ON_PAGE(PageTransCompound(page), page);
1164		/* increment count (starts at -1) */
1165		atomic_set(&page->_mapcount, 0);
1166	}
1167	__mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr);
1168	__page_set_anon_rmap(page, vma, address, 1);
1169}
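/*
 * A hedged sketch of an anonymous-fault call site (hypothetical,
 * simplified; the real fault path holds the pte lock around this): a
 * freshly allocated page is linked into rmap and the LRU before the pte
 * is installed.
 */
static void example_finish_anon_fault(struct page *page,
				      struct vm_area_struct *vma,
				      unsigned long address)
{
	__SetPageUptodate(page);

	/* brand-new page: mapcount can be initialised without the page lock */
	page_add_new_anon_rmap(page, vma, address, false);
	lru_cache_add_active_or_unevictable(page, vma);
}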
1170
1171/**
1172 * page_add_file_rmap - add pte mapping to a file page
1173 * @page: the page to add the mapping to
1174 * @compound: charge the page as compound or small page
1175 *
1176 * The caller needs to hold the pte lock.
1177 */
1178void page_add_file_rmap(struct page *page, bool compound)
1179{
1180	int i, nr = 1;
1181
1182	VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
1183	lock_page_memcg(page);
1184	if (compound && PageTransHuge(page)) {
1185		for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {
1186			if (atomic_inc_and_test(&page[i]._mapcount))
1187				nr++;
1188		}
1189		if (!atomic_inc_and_test(compound_mapcount_ptr(page)))
1190			goto out;
1191		VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
1192		__inc_node_page_state(page, NR_SHMEM_PMDMAPPED);
1193	} else {
1194		if (PageTransCompound(page) && page_mapping(page)) {
1195			VM_WARN_ON_ONCE(!PageLocked(page));
1196
1197			SetPageDoubleMap(compound_head(page));
1198			if (PageMlocked(page))
1199				clear_page_mlock(compound_head(page));
1200		}
1201		if (!atomic_inc_and_test(&page->_mapcount))
1202			goto out;
1203	}
1204	__mod_lruvec_page_state(page, NR_FILE_MAPPED, nr);
1205out:
1206	unlock_page_memcg(page);
1207}
1208
1209static void page_remove_file_rmap(struct page *page, bool compound)
1210{
1211	int i, nr = 1;
1212
1213	VM_BUG_ON_PAGE(compound && !PageHead(page), page);
1214	lock_page_memcg(page);
1215
1216	/* Hugepages are not counted in NR_FILE_MAPPED for now. */
1217	if (unlikely(PageHuge(page))) {
1218		/* hugetlb pages are always mapped with pmds */
1219		atomic_dec(compound_mapcount_ptr(page));
1220		goto out;
1221	}
1222
1223	/* page still mapped by someone else? */
1224	if (compound && PageTransHuge(page)) {
1225		for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {
1226			if (atomic_add_negative(-1, &page[i]._mapcount))
1227				nr++;
1228		}
1229		if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
1230			goto out;
1231		VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
1232		__dec_node_page_state(page, NR_SHMEM_PMDMAPPED);
1233	} else {
1234		if (!atomic_add_negative(-1, &page->_mapcount))
1235			goto out;
1236	}
1237
1238	/*
1239	 * We use the irq-unsafe __{inc|mod}_lruvec_page_state because
1240	 * these counters are not modified in interrupt context, and
1241	 * pte lock(a spinlock) is held, which implies preemption disabled.
1242	 */
1243	__mod_lruvec_page_state(page, NR_FILE_MAPPED, -nr);
1244
1245	if (unlikely(PageMlocked(page)))
1246		clear_page_mlock(page);
1247out:
1248	unlock_page_memcg(page);
1249}
1250
1251static void page_remove_anon_compound_rmap(struct page *page)
1252{
1253	int i, nr;
1254
1255	if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
1256		return;
1257
1258	/* Hugepages are not counted in NR_ANON_PAGES for now. */
1259	if (unlikely(PageHuge(page)))
1260		return;
1261
1262	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
1263		return;
1264
1265	__dec_node_page_state(page, NR_ANON_THPS);
1266
1267	if (TestClearPageDoubleMap(page)) {
1268		/*
1269		 * Subpages can be mapped with PTEs too. Check how many of
1270		 * them are still mapped.
1271		 */
1272		for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {
1273			if (atomic_add_negative(-1, &page[i]._mapcount))
1274				nr++;
1275		}
1276	} else {
1277		nr = HPAGE_PMD_NR;
1278	}
1279
1280	if (unlikely(PageMlocked(page)))
1281		clear_page_mlock(page);
1282
1283	if (nr) {
1284		__mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, -nr);
1285		deferred_split_huge_page(page);
1286	}
1287}
1288
1289/**
1290 * page_remove_rmap - take down pte mapping from a page
1291 * @page:	page to remove mapping from
1292 * @compound:	uncharge the page as compound or small page
1293 *
1294 * The caller needs to hold the pte lock.
1295 */
1296void page_remove_rmap(struct page *page, bool compound)
1297{
1298	if (!PageAnon(page))
1299		return page_remove_file_rmap(page, compound);
1300
1301	if (compound)
1302		return page_remove_anon_compound_rmap(page);
1303
1304	/* page still mapped by someone else? */
1305	if (!atomic_add_negative(-1, &page->_mapcount))
1306		return;
1307
1308	/*
1309	 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
1310	 * these counters are not modified in interrupt context, and
1311	 * pte lock(a spinlock) is held, which implies preemption disabled.
1312	 */
1313	__dec_node_page_state(page, NR_ANON_MAPPED);
1314
1315	if (unlikely(PageMlocked(page)))
1316		clear_page_mlock(page);
1317
1318	if (PageTransCompound(page))
1319		deferred_split_huge_page(compound_head(page));
1320
1321	/*
1322	 * It would be tidy to reset the PageAnon mapping here,
1323	 * but that might overwrite a racing page_add_anon_rmap
1324	 * which increments mapcount after us but sets mapping
1325	 * before us: so leave the reset to free_unref_page,
1326	 * and remember that it's only reliable while mapped.
1327	 * Leaving it set also helps swapoff to reinstate ptes
1328	 * faster for those pages still in swapcache.
1329	 */
1330}
1331
1332/*
1333 * @arg: enum ttu_flags will be passed to this argument
1334 */
1335static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1336		     unsigned long address, void *arg)
1337{
1338	struct mm_struct *mm = vma->vm_mm;
1339	struct page_vma_mapped_walk pvmw = {
1340		.page = page,
1341		.vma = vma,
1342		.address = address,
1343	};
1344	pte_t pteval;
1345	struct page *subpage;
1346	bool ret = true;
1347	unsigned long start = address, end;
1348	enum ttu_flags flags = (enum ttu_flags)arg;
1349
1350	/* munlock has nothing to gain from examining un-locked vmas */
1351	if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
1352		return true;
1353
1354	if (IS_ENABLED(CONFIG_MIGRATION) && (flags & TTU_MIGRATION) &&
1355	    is_zone_device_page(page) && !is_device_private_page(page))
1356		return true;
1357
1358	if (flags & TTU_SPLIT_HUGE_PMD) {
1359		split_huge_pmd_address(vma, address,
1360				flags & TTU_SPLIT_FREEZE, page);
1361	}
1362
1363	/*
1364	 * We have to assume the worst case, i.e. a pmd mapping, for invalidation.
1365	 * Note that the page cannot be freed in this function, as the caller of
1366	 * try_to_unmap() must hold a reference on the page.
1367	 */
1368	end = min(vma->vm_end, start + (PAGE_SIZE << compound_order(page)));
1369	mmu_notifier_invalidate_range_start(vma->vm_mm, start, end);
1370
1371	while (page_vma_mapped_walk(&pvmw)) {
1372#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1373		/* PMD-mapped THP migration entry */
1374		if (!pvmw.pte && (flags & TTU_MIGRATION)) {
1375			VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page);
1376
1377			set_pmd_migration_entry(&pvmw, page);
1378			continue;
1379		}
1380#endif
1381
1382		/*
1383		 * If the page is mlock()d, we cannot swap it out.
1384		 * If it's recently referenced (perhaps page_referenced
1385		 * skipped over this mm) then we should reactivate it.
1386		 */
1387		if (!(flags & TTU_IGNORE_MLOCK)) {
1388			if (vma->vm_flags & VM_LOCKED) {
1389				/* PTE-mapped THP are never mlocked */
1390				if (!PageTransCompound(page)) {
1391					/*
1392					 * Holding pte lock, we do *not* need
1393					 * mmap_sem here
1394					 */
1395					mlock_vma_page(page);
1396				}
1397				ret = false;
1398				page_vma_mapped_walk_done(&pvmw);
1399				break;
1400			}
1401			if (flags & TTU_MUNLOCK)
1402				continue;
1403		}
1404
1405		/* Unexpected PMD-mapped THP? */
1406		VM_BUG_ON_PAGE(!pvmw.pte, page);
1407
1408		subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
1409		address = pvmw.address;
1410
1411
1412		if (IS_ENABLED(CONFIG_MIGRATION) &&
1413		    (flags & TTU_MIGRATION) &&
1414		    is_zone_device_page(page)) {
1415			swp_entry_t entry;
1416			pte_t swp_pte;
1417
1418			pteval = ptep_get_and_clear(mm, pvmw.address, pvmw.pte);
1419
1420			/*
1421			 * Store the pfn of the page in a special migration
1422			 * pte. do_swap_page() will wait until the migration
1423			 * pte is removed and then restart fault handling.
1424			 */
1425			entry = make_migration_entry(page, 0);
1426			swp_pte = swp_entry_to_pte(entry);
1427			if (pte_soft_dirty(pteval))
1428				swp_pte = pte_swp_mksoft_dirty(swp_pte);
1429			set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
1430			/*
1431			 * No need to invalidate here, it will synchronize
1432			 * against the special swap migration pte.
1433			 */
1434			goto discard;
1435		}
1436
1437		if (!(flags & TTU_IGNORE_ACCESS)) {
1438			if (ptep_clear_flush_young_notify(vma, address,
1439						pvmw.pte)) {
1440				ret = false;
1441				page_vma_mapped_walk_done(&pvmw);
1442				break;
1443			}
1444		}
1445
1446		/* Nuke the page table entry. */
1447		flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
1448		if (should_defer_flush(mm, flags)) {
1449			/*
1450			 * We clear the PTE but do not flush so potentially
1451			 * a remote CPU could still be writing to the page.
1452			 * If the entry was previously clean then the
1453			 * architecture must guarantee that a clear->dirty
1454			 * transition on a cached TLB entry is written through
1455			 * and traps if the PTE is unmapped.
1456			 */
1457			pteval = ptep_get_and_clear(mm, address, pvmw.pte);
1458
1459			set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
1460		} else {
1461			pteval = ptep_clear_flush(vma, address, pvmw.pte);
1462		}
1463
1464		/* Move the dirty bit to the page. Now the pte is gone. */
1465		if (pte_dirty(pteval))
1466			set_page_dirty(page);
1467
1468		/* Update high watermark before we lower rss */
1469		update_hiwater_rss(mm);
1470
1471		if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
1472			pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
1473			if (PageHuge(page)) {
1474				int nr = 1 << compound_order(page);
1475				hugetlb_count_sub(nr, mm);
1476				set_huge_swap_pte_at(mm, address,
1477						     pvmw.pte, pteval,
1478						     vma_mmu_pagesize(vma));
1479			} else {
1480				dec_mm_counter(mm, mm_counter(page));
1481				set_pte_at(mm, address, pvmw.pte, pteval);
1482			}
1483
1484		} else if (pte_unused(pteval)) {
1485			/*
1486			 * The guest indicated that the page content is of no
1487			 * interest anymore. Simply discard the pte, vmscan
1488			 * will take care of the rest.
1489			 */
1490			dec_mm_counter(mm, mm_counter(page));
1491			/* We have to invalidate as we cleared the pte */
1492			mmu_notifier_invalidate_range(mm, address,
1493						      address + PAGE_SIZE);
1494		} else if (IS_ENABLED(CONFIG_MIGRATION) &&
1495				(flags & (TTU_MIGRATION|TTU_SPLIT_FREEZE))) {
1496			swp_entry_t entry;
1497			pte_t swp_pte;
1498
1499			if (arch_unmap_one(mm, vma, address, pteval) < 0) {
1500				set_pte_at(mm, address, pvmw.pte, pteval);
1501				ret = false;
1502				page_vma_mapped_walk_done(&pvmw);
1503				break;
1504			}
1505
1506			/*
1507			 * Store the pfn of the page in a special migration
1508			 * pte. do_swap_page() will wait until the migration
1509			 * pte is removed and then restart fault handling.
1510			 */
1511			entry = make_migration_entry(subpage,
1512					pte_write(pteval));
1513			swp_pte = swp_entry_to_pte(entry);
1514			if (pte_soft_dirty(pteval))
1515				swp_pte = pte_swp_mksoft_dirty(swp_pte);
1516			set_pte_at(mm, address, pvmw.pte, swp_pte);
1517			/*
1518			 * No need to invalidate here, it will synchronize
1519			 * against the special swap migration pte.
1520			 */
1521		} else if (PageAnon(page)) {
1522			swp_entry_t entry = { .val = page_private(subpage) };
1523			pte_t swp_pte;
1524			/*
1525			 * Store the swap location in the pte.
1526			 * See handle_pte_fault() ...
1527			 */
1528			if (unlikely(PageSwapBacked(page) != PageSwapCache(page))) {
1529				WARN_ON_ONCE(1);
1530				ret = false;
1531				/* We have to invalidate as we cleared the pte */
1532				mmu_notifier_invalidate_range(mm, address,
1533							address + PAGE_SIZE);
1534				page_vma_mapped_walk_done(&pvmw);
1535				break;
1536			}
1537
1538			/* MADV_FREE page check */
1539			if (!PageSwapBacked(page)) {
1540				if (!PageDirty(page)) {
1541					/* Invalidate as we cleared the pte */
1542					mmu_notifier_invalidate_range(mm,
1543						address, address + PAGE_SIZE);
1544					dec_mm_counter(mm, MM_ANONPAGES);
1545					goto discard;
1546				}
1547
1548				/*
1549				 * If the page was redirtied, it cannot be
1550				 * discarded. Remap the page into the page table.
1551				 */
1552				set_pte_at(mm, address, pvmw.pte, pteval);
1553				SetPageSwapBacked(page);
1554				ret = false;
1555				page_vma_mapped_walk_done(&pvmw);
1556				break;
1557			}
1558
1559			if (swap_duplicate(entry) < 0) {
1560				set_pte_at(mm, address, pvmw.pte, pteval);
1561				ret = false;
1562				page_vma_mapped_walk_done(&pvmw);
1563				break;
1564			}
1565			if (arch_unmap_one(mm, vma, address, pteval) < 0) {
1566				set_pte_at(mm, address, pvmw.pte, pteval);
1567				ret = false;
1568				page_vma_mapped_walk_done(&pvmw);
1569				break;
1570			}
1571			if (list_empty(&mm->mmlist)) {
1572				spin_lock(&mmlist_lock);
1573				if (list_empty(&mm->mmlist))
1574					list_add(&mm->mmlist, &init_mm.mmlist);
1575				spin_unlock(&mmlist_lock);
1576			}
1577			dec_mm_counter(mm, MM_ANONPAGES);
1578			inc_mm_counter(mm, MM_SWAPENTS);
1579			swp_pte = swp_entry_to_pte(entry);
1580			if (pte_soft_dirty(pteval))
1581				swp_pte = pte_swp_mksoft_dirty(swp_pte);
1582			set_pte_at(mm, address, pvmw.pte, swp_pte);
1583			/* Invalidate as we cleared the pte */
1584			mmu_notifier_invalidate_range(mm, address,
1585						      address + PAGE_SIZE);
1586		} else {
1587			/*
1588			 * We should not need to notify here: we reach this
1589			 * case only from freeze_page(), which itself is only
1590			 * called from split_huge_page_to_list(), so everything
1591			 * below must be true:
1592			 *   - page is not anonymous
1593			 *   - page is locked
1594			 *
1595			 * Since it is a locked, file-backed page it cannot be
1596			 * removed from the page cache and replaced by a new
1597			 * page before mmu_notifier_invalidate_range_end(), so
1598			 * no concurrent thread can update its page table to
1599			 * point at a new page while a device is still using
1600			 * this page.
1601			 *
1602			 * See Documentation/vm/mmu_notifier.txt
1603			 */
1604			dec_mm_counter(mm, mm_counter_file(page));
1605		}
1606discard:
1607		/*
1608		 * No need to call mmu_notifier_invalidate_range(): it has been
1609		 * done above for all cases requiring it to happen under the
1610		 * page table lock, before mmu_notifier_invalidate_range_end().
1611		 *
1612		 * See Documentation/vm/mmu_notifier.txt
1613		 */
1614		page_remove_rmap(subpage, PageHuge(page));
1615		put_page(page);
1616	}
1617
1618	mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
1619
1620	return ret;
1621}
1622
1623bool is_vma_temporary_stack(struct vm_area_struct *vma)
1624{
1625	int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
1626
1627	if (!maybe_stack)
1628		return false;
1629
1630	if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
1631						VM_STACK_INCOMPLETE_SETUP)
1632		return true;
1633
1634	return false;
1635}
1636
1637static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
1638{
1639	return is_vma_temporary_stack(vma);
1640}
1641
1642static int page_mapcount_is_zero(struct page *page)
1643{
1644	return !total_mapcount(page);
1645}
1646
1647/**
1648 * try_to_unmap - try to remove all page table mappings to a page
1649 * @page: the page to get unmapped
1650 * @flags: action and flags
1651 *
1652 * Tries to remove all the page table entries which are mapping this
1653 * page, used in the pageout path.  Caller must hold the page lock.
1654 *
1655 * If unmap is successful, return true. Otherwise, false.
1656 */
1657bool try_to_unmap(struct page *page, enum ttu_flags flags)
1658{
1659	struct rmap_walk_control rwc = {
1660		.rmap_one = try_to_unmap_one,
1661		.arg = (void *)flags,
1662		.done = page_mapcount_is_zero,
1663		.anon_lock = page_lock_anon_vma_read,
1664	};
1665
1666	/*
1667	 * During exec, a temporary VMA is setup and later moved.
1668	 * The VMA is moved under the anon_vma lock but not the
1669	 * page tables leading to a race where migration cannot
1670	 * find the migration ptes. Rather than increasing the
1671	 * locking requirements of exec(), migration skips
1672	 * temporary VMAs until after exec() completes.
1673	 */
1674	if ((flags & (TTU_MIGRATION|TTU_SPLIT_FREEZE))
1675	    && !PageKsm(page) && PageAnon(page))
1676		rwc.invalid_vma = invalid_migration_vma;
1677
1678	if (flags & TTU_RMAP_LOCKED)
1679		rmap_walk_locked(page, &rwc);
1680	else
1681		rmap_walk(page, &rwc);
1682
1683	return !page_mapcount(page) ? true : false;
1684}
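/*
 * A hedged sketch of the pageout-path call site (hypothetical, simplified
 * from shrink_page_list()): the page is locked and reclaim may ask for
 * batched TLB flushing.
 */
static bool example_reclaim_try_unmap(struct page *page)
{
	VM_BUG_ON_PAGE(!PageLocked(page), page);

	if (!try_to_unmap(page, TTU_BATCH_FLUSH))
		return false;	/* a mapping survived; keep the page */

	return true;		/* all ptes gone; pageout can proceed */
}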
1685
1686static int page_not_mapped(struct page *page)
1687{
1688	return !page_mapped(page);
1689};
1690
1691/**
1692 * try_to_munlock - try to munlock a page
1693 * @page: the page to be munlocked
1694 *
1695 * Called from munlock code.  Checks all of the VMAs mapping the page
1696 * to make sure nobody else has this page mlocked. The page will be
1697 * returned with PG_mlocked cleared if no other vmas have it mlocked.
1698 */
1699
1700void try_to_munlock(struct page *page)
1701{
1702	struct rmap_walk_control rwc = {
1703		.rmap_one = try_to_unmap_one,
1704		.arg = (void *)TTU_MUNLOCK,
1705		.done = page_not_mapped,
1706		.anon_lock = page_lock_anon_vma_read,
1707
1708	};
1709
1710	VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page);
1711	VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page);
1712
1713	rmap_walk(page, &rwc);
1714}
1715
1716void __put_anon_vma(struct anon_vma *anon_vma)
1717{
1718	struct anon_vma *root = anon_vma->root;
1719
1720	anon_vma_free(anon_vma);
1721	if (root != anon_vma && atomic_dec_and_test(&root->refcount))
1722		anon_vma_free(root);
1723}
1724
1725static struct anon_vma *rmap_walk_anon_lock(struct page *page,
1726					struct rmap_walk_control *rwc)
1727{
1728	struct anon_vma *anon_vma;
1729
1730	if (rwc->anon_lock)
1731		return rwc->anon_lock(page);
1732
1733	/*
1734	 * Note: remove_migration_ptes() cannot use page_lock_anon_vma_read()
1735	 * because that depends on page_mapped(); but not all its usages
1736	 * are holding mmap_sem. Users without mmap_sem are required to
1737	 * take a reference count to prevent the anon_vma disappearing
1738	 */
1739	anon_vma = page_anon_vma(page);
1740	if (!anon_vma)
1741		return NULL;
1742
1743	anon_vma_lock_read(anon_vma);
1744	return anon_vma;
1745}
1746
1747/*
1748 * rmap_walk_anon - do something to anonymous page using the anon_vma-based
1749 * rmap method
1750 * @page: the page to be handled
1751 * @rwc: control variable according to each walk type
1752 *
1753 * Find all the mappings of a page using the mapping pointer and the vma chains
1754 * contained in the anon_vma struct it points to.
1755 *
1756 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
1757 * where the page was found will be held for write.  So, we won't recheck
1758 * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
1759 * VM_LOCKED.
1760 */
1761static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
1762		bool locked)
1763{
1764	struct anon_vma *anon_vma;
1765	pgoff_t pgoff_start, pgoff_end;
1766	struct anon_vma_chain *avc;
1767
1768	if (locked) {
1769		anon_vma = page_anon_vma(page);
1770		/* anon_vma disappear under us? */
1771		VM_BUG_ON_PAGE(!anon_vma, page);
1772	} else {
1773		anon_vma = rmap_walk_anon_lock(page, rwc);
1774	}
1775	if (!anon_vma)
1776		return;
1777
1778	pgoff_start = page_to_pgoff(page);
1779	pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
1780	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
1781			pgoff_start, pgoff_end) {
1782		struct vm_area_struct *vma = avc->vma;
1783		unsigned long address = vma_address(page, vma);
1784
1785		cond_resched();
1786
1787		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
1788			continue;
1789
1790		if (!rwc->rmap_one(page, vma, address, rwc->arg))
1791			break;
1792		if (rwc->done && rwc->done(page))
1793			break;
1794	}
1795
1796	if (!locked)
1797		anon_vma_unlock_read(anon_vma);
1798}
1799
1800/*
1801 * rmap_walk_file - do something to file page using the object-based rmap method
1802 * @page: the page to be handled
1803 * @rwc: control variable according to each walk type
1804 *
1805 * Find all the mappings of a page using the mapping pointer and the vma chains
1806 * contained in the address_space struct it points to.
1807 *
1808 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
1809 * where the page was found will be held for write.  So, we won't recheck
1810 * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
1811 * VM_LOCKED.
1812 */
1813static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
1814		bool locked)
1815{
1816	struct address_space *mapping = page_mapping(page);
1817	pgoff_t pgoff_start, pgoff_end;
1818	struct vm_area_struct *vma;
1819
1820	/*
1821	 * The page lock not only makes sure that page->mapping cannot
1822	 * suddenly be NULLified by truncation, it makes sure that the
1823	 * structure at mapping cannot be freed and reused yet,
1824	 * so we can safely take mapping->i_mmap_rwsem.
1825	 */
1826	VM_BUG_ON_PAGE(!PageLocked(page), page);
1827
1828	if (!mapping)
1829		return;
1830
1831	pgoff_start = page_to_pgoff(page);
1832	pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
1833	if (!locked)
1834		i_mmap_lock_read(mapping);
1835	vma_interval_tree_foreach(vma, &mapping->i_mmap,
1836			pgoff_start, pgoff_end) {
1837		unsigned long address = vma_address(page, vma);
1838
1839		cond_resched();
1840
1841		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
1842			continue;
1843
1844		if (!rwc->rmap_one(page, vma, address, rwc->arg))
1845			goto done;
1846		if (rwc->done && rwc->done(page))
1847			goto done;
1848	}
1849
1850done:
1851	if (!locked)
1852		i_mmap_unlock_read(mapping);
1853}
1854
1855void rmap_walk(struct page *page, struct rmap_walk_control *rwc)
1856{
1857	if (unlikely(PageKsm(page)))
1858		rmap_walk_ksm(page, rwc);
1859	else if (PageAnon(page))
1860		rmap_walk_anon(page, rwc, false);
1861	else
1862		rmap_walk_file(page, rwc, false);
1863}
1864
1865/* Like rmap_walk, but caller holds relevant rmap lock */
1866void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc)
1867{
1868	/* no ksm support for now */
1869	VM_BUG_ON_PAGE(PageKsm(page), page);
1870	if (PageAnon(page))
1871		rmap_walk_anon(page, rwc, true);
1872	else
1873		rmap_walk_file(page, rwc, true);
1874}
1875
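/*
 * Illustrative sketch (editorial, not part of mm/rmap.c): a minimal
 * rmap_walk() client that counts the VMAs currently mapping a locked
 * page.  Both helper names are hypothetical.
 *
 *	static bool count_one_mapping(struct page *page,
 *			struct vm_area_struct *vma, unsigned long address,
 *			void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return true;
 *	}
 *
 *	static int count_mappings(struct page *page)
 *	{
 *		int nr = 0;
 *		struct rmap_walk_control rwc = {
 *			.rmap_one = count_one_mapping,
 *			.arg = &nr,
 *		};
 *
 *		rmap_walk(page, &rwc);
 *		return nr;
 *	}
 *
 * Returning true from .rmap_one continues the walk; returning false
 * aborts it, as try_to_unmap_one() does when it hits an mlocked VMA.
 */
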
1876#ifdef CONFIG_HUGETLB_PAGE
1877/*
1878 * The following three functions are for anonymous (private mapped) hugepages.
1879 * Unlike common anonymous pages, anonymous hugepages have no accounting code
1880 * and no lru code, because we handle hugepages differently from common pages.
1881 */
1882static void __hugepage_set_anon_rmap(struct page *page,
1883	struct vm_area_struct *vma, unsigned long address, int exclusive)
1884{
1885	struct anon_vma *anon_vma = vma->anon_vma;
1886
1887	BUG_ON(!anon_vma);
1888
1889	if (PageAnon(page))
1890		return;
1891	if (!exclusive)
1892		anon_vma = anon_vma->root;
1893
1894	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
1895	page->mapping = (struct address_space *) anon_vma;
1896	page->index = linear_page_index(vma, address);
1897}
1898
1899void hugepage_add_anon_rmap(struct page *page,
1900			    struct vm_area_struct *vma, unsigned long address)
1901{
1902	struct anon_vma *anon_vma = vma->anon_vma;
1903	int first;
1904
1905	BUG_ON(!PageLocked(page));
1906	BUG_ON(!anon_vma);
1907	/* address might be in next vma when migration races vma_adjust */
1908	first = atomic_inc_and_test(compound_mapcount_ptr(page));
1909	if (first)
1910		__hugepage_set_anon_rmap(page, vma, address, 0);
1911}
1912
1913void hugepage_add_new_anon_rmap(struct page *page,
1914			struct vm_area_struct *vma, unsigned long address)
1915{
1916	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
1917	atomic_set(compound_mapcount_ptr(page), 0);
1918	__hugepage_set_anon_rmap(page, vma, address, 1);
1919}
1920#endif /* CONFIG_HUGETLB_PAGE */
v6.2
  19
  20/*
  21 * Lock ordering in mm:
  22 *
  23 * inode->i_rwsem	(while writing or truncating, not reading or faulting)
  24 *   mm->mmap_lock
  25 *     mapping->invalidate_lock (in filemap_fault)
  26 *       page->flags PG_locked (lock_page)
  27 *         hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share, see hugetlbfs below)
  28 *           mapping->i_mmap_rwsem
  29 *             anon_vma->rwsem
  30 *               mm->page_table_lock or pte_lock
  31 *                 swap_lock (in swap_duplicate, swap_info_get)
  32 *                   mmlist_lock (in mmput, drain_mmlist and others)
  33 *                   mapping->private_lock (in block_dirty_folio)
  34 *                     folio_lock_memcg move_lock (in block_dirty_folio)
  35 *                       i_pages lock (widely used)
  36 *                         lruvec->lru_lock (in folio_lruvec_lock_irq)
  37 *                   inode->i_lock (in set_page_dirty's __mark_inode_dirty)
  38 *                   bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
  39 *                     sb_lock (within inode_lock in fs/fs-writeback.c)
  40 *                     i_pages lock (widely used, in set_page_dirty,
  41 *                               in arch-dependent flush_dcache_mmap_lock,
  42 *                               within bdi.wb->list_lock in __sync_single_inode)
  43 *
  44 * anon_vma->rwsem,mapping->i_mmap_rwsem   (memory_failure, collect_procs_anon)
  45 *   ->tasklist_lock
  46 *     pte map lock
  47 *
  48 * hugetlbfs PageHuge() take locks in this order:
  49 *   hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
  50 *     vma_lock (hugetlb specific lock for pmd_sharing)
  51 *       mapping->i_mmap_rwsem (also used for hugetlb pmd sharing)
  52 *         page->flags PG_locked (lock_page)
  53 */
  54
  55#include <linux/mm.h>
  56#include <linux/sched/mm.h>
  57#include <linux/sched/task.h>
  58#include <linux/pagemap.h>
  59#include <linux/swap.h>
  60#include <linux/swapops.h>
  61#include <linux/slab.h>
  62#include <linux/init.h>
  63#include <linux/ksm.h>
  64#include <linux/rmap.h>
  65#include <linux/rcupdate.h>
  66#include <linux/export.h>
  67#include <linux/memcontrol.h>
  68#include <linux/mmu_notifier.h>
  69#include <linux/migrate.h>
  70#include <linux/hugetlb.h>
  71#include <linux/huge_mm.h>
  72#include <linux/backing-dev.h>
  73#include <linux/page_idle.h>
  74#include <linux/memremap.h>
  75#include <linux/userfaultfd_k.h>
  76#include <linux/mm_inline.h>
  77
  78#include <asm/tlbflush.h>
  79
  80#define CREATE_TRACE_POINTS
  81#include <trace/events/tlb.h>
  82#include <trace/events/migrate.h>
  83
  84#include "internal.h"
  85
  86static struct kmem_cache *anon_vma_cachep;
  87static struct kmem_cache *anon_vma_chain_cachep;
  88
  89static inline struct anon_vma *anon_vma_alloc(void)
  90{
  91	struct anon_vma *anon_vma;
  92
  93	anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
  94	if (anon_vma) {
  95		atomic_set(&anon_vma->refcount, 1);
  96		anon_vma->num_children = 0;
  97		anon_vma->num_active_vmas = 0;
  98		anon_vma->parent = anon_vma;
  99		/*
 100		 * Initialise the anon_vma root to point to itself. If called
 101		 * from fork, the root will be reset to the parents anon_vma.
 102		 */
 103		anon_vma->root = anon_vma;
 104	}
 105
 106	return anon_vma;
 107}
 108
 109static inline void anon_vma_free(struct anon_vma *anon_vma)
 110{
 111	VM_BUG_ON(atomic_read(&anon_vma->refcount));
 112
 113	/*
 114	 * Synchronize against folio_lock_anon_vma_read() such that
 115	 * we can safely hold the lock without the anon_vma getting
 116	 * freed.
 117	 *
 118	 * Relies on the full mb implied by the atomic_dec_and_test() from
 119	 * put_anon_vma() against the acquire barrier implied by
 120	 * down_read_trylock() from folio_lock_anon_vma_read(). This orders:
 121	 *
 122	 * folio_lock_anon_vma_read()	VS	put_anon_vma()
 123	 *   down_read_trylock()		  atomic_dec_and_test()
 124	 *   LOCK				  MB
 125	 *   atomic_read()			  rwsem_is_locked()
 126	 *
 127	 * LOCK should suffice since the actual taking of the lock must
 128	 * happen _before_ what follows.
 129	 */
 130	might_sleep();
 131	if (rwsem_is_locked(&anon_vma->root->rwsem)) {
 132		anon_vma_lock_write(anon_vma);
 133		anon_vma_unlock_write(anon_vma);
 134	}
 135
 136	kmem_cache_free(anon_vma_cachep, anon_vma);
 137}
 138
 139static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
 140{
 141	return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
 142}
 143
 144static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
 145{
 146	kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
 147}
 148
 149static void anon_vma_chain_link(struct vm_area_struct *vma,
 150				struct anon_vma_chain *avc,
 151				struct anon_vma *anon_vma)
 152{
 153	avc->vma = vma;
 154	avc->anon_vma = anon_vma;
 155	list_add(&avc->same_vma, &vma->anon_vma_chain);
 156	anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
 157}
 158
 159/**
 160 * __anon_vma_prepare - attach an anon_vma to a memory region
 161 * @vma: the memory region in question
 162 *
 163 * This makes sure the memory mapping described by 'vma' has
 164 * an 'anon_vma' attached to it, so that we can associate the
 165 * anonymous pages mapped into it with that anon_vma.
 166 *
 167 * The common case will be that we already have one, which
 168 * is handled inline by anon_vma_prepare(). But if
 169 * not we either need to find an adjacent mapping that we
 170 * can re-use the anon_vma from (very common when the only
 171 * reason for splitting a vma has been mprotect()), or we
 172 * allocate a new one.
 173 *
 174 * Anon-vma allocations are very subtle, because we may have
 175 * optimistically looked up an anon_vma in folio_lock_anon_vma_read()
 176 * and that may actually touch the rwsem even in the newly
 177 * allocated vma (it depends on RCU to make sure that the
 178 * anon_vma isn't actually destroyed).
 179 *
 180 * As a result, we need to do proper anon_vma locking even
 181 * for the new allocation. At the same time, we do not want
 182 * to do any locking for the common case of already having
 183 * an anon_vma.
 184 *
 185 * This must be called with the mmap_lock held for reading.
 186 */
 187int __anon_vma_prepare(struct vm_area_struct *vma)
 188{
 189	struct mm_struct *mm = vma->vm_mm;
 190	struct anon_vma *anon_vma, *allocated;
 191	struct anon_vma_chain *avc;
 192
 193	might_sleep();
 194
 195	avc = anon_vma_chain_alloc(GFP_KERNEL);
 196	if (!avc)
 197		goto out_enomem;
 198
 199	anon_vma = find_mergeable_anon_vma(vma);
 200	allocated = NULL;
 201	if (!anon_vma) {
 202		anon_vma = anon_vma_alloc();
 203		if (unlikely(!anon_vma))
 204			goto out_enomem_free_avc;
 205		anon_vma->num_children++; /* self-parent link for new root */
 206		allocated = anon_vma;
 207	}
 208
 209	anon_vma_lock_write(anon_vma);
 210	/* page_table_lock to protect against threads */
 211	spin_lock(&mm->page_table_lock);
 212	if (likely(!vma->anon_vma)) {
 213		vma->anon_vma = anon_vma;
 214		anon_vma_chain_link(vma, avc, anon_vma);
 215		anon_vma->num_active_vmas++;
 216		allocated = NULL;
 217		avc = NULL;
 218	}
 219	spin_unlock(&mm->page_table_lock);
 220	anon_vma_unlock_write(anon_vma);
 221
 222	if (unlikely(allocated))
 223		put_anon_vma(allocated);
 224	if (unlikely(avc))
 225		anon_vma_chain_free(avc);
 226
 227	return 0;
 228
 229 out_enomem_free_avc:
 230	anon_vma_chain_free(avc);
 231 out_enomem:
 232	return -ENOMEM;
 233}
 234
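/*
 * Illustrative sketch (editorial): the usual entry point is the inline
 * anon_vma_prepare() wrapper from <linux/rmap.h>, called on the fault
 * path before the first anonymous page is installed:
 *
 *	if (unlikely(anon_vma_prepare(vma)))
 *		return VM_FAULT_OOM;
 *
 * The wrapper falls through to __anon_vma_prepare() only when
 * vma->anon_vma is still NULL, keeping the common case to one test.
 */
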
 235/*
 236 * This is a useful helper function for locking the anon_vma root as
 237 * we traverse the vma->anon_vma_chain, looping over anon_vma's that
 238 * have the same vma.
 239 *
 240 * Such anon_vma's should have the same root, so you'd expect to see
242 * just a single lock of the root rwsem for the whole traversal.
 242 */
 243static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
 244{
 245	struct anon_vma *new_root = anon_vma->root;
 246	if (new_root != root) {
 247		if (WARN_ON_ONCE(root))
 248			up_write(&root->rwsem);
 249		root = new_root;
 250		down_write(&root->rwsem);
 251	}
 252	return root;
 253}
 254
 255static inline void unlock_anon_vma_root(struct anon_vma *root)
 256{
 257	if (root)
 258		up_write(&root->rwsem);
 259}
 260
 261/*
 262 * Attach the anon_vmas from src to dst.
 263 * Returns 0 on success, -ENOMEM on failure.
 264 *
 265 * anon_vma_clone() is called by __vma_adjust(), __split_vma(), copy_vma() and
 266 * anon_vma_fork(). The first three want an exact copy of src, while the last
 267 * one, anon_vma_fork(), may try to reuse an existing anon_vma to prevent
 268 * endless growth of anon_vma. Since dst->anon_vma is set to NULL before call,
 269 * we can identify this case by checking (!dst->anon_vma && src->anon_vma).
 270 *
 271 * If (!dst->anon_vma && src->anon_vma) is true, this function tries to find
 272 * and reuse existing anon_vma which has no vmas and only one child anon_vma.
 273 * This prevents degradation of anon_vma hierarchy to endless linear chain in
 274 * case of constantly forking task. On the other hand, an anon_vma with more
 275 * than one child isn't reused even if there was no alive vma, thus rmap
 276 * walker has a good chance of avoiding scanning the whole hierarchy when it
 277 * searches where page is mapped.
 278 */
 279int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
 280{
 281	struct anon_vma_chain *avc, *pavc;
 282	struct anon_vma *root = NULL;
 283
 284	list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
 285		struct anon_vma *anon_vma;
 286
 287		avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
 288		if (unlikely(!avc)) {
 289			unlock_anon_vma_root(root);
 290			root = NULL;
 291			avc = anon_vma_chain_alloc(GFP_KERNEL);
 292			if (!avc)
 293				goto enomem_failure;
 294		}
 295		anon_vma = pavc->anon_vma;
 296		root = lock_anon_vma_root(root, anon_vma);
 297		anon_vma_chain_link(dst, avc, anon_vma);
 298
 299		/*
 300		 * Reuse existing anon_vma if it has no vma and only one
 301		 * anon_vma child.
 302		 *
 303		 * Root anon_vma is never reused:
 304		 * it has self-parent reference and at least one child.
 305		 */
 306		if (!dst->anon_vma && src->anon_vma &&
 307		    anon_vma->num_children < 2 &&
 308		    anon_vma->num_active_vmas == 0)
 309			dst->anon_vma = anon_vma;
 310	}
 311	if (dst->anon_vma)
 312		dst->anon_vma->num_active_vmas++;
 313	unlock_anon_vma_root(root);
 314	return 0;
 315
 316 enomem_failure:
 317	/*
 318	 * dst->anon_vma is dropped here otherwise its num_active_vmas can
 319	 * be incorrectly decremented in unlink_anon_vmas().
 320	 * We can safely do this because callers of anon_vma_clone() don't care
 321	 * about dst->anon_vma if anon_vma_clone() failed.
 322	 */
 323	dst->anon_vma = NULL;
 324	unlink_anon_vmas(dst);
 325	return -ENOMEM;
 326}
 327
 328/*
 329 * Attach vma to its own anon_vma, as well as to the anon_vmas that
 330 * the corresponding VMA in the parent process is attached to.
 331 * Returns 0 on success, non-zero on failure.
 332 */
 333int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
 334{
 335	struct anon_vma_chain *avc;
 336	struct anon_vma *anon_vma;
 337	int error;
 338
 339	/* Don't bother if the parent process has no anon_vma here. */
 340	if (!pvma->anon_vma)
 341		return 0;
 342
 343	/* Drop inherited anon_vma, we'll reuse existing or allocate new. */
 344	vma->anon_vma = NULL;
 345
 346	/*
 347	 * First, attach the new VMA to the parent VMA's anon_vmas,
 348	 * so rmap can find non-COWed pages in child processes.
 349	 */
 350	error = anon_vma_clone(vma, pvma);
 351	if (error)
 352		return error;
 353
 354	/* An existing anon_vma has been reused, all done then. */
 355	if (vma->anon_vma)
 356		return 0;
 357
 358	/* Then add our own anon_vma. */
 359	anon_vma = anon_vma_alloc();
 360	if (!anon_vma)
 361		goto out_error;
 362	anon_vma->num_active_vmas++;
 363	avc = anon_vma_chain_alloc(GFP_KERNEL);
 364	if (!avc)
 365		goto out_error_free_anon_vma;
 366
 367	/*
 368	 * The root anon_vma's rwsem is the lock actually used when we
 369	 * lock any of the anon_vmas in this anon_vma tree.
 370	 */
 371	anon_vma->root = pvma->anon_vma->root;
 372	anon_vma->parent = pvma->anon_vma;
 373	/*
 374	 * With refcounts, an anon_vma can stay around longer than the
 375	 * process it belongs to. The root anon_vma needs to be pinned until
 376	 * this anon_vma is freed, because the lock lives in the root.
 377	 */
 378	get_anon_vma(anon_vma->root);
 379	/* Mark this anon_vma as the one where our new (COWed) pages go. */
 380	vma->anon_vma = anon_vma;
 381	anon_vma_lock_write(anon_vma);
 382	anon_vma_chain_link(vma, avc, anon_vma);
 383	anon_vma->parent->num_children++;
 384	anon_vma_unlock_write(anon_vma);
 385
 386	return 0;
 387
 388 out_error_free_anon_vma:
 389	put_anon_vma(anon_vma);
 390 out_error:
 391	unlink_anon_vmas(vma);
 392	return -ENOMEM;
 393}
 394
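/*
 * Illustrative sketch (editorial): fork's VMA duplication loop calls
 * this once per copied VMA, roughly:
 *
 *	if (anon_vma_fork(new_vma, old_vma))
 *		goto fail_nomem;
 *
 * where new_vma is the child's copy and old_vma the parent's original;
 * the variable and label names are hypothetical, dup_mmap() is the
 * real caller.
 */
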
 395void unlink_anon_vmas(struct vm_area_struct *vma)
 396{
 397	struct anon_vma_chain *avc, *next;
 398	struct anon_vma *root = NULL;
 399
 400	/*
 401	 * Unlink each anon_vma chained to the VMA.  This list is ordered
 402	 * from newest to oldest, ensuring the root anon_vma gets freed last.
 403	 */
 404	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
 405		struct anon_vma *anon_vma = avc->anon_vma;
 406
 407		root = lock_anon_vma_root(root, anon_vma);
 408		anon_vma_interval_tree_remove(avc, &anon_vma->rb_root);
 409
 410		/*
 411		 * Leave empty anon_vmas on the list - we'll need
 412		 * to free them outside the lock.
 413		 */
 414		if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) {
 415			anon_vma->parent->num_children--;
 416			continue;
 417		}
 418
 419		list_del(&avc->same_vma);
 420		anon_vma_chain_free(avc);
 421	}
 422	if (vma->anon_vma) {
 423		vma->anon_vma->num_active_vmas--;
 424
 425		/*
 426		 * vma would still be needed after unlink, and anon_vma will be prepared
427		 * again when a fault is handled.
 428		 */
 429		vma->anon_vma = NULL;
 430	}
 431	unlock_anon_vma_root(root);
 432
 433	/*
 434	 * Iterate the list once more, it now only contains empty and unlinked
 435	 * anon_vmas, destroy them. Could not do before due to __put_anon_vma()
 436	 * needing to write-acquire the anon_vma->root->rwsem.
 437	 */
 438	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
 439		struct anon_vma *anon_vma = avc->anon_vma;
 440
 441		VM_WARN_ON(anon_vma->num_children);
 442		VM_WARN_ON(anon_vma->num_active_vmas);
 443		put_anon_vma(anon_vma);
 444
 445		list_del(&avc->same_vma);
 446		anon_vma_chain_free(avc);
 447	}
 448}
 449
 450static void anon_vma_ctor(void *data)
 451{
 452	struct anon_vma *anon_vma = data;
 453
 454	init_rwsem(&anon_vma->rwsem);
 455	atomic_set(&anon_vma->refcount, 0);
 456	anon_vma->rb_root = RB_ROOT_CACHED;
 457}
 458
 459void __init anon_vma_init(void)
 460{
 461	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
 462			0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
 463			anon_vma_ctor);
 464	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
 465			SLAB_PANIC|SLAB_ACCOUNT);
 466}
 467
 468/*
 469 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
 470 *
471 * Since there is no serialization whatsoever against page_remove_rmap()
472 * the best this function can do is return a refcount-increased anon_vma
 473 * that might have been relevant to this page.
 474 *
 475 * The page might have been remapped to a different anon_vma or the anon_vma
 476 * returned may already be freed (and even reused).
 477 *
 478 * In case it was remapped to a different anon_vma, the new anon_vma will be a
 479 * child of the old anon_vma, and the anon_vma lifetime rules will therefore
 480 * ensure that any anon_vma obtained from the page will still be valid for as
 481 * long as we observe page_mapped() [ hence all those page_mapped() tests ].
 482 *
 483 * All users of this function must be very careful when walking the anon_vma
 484 * chain and verify that the page in question is indeed mapped in it
 485 * [ something equivalent to page_mapped_in_vma() ].
 486 *
 487 * Since anon_vma's slab is SLAB_TYPESAFE_BY_RCU and we know from
 488 * page_remove_rmap() that the anon_vma pointer from page->mapping is valid
 489 * if there is a mapcount, we can dereference the anon_vma after observing
 490 * those.
 491 */
 492struct anon_vma *folio_get_anon_vma(struct folio *folio)
 493{
 494	struct anon_vma *anon_vma = NULL;
 495	unsigned long anon_mapping;
 496
 497	rcu_read_lock();
 498	anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
 499	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
 500		goto out;
 501	if (!folio_mapped(folio))
 502		goto out;
 503
 504	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
 505	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
 506		anon_vma = NULL;
 507		goto out;
 508	}
 509
 510	/*
 511	 * If this folio is still mapped, then its anon_vma cannot have been
 512	 * freed.  But if it has been unmapped, we have no security against the
 513	 * anon_vma structure being freed and reused (for another anon_vma:
 514	 * SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero()
 515	 * above cannot corrupt).
 516	 */
 517	if (!folio_mapped(folio)) {
 518		rcu_read_unlock();
 519		put_anon_vma(anon_vma);
 520		return NULL;
 521	}
 522out:
 523	rcu_read_unlock();
 524
 525	return anon_vma;
 526}
 527
 528/*
 529 * Similar to folio_get_anon_vma() except it locks the anon_vma.
 530 *
531 * It's a little more complex as it tries to keep the fast path to a single
 532 * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
 533 * reference like with folio_get_anon_vma() and then block on the mutex
 534 * on !rwc->try_lock case.
 535 */
 536struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
 537					  struct rmap_walk_control *rwc)
 538{
 539	struct anon_vma *anon_vma = NULL;
 540	struct anon_vma *root_anon_vma;
 541	unsigned long anon_mapping;
 542
 543	rcu_read_lock();
 544	anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
 545	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
 546		goto out;
 547	if (!folio_mapped(folio))
 548		goto out;
 549
 550	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
 551	root_anon_vma = READ_ONCE(anon_vma->root);
 552	if (down_read_trylock(&root_anon_vma->rwsem)) {
 553		/*
 554		 * If the folio is still mapped, then this anon_vma is still
 555		 * its anon_vma, and holding the mutex ensures that it will
 556		 * not go away, see anon_vma_free().
 557		 */
 558		if (!folio_mapped(folio)) {
 559			up_read(&root_anon_vma->rwsem);
 560			anon_vma = NULL;
 561		}
 562		goto out;
 563	}
 564
 565	if (rwc && rwc->try_lock) {
 566		anon_vma = NULL;
 567		rwc->contended = true;
 568		goto out;
 569	}
 570
571	/* trylock failed, we need to sleep */
 572	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
 573		anon_vma = NULL;
 574		goto out;
 575	}
 576
 577	if (!folio_mapped(folio)) {
 578		rcu_read_unlock();
 579		put_anon_vma(anon_vma);
 580		return NULL;
 581	}
 582
583	/* we pinned the anon_vma, it's safe to sleep */
 584	rcu_read_unlock();
 585	anon_vma_lock_read(anon_vma);
 586
 587	if (atomic_dec_and_test(&anon_vma->refcount)) {
 588		/*
 589		 * Oops, we held the last refcount, release the lock
 590		 * and bail -- can't simply use put_anon_vma() because
 591		 * we'll deadlock on the anon_vma_lock_write() recursion.
 592		 */
 593		anon_vma_unlock_read(anon_vma);
 594		__put_anon_vma(anon_vma);
 595		anon_vma = NULL;
 596	}
 597
 598	return anon_vma;
 599
 600out:
 601	rcu_read_unlock();
 602	return anon_vma;
 603}
 604
 605#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
 606/*
 607 * Flush TLB entries for recently unmapped pages from remote CPUs. It is
 608 * important if a PTE was dirty when it was unmapped that it's flushed
 609 * before any IO is initiated on the page to prevent lost writes. Similarly,
 610 * it must be flushed before freeing to prevent data leakage.
 611 */
 612void try_to_unmap_flush(void)
 613{
 614	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
 615
 616	if (!tlb_ubc->flush_required)
 617		return;
 618
 619	arch_tlbbatch_flush(&tlb_ubc->arch);
 620	tlb_ubc->flush_required = false;
 621	tlb_ubc->writable = false;
 622}
 623
 624/* Flush iff there are potentially writable TLB entries that can race with IO */
 625void try_to_unmap_flush_dirty(void)
 626{
 627	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
 628
 629	if (tlb_ubc->writable)
 630		try_to_unmap_flush();
 631}
 632
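/*
 * Illustrative sketch (editorial): reclaim batches the unmaps and
 * flushes once, roughly:
 *
 *	try_to_unmap(folio, TTU_BATCH_FLUSH);
 *	...
 *	try_to_unmap_flush_dirty();
 *
 * with the dirty flush issued before writeback starts, so no CPU can
 * keep dirtying the page through a stale, still-writable TLB entry.
 */
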
 633/*
 634 * Bits 0-14 of mm->tlb_flush_batched record pending generations.
635 * Bits 16-30 of mm->tlb_flush_batched record flushed generations.
 636 */
 637#define TLB_FLUSH_BATCH_FLUSHED_SHIFT	16
 638#define TLB_FLUSH_BATCH_PENDING_MASK			\
 639	((1 << (TLB_FLUSH_BATCH_FLUSHED_SHIFT - 1)) - 1)
 640#define TLB_FLUSH_BATCH_PENDING_LARGE			\
 641	(TLB_FLUSH_BATCH_PENDING_MASK / 2)
 642
 643static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
 644{
 645	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
 646	int batch, nbatch;
 647
 648	arch_tlbbatch_add_mm(&tlb_ubc->arch, mm);
 649	tlb_ubc->flush_required = true;
 650
 651	/*
 652	 * Ensure compiler does not re-order the setting of tlb_flush_batched
 653	 * before the PTE is cleared.
 654	 */
 655	barrier();
 656	batch = atomic_read(&mm->tlb_flush_batched);
 657retry:
 658	if ((batch & TLB_FLUSH_BATCH_PENDING_MASK) > TLB_FLUSH_BATCH_PENDING_LARGE) {
 659		/*
 660		 * Prevent `pending' from catching up with `flushed' because of
 661		 * overflow.  Reset `pending' and `flushed' to be 1 and 0 if
 662		 * `pending' becomes large.
 663		 */
 664		nbatch = atomic_cmpxchg(&mm->tlb_flush_batched, batch, 1);
 665		if (nbatch != batch) {
 666			batch = nbatch;
 667			goto retry;
 668		}
 669	} else {
 670		atomic_inc(&mm->tlb_flush_batched);
 671	}
 672
 673	/*
 674	 * If the PTE was dirty then it's best to assume it's writable. The
 675	 * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
 676	 * before the page is queued for IO.
 677	 */
 678	if (writable)
 679		tlb_ubc->writable = true;
 680}
 681
 682/*
 683 * Returns true if the TLB flush should be deferred to the end of a batch of
 684 * unmap operations to reduce IPIs.
 685 */
 686static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
 687{
 688	bool should_defer = false;
 689
 690	if (!(flags & TTU_BATCH_FLUSH))
 691		return false;
 692
693	/* If remote CPUs need to be flushed then defer the flush for batching */
 694	if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids)
 695		should_defer = true;
 696	put_cpu();
 697
 698	return should_defer;
 699}
 700
 701/*
702 * Reclaim unmaps pages under the PTL but does not flush the TLB prior to
 703 * releasing the PTL if TLB flushes are batched. It's possible for a parallel
 704 * operation such as mprotect or munmap to race between reclaim unmapping
 705 * the page and flushing the page. If this race occurs, it potentially allows
 706 * access to data via a stale TLB entry. Tracking all mm's that have TLB
 707 * batching in flight would be expensive during reclaim so instead track
 708 * whether TLB batching occurred in the past and if so then do a flush here
 709 * if required. This will cost one additional flush per reclaim cycle paid
710 * by the first operation at risk such as mprotect and munmap.
 711 *
 712 * This must be called under the PTL so that an access to tlb_flush_batched
 713 * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise
 714 * via the PTL.
 715 */
 716void flush_tlb_batched_pending(struct mm_struct *mm)
 717{
 718	int batch = atomic_read(&mm->tlb_flush_batched);
 719	int pending = batch & TLB_FLUSH_BATCH_PENDING_MASK;
 720	int flushed = batch >> TLB_FLUSH_BATCH_FLUSHED_SHIFT;
 721
 722	if (pending != flushed) {
 723		flush_tlb_mm(mm);
 724		/*
 725		 * If the new TLB flushing is pending during flushing, leave
 726		 * mm->tlb_flush_batched as is, to avoid losing flushing.
 727		 */
 728		atomic_cmpxchg(&mm->tlb_flush_batched, batch,
 729			       pending | (pending << TLB_FLUSH_BATCH_FLUSHED_SHIFT));
 730	}
 731}
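
/*
 * Worked example (editorial): with TLB_FLUSH_BATCH_FLUSHED_SHIFT == 16,
 * a tlb_flush_batched value of 0x00030005 decodes as flushed == 3 and
 * pending == 5.  Since pending != flushed, two batched generations were
 * never flushed, so flush_tlb_batched_pending() calls flush_tlb_mm()
 * and then tries to store 0x00050005, marking generation 5 as both
 * pending and flushed.
 */
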
 732#else
 733static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
 734{
 735}
 736
 737static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
 738{
 739	return false;
 740}
 741#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
 742
 743/*
 744 * At what user virtual address is page expected in vma?
 745 * Caller should check the page is actually part of the vma.
 746 */
 747unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 748{
 749	struct folio *folio = page_folio(page);
 750	if (folio_test_anon(folio)) {
 751		struct anon_vma *page__anon_vma = folio_anon_vma(folio);
 752		/*
 753		 * Note: swapoff's unuse_vma() is more efficient with this
 754		 * check, and needs it to match anon_vma when KSM is active.
 755		 */
 756		if (!vma->anon_vma || !page__anon_vma ||
 757		    vma->anon_vma->root != page__anon_vma->root)
 758			return -EFAULT;
 759	} else if (!vma->vm_file) {
 760		return -EFAULT;
 761	} else if (vma->vm_file->f_mapping != folio->mapping) {
 762		return -EFAULT;
 763	}
 764
 765	return vma_address(page, vma);
 766}
 767
 768/*
 769 * Returns the actual pmd_t* where we expect 'address' to be mapped from, or
 770 * NULL if it doesn't exist.  No guarantees / checks on what the pmd_t*
 771 * represents.
 772 */
 773pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
 774{
 775	pgd_t *pgd;
 776	p4d_t *p4d;
 777	pud_t *pud;
 778	pmd_t *pmd = NULL;
 779
 780	pgd = pgd_offset(mm, address);
 781	if (!pgd_present(*pgd))
 782		goto out;
 783
 784	p4d = p4d_offset(pgd, address);
 785	if (!p4d_present(*p4d))
 786		goto out;
 787
 788	pud = pud_offset(p4d, address);
 789	if (!pud_present(*pud))
 790		goto out;
 791
 792	pmd = pmd_offset(pud, address);
 793out:
 794	return pmd;
 795}
 796
 797struct folio_referenced_arg {
 798	int mapcount;
 799	int referenced;
 800	unsigned long vm_flags;
 801	struct mem_cgroup *memcg;
 802};
 803/*
 804 * arg: folio_referenced_arg will be passed
 805 */
 806static bool folio_referenced_one(struct folio *folio,
 807		struct vm_area_struct *vma, unsigned long address, void *arg)
 808{
 809	struct folio_referenced_arg *pra = arg;
 810	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
 811	int referenced = 0;
 812
 813	while (page_vma_mapped_walk(&pvmw)) {
 814		address = pvmw.address;
 815
 816		if ((vma->vm_flags & VM_LOCKED) &&
 817		    (!folio_test_large(folio) || !pvmw.pte)) {
 818			/* Restore the mlock which got missed */
 819			mlock_vma_folio(folio, vma, !pvmw.pte);
 820			page_vma_mapped_walk_done(&pvmw);
 821			pra->vm_flags |= VM_LOCKED;
 822			return false; /* To break the loop */
 823		}
 824
 825		if (pvmw.pte) {
 826			if (lru_gen_enabled() && pte_young(*pvmw.pte) &&
 827			    !(vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ))) {
 828				lru_gen_look_around(&pvmw);
 829				referenced++;
 830			}
 831
 832			if (ptep_clear_flush_young_notify(vma, address,
 833						pvmw.pte)) {
 834				/*
 835				 * Don't treat a reference through
 836				 * a sequentially read mapping as such.
 837				 * If the folio has been used in another mapping,
 838				 * we will catch it; if this other mapping is
 839				 * already gone, the unmap path will have set
 840				 * the referenced flag or activated the folio.
 841				 */
 842				if (likely(!(vma->vm_flags & VM_SEQ_READ)))
 843					referenced++;
 844			}
 845		} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
 846			if (pmdp_clear_flush_young_notify(vma, address,
 847						pvmw.pmd))
 848				referenced++;
 849		} else {
 850			/* unexpected pmd-mapped folio? */
 851			WARN_ON_ONCE(1);
 852		}
 853
 854		pra->mapcount--;
 855	}
 856
 857	if (referenced)
 858		folio_clear_idle(folio);
 859	if (folio_test_clear_young(folio))
 860		referenced++;
 861
 862	if (referenced) {
 863		pra->referenced++;
 864		pra->vm_flags |= vma->vm_flags & ~VM_LOCKED;
 865	}
 866
 867	if (!pra->mapcount)
 868		return false; /* To break the loop */
 869
 870	return true;
 871}
 872
 873static bool invalid_folio_referenced_vma(struct vm_area_struct *vma, void *arg)
 874{
 875	struct folio_referenced_arg *pra = arg;
 876	struct mem_cgroup *memcg = pra->memcg;
 877
 878	if (!mm_match_cgroup(vma->vm_mm, memcg))
 879		return true;
 880
 881	return false;
 882}
 883
 884/**
 885 * folio_referenced() - Test if the folio was referenced.
 886 * @folio: The folio to test.
 887 * @is_locked: Caller holds lock on the folio.
 888 * @memcg: target memory cgroup
 889 * @vm_flags: A combination of all the vma->vm_flags which referenced the folio.
 890 *
891 * Quick test_and_clear_referenced for all mappings of a folio.
 892 *
 893 * Return: The number of mappings which referenced the folio. Return -1 if
 894 * the function bailed out due to rmap lock contention.
 895 */
 896int folio_referenced(struct folio *folio, int is_locked,
 897		     struct mem_cgroup *memcg, unsigned long *vm_flags)
 898{
 899	int we_locked = 0;
 900	struct folio_referenced_arg pra = {
 901		.mapcount = folio_mapcount(folio),
 902		.memcg = memcg,
 903	};
 904	struct rmap_walk_control rwc = {
 905		.rmap_one = folio_referenced_one,
 906		.arg = (void *)&pra,
 907		.anon_lock = folio_lock_anon_vma_read,
 908		.try_lock = true,
 909	};
 910
 911	*vm_flags = 0;
 912	if (!pra.mapcount)
 913		return 0;
 914
 915	if (!folio_raw_mapping(folio))
 916		return 0;
 917
 918	if (!is_locked && (!folio_test_anon(folio) || folio_test_ksm(folio))) {
 919		we_locked = folio_trylock(folio);
 920		if (!we_locked)
 921			return 1;
 922	}
 923
 924	/*
 925	 * If we are reclaiming on behalf of a cgroup, skip
 926	 * counting on behalf of references from different
 927	 * cgroups
 928	 */
 929	if (memcg) {
 930		rwc.invalid_vma = invalid_folio_referenced_vma;
 931	}
 932
 933	rmap_walk(folio, &rwc);
 934	*vm_flags = pra.vm_flags;
 935
 936	if (we_locked)
 937		folio_unlock(folio);
 938
 939	return rwc.contended ? -1 : pra.referenced;
 940}
 941
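/*
 * Illustrative sketch (editorial): a reclaim-style caller, with
 * hypothetical names borrowed from mm/vmscan.c:
 *
 *	unsigned long vm_flags;
 *	int referenced;
 *
 *	referenced = folio_referenced(folio, is_locked,
 *				      sc->target_mem_cgroup, &vm_flags);
 *	if (referenced == -1)
 *		return FOLIOREF_KEEP;
 *	if (vm_flags & VM_LOCKED)
 *		return FOLIOREF_ACTIVATE;
 *
 * The -1 case is the try_lock bailout: rather than sleep on a contended
 * rmap lock, reclaim keeps the folio and moves on.
 */
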
 942static int page_vma_mkclean_one(struct page_vma_mapped_walk *pvmw)
 943{
 944	int cleaned = 0;
 945	struct vm_area_struct *vma = pvmw->vma;
 946	struct mmu_notifier_range range;
 947	unsigned long address = pvmw->address;
 948
 949	/*
950	 * We have to assume the worst case, i.e. pmd for invalidation. Note that
951	 * the folio cannot be freed from this function.
 952	 */
 953	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
 954				0, vma, vma->vm_mm, address,
 955				vma_address_end(pvmw));
 956	mmu_notifier_invalidate_range_start(&range);
 957
 958	while (page_vma_mapped_walk(pvmw)) {
 959		int ret = 0;
 960
 961		address = pvmw->address;
 962		if (pvmw->pte) {
 963			pte_t entry;
 964			pte_t *pte = pvmw->pte;
 965
 966			if (!pte_dirty(*pte) && !pte_write(*pte))
 967				continue;
 968
 969			flush_cache_page(vma, address, pte_pfn(*pte));
 970			entry = ptep_clear_flush(vma, address, pte);
 971			entry = pte_wrprotect(entry);
 972			entry = pte_mkclean(entry);
 973			set_pte_at(vma->vm_mm, address, pte, entry);
 974			ret = 1;
 975		} else {
 976#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 977			pmd_t *pmd = pvmw->pmd;
 978			pmd_t entry;
 979
 980			if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
 981				continue;
 982
 983			flush_cache_range(vma, address,
 984					  address + HPAGE_PMD_SIZE);
 985			entry = pmdp_invalidate(vma, address, pmd);
 986			entry = pmd_wrprotect(entry);
 987			entry = pmd_mkclean(entry);
 988			set_pmd_at(vma->vm_mm, address, pmd, entry);
 989			ret = 1;
 990#else
 991			/* unexpected pmd-mapped folio? */
 992			WARN_ON_ONCE(1);
 993#endif
 994		}
 995
 996		/*
 997		 * No need to call mmu_notifier_invalidate_range() as we are
 998		 * downgrading page table protection not changing it to point
 999		 * to a new page.
1000		 *
1001		 * See Documentation/mm/mmu_notifier.rst
1002		 */
1003		if (ret)
1004			cleaned++;
1005	}
1006
1007	mmu_notifier_invalidate_range_end(&range);
1008
1009	return cleaned;
1010}
1011
1012static bool page_mkclean_one(struct folio *folio, struct vm_area_struct *vma,
1013			     unsigned long address, void *arg)
1014{
1015	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_SYNC);
1016	int *cleaned = arg;
1017
1018	*cleaned += page_vma_mkclean_one(&pvmw);
1019
1020	return true;
1021}
1022
1023static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
1024{
1025	if (vma->vm_flags & VM_SHARED)
1026		return false;
1027
1028	return true;
1029}
1030
1031int folio_mkclean(struct folio *folio)
1032{
1033	int cleaned = 0;
1034	struct address_space *mapping;
1035	struct rmap_walk_control rwc = {
1036		.arg = (void *)&cleaned,
1037		.rmap_one = page_mkclean_one,
1038		.invalid_vma = invalid_mkclean_vma,
1039	};
1040
1041	BUG_ON(!folio_test_locked(folio));
1042
1043	if (!folio_mapped(folio))
1044		return 0;
1045
1046	mapping = folio_mapping(folio);
1047	if (!mapping)
1048		return 0;
1049
1050	rmap_walk(folio, &rwc);
1051
1052	return cleaned;
1053}
1054EXPORT_SYMBOL_GPL(folio_mkclean);
1055
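/*
 * Illustrative sketch (editorial): a writeback-side caller typically
 * cleans the ptes before writing the folio, roughly:
 *
 *	folio_lock(folio);
 *	if (folio_mkclean(folio))
 *		folio_mark_dirty(folio);
 *	folio_unlock(folio);
 *
 * Every pte that was writable or dirty is now clean and write-protected,
 * so a later store must fault and re-dirty the folio visibly.
 */
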
1056/**
1057 * pfn_mkclean_range - Cleans the PTEs (including PMDs) mapped with range of
1058 *                     [@pfn, @pfn + @nr_pages) at the specific offset (@pgoff)
1059 *                     within the @vma of shared mappings. Since clean PTEs
1060 *                     should also be read-only, they are write-protected too.
1061 * @pfn: start pfn.
1062 * @nr_pages: number of physically contiguous pages starting with @pfn.
1063 * @pgoff: page offset that the @pfn mapped with.
1064 * @vma: vma that @pfn mapped within.
1065 *
1066 * Returns the number of cleaned PTEs (including PMDs).
1067 */
1068int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
1069		      struct vm_area_struct *vma)
1070{
1071	struct page_vma_mapped_walk pvmw = {
1072		.pfn		= pfn,
1073		.nr_pages	= nr_pages,
1074		.pgoff		= pgoff,
1075		.vma		= vma,
1076		.flags		= PVMW_SYNC,
1077	};
1078
1079	if (invalid_mkclean_vma(vma, NULL))
1080		return 0;
1081
1082	pvmw.address = vma_pgoff_address(pgoff, nr_pages, vma);
1083	VM_BUG_ON_VMA(pvmw.address == -EFAULT, vma);
1084
1085	return page_vma_mkclean_one(&pvmw);
1086}
1087
1088int total_compound_mapcount(struct page *head)
1089{
1090	int mapcount = head_compound_mapcount(head);
1091	int nr_subpages;
1092	int i;
1093
1094	/* In the common case, avoid the loop when no subpages mapped by PTE */
1095	if (head_subpages_mapcount(head) == 0)
1096		return mapcount;
1097	/*
1098	 * Add all the PTE mappings of those subpages mapped by PTE.
1099	 * Limit the loop, knowing that only subpages_mapcount are mapped?
1100	 * Perhaps: given all the raciness, that may be a good or a bad idea.
1101	 */
1102	nr_subpages = thp_nr_pages(head);
1103	for (i = 0; i < nr_subpages; i++)
1104		mapcount += atomic_read(&head[i]._mapcount);
1105
1106	/* But each of those _mapcounts was based on -1 */
1107	mapcount += nr_subpages;
1108	return mapcount;
1109}
1110
1111/**
1112 * page_move_anon_rmap - move a page to our anon_vma
1113 * @page:	the page to move to our anon_vma
1114 * @vma:	the vma the page belongs to
1115 *
1116 * When a page belongs exclusively to one process after a COW event,
1117 * that page can be moved into the anon_vma that belongs to just that
1118 * process, so the rmap code will not search the parent or sibling
1119 * processes.
1120 */
1121void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
1122{
1123	void *anon_vma = vma->anon_vma;
1124	struct folio *folio = page_folio(page);
1125
1126	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
1127	VM_BUG_ON_VMA(!anon_vma, vma);
1128
1129	anon_vma += PAGE_MAPPING_ANON;
1130	/*
1131	 * Ensure that anon_vma and the PAGE_MAPPING_ANON bit are written
1132	 * simultaneously, so a concurrent reader (eg folio_referenced()'s
1133	 * folio_test_anon()) will not see one without the other.
1134	 */
1135	WRITE_ONCE(folio->mapping, anon_vma);
1136	SetPageAnonExclusive(page);
1137}
1138
1139/**
1140 * __page_set_anon_rmap - set up new anonymous rmap
1141 * @page:	Page or Hugepage to add to rmap
1142 * @vma:	VM area to add page to.
1143 * @address:	User virtual address of the mapping
1144 * @exclusive:	the page is exclusively owned by the current process
1145 */
1146static void __page_set_anon_rmap(struct page *page,
1147	struct vm_area_struct *vma, unsigned long address, int exclusive)
1148{
1149	struct anon_vma *anon_vma = vma->anon_vma;
1150
1151	BUG_ON(!anon_vma);
1152
1153	if (PageAnon(page))
1154		goto out;
1155
1156	/*
1157	 * If the page isn't exclusively mapped into this vma,
1158	 * we must use the _oldest_ possible anon_vma for the
1159	 * page mapping!
1160	 */
1161	if (!exclusive)
1162		anon_vma = anon_vma->root;
1163
1164	/*
1165	 * page_idle does a lockless/optimistic rmap scan on page->mapping.
1166	 * Make sure the compiler doesn't split the stores of anon_vma and
1167	 * the PAGE_MAPPING_ANON type identifier, otherwise the rmap code
1168	 * could mistake the mapping for a struct address_space and crash.
1169	 */
1170	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
1171	WRITE_ONCE(page->mapping, (struct address_space *) anon_vma);
1172	page->index = linear_page_index(vma, address);
1173out:
1174	if (exclusive)
1175		SetPageAnonExclusive(page);
1176}
1177
1178/**
1179 * __page_check_anon_rmap - sanity check anonymous rmap addition
1180 * @page:	the page to add the mapping to
1181 * @vma:	the vm area in which the mapping is added
1182 * @address:	the user virtual address mapped
1183 */
1184static void __page_check_anon_rmap(struct page *page,
1185	struct vm_area_struct *vma, unsigned long address)
1186{
1187	struct folio *folio = page_folio(page);
1188	/*
1189	 * The page's anon-rmap details (mapping and index) are guaranteed to
1190	 * be set up correctly at this point.
1191	 *
1192	 * We have exclusion against page_add_anon_rmap because the caller
1193	 * always holds the page locked.
1194	 *
1195	 * We have exclusion against page_add_new_anon_rmap because those pages
1196	 * are initially only visible via the pagetables, and the pte is locked
1197	 * over the call to page_add_new_anon_rmap.
1198	 */
1199	VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root,
1200			folio);
1201	VM_BUG_ON_PAGE(page_to_pgoff(page) != linear_page_index(vma, address),
1202		       page);
1203}
1204
1205/**
1206 * page_add_anon_rmap - add pte mapping to an anonymous page
1207 * @page:	the page to add the mapping to
1208 * @vma:	the vm area in which the mapping is added
1209 * @address:	the user virtual address mapped
1210 * @flags:	the rmap flags
1211 *
1212 * The caller needs to hold the pte lock, and the page must be locked in
1213 * the anon_vma case: to serialize mapping,index checking after setting,
1214 * and to ensure that PageAnon is not being upgraded racily to PageKsm
1215 * (but PageKsm is never downgraded to PageAnon).
1216 */
1217void page_add_anon_rmap(struct page *page,
1218	struct vm_area_struct *vma, unsigned long address, rmap_t flags)
1219{
1220	atomic_t *mapped;
1221	int nr = 0, nr_pmdmapped = 0;
1222	bool compound = flags & RMAP_COMPOUND;
1223	bool first = true;
1224
1225	if (unlikely(PageKsm(page)))
1226		lock_page_memcg(page);
1227
1228	/* Is page being mapped by PTE? Is this its first map to be added? */
1229	if (likely(!compound)) {
1230		first = atomic_inc_and_test(&page->_mapcount);
1231		nr = first;
1232		if (first && PageCompound(page)) {
1233			mapped = subpages_mapcount_ptr(compound_head(page));
1234			nr = atomic_inc_return_relaxed(mapped);
1235			nr = (nr < COMPOUND_MAPPED);
1236		}
1237	} else if (PageTransHuge(page)) {
1238		/* That test is redundant: it's for safety or to optimize out */
1239
1240		first = atomic_inc_and_test(compound_mapcount_ptr(page));
1241		if (first) {
1242			mapped = subpages_mapcount_ptr(page);
1243			nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped);
1244			if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) {
1245				nr_pmdmapped = thp_nr_pages(page);
1246				nr = nr_pmdmapped - (nr & SUBPAGES_MAPPED);
1247				/* Raced ahead of a remove and another add? */
1248				if (unlikely(nr < 0))
1249					nr = 0;
1250			} else {
1251				/* Raced ahead of a remove of COMPOUND_MAPPED */
1252				nr = 0;
1253			}
1254		}
1255	}
1256
1257	VM_BUG_ON_PAGE(!first && (flags & RMAP_EXCLUSIVE), page);
1258	VM_BUG_ON_PAGE(!first && PageAnonExclusive(page), page);
1259
1260	if (nr_pmdmapped)
1261		__mod_lruvec_page_state(page, NR_ANON_THPS, nr_pmdmapped);
1262	if (nr)
1263		__mod_lruvec_page_state(page, NR_ANON_MAPPED, nr);
1264
1265	if (unlikely(PageKsm(page)))
1266		unlock_page_memcg(page);
1267
1268	/* address might be in next vma when migration races vma_adjust */
1269	else if (first)
1270		__page_set_anon_rmap(page, vma, address,
1271				     !!(flags & RMAP_EXCLUSIVE));
1272	else
1273		__page_check_anon_rmap(page, vma, address);
1274
1275	mlock_vma_page(page, vma, compound);
1276}
1277
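/*
 * Illustrative sketch (editorial): a fault handler re-establishing a
 * pte for an existing anonymous page does, under the pte lock and with
 * the page locked, roughly:
 *
 *	page_add_anon_rmap(page, vma, addr, RMAP_EXCLUSIVE);
 *	set_pte_at(vma->vm_mm, addr, ptep, entry);
 *
 * passing RMAP_EXCLUSIVE only when this process is known to be the sole
 * owner (e.g. swapin of an exclusive swap entry); RMAP_NONE is the
 * plain form.  addr, ptep and entry are the caller's own state.
 */
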
1278/**
1279 * page_add_new_anon_rmap - add mapping to a new anonymous page
1280 * @page:	the page to add the mapping to
1281 * @vma:	the vm area in which the mapping is added
1282 * @address:	the user virtual address mapped
1283 *
1284 * If it's a compound page, it is accounted as a compound page. As the page
1285 * is new, it's assumed to be mapped exclusively by a single process.
1286 *
1287 * Same as page_add_anon_rmap but must only be called on *new* pages.
1288 * This means the inc-and-test can be bypassed.
1289 * Page does not have to be locked.
1290 */
1291void page_add_new_anon_rmap(struct page *page,
1292	struct vm_area_struct *vma, unsigned long address)
1293{
1294	int nr;
1295
1296	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
1297	__SetPageSwapBacked(page);
1298
1299	if (likely(!PageCompound(page))) {
1300		/* increment count (starts at -1) */
1301		atomic_set(&page->_mapcount, 0);
1302		nr = 1;
1303	} else {
1304		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
1305		/* increment count (starts at -1) */
1306		atomic_set(compound_mapcount_ptr(page), 0);
1307		atomic_set(subpages_mapcount_ptr(page), COMPOUND_MAPPED);
1308		nr = thp_nr_pages(page);
1309		__mod_lruvec_page_state(page, NR_ANON_THPS, nr);
1310	}
1311
1312	__mod_lruvec_page_state(page, NR_ANON_MAPPED, nr);
1313	__page_set_anon_rmap(page, vma, address, 1);
1314}
1315
1316/**
1317 * page_add_file_rmap - add pte mapping to a file page
1318 * @page:	the page to add the mapping to
1319 * @vma:	the vm area in which the mapping is added
1320 * @compound:	charge the page as compound or small page
1321 *
1322 * The caller needs to hold the pte lock.
1323 */
1324void page_add_file_rmap(struct page *page,
1325	struct vm_area_struct *vma, bool compound)
1326{
1327	atomic_t *mapped;
1328	int nr = 0, nr_pmdmapped = 0;
1329	bool first;
1330
1331	VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
1332	lock_page_memcg(page);
1333
1334	/* Is page being mapped by PTE? Is this its first map to be added? */
1335	if (likely(!compound)) {
1336		first = atomic_inc_and_test(&page->_mapcount);
1337		nr = first;
1338		if (first && PageCompound(page)) {
1339			mapped = subpages_mapcount_ptr(compound_head(page));
1340			nr = atomic_inc_return_relaxed(mapped);
1341			nr = (nr < COMPOUND_MAPPED);
1342		}
1343	} else if (PageTransHuge(page)) {
1344		/* That test is redundant: it's for safety or to optimize out */
1345
1346		first = atomic_inc_and_test(compound_mapcount_ptr(page));
1347		if (first) {
1348			mapped = subpages_mapcount_ptr(page);
1349			nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped);
1350			if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) {
1351				nr_pmdmapped = thp_nr_pages(page);
1352				nr = nr_pmdmapped - (nr & SUBPAGES_MAPPED);
1353				/* Raced ahead of a remove and another add? */
1354				if (unlikely(nr < 0))
1355					nr = 0;
1356			} else {
1357				/* Raced ahead of a remove of COMPOUND_MAPPED */
1358				nr = 0;
1359			}
1360		}
1361	}
1362
1363	if (nr_pmdmapped)
1364		__mod_lruvec_page_state(page, PageSwapBacked(page) ?
1365			NR_SHMEM_PMDMAPPED : NR_FILE_PMDMAPPED, nr_pmdmapped);
1366	if (nr)
1367		__mod_lruvec_page_state(page, NR_FILE_MAPPED, nr);
1368	unlock_page_memcg(page);
1369
1370	mlock_vma_page(page, vma, compound);
1371}
1372
1373/**
1374 * page_remove_rmap - take down pte mapping from a page
1375 * @page:	page to remove mapping from
1376 * @vma:	the vm area from which the mapping is removed
1377 * @compound:	uncharge the page as compound or small page
1378 *
1379 * The caller needs to hold the pte lock.
1380 */
1381void page_remove_rmap(struct page *page,
1382	struct vm_area_struct *vma, bool compound)
1383{
1384	atomic_t *mapped;
1385	int nr = 0, nr_pmdmapped = 0;
1386	bool last;
1387
1388	VM_BUG_ON_PAGE(compound && !PageHead(page), page);
1389
1390	/* Hugetlb pages are not counted in NR_*MAPPED */
1391	if (unlikely(PageHuge(page))) {
1392		/* hugetlb pages are always mapped with pmds */
1393		atomic_dec(compound_mapcount_ptr(page));
1394		return;
1395	}
1396
1397	lock_page_memcg(page);
1398
1399	/* Is page being unmapped by PTE? Is this its last map to be removed? */
1400	if (likely(!compound)) {
1401		last = atomic_add_negative(-1, &page->_mapcount);
1402		nr = last;
1403		if (last && PageCompound(page)) {
1404			mapped = subpages_mapcount_ptr(compound_head(page));
1405			nr = atomic_dec_return_relaxed(mapped);
1406			nr = (nr < COMPOUND_MAPPED);
1407		}
1408	} else if (PageTransHuge(page)) {
1409		/* That test is redundant: it's for safety or to optimize out */
1410
1411		last = atomic_add_negative(-1, compound_mapcount_ptr(page));
1412		if (last) {
1413			mapped = subpages_mapcount_ptr(page);
1414			nr = atomic_sub_return_relaxed(COMPOUND_MAPPED, mapped);
1415			if (likely(nr < COMPOUND_MAPPED)) {
1416				nr_pmdmapped = thp_nr_pages(page);
1417				nr = nr_pmdmapped - (nr & SUBPAGES_MAPPED);
1418				/* Raced ahead of another remove and an add? */
1419				if (unlikely(nr < 0))
1420					nr = 0;
1421			} else {
1422				/* An add of COMPOUND_MAPPED raced ahead */
1423				nr = 0;
1424			}
1425		}
1426	}
1427
1428	if (nr_pmdmapped) {
1429		__mod_lruvec_page_state(page, PageAnon(page) ? NR_ANON_THPS :
1430				(PageSwapBacked(page) ? NR_SHMEM_PMDMAPPED :
1431				NR_FILE_PMDMAPPED), -nr_pmdmapped);
1432	}
1433	if (nr) {
1434		__mod_lruvec_page_state(page, PageAnon(page) ? NR_ANON_MAPPED :
1435				NR_FILE_MAPPED, -nr);
1436		/*
1437		 * Queue anon THP for deferred split if at least one small
1438		 * page of the compound page is unmapped, but at least one
1439		 * small page is still mapped.
1440		 */
1441		if (PageTransCompound(page) && PageAnon(page))
1442			if (!compound || nr < nr_pmdmapped)
1443				deferred_split_huge_page(compound_head(page));
1444	}
1445
1446	/*
1447	 * It would be tidy to reset PageAnon mapping when fully unmapped,
1448	 * but that might overwrite a racing page_add_anon_rmap
1449	 * which increments mapcount after us but sets mapping
1450	 * before us: so leave the reset to free_pages_prepare,
1451	 * and remember that it's only reliable while mapped.
1452	 */
1453
1454	unlock_page_memcg(page);
1455
1456	munlock_vma_page(page, vma, compound);
1457}
1458
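/*
 * Illustrative sketch (editorial): the zap path pairs the pte clear
 * with the rmap removal under the pte lock, as zap_pte_range() does:
 *
 *	ptent = ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
 *	...
 *	page_remove_rmap(page, vma, false);
 *
 * the "false" meaning a small (pte-level) mapping is going away, not a
 * pmd-mapped compound page.
 */
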
1459/*
1460 * @arg: enum ttu_flags will be passed to this argument
1461 */
1462static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
1463		     unsigned long address, void *arg)
1464{
1465	struct mm_struct *mm = vma->vm_mm;
1466	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
1467	pte_t pteval;
1468	struct page *subpage;
1469	bool anon_exclusive, ret = true;
1470	struct mmu_notifier_range range;
1471	enum ttu_flags flags = (enum ttu_flags)(long)arg;
1472
1473	/*
1474	 * When racing against e.g. zap_pte_range() on another cpu,
1475	 * in between its ptep_get_and_clear_full() and page_remove_rmap(),
1476	 * try_to_unmap() may return before page_mapped() has become false,
1477	 * if page table locking is skipped: use TTU_SYNC to wait for that.
1478	 */
1479	if (flags & TTU_SYNC)
1480		pvmw.flags = PVMW_SYNC;
1481
1482	if (flags & TTU_SPLIT_HUGE_PMD)
1483		split_huge_pmd_address(vma, address, false, folio);
1484
1485	/*
1486	 * For THP, we have to assume the worst case, i.e. pmd for invalidation.
1487	 * For hugetlb, it could be much worse if we need to do pud
1488	 * invalidation in the case of pmd sharing.
1489	 *
1490	 * Note that the folio can not be freed in this function as call of
1491	 * try_to_unmap() must hold a reference on the folio.
1492	 */
1493	range.end = vma_address_end(&pvmw);
1494	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
1495				address, range.end);
1496	if (folio_test_hugetlb(folio)) {
1497		/*
1498		 * If sharing is possible, start and end will be adjusted
1499		 * accordingly.
1500		 */
1501		adjust_range_if_pmd_sharing_possible(vma, &range.start,
1502						     &range.end);
1503	}
1504	mmu_notifier_invalidate_range_start(&range);
1505
1506	while (page_vma_mapped_walk(&pvmw)) {
1507		/* Unexpected PMD-mapped THP? */
1508		VM_BUG_ON_FOLIO(!pvmw.pte, folio);
1509
1510		/*
1511		 * If the folio is in an mlock()d vma, we must not swap it out.
1512		 */
1513		if (!(flags & TTU_IGNORE_MLOCK) &&
1514		    (vma->vm_flags & VM_LOCKED)) {
1515			/* Restore the mlock which got missed */
1516			mlock_vma_folio(folio, vma, false);
1517			page_vma_mapped_walk_done(&pvmw);
1518			ret = false;
1519			break;
1520		}
1521
1522		subpage = folio_page(folio,
1523					pte_pfn(*pvmw.pte) - folio_pfn(folio));
1524		address = pvmw.address;
1525		anon_exclusive = folio_test_anon(folio) &&
1526				 PageAnonExclusive(subpage);
1527
1528		if (folio_test_hugetlb(folio)) {
1529			bool anon = folio_test_anon(folio);
1530
1531			/*
1532			 * try_to_unmap() is only ever passed a hugetlb page
1533			 * when that page is poisoned.
1534			 */
1535			VM_BUG_ON_PAGE(!PageHWPoison(subpage), subpage);
1536			/*
1537			 * huge_pmd_unshare may unmap an entire PMD page.
1538			 * There is no way of knowing exactly which PMDs may
1539			 * be cached for this mm, so we must flush them all.
1540			 * start/end were already adjusted above to cover this
1541			 * range.
1542			 */
1543			flush_cache_range(vma, range.start, range.end);
1544
1545			/*
1546			 * To call huge_pmd_unshare, i_mmap_rwsem must be
1547			 * held in write mode.  Caller needs to explicitly
1548			 * do this outside rmap routines.
1549			 *
1550			 * We also must hold hugetlb vma_lock in write mode.
1551			 * Lock order dictates acquiring vma_lock BEFORE
1552			 * i_mmap_rwsem.  We can only try lock here and fail
1553			 * if unsuccessful.
1554			 */
1555			if (!anon) {
1556				VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
1557				if (!hugetlb_vma_trylock_write(vma)) {
1558					page_vma_mapped_walk_done(&pvmw);
1559					ret = false;
1560					break;
1561				}
1562				if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) {
1563					hugetlb_vma_unlock_write(vma);
1564					flush_tlb_range(vma,
1565						range.start, range.end);
1566					mmu_notifier_invalidate_range(mm,
1567						range.start, range.end);
1568					/*
1569					 * The ref count of the PMD page was
1570					 * dropped which is part of the way map
1571					 * counting is done for shared PMDs.
1572					 * Return 'true' here.  When there is
1573					 * no other sharing, huge_pmd_unshare
1574					 * returns false and we will unmap the
1575					 * actual page and drop map count
1576					 * to zero.
1577					 */
1578					page_vma_mapped_walk_done(&pvmw);
1579					break;
1580				}
1581				hugetlb_vma_unlock_write(vma);
1582			}
1583			pteval = huge_ptep_clear_flush(vma, address, pvmw.pte);
1584		} else {
1585			flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
1586			/* Nuke the page table entry. */
1587			if (should_defer_flush(mm, flags)) {
1588				/*
1589				 * We clear the PTE but do not flush so potentially
1590				 * a remote CPU could still be writing to the folio.
1591				 * If the entry was previously clean then the
1592				 * architecture must guarantee that a clear->dirty
1593				 * transition on a cached TLB entry is written through
1594				 * and traps if the PTE is unmapped.
1595				 */
1596				pteval = ptep_get_and_clear(mm, address, pvmw.pte);
1597
1598				set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
1599			} else {
1600				pteval = ptep_clear_flush(vma, address, pvmw.pte);
1601			}
1602		}
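		/*
		 * Editor's note: when the flush was deferred above, the TLB
		 * flush is batched and issued later by the caller (the
		 * reclaim path flushes via try_to_unmap_flush()); the
		 * caller's folio reference keeps the folio from being freed
		 * and reused in that window.
		 */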
1603
1604		/*
1605		 * Now the pte is cleared. If this pte was uffd-wp armed,
1606		 * we may want to replace a none pte with a marker pte if
1607		 * it's file-backed, so we don't lose the tracking info.
1608		 */
1609		pte_install_uffd_wp_if_needed(vma, address, pvmw.pte, pteval);
1610
1611		/* Set the dirty flag on the folio now the pte is gone. */
1612		if (pte_dirty(pteval))
1613			folio_mark_dirty(folio);
1614
1615		/* Update high watermark before we lower rss */
1616		update_hiwater_rss(mm);
1617
1618		if (PageHWPoison(subpage) && !(flags & TTU_IGNORE_HWPOISON)) {
1619			pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
1620			if (folio_test_hugetlb(folio)) {
1621				hugetlb_count_sub(folio_nr_pages(folio), mm);
1622				set_huge_pte_at(mm, address, pvmw.pte, pteval);
1623			} else {
1624				dec_mm_counter(mm, mm_counter(&folio->page));
1625				set_pte_at(mm, address, pvmw.pte, pteval);
1626			}
1627
1628		} else if (pte_unused(pteval) && !userfaultfd_armed(vma)) {
1629			/*
1630			 * The guest indicated that the page content is of no
1631			 * interest anymore. Simply discard the pte, vmscan
1632			 * will take care of the rest.
1633			 * A future reference will then fault in a new zero
1634			 * page. When userfaultfd is active, we must not drop
1635			 * this page though, as its main user (postcopy
1636			 * migration) will not expect userfaults on already
1637			 * copied pages.
1638			 */
1639			dec_mm_counter(mm, mm_counter(&folio->page));
1640			/* We have to invalidate as we cleared the pte */
1641			mmu_notifier_invalidate_range(mm, address,
1642						      address + PAGE_SIZE);
1643		} else if (folio_test_anon(folio)) {
1644			swp_entry_t entry = { .val = page_private(subpage) };
1645			pte_t swp_pte;
1646			/*
1647			 * Store the swap location in the pte.
1648			 * See handle_pte_fault() ...
1649			 */
1650			if (unlikely(folio_test_swapbacked(folio) !=
1651					folio_test_swapcache(folio))) {
1652				WARN_ON_ONCE(1);
1653				ret = false;
1654				/* We have to invalidate as we cleared the pte */
1655				mmu_notifier_invalidate_range(mm, address,
1656							address + PAGE_SIZE);
1657				page_vma_mapped_walk_done(&pvmw);
1658				break;
1659			}
1660
1661			/* MADV_FREE page check */
1662			if (!folio_test_swapbacked(folio)) {
1663				int ref_count, map_count;
1664
1665				/*
1666				 * Synchronize with gup_pte_range():
1667				 * - clear PTE; barrier; read refcount
1668				 * - inc refcount; barrier; read PTE
1669				 */
1670				smp_mb();
1671
1672				ref_count = folio_ref_count(folio);
1673				map_count = folio_mapcount(folio);
1674
1675				/*
1676				 * Order reads for page refcount and dirty flag
1677				 * (see comments in __remove_mapping()).
1678				 */
1679				smp_rmb();
1680
1681				/*
1682				 * The only page refs must be one from isolation
1683				 * plus the rmap(s) (dropped by discard:).
1684				 */
1685				if (ref_count == 1 + map_count &&
1686				    !folio_test_dirty(folio)) {
1687					/* Invalidate as we cleared the pte */
1688					mmu_notifier_invalidate_range(mm,
1689						address, address + PAGE_SIZE);
1690					dec_mm_counter(mm, MM_ANONPAGES);
1691					goto discard;
1692				}
1693
1694				/*
1695				 * If the folio was redirtied, it cannot be
1696				 * discarded. Remap the page to page table.
1697				 */
1698				set_pte_at(mm, address, pvmw.pte, pteval);
1699				folio_set_swapbacked(folio);
1700				ret = false;
1701				page_vma_mapped_walk_done(&pvmw);
1702				break;
1703			}
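			/*
			 * Editor's summary of the MADV_FREE path above: a
			 * clean, lazily-freed anon folio whose only refs are
			 * the isolation ref plus its rmaps is discarded
			 * outright; a raced-in GUP pin or redirtying sends
			 * it back onto the swap-backed path instead.
			 */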
1704
1705			if (swap_duplicate(entry) < 0) {
1706				set_pte_at(mm, address, pvmw.pte, pteval);
1707				ret = false;
1708				page_vma_mapped_walk_done(&pvmw);
1709				break;
1710			}
1711			if (arch_unmap_one(mm, vma, address, pteval) < 0) {
1712				swap_free(entry);
1713				set_pte_at(mm, address, pvmw.pte, pteval);
1714				ret = false;
1715				page_vma_mapped_walk_done(&pvmw);
1716				break;
1717			}
1718
1719			/* See page_try_share_anon_rmap(): clear PTE first. */
1720			if (anon_exclusive &&
1721			    page_try_share_anon_rmap(subpage)) {
1722				swap_free(entry);
1723				set_pte_at(mm, address, pvmw.pte, pteval);
1724				ret = false;
1725				page_vma_mapped_walk_done(&pvmw);
1726				break;
1727			}
1728			/*
1729			 * Note: We *don't* remember if the page was mapped
1730			 * exclusively in the swap pte if the architecture
1731			 * doesn't support __HAVE_ARCH_PTE_SWP_EXCLUSIVE. In
1732			 * that case, swapin code has to re-determine that
1733			 * manually and might detect the page as possibly
1734			 * shared, for example, if there are other references on
1735			 * the page or if the page is under writeback. We made
1736			 * sure that there are no GUP pins on the page that
1737			 * would rely on it, so for GUP pins this is fine.
1738			 */
1739			if (list_empty(&mm->mmlist)) {
1740				spin_lock(&mmlist_lock);
1741				if (list_empty(&mm->mmlist))
1742					list_add(&mm->mmlist, &init_mm.mmlist);
1743				spin_unlock(&mmlist_lock);
1744			}
1745			dec_mm_counter(mm, MM_ANONPAGES);
1746			inc_mm_counter(mm, MM_SWAPENTS);
1747			swp_pte = swp_entry_to_pte(entry);
1748			if (anon_exclusive)
1749				swp_pte = pte_swp_mkexclusive(swp_pte);
1750			if (pte_soft_dirty(pteval))
1751				swp_pte = pte_swp_mksoft_dirty(swp_pte);
1752			if (pte_uffd_wp(pteval))
1753				swp_pte = pte_swp_mkuffd_wp(swp_pte);
1754			set_pte_at(mm, address, pvmw.pte, swp_pte);
1755			/* Invalidate as we cleared the pte */
1756			mmu_notifier_invalidate_range(mm, address,
1757						      address + PAGE_SIZE);
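			/*
			 * Editor's note: the swap pte built above carries the
			 * per-pte soft-dirty, uffd-wp and (where supported)
			 * anon-exclusive bits across swapout so that swapin
			 * can restore them.
			 */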
1758		} else {
1759			/*
1760			 * This is a locked file-backed folio,
1761			 * so it cannot be removed from the page
1762			 * cache and replaced by a new folio before
1763			 * mmu_notifier_invalidate_range_end, so no
1764			 * concurrent thread might update its page table
1765			 * to point at a new folio while a device is
1766			 * still using this folio.
1767			 *
1768			 * See Documentation/mm/mmu_notifier.rst
1769			 */
1770			dec_mm_counter(mm, mm_counter_file(&folio->page));
1771		}
1772discard:
1773		/*
1774		 * No need to call mmu_notifier_invalidate_range(): it has been
1775		 * done above for all cases requiring it to happen under page
1776		 * table lock before mmu_notifier_invalidate_range_end().
1777		 *
1778		 * See Documentation/mm/mmu_notifier.rst
1779		 */
1780		page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
1781		if (vma->vm_flags & VM_LOCKED)
1782			mlock_page_drain_local();
1783		folio_put(folio);
1784	}
1785
1786	mmu_notifier_invalidate_range_end(&range);
1787
1788	return ret;
1789}
1790
1791static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
1792{
1793	return vma_is_temporary_stack(vma);
1794}
1795
1796static int folio_not_mapped(struct folio *folio)
1797{
1798	return !folio_mapped(folio);
1799}
1800
1801/**
1802 * try_to_unmap - Try to remove all page table mappings to a folio.
1803 * @folio: The folio to unmap.
1804 * @flags: action and flags
1805 *
1806 * Tries to remove all the page table entries which are mapping this
1807 * folio.  It is the caller's responsibility to check if the folio is
1808 * still mapped if needed (use TTU_SYNC to prevent accounting races).
1809 *
1810 * Context: Caller must hold the folio lock.
1811 */
1812void try_to_unmap(struct folio *folio, enum ttu_flags flags)
1813{
1814	struct rmap_walk_control rwc = {
1815		.rmap_one = try_to_unmap_one,
1816		.arg = (void *)flags,
1817		.done = folio_not_mapped,
1818		.anon_lock = folio_lock_anon_vma_read,
1819	};
1820
1821	if (flags & TTU_RMAP_LOCKED)
1822		rmap_walk_locked(folio, &rwc);
1823	else
1824		rmap_walk(folio, &rwc);
1825}
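/*
 * Illustrative usage (editor's sketch, loosely based on the reclaim path
 * in mm/vmscan.c; the exact flag choice there varies by kernel version):
 *
 *	if (folio_mapped(folio)) {
 *		enum ttu_flags flags = TTU_BATCH_FLUSH;
 *
 *		if (folio_test_large(folio))
 *			flags |= TTU_SYNC;
 *		try_to_unmap(folio, flags);
 *		if (folio_mapped(folio))
 *			goto keep_locked;
 *	}
 */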
1826
1827/*
1828 * @arg: enum ttu_flags, passed in through the rmap_walk_control argument.
1829 *
1830 * If TTU_SPLIT_HUGE_PMD is specified any PMD mappings will be split into PTEs
1831 * containing migration entries.
1832 */
1833static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
1834		     unsigned long address, void *arg)
1835{
1836	struct mm_struct *mm = vma->vm_mm;
1837	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
1838	pte_t pteval;
1839	struct page *subpage;
1840	bool anon_exclusive, ret = true;
1841	struct mmu_notifier_range range;
1842	enum ttu_flags flags = (enum ttu_flags)(long)arg;
1843
1844	/*
1845	 * When racing against e.g. zap_pte_range() on another cpu,
1846	 * in between its ptep_get_and_clear_full() and page_remove_rmap(),
1847	 * try_to_migrate() may return before page_mapped() has become false,
1848	 * if page table locking is skipped: use TTU_SYNC to wait for that.
1849	 */
1850	if (flags & TTU_SYNC)
1851		pvmw.flags = PVMW_SYNC;
1852
1853	/*
1854	 * unmap_page() in mm/huge_memory.c is the only user of migration with
1855	 * TTU_SPLIT_HUGE_PMD and it wants to freeze.
1856	 */
1857	if (flags & TTU_SPLIT_HUGE_PMD)
1858		split_huge_pmd_address(vma, address, true, folio);
1859
1860	/*
1861	 * For THP, we have to assume the worst case, i.e. pmd invalidation.
1862	 * For hugetlb, it could be much worse if we need to do pud
1863	 * invalidation in the case of pmd sharing.
1864	 *
1865	 * Note that the page cannot be freed in this function, as the
1866	 * caller of try_to_migrate() must hold a reference on the page.
1867	 */
1868	range.end = vma_address_end(&pvmw);
1869	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
1870				address, range.end);
1871	if (folio_test_hugetlb(folio)) {
1872		/*
1873		 * If sharing is possible, start and end will be adjusted
1874		 * accordingly.
1875		 */
1876		adjust_range_if_pmd_sharing_possible(vma, &range.start,
1877						     &range.end);
1878	}
1879	mmu_notifier_invalidate_range_start(&range);
1880
1881	while (page_vma_mapped_walk(&pvmw)) {
1882#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1883		/* PMD-mapped THP migration entry */
1884		if (!pvmw.pte) {
1885			subpage = folio_page(folio,
1886				pmd_pfn(*pvmw.pmd) - folio_pfn(folio));
1887			VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
1888					!folio_test_pmd_mappable(folio), folio);
1889
1890			if (set_pmd_migration_entry(&pvmw, subpage)) {
1891				ret = false;
1892				page_vma_mapped_walk_done(&pvmw);
1893				break;
1894			}
1895			continue;
1896		}
1897#endif
1898
1899		/* Unexpected PMD-mapped THP? */
1900		VM_BUG_ON_FOLIO(!pvmw.pte, folio);
1901
1902		if (folio_is_zone_device(folio)) {
1903			/*
1904			 * Our PTE is a non-present device exclusive entry and
1905			 * calculating the subpage as for the common case would
1906			 * result in an invalid pointer.
1907			 *
1908			 * Since only PAGE_SIZE pages can currently be
1909			 * migrated, just set it to page. This will need to be
1910			 * changed when hugepage migrations to device private
1911			 * memory are supported.
1912			 */
1913			VM_BUG_ON_FOLIO(folio_nr_pages(folio) > 1, folio);
1914			subpage = &folio->page;
1915		} else {
1916			subpage = folio_page(folio,
1917					pte_pfn(*pvmw.pte) - folio_pfn(folio));
1918		}
1919		address = pvmw.address;
1920		anon_exclusive = folio_test_anon(folio) &&
1921				 PageAnonExclusive(subpage);
1922
1923		if (folio_test_hugetlb(folio)) {
1924			bool anon = folio_test_anon(folio);
1925
1926			/*
1927			 * huge_pmd_unshare may unmap an entire PMD page.
1928			 * There is no way of knowing exactly which PMDs may
1929			 * be cached for this mm, so we must flush them all.
1930			 * start/end were already adjusted above to cover this
1931			 * range.
1932			 */
1933			flush_cache_range(vma, range.start, range.end);
1934
1935			/*
1936			 * To call huge_pmd_unshare, i_mmap_rwsem must be
1937			 * held in write mode.  Caller needs to explicitly
1938			 * do this outside rmap routines.
1939			 *
1940			 * We also must hold hugetlb vma_lock in write mode.
1941			 * Lock order dictates acquiring vma_lock BEFORE
1942			 * i_mmap_rwsem.  We can only try lock here and
1943			 * fail if unsuccessful.
1944			 */
1945			if (!anon) {
1946				VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
1947				if (!hugetlb_vma_trylock_write(vma)) {
1948					page_vma_mapped_walk_done(&pvmw);
1949					ret = false;
1950					break;
1951				}
1952				if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) {
1953					hugetlb_vma_unlock_write(vma);
1954					flush_tlb_range(vma,
1955						range.start, range.end);
1956					mmu_notifier_invalidate_range(mm,
1957						range.start, range.end);
1958
1959					/*
1960					 * The ref count of the PMD page was
1961					 * dropped which is part of the way map
1962					 * counting is done for shared PMDs.
1963					 * Return 'true' here.  When there is
1964					 * no other sharing, huge_pmd_unshare
1965					 * returns false and we will unmap the
1966					 * actual page and drop map count
1967					 * to zero.
1968					 */
1969					page_vma_mapped_walk_done(&pvmw);
1970					break;
1971				}
1972				hugetlb_vma_unlock_write(vma);
1973			}
1974			/* Nuke the hugetlb page table entry */
1975			pteval = huge_ptep_clear_flush(vma, address, pvmw.pte);
1976		} else {
1977			flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
1978			/* Nuke the page table entry. */
1979			pteval = ptep_clear_flush(vma, address, pvmw.pte);
1980		}
1981
1982		/* Set the dirty flag on the folio now the pte is gone. */
1983		if (pte_dirty(pteval))
1984			folio_mark_dirty(folio);
1985
1986		/* Update high watermark before we lower rss */
1987		update_hiwater_rss(mm);
1988
1989		if (folio_is_device_private(folio)) {
1990			unsigned long pfn = folio_pfn(folio);
1991			swp_entry_t entry;
1992			pte_t swp_pte;
1993
1994			if (anon_exclusive)
1995				BUG_ON(page_try_share_anon_rmap(subpage));
1996
1997			/*
1998			 * Store the pfn of the page in a special migration
1999			 * pte. do_swap_page() will wait until the migration
2000			 * pte is removed and then restart fault handling.
2001			 */
2002			entry = pte_to_swp_entry(pteval);
2003			if (is_writable_device_private_entry(entry))
2004				entry = make_writable_migration_entry(pfn);
2005			else if (anon_exclusive)
2006				entry = make_readable_exclusive_migration_entry(pfn);
2007			else
2008				entry = make_readable_migration_entry(pfn);
2009			swp_pte = swp_entry_to_pte(entry);
2010
2011			/*
2012			 * pteval maps a zone device page and is therefore
2013			 * a swap pte.
2014			 */
2015			if (pte_swp_soft_dirty(pteval))
2016				swp_pte = pte_swp_mksoft_dirty(swp_pte);
2017			if (pte_swp_uffd_wp(pteval))
2018				swp_pte = pte_swp_mkuffd_wp(swp_pte);
2019			set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
2020			trace_set_migration_pte(pvmw.address, pte_val(swp_pte),
2021						compound_order(&folio->page));
2022			/*
2023			 * No need to invalidate here: it will synchronize
2024			 * against the special swap migration pte.
2025			 */
2026		} else if (PageHWPoison(subpage)) {
2027			pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
2028			if (folio_test_hugetlb(folio)) {
2029				hugetlb_count_sub(folio_nr_pages(folio), mm);
2030				set_huge_pte_at(mm, address, pvmw.pte, pteval);
2031			} else {
2032				dec_mm_counter(mm, mm_counter(&folio->page));
2033				set_pte_at(mm, address, pvmw.pte, pteval);
2034			}
2035
2036		} else if (pte_unused(pteval) && !userfaultfd_armed(vma)) {
2037			/*
2038			 * The guest indicated that the page content is of no
2039			 * interest anymore. Simply discard the pte, vmscan
2040			 * will take care of the rest.
2041			 * A future reference will then fault in a new zero
2042			 * page. When userfaultfd is active, we must not drop
2043			 * this page though, as its main user (postcopy
2044			 * migration) will not expect userfaults on already
2045			 * copied pages.
2046			 */
2047			dec_mm_counter(mm, mm_counter(&folio->page));
2048			/* We have to invalidate as we cleared the pte */
2049			mmu_notifier_invalidate_range(mm, address,
2050						      address + PAGE_SIZE);
2051		} else {
2052			swp_entry_t entry;
2053			pte_t swp_pte;
2054
2055			if (arch_unmap_one(mm, vma, address, pteval) < 0) {
2056				if (folio_test_hugetlb(folio))
2057					set_huge_pte_at(mm, address, pvmw.pte, pteval);
2058				else
2059					set_pte_at(mm, address, pvmw.pte, pteval);
2060				ret = false;
2061				page_vma_mapped_walk_done(&pvmw);
2062				break;
2063			}
2064			VM_BUG_ON_PAGE(pte_write(pteval) && folio_test_anon(folio) &&
2065				       !anon_exclusive, subpage);
2066
2067			/* See page_try_share_anon_rmap(): clear PTE first. */
2068			if (anon_exclusive &&
2069			    page_try_share_anon_rmap(subpage)) {
2070				if (folio_test_hugetlb(folio))
2071					set_huge_pte_at(mm, address, pvmw.pte, pteval);
2072				else
2073					set_pte_at(mm, address, pvmw.pte, pteval);
2074				ret = false;
2075				page_vma_mapped_walk_done(&pvmw);
2076				break;
2077			}
2078
2079			/*
2080			 * Store the pfn of the page in a special migration
2081			 * pte. do_swap_page() will wait until the migration
2082			 * pte is removed and then restart fault handling.
2083			 */
2084			if (pte_write(pteval))
2085				entry = make_writable_migration_entry(
2086							page_to_pfn(subpage));
2087			else if (anon_exclusive)
2088				entry = make_readable_exclusive_migration_entry(
2089							page_to_pfn(subpage));
2090			else
2091				entry = make_readable_migration_entry(
2092							page_to_pfn(subpage));
2093			if (pte_young(pteval))
2094				entry = make_migration_entry_young(entry);
2095			if (pte_dirty(pteval))
2096				entry = make_migration_entry_dirty(entry);
2097			swp_pte = swp_entry_to_pte(entry);
2098			if (pte_soft_dirty(pteval))
2099				swp_pte = pte_swp_mksoft_dirty(swp_pte);
2100			if (pte_uffd_wp(pteval))
2101				swp_pte = pte_swp_mkuffd_wp(swp_pte);
2102			if (folio_test_hugetlb(folio))
2103				set_huge_pte_at(mm, address, pvmw.pte, swp_pte);
2104			else
2105				set_pte_at(mm, address, pvmw.pte, swp_pte);
2106			trace_set_migration_pte(address, pte_val(swp_pte),
2107						compound_order(&folio->page));
2108			/*
2109			 * No need to invalidate here: it will synchronize
2110			 * against the special swap migration pte.
2111			 */
2112		}
2113
2114		/*
2115		 * No need to call mmu_notifier_invalidate_range(): it has been
2116		 * done above for all cases requiring it to happen under page
2117		 * table lock before mmu_notifier_invalidate_range_end().
2118		 *
2119		 * See Documentation/mm/mmu_notifier.rst
2120		 */
2121		page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
2122		if (vma->vm_flags & VM_LOCKED)
2123			mlock_page_drain_local();
2124		folio_put(folio);
2125	}
2126
2127	mmu_notifier_invalidate_range_end(&range);
2128
2129	return ret;
2130}
2131
2132/**
2133 * try_to_migrate - try to replace all page table mappings with swap entries
2134 * @folio: the folio to replace page table entries for
2135 * @flags: action and flags
2136 *
2137 * Tries to remove all the page table entries which are mapping this folio and
2138 * replace them with special swap entries. Caller must hold the folio lock.
2139 */
2140void try_to_migrate(struct folio *folio, enum ttu_flags flags)
2141{
2142	struct rmap_walk_control rwc = {
2143		.rmap_one = try_to_migrate_one,
2144		.arg = (void *)flags,
2145		.done = folio_not_mapped,
2146		.anon_lock = folio_lock_anon_vma_read,
2147	};
2148
2149	/*
2150	 * Migration always ignores mlock and only supports the TTU_RMAP_LOCKED,
2151	 * TTU_SPLIT_HUGE_PMD and TTU_SYNC flags.
2152	 */
2153	if (WARN_ON_ONCE(flags & ~(TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD |
2154					TTU_SYNC)))
2155		return;
2156
2157	if (folio_is_zone_device(folio) &&
2158	    (!folio_is_device_private(folio) && !folio_is_device_coherent(folio)))
2159		return;
2160
2161	/*
2162	 * During exec, a temporary VMA is set up and later moved.
2163	 * The VMA is moved under the anon_vma lock but not the
2164	 * page tables leading to a race where migration cannot
2165	 * find the migration ptes. Rather than increasing the
2166	 * locking requirements of exec(), migration skips
2167	 * temporary VMAs until after exec() completes.
2168	 */
2169	if (!folio_test_ksm(folio) && folio_test_anon(folio))
2170		rwc.invalid_vma = invalid_migration_vma;
2171
2172	if (flags & TTU_RMAP_LOCKED)
2173		rmap_walk_locked(folio, &rwc);
2174	else
2175		rmap_walk(folio, &rwc);
2176}
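/*
 * Illustrative usage (editor's sketch, loosely based on the unmap step of
 * folio migration in mm/migrate.c; details are version dependent):
 *
 *	if (folio_mapped(src)) {
 *		try_to_migrate(src, 0);
 *		page_was_mapped = true;
 *	}
 *	... copy src to dst and switch the mapping over ...
 *	if (page_was_mapped)
 *		remove_migration_ptes(src, dst, false);
 */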
2177
2178#ifdef CONFIG_DEVICE_PRIVATE
2179struct make_exclusive_args {
2180	struct mm_struct *mm;
2181	unsigned long address;
2182	void *owner;
2183	bool valid;
2184};
2185
2186static bool page_make_device_exclusive_one(struct folio *folio,
2187		struct vm_area_struct *vma, unsigned long address, void *priv)
2188{
2189	struct mm_struct *mm = vma->vm_mm;
2190	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
2191	struct make_exclusive_args *args = priv;
2192	pte_t pteval;
2193	struct page *subpage;
2194	bool ret = true;
2195	struct mmu_notifier_range range;
2196	swp_entry_t entry;
2197	pte_t swp_pte;
2198
2199	mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, vma,
2200				      vma->vm_mm, address, min(vma->vm_end,
2201				      address + folio_size(folio)),
2202				      args->owner);
2203	mmu_notifier_invalidate_range_start(&range);
2204
2205	while (page_vma_mapped_walk(&pvmw)) {
2206		/* Unexpected PMD-mapped THP? */
2207		VM_BUG_ON_FOLIO(!pvmw.pte, folio);
2208
2209		if (!pte_present(*pvmw.pte)) {
2210			ret = false;
2211			page_vma_mapped_walk_done(&pvmw);
2212			break;
2213		}
2214
2215		subpage = folio_page(folio,
2216				pte_pfn(*pvmw.pte) - folio_pfn(folio));
2217		address = pvmw.address;
2218
2219		/* Nuke the page table entry. */
2220		flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
2221		pteval = ptep_clear_flush(vma, address, pvmw.pte);
2222
2223		/* Set the dirty flag on the folio now the pte is gone. */
2224		if (pte_dirty(pteval))
2225			folio_mark_dirty(folio);
2226
2227		/*
2228		 * Check that our target page is still mapped at the expected
2229		 * address.
2230		 */
2231		if (args->mm == mm && args->address == address &&
2232		    pte_write(pteval))
2233			args->valid = true;
2234
2235		/*
2236		 * Store the pfn of the page in a special device exclusive
2237		 * pte. On CPU fault, do_swap_page() restores the original
2238		 * mapping after invoking the MMU_NOTIFY_EXCLUSIVE notifier.
2239		 */
2240		if (pte_write(pteval))
2241			entry = make_writable_device_exclusive_entry(
2242							page_to_pfn(subpage));
2243		else
2244			entry = make_readable_device_exclusive_entry(
2245							page_to_pfn(subpage));
2246		swp_pte = swp_entry_to_pte(entry);
2247		if (pte_soft_dirty(pteval))
2248			swp_pte = pte_swp_mksoft_dirty(swp_pte);
2249		if (pte_uffd_wp(pteval))
2250			swp_pte = pte_swp_mkuffd_wp(swp_pte);
2251
2252		set_pte_at(mm, address, pvmw.pte, swp_pte);
2253
2254		/*
2255		 * The reference the PTE held on the page now belongs to the
2256		 * device exclusive swap entry, so don't take another one.
2257		 */
2258		page_remove_rmap(subpage, vma, false);
2259	}
2260
2261	mmu_notifier_invalidate_range_end(&range);
2262
2263	return ret;
2264}
2265
2266/**
2267 * folio_make_device_exclusive - Mark the folio exclusively owned by a device.
2268 * @folio: The folio to replace page table entries for.
2269 * @mm: The mm_struct where the folio is expected to be mapped.
2270 * @address: Address where the folio is expected to be mapped.
2271 * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier callbacks
2272 *
2273 * Tries to remove all the page table entries which are mapping this
2274 * folio and replace them with special device exclusive swap entries to
2275 * grant a device exclusive access to the folio.
2276 *
2277 * Context: Caller must hold the folio lock.
2278 * Return: false if the page is still mapped, or if it could not be unmapped
2279 * from the expected address. Otherwise returns true (success).
2280 */
2281static bool folio_make_device_exclusive(struct folio *folio,
2282		struct mm_struct *mm, unsigned long address, void *owner)
2283{
2284	struct make_exclusive_args args = {
2285		.mm = mm,
2286		.address = address,
2287		.owner = owner,
2288		.valid = false,
2289	};
2290	struct rmap_walk_control rwc = {
2291		.rmap_one = page_make_device_exclusive_one,
2292		.done = folio_not_mapped,
2293		.anon_lock = folio_lock_anon_vma_read,
2294		.arg = &args,
2295	};
2296
2297	/*
2298	 * Restrict to anonymous folios for now to avoid potential writeback
2299	 * issues.
2300	 */
2301	if (!folio_test_anon(folio))
2302		return false;
2303
2304	rmap_walk(folio, &rwc);
2305
2306	return args.valid && !folio_mapcount(folio);
2307}
2308
2309/**
2310 * make_device_exclusive_range() - Mark a range for exclusive use by a device
2311 * @mm: mm_struct of associated target process
2312 * @start: start of the region to mark for exclusive device access
2313 * @end: end address of region
2314 * @pages: returns the pages which were successfully marked for exclusive access
2315 * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier to allow filtering
2316 *
2317 * Returns: number of pages found in the range by GUP. A page is marked for
2318 * exclusive access only if its slot in @pages is non-NULL.
2319 *
2320 * This function finds ptes mapping page(s) to the given address range, locks
2321 * them and replaces mappings with special swap entries preventing userspace CPU
2322 * access. On fault these entries are replaced with the original mapping after
2323 * calling MMU notifiers.
2324 *
2325 * A driver using this to program access from a device must use a mmu notifier
2326 * critical section to hold a device specific lock during programming. Once
2327 * programming is complete it should drop the page lock and reference, after
2328 * which point CPU access to the page will revoke the exclusive access.
2329 */
2330int make_device_exclusive_range(struct mm_struct *mm, unsigned long start,
2331				unsigned long end, struct page **pages,
2332				void *owner)
2333{
2334	long npages = (end - start) >> PAGE_SHIFT;
2335	long i;
2336
2337	npages = get_user_pages_remote(mm, start, npages,
2338				       FOLL_GET | FOLL_WRITE | FOLL_SPLIT_PMD,
2339				       pages, NULL, NULL);
2340	if (npages < 0)
2341		return npages;
2342
2343	for (i = 0; i < npages; i++, start += PAGE_SIZE) {
2344		struct folio *folio = page_folio(pages[i]);
2345		if (PageTail(pages[i]) || !folio_trylock(folio)) {
2346			folio_put(folio);
2347			pages[i] = NULL;
2348			continue;
2349		}
2350
2351		if (!folio_make_device_exclusive(folio, mm, start, owner)) {
2352			folio_unlock(folio);
2353			folio_put(folio);
2354			pages[i] = NULL;
2355		}
2356	}
2357
2358	return npages;
2359}
2360EXPORT_SYMBOL_GPL(make_device_exclusive_range);
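/*
 * Illustrative driver-side usage (editor's sketch; "my_owner" and the
 * surrounding driver logic are hypothetical, not taken from this file):
 *
 *	struct page *page = NULL;
 *	int ret;
 *
 *	mmap_read_lock(mm);
 *	ret = make_device_exclusive_range(mm, addr, addr + PAGE_SIZE,
 *					  &page, my_owner);
 *	mmap_read_unlock(mm);
 *	if (ret == 1 && page) {
 *		// program the device mapping under a driver lock that
 *		// the MMU_NOTIFY_EXCLUSIVE callback also takes, then:
 *		unlock_page(page);
 *		put_page(page);
 *	}
 */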
2361#endif
2362
2363void __put_anon_vma(struct anon_vma *anon_vma)
2364{
2365	struct anon_vma *root = anon_vma->root;
2366
2367	anon_vma_free(anon_vma);
2368	if (root != anon_vma && atomic_dec_and_test(&root->refcount))
2369		anon_vma_free(root);
2370}
2371
2372static struct anon_vma *rmap_walk_anon_lock(struct folio *folio,
2373					    struct rmap_walk_control *rwc)
2374{
2375	struct anon_vma *anon_vma;
2376
2377	if (rwc->anon_lock)
2378		return rwc->anon_lock(folio, rwc);
2379
2380	/*
2381	 * Note: remove_migration_ptes() cannot use folio_lock_anon_vma_read()
2382	 * because that depends on page_mapped(); but not all its usages
2383	 * are holding mmap_lock. Users without mmap_lock are required to
2384	 * hold a reference count to prevent the anon_vma from disappearing.
2385	 */
2386	anon_vma = folio_anon_vma(folio);
2387	if (!anon_vma)
2388		return NULL;
2389
2390	if (anon_vma_trylock_read(anon_vma))
2391		goto out;
2392
2393	if (rwc->try_lock) {
2394		anon_vma = NULL;
2395		rwc->contended = true;
2396		goto out;
2397	}
2398
2399	anon_vma_lock_read(anon_vma);
2400out:
2401	return anon_vma;
2402}
2403
2404/*
2405 * rmap_walk_anon - do something to an anonymous folio using the
2406 * object-based rmap method
2407 * @folio: the folio to be handled
2408 * @rwc: control variable according to each walk type
2409 *
2410 * Find all the mappings of the folio using the mapping pointer and the vma
2411 * chains contained in the anon_vma struct it points to.
2412 */
2413static void rmap_walk_anon(struct folio *folio,
2414		struct rmap_walk_control *rwc, bool locked)
2415{
2416	struct anon_vma *anon_vma;
2417	pgoff_t pgoff_start, pgoff_end;
2418	struct anon_vma_chain *avc;
2419
2420	if (locked) {
2421		anon_vma = folio_anon_vma(folio);
2422		/* anon_vma disappear under us? */
2423		VM_BUG_ON_FOLIO(!anon_vma, folio);
2424	} else {
2425		anon_vma = rmap_walk_anon_lock(folio, rwc);
2426	}
2427	if (!anon_vma)
2428		return;
2429
2430	pgoff_start = folio_pgoff(folio);
2431	pgoff_end = pgoff_start + folio_nr_pages(folio) - 1;
2432	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
2433			pgoff_start, pgoff_end) {
2434		struct vm_area_struct *vma = avc->vma;
2435		unsigned long address = vma_address(&folio->page, vma);
2436
2437		VM_BUG_ON_VMA(address == -EFAULT, vma);
2438		cond_resched();
2439
2440		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
2441			continue;
2442
2443		if (!rwc->rmap_one(folio, vma, address, rwc->arg))
2444			break;
2445		if (rwc->done && rwc->done(folio))
2446			break;
2447	}
2448
2449	if (!locked)
2450		anon_vma_unlock_read(anon_vma);
2451}
2452
2453/*
2454 * rmap_walk_file - do something to a file folio using the object-based rmap method
2455 * @folio: the folio to be handled
2456 * @rwc: control variable according to each walk type
2457 *
2458 * Find all the mappings of the folio using the mapping pointer and the vma
2459 * chains contained in the address_space struct it points to.
2460 */
2461static void rmap_walk_file(struct folio *folio,
2462		struct rmap_walk_control *rwc, bool locked)
2463{
2464	struct address_space *mapping = folio_mapping(folio);
2465	pgoff_t pgoff_start, pgoff_end;
2466	struct vm_area_struct *vma;
2467
2468	/*
2469	 * The page lock not only makes sure that page->mapping cannot
2470	 * suddenly be NULLified by truncation, it makes sure that the
2471	 * structure at mapping cannot be freed and reused yet,
2472	 * so we can safely take mapping->i_mmap_rwsem.
2473	 */
2474	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
2475
2476	if (!mapping)
2477		return;
2478
2479	pgoff_start = folio_pgoff(folio);
2480	pgoff_end = pgoff_start + folio_nr_pages(folio) - 1;
2481	if (!locked) {
2482		if (i_mmap_trylock_read(mapping))
2483			goto lookup;
2484
2485		if (rwc->try_lock) {
2486			rwc->contended = true;
2487			return;
2488		}
2489
2490		i_mmap_lock_read(mapping);
2491	}
2492lookup:
2493	vma_interval_tree_foreach(vma, &mapping->i_mmap,
2494			pgoff_start, pgoff_end) {
2495		unsigned long address = vma_address(&folio->page, vma);
2496
2497		VM_BUG_ON_VMA(address == -EFAULT, vma);
2498		cond_resched();
2499
2500		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
2501			continue;
2502
2503		if (!rwc->rmap_one(folio, vma, address, rwc->arg))
2504			goto done;
2505		if (rwc->done && rwc->done(folio))
2506			goto done;
2507	}
2508
2509done:
2510	if (!locked)
2511		i_mmap_unlock_read(mapping);
2512}
2513
2514void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc)
2515{
2516	if (unlikely(folio_test_ksm(folio)))
2517		rmap_walk_ksm(folio, rwc);
2518	else if (folio_test_anon(folio))
2519		rmap_walk_anon(folio, rwc, false);
2520	else
2521		rmap_walk_file(folio, rwc, false);
2522}
2523
2524/* Like rmap_walk, but caller holds relevant rmap lock */
2525void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc)
2526{
2527	/* no ksm support for now */
2528	VM_BUG_ON_FOLIO(folio_test_ksm(folio), folio);
2529	if (folio_test_anon(folio))
2530		rmap_walk_anon(folio, rwc, true);
2531	else
2532		rmap_walk_file(folio, rwc, true);
2533}
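/*
 * Illustrative sketch of a custom walk (editor's example; my_rmap_one and
 * its behaviour are hypothetical):
 *
 *	static bool my_rmap_one(struct folio *folio,
 *			struct vm_area_struct *vma, unsigned long address,
 *			void *arg)
 *	{
 *		// inspect or modify the mapping at address; return true
 *		// to continue the walk, false to stop it early
 *		return true;
 *	}
 *
 *	struct rmap_walk_control rwc = {
 *		.rmap_one = my_rmap_one,
 *	};
 *	rmap_walk(folio, &rwc);
 */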
2534
2535#ifdef CONFIG_HUGETLB_PAGE
2536/*
2537 * The following two functions are for anonymous (private mapped) hugepages.
2538 * Unlike common anonymous pages, anonymous hugepages have no accounting code
2539 * and no lru code, because we handle hugepages differently from common pages.
2540 *
2541 * RMAP_COMPOUND is ignored.
2542 */
2543void hugepage_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
2544			    unsigned long address, rmap_t flags)
2545{
2546	struct anon_vma *anon_vma = vma->anon_vma;
2547	int first;
2548
2549	BUG_ON(!PageLocked(page));
2550	BUG_ON(!anon_vma);
2551	/* address might be in next vma when migration races vma_adjust */
2552	first = atomic_inc_and_test(compound_mapcount_ptr(page));
2553	VM_BUG_ON_PAGE(!first && (flags & RMAP_EXCLUSIVE), page);
2554	VM_BUG_ON_PAGE(!first && PageAnonExclusive(page), page);
2555	if (first)
2556		__page_set_anon_rmap(page, vma, address,
2557				     !!(flags & RMAP_EXCLUSIVE));
2558}
2559
2560void hugepage_add_new_anon_rmap(struct page *page,
2561			struct vm_area_struct *vma, unsigned long address)
2562{
2563	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
2564	/* increment count (starts at -1) */
2565	atomic_set(compound_mapcount_ptr(page), 0);
2566	ClearHPageRestoreReserve(page);
2567	__page_set_anon_rmap(page, vma, address, 1);
2568}
2569#endif /* CONFIG_HUGETLB_PAGE */