v6.2
   1/*
   2 * mm/rmap.c - physical to virtual reverse mappings
   3 *
   4 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
   5 * Released under the General Public License (GPL).
   6 *
   7 * Simple, low overhead reverse mapping scheme.
   8 * Please try to keep this thing as modular as possible.
   9 *
  10 * Provides methods for unmapping each kind of mapped page:
  11 * the anon methods track anonymous pages, and
  12 * the file methods track pages belonging to an inode.
  13 *
  14 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
  15 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
  16 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
  17 * Contributions by Hugh Dickins 2003, 2004
  18 */
  19
  20/*
  21 * Lock ordering in mm:
  22 *
  23 * inode->i_rwsem	(while writing or truncating, not reading or faulting)
  24 *   mm->mmap_lock
  25 *     mapping->invalidate_lock (in filemap_fault)
  26 *       page->flags PG_locked (lock_page)
  27 *         hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share, see hugetlbfs below)
  28 *           mapping->i_mmap_rwsem
  29 *             anon_vma->rwsem
  30 *               mm->page_table_lock or pte_lock
  31 *                 swap_lock (in swap_duplicate, swap_info_get)
  32 *                   mmlist_lock (in mmput, drain_mmlist and others)
  33 *                   mapping->private_lock (in block_dirty_folio)
  34 *                     folio_lock_memcg move_lock (in block_dirty_folio)
  35 *                       i_pages lock (widely used)
  36 *                         lruvec->lru_lock (in folio_lruvec_lock_irq)
  37 *                   inode->i_lock (in set_page_dirty's __mark_inode_dirty)
  38 *                   bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
  39 *                     sb_lock (within inode_lock in fs/fs-writeback.c)
  40 *                     i_pages lock (widely used, in set_page_dirty,
  41 *                               in arch-dependent flush_dcache_mmap_lock,
  42 *                               within bdi.wb->list_lock in __sync_single_inode)
  43 *
  44 * anon_vma->rwsem,mapping->i_mmap_rwsem   (memory_failure, collect_procs_anon)
  45 *   ->tasklist_lock
  46 *     pte map lock
  47 *
   48 * hugetlbfs PageHuge() pages: locks are taken in this order:
  49 *   hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
  50 *     vma_lock (hugetlb specific lock for pmd_sharing)
  51 *       mapping->i_mmap_rwsem (also used for hugetlb pmd sharing)
  52 *         page->flags PG_locked (lock_page)
  53 */
  54
  55#include <linux/mm.h>
  56#include <linux/sched/mm.h>
  57#include <linux/sched/task.h>
  58#include <linux/pagemap.h>
  59#include <linux/swap.h>
  60#include <linux/swapops.h>
  61#include <linux/slab.h>
  62#include <linux/init.h>
  63#include <linux/ksm.h>
  64#include <linux/rmap.h>
  65#include <linux/rcupdate.h>
  66#include <linux/export.h>
  67#include <linux/memcontrol.h>
  68#include <linux/mmu_notifier.h>
  69#include <linux/migrate.h>
  70#include <linux/hugetlb.h>
  71#include <linux/huge_mm.h>
  72#include <linux/backing-dev.h>
  73#include <linux/page_idle.h>
  74#include <linux/memremap.h>
  75#include <linux/userfaultfd_k.h>
  76#include <linux/mm_inline.h>
  77
  78#include <asm/tlbflush.h>
  79
  80#define CREATE_TRACE_POINTS
  81#include <trace/events/tlb.h>
  82#include <trace/events/migrate.h>
  83
  84#include "internal.h"
  85
  86static struct kmem_cache *anon_vma_cachep;
  87static struct kmem_cache *anon_vma_chain_cachep;
  88
  89static inline struct anon_vma *anon_vma_alloc(void)
  90{
  91	struct anon_vma *anon_vma;
  92
  93	anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
  94	if (anon_vma) {
  95		atomic_set(&anon_vma->refcount, 1);
  96		anon_vma->num_children = 0;
  97		anon_vma->num_active_vmas = 0;
  98		anon_vma->parent = anon_vma;
  99		/*
 100		 * Initialise the anon_vma root to point to itself. If called
  101		 * from fork, the root will be reset to the parent's anon_vma.
 102		 */
 103		anon_vma->root = anon_vma;
 104	}
 105
 106	return anon_vma;
 107}
 108
 109static inline void anon_vma_free(struct anon_vma *anon_vma)
 110{
 111	VM_BUG_ON(atomic_read(&anon_vma->refcount));
 112
 113	/*
 114	 * Synchronize against folio_lock_anon_vma_read() such that
 115	 * we can safely hold the lock without the anon_vma getting
 116	 * freed.
 117	 *
 118	 * Relies on the full mb implied by the atomic_dec_and_test() from
 119	 * put_anon_vma() against the acquire barrier implied by
 120	 * down_read_trylock() from folio_lock_anon_vma_read(). This orders:
 121	 *
 122	 * folio_lock_anon_vma_read()	VS	put_anon_vma()
 123	 *   down_read_trylock()		  atomic_dec_and_test()
 124	 *   LOCK				  MB
 125	 *   atomic_read()			  rwsem_is_locked()
 126	 *
 127	 * LOCK should suffice since the actual taking of the lock must
 128	 * happen _before_ what follows.
 129	 */
 130	might_sleep();
 131	if (rwsem_is_locked(&anon_vma->root->rwsem)) {
 132		anon_vma_lock_write(anon_vma);
 133		anon_vma_unlock_write(anon_vma);
 134	}
 135
 136	kmem_cache_free(anon_vma_cachep, anon_vma);
 137}
 138
 139static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
 140{
 141	return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
 142}
 143
 144static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
 145{
 146	kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
 147}
 148
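     /*
      * Link @vma and @anon_vma through @avc: the chain is added to the
      * vma's anon_vma_chain list and the avc is inserted into the
      * anon_vma's interval tree, so rmap walks over this anon_vma can
      * find the vma.
      */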
 149static void anon_vma_chain_link(struct vm_area_struct *vma,
 150				struct anon_vma_chain *avc,
 151				struct anon_vma *anon_vma)
 152{
 153	avc->vma = vma;
 154	avc->anon_vma = anon_vma;
 155	list_add(&avc->same_vma, &vma->anon_vma_chain);
 156	anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
 157}
 158
 159/**
 160 * __anon_vma_prepare - attach an anon_vma to a memory region
 161 * @vma: the memory region in question
 162 *
 163 * This makes sure the memory mapping described by 'vma' has
 164 * an 'anon_vma' attached to it, so that we can associate the
 165 * anonymous pages mapped into it with that anon_vma.
 166 *
 167 * The common case will be that we already have one, which
 168 * is handled inline by anon_vma_prepare(). But if
 169 * not we either need to find an adjacent mapping that we
 170 * can re-use the anon_vma from (very common when the only
 171 * reason for splitting a vma has been mprotect()), or we
 172 * allocate a new one.
 173 *
 174 * Anon-vma allocations are very subtle, because we may have
 175 * optimistically looked up an anon_vma in folio_lock_anon_vma_read()
 176 * and that may actually touch the rwsem even in the newly
 177 * allocated vma (it depends on RCU to make sure that the
 178 * anon_vma isn't actually destroyed).
 179 *
 180 * As a result, we need to do proper anon_vma locking even
 181 * for the new allocation. At the same time, we do not want
 182 * to do any locking for the common case of already having
 183 * an anon_vma.
 184 *
 185 * This must be called with the mmap_lock held for reading.
 186 */
 187int __anon_vma_prepare(struct vm_area_struct *vma)
 188{
 189	struct mm_struct *mm = vma->vm_mm;
 190	struct anon_vma *anon_vma, *allocated;
 191	struct anon_vma_chain *avc;
 192
 193	might_sleep();
 194
 195	avc = anon_vma_chain_alloc(GFP_KERNEL);
 196	if (!avc)
 197		goto out_enomem;
 198
 199	anon_vma = find_mergeable_anon_vma(vma);
 200	allocated = NULL;
 201	if (!anon_vma) {
 202		anon_vma = anon_vma_alloc();
 203		if (unlikely(!anon_vma))
 204			goto out_enomem_free_avc;
 205		anon_vma->num_children++; /* self-parent link for new root */
 206		allocated = anon_vma;
 207	}
 208
 209	anon_vma_lock_write(anon_vma);
 210	/* page_table_lock to protect against threads */
 211	spin_lock(&mm->page_table_lock);
 212	if (likely(!vma->anon_vma)) {
 213		vma->anon_vma = anon_vma;
 214		anon_vma_chain_link(vma, avc, anon_vma);
 215		anon_vma->num_active_vmas++;
 216		allocated = NULL;
 217		avc = NULL;
 218	}
 219	spin_unlock(&mm->page_table_lock);
 220	anon_vma_unlock_write(anon_vma);
 221
 222	if (unlikely(allocated))
 223		put_anon_vma(allocated);
 224	if (unlikely(avc))
 225		anon_vma_chain_free(avc);
 226
 227	return 0;
 228
 229 out_enomem_free_avc:
 230	anon_vma_chain_free(avc);
 231 out_enomem:
 232	return -ENOMEM;
 233}
 234
 235/*
 236 * This is a useful helper function for locking the anon_vma root as
 237 * we traverse the vma->anon_vma_chain, looping over anon_vma's that
 238 * have the same vma.
 239 *
 240 * Such anon_vma's should have the same root, so you'd expect to see
 241 * just a single mutex_lock for the whole traversal.
 242 */
 243static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
 244{
 245	struct anon_vma *new_root = anon_vma->root;
 246	if (new_root != root) {
 247		if (WARN_ON_ONCE(root))
 248			up_write(&root->rwsem);
 249		root = new_root;
 250		down_write(&root->rwsem);
 251	}
 252	return root;
 253}
 254
 255static inline void unlock_anon_vma_root(struct anon_vma *root)
 256{
 257	if (root)
 258		up_write(&root->rwsem);
 259}
 260
 261/*
 262 * Attach the anon_vmas from src to dst.
 263 * Returns 0 on success, -ENOMEM on failure.
 264 *
 265 * anon_vma_clone() is called by __vma_adjust(), __split_vma(), copy_vma() and
 266 * anon_vma_fork(). The first three want an exact copy of src, while the last
 267 * one, anon_vma_fork(), may try to reuse an existing anon_vma to prevent
  268 * endless growth of anon_vma. Since dst->anon_vma is set to NULL before the call,
 269 * we can identify this case by checking (!dst->anon_vma && src->anon_vma).
 270 *
  271 * If (!dst->anon_vma && src->anon_vma) is true, this function tries to find
  272 * and reuse an existing anon_vma which has no vmas and only one child
  273 * anon_vma. This prevents the anon_vma hierarchy from degrading into an
  274 * endless linear chain in the case of a constantly forking task. On the
  275 * other hand, an anon_vma with more than one child isn't reused even if
  276 * there is no live vma, so the rmap walker has a good chance of avoiding a
  277 * scan of the whole hierarchy when it searches for where a page is mapped.
 278 */
 279int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
 280{
 281	struct anon_vma_chain *avc, *pavc;
 282	struct anon_vma *root = NULL;
 283
 284	list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
 285		struct anon_vma *anon_vma;
 286
 287		avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
 288		if (unlikely(!avc)) {
 289			unlock_anon_vma_root(root);
 290			root = NULL;
 291			avc = anon_vma_chain_alloc(GFP_KERNEL);
 292			if (!avc)
 293				goto enomem_failure;
 294		}
 295		anon_vma = pavc->anon_vma;
 296		root = lock_anon_vma_root(root, anon_vma);
 297		anon_vma_chain_link(dst, avc, anon_vma);
 298
 299		/*
 300		 * Reuse existing anon_vma if it has no vma and only one
 301		 * anon_vma child.
 302		 *
 303		 * Root anon_vma is never reused:
 304		 * it has self-parent reference and at least one child.
 305		 */
 306		if (!dst->anon_vma && src->anon_vma &&
 307		    anon_vma->num_children < 2 &&
 308		    anon_vma->num_active_vmas == 0)
 309			dst->anon_vma = anon_vma;
 310	}
 311	if (dst->anon_vma)
 312		dst->anon_vma->num_active_vmas++;
 313	unlock_anon_vma_root(root);
 314	return 0;
 315
 316 enomem_failure:
 317	/*
 318	 * dst->anon_vma is dropped here otherwise its num_active_vmas can
 319	 * be incorrectly decremented in unlink_anon_vmas().
 320	 * We can safely do this because callers of anon_vma_clone() don't care
 321	 * about dst->anon_vma if anon_vma_clone() failed.
 322	 */
 323	dst->anon_vma = NULL;
 324	unlink_anon_vmas(dst);
 325	return -ENOMEM;
 326}
 327
 328/*
 329 * Attach vma to its own anon_vma, as well as to the anon_vmas that
 330 * the corresponding VMA in the parent process is attached to.
 331 * Returns 0 on success, non-zero on failure.
 332 */
 333int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
 334{
 335	struct anon_vma_chain *avc;
 336	struct anon_vma *anon_vma;
 337	int error;
 338
 339	/* Don't bother if the parent process has no anon_vma here. */
 340	if (!pvma->anon_vma)
 341		return 0;
 342
 343	/* Drop inherited anon_vma, we'll reuse existing or allocate new. */
 344	vma->anon_vma = NULL;
 345
 346	/*
 347	 * First, attach the new VMA to the parent VMA's anon_vmas,
 348	 * so rmap can find non-COWed pages in child processes.
 349	 */
 350	error = anon_vma_clone(vma, pvma);
 351	if (error)
 352		return error;
 353
 354	/* An existing anon_vma has been reused, all done then. */
 355	if (vma->anon_vma)
 356		return 0;
 357
 358	/* Then add our own anon_vma. */
 359	anon_vma = anon_vma_alloc();
 360	if (!anon_vma)
 361		goto out_error;
 362	anon_vma->num_active_vmas++;
 363	avc = anon_vma_chain_alloc(GFP_KERNEL);
 364	if (!avc)
 365		goto out_error_free_anon_vma;
 366
 367	/*
 368	 * The root anon_vma's rwsem is the lock actually used when we
 369	 * lock any of the anon_vmas in this anon_vma tree.
 370	 */
 371	anon_vma->root = pvma->anon_vma->root;
 372	anon_vma->parent = pvma->anon_vma;
 373	/*
 374	 * With refcounts, an anon_vma can stay around longer than the
 375	 * process it belongs to. The root anon_vma needs to be pinned until
 376	 * this anon_vma is freed, because the lock lives in the root.
 377	 */
 378	get_anon_vma(anon_vma->root);
 379	/* Mark this anon_vma as the one where our new (COWed) pages go. */
 380	vma->anon_vma = anon_vma;
 381	anon_vma_lock_write(anon_vma);
 382	anon_vma_chain_link(vma, avc, anon_vma);
 383	anon_vma->parent->num_children++;
 384	anon_vma_unlock_write(anon_vma);
 385
 386	return 0;
 387
 388 out_error_free_anon_vma:
 389	put_anon_vma(anon_vma);
 390 out_error:
 391	unlink_anon_vmas(vma);
 392	return -ENOMEM;
 393}
 394
 395void unlink_anon_vmas(struct vm_area_struct *vma)
 396{
 397	struct anon_vma_chain *avc, *next;
 398	struct anon_vma *root = NULL;
 399
 400	/*
 401	 * Unlink each anon_vma chained to the VMA.  This list is ordered
 402	 * from newest to oldest, ensuring the root anon_vma gets freed last.
 403	 */
 404	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
 405		struct anon_vma *anon_vma = avc->anon_vma;
 406
 407		root = lock_anon_vma_root(root, anon_vma);
 408		anon_vma_interval_tree_remove(avc, &anon_vma->rb_root);
 409
 410		/*
 411		 * Leave empty anon_vmas on the list - we'll need
 412		 * to free them outside the lock.
 413		 */
 414		if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) {
 415			anon_vma->parent->num_children--;
 416			continue;
 417		}
 418
 419		list_del(&avc->same_vma);
 420		anon_vma_chain_free(avc);
 421	}
 422	if (vma->anon_vma) {
 423		vma->anon_vma->num_active_vmas--;
 424
 425		/*
 426		 * vma would still be needed after unlink, and anon_vma will be prepared
  427		 * The vma may still be used after unlink; a fresh anon_vma will
  428		 * be prepared when a fault is next handled.
 429		vma->anon_vma = NULL;
 430	}
 431	unlock_anon_vma_root(root);
 432
 433	/*
 434	 * Iterate the list once more, it now only contains empty and unlinked
 435	 * anon_vmas, destroy them. Could not do before due to __put_anon_vma()
 436	 * needing to write-acquire the anon_vma->root->rwsem.
 437	 */
 438	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
 439		struct anon_vma *anon_vma = avc->anon_vma;
 440
 441		VM_WARN_ON(anon_vma->num_children);
 442		VM_WARN_ON(anon_vma->num_active_vmas);
 443		put_anon_vma(anon_vma);
 444
 445		list_del(&avc->same_vma);
 446		anon_vma_chain_free(avc);
 447	}
 448}
 449
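     /*
      * Slab constructor: runs when the backing memory for an anon_vma object
      * is first set up. Because the cache is SLAB_TYPESAFE_BY_RCU, an object
      * may be freed and reallocated without the constructor running again, so
      * the rwsem and refcount initialised here must remain in a safe state
      * across reuse.
      */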
 450static void anon_vma_ctor(void *data)
 451{
 452	struct anon_vma *anon_vma = data;
 453
 454	init_rwsem(&anon_vma->rwsem);
 455	atomic_set(&anon_vma->refcount, 0);
 456	anon_vma->rb_root = RB_ROOT_CACHED;
 457}
 458
 459void __init anon_vma_init(void)
 460{
 461	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
 462			0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
 463			anon_vma_ctor);
 464	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
 465			SLAB_PANIC|SLAB_ACCOUNT);
 466}
 467
 468/*
 469 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
 470 *
  471 * Since there is no serialization whatsoever against page_remove_rmap()
 472 * the best this function can do is return a refcount increased anon_vma
 473 * that might have been relevant to this page.
 474 *
 475 * The page might have been remapped to a different anon_vma or the anon_vma
 476 * returned may already be freed (and even reused).
 477 *
 478 * In case it was remapped to a different anon_vma, the new anon_vma will be a
 479 * child of the old anon_vma, and the anon_vma lifetime rules will therefore
 480 * ensure that any anon_vma obtained from the page will still be valid for as
 481 * long as we observe page_mapped() [ hence all those page_mapped() tests ].
 482 *
 483 * All users of this function must be very careful when walking the anon_vma
 484 * chain and verify that the page in question is indeed mapped in it
 485 * [ something equivalent to page_mapped_in_vma() ].
 486 *
 487 * Since anon_vma's slab is SLAB_TYPESAFE_BY_RCU and we know from
 488 * page_remove_rmap() that the anon_vma pointer from page->mapping is valid
 489 * if there is a mapcount, we can dereference the anon_vma after observing
 490 * those.
 491 */
 492struct anon_vma *folio_get_anon_vma(struct folio *folio)
 493{
 494	struct anon_vma *anon_vma = NULL;
 495	unsigned long anon_mapping;
 496
 497	rcu_read_lock();
 498	anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
 499	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
 500		goto out;
 501	if (!folio_mapped(folio))
 502		goto out;
 503
 504	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
 505	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
 506		anon_vma = NULL;
 507		goto out;
 508	}
 509
 510	/*
 511	 * If this folio is still mapped, then its anon_vma cannot have been
 512	 * freed.  But if it has been unmapped, we have no security against the
 513	 * anon_vma structure being freed and reused (for another anon_vma:
 514	 * SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero()
 515	 * above cannot corrupt).
 516	 */
 517	if (!folio_mapped(folio)) {
 518		rcu_read_unlock();
 519		put_anon_vma(anon_vma);
 520		return NULL;
 521	}
 522out:
 523	rcu_read_unlock();
 524
 525	return anon_vma;
 526}
 527
 528/*
 529 * Similar to folio_get_anon_vma() except it locks the anon_vma.
 530 *
  531 * It's a little more complex as it tries to keep the fast path to a single
  532 * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
  533 * reference like with folio_get_anon_vma() and then block on the rwsem
  534 * in the !rwc->try_lock case.
 535 */
 536struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
 537					  struct rmap_walk_control *rwc)
 538{
 539	struct anon_vma *anon_vma = NULL;
 540	struct anon_vma *root_anon_vma;
 541	unsigned long anon_mapping;
 542
 543	rcu_read_lock();
 544	anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
 545	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
 546		goto out;
 547	if (!folio_mapped(folio))
 548		goto out;
 549
 550	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
 551	root_anon_vma = READ_ONCE(anon_vma->root);
 552	if (down_read_trylock(&root_anon_vma->rwsem)) {
 553		/*
 554		 * If the folio is still mapped, then this anon_vma is still
 555		 * its anon_vma, and holding the mutex ensures that it will
 556		 * not go away, see anon_vma_free().
 557		 */
 558		if (!folio_mapped(folio)) {
 559			up_read(&root_anon_vma->rwsem);
 560			anon_vma = NULL;
 561		}
 562		goto out;
 563	}
 564
 565	if (rwc && rwc->try_lock) {
 566		anon_vma = NULL;
 567		rwc->contended = true;
 568		goto out;
 569	}
 570
 571	/* trylock failed, we got to sleep */
 572	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
 573		anon_vma = NULL;
 574		goto out;
 575	}
 576
 577	if (!folio_mapped(folio)) {
 578		rcu_read_unlock();
 579		put_anon_vma(anon_vma);
 580		return NULL;
 581	}
 582
  583	/* we pinned the anon_vma, it's safe to sleep */
 584	rcu_read_unlock();
 585	anon_vma_lock_read(anon_vma);
 586
 587	if (atomic_dec_and_test(&anon_vma->refcount)) {
 588		/*
 589		 * Oops, we held the last refcount, release the lock
 590		 * and bail -- can't simply use put_anon_vma() because
 591		 * we'll deadlock on the anon_vma_lock_write() recursion.
 592		 */
 593		anon_vma_unlock_read(anon_vma);
 594		__put_anon_vma(anon_vma);
 595		anon_vma = NULL;
 596	}
 597
 598	return anon_vma;
 599
 600out:
 601	rcu_read_unlock();
 602	return anon_vma;
 603}
 604
 605#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
 606/*
 607 * Flush TLB entries for recently unmapped pages from remote CPUs. It is
 608 * important if a PTE was dirty when it was unmapped that it's flushed
 609 * before any IO is initiated on the page to prevent lost writes. Similarly,
 610 * it must be flushed before freeing to prevent data leakage.
 611 */
 612void try_to_unmap_flush(void)
 613{
 614	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
 615
 616	if (!tlb_ubc->flush_required)
 617		return;
 618
 619	arch_tlbbatch_flush(&tlb_ubc->arch);
 620	tlb_ubc->flush_required = false;
 621	tlb_ubc->writable = false;
 622}
 623
 624/* Flush iff there are potentially writable TLB entries that can race with IO */
 625void try_to_unmap_flush_dirty(void)
 626{
 627	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
 628
 629	if (tlb_ubc->writable)
 630		try_to_unmap_flush();
 631}
 632
 633/*
 634 * Bits 0-14 of mm->tlb_flush_batched record pending generations.
  635 * Bits 16-30 of mm->tlb_flush_batched record flushed generations.
 636 */
 637#define TLB_FLUSH_BATCH_FLUSHED_SHIFT	16
 638#define TLB_FLUSH_BATCH_PENDING_MASK			\
 639	((1 << (TLB_FLUSH_BATCH_FLUSHED_SHIFT - 1)) - 1)
 640#define TLB_FLUSH_BATCH_PENDING_LARGE			\
 641	(TLB_FLUSH_BATCH_PENDING_MASK / 2)
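     /*
      * For example, a tlb_flush_batched value of
      * (3 << TLB_FLUSH_BATCH_FLUSHED_SHIFT) | 5 encodes 5 pending and 3
      * flushed generations; flush_tlb_batched_pending() below flushes
      * whenever the two halves differ.
      */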
 642
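     /*
      * Queue a deferred TLB flush for @mm in the current task's batch and
      * advance the mm's pending generation counter. If the unmapped PTE was
      * dirty, mark the batch writable so that IO paths force a flush first.
      */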
 643static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
 644{
 645	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
 646	int batch, nbatch;
 647
 648	arch_tlbbatch_add_mm(&tlb_ubc->arch, mm);
 649	tlb_ubc->flush_required = true;
 650
 651	/*
 652	 * Ensure compiler does not re-order the setting of tlb_flush_batched
 653	 * before the PTE is cleared.
 654	 */
 655	barrier();
 656	batch = atomic_read(&mm->tlb_flush_batched);
 657retry:
 658	if ((batch & TLB_FLUSH_BATCH_PENDING_MASK) > TLB_FLUSH_BATCH_PENDING_LARGE) {
 659		/*
 660		 * Prevent `pending' from catching up with `flushed' because of
 661		 * overflow.  Reset `pending' and `flushed' to be 1 and 0 if
 662		 * `pending' becomes large.
 663		 */
 664		nbatch = atomic_cmpxchg(&mm->tlb_flush_batched, batch, 1);
 665		if (nbatch != batch) {
 666			batch = nbatch;
 667			goto retry;
 668		}
 669	} else {
 670		atomic_inc(&mm->tlb_flush_batched);
 671	}
 672
 673	/*
 674	 * If the PTE was dirty then it's best to assume it's writable. The
 675	 * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
 676	 * before the page is queued for IO.
 677	 */
 678	if (writable)
 679		tlb_ubc->writable = true;
 680}
 681
 682/*
 683 * Returns true if the TLB flush should be deferred to the end of a batch of
 684 * unmap operations to reduce IPIs.
 685 */
 686static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
 687{
 688	bool should_defer = false;
 689
 690	if (!(flags & TTU_BATCH_FLUSH))
 691		return false;
 692
  693	/* If remote CPUs need to be flushed then defer the flush by batching */
 694	if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids)
 695		should_defer = true;
 696	put_cpu();
 697
 698	return should_defer;
 699}
 700
 701/*
  702 * Reclaim unmaps pages under the PTL but does not flush the TLB prior to
 703 * releasing the PTL if TLB flushes are batched. It's possible for a parallel
 704 * operation such as mprotect or munmap to race between reclaim unmapping
 705 * the page and flushing the page. If this race occurs, it potentially allows
 706 * access to data via a stale TLB entry. Tracking all mm's that have TLB
 707 * batching in flight would be expensive during reclaim so instead track
 708 * whether TLB batching occurred in the past and if so then do a flush here
 709 * if required. This will cost one additional flush per reclaim cycle paid
  710 * by the first operation at risk such as mprotect and munmap.
 711 *
 712 * This must be called under the PTL so that an access to tlb_flush_batched
 713 * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise
 714 * via the PTL.
 715 */
 716void flush_tlb_batched_pending(struct mm_struct *mm)
 717{
 718	int batch = atomic_read(&mm->tlb_flush_batched);
 719	int pending = batch & TLB_FLUSH_BATCH_PENDING_MASK;
 720	int flushed = batch >> TLB_FLUSH_BATCH_FLUSHED_SHIFT;
 721
 722	if (pending != flushed) {
 723		flush_tlb_mm(mm);
 724		/*
 725		 * If the new TLB flushing is pending during flushing, leave
 726		 * mm->tlb_flush_batched as is, to avoid losing flushing.
 727		 */
 728		atomic_cmpxchg(&mm->tlb_flush_batched, batch,
 729			       pending | (pending << TLB_FLUSH_BATCH_FLUSHED_SHIFT));
 730	}
 731}
 732#else
 733static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
 734{
 735}
 736
 737static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
 738{
 739	return false;
 740}
 741#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
 742
 743/*
 744 * At what user virtual address is page expected in vma?
 745 * Caller should check the page is actually part of the vma.
 746 */
 747unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 748{
 749	struct folio *folio = page_folio(page);
 750	if (folio_test_anon(folio)) {
 751		struct anon_vma *page__anon_vma = folio_anon_vma(folio);
 752		/*
 753		 * Note: swapoff's unuse_vma() is more efficient with this
 754		 * check, and needs it to match anon_vma when KSM is active.
 755		 */
 756		if (!vma->anon_vma || !page__anon_vma ||
 757		    vma->anon_vma->root != page__anon_vma->root)
 758			return -EFAULT;
 759	} else if (!vma->vm_file) {
 760		return -EFAULT;
 761	} else if (vma->vm_file->f_mapping != folio->mapping) {
 762		return -EFAULT;
 763	}
 764
 765	return vma_address(page, vma);
 766}
 767
 768/*
 769 * Returns the actual pmd_t* where we expect 'address' to be mapped from, or
 770 * NULL if it doesn't exist.  No guarantees / checks on what the pmd_t*
 771 * represents.
 772 */
 773pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
 774{
 775	pgd_t *pgd;
 776	p4d_t *p4d;
 777	pud_t *pud;
 778	pmd_t *pmd = NULL;
 779
 780	pgd = pgd_offset(mm, address);
 781	if (!pgd_present(*pgd))
 782		goto out;
 783
 784	p4d = p4d_offset(pgd, address);
 785	if (!p4d_present(*p4d))
 786		goto out;
 787
 788	pud = pud_offset(p4d, address);
 789	if (!pud_present(*pud))
 790		goto out;
 791
 792	pmd = pmd_offset(pud, address);
 793out:
 794	return pmd;
 795}
 796
 797struct folio_referenced_arg {
 798	int mapcount;
 799	int referenced;
 800	unsigned long vm_flags;
 801	struct mem_cgroup *memcg;
 802};
 803/*
  804 * @arg: pointer to the folio_referenced_arg accumulator
 805 */
 806static bool folio_referenced_one(struct folio *folio,
 807		struct vm_area_struct *vma, unsigned long address, void *arg)
 808{
 809	struct folio_referenced_arg *pra = arg;
 810	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
 811	int referenced = 0;
 812
 813	while (page_vma_mapped_walk(&pvmw)) {
 814		address = pvmw.address;
 815
 816		if ((vma->vm_flags & VM_LOCKED) &&
 817		    (!folio_test_large(folio) || !pvmw.pte)) {
 818			/* Restore the mlock which got missed */
 819			mlock_vma_folio(folio, vma, !pvmw.pte);
 820			page_vma_mapped_walk_done(&pvmw);
 821			pra->vm_flags |= VM_LOCKED;
 822			return false; /* To break the loop */
 823		}
 824
 825		if (pvmw.pte) {
 826			if (lru_gen_enabled() && pte_young(*pvmw.pte) &&
 827			    !(vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ))) {
 828				lru_gen_look_around(&pvmw);
 829				referenced++;
 830			}
 831
 832			if (ptep_clear_flush_young_notify(vma, address,
 833						pvmw.pte)) {
 834				/*
 835				 * Don't treat a reference through
 836				 * a sequentially read mapping as such.
 837				 * If the folio has been used in another mapping,
 838				 * we will catch it; if this other mapping is
 839				 * already gone, the unmap path will have set
 840				 * the referenced flag or activated the folio.
 841				 */
 842				if (likely(!(vma->vm_flags & VM_SEQ_READ)))
 843					referenced++;
 844			}
 845		} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
 846			if (pmdp_clear_flush_young_notify(vma, address,
 847						pvmw.pmd))
 848				referenced++;
 849		} else {
 850			/* unexpected pmd-mapped folio? */
 851			WARN_ON_ONCE(1);
 852		}
 853
 854		pra->mapcount--;
 855	}
 856
 857	if (referenced)
 858		folio_clear_idle(folio);
 859	if (folio_test_clear_young(folio))
 860		referenced++;
 861
 862	if (referenced) {
 863		pra->referenced++;
 864		pra->vm_flags |= vma->vm_flags & ~VM_LOCKED;
 865	}
 866
 867	if (!pra->mapcount)
 868		return false; /* To break the loop */
 869
 870	return true;
 871}
 872
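     /*
      * rmap_walk() callback: when reclaiming on behalf of a memcg, skip any
      * vma whose mm is not charged to that memcg.
      */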
 873static bool invalid_folio_referenced_vma(struct vm_area_struct *vma, void *arg)
 874{
 875	struct folio_referenced_arg *pra = arg;
 876	struct mem_cgroup *memcg = pra->memcg;
 877
 878	if (!mm_match_cgroup(vma->vm_mm, memcg))
 879		return true;
 880
 881	return false;
 882}
 883
 884/**
 885 * folio_referenced() - Test if the folio was referenced.
 886 * @folio: The folio to test.
 887 * @is_locked: Caller holds lock on the folio.
 888 * @memcg: target memory cgroup
 889 * @vm_flags: A combination of all the vma->vm_flags which referenced the folio.
 890 *
  891 * Quick test_and_clear_referenced for all mappings of a folio.
 892 *
 893 * Return: The number of mappings which referenced the folio. Return -1 if
 894 * the function bailed out due to rmap lock contention.
 895 */
 896int folio_referenced(struct folio *folio, int is_locked,
 897		     struct mem_cgroup *memcg, unsigned long *vm_flags)
 898{
 899	int we_locked = 0;
 900	struct folio_referenced_arg pra = {
 901		.mapcount = folio_mapcount(folio),
 902		.memcg = memcg,
 903	};
 904	struct rmap_walk_control rwc = {
 905		.rmap_one = folio_referenced_one,
 906		.arg = (void *)&pra,
 907		.anon_lock = folio_lock_anon_vma_read,
 908		.try_lock = true,
 909	};
 910
 911	*vm_flags = 0;
 912	if (!pra.mapcount)
 913		return 0;
 914
 915	if (!folio_raw_mapping(folio))
 916		return 0;
 917
 918	if (!is_locked && (!folio_test_anon(folio) || folio_test_ksm(folio))) {
 919		we_locked = folio_trylock(folio);
 920		if (!we_locked)
 921			return 1;
 922	}
 923
 924	/*
 925	 * If we are reclaiming on behalf of a cgroup, skip
 926	 * counting on behalf of references from different
 927	 * cgroups
 928	 */
 929	if (memcg) {
 930		rwc.invalid_vma = invalid_folio_referenced_vma;
 931	}
 932
 933	rmap_walk(folio, &rwc);
 934	*vm_flags = pra.vm_flags;
 935
 936	if (we_locked)
 937		folio_unlock(folio);
 938
 939	return rwc.contended ? -1 : pra.referenced;
 940}
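
     /*
      * Illustrative sketch (not part of this file): reclaim consumes the
      * return value roughly like
      *
      *	unsigned long vm_flags;
      *	int refs = folio_referenced(folio, 1, memcg, &vm_flags);
      *
      *	if (refs == -1)
      *		treat the folio as referenced (rmap lock was contended);
      *
      * where memcg is the cgroup being reclaimed, or NULL for global reclaim.
      * See folio_check_references() in mm/vmscan.c for the real caller.
      */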
 941
 942static int page_vma_mkclean_one(struct page_vma_mapped_walk *pvmw)
 943{
 944	int cleaned = 0;
 945	struct vm_area_struct *vma = pvmw->vma;
 946	struct mmu_notifier_range range;
 947	unsigned long address = pvmw->address;
 948
 949	/*
  950	 * We have to assume the worst case, i.e. pmd, for invalidation. Note that
  951	 * the folio cannot be freed from this function.
 952	 */
 953	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
 954				0, vma, vma->vm_mm, address,
 955				vma_address_end(pvmw));
 956	mmu_notifier_invalidate_range_start(&range);
 957
 958	while (page_vma_mapped_walk(pvmw)) {
 959		int ret = 0;
 960
 961		address = pvmw->address;
 962		if (pvmw->pte) {
 963			pte_t entry;
 964			pte_t *pte = pvmw->pte;
 965
 966			if (!pte_dirty(*pte) && !pte_write(*pte))
 967				continue;
 968
 969			flush_cache_page(vma, address, pte_pfn(*pte));
 970			entry = ptep_clear_flush(vma, address, pte);
 971			entry = pte_wrprotect(entry);
 972			entry = pte_mkclean(entry);
 973			set_pte_at(vma->vm_mm, address, pte, entry);
 974			ret = 1;
 975		} else {
 976#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 977			pmd_t *pmd = pvmw->pmd;
 978			pmd_t entry;
 979
 980			if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
 981				continue;
 982
 983			flush_cache_range(vma, address,
 984					  address + HPAGE_PMD_SIZE);
 985			entry = pmdp_invalidate(vma, address, pmd);
 986			entry = pmd_wrprotect(entry);
 987			entry = pmd_mkclean(entry);
 988			set_pmd_at(vma->vm_mm, address, pmd, entry);
 989			ret = 1;
 990#else
 991			/* unexpected pmd-mapped folio? */
 992			WARN_ON_ONCE(1);
 993#endif
 994		}
 995
 996		/*
 997		 * No need to call mmu_notifier_invalidate_range() as we are
 998		 * downgrading page table protection not changing it to point
 999		 * to a new page.
1000		 *
1001		 * See Documentation/mm/mmu_notifier.rst
1002		 */
1003		if (ret)
1004			cleaned++;
1005	}
1006
1007	mmu_notifier_invalidate_range_end(&range);
1008
1009	return cleaned;
1010}
1011
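     /*
      * rmap_walk() callback for folio_mkclean(): write-protect and clean every
      * PTE (or PMD) mapping this folio in @vma, accumulating the count in the
      * int pointed to by @arg.
      */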
1012static bool page_mkclean_one(struct folio *folio, struct vm_area_struct *vma,
1013			     unsigned long address, void *arg)
1014{
1015	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_SYNC);
1016	int *cleaned = arg;
1017
1018	*cleaned += page_vma_mkclean_one(&pvmw);
1019
1020	return true;
1021}
1022
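     /*
      * Only shared (VM_SHARED) mappings matter for writeback of the backing
      * file, so the mkclean rmap walk skips private vmas.
      */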
1023static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
1024{
1025	if (vma->vm_flags & VM_SHARED)
1026		return false;
1027
1028	return true;
1029}
1030
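     /*
      * folio_mkclean - write-protect and clean all PTEs mapping a folio.
      *
      * The folio must be locked by the caller. Returns the number of cleaned
      * PTEs (including PMDs).
      */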
1031int folio_mkclean(struct folio *folio)
1032{
1033	int cleaned = 0;
1034	struct address_space *mapping;
1035	struct rmap_walk_control rwc = {
1036		.arg = (void *)&cleaned,
1037		.rmap_one = page_mkclean_one,
1038		.invalid_vma = invalid_mkclean_vma,
1039	};
1040
1041	BUG_ON(!folio_test_locked(folio));
1042
1043	if (!folio_mapped(folio))
1044		return 0;
1045
1046	mapping = folio_mapping(folio);
1047	if (!mapping)
1048		return 0;
1049
1050	rmap_walk(folio, &rwc);
1051
1052	return cleaned;
1053}
1054EXPORT_SYMBOL_GPL(folio_mkclean);
1055
1056/**
 1057 * pfn_mkclean_range - Clean the PTEs (including PMDs) that map the range
 1058 *                     [@pfn, @pfn + @nr_pages) at offset @pgoff within a
 1059 *                     shared @vma. Since clean PTEs should also be read-only,
 1060 *                     they are write-protected too.
 1061 * @pfn: start pfn.
 1062 * @nr_pages: number of physically contiguous pages starting with @pfn.
 1063 * @pgoff: page offset at which @pfn is mapped.
 1064 * @vma: vma within which @pfn is mapped.
1065 *
1066 * Returns the number of cleaned PTEs (including PMDs).
1067 */
1068int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
1069		      struct vm_area_struct *vma)
1070{
1071	struct page_vma_mapped_walk pvmw = {
1072		.pfn		= pfn,
1073		.nr_pages	= nr_pages,
1074		.pgoff		= pgoff,
1075		.vma		= vma,
1076		.flags		= PVMW_SYNC,
1077	};
1078
1079	if (invalid_mkclean_vma(vma, NULL))
1080		return 0;
1081
1082	pvmw.address = vma_pgoff_address(pgoff, nr_pages, vma);
1083	VM_BUG_ON_VMA(pvmw.address == -EFAULT, vma);
1084
1085	return page_vma_mkclean_one(&pvmw);
1086}
1087
1088int total_compound_mapcount(struct page *head)
1089{
1090	int mapcount = head_compound_mapcount(head);
1091	int nr_subpages;
1092	int i;
1093
 1094	/* In the common case, avoid the loop when no subpages are mapped by PTE */
1095	if (head_subpages_mapcount(head) == 0)
1096		return mapcount;
1097	/*
1098	 * Add all the PTE mappings of those subpages mapped by PTE.
1099	 * Limit the loop, knowing that only subpages_mapcount are mapped?
1100	 * Perhaps: given all the raciness, that may be a good or a bad idea.
1101	 */
1102	nr_subpages = thp_nr_pages(head);
1103	for (i = 0; i < nr_subpages; i++)
1104		mapcount += atomic_read(&head[i]._mapcount);
1105
1106	/* But each of those _mapcounts was based on -1 */
1107	mapcount += nr_subpages;
1108	return mapcount;
1109}
1110
1111/**
1112 * page_move_anon_rmap - move a page to our anon_vma
1113 * @page:	the page to move to our anon_vma
1114 * @vma:	the vma the page belongs to
1115 *
1116 * When a page belongs exclusively to one process after a COW event,
1117 * that page can be moved into the anon_vma that belongs to just that
1118 * process, so the rmap code will not search the parent or sibling
1119 * processes.
1120 */
1121void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
1122{
1123	void *anon_vma = vma->anon_vma;
1124	struct folio *folio = page_folio(page);
1125
1126	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
1127	VM_BUG_ON_VMA(!anon_vma, vma);
1128
1129	anon_vma += PAGE_MAPPING_ANON;
1130	/*
1131	 * Ensure that anon_vma and the PAGE_MAPPING_ANON bit are written
1132	 * simultaneously, so a concurrent reader (eg folio_referenced()'s
1133	 * folio_test_anon()) will not see one without the other.
1134	 */
1135	WRITE_ONCE(folio->mapping, anon_vma);
1136	SetPageAnonExclusive(page);
1137}
1138
1139/**
1140 * __page_set_anon_rmap - set up new anonymous rmap
1141 * @page:	Page or Hugepage to add to rmap
1142 * @vma:	VM area to add page to.
1143 * @address:	User virtual address of the mapping	
1144 * @exclusive:	the page is exclusively owned by the current process
1145 */
1146static void __page_set_anon_rmap(struct page *page,
1147	struct vm_area_struct *vma, unsigned long address, int exclusive)
1148{
1149	struct anon_vma *anon_vma = vma->anon_vma;
1150
1151	BUG_ON(!anon_vma);
1152
1153	if (PageAnon(page))
1154		goto out;
1155
1156	/*
1157	 * If the page isn't exclusively mapped into this vma,
1158	 * we must use the _oldest_ possible anon_vma for the
1159	 * page mapping!
1160	 */
1161	if (!exclusive)
1162		anon_vma = anon_vma->root;
1163
1164	/*
1165	 * page_idle does a lockless/optimistic rmap scan on page->mapping.
1166	 * Make sure the compiler doesn't split the stores of anon_vma and
1167	 * the PAGE_MAPPING_ANON type identifier, otherwise the rmap code
1168	 * could mistake the mapping for a struct address_space and crash.
1169	 */
1170	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
1171	WRITE_ONCE(page->mapping, (struct address_space *) anon_vma);
1172	page->index = linear_page_index(vma, address);
1173out:
1174	if (exclusive)
1175		SetPageAnonExclusive(page);
1176}
1177
1178/**
1179 * __page_check_anon_rmap - sanity check anonymous rmap addition
1180 * @page:	the page to add the mapping to
1181 * @vma:	the vm area in which the mapping is added
1182 * @address:	the user virtual address mapped
1183 */
1184static void __page_check_anon_rmap(struct page *page,
1185	struct vm_area_struct *vma, unsigned long address)
1186{
1187	struct folio *folio = page_folio(page);
1188	/*
1189	 * The page's anon-rmap details (mapping and index) are guaranteed to
1190	 * be set up correctly at this point.
1191	 *
1192	 * We have exclusion against page_add_anon_rmap because the caller
1193	 * always holds the page locked.
1194	 *
1195	 * We have exclusion against page_add_new_anon_rmap because those pages
1196	 * are initially only visible via the pagetables, and the pte is locked
1197	 * over the call to page_add_new_anon_rmap.
1198	 */
1199	VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root,
1200			folio);
1201	VM_BUG_ON_PAGE(page_to_pgoff(page) != linear_page_index(vma, address),
1202		       page);
1203}
1204
1205/**
1206 * page_add_anon_rmap - add pte mapping to an anonymous page
1207 * @page:	the page to add the mapping to
1208 * @vma:	the vm area in which the mapping is added
1209 * @address:	the user virtual address mapped
1210 * @flags:	the rmap flags
1211 *
1212 * The caller needs to hold the pte lock, and the page must be locked in
1213 * the anon_vma case: to serialize mapping,index checking after setting,
1214 * and to ensure that PageAnon is not being upgraded racily to PageKsm
1215 * (but PageKsm is never downgraded to PageAnon).
1216 */
1217void page_add_anon_rmap(struct page *page,
1218	struct vm_area_struct *vma, unsigned long address, rmap_t flags)
1219{
1220	atomic_t *mapped;
1221	int nr = 0, nr_pmdmapped = 0;
1222	bool compound = flags & RMAP_COMPOUND;
1223	bool first = true;
1224
1225	if (unlikely(PageKsm(page)))
1226		lock_page_memcg(page);
1227
1228	/* Is page being mapped by PTE? Is this its first map to be added? */
1229	if (likely(!compound)) {
1230		first = atomic_inc_and_test(&page->_mapcount);
1231		nr = first;
1232		if (first && PageCompound(page)) {
1233			mapped = subpages_mapcount_ptr(compound_head(page));
1234			nr = atomic_inc_return_relaxed(mapped);
1235			nr = (nr < COMPOUND_MAPPED);
1236		}
1237	} else if (PageTransHuge(page)) {
1238		/* That test is redundant: it's for safety or to optimize out */
1239
1240		first = atomic_inc_and_test(compound_mapcount_ptr(page));
1241		if (first) {
1242			mapped = subpages_mapcount_ptr(page);
1243			nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped);
1244			if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) {
1245				nr_pmdmapped = thp_nr_pages(page);
1246				nr = nr_pmdmapped - (nr & SUBPAGES_MAPPED);
1247				/* Raced ahead of a remove and another add? */
1248				if (unlikely(nr < 0))
1249					nr = 0;
1250			} else {
1251				/* Raced ahead of a remove of COMPOUND_MAPPED */
1252				nr = 0;
1253			}
1254		}
1255	}
1256
1257	VM_BUG_ON_PAGE(!first && (flags & RMAP_EXCLUSIVE), page);
1258	VM_BUG_ON_PAGE(!first && PageAnonExclusive(page), page);
1259
1260	if (nr_pmdmapped)
1261		__mod_lruvec_page_state(page, NR_ANON_THPS, nr_pmdmapped);
1262	if (nr)
1263		__mod_lruvec_page_state(page, NR_ANON_MAPPED, nr);
1264
1265	if (unlikely(PageKsm(page)))
1266		unlock_page_memcg(page);
1267
1268	/* address might be in next vma when migration races vma_adjust */
1269	else if (first)
1270		__page_set_anon_rmap(page, vma, address,
1271				     !!(flags & RMAP_EXCLUSIVE));
1272	else
1273		__page_check_anon_rmap(page, vma, address);
1274
1275	mlock_vma_page(page, vma, compound);
1276}
1277
1278/**
1279 * page_add_new_anon_rmap - add mapping to a new anonymous page
1280 * @page:	the page to add the mapping to
1281 * @vma:	the vm area in which the mapping is added
1282 * @address:	the user virtual address mapped
1283 *
1284 * If it's a compound page, it is accounted as a compound page. As the page
 1285 * is new, it's assumed to be mapped exclusively by a single process.
1286 *
1287 * Same as page_add_anon_rmap but must only be called on *new* pages.
1288 * This means the inc-and-test can be bypassed.
1289 * Page does not have to be locked.
1290 */
1291void page_add_new_anon_rmap(struct page *page,
1292	struct vm_area_struct *vma, unsigned long address)
1293{
1294	int nr;
1295
1296	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
1297	__SetPageSwapBacked(page);
1298
1299	if (likely(!PageCompound(page))) {
1300		/* increment count (starts at -1) */
1301		atomic_set(&page->_mapcount, 0);
1302		nr = 1;
1303	} else {
1304		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
1305		/* increment count (starts at -1) */
1306		atomic_set(compound_mapcount_ptr(page), 0);
1307		atomic_set(subpages_mapcount_ptr(page), COMPOUND_MAPPED);
1308		nr = thp_nr_pages(page);
1309		__mod_lruvec_page_state(page, NR_ANON_THPS, nr);
1310	}
1311
1312	__mod_lruvec_page_state(page, NR_ANON_MAPPED, nr);
1313	__page_set_anon_rmap(page, vma, address, 1);
1314}
1315
1316/**
1317 * page_add_file_rmap - add pte mapping to a file page
1318 * @page:	the page to add the mapping to
1319 * @vma:	the vm area in which the mapping is added
1320 * @compound:	charge the page as compound or small page
1321 *
1322 * The caller needs to hold the pte lock.
1323 */
1324void page_add_file_rmap(struct page *page,
1325	struct vm_area_struct *vma, bool compound)
1326{
1327	atomic_t *mapped;
1328	int nr = 0, nr_pmdmapped = 0;
1329	bool first;
1330
1331	VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
1332	lock_page_memcg(page);
1333
1334	/* Is page being mapped by PTE? Is this its first map to be added? */
1335	if (likely(!compound)) {
1336		first = atomic_inc_and_test(&page->_mapcount);
1337		nr = first;
1338		if (first && PageCompound(page)) {
1339			mapped = subpages_mapcount_ptr(compound_head(page));
1340			nr = atomic_inc_return_relaxed(mapped);
1341			nr = (nr < COMPOUND_MAPPED);
1342		}
1343	} else if (PageTransHuge(page)) {
1344		/* That test is redundant: it's for safety or to optimize out */
1345
1346		first = atomic_inc_and_test(compound_mapcount_ptr(page));
1347		if (first) {
1348			mapped = subpages_mapcount_ptr(page);
1349			nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped);
1350			if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) {
1351				nr_pmdmapped = thp_nr_pages(page);
1352				nr = nr_pmdmapped - (nr & SUBPAGES_MAPPED);
1353				/* Raced ahead of a remove and another add? */
1354				if (unlikely(nr < 0))
1355					nr = 0;
1356			} else {
1357				/* Raced ahead of a remove of COMPOUND_MAPPED */
1358				nr = 0;
1359			}
1360		}
1361	}
1362
1363	if (nr_pmdmapped)
1364		__mod_lruvec_page_state(page, PageSwapBacked(page) ?
1365			NR_SHMEM_PMDMAPPED : NR_FILE_PMDMAPPED, nr_pmdmapped);
1366	if (nr)
1367		__mod_lruvec_page_state(page, NR_FILE_MAPPED, nr);
1368	unlock_page_memcg(page);
1369
1370	mlock_vma_page(page, vma, compound);
1371}
1372
1373/**
1374 * page_remove_rmap - take down pte mapping from a page
1375 * @page:	page to remove mapping from
1376 * @vma:	the vm area from which the mapping is removed
1377 * @compound:	uncharge the page as compound or small page
1378 *
1379 * The caller needs to hold the pte lock.
1380 */
1381void page_remove_rmap(struct page *page,
1382	struct vm_area_struct *vma, bool compound)
1383{
1384	atomic_t *mapped;
1385	int nr = 0, nr_pmdmapped = 0;
1386	bool last;
1387
1388	VM_BUG_ON_PAGE(compound && !PageHead(page), page);
1389
1390	/* Hugetlb pages are not counted in NR_*MAPPED */
1391	if (unlikely(PageHuge(page))) {
1392		/* hugetlb pages are always mapped with pmds */
1393		atomic_dec(compound_mapcount_ptr(page));
1394		return;
1395	}
1396
1397	lock_page_memcg(page);
1398
1399	/* Is page being unmapped by PTE? Is this its last map to be removed? */
1400	if (likely(!compound)) {
1401		last = atomic_add_negative(-1, &page->_mapcount);
1402		nr = last;
1403		if (last && PageCompound(page)) {
1404			mapped = subpages_mapcount_ptr(compound_head(page));
1405			nr = atomic_dec_return_relaxed(mapped);
1406			nr = (nr < COMPOUND_MAPPED);
1407		}
1408	} else if (PageTransHuge(page)) {
1409		/* That test is redundant: it's for safety or to optimize out */
1410
1411		last = atomic_add_negative(-1, compound_mapcount_ptr(page));
1412		if (last) {
1413			mapped = subpages_mapcount_ptr(page);
1414			nr = atomic_sub_return_relaxed(COMPOUND_MAPPED, mapped);
1415			if (likely(nr < COMPOUND_MAPPED)) {
1416				nr_pmdmapped = thp_nr_pages(page);
1417				nr = nr_pmdmapped - (nr & SUBPAGES_MAPPED);
1418				/* Raced ahead of another remove and an add? */
1419				if (unlikely(nr < 0))
1420					nr = 0;
1421			} else {
1422				/* An add of COMPOUND_MAPPED raced ahead */
1423				nr = 0;
1424			}
1425		}
1426	}
1427
1428	if (nr_pmdmapped) {
1429		__mod_lruvec_page_state(page, PageAnon(page) ? NR_ANON_THPS :
1430				(PageSwapBacked(page) ? NR_SHMEM_PMDMAPPED :
1431				NR_FILE_PMDMAPPED), -nr_pmdmapped);
1432	}
1433	if (nr) {
1434		__mod_lruvec_page_state(page, PageAnon(page) ? NR_ANON_MAPPED :
1435				NR_FILE_MAPPED, -nr);
1436		/*
1437		 * Queue anon THP for deferred split if at least one small
1438		 * page of the compound page is unmapped, but at least one
1439		 * small page is still mapped.
1440		 */
1441		if (PageTransCompound(page) && PageAnon(page))
1442			if (!compound || nr < nr_pmdmapped)
1443				deferred_split_huge_page(compound_head(page));
1444	}
1445
1446	/*
1447	 * It would be tidy to reset PageAnon mapping when fully unmapped,
1448	 * but that might overwrite a racing page_add_anon_rmap
1449	 * which increments mapcount after us but sets mapping
1450	 * before us: so leave the reset to free_pages_prepare,
1451	 * and remember that it's only reliable while mapped.
1452	 */
1453
1454	unlock_page_memcg(page);
1455
1456	munlock_vma_page(page, vma, compound);
1457}
1458
1459/*
 1460 * @arg: enum ttu_flags is passed in this argument
1461 */
1462static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
1463		     unsigned long address, void *arg)
1464{
1465	struct mm_struct *mm = vma->vm_mm;
1466	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
1467	pte_t pteval;
1468	struct page *subpage;
1469	bool anon_exclusive, ret = true;
1470	struct mmu_notifier_range range;
1471	enum ttu_flags flags = (enum ttu_flags)(long)arg;
1472
1473	/*
1474	 * When racing against e.g. zap_pte_range() on another cpu,
1475	 * in between its ptep_get_and_clear_full() and page_remove_rmap(),
1476	 * try_to_unmap() may return before page_mapped() has become false,
1477	 * if page table locking is skipped: use TTU_SYNC to wait for that.
1478	 */
1479	if (flags & TTU_SYNC)
1480		pvmw.flags = PVMW_SYNC;
1481
1482	if (flags & TTU_SPLIT_HUGE_PMD)
1483		split_huge_pmd_address(vma, address, false, folio);
1484
1485	/*
 1486	 * For THP, we have to assume the worst case, i.e. pmd, for invalidation.
1487	 * For hugetlb, it could be much worse if we need to do pud
1488	 * invalidation in the case of pmd sharing.
1489	 *
 1490	 * Note that the folio cannot be freed in this function, as the caller of
 1491	 * try_to_unmap() must hold a reference on the folio.
1492	 */
1493	range.end = vma_address_end(&pvmw);
1494	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
1495				address, range.end);
1496	if (folio_test_hugetlb(folio)) {
1497		/*
1498		 * If sharing is possible, start and end will be adjusted
1499		 * accordingly.
1500		 */
1501		adjust_range_if_pmd_sharing_possible(vma, &range.start,
1502						     &range.end);
1503	}
1504	mmu_notifier_invalidate_range_start(&range);
1505
1506	while (page_vma_mapped_walk(&pvmw)) {
1507		/* Unexpected PMD-mapped THP? */
1508		VM_BUG_ON_FOLIO(!pvmw.pte, folio);
1509
1510		/*
1511		 * If the folio is in an mlock()d vma, we must not swap it out.
1512		 */
1513		if (!(flags & TTU_IGNORE_MLOCK) &&
1514		    (vma->vm_flags & VM_LOCKED)) {
1515			/* Restore the mlock which got missed */
1516			mlock_vma_folio(folio, vma, false);
1517			page_vma_mapped_walk_done(&pvmw);
1518			ret = false;
1519			break;
1520		}
1521
1522		subpage = folio_page(folio,
1523					pte_pfn(*pvmw.pte) - folio_pfn(folio));
1524		address = pvmw.address;
1525		anon_exclusive = folio_test_anon(folio) &&
1526				 PageAnonExclusive(subpage);
1527
1528		if (folio_test_hugetlb(folio)) {
1529			bool anon = folio_test_anon(folio);
1530
1531			/*
 1532			 * try_to_unmap() is only passed a hugetlb page
 1533			 * when that hugetlb page is poisoned.
1534			 */
1535			VM_BUG_ON_PAGE(!PageHWPoison(subpage), subpage);
1536			/*
1537			 * huge_pmd_unshare may unmap an entire PMD page.
1538			 * There is no way of knowing exactly which PMDs may
1539			 * be cached for this mm, so we must flush them all.
1540			 * start/end were already adjusted above to cover this
1541			 * range.
1542			 */
1543			flush_cache_range(vma, range.start, range.end);
1544
1545			/*
1546			 * To call huge_pmd_unshare, i_mmap_rwsem must be
1547			 * held in write mode.  Caller needs to explicitly
1548			 * do this outside rmap routines.
1549			 *
1550			 * We also must hold hugetlb vma_lock in write mode.
1551			 * Lock order dictates acquiring vma_lock BEFORE
1552			 * i_mmap_rwsem.  We can only try lock here and fail
1553			 * if unsuccessful.
1554			 */
1555			if (!anon) {
1556				VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
1557				if (!hugetlb_vma_trylock_write(vma)) {
1558					page_vma_mapped_walk_done(&pvmw);
1559					ret = false;
1560					break;
1561				}
1562				if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) {
1563					hugetlb_vma_unlock_write(vma);
1564					flush_tlb_range(vma,
1565						range.start, range.end);
1566					mmu_notifier_invalidate_range(mm,
1567						range.start, range.end);
1568					/*
1569					 * The ref count of the PMD page was
1570					 * dropped which is part of the way map
1571					 * counting is done for shared PMDs.
1572					 * Return 'true' here.  When there is
1573					 * no other sharing, huge_pmd_unshare
1574					 * returns false and we will unmap the
1575					 * actual page and drop map count
1576					 * to zero.
1577					 */
1578					page_vma_mapped_walk_done(&pvmw);
1579					break;
1580				}
1581				hugetlb_vma_unlock_write(vma);
1582			}
1583			pteval = huge_ptep_clear_flush(vma, address, pvmw.pte);
1584		} else {
1585			flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
1586			/* Nuke the page table entry. */
1587			if (should_defer_flush(mm, flags)) {
1588				/*
1589				 * We clear the PTE but do not flush so potentially
1590				 * a remote CPU could still be writing to the folio.
1591				 * If the entry was previously clean then the
1592				 * architecture must guarantee that a clear->dirty
1593				 * transition on a cached TLB entry is written through
1594				 * and traps if the PTE is unmapped.
1595				 */
1596				pteval = ptep_get_and_clear(mm, address, pvmw.pte);
1597
1598				set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
1599			} else {
1600				pteval = ptep_clear_flush(vma, address, pvmw.pte);
1601			}
1602		}
1603
1604		/*
1605		 * Now the pte is cleared. If this pte was uffd-wp armed,
1606		 * we may want to replace a none pte with a marker pte if
1607		 * it's file-backed, so we don't lose the tracking info.
1608		 */
1609		pte_install_uffd_wp_if_needed(vma, address, pvmw.pte, pteval);
1610
1611		/* Set the dirty flag on the folio now the pte is gone. */
1612		if (pte_dirty(pteval))
1613			folio_mark_dirty(folio);
1614
1615		/* Update high watermark before we lower rss */
1616		update_hiwater_rss(mm);
1617
1618		if (PageHWPoison(subpage) && !(flags & TTU_IGNORE_HWPOISON)) {
1619			pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
1620			if (folio_test_hugetlb(folio)) {
1621				hugetlb_count_sub(folio_nr_pages(folio), mm);
1622				set_huge_pte_at(mm, address, pvmw.pte, pteval);
1623			} else {
1624				dec_mm_counter(mm, mm_counter(&folio->page));
1625				set_pte_at(mm, address, pvmw.pte, pteval);
1626			}
1627
1628		} else if (pte_unused(pteval) && !userfaultfd_armed(vma)) {
1629			/*
1630			 * The guest indicated that the page content is of no
1631			 * interest anymore. Simply discard the pte, vmscan
1632			 * will take care of the rest.
1633			 * A future reference will then fault in a new zero
1634			 * page. When userfaultfd is active, we must not drop
1635			 * this page though, as its main user (postcopy
1636			 * migration) will not expect userfaults on already
1637			 * copied pages.
1638			 */
1639			dec_mm_counter(mm, mm_counter(&folio->page));
1640			/* We have to invalidate as we cleared the pte */
1641			mmu_notifier_invalidate_range(mm, address,
1642						      address + PAGE_SIZE);
1643		} else if (folio_test_anon(folio)) {
1644			swp_entry_t entry = { .val = page_private(subpage) };
1645			pte_t swp_pte;
1646			/*
1647			 * Store the swap location in the pte.
1648			 * See handle_pte_fault() ...
1649			 */
1650			if (unlikely(folio_test_swapbacked(folio) !=
1651					folio_test_swapcache(folio))) {
1652				WARN_ON_ONCE(1);
1653				ret = false;
1654				/* We have to invalidate as we cleared the pte */
1655				mmu_notifier_invalidate_range(mm, address,
1656							address + PAGE_SIZE);
1657				page_vma_mapped_walk_done(&pvmw);
1658				break;
1659			}
1660
1661			/* MADV_FREE page check */
1662			if (!folio_test_swapbacked(folio)) {
1663				int ref_count, map_count;
1664
1665				/*
1666				 * Synchronize with gup_pte_range():
1667				 * - clear PTE; barrier; read refcount
1668				 * - inc refcount; barrier; read PTE
1669				 */
1670				smp_mb();
1671
1672				ref_count = folio_ref_count(folio);
1673				map_count = folio_mapcount(folio);
1674
1675				/*
1676				 * Order reads for page refcount and dirty flag
1677				 * (see comments in __remove_mapping()).
1678				 */
1679				smp_rmb();
1680
1681				/*
1682				 * The only page refs must be one from isolation
1683				 * plus the rmap(s) (dropped by discard:).
1684				 */
1685				if (ref_count == 1 + map_count &&
1686				    !folio_test_dirty(folio)) {
1687					/* Invalidate as we cleared the pte */
1688					mmu_notifier_invalidate_range(mm,
1689						address, address + PAGE_SIZE);
1690					dec_mm_counter(mm, MM_ANONPAGES);
1691					goto discard;
1692				}
1693
1694				/*
1695				 * If the folio was redirtied, it cannot be
1696				 * discarded. Remap the page to the page table.
1697				 */
1698				set_pte_at(mm, address, pvmw.pte, pteval);
1699				folio_set_swapbacked(folio);
1700				ret = false;
1701				page_vma_mapped_walk_done(&pvmw);
1702				break;
1703			}
1704
1705			if (swap_duplicate(entry) < 0) {
1706				set_pte_at(mm, address, pvmw.pte, pteval);
1707				ret = false;
1708				page_vma_mapped_walk_done(&pvmw);
1709				break;
1710			}
1711			if (arch_unmap_one(mm, vma, address, pteval) < 0) {
1712				swap_free(entry);
1713				set_pte_at(mm, address, pvmw.pte, pteval);
1714				ret = false;
1715				page_vma_mapped_walk_done(&pvmw);
1716				break;
1717			}
1718
1719			/* See page_try_share_anon_rmap(): clear PTE first. */
1720			if (anon_exclusive &&
1721			    page_try_share_anon_rmap(subpage)) {
1722				swap_free(entry);
1723				set_pte_at(mm, address, pvmw.pte, pteval);
1724				ret = false;
1725				page_vma_mapped_walk_done(&pvmw);
1726				break;
1727			}
1728			/*
1729			 * Note: We *don't* remember if the page was mapped
1730			 * exclusively in the swap pte if the architecture
1731			 * doesn't support __HAVE_ARCH_PTE_SWP_EXCLUSIVE. In
1732			 * that case, swapin code has to re-determine that
1733			 * manually and might detect the page as possibly
1734			 * shared, for example, if there are other references on
1735			 * the page or if the page is under writeback. We made
1736			 * sure that there are no GUP pins on the page that
1737			 * would rely on it, so for GUP pins this is fine.
1738			 */
1739			if (list_empty(&mm->mmlist)) {
1740				spin_lock(&mmlist_lock);
1741				if (list_empty(&mm->mmlist))
1742					list_add(&mm->mmlist, &init_mm.mmlist);
1743				spin_unlock(&mmlist_lock);
1744			}
1745			dec_mm_counter(mm, MM_ANONPAGES);
1746			inc_mm_counter(mm, MM_SWAPENTS);
1747			swp_pte = swp_entry_to_pte(entry);
1748			if (anon_exclusive)
1749				swp_pte = pte_swp_mkexclusive(swp_pte);
1750			if (pte_soft_dirty(pteval))
1751				swp_pte = pte_swp_mksoft_dirty(swp_pte);
1752			if (pte_uffd_wp(pteval))
1753				swp_pte = pte_swp_mkuffd_wp(swp_pte);
1754			set_pte_at(mm, address, pvmw.pte, swp_pte);
1755			/* Invalidate as we cleared the pte */
1756			mmu_notifier_invalidate_range(mm, address,
1757						      address + PAGE_SIZE);
1758		} else {
1759			/*
1760			 * This is a locked file-backed folio,
1761			 * so it cannot be removed from the page
1762			 * cache and replaced by a new folio before
1763			 * mmu_notifier_invalidate_range_end, so no
1764			 * concurrent thread can update its page table
1765			 * to point at a new folio while a device is
1766			 * still using this folio.
1767			 *
1768			 * See Documentation/mm/mmu_notifier.rst
1769			 */
1770			dec_mm_counter(mm, mm_counter_file(&folio->page));
1771		}
1772discard:
1773		/*
1774		 * No need to call mmu_notifier_invalidate_range(): it has been
1775		 * done above for all cases requiring it to happen under the page
1776		 * table lock, before mmu_notifier_invalidate_range_end()
1777		 *
1778		 * See Documentation/mm/mmu_notifier.rst
1779		 */
1780		page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
1781		if (vma->vm_flags & VM_LOCKED)
1782			mlock_page_drain_local();
1783		folio_put(folio);
1784	}
1785
1786	mmu_notifier_invalidate_range_end(&range);
1787
1788	return ret;
1789}
1790
1791static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
1792{
1793	return vma_is_temporary_stack(vma);
1794}
1795
1796static int folio_not_mapped(struct folio *folio)
1797{
1798	return !folio_mapped(folio);
1799}
1800
1801/**
1802 * try_to_unmap - Try to remove all page table mappings to a folio.
1803 * @folio: The folio to unmap.
1804 * @flags: action and flags
1805 *
1806 * Tries to remove all the page table entries which are mapping this
1807 * folio.  It is the caller's responsibility to check if the folio is
1808 * still mapped if needed (use TTU_SYNC to prevent accounting races).
1809 *
1810 * Context: Caller must hold the folio lock.
1811 */
1812void try_to_unmap(struct folio *folio, enum ttu_flags flags)
1813{
1814	struct rmap_walk_control rwc = {
1815		.rmap_one = try_to_unmap_one,
1816		.arg = (void *)flags,
1817		.done = folio_not_mapped,
1818		.anon_lock = folio_lock_anon_vma_read,
1819	};
1820
1821	if (flags & TTU_RMAP_LOCKED)
1822		rmap_walk_locked(folio, &rwc);
1823	else
1824		rmap_walk(folio, &rwc);
1825}
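/*
 * A minimal usage sketch (not taken from any in-tree caller): reclaim-style
 * code is expected to lock the folio, call try_to_unmap() and then check
 * whether the folio really became unmapped.  example_unmap_locked_folio() is
 * a hypothetical name; it only uses helpers declared elsewhere in this file
 * or in <linux/rmap.h>.
 *
 *	static bool example_unmap_locked_folio(struct folio *folio)
 *	{
 *		VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
 *
 *		// TTU_SYNC prevents the accounting race noted above.
 *		try_to_unmap(folio, TTU_SYNC);
 *
 *		return !folio_mapped(folio);
 *	}
 */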
1826
1827/*
1828 * @arg: enum ttu_flags will be passed to this argument.
1829 *
1830 * If TTU_SPLIT_HUGE_PMD is specified, any PMD mappings will be split into PTEs
1831 * containing migration entries.
1832 */
1833static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
1834		     unsigned long address, void *arg)
1835{
1836	struct mm_struct *mm = vma->vm_mm;
1837	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
1838	pte_t pteval;
1839	struct page *subpage;
1840	bool anon_exclusive, ret = true;
1841	struct mmu_notifier_range range;
1842	enum ttu_flags flags = (enum ttu_flags)(long)arg;
1843
1844	/*
1845	 * When racing against e.g. zap_pte_range() on another cpu,
1846	 * in between its ptep_get_and_clear_full() and page_remove_rmap(),
1847	 * try_to_migrate() may return before page_mapped() has become false,
1848	 * if page table locking is skipped: use TTU_SYNC to wait for that.
1849	 */
1850	if (flags & TTU_SYNC)
1851		pvmw.flags = PVMW_SYNC;
1852
1853	/*
1854	 * unmap_page() in mm/huge_memory.c is the only user of migration with
1855	 * TTU_SPLIT_HUGE_PMD and it wants to freeze.
1856	 */
1857	if (flags & TTU_SPLIT_HUGE_PMD)
1858		split_huge_pmd_address(vma, address, true, folio);
1859
1860	/*
1861	 * For THP, we have to assume the worst case, i.e. pmd, for invalidation.
1862	 * For hugetlb, it could be much worse if we need to do pud
1863	 * invalidation in the case of pmd sharing.
1864	 *
1865	 * Note that the page cannot be freed in this function, as the caller of
1866	 * try_to_unmap() must hold a reference on the page.
1867	 */
1868	range.end = vma_address_end(&pvmw);
1869	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
1870				address, range.end);
1871	if (folio_test_hugetlb(folio)) {
1872		/*
1873		 * If sharing is possible, start and end will be adjusted
1874		 * accordingly.
1875		 */
1876		adjust_range_if_pmd_sharing_possible(vma, &range.start,
1877						     &range.end);
1878	}
1879	mmu_notifier_invalidate_range_start(&range);
1880
1881	while (page_vma_mapped_walk(&pvmw)) {
1882#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1883		/* PMD-mapped THP migration entry */
1884		if (!pvmw.pte) {
1885			subpage = folio_page(folio,
1886				pmd_pfn(*pvmw.pmd) - folio_pfn(folio));
1887			VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
1888					!folio_test_pmd_mappable(folio), folio);
1889
1890			if (set_pmd_migration_entry(&pvmw, subpage)) {
1891				ret = false;
1892				page_vma_mapped_walk_done(&pvmw);
1893				break;
1894			}
1895			continue;
1896		}
1897#endif
1898
1899		/* Unexpected PMD-mapped THP? */
1900		VM_BUG_ON_FOLIO(!pvmw.pte, folio);
1901
1902		if (folio_is_zone_device(folio)) {
1903			/*
1904			 * Our PTE is a non-present device exclusive entry and
1905			 * calculating the subpage as for the common case would
1906			 * result in an invalid pointer.
1907			 *
1908			 * Since only PAGE_SIZE pages can currently be
1909			 * migrated, just set it to page. This will need to be
1910			 * changed when hugepage migrations to device private
1911			 * memory are supported.
1912			 */
1913			VM_BUG_ON_FOLIO(folio_nr_pages(folio) > 1, folio);
1914			subpage = &folio->page;
1915		} else {
1916			subpage = folio_page(folio,
1917					pte_pfn(*pvmw.pte) - folio_pfn(folio));
1918		}
1919		address = pvmw.address;
1920		anon_exclusive = folio_test_anon(folio) &&
1921				 PageAnonExclusive(subpage);
1922
1923		if (folio_test_hugetlb(folio)) {
1924			bool anon = folio_test_anon(folio);
1925
1926			/*
1927			 * huge_pmd_unshare may unmap an entire PMD page.
1928			 * There is no way of knowing exactly which PMDs may
1929			 * be cached for this mm, so we must flush them all.
1930			 * start/end were already adjusted above to cover this
1931			 * range.
1932			 */
1933			flush_cache_range(vma, range.start, range.end);
1934
1935			/*
1936			 * To call huge_pmd_unshare, i_mmap_rwsem must be
1937			 * held in write mode.  Caller needs to explicitly
1938			 * do this outside rmap routines.
1939			 *
1940			 * We also must hold hugetlb vma_lock in write mode.
1941			 * Lock order dictates acquiring vma_lock BEFORE
1942			 * i_mmap_rwsem.  We can only trylock here and
1943			 * fail if unsuccessful.
1944			 */
1945			if (!anon) {
1946				VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
1947				if (!hugetlb_vma_trylock_write(vma)) {
1948					page_vma_mapped_walk_done(&pvmw);
1949					ret = false;
1950					break;
1951				}
1952				if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) {
1953					hugetlb_vma_unlock_write(vma);
1954					flush_tlb_range(vma,
1955						range.start, range.end);
1956					mmu_notifier_invalidate_range(mm,
1957						range.start, range.end);
1958
1959					/*
1960					 * The ref count of the PMD page was
1961					 * dropped which is part of the way map
1962					 * counting is done for shared PMDs.
1963					 * Return 'true' here.  When there is
1964					 * no other sharing, huge_pmd_unshare
1965					 * returns false and we will unmap the
1966					 * actual page and drop map count
1967					 * to zero.
1968					 */
1969					page_vma_mapped_walk_done(&pvmw);
1970					break;
1971				}
1972				hugetlb_vma_unlock_write(vma);
1973			}
1974			/* Nuke the hugetlb page table entry */
1975			pteval = huge_ptep_clear_flush(vma, address, pvmw.pte);
1976		} else {
1977			flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
1978			/* Nuke the page table entry. */
1979			pteval = ptep_clear_flush(vma, address, pvmw.pte);
1980		}
1981
1982		/* Set the dirty flag on the folio now the pte is gone. */
1983		if (pte_dirty(pteval))
1984			folio_mark_dirty(folio);
1985
1986		/* Update high watermark before we lower rss */
1987		update_hiwater_rss(mm);
1988
1989		if (folio_is_device_private(folio)) {
1990			unsigned long pfn = folio_pfn(folio);
1991			swp_entry_t entry;
1992			pte_t swp_pte;
1993
1994			if (anon_exclusive)
1995				BUG_ON(page_try_share_anon_rmap(subpage));
1996
1997			/*
1998			 * Store the pfn of the page in a special migration
1999			 * pte. do_swap_page() will wait until the migration
2000			 * pte is removed and then restart fault handling.
2001			 */
2002			entry = pte_to_swp_entry(pteval);
2003			if (is_writable_device_private_entry(entry))
2004				entry = make_writable_migration_entry(pfn);
2005			else if (anon_exclusive)
2006				entry = make_readable_exclusive_migration_entry(pfn);
2007			else
2008				entry = make_readable_migration_entry(pfn);
2009			swp_pte = swp_entry_to_pte(entry);
2010
2011			/*
2012			 * pteval maps a zone device page and is therefore
2013			 * a swap pte.
2014			 */
2015			if (pte_swp_soft_dirty(pteval))
2016				swp_pte = pte_swp_mksoft_dirty(swp_pte);
2017			if (pte_swp_uffd_wp(pteval))
2018				swp_pte = pte_swp_mkuffd_wp(swp_pte);
2019			set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
2020			trace_set_migration_pte(pvmw.address, pte_val(swp_pte),
2021						compound_order(&folio->page));
2022			/*
2023			 * No need to invalidate here; it will synchronize
2024			 * against the special swap migration pte.
2025			 */
2026		} else if (PageHWPoison(subpage)) {
2027			pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
2028			if (folio_test_hugetlb(folio)) {
2029				hugetlb_count_sub(folio_nr_pages(folio), mm);
2030				set_huge_pte_at(mm, address, pvmw.pte, pteval);
2031			} else {
2032				dec_mm_counter(mm, mm_counter(&folio->page));
2033				set_pte_at(mm, address, pvmw.pte, pteval);
2034			}
2035
2036		} else if (pte_unused(pteval) && !userfaultfd_armed(vma)) {
2037			/*
2038			 * The guest indicated that the page content is of no
2039			 * interest anymore. Simply discard the pte; vmscan
2040			 * will take care of the rest.
2041			 * A future reference will then fault in a new zero
2042			 * page. When userfaultfd is active, we must not drop
2043			 * this page though, as its main user (postcopy
2044			 * migration) will not expect userfaults on already
2045			 * copied pages.
2046			 */
2047			dec_mm_counter(mm, mm_counter(&folio->page));
2048			/* We have to invalidate as we cleared the pte */
2049			mmu_notifier_invalidate_range(mm, address,
2050						      address + PAGE_SIZE);
2051		} else {
2052			swp_entry_t entry;
2053			pte_t swp_pte;
2054
2055			if (arch_unmap_one(mm, vma, address, pteval) < 0) {
2056				if (folio_test_hugetlb(folio))
2057					set_huge_pte_at(mm, address, pvmw.pte, pteval);
2058				else
2059					set_pte_at(mm, address, pvmw.pte, pteval);
2060				ret = false;
2061				page_vma_mapped_walk_done(&pvmw);
2062				break;
2063			}
2064			VM_BUG_ON_PAGE(pte_write(pteval) && folio_test_anon(folio) &&
2065				       !anon_exclusive, subpage);
2066
2067			/* See page_try_share_anon_rmap(): clear PTE first. */
2068			if (anon_exclusive &&
2069			    page_try_share_anon_rmap(subpage)) {
2070				if (folio_test_hugetlb(folio))
2071					set_huge_pte_at(mm, address, pvmw.pte, pteval);
2072				else
2073					set_pte_at(mm, address, pvmw.pte, pteval);
2074				ret = false;
2075				page_vma_mapped_walk_done(&pvmw);
2076				break;
2077			}
2078
2079			/*
2080			 * Store the pfn of the page in a special migration
2081			 * pte. do_swap_page() will wait until the migration
2082			 * pte is removed and then restart fault handling.
2083			 */
2084			if (pte_write(pteval))
2085				entry = make_writable_migration_entry(
2086							page_to_pfn(subpage));
2087			else if (anon_exclusive)
2088				entry = make_readable_exclusive_migration_entry(
2089							page_to_pfn(subpage));
2090			else
2091				entry = make_readable_migration_entry(
2092							page_to_pfn(subpage));
2093			if (pte_young(pteval))
2094				entry = make_migration_entry_young(entry);
2095			if (pte_dirty(pteval))
2096				entry = make_migration_entry_dirty(entry);
2097			swp_pte = swp_entry_to_pte(entry);
2098			if (pte_soft_dirty(pteval))
2099				swp_pte = pte_swp_mksoft_dirty(swp_pte);
2100			if (pte_uffd_wp(pteval))
2101				swp_pte = pte_swp_mkuffd_wp(swp_pte);
2102			if (folio_test_hugetlb(folio))
2103				set_huge_pte_at(mm, address, pvmw.pte, swp_pte);
2104			else
2105				set_pte_at(mm, address, pvmw.pte, swp_pte);
2106			trace_set_migration_pte(address, pte_val(swp_pte),
2107						compound_order(&folio->page));
2108			/*
2109			 * No need to invalidate here; it will synchronize
2110			 * against the special swap migration pte.
2111			 */
2112		}
2113
2114		/*
2115		 * No need to call mmu_notifier_invalidate_range(): it has been
2116		 * done above for all cases requiring it to happen under the page
2117		 * table lock, before mmu_notifier_invalidate_range_end()
2118		 *
2119		 * See Documentation/mm/mmu_notifier.rst
2120		 */
2121		page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
2122		if (vma->vm_flags & VM_LOCKED)
2123			mlock_page_drain_local();
2124		folio_put(folio);
2125	}
2126
2127	mmu_notifier_invalidate_range_end(&range);
2128
2129	return ret;
2130}
2131
2132/**
2133 * try_to_migrate - try to replace all page table mappings with swap entries
2134 * @folio: the folio to replace page table entries for
2135 * @flags: action and flags
2136 *
2137 * Tries to remove all the page table entries which are mapping this folio and
2138 * replace them with special swap entries. Caller must hold the folio lock.
2139 */
2140void try_to_migrate(struct folio *folio, enum ttu_flags flags)
2141{
2142	struct rmap_walk_control rwc = {
2143		.rmap_one = try_to_migrate_one,
2144		.arg = (void *)flags,
2145		.done = folio_not_mapped,
2146		.anon_lock = folio_lock_anon_vma_read,
2147	};
2148
2149	/*
2150	 * Migration always ignores mlock and only supports the TTU_RMAP_LOCKED,
2151	 * TTU_SPLIT_HUGE_PMD and TTU_SYNC flags.
2152	 */
2153	if (WARN_ON_ONCE(flags & ~(TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD |
2154					TTU_SYNC)))
2155		return;
2156
2157	if (folio_is_zone_device(folio) &&
2158	    (!folio_is_device_private(folio) && !folio_is_device_coherent(folio)))
2159		return;
2160
2161	/*
2162	 * During exec, a temporary VMA is set up and later moved.
2163	 * The VMA is moved under the anon_vma lock but not the
2164	 * page tables leading to a race where migration cannot
2165	 * find the migration ptes. Rather than increasing the
2166	 * locking requirements of exec(), migration skips
2167	 * temporary VMAs until after exec() completes.
2168	 */
2169	if (!folio_test_ksm(folio) && folio_test_anon(folio))
2170		rwc.invalid_vma = invalid_migration_vma;
2171
2172	if (flags & TTU_RMAP_LOCKED)
2173		rmap_walk_locked(folio, &rwc);
2174	else
2175		rmap_walk(folio, &rwc);
2176}
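/*
 * A minimal usage sketch (not taken from any in-tree caller): a migration
 * path is expected to call try_to_migrate() on a locked folio and only
 * proceed with copying once every pte has been replaced by a migration
 * entry.  example_freeze_folio() is a hypothetical name; on failure the
 * caller would restore the ptes with remove_migration_ptes() before
 * unlocking the folio.
 *
 *	static bool example_freeze_folio(struct folio *folio)
 *	{
 *		VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
 *
 *		if (folio_mapped(folio))
 *			try_to_migrate(folio, TTU_SYNC);
 *
 *		return !folio_mapped(folio);
 *	}
 */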
2177
2178#ifdef CONFIG_DEVICE_PRIVATE
2179struct make_exclusive_args {
2180	struct mm_struct *mm;
2181	unsigned long address;
2182	void *owner;
2183	bool valid;
2184};
2185
2186static bool page_make_device_exclusive_one(struct folio *folio,
2187		struct vm_area_struct *vma, unsigned long address, void *priv)
2188{
2189	struct mm_struct *mm = vma->vm_mm;
2190	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
2191	struct make_exclusive_args *args = priv;
2192	pte_t pteval;
2193	struct page *subpage;
2194	bool ret = true;
2195	struct mmu_notifier_range range;
2196	swp_entry_t entry;
2197	pte_t swp_pte;
2198
2199	mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, vma,
2200				      vma->vm_mm, address, min(vma->vm_end,
2201				      address + folio_size(folio)),
2202				      args->owner);
2203	mmu_notifier_invalidate_range_start(&range);
2204
2205	while (page_vma_mapped_walk(&pvmw)) {
2206		/* Unexpected PMD-mapped THP? */
2207		VM_BUG_ON_FOLIO(!pvmw.pte, folio);
2208
2209		if (!pte_present(*pvmw.pte)) {
2210			ret = false;
2211			page_vma_mapped_walk_done(&pvmw);
2212			break;
2213		}
2214
2215		subpage = folio_page(folio,
2216				pte_pfn(*pvmw.pte) - folio_pfn(folio));
2217		address = pvmw.address;
2218
2219		/* Nuke the page table entry. */
2220		flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
2221		pteval = ptep_clear_flush(vma, address, pvmw.pte);
2222
2223		/* Set the dirty flag on the folio now the pte is gone. */
2224		if (pte_dirty(pteval))
2225			folio_mark_dirty(folio);
2226
2227		/*
2228		 * Check that our target page is still mapped at the expected
2229		 * address.
2230		 */
2231		if (args->mm == mm && args->address == address &&
2232		    pte_write(pteval))
2233			args->valid = true;
2234
2235		/*
2236		 * Store the pfn of the page in a special device-exclusive
2237		 * swap entry. On the next CPU fault, do_swap_page() will
2238		 * restore the original mapping, revoking the exclusive access.
2239		 */
2240		if (pte_write(pteval))
2241			entry = make_writable_device_exclusive_entry(
2242							page_to_pfn(subpage));
2243		else
2244			entry = make_readable_device_exclusive_entry(
2245							page_to_pfn(subpage));
2246		swp_pte = swp_entry_to_pte(entry);
2247		if (pte_soft_dirty(pteval))
2248			swp_pte = pte_swp_mksoft_dirty(swp_pte);
2249		if (pte_uffd_wp(pteval))
2250			swp_pte = pte_swp_mkuffd_wp(swp_pte);
2251
2252		set_pte_at(mm, address, pvmw.pte, swp_pte);
2253
2254		/*
2255		 * The page reference previously held by the removed pte now backs
2256		 * the device-exclusive swap entry, so we shouldn't take another.
2257		 */
2258		page_remove_rmap(subpage, vma, false);
2259	}
2260
2261	mmu_notifier_invalidate_range_end(&range);
2262
2263	return ret;
2264}
2265
2266/**
2267 * folio_make_device_exclusive - Mark the folio exclusively owned by a device.
2268 * @folio: The folio to replace page table entries for.
2269 * @mm: The mm_struct where the folio is expected to be mapped.
2270 * @address: Address where the folio is expected to be mapped.
2271 * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier callbacks
2272 *
2273 * Tries to remove all the page table entries which are mapping this
2274 * folio and replace them with special device exclusive swap entries to
2275 * grant a device exclusive access to the folio.
2276 *
2277 * Context: Caller must hold the folio lock.
2278 * Return: false if the page is still mapped, or if it could not be unmapped
2279 * from the expected address. Otherwise returns true (success).
2280 */
2281static bool folio_make_device_exclusive(struct folio *folio,
2282		struct mm_struct *mm, unsigned long address, void *owner)
2283{
2284	struct make_exclusive_args args = {
2285		.mm = mm,
2286		.address = address,
2287		.owner = owner,
2288		.valid = false,
2289	};
2290	struct rmap_walk_control rwc = {
2291		.rmap_one = page_make_device_exclusive_one,
2292		.done = folio_not_mapped,
2293		.anon_lock = folio_lock_anon_vma_read,
2294		.arg = &args,
2295	};
2296
2297	/*
2298	 * Restrict to anonymous folios for now to avoid potential writeback
2299	 * issues.
2300	 */
2301	if (!folio_test_anon(folio))
2302		return false;
2303
2304	rmap_walk(folio, &rwc);
2305
2306	return args.valid && !folio_mapcount(folio);
2307}
2308
2309/**
2310 * make_device_exclusive_range() - Mark a range for exclusive use by a device
2311 * @mm: mm_struct of associated target process
2312 * @start: start of the region to mark for exclusive device access
2313 * @end: end address of region
2314 * @pages: returns the pages which were successfully marked for exclusive access
2315 * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier to allow filtering
2316 *
2317 * Returns: number of pages found in the range by GUP. A page is marked for
2318 * exclusive access only if the page pointer is non-NULL.
2319 *
2320 * This function finds ptes mapping page(s) in the given address range, locks
2321 * them and replaces mappings with special swap entries preventing userspace CPU
2322 * access. On fault these entries are replaced with the original mapping after
2323 * calling MMU notifiers.
2324 *
2325 * A driver using this to program access from a device must use an mmu notifier
2326 * critical section to hold a device-specific lock during programming. Once
2327 * programming is complete, it should drop the page lock and reference, after
2328 * which point CPU access to the page will revoke the exclusive access.
2329 */
2330int make_device_exclusive_range(struct mm_struct *mm, unsigned long start,
2331				unsigned long end, struct page **pages,
2332				void *owner)
2333{
2334	long npages = (end - start) >> PAGE_SHIFT;
2335	long i;
2336
2337	npages = get_user_pages_remote(mm, start, npages,
2338				       FOLL_GET | FOLL_WRITE | FOLL_SPLIT_PMD,
2339				       pages, NULL, NULL);
2340	if (npages < 0)
2341		return npages;
2342
2343	for (i = 0; i < npages; i++, start += PAGE_SIZE) {
2344		struct folio *folio = page_folio(pages[i]);
2345		if (PageTail(pages[i]) || !folio_trylock(folio)) {
2346			folio_put(folio);
2347			pages[i] = NULL;
2348			continue;
2349		}
2350
2351		if (!folio_make_device_exclusive(folio, mm, start, owner)) {
2352			folio_unlock(folio);
2353			folio_put(folio);
2354			pages[i] = NULL;
2355		}
2356	}
2357
2358	return npages;
2359}
2360EXPORT_SYMBOL_GPL(make_device_exclusive_range);
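/*
 * A minimal driver-side sketch (hypothetical, not an in-tree user) of the
 * pattern described in the kernel-doc above: mark the range exclusive,
 * program the device under a driver lock that the driver's
 * MMU_NOTIFY_EXCLUSIVE notifier callback also takes, then drop the page
 * locks and references.  All driver_* names are assumptions.
 *
 *	static int driver_grab_range(struct mm_struct *mm, unsigned long start,
 *				     unsigned long end, struct page **pages,
 *				     void *owner)
 *	{
 *		int npages, i;
 *
 *		npages = make_device_exclusive_range(mm, start, end, pages, owner);
 *		if (npages < 0)
 *			return npages;
 *
 *		driver_lock();			// also taken by the notifier
 *		for (i = 0; i < npages; i++) {
 *			if (!pages[i])		// not made exclusive, skip
 *				continue;
 *			driver_map_page(pages[i]);
 *			unlock_page(pages[i]);
 *			put_page(pages[i]);
 *		}
 *		driver_unlock();
 *		return 0;
 *	}
 */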
2361#endif
2362
2363void __put_anon_vma(struct anon_vma *anon_vma)
2364{
2365	struct anon_vma *root = anon_vma->root;
2366
2367	anon_vma_free(anon_vma);
2368	if (root != anon_vma && atomic_dec_and_test(&root->refcount))
2369		anon_vma_free(root);
2370}
2371
2372static struct anon_vma *rmap_walk_anon_lock(struct folio *folio,
2373					    struct rmap_walk_control *rwc)
2374{
2375	struct anon_vma *anon_vma;
2376
2377	if (rwc->anon_lock)
2378		return rwc->anon_lock(folio, rwc);
2379
2380	/*
2381	 * Note: remove_migration_ptes() cannot use folio_lock_anon_vma_read()
2382	 * because that depends on page_mapped(); but not all its users
2383	 * hold mmap_lock. Users without mmap_lock are required to
2384	 * take a reference count to prevent the anon_vma from disappearing.
2385	 */
2386	anon_vma = folio_anon_vma(folio);
2387	if (!anon_vma)
2388		return NULL;
2389
2390	if (anon_vma_trylock_read(anon_vma))
2391		goto out;
2392
2393	if (rwc->try_lock) {
2394		anon_vma = NULL;
2395		rwc->contended = true;
2396		goto out;
2397	}
2398
2399	anon_vma_lock_read(anon_vma);
2400out:
2401	return anon_vma;
2402}
2403
2404/*
2405 * rmap_walk_anon - do something to an anonymous folio using the object-based
2406 * rmap method
2407 * @folio: the folio to be handled
2408 * @rwc: control variable according to each walk type
2409 *
2410 * Find all the mappings of a folio using the mapping pointer and the vma chains
2411 * contained in the anon_vma struct it points to.
2412 */
2413static void rmap_walk_anon(struct folio *folio,
2414		struct rmap_walk_control *rwc, bool locked)
2415{
2416	struct anon_vma *anon_vma;
2417	pgoff_t pgoff_start, pgoff_end;
2418	struct anon_vma_chain *avc;
2419
2420	if (locked) {
2421		anon_vma = folio_anon_vma(folio);
2422		/* anon_vma disappear under us? */
2423		VM_BUG_ON_FOLIO(!anon_vma, folio);
2424	} else {
2425		anon_vma = rmap_walk_anon_lock(folio, rwc);
2426	}
2427	if (!anon_vma)
2428		return;
2429
2430	pgoff_start = folio_pgoff(folio);
2431	pgoff_end = pgoff_start + folio_nr_pages(folio) - 1;
2432	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
2433			pgoff_start, pgoff_end) {
2434		struct vm_area_struct *vma = avc->vma;
2435		unsigned long address = vma_address(&folio->page, vma);
2436
2437		VM_BUG_ON_VMA(address == -EFAULT, vma);
2438		cond_resched();
2439
2440		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
2441			continue;
2442
2443		if (!rwc->rmap_one(folio, vma, address, rwc->arg))
2444			break;
2445		if (rwc->done && rwc->done(folio))
2446			break;
2447	}
2448
2449	if (!locked)
2450		anon_vma_unlock_read(anon_vma);
2451}
2452
2453/*
2454 * rmap_walk_file - do something to a file-backed folio using the object-based rmap method
2455 * @folio: the folio to be handled
2456 * @rwc: control variable according to each walk type
2457 *
2458 * Find all the mappings of a folio using the mapping pointer and the vma chains
2459 * contained in the address_space struct it points to.
2460 */
2461static void rmap_walk_file(struct folio *folio,
2462		struct rmap_walk_control *rwc, bool locked)
2463{
2464	struct address_space *mapping = folio_mapping(folio);
2465	pgoff_t pgoff_start, pgoff_end;
2466	struct vm_area_struct *vma;
2467
2468	/*
2469	 * The page lock not only makes sure that page->mapping cannot
2470	 * suddenly be NULLified by truncation, it makes sure that the
2471	 * structure at mapping cannot be freed and reused yet,
2472	 * so we can safely take mapping->i_mmap_rwsem.
2473	 */
2474	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
2475
2476	if (!mapping)
2477		return;
2478
2479	pgoff_start = folio_pgoff(folio);
2480	pgoff_end = pgoff_start + folio_nr_pages(folio) - 1;
2481	if (!locked) {
2482		if (i_mmap_trylock_read(mapping))
2483			goto lookup;
2484
2485		if (rwc->try_lock) {
2486			rwc->contended = true;
2487			return;
2488		}
2489
2490		i_mmap_lock_read(mapping);
2491	}
2492lookup:
2493	vma_interval_tree_foreach(vma, &mapping->i_mmap,
2494			pgoff_start, pgoff_end) {
2495		unsigned long address = vma_address(&folio->page, vma);
2496
2497		VM_BUG_ON_VMA(address == -EFAULT, vma);
2498		cond_resched();
2499
2500		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
2501			continue;
2502
2503		if (!rwc->rmap_one(folio, vma, address, rwc->arg))
2504			goto done;
2505		if (rwc->done && rwc->done(folio))
2506			goto done;
2507	}
2508
2509done:
2510	if (!locked)
2511		i_mmap_unlock_read(mapping);
2512}
2513
2514void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc)
2515{
2516	if (unlikely(folio_test_ksm(folio)))
2517		rmap_walk_ksm(folio, rwc);
2518	else if (folio_test_anon(folio))
2519		rmap_walk_anon(folio, rwc, false);
2520	else
2521		rmap_walk_file(folio, rwc, false);
2522}
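/*
 * A minimal sketch (hypothetical, not an in-tree walker) of how a new rmap
 * walk is wired up: fill an rmap_walk_control with an rmap_one callback
 * (plus optional done, invalid_vma, anon_lock and try_lock/contended for
 * opportunistic locking) and hand the folio to rmap_walk().  The example_*
 * names are assumptions.
 *
 *	static bool example_count_one(struct folio *folio,
 *			struct vm_area_struct *vma, unsigned long address,
 *			void *arg)
 *	{
 *		int *vmas = arg;
 *
 *		(*vmas)++;
 *		return true;		// keep walking
 *	}
 *
 *	static int example_count_vmas(struct folio *folio)
 *	{
 *		int vmas = 0;
 *		struct rmap_walk_control rwc = {
 *			.rmap_one = example_count_one,
 *			.arg = &vmas,
 *			.anon_lock = folio_lock_anon_vma_read,
 *		};
 *
 *		rmap_walk(folio, &rwc);
 *		return vmas;
 *	}
 */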
2523
2524/* Like rmap_walk, but caller holds relevant rmap lock */
2525void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc)
2526{
2527	/* no ksm support for now */
2528	VM_BUG_ON_FOLIO(folio_test_ksm(folio), folio);
2529	if (folio_test_anon(folio))
2530		rmap_walk_anon(folio, rwc, true);
2531	else
2532		rmap_walk_file(folio, rwc, true);
2533}
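/*
 * A brief sketch of when the locked variant is reached: callers that already
 * hold the relevant rmap lock (anon_vma->rwsem or mapping->i_mmap_rwsem)
 * pass TTU_RMAP_LOCKED, so try_to_unmap()/try_to_migrate() end up here
 * instead of re-taking the lock, e.g.:
 *
 *	// caller already holds mapping->i_mmap_rwsem for this folio
 *	try_to_unmap(folio, TTU_RMAP_LOCKED);
 */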
2534
2535#ifdef CONFIG_HUGETLB_PAGE
2536/*
2537 * The following two functions are for anonymous (private mapped) hugepages.
2538 * Unlike common anonymous pages, anonymous hugepages have no accounting code
2539 * and no lru code, because we handle hugepages differently from common pages.
2540 *
2541 * RMAP_COMPOUND is ignored.
2542 */
2543void hugepage_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
2544			    unsigned long address, rmap_t flags)
2545{
2546	struct anon_vma *anon_vma = vma->anon_vma;
2547	int first;
2548
2549	BUG_ON(!PageLocked(page));
2550	BUG_ON(!anon_vma);
2551	/* address might be in next vma when migration races vma_adjust */
2552	first = atomic_inc_and_test(compound_mapcount_ptr(page));
2553	VM_BUG_ON_PAGE(!first && (flags & RMAP_EXCLUSIVE), page);
2554	VM_BUG_ON_PAGE(!first && PageAnonExclusive(page), page);
2555	if (first)
2556		__page_set_anon_rmap(page, vma, address,
2557				     !!(flags & RMAP_EXCLUSIVE));
2558}
2559
2560void hugepage_add_new_anon_rmap(struct page *page,
2561			struct vm_area_struct *vma, unsigned long address)
2562{
2563	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
2564	/* increment count (starts at -1) */
2565	atomic_set(compound_mapcount_ptr(page), 0);
2566	ClearHPageRestoreReserve(page);
2567	__page_set_anon_rmap(page, vma, address, 1);
2568}
2569#endif /* CONFIG_HUGETLB_PAGE */
v5.14.15
   1/*
   2 * mm/rmap.c - physical to virtual reverse mappings
   3 *
   4 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
   5 * Released under the General Public License (GPL).
   6 *
   7 * Simple, low overhead reverse mapping scheme.
   8 * Please try to keep this thing as modular as possible.
   9 *
  10 * Provides methods for unmapping each kind of mapped page:
  11 * the anon methods track anonymous pages, and
  12 * the file methods track pages belonging to an inode.
  13 *
  14 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
  15 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
  16 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
  17 * Contributions by Hugh Dickins 2003, 2004
  18 */
  19
  20/*
  21 * Lock ordering in mm:
  22 *
  23 * inode->i_mutex	(while writing or truncating, not reading or faulting)
  24 *   mm->mmap_lock
  25 *     page->flags PG_locked (lock_page)   * (see huegtlbfs below)
  26 *       hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share)
  27 *         mapping->i_mmap_rwsem
  28 *           hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
  29 *           anon_vma->rwsem
  30 *             mm->page_table_lock or pte_lock
  31 *               swap_lock (in swap_duplicate, swap_info_get)
  32 *                 mmlist_lock (in mmput, drain_mmlist and others)
  33 *                 mapping->private_lock (in __set_page_dirty_buffers)
  34 *                   lock_page_memcg move_lock (in __set_page_dirty_buffers)
  35 *                     i_pages lock (widely used)
  36 *                       lruvec->lru_lock (in lock_page_lruvec_irq)
  37 *                 inode->i_lock (in set_page_dirty's __mark_inode_dirty)
  38 *                 bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
  39 *                   sb_lock (within inode_lock in fs/fs-writeback.c)
  40 *                   i_pages lock (widely used, in set_page_dirty,
  41 *                             in arch-dependent flush_dcache_mmap_lock,
  42 *                             within bdi.wb->list_lock in __sync_single_inode)
  43 *
  44 * anon_vma->rwsem,mapping->i_mutex      (memory_failure, collect_procs_anon)
  45 *   ->tasklist_lock
  46 *     pte map lock
  47 *
  48 * * hugetlbfs PageHuge() pages take locks in this order:
  49 *         mapping->i_mmap_rwsem
  50 *           hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
  51 *             page->flags PG_locked (lock_page)
 
  52 */
  53
  54#include <linux/mm.h>
  55#include <linux/sched/mm.h>
  56#include <linux/sched/task.h>
  57#include <linux/pagemap.h>
  58#include <linux/swap.h>
  59#include <linux/swapops.h>
  60#include <linux/slab.h>
  61#include <linux/init.h>
  62#include <linux/ksm.h>
  63#include <linux/rmap.h>
  64#include <linux/rcupdate.h>
  65#include <linux/export.h>
  66#include <linux/memcontrol.h>
  67#include <linux/mmu_notifier.h>
  68#include <linux/migrate.h>
  69#include <linux/hugetlb.h>
  70#include <linux/huge_mm.h>
  71#include <linux/backing-dev.h>
  72#include <linux/page_idle.h>
  73#include <linux/memremap.h>
  74#include <linux/userfaultfd_k.h>
 
  75
  76#include <asm/tlbflush.h>
  77
 
  78#include <trace/events/tlb.h>
 
  79
  80#include "internal.h"
  81
  82static struct kmem_cache *anon_vma_cachep;
  83static struct kmem_cache *anon_vma_chain_cachep;
  84
  85static inline struct anon_vma *anon_vma_alloc(void)
  86{
  87	struct anon_vma *anon_vma;
  88
  89	anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
  90	if (anon_vma) {
  91		atomic_set(&anon_vma->refcount, 1);
  92		anon_vma->degree = 1;	/* Reference for first vma */
 
  93		anon_vma->parent = anon_vma;
  94		/*
  95		 * Initialise the anon_vma root to point to itself. If called
  96		 * from fork, the root will be reset to the parents anon_vma.
  97		 */
  98		anon_vma->root = anon_vma;
  99	}
 100
 101	return anon_vma;
 102}
 103
 104static inline void anon_vma_free(struct anon_vma *anon_vma)
 105{
 106	VM_BUG_ON(atomic_read(&anon_vma->refcount));
 107
 108	/*
 109	 * Synchronize against page_lock_anon_vma_read() such that
 110	 * we can safely hold the lock without the anon_vma getting
 111	 * freed.
 112	 *
 113	 * Relies on the full mb implied by the atomic_dec_and_test() from
 114	 * put_anon_vma() against the acquire barrier implied by
 115	 * down_read_trylock() from page_lock_anon_vma_read(). This orders:
 116	 *
 117	 * page_lock_anon_vma_read()	VS	put_anon_vma()
 118	 *   down_read_trylock()		  atomic_dec_and_test()
 119	 *   LOCK				  MB
 120	 *   atomic_read()			  rwsem_is_locked()
 121	 *
 122	 * LOCK should suffice since the actual taking of the lock must
 123	 * happen _before_ what follows.
 124	 */
 125	might_sleep();
 126	if (rwsem_is_locked(&anon_vma->root->rwsem)) {
 127		anon_vma_lock_write(anon_vma);
 128		anon_vma_unlock_write(anon_vma);
 129	}
 130
 131	kmem_cache_free(anon_vma_cachep, anon_vma);
 132}
 133
 134static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
 135{
 136	return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
 137}
 138
 139static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
 140{
 141	kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
 142}
 143
 144static void anon_vma_chain_link(struct vm_area_struct *vma,
 145				struct anon_vma_chain *avc,
 146				struct anon_vma *anon_vma)
 147{
 148	avc->vma = vma;
 149	avc->anon_vma = anon_vma;
 150	list_add(&avc->same_vma, &vma->anon_vma_chain);
 151	anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
 152}
 153
 154/**
 155 * __anon_vma_prepare - attach an anon_vma to a memory region
 156 * @vma: the memory region in question
 157 *
 158 * This makes sure the memory mapping described by 'vma' has
 159 * an 'anon_vma' attached to it, so that we can associate the
 160 * anonymous pages mapped into it with that anon_vma.
 161 *
 162 * The common case will be that we already have one, which
 163 * is handled inline by anon_vma_prepare(). But if
 164 * not we either need to find an adjacent mapping that we
 165 * can re-use the anon_vma from (very common when the only
 166 * reason for splitting a vma has been mprotect()), or we
 167 * allocate a new one.
 168 *
 169 * Anon-vma allocations are very subtle, because we may have
 170 * optimistically looked up an anon_vma in page_lock_anon_vma_read()
 171 * and that may actually touch the rwsem even in the newly
 172 * allocated vma (it depends on RCU to make sure that the
 173 * anon_vma isn't actually destroyed).
 174 *
 175 * As a result, we need to do proper anon_vma locking even
 176 * for the new allocation. At the same time, we do not want
 177 * to do any locking for the common case of already having
 178 * an anon_vma.
 179 *
 180 * This must be called with the mmap_lock held for reading.
 181 */
 182int __anon_vma_prepare(struct vm_area_struct *vma)
 183{
 184	struct mm_struct *mm = vma->vm_mm;
 185	struct anon_vma *anon_vma, *allocated;
 186	struct anon_vma_chain *avc;
 187
 188	might_sleep();
 189
 190	avc = anon_vma_chain_alloc(GFP_KERNEL);
 191	if (!avc)
 192		goto out_enomem;
 193
 194	anon_vma = find_mergeable_anon_vma(vma);
 195	allocated = NULL;
 196	if (!anon_vma) {
 197		anon_vma = anon_vma_alloc();
 198		if (unlikely(!anon_vma))
 199			goto out_enomem_free_avc;
 
 200		allocated = anon_vma;
 201	}
 202
 203	anon_vma_lock_write(anon_vma);
 204	/* page_table_lock to protect against threads */
 205	spin_lock(&mm->page_table_lock);
 206	if (likely(!vma->anon_vma)) {
 207		vma->anon_vma = anon_vma;
 208		anon_vma_chain_link(vma, avc, anon_vma);
 209		/* vma reference or self-parent link for new root */
 210		anon_vma->degree++;
 211		allocated = NULL;
 212		avc = NULL;
 213	}
 214	spin_unlock(&mm->page_table_lock);
 215	anon_vma_unlock_write(anon_vma);
 216
 217	if (unlikely(allocated))
 218		put_anon_vma(allocated);
 219	if (unlikely(avc))
 220		anon_vma_chain_free(avc);
 221
 222	return 0;
 223
 224 out_enomem_free_avc:
 225	anon_vma_chain_free(avc);
 226 out_enomem:
 227	return -ENOMEM;
 228}
 229
 230/*
 231 * This is a useful helper function for locking the anon_vma root as
 232 * we traverse the vma->anon_vma_chain, looping over anon_vma's that
 233 * have the same vma.
 234 *
 235 * Such anon_vma's should have the same root, so you'd expect to see
 236 * just a single mutex_lock for the whole traversal.
 237 */
 238static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
 239{
 240	struct anon_vma *new_root = anon_vma->root;
 241	if (new_root != root) {
 242		if (WARN_ON_ONCE(root))
 243			up_write(&root->rwsem);
 244		root = new_root;
 245		down_write(&root->rwsem);
 246	}
 247	return root;
 248}
 249
 250static inline void unlock_anon_vma_root(struct anon_vma *root)
 251{
 252	if (root)
 253		up_write(&root->rwsem);
 254}
 255
 256/*
 257 * Attach the anon_vmas from src to dst.
 258 * Returns 0 on success, -ENOMEM on failure.
 259 *
 260 * anon_vma_clone() is called by __vma_adjust(), __split_vma(), copy_vma() and
 261 * anon_vma_fork(). The first three want an exact copy of src, while the last
 262 * one, anon_vma_fork(), may try to reuse an existing anon_vma to prevent
 263 * endless growth of anon_vma. Since dst->anon_vma is set to NULL before call,
 264 * we can identify this case by checking (!dst->anon_vma && src->anon_vma).
 265 *
 266 * If (!dst->anon_vma && src->anon_vma) is true, this function tries to find
 267 * and reuse existing anon_vma which has no vmas and only one child anon_vma.
 268 * This prevents degradation of anon_vma hierarchy to endless linear chain in
 269 * case of constantly forking task. On the other hand, an anon_vma with more
 270 * than one child isn't reused even if there was no alive vma, thus rmap
 271 * walker has a good chance of avoiding scanning the whole hierarchy when it
 272 * searches where page is mapped.
 273 */
 274int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
 275{
 276	struct anon_vma_chain *avc, *pavc;
 277	struct anon_vma *root = NULL;
 278
 279	list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
 280		struct anon_vma *anon_vma;
 281
 282		avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
 283		if (unlikely(!avc)) {
 284			unlock_anon_vma_root(root);
 285			root = NULL;
 286			avc = anon_vma_chain_alloc(GFP_KERNEL);
 287			if (!avc)
 288				goto enomem_failure;
 289		}
 290		anon_vma = pavc->anon_vma;
 291		root = lock_anon_vma_root(root, anon_vma);
 292		anon_vma_chain_link(dst, avc, anon_vma);
 293
 294		/*
 295		 * Reuse existing anon_vma if its degree lower than two,
 296		 * that means it has no vma and only one anon_vma child.
 297		 *
 298		 * Do not chose parent anon_vma, otherwise first child
 299		 * will always reuse it. Root anon_vma is never reused:
 300		 * it has self-parent reference and at least one child.
 301		 */
 302		if (!dst->anon_vma && src->anon_vma &&
 303		    anon_vma != src->anon_vma && anon_vma->degree < 2)
 
 304			dst->anon_vma = anon_vma;
 305	}
 306	if (dst->anon_vma)
 307		dst->anon_vma->degree++;
 308	unlock_anon_vma_root(root);
 309	return 0;
 310
 311 enomem_failure:
 312	/*
 313	 * dst->anon_vma is dropped here otherwise its degree can be incorrectly
 314	 * decremented in unlink_anon_vmas().
 315	 * We can safely do this because callers of anon_vma_clone() don't care
 316	 * about dst->anon_vma if anon_vma_clone() failed.
 317	 */
 318	dst->anon_vma = NULL;
 319	unlink_anon_vmas(dst);
 320	return -ENOMEM;
 321}
 322
 323/*
 324 * Attach vma to its own anon_vma, as well as to the anon_vmas that
 325 * the corresponding VMA in the parent process is attached to.
 326 * Returns 0 on success, non-zero on failure.
 327 */
 328int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
 329{
 330	struct anon_vma_chain *avc;
 331	struct anon_vma *anon_vma;
 332	int error;
 333
 334	/* Don't bother if the parent process has no anon_vma here. */
 335	if (!pvma->anon_vma)
 336		return 0;
 337
 338	/* Drop inherited anon_vma, we'll reuse existing or allocate new. */
 339	vma->anon_vma = NULL;
 340
 341	/*
 342	 * First, attach the new VMA to the parent VMA's anon_vmas,
 343	 * so rmap can find non-COWed pages in child processes.
 344	 */
 345	error = anon_vma_clone(vma, pvma);
 346	if (error)
 347		return error;
 348
 349	/* An existing anon_vma has been reused, all done then. */
 350	if (vma->anon_vma)
 351		return 0;
 352
 353	/* Then add our own anon_vma. */
 354	anon_vma = anon_vma_alloc();
 355	if (!anon_vma)
 356		goto out_error;
 
 357	avc = anon_vma_chain_alloc(GFP_KERNEL);
 358	if (!avc)
 359		goto out_error_free_anon_vma;
 360
 361	/*
 362	 * The root anon_vma's rwsem is the lock actually used when we
 363	 * lock any of the anon_vmas in this anon_vma tree.
 364	 */
 365	anon_vma->root = pvma->anon_vma->root;
 366	anon_vma->parent = pvma->anon_vma;
 367	/*
 368	 * With refcounts, an anon_vma can stay around longer than the
 369	 * process it belongs to. The root anon_vma needs to be pinned until
 370	 * this anon_vma is freed, because the lock lives in the root.
 371	 */
 372	get_anon_vma(anon_vma->root);
 373	/* Mark this anon_vma as the one where our new (COWed) pages go. */
 374	vma->anon_vma = anon_vma;
 375	anon_vma_lock_write(anon_vma);
 376	anon_vma_chain_link(vma, avc, anon_vma);
 377	anon_vma->parent->degree++;
 378	anon_vma_unlock_write(anon_vma);
 379
 380	return 0;
 381
 382 out_error_free_anon_vma:
 383	put_anon_vma(anon_vma);
 384 out_error:
 385	unlink_anon_vmas(vma);
 386	return -ENOMEM;
 387}
 388
 389void unlink_anon_vmas(struct vm_area_struct *vma)
 390{
 391	struct anon_vma_chain *avc, *next;
 392	struct anon_vma *root = NULL;
 393
 394	/*
 395	 * Unlink each anon_vma chained to the VMA.  This list is ordered
 396	 * from newest to oldest, ensuring the root anon_vma gets freed last.
 397	 */
 398	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
 399		struct anon_vma *anon_vma = avc->anon_vma;
 400
 401		root = lock_anon_vma_root(root, anon_vma);
 402		anon_vma_interval_tree_remove(avc, &anon_vma->rb_root);
 403
 404		/*
 405		 * Leave empty anon_vmas on the list - we'll need
 406		 * to free them outside the lock.
 407		 */
 408		if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) {
 409			anon_vma->parent->degree--;
 410			continue;
 411		}
 412
 413		list_del(&avc->same_vma);
 414		anon_vma_chain_free(avc);
 415	}
 416	if (vma->anon_vma) {
 417		vma->anon_vma->degree--;
 418
 419		/*
 420		 * vma would still be needed after unlink, and anon_vma will be prepared
 421		 * when handle fault.
 422		 */
 423		vma->anon_vma = NULL;
 424	}
 425	unlock_anon_vma_root(root);
 426
 427	/*
 428	 * Iterate the list once more, it now only contains empty and unlinked
 429	 * anon_vmas, destroy them. Could not do before due to __put_anon_vma()
 430	 * needing to write-acquire the anon_vma->root->rwsem.
 431	 */
 432	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
 433		struct anon_vma *anon_vma = avc->anon_vma;
 434
 435		VM_WARN_ON(anon_vma->degree);
 
 436		put_anon_vma(anon_vma);
 437
 438		list_del(&avc->same_vma);
 439		anon_vma_chain_free(avc);
 440	}
 441}
 442
 443static void anon_vma_ctor(void *data)
 444{
 445	struct anon_vma *anon_vma = data;
 446
 447	init_rwsem(&anon_vma->rwsem);
 448	atomic_set(&anon_vma->refcount, 0);
 449	anon_vma->rb_root = RB_ROOT_CACHED;
 450}
 451
 452void __init anon_vma_init(void)
 453{
 454	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
 455			0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
 456			anon_vma_ctor);
 457	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
 458			SLAB_PANIC|SLAB_ACCOUNT);
 459}
 460
 461/*
 462 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
 463 *
 464 * Since there is no serialization what so ever against page_remove_rmap()
 465 * the best this function can do is return a refcount increased anon_vma
 466 * that might have been relevant to this page.
 467 *
 468 * The page might have been remapped to a different anon_vma or the anon_vma
 469 * returned may already be freed (and even reused).
 470 *
 471 * In case it was remapped to a different anon_vma, the new anon_vma will be a
 472 * child of the old anon_vma, and the anon_vma lifetime rules will therefore
 473 * ensure that any anon_vma obtained from the page will still be valid for as
 474 * long as we observe page_mapped() [ hence all those page_mapped() tests ].
 475 *
 476 * All users of this function must be very careful when walking the anon_vma
 477 * chain and verify that the page in question is indeed mapped in it
 478 * [ something equivalent to page_mapped_in_vma() ].
 479 *
 480 * Since anon_vma's slab is SLAB_TYPESAFE_BY_RCU and we know from
 481 * page_remove_rmap() that the anon_vma pointer from page->mapping is valid
 482 * if there is a mapcount, we can dereference the anon_vma after observing
 483 * those.
 484 */
 485struct anon_vma *page_get_anon_vma(struct page *page)
 486{
 487	struct anon_vma *anon_vma = NULL;
 488	unsigned long anon_mapping;
 489
 490	rcu_read_lock();
 491	anon_mapping = (unsigned long)READ_ONCE(page->mapping);
 492	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
 493		goto out;
 494	if (!page_mapped(page))
 495		goto out;
 496
 497	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
 498	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
 499		anon_vma = NULL;
 500		goto out;
 501	}
 502
 503	/*
 504	 * If this page is still mapped, then its anon_vma cannot have been
 505	 * freed.  But if it has been unmapped, we have no security against the
 506	 * anon_vma structure being freed and reused (for another anon_vma:
 507	 * SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero()
 508	 * above cannot corrupt).
 509	 */
 510	if (!page_mapped(page)) {
 511		rcu_read_unlock();
 512		put_anon_vma(anon_vma);
 513		return NULL;
 514	}
 515out:
 516	rcu_read_unlock();
 517
 518	return anon_vma;
 519}
 520
 521/*
 522 * Similar to page_get_anon_vma() except it locks the anon_vma.
 523 *
 524 * Its a little more complex as it tries to keep the fast path to a single
 525 * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
 526 * reference like with page_get_anon_vma() and then block on the mutex.
 
 527 */
 528struct anon_vma *page_lock_anon_vma_read(struct page *page)
 
 529{
 530	struct anon_vma *anon_vma = NULL;
 531	struct anon_vma *root_anon_vma;
 532	unsigned long anon_mapping;
 533
 534	rcu_read_lock();
 535	anon_mapping = (unsigned long)READ_ONCE(page->mapping);
 536	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
 537		goto out;
 538	if (!page_mapped(page))
 539		goto out;
 540
 541	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
 542	root_anon_vma = READ_ONCE(anon_vma->root);
 543	if (down_read_trylock(&root_anon_vma->rwsem)) {
 544		/*
 545		 * If the page is still mapped, then this anon_vma is still
 546		 * its anon_vma, and holding the mutex ensures that it will
 547		 * not go away, see anon_vma_free().
 548		 */
 549		if (!page_mapped(page)) {
 550			up_read(&root_anon_vma->rwsem);
 551			anon_vma = NULL;
 552		}
 553		goto out;
 554	}
 555
 
 
 
 
 
 
 556	/* trylock failed, we got to sleep */
 557	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
 558		anon_vma = NULL;
 559		goto out;
 560	}
 561
 562	if (!page_mapped(page)) {
 563		rcu_read_unlock();
 564		put_anon_vma(anon_vma);
 565		return NULL;
 566	}
 567
 568	/* we pinned the anon_vma, its safe to sleep */
 569	rcu_read_unlock();
 570	anon_vma_lock_read(anon_vma);
 571
 572	if (atomic_dec_and_test(&anon_vma->refcount)) {
 573		/*
 574		 * Oops, we held the last refcount, release the lock
 575		 * and bail -- can't simply use put_anon_vma() because
 576		 * we'll deadlock on the anon_vma_lock_write() recursion.
 577		 */
 578		anon_vma_unlock_read(anon_vma);
 579		__put_anon_vma(anon_vma);
 580		anon_vma = NULL;
 581	}
 582
 583	return anon_vma;
 584
 585out:
 586	rcu_read_unlock();
 587	return anon_vma;
 588}
 589
 590void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
 591{
 592	anon_vma_unlock_read(anon_vma);
 593}
 594
 595#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
 596/*
 597 * Flush TLB entries for recently unmapped pages from remote CPUs. It is
 598 * important if a PTE was dirty when it was unmapped that it's flushed
 599 * before any IO is initiated on the page to prevent lost writes. Similarly,
 600 * it must be flushed before freeing to prevent data leakage.
 601 */
 602void try_to_unmap_flush(void)
 603{
 604	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
 605
 606	if (!tlb_ubc->flush_required)
 607		return;
 608
 609	arch_tlbbatch_flush(&tlb_ubc->arch);
 610	tlb_ubc->flush_required = false;
 611	tlb_ubc->writable = false;
 612}
 613
 614/* Flush iff there are potentially writable TLB entries that can race with IO */
 615void try_to_unmap_flush_dirty(void)
 616{
 617	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
 618
 619	if (tlb_ubc->writable)
 620		try_to_unmap_flush();
 621}
 622
 
 
 
 
 
 
 
 
 
 
 623static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
 624{
 625	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
 
 626
 627	arch_tlbbatch_add_mm(&tlb_ubc->arch, mm);
 628	tlb_ubc->flush_required = true;
 629
 630	/*
 631	 * Ensure compiler does not re-order the setting of tlb_flush_batched
 632	 * before the PTE is cleared.
 633	 */
 634	barrier();
 635	mm->tlb_flush_batched = true;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 636
 637	/*
 638	 * If the PTE was dirty then it's best to assume it's writable. The
 639	 * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
 640	 * before the page is queued for IO.
 641	 */
 642	if (writable)
 643		tlb_ubc->writable = true;
 644}
 645
 646/*
 647 * Returns true if the TLB flush should be deferred to the end of a batch of
 648 * unmap operations to reduce IPIs.
 649 */
 650static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
 651{
 652	bool should_defer = false;
 653
 654	if (!(flags & TTU_BATCH_FLUSH))
 655		return false;
 656
 657	/* If remote CPUs need to be flushed then defer batch the flush */
 658	if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids)
 659		should_defer = true;
 660	put_cpu();
 661
 662	return should_defer;
 663}
 664
 665/*
 666 * Reclaim unmaps pages under the PTL but do not flush the TLB prior to
 667 * releasing the PTL if TLB flushes are batched. It's possible for a parallel
 668 * operation such as mprotect or munmap to race between reclaim unmapping
 669 * the page and flushing the page. If this race occurs, it potentially allows
 670 * access to data via a stale TLB entry. Tracking all mm's that have TLB
 671 * batching in flight would be expensive during reclaim so instead track
 672 * whether TLB batching occurred in the past and if so then do a flush here
 673 * if required. This will cost one additional flush per reclaim cycle paid
 674 * by the first operation at risk such as mprotect and mumap.
 675 *
 676 * This must be called under the PTL so that an access to tlb_flush_batched
 677 * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise
 678 * via the PTL.
 679 */
 680void flush_tlb_batched_pending(struct mm_struct *mm)
 681{
 682	if (data_race(mm->tlb_flush_batched)) {
 
 
 
 
 683		flush_tlb_mm(mm);
 684
 685		/*
 686		 * Do not allow the compiler to re-order the clearing of
 687		 * tlb_flush_batched before the tlb is flushed.
 688		 */
 689		barrier();
 690		mm->tlb_flush_batched = false;
 691	}
 692}
 693#else
 694static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
 695{
 696}
 697
 698static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
 699{
 700	return false;
 701}
 702#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
 703
 704/*
 705 * At what user virtual address is page expected in vma?
 706 * Caller should check the page is actually part of the vma.
 707 */
 708unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 709{
 710	if (PageAnon(page)) {
 711		struct anon_vma *page__anon_vma = page_anon_vma(page);
 
 712		/*
 713		 * Note: swapoff's unuse_vma() is more efficient with this
 714		 * check, and needs it to match anon_vma when KSM is active.
 715		 */
 716		if (!vma->anon_vma || !page__anon_vma ||
 717		    vma->anon_vma->root != page__anon_vma->root)
 718			return -EFAULT;
 719	} else if (!vma->vm_file) {
 720		return -EFAULT;
 721	} else if (vma->vm_file->f_mapping != compound_head(page)->mapping) {
 722		return -EFAULT;
 723	}
 724
 725	return vma_address(page, vma);
 726}
 727
 
 
 
 
 
 728pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
 729{
 730	pgd_t *pgd;
 731	p4d_t *p4d;
 732	pud_t *pud;
 733	pmd_t *pmd = NULL;
 734	pmd_t pmde;
 735
 736	pgd = pgd_offset(mm, address);
 737	if (!pgd_present(*pgd))
 738		goto out;
 739
 740	p4d = p4d_offset(pgd, address);
 741	if (!p4d_present(*p4d))
 742		goto out;
 743
 744	pud = pud_offset(p4d, address);
 745	if (!pud_present(*pud))
 746		goto out;
 747
 748	pmd = pmd_offset(pud, address);
 749	/*
 750	 * Some THP functions use the sequence pmdp_huge_clear_flush(), set_pmd_at()
 751	 * without holding anon_vma lock for write.  So when looking for a
 752	 * genuine pmde (in which to find pte), test present and !THP together.
 753	 */
 754	pmde = *pmd;
 755	barrier();
 756	if (!pmd_present(pmde) || pmd_trans_huge(pmde))
 757		pmd = NULL;
 758out:
 759	return pmd;
 760}
 761
 762struct page_referenced_arg {
 763	int mapcount;
 764	int referenced;
 765	unsigned long vm_flags;
 766	struct mem_cgroup *memcg;
 767};
 768/*
 769 * arg: page_referenced_arg will be passed
 770 */
 771static bool page_referenced_one(struct page *page, struct vm_area_struct *vma,
 772			unsigned long address, void *arg)
 773{
 774	struct page_referenced_arg *pra = arg;
 775	struct page_vma_mapped_walk pvmw = {
 776		.page = page,
 777		.vma = vma,
 778		.address = address,
 779	};
 780	int referenced = 0;
 781
 782	while (page_vma_mapped_walk(&pvmw)) {
 783		address = pvmw.address;
 784
 785		if (vma->vm_flags & VM_LOCKED) {
 
 
 
 786			page_vma_mapped_walk_done(&pvmw);
 787			pra->vm_flags |= VM_LOCKED;
 788			return false; /* To break the loop */
 789		}
 790
 791		if (pvmw.pte) {
 
 
 
 
 
 
 792			if (ptep_clear_flush_young_notify(vma, address,
 793						pvmw.pte)) {
 794				/*
 795				 * Don't treat a reference through
 796				 * a sequentially read mapping as such.
 797				 * If the page has been used in another mapping,
 798				 * we will catch it; if this other mapping is
 799				 * already gone, the unmap path will have set
 800				 * PG_referenced or activated the page.
 801				 */
 802				if (likely(!(vma->vm_flags & VM_SEQ_READ)))
 803					referenced++;
 804			}
 805		} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
 806			if (pmdp_clear_flush_young_notify(vma, address,
 807						pvmw.pmd))
 808				referenced++;
 809		} else {
 810			/* unexpected pmd-mapped page? */
 811			WARN_ON_ONCE(1);
 812		}
 813
 814		pra->mapcount--;
 815	}
 816
 817	if (referenced)
 818		clear_page_idle(page);
 819	if (test_and_clear_page_young(page))
 820		referenced++;
 821
 822	if (referenced) {
 823		pra->referenced++;
 824		pra->vm_flags |= vma->vm_flags;
 825	}
 826
 827	if (!pra->mapcount)
 828		return false; /* To break the loop */
 829
 830	return true;
 831}
 832
 833static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg)
 834{
 835	struct page_referenced_arg *pra = arg;
 836	struct mem_cgroup *memcg = pra->memcg;
 837
 838	if (!mm_match_cgroup(vma->vm_mm, memcg))
 839		return true;
 840
 841	return false;
 842}
 843
 844/**
 845 * page_referenced - test if the page was referenced
 846 * @page: the page to test
 847 * @is_locked: caller holds lock on the page
 848 * @memcg: target memory cgroup
 849 * @vm_flags: collect encountered vma->vm_flags that actually referenced the page
 850 *
 851 * Quick test_and_clear_referenced for all mappings to a page,
 852 * returns the number of ptes which referenced the page.
 853 */
 854int page_referenced(struct page *page,
 855		    int is_locked,
 856		    struct mem_cgroup *memcg,
 857		    unsigned long *vm_flags)
 858{
 859	int we_locked = 0;
 860	struct page_referenced_arg pra = {
 861		.mapcount = total_mapcount(page),
 862		.memcg = memcg,
 863	};
 864	struct rmap_walk_control rwc = {
 865		.rmap_one = page_referenced_one,
 866		.arg = (void *)&pra,
 867		.anon_lock = page_lock_anon_vma_read,
 
 868	};
 869
 870	*vm_flags = 0;
 871	if (!pra.mapcount)
 872		return 0;
 873
 874	if (!page_rmapping(page))
 875		return 0;
 876
 877	if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
 878		we_locked = trylock_page(page);
 879		if (!we_locked)
 880			return 1;
 881	}
 882
 883	/*
 884	 * If we are reclaiming on behalf of a cgroup, skip
 885	 * counting on behalf of references from different
 886	 * cgroups
 887	 */
 888	if (memcg) {
 889		rwc.invalid_vma = invalid_page_referenced_vma;
 890	}
 891
 892	rmap_walk(page, &rwc);
 893	*vm_flags = pra.vm_flags;
 894
 895	if (we_locked)
 896		unlock_page(page);
 897
 898	return pra.referenced;
 899}
 900
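/*
 * Editorial illustration, not part of the original file: a sketch of how a
 * reclaim-style caller might interpret page_referenced().  The special
 * handling of VM_EXEC mappings shown here only demonstrates the vm_flags
 * out-parameter; it is not the kernel's actual reclaim heuristic.
 */
static int __maybe_unused __example_page_was_referenced(struct page *page,
						struct mem_cgroup *memcg)
{
	unsigned long vm_flags;
	int referenced;

	/* the page is not locked here, so pass is_locked == 0 */
	referenced = page_referenced(page, 0, memcg, &vm_flags);

	if (referenced && (vm_flags & VM_EXEC))
		return 2;	/* referenced through an executable mapping */

	return referenced ? 1 : 0;
}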
 901static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 902			    unsigned long address, void *arg)
 903{
 904	struct page_vma_mapped_walk pvmw = {
 905		.page = page,
 906		.vma = vma,
 907		.address = address,
 908		.flags = PVMW_SYNC,
 909	};
 910	struct mmu_notifier_range range;
 911	int *cleaned = arg;
 912
 913	/*
 914	 * We have to assume the worst case, i.e. a pmd, for invalidation. Note
 915	 * that the page cannot be freed from this function.
 916	 */
 917	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
 918				0, vma, vma->vm_mm, address,
 919				vma_address_end(page, vma));
 920	mmu_notifier_invalidate_range_start(&range);
 921
 922	while (page_vma_mapped_walk(&pvmw)) {
 923		int ret = 0;
 924
 925		address = pvmw.address;
 926		if (pvmw.pte) {
 927			pte_t entry;
 928			pte_t *pte = pvmw.pte;
 929
 930			if (!pte_dirty(*pte) && !pte_write(*pte))
 931				continue;
 932
 933			flush_cache_page(vma, address, pte_pfn(*pte));
 934			entry = ptep_clear_flush(vma, address, pte);
 935			entry = pte_wrprotect(entry);
 936			entry = pte_mkclean(entry);
 937			set_pte_at(vma->vm_mm, address, pte, entry);
 938			ret = 1;
 939		} else {
 940#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 941			pmd_t *pmd = pvmw.pmd;
 942			pmd_t entry;
 943
 944			if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
 945				continue;
 946
 947			flush_cache_page(vma, address, page_to_pfn(page));
 
 948			entry = pmdp_invalidate(vma, address, pmd);
 949			entry = pmd_wrprotect(entry);
 950			entry = pmd_mkclean(entry);
 951			set_pmd_at(vma->vm_mm, address, pmd, entry);
 952			ret = 1;
 953#else
 954			/* unexpected pmd-mapped page? */
 955			WARN_ON_ONCE(1);
 956#endif
 957		}
 958
 959		/*
 960		 * No need to call mmu_notifier_invalidate_range() as we are
 961		 * downgrading page table protection, not changing it to point
 962		 * to a new page.
 963		 *
 964		 * See Documentation/vm/mmu_notifier.rst
 965		 */
 966		if (ret)
 967			(*cleaned)++;
 968	}
 969
 970	mmu_notifier_invalidate_range_end(&range);
 971
 972	return true;
 973}
 974
 975static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
 976{
 977	if (vma->vm_flags & VM_SHARED)
 978		return false;
 979
 980	return true;
 981}
 982
 983int page_mkclean(struct page *page)
 984{
 985	int cleaned = 0;
 986	struct address_space *mapping;
 987	struct rmap_walk_control rwc = {
 988		.arg = (void *)&cleaned,
 989		.rmap_one = page_mkclean_one,
 990		.invalid_vma = invalid_mkclean_vma,
 991	};
 992
 993	BUG_ON(!PageLocked(page));
 994
 995	if (!page_mapped(page))
 996		return 0;
 997
 998	mapping = page_mapping(page);
 999	if (!mapping)
1000		return 0;
1001
1002	rmap_walk(page, &rwc);
1003
1004	return cleaned;
1005}
1006EXPORT_SYMBOL_GPL(page_mkclean);
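/*
 * Editorial illustration, not part of the original file: a hedged sketch of
 * the typical page_mkclean() calling pattern when preparing a page for
 * writeback.  The page lock is taken here only for the example; real
 * callers usually already hold it, and what they do with the return value
 * is up to them.
 */
static int __maybe_unused __example_clean_page_for_io(struct page *page)
{
	int cleaned;

	lock_page(page);
	cleaned = page_mkclean(page);	/* write-protect and clean every pte */
	unlock_page(page);

	return cleaned;
}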
1007
1008/**
1009 * page_move_anon_rmap - move a page to our anon_vma
1010 * @page:	the page to move to our anon_vma
1011 * @vma:	the vma the page belongs to
1012 *
1013 * When a page belongs exclusively to one process after a COW event,
1014 * that page can be moved into the anon_vma that belongs to just that
1015 * process, so the rmap code will not search the parent or sibling
1016 * processes.
1017 */
1018void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
1019{
1020	struct anon_vma *anon_vma = vma->anon_vma;
1021
1022	page = compound_head(page);
1023
1024	VM_BUG_ON_PAGE(!PageLocked(page), page);
1025	VM_BUG_ON_VMA(!anon_vma, vma);
1026
1027	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
1028	/*
1029	 * Ensure that anon_vma and the PAGE_MAPPING_ANON bit are written
1030	 * simultaneously, so a concurrent reader (e.g. page_referenced()'s
1031	 * PageAnon()) will not see one without the other.
1032	 */
1033	WRITE_ONCE(page->mapping, (struct address_space *) anon_vma);
 
1034}
1035
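/*
 * Editorial illustration, not part of the original file: the precondition
 * for page_move_anon_rmap() in a COW-reuse style caller.  The helper name
 * is hypothetical; the key assumption is that the locked page is mapped by
 * this process only.
 */
static void __maybe_unused __example_reuse_exclusive_page(struct page *page,
					struct vm_area_struct *vma)
{
	VM_BUG_ON_PAGE(!PageLocked(page), page);

	/* only valid when no other process can still map this page */
	page_move_anon_rmap(page, vma);
}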
1036/**
1037 * __page_set_anon_rmap - set up new anonymous rmap
1038 * @page:	Page or Hugepage to add to rmap
1039 * @vma:	VM area to add page to.
1040 * @address:	the user virtual address of the mapping
1041 * @exclusive:	the page is exclusively owned by the current process
1042 */
1043static void __page_set_anon_rmap(struct page *page,
1044	struct vm_area_struct *vma, unsigned long address, int exclusive)
1045{
1046	struct anon_vma *anon_vma = vma->anon_vma;
1047
1048	BUG_ON(!anon_vma);
1049
1050	if (PageAnon(page))
1051		return;
1052
1053	/*
1054	 * If the page isn't exclusively mapped into this vma,
1055	 * we must use the _oldest_ possible anon_vma for the
1056	 * page mapping!
1057	 */
1058	if (!exclusive)
1059		anon_vma = anon_vma->root;
1060
1061	/*
1062	 * page_idle does a lockless/optimistic rmap scan on page->mapping.
1063	 * Make sure the compiler doesn't split the stores of anon_vma and
1064	 * the PAGE_MAPPING_ANON type identifier, otherwise the rmap code
1065	 * could mistake the mapping for a struct address_space and crash.
1066	 */
1067	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
1068	WRITE_ONCE(page->mapping, (struct address_space *) anon_vma);
1069	page->index = linear_page_index(vma, address);
1070}
1071
1072/**
1073 * __page_check_anon_rmap - sanity check anonymous rmap addition
1074 * @page:	the page to add the mapping to
1075 * @vma:	the vm area in which the mapping is added
1076 * @address:	the user virtual address mapped
1077 */
1078static void __page_check_anon_rmap(struct page *page,
1079	struct vm_area_struct *vma, unsigned long address)
1080{
 
1081	/*
1082	 * The page's anon-rmap details (mapping and index) are guaranteed to
1083	 * be set up correctly at this point.
1084	 *
1085	 * We have exclusion against page_add_anon_rmap because the caller
1086	 * always holds the page locked.
1087	 *
1088	 * We have exclusion against page_add_new_anon_rmap because those pages
1089	 * are initially only visible via the pagetables, and the pte is locked
1090	 * over the call to page_add_new_anon_rmap.
1091	 */
1092	VM_BUG_ON_PAGE(page_anon_vma(page)->root != vma->anon_vma->root, page);
 
1093	VM_BUG_ON_PAGE(page_to_pgoff(page) != linear_page_index(vma, address),
1094		       page);
1095}
1096
1097/**
1098 * page_add_anon_rmap - add pte mapping to an anonymous page
1099 * @page:	the page to add the mapping to
1100 * @vma:	the vm area in which the mapping is added
1101 * @address:	the user virtual address mapped
1102 * @compound:	charge the page as compound or small page
1103 *
1104 * The caller needs to hold the pte lock, and the page must be locked in
1105 * the anon_vma case: to serialize the mapping and index checks after setting,
1106 * and to ensure that PageAnon is not being upgraded racily to PageKsm
1107 * (but PageKsm is never downgraded to PageAnon).
1108 */
1109void page_add_anon_rmap(struct page *page,
1110	struct vm_area_struct *vma, unsigned long address, bool compound)
1111{
1112	do_page_add_anon_rmap(page, vma, address, compound ? RMAP_COMPOUND : 0);
1113}
1114
1115/*
1116 * Special version of the above for do_swap_page, which often runs
1117 * into pages that are exclusively owned by the current process.
1118 * Everybody else should continue to use page_add_anon_rmap above.
1119 */
1120void do_page_add_anon_rmap(struct page *page,
1121	struct vm_area_struct *vma, unsigned long address, int flags)
1122{
1123	bool compound = flags & RMAP_COMPOUND;
1124	bool first;
1125
1126	if (unlikely(PageKsm(page)))
1127		lock_page_memcg(page);
1128	else
1129		VM_BUG_ON_PAGE(!PageLocked(page), page);
1130
1131	if (compound) {
1132		atomic_t *mapcount;
1133		VM_BUG_ON_PAGE(!PageLocked(page), page);
1134		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
1135		mapcount = compound_mapcount_ptr(page);
1136		first = atomic_inc_and_test(mapcount);
1137	} else {
1138		first = atomic_inc_and_test(&page->_mapcount);
1139	}
1140
1141	if (first) {
1142		int nr = compound ? thp_nr_pages(page) : 1;
1143		/*
1144		 * We use the irq-unsafe __{inc|mod}_lruvec_page_state because
1145		 * these counters are not modified in interrupt context, and
1146		 * the pte lock (a spinlock) is held, which implies preemption
1147		 * is disabled.
1148		 */
1149		if (compound)
1150			__mod_lruvec_page_state(page, NR_ANON_THPS, nr);
1151		__mod_lruvec_page_state(page, NR_ANON_MAPPED, nr);
1152	}
1153
1154	if (unlikely(PageKsm(page))) {
1155		unlock_page_memcg(page);
1156		return;
1157	}
1158
1159	/* address might be in next vma when migration races vma_adjust */
1160	if (first)
1161		__page_set_anon_rmap(page, vma, address,
1162				flags & RMAP_EXCLUSIVE);
1163	else
1164		__page_check_anon_rmap(page, vma, address);
1165}
1166
1167/**
1168 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
1169 * @page:	the page to add the mapping to
1170 * @vma:	the vm area in which the mapping is added
1171 * @address:	the user virtual address mapped
1172 * @compound:	charge the page as compound or small page
1173 *
1174 * Same as page_add_anon_rmap but must only be called on *new* pages.
1175 * This means the inc-and-test can be bypassed.
1176 * Page does not have to be locked.
1177 */
1178void page_add_new_anon_rmap(struct page *page,
1179	struct vm_area_struct *vma, unsigned long address, bool compound)
1180{
1181	int nr = compound ? thp_nr_pages(page) : 1;
1182
1183	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
1184	__SetPageSwapBacked(page);
1185	if (compound) {
1186		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
1187		/* increment count (starts at -1) */
1188		atomic_set(compound_mapcount_ptr(page), 0);
1189		if (hpage_pincount_available(page))
1190			atomic_set(compound_pincount_ptr(page), 0);
1191
1192		__mod_lruvec_page_state(page, NR_ANON_THPS, nr);
1193	} else {
1194		/* Anon THP always mapped first with PMD */
1195		VM_BUG_ON_PAGE(PageTransCompound(page), page);
1196		/* increment count (starts at -1) */
1197		atomic_set(&page->_mapcount, 0);
1198	}
 
1199	__mod_lruvec_page_state(page, NR_ANON_MAPPED, nr);
1200	__page_set_anon_rmap(page, vma, address, 1);
1201}
1202
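/*
 * Editorial illustration, not part of the original file: a hedged sketch of
 * the ordering an anonymous-fault style caller follows.  Page allocation,
 * the pte lock and LRU insertion are elided; the point is that
 * page_add_new_anon_rmap() is called for a page not yet visible to anyone
 * else, before the pte that maps it is installed.
 */
static void __maybe_unused __example_map_new_anon_page(struct page *page,
			struct vm_area_struct *vma, unsigned long address,
			pte_t *pte)
{
	pte_t entry = mk_pte(page, vma->vm_page_prot);

	/* new page: no other mapping can exist, so no inc-and-test is needed */
	page_add_new_anon_rmap(page, vma, address, false);
	set_pte_at(vma->vm_mm, address, pte, entry);
}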
1203/**
1204 * page_add_file_rmap - add pte mapping to a file page
1205 * @page: the page to add the mapping to
1206 * @compound: charge the page as compound or small page
1207 *
1208 * The caller needs to hold the pte lock.
1209 */
1210void page_add_file_rmap(struct page *page, bool compound)
1211{
1212	int i, nr = 1;
1213
1214	VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
1215	lock_page_memcg(page);
1216	if (compound && PageTransHuge(page)) {
1217		int nr_pages = thp_nr_pages(page);
1218
1219		for (i = 0, nr = 0; i < nr_pages; i++) {
1220			if (atomic_inc_and_test(&page[i]._mapcount))
1221				nr++;
1222		}
1223		if (!atomic_inc_and_test(compound_mapcount_ptr(page)))
1224			goto out;
1225		if (PageSwapBacked(page))
1226			__mod_lruvec_page_state(page, NR_SHMEM_PMDMAPPED,
1227						nr_pages);
1228		else
1229			__mod_lruvec_page_state(page, NR_FILE_PMDMAPPED,
1230						nr_pages);
1231	} else {
1232		if (PageTransCompound(page) && page_mapping(page)) {
1233			VM_WARN_ON_ONCE(!PageLocked(page));
1234
1235			SetPageDoubleMap(compound_head(page));
1236			if (PageMlocked(page))
1237				clear_page_mlock(compound_head(page));
1238		}
1239		if (!atomic_inc_and_test(&page->_mapcount))
1240			goto out;
1241	}
1242	__mod_lruvec_page_state(page, NR_FILE_MAPPED, nr);
1243out:
1244	unlock_page_memcg(page);
1245}
1246
1247static void page_remove_file_rmap(struct page *page, bool compound)
1248{
1249	int i, nr = 1;
1250
1251	VM_BUG_ON_PAGE(compound && !PageHead(page), page);
1252
1253	/* Hugepages are not counted in NR_FILE_MAPPED for now. */
1254	if (unlikely(PageHuge(page))) {
1255		/* hugetlb pages are always mapped with pmds */
1256		atomic_dec(compound_mapcount_ptr(page));
1257		return;
1258	}
1259
1260	/* page still mapped by someone else? */
1261	if (compound && PageTransHuge(page)) {
1262		int nr_pages = thp_nr_pages(page);
1263
1264		for (i = 0, nr = 0; i < nr_pages; i++) {
1265			if (atomic_add_negative(-1, &page[i]._mapcount))
1266				nr++;
1267		}
1268		if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
1269			return;
1270		if (PageSwapBacked(page))
1271			__mod_lruvec_page_state(page, NR_SHMEM_PMDMAPPED,
1272						-nr_pages);
1273		else
1274			__mod_lruvec_page_state(page, NR_FILE_PMDMAPPED,
1275						-nr_pages);
1276	} else {
1277		if (!atomic_add_negative(-1, &page->_mapcount))
1278			return;
1279	}
1280
1281	/*
1282	 * We use the irq-unsafe __{inc|mod}_lruvec_page_state because
1283	 * these counters are not modified in interrupt context, and
1284	 * the pte lock (a spinlock) is held, which implies preemption is disabled.
1285	 */
1286	__mod_lruvec_page_state(page, NR_FILE_MAPPED, -nr);
1287
1288	if (unlikely(PageMlocked(page)))
1289		clear_page_mlock(page);
1290}
1291
1292static void page_remove_anon_compound_rmap(struct page *page)
1293{
1294	int i, nr;
1295
1296	if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
1297		return;
1298
1299	/* Hugepages are not counted in NR_ANON_PAGES for now. */
1300	if (unlikely(PageHuge(page)))
1301		return;
1302
1303	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
1304		return;
1305
1306	__mod_lruvec_page_state(page, NR_ANON_THPS, -thp_nr_pages(page));
1307
1308	if (TestClearPageDoubleMap(page)) {
1309		/*
1310		 * Subpages can be mapped with PTEs too. Check how many of
1311		 * them are still mapped.
1312		 */
1313		for (i = 0, nr = 0; i < thp_nr_pages(page); i++) {
1314			if (atomic_add_negative(-1, &page[i]._mapcount))
1315				nr++;
1316		}
1317
1318		/*
1319		 * Queue the page for deferred split if at least one small
1320		 * page of the compound page is unmapped, but at least one
1321		 * small page is still mapped.
1322		 */
1323		if (nr && nr < thp_nr_pages(page))
1324			deferred_split_huge_page(page);
1325	} else {
1326		nr = thp_nr_pages(page);
1327	}
1328
1329	if (unlikely(PageMlocked(page)))
1330		clear_page_mlock(page);
1331
1332	if (nr)
1333		__mod_lruvec_page_state(page, NR_ANON_MAPPED, -nr);
1334}
1335
1336/**
1337 * page_remove_rmap - take down pte mapping from a page
1338 * @page:	page to remove mapping from
1339 * @compound:	uncharge the page as compound or small page
1340 *
1341 * The caller needs to hold the pte lock.
1342 */
1343void page_remove_rmap(struct page *page, bool compound)
1344{
1345	lock_page_memcg(page);
1346
1347	if (!PageAnon(page)) {
1348		page_remove_file_rmap(page, compound);
1349		goto out;
1350	}
1351
1352	if (compound) {
1353		page_remove_anon_compound_rmap(page);
1354		goto out;
1355	}
1356
1357	/* page still mapped by someone else? */
1358	if (!atomic_add_negative(-1, &page->_mapcount))
1359		goto out;
1360
1361	/*
1362	 * We use the irq-unsafe __{inc|mod}_lruvec_page_state because
1363	 * these counters are not modified in interrupt context, and
1364	 * the pte lock (a spinlock) is held, which implies preemption is disabled.
1365	 */
1366	__dec_lruvec_page_state(page, NR_ANON_MAPPED);
1367
1368	if (unlikely(PageMlocked(page)))
1369		clear_page_mlock(page);
1370
1371	if (PageTransCompound(page))
1372		deferred_split_huge_page(compound_head(page));
1373
1374	/*
1375	 * It would be tidy to reset the PageAnon mapping here,
1376	 * but that might overwrite a racing page_add_anon_rmap
1377	 * which increments mapcount after us but sets mapping
1378	 * before us: so leave the reset to free_unref_page,
1379	 * and remember that it's only reliable while mapped.
1380	 * Leaving it set also helps swapoff to reinstate ptes
1381	 * faster for those pages still in swapcache.
1382	 */
1383out:
1384	unlock_page_memcg(page);
1385}
1386
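/*
 * Editorial illustration, not part of the original file: a minimal sketch
 * of the zap-style sequence around page_remove_rmap().  The pte lock is
 * assumed to be held; TLB flushing and dirty/accessed bit propagation are
 * deliberately omitted.
 */
static void __maybe_unused __example_zap_one_pte(struct mm_struct *mm,
			unsigned long address, pte_t *pte, struct page *page)
{
	/* clear the pte first so no new reference can be created through it */
	ptep_get_and_clear(mm, address, pte);

	/* then drop the reverse mapping and the reference it represented */
	page_remove_rmap(page, false);
	put_page(page);
}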
1387/*
1388 * @arg: enum ttu_flags will be passed to this argument
1389 */
1390static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1391		     unsigned long address, void *arg)
1392{
1393	struct mm_struct *mm = vma->vm_mm;
1394	struct page_vma_mapped_walk pvmw = {
1395		.page = page,
1396		.vma = vma,
1397		.address = address,
1398	};
1399	pte_t pteval;
1400	struct page *subpage;
1401	bool ret = true;
1402	struct mmu_notifier_range range;
1403	enum ttu_flags flags = (enum ttu_flags)(long)arg;
1404
1405	/*
1406	 * When racing against e.g. zap_pte_range() on another cpu,
1407	 * in between its ptep_get_and_clear_full() and page_remove_rmap(),
1408	 * try_to_unmap() may return before page_mapped() has become false,
1409	 * if page table locking is skipped: use TTU_SYNC to wait for that.
1410	 */
1411	if (flags & TTU_SYNC)
1412		pvmw.flags = PVMW_SYNC;
1413
1414	if (flags & TTU_SPLIT_HUGE_PMD)
1415		split_huge_pmd_address(vma, address, false, page);
1416
1417	/*
1418	 * For THP, we have to assume the worst case, i.e. a pmd, for invalidation.
1419	 * For hugetlb, it could be much worse if we need to do pud
1420	 * invalidation in the case of pmd sharing.
1421	 *
1422	 * Note that the page cannot be freed in this function, as the caller of
1423	 * try_to_unmap() must hold a reference on the page.
1424	 */
1425	range.end = PageKsm(page) ?
1426			address + PAGE_SIZE : vma_address_end(page, vma);
1427	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
1428				address, range.end);
1429	if (PageHuge(page)) {
1430		/*
1431		 * If sharing is possible, start and end will be adjusted
1432		 * accordingly.
1433		 */
1434		adjust_range_if_pmd_sharing_possible(vma, &range.start,
1435						     &range.end);
1436	}
1437	mmu_notifier_invalidate_range_start(&range);
1438
1439	while (page_vma_mapped_walk(&pvmw)) {
1440		/*
1441		 * If the page is mlock()d, we cannot swap it out.
1442		 */
1443		if (!(flags & TTU_IGNORE_MLOCK) &&
1444		    (vma->vm_flags & VM_LOCKED)) {
1445			/*
1446			 * PTE-mapped THP are never marked as mlocked: so do
1447			 * not set it on a DoubleMap THP, nor on an Anon THP
1448			 * (which may still be PTE-mapped after DoubleMap was
1449			 * cleared).  But stop unmapping even in those cases.
1450			 */
1451			if (!PageTransCompound(page) || (PageHead(page) &&
1452			     !PageDoubleMap(page) && !PageAnon(page)))
1453				mlock_vma_page(page);
1454			page_vma_mapped_walk_done(&pvmw);
1455			ret = false;
1456			break;
1457		}
1458
1459		/* Unexpected PMD-mapped THP? */
1460		VM_BUG_ON_PAGE(!pvmw.pte, page);
1461
1462		subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
1463		address = pvmw.address;
1464
1465		if (PageHuge(page) && !PageAnon(page)) {
1466			/*
1467			 * To call huge_pmd_unshare, i_mmap_rwsem must be
1468			 * held in write mode.  Caller needs to explicitly
1469			 * do this outside rmap routines.
1470			 */
1471			VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
1472			if (huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
1473				/*
1474				 * huge_pmd_unshare unmapped an entire PMD
1475				 * page.  There is no way of knowing exactly
1476				 * which PMDs may be cached for this mm, so
1477				 * we must flush them all.  start/end were
1478				 * already adjusted above to cover this range.
1479				 */
1480				flush_cache_range(vma, range.start, range.end);
1481				flush_tlb_range(vma, range.start, range.end);
1482				mmu_notifier_invalidate_range(mm, range.start,
1483							      range.end);
1484
1485				/*
1486				 * The ref count of the PMD page was dropped
1487				 * which is part of the way map counting
1488				 * is done for shared PMDs.  Return 'true'
1489				 * here.  When there is no other sharing,
1490				 * huge_pmd_unshare returns false and we will
1491				 * unmap the actual page and drop map count
1492				 * to zero.
1493				 */
1494				page_vma_mapped_walk_done(&pvmw);
1495				break;
1496			}
1497		}
1498
1499		/* Nuke the page table entry. */
1500		flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
1501		if (should_defer_flush(mm, flags)) {
1502			/*
1503			 * We clear the PTE but do not flush so potentially
1504			 * a remote CPU could still be writing to the page.
1505			 * If the entry was previously clean then the
1506			 * architecture must guarantee that a clear->dirty
1507			 * transition on a cached TLB entry is written through
1508			 * and traps if the PTE is unmapped.
1509			 */
1510			pteval = ptep_get_and_clear(mm, address, pvmw.pte);
1511
1512			set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
1513		} else {
1514			pteval = ptep_clear_flush(vma, address, pvmw.pte);
1515		}
1516
1517		/* Move the dirty bit to the page. Now the pte is gone. */
1518		if (pte_dirty(pteval))
1519			set_page_dirty(page);
1520
1521		/* Update high watermark before we lower rss */
1522		update_hiwater_rss(mm);
1523
1524		if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
1525			pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
1526			if (PageHuge(page)) {
1527				hugetlb_count_sub(compound_nr(page), mm);
1528				set_huge_swap_pte_at(mm, address,
1529						     pvmw.pte, pteval,
1530						     vma_mmu_pagesize(vma));
1531			} else {
1532				dec_mm_counter(mm, mm_counter(page));
1533				set_pte_at(mm, address, pvmw.pte, pteval);
1534			}
1535
1536		} else if (pte_unused(pteval) && !userfaultfd_armed(vma)) {
1537			/*
1538			 * The guest indicated that the page content is of no
1539			 * interest anymore. Simply discard the pte, vmscan
1540			 * will take care of the rest.
1541			 * A future reference will then fault in a new zero
1542			 * page. When userfaultfd is active, we must not drop
1543			 * this page though, as its main user (postcopy
1544			 * migration) will not expect userfaults on already
1545			 * copied pages.
1546			 */
1547			dec_mm_counter(mm, mm_counter(page));
1548			/* We have to invalidate as we cleared the pte */
1549			mmu_notifier_invalidate_range(mm, address,
1550						      address + PAGE_SIZE);
1551		} else if (PageAnon(page)) {
1552			swp_entry_t entry = { .val = page_private(subpage) };
1553			pte_t swp_pte;
1554			/*
1555			 * Store the swap location in the pte.
1556			 * See handle_pte_fault() ...
1557			 */
1558			if (unlikely(PageSwapBacked(page) != PageSwapCache(page))) {
 
1559				WARN_ON_ONCE(1);
1560				ret = false;
1561				/* We have to invalidate as we cleared the pte */
1562				mmu_notifier_invalidate_range(mm, address,
1563							address + PAGE_SIZE);
1564				page_vma_mapped_walk_done(&pvmw);
1565				break;
1566			}
1567
1568			/* MADV_FREE page check */
1569			if (!PageSwapBacked(page)) {
1570				if (!PageDirty(page)) {
1571					/* Invalidate as we cleared the pte */
1572					mmu_notifier_invalidate_range(mm,
1573						address, address + PAGE_SIZE);
1574					dec_mm_counter(mm, MM_ANONPAGES);
1575					goto discard;
1576				}
1577
1578				/*
1579				 * If the page was redirtied, it cannot be
1580				 * discarded. Remap the page to page table.
1581				 */
1582				set_pte_at(mm, address, pvmw.pte, pteval);
1583				SetPageSwapBacked(page);
1584				ret = false;
1585				page_vma_mapped_walk_done(&pvmw);
1586				break;
1587			}
1588
1589			if (swap_duplicate(entry) < 0) {
1590				set_pte_at(mm, address, pvmw.pte, pteval);
1591				ret = false;
1592				page_vma_mapped_walk_done(&pvmw);
1593				break;
1594			}
1595			if (arch_unmap_one(mm, vma, address, pteval) < 0) {
1596				set_pte_at(mm, address, pvmw.pte, pteval);
1597				ret = false;
1598				page_vma_mapped_walk_done(&pvmw);
1599				break;
1600			}
1601			if (list_empty(&mm->mmlist)) {
1602				spin_lock(&mmlist_lock);
1603				if (list_empty(&mm->mmlist))
1604					list_add(&mm->mmlist, &init_mm.mmlist);
1605				spin_unlock(&mmlist_lock);
1606			}
1607			dec_mm_counter(mm, MM_ANONPAGES);
1608			inc_mm_counter(mm, MM_SWAPENTS);
1609			swp_pte = swp_entry_to_pte(entry);
1610			if (pte_soft_dirty(pteval))
1611				swp_pte = pte_swp_mksoft_dirty(swp_pte);
1612			if (pte_uffd_wp(pteval))
1613				swp_pte = pte_swp_mkuffd_wp(swp_pte);
1614			set_pte_at(mm, address, pvmw.pte, swp_pte);
1615			/* Invalidate as we cleared the pte */
1616			mmu_notifier_invalidate_range(mm, address,
1617						      address + PAGE_SIZE);
1618		} else {
1619			/*
1620			 * This is a locked file-backed page, thus it cannot
1621			 * be removed from the page cache and replaced by a new
1622			 * page before mmu_notifier_invalidate_range_end, so no
1623			 * concurrent thread can update its page table to point
1624			 * at a new page while a device is still using this
1625			 * page.
1626			 *
1627			 * See Documentation/vm/mmu_notifier.rst
1628			 */
1629			dec_mm_counter(mm, mm_counter_file(page));
1630		}
1631discard:
1632		/*
1633		 * No need to call mmu_notifier_invalidate_range(): it has been
1634		 * done above for all cases requiring it to happen under the page
1635		 * table lock, before mmu_notifier_invalidate_range_end()
1636		 *
1637		 * See Documentation/vm/mmu_notifier.rst
1638		 */
1639		page_remove_rmap(subpage, PageHuge(page));
1640		put_page(page);
1641	}
1642
1643	mmu_notifier_invalidate_range_end(&range);
1644
1645	return ret;
1646}
1647
1648static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
1649{
1650	return vma_is_temporary_stack(vma);
1651}
1652
1653static int page_not_mapped(struct page *page)
1654{
1655	return !page_mapped(page);
1656}
1657
1658/**
1659 * try_to_unmap - try to remove all page table mappings to a page
1660 * @page: the page to get unmapped
1661 * @flags: action and flags
1662 *
1663 * Tries to remove all the page table entries which are mapping this
1664 * page, used in the pageout path.  Caller must hold the page lock.
1665 *
1666 * It is the caller's responsibility to check if the page is still
1667 * mapped when needed (use TTU_SYNC to prevent accounting races).
1668 */
1669void try_to_unmap(struct page *page, enum ttu_flags flags)
1670{
1671	struct rmap_walk_control rwc = {
1672		.rmap_one = try_to_unmap_one,
1673		.arg = (void *)flags,
1674		.done = page_not_mapped,
1675		.anon_lock = page_lock_anon_vma_read,
1676	};
1677
1678	if (flags & TTU_RMAP_LOCKED)
1679		rmap_walk_locked(page, &rwc);
1680	else
1681		rmap_walk(page, &rwc);
1682}
1683
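/*
 * Editorial illustration, not part of the original file: a hedged sketch of
 * a pageout-style caller of try_to_unmap().  Because try_to_unmap() returns
 * void, the caller re-checks page_mapped() afterwards; TTU_SYNC makes that
 * check reliable against racing unmaps, as noted above.  Real callers pick
 * their own flag combination.
 */
static bool __maybe_unused __example_unmap_for_reclaim(struct page *page)
{
	VM_BUG_ON_PAGE(!PageLocked(page), page);

	try_to_unmap(page, TTU_SYNC);

	/* the walk may bail out early (e.g. on an mlocked vma), so verify */
	return !page_mapped(page);
}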
1684/*
1685 * @arg: enum ttu_flags will be passed to this argument.
1686 *
1687 * If TTU_SPLIT_HUGE_PMD is specified any PMD mappings will be split into PTEs
1688 * containing migration entries.
1689 */
1690static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma,
1691		     unsigned long address, void *arg)
1692{
1693	struct mm_struct *mm = vma->vm_mm;
1694	struct page_vma_mapped_walk pvmw = {
1695		.page = page,
1696		.vma = vma,
1697		.address = address,
1698	};
1699	pte_t pteval;
1700	struct page *subpage;
1701	bool ret = true;
1702	struct mmu_notifier_range range;
1703	enum ttu_flags flags = (enum ttu_flags)(long)arg;
1704
1705	/*
1706	 * When racing against e.g. zap_pte_range() on another cpu,
1707	 * in between its ptep_get_and_clear_full() and page_remove_rmap(),
1708	 * try_to_migrate() may return before page_mapped() has become false,
1709	 * if page table locking is skipped: use TTU_SYNC to wait for that.
1710	 */
1711	if (flags & TTU_SYNC)
1712		pvmw.flags = PVMW_SYNC;
1713
1714	/*
1715	 * unmap_page() in mm/huge_memory.c is the only user of migration with
1716	 * TTU_SPLIT_HUGE_PMD and it wants to freeze.
1717	 */
1718	if (flags & TTU_SPLIT_HUGE_PMD)
1719		split_huge_pmd_address(vma, address, true, page);
1720
1721	/*
1722	 * For THP, we have to assume the worst case, i.e. a pmd, for invalidation.
1723	 * For hugetlb, it could be much worse if we need to do pud
1724	 * invalidation in the case of pmd sharing.
1725	 *
1726	 * Note that the page cannot be freed in this function, as the caller of
1727	 * try_to_migrate() must hold a reference on the page.
1728	 */
1729	range.end = PageKsm(page) ?
1730			address + PAGE_SIZE : vma_address_end(page, vma);
1731	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
1732				address, range.end);
1733	if (PageHuge(page)) {
1734		/*
1735		 * If sharing is possible, start and end will be adjusted
1736		 * accordingly.
1737		 */
1738		adjust_range_if_pmd_sharing_possible(vma, &range.start,
1739						     &range.end);
1740	}
1741	mmu_notifier_invalidate_range_start(&range);
1742
1743	while (page_vma_mapped_walk(&pvmw)) {
1744#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1745		/* PMD-mapped THP migration entry */
1746		if (!pvmw.pte) {
1747			VM_BUG_ON_PAGE(PageHuge(page) ||
1748				       !PageTransCompound(page), page);
1749
1750			set_pmd_migration_entry(&pvmw, page);
1751			continue;
1752		}
1753#endif
1754
1755		/* Unexpected PMD-mapped THP? */
1756		VM_BUG_ON_PAGE(!pvmw.pte, page);
1757
1758		subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
1759		address = pvmw.address;
1760
1761		if (PageHuge(page) && !PageAnon(page)) {
1762			/*
1763			 * To call huge_pmd_unshare, i_mmap_rwsem must be
1764			 * held in write mode.  Caller needs to explicitly
1765			 * do this outside rmap routines.
1766			 */
1767			VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
1768			if (huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
1769				/*
1770				 * huge_pmd_unshare unmapped an entire PMD
1771				 * page.  There is no way of knowing exactly
1772				 * which PMDs may be cached for this mm, so
1773				 * we must flush them all.  start/end were
1774				 * already adjusted above to cover this range.
1775				 */
1776				flush_cache_range(vma, range.start, range.end);
1777				flush_tlb_range(vma, range.start, range.end);
1778				mmu_notifier_invalidate_range(mm, range.start,
1779							      range.end);
1780
1781				/*
1782				 * The ref count of the PMD page was dropped
1783				 * which is part of the way map counting
1784				 * is done for shared PMDs.  Return 'true'
1785				 * here.  When there is no other sharing,
1786				 * huge_pmd_unshare returns false and we will
1787				 * unmap the actual page and drop map count
1788				 * to zero.
1789				 */
1790				page_vma_mapped_walk_done(&pvmw);
1791				break;
1792			}
1793		}
1794
1795		/* Nuke the page table entry. */
1796		flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
1797		pteval = ptep_clear_flush(vma, address, pvmw.pte);
1798
1799		/* Move the dirty bit to the page. Now the pte is gone. */
1800		if (pte_dirty(pteval))
1801			set_page_dirty(page);
1802
1803		/* Update high watermark before we lower rss */
1804		update_hiwater_rss(mm);
1805
1806		if (is_zone_device_page(page)) {
 
1807			swp_entry_t entry;
1808			pte_t swp_pte;
1809
1810			/*
1811			 * Store the pfn of the page in a special migration
1812			 * pte. do_swap_page() will wait until the migration
1813			 * pte is removed and then restart fault handling.
1814			 */
1815			entry = make_readable_migration_entry(
1816							page_to_pfn(page));
1817			swp_pte = swp_entry_to_pte(entry);
1818
1819			/*
1820			 * pteval maps a zone device page and is therefore
1821			 * a swap pte.
1822			 */
1823			if (pte_swp_soft_dirty(pteval))
1824				swp_pte = pte_swp_mksoft_dirty(swp_pte);
1825			if (pte_swp_uffd_wp(pteval))
1826				swp_pte = pte_swp_mkuffd_wp(swp_pte);
1827			set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
1828			/*
1829			 * No need to invalidate here: it will synchronize
1830			 * against the special swap migration pte.
1831			 *
1832			 * The assignment to subpage above was computed from a
1833			 * swap PTE which results in an invalid pointer.
1834			 * Since only PAGE_SIZE pages can currently be
1835			 * migrated, just set it to page. This will need to be
1836			 * changed when hugepage migrations to device private
1837			 * memory are supported.
1838			 */
1839			subpage = page;
1840		} else if (PageHWPoison(page)) {
1841			pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
1842			if (PageHuge(page)) {
1843				hugetlb_count_sub(compound_nr(page), mm);
1844				set_huge_swap_pte_at(mm, address,
1845						     pvmw.pte, pteval,
1846						     vma_mmu_pagesize(vma));
1847			} else {
1848				dec_mm_counter(mm, mm_counter(page));
1849				set_pte_at(mm, address, pvmw.pte, pteval);
1850			}
1851
1852		} else if (pte_unused(pteval) && !userfaultfd_armed(vma)) {
1853			/*
1854			 * The guest indicated that the page content is of no
1855			 * interest anymore. Simply discard the pte, vmscan
1856			 * will take care of the rest.
1857			 * A future reference will then fault in a new zero
1858			 * page. When userfaultfd is active, we must not drop
1859			 * this page though, as its main user (postcopy
1860			 * migration) will not expect userfaults on already
1861			 * copied pages.
1862			 */
1863			dec_mm_counter(mm, mm_counter(page));
1864			/* We have to invalidate as we cleared the pte */
1865			mmu_notifier_invalidate_range(mm, address,
1866						      address + PAGE_SIZE);
1867		} else {
1868			swp_entry_t entry;
1869			pte_t swp_pte;
1870
1871			if (arch_unmap_one(mm, vma, address, pteval) < 0) {
1872				set_pte_at(mm, address, pvmw.pte, pteval);
1873				ret = false;
1874				page_vma_mapped_walk_done(&pvmw);
1875				break;
1876			}
1877
1878			/*
1879			 * Store the pfn of the page in a special migration
1880			 * pte. do_swap_page() will wait until the migration
1881			 * pte is removed and then restart fault handling.
1882			 */
1883			if (pte_write(pteval))
1884				entry = make_writable_migration_entry(
1885							page_to_pfn(subpage));
1886			else
1887				entry = make_readable_migration_entry(
1888							page_to_pfn(subpage));
1889
1890			swp_pte = swp_entry_to_pte(entry);
1891			if (pte_soft_dirty(pteval))
1892				swp_pte = pte_swp_mksoft_dirty(swp_pte);
1893			if (pte_uffd_wp(pteval))
1894				swp_pte = pte_swp_mkuffd_wp(swp_pte);
1895			set_pte_at(mm, address, pvmw.pte, swp_pte);
1896			/*
1897			 * No need to invalidate here: it will synchronize
1898			 * against the special swap migration pte.
1899			 */
1900		}
1901
1902		/*
1903		 * No need to call mmu_notifier_invalidate_range(): it has been
1904		 * done above for all cases requiring it to happen under the page
1905		 * table lock, before mmu_notifier_invalidate_range_end()
1906		 *
1907		 * See Documentation/vm/mmu_notifier.rst
1908		 */
1909		page_remove_rmap(subpage, PageHuge(page));
1910		put_page(page);
1911	}
1912
1913	mmu_notifier_invalidate_range_end(&range);
1914
1915	return ret;
1916}
1917
1918/**
1919 * try_to_migrate - try to replace all page table mappings with swap entries
1920 * @page: the page to replace page table entries for
1921 * @flags: action and flags
1922 *
1923 * Tries to remove all the page table entries which are mapping this page and
1924 * replace them with special swap entries. Caller must hold the page lock.
1925 */
1926void try_to_migrate(struct page *page, enum ttu_flags flags)
1927{
1928	struct rmap_walk_control rwc = {
1929		.rmap_one = try_to_migrate_one,
1930		.arg = (void *)flags,
1931		.done = page_not_mapped,
1932		.anon_lock = page_lock_anon_vma_read,
1933	};
1934
1935	/*
1936	 * Migration always ignores mlock and only supports the TTU_RMAP_LOCKED,
1937	 * TTU_SPLIT_HUGE_PMD and TTU_SYNC flags.
1938	 */
1939	if (WARN_ON_ONCE(flags & ~(TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD |
1940					TTU_SYNC)))
1941		return;
1942
1943	if (is_zone_device_page(page) && !is_device_private_page(page))
1944		return;
1945
1946	/*
1947	 * During exec, a temporary VMA is setup and later moved.
1948	 * The VMA is moved under the anon_vma lock but not the
1949	 * page tables leading to a race where migration cannot
1950	 * find the migration ptes. Rather than increasing the
1951	 * locking requirements of exec(), migration skips
1952	 * temporary VMAs until after exec() completes.
1953	 */
1954	if (!PageKsm(page) && PageAnon(page))
1955		rwc.invalid_vma = invalid_migration_vma;
1956
1957	if (flags & TTU_RMAP_LOCKED)
1958		rmap_walk_locked(page, &rwc);
1959	else
1960		rmap_walk(page, &rwc);
1961}
1962
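/*
 * Editorial illustration, not part of the original file: the same pairing
 * as the try_to_unmap() example above, but for try_to_migrate().  Flags are
 * left at zero here; real callers choose among TTU_RMAP_LOCKED,
 * TTU_SPLIT_HUGE_PMD and TTU_SYNC as documented above.
 */
static bool __maybe_unused __example_freeze_for_migration(struct page *page)
{
	VM_BUG_ON_PAGE(!PageLocked(page), page);

	/* replace every pte mapping the page with a migration entry */
	try_to_migrate(page, 0);

	return !page_mapped(page);
}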
1963/*
1964 * Walks the vmas mapping a page and mlocks the page if any locked vmas are
1965 * found. Once one is found, the page is mlocked and the scan can be terminated.
1966 */
1967static bool page_mlock_one(struct page *page, struct vm_area_struct *vma,
1968				 unsigned long address, void *unused)
1969{
1970	struct page_vma_mapped_walk pvmw = {
1971		.page = page,
1972		.vma = vma,
1973		.address = address,
1974	};
1975
1976	/* An un-locked vma doesn't have any pages to lock, continue the scan */
1977	if (!(vma->vm_flags & VM_LOCKED))
1978		return true;
1979
1980	while (page_vma_mapped_walk(&pvmw)) {
1981		/*
1982		 * Need to recheck under the ptl to serialise with
1983		 * __munlock_pagevec_fill() after VM_LOCKED is cleared in
1984		 * munlock_vma_pages_range().
1985		 */
1986		if (vma->vm_flags & VM_LOCKED) {
1987			/*
1988			 * PTE-mapped THP are never marked as mlocked; but
1989			 * this function is never called on a DoubleMap THP,
1990			 * nor on an Anon THP (which may still be PTE-mapped
1991			 * after DoubleMap was cleared).
1992			 */
1993			mlock_vma_page(page);
1994			/*
1995			 * No need to scan further once the page is marked
1996			 * as mlocked.
1997			 */
1998			page_vma_mapped_walk_done(&pvmw);
1999			return false;
2000		}
2001	}
2002
2003	return true;
2004}
2005
2006/**
2007 * page_mlock - try to mlock a page
2008 * @page: the page to be mlocked
2009 *
2010 * Called from munlock code. Checks all of the VMAs mapping the page and mlocks
2011 * the page if any are found. The page will be returned with PG_mlocked cleared
2012 * if it is not mapped by any locked vmas.
2013 */
2014void page_mlock(struct page *page)
2015{
2016	struct rmap_walk_control rwc = {
2017		.rmap_one = page_mlock_one,
2018		.done = page_not_mapped,
2019		.anon_lock = page_lock_anon_vma_read,
2020
2021	};
2022
2023	VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page);
2024	VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page);
2025
2026	/* Anon THP are only marked as mlocked when singly mapped */
2027	if (PageTransCompound(page) && PageAnon(page))
2028		return;
2029
2030	rmap_walk(page, &rwc);
2031}
2032
2033#ifdef CONFIG_DEVICE_PRIVATE
2034struct make_exclusive_args {
2035	struct mm_struct *mm;
2036	unsigned long address;
2037	void *owner;
2038	bool valid;
2039};
2040
2041static bool page_make_device_exclusive_one(struct page *page,
2042		struct vm_area_struct *vma, unsigned long address, void *priv)
2043{
2044	struct mm_struct *mm = vma->vm_mm;
2045	struct page_vma_mapped_walk pvmw = {
2046		.page = page,
2047		.vma = vma,
2048		.address = address,
2049	};
2050	struct make_exclusive_args *args = priv;
2051	pte_t pteval;
2052	struct page *subpage;
2053	bool ret = true;
2054	struct mmu_notifier_range range;
2055	swp_entry_t entry;
2056	pte_t swp_pte;
2057
2058	mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, vma,
2059				      vma->vm_mm, address, min(vma->vm_end,
2060				      address + page_size(page)), args->owner);
 
2061	mmu_notifier_invalidate_range_start(&range);
2062
2063	while (page_vma_mapped_walk(&pvmw)) {
2064		/* Unexpected PMD-mapped THP? */
2065		VM_BUG_ON_PAGE(!pvmw.pte, page);
2066
2067		if (!pte_present(*pvmw.pte)) {
2068			ret = false;
2069			page_vma_mapped_walk_done(&pvmw);
2070			break;
2071		}
2072
2073		subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
 
2074		address = pvmw.address;
2075
2076		/* Nuke the page table entry. */
2077		flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
2078		pteval = ptep_clear_flush(vma, address, pvmw.pte);
2079
2080		/* Move the dirty bit to the page. Now the pte is gone. */
2081		if (pte_dirty(pteval))
2082			set_page_dirty(page);
2083
2084		/*
2085		 * Check that our target page is still mapped at the expected
2086		 * address.
2087		 */
2088		if (args->mm == mm && args->address == address &&
2089		    pte_write(pteval))
2090			args->valid = true;
2091
2092		/*
2093		 * Store the pfn of the page in a special migration
2094		 * pte. do_swap_page() will wait until the migration
2095		 * pte is removed and then restart fault handling.
2096		 */
2097		if (pte_write(pteval))
2098			entry = make_writable_device_exclusive_entry(
2099							page_to_pfn(subpage));
2100		else
2101			entry = make_readable_device_exclusive_entry(
2102							page_to_pfn(subpage));
2103		swp_pte = swp_entry_to_pte(entry);
2104		if (pte_soft_dirty(pteval))
2105			swp_pte = pte_swp_mksoft_dirty(swp_pte);
2106		if (pte_uffd_wp(pteval))
2107			swp_pte = pte_swp_mkuffd_wp(swp_pte);
2108
2109		set_pte_at(mm, address, pvmw.pte, swp_pte);
2110
2111		/*
2112		 * There is a reference on the page for the swap entry which has
2113		 * been removed, so shouldn't take another.
2114		 */
2115		page_remove_rmap(subpage, false);
2116	}
2117
2118	mmu_notifier_invalidate_range_end(&range);
2119
2120	return ret;
2121}
2122
2123/**
2124 * page_make_device_exclusive - mark the page exclusively owned by a device
2125 * @page: the page to replace page table entries for
2126 * @mm: the mm_struct where the page is expected to be mapped
2127 * @address: address where the page is expected to be mapped
2128 * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier callbacks
2129 *
2130 * Tries to remove all the page table entries which are mapping this page and
2131 * replace them with special device exclusive swap entries to grant a device
2132 * exclusive access to the page. Caller must hold the page lock.
2133 *
2134 * Returns false if the page is still mapped, or if it could not be unmapped
2135 * from the expected address. Otherwise returns true (success).
2136 */
2137static bool page_make_device_exclusive(struct page *page, struct mm_struct *mm,
2138				unsigned long address, void *owner)
2139{
2140	struct make_exclusive_args args = {
2141		.mm = mm,
2142		.address = address,
2143		.owner = owner,
2144		.valid = false,
2145	};
2146	struct rmap_walk_control rwc = {
2147		.rmap_one = page_make_device_exclusive_one,
2148		.done = page_not_mapped,
2149		.anon_lock = page_lock_anon_vma_read,
2150		.arg = &args,
2151	};
2152
2153	/*
2154	 * Restrict to anonymous pages for now to avoid potential writeback
2155	 * issues. Also tail pages shouldn't be passed to rmap_walk so skip
2156	 * those.
2157	 */
2158	if (!PageAnon(page) || PageTail(page))
2159		return false;
2160
2161	rmap_walk(page, &rwc);
2162
2163	return args.valid && !page_mapcount(page);
2164}
2165
2166/**
2167 * make_device_exclusive_range() - Mark a range for exclusive use by a device
2168 * @mm: mm_struct of the associated target process
2169 * @start: start of the region to mark for exclusive device access
2170 * @end: end address of region
2171 * @pages: returns the pages which were successfully marked for exclusive access
2172 * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier to allow filtering
2173 *
2174 * Returns: number of pages found in the range by GUP. A page is marked for
2175 * exclusive access only if the page pointer is non-NULL.
2176 *
2177 * This function finds ptes mapping page(s) to the given address range, locks
2178 * them and replaces mappings with special swap entries preventing userspace CPU
2179 * access. On fault these entries are replaced with the original mapping after
2180 * calling MMU notifiers.
2181 *
2182 * A driver using this to program access from a device must use an mmu notifier
2183 * critical section to hold a device-specific lock during programming. Once
2184 * programming is complete it should drop the page lock and reference, after
2185 * which point CPU access to the page will revoke the exclusive access.
2186 */
2187int make_device_exclusive_range(struct mm_struct *mm, unsigned long start,
2188				unsigned long end, struct page **pages,
2189				void *owner)
2190{
2191	long npages = (end - start) >> PAGE_SHIFT;
2192	long i;
2193
2194	npages = get_user_pages_remote(mm, start, npages,
2195				       FOLL_GET | FOLL_WRITE | FOLL_SPLIT_PMD,
2196				       pages, NULL, NULL);
2197	if (npages < 0)
2198		return npages;
2199
2200	for (i = 0; i < npages; i++, start += PAGE_SIZE) {
2201		if (!trylock_page(pages[i])) {
2202			put_page(pages[i]);
 
2203			pages[i] = NULL;
2204			continue;
2205		}
2206
2207		if (!page_make_device_exclusive(pages[i], mm, start, owner)) {
2208			unlock_page(pages[i]);
2209			put_page(pages[i]);
2210			pages[i] = NULL;
2211		}
2212	}
2213
2214	return npages;
2215}
2216EXPORT_SYMBOL_GPL(make_device_exclusive_range);
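/*
 * Editorial illustration, not part of the original file: a hedged sketch of
 * how a driver might use make_device_exclusive_range() for a single page.
 * Device programming is represented by a comment, and the caller is assumed
 * to hold mmap_read_lock() as required for the GUP call inside.
 */
static int __maybe_unused __example_grab_page_for_device(struct mm_struct *mm,
					unsigned long addr, void *owner)
{
	struct page *page = NULL;
	int npages;

	npages = make_device_exclusive_range(mm, addr, addr + PAGE_SIZE,
					     &page, owner);
	if (npages < 0)
		return npages;
	if (npages != 1 || !page)
		return -EBUSY;	/* not marked exclusive; nothing to release */

	/* ... program the device here, inside its mmu notifier section ... */

	/* drop the lock and reference taken on our behalf once done */
	unlock_page(page);
	put_page(page);

	return 0;
}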
2217#endif
2218
2219void __put_anon_vma(struct anon_vma *anon_vma)
2220{
2221	struct anon_vma *root = anon_vma->root;
2222
2223	anon_vma_free(anon_vma);
2224	if (root != anon_vma && atomic_dec_and_test(&root->refcount))
2225		anon_vma_free(root);
2226}
2227
2228static struct anon_vma *rmap_walk_anon_lock(struct page *page,
2229					struct rmap_walk_control *rwc)
2230{
2231	struct anon_vma *anon_vma;
2232
2233	if (rwc->anon_lock)
2234		return rwc->anon_lock(page);
2235
2236	/*
2237	 * Note: remove_migration_ptes() cannot use page_lock_anon_vma_read()
2238	 * because that depends on page_mapped(); but not all its usages
2239	 * are holding mmap_lock. Users without mmap_lock are required to
2240	 * take a reference count to prevent the anon_vma from disappearing.
2241	 */
2242	anon_vma = page_anon_vma(page);
2243	if (!anon_vma)
2244		return NULL;
2245
2246	anon_vma_lock_read(anon_vma);
 
2247	return anon_vma;
2248}
2249
2250/*
2251 * rmap_walk_anon - do something to anonymous page using the object-based
2252 * rmap method
2253 * @page: the page to be handled
2254 * @rwc: control variable according to each walk type
2255 *
2256 * Find all the mappings of a page using the mapping pointer and the vma chains
2257 * contained in the anon_vma struct it points to.
2258 *
2259 * When called from page_mlock(), the mmap_lock of the mm containing the vma
2260 * where the page was found will be held for write.  So, we won't recheck
2261 * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
2262 * LOCKED.
2263 */
2264static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
2265		bool locked)
2266{
2267	struct anon_vma *anon_vma;
2268	pgoff_t pgoff_start, pgoff_end;
2269	struct anon_vma_chain *avc;
2270
2271	if (locked) {
2272		anon_vma = page_anon_vma(page);
2273		/* anon_vma disappear under us? */
2274		VM_BUG_ON_PAGE(!anon_vma, page);
2275	} else {
2276		anon_vma = rmap_walk_anon_lock(page, rwc);
2277	}
2278	if (!anon_vma)
2279		return;
2280
2281	pgoff_start = page_to_pgoff(page);
2282	pgoff_end = pgoff_start + thp_nr_pages(page) - 1;
2283	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
2284			pgoff_start, pgoff_end) {
2285		struct vm_area_struct *vma = avc->vma;
2286		unsigned long address = vma_address(page, vma);
2287
2288		VM_BUG_ON_VMA(address == -EFAULT, vma);
2289		cond_resched();
2290
2291		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
2292			continue;
2293
2294		if (!rwc->rmap_one(page, vma, address, rwc->arg))
2295			break;
2296		if (rwc->done && rwc->done(page))
2297			break;
2298	}
2299
2300	if (!locked)
2301		anon_vma_unlock_read(anon_vma);
2302}
2303
2304/*
2305 * rmap_walk_file - do something to file page using the object-based rmap method
2306 * @page: the page to be handled
2307 * @rwc: control variable according to each walk type
2308 *
2309 * Find all the mappings of a page using the mapping pointer and the vma chains
2310 * contained in the address_space struct it points to.
2311 *
2312 * When called from page_mlock(), the mmap_lock of the mm containing the vma
2313 * where the page was found will be held for write.  So, we won't recheck
2314 * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
2315 * LOCKED.
2316 */
2317static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
2318		bool locked)
2319{
2320	struct address_space *mapping = page_mapping(page);
2321	pgoff_t pgoff_start, pgoff_end;
2322	struct vm_area_struct *vma;
2323
2324	/*
2325	 * The page lock not only makes sure that page->mapping cannot
2326	 * suddenly be NULLified by truncation, it makes sure that the
2327	 * structure at mapping cannot be freed and reused yet,
2328	 * so we can safely take mapping->i_mmap_rwsem.
2329	 */
2330	VM_BUG_ON_PAGE(!PageLocked(page), page);
2331
2332	if (!mapping)
2333		return;
2334
2335	pgoff_start = page_to_pgoff(page);
2336	pgoff_end = pgoff_start + thp_nr_pages(page) - 1;
2337	if (!locked)
2338		i_mmap_lock_read(mapping);
2339	vma_interval_tree_foreach(vma, &mapping->i_mmap,
2340			pgoff_start, pgoff_end) {
2341		unsigned long address = vma_address(page, vma);
2342
2343		VM_BUG_ON_VMA(address == -EFAULT, vma);
2344		cond_resched();
2345
2346		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
2347			continue;
2348
2349		if (!rwc->rmap_one(page, vma, address, rwc->arg))
2350			goto done;
2351		if (rwc->done && rwc->done(page))
2352			goto done;
2353	}
2354
2355done:
2356	if (!locked)
2357		i_mmap_unlock_read(mapping);
2358}
2359
2360void rmap_walk(struct page *page, struct rmap_walk_control *rwc)
2361{
2362	if (unlikely(PageKsm(page)))
2363		rmap_walk_ksm(page, rwc);
2364	else if (PageAnon(page))
2365		rmap_walk_anon(page, rwc, false);
2366	else
2367		rmap_walk_file(page, rwc, false);
2368}
2369
2370/* Like rmap_walk, but caller holds relevant rmap lock */
2371void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc)
2372{
2373	/* no ksm support for now */
2374	VM_BUG_ON_PAGE(PageKsm(page), page);
2375	if (PageAnon(page))
2376		rmap_walk_anon(page, rwc, true);
2377	else
2378		rmap_walk_file(page, rwc, true);
2379}
2380
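/*
 * Editorial illustration, not part of the original file: how a new rmap
 * user would package its state in struct rmap_walk_control, in the same
 * style as page_referenced() and page_mkclean() above.  This hypothetical
 * walker only counts the vmas that map a (locked) page.
 */
static bool __example_count_one(struct page *page, struct vm_area_struct *vma,
				unsigned long address, void *arg)
{
	int *nr_vmas = arg;

	(*nr_vmas)++;
	return true;	/* keep walking; returning false would stop the walk */
}

static int __maybe_unused __example_count_mapping_vmas(struct page *page)
{
	int nr_vmas = 0;
	struct rmap_walk_control rwc = {
		.rmap_one = __example_count_one,
		.arg = &nr_vmas,
		.anon_lock = page_lock_anon_vma_read,
	};

	/* rmap_walk() takes the anon_vma or i_mmap lock itself */
	rmap_walk(page, &rwc);

	return nr_vmas;
}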
2381#ifdef CONFIG_HUGETLB_PAGE
2382/*
2383 * The following two functions are for anonymous (private mapped) hugepages.
2384 * Unlike common anonymous pages, anonymous hugepages have no accounting code
2385 * and no lru code, because we handle hugepages differently from common pages.
2386 */
2387void hugepage_add_anon_rmap(struct page *page,
2388			    struct vm_area_struct *vma, unsigned long address)
2389{
2390	struct anon_vma *anon_vma = vma->anon_vma;
2391	int first;
2392
2393	BUG_ON(!PageLocked(page));
2394	BUG_ON(!anon_vma);
2395	/* address might be in next vma when migration races vma_adjust */
2396	first = atomic_inc_and_test(compound_mapcount_ptr(page));
2397	if (first)
2398		__page_set_anon_rmap(page, vma, address, 0);
 
2399}
2400
2401void hugepage_add_new_anon_rmap(struct page *page,
2402			struct vm_area_struct *vma, unsigned long address)
2403{
2404	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
 
2405	atomic_set(compound_mapcount_ptr(page), 0);
2406	if (hpage_pincount_available(page))
2407		atomic_set(compound_pincount_ptr(page), 0);
2408
2409	__page_set_anon_rmap(page, vma, address, 1);
2410}
2411#endif /* CONFIG_HUGETLB_PAGE */