// SPDX-License-Identifier: GPL-2.0-only
/*
 *  mm/userfaultfd.c
 *
 *  Copyright (C) 2015  Red Hat, Inc.
 */

#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/userfaultfd_k.h>
#include <linux/mmu_notifier.h>
#include <linux/hugetlb.h>
#include <linux/shmem_fs.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include "internal.h"

static __always_inline
bool validate_dst_vma(struct vm_area_struct *dst_vma, unsigned long dst_end)
{
	/* Make sure that the dst range is fully within dst_vma. */
	if (dst_end > dst_vma->vm_end)
		return false;

	/*
	 * Check the vma is registered in uffd, this is required to
	 * enforce the VM_MAYWRITE check done at uffd registration
	 * time.
	 */
	if (!dst_vma->vm_userfaultfd_ctx.ctx)
		return false;

	return true;
}

static __always_inline
struct vm_area_struct *find_vma_and_prepare_anon(struct mm_struct *mm,
						 unsigned long addr)
{
	struct vm_area_struct *vma;

	mmap_assert_locked(mm);
	vma = vma_lookup(mm, addr);
	if (!vma)
		vma = ERR_PTR(-ENOENT);
	else if (!(vma->vm_flags & VM_SHARED) &&
		 unlikely(anon_vma_prepare(vma)))
		vma = ERR_PTR(-ENOMEM);

	return vma;
}

#ifdef CONFIG_PER_VMA_LOCK
/*
 * uffd_lock_vma() - Lookup and lock vma corresponding to @address.
 * @mm: mm to search vma in.
 * @address: address that the vma should contain.
 *
 * Should be called without holding mmap_lock.
 *
 * Return: A locked vma containing @address, -ENOENT if no vma is found, or
 * -ENOMEM if anon_vma couldn't be allocated.
 */
static struct vm_area_struct *uffd_lock_vma(struct mm_struct *mm,
				       unsigned long address)
{
	struct vm_area_struct *vma;

	vma = lock_vma_under_rcu(mm, address);
	if (vma) {
		/*
		 * We know we're going to need to use anon_vma, so check
		 * that early.
		 */
		if (!(vma->vm_flags & VM_SHARED) && unlikely(!vma->anon_vma))
			vma_end_read(vma);
		else
			return vma;
	}

	mmap_read_lock(mm);
	vma = find_vma_and_prepare_anon(mm, address);
	if (!IS_ERR(vma)) {
		/*
		 * We cannot use vma_start_read() as it may fail due to
		 * false locked (see comment in vma_start_read()). We
		 * can avoid that by directly locking vm_lock under
		 * mmap_lock, which guarantees that nobody can lock the
		 * vma for write (vma_start_write()) under us.
		 */
		down_read(&vma->vm_lock->lock);
	}

	mmap_read_unlock(mm);
	return vma;
}

static struct vm_area_struct *uffd_mfill_lock(struct mm_struct *dst_mm,
					      unsigned long dst_start,
					      unsigned long len)
{
	struct vm_area_struct *dst_vma;

	dst_vma = uffd_lock_vma(dst_mm, dst_start);
	if (IS_ERR(dst_vma) || validate_dst_vma(dst_vma, dst_start + len))
		return dst_vma;

	vma_end_read(dst_vma);
	return ERR_PTR(-ENOENT);
}

static void uffd_mfill_unlock(struct vm_area_struct *vma)
{
	vma_end_read(vma);
}

#else

static struct vm_area_struct *uffd_mfill_lock(struct mm_struct *dst_mm,
					      unsigned long dst_start,
					      unsigned long len)
{
	struct vm_area_struct *dst_vma;

	mmap_read_lock(dst_mm);
	dst_vma = find_vma_and_prepare_anon(dst_mm, dst_start);
	if (IS_ERR(dst_vma))
		goto out_unlock;

	if (validate_dst_vma(dst_vma, dst_start + len))
		return dst_vma;

	dst_vma = ERR_PTR(-ENOENT);
out_unlock:
	mmap_read_unlock(dst_mm);
	return dst_vma;
}

static void uffd_mfill_unlock(struct vm_area_struct *vma)
{
	mmap_read_unlock(vma->vm_mm);
}
#endif

/* Check if dst_addr is outside of file's size. Must be called with ptl held. */
static bool mfill_file_over_size(struct vm_area_struct *dst_vma,
				 unsigned long dst_addr)
{
	struct inode *inode;
	pgoff_t offset, max_off;

	if (!dst_vma->vm_file)
		return false;

	inode = dst_vma->vm_file->f_inode;
	offset = linear_page_index(dst_vma, dst_addr);
	max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
	return offset >= max_off;
}

/*
 * Install PTEs, to map dst_addr (within dst_vma) to page.
 *
 * This function handles both MCOPY_ATOMIC_NORMAL and _CONTINUE for both shmem
 * and anon, and for both shared and private VMAs.
 */
int mfill_atomic_install_pte(pmd_t *dst_pmd,
			     struct vm_area_struct *dst_vma,
			     unsigned long dst_addr, struct page *page,
			     bool newly_allocated, uffd_flags_t flags)
{
	int ret;
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	pte_t _dst_pte, *dst_pte;
	bool writable = dst_vma->vm_flags & VM_WRITE;
	bool vm_shared = dst_vma->vm_flags & VM_SHARED;
	spinlock_t *ptl;
	struct folio *folio = page_folio(page);
	bool page_in_cache = folio_mapping(folio);

	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
	_dst_pte = pte_mkdirty(_dst_pte);
	if (page_in_cache && !vm_shared)
		writable = false;
	if (writable)
		_dst_pte = pte_mkwrite(_dst_pte, dst_vma);
	if (flags & MFILL_ATOMIC_WP)
		_dst_pte = pte_mkuffd_wp(_dst_pte);

	ret = -EAGAIN;
	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
	if (!dst_pte)
		goto out;

	if (mfill_file_over_size(dst_vma, dst_addr)) {
		ret = -EFAULT;
		goto out_unlock;
	}

	ret = -EEXIST;
	/*
	 * We allow overwriting a pte marker: consider when both MISSING|WP
	 * are registered, we first wr-protect a none pte which has no page
	 * cache page backing it, then access the page.
	 */
	if (!pte_none_mostly(ptep_get(dst_pte)))
		goto out_unlock;

	if (page_in_cache) {
		/* Usually, cache pages are already added to LRU */
		if (newly_allocated)
			folio_add_lru(folio);
		folio_add_file_rmap_pte(folio, page, dst_vma);
	} else {
		folio_add_new_anon_rmap(folio, dst_vma, dst_addr, RMAP_EXCLUSIVE);
		folio_add_lru_vma(folio, dst_vma);
	}

	/*
	 * Must happen after rmap, as mm_counter() checks mapping (via
	 * PageAnon()), which is set by __page_set_anon_rmap().
	 */
	inc_mm_counter(dst_mm, mm_counter(folio));

	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
	ret = 0;
out_unlock:
	pte_unmap_unlock(dst_pte, ptl);
out:
	return ret;
}

static int mfill_atomic_pte_copy(pmd_t *dst_pmd,
				 struct vm_area_struct *dst_vma,
				 unsigned long dst_addr,
				 unsigned long src_addr,
				 uffd_flags_t flags,
				 struct folio **foliop)
{
	void *kaddr;
	int ret;
	struct folio *folio;

	if (!*foliop) {
		ret = -ENOMEM;
		folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, dst_vma,
					dst_addr);
		if (!folio)
			goto out;

		kaddr = kmap_local_folio(folio, 0);
		/*
		 * The read mmap_lock is held here.  Despite the
		 * mmap_lock being read recursive a deadlock is still
		 * possible if a writer has taken a lock.  For example:
		 *
		 * process A thread 1 takes read lock on own mmap_lock
		 * process A thread 2 calls mmap, blocks taking write lock
		 * process B thread 1 takes page fault, read lock on own mmap lock
		 * process B thread 2 calls mmap, blocks taking write lock
		 * process A thread 1 blocks taking read lock on process B
		 * process B thread 1 blocks taking read lock on process A
		 *
		 * Disable page faults to prevent potential deadlock
		 * and retry the copy outside the mmap_lock.
		 */
		pagefault_disable();
		ret = copy_from_user(kaddr, (const void __user *) src_addr,
				     PAGE_SIZE);
		pagefault_enable();
		kunmap_local(kaddr);

		/* fallback to copy_from_user outside mmap_lock */
		if (unlikely(ret)) {
			ret = -ENOENT;
			*foliop = folio;
			/* don't free the page */
			goto out;
		}

		flush_dcache_folio(folio);
	} else {
		folio = *foliop;
		*foliop = NULL;
	}

	/*
	 * The memory barrier inside __folio_mark_uptodate makes sure that
	 * preceding stores to the page contents become visible before
	 * the set_pte_at() write.
	 */
	__folio_mark_uptodate(folio);

	ret = -ENOMEM;
	if (mem_cgroup_charge(folio, dst_vma->vm_mm, GFP_KERNEL))
		goto out_release;

	ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
				       &folio->page, true, flags);
	if (ret)
		goto out_release;
out:
	return ret;
out_release:
	folio_put(folio);
	goto out;
}

static int mfill_atomic_pte_zeroed_folio(pmd_t *dst_pmd,
					 struct vm_area_struct *dst_vma,
					 unsigned long dst_addr)
{
	struct folio *folio;
	int ret = -ENOMEM;

	folio = vma_alloc_zeroed_movable_folio(dst_vma, dst_addr);
	if (!folio)
		return ret;

	if (mem_cgroup_charge(folio, dst_vma->vm_mm, GFP_KERNEL))
		goto out_put;

	/*
	 * The memory barrier inside __folio_mark_uptodate makes sure that
	 * zeroing out the folio becomes visible before mapping the page
	 * using set_pte_at(). See do_anonymous_page().
	 */
	__folio_mark_uptodate(folio);

	ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
				       &folio->page, true, 0);
	if (ret)
		goto out_put;

	return 0;
out_put:
	folio_put(folio);
	return ret;
}

static int mfill_atomic_pte_zeropage(pmd_t *dst_pmd,
				     struct vm_area_struct *dst_vma,
				     unsigned long dst_addr)
{
	pte_t _dst_pte, *dst_pte;
	spinlock_t *ptl;
	int ret;

	if (mm_forbids_zeropage(dst_vma->vm_mm))
		return mfill_atomic_pte_zeroed_folio(dst_pmd, dst_vma, dst_addr);

	_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
					 dst_vma->vm_page_prot));
	ret = -EAGAIN;
	dst_pte = pte_offset_map_lock(dst_vma->vm_mm, dst_pmd, dst_addr, &ptl);
	if (!dst_pte)
		goto out;
	if (mfill_file_over_size(dst_vma, dst_addr)) {
		ret = -EFAULT;
		goto out_unlock;
	}
	ret = -EEXIST;
	if (!pte_none(ptep_get(dst_pte)))
		goto out_unlock;
	set_pte_at(dst_vma->vm_mm, dst_addr, dst_pte, _dst_pte);
	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
	ret = 0;
out_unlock:
	pte_unmap_unlock(dst_pte, ptl);
out:
	return ret;
}

/* Handles UFFDIO_CONTINUE for all shmem VMAs (shared or private). */
static int mfill_atomic_pte_continue(pmd_t *dst_pmd,
				     struct vm_area_struct *dst_vma,
				     unsigned long dst_addr,
				     uffd_flags_t flags)
{
	struct inode *inode = file_inode(dst_vma->vm_file);
	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
	struct folio *folio;
	struct page *page;
	int ret;

	ret = shmem_get_folio(inode, pgoff, 0, &folio, SGP_NOALLOC);
	/* Our caller expects us to return -EFAULT if we failed to find folio */
	if (ret == -ENOENT)
		ret = -EFAULT;
	if (ret)
		goto out;
	if (!folio) {
		ret = -EFAULT;
		goto out;
	}

	page = folio_file_page(folio, pgoff);
	if (PageHWPoison(page)) {
		ret = -EIO;
		goto out_release;
	}

	ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
				       page, false, flags);
	if (ret)
		goto out_release;

	folio_unlock(folio);
	ret = 0;
out:
	return ret;
out_release:
	folio_unlock(folio);
	folio_put(folio);
	goto out;
}

/* Handles UFFDIO_POISON for all non-hugetlb VMAs. */
static int mfill_atomic_pte_poison(pmd_t *dst_pmd,
				   struct vm_area_struct *dst_vma,
				   unsigned long dst_addr,
				   uffd_flags_t flags)
{
	int ret;
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	pte_t _dst_pte, *dst_pte;
	spinlock_t *ptl;

	_dst_pte = make_pte_marker(PTE_MARKER_POISONED);
	ret = -EAGAIN;
	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
	if (!dst_pte)
		goto out;

	if (mfill_file_over_size(dst_vma, dst_addr)) {
		ret = -EFAULT;
		goto out_unlock;
	}

	ret = -EEXIST;
	/* Refuse to overwrite any PTE, even a PTE marker (e.g. UFFD WP). */
	if (!pte_none(ptep_get(dst_pte)))
		goto out_unlock;

	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
	ret = 0;
out_unlock:
	pte_unmap_unlock(dst_pte, ptl);
out:
	return ret;
}

static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset(mm, address);
	p4d = p4d_alloc(mm, pgd, address);
	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, address);
	if (!pud)
		return NULL;
	/*
	 * Note that this is not necessarily run because the pmd was
	 * missing: the *pmd may already be established, and it may
	 * even be a trans_huge_pmd.
	 */
	return pmd_alloc(mm, pud, address);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * mfill_atomic processing for HUGETLB vmas.  Note that this routine is
 * called with either vma-lock or mmap_lock held; it will release the lock
 * before returning.
 */
static __always_inline ssize_t mfill_atomic_hugetlb(
					      struct userfaultfd_ctx *ctx,
					      struct vm_area_struct *dst_vma,
					      unsigned long dst_start,
					      unsigned long src_start,
					      unsigned long len,
					      uffd_flags_t flags)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	ssize_t err;
	pte_t *dst_pte;
	unsigned long src_addr, dst_addr;
	long copied;
	struct folio *folio;
	unsigned long vma_hpagesize;
	pgoff_t idx;
	u32 hash;
	struct address_space *mapping;

	/*
	 * There is no default zero huge page for all huge page sizes as
	 * supported by hugetlb.  A PMD_SIZE huge page may exist as used
	 * by THP.  Since we cannot reliably insert a zero page, this
	 * feature is not supported.
	 */
	if (uffd_flags_mode_is(flags, MFILL_ATOMIC_ZEROPAGE)) {
		up_read(&ctx->map_changing_lock);
		uffd_mfill_unlock(dst_vma);
		return -EINVAL;
	}

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	folio = NULL;
	vma_hpagesize = vma_kernel_pagesize(dst_vma);

	/*
	 * Validate alignment based on huge page size
	 */
	err = -EINVAL;
	if (dst_start & (vma_hpagesize - 1) || len & (vma_hpagesize - 1))
		goto out_unlock;

retry:
	/*
	 * On routine entry dst_vma is set.  If we had to drop mmap_lock and
	 * retry, dst_vma will be set to NULL and we must lookup again.
	 */
	if (!dst_vma) {
		dst_vma = uffd_mfill_lock(dst_mm, dst_start, len);
		if (IS_ERR(dst_vma)) {
			err = PTR_ERR(dst_vma);
			goto out;
		}

		err = -ENOENT;
		if (!is_vm_hugetlb_page(dst_vma))
			goto out_unlock_vma;

		err = -EINVAL;
		if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
			goto out_unlock_vma;

		/*
		 * If memory mappings are changing because of non-cooperative
		 * operation (e.g. mremap) running in parallel, bail out and
		 * request the user to retry later
		 */
		down_read(&ctx->map_changing_lock);
		err = -EAGAIN;
		if (atomic_read(&ctx->mmap_changing))
			goto out_unlock;
	}

	while (src_addr < src_start + len) {
		BUG_ON(dst_addr >= dst_start + len);

		/*
		 * Serialize via vma_lock and hugetlb_fault_mutex.
		 * vma_lock ensures the dst_pte remains valid even
		 * in the case of shared pmds.  fault mutex prevents
		 * races with other faulting threads.
		 */
		idx = linear_page_index(dst_vma, dst_addr);
		mapping = dst_vma->vm_file->f_mapping;
		hash = hugetlb_fault_mutex_hash(mapping, idx);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);
		hugetlb_vma_lock_read(dst_vma);

		err = -ENOMEM;
		dst_pte = huge_pte_alloc(dst_mm, dst_vma, dst_addr, vma_hpagesize);
		if (!dst_pte) {
			hugetlb_vma_unlock_read(dst_vma);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out_unlock;
		}

		if (!uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE) &&
		    !huge_pte_none_mostly(huge_ptep_get(dst_mm, dst_addr, dst_pte))) {
			err = -EEXIST;
			hugetlb_vma_unlock_read(dst_vma);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out_unlock;
		}

		err = hugetlb_mfill_atomic_pte(dst_pte, dst_vma, dst_addr,
					       src_addr, flags, &folio);

		hugetlb_vma_unlock_read(dst_vma);
		mutex_unlock(&hugetlb_fault_mutex_table[hash]);

		cond_resched();

		if (unlikely(err == -ENOENT)) {
			up_read(&ctx->map_changing_lock);
			uffd_mfill_unlock(dst_vma);
			BUG_ON(!folio);

			err = copy_folio_from_user(folio,
						   (const void __user *)src_addr, true);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}

			dst_vma = NULL;
			goto retry;
		} else
			BUG_ON(folio);

		if (!err) {
			dst_addr += vma_hpagesize;
			src_addr += vma_hpagesize;
			copied += vma_hpagesize;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}

out_unlock:
	up_read(&ctx->map_changing_lock);
out_unlock_vma:
	uffd_mfill_unlock(dst_vma);
out:
	if (folio)
		folio_put(folio);
	BUG_ON(copied < 0);
	BUG_ON(err > 0);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}
#else /* !CONFIG_HUGETLB_PAGE */
/* fail at build time if gcc attempts to use this */
extern ssize_t mfill_atomic_hugetlb(struct userfaultfd_ctx *ctx,
				    struct vm_area_struct *dst_vma,
				    unsigned long dst_start,
				    unsigned long src_start,
				    unsigned long len,
				    uffd_flags_t flags);
#endif /* CONFIG_HUGETLB_PAGE */

static __always_inline ssize_t mfill_atomic_pte(pmd_t *dst_pmd,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
						uffd_flags_t flags,
						struct folio **foliop)
{
	ssize_t err;

	if (uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE)) {
		return mfill_atomic_pte_continue(dst_pmd, dst_vma,
						 dst_addr, flags);
	} else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
		return mfill_atomic_pte_poison(dst_pmd, dst_vma,
					       dst_addr, flags);
	}

	/*
	 * The normal page fault path for a shmem mapping will invoke the
	 * fault, fill the hole in the file and COW it right away. The
	 * result generates plain anonymous memory. So when we are
	 * asked to fill a hole in a MAP_PRIVATE shmem mapping, we'll
	 * generate anonymous memory directly without actually filling
	 * the hole. For the MAP_PRIVATE case the robustness check
	 * only happens in the pagetable (to verify it's still none)
	 * and not in the radix tree.
	 */
	if (!(dst_vma->vm_flags & VM_SHARED)) {
		if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY))
			err = mfill_atomic_pte_copy(dst_pmd, dst_vma,
						    dst_addr, src_addr,
						    flags, foliop);
		else
			err = mfill_atomic_pte_zeropage(dst_pmd,
						 dst_vma, dst_addr);
	} else {
		err = shmem_mfill_atomic_pte(dst_pmd, dst_vma,
					     dst_addr, src_addr,
					     flags, foliop);
	}

	return err;
}

static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx,
					    unsigned long dst_start,
					    unsigned long src_start,
					    unsigned long len,
					    uffd_flags_t flags)
{
	struct mm_struct *dst_mm = ctx->mm;
	struct vm_area_struct *dst_vma;
	ssize_t err;
	pmd_t *dst_pmd;
	unsigned long src_addr, dst_addr;
	long copied;
	struct folio *folio;

	/*
	 * Sanitize the command parameters:
	 */
	BUG_ON(dst_start & ~PAGE_MASK);
	BUG_ON(len & ~PAGE_MASK);

	/* Does the address range wrap, or is the span zero-sized? */
	BUG_ON(src_start + len <= src_start);
	BUG_ON(dst_start + len <= dst_start);

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	folio = NULL;
retry:
	/*
	 * Make sure the vma is not shared, that the dst range is
	 * both valid and fully within a single existing vma.
	 */
	dst_vma = uffd_mfill_lock(dst_mm, dst_start, len);
	if (IS_ERR(dst_vma)) {
		err = PTR_ERR(dst_vma);
		goto out;
	}

	/*
	 * If memory mappings are changing because of non-cooperative
	 * operation (e.g. mremap) running in parallel, bail out and
	 * request the user to retry later
	 */
	down_read(&ctx->map_changing_lock);
	err = -EAGAIN;
	if (atomic_read(&ctx->mmap_changing))
		goto out_unlock;

	err = -EINVAL;
	/*
	 * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
	 * it will overwrite vm_ops, so vma_is_anonymous must return false.
	 */
	if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
	    dst_vma->vm_flags & VM_SHARED))
		goto out_unlock;

	/*
	 * validate 'mode' now that we know the dst_vma: don't allow
	 * a wrprotect copy if the userfaultfd didn't register as WP.
	 */
	if ((flags & MFILL_ATOMIC_WP) && !(dst_vma->vm_flags & VM_UFFD_WP))
		goto out_unlock;

	/*
	 * If this is a HUGETLB vma, pass off to appropriate routine
	 */
	if (is_vm_hugetlb_page(dst_vma))
		return mfill_atomic_hugetlb(ctx, dst_vma, dst_start,
					    src_start, len, flags);

	if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
		goto out_unlock;
	if (!vma_is_shmem(dst_vma) &&
	    uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE))
		goto out_unlock;

	while (src_addr < src_start + len) {
		pmd_t dst_pmdval;

		BUG_ON(dst_addr >= dst_start + len);

		dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
		if (unlikely(!dst_pmd)) {
			err = -ENOMEM;
			break;
		}

		dst_pmdval = pmdp_get_lockless(dst_pmd);
		if (unlikely(pmd_none(dst_pmdval)) &&
		    unlikely(__pte_alloc(dst_mm, dst_pmd))) {
			err = -ENOMEM;
			break;
		}
		dst_pmdval = pmdp_get_lockless(dst_pmd);
		/*
		 * If the dst_pmd is THP don't override it and just be strict.
		 * (This includes the case where the PMD used to be THP and
		 * changed back to none after __pte_alloc().)
		 */
		if (unlikely(!pmd_present(dst_pmdval) || pmd_trans_huge(dst_pmdval) ||
			     pmd_devmap(dst_pmdval))) {
			err = -EEXIST;
			break;
		}
		if (unlikely(pmd_bad(dst_pmdval))) {
			err = -EFAULT;
			break;
		}
		/*
		 * For shmem mappings, khugepaged is allowed to remove page
		 * tables under us; pte_offset_map_lock() will deal with that.
		 */

		err = mfill_atomic_pte(dst_pmd, dst_vma, dst_addr,
				       src_addr, flags, &folio);
		cond_resched();

		if (unlikely(err == -ENOENT)) {
			void *kaddr;

			up_read(&ctx->map_changing_lock);
			uffd_mfill_unlock(dst_vma);
			BUG_ON(!folio);

			kaddr = kmap_local_folio(folio, 0);
			err = copy_from_user(kaddr,
					     (const void __user *) src_addr,
					     PAGE_SIZE);
			kunmap_local(kaddr);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}
			flush_dcache_folio(folio);
			goto retry;
		} else
			BUG_ON(folio);

		if (!err) {
			dst_addr += PAGE_SIZE;
			src_addr += PAGE_SIZE;
			copied += PAGE_SIZE;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}

out_unlock:
	up_read(&ctx->map_changing_lock);
	uffd_mfill_unlock(dst_vma);
out:
	if (folio)
		folio_put(folio);
	BUG_ON(copied < 0);
	BUG_ON(err > 0);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}

ssize_t mfill_atomic_copy(struct userfaultfd_ctx *ctx, unsigned long dst_start,
			  unsigned long src_start, unsigned long len,
			  uffd_flags_t flags)
{
	return mfill_atomic(ctx, dst_start, src_start, len,
			    uffd_flags_set_mode(flags, MFILL_ATOMIC_COPY));
}
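
/*
 * Illustrative userspace sketch (not part of this file; field and flag
 * names are from the uffd uAPI in linux/userfaultfd.h): callers reach
 * mfill_atomic_copy() through the UFFDIO_COPY ioctl, typically from a
 * fault-handling thread. "uffd", "fault_addr", "src_buf" and "page_size"
 * are assumed to come from the surrounding handler.
 *
 *	struct uffdio_copy copy = {
 *		.dst = fault_addr & ~(page_size - 1),
 *		.src = (unsigned long)src_buf,
 *		.len = page_size,
 *		.mode = 0,	(or UFFDIO_COPY_MODE_WP to map write-protected)
 *	};
 *	if (ioctl(uffd, UFFDIO_COPY, &copy) == -1 && errno == EAGAIN)
 *		(retry; copy.copy reports the bytes already copied)
 */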

ssize_t mfill_atomic_zeropage(struct userfaultfd_ctx *ctx,
			      unsigned long start,
			      unsigned long len)
{
	return mfill_atomic(ctx, start, 0, len,
			    uffd_flags_set_mode(0, MFILL_ATOMIC_ZEROPAGE));
}
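
/*
 * Illustrative userspace sketch (not part of this file): the zeropage
 * path is driven by UFFDIO_ZEROPAGE, which only needs a destination
 * range since no source data is copied.
 *
 *	struct uffdio_zeropage zero = {
 *		.range = { .start = addr, .len = page_size },
 *		.mode = 0,
 *	};
 *	ioctl(uffd, UFFDIO_ZEROPAGE, &zero);
 */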

ssize_t mfill_atomic_continue(struct userfaultfd_ctx *ctx, unsigned long start,
			      unsigned long len, uffd_flags_t flags)
{

	/*
	 * A caller might reasonably assume that UFFDIO_CONTINUE contains an
	 * smp_wmb() to ensure that any writes to the about-to-be-mapped page by
	 * the thread doing the UFFDIO_CONTINUE are guaranteed to be visible to
	 * subsequent loads from the page through the newly mapped address range.
	 */
	smp_wmb();

	return mfill_atomic(ctx, start, 0, len,
			    uffd_flags_set_mode(flags, MFILL_ATOMIC_CONTINUE));
}
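
/*
 * Illustrative userspace sketch (not part of this file): UFFDIO_CONTINUE
 * resolves a minor fault by mapping the page that already exists in the
 * page cache. The handler typically populates the page through a second,
 * non-registered mapping of the same file first, which is the ordering
 * the smp_wmb() above is paired with. "shmem_alias" is an assumed second
 * mapping of the backing file.
 *
 *	memcpy(shmem_alias + offset, data, page_size);
 *	struct uffdio_continue cont = {
 *		.range = { .start = fault_addr & ~(page_size - 1),
 *			   .len = page_size },
 *		.mode = 0,
 *	};
 *	ioctl(uffd, UFFDIO_CONTINUE, &cont);
 */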

ssize_t mfill_atomic_poison(struct userfaultfd_ctx *ctx, unsigned long start,
			    unsigned long len, uffd_flags_t flags)
{
	return mfill_atomic(ctx, start, 0, len,
			    uffd_flags_set_mode(flags, MFILL_ATOMIC_POISON));
}
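
/*
 * Illustrative userspace sketch (not part of this file): UFFDIO_POISON
 * installs PTE_MARKER_POISONED so that any future access to the range
 * faults with SIGBUS, e.g. when a VM migration source could not supply
 * the page contents.
 *
 *	struct uffdio_poison poison = {
 *		.range = { .start = addr, .len = page_size },
 *		.mode = 0,
 *	};
 *	ioctl(uffd, UFFDIO_POISON, &poison);
 */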

long uffd_wp_range(struct vm_area_struct *dst_vma,
		   unsigned long start, unsigned long len, bool enable_wp)
{
	unsigned int mm_cp_flags;
	struct mmu_gather tlb;
	long ret;

	VM_WARN_ONCE(start < dst_vma->vm_start || start + len > dst_vma->vm_end,
			"The address range exceeds VMA boundary.\n");
	if (enable_wp)
		mm_cp_flags = MM_CP_UFFD_WP;
	else
		mm_cp_flags = MM_CP_UFFD_WP_RESOLVE;

	/*
	 * vma->vm_page_prot already reflects that uffd-wp is enabled for this
	 * VMA (see userfaultfd_set_vm_flags()) and that all PTEs are supposed
	 * to be write-protected as default whenever protection changes.
	 * Try upgrading write permissions manually.
	 */
	if (!enable_wp && vma_wants_manual_pte_write_upgrade(dst_vma))
		mm_cp_flags |= MM_CP_TRY_CHANGE_WRITABLE;
	tlb_gather_mmu(&tlb, dst_vma->vm_mm);
	ret = change_protection(&tlb, dst_vma, start, start + len, mm_cp_flags);
	tlb_finish_mmu(&tlb);

	return ret;
}

int mwriteprotect_range(struct userfaultfd_ctx *ctx, unsigned long start,
			unsigned long len, bool enable_wp)
{
	struct mm_struct *dst_mm = ctx->mm;
	unsigned long end = start + len;
	unsigned long _start, _end;
	struct vm_area_struct *dst_vma;
	unsigned long page_mask;
	long err;
	VMA_ITERATOR(vmi, dst_mm, start);

	/*
	 * Sanitize the command parameters:
	 */
	BUG_ON(start & ~PAGE_MASK);
	BUG_ON(len & ~PAGE_MASK);

	/* Does the address range wrap, or is the span zero-sized? */
	BUG_ON(start + len <= start);

	mmap_read_lock(dst_mm);

	/*
	 * If memory mappings are changing because of non-cooperative
	 * operation (e.g. mremap) running in parallel, bail out and
	 * request the user to retry later
	 */
	down_read(&ctx->map_changing_lock);
	err = -EAGAIN;
	if (atomic_read(&ctx->mmap_changing))
		goto out_unlock;

	err = -ENOENT;
	for_each_vma_range(vmi, dst_vma, end) {

		if (!userfaultfd_wp(dst_vma)) {
			err = -ENOENT;
			break;
		}

		if (is_vm_hugetlb_page(dst_vma)) {
			err = -EINVAL;
			page_mask = vma_kernel_pagesize(dst_vma) - 1;
			if ((start & page_mask) || (len & page_mask))
				break;
		}

		_start = max(dst_vma->vm_start, start);
		_end = min(dst_vma->vm_end, end);

		err = uffd_wp_range(dst_vma, _start, _end - _start, enable_wp);

		/* Return 0 on success, <0 on failures */
		if (err < 0)
			break;
		err = 0;
	}
out_unlock:
	up_read(&ctx->map_changing_lock);
	mmap_read_unlock(dst_mm);
	return err;
}
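
/*
 * Illustrative userspace sketch (not part of this file): this path is
 * driven by UFFDIO_WRITEPROTECT. Setting UFFDIO_WRITEPROTECT_MODE_WP
 * corresponds to enable_wp == true above; clearing it resolves the
 * write protection once a write fault has been serviced.
 *
 *	struct uffdio_writeprotect wp = {
 *		.range = { .start = addr, .len = len },
 *		.mode = UFFDIO_WRITEPROTECT_MODE_WP,
 *	};
 *	ioctl(uffd, UFFDIO_WRITEPROTECT, &wp);
 */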


void double_pt_lock(spinlock_t *ptl1,
		    spinlock_t *ptl2)
	__acquires(ptl1)
	__acquires(ptl2)
{
	if (ptl1 > ptl2)
		swap(ptl1, ptl2);
	/* lock in virtual address order to avoid lock inversion */
	spin_lock(ptl1);
	if (ptl1 != ptl2)
		spin_lock_nested(ptl2, SINGLE_DEPTH_NESTING);
	else
		__acquire(ptl2);
}

void double_pt_unlock(spinlock_t *ptl1,
		      spinlock_t *ptl2)
	__releases(ptl1)
	__releases(ptl2)
{
	spin_unlock(ptl1);
	if (ptl1 != ptl2)
		spin_unlock(ptl2);
	else
		__release(ptl2);
}
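
/*
 * Usage sketch (mirrors the callers below, e.g. move_present_pte()):
 * because double_pt_lock() orders the two page-table locks by address,
 * concurrent movers working on the same pair of PTEs always acquire
 * them in the same order and cannot deadlock.
 *
 *	double_pt_lock(dst_ptl, src_ptl);
 *	(re-validate both PTEs with pte_same() before modifying them)
 *	double_pt_unlock(dst_ptl, src_ptl);
 */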


static int move_present_pte(struct mm_struct *mm,
			    struct vm_area_struct *dst_vma,
			    struct vm_area_struct *src_vma,
			    unsigned long dst_addr, unsigned long src_addr,
			    pte_t *dst_pte, pte_t *src_pte,
			    pte_t orig_dst_pte, pte_t orig_src_pte,
			    spinlock_t *dst_ptl, spinlock_t *src_ptl,
			    struct folio *src_folio)
{
	int err = 0;

	double_pt_lock(dst_ptl, src_ptl);

	if (!pte_same(ptep_get(src_pte), orig_src_pte) ||
	    !pte_same(ptep_get(dst_pte), orig_dst_pte)) {
		err = -EAGAIN;
		goto out;
	}
	if (folio_test_large(src_folio) ||
	    folio_maybe_dma_pinned(src_folio) ||
	    !PageAnonExclusive(&src_folio->page)) {
		err = -EBUSY;
		goto out;
	}

	orig_src_pte = ptep_clear_flush(src_vma, src_addr, src_pte);
	/* Folio got pinned from under us. Put it back and fail the move. */
	if (folio_maybe_dma_pinned(src_folio)) {
		set_pte_at(mm, src_addr, src_pte, orig_src_pte);
		err = -EBUSY;
		goto out;
	}

	folio_move_anon_rmap(src_folio, dst_vma);
	src_folio->index = linear_page_index(dst_vma, dst_addr);

	orig_dst_pte = mk_pte(&src_folio->page, dst_vma->vm_page_prot);
	/* Follow mremap() behavior and treat the entry dirty after the move */
	orig_dst_pte = pte_mkwrite(pte_mkdirty(orig_dst_pte), dst_vma);

	set_pte_at(mm, dst_addr, dst_pte, orig_dst_pte);
out:
	double_pt_unlock(dst_ptl, src_ptl);
	return err;
}

static int move_swap_pte(struct mm_struct *mm,
			 unsigned long dst_addr, unsigned long src_addr,
			 pte_t *dst_pte, pte_t *src_pte,
			 pte_t orig_dst_pte, pte_t orig_src_pte,
			 spinlock_t *dst_ptl, spinlock_t *src_ptl)
{
	if (!pte_swp_exclusive(orig_src_pte))
		return -EBUSY;

	double_pt_lock(dst_ptl, src_ptl);

	if (!pte_same(ptep_get(src_pte), orig_src_pte) ||
	    !pte_same(ptep_get(dst_pte), orig_dst_pte)) {
		double_pt_unlock(dst_ptl, src_ptl);
		return -EAGAIN;
	}

	orig_src_pte = ptep_get_and_clear(mm, src_addr, src_pte);
	set_pte_at(mm, dst_addr, dst_pte, orig_src_pte);
	double_pt_unlock(dst_ptl, src_ptl);

	return 0;
}

static int move_zeropage_pte(struct mm_struct *mm,
			     struct vm_area_struct *dst_vma,
			     struct vm_area_struct *src_vma,
			     unsigned long dst_addr, unsigned long src_addr,
			     pte_t *dst_pte, pte_t *src_pte,
			     pte_t orig_dst_pte, pte_t orig_src_pte,
			     spinlock_t *dst_ptl, spinlock_t *src_ptl)
{
	pte_t zero_pte;

	double_pt_lock(dst_ptl, src_ptl);
	if (!pte_same(ptep_get(src_pte), orig_src_pte) ||
	    !pte_same(ptep_get(dst_pte), orig_dst_pte)) {
		double_pt_unlock(dst_ptl, src_ptl);
		return -EAGAIN;
	}

	zero_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
					 dst_vma->vm_page_prot));
	ptep_clear_flush(src_vma, src_addr, src_pte);
	set_pte_at(mm, dst_addr, dst_pte, zero_pte);
	double_pt_unlock(dst_ptl, src_ptl);

	return 0;
}

/*
 * The mmap_lock for reading is held by the caller. Just move the page
 * from src_pmd to dst_pmd if possible, and return zero if the page was
 * successfully moved.
 */
static int move_pages_pte(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd,
			  struct vm_area_struct *dst_vma,
			  struct vm_area_struct *src_vma,
			  unsigned long dst_addr, unsigned long src_addr,
			  __u64 mode)
{
	swp_entry_t entry;
	pte_t orig_src_pte, orig_dst_pte;
	pte_t src_folio_pte;
	spinlock_t *src_ptl, *dst_ptl;
	pte_t *src_pte = NULL;
	pte_t *dst_pte = NULL;
	pmd_t dummy_pmdval;
	struct folio *src_folio = NULL;
	struct anon_vma *src_anon_vma = NULL;
	struct mmu_notifier_range range;
	int err = 0;

	flush_cache_range(src_vma, src_addr, src_addr + PAGE_SIZE);
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
				src_addr, src_addr + PAGE_SIZE);
	mmu_notifier_invalidate_range_start(&range);
retry:
	/*
	 * Use the maywrite version to indicate that dst_pte will be modified,
	 * but since we will use pte_same() to detect the change of the pte
	 * entry, there is no need to get pmdval, so just pass a dummy variable
	 * to it.
	 */
	dst_pte = pte_offset_map_rw_nolock(mm, dst_pmd, dst_addr, &dummy_pmdval,
					   &dst_ptl);

	/* Retry if a huge pmd materialized from under us */
	if (unlikely(!dst_pte)) {
		err = -EAGAIN;
		goto out;
	}

	/* same as dst_pte */
	src_pte = pte_offset_map_rw_nolock(mm, src_pmd, src_addr, &dummy_pmdval,
					   &src_ptl);

	/*
	 * We held the mmap_lock for reading so MADV_DONTNEED
	 * can zap transparent huge pages under us, or the
	 * transparent huge page fault can establish new
	 * transparent huge pages under us.
	 */
	if (unlikely(!src_pte)) {
		err = -EAGAIN;
		goto out;
	}

	/* Sanity checks before the operation */
	if (WARN_ON_ONCE(pmd_none(*dst_pmd)) || WARN_ON_ONCE(pmd_none(*src_pmd)) ||
	    WARN_ON_ONCE(pmd_trans_huge(*dst_pmd)) || WARN_ON_ONCE(pmd_trans_huge(*src_pmd))) {
		err = -EINVAL;
		goto out;
	}

	spin_lock(dst_ptl);
	orig_dst_pte = ptep_get(dst_pte);
	spin_unlock(dst_ptl);
	if (!pte_none(orig_dst_pte)) {
		err = -EEXIST;
		goto out;
	}

	spin_lock(src_ptl);
	orig_src_pte = ptep_get(src_pte);
	spin_unlock(src_ptl);
	if (pte_none(orig_src_pte)) {
		if (!(mode & UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES))
			err = -ENOENT;
		else /* nothing to do to move a hole */
			err = 0;
		goto out;
	}
	/* If the PTE changed after we locked the folio then start over */
	if (src_folio && unlikely(!pte_same(src_folio_pte, orig_src_pte))) {
		err = -EAGAIN;
		goto out;
	}

	if (pte_present(orig_src_pte)) {
		if (is_zero_pfn(pte_pfn(orig_src_pte))) {
			err = move_zeropage_pte(mm, dst_vma, src_vma,
					       dst_addr, src_addr, dst_pte, src_pte,
					       orig_dst_pte, orig_src_pte,
					       dst_ptl, src_ptl);
			goto out;
		}

		/*
		 * Pin and lock both source folio and anon_vma. Since we are in
		 * RCU read section, we can't block, so on contention have to
		 * unmap the ptes, obtain the lock and retry.
		 */
		if (!src_folio) {
			struct folio *folio;
			bool locked;

			/*
			 * Pin the page while holding the lock to be sure the
			 * page isn't freed under us
			 */
			spin_lock(src_ptl);
			if (!pte_same(orig_src_pte, ptep_get(src_pte))) {
				spin_unlock(src_ptl);
				err = -EAGAIN;
				goto out;
			}

			folio = vm_normal_folio(src_vma, src_addr, orig_src_pte);
			if (!folio || !PageAnonExclusive(&folio->page)) {
				spin_unlock(src_ptl);
				err = -EBUSY;
				goto out;
			}

			locked = folio_trylock(folio);
			/*
			 * We avoid waiting for folio lock with a raised
			 * refcount for large folios because extra refcounts
			 * will result in split_folio() failing later and
			 * retrying.  If multiple tasks are trying to move a
			 * large folio we can end up livelocking.
			 */
			if (!locked && folio_test_large(folio)) {
				spin_unlock(src_ptl);
				err = -EAGAIN;
				goto out;
			}

			folio_get(folio);
			src_folio = folio;
			src_folio_pte = orig_src_pte;
			spin_unlock(src_ptl);

			if (!locked) {
				pte_unmap(&orig_src_pte);
				pte_unmap(&orig_dst_pte);
				src_pte = dst_pte = NULL;
				/* now we can block and wait */
				folio_lock(src_folio);
				goto retry;
			}

			if (WARN_ON_ONCE(!folio_test_anon(src_folio))) {
				err = -EBUSY;
				goto out;
			}
		}

		/* at this point we have src_folio locked */
		if (folio_test_large(src_folio)) {
			/* split_folio() can block */
			pte_unmap(&orig_src_pte);
			pte_unmap(&orig_dst_pte);
			src_pte = dst_pte = NULL;
			err = split_folio(src_folio);
			if (err)
				goto out;
			/* have to reacquire the folio after it got split */
			folio_unlock(src_folio);
			folio_put(src_folio);
			src_folio = NULL;
			goto retry;
		}

		if (!src_anon_vma) {
			/*
			 * folio_referenced walks the anon_vma chain
			 * without the folio lock. Serialize against it with
			 * the anon_vma lock, the folio lock is not enough.
			 */
			src_anon_vma = folio_get_anon_vma(src_folio);
			if (!src_anon_vma) {
				/* page was unmapped from under us */
				err = -EAGAIN;
				goto out;
			}
			if (!anon_vma_trylock_write(src_anon_vma)) {
				pte_unmap(&orig_src_pte);
				pte_unmap(&orig_dst_pte);
				src_pte = dst_pte = NULL;
				/* now we can block and wait */
				anon_vma_lock_write(src_anon_vma);
				goto retry;
			}
		}

		err = move_present_pte(mm, dst_vma, src_vma,
				       dst_addr, src_addr, dst_pte, src_pte,
				       orig_dst_pte, orig_src_pte,
				       dst_ptl, src_ptl, src_folio);
	} else {
		entry = pte_to_swp_entry(orig_src_pte);
		if (non_swap_entry(entry)) {
			if (is_migration_entry(entry)) {
				pte_unmap(&orig_src_pte);
				pte_unmap(&orig_dst_pte);
				src_pte = dst_pte = NULL;
				migration_entry_wait(mm, src_pmd, src_addr);
				err = -EAGAIN;
			} else
				err = -EFAULT;
			goto out;
		}

		err = move_swap_pte(mm, dst_addr, src_addr,
				    dst_pte, src_pte,
				    orig_dst_pte, orig_src_pte,
				    dst_ptl, src_ptl);
	}

out:
	if (src_anon_vma) {
		anon_vma_unlock_write(src_anon_vma);
		put_anon_vma(src_anon_vma);
	}
	if (src_folio) {
		folio_unlock(src_folio);
		folio_put(src_folio);
	}
	if (dst_pte)
		pte_unmap(dst_pte);
	if (src_pte)
		pte_unmap(src_pte);
	mmu_notifier_invalidate_range_end(&range);

	return err;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline bool move_splits_huge_pmd(unsigned long dst_addr,
					unsigned long src_addr,
					unsigned long src_end)
{
	return (src_addr & ~HPAGE_PMD_MASK) || (dst_addr & ~HPAGE_PMD_MASK) ||
		src_end - src_addr < HPAGE_PMD_SIZE;
}
#else
static inline bool move_splits_huge_pmd(unsigned long dst_addr,
					unsigned long src_addr,
					unsigned long src_end)
{
	/* This is unreachable anyway, just to avoid warnings when HPAGE_PMD_SIZE==0 */
	return false;
}
#endif

static inline bool vma_move_compatible(struct vm_area_struct *vma)
{
	return !(vma->vm_flags & (VM_PFNMAP | VM_IO | VM_HUGETLB |
				  VM_MIXEDMAP | VM_SHADOW_STACK));
}

static int validate_move_areas(struct userfaultfd_ctx *ctx,
			       struct vm_area_struct *src_vma,
			       struct vm_area_struct *dst_vma)
{
	/* Only allow moving if both have the same access and protection */
	if ((src_vma->vm_flags & VM_ACCESS_FLAGS) != (dst_vma->vm_flags & VM_ACCESS_FLAGS) ||
	    pgprot_val(src_vma->vm_page_prot) != pgprot_val(dst_vma->vm_page_prot))
		return -EINVAL;

	/* Only allow moving if both are mlocked or both aren't */
	if ((src_vma->vm_flags & VM_LOCKED) != (dst_vma->vm_flags & VM_LOCKED))
		return -EINVAL;

	/*
	 * For now, we keep it simple and only move between writable VMAs.
	 * Access flags are equal, therefore checking only the source is enough.
	 */
	if (!(src_vma->vm_flags & VM_WRITE))
		return -EINVAL;

	/* Check if vma flags indicate content which can be moved */
	if (!vma_move_compatible(src_vma) || !vma_move_compatible(dst_vma))
		return -EINVAL;

	/* Ensure dst_vma is registered in uffd we are operating on */
	if (!dst_vma->vm_userfaultfd_ctx.ctx ||
	    dst_vma->vm_userfaultfd_ctx.ctx != ctx)
		return -EINVAL;

	/* Only allow moving across anonymous vmas */
	if (!vma_is_anonymous(src_vma) || !vma_is_anonymous(dst_vma))
		return -EINVAL;

	return 0;
}

static __always_inline
int find_vmas_mm_locked(struct mm_struct *mm,
			unsigned long dst_start,
			unsigned long src_start,
			struct vm_area_struct **dst_vmap,
			struct vm_area_struct **src_vmap)
{
	struct vm_area_struct *vma;

	mmap_assert_locked(mm);
	vma = find_vma_and_prepare_anon(mm, dst_start);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	*dst_vmap = vma;
	/* Skip finding src_vma if src_start is in dst_vma */
	if (src_start >= vma->vm_start && src_start < vma->vm_end)
		goto out_success;

	vma = vma_lookup(mm, src_start);
	if (!vma)
		return -ENOENT;
out_success:
	*src_vmap = vma;
	return 0;
}

#ifdef CONFIG_PER_VMA_LOCK
static int uffd_move_lock(struct mm_struct *mm,
			  unsigned long dst_start,
			  unsigned long src_start,
			  struct vm_area_struct **dst_vmap,
			  struct vm_area_struct **src_vmap)
{
	struct vm_area_struct *vma;
	int err;

	vma = uffd_lock_vma(mm, dst_start);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	*dst_vmap = vma;
	/*
	 * Skip finding src_vma if src_start is in dst_vma. This also ensures
	 * that we don't lock the same vma twice.
	 */
	if (src_start >= vma->vm_start && src_start < vma->vm_end) {
		*src_vmap = vma;
		return 0;
	}

	/*
	 * Using uffd_lock_vma() to get src_vma can lead to following deadlock:
	 *
	 * Thread1				Thread2
	 * -------				-------
	 * vma_start_read(dst_vma)
	 *					mmap_write_lock(mm)
	 *					vma_start_write(src_vma)
	 * vma_start_read(src_vma)
	 * mmap_read_lock(mm)
	 *					vma_start_write(dst_vma)
	 */
	*src_vmap = lock_vma_under_rcu(mm, src_start);
	if (likely(*src_vmap))
		return 0;

	/* Undo any locking and retry in mmap_lock critical section */
	vma_end_read(*dst_vmap);

	mmap_read_lock(mm);
	err = find_vmas_mm_locked(mm, dst_start, src_start, dst_vmap, src_vmap);
	if (!err) {
		/*
		 * See comment in uffd_lock_vma() as to why not using
		 * vma_start_read() here.
		 */
		down_read(&(*dst_vmap)->vm_lock->lock);
		if (*dst_vmap != *src_vmap)
			down_read_nested(&(*src_vmap)->vm_lock->lock,
					 SINGLE_DEPTH_NESTING);
	}
	mmap_read_unlock(mm);
	return err;
}

static void uffd_move_unlock(struct vm_area_struct *dst_vma,
			     struct vm_area_struct *src_vma)
{
	vma_end_read(src_vma);
	if (src_vma != dst_vma)
		vma_end_read(dst_vma);
}

#else

static int uffd_move_lock(struct mm_struct *mm,
			  unsigned long dst_start,
			  unsigned long src_start,
			  struct vm_area_struct **dst_vmap,
			  struct vm_area_struct **src_vmap)
{
	int err;

	mmap_read_lock(mm);
	err = find_vmas_mm_locked(mm, dst_start, src_start, dst_vmap, src_vmap);
	if (err)
		mmap_read_unlock(mm);
	return err;
}

static void uffd_move_unlock(struct vm_area_struct *dst_vma,
			     struct vm_area_struct *src_vma)
{
	mmap_assert_locked(src_vma->vm_mm);
	mmap_read_unlock(dst_vma->vm_mm);
}
#endif

/**
 * move_pages - move arbitrary anonymous pages of an existing vma
 * @ctx: pointer to the userfaultfd context
 * @dst_start: start of the destination virtual memory range
 * @src_start: start of the source virtual memory range
 * @len: length of the virtual memory range
 * @mode: flags from uffdio_move.mode
 *
 * It will either use the mmap_lock in read mode or per-vma locks.
 *
 * move_pages() remaps arbitrary anonymous pages atomically with zero
 * copy. It only works on non shared anonymous pages because those can
 * be relocated without generating non linear anon_vmas in the rmap
 * code.
 *
 * It provides a zero copy mechanism to handle userspace page faults.
 * The source vma pages should have mapcount == 1, which can be
 * enforced by using madvise(MADV_DONTFORK) on src vma.
 *
 * The thread receiving the page during the userland page fault
 * will receive the faulting page in the source vma through the network,
 * storage or any other I/O device (MADV_DONTFORK in the source vma
 * prevents move_pages() from failing with -EBUSY if the process forks
 * before move_pages() is called), then it will call move_pages() to map
 * the page in the faulting address in the destination vma.
 *
 * This userfaultfd command works purely via pagetables, so it's the
 * most efficient way to move physical non shared anonymous pages
 * across different virtual addresses. Unlike mremap()/mmap()/munmap()
 * it does not create any new vmas. The mapping in the destination
 * address is atomic.
 *
 * It only works if the vma protection bits are identical between the
 * source and destination vma.
 *
 * It can remap non shared anonymous pages within the same vma too.
 *
 * If the source virtual memory range has any unmapped holes, or if
 * the destination virtual memory range is not a whole unmapped hole,
 * move_pages() will fail respectively with -ENOENT or -EEXIST. This
 * provides a very strict behavior to avoid any chance of memory
 * corruption going unnoticed if there are userland race conditions.
 * Only one thread should resolve the userland page fault at any given
 * time for any given faulting address. This means that if two threads
 * try to both call move_pages() on the same destination address at the
 * same time, the second thread will get an explicit error from this
 * command.
 *
 * The command retval will return "len" if successful. The command
 * however can be interrupted by fatal signals or errors. If
 * interrupted it will return the number of bytes successfully
 * remapped before the interruption if any, or the negative error if
 * none. It will never return zero. Either it will return an error or
 * an amount of bytes successfully moved. If the retval reports a
 * "short" remap, the move_pages() command should be repeated by
 * userland with src+retval, dst+retval, len-retval if it wants to know
 * about the error that interrupted it.
 *
 * The UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES flag can be specified to
 * prevent -ENOENT errors from materializing if there are holes in the
 * source virtual range that is being remapped. The holes will be
 * accounted as successfully remapped in the retval of the
 * command. This is mostly useful to remap hugepage naturally aligned
 * virtual regions without knowing if there are transparent hugepages
 * in the regions or not, but preventing the risk of having to split
 * the hugepmd during the remap.
 *
 * If there's any rmap walk that is taking the anon_vma locks without
 * first obtaining the folio lock (the only current instance is
 * folio_referenced), they will have to verify if the folio->mapping
 * has changed after taking the anon_vma lock. If it changed they
 * should release the lock and retry obtaining a new anon_vma, because
 * it means the anon_vma was changed by move_pages() before the lock
 * could be obtained. This is the only additional complexity added to
 * the rmap code to provide this anonymous page remapping functionality.
 */
ssize_t move_pages(struct userfaultfd_ctx *ctx, unsigned long dst_start,
		   unsigned long src_start, unsigned long len, __u64 mode)
{
	struct mm_struct *mm = ctx->mm;
	struct vm_area_struct *src_vma, *dst_vma;
	unsigned long src_addr, dst_addr;
	pmd_t *src_pmd, *dst_pmd;
	long err = -EINVAL;
	ssize_t moved = 0;

	/* Sanitize the command parameters. */
	if (WARN_ON_ONCE(src_start & ~PAGE_MASK) ||
	    WARN_ON_ONCE(dst_start & ~PAGE_MASK) ||
	    WARN_ON_ONCE(len & ~PAGE_MASK))
		goto out;

	/* Does the address range wrap, or is the span zero-sized? */
	if (WARN_ON_ONCE(src_start + len <= src_start) ||
	    WARN_ON_ONCE(dst_start + len <= dst_start))
		goto out;

	err = uffd_move_lock(mm, dst_start, src_start, &dst_vma, &src_vma);
	if (err)
		goto out;

	/* Re-check after taking map_changing_lock */
	err = -EAGAIN;
	down_read(&ctx->map_changing_lock);
	if (likely(atomic_read(&ctx->mmap_changing)))
		goto out_unlock;
	/*
	 * Make sure the vma is not shared, that the src and dst remap
	 * ranges are both valid and fully within a single existing
	 * vma.
	 */
	err = -EINVAL;
	if (src_vma->vm_flags & VM_SHARED)
		goto out_unlock;
	if (src_start + len > src_vma->vm_end)
		goto out_unlock;

	if (dst_vma->vm_flags & VM_SHARED)
		goto out_unlock;
	if (dst_start + len > dst_vma->vm_end)
		goto out_unlock;

	err = validate_move_areas(ctx, src_vma, dst_vma);
	if (err)
		goto out_unlock;

	for (src_addr = src_start, dst_addr = dst_start;
	     src_addr < src_start + len;) {
		spinlock_t *ptl;
		pmd_t dst_pmdval;
		unsigned long step_size;

		/*
		 * Below works because anonymous area would not have a
		 * transparent huge PUD. If file-backed support is added,
		 * that case would need to be handled here.
		 */
		src_pmd = mm_find_pmd(mm, src_addr);
		if (unlikely(!src_pmd)) {
			if (!(mode & UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES)) {
				err = -ENOENT;
				break;
			}
			src_pmd = mm_alloc_pmd(mm, src_addr);
			if (unlikely(!src_pmd)) {
				err = -ENOMEM;
				break;
			}
		}
		dst_pmd = mm_alloc_pmd(mm, dst_addr);
		if (unlikely(!dst_pmd)) {
			err = -ENOMEM;
			break;
		}

		dst_pmdval = pmdp_get_lockless(dst_pmd);
		/*
		 * If the dst_pmd is mapped as THP don't override it and just
		 * be strict. If dst_pmd changes into THP after this check, the
		 * move_pages_huge_pmd() will detect the change and retry
		 * while move_pages_pte() will detect the change and fail.
		 */
		if (unlikely(pmd_trans_huge(dst_pmdval))) {
			err = -EEXIST;
			break;
		}

		ptl = pmd_trans_huge_lock(src_pmd, src_vma);
		if (ptl) {
			if (pmd_devmap(*src_pmd)) {
				spin_unlock(ptl);
				err = -ENOENT;
				break;
			}

			/* Check if we can move the pmd without splitting it. */
			if (move_splits_huge_pmd(dst_addr, src_addr, src_start + len) ||
			    !pmd_none(dst_pmdval)) {
				struct folio *folio = pmd_folio(*src_pmd);

				if (!folio || (!is_huge_zero_folio(folio) &&
					       !PageAnonExclusive(&folio->page))) {
					spin_unlock(ptl);
					err = -EBUSY;
					break;
				}

				spin_unlock(ptl);
				split_huge_pmd(src_vma, src_pmd, src_addr);
				/* The folio will be split by move_pages_pte() */
				continue;
			}

			err = move_pages_huge_pmd(mm, dst_pmd, src_pmd,
						  dst_pmdval, dst_vma, src_vma,
						  dst_addr, src_addr);
			step_size = HPAGE_PMD_SIZE;
		} else {
			if (pmd_none(*src_pmd)) {
				if (!(mode & UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES)) {
					err = -ENOENT;
					break;
				}
				if (unlikely(__pte_alloc(mm, src_pmd))) {
					err = -ENOMEM;
					break;
				}
			}

			if (unlikely(pte_alloc(mm, dst_pmd))) {
				err = -ENOMEM;
				break;
			}

			err = move_pages_pte(mm, dst_pmd, src_pmd,
					     dst_vma, src_vma,
					     dst_addr, src_addr, mode);
			step_size = PAGE_SIZE;
		}

		cond_resched();

		if (fatal_signal_pending(current)) {
			/* Do not override an error */
			if (!err || err == -EAGAIN)
				err = -EINTR;
			break;
		}

		if (err) {
			if (err == -EAGAIN)
				continue;
			break;
		}

		/* Proceed to the next page */
		dst_addr += step_size;
		src_addr += step_size;
		moved += step_size;
	}

out_unlock:
	up_read(&ctx->map_changing_lock);
	uffd_move_unlock(dst_vma, src_vma);
out:
	VM_WARN_ON(moved < 0);
	VM_WARN_ON(err > 0);
	VM_WARN_ON(!moved && !err);
	return moved ? moved : err;
}
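
/*
 * Illustrative userspace sketch (not part of this file): move_pages() is
 * reached through the UFFDIO_MOVE ioctl. As described above, a "short"
 * move reports the bytes already remapped in uffdio_move.move, so a
 * caller can resume after transient -EAGAIN failures:
 *
 *	struct uffdio_move move = {
 *		.dst = dst, .src = src, .len = len,
 *		.mode = 0,	(or UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES)
 *	};
 *	while (ioctl(uffd, UFFDIO_MOVE, &move) == -1 && errno == EAGAIN) {
 *		__s64 done = move.move > 0 ? move.move : 0;
 *		move.dst += done;
 *		move.src += done;
 *		move.len -= done;
 *		move.move = 0;
 *	}
 */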

static void userfaultfd_set_vm_flags(struct vm_area_struct *vma,
				     vm_flags_t flags)
{
	const bool uffd_wp_changed = (vma->vm_flags ^ flags) & VM_UFFD_WP;

	vm_flags_reset(vma, flags);
	/*
	 * For shared mappings, we want to enable writenotify while
	 * userfaultfd-wp is enabled (see vma_wants_writenotify()). We'll simply
	 * recalculate vma->vm_page_prot whenever userfaultfd-wp changes.
	 */
	if ((vma->vm_flags & VM_SHARED) && uffd_wp_changed)
		vma_set_page_prot(vma);
}

static void userfaultfd_set_ctx(struct vm_area_struct *vma,
				struct userfaultfd_ctx *ctx,
				unsigned long flags)
{
	vma_start_write(vma);
	vma->vm_userfaultfd_ctx = (struct vm_userfaultfd_ctx){ctx};
	userfaultfd_set_vm_flags(vma,
				 (vma->vm_flags & ~__VM_UFFD_FLAGS) | flags);
}

void userfaultfd_reset_ctx(struct vm_area_struct *vma)
{
	userfaultfd_set_ctx(vma, NULL, 0);
}

struct vm_area_struct *userfaultfd_clear_vma(struct vma_iterator *vmi,
					     struct vm_area_struct *prev,
					     struct vm_area_struct *vma,
					     unsigned long start,
					     unsigned long end)
{
	struct vm_area_struct *ret;

	/* Reset ptes for the whole vma range if wr-protected */
	if (userfaultfd_wp(vma))
		uffd_wp_range(vma, start, end - start, false);

	ret = vma_modify_flags_uffd(vmi, prev, vma, start, end,
				    vma->vm_flags & ~__VM_UFFD_FLAGS,
				    NULL_VM_UFFD_CTX);

	/*
	 * In the vma_merge() successful mprotect-like case 8:
	 * the next vma was merged into the current one and
	 * the current one has not been updated yet.
	 */
	if (!IS_ERR(ret))
		userfaultfd_reset_ctx(ret);

	return ret;
}

/* Assumes mmap write lock taken, and mm_struct pinned. */
int userfaultfd_register_range(struct userfaultfd_ctx *ctx,
			       struct vm_area_struct *vma,
			       unsigned long vm_flags,
			       unsigned long start, unsigned long end,
			       bool wp_async)
{
	VMA_ITERATOR(vmi, ctx->mm, start);
	struct vm_area_struct *prev = vma_prev(&vmi);
	unsigned long vma_end;
	unsigned long new_flags;

	if (vma->vm_start < start)
		prev = vma;

	for_each_vma_range(vmi, vma, end) {
		cond_resched();

		BUG_ON(!vma_can_userfault(vma, vm_flags, wp_async));
		BUG_ON(vma->vm_userfaultfd_ctx.ctx &&
		       vma->vm_userfaultfd_ctx.ctx != ctx);
		WARN_ON(!(vma->vm_flags & VM_MAYWRITE));

		/*
		 * Nothing to do: this vma is already registered into this
		 * userfaultfd and with the right tracking mode too.
		 */
		if (vma->vm_userfaultfd_ctx.ctx == ctx &&
		    (vma->vm_flags & vm_flags) == vm_flags)
			goto skip;

		if (vma->vm_start > start)
			start = vma->vm_start;
		vma_end = min(end, vma->vm_end);

		new_flags = (vma->vm_flags & ~__VM_UFFD_FLAGS) | vm_flags;
		vma = vma_modify_flags_uffd(&vmi, prev, vma, start, vma_end,
					    new_flags,
					    (struct vm_userfaultfd_ctx){ctx});
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		/*
		 * In the vma_merge() successful mprotect-like case 8:
		 * the next vma was merged into the current one and
		 * the current one has not been updated yet.
		 */
		userfaultfd_set_ctx(vma, ctx, vm_flags);

		if (is_vm_hugetlb_page(vma) && uffd_disable_huge_pmd_share(vma))
			hugetlb_unshare_all_pmds(vma);

skip:
		prev = vma;
		start = vma->vm_end;
	}

	return 0;
}

void userfaultfd_release_new(struct userfaultfd_ctx *ctx)
{
	struct mm_struct *mm = ctx->mm;
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	/* the various vma->vm_userfaultfd_ctx still points to it */
	mmap_write_lock(mm);
	for_each_vma(vmi, vma) {
		if (vma->vm_userfaultfd_ctx.ctx == ctx)
			userfaultfd_reset_ctx(vma);
	}
	mmap_write_unlock(mm);
}

void userfaultfd_release_all(struct mm_struct *mm,
			     struct userfaultfd_ctx *ctx)
{
	struct vm_area_struct *vma, *prev;
	VMA_ITERATOR(vmi, mm, 0);

	if (!mmget_not_zero(mm))
		return;

	/*
	 * Flush page faults out of all CPUs. NOTE: all page faults
	 * must be retried without returning VM_FAULT_SIGBUS if
	 * userfaultfd_ctx_get() succeeds but vma->vma_userfault_ctx
	 * changes while handle_userfault released the mmap_lock. So
	 * it's critical that released is set to true (above), before
	 * taking the mmap_lock for writing.
	 */
	mmap_write_lock(mm);
	prev = NULL;
	for_each_vma(vmi, vma) {
		cond_resched();
		BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^
		       !!(vma->vm_flags & __VM_UFFD_FLAGS));
		if (vma->vm_userfaultfd_ctx.ctx != ctx) {
			prev = vma;
			continue;
		}

		vma = userfaultfd_clear_vma(&vmi, prev, vma,
					    vma->vm_start, vma->vm_end);
		prev = vma;
	}
	mmap_write_unlock(mm);
	mmput(mm);
}