v4.17
 
  1/*
  2 *  mm/userfaultfd.c
  3 *
  4 *  Copyright (C) 2015  Red Hat, Inc.
  5 *
  6 *  This work is licensed under the terms of the GNU GPL, version 2. See
  7 *  the COPYING file in the top-level directory.
  8 */
  9
 10#include <linux/mm.h>
 11#include <linux/sched/signal.h>
 12#include <linux/pagemap.h>
 13#include <linux/rmap.h>
 14#include <linux/swap.h>
 15#include <linux/swapops.h>
 16#include <linux/userfaultfd_k.h>
 17#include <linux/mmu_notifier.h>
 18#include <linux/hugetlb.h>
 19#include <linux/shmem_fs.h>
 20#include <asm/tlbflush.h>
 21#include "internal.h"
 22
 23static int mcopy_atomic_pte(struct mm_struct *dst_mm,
 24			    pmd_t *dst_pmd,
 25			    struct vm_area_struct *dst_vma,
 26			    unsigned long dst_addr,
 27			    unsigned long src_addr,
 28			    struct page **pagep)
 29{
 30	struct mem_cgroup *memcg;
 31	pte_t _dst_pte, *dst_pte;
 32	spinlock_t *ptl;
 33	void *page_kaddr;
 34	int ret;
 35	struct page *page;
 36
 37	if (!*pagep) {
 38		ret = -ENOMEM;
 39		page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr);
 40		if (!page)
 41			goto out;
 42
 43		page_kaddr = kmap_atomic(page);
 44		ret = copy_from_user(page_kaddr,
 45				     (const void __user *) src_addr,
 46				     PAGE_SIZE);
 47		kunmap_atomic(page_kaddr);
 48
 49		/* fallback to copy_from_user outside mmap_sem */
 50		if (unlikely(ret)) {
 51			ret = -EFAULT;
 52			*pagep = page;
 53			/* don't free the page */
 54			goto out;
 55		}
 56	} else {
 57		page = *pagep;
 58		*pagep = NULL;
 59	}
 60
 61	/*
 62	 * The memory barrier inside __SetPageUptodate makes sure that
  63	 * preceding stores to the page contents become visible before
 64	 * the set_pte_at() write.
 65	 */
 66	__SetPageUptodate(page);
 67
 68	ret = -ENOMEM;
 69	if (mem_cgroup_try_charge(page, dst_mm, GFP_KERNEL, &memcg, false))
 70		goto out_release;
 71
 72	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
 73	if (dst_vma->vm_flags & VM_WRITE)
 74		_dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte));
 75
 76	ret = -EEXIST;
 77	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
 78	if (!pte_none(*dst_pte))
 79		goto out_release_uncharge_unlock;
 80
 81	inc_mm_counter(dst_mm, MM_ANONPAGES);
 82	page_add_new_anon_rmap(page, dst_vma, dst_addr, false);
 83	mem_cgroup_commit_charge(page, memcg, false, false);
 84	lru_cache_add_active_or_unevictable(page, dst_vma);
 85
 86	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
 87
 88	/* No need to invalidate - it was non-present before */
 89	update_mmu_cache(dst_vma, dst_addr, dst_pte);
 90
 91	pte_unmap_unlock(dst_pte, ptl);
 92	ret = 0;
 93out:
 94	return ret;
 95out_release_uncharge_unlock:
 96	pte_unmap_unlock(dst_pte, ptl);
 97	mem_cgroup_cancel_charge(page, memcg, false);
 98out_release:
 99	put_page(page);
100	goto out;
101}
102
103static int mfill_zeropage_pte(struct mm_struct *dst_mm,
104			      pmd_t *dst_pmd,
105			      struct vm_area_struct *dst_vma,
106			      unsigned long dst_addr)
107{
108	pte_t _dst_pte, *dst_pte;
109	spinlock_t *ptl;
110	int ret;
111
112	_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
113					 dst_vma->vm_page_prot));
114	ret = -EEXIST;
115	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
116	if (!pte_none(*dst_pte))
117		goto out_unlock;
118	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
119	/* No need to invalidate - it was non-present before */
120	update_mmu_cache(dst_vma, dst_addr, dst_pte);
121	ret = 0;
122out_unlock:
123	pte_unmap_unlock(dst_pte, ptl);
124	return ret;
125}
126
127static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
128{
129	pgd_t *pgd;
130	p4d_t *p4d;
131	pud_t *pud;
132
133	pgd = pgd_offset(mm, address);
134	p4d = p4d_alloc(mm, pgd, address);
135	if (!p4d)
136		return NULL;
137	pud = pud_alloc(mm, p4d, address);
138	if (!pud)
139		return NULL;
140	/*
141	 * Note that we didn't run this because the pmd was
142	 * missing, the *pmd may be already established and in
143	 * turn it may also be a trans_huge_pmd.
144	 */
145	return pmd_alloc(mm, pud, address);
146}
147
148#ifdef CONFIG_HUGETLB_PAGE
149/*
150 * __mcopy_atomic processing for HUGETLB vmas.  Note that this routine is
151 * called with mmap_sem held, it will release mmap_sem before returning.
152 */
153static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
154					      struct vm_area_struct *dst_vma,
155					      unsigned long dst_start,
156					      unsigned long src_start,
157					      unsigned long len,
158					      bool zeropage)
159{
160	int vm_alloc_shared = dst_vma->vm_flags & VM_SHARED;
161	int vm_shared = dst_vma->vm_flags & VM_SHARED;
162	ssize_t err;
163	pte_t *dst_pte;
164	unsigned long src_addr, dst_addr;
165	long copied;
166	struct page *page;
167	struct hstate *h;
168	unsigned long vma_hpagesize;
169	pgoff_t idx;
170	u32 hash;
171	struct address_space *mapping;
172
173	/*
174	 * There is no default zero huge page for all huge page sizes as
 175	 * supported by hugetlb.  A PMD_SIZE huge page may exist as used
176	 * by THP.  Since we can not reliably insert a zero page, this
177	 * feature is not supported.
178	 */
179	if (zeropage) {
180		up_read(&dst_mm->mmap_sem);
181		return -EINVAL;
182	}
183
184	src_addr = src_start;
185	dst_addr = dst_start;
186	copied = 0;
187	page = NULL;
188	vma_hpagesize = vma_kernel_pagesize(dst_vma);
189
190	/*
191	 * Validate alignment based on huge page size
192	 */
193	err = -EINVAL;
194	if (dst_start & (vma_hpagesize - 1) || len & (vma_hpagesize - 1))
195		goto out_unlock;
196
197retry:
198	/*
199	 * On routine entry dst_vma is set.  If we had to drop mmap_sem and
200	 * retry, dst_vma will be set to NULL and we must lookup again.
201	 */
202	if (!dst_vma) {
203		err = -ENOENT;
204		dst_vma = find_vma(dst_mm, dst_start);
205		if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
206			goto out_unlock;
207		/*
208		 * Only allow __mcopy_atomic_hugetlb on userfaultfd
209		 * registered ranges.
210		 */
211		if (!dst_vma->vm_userfaultfd_ctx.ctx)
212			goto out_unlock;
213
214		if (dst_start < dst_vma->vm_start ||
215		    dst_start + len > dst_vma->vm_end)
216			goto out_unlock;
217
218		err = -EINVAL;
219		if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
220			goto out_unlock;
221
222		vm_shared = dst_vma->vm_flags & VM_SHARED;
223	}
224
225	if (WARN_ON(dst_addr & (vma_hpagesize - 1) ||
226		    (len - copied) & (vma_hpagesize - 1)))
227		goto out_unlock;
228
229	/*
 230	 * If not shared, ensure the dst_vma has an anon_vma.
231	 */
232	err = -ENOMEM;
233	if (!vm_shared) {
234		if (unlikely(anon_vma_prepare(dst_vma)))
235			goto out_unlock;
236	}
237
238	h = hstate_vma(dst_vma);
239
240	while (src_addr < src_start + len) {
241		pte_t dst_pteval;
242
243		BUG_ON(dst_addr >= dst_start + len);
244		VM_BUG_ON(dst_addr & ~huge_page_mask(h));
245
246		/*
247		 * Serialize via hugetlb_fault_mutex
248		 */
249		idx = linear_page_index(dst_vma, dst_addr);
250		mapping = dst_vma->vm_file->f_mapping;
251		hash = hugetlb_fault_mutex_hash(h, dst_mm, dst_vma, mapping,
252								idx, dst_addr);
253		mutex_lock(&hugetlb_fault_mutex_table[hash]);
254
255		err = -ENOMEM;
256		dst_pte = huge_pte_alloc(dst_mm, dst_addr, huge_page_size(h));
257		if (!dst_pte) {
258			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
259			goto out_unlock;
260		}
261
262		err = -EEXIST;
263		dst_pteval = huge_ptep_get(dst_pte);
264		if (!huge_pte_none(dst_pteval)) {
265			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
266			goto out_unlock;
267		}
268
269		err = hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma,
270						dst_addr, src_addr, &page);
271
272		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
273		vm_alloc_shared = vm_shared;
274
275		cond_resched();
276
277		if (unlikely(err == -EFAULT)) {
278			up_read(&dst_mm->mmap_sem);
279			BUG_ON(!page);
280
281			err = copy_huge_page_from_user(page,
282						(const void __user *)src_addr,
283						pages_per_huge_page(h), true);
284			if (unlikely(err)) {
285				err = -EFAULT;
286				goto out;
287			}
288			down_read(&dst_mm->mmap_sem);
289
290			dst_vma = NULL;
291			goto retry;
292		} else
293			BUG_ON(page);
294
295		if (!err) {
296			dst_addr += vma_hpagesize;
297			src_addr += vma_hpagesize;
298			copied += vma_hpagesize;
299
300			if (fatal_signal_pending(current))
301				err = -EINTR;
302		}
303		if (err)
304			break;
305	}
306
307out_unlock:
308	up_read(&dst_mm->mmap_sem);
309out:
310	if (page) {
311		/*
312		 * We encountered an error and are about to free a newly
313		 * allocated huge page.
314		 *
315		 * Reservation handling is very subtle, and is different for
316		 * private and shared mappings.  See the routine
317		 * restore_reserve_on_error for details.  Unfortunately, we
318		 * can not call restore_reserve_on_error now as it would
319		 * require holding mmap_sem.
320		 *
321		 * If a reservation for the page existed in the reservation
322		 * map of a private mapping, the map was modified to indicate
323		 * the reservation was consumed when the page was allocated.
324		 * We clear the PagePrivate flag now so that the global
325		 * reserve count will not be incremented in free_huge_page.
326		 * The reservation map will still indicate the reservation
327		 * was consumed and possibly prevent later page allocation.
328		 * This is better than leaking a global reservation.  If no
329		 * reservation existed, it is still safe to clear PagePrivate
330		 * as no adjustments to reservation counts were made during
331		 * allocation.
332		 *
333		 * The reservation map for shared mappings indicates which
334		 * pages have reservations.  When a huge page is allocated
335		 * for an address with a reservation, no change is made to
336		 * the reserve map.  In this case PagePrivate will be set
337		 * to indicate that the global reservation count should be
338		 * incremented when the page is freed.  This is the desired
339		 * behavior.  However, when a huge page is allocated for an
340		 * address without a reservation a reservation entry is added
341		 * to the reservation map, and PagePrivate will not be set.
342		 * When the page is freed, the global reserve count will NOT
343		 * be incremented and it will appear as though we have leaked
344		 * reserved page.  In this case, set PagePrivate so that the
345		 * global reserve count will be incremented to match the
346		 * reservation map entry which was created.
347		 *
348		 * Note that vm_alloc_shared is based on the flags of the vma
349		 * for which the page was originally allocated.  dst_vma could
350		 * be different or NULL on error.
351		 */
352		if (vm_alloc_shared)
353			SetPagePrivate(page);
354		else
355			ClearPagePrivate(page);
356		put_page(page);
357	}
358	BUG_ON(copied < 0);
359	BUG_ON(err > 0);
360	BUG_ON(!copied && !err);
361	return copied ? copied : err;
362}
363#else /* !CONFIG_HUGETLB_PAGE */
364/* fail at build time if gcc attempts to use this */
365extern ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
366				      struct vm_area_struct *dst_vma,
367				      unsigned long dst_start,
368				      unsigned long src_start,
369				      unsigned long len,
370				      bool zeropage);
371#endif /* CONFIG_HUGETLB_PAGE */
372
373static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
374						pmd_t *dst_pmd,
375						struct vm_area_struct *dst_vma,
376						unsigned long dst_addr,
377						unsigned long src_addr,
378						struct page **page,
379						bool zeropage)
380{
381	ssize_t err;
382
383	if (vma_is_anonymous(dst_vma)) {
384		if (!zeropage)
385			err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
386					       dst_addr, src_addr, page);
387		else
388			err = mfill_zeropage_pte(dst_mm, dst_pmd,
389						 dst_vma, dst_addr);
390	} else {
391		if (!zeropage)
392			err = shmem_mcopy_atomic_pte(dst_mm, dst_pmd,
393						     dst_vma, dst_addr,
394						     src_addr, page);
395		else
396			err = shmem_mfill_zeropage_pte(dst_mm, dst_pmd,
397						       dst_vma, dst_addr);
398	}
399
400	return err;
401}
402
403static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
404					      unsigned long dst_start,
405					      unsigned long src_start,
406					      unsigned long len,
407					      bool zeropage)
408{
409	struct vm_area_struct *dst_vma;
410	ssize_t err;
411	pmd_t *dst_pmd;
412	unsigned long src_addr, dst_addr;
413	long copied;
414	struct page *page;
415
416	/*
417	 * Sanitize the command parameters:
418	 */
419	BUG_ON(dst_start & ~PAGE_MASK);
420	BUG_ON(len & ~PAGE_MASK);
421
422	/* Does the address range wrap, or is the span zero-sized? */
423	BUG_ON(src_start + len <= src_start);
424	BUG_ON(dst_start + len <= dst_start);
425
426	src_addr = src_start;
427	dst_addr = dst_start;
428	copied = 0;
429	page = NULL;
430retry:
431	down_read(&dst_mm->mmap_sem);
432
433	/*
434	 * Make sure the vma is not shared, that the dst range is
435	 * both valid and fully within a single existing vma.
436	 */
437	err = -ENOENT;
438	dst_vma = find_vma(dst_mm, dst_start);
439	if (!dst_vma)
440		goto out_unlock;
441	/*
442	 * Be strict and only allow __mcopy_atomic on userfaultfd
443	 * registered ranges to prevent userland errors going
444	 * unnoticed. As far as the VM consistency is concerned, it
445	 * would be perfectly safe to remove this check, but there's
 446	 * no useful usage for __mcopy_atomic outside of userfaultfd
447	 * registered ranges. This is after all why these are ioctls
448	 * belonging to the userfaultfd and not syscalls.
449	 */
450	if (!dst_vma->vm_userfaultfd_ctx.ctx)
451		goto out_unlock;
452
453	if (dst_start < dst_vma->vm_start ||
454	    dst_start + len > dst_vma->vm_end)
455		goto out_unlock;
456
457	err = -EINVAL;
458	/*
459	 * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
460	 * it will overwrite vm_ops, so vma_is_anonymous must return false.
461	 */
462	if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
463	    dst_vma->vm_flags & VM_SHARED))
464		goto out_unlock;
465
466	/*
467	 * If this is a HUGETLB vma, pass off to appropriate routine
468	 */
469	if (is_vm_hugetlb_page(dst_vma))
470		return  __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start,
471						src_start, len, zeropage);
472
473	if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
474		goto out_unlock;
475
476	/*
 477	 * Ensure the dst_vma has an anon_vma or this page
478	 * would get a NULL anon_vma when moved in the
479	 * dst_vma.
480	 */
481	err = -ENOMEM;
482	if (vma_is_anonymous(dst_vma) && unlikely(anon_vma_prepare(dst_vma)))
483		goto out_unlock;
484
485	while (src_addr < src_start + len) {
486		pmd_t dst_pmdval;
487
488		BUG_ON(dst_addr >= dst_start + len);
489
490		dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
491		if (unlikely(!dst_pmd)) {
492			err = -ENOMEM;
493			break;
494		}
495
496		dst_pmdval = pmd_read_atomic(dst_pmd);
497		/*
498		 * If the dst_pmd is mapped as THP don't
499		 * override it and just be strict.
500		 */
501		if (unlikely(pmd_trans_huge(dst_pmdval))) {
502			err = -EEXIST;
503			break;
504		}
505		if (unlikely(pmd_none(dst_pmdval)) &&
506		    unlikely(__pte_alloc(dst_mm, dst_pmd, dst_addr))) {
507			err = -ENOMEM;
508			break;
509		}
 510		/* If a huge pmd materialized from under us, fail */
511		if (unlikely(pmd_trans_huge(*dst_pmd))) {
512			err = -EFAULT;
513			break;
514		}
515
516		BUG_ON(pmd_none(*dst_pmd));
517		BUG_ON(pmd_trans_huge(*dst_pmd));
518
519		err = mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
520				       src_addr, &page, zeropage);
521		cond_resched();
522
523		if (unlikely(err == -EFAULT)) {
524			void *page_kaddr;
525
526			up_read(&dst_mm->mmap_sem);
527			BUG_ON(!page);
528
529			page_kaddr = kmap(page);
530			err = copy_from_user(page_kaddr,
531					     (const void __user *) src_addr,
532					     PAGE_SIZE);
533			kunmap(page);
534			if (unlikely(err)) {
535				err = -EFAULT;
536				goto out;
537			}
538			goto retry;
539		} else
540			BUG_ON(page);
541
542		if (!err) {
543			dst_addr += PAGE_SIZE;
544			src_addr += PAGE_SIZE;
545			copied += PAGE_SIZE;
546
547			if (fatal_signal_pending(current))
548				err = -EINTR;
549		}
550		if (err)
551			break;
552	}
553
554out_unlock:
555	up_read(&dst_mm->mmap_sem);
556out:
557	if (page)
558		put_page(page);
559	BUG_ON(copied < 0);
560	BUG_ON(err > 0);
561	BUG_ON(!copied && !err);
562	return copied ? copied : err;
563}
564
565ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
566		     unsigned long src_start, unsigned long len)
567{
568	return __mcopy_atomic(dst_mm, dst_start, src_start, len, false);
569}
570
571ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start,
572		       unsigned long len)
573{
574	return __mcopy_atomic(dst_mm, start, 0, len, true);
575}
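
The two exported helpers above, mcopy_atomic() and mfill_zeropage(), are the kernel side of the UFFDIO_COPY and UFFDIO_ZEROPAGE ioctls on a userfaultfd descriptor. The sketch below is editorial (not part of mm/userfaultfd.c): it assumes a single page-aligned anonymous region, omits error handling and the poll()/read() event loop, and only shows how userspace ends up driving __mcopy_atomic().

/* Editorial userspace sketch - illustrative only. */
#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	long page_size = sysconf(_SC_PAGESIZE);
	int uffd = syscall(SYS_userfaultfd, O_CLOEXEC | O_NONBLOCK);
	struct uffdio_api api = { .api = UFFD_API };
	struct uffdio_register reg;
	struct uffdio_copy copy;
	void *area, *src;

	ioctl(uffd, UFFDIO_API, &api);

	/* Region whose missing faults userspace will resolve. */
	area = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	reg.range.start = (unsigned long) area;
	reg.range.len = page_size;
	reg.mode = UFFDIO_REGISTER_MODE_MISSING;
	ioctl(uffd, UFFDIO_REGISTER, &reg);

	/* Source buffer holding the data to inject (e.g. received over I/O). */
	src = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	/*
	 * Resolve a missing page at 'area': this ioctl reaches
	 * mcopy_atomic() -> __mcopy_atomic() above.  A UFFDIO_ZEROPAGE
	 * ioctl would take the mfill_zeropage() path instead.
	 */
	copy.dst = (unsigned long) area;
	copy.src = (unsigned long) src;
	copy.len = page_size;
	copy.mode = 0;
	ioctl(uffd, UFFDIO_COPY, &copy);

	return 0;
}

In a real handler the UFFDIO_COPY is normally issued from a monitor thread after reading a UFFD_EVENT_PAGEFAULT message from the descriptor; the single-shot call here is only meant to show which kernel entry points are involved.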
v6.8
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  mm/userfaultfd.c
   4 *
   5 *  Copyright (C) 2015  Red Hat, Inc.
   6 */
   7
   8#include <linux/mm.h>
   9#include <linux/sched/signal.h>
  10#include <linux/pagemap.h>
  11#include <linux/rmap.h>
  12#include <linux/swap.h>
  13#include <linux/swapops.h>
  14#include <linux/userfaultfd_k.h>
  15#include <linux/mmu_notifier.h>
  16#include <linux/hugetlb.h>
  17#include <linux/shmem_fs.h>
  18#include <asm/tlbflush.h>
  19#include <asm/tlb.h>
  20#include "internal.h"
  21
  22static __always_inline
  23struct vm_area_struct *find_dst_vma(struct mm_struct *dst_mm,
  24				    unsigned long dst_start,
  25				    unsigned long len)
  26{
  27	/*
  28	 * Make sure that the dst range is both valid and fully within a
  29	 * single existing vma.
  30	 */
  31	struct vm_area_struct *dst_vma;
  32
  33	dst_vma = find_vma(dst_mm, dst_start);
  34	if (!range_in_vma(dst_vma, dst_start, dst_start + len))
  35		return NULL;
  36
  37	/*
  38	 * Check the vma is registered in uffd, this is required to
  39	 * enforce the VM_MAYWRITE check done at uffd registration
  40	 * time.
  41	 */
  42	if (!dst_vma->vm_userfaultfd_ctx.ctx)
  43		return NULL;
  44
  45	return dst_vma;
  46}
  47
  48/* Check if dst_addr is outside of file's size. Must be called with ptl held. */
  49static bool mfill_file_over_size(struct vm_area_struct *dst_vma,
  50				 unsigned long dst_addr)
  51{
  52	struct inode *inode;
  53	pgoff_t offset, max_off;
  54
  55	if (!dst_vma->vm_file)
  56		return false;
  57
  58	inode = dst_vma->vm_file->f_inode;
  59	offset = linear_page_index(dst_vma, dst_addr);
  60	max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
  61	return offset >= max_off;
  62}
  63
  64/*
  65 * Install PTEs, to map dst_addr (within dst_vma) to page.
  66 *
  67 * This function handles both MCOPY_ATOMIC_NORMAL and _CONTINUE for both shmem
  68 * and anon, and for both shared and private VMAs.
  69 */
  70int mfill_atomic_install_pte(pmd_t *dst_pmd,
  71			     struct vm_area_struct *dst_vma,
  72			     unsigned long dst_addr, struct page *page,
  73			     bool newly_allocated, uffd_flags_t flags)
  74{
  75	int ret;
  76	struct mm_struct *dst_mm = dst_vma->vm_mm;
  77	pte_t _dst_pte, *dst_pte;
  78	bool writable = dst_vma->vm_flags & VM_WRITE;
  79	bool vm_shared = dst_vma->vm_flags & VM_SHARED;
  80	bool page_in_cache = page_mapping(page);
  81	spinlock_t *ptl;
  82	struct folio *folio;
  83
  84	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
  85	_dst_pte = pte_mkdirty(_dst_pte);
  86	if (page_in_cache && !vm_shared)
  87		writable = false;
  88	if (writable)
  89		_dst_pte = pte_mkwrite(_dst_pte, dst_vma);
  90	if (flags & MFILL_ATOMIC_WP)
  91		_dst_pte = pte_mkuffd_wp(_dst_pte);
  92
  93	ret = -EAGAIN;
  94	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
  95	if (!dst_pte)
  96		goto out;
  97
  98	if (mfill_file_over_size(dst_vma, dst_addr)) {
  99		ret = -EFAULT;
 100		goto out_unlock;
 101	}
 102
 103	ret = -EEXIST;
 104	/*
 105	 * We allow to overwrite a pte marker: consider when both MISSING|WP
 106	 * registered, we firstly wr-protect a none pte which has no page cache
 107	 * page backing it, then access the page.
 108	 */
 109	if (!pte_none_mostly(ptep_get(dst_pte)))
 110		goto out_unlock;
 111
 112	folio = page_folio(page);
 113	if (page_in_cache) {
 114		/* Usually, cache pages are already added to LRU */
 115		if (newly_allocated)
 116			folio_add_lru(folio);
 117		folio_add_file_rmap_pte(folio, page, dst_vma);
 118	} else {
 119		folio_add_new_anon_rmap(folio, dst_vma, dst_addr);
 120		folio_add_lru_vma(folio, dst_vma);
 121	}
 122
 123	/*
 124	 * Must happen after rmap, as mm_counter() checks mapping (via
 125	 * PageAnon()), which is set by __page_set_anon_rmap().
 126	 */
 127	inc_mm_counter(dst_mm, mm_counter(page));
 128
 129	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
 130
 131	/* No need to invalidate - it was non-present before */
 132	update_mmu_cache(dst_vma, dst_addr, dst_pte);
 133	ret = 0;
 134out_unlock:
 135	pte_unmap_unlock(dst_pte, ptl);
 136out:
 137	return ret;
 138}
 139
 140static int mfill_atomic_pte_copy(pmd_t *dst_pmd,
 141				 struct vm_area_struct *dst_vma,
 142				 unsigned long dst_addr,
 143				 unsigned long src_addr,
 144				 uffd_flags_t flags,
 145				 struct folio **foliop)
 146{
 147	void *kaddr;
 148	int ret;
 149	struct folio *folio;
 150
 151	if (!*foliop) {
 152		ret = -ENOMEM;
 153		folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, dst_vma,
 154					dst_addr, false);
 155		if (!folio)
 156			goto out;
 157
 158		kaddr = kmap_local_folio(folio, 0);
 159		/*
 160		 * The read mmap_lock is held here.  Despite the
 161		 * mmap_lock being read recursive a deadlock is still
 162		 * possible if a writer has taken a lock.  For example:
 163		 *
 164		 * process A thread 1 takes read lock on own mmap_lock
 165		 * process A thread 2 calls mmap, blocks taking write lock
 166		 * process B thread 1 takes page fault, read lock on own mmap lock
 167		 * process B thread 2 calls mmap, blocks taking write lock
 168		 * process A thread 1 blocks taking read lock on process B
 169		 * process B thread 1 blocks taking read lock on process A
 170		 *
 171		 * Disable page faults to prevent potential deadlock
 172		 * and retry the copy outside the mmap_lock.
 173		 */
 174		pagefault_disable();
 175		ret = copy_from_user(kaddr, (const void __user *) src_addr,
 176				     PAGE_SIZE);
 177		pagefault_enable();
 178		kunmap_local(kaddr);
 179
 180		/* fallback to copy_from_user outside mmap_lock */
 181		if (unlikely(ret)) {
 182			ret = -ENOENT;
 183			*foliop = folio;
 184			/* don't free the page */
 185			goto out;
 186		}
 187
 188		flush_dcache_folio(folio);
 189	} else {
 190		folio = *foliop;
 191		*foliop = NULL;
 192	}
 193
 194	/*
 195	 * The memory barrier inside __folio_mark_uptodate makes sure that
 196	 * preceding stores to the page contents become visible before
 197	 * the set_pte_at() write.
 198	 */
 199	__folio_mark_uptodate(folio);
 200
 201	ret = -ENOMEM;
 202	if (mem_cgroup_charge(folio, dst_vma->vm_mm, GFP_KERNEL))
 203		goto out_release;
 204
 205	ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
 206				       &folio->page, true, flags);
 207	if (ret)
 208		goto out_release;
 209out:
 210	return ret;
 211out_release:
 212	folio_put(folio);
 213	goto out;
 214}
 215
 216static int mfill_atomic_pte_zeropage(pmd_t *dst_pmd,
 217				     struct vm_area_struct *dst_vma,
 218				     unsigned long dst_addr)
 219{
 220	pte_t _dst_pte, *dst_pte;
 221	spinlock_t *ptl;
 222	int ret;
 223
 224	_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
 225					 dst_vma->vm_page_prot));
 226	ret = -EAGAIN;
 227	dst_pte = pte_offset_map_lock(dst_vma->vm_mm, dst_pmd, dst_addr, &ptl);
 228	if (!dst_pte)
 229		goto out;
 230	if (mfill_file_over_size(dst_vma, dst_addr)) {
 231		ret = -EFAULT;
 232		goto out_unlock;
 233	}
 234	ret = -EEXIST;
 235	if (!pte_none(ptep_get(dst_pte)))
 236		goto out_unlock;
 237	set_pte_at(dst_vma->vm_mm, dst_addr, dst_pte, _dst_pte);
 238	/* No need to invalidate - it was non-present before */
 239	update_mmu_cache(dst_vma, dst_addr, dst_pte);
 240	ret = 0;
 241out_unlock:
 242	pte_unmap_unlock(dst_pte, ptl);
 243out:
 244	return ret;
 245}
 246
 247/* Handles UFFDIO_CONTINUE for all shmem VMAs (shared or private). */
 248static int mfill_atomic_pte_continue(pmd_t *dst_pmd,
 249				     struct vm_area_struct *dst_vma,
 250				     unsigned long dst_addr,
 251				     uffd_flags_t flags)
 252{
 253	struct inode *inode = file_inode(dst_vma->vm_file);
 254	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
 255	struct folio *folio;
 256	struct page *page;
 257	int ret;
 258
 259	ret = shmem_get_folio(inode, pgoff, &folio, SGP_NOALLOC);
 260	/* Our caller expects us to return -EFAULT if we failed to find folio */
 261	if (ret == -ENOENT)
 262		ret = -EFAULT;
 263	if (ret)
 264		goto out;
 265	if (!folio) {
 266		ret = -EFAULT;
 267		goto out;
 268	}
 269
 270	page = folio_file_page(folio, pgoff);
 271	if (PageHWPoison(page)) {
 272		ret = -EIO;
 273		goto out_release;
 274	}
 275
 276	ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
 277				       page, false, flags);
 278	if (ret)
 279		goto out_release;
 280
 281	folio_unlock(folio);
 282	ret = 0;
 283out:
 284	return ret;
 285out_release:
 286	folio_unlock(folio);
 287	folio_put(folio);
 288	goto out;
 289}
 290
 291/* Handles UFFDIO_POISON for all non-hugetlb VMAs. */
 292static int mfill_atomic_pte_poison(pmd_t *dst_pmd,
 293				   struct vm_area_struct *dst_vma,
 294				   unsigned long dst_addr,
 295				   uffd_flags_t flags)
 296{
 297	int ret;
 298	struct mm_struct *dst_mm = dst_vma->vm_mm;
 299	pte_t _dst_pte, *dst_pte;
 300	spinlock_t *ptl;
 301
 302	_dst_pte = make_pte_marker(PTE_MARKER_POISONED);
 303	ret = -EAGAIN;
 304	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
 305	if (!dst_pte)
 306		goto out;
 307
 308	if (mfill_file_over_size(dst_vma, dst_addr)) {
 309		ret = -EFAULT;
 310		goto out_unlock;
 311	}
 312
 313	ret = -EEXIST;
 314	/* Refuse to overwrite any PTE, even a PTE marker (e.g. UFFD WP). */
 315	if (!pte_none(ptep_get(dst_pte)))
 316		goto out_unlock;
 317
 318	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
 319
 320	/* No need to invalidate - it was non-present before */
 321	update_mmu_cache(dst_vma, dst_addr, dst_pte);
 322	ret = 0;
 323out_unlock:
 324	pte_unmap_unlock(dst_pte, ptl);
 325out:
 326	return ret;
 327}
 328
 329static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
 330{
 331	pgd_t *pgd;
 332	p4d_t *p4d;
 333	pud_t *pud;
 334
 335	pgd = pgd_offset(mm, address);
 336	p4d = p4d_alloc(mm, pgd, address);
 337	if (!p4d)
 338		return NULL;
 339	pud = pud_alloc(mm, p4d, address);
 340	if (!pud)
 341		return NULL;
 342	/*
 343	 * Note that we didn't run this because the pmd was
 344	 * missing, the *pmd may be already established and in
 345	 * turn it may also be a trans_huge_pmd.
 346	 */
 347	return pmd_alloc(mm, pud, address);
 348}
 349
 350#ifdef CONFIG_HUGETLB_PAGE
 351/*
 352 * mfill_atomic processing for HUGETLB vmas.  Note that this routine is
 353 * called with mmap_lock held, it will release mmap_lock before returning.
 354 */
 355static __always_inline ssize_t mfill_atomic_hugetlb(
 356					      struct vm_area_struct *dst_vma,
 357					      unsigned long dst_start,
 358					      unsigned long src_start,
 359					      unsigned long len,
 360					      atomic_t *mmap_changing,
 361					      uffd_flags_t flags)
 362{
 363	struct mm_struct *dst_mm = dst_vma->vm_mm;
 364	int vm_shared = dst_vma->vm_flags & VM_SHARED;
 365	ssize_t err;
 366	pte_t *dst_pte;
 367	unsigned long src_addr, dst_addr;
 368	long copied;
 369	struct folio *folio;
 370	unsigned long vma_hpagesize;
 371	pgoff_t idx;
 372	u32 hash;
 373	struct address_space *mapping;
 374
 375	/*
 376	 * There is no default zero huge page for all huge page sizes as
  377	 * supported by hugetlb.  A PMD_SIZE huge page may exist as used
 378	 * by THP.  Since we can not reliably insert a zero page, this
 379	 * feature is not supported.
 380	 */
 381	if (uffd_flags_mode_is(flags, MFILL_ATOMIC_ZEROPAGE)) {
 382		mmap_read_unlock(dst_mm);
 383		return -EINVAL;
 384	}
 385
 386	src_addr = src_start;
 387	dst_addr = dst_start;
 388	copied = 0;
 389	folio = NULL;
 390	vma_hpagesize = vma_kernel_pagesize(dst_vma);
 391
 392	/*
 393	 * Validate alignment based on huge page size
 394	 */
 395	err = -EINVAL;
 396	if (dst_start & (vma_hpagesize - 1) || len & (vma_hpagesize - 1))
 397		goto out_unlock;
 398
 399retry:
 400	/*
 401	 * On routine entry dst_vma is set.  If we had to drop mmap_lock and
 402	 * retry, dst_vma will be set to NULL and we must lookup again.
 403	 */
 404	if (!dst_vma) {
 405		err = -ENOENT;
 406		dst_vma = find_dst_vma(dst_mm, dst_start, len);
 407		if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
 408			goto out_unlock;
 409
 410		err = -EINVAL;
 411		if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
 412			goto out_unlock;
 413
 414		vm_shared = dst_vma->vm_flags & VM_SHARED;
 415	}
 416
 417	/*
  418	 * If not shared, ensure the dst_vma has an anon_vma.
 419	 */
 420	err = -ENOMEM;
 421	if (!vm_shared) {
 422		if (unlikely(anon_vma_prepare(dst_vma)))
 423			goto out_unlock;
 424	}
 425
 426	while (src_addr < src_start + len) {
 427		BUG_ON(dst_addr >= dst_start + len);
 428
 429		/*
 430		 * Serialize via vma_lock and hugetlb_fault_mutex.
 431		 * vma_lock ensures the dst_pte remains valid even
 432		 * in the case of shared pmds.  fault mutex prevents
 433		 * races with other faulting threads.
 434		 */
 435		idx = linear_page_index(dst_vma, dst_addr);
 436		mapping = dst_vma->vm_file->f_mapping;
 437		hash = hugetlb_fault_mutex_hash(mapping, idx);
 438		mutex_lock(&hugetlb_fault_mutex_table[hash]);
 439		hugetlb_vma_lock_read(dst_vma);
 440
 441		err = -ENOMEM;
 442		dst_pte = huge_pte_alloc(dst_mm, dst_vma, dst_addr, vma_hpagesize);
 443		if (!dst_pte) {
 444			hugetlb_vma_unlock_read(dst_vma);
 445			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 446			goto out_unlock;
 447		}
 448
 449		if (!uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE) &&
 450		    !huge_pte_none_mostly(huge_ptep_get(dst_pte))) {
 451			err = -EEXIST;
 452			hugetlb_vma_unlock_read(dst_vma);
 453			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 454			goto out_unlock;
 455		}
 456
 457		err = hugetlb_mfill_atomic_pte(dst_pte, dst_vma, dst_addr,
 458					       src_addr, flags, &folio);
 459
 460		hugetlb_vma_unlock_read(dst_vma);
 461		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 462
 463		cond_resched();
 464
 465		if (unlikely(err == -ENOENT)) {
 466			mmap_read_unlock(dst_mm);
 467			BUG_ON(!folio);
 468
 469			err = copy_folio_from_user(folio,
 470						   (const void __user *)src_addr, true);
 471			if (unlikely(err)) {
 472				err = -EFAULT;
 473				goto out;
 474			}
 475			mmap_read_lock(dst_mm);
 476			/*
 477			 * If memory mappings are changing because of non-cooperative
 478			 * operation (e.g. mremap) running in parallel, bail out and
 479			 * request the user to retry later
 480			 */
 481			if (mmap_changing && atomic_read(mmap_changing)) {
 482				err = -EAGAIN;
 483				break;
 484			}
 485
 486			dst_vma = NULL;
 487			goto retry;
 488		} else
 489			BUG_ON(folio);
 490
 491		if (!err) {
 492			dst_addr += vma_hpagesize;
 493			src_addr += vma_hpagesize;
 494			copied += vma_hpagesize;
 495
 496			if (fatal_signal_pending(current))
 497				err = -EINTR;
 498		}
 499		if (err)
 500			break;
 501	}
 502
 503out_unlock:
 504	mmap_read_unlock(dst_mm);
 505out:
 506	if (folio)
 507		folio_put(folio);
 508	BUG_ON(copied < 0);
 509	BUG_ON(err > 0);
 510	BUG_ON(!copied && !err);
 511	return copied ? copied : err;
 512}
 513#else /* !CONFIG_HUGETLB_PAGE */
 514/* fail at build time if gcc attempts to use this */
 515extern ssize_t mfill_atomic_hugetlb(struct vm_area_struct *dst_vma,
 516				    unsigned long dst_start,
 517				    unsigned long src_start,
 518				    unsigned long len,
 519				    atomic_t *mmap_changing,
 520				    uffd_flags_t flags);
 521#endif /* CONFIG_HUGETLB_PAGE */
 522
 523static __always_inline ssize_t mfill_atomic_pte(pmd_t *dst_pmd,
 524						struct vm_area_struct *dst_vma,
 525						unsigned long dst_addr,
 526						unsigned long src_addr,
 527						uffd_flags_t flags,
 528						struct folio **foliop)
 529{
 530	ssize_t err;
 531
 532	if (uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE)) {
 533		return mfill_atomic_pte_continue(dst_pmd, dst_vma,
 534						 dst_addr, flags);
 535	} else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
 536		return mfill_atomic_pte_poison(dst_pmd, dst_vma,
 537					       dst_addr, flags);
 538	}
 539
 540	/*
 541	 * The normal page fault path for a shmem will invoke the
 542	 * fault, fill the hole in the file and COW it right away. The
 543	 * result generates plain anonymous memory. So when we are
  544	 * asked to fill a hole in a MAP_PRIVATE shmem mapping, we'll
 545	 * generate anonymous memory directly without actually filling
 546	 * the hole. For the MAP_PRIVATE case the robustness check
 547	 * only happens in the pagetable (to verify it's still none)
 548	 * and not in the radix tree.
 549	 */
 550	if (!(dst_vma->vm_flags & VM_SHARED)) {
 551		if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY))
 552			err = mfill_atomic_pte_copy(dst_pmd, dst_vma,
 553						    dst_addr, src_addr,
 554						    flags, foliop);
 555		else
 556			err = mfill_atomic_pte_zeropage(dst_pmd,
 557						 dst_vma, dst_addr);
 558	} else {
 559		err = shmem_mfill_atomic_pte(dst_pmd, dst_vma,
 560					     dst_addr, src_addr,
 561					     flags, foliop);
 562	}
 563
 564	return err;
 565}
 566
 567static __always_inline ssize_t mfill_atomic(struct mm_struct *dst_mm,
 568					    unsigned long dst_start,
 569					    unsigned long src_start,
 570					    unsigned long len,
 571					    atomic_t *mmap_changing,
 572					    uffd_flags_t flags)
 573{
 574	struct vm_area_struct *dst_vma;
 575	ssize_t err;
 576	pmd_t *dst_pmd;
 577	unsigned long src_addr, dst_addr;
 578	long copied;
 579	struct folio *folio;
 580
 581	/*
 582	 * Sanitize the command parameters:
 583	 */
 584	BUG_ON(dst_start & ~PAGE_MASK);
 585	BUG_ON(len & ~PAGE_MASK);
 586
 587	/* Does the address range wrap, or is the span zero-sized? */
 588	BUG_ON(src_start + len <= src_start);
 589	BUG_ON(dst_start + len <= dst_start);
 590
 591	src_addr = src_start;
 592	dst_addr = dst_start;
 593	copied = 0;
 594	folio = NULL;
 595retry:
 596	mmap_read_lock(dst_mm);
 597
 598	/*
 599	 * If memory mappings are changing because of non-cooperative
 600	 * operation (e.g. mremap) running in parallel, bail out and
 601	 * request the user to retry later
 602	 */
 603	err = -EAGAIN;
 604	if (mmap_changing && atomic_read(mmap_changing))
 605		goto out_unlock;
 606
 607	/*
 608	 * Make sure the vma is not shared, that the dst range is
 609	 * both valid and fully within a single existing vma.
 610	 */
 611	err = -ENOENT;
 612	dst_vma = find_dst_vma(dst_mm, dst_start, len);
 613	if (!dst_vma)
 614		goto out_unlock;
 615
 616	err = -EINVAL;
 617	/*
 618	 * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
 619	 * it will overwrite vm_ops, so vma_is_anonymous must return false.
 620	 */
 621	if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
 622	    dst_vma->vm_flags & VM_SHARED))
 623		goto out_unlock;
 624
 625	/*
 626	 * validate 'mode' now that we know the dst_vma: don't allow
 627	 * a wrprotect copy if the userfaultfd didn't register as WP.
 628	 */
 629	if ((flags & MFILL_ATOMIC_WP) && !(dst_vma->vm_flags & VM_UFFD_WP))
 630		goto out_unlock;
 631
 632	/*
 633	 * If this is a HUGETLB vma, pass off to appropriate routine
 634	 */
 635	if (is_vm_hugetlb_page(dst_vma))
 636		return  mfill_atomic_hugetlb(dst_vma, dst_start, src_start,
 637					     len, mmap_changing, flags);
 638
 639	if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
 640		goto out_unlock;
 641	if (!vma_is_shmem(dst_vma) &&
 642	    uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE))
 643		goto out_unlock;
 644
 645	/*
  646	 * Ensure the dst_vma has an anon_vma or this page
 647	 * would get a NULL anon_vma when moved in the
 648	 * dst_vma.
 649	 */
 650	err = -ENOMEM;
 651	if (!(dst_vma->vm_flags & VM_SHARED) &&
 652	    unlikely(anon_vma_prepare(dst_vma)))
 653		goto out_unlock;
 654
 655	while (src_addr < src_start + len) {
 656		pmd_t dst_pmdval;
 657
 658		BUG_ON(dst_addr >= dst_start + len);
 659
 660		dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
 661		if (unlikely(!dst_pmd)) {
 662			err = -ENOMEM;
 663			break;
 664		}
 665
 666		dst_pmdval = pmdp_get_lockless(dst_pmd);
 667		/*
 668		 * If the dst_pmd is mapped as THP don't
 669		 * override it and just be strict.
 670		 */
 671		if (unlikely(pmd_trans_huge(dst_pmdval))) {
 672			err = -EEXIST;
 673			break;
 674		}
 675		if (unlikely(pmd_none(dst_pmdval)) &&
 676		    unlikely(__pte_alloc(dst_mm, dst_pmd))) {
 677			err = -ENOMEM;
 678			break;
 679		}
  680		/* If a huge pmd materialized from under us, fail */
 681		if (unlikely(pmd_trans_huge(*dst_pmd))) {
 682			err = -EFAULT;
 683			break;
 684		}
 685
 686		BUG_ON(pmd_none(*dst_pmd));
 687		BUG_ON(pmd_trans_huge(*dst_pmd));
 688
 689		err = mfill_atomic_pte(dst_pmd, dst_vma, dst_addr,
 690				       src_addr, flags, &folio);
 691		cond_resched();
 692
 693		if (unlikely(err == -ENOENT)) {
 694			void *kaddr;
 695
 696			mmap_read_unlock(dst_mm);
 697			BUG_ON(!folio);
 698
 699			kaddr = kmap_local_folio(folio, 0);
 700			err = copy_from_user(kaddr,
 701					     (const void __user *) src_addr,
 702					     PAGE_SIZE);
 703			kunmap_local(kaddr);
 704			if (unlikely(err)) {
 705				err = -EFAULT;
 706				goto out;
 707			}
 708			flush_dcache_folio(folio);
 709			goto retry;
 710		} else
 711			BUG_ON(folio);
 712
 713		if (!err) {
 714			dst_addr += PAGE_SIZE;
 715			src_addr += PAGE_SIZE;
 716			copied += PAGE_SIZE;
 717
 718			if (fatal_signal_pending(current))
 719				err = -EINTR;
 720		}
 721		if (err)
 722			break;
 723	}
 724
 725out_unlock:
 726	mmap_read_unlock(dst_mm);
 727out:
 728	if (folio)
 729		folio_put(folio);
 730	BUG_ON(copied < 0);
 731	BUG_ON(err > 0);
 732	BUG_ON(!copied && !err);
 733	return copied ? copied : err;
 734}
 735
 736ssize_t mfill_atomic_copy(struct mm_struct *dst_mm, unsigned long dst_start,
 737			  unsigned long src_start, unsigned long len,
 738			  atomic_t *mmap_changing, uffd_flags_t flags)
 739{
 740	return mfill_atomic(dst_mm, dst_start, src_start, len, mmap_changing,
 741			    uffd_flags_set_mode(flags, MFILL_ATOMIC_COPY));
 742}
 743
 744ssize_t mfill_atomic_zeropage(struct mm_struct *dst_mm, unsigned long start,
 745			      unsigned long len, atomic_t *mmap_changing)
 746{
 747	return mfill_atomic(dst_mm, start, 0, len, mmap_changing,
 748			    uffd_flags_set_mode(0, MFILL_ATOMIC_ZEROPAGE));
 749}
 750
 751ssize_t mfill_atomic_continue(struct mm_struct *dst_mm, unsigned long start,
 752			      unsigned long len, atomic_t *mmap_changing,
 753			      uffd_flags_t flags)
 754{
 755	return mfill_atomic(dst_mm, start, 0, len, mmap_changing,
 756			    uffd_flags_set_mode(flags, MFILL_ATOMIC_CONTINUE));
 757}
 758
 759ssize_t mfill_atomic_poison(struct mm_struct *dst_mm, unsigned long start,
 760			    unsigned long len, atomic_t *mmap_changing,
 761			    uffd_flags_t flags)
 762{
 763	return mfill_atomic(dst_mm, start, 0, len, mmap_changing,
 764			    uffd_flags_set_mode(flags, MFILL_ATOMIC_POISON));
 765}
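
The four wrappers above are what the UFFDIO_COPY, UFFDIO_ZEROPAGE, UFFDIO_CONTINUE and UFFDIO_POISON ioctls call into; each one merely tags mfill_atomic() with the matching MFILL_ATOMIC_* mode. As a hedged illustration (not taken from this file, helper name invented), a minor-fault handler for a range registered with UFFDIO_REGISTER_MODE_MINOR could resolve a fault through the MFILL_ATOMIC_CONTINUE path like this:

/* Editorial userspace sketch: map the page that already sits in the
 * shmem/hugetlb page cache for the faulting address (UFFDIO_CONTINUE). */
#include <linux/userfaultfd.h>
#include <sys/ioctl.h>

static int uffd_continue_page(int uffd, unsigned long fault_addr,
			      unsigned long page_size)
{
	struct uffdio_continue cont = {
		.range = {
			.start = fault_addr & ~(page_size - 1),
			.len   = page_size,
		},
		.mode = 0,
	};

	/* Reaches mfill_atomic_continue() and thus MFILL_ATOMIC_CONTINUE above. */
	return ioctl(uffd, UFFDIO_CONTINUE, &cont);
}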
 766
 767long uffd_wp_range(struct vm_area_struct *dst_vma,
 768		   unsigned long start, unsigned long len, bool enable_wp)
 769{
 770	unsigned int mm_cp_flags;
 771	struct mmu_gather tlb;
 772	long ret;
 773
 774	VM_WARN_ONCE(start < dst_vma->vm_start || start + len > dst_vma->vm_end,
 775			"The address range exceeds VMA boundary.\n");
 776	if (enable_wp)
 777		mm_cp_flags = MM_CP_UFFD_WP;
 778	else
 779		mm_cp_flags = MM_CP_UFFD_WP_RESOLVE;
 780
 781	/*
 782	 * vma->vm_page_prot already reflects that uffd-wp is enabled for this
 783	 * VMA (see userfaultfd_set_vm_flags()) and that all PTEs are supposed
 784	 * to be write-protected as default whenever protection changes.
 785	 * Try upgrading write permissions manually.
 786	 */
 787	if (!enable_wp && vma_wants_manual_pte_write_upgrade(dst_vma))
 788		mm_cp_flags |= MM_CP_TRY_CHANGE_WRITABLE;
 789	tlb_gather_mmu(&tlb, dst_vma->vm_mm);
 790	ret = change_protection(&tlb, dst_vma, start, start + len, mm_cp_flags);
 791	tlb_finish_mmu(&tlb);
 792
 793	return ret;
 794}
 795
 796int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
 797			unsigned long len, bool enable_wp,
 798			atomic_t *mmap_changing)
 799{
 800	unsigned long end = start + len;
 801	unsigned long _start, _end;
 802	struct vm_area_struct *dst_vma;
 803	unsigned long page_mask;
 804	long err;
 805	VMA_ITERATOR(vmi, dst_mm, start);
 806
 807	/*
 808	 * Sanitize the command parameters:
 809	 */
 810	BUG_ON(start & ~PAGE_MASK);
 811	BUG_ON(len & ~PAGE_MASK);
 812
 813	/* Does the address range wrap, or is the span zero-sized? */
 814	BUG_ON(start + len <= start);
 815
 816	mmap_read_lock(dst_mm);
 817
 818	/*
 819	 * If memory mappings are changing because of non-cooperative
 820	 * operation (e.g. mremap) running in parallel, bail out and
 821	 * request the user to retry later
 822	 */
 823	err = -EAGAIN;
 824	if (mmap_changing && atomic_read(mmap_changing))
 825		goto out_unlock;
 826
 827	err = -ENOENT;
 828	for_each_vma_range(vmi, dst_vma, end) {
 829
 830		if (!userfaultfd_wp(dst_vma)) {
 831			err = -ENOENT;
 832			break;
 833		}
 834
 835		if (is_vm_hugetlb_page(dst_vma)) {
 836			err = -EINVAL;
 837			page_mask = vma_kernel_pagesize(dst_vma) - 1;
 838			if ((start & page_mask) || (len & page_mask))
 839				break;
 840		}
 841
 842		_start = max(dst_vma->vm_start, start);
 843		_end = min(dst_vma->vm_end, end);
 844
 845		err = uffd_wp_range(dst_vma, _start, _end - _start, enable_wp);
 846
 847		/* Return 0 on success, <0 on failures */
 848		if (err < 0)
 849			break;
 850		err = 0;
 851	}
 852out_unlock:
 853	mmap_read_unlock(dst_mm);
 854	return err;
 855}
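
uffd_wp_range() and mwriteprotect_range() above are the kernel side of the UFFDIO_WRITEPROTECT ioctl. A rough userspace sketch (editorial, wrapper name invented) for toggling write protection on a range registered with UFFDIO_REGISTER_MODE_WP:

#include <linux/userfaultfd.h>
#include <sys/ioctl.h>

/* Editorial sketch: enable_wp != 0 write-protects the range so later writes
 * raise uffd-wp faults; enable_wp == 0 resolves (clears) the protection. */
static int uffd_writeprotect(int uffd, unsigned long start, unsigned long len,
			     int enable_wp)
{
	struct uffdio_writeprotect wp = {
		.range = { .start = start, .len = len },
		.mode  = enable_wp ? UFFDIO_WRITEPROTECT_MODE_WP : 0,
	};

	/* Drives mwriteprotect_range() -> uffd_wp_range() above. */
	return ioctl(uffd, UFFDIO_WRITEPROTECT, &wp);
}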
 856
 857
 858void double_pt_lock(spinlock_t *ptl1,
 859		    spinlock_t *ptl2)
 860	__acquires(ptl1)
 861	__acquires(ptl2)
 862{
 863	spinlock_t *ptl_tmp;
 864
 865	if (ptl1 > ptl2) {
 866		/* exchange ptl1 and ptl2 */
 867		ptl_tmp = ptl1;
 868		ptl1 = ptl2;
 869		ptl2 = ptl_tmp;
 870	}
 871	/* lock in virtual address order to avoid lock inversion */
 872	spin_lock(ptl1);
 873	if (ptl1 != ptl2)
 874		spin_lock_nested(ptl2, SINGLE_DEPTH_NESTING);
 875	else
 876		__acquire(ptl2);
 877}
 878
 879void double_pt_unlock(spinlock_t *ptl1,
 880		      spinlock_t *ptl2)
 881	__releases(ptl1)
 882	__releases(ptl2)
 883{
 884	spin_unlock(ptl1);
 885	if (ptl1 != ptl2)
 886		spin_unlock(ptl2);
 887	else
 888		__release(ptl2);
 889}
 890
 891
 892static int move_present_pte(struct mm_struct *mm,
 893			    struct vm_area_struct *dst_vma,
 894			    struct vm_area_struct *src_vma,
 895			    unsigned long dst_addr, unsigned long src_addr,
 896			    pte_t *dst_pte, pte_t *src_pte,
 897			    pte_t orig_dst_pte, pte_t orig_src_pte,
 898			    spinlock_t *dst_ptl, spinlock_t *src_ptl,
 899			    struct folio *src_folio)
 900{
 901	int err = 0;
 902
 903	double_pt_lock(dst_ptl, src_ptl);
 904
 905	if (!pte_same(ptep_get(src_pte), orig_src_pte) ||
 906	    !pte_same(ptep_get(dst_pte), orig_dst_pte)) {
 907		err = -EAGAIN;
 908		goto out;
 909	}
 910	if (folio_test_large(src_folio) ||
 911	    folio_maybe_dma_pinned(src_folio) ||
 912	    !PageAnonExclusive(&src_folio->page)) {
 913		err = -EBUSY;
 914		goto out;
 915	}
 916
 917	orig_src_pte = ptep_clear_flush(src_vma, src_addr, src_pte);
 918	/* Folio got pinned from under us. Put it back and fail the move. */
 919	if (folio_maybe_dma_pinned(src_folio)) {
 920		set_pte_at(mm, src_addr, src_pte, orig_src_pte);
 921		err = -EBUSY;
 922		goto out;
 923	}
 924
 925	folio_move_anon_rmap(src_folio, dst_vma);
 926	WRITE_ONCE(src_folio->index, linear_page_index(dst_vma, dst_addr));
 927
 928	orig_dst_pte = mk_pte(&src_folio->page, dst_vma->vm_page_prot);
 929	/* Follow mremap() behavior and treat the entry dirty after the move */
 930	orig_dst_pte = pte_mkwrite(pte_mkdirty(orig_dst_pte), dst_vma);
 931
 932	set_pte_at(mm, dst_addr, dst_pte, orig_dst_pte);
 933out:
 934	double_pt_unlock(dst_ptl, src_ptl);
 935	return err;
 936}
 937
 938static int move_swap_pte(struct mm_struct *mm,
 939			 unsigned long dst_addr, unsigned long src_addr,
 940			 pte_t *dst_pte, pte_t *src_pte,
 941			 pte_t orig_dst_pte, pte_t orig_src_pte,
 942			 spinlock_t *dst_ptl, spinlock_t *src_ptl)
 943{
 944	if (!pte_swp_exclusive(orig_src_pte))
 945		return -EBUSY;
 946
 947	double_pt_lock(dst_ptl, src_ptl);
 948
 949	if (!pte_same(ptep_get(src_pte), orig_src_pte) ||
 950	    !pte_same(ptep_get(dst_pte), orig_dst_pte)) {
 951		double_pt_unlock(dst_ptl, src_ptl);
 952		return -EAGAIN;
 953	}
 954
 955	orig_src_pte = ptep_get_and_clear(mm, src_addr, src_pte);
 956	set_pte_at(mm, dst_addr, dst_pte, orig_src_pte);
 957	double_pt_unlock(dst_ptl, src_ptl);
 958
 959	return 0;
 960}
 961
 962/*
 963 * The mmap_lock for reading is held by the caller. Just move the page
 964 * from src_pmd to dst_pmd if possible, and return true if succeeded
 965 * in moving the page.
 966 */
 967static int move_pages_pte(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd,
 968			  struct vm_area_struct *dst_vma,
 969			  struct vm_area_struct *src_vma,
 970			  unsigned long dst_addr, unsigned long src_addr,
 971			  __u64 mode)
 972{
 973	swp_entry_t entry;
 974	pte_t orig_src_pte, orig_dst_pte;
 975	pte_t src_folio_pte;
 976	spinlock_t *src_ptl, *dst_ptl;
 977	pte_t *src_pte = NULL;
 978	pte_t *dst_pte = NULL;
 979
 980	struct folio *src_folio = NULL;
 981	struct anon_vma *src_anon_vma = NULL;
 982	struct mmu_notifier_range range;
 983	int err = 0;
 984
 985	flush_cache_range(src_vma, src_addr, src_addr + PAGE_SIZE);
 986	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
 987				src_addr, src_addr + PAGE_SIZE);
 988	mmu_notifier_invalidate_range_start(&range);
 989retry:
 990	dst_pte = pte_offset_map_nolock(mm, dst_pmd, dst_addr, &dst_ptl);
 991
 992	/* Retry if a huge pmd materialized from under us */
 993	if (unlikely(!dst_pte)) {
 994		err = -EAGAIN;
 995		goto out;
 996	}
 997
 998	src_pte = pte_offset_map_nolock(mm, src_pmd, src_addr, &src_ptl);
 999
1000	/*
1001	 * We held the mmap_lock for reading so MADV_DONTNEED
1002	 * can zap transparent huge pages under us, or the
1003	 * transparent huge page fault can establish new
1004	 * transparent huge pages under us.
1005	 */
1006	if (unlikely(!src_pte)) {
1007		err = -EAGAIN;
1008		goto out;
1009	}
1010
1011	/* Sanity checks before the operation */
1012	if (WARN_ON_ONCE(pmd_none(*dst_pmd)) ||	WARN_ON_ONCE(pmd_none(*src_pmd)) ||
1013	    WARN_ON_ONCE(pmd_trans_huge(*dst_pmd)) || WARN_ON_ONCE(pmd_trans_huge(*src_pmd))) {
1014		err = -EINVAL;
1015		goto out;
1016	}
1017
1018	spin_lock(dst_ptl);
1019	orig_dst_pte = ptep_get(dst_pte);
1020	spin_unlock(dst_ptl);
1021	if (!pte_none(orig_dst_pte)) {
1022		err = -EEXIST;
1023		goto out;
1024	}
1025
1026	spin_lock(src_ptl);
1027	orig_src_pte = ptep_get(src_pte);
1028	spin_unlock(src_ptl);
1029	if (pte_none(orig_src_pte)) {
1030		if (!(mode & UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES))
1031			err = -ENOENT;
1032		else /* nothing to do to move a hole */
1033			err = 0;
1034		goto out;
1035	}
1036
 1037	/* If PTE changed after we locked the folio then start over */
1038	if (src_folio && unlikely(!pte_same(src_folio_pte, orig_src_pte))) {
1039		err = -EAGAIN;
1040		goto out;
1041	}
1042
1043	if (pte_present(orig_src_pte)) {
1044		/*
1045		 * Pin and lock both source folio and anon_vma. Since we are in
1046		 * RCU read section, we can't block, so on contention have to
1047		 * unmap the ptes, obtain the lock and retry.
1048		 */
1049		if (!src_folio) {
1050			struct folio *folio;
1051
1052			/*
1053			 * Pin the page while holding the lock to be sure the
1054			 * page isn't freed under us
1055			 */
1056			spin_lock(src_ptl);
1057			if (!pte_same(orig_src_pte, ptep_get(src_pte))) {
1058				spin_unlock(src_ptl);
1059				err = -EAGAIN;
1060				goto out;
1061			}
1062
1063			folio = vm_normal_folio(src_vma, src_addr, orig_src_pte);
1064			if (!folio || !PageAnonExclusive(&folio->page)) {
1065				spin_unlock(src_ptl);
1066				err = -EBUSY;
1067				goto out;
1068			}
1069
1070			folio_get(folio);
1071			src_folio = folio;
1072			src_folio_pte = orig_src_pte;
1073			spin_unlock(src_ptl);
1074
1075			if (!folio_trylock(src_folio)) {
1076				pte_unmap(&orig_src_pte);
1077				pte_unmap(&orig_dst_pte);
1078				src_pte = dst_pte = NULL;
1079				/* now we can block and wait */
1080				folio_lock(src_folio);
1081				goto retry;
1082			}
1083
1084			if (WARN_ON_ONCE(!folio_test_anon(src_folio))) {
1085				err = -EBUSY;
1086				goto out;
1087			}
1088		}
1089
1090		/* at this point we have src_folio locked */
1091		if (folio_test_large(src_folio)) {
1092			/* split_folio() can block */
1093			pte_unmap(&orig_src_pte);
1094			pte_unmap(&orig_dst_pte);
1095			src_pte = dst_pte = NULL;
1096			err = split_folio(src_folio);
1097			if (err)
1098				goto out;
1099			/* have to reacquire the folio after it got split */
1100			folio_unlock(src_folio);
1101			folio_put(src_folio);
1102			src_folio = NULL;
1103			goto retry;
1104		}
1105
1106		if (!src_anon_vma) {
1107			/*
1108			 * folio_referenced walks the anon_vma chain
1109			 * without the folio lock. Serialize against it with
1110			 * the anon_vma lock, the folio lock is not enough.
1111			 */
1112			src_anon_vma = folio_get_anon_vma(src_folio);
1113			if (!src_anon_vma) {
1114				/* page was unmapped from under us */
1115				err = -EAGAIN;
1116				goto out;
1117			}
1118			if (!anon_vma_trylock_write(src_anon_vma)) {
1119				pte_unmap(&orig_src_pte);
1120				pte_unmap(&orig_dst_pte);
1121				src_pte = dst_pte = NULL;
1122				/* now we can block and wait */
1123				anon_vma_lock_write(src_anon_vma);
1124				goto retry;
1125			}
1126		}
1127
1128		err = move_present_pte(mm,  dst_vma, src_vma,
1129				       dst_addr, src_addr, dst_pte, src_pte,
1130				       orig_dst_pte, orig_src_pte,
1131				       dst_ptl, src_ptl, src_folio);
1132	} else {
1133		entry = pte_to_swp_entry(orig_src_pte);
1134		if (non_swap_entry(entry)) {
1135			if (is_migration_entry(entry)) {
1136				pte_unmap(&orig_src_pte);
1137				pte_unmap(&orig_dst_pte);
1138				src_pte = dst_pte = NULL;
1139				migration_entry_wait(mm, src_pmd, src_addr);
1140				err = -EAGAIN;
1141			} else
1142				err = -EFAULT;
1143			goto out;
1144		}
1145
1146		err = move_swap_pte(mm, dst_addr, src_addr,
1147				    dst_pte, src_pte,
1148				    orig_dst_pte, orig_src_pte,
1149				    dst_ptl, src_ptl);
1150	}
1151
1152out:
1153	if (src_anon_vma) {
1154		anon_vma_unlock_write(src_anon_vma);
1155		put_anon_vma(src_anon_vma);
1156	}
1157	if (src_folio) {
1158		folio_unlock(src_folio);
1159		folio_put(src_folio);
1160	}
1161	if (dst_pte)
1162		pte_unmap(dst_pte);
1163	if (src_pte)
1164		pte_unmap(src_pte);
1165	mmu_notifier_invalidate_range_end(&range);
1166
1167	return err;
1168}
1169
1170#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1171static inline bool move_splits_huge_pmd(unsigned long dst_addr,
1172					unsigned long src_addr,
1173					unsigned long src_end)
1174{
1175	return (src_addr & ~HPAGE_PMD_MASK) || (dst_addr & ~HPAGE_PMD_MASK) ||
1176		src_end - src_addr < HPAGE_PMD_SIZE;
1177}
1178#else
1179static inline bool move_splits_huge_pmd(unsigned long dst_addr,
1180					unsigned long src_addr,
1181					unsigned long src_end)
1182{
1183	/* This is unreachable anyway, just to avoid warnings when HPAGE_PMD_SIZE==0 */
1184	return false;
1185}
1186#endif
1187
1188static inline bool vma_move_compatible(struct vm_area_struct *vma)
1189{
1190	return !(vma->vm_flags & (VM_PFNMAP | VM_IO |  VM_HUGETLB |
1191				  VM_MIXEDMAP | VM_SHADOW_STACK));
1192}
1193
1194static int validate_move_areas(struct userfaultfd_ctx *ctx,
1195			       struct vm_area_struct *src_vma,
1196			       struct vm_area_struct *dst_vma)
1197{
1198	/* Only allow moving if both have the same access and protection */
1199	if ((src_vma->vm_flags & VM_ACCESS_FLAGS) != (dst_vma->vm_flags & VM_ACCESS_FLAGS) ||
1200	    pgprot_val(src_vma->vm_page_prot) != pgprot_val(dst_vma->vm_page_prot))
1201		return -EINVAL;
1202
1203	/* Only allow moving if both are mlocked or both aren't */
1204	if ((src_vma->vm_flags & VM_LOCKED) != (dst_vma->vm_flags & VM_LOCKED))
1205		return -EINVAL;
1206
1207	/*
1208	 * For now, we keep it simple and only move between writable VMAs.
 1209	 * Access flags are equal, therefore checking only the source is enough.
1210	 */
1211	if (!(src_vma->vm_flags & VM_WRITE))
1212		return -EINVAL;
1213
1214	/* Check if vma flags indicate content which can be moved */
1215	if (!vma_move_compatible(src_vma) || !vma_move_compatible(dst_vma))
1216		return -EINVAL;
1217
1218	/* Ensure dst_vma is registered in uffd we are operating on */
1219	if (!dst_vma->vm_userfaultfd_ctx.ctx ||
1220	    dst_vma->vm_userfaultfd_ctx.ctx != ctx)
1221		return -EINVAL;
1222
1223	/* Only allow moving across anonymous vmas */
1224	if (!vma_is_anonymous(src_vma) || !vma_is_anonymous(dst_vma))
1225		return -EINVAL;
1226
1227	/*
 1228	 * Ensure the dst_vma has an anon_vma or this page
1229	 * would get a NULL anon_vma when moved in the
1230	 * dst_vma.
1231	 */
1232	if (unlikely(anon_vma_prepare(dst_vma)))
1233		return -ENOMEM;
1234
1235	return 0;
1236}
1237
1238/**
1239 * move_pages - move arbitrary anonymous pages of an existing vma
1240 * @ctx: pointer to the userfaultfd context
1241 * @mm: the address space to move pages
1242 * @dst_start: start of the destination virtual memory range
1243 * @src_start: start of the source virtual memory range
1244 * @len: length of the virtual memory range
1245 * @mode: flags from uffdio_move.mode
1246 *
1247 * Must be called with mmap_lock held for read.
1248 *
1249 * move_pages() remaps arbitrary anonymous pages atomically in zero
1250 * copy. It only works on non shared anonymous pages because those can
1251 * be relocated without generating non linear anon_vmas in the rmap
1252 * code.
1253 *
1254 * It provides a zero copy mechanism to handle userspace page faults.
1255 * The source vma pages should have mapcount == 1, which can be
1256 * enforced by using madvise(MADV_DONTFORK) on src vma.
1257 *
1258 * The thread receiving the page during the userland page fault
1259 * will receive the faulting page in the source vma through the network,
1260 * storage or any other I/O device (MADV_DONTFORK in the source vma
 1261 * avoids move_pages() failing with -EBUSY if the process forks before
1262 * move_pages() is called), then it will call move_pages() to map the
1263 * page in the faulting address in the destination vma.
1264 *
1265 * This userfaultfd command works purely via pagetables, so it's the
1266 * most efficient way to move physical non shared anonymous pages
1267 * across different virtual addresses. Unlike mremap()/mmap()/munmap()
1268 * it does not create any new vmas. The mapping in the destination
1269 * address is atomic.
1270 *
1271 * It only works if the vma protection bits are identical between the
1272 * source and destination vmas.
1273 *
1274 * It can remap non shared anonymous pages within the same vma too.
1275 *
1276 * If the source virtual memory range has any unmapped holes, or if
1277 * the destination virtual memory range is not a whole unmapped hole,
1278 * move_pages() will fail respectively with -ENOENT or -EEXIST. This
1279 * provides a very strict behavior to avoid any chance of memory
1280 * corruption going unnoticed if there are userland race conditions.
1281 * Only one thread should resolve the userland page fault at any given
1282 * time for any given faulting address. This means that if two threads
1283 * try to both call move_pages() on the same destination address at the
1284 * same time, the second thread will get an explicit error from this
1285 * command.
1286 *
1287 * The command retval will be "len" if successful. The command
1288 * however can be interrupted by fatal signals or errors. If
1289 * interrupted it will return the number of bytes successfully
1290 * remapped before the interruption if any, or the negative error if
1291 * none. It will never return zero. Either it will return an error or
1292 * a number of bytes successfully moved. If the retval reports a
1293 * "short" remap, the move_pages() command should be repeated by
1294 * userland with src+retval, dst+retval, len-retval if it wants to know
1295 * about the error that interrupted it.
1296 *
1297 * The UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES flag can be specified to
1298 * prevent -ENOENT errors from materializing if there are holes in the
1299 * source virtual range that is being remapped. The holes will be
1300 * accounted as successfully remapped in the retval of the
1301 * command. This is mostly useful to remap hugepage-aligned virtual
1302 * regions without knowing whether there are transparent hugepages
1303 * in the regions or not, while avoiding the risk of having to split
1304 * a huge pmd during the remap.
1305 *
1306 * Any rmap walk that takes the anon_vma locks without first obtaining
1307 * the folio lock (the only current instance is folio_referenced) must
1308 * verify whether folio->mapping has changed after taking the anon_vma
1309 * lock. If it changed, the walk should release the lock and retry with
1310 * the new anon_vma, because the change means the anon_vma was replaced
1311 * by move_pages() before the lock could be obtained. This is the only
1312 * additional complexity added to the rmap code to provide this
1313 * anonymous page remapping functionality.
1314 */
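
/*
 * A hypothetical userspace sketch (not part of this file) of the retry
 * protocol described above, driven by the UFFDIO_MOVE ioctl() that ends
 * up calling move_pages(). The uffd setup and error reporting are
 * assumed and elided; field names follow include/uapi/linux/userfaultfd.h:
 *
 *	struct uffdio_move mv = {
 *		.dst = dst, .src = src, .len = len,
 *		.mode = 0,	// or UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES
 *	};
 *
 *	while (mv.len) {
 *		// Returns 0 with mv.move == mv.len on full success, or
 *		// fails with EAGAIN on a short move, with mv.move holding
 *		// the bytes already moved. Treat anything else as fatal.
 *		if (ioctl(uffd, UFFDIO_MOVE, &mv) && errno != EAGAIN)
 *			break;
 *		mv.dst += mv.move;
 *		mv.src += mv.move;
 *		mv.len -= mv.move;
 *	}
 */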
1315ssize_t move_pages(struct userfaultfd_ctx *ctx, struct mm_struct *mm,
1316		   unsigned long dst_start, unsigned long src_start,
1317		   unsigned long len, __u64 mode)
1318{
1319	struct vm_area_struct *src_vma, *dst_vma;
1320	unsigned long src_addr, dst_addr;
1321	pmd_t *src_pmd, *dst_pmd;
1322	long err = -EINVAL;
1323	ssize_t moved = 0;
1324
1325	/* Sanitize the command parameters. */
1326	if (WARN_ON_ONCE(src_start & ~PAGE_MASK) ||
1327	    WARN_ON_ONCE(dst_start & ~PAGE_MASK) ||
1328	    WARN_ON_ONCE(len & ~PAGE_MASK))
1329		goto out;
1330
1331	/* Does the address range wrap, or is the span zero-sized? */
1332	if (WARN_ON_ONCE(src_start + len <= src_start) ||
1333	    WARN_ON_ONCE(dst_start + len <= dst_start))
1334		goto out;
1335
1336	/*
1337	 * Make sure neither vma is shared, and that the src and dst
1338	 * remap ranges are both valid and each fully within a single
1339	 * existing vma.
1340	 */
1341	src_vma = find_vma(mm, src_start);
1342	if (!src_vma || (src_vma->vm_flags & VM_SHARED))
1343		goto out;
1344	if (src_start < src_vma->vm_start ||
1345	    src_start + len > src_vma->vm_end)
1346		goto out;
1347
1348	dst_vma = find_vma(mm, dst_start);
1349	if (!dst_vma || (dst_vma->vm_flags & VM_SHARED))
1350		goto out;
1351	if (dst_start < dst_vma->vm_start ||
1352	    dst_start + len > dst_vma->vm_end)
1353		goto out;
1354
1355	err = validate_move_areas(ctx, src_vma, dst_vma);
1356	if (err)
1357		goto out;
1358
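	/*
	 * Walk the range one pmd at a time.  A source huge pmd is moved
	 * whole only when src and dst are huge-page aligned, the remaining
	 * range covers it and the destination pmd is empty; otherwise it
	 * is split and the per-pte path below is used instead.
	 */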
1359	for (src_addr = src_start, dst_addr = dst_start;
1360	     src_addr < src_start + len;) {
1361		spinlock_t *ptl;
1362		pmd_t dst_pmdval;
1363		unsigned long step_size;
1364
1365		/*
1366		 * Below works because an anonymous area would not have a
1367		 * transparent huge PUD. If file-backed support is added,
1368		 * that case would need to be handled here.
1369		 */
1370		src_pmd = mm_find_pmd(mm, src_addr);
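		/*
		 * A missing source pmd is a hole in the source range: fail
		 * with -ENOENT unless UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES was
		 * passed, in which case allocate the pmd so the pte level
		 * below can account the hole as successfully moved.
		 */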
1371		if (unlikely(!src_pmd)) {
1372			if (!(mode & UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES)) {
1373				err = -ENOENT;
1374				break;
1375			}
1376			src_pmd = mm_alloc_pmd(mm, src_addr);
1377			if (unlikely(!src_pmd)) {
1378				err = -ENOMEM;
1379				break;
1380			}
1381		}
1382		dst_pmd = mm_alloc_pmd(mm, dst_addr);
1383		if (unlikely(!dst_pmd)) {
1384			err = -ENOMEM;
1385			break;
1386		}
1387
1388		dst_pmdval = pmdp_get_lockless(dst_pmd);
1389		/*
1390		 * If the dst_pmd is mapped as THP, don't override it and just
1391		 * be strict. If dst_pmd changes into THP after this check,
1392		 * move_pages_huge_pmd() will detect the change and retry
1393		 * while move_pages_pte() will detect the change and fail.
1394		 */
1395		if (unlikely(pmd_trans_huge(dst_pmdval))) {
1396			err = -EEXIST;
1397			break;
1398		}
1399
1400		ptl = pmd_trans_huge_lock(src_pmd, src_vma);
1401		if (ptl) {
1402			if (pmd_devmap(*src_pmd)) {
1403				spin_unlock(ptl);
1404				err = -ENOENT;
1405				break;
1406			}
1407			/* Avoid moving zeropages for now */
1408			if (is_huge_zero_pmd(*src_pmd)) {
1409				spin_unlock(ptl);
1410				err = -EBUSY;
1411				break;
1412			}
1413
1414			/* Check if we can move the pmd without splitting it. */
1415			if (move_splits_huge_pmd(dst_addr, src_addr, src_start + len) ||
1416			    !pmd_none(dst_pmdval)) {
1417				struct folio *folio = pfn_folio(pmd_pfn(*src_pmd));
1418
1419				if (!folio || !PageAnonExclusive(&folio->page)) {
1420					spin_unlock(ptl);
1421					err = -EBUSY;
1422					break;
1423				}
1424
1425				spin_unlock(ptl);
1426				split_huge_pmd(src_vma, src_pmd, src_addr);
1427				/* The folio will be split by move_pages_pte() */
1428				continue;
1429			}
1430
1431			err = move_pages_huge_pmd(mm, dst_pmd, src_pmd,
1432						  dst_pmdval, dst_vma, src_vma,
1433						  dst_addr, src_addr);
1434			step_size = HPAGE_PMD_SIZE;
1435		} else {
1436			if (pmd_none(*src_pmd)) {
1437				if (!(mode & UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES)) {
1438					err = -ENOENT;
1439					break;
1440				}
1441				if (unlikely(__pte_alloc(mm, src_pmd))) {
1442					err = -ENOMEM;
1443					break;
1444				}
1445			}
1446
1447			if (unlikely(pte_alloc(mm, dst_pmd))) {
1448				err = -ENOMEM;
1449				break;
1450			}
1451
1452			err = move_pages_pte(mm, dst_pmd, src_pmd,
1453					     dst_vma, src_vma,
1454					     dst_addr, src_addr, mode);
1455			step_size = PAGE_SIZE;
1456		}
1457
1458		cond_resched();
1459
1460		if (fatal_signal_pending(current)) {
1461			/* Do not override a real error with -EINTR */
1462			if (!err || err == -EAGAIN)
1463				err = -EINTR;
1464			break;
1465		}
1466
1467		if (err) {
1468			if (err == -EAGAIN)
1469				continue;
1470			break;
1471		}
1472
1473		/* Proceed to the next page or huge page */
1474		dst_addr += step_size;
1475		src_addr += step_size;
1476		moved += step_size;
1477	}
1478
1479out:
1480	VM_WARN_ON(moved < 0);
1481	VM_WARN_ON(err > 0);
1482	VM_WARN_ON(!moved && !err);
1483	return moved ? moved : err;
1484}