mm/userfaultfd.c at v5.14.15:
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 *  mm/userfaultfd.c
  4 *
  5 *  Copyright (C) 2015  Red Hat, Inc.
  6 */
  7
  8#include <linux/mm.h>
  9#include <linux/sched/signal.h>
 10#include <linux/pagemap.h>
 11#include <linux/rmap.h>
 12#include <linux/swap.h>
 13#include <linux/swapops.h>
 14#include <linux/userfaultfd_k.h>
 15#include <linux/mmu_notifier.h>
 16#include <linux/hugetlb.h>
 17#include <linux/shmem_fs.h>
 18#include <asm/tlbflush.h>
 19#include "internal.h"
 20
 21static __always_inline
 22struct vm_area_struct *find_dst_vma(struct mm_struct *dst_mm,
 23				    unsigned long dst_start,
 24				    unsigned long len)
 25{
 26	/*
 27	 * Make sure that the dst range is both valid and fully within a
 28	 * single existing vma.
 29	 */
 30	struct vm_area_struct *dst_vma;
 31
 32	dst_vma = find_vma(dst_mm, dst_start);
 33	if (!dst_vma)
 34		return NULL;
 35
 36	if (dst_start < dst_vma->vm_start ||
 37	    dst_start + len > dst_vma->vm_end)
 38		return NULL;
 39
 40	/*
 41	 * Check the vma is registered in uffd, this is required to
 42	 * enforce the VM_MAYWRITE check done at uffd registration
 43	 * time.
 44	 */
 45	if (!dst_vma->vm_userfaultfd_ctx.ctx)
 46		return NULL;
 47
 48	return dst_vma;
 49}
 50
 51/*
 52 * Install PTEs, to map dst_addr (within dst_vma) to page.
 53 *
 54 * This function handles both MCOPY_ATOMIC_NORMAL and _CONTINUE for both shmem
 55 * and anon, and for both shared and private VMAs.
 56 */
 57int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
 58			     struct vm_area_struct *dst_vma,
 59			     unsigned long dst_addr, struct page *page,
 60			     bool newly_allocated, bool wp_copy)
 61{
 62	int ret;
 63	pte_t _dst_pte, *dst_pte;
 64	bool writable = dst_vma->vm_flags & VM_WRITE;
 65	bool vm_shared = dst_vma->vm_flags & VM_SHARED;
 66	bool page_in_cache = page->mapping;
 67	spinlock_t *ptl;
 68	struct inode *inode;
 69	pgoff_t offset, max_off;
 70
 71	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
 72	if (page_in_cache && !vm_shared)
 73		writable = false;
 74	if (writable || !page_in_cache)
 75		_dst_pte = pte_mkdirty(_dst_pte);
 76	if (writable) {
 77		if (wp_copy)
 78			_dst_pte = pte_mkuffd_wp(_dst_pte);
 79		else
 80			_dst_pte = pte_mkwrite(_dst_pte);
 81	}
 82
 83	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
 84
 85	if (vma_is_shmem(dst_vma)) {
 86		/* serialize against truncate with the page table lock */
 87		inode = dst_vma->vm_file->f_inode;
 88		offset = linear_page_index(dst_vma, dst_addr);
 89		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
 90		ret = -EFAULT;
 91		if (unlikely(offset >= max_off))
 92			goto out_unlock;
 93	}
 94
 95	ret = -EEXIST;
 96	if (!pte_none(*dst_pte))
 97		goto out_unlock;
 98
 99	if (page_in_cache)
100		page_add_file_rmap(page, false);
101	else
102		page_add_new_anon_rmap(page, dst_vma, dst_addr, false);
103
104	/*
105	 * Must happen after rmap, as mm_counter() checks mapping (via
106	 * PageAnon()), which is set by __page_set_anon_rmap().
107	 */
108	inc_mm_counter(dst_mm, mm_counter(page));
109
110	if (newly_allocated)
111		lru_cache_add_inactive_or_unevictable(page, dst_vma);
112
113	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
114
115	/* No need to invalidate - it was non-present before */
116	update_mmu_cache(dst_vma, dst_addr, dst_pte);
117	ret = 0;
118out_unlock:
119	pte_unmap_unlock(dst_pte, ptl);
120	return ret;
121}
122
123static int mcopy_atomic_pte(struct mm_struct *dst_mm,
124			    pmd_t *dst_pmd,
125			    struct vm_area_struct *dst_vma,
126			    unsigned long dst_addr,
127			    unsigned long src_addr,
128			    struct page **pagep,
129			    bool wp_copy)
130{
131	void *page_kaddr;
132	int ret;
133	struct page *page;
134
135	if (!*pagep) {
136		ret = -ENOMEM;
137		page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr);
138		if (!page)
139			goto out;
140
141		page_kaddr = kmap_atomic(page);
142		ret = copy_from_user(page_kaddr,
143				     (const void __user *) src_addr,
144				     PAGE_SIZE);
145		kunmap_atomic(page_kaddr);
146
147		/* fallback to copy_from_user outside mmap_lock */
148		if (unlikely(ret)) {
149			ret = -ENOENT;
150			*pagep = page;
151			/* don't free the page */
152			goto out;
153		}
154	} else {
155		page = *pagep;
156		*pagep = NULL;
157	}
158
159	/*
160	 * The memory barrier inside __SetPageUptodate makes sure that
161	 * preceding stores to the page contents become visible before
162	 * the set_pte_at() write.
163	 */
164	__SetPageUptodate(page);
165
166	ret = -ENOMEM;
167	if (mem_cgroup_charge(page, dst_mm, GFP_KERNEL))
168		goto out_release;
169
170	ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
171				       page, true, wp_copy);
172	if (ret)
173		goto out_release;
174out:
175	return ret;
176out_release:
177	put_page(page);
178	goto out;
179}
180
181static int mfill_zeropage_pte(struct mm_struct *dst_mm,
182			      pmd_t *dst_pmd,
183			      struct vm_area_struct *dst_vma,
184			      unsigned long dst_addr)
185{
186	pte_t _dst_pte, *dst_pte;
187	spinlock_t *ptl;
188	int ret;
189	pgoff_t offset, max_off;
190	struct inode *inode;
191
192	_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
193					 dst_vma->vm_page_prot));
194	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
195	if (dst_vma->vm_file) {
196		/* the shmem MAP_PRIVATE case requires checking the i_size */
197		inode = dst_vma->vm_file->f_inode;
198		offset = linear_page_index(dst_vma, dst_addr);
199		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
200		ret = -EFAULT;
201		if (unlikely(offset >= max_off))
202			goto out_unlock;
203	}
204	ret = -EEXIST;
205	if (!pte_none(*dst_pte))
206		goto out_unlock;
207	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
208	/* No need to invalidate - it was non-present before */
209	update_mmu_cache(dst_vma, dst_addr, dst_pte);
210	ret = 0;
211out_unlock:
212	pte_unmap_unlock(dst_pte, ptl);
213	return ret;
214}
215
216/* Handles UFFDIO_CONTINUE for all shmem VMAs (shared or private). */
217static int mcontinue_atomic_pte(struct mm_struct *dst_mm,
218				pmd_t *dst_pmd,
219				struct vm_area_struct *dst_vma,
220				unsigned long dst_addr,
221				bool wp_copy)
222{
223	struct inode *inode = file_inode(dst_vma->vm_file);
224	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
225	struct page *page;
226	int ret;
227
228	ret = shmem_getpage(inode, pgoff, &page, SGP_READ);
229	if (ret)
230		goto out;
231	if (!page) {
232		ret = -EFAULT;
233		goto out;
234	}
235
236	ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
237				       page, false, wp_copy);
238	if (ret)
239		goto out_release;
240
241	unlock_page(page);
242	ret = 0;
243out:
244	return ret;
245out_release:
246	unlock_page(page);
247	put_page(page);
248	goto out;
249}
250
251static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
252{
253	pgd_t *pgd;
254	p4d_t *p4d;
255	pud_t *pud;
256
257	pgd = pgd_offset(mm, address);
258	p4d = p4d_alloc(mm, pgd, address);
259	if (!p4d)
260		return NULL;
261	pud = pud_alloc(mm, p4d, address);
262	if (!pud)
263		return NULL;
264	/*
265	 * Note that we didn't run this because the pmd was
266	 * missing, the *pmd may be already established and in
267	 * turn it may also be a trans_huge_pmd.
268	 */
269	return pmd_alloc(mm, pud, address);
270}
271
272#ifdef CONFIG_HUGETLB_PAGE
273/*
274 * __mcopy_atomic processing for HUGETLB vmas.  Note that this routine is
275 * called with mmap_lock held, it will release mmap_lock before returning.
276 */
277static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
278					      struct vm_area_struct *dst_vma,
279					      unsigned long dst_start,
280					      unsigned long src_start,
281					      unsigned long len,
282					      enum mcopy_atomic_mode mode)
283{
284	int vm_shared = dst_vma->vm_flags & VM_SHARED;
285	ssize_t err;
286	pte_t *dst_pte;
287	unsigned long src_addr, dst_addr;
288	long copied;
289	struct page *page;
290	unsigned long vma_hpagesize;
291	pgoff_t idx;
292	u32 hash;
293	struct address_space *mapping;
294
295	/*
296	 * There is no default zero huge page for all huge page sizes as
297	 * supported by hugetlb.  A PMD_SIZE huge pages may exist as used
298	 * by THP.  Since we can not reliably insert a zero page, this
299	 * feature is not supported.
300	 */
301	if (mode == MCOPY_ATOMIC_ZEROPAGE) {
302		mmap_read_unlock(dst_mm);
303		return -EINVAL;
304	}
305
306	src_addr = src_start;
307	dst_addr = dst_start;
308	copied = 0;
309	page = NULL;
310	vma_hpagesize = vma_kernel_pagesize(dst_vma);
311
312	/*
313	 * Validate alignment based on huge page size
314	 */
315	err = -EINVAL;
316	if (dst_start & (vma_hpagesize - 1) || len & (vma_hpagesize - 1))
317		goto out_unlock;
318
319retry:
320	/*
321	 * On routine entry dst_vma is set.  If we had to drop mmap_lock and
322	 * retry, dst_vma will be set to NULL and we must lookup again.
323	 */
324	if (!dst_vma) {
325		err = -ENOENT;
326		dst_vma = find_dst_vma(dst_mm, dst_start, len);
327		if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
328			goto out_unlock;
329
330		err = -EINVAL;
331		if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
332			goto out_unlock;
333
334		vm_shared = dst_vma->vm_flags & VM_SHARED;
335	}
336
337	/*
338	 * If not shared, ensure the dst_vma has a anon_vma.
339	 */
340	err = -ENOMEM;
341	if (!vm_shared) {
342		if (unlikely(anon_vma_prepare(dst_vma)))
343			goto out_unlock;
344	}
345
346	while (src_addr < src_start + len) {
347		BUG_ON(dst_addr >= dst_start + len);
348
349		/*
350		 * Serialize via i_mmap_rwsem and hugetlb_fault_mutex.
351		 * i_mmap_rwsem ensures the dst_pte remains valid even
352		 * in the case of shared pmds.  fault mutex prevents
353		 * races with other faulting threads.
354		 */
355		mapping = dst_vma->vm_file->f_mapping;
356		i_mmap_lock_read(mapping);
357		idx = linear_page_index(dst_vma, dst_addr);
358		hash = hugetlb_fault_mutex_hash(mapping, idx);
359		mutex_lock(&hugetlb_fault_mutex_table[hash]);
360
361		err = -ENOMEM;
362		dst_pte = huge_pte_alloc(dst_mm, dst_vma, dst_addr, vma_hpagesize);
363		if (!dst_pte) {
364			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
365			i_mmap_unlock_read(mapping);
366			goto out_unlock;
367		}
368
369		if (mode != MCOPY_ATOMIC_CONTINUE &&
370		    !huge_pte_none(huge_ptep_get(dst_pte))) {
371			err = -EEXIST;
372			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
373			i_mmap_unlock_read(mapping);
374			goto out_unlock;
375		}
376
377		err = hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma,
378					       dst_addr, src_addr, mode, &page);
379
380		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
381		i_mmap_unlock_read(mapping);
382
383		cond_resched();
384
385		if (unlikely(err == -ENOENT)) {
386			mmap_read_unlock(dst_mm);
387			BUG_ON(!page);
388
389			err = copy_huge_page_from_user(page,
390						(const void __user *)src_addr,
391						vma_hpagesize / PAGE_SIZE,
392						true);
393			if (unlikely(err)) {
394				err = -EFAULT;
395				goto out;
396			}
397			mmap_read_lock(dst_mm);
398
399			dst_vma = NULL;
400			goto retry;
401		} else
402			BUG_ON(page);
403
404		if (!err) {
405			dst_addr += vma_hpagesize;
406			src_addr += vma_hpagesize;
407			copied += vma_hpagesize;
408
409			if (fatal_signal_pending(current))
410				err = -EINTR;
411		}
412		if (err)
413			break;
414	}
415
416out_unlock:
417	mmap_read_unlock(dst_mm);
418out:
419	if (page)
420		put_page(page);
421	BUG_ON(copied < 0);
422	BUG_ON(err > 0);
423	BUG_ON(!copied && !err);
424	return copied ? copied : err;
425}
426#else /* !CONFIG_HUGETLB_PAGE */
427/* fail at build time if gcc attempts to use this */
428extern ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
429				      struct vm_area_struct *dst_vma,
430				      unsigned long dst_start,
431				      unsigned long src_start,
432				      unsigned long len,
433				      enum mcopy_atomic_mode mode);
434#endif /* CONFIG_HUGETLB_PAGE */
435
436static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
437						pmd_t *dst_pmd,
438						struct vm_area_struct *dst_vma,
439						unsigned long dst_addr,
440						unsigned long src_addr,
441						struct page **page,
442						enum mcopy_atomic_mode mode,
443						bool wp_copy)
444{
445	ssize_t err;
446
447	if (mode == MCOPY_ATOMIC_CONTINUE) {
448		return mcontinue_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
449					    wp_copy);
450	}
451
452	/*
453	 * The normal page fault path for a shmem will invoke the
454	 * fault, fill the hole in the file and COW it right away. The
455	 * result generates plain anonymous memory. So when we are
456	 * asked to fill an hole in a MAP_PRIVATE shmem mapping, we'll
457	 * generate anonymous memory directly without actually filling
458	 * the hole. For the MAP_PRIVATE case the robustness check
459	 * only happens in the pagetable (to verify it's still none)
460	 * and not in the radix tree.
461	 */
462	if (!(dst_vma->vm_flags & VM_SHARED)) {
463		if (mode == MCOPY_ATOMIC_NORMAL)
464			err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
465					       dst_addr, src_addr, page,
466					       wp_copy);
467		else
468			err = mfill_zeropage_pte(dst_mm, dst_pmd,
469						 dst_vma, dst_addr);
470	} else {
471		VM_WARN_ON_ONCE(wp_copy);
472		err = shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
473					     dst_addr, src_addr,
474					     mode != MCOPY_ATOMIC_NORMAL,
475					     page);
476	}
477
478	return err;
479}
480
481static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
482					      unsigned long dst_start,
483					      unsigned long src_start,
484					      unsigned long len,
485					      enum mcopy_atomic_mode mcopy_mode,
486					      bool *mmap_changing,
487					      __u64 mode)
488{
489	struct vm_area_struct *dst_vma;
490	ssize_t err;
491	pmd_t *dst_pmd;
492	unsigned long src_addr, dst_addr;
493	long copied;
494	struct page *page;
495	bool wp_copy;
496
497	/*
498	 * Sanitize the command parameters:
499	 */
500	BUG_ON(dst_start & ~PAGE_MASK);
501	BUG_ON(len & ~PAGE_MASK);
502
503	/* Does the address range wrap, or is the span zero-sized? */
504	BUG_ON(src_start + len <= src_start);
505	BUG_ON(dst_start + len <= dst_start);
506
507	src_addr = src_start;
508	dst_addr = dst_start;
509	copied = 0;
510	page = NULL;
511retry:
512	mmap_read_lock(dst_mm);
513
514	/*
515	 * If memory mappings are changing because of non-cooperative
516	 * operation (e.g. mremap) running in parallel, bail out and
517	 * request the user to retry later
518	 */
519	err = -EAGAIN;
520	if (mmap_changing && READ_ONCE(*mmap_changing))
521		goto out_unlock;
522
523	/*
524	 * Make sure the vma is not shared, that the dst range is
525	 * both valid and fully within a single existing vma.
526	 */
527	err = -ENOENT;
528	dst_vma = find_dst_vma(dst_mm, dst_start, len);
529	if (!dst_vma)
530		goto out_unlock;
531
532	err = -EINVAL;
533	/*
534	 * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
535	 * it will overwrite vm_ops, so vma_is_anonymous must return false.
536	 */
537	if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
538	    dst_vma->vm_flags & VM_SHARED))
539		goto out_unlock;
540
541	/*
542	 * validate 'mode' now that we know the dst_vma: don't allow
543	 * a wrprotect copy if the userfaultfd didn't register as WP.
544	 */
545	wp_copy = mode & UFFDIO_COPY_MODE_WP;
546	if (wp_copy && !(dst_vma->vm_flags & VM_UFFD_WP))
547		goto out_unlock;
548
549	/*
550	 * If this is a HUGETLB vma, pass off to appropriate routine
551	 */
552	if (is_vm_hugetlb_page(dst_vma))
553		return  __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start,
554						src_start, len, mcopy_mode);
555
556	if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
557		goto out_unlock;
558	if (!vma_is_shmem(dst_vma) && mcopy_mode == MCOPY_ATOMIC_CONTINUE)
559		goto out_unlock;
560
561	/*
562	 * Ensure the dst_vma has a anon_vma or this page
563	 * would get a NULL anon_vma when moved in the
564	 * dst_vma.
565	 */
566	err = -ENOMEM;
567	if (!(dst_vma->vm_flags & VM_SHARED) &&
568	    unlikely(anon_vma_prepare(dst_vma)))
569		goto out_unlock;
570
571	while (src_addr < src_start + len) {
572		pmd_t dst_pmdval;
573
574		BUG_ON(dst_addr >= dst_start + len);
575
576		dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
577		if (unlikely(!dst_pmd)) {
578			err = -ENOMEM;
579			break;
580		}
581
582		dst_pmdval = pmd_read_atomic(dst_pmd);
583		/*
584		 * If the dst_pmd is mapped as THP don't
585		 * override it and just be strict.
586		 */
587		if (unlikely(pmd_trans_huge(dst_pmdval))) {
588			err = -EEXIST;
589			break;
590		}
591		if (unlikely(pmd_none(dst_pmdval)) &&
592		    unlikely(__pte_alloc(dst_mm, dst_pmd))) {
593			err = -ENOMEM;
594			break;
595		}
596		/* If an huge pmd materialized from under us fail */
597		if (unlikely(pmd_trans_huge(*dst_pmd))) {
598			err = -EFAULT;
599			break;
600		}
601
602		BUG_ON(pmd_none(*dst_pmd));
603		BUG_ON(pmd_trans_huge(*dst_pmd));
604
605		err = mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
606				       src_addr, &page, mcopy_mode, wp_copy);
607		cond_resched();
608
609		if (unlikely(err == -ENOENT)) {
610			void *page_kaddr;
611
612			mmap_read_unlock(dst_mm);
613			BUG_ON(!page);
614
615			page_kaddr = kmap(page);
616			err = copy_from_user(page_kaddr,
617					     (const void __user *) src_addr,
618					     PAGE_SIZE);
619			kunmap(page);
620			if (unlikely(err)) {
621				err = -EFAULT;
622				goto out;
623			}
624			goto retry;
625		} else
626			BUG_ON(page);
627
628		if (!err) {
629			dst_addr += PAGE_SIZE;
630			src_addr += PAGE_SIZE;
631			copied += PAGE_SIZE;
632
633			if (fatal_signal_pending(current))
634				err = -EINTR;
635		}
636		if (err)
637			break;
638	}
639
640out_unlock:
641	mmap_read_unlock(dst_mm);
642out:
643	if (page)
644		put_page(page);
645	BUG_ON(copied < 0);
646	BUG_ON(err > 0);
647	BUG_ON(!copied && !err);
648	return copied ? copied : err;
649}
650
651ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
652		     unsigned long src_start, unsigned long len,
653		     bool *mmap_changing, __u64 mode)
654{
655	return __mcopy_atomic(dst_mm, dst_start, src_start, len,
656			      MCOPY_ATOMIC_NORMAL, mmap_changing, mode);
657}
658
659ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start,
660		       unsigned long len, bool *mmap_changing)
661{
662	return __mcopy_atomic(dst_mm, start, 0, len, MCOPY_ATOMIC_ZEROPAGE,
663			      mmap_changing, 0);
664}
665
666ssize_t mcopy_continue(struct mm_struct *dst_mm, unsigned long start,
667		       unsigned long len, bool *mmap_changing)
668{
669	return __mcopy_atomic(dst_mm, start, 0, len, MCOPY_ATOMIC_CONTINUE,
670			      mmap_changing, 0);
671}
672
673int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
674			unsigned long len, bool enable_wp, bool *mmap_changing)
675{
676	struct vm_area_struct *dst_vma;
677	pgprot_t newprot;
678	int err;
679
680	/*
681	 * Sanitize the command parameters:
682	 */
683	BUG_ON(start & ~PAGE_MASK);
684	BUG_ON(len & ~PAGE_MASK);
685
686	/* Does the address range wrap, or is the span zero-sized? */
687	BUG_ON(start + len <= start);
688
689	mmap_read_lock(dst_mm);
690
691	/*
692	 * If memory mappings are changing because of non-cooperative
693	 * operation (e.g. mremap) running in parallel, bail out and
694	 * request the user to retry later
695	 */
696	err = -EAGAIN;
697	if (mmap_changing && READ_ONCE(*mmap_changing))
698		goto out_unlock;
699
700	err = -ENOENT;
701	dst_vma = find_dst_vma(dst_mm, start, len);
702	/*
703	 * Make sure the vma is not shared, that the dst range is
704	 * both valid and fully within a single existing vma.
705	 */
706	if (!dst_vma || (dst_vma->vm_flags & VM_SHARED))
707		goto out_unlock;
708	if (!userfaultfd_wp(dst_vma))
709		goto out_unlock;
710	if (!vma_is_anonymous(dst_vma))
711		goto out_unlock;
712
713	if (enable_wp)
714		newprot = vm_get_page_prot(dst_vma->vm_flags & ~(VM_WRITE));
715	else
716		newprot = vm_get_page_prot(dst_vma->vm_flags);
717
718	change_protection(dst_vma, start, start + len, newprot,
719			  enable_wp ? MM_CP_UFFD_WP : MM_CP_UFFD_WP_RESOLVE);
720
721	err = 0;
722out_unlock:
723	mmap_read_unlock(dst_mm);
724	return err;
725}
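
The exported helpers above (mcopy_atomic, mfill_zeropage, mcopy_continue and mwriteprotect_range) are the backends that fs/userfaultfd.c invokes for the UFFDIO_COPY, UFFDIO_ZEROPAGE, UFFDIO_CONTINUE and UFFDIO_WRITEPROTECT ioctls. As a rough illustration of the userspace side, a minimal UFFDIO_COPY call that resolves a missing-page fault could look like the sketch below; the uffd descriptor, the registered range, and the names fault_addr/src_page/page_size are assumptions made for this example and are not part of the file above.

#include <linux/userfaultfd.h>
#include <sys/ioctl.h>
#include <string.h>

/* Resolve a missing-page fault at fault_addr by copying one prepared page
 * into the faulting range; in the kernel this ends up in mcopy_atomic()
 * and __mcopy_atomic() shown above. */
static int resolve_missing_fault(int uffd, void *fault_addr,
				 const void *src_page, size_t page_size)
{
	struct uffdio_copy copy;

	memset(&copy, 0, sizeof(copy));
	copy.dst = (unsigned long)fault_addr & ~(page_size - 1); /* page-align */
	copy.src = (unsigned long)src_page;
	copy.len = page_size;
	copy.mode = 0;	/* or UFFDIO_COPY_MODE_WP to install it write-protected */

	if (ioctl(uffd, UFFDIO_COPY, &copy) == -1)
		return -1;
	return 0;
}

After the ioctl, copy.copy holds the number of bytes actually installed (or a negative error), and the ioctl fails with EAGAIN when progress was only partial, mirroring the copied-versus-err return convention of __mcopy_atomic() above.
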
mm/userfaultfd.c at v6.2:
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 *  mm/userfaultfd.c
  4 *
  5 *  Copyright (C) 2015  Red Hat, Inc.
  6 */
  7
  8#include <linux/mm.h>
  9#include <linux/sched/signal.h>
 10#include <linux/pagemap.h>
 11#include <linux/rmap.h>
 12#include <linux/swap.h>
 13#include <linux/swapops.h>
 14#include <linux/userfaultfd_k.h>
 15#include <linux/mmu_notifier.h>
 16#include <linux/hugetlb.h>
 17#include <linux/shmem_fs.h>
 18#include <asm/tlbflush.h>
 19#include <asm/tlb.h>
 20#include "internal.h"
 21
 22static __always_inline
 23struct vm_area_struct *find_dst_vma(struct mm_struct *dst_mm,
 24				    unsigned long dst_start,
 25				    unsigned long len)
 26{
 27	/*
 28	 * Make sure that the dst range is both valid and fully within a
 29	 * single existing vma.
 30	 */
 31	struct vm_area_struct *dst_vma;
 32
 33	dst_vma = find_vma(dst_mm, dst_start);
 34	if (!dst_vma)
 35		return NULL;
 36
 37	if (dst_start < dst_vma->vm_start ||
 38	    dst_start + len > dst_vma->vm_end)
 39		return NULL;
 40
 41	/*
 42	 * Check the vma is registered in uffd, this is required to
 43	 * enforce the VM_MAYWRITE check done at uffd registration
 44	 * time.
 45	 */
 46	if (!dst_vma->vm_userfaultfd_ctx.ctx)
 47		return NULL;
 48
 49	return dst_vma;
 50}
 51
 52/*
 53 * Install PTEs, to map dst_addr (within dst_vma) to page.
 54 *
 55 * This function handles both MCOPY_ATOMIC_NORMAL and _CONTINUE for both shmem
 56 * and anon, and for both shared and private VMAs.
 57 */
 58int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
 59			     struct vm_area_struct *dst_vma,
 60			     unsigned long dst_addr, struct page *page,
 61			     bool newly_allocated, bool wp_copy)
 62{
 63	int ret;
 64	pte_t _dst_pte, *dst_pte;
 65	bool writable = dst_vma->vm_flags & VM_WRITE;
 66	bool vm_shared = dst_vma->vm_flags & VM_SHARED;
 67	bool page_in_cache = page_mapping(page);
 68	spinlock_t *ptl;
 69	struct folio *folio;
 70	struct inode *inode;
 71	pgoff_t offset, max_off;
 72
 73	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
 74	_dst_pte = pte_mkdirty(_dst_pte);
 75	if (page_in_cache && !vm_shared)
 76		writable = false;
 77
 78	/*
 79	 * Always mark a PTE as write-protected when needed, regardless of
 80	 * VM_WRITE, which the user might change.
 81	 */
 82	if (wp_copy) {
 83		_dst_pte = pte_mkuffd_wp(_dst_pte);
 84		writable = false;
 85	}
 86
 87	if (writable)
 88		_dst_pte = pte_mkwrite(_dst_pte);
 89	else
 90		/*
 91		 * We need this to make sure write bit removed; as mk_pte()
 92		 * could return a pte with write bit set.
 93		 */
 94		_dst_pte = pte_wrprotect(_dst_pte);
 95
 96	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
 97
 98	if (vma_is_shmem(dst_vma)) {
 99		/* serialize against truncate with the page table lock */
100		inode = dst_vma->vm_file->f_inode;
101		offset = linear_page_index(dst_vma, dst_addr);
102		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
103		ret = -EFAULT;
104		if (unlikely(offset >= max_off))
105			goto out_unlock;
106	}
107
108	ret = -EEXIST;
109	/*
110	 * We allow to overwrite a pte marker: consider when both MISSING|WP
111	 * registered, we firstly wr-protect a none pte which has no page cache
112	 * page backing it, then access the page.
113	 */
114	if (!pte_none_mostly(*dst_pte))
115		goto out_unlock;
116
117	folio = page_folio(page);
118	if (page_in_cache) {
119		/* Usually, cache pages are already added to LRU */
120		if (newly_allocated)
121			folio_add_lru(folio);
122		page_add_file_rmap(page, dst_vma, false);
123	} else {
124		page_add_new_anon_rmap(page, dst_vma, dst_addr);
125		folio_add_lru_vma(folio, dst_vma);
126	}
127
128	/*
129	 * Must happen after rmap, as mm_counter() checks mapping (via
130	 * PageAnon()), which is set by __page_set_anon_rmap().
131	 */
132	inc_mm_counter(dst_mm, mm_counter(page));
133
134	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
135
136	/* No need to invalidate - it was non-present before */
137	update_mmu_cache(dst_vma, dst_addr, dst_pte);
138	ret = 0;
139out_unlock:
140	pte_unmap_unlock(dst_pte, ptl);
141	return ret;
142}
143
144static int mcopy_atomic_pte(struct mm_struct *dst_mm,
145			    pmd_t *dst_pmd,
146			    struct vm_area_struct *dst_vma,
147			    unsigned long dst_addr,
148			    unsigned long src_addr,
149			    struct page **pagep,
150			    bool wp_copy)
151{
152	void *page_kaddr;
153	int ret;
154	struct page *page;
155
156	if (!*pagep) {
157		ret = -ENOMEM;
158		page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr);
159		if (!page)
160			goto out;
161
162		page_kaddr = kmap_local_page(page);
163		/*
164		 * The read mmap_lock is held here.  Despite the
165		 * mmap_lock being read recursive a deadlock is still
166		 * possible if a writer has taken a lock.  For example:
167		 *
168		 * process A thread 1 takes read lock on own mmap_lock
169		 * process A thread 2 calls mmap, blocks taking write lock
170		 * process B thread 1 takes page fault, read lock on own mmap lock
171		 * process B thread 2 calls mmap, blocks taking write lock
172		 * process A thread 1 blocks taking read lock on process B
173		 * process B thread 1 blocks taking read lock on process A
174		 *
175		 * Disable page faults to prevent potential deadlock
176		 * and retry the copy outside the mmap_lock.
177		 */
178		pagefault_disable();
179		ret = copy_from_user(page_kaddr,
180				     (const void __user *) src_addr,
181				     PAGE_SIZE);
182		pagefault_enable();
183		kunmap_local(page_kaddr);
184
185		/* fallback to copy_from_user outside mmap_lock */
186		if (unlikely(ret)) {
187			ret = -ENOENT;
188			*pagep = page;
189			/* don't free the page */
190			goto out;
191		}
192
193		flush_dcache_page(page);
194	} else {
195		page = *pagep;
196		*pagep = NULL;
197	}
198
199	/*
200	 * The memory barrier inside __SetPageUptodate makes sure that
201	 * preceding stores to the page contents become visible before
202	 * the set_pte_at() write.
203	 */
204	__SetPageUptodate(page);
205
206	ret = -ENOMEM;
207	if (mem_cgroup_charge(page_folio(page), dst_mm, GFP_KERNEL))
208		goto out_release;
209
210	ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
211				       page, true, wp_copy);
212	if (ret)
213		goto out_release;
214out:
215	return ret;
216out_release:
217	put_page(page);
218	goto out;
219}
220
221static int mfill_zeropage_pte(struct mm_struct *dst_mm,
222			      pmd_t *dst_pmd,
223			      struct vm_area_struct *dst_vma,
224			      unsigned long dst_addr)
225{
226	pte_t _dst_pte, *dst_pte;
227	spinlock_t *ptl;
228	int ret;
229	pgoff_t offset, max_off;
230	struct inode *inode;
231
232	_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
233					 dst_vma->vm_page_prot));
234	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
235	if (dst_vma->vm_file) {
236		/* the shmem MAP_PRIVATE case requires checking the i_size */
237		inode = dst_vma->vm_file->f_inode;
238		offset = linear_page_index(dst_vma, dst_addr);
239		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
240		ret = -EFAULT;
241		if (unlikely(offset >= max_off))
242			goto out_unlock;
243	}
244	ret = -EEXIST;
245	if (!pte_none(*dst_pte))
246		goto out_unlock;
247	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
248	/* No need to invalidate - it was non-present before */
249	update_mmu_cache(dst_vma, dst_addr, dst_pte);
250	ret = 0;
251out_unlock:
252	pte_unmap_unlock(dst_pte, ptl);
253	return ret;
254}
255
256/* Handles UFFDIO_CONTINUE for all shmem VMAs (shared or private). */
257static int mcontinue_atomic_pte(struct mm_struct *dst_mm,
258				pmd_t *dst_pmd,
259				struct vm_area_struct *dst_vma,
260				unsigned long dst_addr,
261				bool wp_copy)
262{
263	struct inode *inode = file_inode(dst_vma->vm_file);
264	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
265	struct folio *folio;
266	struct page *page;
267	int ret;
268
269	ret = shmem_get_folio(inode, pgoff, &folio, SGP_NOALLOC);
270	/* Our caller expects us to return -EFAULT if we failed to find folio */
271	if (ret == -ENOENT)
272		ret = -EFAULT;
273	if (ret)
274		goto out;
275	if (!folio) {
276		ret = -EFAULT;
277		goto out;
278	}
279
280	page = folio_file_page(folio, pgoff);
281	if (PageHWPoison(page)) {
282		ret = -EIO;
283		goto out_release;
284	}
285
286	ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
287				       page, false, wp_copy);
288	if (ret)
289		goto out_release;
290
291	folio_unlock(folio);
292	ret = 0;
293out:
294	return ret;
295out_release:
296	folio_unlock(folio);
297	folio_put(folio);
298	goto out;
299}
300
301static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
302{
303	pgd_t *pgd;
304	p4d_t *p4d;
305	pud_t *pud;
306
307	pgd = pgd_offset(mm, address);
308	p4d = p4d_alloc(mm, pgd, address);
309	if (!p4d)
310		return NULL;
311	pud = pud_alloc(mm, p4d, address);
312	if (!pud)
313		return NULL;
314	/*
315	 * Note that we didn't run this because the pmd was
316	 * missing, the *pmd may be already established and in
317	 * turn it may also be a trans_huge_pmd.
318	 */
319	return pmd_alloc(mm, pud, address);
320}
321
322#ifdef CONFIG_HUGETLB_PAGE
323/*
324 * __mcopy_atomic processing for HUGETLB vmas.  Note that this routine is
325 * called with mmap_lock held, it will release mmap_lock before returning.
326 */
327static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
328					      struct vm_area_struct *dst_vma,
329					      unsigned long dst_start,
330					      unsigned long src_start,
331					      unsigned long len,
332					      enum mcopy_atomic_mode mode,
333					      bool wp_copy)
334{
335	int vm_shared = dst_vma->vm_flags & VM_SHARED;
336	ssize_t err;
337	pte_t *dst_pte;
338	unsigned long src_addr, dst_addr;
339	long copied;
340	struct page *page;
341	unsigned long vma_hpagesize;
342	pgoff_t idx;
343	u32 hash;
344	struct address_space *mapping;
345
346	/*
347	 * There is no default zero huge page for all huge page sizes as
348	 * supported by hugetlb.  A PMD_SIZE huge pages may exist as used
349	 * by THP.  Since we can not reliably insert a zero page, this
350	 * feature is not supported.
351	 */
352	if (mode == MCOPY_ATOMIC_ZEROPAGE) {
353		mmap_read_unlock(dst_mm);
354		return -EINVAL;
355	}
356
357	src_addr = src_start;
358	dst_addr = dst_start;
359	copied = 0;
360	page = NULL;
361	vma_hpagesize = vma_kernel_pagesize(dst_vma);
362
363	/*
364	 * Validate alignment based on huge page size
365	 */
366	err = -EINVAL;
367	if (dst_start & (vma_hpagesize - 1) || len & (vma_hpagesize - 1))
368		goto out_unlock;
369
370retry:
371	/*
372	 * On routine entry dst_vma is set.  If we had to drop mmap_lock and
373	 * retry, dst_vma will be set to NULL and we must lookup again.
374	 */
375	if (!dst_vma) {
376		err = -ENOENT;
377		dst_vma = find_dst_vma(dst_mm, dst_start, len);
378		if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
379			goto out_unlock;
380
381		err = -EINVAL;
382		if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
383			goto out_unlock;
384
385		vm_shared = dst_vma->vm_flags & VM_SHARED;
386	}
387
388	/*
389	 * If not shared, ensure the dst_vma has a anon_vma.
390	 */
391	err = -ENOMEM;
392	if (!vm_shared) {
393		if (unlikely(anon_vma_prepare(dst_vma)))
394			goto out_unlock;
395	}
396
397	while (src_addr < src_start + len) {
398		BUG_ON(dst_addr >= dst_start + len);
399
400		/*
401		 * Serialize via vma_lock and hugetlb_fault_mutex.
402		 * vma_lock ensures the dst_pte remains valid even
403		 * in the case of shared pmds.  fault mutex prevents
404		 * races with other faulting threads.
405		 */
406		idx = linear_page_index(dst_vma, dst_addr);
407		mapping = dst_vma->vm_file->f_mapping;
408		hash = hugetlb_fault_mutex_hash(mapping, idx);
409		mutex_lock(&hugetlb_fault_mutex_table[hash]);
410		hugetlb_vma_lock_read(dst_vma);
411
412		err = -ENOMEM;
413		dst_pte = huge_pte_alloc(dst_mm, dst_vma, dst_addr, vma_hpagesize);
414		if (!dst_pte) {
415			hugetlb_vma_unlock_read(dst_vma);
416			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
417			goto out_unlock;
418		}
419
420		if (mode != MCOPY_ATOMIC_CONTINUE &&
421		    !huge_pte_none_mostly(huge_ptep_get(dst_pte))) {
422			err = -EEXIST;
423			hugetlb_vma_unlock_read(dst_vma);
424			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
425			goto out_unlock;
426		}
427
428		err = hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma,
429					       dst_addr, src_addr, mode, &page,
430					       wp_copy);
431
432		hugetlb_vma_unlock_read(dst_vma);
433		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
434
435		cond_resched();
436
437		if (unlikely(err == -ENOENT)) {
438			mmap_read_unlock(dst_mm);
439			BUG_ON(!page);
440
441			err = copy_huge_page_from_user(page,
442						(const void __user *)src_addr,
443						vma_hpagesize / PAGE_SIZE,
444						true);
445			if (unlikely(err)) {
446				err = -EFAULT;
447				goto out;
448			}
449			mmap_read_lock(dst_mm);
450
451			dst_vma = NULL;
452			goto retry;
453		} else
454			BUG_ON(page);
455
456		if (!err) {
457			dst_addr += vma_hpagesize;
458			src_addr += vma_hpagesize;
459			copied += vma_hpagesize;
460
461			if (fatal_signal_pending(current))
462				err = -EINTR;
463		}
464		if (err)
465			break;
466	}
467
468out_unlock:
469	mmap_read_unlock(dst_mm);
470out:
471	if (page)
472		put_page(page);
473	BUG_ON(copied < 0);
474	BUG_ON(err > 0);
475	BUG_ON(!copied && !err);
476	return copied ? copied : err;
477}
478#else /* !CONFIG_HUGETLB_PAGE */
479/* fail at build time if gcc attempts to use this */
480extern ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
481				      struct vm_area_struct *dst_vma,
482				      unsigned long dst_start,
483				      unsigned long src_start,
484				      unsigned long len,
485				      enum mcopy_atomic_mode mode,
486				      bool wp_copy);
487#endif /* CONFIG_HUGETLB_PAGE */
488
489static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
490						pmd_t *dst_pmd,
491						struct vm_area_struct *dst_vma,
492						unsigned long dst_addr,
493						unsigned long src_addr,
494						struct page **page,
495						enum mcopy_atomic_mode mode,
496						bool wp_copy)
497{
498	ssize_t err;
499
500	if (mode == MCOPY_ATOMIC_CONTINUE) {
501		return mcontinue_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
502					    wp_copy);
503	}
504
505	/*
506	 * The normal page fault path for a shmem will invoke the
507	 * fault, fill the hole in the file and COW it right away. The
508	 * result generates plain anonymous memory. So when we are
509	 * asked to fill an hole in a MAP_PRIVATE shmem mapping, we'll
510	 * generate anonymous memory directly without actually filling
511	 * the hole. For the MAP_PRIVATE case the robustness check
512	 * only happens in the pagetable (to verify it's still none)
513	 * and not in the radix tree.
514	 */
515	if (!(dst_vma->vm_flags & VM_SHARED)) {
516		if (mode == MCOPY_ATOMIC_NORMAL)
517			err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
518					       dst_addr, src_addr, page,
519					       wp_copy);
520		else
521			err = mfill_zeropage_pte(dst_mm, dst_pmd,
522						 dst_vma, dst_addr);
523	} else {
524		err = shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
525					     dst_addr, src_addr,
526					     mode != MCOPY_ATOMIC_NORMAL,
527					     wp_copy, page);
528	}
529
530	return err;
531}
532
533static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
534					      unsigned long dst_start,
535					      unsigned long src_start,
536					      unsigned long len,
537					      enum mcopy_atomic_mode mcopy_mode,
538					      atomic_t *mmap_changing,
539					      __u64 mode)
540{
541	struct vm_area_struct *dst_vma;
542	ssize_t err;
543	pmd_t *dst_pmd;
544	unsigned long src_addr, dst_addr;
545	long copied;
546	struct page *page;
547	bool wp_copy;
548
549	/*
550	 * Sanitize the command parameters:
551	 */
552	BUG_ON(dst_start & ~PAGE_MASK);
553	BUG_ON(len & ~PAGE_MASK);
554
555	/* Does the address range wrap, or is the span zero-sized? */
556	BUG_ON(src_start + len <= src_start);
557	BUG_ON(dst_start + len <= dst_start);
558
559	src_addr = src_start;
560	dst_addr = dst_start;
561	copied = 0;
562	page = NULL;
563retry:
564	mmap_read_lock(dst_mm);
565
566	/*
567	 * If memory mappings are changing because of non-cooperative
568	 * operation (e.g. mremap) running in parallel, bail out and
569	 * request the user to retry later
570	 */
571	err = -EAGAIN;
572	if (mmap_changing && atomic_read(mmap_changing))
573		goto out_unlock;
574
575	/*
576	 * Make sure the vma is not shared, that the dst range is
577	 * both valid and fully within a single existing vma.
578	 */
579	err = -ENOENT;
580	dst_vma = find_dst_vma(dst_mm, dst_start, len);
581	if (!dst_vma)
582		goto out_unlock;
583
584	err = -EINVAL;
585	/*
586	 * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
587	 * it will overwrite vm_ops, so vma_is_anonymous must return false.
588	 */
589	if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
590	    dst_vma->vm_flags & VM_SHARED))
591		goto out_unlock;
592
593	/*
594	 * validate 'mode' now that we know the dst_vma: don't allow
595	 * a wrprotect copy if the userfaultfd didn't register as WP.
596	 */
597	wp_copy = mode & UFFDIO_COPY_MODE_WP;
598	if (wp_copy && !(dst_vma->vm_flags & VM_UFFD_WP))
599		goto out_unlock;
600
601	/*
602	 * If this is a HUGETLB vma, pass off to appropriate routine
603	 */
604	if (is_vm_hugetlb_page(dst_vma))
605		return  __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start,
606					       src_start, len, mcopy_mode,
607					       wp_copy);
608
609	if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
610		goto out_unlock;
611	if (!vma_is_shmem(dst_vma) && mcopy_mode == MCOPY_ATOMIC_CONTINUE)
612		goto out_unlock;
613
614	/*
615	 * Ensure the dst_vma has a anon_vma or this page
616	 * would get a NULL anon_vma when moved in the
617	 * dst_vma.
618	 */
619	err = -ENOMEM;
620	if (!(dst_vma->vm_flags & VM_SHARED) &&
621	    unlikely(anon_vma_prepare(dst_vma)))
622		goto out_unlock;
623
624	while (src_addr < src_start + len) {
625		pmd_t dst_pmdval;
626
627		BUG_ON(dst_addr >= dst_start + len);
628
629		dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
630		if (unlikely(!dst_pmd)) {
631			err = -ENOMEM;
632			break;
633		}
634
635		dst_pmdval = pmdp_get_lockless(dst_pmd);
636		/*
637		 * If the dst_pmd is mapped as THP don't
638		 * override it and just be strict.
639		 */
640		if (unlikely(pmd_trans_huge(dst_pmdval))) {
641			err = -EEXIST;
642			break;
643		}
644		if (unlikely(pmd_none(dst_pmdval)) &&
645		    unlikely(__pte_alloc(dst_mm, dst_pmd))) {
646			err = -ENOMEM;
647			break;
648		}
649		/* If an huge pmd materialized from under us fail */
650		if (unlikely(pmd_trans_huge(*dst_pmd))) {
651			err = -EFAULT;
652			break;
653		}
654
655		BUG_ON(pmd_none(*dst_pmd));
656		BUG_ON(pmd_trans_huge(*dst_pmd));
657
658		err = mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
659				       src_addr, &page, mcopy_mode, wp_copy);
660		cond_resched();
661
662		if (unlikely(err == -ENOENT)) {
663			void *page_kaddr;
664
665			mmap_read_unlock(dst_mm);
666			BUG_ON(!page);
667
668			page_kaddr = kmap_local_page(page);
669			err = copy_from_user(page_kaddr,
670					     (const void __user *) src_addr,
671					     PAGE_SIZE);
672			kunmap_local(page_kaddr);
673			if (unlikely(err)) {
674				err = -EFAULT;
675				goto out;
676			}
677			flush_dcache_page(page);
678			goto retry;
679		} else
680			BUG_ON(page);
681
682		if (!err) {
683			dst_addr += PAGE_SIZE;
684			src_addr += PAGE_SIZE;
685			copied += PAGE_SIZE;
686
687			if (fatal_signal_pending(current))
688				err = -EINTR;
689		}
690		if (err)
691			break;
692	}
693
694out_unlock:
695	mmap_read_unlock(dst_mm);
696out:
697	if (page)
698		put_page(page);
699	BUG_ON(copied < 0);
700	BUG_ON(err > 0);
701	BUG_ON(!copied && !err);
702	return copied ? copied : err;
703}
704
705ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
706		     unsigned long src_start, unsigned long len,
707		     atomic_t *mmap_changing, __u64 mode)
708{
709	return __mcopy_atomic(dst_mm, dst_start, src_start, len,
710			      MCOPY_ATOMIC_NORMAL, mmap_changing, mode);
711}
712
713ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start,
714		       unsigned long len, atomic_t *mmap_changing)
715{
716	return __mcopy_atomic(dst_mm, start, 0, len, MCOPY_ATOMIC_ZEROPAGE,
717			      mmap_changing, 0);
718}
719
720ssize_t mcopy_continue(struct mm_struct *dst_mm, unsigned long start,
721		       unsigned long len, atomic_t *mmap_changing)
722{
723	return __mcopy_atomic(dst_mm, start, 0, len, MCOPY_ATOMIC_CONTINUE,
724			      mmap_changing, 0);
725}
726
727void uffd_wp_range(struct mm_struct *dst_mm, struct vm_area_struct *dst_vma,
728		   unsigned long start, unsigned long len, bool enable_wp)
729{
730	struct mmu_gather tlb;
731	pgprot_t newprot;
732
733	if (enable_wp)
734		newprot = vm_get_page_prot(dst_vma->vm_flags & ~(VM_WRITE));
735	else
736		newprot = vm_get_page_prot(dst_vma->vm_flags);
737
738	tlb_gather_mmu(&tlb, dst_mm);
739	change_protection(&tlb, dst_vma, start, start + len, newprot,
740			  enable_wp ? MM_CP_UFFD_WP : MM_CP_UFFD_WP_RESOLVE);
741	tlb_finish_mmu(&tlb);
742}
743
744int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
745			unsigned long len, bool enable_wp,
746			atomic_t *mmap_changing)
747{
748	struct vm_area_struct *dst_vma;
749	unsigned long page_mask;
750	int err;
751
752	/*
753	 * Sanitize the command parameters:
754	 */
755	BUG_ON(start & ~PAGE_MASK);
756	BUG_ON(len & ~PAGE_MASK);
757
758	/* Does the address range wrap, or is the span zero-sized? */
759	BUG_ON(start + len <= start);
760
761	mmap_read_lock(dst_mm);
762
763	/*
764	 * If memory mappings are changing because of non-cooperative
765	 * operation (e.g. mremap) running in parallel, bail out and
766	 * request the user to retry later
767	 */
768	err = -EAGAIN;
769	if (mmap_changing && atomic_read(mmap_changing))
770		goto out_unlock;
771
772	err = -ENOENT;
773	dst_vma = find_dst_vma(dst_mm, start, len);
774
775	if (!dst_vma)
776		goto out_unlock;
777	if (!userfaultfd_wp(dst_vma))
778		goto out_unlock;
779	if (!vma_can_userfault(dst_vma, dst_vma->vm_flags))
780		goto out_unlock;
781
782	if (is_vm_hugetlb_page(dst_vma)) {
783		err = -EINVAL;
784		page_mask = vma_kernel_pagesize(dst_vma) - 1;
785		if ((start & page_mask) || (len & page_mask))
786			goto out_unlock;
787	}
788
789	uffd_wp_range(dst_mm, dst_vma, start, len, enable_wp);
790
791	err = 0;
792out_unlock:
793	mmap_read_unlock(dst_mm);
794	return err;
795}
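
Similarly, the write-protect path at the end of the v6.2 listing (mwriteprotect_range() calling uffd_wp_range()) backs the UFFDIO_WRITEPROTECT ioctl. A minimal userspace sketch, assuming uffd was registered with UFFDIO_REGISTER_MODE_WP over the target range and that addr, len and enable are caller-supplied, might look like this:

#include <linux/userfaultfd.h>
#include <sys/ioctl.h>
#include <string.h>

/* Toggle userfaultfd write protection on [addr, addr + len); in the kernel
 * this reaches mwriteprotect_range() and uffd_wp_range() above. */
static int set_uffd_wp(int uffd, void *addr, size_t len, int enable)
{
	struct uffdio_writeprotect wp;

	memset(&wp, 0, sizeof(wp));
	wp.range.start = (unsigned long)addr;
	wp.range.len = len;
	wp.mode = enable ? UFFDIO_WRITEPROTECT_MODE_WP : 0;

	return ioctl(uffd, UFFDIO_WRITEPROTECT, &wp);
}

Write faults on the protected range are then reported with UFFD_PAGEFAULT_FLAG_WP set, and passing enable == 0 clears the protection via the MM_CP_UFFD_WP_RESOLVE path seen above.
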