v4.17
 
  1/*
  2 *  mm/userfaultfd.c
  3 *
  4 *  Copyright (C) 2015  Red Hat, Inc.
  5 *
  6 *  This work is licensed under the terms of the GNU GPL, version 2. See
  7 *  the COPYING file in the top-level directory.
  8 */
  9
 10#include <linux/mm.h>
 11#include <linux/sched/signal.h>
 12#include <linux/pagemap.h>
 13#include <linux/rmap.h>
 14#include <linux/swap.h>
 15#include <linux/swapops.h>
 16#include <linux/userfaultfd_k.h>
 17#include <linux/mmu_notifier.h>
 18#include <linux/hugetlb.h>
 19#include <linux/shmem_fs.h>
 20#include <asm/tlbflush.h>
 21#include "internal.h"
 22
 23static int mcopy_atomic_pte(struct mm_struct *dst_mm,
 24			    pmd_t *dst_pmd,
 25			    struct vm_area_struct *dst_vma,
 26			    unsigned long dst_addr,
 27			    unsigned long src_addr,
 28			    struct page **pagep)
 29{
 30	struct mem_cgroup *memcg;
 31	pte_t _dst_pte, *dst_pte;
 32	spinlock_t *ptl;
 33	void *page_kaddr;
 34	int ret;
 35	struct page *page;
 36
 37	if (!*pagep) {
 38		ret = -ENOMEM;
 39		page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr);
 40		if (!page)
 41			goto out;
 42
 43		page_kaddr = kmap_atomic(page);
 44		ret = copy_from_user(page_kaddr,
 45				     (const void __user *) src_addr,
 46				     PAGE_SIZE);
 47		kunmap_atomic(page_kaddr);
 48
 49		/* fallback to copy_from_user outside mmap_sem */
 50		if (unlikely(ret)) {
 51			ret = -EFAULT;
 52			*pagep = page;
 53			/* don't free the page */
 54			goto out;
 55		}
 56	} else {
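   		/*
   		 * A previous attempt already allocated this page and filled it
   		 * from userspace without mmap_sem held; consume it here.
   		 */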
 57		page = *pagep;
 58		*pagep = NULL;
 59	}
 60
 61	/*
 62	 * The memory barrier inside __SetPageUptodate makes sure that
 63	 * preceding stores to the page contents become visible before
 64	 * the set_pte_at() write.
 65	 */
 66	__SetPageUptodate(page);
 67
 68	ret = -ENOMEM;
 69	if (mem_cgroup_try_charge(page, dst_mm, GFP_KERNEL, &memcg, false))
 70		goto out_release;
 71
 72	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
 73	if (dst_vma->vm_flags & VM_WRITE)
 74		_dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte));
 75
 76	ret = -EEXIST;
 77	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
 78	if (!pte_none(*dst_pte))
 79		goto out_release_uncharge_unlock;
 80
 81	inc_mm_counter(dst_mm, MM_ANONPAGES);
 82	page_add_new_anon_rmap(page, dst_vma, dst_addr, false);
 83	mem_cgroup_commit_charge(page, memcg, false, false);
 84	lru_cache_add_active_or_unevictable(page, dst_vma);
 85
 86	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
 87
 88	/* No need to invalidate - it was non-present before */
 89	update_mmu_cache(dst_vma, dst_addr, dst_pte);
 90
 91	pte_unmap_unlock(dst_pte, ptl);
 92	ret = 0;
 93out:
 94	return ret;
 95out_release_uncharge_unlock:
 96	pte_unmap_unlock(dst_pte, ptl);
 97	mem_cgroup_cancel_charge(page, memcg, false);
 98out_release:
 99	put_page(page);
100	goto out;
101}
102
103static int mfill_zeropage_pte(struct mm_struct *dst_mm,
104			      pmd_t *dst_pmd,
105			      struct vm_area_struct *dst_vma,
106			      unsigned long dst_addr)
107{
108	pte_t _dst_pte, *dst_pte;
109	spinlock_t *ptl;
110	int ret;
111
112	_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
113					 dst_vma->vm_page_prot));
114	ret = -EEXIST;
115	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
116	if (!pte_none(*dst_pte))
117		goto out_unlock;
118	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
119	/* No need to invalidate - it was non-present before */
120	update_mmu_cache(dst_vma, dst_addr, dst_pte);
121	ret = 0;
122out_unlock:
123	pte_unmap_unlock(dst_pte, ptl);
124	return ret;
125}
126
127static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
128{
129	pgd_t *pgd;
130	p4d_t *p4d;
131	pud_t *pud;
132
133	pgd = pgd_offset(mm, address);
134	p4d = p4d_alloc(mm, pgd, address);
135	if (!p4d)
136		return NULL;
137	pud = pud_alloc(mm, p4d, address);
138	if (!pud)
139		return NULL;
140	/*
141	 * Note that we didn't run this because the pmd was
142	 * missing; the *pmd may already be established and in
143	 * turn it may also be a trans_huge_pmd.
144	 */
145	return pmd_alloc(mm, pud, address);
146}
147
148#ifdef CONFIG_HUGETLB_PAGE
149/*
150 * __mcopy_atomic processing for HUGETLB vmas.  Note that this routine is
151 * called with mmap_sem held, it will release mmap_sem before returning.
152 */
153static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
154					      struct vm_area_struct *dst_vma,
155					      unsigned long dst_start,
156					      unsigned long src_start,
157					      unsigned long len,
158					      bool zeropage)
159{
160	int vm_alloc_shared = dst_vma->vm_flags & VM_SHARED;
161	int vm_shared = dst_vma->vm_flags & VM_SHARED;
162	ssize_t err;
163	pte_t *dst_pte;
164	unsigned long src_addr, dst_addr;
165	long copied;
166	struct page *page;
167	struct hstate *h;
168	unsigned long vma_hpagesize;
169	pgoff_t idx;
170	u32 hash;
171	struct address_space *mapping;
172
173	/*
174	 * There is no default zero huge page for all huge page sizes as
175	 * supported by hugetlb.  A PMD_SIZE huge page may exist as used
176	 * by THP.  Since we can not reliably insert a zero page, this
177	 * feature is not supported.
178	 */
179	if (zeropage) {
180		up_read(&dst_mm->mmap_sem);
181		return -EINVAL;
182	}
183
184	src_addr = src_start;
185	dst_addr = dst_start;
186	copied = 0;
187	page = NULL;
188	vma_hpagesize = vma_kernel_pagesize(dst_vma);
189
190	/*
191	 * Validate alignment based on huge page size
192	 */
193	err = -EINVAL;
194	if (dst_start & (vma_hpagesize - 1) || len & (vma_hpagesize - 1))
195		goto out_unlock;
196
197retry:
198	/*
199	 * On routine entry dst_vma is set.  If we had to drop mmap_sem and
200	 * retry, dst_vma will be set to NULL and we must look it up again.
201	 */
202	if (!dst_vma) {
203		err = -ENOENT;
204		dst_vma = find_vma(dst_mm, dst_start);
205		if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
206			goto out_unlock;
207		/*
208		 * Only allow __mcopy_atomic_hugetlb on userfaultfd
209		 * registered ranges.
210		 */
211		if (!dst_vma->vm_userfaultfd_ctx.ctx)
212			goto out_unlock;
213
214		if (dst_start < dst_vma->vm_start ||
215		    dst_start + len > dst_vma->vm_end)
216			goto out_unlock;
217
218		err = -EINVAL;
219		if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
220			goto out_unlock;
221
222		vm_shared = dst_vma->vm_flags & VM_SHARED;
223	}
224
225	if (WARN_ON(dst_addr & (vma_hpagesize - 1) ||
226		    (len - copied) & (vma_hpagesize - 1)))
227		goto out_unlock;
228
229	/*
230	 * If not shared, ensure the dst_vma has a anon_vma.
231	 */
232	err = -ENOMEM;
233	if (!vm_shared) {
234		if (unlikely(anon_vma_prepare(dst_vma)))
235			goto out_unlock;
236	}
237
238	h = hstate_vma(dst_vma);
239
240	while (src_addr < src_start + len) {
241		pte_t dst_pteval;
242
243		BUG_ON(dst_addr >= dst_start + len);
244		VM_BUG_ON(dst_addr & ~huge_page_mask(h));
245
246		/*
247		 * Serialize via hugetlb_fault_mutex
248		 */
249		idx = linear_page_index(dst_vma, dst_addr);
250		mapping = dst_vma->vm_file->f_mapping;
251		hash = hugetlb_fault_mutex_hash(h, dst_mm, dst_vma, mapping,
252								idx, dst_addr);
253		mutex_lock(&hugetlb_fault_mutex_table[hash]);
254
255		err = -ENOMEM;
256		dst_pte = huge_pte_alloc(dst_mm, dst_addr, huge_page_size(h));
257		if (!dst_pte) {
258			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
259			goto out_unlock;
260		}
261
262		err = -EEXIST;
263		dst_pteval = huge_ptep_get(dst_pte);
264		if (!huge_pte_none(dst_pteval)) {
265			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
266			goto out_unlock;
267		}
268
269		err = hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma,
270						dst_addr, src_addr, &page);
271
272		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
273		vm_alloc_shared = vm_shared;
274
275		cond_resched();
276
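   		/*
   		 * -EFAULT from hugetlb_mcopy_atomic_pte means the source page
   		 * could not be copied with pagefaults disabled under mmap_sem:
   		 * drop the lock, do the copy here with faults allowed, then
   		 * look the vma up again and retry.
   		 */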
277		if (unlikely(err == -EFAULT)) {
278			up_read(&dst_mm->mmap_sem);
279			BUG_ON(!page);
280
281			err = copy_huge_page_from_user(page,
282						(const void __user *)src_addr,
283						pages_per_huge_page(h), true);
284			if (unlikely(err)) {
285				err = -EFAULT;
286				goto out;
287			}
288			down_read(&dst_mm->mmap_sem);
289
290			dst_vma = NULL;
291			goto retry;
292		} else
293			BUG_ON(page);
294
295		if (!err) {
296			dst_addr += vma_hpagesize;
297			src_addr += vma_hpagesize;
298			copied += vma_hpagesize;
299
300			if (fatal_signal_pending(current))
301				err = -EINTR;
302		}
303		if (err)
304			break;
305	}
306
307out_unlock:
308	up_read(&dst_mm->mmap_sem);
309out:
310	if (page) {
311		/*
312		 * We encountered an error and are about to free a newly
313		 * allocated huge page.
314		 *
315		 * Reservation handling is very subtle, and is different for
316		 * private and shared mappings.  See the routine
317		 * restore_reserve_on_error for details.  Unfortunately, we
318		 * can not call restore_reserve_on_error now as it would
319		 * require holding mmap_sem.
320		 *
321		 * If a reservation for the page existed in the reservation
322		 * map of a private mapping, the map was modified to indicate
323		 * the reservation was consumed when the page was allocated.
324		 * We clear the PagePrivate flag now so that the global
325		 * reserve count will not be incremented in free_huge_page.
326		 * The reservation map will still indicate the reservation
327		 * was consumed and possibly prevent later page allocation.
328		 * This is better than leaking a global reservation.  If no
329		 * reservation existed, it is still safe to clear PagePrivate
330		 * as no adjustments to reservation counts were made during
331		 * allocation.
332		 *
333		 * The reservation map for shared mappings indicates which
334		 * pages have reservations.  When a huge page is allocated
335		 * for an address with a reservation, no change is made to
336		 * the reserve map.  In this case PagePrivate will be set
337		 * to indicate that the global reservation count should be
338		 * incremented when the page is freed.  This is the desired
339		 * behavior.  However, when a huge page is allocated for an
340		 * address without a reservation a reservation entry is added
341		 * to the reservation map, and PagePrivate will not be set.
342		 * When the page is freed, the global reserve count will NOT
343		 * be incremented and it will appear as though we have leaked
344		 * a reserved page.  In this case, set PagePrivate so that the
345		 * global reserve count will be incremented to match the
346		 * reservation map entry which was created.
347		 *
348		 * Note that vm_alloc_shared is based on the flags of the vma
349		 * for which the page was originally allocated.  dst_vma could
350		 * be different or NULL on error.
351		 */
352		if (vm_alloc_shared)
353			SetPagePrivate(page);
354		else
355			ClearPagePrivate(page);
356		put_page(page);
357	}
358	BUG_ON(copied < 0);
359	BUG_ON(err > 0);
360	BUG_ON(!copied && !err);
361	return copied ? copied : err;
362}
363#else /* !CONFIG_HUGETLB_PAGE */
364/* fail at build time if gcc attempts to use this */
365extern ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
366				      struct vm_area_struct *dst_vma,
367				      unsigned long dst_start,
368				      unsigned long src_start,
369				      unsigned long len,
370				      bool zeropage);
371#endif /* CONFIG_HUGETLB_PAGE */
372
373static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
374						pmd_t *dst_pmd,
375						struct vm_area_struct *dst_vma,
376						unsigned long dst_addr,
377						unsigned long src_addr,
378						struct page **page,
379						bool zeropage)
380{
381	ssize_t err;
382
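   	/*
   	 * Anonymous vmas are filled directly; shmem-backed vmas are
   	 * handled by the shmem helpers.
   	 */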
383	if (vma_is_anonymous(dst_vma)) {
384		if (!zeropage)
385			err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
386					       dst_addr, src_addr, page);
387		else
388			err = mfill_zeropage_pte(dst_mm, dst_pmd,
389						 dst_vma, dst_addr);
390	} else {
391		if (!zeropage)
392			err = shmem_mcopy_atomic_pte(dst_mm, dst_pmd,
393						     dst_vma, dst_addr,
394						     src_addr, page);
395		else
396			err = shmem_mfill_zeropage_pte(dst_mm, dst_pmd,
397						       dst_vma, dst_addr);
398	}
399
400	return err;
401}
402
403static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
404					      unsigned long dst_start,
405					      unsigned long src_start,
406					      unsigned long len,
407					      bool zeropage)
408{
409	struct vm_area_struct *dst_vma;
410	ssize_t err;
411	pmd_t *dst_pmd;
412	unsigned long src_addr, dst_addr;
413	long copied;
414	struct page *page;
415
416	/*
417	 * Sanitize the command parameters:
418	 */
419	BUG_ON(dst_start & ~PAGE_MASK);
420	BUG_ON(len & ~PAGE_MASK);
421
422	/* Does the address range wrap, or is the span zero-sized? */
423	BUG_ON(src_start + len <= src_start);
424	BUG_ON(dst_start + len <= dst_start);
425
426	src_addr = src_start;
427	dst_addr = dst_start;
428	copied = 0;
429	page = NULL;
430retry:
431	down_read(&dst_mm->mmap_sem);
432
433	/*
434	 * Make sure the vma is not shared, that the dst range is
435	 * both valid and fully within a single existing vma.
436	 */
437	err = -ENOENT;
438	dst_vma = find_vma(dst_mm, dst_start);
439	if (!dst_vma)
440		goto out_unlock;
441	/*
442	 * Be strict and only allow __mcopy_atomic on userfaultfd
443	 * registered ranges to prevent userland errors going
444	 * unnoticed. As far as the VM consistency is concerned, it
445	 * would be perfectly safe to remove this check, but there's
446	 * no useful usage for __mcopy_atomic outside of userfaultfd
447	 * registered ranges. This is after all why these are ioctls
448	 * belonging to the userfaultfd and not syscalls.
449	 */
450	if (!dst_vma->vm_userfaultfd_ctx.ctx)
451		goto out_unlock;
452
453	if (dst_start < dst_vma->vm_start ||
454	    dst_start + len > dst_vma->vm_end)
455		goto out_unlock;
456
457	err = -EINVAL;
458	/*
459	 * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
460	 * it will overwrite vm_ops, so vma_is_anonymous must return false.
461	 */
462	if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
463	    dst_vma->vm_flags & VM_SHARED))
464		goto out_unlock;
465
466	/*
467	 * If this is a HUGETLB vma, pass off to appropriate routine
468	 */
469	if (is_vm_hugetlb_page(dst_vma))
470		return  __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start,
471						src_start, len, zeropage);
472
473	if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
474		goto out_unlock;
475
476	/*
477	 * Ensure the dst_vma has an anon_vma or this page
478	 * would get a NULL anon_vma when moved in the
479	 * dst_vma.
480	 */
481	err = -ENOMEM;
482	if (vma_is_anonymous(dst_vma) && unlikely(anon_vma_prepare(dst_vma)))
483		goto out_unlock;
484
485	while (src_addr < src_start + len) {
486		pmd_t dst_pmdval;
487
488		BUG_ON(dst_addr >= dst_start + len);
489
490		dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
491		if (unlikely(!dst_pmd)) {
492			err = -ENOMEM;
493			break;
494		}
495
496		dst_pmdval = pmd_read_atomic(dst_pmd);
497		/*
498		 * If the dst_pmd is mapped as THP don't
499		 * override it and just be strict.
500		 */
501		if (unlikely(pmd_trans_huge(dst_pmdval))) {
502			err = -EEXIST;
503			break;
504		}
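   		/*
   		 * No page table under this pmd yet: allocate one before
   		 * installing the pte.
   		 */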
505		if (unlikely(pmd_none(dst_pmdval)) &&
506		    unlikely(__pte_alloc(dst_mm, dst_pmd, dst_addr))) {
507			err = -ENOMEM;
508			break;
509		}
510		/* If a huge pmd materialized from under us, fail */
511		if (unlikely(pmd_trans_huge(*dst_pmd))) {
512			err = -EFAULT;
513			break;
514		}
515
516		BUG_ON(pmd_none(*dst_pmd));
517		BUG_ON(pmd_trans_huge(*dst_pmd));
518
519		err = mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
520				       src_addr, &page, zeropage);
521		cond_resched();
522
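   		/*
   		 * -EFAULT from mcopy_atomic_pte means copy_from_user failed in
   		 * atomic context (pagefaults disabled): drop mmap_sem, fill the
   		 * preallocated page from userspace where faults are allowed,
   		 * then retry.
   		 */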
523		if (unlikely(err == -EFAULT)) {
524			void *page_kaddr;
525
526			up_read(&dst_mm->mmap_sem);
527			BUG_ON(!page);
528
529			page_kaddr = kmap(page);
530			err = copy_from_user(page_kaddr,
531					     (const void __user *) src_addr,
532					     PAGE_SIZE);
533			kunmap(page);
534			if (unlikely(err)) {
535				err = -EFAULT;
536				goto out;
537			}
538			goto retry;
539		} else
540			BUG_ON(page);
541
542		if (!err) {
543			dst_addr += PAGE_SIZE;
544			src_addr += PAGE_SIZE;
545			copied += PAGE_SIZE;
546
547			if (fatal_signal_pending(current))
548				err = -EINTR;
549		}
550		if (err)
551			break;
552	}
553
554out_unlock:
555	up_read(&dst_mm->mmap_sem);
556out:
557	if (page)
558		put_page(page);
559	BUG_ON(copied < 0);
560	BUG_ON(err > 0);
561	BUG_ON(!copied && !err);
562	return copied ? copied : err;
563}
564
565ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
566		     unsigned long src_start, unsigned long len)
567{
568	return __mcopy_atomic(dst_mm, dst_start, src_start, len, false);
569}
570
571ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start,
572		       unsigned long len)
573{
574	return __mcopy_atomic(dst_mm, start, 0, len, true);
575}

v5.9
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 *  mm/userfaultfd.c
  4 *
  5 *  Copyright (C) 2015  Red Hat, Inc.
  6 */
  7
  8#include <linux/mm.h>
  9#include <linux/sched/signal.h>
 10#include <linux/pagemap.h>
 11#include <linux/rmap.h>
 12#include <linux/swap.h>
 13#include <linux/swapops.h>
 14#include <linux/userfaultfd_k.h>
 15#include <linux/mmu_notifier.h>
 16#include <linux/hugetlb.h>
 17#include <linux/shmem_fs.h>
 18#include <asm/tlbflush.h>
 19#include "internal.h"
 20
 21static __always_inline
 22struct vm_area_struct *find_dst_vma(struct mm_struct *dst_mm,
 23				    unsigned long dst_start,
 24				    unsigned long len)
 25{
 26	/*
 27	 * Make sure that the dst range is both valid and fully within a
 28	 * single existing vma.
 29	 */
 30	struct vm_area_struct *dst_vma;
 31
 32	dst_vma = find_vma(dst_mm, dst_start);
 33	if (!dst_vma)
 34		return NULL;
 35
 36	if (dst_start < dst_vma->vm_start ||
 37	    dst_start + len > dst_vma->vm_end)
 38		return NULL;
 39
 40	/*
 41	 * Check the vma is registered in uffd, this is required to
 42	 * enforce the VM_MAYWRITE check done at uffd registration
 43	 * time.
 44	 */
 45	if (!dst_vma->vm_userfaultfd_ctx.ctx)
 46		return NULL;
 47
 48	return dst_vma;
 49}
 50
 51static int mcopy_atomic_pte(struct mm_struct *dst_mm,
 52			    pmd_t *dst_pmd,
 53			    struct vm_area_struct *dst_vma,
 54			    unsigned long dst_addr,
 55			    unsigned long src_addr,
 56			    struct page **pagep,
 57			    bool wp_copy)
 58{
 59	pte_t _dst_pte, *dst_pte;
 60	spinlock_t *ptl;
 61	void *page_kaddr;
 62	int ret;
 63	struct page *page;
 64	pgoff_t offset, max_off;
 65	struct inode *inode;
 66
 67	if (!*pagep) {
 68		ret = -ENOMEM;
 69		page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr);
 70		if (!page)
 71			goto out;
 72
 73		page_kaddr = kmap_atomic(page);
 74		ret = copy_from_user(page_kaddr,
 75				     (const void __user *) src_addr,
 76				     PAGE_SIZE);
 77		kunmap_atomic(page_kaddr);
 78
 79		/* fallback to copy_from_user outside mmap_lock */
 80		if (unlikely(ret)) {
 81			ret = -ENOENT;
 82			*pagep = page;
 83			/* don't free the page */
 84			goto out;
 85		}
 86	} else {
 87		page = *pagep;
 88		*pagep = NULL;
 89	}
 90
 91	/*
 92	 * The memory barrier inside __SetPageUptodate makes sure that
 93	 * preceding stores to the page contents become visible before
 94	 * the set_pte_at() write.
 95	 */
 96	__SetPageUptodate(page);
 97
 98	ret = -ENOMEM;
 99	if (mem_cgroup_charge(page, dst_mm, GFP_KERNEL))
100		goto out_release;
101
102	_dst_pte = pte_mkdirty(mk_pte(page, dst_vma->vm_page_prot));
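   	/*
   	 * An UFFDIO_COPY_MODE_WP copy installs the pte uffd-write-protected
   	 * instead of writable, so the first write faults back to userspace.
   	 */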
103	if (dst_vma->vm_flags & VM_WRITE) {
104		if (wp_copy)
105			_dst_pte = pte_mkuffd_wp(_dst_pte);
106		else
107			_dst_pte = pte_mkwrite(_dst_pte);
108	}
109
110	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
111	if (dst_vma->vm_file) {
112		/* the shmem MAP_PRIVATE case requires checking the i_size */
113		inode = dst_vma->vm_file->f_inode;
114		offset = linear_page_index(dst_vma, dst_addr);
115		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
116		ret = -EFAULT;
117		if (unlikely(offset >= max_off))
118			goto out_release_uncharge_unlock;
119	}
120	ret = -EEXIST;
121	if (!pte_none(*dst_pte))
122		goto out_release_uncharge_unlock;
123
124	inc_mm_counter(dst_mm, MM_ANONPAGES);
125	page_add_new_anon_rmap(page, dst_vma, dst_addr, false);
126	lru_cache_add_inactive_or_unevictable(page, dst_vma);
127
128	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
129
130	/* No need to invalidate - it was non-present before */
131	update_mmu_cache(dst_vma, dst_addr, dst_pte);
132
133	pte_unmap_unlock(dst_pte, ptl);
134	ret = 0;
135out:
136	return ret;
137out_release_uncharge_unlock:
138	pte_unmap_unlock(dst_pte, ptl);
139out_release:
140	put_page(page);
141	goto out;
142}
143
144static int mfill_zeropage_pte(struct mm_struct *dst_mm,
145			      pmd_t *dst_pmd,
146			      struct vm_area_struct *dst_vma,
147			      unsigned long dst_addr)
148{
149	pte_t _dst_pte, *dst_pte;
150	spinlock_t *ptl;
151	int ret;
152	pgoff_t offset, max_off;
153	struct inode *inode;
154
155	_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
156					 dst_vma->vm_page_prot));
157	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
158	if (dst_vma->vm_file) {
159		/* the shmem MAP_PRIVATE case requires checking the i_size */
160		inode = dst_vma->vm_file->f_inode;
161		offset = linear_page_index(dst_vma, dst_addr);
162		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
163		ret = -EFAULT;
164		if (unlikely(offset >= max_off))
165			goto out_unlock;
166	}
167	ret = -EEXIST;
168	if (!pte_none(*dst_pte))
169		goto out_unlock;
170	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
171	/* No need to invalidate - it was non-present before */
172	update_mmu_cache(dst_vma, dst_addr, dst_pte);
173	ret = 0;
174out_unlock:
175	pte_unmap_unlock(dst_pte, ptl);
176	return ret;
177}
178
179static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
180{
181	pgd_t *pgd;
182	p4d_t *p4d;
183	pud_t *pud;
184
185	pgd = pgd_offset(mm, address);
186	p4d = p4d_alloc(mm, pgd, address);
187	if (!p4d)
188		return NULL;
189	pud = pud_alloc(mm, p4d, address);
190	if (!pud)
191		return NULL;
192	/*
193	 * Note that we didn't run this because the pmd was
194	 * missing; the *pmd may already be established and in
195	 * turn it may also be a trans_huge_pmd.
196	 */
197	return pmd_alloc(mm, pud, address);
198}
199
200#ifdef CONFIG_HUGETLB_PAGE
201/*
202 * __mcopy_atomic processing for HUGETLB vmas.  Note that this routine is
203 * called with mmap_lock held, it will release mmap_lock before returning.
204 */
205static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
206					      struct vm_area_struct *dst_vma,
207					      unsigned long dst_start,
208					      unsigned long src_start,
209					      unsigned long len,
210					      bool zeropage)
211{
212	int vm_alloc_shared = dst_vma->vm_flags & VM_SHARED;
213	int vm_shared = dst_vma->vm_flags & VM_SHARED;
214	ssize_t err;
215	pte_t *dst_pte;
216	unsigned long src_addr, dst_addr;
217	long copied;
218	struct page *page;
219	unsigned long vma_hpagesize;
220	pgoff_t idx;
221	u32 hash;
222	struct address_space *mapping;
223
224	/*
225	 * There is no default zero huge page for all huge page sizes as
226	 * supported by hugetlb.  A PMD_SIZE huge page may exist as used
227	 * by THP.  Since we can not reliably insert a zero page, this
228	 * feature is not supported.
229	 */
230	if (zeropage) {
231		mmap_read_unlock(dst_mm);
232		return -EINVAL;
233	}
234
235	src_addr = src_start;
236	dst_addr = dst_start;
237	copied = 0;
238	page = NULL;
239	vma_hpagesize = vma_kernel_pagesize(dst_vma);
240
241	/*
242	 * Validate alignment based on huge page size
243	 */
244	err = -EINVAL;
245	if (dst_start & (vma_hpagesize - 1) || len & (vma_hpagesize - 1))
246		goto out_unlock;
247
248retry:
249	/*
250	 * On routine entry dst_vma is set.  If we had to drop mmap_lock and
251	 * retry, dst_vma will be set to NULL and we must look it up again.
252	 */
253	if (!dst_vma) {
254		err = -ENOENT;
255		dst_vma = find_dst_vma(dst_mm, dst_start, len);
256		if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
257			goto out_unlock;
258
259		err = -EINVAL;
260		if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
261			goto out_unlock;
262
263		vm_shared = dst_vma->vm_flags & VM_SHARED;
264	}
265
266	/*
267	 * If not shared, ensure the dst_vma has an anon_vma.
268	 */
269	err = -ENOMEM;
270	if (!vm_shared) {
271		if (unlikely(anon_vma_prepare(dst_vma)))
272			goto out_unlock;
273	}
274
275	while (src_addr < src_start + len) {
276		pte_t dst_pteval;
277
278		BUG_ON(dst_addr >= dst_start + len);
279
280		/*
281		 * Serialize via i_mmap_rwsem and hugetlb_fault_mutex.
282		 * i_mmap_rwsem ensures the dst_pte remains valid even
283		 * in the case of shared pmds.  fault mutex prevents
284		 * races with other faulting threads.
285		 */
286		mapping = dst_vma->vm_file->f_mapping;
287		i_mmap_lock_read(mapping);
288		idx = linear_page_index(dst_vma, dst_addr);
289		hash = hugetlb_fault_mutex_hash(mapping, idx);
290		mutex_lock(&hugetlb_fault_mutex_table[hash]);
291
292		err = -ENOMEM;
293		dst_pte = huge_pte_alloc(dst_mm, dst_addr, vma_hpagesize);
294		if (!dst_pte) {
295			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
296			i_mmap_unlock_read(mapping);
297			goto out_unlock;
298		}
299
300		err = -EEXIST;
301		dst_pteval = huge_ptep_get(dst_pte);
302		if (!huge_pte_none(dst_pteval)) {
303			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
304			i_mmap_unlock_read(mapping);
305			goto out_unlock;
306		}
307
308		err = hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma,
309						dst_addr, src_addr, &page);
310
311		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
312		i_mmap_unlock_read(mapping);
313		vm_alloc_shared = vm_shared;
314
315		cond_resched();
316
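   		/*
   		 * -ENOENT means the copy must be done without mmap_lock held:
   		 * drop the lock, copy the source page here with faults allowed,
   		 * then look the vma up again and retry.
   		 */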
317		if (unlikely(err == -ENOENT)) {
318			mmap_read_unlock(dst_mm);
319			BUG_ON(!page);
320
321			err = copy_huge_page_from_user(page,
322						(const void __user *)src_addr,
323						vma_hpagesize / PAGE_SIZE,
324						true);
325			if (unlikely(err)) {
326				err = -EFAULT;
327				goto out;
328			}
329			mmap_read_lock(dst_mm);
330
331			dst_vma = NULL;
332			goto retry;
333		} else
334			BUG_ON(page);
335
336		if (!err) {
337			dst_addr += vma_hpagesize;
338			src_addr += vma_hpagesize;
339			copied += vma_hpagesize;
340
341			if (fatal_signal_pending(current))
342				err = -EINTR;
343		}
344		if (err)
345			break;
346	}
347
348out_unlock:
349	mmap_read_unlock(dst_mm);
350out:
351	if (page) {
352		/*
353		 * We encountered an error and are about to free a newly
354		 * allocated huge page.
355		 *
356		 * Reservation handling is very subtle, and is different for
357		 * private and shared mappings.  See the routine
358		 * restore_reserve_on_error for details.  Unfortunately, we
359		 * can not call restore_reserve_on_error now as it would
360		 * require holding mmap_lock.
361		 *
362		 * If a reservation for the page existed in the reservation
363		 * map of a private mapping, the map was modified to indicate
364		 * the reservation was consumed when the page was allocated.
365		 * We clear the PagePrivate flag now so that the global
366		 * reserve count will not be incremented in free_huge_page.
367		 * The reservation map will still indicate the reservation
368		 * was consumed and possibly prevent later page allocation.
369		 * This is better than leaking a global reservation.  If no
370		 * reservation existed, it is still safe to clear PagePrivate
371		 * as no adjustments to reservation counts were made during
372		 * allocation.
373		 *
374		 * The reservation map for shared mappings indicates which
375		 * pages have reservations.  When a huge page is allocated
376		 * for an address with a reservation, no change is made to
377		 * the reserve map.  In this case PagePrivate will be set
378		 * to indicate that the global reservation count should be
379		 * incremented when the page is freed.  This is the desired
380		 * behavior.  However, when a huge page is allocated for an
381		 * address without a reservation a reservation entry is added
382		 * to the reservation map, and PagePrivate will not be set.
383		 * When the page is freed, the global reserve count will NOT
384		 * be incremented and it will appear as though we have leaked
385		 * a reserved page.  In this case, set PagePrivate so that the
386		 * global reserve count will be incremented to match the
387		 * reservation map entry which was created.
388		 *
389		 * Note that vm_alloc_shared is based on the flags of the vma
390		 * for which the page was originally allocated.  dst_vma could
391		 * be different or NULL on error.
392		 */
393		if (vm_alloc_shared)
394			SetPagePrivate(page);
395		else
396			ClearPagePrivate(page);
397		put_page(page);
398	}
399	BUG_ON(copied < 0);
400	BUG_ON(err > 0);
401	BUG_ON(!copied && !err);
402	return copied ? copied : err;
403}
404#else /* !CONFIG_HUGETLB_PAGE */
405/* fail at build time if gcc attempts to use this */
406extern ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
407				      struct vm_area_struct *dst_vma,
408				      unsigned long dst_start,
409				      unsigned long src_start,
410				      unsigned long len,
411				      bool zeropage);
412#endif /* CONFIG_HUGETLB_PAGE */
413
414static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
415						pmd_t *dst_pmd,
416						struct vm_area_struct *dst_vma,
417						unsigned long dst_addr,
418						unsigned long src_addr,
419						struct page **page,
420						bool zeropage,
421						bool wp_copy)
422{
423	ssize_t err;
424
425	/*
426	 * The normal page fault path for a shmem will invoke the
427	 * fault, fill the hole in the file and COW it right away. The
428	 * result generates plain anonymous memory. So when we are
429	 * asked to fill a hole in a MAP_PRIVATE shmem mapping, we'll
430	 * generate anonymous memory directly without actually filling
431	 * the hole. For the MAP_PRIVATE case the robustness check
432	 * only happens in the pagetable (to verify it's still none)
433	 * and not in the radix tree.
434	 */
435	if (!(dst_vma->vm_flags & VM_SHARED)) {
436		if (!zeropage)
437			err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
438					       dst_addr, src_addr, page,
439					       wp_copy);
440		else
441			err = mfill_zeropage_pte(dst_mm, dst_pmd,
442						 dst_vma, dst_addr);
443	} else {
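   		/*
   		 * uffd-wp copies are only implemented for the non-shared case
   		 * above, so wp_copy must be false here.
   		 */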
444		VM_WARN_ON_ONCE(wp_copy);
445		if (!zeropage)
446			err = shmem_mcopy_atomic_pte(dst_mm, dst_pmd,
447						     dst_vma, dst_addr,
448						     src_addr, page);
449		else
450			err = shmem_mfill_zeropage_pte(dst_mm, dst_pmd,
451						       dst_vma, dst_addr);
452	}
453
454	return err;
455}
456
457static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
458					      unsigned long dst_start,
459					      unsigned long src_start,
460					      unsigned long len,
461					      bool zeropage,
462					      bool *mmap_changing,
463					      __u64 mode)
464{
465	struct vm_area_struct *dst_vma;
466	ssize_t err;
467	pmd_t *dst_pmd;
468	unsigned long src_addr, dst_addr;
469	long copied;
470	struct page *page;
471	bool wp_copy;
472
473	/*
474	 * Sanitize the command parameters:
475	 */
476	BUG_ON(dst_start & ~PAGE_MASK);
477	BUG_ON(len & ~PAGE_MASK);
478
479	/* Does the address range wrap, or is the span zero-sized? */
480	BUG_ON(src_start + len <= src_start);
481	BUG_ON(dst_start + len <= dst_start);
482
483	src_addr = src_start;
484	dst_addr = dst_start;
485	copied = 0;
486	page = NULL;
487retry:
488	mmap_read_lock(dst_mm);
489
490	/*
491	 * If memory mappings are changing because of non-cooperative
492	 * operation (e.g. mremap) running in parallel, bail out and
493	 * request the user to retry later
494	 */
495	err = -EAGAIN;
496	if (mmap_changing && READ_ONCE(*mmap_changing))
497		goto out_unlock;
498
499	/*
500	 * Make sure the vma is not shared, that the dst range is
501	 * both valid and fully within a single existing vma.
502	 */
503	err = -ENOENT;
504	dst_vma = find_dst_vma(dst_mm, dst_start, len);
505	if (!dst_vma)
506		goto out_unlock;
507
508	err = -EINVAL;
509	/*
510	 * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
511	 * it will overwrite vm_ops, so vma_is_anonymous must return false.
512	 */
513	if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
514	    dst_vma->vm_flags & VM_SHARED))
515		goto out_unlock;
516
517	/*
518	 * validate 'mode' now that we know the dst_vma: don't allow
519	 * a wrprotect copy if the userfaultfd didn't register as WP.
520	 */
521	wp_copy = mode & UFFDIO_COPY_MODE_WP;
522	if (wp_copy && !(dst_vma->vm_flags & VM_UFFD_WP))
523		goto out_unlock;
524
525	/*
526	 * If this is a HUGETLB vma, pass off to appropriate routine
527	 */
528	if (is_vm_hugetlb_page(dst_vma))
529		return  __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start,
530						src_start, len, zeropage);
531
532	if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
533		goto out_unlock;
534
535	/*
536	 * Ensure the dst_vma has an anon_vma or this page
537	 * would get a NULL anon_vma when moved in the
538	 * dst_vma.
539	 */
540	err = -ENOMEM;
541	if (!(dst_vma->vm_flags & VM_SHARED) &&
542	    unlikely(anon_vma_prepare(dst_vma)))
543		goto out_unlock;
544
545	while (src_addr < src_start + len) {
546		pmd_t dst_pmdval;
547
548		BUG_ON(dst_addr >= dst_start + len);
549
550		dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
551		if (unlikely(!dst_pmd)) {
552			err = -ENOMEM;
553			break;
554		}
555
556		dst_pmdval = pmd_read_atomic(dst_pmd);
557		/*
558		 * If the dst_pmd is mapped as THP don't
559		 * override it and just be strict.
560		 */
561		if (unlikely(pmd_trans_huge(dst_pmdval))) {
562			err = -EEXIST;
563			break;
564		}
565		if (unlikely(pmd_none(dst_pmdval)) &&
566		    unlikely(__pte_alloc(dst_mm, dst_pmd))) {
567			err = -ENOMEM;
568			break;
569		}
570		/* If a huge pmd materialized from under us, fail */
571		if (unlikely(pmd_trans_huge(*dst_pmd))) {
572			err = -EFAULT;
573			break;
574		}
575
576		BUG_ON(pmd_none(*dst_pmd));
577		BUG_ON(pmd_trans_huge(*dst_pmd));
578
579		err = mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
580				       src_addr, &page, zeropage, wp_copy);
581		cond_resched();
582
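   		/*
   		 * -ENOENT means copy_from_user failed with pagefaults disabled
   		 * under mmap_lock: drop the lock, fill the preallocated page
   		 * from userspace, then retry.
   		 */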
583		if (unlikely(err == -ENOENT)) {
584			void *page_kaddr;
585
586			mmap_read_unlock(dst_mm);
587			BUG_ON(!page);
588
589			page_kaddr = kmap(page);
590			err = copy_from_user(page_kaddr,
591					     (const void __user *) src_addr,
592					     PAGE_SIZE);
593			kunmap(page);
594			if (unlikely(err)) {
595				err = -EFAULT;
596				goto out;
597			}
598			goto retry;
599		} else
600			BUG_ON(page);
601
602		if (!err) {
603			dst_addr += PAGE_SIZE;
604			src_addr += PAGE_SIZE;
605			copied += PAGE_SIZE;
606
607			if (fatal_signal_pending(current))
608				err = -EINTR;
609		}
610		if (err)
611			break;
612	}
613
614out_unlock:
615	mmap_read_unlock(dst_mm);
616out:
617	if (page)
618		put_page(page);
619	BUG_ON(copied < 0);
620	BUG_ON(err > 0);
621	BUG_ON(!copied && !err);
622	return copied ? copied : err;
623}
624
625ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
626		     unsigned long src_start, unsigned long len,
627		     bool *mmap_changing, __u64 mode)
628{
629	return __mcopy_atomic(dst_mm, dst_start, src_start, len, false,
630			      mmap_changing, mode);
631}
632
633ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start,
634		       unsigned long len, bool *mmap_changing)
635{
636	return __mcopy_atomic(dst_mm, start, 0, len, true, mmap_changing, 0);
637}
638
639int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
640			unsigned long len, bool enable_wp, bool *mmap_changing)
641{
642	struct vm_area_struct *dst_vma;
643	pgprot_t newprot;
644	int err;
645
646	/*
647	 * Sanitize the command parameters:
648	 */
649	BUG_ON(start & ~PAGE_MASK);
650	BUG_ON(len & ~PAGE_MASK);
651
652	/* Does the address range wrap, or is the span zero-sized? */
653	BUG_ON(start + len <= start);
654
655	mmap_read_lock(dst_mm);
656
657	/*
658	 * If memory mappings are changing because of non-cooperative
659	 * operation (e.g. mremap) running in parallel, bail out and
660	 * request the user to retry later
661	 */
662	err = -EAGAIN;
663	if (mmap_changing && READ_ONCE(*mmap_changing))
664		goto out_unlock;
665
666	err = -ENOENT;
667	dst_vma = find_dst_vma(dst_mm, start, len);
668	/*
669	 * Make sure the vma is not shared, that the dst range is
670	 * both valid and fully within a single existing vma.
671	 */
672	if (!dst_vma || (dst_vma->vm_flags & VM_SHARED))
673		goto out_unlock;
674	if (!userfaultfd_wp(dst_vma))
675		goto out_unlock;
676	if (!vma_is_anonymous(dst_vma))
677		goto out_unlock;
678
679	if (enable_wp)
680		newprot = vm_get_page_prot(dst_vma->vm_flags & ~(VM_WRITE));
681	else
682		newprot = vm_get_page_prot(dst_vma->vm_flags);
683
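   	/*
   	 * change_protection() applies newprot and, depending on the
   	 * MM_CP_UFFD_WP / MM_CP_UFFD_WP_RESOLVE flag, sets or clears the
   	 * uffd-wp bit on each pte in the range.
   	 */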
684	change_protection(dst_vma, start, start + len, newprot,
685			  enable_wp ? MM_CP_UFFD_WP : MM_CP_UFFD_WP_RESOLVE);
686
687	err = 0;
688out_unlock:
689	mmap_read_unlock(dst_mm);
690	return err;
691}