v4.17
 
  1/*
  2 *  mm/userfaultfd.c
  3 *
  4 *  Copyright (C) 2015  Red Hat, Inc.
  5 *
  6 *  This work is licensed under the terms of the GNU GPL, version 2. See
  7 *  the COPYING file in the top-level directory.
  8 */
  9
 10#include <linux/mm.h>
 11#include <linux/sched/signal.h>
 12#include <linux/pagemap.h>
 13#include <linux/rmap.h>
 14#include <linux/swap.h>
 15#include <linux/swapops.h>
 16#include <linux/userfaultfd_k.h>
 17#include <linux/mmu_notifier.h>
 18#include <linux/hugetlb.h>
 19#include <linux/shmem_fs.h>
 20#include <asm/tlbflush.h>
 21#include "internal.h"
 22
 23static int mcopy_atomic_pte(struct mm_struct *dst_mm,
 24			    pmd_t *dst_pmd,
 25			    struct vm_area_struct *dst_vma,
 26			    unsigned long dst_addr,
 27			    unsigned long src_addr,
 28			    struct page **pagep)
 29{
 30	struct mem_cgroup *memcg;
 31	pte_t _dst_pte, *dst_pte;
 32	spinlock_t *ptl;
 33	void *page_kaddr;
 34	int ret;
 35	struct page *page;
 36
 37	if (!*pagep) {
 38		ret = -ENOMEM;
 39		page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr);
 40		if (!page)
 41			goto out;
 42
 43		page_kaddr = kmap_atomic(page);
 44		ret = copy_from_user(page_kaddr,
 45				     (const void __user *) src_addr,
 46				     PAGE_SIZE);
 47		kunmap_atomic(page_kaddr);
 48
 49		/* fallback to copy_from_user outside mmap_sem */
 50		if (unlikely(ret)) {
 51			ret = -EFAULT;
 52			*pagep = page;
 53			/* don't free the page */
 54			goto out;
 55		}
 56	} else {
 57		page = *pagep;
 58		*pagep = NULL;
 59	}
 60
 61	/*
 62	 * The memory barrier inside __SetPageUptodate makes sure that
  63	 * preceding stores to the page contents become visible before
 64	 * the set_pte_at() write.
 65	 */
 66	__SetPageUptodate(page);
 67
 68	ret = -ENOMEM;
 69	if (mem_cgroup_try_charge(page, dst_mm, GFP_KERNEL, &memcg, false))
 70		goto out_release;
 71
 72	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
 73	if (dst_vma->vm_flags & VM_WRITE)
 74		_dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte));
 75
 76	ret = -EEXIST;
 77	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
 78	if (!pte_none(*dst_pte))
 79		goto out_release_uncharge_unlock;
 80
 81	inc_mm_counter(dst_mm, MM_ANONPAGES);
 82	page_add_new_anon_rmap(page, dst_vma, dst_addr, false);
 83	mem_cgroup_commit_charge(page, memcg, false, false);
 84	lru_cache_add_active_or_unevictable(page, dst_vma);
 85
 86	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
 87
 88	/* No need to invalidate - it was non-present before */
 89	update_mmu_cache(dst_vma, dst_addr, dst_pte);
 90
 91	pte_unmap_unlock(dst_pte, ptl);
 92	ret = 0;
 93out:
 94	return ret;
 95out_release_uncharge_unlock:
 96	pte_unmap_unlock(dst_pte, ptl);
 97	mem_cgroup_cancel_charge(page, memcg, false);
 98out_release:
 99	put_page(page);
100	goto out;
101}
102
103static int mfill_zeropage_pte(struct mm_struct *dst_mm,
104			      pmd_t *dst_pmd,
105			      struct vm_area_struct *dst_vma,
106			      unsigned long dst_addr)
107{
108	pte_t _dst_pte, *dst_pte;
109	spinlock_t *ptl;
110	int ret;
111
112	_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
113					 dst_vma->vm_page_prot));
114	ret = -EEXIST;
115	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
116	if (!pte_none(*dst_pte))
117		goto out_unlock;
118	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
119	/* No need to invalidate - it was non-present before */
120	update_mmu_cache(dst_vma, dst_addr, dst_pte);
121	ret = 0;
122out_unlock:
123	pte_unmap_unlock(dst_pte, ptl);
124	return ret;
125}
126
127static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
128{
129	pgd_t *pgd;
130	p4d_t *p4d;
131	pud_t *pud;
132
133	pgd = pgd_offset(mm, address);
134	p4d = p4d_alloc(mm, pgd, address);
135	if (!p4d)
136		return NULL;
137	pud = pud_alloc(mm, p4d, address);
138	if (!pud)
139		return NULL;
140	/*
141	 * Note that we didn't run this because the pmd was
142	 * missing, the *pmd may be already established and in
143	 * turn it may also be a trans_huge_pmd.
144	 */
145	return pmd_alloc(mm, pud, address);
146}
147
148#ifdef CONFIG_HUGETLB_PAGE
149/*
150 * __mcopy_atomic processing for HUGETLB vmas.  Note that this routine is
151 * called with mmap_sem held, it will release mmap_sem before returning.
152 */
153static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
154					      struct vm_area_struct *dst_vma,
155					      unsigned long dst_start,
156					      unsigned long src_start,
157					      unsigned long len,
158					      bool zeropage)
159{
160	int vm_alloc_shared = dst_vma->vm_flags & VM_SHARED;
161	int vm_shared = dst_vma->vm_flags & VM_SHARED;
162	ssize_t err;
163	pte_t *dst_pte;
164	unsigned long src_addr, dst_addr;
165	long copied;
166	struct page *page;
167	struct hstate *h;
168	unsigned long vma_hpagesize;
169	pgoff_t idx;
170	u32 hash;
171	struct address_space *mapping;
172
173	/*
174	 * There is no default zero huge page for all huge page sizes as
 175 *  supported by hugetlb.  A PMD_SIZE huge page may exist as used
176	 * by THP.  Since we can not reliably insert a zero page, this
177	 * feature is not supported.
178	 */
179	if (zeropage) {
180		up_read(&dst_mm->mmap_sem);
181		return -EINVAL;
182	}
183
184	src_addr = src_start;
185	dst_addr = dst_start;
186	copied = 0;
187	page = NULL;
188	vma_hpagesize = vma_kernel_pagesize(dst_vma);
189
190	/*
191	 * Validate alignment based on huge page size
192	 */
193	err = -EINVAL;
194	if (dst_start & (vma_hpagesize - 1) || len & (vma_hpagesize - 1))
195		goto out_unlock;
196
197retry:
198	/*
199	 * On routine entry dst_vma is set.  If we had to drop mmap_sem and
200	 * retry, dst_vma will be set to NULL and we must lookup again.
201	 */
202	if (!dst_vma) {
203		err = -ENOENT;
204		dst_vma = find_vma(dst_mm, dst_start);
205		if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
206			goto out_unlock;
207		/*
208		 * Only allow __mcopy_atomic_hugetlb on userfaultfd
209		 * registered ranges.
210		 */
211		if (!dst_vma->vm_userfaultfd_ctx.ctx)
212			goto out_unlock;
213
214		if (dst_start < dst_vma->vm_start ||
215		    dst_start + len > dst_vma->vm_end)
216			goto out_unlock;
217
218		err = -EINVAL;
219		if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
220			goto out_unlock;
221
222		vm_shared = dst_vma->vm_flags & VM_SHARED;
223	}
224
225	if (WARN_ON(dst_addr & (vma_hpagesize - 1) ||
226		    (len - copied) & (vma_hpagesize - 1)))
227		goto out_unlock;
228
229	/*
 230	 * If not shared, ensure the dst_vma has an anon_vma.
231	 */
232	err = -ENOMEM;
233	if (!vm_shared) {
234		if (unlikely(anon_vma_prepare(dst_vma)))
235			goto out_unlock;
236	}
237
238	h = hstate_vma(dst_vma);
239
240	while (src_addr < src_start + len) {
241		pte_t dst_pteval;
242
243		BUG_ON(dst_addr >= dst_start + len);
244		VM_BUG_ON(dst_addr & ~huge_page_mask(h));
245
246		/*
247		 * Serialize via hugetlb_fault_mutex
248		 */
249		idx = linear_page_index(dst_vma, dst_addr);
250		mapping = dst_vma->vm_file->f_mapping;
251		hash = hugetlb_fault_mutex_hash(h, dst_mm, dst_vma, mapping,
252								idx, dst_addr);
253		mutex_lock(&hugetlb_fault_mutex_table[hash]);
254
255		err = -ENOMEM;
256		dst_pte = huge_pte_alloc(dst_mm, dst_addr, huge_page_size(h));
257		if (!dst_pte) {
258			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
259			goto out_unlock;
260		}
261
262		err = -EEXIST;
263		dst_pteval = huge_ptep_get(dst_pte);
264		if (!huge_pte_none(dst_pteval)) {
265			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
266			goto out_unlock;
267		}
268
269		err = hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma,
270						dst_addr, src_addr, &page);
271
272		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
273		vm_alloc_shared = vm_shared;
274
275		cond_resched();
276
277		if (unlikely(err == -EFAULT)) {
278			up_read(&dst_mm->mmap_sem);
279			BUG_ON(!page);
280
281			err = copy_huge_page_from_user(page,
282						(const void __user *)src_addr,
283						pages_per_huge_page(h), true);
284			if (unlikely(err)) {
285				err = -EFAULT;
286				goto out;
287			}
288			down_read(&dst_mm->mmap_sem);
289
290			dst_vma = NULL;
291			goto retry;
292		} else
293			BUG_ON(page);
294
295		if (!err) {
296			dst_addr += vma_hpagesize;
297			src_addr += vma_hpagesize;
298			copied += vma_hpagesize;
299
300			if (fatal_signal_pending(current))
301				err = -EINTR;
302		}
303		if (err)
304			break;
305	}
306
307out_unlock:
308	up_read(&dst_mm->mmap_sem);
309out:
310	if (page) {
311		/*
312		 * We encountered an error and are about to free a newly
313		 * allocated huge page.
314		 *
315		 * Reservation handling is very subtle, and is different for
316		 * private and shared mappings.  See the routine
317		 * restore_reserve_on_error for details.  Unfortunately, we
318		 * can not call restore_reserve_on_error now as it would
319		 * require holding mmap_sem.
320		 *
321		 * If a reservation for the page existed in the reservation
322		 * map of a private mapping, the map was modified to indicate
323		 * the reservation was consumed when the page was allocated.
324		 * We clear the PagePrivate flag now so that the global
325		 * reserve count will not be incremented in free_huge_page.
326		 * The reservation map will still indicate the reservation
327		 * was consumed and possibly prevent later page allocation.
328		 * This is better than leaking a global reservation.  If no
329		 * reservation existed, it is still safe to clear PagePrivate
330		 * as no adjustments to reservation counts were made during
331		 * allocation.
332		 *
333		 * The reservation map for shared mappings indicates which
334		 * pages have reservations.  When a huge page is allocated
335		 * for an address with a reservation, no change is made to
336		 * the reserve map.  In this case PagePrivate will be set
337		 * to indicate that the global reservation count should be
338		 * incremented when the page is freed.  This is the desired
339		 * behavior.  However, when a huge page is allocated for an
340		 * address without a reservation a reservation entry is added
341		 * to the reservation map, and PagePrivate will not be set.
342		 * When the page is freed, the global reserve count will NOT
343		 * be incremented and it will appear as though we have leaked
344		 * reserved page.  In this case, set PagePrivate so that the
345		 * global reserve count will be incremented to match the
346		 * reservation map entry which was created.
347		 *
348		 * Note that vm_alloc_shared is based on the flags of the vma
349		 * for which the page was originally allocated.  dst_vma could
350		 * be different or NULL on error.
351		 */
352		if (vm_alloc_shared)
353			SetPagePrivate(page);
354		else
355			ClearPagePrivate(page);
356		put_page(page);
357	}
358	BUG_ON(copied < 0);
359	BUG_ON(err > 0);
360	BUG_ON(!copied && !err);
361	return copied ? copied : err;
362}
363#else /* !CONFIG_HUGETLB_PAGE */
364/* fail at build time if gcc attempts to use this */
365extern ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
366				      struct vm_area_struct *dst_vma,
367				      unsigned long dst_start,
368				      unsigned long src_start,
369				      unsigned long len,
370				      bool zeropage);
371#endif /* CONFIG_HUGETLB_PAGE */
372
373static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
374						pmd_t *dst_pmd,
375						struct vm_area_struct *dst_vma,
376						unsigned long dst_addr,
377						unsigned long src_addr,
378						struct page **page,
379						bool zeropage)
380{
381	ssize_t err;
382
383	if (vma_is_anonymous(dst_vma)) {
384		if (!zeropage)
385			err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
386					       dst_addr, src_addr, page);
387		else
388			err = mfill_zeropage_pte(dst_mm, dst_pmd,
389						 dst_vma, dst_addr);
390	} else {
391		if (!zeropage)
392			err = shmem_mcopy_atomic_pte(dst_mm, dst_pmd,
393						     dst_vma, dst_addr,
394						     src_addr, page);
395		else
396			err = shmem_mfill_zeropage_pte(dst_mm, dst_pmd,
397						       dst_vma, dst_addr);
398	}
399
400	return err;
401}
402
403static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
404					      unsigned long dst_start,
405					      unsigned long src_start,
406					      unsigned long len,
407					      bool zeropage)
408{
409	struct vm_area_struct *dst_vma;
410	ssize_t err;
411	pmd_t *dst_pmd;
412	unsigned long src_addr, dst_addr;
413	long copied;
414	struct page *page;
415
416	/*
417	 * Sanitize the command parameters:
418	 */
419	BUG_ON(dst_start & ~PAGE_MASK);
420	BUG_ON(len & ~PAGE_MASK);
421
422	/* Does the address range wrap, or is the span zero-sized? */
423	BUG_ON(src_start + len <= src_start);
424	BUG_ON(dst_start + len <= dst_start);
425
426	src_addr = src_start;
427	dst_addr = dst_start;
428	copied = 0;
429	page = NULL;
430retry:
431	down_read(&dst_mm->mmap_sem);
432
433	/*
434	 * Make sure the vma is not shared, that the dst range is
435	 * both valid and fully within a single existing vma.
436	 */
437	err = -ENOENT;
438	dst_vma = find_vma(dst_mm, dst_start);
439	if (!dst_vma)
440		goto out_unlock;
441	/*
442	 * Be strict and only allow __mcopy_atomic on userfaultfd
443	 * registered ranges to prevent userland errors going
444	 * unnoticed. As far as the VM consistency is concerned, it
445	 * would be perfectly safe to remove this check, but there's
446	 * no useful usage for __mcopy_atomic outside of userfaultfd
447	 * registered ranges. This is after all why these are ioctls
448	 * belonging to the userfaultfd and not syscalls.
449	 */
450	if (!dst_vma->vm_userfaultfd_ctx.ctx)
451		goto out_unlock;
452
453	if (dst_start < dst_vma->vm_start ||
454	    dst_start + len > dst_vma->vm_end)
455		goto out_unlock;
456
457	err = -EINVAL;
458	/*
459	 * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
460	 * it will overwrite vm_ops, so vma_is_anonymous must return false.
461	 */
462	if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
463	    dst_vma->vm_flags & VM_SHARED))
464		goto out_unlock;
465
466	/*
467	 * If this is a HUGETLB vma, pass off to appropriate routine
468	 */
469	if (is_vm_hugetlb_page(dst_vma))
470		return  __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start,
471						src_start, len, zeropage);
472
473	if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
474		goto out_unlock;
475
476	/*
 477	 * Ensure the dst_vma has an anon_vma or this page
478	 * would get a NULL anon_vma when moved in the
479	 * dst_vma.
480	 */
481	err = -ENOMEM;
482	if (vma_is_anonymous(dst_vma) && unlikely(anon_vma_prepare(dst_vma)))
483		goto out_unlock;
484
485	while (src_addr < src_start + len) {
486		pmd_t dst_pmdval;
487
488		BUG_ON(dst_addr >= dst_start + len);
489
490		dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
491		if (unlikely(!dst_pmd)) {
492			err = -ENOMEM;
493			break;
494		}
495
496		dst_pmdval = pmd_read_atomic(dst_pmd);
497		/*
498		 * If the dst_pmd is mapped as THP don't
499		 * override it and just be strict.
500		 */
501		if (unlikely(pmd_trans_huge(dst_pmdval))) {
502			err = -EEXIST;
503			break;
504		}
505		if (unlikely(pmd_none(dst_pmdval)) &&
506		    unlikely(__pte_alloc(dst_mm, dst_pmd, dst_addr))) {
507			err = -ENOMEM;
508			break;
509		}
 510		/* If a huge pmd materialized from under us, fail */
511		if (unlikely(pmd_trans_huge(*dst_pmd))) {
512			err = -EFAULT;
513			break;
514		}
515
516		BUG_ON(pmd_none(*dst_pmd));
517		BUG_ON(pmd_trans_huge(*dst_pmd));
518
519		err = mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
520				       src_addr, &page, zeropage);
521		cond_resched();
522
523		if (unlikely(err == -EFAULT)) {
524			void *page_kaddr;
525
526			up_read(&dst_mm->mmap_sem);
527			BUG_ON(!page);
528
529			page_kaddr = kmap(page);
530			err = copy_from_user(page_kaddr,
531					     (const void __user *) src_addr,
532					     PAGE_SIZE);
533			kunmap(page);
534			if (unlikely(err)) {
535				err = -EFAULT;
536				goto out;
537			}
538			goto retry;
539		} else
540			BUG_ON(page);
541
542		if (!err) {
543			dst_addr += PAGE_SIZE;
544			src_addr += PAGE_SIZE;
545			copied += PAGE_SIZE;
546
547			if (fatal_signal_pending(current))
548				err = -EINTR;
549		}
550		if (err)
551			break;
552	}
553
554out_unlock:
555	up_read(&dst_mm->mmap_sem);
556out:
557	if (page)
558		put_page(page);
559	BUG_ON(copied < 0);
560	BUG_ON(err > 0);
561	BUG_ON(!copied && !err);
562	return copied ? copied : err;
563}
564
565ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
566		     unsigned long src_start, unsigned long len)
567{
568	return __mcopy_atomic(dst_mm, dst_start, src_start, len, false);
569}
570
571ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start,
572		       unsigned long len)
573{
574	return __mcopy_atomic(dst_mm, start, 0, len, true);
575}
v5.14.15
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 *  mm/userfaultfd.c
  4 *
  5 *  Copyright (C) 2015  Red Hat, Inc.
  6 */
  7
  8#include <linux/mm.h>
  9#include <linux/sched/signal.h>
 10#include <linux/pagemap.h>
 11#include <linux/rmap.h>
 12#include <linux/swap.h>
 13#include <linux/swapops.h>
 14#include <linux/userfaultfd_k.h>
 15#include <linux/mmu_notifier.h>
 16#include <linux/hugetlb.h>
 17#include <linux/shmem_fs.h>
 18#include <asm/tlbflush.h>
 19#include "internal.h"
 20
 21static __always_inline
 22struct vm_area_struct *find_dst_vma(struct mm_struct *dst_mm,
 23				    unsigned long dst_start,
 24				    unsigned long len)
 25{
 26	/*
 27	 * Make sure that the dst range is both valid and fully within a
 28	 * single existing vma.
 29	 */
 30	struct vm_area_struct *dst_vma;
 31
 32	dst_vma = find_vma(dst_mm, dst_start);
 33	if (!dst_vma)
 34		return NULL;
 35
 36	if (dst_start < dst_vma->vm_start ||
 37	    dst_start + len > dst_vma->vm_end)
 38		return NULL;
 39
 40	/*
 41	 * Check the vma is registered in uffd, this is required to
 42	 * enforce the VM_MAYWRITE check done at uffd registration
 43	 * time.
 44	 */
 45	if (!dst_vma->vm_userfaultfd_ctx.ctx)
 46		return NULL;
 47
 48	return dst_vma;
 49}
 50
 51/*
 52 * Install PTEs, to map dst_addr (within dst_vma) to page.
 53 *
 54 * This function handles both MCOPY_ATOMIC_NORMAL and _CONTINUE for both shmem
 55 * and anon, and for both shared and private VMAs.
 56 */
 57int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
 58			     struct vm_area_struct *dst_vma,
 59			     unsigned long dst_addr, struct page *page,
 60			     bool newly_allocated, bool wp_copy)
 61{
 62	int ret;
 63	pte_t _dst_pte, *dst_pte;
 64	bool writable = dst_vma->vm_flags & VM_WRITE;
 65	bool vm_shared = dst_vma->vm_flags & VM_SHARED;
 66	bool page_in_cache = page->mapping;
 67	spinlock_t *ptl;
 68	struct inode *inode;
 69	pgoff_t offset, max_off;
 70
 71	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
 72	if (page_in_cache && !vm_shared)
 73		writable = false;
 74	if (writable || !page_in_cache)
 75		_dst_pte = pte_mkdirty(_dst_pte);
 76	if (writable) {
 77		if (wp_copy)
 78			_dst_pte = pte_mkuffd_wp(_dst_pte);
 79		else
 80			_dst_pte = pte_mkwrite(_dst_pte);
 81	}
 82
 83	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
 84
 85	if (vma_is_shmem(dst_vma)) {
 86		/* serialize against truncate with the page table lock */
 87		inode = dst_vma->vm_file->f_inode;
 88		offset = linear_page_index(dst_vma, dst_addr);
 89		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
 90		ret = -EFAULT;
 91		if (unlikely(offset >= max_off))
 92			goto out_unlock;
 93	}
 94
 95	ret = -EEXIST;
 96	if (!pte_none(*dst_pte))
 97		goto out_unlock;
 98
 99	if (page_in_cache)
100		page_add_file_rmap(page, false);
101	else
102		page_add_new_anon_rmap(page, dst_vma, dst_addr, false);
103
104	/*
105	 * Must happen after rmap, as mm_counter() checks mapping (via
106	 * PageAnon()), which is set by __page_set_anon_rmap().
107	 */
108	inc_mm_counter(dst_mm, mm_counter(page));
109
110	if (newly_allocated)
111		lru_cache_add_inactive_or_unevictable(page, dst_vma);
112
113	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
114
115	/* No need to invalidate - it was non-present before */
116	update_mmu_cache(dst_vma, dst_addr, dst_pte);
117	ret = 0;
118out_unlock:
119	pte_unmap_unlock(dst_pte, ptl);
120	return ret;
121}
122
123static int mcopy_atomic_pte(struct mm_struct *dst_mm,
124			    pmd_t *dst_pmd,
125			    struct vm_area_struct *dst_vma,
126			    unsigned long dst_addr,
127			    unsigned long src_addr,
128			    struct page **pagep,
129			    bool wp_copy)
130{
131	void *page_kaddr;
132	int ret;
133	struct page *page;
134
135	if (!*pagep) {
136		ret = -ENOMEM;
137		page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr);
138		if (!page)
139			goto out;
140
141		page_kaddr = kmap_atomic(page);
142		ret = copy_from_user(page_kaddr,
143				     (const void __user *) src_addr,
144				     PAGE_SIZE);
145		kunmap_atomic(page_kaddr);
146
147		/* fallback to copy_from_user outside mmap_lock */
148		if (unlikely(ret)) {
149			ret = -ENOENT;
150			*pagep = page;
151			/* don't free the page */
152			goto out;
153		}
154	} else {
155		page = *pagep;
156		*pagep = NULL;
157	}
158
159	/*
160	 * The memory barrier inside __SetPageUptodate makes sure that
161	 * preceding stores to the page contents become visible before
162	 * the set_pte_at() write.
163	 */
164	__SetPageUptodate(page);
165
166	ret = -ENOMEM;
167	if (mem_cgroup_charge(page, dst_mm, GFP_KERNEL))
168		goto out_release;
169
170	ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
171				       page, true, wp_copy);
172	if (ret)
173		goto out_release;
174out:
175	return ret;
176out_release:
177	put_page(page);
178	goto out;
179}
180
181static int mfill_zeropage_pte(struct mm_struct *dst_mm,
182			      pmd_t *dst_pmd,
183			      struct vm_area_struct *dst_vma,
184			      unsigned long dst_addr)
185{
186	pte_t _dst_pte, *dst_pte;
187	spinlock_t *ptl;
188	int ret;
189	pgoff_t offset, max_off;
190	struct inode *inode;
191
192	_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
193					 dst_vma->vm_page_prot));
194	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
195	if (dst_vma->vm_file) {
196		/* the shmem MAP_PRIVATE case requires checking the i_size */
197		inode = dst_vma->vm_file->f_inode;
198		offset = linear_page_index(dst_vma, dst_addr);
199		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
200		ret = -EFAULT;
201		if (unlikely(offset >= max_off))
202			goto out_unlock;
203	}
204	ret = -EEXIST;
205	if (!pte_none(*dst_pte))
206		goto out_unlock;
207	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
208	/* No need to invalidate - it was non-present before */
209	update_mmu_cache(dst_vma, dst_addr, dst_pte);
210	ret = 0;
211out_unlock:
212	pte_unmap_unlock(dst_pte, ptl);
213	return ret;
214}
215
216/* Handles UFFDIO_CONTINUE for all shmem VMAs (shared or private). */
217static int mcontinue_atomic_pte(struct mm_struct *dst_mm,
218				pmd_t *dst_pmd,
219				struct vm_area_struct *dst_vma,
220				unsigned long dst_addr,
221				bool wp_copy)
222{
223	struct inode *inode = file_inode(dst_vma->vm_file);
224	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
225	struct page *page;
226	int ret;
227
228	ret = shmem_getpage(inode, pgoff, &page, SGP_READ);
229	if (ret)
230		goto out;
231	if (!page) {
232		ret = -EFAULT;
233		goto out;
234	}
235
236	ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
237				       page, false, wp_copy);
238	if (ret)
239		goto out_release;
240
241	unlock_page(page);
242	ret = 0;
243out:
244	return ret;
245out_release:
246	unlock_page(page);
247	put_page(page);
248	goto out;
249}
250
251static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
252{
253	pgd_t *pgd;
254	p4d_t *p4d;
255	pud_t *pud;
256
257	pgd = pgd_offset(mm, address);
258	p4d = p4d_alloc(mm, pgd, address);
259	if (!p4d)
260		return NULL;
261	pud = pud_alloc(mm, p4d, address);
262	if (!pud)
263		return NULL;
264	/*
265	 * Note that we didn't run this because the pmd was
266	 * missing, the *pmd may be already established and in
267	 * turn it may also be a trans_huge_pmd.
268	 */
269	return pmd_alloc(mm, pud, address);
270}
271
272#ifdef CONFIG_HUGETLB_PAGE
273/*
274 * __mcopy_atomic processing for HUGETLB vmas.  Note that this routine is
275 * called with mmap_lock held, it will release mmap_lock before returning.
276 */
277static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
278					      struct vm_area_struct *dst_vma,
279					      unsigned long dst_start,
280					      unsigned long src_start,
281					      unsigned long len,
282					      enum mcopy_atomic_mode mode)
283{
284	int vm_shared = dst_vma->vm_flags & VM_SHARED;
285	ssize_t err;
286	pte_t *dst_pte;
287	unsigned long src_addr, dst_addr;
288	long copied;
289	struct page *page;
290	unsigned long vma_hpagesize;
291	pgoff_t idx;
292	u32 hash;
293	struct address_space *mapping;
294
295	/*
296	 * There is no default zero huge page for all huge page sizes as
297 *  supported by hugetlb.  A PMD_SIZE huge page may exist as used
298	 * by THP.  Since we can not reliably insert a zero page, this
299	 * feature is not supported.
300	 */
301	if (mode == MCOPY_ATOMIC_ZEROPAGE) {
302		mmap_read_unlock(dst_mm);
303		return -EINVAL;
304	}
305
306	src_addr = src_start;
307	dst_addr = dst_start;
308	copied = 0;
309	page = NULL;
310	vma_hpagesize = vma_kernel_pagesize(dst_vma);
311
312	/*
313	 * Validate alignment based on huge page size
314	 */
315	err = -EINVAL;
316	if (dst_start & (vma_hpagesize - 1) || len & (vma_hpagesize - 1))
317		goto out_unlock;
318
319retry:
320	/*
321	 * On routine entry dst_vma is set.  If we had to drop mmap_lock and
322	 * retry, dst_vma will be set to NULL and we must lookup again.
323	 */
324	if (!dst_vma) {
325		err = -ENOENT;
326		dst_vma = find_dst_vma(dst_mm, dst_start, len);
327		if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
328			goto out_unlock;
329
330		err = -EINVAL;
331		if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
332			goto out_unlock;
333
334		vm_shared = dst_vma->vm_flags & VM_SHARED;
335	}
336
337	/*
338	 * If not shared, ensure the dst_vma has an anon_vma.
339	 */
340	err = -ENOMEM;
341	if (!vm_shared) {
342		if (unlikely(anon_vma_prepare(dst_vma)))
343			goto out_unlock;
344	}
345
346	while (src_addr < src_start + len) {
347		BUG_ON(dst_addr >= dst_start + len);
348
349		/*
350		 * Serialize via i_mmap_rwsem and hugetlb_fault_mutex.
351		 * i_mmap_rwsem ensures the dst_pte remains valid even
352		 * in the case of shared pmds.  fault mutex prevents
353		 * races with other faulting threads.
354		 */
355		mapping = dst_vma->vm_file->f_mapping;
356		i_mmap_lock_read(mapping);
357		idx = linear_page_index(dst_vma, dst_addr);
358		hash = hugetlb_fault_mutex_hash(mapping, idx);
359		mutex_lock(&hugetlb_fault_mutex_table[hash]);
360
361		err = -ENOMEM;
362		dst_pte = huge_pte_alloc(dst_mm, dst_vma, dst_addr, vma_hpagesize);
363		if (!dst_pte) {
364			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
365			i_mmap_unlock_read(mapping);
366			goto out_unlock;
367		}
368
369		if (mode != MCOPY_ATOMIC_CONTINUE &&
370		    !huge_pte_none(huge_ptep_get(dst_pte))) {
371			err = -EEXIST;
372			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
373			i_mmap_unlock_read(mapping);
374			goto out_unlock;
375		}
376
377		err = hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma,
378					       dst_addr, src_addr, mode, &page);
379
380		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
381		i_mmap_unlock_read(mapping);
382
383		cond_resched();
384
385		if (unlikely(err == -ENOENT)) {
386			mmap_read_unlock(dst_mm);
387			BUG_ON(!page);
388
389			err = copy_huge_page_from_user(page,
390						(const void __user *)src_addr,
391						vma_hpagesize / PAGE_SIZE,
392						true);
393			if (unlikely(err)) {
394				err = -EFAULT;
395				goto out;
396			}
397			mmap_read_lock(dst_mm);
398
399			dst_vma = NULL;
400			goto retry;
401		} else
402			BUG_ON(page);
403
404		if (!err) {
405			dst_addr += vma_hpagesize;
406			src_addr += vma_hpagesize;
407			copied += vma_hpagesize;
408
409			if (fatal_signal_pending(current))
410				err = -EINTR;
411		}
412		if (err)
413			break;
414	}
415
416out_unlock:
417	mmap_read_unlock(dst_mm);
418out:
419	if (page)
420		put_page(page);
421	BUG_ON(copied < 0);
422	BUG_ON(err > 0);
423	BUG_ON(!copied && !err);
424	return copied ? copied : err;
425}
426#else /* !CONFIG_HUGETLB_PAGE */
427/* fail at build time if gcc attempts to use this */
428extern ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
429				      struct vm_area_struct *dst_vma,
430				      unsigned long dst_start,
431				      unsigned long src_start,
432				      unsigned long len,
433				      enum mcopy_atomic_mode mode);
434#endif /* CONFIG_HUGETLB_PAGE */
435
436static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
437						pmd_t *dst_pmd,
438						struct vm_area_struct *dst_vma,
439						unsigned long dst_addr,
440						unsigned long src_addr,
441						struct page **page,
442						enum mcopy_atomic_mode mode,
443						bool wp_copy)
444{
445	ssize_t err;
446
447	if (mode == MCOPY_ATOMIC_CONTINUE) {
448		return mcontinue_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
449					    wp_copy);
450	}
451
452	/*
453	 * The normal page fault path for a shmem will invoke the
454	 * fault, fill the hole in the file and COW it right away. The
455	 * result generates plain anonymous memory. So when we are
456	 * asked to fill a hole in a MAP_PRIVATE shmem mapping, we'll
457	 * generate anonymous memory directly without actually filling
458	 * the hole. For the MAP_PRIVATE case the robustness check
459	 * only happens in the pagetable (to verify it's still none)
460	 * and not in the radix tree.
461	 */
462	if (!(dst_vma->vm_flags & VM_SHARED)) {
463		if (mode == MCOPY_ATOMIC_NORMAL)
464			err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
465					       dst_addr, src_addr, page,
466					       wp_copy);
467		else
468			err = mfill_zeropage_pte(dst_mm, dst_pmd,
469						 dst_vma, dst_addr);
470	} else {
471		VM_WARN_ON_ONCE(wp_copy);
472		err = shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
473					     dst_addr, src_addr,
474					     mode != MCOPY_ATOMIC_NORMAL,
475					     page);
476	}
477
478	return err;
479}
480
481static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
482					      unsigned long dst_start,
483					      unsigned long src_start,
484					      unsigned long len,
485					      enum mcopy_atomic_mode mcopy_mode,
486					      bool *mmap_changing,
487					      __u64 mode)
488{
489	struct vm_area_struct *dst_vma;
490	ssize_t err;
491	pmd_t *dst_pmd;
492	unsigned long src_addr, dst_addr;
493	long copied;
494	struct page *page;
495	bool wp_copy;
496
497	/*
498	 * Sanitize the command parameters:
499	 */
500	BUG_ON(dst_start & ~PAGE_MASK);
501	BUG_ON(len & ~PAGE_MASK);
502
503	/* Does the address range wrap, or is the span zero-sized? */
504	BUG_ON(src_start + len <= src_start);
505	BUG_ON(dst_start + len <= dst_start);
506
507	src_addr = src_start;
508	dst_addr = dst_start;
509	copied = 0;
510	page = NULL;
511retry:
512	mmap_read_lock(dst_mm);
513
514	/*
515	 * If memory mappings are changing because of non-cooperative
516	 * operation (e.g. mremap) running in parallel, bail out and
517	 * request the user to retry later
518	 */
519	err = -EAGAIN;
520	if (mmap_changing && READ_ONCE(*mmap_changing))
521		goto out_unlock;
522
523	/*
524	 * Make sure the vma is not shared, that the dst range is
525	 * both valid and fully within a single existing vma.
526	 */
527	err = -ENOENT;
528	dst_vma = find_dst_vma(dst_mm, dst_start, len);
529	if (!dst_vma)
530		goto out_unlock;
531
532	err = -EINVAL;
533	/*
534	 * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
535	 * it will overwrite vm_ops, so vma_is_anonymous must return false.
536	 */
537	if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
538	    dst_vma->vm_flags & VM_SHARED))
539		goto out_unlock;
540
541	/*
542	 * validate 'mode' now that we know the dst_vma: don't allow
543	 * a wrprotect copy if the userfaultfd didn't register as WP.
544	 */
545	wp_copy = mode & UFFDIO_COPY_MODE_WP;
546	if (wp_copy && !(dst_vma->vm_flags & VM_UFFD_WP))
547		goto out_unlock;
548
549	/*
550	 * If this is a HUGETLB vma, pass off to appropriate routine
551	 */
552	if (is_vm_hugetlb_page(dst_vma))
553		return  __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start,
554						src_start, len, mcopy_mode);
555
556	if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
557		goto out_unlock;
558	if (!vma_is_shmem(dst_vma) && mcopy_mode == MCOPY_ATOMIC_CONTINUE)
559		goto out_unlock;
560
561	/*
562	 * Ensure the dst_vma has an anon_vma or this page
563	 * would get a NULL anon_vma when moved in the
564	 * dst_vma.
565	 */
566	err = -ENOMEM;
567	if (!(dst_vma->vm_flags & VM_SHARED) &&
568	    unlikely(anon_vma_prepare(dst_vma)))
569		goto out_unlock;
570
571	while (src_addr < src_start + len) {
572		pmd_t dst_pmdval;
573
574		BUG_ON(dst_addr >= dst_start + len);
575
576		dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
577		if (unlikely(!dst_pmd)) {
578			err = -ENOMEM;
579			break;
580		}
581
582		dst_pmdval = pmd_read_atomic(dst_pmd);
583		/*
584		 * If the dst_pmd is mapped as THP don't
585		 * override it and just be strict.
586		 */
587		if (unlikely(pmd_trans_huge(dst_pmdval))) {
588			err = -EEXIST;
589			break;
590		}
591		if (unlikely(pmd_none(dst_pmdval)) &&
592		    unlikely(__pte_alloc(dst_mm, dst_pmd))) {
593			err = -ENOMEM;
594			break;
595		}
596		/* If a huge pmd materialized from under us, fail */
597		if (unlikely(pmd_trans_huge(*dst_pmd))) {
598			err = -EFAULT;
599			break;
600		}
601
602		BUG_ON(pmd_none(*dst_pmd));
603		BUG_ON(pmd_trans_huge(*dst_pmd));
604
605		err = mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
606				       src_addr, &page, mcopy_mode, wp_copy);
607		cond_resched();
608
609		if (unlikely(err == -ENOENT)) {
610			void *page_kaddr;
611
612			mmap_read_unlock(dst_mm);
613			BUG_ON(!page);
614
615			page_kaddr = kmap(page);
616			err = copy_from_user(page_kaddr,
617					     (const void __user *) src_addr,
618					     PAGE_SIZE);
619			kunmap(page);
620			if (unlikely(err)) {
621				err = -EFAULT;
622				goto out;
623			}
624			goto retry;
625		} else
626			BUG_ON(page);
627
628		if (!err) {
629			dst_addr += PAGE_SIZE;
630			src_addr += PAGE_SIZE;
631			copied += PAGE_SIZE;
632
633			if (fatal_signal_pending(current))
634				err = -EINTR;
635		}
636		if (err)
637			break;
638	}
639
640out_unlock:
641	mmap_read_unlock(dst_mm);
642out:
643	if (page)
644		put_page(page);
645	BUG_ON(copied < 0);
646	BUG_ON(err > 0);
647	BUG_ON(!copied && !err);
648	return copied ? copied : err;
649}
650
651ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
652		     unsigned long src_start, unsigned long len,
653		     bool *mmap_changing, __u64 mode)
654{
655	return __mcopy_atomic(dst_mm, dst_start, src_start, len,
656			      MCOPY_ATOMIC_NORMAL, mmap_changing, mode);
657}
658
659ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start,
660		       unsigned long len, bool *mmap_changing)
661{
662	return __mcopy_atomic(dst_mm, start, 0, len, MCOPY_ATOMIC_ZEROPAGE,
663			      mmap_changing, 0);
664}
665
666ssize_t mcopy_continue(struct mm_struct *dst_mm, unsigned long start,
667		       unsigned long len, bool *mmap_changing)
668{
669	return __mcopy_atomic(dst_mm, start, 0, len, MCOPY_ATOMIC_CONTINUE,
670			      mmap_changing, 0);
671}
672
673int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
674			unsigned long len, bool enable_wp, bool *mmap_changing)
675{
676	struct vm_area_struct *dst_vma;
677	pgprot_t newprot;
678	int err;
679
680	/*
681	 * Sanitize the command parameters:
682	 */
683	BUG_ON(start & ~PAGE_MASK);
684	BUG_ON(len & ~PAGE_MASK);
685
686	/* Does the address range wrap, or is the span zero-sized? */
687	BUG_ON(start + len <= start);
688
689	mmap_read_lock(dst_mm);
690
691	/*
692	 * If memory mappings are changing because of non-cooperative
693	 * operation (e.g. mremap) running in parallel, bail out and
694	 * request the user to retry later
695	 */
696	err = -EAGAIN;
697	if (mmap_changing && READ_ONCE(*mmap_changing))
698		goto out_unlock;
699
700	err = -ENOENT;
701	dst_vma = find_dst_vma(dst_mm, start, len);
702	/*
703	 * Make sure the vma is not shared, that the dst range is
704	 * both valid and fully within a single existing vma.
705	 */
706	if (!dst_vma || (dst_vma->vm_flags & VM_SHARED))
707		goto out_unlock;
708	if (!userfaultfd_wp(dst_vma))
709		goto out_unlock;
710	if (!vma_is_anonymous(dst_vma))
711		goto out_unlock;
712
713	if (enable_wp)
714		newprot = vm_get_page_prot(dst_vma->vm_flags & ~(VM_WRITE));
715	else
716		newprot = vm_get_page_prot(dst_vma->vm_flags);
717
718	change_protection(dst_vma, start, start + len, newprot,
719			  enable_wp ? MM_CP_UFFD_WP : MM_CP_UFFD_WP_RESOLVE);
720
721	err = 0;
722out_unlock:
723	mmap_read_unlock(dst_mm);
724	return err;
725}
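
The routines above back the userfaultfd ioctls: mcopy_atomic() services UFFDIO_COPY, mfill_zeropage() services UFFDIO_ZEROPAGE, and (in v5.14.15) mcopy_continue() services UFFDIO_CONTINUE. As a rough illustration only, and not part of mm/userfaultfd.c, the sketch below shows a minimal userspace sequence that reaches mcopy_atomic(): create a userfaultfd, register a range for missing-page tracking, and resolve one page with UFFDIO_COPY. It assumes a kernel built with userfaultfd support, permission to create the descriptor (see vm.unprivileged_userfaultfd), and <linux/userfaultfd.h>; a real consumer would normally issue the copy from a fault-handling thread after reading an event from the descriptor.

/* Minimal userspace sketch: drive UFFDIO_COPY into mcopy_atomic(). */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/userfaultfd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
	struct uffdio_api api = { .api = UFFD_API };
	struct uffdio_register reg;
	struct uffdio_copy copy;
	char *dst, *src;

	if (uffd < 0 || ioctl(uffd, UFFDIO_API, &api))
		return 1;

	/* Destination range whose missing pages userspace will supply. */
	dst = mmap(NULL, page, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (dst == MAP_FAILED)
		return 1;
	reg.range.start = (unsigned long)dst;
	reg.range.len = page;
	reg.mode = UFFDIO_REGISTER_MODE_MISSING;
	if (ioctl(uffd, UFFDIO_REGISTER, &reg))
		return 1;

	/* Source page copied atomically into the still-unmapped dst. */
	src = mmap(NULL, page, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (src == MAP_FAILED)
		return 1;
	memset(src, 0x5a, page);

	copy.dst = (unsigned long)dst;
	copy.src = (unsigned long)src;
	copy.len = page;
	copy.mode = 0;	/* UFFDIO_COPY_MODE_WP also needs WP registration */
	if (ioctl(uffd, UFFDIO_COPY, &copy))	/* ends up in __mcopy_atomic() */
		return 1;

	printf("copied %lld bytes, dst[0] = 0x%x\n",
	       (long long)copy.copy, dst[0]);
	return 0;
}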