v5.4
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 *  mm/userfaultfd.c
  4 *
  5 *  Copyright (C) 2015  Red Hat, Inc.
  6 */
  7
  8#include <linux/mm.h>
  9#include <linux/sched/signal.h>
 10#include <linux/pagemap.h>
 11#include <linux/rmap.h>
 12#include <linux/swap.h>
 13#include <linux/swapops.h>
 14#include <linux/userfaultfd_k.h>
 15#include <linux/mmu_notifier.h>
 16#include <linux/hugetlb.h>
 17#include <linux/shmem_fs.h>
 18#include <asm/tlbflush.h>
 19#include "internal.h"
 20
 21static int mcopy_atomic_pte(struct mm_struct *dst_mm,
 22			    pmd_t *dst_pmd,
 23			    struct vm_area_struct *dst_vma,
 24			    unsigned long dst_addr,
 25			    unsigned long src_addr,
 26			    struct page **pagep)
 27{
 28	struct mem_cgroup *memcg;
 29	pte_t _dst_pte, *dst_pte;
 30	spinlock_t *ptl;
 31	void *page_kaddr;
 32	int ret;
 33	struct page *page;
 34	pgoff_t offset, max_off;
 35	struct inode *inode;
 36
 37	if (!*pagep) {
 38		ret = -ENOMEM;
 39		page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr);
 40		if (!page)
 41			goto out;
 42
 43		page_kaddr = kmap_atomic(page);
 44		ret = copy_from_user(page_kaddr,
 45				     (const void __user *) src_addr,
 46				     PAGE_SIZE);
 47		kunmap_atomic(page_kaddr);
 48
 49		/* fallback to copy_from_user outside mmap_sem */
 50		if (unlikely(ret)) {
 51			ret = -ENOENT;
 52			*pagep = page;
 53			/* don't free the page */
 54			goto out;
 55		}
 56	} else {
 57		page = *pagep;
 58		*pagep = NULL;
 59	}
 60
 61	/*
 62	 * The memory barrier inside __SetPageUptodate makes sure that
 63	 * preceding stores to the page contents become visible before
 64	 * the set_pte_at() write.
 65	 */
 66	__SetPageUptodate(page);
 67
 68	ret = -ENOMEM;
 69	if (mem_cgroup_try_charge(page, dst_mm, GFP_KERNEL, &memcg, false))
 70		goto out_release;
 71
 72	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
 73	if (dst_vma->vm_flags & VM_WRITE)
 74		_dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte));
 75
 76	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
 77	if (dst_vma->vm_file) {
 78		/* the shmem MAP_PRIVATE case requires checking the i_size */
 79		inode = dst_vma->vm_file->f_inode;
 80		offset = linear_page_index(dst_vma, dst_addr);
 81		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
 82		ret = -EFAULT;
 83		if (unlikely(offset >= max_off))
 84			goto out_release_uncharge_unlock;
 85	}
 86	ret = -EEXIST;
 87	if (!pte_none(*dst_pte))
 88		goto out_release_uncharge_unlock;
 89
 90	inc_mm_counter(dst_mm, MM_ANONPAGES);
 91	page_add_new_anon_rmap(page, dst_vma, dst_addr, false);
 92	mem_cgroup_commit_charge(page, memcg, false, false);
 93	lru_cache_add_active_or_unevictable(page, dst_vma);
 94
 95	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
 96
 97	/* No need to invalidate - it was non-present before */
 98	update_mmu_cache(dst_vma, dst_addr, dst_pte);
 99
100	pte_unmap_unlock(dst_pte, ptl);
101	ret = 0;
102out:
103	return ret;
104out_release_uncharge_unlock:
105	pte_unmap_unlock(dst_pte, ptl);
106	mem_cgroup_cancel_charge(page, memcg, false);
107out_release:
108	put_page(page);
109	goto out;
110}
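/*
 * Note on the -ENOENT path above: mcopy_atomic_pte() runs with mmap_sem
 * held and copies the source page under kmap_atomic(), where page faults
 * are disabled, so copy_from_user() fails whenever the source page is not
 * resident.  In that case the freshly allocated destination page is handed
 * back through *pagep and -ENOENT is returned; the caller drops mmap_sem,
 * does the copy with kmap()/copy_from_user(), retakes mmap_sem and calls
 * back in with *pagep pre-filled, which takes the "else" branch above.
 */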
111
112static int mfill_zeropage_pte(struct mm_struct *dst_mm,
113			      pmd_t *dst_pmd,
114			      struct vm_area_struct *dst_vma,
115			      unsigned long dst_addr)
116{
117	pte_t _dst_pte, *dst_pte;
118	spinlock_t *ptl;
119	int ret;
120	pgoff_t offset, max_off;
121	struct inode *inode;
122
123	_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
124					 dst_vma->vm_page_prot));
125	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
126	if (dst_vma->vm_file) {
127		/* the shmem MAP_PRIVATE case requires checking the i_size */
128		inode = dst_vma->vm_file->f_inode;
129		offset = linear_page_index(dst_vma, dst_addr);
130		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
131		ret = -EFAULT;
132		if (unlikely(offset >= max_off))
133			goto out_unlock;
134	}
135	ret = -EEXIST;
136	if (!pte_none(*dst_pte))
137		goto out_unlock;
138	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
139	/* No need to invalidate - it was non-present before */
140	update_mmu_cache(dst_vma, dst_addr, dst_pte);
141	ret = 0;
142out_unlock:
143	pte_unmap_unlock(dst_pte, ptl);
144	return ret;
145}
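/*
 * mfill_zeropage_pte() backs UFFDIO_ZEROPAGE for anonymous and MAP_PRIVATE
 * shmem ranges: it installs the global zero page read-only as a special pte
 * (pte_mkspecial()), so no memory is allocated and a later write simply
 * faults and COWs a private page.
 */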
146
147static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
148{
149	pgd_t *pgd;
150	p4d_t *p4d;
151	pud_t *pud;
152
153	pgd = pgd_offset(mm, address);
154	p4d = p4d_alloc(mm, pgd, address);
155	if (!p4d)
156		return NULL;
157	pud = pud_alloc(mm, p4d, address);
158	if (!pud)
159		return NULL;
160	/*
 161	 * Note that we don't necessarily run this because the pmd
 162	 * was missing: *pmd may already be established, and it may
 163	 * even be a trans_huge_pmd.
164	 */
165	return pmd_alloc(mm, pud, address);
166}
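/*
 * mm_alloc_pmd() walks the pgd -> p4d -> pud levels, allocating any that
 * are missing, and returns the pmd slot covering @address.  As the comment
 * above notes, the returned pmd may already be populated, possibly by a
 * transparent huge page, so callers such as __mcopy_atomic() re-check
 * pmd_trans_huge() before installing a pte below it.
 */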
167
168#ifdef CONFIG_HUGETLB_PAGE
169/*
170 * __mcopy_atomic processing for HUGETLB vmas.  Note that this routine is
171 * called with mmap_sem held; it will release mmap_sem before returning.
172 */
173static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
174					      struct vm_area_struct *dst_vma,
175					      unsigned long dst_start,
176					      unsigned long src_start,
177					      unsigned long len,
178					      bool zeropage)
179{
180	int vm_alloc_shared = dst_vma->vm_flags & VM_SHARED;
181	int vm_shared = dst_vma->vm_flags & VM_SHARED;
182	ssize_t err;
183	pte_t *dst_pte;
184	unsigned long src_addr, dst_addr;
185	long copied;
186	struct page *page;
187	struct hstate *h;
188	unsigned long vma_hpagesize;
189	pgoff_t idx;
190	u32 hash;
191	struct address_space *mapping;
192
193	/*
194	 * There is no default zero huge page for all huge page sizes as
195	 * supported by hugetlb.  A PMD_SIZE huge page may exist as used
196	 * by THP.  Since we can not reliably insert a zero page, this
197	 * feature is not supported.
198	 */
199	if (zeropage) {
200		up_read(&dst_mm->mmap_sem);
201		return -EINVAL;
202	}
203
204	src_addr = src_start;
205	dst_addr = dst_start;
206	copied = 0;
207	page = NULL;
208	vma_hpagesize = vma_kernel_pagesize(dst_vma);
209
210	/*
211	 * Validate alignment based on huge page size
212	 */
213	err = -EINVAL;
214	if (dst_start & (vma_hpagesize - 1) || len & (vma_hpagesize - 1))
215		goto out_unlock;
216
217retry:
218	/*
219	 * On routine entry dst_vma is set.  If we had to drop mmap_sem and
220	 * retry, dst_vma will be set to NULL and we must look it up again.
221	 */
222	if (!dst_vma) {
223		err = -ENOENT;
224		dst_vma = find_vma(dst_mm, dst_start);
225		if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
226			goto out_unlock;
227		/*
228		 * Check that the vma is registered in uffd; this is
229		 * required to enforce the VM_MAYWRITE check done at
230		 * uffd registration time.
231		 */
232		if (!dst_vma->vm_userfaultfd_ctx.ctx)
233			goto out_unlock;
234
235		if (dst_start < dst_vma->vm_start ||
236		    dst_start + len > dst_vma->vm_end)
237			goto out_unlock;
238
239		err = -EINVAL;
240		if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
241			goto out_unlock;
242
243		vm_shared = dst_vma->vm_flags & VM_SHARED;
244	}
245
246	if (WARN_ON(dst_addr & (vma_hpagesize - 1) ||
247		    (len - copied) & (vma_hpagesize - 1)))
248		goto out_unlock;
249
250	/*
251	 * If not shared, ensure the dst_vma has an anon_vma.
252	 */
253	err = -ENOMEM;
254	if (!vm_shared) {
255		if (unlikely(anon_vma_prepare(dst_vma)))
256			goto out_unlock;
257	}
258
259	h = hstate_vma(dst_vma);
260
261	while (src_addr < src_start + len) {
262		pte_t dst_pteval;
263
264		BUG_ON(dst_addr >= dst_start + len);
265		VM_BUG_ON(dst_addr & ~huge_page_mask(h));
266
267		/*
268		 * Serialize via hugetlb_fault_mutex
269		 */
270		idx = linear_page_index(dst_vma, dst_addr);
271		mapping = dst_vma->vm_file->f_mapping;
272		hash = hugetlb_fault_mutex_hash(h, mapping, idx, dst_addr);
273		mutex_lock(&hugetlb_fault_mutex_table[hash]);
274
275		err = -ENOMEM;
276		dst_pte = huge_pte_alloc(dst_mm, dst_addr, huge_page_size(h));
277		if (!dst_pte) {
278			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
279			goto out_unlock;
280		}
281
282		err = -EEXIST;
283		dst_pteval = huge_ptep_get(dst_pte);
284		if (!huge_pte_none(dst_pteval)) {
285			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
286			goto out_unlock;
287		}
288
289		err = hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma,
290						dst_addr, src_addr, &page);
291
292		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
293		vm_alloc_shared = vm_shared;
294
295		cond_resched();
296
297		if (unlikely(err == -ENOENT)) {
298			up_read(&dst_mm->mmap_sem);
299			BUG_ON(!page);
300
301			err = copy_huge_page_from_user(page,
302						(const void __user *)src_addr,
303						pages_per_huge_page(h), true);
304			if (unlikely(err)) {
305				err = -EFAULT;
306				goto out;
307			}
308			down_read(&dst_mm->mmap_sem);
309
310			dst_vma = NULL;
311			goto retry;
312		} else
313			BUG_ON(page);
314
315		if (!err) {
316			dst_addr += vma_hpagesize;
317			src_addr += vma_hpagesize;
318			copied += vma_hpagesize;
319
320			if (fatal_signal_pending(current))
321				err = -EINTR;
322		}
323		if (err)
324			break;
325	}
326
327out_unlock:
328	up_read(&dst_mm->mmap_sem);
329out:
330	if (page) {
331		/*
332		 * We encountered an error and are about to free a newly
333		 * allocated huge page.
334		 *
335		 * Reservation handling is very subtle, and is different for
336		 * private and shared mappings.  See the routine
337		 * restore_reserve_on_error for details.  Unfortunately, we
338		 * can not call restore_reserve_on_error now as it would
339		 * require holding mmap_sem.
340		 *
341		 * If a reservation for the page existed in the reservation
342		 * map of a private mapping, the map was modified to indicate
343		 * the reservation was consumed when the page was allocated.
344		 * We clear the PagePrivate flag now so that the global
345		 * reserve count will not be incremented in free_huge_page.
346		 * The reservation map will still indicate the reservation
347		 * was consumed and possibly prevent later page allocation.
348		 * This is better than leaking a global reservation.  If no
349		 * reservation existed, it is still safe to clear PagePrivate
350		 * as no adjustments to reservation counts were made during
351		 * allocation.
352		 *
353		 * The reservation map for shared mappings indicates which
354		 * pages have reservations.  When a huge page is allocated
355		 * for an address with a reservation, no change is made to
356		 * the reserve map.  In this case PagePrivate will be set
357		 * to indicate that the global reservation count should be
358		 * incremented when the page is freed.  This is the desired
359		 * behavior.  However, when a huge page is allocated for an
360		 * address without a reservation a reservation entry is added
361		 * to the reservation map, and PagePrivate will not be set.
362		 * When the page is freed, the global reserve count will NOT
363		 * be incremented and it will appear as though we have leaked
364		 * a reserved page.  In this case, set PagePrivate so that the
365		 * global reserve count will be incremented to match the
366		 * reservation map entry which was created.
367		 *
368		 * Note that vm_alloc_shared is based on the flags of the vma
369		 * for which the page was originally allocated.  dst_vma could
370		 * be different or NULL on error.
371		 */
372		if (vm_alloc_shared)
373			SetPagePrivate(page);
374		else
375			ClearPagePrivate(page);
376		put_page(page);
377	}
378	BUG_ON(copied < 0);
379	BUG_ON(err > 0);
380	BUG_ON(!copied && !err);
381	return copied ? copied : err;
382}
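/*
 * Summary of the hugetlb path above: dst_start and len must be aligned to
 * the vma's huge page size, the zeropage variant is rejected with -EINVAL
 * because there is no zero huge page to install, each huge pte is filled
 * under the matching hugetlb_fault_mutex_table[] lock, and a failed atomic
 * copy (-ENOENT) is retried by dropping mmap_sem, copying with
 * copy_huge_page_from_user() and looking the vma up again.
 */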
383#else /* !CONFIG_HUGETLB_PAGE */
384/* fail at build time if gcc attempts to use this */
385extern ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
386				      struct vm_area_struct *dst_vma,
387				      unsigned long dst_start,
388				      unsigned long src_start,
389				      unsigned long len,
390				      bool zeropage);
391#endif /* CONFIG_HUGETLB_PAGE */
392
393static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
394						pmd_t *dst_pmd,
395						struct vm_area_struct *dst_vma,
396						unsigned long dst_addr,
397						unsigned long src_addr,
398						struct page **page,
399						bool zeropage)
400{
401	ssize_t err;
402
403	/*
404	 * The normal page fault path for a shmem will invoke the
405	 * fault, fill the hole in the file and COW it right away. The
406	 * result generates plain anonymous memory. So when we are
407	 * asked to fill a hole in a MAP_PRIVATE shmem mapping, we'll
408	 * generate anonymous memory directly without actually filling
409	 * the hole. For the MAP_PRIVATE case the robustness check
410	 * only happens in the pagetable (to verify it's still none)
411	 * and not in the radix tree.
412	 */
413	if (!(dst_vma->vm_flags & VM_SHARED)) {
414		if (!zeropage)
415			err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
416					       dst_addr, src_addr, page);
417		else
418			err = mfill_zeropage_pte(dst_mm, dst_pmd,
419						 dst_vma, dst_addr);
420	} else {
421		if (!zeropage)
422			err = shmem_mcopy_atomic_pte(dst_mm, dst_pmd,
423						     dst_vma, dst_addr,
424						     src_addr, page);
425		else
426			err = shmem_mfill_zeropage_pte(dst_mm, dst_pmd,
427						       dst_vma, dst_addr);
428	}
429
430	return err;
431}
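/*
 * mfill_atomic_pte() is the per-page dispatcher: anonymous vmas and
 * MAP_PRIVATE shmem take the anonymous helpers above, while VM_SHARED
 * shmem goes through shmem_mcopy_atomic_pte()/shmem_mfill_zeropage_pte()
 * so the new page is also inserted into the shmem page cache.
 */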
432
433static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
434					      unsigned long dst_start,
435					      unsigned long src_start,
436					      unsigned long len,
437					      bool zeropage,
438					      bool *mmap_changing)
439{
440	struct vm_area_struct *dst_vma;
441	ssize_t err;
442	pmd_t *dst_pmd;
443	unsigned long src_addr, dst_addr;
444	long copied;
445	struct page *page;
446
447	/*
448	 * Sanitize the command parameters:
449	 */
450	BUG_ON(dst_start & ~PAGE_MASK);
451	BUG_ON(len & ~PAGE_MASK);
452
453	/* Does the address range wrap, or is the span zero-sized? */
454	BUG_ON(src_start + len <= src_start);
455	BUG_ON(dst_start + len <= dst_start);
456
457	src_addr = src_start;
458	dst_addr = dst_start;
459	copied = 0;
460	page = NULL;
461retry:
462	down_read(&dst_mm->mmap_sem);
463
464	/*
 465	 * If memory mappings are changing because of a non-cooperative
466	 * operation (e.g. mremap) running in parallel, bail out and
467	 * request the user to retry later
468	 */
469	err = -EAGAIN;
470	if (mmap_changing && READ_ONCE(*mmap_changing))
471		goto out_unlock;
472
473	/*
474	 * Make sure the vma is not shared, that the dst range is
475	 * both valid and fully within a single existing vma.
476	 */
477	err = -ENOENT;
478	dst_vma = find_vma(dst_mm, dst_start);
479	if (!dst_vma)
480		goto out_unlock;
481	/*
482	 * Check that the vma is registered in uffd; this is required to
483	 * enforce the VM_MAYWRITE check done at uffd registration
484	 * time.
485	 */
486	if (!dst_vma->vm_userfaultfd_ctx.ctx)
487		goto out_unlock;
488
489	if (dst_start < dst_vma->vm_start ||
490	    dst_start + len > dst_vma->vm_end)
491		goto out_unlock;
492
493	err = -EINVAL;
494	/*
495	 * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
496	 * it will overwrite vm_ops, so vma_is_anonymous must return false.
497	 */
498	if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
499	    dst_vma->vm_flags & VM_SHARED))
500		goto out_unlock;
501
502	/*
503	 * If this is a HUGETLB vma, pass off to appropriate routine
504	 */
505	if (is_vm_hugetlb_page(dst_vma))
506		return  __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start,
507						src_start, len, zeropage);
508
509	if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
510		goto out_unlock;
511
512	/*
513	 * Ensure the dst_vma has an anon_vma or this page
514	 * would get a NULL anon_vma when moved in the
515	 * dst_vma.
516	 */
517	err = -ENOMEM;
518	if (!(dst_vma->vm_flags & VM_SHARED) &&
519	    unlikely(anon_vma_prepare(dst_vma)))
520		goto out_unlock;
521
522	while (src_addr < src_start + len) {
523		pmd_t dst_pmdval;
524
525		BUG_ON(dst_addr >= dst_start + len);
526
527		dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
528		if (unlikely(!dst_pmd)) {
529			err = -ENOMEM;
530			break;
531		}
532
533		dst_pmdval = pmd_read_atomic(dst_pmd);
534		/*
535		 * If the dst_pmd is mapped as THP don't
536		 * override it and just be strict.
537		 */
538		if (unlikely(pmd_trans_huge(dst_pmdval))) {
539			err = -EEXIST;
540			break;
541		}
542		if (unlikely(pmd_none(dst_pmdval)) &&
543		    unlikely(__pte_alloc(dst_mm, dst_pmd))) {
544			err = -ENOMEM;
545			break;
546		}
547	/* If a huge pmd materialized from under us, fail */
548		if (unlikely(pmd_trans_huge(*dst_pmd))) {
549			err = -EFAULT;
550			break;
551		}
552
553		BUG_ON(pmd_none(*dst_pmd));
554		BUG_ON(pmd_trans_huge(*dst_pmd));
555
556		err = mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
557				       src_addr, &page, zeropage);
558		cond_resched();
559
560		if (unlikely(err == -ENOENT)) {
561			void *page_kaddr;
562
563			up_read(&dst_mm->mmap_sem);
564			BUG_ON(!page);
565
566			page_kaddr = kmap(page);
567			err = copy_from_user(page_kaddr,
568					     (const void __user *) src_addr,
569					     PAGE_SIZE);
570			kunmap(page);
571			if (unlikely(err)) {
572				err = -EFAULT;
573				goto out;
574			}
575			goto retry;
576		} else
577			BUG_ON(page);
578
579		if (!err) {
580			dst_addr += PAGE_SIZE;
581			src_addr += PAGE_SIZE;
582			copied += PAGE_SIZE;
583
584			if (fatal_signal_pending(current))
585				err = -EINTR;
586		}
587		if (err)
588			break;
589	}
590
591out_unlock:
592	up_read(&dst_mm->mmap_sem);
593out:
594	if (page)
595		put_page(page);
596	BUG_ON(copied < 0);
597	BUG_ON(err > 0);
598	BUG_ON(!copied && !err);
599	return copied ? copied : err;
600}
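/*
 * __mcopy_atomic() returns the number of bytes filled so far, or a
 * negative errno if nothing was copied: e.g. -EAGAIN when *mmap_changing
 * signals a racing non-cooperative event, -ENOENT when the destination
 * range is not covered by a userfaultfd-registered vma, -EEXIST when a
 * destination pte/pmd is already populated, -EINTR on a fatal signal.
 * A partial fill reports the partial byte count rather than the error.
 */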
601
602ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
603		     unsigned long src_start, unsigned long len,
604		     bool *mmap_changing)
605{
606	return __mcopy_atomic(dst_mm, dst_start, src_start, len, false,
607			      mmap_changing);
608}
609
610ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start,
611		       unsigned long len, bool *mmap_changing)
612{
613	return __mcopy_atomic(dst_mm, start, 0, len, true, mmap_changing);
614}
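mcopy_atomic() and mfill_zeropage() are reached from the UFFDIO_COPY and
UFFDIO_ZEROPAGE ioctl handlers in fs/userfaultfd.c. Below is a minimal
userspace sketch of that path (error handling abbreviated; the destination
range must be page aligned and registered with UFFDIO_REGISTER first):

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/userfaultfd.h>

int main(void)
{
	long psize = sysconf(_SC_PAGESIZE);
	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
	struct uffdio_api api = { .api = UFFD_API };
	struct uffdio_register reg;
	struct uffdio_copy copy;
	char *dst, *src;

	if (uffd < 0 || ioctl(uffd, UFFDIO_API, &api))
		return 1;

	dst = mmap(NULL, psize, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	src = malloc(psize);
	if (dst == MAP_FAILED || !src)
		return 1;
	strcpy(src, "filled by UFFDIO_COPY");

	memset(&reg, 0, sizeof(reg));
	reg.range.start = (unsigned long)dst;
	reg.range.len = psize;
	reg.mode = UFFDIO_REGISTER_MODE_MISSING;
	if (ioctl(uffd, UFFDIO_REGISTER, &reg))
		return 1;

	/* Atomically install the page before it is ever touched; the read
	 * below therefore never raises a userfault. */
	memset(&copy, 0, sizeof(copy));
	copy.dst = (unsigned long)dst;
	copy.src = (unsigned long)src;
	copy.len = psize;
	if (ioctl(uffd, UFFDIO_COPY, &copy))
		return 1;

	puts(dst);
	return 0;
}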
v4.17
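The same file as of v4.17 follows for comparison. Relative to the v5.4 copy
above, this older version still carries the GPL notice in the header instead
of an SPDX tag, signals the copy_from_user() fallback with -EFAULT rather
than -ENOENT, has no mmap_changing parameter on __mcopy_atomic(), lacks the
shmem MAP_PRIVATE i_size checks, dispatches in mfill_atomic_pte() on
vma_is_anonymous() rather than VM_SHARED, and still passes the mm/vma to
hugetlb_fault_mutex_hash() and the address to __pte_alloc().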
 
  1/*
  2 *  mm/userfaultfd.c
  3 *
  4 *  Copyright (C) 2015  Red Hat, Inc.
  5 *
  6 *  This work is licensed under the terms of the GNU GPL, version 2. See
  7 *  the COPYING file in the top-level directory.
  8 */
  9
 10#include <linux/mm.h>
 11#include <linux/sched/signal.h>
 12#include <linux/pagemap.h>
 13#include <linux/rmap.h>
 14#include <linux/swap.h>
 15#include <linux/swapops.h>
 16#include <linux/userfaultfd_k.h>
 17#include <linux/mmu_notifier.h>
 18#include <linux/hugetlb.h>
 19#include <linux/shmem_fs.h>
 20#include <asm/tlbflush.h>
 21#include "internal.h"
 22
 23static int mcopy_atomic_pte(struct mm_struct *dst_mm,
 24			    pmd_t *dst_pmd,
 25			    struct vm_area_struct *dst_vma,
 26			    unsigned long dst_addr,
 27			    unsigned long src_addr,
 28			    struct page **pagep)
 29{
 30	struct mem_cgroup *memcg;
 31	pte_t _dst_pte, *dst_pte;
 32	spinlock_t *ptl;
 33	void *page_kaddr;
 34	int ret;
 35	struct page *page;
 36
 37	if (!*pagep) {
 38		ret = -ENOMEM;
 39		page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr);
 40		if (!page)
 41			goto out;
 42
 43		page_kaddr = kmap_atomic(page);
 44		ret = copy_from_user(page_kaddr,
 45				     (const void __user *) src_addr,
 46				     PAGE_SIZE);
 47		kunmap_atomic(page_kaddr);
 48
 49		/* fallback to copy_from_user outside mmap_sem */
 50		if (unlikely(ret)) {
 51			ret = -EFAULT;
 52			*pagep = page;
 53			/* don't free the page */
 54			goto out;
 55		}
 56	} else {
 57		page = *pagep;
 58		*pagep = NULL;
 59	}
 60
 61	/*
 62	 * The memory barrier inside __SetPageUptodate makes sure that
 63	 * preceding stores to the page contents become visible before
 64	 * the set_pte_at() write.
 65	 */
 66	__SetPageUptodate(page);
 67
 68	ret = -ENOMEM;
 69	if (mem_cgroup_try_charge(page, dst_mm, GFP_KERNEL, &memcg, false))
 70		goto out_release;
 71
 72	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
 73	if (dst_vma->vm_flags & VM_WRITE)
 74		_dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte));
 75
 76	ret = -EEXIST;
 77	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
 78	if (!pte_none(*dst_pte))
 79		goto out_release_uncharge_unlock;
 80
 81	inc_mm_counter(dst_mm, MM_ANONPAGES);
 82	page_add_new_anon_rmap(page, dst_vma, dst_addr, false);
 83	mem_cgroup_commit_charge(page, memcg, false, false);
 84	lru_cache_add_active_or_unevictable(page, dst_vma);
 85
 86	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
 87
 88	/* No need to invalidate - it was non-present before */
 89	update_mmu_cache(dst_vma, dst_addr, dst_pte);
 90
 91	pte_unmap_unlock(dst_pte, ptl);
 92	ret = 0;
 93out:
 94	return ret;
 95out_release_uncharge_unlock:
 96	pte_unmap_unlock(dst_pte, ptl);
 97	mem_cgroup_cancel_charge(page, memcg, false);
 98out_release:
 99	put_page(page);
100	goto out;
101}
102
103static int mfill_zeropage_pte(struct mm_struct *dst_mm,
104			      pmd_t *dst_pmd,
105			      struct vm_area_struct *dst_vma,
106			      unsigned long dst_addr)
107{
108	pte_t _dst_pte, *dst_pte;
109	spinlock_t *ptl;
110	int ret;
111
112	_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
113					 dst_vma->vm_page_prot));
114	ret = -EEXIST;
115	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
116	if (!pte_none(*dst_pte))
117		goto out_unlock;
118	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
119	/* No need to invalidate - it was non-present before */
120	update_mmu_cache(dst_vma, dst_addr, dst_pte);
121	ret = 0;
122out_unlock:
123	pte_unmap_unlock(dst_pte, ptl);
124	return ret;
125}
126
127static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
128{
129	pgd_t *pgd;
130	p4d_t *p4d;
131	pud_t *pud;
132
133	pgd = pgd_offset(mm, address);
134	p4d = p4d_alloc(mm, pgd, address);
135	if (!p4d)
136		return NULL;
137	pud = pud_alloc(mm, p4d, address);
138	if (!pud)
139		return NULL;
140	/*
141	 * Note that we don't necessarily run this because the pmd
142	 * was missing: *pmd may already be established, and it may
143	 * even be a trans_huge_pmd.
144	 */
145	return pmd_alloc(mm, pud, address);
146}
147
148#ifdef CONFIG_HUGETLB_PAGE
149/*
150 * __mcopy_atomic processing for HUGETLB vmas.  Note that this routine is
151 * called with mmap_sem held; it will release mmap_sem before returning.
152 */
153static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
154					      struct vm_area_struct *dst_vma,
155					      unsigned long dst_start,
156					      unsigned long src_start,
157					      unsigned long len,
158					      bool zeropage)
159{
160	int vm_alloc_shared = dst_vma->vm_flags & VM_SHARED;
161	int vm_shared = dst_vma->vm_flags & VM_SHARED;
162	ssize_t err;
163	pte_t *dst_pte;
164	unsigned long src_addr, dst_addr;
165	long copied;
166	struct page *page;
167	struct hstate *h;
168	unsigned long vma_hpagesize;
169	pgoff_t idx;
170	u32 hash;
171	struct address_space *mapping;
172
173	/*
174	 * There is no default zero huge page for all huge page sizes as
175	 * supported by hugetlb.  A PMD_SIZE huge page may exist as used
176	 * by THP.  Since we can not reliably insert a zero page, this
177	 * feature is not supported.
178	 */
179	if (zeropage) {
180		up_read(&dst_mm->mmap_sem);
181		return -EINVAL;
182	}
183
184	src_addr = src_start;
185	dst_addr = dst_start;
186	copied = 0;
187	page = NULL;
188	vma_hpagesize = vma_kernel_pagesize(dst_vma);
189
190	/*
191	 * Validate alignment based on huge page size
192	 */
193	err = -EINVAL;
194	if (dst_start & (vma_hpagesize - 1) || len & (vma_hpagesize - 1))
195		goto out_unlock;
196
197retry:
198	/*
199	 * On routine entry dst_vma is set.  If we had to drop mmap_sem and
200	 * retry, dst_vma will be set to NULL and we must look it up again.
201	 */
202	if (!dst_vma) {
203		err = -ENOENT;
204		dst_vma = find_vma(dst_mm, dst_start);
205		if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
206			goto out_unlock;
207		/*
208		 * Only allow __mcopy_atomic_hugetlb on userfaultfd
209		 * registered ranges.
210		 */
211		if (!dst_vma->vm_userfaultfd_ctx.ctx)
212			goto out_unlock;
213
214		if (dst_start < dst_vma->vm_start ||
215		    dst_start + len > dst_vma->vm_end)
216			goto out_unlock;
217
218		err = -EINVAL;
219		if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
220			goto out_unlock;
221
222		vm_shared = dst_vma->vm_flags & VM_SHARED;
223	}
224
225	if (WARN_ON(dst_addr & (vma_hpagesize - 1) ||
226		    (len - copied) & (vma_hpagesize - 1)))
227		goto out_unlock;
228
229	/*
230	 * If not shared, ensure the dst_vma has an anon_vma.
231	 */
232	err = -ENOMEM;
233	if (!vm_shared) {
234		if (unlikely(anon_vma_prepare(dst_vma)))
235			goto out_unlock;
236	}
237
238	h = hstate_vma(dst_vma);
239
240	while (src_addr < src_start + len) {
241		pte_t dst_pteval;
242
243		BUG_ON(dst_addr >= dst_start + len);
244		VM_BUG_ON(dst_addr & ~huge_page_mask(h));
245
246		/*
247		 * Serialize via hugetlb_fault_mutex
248		 */
249		idx = linear_page_index(dst_vma, dst_addr);
250		mapping = dst_vma->vm_file->f_mapping;
251		hash = hugetlb_fault_mutex_hash(h, dst_mm, dst_vma, mapping,
252								idx, dst_addr);
253		mutex_lock(&hugetlb_fault_mutex_table[hash]);
254
255		err = -ENOMEM;
256		dst_pte = huge_pte_alloc(dst_mm, dst_addr, huge_page_size(h));
257		if (!dst_pte) {
258			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
259			goto out_unlock;
260		}
261
262		err = -EEXIST;
263		dst_pteval = huge_ptep_get(dst_pte);
264		if (!huge_pte_none(dst_pteval)) {
265			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
266			goto out_unlock;
267		}
268
269		err = hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma,
270						dst_addr, src_addr, &page);
271
272		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
273		vm_alloc_shared = vm_shared;
274
275		cond_resched();
276
277		if (unlikely(err == -EFAULT)) {
278			up_read(&dst_mm->mmap_sem);
279			BUG_ON(!page);
280
281			err = copy_huge_page_from_user(page,
282						(const void __user *)src_addr,
283						pages_per_huge_page(h), true);
284			if (unlikely(err)) {
285				err = -EFAULT;
286				goto out;
287			}
288			down_read(&dst_mm->mmap_sem);
289
290			dst_vma = NULL;
291			goto retry;
292		} else
293			BUG_ON(page);
294
295		if (!err) {
296			dst_addr += vma_hpagesize;
297			src_addr += vma_hpagesize;
298			copied += vma_hpagesize;
299
300			if (fatal_signal_pending(current))
301				err = -EINTR;
302		}
303		if (err)
304			break;
305	}
306
307out_unlock:
308	up_read(&dst_mm->mmap_sem);
309out:
310	if (page) {
311		/*
312		 * We encountered an error and are about to free a newly
313		 * allocated huge page.
314		 *
315		 * Reservation handling is very subtle, and is different for
316		 * private and shared mappings.  See the routine
317		 * restore_reserve_on_error for details.  Unfortunately, we
318		 * can not call restore_reserve_on_error now as it would
319		 * require holding mmap_sem.
320		 *
321		 * If a reservation for the page existed in the reservation
322		 * map of a private mapping, the map was modified to indicate
323		 * the reservation was consumed when the page was allocated.
324		 * We clear the PagePrivate flag now so that the global
325		 * reserve count will not be incremented in free_huge_page.
326		 * The reservation map will still indicate the reservation
327		 * was consumed and possibly prevent later page allocation.
328		 * This is better than leaking a global reservation.  If no
329		 * reservation existed, it is still safe to clear PagePrivate
330		 * as no adjustments to reservation counts were made during
331		 * allocation.
332		 *
333		 * The reservation map for shared mappings indicates which
334		 * pages have reservations.  When a huge page is allocated
335		 * for an address with a reservation, no change is made to
336		 * the reserve map.  In this case PagePrivate will be set
337		 * to indicate that the global reservation count should be
338		 * incremented when the page is freed.  This is the desired
339		 * behavior.  However, when a huge page is allocated for an
340		 * address without a reservation a reservation entry is added
341		 * to the reservation map, and PagePrivate will not be set.
342		 * When the page is freed, the global reserve count will NOT
343		 * be incremented and it will appear as though we have leaked
344		 * a reserved page.  In this case, set PagePrivate so that the
345		 * global reserve count will be incremented to match the
346		 * reservation map entry which was created.
347		 *
348		 * Note that vm_alloc_shared is based on the flags of the vma
349		 * for which the page was originally allocated.  dst_vma could
350		 * be different or NULL on error.
351		 */
352		if (vm_alloc_shared)
353			SetPagePrivate(page);
354		else
355			ClearPagePrivate(page);
356		put_page(page);
357	}
358	BUG_ON(copied < 0);
359	BUG_ON(err > 0);
360	BUG_ON(!copied && !err);
361	return copied ? copied : err;
362}
363#else /* !CONFIG_HUGETLB_PAGE */
364/* fail at build time if gcc attempts to use this */
365extern ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
366				      struct vm_area_struct *dst_vma,
367				      unsigned long dst_start,
368				      unsigned long src_start,
369				      unsigned long len,
370				      bool zeropage);
371#endif /* CONFIG_HUGETLB_PAGE */
372
373static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
374						pmd_t *dst_pmd,
375						struct vm_area_struct *dst_vma,
376						unsigned long dst_addr,
377						unsigned long src_addr,
378						struct page **page,
379						bool zeropage)
380{
381	ssize_t err;
382
383	if (vma_is_anonymous(dst_vma)) {
384		if (!zeropage)
385			err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
386					       dst_addr, src_addr, page);
387		else
388			err = mfill_zeropage_pte(dst_mm, dst_pmd,
389						 dst_vma, dst_addr);
390	} else {
391		if (!zeropage)
392			err = shmem_mcopy_atomic_pte(dst_mm, dst_pmd,
393						     dst_vma, dst_addr,
394						     src_addr, page);
395		else
396			err = shmem_mfill_zeropage_pte(dst_mm, dst_pmd,
397						       dst_vma, dst_addr);
398	}
399
400	return err;
401}
402
403static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
404					      unsigned long dst_start,
405					      unsigned long src_start,
406					      unsigned long len,
407					      bool zeropage)
408{
409	struct vm_area_struct *dst_vma;
410	ssize_t err;
411	pmd_t *dst_pmd;
412	unsigned long src_addr, dst_addr;
413	long copied;
414	struct page *page;
415
416	/*
417	 * Sanitize the command parameters:
418	 */
419	BUG_ON(dst_start & ~PAGE_MASK);
420	BUG_ON(len & ~PAGE_MASK);
421
422	/* Does the address range wrap, or is the span zero-sized? */
423	BUG_ON(src_start + len <= src_start);
424	BUG_ON(dst_start + len <= dst_start);
425
426	src_addr = src_start;
427	dst_addr = dst_start;
428	copied = 0;
429	page = NULL;
430retry:
431	down_read(&dst_mm->mmap_sem);
432
433	/*
434	 * Make sure the vma is not shared, that the dst range is
435	 * both valid and fully within a single existing vma.
436	 */
437	err = -ENOENT;
438	dst_vma = find_vma(dst_mm, dst_start);
439	if (!dst_vma)
440		goto out_unlock;
441	/*
442	 * Be strict and only allow __mcopy_atomic on userfaultfd
443	 * registered ranges to prevent userland errors going
444	 * unnoticed. As far as the VM consistency is concerned, it
445	 * would be perfectly safe to remove this check, but there's
446	 * no useful usage for __mcopy_atomic outside of userfaultfd
447	 * registered ranges. This is after all why these are ioctls
448	 * belonging to the userfaultfd and not syscalls.
449	 */
450	if (!dst_vma->vm_userfaultfd_ctx.ctx)
451		goto out_unlock;
452
453	if (dst_start < dst_vma->vm_start ||
454	    dst_start + len > dst_vma->vm_end)
455		goto out_unlock;
456
457	err = -EINVAL;
458	/*
459	 * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
460	 * it will overwrite vm_ops, so vma_is_anonymous must return false.
461	 */
462	if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
463	    dst_vma->vm_flags & VM_SHARED))
464		goto out_unlock;
465
466	/*
467	 * If this is a HUGETLB vma, pass off to appropriate routine
468	 */
469	if (is_vm_hugetlb_page(dst_vma))
470		return  __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start,
471						src_start, len, zeropage);
472
473	if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
474		goto out_unlock;
475
476	/*
477	 * Ensure the dst_vma has a anon_vma or this page
478	 * would get a NULL anon_vma when moved in the
479	 * dst_vma.
480	 */
481	err = -ENOMEM;
482	if (vma_is_anonymous(dst_vma) && unlikely(anon_vma_prepare(dst_vma)))
483		goto out_unlock;
484
485	while (src_addr < src_start + len) {
486		pmd_t dst_pmdval;
487
488		BUG_ON(dst_addr >= dst_start + len);
489
490		dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
491		if (unlikely(!dst_pmd)) {
492			err = -ENOMEM;
493			break;
494		}
495
496		dst_pmdval = pmd_read_atomic(dst_pmd);
497		/*
498		 * If the dst_pmd is mapped as THP don't
499		 * override it and just be strict.
500		 */
501		if (unlikely(pmd_trans_huge(dst_pmdval))) {
502			err = -EEXIST;
503			break;
504		}
505		if (unlikely(pmd_none(dst_pmdval)) &&
506		    unlikely(__pte_alloc(dst_mm, dst_pmd, dst_addr))) {
507			err = -ENOMEM;
508			break;
509		}
510	/* If a huge pmd materialized from under us, fail */
511		if (unlikely(pmd_trans_huge(*dst_pmd))) {
512			err = -EFAULT;
513			break;
514		}
515
516		BUG_ON(pmd_none(*dst_pmd));
517		BUG_ON(pmd_trans_huge(*dst_pmd));
518
519		err = mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
520				       src_addr, &page, zeropage);
521		cond_resched();
522
523		if (unlikely(err == -EFAULT)) {
524			void *page_kaddr;
525
526			up_read(&dst_mm->mmap_sem);
527			BUG_ON(!page);
528
529			page_kaddr = kmap(page);
530			err = copy_from_user(page_kaddr,
531					     (const void __user *) src_addr,
532					     PAGE_SIZE);
533			kunmap(page);
534			if (unlikely(err)) {
535				err = -EFAULT;
536				goto out;
537			}
538			goto retry;
539		} else
540			BUG_ON(page);
541
542		if (!err) {
543			dst_addr += PAGE_SIZE;
544			src_addr += PAGE_SIZE;
545			copied += PAGE_SIZE;
546
547			if (fatal_signal_pending(current))
548				err = -EINTR;
549		}
550		if (err)
551			break;
552	}
553
554out_unlock:
555	up_read(&dst_mm->mmap_sem);
556out:
557	if (page)
558		put_page(page);
559	BUG_ON(copied < 0);
560	BUG_ON(err > 0);
561	BUG_ON(!copied && !err);
562	return copied ? copied : err;
563}
564
565ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
566		     unsigned long src_start, unsigned long len)
567{
568	return __mcopy_atomic(dst_mm, dst_start, src_start, len, false);
569}
570
571ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start,
572		       unsigned long len)
573{
574	return __mcopy_atomic(dst_mm, start, 0, len, true);
575}