// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/spinlock.h>

#include <linux/mm.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/secretmem.h>

#include <linux/sched/signal.h>
#include <linux/rwsem.h>
#include <linux/hugetlb.h>
#include <linux/migrate.h>
#include <linux/mm_inline.h>
#include <linux/sched/mm.h>

#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "internal.h"

struct follow_page_context {
	struct dev_pagemap *pgmap;
	unsigned int page_mask;
};

static inline void sanity_check_pinned_pages(struct page **pages,
					     unsigned long npages)
{
	if (!IS_ENABLED(CONFIG_DEBUG_VM))
		return;

	/*
	 * We only pin anonymous pages if they are exclusive. Once pinned, we
	 * can no longer turn them possibly shared and PageAnonExclusive() will
	 * stick around until the page is freed.
	 *
	 * We'd like to verify that our pinned anonymous pages are still mapped
	 * exclusively. The issue with anon THP is that we don't know how
	 * they are/were mapped when pinning them. However, for anon
	 * THP we can assume that either the given page (PTE-mapped THP) or
	 * the head page (PMD-mapped THP) should be PageAnonExclusive(). If
	 * neither is the case, there is certainly something wrong.
	 */
	for (; npages; npages--, pages++) {
		struct page *page = *pages;
		struct folio *folio = page_folio(page);

		if (!folio_test_anon(folio))
			continue;
		if (!folio_test_large(folio) || folio_test_hugetlb(folio))
			VM_BUG_ON_PAGE(!PageAnonExclusive(&folio->page), page);
		else
			/* Either a PTE-mapped or a PMD-mapped THP. */
			VM_BUG_ON_PAGE(!PageAnonExclusive(&folio->page) &&
				       !PageAnonExclusive(page), page);
	}
}

/*
 * Return the folio with ref appropriately incremented,
 * or NULL if that failed.
 */
static inline struct folio *try_get_folio(struct page *page, int refs)
{
	struct folio *folio;

retry:
	folio = page_folio(page);
	if (WARN_ON_ONCE(folio_ref_count(folio) < 0))
		return NULL;
	if (unlikely(!folio_ref_try_add_rcu(folio, refs)))
		return NULL;

	/*
	 * At this point we have a stable reference to the folio; but it
	 * could be that between calling page_folio() and the refcount
	 * increment, the folio was split, in which case we'd end up
	 * holding a reference on a folio that has nothing to do with the page
	 * we were given anymore.
	 * So now that the folio is stable, recheck that the page still
	 * belongs to this folio.
	 */
	if (unlikely(page_folio(page) != folio)) {
		if (!put_devmap_managed_page_refs(&folio->page, refs))
			folio_put_refs(folio, refs);
		goto retry;
	}

	return folio;
}

/**
 * try_grab_folio() - Attempt to get or pin a folio.
 * @page:  pointer to page to be grabbed
 * @refs:  the value to (effectively) add to the folio's refcount
 * @flags: gup flags: these are the FOLL_* flag values.
 *
 * "grab" names in this file mean, "look at flags to decide whether to use
 * FOLL_PIN or FOLL_GET behavior, when incrementing the folio's refcount.
 *
 * Either FOLL_PIN or FOLL_GET (or neither) must be set, but not both at the
 * same time. (That's true throughout the get_user_pages*() and
 * pin_user_pages*() APIs.) Cases:
 *
 *    FOLL_GET: folio's refcount will be incremented by @refs.
 *
 *    FOLL_PIN on large folios: folio's refcount will be incremented by
 *    @refs, and its compound_pincount will be incremented by @refs.
 *
 *    FOLL_PIN on single-page folios: folio's refcount will be incremented by
 *    @refs * GUP_PIN_COUNTING_BIAS.
 *
 * Return: The folio containing @page (with refcount appropriately
 * incremented) for success, or NULL upon failure. If neither FOLL_GET
 * nor FOLL_PIN was set, that's considered failure, and furthermore,
 * a likely bug in the caller, so a warning is also emitted.
 */
struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags)
{
	if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)))
		return NULL;

	if (flags & FOLL_GET)
		return try_get_folio(page, refs);
	else if (flags & FOLL_PIN) {
		struct folio *folio;

		/*
		 * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if not in a
		 * right zone, so fail and let the caller fall back to the slow
		 * path.
		 */
		if (unlikely((flags & FOLL_LONGTERM) &&
			     !is_longterm_pinnable_page(page)))
			return NULL;

		/*
		 * CAUTION: Don't use compound_head() on the page before this
		 * point, the result won't be stable.
		 */
		folio = try_get_folio(page, refs);
		if (!folio)
			return NULL;

		/*
		 * When pinning a large folio, use an exact count to track it.
		 *
		 * However, be sure to *also* increment the normal folio
		 * refcount field at least once, so that the folio really
		 * is pinned. That's why the refcount from the earlier
		 * try_get_folio() is left intact.
		 */
		if (folio_test_large(folio))
			atomic_add(refs, folio_pincount_ptr(folio));
		else
			folio_ref_add(folio,
				      refs * (GUP_PIN_COUNTING_BIAS - 1));
		/*
		 * Adjust the pincount before re-checking the PTE for changes.
		 * This is essentially a smp_mb() and is paired with a memory
		 * barrier in page_try_share_anon_rmap().
		 */
		smp_mb__after_atomic();

		node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs);

		return folio;
	}

	WARN_ON_ONCE(1);
	return NULL;
}
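
/*
 * Illustrative note (a sketch, not code used by this file): the refcount bias
 * applied above is what folio_maybe_dma_pinned() keys off when other code
 * needs to ask "might this folio be DMA-pinned?", roughly:
 *
 *	if (folio_maybe_dma_pinned(folio))
 *		return false;	// e.g. refuse to reuse the page for COW
 *
 * For small folios that check compares folio_ref_count() against
 * GUP_PIN_COUNTING_BIAS; for large folios it reads the exact pincount kept
 * via folio_pincount_ptr() above.
 */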

static void gup_put_folio(struct folio *folio, int refs, unsigned int flags)
{
	if (flags & FOLL_PIN) {
		node_stat_mod_folio(folio, NR_FOLL_PIN_RELEASED, refs);
		if (folio_test_large(folio))
			atomic_sub(refs, folio_pincount_ptr(folio));
		else
			refs *= GUP_PIN_COUNTING_BIAS;
	}

	if (!put_devmap_managed_page_refs(&folio->page, refs))
		folio_put_refs(folio, refs);
}

/**
 * try_grab_page() - elevate a page's refcount by a flag-dependent amount
 * @page:  pointer to page to be grabbed
 * @flags: gup flags: these are the FOLL_* flag values.
 *
 * This might not do anything at all, depending on the flags argument.
 *
 * "grab" names in this file mean, "look at flags to decide whether to use
 * FOLL_PIN or FOLL_GET behavior, when incrementing the page's refcount.
 *
 * Either FOLL_PIN or FOLL_GET (or neither) may be set, but not both at the same
 * time. Cases: please see the try_grab_folio() documentation, with
 * "refs=1".
 *
 * Return: 0 for success, or if no action was required (if neither FOLL_PIN
 * nor FOLL_GET was set, nothing is done). A negative error code for failure:
 *
 *   -ENOMEM	FOLL_GET or FOLL_PIN was set, but the page could not
 *		be grabbed.
 */
int __must_check try_grab_page(struct page *page, unsigned int flags)
{
	struct folio *folio = page_folio(page);

	WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == (FOLL_GET | FOLL_PIN));
	if (WARN_ON_ONCE(folio_ref_count(folio) <= 0))
		return -ENOMEM;

	if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)))
		return -EREMOTEIO;

	if (flags & FOLL_GET)
		folio_ref_inc(folio);
	else if (flags & FOLL_PIN) {
		/*
		 * Similar to try_grab_folio(): be sure to *also*
		 * increment the normal page refcount field at least once,
		 * so that the page really is pinned.
		 */
		if (folio_test_large(folio)) {
			folio_ref_add(folio, 1);
			atomic_add(1, folio_pincount_ptr(folio));
		} else {
			folio_ref_add(folio, GUP_PIN_COUNTING_BIAS);
		}

		node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, 1);
	}

	return 0;
}

/**
 * unpin_user_page() - release a dma-pinned page
 * @page: pointer to page to be released
 *
 * Pages that were pinned via pin_user_pages*() must be released via either
 * unpin_user_page(), or one of the unpin_user_pages*() routines. This is so
 * that such pages can be separately tracked and uniquely handled. In
 * particular, interactions with RDMA and filesystems need special handling.
 */
void unpin_user_page(struct page *page)
{
	sanity_check_pinned_pages(&page, 1);
	gup_put_folio(page_folio(page), 1, FOLL_PIN);
}
EXPORT_SYMBOL(unpin_user_page);
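
/*
 * Typical pairing, as a rough sketch (the buffer setup and the actual I/O are
 * placeholders): pages obtained via pin_user_pages*() are released with
 * unpin_user_page() once the DMA or I/O is done:
 *
 *	nr = pin_user_pages_fast(addr, nr_pages, FOLL_WRITE, pages);
 *	if (nr > 0) {
 *		// ... hand the pages to the device ...
 *		while (nr--)
 *			unpin_user_page(pages[nr]);
 *	}
 */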

static inline struct folio *gup_folio_range_next(struct page *start,
		unsigned long npages, unsigned long i, unsigned int *ntails)
{
	struct page *next = nth_page(start, i);
	struct folio *folio = page_folio(next);
	unsigned int nr = 1;

	if (folio_test_large(folio))
		nr = min_t(unsigned int, npages - i,
			   folio_nr_pages(folio) - folio_page_idx(folio, next));

	*ntails = nr;
	return folio;
}

static inline struct folio *gup_folio_next(struct page **list,
		unsigned long npages, unsigned long i, unsigned int *ntails)
{
	struct folio *folio = page_folio(list[i]);
	unsigned int nr;

	for (nr = i + 1; nr < npages; nr++) {
		if (page_folio(list[nr]) != folio)
			break;
	}

	*ntails = nr - i;
	return folio;
}

/**
 * unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
 * @pages:  array of pages to be maybe marked dirty, and definitely released.
 * @npages: number of pages in the @pages array.
 * @make_dirty: whether to mark the pages dirty
 *
 * "gup-pinned page" refers to a page that has had one of the get_user_pages()
 * variants called on that page.
 *
 * For each page in the @pages array, make that page (or its head page, if a
 * compound page) dirty, if @make_dirty is true, and if the page was previously
 * listed as clean. In any case, releases all pages using unpin_user_page(),
 * possibly via unpin_user_pages(), for the non-dirty case.
 *
 * Please see the unpin_user_page() documentation for details.
 *
 * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
 * required, then the caller should a) verify that this is really correct,
 * because _lock() is usually required, and b) hand code it:
 * set_page_dirty_lock(), unpin_user_page().
 *
 */
void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
				 bool make_dirty)
{
	unsigned long i;
	struct folio *folio;
	unsigned int nr;

	if (!make_dirty) {
		unpin_user_pages(pages, npages);
		return;
	}

	sanity_check_pinned_pages(pages, npages);
	for (i = 0; i < npages; i += nr) {
		folio = gup_folio_next(pages, npages, i, &nr);
		/*
		 * Checking PageDirty at this point may race with
		 * clear_page_dirty_for_io(), but that's OK. Two key
		 * cases:
		 *
		 * 1) This code sees the page as already dirty, so it
		 * skips the call to set_page_dirty(). That could happen
		 * because clear_page_dirty_for_io() called
		 * page_mkclean(), followed by set_page_dirty().
		 * However, now the page is going to get written back,
		 * which meets the original intention of setting it
		 * dirty, so all is well: clear_page_dirty_for_io() goes
		 * on to call TestClearPageDirty(), and write the page
		 * back.
		 *
		 * 2) This code sees the page as clean, so it calls
		 * set_page_dirty(). The page stays dirty, despite being
		 * written back, so it gets written back again in the
		 * next writeback cycle. This is harmless.
		 */
		if (!folio_test_dirty(folio)) {
			folio_lock(folio);
			folio_mark_dirty(folio);
			folio_unlock(folio);
		}
		gup_put_folio(folio, nr, FOLL_PIN);
	}
}
EXPORT_SYMBOL(unpin_user_pages_dirty_lock);

/**
 * unpin_user_page_range_dirty_lock() - release and optionally dirty
 * gup-pinned page range
 *
 * @page:  the starting page of a range maybe marked dirty, and definitely released.
 * @npages: number of consecutive pages to release.
 * @make_dirty: whether to mark the pages dirty
 *
 * "gup-pinned page range" refers to a range of pages that has had one of the
 * pin_user_pages() variants called on that page.
 *
 * For the page ranges defined by [page .. page+npages], make that range (or
 * its head pages, if a compound page) dirty, if @make_dirty is true, and if the
 * page range was previously listed as clean.
 *
 * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
 * required, then the caller should a) verify that this is really correct,
 * because _lock() is usually required, and b) hand code it:
 * set_page_dirty_lock(), unpin_user_page().
 *
 */
void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
				      bool make_dirty)
{
	unsigned long i;
	struct folio *folio;
	unsigned int nr;

	for (i = 0; i < npages; i += nr) {
		folio = gup_folio_range_next(page, npages, i, &nr);
		if (make_dirty && !folio_test_dirty(folio)) {
			folio_lock(folio);
			folio_mark_dirty(folio);
			folio_unlock(folio);
		}
		gup_put_folio(folio, nr, FOLL_PIN);
	}
}
EXPORT_SYMBOL(unpin_user_page_range_dirty_lock);

static void unpin_user_pages_lockless(struct page **pages, unsigned long npages)
{
	unsigned long i;
	struct folio *folio;
	unsigned int nr;

	/*
	 * Don't perform any sanity checks because we might have raced with
	 * fork() and some anonymous pages might now actually be shared --
	 * which is why we're unpinning after all.
	 */
	for (i = 0; i < npages; i += nr) {
		folio = gup_folio_next(pages, npages, i, &nr);
		gup_put_folio(folio, nr, FOLL_PIN);
	}
}

/**
 * unpin_user_pages() - release an array of gup-pinned pages.
 * @pages:  array of pages to be marked dirty and released.
 * @npages: number of pages in the @pages array.
 *
 * For each page in the @pages array, release the page using unpin_user_page().
 *
 * Please see the unpin_user_page() documentation for details.
 */
void unpin_user_pages(struct page **pages, unsigned long npages)
{
	unsigned long i;
	struct folio *folio;
	unsigned int nr;

	/*
	 * If this WARN_ON() fires, then the system *might* be leaking pages (by
	 * leaving them pinned), but probably not. More likely, gup/pup returned
	 * a hard -ERRNO error to the caller, who erroneously passed it here.
	 */
	if (WARN_ON(IS_ERR_VALUE(npages)))
		return;

	sanity_check_pinned_pages(pages, npages);
	for (i = 0; i < npages; i += nr) {
		folio = gup_folio_next(pages, npages, i, &nr);
		gup_put_folio(folio, nr, FOLL_PIN);
	}
}
EXPORT_SYMBOL(unpin_user_pages);

/*
 * Set the MMF_HAS_PINNED if not set yet; after set it'll be there for the mm's
 * lifecycle. Avoid setting the bit unless necessary, or it might cause write
 * cache bouncing on large SMP machines for concurrent pinned gups.
 */
static inline void mm_set_has_pinned_flag(unsigned long *mm_flags)
{
	if (!test_bit(MMF_HAS_PINNED, mm_flags))
		set_bit(MMF_HAS_PINNED, mm_flags);
}

#ifdef CONFIG_MMU
static struct page *no_page_table(struct vm_area_struct *vma,
		unsigned int flags)
{
	/*
	 * When core dumping an enormous anonymous area that nobody
	 * has touched so far, we don't want to allocate unnecessary pages or
	 * page tables. Return error instead of NULL to skip handle_mm_fault,
	 * then get_dump_page() will return NULL to leave a hole in the dump.
	 * But we can only make this optimization where a hole would surely
	 * be zero-filled if handle_mm_fault() actually did handle it.
	 */
	if ((flags & FOLL_DUMP) &&
	    (vma_is_anonymous(vma) || !vma->vm_ops->fault))
		return ERR_PTR(-EFAULT);
	return NULL;
}

static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
		pte_t *pte, unsigned int flags)
{
	if (flags & FOLL_TOUCH) {
		pte_t entry = *pte;

		if (flags & FOLL_WRITE)
			entry = pte_mkdirty(entry);
		entry = pte_mkyoung(entry);

		if (!pte_same(*pte, entry)) {
			set_pte_at(vma->vm_mm, address, pte, entry);
			update_mmu_cache(vma, address, pte);
		}
	}

	/* Proper page table entry exists, but no corresponding struct page */
	return -EEXIST;
}

/* FOLL_FORCE can write to even unwritable PTEs in COW mappings. */
static inline bool can_follow_write_pte(pte_t pte, struct page *page,
					struct vm_area_struct *vma,
					unsigned int flags)
{
	/* If the pte is writable, we can write to the page. */
	if (pte_write(pte))
		return true;

	/* Maybe FOLL_FORCE is set to override it? */
	if (!(flags & FOLL_FORCE))
		return false;

	/* But FOLL_FORCE has no effect on shared mappings */
	if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
		return false;

	/* ... or read-only private ones */
	if (!(vma->vm_flags & VM_MAYWRITE))
		return false;

	/* ... or already writable ones that just need to take a write fault */
	if (vma->vm_flags & VM_WRITE)
		return false;

	/*
	 * See can_change_pte_writable(): we broke COW and could map the page
	 * writable if we have an exclusive anonymous page ...
	 */
	if (!page || !PageAnon(page) || !PageAnonExclusive(page))
		return false;

	/* ... and a write-fault isn't required for other reasons. */
	if (vma_soft_dirty_enabled(vma) && !pte_soft_dirty(pte))
		return false;
	return !userfaultfd_pte_wp(vma, pte);
}

static struct page *follow_page_pte(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmd, unsigned int flags,
		struct dev_pagemap **pgmap)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	spinlock_t *ptl;
	pte_t *ptep, pte;
	int ret;

	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
	if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
			 (FOLL_PIN | FOLL_GET)))
		return ERR_PTR(-EINVAL);
	if (unlikely(pmd_bad(*pmd)))
		return no_page_table(vma, flags);

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;
	if (!pte_present(pte))
		goto no_page;
	if (pte_protnone(pte) && !gup_can_follow_protnone(flags))
		goto no_page;

	page = vm_normal_page(vma, address, pte);

	/*
	 * We only care about anon pages in can_follow_write_pte() and don't
	 * have to worry about pte_devmap() because they are never anon.
	 */
	if ((flags & FOLL_WRITE) &&
	    !can_follow_write_pte(pte, page, vma, flags)) {
		page = NULL;
		goto out;
	}

	if (!page && pte_devmap(pte) && (flags & (FOLL_GET | FOLL_PIN))) {
		/*
		 * Only return device mapping pages in the FOLL_GET or FOLL_PIN
		 * case since they are only valid while holding the pgmap
		 * reference.
		 */
		*pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap);
		if (*pgmap)
			page = pte_page(pte);
		else
			goto no_page;
	} else if (unlikely(!page)) {
		if (flags & FOLL_DUMP) {
			/* Avoid special (like zero) pages in core dumps */
			page = ERR_PTR(-EFAULT);
			goto out;
		}

		if (is_zero_pfn(pte_pfn(pte))) {
			page = pte_page(pte);
		} else {
			ret = follow_pfn_pte(vma, address, ptep, flags);
			page = ERR_PTR(ret);
			goto out;
		}
	}

	if (!pte_write(pte) && gup_must_unshare(vma, flags, page)) {
		page = ERR_PTR(-EMLINK);
		goto out;
	}

	VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
		       !PageAnonExclusive(page), page);

	/* try_grab_page() does nothing unless FOLL_GET or FOLL_PIN is set. */
	ret = try_grab_page(page, flags);
	if (unlikely(ret)) {
		page = ERR_PTR(ret);
		goto out;
	}

	/*
	 * We need to make the page accessible if and only if we are going
	 * to access its content (the FOLL_PIN case). Please see
	 * Documentation/core-api/pin_user_pages.rst for details.
	 */
	if (flags & FOLL_PIN) {
		ret = arch_make_page_accessible(page);
		if (ret) {
			unpin_user_page(page);
			page = ERR_PTR(ret);
			goto out;
		}
	}
	if (flags & FOLL_TOUCH) {
		if ((flags & FOLL_WRITE) &&
		    !pte_dirty(pte) && !PageDirty(page))
			set_page_dirty(page);
		/*
		 * pte_mkyoung() would be more correct here, but atomic care
		 * is needed to avoid losing the dirty bit: it is easier to use
		 * mark_page_accessed().
		 */
		mark_page_accessed(page);
	}
out:
	pte_unmap_unlock(ptep, ptl);
	return page;
no_page:
	pte_unmap_unlock(ptep, ptl);
	if (!pte_none(pte))
		return NULL;
	return no_page_table(vma, flags);
}

static struct page *follow_pmd_mask(struct vm_area_struct *vma,
				    unsigned long address, pud_t *pudp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	pmd_t *pmd, pmdval;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	pmd = pmd_offset(pudp, address);
	/*
	 * The READ_ONCE() will stabilize the pmdval in a register or
	 * on the stack so that it will stop changing under the code.
	 */
	pmdval = READ_ONCE(*pmd);
	if (pmd_none(pmdval))
		return no_page_table(vma, flags);
	if (!pmd_present(pmdval))
		return no_page_table(vma, flags);
	if (pmd_devmap(pmdval)) {
		ptl = pmd_lock(mm, pmd);
		page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
		spin_unlock(ptl);
		if (page)
			return page;
	}
	if (likely(!pmd_trans_huge(pmdval)))
		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);

	if (pmd_protnone(pmdval) && !gup_can_follow_protnone(flags))
		return no_page_table(vma, flags);

	ptl = pmd_lock(mm, pmd);
	if (unlikely(!pmd_present(*pmd))) {
		spin_unlock(ptl);
		return no_page_table(vma, flags);
	}
	if (unlikely(!pmd_trans_huge(*pmd))) {
		spin_unlock(ptl);
		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
	}
	if (flags & FOLL_SPLIT_PMD) {
		int ret;
		page = pmd_page(*pmd);
		if (is_huge_zero_page(page)) {
			spin_unlock(ptl);
			ret = 0;
			split_huge_pmd(vma, pmd, address);
			if (pmd_trans_unstable(pmd))
				ret = -EBUSY;
		} else {
			spin_unlock(ptl);
			split_huge_pmd(vma, pmd, address);
			ret = pte_alloc(mm, pmd) ? -ENOMEM : 0;
		}

		return ret ? ERR_PTR(ret) :
			follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
	}
	page = follow_trans_huge_pmd(vma, address, pmd, flags);
	spin_unlock(ptl);
	ctx->page_mask = HPAGE_PMD_NR - 1;
	return page;
}

static struct page *follow_pud_mask(struct vm_area_struct *vma,
				    unsigned long address, p4d_t *p4dp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	pud_t *pud;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	pud = pud_offset(p4dp, address);
	if (pud_none(*pud))
		return no_page_table(vma, flags);
	if (pud_devmap(*pud)) {
		ptl = pud_lock(mm, pud);
		page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap);
		spin_unlock(ptl);
		if (page)
			return page;
	}
	if (unlikely(pud_bad(*pud)))
		return no_page_table(vma, flags);

	return follow_pmd_mask(vma, address, pud, flags, ctx);
}

static struct page *follow_p4d_mask(struct vm_area_struct *vma,
				    unsigned long address, pgd_t *pgdp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	p4d_t *p4d;

	p4d = p4d_offset(pgdp, address);
	if (p4d_none(*p4d))
		return no_page_table(vma, flags);
	BUILD_BUG_ON(p4d_huge(*p4d));
	if (unlikely(p4d_bad(*p4d)))
		return no_page_table(vma, flags);

	return follow_pud_mask(vma, address, p4d, flags, ctx);
}

/**
 * follow_page_mask - look up a page descriptor from a user-virtual address
 * @vma: vm_area_struct mapping @address
 * @address: virtual address to look up
 * @flags: flags modifying lookup behaviour
 * @ctx: contains dev_pagemap for %ZONE_DEVICE memory pinning and a
 *       pointer to output page_mask
 *
 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
 *
 * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches
 * the device's dev_pagemap metadata to avoid repeating expensive lookups.
 *
 * When getting an anonymous page and the caller has to trigger unsharing
 * of a shared anonymous page first, -EMLINK is returned. The caller should
 * trigger a fault with FAULT_FLAG_UNSHARE set. Note that unsharing is only
 * relevant with FOLL_PIN and !FOLL_WRITE.
 *
 * On output, the @ctx->page_mask is set according to the size of the page.
 *
 * Return: the mapped (struct page *), %NULL if no mapping exists, or
 * an error pointer if there is a mapping to something not represented
 * by a page descriptor (see also vm_normal_page()).
 */
static struct page *follow_page_mask(struct vm_area_struct *vma,
		unsigned long address, unsigned int flags,
		struct follow_page_context *ctx)
{
	pgd_t *pgd;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	ctx->page_mask = 0;

	/*
	 * Call hugetlb_follow_page_mask for hugetlb vmas as it will use
	 * special hugetlb page table walking code. This eliminates the
	 * need to check for hugetlb entries in the general walking code.
	 *
	 * hugetlb_follow_page_mask is only for follow_page() handling here.
	 * Ordinary GUP uses follow_hugetlb_page for hugetlb processing.
	 */
	if (is_vm_hugetlb_page(vma)) {
		page = hugetlb_follow_page_mask(vma, address, flags);
		if (!page)
			page = no_page_table(vma, flags);
		return page;
	}

	pgd = pgd_offset(mm, address);

	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		return no_page_table(vma, flags);

	return follow_p4d_mask(vma, address, pgd, flags, ctx);
}

struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
			 unsigned int foll_flags)
{
	struct follow_page_context ctx = { NULL };
	struct page *page;

	if (vma_is_secretmem(vma))
		return NULL;

	if (foll_flags & FOLL_PIN)
		return NULL;

	page = follow_page_mask(vma, address, foll_flags, &ctx);
	if (ctx.pgmap)
		put_dev_pagemap(ctx.pgmap);
	return page;
}

static int get_gate_page(struct mm_struct *mm, unsigned long address,
		unsigned int gup_flags, struct vm_area_struct **vma,
		struct page **page)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret = -EFAULT;

	/* user gate pages are read-only */
	if (gup_flags & FOLL_WRITE)
		return -EFAULT;
	if (address > TASK_SIZE)
		pgd = pgd_offset_k(address);
	else
		pgd = pgd_offset_gate(mm, address);
	if (pgd_none(*pgd))
		return -EFAULT;
	p4d = p4d_offset(pgd, address);
	if (p4d_none(*p4d))
		return -EFAULT;
	pud = pud_offset(p4d, address);
	if (pud_none(*pud))
		return -EFAULT;
	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return -EFAULT;
	VM_BUG_ON(pmd_trans_huge(*pmd));
	pte = pte_offset_map(pmd, address);
	if (pte_none(*pte))
		goto unmap;
	*vma = get_gate_vma(mm);
	if (!page)
		goto out;
	*page = vm_normal_page(*vma, address, *pte);
	if (!*page) {
		if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
			goto unmap;
		*page = pte_page(*pte);
	}
	ret = try_grab_page(*page, gup_flags);
	if (unlikely(ret))
		goto unmap;
out:
	ret = 0;
unmap:
	pte_unmap(pte);
	return ret;
}

/*
 * mmap_lock must be held on entry. If @locked != NULL and *@flags
 * does not include FOLL_NOWAIT, the mmap_lock may be released. If it
 * is, *@locked will be set to 0 and -EBUSY returned.
 */
static int faultin_page(struct vm_area_struct *vma,
		unsigned long address, unsigned int *flags, bool unshare,
		int *locked)
{
	unsigned int fault_flags = 0;
	vm_fault_t ret;

	if (*flags & FOLL_NOFAULT)
		return -EFAULT;
	if (*flags & FOLL_WRITE)
		fault_flags |= FAULT_FLAG_WRITE;
	if (*flags & FOLL_REMOTE)
		fault_flags |= FAULT_FLAG_REMOTE;
	if (locked) {
		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
		/*
		 * FAULT_FLAG_INTERRUPTIBLE is opt-in. GUP callers must set
		 * FOLL_INTERRUPTIBLE to enable FAULT_FLAG_INTERRUPTIBLE.
		 * That's because some callers may not be prepared to
		 * handle early exits caused by non-fatal signals.
		 */
		if (*flags & FOLL_INTERRUPTIBLE)
			fault_flags |= FAULT_FLAG_INTERRUPTIBLE;
	}
	if (*flags & FOLL_NOWAIT)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
	if (*flags & FOLL_TRIED) {
		/*
		 * Note: FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_TRIED
		 * can co-exist
		 */
		fault_flags |= FAULT_FLAG_TRIED;
	}
	if (unshare) {
		fault_flags |= FAULT_FLAG_UNSHARE;
		/* FAULT_FLAG_WRITE and FAULT_FLAG_UNSHARE are incompatible */
		VM_BUG_ON(fault_flags & FAULT_FLAG_WRITE);
	}

	ret = handle_mm_fault(vma, address, fault_flags, NULL);

	if (ret & VM_FAULT_COMPLETED) {
		/*
		 * With FAULT_FLAG_RETRY_NOWAIT we'll never release the
		 * mmap lock in the page fault handler. Sanity check this.
		 */
		WARN_ON_ONCE(fault_flags & FAULT_FLAG_RETRY_NOWAIT);
		if (locked)
			*locked = 0;
		/*
		 * We should do the same as VM_FAULT_RETRY, but let's not
		 * return -EBUSY since that's not reflecting the reality of
		 * what has happened - we've just fully completed a page
		 * fault, with the mmap lock released. Use -EAGAIN to show
		 * that we want to take the mmap lock _again_.
		 */
		return -EAGAIN;
	}

	if (ret & VM_FAULT_ERROR) {
		int err = vm_fault_to_errno(ret, *flags);

		if (err)
			return err;
		BUG();
	}

	if (ret & VM_FAULT_RETRY) {
		if (locked && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
			*locked = 0;
		return -EBUSY;
	}

	return 0;
}

static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
{
	vm_flags_t vm_flags = vma->vm_flags;
	int write = (gup_flags & FOLL_WRITE);
	int foreign = (gup_flags & FOLL_REMOTE);

	if (vm_flags & (VM_IO | VM_PFNMAP))
		return -EFAULT;

	if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
		return -EFAULT;

	if ((gup_flags & FOLL_LONGTERM) && vma_is_fsdax(vma))
		return -EOPNOTSUPP;

	if ((gup_flags & FOLL_LONGTERM) && (gup_flags & FOLL_PCI_P2PDMA))
		return -EOPNOTSUPP;

	if (vma_is_secretmem(vma))
		return -EFAULT;

	if (write) {
		if (!(vm_flags & VM_WRITE)) {
			if (!(gup_flags & FOLL_FORCE))
				return -EFAULT;
			/* hugetlb does not support FOLL_FORCE|FOLL_WRITE. */
			if (is_vm_hugetlb_page(vma))
				return -EFAULT;
			/*
			 * We used to let the write,force case do COW in a
			 * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
			 * set a breakpoint in a read-only mapping of an
			 * executable, without corrupting the file (yet only
			 * when that file had been opened for writing!).
			 * Anon pages in shared mappings are surprising: now
			 * just reject it.
			 */
			if (!is_cow_mapping(vm_flags))
				return -EFAULT;
		}
	} else if (!(vm_flags & VM_READ)) {
		if (!(gup_flags & FOLL_FORCE))
			return -EFAULT;
		/*
		 * Is there actually any vma we can reach here which does not
		 * have VM_MAYREAD set?
		 */
		if (!(vm_flags & VM_MAYREAD))
			return -EFAULT;
	}
	/*
	 * gups are always data accesses, not instruction
	 * fetches, so execute=false here
	 */
	if (!arch_vma_access_permitted(vma, write, false, foreign))
		return -EFAULT;
	return 0;
}

/**
 * __get_user_pages() - pin user pages in memory
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying pin behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 * @locked:	whether we're still with the mmap_lock held
 *
 * Returns either number of pages pinned (which may be less than the
 * number requested), or an error. Details about the return value:
 *
 * -- If nr_pages is 0, returns 0.
 * -- If nr_pages is >0, but no pages were pinned, returns -errno.
 * -- If nr_pages is >0, and some pages were pinned, returns the number of
 *    pages pinned. Again, this may be less than nr_pages.
 * -- 0 return value is possible when the fault would need to be retried.
 *
 * The caller is responsible for releasing returned @pages, via put_page().
 *
 * @vmas are valid only as long as mmap_lock is held.
 *
 * Must be called with mmap_lock held. It may be released. See below.
 *
 * __get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * __get_user_pages returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently re-faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
 * the page is written to, set_page_dirty (or set_page_dirty_lock, as
 * appropriate) must be called after the page is finished with, and
 * before put_page is called.
 *
 * If @locked != NULL, *@locked will be set to 0 when mmap_lock is
 * released by an up_read(). That can happen if @gup_flags does not
 * have FOLL_NOWAIT.
 *
 * A caller using such a combination of @locked and @gup_flags
 * must therefore hold the mmap_lock for reading only, and recognize
 * when it's been released. Otherwise, it must be held for either
 * reading or writing and will not be released.
 *
 * In most cases, get_user_pages or get_user_pages_fast should be used
 * instead of __get_user_pages. __get_user_pages should be used only if
 * you need some special @gup_flags.
 */
static long __get_user_pages(struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		struct vm_area_struct **vmas, int *locked)
{
	long ret = 0, i = 0;
	struct vm_area_struct *vma = NULL;
	struct follow_page_context ctx = { NULL };

	if (!nr_pages)
		return 0;

	start = untagged_addr(start);

	VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN)));

	do {
		struct page *page;
		unsigned int foll_flags = gup_flags;
		unsigned int page_increm;

		/* first iteration or cross vma bound */
		if (!vma || start >= vma->vm_end) {
			vma = find_extend_vma(mm, start);
			if (!vma && in_gate_area(mm, start)) {
				ret = get_gate_page(mm, start & PAGE_MASK,
						gup_flags, &vma,
						pages ? &pages[i] : NULL);
				if (ret)
					goto out;
				ctx.page_mask = 0;
				goto next_page;
			}

			if (!vma) {
				ret = -EFAULT;
				goto out;
			}
			ret = check_vma_flags(vma, gup_flags);
			if (ret)
				goto out;

			if (is_vm_hugetlb_page(vma)) {
				i = follow_hugetlb_page(mm, vma, pages, vmas,
						&start, &nr_pages, i,
						gup_flags, locked);
				if (locked && *locked == 0) {
					/*
					 * We've got a VM_FAULT_RETRY
					 * and we've lost mmap_lock.
					 * We must stop here.
					 */
					BUG_ON(gup_flags & FOLL_NOWAIT);
					goto out;
				}
				continue;
			}
		}
retry:
		/*
		 * If we have a pending SIGKILL, don't keep faulting pages and
		 * potentially allocating memory.
		 */
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}
		cond_resched();

		page = follow_page_mask(vma, start, foll_flags, &ctx);
		if (!page || PTR_ERR(page) == -EMLINK) {
			ret = faultin_page(vma, start, &foll_flags,
					   PTR_ERR(page) == -EMLINK, locked);
			switch (ret) {
			case 0:
				goto retry;
			case -EBUSY:
			case -EAGAIN:
				ret = 0;
				fallthrough;
			case -EFAULT:
			case -ENOMEM:
			case -EHWPOISON:
				goto out;
			}
			BUG();
		} else if (PTR_ERR(page) == -EEXIST) {
			/*
			 * Proper page table entry exists, but no corresponding
			 * struct page. If the caller expects **pages to be
			 * filled in, bail out now, because that can't be done
			 * for this page.
			 */
			if (pages) {
				ret = PTR_ERR(page);
				goto out;
			}

			goto next_page;
		} else if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			goto out;
		}
		if (pages) {
			pages[i] = page;
			flush_anon_page(vma, page, start);
			flush_dcache_page(page);
			ctx.page_mask = 0;
		}
next_page:
		if (vmas) {
			vmas[i] = vma;
			ctx.page_mask = 0;
		}
		page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask);
		if (page_increm > nr_pages)
			page_increm = nr_pages;
		i += page_increm;
		start += page_increm * PAGE_SIZE;
		nr_pages -= page_increm;
	} while (nr_pages);
out:
	if (ctx.pgmap)
		put_dev_pagemap(ctx.pgmap);
	return i ? i : ret;
}

static bool vma_permits_fault(struct vm_area_struct *vma,
			      unsigned int fault_flags)
{
	bool write = !!(fault_flags & FAULT_FLAG_WRITE);
	bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE);
	vm_flags_t vm_flags = write ? VM_WRITE : VM_READ;

	if (!(vm_flags & vma->vm_flags))
		return false;

	/*
	 * The architecture might have a hardware protection
	 * mechanism other than read/write that can deny access.
	 *
	 * gup always represents data access, not instruction
	 * fetches, so execute=false here:
	 */
	if (!arch_vma_access_permitted(vma, write, false, foreign))
		return false;

	return true;
}

/**
 * fixup_user_fault() - manually resolve a user page fault
 * @mm:		mm_struct of target mm
 * @address:	user address
 * @fault_flags:flags to pass down to handle_mm_fault()
 * @unlocked:	did we unlock the mmap_lock while retrying, maybe NULL if caller
 *		does not allow retry. If NULL, the caller must guarantee
 *		that fault_flags does not contain FAULT_FLAG_ALLOW_RETRY.
 *
 * This is meant to be called in the specific scenario where for locking reasons
 * we try to access user memory in atomic context (within a pagefault_disable()
 * section), this returns -EFAULT, and we want to resolve the user fault before
 * trying again.
 *
 * Typically this is meant to be used by the futex code.
 *
 * The main difference with get_user_pages() is that this function will
 * unconditionally call handle_mm_fault() which will in turn perform all the
 * necessary SW fixup of the dirty and young bits in the PTE, while
 * get_user_pages() only guarantees to update these in the struct page.
 *
 * This is important for some architectures where those bits also gate the
 * access permission to the page because they are maintained in software. On
 * such architectures, gup() will not be enough to make a subsequent access
 * succeed.
 *
 * This function will not return with an unlocked mmap_lock. So it does not
 * have the same semantics wrt the @mm->mmap_lock as does filemap_fault().
 */
int fixup_user_fault(struct mm_struct *mm,
		     unsigned long address, unsigned int fault_flags,
		     bool *unlocked)
{
	struct vm_area_struct *vma;
	vm_fault_t ret;

	address = untagged_addr(address);

	if (unlocked)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

retry:
	vma = find_extend_vma(mm, address);
	if (!vma || address < vma->vm_start)
		return -EFAULT;

	if (!vma_permits_fault(vma, fault_flags))
		return -EFAULT;

	if ((fault_flags & FAULT_FLAG_KILLABLE) &&
	    fatal_signal_pending(current))
		return -EINTR;

	ret = handle_mm_fault(vma, address, fault_flags, NULL);

	if (ret & VM_FAULT_COMPLETED) {
		/*
		 * NOTE: it's a pity that we need to retake the lock here
		 * to pair with the unlock() in the callers. Ideally we
		 * could tell the callers so they do not need to unlock.
		 */
		mmap_read_lock(mm);
		*unlocked = true;
		return 0;
	}

	if (ret & VM_FAULT_ERROR) {
		int err = vm_fault_to_errno(ret, 0);

		if (err)
			return err;
		BUG();
	}

	if (ret & VM_FAULT_RETRY) {
		mmap_read_lock(mm);
		*unlocked = true;
		fault_flags |= FAULT_FLAG_TRIED;
		goto retry;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(fixup_user_fault);
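
/*
 * Rough usage sketch (modelled on the futex code, simplified; "uaddr" and
 * "val" are placeholders): a fast path accesses user memory with page faults
 * disabled and, on failure, resolves the fault here before retrying:
 *
 *	bool unlocked = false;
 *
 *	pagefault_disable();
 *	ret = __get_user(val, uaddr);
 *	pagefault_enable();
 *	if (ret) {
 *		mmap_read_lock(current->mm);
 *		ret = fixup_user_fault(current->mm, (unsigned long)uaddr,
 *				       0, &unlocked);
 *		mmap_read_unlock(current->mm);
 *		if (ret)
 *			return ret;
 *		// ... retry the __get_user() ...
 *	}
 */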

/*
 * GUP always responds to fatal signals. When FOLL_INTERRUPTIBLE is
 * specified, it'll also respond to generic signals. The caller of GUP
 * that has FOLL_INTERRUPTIBLE should take care of the GUP interruption.
 */
static bool gup_signal_pending(unsigned int flags)
{
	if (fatal_signal_pending(current))
		return true;

	if (!(flags & FOLL_INTERRUPTIBLE))
		return false;

	return signal_pending(current);
}

/*
 * Please note that this function, unlike __get_user_pages will not
 * return 0 for nr_pages > 0 without FOLL_NOWAIT
 */
static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
						unsigned long start,
						unsigned long nr_pages,
						struct page **pages,
						struct vm_area_struct **vmas,
						int *locked,
						unsigned int flags)
{
	long ret, pages_done;
	bool lock_dropped;

	if (locked) {
		/* if VM_FAULT_RETRY can be returned, vmas become invalid */
		BUG_ON(vmas);
		/* check caller initialized locked */
		BUG_ON(*locked != 1);
	}

	if (flags & FOLL_PIN)
		mm_set_has_pinned_flag(&mm->flags);

	/*
	 * FOLL_PIN and FOLL_GET are mutually exclusive. Traditional behavior
	 * is to set FOLL_GET if the caller wants pages[] filled in (but has
	 * carelessly failed to specify FOLL_GET), so keep doing that, but only
	 * for FOLL_GET, not for the newer FOLL_PIN.
	 *
	 * FOLL_PIN always expects pages to be non-null, but no need to assert
	 * that here, as any failures will be obvious enough.
	 */
	if (pages && !(flags & FOLL_PIN))
		flags |= FOLL_GET;

	pages_done = 0;
	lock_dropped = false;
	for (;;) {
		ret = __get_user_pages(mm, start, nr_pages, flags, pages,
				       vmas, locked);
		if (!locked)
			/* VM_FAULT_RETRY couldn't trigger, bypass */
			return ret;

		/* VM_FAULT_RETRY or VM_FAULT_COMPLETED cannot return errors */
		if (!*locked) {
			BUG_ON(ret < 0);
			BUG_ON(ret >= nr_pages);
		}

		if (ret > 0) {
			nr_pages -= ret;
			pages_done += ret;
			if (!nr_pages)
				break;
		}
		if (*locked) {
			/*
			 * VM_FAULT_RETRY didn't trigger or it was a
			 * FOLL_NOWAIT.
			 */
			if (!pages_done)
				pages_done = ret;
			break;
		}
		/*
		 * VM_FAULT_RETRY triggered, so seek to the faulting offset.
		 * For the prefault case (!pages) we only update counts.
		 */
		if (likely(pages))
			pages += ret;
		start += ret << PAGE_SHIFT;
		lock_dropped = true;

retry:
		/*
		 * Repeat on the address that fired VM_FAULT_RETRY
		 * with both FAULT_FLAG_ALLOW_RETRY and
		 * FAULT_FLAG_TRIED. Note that GUP can be interrupted
		 * by fatal signals of even common signals, depending on
		 * the caller's request. So we need to check it before we
		 * start trying again otherwise it can loop forever.
		 */
		if (gup_signal_pending(flags)) {
			if (!pages_done)
				pages_done = -EINTR;
			break;
		}

		ret = mmap_read_lock_killable(mm);
		if (ret) {
			BUG_ON(ret > 0);
			if (!pages_done)
				pages_done = ret;
			break;
		}

		*locked = 1;
		ret = __get_user_pages(mm, start, 1, flags | FOLL_TRIED,
				       pages, NULL, locked);
		if (!*locked) {
			/* Continue to retry until we succeeded */
			BUG_ON(ret != 0);
			goto retry;
		}
		if (ret != 1) {
			BUG_ON(ret > 1);
			if (!pages_done)
				pages_done = ret;
			break;
		}
		nr_pages--;
		pages_done++;
		if (!nr_pages)
			break;
		if (likely(pages))
			pages++;
		start += PAGE_SIZE;
	}
	if (lock_dropped && *locked) {
		/*
		 * We must let the caller know we temporarily dropped the lock
		 * and so the critical section protected by it was lost.
		 */
		mmap_read_unlock(mm);
		*locked = 0;
	}
	return pages_done;
}

/**
 * populate_vma_page_range() -  populate a range of pages in the vma.
 * @vma:   target vma
 * @start: start address
 * @end:   end address
 * @locked: whether the mmap_lock is still held
 *
 * This takes care of mlocking the pages too if VM_LOCKED is set.
 *
 * Return either number of pages pinned in the vma, or a negative error
 * code on error.
 *
 * vma->vm_mm->mmap_lock must be held.
 *
 * If @locked is NULL, it may be held for read or write and will
 * be unperturbed.
 *
 * If @locked is non-NULL, it must be held for read only and may be
 * released. If it's released, *@locked will be set to 0.
 */
long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *locked)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long nr_pages = (end - start) / PAGE_SIZE;
	int gup_flags;
	long ret;

	VM_BUG_ON(!PAGE_ALIGNED(start));
	VM_BUG_ON(!PAGE_ALIGNED(end));
	VM_BUG_ON_VMA(start < vma->vm_start, vma);
	VM_BUG_ON_VMA(end > vma->vm_end, vma);
	mmap_assert_locked(mm);

	/*
	 * Rightly or wrongly, the VM_LOCKONFAULT case has never used
	 * faultin_page() to break COW, so it has no work to do here.
	 */
	if (vma->vm_flags & VM_LOCKONFAULT)
		return nr_pages;

	gup_flags = FOLL_TOUCH;
	/*
	 * We want to touch writable mappings with a write fault in order
	 * to break COW, except for shared mappings because these don't COW
	 * and we would not want to dirty them for nothing.
	 */
	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
		gup_flags |= FOLL_WRITE;

	/*
	 * We want mlock to succeed for regions that have any permissions
	 * other than PROT_NONE.
	 */
	if (vma_is_accessible(vma))
		gup_flags |= FOLL_FORCE;

	/*
	 * We made sure addr is within a VMA, so the following will
	 * not result in a stack expansion that recurses back here.
	 */
	ret = __get_user_pages(mm, start, nr_pages, gup_flags,
			       NULL, NULL, locked);
	lru_add_drain();
	return ret;
}

/*
 * faultin_vma_page_range() - populate (prefault) page tables inside the
 *			      given VMA range readable/writable
 *
 * This takes care of mlocking the pages, too, if VM_LOCKED is set.
 *
 * @vma: target vma
 * @start: start address
 * @end: end address
 * @write: whether to prefault readable or writable
 * @locked: whether the mmap_lock is still held
 *
 * Returns either number of processed pages in the vma, or a negative error
 * code on error (see __get_user_pages()).
 *
 * vma->vm_mm->mmap_lock must be held. The range must be page-aligned and
 * covered by the VMA.
 *
 * If @locked is NULL, it may be held for read or write and will be unperturbed.
 *
 * If @locked is non-NULL, it must be held for read only and may be released. If
 * it's released, *@locked will be set to 0.
 */
long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end, bool write, int *locked)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long nr_pages = (end - start) / PAGE_SIZE;
	int gup_flags;
	long ret;

	VM_BUG_ON(!PAGE_ALIGNED(start));
	VM_BUG_ON(!PAGE_ALIGNED(end));
	VM_BUG_ON_VMA(start < vma->vm_start, vma);
	VM_BUG_ON_VMA(end > vma->vm_end, vma);
	mmap_assert_locked(mm);

	/*
	 * FOLL_TOUCH: Mark page accessed and thereby young; will also mark
	 *	       the page dirty with FOLL_WRITE -- which doesn't make a
	 *	       difference with !FOLL_FORCE, because the page is writable
	 *	       in the page table.
	 * FOLL_HWPOISON: Return -EHWPOISON instead of -EFAULT when we hit
	 *		  a poisoned page.
	 * !FOLL_FORCE: Require proper access permissions.
	 */
	gup_flags = FOLL_TOUCH | FOLL_HWPOISON;
	if (write)
		gup_flags |= FOLL_WRITE;

	/*
	 * We want to report -EINVAL instead of -EFAULT for any permission
	 * problems or incompatible mappings.
	 */
	if (check_vma_flags(vma, gup_flags))
		return -EINVAL;

	ret = __get_user_pages(mm, start, nr_pages, gup_flags,
			       NULL, NULL, locked);
	lru_add_drain();
	return ret;
}

/*
 * __mm_populate - populate and/or mlock pages within a range of address space.
 *
 * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
 * flags. VMAs must be already marked with the desired vm_flags, and
 * mmap_lock must not be held.
 */
int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
{
	struct mm_struct *mm = current->mm;
	unsigned long end, nstart, nend;
	struct vm_area_struct *vma = NULL;
	int locked = 0;
	long ret = 0;

	end = start + len;

	for (nstart = start; nstart < end; nstart = nend) {
		/*
		 * We want to fault in pages for [nstart; end) address range.
		 * Find first corresponding VMA.
		 */
		if (!locked) {
			locked = 1;
			mmap_read_lock(mm);
			vma = find_vma_intersection(mm, nstart, end);
		} else if (nstart >= vma->vm_end)
			vma = find_vma_intersection(mm, vma->vm_end, end);

		if (!vma)
			break;
		/*
		 * Set [nstart; nend) to intersection of desired address
		 * range with the first VMA. Also, skip undesirable VMA types.
		 */
		nend = min(end, vma->vm_end);
		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
			continue;
		if (nstart < vma->vm_start)
			nstart = vma->vm_start;
		/*
		 * Now fault in a range of pages. populate_vma_page_range()
		 * double checks the vma flags, so that it won't mlock pages
		 * if the vma was already munlocked.
		 */
		ret = populate_vma_page_range(vma, nstart, nend, &locked);
		if (ret < 0) {
			if (ignore_errors) {
				ret = 0;
				continue;	/* continue at next VMA */
			}
			break;
		}
		nend = nstart + ret * PAGE_SIZE;
		ret = 0;
	}
	if (locked)
		mmap_read_unlock(mm);
	return ret;	/* 0 or negative error code */
}
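
/*
 * For reference (a sketch; the wrapper lives in <linux/mm.h>, not here):
 * mmap() and mlock() style callers typically reach this via mm_populate(),
 * roughly:
 *
 *	addr = do_mmap(...);			// may set MAP_POPULATE/VM_LOCKED
 *	if (!IS_ERR_VALUE(addr) && populate)
 *		mm_populate(addr, populate);	// calls __mm_populate(addr, len, 1)
 */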
#else /* CONFIG_MMU */
static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start,
		unsigned long nr_pages, struct page **pages,
		struct vm_area_struct **vmas, int *locked,
		unsigned int foll_flags)
{
	struct vm_area_struct *vma;
	unsigned long vm_flags;
	long i;

	/* calculate required read or write permissions.
	 * If FOLL_FORCE is set, we only require the "MAY" flags.
	 */
	vm_flags  = (foll_flags & FOLL_WRITE) ?
			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
	vm_flags &= (foll_flags & FOLL_FORCE) ?
			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);

	for (i = 0; i < nr_pages; i++) {
		vma = find_vma(mm, start);
		if (!vma)
			goto finish_or_fault;

		/* protect what we can, including chardevs */
		if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
		    !(vm_flags & vma->vm_flags))
			goto finish_or_fault;

		if (pages) {
			pages[i] = virt_to_page((void *)start);
			if (pages[i])
				get_page(pages[i]);
		}
		if (vmas)
			vmas[i] = vma;
		start = (start + PAGE_SIZE) & PAGE_MASK;
	}

	return i;

finish_or_fault:
	return i ? : -EFAULT;
}
#endif /* !CONFIG_MMU */

/**
 * fault_in_writeable - fault in userspace address range for writing
 * @uaddr: start of address range
 * @size: size of address range
 *
 * Returns the number of bytes not faulted in (like copy_to_user() and
 * copy_from_user()).
 */
size_t fault_in_writeable(char __user *uaddr, size_t size)
{
	char __user *start = uaddr, *end;

	if (unlikely(size == 0))
		return 0;
	if (!user_write_access_begin(uaddr, size))
		return size;
	if (!PAGE_ALIGNED(uaddr)) {
		unsafe_put_user(0, uaddr, out);
		uaddr = (char __user *)PAGE_ALIGN((unsigned long)uaddr);
	}
	end = (char __user *)PAGE_ALIGN((unsigned long)start + size);
	if (unlikely(end < start))
		end = NULL;
	while (uaddr != end) {
		unsafe_put_user(0, uaddr, out);
		uaddr += PAGE_SIZE;
	}

out:
	user_write_access_end();
	if (size > uaddr - start)
		return size - (uaddr - start);
	return 0;
}
EXPORT_SYMBOL(fault_in_writeable);
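
/*
 * Sketch of the usual caller pattern (simplified; "ubuf", "kbuf" and "len"
 * are placeholders): retry a user copy after faulting the destination in,
 * and give up once nothing further can be faulted:
 *
 *	while (copy_to_user(ubuf, kbuf, len)) {
 *		if (fault_in_writeable(ubuf, len) == len)
 *			return -EFAULT;		// no progress possible
 *	}
 */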

/**
 * fault_in_subpage_writeable - fault in an address range for writing
 * @uaddr: start of address range
 * @size: size of address range
 *
 * Fault in a user address range for writing while checking for permissions at
 * sub-page granularity (e.g. arm64 MTE). This function should be used when
 * the caller cannot guarantee forward progress of a copy_to_user() loop.
 *
 * Returns the number of bytes not faulted in (like copy_to_user() and
 * copy_from_user()).
 */
size_t fault_in_subpage_writeable(char __user *uaddr, size_t size)
{
	size_t faulted_in;

	/*
	 * Attempt faulting in at page granularity first for page table
	 * permission checking. The arch-specific probe_subpage_writeable()
	 * functions may not check for this.
	 */
	faulted_in = size - fault_in_writeable(uaddr, size);
	if (faulted_in)
		faulted_in -= probe_subpage_writeable(uaddr, faulted_in);

	return size - faulted_in;
}
EXPORT_SYMBOL(fault_in_subpage_writeable);

/*
 * fault_in_safe_writeable - fault in an address range for writing
 * @uaddr: start of address range
 * @size: length of address range
 *
 * Faults in an address range for writing. This is primarily useful when we
 * already know that some or all of the pages in the address range aren't in
 * memory.
 *
 * Unlike fault_in_writeable(), this function is non-destructive.
 *
 * Note that we don't pin or otherwise hold the pages referenced that we fault
 * in. There's no guarantee that they'll stay in memory for any duration of
 * time.
 *
 * Returns the number of bytes not faulted in, like copy_to_user() and
 * copy_from_user().
 */
size_t fault_in_safe_writeable(const char __user *uaddr, size_t size)
{
	unsigned long start = (unsigned long)uaddr, end;
	struct mm_struct *mm = current->mm;
	bool unlocked = false;

	if (unlikely(size == 0))
		return 0;
	end = PAGE_ALIGN(start + size);
	if (end < start)
		end = 0;

	mmap_read_lock(mm);
	do {
		if (fixup_user_fault(mm, start, FAULT_FLAG_WRITE, &unlocked))
			break;
		start = (start + PAGE_SIZE) & PAGE_MASK;
	} while (start != end);
	mmap_read_unlock(mm);

	if (size > (unsigned long)uaddr - start)
		return size - ((unsigned long)uaddr - start);
	return 0;
}
EXPORT_SYMBOL(fault_in_safe_writeable);
1808
1809/**
1810 * fault_in_readable - fault in userspace address range for reading
1811 * @uaddr: start of user address range
1812 * @size: size of user address range
1813 *
1814 * Returns the number of bytes not faulted in (like copy_to_user() and
1815 * copy_from_user()).
1816 */
1817size_t fault_in_readable(const char __user *uaddr, size_t size)
1818{
1819 const char __user *start = uaddr, *end;
1820 volatile char c;
1821
1822 if (unlikely(size == 0))
1823 return 0;
1824 if (!user_read_access_begin(uaddr, size))
1825 return size;
1826 if (!PAGE_ALIGNED(uaddr)) {
1827 unsafe_get_user(c, uaddr, out);
1828 uaddr = (const char __user *)PAGE_ALIGN((unsigned long)uaddr);
1829 }
1830 end = (const char __user *)PAGE_ALIGN((unsigned long)start + size);
1831 if (unlikely(end < start))
1832 end = NULL;
1833 while (uaddr != end) {
1834 unsafe_get_user(c, uaddr, out);
1835 uaddr += PAGE_SIZE;
1836 }
1837
1838out:
1839 user_read_access_end();
1840 (void)c;
1841 if (size > uaddr - start)
1842 return size - (uaddr - start);
1843 return 0;
1844}
1845EXPORT_SYMBOL(fault_in_readable);
1846
1847/**
1848 * get_dump_page() - pin user page in memory while writing it to core dump
1849 * @addr: user address
1850 *
1851 * Returns struct page pointer of user page pinned for dump,
1852 * to be freed afterwards by put_page().
1853 *
1854 * Returns NULL on any kind of failure - a hole must then be inserted into
1855 * the corefile, to preserve alignment with its headers; and also returns
1856 * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
1857 * allowing a hole to be left in the corefile to save disk space.
1858 *
1859 * Called without mmap_lock (takes and releases the mmap_lock by itself).
1860 */
1861#ifdef CONFIG_ELF_CORE
1862struct page *get_dump_page(unsigned long addr)
1863{
1864 struct mm_struct *mm = current->mm;
1865 struct page *page;
1866 int locked = 1;
1867 int ret;
1868
1869 if (mmap_read_lock_killable(mm))
1870 return NULL;
1871 ret = __get_user_pages_locked(mm, addr, 1, &page, NULL, &locked,
1872 FOLL_FORCE | FOLL_DUMP | FOLL_GET);
1873 if (locked)
1874 mmap_read_unlock(mm);
1875 return (ret == 1) ? page : NULL;
1876}
1877#endif /* CONFIG_ELF_CORE */
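
/*
 * Sketch of how a coredump writer is expected to consume get_dump_page()
 * (loosely modelled on fs/coredump.c; emit()/skip() stand in for the real
 * dump helpers):
 *
 *	for (addr = start; addr < end; addr += PAGE_SIZE) {
 *		struct page *page = get_dump_page(addr);
 *
 *		if (page) {
 *			void *kaddr = kmap_local_page(page);
 *
 *			ok = emit(cprm, kaddr, PAGE_SIZE);
 *			kunmap_local(kaddr);
 *			put_page(page);
 *		} else {
 *			ok = skip(cprm, PAGE_SIZE);	leave a hole
 *		}
 *		if (!ok)
 *			break;
 *	}
 */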
1878
1879#ifdef CONFIG_MIGRATION
1880/*
1881 * Returns the number of collected pages. Return value is always >= 0.
1882 */
1883static unsigned long collect_longterm_unpinnable_pages(
1884 struct list_head *movable_page_list,
1885 unsigned long nr_pages,
1886 struct page **pages)
1887{
1888 unsigned long i, collected = 0;
1889 struct folio *prev_folio = NULL;
1890 bool drain_allow = true;
1891
1892 for (i = 0; i < nr_pages; i++) {
1893 struct folio *folio = page_folio(pages[i]);
1894
1895 if (folio == prev_folio)
1896 continue;
1897 prev_folio = folio;
1898
1899 if (folio_is_longterm_pinnable(folio))
1900 continue;
1901
1902 collected++;
1903
1904 if (folio_is_device_coherent(folio))
1905 continue;
1906
1907 if (folio_test_hugetlb(folio)) {
1908 isolate_hugetlb(&folio->page, movable_page_list);
1909 continue;
1910 }
1911
1912 if (!folio_test_lru(folio) && drain_allow) {
1913 lru_add_drain_all();
1914 drain_allow = false;
1915 }
1916
1917 if (folio_isolate_lru(folio))
1918 continue;
1919
1920 list_add_tail(&folio->lru, movable_page_list);
1921 node_stat_mod_folio(folio,
1922 NR_ISOLATED_ANON + folio_is_file_lru(folio),
1923 folio_nr_pages(folio));
1924 }
1925
1926 return collected;
1927}
1928
1929/*
1930 * Unpins all pages and migrates device coherent pages and movable_page_list.
1931 * Returns -EAGAIN if all pages were successfully migrated or -errno for failure
1932 * (or partial success).
1933 */
1934static int migrate_longterm_unpinnable_pages(
1935 struct list_head *movable_page_list,
1936 unsigned long nr_pages,
1937 struct page **pages)
1938{
1939 int ret;
1940 unsigned long i;
1941
1942 for (i = 0; i < nr_pages; i++) {
1943 struct folio *folio = page_folio(pages[i]);
1944
1945 if (folio_is_device_coherent(folio)) {
1946 /*
1947 * Migration will fail if the page is pinned, so convert
1948 * the pin on the source page to a normal reference.
1949 */
1950 pages[i] = NULL;
1951 folio_get(folio);
1952 gup_put_folio(folio, 1, FOLL_PIN);
1953
1954 if (migrate_device_coherent_page(&folio->page)) {
1955 ret = -EBUSY;
1956 goto err;
1957 }
1958
1959 continue;
1960 }
1961
1962 /*
1963 * We can't migrate pages with unexpected references, so drop
1964 * the reference obtained by __get_user_pages_locked().
1965 * Migrating pages have been added to movable_page_list after
1966 * calling folio_isolate_lru() which takes a reference so the
1967 * page won't be freed if it's migrating.
1968 */
1969 unpin_user_page(pages[i]);
1970 pages[i] = NULL;
1971 }
1972
1973 if (!list_empty(movable_page_list)) {
1974 struct migration_target_control mtc = {
1975 .nid = NUMA_NO_NODE,
1976 .gfp_mask = GFP_USER | __GFP_NOWARN,
1977 };
1978
1979 if (migrate_pages(movable_page_list, alloc_migration_target,
1980 NULL, (unsigned long)&mtc, MIGRATE_SYNC,
1981 MR_LONGTERM_PIN, NULL)) {
1982 ret = -ENOMEM;
1983 goto err;
1984 }
1985 }
1986
1987 putback_movable_pages(movable_page_list);
1988
1989 return -EAGAIN;
1990
1991err:
1992 for (i = 0; i < nr_pages; i++)
1993 if (pages[i])
1994 unpin_user_page(pages[i]);
1995 putback_movable_pages(movable_page_list);
1996
1997 return ret;
1998}
1999
2000/*
2001 * Check whether all pages are *allowed* to be pinned. Rather confusingly, all
2002 * pages in the range are required to be pinned via FOLL_PIN, before calling
2003 * this routine.
2004 *
2005 * If any pages in the range are not allowed to be pinned, then this routine
2006 * will migrate those pages away, unpin all the pages in the range and return
2007 * -EAGAIN. The caller should re-pin the entire range with FOLL_PIN and then
2008 * call this routine again.
2009 *
2010 * If an error other than -EAGAIN occurs, this indicates a migration failure.
2011 * The caller should give up, and propagate the error back up the call stack.
2012 *
2013 * If everything is OK and all pages in the range are allowed to be pinned, then
2014 * this routine leaves all pages pinned and returns zero for success.
2015 */
2016static long check_and_migrate_movable_pages(unsigned long nr_pages,
2017 struct page **pages)
2018{
2019 unsigned long collected;
2020 LIST_HEAD(movable_page_list);
2021
2022 collected = collect_longterm_unpinnable_pages(&movable_page_list,
2023 nr_pages, pages);
2024 if (!collected)
2025 return 0;
2026
2027 return migrate_longterm_unpinnable_pages(&movable_page_list, nr_pages,
2028 pages);
2029}
2030#else
2031static long check_and_migrate_movable_pages(unsigned long nr_pages,
2032 struct page **pages)
2033{
2034 return 0;
2035}
2036#endif /* CONFIG_MIGRATION */
2037
2038/*
2039 * __gup_longterm_locked() is a wrapper for __get_user_pages_locked which
2040 * allows us to process the FOLL_LONGTERM flag.
2041 */
2042static long __gup_longterm_locked(struct mm_struct *mm,
2043 unsigned long start,
2044 unsigned long nr_pages,
2045 struct page **pages,
2046 struct vm_area_struct **vmas,
2047 int *locked,
2048 unsigned int gup_flags)
2049{
2050 bool must_unlock = false;
2051 unsigned int flags;
2052 long rc, nr_pinned_pages;
2053
2054 if (locked && WARN_ON_ONCE(!*locked))
2055 return -EINVAL;
2056
2057 if (!(gup_flags & FOLL_LONGTERM))
2058 return __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
2059 locked, gup_flags);
2060
2061 /*
2062 * If we get to this point then FOLL_LONGTERM is set, and FOLL_LONGTERM
2063 * implies FOLL_PIN (although the reverse is not true). Therefore it is
2064 * correct to unconditionally call check_and_migrate_movable_pages()
2065 * which assumes pages have been pinned via FOLL_PIN.
2066 *
2067 * Enforce the above reasoning by asserting that FOLL_PIN is set.
2068 */
2069 if (WARN_ON(!(gup_flags & FOLL_PIN)))
2070 return -EINVAL;
2071 flags = memalloc_pin_save();
2072 do {
2073 if (locked && !*locked) {
2074 mmap_read_lock(mm);
2075 must_unlock = true;
2076 *locked = 1;
2077 }
2078 nr_pinned_pages = __get_user_pages_locked(mm, start, nr_pages,
2079 pages, vmas, locked,
2080 gup_flags);
2081 if (nr_pinned_pages <= 0) {
2082 rc = nr_pinned_pages;
2083 break;
2084 }
2085 rc = check_and_migrate_movable_pages(nr_pinned_pages, pages);
2086 } while (rc == -EAGAIN);
2087 memalloc_pin_restore(flags);
2088
2089 if (locked && *locked && must_unlock) {
2090 mmap_read_unlock(mm);
2091 *locked = 0;
2092 }
2093 return rc ? rc : nr_pinned_pages;
2094}
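
/*
 * Sketch of a FOLL_LONGTERM caller as seen from this function (an RDMA-style
 * driver might look roughly like this; illustrative only, error handling
 * omitted):
 *
 *	mmap_read_lock(current->mm);
 *	pinned = pin_user_pages(start, nr_pages,
 *				FOLL_WRITE | FOLL_LONGTERM, pages, NULL);
 *	mmap_read_unlock(current->mm);
 *	... set up DMA mappings that may outlive this call indefinitely ...
 *	unpin_user_pages(pages, pinned);
 *
 * The migrate-and-retry loop above is what makes such pins safe: any page
 * that is not longterm-pinnable (e.g. in ZONE_MOVABLE or CMA) is migrated
 * to a pinnable region before the pin is allowed to stick.
 */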
2095
2096static bool is_valid_gup_flags(unsigned int gup_flags)
2097{
2098 /*
2099 * FOLL_PIN must only be set internally by the pin_user_pages*() APIs,
2100 * never directly by the caller, so enforce that with an assertion:
2101 */
2102 if (WARN_ON_ONCE(gup_flags & FOLL_PIN))
2103 return false;
2104 /*
2105 * FOLL_PIN is a prerequisite to FOLL_LONGTERM. Another way of saying
2106 * that is, FOLL_LONGTERM is a specific, more restrictive case of
2107 * FOLL_PIN.
2108 */
2109 if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
2110 return false;
2111
2112 return true;
2113}
2114
2115#ifdef CONFIG_MMU
2116/**
2117 * get_user_pages_remote() - pin user pages in memory
2118 * @mm: mm_struct of target mm
2119 * @start: starting user address
2120 * @nr_pages: number of pages from start to pin
2121 * @gup_flags: flags modifying lookup behaviour
2122 * @pages: array that receives pointers to the pages pinned.
2123 * Should be at least nr_pages long. Or NULL, if caller
2124 * only intends to ensure the pages are faulted in.
2125 * @vmas: array of pointers to vmas corresponding to each page.
2126 * Or NULL if the caller does not require them.
2127 * @locked: pointer to lock flag indicating whether lock is held and
2128 * subsequently whether VM_FAULT_RETRY functionality can be
2129 * utilised. Lock must initially be held.
2130 *
2131 * Returns either number of pages pinned (which may be less than the
2132 * number requested), or an error. Details about the return value:
2133 *
2134 * -- If nr_pages is 0, returns 0.
2135 * -- If nr_pages is >0, but no pages were pinned, returns -errno.
2136 * -- If nr_pages is >0, and some pages were pinned, returns the number of
2137 * pages pinned. Again, this may be less than nr_pages.
2138 *
2139 * The caller is responsible for releasing returned @pages, via put_page().
2140 *
2141 * @vmas are valid only as long as mmap_lock is held.
2142 *
2143 * Must be called with mmap_lock held for read or write.
2144 *
2145 * get_user_pages_remote walks a process's page tables and takes a reference
2146 * to each struct page that each user address corresponds to at a given
2147 * instant. That is, it takes the page that would be accessed if a user
2148 * thread accesses the given user virtual address at that instant.
2149 *
2150 * This does not guarantee that the page exists in the user mappings when
2151 * get_user_pages_remote returns, and there may even be a completely different
2152 * page there in some cases (eg. if mmapped pagecache has been invalidated
2153 * and subsequently re-faulted). However, it does guarantee that the page
2154 * won't be freed completely. And mostly callers simply care that the page
2155 * contains data that was valid *at some point in time*. Typically, an IO
2156 * or similar operation cannot guarantee anything stronger anyway because
2157 * locks can't be held over the syscall boundary.
2158 *
2159 * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page
2160 * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must
2161 * be called after the page is finished with, and before put_page is called.
2162 *
2163 * get_user_pages_remote is typically used for fewer-copy IO operations,
2164 * to get a handle on the memory by some means other than accesses
2165 * via the user virtual addresses. The pages may be submitted for
2166 * DMA to devices or accessed via their kernel linear mapping (via the
2167 * kmap APIs). Care should be taken to use the correct cache flushing APIs.
2168 *
2169 * See also get_user_pages_fast, for performance critical applications.
2170 *
2171 * get_user_pages_remote should be phased out in favor of
2172 * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing
2173 * should use get_user_pages_remote because it cannot pass
2174 * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
2175 */
2176long get_user_pages_remote(struct mm_struct *mm,
2177 unsigned long start, unsigned long nr_pages,
2178 unsigned int gup_flags, struct page **pages,
2179 struct vm_area_struct **vmas, int *locked)
2180{
2181 if (!is_valid_gup_flags(gup_flags))
2182 return -EINVAL;
2183
2184 return __gup_longterm_locked(mm, start, nr_pages, pages, vmas, locked,
2185 gup_flags | FOLL_TOUCH | FOLL_REMOTE);
2186}
2187EXPORT_SYMBOL(get_user_pages_remote);
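
/*
 * Sketch of a typical get_user_pages_remote() caller, roughly following the
 * access_process_vm() pattern (illustrative only, error handling omitted):
 *
 *	int locked = 1;
 *
 *	mmap_read_lock(mm);
 *	pinned = get_user_pages_remote(mm, addr, 1, FOLL_WRITE | FOLL_FORCE,
 *				       &page, NULL, &locked);
 *	if (locked)
 *		mmap_read_unlock(mm);
 *	if (pinned == 1) {
 *		void *kaddr = kmap_local_page(page);
 *
 *		memcpy(kaddr + offset_in_page(addr), buf, len);
 *		kunmap_local(kaddr);
 *		set_page_dirty_lock(page);
 *		put_page(page);
 *	}
 */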
2188
2189#else /* CONFIG_MMU */
2190long get_user_pages_remote(struct mm_struct *mm,
2191 unsigned long start, unsigned long nr_pages,
2192 unsigned int gup_flags, struct page **pages,
2193 struct vm_area_struct **vmas, int *locked)
2194{
2195 return 0;
2196}
2197#endif /* !CONFIG_MMU */
2198
2199/**
2200 * get_user_pages() - pin user pages in memory
2201 * @start: starting user address
2202 * @nr_pages: number of pages from start to pin
2203 * @gup_flags: flags modifying lookup behaviour
2204 * @pages: array that receives pointers to the pages pinned.
2205 * Should be at least nr_pages long. Or NULL, if caller
2206 * only intends to ensure the pages are faulted in.
2207 * @vmas: array of pointers to vmas corresponding to each page.
2208 * Or NULL if the caller does not require them.
2209 *
2210 * This is the same as get_user_pages_remote(), just with a less-flexible
2211 * calling convention where we assume that the mm being operated on belongs to
2212 * the current task, and doesn't allow passing of a locked parameter. We also
2213 * obviously don't pass FOLL_REMOTE in here.
2214 */
2215long get_user_pages(unsigned long start, unsigned long nr_pages,
2216 unsigned int gup_flags, struct page **pages,
2217 struct vm_area_struct **vmas)
2218{
2219 if (!is_valid_gup_flags(gup_flags))
2220 return -EINVAL;
2221
2222 return __gup_longterm_locked(current->mm, start, nr_pages,
2223 pages, vmas, NULL, gup_flags | FOLL_TOUCH);
2224}
2225EXPORT_SYMBOL(get_user_pages);
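
/*
 * Minimal sketch of a get_user_pages() caller on the current mm
 * (illustrative only):
 *
 *	mmap_read_lock(current->mm);
 *	got = get_user_pages(uaddr & PAGE_MASK, 1, 0, &page, NULL);
 *	mmap_read_unlock(current->mm);
 *	if (got == 1) {
 *		... read the contents via kmap_local_page() ...
 *		put_page(page);
 *	}
 */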
2226
2227/*
2228 * get_user_pages_unlocked() is suitable to replace the form:
2229 *
2230 *      mmap_read_lock(current->mm);
2231 *      get_user_pages(start, ..., pages, NULL);
2232 *      mmap_read_unlock(current->mm);
2233 *
2234 * with:
2235 *
2236 *      get_user_pages_unlocked(start, ..., pages, gup_flags);
2237 *
2238 * It is functionally equivalent to get_user_pages_fast so
2239 * get_user_pages_fast should be used instead if specific gup_flags
2240 * (e.g. FOLL_FORCE) are not required.
2241 */
2242long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
2243 struct page **pages, unsigned int gup_flags)
2244{
2245 struct mm_struct *mm = current->mm;
2246 int locked = 1;
2247 long ret;
2248
2249 mmap_read_lock(mm);
2250 ret = __gup_longterm_locked(mm, start, nr_pages, pages, NULL, &locked,
2251 gup_flags | FOLL_TOUCH);
2252 if (locked)
2253 mmap_read_unlock(mm);
2254 return ret;
2255}
2256EXPORT_SYMBOL(get_user_pages_unlocked);
2257
2258/*
2259 * Fast GUP
2260 *
2261 * get_user_pages_fast attempts to pin user pages by walking the page
2262 * tables directly and avoids taking locks. Thus the walker needs to be
2263 * protected from page table pages being freed from under it, and should
2264 * block any THP splits.
2265 *
2266 * One way to achieve this is to have the walker disable interrupts, and
2267 * rely on IPIs from the TLB flushing code blocking before the page table
2268 * pages are freed. This is unsuitable for architectures that do not need
2269 * to broadcast an IPI when invalidating TLBs.
2270 *
2271 * Another way to achieve this is to batch up the page-table-containing pages
2272 * belonging to more than one mm_user, then rcu_sched a callback to free those
2273 * pages. Disabling interrupts will allow the fast_gup walker to both block
2274 * the rcu_sched callback, and an IPI that we broadcast for splitting THPs
2275 * (which is a relatively rare event). The code below adopts this strategy.
2276 *
2277 * Before activating this code, please be aware that the following assumptions
2278 * are currently made:
2279 *
2280 * *) Either MMU_GATHER_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to
2281 * free pages containing page tables or TLB flushing requires IPI broadcast.
2282 *
2283 * *) ptes can be read atomically by the architecture.
2284 *
2285 * *) access_ok is sufficient to validate userspace address ranges.
2286 *
2287 * The last two assumptions can be relaxed by the addition of helper functions.
2288 *
2289 * This code is based heavily on the PowerPC implementation by Nick Piggin.
2290 */
2291#ifdef CONFIG_HAVE_FAST_GUP
2292
2293static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start,
2294 unsigned int flags,
2295 struct page **pages)
2296{
2297 while ((*nr) - nr_start) {
2298 struct page *page = pages[--(*nr)];
2299
2300 ClearPageReferenced(page);
2301 if (flags & FOLL_PIN)
2302 unpin_user_page(page);
2303 else
2304 put_page(page);
2305 }
2306}
2307
2308#ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
2309/*
2310 * Fast-gup relies on pte change detection to avoid concurrent pgtable
2311 * operations.
2312 *
2313 * To pin the page, fast-gup needs to do below in order:
2314 * (1) pin the page (by prefetching pte), then (2) check pte not changed.
2315 *
2316 * For the rest of pgtable operations where pgtable updates can be racy
2317 * with fast-gup, we need to do (1) clear pte, then (2) check whether page
2318 * is pinned.
2319 *
2320 * Above will work for all pte-level operations, including THP split.
2321 *
2322 * For THP collapse, it's a bit more complicated because fast-gup may be
2323 * walking a pgtable page that is being freed (pte is still valid but pmd
2324 * can be cleared already). To avoid race in such condition, we need to
2325 * also check pmd here to make sure pmd doesn't change (corresponds to
2326 * pmdp_collapse_flush() in the THP collapse code path).
2327 */
2328static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
2329 unsigned long end, unsigned int flags,
2330 struct page **pages, int *nr)
2331{
2332 struct dev_pagemap *pgmap = NULL;
2333 int nr_start = *nr, ret = 0;
2334 pte_t *ptep, *ptem;
2335
2336 ptem = ptep = pte_offset_map(&pmd, addr);
2337 do {
2338 pte_t pte = ptep_get_lockless(ptep);
2339 struct page *page;
2340 struct folio *folio;
2341
2342 if (pte_protnone(pte) && !gup_can_follow_protnone(flags))
2343 goto pte_unmap;
2344
2345 if (!pte_access_permitted(pte, flags & FOLL_WRITE))
2346 goto pte_unmap;
2347
2348 if (pte_devmap(pte)) {
2349 if (unlikely(flags & FOLL_LONGTERM))
2350 goto pte_unmap;
2351
2352 pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
2353 if (unlikely(!pgmap)) {
2354 undo_dev_pagemap(nr, nr_start, flags, pages);
2355 goto pte_unmap;
2356 }
2357 } else if (pte_special(pte))
2358 goto pte_unmap;
2359
2360 VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
2361 page = pte_page(pte);
2362
2363 folio = try_grab_folio(page, 1, flags);
2364 if (!folio)
2365 goto pte_unmap;
2366
2367 if (unlikely(page_is_secretmem(page))) {
2368 gup_put_folio(folio, 1, flags);
2369 goto pte_unmap;
2370 }
2371
2372 if (unlikely(pmd_val(pmd) != pmd_val(*pmdp)) ||
2373 unlikely(pte_val(pte) != pte_val(*ptep))) {
2374 gup_put_folio(folio, 1, flags);
2375 goto pte_unmap;
2376 }
2377
2378 if (!pte_write(pte) && gup_must_unshare(NULL, flags, page)) {
2379 gup_put_folio(folio, 1, flags);
2380 goto pte_unmap;
2381 }
2382
2383 /*
2384 * We need to make the page accessible if and only if we are
2385 * going to access its content (the FOLL_PIN case). Please
2386 * see Documentation/core-api/pin_user_pages.rst for
2387 * details.
2388 */
2389 if (flags & FOLL_PIN) {
2390 ret = arch_make_page_accessible(page);
2391 if (ret) {
2392 gup_put_folio(folio, 1, flags);
2393 goto pte_unmap;
2394 }
2395 }
2396 folio_set_referenced(folio);
2397 pages[*nr] = page;
2398 (*nr)++;
2399 } while (ptep++, addr += PAGE_SIZE, addr != end);
2400
2401 ret = 1;
2402
2403pte_unmap:
2404 if (pgmap)
2405 put_dev_pagemap(pgmap);
2406 pte_unmap(ptem);
2407 return ret;
2408}
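
/*
 * For reference, a sketch (a paraphrase, not a literal quote of any one
 * caller) of the ordering the page-table-update side is expected to follow
 * so that the pin-then-recheck scheme above is sound:
 *
 *	pteval = ptep_get_and_clear(mm, addr, ptep);	(1) clear the pte
 *	... flush / serialize against concurrent fast-gup ...
 *	if (page_maybe_dma_pinned(page))		(2) then check the pin
 *		treat the page as pinned;
 *
 * Fast-gup does the mirror image: (1) take the reference, then (2) recheck
 * that the pte (and, for THP collapse, the pmd) has not changed.
 */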
2409#else
2410
2411/*
2412 * If we can't determine whether or not a pte is special, then fail immediately
2413 * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not
2414 * to be special.
2415 *
2416 * For a futex to be placed on a THP tail page, get_futex_key requires a
2417 * get_user_pages_fast_only implementation that can pin pages. Thus it's still
2418 * useful to have gup_huge_pmd even if we can't operate on ptes.
2419 */
2420static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
2421 unsigned long end, unsigned int flags,
2422 struct page **pages, int *nr)
2423{
2424 return 0;
2425}
2426#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
2427
2428#if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
2429static int __gup_device_huge(unsigned long pfn, unsigned long addr,
2430 unsigned long end, unsigned int flags,
2431 struct page **pages, int *nr)
2432{
2433 int nr_start = *nr;
2434 struct dev_pagemap *pgmap = NULL;
2435
2436 do {
2437 struct page *page = pfn_to_page(pfn);
2438
2439 pgmap = get_dev_pagemap(pfn, pgmap);
2440 if (unlikely(!pgmap)) {
2441 undo_dev_pagemap(nr, nr_start, flags, pages);
2442 break;
2443 }
2444
2445 if (!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)) {
2446 undo_dev_pagemap(nr, nr_start, flags, pages);
2447 break;
2448 }
2449
2450 SetPageReferenced(page);
2451 pages[*nr] = page;
2452 if (unlikely(try_grab_page(page, flags))) {
2453 undo_dev_pagemap(nr, nr_start, flags, pages);
2454 break;
2455 }
2456 (*nr)++;
2457 pfn++;
2458 } while (addr += PAGE_SIZE, addr != end);
2459
2460 put_dev_pagemap(pgmap);
2461 return addr == end;
2462}
2463
2464static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
2465 unsigned long end, unsigned int flags,
2466 struct page **pages, int *nr)
2467{
2468 unsigned long fault_pfn;
2469 int nr_start = *nr;
2470
2471 fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
2472 if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr))
2473 return 0;
2474
2475 if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
2476 undo_dev_pagemap(nr, nr_start, flags, pages);
2477 return 0;
2478 }
2479 return 1;
2480}
2481
2482static int __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
2483 unsigned long end, unsigned int flags,
2484 struct page **pages, int *nr)
2485{
2486 unsigned long fault_pfn;
2487 int nr_start = *nr;
2488
2489 fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
2490 if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr))
2491 return 0;
2492
2493 if (unlikely(pud_val(orig) != pud_val(*pudp))) {
2494 undo_dev_pagemap(nr, nr_start, flags, pages);
2495 return 0;
2496 }
2497 return 1;
2498}
2499#else
2500static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
2501 unsigned long end, unsigned int flags,
2502 struct page **pages, int *nr)
2503{
2504 BUILD_BUG();
2505 return 0;
2506}
2507
2508static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr,
2509 unsigned long end, unsigned int flags,
2510 struct page **pages, int *nr)
2511{
2512 BUILD_BUG();
2513 return 0;
2514}
2515#endif
2516
2517static int record_subpages(struct page *page, unsigned long addr,
2518 unsigned long end, struct page **pages)
2519{
2520 int nr;
2521
2522 for (nr = 0; addr != end; nr++, addr += PAGE_SIZE)
2523 pages[nr] = nth_page(page, nr);
2524
2525 return nr;
2526}
2527
2528#ifdef CONFIG_ARCH_HAS_HUGEPD
2529static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
2530 unsigned long sz)
2531{
2532 unsigned long __boundary = (addr + sz) & ~(sz-1);
2533 return (__boundary - 1 < end - 1) ? __boundary : end;
2534}
2535
2536static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
2537 unsigned long end, unsigned int flags,
2538 struct page **pages, int *nr)
2539{
2540 unsigned long pte_end;
2541 struct page *page;
2542 struct folio *folio;
2543 pte_t pte;
2544 int refs;
2545
2546 pte_end = (addr + sz) & ~(sz-1);
2547 if (pte_end < end)
2548 end = pte_end;
2549
2550 pte = huge_ptep_get(ptep);
2551
2552 if (!pte_access_permitted(pte, flags & FOLL_WRITE))
2553 return 0;
2554
2555 /* hugepages are never "special" */
2556 VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
2557
2558 page = nth_page(pte_page(pte), (addr & (sz - 1)) >> PAGE_SHIFT);
2559 refs = record_subpages(page, addr, end, pages + *nr);
2560
2561 folio = try_grab_folio(page, refs, flags);
2562 if (!folio)
2563 return 0;
2564
2565 if (unlikely(pte_val(pte) != pte_val(*ptep))) {
2566 gup_put_folio(folio, refs, flags);
2567 return 0;
2568 }
2569
2570 if (!pte_write(pte) && gup_must_unshare(NULL, flags, &folio->page)) {
2571 gup_put_folio(folio, refs, flags);
2572 return 0;
2573 }
2574
2575 *nr += refs;
2576 folio_set_referenced(folio);
2577 return 1;
2578}
2579
2580static int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
2581 unsigned int pdshift, unsigned long end, unsigned int flags,
2582 struct page **pages, int *nr)
2583{
2584 pte_t *ptep;
2585 unsigned long sz = 1UL << hugepd_shift(hugepd);
2586 unsigned long next;
2587
2588 ptep = hugepte_offset(hugepd, addr, pdshift);
2589 do {
2590 next = hugepte_addr_end(addr, end, sz);
2591 if (!gup_hugepte(ptep, sz, addr, end, flags, pages, nr))
2592 return 0;
2593 } while (ptep++, addr = next, addr != end);
2594
2595 return 1;
2596}
2597#else
2598static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
2599 unsigned int pdshift, unsigned long end, unsigned int flags,
2600 struct page **pages, int *nr)
2601{
2602 return 0;
2603}
2604#endif /* CONFIG_ARCH_HAS_HUGEPD */
2605
2606static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
2607 unsigned long end, unsigned int flags,
2608 struct page **pages, int *nr)
2609{
2610 struct page *page;
2611 struct folio *folio;
2612 int refs;
2613
2614 if (!pmd_access_permitted(orig, flags & FOLL_WRITE))
2615 return 0;
2616
2617 if (pmd_devmap(orig)) {
2618 if (unlikely(flags & FOLL_LONGTERM))
2619 return 0;
2620 return __gup_device_huge_pmd(orig, pmdp, addr, end, flags,
2621 pages, nr);
2622 }
2623
2624 page = nth_page(pmd_page(orig), (addr & ~PMD_MASK) >> PAGE_SHIFT);
2625 refs = record_subpages(page, addr, end, pages + *nr);
2626
2627 folio = try_grab_folio(page, refs, flags);
2628 if (!folio)
2629 return 0;
2630
2631 if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
2632 gup_put_folio(folio, refs, flags);
2633 return 0;
2634 }
2635
2636 if (!pmd_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) {
2637 gup_put_folio(folio, refs, flags);
2638 return 0;
2639 }
2640
2641 *nr += refs;
2642 folio_set_referenced(folio);
2643 return 1;
2644}
2645
2646static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
2647 unsigned long end, unsigned int flags,
2648 struct page **pages, int *nr)
2649{
2650 struct page *page;
2651 struct folio *folio;
2652 int refs;
2653
2654 if (!pud_access_permitted(orig, flags & FOLL_WRITE))
2655 return 0;
2656
2657 if (pud_devmap(orig)) {
2658 if (unlikely(flags & FOLL_LONGTERM))
2659 return 0;
2660 return __gup_device_huge_pud(orig, pudp, addr, end, flags,
2661 pages, nr);
2662 }
2663
2664 page = nth_page(pud_page(orig), (addr & ~PUD_MASK) >> PAGE_SHIFT);
2665 refs = record_subpages(page, addr, end, pages + *nr);
2666
2667 folio = try_grab_folio(page, refs, flags);
2668 if (!folio)
2669 return 0;
2670
2671 if (unlikely(pud_val(orig) != pud_val(*pudp))) {
2672 gup_put_folio(folio, refs, flags);
2673 return 0;
2674 }
2675
2676 if (!pud_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) {
2677 gup_put_folio(folio, refs, flags);
2678 return 0;
2679 }
2680
2681 *nr += refs;
2682 folio_set_referenced(folio);
2683 return 1;
2684}
2685
2686static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
2687 unsigned long end, unsigned int flags,
2688 struct page **pages, int *nr)
2689{
2690 int refs;
2691 struct page *page;
2692 struct folio *folio;
2693
2694 if (!pgd_access_permitted(orig, flags & FOLL_WRITE))
2695 return 0;
2696
2697 BUILD_BUG_ON(pgd_devmap(orig));
2698
2699 page = nth_page(pgd_page(orig), (addr & ~PGDIR_MASK) >> PAGE_SHIFT);
2700 refs = record_subpages(page, addr, end, pages + *nr);
2701
2702 folio = try_grab_folio(page, refs, flags);
2703 if (!folio)
2704 return 0;
2705
2706 if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
2707 gup_put_folio(folio, refs, flags);
2708 return 0;
2709 }
2710
2711 *nr += refs;
2712 folio_set_referenced(folio);
2713 return 1;
2714}
2715
2716static int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, unsigned long end,
2717 unsigned int flags, struct page **pages, int *nr)
2718{
2719 unsigned long next;
2720 pmd_t *pmdp;
2721
2722 pmdp = pmd_offset_lockless(pudp, pud, addr);
2723 do {
2724 pmd_t pmd = pmdp_get_lockless(pmdp);
2725
2726 next = pmd_addr_end(addr, end);
2727 if (!pmd_present(pmd))
2728 return 0;
2729
2730 if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) ||
2731 pmd_devmap(pmd))) {
2732 if (pmd_protnone(pmd) &&
2733 !gup_can_follow_protnone(flags))
2734 return 0;
2735
2736 if (!gup_huge_pmd(pmd, pmdp, addr, next, flags,
2737 pages, nr))
2738 return 0;
2739
2740 } else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) {
2741 /*
2742 * architectures can use a different page table format for a
2743 * hugetlbfs pmd than for a THP pmd
2744 */
2745 if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
2746 PMD_SHIFT, next, flags, pages, nr))
2747 return 0;
2748 } else if (!gup_pte_range(pmd, pmdp, addr, next, flags, pages, nr))
2749 return 0;
2750 } while (pmdp++, addr = next, addr != end);
2751
2752 return 1;
2753}
2754
2755static int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr, unsigned long end,
2756 unsigned int flags, struct page **pages, int *nr)
2757{
2758 unsigned long next;
2759 pud_t *pudp;
2760
2761 pudp = pud_offset_lockless(p4dp, p4d, addr);
2762 do {
2763 pud_t pud = READ_ONCE(*pudp);
2764
2765 next = pud_addr_end(addr, end);
2766 if (unlikely(!pud_present(pud)))
2767 return 0;
2768 if (unlikely(pud_huge(pud) || pud_devmap(pud))) {
2769 if (!gup_huge_pud(pud, pudp, addr, next, flags,
2770 pages, nr))
2771 return 0;
2772 } else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
2773 if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
2774 PUD_SHIFT, next, flags, pages, nr))
2775 return 0;
2776 } else if (!gup_pmd_range(pudp, pud, addr, next, flags, pages, nr))
2777 return 0;
2778 } while (pudp++, addr = next, addr != end);
2779
2780 return 1;
2781}
2782
2783static int gup_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr, unsigned long end,
2784 unsigned int flags, struct page **pages, int *nr)
2785{
2786 unsigned long next;
2787 p4d_t *p4dp;
2788
2789 p4dp = p4d_offset_lockless(pgdp, pgd, addr);
2790 do {
2791 p4d_t p4d = READ_ONCE(*p4dp);
2792
2793 next = p4d_addr_end(addr, end);
2794 if (p4d_none(p4d))
2795 return 0;
2796 BUILD_BUG_ON(p4d_huge(p4d));
2797 if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) {
2798 if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr,
2799 P4D_SHIFT, next, flags, pages, nr))
2800 return 0;
2801 } else if (!gup_pud_range(p4dp, p4d, addr, next, flags, pages, nr))
2802 return 0;
2803 } while (p4dp++, addr = next, addr != end);
2804
2805 return 1;
2806}
2807
2808static void gup_pgd_range(unsigned long addr, unsigned long end,
2809 unsigned int flags, struct page **pages, int *nr)
2810{
2811 unsigned long next;
2812 pgd_t *pgdp;
2813
2814 pgdp = pgd_offset(current->mm, addr);
2815 do {
2816 pgd_t pgd = READ_ONCE(*pgdp);
2817
2818 next = pgd_addr_end(addr, end);
2819 if (pgd_none(pgd))
2820 return;
2821 if (unlikely(pgd_huge(pgd))) {
2822 if (!gup_huge_pgd(pgd, pgdp, addr, next, flags,
2823 pages, nr))
2824 return;
2825 } else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
2826 if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
2827 PGDIR_SHIFT, next, flags, pages, nr))
2828 return;
2829 } else if (!gup_p4d_range(pgdp, pgd, addr, next, flags, pages, nr))
2830 return;
2831 } while (pgdp++, addr = next, addr != end);
2832}
2833#else
2834static inline void gup_pgd_range(unsigned long addr, unsigned long end,
2835 unsigned int flags, struct page **pages, int *nr)
2836{
2837}
2838#endif /* CONFIG_HAVE_FAST_GUP */
2839
2840#ifndef gup_fast_permitted
2841/*
2842 * Check if it's allowed to use get_user_pages_fast_only() for the range, or
2843 * we need to fall back to the slow version:
2844 */
2845static bool gup_fast_permitted(unsigned long start, unsigned long end)
2846{
2847 return true;
2848}
2849#endif
2850
2851static unsigned long lockless_pages_from_mm(unsigned long start,
2852 unsigned long end,
2853 unsigned int gup_flags,
2854 struct page **pages)
2855{
2856 unsigned long flags;
2857 int nr_pinned = 0;
2858 unsigned seq;
2859
2860 if (!IS_ENABLED(CONFIG_HAVE_FAST_GUP) ||
2861 !gup_fast_permitted(start, end))
2862 return 0;
2863
2864 if (gup_flags & FOLL_PIN) {
2865 seq = raw_read_seqcount(&current->mm->write_protect_seq);
2866 if (seq & 1)
2867 return 0;
2868 }
2869
2870 /*
2871 * Disable interrupts. The nested form is used, in order to allow full,
2872 * general purpose use of this routine.
2873 *
2874 * With interrupts disabled, we block page table pages from being freed
2875 * from under us. See struct mmu_table_batch comments in
2876 * include/asm-generic/tlb.h for more details.
2877 *
2878 * We do not adopt an rcu_read_lock() here as we also want to block IPIs
2879 * that come from THPs splitting.
2880 */
2881 local_irq_save(flags);
2882 gup_pgd_range(start, end, gup_flags, pages, &nr_pinned);
2883 local_irq_restore(flags);
2884
2885 /*
2886 * When pinning pages for DMA there could be a concurrent write protect
2887 * from fork() via copy_page_range(), in this case always fail fast GUP.
2888 */
2889 if (gup_flags & FOLL_PIN) {
2890 if (read_seqcount_retry(&current->mm->write_protect_seq, seq)) {
2891 unpin_user_pages_lockless(pages, nr_pinned);
2892 return 0;
2893 } else {
2894 sanity_check_pinned_pages(pages, nr_pinned);
2895 }
2896 }
2897 return nr_pinned;
2898}
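
/*
 * For reference (a sketch, not a literal quote of mm/memory.c): the fork()
 * side that write_protect_seq guards against looks roughly like
 *
 *	mmap_assert_write_locked(src_mm);
 *	raw_write_seqcount_begin(&src_mm->write_protect_seq);
 *	... write-protect the COW-shared ptes in copy_page_range() ...
 *	raw_write_seqcount_end(&src_mm->write_protect_seq);
 *
 * so an odd count, or a count that changed across the walk, means a fork()
 * raced with this pin and we must retry via the slow path.
 */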
2899
2900static int internal_get_user_pages_fast(unsigned long start,
2901 unsigned long nr_pages,
2902 unsigned int gup_flags,
2903 struct page **pages)
2904{
2905 unsigned long len, end;
2906 unsigned long nr_pinned;
2907 int ret;
2908
2909 if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM |
2910 FOLL_FORCE | FOLL_PIN | FOLL_GET |
2911 FOLL_FAST_ONLY | FOLL_NOFAULT |
2912 FOLL_PCI_P2PDMA)))
2913 return -EINVAL;
2914
2915 if (gup_flags & FOLL_PIN)
2916 mm_set_has_pinned_flag(&current->mm->flags);
2917
2918 if (!(gup_flags & FOLL_FAST_ONLY))
2919 might_lock_read(&current->mm->mmap_lock);
2920
2921 start = untagged_addr(start) & PAGE_MASK;
2922 len = nr_pages << PAGE_SHIFT;
2923 if (check_add_overflow(start, len, &end))
2924 return 0;
2925 if (unlikely(!access_ok((void __user *)start, len)))
2926 return -EFAULT;
2927
2928 nr_pinned = lockless_pages_from_mm(start, end, gup_flags, pages);
2929 if (nr_pinned == nr_pages || gup_flags & FOLL_FAST_ONLY)
2930 return nr_pinned;
2931
2932 /* Slow path: try to get the remaining pages with get_user_pages */
2933 start += nr_pinned << PAGE_SHIFT;
2934 pages += nr_pinned;
2935 ret = get_user_pages_unlocked(start, nr_pages - nr_pinned, pages,
2936 gup_flags);
2937 if (ret < 0) {
2938 /*
2939 * The caller has to unpin the pages we already pinned so
2940 * returning -errno is not an option
2941 */
2942 if (nr_pinned)
2943 return nr_pinned;
2944 return ret;
2945 }
2946 return ret + nr_pinned;
2947}
2948
2949/**
2950 * get_user_pages_fast_only() - pin user pages in memory
2951 * @start: starting user address
2952 * @nr_pages: number of pages from start to pin
2953 * @gup_flags: flags modifying pin behaviour
2954 * @pages: array that receives pointers to the pages pinned.
2955 * Should be at least nr_pages long.
2956 *
2957 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
2958 * the regular GUP.
2959 * Note a difference with get_user_pages_fast: this always returns the
2960 * number of pages pinned, 0 if no pages were pinned.
2961 *
2962 * If the architecture does not support this function, simply return with no
2963 * pages pinned.
2964 *
2965 * Careful, careful! COW breaking can go either way, so a non-write
2966 * access can get ambiguous page results. If you call this function without
2967 * 'write' set, you'd better be sure that you're ok with that ambiguity.
2968 */
2969int get_user_pages_fast_only(unsigned long start, int nr_pages,
2970 unsigned int gup_flags, struct page **pages)
2971{
2972 int nr_pinned;
2973 /*
2974 * Internally (within mm/gup.c), gup fast variants must set FOLL_GET,
2975 * because gup fast is always a "pin with a +1 page refcount" request.
2976 *
2977 * FOLL_FAST_ONLY is required in order to match the API description of
2978 * this routine: no fall back to regular ("slow") GUP.
2979 */
2980 gup_flags |= FOLL_GET | FOLL_FAST_ONLY;
2981
2982 nr_pinned = internal_get_user_pages_fast(start, nr_pages, gup_flags,
2983 pages);
2984
2985 /*
2986 * As specified in the API description above, this routine is not
2987 * allowed to return negative values. However, the common core
2988 * routine internal_get_user_pages_fast() *can* return -errno.
2989 * Therefore, correct for that here:
2990 */
2991 if (nr_pinned < 0)
2992 nr_pinned = 0;
2993
2994 return nr_pinned;
2995}
2996EXPORT_SYMBOL_GPL(get_user_pages_fast_only);
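
/*
 * Sketch of the intended get_user_pages_fast_only() usage, loosely modelled
 * on get_futex_key() (illustrative only):
 *
 *	if (get_user_pages_fast_only(addr, 1, FOLL_WRITE, &page) != 1) {
 *		... handle the miss from a context that may sleep, e.g. by
 *		... calling get_user_pages_unlocked() or by faulting the
 *		... address in and retrying
 *	}
 *	...
 *	put_page(page);
 */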
2997
2998/**
2999 * get_user_pages_fast() - pin user pages in memory
3000 * @start: starting user address
3001 * @nr_pages: number of pages from start to pin
3002 * @gup_flags: flags modifying pin behaviour
3003 * @pages: array that receives pointers to the pages pinned.
3004 * Should be at least nr_pages long.
3005 *
3006 * Attempt to pin user pages in memory without taking mm->mmap_lock.
3007 * If not successful, it will fall back to taking the lock and
3008 * calling get_user_pages().
3009 *
3010 * Returns number of pages pinned. This may be fewer than the number requested.
3011 * If nr_pages is 0 or negative, returns 0. If no pages were pinned, returns
3012 * -errno.
3013 */
3014int get_user_pages_fast(unsigned long start, int nr_pages,
3015 unsigned int gup_flags, struct page **pages)
3016{
3017 if (!is_valid_gup_flags(gup_flags))
3018 return -EINVAL;
3019
3020 /*
3021 * The caller may or may not have explicitly set FOLL_GET; either way is
3022 * OK. However, internally (within mm/gup.c), gup fast variants must set
3023 * FOLL_GET, because gup fast is always a "pin with a +1 page refcount"
3024 * request.
3025 */
3026 gup_flags |= FOLL_GET;
3027 return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
3028}
3029EXPORT_SYMBOL_GPL(get_user_pages_fast);
3030
3031/**
3032 * pin_user_pages_fast() - pin user pages in memory without taking locks
3033 *
3034 * @start: starting user address
3035 * @nr_pages: number of pages from start to pin
3036 * @gup_flags: flags modifying pin behaviour
3037 * @pages: array that receives pointers to the pages pinned.
3038 * Should be at least nr_pages long.
3039 *
3040 * Nearly the same as get_user_pages_fast(), except that FOLL_PIN is set. See
3041 * get_user_pages_fast() for documentation on the function arguments, because
3042 * the arguments here are identical.
3043 *
3044 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
3045 * see Documentation/core-api/pin_user_pages.rst for further details.
3046 */
3047int pin_user_pages_fast(unsigned long start, int nr_pages,
3048 unsigned int gup_flags, struct page **pages)
3049{
3050 /* FOLL_GET and FOLL_PIN are mutually exclusive. */
3051 if (WARN_ON_ONCE(gup_flags & FOLL_GET))
3052 return -EINVAL;
3053
3054 if (WARN_ON_ONCE(!pages))
3055 return -EINVAL;
3056
3057 gup_flags |= FOLL_PIN;
3058 return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
3059}
3060EXPORT_SYMBOL_GPL(pin_user_pages_fast);
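
/*
 * Sketch of a typical pin_user_pages_fast() caller doing direct-I/O style
 * DMA into user memory (illustrative only, error handling omitted):
 *
 *	pinned = pin_user_pages_fast(start, nr_pages, FOLL_WRITE, pages);
 *	if (pinned <= 0)
 *		return pinned ? pinned : -EFAULT;
 *	... map the pages for DMA and let the device write into them ...
 *	unpin_user_pages_dirty_lock(pages, pinned, true);
 *
 * unpin_user_pages_dirty_lock() marks the pages dirty and drops the
 * FOLL_PIN references; pinned pages must never be released with put_page().
 */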
3061
3062/*
3063 * This is the FOLL_PIN equivalent of get_user_pages_fast_only(). Behavior
3064 * is the same, except that this one sets FOLL_PIN instead of FOLL_GET.
3065 *
3066 * The API rules are the same, too: no negative values may be returned.
3067 */
3068int pin_user_pages_fast_only(unsigned long start, int nr_pages,
3069 unsigned int gup_flags, struct page **pages)
3070{
3071 int nr_pinned;
3072
3073 /*
3074 * FOLL_GET and FOLL_PIN are mutually exclusive. Note that the API
3075 * rules require returning 0, rather than -errno:
3076 */
3077 if (WARN_ON_ONCE(gup_flags & FOLL_GET))
3078 return 0;
3079
3080 if (WARN_ON_ONCE(!pages))
3081 return 0;
3082 /*
3083 * FOLL_FAST_ONLY is required in order to match the API description of
3084 * this routine: no fall back to regular ("slow") GUP.
3085 */
3086 gup_flags |= (FOLL_PIN | FOLL_FAST_ONLY);
3087 nr_pinned = internal_get_user_pages_fast(start, nr_pages, gup_flags,
3088 pages);
3089 /*
3090 * This routine is not allowed to return negative values. However,
3091 * internal_get_user_pages_fast() *can* return -errno. Therefore,
3092 * correct for that here:
3093 */
3094 if (nr_pinned < 0)
3095 nr_pinned = 0;
3096
3097 return nr_pinned;
3098}
3099EXPORT_SYMBOL_GPL(pin_user_pages_fast_only);
3100
3101/**
3102 * pin_user_pages_remote() - pin pages of a remote process
3103 *
3104 * @mm: mm_struct of target mm
3105 * @start: starting user address
3106 * @nr_pages: number of pages from start to pin
3107 * @gup_flags: flags modifying lookup behaviour
3108 * @pages: array that receives pointers to the pages pinned.
3109 * Should be at least nr_pages long.
3110 * @vmas: array of pointers to vmas corresponding to each page.
3111 * Or NULL if the caller does not require them.
3112 * @locked: pointer to lock flag indicating whether lock is held and
3113 * subsequently whether VM_FAULT_RETRY functionality can be
3114 * utilised. Lock must initially be held.
3115 *
3116 * Nearly the same as get_user_pages_remote(), except that FOLL_PIN is set. See
3117 * get_user_pages_remote() for documentation on the function arguments, because
3118 * the arguments here are identical.
3119 *
3120 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
3121 * see Documentation/core-api/pin_user_pages.rst for details.
3122 */
3123long pin_user_pages_remote(struct mm_struct *mm,
3124 unsigned long start, unsigned long nr_pages,
3125 unsigned int gup_flags, struct page **pages,
3126 struct vm_area_struct **vmas, int *locked)
3127{
3128 /* FOLL_GET and FOLL_PIN are mutually exclusive. */
3129 if (WARN_ON_ONCE(gup_flags & FOLL_GET))
3130 return -EINVAL;
3131
3132 if (WARN_ON_ONCE(!pages))
3133 return -EINVAL;
3134
3135 return __gup_longterm_locked(mm, start, nr_pages, pages, vmas, locked,
3136 gup_flags | FOLL_PIN | FOLL_TOUCH |
3137 FOLL_REMOTE);
3138}
3139EXPORT_SYMBOL(pin_user_pages_remote);
3140
3141/**
3142 * pin_user_pages() - pin user pages in memory for use by other devices
3143 *
3144 * @start: starting user address
3145 * @nr_pages: number of pages from start to pin
3146 * @gup_flags: flags modifying lookup behaviour
3147 * @pages: array that receives pointers to the pages pinned.
3148 * Should be at least nr_pages long.
3149 * @vmas: array of pointers to vmas corresponding to each page.
3150 * Or NULL if the caller does not require them.
3151 *
3152 * Nearly the same as get_user_pages(), except that FOLL_TOUCH is not set, and
3153 * FOLL_PIN is set.
3154 *
3155 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
3156 * see Documentation/core-api/pin_user_pages.rst for details.
3157 */
3158long pin_user_pages(unsigned long start, unsigned long nr_pages,
3159 unsigned int gup_flags, struct page **pages,
3160 struct vm_area_struct **vmas)
3161{
3162 /* FOLL_GET and FOLL_PIN are mutually exclusive. */
3163 if (WARN_ON_ONCE(gup_flags & FOLL_GET))
3164 return -EINVAL;
3165
3166 if (WARN_ON_ONCE(!pages))
3167 return -EINVAL;
3168
3169 gup_flags |= FOLL_PIN;
3170 return __gup_longterm_locked(current->mm, start, nr_pages,
3171 pages, vmas, NULL, gup_flags);
3172}
3173EXPORT_SYMBOL(pin_user_pages);
3174
3175/*
3176 * pin_user_pages_unlocked() is the FOLL_PIN variant of
3177 * get_user_pages_unlocked(). Behavior is the same, except that this one sets
3178 * FOLL_PIN and rejects FOLL_GET.
3179 */
3180long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
3181 struct page **pages, unsigned int gup_flags)
3182{
3183 /* FOLL_GET and FOLL_PIN are mutually exclusive. */
3184 if (WARN_ON_ONCE(gup_flags & FOLL_GET))
3185 return -EINVAL;
3186
3187 if (WARN_ON_ONCE(!pages))
3188 return -EINVAL;
3189
3190 gup_flags |= FOLL_PIN;
3191 return get_user_pages_unlocked(start, nr_pages, pages, gup_flags);
3192}
3193EXPORT_SYMBOL(pin_user_pages_unlocked);
1#include <linux/kernel.h>
2#include <linux/errno.h>
3#include <linux/err.h>
4#include <linux/spinlock.h>
5
6#include <linux/mm.h>
7#include <linux/memremap.h>
8#include <linux/pagemap.h>
9#include <linux/rmap.h>
10#include <linux/swap.h>
11#include <linux/swapops.h>
12
13#include <linux/sched.h>
14#include <linux/rwsem.h>
15#include <linux/hugetlb.h>
16
17#include <asm/mmu_context.h>
18#include <asm/pgtable.h>
19#include <asm/tlbflush.h>
20
21#include "internal.h"
22
23static struct page *no_page_table(struct vm_area_struct *vma,
24 unsigned int flags)
25{
26 /*
27 * When core dumping an enormous anonymous area that nobody
28 * has touched so far, we don't want to allocate unnecessary pages or
29 * page tables. Return error instead of NULL to skip handle_mm_fault,
30 * then get_dump_page() will return NULL to leave a hole in the dump.
31 * But we can only make this optimization where a hole would surely
32 * be zero-filled if handle_mm_fault() actually did handle it.
33 */
34 if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault))
35 return ERR_PTR(-EFAULT);
36 return NULL;
37}
38
39static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
40 pte_t *pte, unsigned int flags)
41{
42 /* No page to get reference */
43 if (flags & FOLL_GET)
44 return -EFAULT;
45
46 if (flags & FOLL_TOUCH) {
47 pte_t entry = *pte;
48
49 if (flags & FOLL_WRITE)
50 entry = pte_mkdirty(entry);
51 entry = pte_mkyoung(entry);
52
53 if (!pte_same(*pte, entry)) {
54 set_pte_at(vma->vm_mm, address, pte, entry);
55 update_mmu_cache(vma, address, pte);
56 }
57 }
58
59 /* Proper page table entry exists, but no corresponding struct page */
60 return -EEXIST;
61}
62
63static struct page *follow_page_pte(struct vm_area_struct *vma,
64 unsigned long address, pmd_t *pmd, unsigned int flags)
65{
66 struct mm_struct *mm = vma->vm_mm;
67 struct dev_pagemap *pgmap = NULL;
68 struct page *page;
69 spinlock_t *ptl;
70 pte_t *ptep, pte;
71
72retry:
73 if (unlikely(pmd_bad(*pmd)))
74 return no_page_table(vma, flags);
75
76 ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
77 pte = *ptep;
78 if (!pte_present(pte)) {
79 swp_entry_t entry;
80 /*
81 * KSM's break_ksm() relies upon recognizing a ksm page
82 * even while it is being migrated, so for that case we
83 * need migration_entry_wait().
84 */
85 if (likely(!(flags & FOLL_MIGRATION)))
86 goto no_page;
87 if (pte_none(pte))
88 goto no_page;
89 entry = pte_to_swp_entry(pte);
90 if (!is_migration_entry(entry))
91 goto no_page;
92 pte_unmap_unlock(ptep, ptl);
93 migration_entry_wait(mm, pmd, address);
94 goto retry;
95 }
96 if ((flags & FOLL_NUMA) && pte_protnone(pte))
97 goto no_page;
98 if ((flags & FOLL_WRITE) && !pte_write(pte)) {
99 pte_unmap_unlock(ptep, ptl);
100 return NULL;
101 }
102
103 page = vm_normal_page(vma, address, pte);
104 if (!page && pte_devmap(pte) && (flags & FOLL_GET)) {
105 /*
106 * Only return device mapping pages in the FOLL_GET case since
107 * they are only valid while holding the pgmap reference.
108 */
109 pgmap = get_dev_pagemap(pte_pfn(pte), NULL);
110 if (pgmap)
111 page = pte_page(pte);
112 else
113 goto no_page;
114 } else if (unlikely(!page)) {
115 if (flags & FOLL_DUMP) {
116 /* Avoid special (like zero) pages in core dumps */
117 page = ERR_PTR(-EFAULT);
118 goto out;
119 }
120
121 if (is_zero_pfn(pte_pfn(pte))) {
122 page = pte_page(pte);
123 } else {
124 int ret;
125
126 ret = follow_pfn_pte(vma, address, ptep, flags);
127 page = ERR_PTR(ret);
128 goto out;
129 }
130 }
131
132 if (flags & FOLL_SPLIT && PageTransCompound(page)) {
133 int ret;
134 get_page(page);
135 pte_unmap_unlock(ptep, ptl);
136 lock_page(page);
137 ret = split_huge_page(page);
138 unlock_page(page);
139 put_page(page);
140 if (ret)
141 return ERR_PTR(ret);
142 goto retry;
143 }
144
145 if (flags & FOLL_GET) {
146 get_page(page);
147
148 /* drop the pgmap reference now that we hold the page */
149 if (pgmap) {
150 put_dev_pagemap(pgmap);
151 pgmap = NULL;
152 }
153 }
154 if (flags & FOLL_TOUCH) {
155 if ((flags & FOLL_WRITE) &&
156 !pte_dirty(pte) && !PageDirty(page))
157 set_page_dirty(page);
158 /*
159 * pte_mkyoung() would be more correct here, but atomic care
160 * is needed to avoid losing the dirty bit: it is easier to use
161 * mark_page_accessed().
162 */
163 mark_page_accessed(page);
164 }
165 if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
166 /* Do not mlock pte-mapped THP */
167 if (PageTransCompound(page))
168 goto out;
169
170 /*
171 * The preliminary mapping check is mainly to avoid the
172 * pointless overhead of lock_page on the ZERO_PAGE
173 * which might bounce very badly if there is contention.
174 *
175 * If the page is already locked, we don't need to
176 * handle it now - vmscan will handle it later if and
177 * when it attempts to reclaim the page.
178 */
179 if (page->mapping && trylock_page(page)) {
180 lru_add_drain(); /* push cached pages to LRU */
181 /*
182 * Because we lock page here, and migration is
183 * blocked by the pte's page reference, and we
184 * know the page is still mapped, we don't even
185 * need to check for file-cache page truncation.
186 */
187 mlock_vma_page(page);
188 unlock_page(page);
189 }
190 }
191out:
192 pte_unmap_unlock(ptep, ptl);
193 return page;
194no_page:
195 pte_unmap_unlock(ptep, ptl);
196 if (!pte_none(pte))
197 return NULL;
198 return no_page_table(vma, flags);
199}
200
201/**
202 * follow_page_mask - look up a page descriptor from a user-virtual address
203 * @vma: vm_area_struct mapping @address
204 * @address: virtual address to look up
205 * @flags: flags modifying lookup behaviour
206 * @page_mask: on output, *page_mask is set according to the size of the page
207 *
208 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
209 *
210 * Returns the mapped (struct page *), %NULL if no mapping exists, or
211 * an error pointer if there is a mapping to something not represented
212 * by a page descriptor (see also vm_normal_page()).
213 */
214struct page *follow_page_mask(struct vm_area_struct *vma,
215 unsigned long address, unsigned int flags,
216 unsigned int *page_mask)
217{
218 pgd_t *pgd;
219 pud_t *pud;
220 pmd_t *pmd;
221 spinlock_t *ptl;
222 struct page *page;
223 struct mm_struct *mm = vma->vm_mm;
224
225 *page_mask = 0;
226
227 page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
228 if (!IS_ERR(page)) {
229 BUG_ON(flags & FOLL_GET);
230 return page;
231 }
232
233 pgd = pgd_offset(mm, address);
234 if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
235 return no_page_table(vma, flags);
236
237 pud = pud_offset(pgd, address);
238 if (pud_none(*pud))
239 return no_page_table(vma, flags);
240 if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
241 page = follow_huge_pud(mm, address, pud, flags);
242 if (page)
243 return page;
244 return no_page_table(vma, flags);
245 }
246 if (unlikely(pud_bad(*pud)))
247 return no_page_table(vma, flags);
248
249 pmd = pmd_offset(pud, address);
250 if (pmd_none(*pmd))
251 return no_page_table(vma, flags);
252 if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
253 page = follow_huge_pmd(mm, address, pmd, flags);
254 if (page)
255 return page;
256 return no_page_table(vma, flags);
257 }
258 if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
259 return no_page_table(vma, flags);
260 if (pmd_devmap(*pmd)) {
261 ptl = pmd_lock(mm, pmd);
262 page = follow_devmap_pmd(vma, address, pmd, flags);
263 spin_unlock(ptl);
264 if (page)
265 return page;
266 }
267 if (likely(!pmd_trans_huge(*pmd)))
268 return follow_page_pte(vma, address, pmd, flags);
269
270 ptl = pmd_lock(mm, pmd);
271 if (unlikely(!pmd_trans_huge(*pmd))) {
272 spin_unlock(ptl);
273 return follow_page_pte(vma, address, pmd, flags);
274 }
275 if (flags & FOLL_SPLIT) {
276 int ret;
277 page = pmd_page(*pmd);
278 if (is_huge_zero_page(page)) {
279 spin_unlock(ptl);
280 ret = 0;
281 split_huge_pmd(vma, pmd, address);
282 } else {
283 get_page(page);
284 spin_unlock(ptl);
285 lock_page(page);
286 ret = split_huge_page(page);
287 unlock_page(page);
288 put_page(page);
289 }
290
291 return ret ? ERR_PTR(ret) :
292 follow_page_pte(vma, address, pmd, flags);
293 }
294
295 page = follow_trans_huge_pmd(vma, address, pmd, flags);
296 spin_unlock(ptl);
297 *page_mask = HPAGE_PMD_NR - 1;
298 return page;
299}
300
301static int get_gate_page(struct mm_struct *mm, unsigned long address,
302 unsigned int gup_flags, struct vm_area_struct **vma,
303 struct page **page)
304{
305 pgd_t *pgd;
306 pud_t *pud;
307 pmd_t *pmd;
308 pte_t *pte;
309 int ret = -EFAULT;
310
311 /* user gate pages are read-only */
312 if (gup_flags & FOLL_WRITE)
313 return -EFAULT;
314 if (address > TASK_SIZE)
315 pgd = pgd_offset_k(address);
316 else
317 pgd = pgd_offset_gate(mm, address);
318 BUG_ON(pgd_none(*pgd));
319 pud = pud_offset(pgd, address);
320 BUG_ON(pud_none(*pud));
321 pmd = pmd_offset(pud, address);
322 if (pmd_none(*pmd))
323 return -EFAULT;
324 VM_BUG_ON(pmd_trans_huge(*pmd));
325 pte = pte_offset_map(pmd, address);
326 if (pte_none(*pte))
327 goto unmap;
328 *vma = get_gate_vma(mm);
329 if (!page)
330 goto out;
331 *page = vm_normal_page(*vma, address, *pte);
332 if (!*page) {
333 if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
334 goto unmap;
335 *page = pte_page(*pte);
336 }
337 get_page(*page);
338out:
339 ret = 0;
340unmap:
341 pte_unmap(pte);
342 return ret;
343}
344
345/*
346 * mmap_sem must be held on entry. If @nonblocking != NULL and
347 * *@flags does not include FOLL_NOWAIT, the mmap_sem may be released.
348 * If it is, *@nonblocking will be set to 0 and -EBUSY returned.
349 */
350static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
351 unsigned long address, unsigned int *flags, int *nonblocking)
352{
353 struct mm_struct *mm = vma->vm_mm;
354 unsigned int fault_flags = 0;
355 int ret;
356
357 /* mlock all present pages, but do not fault in new pages */
358 if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
359 return -ENOENT;
360 /* For mm_populate(), just skip the stack guard page. */
361 if ((*flags & FOLL_POPULATE) &&
362 (stack_guard_page_start(vma, address) ||
363 stack_guard_page_end(vma, address + PAGE_SIZE)))
364 return -ENOENT;
365 if (*flags & FOLL_WRITE)
366 fault_flags |= FAULT_FLAG_WRITE;
367 if (*flags & FOLL_REMOTE)
368 fault_flags |= FAULT_FLAG_REMOTE;
369 if (nonblocking)
370 fault_flags |= FAULT_FLAG_ALLOW_RETRY;
371 if (*flags & FOLL_NOWAIT)
372 fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
373 if (*flags & FOLL_TRIED) {
374 VM_WARN_ON_ONCE(fault_flags & FAULT_FLAG_ALLOW_RETRY);
375 fault_flags |= FAULT_FLAG_TRIED;
376 }
377
378 ret = handle_mm_fault(mm, vma, address, fault_flags);
379 if (ret & VM_FAULT_ERROR) {
380 if (ret & VM_FAULT_OOM)
381 return -ENOMEM;
382 if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
383 return *flags & FOLL_HWPOISON ? -EHWPOISON : -EFAULT;
384 if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
385 return -EFAULT;
386 BUG();
387 }
388
389 if (tsk) {
390 if (ret & VM_FAULT_MAJOR)
391 tsk->maj_flt++;
392 else
393 tsk->min_flt++;
394 }
395
396 if (ret & VM_FAULT_RETRY) {
397 if (nonblocking)
398 *nonblocking = 0;
399 return -EBUSY;
400 }
401
402 /*
403 * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
404 * necessary, even if maybe_mkwrite decided not to set pte_write. We
405 * can thus safely do subsequent page lookups as if they were reads.
406 * But only do so when looping for pte_write is futile: in some cases
407 * userspace may also be wanting to write to the gotten user page,
408 * which a read fault here might prevent (a readonly page might get
409 * reCOWed by userspace write).
410 */
411 if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
412 *flags &= ~FOLL_WRITE;
413 return 0;
414}
415
416static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
417{
418 vm_flags_t vm_flags = vma->vm_flags;
419 int write = (gup_flags & FOLL_WRITE);
420 int foreign = (gup_flags & FOLL_REMOTE);
421
422 if (vm_flags & (VM_IO | VM_PFNMAP))
423 return -EFAULT;
424
425 if (write) {
426 if (!(vm_flags & VM_WRITE)) {
427 if (!(gup_flags & FOLL_FORCE))
428 return -EFAULT;
429 /*
430 * We used to let the write,force case do COW in a
431 * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
432 * set a breakpoint in a read-only mapping of an
433 * executable, without corrupting the file (yet only
434 * when that file had been opened for writing!).
435 * Anon pages in shared mappings are surprising: now
436 * just reject it.
437 */
438 if (!is_cow_mapping(vm_flags))
439 return -EFAULT;
440 }
441 } else if (!(vm_flags & VM_READ)) {
442 if (!(gup_flags & FOLL_FORCE))
443 return -EFAULT;
444 /*
445 * Is there actually any vma we can reach here which does not
446 * have VM_MAYREAD set?
447 */
448 if (!(vm_flags & VM_MAYREAD))
449 return -EFAULT;
450 }
451 /*
452 * gups are always data accesses, not instruction
453 * fetches, so execute=false here
454 */
455 if (!arch_vma_access_permitted(vma, write, false, foreign))
456 return -EFAULT;
457 return 0;
458}
459
460/**
461 * __get_user_pages() - pin user pages in memory
462 * @tsk: task_struct of target task
463 * @mm: mm_struct of target mm
464 * @start: starting user address
465 * @nr_pages: number of pages from start to pin
466 * @gup_flags: flags modifying pin behaviour
467 * @pages: array that receives pointers to the pages pinned.
468 * Should be at least nr_pages long. Or NULL, if caller
469 * only intends to ensure the pages are faulted in.
470 * @vmas: array of pointers to vmas corresponding to each page.
471 * Or NULL if the caller does not require them.
472 * @nonblocking: whether waiting for disk IO or mmap_sem contention
473 *
474 * Returns number of pages pinned. This may be fewer than the number
475 * requested. If nr_pages is 0 or negative, returns 0. If no pages
476 * were pinned, returns -errno. Each page returned must be released
477 * with a put_page() call when it is finished with. vmas will only
478 * remain valid while mmap_sem is held.
479 *
480 * Must be called with mmap_sem held. It may be released. See below.
481 *
482 * __get_user_pages walks a process's page tables and takes a reference to
483 * each struct page that each user address corresponds to at a given
484 * instant. That is, it takes the page that would be accessed if a user
485 * thread accesses the given user virtual address at that instant.
486 *
487 * This does not guarantee that the page exists in the user mappings when
488 * __get_user_pages returns, and there may even be a completely different
489 * page there in some cases (eg. if mmapped pagecache has been invalidated
490 * and subsequently re-faulted). However it does guarantee that the page
491 * won't be freed completely. And mostly callers simply care that the page
492 * contains data that was valid *at some point in time*. Typically, an IO
493 * or similar operation cannot guarantee anything stronger anyway because
494 * locks can't be held over the syscall boundary.
495 *
496 * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
497 * the page is written to, set_page_dirty (or set_page_dirty_lock, as
498 * appropriate) must be called after the page is finished with, and
499 * before put_page is called.
500 *
501 * If @nonblocking != NULL, __get_user_pages will not wait for disk IO
502 * or mmap_sem contention, and if waiting is needed to pin all pages,
503 * *@nonblocking will be set to 0. Further, if @gup_flags does not
504 * include FOLL_NOWAIT, the mmap_sem will be released via up_read() in
505 * this case.
506 *
507 * A caller using such a combination of @nonblocking and @gup_flags
508 * must therefore hold the mmap_sem for reading only, and recognize
509 * when it's been released. Otherwise, it must be held for either
510 * reading or writing and will not be released.
511 *
512 * In most cases, get_user_pages or get_user_pages_fast should be used
513 * instead of __get_user_pages. __get_user_pages should be used only if
514 * you need some special @gup_flags.
515 */
516long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
517 unsigned long start, unsigned long nr_pages,
518 unsigned int gup_flags, struct page **pages,
519 struct vm_area_struct **vmas, int *nonblocking)
520{
521 long i = 0;
522 unsigned int page_mask;
523 struct vm_area_struct *vma = NULL;
524
525 if (!nr_pages)
526 return 0;
527
528 VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
529
530 /*
531 * If FOLL_FORCE is set then do not force a full fault as the hinting
532 * fault information is unrelated to the reference behaviour of a task
533 * using the address space
534 */
535 if (!(gup_flags & FOLL_FORCE))
536 gup_flags |= FOLL_NUMA;
537
538 do {
539 struct page *page;
540 unsigned int foll_flags = gup_flags;
541 unsigned int page_increm;
542
543		/* first iteration or crossing a vma boundary */
544 if (!vma || start >= vma->vm_end) {
545 vma = find_extend_vma(mm, start);
546 if (!vma && in_gate_area(mm, start)) {
547 int ret;
548 ret = get_gate_page(mm, start & PAGE_MASK,
549 gup_flags, &vma,
550 pages ? &pages[i] : NULL);
551 if (ret)
552 return i ? : ret;
553 page_mask = 0;
554 goto next_page;
555 }
556
557 if (!vma || check_vma_flags(vma, gup_flags))
558 return i ? : -EFAULT;
559 if (is_vm_hugetlb_page(vma)) {
560 i = follow_hugetlb_page(mm, vma, pages, vmas,
561 &start, &nr_pages, i,
562 gup_flags);
563 continue;
564 }
565 }
566retry:
567 /*
568 * If we have a pending SIGKILL, don't keep faulting pages and
569 * potentially allocating memory.
570 */
571 if (unlikely(fatal_signal_pending(current)))
572 return i ? i : -ERESTARTSYS;
573 cond_resched();
574 page = follow_page_mask(vma, start, foll_flags, &page_mask);
575 if (!page) {
576 int ret;
577 ret = faultin_page(tsk, vma, start, &foll_flags,
578 nonblocking);
579 switch (ret) {
580 case 0:
581 goto retry;
582 case -EFAULT:
583 case -ENOMEM:
584 case -EHWPOISON:
585 return i ? i : ret;
586 case -EBUSY:
587 return i;
588 case -ENOENT:
589 goto next_page;
590 }
591 BUG();
592 } else if (PTR_ERR(page) == -EEXIST) {
593 /*
594 * Proper page table entry exists, but no corresponding
595 * struct page.
596 */
597 goto next_page;
598 } else if (IS_ERR(page)) {
599 return i ? i : PTR_ERR(page);
600 }
601 if (pages) {
602 pages[i] = page;
603 flush_anon_page(vma, page, start);
604 flush_dcache_page(page);
605 page_mask = 0;
606 }
607next_page:
608 if (vmas) {
609 vmas[i] = vma;
610 page_mask = 0;
611 }
612 page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
613 if (page_increm > nr_pages)
614 page_increm = nr_pages;
615 i += page_increm;
616 start += page_increm * PAGE_SIZE;
617 nr_pages -= page_increm;
618 } while (nr_pages);
619 return i;
620}
621EXPORT_SYMBOL(__get_user_pages);
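
/*
 * Illustrative sketch, not part of the original file: one plausible way a
 * caller that needs special gup_flags might drive __get_user_pages().  The
 * helper name example_pin_user_buffer() is hypothetical; error handling is
 * minimal, and FOLL_GET is set explicitly because a non-NULL @pages requires
 * it (see the VM_BUG_ON above).
 */
static long example_pin_user_buffer(unsigned long uaddr, unsigned long nr_pages,
				    struct page **pages)
{
	long pinned;

	down_read(&current->mm->mmap_sem);
	pinned = __get_user_pages(current, current->mm, uaddr & PAGE_MASK,
				  nr_pages, FOLL_GET | FOLL_WRITE | FOLL_TOUCH,
				  pages, NULL, NULL);
	up_read(&current->mm->mmap_sem);

	/* Each page in @pages must eventually be released with put_page(). */
	return pinned;
}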
622
623bool vma_permits_fault(struct vm_area_struct *vma, unsigned int fault_flags)
624{
625 bool write = !!(fault_flags & FAULT_FLAG_WRITE);
626 bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE);
627 vm_flags_t vm_flags = write ? VM_WRITE : VM_READ;
628
629 if (!(vm_flags & vma->vm_flags))
630 return false;
631
632 /*
633 * The architecture might have a hardware protection
634 * mechanism other than read/write that can deny access.
635 *
636 * gup always represents data access, not instruction
637 * fetches, so execute=false here:
638 */
639 if (!arch_vma_access_permitted(vma, write, false, foreign))
640 return false;
641
642 return true;
643}
644
645/*
646 * fixup_user_fault() - manually resolve a user page fault
647 * @tsk: the task_struct to use for page fault accounting, or
648 * NULL if faults are not to be recorded.
649 * @mm: mm_struct of target mm
650 * @address: user address
651 * @fault_flags: flags to pass down to handle_mm_fault()
652 * @unlocked: did we unlock the mmap_sem while retrying, maybe NULL if caller
653 * does not allow retry
654 *
655 * This is meant to be called in the specific scenario where, for locking reasons,
656 * we try to access user memory in atomic context (within a pagefault_disable()
657 * section), that access returns -EFAULT, and we want to resolve the user fault
658 * before trying again.
659 *
660 * Typically this is meant to be used by the futex code.
661 *
662 * The main difference with get_user_pages() is that this function will
663 * unconditionally call handle_mm_fault() which will in turn perform all the
664 * necessary SW fixup of the dirty and young bits in the PTE, while
665 * get_user_pages() only guarantees to update these in the struct page.
666 *
667 * This is important for some architectures where those bits also gate the
668 * access permission to the page because they are maintained in software. On
669 * such architectures, gup() will not be enough to make a subsequent access
670 * succeed.
671 *
672 * This function will not return with an unlocked mmap_sem. So it does not have
673 * the same semantics wrt the @mm->mmap_sem as filemap_fault() does.
674 */
675int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
676 unsigned long address, unsigned int fault_flags,
677 bool *unlocked)
678{
679 struct vm_area_struct *vma;
680 int ret, major = 0;
681
682 if (unlocked)
683 fault_flags |= FAULT_FLAG_ALLOW_RETRY;
684
685retry:
686 vma = find_extend_vma(mm, address);
687 if (!vma || address < vma->vm_start)
688 return -EFAULT;
689
690 if (!vma_permits_fault(vma, fault_flags))
691 return -EFAULT;
692
693 ret = handle_mm_fault(mm, vma, address, fault_flags);
694 major |= ret & VM_FAULT_MAJOR;
695 if (ret & VM_FAULT_ERROR) {
696 if (ret & VM_FAULT_OOM)
697 return -ENOMEM;
698 if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
699 return -EHWPOISON;
700 if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
701 return -EFAULT;
702 BUG();
703 }
704
705 if (ret & VM_FAULT_RETRY) {
706 down_read(&mm->mmap_sem);
707 if (!(fault_flags & FAULT_FLAG_TRIED)) {
708 *unlocked = true;
709 fault_flags &= ~FAULT_FLAG_ALLOW_RETRY;
710 fault_flags |= FAULT_FLAG_TRIED;
711 goto retry;
712 }
713 }
714
715 if (tsk) {
716 if (major)
717 tsk->maj_flt++;
718 else
719 tsk->min_flt++;
720 }
721 return 0;
722}
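
/*
 * Illustrative sketch, not part of the original file: the futex-style pattern
 * described above -- attempt the access with page faults disabled, and on
 * failure resolve the fault with fixup_user_fault() before retrying.  The
 * helper name example_read_u32_resilient() is hypothetical, and the caller is
 * assumed to be allowed to take mmap_sem here.
 */
static int example_read_u32_resilient(u32 __user *uaddr, u32 *val)
{
	int ret;

	for (;;) {
		pagefault_disable();
		ret = __copy_from_user_inatomic(val, uaddr, sizeof(*val));
		pagefault_enable();
		if (!ret)
			return 0;

		down_read(&current->mm->mmap_sem);
		ret = fixup_user_fault(current, current->mm,
				       (unsigned long)uaddr, 0, NULL);
		up_read(&current->mm->mmap_sem);
		if (ret)
			return ret;
	}
}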
723
724static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
725 struct mm_struct *mm,
726 unsigned long start,
727 unsigned long nr_pages,
728 int write, int force,
729 struct page **pages,
730 struct vm_area_struct **vmas,
731 int *locked, bool notify_drop,
732 unsigned int flags)
733{
734 long ret, pages_done;
735 bool lock_dropped;
736
737 if (locked) {
738 /* if VM_FAULT_RETRY can be returned, vmas become invalid */
739 BUG_ON(vmas);
740 /* check caller initialized locked */
741 BUG_ON(*locked != 1);
742 }
743
744 if (pages)
745 flags |= FOLL_GET;
746 if (write)
747 flags |= FOLL_WRITE;
748 if (force)
749 flags |= FOLL_FORCE;
750
751 pages_done = 0;
752 lock_dropped = false;
753 for (;;) {
754 ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages,
755 vmas, locked);
756 if (!locked)
757 /* VM_FAULT_RETRY couldn't trigger, bypass */
758 return ret;
759
760 /* VM_FAULT_RETRY cannot return errors */
761 if (!*locked) {
762 BUG_ON(ret < 0);
763 BUG_ON(ret >= nr_pages);
764 }
765
766 if (!pages)
767 /* If it's a prefault don't insist harder */
768 return ret;
769
770 if (ret > 0) {
771 nr_pages -= ret;
772 pages_done += ret;
773 if (!nr_pages)
774 break;
775 }
776 if (*locked) {
777 /* VM_FAULT_RETRY didn't trigger */
778 if (!pages_done)
779 pages_done = ret;
780 break;
781 }
782 /* VM_FAULT_RETRY triggered, so seek to the faulting offset */
783 pages += ret;
784 start += ret << PAGE_SHIFT;
785
786 /*
787 * Repeat on the address that fired VM_FAULT_RETRY
788 * without FAULT_FLAG_ALLOW_RETRY but with
789 * FAULT_FLAG_TRIED.
790 */
791 *locked = 1;
792 lock_dropped = true;
793 down_read(&mm->mmap_sem);
794 ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED,
795 pages, NULL, NULL);
796 if (ret != 1) {
797 BUG_ON(ret > 1);
798 if (!pages_done)
799 pages_done = ret;
800 break;
801 }
802 nr_pages--;
803 pages_done++;
804 if (!nr_pages)
805 break;
806 pages++;
807 start += PAGE_SIZE;
808 }
809 if (notify_drop && lock_dropped && *locked) {
810 /*
811 * We must let the caller know we temporarily dropped the lock
812 * and so the critical section protected by it was lost.
813 */
814 up_read(&mm->mmap_sem);
815 *locked = 0;
816 }
817 return pages_done;
818}
819
820/*
821 * We can leverage the VM_FAULT_RETRY functionality in the page fault
822 * paths better by using either get_user_pages_locked() or
823 * get_user_pages_unlocked().
824 *
825 * get_user_pages_locked() is suitable to replace the form:
826 *
827 * down_read(&mm->mmap_sem);
828 * do_something()
829 *      get_user_pages(..., pages, NULL);
830 * up_read(&mm->mmap_sem);
831 *
832 * to:
833 *
834 * int locked = 1;
835 * down_read(&mm->mmap_sem);
836 * do_something()
837 *      get_user_pages_locked(..., pages, &locked);
838 * if (locked)
839 * up_read(&mm->mmap_sem);
840 */
841long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
842 int write, int force, struct page **pages,
843 int *locked)
844{
845 return __get_user_pages_locked(current, current->mm, start, nr_pages,
846 write, force, pages, NULL, locked, true,
847 FOLL_TOUCH);
848}
849EXPORT_SYMBOL(get_user_pages_locked);
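
/*
 * Illustrative sketch, not part of the original file: the replacement pattern
 * from the comment above written out as a compilable helper.  The name
 * example_gup_locked() is hypothetical; note that mmap_sem is only dropped
 * here if get_user_pages_locked() did not already drop it (*locked == 0).
 */
static long example_gup_locked(unsigned long start, unsigned long nr_pages,
			       struct page **pages)
{
	int locked = 1;
	long ret;

	down_read(&current->mm->mmap_sem);
	ret = get_user_pages_locked(start, nr_pages, 0 /* write */,
				    0 /* force */, pages, &locked);
	if (locked)
		up_read(&current->mm->mmap_sem);
	return ret;
}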
850
851/*
852 * Same as get_user_pages_unlocked(...., FOLL_TOUCH) but it allows to
853 * pass additional gup_flags as last parameter (like FOLL_HWPOISON).
854 *
855 * NOTE: here FOLL_TOUCH is not set implicitly and must be set by the
856 * caller if required (just like with __get_user_pages). "FOLL_GET",
857 * "FOLL_WRITE" and "FOLL_FORCE" are set implicitly as needed
858 * according to the parameters "pages", "write", "force"
859 * respectively.
860 */
861__always_inline long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
862 unsigned long start, unsigned long nr_pages,
863 int write, int force, struct page **pages,
864 unsigned int gup_flags)
865{
866 long ret;
867 int locked = 1;
868 down_read(&mm->mmap_sem);
869 ret = __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,
870 pages, NULL, &locked, false, gup_flags);
871 if (locked)
872 up_read(&mm->mmap_sem);
873 return ret;
874}
875EXPORT_SYMBOL(__get_user_pages_unlocked);
876
877/*
878 * get_user_pages_unlocked() is suitable to replace the form:
879 *
880 * down_read(&mm->mmap_sem);
881 *      get_user_pages(..., pages, NULL);
882 * up_read(&mm->mmap_sem);
883 *
884 * with:
885 *
886 *      get_user_pages_unlocked(..., pages);
887 *
888 * It is functionally equivalent to get_user_pages_fast, so
889 * get_user_pages_fast should be used instead whenever possible.
890 * This function is still needed when "force" must be set to 1,
891 * because get_user_pages_fast has no "force" parameter and always
892 * operates on current->mm.
893 */
894long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
895 int write, int force, struct page **pages)
896{
897 return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
898 write, force, pages, FOLL_TOUCH);
899}
900EXPORT_SYMBOL(get_user_pages_unlocked);
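
/*
 * Illustrative sketch, not part of the original file: get_user_pages_unlocked()
 * takes and drops mmap_sem internally, so a caller only deals with the pinned
 * pages.  The helper name example_gup_unlocked_write() is hypothetical; pages
 * that were written to are dirtied before release, as the rules above require.
 */
static long example_gup_unlocked_write(unsigned long start,
				       unsigned long nr_pages,
				       struct page **pages)
{
	long i, ret;

	ret = get_user_pages_unlocked(start, nr_pages, 1 /* write */,
				      0 /* force */, pages);
	if (ret <= 0)
		return ret;

	/* ... the caller writes into the pages here ... */

	for (i = 0; i < ret; i++) {
		set_page_dirty_lock(pages[i]);
		put_page(pages[i]);
	}
	return ret;
}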
901
902/*
903 * get_user_pages_remote() - pin user pages in memory
904 * @tsk: the task_struct to use for page fault accounting, or
905 * NULL if faults are not to be recorded.
906 * @mm: mm_struct of target mm
907 * @start: starting user address
908 * @nr_pages: number of pages from start to pin
909 * @write: whether pages will be written to by the caller
910 * @force: whether to force access even when user mapping is currently
911 * protected (but never forces write access to shared mapping).
912 * @pages: array that receives pointers to the pages pinned.
913 * Should be at least nr_pages long. Or NULL, if caller
914 * only intends to ensure the pages are faulted in.
915 * @vmas: array of pointers to vmas corresponding to each page.
916 * Or NULL if the caller does not require them.
917 *
918 * Returns number of pages pinned. This may be fewer than the number
919 * requested. If nr_pages is 0 or negative, returns 0. If no pages
920 * were pinned, returns -errno. Each page returned must be released
921 * with a put_page() call when it is finished with. vmas will only
922 * remain valid while mmap_sem is held.
923 *
924 * Must be called with mmap_sem held for read or write.
925 *
926 * get_user_pages walks a process's page tables and takes a reference to
927 * each struct page that each user address corresponds to at a given
928 * instant. That is, it takes the page that would be accessed if a user
929 * thread accesses the given user virtual address at that instant.
930 *
931 * This does not guarantee that the page exists in the user mappings when
932 * get_user_pages returns, and there may even be a completely different
933 * page there in some cases (eg. if mmapped pagecache has been invalidated
934 * and subsequently re-faulted). However it does guarantee that the page
935 * won't be freed completely. And mostly callers simply care that the page
936 * contains data that was valid *at some point in time*. Typically, an IO
937 * or similar operation cannot guarantee anything stronger anyway because
938 * locks can't be held over the syscall boundary.
939 *
940 * If write=0, the page must not be written to. If the page is written to,
941 * set_page_dirty (or set_page_dirty_lock, as appropriate) must be called
942 * after the page is finished with, and before put_page is called.
943 *
944 * get_user_pages is typically used for fewer-copy IO operations, to get a
945 * handle on the memory by some means other than accesses via the user virtual
946 * addresses. The pages may be submitted for DMA to devices or accessed via
947 * their kernel linear mapping (via the kmap APIs). Care should be taken to
948 * use the correct cache flushing APIs.
949 *
950 * See also get_user_pages_fast, for performance critical applications.
951 *
952 * get_user_pages should be phased out in favor of
953 * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing
954 * should use get_user_pages because it cannot pass
955 * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
956 */
957long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
958 unsigned long start, unsigned long nr_pages,
959 int write, int force, struct page **pages,
960 struct vm_area_struct **vmas)
961{
962 return __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,
963 pages, vmas, NULL, false,
964 FOLL_TOUCH | FOLL_REMOTE);
965}
966EXPORT_SYMBOL(get_user_pages_remote);
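
/*
 * Illustrative sketch, not part of the original file: reading one page of
 * another task's memory with get_user_pages_remote(), roughly as the ptrace /
 * process_vm_readv paths do.  The helper name example_read_remote_page() is
 * hypothetical; @mm is assumed to have been obtained via get_task_mm(), and
 * kmap()/memcpy() may require <linux/highmem.h> and <linux/string.h>.
 */
static int example_read_remote_page(struct task_struct *tsk,
				    struct mm_struct *mm,
				    unsigned long addr, void *buf)
{
	struct page *page;
	void *kaddr;
	long ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages_remote(tsk, mm, addr & PAGE_MASK, 1,
				    0 /* write */, 0 /* force */, &page, NULL);
	up_read(&mm->mmap_sem);
	if (ret != 1)
		return ret < 0 ? ret : -EFAULT;

	kaddr = kmap(page);
	memcpy(buf, kaddr, PAGE_SIZE);
	kunmap(page);
	put_page(page);
	return 0;
}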
967
968/*
969 * This is the same as get_user_pages_remote(), just with a
970 * less-flexible calling convention where we assume that the task
971 * and mm being operated on are the current task's. We also
972 * obviously don't pass FOLL_REMOTE in here.
973 */
974long get_user_pages(unsigned long start, unsigned long nr_pages,
975 int write, int force, struct page **pages,
976 struct vm_area_struct **vmas)
977{
978 return __get_user_pages_locked(current, current->mm, start, nr_pages,
979 write, force, pages, vmas, NULL, false,
980 FOLL_TOUCH);
981}
982EXPORT_SYMBOL(get_user_pages);
983
984/**
985 * populate_vma_page_range() - populate a range of pages in the vma.
986 * @vma: target vma
987 * @start: start address
988 * @end: end address
989 * @nonblocking: if non-NULL, *@nonblocking is set to 0 when mmap_sem is released
990 *
991 * This takes care of mlocking the pages too if VM_LOCKED is set.
992 *
993 * return 0 on success, negative error code on error.
994 *
995 * vma->vm_mm->mmap_sem must be held.
996 *
997 * If @nonblocking is NULL, it may be held for read or write and will
998 * be unperturbed.
999 *
1000 * If @nonblocking is non-NULL, it must be held for read only and may be
1001 * released. If it's released, *@nonblocking will be set to 0.
1002 */
1003long populate_vma_page_range(struct vm_area_struct *vma,
1004 unsigned long start, unsigned long end, int *nonblocking)
1005{
1006 struct mm_struct *mm = vma->vm_mm;
1007 unsigned long nr_pages = (end - start) / PAGE_SIZE;
1008 int gup_flags;
1009
1010 VM_BUG_ON(start & ~PAGE_MASK);
1011 VM_BUG_ON(end & ~PAGE_MASK);
1012 VM_BUG_ON_VMA(start < vma->vm_start, vma);
1013 VM_BUG_ON_VMA(end > vma->vm_end, vma);
1014 VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
1015
1016 gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK;
1017 if (vma->vm_flags & VM_LOCKONFAULT)
1018 gup_flags &= ~FOLL_POPULATE;
1019 /*
1020 * We want to touch writable mappings with a write fault in order
1021 * to break COW, except for shared mappings because these don't COW
1022 * and we would not want to dirty them for nothing.
1023 */
1024 if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
1025 gup_flags |= FOLL_WRITE;
1026
1027 /*
1028 * We want mlock to succeed for regions that have any permissions
1029 * other than PROT_NONE.
1030 */
1031 if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
1032 gup_flags |= FOLL_FORCE;
1033
1034 /*
1035 * We made sure addr is within a VMA, so the following will
1036 * not result in a stack expansion that recurses back here.
1037 */
1038 return __get_user_pages(current, mm, start, nr_pages, gup_flags,
1039 NULL, NULL, nonblocking);
1040}
1041
1042/*
1043 * __mm_populate - populate and/or mlock pages within a range of address space.
1044 *
1045 * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
1046 * flags. VMAs must be already marked with the desired vm_flags, and
1047 * mmap_sem must not be held.
1048 */
1049int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
1050{
1051 struct mm_struct *mm = current->mm;
1052 unsigned long end, nstart, nend;
1053 struct vm_area_struct *vma = NULL;
1054 int locked = 0;
1055 long ret = 0;
1056
1057 VM_BUG_ON(start & ~PAGE_MASK);
1058 VM_BUG_ON(len != PAGE_ALIGN(len));
1059 end = start + len;
1060
1061 for (nstart = start; nstart < end; nstart = nend) {
1062 /*
1063 * We want to fault in pages for [nstart; end) address range.
1064 * Find first corresponding VMA.
1065 */
1066 if (!locked) {
1067 locked = 1;
1068 down_read(&mm->mmap_sem);
1069 vma = find_vma(mm, nstart);
1070 } else if (nstart >= vma->vm_end)
1071 vma = vma->vm_next;
1072 if (!vma || vma->vm_start >= end)
1073 break;
1074 /*
1075 * Set [nstart; nend) to intersection of desired address
1076 * range with the first VMA. Also, skip undesirable VMA types.
1077 */
1078 nend = min(end, vma->vm_end);
1079 if (vma->vm_flags & (VM_IO | VM_PFNMAP))
1080 continue;
1081 if (nstart < vma->vm_start)
1082 nstart = vma->vm_start;
1083 /*
1084 * Now fault in a range of pages. populate_vma_page_range()
1085 * double checks the vma flags, so that it won't mlock pages
1086 * if the vma was already munlocked.
1087 */
1088 ret = populate_vma_page_range(vma, nstart, nend, &locked);
1089 if (ret < 0) {
1090 if (ignore_errors) {
1091 ret = 0;
1092 continue; /* continue at next VMA */
1093 }
1094 break;
1095 }
1096 nend = nstart + ret * PAGE_SIZE;
1097 ret = 0;
1098 }
1099 if (locked)
1100 up_read(&mm->mmap_sem);
1101 return ret; /* 0 or negative error code */
1102}
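
/*
 * Illustrative sketch, not part of the original file: how a MAP_POPULATE /
 * mlock style caller typically drives __mm_populate().  The helper name
 * example_populate_after_mmap() is hypothetical and mirrors the mm_populate()
 * wrapper; errors are deliberately ignored, as they are for MAP_POPULATE.
 */
static void example_populate_after_mmap(unsigned long addr, unsigned long len)
{
	/* Must be called without mmap_sem held; @addr is page aligned. */
	(void)__mm_populate(addr, PAGE_ALIGN(len), 1 /* ignore_errors */);
}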
1103
1104/**
1105 * get_dump_page() - pin user page in memory while writing it to core dump
1106 * @addr: user address
1107 *
1108 * Returns struct page pointer of user page pinned for dump,
1109 * to be freed afterwards by put_page().
1110 *
1111 * Returns NULL on any kind of failure - a hole must then be inserted into
1112 * the corefile, to preserve alignment with its headers; and also returns
1113 * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
1114 * allowing a hole to be left in the corefile to save diskspace.
1115 *
1116 * Called without mmap_sem, but after all other threads have been killed.
1117 */
1118#ifdef CONFIG_ELF_CORE
1119struct page *get_dump_page(unsigned long addr)
1120{
1121 struct vm_area_struct *vma;
1122 struct page *page;
1123
1124 if (__get_user_pages(current, current->mm, addr, 1,
1125 FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
1126 NULL) < 1)
1127 return NULL;
1128 flush_cache_page(vma, addr, page_to_pfn(page));
1129 return page;
1130}
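
/*
 * Illustrative sketch, not part of the original file: consuming get_dump_page()
 * the way a core-dump writer would -- a NULL return is not an error, it simply
 * means this page can be represented as a hole (zero-filled here).  The helper
 * name example_dump_range_to_buf() is hypothetical; kmap()/memcpy()/memset()
 * may require <linux/highmem.h> and <linux/string.h>.
 */
static void example_dump_range_to_buf(unsigned long start, unsigned long end,
				      void *buf)
{
	unsigned long addr;

	for (addr = start; addr < end; addr += PAGE_SIZE, buf += PAGE_SIZE) {
		struct page *page = get_dump_page(addr);

		if (page) {
			void *kaddr = kmap(page);

			memcpy(buf, kaddr, PAGE_SIZE);
			kunmap(page);
			put_page(page);
		} else {
			memset(buf, 0, PAGE_SIZE);
		}
	}
}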
1131#endif /* CONFIG_ELF_CORE */
1132
1133/*
1134 * Generic RCU Fast GUP
1135 *
1136 * get_user_pages_fast attempts to pin user pages by walking the page
1137 * tables directly and avoids taking locks. Thus the walker needs to be
1138 * protected from page table pages being freed from under it, and should
1139 * block any THP splits.
1140 *
1141 * One way to achieve this is to have the walker disable interrupts, and
1142 * rely on IPIs from the TLB flushing code blocking before the page table
1143 * pages are freed. This is unsuitable for architectures that do not need
1144 * to broadcast an IPI when invalidating TLBs.
1145 *
1146 * Another way to achieve this is to batch up the pages that contain page tables
1147 * belonging to more than one mm_user, then rcu_sched a callback to free those
1148 * pages. Disabling interrupts will allow the fast_gup walker to both block
1149 * the rcu_sched callback, and an IPI that we broadcast for splitting THPs
1150 * (which is a relatively rare event). The code below adopts this strategy.
1151 *
1152 * Before activating this code, please be aware that the following assumptions
1153 * are currently made:
1154 *
1155 * *) HAVE_RCU_TABLE_FREE is enabled, and tlb_remove_table is used to free
1156 * pages containing page tables.
1157 *
1158 * *) ptes can be read atomically by the architecture.
1159 *
1160 * *) access_ok is sufficient to validate userspace address ranges.
1161 *
1162 * The last two assumptions can be relaxed by the addition of helper functions.
1163 *
1164 * This code is based heavily on the PowerPC implementation by Nick Piggin.
1165 */
1166#ifdef CONFIG_HAVE_GENERIC_RCU_GUP
1167
1168#ifdef __HAVE_ARCH_PTE_SPECIAL
1169static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
1170 int write, struct page **pages, int *nr)
1171{
1172 pte_t *ptep, *ptem;
1173 int ret = 0;
1174
1175 ptem = ptep = pte_offset_map(&pmd, addr);
1176 do {
1177 /*
1178 * In the line below we are assuming that the pte can be read
1179 * atomically. If this is not the case for your architecture,
1180 * please wrap this in a helper function!
1181 *
1182 * for an example see gup_get_pte in arch/x86/mm/gup.c
1183 */
1184 pte_t pte = READ_ONCE(*ptep);
1185 struct page *head, *page;
1186
1187 /*
1188 * Similar to the PMD case below, NUMA hinting must take slow
1189 * path using the pte_protnone check.
1190 */
1191 if (!pte_present(pte) || pte_special(pte) ||
1192 pte_protnone(pte) || (write && !pte_write(pte)))
1193 goto pte_unmap;
1194
1195 if (!arch_pte_access_permitted(pte, write))
1196 goto pte_unmap;
1197
1198 VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
1199 page = pte_page(pte);
1200 head = compound_head(page);
1201
1202 if (!page_cache_get_speculative(head))
1203 goto pte_unmap;
1204
1205 if (unlikely(pte_val(pte) != pte_val(*ptep))) {
1206 put_page(head);
1207 goto pte_unmap;
1208 }
1209
1210 VM_BUG_ON_PAGE(compound_head(page) != head, page);
1211 pages[*nr] = page;
1212 (*nr)++;
1213
1214 } while (ptep++, addr += PAGE_SIZE, addr != end);
1215
1216 ret = 1;
1217
1218pte_unmap:
1219 pte_unmap(ptem);
1220 return ret;
1221}
1222#else
1223
1224/*
1225 * If we can't determine whether or not a pte is special, then fail immediately
1226 * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not
1227 * to be special.
1228 *
1229 * For a futex to be placed on a THP tail page, get_futex_key requires a
1230 * __get_user_pages_fast implementation that can pin pages. Thus it's still
1231 * useful to have gup_huge_pmd even if we can't operate on ptes.
1232 */
1233static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
1234 int write, struct page **pages, int *nr)
1235{
1236 return 0;
1237}
1238#endif /* __HAVE_ARCH_PTE_SPECIAL */
1239
1240static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
1241 unsigned long end, int write, struct page **pages, int *nr)
1242{
1243 struct page *head, *page;
1244 int refs;
1245
1246 if (write && !pmd_write(orig))
1247 return 0;
1248
1249 refs = 0;
1250 head = pmd_page(orig);
1251 page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
1252 do {
1253 VM_BUG_ON_PAGE(compound_head(page) != head, page);
1254 pages[*nr] = page;
1255 (*nr)++;
1256 page++;
1257 refs++;
1258 } while (addr += PAGE_SIZE, addr != end);
1259
1260 if (!page_cache_add_speculative(head, refs)) {
1261 *nr -= refs;
1262 return 0;
1263 }
1264
1265 if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
1266 *nr -= refs;
1267 while (refs--)
1268 put_page(head);
1269 return 0;
1270 }
1271
1272 return 1;
1273}
1274
1275static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
1276 unsigned long end, int write, struct page **pages, int *nr)
1277{
1278 struct page *head, *page;
1279 int refs;
1280
1281 if (write && !pud_write(orig))
1282 return 0;
1283
1284 refs = 0;
1285 head = pud_page(orig);
1286 page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
1287 do {
1288 VM_BUG_ON_PAGE(compound_head(page) != head, page);
1289 pages[*nr] = page;
1290 (*nr)++;
1291 page++;
1292 refs++;
1293 } while (addr += PAGE_SIZE, addr != end);
1294
1295 if (!page_cache_add_speculative(head, refs)) {
1296 *nr -= refs;
1297 return 0;
1298 }
1299
1300 if (unlikely(pud_val(orig) != pud_val(*pudp))) {
1301 *nr -= refs;
1302 while (refs--)
1303 put_page(head);
1304 return 0;
1305 }
1306
1307 return 1;
1308}
1309
1310static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
1311 unsigned long end, int write,
1312 struct page **pages, int *nr)
1313{
1314 int refs;
1315 struct page *head, *page;
1316
1317 if (write && !pgd_write(orig))
1318 return 0;
1319
1320 refs = 0;
1321 head = pgd_page(orig);
1322 page = head + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
1323 do {
1324 VM_BUG_ON_PAGE(compound_head(page) != head, page);
1325 pages[*nr] = page;
1326 (*nr)++;
1327 page++;
1328 refs++;
1329 } while (addr += PAGE_SIZE, addr != end);
1330
1331 if (!page_cache_add_speculative(head, refs)) {
1332 *nr -= refs;
1333 return 0;
1334 }
1335
1336 if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
1337 *nr -= refs;
1338 while (refs--)
1339 put_page(head);
1340 return 0;
1341 }
1342
1343 return 1;
1344}
1345
1346static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
1347 int write, struct page **pages, int *nr)
1348{
1349 unsigned long next;
1350 pmd_t *pmdp;
1351
1352 pmdp = pmd_offset(&pud, addr);
1353 do {
1354 pmd_t pmd = READ_ONCE(*pmdp);
1355
1356 next = pmd_addr_end(addr, end);
1357 if (pmd_none(pmd))
1358 return 0;
1359
1360 if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd))) {
1361 /*
1362 * NUMA hinting faults need to be handled in the GUP
1363 * slowpath for accounting purposes and so that they
1364 * can be serialised against THP migration.
1365 */
1366 if (pmd_protnone(pmd))
1367 return 0;
1368
1369 if (!gup_huge_pmd(pmd, pmdp, addr, next, write,
1370 pages, nr))
1371 return 0;
1372
1373 } else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) {
1374 /*
1375			 * architectures may use a different format for the
1376			 * hugetlbfs pmd than for the THP pmd
1377 */
1378 if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
1379 PMD_SHIFT, next, write, pages, nr))
1380 return 0;
1381 } else if (!gup_pte_range(pmd, addr, next, write, pages, nr))
1382 return 0;
1383 } while (pmdp++, addr = next, addr != end);
1384
1385 return 1;
1386}
1387
1388static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
1389 int write, struct page **pages, int *nr)
1390{
1391 unsigned long next;
1392 pud_t *pudp;
1393
1394 pudp = pud_offset(&pgd, addr);
1395 do {
1396 pud_t pud = READ_ONCE(*pudp);
1397
1398 next = pud_addr_end(addr, end);
1399 if (pud_none(pud))
1400 return 0;
1401 if (unlikely(pud_huge(pud))) {
1402 if (!gup_huge_pud(pud, pudp, addr, next, write,
1403 pages, nr))
1404 return 0;
1405 } else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
1406 if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
1407 PUD_SHIFT, next, write, pages, nr))
1408 return 0;
1409 } else if (!gup_pmd_range(pud, addr, next, write, pages, nr))
1410 return 0;
1411 } while (pudp++, addr = next, addr != end);
1412
1413 return 1;
1414}
1415
1416/*
1417 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
1418 * the regular GUP. It will only return non-negative values.
1419 */
1420int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
1421 struct page **pages)
1422{
1423 struct mm_struct *mm = current->mm;
1424 unsigned long addr, len, end;
1425 unsigned long next, flags;
1426 pgd_t *pgdp;
1427 int nr = 0;
1428
1429 start &= PAGE_MASK;
1430 addr = start;
1431 len = (unsigned long) nr_pages << PAGE_SHIFT;
1432 end = start + len;
1433
1434 if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
1435 start, len)))
1436 return 0;
1437
1438 /*
1439 * Disable interrupts. We use the nested form as we can already have
1440 * interrupts disabled by get_futex_key.
1441 *
1442 * With interrupts disabled, we block page table pages from being
1443 * freed from under us. See mmu_gather_tlb in asm-generic/tlb.h
1444 * for more details.
1445 *
1446 * We do not adopt an rcu_read_lock(.) here as we also want to
1447 * block IPIs that come from THPs splitting.
1448 */
1449
1450 local_irq_save(flags);
1451 pgdp = pgd_offset(mm, addr);
1452 do {
1453 pgd_t pgd = READ_ONCE(*pgdp);
1454
1455 next = pgd_addr_end(addr, end);
1456 if (pgd_none(pgd))
1457 break;
1458 if (unlikely(pgd_huge(pgd))) {
1459 if (!gup_huge_pgd(pgd, pgdp, addr, next, write,
1460 pages, &nr))
1461 break;
1462 } else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
1463 if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
1464 PGDIR_SHIFT, next, write, pages, &nr))
1465 break;
1466 } else if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
1467 break;
1468 } while (pgdp++, addr = next, addr != end);
1469 local_irq_restore(flags);
1470
1471 return nr;
1472}
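
/*
 * Illustrative sketch, not part of the original file: because
 * __get_user_pages_fast() never sleeps and never touches mmap_sem, it can be
 * tried opportunistically from contexts that must not block, with the caller
 * falling back to the full get_user_pages*() path on failure.  The helper
 * name example_try_pin_one_page() is hypothetical.
 */
static struct page *example_try_pin_one_page(unsigned long addr, int write)
{
	struct page *page;

	if (__get_user_pages_fast(addr & PAGE_MASK, 1, write, &page) == 1)
		return page;	/* release with put_page() when done */

	return NULL;		/* caller falls back to the slow path */
}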
1473
1474/**
1475 * get_user_pages_fast() - pin user pages in memory
1476 * @start: starting user address
1477 * @nr_pages: number of pages from start to pin
1478 * @write: whether pages will be written to
1479 * @pages: array that receives pointers to the pages pinned.
1480 * Should be at least nr_pages long.
1481 *
1482 * Attempt to pin user pages in memory without taking mm->mmap_sem.
1483 * If not successful, it will fall back to taking the lock and
1484 * calling get_user_pages().
1485 *
1486 * Returns number of pages pinned. This may be fewer than the number
1487 * requested. If nr_pages is 0 or negative, returns 0. If no pages
1488 * were pinned, returns -errno.
1489 */
1490int get_user_pages_fast(unsigned long start, int nr_pages, int write,
1491 struct page **pages)
1492{
1493 int nr, ret;
1494
1495 start &= PAGE_MASK;
1496 nr = __get_user_pages_fast(start, nr_pages, write, pages);
1497 ret = nr;
1498
1499 if (nr < nr_pages) {
1500 /* Try to get the remaining pages with get_user_pages */
1501 start += nr << PAGE_SHIFT;
1502 pages += nr;
1503
1504 ret = get_user_pages_unlocked(start, nr_pages - nr, write, 0, pages);
1505
1506 /* Have to be a bit careful with return values */
1507 if (nr > 0) {
1508 if (ret < 0)
1509 ret = nr;
1510 else
1511 ret += nr;
1512 }
1513 }
1514
1515 return ret;
1516}
1517
1518#endif /* CONFIG_HAVE_GENERIC_RCU_GUP */
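
/*
 * Illustrative sketch, not part of the original file: the typical direct-IO
 * style use of get_user_pages_fast() -- pin the pages backing a user buffer,
 * let the IO path access them, then dirty and release them.  The helper names
 * example_pin_for_io()/example_unpin_after_io() are hypothetical, and the
 * length checks are minimal (len is assumed to be non-zero).
 */
static long example_pin_for_io(unsigned long uaddr, size_t len,
			       struct page **pages, int will_write_pages)
{
	unsigned long first = uaddr >> PAGE_SHIFT;
	unsigned long last = (uaddr + len - 1) >> PAGE_SHIFT;
	int nr_pages = last - first + 1;

	return get_user_pages_fast(uaddr & PAGE_MASK, nr_pages,
				   will_write_pages, pages);
}

static void example_unpin_after_io(struct page **pages, long nr, int dirtied)
{
	long i;

	for (i = 0; i < nr; i++) {
		if (dirtied)
			set_page_dirty_lock(pages[i]);
		put_page(pages[i]);
	}
}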