// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/mm/memory.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 */

/*
 * demand-loading started 01.12.91 - seems it is high on the list of
 * things wanted, and it should be easy to implement. - Linus
 */

/*
 * Ok, demand-loading was easy, shared pages a little bit tricker. Shared
 * pages started 02.12.91, seems to work. - Linus.
 *
 * Tested sharing by executing about 30 /bin/sh: under the old kernel it
 * would have taken more than the 6M I have free, but it worked well as
 * far as I could see.
 *
 * Also corrected some "invalidate()"s - I wasn't doing enough of them.
 */

/*
 * Real VM (paging to/from disk) started 18.12.91. Much more work and
 * thought has to go into this. Oh, well..
 * 19.12.91 - works, somewhat. Sometimes I get faults, don't know why.
 * Found it. Everything seems to work now.
 * 20.12.91 - Ok, making the swap-device changeable like the root.
 */

/*
 * 05.04.94 - Multi-page memory management added for v1.1.
 * Idea by Alex Bligh (alex@cconcepts.co.uk)
 *
 * 16.07.99 - Support of BIGMEM added by Gerhard Wichert, Siemens AG
 * (Gerhard.Wichert@pdb.siemens.de)
 *
 * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
 */

#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/memremap.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/export.h>
#include <linux/delayacct.h>
#include <linux/init.h>
#include <linux/pfn_t.h>
#include <linux/writeback.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/swapops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
#include <linux/migrate.h>
#include <linux/string.h>
#include <linux/debugfs.h>
#include <linux/userfaultfd_k.h>
#include <linux/dax.h>
#include <linux/oom.h>
#include <linux/numa.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/vmalloc.h>

#include <trace/events/kmem.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <linux/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#include "pgalloc-track.h"
#include "internal.h"

#if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
#warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
#endif

#ifndef CONFIG_NUMA
unsigned long max_mapnr;
EXPORT_SYMBOL(max_mapnr);

struct page *mem_map;
EXPORT_SYMBOL(mem_map);
#endif

/*
 * A number of key systems in x86 including ioremap() rely on the assumption
 * that high_memory defines the upper bound on direct map memory, that is,
 * the end of ZONE_NORMAL. Under CONFIG_DISCONTIG this means that max_low_pfn
 * and highstart_pfn must be the same; there must be no gap between
 * ZONE_NORMAL and ZONE_HIGHMEM.
 */
void *high_memory;
EXPORT_SYMBOL(high_memory);

/*
 * Randomize the address space (stacks, mmaps, brk, etc.).
 *
 * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
 *   as ancient (libc5 based) binaries can segfault. )
 */
int randomize_va_space __read_mostly =
#ifdef CONFIG_COMPAT_BRK
					1;
#else
					2;
#endif

#ifndef arch_faults_on_old_pte
static inline bool arch_faults_on_old_pte(void)
{
	/*
	 * Those arches which don't have hw access flag feature need to
	 * implement their own helper. By default, "true" means pagefault
	 * will be hit on old pte.
	 */
	return true;
}
#endif

#ifndef arch_wants_old_prefaulted_pte
static inline bool arch_wants_old_prefaulted_pte(void)
{
	/*
	 * Transitioning a PTE from 'old' to 'young' can be expensive on
	 * some architectures, even if it's performed in hardware. By
	 * default, "false" means prefaulted entries will be 'young'.
	 */
	return false;
}
#endif

static int __init disable_randmaps(char *s)
{
	randomize_va_space = 0;
	return 1;
}
__setup("norandmaps", disable_randmaps);
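
/*
 * Note: booting with "norandmaps" is roughly equivalent to writing 0 to
 * /proc/sys/kernel/randomize_va_space at runtime (where that sysctl is
 * exposed); both simply clear randomize_va_space.
 */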

unsigned long zero_pfn __read_mostly;
EXPORT_SYMBOL(zero_pfn);

unsigned long highest_memmap_pfn __read_mostly;

/*
 * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
 */
static int __init init_zero_pfn(void)
{
	zero_pfn = page_to_pfn(ZERO_PAGE(0));
	return 0;
}
early_initcall(init_zero_pfn);

void mm_trace_rss_stat(struct mm_struct *mm, int member, long count)
{
	trace_rss_stat(mm, member, count);
}

#if defined(SPLIT_RSS_COUNTING)

void sync_mm_rss(struct mm_struct *mm)
{
	int i;

	for (i = 0; i < NR_MM_COUNTERS; i++) {
		if (current->rss_stat.count[i]) {
			add_mm_counter(mm, i, current->rss_stat.count[i]);
			current->rss_stat.count[i] = 0;
		}
	}
	current->rss_stat.events = 0;
}

static void add_mm_counter_fast(struct mm_struct *mm, int member, int val)
{
	struct task_struct *task = current;

	if (likely(task->mm == mm))
		task->rss_stat.count[member] += val;
	else
		add_mm_counter(mm, member, val);
}
#define inc_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, 1)
#define dec_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, -1)

/* sync counter once per 64 page faults */
#define TASK_RSS_EVENTS_THRESH	(64)
static void check_sync_rss_stat(struct task_struct *task)
{
	if (unlikely(task != current))
		return;
	if (unlikely(task->rss_stat.events++ > TASK_RSS_EVENTS_THRESH))
		sync_mm_rss(task->mm);
}
#else /* SPLIT_RSS_COUNTING */

#define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member)
#define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member)

static void check_sync_rss_stat(struct task_struct *task)
{
}

#endif /* SPLIT_RSS_COUNTING */
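
/*
 * A rough summary of the two configurations above: with SPLIT_RSS_COUNTING,
 * per-task deltas accumulate in current->rss_stat and are only folded into
 * the mm-wide counters by sync_mm_rss() -- triggered roughly every
 * TASK_RSS_EVENTS_THRESH events via check_sync_rss_stat(), or explicitly by
 * callers such as add_mm_rss_vec() below. Without it, the *_fast() helpers
 * update the mm counters directly and check_sync_rss_stat() is a no-op.
 */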

/*
 * Note: this doesn't free the actual pages themselves. That
 * has been handled earlier when unmapping all the memory regions.
 */
static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
			   unsigned long addr)
{
	pgtable_t token = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free_tlb(tlb, token, addr);
	mm_dec_nr_ptes(tlb->mm);
}

static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		free_pte_range(tlb, pmd, addr);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
	mm_dec_nr_pmds(tlb->mm);
}

static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
	} while (pud++, addr = next, addr != end);

	start &= P4D_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= P4D_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(p4d, start);
	p4d_clear(p4d);
	pud_free_tlb(tlb, pud, start);
	mm_dec_nr_puds(tlb->mm);
}

static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	p4d_t *p4d;
	unsigned long next;
	unsigned long start;

	start = addr;
	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d))
			continue;
		free_pud_range(tlb, p4d, addr, next, floor, ceiling);
	} while (p4d++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	p4d = p4d_offset(pgd, start);
	pgd_clear(pgd);
	p4d_free_tlb(tlb, p4d, start);
}

/*
 * This function frees user-level page tables of a process.
 */
void free_pgd_range(struct mmu_gather *tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;

	/*
	 * The next few lines have given us lots of grief...
	 *
	 * Why are we testing PMD* at this top level? Because often
	 * there will be no work to do at all, and we'd prefer not to
	 * go all the way down to the bottom just to discover that.
	 *
	 * Why all these "- 1"s? Because 0 represents both the bottom
	 * of the address space and the top of it (using -1 for the
	 * top wouldn't help much: the masks would do the wrong thing).
	 * The rule is that addr 0 and floor 0 refer to the bottom of
	 * the address space, but end 0 and ceiling 0 refer to the top.
	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
	 * that end 0 case should be mythical).
	 *
	 * Wherever addr is brought up or ceiling brought down, we must
	 * be careful to reject "the opposite 0" before it confuses the
	 * subsequent tests. But what about where end is brought down
	 * by PMD_SIZE below? no, end can't go down to 0 there.
	 *
	 * Whereas we round start (addr) and ceiling down, by different
	 * masks at different levels, in order to test whether a table
	 * now has no other vmas using it, so can be freed, we don't
	 * bother to round floor or end up - the tests don't need that.
	 */

	addr &= PMD_MASK;
	if (addr < floor) {
		addr += PMD_SIZE;
		if (!addr)
			return;
	}
	if (ceiling) {
		ceiling &= PMD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		end -= PMD_SIZE;
	if (addr > end - 1)
		return;
	/*
	 * We add page table cache pages with PAGE_SIZE
	 * (see pte_free_tlb()), so flush the tlb if we need to.
	 */
	tlb_change_page_size(tlb, PAGE_SIZE);
	pgd = pgd_offset(tlb->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		free_p4d_range(tlb, pgd, addr, next, floor, ceiling);
	} while (pgd++, addr = next, addr != end);
}

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
		unsigned long floor, unsigned long ceiling)
{
	while (vma) {
		struct vm_area_struct *next = vma->vm_next;
		unsigned long addr = vma->vm_start;

		/*
		 * Hide vma from rmap and truncate_pagecache before freeing
		 * pgtables
		 */
		unlink_anon_vmas(vma);
		unlink_file_vma(vma);

		if (is_vm_hugetlb_page(vma)) {
			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
				floor, next ? next->vm_start : ceiling);
		} else {
			/*
			 * Optimization: gather nearby vmas into one call down
			 */
			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
			       && !is_vm_hugetlb_page(next)) {
				vma = next;
				next = vma->vm_next;
				unlink_anon_vmas(vma);
				unlink_file_vma(vma);
			}
			free_pgd_range(tlb, addr, vma->vm_end,
				floor, next ? next->vm_start : ceiling);
		}
		vma = next;
	}
}

int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl;
	pgtable_t new = pte_alloc_one(mm);
	if (!new)
		return -ENOMEM;

	/*
	 * Ensure all pte setup (eg. pte page lock and page clearing) are
	 * visible before the pte is made visible to other CPUs by being
	 * put into page tables.
	 *
	 * The other side of the story is the pointer chasing in the page
	 * table walking code (when walking the page table without locking;
	 * ie. most of the time). Fortunately, these data accesses consist
	 * of a chain of data-dependent loads, meaning most CPUs (alpha
	 * being the notable exception) will already guarantee loads are
	 * seen in-order. See the alpha page table accessors for the
	 * smp_rmb() barriers in page table walking code.
	 */
	smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */

	ptl = pmd_lock(mm, pmd);
	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
		mm_inc_nr_ptes(mm);
		pmd_populate(mm, pmd, new);
		new = NULL;
	}
	spin_unlock(ptl);
	if (new)
		pte_free(mm, new);
	return 0;
}
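
/*
 * A minimal sketch of how __pte_alloc() is normally reached: the pte_alloc()
 * and pte_alloc_map_lock() helpers (see <linux/mm.h>) only call it when the
 * pmd is still empty, roughly
 *
 *	if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, pmd))
 *		goto fail_oom;
 *
 * so the common, already-populated case never takes the pmd lock here.
 */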

int __pte_alloc_kernel(pmd_t *pmd)
{
	pte_t *new = pte_alloc_one_kernel(&init_mm);
	if (!new)
		return -ENOMEM;

	smp_wmb(); /* See comment in __pte_alloc */

	spin_lock(&init_mm.page_table_lock);
	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
		pmd_populate_kernel(&init_mm, pmd, new);
		new = NULL;
	}
	spin_unlock(&init_mm.page_table_lock);
	if (new)
		pte_free_kernel(&init_mm, new);
	return 0;
}

static inline void init_rss_vec(int *rss)
{
	memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
}

static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
{
	int i;

	if (current->mm == mm)
		sync_mm_rss(mm);
	for (i = 0; i < NR_MM_COUNTERS; i++)
		if (rss[i])
			add_mm_counter(mm, i, rss[i]);
}

/*
 * This function is called to print an error when a bad pte
 * is found. For example, we might have a PFN-mapped pte in
 * a region that doesn't allow it.
 *
 * The calling function must still handle the error.
 */
static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
			  pte_t pte, struct page *page)
{
	pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
	p4d_t *p4d = p4d_offset(pgd, addr);
	pud_t *pud = pud_offset(p4d, addr);
	pmd_t *pmd = pmd_offset(pud, addr);
	struct address_space *mapping;
	pgoff_t index;
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			return;
		}
		if (nr_unshown) {
			pr_alert("BUG: Bad page map: %lu messages suppressed\n",
				 nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
	index = linear_page_index(vma, addr);

	pr_alert("BUG: Bad page map in process %s pte:%08llx pmd:%08llx\n",
		 current->comm,
		 (long long)pte_val(pte), (long long)pmd_val(*pmd));
	if (page)
		dump_page(page, "bad pte");
	pr_alert("addr:%px vm_flags:%08lx anon_vma:%px mapping:%px index:%lx\n",
		 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
	pr_alert("file:%pD fault:%ps mmap:%ps readpage:%ps\n",
		 vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->fault : NULL,
		 vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
		 mapping ? mapping->a_ops->readpage : NULL);
	dump_stack();
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

/*
 * vm_normal_page -- This function gets the "struct page" associated with a pte.
 *
 * "Special" mappings do not wish to be associated with a "struct page" (either
 * it doesn't exist, or it exists but they don't want to touch it). In this
 * case, NULL is returned here. "Normal" mappings do have a struct page.
 *
 * There are 2 broad cases. Firstly, an architecture may define a pte_special()
 * pte bit, in which case this function is trivial. Secondly, an architecture
 * may not have a spare pte bit, which requires a more complicated scheme,
 * described below.
 *
 * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
 * special mapping (even if there are underlying and valid "struct pages").
 * COWed pages of a VM_PFNMAP are always normal.
 *
 * The way we recognize COWed pages within VM_PFNMAP mappings is through the
 * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
 * set, and the vm_pgoff will point to the first PFN mapped: thus every special
 * mapping will always honor the rule
 *
 *	pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
 *
 * And for normal mappings this is false.
 *
 * This restricts such mappings to be a linear translation from virtual address
 * to pfn. To get around this restriction, we allow arbitrary mappings so long
 * as the vma is not a COW mapping; in that case, we know that all ptes are
 * special (because none can have been COWed).
 *
 *
 * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
 *
 * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
 * page" backing, however the difference is that _all_ pages with a struct
 * page (that is, those where pfn_valid is true) are refcounted and considered
 * normal pages by the VM. The disadvantage is that pages are refcounted
 * (which can be slower and simply not an option for some PFNMAP users). The
 * advantage is that we don't have to follow the strict linearity rule of
 * PFNMAP mappings in order to support COWable mappings.
 *
 */
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
			    pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL)) {
		if (likely(!pte_special(pte)))
			goto check_pfn;
		if (vma->vm_ops && vma->vm_ops->find_special_page)
			return vma->vm_ops->find_special_page(vma, addr);
		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
			return NULL;
		if (is_zero_pfn(pfn))
			return NULL;
		if (pte_devmap(pte))
			return NULL;

		print_bad_pte(vma, addr, pte, NULL);
		return NULL;
	}

	/* !CONFIG_ARCH_HAS_PTE_SPECIAL case follows: */

	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
		if (vma->vm_flags & VM_MIXEDMAP) {
			if (!pfn_valid(pfn))
				return NULL;
			goto out;
		} else {
			unsigned long off;
			off = (addr - vma->vm_start) >> PAGE_SHIFT;
			if (pfn == vma->vm_pgoff + off)
				return NULL;
			if (!is_cow_mapping(vma->vm_flags))
				return NULL;
		}
	}

	if (is_zero_pfn(pfn))
		return NULL;

check_pfn:
	if (unlikely(pfn > highest_memmap_pfn)) {
		print_bad_pte(vma, addr, pte, NULL);
		return NULL;
	}

	/*
	 * NOTE! We still have PageReserved() pages in the page tables.
	 * eg. VDSO mappings can cause them to exist.
	 */
out:
	return pfn_to_page(pfn);
}
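
/*
 * Illustrative restatement of the VM_PFNMAP rule above: for a linear mapping
 * set up by remap_pfn_range(), a pte is treated as special when
 *
 *	pfn == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
 *
 * whereas a COWed replacement page no longer satisfies that identity and is
 * therefore returned as a normal page (provided the vma is a COW mapping).
 */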

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
				pmd_t pmd)
{
	unsigned long pfn = pmd_pfn(pmd);

	/*
	 * There is no pmd_special() but there may be special pmds, e.g.
	 * in a direct-access (dax) mapping, so let's just replicate the
	 * !CONFIG_ARCH_HAS_PTE_SPECIAL case from vm_normal_page() here.
	 */
	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
		if (vma->vm_flags & VM_MIXEDMAP) {
			if (!pfn_valid(pfn))
				return NULL;
			goto out;
		} else {
			unsigned long off;
			off = (addr - vma->vm_start) >> PAGE_SHIFT;
			if (pfn == vma->vm_pgoff + off)
				return NULL;
			if (!is_cow_mapping(vma->vm_flags))
				return NULL;
		}
	}

	if (pmd_devmap(pmd))
		return NULL;
	if (is_huge_zero_pmd(pmd))
		return NULL;
	if (unlikely(pfn > highest_memmap_pfn))
		return NULL;

	/*
	 * NOTE! We still have PageReserved() pages in the page tables.
	 * eg. VDSO mappings can cause them to exist.
	 */
out:
	return pfn_to_page(pfn);
}
#endif

static void restore_exclusive_pte(struct vm_area_struct *vma,
				  struct page *page, unsigned long address,
				  pte_t *ptep)
{
	pte_t pte;
	swp_entry_t entry;

	pte = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot)));
	if (pte_swp_soft_dirty(*ptep))
		pte = pte_mksoft_dirty(pte);

	entry = pte_to_swp_entry(*ptep);
	if (pte_swp_uffd_wp(*ptep))
		pte = pte_mkuffd_wp(pte);
	else if (is_writable_device_exclusive_entry(entry))
		pte = maybe_mkwrite(pte_mkdirty(pte), vma);

	set_pte_at(vma->vm_mm, address, ptep, pte);

	/*
	 * No need to take a page reference as one was already
	 * created when the swap entry was made.
	 */
	if (PageAnon(page))
		page_add_anon_rmap(page, vma, address, false);
	else
		/*
		 * Currently device exclusive access only supports anonymous
		 * memory so the entry shouldn't point to a filebacked page.
		 */
		WARN_ON_ONCE(!PageAnon(page));

	if (vma->vm_flags & VM_LOCKED)
		mlock_vma_page(page);

	/*
	 * No need to invalidate - it was non-present before. However
	 * secondary CPUs may have mappings that need invalidating.
	 */
	update_mmu_cache(vma, address, ptep);
}

/*
 * Tries to restore an exclusive pte if the page lock can be acquired without
 * sleeping.
 */
static int
try_restore_exclusive_pte(pte_t *src_pte, struct vm_area_struct *vma,
			  unsigned long addr)
{
	swp_entry_t entry = pte_to_swp_entry(*src_pte);
	struct page *page = pfn_swap_entry_to_page(entry);

	if (trylock_page(page)) {
		restore_exclusive_pte(vma, page, addr, src_pte);
		unlock_page(page);
		return 0;
	}

	return -EBUSY;
}

/*
 * copy one vm_area from one task to the other. Assumes the page tables
 * already present in the new task to be cleared in the whole range
 * covered by this vma.
 */

static unsigned long
copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *dst_vma,
		struct vm_area_struct *src_vma, unsigned long addr, int *rss)
{
	unsigned long vm_flags = dst_vma->vm_flags;
	pte_t pte = *src_pte;
	struct page *page;
	swp_entry_t entry = pte_to_swp_entry(pte);

	if (likely(!non_swap_entry(entry))) {
		if (swap_duplicate(entry) < 0)
			return -EIO;

		/* make sure dst_mm is on swapoff's mmlist. */
		if (unlikely(list_empty(&dst_mm->mmlist))) {
			spin_lock(&mmlist_lock);
			if (list_empty(&dst_mm->mmlist))
				list_add(&dst_mm->mmlist,
						&src_mm->mmlist);
			spin_unlock(&mmlist_lock);
		}
		rss[MM_SWAPENTS]++;
	} else if (is_migration_entry(entry)) {
		page = pfn_swap_entry_to_page(entry);

		rss[mm_counter(page)]++;

		if (is_writable_migration_entry(entry) &&
				is_cow_mapping(vm_flags)) {
			/*
			 * COW mappings require pages in both
			 * parent and child to be set to read.
			 */
			entry = make_readable_migration_entry(
							swp_offset(entry));
			pte = swp_entry_to_pte(entry);
			if (pte_swp_soft_dirty(*src_pte))
				pte = pte_swp_mksoft_dirty(pte);
			if (pte_swp_uffd_wp(*src_pte))
				pte = pte_swp_mkuffd_wp(pte);
			set_pte_at(src_mm, addr, src_pte, pte);
		}
	} else if (is_device_private_entry(entry)) {
		page = pfn_swap_entry_to_page(entry);

		/*
		 * Update rss count even for unaddressable pages, as
		 * they should be treated just like normal pages in this
		 * respect.
		 *
		 * We will likely want to have some new rss counters
		 * for unaddressable pages, at some point. But for now
		 * keep things as they are.
		 */
		get_page(page);
		rss[mm_counter(page)]++;
		page_dup_rmap(page, false);

		/*
		 * We do not preserve soft-dirty information, because so
		 * far, checkpoint/restore is the only feature that
		 * requires that. And checkpoint/restore does not work
		 * when a device driver is involved (you cannot easily
		 * save and restore device driver state).
		 */
		if (is_writable_device_private_entry(entry) &&
				is_cow_mapping(vm_flags)) {
			entry = make_readable_device_private_entry(
							swp_offset(entry));
			pte = swp_entry_to_pte(entry);
			if (pte_swp_uffd_wp(*src_pte))
				pte = pte_swp_mkuffd_wp(pte);
			set_pte_at(src_mm, addr, src_pte, pte);
		}
	} else if (is_device_exclusive_entry(entry)) {
		/*
		 * Make device exclusive entries present by restoring the
		 * original entry then copying as for a present pte. Device
		 * exclusive entries currently only support private writable
		 * (ie. COW) mappings.
		 */
		VM_BUG_ON(!is_cow_mapping(src_vma->vm_flags));
		if (try_restore_exclusive_pte(src_pte, src_vma, addr))
			return -EBUSY;
		return -ENOENT;
	}
	if (!userfaultfd_wp(dst_vma))
		pte = pte_swp_clear_uffd_wp(pte);
	set_pte_at(dst_mm, addr, dst_pte, pte);
	return 0;
}

/*
 * Copy a present and normal page if necessary.
 *
 * NOTE! The usual case is that this doesn't need to do
 * anything, and can just return a positive value. That
 * will let the caller know that it can just increase
 * the page refcount and re-use the pte the traditional
 * way.
 *
 * But _if_ we need to copy it because it needs to be
 * pinned in the parent (and the child should get its own
 * copy rather than just a reference to the same page),
 * we'll do that here and return zero to let the caller
 * know we're done.
 *
 * And if we need a pre-allocated page but don't yet have
 * one, return a negative error to let the preallocation
 * code know so that it can do so outside the page table
 * lock.
 */
static inline int
copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
		  pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
		  struct page **prealloc, pte_t pte, struct page *page)
{
	struct page *new_page;

	/*
	 * What we want to do is to check whether this page may
	 * have been pinned by the parent process. If so,
	 * instead of wrprotect the pte on both sides, we copy
	 * the page immediately so that we'll always guarantee
	 * the pinned page won't be randomly replaced in the
	 * future.
	 *
	 * The page pinning checks are just "has this mm ever
	 * seen pinning", along with the (inexact) check of
	 * the page count. That might give false positives for
	 * pinning, but it will work correctly.
	 */
	if (likely(!page_needs_cow_for_dma(src_vma, page)))
		return 1;

	new_page = *prealloc;
	if (!new_page)
		return -EAGAIN;

	/*
	 * We have a prealloc page, all good! Take it
	 * over and copy the page & arm it.
	 */
	*prealloc = NULL;
	copy_user_highpage(new_page, page, addr, src_vma);
	__SetPageUptodate(new_page);
	page_add_new_anon_rmap(new_page, dst_vma, addr, false);
	lru_cache_add_inactive_or_unevictable(new_page, dst_vma);
	rss[mm_counter(new_page)]++;

	/* All done, just insert the new page copy in the child */
	pte = mk_pte(new_page, dst_vma->vm_page_prot);
	pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma);
	if (userfaultfd_pte_wp(dst_vma, *src_pte))
		/* Uffd-wp needs to be delivered to dest pte as well */
		pte = pte_wrprotect(pte_mkuffd_wp(pte));
	set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
	return 0;
}

/*
 * Copy one pte. Returns 0 if succeeded, or -EAGAIN if one preallocated page
 * is required to copy this pte.
 */
static inline int
copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
		 pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
		 struct page **prealloc)
{
	struct mm_struct *src_mm = src_vma->vm_mm;
	unsigned long vm_flags = src_vma->vm_flags;
	pte_t pte = *src_pte;
	struct page *page;

	page = vm_normal_page(src_vma, addr, pte);
	if (page) {
		int retval;

		retval = copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
					   addr, rss, prealloc, pte, page);
		if (retval <= 0)
			return retval;

		get_page(page);
		page_dup_rmap(page, false);
		rss[mm_counter(page)]++;
	}

	/*
	 * If it's a COW mapping, write protect it both
	 * in the parent and the child
	 */
	if (is_cow_mapping(vm_flags) && pte_write(pte)) {
		ptep_set_wrprotect(src_mm, addr, src_pte);
		pte = pte_wrprotect(pte);
	}

	/*
	 * If it's a shared mapping, mark it clean in
	 * the child
	 */
	if (vm_flags & VM_SHARED)
		pte = pte_mkclean(pte);
	pte = pte_mkold(pte);

	if (!userfaultfd_wp(dst_vma))
		pte = pte_clear_uffd_wp(pte);

	set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
	return 0;
}

static inline struct page *
page_copy_prealloc(struct mm_struct *src_mm, struct vm_area_struct *vma,
		   unsigned long addr)
{
	struct page *new_page;

	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, addr);
	if (!new_page)
		return NULL;

	if (mem_cgroup_charge(new_page, src_mm, GFP_KERNEL)) {
		put_page(new_page);
		return NULL;
	}
	cgroup_throttle_swaprate(new_page, GFP_KERNEL);

	return new_page;
}

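/*
 * Rough summary of the return-code protocol used by copy_nonpresent_pte()
 * and copy_present_pte() above and consumed by copy_pte_range() below:
 *
 *	0	entry copied, keep scanning
 *	-EIO	swap_duplicate() needs a swap count continuation; the caller
 *		retries after add_swap_count_continuation()
 *	-EBUSY	a device exclusive entry could not be restored because the
 *		page lock was contended; the copy of this range is abandoned
 *	-ENOENT	a device exclusive entry was restored to a present pte and
 *		should now be copied with copy_present_pte()
 *	-EAGAIN	copy_present_pte() needs a preallocated page; the caller
 *		drops the locks, calls page_copy_prealloc() and retries
 */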
static int
copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
	       pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
	       unsigned long end)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	struct mm_struct *src_mm = src_vma->vm_mm;
	pte_t *orig_src_pte, *orig_dst_pte;
	pte_t *src_pte, *dst_pte;
	spinlock_t *src_ptl, *dst_ptl;
	int progress, ret = 0;
	int rss[NR_MM_COUNTERS];
	swp_entry_t entry = (swp_entry_t){0};
	struct page *prealloc = NULL;

again:
	progress = 0;
	init_rss_vec(rss);

	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
	if (!dst_pte) {
		ret = -ENOMEM;
		goto out;
	}
	src_pte = pte_offset_map(src_pmd, addr);
	src_ptl = pte_lockptr(src_mm, src_pmd);
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
	orig_src_pte = src_pte;
	orig_dst_pte = dst_pte;
	arch_enter_lazy_mmu_mode();

	do {
		/*
		 * We are holding two locks at this point - either of them
		 * could generate latencies in another task on another CPU.
		 */
		if (progress >= 32) {
			progress = 0;
			if (need_resched() ||
			    spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
				break;
		}
		if (pte_none(*src_pte)) {
			progress++;
			continue;
		}
		if (unlikely(!pte_present(*src_pte))) {
			ret = copy_nonpresent_pte(dst_mm, src_mm,
						  dst_pte, src_pte,
						  dst_vma, src_vma,
						  addr, rss);
			if (ret == -EIO) {
				entry = pte_to_swp_entry(*src_pte);
				break;
			} else if (ret == -EBUSY) {
				break;
			} else if (!ret) {
				progress += 8;
				continue;
			}

			/*
			 * Device exclusive entry restored, continue by copying
			 * the now present pte.
			 */
			WARN_ON_ONCE(ret != -ENOENT);
		}
		/* copy_present_pte() will clear `*prealloc' if consumed */
		ret = copy_present_pte(dst_vma, src_vma, dst_pte, src_pte,
				       addr, rss, &prealloc);
		/*
		 * If we need a pre-allocated page for this pte, drop the
		 * locks, allocate, and try again.
		 */
		if (unlikely(ret == -EAGAIN))
			break;
		if (unlikely(prealloc)) {
			/*
			 * pre-alloc page cannot be reused by next time so as
			 * to strictly follow mempolicy (e.g., alloc_page_vma()
			 * will allocate page according to address). This
			 * could only happen if one pinned pte changed.
			 */
			put_page(prealloc);
			prealloc = NULL;
		}
		progress += 8;
	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);

	arch_leave_lazy_mmu_mode();
	spin_unlock(src_ptl);
	pte_unmap(orig_src_pte);
	add_mm_rss_vec(dst_mm, rss);
	pte_unmap_unlock(orig_dst_pte, dst_ptl);
	cond_resched();

	if (ret == -EIO) {
		VM_WARN_ON_ONCE(!entry.val);
		if (add_swap_count_continuation(entry, GFP_KERNEL) < 0) {
			ret = -ENOMEM;
			goto out;
		}
		entry.val = 0;
	} else if (ret == -EBUSY) {
		goto out;
	} else if (ret == -EAGAIN) {
		prealloc = page_copy_prealloc(src_mm, src_vma, addr);
		if (!prealloc)
			return -ENOMEM;
	} else if (ret) {
		VM_WARN_ON_ONCE(1);
	}

	/* We've captured and resolved the error. Reset, try again. */
	ret = 0;

	if (addr != end)
		goto again;
out:
	if (unlikely(prealloc))
		put_page(prealloc);
	return ret;
}

static inline int
copy_pmd_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
	       pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
	       unsigned long end)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	struct mm_struct *src_mm = src_vma->vm_mm;
	pmd_t *src_pmd, *dst_pmd;
	unsigned long next;

	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
	if (!dst_pmd)
		return -ENOMEM;
	src_pmd = pmd_offset(src_pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (is_swap_pmd(*src_pmd) || pmd_trans_huge(*src_pmd)
			|| pmd_devmap(*src_pmd)) {
			int err;
			VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, src_vma);
			err = copy_huge_pmd(dst_mm, src_mm, dst_pmd, src_pmd,
					    addr, dst_vma, src_vma);
			if (err == -ENOMEM)
				return -ENOMEM;
			if (!err)
				continue;
			/* fall through */
		}
		if (pmd_none_or_clear_bad(src_pmd))
			continue;
		if (copy_pte_range(dst_vma, src_vma, dst_pmd, src_pmd,
				   addr, next))
			return -ENOMEM;
	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
	return 0;
}

static inline int
copy_pud_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
	       p4d_t *dst_p4d, p4d_t *src_p4d, unsigned long addr,
	       unsigned long end)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	struct mm_struct *src_mm = src_vma->vm_mm;
	pud_t *src_pud, *dst_pud;
	unsigned long next;

	dst_pud = pud_alloc(dst_mm, dst_p4d, addr);
	if (!dst_pud)
		return -ENOMEM;
	src_pud = pud_offset(src_p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) {
			int err;

			VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, src_vma);
			err = copy_huge_pud(dst_mm, src_mm,
					    dst_pud, src_pud, addr, src_vma);
			if (err == -ENOMEM)
				return -ENOMEM;
			if (!err)
				continue;
			/* fall through */
		}
		if (pud_none_or_clear_bad(src_pud))
			continue;
		if (copy_pmd_range(dst_vma, src_vma, dst_pud, src_pud,
				   addr, next))
			return -ENOMEM;
	} while (dst_pud++, src_pud++, addr = next, addr != end);
	return 0;
}

static inline int
copy_p4d_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
	       pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long addr,
	       unsigned long end)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	p4d_t *src_p4d, *dst_p4d;
	unsigned long next;

	dst_p4d = p4d_alloc(dst_mm, dst_pgd, addr);
	if (!dst_p4d)
		return -ENOMEM;
	src_p4d = p4d_offset(src_pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(src_p4d))
			continue;
		if (copy_pud_range(dst_vma, src_vma, dst_p4d, src_p4d,
				   addr, next))
			return -ENOMEM;
	} while (dst_p4d++, src_p4d++, addr = next, addr != end);
	return 0;
}

int
copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
{
	pgd_t *src_pgd, *dst_pgd;
	unsigned long next;
	unsigned long addr = src_vma->vm_start;
	unsigned long end = src_vma->vm_end;
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	struct mm_struct *src_mm = src_vma->vm_mm;
	struct mmu_notifier_range range;
	bool is_cow;
	int ret;

	/*
	 * Don't copy ptes where a page fault will fill them correctly.
	 * Fork becomes much lighter when there are big shared or private
	 * readonly mappings. The tradeoff is that copy_page_range is more
	 * efficient than faulting.
	 */
	if (!(src_vma->vm_flags & (VM_HUGETLB | VM_PFNMAP | VM_MIXEDMAP)) &&
	    !src_vma->anon_vma)
		return 0;

	if (is_vm_hugetlb_page(src_vma))
		return copy_hugetlb_page_range(dst_mm, src_mm, src_vma);

	if (unlikely(src_vma->vm_flags & VM_PFNMAP)) {
		/*
		 * We do not free on error cases below as remove_vma
		 * gets called on error from higher level routine
		 */
		ret = track_pfn_copy(src_vma);
		if (ret)
			return ret;
	}

	/*
	 * We need to invalidate the secondary MMU mappings only when
	 * there could be a permission downgrade on the ptes of the
	 * parent mm. And a permission downgrade will only happen if
	 * is_cow_mapping() returns true.
	 */
	is_cow = is_cow_mapping(src_vma->vm_flags);

	if (is_cow) {
		mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
					0, src_vma, src_mm, addr, end);
		mmu_notifier_invalidate_range_start(&range);
		/*
		 * Disabling preemption is not needed for the write side, as
		 * the read side doesn't spin, but goes to the mmap_lock.
		 *
		 * Use the raw variant of the seqcount_t write API to avoid
		 * lockdep complaining about preemptibility.
		 */
		mmap_assert_write_locked(src_mm);
		raw_write_seqcount_begin(&src_mm->write_protect_seq);
	}

	ret = 0;
	dst_pgd = pgd_offset(dst_mm, addr);
	src_pgd = pgd_offset(src_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(src_pgd))
			continue;
		if (unlikely(copy_p4d_range(dst_vma, src_vma, dst_pgd, src_pgd,
					    addr, next))) {
			ret = -ENOMEM;
			break;
		}
	} while (dst_pgd++, src_pgd++, addr = next, addr != end);

	if (is_cow) {
		raw_write_seqcount_end(&src_mm->write_protect_seq);
		mmu_notifier_invalidate_range_end(&range);
	}
	return ret;
}

static unsigned long zap_pte_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pmd_t *pmd,
				unsigned long addr, unsigned long end,
				struct zap_details *details)
{
	struct mm_struct *mm = tlb->mm;
	int force_flush = 0;
	int rss[NR_MM_COUNTERS];
	spinlock_t *ptl;
	pte_t *start_pte;
	pte_t *pte;
	swp_entry_t entry;

	tlb_change_page_size(tlb, PAGE_SIZE);
again:
	init_rss_vec(rss);
	start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	pte = start_pte;
	flush_tlb_batched_pending(mm);
	arch_enter_lazy_mmu_mode();
	do {
		pte_t ptent = *pte;
		if (pte_none(ptent))
			continue;

		if (need_resched())
			break;

		if (pte_present(ptent)) {
			struct page *page;

			page = vm_normal_page(vma, addr, ptent);
			if (unlikely(details) && page) {
				/*
				 * unmap_shared_mapping_pages() wants to
				 * invalidate cache without truncating:
				 * unmap shared but keep private pages.
				 */
				if (details->check_mapping &&
				    details->check_mapping != page_rmapping(page))
					continue;
			}
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);
			tlb_remove_tlb_entry(tlb, pte, addr);
			if (unlikely(!page))
				continue;

			if (!PageAnon(page)) {
				if (pte_dirty(ptent)) {
					force_flush = 1;
					set_page_dirty(page);
				}
				if (pte_young(ptent) &&
				    likely(!(vma->vm_flags & VM_SEQ_READ)))
					mark_page_accessed(page);
			}
			rss[mm_counter(page)]--;
			page_remove_rmap(page, false);
			if (unlikely(page_mapcount(page) < 0))
				print_bad_pte(vma, addr, ptent, page);
			if (unlikely(__tlb_remove_page(tlb, page))) {
				force_flush = 1;
				addr += PAGE_SIZE;
				break;
			}
			continue;
		}

		entry = pte_to_swp_entry(ptent);
		if (is_device_private_entry(entry) ||
		    is_device_exclusive_entry(entry)) {
			struct page *page = pfn_swap_entry_to_page(entry);

			if (unlikely(details && details->check_mapping)) {
				/*
				 * unmap_shared_mapping_pages() wants to
				 * invalidate cache without truncating:
				 * unmap shared but keep private pages.
				 */
				if (details->check_mapping !=
				    page_rmapping(page))
					continue;
			}

			pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
			rss[mm_counter(page)]--;

			if (is_device_private_entry(entry))
				page_remove_rmap(page, false);

			put_page(page);
			continue;
		}

		/* If details->check_mapping, we leave swap entries. */
		if (unlikely(details))
			continue;

		if (!non_swap_entry(entry))
			rss[MM_SWAPENTS]--;
		else if (is_migration_entry(entry)) {
			struct page *page;

			page = pfn_swap_entry_to_page(entry);
			rss[mm_counter(page)]--;
		}
		if (unlikely(!free_swap_and_cache(entry)))
			print_bad_pte(vma, addr, ptent, NULL);
		pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
	} while (pte++, addr += PAGE_SIZE, addr != end);

	add_mm_rss_vec(mm, rss);
	arch_leave_lazy_mmu_mode();

	/* Do the actual TLB flush before dropping ptl */
	if (force_flush)
		tlb_flush_mmu_tlbonly(tlb);
	pte_unmap_unlock(start_pte, ptl);

	/*
	 * If we forced a TLB flush (either due to running out of
	 * batch buffers or because we needed to flush dirty TLB
	 * entries before releasing the ptl), free the batched
	 * memory too. Restart if we didn't do everything.
	 */
	if (force_flush) {
		force_flush = 0;
		tlb_flush_mmu(tlb);
	}

	if (addr != end) {
		cond_resched();
		goto again;
	}

	return addr;
}

static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pud_t *pud,
				unsigned long addr, unsigned long end,
				struct zap_details *details)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
			if (next - addr != HPAGE_PMD_SIZE)
				__split_huge_pmd(vma, pmd, addr, false, NULL);
			else if (zap_huge_pmd(tlb, vma, pmd, addr))
				goto next;
			/* fall through */
		} else if (details && details->single_page &&
			   PageTransCompound(details->single_page) &&
			   next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) {
			spinlock_t *ptl = pmd_lock(tlb->mm, pmd);
			/*
			 * Take and drop THP pmd lock so that we cannot return
			 * prematurely, while zap_huge_pmd() has cleared *pmd,
			 * but not yet decremented compound_mapcount().
			 */
			spin_unlock(ptl);
		}

		/*
		 * Here there can be other concurrent MADV_DONTNEED or
		 * trans huge page faults running, and if the pmd is
		 * none or trans huge it can change under us. This is
		 * because MADV_DONTNEED holds the mmap_lock in read
		 * mode.
		 */
		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
			goto next;
		next = zap_pte_range(tlb, vma, pmd, addr, next, details);
next:
		cond_resched();
	} while (pmd++, addr = next, addr != end);

	return addr;
}

static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, p4d_t *p4d,
				unsigned long addr, unsigned long end,
				struct zap_details *details)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
			if (next - addr != HPAGE_PUD_SIZE) {
				mmap_assert_locked(tlb->mm);
				split_huge_pud(vma, pud, addr);
			} else if (zap_huge_pud(tlb, vma, pud, addr))
				goto next;
			/* fall through */
		}
		if (pud_none_or_clear_bad(pud))
			continue;
		next = zap_pmd_range(tlb, vma, pud, addr, next, details);
next:
		cond_resched();
	} while (pud++, addr = next, addr != end);

	return addr;
}

static inline unsigned long zap_p4d_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				struct zap_details *details)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d))
			continue;
		next = zap_pud_range(tlb, vma, p4d, addr, next, details);
	} while (p4d++, addr = next, addr != end);

	return addr;
}

void unmap_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end,
			     struct zap_details *details)
{
	pgd_t *pgd;
	unsigned long next;

	BUG_ON(addr >= end);
	tlb_start_vma(tlb, vma);
	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		next = zap_p4d_range(tlb, vma, pgd, addr, next, details);
	} while (pgd++, addr = next, addr != end);
	tlb_end_vma(tlb, vma);
}


static void unmap_single_vma(struct mmu_gather *tlb,
		struct vm_area_struct *vma, unsigned long start_addr,
		unsigned long end_addr,
		struct zap_details *details)
{
	unsigned long start = max(vma->vm_start, start_addr);
	unsigned long end;

	if (start >= vma->vm_end)
		return;
	end = min(vma->vm_end, end_addr);
	if (end <= vma->vm_start)
		return;

	if (vma->vm_file)
		uprobe_munmap(vma, start, end);

	if (unlikely(vma->vm_flags & VM_PFNMAP))
		untrack_pfn(vma, 0, 0);

	if (start != end) {
		if (unlikely(is_vm_hugetlb_page(vma))) {
			/*
			 * It is undesirable to test vma->vm_file as it
			 * should be non-null for valid hugetlb area.
			 * However, vm_file will be NULL in the error
			 * cleanup path of mmap_region. When
			 * hugetlbfs ->mmap method fails,
			 * mmap_region() nullifies vma->vm_file
			 * before calling this function to clean up.
			 * Since no pte has actually been setup, it is
			 * safe to do nothing in this case.
			 */
			if (vma->vm_file) {
				i_mmap_lock_write(vma->vm_file->f_mapping);
				__unmap_hugepage_range_final(tlb, vma, start, end, NULL);
				i_mmap_unlock_write(vma->vm_file->f_mapping);
			}
		} else
			unmap_page_range(tlb, vma, start, end, details);
	}
}

/**
 * unmap_vmas - unmap a range of memory covered by a list of vma's
 * @tlb: address of the caller's struct mmu_gather
 * @vma: the starting vma
 * @start_addr: virtual address at which to start unmapping
 * @end_addr: virtual address at which to end unmapping
 *
 * Unmap all pages in the vma list.
 *
 * Only addresses between `start' and `end' will be unmapped.
 *
 * The VMA list must be sorted in ascending virtual address order.
 *
 * unmap_vmas() assumes that the caller will flush the whole unmapped address
 * range after unmap_vmas() returns. So the only responsibility here is to
 * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
 * drops the lock and schedules.
 */
void unmap_vmas(struct mmu_gather *tlb,
		struct vm_area_struct *vma, unsigned long start_addr,
		unsigned long end_addr)
{
	struct mmu_notifier_range range;

	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
				start_addr, end_addr);
	mmu_notifier_invalidate_range_start(&range);
	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
		unmap_single_vma(tlb, vma, start_addr, end_addr, NULL);
	mmu_notifier_invalidate_range_end(&range);
}

/**
 * zap_page_range - remove user pages in a given range
 * @vma: vm_area_struct holding the applicable pages
 * @start: starting address of pages to zap
 * @size: number of bytes to zap
 *
 * Caller must protect the VMA list
 */
void zap_page_range(struct vm_area_struct *vma, unsigned long start,
		unsigned long size)
{
	struct mmu_notifier_range range;
	struct mmu_gather tlb;

	lru_add_drain();
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
				start, start + size);
	tlb_gather_mmu(&tlb, vma->vm_mm);
	update_hiwater_rss(vma->vm_mm);
	mmu_notifier_invalidate_range_start(&range);
	for ( ; vma && vma->vm_start < range.end; vma = vma->vm_next)
		unmap_single_vma(&tlb, vma, start, range.end, NULL);
	mmu_notifier_invalidate_range_end(&range);
	tlb_finish_mmu(&tlb);
}

/**
 * zap_page_range_single - remove user pages in a given range
 * @vma: vm_area_struct holding the applicable pages
 * @address: starting address of pages to zap
 * @size: number of bytes to zap
 * @details: details of shared cache invalidation
 *
 * The range must fit into one VMA.
 */
static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
		unsigned long size, struct zap_details *details)
{
	struct mmu_notifier_range range;
	struct mmu_gather tlb;

	lru_add_drain();
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
				address, address + size);
	tlb_gather_mmu(&tlb, vma->vm_mm);
	update_hiwater_rss(vma->vm_mm);
	mmu_notifier_invalidate_range_start(&range);
	unmap_single_vma(&tlb, vma, address, range.end, details);
	mmu_notifier_invalidate_range_end(&range);
	tlb_finish_mmu(&tlb);
}

/**
 * zap_vma_ptes - remove ptes mapping the vma
 * @vma: vm_area_struct holding ptes to be zapped
 * @address: starting address of pages to zap
 * @size: number of bytes to zap
 *
 * This function only unmaps ptes assigned to VM_PFNMAP vmas.
 *
 * The entire address range must be fully contained within the vma.
 *
 */
void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
		unsigned long size)
{
	if (address < vma->vm_start || address + size > vma->vm_end ||
	    !(vma->vm_flags & VM_PFNMAP))
		return;

	zap_page_range_single(vma, address, size, NULL);
}
EXPORT_SYMBOL_GPL(zap_vma_ptes);
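
/*
 * Hypothetical usage sketch (not taken from any particular driver): a driver
 * that earlier populated a VM_PFNMAP vma with remap_pfn_range() can revoke
 * those user mappings again with something like
 *
 *	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
 *
 * e.g. when the underlying device memory goes away; subsequent user accesses
 * then fault instead of touching stale PFNs.
 */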

static pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_alloc(mm, pgd, addr);
	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, addr);
	if (!pud)
		return NULL;
	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;

	VM_BUG_ON(pmd_trans_huge(*pmd));
	return pmd;
}

pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
			spinlock_t **ptl)
{
	pmd_t *pmd = walk_to_pmd(mm, addr);

	if (!pmd)
		return NULL;
	return pte_alloc_map_lock(mm, pmd, addr, ptl);
}

static int validate_page_before_insert(struct page *page)
{
	if (PageAnon(page) || PageSlab(page) || page_has_type(page))
		return -EINVAL;
	flush_dcache_page(page);
	return 0;
}

static int insert_page_into_pte_locked(struct mm_struct *mm, pte_t *pte,
			unsigned long addr, struct page *page, pgprot_t prot)
{
	if (!pte_none(*pte))
		return -EBUSY;
	/* Ok, finally just insert the thing.. */
	get_page(page);
	inc_mm_counter_fast(mm, mm_counter_file(page));
	page_add_file_rmap(page, false);
	set_pte_at(mm, addr, pte, mk_pte(page, prot));
	return 0;
}

/*
 * This is the old fallback for page remapping.
 *
 * For historical reasons, it only allows reserved pages. Only
 * old drivers should use this, and they needed to mark their
 * pages reserved for the old functions anyway.
 */
static int insert_page(struct vm_area_struct *vma, unsigned long addr,
			struct page *page, pgprot_t prot)
{
	struct mm_struct *mm = vma->vm_mm;
	int retval;
	pte_t *pte;
	spinlock_t *ptl;

	retval = validate_page_before_insert(page);
	if (retval)
		goto out;
	retval = -ENOMEM;
	pte = get_locked_pte(mm, addr, &ptl);
	if (!pte)
		goto out;
	retval = insert_page_into_pte_locked(mm, pte, addr, page, prot);
	pte_unmap_unlock(pte, ptl);
out:
	return retval;
}

#ifdef pte_index
static int insert_page_in_batch_locked(struct mm_struct *mm, pte_t *pte,
			unsigned long addr, struct page *page, pgprot_t prot)
{
	int err;

	if (!page_count(page))
		return -EINVAL;
	err = validate_page_before_insert(page);
	if (err)
		return err;
	return insert_page_into_pte_locked(mm, pte, addr, page, prot);
}

/* insert_pages() amortizes the cost of spinlock operations
 * when inserting pages in a loop. Arch *must* define pte_index.
 */
static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
			struct page **pages, unsigned long *num, pgprot_t prot)
{
	pmd_t *pmd = NULL;
	pte_t *start_pte, *pte;
	spinlock_t *pte_lock;
	struct mm_struct *const mm = vma->vm_mm;
	unsigned long curr_page_idx = 0;
	unsigned long remaining_pages_total = *num;
	unsigned long pages_to_write_in_pmd;
	int ret;
more:
	ret = -EFAULT;
	pmd = walk_to_pmd(mm, addr);
	if (!pmd)
		goto out;

	pages_to_write_in_pmd = min_t(unsigned long,
		remaining_pages_total, PTRS_PER_PTE - pte_index(addr));

	/* Allocate the PTE if necessary; takes PMD lock once only. */
	ret = -ENOMEM;
	if (pte_alloc(mm, pmd))
		goto out;

	while (pages_to_write_in_pmd) {
		int pte_idx = 0;
		const int batch_size = min_t(int, pages_to_write_in_pmd, 8);

		start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock);
		for (pte = start_pte; pte_idx < batch_size; ++pte, ++pte_idx) {
			int err = insert_page_in_batch_locked(mm, pte,
				addr, pages[curr_page_idx], prot);
			if (unlikely(err)) {
				pte_unmap_unlock(start_pte, pte_lock);
				ret = err;
				remaining_pages_total -= pte_idx;
				goto out;
			}
			addr += PAGE_SIZE;
			++curr_page_idx;
		}
		pte_unmap_unlock(start_pte, pte_lock);
		pages_to_write_in_pmd -= batch_size;
		remaining_pages_total -= batch_size;
	}
	if (remaining_pages_total)
		goto more;
	ret = 0;
out:
	*num = remaining_pages_total;
	return ret;
}
#endif /* ifdef pte_index */
1859
1860/**
1861 * vm_insert_pages - insert multiple pages into user vma, batching the pmd lock.
1862 * @vma: user vma to map to
1863 * @addr: target start user address of these pages
1864 * @pages: source kernel pages
1865 * @num: in: number of pages to map. out: number of pages that were *not*
1866 * mapped. (0 means all pages were successfully mapped).
1867 *
1868 * Preferred over vm_insert_page() when inserting multiple pages.
1869 *
1870 * In case of error, we may have mapped a subset of the provided
1871 * pages. It is the caller's responsibility to account for this case.
1872 *
1873 * The same restrictions apply as in vm_insert_page().
1874 */
1875int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
1876 struct page **pages, unsigned long *num)
1877{
1878#ifdef pte_index
1879 const unsigned long end_addr = addr + (*num * PAGE_SIZE) - 1;
1880
1881 if (addr < vma->vm_start || end_addr >= vma->vm_end)
1882 return -EFAULT;
1883 if (!(vma->vm_flags & VM_MIXEDMAP)) {
1884 BUG_ON(mmap_read_trylock(vma->vm_mm));
1885 BUG_ON(vma->vm_flags & VM_PFNMAP);
1886 vma->vm_flags |= VM_MIXEDMAP;
1887 }
1888 /* Defer page refcount checking till we're about to map that page. */
1889 return insert_pages(vma, addr, pages, num, vma->vm_page_prot);
1890#else
1891 unsigned long idx = 0, pgcount = *num;
1892 int err = -EINVAL;
1893
1894 for (; idx < pgcount; ++idx) {
1895 err = vm_insert_page(vma, addr + (PAGE_SIZE * idx), pages[idx]);
1896 if (err)
1897 break;
1898 }
1899 *num = pgcount - idx;
1900 return err;
1901#endif /* ifdef pte_index */
1902}
1903EXPORT_SYMBOL(vm_insert_pages);
1904
1905/**
1906 * vm_insert_page - insert single page into user vma
1907 * @vma: user vma to map to
1908 * @addr: target user address of this page
1909 * @page: source kernel page
1910 *
1911 * This allows drivers to insert individual pages they've allocated
1912 * into a user vma.
1913 *
1914 * The page has to be a nice clean _individual_ kernel allocation.
1915 * If you allocate a compound page, you need to have marked it as
1916 * such (__GFP_COMP), or manually just split the page up yourself
1917 * (see split_page()).
1918 *
1919 * NOTE! Traditionally this was done with "remap_pfn_range()" which
1920 * took an arbitrary page protection parameter. This doesn't allow
1921 * that. Your vma protection will have to be set up correctly, which
1922 * means that if you want a shared writable mapping, you'd better
1923 * ask for a shared writable mapping!
1924 *
1925 * The page does not need to be reserved.
1926 *
1927 * Usually this function is called from f_op->mmap() handler
1928 * under mm->mmap_lock write-lock, so it can change vma->vm_flags.
1929 * Caller must set VM_MIXEDMAP on vma if it wants to call this
1930 * function from other places, for example from a page-fault handler.
1931 *
1932 * Return: %0 on success, negative error code otherwise.
1933 */
1934int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
1935 struct page *page)
1936{
1937 if (addr < vma->vm_start || addr >= vma->vm_end)
1938 return -EFAULT;
1939 if (!page_count(page))
1940 return -EINVAL;
1941 if (!(vma->vm_flags & VM_MIXEDMAP)) {
1942 BUG_ON(mmap_read_trylock(vma->vm_mm));
1943 BUG_ON(vma->vm_flags & VM_PFNMAP);
1944 vma->vm_flags |= VM_MIXEDMAP;
1945 }
1946 return insert_page(vma, addr, page, vma->vm_page_prot);
1947}
1948EXPORT_SYMBOL(vm_insert_page);
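/*
 * Illustrative sketch (not part of this file): mapping a single kernel
 * page, e.g. a per-device status page, from a driver's ->mmap() handler.
 * The my_dev structure and its status_page field are hypothetical.
 *
 *	static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_dev *dev = file->private_data;
 *
 *		if (vma_pages(vma) != 1 || vma->vm_pgoff)
 *			return -EINVAL;
 *		return vm_insert_page(vma, vma->vm_start, dev->status_page);
 *	}
 */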
1949
1950/*
1951 * __vm_map_pages - maps range of kernel pages into user vma
1952 * @vma: user vma to map to
1953 * @pages: pointer to array of source kernel pages
1954 * @num: number of pages in page array
1955 * @offset: user's requested vm_pgoff
1956 *
1957 * This allows drivers to map range of kernel pages into a user vma.
1958 *
1959 * Return: 0 on success and error code otherwise.
1960 */
1961static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages,
1962 unsigned long num, unsigned long offset)
1963{
1964 unsigned long count = vma_pages(vma);
1965 unsigned long uaddr = vma->vm_start;
1966 int ret, i;
1967
1968 /* Fail if the user requested offset is beyond the end of the object */
1969 if (offset >= num)
1970 return -ENXIO;
1971
1972 /* Fail if the user requested size exceeds available object size */
1973 if (count > num - offset)
1974 return -ENXIO;
1975
1976 for (i = 0; i < count; i++) {
1977 ret = vm_insert_page(vma, uaddr, pages[offset + i]);
1978 if (ret < 0)
1979 return ret;
1980 uaddr += PAGE_SIZE;
1981 }
1982
1983 return 0;
1984}
1985
1986/**
1987 * vm_map_pages - map a range of kernel pages, starting at a non-zero offset
1988 * @vma: user vma to map to
1989 * @pages: pointer to array of source kernel pages
1990 * @num: number of pages in page array
1991 *
1992 * Maps an object consisting of @num pages, catering for the user's
1993 * requested vm_pgoff.
1994 *
1995 * If we fail to insert any page into the vma, the function will return
1996 * immediately leaving any previously inserted pages present. Callers
1997 * from the mmap handler may immediately return the error as their caller
1998 * will destroy the vma, removing any successfully inserted pages. Other
1999 * callers should make their own arrangements for calling unmap_region().
2000 *
2001 * Context: Process context. Called by mmap handlers.
2002 * Return: 0 on success and error code otherwise.
2003 */
2004int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
2005 unsigned long num)
2006{
2007 return __vm_map_pages(vma, pages, num, vma->vm_pgoff);
2008}
2009EXPORT_SYMBOL(vm_map_pages);
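/*
 * Illustrative sketch (not part of this file): a ->mmap() handler that lets
 * userspace map all or part of a page array; vm_map_pages() applies and
 * range-checks vma->vm_pgoff itself. The my_queue names are hypothetical.
 *
 *	static int my_queue_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_queue *q = file->private_data;
 *
 *		return vm_map_pages(vma, q->pages, q->nr_pages);
 *	}
 */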
2010
2011/**
2012 * vm_map_pages_zero - map a range of kernel pages, starting at offset 0
2013 * @vma: user vma to map to
2014 * @pages: pointer to array of source kernel pages
2015 * @num: number of pages in page array
2016 *
2017 * Similar to vm_map_pages(), except that it explicitly sets the offset
2018 * to 0. This function is intended for drivers that do not take
2019 * vm_pgoff into account.
2020 *
2021 * Context: Process context. Called by mmap handlers.
2022 * Return: 0 on success and error code otherwise.
2023 */
2024int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
2025 unsigned long num)
2026{
2027 return __vm_map_pages(vma, pages, num, 0);
2028}
2029EXPORT_SYMBOL(vm_map_pages_zero);
2030
2031static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2032 pfn_t pfn, pgprot_t prot, bool mkwrite)
2033{
2034 struct mm_struct *mm = vma->vm_mm;
2035 pte_t *pte, entry;
2036 spinlock_t *ptl;
2037
2038 pte = get_locked_pte(mm, addr, &ptl);
2039 if (!pte)
2040 return VM_FAULT_OOM;
2041 if (!pte_none(*pte)) {
2042 if (mkwrite) {
2043 /*
2044 * For read faults on private mappings the PFN passed
2045 * in may not match the PFN we have mapped if the
2046 * mapped PFN is a writeable COW page. In the mkwrite
2047 * case we are creating a writable PTE for a shared
2048 * mapping and we expect the PFNs to match. If they
2049 * don't match, we are likely racing with block
2050 * allocation and mapping invalidation so just skip the
2051 * update.
2052 */
2053 if (pte_pfn(*pte) != pfn_t_to_pfn(pfn)) {
2054 WARN_ON_ONCE(!is_zero_pfn(pte_pfn(*pte)));
2055 goto out_unlock;
2056 }
2057 entry = pte_mkyoung(*pte);
2058 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2059 if (ptep_set_access_flags(vma, addr, pte, entry, 1))
2060 update_mmu_cache(vma, addr, pte);
2061 }
2062 goto out_unlock;
2063 }
2064
2065 /* Ok, finally just insert the thing.. */
2066 if (pfn_t_devmap(pfn))
2067 entry = pte_mkdevmap(pfn_t_pte(pfn, prot));
2068 else
2069 entry = pte_mkspecial(pfn_t_pte(pfn, prot));
2070
2071 if (mkwrite) {
2072 entry = pte_mkyoung(entry);
2073 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2074 }
2075
2076 set_pte_at(mm, addr, pte, entry);
2077 update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
2078
2079out_unlock:
2080 pte_unmap_unlock(pte, ptl);
2081 return VM_FAULT_NOPAGE;
2082}
2083
2084/**
2085 * vmf_insert_pfn_prot - insert single pfn into user vma with specified pgprot
2086 * @vma: user vma to map to
2087 * @addr: target user address of this page
2088 * @pfn: source kernel pfn
2089 * @pgprot: pgprot flags for the inserted page
2090 *
2091 * This is exactly like vmf_insert_pfn(), except that it allows drivers
2092 * to override pgprot on a per-page basis.
2093 *
2094 * This only makes sense for IO mappings, and it makes no sense for
2095 * COW mappings. In general, using multiple vmas is preferable;
2096 * vmf_insert_pfn_prot should only be used if using multiple VMAs is
2097 * impractical.
2098 *
2099 * See vmf_insert_mixed_prot() for a discussion of the implication of using
2100 * a value of @pgprot different from that of @vma->vm_page_prot.
2101 *
2102 * Context: Process context. May allocate using %GFP_KERNEL.
2103 * Return: vm_fault_t value.
2104 */
2105vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
2106 unsigned long pfn, pgprot_t pgprot)
2107{
2108 /*
2109 * Technically, architectures with pte_special can avoid all these
2110 * restrictions (same for remap_pfn_range). However we would like
2111 * consistency in testing and feature parity among all, so we should
2112 * try to keep these invariants in place for everybody.
2113 */
2114 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
2115 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
2116 (VM_PFNMAP|VM_MIXEDMAP));
2117 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
2118 BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
2119
2120 if (addr < vma->vm_start || addr >= vma->vm_end)
2121 return VM_FAULT_SIGBUS;
2122
2123 if (!pfn_modify_allowed(pfn, pgprot))
2124 return VM_FAULT_SIGBUS;
2125
2126 track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV));
2127
2128 return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot,
2129 false);
2130}
2131EXPORT_SYMBOL(vmf_insert_pfn_prot);
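/*
 * Illustrative sketch (not part of this file): a ->fault() handler that
 * maps device memory write-combined while leaving vma->vm_page_prot alone.
 * The my_fb structure and its vram_base field are hypothetical.
 *
 *	static vm_fault_t my_fb_fault(struct vm_fault *vmf)
 *	{
 *		struct my_fb *fb = vmf->vma->vm_private_data;
 *		unsigned long pfn = (fb->vram_base >> PAGE_SHIFT) + vmf->pgoff;
 *
 *		return vmf_insert_pfn_prot(vmf->vma, vmf->address, pfn,
 *				pgprot_writecombine(vmf->vma->vm_page_prot));
 *	}
 */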
2132
2133/**
2134 * vmf_insert_pfn - insert single pfn into user vma
2135 * @vma: user vma to map to
2136 * @addr: target user address of this page
2137 * @pfn: source kernel pfn
2138 *
2139 * Similar to vm_insert_page, this allows drivers to insert individual pages
2140 * they've allocated into a user vma. Same comments apply.
2141 *
2142 * This function should only be called from a vm_ops->fault handler, and
2143 * in that case the handler should return the result of this function.
2144 *
2145 * vma cannot be a COW mapping.
2146 *
2147 * As this is called only for pages that do not currently exist, we
2148 * do not need to flush old virtual caches or the TLB.
2149 *
2150 * Context: Process context. May allocate using %GFP_KERNEL.
2151 * Return: vm_fault_t value.
2152 */
2153vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2154 unsigned long pfn)
2155{
2156 return vmf_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
2157}
2158EXPORT_SYMBOL(vmf_insert_pfn);
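/*
 * Illustrative sketch (not part of this file): the usual pattern is a
 * VM_PFNMAP vma set up at mmap() time plus a ->fault() handler that simply
 * returns this function's result. The my_dev names are hypothetical.
 *
 *	static vm_fault_t my_dev_fault(struct vm_fault *vmf)
 *	{
 *		struct my_dev *dev = vmf->vma->vm_private_data;
 *		unsigned long pfn = (dev->phys_base >> PAGE_SHIFT) + vmf->pgoff;
 *
 *		return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
 *	}
 */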
2159
2160static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn)
2161{
2162 /* these checks mirror the abort conditions in vm_normal_page */
2163 if (vma->vm_flags & VM_MIXEDMAP)
2164 return true;
2165 if (pfn_t_devmap(pfn))
2166 return true;
2167 if (pfn_t_special(pfn))
2168 return true;
2169 if (is_zero_pfn(pfn_t_to_pfn(pfn)))
2170 return true;
2171 return false;
2172}
2173
2174static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
2175 unsigned long addr, pfn_t pfn, pgprot_t pgprot,
2176 bool mkwrite)
2177{
2178 int err;
2179
2180 BUG_ON(!vm_mixed_ok(vma, pfn));
2181
2182 if (addr < vma->vm_start || addr >= vma->vm_end)
2183 return VM_FAULT_SIGBUS;
2184
2185 track_pfn_insert(vma, &pgprot, pfn);
2186
2187 if (!pfn_modify_allowed(pfn_t_to_pfn(pfn), pgprot))
2188 return VM_FAULT_SIGBUS;
2189
2190 /*
2191 * If we don't have pte special, then we have to use the pfn_valid()
2192 * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
2193 * refcount the page if pfn_valid is true (hence insert_page rather
2194 * than insert_pfn). If a zero_pfn were inserted into a VM_MIXEDMAP
2195 * without pte special, it would then be refcounted as a normal page.
2196 */
2197 if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) &&
2198 !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) {
2199 struct page *page;
2200
2201 /*
2202 * At this point we are committed to insert_page()
2203 * regardless of whether the caller specified flags that
2204 * result in pfn_t_has_page() == false.
2205 */
2206 page = pfn_to_page(pfn_t_to_pfn(pfn));
2207 err = insert_page(vma, addr, page, pgprot);
2208 } else {
2209 return insert_pfn(vma, addr, pfn, pgprot, mkwrite);
2210 }
2211
2212 if (err == -ENOMEM)
2213 return VM_FAULT_OOM;
2214 if (err < 0 && err != -EBUSY)
2215 return VM_FAULT_SIGBUS;
2216
2217 return VM_FAULT_NOPAGE;
2218}
2219
2220/**
2221 * vmf_insert_mixed_prot - insert single pfn into user vma with specified pgprot
2222 * @vma: user vma to map to
2223 * @addr: target user address of this page
2224 * @pfn: source kernel pfn
2225 * @pgprot: pgprot flags for the inserted page
2226 *
2227 * This is exactly like vmf_insert_mixed(), except that it allows drivers
2228 * to override pgprot on a per-page basis.
2229 *
2230 * Typically this function should be used by drivers to set caching- and
2231 * encryption bits different from those of @vma->vm_page_prot, because
2232 * the caching- or encryption mode may not be known at mmap() time.
2233 * This is ok as long as @vma->vm_page_prot is not used by the core vm
2234 * to set caching and encryption bits for those vmas (except for COW pages).
2235 * This is ensured by core vm only modifying these page table entries using
2236 * functions that don't touch caching- or encryption bits, using pte_modify()
2237 * if needed. (See for example mprotect()).
2238 * Also when new page-table entries are created, this is only done using the
2239 * fault() callback, and never using the value of vma->vm_page_prot,
2240 * except for page-table entries that point to anonymous pages as the result
2241 * of COW.
2242 *
2243 * Context: Process context. May allocate using %GFP_KERNEL.
2244 * Return: vm_fault_t value.
2245 */
2246vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr,
2247 pfn_t pfn, pgprot_t pgprot)
2248{
2249 return __vm_insert_mixed(vma, addr, pfn, pgprot, false);
2250}
2251EXPORT_SYMBOL(vmf_insert_mixed_prot);
2252
2253vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
2254 pfn_t pfn)
2255{
2256 return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, false);
2257}
2258EXPORT_SYMBOL(vmf_insert_mixed);
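/*
 * Illustrative sketch (not part of this file): a ->fault() handler for a
 * VM_MIXEDMAP vma whose backing store mixes pages that have a struct page
 * with ones that do not. The my_buf structure and its pfns array are
 * hypothetical.
 *
 *	static vm_fault_t my_buf_fault(struct vm_fault *vmf)
 *	{
 *		struct my_buf *buf = vmf->vma->vm_private_data;
 *
 *		return vmf_insert_mixed(vmf->vma, vmf->address,
 *					pfn_to_pfn_t(buf->pfns[vmf->pgoff]));
 *	}
 */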
2259
2260/*
2261 * If insertion of the PTE failed because someone else already added a
2262 * different entry in the meantime, we treat that as success, assuming
2263 * the same entry was actually inserted.
2264 */
2265vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
2266 unsigned long addr, pfn_t pfn)
2267{
2268 return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, true);
2269}
2270EXPORT_SYMBOL(vmf_insert_mixed_mkwrite);
2271
2272/*
2273 * Maps a range of physical memory into the requested pages. The old
2274 * mappings are removed. Any references to nonexistent pages result
2275 * in null mappings (currently treated as "copy-on-access").
2276 */
2277static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
2278 unsigned long addr, unsigned long end,
2279 unsigned long pfn, pgprot_t prot)
2280{
2281 pte_t *pte, *mapped_pte;
2282 spinlock_t *ptl;
2283 int err = 0;
2284
2285 mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
2286 if (!pte)
2287 return -ENOMEM;
2288 arch_enter_lazy_mmu_mode();
2289 do {
2290 BUG_ON(!pte_none(*pte));
2291 if (!pfn_modify_allowed(pfn, prot)) {
2292 err = -EACCES;
2293 break;
2294 }
2295 set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
2296 pfn++;
2297 } while (pte++, addr += PAGE_SIZE, addr != end);
2298 arch_leave_lazy_mmu_mode();
2299 pte_unmap_unlock(mapped_pte, ptl);
2300 return err;
2301}
2302
2303static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
2304 unsigned long addr, unsigned long end,
2305 unsigned long pfn, pgprot_t prot)
2306{
2307 pmd_t *pmd;
2308 unsigned long next;
2309 int err;
2310
2311 pfn -= addr >> PAGE_SHIFT;
2312 pmd = pmd_alloc(mm, pud, addr);
2313 if (!pmd)
2314 return -ENOMEM;
2315 VM_BUG_ON(pmd_trans_huge(*pmd));
2316 do {
2317 next = pmd_addr_end(addr, end);
2318 err = remap_pte_range(mm, pmd, addr, next,
2319 pfn + (addr >> PAGE_SHIFT), prot);
2320 if (err)
2321 return err;
2322 } while (pmd++, addr = next, addr != end);
2323 return 0;
2324}
2325
2326static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d,
2327 unsigned long addr, unsigned long end,
2328 unsigned long pfn, pgprot_t prot)
2329{
2330 pud_t *pud;
2331 unsigned long next;
2332 int err;
2333
2334 pfn -= addr >> PAGE_SHIFT;
2335 pud = pud_alloc(mm, p4d, addr);
2336 if (!pud)
2337 return -ENOMEM;
2338 do {
2339 next = pud_addr_end(addr, end);
2340 err = remap_pmd_range(mm, pud, addr, next,
2341 pfn + (addr >> PAGE_SHIFT), prot);
2342 if (err)
2343 return err;
2344 } while (pud++, addr = next, addr != end);
2345 return 0;
2346}
2347
2348static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
2349 unsigned long addr, unsigned long end,
2350 unsigned long pfn, pgprot_t prot)
2351{
2352 p4d_t *p4d;
2353 unsigned long next;
2354 int err;
2355
2356 pfn -= addr >> PAGE_SHIFT;
2357 p4d = p4d_alloc(mm, pgd, addr);
2358 if (!p4d)
2359 return -ENOMEM;
2360 do {
2361 next = p4d_addr_end(addr, end);
2362 err = remap_pud_range(mm, p4d, addr, next,
2363 pfn + (addr >> PAGE_SHIFT), prot);
2364 if (err)
2365 return err;
2366 } while (p4d++, addr = next, addr != end);
2367 return 0;
2368}
2369
2370/*
2371 * Variant of remap_pfn_range that does not call track_pfn_remap. The caller
2372 * must have pre-validated the caching bits of the pgprot_t.
2373 */
2374int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
2375 unsigned long pfn, unsigned long size, pgprot_t prot)
2376{
2377 pgd_t *pgd;
2378 unsigned long next;
2379 unsigned long end = addr + PAGE_ALIGN(size);
2380 struct mm_struct *mm = vma->vm_mm;
2381 int err;
2382
2383 if (WARN_ON_ONCE(!PAGE_ALIGNED(addr)))
2384 return -EINVAL;
2385
2386 /*
2387 * Physically remapped pages are special. Tell the
2388 * rest of the world about it:
2389 * VM_IO tells people not to look at these pages
2390 * (accesses can have side effects).
2391 * VM_PFNMAP tells the core MM that the base pages are just
2392 * raw PFN mappings, and do not have a "struct page" associated
2393 * with them.
2394 * VM_DONTEXPAND
2395 * Disable vma merging and expanding with mremap().
2396 * VM_DONTDUMP
2397 * Omit vma from core dump, even when VM_IO turned off.
2398 *
2399 * There's a horrible special case to handle copy-on-write
2400 * behaviour that some programs depend on. We mark the "original"
2401 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
2402 * See vm_normal_page() for details.
2403 */
2404 if (is_cow_mapping(vma->vm_flags)) {
2405 if (addr != vma->vm_start || end != vma->vm_end)
2406 return -EINVAL;
2407 vma->vm_pgoff = pfn;
2408 }
2409
2410 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
2411
2412 BUG_ON(addr >= end);
2413 pfn -= addr >> PAGE_SHIFT;
2414 pgd = pgd_offset(mm, addr);
2415 flush_cache_range(vma, addr, end);
2416 do {
2417 next = pgd_addr_end(addr, end);
2418 err = remap_p4d_range(mm, pgd, addr, next,
2419 pfn + (addr >> PAGE_SHIFT), prot);
2420 if (err)
2421 return err;
2422 } while (pgd++, addr = next, addr != end);
2423
2424 return 0;
2425}
2426
2427/**
2428 * remap_pfn_range - remap kernel memory to userspace
2429 * @vma: user vma to map to
2430 * @addr: target page aligned user address to start at
2431 * @pfn: page frame number of kernel physical memory address
2432 * @size: size of mapping area
2433 * @prot: page protection flags for this mapping
2434 *
2435 * Note: this is only safe if the mm semaphore is held when called.
2436 *
2437 * Return: %0 on success, negative error code otherwise.
2438 */
2439int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
2440 unsigned long pfn, unsigned long size, pgprot_t prot)
2441{
2442 int err;
2443
2444 err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size));
2445 if (err)
2446 return -EINVAL;
2447
2448 err = remap_pfn_range_notrack(vma, addr, pfn, size, prot);
2449 if (err)
2450 untrack_pfn(vma, pfn, PAGE_ALIGN(size));
2451 return err;
2452}
2453EXPORT_SYMBOL(remap_pfn_range);
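/*
 * Illustrative sketch (not part of this file): the classic ->mmap() handler
 * exposing a device's MMIO region with uncached protection. The my_dev
 * structure and its phys_base field are hypothetical.
 *
 *	static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_dev *dev = file->private_data;
 *		unsigned long size = vma->vm_end - vma->vm_start;
 *
 *		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *		return remap_pfn_range(vma, vma->vm_start,
 *				(dev->phys_base >> PAGE_SHIFT) + vma->vm_pgoff,
 *				size, vma->vm_page_prot);
 *	}
 */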
2454
2455/**
2456 * vm_iomap_memory - remap memory to userspace
2457 * @vma: user vma to map to
2458 * @start: start of the physical memory to be mapped
2459 * @len: size of area
2460 *
2461 * This is a simplified io_remap_pfn_range() for common driver use. The
2462 * driver just needs to give us the physical memory range to be mapped,
2463 * we'll figure out the rest from the vma information.
2464 *
2465 * NOTE! Some drivers might want to tweak vma->vm_page_prot first to set
2466 * up write-combining or similar before calling this.
2467 *
2468 * Return: %0 on success, negative error code otherwise.
2469 */
2470int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
2471{
2472 unsigned long vm_len, pfn, pages;
2473
2474 /* Check that the physical memory area passed in looks valid */
2475 if (start + len < start)
2476 return -EINVAL;
2477 /*
2478 * You *really* shouldn't map things that aren't page-aligned,
2479 * but we've historically allowed it because IO memory might
2480 * just have smaller alignment.
2481 */
2482 len += start & ~PAGE_MASK;
2483 pfn = start >> PAGE_SHIFT;
2484 pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
2485 if (pfn + pages < pfn)
2486 return -EINVAL;
2487
2488 /* We start the mapping 'vm_pgoff' pages into the area */
2489 if (vma->vm_pgoff > pages)
2490 return -EINVAL;
2491 pfn += vma->vm_pgoff;
2492 pages -= vma->vm_pgoff;
2493
2494 /* Can we fit all of the mapping? */
2495 vm_len = vma->vm_end - vma->vm_start;
2496 if (vm_len >> PAGE_SHIFT > pages)
2497 return -EINVAL;
2498
2499 /* Ok, let it rip */
2500 return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
2501}
2502EXPORT_SYMBOL(vm_iomap_memory);
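/*
 * Illustrative sketch (not part of this file): with vm_iomap_memory() the
 * ->mmap() handler shrinks to handing over the physical range, and the
 * offset and size checks come from the vma itself. The my_dev fields are
 * hypothetical.
 *
 *	static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_dev *dev = file->private_data;
 *
 *		return vm_iomap_memory(vma, dev->bar_phys, dev->bar_len);
 *	}
 */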
2503
2504static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
2505 unsigned long addr, unsigned long end,
2506 pte_fn_t fn, void *data, bool create,
2507 pgtbl_mod_mask *mask)
2508{
2509 pte_t *pte, *mapped_pte;
2510 int err = 0;
2511 spinlock_t *ptl;
2512
2513 if (create) {
2514 mapped_pte = pte = (mm == &init_mm) ?
2515 pte_alloc_kernel_track(pmd, addr, mask) :
2516 pte_alloc_map_lock(mm, pmd, addr, &ptl);
2517 if (!pte)
2518 return -ENOMEM;
2519 } else {
2520 mapped_pte = pte = (mm == &init_mm) ?
2521 pte_offset_kernel(pmd, addr) :
2522 pte_offset_map_lock(mm, pmd, addr, &ptl);
2523 }
2524
2525 BUG_ON(pmd_huge(*pmd));
2526
2527 arch_enter_lazy_mmu_mode();
2528
2529 if (fn) {
2530 do {
2531 if (create || !pte_none(*pte)) {
2532 err = fn(pte++, addr, data);
2533 if (err)
2534 break;
2535 }
2536 } while (addr += PAGE_SIZE, addr != end);
2537 }
2538 *mask |= PGTBL_PTE_MODIFIED;
2539
2540 arch_leave_lazy_mmu_mode();
2541
2542 if (mm != &init_mm)
2543 pte_unmap_unlock(mapped_pte, ptl);
2544 return err;
2545}
2546
2547static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
2548 unsigned long addr, unsigned long end,
2549 pte_fn_t fn, void *data, bool create,
2550 pgtbl_mod_mask *mask)
2551{
2552 pmd_t *pmd;
2553 unsigned long next;
2554 int err = 0;
2555
2556 BUG_ON(pud_huge(*pud));
2557
2558 if (create) {
2559 pmd = pmd_alloc_track(mm, pud, addr, mask);
2560 if (!pmd)
2561 return -ENOMEM;
2562 } else {
2563 pmd = pmd_offset(pud, addr);
2564 }
2565 do {
2566 next = pmd_addr_end(addr, end);
2567 if (pmd_none(*pmd) && !create)
2568 continue;
2569 if (WARN_ON_ONCE(pmd_leaf(*pmd)))
2570 return -EINVAL;
2571 if (!pmd_none(*pmd) && WARN_ON_ONCE(pmd_bad(*pmd))) {
2572 if (!create)
2573 continue;
2574 pmd_clear_bad(pmd);
2575 }
2576 err = apply_to_pte_range(mm, pmd, addr, next,
2577 fn, data, create, mask);
2578 if (err)
2579 break;
2580 } while (pmd++, addr = next, addr != end);
2581
2582 return err;
2583}
2584
2585static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
2586 unsigned long addr, unsigned long end,
2587 pte_fn_t fn, void *data, bool create,
2588 pgtbl_mod_mask *mask)
2589{
2590 pud_t *pud;
2591 unsigned long next;
2592 int err = 0;
2593
2594 if (create) {
2595 pud = pud_alloc_track(mm, p4d, addr, mask);
2596 if (!pud)
2597 return -ENOMEM;
2598 } else {
2599 pud = pud_offset(p4d, addr);
2600 }
2601 do {
2602 next = pud_addr_end(addr, end);
2603 if (pud_none(*pud) && !create)
2604 continue;
2605 if (WARN_ON_ONCE(pud_leaf(*pud)))
2606 return -EINVAL;
2607 if (!pud_none(*pud) && WARN_ON_ONCE(pud_bad(*pud))) {
2608 if (!create)
2609 continue;
2610 pud_clear_bad(pud);
2611 }
2612 err = apply_to_pmd_range(mm, pud, addr, next,
2613 fn, data, create, mask);
2614 if (err)
2615 break;
2616 } while (pud++, addr = next, addr != end);
2617
2618 return err;
2619}
2620
2621static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
2622 unsigned long addr, unsigned long end,
2623 pte_fn_t fn, void *data, bool create,
2624 pgtbl_mod_mask *mask)
2625{
2626 p4d_t *p4d;
2627 unsigned long next;
2628 int err = 0;
2629
2630 if (create) {
2631 p4d = p4d_alloc_track(mm, pgd, addr, mask);
2632 if (!p4d)
2633 return -ENOMEM;
2634 } else {
2635 p4d = p4d_offset(pgd, addr);
2636 }
2637 do {
2638 next = p4d_addr_end(addr, end);
2639 if (p4d_none(*p4d) && !create)
2640 continue;
2641 if (WARN_ON_ONCE(p4d_leaf(*p4d)))
2642 return -EINVAL;
2643 if (!p4d_none(*p4d) && WARN_ON_ONCE(p4d_bad(*p4d))) {
2644 if (!create)
2645 continue;
2646 p4d_clear_bad(p4d);
2647 }
2648 err = apply_to_pud_range(mm, p4d, addr, next,
2649 fn, data, create, mask);
2650 if (err)
2651 break;
2652 } while (p4d++, addr = next, addr != end);
2653
2654 return err;
2655}
2656
2657static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr,
2658 unsigned long size, pte_fn_t fn,
2659 void *data, bool create)
2660{
2661 pgd_t *pgd;
2662 unsigned long start = addr, next;
2663 unsigned long end = addr + size;
2664 pgtbl_mod_mask mask = 0;
2665 int err = 0;
2666
2667 if (WARN_ON(addr >= end))
2668 return -EINVAL;
2669
2670 pgd = pgd_offset(mm, addr);
2671 do {
2672 next = pgd_addr_end(addr, end);
2673 if (pgd_none(*pgd) && !create)
2674 continue;
2675 if (WARN_ON_ONCE(pgd_leaf(*pgd)))
2676 return -EINVAL;
2677 if (!pgd_none(*pgd) && WARN_ON_ONCE(pgd_bad(*pgd))) {
2678 if (!create)
2679 continue;
2680 pgd_clear_bad(pgd);
2681 }
2682 err = apply_to_p4d_range(mm, pgd, addr, next,
2683 fn, data, create, &mask);
2684 if (err)
2685 break;
2686 } while (pgd++, addr = next, addr != end);
2687
2688 if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
2689 arch_sync_kernel_mappings(start, start + size);
2690
2691 return err;
2692}
2693
2694/*
2695 * Scan a region of virtual memory, filling in page tables as necessary
2696 * and calling a provided function on each leaf page table.
2697 */
2698int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
2699 unsigned long size, pte_fn_t fn, void *data)
2700{
2701 return __apply_to_page_range(mm, addr, size, fn, data, true);
2702}
2703EXPORT_SYMBOL_GPL(apply_to_page_range);
2704
2705/*
2706 * Scan a region of virtual memory, calling a provided function on
2707 * each leaf page table where it exists.
2708 *
2709 * Unlike apply_to_page_range, this does _not_ fill in page tables
2710 * where they are absent.
2711 */
2712int apply_to_existing_page_range(struct mm_struct *mm, unsigned long addr,
2713 unsigned long size, pte_fn_t fn, void *data)
2714{
2715 return __apply_to_page_range(mm, addr, size, fn, data, false);
2716}
2717EXPORT_SYMBOL_GPL(apply_to_existing_page_range);
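/*
 * Illustrative sketch (not part of this file): a pte_fn_t callback used with
 * apply_to_existing_page_range() to count populated PTEs in a range.
 * count_present_pte and count_mapped are hypothetical helpers.
 *
 *	static int count_present_pte(pte_t *pte, unsigned long addr, void *data)
 *	{
 *		unsigned long *nr = data;
 *
 *		if (!pte_none(*pte))
 *			(*nr)++;
 *		return 0;
 *	}
 *
 *	static unsigned long count_mapped(struct mm_struct *mm,
 *				unsigned long start, unsigned long size)
 *	{
 *		unsigned long nr = 0;
 *
 *		apply_to_existing_page_range(mm, start, size,
 *					     count_present_pte, &nr);
 *		return nr;
 *	}
 *
 * With apply_to_page_range() the same callback would also be invoked on
 * freshly allocated (pte_none) entries, since that variant fills in the
 * page tables as it goes.
 */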
2718
2719/*
2720 * handle_pte_fault chooses page fault handler according to an entry which was
2721 * read non-atomically. Before making any commitment, on those architectures
2722 * or configurations (e.g. i386 with PAE) which might give a mix of unmatched
2723 * parts, do_swap_page must check under lock before unmapping the pte and
2724 * proceeding (but do_wp_page is only called after already making such a check;
2725 * and do_anonymous_page can safely check later on).
2726 */
2727static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
2728 pte_t *page_table, pte_t orig_pte)
2729{
2730 int same = 1;
2731#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPTION)
2732 if (sizeof(pte_t) > sizeof(unsigned long)) {
2733 spinlock_t *ptl = pte_lockptr(mm, pmd);
2734 spin_lock(ptl);
2735 same = pte_same(*page_table, orig_pte);
2736 spin_unlock(ptl);
2737 }
2738#endif
2739 pte_unmap(page_table);
2740 return same;
2741}
2742
2743static inline bool cow_user_page(struct page *dst, struct page *src,
2744 struct vm_fault *vmf)
2745{
2746 bool ret;
2747 void *kaddr;
2748 void __user *uaddr;
2749 bool locked = false;
2750 struct vm_area_struct *vma = vmf->vma;
2751 struct mm_struct *mm = vma->vm_mm;
2752 unsigned long addr = vmf->address;
2753
2754 if (likely(src)) {
2755 copy_user_highpage(dst, src, addr, vma);
2756 return true;
2757 }
2758
2759 /*
2760 * If the source page was a PFN mapping, we don't have
2761 * a "struct page" for it. We do a best-effort copy by
2762 * just copying from the original user address. If that
2763 * fails, we just zero-fill it. Live with it.
2764 */
2765 kaddr = kmap_atomic(dst);
2766 uaddr = (void __user *)(addr & PAGE_MASK);
2767
2768 /*
2769 * On architectures with software "accessed" bits, we would
2770 * take a double page fault, so mark it accessed here.
2771 */
2772 if (arch_faults_on_old_pte() && !pte_young(vmf->orig_pte)) {
2773 pte_t entry;
2774
2775 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
2776 locked = true;
2777 if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
2778 /*
2779			 * Another thread has already handled the fault;
2780			 * just update the local TLB.
2781 */
2782 update_mmu_tlb(vma, addr, vmf->pte);
2783 ret = false;
2784 goto pte_unlock;
2785 }
2786
2787 entry = pte_mkyoung(vmf->orig_pte);
2788 if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0))
2789 update_mmu_cache(vma, addr, vmf->pte);
2790 }
2791
2792 /*
2793 * This really shouldn't fail, because the page is there
2794 * in the page tables. But it might just be unreadable,
2795 * in which case we just give up and fill the result with
2796 * zeroes.
2797 */
2798 if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
2799 if (locked)
2800 goto warn;
2801
2802 /* Re-validate under PTL if the page is still mapped */
2803 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
2804 locked = true;
2805 if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
2806 /* The PTE changed under us, update local tlb */
2807 update_mmu_tlb(vma, addr, vmf->pte);
2808 ret = false;
2809 goto pte_unlock;
2810 }
2811
2812 /*
2813		 * The same page may have been mapped back in since the last copy attempt.
2814 * Try to copy again under PTL.
2815 */
2816 if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
2817 /*
2818			 * Warn in case some obscure use-case
2819			 * actually hits this path.
2820 */
2821warn:
2822 WARN_ON_ONCE(1);
2823 clear_page(kaddr);
2824 }
2825 }
2826
2827 ret = true;
2828
2829pte_unlock:
2830 if (locked)
2831 pte_unmap_unlock(vmf->pte, vmf->ptl);
2832 kunmap_atomic(kaddr);
2833 flush_dcache_page(dst);
2834
2835 return ret;
2836}
2837
2838static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
2839{
2840 struct file *vm_file = vma->vm_file;
2841
2842 if (vm_file)
2843 return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO;
2844
2845 /*
2846 * Special mappings (e.g. VDSO) do not have any file so fake
2847 * a default GFP_KERNEL for them.
2848 */
2849 return GFP_KERNEL;
2850}
2851
2852/*
2853 * Notify the address space that the page is about to become writable so that
2854 * it can prohibit this or wait for the page to get into an appropriate state.
2855 *
2856 * We do this without the lock held, so that it can sleep if it needs to.
2857 */
2858static vm_fault_t do_page_mkwrite(struct vm_fault *vmf)
2859{
2860 vm_fault_t ret;
2861 struct page *page = vmf->page;
2862 unsigned int old_flags = vmf->flags;
2863
2864 vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
2865
2866 if (vmf->vma->vm_file &&
2867 IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host))
2868 return VM_FAULT_SIGBUS;
2869
2870 ret = vmf->vma->vm_ops->page_mkwrite(vmf);
2871 /* Restore original flags so that caller is not surprised */
2872 vmf->flags = old_flags;
2873 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
2874 return ret;
2875 if (unlikely(!(ret & VM_FAULT_LOCKED))) {
2876 lock_page(page);
2877 if (!page->mapping) {
2878 unlock_page(page);
2879 return 0; /* retry */
2880 }
2881 ret |= VM_FAULT_LOCKED;
2882 } else
2883 VM_BUG_ON_PAGE(!PageLocked(page), page);
2884 return ret;
2885}
2886
2887/*
2888 * Handle dirtying of a page in a shared file mapping on a write fault.
2889 *
2890 * The function expects the page to be locked and unlocks it.
2891 */
2892static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf)
2893{
2894 struct vm_area_struct *vma = vmf->vma;
2895 struct address_space *mapping;
2896 struct page *page = vmf->page;
2897 bool dirtied;
2898 bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite;
2899
2900 dirtied = set_page_dirty(page);
2901 VM_BUG_ON_PAGE(PageAnon(page), page);
2902 /*
2903 * Take a local copy of the address_space - page.mapping may be zeroed
2904 * by truncate after unlock_page(). The address_space itself remains
2905 * pinned by vma->vm_file's reference. We rely on unlock_page()'s
2906 * release semantics to prevent the compiler from undoing this copying.
2907 */
2908 mapping = page_rmapping(page);
2909 unlock_page(page);
2910
2911 if (!page_mkwrite)
2912 file_update_time(vma->vm_file);
2913
2914 /*
2915 * Throttle page dirtying rate down to writeback speed.
2916 *
2917 * mapping may be NULL here because some device drivers do not
2918 * set page.mapping but still dirty their pages
2919 *
2920 * Drop the mmap_lock before waiting on IO, if we can. The file
2921 * is pinning the mapping, as per above.
2922 */
2923 if ((dirtied || page_mkwrite) && mapping) {
2924 struct file *fpin;
2925
2926 fpin = maybe_unlock_mmap_for_io(vmf, NULL);
2927 balance_dirty_pages_ratelimited(mapping);
2928 if (fpin) {
2929 fput(fpin);
2930 return VM_FAULT_RETRY;
2931 }
2932 }
2933
2934 return 0;
2935}
2936
2937/*
2938 * Handle write page faults for pages that can be reused in the current vma
2939 *
2940 * This can happen either because the mapping has the VM_SHARED flag set,
2941 * or because we hold the last remaining reference to the page. In either
2942 * case, all we need to do here is to mark the page as writable and update
2943 * any related book-keeping.
2944 */
2945static inline void wp_page_reuse(struct vm_fault *vmf)
2946 __releases(vmf->ptl)
2947{
2948 struct vm_area_struct *vma = vmf->vma;
2949 struct page *page = vmf->page;
2950 pte_t entry;
2951 /*
2952	 * Clear the page's cpupid information as the existing
2953 * information potentially belongs to a now completely
2954 * unrelated process.
2955 */
2956 if (page)
2957 page_cpupid_xchg_last(page, (1 << LAST_CPUPID_SHIFT) - 1);
2958
2959 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
2960 entry = pte_mkyoung(vmf->orig_pte);
2961 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2962 if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
2963 update_mmu_cache(vma, vmf->address, vmf->pte);
2964 pte_unmap_unlock(vmf->pte, vmf->ptl);
2965 count_vm_event(PGREUSE);
2966}
2967
2968/*
2969 * Handle the case of a page which we actually need to copy to a new page.
2970 *
2971 * Called with mmap_lock locked and the old page referenced, but
2972 * without the ptl held.
2973 *
2974 * High level logic flow:
2975 *
2976 * - Allocate a page, copy the content of the old page to the new one.
2977 * - Handle book keeping and accounting - cgroups, mmu-notifiers, etc.
2978 * - Take the PTL. If the pte changed, bail out and release the allocated page
2979 * - If the pte is still the way we remember it, update the page table and all
2980 * relevant references. This includes dropping the reference the page-table
2981 * held to the old page, as well as updating the rmap.
2982 * - In any case, unlock the PTL and drop the reference we took to the old page.
2983 */
2984static vm_fault_t wp_page_copy(struct vm_fault *vmf)
2985{
2986 struct vm_area_struct *vma = vmf->vma;
2987 struct mm_struct *mm = vma->vm_mm;
2988 struct page *old_page = vmf->page;
2989 struct page *new_page = NULL;
2990 pte_t entry;
2991 int page_copied = 0;
2992 struct mmu_notifier_range range;
2993
2994 if (unlikely(anon_vma_prepare(vma)))
2995 goto oom;
2996
2997 if (is_zero_pfn(pte_pfn(vmf->orig_pte))) {
2998 new_page = alloc_zeroed_user_highpage_movable(vma,
2999 vmf->address);
3000 if (!new_page)
3001 goto oom;
3002 } else {
3003 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
3004 vmf->address);
3005 if (!new_page)
3006 goto oom;
3007
3008 if (!cow_user_page(new_page, old_page, vmf)) {
3009 /*
3010			 * COW failed; if the fault was resolved by another
3011			 * thread, that's fine. If not, userspace will re-fault
3012			 * on the same address and we will handle the fault
3013			 * on the second attempt.
3014 */
3015 put_page(new_page);
3016 if (old_page)
3017 put_page(old_page);
3018 return 0;
3019 }
3020 }
3021
3022 if (mem_cgroup_charge(new_page, mm, GFP_KERNEL))
3023 goto oom_free_new;
3024 cgroup_throttle_swaprate(new_page, GFP_KERNEL);
3025
3026 __SetPageUptodate(new_page);
3027
3028 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
3029 vmf->address & PAGE_MASK,
3030 (vmf->address & PAGE_MASK) + PAGE_SIZE);
3031 mmu_notifier_invalidate_range_start(&range);
3032
3033 /*
3034 * Re-check the pte - we dropped the lock
3035 */
3036 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
3037 if (likely(pte_same(*vmf->pte, vmf->orig_pte))) {
3038 if (old_page) {
3039 if (!PageAnon(old_page)) {
3040 dec_mm_counter_fast(mm,
3041 mm_counter_file(old_page));
3042 inc_mm_counter_fast(mm, MM_ANONPAGES);
3043 }
3044 } else {
3045 inc_mm_counter_fast(mm, MM_ANONPAGES);
3046 }
3047 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
3048 entry = mk_pte(new_page, vma->vm_page_prot);
3049 entry = pte_sw_mkyoung(entry);
3050 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
3051
3052 /*
3053 * Clear the pte entry and flush it first, before updating the
3054 * pte with the new entry, to keep TLBs on different CPUs in
3055 * sync. This code used to set the new PTE then flush TLBs, but
3056 * that left a window where the new PTE could be loaded into
3057 * some TLBs while the old PTE remains in others.
3058 */
3059 ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
3060 page_add_new_anon_rmap(new_page, vma, vmf->address, false);
3061 lru_cache_add_inactive_or_unevictable(new_page, vma);
3062 /*
3063 * We call the notify macro here because, when using secondary
3064 * mmu page tables (such as kvm shadow page tables), we want the
3065 * new page to be mapped directly into the secondary page table.
3066 */
3067 set_pte_at_notify(mm, vmf->address, vmf->pte, entry);
3068 update_mmu_cache(vma, vmf->address, vmf->pte);
3069 if (old_page) {
3070 /*
3071 * Only after switching the pte to the new page may
3072 * we remove the mapcount here. Otherwise another
3073 * process may come and find the rmap count decremented
3074 * before the pte is switched to the new page, and
3075 * "reuse" the old page writing into it while our pte
3076 * here still points into it and can be read by other
3077 * threads.
3078 *
3079 * The critical issue is to order this
3080			 * page_remove_rmap with the ptep_clear_flush above.
3081 * Those stores are ordered by (if nothing else,)
3082 * the barrier present in the atomic_add_negative
3083 * in page_remove_rmap.
3084 *
3085 * Then the TLB flush in ptep_clear_flush ensures that
3086 * no process can access the old page before the
3087 * decremented mapcount is visible. And the old page
3088 * cannot be reused until after the decremented
3089 * mapcount is visible. So transitively, TLBs to
3090 * old page will be flushed before it can be reused.
3091 */
3092 page_remove_rmap(old_page, false);
3093 }
3094
3095 /* Free the old page.. */
3096 new_page = old_page;
3097 page_copied = 1;
3098 } else {
3099 update_mmu_tlb(vma, vmf->address, vmf->pte);
3100 }
3101
3102 if (new_page)
3103 put_page(new_page);
3104
3105 pte_unmap_unlock(vmf->pte, vmf->ptl);
3106 /*
3107 * No need to double call mmu_notifier->invalidate_range() callback as
3108 * the above ptep_clear_flush_notify() did already call it.
3109 */
3110 mmu_notifier_invalidate_range_only_end(&range);
3111 if (old_page) {
3112 /*
3113 * Don't let another task, with possibly unlocked vma,
3114 * keep the mlocked page.
3115 */
3116 if (page_copied && (vma->vm_flags & VM_LOCKED)) {
3117 lock_page(old_page); /* LRU manipulation */
3118 if (PageMlocked(old_page))
3119 munlock_vma_page(old_page);
3120 unlock_page(old_page);
3121 }
3122 if (page_copied)
3123 free_swap_cache(old_page);
3124 put_page(old_page);
3125 }
3126 return page_copied ? VM_FAULT_WRITE : 0;
3127oom_free_new:
3128 put_page(new_page);
3129oom:
3130 if (old_page)
3131 put_page(old_page);
3132 return VM_FAULT_OOM;
3133}
3134
3135/**
3136 * finish_mkwrite_fault - finish page fault for a shared mapping, making PTE
3137 * writeable once the page is prepared
3138 *
3139 * @vmf: structure describing the fault
3140 *
3141 * This function handles all that is needed to finish a write page fault in a
3142 * shared mapping due to PTE being read-only once the mapped page is prepared.
3143 * It handles locking of PTE and modifying it.
3144 *
3145 * The function expects the page to be locked or other protection against
3146 * concurrent faults / writeback (such as DAX radix tree locks).
3147 *
3148 * Return: %0 on success, %VM_FAULT_NOPAGE when PTE got changed before
3149 * we acquired PTE lock.
3150 */
3151vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf)
3152{
3153 WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED));
3154 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
3155 &vmf->ptl);
3156 /*
3157 * We might have raced with another page fault while we released the
3158 * pte_offset_map_lock.
3159 */
3160 if (!pte_same(*vmf->pte, vmf->orig_pte)) {
3161 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
3162 pte_unmap_unlock(vmf->pte, vmf->ptl);
3163 return VM_FAULT_NOPAGE;
3164 }
3165 wp_page_reuse(vmf);
3166 return 0;
3167}
3168
3169/*
3170 * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED
3171 * mapping
3172 */
3173static vm_fault_t wp_pfn_shared(struct vm_fault *vmf)
3174{
3175 struct vm_area_struct *vma = vmf->vma;
3176
3177 if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) {
3178 vm_fault_t ret;
3179
3180 pte_unmap_unlock(vmf->pte, vmf->ptl);
3181 vmf->flags |= FAULT_FLAG_MKWRITE;
3182 ret = vma->vm_ops->pfn_mkwrite(vmf);
3183 if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))
3184 return ret;
3185 return finish_mkwrite_fault(vmf);
3186 }
3187 wp_page_reuse(vmf);
3188 return VM_FAULT_WRITE;
3189}
3190
3191static vm_fault_t wp_page_shared(struct vm_fault *vmf)
3192 __releases(vmf->ptl)
3193{
3194 struct vm_area_struct *vma = vmf->vma;
3195 vm_fault_t ret = VM_FAULT_WRITE;
3196
3197 get_page(vmf->page);
3198
3199 if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
3200 vm_fault_t tmp;
3201
3202 pte_unmap_unlock(vmf->pte, vmf->ptl);
3203 tmp = do_page_mkwrite(vmf);
3204 if (unlikely(!tmp || (tmp &
3205 (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
3206 put_page(vmf->page);
3207 return tmp;
3208 }
3209 tmp = finish_mkwrite_fault(vmf);
3210 if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
3211 unlock_page(vmf->page);
3212 put_page(vmf->page);
3213 return tmp;
3214 }
3215 } else {
3216 wp_page_reuse(vmf);
3217 lock_page(vmf->page);
3218 }
3219 ret |= fault_dirty_shared_page(vmf);
3220 put_page(vmf->page);
3221
3222 return ret;
3223}
3224
3225/*
3226 * This routine handles present pages, when users try to write
3227 * to a shared page. It is done by copying the page to a new address
3228 * and decrementing the shared-page counter for the old page.
3229 *
3230 * Note that this routine assumes that the protection checks have been
3231 * done by the caller (the low-level page fault routine in most cases).
3232 * Thus we can safely just mark it writable once we've done any necessary
3233 * COW.
3234 *
3235 * We also mark the page dirty at this point even though the page will
3236 * change only once the write actually happens. This avoids a few races,
3237 * and potentially makes it more efficient.
3238 *
3239 * We enter with non-exclusive mmap_lock (to exclude vma changes,
3240 * but allow concurrent faults), with pte both mapped and locked.
3241 * We return with mmap_lock still held, but pte unmapped and unlocked.
3242 */
3243static vm_fault_t do_wp_page(struct vm_fault *vmf)
3244 __releases(vmf->ptl)
3245{
3246 struct vm_area_struct *vma = vmf->vma;
3247
3248 if (userfaultfd_pte_wp(vma, *vmf->pte)) {
3249 pte_unmap_unlock(vmf->pte, vmf->ptl);
3250 return handle_userfault(vmf, VM_UFFD_WP);
3251 }
3252
3253 /*
3254 * Userfaultfd write-protect can defer flushes. Ensure the TLB
3255 * is flushed in this case before copying.
3256 */
3257 if (unlikely(userfaultfd_wp(vmf->vma) &&
3258 mm_tlb_flush_pending(vmf->vma->vm_mm)))
3259 flush_tlb_page(vmf->vma, vmf->address);
3260
3261 vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
3262 if (!vmf->page) {
3263 /*
3264 * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
3265 * VM_PFNMAP VMA.
3266 *
3267 * We should not cow pages in a shared writeable mapping.
3268 * Just mark the pages writable and/or call ops->pfn_mkwrite.
3269 */
3270 if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
3271 (VM_WRITE|VM_SHARED))
3272 return wp_pfn_shared(vmf);
3273
3274 pte_unmap_unlock(vmf->pte, vmf->ptl);
3275 return wp_page_copy(vmf);
3276 }
3277
3278 /*
3279 * Take out anonymous pages first, anonymous shared vmas are
3280 * not dirty accountable.
3281 */
3282 if (PageAnon(vmf->page)) {
3283 struct page *page = vmf->page;
3284
3285 /* PageKsm() doesn't necessarily raise the page refcount */
3286 if (PageKsm(page) || page_count(page) != 1)
3287 goto copy;
3288 if (!trylock_page(page))
3289 goto copy;
3290 if (PageKsm(page) || page_mapcount(page) != 1 || page_count(page) != 1) {
3291 unlock_page(page);
3292 goto copy;
3293 }
3294 /*
3295 * Ok, we've got the only map reference, and the only
3296 * page count reference, and the page is locked,
3297 * it's dark out, and we're wearing sunglasses. Hit it.
3298 */
3299 unlock_page(page);
3300 wp_page_reuse(vmf);
3301 return VM_FAULT_WRITE;
3302 } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
3303 (VM_WRITE|VM_SHARED))) {
3304 return wp_page_shared(vmf);
3305 }
3306copy:
3307 /*
3308 * Ok, we need to copy. Oh, well..
3309 */
3310 get_page(vmf->page);
3311
3312 pte_unmap_unlock(vmf->pte, vmf->ptl);
3313 return wp_page_copy(vmf);
3314}
3315
3316static void unmap_mapping_range_vma(struct vm_area_struct *vma,
3317 unsigned long start_addr, unsigned long end_addr,
3318 struct zap_details *details)
3319{
3320 zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
3321}
3322
3323static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
3324 struct zap_details *details)
3325{
3326 struct vm_area_struct *vma;
3327 pgoff_t vba, vea, zba, zea;
3328
3329 vma_interval_tree_foreach(vma, root,
3330 details->first_index, details->last_index) {
3331
3332 vba = vma->vm_pgoff;
3333 vea = vba + vma_pages(vma) - 1;
3334 zba = details->first_index;
3335 if (zba < vba)
3336 zba = vba;
3337 zea = details->last_index;
3338 if (zea > vea)
3339 zea = vea;
3340
3341 unmap_mapping_range_vma(vma,
3342 ((zba - vba) << PAGE_SHIFT) + vma->vm_start,
3343 ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
3344 details);
3345 }
3346}
3347
3348/**
3349 * unmap_mapping_page() - Unmap single page from processes.
3350 * @page: The locked page to be unmapped.
3351 *
3352 * Unmap this page from any userspace process which still has it mmaped.
3353 * Typically, for efficiency, the range of nearby pages has already been
3354 * unmapped by unmap_mapping_pages() or unmap_mapping_range(). But once
3355 * truncation or invalidation holds the lock on a page, it may find that
3356 * the page has been remapped again: and then uses unmap_mapping_page()
3357 * to unmap it finally.
3358 */
3359void unmap_mapping_page(struct page *page)
3360{
3361 struct address_space *mapping = page->mapping;
3362 struct zap_details details = { };
3363
3364 VM_BUG_ON(!PageLocked(page));
3365 VM_BUG_ON(PageTail(page));
3366
3367 details.check_mapping = mapping;
3368 details.first_index = page->index;
3369 details.last_index = page->index + thp_nr_pages(page) - 1;
3370 details.single_page = page;
3371
3372 i_mmap_lock_write(mapping);
3373 if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
3374 unmap_mapping_range_tree(&mapping->i_mmap, &details);
3375 i_mmap_unlock_write(mapping);
3376}
3377
3378/**
3379 * unmap_mapping_pages() - Unmap pages from processes.
3380 * @mapping: The address space containing pages to be unmapped.
3381 * @start: Index of first page to be unmapped.
3382 * @nr: Number of pages to be unmapped. 0 to unmap to end of file.
3383 * @even_cows: Whether to unmap even private COWed pages.
3384 *
3385 * Unmap the pages in this address space from any userspace process which
3386 * has them mmaped. Generally, you want to remove COWed pages as well when
3387 * a file is being truncated, but not when invalidating pages from the page
3388 * cache.
3389 */
3390void unmap_mapping_pages(struct address_space *mapping, pgoff_t start,
3391 pgoff_t nr, bool even_cows)
3392{
3393 struct zap_details details = { };
3394
3395 details.check_mapping = even_cows ? NULL : mapping;
3396 details.first_index = start;
3397 details.last_index = start + nr - 1;
3398 if (details.last_index < details.first_index)
3399 details.last_index = ULONG_MAX;
3400
3401 i_mmap_lock_write(mapping);
3402 if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
3403 unmap_mapping_range_tree(&mapping->i_mmap, &details);
3404 i_mmap_unlock_write(mapping);
3405}
3406
3407/**
3408 * unmap_mapping_range - unmap the portion of all mmaps in the specified
3409 * address_space corresponding to the specified byte range in the underlying
3410 * file.
3411 *
3412 * @mapping: the address space containing mmaps to be unmapped.
3413 * @holebegin: byte in first page to unmap, relative to the start of
3414 * the underlying file. This will be rounded down to a PAGE_SIZE
3415 * boundary. Note that this is different from truncate_pagecache(), which
3416 * must keep the partial page. In contrast, we must get rid of
3417 * partial pages.
3418 * @holelen: size of prospective hole in bytes. This will be rounded
3419 * up to a PAGE_SIZE boundary. A holelen of zero truncates to the
3420 * end of the file.
3421 * @even_cows: 1 when truncating a file, unmap even private COWed pages;
3422 * but 0 when invalidating pagecache, don't throw away private data.
3423 */
3424void unmap_mapping_range(struct address_space *mapping,
3425 loff_t const holebegin, loff_t const holelen, int even_cows)
3426{
3427 pgoff_t hba = holebegin >> PAGE_SHIFT;
3428 pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
3429
3430 /* Check for overflow. */
3431 if (sizeof(holelen) > sizeof(hlen)) {
3432 long long holeend =
3433 (holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
3434 if (holeend & ~(long long)ULONG_MAX)
3435 hlen = ULONG_MAX - hba + 1;
3436 }
3437
3438 unmap_mapping_pages(mapping, hba, hlen, even_cows);
3439}
3440EXPORT_SYMBOL(unmap_mapping_range);
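/*
 * Illustrative sketch (not part of this file): how the even_cows argument
 * is typically chosen by filesystem code; the new_size/pos/len values are
 * hypothetical.
 *
 * Truncating to new_size (private COW copies of the dropped range go too):
 *
 *	unmap_mapping_range(inode->i_mapping, new_size, 0, 1);
 *
 * Invalidating the cache for [pos, pos + len) (keep private COW copies):
 *
 *	unmap_mapping_range(inode->i_mapping, pos, len, 0);
 *
 * As noted above, holebegin is rounded down, so real truncate paths pass a
 * page-aligned offset that keeps the partial final page intact.
 */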
3441
3442/*
3443 * Restore a potential device exclusive pte to a working pte entry
3444 */
3445static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf)
3446{
3447 struct page *page = vmf->page;
3448 struct vm_area_struct *vma = vmf->vma;
3449 struct mmu_notifier_range range;
3450
3451 if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags))
3452 return VM_FAULT_RETRY;
3453 mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, vma,
3454 vma->vm_mm, vmf->address & PAGE_MASK,
3455 (vmf->address & PAGE_MASK) + PAGE_SIZE, NULL);
3456 mmu_notifier_invalidate_range_start(&range);
3457
3458 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3459 &vmf->ptl);
3460 if (likely(pte_same(*vmf->pte, vmf->orig_pte)))
3461 restore_exclusive_pte(vma, page, vmf->address, vmf->pte);
3462
3463 pte_unmap_unlock(vmf->pte, vmf->ptl);
3464 unlock_page(page);
3465
3466 mmu_notifier_invalidate_range_end(&range);
3467 return 0;
3468}
3469
3470/*
3471 * We enter with non-exclusive mmap_lock (to exclude vma changes,
3472 * but allow concurrent faults), and pte mapped but not yet locked.
3473 * We return with pte unmapped and unlocked.
3474 *
3475 * We return with the mmap_lock locked or unlocked in the same cases
3476 * as does filemap_fault().
3477 */
3478vm_fault_t do_swap_page(struct vm_fault *vmf)
3479{
3480 struct vm_area_struct *vma = vmf->vma;
3481 struct page *page = NULL, *swapcache;
3482 struct swap_info_struct *si = NULL;
3483 swp_entry_t entry;
3484 pte_t pte;
3485 int locked;
3486 int exclusive = 0;
3487 vm_fault_t ret = 0;
3488 void *shadow = NULL;
3489
3490 if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte))
3491 goto out;
3492
3493 entry = pte_to_swp_entry(vmf->orig_pte);
3494 if (unlikely(non_swap_entry(entry))) {
3495 if (is_migration_entry(entry)) {
3496 migration_entry_wait(vma->vm_mm, vmf->pmd,
3497 vmf->address);
3498 } else if (is_device_exclusive_entry(entry)) {
3499 vmf->page = pfn_swap_entry_to_page(entry);
3500 ret = remove_device_exclusive_entry(vmf);
3501 } else if (is_device_private_entry(entry)) {
3502 vmf->page = pfn_swap_entry_to_page(entry);
3503 ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
3504 } else if (is_hwpoison_entry(entry)) {
3505 ret = VM_FAULT_HWPOISON;
3506 } else {
3507 print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL);
3508 ret = VM_FAULT_SIGBUS;
3509 }
3510 goto out;
3511 }
3512
3513 /* Prevent swapoff from happening to us. */
3514 si = get_swap_device(entry);
3515 if (unlikely(!si))
3516 goto out;
3517
3518 delayacct_set_flag(current, DELAYACCT_PF_SWAPIN);
3519 page = lookup_swap_cache(entry, vma, vmf->address);
3520 swapcache = page;
3521
3522 if (!page) {
3523 if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
3524 __swap_count(entry) == 1) {
3525 /* skip swapcache */
3526 page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
3527 vmf->address);
3528 if (page) {
3529 __SetPageLocked(page);
3530 __SetPageSwapBacked(page);
3531
3532 if (mem_cgroup_swapin_charge_page(page,
3533 vma->vm_mm, GFP_KERNEL, entry)) {
3534 ret = VM_FAULT_OOM;
3535 goto out_page;
3536 }
3537 mem_cgroup_swapin_uncharge_swap(entry);
3538
3539 shadow = get_shadow_from_swap_cache(entry);
3540 if (shadow)
3541 workingset_refault(page, shadow);
3542
3543 lru_cache_add(page);
3544
3545 /* To provide entry to swap_readpage() */
3546 set_page_private(page, entry.val);
3547 swap_readpage(page, true);
3548 set_page_private(page, 0);
3549 }
3550 } else {
3551 page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
3552 vmf);
3553 swapcache = page;
3554 }
3555
3556 if (!page) {
3557 /*
3558 * Back out if somebody else faulted in this pte
3559 * while we released the pte lock.
3560 */
3561 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
3562 vmf->address, &vmf->ptl);
3563 if (likely(pte_same(*vmf->pte, vmf->orig_pte)))
3564 ret = VM_FAULT_OOM;
3565 delayacct_clear_flag(current, DELAYACCT_PF_SWAPIN);
3566 goto unlock;
3567 }
3568
3569 /* Had to read the page from swap area: Major fault */
3570 ret = VM_FAULT_MAJOR;
3571 count_vm_event(PGMAJFAULT);
3572 count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
3573 } else if (PageHWPoison(page)) {
3574 /*
3575 * hwpoisoned dirty swapcache pages are kept for killing
3576 * owner processes (which may be unknown at hwpoison time)
3577 */
3578 ret = VM_FAULT_HWPOISON;
3579 delayacct_clear_flag(current, DELAYACCT_PF_SWAPIN);
3580 goto out_release;
3581 }
3582
3583 locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags);
3584
3585 delayacct_clear_flag(current, DELAYACCT_PF_SWAPIN);
3586 if (!locked) {
3587 ret |= VM_FAULT_RETRY;
3588 goto out_release;
3589 }
3590
3591 /*
3592 * Make sure try_to_free_swap or reuse_swap_page or swapoff did not
3593 * release the swapcache from under us. The page pin, and pte_same
3594 * test below, are not enough to exclude that. Even if it is still
3595 * swapcache, we need to check that the page's swap has not changed.
3596 */
3597 if (unlikely((!PageSwapCache(page) ||
3598 page_private(page) != entry.val)) && swapcache)
3599 goto out_page;
3600
3601 page = ksm_might_need_to_copy(page, vma, vmf->address);
3602 if (unlikely(!page)) {
3603 ret = VM_FAULT_OOM;
3604 page = swapcache;
3605 goto out_page;
3606 }
3607
3608 cgroup_throttle_swaprate(page, GFP_KERNEL);
3609
3610 /*
3611 * Back out if somebody else already faulted in this pte.
3612 */
3613 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3614 &vmf->ptl);
3615 if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte)))
3616 goto out_nomap;
3617
3618 if (unlikely(!PageUptodate(page))) {
3619 ret = VM_FAULT_SIGBUS;
3620 goto out_nomap;
3621 }
3622
3623 /*
3624	 * The page isn't present yet; go ahead with the fault.
3625 *
3626 * Be careful about the sequence of operations here.
3627 * To get its accounting right, reuse_swap_page() must be called
3628 * while the page is counted on swap but not yet in mapcount i.e.
3629 * before page_add_anon_rmap() and swap_free(); try_to_free_swap()
3630 * must be called after the swap_free(), or it will never succeed.
3631 */
3632
3633 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
3634 dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS);
3635 pte = mk_pte(page, vma->vm_page_prot);
3636 if ((vmf->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) {
3637 pte = maybe_mkwrite(pte_mkdirty(pte), vma);
3638 vmf->flags &= ~FAULT_FLAG_WRITE;
3639 ret |= VM_FAULT_WRITE;
3640 exclusive = RMAP_EXCLUSIVE;
3641 }
3642 flush_icache_page(vma, page);
3643 if (pte_swp_soft_dirty(vmf->orig_pte))
3644 pte = pte_mksoft_dirty(pte);
3645 if (pte_swp_uffd_wp(vmf->orig_pte)) {
3646 pte = pte_mkuffd_wp(pte);
3647 pte = pte_wrprotect(pte);
3648 }
3649 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
3650 arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte);
3651 vmf->orig_pte = pte;
3652
3653 /* ksm created a completely new copy */
3654 if (unlikely(page != swapcache && swapcache)) {
3655 page_add_new_anon_rmap(page, vma, vmf->address, false);
3656 lru_cache_add_inactive_or_unevictable(page, vma);
3657 } else {
3658 do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
3659 }
3660
3661 swap_free(entry);
3662 if (mem_cgroup_swap_full(page) ||
3663 (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
3664 try_to_free_swap(page);
3665 unlock_page(page);
3666 if (page != swapcache && swapcache) {
3667 /*
3668		 * Hold the lock to avoid the swap entry being reused
3669 * until we take the PT lock for the pte_same() check
3670 * (to avoid false positives from pte_same). For
3671 * further safety release the lock after the swap_free
3672 * so that the swap count won't change under a
3673 * parallel locked swapcache.
3674 */
3675 unlock_page(swapcache);
3676 put_page(swapcache);
3677 }
3678
3679 if (vmf->flags & FAULT_FLAG_WRITE) {
3680 ret |= do_wp_page(vmf);
3681 if (ret & VM_FAULT_ERROR)
3682 ret &= VM_FAULT_ERROR;
3683 goto out;
3684 }
3685
3686 /* No need to invalidate - it was non-present before */
3687 update_mmu_cache(vma, vmf->address, vmf->pte);
3688unlock:
3689 pte_unmap_unlock(vmf->pte, vmf->ptl);
3690out:
3691 if (si)
3692 put_swap_device(si);
3693 return ret;
3694out_nomap:
3695 pte_unmap_unlock(vmf->pte, vmf->ptl);
3696out_page:
3697 unlock_page(page);
3698out_release:
3699 put_page(page);
3700 if (page != swapcache && swapcache) {
3701 unlock_page(swapcache);
3702 put_page(swapcache);
3703 }
3704 if (si)
3705 put_swap_device(si);
3706 return ret;
3707}
3708
3709/*
3710 * We enter with non-exclusive mmap_lock (to exclude vma changes,
3711 * but allow concurrent faults), and pte mapped but not yet locked.
3712 * We return with mmap_lock still held, but pte unmapped and unlocked.
3713 */
3714static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
3715{
3716 struct vm_area_struct *vma = vmf->vma;
3717 struct page *page;
3718 vm_fault_t ret = 0;
3719 pte_t entry;
3720
3721 /* File mapping without ->vm_ops ? */
3722 if (vma->vm_flags & VM_SHARED)
3723 return VM_FAULT_SIGBUS;
3724
3725 /*
3726 * Use pte_alloc() instead of pte_alloc_map(). We can't run
3727 * pte_offset_map() on pmds where a huge pmd might be created
3728 * from a different thread.
3729 *
3730 * pte_alloc_map() is safe to use under mmap_write_lock(mm) or when
3731 * parallel threads are excluded by other means.
3732 *
3733 * Here we only have mmap_read_lock(mm).
3734 */
3735 if (pte_alloc(vma->vm_mm, vmf->pmd))
3736 return VM_FAULT_OOM;
3737
3738 /* See comment in handle_pte_fault() */
3739 if (unlikely(pmd_trans_unstable(vmf->pmd)))
3740 return 0;
3741
3742 /* Use the zero-page for reads */
3743 if (!(vmf->flags & FAULT_FLAG_WRITE) &&
3744 !mm_forbids_zeropage(vma->vm_mm)) {
3745 entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
3746 vma->vm_page_prot));
3747 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
3748 vmf->address, &vmf->ptl);
3749 if (!pte_none(*vmf->pte)) {
3750 update_mmu_tlb(vma, vmf->address, vmf->pte);
3751 goto unlock;
3752 }
3753 ret = check_stable_address_space(vma->vm_mm);
3754 if (ret)
3755 goto unlock;
3756 /* Deliver the page fault to userland, check inside PT lock */
3757 if (userfaultfd_missing(vma)) {
3758 pte_unmap_unlock(vmf->pte, vmf->ptl);
3759 return handle_userfault(vmf, VM_UFFD_MISSING);
3760 }
3761 goto setpte;
3762 }
3763
3764 /* Allocate our own private page. */
3765 if (unlikely(anon_vma_prepare(vma)))
3766 goto oom;
3767 page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
3768 if (!page)
3769 goto oom;
3770
3771 if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL))
3772 goto oom_free_page;
3773 cgroup_throttle_swaprate(page, GFP_KERNEL);
3774
3775 /*
3776 * The memory barrier inside __SetPageUptodate makes sure that
3777 * preceding stores to the page contents become visible before
3778 * the set_pte_at() write.
3779 */
3780 __SetPageUptodate(page);
3781
3782 entry = mk_pte(page, vma->vm_page_prot);
3783 entry = pte_sw_mkyoung(entry);
3784 if (vma->vm_flags & VM_WRITE)
3785 entry = pte_mkwrite(pte_mkdirty(entry));
3786
3787 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3788 &vmf->ptl);
3789 if (!pte_none(*vmf->pte)) {
3790 update_mmu_cache(vma, vmf->address, vmf->pte);
3791 goto release;
3792 }
3793
3794 ret = check_stable_address_space(vma->vm_mm);
3795 if (ret)
3796 goto release;
3797
3798 /* Deliver the page fault to userland, check inside PT lock */
3799 if (userfaultfd_missing(vma)) {
3800 pte_unmap_unlock(vmf->pte, vmf->ptl);
3801 put_page(page);
3802 return handle_userfault(vmf, VM_UFFD_MISSING);
3803 }
3804
3805 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
3806 page_add_new_anon_rmap(page, vma, vmf->address, false);
3807 lru_cache_add_inactive_or_unevictable(page, vma);
3808setpte:
3809 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
3810
3811 /* No need to invalidate - it was non-present before */
3812 update_mmu_cache(vma, vmf->address, vmf->pte);
3813unlock:
3814 pte_unmap_unlock(vmf->pte, vmf->ptl);
3815 return ret;
3816release:
3817 put_page(page);
3818 goto unlock;
3819oom_free_page:
3820 put_page(page);
3821oom:
3822 return VM_FAULT_OOM;
3823}
3824
3825/*
3826 * The mmap_lock must have been held on entry, and may have been
3827 * released depending on flags and vma->vm_ops->fault() return value.
3828	 * See filemap_fault() and __lock_page_or_retry().
3829 */
3830static vm_fault_t __do_fault(struct vm_fault *vmf)
3831{
3832 struct vm_area_struct *vma = vmf->vma;
3833 vm_fault_t ret;
3834
3835 /*
3836 * Preallocate pte before we take page_lock because this might lead to
3837 * deadlocks for memcg reclaim which waits for pages under writeback:
3838	 *				lock_page(A)
3839	 *				SetPageWriteback(A)
3840	 *				unlock_page(A)
3841	 * lock_page(B)
3842	 *				lock_page(B)
3843	 * pte_alloc_one
3844	 *   shrink_page_list
3845	 *     wait_on_page_writeback(A)
3846	 *				SetPageWriteback(B)
3847	 *				unlock_page(B)
3848	 *				# flush A, B to clear the writeback
3849 */
3850 if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) {
3851 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
3852 if (!vmf->prealloc_pte)
3853 return VM_FAULT_OOM;
3854 smp_wmb(); /* See comment in __pte_alloc() */
3855 }
3856
3857 ret = vma->vm_ops->fault(vmf);
3858 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
3859 VM_FAULT_DONE_COW)))
3860 return ret;
3861
3862 if (unlikely(PageHWPoison(vmf->page))) {
3863 if (ret & VM_FAULT_LOCKED)
3864 unlock_page(vmf->page);
3865 put_page(vmf->page);
3866 vmf->page = NULL;
3867 return VM_FAULT_HWPOISON;
3868 }
3869
3870 if (unlikely(!(ret & VM_FAULT_LOCKED)))
3871 lock_page(vmf->page);
3872 else
3873 VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page);
3874
3875 return ret;
3876}
3877
3878#ifdef CONFIG_TRANSPARENT_HUGEPAGE
3879static void deposit_prealloc_pte(struct vm_fault *vmf)
3880{
3881 struct vm_area_struct *vma = vmf->vma;
3882
3883 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
3884 /*
3885 * We are going to consume the prealloc table,
3886 * count that as nr_ptes.
3887 */
3888 mm_inc_nr_ptes(vma->vm_mm);
3889 vmf->prealloc_pte = NULL;
3890}
3891
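/*
 * Try to install a huge pmd mapping the (file-backed) compound page that
 * covers the fault.  Returns VM_FAULT_FALLBACK when the VMA or the page is
 * not suitable for a PMD-sized mapping, so the caller falls back to mapping
 * individual ptes instead.
 */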
3892vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
3893{
3894 struct vm_area_struct *vma = vmf->vma;
3895 bool write = vmf->flags & FAULT_FLAG_WRITE;
3896 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
3897 pmd_t entry;
3898 int i;
3899 vm_fault_t ret = VM_FAULT_FALLBACK;
3900
3901 if (!transhuge_vma_suitable(vma, haddr))
3902 return ret;
3903
3904 page = compound_head(page);
3905 if (compound_order(page) != HPAGE_PMD_ORDER)
3906 return ret;
3907
3908 /*
3909 * Archs like ppc64 need additional space to store information
3910 * related to pte entry. Use the preallocated table for that.
3911 */
3912 if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) {
3913 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
3914 if (!vmf->prealloc_pte)
3915 return VM_FAULT_OOM;
3916 smp_wmb(); /* See comment in __pte_alloc() */
3917 }
3918
3919 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
3920 if (unlikely(!pmd_none(*vmf->pmd)))
3921 goto out;
3922
3923 for (i = 0; i < HPAGE_PMD_NR; i++)
3924 flush_icache_page(vma, page + i);
3925
3926 entry = mk_huge_pmd(page, vma->vm_page_prot);
3927 if (write)
3928 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
3929
3930 add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR);
3931 page_add_file_rmap(page, true);
3932 /*
3933 * deposit and withdraw with pmd lock held
3934 */
3935 if (arch_needs_pgtable_deposit())
3936 deposit_prealloc_pte(vmf);
3937
3938 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
3939
3940 update_mmu_cache_pmd(vma, haddr, vmf->pmd);
3941
3942 /* fault is handled */
3943 ret = 0;
3944 count_vm_event(THP_FILE_MAPPED);
3945out:
3946 spin_unlock(vmf->ptl);
3947 return ret;
3948}
3949#else
3950vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
3951{
3952 return VM_FAULT_FALLBACK;
3953}
3954#endif
3955
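/*
 * Build and install the pte for a freshly faulted-in page at @addr; the
 * caller holds the pte lock.  Private writable faults get an anonymous rmap
 * and LRU placement, everything else is accounted and rmapped as a file
 * mapping.  Prefaulted (non-target) pages may be installed as old when the
 * architecture prefers that.
 */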
3956void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr)
3957{
3958 struct vm_area_struct *vma = vmf->vma;
3959 bool write = vmf->flags & FAULT_FLAG_WRITE;
3960 bool prefault = vmf->address != addr;
3961 pte_t entry;
3962
3963 flush_icache_page(vma, page);
3964 entry = mk_pte(page, vma->vm_page_prot);
3965
3966 if (prefault && arch_wants_old_prefaulted_pte())
3967 entry = pte_mkold(entry);
3968 else
3969 entry = pte_sw_mkyoung(entry);
3970
3971 if (write)
3972 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
3973 /* copy-on-write page */
3974 if (write && !(vma->vm_flags & VM_SHARED)) {
3975 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
3976 page_add_new_anon_rmap(page, vma, addr, false);
3977 lru_cache_add_inactive_or_unevictable(page, vma);
3978 } else {
3979 inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
3980 page_add_file_rmap(page, false);
3981 }
3982 set_pte_at(vma->vm_mm, addr, vmf->pte, entry);
3983}
3984
3985/**
3986 * finish_fault - finish page fault once we have prepared the page to fault
3987 *
3988 * @vmf: structure describing the fault
3989 *
3990 * This function handles all that is needed to finish a page fault once the
3991 * page to fault in is prepared. It handles locking of PTEs, inserts PTE for
3992 * given page, adds reverse page mapping, handles memcg charges and LRU
3993 * addition.
3994 *
3995 * The function expects the page to be locked and on success it consumes a
3996 * reference of a page being mapped (for the PTE which maps it).
3997 *
3998 * Return: %0 on success, %VM_FAULT_ code in case of error.
3999 */
4000vm_fault_t finish_fault(struct vm_fault *vmf)
4001{
4002 struct vm_area_struct *vma = vmf->vma;
4003 struct page *page;
4004 vm_fault_t ret;
4005
4006 /* Did we COW the page? */
4007 if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
4008 page = vmf->cow_page;
4009 else
4010 page = vmf->page;
4011
4012 /*
4013 * check even for read faults because we might have lost our CoWed
4014 * page
4015 */
4016 if (!(vma->vm_flags & VM_SHARED)) {
4017 ret = check_stable_address_space(vma->vm_mm);
4018 if (ret)
4019 return ret;
4020 }
4021
4022 if (pmd_none(*vmf->pmd)) {
4023 if (PageTransCompound(page)) {
4024 ret = do_set_pmd(vmf, page);
4025 if (ret != VM_FAULT_FALLBACK)
4026 return ret;
4027 }
4028
4029 if (vmf->prealloc_pte) {
4030 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
4031 if (likely(pmd_none(*vmf->pmd))) {
4032 mm_inc_nr_ptes(vma->vm_mm);
4033 pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
4034 vmf->prealloc_pte = NULL;
4035 }
4036 spin_unlock(vmf->ptl);
4037 } else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) {
4038 return VM_FAULT_OOM;
4039 }
4040 }
4041
4042 /* See comment in handle_pte_fault() */
4043 if (pmd_devmap_trans_unstable(vmf->pmd))
4044 return 0;
4045
4046 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
4047 vmf->address, &vmf->ptl);
4048 ret = 0;
4049 /* Re-check under ptl */
4050 if (likely(pte_none(*vmf->pte)))
4051 do_set_pte(vmf, page, vmf->address);
4052 else
4053 ret = VM_FAULT_NOPAGE;
4054
4055 update_mmu_tlb(vma, vmf->address, vmf->pte);
4056 pte_unmap_unlock(vmf->pte, vmf->ptl);
4057 return ret;
4058}
4059
4060static unsigned long fault_around_bytes __read_mostly =
4061 rounddown_pow_of_two(65536);
4062
4063#ifdef CONFIG_DEBUG_FS
4064static int fault_around_bytes_get(void *data, u64 *val)
4065{
4066 *val = fault_around_bytes;
4067 return 0;
4068}
4069
4070/*
4071 * fault_around_bytes must be rounded down to the nearest page order as it's
4072 * what do_fault_around() expects to see.
4073 */
4074static int fault_around_bytes_set(void *data, u64 val)
4075{
4076 if (val / PAGE_SIZE > PTRS_PER_PTE)
4077 return -EINVAL;
4078 if (val > PAGE_SIZE)
4079 fault_around_bytes = rounddown_pow_of_two(val);
4080 else
4081 fault_around_bytes = PAGE_SIZE; /* rounddown_pow_of_two(0) is undefined */
4082 return 0;
4083}
4084DEFINE_DEBUGFS_ATTRIBUTE(fault_around_bytes_fops,
4085 fault_around_bytes_get, fault_around_bytes_set, "%llu\n");
4086
4087static int __init fault_around_debugfs(void)
4088{
4089 debugfs_create_file_unsafe("fault_around_bytes", 0644, NULL, NULL,
4090 &fault_around_bytes_fops);
4091 return 0;
4092}
4093late_initcall(fault_around_debugfs);
4094#endif
4095
4096/*
4097	 * do_fault_around() tries to map a few pages around the fault address. The hope
4098 * is that the pages will be needed soon and this will lower the number of
4099 * faults to handle.
4100 *
4101 * It uses vm_ops->map_pages() to map the pages, which skips the page if it's
4102 * not ready to be mapped: not up-to-date, locked, etc.
4103 *
4104 * This function is called with the page table lock taken. In the split ptlock
4105	 * case the page table lock protects only those entries which belong to
4106 * the page table corresponding to the fault address.
4107 *
4108 * This function doesn't cross the VMA boundaries, in order to call map_pages()
4109 * only once.
4110 *
4111 * fault_around_bytes defines how many bytes we'll try to map.
4112	 * do_fault_around() expects it to be set to a power of two, no larger than
4113	 * PTRS_PER_PTE * PAGE_SIZE.
4114 *
4115 * The virtual address of the area that we map is naturally aligned to
4116 * fault_around_bytes rounded down to the machine page size
4117 * (and therefore to page order). This way it's easier to guarantee
4118 * that we don't cross page table boundaries.
4119 */
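/*
 * Example: with 4KB pages and the default fault_around_bytes of 65536,
 * nr_pages is 16, so the window is the 64KB-aligned block containing the
 * fault address, clipped to the VMA and to the current page table.
 */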
4120static vm_fault_t do_fault_around(struct vm_fault *vmf)
4121{
4122 unsigned long address = vmf->address, nr_pages, mask;
4123 pgoff_t start_pgoff = vmf->pgoff;
4124 pgoff_t end_pgoff;
4125 int off;
4126
4127 nr_pages = READ_ONCE(fault_around_bytes) >> PAGE_SHIFT;
4128 mask = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK;
4129
4130 address = max(address & mask, vmf->vma->vm_start);
4131 off = ((vmf->address - address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
4132 start_pgoff -= off;
4133
4134 /*
4135 * end_pgoff is either the end of the page table, the end of
4136	 * the vma or nr_pages from start_pgoff, depending on what is nearest.
4137 */
4138 end_pgoff = start_pgoff -
4139 ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +
4140 PTRS_PER_PTE - 1;
4141 end_pgoff = min3(end_pgoff, vma_pages(vmf->vma) + vmf->vma->vm_pgoff - 1,
4142 start_pgoff + nr_pages - 1);
4143
4144 if (pmd_none(*vmf->pmd)) {
4145 vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
4146 if (!vmf->prealloc_pte)
4147 return VM_FAULT_OOM;
4148 smp_wmb(); /* See comment in __pte_alloc() */
4149 }
4150
4151 return vmf->vma->vm_ops->map_pages(vmf, start_pgoff, end_pgoff);
4152}
4153
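/*
 * Handle a read fault on a file-backed VMA: try to map a batch of pages
 * around the fault address with ->map_pages() first, and only fall back to
 * a full ->fault() call when the target page was not ready to be mapped.
 */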
4154static vm_fault_t do_read_fault(struct vm_fault *vmf)
4155{
4156 struct vm_area_struct *vma = vmf->vma;
4157 vm_fault_t ret = 0;
4158
4159 /*
4160 * Let's call ->map_pages() first and use ->fault() as fallback
4161	 * if the page at that offset is not ready to be mapped (cold cache or
4162 * something).
4163 */
4164 if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) {
4165 if (likely(!userfaultfd_minor(vmf->vma))) {
4166 ret = do_fault_around(vmf);
4167 if (ret)
4168 return ret;
4169 }
4170 }
4171
4172 ret = __do_fault(vmf);
4173 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4174 return ret;
4175
4176 ret |= finish_fault(vmf);
4177 unlock_page(vmf->page);
4178 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4179 put_page(vmf->page);
4180 return ret;
4181}
4182
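/*
 * Handle a write fault on a private file mapping: read the backing page via
 * ->fault(), copy it into a newly allocated, memcg-charged anonymous page
 * and map that copy instead.
 */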
4183static vm_fault_t do_cow_fault(struct vm_fault *vmf)
4184{
4185 struct vm_area_struct *vma = vmf->vma;
4186 vm_fault_t ret;
4187
4188 if (unlikely(anon_vma_prepare(vma)))
4189 return VM_FAULT_OOM;
4190
4191 vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address);
4192 if (!vmf->cow_page)
4193 return VM_FAULT_OOM;
4194
4195 if (mem_cgroup_charge(vmf->cow_page, vma->vm_mm, GFP_KERNEL)) {
4196 put_page(vmf->cow_page);
4197 return VM_FAULT_OOM;
4198 }
4199 cgroup_throttle_swaprate(vmf->cow_page, GFP_KERNEL);
4200
4201 ret = __do_fault(vmf);
4202 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4203 goto uncharge_out;
4204 if (ret & VM_FAULT_DONE_COW)
4205 return ret;
4206
4207 copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
4208 __SetPageUptodate(vmf->cow_page);
4209
4210 ret |= finish_fault(vmf);
4211 unlock_page(vmf->page);
4212 put_page(vmf->page);
4213 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4214 goto uncharge_out;
4215 return ret;
4216uncharge_out:
4217 put_page(vmf->cow_page);
4218 return ret;
4219}
4220
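/*
 * Handle a write fault on a shared file mapping: fault the page in, give the
 * filesystem a chance to prepare it via ->page_mkwrite() if it has one, then
 * map it writable and handle dirty accounting/balancing.
 */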
4221static vm_fault_t do_shared_fault(struct vm_fault *vmf)
4222{
4223 struct vm_area_struct *vma = vmf->vma;
4224 vm_fault_t ret, tmp;
4225
4226 ret = __do_fault(vmf);
4227 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4228 return ret;
4229
4230 /*
4231 * Check if the backing address space wants to know that the page is
4232 * about to become writable
4233 */
4234 if (vma->vm_ops->page_mkwrite) {
4235 unlock_page(vmf->page);
4236 tmp = do_page_mkwrite(vmf);
4237 if (unlikely(!tmp ||
4238 (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
4239 put_page(vmf->page);
4240 return tmp;
4241 }
4242 }
4243
4244 ret |= finish_fault(vmf);
4245 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
4246 VM_FAULT_RETRY))) {
4247 unlock_page(vmf->page);
4248 put_page(vmf->page);
4249 return ret;
4250 }
4251
4252 ret |= fault_dirty_shared_page(vmf);
4253 return ret;
4254}
4255
4256/*
4257 * We enter with non-exclusive mmap_lock (to exclude vma changes,
4258 * but allow concurrent faults).
4259 * The mmap_lock may have been released depending on flags and our
4260 * return value. See filemap_fault() and __lock_page_or_retry().
4261 * If mmap_lock is released, vma may become invalid (for example
4262 * by other thread calling munmap()).
4263 */
4264static vm_fault_t do_fault(struct vm_fault *vmf)
4265{
4266 struct vm_area_struct *vma = vmf->vma;
4267 struct mm_struct *vm_mm = vma->vm_mm;
4268 vm_fault_t ret;
4269
4270 /*
4271 * The VMA was not fully populated on mmap() or missing VM_DONTEXPAND
4272 */
4273 if (!vma->vm_ops->fault) {
4274 /*
4275 * If we find a migration pmd entry or a none pmd entry, which
4276 * should never happen, return SIGBUS
4277 */
4278 if (unlikely(!pmd_present(*vmf->pmd)))
4279 ret = VM_FAULT_SIGBUS;
4280 else {
4281 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm,
4282 vmf->pmd,
4283 vmf->address,
4284 &vmf->ptl);
4285 /*
4286 * Make sure this is not a temporary clearing of pte
4287 * by holding ptl and checking again. A R/M/W update
4288 * of pte involves: take ptl, clearing the pte so that
4289 * we don't have concurrent modification by hardware
4290 * followed by an update.
4291 */
4292 if (unlikely(pte_none(*vmf->pte)))
4293 ret = VM_FAULT_SIGBUS;
4294 else
4295 ret = VM_FAULT_NOPAGE;
4296
4297 pte_unmap_unlock(vmf->pte, vmf->ptl);
4298 }
4299 } else if (!(vmf->flags & FAULT_FLAG_WRITE))
4300 ret = do_read_fault(vmf);
4301 else if (!(vma->vm_flags & VM_SHARED))
4302 ret = do_cow_fault(vmf);
4303 else
4304 ret = do_shared_fault(vmf);
4305
4306 /* preallocated pagetable is unused: free it */
4307 if (vmf->prealloc_pte) {
4308 pte_free(vm_mm, vmf->prealloc_pte);
4309 vmf->prealloc_pte = NULL;
4310 }
4311 return ret;
4312}
4313
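/*
 * Take a reference on the faulted page, account the NUMA hinting fault, and
 * ask the memory policy layer which node (if any) the page should move to.
 */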
4314int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
4315 unsigned long addr, int page_nid, int *flags)
4316{
4317 get_page(page);
4318
4319 count_vm_numa_event(NUMA_HINT_FAULTS);
4320 if (page_nid == numa_node_id()) {
4321 count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
4322 *flags |= TNF_FAULT_LOCAL;
4323 }
4324
4325 return mpol_misplaced(page, vma, addr);
4326}
4327
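/*
 * Handle a NUMA hinting fault on a PROT_NONE pte: decide whether the page
 * should be migrated towards the faulting node, attempt the migration if so,
 * and otherwise just make the pte present and accessible again.
 */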
4328static vm_fault_t do_numa_page(struct vm_fault *vmf)
4329{
4330 struct vm_area_struct *vma = vmf->vma;
4331 struct page *page = NULL;
4332 int page_nid = NUMA_NO_NODE;
4333 int last_cpupid;
4334 int target_nid;
4335 pte_t pte, old_pte;
4336 bool was_writable = pte_savedwrite(vmf->orig_pte);
4337 int flags = 0;
4338
4339 /*
4340 * The "pte" at this point cannot be used safely without
4341 * validation through pte_unmap_same(). It's of NUMA type but
4342 * the pfn may be screwed if the read is non atomic.
4343	 * the pfn may be screwed if the read is non-atomic.
4344 vmf->ptl = pte_lockptr(vma->vm_mm, vmf->pmd);
4345 spin_lock(vmf->ptl);
4346 if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) {
4347 pte_unmap_unlock(vmf->pte, vmf->ptl);
4348 goto out;
4349 }
4350
4351 /* Get the normal PTE */
4352 old_pte = ptep_get(vmf->pte);
4353 pte = pte_modify(old_pte, vma->vm_page_prot);
4354
4355 page = vm_normal_page(vma, vmf->address, pte);
4356 if (!page)
4357 goto out_map;
4358
4359 /* TODO: handle PTE-mapped THP */
4360 if (PageCompound(page))
4361 goto out_map;
4362
4363 /*
4364 * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
4365 * much anyway since they can be in shared cache state. This misses
4366 * the case where a mapping is writable but the process never writes
4367 * to it but pte_write gets cleared during protection updates and
4368 * pte_dirty has unpredictable behaviour between PTE scan updates,
4369 * background writeback, dirty balancing and application behaviour.
4370 */
4371 if (!was_writable)
4372 flags |= TNF_NO_GROUP;
4373
4374 /*
4375 * Flag if the page is shared between multiple address spaces. This
4376 * is later used when determining whether to group tasks together
4377 */
4378 if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED))
4379 flags |= TNF_SHARED;
4380
4381 last_cpupid = page_cpupid_last(page);
4382 page_nid = page_to_nid(page);
4383 target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid,
4384 &flags);
4385 if (target_nid == NUMA_NO_NODE) {
4386 put_page(page);
4387 goto out_map;
4388 }
4389 pte_unmap_unlock(vmf->pte, vmf->ptl);
4390
4391 /* Migrate to the requested node */
4392 if (migrate_misplaced_page(page, vma, target_nid)) {
4393 page_nid = target_nid;
4394 flags |= TNF_MIGRATED;
4395 } else {
4396 flags |= TNF_MIGRATE_FAIL;
4397 vmf->pte = pte_offset_map(vmf->pmd, vmf->address);
4398 spin_lock(vmf->ptl);
4399 if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) {
4400 pte_unmap_unlock(vmf->pte, vmf->ptl);
4401 goto out;
4402 }
4403 goto out_map;
4404 }
4405
4406out:
4407 if (page_nid != NUMA_NO_NODE)
4408 task_numa_fault(last_cpupid, page_nid, 1, flags);
4409 return 0;
4410out_map:
4411 /*
4412	 * Make it present again. Depending on how the arch implements
4413	 * non-accessible ptes, some may still allow access by kernel mode.
4414 */
4415 old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte);
4416 pte = pte_modify(old_pte, vma->vm_page_prot);
4417 pte = pte_mkyoung(pte);
4418 if (was_writable)
4419 pte = pte_mkwrite(pte);
4420 ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte);
4421 update_mmu_cache(vma, vmf->address, vmf->pte);
4422 pte_unmap_unlock(vmf->pte, vmf->ptl);
4423 goto out;
4424}
4425
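/*
 * First touch of a pmd-sized hole: allocate a transparent huge page for
 * anonymous VMAs, or let the VMA's ->huge_fault() try, falling back to
 * ordinary ptes otherwise.
 */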
4426static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf)
4427{
4428 if (vma_is_anonymous(vmf->vma))
4429 return do_huge_pmd_anonymous_page(vmf);
4430 if (vmf->vma->vm_ops->huge_fault)
4431 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
4432 return VM_FAULT_FALLBACK;
4433}
4434
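/*
 * Write fault on an existing huge pmd: handle anonymous THP copy-on-write
 * (or hand it to userfaultfd), give the VMA's ->huge_fault() a chance, and
 * split the pmd so the fault can be retried at pte level if neither applies.
 */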
4435/* `inline' is required to avoid gcc 4.1.2 build error */
4436static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf)
4437{
4438 if (vma_is_anonymous(vmf->vma)) {
4439 if (userfaultfd_huge_pmd_wp(vmf->vma, vmf->orig_pmd))
4440 return handle_userfault(vmf, VM_UFFD_WP);
4441 return do_huge_pmd_wp_page(vmf);
4442 }
4443 if (vmf->vma->vm_ops->huge_fault) {
4444 vm_fault_t ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
4445
4446 if (!(ret & VM_FAULT_FALLBACK))
4447 return ret;
4448 }
4449
4450 /* COW or write-notify handled on pte level: split pmd. */
4451 __split_huge_pmd(vmf->vma, vmf->pmd, vmf->address, false, NULL);
4452
4453 return VM_FAULT_FALLBACK;
4454}
4455
4456static vm_fault_t create_huge_pud(struct vm_fault *vmf)
4457{
4458#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
4459 defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
4460 /* No support for anonymous transparent PUD pages yet */
4461 if (vma_is_anonymous(vmf->vma))
4462 goto split;
4463 if (vmf->vma->vm_ops->huge_fault) {
4464 vm_fault_t ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
4465
4466 if (!(ret & VM_FAULT_FALLBACK))
4467 return ret;
4468 }
4469split:
4470 /* COW or write-notify not handled on PUD level: split pud.*/
4471 __split_huge_pud(vmf->vma, vmf->pud, vmf->address);
4472#endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
4473 return VM_FAULT_FALLBACK;
4474}
4475
4476static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
4477{
4478#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4479 /* No support for anonymous transparent PUD pages yet */
4480 if (vma_is_anonymous(vmf->vma))
4481 return VM_FAULT_FALLBACK;
4482 if (vmf->vma->vm_ops->huge_fault)
4483 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
4484#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
4485 return VM_FAULT_FALLBACK;
4486}
4487
4488/*
4489 * These routines also need to handle stuff like marking pages dirty
4490 * and/or accessed for architectures that don't do it in hardware (most
4491 * RISC architectures). The early dirtying is also good on the i386.
4492 *
4493 * There is also a hook called "update_mmu_cache()" that architectures
4494	 * with external mmu caches can use to update those (i.e. the Sparc or
4495 * PowerPC hashed page tables that act as extended TLBs).
4496 *
4497 * We enter with non-exclusive mmap_lock (to exclude vma changes, but allow
4498 * concurrent faults).
4499 *
4500 * The mmap_lock may have been released depending on flags and our return value.
4501 * See filemap_fault() and __lock_page_or_retry().
4502 */
4503static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
4504{
4505 pte_t entry;
4506
4507 if (unlikely(pmd_none(*vmf->pmd))) {
4508 /*
4509 * Leave __pte_alloc() until later: because vm_ops->fault may
4510		 * want to allocate a huge page, and if we expose the page table
4511 * for an instant, it will be difficult to retract from
4512 * concurrent faults and from rmap lookups.
4513 */
4514 vmf->pte = NULL;
4515 } else {
4516 /*
4517 * If a huge pmd materialized under us just retry later. Use
4518 * pmd_trans_unstable() via pmd_devmap_trans_unstable() instead
4519 * of pmd_trans_huge() to ensure the pmd didn't become
4520 * pmd_trans_huge under us and then back to pmd_none, as a
4521 * result of MADV_DONTNEED running immediately after a huge pmd
4522 * fault in a different thread of this mm, in turn leading to a
4523 * misleading pmd_trans_huge() retval. All we have to ensure is
4524 * that it is a regular pmd that we can walk with
4525 * pte_offset_map() and we can do that through an atomic read
4526 * in C, which is what pmd_trans_unstable() provides.
4527 */
4528 if (pmd_devmap_trans_unstable(vmf->pmd))
4529 return 0;
4530 /*
4531 * A regular pmd is established and it can't morph into a huge
4532 * pmd from under us anymore at this point because we hold the
4533 * mmap_lock read mode and khugepaged takes it in write mode.
4534 * So now it's safe to run pte_offset_map().
4535 */
4536 vmf->pte = pte_offset_map(vmf->pmd, vmf->address);
4537 vmf->orig_pte = *vmf->pte;
4538
4539 /*
4540 * some architectures can have larger ptes than wordsize,
4541		 * e.g. ppc44x-defconfig has CONFIG_PTE_64BIT=y and
4542 * CONFIG_32BIT=y, so READ_ONCE cannot guarantee atomic
4543 * accesses. The code below just needs a consistent view
4544 * for the ifs and we later double check anyway with the
4545 * ptl lock held. So here a barrier will do.
4546 */
4547 barrier();
4548 if (pte_none(vmf->orig_pte)) {
4549 pte_unmap(vmf->pte);
4550 vmf->pte = NULL;
4551 }
4552 }
4553
4554 if (!vmf->pte) {
4555 if (vma_is_anonymous(vmf->vma))
4556 return do_anonymous_page(vmf);
4557 else
4558 return do_fault(vmf);
4559 }
4560
4561 if (!pte_present(vmf->orig_pte))
4562 return do_swap_page(vmf);
4563
4564 if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
4565 return do_numa_page(vmf);
4566
4567 vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
4568 spin_lock(vmf->ptl);
4569 entry = vmf->orig_pte;
4570 if (unlikely(!pte_same(*vmf->pte, entry))) {
4571 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
4572 goto unlock;
4573 }
4574 if (vmf->flags & FAULT_FLAG_WRITE) {
4575 if (!pte_write(entry))
4576 return do_wp_page(vmf);
4577 entry = pte_mkdirty(entry);
4578 }
4579 entry = pte_mkyoung(entry);
4580 if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
4581 vmf->flags & FAULT_FLAG_WRITE)) {
4582 update_mmu_cache(vmf->vma, vmf->address, vmf->pte);
4583 } else {
4584 /* Skip spurious TLB flush for retried page fault */
4585 if (vmf->flags & FAULT_FLAG_TRIED)
4586 goto unlock;
4587 /*
4588 * This is needed only for protection faults but the arch code
4589 * is not yet telling us if this is a protection fault or not.
4590 * This still avoids useless tlb flushes for .text page faults
4591 * with threads.
4592 */
4593 if (vmf->flags & FAULT_FLAG_WRITE)
4594 flush_tlb_fix_spurious_fault(vmf->vma, vmf->address);
4595 }
4596unlock:
4597 pte_unmap_unlock(vmf->pte, vmf->ptl);
4598 return 0;
4599}
4600
4601/*
4602 * By the time we get here, we already hold the mm semaphore
4603 *
4604 * The mmap_lock may have been released depending on flags and our
4605 * return value. See filemap_fault() and __lock_page_or_retry().
4606 */
4607static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
4608 unsigned long address, unsigned int flags)
4609{
4610 struct vm_fault vmf = {
4611 .vma = vma,
4612 .address = address & PAGE_MASK,
4613 .flags = flags,
4614 .pgoff = linear_page_index(vma, address),
4615 .gfp_mask = __get_fault_gfp_mask(vma),
4616 };
4617 unsigned int dirty = flags & FAULT_FLAG_WRITE;
4618 struct mm_struct *mm = vma->vm_mm;
4619 pgd_t *pgd;
4620 p4d_t *p4d;
4621 vm_fault_t ret;
4622
4623 pgd = pgd_offset(mm, address);
4624 p4d = p4d_alloc(mm, pgd, address);
4625 if (!p4d)
4626 return VM_FAULT_OOM;
4627
4628 vmf.pud = pud_alloc(mm, p4d, address);
4629 if (!vmf.pud)
4630 return VM_FAULT_OOM;
4631retry_pud:
4632 if (pud_none(*vmf.pud) && __transparent_hugepage_enabled(vma)) {
4633 ret = create_huge_pud(&vmf);
4634 if (!(ret & VM_FAULT_FALLBACK))
4635 return ret;
4636 } else {
4637 pud_t orig_pud = *vmf.pud;
4638
4639 barrier();
4640 if (pud_trans_huge(orig_pud) || pud_devmap(orig_pud)) {
4641
4642 /* NUMA case for anonymous PUDs would go here */
4643
4644 if (dirty && !pud_write(orig_pud)) {
4645 ret = wp_huge_pud(&vmf, orig_pud);
4646 if (!(ret & VM_FAULT_FALLBACK))
4647 return ret;
4648 } else {
4649 huge_pud_set_accessed(&vmf, orig_pud);
4650 return 0;
4651 }
4652 }
4653 }
4654
4655 vmf.pmd = pmd_alloc(mm, vmf.pud, address);
4656 if (!vmf.pmd)
4657 return VM_FAULT_OOM;
4658
4659 /* Huge pud page fault raced with pmd_alloc? */
4660 if (pud_trans_unstable(vmf.pud))
4661 goto retry_pud;
4662
4663 if (pmd_none(*vmf.pmd) && __transparent_hugepage_enabled(vma)) {
4664 ret = create_huge_pmd(&vmf);
4665 if (!(ret & VM_FAULT_FALLBACK))
4666 return ret;
4667 } else {
4668 vmf.orig_pmd = *vmf.pmd;
4669
4670 barrier();
4671 if (unlikely(is_swap_pmd(vmf.orig_pmd))) {
4672 VM_BUG_ON(thp_migration_supported() &&
4673 !is_pmd_migration_entry(vmf.orig_pmd));
4674 if (is_pmd_migration_entry(vmf.orig_pmd))
4675 pmd_migration_entry_wait(mm, vmf.pmd);
4676 return 0;
4677 }
4678 if (pmd_trans_huge(vmf.orig_pmd) || pmd_devmap(vmf.orig_pmd)) {
4679 if (pmd_protnone(vmf.orig_pmd) && vma_is_accessible(vma))
4680 return do_huge_pmd_numa_page(&vmf);
4681
4682 if (dirty && !pmd_write(vmf.orig_pmd)) {
4683 ret = wp_huge_pmd(&vmf);
4684 if (!(ret & VM_FAULT_FALLBACK))
4685 return ret;
4686 } else {
4687 huge_pmd_set_accessed(&vmf);
4688 return 0;
4689 }
4690 }
4691 }
4692
4693 return handle_pte_fault(&vmf);
4694}
4695
4696/**
4697 * mm_account_fault - Do page fault accounting
4698 *
4699 * @regs: the pt_regs struct pointer. When set to NULL, will skip accounting
4700 * of perf event counters, but we'll still do the per-task accounting to
4701 * the task who triggered this page fault.
4702 * @address: the faulted address.
4703 * @flags: the fault flags.
4704 * @ret: the fault retcode.
4705 *
4706 * This will take care of most of the page fault accounting. Meanwhile, it
4707 * will also include the PERF_COUNT_SW_PAGE_FAULTS_[MAJ|MIN] perf counter
4708 * updates. However, note that the handling of PERF_COUNT_SW_PAGE_FAULTS should
4709 * still be in per-arch page fault handlers at the entry of page fault.
4710 */
4711static inline void mm_account_fault(struct pt_regs *regs,
4712 unsigned long address, unsigned int flags,
4713 vm_fault_t ret)
4714{
4715 bool major;
4716
4717 /*
4718 * We don't do accounting for some specific faults:
4719 *
4720 * - Unsuccessful faults (e.g. when the address wasn't valid). That
4721 * includes arch_vma_access_permitted() failing before reaching here.
4722 * So this is not a "this many hardware page faults" counter. We
4723 * should use the hw profiling for that.
4724 *
4725 * - Incomplete faults (VM_FAULT_RETRY). They will only be counted
4726 * once they're completed.
4727 */
4728 if (ret & (VM_FAULT_ERROR | VM_FAULT_RETRY))
4729 return;
4730
4731 /*
4732 * We define the fault as a major fault when the final successful fault
4733 * is VM_FAULT_MAJOR, or if it retried (which implies that we couldn't
4734 * handle it immediately previously).
4735 */
4736 major = (ret & VM_FAULT_MAJOR) || (flags & FAULT_FLAG_TRIED);
4737
4738 if (major)
4739 current->maj_flt++;
4740 else
4741 current->min_flt++;
4742
4743 /*
4744 * If the fault is done for GUP, regs will be NULL. We only do the
4745 * accounting for the per thread fault counters who triggered the
4746 * fault, and we skip the perf event updates.
4747 */
4748 if (!regs)
4749 return;
4750
4751 if (major)
4752 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
4753 else
4754 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
4755}
4756
4757/*
4758 * By the time we get here, we already hold the mm semaphore
4759 *
4760 * The mmap_lock may have been released depending on flags and our
4761 * return value. See filemap_fault() and __lock_page_or_retry().
4762 */
4763vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
4764 unsigned int flags, struct pt_regs *regs)
4765{
4766 vm_fault_t ret;
4767
4768 __set_current_state(TASK_RUNNING);
4769
4770 count_vm_event(PGFAULT);
4771 count_memcg_event_mm(vma->vm_mm, PGFAULT);
4772
4773 /* do counter updates before entering really critical section. */
4774 check_sync_rss_stat(current);
4775
4776 if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
4777 flags & FAULT_FLAG_INSTRUCTION,
4778 flags & FAULT_FLAG_REMOTE))
4779 return VM_FAULT_SIGSEGV;
4780
4781 /*
4782 * Enable the memcg OOM handling for faults triggered in user
4783 * space. Kernel faults are handled more gracefully.
4784 */
4785 if (flags & FAULT_FLAG_USER)
4786 mem_cgroup_enter_user_fault();
4787
4788 if (unlikely(is_vm_hugetlb_page(vma)))
4789 ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
4790 else
4791 ret = __handle_mm_fault(vma, address, flags);
4792
4793 if (flags & FAULT_FLAG_USER) {
4794 mem_cgroup_exit_user_fault();
4795 /*
4796 * The task may have entered a memcg OOM situation but
4797 * if the allocation error was handled gracefully (no
4798 * VM_FAULT_OOM), there is no need to kill anything.
4799 * Just clean up the OOM state peacefully.
4800 */
4801 if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
4802 mem_cgroup_oom_synchronize(false);
4803 }
4804
4805 mm_account_fault(regs, address, flags, ret);
4806
4807 return ret;
4808}
4809EXPORT_SYMBOL_GPL(handle_mm_fault);
4810
4811#ifndef __PAGETABLE_P4D_FOLDED
4812/*
4813 * Allocate p4d page table.
4814 * We've already handled the fast-path in-line.
4815 */
4816int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
4817{
4818 p4d_t *new = p4d_alloc_one(mm, address);
4819 if (!new)
4820 return -ENOMEM;
4821
4822 smp_wmb(); /* See comment in __pte_alloc */
4823
4824 spin_lock(&mm->page_table_lock);
4825 if (pgd_present(*pgd)) /* Another has populated it */
4826 p4d_free(mm, new);
4827 else
4828 pgd_populate(mm, pgd, new);
4829 spin_unlock(&mm->page_table_lock);
4830 return 0;
4831}
4832#endif /* __PAGETABLE_P4D_FOLDED */
4833
4834#ifndef __PAGETABLE_PUD_FOLDED
4835/*
4836 * Allocate page upper directory.
4837 * We've already handled the fast-path in-line.
4838 */
4839int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address)
4840{
4841 pud_t *new = pud_alloc_one(mm, address);
4842 if (!new)
4843 return -ENOMEM;
4844
4845 smp_wmb(); /* See comment in __pte_alloc */
4846
4847 spin_lock(&mm->page_table_lock);
4848 if (!p4d_present(*p4d)) {
4849 mm_inc_nr_puds(mm);
4850 p4d_populate(mm, p4d, new);
4851 } else /* Another has populated it */
4852 pud_free(mm, new);
4853 spin_unlock(&mm->page_table_lock);
4854 return 0;
4855}
4856#endif /* __PAGETABLE_PUD_FOLDED */
4857
4858#ifndef __PAGETABLE_PMD_FOLDED
4859/*
4860 * Allocate page middle directory.
4861 * We've already handled the fast-path in-line.
4862 */
4863int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
4864{
4865 spinlock_t *ptl;
4866 pmd_t *new = pmd_alloc_one(mm, address);
4867 if (!new)
4868 return -ENOMEM;
4869
4870 smp_wmb(); /* See comment in __pte_alloc */
4871
4872 ptl = pud_lock(mm, pud);
4873 if (!pud_present(*pud)) {
4874 mm_inc_nr_pmds(mm);
4875 pud_populate(mm, pud, new);
4876 } else /* Another has populated it */
4877 pmd_free(mm, new);
4878 spin_unlock(ptl);
4879 return 0;
4880}
4881#endif /* __PAGETABLE_PMD_FOLDED */
4882
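/*
 * Walk the page tables down to the pte (or huge pmd, when @pmdpp is given)
 * that maps @address, and return with the corresponding page-table lock held
 * in *@ptlp.  When @range is supplied, an MMU_NOTIFY_CLEAR notifier range
 * covering the entry is started and the caller is responsible for ending it.
 * Returns 0 on success, -EINVAL if no present entry is found.
 */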
4883int follow_invalidate_pte(struct mm_struct *mm, unsigned long address,
4884 struct mmu_notifier_range *range, pte_t **ptepp,
4885 pmd_t **pmdpp, spinlock_t **ptlp)
4886{
4887 pgd_t *pgd;
4888 p4d_t *p4d;
4889 pud_t *pud;
4890 pmd_t *pmd;
4891 pte_t *ptep;
4892
4893 pgd = pgd_offset(mm, address);
4894 if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
4895 goto out;
4896
4897 p4d = p4d_offset(pgd, address);
4898 if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d)))
4899 goto out;
4900
4901 pud = pud_offset(p4d, address);
4902 if (pud_none(*pud) || unlikely(pud_bad(*pud)))
4903 goto out;
4904
4905 pmd = pmd_offset(pud, address);
4906 VM_BUG_ON(pmd_trans_huge(*pmd));
4907
4908 if (pmd_huge(*pmd)) {
4909 if (!pmdpp)
4910 goto out;
4911
4912 if (range) {
4913 mmu_notifier_range_init(range, MMU_NOTIFY_CLEAR, 0,
4914 NULL, mm, address & PMD_MASK,
4915 (address & PMD_MASK) + PMD_SIZE);
4916 mmu_notifier_invalidate_range_start(range);
4917 }
4918 *ptlp = pmd_lock(mm, pmd);
4919 if (pmd_huge(*pmd)) {
4920 *pmdpp = pmd;
4921 return 0;
4922 }
4923 spin_unlock(*ptlp);
4924 if (range)
4925 mmu_notifier_invalidate_range_end(range);
4926 }
4927
4928 if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
4929 goto out;
4930
4931 if (range) {
4932 mmu_notifier_range_init(range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
4933 address & PAGE_MASK,
4934 (address & PAGE_MASK) + PAGE_SIZE);
4935 mmu_notifier_invalidate_range_start(range);
4936 }
4937 ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
4938 if (!pte_present(*ptep))
4939 goto unlock;
4940 *ptepp = ptep;
4941 return 0;
4942unlock:
4943 pte_unmap_unlock(ptep, *ptlp);
4944 if (range)
4945 mmu_notifier_invalidate_range_end(range);
4946out:
4947 return -EINVAL;
4948}
4949
4950/**
4951 * follow_pte - look up PTE at a user virtual address
4952 * @mm: the mm_struct of the target address space
4953 * @address: user virtual address
4954 * @ptepp: location to store found PTE
4955 * @ptlp: location to store the lock for the PTE
4956 *
4957 * On a successful return, the pointer to the PTE is stored in @ptepp;
4958 * the corresponding lock is taken and its location is stored in @ptlp.
4959 * The contents of the PTE are only stable until @ptlp is released;
4960 * any further use, if any, must be protected against invalidation
4961 * with MMU notifiers.
4962 *
4963 * Only IO mappings and raw PFN mappings are allowed. The mmap semaphore
4964 * should be taken for read.
4965 *
4966 * KVM uses this function. While it is arguably less bad than ``follow_pfn``,
4967 * it is not a good general-purpose API.
4968 *
4969 * Return: zero on success, -ve otherwise.
4970 */
4971int follow_pte(struct mm_struct *mm, unsigned long address,
4972 pte_t **ptepp, spinlock_t **ptlp)
4973{
4974 return follow_invalidate_pte(mm, address, NULL, ptepp, NULL, ptlp);
4975}
4976EXPORT_SYMBOL_GPL(follow_pte);
4977
4978/**
4979 * follow_pfn - look up PFN at a user virtual address
4980 * @vma: memory mapping
4981 * @address: user virtual address
4982 * @pfn: location to store found PFN
4983 *
4984 * Only IO mappings and raw PFN mappings are allowed.
4985 *
4986 * This function does not allow the caller to read the permissions
4987 * of the PTE. Do not use it.
4988 *
4989 * Return: zero and the pfn at @pfn on success, -ve otherwise.
4990 */
4991int follow_pfn(struct vm_area_struct *vma, unsigned long address,
4992 unsigned long *pfn)
4993{
4994 int ret = -EINVAL;
4995 spinlock_t *ptl;
4996 pte_t *ptep;
4997
4998 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
4999 return ret;
5000
5001 ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
5002 if (ret)
5003 return ret;
5004 *pfn = pte_pfn(*ptep);
5005 pte_unmap_unlock(ptep, ptl);
5006 return 0;
5007}
5008EXPORT_SYMBOL(follow_pfn);
5009
5010#ifdef CONFIG_HAVE_IOREMAP_PROT
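/*
 * Resolve the physical address and protection bits behind a VM_IO/VM_PFNMAP
 * mapping at @address.  Fails with -EINVAL if there is no present pte, or if
 * FOLL_WRITE is requested but the pte is not writable.
 */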
5011int follow_phys(struct vm_area_struct *vma,
5012 unsigned long address, unsigned int flags,
5013 unsigned long *prot, resource_size_t *phys)
5014{
5015 int ret = -EINVAL;
5016 pte_t *ptep, pte;
5017 spinlock_t *ptl;
5018
5019 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
5020 goto out;
5021
5022 if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
5023 goto out;
5024 pte = *ptep;
5025
5026 if ((flags & FOLL_WRITE) && !pte_write(pte))
5027 goto unlock;
5028
5029 *prot = pgprot_val(pte_pgprot(pte));
5030 *phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
5031
5032 ret = 0;
5033unlock:
5034 pte_unmap_unlock(ptep, ptl);
5035out:
5036 return ret;
5037}
5038
5039/**
5040 * generic_access_phys - generic implementation for iomem mmap access
5041 * @vma: the vma to access
5042 * @addr: userspace address, not relative offset within @vma
5043 * @buf: buffer to read/write
5044 * @len: length of transfer
5045 * @write: set to FOLL_WRITE when writing, otherwise reading
5046 *
5047 * This is a generic implementation for &vm_operations_struct.access for an
5048 * iomem mapping. This callback is used by access_process_vm() when the @vma is
5049 * not page based.
5050 */
5051int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
5052 void *buf, int len, int write)
5053{
5054 resource_size_t phys_addr;
5055 unsigned long prot = 0;
5056 void __iomem *maddr;
5057 pte_t *ptep, pte;
5058 spinlock_t *ptl;
5059 int offset = offset_in_page(addr);
5060 int ret = -EINVAL;
5061
5062 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
5063 return -EINVAL;
5064
5065retry:
5066 if (follow_pte(vma->vm_mm, addr, &ptep, &ptl))
5067 return -EINVAL;
5068 pte = *ptep;
5069 pte_unmap_unlock(ptep, ptl);
5070
5071 prot = pgprot_val(pte_pgprot(pte));
5072 phys_addr = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
5073
5074 if ((write & FOLL_WRITE) && !pte_write(pte))
5075 return -EINVAL;
5076
5077 maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
5078 if (!maddr)
5079 return -ENOMEM;
5080
5081 if (follow_pte(vma->vm_mm, addr, &ptep, &ptl))
5082 goto out_unmap;
5083
5084 if (!pte_same(pte, *ptep)) {
5085 pte_unmap_unlock(ptep, ptl);
5086 iounmap(maddr);
5087
5088 goto retry;
5089 }
5090
5091 if (write)
5092 memcpy_toio(maddr + offset, buf, len);
5093 else
5094 memcpy_fromio(buf, maddr + offset, len);
5095 ret = len;
5096 pte_unmap_unlock(ptep, ptl);
5097out_unmap:
5098 iounmap(maddr);
5099
5100 return ret;
5101}
5102EXPORT_SYMBOL_GPL(generic_access_phys);
5103#endif
5104
5105/*
5106 * Access another process' address space as given in mm.
5107 */
5108int __access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf,
5109 int len, unsigned int gup_flags)
5110{
5111 struct vm_area_struct *vma;
5112 void *old_buf = buf;
5113 int write = gup_flags & FOLL_WRITE;
5114
5115 if (mmap_read_lock_killable(mm))
5116 return 0;
5117
5118 /* ignore errors, just check how much was successfully transferred */
5119 while (len) {
5120 int bytes, ret, offset;
5121 void *maddr;
5122 struct page *page = NULL;
5123
5124 ret = get_user_pages_remote(mm, addr, 1,
5125 gup_flags, &page, &vma, NULL);
5126 if (ret <= 0) {
5127#ifndef CONFIG_HAVE_IOREMAP_PROT
5128 break;
5129#else
5130 /*
5131 * Check if this is a VM_IO | VM_PFNMAP VMA, which
5132 * we can access using slightly different code.
5133 */
5134 vma = vma_lookup(mm, addr);
5135 if (!vma)
5136 break;
5137 if (vma->vm_ops && vma->vm_ops->access)
5138 ret = vma->vm_ops->access(vma, addr, buf,
5139 len, write);
5140 if (ret <= 0)
5141 break;
5142 bytes = ret;
5143#endif
5144 } else {
5145 bytes = len;
5146 offset = addr & (PAGE_SIZE-1);
5147 if (bytes > PAGE_SIZE-offset)
5148 bytes = PAGE_SIZE-offset;
5149
5150 maddr = kmap(page);
5151 if (write) {
5152 copy_to_user_page(vma, page, addr,
5153 maddr + offset, buf, bytes);
5154 set_page_dirty_lock(page);
5155 } else {
5156 copy_from_user_page(vma, page, addr,
5157 buf, maddr + offset, bytes);
5158 }
5159 kunmap(page);
5160 put_page(page);
5161 }
5162 len -= bytes;
5163 buf += bytes;
5164 addr += bytes;
5165 }
5166 mmap_read_unlock(mm);
5167
5168 return buf - old_buf;
5169}
5170
5171/**
5172 * access_remote_vm - access another process' address space
5173 * @mm: the mm_struct of the target address space
5174 * @addr: start address to access
5175 * @buf: source or destination buffer
5176 * @len: number of bytes to transfer
5177 * @gup_flags: flags modifying lookup behaviour
5178 *
5179 * The caller must hold a reference on @mm.
5180 *
5181 * Return: number of bytes copied from source to destination.
5182 */
5183int access_remote_vm(struct mm_struct *mm, unsigned long addr,
5184 void *buf, int len, unsigned int gup_flags)
5185{
5186 return __access_remote_vm(mm, addr, buf, len, gup_flags);
5187}
5188
5189/*
5190 * Access another process' address space.
5191 * Source/target buffer must be kernel space,
5192 * Do not walk the page table directly, use get_user_pages
5193 */
5194int access_process_vm(struct task_struct *tsk, unsigned long addr,
5195 void *buf, int len, unsigned int gup_flags)
5196{
5197 struct mm_struct *mm;
5198 int ret;
5199
5200 mm = get_task_mm(tsk);
5201 if (!mm)
5202 return 0;
5203
5204 ret = __access_remote_vm(mm, addr, buf, len, gup_flags);
5205
5206 mmput(mm);
5207
5208 return ret;
5209}
5210EXPORT_SYMBOL_GPL(access_process_vm);
5211
5212/*
5213 * Print the name of a VMA.
5214 */
5215void print_vma_addr(char *prefix, unsigned long ip)
5216{
5217 struct mm_struct *mm = current->mm;
5218 struct vm_area_struct *vma;
5219
5220 /*
5221 * we might be running from an atomic context so we cannot sleep
5222 */
5223 if (!mmap_read_trylock(mm))
5224 return;
5225
5226 vma = find_vma(mm, ip);
5227 if (vma && vma->vm_file) {
5228 struct file *f = vma->vm_file;
5229 char *buf = (char *)__get_free_page(GFP_NOWAIT);
5230 if (buf) {
5231 char *p;
5232
5233 p = file_path(f, buf, PAGE_SIZE);
5234 if (IS_ERR(p))
5235 p = "?";
5236 printk("%s%s[%lx+%lx]", prefix, kbasename(p),
5237 vma->vm_start,
5238 vma->vm_end - vma->vm_start);
5239 free_page((unsigned long)buf);
5240 }
5241 }
5242 mmap_read_unlock(mm);
5243}
5244
5245#if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
5246void __might_fault(const char *file, int line)
5247{
5248 /*
5249 * Some code (nfs/sunrpc) uses socket ops on kernel memory while
5250	 * holding the mmap_lock; this is safe because kernel memory doesn't
5251 * get paged out, therefore we'll never actually fault, and the
5252 * below annotations will generate false positives.
5253 */
5254 if (uaccess_kernel())
5255 return;
5256 if (pagefault_disabled())
5257 return;
5258 __might_sleep(file, line, 0);
5259#if defined(CONFIG_DEBUG_ATOMIC_SLEEP)
5260 if (current->mm)
5261		might_lock_read(&current->mm->mmap_lock);
5262#endif
5263}
5264EXPORT_SYMBOL(__might_fault);
5265#endif
5266
5267#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
5268/*
5269 * Process all subpages of the specified huge page with the specified
5270 * operation. The target subpage will be processed last to keep its
5271 * cache lines hot.
5272 */
5273static inline void process_huge_page(
5274 unsigned long addr_hint, unsigned int pages_per_huge_page,
5275 void (*process_subpage)(unsigned long addr, int idx, void *arg),
5276 void *arg)
5277{
5278 int i, n, base, l;
5279 unsigned long addr = addr_hint &
5280 ~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
5281
5282 /* Process target subpage last to keep its cache lines hot */
5283 might_sleep();
5284 n = (addr_hint - addr) / PAGE_SIZE;
5285 if (2 * n <= pages_per_huge_page) {
5286 /* If target subpage in first half of huge page */
5287 base = 0;
5288 l = n;
5289 /* Process subpages at the end of huge page */
5290 for (i = pages_per_huge_page - 1; i >= 2 * n; i--) {
5291 cond_resched();
5292 process_subpage(addr + i * PAGE_SIZE, i, arg);
5293 }
5294 } else {
5295 /* If target subpage in second half of huge page */
5296 base = pages_per_huge_page - 2 * (pages_per_huge_page - n);
5297 l = pages_per_huge_page - n;
5298		/* Process subpages at the beginning of the huge page */
5299 for (i = 0; i < base; i++) {
5300 cond_resched();
5301 process_subpage(addr + i * PAGE_SIZE, i, arg);
5302 }
5303 }
5304 /*
5305 * Process remaining subpages in left-right-left-right pattern
5306 * towards the target subpage
5307 */
5308 for (i = 0; i < l; i++) {
5309 int left_idx = base + i;
5310 int right_idx = base + 2 * l - 1 - i;
5311
5312 cond_resched();
5313 process_subpage(addr + left_idx * PAGE_SIZE, left_idx, arg);
5314 cond_resched();
5315 process_subpage(addr + right_idx * PAGE_SIZE, right_idx, arg);
5316 }
5317}
5318
5319static void clear_gigantic_page(struct page *page,
5320 unsigned long addr,
5321 unsigned int pages_per_huge_page)
5322{
5323 int i;
5324 struct page *p = page;
5325
5326 might_sleep();
5327 for (i = 0; i < pages_per_huge_page;
5328 i++, p = mem_map_next(p, page, i)) {
5329 cond_resched();
5330 clear_user_highpage(p, addr + i * PAGE_SIZE);
5331 }
5332}
5333
5334static void clear_subpage(unsigned long addr, int idx, void *arg)
5335{
5336 struct page *page = arg;
5337
5338 clear_user_highpage(page + idx, addr);
5339}
5340
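/*
 * Zero a huge page.  Regular THP-sized pages go through process_huge_page()
 * so the subpage that actually faulted (@addr_hint) is cleared last and stays
 * hot in the cache; gigantic pages are simply cleared front to back.
 */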
5341void clear_huge_page(struct page *page,
5342 unsigned long addr_hint, unsigned int pages_per_huge_page)
5343{
5344 unsigned long addr = addr_hint &
5345 ~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
5346
5347 if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
5348 clear_gigantic_page(page, addr, pages_per_huge_page);
5349 return;
5350 }
5351
5352 process_huge_page(addr_hint, pages_per_huge_page, clear_subpage, page);
5353}
5354
5355static void copy_user_gigantic_page(struct page *dst, struct page *src,
5356 unsigned long addr,
5357 struct vm_area_struct *vma,
5358 unsigned int pages_per_huge_page)
5359{
5360 int i;
5361 struct page *dst_base = dst;
5362 struct page *src_base = src;
5363
5364 for (i = 0; i < pages_per_huge_page; ) {
5365 cond_resched();
5366 copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
5367
5368 i++;
5369 dst = mem_map_next(dst, dst_base, i);
5370 src = mem_map_next(src, src_base, i);
5371 }
5372}
5373
5374struct copy_subpage_arg {
5375 struct page *dst;
5376 struct page *src;
5377 struct vm_area_struct *vma;
5378};
5379
5380static void copy_subpage(unsigned long addr, int idx, void *arg)
5381{
5382 struct copy_subpage_arg *copy_arg = arg;
5383
5384 copy_user_highpage(copy_arg->dst + idx, copy_arg->src + idx,
5385 addr, copy_arg->vma);
5386}
5387
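/*
 * Copy a huge page for copy-on-write, again leaving the faulting subpage for
 * last via process_huge_page() and handling gigantic pages with a plain
 * linear copy.
 */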
5388void copy_user_huge_page(struct page *dst, struct page *src,
5389 unsigned long addr_hint, struct vm_area_struct *vma,
5390 unsigned int pages_per_huge_page)
5391{
5392 unsigned long addr = addr_hint &
5393 ~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
5394 struct copy_subpage_arg arg = {
5395 .dst = dst,
5396 .src = src,
5397 .vma = vma,
5398 };
5399
5400 if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
5401 copy_user_gigantic_page(dst, src, addr, vma,
5402 pages_per_huge_page);
5403 return;
5404 }
5405
5406 process_huge_page(addr_hint, pages_per_huge_page, copy_subpage, &arg);
5407}
5408
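/*
 * Copy user data into @dst_page one subpage at a time, using kmap_atomic()
 * when page faults must not be taken.  Returns the number of bytes that
 * could not be copied; 0 means the whole huge page was filled.
 */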
5409long copy_huge_page_from_user(struct page *dst_page,
5410 const void __user *usr_src,
5411 unsigned int pages_per_huge_page,
5412 bool allow_pagefault)
5413{
5414 void *src = (void *)usr_src;
5415 void *page_kaddr;
5416 unsigned long i, rc = 0;
5417 unsigned long ret_val = pages_per_huge_page * PAGE_SIZE;
5418 struct page *subpage = dst_page;
5419
5420 for (i = 0; i < pages_per_huge_page;
5421 i++, subpage = mem_map_next(subpage, dst_page, i)) {
5422 if (allow_pagefault)
5423 page_kaddr = kmap(subpage);
5424 else
5425 page_kaddr = kmap_atomic(subpage);
5426 rc = copy_from_user(page_kaddr,
5427 (const void __user *)(src + i * PAGE_SIZE),
5428 PAGE_SIZE);
5429 if (allow_pagefault)
5430 kunmap(subpage);
5431 else
5432 kunmap_atomic(page_kaddr);
5433
5434 ret_val -= (PAGE_SIZE - rc);
5435 if (rc)
5436 break;
5437
5438 cond_resched();
5439 }
5440 return ret_val;
5441}
5442#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
5443
5444#if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS
5445
5446static struct kmem_cache *page_ptl_cachep;
5447
5448void __init ptlock_cache_init(void)
5449{
5450 page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
5451 SLAB_PANIC, NULL);
5452}
5453
5454bool ptlock_alloc(struct page *page)
5455{
5456 spinlock_t *ptl;
5457
5458 ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
5459 if (!ptl)
5460 return false;
5461 page->ptl = ptl;
5462 return true;
5463}
5464
5465void ptlock_free(struct page *page)
5466{
5467 kmem_cache_free(page_ptl_cachep, page->ptl);
5468}
5469#endif
1/*
2 * linux/mm/memory.c
3 *
4 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
5 */
6
7/*
8 * demand-loading started 01.12.91 - seems it is high on the list of
9 * things wanted, and it should be easy to implement. - Linus
10 */
11
12/*
13 * Ok, demand-loading was easy, shared pages a little bit tricker. Shared
14 * pages started 02.12.91, seems to work. - Linus.
15 *
16 * Tested sharing by executing about 30 /bin/sh: under the old kernel it
17 * would have taken more than the 6M I have free, but it worked well as
18 * far as I could see.
19 *
20 * Also corrected some "invalidate()"s - I wasn't doing enough of them.
21 */
22
23/*
24 * Real VM (paging to/from disk) started 18.12.91. Much more work and
25 * thought has to go into this. Oh, well..
26 * 19.12.91 - works, somewhat. Sometimes I get faults, don't know why.
27 * Found it. Everything seems to work now.
28 * 20.12.91 - Ok, making the swap-device changeable like the root.
29 */
30
31/*
32 * 05.04.94 - Multi-page memory management added for v1.1.
33 * Idea by Alex Bligh (alex@cconcepts.co.uk)
34 *
35 * 16.07.99 - Support of BIGMEM added by Gerhard Wichert, Siemens AG
36 * (Gerhard.Wichert@pdb.siemens.de)
37 *
38 * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
39 */
40
41#include <linux/kernel_stat.h>
42#include <linux/mm.h>
43#include <linux/hugetlb.h>
44#include <linux/mman.h>
45#include <linux/swap.h>
46#include <linux/highmem.h>
47#include <linux/pagemap.h>
48#include <linux/ksm.h>
49#include <linux/rmap.h>
50#include <linux/export.h>
51#include <linux/delayacct.h>
52#include <linux/init.h>
53#include <linux/writeback.h>
54#include <linux/memcontrol.h>
55#include <linux/mmu_notifier.h>
56#include <linux/kallsyms.h>
57#include <linux/swapops.h>
58#include <linux/elf.h>
59#include <linux/gfp.h>
60
61#include <asm/io.h>
62#include <asm/pgalloc.h>
63#include <asm/uaccess.h>
64#include <asm/tlb.h>
65#include <asm/tlbflush.h>
66#include <asm/pgtable.h>
67
68#include "internal.h"
69
70#ifndef CONFIG_NEED_MULTIPLE_NODES
71/* use the per-pgdat data instead for discontigmem - mbligh */
72unsigned long max_mapnr;
73struct page *mem_map;
74
75EXPORT_SYMBOL(max_mapnr);
76EXPORT_SYMBOL(mem_map);
77#endif
78
79unsigned long num_physpages;
80/*
81 * A number of key systems in x86 including ioremap() rely on the assumption
82 * that high_memory defines the upper bound on direct map memory, then end
83 * of ZONE_NORMAL. Under CONFIG_DISCONTIG this means that max_low_pfn and
84 * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
85 * and ZONE_HIGHMEM.
86 */
87void * high_memory;
88
89EXPORT_SYMBOL(num_physpages);
90EXPORT_SYMBOL(high_memory);
91
92/*
93 * Randomize the address space (stacks, mmaps, brk, etc.).
94 *
95 * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
96 * as ancient (libc5 based) binaries can segfault. )
97 */
98int randomize_va_space __read_mostly =
99#ifdef CONFIG_COMPAT_BRK
100 1;
101#else
102 2;
103#endif
104
105static int __init disable_randmaps(char *s)
106{
107 randomize_va_space = 0;
108 return 1;
109}
110__setup("norandmaps", disable_randmaps);
111
112unsigned long zero_pfn __read_mostly;
113unsigned long highest_memmap_pfn __read_mostly;
114
115/*
116 * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
117 */
118static int __init init_zero_pfn(void)
119{
120 zero_pfn = page_to_pfn(ZERO_PAGE(0));
121 return 0;
122}
123core_initcall(init_zero_pfn);
124
125
126#if defined(SPLIT_RSS_COUNTING)
127
128void sync_mm_rss(struct mm_struct *mm)
129{
130 int i;
131
132 for (i = 0; i < NR_MM_COUNTERS; i++) {
133 if (current->rss_stat.count[i]) {
134 add_mm_counter(mm, i, current->rss_stat.count[i]);
135 current->rss_stat.count[i] = 0;
136 }
137 }
138 current->rss_stat.events = 0;
139}
140
141static void add_mm_counter_fast(struct mm_struct *mm, int member, int val)
142{
143 struct task_struct *task = current;
144
145 if (likely(task->mm == mm))
146 task->rss_stat.count[member] += val;
147 else
148 add_mm_counter(mm, member, val);
149}
150#define inc_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, 1)
151#define dec_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, -1)
152
153/* sync counter once per 64 page faults */
154#define TASK_RSS_EVENTS_THRESH (64)
155static void check_sync_rss_stat(struct task_struct *task)
156{
157 if (unlikely(task != current))
158 return;
159 if (unlikely(task->rss_stat.events++ > TASK_RSS_EVENTS_THRESH))
160 sync_mm_rss(task->mm);
161}
162#else /* SPLIT_RSS_COUNTING */
163
164#define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member)
165#define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member)
166
167static void check_sync_rss_stat(struct task_struct *task)
168{
169}
170
171#endif /* SPLIT_RSS_COUNTING */
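
/*
 * Illustrative sketch of how the fast counters above are meant to be used
 * (assumed caller, not quoted from this file): a page-fault path bumps the
 * per-task counter cheaply and relies on the periodic sync to fold the
 * value back into the mm:
 *
 *	inc_mm_counter_fast(mm, MM_ANONPAGES);	// per-task, no atomic op
 *	...
 *	check_sync_rss_stat(current);		// folds into mm every ~64 events
 */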
172
173#ifdef HAVE_GENERIC_MMU_GATHER
174
175static int tlb_next_batch(struct mmu_gather *tlb)
176{
177 struct mmu_gather_batch *batch;
178
179 batch = tlb->active;
180 if (batch->next) {
181 tlb->active = batch->next;
182 return 1;
183 }
184
185 batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
186 if (!batch)
187 return 0;
188
189 batch->next = NULL;
190 batch->nr = 0;
191 batch->max = MAX_GATHER_BATCH;
192
193 tlb->active->next = batch;
194 tlb->active = batch;
195
196 return 1;
197}
198
199/* tlb_gather_mmu
200 * Called to initialize an (on-stack) mmu_gather structure for page-table
201 * tear-down from @mm. The @fullmm argument is used when @mm is without
202 * users and we're going to destroy the full address space (exit/execve).
203 */
204void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
205{
206 tlb->mm = mm;
207
208 tlb->fullmm = fullmm;
209 tlb->need_flush = 0;
210 tlb->fast_mode = (num_possible_cpus() == 1);
211 tlb->local.next = NULL;
212 tlb->local.nr = 0;
213 tlb->local.max = ARRAY_SIZE(tlb->__pages);
214 tlb->active = &tlb->local;
215
216#ifdef CONFIG_HAVE_RCU_TABLE_FREE
217 tlb->batch = NULL;
218#endif
219}
220
221void tlb_flush_mmu(struct mmu_gather *tlb)
222{
223 struct mmu_gather_batch *batch;
224
225 if (!tlb->need_flush)
226 return;
227 tlb->need_flush = 0;
228 tlb_flush(tlb);
229#ifdef CONFIG_HAVE_RCU_TABLE_FREE
230 tlb_table_flush(tlb);
231#endif
232
233 if (tlb_fast_mode(tlb))
234 return;
235
236 for (batch = &tlb->local; batch; batch = batch->next) {
237 free_pages_and_swap_cache(batch->pages, batch->nr);
238 batch->nr = 0;
239 }
240 tlb->active = &tlb->local;
241}
242
243/* tlb_finish_mmu
244 * Called at the end of the shootdown operation to free up any resources
245 * that were required.
246 */
247void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
248{
249 struct mmu_gather_batch *batch, *next;
250
251 tlb_flush_mmu(tlb);
252
253 /* keep the page table cache within bounds */
254 check_pgt_cache();
255
256 for (batch = tlb->local.next; batch; batch = next) {
257 next = batch->next;
258 free_pages((unsigned long)batch, 0);
259 }
260 tlb->local.next = NULL;
261}
262
263/* __tlb_remove_page
264 * Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while
265 * handling the additional races in SMP caused by other CPUs caching valid
266 * mappings in their TLBs. Returns the number of free page slots left.
267 * When out of page slots we must call tlb_flush_mmu().
268 */
269int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
270{
271 struct mmu_gather_batch *batch;
272
273 VM_BUG_ON(!tlb->need_flush);
274
275 if (tlb_fast_mode(tlb)) {
276 free_page_and_swap_cache(page);
277 return 1; /* avoid calling tlb_flush_mmu() */
278 }
279
280 batch = tlb->active;
281 batch->pages[batch->nr++] = page;
282 if (batch->nr == batch->max) {
283 if (!tlb_next_batch(tlb))
284 return 0;
285 batch = tlb->active;
286 }
287 VM_BUG_ON(batch->nr > batch->max);
288
289 return batch->max - batch->nr;
290}
291
292#endif /* HAVE_GENERIC_MMU_GATHER */
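
/*
 * Typical usage of the mmu_gather API above, mirroring what zap_page_range()
 * does later in this file (simplified sketch; notifiers and RSS accounting
 * omitted):
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, 0);		// 0: not tearing down the whole mm
 *	unmap_single_vma(&tlb, vma, start, end, NULL);
 *	tlb_finish_mmu(&tlb, start, end);	// flush TLBs, free batched pages
 */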
293
294#ifdef CONFIG_HAVE_RCU_TABLE_FREE
295
296/*
297 * See the comment near struct mmu_table_batch.
298 */
299
300static void tlb_remove_table_smp_sync(void *arg)
301{
302 /* Simply deliver the interrupt */
303}
304
305static void tlb_remove_table_one(void *table)
306{
307 /*
308 * This isn't an RCU grace period and hence the page-tables cannot be
309 * assumed to be actually RCU-freed.
310 *
311 * It is however sufficient for software page-table walkers that rely on
312 * IRQ disabling. See the comment near struct mmu_table_batch.
313 */
314 smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
315 __tlb_remove_table(table);
316}
317
318static void tlb_remove_table_rcu(struct rcu_head *head)
319{
320 struct mmu_table_batch *batch;
321 int i;
322
323 batch = container_of(head, struct mmu_table_batch, rcu);
324
325 for (i = 0; i < batch->nr; i++)
326 __tlb_remove_table(batch->tables[i]);
327
328 free_page((unsigned long)batch);
329}
330
331void tlb_table_flush(struct mmu_gather *tlb)
332{
333 struct mmu_table_batch **batch = &tlb->batch;
334
335 if (*batch) {
336 call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
337 *batch = NULL;
338 }
339}
340
341void tlb_remove_table(struct mmu_gather *tlb, void *table)
342{
343 struct mmu_table_batch **batch = &tlb->batch;
344
345 tlb->need_flush = 1;
346
347 /*
348	 * When there are fewer than two users of this mm there cannot be a
349 * concurrent page-table walk.
350 */
351 if (atomic_read(&tlb->mm->mm_users) < 2) {
352 __tlb_remove_table(table);
353 return;
354 }
355
356 if (*batch == NULL) {
357 *batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
358 if (*batch == NULL) {
359 tlb_remove_table_one(table);
360 return;
361 }
362 (*batch)->nr = 0;
363 }
364 (*batch)->tables[(*batch)->nr++] = table;
365 if ((*batch)->nr == MAX_TABLE_BATCH)
366 tlb_table_flush(tlb);
367}
368
369#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
370
371/*
372 * If a p?d_bad entry is found while walking page tables, report
373 * the error, before resetting entry to p?d_none. Usually (but
374 * very seldom) called out from the p?d_none_or_clear_bad macros.
375 */
376
377void pgd_clear_bad(pgd_t *pgd)
378{
379 pgd_ERROR(*pgd);
380 pgd_clear(pgd);
381}
382
383void pud_clear_bad(pud_t *pud)
384{
385 pud_ERROR(*pud);
386 pud_clear(pud);
387}
388
389void pmd_clear_bad(pmd_t *pmd)
390{
391 pmd_ERROR(*pmd);
392 pmd_clear(pmd);
393}
394
395/*
396 * Note: this doesn't free the actual pages themselves. That
397 * has been handled earlier when unmapping all the memory regions.
398 */
399static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
400 unsigned long addr)
401{
402 pgtable_t token = pmd_pgtable(*pmd);
403 pmd_clear(pmd);
404 pte_free_tlb(tlb, token, addr);
405 tlb->mm->nr_ptes--;
406}
407
408static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
409 unsigned long addr, unsigned long end,
410 unsigned long floor, unsigned long ceiling)
411{
412 pmd_t *pmd;
413 unsigned long next;
414 unsigned long start;
415
416 start = addr;
417 pmd = pmd_offset(pud, addr);
418 do {
419 next = pmd_addr_end(addr, end);
420 if (pmd_none_or_clear_bad(pmd))
421 continue;
422 free_pte_range(tlb, pmd, addr);
423 } while (pmd++, addr = next, addr != end);
424
425 start &= PUD_MASK;
426 if (start < floor)
427 return;
428 if (ceiling) {
429 ceiling &= PUD_MASK;
430 if (!ceiling)
431 return;
432 }
433 if (end - 1 > ceiling - 1)
434 return;
435
436 pmd = pmd_offset(pud, start);
437 pud_clear(pud);
438 pmd_free_tlb(tlb, pmd, start);
439}
440
441static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
442 unsigned long addr, unsigned long end,
443 unsigned long floor, unsigned long ceiling)
444{
445 pud_t *pud;
446 unsigned long next;
447 unsigned long start;
448
449 start = addr;
450 pud = pud_offset(pgd, addr);
451 do {
452 next = pud_addr_end(addr, end);
453 if (pud_none_or_clear_bad(pud))
454 continue;
455 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
456 } while (pud++, addr = next, addr != end);
457
458 start &= PGDIR_MASK;
459 if (start < floor)
460 return;
461 if (ceiling) {
462 ceiling &= PGDIR_MASK;
463 if (!ceiling)
464 return;
465 }
466 if (end - 1 > ceiling - 1)
467 return;
468
469 pud = pud_offset(pgd, start);
470 pgd_clear(pgd);
471 pud_free_tlb(tlb, pud, start);
472}
473
474/*
475 * This function frees user-level page tables of a process.
476 *
477 * Must be called with pagetable lock held.
478 */
479void free_pgd_range(struct mmu_gather *tlb,
480 unsigned long addr, unsigned long end,
481 unsigned long floor, unsigned long ceiling)
482{
483 pgd_t *pgd;
484 unsigned long next;
485
486 /*
487 * The next few lines have given us lots of grief...
488 *
489 * Why are we testing PMD* at this top level? Because often
490 * there will be no work to do at all, and we'd prefer not to
491 * go all the way down to the bottom just to discover that.
492 *
493 * Why all these "- 1"s? Because 0 represents both the bottom
494 * of the address space and the top of it (using -1 for the
495 * top wouldn't help much: the masks would do the wrong thing).
496 * The rule is that addr 0 and floor 0 refer to the bottom of
497 * the address space, but end 0 and ceiling 0 refer to the top
498 * Comparisons need to use "end - 1" and "ceiling - 1" (though
499 * that end 0 case should be mythical).
500 *
501 * Wherever addr is brought up or ceiling brought down, we must
502 * be careful to reject "the opposite 0" before it confuses the
503 * subsequent tests. But what about where end is brought down
504 * by PMD_SIZE below? no, end can't go down to 0 there.
505 *
506 * Whereas we round start (addr) and ceiling down, by different
507 * masks at different levels, in order to test whether a table
508 * now has no other vmas using it, so can be freed, we don't
509 * bother to round floor or end up - the tests don't need that.
510 */
511
512 addr &= PMD_MASK;
513 if (addr < floor) {
514 addr += PMD_SIZE;
515 if (!addr)
516 return;
517 }
518 if (ceiling) {
519 ceiling &= PMD_MASK;
520 if (!ceiling)
521 return;
522 }
523 if (end - 1 > ceiling - 1)
524 end -= PMD_SIZE;
525 if (addr > end - 1)
526 return;
527
528 pgd = pgd_offset(tlb->mm, addr);
529 do {
530 next = pgd_addr_end(addr, end);
531 if (pgd_none_or_clear_bad(pgd))
532 continue;
533 free_pud_range(tlb, pgd, addr, next, floor, ceiling);
534 } while (pgd++, addr = next, addr != end);
535}
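
/*
 * Worked example for the rounding rules above (illustrative values, assuming
 * x86-64 with PMD_SIZE = 2MB): freeing the tables of a vma spanning
 * [0x00601000, 0x00603000) with floor = 0x00600000, ceiling = 0x00800000:
 *
 *	addr &= PMD_MASK	-> 0x00600000	(not below floor, keep going)
 *	ceiling &= PMD_MASK	-> 0x00800000	(non-zero, keep going)
 *	end - 1 <= ceiling - 1	-> end stays 0x00603000
 *
 * Deeper down, free_pmd_range() rounds start to PUD_MASK, finds it below
 * floor, and therefore leaves the shared pmd page in place: only tables that
 * lie wholly inside [floor, ceiling) are actually freed.
 */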
536
537void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
538 unsigned long floor, unsigned long ceiling)
539{
540 while (vma) {
541 struct vm_area_struct *next = vma->vm_next;
542 unsigned long addr = vma->vm_start;
543
544 /*
545 * Hide vma from rmap and truncate_pagecache before freeing
546 * pgtables
547 */
548 unlink_anon_vmas(vma);
549 unlink_file_vma(vma);
550
551 if (is_vm_hugetlb_page(vma)) {
552 hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
553 floor, next? next->vm_start: ceiling);
554 } else {
555 /*
556 * Optimization: gather nearby vmas into one call down
557 */
558 while (next && next->vm_start <= vma->vm_end + PMD_SIZE
559 && !is_vm_hugetlb_page(next)) {
560 vma = next;
561 next = vma->vm_next;
562 unlink_anon_vmas(vma);
563 unlink_file_vma(vma);
564 }
565 free_pgd_range(tlb, addr, vma->vm_end,
566 floor, next? next->vm_start: ceiling);
567 }
568 vma = next;
569 }
570}
571
572int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
573 pmd_t *pmd, unsigned long address)
574{
575 pgtable_t new = pte_alloc_one(mm, address);
576 int wait_split_huge_page;
577 if (!new)
578 return -ENOMEM;
579
580 /*
581 * Ensure all pte setup (eg. pte page lock and page clearing) are
582 * visible before the pte is made visible to other CPUs by being
583 * put into page tables.
584 *
585 * The other side of the story is the pointer chasing in the page
586 * table walking code (when walking the page table without locking;
587 * ie. most of the time). Fortunately, these data accesses consist
588 * of a chain of data-dependent loads, meaning most CPUs (alpha
589 * being the notable exception) will already guarantee loads are
590 * seen in-order. See the alpha page table accessors for the
591 * smp_read_barrier_depends() barriers in page table walking code.
592 */
593 smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
594
595 spin_lock(&mm->page_table_lock);
596 wait_split_huge_page = 0;
597 if (likely(pmd_none(*pmd))) { /* Has another populated it ? */
598 mm->nr_ptes++;
599 pmd_populate(mm, pmd, new);
600 new = NULL;
601 } else if (unlikely(pmd_trans_splitting(*pmd)))
602 wait_split_huge_page = 1;
603 spin_unlock(&mm->page_table_lock);
604 if (new)
605 pte_free(mm, new);
606 if (wait_split_huge_page)
607 wait_split_huge_page(vma->anon_vma, pmd);
608 return 0;
609}
610
611int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
612{
613 pte_t *new = pte_alloc_one_kernel(&init_mm, address);
614 if (!new)
615 return -ENOMEM;
616
617 smp_wmb(); /* See comment in __pte_alloc */
618
619 spin_lock(&init_mm.page_table_lock);
620 if (likely(pmd_none(*pmd))) { /* Has another populated it ? */
621 pmd_populate_kernel(&init_mm, pmd, new);
622 new = NULL;
623 } else
624 VM_BUG_ON(pmd_trans_splitting(*pmd));
625 spin_unlock(&init_mm.page_table_lock);
626 if (new)
627 pte_free_kernel(&init_mm, new);
628 return 0;
629}
630
631static inline void init_rss_vec(int *rss)
632{
633 memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
634}
635
636static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
637{
638 int i;
639
640 if (current->mm == mm)
641 sync_mm_rss(mm);
642 for (i = 0; i < NR_MM_COUNTERS; i++)
643 if (rss[i])
644 add_mm_counter(mm, i, rss[i]);
645}
646
647/*
648 * This function is called to print an error when a bad pte
649 * is found. For example, we might have a PFN-mapped pte in
650 * a region that doesn't allow it.
651 *
652 * The calling function must still handle the error.
653 */
654static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
655 pte_t pte, struct page *page)
656{
657 pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
658 pud_t *pud = pud_offset(pgd, addr);
659 pmd_t *pmd = pmd_offset(pud, addr);
660 struct address_space *mapping;
661 pgoff_t index;
662 static unsigned long resume;
663 static unsigned long nr_shown;
664 static unsigned long nr_unshown;
665
666 /*
667 * Allow a burst of 60 reports, then keep quiet for that minute;
668 * or allow a steady drip of one report per second.
669 */
670 if (nr_shown == 60) {
671 if (time_before(jiffies, resume)) {
672 nr_unshown++;
673 return;
674 }
675 if (nr_unshown) {
676 printk(KERN_ALERT
677 "BUG: Bad page map: %lu messages suppressed\n",
678 nr_unshown);
679 nr_unshown = 0;
680 }
681 nr_shown = 0;
682 }
683 if (nr_shown++ == 0)
684 resume = jiffies + 60 * HZ;
685
686 mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
687 index = linear_page_index(vma, addr);
688
689 printk(KERN_ALERT
690 "BUG: Bad page map in process %s pte:%08llx pmd:%08llx\n",
691 current->comm,
692 (long long)pte_val(pte), (long long)pmd_val(*pmd));
693 if (page)
694 dump_page(page);
695 printk(KERN_ALERT
696 "addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n",
697 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
698 /*
699 * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
700 */
701 if (vma->vm_ops)
702 print_symbol(KERN_ALERT "vma->vm_ops->fault: %s\n",
703 (unsigned long)vma->vm_ops->fault);
704 if (vma->vm_file && vma->vm_file->f_op)
705 print_symbol(KERN_ALERT "vma->vm_file->f_op->mmap: %s\n",
706 (unsigned long)vma->vm_file->f_op->mmap);
707 dump_stack();
708 add_taint(TAINT_BAD_PAGE);
709}
710
711static inline int is_cow_mapping(vm_flags_t flags)
712{
713 return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
714}
715
716#ifndef is_zero_pfn
717static inline int is_zero_pfn(unsigned long pfn)
718{
719 return pfn == zero_pfn;
720}
721#endif
722
723#ifndef my_zero_pfn
724static inline unsigned long my_zero_pfn(unsigned long addr)
725{
726 return zero_pfn;
727}
728#endif
729
730/*
731 * vm_normal_page -- This function gets the "struct page" associated with a pte.
732 *
733 * "Special" mappings do not wish to be associated with a "struct page" (either
734 * it doesn't exist, or it exists but they don't want to touch it). In this
735 * case, NULL is returned here. "Normal" mappings do have a struct page.
736 *
737 * There are 2 broad cases. Firstly, an architecture may define a pte_special()
738 * pte bit, in which case this function is trivial. Secondly, an architecture
739 * may not have a spare pte bit, which requires a more complicated scheme,
740 * described below.
741 *
742 * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
743 * special mapping (even if there are underlying and valid "struct pages").
744 * COWed pages of a VM_PFNMAP are always normal.
745 *
746 * The way we recognize COWed pages within VM_PFNMAP mappings is through the
747 * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
748 * set, and the vm_pgoff will point to the first PFN mapped: thus every special
749 * mapping will always honor the rule
750 *
751 * pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
752 *
753 * And for normal mappings this is false.
754 *
755 * This restricts such mappings to be a linear translation from virtual address
756 * to pfn. To get around this restriction, we allow arbitrary mappings so long
757 * as the vma is not a COW mapping; in that case, we know that all ptes are
758 * special (because none can have been COWed).
759 *
760 *
761 * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
762 *
763 * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
764 * page" backing, however the difference is that _all_ pages with a struct
765 * page (that is, those where pfn_valid is true) are refcounted and considered
766 * normal pages by the VM. The disadvantage is that pages are refcounted
767 * (which can be slower and simply not an option for some PFNMAP users). The
768 * advantage is that we don't have to follow the strict linearity rule of
769 * PFNMAP mappings in order to support COWable mappings.
770 *
771 */
772#ifdef __HAVE_ARCH_PTE_SPECIAL
773# define HAVE_PTE_SPECIAL 1
774#else
775# define HAVE_PTE_SPECIAL 0
776#endif
777struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
778 pte_t pte)
779{
780 unsigned long pfn = pte_pfn(pte);
781
782 if (HAVE_PTE_SPECIAL) {
783 if (likely(!pte_special(pte)))
784 goto check_pfn;
785 if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
786 return NULL;
787 if (!is_zero_pfn(pfn))
788 print_bad_pte(vma, addr, pte, NULL);
789 return NULL;
790 }
791
792 /* !HAVE_PTE_SPECIAL case follows: */
793
794 if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
795 if (vma->vm_flags & VM_MIXEDMAP) {
796 if (!pfn_valid(pfn))
797 return NULL;
798 goto out;
799 } else {
800 unsigned long off;
801 off = (addr - vma->vm_start) >> PAGE_SHIFT;
802 if (pfn == vma->vm_pgoff + off)
803 return NULL;
804 if (!is_cow_mapping(vma->vm_flags))
805 return NULL;
806 }
807 }
808
809 if (is_zero_pfn(pfn))
810 return NULL;
811check_pfn:
812 if (unlikely(pfn > highest_memmap_pfn)) {
813 print_bad_pte(vma, addr, pte, NULL);
814 return NULL;
815 }
816
817 /*
818 * NOTE! We still have PageReserved() pages in the page tables.
819 * eg. VDSO mappings can cause them to exist.
820 */
821out:
822 return pfn_to_page(pfn);
823}
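
/*
 * Numeric illustration of the VM_PFNMAP linearity rule documented above
 * (made-up values, assuming 4K pages): with vm_start = 0x7f0000000000 and
 * vm_pgoff = 0x100, a pte mapping vm_start + 0x3000 is treated as special
 * only if its pfn equals vm_pgoff + 3, i.e.
 *
 *	pfn == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
 *
 * A COWed page replaces that pfn and so breaks the identity, which is how
 * the !HAVE_PTE_SPECIAL path recognizes it as a normal page.
 */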
824
825/*
826 * copy one vm_area from one task to the other. Assumes the page tables
827 * already present in the new task have been cleared in the whole range
828 * covered by this vma.
829 */
830
831static inline unsigned long
832copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
833 pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
834 unsigned long addr, int *rss)
835{
836 unsigned long vm_flags = vma->vm_flags;
837 pte_t pte = *src_pte;
838 struct page *page;
839
840 /* pte contains position in swap or file, so copy. */
841 if (unlikely(!pte_present(pte))) {
842 if (!pte_file(pte)) {
843 swp_entry_t entry = pte_to_swp_entry(pte);
844
845 if (swap_duplicate(entry) < 0)
846 return entry.val;
847
848 /* make sure dst_mm is on swapoff's mmlist. */
849 if (unlikely(list_empty(&dst_mm->mmlist))) {
850 spin_lock(&mmlist_lock);
851 if (list_empty(&dst_mm->mmlist))
852 list_add(&dst_mm->mmlist,
853 &src_mm->mmlist);
854 spin_unlock(&mmlist_lock);
855 }
856 if (likely(!non_swap_entry(entry)))
857 rss[MM_SWAPENTS]++;
858 else if (is_migration_entry(entry)) {
859 page = migration_entry_to_page(entry);
860
861 if (PageAnon(page))
862 rss[MM_ANONPAGES]++;
863 else
864 rss[MM_FILEPAGES]++;
865
866 if (is_write_migration_entry(entry) &&
867 is_cow_mapping(vm_flags)) {
868 /*
869 * COW mappings require pages in both
870 * parent and child to be set to read.
871 */
872 make_migration_entry_read(&entry);
873 pte = swp_entry_to_pte(entry);
874 set_pte_at(src_mm, addr, src_pte, pte);
875 }
876 }
877 }
878 goto out_set_pte;
879 }
880
881 /*
882 * If it's a COW mapping, write protect it both
883 * in the parent and the child
884 */
885 if (is_cow_mapping(vm_flags)) {
886 ptep_set_wrprotect(src_mm, addr, src_pte);
887 pte = pte_wrprotect(pte);
888 }
889
890 /*
891 * If it's a shared mapping, mark it clean in
892 * the child
893 */
894 if (vm_flags & VM_SHARED)
895 pte = pte_mkclean(pte);
896 pte = pte_mkold(pte);
897
898 page = vm_normal_page(vma, addr, pte);
899 if (page) {
900 get_page(page);
901 page_dup_rmap(page);
902 if (PageAnon(page))
903 rss[MM_ANONPAGES]++;
904 else
905 rss[MM_FILEPAGES]++;
906 }
907
908out_set_pte:
909 set_pte_at(dst_mm, addr, dst_pte, pte);
910 return 0;
911}
912
913int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
914 pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
915 unsigned long addr, unsigned long end)
916{
917 pte_t *orig_src_pte, *orig_dst_pte;
918 pte_t *src_pte, *dst_pte;
919 spinlock_t *src_ptl, *dst_ptl;
920 int progress = 0;
921 int rss[NR_MM_COUNTERS];
922 swp_entry_t entry = (swp_entry_t){0};
923
924again:
925 init_rss_vec(rss);
926
927 dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
928 if (!dst_pte)
929 return -ENOMEM;
930 src_pte = pte_offset_map(src_pmd, addr);
931 src_ptl = pte_lockptr(src_mm, src_pmd);
932 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
933 orig_src_pte = src_pte;
934 orig_dst_pte = dst_pte;
935 arch_enter_lazy_mmu_mode();
936
937 do {
938 /*
939 * We are holding two locks at this point - either of them
940 * could generate latencies in another task on another CPU.
941 */
942 if (progress >= 32) {
943 progress = 0;
944 if (need_resched() ||
945 spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
946 break;
947 }
948 if (pte_none(*src_pte)) {
949 progress++;
950 continue;
951 }
952 entry.val = copy_one_pte(dst_mm, src_mm, dst_pte, src_pte,
953 vma, addr, rss);
954 if (entry.val)
955 break;
956 progress += 8;
957 } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
958
959 arch_leave_lazy_mmu_mode();
960 spin_unlock(src_ptl);
961 pte_unmap(orig_src_pte);
962 add_mm_rss_vec(dst_mm, rss);
963 pte_unmap_unlock(orig_dst_pte, dst_ptl);
964 cond_resched();
965
966 if (entry.val) {
967 if (add_swap_count_continuation(entry, GFP_KERNEL) < 0)
968 return -ENOMEM;
969 progress = 0;
970 }
971 if (addr != end)
972 goto again;
973 return 0;
974}
975
976static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
977 pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
978 unsigned long addr, unsigned long end)
979{
980 pmd_t *src_pmd, *dst_pmd;
981 unsigned long next;
982
983 dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
984 if (!dst_pmd)
985 return -ENOMEM;
986 src_pmd = pmd_offset(src_pud, addr);
987 do {
988 next = pmd_addr_end(addr, end);
989 if (pmd_trans_huge(*src_pmd)) {
990 int err;
991 VM_BUG_ON(next-addr != HPAGE_PMD_SIZE);
992 err = copy_huge_pmd(dst_mm, src_mm,
993 dst_pmd, src_pmd, addr, vma);
994 if (err == -ENOMEM)
995 return -ENOMEM;
996 if (!err)
997 continue;
998 /* fall through */
999 }
1000 if (pmd_none_or_clear_bad(src_pmd))
1001 continue;
1002 if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
1003 vma, addr, next))
1004 return -ENOMEM;
1005 } while (dst_pmd++, src_pmd++, addr = next, addr != end);
1006 return 0;
1007}
1008
1009static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1010 pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
1011 unsigned long addr, unsigned long end)
1012{
1013 pud_t *src_pud, *dst_pud;
1014 unsigned long next;
1015
1016 dst_pud = pud_alloc(dst_mm, dst_pgd, addr);
1017 if (!dst_pud)
1018 return -ENOMEM;
1019 src_pud = pud_offset(src_pgd, addr);
1020 do {
1021 next = pud_addr_end(addr, end);
1022 if (pud_none_or_clear_bad(src_pud))
1023 continue;
1024 if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
1025 vma, addr, next))
1026 return -ENOMEM;
1027 } while (dst_pud++, src_pud++, addr = next, addr != end);
1028 return 0;
1029}
1030
1031int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1032 struct vm_area_struct *vma)
1033{
1034 pgd_t *src_pgd, *dst_pgd;
1035 unsigned long next;
1036 unsigned long addr = vma->vm_start;
1037 unsigned long end = vma->vm_end;
1038 int ret;
1039
1040 /*
1041 * Don't copy ptes where a page fault will fill them correctly.
1042 * Fork becomes much lighter when there are big shared or private
1043 * readonly mappings. The tradeoff is that copy_page_range is more
1044 * efficient than faulting.
1045 */
1046 if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP|VM_INSERTPAGE))) {
1047 if (!vma->anon_vma)
1048 return 0;
1049 }
1050
1051 if (is_vm_hugetlb_page(vma))
1052 return copy_hugetlb_page_range(dst_mm, src_mm, vma);
1053
1054 if (unlikely(is_pfn_mapping(vma))) {
1055 /*
1056 * We do not free on error cases below as remove_vma
1057 * gets called on error from higher level routine
1058 */
1059 ret = track_pfn_vma_copy(vma);
1060 if (ret)
1061 return ret;
1062 }
1063
1064 /*
1065 * We need to invalidate the secondary MMU mappings only when
1066 * there could be a permission downgrade on the ptes of the
1067 * parent mm. And a permission downgrade will only happen if
1068 * is_cow_mapping() returns true.
1069 */
1070 if (is_cow_mapping(vma->vm_flags))
1071 mmu_notifier_invalidate_range_start(src_mm, addr, end);
1072
1073 ret = 0;
1074 dst_pgd = pgd_offset(dst_mm, addr);
1075 src_pgd = pgd_offset(src_mm, addr);
1076 do {
1077 next = pgd_addr_end(addr, end);
1078 if (pgd_none_or_clear_bad(src_pgd))
1079 continue;
1080 if (unlikely(copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
1081 vma, addr, next))) {
1082 ret = -ENOMEM;
1083 break;
1084 }
1085 } while (dst_pgd++, src_pgd++, addr = next, addr != end);
1086
1087 if (is_cow_mapping(vma->vm_flags))
1088 mmu_notifier_invalidate_range_end(src_mm,
1089 vma->vm_start, end);
1090 return ret;
1091}
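
/*
 * For orientation (paraphrased call site, not part of this file):
 * copy_page_range() is invoked from dup_mmap() at fork() time, once per vma
 * of the parent, roughly as:
 *
 *	retval = copy_page_range(mm, oldmm, mpnt);
 *
 * where mm is the child's mm_struct, oldmm the parent's and mpnt the vma
 * being duplicated.
 */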
1092
1093static unsigned long zap_pte_range(struct mmu_gather *tlb,
1094 struct vm_area_struct *vma, pmd_t *pmd,
1095 unsigned long addr, unsigned long end,
1096 struct zap_details *details)
1097{
1098 struct mm_struct *mm = tlb->mm;
1099 int force_flush = 0;
1100 int rss[NR_MM_COUNTERS];
1101 spinlock_t *ptl;
1102 pte_t *start_pte;
1103 pte_t *pte;
1104
1105again:
1106 init_rss_vec(rss);
1107 start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
1108 pte = start_pte;
1109 arch_enter_lazy_mmu_mode();
1110 do {
1111 pte_t ptent = *pte;
1112 if (pte_none(ptent)) {
1113 continue;
1114 }
1115
1116 if (pte_present(ptent)) {
1117 struct page *page;
1118
1119 page = vm_normal_page(vma, addr, ptent);
1120 if (unlikely(details) && page) {
1121 /*
1122 * unmap_shared_mapping_pages() wants to
1123 * invalidate cache without truncating:
1124 * unmap shared but keep private pages.
1125 */
1126 if (details->check_mapping &&
1127 details->check_mapping != page->mapping)
1128 continue;
1129 /*
1130 * Each page->index must be checked when
1131 * invalidating or truncating nonlinear.
1132 */
1133 if (details->nonlinear_vma &&
1134 (page->index < details->first_index ||
1135 page->index > details->last_index))
1136 continue;
1137 }
1138 ptent = ptep_get_and_clear_full(mm, addr, pte,
1139 tlb->fullmm);
1140 tlb_remove_tlb_entry(tlb, pte, addr);
1141 if (unlikely(!page))
1142 continue;
1143 if (unlikely(details) && details->nonlinear_vma
1144 && linear_page_index(details->nonlinear_vma,
1145 addr) != page->index)
1146 set_pte_at(mm, addr, pte,
1147 pgoff_to_pte(page->index));
1148 if (PageAnon(page))
1149 rss[MM_ANONPAGES]--;
1150 else {
1151 if (pte_dirty(ptent))
1152 set_page_dirty(page);
1153 if (pte_young(ptent) &&
1154 likely(!VM_SequentialReadHint(vma)))
1155 mark_page_accessed(page);
1156 rss[MM_FILEPAGES]--;
1157 }
1158 page_remove_rmap(page);
1159 if (unlikely(page_mapcount(page) < 0))
1160 print_bad_pte(vma, addr, ptent, page);
1161 force_flush = !__tlb_remove_page(tlb, page);
1162 if (force_flush)
1163 break;
1164 continue;
1165 }
1166 /*
1167 * If details->check_mapping, we leave swap entries;
1168 * if details->nonlinear_vma, we leave file entries.
1169 */
1170 if (unlikely(details))
1171 continue;
1172 if (pte_file(ptent)) {
1173 if (unlikely(!(vma->vm_flags & VM_NONLINEAR)))
1174 print_bad_pte(vma, addr, ptent, NULL);
1175 } else {
1176 swp_entry_t entry = pte_to_swp_entry(ptent);
1177
1178 if (!non_swap_entry(entry))
1179 rss[MM_SWAPENTS]--;
1180 else if (is_migration_entry(entry)) {
1181 struct page *page;
1182
1183 page = migration_entry_to_page(entry);
1184
1185 if (PageAnon(page))
1186 rss[MM_ANONPAGES]--;
1187 else
1188 rss[MM_FILEPAGES]--;
1189 }
1190 if (unlikely(!free_swap_and_cache(entry)))
1191 print_bad_pte(vma, addr, ptent, NULL);
1192 }
1193 pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
1194 } while (pte++, addr += PAGE_SIZE, addr != end);
1195
1196 add_mm_rss_vec(mm, rss);
1197 arch_leave_lazy_mmu_mode();
1198 pte_unmap_unlock(start_pte, ptl);
1199
1200 /*
1201 * mmu_gather ran out of room to batch pages, we break out of
1202 * the PTE lock to avoid doing the potential expensive TLB invalidate
1203 * and page-free while holding it.
1204 */
1205 if (force_flush) {
1206 force_flush = 0;
1207 tlb_flush_mmu(tlb);
1208 if (addr != end)
1209 goto again;
1210 }
1211
1212 return addr;
1213}
1214
1215static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
1216 struct vm_area_struct *vma, pud_t *pud,
1217 unsigned long addr, unsigned long end,
1218 struct zap_details *details)
1219{
1220 pmd_t *pmd;
1221 unsigned long next;
1222
1223 pmd = pmd_offset(pud, addr);
1224 do {
1225 next = pmd_addr_end(addr, end);
1226 if (pmd_trans_huge(*pmd)) {
1227 if (next - addr != HPAGE_PMD_SIZE) {
1228#ifdef CONFIG_DEBUG_VM
1229 if (!rwsem_is_locked(&tlb->mm->mmap_sem)) {
1230 pr_err("%s: mmap_sem is unlocked! addr=0x%lx end=0x%lx vma->vm_start=0x%lx vma->vm_end=0x%lx\n",
1231 __func__, addr, end,
1232 vma->vm_start,
1233 vma->vm_end);
1234 BUG();
1235 }
1236#endif
1237 split_huge_page_pmd(vma->vm_mm, pmd);
1238 } else if (zap_huge_pmd(tlb, vma, pmd, addr))
1239 goto next;
1240 /* fall through */
1241 }
1242 /*
1243 * Here there can be other concurrent MADV_DONTNEED or
1244 * trans huge page faults running, and if the pmd is
1245 * none or trans huge it can change under us. This is
1246 * because MADV_DONTNEED holds the mmap_sem in read
1247 * mode.
1248 */
1249 if (pmd_none_or_trans_huge_or_clear_bad(pmd))
1250 goto next;
1251 next = zap_pte_range(tlb, vma, pmd, addr, next, details);
1252next:
1253 cond_resched();
1254 } while (pmd++, addr = next, addr != end);
1255
1256 return addr;
1257}
1258
1259static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
1260 struct vm_area_struct *vma, pgd_t *pgd,
1261 unsigned long addr, unsigned long end,
1262 struct zap_details *details)
1263{
1264 pud_t *pud;
1265 unsigned long next;
1266
1267 pud = pud_offset(pgd, addr);
1268 do {
1269 next = pud_addr_end(addr, end);
1270 if (pud_none_or_clear_bad(pud))
1271 continue;
1272 next = zap_pmd_range(tlb, vma, pud, addr, next, details);
1273 } while (pud++, addr = next, addr != end);
1274
1275 return addr;
1276}
1277
1278static void unmap_page_range(struct mmu_gather *tlb,
1279 struct vm_area_struct *vma,
1280 unsigned long addr, unsigned long end,
1281 struct zap_details *details)
1282{
1283 pgd_t *pgd;
1284 unsigned long next;
1285
1286 if (details && !details->check_mapping && !details->nonlinear_vma)
1287 details = NULL;
1288
1289 BUG_ON(addr >= end);
1290 mem_cgroup_uncharge_start();
1291 tlb_start_vma(tlb, vma);
1292 pgd = pgd_offset(vma->vm_mm, addr);
1293 do {
1294 next = pgd_addr_end(addr, end);
1295 if (pgd_none_or_clear_bad(pgd))
1296 continue;
1297 next = zap_pud_range(tlb, vma, pgd, addr, next, details);
1298 } while (pgd++, addr = next, addr != end);
1299 tlb_end_vma(tlb, vma);
1300 mem_cgroup_uncharge_end();
1301}
1302
1303
1304static void unmap_single_vma(struct mmu_gather *tlb,
1305 struct vm_area_struct *vma, unsigned long start_addr,
1306 unsigned long end_addr,
1307 struct zap_details *details)
1308{
1309 unsigned long start = max(vma->vm_start, start_addr);
1310 unsigned long end;
1311
1312 if (start >= vma->vm_end)
1313 return;
1314 end = min(vma->vm_end, end_addr);
1315 if (end <= vma->vm_start)
1316 return;
1317
1318 if (vma->vm_file)
1319 uprobe_munmap(vma, start, end);
1320
1321 if (unlikely(is_pfn_mapping(vma)))
1322 untrack_pfn_vma(vma, 0, 0);
1323
1324 if (start != end) {
1325 if (unlikely(is_vm_hugetlb_page(vma))) {
1326 /*
1327 * It is undesirable to test vma->vm_file as it
1328 * should be non-null for valid hugetlb area.
1329 * However, vm_file will be NULL in the error
1330 * cleanup path of do_mmap_pgoff. When
1331 * hugetlbfs ->mmap method fails,
1332 * do_mmap_pgoff() nullifies vma->vm_file
1333 * before calling this function to clean up.
1334 * Since no pte has actually been setup, it is
1335 * safe to do nothing in this case.
1336 */
1337 if (vma->vm_file)
1338 unmap_hugepage_range(vma, start, end, NULL);
1339 } else
1340 unmap_page_range(tlb, vma, start, end, details);
1341 }
1342}
1343
1344/**
1345 * unmap_vmas - unmap a range of memory covered by a list of vma's
1346 * @tlb: address of the caller's struct mmu_gather
1347 * @vma: the starting vma
1348 * @start_addr: virtual address at which to start unmapping
1349 * @end_addr: virtual address at which to end unmapping
1350 *
1351 * Unmap all pages in the vma list.
1352 *
1353 * Only addresses between `start' and `end' will be unmapped.
1354 *
1355 * The VMA list must be sorted in ascending virtual address order.
1356 *
1357 * unmap_vmas() assumes that the caller will flush the whole unmapped address
1358 * range after unmap_vmas() returns. So the only responsibility here is to
1359 * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
1360 * drops the lock and schedules.
1361 */
1362void unmap_vmas(struct mmu_gather *tlb,
1363 struct vm_area_struct *vma, unsigned long start_addr,
1364 unsigned long end_addr)
1365{
1366 struct mm_struct *mm = vma->vm_mm;
1367
1368 mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
1369 for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
1370 unmap_single_vma(tlb, vma, start_addr, end_addr, NULL);
1371 mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
1372}
1373
1374/**
1375 * zap_page_range - remove user pages in a given range
1376 * @vma: vm_area_struct holding the applicable pages
1377 * @start: starting address of pages to zap
1378 * @size: number of bytes to zap
1379 * @details: details of nonlinear truncation or shared cache invalidation
1380 *
1381 * Caller must protect the VMA list
1382 */
1383void zap_page_range(struct vm_area_struct *vma, unsigned long start,
1384 unsigned long size, struct zap_details *details)
1385{
1386 struct mm_struct *mm = vma->vm_mm;
1387 struct mmu_gather tlb;
1388 unsigned long end = start + size;
1389
1390 lru_add_drain();
1391 tlb_gather_mmu(&tlb, mm, 0);
1392 update_hiwater_rss(mm);
1393 mmu_notifier_invalidate_range_start(mm, start, end);
1394 for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
1395 unmap_single_vma(&tlb, vma, start, end, details);
1396 mmu_notifier_invalidate_range_end(mm, start, end);
1397 tlb_finish_mmu(&tlb, start, end);
1398}
1399
1400/**
1401 * zap_page_range_single - remove user pages in a given range
1402 * @vma: vm_area_struct holding the applicable pages
1403 * @address: starting address of pages to zap
1404 * @size: number of bytes to zap
1405 * @details: details of nonlinear truncation or shared cache invalidation
1406 *
1407 * The range must fit into one VMA.
1408 */
1409static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
1410 unsigned long size, struct zap_details *details)
1411{
1412 struct mm_struct *mm = vma->vm_mm;
1413 struct mmu_gather tlb;
1414 unsigned long end = address + size;
1415
1416 lru_add_drain();
1417 tlb_gather_mmu(&tlb, mm, 0);
1418 update_hiwater_rss(mm);
1419 mmu_notifier_invalidate_range_start(mm, address, end);
1420 unmap_single_vma(&tlb, vma, address, end, details);
1421 mmu_notifier_invalidate_range_end(mm, address, end);
1422 tlb_finish_mmu(&tlb, address, end);
1423}
1424
1425/**
1426 * zap_vma_ptes - remove ptes mapping the vma
1427 * @vma: vm_area_struct holding ptes to be zapped
1428 * @address: starting address of pages to zap
1429 * @size: number of bytes to zap
1430 *
1431 * This function only unmaps ptes assigned to VM_PFNMAP vmas.
1432 *
1433 * The entire address range must be fully contained within the vma.
1434 *
1435 * Returns 0 if successful.
1436 */
1437int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
1438 unsigned long size)
1439{
1440 if (address < vma->vm_start || address + size > vma->vm_end ||
1441 !(vma->vm_flags & VM_PFNMAP))
1442 return -1;
1443 zap_page_range_single(vma, address, size, NULL);
1444 return 0;
1445}
1446EXPORT_SYMBOL_GPL(zap_vma_ptes);
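
/*
 * Hypothetical driver-side use of zap_vma_ptes() (illustrative only): a
 * driver revoking a VM_PFNMAP mapping it previously populated could do
 *
 *	if (zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start))
 *		pr_warn("failed to zap ptes\n");
 *
 * remembering that the range must lie entirely inside the vma.
 */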
1447
1448/**
1449 * follow_page - look up a page descriptor from a user-virtual address
1450 * @vma: vm_area_struct mapping @address
1451 * @address: virtual address to look up
1452 * @flags: flags modifying lookup behaviour
1453 *
1454 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
1455 *
1456 * Returns the mapped (struct page *), %NULL if no mapping exists, or
1457 * an error pointer if there is a mapping to something not represented
1458 * by a page descriptor (see also vm_normal_page()).
1459 */
1460struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
1461 unsigned int flags)
1462{
1463 pgd_t *pgd;
1464 pud_t *pud;
1465 pmd_t *pmd;
1466 pte_t *ptep, pte;
1467 spinlock_t *ptl;
1468 struct page *page;
1469 struct mm_struct *mm = vma->vm_mm;
1470
1471 page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
1472 if (!IS_ERR(page)) {
1473 BUG_ON(flags & FOLL_GET);
1474 goto out;
1475 }
1476
1477 page = NULL;
1478 pgd = pgd_offset(mm, address);
1479 if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
1480 goto no_page_table;
1481
1482 pud = pud_offset(pgd, address);
1483 if (pud_none(*pud))
1484 goto no_page_table;
1485 if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
1486 BUG_ON(flags & FOLL_GET);
1487 page = follow_huge_pud(mm, address, pud, flags & FOLL_WRITE);
1488 goto out;
1489 }
1490 if (unlikely(pud_bad(*pud)))
1491 goto no_page_table;
1492
1493 pmd = pmd_offset(pud, address);
1494 if (pmd_none(*pmd))
1495 goto no_page_table;
1496 if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
1497 BUG_ON(flags & FOLL_GET);
1498 page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
1499 goto out;
1500 }
1501 if (pmd_trans_huge(*pmd)) {
1502 if (flags & FOLL_SPLIT) {
1503 split_huge_page_pmd(mm, pmd);
1504 goto split_fallthrough;
1505 }
1506 spin_lock(&mm->page_table_lock);
1507 if (likely(pmd_trans_huge(*pmd))) {
1508 if (unlikely(pmd_trans_splitting(*pmd))) {
1509 spin_unlock(&mm->page_table_lock);
1510 wait_split_huge_page(vma->anon_vma, pmd);
1511 } else {
1512 page = follow_trans_huge_pmd(mm, address,
1513 pmd, flags);
1514 spin_unlock(&mm->page_table_lock);
1515 goto out;
1516 }
1517 } else
1518 spin_unlock(&mm->page_table_lock);
1519 /* fall through */
1520 }
1521split_fallthrough:
1522 if (unlikely(pmd_bad(*pmd)))
1523 goto no_page_table;
1524
1525 ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
1526
1527 pte = *ptep;
1528 if (!pte_present(pte))
1529 goto no_page;
1530 if ((flags & FOLL_WRITE) && !pte_write(pte))
1531 goto unlock;
1532
1533 page = vm_normal_page(vma, address, pte);
1534 if (unlikely(!page)) {
1535 if ((flags & FOLL_DUMP) ||
1536 !is_zero_pfn(pte_pfn(pte)))
1537 goto bad_page;
1538 page = pte_page(pte);
1539 }
1540
1541 if (flags & FOLL_GET)
1542 get_page_foll(page);
1543 if (flags & FOLL_TOUCH) {
1544 if ((flags & FOLL_WRITE) &&
1545 !pte_dirty(pte) && !PageDirty(page))
1546 set_page_dirty(page);
1547 /*
1548 * pte_mkyoung() would be more correct here, but atomic care
1549 * is needed to avoid losing the dirty bit: it is easier to use
1550 * mark_page_accessed().
1551 */
1552 mark_page_accessed(page);
1553 }
1554 if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
1555 /*
1556 * The preliminary mapping check is mainly to avoid the
1557 * pointless overhead of lock_page on the ZERO_PAGE
1558 * which might bounce very badly if there is contention.
1559 *
1560 * If the page is already locked, we don't need to
1561 * handle it now - vmscan will handle it later if and
1562 * when it attempts to reclaim the page.
1563 */
1564 if (page->mapping && trylock_page(page)) {
1565 lru_add_drain(); /* push cached pages to LRU */
1566 /*
1567 * Because we lock page here and migration is
1568 * blocked by the pte's page reference, we need
1569 * only check for file-cache page truncation.
1570 */
1571 if (page->mapping)
1572 mlock_vma_page(page);
1573 unlock_page(page);
1574 }
1575 }
1576unlock:
1577 pte_unmap_unlock(ptep, ptl);
1578out:
1579 return page;
1580
1581bad_page:
1582 pte_unmap_unlock(ptep, ptl);
1583 return ERR_PTR(-EFAULT);
1584
1585no_page:
1586 pte_unmap_unlock(ptep, ptl);
1587 if (!pte_none(pte))
1588 return page;
1589
1590no_page_table:
1591 /*
1592 * When core dumping an enormous anonymous area that nobody
1593 * has touched so far, we don't want to allocate unnecessary pages or
1594 * page tables. Return error instead of NULL to skip handle_mm_fault,
1595 * then get_dump_page() will return NULL to leave a hole in the dump.
1596 * But we can only make this optimization where a hole would surely
1597 * be zero-filled if handle_mm_fault() actually did handle it.
1598 */
1599 if ((flags & FOLL_DUMP) &&
1600 (!vma->vm_ops || !vma->vm_ops->fault))
1601 return ERR_PTR(-EFAULT);
1602 return page;
1603}
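
/*
 * Illustrative follow_page() caller (sketch under the rules above; the
 * mmap_sem must be held across the lookup):
 *
 *	down_read(&mm->mmap_sem);
 *	page = follow_page(vma, addr, FOLL_GET);
 *	up_read(&mm->mmap_sem);
 *	if (page && !IS_ERR(page)) {
 *		...				// use the page
 *		put_page(page);			// drop the FOLL_GET reference
 *	}
 */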
1604
1605static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
1606{
1607 return stack_guard_page_start(vma, addr) ||
1608 stack_guard_page_end(vma, addr+PAGE_SIZE);
1609}
1610
1611/**
1612 * __get_user_pages() - pin user pages in memory
1613 * @tsk: task_struct of target task
1614 * @mm: mm_struct of target mm
1615 * @start: starting user address
1616 * @nr_pages: number of pages from start to pin
1617 * @gup_flags: flags modifying pin behaviour
1618 * @pages: array that receives pointers to the pages pinned.
1619 * Should be at least nr_pages long. Or NULL, if caller
1620 * only intends to ensure the pages are faulted in.
1621 * @vmas: array of pointers to vmas corresponding to each page.
1622 * Or NULL if the caller does not require them.
1623 * @nonblocking: whether waiting for disk IO or mmap_sem contention
1624 *
1625 * Returns number of pages pinned. This may be fewer than the number
1626 * requested. If nr_pages is 0 or negative, returns 0. If no pages
1627 * were pinned, returns -errno. Each page returned must be released
1628 * with a put_page() call when it is finished with. vmas will only
1629 * remain valid while mmap_sem is held.
1630 *
1631 * Must be called with mmap_sem held for read or write.
1632 *
1633 * __get_user_pages walks a process's page tables and takes a reference to
1634 * each struct page that each user address corresponds to at a given
1635 * instant. That is, it takes the page that would be accessed if a user
1636 * thread accesses the given user virtual address at that instant.
1637 *
1638 * This does not guarantee that the page exists in the user mappings when
1639 * __get_user_pages returns, and there may even be a completely different
1640 * page there in some cases (eg. if mmapped pagecache has been invalidated
1641 * and subsequently re-faulted). However it does guarantee that the page
1642 * won't be freed completely. And mostly callers simply care that the page
1643 * contains data that was valid *at some point in time*. Typically, an IO
1644 * or similar operation cannot guarantee anything stronger anyway because
1645 * locks can't be held over the syscall boundary.
1646 *
1647 * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
1648 * the page is written to, set_page_dirty (or set_page_dirty_lock, as
1649 * appropriate) must be called after the page is finished with, and
1650 * before put_page is called.
1651 *
1652 * If @nonblocking != NULL, __get_user_pages will not wait for disk IO
1653 * or mmap_sem contention, and if waiting is needed to pin all pages,
1654 * *@nonblocking will be set to 0.
1655 *
1656 * In most cases, get_user_pages or get_user_pages_fast should be used
1657 * instead of __get_user_pages. __get_user_pages should be used only if
1658 * you need some special @gup_flags.
1659 */
1660int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
1661 unsigned long start, int nr_pages, unsigned int gup_flags,
1662 struct page **pages, struct vm_area_struct **vmas,
1663 int *nonblocking)
1664{
1665 int i;
1666 unsigned long vm_flags;
1667
1668 if (nr_pages <= 0)
1669 return 0;
1670
1671 VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
1672
1673 /*
1674 * Require read or write permissions.
1675 * If FOLL_FORCE is set, we only require the "MAY" flags.
1676 */
1677 vm_flags = (gup_flags & FOLL_WRITE) ?
1678 (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
1679 vm_flags &= (gup_flags & FOLL_FORCE) ?
1680 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
1681 i = 0;
1682
1683 do {
1684 struct vm_area_struct *vma;
1685
1686 vma = find_extend_vma(mm, start);
1687 if (!vma && in_gate_area(mm, start)) {
1688 unsigned long pg = start & PAGE_MASK;
1689 pgd_t *pgd;
1690 pud_t *pud;
1691 pmd_t *pmd;
1692 pte_t *pte;
1693
1694 /* user gate pages are read-only */
1695 if (gup_flags & FOLL_WRITE)
1696 return i ? : -EFAULT;
1697 if (pg > TASK_SIZE)
1698 pgd = pgd_offset_k(pg);
1699 else
1700 pgd = pgd_offset_gate(mm, pg);
1701 BUG_ON(pgd_none(*pgd));
1702 pud = pud_offset(pgd, pg);
1703 BUG_ON(pud_none(*pud));
1704 pmd = pmd_offset(pud, pg);
1705 if (pmd_none(*pmd))
1706 return i ? : -EFAULT;
1707 VM_BUG_ON(pmd_trans_huge(*pmd));
1708 pte = pte_offset_map(pmd, pg);
1709 if (pte_none(*pte)) {
1710 pte_unmap(pte);
1711 return i ? : -EFAULT;
1712 }
1713 vma = get_gate_vma(mm);
1714 if (pages) {
1715 struct page *page;
1716
1717 page = vm_normal_page(vma, start, *pte);
1718 if (!page) {
1719 if (!(gup_flags & FOLL_DUMP) &&
1720 is_zero_pfn(pte_pfn(*pte)))
1721 page = pte_page(*pte);
1722 else {
1723 pte_unmap(pte);
1724 return i ? : -EFAULT;
1725 }
1726 }
1727 pages[i] = page;
1728 get_page(page);
1729 }
1730 pte_unmap(pte);
1731 goto next_page;
1732 }
1733
1734 if (!vma ||
1735 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
1736 !(vm_flags & vma->vm_flags))
1737 return i ? : -EFAULT;
1738
1739 if (is_vm_hugetlb_page(vma)) {
1740 i = follow_hugetlb_page(mm, vma, pages, vmas,
1741 &start, &nr_pages, i, gup_flags);
1742 continue;
1743 }
1744
1745 do {
1746 struct page *page;
1747 unsigned int foll_flags = gup_flags;
1748
1749 /*
1750 * If we have a pending SIGKILL, don't keep faulting
1751 * pages and potentially allocating memory.
1752 */
1753 if (unlikely(fatal_signal_pending(current)))
1754 return i ? i : -ERESTARTSYS;
1755
1756 cond_resched();
1757 while (!(page = follow_page(vma, start, foll_flags))) {
1758 int ret;
1759 unsigned int fault_flags = 0;
1760
1761 /* For mlock, just skip the stack guard page. */
1762 if (foll_flags & FOLL_MLOCK) {
1763 if (stack_guard_page(vma, start))
1764 goto next_page;
1765 }
1766 if (foll_flags & FOLL_WRITE)
1767 fault_flags |= FAULT_FLAG_WRITE;
1768 if (nonblocking)
1769 fault_flags |= FAULT_FLAG_ALLOW_RETRY;
1770 if (foll_flags & FOLL_NOWAIT)
1771 fault_flags |= (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT);
1772
1773 ret = handle_mm_fault(mm, vma, start,
1774 fault_flags);
1775
1776 if (ret & VM_FAULT_ERROR) {
1777 if (ret & VM_FAULT_OOM)
1778 return i ? i : -ENOMEM;
1779 if (ret & (VM_FAULT_HWPOISON |
1780 VM_FAULT_HWPOISON_LARGE)) {
1781 if (i)
1782 return i;
1783 else if (gup_flags & FOLL_HWPOISON)
1784 return -EHWPOISON;
1785 else
1786 return -EFAULT;
1787 }
1788 if (ret & VM_FAULT_SIGBUS)
1789 return i ? i : -EFAULT;
1790 BUG();
1791 }
1792
1793 if (tsk) {
1794 if (ret & VM_FAULT_MAJOR)
1795 tsk->maj_flt++;
1796 else
1797 tsk->min_flt++;
1798 }
1799
1800 if (ret & VM_FAULT_RETRY) {
1801 if (nonblocking)
1802 *nonblocking = 0;
1803 return i;
1804 }
1805
1806 /*
1807 * The VM_FAULT_WRITE bit tells us that
1808 * do_wp_page has broken COW when necessary,
1809 * even if maybe_mkwrite decided not to set
1810 * pte_write. We can thus safely do subsequent
1811 * page lookups as if they were reads. But only
1812 * do so when looping for pte_write is futile:
1813 * in some cases userspace may also be wanting
1814 * to write to the gotten user page, which a
1815 * read fault here might prevent (a readonly
1816 * page might get reCOWed by userspace write).
1817 */
1818 if ((ret & VM_FAULT_WRITE) &&
1819 !(vma->vm_flags & VM_WRITE))
1820 foll_flags &= ~FOLL_WRITE;
1821
1822 cond_resched();
1823 }
1824 if (IS_ERR(page))
1825 return i ? i : PTR_ERR(page);
1826 if (pages) {
1827 pages[i] = page;
1828
1829 flush_anon_page(vma, page, start);
1830 flush_dcache_page(page);
1831 }
1832next_page:
1833 if (vmas)
1834 vmas[i] = vma;
1835 i++;
1836 start += PAGE_SIZE;
1837 nr_pages--;
1838 } while (nr_pages && start < vma->vm_end);
1839 } while (nr_pages);
1840 return i;
1841}
1842EXPORT_SYMBOL(__get_user_pages);
1843
1844/*
1845 * fixup_user_fault() - manually resolve a user page fault
1846 * @tsk: the task_struct to use for page fault accounting, or
1847 * NULL if faults are not to be recorded.
1848 * @mm: mm_struct of target mm
1849 * @address: user address
1850 * @fault_flags: flags to pass down to handle_mm_fault()
1851 *
1852 * This is meant to be called in the specific scenario where, for locking
1853 * reasons, we try to access user memory in atomic context (within a
1854 * pagefault_disable() section), that access returns -EFAULT, and we want to
1855 * resolve the user fault before trying again.
1856 *
1857 * Typically this is meant to be used by the futex code.
1858 *
1859 * The main difference with get_user_pages() is that this function will
1860 * unconditionally call handle_mm_fault() which will in turn perform all the
1861 * necessary SW fixup of the dirty and young bits in the PTE, while
1862 * get_user_pages() only guarantees to update these in the struct page.
1863 *
1864 * This is important for some architectures where those bits also gate the
1865 * access permission to the page because they are maintained in software. On
1866 * such architectures, gup() will not be enough to make a subsequent access
1867 * succeed.
1868 *
1869 * This should be called with the mmap_sem held for read.
1870 */
1871int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
1872 unsigned long address, unsigned int fault_flags)
1873{
1874 struct vm_area_struct *vma;
1875 int ret;
1876
1877 vma = find_extend_vma(mm, address);
1878 if (!vma || address < vma->vm_start)
1879 return -EFAULT;
1880
1881 ret = handle_mm_fault(mm, vma, address, fault_flags);
1882 if (ret & VM_FAULT_ERROR) {
1883 if (ret & VM_FAULT_OOM)
1884 return -ENOMEM;
1885 if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
1886 return -EHWPOISON;
1887 if (ret & VM_FAULT_SIGBUS)
1888 return -EFAULT;
1889 BUG();
1890 }
1891 if (tsk) {
1892 if (ret & VM_FAULT_MAJOR)
1893 tsk->maj_flt++;
1894 else
1895 tsk->min_flt++;
1896 }
1897 return 0;
1898}
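
/*
 * Simplified retry loop in the spirit of the futex usage mentioned above
 * (hypothetical caller, mmap_sem held for read):
 *
 *	pagefault_disable();
 *	ret = __copy_from_user_inatomic(&val, uaddr, sizeof(val));
 *	pagefault_enable();
 *	if (ret && fixup_user_fault(current, mm, (unsigned long)uaddr, 0))
 *		return -EFAULT;
 *	...					// then retry the atomic access
 */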
1899
1900/*
1901 * get_user_pages() - pin user pages in memory
1902 * @tsk: the task_struct to use for page fault accounting, or
1903 * NULL if faults are not to be recorded.
1904 * @mm: mm_struct of target mm
1905 * @start: starting user address
1906 * @nr_pages: number of pages from start to pin
1907 * @write: whether pages will be written to by the caller
1908 * @force: whether to force write access even if user mapping is
1909 * readonly. This will result in the page being COWed even
1910 * in MAP_SHARED mappings. You do not want this.
1911 * @pages: array that receives pointers to the pages pinned.
1912 * Should be at least nr_pages long. Or NULL, if caller
1913 * only intends to ensure the pages are faulted in.
1914 * @vmas: array of pointers to vmas corresponding to each page.
1915 * Or NULL if the caller does not require them.
1916 *
1917 * Returns number of pages pinned. This may be fewer than the number
1918 * requested. If nr_pages is 0 or negative, returns 0. If no pages
1919 * were pinned, returns -errno. Each page returned must be released
1920 * with a put_page() call when it is finished with. vmas will only
1921 * remain valid while mmap_sem is held.
1922 *
1923 * Must be called with mmap_sem held for read or write.
1924 *
1925 * get_user_pages walks a process's page tables and takes a reference to
1926 * each struct page that each user address corresponds to at a given
1927 * instant. That is, it takes the page that would be accessed if a user
1928 * thread accesses the given user virtual address at that instant.
1929 *
1930 * This does not guarantee that the page exists in the user mappings when
1931 * get_user_pages returns, and there may even be a completely different
1932 * page there in some cases (eg. if mmapped pagecache has been invalidated
1933 * and subsequently re-faulted). However it does guarantee that the page
1934 * won't be freed completely. And mostly callers simply care that the page
1935 * contains data that was valid *at some point in time*. Typically, an IO
1936 * or similar operation cannot guarantee anything stronger anyway because
1937 * locks can't be held over the syscall boundary.
1938 *
1939 * If write=0, the page must not be written to. If the page is written to,
1940 * set_page_dirty (or set_page_dirty_lock, as appropriate) must be called
1941 * after the page is finished with, and before put_page is called.
1942 *
1943 * get_user_pages is typically used for fewer-copy IO operations, to get a
1944 * handle on the memory by some means other than accesses via the user virtual
1945 * addresses. The pages may be submitted for DMA to devices or accessed via
1946 * their kernel linear mapping (via the kmap APIs). Care should be taken to
1947 * use the correct cache flushing APIs.
1948 *
1949 * See also get_user_pages_fast, for performance critical applications.
1950 */
1951int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
1952 unsigned long start, int nr_pages, int write, int force,
1953 struct page **pages, struct vm_area_struct **vmas)
1954{
1955 int flags = FOLL_TOUCH;
1956
1957 if (pages)
1958 flags |= FOLL_GET;
1959 if (write)
1960 flags |= FOLL_WRITE;
1961 if (force)
1962 flags |= FOLL_FORCE;
1963
1964 return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,
1965 NULL);
1966}
1967EXPORT_SYMBOL(get_user_pages);
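
/*
 * Illustrative sketch (not part of the kernel proper): a typical caller
 * pins a user buffer, operates on the pages, then drops the references.
 * "uaddr" and "NPAGES" are hypothetical; write=1, force=0 here.
 *
 *	struct page *pages[NPAGES];
 *	int i, got;
 *
 *	down_read(&current->mm->mmap_sem);
 *	got = get_user_pages(current, current->mm, uaddr & PAGE_MASK,
 *			     NPAGES, 1, 0, pages, NULL);
 *	up_read(&current->mm->mmap_sem);
 *
 *	for (i = 0; i < got; i++) {
 *		set_page_dirty_lock(pages[i]);
 *		put_page(pages[i]);
 *	}
 *
 * set_page_dirty_lock() is only needed because the pages were written to;
 * read-only pins can drop the references with put_page() alone.
 */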
1968
1969/**
1970 * get_dump_page() - pin user page in memory while writing it to core dump
1971 * @addr: user address
1972 *
1973 * Returns struct page pointer of user page pinned for dump,
1974 * to be freed afterwards by page_cache_release() or put_page().
1975 *
1976 * Returns NULL on any kind of failure - a hole must then be inserted into
1977 * the corefile, to preserve alignment with its headers; and also returns
1978 * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
1979 * allowing a hole to be left in the corefile to save diskspace.
1980 *
1981 * Called without mmap_sem, but after all other threads have been killed.
1982 */
1983#ifdef CONFIG_ELF_CORE
1984struct page *get_dump_page(unsigned long addr)
1985{
1986 struct vm_area_struct *vma;
1987 struct page *page;
1988
1989 if (__get_user_pages(current, current->mm, addr, 1,
1990 FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
1991 NULL) < 1)
1992 return NULL;
1993 flush_cache_page(vma, addr, page_to_pfn(page));
1994 return page;
1995}
1996#endif /* CONFIG_ELF_CORE */
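
/*
 * Sketch of the coredump-style loop that consumes get_dump_page(); the
 * surrounding names and actions are illustrative, not the real binfmt_elf
 * code:
 *
 *	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
 *		struct page *page = get_dump_page(addr);
 *
 *		if (page) {
 *			write the page contents to the core file
 *			put_page(page);
 *		} else {
 *			seek forward by PAGE_SIZE, leaving a hole
 *		}
 *	}
 */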
1997
1998pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
1999 spinlock_t **ptl)
2000{
2001 pgd_t * pgd = pgd_offset(mm, addr);
2002 pud_t * pud = pud_alloc(mm, pgd, addr);
2003 if (pud) {
2004 pmd_t * pmd = pmd_alloc(mm, pud, addr);
2005 if (pmd) {
2006 VM_BUG_ON(pmd_trans_huge(*pmd));
2007 return pte_alloc_map_lock(mm, pmd, addr, ptl);
2008 }
2009 }
2010 return NULL;
2011}
2012
2013/*
2014 * This is the old fallback for page remapping.
2015 *
2016 * For historical reasons, it only allows reserved pages. Only
2017 * old drivers should use this, and they needed to mark their
2018 * pages reserved for the old functions anyway.
2019 */
2020static int insert_page(struct vm_area_struct *vma, unsigned long addr,
2021 struct page *page, pgprot_t prot)
2022{
2023 struct mm_struct *mm = vma->vm_mm;
2024 int retval;
2025 pte_t *pte;
2026 spinlock_t *ptl;
2027
2028 retval = -EINVAL;
2029 if (PageAnon(page))
2030 goto out;
2031 retval = -ENOMEM;
2032 flush_dcache_page(page);
2033 pte = get_locked_pte(mm, addr, &ptl);
2034 if (!pte)
2035 goto out;
2036 retval = -EBUSY;
2037 if (!pte_none(*pte))
2038 goto out_unlock;
2039
2040 /* Ok, finally just insert the thing.. */
2041 get_page(page);
2042 inc_mm_counter_fast(mm, MM_FILEPAGES);
2043 page_add_file_rmap(page);
2044 set_pte_at(mm, addr, pte, mk_pte(page, prot));
2045
2046 retval = 0;
2047 pte_unmap_unlock(pte, ptl);
2048 return retval;
2049out_unlock:
2050 pte_unmap_unlock(pte, ptl);
2051out:
2052 return retval;
2053}
2054
2055/**
2056 * vm_insert_page - insert single page into user vma
2057 * @vma: user vma to map to
2058 * @addr: target user address of this page
2059 * @page: source kernel page
2060 *
2061 * This allows drivers to insert individual pages they've allocated
2062 * into a user vma.
2063 *
2064 * The page has to be a nice clean _individual_ kernel allocation.
2065 * If you allocate a compound page, you need to have marked it as
2066 * such (__GFP_COMP), or manually just split the page up yourself
2067 * (see split_page()).
2068 *
2069 * NOTE! Traditionally this was done with "remap_pfn_range()" which
2070 * took an arbitrary page protection parameter. This doesn't allow
2071 * that. Your vma protection will have to be set up correctly, which
2072 * means that if you want a shared writable mapping, you'd better
2073 * ask for a shared writable mapping!
2074 *
2075 * The page does not need to be reserved.
2076 */
2077int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
2078 struct page *page)
2079{
2080 if (addr < vma->vm_start || addr >= vma->vm_end)
2081 return -EFAULT;
2082 if (!page_count(page))
2083 return -EINVAL;
2084 vma->vm_flags |= VM_INSERTPAGE;
2085 return insert_page(vma, addr, page, vma->vm_page_prot);
2086}
2087EXPORT_SYMBOL(vm_insert_page);
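
/*
 * Illustrative sketch, assuming "my_page" is a single, non-compound page
 * the driver allocated (e.g. with alloc_page(GFP_KERNEL)) and wants to
 * expose through a one-page mmap:
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		if (vma->vm_end - vma->vm_start != PAGE_SIZE)
 *			return -EINVAL;
 *		return vm_insert_page(vma, vma->vm_start, my_page);
 *	}
 *
 * Unlike remap_pfn_range(), the inserted page keeps its struct page, so
 * it does not need to be marked reserved.
 */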
2088
2089static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2090 unsigned long pfn, pgprot_t prot)
2091{
2092 struct mm_struct *mm = vma->vm_mm;
2093 int retval;
2094 pte_t *pte, entry;
2095 spinlock_t *ptl;
2096
2097 retval = -ENOMEM;
2098 pte = get_locked_pte(mm, addr, &ptl);
2099 if (!pte)
2100 goto out;
2101 retval = -EBUSY;
2102 if (!pte_none(*pte))
2103 goto out_unlock;
2104
2105 /* Ok, finally just insert the thing.. */
2106 entry = pte_mkspecial(pfn_pte(pfn, prot));
2107 set_pte_at(mm, addr, pte, entry);
2108 update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
2109
2110 retval = 0;
2111out_unlock:
2112 pte_unmap_unlock(pte, ptl);
2113out:
2114 return retval;
2115}
2116
2117/**
2118 * vm_insert_pfn - insert single pfn into user vma
2119 * @vma: user vma to map to
2120 * @addr: target user address of this page
2121 * @pfn: source kernel pfn
2122 *
2123 * Similar to vm_insert_page, this allows drivers to insert individual pages
2124 * they've allocated into a user vma. Same comments apply.
2125 *
2126 * This function should only be called from a vm_ops->fault handler, and
2127 * in that case the handler should return VM_FAULT_NOPAGE.
2128 *
2129 * vma cannot be a COW mapping.
2130 *
2131 * As this is called only for pages that do not currently exist, we
2132 * do not need to flush old virtual caches or the TLB.
2133 */
2134int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2135 unsigned long pfn)
2136{
2137 int ret;
2138 pgprot_t pgprot = vma->vm_page_prot;
2139 /*
2140 * Technically, architectures with pte_special can avoid all these
2141 * restrictions (same for remap_pfn_range). However we would like
2142 * consistency in testing and feature parity among all, so we should
2143 * try to keep these invariants in place for everybody.
2144 */
2145 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
2146 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
2147 (VM_PFNMAP|VM_MIXEDMAP));
2148 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
2149 BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
2150
2151 if (addr < vma->vm_start || addr >= vma->vm_end)
2152 return -EFAULT;
2153 if (track_pfn_vma_new(vma, &pgprot, pfn, PAGE_SIZE))
2154 return -EINVAL;
2155
2156 ret = insert_pfn(vma, addr, pfn, pgprot);
2157
2158 if (ret)
2159 untrack_pfn_vma(vma, pfn, PAGE_SIZE);
2160
2161 return ret;
2162}
2163EXPORT_SYMBOL(vm_insert_pfn);
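
/*
 * Illustrative sketch of a VM_PFNMAP fault handler; "my_base_pfn" is a
 * hypothetical driver-private base pfn for the mapped region:
 *
 *	static int my_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 *	{
 *		unsigned long addr = (unsigned long)vmf->virtual_address;
 *
 *		if (vm_insert_pfn(vma, addr, my_base_pfn + vmf->pgoff))
 *			return VM_FAULT_SIGBUS;
 *		return VM_FAULT_NOPAGE;
 *	}
 */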
2164
2165int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
2166 unsigned long pfn)
2167{
2168 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
2169
2170 if (addr < vma->vm_start || addr >= vma->vm_end)
2171 return -EFAULT;
2172
2173 /*
2174 * If we don't have pte special, then we have to use the pfn_valid()
2175 * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
2176 * refcount the page if pfn_valid is true (hence insert_page rather
2177 * than insert_pfn). If a zero_pfn were inserted into a VM_MIXEDMAP
2178 * without pte special, it would then be refcounted as a normal page.
2179 */
2180 if (!HAVE_PTE_SPECIAL && pfn_valid(pfn)) {
2181 struct page *page;
2182
2183 page = pfn_to_page(pfn);
2184 return insert_page(vma, addr, page, vma->vm_page_prot);
2185 }
2186 return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
2187}
2188EXPORT_SYMBOL(vm_insert_mixed);
2189
2190/*
2191 * maps a range of physical memory into the requested pages. the old
2192 * mappings are removed. any references to nonexistent pages result
2193 * in null mappings (currently treated as "copy-on-access").
2194 */
2195static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
2196 unsigned long addr, unsigned long end,
2197 unsigned long pfn, pgprot_t prot)
2198{
2199 pte_t *pte;
2200 spinlock_t *ptl;
2201
2202 pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
2203 if (!pte)
2204 return -ENOMEM;
2205 arch_enter_lazy_mmu_mode();
2206 do {
2207 BUG_ON(!pte_none(*pte));
2208 set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
2209 pfn++;
2210 } while (pte++, addr += PAGE_SIZE, addr != end);
2211 arch_leave_lazy_mmu_mode();
2212 pte_unmap_unlock(pte - 1, ptl);
2213 return 0;
2214}
2215
2216static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
2217 unsigned long addr, unsigned long end,
2218 unsigned long pfn, pgprot_t prot)
2219{
2220 pmd_t *pmd;
2221 unsigned long next;
2222
2223 pfn -= addr >> PAGE_SHIFT;
2224 pmd = pmd_alloc(mm, pud, addr);
2225 if (!pmd)
2226 return -ENOMEM;
2227 VM_BUG_ON(pmd_trans_huge(*pmd));
2228 do {
2229 next = pmd_addr_end(addr, end);
2230 if (remap_pte_range(mm, pmd, addr, next,
2231 pfn + (addr >> PAGE_SHIFT), prot))
2232 return -ENOMEM;
2233 } while (pmd++, addr = next, addr != end);
2234 return 0;
2235}
2236
2237static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
2238 unsigned long addr, unsigned long end,
2239 unsigned long pfn, pgprot_t prot)
2240{
2241 pud_t *pud;
2242 unsigned long next;
2243
2244 pfn -= addr >> PAGE_SHIFT;
2245 pud = pud_alloc(mm, pgd, addr);
2246 if (!pud)
2247 return -ENOMEM;
2248 do {
2249 next = pud_addr_end(addr, end);
2250 if (remap_pmd_range(mm, pud, addr, next,
2251 pfn + (addr >> PAGE_SHIFT), prot))
2252 return -ENOMEM;
2253 } while (pud++, addr = next, addr != end);
2254 return 0;
2255}
2256
2257/**
2258 * remap_pfn_range - remap kernel memory to userspace
2259 * @vma: user vma to map to
2260 * @addr: target user address to start at
2261 * @pfn: physical address of kernel memory
2262 * @size: size of map area
2263 * @prot: page protection flags for this mapping
2264 *
2265 * Note: this is only safe if the mm semaphore is held when called.
2266 */
2267int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
2268 unsigned long pfn, unsigned long size, pgprot_t prot)
2269{
2270 pgd_t *pgd;
2271 unsigned long next;
2272 unsigned long end = addr + PAGE_ALIGN(size);
2273 struct mm_struct *mm = vma->vm_mm;
2274 int err;
2275
2276 /*
2277 * Physically remapped pages are special. Tell the
2278 * rest of the world about it:
2279 * VM_IO tells people not to look at these pages
2280 * (accesses can have side effects).
2281 * VM_RESERVED is specified all over the place, because
2282 * in 2.4 it kept swapout's vma scan off this vma; but
2283 * in 2.6 the LRU scan won't even find its pages, so this
2284 * flag means no more than count its pages in reserved_vm,
2285 * and omit it from the core dump, even when VM_IO is turned off.
2286 * VM_PFNMAP tells the core MM that the base pages are just
2287 * raw PFN mappings, and do not have a "struct page" associated
2288 * with them.
2289 *
2290 * There's a horrible special case to handle copy-on-write
2291 * behaviour that some programs depend on. We mark the "original"
2292 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
2293 */
2294 if (addr == vma->vm_start && end == vma->vm_end) {
2295 vma->vm_pgoff = pfn;
2296 vma->vm_flags |= VM_PFN_AT_MMAP;
2297 } else if (is_cow_mapping(vma->vm_flags))
2298 return -EINVAL;
2299
2300 vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
2301
2302 err = track_pfn_vma_new(vma, &prot, pfn, PAGE_ALIGN(size));
2303 if (err) {
2304 /*
2305 * To indicate that track_pfn related cleanup is not
2306 * needed from higher level routine calling unmap_vmas
2307 */
2308 vma->vm_flags &= ~(VM_IO | VM_RESERVED | VM_PFNMAP);
2309 vma->vm_flags &= ~VM_PFN_AT_MMAP;
2310 return -EINVAL;
2311 }
2312
2313 BUG_ON(addr >= end);
2314 pfn -= addr >> PAGE_SHIFT;
2315 pgd = pgd_offset(mm, addr);
2316 flush_cache_range(vma, addr, end);
2317 do {
2318 next = pgd_addr_end(addr, end);
2319 err = remap_pud_range(mm, pgd, addr, next,
2320 pfn + (addr >> PAGE_SHIFT), prot);
2321 if (err)
2322 break;
2323 } while (pgd++, addr = next, addr != end);
2324
2325 if (err)
2326 untrack_pfn_vma(vma, pfn, PAGE_ALIGN(size));
2327
2328 return err;
2329}
2330EXPORT_SYMBOL(remap_pfn_range);
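
/*
 * Illustrative sketch of the classic use from a driver's mmap method;
 * "my_region_phys" and "MY_REGION_SIZE" are hypothetical:
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		unsigned long size = vma->vm_end - vma->vm_start;
 *
 *		if (size > MY_REGION_SIZE)
 *			return -EINVAL;
 *		return remap_pfn_range(vma, vma->vm_start,
 *				       my_region_phys >> PAGE_SHIFT,
 *				       size, vma->vm_page_prot);
 *	}
 *
 * The call covers the whole vma, so the VM_PFN_AT_MMAP fast path above
 * applies and vm_pgoff records the base pfn.
 */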
2331
2332static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
2333 unsigned long addr, unsigned long end,
2334 pte_fn_t fn, void *data)
2335{
2336 pte_t *pte;
2337 int err;
2338 pgtable_t token;
2339 spinlock_t *uninitialized_var(ptl);
2340
2341 pte = (mm == &init_mm) ?
2342 pte_alloc_kernel(pmd, addr) :
2343 pte_alloc_map_lock(mm, pmd, addr, &ptl);
2344 if (!pte)
2345 return -ENOMEM;
2346
2347 BUG_ON(pmd_huge(*pmd));
2348
2349 arch_enter_lazy_mmu_mode();
2350
2351 token = pmd_pgtable(*pmd);
2352
2353 do {
2354 err = fn(pte++, token, addr, data);
2355 if (err)
2356 break;
2357 } while (addr += PAGE_SIZE, addr != end);
2358
2359 arch_leave_lazy_mmu_mode();
2360
2361 if (mm != &init_mm)
2362 pte_unmap_unlock(pte-1, ptl);
2363 return err;
2364}
2365
2366static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
2367 unsigned long addr, unsigned long end,
2368 pte_fn_t fn, void *data)
2369{
2370 pmd_t *pmd;
2371 unsigned long next;
2372 int err;
2373
2374 BUG_ON(pud_huge(*pud));
2375
2376 pmd = pmd_alloc(mm, pud, addr);
2377 if (!pmd)
2378 return -ENOMEM;
2379 do {
2380 next = pmd_addr_end(addr, end);
2381 err = apply_to_pte_range(mm, pmd, addr, next, fn, data);
2382 if (err)
2383 break;
2384 } while (pmd++, addr = next, addr != end);
2385 return err;
2386}
2387
2388static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
2389 unsigned long addr, unsigned long end,
2390 pte_fn_t fn, void *data)
2391{
2392 pud_t *pud;
2393 unsigned long next;
2394 int err;
2395
2396 pud = pud_alloc(mm, pgd, addr);
2397 if (!pud)
2398 return -ENOMEM;
2399 do {
2400 next = pud_addr_end(addr, end);
2401 err = apply_to_pmd_range(mm, pud, addr, next, fn, data);
2402 if (err)
2403 break;
2404 } while (pud++, addr = next, addr != end);
2405 return err;
2406}
2407
2408/*
2409 * Scan a region of virtual memory, filling in page tables as necessary
2410 * and calling a provided function on each leaf page table.
2411 */
2412int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
2413 unsigned long size, pte_fn_t fn, void *data)
2414{
2415 pgd_t *pgd;
2416 unsigned long next;
2417 unsigned long end = addr + size;
2418 int err;
2419
2420 BUG_ON(addr >= end);
2421 pgd = pgd_offset(mm, addr);
2422 do {
2423 next = pgd_addr_end(addr, end);
2424 err = apply_to_pud_range(mm, pgd, addr, next, fn, data);
2425 if (err)
2426 break;
2427 } while (pgd++, addr = next, addr != end);
2428
2429 return err;
2430}
2431EXPORT_SYMBOL_GPL(apply_to_page_range);
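
/*
 * Illustrative sketch of a pte_fn_t callback; counting present ptes over a
 * range is a made-up example, but it shows the expected signature and how
 * "data" is threaded through:
 *
 *	static int count_present_pte(pte_t *pte, pgtable_t token,
 *				     unsigned long addr, void *data)
 *	{
 *		unsigned long *count = data;
 *
 *		if (pte_present(*pte))
 *			(*count)++;
 *		return 0;
 *	}
 *
 *	unsigned long count = 0;
 *	apply_to_page_range(mm, start, size, count_present_pte, &count);
 *
 * Returning non-zero from the callback aborts the walk, and that value is
 * propagated back to the apply_to_page_range() caller.
 */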
2432
2433/*
2434 * handle_pte_fault chooses page fault handler according to an entry
2435 * which was read non-atomically. Before making any commitment, on
2436 * those architectures or configurations (e.g. i386 with PAE) which
2437 * might give a mix of unmatched parts, do_swap_page and do_nonlinear_fault
2438 * must check under lock before unmapping the pte and proceeding
2439 * (but do_wp_page is only called after already making such a check;
2440 * and do_anonymous_page can safely check later on).
2441 */
2442static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
2443 pte_t *page_table, pte_t orig_pte)
2444{
2445 int same = 1;
2446#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
2447 if (sizeof(pte_t) > sizeof(unsigned long)) {
2448 spinlock_t *ptl = pte_lockptr(mm, pmd);
2449 spin_lock(ptl);
2450 same = pte_same(*page_table, orig_pte);
2451 spin_unlock(ptl);
2452 }
2453#endif
2454 pte_unmap(page_table);
2455 return same;
2456}
2457
2458static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
2459{
2460 /*
2461 * If the source page was a PFN mapping, we don't have
2462 * a "struct page" for it. We do a best-effort copy by
2463 * just copying from the original user address. If that
2464 * fails, we just zero-fill it. Live with it.
2465 */
2466 if (unlikely(!src)) {
2467 void *kaddr = kmap_atomic(dst);
2468 void __user *uaddr = (void __user *)(va & PAGE_MASK);
2469
2470 /*
2471 * This really shouldn't fail, because the page is there
2472 * in the page tables. But it might just be unreadable,
2473 * in which case we just give up and fill the result with
2474 * zeroes.
2475 */
2476 if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
2477 clear_page(kaddr);
2478 kunmap_atomic(kaddr);
2479 flush_dcache_page(dst);
2480 } else
2481 copy_user_highpage(dst, src, va, vma);
2482}
2483
2484/*
2485 * This routine handles present pages, when users try to write
2486 * to a shared page. It is done by copying the page to a new address
2487 * and decrementing the shared-page counter for the old page.
2488 *
2489 * Note that this routine assumes that the protection checks have been
2490 * done by the caller (the low-level page fault routine in most cases).
2491 * Thus we can safely just mark it writable once we've done any necessary
2492 * COW.
2493 *
2494 * We also mark the page dirty at this point even though the page will
2495 * change only once the write actually happens. This avoids a few races,
2496 * and potentially makes it more efficient.
2497 *
2498 * We enter with non-exclusive mmap_sem (to exclude vma changes,
2499 * but allow concurrent faults), with pte both mapped and locked.
2500 * We return with mmap_sem still held, but pte unmapped and unlocked.
2501 */
2502static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
2503 unsigned long address, pte_t *page_table, pmd_t *pmd,
2504 spinlock_t *ptl, pte_t orig_pte)
2505 __releases(ptl)
2506{
2507 struct page *old_page, *new_page;
2508 pte_t entry;
2509 int ret = 0;
2510 int page_mkwrite = 0;
2511 struct page *dirty_page = NULL;
2512
2513 old_page = vm_normal_page(vma, address, orig_pte);
2514 if (!old_page) {
2515 /*
2516 * VM_MIXEDMAP !pfn_valid() case
2517 *
2518 * We should not cow pages in a shared writeable mapping.
2519 * Just mark the pages writable as we can't do any dirty
2520 * accounting on raw pfn maps.
2521 */
2522 if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
2523 (VM_WRITE|VM_SHARED))
2524 goto reuse;
2525 goto gotten;
2526 }
2527
2528 /*
2529 * Take out anonymous pages first, anonymous shared vmas are
2530 * not dirty accountable.
2531 */
2532 if (PageAnon(old_page) && !PageKsm(old_page)) {
2533 if (!trylock_page(old_page)) {
2534 page_cache_get(old_page);
2535 pte_unmap_unlock(page_table, ptl);
2536 lock_page(old_page);
2537 page_table = pte_offset_map_lock(mm, pmd, address,
2538 &ptl);
2539 if (!pte_same(*page_table, orig_pte)) {
2540 unlock_page(old_page);
2541 goto unlock;
2542 }
2543 page_cache_release(old_page);
2544 }
2545 if (reuse_swap_page(old_page)) {
2546 /*
2547 * The page is all ours. Move it to our anon_vma so
2548 * the rmap code will not search our parent or siblings.
2549 * Protected against the rmap code by the page lock.
2550 */
2551 page_move_anon_rmap(old_page, vma, address);
2552 unlock_page(old_page);
2553 goto reuse;
2554 }
2555 unlock_page(old_page);
2556 } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
2557 (VM_WRITE|VM_SHARED))) {
2558 /*
2559 * Only catch write-faults on shared writable pages,
2560 * read-only shared pages can get COWed by
2561 * get_user_pages(.write=1, .force=1).
2562 */
2563 if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
2564 struct vm_fault vmf;
2565 int tmp;
2566
2567 vmf.virtual_address = (void __user *)(address &
2568 PAGE_MASK);
2569 vmf.pgoff = old_page->index;
2570 vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
2571 vmf.page = old_page;
2572
2573 /*
2574 * Notify the address space that the page is about to
2575 * become writable so that it can prohibit this or wait
2576 * for the page to get into an appropriate state.
2577 *
2578 * We do this without the lock held, so that it can
2579 * sleep if it needs to.
2580 */
2581 page_cache_get(old_page);
2582 pte_unmap_unlock(page_table, ptl);
2583
2584 tmp = vma->vm_ops->page_mkwrite(vma, &vmf);
2585 if (unlikely(tmp &
2586 (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
2587 ret = tmp;
2588 goto unwritable_page;
2589 }
2590 if (unlikely(!(tmp & VM_FAULT_LOCKED))) {
2591 lock_page(old_page);
2592 if (!old_page->mapping) {
2593 ret = 0; /* retry the fault */
2594 unlock_page(old_page);
2595 goto unwritable_page;
2596 }
2597 } else
2598 VM_BUG_ON(!PageLocked(old_page));
2599
2600 /*
2601 * Since we dropped the lock we need to revalidate
2602 * the PTE as someone else may have changed it. If
2603 * they did, we just return, as we can count on the
2604 * MMU to tell us if they didn't also make it writable.
2605 */
2606 page_table = pte_offset_map_lock(mm, pmd, address,
2607 &ptl);
2608 if (!pte_same(*page_table, orig_pte)) {
2609 unlock_page(old_page);
2610 goto unlock;
2611 }
2612
2613 page_mkwrite = 1;
2614 }
2615 dirty_page = old_page;
2616 get_page(dirty_page);
2617
2618reuse:
2619 flush_cache_page(vma, address, pte_pfn(orig_pte));
2620 entry = pte_mkyoung(orig_pte);
2621 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2622 if (ptep_set_access_flags(vma, address, page_table, entry,1))
2623 update_mmu_cache(vma, address, page_table);
2624 pte_unmap_unlock(page_table, ptl);
2625 ret |= VM_FAULT_WRITE;
2626
2627 if (!dirty_page)
2628 return ret;
2629
2630 /*
2631 * Yes, Virginia, this is actually required to prevent a race
2632 * with clear_page_dirty_for_io() from clearing the page dirty
2633 * bit after it clears all dirty ptes, but before a racing
2634 * do_wp_page installs a dirty pte.
2635 *
2636 * __do_fault is protected similarly.
2637 */
2638 if (!page_mkwrite) {
2639 wait_on_page_locked(dirty_page);
2640 set_page_dirty_balance(dirty_page, page_mkwrite);
2641 }
2642 put_page(dirty_page);
2643 if (page_mkwrite) {
2644 struct address_space *mapping = dirty_page->mapping;
2645
2646 set_page_dirty(dirty_page);
2647 unlock_page(dirty_page);
2648 page_cache_release(dirty_page);
2649 if (mapping) {
2650 /*
2651 * Some device drivers do not set page.mapping
2652 * but still dirty their pages
2653 */
2654 balance_dirty_pages_ratelimited(mapping);
2655 }
2656 }
2657
2658 /* file_update_time outside page_lock */
2659 if (vma->vm_file)
2660 file_update_time(vma->vm_file);
2661
2662 return ret;
2663 }
2664
2665 /*
2666 * Ok, we need to copy. Oh, well..
2667 */
2668 page_cache_get(old_page);
2669gotten:
2670 pte_unmap_unlock(page_table, ptl);
2671
2672 if (unlikely(anon_vma_prepare(vma)))
2673 goto oom;
2674
2675 if (is_zero_pfn(pte_pfn(orig_pte))) {
2676 new_page = alloc_zeroed_user_highpage_movable(vma, address);
2677 if (!new_page)
2678 goto oom;
2679 } else {
2680 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
2681 if (!new_page)
2682 goto oom;
2683 cow_user_page(new_page, old_page, address, vma);
2684 }
2685 __SetPageUptodate(new_page);
2686
2687 if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))
2688 goto oom_free_new;
2689
2690 /*
2691 * Re-check the pte - we dropped the lock
2692 */
2693 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
2694 if (likely(pte_same(*page_table, orig_pte))) {
2695 if (old_page) {
2696 if (!PageAnon(old_page)) {
2697 dec_mm_counter_fast(mm, MM_FILEPAGES);
2698 inc_mm_counter_fast(mm, MM_ANONPAGES);
2699 }
2700 } else
2701 inc_mm_counter_fast(mm, MM_ANONPAGES);
2702 flush_cache_page(vma, address, pte_pfn(orig_pte));
2703 entry = mk_pte(new_page, vma->vm_page_prot);
2704 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2705 /*
2706 * Clear the pte entry and flush it first, before updating the
2707 * pte with the new entry. This will avoid a race condition
2708 * seen in the presence of one thread doing SMC and another
2709 * thread doing COW.
2710 */
2711 ptep_clear_flush(vma, address, page_table);
2712 page_add_new_anon_rmap(new_page, vma, address);
2713 /*
2714 * We call the notify macro here because, when using secondary
2715 * mmu page tables (such as kvm shadow page tables), we want the
2716 * new page to be mapped directly into the secondary page table.
2717 */
2718 set_pte_at_notify(mm, address, page_table, entry);
2719 update_mmu_cache(vma, address, page_table);
2720 if (old_page) {
2721 /*
2722 * Only after switching the pte to the new page may
2723 * we remove the mapcount here. Otherwise another
2724 * process may come and find the rmap count decremented
2725 * before the pte is switched to the new page, and
2726 * "reuse" the old page writing into it while our pte
2727 * here still points into it and can be read by other
2728 * threads.
2729 *
2730 * The critical issue is to order this
2731 * page_remove_rmap with the ptep_clear_flush above.
2732 * Those stores are ordered by (if nothing else,)
2733 * the barrier present in the atomic_add_negative
2734 * in page_remove_rmap.
2735 *
2736 * Then the TLB flush in ptep_clear_flush ensures that
2737 * no process can access the old page before the
2738 * decremented mapcount is visible. And the old page
2739 * cannot be reused until after the decremented
2740 * mapcount is visible. So transitively, TLBs to
2741 * old page will be flushed before it can be reused.
2742 */
2743 page_remove_rmap(old_page);
2744 }
2745
2746 /* Free the old page.. */
2747 new_page = old_page;
2748 ret |= VM_FAULT_WRITE;
2749 } else
2750 mem_cgroup_uncharge_page(new_page);
2751
2752 if (new_page)
2753 page_cache_release(new_page);
2754unlock:
2755 pte_unmap_unlock(page_table, ptl);
2756 if (old_page) {
2757 /*
2758 * Don't let another task, with possibly unlocked vma,
2759 * keep the mlocked page.
2760 */
2761 if ((ret & VM_FAULT_WRITE) && (vma->vm_flags & VM_LOCKED)) {
2762 lock_page(old_page); /* LRU manipulation */
2763 munlock_vma_page(old_page);
2764 unlock_page(old_page);
2765 }
2766 page_cache_release(old_page);
2767 }
2768 return ret;
2769oom_free_new:
2770 page_cache_release(new_page);
2771oom:
2772 if (old_page) {
2773 if (page_mkwrite) {
2774 unlock_page(old_page);
2775 page_cache_release(old_page);
2776 }
2777 page_cache_release(old_page);
2778 }
2779 return VM_FAULT_OOM;
2780
2781unwritable_page:
2782 page_cache_release(old_page);
2783 return ret;
2784}
2785
2786static void unmap_mapping_range_vma(struct vm_area_struct *vma,
2787 unsigned long start_addr, unsigned long end_addr,
2788 struct zap_details *details)
2789{
2790 zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
2791}
2792
2793static inline void unmap_mapping_range_tree(struct prio_tree_root *root,
2794 struct zap_details *details)
2795{
2796 struct vm_area_struct *vma;
2797 struct prio_tree_iter iter;
2798 pgoff_t vba, vea, zba, zea;
2799
2800 vma_prio_tree_foreach(vma, &iter, root,
2801 details->first_index, details->last_index) {
2802
2803 vba = vma->vm_pgoff;
2804 vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1;
2805 /* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */
2806 zba = details->first_index;
2807 if (zba < vba)
2808 zba = vba;
2809 zea = details->last_index;
2810 if (zea > vea)
2811 zea = vea;
2812
2813 unmap_mapping_range_vma(vma,
2814 ((zba - vba) << PAGE_SHIFT) + vma->vm_start,
2815 ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
2816 details);
2817 }
2818}
2819
2820static inline void unmap_mapping_range_list(struct list_head *head,
2821 struct zap_details *details)
2822{
2823 struct vm_area_struct *vma;
2824
2825 /*
2826 * In nonlinear VMAs there is no correspondence between virtual address
2827 * offset and file offset. So we must perform an exhaustive search
2828 * across *all* the pages in each nonlinear VMA, not just the pages
2829 * whose virtual address lies outside the file truncation point.
2830 */
2831 list_for_each_entry(vma, head, shared.vm_set.list) {
2832 details->nonlinear_vma = vma;
2833 unmap_mapping_range_vma(vma, vma->vm_start, vma->vm_end, details);
2834 }
2835}
2836
2837/**
2838 * unmap_mapping_range - unmap the portion of all mmaps in the specified address_space corresponding to the specified page range in the underlying file.
2839 * @mapping: the address space containing mmaps to be unmapped.
2840 * @holebegin: byte in first page to unmap, relative to the start of
2841 * the underlying file. This will be rounded down to a PAGE_SIZE
2842 * boundary. Note that this is different from truncate_pagecache(), which
2843 * must keep the partial page. In contrast, we must get rid of
2844 * partial pages.
2845 * @holelen: size of prospective hole in bytes. This will be rounded
2846 * up to a PAGE_SIZE boundary. A holelen of zero truncates to the
2847 * end of the file.
2848 * @even_cows: 1 when truncating a file, unmap even private COWed pages;
2849 * but 0 when invalidating pagecache, don't throw away private data.
2850 */
2851void unmap_mapping_range(struct address_space *mapping,
2852 loff_t const holebegin, loff_t const holelen, int even_cows)
2853{
2854 struct zap_details details;
2855 pgoff_t hba = holebegin >> PAGE_SHIFT;
2856 pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
2857
2858 /* Check for overflow. */
2859 if (sizeof(holelen) > sizeof(hlen)) {
2860 long long holeend =
2861 (holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
2862 if (holeend & ~(long long)ULONG_MAX)
2863 hlen = ULONG_MAX - hba + 1;
2864 }
2865
2866 details.check_mapping = even_cows? NULL: mapping;
2867 details.nonlinear_vma = NULL;
2868 details.first_index = hba;
2869 details.last_index = hba + hlen - 1;
2870 if (details.last_index < details.first_index)
2871 details.last_index = ULONG_MAX;
2872
2873
2874 mutex_lock(&mapping->i_mmap_mutex);
2875 if (unlikely(!prio_tree_empty(&mapping->i_mmap)))
2876 unmap_mapping_range_tree(&mapping->i_mmap, &details);
2877 if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
2878 unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
2879 mutex_unlock(&mapping->i_mmap_mutex);
2880}
2881EXPORT_SYMBOL(unmap_mapping_range);
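
/*
 * Illustrative sketch of the common truncation-time use (roughly what
 * truncate_pagecache() does); "new_size" is hypothetical:
 *
 *	unmap_mapping_range(mapping, new_size + PAGE_SIZE - 1, 0, 1);
 *	truncate_inode_pages(mapping, new_size);
 *	unmap_mapping_range(mapping, new_size + PAGE_SIZE - 1, 0, 1);
 *
 * The "+ PAGE_SIZE - 1" keeps the partial last page mapped (holebegin is
 * rounded down internally), holelen == 0 means "to end of file", and
 * even_cows == 1 discards private COWed copies as truncation requires.
 * The second call catches pages COWed while truncate_inode_pages() ran.
 */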
2882
2883/*
2884 * We enter with non-exclusive mmap_sem (to exclude vma changes,
2885 * but allow concurrent faults), and pte mapped but not yet locked.
2886 * We return with mmap_sem still held, but pte unmapped and unlocked.
2887 */
2888static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
2889 unsigned long address, pte_t *page_table, pmd_t *pmd,
2890 unsigned int flags, pte_t orig_pte)
2891{
2892 spinlock_t *ptl;
2893 struct page *page, *swapcache = NULL;
2894 swp_entry_t entry;
2895 pte_t pte;
2896 int locked;
2897 struct mem_cgroup *ptr;
2898 int exclusive = 0;
2899 int ret = 0;
2900
2901 if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
2902 goto out;
2903
2904 entry = pte_to_swp_entry(orig_pte);
2905 if (unlikely(non_swap_entry(entry))) {
2906 if (is_migration_entry(entry)) {
2907 migration_entry_wait(mm, pmd, address);
2908 } else if (is_hwpoison_entry(entry)) {
2909 ret = VM_FAULT_HWPOISON;
2910 } else {
2911 print_bad_pte(vma, address, orig_pte, NULL);
2912 ret = VM_FAULT_SIGBUS;
2913 }
2914 goto out;
2915 }
2916 delayacct_set_flag(DELAYACCT_PF_SWAPIN);
2917 page = lookup_swap_cache(entry);
2918 if (!page) {
2919 page = swapin_readahead(entry,
2920 GFP_HIGHUSER_MOVABLE, vma, address);
2921 if (!page) {
2922 /*
2923 * Back out if somebody else faulted in this pte
2924 * while we released the pte lock.
2925 */
2926 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
2927 if (likely(pte_same(*page_table, orig_pte)))
2928 ret = VM_FAULT_OOM;
2929 delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
2930 goto unlock;
2931 }
2932
2933 /* Had to read the page from swap area: Major fault */
2934 ret = VM_FAULT_MAJOR;
2935 count_vm_event(PGMAJFAULT);
2936 mem_cgroup_count_vm_event(mm, PGMAJFAULT);
2937 } else if (PageHWPoison(page)) {
2938 /*
2939 * hwpoisoned dirty swapcache pages are kept for killing
2940 * owner processes (which may be unknown at hwpoison time)
2941 */
2942 ret = VM_FAULT_HWPOISON;
2943 delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
2944 goto out_release;
2945 }
2946
2947 locked = lock_page_or_retry(page, mm, flags);
2948
2949 delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
2950 if (!locked) {
2951 ret |= VM_FAULT_RETRY;
2952 goto out_release;
2953 }
2954
2955 /*
2956 * Make sure try_to_free_swap or reuse_swap_page or swapoff did not
2957 * release the swapcache from under us. The page pin, and pte_same
2958 * test below, are not enough to exclude that. Even if it is still
2959 * swapcache, we need to check that the page's swap has not changed.
2960 */
2961 if (unlikely(!PageSwapCache(page) || page_private(page) != entry.val))
2962 goto out_page;
2963
2964 if (ksm_might_need_to_copy(page, vma, address)) {
2965 swapcache = page;
2966 page = ksm_does_need_to_copy(page, vma, address);
2967
2968 if (unlikely(!page)) {
2969 ret = VM_FAULT_OOM;
2970 page = swapcache;
2971 swapcache = NULL;
2972 goto out_page;
2973 }
2974 }
2975
2976 if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) {
2977 ret = VM_FAULT_OOM;
2978 goto out_page;
2979 }
2980
2981 /*
2982 * Back out if somebody else already faulted in this pte.
2983 */
2984 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
2985 if (unlikely(!pte_same(*page_table, orig_pte)))
2986 goto out_nomap;
2987
2988 if (unlikely(!PageUptodate(page))) {
2989 ret = VM_FAULT_SIGBUS;
2990 goto out_nomap;
2991 }
2992
2993 /*
2994 * The page isn't present yet, go ahead with the fault.
2995 *
2996 * Be careful about the sequence of operations here.
2997 * To get its accounting right, reuse_swap_page() must be called
2998 * while the page is counted on swap but not yet in mapcount i.e.
2999 * before page_add_anon_rmap() and swap_free(); try_to_free_swap()
3000 * must be called after the swap_free(), or it will never succeed.
3001 * Because delete_from_swap_cache() may be called by reuse_swap_page(),
3002 * mem_cgroup_commit_charge_swapin() may not be able to find swp_entry
3003 * in page->private. In this case, a record in swap_cgroup is silently
3004 * discarded at swap_free().
3005 */
3006
3007 inc_mm_counter_fast(mm, MM_ANONPAGES);
3008 dec_mm_counter_fast(mm, MM_SWAPENTS);
3009 pte = mk_pte(page, vma->vm_page_prot);
3010 if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page)) {
3011 pte = maybe_mkwrite(pte_mkdirty(pte), vma);
3012 flags &= ~FAULT_FLAG_WRITE;
3013 ret |= VM_FAULT_WRITE;
3014 exclusive = 1;
3015 }
3016 flush_icache_page(vma, page);
3017 set_pte_at(mm, address, page_table, pte);
3018 do_page_add_anon_rmap(page, vma, address, exclusive);
3019 /* It's better to call commit-charge after rmap is established */
3020 mem_cgroup_commit_charge_swapin(page, ptr);
3021
3022 swap_free(entry);
3023 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
3024 try_to_free_swap(page);
3025 unlock_page(page);
3026 if (swapcache) {
3027 /*
3028 * Hold the lock to prevent the swap entry from being reused
3029 * until we take the PT lock for the pte_same() check
3030 * (to avoid false positives from pte_same). For
3031 * further safety release the lock after the swap_free
3032 * so that the swap count won't change under a
3033 * parallel locked swapcache.
3034 */
3035 unlock_page(swapcache);
3036 page_cache_release(swapcache);
3037 }
3038
3039 if (flags & FAULT_FLAG_WRITE) {
3040 ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte);
3041 if (ret & VM_FAULT_ERROR)
3042 ret &= VM_FAULT_ERROR;
3043 goto out;
3044 }
3045
3046 /* No need to invalidate - it was non-present before */
3047 update_mmu_cache(vma, address, page_table);
3048unlock:
3049 pte_unmap_unlock(page_table, ptl);
3050out:
3051 return ret;
3052out_nomap:
3053 mem_cgroup_cancel_charge_swapin(ptr);
3054 pte_unmap_unlock(page_table, ptl);
3055out_page:
3056 unlock_page(page);
3057out_release:
3058 page_cache_release(page);
3059 if (swapcache) {
3060 unlock_page(swapcache);
3061 page_cache_release(swapcache);
3062 }
3063 return ret;
3064}
3065
3066/*
3067 * This is like a special single-page "expand_{down|up}wards()",
3068 * except we must first make sure that 'address{-|+}PAGE_SIZE'
3069 * doesn't hit another vma.
3070 */
3071static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
3072{
3073 address &= PAGE_MASK;
3074 if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
3075 struct vm_area_struct *prev = vma->vm_prev;
3076
3077 /*
3078 * Is there a mapping abutting this one below?
3079 *
3080 * That's only ok if it's the same stack mapping
3081 * that has gotten split..
3082 */
3083 if (prev && prev->vm_end == address)
3084 return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
3085
3086 expand_downwards(vma, address - PAGE_SIZE);
3087 }
3088 if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
3089 struct vm_area_struct *next = vma->vm_next;
3090
3091 /* As VM_GROWSDOWN but s/below/above/ */
3092 if (next && next->vm_start == address + PAGE_SIZE)
3093 return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
3094
3095 expand_upwards(vma, address + PAGE_SIZE);
3096 }
3097 return 0;
3098}
3099
3100/*
3101 * We enter with non-exclusive mmap_sem (to exclude vma changes,
3102 * but allow concurrent faults), and pte mapped but not yet locked.
3103 * We return with mmap_sem still held, but pte unmapped and unlocked.
3104 */
3105static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
3106 unsigned long address, pte_t *page_table, pmd_t *pmd,
3107 unsigned int flags)
3108{
3109 struct page *page;
3110 spinlock_t *ptl;
3111 pte_t entry;
3112
3113 pte_unmap(page_table);
3114
3115 /* Check if we need to add a guard page to the stack */
3116 if (check_stack_guard_page(vma, address) < 0)
3117 return VM_FAULT_SIGBUS;
3118
3119 /* Use the zero-page for reads */
3120 if (!(flags & FAULT_FLAG_WRITE)) {
3121 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
3122 vma->vm_page_prot));
3123 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
3124 if (!pte_none(*page_table))
3125 goto unlock;
3126 goto setpte;
3127 }
3128
3129 /* Allocate our own private page. */
3130 if (unlikely(anon_vma_prepare(vma)))
3131 goto oom;
3132 page = alloc_zeroed_user_highpage_movable(vma, address);
3133 if (!page)
3134 goto oom;
3135 __SetPageUptodate(page);
3136
3137 if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
3138 goto oom_free_page;
3139
3140 entry = mk_pte(page, vma->vm_page_prot);
3141 if (vma->vm_flags & VM_WRITE)
3142 entry = pte_mkwrite(pte_mkdirty(entry));
3143
3144 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
3145 if (!pte_none(*page_table))
3146 goto release;
3147
3148 inc_mm_counter_fast(mm, MM_ANONPAGES);
3149 page_add_new_anon_rmap(page, vma, address);
3150setpte:
3151 set_pte_at(mm, address, page_table, entry);
3152
3153 /* No need to invalidate - it was non-present before */
3154 update_mmu_cache(vma, address, page_table);
3155unlock:
3156 pte_unmap_unlock(page_table, ptl);
3157 return 0;
3158release:
3159 mem_cgroup_uncharge_page(page);
3160 page_cache_release(page);
3161 goto unlock;
3162oom_free_page:
3163 page_cache_release(page);
3164oom:
3165 return VM_FAULT_OOM;
3166}
3167
3168/*
3169 * __do_fault() tries to create a new page mapping. It aggressively
3170 * tries to share with existing pages, but makes a separate copy if
3171 * the FAULT_FLAG_WRITE is set in the flags parameter in order to avoid
3172 * the next page fault.
3173 *
3174 * As this is called only for pages that do not currently exist, we
3175 * do not need to flush old virtual caches or the TLB.
3176 *
3177 * We enter with non-exclusive mmap_sem (to exclude vma changes,
3178 * but allow concurrent faults), and pte neither mapped nor locked.
3179 * We return with mmap_sem still held, but pte unmapped and unlocked.
3180 */
3181static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3182 unsigned long address, pmd_t *pmd,
3183 pgoff_t pgoff, unsigned int flags, pte_t orig_pte)
3184{
3185 pte_t *page_table;
3186 spinlock_t *ptl;
3187 struct page *page;
3188 struct page *cow_page;
3189 pte_t entry;
3190 int anon = 0;
3191 struct page *dirty_page = NULL;
3192 struct vm_fault vmf;
3193 int ret;
3194 int page_mkwrite = 0;
3195
3196 /*
3197 * If we do COW later, allocate the page before taking lock_page()
3198 * on the file cache page. This will reduce lock holding time.
3199 */
3200 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3201
3202 if (unlikely(anon_vma_prepare(vma)))
3203 return VM_FAULT_OOM;
3204
3205 cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
3206 if (!cow_page)
3207 return VM_FAULT_OOM;
3208
3209 if (mem_cgroup_newpage_charge(cow_page, mm, GFP_KERNEL)) {
3210 page_cache_release(cow_page);
3211 return VM_FAULT_OOM;
3212 }
3213 } else
3214 cow_page = NULL;
3215
3216 vmf.virtual_address = (void __user *)(address & PAGE_MASK);
3217 vmf.pgoff = pgoff;
3218 vmf.flags = flags;
3219 vmf.page = NULL;
3220
3221 ret = vma->vm_ops->fault(vma, &vmf);
3222 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
3223 VM_FAULT_RETRY)))
3224 goto uncharge_out;
3225
3226 if (unlikely(PageHWPoison(vmf.page))) {
3227 if (ret & VM_FAULT_LOCKED)
3228 unlock_page(vmf.page);
3229 ret = VM_FAULT_HWPOISON;
3230 goto uncharge_out;
3231 }
3232
3233 /*
3234 * For consistency in subsequent calls, make the faulted page always
3235 * locked.
3236 */
3237 if (unlikely(!(ret & VM_FAULT_LOCKED)))
3238 lock_page(vmf.page);
3239 else
3240 VM_BUG_ON(!PageLocked(vmf.page));
3241
3242 /*
3243 * Should we do an early C-O-W break?
3244 */
3245 page = vmf.page;
3246 if (flags & FAULT_FLAG_WRITE) {
3247 if (!(vma->vm_flags & VM_SHARED)) {
3248 page = cow_page;
3249 anon = 1;
3250 copy_user_highpage(page, vmf.page, address, vma);
3251 __SetPageUptodate(page);
3252 } else {
3253 /*
3254 * If the page will be shareable, see if the backing
3255 * address space wants to know that the page is about
3256 * to become writable
3257 */
3258 if (vma->vm_ops->page_mkwrite) {
3259 int tmp;
3260
3261 unlock_page(page);
3262 vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
3263 tmp = vma->vm_ops->page_mkwrite(vma, &vmf);
3264 if (unlikely(tmp &
3265 (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
3266 ret = tmp;
3267 goto unwritable_page;
3268 }
3269 if (unlikely(!(tmp & VM_FAULT_LOCKED))) {
3270 lock_page(page);
3271 if (!page->mapping) {
3272 ret = 0; /* retry the fault */
3273 unlock_page(page);
3274 goto unwritable_page;
3275 }
3276 } else
3277 VM_BUG_ON(!PageLocked(page));
3278 page_mkwrite = 1;
3279 }
3280 }
3281
3282 }
3283
3284 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
3285
3286 /*
3287 * This silly early PAGE_DIRTY setting removes a race
3288 * due to the bad i386 page protection. But it's valid
3289 * for other architectures too.
3290 *
3291 * Note that if FAULT_FLAG_WRITE is set, we either now have
3292 * an exclusive copy of the page, or this is a shared mapping,
3293 * so we can make it writable and dirty to avoid having to
3294 * handle that later.
3295 */
3296 /* Only go through if we didn't race with anybody else... */
3297 if (likely(pte_same(*page_table, orig_pte))) {
3298 flush_icache_page(vma, page);
3299 entry = mk_pte(page, vma->vm_page_prot);
3300 if (flags & FAULT_FLAG_WRITE)
3301 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
3302 if (anon) {
3303 inc_mm_counter_fast(mm, MM_ANONPAGES);
3304 page_add_new_anon_rmap(page, vma, address);
3305 } else {
3306 inc_mm_counter_fast(mm, MM_FILEPAGES);
3307 page_add_file_rmap(page);
3308 if (flags & FAULT_FLAG_WRITE) {
3309 dirty_page = page;
3310 get_page(dirty_page);
3311 }
3312 }
3313 set_pte_at(mm, address, page_table, entry);
3314
3315 /* no need to invalidate: a not-present page won't be cached */
3316 update_mmu_cache(vma, address, page_table);
3317 } else {
3318 if (cow_page)
3319 mem_cgroup_uncharge_page(cow_page);
3320 if (anon)
3321 page_cache_release(page);
3322 else
3323 anon = 1; /* not anon, but release the faulted page below */
3324 }
3325
3326 pte_unmap_unlock(page_table, ptl);
3327
3328 if (dirty_page) {
3329 struct address_space *mapping = page->mapping;
3330
3331 if (set_page_dirty(dirty_page))
3332 page_mkwrite = 1;
3333 unlock_page(dirty_page);
3334 put_page(dirty_page);
3335 if (page_mkwrite && mapping) {
3336 /*
3337 * Some device drivers do not set page.mapping but still
3338 * dirty their pages
3339 */
3340 balance_dirty_pages_ratelimited(mapping);
3341 }
3342
3343 /* file_update_time outside page_lock */
3344 if (vma->vm_file)
3345 file_update_time(vma->vm_file);
3346 } else {
3347 unlock_page(vmf.page);
3348 if (anon)
3349 page_cache_release(vmf.page);
3350 }
3351
3352 return ret;
3353
3354unwritable_page:
3355 page_cache_release(page);
3356 return ret;
3357uncharge_out:
3358 /* the filesystem's fault handler returned an error */
3359 if (cow_page) {
3360 mem_cgroup_uncharge_page(cow_page);
3361 page_cache_release(cow_page);
3362 }
3363 return ret;
3364}
3365
3366static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3367 unsigned long address, pte_t *page_table, pmd_t *pmd,
3368 unsigned int flags, pte_t orig_pte)
3369{
3370 pgoff_t pgoff = (((address & PAGE_MASK)
3371 - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
3372
3373 pte_unmap(page_table);
3374 return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
3375}
3376
3377/*
3378 * Fault of a previously existing named mapping. Repopulate the pte
3379 * from the encoded file_pte if possible. This enables swappable
3380 * nonlinear vmas.
3381 *
3382 * We enter with non-exclusive mmap_sem (to exclude vma changes,
3383 * but allow concurrent faults), and pte mapped but not yet locked.
3384 * We return with mmap_sem still held, but pte unmapped and unlocked.
3385 */
3386static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3387 unsigned long address, pte_t *page_table, pmd_t *pmd,
3388 unsigned int flags, pte_t orig_pte)
3389{
3390 pgoff_t pgoff;
3391
3392 flags |= FAULT_FLAG_NONLINEAR;
3393
3394 if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
3395 return 0;
3396
3397 if (unlikely(!(vma->vm_flags & VM_NONLINEAR))) {
3398 /*
3399 * Page table corrupted: show pte and kill process.
3400 */
3401 print_bad_pte(vma, address, orig_pte, NULL);
3402 return VM_FAULT_SIGBUS;
3403 }
3404
3405 pgoff = pte_to_pgoff(orig_pte);
3406 return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
3407}
3408
3409/*
3410 * These routines also need to handle stuff like marking pages dirty
3411 * and/or accessed for architectures that don't do it in hardware (most
3412 * RISC architectures). The early dirtying is also good on the i386.
3413 *
3414 * There is also a hook called "update_mmu_cache()" that architectures
3415 * with external mmu caches can use to update those (ie the Sparc or
3416 * PowerPC hashed page tables that act as extended TLBs).
3417 *
3418 * We enter with non-exclusive mmap_sem (to exclude vma changes,
3419 * but allow concurrent faults), and pte mapped but not yet locked.
3420 * We return with mmap_sem still held, but pte unmapped and unlocked.
3421 */
3422int handle_pte_fault(struct mm_struct *mm,
3423 struct vm_area_struct *vma, unsigned long address,
3424 pte_t *pte, pmd_t *pmd, unsigned int flags)
3425{
3426 pte_t entry;
3427 spinlock_t *ptl;
3428
3429 entry = *pte;
3430 if (!pte_present(entry)) {
3431 if (pte_none(entry)) {
3432 if (vma->vm_ops) {
3433 if (likely(vma->vm_ops->fault))
3434 return do_linear_fault(mm, vma, address,
3435 pte, pmd, flags, entry);
3436 }
3437 return do_anonymous_page(mm, vma, address,
3438 pte, pmd, flags);
3439 }
3440 if (pte_file(entry))
3441 return do_nonlinear_fault(mm, vma, address,
3442 pte, pmd, flags, entry);
3443 return do_swap_page(mm, vma, address,
3444 pte, pmd, flags, entry);
3445 }
3446
3447 ptl = pte_lockptr(mm, pmd);
3448 spin_lock(ptl);
3449 if (unlikely(!pte_same(*pte, entry)))
3450 goto unlock;
3451 if (flags & FAULT_FLAG_WRITE) {
3452 if (!pte_write(entry))
3453 return do_wp_page(mm, vma, address,
3454 pte, pmd, ptl, entry);
3455 entry = pte_mkdirty(entry);
3456 }
3457 entry = pte_mkyoung(entry);
3458 if (ptep_set_access_flags(vma, address, pte, entry, flags & FAULT_FLAG_WRITE)) {
3459 update_mmu_cache(vma, address, pte);
3460 } else {
3461 /*
3462 * This is needed only for protection faults but the arch code
3463 * is not yet telling us if this is a protection fault or not.
3464 * This still avoids useless tlb flushes for .text page faults
3465 * with threads.
3466 */
3467 if (flags & FAULT_FLAG_WRITE)
3468 flush_tlb_fix_spurious_fault(vma, address);
3469 }
3470unlock:
3471 pte_unmap_unlock(pte, ptl);
3472 return 0;
3473}
3474
3475/*
3476 * By the time we get here, we already hold the mm semaphore
3477 */
3478int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3479 unsigned long address, unsigned int flags)
3480{
3481 pgd_t *pgd;
3482 pud_t *pud;
3483 pmd_t *pmd;
3484 pte_t *pte;
3485
3486 __set_current_state(TASK_RUNNING);
3487
3488 count_vm_event(PGFAULT);
3489 mem_cgroup_count_vm_event(mm, PGFAULT);
3490
3491 /* do counter updates before entering really critical section. */
3492 check_sync_rss_stat(current);
3493
3494 if (unlikely(is_vm_hugetlb_page(vma)))
3495 return hugetlb_fault(mm, vma, address, flags);
3496
3497retry:
3498 pgd = pgd_offset(mm, address);
3499 pud = pud_alloc(mm, pgd, address);
3500 if (!pud)
3501 return VM_FAULT_OOM;
3502 pmd = pmd_alloc(mm, pud, address);
3503 if (!pmd)
3504 return VM_FAULT_OOM;
3505 if (pmd_none(*pmd) && transparent_hugepage_enabled(vma)) {
3506 if (!vma->vm_ops)
3507 return do_huge_pmd_anonymous_page(mm, vma, address,
3508 pmd, flags);
3509 } else {
3510 pmd_t orig_pmd = *pmd;
3511 int ret;
3512
3513 barrier();
3514 if (pmd_trans_huge(orig_pmd)) {
3515 if (flags & FAULT_FLAG_WRITE &&
3516 !pmd_write(orig_pmd) &&
3517 !pmd_trans_splitting(orig_pmd)) {
3518 ret = do_huge_pmd_wp_page(mm, vma, address, pmd,
3519 orig_pmd);
3520 /*
3521 * If COW results in an oom, the huge pmd will
3522 * have been split, so retry the fault on the
3523 * pte for a smaller charge.
3524 */
3525 if (unlikely(ret & VM_FAULT_OOM))
3526 goto retry;
3527 return ret;
3528 }
3529 return 0;
3530 }
3531 }
3532
3533 /*
3534 * Use __pte_alloc instead of pte_alloc_map, because we can't
3535 * run pte_offset_map on the pmd, if a huge pmd could
3536 * materialize from under us from a different thread.
3537 */
3538 if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
3539 return VM_FAULT_OOM;
3540 /* if a huge pmd materialized from under us, just retry later */
3541 if (unlikely(pmd_trans_huge(*pmd)))
3542 return 0;
3543 /*
3544 * A regular pmd is established and it can't morph into a huge pmd
3545 * from under us anymore at this point because we hold the mmap_sem
3546 * read mode and khugepaged takes it in write mode. So now it's
3547 * safe to run pte_offset_map().
3548 */
3549 pte = pte_offset_map(pmd, address);
3550
3551 return handle_pte_fault(mm, vma, address, pte, pmd, flags);
3552}
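
/*
 * Illustrative sketch of how an architecture's page fault handler feeds
 * into handle_mm_fault() (simplified; real arch code also grows the stack,
 * validates access rights, and handles retries and signals):
 *
 *	down_read(&mm->mmap_sem);
 *	vma = find_vma(mm, address);
 *	if (!vma || vma->vm_start > address)
 *		goto bad_area;
 *	fault = handle_mm_fault(mm, vma, address,
 *				write ? FAULT_FLAG_WRITE : 0);
 *	if (fault & VM_FAULT_ERROR)
 *		handle the OOM / SIGBUS / hwpoison cases
 *	up_read(&mm->mmap_sem);
 */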
3553
3554#ifndef __PAGETABLE_PUD_FOLDED
3555/*
3556 * Allocate page upper directory.
3557 * We've already handled the fast-path in-line.
3558 */
3559int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
3560{
3561 pud_t *new = pud_alloc_one(mm, address);
3562 if (!new)
3563 return -ENOMEM;
3564
3565 smp_wmb(); /* See comment in __pte_alloc */
3566
3567 spin_lock(&mm->page_table_lock);
3568 if (pgd_present(*pgd)) /* Another has populated it */
3569 pud_free(mm, new);
3570 else
3571 pgd_populate(mm, pgd, new);
3572 spin_unlock(&mm->page_table_lock);
3573 return 0;
3574}
3575#endif /* __PAGETABLE_PUD_FOLDED */
3576
3577#ifndef __PAGETABLE_PMD_FOLDED
3578/*
3579 * Allocate page middle directory.
3580 * We've already handled the fast-path in-line.
3581 */
3582int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
3583{
3584 pmd_t *new = pmd_alloc_one(mm, address);
3585 if (!new)
3586 return -ENOMEM;
3587
3588 smp_wmb(); /* See comment in __pte_alloc */
3589
3590 spin_lock(&mm->page_table_lock);
3591#ifndef __ARCH_HAS_4LEVEL_HACK
3592 if (pud_present(*pud)) /* Another has populated it */
3593 pmd_free(mm, new);
3594 else
3595 pud_populate(mm, pud, new);
3596#else
3597 if (pgd_present(*pud)) /* Another has populated it */
3598 pmd_free(mm, new);
3599 else
3600 pgd_populate(mm, pud, new);
3601#endif /* __ARCH_HAS_4LEVEL_HACK */
3602 spin_unlock(&mm->page_table_lock);
3603 return 0;
3604}
3605#endif /* __PAGETABLE_PMD_FOLDED */
3606
3607int make_pages_present(unsigned long addr, unsigned long end)
3608{
3609 int ret, len, write;
3610 struct vm_area_struct * vma;
3611
3612 vma = find_vma(current->mm, addr);
3613 if (!vma)
3614 return -ENOMEM;
3615 /*
3616 * We want to touch writable mappings with a write fault in order
3617 * to break COW, except for shared mappings because these don't COW
3618 * and we would not want to dirty them for nothing.
3619 */
3620 write = (vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE;
3621 BUG_ON(addr >= end);
3622 BUG_ON(end > vma->vm_end);
3623 len = DIV_ROUND_UP(end, PAGE_SIZE) - addr/PAGE_SIZE;
3624 ret = get_user_pages(current, current->mm, addr,
3625 len, write, 0, NULL, NULL);
3626 if (ret < 0)
3627 return ret;
3628 return ret == len ? 0 : -EFAULT;
3629}
3630
3631#if !defined(__HAVE_ARCH_GATE_AREA)
3632
3633#if defined(AT_SYSINFO_EHDR)
3634static struct vm_area_struct gate_vma;
3635
3636static int __init gate_vma_init(void)
3637{
3638 gate_vma.vm_mm = NULL;
3639 gate_vma.vm_start = FIXADDR_USER_START;
3640 gate_vma.vm_end = FIXADDR_USER_END;
3641 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
3642 gate_vma.vm_page_prot = __P101;
3643
3644 return 0;
3645}
3646__initcall(gate_vma_init);
3647#endif
3648
3649struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
3650{
3651#ifdef AT_SYSINFO_EHDR
3652 return &gate_vma;
3653#else
3654 return NULL;
3655#endif
3656}
3657
3658int in_gate_area_no_mm(unsigned long addr)
3659{
3660#ifdef AT_SYSINFO_EHDR
3661 if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
3662 return 1;
3663#endif
3664 return 0;
3665}
3666
3667#endif /* __HAVE_ARCH_GATE_AREA */
3668
3669static int __follow_pte(struct mm_struct *mm, unsigned long address,
3670 pte_t **ptepp, spinlock_t **ptlp)
3671{
3672 pgd_t *pgd;
3673 pud_t *pud;
3674 pmd_t *pmd;
3675 pte_t *ptep;
3676
3677 pgd = pgd_offset(mm, address);
3678 if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
3679 goto out;
3680
3681 pud = pud_offset(pgd, address);
3682 if (pud_none(*pud) || unlikely(pud_bad(*pud)))
3683 goto out;
3684
3685 pmd = pmd_offset(pud, address);
3686 VM_BUG_ON(pmd_trans_huge(*pmd));
3687 if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
3688 goto out;
3689
3690 /* We cannot handle huge page PFN maps. Luckily they don't exist. */
3691 if (pmd_huge(*pmd))
3692 goto out;
3693
3694 ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
3695 if (!ptep)
3696 goto out;
3697 if (!pte_present(*ptep))
3698 goto unlock;
3699 *ptepp = ptep;
3700 return 0;
3701unlock:
3702 pte_unmap_unlock(ptep, *ptlp);
3703out:
3704 return -EINVAL;
3705}
3706
3707static inline int follow_pte(struct mm_struct *mm, unsigned long address,
3708 pte_t **ptepp, spinlock_t **ptlp)
3709{
3710 int res;
3711
3712 /* (void) is needed to make gcc happy */
3713 (void) __cond_lock(*ptlp,
3714 !(res = __follow_pte(mm, address, ptepp, ptlp)));
3715 return res;
3716}
3717
3718/**
3719 * follow_pfn - look up PFN at a user virtual address
3720 * @vma: memory mapping
3721 * @address: user virtual address
3722 * @pfn: location to store found PFN
3723 *
3724 * Only IO mappings and raw PFN mappings are allowed.
3725 *
3726 * Returns zero and the pfn at @pfn on success, -ve otherwise.
3727 */
3728int follow_pfn(struct vm_area_struct *vma, unsigned long address,
3729 unsigned long *pfn)
3730{
3731 int ret = -EINVAL;
3732 spinlock_t *ptl;
3733 pte_t *ptep;
3734
3735 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
3736 return ret;
3737
3738 ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
3739 if (ret)
3740 return ret;
3741 *pfn = pte_pfn(*ptep);
3742 pte_unmap_unlock(ptep, ptl);
3743 return 0;
3744}
3745EXPORT_SYMBOL(follow_pfn);
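
/*
 * Illustrative sketch: given a VM_PFNMAP or VM_IO vma found while mmap_sem
 * is held for read, look up the pfn backing a user address:
 *
 *	unsigned long pfn;
 *
 *	if (!follow_pfn(vma, address, &pfn))
 *		pr_info("%#lx maps pfn %#lx\n", address, pfn);
 *
 * The pte lock is dropped again before returning, so the pfn is only a
 * snapshot and callers must cope with it going stale.
 */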
3746
3747#ifdef CONFIG_HAVE_IOREMAP_PROT
3748int follow_phys(struct vm_area_struct *vma,
3749 unsigned long address, unsigned int flags,
3750 unsigned long *prot, resource_size_t *phys)
3751{
3752 int ret = -EINVAL;
3753 pte_t *ptep, pte;
3754 spinlock_t *ptl;
3755
3756 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
3757 goto out;
3758
3759 if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
3760 goto out;
3761 pte = *ptep;
3762
3763 if ((flags & FOLL_WRITE) && !pte_write(pte))
3764 goto unlock;
3765
3766 *prot = pgprot_val(pte_pgprot(pte));
3767 *phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
3768
3769 ret = 0;
3770unlock:
3771 pte_unmap_unlock(ptep, ptl);
3772out:
3773 return ret;
3774}
3775
3776int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
3777 void *buf, int len, int write)
3778{
3779 resource_size_t phys_addr;
3780 unsigned long prot = 0;
3781 void __iomem *maddr;
3782 int offset = addr & (PAGE_SIZE-1);
3783
3784 if (follow_phys(vma, addr, write, &prot, &phys_addr))
3785 return -EINVAL;
3786
3787 maddr = ioremap_prot(phys_addr, PAGE_SIZE, prot);
3788 if (write)
3789 memcpy_toio(maddr + offset, buf, len);
3790 else
3791 memcpy_fromio(buf, maddr + offset, len);
3792 iounmap(maddr);
3793
3794 return len;
3795}
3796#endif
3797
3798/*
3799 * Access another process' address space as given in mm. If non-NULL, use the
3800 * given task for page fault accounting.
3801 */
3802static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
3803 unsigned long addr, void *buf, int len, int write)
3804{
3805 struct vm_area_struct *vma;
3806 void *old_buf = buf;
3807
3808 down_read(&mm->mmap_sem);
3809 /* ignore errors, just check how much was successfully transferred */
3810 while (len) {
3811 int bytes, ret, offset;
3812 void *maddr;
3813 struct page *page = NULL;
3814
3815 ret = get_user_pages(tsk, mm, addr, 1,
3816 write, 1, &page, &vma);
3817 if (ret <= 0) {
3818 /*
3819 * Check if this is a VM_IO | VM_PFNMAP VMA, which
3820 * we can access using slightly different code.
3821 */
3822#ifdef CONFIG_HAVE_IOREMAP_PROT
3823 vma = find_vma(mm, addr);
3824 if (!vma || vma->vm_start > addr)
3825 break;
3826 if (vma->vm_ops && vma->vm_ops->access)
3827 ret = vma->vm_ops->access(vma, addr, buf,
3828 len, write);
3829 if (ret <= 0)
3830#endif
3831 break;
3832 bytes = ret;
3833 } else {
3834 bytes = len;
3835 offset = addr & (PAGE_SIZE-1);
3836 if (bytes > PAGE_SIZE-offset)
3837 bytes = PAGE_SIZE-offset;
3838
3839 maddr = kmap(page);
3840 if (write) {
3841 copy_to_user_page(vma, page, addr,
3842 maddr + offset, buf, bytes);
3843 set_page_dirty_lock(page);
3844 } else {
3845 copy_from_user_page(vma, page, addr,
3846 buf, maddr + offset, bytes);
3847 }
3848 kunmap(page);
3849			put_page(page);
3850 }
3851 len -= bytes;
3852 buf += bytes;
3853 addr += bytes;
3854 }
3855 up_read(&mm->mmap_sem);
3856
3857 return buf - old_buf;
3858}
3859
3860/**
3861 * access_remote_vm - access another process' address space
3862 * @mm: the mm_struct of the target address space
3863 * @addr: start address to access
3864 * @buf: source or destination buffer
3865 * @len: number of bytes to transfer
3866 * @write: whether the access is a write
3867 *
3868 * The caller must hold a reference on @mm.
 *
 * Return: number of bytes copied from source to destination.
3869 */
3870int access_remote_vm(struct mm_struct *mm, unsigned long addr,
3871 void *buf, int len, int write)
3872{
3873 return __access_remote_vm(NULL, mm, addr, buf, len, write);
3874}
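
/*
 * Example (illustrative only; mm, addr and the error handling are
 * hypothetical): a caller that already holds a reference on the target mm,
 * for instance from get_task_mm(), could read a small buffer like this:
 *
 *	char data[64];
 *	int copied;
 *
 *	copied = access_remote_vm(mm, addr, data, sizeof(data), 0);
 *	if (copied != sizeof(data))
 *		return -EFAULT;
 */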
3875
3876/*
3877 * Access another process' address space.
3878 * The source/target buffer must be in kernel space.
3879 * Do not walk the page tables directly; use get_user_pages().
3880 */
3881int access_process_vm(struct task_struct *tsk, unsigned long addr,
3882 void *buf, int len, int write)
3883{
3884 struct mm_struct *mm;
3885 int ret;
3886
3887 mm = get_task_mm(tsk);
3888 if (!mm)
3889 return 0;
3890
3891 ret = __access_remote_vm(tsk, mm, addr, buf, len, write);
3892 mmput(mm);
3893
3894 return ret;
3895}
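
/*
 * Example (illustrative only; child, addr and new_value are hypothetical):
 * unlike access_remote_vm(), access_process_vm() takes a task and acquires
 * and releases its mm internally, so a ptrace-style word write reduces to:
 *
 *	unsigned long val = new_value;
 *
 *	if (access_process_vm(child, addr, &val, sizeof(val), 1) != sizeof(val))
 *		return -EIO;
 */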
3896
3897/*
3898 * Print the file name, start address and size of the VMA that contains @ip.
3899 */
3900void print_vma_addr(char *prefix, unsigned long ip)
3901{
3902 struct mm_struct *mm = current->mm;
3903 struct vm_area_struct *vma;
3904
3905 /*
3906 * Do not print if we are in atomic
3907 * contexts (in exception stacks, etc.):
3908 */
3909 if (preempt_count())
3910 return;
3911
3912 down_read(&mm->mmap_sem);
3913 vma = find_vma(mm, ip);
3914 if (vma && vma->vm_file) {
3915 struct file *f = vma->vm_file;
3916 char *buf = (char *)__get_free_page(GFP_KERNEL);
3917 if (buf) {
3918 char *p, *s;
3919
3920 p = d_path(&f->f_path, buf, PAGE_SIZE);
3921 if (IS_ERR(p))
3922 p = "?";
3923 s = strrchr(p, '/');
3924 if (s)
3925 p = s+1;
3926 printk("%s%s[%lx+%lx]", prefix, p,
3927 vma->vm_start,
3928 vma->vm_end - vma->vm_start);
3929 free_page((unsigned long)buf);
3930 }
3931 }
3932	up_read(&current->mm->mmap_sem);
3933}
3934
3935#ifdef CONFIG_PROVE_LOCKING
3936void might_fault(void)
3937{
3938 /*
3939 * Some code (nfs/sunrpc) uses socket ops on kernel memory while
3940 * holding the mmap_sem, this is safe because kernel memory doesn't
3941 * get paged out, therefore we'll never actually fault, and the
3942 * below annotations will generate false positives.
3943 */
3944 if (segment_eq(get_fs(), KERNEL_DS))
3945 return;
3946
3947 might_sleep();
3948 /*
3949 * it would be nicer only to annotate paths which are not under
3950 * pagefault_disable, however that requires a larger audit and
3951 * providing helpers like get_user_atomic.
3952 */
3953 if (!in_atomic() && current->mm)
3954		might_lock_read(&current->mm->mmap_sem);
3955}
3956EXPORT_SYMBOL(might_fault);
3957#endif
3958
3959#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
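/*
 * Clear/copy huge pages one base page at a time, with cond_resched()
 * between pages so that very large (gigantic) pages do not cause
 * scheduling stalls.  The *_gigantic variants step through the pages with
 * mem_map_next() because the struct pages of a gigantic page are not
 * guaranteed to be contiguous in mem_map.
 */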
3960static void clear_gigantic_page(struct page *page,
3961 unsigned long addr,
3962 unsigned int pages_per_huge_page)
3963{
3964 int i;
3965 struct page *p = page;
3966
3967 might_sleep();
3968 for (i = 0; i < pages_per_huge_page;
3969 i++, p = mem_map_next(p, page, i)) {
3970 cond_resched();
3971 clear_user_highpage(p, addr + i * PAGE_SIZE);
3972 }
3973}
3974void clear_huge_page(struct page *page,
3975 unsigned long addr, unsigned int pages_per_huge_page)
3976{
3977 int i;
3978
3979 if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
3980 clear_gigantic_page(page, addr, pages_per_huge_page);
3981 return;
3982 }
3983
3984 might_sleep();
3985 for (i = 0; i < pages_per_huge_page; i++) {
3986 cond_resched();
3987 clear_user_highpage(page + i, addr + i * PAGE_SIZE);
3988 }
3989}
3990
3991static void copy_user_gigantic_page(struct page *dst, struct page *src,
3992 unsigned long addr,
3993 struct vm_area_struct *vma,
3994 unsigned int pages_per_huge_page)
3995{
3996 int i;
3997 struct page *dst_base = dst;
3998 struct page *src_base = src;
3999
4000 for (i = 0; i < pages_per_huge_page; ) {
4001 cond_resched();
4002 copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
4003
4004 i++;
4005 dst = mem_map_next(dst, dst_base, i);
4006 src = mem_map_next(src, src_base, i);
4007 }
4008}
4009
4010void copy_user_huge_page(struct page *dst, struct page *src,
4011 unsigned long addr, struct vm_area_struct *vma,
4012 unsigned int pages_per_huge_page)
4013{
4014 int i;
4015
4016 if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
4017 copy_user_gigantic_page(dst, src, addr, vma,
4018 pages_per_huge_page);
4019 return;
4020 }
4021
4022 might_sleep();
4023 for (i = 0; i < pages_per_huge_page; i++) {
4024 cond_resched();
4025 copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
4026 }
4027}
4028#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */