1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * mm/mmap.c
4 *
5 * Written by obz.
6 *
7 * Address space accounting code <alan@lxorguk.ukuu.org.uk>
8 */
9
10#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11
12#include <linux/kernel.h>
13#include <linux/slab.h>
14#include <linux/backing-dev.h>
15#include <linux/mm.h>
16#include <linux/mm_inline.h>
17#include <linux/shm.h>
18#include <linux/mman.h>
19#include <linux/pagemap.h>
20#include <linux/swap.h>
21#include <linux/syscalls.h>
22#include <linux/capability.h>
23#include <linux/init.h>
24#include <linux/file.h>
25#include <linux/fs.h>
26#include <linux/personality.h>
27#include <linux/security.h>
28#include <linux/hugetlb.h>
29#include <linux/shmem_fs.h>
30#include <linux/profile.h>
31#include <linux/export.h>
32#include <linux/mount.h>
33#include <linux/mempolicy.h>
34#include <linux/rmap.h>
35#include <linux/mmu_notifier.h>
36#include <linux/mmdebug.h>
37#include <linux/perf_event.h>
38#include <linux/audit.h>
39#include <linux/khugepaged.h>
40#include <linux/uprobes.h>
41#include <linux/notifier.h>
42#include <linux/memory.h>
43#include <linux/printk.h>
44#include <linux/userfaultfd_k.h>
45#include <linux/moduleparam.h>
46#include <linux/pkeys.h>
47#include <linux/oom.h>
48#include <linux/sched/mm.h>
49#include <linux/ksm.h>
50
51#include <linux/uaccess.h>
52#include <asm/cacheflush.h>
53#include <asm/tlb.h>
54#include <asm/mmu_context.h>
55
56#define CREATE_TRACE_POINTS
57#include <trace/events/mmap.h>
58
59#include "internal.h"
60
61#ifndef arch_mmap_check
62#define arch_mmap_check(addr, len, flags) (0)
63#endif
64
65#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
66const int mmap_rnd_bits_min = CONFIG_ARCH_MMAP_RND_BITS_MIN;
67const int mmap_rnd_bits_max = CONFIG_ARCH_MMAP_RND_BITS_MAX;
68int mmap_rnd_bits __read_mostly = CONFIG_ARCH_MMAP_RND_BITS;
69#endif
70#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
71const int mmap_rnd_compat_bits_min = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN;
72const int mmap_rnd_compat_bits_max = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX;
73int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS;
74#endif
75
76static bool ignore_rlimit_data;
77core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644);
78
79static void unmap_region(struct mm_struct *mm, struct ma_state *mas,
80 struct vm_area_struct *vma, struct vm_area_struct *prev,
81 struct vm_area_struct *next, unsigned long start,
82 unsigned long end, unsigned long tree_end, bool mm_wr_locked);
83
84static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
85{
86 return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
87}
88
89/* Update vma->vm_page_prot to reflect vma->vm_flags. */
90void vma_set_page_prot(struct vm_area_struct *vma)
91{
92 unsigned long vm_flags = vma->vm_flags;
93 pgprot_t vm_page_prot;
94
95 vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
96 if (vma_wants_writenotify(vma, vm_page_prot)) {
97 vm_flags &= ~VM_SHARED;
98 vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags);
99 }
100 /* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */
101 WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
102}
103
104/*
105 * Requires inode->i_mapping->i_mmap_rwsem
106 */
107static void __remove_shared_vm_struct(struct vm_area_struct *vma,
108 struct file *file, struct address_space *mapping)
109{
110 if (vma_is_shared_maywrite(vma))
111 mapping_unmap_writable(mapping);
112
113 flush_dcache_mmap_lock(mapping);
114 vma_interval_tree_remove(vma, &mapping->i_mmap);
115 flush_dcache_mmap_unlock(mapping);
116}
117
118/*
119 * Unlink a file-based vm structure from its interval tree, to hide
120 * vma from rmap and vmtruncate before freeing its page tables.
121 */
122void unlink_file_vma(struct vm_area_struct *vma)
123{
124 struct file *file = vma->vm_file;
125
126 if (file) {
127 struct address_space *mapping = file->f_mapping;
128 i_mmap_lock_write(mapping);
129 __remove_shared_vm_struct(vma, file, mapping);
130 i_mmap_unlock_write(mapping);
131 }
132}
133
134/*
135 * Close a vm structure and free it.
136 */
137static void remove_vma(struct vm_area_struct *vma, bool unreachable)
138{
139 might_sleep();
140 if (vma->vm_ops && vma->vm_ops->close)
141 vma->vm_ops->close(vma);
142 if (vma->vm_file)
143 fput(vma->vm_file);
144 mpol_put(vma_policy(vma));
145 if (unreachable)
146 __vm_area_free(vma);
147 else
148 vm_area_free(vma);
149}
150
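/*
 * vma_prev_limit() - Walk back to the previous VMA, but no further than @min.
 * brk() uses this to find the vma preceding the break without walking below
 * mm->start_brk.
 */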
151static inline struct vm_area_struct *vma_prev_limit(struct vma_iterator *vmi,
152 unsigned long min)
153{
154 return mas_prev(&vmi->mas, min);
155}
156
157/*
158 * check_brk_limits() - Use platform-specific checks of the range and verify mlock
159 * limits.
160 * @addr: The address to check
161 * @len: The size of the increase.
162 *
163 * Return: 0 on success.
164 */
165static int check_brk_limits(unsigned long addr, unsigned long len)
166{
167 unsigned long mapped_addr;
168
169 mapped_addr = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
170 if (IS_ERR_VALUE(mapped_addr))
171 return mapped_addr;
172
173 return mlock_future_ok(current->mm, current->mm->def_flags, len)
174 ? 0 : -EAGAIN;
175}
176static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *brkvma,
177 unsigned long addr, unsigned long request, unsigned long flags);
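/*
 * brk() - Set the end of the heap (program break) to @brk.
 *
 * Shrinking unmaps the freed range; growing extends or creates the brk vma
 * after the rlimit, mlock and stack guard gap checks.  Returns the new break
 * on success, or the unchanged old break on failure.
 */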
178SYSCALL_DEFINE1(brk, unsigned long, brk)
179{
180 unsigned long newbrk, oldbrk, origbrk;
181 struct mm_struct *mm = current->mm;
182 struct vm_area_struct *brkvma, *next = NULL;
183 unsigned long min_brk;
184 bool populate = false;
185 LIST_HEAD(uf);
186 struct vma_iterator vmi;
187
188 if (mmap_write_lock_killable(mm))
189 return -EINTR;
190
191 origbrk = mm->brk;
192
193#ifdef CONFIG_COMPAT_BRK
194 /*
195 * CONFIG_COMPAT_BRK can still be overridden by setting
196 * randomize_va_space to 2, which will still cause mm->start_brk
197 * to be arbitrarily shifted
198 */
199 if (current->brk_randomized)
200 min_brk = mm->start_brk;
201 else
202 min_brk = mm->end_data;
203#else
204 min_brk = mm->start_brk;
205#endif
206 if (brk < min_brk)
207 goto out;
208
209 /*
210 * Check against rlimit here. If this check is done later after the test
211 * of oldbrk with newbrk then it can escape the test and let the data
212 * segment grow beyond its set limit in the case where the limit is
213 * not page aligned -Ram Gupta
214 */
215 if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
216 mm->end_data, mm->start_data))
217 goto out;
218
219 newbrk = PAGE_ALIGN(brk);
220 oldbrk = PAGE_ALIGN(mm->brk);
221 if (oldbrk == newbrk) {
222 mm->brk = brk;
223 goto success;
224 }
225
226 /* Always allow shrinking brk. */
227 if (brk <= mm->brk) {
228 /* Search one past newbrk */
229 vma_iter_init(&vmi, mm, newbrk);
230 brkvma = vma_find(&vmi, oldbrk);
231 if (!brkvma || brkvma->vm_start >= oldbrk)
232 goto out; /* mapping intersects with an existing non-brk vma. */
233 /*
234 * mm->brk must be protected by write mmap_lock.
235 * do_vma_munmap() will drop the lock on success, so update it
236 * before calling do_vma_munmap().
237 */
238 mm->brk = brk;
239 if (do_vma_munmap(&vmi, brkvma, newbrk, oldbrk, &uf, true))
240 goto out;
241
242 goto success_unlocked;
243 }
244
245 if (check_brk_limits(oldbrk, newbrk - oldbrk))
246 goto out;
247
248 /*
249 * Only check if the next VMA is within the stack_guard_gap of the
250 * expansion area
251 */
252 vma_iter_init(&vmi, mm, oldbrk);
253 next = vma_find(&vmi, newbrk + PAGE_SIZE + stack_guard_gap);
254 if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
255 goto out;
256
257 brkvma = vma_prev_limit(&vmi, mm->start_brk);
258 /* Ok, looks good - let it rip. */
259 if (do_brk_flags(&vmi, brkvma, oldbrk, newbrk - oldbrk, 0) < 0)
260 goto out;
261
262 mm->brk = brk;
263 if (mm->def_flags & VM_LOCKED)
264 populate = true;
265
266success:
267 mmap_write_unlock(mm);
268success_unlocked:
269 userfaultfd_unmap_complete(mm, &uf);
270 if (populate)
271 mm_populate(oldbrk, newbrk - oldbrk);
272 return brk;
273
274out:
275 mm->brk = origbrk;
276 mmap_write_unlock(mm);
277 return origbrk;
278}
279
280#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
281static void validate_mm(struct mm_struct *mm)
282{
283 int bug = 0;
284 int i = 0;
285 struct vm_area_struct *vma;
286 VMA_ITERATOR(vmi, mm, 0);
287
288 mt_validate(&mm->mm_mt);
289 for_each_vma(vmi, vma) {
290#ifdef CONFIG_DEBUG_VM_RB
291 struct anon_vma *anon_vma = vma->anon_vma;
292 struct anon_vma_chain *avc;
293#endif
294 unsigned long vmi_start, vmi_end;
295 bool warn = 0;
296
297 vmi_start = vma_iter_addr(&vmi);
298 vmi_end = vma_iter_end(&vmi);
299 if (VM_WARN_ON_ONCE_MM(vma->vm_end != vmi_end, mm))
300 warn = 1;
301
302 if (VM_WARN_ON_ONCE_MM(vma->vm_start != vmi_start, mm))
303 warn = 1;
304
305 if (warn) {
306 pr_emerg("issue in %s\n", current->comm);
307 dump_stack();
308 dump_vma(vma);
309 pr_emerg("tree range: %px start %lx end %lx\n", vma,
310 vmi_start, vmi_end - 1);
311 vma_iter_dump_tree(&vmi);
312 }
313
314#ifdef CONFIG_DEBUG_VM_RB
315 if (anon_vma) {
316 anon_vma_lock_read(anon_vma);
317 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
318 anon_vma_interval_tree_verify(avc);
319 anon_vma_unlock_read(anon_vma);
320 }
321#endif
322 i++;
323 }
324 if (i != mm->map_count) {
325 pr_emerg("map_count %d vma iterator %d\n", mm->map_count, i);
326 bug = 1;
327 }
328 VM_BUG_ON_MM(bug, mm);
329}
330
331#else /* !CONFIG_DEBUG_VM_MAPLE_TREE */
332#define validate_mm(mm) do { } while (0)
333#endif /* CONFIG_DEBUG_VM_MAPLE_TREE */
334
335/*
336 * vma has some anon_vma assigned, and is already inserted on that
337 * anon_vma's interval trees.
338 *
339 * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
340 * vma must be removed from the anon_vma's interval trees using
341 * anon_vma_interval_tree_pre_update_vma().
342 *
343 * After the update, the vma will be reinserted using
344 * anon_vma_interval_tree_post_update_vma().
345 *
346 * The entire update must be protected by exclusive mmap_lock and by
347 * the root anon_vma's mutex.
348 */
349static inline void
350anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
351{
352 struct anon_vma_chain *avc;
353
354 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
355 anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
356}
357
358static inline void
359anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
360{
361 struct anon_vma_chain *avc;
362
363 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
364 anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
365}
366
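/*
 * count_vma_pages_range() - Count the pages covered by existing VMAs in the
 * range [addr, end).
 */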
367static unsigned long count_vma_pages_range(struct mm_struct *mm,
368 unsigned long addr, unsigned long end)
369{
370 VMA_ITERATOR(vmi, mm, addr);
371 struct vm_area_struct *vma;
372 unsigned long nr_pages = 0;
373
374 for_each_vma_range(vmi, vma, end) {
375 unsigned long vm_start = max(addr, vma->vm_start);
376 unsigned long vm_end = min(end, vma->vm_end);
377
378 nr_pages += PHYS_PFN(vm_end - vm_start);
379 }
380
381 return nr_pages;
382}
383
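/*
 * __vma_link_file() - Add @vma to the file's reverse-mapping interval tree.
 * Requires @mapping->i_mmap_rwsem held for write.
 */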
384static void __vma_link_file(struct vm_area_struct *vma,
385 struct address_space *mapping)
386{
387 if (vma_is_shared_maywrite(vma))
388 mapping_allow_writable(mapping);
389
390 flush_dcache_mmap_lock(mapping);
391 vma_interval_tree_insert(vma, &mapping->i_mmap);
392 flush_dcache_mmap_unlock(mapping);
393}
394
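/*
 * vma_link() - Insert @vma into the mm's VMA tree and, for file mappings,
 * into the file's i_mmap tree.  Returns 0 on success or -ENOMEM if the
 * maple tree preallocation fails.
 */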
395static int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
396{
397 VMA_ITERATOR(vmi, mm, 0);
398 struct address_space *mapping = NULL;
399
400 vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
401 if (vma_iter_prealloc(&vmi, vma))
402 return -ENOMEM;
403
404 vma_start_write(vma);
405
406 vma_iter_store(&vmi, vma);
407
408 if (vma->vm_file) {
409 mapping = vma->vm_file->f_mapping;
410 i_mmap_lock_write(mapping);
411 __vma_link_file(vma, mapping);
412 i_mmap_unlock_write(mapping);
413 }
414
415 mm->map_count++;
416 validate_mm(mm);
417 return 0;
418}
419
420/*
421 * init_multi_vma_prep() - Initializer for struct vma_prepare
422 * @vp: The vma_prepare struct
423 * @vma: The vma that will be altered once locked
424 * @next: The next vma if it is to be adjusted
425 * @remove: The first vma to be removed
426 * @remove2: The second vma to be removed
427 */
428static inline void init_multi_vma_prep(struct vma_prepare *vp,
429 struct vm_area_struct *vma, struct vm_area_struct *next,
430 struct vm_area_struct *remove, struct vm_area_struct *remove2)
431{
432 memset(vp, 0, sizeof(struct vma_prepare));
433 vp->vma = vma;
434 vp->anon_vma = vma->anon_vma;
435 vp->remove = remove;
436 vp->remove2 = remove2;
437 vp->adj_next = next;
438 if (!vp->anon_vma && next)
439 vp->anon_vma = next->anon_vma;
440
441 vp->file = vma->vm_file;
442 if (vp->file)
443 vp->mapping = vma->vm_file->f_mapping;
444
445}
446
447/*
448 * init_vma_prep() - Initializer wrapper for vma_prepare struct
449 * @vp: The vma_prepare struct
450 * @vma: The vma that will be altered once locked
451 */
452static inline void init_vma_prep(struct vma_prepare *vp,
453 struct vm_area_struct *vma)
454{
455 init_multi_vma_prep(vp, vma, NULL, NULL, NULL);
456}
457
458
459/*
460 * vma_prepare() - Helper function for handling locking VMAs prior to altering
461 * @vp: The initialized vma_prepare struct
462 */
463static inline void vma_prepare(struct vma_prepare *vp)
464{
465 if (vp->file) {
466 uprobe_munmap(vp->vma, vp->vma->vm_start, vp->vma->vm_end);
467
468 if (vp->adj_next)
469 uprobe_munmap(vp->adj_next, vp->adj_next->vm_start,
470 vp->adj_next->vm_end);
471
472 i_mmap_lock_write(vp->mapping);
473 if (vp->insert && vp->insert->vm_file) {
474 /*
475 * Put into interval tree now, so instantiated pages
476 * are visible to arm/parisc __flush_dcache_page
477 * throughout; but we cannot insert into address
478 * space until vma start or end is updated.
479 */
480 __vma_link_file(vp->insert,
481 vp->insert->vm_file->f_mapping);
482 }
483 }
484
485 if (vp->anon_vma) {
486 anon_vma_lock_write(vp->anon_vma);
487 anon_vma_interval_tree_pre_update_vma(vp->vma);
488 if (vp->adj_next)
489 anon_vma_interval_tree_pre_update_vma(vp->adj_next);
490 }
491
492 if (vp->file) {
493 flush_dcache_mmap_lock(vp->mapping);
494 vma_interval_tree_remove(vp->vma, &vp->mapping->i_mmap);
495 if (vp->adj_next)
496 vma_interval_tree_remove(vp->adj_next,
497 &vp->mapping->i_mmap);
498 }
499
500}
501
502/*
503 * vma_complete() - Helper function for handling the unlocking after altering VMAs,
504 * or for inserting a VMA.
505 *
506 * @vp: The vma_prepare struct
507 * @vmi: The vma iterator
508 * @mm: The mm_struct
509 */
510static inline void vma_complete(struct vma_prepare *vp,
511 struct vma_iterator *vmi, struct mm_struct *mm)
512{
513 if (vp->file) {
514 if (vp->adj_next)
515 vma_interval_tree_insert(vp->adj_next,
516 &vp->mapping->i_mmap);
517 vma_interval_tree_insert(vp->vma, &vp->mapping->i_mmap);
518 flush_dcache_mmap_unlock(vp->mapping);
519 }
520
521 if (vp->remove && vp->file) {
522 __remove_shared_vm_struct(vp->remove, vp->file, vp->mapping);
523 if (vp->remove2)
524 __remove_shared_vm_struct(vp->remove2, vp->file,
525 vp->mapping);
526 } else if (vp->insert) {
527 /*
528 * split_vma has split insert from vma, and needs
529 * us to insert it before dropping the locks
530 * (it may either follow vma or precede it).
531 */
532 vma_iter_store(vmi, vp->insert);
533 mm->map_count++;
534 }
535
536 if (vp->anon_vma) {
537 anon_vma_interval_tree_post_update_vma(vp->vma);
538 if (vp->adj_next)
539 anon_vma_interval_tree_post_update_vma(vp->adj_next);
540 anon_vma_unlock_write(vp->anon_vma);
541 }
542
543 if (vp->file) {
544 i_mmap_unlock_write(vp->mapping);
545 uprobe_mmap(vp->vma);
546
547 if (vp->adj_next)
548 uprobe_mmap(vp->adj_next);
549 }
550
551 if (vp->remove) {
552again:
553 vma_mark_detached(vp->remove, true);
554 if (vp->file) {
555 uprobe_munmap(vp->remove, vp->remove->vm_start,
556 vp->remove->vm_end);
557 fput(vp->file);
558 }
559 if (vp->remove->anon_vma)
560 anon_vma_merge(vp->vma, vp->remove);
561 mm->map_count--;
562 mpol_put(vma_policy(vp->remove));
563 if (!vp->remove2)
564 WARN_ON_ONCE(vp->vma->vm_end < vp->remove->vm_end);
565 vm_area_free(vp->remove);
566
567 /*
568 * In mprotect's case 6 (see comments on vma_merge),
569 * we are removing both mid and next vmas
570 */
571 if (vp->remove2) {
572 vp->remove = vp->remove2;
573 vp->remove2 = NULL;
574 goto again;
575 }
576 }
577 if (vp->insert && vp->file)
578 uprobe_mmap(vp->insert);
579 validate_mm(mm);
580}
581
582/*
583 * dup_anon_vma() - Helper function to duplicate anon_vma
584 * @dst: The destination VMA
585 * @src: The source VMA
586 * @dup: Pointer to the destination VMA when successful.
587 *
588 * Returns: 0 on success.
589 */
590static inline int dup_anon_vma(struct vm_area_struct *dst,
591 struct vm_area_struct *src, struct vm_area_struct **dup)
592{
593 /*
594 * Easily overlooked: when mprotect shifts the boundary, make sure the
595 * expanding vma has anon_vma set if the shrinking vma had, to cover any
596 * anon pages imported.
597 */
598 if (src->anon_vma && !dst->anon_vma) {
599 int ret;
600
601 vma_assert_write_locked(dst);
602 dst->anon_vma = src->anon_vma;
603 ret = anon_vma_clone(dst, src);
604 if (ret)
605 return ret;
606
607 *dup = dst;
608 }
609
610 return 0;
611}
612
613/*
614 * vma_expand - Expand an existing VMA
615 *
616 * @vmi: The vma iterator
617 * @vma: The vma to expand
618 * @start: The start of the vma
619 * @end: The exclusive end of the vma
620 * @pgoff: The page offset of vma
621 * @next: The vma following @vma; it may be removed if @vma expands over it.
622 *
623 * Expand @vma to @start and @end. Can expand off the start and end. Will
624 * expand over @next if it's different from @vma and @end == @next->vm_end.
625 * Checking if the @vma can expand and merge with @next needs to be handled by
626 * the caller.
627 *
628 * Returns: 0 on success
629 */
630int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
631 unsigned long start, unsigned long end, pgoff_t pgoff,
632 struct vm_area_struct *next)
633{
634 struct vm_area_struct *anon_dup = NULL;
635 bool remove_next = false;
636 struct vma_prepare vp;
637
638 vma_start_write(vma);
639 if (next && (vma != next) && (end == next->vm_end)) {
640 int ret;
641
642 remove_next = true;
643 vma_start_write(next);
644 ret = dup_anon_vma(vma, next, &anon_dup);
645 if (ret)
646 return ret;
647 }
648
649 init_multi_vma_prep(&vp, vma, NULL, remove_next ? next : NULL, NULL);
650 /* Not merging but overwriting any part of next is not handled. */
651 VM_WARN_ON(next && !vp.remove &&
652 next != vma && end > next->vm_start);
653 /* Only handles expanding */
654 VM_WARN_ON(vma->vm_start < start || vma->vm_end > end);
655
656 /* Note: vma iterator must be pointing to 'start' */
657 vma_iter_config(vmi, start, end);
658 if (vma_iter_prealloc(vmi, vma))
659 goto nomem;
660
661 vma_prepare(&vp);
662 vma_adjust_trans_huge(vma, start, end, 0);
663 vma->vm_start = start;
664 vma->vm_end = end;
665 vma->vm_pgoff = pgoff;
666 vma_iter_store(vmi, vma);
667
668 vma_complete(&vp, vmi, vma->vm_mm);
669 return 0;
670
671nomem:
672 if (anon_dup)
673 unlink_anon_vmas(anon_dup);
674 return -ENOMEM;
675}
676
677/*
678 * vma_shrink() - Reduce an existing VMAs memory area
679 * @vmi: The vma iterator
680 * @vma: The VMA to modify
681 * @start: The new start
682 * @end: The new end
683 *
684 * Returns: 0 on success, -ENOMEM otherwise
685 */
686int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
687 unsigned long start, unsigned long end, pgoff_t pgoff)
688{
689 struct vma_prepare vp;
690
691 WARN_ON((vma->vm_start != start) && (vma->vm_end != end));
692
693 if (vma->vm_start < start)
694 vma_iter_config(vmi, vma->vm_start, start);
695 else
696 vma_iter_config(vmi, end, vma->vm_end);
697
698 if (vma_iter_prealloc(vmi, NULL))
699 return -ENOMEM;
700
701 vma_start_write(vma);
702
703 init_vma_prep(&vp, vma);
704 vma_prepare(&vp);
705 vma_adjust_trans_huge(vma, start, end, 0);
706
707 vma_iter_clear(vmi);
708 vma->vm_start = start;
709 vma->vm_end = end;
710 vma->vm_pgoff = pgoff;
711 vma_complete(&vp, vmi, vma->vm_mm);
712 return 0;
713}
714
715/*
716 * If the vma has a ->close operation then the driver probably needs to release
717 * per-vma resources, so we don't attempt to merge those if the caller indicates
718 * the current vma may be removed as part of the merge.
719 */
720static inline bool is_mergeable_vma(struct vm_area_struct *vma,
721 struct file *file, unsigned long vm_flags,
722 struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
723 struct anon_vma_name *anon_name, bool may_remove_vma)
724{
725 /*
726 * VM_SOFTDIRTY should not prevent VMA merging if the flags match
727 * apart from the dirty bit -- the caller should mark the merged
728 * VMA as dirty instead. If the dirty bit were included in the
729 * comparison, we would increase pressure on the memory system by
730 * forcing the kernel to generate new VMAs where an old one could
731 * have been extended.
732 */
733 if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY)
734 return false;
735 if (vma->vm_file != file)
736 return false;
737 if (may_remove_vma && vma->vm_ops && vma->vm_ops->close)
738 return false;
739 if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx))
740 return false;
741 if (!anon_vma_name_eq(anon_vma_name(vma), anon_name))
742 return false;
743 return true;
744}
745
746static inline bool is_mergeable_anon_vma(struct anon_vma *anon_vma1,
747 struct anon_vma *anon_vma2, struct vm_area_struct *vma)
748{
749 /*
750 * The list_is_singular() test is to avoid merging VMA cloned from
751 * parents. This helps scalability by avoiding anon_vma lock contention.
752 */
753 if ((!anon_vma1 || !anon_vma2) && (!vma ||
754 list_is_singular(&vma->anon_vma_chain)))
755 return true;
756 return anon_vma1 == anon_vma2;
757}
758
759/*
760 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
761 * in front of (at a lower virtual address and file offset than) the vma.
762 *
763 * We cannot merge two vmas if they have differently assigned (non-NULL)
764 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
765 *
766 * We don't check here for the merged mmap wrapping around the end of pagecache
767 * indices (16TB on ia32) because do_mmap() does not permit mmaps which
768 * wrap, nor mmaps which cover the final page at index -1UL.
769 *
770 * We assume the vma may be removed as part of the merge.
771 */
772static bool
773can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
774 struct anon_vma *anon_vma, struct file *file,
775 pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
776 struct anon_vma_name *anon_name)
777{
778 if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name, true) &&
779 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
780 if (vma->vm_pgoff == vm_pgoff)
781 return true;
782 }
783 return false;
784}
785
786/*
787 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
788 * beyond (at a higher virtual address and file offset than) the vma.
789 *
790 * We cannot merge two vmas if they have differently assigned (non-NULL)
791 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
792 *
793 * We assume that vma is not removed as part of the merge.
794 */
795static bool
796can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
797 struct anon_vma *anon_vma, struct file *file,
798 pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
799 struct anon_vma_name *anon_name)
800{
801 if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name, false) &&
802 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
803 pgoff_t vm_pglen;
804 vm_pglen = vma_pages(vma);
805 if (vma->vm_pgoff + vm_pglen == vm_pgoff)
806 return true;
807 }
808 return false;
809}
810
811/*
812 * Given a mapping request (addr,end,vm_flags,file,pgoff,anon_name),
813 * figure out whether that can be merged with its predecessor or its
814 * successor. Or both (it neatly fills a hole).
815 *
816 * In most cases - when called for mmap, brk or mremap - [addr,end) is
817 * certain not to be mapped by the time vma_merge is called; but when
818 * called for mprotect, it is certain to be already mapped (either at
819 * an offset within prev, or at the start of next), and the flags of
820 * this area are about to be changed to vm_flags - and the no-change
821 * case has already been eliminated.
822 *
823 * The following mprotect cases have to be considered, where **** is
824 * the area passed down from mprotect_fixup, never extending beyond one
825 * vma, PPPP is the previous vma, CCCC is a concurrent vma that starts
826 * at the same address as **** and is of the same or larger span, and
827 * NNNN the next vma after ****:
828 *
829 * **** **** ****
830 * PPPPPPNNNNNN PPPPPPNNNNNN PPPPPPCCCCCC
831 * cannot merge might become might become
832 * PPNNNNNNNNNN PPPPPPPPPPCC
833 * mmap, brk or case 4 below case 5 below
834 * mremap move:
835 * **** ****
836 * PPPP NNNN PPPPCCCCNNNN
837 * might become might become
838 * PPPPPPPPPPPP 1 or PPPPPPPPPPPP 6 or
839 * PPPPPPPPNNNN 2 or PPPPPPPPNNNN 7 or
840 * PPPPNNNNNNNN 3 PPPPNNNNNNNN 8
841 *
842 * It is important for case 8 that the vma CCCC overlapping the
843 * region **** is never going to be extended over NNNN. Instead NNNN must
844 * be extended in region **** and CCCC must be removed. This way in
845 * all cases where vma_merge succeeds, the moment vma_merge drops the
846 * rmap_locks, the properties of the merged vma will be already
847 * correct for the whole merged range. Some of those properties like
848 * vm_page_prot/vm_flags may be accessed by rmap_walks and they must
849 * be correct for the whole merged range immediately after the
850 * rmap_locks are released. Otherwise if NNNN would be removed and
851 * CCCC would be extended over the NNNN range, remove_migration_ptes
852 * or other rmap walkers (if working on addresses beyond the "end"
853 * parameter) may establish ptes with the wrong permissions of CCCC
854 * instead of the right permissions of NNNN.
855 *
856 * In the code below:
857 * PPPP is represented by *prev
858 * CCCC is represented by *curr or not represented at all (NULL)
859 * NNNN is represented by *next or not represented at all (NULL)
860 * **** is not represented - it will be merged and the vma containing the
861 * area is returned, or the function will return NULL
862 */
863static struct vm_area_struct
864*vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
865 struct vm_area_struct *prev, unsigned long addr, unsigned long end,
866 unsigned long vm_flags, struct anon_vma *anon_vma, struct file *file,
867 pgoff_t pgoff, struct mempolicy *policy,
868 struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
869 struct anon_vma_name *anon_name)
870{
871 struct vm_area_struct *curr, *next, *res;
872 struct vm_area_struct *vma, *adjust, *remove, *remove2;
873 struct vm_area_struct *anon_dup = NULL;
874 struct vma_prepare vp;
875 pgoff_t vma_pgoff;
876 int err = 0;
877 bool merge_prev = false;
878 bool merge_next = false;
879 bool vma_expanded = false;
880 unsigned long vma_start = addr;
881 unsigned long vma_end = end;
882 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
883 long adj_start = 0;
884
885 /*
886 * We later require that vma->vm_flags == vm_flags,
887 * so this tests vma->vm_flags & VM_SPECIAL, too.
888 */
889 if (vm_flags & VM_SPECIAL)
890 return NULL;
891
892 /* Does the input range span an existing VMA? (cases 5 - 8) */
893 curr = find_vma_intersection(mm, prev ? prev->vm_end : 0, end);
894
895 if (!curr || /* cases 1 - 4 */
896 end == curr->vm_end) /* cases 6 - 8, adjacent VMA */
897 next = vma_lookup(mm, end);
898 else
899 next = NULL; /* case 5 */
900
901 if (prev) {
902 vma_start = prev->vm_start;
903 vma_pgoff = prev->vm_pgoff;
904
905 /* Can we merge the predecessor? */
906 if (addr == prev->vm_end && mpol_equal(vma_policy(prev), policy)
907 && can_vma_merge_after(prev, vm_flags, anon_vma, file,
908 pgoff, vm_userfaultfd_ctx, anon_name)) {
909 merge_prev = true;
910 vma_prev(vmi);
911 }
912 }
913
914 /* Can we merge the successor? */
915 if (next && mpol_equal(policy, vma_policy(next)) &&
916 can_vma_merge_before(next, vm_flags, anon_vma, file, pgoff+pglen,
917 vm_userfaultfd_ctx, anon_name)) {
918 merge_next = true;
919 }
920
921 /* Verify some invariants that must be enforced by the caller. */
922 VM_WARN_ON(prev && addr <= prev->vm_start);
923 VM_WARN_ON(curr && (addr != curr->vm_start || end > curr->vm_end));
924 VM_WARN_ON(addr >= end);
925
926 if (!merge_prev && !merge_next)
927 return NULL; /* Not mergeable. */
928
929 if (merge_prev)
930 vma_start_write(prev);
931
932 res = vma = prev;
933 remove = remove2 = adjust = NULL;
934
935 /* Can we merge both the predecessor and the successor? */
936 if (merge_prev && merge_next &&
937 is_mergeable_anon_vma(prev->anon_vma, next->anon_vma, NULL)) {
938 vma_start_write(next);
939 remove = next; /* case 1 */
940 vma_end = next->vm_end;
941 err = dup_anon_vma(prev, next, &anon_dup);
942 if (curr) { /* case 6 */
943 vma_start_write(curr);
944 remove = curr;
945 remove2 = next;
946 /*
947 * Note that the dup_anon_vma below cannot overwrite err
948 * since the first caller would do nothing unless next
949 * has an anon_vma.
950 */
951 if (!next->anon_vma)
952 err = dup_anon_vma(prev, curr, &anon_dup);
953 }
954 } else if (merge_prev) { /* case 2 */
955 if (curr) {
956 vma_start_write(curr);
957 if (end == curr->vm_end) { /* case 7 */
958 /*
959 * can_vma_merge_after() assumed we would not be
960 * removing prev vma, so it skipped the check
961 * for vm_ops->close, but we are removing curr
962 */
963 if (curr->vm_ops && curr->vm_ops->close)
964 err = -EINVAL;
965 remove = curr;
966 } else { /* case 5 */
967 adjust = curr;
968 adj_start = (end - curr->vm_start);
969 }
970 if (!err)
971 err = dup_anon_vma(prev, curr, &anon_dup);
972 }
973 } else { /* merge_next */
974 vma_start_write(next);
975 res = next;
976 if (prev && addr < prev->vm_end) { /* case 4 */
977 vma_start_write(prev);
978 vma_end = addr;
979 adjust = next;
980 adj_start = -(prev->vm_end - addr);
981 err = dup_anon_vma(next, prev, &anon_dup);
982 } else {
983 /*
984 * Note that cases 3 and 8 are the ONLY ones where prev
985 * is permitted to be (but is not necessarily) NULL.
986 */
987 vma = next; /* case 3 */
988 vma_start = addr;
989 vma_end = next->vm_end;
990 vma_pgoff = next->vm_pgoff - pglen;
991 if (curr) { /* case 8 */
992 vma_pgoff = curr->vm_pgoff;
993 vma_start_write(curr);
994 remove = curr;
995 err = dup_anon_vma(next, curr, &anon_dup);
996 }
997 }
998 }
999
1000 /* Error in anon_vma clone. */
1001 if (err)
1002 goto anon_vma_fail;
1003
1004 if (vma_start < vma->vm_start || vma_end > vma->vm_end)
1005 vma_expanded = true;
1006
1007 if (vma_expanded) {
1008 vma_iter_config(vmi, vma_start, vma_end);
1009 } else {
1010 vma_iter_config(vmi, adjust->vm_start + adj_start,
1011 adjust->vm_end);
1012 }
1013
1014 if (vma_iter_prealloc(vmi, vma))
1015 goto prealloc_fail;
1016
1017 init_multi_vma_prep(&vp, vma, adjust, remove, remove2);
1018 VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma &&
1019 vp.anon_vma != adjust->anon_vma);
1020
1021 vma_prepare(&vp);
1022 vma_adjust_trans_huge(vma, vma_start, vma_end, adj_start);
1023
1024 vma->vm_start = vma_start;
1025 vma->vm_end = vma_end;
1026 vma->vm_pgoff = vma_pgoff;
1027
1028 if (vma_expanded)
1029 vma_iter_store(vmi, vma);
1030
1031 if (adj_start) {
1032 adjust->vm_start += adj_start;
1033 adjust->vm_pgoff += adj_start >> PAGE_SHIFT;
1034 if (adj_start < 0) {
1035 WARN_ON(vma_expanded);
1036 vma_iter_store(vmi, next);
1037 }
1038 }
1039
1040 vma_complete(&vp, vmi, mm);
1041 khugepaged_enter_vma(res, vm_flags);
1042 return res;
1043
1044prealloc_fail:
1045 if (anon_dup)
1046 unlink_anon_vmas(anon_dup);
1047
1048anon_vma_fail:
1049 vma_iter_set(vmi, addr);
1050 vma_iter_load(vmi);
1051 return NULL;
1052}
1053
1054/*
1055 * Rough compatibility check to quickly see if it's even worth looking
1056 * at sharing an anon_vma.
1057 *
1058 * They need to have the same vm_file, and the flags can only differ
1059 * in things that mprotect may change.
1060 *
1061 * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
1062 * we can merge the two vma's. For example, we refuse to merge a vma if
1063 * there is a vm_ops->close() function, because that indicates that the
1064 * driver is doing some kind of reference counting. But that doesn't
1065 * really matter for the anon_vma sharing case.
1066 */
1067static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
1068{
1069 return a->vm_end == b->vm_start &&
1070 mpol_equal(vma_policy(a), vma_policy(b)) &&
1071 a->vm_file == b->vm_file &&
1072 !((a->vm_flags ^ b->vm_flags) & ~(VM_ACCESS_FLAGS | VM_SOFTDIRTY)) &&
1073 b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
1074}
1075
1076/*
1077 * Do some basic sanity checking to see if we can re-use the anon_vma
1078 * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be
1079 * the same as 'old', the other will be the new one that is trying
1080 * to share the anon_vma.
1081 *
1082 * NOTE! This runs with mmap_lock held for reading, so it is possible that
1083 * the anon_vma of 'old' is concurrently in the process of being set up
1084 * by another page fault trying to merge _that_. But that's ok: if it
1085 * is being set up, that automatically means that it will be a singleton
1086 * acceptable for merging, so we can do all of this optimistically. But
1087 * we do that READ_ONCE() to make sure that we never re-load the pointer.
1088 *
1089 * IOW: that the "list_is_singular()" test on the anon_vma_chain only
1090 * matters for the 'stable anon_vma' case (ie the thing we want to avoid
1091 * is to return an anon_vma that is "complex" due to having gone through
1092 * a fork).
1093 *
1094 * We also make sure that the two vma's are compatible (adjacent,
1095 * and with the same memory policies). That's all stable, even with just
1096 * a read lock on the mmap_lock.
1097 */
1098static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b)
1099{
1100 if (anon_vma_compatible(a, b)) {
1101 struct anon_vma *anon_vma = READ_ONCE(old->anon_vma);
1102
1103 if (anon_vma && list_is_singular(&old->anon_vma_chain))
1104 return anon_vma;
1105 }
1106 return NULL;
1107}
1108
1109/*
1110 * find_mergeable_anon_vma is used by anon_vma_prepare, to check
1111 * neighbouring vmas for a suitable anon_vma, before it goes off
1112 * to allocate a new anon_vma. It checks because a repetitive
1113 * sequence of mprotects and faults may otherwise lead to distinct
1114 * anon_vmas being allocated, preventing vma merge in subsequent
1115 * mprotect.
1116 */
1117struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
1118{
1119 MA_STATE(mas, &vma->vm_mm->mm_mt, vma->vm_end, vma->vm_end);
1120 struct anon_vma *anon_vma = NULL;
1121 struct vm_area_struct *prev, *next;
1122
1123 /* Try next first. */
1124 next = mas_walk(&mas);
1125 if (next) {
1126 anon_vma = reusable_anon_vma(next, vma, next);
1127 if (anon_vma)
1128 return anon_vma;
1129 }
1130
1131 prev = mas_prev(&mas, 0);
1132 VM_BUG_ON_VMA(prev != vma, vma);
1133 prev = mas_prev(&mas, 0);
1134 /* Try prev next. */
1135 if (prev)
1136 anon_vma = reusable_anon_vma(prev, prev, vma);
1137
1138 /*
1139 * We might reach here with anon_vma == NULL if we can't find
1140 * any reusable anon_vma.
1141 * There's no absolute need to look only at touching neighbours:
1142 * we could search further afield for "compatible" anon_vmas.
1143 * But it would probably just be a waste of time searching,
1144 * or lead to too many vmas hanging off the same anon_vma.
1145 * We're trying to allow mprotect remerging later on,
1146 * not trying to minimize memory used for anon_vmas.
1147 */
1148 return anon_vma;
1149}
1150
1151/*
1152 * If a hint addr is less than mmap_min_addr, change the hint to be as
1153 * low as possible but still greater than mmap_min_addr.
1154 */
1155static inline unsigned long round_hint_to_min(unsigned long hint)
1156{
1157 hint &= PAGE_MASK;
1158 if (((void *)hint != NULL) &&
1159 (hint < mmap_min_addr))
1160 return PAGE_ALIGN(mmap_min_addr);
1161 return hint;
1162}
1163
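/*
 * mlock_future_ok() - Check that locking @bytes more of the address space
 * stays within RLIMIT_MEMLOCK.  Always succeeds when VM_LOCKED is not
 * requested or the task has CAP_IPC_LOCK.
 */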
1164bool mlock_future_ok(struct mm_struct *mm, unsigned long flags,
1165 unsigned long bytes)
1166{
1167 unsigned long locked_pages, limit_pages;
1168
1169 if (!(flags & VM_LOCKED) || capable(CAP_IPC_LOCK))
1170 return true;
1171
1172 locked_pages = bytes >> PAGE_SHIFT;
1173 locked_pages += mm->locked_vm;
1174
1175 limit_pages = rlimit(RLIMIT_MEMLOCK);
1176 limit_pages >>= PAGE_SHIFT;
1177
1178 return locked_pages <= limit_pages;
1179}
1180
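/*
 * file_mmap_size_max() - The maximum byte offset a mapping of this file may
 * reach.  A return of 0 (the FMODE_UNSIGNED_OFFSET case) effectively
 * disables the size checks in file_mmap_ok().
 */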
1181static inline u64 file_mmap_size_max(struct file *file, struct inode *inode)
1182{
1183 if (S_ISREG(inode->i_mode))
1184 return MAX_LFS_FILESIZE;
1185
1186 if (S_ISBLK(inode->i_mode))
1187 return MAX_LFS_FILESIZE;
1188
1189 if (S_ISSOCK(inode->i_mode))
1190 return MAX_LFS_FILESIZE;
1191
1192 /* Special "we do even unsigned file positions" case */
1193 if (file->f_mode & FMODE_UNSIGNED_OFFSET)
1194 return 0;
1195
1196 /* Yes, random drivers might want more. But I'm tired of buggy drivers */
1197 return ULONG_MAX;
1198}
1199
1200static inline bool file_mmap_ok(struct file *file, struct inode *inode,
1201 unsigned long pgoff, unsigned long len)
1202{
1203 u64 maxsize = file_mmap_size_max(file, inode);
1204
1205 if (maxsize && len > maxsize)
1206 return false;
1207 maxsize -= len;
1208 if (pgoff > maxsize >> PAGE_SHIFT)
1209 return false;
1210 return true;
1211}
1212
1213/*
1214 * The caller must write-lock current->mm->mmap_lock.
1215 */
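/*
 * Most callers do not call do_mmap() directly but go through vm_mmap_pgoff(),
 * which takes the mmap_lock, calls do_mmap() and then populates the mapping
 * if requested.
 */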
1216unsigned long do_mmap(struct file *file, unsigned long addr,
1217 unsigned long len, unsigned long prot,
1218 unsigned long flags, vm_flags_t vm_flags,
1219 unsigned long pgoff, unsigned long *populate,
1220 struct list_head *uf)
1221{
1222 struct mm_struct *mm = current->mm;
1223 int pkey = 0;
1224
1225 *populate = 0;
1226
1227 if (!len)
1228 return -EINVAL;
1229
1230 /*
1231 * Does the application expect PROT_READ to imply PROT_EXEC?
1232 *
1233 * (the exception is when the underlying filesystem is noexec
1234 * mounted, in which case we don't add PROT_EXEC.)
1235 */
1236 if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
1237 if (!(file && path_noexec(&file->f_path)))
1238 prot |= PROT_EXEC;
1239
1240 /* force arch specific MAP_FIXED handling in get_unmapped_area */
1241 if (flags & MAP_FIXED_NOREPLACE)
1242 flags |= MAP_FIXED;
1243
1244 if (!(flags & MAP_FIXED))
1245 addr = round_hint_to_min(addr);
1246
1247 /* Careful about overflows.. */
1248 len = PAGE_ALIGN(len);
1249 if (!len)
1250 return -ENOMEM;
1251
1252 /* offset overflow? */
1253 if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
1254 return -EOVERFLOW;
1255
1256 /* Too many mappings? */
1257 if (mm->map_count > sysctl_max_map_count)
1258 return -ENOMEM;
1259
1260 /* Obtain the address to map to. We verify (or select) it and ensure
1261 * that it represents a valid section of the address space.
1262 */
1263 addr = get_unmapped_area(file, addr, len, pgoff, flags);
1264 if (IS_ERR_VALUE(addr))
1265 return addr;
1266
1267 if (flags & MAP_FIXED_NOREPLACE) {
1268 if (find_vma_intersection(mm, addr, addr + len))
1269 return -EEXIST;
1270 }
1271
1272 if (prot == PROT_EXEC) {
1273 pkey = execute_only_pkey(mm);
1274 if (pkey < 0)
1275 pkey = 0;
1276 }
1277
1278 /* Do simple checking here so the lower-level routines won't have
1279 * to. We assume access permissions have been handled by the open
1280 * of the memory object, so we don't do any here.
1281 */
1282 vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) |
1283 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
1284
1285 if (flags & MAP_LOCKED)
1286 if (!can_do_mlock())
1287 return -EPERM;
1288
1289 if (!mlock_future_ok(mm, vm_flags, len))
1290 return -EAGAIN;
1291
1292 if (file) {
1293 struct inode *inode = file_inode(file);
1294 unsigned long flags_mask;
1295
1296 if (!file_mmap_ok(file, inode, pgoff, len))
1297 return -EOVERFLOW;
1298
1299 flags_mask = LEGACY_MAP_MASK | file->f_op->mmap_supported_flags;
1300
1301 switch (flags & MAP_TYPE) {
1302 case MAP_SHARED:
1303 /*
1304 * Force use of MAP_SHARED_VALIDATE with non-legacy
1305 * flags. E.g. MAP_SYNC is dangerous to use with
1306 * MAP_SHARED as you don't know which consistency model
1307 * you will get. We silently ignore unsupported flags
1308 * with MAP_SHARED to preserve backward compatibility.
1309 */
1310 flags &= LEGACY_MAP_MASK;
1311 fallthrough;
1312 case MAP_SHARED_VALIDATE:
1313 if (flags & ~flags_mask)
1314 return -EOPNOTSUPP;
1315 if (prot & PROT_WRITE) {
1316 if (!(file->f_mode & FMODE_WRITE))
1317 return -EACCES;
1318 if (IS_SWAPFILE(file->f_mapping->host))
1319 return -ETXTBSY;
1320 }
1321
1322 /*
1323 * Make sure we don't allow writing to an append-only
1324 * file..
1325 */
1326 if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
1327 return -EACCES;
1328
1329 vm_flags |= VM_SHARED | VM_MAYSHARE;
1330 if (!(file->f_mode & FMODE_WRITE))
1331 vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
1332 fallthrough;
1333 case MAP_PRIVATE:
1334 if (!(file->f_mode & FMODE_READ))
1335 return -EACCES;
1336 if (path_noexec(&file->f_path)) {
1337 if (vm_flags & VM_EXEC)
1338 return -EPERM;
1339 vm_flags &= ~VM_MAYEXEC;
1340 }
1341
1342 if (!file->f_op->mmap)
1343 return -ENODEV;
1344 if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
1345 return -EINVAL;
1346 break;
1347
1348 default:
1349 return -EINVAL;
1350 }
1351 } else {
1352 switch (flags & MAP_TYPE) {
1353 case MAP_SHARED:
1354 if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
1355 return -EINVAL;
1356 /*
1357 * Ignore pgoff.
1358 */
1359 pgoff = 0;
1360 vm_flags |= VM_SHARED | VM_MAYSHARE;
1361 break;
1362 case MAP_PRIVATE:
1363 /*
1364 * Set pgoff according to addr for anon_vma.
1365 */
1366 pgoff = addr >> PAGE_SHIFT;
1367 break;
1368 default:
1369 return -EINVAL;
1370 }
1371 }
1372
1373 /*
1374 * Set 'VM_NORESERVE' if we should not account for the
1375 * memory use of this mapping.
1376 */
1377 if (flags & MAP_NORESERVE) {
1378 /* We honor MAP_NORESERVE if allowed to overcommit */
1379 if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
1380 vm_flags |= VM_NORESERVE;
1381
1382 /* hugetlb applies strict overcommit unless MAP_NORESERVE */
1383 if (file && is_file_hugepages(file))
1384 vm_flags |= VM_NORESERVE;
1385 }
1386
1387 addr = mmap_region(file, addr, len, vm_flags, pgoff, uf);
1388 if (!IS_ERR_VALUE(addr) &&
1389 ((vm_flags & VM_LOCKED) ||
1390 (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
1391 *populate = len;
1392 return addr;
1393}
1394
1395unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
1396 unsigned long prot, unsigned long flags,
1397 unsigned long fd, unsigned long pgoff)
1398{
1399 struct file *file = NULL;
1400 unsigned long retval;
1401
1402 if (!(flags & MAP_ANONYMOUS)) {
1403 audit_mmap_fd(fd, flags);
1404 file = fget(fd);
1405 if (!file)
1406 return -EBADF;
1407 if (is_file_hugepages(file)) {
1408 len = ALIGN(len, huge_page_size(hstate_file(file)));
1409 } else if (unlikely(flags & MAP_HUGETLB)) {
1410 retval = -EINVAL;
1411 goto out_fput;
1412 }
1413 } else if (flags & MAP_HUGETLB) {
1414 struct hstate *hs;
1415
1416 hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
1417 if (!hs)
1418 return -EINVAL;
1419
1420 len = ALIGN(len, huge_page_size(hs));
1421 /*
1422 * VM_NORESERVE is used because the reservations will be
1423 * taken when vm_ops->mmap() is called
1424 */
1425 file = hugetlb_file_setup(HUGETLB_ANON_FILE, len,
1426 VM_NORESERVE,
1427 HUGETLB_ANONHUGE_INODE,
1428 (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
1429 if (IS_ERR(file))
1430 return PTR_ERR(file);
1431 }
1432
1433 retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
1434out_fput:
1435 if (file)
1436 fput(file);
1437 return retval;
1438}
1439
1440SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
1441 unsigned long, prot, unsigned long, flags,
1442 unsigned long, fd, unsigned long, pgoff)
1443{
1444 return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
1445}
1446
1447#ifdef __ARCH_WANT_SYS_OLD_MMAP
1448struct mmap_arg_struct {
1449 unsigned long addr;
1450 unsigned long len;
1451 unsigned long prot;
1452 unsigned long flags;
1453 unsigned long fd;
1454 unsigned long offset;
1455};
1456
1457SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
1458{
1459 struct mmap_arg_struct a;
1460
1461 if (copy_from_user(&a, arg, sizeof(a)))
1462 return -EFAULT;
1463 if (offset_in_page(a.offset))
1464 return -EINVAL;
1465
1466 return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
1467 a.offset >> PAGE_SHIFT);
1468}
1469#endif /* __ARCH_WANT_SYS_OLD_MMAP */
1470
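/*
 * Helpers for deciding whether a shared, writable mapping must have its PTEs
 * write-protected so that the first write to a page can be caught (for
 * writenotify and dirty tracking).
 */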
1471static bool vm_ops_needs_writenotify(const struct vm_operations_struct *vm_ops)
1472{
1473 return vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite);
1474}
1475
1476static bool vma_is_shared_writable(struct vm_area_struct *vma)
1477{
1478 return (vma->vm_flags & (VM_WRITE | VM_SHARED)) ==
1479 (VM_WRITE | VM_SHARED);
1480}
1481
1482static bool vma_fs_can_writeback(struct vm_area_struct *vma)
1483{
1484 /* No managed pages to writeback. */
1485 if (vma->vm_flags & VM_PFNMAP)
1486 return false;
1487
1488 return vma->vm_file && vma->vm_file->f_mapping &&
1489 mapping_can_writeback(vma->vm_file->f_mapping);
1490}
1491
1492/*
1493 * Does this VMA require the underlying folios to have their dirty state
1494 * tracked?
1495 */
1496bool vma_needs_dirty_tracking(struct vm_area_struct *vma)
1497{
1498 /* Only shared, writable VMAs require dirty tracking. */
1499 if (!vma_is_shared_writable(vma))
1500 return false;
1501
1502 /* Does the filesystem need to be notified? */
1503 if (vm_ops_needs_writenotify(vma->vm_ops))
1504 return true;
1505
1506 /*
1507 * Even if the filesystem doesn't indicate a need for writenotify, if it
1508 * can writeback, dirty tracking is still required.
1509 */
1510 return vma_fs_can_writeback(vma);
1511}
1512
1513/*
1514 * Some shared mappings will want the pages marked read-only
1515 * to track write events. If so, we'll downgrade vm_page_prot
1516 * to the private version (using protection_map[] without the
1517 * VM_SHARED bit).
1518 */
1519int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
1520{
1521 /* If it was private or non-writable, the write bit is already clear */
1522 if (!vma_is_shared_writable(vma))
1523 return 0;
1524
1525 /* The backer wishes to know when pages are first written to? */
1526 if (vm_ops_needs_writenotify(vma->vm_ops))
1527 return 1;
1528
1529 /* The open routine did something to the protections that pgprot_modify
1530 * won't preserve? */
1531 if (pgprot_val(vm_page_prot) !=
1532 pgprot_val(vm_pgprot_modify(vm_page_prot, vma->vm_flags)))
1533 return 0;
1534
1535 /*
1536 * Do we need to track softdirty? hugetlb does not support softdirty
1537 * tracking yet.
1538 */
1539 if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma))
1540 return 1;
1541
1542 /* Do we need write faults for uffd-wp tracking? */
1543 if (userfaultfd_wp(vma))
1544 return 1;
1545
1546 /* Can the mapping track the dirty pages? */
1547 return vma_fs_can_writeback(vma);
1548}
1549
1550/*
1551 * We account for memory if it's a private writeable mapping,
1552 * not hugepages and VM_NORESERVE wasn't set.
1553 */
1554static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags)
1555{
1556 /*
1557 * hugetlb has its own accounting separate from the core VM.
1558 * VM_HUGETLB may not be set yet so we cannot check for that flag.
1559 */
1560 if (file && is_file_hugepages(file))
1561 return 0;
1562
1563 return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
1564}
1565
1566/**
1567 * unmapped_area() - Find an area between the low_limit and the high_limit with
1568 * the correct alignment and offset, all from @info. Note: current->mm is used
1569 * for the search.
1570 *
1571 * @info: The unmapped area information including the range [low_limit,
1572 * high_limit), the alignment offset and mask.
1573 *
1574 * Return: A memory address or -ENOMEM.
1575 */
1576static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
1577{
1578 unsigned long length, gap;
1579 unsigned long low_limit, high_limit;
1580 struct vm_area_struct *tmp;
1581
1582 MA_STATE(mas, &current->mm->mm_mt, 0, 0);
1583
1584 /* Adjust search length to account for worst case alignment overhead */
1585 length = info->length + info->align_mask;
1586 if (length < info->length)
1587 return -ENOMEM;
1588
1589 low_limit = info->low_limit;
1590 if (low_limit < mmap_min_addr)
1591 low_limit = mmap_min_addr;
1592 high_limit = info->high_limit;
1593retry:
1594 if (mas_empty_area(&mas, low_limit, high_limit - 1, length))
1595 return -ENOMEM;
1596
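 /*
  * Round the gap start up so that
  * (gap & align_mask) == (align_offset & align_mask).
  */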
1597 gap = mas.index;
1598 gap += (info->align_offset - gap) & info->align_mask;
1599 tmp = mas_next(&mas, ULONG_MAX);
1600 if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
1601 if (vm_start_gap(tmp) < gap + length - 1) {
1602 low_limit = tmp->vm_end;
1603 mas_reset(&mas);
1604 goto retry;
1605 }
1606 } else {
1607 tmp = mas_prev(&mas, 0);
1608 if (tmp && vm_end_gap(tmp) > gap) {
1609 low_limit = vm_end_gap(tmp);
1610 mas_reset(&mas);
1611 goto retry;
1612 }
1613 }
1614
1615 return gap;
1616}
1617
1618/**
1619 * unmapped_area_topdown() - Find an area between the low_limit and the
1620 * high_limit with the correct alignment and offset at the highest available
1621 * address, all from @info. Note: current->mm is used for the search.
1622 *
1623 * @info: The unmapped area information including the range [low_limit,
1624 * high_limit), the alignment offset and mask.
1625 *
1626 * Return: A memory address or -ENOMEM.
1627 */
1628static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
1629{
1630 unsigned long length, gap, gap_end;
1631 unsigned long low_limit, high_limit;
1632 struct vm_area_struct *tmp;
1633
1634 MA_STATE(mas, &current->mm->mm_mt, 0, 0);
1635 /* Adjust search length to account for worst case alignment overhead */
1636 length = info->length + info->align_mask;
1637 if (length < info->length)
1638 return -ENOMEM;
1639
1640 low_limit = info->low_limit;
1641 if (low_limit < mmap_min_addr)
1642 low_limit = mmap_min_addr;
1643 high_limit = info->high_limit;
1644retry:
1645 if (mas_empty_area_rev(&mas, low_limit, high_limit - 1, length))
1646 return -ENOMEM;
1647
1648 gap = mas.last + 1 - info->length;
1649 gap -= (gap - info->align_offset) & info->align_mask;
1650 gap_end = mas.last;
1651 tmp = mas_next(&mas, ULONG_MAX);
1652 if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
1653 if (vm_start_gap(tmp) <= gap_end) {
1654 high_limit = vm_start_gap(tmp);
1655 mas_reset(&mas);
1656 goto retry;
1657 }
1658 } else {
1659 tmp = mas_prev(&mas, 0);
1660 if (tmp && vm_end_gap(tmp) > gap) {
1661 high_limit = tmp->vm_start;
1662 mas_reset(&mas);
1663 goto retry;
1664 }
1665 }
1666
1667 return gap;
1668}
1669
1670/*
1671 * Search for an unmapped address range.
1672 *
1673 * We are looking for a range that:
1674 * - does not intersect with any VMA;
1675 * - is contained within the [low_limit, high_limit) interval;
1676 * - is at least the desired size.
1677 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
1678 */
1679unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info)
1680{
1681 unsigned long addr;
1682
1683 if (info->flags & VM_UNMAPPED_AREA_TOPDOWN)
1684 addr = unmapped_area_topdown(info);
1685 else
1686 addr = unmapped_area(info);
1687
1688 trace_vm_unmapped_area(addr, info);
1689 return addr;
1690}
1691
1692/* Get an address range which is currently unmapped.
1693 * For shmat() with addr=0.
1694 *
1695 * Ugly calling convention alert:
1696 * Return value with the low bits set means error value,
1697 * ie
1698 * if (ret & ~PAGE_MASK)
1699 * error = ret;
1700 *
1701 * This function "knows" that -ENOMEM has the bits set.
1702 */
1703unsigned long
1704generic_get_unmapped_area(struct file *filp, unsigned long addr,
1705 unsigned long len, unsigned long pgoff,
1706 unsigned long flags)
1707{
1708 struct mm_struct *mm = current->mm;
1709 struct vm_area_struct *vma, *prev;
1710 struct vm_unmapped_area_info info;
1711 const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
1712
1713 if (len > mmap_end - mmap_min_addr)
1714 return -ENOMEM;
1715
1716 if (flags & MAP_FIXED)
1717 return addr;
1718
1719 if (addr) {
1720 addr = PAGE_ALIGN(addr);
1721 vma = find_vma_prev(mm, addr, &prev);
1722 if (mmap_end - len >= addr && addr >= mmap_min_addr &&
1723 (!vma || addr + len <= vm_start_gap(vma)) &&
1724 (!prev || addr >= vm_end_gap(prev)))
1725 return addr;
1726 }
1727
1728 info.flags = 0;
1729 info.length = len;
1730 info.low_limit = mm->mmap_base;
1731 info.high_limit = mmap_end;
1732 info.align_mask = 0;
1733 info.align_offset = 0;
1734 return vm_unmapped_area(&info);
1735}
1736
1737#ifndef HAVE_ARCH_UNMAPPED_AREA
1738unsigned long
1739arch_get_unmapped_area(struct file *filp, unsigned long addr,
1740 unsigned long len, unsigned long pgoff,
1741 unsigned long flags)
1742{
1743 return generic_get_unmapped_area(filp, addr, len, pgoff, flags);
1744}
1745#endif
1746
1747/*
1748 * This mmap-allocator allocates new areas top-down from below the
1749 * stack's low limit (the base):
1750 */
1751unsigned long
1752generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
1753 unsigned long len, unsigned long pgoff,
1754 unsigned long flags)
1755{
1756 struct vm_area_struct *vma, *prev;
1757 struct mm_struct *mm = current->mm;
1758 struct vm_unmapped_area_info info;
1759 const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
1760
1761 /* requested length too big for entire address space */
1762 if (len > mmap_end - mmap_min_addr)
1763 return -ENOMEM;
1764
1765 if (flags & MAP_FIXED)
1766 return addr;
1767
1768 /* requesting a specific address */
1769 if (addr) {
1770 addr = PAGE_ALIGN(addr);
1771 vma = find_vma_prev(mm, addr, &prev);
1772 if (mmap_end - len >= addr && addr >= mmap_min_addr &&
1773 (!vma || addr + len <= vm_start_gap(vma)) &&
1774 (!prev || addr >= vm_end_gap(prev)))
1775 return addr;
1776 }
1777
1778 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
1779 info.length = len;
1780 info.low_limit = PAGE_SIZE;
1781 info.high_limit = arch_get_mmap_base(addr, mm->mmap_base);
1782 info.align_mask = 0;
1783 info.align_offset = 0;
1784 addr = vm_unmapped_area(&info);
1785
1786 /*
1787 * A failed mmap() very likely causes application failure,
1788 * so fall back to the bottom-up function here. This scenario
1789 * can happen with large stack limits and large mmap()
1790 * allocations.
1791 */
1792 if (offset_in_page(addr)) {
1793 VM_BUG_ON(addr != -ENOMEM);
1794 info.flags = 0;
1795 info.low_limit = TASK_UNMAPPED_BASE;
1796 info.high_limit = mmap_end;
1797 addr = vm_unmapped_area(&info);
1798 }
1799
1800 return addr;
1801}
1802
1803#ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
1804unsigned long
1805arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
1806 unsigned long len, unsigned long pgoff,
1807 unsigned long flags)
1808{
1809 return generic_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
1810}
1811#endif
1812
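/*
 * get_unmapped_area() - Pick an address for a new mapping.  Dispatches to the
 * file's ->get_unmapped_area if one exists, to shmem for anonymous shared
 * mappings, to the THP helper for anonymous private mappings when THP is
 * enabled, and to the mm's default implementation otherwise, then validates
 * the result.
 */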
1813unsigned long
1814get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
1815 unsigned long pgoff, unsigned long flags)
1816{
1817 unsigned long (*get_area)(struct file *, unsigned long,
1818 unsigned long, unsigned long, unsigned long);
1819
1820 unsigned long error = arch_mmap_check(addr, len, flags);
1821 if (error)
1822 return error;
1823
1824 /* Careful about overflows.. */
1825 if (len > TASK_SIZE)
1826 return -ENOMEM;
1827
1828 get_area = current->mm->get_unmapped_area;
1829 if (file) {
1830 if (file->f_op->get_unmapped_area)
1831 get_area = file->f_op->get_unmapped_area;
1832 } else if (flags & MAP_SHARED) {
1833 /*
1834 * mmap_region() will call shmem_zero_setup() to create a file,
1835 * so use shmem's get_unmapped_area in case it can be huge.
1836 */
1837 get_area = shmem_get_unmapped_area;
1838 } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
1839 /* Ensures that larger anonymous mappings are THP aligned. */
1840 get_area = thp_get_unmapped_area;
1841 }
1842
1843 /* Always treat pgoff as zero for anonymous memory. */
1844 if (!file)
1845 pgoff = 0;
1846
1847 addr = get_area(file, addr, len, pgoff, flags);
1848 if (IS_ERR_VALUE(addr))
1849 return addr;
1850
1851 if (addr > TASK_SIZE - len)
1852 return -ENOMEM;
1853 if (offset_in_page(addr))
1854 return -EINVAL;
1855
1856 error = security_mmap_addr(addr);
1857 return error ? error : addr;
1858}
1859
1860EXPORT_SYMBOL(get_unmapped_area);
1861
1862/**
1863 * find_vma_intersection() - Look up the first VMA which intersects the interval
1864 * @mm: The process address space.
1865 * @start_addr: The inclusive start user address.
1866 * @end_addr: The exclusive end user address.
1867 *
1868 * Returns: The first VMA within the provided range, %NULL otherwise. Assumes
1869 * start_addr < end_addr.
1870 */
1871struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
1872 unsigned long start_addr,
1873 unsigned long end_addr)
1874{
1875 unsigned long index = start_addr;
1876
1877 mmap_assert_locked(mm);
1878 return mt_find(&mm->mm_mt, &index, end_addr - 1);
1879}
1880EXPORT_SYMBOL(find_vma_intersection);
1881
1882/**
1883 * find_vma() - Find the VMA for a given address, or the next VMA.
1884 * @mm: The mm_struct to check
1885 * @addr: The address
1886 *
1887 * Returns: The VMA associated with addr, or the next VMA.
1888 * May return %NULL in the case of no VMA at addr or above.
1889 */
1890struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
1891{
1892 unsigned long index = addr;
1893
1894 mmap_assert_locked(mm);
1895 return mt_find(&mm->mm_mt, &index, ULONG_MAX);
1896}
1897EXPORT_SYMBOL(find_vma);
1898
1899/**
1900 * find_vma_prev() - Find the VMA for a given address, or the next vma and
1901 * set %pprev to the previous VMA, if any.
1902 * @mm: The mm_struct to check
1903 * @addr: The address
1904 * @pprev: The pointer to set to the previous VMA
1905 *
 * Note that no RCU lock is taken here; the caller is expected to hold the
 * external mmap_lock instead.
1908 *
1909 * Returns: The VMA associated with @addr, or the next vma.
1910 * May return %NULL in the case of no vma at addr or above.
1911 */
1912struct vm_area_struct *
1913find_vma_prev(struct mm_struct *mm, unsigned long addr,
1914 struct vm_area_struct **pprev)
1915{
1916 struct vm_area_struct *vma;
1917 MA_STATE(mas, &mm->mm_mt, addr, addr);
1918
1919 vma = mas_walk(&mas);
1920 *pprev = mas_prev(&mas, 0);
1921 if (!vma)
1922 vma = mas_next(&mas, ULONG_MAX);
1923 return vma;
1924}
1925
1926/*
1927 * Verify that the stack growth is acceptable and
1928 * update accounting. This is shared with both the
1929 * grow-up and grow-down cases.
1930 */
1931static int acct_stack_growth(struct vm_area_struct *vma,
1932 unsigned long size, unsigned long grow)
1933{
1934 struct mm_struct *mm = vma->vm_mm;
1935 unsigned long new_start;
1936
1937 /* address space limit tests */
1938 if (!may_expand_vm(mm, vma->vm_flags, grow))
1939 return -ENOMEM;
1940
1941 /* Stack limit test */
1942 if (size > rlimit(RLIMIT_STACK))
1943 return -ENOMEM;
1944
1945 /* mlock limit tests */
1946 if (!mlock_future_ok(mm, vma->vm_flags, grow << PAGE_SHIFT))
1947 return -ENOMEM;
1948
1949 /* Check to ensure the stack will not grow into a hugetlb-only region */
1950 new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
1951 vma->vm_end - size;
1952 if (is_hugepage_only_range(vma->vm_mm, new_start, size))
1953 return -EFAULT;
1954
1955 /*
1956 * Overcommit.. This must be the final test, as it will
1957 * update security statistics.
1958 */
1959 if (security_vm_enough_memory_mm(mm, grow))
1960 return -ENOMEM;
1961
1962 return 0;
1963}
1964
1965#if defined(CONFIG_STACK_GROWSUP)
1966/*
1967 * PA-RISC uses this for its stack.
1968 * vma is the last one with address > vma->vm_end. Have to extend vma.
1969 */
1970static int expand_upwards(struct vm_area_struct *vma, unsigned long address)
1971{
1972 struct mm_struct *mm = vma->vm_mm;
1973 struct vm_area_struct *next;
1974 unsigned long gap_addr;
1975 int error = 0;
1976 MA_STATE(mas, &mm->mm_mt, vma->vm_start, address);
1977
1978 if (!(vma->vm_flags & VM_GROWSUP))
1979 return -EFAULT;
1980
1981 /* Guard against exceeding limits of the address space. */
1982 address &= PAGE_MASK;
1983 if (address >= (TASK_SIZE & PAGE_MASK))
1984 return -ENOMEM;
1985 address += PAGE_SIZE;
1986
1987 /* Enforce stack_guard_gap */
1988 gap_addr = address + stack_guard_gap;
1989
1990 /* Guard against overflow */
1991 if (gap_addr < address || gap_addr > TASK_SIZE)
1992 gap_addr = TASK_SIZE;
1993
1994 next = find_vma_intersection(mm, vma->vm_end, gap_addr);
1995 if (next && vma_is_accessible(next)) {
1996 if (!(next->vm_flags & VM_GROWSUP))
1997 return -ENOMEM;
1998 /* Check that both stack segments have the same anon_vma? */
1999 }
2000
2001 if (next)
2002 mas_prev_range(&mas, address);
2003
2004 __mas_set_range(&mas, vma->vm_start, address - 1);
2005 if (mas_preallocate(&mas, vma, GFP_KERNEL))
2006 return -ENOMEM;
2007
2008 /* We must make sure the anon_vma is allocated. */
2009 if (unlikely(anon_vma_prepare(vma))) {
2010 mas_destroy(&mas);
2011 return -ENOMEM;
2012 }
2013
2014 /* Lock the VMA before expanding to prevent concurrent page faults */
2015 vma_start_write(vma);
2016 /*
2017 * vma->vm_start/vm_end cannot change under us because the caller
2018 * is required to hold the mmap_lock in read mode. We need the
2019 * anon_vma lock to serialize against concurrent expand_stacks.
2020 */
2021 anon_vma_lock_write(vma->anon_vma);
2022
2023 /* Somebody else might have raced and expanded it already */
2024 if (address > vma->vm_end) {
2025 unsigned long size, grow;
2026
2027 size = address - vma->vm_start;
2028 grow = (address - vma->vm_end) >> PAGE_SHIFT;
2029
2030 error = -ENOMEM;
2031 if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
2032 error = acct_stack_growth(vma, size, grow);
2033 if (!error) {
2034 /*
				 * We only hold a shared mmap_lock here, so
2036 * we need to protect against concurrent vma
2037 * expansions. anon_vma_lock_write() doesn't
2038 * help here, as we don't guarantee that all
2039 * growable vmas in a mm share the same root
2040 * anon vma. So, we reuse mm->page_table_lock
2041 * to guard against concurrent vma expansions.
2042 */
2043 spin_lock(&mm->page_table_lock);
2044 if (vma->vm_flags & VM_LOCKED)
2045 mm->locked_vm += grow;
2046 vm_stat_account(mm, vma->vm_flags, grow);
2047 anon_vma_interval_tree_pre_update_vma(vma);
2048 vma->vm_end = address;
2049 /* Overwrite old entry in mtree. */
2050 mas_store_prealloc(&mas, vma);
2051 anon_vma_interval_tree_post_update_vma(vma);
2052 spin_unlock(&mm->page_table_lock);
2053
2054 perf_event_mmap(vma);
2055 }
2056 }
2057 }
2058 anon_vma_unlock_write(vma->anon_vma);
2059 khugepaged_enter_vma(vma, vma->vm_flags);
2060 mas_destroy(&mas);
2061 validate_mm(mm);
2062 return error;
2063}
2064#endif /* CONFIG_STACK_GROWSUP */
2065
2066/*
2067 * vma is the first one with address < vma->vm_start. Have to extend vma.
2068 * mmap_lock held for writing.
2069 */
2070int expand_downwards(struct vm_area_struct *vma, unsigned long address)
2071{
2072 struct mm_struct *mm = vma->vm_mm;
2073 MA_STATE(mas, &mm->mm_mt, vma->vm_start, vma->vm_start);
2074 struct vm_area_struct *prev;
2075 int error = 0;
2076
2077 if (!(vma->vm_flags & VM_GROWSDOWN))
2078 return -EFAULT;
2079
2080 address &= PAGE_MASK;
2081 if (address < mmap_min_addr || address < FIRST_USER_ADDRESS)
2082 return -EPERM;
2083
2084 /* Enforce stack_guard_gap */
2085 prev = mas_prev(&mas, 0);
2086 /* Check that both stack segments have the same anon_vma? */
2087 if (prev) {
2088 if (!(prev->vm_flags & VM_GROWSDOWN) &&
2089 vma_is_accessible(prev) &&
2090 (address - prev->vm_end < stack_guard_gap))
2091 return -ENOMEM;
2092 }
2093
2094 if (prev)
2095 mas_next_range(&mas, vma->vm_start);
2096
2097 __mas_set_range(&mas, address, vma->vm_end - 1);
2098 if (mas_preallocate(&mas, vma, GFP_KERNEL))
2099 return -ENOMEM;
2100
2101 /* We must make sure the anon_vma is allocated. */
2102 if (unlikely(anon_vma_prepare(vma))) {
2103 mas_destroy(&mas);
2104 return -ENOMEM;
2105 }
2106
2107 /* Lock the VMA before expanding to prevent concurrent page faults */
2108 vma_start_write(vma);
2109 /*
2110 * vma->vm_start/vm_end cannot change under us because the caller
2111 * is required to hold the mmap_lock in read mode. We need the
2112 * anon_vma lock to serialize against concurrent expand_stacks.
2113 */
2114 anon_vma_lock_write(vma->anon_vma);
2115
2116 /* Somebody else might have raced and expanded it already */
2117 if (address < vma->vm_start) {
2118 unsigned long size, grow;
2119
2120 size = vma->vm_end - address;
2121 grow = (vma->vm_start - address) >> PAGE_SHIFT;
2122
2123 error = -ENOMEM;
2124 if (grow <= vma->vm_pgoff) {
2125 error = acct_stack_growth(vma, size, grow);
2126 if (!error) {
2127 /*
				 * We only hold a shared mmap_lock here, so
2129 * we need to protect against concurrent vma
2130 * expansions. anon_vma_lock_write() doesn't
2131 * help here, as we don't guarantee that all
2132 * growable vmas in a mm share the same root
2133 * anon vma. So, we reuse mm->page_table_lock
2134 * to guard against concurrent vma expansions.
2135 */
2136 spin_lock(&mm->page_table_lock);
2137 if (vma->vm_flags & VM_LOCKED)
2138 mm->locked_vm += grow;
2139 vm_stat_account(mm, vma->vm_flags, grow);
2140 anon_vma_interval_tree_pre_update_vma(vma);
2141 vma->vm_start = address;
2142 vma->vm_pgoff -= grow;
2143 /* Overwrite old entry in mtree. */
2144 mas_store_prealloc(&mas, vma);
2145 anon_vma_interval_tree_post_update_vma(vma);
2146 spin_unlock(&mm->page_table_lock);
2147
2148 perf_event_mmap(vma);
2149 }
2150 }
2151 }
2152 anon_vma_unlock_write(vma->anon_vma);
2153 khugepaged_enter_vma(vma, vma->vm_flags);
2154 mas_destroy(&mas);
2155 validate_mm(mm);
2156 return error;
2157}
2158
2159/* enforced gap between the expanding stack and other mappings. */
2160unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
2161
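/*
 * Parse the "stack_guard_gap=" boot parameter.  The value is given in
 * pages, so for example booting with "stack_guard_gap=512" enforces a
 * 2MB gap on systems with 4KB pages.
 */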
2162static int __init cmdline_parse_stack_guard_gap(char *p)
2163{
2164 unsigned long val;
2165 char *endptr;
2166
2167 val = simple_strtoul(p, &endptr, 10);
2168 if (!*endptr)
2169 stack_guard_gap = val << PAGE_SHIFT;
2170
2171 return 1;
2172}
2173__setup("stack_guard_gap=", cmdline_parse_stack_guard_gap);
2174
2175#ifdef CONFIG_STACK_GROWSUP
2176int expand_stack_locked(struct vm_area_struct *vma, unsigned long address)
2177{
2178 return expand_upwards(vma, address);
2179}
2180
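/*
 * Look up the vma covering @addr; if no vma contains it, try to grow the
 * preceding VM_GROWSUP stack vma upwards to cover it.  Requires the
 * mmap_lock to be held.
 */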
2181struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr)
2182{
2183 struct vm_area_struct *vma, *prev;
2184
2185 addr &= PAGE_MASK;
2186 vma = find_vma_prev(mm, addr, &prev);
2187 if (vma && (vma->vm_start <= addr))
2188 return vma;
2189 if (!prev)
2190 return NULL;
2191 if (expand_stack_locked(prev, addr))
2192 return NULL;
2193 if (prev->vm_flags & VM_LOCKED)
2194 populate_vma_page_range(prev, addr, prev->vm_end, NULL);
2195 return prev;
2196}
2197#else
2198int expand_stack_locked(struct vm_area_struct *vma, unsigned long address)
2199{
2200 return expand_downwards(vma, address);
2201}
2202
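/*
 * Look up the vma covering @addr; if @addr falls below a VM_GROWSDOWN
 * vma, try to grow that vma downwards to cover it.  Requires the
 * mmap_lock to be held.
 */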
2203struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr)
2204{
2205 struct vm_area_struct *vma;
2206 unsigned long start;
2207
2208 addr &= PAGE_MASK;
2209 vma = find_vma(mm, addr);
2210 if (!vma)
2211 return NULL;
2212 if (vma->vm_start <= addr)
2213 return vma;
2214 start = vma->vm_start;
2215 if (expand_stack_locked(vma, addr))
2216 return NULL;
2217 if (vma->vm_flags & VM_LOCKED)
2218 populate_vma_page_range(vma, addr, start, NULL);
2219 return vma;
2220}
2221#endif
2222
2223#if defined(CONFIG_STACK_GROWSUP)
2224
2225#define vma_expand_up(vma,addr) expand_upwards(vma, addr)
2226#define vma_expand_down(vma, addr) (-EFAULT)
2227
2228#else
2229
2230#define vma_expand_up(vma,addr) (-EFAULT)
2231#define vma_expand_down(vma, addr) expand_downwards(vma, addr)
2232
2233#endif
2234
2235/*
2236 * expand_stack(): legacy interface for page faulting. Don't use unless
2237 * you have to.
2238 *
2239 * This is called with the mm locked for reading, drops the lock, takes
2240 * the lock for writing, tries to look up a vma again, expands it if
2241 * necessary, and downgrades the lock to reading again.
2242 *
2243 * If no vma is found or it can't be expanded, it returns NULL and has
2244 * dropped the lock.
2245 */
2246struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr)
2247{
2248 struct vm_area_struct *vma, *prev;
2249
2250 mmap_read_unlock(mm);
2251 if (mmap_write_lock_killable(mm))
2252 return NULL;
2253
2254 vma = find_vma_prev(mm, addr, &prev);
2255 if (vma && vma->vm_start <= addr)
2256 goto success;
2257
2258 if (prev && !vma_expand_up(prev, addr)) {
2259 vma = prev;
2260 goto success;
2261 }
2262
2263 if (vma && !vma_expand_down(vma, addr))
2264 goto success;
2265
2266 mmap_write_unlock(mm);
2267 return NULL;
2268
2269success:
2270 mmap_write_downgrade(mm);
2271 return vma;
2272}
2273
2274/*
 * Ok - we have the memory areas we should free in a maple tree, so release them,
2276 * and do the vma updates.
2277 *
2278 * Called with the mm semaphore held.
2279 */
2280static inline void remove_mt(struct mm_struct *mm, struct ma_state *mas)
2281{
2282 unsigned long nr_accounted = 0;
2283 struct vm_area_struct *vma;
2284
2285 /* Update high watermark before we lower total_vm */
2286 update_hiwater_vm(mm);
2287 mas_for_each(mas, vma, ULONG_MAX) {
2288 long nrpages = vma_pages(vma);
2289
2290 if (vma->vm_flags & VM_ACCOUNT)
2291 nr_accounted += nrpages;
2292 vm_stat_account(mm, vma->vm_flags, -nrpages);
2293 remove_vma(vma, false);
2294 }
2295 vm_unacct_memory(nr_accounted);
2296}
2297
2298/*
2299 * Get rid of page table information in the indicated region.
2300 *
2301 * Called with the mm semaphore held.
2302 */
2303static void unmap_region(struct mm_struct *mm, struct ma_state *mas,
2304 struct vm_area_struct *vma, struct vm_area_struct *prev,
2305 struct vm_area_struct *next, unsigned long start,
2306 unsigned long end, unsigned long tree_end, bool mm_wr_locked)
2307{
2308 struct mmu_gather tlb;
2309 unsigned long mt_start = mas->index;
2310
2311 lru_add_drain();
2312 tlb_gather_mmu(&tlb, mm);
2313 update_hiwater_rss(mm);
2314 unmap_vmas(&tlb, mas, vma, start, end, tree_end, mm_wr_locked);
2315 mas_set(mas, mt_start);
2316 free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
2317 next ? next->vm_start : USER_PGTABLES_CEILING,
2318 mm_wr_locked);
2319 tlb_finish_mmu(&tlb);
2320}
2321
2322/*
2323 * __split_vma() bypasses sysctl_max_map_count checking. We use this where it
2324 * has already been checked or doesn't make sense to fail.
2325 * VMA Iterator will point to the end VMA.
2326 */
2327static int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
2328 unsigned long addr, int new_below)
2329{
2330 struct vma_prepare vp;
2331 struct vm_area_struct *new;
2332 int err;
2333
2334 WARN_ON(vma->vm_start >= addr);
2335 WARN_ON(vma->vm_end <= addr);
2336
2337 if (vma->vm_ops && vma->vm_ops->may_split) {
2338 err = vma->vm_ops->may_split(vma, addr);
2339 if (err)
2340 return err;
2341 }
2342
2343 new = vm_area_dup(vma);
2344 if (!new)
2345 return -ENOMEM;
2346
2347 if (new_below) {
2348 new->vm_end = addr;
2349 } else {
2350 new->vm_start = addr;
2351 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
2352 }
2353
2354 err = -ENOMEM;
2355 vma_iter_config(vmi, new->vm_start, new->vm_end);
2356 if (vma_iter_prealloc(vmi, new))
2357 goto out_free_vma;
2358
2359 err = vma_dup_policy(vma, new);
2360 if (err)
2361 goto out_free_vmi;
2362
2363 err = anon_vma_clone(new, vma);
2364 if (err)
2365 goto out_free_mpol;
2366
2367 if (new->vm_file)
2368 get_file(new->vm_file);
2369
2370 if (new->vm_ops && new->vm_ops->open)
2371 new->vm_ops->open(new);
2372
2373 vma_start_write(vma);
2374 vma_start_write(new);
2375
2376 init_vma_prep(&vp, vma);
2377 vp.insert = new;
2378 vma_prepare(&vp);
2379 vma_adjust_trans_huge(vma, vma->vm_start, addr, 0);
2380
2381 if (new_below) {
2382 vma->vm_start = addr;
2383 vma->vm_pgoff += (addr - new->vm_start) >> PAGE_SHIFT;
2384 } else {
2385 vma->vm_end = addr;
2386 }
2387
2388 /* vma_complete stores the new vma */
2389 vma_complete(&vp, vmi, vma->vm_mm);
2390
2391 /* Success. */
2392 if (new_below)
2393 vma_next(vmi);
2394 return 0;
2395
2396out_free_mpol:
2397 mpol_put(vma_policy(new));
2398out_free_vmi:
2399 vma_iter_free(vmi);
2400out_free_vma:
2401 vm_area_free(new);
2402 return err;
2403}
2404
2405/*
 * Split a vma into two pieces at address 'addr'; a new vma is allocated
2407 * either for the first part or the tail.
2408 */
2409static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
2410 unsigned long addr, int new_below)
2411{
2412 if (vma->vm_mm->map_count >= sysctl_max_map_count)
2413 return -ENOMEM;
2414
2415 return __split_vma(vmi, vma, addr, new_below);
2416}
2417
2418/*
 * We are about to modify one or more of a VMA's flags, policy, userfaultfd
2420 * context and anonymous VMA name within the range [start, end).
2421 *
2422 * As a result, we might be able to merge the newly modified VMA range with an
2423 * adjacent VMA with identical properties.
2424 *
2425 * If no merge is possible and the range does not span the entirety of the VMA,
2426 * we then need to split the VMA to accommodate the change.
2427 *
2428 * The function returns either the merged VMA, the original VMA if a split was
2429 * required instead, or an error if the split failed.
2430 */
2431struct vm_area_struct *vma_modify(struct vma_iterator *vmi,
2432 struct vm_area_struct *prev,
2433 struct vm_area_struct *vma,
2434 unsigned long start, unsigned long end,
2435 unsigned long vm_flags,
2436 struct mempolicy *policy,
2437 struct vm_userfaultfd_ctx uffd_ctx,
2438 struct anon_vma_name *anon_name)
2439{
2440 pgoff_t pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
2441 struct vm_area_struct *merged;
2442
2443 merged = vma_merge(vmi, vma->vm_mm, prev, start, end, vm_flags,
2444 vma->anon_vma, vma->vm_file, pgoff, policy,
2445 uffd_ctx, anon_name);
2446 if (merged)
2447 return merged;
2448
2449 if (vma->vm_start < start) {
2450 int err = split_vma(vmi, vma, start, 1);
2451
2452 if (err)
2453 return ERR_PTR(err);
2454 }
2455
2456 if (vma->vm_end > end) {
2457 int err = split_vma(vmi, vma, end, 0);
2458
2459 if (err)
2460 return ERR_PTR(err);
2461 }
2462
2463 return vma;
2464}
2465
2466/*
2467 * Attempt to merge a newly mapped VMA with those adjacent to it. The caller
2468 * must ensure that [start, end) does not overlap any existing VMA.
2469 */
2470static struct vm_area_struct
2471*vma_merge_new_vma(struct vma_iterator *vmi, struct vm_area_struct *prev,
2472 struct vm_area_struct *vma, unsigned long start,
2473 unsigned long end, pgoff_t pgoff)
2474{
2475 return vma_merge(vmi, vma->vm_mm, prev, start, end, vma->vm_flags,
2476 vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
2477 vma->vm_userfaultfd_ctx, anon_vma_name(vma));
2478}
2479
2480/*
2481 * Expand vma by delta bytes, potentially merging with an immediately adjacent
2482 * VMA with identical properties.
2483 */
2484struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
2485 struct vm_area_struct *vma,
2486 unsigned long delta)
2487{
2488 pgoff_t pgoff = vma->vm_pgoff + vma_pages(vma);
2489
2490 /* vma is specified as prev, so case 1 or 2 will apply. */
2491 return vma_merge(vmi, vma->vm_mm, vma, vma->vm_end, vma->vm_end + delta,
2492 vma->vm_flags, vma->anon_vma, vma->vm_file, pgoff,
2493 vma_policy(vma), vma->vm_userfaultfd_ctx,
2494 anon_vma_name(vma));
2495}
2496
2497/*
2498 * do_vmi_align_munmap() - munmap the aligned region from @start to @end.
2499 * @vmi: The vma iterator
2500 * @vma: The starting vm_area_struct
2501 * @mm: The mm_struct
2502 * @start: The aligned start address to munmap.
2503 * @end: The aligned end address to munmap.
2504 * @uf: The userfaultfd list_head
 * @unlock: Set to true to drop the mmap_lock.  Unlocking only happens on
2506 * success.
2507 *
2508 * Return: 0 on success and drops the lock if so directed, error and leaves the
2509 * lock held otherwise.
2510 */
2511static int
2512do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
2513 struct mm_struct *mm, unsigned long start,
2514 unsigned long end, struct list_head *uf, bool unlock)
2515{
2516 struct vm_area_struct *prev, *next = NULL;
2517 struct maple_tree mt_detach;
2518 int count = 0;
2519 int error = -ENOMEM;
2520 unsigned long locked_vm = 0;
2521 MA_STATE(mas_detach, &mt_detach, 0, 0);
2522 mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
2523 mt_on_stack(mt_detach);
2524
2525 /*
2526 * If we need to split any vma, do it now to save pain later.
2527 *
2528 * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially
2529 * unmapped vm_area_struct will remain in use: so lower split_vma
2530 * places tmp vma above, and higher split_vma places tmp vma below.
2531 */
2532
2533 /* Does it split the first one? */
2534 if (start > vma->vm_start) {
2535
2536 /*
2537 * Make sure that map_count on return from munmap() will
2538 * not exceed its limit; but let map_count go just above
2539 * its limit temporarily, to help free resources as expected.
2540 */
2541 if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count)
2542 goto map_count_exceeded;
2543
2544 error = __split_vma(vmi, vma, start, 1);
2545 if (error)
2546 goto start_split_failed;
2547 }
2548
2549 /*
2550 * Detach a range of VMAs from the mm. Using next as a temp variable as
2551 * it is always overwritten.
2552 */
2553 next = vma;
2554 do {
2555 /* Does it split the end? */
2556 if (next->vm_end > end) {
2557 error = __split_vma(vmi, next, end, 0);
2558 if (error)
2559 goto end_split_failed;
2560 }
2561 vma_start_write(next);
2562 mas_set(&mas_detach, count);
2563 error = mas_store_gfp(&mas_detach, next, GFP_KERNEL);
2564 if (error)
2565 goto munmap_gather_failed;
2566 vma_mark_detached(next, true);
2567 if (next->vm_flags & VM_LOCKED)
2568 locked_vm += vma_pages(next);
2569
2570 count++;
2571 if (unlikely(uf)) {
2572 /*
2573 * If userfaultfd_unmap_prep returns an error the vmas
2574 * will remain split, but userland will get a
2575 * highly unexpected error anyway. This is no
			 * different from the case where the first of the two
			 * __split_vma fails, but we don't undo the first
			 * split even though we could.  This failure is
			 * unlikely enough that it's not worth optimizing for.
2580 */
2581 error = userfaultfd_unmap_prep(next, start, end, uf);
2582
2583 if (error)
2584 goto userfaultfd_error;
2585 }
2586#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
2587 BUG_ON(next->vm_start < start);
2588 BUG_ON(next->vm_start > end);
2589#endif
2590 } for_each_vma_range(*vmi, next, end);
2591
2592#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
2593 /* Make sure no VMAs are about to be lost. */
2594 {
2595 MA_STATE(test, &mt_detach, 0, 0);
2596 struct vm_area_struct *vma_mas, *vma_test;
2597 int test_count = 0;
2598
2599 vma_iter_set(vmi, start);
2600 rcu_read_lock();
2601 vma_test = mas_find(&test, count - 1);
2602 for_each_vma_range(*vmi, vma_mas, end) {
2603 BUG_ON(vma_mas != vma_test);
2604 test_count++;
2605 vma_test = mas_next(&test, count - 1);
2606 }
2607 rcu_read_unlock();
2608 BUG_ON(count != test_count);
2609 }
2610#endif
2611
2612 while (vma_iter_addr(vmi) > start)
2613 vma_iter_prev_range(vmi);
2614
2615 error = vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL);
2616 if (error)
2617 goto clear_tree_failed;
2618
2619 /* Point of no return */
2620 mm->locked_vm -= locked_vm;
2621 mm->map_count -= count;
2622 if (unlock)
2623 mmap_write_downgrade(mm);
2624
2625 prev = vma_iter_prev_range(vmi);
2626 next = vma_next(vmi);
2627 if (next)
2628 vma_iter_prev_range(vmi);
2629
2630 /*
2631 * We can free page tables without write-locking mmap_lock because VMAs
2632 * were isolated before we downgraded mmap_lock.
2633 */
2634 mas_set(&mas_detach, 1);
2635 unmap_region(mm, &mas_detach, vma, prev, next, start, end, count,
2636 !unlock);
2637 /* Statistics and freeing VMAs */
2638 mas_set(&mas_detach, 0);
2639 remove_mt(mm, &mas_detach);
2640 validate_mm(mm);
2641 if (unlock)
2642 mmap_read_unlock(mm);
2643
2644 __mt_destroy(&mt_detach);
2645 return 0;
2646
2647clear_tree_failed:
2648userfaultfd_error:
2649munmap_gather_failed:
2650end_split_failed:
2651 mas_set(&mas_detach, 0);
2652 mas_for_each(&mas_detach, next, end)
2653 vma_mark_detached(next, false);
2654
2655 __mt_destroy(&mt_detach);
2656start_split_failed:
2657map_count_exceeded:
2658 validate_mm(mm);
2659 return error;
2660}
2661
2662/*
2663 * do_vmi_munmap() - munmap a given range.
2664 * @vmi: The vma iterator
2665 * @mm: The mm_struct
2666 * @start: The start address to munmap
2667 * @len: The length of the range to munmap
2668 * @uf: The userfaultfd list_head
2669 * @unlock: set to true if the user wants to drop the mmap_lock on success
2670 *
 * This function takes a @vmi that is either pointing to the previous VMA or set
 * to MA_START and sets it up to remove the mapping(s). The @len will be
 * aligned and any arch_unmap work will be performed.
2674 *
2675 * Return: 0 on success and drops the lock if so directed, error and leaves the
2676 * lock held otherwise.
2677 */
2678int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
2679 unsigned long start, size_t len, struct list_head *uf,
2680 bool unlock)
2681{
2682 unsigned long end;
2683 struct vm_area_struct *vma;
2684
2685 if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start)
2686 return -EINVAL;
2687
2688 end = start + PAGE_ALIGN(len);
2689 if (end == start)
2690 return -EINVAL;
2691
2692 /* arch_unmap() might do unmaps itself. */
2693 arch_unmap(mm, start, end);
2694
2695 /* Find the first overlapping VMA */
2696 vma = vma_find(vmi, end);
2697 if (!vma) {
2698 if (unlock)
2699 mmap_write_unlock(mm);
2700 return 0;
2701 }
2702
2703 return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
2704}
2705
2706/* do_munmap() - Wrapper function for non-maple tree aware do_munmap() calls.
2707 * @mm: The mm_struct
2708 * @start: The start address to munmap
2709 * @len: The length to be munmapped.
2710 * @uf: The userfaultfd list_head
2711 *
2712 * Return: 0 on success, error otherwise.
2713 */
2714int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
2715 struct list_head *uf)
2716{
2717 VMA_ITERATOR(vmi, mm, start);
2718
2719 return do_vmi_munmap(&vmi, mm, start, len, uf, false);
2720}
2721
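/*
 * mmap_region() - do the real work of mmap() once the flags and address
 * have been worked out: unmap anything that overlaps, try to expand an
 * adjacent vma, and otherwise allocate and insert a new vma, calling the
 * file's ->mmap() for file-backed mappings or shmem_zero_setup() for
 * shared anonymous ones.
 */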
2722unsigned long mmap_region(struct file *file, unsigned long addr,
2723 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
2724 struct list_head *uf)
2725{
2726 struct mm_struct *mm = current->mm;
2727 struct vm_area_struct *vma = NULL;
2728 struct vm_area_struct *next, *prev, *merge;
2729 pgoff_t pglen = len >> PAGE_SHIFT;
2730 unsigned long charged = 0;
2731 unsigned long end = addr + len;
2732 unsigned long merge_start = addr, merge_end = end;
2733 bool writable_file_mapping = false;
2734 pgoff_t vm_pgoff;
2735 int error;
2736 VMA_ITERATOR(vmi, mm, addr);
2737
2738 /* Check against address space limit. */
2739 if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) {
2740 unsigned long nr_pages;
2741
2742 /*
2743 * MAP_FIXED may remove pages of mappings that intersects with
2744 * requested mapping. Account for the pages it would unmap.
2745 */
2746 nr_pages = count_vma_pages_range(mm, addr, end);
2747
2748 if (!may_expand_vm(mm, vm_flags,
2749 (len >> PAGE_SHIFT) - nr_pages))
2750 return -ENOMEM;
2751 }
2752
2753 /* Unmap any existing mapping in the area */
2754 if (do_vmi_munmap(&vmi, mm, addr, len, uf, false))
2755 return -ENOMEM;
2756
2757 /*
2758 * Private writable mapping: check memory availability
2759 */
2760 if (accountable_mapping(file, vm_flags)) {
2761 charged = len >> PAGE_SHIFT;
2762 if (security_vm_enough_memory_mm(mm, charged))
2763 return -ENOMEM;
2764 vm_flags |= VM_ACCOUNT;
2765 }
2766
2767 next = vma_next(&vmi);
2768 prev = vma_prev(&vmi);
2769 if (vm_flags & VM_SPECIAL) {
2770 if (prev)
2771 vma_iter_next_range(&vmi);
2772 goto cannot_expand;
2773 }
2774
2775 /* Attempt to expand an old mapping */
2776 /* Check next */
2777 if (next && next->vm_start == end && !vma_policy(next) &&
2778 can_vma_merge_before(next, vm_flags, NULL, file, pgoff+pglen,
2779 NULL_VM_UFFD_CTX, NULL)) {
2780 merge_end = next->vm_end;
2781 vma = next;
2782 vm_pgoff = next->vm_pgoff - pglen;
2783 }
2784
2785 /* Check prev */
2786 if (prev && prev->vm_end == addr && !vma_policy(prev) &&
2787 (vma ? can_vma_merge_after(prev, vm_flags, vma->anon_vma, file,
2788 pgoff, vma->vm_userfaultfd_ctx, NULL) :
2789 can_vma_merge_after(prev, vm_flags, NULL, file, pgoff,
2790 NULL_VM_UFFD_CTX, NULL))) {
2791 merge_start = prev->vm_start;
2792 vma = prev;
2793 vm_pgoff = prev->vm_pgoff;
2794 } else if (prev) {
2795 vma_iter_next_range(&vmi);
2796 }
2797
2798 /* Actually expand, if possible */
2799 if (vma &&
2800 !vma_expand(&vmi, vma, merge_start, merge_end, vm_pgoff, next)) {
2801 khugepaged_enter_vma(vma, vm_flags);
2802 goto expanded;
2803 }
2804
2805 if (vma == prev)
2806 vma_iter_set(&vmi, addr);
2807cannot_expand:
2808
2809 /*
2810 * Determine the object being mapped and call the appropriate
	 * specific mapper.  The address has already been validated, but
	 * not unmapped; the overlapping mappings have already been removed.
2813 */
2814 vma = vm_area_alloc(mm);
2815 if (!vma) {
2816 error = -ENOMEM;
2817 goto unacct_error;
2818 }
2819
2820 vma_iter_config(&vmi, addr, end);
2821 vma->vm_start = addr;
2822 vma->vm_end = end;
2823 vm_flags_init(vma, vm_flags);
2824 vma->vm_page_prot = vm_get_page_prot(vm_flags);
2825 vma->vm_pgoff = pgoff;
2826
2827 if (file) {
2828 vma->vm_file = get_file(file);
2829 error = call_mmap(file, vma);
2830 if (error)
2831 goto unmap_and_free_vma;
2832
2833 if (vma_is_shared_maywrite(vma)) {
2834 error = mapping_map_writable(file->f_mapping);
2835 if (error)
2836 goto close_and_free_vma;
2837
2838 writable_file_mapping = true;
2839 }
2840
2841 /*
2842 * Expansion is handled above, merging is handled below.
2843 * Drivers should not alter the address of the VMA.
2844 */
2845 error = -EINVAL;
2846 if (WARN_ON((addr != vma->vm_start)))
2847 goto close_and_free_vma;
2848
2849 vma_iter_config(&vmi, addr, end);
2850 /*
2851 * If vm_flags changed after call_mmap(), we should try merge
2852 * vma again as we may succeed this time.
2853 */
2854 if (unlikely(vm_flags != vma->vm_flags && prev)) {
2855 merge = vma_merge_new_vma(&vmi, prev, vma,
2856 vma->vm_start, vma->vm_end,
2857 vma->vm_pgoff);
2858 if (merge) {
2859 /*
2860 * ->mmap() can change vma->vm_file and fput
2861 * the original file. So fput the vma->vm_file
2862 * here or we would add an extra fput for file
2863 * and cause general protection fault
2864 * ultimately.
2865 */
2866 fput(vma->vm_file);
2867 vm_area_free(vma);
2868 vma = merge;
2869 /* Update vm_flags to pick up the change. */
2870 vm_flags = vma->vm_flags;
2871 goto unmap_writable;
2872 }
2873 }
2874
2875 vm_flags = vma->vm_flags;
2876 } else if (vm_flags & VM_SHARED) {
2877 error = shmem_zero_setup(vma);
2878 if (error)
2879 goto free_vma;
2880 } else {
2881 vma_set_anonymous(vma);
2882 }
2883
2884 if (map_deny_write_exec(vma, vma->vm_flags)) {
2885 error = -EACCES;
2886 goto close_and_free_vma;
2887 }
2888
2889 /* Allow architectures to sanity-check the vm_flags */
2890 error = -EINVAL;
2891 if (!arch_validate_flags(vma->vm_flags))
2892 goto close_and_free_vma;
2893
2894 error = -ENOMEM;
2895 if (vma_iter_prealloc(&vmi, vma))
2896 goto close_and_free_vma;
2897
2898 /* Lock the VMA since it is modified after insertion into VMA tree */
2899 vma_start_write(vma);
2900 vma_iter_store(&vmi, vma);
2901 mm->map_count++;
2902 if (vma->vm_file) {
2903 i_mmap_lock_write(vma->vm_file->f_mapping);
2904 if (vma_is_shared_maywrite(vma))
2905 mapping_allow_writable(vma->vm_file->f_mapping);
2906
2907 flush_dcache_mmap_lock(vma->vm_file->f_mapping);
2908 vma_interval_tree_insert(vma, &vma->vm_file->f_mapping->i_mmap);
2909 flush_dcache_mmap_unlock(vma->vm_file->f_mapping);
2910 i_mmap_unlock_write(vma->vm_file->f_mapping);
2911 }
2912
2913 /*
	 * vma_merge() calls khugepaged_enter_vma() as well; the below
2915 * call covers the non-merge case.
2916 */
2917 khugepaged_enter_vma(vma, vma->vm_flags);
2918
2919 /* Once vma denies write, undo our temporary denial count */
2920unmap_writable:
2921 if (writable_file_mapping)
2922 mapping_unmap_writable(file->f_mapping);
2923 file = vma->vm_file;
2924 ksm_add_vma(vma);
2925expanded:
2926 perf_event_mmap(vma);
2927
2928 vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT);
2929 if (vm_flags & VM_LOCKED) {
2930 if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) ||
2931 is_vm_hugetlb_page(vma) ||
2932 vma == get_gate_vma(current->mm))
2933 vm_flags_clear(vma, VM_LOCKED_MASK);
2934 else
2935 mm->locked_vm += (len >> PAGE_SHIFT);
2936 }
2937
2938 if (file)
2939 uprobe_mmap(vma);
2940
2941 /*
	 * A new (or expanded) vma always gets soft-dirty status.
	 * Otherwise the user-space soft-dirty page tracker won't
	 * be able to distinguish the situation where a vma is
	 * unmapped and then a new one is mapped in place (which
	 * must be treated as a completely new data area).
2947 */
2948 vm_flags_set(vma, VM_SOFTDIRTY);
2949
2950 vma_set_page_prot(vma);
2951
2952 validate_mm(mm);
2953 return addr;
2954
2955close_and_free_vma:
2956 if (file && vma->vm_ops && vma->vm_ops->close)
2957 vma->vm_ops->close(vma);
2958
2959 if (file || vma->vm_file) {
2960unmap_and_free_vma:
2961 fput(vma->vm_file);
2962 vma->vm_file = NULL;
2963
2964 vma_iter_set(&vmi, vma->vm_end);
2965 /* Undo any partial mapping done by a device driver. */
2966 unmap_region(mm, &vmi.mas, vma, prev, next, vma->vm_start,
2967 vma->vm_end, vma->vm_end, true);
2968 }
2969 if (writable_file_mapping)
2970 mapping_unmap_writable(file->f_mapping);
2971free_vma:
2972 vm_area_free(vma);
2973unacct_error:
2974 if (charged)
2975 vm_unacct_memory(charged);
2976 validate_mm(mm);
2977 return error;
2978}
2979
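/*
 * Common helper for vm_munmap() and the munmap() syscall: take the
 * mmap_lock for writing, unmap the range, and make sure the lock is
 * dropped again (on success with @unlock set, do_vmi_munmap() has
 * already downgraded and released it).
 */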
2980static int __vm_munmap(unsigned long start, size_t len, bool unlock)
2981{
2982 int ret;
2983 struct mm_struct *mm = current->mm;
2984 LIST_HEAD(uf);
2985 VMA_ITERATOR(vmi, mm, start);
2986
2987 if (mmap_write_lock_killable(mm))
2988 return -EINTR;
2989
2990 ret = do_vmi_munmap(&vmi, mm, start, len, &uf, unlock);
2991 if (ret || !unlock)
2992 mmap_write_unlock(mm);
2993
2994 userfaultfd_unmap_complete(mm, &uf);
2995 return ret;
2996}
2997
2998int vm_munmap(unsigned long start, size_t len)
2999{
3000 return __vm_munmap(start, len, false);
3001}
3002EXPORT_SYMBOL(vm_munmap);
3003
3004SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
3005{
3006 addr = untagged_addr(addr);
3007 return __vm_munmap(addr, len, true);
3008}
3009
3010
3011/*
3012 * Emulation of deprecated remap_file_pages() syscall.
3013 */
3014SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
3015 unsigned long, prot, unsigned long, pgoff, unsigned long, flags)
3016{
3017
3018 struct mm_struct *mm = current->mm;
3019 struct vm_area_struct *vma;
3020 unsigned long populate = 0;
3021 unsigned long ret = -EINVAL;
3022 struct file *file;
3023
3024 pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/mm/remap_file_pages.rst.\n",
3025 current->comm, current->pid);
3026
3027 if (prot)
3028 return ret;
3029 start = start & PAGE_MASK;
3030 size = size & PAGE_MASK;
3031
3032 if (start + size <= start)
3033 return ret;
3034
3035 /* Does pgoff wrap? */
3036 if (pgoff + (size >> PAGE_SHIFT) < pgoff)
3037 return ret;
3038
3039 if (mmap_write_lock_killable(mm))
3040 return -EINTR;
3041
3042 vma = vma_lookup(mm, start);
3043
3044 if (!vma || !(vma->vm_flags & VM_SHARED))
3045 goto out;
3046
3047 if (start + size > vma->vm_end) {
3048 VMA_ITERATOR(vmi, mm, vma->vm_end);
3049 struct vm_area_struct *next, *prev = vma;
3050
3051 for_each_vma_range(vmi, next, start + size) {
3052 /* hole between vmas ? */
3053 if (next->vm_start != prev->vm_end)
3054 goto out;
3055
3056 if (next->vm_file != vma->vm_file)
3057 goto out;
3058
3059 if (next->vm_flags != vma->vm_flags)
3060 goto out;
3061
3062 if (start + size <= next->vm_end)
3063 break;
3064
3065 prev = next;
3066 }
3067
3068 if (!next)
3069 goto out;
3070 }
3071
3072 prot |= vma->vm_flags & VM_READ ? PROT_READ : 0;
3073 prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0;
3074 prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0;
3075
3076 flags &= MAP_NONBLOCK;
3077 flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE;
3078 if (vma->vm_flags & VM_LOCKED)
3079 flags |= MAP_LOCKED;
3080
3081 file = get_file(vma->vm_file);
3082 ret = do_mmap(vma->vm_file, start, size,
3083 prot, flags, 0, pgoff, &populate, NULL);
3084 fput(file);
3085out:
3086 mmap_write_unlock(mm);
3087 if (populate)
3088 mm_populate(ret, populate);
3089 if (!IS_ERR_VALUE(ret))
3090 ret = 0;
3091 return ret;
3092}
3093
3094/*
3095 * do_vma_munmap() - Unmap a full or partial vma.
3096 * @vmi: The vma iterator pointing at the vma
3097 * @vma: The first vma to be munmapped
3098 * @start: the start of the address to unmap
3099 * @end: The end of the address to unmap
3100 * @uf: The userfaultfd list_head
3101 * @unlock: Drop the lock on success
3102 *
 * Unmaps a VMA mapping when the vma iterator is already in position.
3104 * Does not handle alignment.
3105 *
 * Return: 0 on success and drops the lock if so directed, error on failure
 * and leaves the lock held.
3108 */
3109int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
3110 unsigned long start, unsigned long end, struct list_head *uf,
3111 bool unlock)
3112{
3113 struct mm_struct *mm = vma->vm_mm;
3114
3115 arch_unmap(mm, start, end);
3116 return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
3117}
3118
3119/*
3120 * do_brk_flags() - Increase the brk vma if the flags match.
3121 * @vmi: The vma iterator
3122 * @addr: The start address
3123 * @len: The length of the increase
 * @vma: The previous vma, if any, which may be extended
3125 * @flags: The VMA Flags
3126 *
3127 * Extend the brk VMA from addr to addr + len. If the VMA is NULL or the flags
3128 * do not match then create a new anonymous VMA. Eventually we may be able to
3129 * do some brk-specific accounting here.
3130 */
3131static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
3132 unsigned long addr, unsigned long len, unsigned long flags)
3133{
3134 struct mm_struct *mm = current->mm;
3135 struct vma_prepare vp;
3136
3137 /*
	 * Check against address space limits by the changed size.
3139 * Note: This happens *after* clearing old mappings in some code paths.
3140 */
3141 flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
3142 if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT))
3143 return -ENOMEM;
3144
3145 if (mm->map_count > sysctl_max_map_count)
3146 return -ENOMEM;
3147
3148 if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
3149 return -ENOMEM;
3150
3151 /*
3152 * Expand the existing vma if possible; Note that singular lists do not
3153 * occur after forking, so the expand will only happen on new VMAs.
3154 */
3155 if (vma && vma->vm_end == addr && !vma_policy(vma) &&
3156 can_vma_merge_after(vma, flags, NULL, NULL,
3157 addr >> PAGE_SHIFT, NULL_VM_UFFD_CTX, NULL)) {
3158 vma_iter_config(vmi, vma->vm_start, addr + len);
3159 if (vma_iter_prealloc(vmi, vma))
3160 goto unacct_fail;
3161
3162 vma_start_write(vma);
3163
3164 init_vma_prep(&vp, vma);
3165 vma_prepare(&vp);
3166 vma_adjust_trans_huge(vma, vma->vm_start, addr + len, 0);
3167 vma->vm_end = addr + len;
3168 vm_flags_set(vma, VM_SOFTDIRTY);
3169 vma_iter_store(vmi, vma);
3170
3171 vma_complete(&vp, vmi, mm);
3172 khugepaged_enter_vma(vma, flags);
3173 goto out;
3174 }
3175
3176 if (vma)
3177 vma_iter_next_range(vmi);
3178 /* create a vma struct for an anonymous mapping */
3179 vma = vm_area_alloc(mm);
3180 if (!vma)
3181 goto unacct_fail;
3182
3183 vma_set_anonymous(vma);
3184 vma->vm_start = addr;
3185 vma->vm_end = addr + len;
3186 vma->vm_pgoff = addr >> PAGE_SHIFT;
3187 vm_flags_init(vma, flags);
3188 vma->vm_page_prot = vm_get_page_prot(flags);
3189 vma_start_write(vma);
3190 if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL))
3191 goto mas_store_fail;
3192
3193 mm->map_count++;
3194 validate_mm(mm);
3195 ksm_add_vma(vma);
3196out:
3197 perf_event_mmap(vma);
3198 mm->total_vm += len >> PAGE_SHIFT;
3199 mm->data_vm += len >> PAGE_SHIFT;
3200 if (flags & VM_LOCKED)
3201 mm->locked_vm += (len >> PAGE_SHIFT);
3202 vm_flags_set(vma, VM_SOFTDIRTY);
3203 return 0;
3204
3205mas_store_fail:
3206 vm_area_free(vma);
3207unacct_fail:
3208 vm_unacct_memory(len >> PAGE_SHIFT);
3209 return -ENOMEM;
3210}
3211
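/*
 * vm_brk_flags() - map an anonymous, brk-style region of @request bytes
 * at @addr.  Only VM_EXEC is accepted in @flags; the region is populated
 * immediately if the mm default flags include VM_LOCKED.
 */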
3212int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
3213{
3214 struct mm_struct *mm = current->mm;
3215 struct vm_area_struct *vma = NULL;
3216 unsigned long len;
3217 int ret;
3218 bool populate;
3219 LIST_HEAD(uf);
3220 VMA_ITERATOR(vmi, mm, addr);
3221
3222 len = PAGE_ALIGN(request);
3223 if (len < request)
3224 return -ENOMEM;
3225 if (!len)
3226 return 0;
3227
3228 /* Until we need other flags, refuse anything except VM_EXEC. */
3229 if ((flags & (~VM_EXEC)) != 0)
3230 return -EINVAL;
3231
3232 if (mmap_write_lock_killable(mm))
3233 return -EINTR;
3234
3235 ret = check_brk_limits(addr, len);
3236 if (ret)
3237 goto limits_failed;
3238
3239 ret = do_vmi_munmap(&vmi, mm, addr, len, &uf, 0);
3240 if (ret)
3241 goto munmap_failed;
3242
3243 vma = vma_prev(&vmi);
3244 ret = do_brk_flags(&vmi, vma, addr, len, flags);
3245 populate = ((mm->def_flags & VM_LOCKED) != 0);
3246 mmap_write_unlock(mm);
3247 userfaultfd_unmap_complete(mm, &uf);
3248 if (populate && !ret)
3249 mm_populate(addr, len);
3250 return ret;
3251
3252munmap_failed:
3253limits_failed:
3254 mmap_write_unlock(mm);
3255 return ret;
3256}
3257EXPORT_SYMBOL(vm_brk_flags);
3258
3259/* Release all mmaps. */
3260void exit_mmap(struct mm_struct *mm)
3261{
3262 struct mmu_gather tlb;
3263 struct vm_area_struct *vma;
3264 unsigned long nr_accounted = 0;
3265 MA_STATE(mas, &mm->mm_mt, 0, 0);
3266 int count = 0;
3267
	/* mm's last user has gone, and it's about to be pulled down */
3269 mmu_notifier_release(mm);
3270
3271 mmap_read_lock(mm);
3272 arch_exit_mmap(mm);
3273
3274 vma = mas_find(&mas, ULONG_MAX);
3275 if (!vma || unlikely(xa_is_zero(vma))) {
3276 /* Can happen if dup_mmap() received an OOM */
3277 mmap_read_unlock(mm);
3278 mmap_write_lock(mm);
3279 goto destroy;
3280 }
3281
3282 lru_add_drain();
3283 flush_cache_mm(mm);
3284 tlb_gather_mmu_fullmm(&tlb, mm);
3285 /* update_hiwater_rss(mm) here? but nobody should be looking */
3286 /* Use ULONG_MAX here to ensure all VMAs in the mm are unmapped */
3287 unmap_vmas(&tlb, &mas, vma, 0, ULONG_MAX, ULONG_MAX, false);
3288 mmap_read_unlock(mm);
3289
3290 /*
3291 * Set MMF_OOM_SKIP to hide this task from the oom killer/reaper
3292 * because the memory has been already freed.
3293 */
3294 set_bit(MMF_OOM_SKIP, &mm->flags);
3295 mmap_write_lock(mm);
3296 mt_clear_in_rcu(&mm->mm_mt);
3297 mas_set(&mas, vma->vm_end);
3298 free_pgtables(&tlb, &mas, vma, FIRST_USER_ADDRESS,
3299 USER_PGTABLES_CEILING, true);
3300 tlb_finish_mmu(&tlb);
3301
3302 /*
3303 * Walk the list again, actually closing and freeing it, with preemption
3304 * enabled, without holding any MM locks besides the unreachable
3305 * mmap_write_lock.
3306 */
3307 mas_set(&mas, vma->vm_end);
3308 do {
3309 if (vma->vm_flags & VM_ACCOUNT)
3310 nr_accounted += vma_pages(vma);
3311 remove_vma(vma, true);
3312 count++;
3313 cond_resched();
3314 vma = mas_find(&mas, ULONG_MAX);
3315 } while (vma && likely(!xa_is_zero(vma)));
3316
3317 BUG_ON(count != mm->map_count);
3318
3319 trace_exit_mmap(mm);
3320destroy:
3321 __mt_destroy(&mm->mm_mt);
3322 mmap_write_unlock(mm);
3323 vm_unacct_memory(nr_accounted);
3324}
3325
3326/* Insert vm structure into process list sorted by address
3327 * and into the inode's i_mmap tree. If vm_file is non-NULL
3328 * then i_mmap_rwsem is taken here.
3329 */
3330int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
3331{
3332 unsigned long charged = vma_pages(vma);
3333
3335 if (find_vma_intersection(mm, vma->vm_start, vma->vm_end))
3336 return -ENOMEM;
3337
3338 if ((vma->vm_flags & VM_ACCOUNT) &&
3339 security_vm_enough_memory_mm(mm, charged))
3340 return -ENOMEM;
3341
3342 /*
3343 * The vm_pgoff of a purely anonymous vma should be irrelevant
3344 * until its first write fault, when page's anon_vma and index
3345 * are set. But now set the vm_pgoff it will almost certainly
3346 * end up with (unless mremap moves it elsewhere before that
	 * first write fault), so /proc/pid/maps tells a consistent story.
3348 *
3349 * By setting it to reflect the virtual start address of the
3350 * vma, merges and splits can happen in a seamless way, just
3351 * using the existing file pgoff checks and manipulations.
3352 * Similarly in do_mmap and in do_brk_flags.
3353 */
3354 if (vma_is_anonymous(vma)) {
3355 BUG_ON(vma->anon_vma);
3356 vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
3357 }
3358
3359 if (vma_link(mm, vma)) {
3360 if (vma->vm_flags & VM_ACCOUNT)
3361 vm_unacct_memory(charged);
3362 return -ENOMEM;
3363 }
3364
3365 return 0;
3366}
3367
3368/*
3369 * Copy the vma structure to a new location in the same mm,
3370 * prior to moving page table entries, to effect an mremap move.
3371 */
3372struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
3373 unsigned long addr, unsigned long len, pgoff_t pgoff,
3374 bool *need_rmap_locks)
3375{
3376 struct vm_area_struct *vma = *vmap;
3377 unsigned long vma_start = vma->vm_start;
3378 struct mm_struct *mm = vma->vm_mm;
3379 struct vm_area_struct *new_vma, *prev;
3380 bool faulted_in_anon_vma = true;
3381 VMA_ITERATOR(vmi, mm, addr);
3382
3383 /*
3384 * If anonymous vma has not yet been faulted, update new pgoff
3385 * to match new location, to increase its chance of merging.
3386 */
3387 if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) {
3388 pgoff = addr >> PAGE_SHIFT;
3389 faulted_in_anon_vma = false;
3390 }
3391
3392 new_vma = find_vma_prev(mm, addr, &prev);
3393 if (new_vma && new_vma->vm_start < addr + len)
3394 return NULL; /* should never get here */
3395
3396 new_vma = vma_merge_new_vma(&vmi, prev, vma, addr, addr + len, pgoff);
3397 if (new_vma) {
3398 /*
3399 * Source vma may have been merged into new_vma
3400 */
3401 if (unlikely(vma_start >= new_vma->vm_start &&
3402 vma_start < new_vma->vm_end)) {
3403 /*
3404 * The only way we can get a vma_merge with
3405 * self during an mremap is if the vma hasn't
3406 * been faulted in yet and we were allowed to
3407 * reset the dst vma->vm_pgoff to the
3408 * destination address of the mremap to allow
3409 * the merge to happen. mremap must change the
3410 * vm_pgoff linearity between src and dst vmas
3411 * (in turn preventing a vma_merge) to be
3412 * safe. It is only safe to keep the vm_pgoff
3413 * linear if there are no pages mapped yet.
3414 */
3415 VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma);
3416 *vmap = vma = new_vma;
3417 }
3418 *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
3419 } else {
3420 new_vma = vm_area_dup(vma);
3421 if (!new_vma)
3422 goto out;
3423 new_vma->vm_start = addr;
3424 new_vma->vm_end = addr + len;
3425 new_vma->vm_pgoff = pgoff;
3426 if (vma_dup_policy(vma, new_vma))
3427 goto out_free_vma;
3428 if (anon_vma_clone(new_vma, vma))
3429 goto out_free_mempol;
3430 if (new_vma->vm_file)
3431 get_file(new_vma->vm_file);
3432 if (new_vma->vm_ops && new_vma->vm_ops->open)
3433 new_vma->vm_ops->open(new_vma);
3434 if (vma_link(mm, new_vma))
3435 goto out_vma_link;
3436 *need_rmap_locks = false;
3437 }
3438 return new_vma;
3439
3440out_vma_link:
3441 if (new_vma->vm_ops && new_vma->vm_ops->close)
3442 new_vma->vm_ops->close(new_vma);
3443
3444 if (new_vma->vm_file)
3445 fput(new_vma->vm_file);
3446
3447 unlink_anon_vmas(new_vma);
3448out_free_mempol:
3449 mpol_put(vma_policy(new_vma));
3450out_free_vma:
3451 vm_area_free(new_vma);
3452out:
3453 return NULL;
3454}
3455
3456/*
3457 * Return true if the calling process may expand its vm space by the passed
3458 * number of pages
3459 */
3460bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages)
3461{
3462 if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT)
3463 return false;
3464
3465 if (is_data_mapping(flags) &&
3466 mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) {
3467 /* Workaround for Valgrind */
3468 if (rlimit(RLIMIT_DATA) == 0 &&
3469 mm->data_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT)
3470 return true;
3471
3472 pr_warn_once("%s (%d): VmData %lu exceed data ulimit %lu. Update limits%s.\n",
3473 current->comm, current->pid,
3474 (mm->data_vm + npages) << PAGE_SHIFT,
3475 rlimit(RLIMIT_DATA),
3476 ignore_rlimit_data ? "" : " or use boot option ignore_rlimit_data");
3477
3478 if (!ignore_rlimit_data)
3479 return false;
3480 }
3481
3482 return true;
3483}
3484
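/* Account @npages against the mm's total_vm and the matching exec/stack/data counter. */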
3485void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages)
3486{
3487 WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm)+npages);
3488
3489 if (is_exec_mapping(flags))
3490 mm->exec_vm += npages;
3491 else if (is_stack_mapping(flags))
3492 mm->stack_vm += npages;
3493 else if (is_data_mapping(flags))
3494 mm->data_vm += npages;
3495}
3496
3497static vm_fault_t special_mapping_fault(struct vm_fault *vmf);
3498
3499/*
3500 * Having a close hook prevents vma merging regardless of flags.
3501 */
3502static void special_mapping_close(struct vm_area_struct *vma)
3503{
3504}
3505
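/* Name reported for this mapping, e.g. in /proc/<pid>/maps. */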
3506static const char *special_mapping_name(struct vm_area_struct *vma)
3507{
3508 return ((struct vm_special_mapping *)vma->vm_private_data)->name;
3509}
3510
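/*
 * A special mapping may only be moved within the mm that owns it; give the
 * provider a chance to update its state via the ->mremap hook, if any.
 */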
3511static int special_mapping_mremap(struct vm_area_struct *new_vma)
3512{
3513 struct vm_special_mapping *sm = new_vma->vm_private_data;
3514
3515 if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
3516 return -EFAULT;
3517
3518 if (sm->mremap)
3519 return sm->mremap(sm, new_vma);
3520
3521 return 0;
3522}
3523
3524static int special_mapping_split(struct vm_area_struct *vma, unsigned long addr)
3525{
3526 /*
3527 * Forbid splitting special mappings - kernel has expectations over
3528 * the number of pages in mapping. Together with VM_DONTEXPAND
3529 * the size of vma should stay the same over the special mapping's
3530 * lifetime.
3531 */
3532 return -EINVAL;
3533}
3534
3535static const struct vm_operations_struct special_mapping_vmops = {
3536 .close = special_mapping_close,
3537 .fault = special_mapping_fault,
3538 .mremap = special_mapping_mremap,
3539 .name = special_mapping_name,
	/* vDSO code relies on VVAR not being accessible remotely */
3541 .access = NULL,
3542 .may_split = special_mapping_split,
3543};
3544
3545static const struct vm_operations_struct legacy_special_mapping_vmops = {
3546 .close = special_mapping_close,
3547 .fault = special_mapping_fault,
3548};
3549
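/*
 * Fault handler for special mappings: defer to the provider's ->fault hook
 * when one exists, otherwise index into the NULL-terminated pages array.
 * Faults beyond the last supplied page raise SIGBUS.
 */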
3550static vm_fault_t special_mapping_fault(struct vm_fault *vmf)
3551{
3552 struct vm_area_struct *vma = vmf->vma;
3553 pgoff_t pgoff;
3554 struct page **pages;
3555
3556 if (vma->vm_ops == &legacy_special_mapping_vmops) {
3557 pages = vma->vm_private_data;
3558 } else {
3559 struct vm_special_mapping *sm = vma->vm_private_data;
3560
3561 if (sm->fault)
3562 return sm->fault(sm, vmf->vma, vmf);
3563
3564 pages = sm->pages;
3565 }
3566
3567 for (pgoff = vmf->pgoff; pgoff && *pages; ++pages)
3568 pgoff--;
3569
3570 if (*pages) {
3571 struct page *page = *pages;
3572 get_page(page);
3573 vmf->page = page;
3574 return 0;
3575 }
3576
3577 return VM_FAULT_SIGBUS;
3578}
3579
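/*
 * Allocate a vma for [addr, addr + len), mark it VM_DONTEXPAND | VM_SOFTDIRTY,
 * wire up the given vm_operations and private data, and insert it into @mm.
 */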
3580static struct vm_area_struct *__install_special_mapping(
3581 struct mm_struct *mm,
3582 unsigned long addr, unsigned long len,
3583 unsigned long vm_flags, void *priv,
3584 const struct vm_operations_struct *ops)
3585{
3586 int ret;
3587 struct vm_area_struct *vma;
3588
3589 vma = vm_area_alloc(mm);
3590 if (unlikely(vma == NULL))
3591 return ERR_PTR(-ENOMEM);
3592
3593 vma->vm_start = addr;
3594 vma->vm_end = addr + len;
3595
3596 vm_flags_init(vma, (vm_flags | mm->def_flags |
3597 VM_DONTEXPAND | VM_SOFTDIRTY) & ~VM_LOCKED_MASK);
3598 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
3599
3600 vma->vm_ops = ops;
3601 vma->vm_private_data = priv;
3602
3603 ret = insert_vm_struct(mm, vma);
3604 if (ret)
3605 goto out;
3606
3607 vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT);
3608
3609 perf_event_mmap(vma);
3610
3611 return vma;
3612
3613out:
3614 vm_area_free(vma);
3615 return ERR_PTR(ret);
3616}
3617
3618bool vma_is_special_mapping(const struct vm_area_struct *vma,
3619 const struct vm_special_mapping *sm)
3620{
3621 return vma->vm_private_data == sm &&
3622 (vma->vm_ops == &special_mapping_vmops ||
3623 vma->vm_ops == &legacy_special_mapping_vmops);
3624}
3625
3626/*
3627 * Called with mm->mmap_lock held for writing.
3628 * Insert a new vma covering the given region, with the given flags.
3629 * Its pages are supplied by the given array of struct page *.
3630 * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated.
3631 * The region past the last page supplied will always produce SIGBUS.
3632 * The array pointer and the pages it points to are assumed to stay alive
3633 * for as long as this mapping might exist.
3634 */
3635struct vm_area_struct *_install_special_mapping(
3636 struct mm_struct *mm,
3637 unsigned long addr, unsigned long len,
3638 unsigned long vm_flags, const struct vm_special_mapping *spec)
3639{
3640 return __install_special_mapping(mm, addr, len, vm_flags, (void *)spec,
3641 &special_mapping_vmops);
3642}
3643
3644int install_special_mapping(struct mm_struct *mm,
3645 unsigned long addr, unsigned long len,
3646 unsigned long vm_flags, struct page **pages)
3647{
3648 struct vm_area_struct *vma = __install_special_mapping(
3649 mm, addr, len, vm_flags, (void *)pages,
3650 &legacy_special_mapping_vmops);
3651
3652 return PTR_ERR_OR_ZERO(vma);
3653}
3654
3655static DEFINE_MUTEX(mm_all_locks_mutex);
3656
3657static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
3658{
3659 if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
3660 /*
3661 * The LSB of head.next can't change from under us
3662 * because we hold the mm_all_locks_mutex.
3663 */
3664 down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock);
3665 /*
3666 * We can safely modify head.next after taking the
3667 * anon_vma->root->rwsem. If some other vma in this mm shares
3668 * the same anon_vma we won't take it again.
3669 *
3670 * No need of atomic instructions here, head.next
3671 * can't change from under us thanks to the
3672 * anon_vma->root->rwsem.
3673 */
3674 if (__test_and_set_bit(0, (unsigned long *)
3675 &anon_vma->root->rb_root.rb_root.rb_node))
3676 BUG();
3677 }
3678}
3679
3680static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
3681{
3682 if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
3683 /*
3684 * AS_MM_ALL_LOCKS can't change from under us because
3685 * we hold the mm_all_locks_mutex.
3686 *
3687 * Operations on ->flags have to be atomic because
3688 * even if AS_MM_ALL_LOCKS is stable thanks to the
3689 * mm_all_locks_mutex, there may be other cpus
3690 * changing other bitflags in parallel to us.
3691 */
3692 if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
3693 BUG();
3694 down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock);
3695 }
3696}
3697
3698/*
3699 * This operation locks against the VM for all pte/vma/mm related
3700 * operations that could ever happen on a certain mm. This includes
3701 * vmtruncate, try_to_unmap, and all page faults.
3702 *
3703 * The caller must take the mmap_lock in write mode before calling
3704 * mm_take_all_locks(). The caller isn't allowed to release the
3705 * mmap_lock until mm_drop_all_locks() returns.
3706 *
3707 * mmap_lock in write mode is required in order to block all operations
3708 * that could modify pagetables and free pages without need of
3709 * altering the vma layout. It's also needed in write mode to avoid new
3710 * anon_vmas to be associated with existing vmas.
3711 *
3712 * A single task can't take more than one mm_take_all_locks() in a row
3713 * or it would deadlock.
3714 *
3715 * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in
 * mapping->flags avoid taking the same lock twice, if more than one
3717 * vma in this mm is backed by the same anon_vma or address_space.
3718 *
 * We take locks in the following order, according to the comment at the beginning
3720 * of mm/rmap.c:
3721 * - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for
3722 * hugetlb mapping);
3723 * - all vmas marked locked
3724 * - all i_mmap_rwsem locks;
 * - all anon_vma->rwsem locks
3726 *
3727 * We can take all locks within these types randomly because the VM code
 * doesn't nest them and we are protected from parallel mm_take_all_locks() by
3729 * mm_all_locks_mutex.
3730 *
3731 * mm_take_all_locks() and mm_drop_all_locks are expensive operations
 * that may have to take thousands of locks.
3733 *
3734 * mm_take_all_locks() can fail if it's interrupted by signals.
3735 */
3736int mm_take_all_locks(struct mm_struct *mm)
3737{
3738 struct vm_area_struct *vma;
3739 struct anon_vma_chain *avc;
3740 MA_STATE(mas, &mm->mm_mt, 0, 0);
3741
3742 mmap_assert_write_locked(mm);
3743
3744 mutex_lock(&mm_all_locks_mutex);
3745
3746 /*
3747 * vma_start_write() does not have a complement in mm_drop_all_locks()
3748 * because vma_start_write() is always asymmetrical; it marks a VMA as
3749 * being written to until mmap_write_unlock() or mmap_write_downgrade()
3750 * is reached.
3751 */
3752 mas_for_each(&mas, vma, ULONG_MAX) {
3753 if (signal_pending(current))
3754 goto out_unlock;
3755 vma_start_write(vma);
3756 }
3757
3758 mas_set(&mas, 0);
3759 mas_for_each(&mas, vma, ULONG_MAX) {
3760 if (signal_pending(current))
3761 goto out_unlock;
3762 if (vma->vm_file && vma->vm_file->f_mapping &&
3763 is_vm_hugetlb_page(vma))
3764 vm_lock_mapping(mm, vma->vm_file->f_mapping);
3765 }
3766
3767 mas_set(&mas, 0);
3768 mas_for_each(&mas, vma, ULONG_MAX) {
3769 if (signal_pending(current))
3770 goto out_unlock;
3771 if (vma->vm_file && vma->vm_file->f_mapping &&
3772 !is_vm_hugetlb_page(vma))
3773 vm_lock_mapping(mm, vma->vm_file->f_mapping);
3774 }
3775
3776 mas_set(&mas, 0);
3777 mas_for_each(&mas, vma, ULONG_MAX) {
3778 if (signal_pending(current))
3779 goto out_unlock;
3780 if (vma->anon_vma)
3781 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
3782 vm_lock_anon_vma(mm, avc->anon_vma);
3783 }
3784
3785 return 0;
3786
3787out_unlock:
3788 mm_drop_all_locks(mm);
3789 return -EINTR;
3790}
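/*
 * A sketch of the expected calling pattern (illustrative only, not code from
 * this file; mmu_notifier registration is one such caller):
 *
 *      mmap_write_lock(mm);
 *      ret = mm_take_all_locks(mm);
 *      if (!ret) {
 *              ... every i_mmap_rwsem and anon_vma lock is now held ...
 *              mm_drop_all_locks(mm);
 *      }
 *      mmap_write_unlock(mm);
 *
 * A non-zero return is -EINTR (a signal arrived); in that case the locks
 * taken so far have already been dropped internally.
 */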
3791
3792static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
3793{
3794 if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
3795 /*
3796 * The LSB of head.next can't change to 0 from under
3797 * us because we hold the mm_all_locks_mutex.
3798 *
3799 * We must however clear the bitflag before unlocking
3800 * the vma so the users using the anon_vma->rb_root will
3801 * never see our bitflag.
3802 *
3803 * No need of atomic instructions here, head.next
3804 * can't change from under us until we release the
3805 * anon_vma->root->rwsem.
3806 */
3807 if (!__test_and_clear_bit(0, (unsigned long *)
3808 &anon_vma->root->rb_root.rb_root.rb_node))
3809 BUG();
3810 anon_vma_unlock_write(anon_vma);
3811 }
3812}
3813
3814static void vm_unlock_mapping(struct address_space *mapping)
3815{
3816 if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
3817 /*
3818 * AS_MM_ALL_LOCKS can't change to 0 from under us
3819 * because we hold the mm_all_locks_mutex.
3820 */
3821 i_mmap_unlock_write(mapping);
3822 if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
3823 &mapping->flags))
3824 BUG();
3825 }
3826}
3827
3828/*
3829 * The mmap_lock cannot be released by the caller until
3830 * mm_drop_all_locks() returns.
3831 */
3832void mm_drop_all_locks(struct mm_struct *mm)
3833{
3834 struct vm_area_struct *vma;
3835 struct anon_vma_chain *avc;
3836 MA_STATE(mas, &mm->mm_mt, 0, 0);
3837
3838 mmap_assert_write_locked(mm);
3839 BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
3840
3841 mas_for_each(&mas, vma, ULONG_MAX) {
3842 if (vma->anon_vma)
3843 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
3844 vm_unlock_anon_vma(avc->anon_vma);
3845 if (vma->vm_file && vma->vm_file->f_mapping)
3846 vm_unlock_mapping(vma->vm_file->f_mapping);
3847 }
3848
3849 mutex_unlock(&mm_all_locks_mutex);
3850}
3851
3852/*
3853 * initialise the percpu counter for VM
3854 */
3855void __init mmap_init(void)
3856{
3857 int ret;
3858
3859 ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
3860 VM_BUG_ON(ret);
3861}
3862
3863/*
3864 * Initialise sysctl_user_reserve_kbytes.
3865 *
3866 * This is intended to prevent a user from starting a single memory hogging
3867 * process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER
3868 * mode.
3869 *
3870 * The default value is min(3% of free memory, 128MB).
3871 * 128MB is enough to recover with sshd/login, bash, and top/kill.
3872 */
3873static int init_user_reserve(void)
3874{
3875 unsigned long free_kbytes;
3876
3877 free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));
3878
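        /* ~3% of free memory (free_kbytes / 32), capped at 128MB (1UL << 17 KiB). */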
3879 sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
3880 return 0;
3881}
3882subsys_initcall(init_user_reserve);
3883
3884/*
3885 * Initialise sysctl_admin_reserve_kbytes.
3886 *
3887 * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
3888 * to log in and kill a memory hogging process.
3889 *
3890 * Systems with more than 256MB will reserve 8MB, enough to recover
3891 * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will
3892 * only reserve 3% of free pages by default.
3893 */
3894static int init_admin_reserve(void)
3895{
3896 unsigned long free_kbytes;
3897
3898 free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));
3899
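        /* ~3% of free memory (free_kbytes / 32), capped at 8MB (1UL << 13 KiB). */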
3900 sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
3901 return 0;
3902}
3903subsys_initcall(init_admin_reserve);
3904
3905/*
3906 * Reinitialise user and admin reserves if memory is added or removed.
3907 *
3908 * The default user reserve max is 128MB, and the default max for the
3909 * admin reserve is 8MB. These are usually, but not always, enough to
3910 * enable recovery from a memory hogging process using login/sshd, a shell,
3911 * and tools like top. It may make sense to increase or even disable the
3912 * reserve depending on the existence of swap or variations in the recovery
3913 * tools. So, the admin may have changed them.
3914 *
3915 * If memory is added and the reserves have been eliminated or increased above
3916 * the default max, then we'll trust the admin.
3917 *
3918 * If memory is removed and there isn't enough free memory, then we
3919 * need to reset the reserves.
3920 *
3921 * Otherwise keep the reserve set by the admin.
3922 */
3923static int reserve_mem_notifier(struct notifier_block *nb,
3924 unsigned long action, void *data)
3925{
3926 unsigned long tmp, free_kbytes;
3927
3928 switch (action) {
3929 case MEM_ONLINE:
3930 /* Default max is 128MB. Leave alone if modified by operator. */
3931 tmp = sysctl_user_reserve_kbytes;
3932 if (0 < tmp && tmp < (1UL << 17))
3933 init_user_reserve();
3934
3935 /* Default max is 8MB. Leave alone if modified by operator. */
3936 tmp = sysctl_admin_reserve_kbytes;
3937 if (0 < tmp && tmp < (1UL << 13))
3938 init_admin_reserve();
3939
3940 break;
3941 case MEM_OFFLINE:
3942 free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));
3943
3944 if (sysctl_user_reserve_kbytes > free_kbytes) {
3945 init_user_reserve();
3946 pr_info("vm.user_reserve_kbytes reset to %lu\n",
3947 sysctl_user_reserve_kbytes);
3948 }
3949
3950 if (sysctl_admin_reserve_kbytes > free_kbytes) {
3951 init_admin_reserve();
3952 pr_info("vm.admin_reserve_kbytes reset to %lu\n",
3953 sysctl_admin_reserve_kbytes);
3954 }
3955 break;
3956 default:
3957 break;
3958 }
3959 return NOTIFY_OK;
3960}
3961
3962static int __meminit init_reserve_notifier(void)
3963{
3964 if (hotplug_memory_notifier(reserve_mem_notifier, DEFAULT_CALLBACK_PRI))
3965 pr_err("Failed registering memory add/remove notifier for admin reserve\n");
3966
3967 return 0;
3968}
3969subsys_initcall(init_reserve_notifier);
94
95/*
96 * check_brk_limits() - Use platform specific check of range & verify mlock
97 * limits.
98 * @addr: The address to check
99 * @len: The size of increase.
100 *
101 * Return: 0 on success.
102 */
103static int check_brk_limits(unsigned long addr, unsigned long len)
104{
105 unsigned long mapped_addr;
106
107 mapped_addr = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
108 if (IS_ERR_VALUE(mapped_addr))
109 return mapped_addr;
110
111 return mlock_future_ok(current->mm, current->mm->def_flags, len)
112 ? 0 : -EAGAIN;
113}
114static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *brkvma,
115 unsigned long addr, unsigned long request, unsigned long flags);
116SYSCALL_DEFINE1(brk, unsigned long, brk)
117{
118 unsigned long newbrk, oldbrk, origbrk;
119 struct mm_struct *mm = current->mm;
120 struct vm_area_struct *brkvma, *next = NULL;
121 unsigned long min_brk;
122 bool populate = false;
123 LIST_HEAD(uf);
124 struct vma_iterator vmi;
125
126 if (mmap_write_lock_killable(mm))
127 return -EINTR;
128
129 origbrk = mm->brk;
130
131#ifdef CONFIG_COMPAT_BRK
132 /*
133 * CONFIG_COMPAT_BRK can still be overridden by setting
134 * randomize_va_space to 2, which will still cause mm->start_brk
135 * to be arbitrarily shifted
136 */
137 if (current->brk_randomized)
138 min_brk = mm->start_brk;
139 else
140 min_brk = mm->end_data;
141#else
142 min_brk = mm->start_brk;
143#endif
144 if (brk < min_brk)
145 goto out;
146
147 /*
148 * Check against rlimit here. If this check is done later after the test
149 * of oldbrk with newbrk then it can escape the test and let the data
150 * segment grow beyond its set limit in the case where the limit is
151 * not page aligned -Ram Gupta
152 */
153 if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
154 mm->end_data, mm->start_data))
155 goto out;
156
157 newbrk = PAGE_ALIGN(brk);
158 oldbrk = PAGE_ALIGN(mm->brk);
159 if (oldbrk == newbrk) {
160 mm->brk = brk;
161 goto success;
162 }
163
164 /* Always allow shrinking brk. */
165 if (brk <= mm->brk) {
166 /* Search one past newbrk */
167 vma_iter_init(&vmi, mm, newbrk);
168 brkvma = vma_find(&vmi, oldbrk);
169 if (!brkvma || brkvma->vm_start >= oldbrk)
170 goto out; /* mapping intersects with an existing non-brk vma. */
171 /*
172 * mm->brk must be protected by write mmap_lock.
173 * do_vmi_align_munmap() will drop the lock on success, so
174 * update it before calling do_vmi_align_munmap().
175 */
176 mm->brk = brk;
177 if (do_vmi_align_munmap(&vmi, brkvma, mm, newbrk, oldbrk, &uf,
178 /* unlock = */ true))
179 goto out;
180
181 goto success_unlocked;
182 }
183
184 if (check_brk_limits(oldbrk, newbrk - oldbrk))
185 goto out;
186
187 /*
188 * Only check if the next VMA is within the stack_guard_gap of the
189 * expansion area
190 */
191 vma_iter_init(&vmi, mm, oldbrk);
192 next = vma_find(&vmi, newbrk + PAGE_SIZE + stack_guard_gap);
193 if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
194 goto out;
195
196 brkvma = vma_prev_limit(&vmi, mm->start_brk);
197 /* Ok, looks good - let it rip. */
198 if (do_brk_flags(&vmi, brkvma, oldbrk, newbrk - oldbrk, 0) < 0)
199 goto out;
200
201 mm->brk = brk;
202 if (mm->def_flags & VM_LOCKED)
203 populate = true;
204
205success:
206 mmap_write_unlock(mm);
207success_unlocked:
208 userfaultfd_unmap_complete(mm, &uf);
209 if (populate)
210 mm_populate(oldbrk, newbrk - oldbrk);
211 return brk;
212
213out:
214 mm->brk = origbrk;
215 mmap_write_unlock(mm);
216 return origbrk;
217}
218
219/*
220 * If a hint addr is less than mmap_min_addr change hint to be as
221 * low as possible but still greater than mmap_min_addr
222 */
223static inline unsigned long round_hint_to_min(unsigned long hint)
224{
225 hint &= PAGE_MASK;
226 if (((void *)hint != NULL) &&
227 (hint < mmap_min_addr))
228 return PAGE_ALIGN(mmap_min_addr);
229 return hint;
230}
231
232bool mlock_future_ok(struct mm_struct *mm, unsigned long flags,
233 unsigned long bytes)
234{
235 unsigned long locked_pages, limit_pages;
236
237 if (!(flags & VM_LOCKED) || capable(CAP_IPC_LOCK))
238 return true;
239
240 locked_pages = bytes >> PAGE_SHIFT;
241 locked_pages += mm->locked_vm;
242
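        /* RLIMIT_MEMLOCK is in bytes; convert the limit to pages for the comparison. */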
243 limit_pages = rlimit(RLIMIT_MEMLOCK);
244 limit_pages >>= PAGE_SHIFT;
245
246 return locked_pages <= limit_pages;
247}
248
249static inline u64 file_mmap_size_max(struct file *file, struct inode *inode)
250{
251 if (S_ISREG(inode->i_mode))
252 return MAX_LFS_FILESIZE;
253
254 if (S_ISBLK(inode->i_mode))
255 return MAX_LFS_FILESIZE;
256
257 if (S_ISSOCK(inode->i_mode))
258 return MAX_LFS_FILESIZE;
259
260 /* Special "we do even unsigned file positions" case */
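        /* A zero return effectively means "no limit": file_mmap_ok() only
         * enforces the length check when maxsize is non-zero. */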
261 if (file->f_op->fop_flags & FOP_UNSIGNED_OFFSET)
262 return 0;
263
264 /* Yes, random drivers might want more. But I'm tired of buggy drivers */
265 return ULONG_MAX;
266}
267
268static inline bool file_mmap_ok(struct file *file, struct inode *inode,
269 unsigned long pgoff, unsigned long len)
270{
271 u64 maxsize = file_mmap_size_max(file, inode);
272
273 if (maxsize && len > maxsize)
274 return false;
275 maxsize -= len;
276 if (pgoff > maxsize >> PAGE_SHIFT)
277 return false;
278 return true;
279}
280
281/*
282 * The caller must write-lock current->mm->mmap_lock.
283 */
284unsigned long do_mmap(struct file *file, unsigned long addr,
285 unsigned long len, unsigned long prot,
286 unsigned long flags, vm_flags_t vm_flags,
287 unsigned long pgoff, unsigned long *populate,
288 struct list_head *uf)
289{
290 struct mm_struct *mm = current->mm;
291 int pkey = 0;
292
293 *populate = 0;
294
295 if (!len)
296 return -EINVAL;
297
298 /*
299 * Does the application expect PROT_READ to imply PROT_EXEC?
300 *
301 * (the exception is when the underlying filesystem is noexec
302 * mounted, in which case we don't add PROT_EXEC.)
303 */
304 if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
305 if (!(file && path_noexec(&file->f_path)))
306 prot |= PROT_EXEC;
307
308 /* force arch specific MAP_FIXED handling in get_unmapped_area */
309 if (flags & MAP_FIXED_NOREPLACE)
310 flags |= MAP_FIXED;
311
312 if (!(flags & MAP_FIXED))
313 addr = round_hint_to_min(addr);
314
315 /* Careful about overflows.. */
316 len = PAGE_ALIGN(len);
317 if (!len)
318 return -ENOMEM;
319
320 /* offset overflow? */
321 if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
322 return -EOVERFLOW;
323
324 /* Too many mappings? */
325 if (mm->map_count > sysctl_max_map_count)
326 return -ENOMEM;
327
328 /*
329 * addr is returned from get_unmapped_area; there are two cases:
330 *
331 * 1> MAP_FIXED == false
332 * unallocated memory, no need to check sealing.
333 * 2> MAP_FIXED == true
334 * sealing is checked inside mmap_region when
335 * do_vmi_munmap is called.
336 */
337
338 if (prot == PROT_EXEC) {
339 pkey = execute_only_pkey(mm);
340 if (pkey < 0)
341 pkey = 0;
342 }
343
344 /* Do simple checking here so the lower-level routines won't have
345 * to. we assume access permissions have been handled by the open
346 * of the memory object, so we don't do any here.
347 */
348 vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(file, flags) |
349 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
350
351 /* Obtain the address to map to. we verify (or select) it and ensure
352 * that it represents a valid section of the address space.
353 */
354 addr = __get_unmapped_area(file, addr, len, pgoff, flags, vm_flags);
355 if (IS_ERR_VALUE(addr))
356 return addr;
357
358 if (flags & MAP_FIXED_NOREPLACE) {
359 if (find_vma_intersection(mm, addr, addr + len))
360 return -EEXIST;
361 }
362
363 if (flags & MAP_LOCKED)
364 if (!can_do_mlock())
365 return -EPERM;
366
367 if (!mlock_future_ok(mm, vm_flags, len))
368 return -EAGAIN;
369
370 if (file) {
371 struct inode *inode = file_inode(file);
372 unsigned int seals = memfd_file_seals(file);
373 unsigned long flags_mask;
374
375 if (!file_mmap_ok(file, inode, pgoff, len))
376 return -EOVERFLOW;
377
378 flags_mask = LEGACY_MAP_MASK;
379 if (file->f_op->fop_flags & FOP_MMAP_SYNC)
380 flags_mask |= MAP_SYNC;
381
382 switch (flags & MAP_TYPE) {
383 case MAP_SHARED:
384 /*
385 * Force use of MAP_SHARED_VALIDATE with non-legacy
386 * flags. E.g. MAP_SYNC is dangerous to use with
387 * MAP_SHARED as you don't know which consistency model
388 * you will get. We silently ignore unsupported flags
389 * with MAP_SHARED to preserve backward compatibility.
390 */
391 flags &= LEGACY_MAP_MASK;
392 fallthrough;
393 case MAP_SHARED_VALIDATE:
394 if (flags & ~flags_mask)
395 return -EOPNOTSUPP;
396 if (prot & PROT_WRITE) {
397 if (!(file->f_mode & FMODE_WRITE))
398 return -EACCES;
399 if (IS_SWAPFILE(file->f_mapping->host))
400 return -ETXTBSY;
401 }
402
403 /*
404 * Make sure we don't allow writing to an append-only
405 * file..
406 */
407 if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
408 return -EACCES;
409
410 vm_flags |= VM_SHARED | VM_MAYSHARE;
411 if (!(file->f_mode & FMODE_WRITE))
412 vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
413 else if (is_readonly_sealed(seals, vm_flags))
414 vm_flags &= ~VM_MAYWRITE;
415 fallthrough;
416 case MAP_PRIVATE:
417 if (!(file->f_mode & FMODE_READ))
418 return -EACCES;
419 if (path_noexec(&file->f_path)) {
420 if (vm_flags & VM_EXEC)
421 return -EPERM;
422 vm_flags &= ~VM_MAYEXEC;
423 }
424
425 if (!file->f_op->mmap)
426 return -ENODEV;
427 if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
428 return -EINVAL;
429 break;
430
431 default:
432 return -EINVAL;
433 }
434 } else {
435 switch (flags & MAP_TYPE) {
436 case MAP_SHARED:
437 if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
438 return -EINVAL;
439 /*
440 * Ignore pgoff.
441 */
442 pgoff = 0;
443 vm_flags |= VM_SHARED | VM_MAYSHARE;
444 break;
445 case MAP_DROPPABLE:
446 if (VM_DROPPABLE == VM_NONE)
447 return -ENOTSUPP;
448 /*
449 * A locked or stack area makes no sense to be droppable.
450 *
451 * Also, since droppable pages can just go away at any time
452 * it makes no sense to copy them on fork or dump them.
453 *
454 * And don't attempt to combine with hugetlb for now.
455 */
456 if (flags & (MAP_LOCKED | MAP_HUGETLB))
457 return -EINVAL;
458 if (vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
459 return -EINVAL;
460
461 vm_flags |= VM_DROPPABLE;
462
463 /*
464 * If the pages can be dropped, then it doesn't make
465 * sense to reserve them.
466 */
467 vm_flags |= VM_NORESERVE;
468
469 /*
470 * Likewise, they're volatile enough that they
471 * shouldn't survive forks or coredumps.
472 */
473 vm_flags |= VM_WIPEONFORK | VM_DONTDUMP;
474 fallthrough;
475 case MAP_PRIVATE:
476 /*
477 * Set pgoff according to addr for anon_vma.
478 */
479 pgoff = addr >> PAGE_SHIFT;
480 break;
481 default:
482 return -EINVAL;
483 }
484 }
485
486 /*
487 * Set 'VM_NORESERVE' if we should not account for the
488 * memory use of this mapping.
489 */
490 if (flags & MAP_NORESERVE) {
491 /* We honor MAP_NORESERVE if allowed to overcommit */
492 if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
493 vm_flags |= VM_NORESERVE;
494
495 /* hugetlb applies strict overcommit unless MAP_NORESERVE */
496 if (file && is_file_hugepages(file))
497 vm_flags |= VM_NORESERVE;
498 }
499
500 addr = mmap_region(file, addr, len, vm_flags, pgoff, uf);
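        /* Report the length back so the caller prefaults the range when it is
         * mlocked, or when MAP_POPULATE was requested without MAP_NONBLOCK. */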
501 if (!IS_ERR_VALUE(addr) &&
502 ((vm_flags & VM_LOCKED) ||
503 (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
504 *populate = len;
505 return addr;
506}
507
508unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
509 unsigned long prot, unsigned long flags,
510 unsigned long fd, unsigned long pgoff)
511{
512 struct file *file = NULL;
513 unsigned long retval;
514
515 if (!(flags & MAP_ANONYMOUS)) {
516 audit_mmap_fd(fd, flags);
517 file = fget(fd);
518 if (!file)
519 return -EBADF;
520 if (is_file_hugepages(file)) {
521 len = ALIGN(len, huge_page_size(hstate_file(file)));
522 } else if (unlikely(flags & MAP_HUGETLB)) {
523 retval = -EINVAL;
524 goto out_fput;
525 }
526 } else if (flags & MAP_HUGETLB) {
527 struct hstate *hs;
528
529 hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
530 if (!hs)
531 return -EINVAL;
532
533 len = ALIGN(len, huge_page_size(hs));
534 /*
535 * VM_NORESERVE is used because the reservations will be
536 * taken when vm_ops->mmap() is called
537 */
538 file = hugetlb_file_setup(HUGETLB_ANON_FILE, len,
539 VM_NORESERVE,
540 HUGETLB_ANONHUGE_INODE,
541 (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
542 if (IS_ERR(file))
543 return PTR_ERR(file);
544 }
545
546 retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
547out_fput:
548 if (file)
549 fput(file);
550 return retval;
551}
552
553SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
554 unsigned long, prot, unsigned long, flags,
555 unsigned long, fd, unsigned long, pgoff)
556{
557 return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
558}
559
560#ifdef __ARCH_WANT_SYS_OLD_MMAP
561struct mmap_arg_struct {
562 unsigned long addr;
563 unsigned long len;
564 unsigned long prot;
565 unsigned long flags;
566 unsigned long fd;
567 unsigned long offset;
568};
569
570SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
571{
572 struct mmap_arg_struct a;
573
574 if (copy_from_user(&a, arg, sizeof(a)))
575 return -EFAULT;
576 if (offset_in_page(a.offset))
577 return -EINVAL;
578
579 return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
580 a.offset >> PAGE_SHIFT);
581}
582#endif /* __ARCH_WANT_SYS_OLD_MMAP */
583
584/**
585 * unmapped_area() - Find an area between the low_limit and the high_limit with
586 * the correct alignment and offset, all from @info. Note: current->mm is used
587 * for the search.
588 *
589 * @info: The unmapped area information including the range [low_limit,
590 * high_limit), the alignment offset and mask.
591 *
592 * Return: A memory address or -ENOMEM.
593 */
594static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
595{
596 unsigned long length, gap;
597 unsigned long low_limit, high_limit;
598 struct vm_area_struct *tmp;
599 VMA_ITERATOR(vmi, current->mm, 0);
600
601 /* Adjust search length to account for worst case alignment overhead */
602 length = info->length + info->align_mask + info->start_gap;
603 if (length < info->length)
604 return -ENOMEM;
605
606 low_limit = info->low_limit;
607 if (low_limit < mmap_min_addr)
608 low_limit = mmap_min_addr;
609 high_limit = info->high_limit;
610retry:
611 if (vma_iter_area_lowest(&vmi, low_limit, high_limit, length))
612 return -ENOMEM;
613
614 /*
615 * Adjust for the gap first so it doesn't interfere with the
616 * later alignment. The first step is the minimum needed to
617 * fulfill the start gap, the next step is the minimum to align
618 * that. The result is the minimum needed to fulfill both.
619 */
620 gap = vma_iter_addr(&vmi) + info->start_gap;
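        /* Round up so the low bits of gap match align_offset under align_mask. */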
621 gap += (info->align_offset - gap) & info->align_mask;
622 tmp = vma_next(&vmi);
623 if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
624 if (vm_start_gap(tmp) < gap + length - 1) {
625 low_limit = tmp->vm_end;
626 vma_iter_reset(&vmi);
627 goto retry;
628 }
629 } else {
630 tmp = vma_prev(&vmi);
631 if (tmp && vm_end_gap(tmp) > gap) {
632 low_limit = vm_end_gap(tmp);
633 vma_iter_reset(&vmi);
634 goto retry;
635 }
636 }
637
638 return gap;
639}
640
641/**
642 * unmapped_area_topdown() - Find an area between the low_limit and the
643 * high_limit with the correct alignment and offset at the highest available
644 * address, all from @info. Note: current->mm is used for the search.
645 *
646 * @info: The unmapped area information including the range [low_limit,
647 * high_limit), the alignment offset and mask.
648 *
649 * Return: A memory address or -ENOMEM.
650 */
651static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
652{
653 unsigned long length, gap, gap_end;
654 unsigned long low_limit, high_limit;
655 struct vm_area_struct *tmp;
656 VMA_ITERATOR(vmi, current->mm, 0);
657
658 /* Adjust search length to account for worst case alignment overhead */
659 length = info->length + info->align_mask + info->start_gap;
660 if (length < info->length)
661 return -ENOMEM;
662
663 low_limit = info->low_limit;
664 if (low_limit < mmap_min_addr)
665 low_limit = mmap_min_addr;
666 high_limit = info->high_limit;
667retry:
668 if (vma_iter_area_highest(&vmi, low_limit, high_limit, length))
669 return -ENOMEM;
670
671 gap = vma_iter_end(&vmi) - info->length;
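        /* Round down so the low bits of gap match align_offset under align_mask. */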
672 gap -= (gap - info->align_offset) & info->align_mask;
673 gap_end = vma_iter_end(&vmi);
674 tmp = vma_next(&vmi);
675 if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
676 if (vm_start_gap(tmp) < gap_end) {
677 high_limit = vm_start_gap(tmp);
678 vma_iter_reset(&vmi);
679 goto retry;
680 }
681 } else {
682 tmp = vma_prev(&vmi);
683 if (tmp && vm_end_gap(tmp) > gap) {
684 high_limit = tmp->vm_start;
685 vma_iter_reset(&vmi);
686 goto retry;
687 }
688 }
689
690 return gap;
691}
692
693/*
694 * Determine if the allocation needs to ensure that there is no
695 * existing mapping within its guard gaps, for use as start_gap.
696 */
697static inline unsigned long stack_guard_placement(vm_flags_t vm_flags)
698{
699 if (vm_flags & VM_SHADOW_STACK)
700 return PAGE_SIZE;
701
702 return 0;
703}
704
705/*
706 * Search for an unmapped address range.
707 *
708 * We are looking for a range that:
709 * - does not intersect with any VMA;
710 * - is contained within the [low_limit, high_limit) interval;
711 * - is at least the desired size.
712 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
713 */
714unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info)
715{
716 unsigned long addr;
717
718 if (info->flags & VM_UNMAPPED_AREA_TOPDOWN)
719 addr = unmapped_area_topdown(info);
720 else
721 addr = unmapped_area(info);
722
723 trace_vm_unmapped_area(addr, info);
724 return addr;
725}
726
727/* Get an address range which is currently unmapped.
728 * For shmat() with addr=0.
729 *
730 * Ugly calling convention alert:
731 * Return value with the low bits set means error value,
732 * ie
733 * if (ret & ~PAGE_MASK)
734 * error = ret;
735 *
736 * This function "knows" that -ENOMEM has the bits set.
737 */
738unsigned long
739generic_get_unmapped_area(struct file *filp, unsigned long addr,
740 unsigned long len, unsigned long pgoff,
741 unsigned long flags, vm_flags_t vm_flags)
742{
743 struct mm_struct *mm = current->mm;
744 struct vm_area_struct *vma, *prev;
745 struct vm_unmapped_area_info info = {};
746 const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
747
748 if (len > mmap_end - mmap_min_addr)
749 return -ENOMEM;
750
751 if (flags & MAP_FIXED)
752 return addr;
753
754 if (addr) {
755 addr = PAGE_ALIGN(addr);
756 vma = find_vma_prev(mm, addr, &prev);
757 if (mmap_end - len >= addr && addr >= mmap_min_addr &&
758 (!vma || addr + len <= vm_start_gap(vma)) &&
759 (!prev || addr >= vm_end_gap(prev)))
760 return addr;
761 }
762
763 info.length = len;
764 info.low_limit = mm->mmap_base;
765 info.high_limit = mmap_end;
766 info.start_gap = stack_guard_placement(vm_flags);
767 if (filp && is_file_hugepages(filp))
768 info.align_mask = huge_page_mask_align(filp);
769 return vm_unmapped_area(&info);
770}
771
772#ifndef HAVE_ARCH_UNMAPPED_AREA
773unsigned long
774arch_get_unmapped_area(struct file *filp, unsigned long addr,
775 unsigned long len, unsigned long pgoff,
776 unsigned long flags, vm_flags_t vm_flags)
777{
778 return generic_get_unmapped_area(filp, addr, len, pgoff, flags,
779 vm_flags);
780}
781#endif
782
783/*
784 * This mmap-allocator allocates new areas top-down from below the
785 * stack's low limit (the base):
786 */
787unsigned long
788generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
789 unsigned long len, unsigned long pgoff,
790 unsigned long flags, vm_flags_t vm_flags)
791{
792 struct vm_area_struct *vma, *prev;
793 struct mm_struct *mm = current->mm;
794 struct vm_unmapped_area_info info = {};
795 const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
796
797 /* requested length too big for entire address space */
798 if (len > mmap_end - mmap_min_addr)
799 return -ENOMEM;
800
801 if (flags & MAP_FIXED)
802 return addr;
803
804 /* requesting a specific address */
805 if (addr) {
806 addr = PAGE_ALIGN(addr);
807 vma = find_vma_prev(mm, addr, &prev);
808 if (mmap_end - len >= addr && addr >= mmap_min_addr &&
809 (!vma || addr + len <= vm_start_gap(vma)) &&
810 (!prev || addr >= vm_end_gap(prev)))
811 return addr;
812 }
813
814 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
815 info.length = len;
816 info.low_limit = PAGE_SIZE;
817 info.high_limit = arch_get_mmap_base(addr, mm->mmap_base);
818 info.start_gap = stack_guard_placement(vm_flags);
819 if (filp && is_file_hugepages(filp))
820 info.align_mask = huge_page_mask_align(filp);
821 addr = vm_unmapped_area(&info);
822
823 /*
824 * A failed mmap() very likely causes application failure,
825 * so fall back to the bottom-up function here. This scenario
826 * can happen with large stack limits and large mmap()
827 * allocations.
828 */
829 if (offset_in_page(addr)) {
830 VM_BUG_ON(addr != -ENOMEM);
831 info.flags = 0;
832 info.low_limit = TASK_UNMAPPED_BASE;
833 info.high_limit = mmap_end;
834 addr = vm_unmapped_area(&info);
835 }
836
837 return addr;
838}
839
840#ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
841unsigned long
842arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
843 unsigned long len, unsigned long pgoff,
844 unsigned long flags, vm_flags_t vm_flags)
845{
846 return generic_get_unmapped_area_topdown(filp, addr, len, pgoff, flags,
847 vm_flags);
848}
849#endif
850
851unsigned long mm_get_unmapped_area_vmflags(struct mm_struct *mm, struct file *filp,
852 unsigned long addr, unsigned long len,
853 unsigned long pgoff, unsigned long flags,
854 vm_flags_t vm_flags)
855{
856 if (test_bit(MMF_TOPDOWN, &mm->flags))
857 return arch_get_unmapped_area_topdown(filp, addr, len, pgoff,
858 flags, vm_flags);
859 return arch_get_unmapped_area(filp, addr, len, pgoff, flags, vm_flags);
860}
861
862unsigned long
863__get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
864 unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags)
865{
866 unsigned long (*get_area)(struct file *, unsigned long,
867 unsigned long, unsigned long, unsigned long)
868 = NULL;
869
870 unsigned long error = arch_mmap_check(addr, len, flags);
871 if (error)
872 return error;
873
874 /* Careful about overflows.. */
875 if (len > TASK_SIZE)
876 return -ENOMEM;
877
878 if (file) {
879 if (file->f_op->get_unmapped_area)
880 get_area = file->f_op->get_unmapped_area;
881 } else if (flags & MAP_SHARED) {
882 /*
883 * mmap_region() will call shmem_zero_setup() to create a file,
884 * so use shmem's get_unmapped_area in case it can be huge.
885 */
886 get_area = shmem_get_unmapped_area;
887 }
888
889 /* Always treat pgoff as zero for anonymous memory. */
890 if (!file)
891 pgoff = 0;
892
893 if (get_area) {
894 addr = get_area(file, addr, len, pgoff, flags);
895 } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && !file
896 && !addr /* no hint */
897 && IS_ALIGNED(len, PMD_SIZE)) {
898 /* Ensures that larger anonymous mappings are THP aligned. */
899 addr = thp_get_unmapped_area_vmflags(file, addr, len,
900 pgoff, flags, vm_flags);
901 } else {
902 addr = mm_get_unmapped_area_vmflags(current->mm, file, addr, len,
903 pgoff, flags, vm_flags);
904 }
905 if (IS_ERR_VALUE(addr))
906 return addr;
907
908 if (addr > TASK_SIZE - len)
909 return -ENOMEM;
910 if (offset_in_page(addr))
911 return -EINVAL;
912
913 error = security_mmap_addr(addr);
914 return error ? error : addr;
915}
916
917unsigned long
918mm_get_unmapped_area(struct mm_struct *mm, struct file *file,
919 unsigned long addr, unsigned long len,
920 unsigned long pgoff, unsigned long flags)
921{
922 if (test_bit(MMF_TOPDOWN, &mm->flags))
923 return arch_get_unmapped_area_topdown(file, addr, len, pgoff, flags, 0);
924 return arch_get_unmapped_area(file, addr, len, pgoff, flags, 0);
925}
926EXPORT_SYMBOL(mm_get_unmapped_area);
927
928/**
929 * find_vma_intersection() - Look up the first VMA which intersects the interval
930 * @mm: The process address space.
931 * @start_addr: The inclusive start user address.
932 * @end_addr: The exclusive end user address.
933 *
934 * Returns: The first VMA within the provided range, %NULL otherwise. Assumes
935 * start_addr < end_addr.
936 */
937struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
938 unsigned long start_addr,
939 unsigned long end_addr)
940{
941 unsigned long index = start_addr;
942
943 mmap_assert_locked(mm);
944 return mt_find(&mm->mm_mt, &index, end_addr - 1);
945}
946EXPORT_SYMBOL(find_vma_intersection);
947
948/**
949 * find_vma() - Find the VMA for a given address, or the next VMA.
950 * @mm: The mm_struct to check
951 * @addr: The address
952 *
953 * Returns: The VMA associated with addr, or the next VMA.
954 * May return %NULL in the case of no VMA at addr or above.
955 */
956struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
957{
958 unsigned long index = addr;
959
960 mmap_assert_locked(mm);
961 return mt_find(&mm->mm_mt, &index, ULONG_MAX);
962}
963EXPORT_SYMBOL(find_vma);
964
965/**
966 * find_vma_prev() - Find the VMA for a given address, or the next vma and
967 * set %pprev to the previous VMA, if any.
968 * @mm: The mm_struct to check
969 * @addr: The address
970 * @pprev: The pointer to set to the previous VMA
971 *
972 * Note that RCU lock is missing here since the external mmap_lock() is used
973 * instead.
974 *
975 * Returns: The VMA associated with @addr, or the next vma.
976 * May return %NULL in the case of no vma at addr or above.
977 */
978struct vm_area_struct *
979find_vma_prev(struct mm_struct *mm, unsigned long addr,
980 struct vm_area_struct **pprev)
981{
982 struct vm_area_struct *vma;
983 VMA_ITERATOR(vmi, mm, addr);
984
985 vma = vma_iter_load(&vmi);
986 *pprev = vma_prev(&vmi);
987 if (!vma)
988 vma = vma_next(&vmi);
989 return vma;
990}
991
992/*
993 * Verify that the stack growth is acceptable and
994 * update accounting. This is shared with both the
995 * grow-up and grow-down cases.
996 */
997static int acct_stack_growth(struct vm_area_struct *vma,
998 unsigned long size, unsigned long grow)
999{
1000 struct mm_struct *mm = vma->vm_mm;
1001 unsigned long new_start;
1002
1003 /* address space limit tests */
1004 if (!may_expand_vm(mm, vma->vm_flags, grow))
1005 return -ENOMEM;
1006
1007 /* Stack limit test */
1008 if (size > rlimit(RLIMIT_STACK))
1009 return -ENOMEM;
1010
1011 /* mlock limit tests */
1012 if (!mlock_future_ok(mm, vma->vm_flags, grow << PAGE_SHIFT))
1013 return -ENOMEM;
1014
1015 /* Check to ensure the stack will not grow into a hugetlb-only region */
1016 new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
1017 vma->vm_end - size;
1018 if (is_hugepage_only_range(vma->vm_mm, new_start, size))
1019 return -EFAULT;
1020
1021 /*
1022 * Overcommit.. This must be the final test, as it will
1023 * update security statistics.
1024 */
1025 if (security_vm_enough_memory_mm(mm, grow))
1026 return -ENOMEM;
1027
1028 return 0;
1029}
1030
1031#if defined(CONFIG_STACK_GROWSUP)
1032/*
1033 * PA-RISC uses this for its stack.
1034 * vma is the last one with address > vma->vm_end. Have to extend vma.
1035 */
1036static int expand_upwards(struct vm_area_struct *vma, unsigned long address)
1037{
1038 struct mm_struct *mm = vma->vm_mm;
1039 struct vm_area_struct *next;
1040 unsigned long gap_addr;
1041 int error = 0;
1042 VMA_ITERATOR(vmi, mm, vma->vm_start);
1043
1044 if (!(vma->vm_flags & VM_GROWSUP))
1045 return -EFAULT;
1046
1047 mmap_assert_write_locked(mm);
1048
1049 /* Guard against exceeding limits of the address space. */
1050 address &= PAGE_MASK;
1051 if (address >= (TASK_SIZE & PAGE_MASK))
1052 return -ENOMEM;
1053 address += PAGE_SIZE;
1054
1055 /* Enforce stack_guard_gap */
1056 gap_addr = address + stack_guard_gap;
1057
1058 /* Guard against overflow */
1059 if (gap_addr < address || gap_addr > TASK_SIZE)
1060 gap_addr = TASK_SIZE;
1061
1062 next = find_vma_intersection(mm, vma->vm_end, gap_addr);
1063 if (next && vma_is_accessible(next)) {
1064 if (!(next->vm_flags & VM_GROWSUP))
1065 return -ENOMEM;
1066 /* Check that both stack segments have the same anon_vma? */
1067 }
1068
1069 if (next)
1070 vma_iter_prev_range_limit(&vmi, address);
1071
1072 vma_iter_config(&vmi, vma->vm_start, address);
1073 if (vma_iter_prealloc(&vmi, vma))
1074 return -ENOMEM;
1075
1076 /* We must make sure the anon_vma is allocated. */
1077 if (unlikely(anon_vma_prepare(vma))) {
1078 vma_iter_free(&vmi);
1079 return -ENOMEM;
1080 }
1081
1082 /* Lock the VMA before expanding to prevent concurrent page faults */
1083 vma_start_write(vma);
1084 /* We update the anon VMA tree. */
1085 anon_vma_lock_write(vma->anon_vma);
1086
1087 /* Somebody else might have raced and expanded it already */
1088 if (address > vma->vm_end) {
1089 unsigned long size, grow;
1090
1091 size = address - vma->vm_start;
1092 grow = (address - vma->vm_end) >> PAGE_SHIFT;
1093
1094 error = -ENOMEM;
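                /* Expand only if vm_pgoff plus the size in pages does not wrap around. */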
1095 if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
1096 error = acct_stack_growth(vma, size, grow);
1097 if (!error) {
1098 if (vma->vm_flags & VM_LOCKED)
1099 mm->locked_vm += grow;
1100 vm_stat_account(mm, vma->vm_flags, grow);
1101 anon_vma_interval_tree_pre_update_vma(vma);
1102 vma->vm_end = address;
1103 /* Overwrite old entry in mtree. */
1104 vma_iter_store(&vmi, vma);
1105 anon_vma_interval_tree_post_update_vma(vma);
1106
1107 perf_event_mmap(vma);
1108 }
1109 }
1110 }
1111 anon_vma_unlock_write(vma->anon_vma);
1112 vma_iter_free(&vmi);
1113 validate_mm(mm);
1114 return error;
1115}
1116#endif /* CONFIG_STACK_GROWSUP */
1117
1118/*
1119 * vma is the first one with address < vma->vm_start. Have to extend vma.
1120 * mmap_lock held for writing.
1121 */
1122int expand_downwards(struct vm_area_struct *vma, unsigned long address)
1123{
1124 struct mm_struct *mm = vma->vm_mm;
1125 struct vm_area_struct *prev;
1126 int error = 0;
1127 VMA_ITERATOR(vmi, mm, vma->vm_start);
1128
1129 if (!(vma->vm_flags & VM_GROWSDOWN))
1130 return -EFAULT;
1131
1132 mmap_assert_write_locked(mm);
1133
1134 address &= PAGE_MASK;
1135 if (address < mmap_min_addr || address < FIRST_USER_ADDRESS)
1136 return -EPERM;
1137
1138 /* Enforce stack_guard_gap */
1139 prev = vma_prev(&vmi);
1140 /* Check that both stack segments have the same anon_vma? */
1141 if (prev) {
1142 if (!(prev->vm_flags & VM_GROWSDOWN) &&
1143 vma_is_accessible(prev) &&
1144 (address - prev->vm_end < stack_guard_gap))
1145 return -ENOMEM;
1146 }
1147
1148 if (prev)
1149 vma_iter_next_range_limit(&vmi, vma->vm_start);
1150
1151 vma_iter_config(&vmi, address, vma->vm_end);
1152 if (vma_iter_prealloc(&vmi, vma))
1153 return -ENOMEM;
1154
1155 /* We must make sure the anon_vma is allocated. */
1156 if (unlikely(anon_vma_prepare(vma))) {
1157 vma_iter_free(&vmi);
1158 return -ENOMEM;
1159 }
1160
1161 /* Lock the VMA before expanding to prevent concurrent page faults */
1162 vma_start_write(vma);
1163 /* We update the anon VMA tree. */
1164 anon_vma_lock_write(vma->anon_vma);
1165
1166 /* Somebody else might have raced and expanded it already */
1167 if (address < vma->vm_start) {
1168 unsigned long size, grow;
1169
1170 size = vma->vm_end - address;
1171 grow = (vma->vm_start - address) >> PAGE_SHIFT;
1172
1173 error = -ENOMEM;
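                /* Expand only if vm_pgoff can be decreased by grow without underflowing. */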
1174 if (grow <= vma->vm_pgoff) {
1175 error = acct_stack_growth(vma, size, grow);
1176 if (!error) {
1177 if (vma->vm_flags & VM_LOCKED)
1178 mm->locked_vm += grow;
1179 vm_stat_account(mm, vma->vm_flags, grow);
1180 anon_vma_interval_tree_pre_update_vma(vma);
1181 vma->vm_start = address;
1182 vma->vm_pgoff -= grow;
1183 /* Overwrite old entry in mtree. */
1184 vma_iter_store(&vmi, vma);
1185 anon_vma_interval_tree_post_update_vma(vma);
1186
1187 perf_event_mmap(vma);
1188 }
1189 }
1190 }
1191 anon_vma_unlock_write(vma->anon_vma);
1192 vma_iter_free(&vmi);
1193 validate_mm(mm);
1194 return error;
1195}
1196
1197/* enforced gap between the expanding stack and other mappings. */
1198unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
1199
1200static int __init cmdline_parse_stack_guard_gap(char *p)
1201{
1202 unsigned long val;
1203 char *endptr;
1204
1205 val = simple_strtoul(p, &endptr, 10);
1206 if (!*endptr)
1207 stack_guard_gap = val << PAGE_SHIFT;
1208
1209 return 1;
1210}
1211__setup("stack_guard_gap=", cmdline_parse_stack_guard_gap);
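/*
 * Note: the stack_guard_gap= value above is given in pages, not bytes; for
 * example, stack_guard_gap=512 doubles the default 256-page gap (1MB with
 * 4KB pages).
 */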
1212
1213#ifdef CONFIG_STACK_GROWSUP
1214int expand_stack_locked(struct vm_area_struct *vma, unsigned long address)
1215{
1216 return expand_upwards(vma, address);
1217}
1218
1219struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr)
1220{
1221 struct vm_area_struct *vma, *prev;
1222
1223 addr &= PAGE_MASK;
1224 vma = find_vma_prev(mm, addr, &prev);
1225 if (vma && (vma->vm_start <= addr))
1226 return vma;
1227 if (!prev)
1228 return NULL;
1229 if (expand_stack_locked(prev, addr))
1230 return NULL;
1231 if (prev->vm_flags & VM_LOCKED)
1232 populate_vma_page_range(prev, addr, prev->vm_end, NULL);
1233 return prev;
1234}
1235#else
1236int expand_stack_locked(struct vm_area_struct *vma, unsigned long address)
1237{
1238 return expand_downwards(vma, address);
1239}
1240
1241struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr)
1242{
1243 struct vm_area_struct *vma;
1244 unsigned long start;
1245
1246 addr &= PAGE_MASK;
1247 vma = find_vma(mm, addr);
1248 if (!vma)
1249 return NULL;
1250 if (vma->vm_start <= addr)
1251 return vma;
1252 start = vma->vm_start;
1253 if (expand_stack_locked(vma, addr))
1254 return NULL;
1255 if (vma->vm_flags & VM_LOCKED)
1256 populate_vma_page_range(vma, addr, start, NULL);
1257 return vma;
1258}
1259#endif
1260
1261#if defined(CONFIG_STACK_GROWSUP)
1262
1263#define vma_expand_up(vma,addr) expand_upwards(vma, addr)
1264#define vma_expand_down(vma, addr) (-EFAULT)
1265
1266#else
1267
1268#define vma_expand_up(vma,addr) (-EFAULT)
1269#define vma_expand_down(vma, addr) expand_downwards(vma, addr)
1270
1271#endif
1272
1273/*
1274 * expand_stack(): legacy interface for page faulting. Don't use unless
1275 * you have to.
1276 *
1277 * This is called with the mm locked for reading, drops the lock, takes
1278 * the lock for writing, tries to look up a vma again, expands it if
1279 * necessary, and downgrades the lock to reading again.
1280 *
1281 * If no vma is found or it can't be expanded, it returns NULL and has
1282 * dropped the lock.
1283 */
1284struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr)
1285{
1286 struct vm_area_struct *vma, *prev;
1287
1288 mmap_read_unlock(mm);
1289 if (mmap_write_lock_killable(mm))
1290 return NULL;
1291
1292 vma = find_vma_prev(mm, addr, &prev);
1293 if (vma && vma->vm_start <= addr)
1294 goto success;
1295
1296 if (prev && !vma_expand_up(prev, addr)) {
1297 vma = prev;
1298 goto success;
1299 }
1300
1301 if (vma && !vma_expand_down(vma, addr))
1302 goto success;
1303
1304 mmap_write_unlock(mm);
1305 return NULL;
1306
1307success:
1308 mmap_write_downgrade(mm);
1309 return vma;
1310}
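/*
 * Illustrative use from a legacy fault path (a sketch, not code from this
 * file): on a NULL return the lock is already gone, otherwise it is held
 * for reading again:
 *
 *      vma = expand_stack(mm, address);
 *      if (!vma)
 *              goto bad_area_nosemaphore;      (mmap_lock already dropped)
 *      ... handle the fault against vma with mmap_lock held for reading ...
 */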
1311
1312/* do_munmap() - Wrapper function for non-maple tree aware do_munmap() calls.
1313 * @mm: The mm_struct
1314 * @start: The start address to munmap
1315 * @len: The length to be munmapped.
1316 * @uf: The userfaultfd list_head
1317 *
1318 * Return: 0 on success, error otherwise.
1319 */
1320int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
1321 struct list_head *uf)
1322{
1323 VMA_ITERATOR(vmi, mm, start);
1324
1325 return do_vmi_munmap(&vmi, mm, start, len, uf, false);
1326}
1327
1328unsigned long mmap_region(struct file *file, unsigned long addr,
1329 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
1330 struct list_head *uf)
1331{
1332 unsigned long ret;
1333 bool writable_file_mapping = false;
1334
1335 /* Check to see if MDWE is applicable. */
1336 if (map_deny_write_exec(vm_flags, vm_flags))
1337 return -EACCES;
1338
1339 /* Allow architectures to sanity-check the vm_flags. */
1340 if (!arch_validate_flags(vm_flags))
1341 return -EINVAL;
1342
1343 /* Map writable and ensure this isn't a sealed memfd. */
1344 if (file && is_shared_maywrite(vm_flags)) {
1345 int error = mapping_map_writable(file->f_mapping);
1346
1347 if (error)
1348 return error;
1349 writable_file_mapping = true;
1350 }
1351
1352 ret = __mmap_region(file, addr, len, vm_flags, pgoff, uf);
1353
1354 /* Clear our write mapping regardless of error. */
1355 if (writable_file_mapping)
1356 mapping_unmap_writable(file->f_mapping);
1357
1358 validate_mm(current->mm);
1359 return ret;
1360}
1361
1362static int __vm_munmap(unsigned long start, size_t len, bool unlock)
1363{
1364 int ret;
1365 struct mm_struct *mm = current->mm;
1366 LIST_HEAD(uf);
1367 VMA_ITERATOR(vmi, mm, start);
1368
1369 if (mmap_write_lock_killable(mm))
1370 return -EINTR;
1371
1372 ret = do_vmi_munmap(&vmi, mm, start, len, &uf, unlock);
1373 if (ret || !unlock)
1374 mmap_write_unlock(mm);
1375
1376 userfaultfd_unmap_complete(mm, &uf);
1377 return ret;
1378}
1379
1380int vm_munmap(unsigned long start, size_t len)
1381{
1382 return __vm_munmap(start, len, false);
1383}
1384EXPORT_SYMBOL(vm_munmap);
1385
1386SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
1387{
1388 addr = untagged_addr(addr);
1389 return __vm_munmap(addr, len, true);
1390}
1391
1392
1393/*
1394 * Emulation of deprecated remap_file_pages() syscall.
1395 */
1396SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
1397 unsigned long, prot, unsigned long, pgoff, unsigned long, flags)
1398{
1399
1400 struct mm_struct *mm = current->mm;
1401 struct vm_area_struct *vma;
1402 unsigned long populate = 0;
1403 unsigned long ret = -EINVAL;
1404 struct file *file;
1405 vm_flags_t vm_flags;
1406
1407 pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/mm/remap_file_pages.rst.\n",
1408 current->comm, current->pid);
1409
1410 if (prot)
1411 return ret;
1412 start = start & PAGE_MASK;
1413 size = size & PAGE_MASK;
1414
1415 if (start + size <= start)
1416 return ret;
1417
1418 /* Does pgoff wrap? */
1419 if (pgoff + (size >> PAGE_SHIFT) < pgoff)
1420 return ret;
1421
1422 if (mmap_read_lock_killable(mm))
1423 return -EINTR;
1424
1425 /*
1426 * Look up VMA under read lock first so we can perform the security
1427 * check without holding locks (which can be problematic). We reacquire a
1428 * write lock later and check nothing changed underneath us.
1429 */
1430 vma = vma_lookup(mm, start);
1431
1432 if (!vma || !(vma->vm_flags & VM_SHARED)) {
1433 mmap_read_unlock(mm);
1434 return -EINVAL;
1435 }
1436
1437 prot |= vma->vm_flags & VM_READ ? PROT_READ : 0;
1438 prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0;
1439 prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0;
1440
1441 flags &= MAP_NONBLOCK;
1442 flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE;
1443 if (vma->vm_flags & VM_LOCKED)
1444 flags |= MAP_LOCKED;
1445
1446 /* Save vm_flags used to calculate prot and flags, and recheck later. */
1447 vm_flags = vma->vm_flags;
1448 file = get_file(vma->vm_file);
1449
1450 mmap_read_unlock(mm);
1451
1452 /* Call outside mmap_lock to be consistent with other callers. */
1453 ret = security_mmap_file(file, prot, flags);
1454 if (ret) {
1455 fput(file);
1456 return ret;
1457 }
1458
1459 ret = -EINVAL;
1460
1461 /* OK security check passed, take write lock + let it rip. */
1462 if (mmap_write_lock_killable(mm)) {
1463 fput(file);
1464 return -EINTR;
1465 }
1466
1467 vma = vma_lookup(mm, start);
1468
1469 if (!vma)
1470 goto out;
1471
1472 /* Make sure things didn't change under us. */
1473 if (vma->vm_flags != vm_flags)
1474 goto out;
1475 if (vma->vm_file != file)
1476 goto out;
1477
1478 if (start + size > vma->vm_end) {
1479 VMA_ITERATOR(vmi, mm, vma->vm_end);
1480 struct vm_area_struct *next, *prev = vma;
1481
1482 for_each_vma_range(vmi, next, start + size) {
1483 /* hole between vmas ? */
1484 if (next->vm_start != prev->vm_end)
1485 goto out;
1486
1487 if (next->vm_file != vma->vm_file)
1488 goto out;
1489
1490 if (next->vm_flags != vma->vm_flags)
1491 goto out;
1492
1493 if (start + size <= next->vm_end)
1494 break;
1495
1496 prev = next;
1497 }
1498
1499 if (!next)
1500 goto out;
1501 }
1502
1503 ret = do_mmap(vma->vm_file, start, size,
1504 prot, flags, 0, pgoff, &populate, NULL);
1505out:
1506 mmap_write_unlock(mm);
1507 fput(file);
1508 if (populate)
1509 mm_populate(ret, populate);
1510 if (!IS_ERR_VALUE(ret))
1511 ret = 0;
1512 return ret;
1513}
1514
1515/*
1516 * do_brk_flags() - Increase the brk vma if the flags match.
1517 * @vmi: The vma iterator
1518 * @addr: The start address
1519 * @len: The length of the increase
1520 * @vma: The vma,
1521 * @flags: The VMA Flags
1522 *
1523 * Extend the brk VMA from addr to addr + len. If the VMA is NULL or the flags
1524 * do not match then create a new anonymous VMA. Eventually we may be able to
1525 * do some brk-specific accounting here.
1526 */
1527static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
1528 unsigned long addr, unsigned long len, unsigned long flags)
1529{
1530 struct mm_struct *mm = current->mm;
1531
1532 /*
1533 * Check against address space limits by the changed size
1534 * Note: This happens *after* clearing old mappings in some code paths.
1535 */
1536 flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
1537 if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT))
1538 return -ENOMEM;
1539
1540 if (mm->map_count > sysctl_max_map_count)
1541 return -ENOMEM;
1542
1543 if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
1544 return -ENOMEM;
1545
1546 /*
1547 * Expand the existing vma if possible; Note that singular lists do not
1548 * occur after forking, so the expand will only happen on new VMAs.
1549 */
1550 if (vma && vma->vm_end == addr) {
1551 VMG_STATE(vmg, mm, vmi, addr, addr + len, flags, PHYS_PFN(addr));
1552
1553 vmg.prev = vma;
1554 /* vmi is positioned at prev, which this mode expects. */
1555 vmg.merge_flags = VMG_FLAG_JUST_EXPAND;
1556
1557 if (vma_merge_new_range(&vmg))
1558 goto out;
1559 else if (vmg_nomem(&vmg))
1560 goto unacct_fail;
1561 }
1562
1563 if (vma)
1564 vma_iter_next_range(vmi);
1565 /* create a vma struct for an anonymous mapping */
1566 vma = vm_area_alloc(mm);
1567 if (!vma)
1568 goto unacct_fail;
1569
1570 vma_set_anonymous(vma);
1571 vma_set_range(vma, addr, addr + len, addr >> PAGE_SHIFT);
1572 vm_flags_init(vma, flags);
1573 vma->vm_page_prot = vm_get_page_prot(flags);
1574 vma_start_write(vma);
1575 if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL))
1576 goto mas_store_fail;
1577
1578 mm->map_count++;
1579 validate_mm(mm);
1580 ksm_add_vma(vma);
1581out:
1582 perf_event_mmap(vma);
1583 mm->total_vm += len >> PAGE_SHIFT;
1584 mm->data_vm += len >> PAGE_SHIFT;
1585 if (flags & VM_LOCKED)
1586 mm->locked_vm += (len >> PAGE_SHIFT);
1587 vm_flags_set(vma, VM_SOFTDIRTY);
1588 return 0;
1589
1590mas_store_fail:
1591 vm_area_free(vma);
1592unacct_fail:
1593 vm_unacct_memory(len >> PAGE_SHIFT);
1594 return -ENOMEM;
1595}
1596
1597int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
1598{
1599 struct mm_struct *mm = current->mm;
1600 struct vm_area_struct *vma = NULL;
1601 unsigned long len;
1602 int ret;
1603 bool populate;
1604 LIST_HEAD(uf);
1605 VMA_ITERATOR(vmi, mm, addr);
1606
1607 len = PAGE_ALIGN(request);
1608 if (len < request)
1609 return -ENOMEM;
1610 if (!len)
1611 return 0;
1612
1613 /* Until we need other flags, refuse anything except VM_EXEC. */
1614 if ((flags & (~VM_EXEC)) != 0)
1615 return -EINVAL;
1616
1617 if (mmap_write_lock_killable(mm))
1618 return -EINTR;
1619
1620 ret = check_brk_limits(addr, len);
1621 if (ret)
1622 goto limits_failed;
1623
1624 ret = do_vmi_munmap(&vmi, mm, addr, len, &uf, 0);
1625 if (ret)
1626 goto munmap_failed;
1627
1628 vma = vma_prev(&vmi);
1629 ret = do_brk_flags(&vmi, vma, addr, len, flags);
1630 populate = ((mm->def_flags & VM_LOCKED) != 0);
1631 mmap_write_unlock(mm);
1632 userfaultfd_unmap_complete(mm, &uf);
1633 if (populate && !ret)
1634 mm_populate(addr, len);
1635 return ret;
1636
1637munmap_failed:
1638limits_failed:
1639 mmap_write_unlock(mm);
1640 return ret;
1641}
1642EXPORT_SYMBOL(vm_brk_flags);
1643
1644/* Release all mmaps. */
1645void exit_mmap(struct mm_struct *mm)
1646{
1647 struct mmu_gather tlb;
1648 struct vm_area_struct *vma;
1649 unsigned long nr_accounted = 0;
1650 VMA_ITERATOR(vmi, mm, 0);
1651 int count = 0;
1652
1653 /* mm's last user has gone, and it's about to be pulled down */
1654 mmu_notifier_release(mm);
1655
1656 mmap_read_lock(mm);
1657 arch_exit_mmap(mm);
1658
1659 vma = vma_next(&vmi);
1660 if (!vma || unlikely(xa_is_zero(vma))) {
1661 /* Can happen if dup_mmap() received an OOM */
1662 mmap_read_unlock(mm);
1663 mmap_write_lock(mm);
1664 goto destroy;
1665 }
1666
1667 lru_add_drain();
1668 flush_cache_mm(mm);
1669 tlb_gather_mmu_fullmm(&tlb, mm);
1670 /* update_hiwater_rss(mm) here? but nobody should be looking */
1671 /* Use ULONG_MAX here to ensure all VMAs in the mm are unmapped */
1672 unmap_vmas(&tlb, &vmi.mas, vma, 0, ULONG_MAX, ULONG_MAX, false);
1673 mmap_read_unlock(mm);
1674
1675 /*
1676 * Set MMF_OOM_SKIP to hide this task from the oom killer/reaper
1677 * because the memory has been already freed.
1678 */
1679 set_bit(MMF_OOM_SKIP, &mm->flags);
1680 mmap_write_lock(mm);
1681 mt_clear_in_rcu(&mm->mm_mt);
1682 vma_iter_set(&vmi, vma->vm_end);
1683 free_pgtables(&tlb, &vmi.mas, vma, FIRST_USER_ADDRESS,
1684 USER_PGTABLES_CEILING, true);
1685 tlb_finish_mmu(&tlb);
1686
1687 /*
1688 * Walk the list again, actually closing and freeing it, with preemption
1689 * enabled, without holding any MM locks besides the unreachable
1690 * mmap_write_lock.
1691 */
1692 vma_iter_set(&vmi, vma->vm_end);
1693 do {
1694 if (vma->vm_flags & VM_ACCOUNT)
1695 nr_accounted += vma_pages(vma);
1696 remove_vma(vma, /* unreachable = */ true);
1697 count++;
1698 cond_resched();
1699 vma = vma_next(&vmi);
1700 } while (vma && likely(!xa_is_zero(vma)));
1701
1702 BUG_ON(count != mm->map_count);
1703
1704 trace_exit_mmap(mm);
1705destroy:
1706 __mt_destroy(&mm->mm_mt);
1707 mmap_write_unlock(mm);
1708 vm_unacct_memory(nr_accounted);
1709}
1710
1711/* Insert vm structure into process list sorted by address
1712 * and into the inode's i_mmap tree. If vm_file is non-NULL
1713 * then i_mmap_rwsem is taken here.
1714 */
1715int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
1716{
1717 unsigned long charged = vma_pages(vma);
1718
1720 if (find_vma_intersection(mm, vma->vm_start, vma->vm_end))
1721 return -ENOMEM;
1722
1723 if ((vma->vm_flags & VM_ACCOUNT) &&
1724 security_vm_enough_memory_mm(mm, charged))
1725 return -ENOMEM;
1726
1727 /*
1728 * The vm_pgoff of a purely anonymous vma should be irrelevant
1729 * until its first write fault, when page's anon_vma and index
1730 * are set. But now set the vm_pgoff it will almost certainly
1731 * end up with (unless mremap moves it elsewhere before that
1732 * first wfault), so /proc/pid/maps tells a consistent story.
1733 *
1734 * By setting it to reflect the virtual start address of the
1735 * vma, merges and splits can happen in a seamless way, just
1736 * using the existing file pgoff checks and manipulations.
1737 * Similarly in do_mmap and in do_brk_flags.
1738 */
1739 if (vma_is_anonymous(vma)) {
1740 BUG_ON(vma->anon_vma);
1741 vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
1742 }
1743
1744 if (vma_link(mm, vma)) {
1745 if (vma->vm_flags & VM_ACCOUNT)
1746 vm_unacct_memory(charged);
1747 return -ENOMEM;
1748 }
1749
1750 return 0;
1751}
1752
/*
 * Return true if the calling process may expand its vm space by the passed
 * number of pages
 */
bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages)
{
	if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT)
		return false;

	if (is_data_mapping(flags) &&
	    mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) {
		/* Workaround for Valgrind */
		if (rlimit(RLIMIT_DATA) == 0 &&
		    mm->data_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT)
			return true;

		pr_warn_once("%s (%d): VmData %lu exceeds data ulimit %lu. Update limits%s.\n",
			     current->comm, current->pid,
			     (mm->data_vm + npages) << PAGE_SHIFT,
			     rlimit(RLIMIT_DATA),
			     ignore_rlimit_data ? "" : " or use boot option ignore_rlimit_data");

		if (!ignore_rlimit_data)
			return false;
	}

	return true;
}
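
/*
 * Worked example of the RLIMIT_DATA check above, assuming 4KiB pages: with
 * RLIMIT_DATA set to 64MiB the limit is 16384 pages, so a process whose
 * data_vm is already 16000 pages can grow a data mapping by at most 384
 * more pages unless ignore_rlimit_data is set.
 */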

void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages)
{
	WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm) + npages);

	if (is_exec_mapping(flags))
		mm->exec_vm += npages;
	else if (is_stack_mapping(flags))
		mm->stack_vm += npages;
	else if (is_data_mapping(flags))
		mm->data_vm += npages;
}
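
/*
 * These counters are what /proc/<pid>/status reports: total_vm backs VmSize,
 * data_vm backs VmData, stack_vm backs VmStk, and exec_vm feeds the
 * VmExe/VmLib split.
 */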

static vm_fault_t special_mapping_fault(struct vm_fault *vmf);

/*
 * Close hook, called for unmap() and on the old vma for mremap().
 *
 * Having a close hook prevents vma merging regardless of flags.
 */
static void special_mapping_close(struct vm_area_struct *vma)
{
	const struct vm_special_mapping *sm = vma->vm_private_data;

	if (sm->close)
		sm->close(sm, vma);
}

static const char *special_mapping_name(struct vm_area_struct *vma)
{
	return ((struct vm_special_mapping *)vma->vm_private_data)->name;
}

static int special_mapping_mremap(struct vm_area_struct *new_vma)
{
	struct vm_special_mapping *sm = new_vma->vm_private_data;

	if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
		return -EFAULT;

	if (sm->mremap)
		return sm->mremap(sm, new_vma);

	return 0;
}

static int special_mapping_split(struct vm_area_struct *vma, unsigned long addr)
{
	/*
	 * Forbid splitting special mappings - the kernel has expectations
	 * about the number of pages in the mapping. Together with
	 * VM_DONTEXPAND the size of the vma should stay the same over the
	 * special mapping's lifetime.
	 */
	return -EINVAL;
}

static const struct vm_operations_struct special_mapping_vmops = {
	.close = special_mapping_close,
	.fault = special_mapping_fault,
	.mremap = special_mapping_mremap,
	.name = special_mapping_name,
	/* vDSO code relies on VVAR not being accessible remotely */
	.access = NULL,
	.may_split = special_mapping_split,
};

static vm_fault_t special_mapping_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	pgoff_t pgoff;
	struct page **pages;
	struct vm_special_mapping *sm = vma->vm_private_data;

	if (sm->fault)
		return sm->fault(sm, vmf->vma, vmf);

	pages = sm->pages;

	for (pgoff = vmf->pgoff; pgoff && *pages; ++pages)
		pgoff--;

	if (*pages) {
		struct page *page = *pages;
		get_page(page);
		vmf->page = page;
		return 0;
	}

	return VM_FAULT_SIGBUS;
}
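
/*
 * Example of the lookup above: with sm->pages = { pgA, pgB, NULL }, a fault
 * at pgoff 1 steps past pgA, stops at pgB and returns it with a reference
 * taken; a fault at pgoff 2 or beyond runs into the NULL terminator and
 * gets SIGBUS.
 */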

static struct vm_area_struct *__install_special_mapping(
	struct mm_struct *mm,
	unsigned long addr, unsigned long len,
	unsigned long vm_flags, void *priv,
	const struct vm_operations_struct *ops)
{
	int ret;
	struct vm_area_struct *vma;

	vma = vm_area_alloc(mm);
	if (unlikely(vma == NULL))
		return ERR_PTR(-ENOMEM);

	vma_set_range(vma, addr, addr + len, 0);
	vm_flags_init(vma, (vm_flags | mm->def_flags |
		      VM_DONTEXPAND | VM_SOFTDIRTY) & ~VM_LOCKED_MASK);
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

	vma->vm_ops = ops;
	vma->vm_private_data = priv;

	ret = insert_vm_struct(mm, vma);
	if (ret)
		goto out;

	vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT);

	perf_event_mmap(vma);

	return vma;

out:
	vm_area_free(vma);
	return ERR_PTR(ret);
}

bool vma_is_special_mapping(const struct vm_area_struct *vma,
	const struct vm_special_mapping *sm)
{
	return vma->vm_private_data == sm &&
		vma->vm_ops == &special_mapping_vmops;
}

/*
 * Called with mm->mmap_lock held for writing.
 * Insert a new vma covering the given region, with the given flags.
 * Its pages are supplied by the given array of struct page *.
 * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated.
 * The region past the last page supplied will always produce SIGBUS.
 * The array pointer and the pages it points to are assumed to stay alive
 * for as long as this mapping might exist.
 */
struct vm_area_struct *_install_special_mapping(
	struct mm_struct *mm,
	unsigned long addr, unsigned long len,
	unsigned long vm_flags, const struct vm_special_mapping *spec)
{
	return __install_special_mapping(mm, addr, len, vm_flags, (void *)spec,
					&special_mapping_vmops);
}
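
/*
 * Illustrative sketch, not part of the original file: a typical user such as
 * an architecture's vDSO setup keeps a static vm_special_mapping and installs
 * it while holding mmap_lock for writing. All names below are hypothetical.
 */
#if 0
static struct page *example_pages[2];	/* [0] filled at init, [1] stays NULL */

static const struct vm_special_mapping example_mapping = {
	.name	= "[example]",
	.pages	= example_pages,
};

static int example_install(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;

	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
				       VM_READ | VM_MAYREAD,
				       &example_mapping);
	return PTR_ERR_OR_ZERO(vma);
}
#endif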

/*
 * initialise the percpu counter for VM
 */
void __init mmap_init(void)
{
	int ret;

	ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
	VM_BUG_ON(ret);
}

/*
 * Initialise sysctl_user_reserve_kbytes.
 *
 * This is intended to prevent a user from starting a single memory hogging
 * process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER
 * mode.
 *
 * The default value is min(3% of free memory, 128MB).
 * 128MB is enough to recover with sshd/login, bash, and top/kill.
 */
static int init_user_reserve(void)
{
	unsigned long free_kbytes;

	free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));

	sysctl_user_reserve_kbytes = min(free_kbytes / 32, SZ_128K);
	return 0;
}
subsys_initcall(init_user_reserve);
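
/*
 * Note on the arithmetic above: free_kbytes / 32 is the "3%" (strictly
 * 3.125%) of free memory, and SZ_128K kilobytes is the 128MB cap, so the
 * sysctl is expressed in kilobytes throughout.
 */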

/*
 * Initialise sysctl_admin_reserve_kbytes.
 *
 * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
 * to log in and kill a memory hogging process.
 *
 * Systems with more than 256MB will reserve 8MB, enough to recover
 * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will
 * only reserve 3% of free pages by default.
 */
static int init_admin_reserve(void)
{
	unsigned long free_kbytes;

	free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));

	sysctl_admin_reserve_kbytes = min(free_kbytes / 32, SZ_8K);
	return 0;
}
subsys_initcall(init_admin_reserve);

/*
 * Reinitialise user and admin reserves if memory is added or removed.
 *
 * The default user reserve max is 128MB, and the default max for the
 * admin reserve is 8MB. These are usually, but not always, enough to
 * enable recovery from a memory hogging process using login/sshd, a shell,
 * and tools like top. It may make sense to increase or even disable the
 * reserve depending on the existence of swap or variations in the recovery
 * tools. So, the admin may have changed them.
 *
 * If memory is added and the reserves have been eliminated or increased above
 * the default max, then we'll trust the admin.
 *
 * If memory is removed and there isn't enough free memory, then we
 * need to reset the reserves.
 *
 * Otherwise keep the reserve set by the admin.
 */
static int reserve_mem_notifier(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	unsigned long tmp, free_kbytes;

	switch (action) {
	case MEM_ONLINE:
		/* Default max is 128MB. Leave alone if modified by operator. */
		tmp = sysctl_user_reserve_kbytes;
		if (tmp > 0 && tmp < SZ_128K)
			init_user_reserve();

		/* Default max is 8MB. Leave alone if modified by operator. */
		tmp = sysctl_admin_reserve_kbytes;
		if (tmp > 0 && tmp < SZ_8K)
			init_admin_reserve();

		break;
	case MEM_OFFLINE:
		free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));

		if (sysctl_user_reserve_kbytes > free_kbytes) {
			init_user_reserve();
			pr_info("vm.user_reserve_kbytes reset to %lu\n",
				sysctl_user_reserve_kbytes);
		}

		if (sysctl_admin_reserve_kbytes > free_kbytes) {
			init_admin_reserve();
			pr_info("vm.admin_reserve_kbytes reset to %lu\n",
				sysctl_admin_reserve_kbytes);
		}
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static int __meminit init_reserve_notifier(void)
{
	if (hotplug_memory_notifier(reserve_mem_notifier, DEFAULT_CALLBACK_PRI))
		pr_err("Failed registering memory add/remove notifier for admin reserve\n");

	return 0;
}
subsys_initcall(init_reserve_notifier);

/*
 * Relocate a VMA downwards by shift bytes. There cannot be any VMAs between
 * this VMA and its relocated range, which will now reside at [vma->vm_start -
 * shift, vma->vm_end - shift).
 *
 * This function is almost certainly NOT what you want for anything other than
 * early executable temporary stack relocation.
 */
int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift)
{
	/*
	 * The process proceeds as follows:
	 *
	 * 1) Use shift to calculate the new vma endpoints.
	 * 2) Extend vma to cover both the old and new ranges. This ensures the
	 *    arguments passed to subsequent functions are consistent.
	 * 3) Move vma's page tables to the new range.
	 * 4) Free up any cleared pgd range.
	 * 5) Shrink the vma to cover only the new range.
	 */

	struct mm_struct *mm = vma->vm_mm;
	unsigned long old_start = vma->vm_start;
	unsigned long old_end = vma->vm_end;
	unsigned long length = old_end - old_start;
	unsigned long new_start = old_start - shift;
	unsigned long new_end = old_end - shift;
	VMA_ITERATOR(vmi, mm, new_start);
	VMG_STATE(vmg, mm, &vmi, new_start, old_end, 0, vma->vm_pgoff);
	struct vm_area_struct *next;
	struct mmu_gather tlb;

	BUG_ON(new_start > new_end);

	/*
	 * ensure there are no vmas between where we want to go
	 * and where we are
	 */
	if (vma != vma_next(&vmi))
		return -EFAULT;

	vma_iter_prev_range(&vmi);
	/*
	 * cover the whole range: [new_start, old_end)
	 */
	vmg.vma = vma;
	if (vma_expand(&vmg))
		return -ENOMEM;

	/*
	 * move the page tables downwards, on failure we rely on
	 * process cleanup to remove whatever mess we made.
	 */
	if (length != move_page_tables(vma, old_start,
				       vma, new_start, length, false, true))
		return -ENOMEM;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm);
	next = vma_next(&vmi);
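	/*
	 * Worked example of the overlap handling below: for a 64KiB VMA at
	 * [0x120000, 0x130000), a shift of 0x4000 gives new_end = 0x12c000,
	 * which is above old_start, so only the no-longer-covered tail
	 * [0x12c000, 0x130000) is cleared; a shift of 0x100000 moves it to
	 * the disjoint range [0x20000, 0x30000), and the whole old span is
	 * cleared starting from old_start.
	 */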
	if (new_end > old_start) {
		/*
		 * when the old and new regions overlap, clear from new_end.
		 */
		free_pgd_range(&tlb, new_end, old_end, new_end,
			next ? next->vm_start : USER_PGTABLES_CEILING);
	} else {
		/*
		 * otherwise, clean from old_start; this is done to not touch
		 * the address space in [new_end, old_start), because some
		 * architectures have constraints on va-space that make this
		 * illegal (IA64); for the others it's just a little faster.
		 */
		free_pgd_range(&tlb, old_start, old_end, new_end,
			next ? next->vm_start : USER_PGTABLES_CEILING);
	}
	tlb_finish_mmu(&tlb);

	vma_prev(&vmi);
	/* Shrink the vma to just the new range */
	return vma_shrink(&vmi, vma, new_start, new_end, vma->vm_pgoff);
}