1/*
2 * linux/mm/madvise.c
3 *
4 * Copyright (C) 1999 Linus Torvalds
5 * Copyright (C) 2002 Christoph Hellwig
6 */
7
8#include <linux/mman.h>
9#include <linux/pagemap.h>
10#include <linux/syscalls.h>
11#include <linux/mempolicy.h>
12#include <linux/page-isolation.h>
13#include <linux/hugetlb.h>
14#include <linux/falloc.h>
15#include <linux/sched.h>
16#include <linux/ksm.h>
17#include <linux/fs.h>
18#include <linux/file.h>
19#include <linux/blkdev.h>
20#include <linux/backing-dev.h>
21#include <linux/swap.h>
22#include <linux/swapops.h>
23#include <linux/mmu_notifier.h>
24
25#include <asm/tlb.h>
26
27/*
28 * Any behaviour which results in changes to the vma->vm_flags needs to
29 * take mmap_sem for writing. Others, which simply traverse vmas, need
30 * to only take it for reading.
31 */
32static int madvise_need_mmap_write(int behavior)
33{
34 switch (behavior) {
35 case MADV_REMOVE:
36 case MADV_WILLNEED:
37 case MADV_DONTNEED:
38 case MADV_FREE:
39 return 0;
40 default:
41 /* be safe, default to 1. list exceptions explicitly */
42 return 1;
43 }
44}
45
46/*
47 * We can potentially split a vm area into separate
48 * areas, each area with its own behavior.
49 */
50static long madvise_behavior(struct vm_area_struct *vma,
51 struct vm_area_struct **prev,
52 unsigned long start, unsigned long end, int behavior)
53{
54 struct mm_struct *mm = vma->vm_mm;
55 int error = 0;
56 pgoff_t pgoff;
57 unsigned long new_flags = vma->vm_flags;
58
59 switch (behavior) {
60 case MADV_NORMAL:
61 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
62 break;
63 case MADV_SEQUENTIAL:
64 new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
65 break;
66 case MADV_RANDOM:
67 new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
68 break;
69 case MADV_DONTFORK:
70 new_flags |= VM_DONTCOPY;
71 break;
72 case MADV_DOFORK:
73 if (vma->vm_flags & VM_IO) {
74 error = -EINVAL;
75 goto out;
76 }
77 new_flags &= ~VM_DONTCOPY;
78 break;
79 case MADV_DONTDUMP:
80 new_flags |= VM_DONTDUMP;
81 break;
82 case MADV_DODUMP:
83 if (new_flags & VM_SPECIAL) {
84 error = -EINVAL;
85 goto out;
86 }
87 new_flags &= ~VM_DONTDUMP;
88 break;
89 case MADV_MERGEABLE:
90 case MADV_UNMERGEABLE:
91 error = ksm_madvise(vma, start, end, behavior, &new_flags);
92 if (error)
93 goto out;
94 break;
95 case MADV_HUGEPAGE:
96 case MADV_NOHUGEPAGE:
97 error = hugepage_madvise(vma, &new_flags, behavior);
98 if (error)
99 goto out;
100 break;
101 }
102
103 if (new_flags == vma->vm_flags) {
104 *prev = vma;
105 goto out;
106 }
107
108 pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
109 *prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
110 vma->vm_file, pgoff, vma_policy(vma),
111 vma->vm_userfaultfd_ctx);
112 if (*prev) {
113 vma = *prev;
114 goto success;
115 }
116
117 *prev = vma;
118
119 if (start != vma->vm_start) {
120 error = split_vma(mm, vma, start, 1);
121 if (error)
122 goto out;
123 }
124
125 if (end != vma->vm_end) {
126 error = split_vma(mm, vma, end, 0);
127 if (error)
128 goto out;
129 }
130
131success:
132 /*
133 * vm_flags is protected by the mmap_sem held in write mode.
134 */
135 vma->vm_flags = new_flags;
136
137out:
138 if (error == -ENOMEM)
139 error = -EAGAIN;
140 return error;
141}
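/*
 * Illustrative userspace sketch (not part of mm/madvise.c): applying a
 * flag-changing behavior such as MADV_DONTDUMP to only a sub-range of a
 * mapping takes the split_vma() path above, so one VMA becomes three. This
 * can be observed as extra entries in /proc/self/maps. A minimal sketch,
 * assuming only standard <sys/mman.h> constants.
 */
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t page = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, 4 * page, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	/* Only pages 1-2 get VM_DONTDUMP; the single VMA is split into three. */
	if (madvise(p + page, 2 * page, MADV_DONTDUMP))
		perror("madvise(MADV_DONTDUMP)");
	/* Inspect /proc/self/maps here (e.g. in a debugger) to see the split. */
	munmap(p, 4 * page);
	return 0;
}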
142
143#ifdef CONFIG_SWAP
144static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
145 unsigned long end, struct mm_walk *walk)
146{
147 pte_t *orig_pte;
148 struct vm_area_struct *vma = walk->private;
149 unsigned long index;
150
151 if (pmd_none_or_trans_huge_or_clear_bad(pmd))
152 return 0;
153
154 for (index = start; index != end; index += PAGE_SIZE) {
155 pte_t pte;
156 swp_entry_t entry;
157 struct page *page;
158 spinlock_t *ptl;
159
160 orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
161 pte = *(orig_pte + ((index - start) / PAGE_SIZE));
162 pte_unmap_unlock(orig_pte, ptl);
163
164 if (pte_present(pte) || pte_none(pte))
165 continue;
166 entry = pte_to_swp_entry(pte);
167 if (unlikely(non_swap_entry(entry)))
168 continue;
169
170 page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
171 vma, index);
172 if (page)
173 put_page(page);
174 }
175
176 return 0;
177}
178
179static void force_swapin_readahead(struct vm_area_struct *vma,
180 unsigned long start, unsigned long end)
181{
182 struct mm_walk walk = {
183 .mm = vma->vm_mm,
184 .pmd_entry = swapin_walk_pmd_entry,
185 .private = vma,
186 };
187
188 walk_page_range(start, end, &walk);
189
190 lru_add_drain(); /* Push any new pages onto the LRU now */
191}
192
193static void force_shm_swapin_readahead(struct vm_area_struct *vma,
194 unsigned long start, unsigned long end,
195 struct address_space *mapping)
196{
197 pgoff_t index;
198 struct page *page;
199 swp_entry_t swap;
200
201 for (; start < end; start += PAGE_SIZE) {
202 index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
203
204 page = find_get_entry(mapping, index);
205 if (!radix_tree_exceptional_entry(page)) {
206 if (page)
207 put_page(page);
208 continue;
209 }
210 swap = radix_to_swp_entry(page);
211 page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
212 NULL, 0);
213 if (page)
214 put_page(page);
215 }
216
217 lru_add_drain(); /* Push any new pages onto the LRU now */
218}
219#endif /* CONFIG_SWAP */
220
221/*
222 * Schedule all required I/O operations. Do not wait for completion.
223 */
224static long madvise_willneed(struct vm_area_struct *vma,
225 struct vm_area_struct **prev,
226 unsigned long start, unsigned long end)
227{
228 struct file *file = vma->vm_file;
229
230#ifdef CONFIG_SWAP
231 if (!file) {
232 *prev = vma;
233 force_swapin_readahead(vma, start, end);
234 return 0;
235 }
236
237 if (shmem_mapping(file->f_mapping)) {
238 *prev = vma;
239 force_shm_swapin_readahead(vma, start, end,
240 file->f_mapping);
241 return 0;
242 }
243#else
244 if (!file)
245 return -EBADF;
246#endif
247
248 if (IS_DAX(file_inode(file))) {
249 /* no bad return value, but ignore advice */
250 return 0;
251 }
252
253 *prev = vma;
254 start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
255 if (end > vma->vm_end)
256 end = vma->vm_end;
257 end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
258
259 force_page_cache_readahead(file->f_mapping, file, start, end - start);
260 return 0;
261}
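/*
 * Illustrative userspace sketch (not part of mm/madvise.c): MADV_WILLNEED on
 * a file-backed mapping asks the kernel to start readahead for the range via
 * force_page_cache_readahead() above, without blocking the caller. A minimal
 * sketch, assuming "/etc/hostname" exists and is non-empty (any readable file
 * works).
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	struct stat st;
	int fd = open("/etc/hostname", O_RDONLY);

	if (fd < 0 || fstat(fd, &st) < 0 || st.st_size == 0)
		return 1;

	char *p = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
	if (p == MAP_FAILED)
		return 1;

	/* Hint that the whole mapping will be needed soon; I/O is only scheduled. */
	if (madvise(p, st.st_size, MADV_WILLNEED))
		perror("madvise(MADV_WILLNEED)");

	printf("first byte: %c\n", p[0]); /* likely already in the page cache */
	munmap(p, st.st_size);
	close(fd);
	return 0;
}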
262
263static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
264 unsigned long end, struct mm_walk *walk)
265
266{
267 struct mmu_gather *tlb = walk->private;
268 struct mm_struct *mm = tlb->mm;
269 struct vm_area_struct *vma = walk->vma;
270 spinlock_t *ptl;
271 pte_t *orig_pte, *pte, ptent;
272 struct page *page;
273 int nr_swap = 0;
274 unsigned long next;
275
276 next = pmd_addr_end(addr, end);
277 if (pmd_trans_huge(*pmd))
278 if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next))
279 goto next;
280
281 if (pmd_trans_unstable(pmd))
282 return 0;
283
284 orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
285 arch_enter_lazy_mmu_mode();
286 for (; addr != end; pte++, addr += PAGE_SIZE) {
287 ptent = *pte;
288
289 if (pte_none(ptent))
290 continue;
291 /*
292 * If the pte has a swp_entry, just clear the page table entry to
293 * prevent swap-in, which is more expensive than
294 * (page allocation + zeroing).
295 */
296 if (!pte_present(ptent)) {
297 swp_entry_t entry;
298
299 entry = pte_to_swp_entry(ptent);
300 if (non_swap_entry(entry))
301 continue;
302 nr_swap--;
303 free_swap_and_cache(entry);
304 pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
305 continue;
306 }
307
308 page = vm_normal_page(vma, addr, ptent);
309 if (!page)
310 continue;
311
312 /*
313 * If pmd isn't transhuge but the page is THP and
314 * is owned by only this process, split it and
315 * deactivate all pages.
316 */
317 if (PageTransCompound(page)) {
318 if (page_mapcount(page) != 1)
319 goto out;
320 get_page(page);
321 if (!trylock_page(page)) {
322 put_page(page);
323 goto out;
324 }
325 pte_unmap_unlock(orig_pte, ptl);
326 if (split_huge_page(page)) {
327 unlock_page(page);
328 put_page(page);
329 pte_offset_map_lock(mm, pmd, addr, &ptl);
330 goto out;
331 }
332 put_page(page);
333 unlock_page(page);
334 pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
335 pte--;
336 addr -= PAGE_SIZE;
337 continue;
338 }
339
340 VM_BUG_ON_PAGE(PageTransCompound(page), page);
341
342 if (PageSwapCache(page) || PageDirty(page)) {
343 if (!trylock_page(page))
344 continue;
345 /*
346 * If the page is shared with others, we can't clear
347 * PG_dirty of the page.
348 */
349 if (page_mapcount(page) != 1) {
350 unlock_page(page);
351 continue;
352 }
353
354 if (PageSwapCache(page) && !try_to_free_swap(page)) {
355 unlock_page(page);
356 continue;
357 }
358
359 ClearPageDirty(page);
360 unlock_page(page);
361 }
362
363 if (pte_young(ptent) || pte_dirty(ptent)) {
364 /*
365 * Some architectures (e.g. PPC) don't update the TLB
366 * with set_pte_at and tlb_remove_tlb_entry, so for
367 * portability, remap the pte as old|clean
368 * after clearing it.
369 */
370 ptent = ptep_get_and_clear_full(mm, addr, pte,
371 tlb->fullmm);
372
373 ptent = pte_mkold(ptent);
374 ptent = pte_mkclean(ptent);
375 set_pte_at(mm, addr, pte, ptent);
376 if (PageActive(page))
377 deactivate_page(page);
378 tlb_remove_tlb_entry(tlb, pte, addr);
379 }
380 }
381out:
382 if (nr_swap) {
383 if (current->mm == mm)
384 sync_mm_rss(mm);
385
386 add_mm_counter(mm, MM_SWAPENTS, nr_swap);
387 }
388 arch_leave_lazy_mmu_mode();
389 pte_unmap_unlock(orig_pte, ptl);
390 cond_resched();
391next:
392 return 0;
393}
394
395static void madvise_free_page_range(struct mmu_gather *tlb,
396 struct vm_area_struct *vma,
397 unsigned long addr, unsigned long end)
398{
399 struct mm_walk free_walk = {
400 .pmd_entry = madvise_free_pte_range,
401 .mm = vma->vm_mm,
402 .private = tlb,
403 };
404
405 tlb_start_vma(tlb, vma);
406 walk_page_range(addr, end, &free_walk);
407 tlb_end_vma(tlb, vma);
408}
409
410static int madvise_free_single_vma(struct vm_area_struct *vma,
411 unsigned long start_addr, unsigned long end_addr)
412{
413 unsigned long start, end;
414 struct mm_struct *mm = vma->vm_mm;
415 struct mmu_gather tlb;
416
417 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
418 return -EINVAL;
419
420 /* MADV_FREE only works for anonymous vmas at the moment */
421 if (!vma_is_anonymous(vma))
422 return -EINVAL;
423
424 start = max(vma->vm_start, start_addr);
425 if (start >= vma->vm_end)
426 return -EINVAL;
427 end = min(vma->vm_end, end_addr);
428 if (end <= vma->vm_start)
429 return -EINVAL;
430
431 lru_add_drain();
432 tlb_gather_mmu(&tlb, mm, start, end);
433 update_hiwater_rss(mm);
434
435 mmu_notifier_invalidate_range_start(mm, start, end);
436 madvise_free_page_range(&tlb, vma, start, end);
437 mmu_notifier_invalidate_range_end(mm, start, end);
438 tlb_finish_mmu(&tlb, start, end);
439
440 return 0;
441}
442
443static long madvise_free(struct vm_area_struct *vma,
444 struct vm_area_struct **prev,
445 unsigned long start, unsigned long end)
446{
447 *prev = vma;
448 return madvise_free_single_vma(vma, start, end);
449}
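/*
 * Illustrative userspace sketch (not part of mm/madvise.c): MADV_FREE marks
 * anonymous pages as lazily freeable. The kernel may reclaim them under
 * memory pressure, but writing to a page again before that happens cancels
 * the free for that page, so the caller does not need to re-mmap the range.
 * A minimal sketch; the MADV_FREE constant needs Linux 4.5+ headers.
 */
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t page = sysconf(_SC_PAGESIZE);
	size_t len = 64 * page;
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return 1;

	memset(buf, 0xaa, len);		/* dirty the pages */
	madvise(buf, len, MADV_FREE);	/* contents may now be dropped lazily */

	/* Reusing the buffer is valid: a write re-dirties the page and keeps it. */
	buf[0] = 1;
	munmap(buf, len);
	return 0;
}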
450
451/*
452 * Application no longer needs these pages. If the pages are dirty,
453 * it's OK to just throw them away. The app will be more careful about
454 * data it wants to keep. Be sure to free swap resources too. The
455 * zap_page_range call sets things up for shrink_active_list to actually free
456 * these pages later if no one else has touched them in the meantime,
457 * although we could add these pages to a global reuse list for
458 * shrink_active_list to pick up before reclaiming other pages.
459 *
460 * NB: This interface discards data rather than pushes it out to swap,
461 * as some implementations do. This has performance implications for
462 * applications like large transactional databases which want to discard
463 * pages in anonymous maps after committing to backing store the data
464 * that was kept in them. There is no reason to write this data out to
465 * the swap area if the application is discarding it.
466 *
467 * An interface that causes the system to free clean pages and flush
468 * dirty pages is already available as msync(MS_INVALIDATE).
469 */
470static long madvise_dontneed(struct vm_area_struct *vma,
471 struct vm_area_struct **prev,
472 unsigned long start, unsigned long end)
473{
474 *prev = vma;
475 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
476 return -EINVAL;
477
478 zap_page_range(vma, start, end - start, NULL);
479 return 0;
480}
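/*
 * Illustrative userspace sketch (not part of mm/madvise.c): unlike MADV_FREE,
 * MADV_DONTNEED drops the pages immediately via zap_page_range(); the next
 * access to a private anonymous page sees zero-filled memory. A minimal
 * sketch.
 */
#include <assert.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t page = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	p[0] = 42;
	if (madvise(p, page, MADV_DONTNEED))
		return 1;

	assert(p[0] == 0);	/* old data is gone, page reads back as zeroes */
	munmap(p, page);
	return 0;
}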
481
482/*
483 * Application wants to free up the pages and associated backing store.
484 * This is effectively punching a hole into the middle of a file.
485 */
486static long madvise_remove(struct vm_area_struct *vma,
487 struct vm_area_struct **prev,
488 unsigned long start, unsigned long end)
489{
490 loff_t offset;
491 int error;
492 struct file *f;
493
494 *prev = NULL; /* tell sys_madvise we drop mmap_sem */
495
496 if (vma->vm_flags & VM_LOCKED)
497 return -EINVAL;
498
499 f = vma->vm_file;
500
501 if (!f || !f->f_mapping || !f->f_mapping->host) {
502 return -EINVAL;
503 }
504
505 if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
506 return -EACCES;
507
508 offset = (loff_t)(start - vma->vm_start)
509 + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
510
511 /*
512 * Filesystem's fallocate may need to take i_mutex. We need to
513 * explicitly grab a reference because the vma (and hence the
514 * vma's reference to the file) can go away as soon as we drop
515 * mmap_sem.
516 */
517 get_file(f);
518 up_read(&current->mm->mmap_sem);
519 error = vfs_fallocate(f,
520 FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
521 offset, end - start);
522 fput(f);
523 down_read(&current->mm->mmap_sem);
524 return error;
525}
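/*
 * Illustrative userspace sketch (not part of mm/madvise.c): MADV_REMOVE on a
 * writable shared file mapping punches a hole in the backing file, just like
 * fallocate(FALLOC_FL_PUNCH_HOLE) on the corresponding file range. A minimal
 * sketch, assuming a hole-punch capable filesystem (e.g. tmpfs) and using a
 * temporary file created with mkstemp().
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t page = sysconf(_SC_PAGESIZE);
	char path[] = "/tmp/madv-remove-XXXXXX";
	int fd = mkstemp(path);

	if (fd < 0 || ftruncate(fd, 4 * page) < 0)
		return 1;
	unlink(path);

	char *p = mmap(NULL, 4 * page, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;

	memset(p, 0xff, 4 * page);
	/* Drop pages 1-2 and the file blocks backing them. */
	if (madvise(p + page, 2 * page, MADV_REMOVE))
		perror("madvise(MADV_REMOVE)"); /* EOPNOTSUPP if fs can't punch holes */

	munmap(p, 4 * page);
	close(fd);
	return 0;
}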
526
527#ifdef CONFIG_MEMORY_FAILURE
528/*
529 * Error injection support for memory error handling.
530 */
531static int madvise_hwpoison(int bhv, unsigned long start, unsigned long end)
532{
533 struct page *p;
534 if (!capable(CAP_SYS_ADMIN))
535 return -EPERM;
536 for (; start < end; start += PAGE_SIZE <<
537 compound_order(compound_head(p))) {
538 int ret;
539
540 ret = get_user_pages_fast(start, 1, 0, &p);
541 if (ret != 1)
542 return ret;
543
544 if (PageHWPoison(p)) {
545 put_page(p);
546 continue;
547 }
548 if (bhv == MADV_SOFT_OFFLINE) {
549 pr_info("Soft offlining page %#lx at %#lx\n",
550 page_to_pfn(p), start);
551 ret = soft_offline_page(p, MF_COUNT_INCREASED);
552 if (ret)
553 return ret;
554 continue;
555 }
556 pr_info("Injecting memory failure for page %#lx at %#lx\n",
557 page_to_pfn(p), start);
558 ret = memory_failure(page_to_pfn(p), 0, MF_COUNT_INCREASED);
559 if (ret)
560 return ret;
561 }
562 return 0;
563}
564#endif
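/*
 * Illustrative userspace sketch (not part of mm/madvise.c): MADV_SOFT_OFFLINE
 * is an error-injection aid for exercising the memory-failure path above. It
 * needs CAP_SYS_ADMIN and CONFIG_MEMORY_FAILURE, and assumes the
 * MADV_SOFT_OFFLINE constant is exposed by the installed headers. Unlike
 * MADV_HWPOISON it migrates the page's contents before offlining the old page
 * frame, so the data below survives. For test environments only.
 */
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t page = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	p[0] = 0x5a;
	if (madvise(p, page, MADV_SOFT_OFFLINE)) {
		perror("madvise(MADV_SOFT_OFFLINE)"); /* EPERM without CAP_SYS_ADMIN */
		return 1;
	}
	printf("data after soft offline: %#x\n", p[0]); /* still 0x5a */
	return 0;
}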
565
566static long
567madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
568 unsigned long start, unsigned long end, int behavior)
569{
570 switch (behavior) {
571 case MADV_REMOVE:
572 return madvise_remove(vma, prev, start, end);
573 case MADV_WILLNEED:
574 return madvise_willneed(vma, prev, start, end);
575 case MADV_FREE:
576 /*
577 * XXX: In this implementation, MADV_FREE works like
578 * MADV_DONTNEED on a swapless system or when swap is full.
579 */
580 if (get_nr_swap_pages() > 0)
581 return madvise_free(vma, prev, start, end);
582 /* passthrough */
583 case MADV_DONTNEED:
584 return madvise_dontneed(vma, prev, start, end);
585 default:
586 return madvise_behavior(vma, prev, start, end, behavior);
587 }
588}
589
590static bool
591madvise_behavior_valid(int behavior)
592{
593 switch (behavior) {
594 case MADV_DOFORK:
595 case MADV_DONTFORK:
596 case MADV_NORMAL:
597 case MADV_SEQUENTIAL:
598 case MADV_RANDOM:
599 case MADV_REMOVE:
600 case MADV_WILLNEED:
601 case MADV_DONTNEED:
602 case MADV_FREE:
603#ifdef CONFIG_KSM
604 case MADV_MERGEABLE:
605 case MADV_UNMERGEABLE:
606#endif
607#ifdef CONFIG_TRANSPARENT_HUGEPAGE
608 case MADV_HUGEPAGE:
609 case MADV_NOHUGEPAGE:
610#endif
611 case MADV_DONTDUMP:
612 case MADV_DODUMP:
613 return true;
614
615 default:
616 return false;
617 }
618}
619
620/*
621 * The madvise(2) system call.
622 *
623 * Applications can use madvise() to advise the kernel how it should
624 * handle paging I/O in this VM area. The idea is to help the kernel
625 * use appropriate read-ahead and caching techniques. The information
626 * provided is advisory only, and can be safely disregarded by the
627 * kernel without affecting the correct operation of the application.
628 *
629 * behavior values:
630 * MADV_NORMAL - the default behavior is to read clusters. This
631 * results in some read-ahead and read-behind.
632 * MADV_RANDOM - the system should read the minimum amount of data
633 * on any access, since it is unlikely that the appli-
634 * cation will need more than what it asks for.
635 * MADV_SEQUENTIAL - pages in the given range will probably be accessed
636 * once, so they can be aggressively read ahead, and
637 * can be freed soon after they are accessed.
638 * MADV_WILLNEED - the application is notifying the system to read
639 * some pages ahead.
640 * MADV_DONTNEED - the application is finished with the given range,
641 * so the kernel can free resources associated with it.
642 * MADV_FREE - the application marks pages in the given range as lazy free,
643 * where actual purges are postponed until memory pressure happens.
644 * MADV_REMOVE - the application wants to free up the given range of
645 * pages and associated backing store.
646 * MADV_DONTFORK - omit this area from child's address space when forking:
647 * typically, to avoid COWing pages pinned by get_user_pages().
648 * MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
649 * MADV_HWPOISON - trigger memory error handler as if the given memory range
650 * were corrupted by unrecoverable hardware memory failure.
651 * MADV_SOFT_OFFLINE - try to soft-offline the given range of memory.
652 * MADV_MERGEABLE - the application recommends that KSM try to merge pages in
653 * this area with pages of identical content from other such areas.
654 * MADV_UNMERGEABLE- cancel MADV_MERGEABLE: no longer merge pages with others.
655 * MADV_HUGEPAGE - the application wants to back the given range by transparent
656 * huge pages in the future. Existing pages might be coalesced and
657 * new pages might be allocated as THP.
658 * MADV_NOHUGEPAGE - mark the given range as not worth being backed by
659 * transparent huge pages so the existing pages will not be
660 * coalesced into THP and new pages will not be allocated as THP.
661 * MADV_DONTDUMP - the application wants to prevent pages in the given range
662 * from being included in its core dump.
663 * MADV_DODUMP - cancel MADV_DONTDUMP: no longer exclude from core dump.
664 *
665 * return values:
666 * zero - success
667 * -EINVAL - start + len < 0, start is not page-aligned,
668 * "behavior" is not a valid value, or application
669 * is attempting to release locked or shared pages.
670 * -ENOMEM - addresses in the specified range are not currently
671 * mapped, or are outside the AS of the process.
672 * -EIO - an I/O error occurred while paging in data.
673 * -EBADF - map exists, but area maps something that isn't a file.
674 * -EAGAIN - a kernel resource was temporarily unavailable.
675 */
676SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
677{
678 unsigned long end, tmp;
679 struct vm_area_struct *vma, *prev;
680 int unmapped_error = 0;
681 int error = -EINVAL;
682 int write;
683 size_t len;
684 struct blk_plug plug;
685
686#ifdef CONFIG_MEMORY_FAILURE
687 if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE)
688 return madvise_hwpoison(behavior, start, start+len_in);
689#endif
690 if (!madvise_behavior_valid(behavior))
691 return error;
692
693 if (start & ~PAGE_MASK)
694 return error;
695 len = (len_in + ~PAGE_MASK) & PAGE_MASK;
696
697 /* Check to see whether len was rounded up from a small negative value to zero */
698 if (len_in && !len)
699 return error;
700
701 end = start + len;
702 if (end < start)
703 return error;
704
705 error = 0;
706 if (end == start)
707 return error;
708
709 write = madvise_need_mmap_write(behavior);
710 if (write)
711 down_write(&current->mm->mmap_sem);
712 else
713 down_read(&current->mm->mmap_sem);
714
715 /*
716 * If the interval [start,end) covers some unmapped address
717 * ranges, just ignore them, but return -ENOMEM at the end.
718 * - different from the way of handling in mlock etc.
719 */
720 vma = find_vma_prev(current->mm, start, &prev);
721 if (vma && start > vma->vm_start)
722 prev = vma;
723
724 blk_start_plug(&plug);
725 for (;;) {
726 /* Still start < end. */
727 error = -ENOMEM;
728 if (!vma)
729 goto out;
730
731 /* Here start < (end|vma->vm_end). */
732 if (start < vma->vm_start) {
733 unmapped_error = -ENOMEM;
734 start = vma->vm_start;
735 if (start >= end)
736 goto out;
737 }
738
739 /* Here vma->vm_start <= start < (end|vma->vm_end) */
740 tmp = vma->vm_end;
741 if (end < tmp)
742 tmp = end;
743
744 /* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
745 error = madvise_vma(vma, &prev, start, tmp, behavior);
746 if (error)
747 goto out;
748 start = tmp;
749 if (prev && start < prev->vm_end)
750 start = prev->vm_end;
751 error = unmapped_error;
752 if (start >= end)
753 goto out;
754 if (prev)
755 vma = prev->vm_next;
756 else /* madvise_remove dropped mmap_sem */
757 vma = find_vma(current->mm, start);
758 }
759out:
760 blk_finish_plug(&plug);
761 if (write)
762 up_write(&current->mm->mmap_sem);
763 else
764 up_read(&current->mm->mmap_sem);
765
766 return error;
767}
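/*
 * Illustrative userspace sketch (not part of mm/madvise.c): the return values
 * documented above can be observed directly. An unaligned start yields EINVAL,
 * and a range covering unmapped addresses yields ENOMEM even though the mapped
 * part is still processed. A minimal sketch.
 */
#include <errno.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t page = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, 2 * page, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	/* start must be page-aligned. */
	if (madvise(p + 1, page, MADV_DONTNEED))
		printf("unaligned start: errno=%d (EINVAL=%d)\n", errno, EINVAL);

	/* Leave a hole behind the first page: advice spanning it returns ENOMEM. */
	munmap(p + page, page);
	if (madvise(p, 2 * page, MADV_DONTNEED))
		printf("range with hole: errno=%d (ENOMEM=%d)\n", errno, ENOMEM);

	munmap(p, page);
	return 0;
}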
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * linux/mm/madvise.c
4 *
5 * Copyright (C) 1999 Linus Torvalds
6 * Copyright (C) 2002 Christoph Hellwig
7 */
8
9#include <linux/mman.h>
10#include <linux/pagemap.h>
11#include <linux/syscalls.h>
12#include <linux/mempolicy.h>
13#include <linux/page-isolation.h>
14#include <linux/userfaultfd_k.h>
15#include <linux/hugetlb.h>
16#include <linux/falloc.h>
17#include <linux/sched.h>
18#include <linux/ksm.h>
19#include <linux/fs.h>
20#include <linux/file.h>
21#include <linux/blkdev.h>
22#include <linux/backing-dev.h>
23#include <linux/swap.h>
24#include <linux/swapops.h>
25#include <linux/shmem_fs.h>
26#include <linux/mmu_notifier.h>
27
28#include <asm/tlb.h>
29
30#include "internal.h"
31
32/*
33 * Any behaviour which results in changes to the vma->vm_flags needs to
34 * take mmap_sem for writing. Others, which simply traverse vmas, need
35 * to only take it for reading.
36 */
37static int madvise_need_mmap_write(int behavior)
38{
39 switch (behavior) {
40 case MADV_REMOVE:
41 case MADV_WILLNEED:
42 case MADV_DONTNEED:
43 case MADV_FREE:
44 return 0;
45 default:
46 /* be safe, default to 1. list exceptions explicitly */
47 return 1;
48 }
49}
50
51/*
52 * We can potentially split a vm area into separate
53 * areas, each area with its own behavior.
54 */
55static long madvise_behavior(struct vm_area_struct *vma,
56 struct vm_area_struct **prev,
57 unsigned long start, unsigned long end, int behavior)
58{
59 struct mm_struct *mm = vma->vm_mm;
60 int error = 0;
61 pgoff_t pgoff;
62 unsigned long new_flags = vma->vm_flags;
63
64 switch (behavior) {
65 case MADV_NORMAL:
66 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
67 break;
68 case MADV_SEQUENTIAL:
69 new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
70 break;
71 case MADV_RANDOM:
72 new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
73 break;
74 case MADV_DONTFORK:
75 new_flags |= VM_DONTCOPY;
76 break;
77 case MADV_DOFORK:
78 if (vma->vm_flags & VM_IO) {
79 error = -EINVAL;
80 goto out;
81 }
82 new_flags &= ~VM_DONTCOPY;
83 break;
84 case MADV_WIPEONFORK:
85 /* MADV_WIPEONFORK is only supported on anonymous memory. */
86 if (vma->vm_file || vma->vm_flags & VM_SHARED) {
87 error = -EINVAL;
88 goto out;
89 }
90 new_flags |= VM_WIPEONFORK;
91 break;
92 case MADV_KEEPONFORK:
93 new_flags &= ~VM_WIPEONFORK;
94 break;
95 case MADV_DONTDUMP:
96 new_flags |= VM_DONTDUMP;
97 break;
98 case MADV_DODUMP:
99 if (new_flags & VM_SPECIAL) {
100 error = -EINVAL;
101 goto out;
102 }
103 new_flags &= ~VM_DONTDUMP;
104 break;
105 case MADV_MERGEABLE:
106 case MADV_UNMERGEABLE:
107 error = ksm_madvise(vma, start, end, behavior, &new_flags);
108 if (error) {
109 /*
110 * madvise() returns EAGAIN if kernel resources, such as
111 * slab, are temporarily unavailable.
112 */
113 if (error == -ENOMEM)
114 error = -EAGAIN;
115 goto out;
116 }
117 break;
118 case MADV_HUGEPAGE:
119 case MADV_NOHUGEPAGE:
120 error = hugepage_madvise(vma, &new_flags, behavior);
121 if (error) {
122 /*
123 * madvise() returns EAGAIN if kernel resources, such as
124 * slab, are temporarily unavailable.
125 */
126 if (error == -ENOMEM)
127 error = -EAGAIN;
128 goto out;
129 }
130 break;
131 }
132
133 if (new_flags == vma->vm_flags) {
134 *prev = vma;
135 goto out;
136 }
137
138 pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
139 *prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
140 vma->vm_file, pgoff, vma_policy(vma),
141 vma->vm_userfaultfd_ctx);
142 if (*prev) {
143 vma = *prev;
144 goto success;
145 }
146
147 *prev = vma;
148
149 if (start != vma->vm_start) {
150 if (unlikely(mm->map_count >= sysctl_max_map_count)) {
151 error = -ENOMEM;
152 goto out;
153 }
154 error = __split_vma(mm, vma, start, 1);
155 if (error) {
156 /*
157 * madvise() returns EAGAIN if kernel resources, such as
158 * slab, are temporarily unavailable.
159 */
160 if (error == -ENOMEM)
161 error = -EAGAIN;
162 goto out;
163 }
164 }
165
166 if (end != vma->vm_end) {
167 if (unlikely(mm->map_count >= sysctl_max_map_count)) {
168 error = -ENOMEM;
169 goto out;
170 }
171 error = __split_vma(mm, vma, end, 0);
172 if (error) {
173 /*
174 * madvise() returns EAGAIN if kernel resources, such as
175 * slab, are temporarily unavailable.
176 */
177 if (error == -ENOMEM)
178 error = -EAGAIN;
179 goto out;
180 }
181 }
182
183success:
184 /*
185 * vm_flags is protected by the mmap_sem held in write mode.
186 */
187 vma->vm_flags = new_flags;
188out:
189 return error;
190}
191
192#ifdef CONFIG_SWAP
193static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
194 unsigned long end, struct mm_walk *walk)
195{
196 pte_t *orig_pte;
197 struct vm_area_struct *vma = walk->private;
198 unsigned long index;
199
200 if (pmd_none_or_trans_huge_or_clear_bad(pmd))
201 return 0;
202
203 for (index = start; index != end; index += PAGE_SIZE) {
204 pte_t pte;
205 swp_entry_t entry;
206 struct page *page;
207 spinlock_t *ptl;
208
209 orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
210 pte = *(orig_pte + ((index - start) / PAGE_SIZE));
211 pte_unmap_unlock(orig_pte, ptl);
212
213 if (pte_present(pte) || pte_none(pte))
214 continue;
215 entry = pte_to_swp_entry(pte);
216 if (unlikely(non_swap_entry(entry)))
217 continue;
218
219 page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
220 vma, index, false);
221 if (page)
222 put_page(page);
223 }
224
225 return 0;
226}
227
228static void force_swapin_readahead(struct vm_area_struct *vma,
229 unsigned long start, unsigned long end)
230{
231 struct mm_walk walk = {
232 .mm = vma->vm_mm,
233 .pmd_entry = swapin_walk_pmd_entry,
234 .private = vma,
235 };
236
237 walk_page_range(start, end, &walk);
238
239 lru_add_drain(); /* Push any new pages onto the LRU now */
240}
241
242static void force_shm_swapin_readahead(struct vm_area_struct *vma,
243 unsigned long start, unsigned long end,
244 struct address_space *mapping)
245{
246 pgoff_t index;
247 struct page *page;
248 swp_entry_t swap;
249
250 for (; start < end; start += PAGE_SIZE) {
251 index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
252
253 page = find_get_entry(mapping, index);
254 if (!radix_tree_exceptional_entry(page)) {
255 if (page)
256 put_page(page);
257 continue;
258 }
259 swap = radix_to_swp_entry(page);
260 page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
261 NULL, 0, false);
262 if (page)
263 put_page(page);
264 }
265
266 lru_add_drain(); /* Push any new pages onto the LRU now */
267}
268#endif /* CONFIG_SWAP */
269
270/*
271 * Schedule all required I/O operations. Do not wait for completion.
272 */
273static long madvise_willneed(struct vm_area_struct *vma,
274 struct vm_area_struct **prev,
275 unsigned long start, unsigned long end)
276{
277 struct file *file = vma->vm_file;
278
279 *prev = vma;
280#ifdef CONFIG_SWAP
281 if (!file) {
282 force_swapin_readahead(vma, start, end);
283 return 0;
284 }
285
286 if (shmem_mapping(file->f_mapping)) {
287 force_shm_swapin_readahead(vma, start, end,
288 file->f_mapping);
289 return 0;
290 }
291#else
292 if (!file)
293 return -EBADF;
294#endif
295
296 if (IS_DAX(file_inode(file))) {
297 /* no bad return value, but ignore advice */
298 return 0;
299 }
300
301 start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
302 if (end > vma->vm_end)
303 end = vma->vm_end;
304 end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
305
306 force_page_cache_readahead(file->f_mapping, file, start, end - start);
307 return 0;
308}
309
310static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
311 unsigned long end, struct mm_walk *walk)
312
313{
314 struct mmu_gather *tlb = walk->private;
315 struct mm_struct *mm = tlb->mm;
316 struct vm_area_struct *vma = walk->vma;
317 spinlock_t *ptl;
318 pte_t *orig_pte, *pte, ptent;
319 struct page *page;
320 int nr_swap = 0;
321 unsigned long next;
322
323 next = pmd_addr_end(addr, end);
324 if (pmd_trans_huge(*pmd))
325 if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next))
326 goto next;
327
328 if (pmd_trans_unstable(pmd))
329 return 0;
330
331 tlb_remove_check_page_size_change(tlb, PAGE_SIZE);
332 orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
333 flush_tlb_batched_pending(mm);
334 arch_enter_lazy_mmu_mode();
335 for (; addr != end; pte++, addr += PAGE_SIZE) {
336 ptent = *pte;
337
338 if (pte_none(ptent))
339 continue;
340 /*
341 * If the pte has a swp_entry, just clear the page table entry to
342 * prevent swap-in, which is more expensive than
343 * (page allocation + zeroing).
344 */
345 if (!pte_present(ptent)) {
346 swp_entry_t entry;
347
348 entry = pte_to_swp_entry(ptent);
349 if (non_swap_entry(entry))
350 continue;
351 nr_swap--;
352 free_swap_and_cache(entry);
353 pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
354 continue;
355 }
356
357 page = _vm_normal_page(vma, addr, ptent, true);
358 if (!page)
359 continue;
360
361 /*
362 * If pmd isn't transhuge but the page is THP and
363 * is owned by only this process, split it and
364 * deactivate all pages.
365 */
366 if (PageTransCompound(page)) {
367 if (page_mapcount(page) != 1)
368 goto out;
369 get_page(page);
370 if (!trylock_page(page)) {
371 put_page(page);
372 goto out;
373 }
374 pte_unmap_unlock(orig_pte, ptl);
375 if (split_huge_page(page)) {
376 unlock_page(page);
377 put_page(page);
378 pte_offset_map_lock(mm, pmd, addr, &ptl);
379 goto out;
380 }
381 unlock_page(page);
382 put_page(page);
383 pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
384 pte--;
385 addr -= PAGE_SIZE;
386 continue;
387 }
388
389 VM_BUG_ON_PAGE(PageTransCompound(page), page);
390
391 if (PageSwapCache(page) || PageDirty(page)) {
392 if (!trylock_page(page))
393 continue;
394 /*
395 * If the page is shared with others, we can't clear
396 * PG_dirty of the page.
397 */
398 if (page_mapcount(page) != 1) {
399 unlock_page(page);
400 continue;
401 }
402
403 if (PageSwapCache(page) && !try_to_free_swap(page)) {
404 unlock_page(page);
405 continue;
406 }
407
408 ClearPageDirty(page);
409 unlock_page(page);
410 }
411
412 if (pte_young(ptent) || pte_dirty(ptent)) {
413 /*
414 * Some architectures (e.g. PPC) don't update the TLB
415 * with set_pte_at and tlb_remove_tlb_entry, so for
416 * portability, remap the pte as old|clean
417 * after clearing it.
418 */
419 ptent = ptep_get_and_clear_full(mm, addr, pte,
420 tlb->fullmm);
421
422 ptent = pte_mkold(ptent);
423 ptent = pte_mkclean(ptent);
424 set_pte_at(mm, addr, pte, ptent);
425 tlb_remove_tlb_entry(tlb, pte, addr);
426 }
427 mark_page_lazyfree(page);
428 }
429out:
430 if (nr_swap) {
431 if (current->mm == mm)
432 sync_mm_rss(mm);
433
434 add_mm_counter(mm, MM_SWAPENTS, nr_swap);
435 }
436 arch_leave_lazy_mmu_mode();
437 pte_unmap_unlock(orig_pte, ptl);
438 cond_resched();
439next:
440 return 0;
441}
442
443static void madvise_free_page_range(struct mmu_gather *tlb,
444 struct vm_area_struct *vma,
445 unsigned long addr, unsigned long end)
446{
447 struct mm_walk free_walk = {
448 .pmd_entry = madvise_free_pte_range,
449 .mm = vma->vm_mm,
450 .private = tlb,
451 };
452
453 tlb_start_vma(tlb, vma);
454 walk_page_range(addr, end, &free_walk);
455 tlb_end_vma(tlb, vma);
456}
457
458static int madvise_free_single_vma(struct vm_area_struct *vma,
459 unsigned long start_addr, unsigned long end_addr)
460{
461 unsigned long start, end;
462 struct mm_struct *mm = vma->vm_mm;
463 struct mmu_gather tlb;
464
465 /* MADV_FREE only works for anonymous vmas at the moment */
466 if (!vma_is_anonymous(vma))
467 return -EINVAL;
468
469 start = max(vma->vm_start, start_addr);
470 if (start >= vma->vm_end)
471 return -EINVAL;
472 end = min(vma->vm_end, end_addr);
473 if (end <= vma->vm_start)
474 return -EINVAL;
475
476 lru_add_drain();
477 tlb_gather_mmu(&tlb, mm, start, end);
478 update_hiwater_rss(mm);
479
480 mmu_notifier_invalidate_range_start(mm, start, end);
481 madvise_free_page_range(&tlb, vma, start, end);
482 mmu_notifier_invalidate_range_end(mm, start, end);
483 tlb_finish_mmu(&tlb, start, end);
484
485 return 0;
486}
487
488/*
489 * Application no longer needs these pages. If the pages are dirty,
490 * it's OK to just throw them away. The app will be more careful about
491 * data it wants to keep. Be sure to free swap resources too. The
492 * zap_page_range call sets things up for shrink_active_list to actually free
493 * these pages later if no one else has touched them in the meantime,
494 * although we could add these pages to a global reuse list for
495 * shrink_active_list to pick up before reclaiming other pages.
496 *
497 * NB: This interface discards data rather than pushes it out to swap,
498 * as some implementations do. This has performance implications for
499 * applications like large transactional databases which want to discard
500 * pages in anonymous maps after committing to backing store the data
501 * that was kept in them. There is no reason to write this data out to
502 * the swap area if the application is discarding it.
503 *
504 * An interface that causes the system to free clean pages and flush
505 * dirty pages is already available as msync(MS_INVALIDATE).
506 */
507static long madvise_dontneed_single_vma(struct vm_area_struct *vma,
508 unsigned long start, unsigned long end)
509{
510 zap_page_range(vma, start, end - start);
511 return 0;
512}
513
514static long madvise_dontneed_free(struct vm_area_struct *vma,
515 struct vm_area_struct **prev,
516 unsigned long start, unsigned long end,
517 int behavior)
518{
519 *prev = vma;
520 if (!can_madv_dontneed_vma(vma))
521 return -EINVAL;
522
523 if (!userfaultfd_remove(vma, start, end)) {
524 *prev = NULL; /* mmap_sem has been dropped, prev is stale */
525
526 down_read(&current->mm->mmap_sem);
527 vma = find_vma(current->mm, start);
528 if (!vma)
529 return -ENOMEM;
530 if (start < vma->vm_start) {
531 /*
532 * This "vma" under revalidation is the one
533 * with the lowest vma->vm_start where start
534 * is also < vma->vm_end. If start <
535 * vma->vm_start it means a hole materialized
536 * in the user address space within the
537 * virtual range passed to MADV_DONTNEED
538 * or MADV_FREE.
539 */
540 return -ENOMEM;
541 }
542 if (!can_madv_dontneed_vma(vma))
543 return -EINVAL;
544 if (end > vma->vm_end) {
545 /*
546 * Don't fail if end > vma->vm_end. If the old
547 * vma was split while the mmap_sem was
548 * released the effect of the concurrent
549 * operation may not cause madvise() to
550 * have an undefined result. There may be an
551 * adjacent next vma that we'll walk
552 * next. userfaultfd_remove() will generate an
553 * UFFD_EVENT_REMOVE repetition on the
554 * end-vma->vm_end range, but the manager can
555 * handle a repetition fine.
556 */
557 end = vma->vm_end;
558 }
559 VM_WARN_ON(start >= end);
560 }
561
562 if (behavior == MADV_DONTNEED)
563 return madvise_dontneed_single_vma(vma, start, end);
564 else if (behavior == MADV_FREE)
565 return madvise_free_single_vma(vma, start, end);
566 else
567 return -EINVAL;
568}
569
570/*
571 * Application wants to free up the pages and associated backing store.
572 * This is effectively punching a hole into the middle of a file.
573 */
574static long madvise_remove(struct vm_area_struct *vma,
575 struct vm_area_struct **prev,
576 unsigned long start, unsigned long end)
577{
578 loff_t offset;
579 int error;
580 struct file *f;
581
582 *prev = NULL; /* tell sys_madvise we drop mmap_sem */
583
584 if (vma->vm_flags & VM_LOCKED)
585 return -EINVAL;
586
587 f = vma->vm_file;
588
589 if (!f || !f->f_mapping || !f->f_mapping->host) {
590 return -EINVAL;
591 }
592
593 if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
594 return -EACCES;
595
596 offset = (loff_t)(start - vma->vm_start)
597 + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
598
599 /*
600 * Filesystem's fallocate may need to take i_mutex. We need to
601 * explicitly grab a reference because the vma (and hence the
602 * vma's reference to the file) can go away as soon as we drop
603 * mmap_sem.
604 */
605 get_file(f);
606 if (userfaultfd_remove(vma, start, end)) {
607 /* mmap_sem was not released by userfaultfd_remove() */
608 up_read(&current->mm->mmap_sem);
609 }
610 error = vfs_fallocate(f,
611 FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
612 offset, end - start);
613 fput(f);
614 down_read(&current->mm->mmap_sem);
615 return error;
616}
617
618#ifdef CONFIG_MEMORY_FAILURE
619/*
620 * Error injection support for memory error handling.
621 */
622static int madvise_inject_error(int behavior,
623 unsigned long start, unsigned long end)
624{
625 struct page *page;
626 struct zone *zone;
627 unsigned int order;
628
629 if (!capable(CAP_SYS_ADMIN))
630 return -EPERM;
631
632
633 for (; start < end; start += PAGE_SIZE << order) {
634 int ret;
635
636 ret = get_user_pages_fast(start, 1, 0, &page);
637 if (ret != 1)
638 return ret;
639
640 /*
641 * When soft offlining hugepages, after migrating the page
642 * we dissolve it, therefore in the second loop "page" will
643 * no longer be a compound page, and order will be 0.
644 */
645 order = compound_order(compound_head(page));
646
647 if (PageHWPoison(page)) {
648 put_page(page);
649 continue;
650 }
651
652 if (behavior == MADV_SOFT_OFFLINE) {
653 pr_info("Soft offlining pfn %#lx at process virtual address %#lx\n",
654 page_to_pfn(page), start);
655
656 ret = soft_offline_page(page, MF_COUNT_INCREASED);
657 if (ret)
658 return ret;
659 continue;
660 }
661 pr_info("Injecting memory failure for pfn %#lx at process virtual address %#lx\n",
662 page_to_pfn(page), start);
663
664 ret = memory_failure(page_to_pfn(page), MF_COUNT_INCREASED);
665 if (ret)
666 return ret;
667 }
668
669 /* Ensure that all poisoned pages are removed from per-cpu lists */
670 for_each_populated_zone(zone)
671 drain_all_pages(zone);
672
673 return 0;
674}
675#endif
676
677static long
678madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
679 unsigned long start, unsigned long end, int behavior)
680{
681 switch (behavior) {
682 case MADV_REMOVE:
683 return madvise_remove(vma, prev, start, end);
684 case MADV_WILLNEED:
685 return madvise_willneed(vma, prev, start, end);
686 case MADV_FREE:
687 case MADV_DONTNEED:
688 return madvise_dontneed_free(vma, prev, start, end, behavior);
689 default:
690 return madvise_behavior(vma, prev, start, end, behavior);
691 }
692}
693
694static bool
695madvise_behavior_valid(int behavior)
696{
697 switch (behavior) {
698 case MADV_DOFORK:
699 case MADV_DONTFORK:
700 case MADV_NORMAL:
701 case MADV_SEQUENTIAL:
702 case MADV_RANDOM:
703 case MADV_REMOVE:
704 case MADV_WILLNEED:
705 case MADV_DONTNEED:
706 case MADV_FREE:
707#ifdef CONFIG_KSM
708 case MADV_MERGEABLE:
709 case MADV_UNMERGEABLE:
710#endif
711#ifdef CONFIG_TRANSPARENT_HUGEPAGE
712 case MADV_HUGEPAGE:
713 case MADV_NOHUGEPAGE:
714#endif
715 case MADV_DONTDUMP:
716 case MADV_DODUMP:
717 case MADV_WIPEONFORK:
718 case MADV_KEEPONFORK:
719#ifdef CONFIG_MEMORY_FAILURE
720 case MADV_SOFT_OFFLINE:
721 case MADV_HWPOISON:
722#endif
723 return true;
724
725 default:
726 return false;
727 }
728}
729
730/*
731 * The madvise(2) system call.
732 *
733 * Applications can use madvise() to advise the kernel how it should
734 * handle paging I/O in this VM area. The idea is to help the kernel
735 * use appropriate read-ahead and caching techniques. The information
736 * provided is advisory only, and can be safely disregarded by the
737 * kernel without affecting the correct operation of the application.
738 *
739 * behavior values:
740 * MADV_NORMAL - the default behavior is to read clusters. This
741 * results in some read-ahead and read-behind.
742 * MADV_RANDOM - the system should read the minimum amount of data
743 * on any access, since it is unlikely that the appli-
744 * cation will need more than what it asks for.
745 * MADV_SEQUENTIAL - pages in the given range will probably be accessed
746 * once, so they can be aggressively read ahead, and
747 * can be freed soon after they are accessed.
748 * MADV_WILLNEED - the application is notifying the system to read
749 * some pages ahead.
750 * MADV_DONTNEED - the application is finished with the given range,
751 * so the kernel can free resources associated with it.
752 * MADV_FREE - the application marks pages in the given range as lazy free,
753 * where actual purges are postponed until memory pressure happens.
754 * MADV_REMOVE - the application wants to free up the given range of
755 * pages and associated backing store.
756 * MADV_DONTFORK - omit this area from child's address space when forking:
757 * typically, to avoid COWing pages pinned by get_user_pages().
758 * MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
759 * MADV_WIPEONFORK - present the child process with zero-filled memory in this
760 * range after a fork.
761 * MADV_KEEPONFORK - undo the effect of MADV_WIPEONFORK
762 * MADV_HWPOISON - trigger memory error handler as if the given memory range
763 * were corrupted by unrecoverable hardware memory failure.
764 * MADV_SOFT_OFFLINE - try to soft-offline the given range of memory.
765 * MADV_MERGEABLE - the application recommends that KSM try to merge pages in
766 * this area with pages of identical content from other such areas.
767 * MADV_UNMERGEABLE- cancel MADV_MERGEABLE: no longer merge pages with others.
768 * MADV_HUGEPAGE - the application wants to back the given range by transparent
769 * huge pages in the future. Existing pages might be coalesced and
770 * new pages might be allocated as THP.
771 * MADV_NOHUGEPAGE - mark the given range as not worth being backed by
772 * transparent huge pages so the existing pages will not be
773 * coalesced into THP and new pages will not be allocated as THP.
774 * MADV_DONTDUMP - the application wants to prevent pages in the given range
775 * from being included in its core dump.
776 * MADV_DODUMP - cancel MADV_DONTDUMP: no longer exclude from core dump.
777 *
778 * return values:
779 * zero - success
780 * -EINVAL - start + len < 0, start is not page-aligned,
781 * "behavior" is not a valid value, or application
782 * is attempting to release locked or shared pages,
783 * or the specified address range includes file, Huge TLB,
784 * MAP_SHARED or VM_PFNMAP range.
785 * -ENOMEM - addresses in the specified range are not currently
786 * mapped, or are outside the AS of the process.
787 * -EIO - an I/O error occurred while paging in data.
788 * -EBADF - map exists, but area maps something that isn't a file.
789 * -EAGAIN - a kernel resource was temporarily unavailable.
790 */
791SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
792{
793 unsigned long end, tmp;
794 struct vm_area_struct *vma, *prev;
795 int unmapped_error = 0;
796 int error = -EINVAL;
797 int write;
798 size_t len;
799 struct blk_plug plug;
800
801 if (!madvise_behavior_valid(behavior))
802 return error;
803
804 if (start & ~PAGE_MASK)
805 return error;
806 len = (len_in + ~PAGE_MASK) & PAGE_MASK;
807
808 /* Check to see whether len was rounded up from a small negative value to zero */
809 if (len_in && !len)
810 return error;
811
812 end = start + len;
813 if (end < start)
814 return error;
815
816 error = 0;
817 if (end == start)
818 return error;
819
820#ifdef CONFIG_MEMORY_FAILURE
821 if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE)
822 return madvise_inject_error(behavior, start, start + len_in);
823#endif
824
825 write = madvise_need_mmap_write(behavior);
826 if (write) {
827 if (down_write_killable(&current->mm->mmap_sem))
828 return -EINTR;
829 } else {
830 down_read(&current->mm->mmap_sem);
831 }
832
833 /*
834 * If the interval [start,end) covers some unmapped address
835 * ranges, just ignore them, but return -ENOMEM at the end.
836 * - different from the way of handling in mlock etc.
837 */
838 vma = find_vma_prev(current->mm, start, &prev);
839 if (vma && start > vma->vm_start)
840 prev = vma;
841
842 blk_start_plug(&plug);
843 for (;;) {
844 /* Still start < end. */
845 error = -ENOMEM;
846 if (!vma)
847 goto out;
848
849 /* Here start < (end|vma->vm_end). */
850 if (start < vma->vm_start) {
851 unmapped_error = -ENOMEM;
852 start = vma->vm_start;
853 if (start >= end)
854 goto out;
855 }
856
857 /* Here vma->vm_start <= start < (end|vma->vm_end) */
858 tmp = vma->vm_end;
859 if (end < tmp)
860 tmp = end;
861
862 /* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
863 error = madvise_vma(vma, &prev, start, tmp, behavior);
864 if (error)
865 goto out;
866 start = tmp;
867 if (prev && start < prev->vm_end)
868 start = prev->vm_end;
869 error = unmapped_error;
870 if (start >= end)
871 goto out;
872 if (prev)
873 vma = prev->vm_next;
874 else /* madvise_remove dropped mmap_sem */
875 vma = find_vma(current->mm, start);
876 }
877out:
878 blk_finish_plug(&plug);
879 if (write)
880 up_write(&current->mm->mmap_sem);
881 else
882 up_read(&current->mm->mmap_sem);
883
884 return error;
885}
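/*
 * Illustrative userspace sketch (not part of mm/madvise.c): MADV_WIPEONFORK,
 * new in this revision of the file (Linux 4.14+), makes a private anonymous
 * range read back as zeroes in the child after fork(), while the parent keeps
 * its data. This is useful for per-process secrets such as PRNG state. A
 * minimal sketch, assuming the MADV_WIPEONFORK constant is available in the
 * installed headers.
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	size_t page = sysconf(_SC_PAGESIZE);
	char *secret = mmap(NULL, page, PROT_READ | PROT_WRITE,
			    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (secret == MAP_FAILED)
		return 1;

	strcpy(secret, "parent-only");
	if (madvise(secret, page, MADV_WIPEONFORK))
		perror("madvise(MADV_WIPEONFORK)"); /* EINVAL on older kernels */

	pid_t pid = fork();
	if (pid == 0) {
		/* Child: the range was wiped, so this prints an empty string. */
		printf("child sees: \"%s\"\n", secret);
		_exit(0);
	}
	waitpid(pid, NULL, 0);
	printf("parent sees: \"%s\"\n", secret); /* still "parent-only" */
	return 0;
}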