1/*
2 * Memory Migration functionality - linux/mm/migrate.c
3 *
4 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
5 *
6 * Page migration was first developed in the context of the memory hotplug
7 * project. The main authors of the migration code are:
8 *
9 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
10 * Hirokazu Takahashi <taka@valinux.co.jp>
11 * Dave Hansen <haveblue@us.ibm.com>
12 * Christoph Lameter
13 */
14
15#include <linux/migrate.h>
16#include <linux/export.h>
17#include <linux/swap.h>
18#include <linux/swapops.h>
19#include <linux/pagemap.h>
20#include <linux/buffer_head.h>
21#include <linux/mm_inline.h>
22#include <linux/nsproxy.h>
23#include <linux/pagevec.h>
24#include <linux/ksm.h>
25#include <linux/rmap.h>
26#include <linux/topology.h>
27#include <linux/cpu.h>
28#include <linux/cpuset.h>
29#include <linux/writeback.h>
30#include <linux/mempolicy.h>
31#include <linux/vmalloc.h>
32#include <linux/security.h>
33#include <linux/backing-dev.h>
34#include <linux/syscalls.h>
35#include <linux/hugetlb.h>
36#include <linux/hugetlb_cgroup.h>
37#include <linux/gfp.h>
38#include <linux/balloon_compaction.h>
39#include <linux/mmu_notifier.h>
40#include <linux/page_idle.h>
41#include <linux/page_owner.h>
42
43#include <asm/tlbflush.h>
44
45#define CREATE_TRACE_POINTS
46#include <trace/events/migrate.h>
47
48#include "internal.h"
49
50/*
51 * migrate_prep() needs to be called before we start compiling a list of pages
52 * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
53 * undesirable, use migrate_prep_local().
54 */
55int migrate_prep(void)
56{
57 /*
58 * Clear the LRU lists so pages can be isolated.
59 * Note that pages may be moved off the LRU after we have
60 * drained them. Those pages will fail to migrate like other
61 * pages that may be busy.
62 */
63 lru_add_drain_all();
64
65 return 0;
66}
67
68/* Do the necessary work of migrate_prep but not if it involves other CPUs */
69int migrate_prep_local(void)
70{
71 lru_add_drain();
72
73 return 0;
74}
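/*
 * Illustrative sketch, not part of this file: a typical caller sequence for
 * the helpers above. The allocation callback alloc_target() and the variable
 * target_nid are hypothetical; real callers supply their own new_page_t,
 * such as new_page_node() or alloc_misplaced_dst_page() further below.
 *
 *	static struct page *alloc_target(struct page *p, unsigned long private,
 *					 int **result)
 *	{
 *		return __alloc_pages_node((int)private, GFP_HIGHUSER_MOVABLE, 0);
 *	}
 *
 *	LIST_HEAD(pagelist);
 *
 *	migrate_prep();
 *	if (!isolate_lru_page(page)) {
 *		list_add_tail(&page->lru, &pagelist);
 *		inc_zone_page_state(page, NR_ISOLATED_ANON +
 *				    page_is_file_cache(page));
 *	}
 *	if (migrate_pages(&pagelist, alloc_target, NULL, target_nid,
 *			  MIGRATE_SYNC, MR_SYSCALL))
 *		putback_movable_pages(&pagelist);
 */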
75
76/*
77 * Put previously isolated pages back onto the appropriate lists
78 * from where they were once taken off for compaction/migration.
79 *
80 * This function shall be used whenever the isolated pageset has been
81 * built from lru, balloon, or hugetlbfs pages. See isolate_migratepages_range()
82 * and isolate_huge_page().
83 */
84void putback_movable_pages(struct list_head *l)
85{
86 struct page *page;
87 struct page *page2;
88
89 list_for_each_entry_safe(page, page2, l, lru) {
90 if (unlikely(PageHuge(page))) {
91 putback_active_hugepage(page);
92 continue;
93 }
94 list_del(&page->lru);
95 dec_zone_page_state(page, NR_ISOLATED_ANON +
96 page_is_file_cache(page));
97 if (unlikely(isolated_balloon_page(page)))
98 balloon_page_putback(page);
99 else
100 putback_lru_page(page);
101 }
102}
103
104/*
105 * Restore a potential migration pte to a working pte entry
106 */
107static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
108 unsigned long addr, void *old)
109{
110 struct mm_struct *mm = vma->vm_mm;
111 swp_entry_t entry;
112 pmd_t *pmd;
113 pte_t *ptep, pte;
114 spinlock_t *ptl;
115
116 if (unlikely(PageHuge(new))) {
117 ptep = huge_pte_offset(mm, addr);
118 if (!ptep)
119 goto out;
120 ptl = huge_pte_lockptr(hstate_vma(vma), mm, ptep);
121 } else {
122 pmd = mm_find_pmd(mm, addr);
123 if (!pmd)
124 goto out;
125
126 ptep = pte_offset_map(pmd, addr);
127
128 /*
129 * Peek to check is_swap_pte() before taking ptlock? No, we
130 * can race mremap's move_ptes(), which skips anon_vma lock.
131 */
132
133 ptl = pte_lockptr(mm, pmd);
134 }
135
136 spin_lock(ptl);
137 pte = *ptep;
138 if (!is_swap_pte(pte))
139 goto unlock;
140
141 entry = pte_to_swp_entry(pte);
142
143 if (!is_migration_entry(entry) ||
144 migration_entry_to_page(entry) != old)
145 goto unlock;
146
147 get_page(new);
148 pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
149 if (pte_swp_soft_dirty(*ptep))
150 pte = pte_mksoft_dirty(pte);
151
152 /* Recheck VMA as permissions can change since migration started */
153 if (is_write_migration_entry(entry))
154 pte = maybe_mkwrite(pte, vma);
155
156#ifdef CONFIG_HUGETLB_PAGE
157 if (PageHuge(new)) {
158 pte = pte_mkhuge(pte);
159 pte = arch_make_huge_pte(pte, vma, new, 0);
160 }
161#endif
162 flush_dcache_page(new);
163 set_pte_at(mm, addr, ptep, pte);
164
165 if (PageHuge(new)) {
166 if (PageAnon(new))
167 hugepage_add_anon_rmap(new, vma, addr);
168 else
169 page_dup_rmap(new, true);
170 } else if (PageAnon(new))
171 page_add_anon_rmap(new, vma, addr, false);
172 else
173 page_add_file_rmap(new);
174
175 if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
176 mlock_vma_page(new);
177
178 /* No need to invalidate - it was non-present before */
179 update_mmu_cache(vma, addr, ptep);
180unlock:
181 pte_unmap_unlock(ptep, ptl);
182out:
183 return SWAP_AGAIN;
184}
185
186/*
187 * Get rid of all migration entries and replace them by
188 * references to the indicated page.
189 */
190void remove_migration_ptes(struct page *old, struct page *new, bool locked)
191{
192 struct rmap_walk_control rwc = {
193 .rmap_one = remove_migration_pte,
194 .arg = old,
195 };
196
197 if (locked)
198 rmap_walk_locked(new, &rwc);
199 else
200 rmap_walk(new, &rwc);
201}
202
203/*
204 * Something used the pte of a page under migration. We need to
205 * get to the page and wait until migration is finished.
206 * When we return from this function the fault will be retried.
207 */
208void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
209 spinlock_t *ptl)
210{
211 pte_t pte;
212 swp_entry_t entry;
213 struct page *page;
214
215 spin_lock(ptl);
216 pte = *ptep;
217 if (!is_swap_pte(pte))
218 goto out;
219
220 entry = pte_to_swp_entry(pte);
221 if (!is_migration_entry(entry))
222 goto out;
223
224 page = migration_entry_to_page(entry);
225
226 /*
227 * Once the radix-tree replacement for page migration has started, the
228 * page_count *must* be zero. And we don't want to call wait_on_page_locked()
229 * against a page without holding a reference via get_page().
230 * So we use get_page_unless_zero() here. Even if that fails, the page
231 * fault will simply occur again.
232 */
233 if (!get_page_unless_zero(page))
234 goto out;
235 pte_unmap_unlock(ptep, ptl);
236 wait_on_page_locked(page);
237 put_page(page);
238 return;
239out:
240 pte_unmap_unlock(ptep, ptl);
241}
242
243void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
244 unsigned long address)
245{
246 spinlock_t *ptl = pte_lockptr(mm, pmd);
247 pte_t *ptep = pte_offset_map(pmd, address);
248 __migration_entry_wait(mm, ptep, ptl);
249}
250
251void migration_entry_wait_huge(struct vm_area_struct *vma,
252 struct mm_struct *mm, pte_t *pte)
253{
254 spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte);
255 __migration_entry_wait(mm, pte, ptl);
256}
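/*
 * Illustrative sketch, not part of this file: this is roughly how the fault
 * path ends up in migration_entry_wait(). In do_swap_page() (mm/memory.c),
 * once the faulting pte is found to hold a migration entry, the handler
 * waits for the migration to finish and then retries the fault:
 *
 *	entry = pte_to_swp_entry(orig_pte);
 *	if (unlikely(non_swap_entry(entry))) {
 *		if (is_migration_entry(entry))
 *			migration_entry_wait(mm, pmd, address);
 *		...
 *	}
 */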
257
258#ifdef CONFIG_BLOCK
259/* Returns true if all buffers are successfully locked */
260static bool buffer_migrate_lock_buffers(struct buffer_head *head,
261 enum migrate_mode mode)
262{
263 struct buffer_head *bh = head;
264
265 /* Simple case, sync compaction */
266 if (mode != MIGRATE_ASYNC) {
267 do {
268 get_bh(bh);
269 lock_buffer(bh);
270 bh = bh->b_this_page;
271
272 } while (bh != head);
273
274 return true;
275 }
276
277 /* async case, we cannot block on lock_buffer so use trylock_buffer */
278 do {
279 get_bh(bh);
280 if (!trylock_buffer(bh)) {
281 /*
282 * We failed to lock the buffer and cannot stall in
283 * async migration. Release the taken locks
284 */
285 struct buffer_head *failed_bh = bh;
286 put_bh(failed_bh);
287 bh = head;
288 while (bh != failed_bh) {
289 unlock_buffer(bh);
290 put_bh(bh);
291 bh = bh->b_this_page;
292 }
293 return false;
294 }
295
296 bh = bh->b_this_page;
297 } while (bh != head);
298 return true;
299}
300#else
301static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
302 enum migrate_mode mode)
303{
304 return true;
305}
306#endif /* CONFIG_BLOCK */
307
308/*
309 * Replace the page in the mapping.
310 *
311 * The number of remaining references must be:
312 * 1 for anonymous pages without a mapping
313 * 2 for pages with a mapping
314 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
315 */
316int migrate_page_move_mapping(struct address_space *mapping,
317 struct page *newpage, struct page *page,
318 struct buffer_head *head, enum migrate_mode mode,
319 int extra_count)
320{
321 struct zone *oldzone, *newzone;
322 int dirty;
323 int expected_count = 1 + extra_count;
324 void **pslot;
325
326 if (!mapping) {
327 /* Anonymous page without mapping */
328 if (page_count(page) != expected_count)
329 return -EAGAIN;
330
331 /* No turning back from here */
332 newpage->index = page->index;
333 newpage->mapping = page->mapping;
334 if (PageSwapBacked(page))
335 SetPageSwapBacked(newpage);
336
337 return MIGRATEPAGE_SUCCESS;
338 }
339
340 oldzone = page_zone(page);
341 newzone = page_zone(newpage);
342
343 spin_lock_irq(&mapping->tree_lock);
344
345 pslot = radix_tree_lookup_slot(&mapping->page_tree,
346 page_index(page));
347
348 expected_count += 1 + page_has_private(page);
349 if (page_count(page) != expected_count ||
350 radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
351 spin_unlock_irq(&mapping->tree_lock);
352 return -EAGAIN;
353 }
354
355 if (!page_ref_freeze(page, expected_count)) {
356 spin_unlock_irq(&mapping->tree_lock);
357 return -EAGAIN;
358 }
359
360 /*
361 * In the async migration case of moving a page with buffers, lock the
362 * buffers using trylock before the mapping is moved. If the mapping
363 * were moved first and we then failed to lock the buffers, we could
364 * not move the mapping back due to an elevated page count and would
365 * have to block waiting on other references to be dropped.
366 */
367 if (mode == MIGRATE_ASYNC && head &&
368 !buffer_migrate_lock_buffers(head, mode)) {
369 page_ref_unfreeze(page, expected_count);
370 spin_unlock_irq(&mapping->tree_lock);
371 return -EAGAIN;
372 }
373
374 /*
375 * Now we know that no one else is looking at the page:
376 * no turning back from here.
377 */
378 newpage->index = page->index;
379 newpage->mapping = page->mapping;
380 if (PageSwapBacked(page))
381 SetPageSwapBacked(newpage);
382
383 get_page(newpage); /* add cache reference */
384 if (PageSwapCache(page)) {
385 SetPageSwapCache(newpage);
386 set_page_private(newpage, page_private(page));
387 }
388
389 /* Move dirty while page refs frozen and newpage not yet exposed */
390 dirty = PageDirty(page);
391 if (dirty) {
392 ClearPageDirty(page);
393 SetPageDirty(newpage);
394 }
395
396 radix_tree_replace_slot(pslot, newpage);
397
398 /*
399 * Drop cache reference from old page by unfreezing
400 * to one less reference.
401 * We know this isn't the last reference.
402 */
403 page_ref_unfreeze(page, expected_count - 1);
404
405 spin_unlock(&mapping->tree_lock);
406 /* Leave irq disabled to prevent preemption while updating stats */
407
408 /*
409 * If moved to a different zone then also account
410 * the page for that zone. Other VM counters will be
411 * taken care of when we establish references to the
412 * new page and drop references to the old page.
413 *
414 * Note that anonymous pages are accounted for
415 * via NR_FILE_PAGES and NR_ANON_PAGES if they
416 * are mapped to swap space.
417 */
418 if (newzone != oldzone) {
419 __dec_zone_state(oldzone, NR_FILE_PAGES);
420 __inc_zone_state(newzone, NR_FILE_PAGES);
421 if (PageSwapBacked(page) && !PageSwapCache(page)) {
422 __dec_zone_state(oldzone, NR_SHMEM);
423 __inc_zone_state(newzone, NR_SHMEM);
424 }
425 if (dirty && mapping_cap_account_dirty(mapping)) {
426 __dec_zone_state(oldzone, NR_FILE_DIRTY);
427 __inc_zone_state(newzone, NR_FILE_DIRTY);
428 }
429 }
430 local_irq_enable();
431
432 return MIGRATEPAGE_SUCCESS;
433}
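/*
 * Worked example of the reference-count rule documented above
 * migrate_page_move_mapping(): for a pagecache page with buffer heads and
 * extra_count == 0, expected_count starts at 1 (1 + extra_count) and the
 * mapping path then adds 1 for the page cache reference plus 1 from
 * page_has_private(), giving the "3 for pages with a mapping and
 * PagePrivate/PagePrivate2 set" case.
 */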
434
435/*
436 * The expected number of remaining references is the same as that
437 * of migrate_page_move_mapping().
438 */
439int migrate_huge_page_move_mapping(struct address_space *mapping,
440 struct page *newpage, struct page *page)
441{
442 int expected_count;
443 void **pslot;
444
445 spin_lock_irq(&mapping->tree_lock);
446
447 pslot = radix_tree_lookup_slot(&mapping->page_tree,
448 page_index(page));
449
450 expected_count = 2 + page_has_private(page);
451 if (page_count(page) != expected_count ||
452 radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
453 spin_unlock_irq(&mapping->tree_lock);
454 return -EAGAIN;
455 }
456
457 if (!page_ref_freeze(page, expected_count)) {
458 spin_unlock_irq(&mapping->tree_lock);
459 return -EAGAIN;
460 }
461
462 newpage->index = page->index;
463 newpage->mapping = page->mapping;
464
465 get_page(newpage);
466
467 radix_tree_replace_slot(pslot, newpage);
468
469 page_ref_unfreeze(page, expected_count - 1);
470
471 spin_unlock_irq(&mapping->tree_lock);
472
473 return MIGRATEPAGE_SUCCESS;
474}
475
476/*
477 * Gigantic pages are so large that we do not guarantee that page++ pointer
478 * arithmetic will work across the entire page. We need something more
479 * specialized.
480 */
481static void __copy_gigantic_page(struct page *dst, struct page *src,
482 int nr_pages)
483{
484 int i;
485 struct page *dst_base = dst;
486 struct page *src_base = src;
487
488 for (i = 0; i < nr_pages; ) {
489 cond_resched();
490 copy_highpage(dst, src);
491
492 i++;
493 dst = mem_map_next(dst, dst_base, i);
494 src = mem_map_next(src, src_base, i);
495 }
496}
497
498static void copy_huge_page(struct page *dst, struct page *src)
499{
500 int i;
501 int nr_pages;
502
503 if (PageHuge(src)) {
504 /* hugetlbfs page */
505 struct hstate *h = page_hstate(src);
506 nr_pages = pages_per_huge_page(h);
507
508 if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) {
509 __copy_gigantic_page(dst, src, nr_pages);
510 return;
511 }
512 } else {
513 /* thp page */
514 BUG_ON(!PageTransHuge(src));
515 nr_pages = hpage_nr_pages(src);
516 }
517
518 for (i = 0; i < nr_pages; i++) {
519 cond_resched();
520 copy_highpage(dst + i, src + i);
521 }
522}
523
524/*
525 * Copy the page to its new location
526 */
527void migrate_page_copy(struct page *newpage, struct page *page)
528{
529 int cpupid;
530
531 if (PageHuge(page) || PageTransHuge(page))
532 copy_huge_page(newpage, page);
533 else
534 copy_highpage(newpage, page);
535
536 if (PageError(page))
537 SetPageError(newpage);
538 if (PageReferenced(page))
539 SetPageReferenced(newpage);
540 if (PageUptodate(page))
541 SetPageUptodate(newpage);
542 if (TestClearPageActive(page)) {
543 VM_BUG_ON_PAGE(PageUnevictable(page), page);
544 SetPageActive(newpage);
545 } else if (TestClearPageUnevictable(page))
546 SetPageUnevictable(newpage);
547 if (PageChecked(page))
548 SetPageChecked(newpage);
549 if (PageMappedToDisk(page))
550 SetPageMappedToDisk(newpage);
551
552 /* Move dirty on pages not done by migrate_page_move_mapping() */
553 if (PageDirty(page))
554 SetPageDirty(newpage);
555
556 if (page_is_young(page))
557 set_page_young(newpage);
558 if (page_is_idle(page))
559 set_page_idle(newpage);
560
561 /*
562 * Copy NUMA information to the new page, to prevent over-eager
563 * future migrations of this same page.
564 */
565 cpupid = page_cpupid_xchg_last(page, -1);
566 page_cpupid_xchg_last(newpage, cpupid);
567
568 ksm_migrate_page(newpage, page);
569 /*
570 * Please do not reorder this without considering how mm/ksm.c's
571 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
572 */
573 if (PageSwapCache(page))
574 ClearPageSwapCache(page);
575 ClearPagePrivate(page);
576 set_page_private(page, 0);
577
578 /*
579 * If any waiters have accumulated on the new page then
580 * wake them up.
581 */
582 if (PageWriteback(newpage))
583 end_page_writeback(newpage);
584
585 copy_page_owner(page, newpage);
586
587 mem_cgroup_migrate(page, newpage);
588}
589
590/************************************************************
591 * Migration functions
592 ***********************************************************/
593
594/*
595 * Common logic to directly migrate a single page suitable for
596 * pages that do not use PagePrivate/PagePrivate2.
597 *
598 * Pages are locked upon entry and exit.
599 */
600int migrate_page(struct address_space *mapping,
601 struct page *newpage, struct page *page,
602 enum migrate_mode mode)
603{
604 int rc;
605
606 BUG_ON(PageWriteback(page)); /* Writeback must be complete */
607
608 rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
609
610 if (rc != MIGRATEPAGE_SUCCESS)
611 return rc;
612
613 migrate_page_copy(newpage, page);
614 return MIGRATEPAGE_SUCCESS;
615}
616EXPORT_SYMBOL(migrate_page);
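/*
 * Illustrative sketch, not taken from any particular filesystem: a mapping
 * whose pages carry no fs-private data can wire migrate_page() straight into
 * its address_space_operations; example_aops and example_readpage are
 * hypothetical names.
 *
 *	static const struct address_space_operations example_aops = {
 *		.readpage	= example_readpage,
 *		.migratepage	= migrate_page,
 *	};
 */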
617
618#ifdef CONFIG_BLOCK
619/*
620 * Migration function for pages with buffers. This function can only be used
621 * if the underlying filesystem guarantees that no other references to "page"
622 * exist.
623 */
624int buffer_migrate_page(struct address_space *mapping,
625 struct page *newpage, struct page *page, enum migrate_mode mode)
626{
627 struct buffer_head *bh, *head;
628 int rc;
629
630 if (!page_has_buffers(page))
631 return migrate_page(mapping, newpage, page, mode);
632
633 head = page_buffers(page);
634
635 rc = migrate_page_move_mapping(mapping, newpage, page, head, mode, 0);
636
637 if (rc != MIGRATEPAGE_SUCCESS)
638 return rc;
639
640 /*
641 * In the async case, migrate_page_move_mapping() locked the buffers
642 * with an IRQ-safe spinlock held. In the sync case, the buffers
643 * need to be locked now.
644 */
645 if (mode != MIGRATE_ASYNC)
646 BUG_ON(!buffer_migrate_lock_buffers(head, mode));
647
648 ClearPagePrivate(page);
649 set_page_private(newpage, page_private(page));
650 set_page_private(page, 0);
651 put_page(page);
652 get_page(newpage);
653
654 bh = head;
655 do {
656 set_bh_page(bh, newpage, bh_offset(bh));
657 bh = bh->b_this_page;
658
659 } while (bh != head);
660
661 SetPagePrivate(newpage);
662
663 migrate_page_copy(newpage, page);
664
665 bh = head;
666 do {
667 unlock_buffer(bh);
668 put_bh(bh);
669 bh = bh->b_this_page;
670
671 } while (bh != head);
672
673 return MIGRATEPAGE_SUCCESS;
674}
675EXPORT_SYMBOL(buffer_migrate_page);
676#endif
677
678/*
679 * Writeback a page to clean the dirty state
680 */
681static int writeout(struct address_space *mapping, struct page *page)
682{
683 struct writeback_control wbc = {
684 .sync_mode = WB_SYNC_NONE,
685 .nr_to_write = 1,
686 .range_start = 0,
687 .range_end = LLONG_MAX,
688 .for_reclaim = 1
689 };
690 int rc;
691
692 if (!mapping->a_ops->writepage)
693 /* No write method for the address space */
694 return -EINVAL;
695
696 if (!clear_page_dirty_for_io(page))
697 /* Someone else already triggered a write */
698 return -EAGAIN;
699
700 /*
701 * A dirty page may imply that the underlying filesystem has
702 * the page on some queue. So the page must be clean for
703 * migration. Writeout may mean we lose the lock and the
704 * page state is no longer what we checked for earlier.
705 * At this point we know that the migration attempt cannot
706 * be successful.
707 */
708 remove_migration_ptes(page, page, false);
709
710 rc = mapping->a_ops->writepage(page, &wbc);
711
712 if (rc != AOP_WRITEPAGE_ACTIVATE)
713 /* unlocked. Relock */
714 lock_page(page);
715
716 return (rc < 0) ? -EIO : -EAGAIN;
717}
718
719/*
720 * Default handling if a filesystem does not provide a migration function.
721 */
722static int fallback_migrate_page(struct address_space *mapping,
723 struct page *newpage, struct page *page, enum migrate_mode mode)
724{
725 if (PageDirty(page)) {
726 /* Only writeback pages in full synchronous migration */
727 if (mode != MIGRATE_SYNC)
728 return -EBUSY;
729 return writeout(mapping, page);
730 }
731
732 /*
733 * Buffers may be managed in a filesystem specific way.
734 * We must have no buffers or drop them.
735 */
736 if (page_has_private(page) &&
737 !try_to_release_page(page, GFP_KERNEL))
738 return -EAGAIN;
739
740 return migrate_page(mapping, newpage, page, mode);
741}
742
743/*
744 * Move a page to a newly allocated page
745 * The page is locked and all ptes have been successfully removed.
746 *
747 * The new page will have replaced the old page if this function
748 * is successful.
749 *
750 * Return value:
751 * < 0 - error code
752 * MIGRATEPAGE_SUCCESS - success
753 */
754static int move_to_new_page(struct page *newpage, struct page *page,
755 enum migrate_mode mode)
756{
757 struct address_space *mapping;
758 int rc;
759
760 VM_BUG_ON_PAGE(!PageLocked(page), page);
761 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
762
763 mapping = page_mapping(page);
764 if (!mapping)
765 rc = migrate_page(mapping, newpage, page, mode);
766 else if (mapping->a_ops->migratepage)
767 /*
768 * Most pages have a mapping and most filesystems provide a
769 * migratepage callback. Anonymous pages are part of swap
770 * space which also has its own migratepage callback. This
771 * is the most common path for page migration.
772 */
773 rc = mapping->a_ops->migratepage(mapping, newpage, page, mode);
774 else
775 rc = fallback_migrate_page(mapping, newpage, page, mode);
776
777 /*
778 * When successful, old pagecache page->mapping must be cleared before
779 * page is freed; but stats require that PageAnon be left as PageAnon.
780 */
781 if (rc == MIGRATEPAGE_SUCCESS) {
782 if (!PageAnon(page))
783 page->mapping = NULL;
784 }
785 return rc;
786}
787
788static int __unmap_and_move(struct page *page, struct page *newpage,
789 int force, enum migrate_mode mode)
790{
791 int rc = -EAGAIN;
792 int page_was_mapped = 0;
793 struct anon_vma *anon_vma = NULL;
794
795 if (!trylock_page(page)) {
796 if (!force || mode == MIGRATE_ASYNC)
797 goto out;
798
799 /*
800 * It's not safe for direct compaction to call lock_page.
801 * For example, during page readahead pages are added locked
802 * to the LRU. Later, when the IO completes the pages are
803 * marked uptodate and unlocked. However, the queueing
804 * could be merging multiple pages for one bio (e.g.
805 * mpage_readpages). If an allocation happens for the
806 * second or third page, the process can end up locking
807 * the same page twice and deadlocking. Rather than
808 * trying to be clever about what pages can be locked,
809 * avoid the use of lock_page for direct compaction
810 * altogether.
811 */
812 if (current->flags & PF_MEMALLOC)
813 goto out;
814
815 lock_page(page);
816 }
817
818 if (PageWriteback(page)) {
819 /*
820 * Only in the case of a full synchronous migration is it
821 * necessary to wait for PageWriteback. In the async case,
822 * the retry loop is too short and in the sync-light case,
823 * the overhead of stalling is too much
824 */
825 if (mode != MIGRATE_SYNC) {
826 rc = -EBUSY;
827 goto out_unlock;
828 }
829 if (!force)
830 goto out_unlock;
831 wait_on_page_writeback(page);
832 }
833
834 /*
835 * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
836 * we cannot notice that anon_vma is freed while we migrate a page.
837 * This get_anon_vma() delays freeing the anon_vma pointer until the end
838 * of migration. File cache pages are no problem because of page_lock():
839 * file caches may use write_page() or lock_page() during migration, so
840 * only anonymous pages need this care here.
841 *
842 * Only page_get_anon_vma() understands the subtleties of
843 * getting a hold on an anon_vma from outside one of its mms.
844 * But if we cannot get anon_vma, then we won't need it anyway,
845 * because that implies that the anon page is no longer mapped
846 * (and cannot be remapped so long as we hold the page lock).
847 */
848 if (PageAnon(page) && !PageKsm(page))
849 anon_vma = page_get_anon_vma(page);
850
851 /*
852 * Block others from accessing the new page when we get around to
853 * establishing additional references. We are usually the only one
854 * holding a reference to newpage at this point. We used to have a BUG
855 * here if trylock_page(newpage) fails, but would like to allow for
856 * cases where there might be a race with the previous use of newpage.
857 * This is much like races on refcount of oldpage: just don't BUG().
858 */
859 if (unlikely(!trylock_page(newpage)))
860 goto out_unlock;
861
862 if (unlikely(isolated_balloon_page(page))) {
863 /*
864 * A ballooned page does not need any special attention from
865 * physical to virtual reverse mapping procedures.
866 * Skip any attempt to unmap PTEs or to remap swap cache,
867 * in order to avoid burning cycles at rmap level, and perform
868 * the page migration right away (protected by the page lock).
869 */
870 rc = balloon_page_migrate(newpage, page, mode);
871 goto out_unlock_both;
872 }
873
874 /*
875 * Corner case handling:
876 * 1. When a new swap-cache page is read in, it is added to the LRU
877 * and treated as swapcache but it has no rmap yet.
878 * Calling try_to_unmap() against a page->mapping==NULL page will
879 * trigger a BUG. So handle it here.
880 * 2. An orphaned page (see truncate_complete_page) might have
881 * fs-private metadata. The page can be picked up due to memory
882 * offlining. Everywhere else except page reclaim, the page is
883 * invisible to the vm, so the page cannot be migrated. So try to
884 * free the metadata, so the page can be freed.
885 */
886 if (!page->mapping) {
887 VM_BUG_ON_PAGE(PageAnon(page), page);
888 if (page_has_private(page)) {
889 try_to_free_buffers(page);
890 goto out_unlock_both;
891 }
892 } else if (page_mapped(page)) {
893 /* Establish migration ptes */
894 VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
895 page);
896 try_to_unmap(page,
897 TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
898 page_was_mapped = 1;
899 }
900
901 if (!page_mapped(page))
902 rc = move_to_new_page(newpage, page, mode);
903
904 if (page_was_mapped)
905 remove_migration_ptes(page,
906 rc == MIGRATEPAGE_SUCCESS ? newpage : page, false);
907
908out_unlock_both:
909 unlock_page(newpage);
910out_unlock:
911 /* Drop an anon_vma reference if we took one */
912 if (anon_vma)
913 put_anon_vma(anon_vma);
914 unlock_page(page);
915out:
916 return rc;
917}
918
919/*
920 * gcc 4.7 and 4.8 on arm get an ICE when inlining unmap_and_move(). Work
921 * around it.
922 */
923#if (GCC_VERSION >= 40700 && GCC_VERSION < 40900) && defined(CONFIG_ARM)
924#define ICE_noinline noinline
925#else
926#define ICE_noinline
927#endif
928
929/*
930 * Obtain the lock on page, remove all ptes and migrate the page
931 * to the newly allocated page in newpage.
932 */
933static ICE_noinline int unmap_and_move(new_page_t get_new_page,
934 free_page_t put_new_page,
935 unsigned long private, struct page *page,
936 int force, enum migrate_mode mode,
937 enum migrate_reason reason)
938{
939 int rc = MIGRATEPAGE_SUCCESS;
940 int *result = NULL;
941 struct page *newpage;
942
943 newpage = get_new_page(page, private, &result);
944 if (!newpage)
945 return -ENOMEM;
946
947 if (page_count(page) == 1) {
948 /* page was freed from under us. So we are done. */
949 goto out;
950 }
951
952 if (unlikely(PageTransHuge(page))) {
953 lock_page(page);
954 rc = split_huge_page(page);
955 unlock_page(page);
956 if (rc)
957 goto out;
958 }
959
960 rc = __unmap_and_move(page, newpage, force, mode);
961 if (rc == MIGRATEPAGE_SUCCESS) {
962 put_new_page = NULL;
963 set_page_owner_migrate_reason(newpage, reason);
964 }
965
966out:
967 if (rc != -EAGAIN) {
968 /*
969 * A page that has been migrated has all references
970 * removed and will be freed. A page that has not been
971 * migrated will have kept its references and be
972 * restored.
973 */
974 list_del(&page->lru);
975 dec_zone_page_state(page, NR_ISOLATED_ANON +
976 page_is_file_cache(page));
977 /* Soft-offlined page shouldn't go through lru cache list */
978 if (reason == MR_MEMORY_FAILURE && rc == MIGRATEPAGE_SUCCESS) {
979 /*
980 * With this release, we free the successfully migrated
981 * page and intentionally set PG_HWPoison on the just-freed
982 * page. Although it's rather weird, it's how the
983 * HWPoison flag works at the moment.
984 */
985 put_page(page);
986 if (!test_set_page_hwpoison(page))
987 num_poisoned_pages_inc();
988 } else
989 putback_lru_page(page);
990 }
991
992 /*
993 * If migration was not successful and there's a freeing callback, use
994 * it. Otherwise, putback_lru_page() will drop the reference grabbed
995 * during isolation.
996 */
997 if (put_new_page)
998 put_new_page(newpage, private);
999 else if (unlikely(__is_movable_balloon_page(newpage))) {
1000 /* drop our reference, page already in the balloon */
1001 put_page(newpage);
1002 } else
1003 putback_lru_page(newpage);
1004
1005 if (result) {
1006 if (rc)
1007 *result = rc;
1008 else
1009 *result = page_to_nid(newpage);
1010 }
1011 return rc;
1012}
1013
1014/*
1015 * Counterpart of unmap_and_move() for hugepage migration.
1016 *
1017 * This function doesn't wait for the completion of hugepage I/O
1018 * because there is no race between I/O and migration for hugepages.
1019 * Note that currently hugepage I/O occurs only in direct I/O
1020 * where no lock is held and PG_writeback is irrelevant,
1021 * and the writeback status of all subpages is counted in the reference
1022 * count of the head page (i.e. if all subpages of a 2MB hugepage are
1023 * under direct I/O, the reference of the head page is 512 and a bit more.)
1024 * This means that when we try to migrate a hugepage whose subpages are
1025 * doing direct I/O, some references remain after try_to_unmap() and
1026 * hugepage migration fails without data corruption.
1027 *
1028 * There is also no race when direct I/O is issued on the page under migration,
1029 * because then the pte is replaced with a migration swap entry and direct I/O code
1030 * will wait in the page fault for migration to complete.
1031 */
1032static int unmap_and_move_huge_page(new_page_t get_new_page,
1033 free_page_t put_new_page, unsigned long private,
1034 struct page *hpage, int force,
1035 enum migrate_mode mode, int reason)
1036{
1037 int rc = -EAGAIN;
1038 int *result = NULL;
1039 int page_was_mapped = 0;
1040 struct page *new_hpage;
1041 struct anon_vma *anon_vma = NULL;
1042
1043 /*
1044 * Movability of hugepages depends on the architecture and hugepage size.
1045 * This check is necessary because some callers of hugepage migration
1046 * like soft offline and memory hotremove don't walk through page
1047 * tables or check whether the hugepage is pmd-based or not before
1048 * kicking migration.
1049 */
1050 if (!hugepage_migration_supported(page_hstate(hpage))) {
1051 putback_active_hugepage(hpage);
1052 return -ENOSYS;
1053 }
1054
1055 new_hpage = get_new_page(hpage, private, &result);
1056 if (!new_hpage)
1057 return -ENOMEM;
1058
1059 if (!trylock_page(hpage)) {
1060 if (!force || mode != MIGRATE_SYNC)
1061 goto out;
1062 lock_page(hpage);
1063 }
1064
1065 if (PageAnon(hpage))
1066 anon_vma = page_get_anon_vma(hpage);
1067
1068 if (unlikely(!trylock_page(new_hpage)))
1069 goto put_anon;
1070
1071 if (page_mapped(hpage)) {
1072 try_to_unmap(hpage,
1073 TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
1074 page_was_mapped = 1;
1075 }
1076
1077 if (!page_mapped(hpage))
1078 rc = move_to_new_page(new_hpage, hpage, mode);
1079
1080 if (page_was_mapped)
1081 remove_migration_ptes(hpage,
1082 rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, false);
1083
1084 unlock_page(new_hpage);
1085
1086put_anon:
1087 if (anon_vma)
1088 put_anon_vma(anon_vma);
1089
1090 if (rc == MIGRATEPAGE_SUCCESS) {
1091 hugetlb_cgroup_migrate(hpage, new_hpage);
1092 put_new_page = NULL;
1093 set_page_owner_migrate_reason(new_hpage, reason);
1094 }
1095
1096 unlock_page(hpage);
1097out:
1098 if (rc != -EAGAIN)
1099 putback_active_hugepage(hpage);
1100
1101 /*
1102 * If migration was not successful and there's a freeing callback, use
1103 * it. Otherwise, put_page() will drop the reference grabbed during
1104 * isolation.
1105 */
1106 if (put_new_page)
1107 put_new_page(new_hpage, private);
1108 else
1109 putback_active_hugepage(new_hpage);
1110
1111 if (result) {
1112 if (rc)
1113 *result = rc;
1114 else
1115 *result = page_to_nid(new_hpage);
1116 }
1117 return rc;
1118}
1119
1120/*
1121 * migrate_pages - migrate the pages specified in a list, to the free pages
1122 * supplied as the target for the page migration
1123 *
1124 * @from: The list of pages to be migrated.
1125 * @get_new_page: The function used to allocate free pages to be used
1126 * as the target of the page migration.
1127 * @put_new_page: The function used to free target pages if migration
1128 * fails, or NULL if no special handling is necessary.
1129 * @private: Private data to be passed on to get_new_page()
1130 * @mode: The migration mode that specifies the constraints for
1131 * page migration, if any.
1132 * @reason: The reason for page migration.
1133 *
1134 * The function returns after 10 attempts or when there are no more pages
1135 * to move, either because the list has become empty or because no
1136 * retryable pages remain. The caller should call putback_movable_pages()
1137 * to return pages to the LRU or free list, but only if ret != 0.
1138 *
1139 * Returns the number of pages that were not migrated, or an error code.
1140 */
1141int migrate_pages(struct list_head *from, new_page_t get_new_page,
1142 free_page_t put_new_page, unsigned long private,
1143 enum migrate_mode mode, int reason)
1144{
1145 int retry = 1;
1146 int nr_failed = 0;
1147 int nr_succeeded = 0;
1148 int pass = 0;
1149 struct page *page;
1150 struct page *page2;
1151 int swapwrite = current->flags & PF_SWAPWRITE;
1152 int rc;
1153
1154 if (!swapwrite)
1155 current->flags |= PF_SWAPWRITE;
1156
1157 for(pass = 0; pass < 10 && retry; pass++) {
1158 retry = 0;
1159
1160 list_for_each_entry_safe(page, page2, from, lru) {
1161 cond_resched();
1162
1163 if (PageHuge(page))
1164 rc = unmap_and_move_huge_page(get_new_page,
1165 put_new_page, private, page,
1166 pass > 2, mode, reason);
1167 else
1168 rc = unmap_and_move(get_new_page, put_new_page,
1169 private, page, pass > 2, mode,
1170 reason);
1171
1172 switch(rc) {
1173 case -ENOMEM:
1174 goto out;
1175 case -EAGAIN:
1176 retry++;
1177 break;
1178 case MIGRATEPAGE_SUCCESS:
1179 nr_succeeded++;
1180 break;
1181 default:
1182 /*
1183 * Permanent failure (-EBUSY, -ENOSYS, etc.):
1184 * unlike -EAGAIN case, the failed page is
1185 * removed from migration page list and not
1186 * retried in the next outer loop.
1187 */
1188 nr_failed++;
1189 break;
1190 }
1191 }
1192 }
1193 nr_failed += retry;
1194 rc = nr_failed;
1195out:
1196 if (nr_succeeded)
1197 count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
1198 if (nr_failed)
1199 count_vm_events(PGMIGRATE_FAIL, nr_failed);
1200 trace_mm_migrate_pages(nr_succeeded, nr_failed, mode, reason);
1201
1202 if (!swapwrite)
1203 current->flags &= ~PF_SWAPWRITE;
1204
1205 return rc;
1206}
1207
1208#ifdef CONFIG_NUMA
1209/*
1210 * Move a list of individual pages
1211 */
1212struct page_to_node {
1213 unsigned long addr;
1214 struct page *page;
1215 int node;
1216 int status;
1217};
1218
1219static struct page *new_page_node(struct page *p, unsigned long private,
1220 int **result)
1221{
1222 struct page_to_node *pm = (struct page_to_node *)private;
1223
1224 while (pm->node != MAX_NUMNODES && pm->page != p)
1225 pm++;
1226
1227 if (pm->node == MAX_NUMNODES)
1228 return NULL;
1229
1230 *result = &pm->status;
1231
1232 if (PageHuge(p))
1233 return alloc_huge_page_node(page_hstate(compound_head(p)),
1234 pm->node);
1235 else
1236 return __alloc_pages_node(pm->node,
1237 GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 0);
1238}
1239
1240/*
1241 * Move a set of pages as indicated in the pm array. The addr
1242 * field must be set to the virtual address of the page to be moved
1243 * and the node number must contain a valid target node.
1244 * The pm array ends with node = MAX_NUMNODES.
1245 */
1246static int do_move_page_to_node_array(struct mm_struct *mm,
1247 struct page_to_node *pm,
1248 int migrate_all)
1249{
1250 int err;
1251 struct page_to_node *pp;
1252 LIST_HEAD(pagelist);
1253
1254 down_read(&mm->mmap_sem);
1255
1256 /*
1257 * Build a list of pages to migrate
1258 */
1259 for (pp = pm; pp->node != MAX_NUMNODES; pp++) {
1260 struct vm_area_struct *vma;
1261 struct page *page;
1262
1263 err = -EFAULT;
1264 vma = find_vma(mm, pp->addr);
1265 if (!vma || pp->addr < vma->vm_start || !vma_migratable(vma))
1266 goto set_status;
1267
1268 /* FOLL_DUMP to ignore special (like zero) pages */
1269 page = follow_page(vma, pp->addr,
1270 FOLL_GET | FOLL_SPLIT | FOLL_DUMP);
1271
1272 err = PTR_ERR(page);
1273 if (IS_ERR(page))
1274 goto set_status;
1275
1276 err = -ENOENT;
1277 if (!page)
1278 goto set_status;
1279
1280 pp->page = page;
1281 err = page_to_nid(page);
1282
1283 if (err == pp->node)
1284 /*
1285 * Node already in the right place
1286 */
1287 goto put_and_set;
1288
1289 err = -EACCES;
1290 if (page_mapcount(page) > 1 &&
1291 !migrate_all)
1292 goto put_and_set;
1293
1294 if (PageHuge(page)) {
1295 if (PageHead(page))
1296 isolate_huge_page(page, &pagelist);
1297 goto put_and_set;
1298 }
1299
1300 err = isolate_lru_page(page);
1301 if (!err) {
1302 list_add_tail(&page->lru, &pagelist);
1303 inc_zone_page_state(page, NR_ISOLATED_ANON +
1304 page_is_file_cache(page));
1305 }
1306put_and_set:
1307 /*
1308 * Either remove the duplicate refcount from
1309 * isolate_lru_page() or drop the page ref if it was
1310 * not isolated.
1311 */
1312 put_page(page);
1313set_status:
1314 pp->status = err;
1315 }
1316
1317 err = 0;
1318 if (!list_empty(&pagelist)) {
1319 err = migrate_pages(&pagelist, new_page_node, NULL,
1320 (unsigned long)pm, MIGRATE_SYNC, MR_SYSCALL);
1321 if (err)
1322 putback_movable_pages(&pagelist);
1323 }
1324
1325 up_read(&mm->mmap_sem);
1326 return err;
1327}
1328
1329/*
1330 * Migrate an array of page addresses onto an array of nodes and fill
1331 * the corresponding array of status.
1332 */
1333static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
1334 unsigned long nr_pages,
1335 const void __user * __user *pages,
1336 const int __user *nodes,
1337 int __user *status, int flags)
1338{
1339 struct page_to_node *pm;
1340 unsigned long chunk_nr_pages;
1341 unsigned long chunk_start;
1342 int err;
1343
1344 err = -ENOMEM;
1345 pm = (struct page_to_node *)__get_free_page(GFP_KERNEL);
1346 if (!pm)
1347 goto out;
1348
1349 migrate_prep();
1350
1351 /*
1352 * Store a chunk of page_to_node array in a page,
1353 * but keep the last one as a marker
1354 */
1355 chunk_nr_pages = (PAGE_SIZE / sizeof(struct page_to_node)) - 1;
1356
1357 for (chunk_start = 0;
1358 chunk_start < nr_pages;
1359 chunk_start += chunk_nr_pages) {
1360 int j;
1361
1362 if (chunk_start + chunk_nr_pages > nr_pages)
1363 chunk_nr_pages = nr_pages - chunk_start;
1364
1365 /* fill the chunk pm with addrs and nodes from user-space */
1366 for (j = 0; j < chunk_nr_pages; j++) {
1367 const void __user *p;
1368 int node;
1369
1370 err = -EFAULT;
1371 if (get_user(p, pages + j + chunk_start))
1372 goto out_pm;
1373 pm[j].addr = (unsigned long) p;
1374
1375 if (get_user(node, nodes + j + chunk_start))
1376 goto out_pm;
1377
1378 err = -ENODEV;
1379 if (node < 0 || node >= MAX_NUMNODES)
1380 goto out_pm;
1381
1382 if (!node_state(node, N_MEMORY))
1383 goto out_pm;
1384
1385 err = -EACCES;
1386 if (!node_isset(node, task_nodes))
1387 goto out_pm;
1388
1389 pm[j].node = node;
1390 }
1391
1392 /* End marker for this chunk */
1393 pm[chunk_nr_pages].node = MAX_NUMNODES;
1394
1395 /* Migrate this chunk */
1396 err = do_move_page_to_node_array(mm, pm,
1397 flags & MPOL_MF_MOVE_ALL);
1398 if (err < 0)
1399 goto out_pm;
1400
1401 /* Return status information */
1402 for (j = 0; j < chunk_nr_pages; j++)
1403 if (put_user(pm[j].status, status + j + chunk_start)) {
1404 err = -EFAULT;
1405 goto out_pm;
1406 }
1407 }
1408 err = 0;
1409
1410out_pm:
1411 free_page((unsigned long)pm);
1412out:
1413 return err;
1414}
1415
1416/*
1417 * Determine the nodes of an array of pages and store it in an array of status.
1418 */
1419static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
1420 const void __user **pages, int *status)
1421{
1422 unsigned long i;
1423
1424 down_read(&mm->mmap_sem);
1425
1426 for (i = 0; i < nr_pages; i++) {
1427 unsigned long addr = (unsigned long)(*pages);
1428 struct vm_area_struct *vma;
1429 struct page *page;
1430 int err = -EFAULT;
1431
1432 vma = find_vma(mm, addr);
1433 if (!vma || addr < vma->vm_start)
1434 goto set_status;
1435
1436 /* FOLL_DUMP to ignore special (like zero) pages */
1437 page = follow_page(vma, addr, FOLL_DUMP);
1438
1439 err = PTR_ERR(page);
1440 if (IS_ERR(page))
1441 goto set_status;
1442
1443 err = page ? page_to_nid(page) : -ENOENT;
1444set_status:
1445 *status = err;
1446
1447 pages++;
1448 status++;
1449 }
1450
1451 up_read(&mm->mmap_sem);
1452}
1453
1454/*
1455 * Determine the nodes of a user array of pages and store it in
1456 * a user array of status.
1457 */
1458static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
1459 const void __user * __user *pages,
1460 int __user *status)
1461{
1462#define DO_PAGES_STAT_CHUNK_NR 16
1463 const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
1464 int chunk_status[DO_PAGES_STAT_CHUNK_NR];
1465
1466 while (nr_pages) {
1467 unsigned long chunk_nr;
1468
1469 chunk_nr = nr_pages;
1470 if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
1471 chunk_nr = DO_PAGES_STAT_CHUNK_NR;
1472
1473 if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
1474 break;
1475
1476 do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
1477
1478 if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
1479 break;
1480
1481 pages += chunk_nr;
1482 status += chunk_nr;
1483 nr_pages -= chunk_nr;
1484 }
1485 return nr_pages ? -EFAULT : 0;
1486}
1487
1488/*
1489 * Move a list of pages in the address space of the currently executing
1490 * process.
1491 */
1492SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
1493 const void __user * __user *, pages,
1494 const int __user *, nodes,
1495 int __user *, status, int, flags)
1496{
1497 const struct cred *cred = current_cred(), *tcred;
1498 struct task_struct *task;
1499 struct mm_struct *mm;
1500 int err;
1501 nodemask_t task_nodes;
1502
1503 /* Check flags */
1504 if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
1505 return -EINVAL;
1506
1507 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1508 return -EPERM;
1509
1510 /* Find the mm_struct */
1511 rcu_read_lock();
1512 task = pid ? find_task_by_vpid(pid) : current;
1513 if (!task) {
1514 rcu_read_unlock();
1515 return -ESRCH;
1516 }
1517 get_task_struct(task);
1518
1519 /*
1520 * Check if this process has the right to modify the specified
1521 * process. The right exists if the process has administrative
1522 * capabilities, superuser privileges or the same
1523 * userid as the target process.
1524 */
1525 tcred = __task_cred(task);
1526 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
1527 !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
1528 !capable(CAP_SYS_NICE)) {
1529 rcu_read_unlock();
1530 err = -EPERM;
1531 goto out;
1532 }
1533 rcu_read_unlock();
1534
1535 err = security_task_movememory(task);
1536 if (err)
1537 goto out;
1538
1539 task_nodes = cpuset_mems_allowed(task);
1540 mm = get_task_mm(task);
1541 put_task_struct(task);
1542
1543 if (!mm)
1544 return -EINVAL;
1545
1546 if (nodes)
1547 err = do_pages_move(mm, task_nodes, nr_pages, pages,
1548 nodes, status, flags);
1549 else
1550 err = do_pages_stat(mm, nr_pages, pages, status);
1551
1552 mmput(mm);
1553 return err;
1554
1555out:
1556 put_task_struct(task);
1557 return err;
1558}
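/*
 * Illustrative user-space counterpart, not part of this file: the syscall
 * above is normally reached through libnuma's move_pages(2) wrapper. A
 * minimal sketch, assuming addr is a page-aligned address in the calling
 * process and node 1 exists:
 *
 *	#include <numaif.h>
 *
 *	void *pages[1] = { addr };
 *	int nodes[1] = { 1 };
 *	int status[1];
 *	long rc = move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE);
 *
 * On success (rc == 0), status[0] holds the node the page now resides on, or
 * a negative errno for that page. Passing nodes == NULL turns the call into
 * a pure status query, handled by do_pages_stat() above.
 */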
1559
1560#ifdef CONFIG_NUMA_BALANCING
1561/*
1562 * Returns true if this is a safe migration target node for misplaced NUMA
1563 * pages. Currently it only checks the watermarks, which is crude.
1564 */
1565static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
1566 unsigned long nr_migrate_pages)
1567{
1568 int z;
1569 for (z = pgdat->nr_zones - 1; z >= 0; z--) {
1570 struct zone *zone = pgdat->node_zones + z;
1571
1572 if (!populated_zone(zone))
1573 continue;
1574
1575 if (!zone_reclaimable(zone))
1576 continue;
1577
1578 /* Avoid waking kswapd by allocating pages_to_migrate pages. */
1579 if (!zone_watermark_ok(zone, 0,
1580 high_wmark_pages(zone) +
1581 nr_migrate_pages,
1582 0, 0))
1583 continue;
1584 return true;
1585 }
1586 return false;
1587}
1588
1589static struct page *alloc_misplaced_dst_page(struct page *page,
1590 unsigned long data,
1591 int **result)
1592{
1593 int nid = (int) data;
1594 struct page *newpage;
1595
1596 newpage = __alloc_pages_node(nid,
1597 (GFP_HIGHUSER_MOVABLE |
1598 __GFP_THISNODE | __GFP_NOMEMALLOC |
1599 __GFP_NORETRY | __GFP_NOWARN) &
1600 ~__GFP_RECLAIM, 0);
1601
1602 return newpage;
1603}
1604
1605/*
1606 * page migration rate limiting control.
1607 * Do not migrate more than ratelimit_pages in a migrate_interval_millisecs
1608 * window of time. The defaults below say do not migrate more than 1280M per second.
1609 */
1610static unsigned int migrate_interval_millisecs __read_mostly = 100;
1611static unsigned int ratelimit_pages __read_mostly = 128 << (20 - PAGE_SHIFT);
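/*
 * Worked example of the defaults above: ratelimit_pages is
 * 128 << (20 - PAGE_SHIFT) base pages, i.e. 128MB worth of memory, allowed
 * per 100ms window, which is where the "1280M per second" figure in the
 * rate-limiting comment comes from.
 */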
1612
1613/* Returns true if the node is migrate rate-limited after the update */
1614static bool numamigrate_update_ratelimit(pg_data_t *pgdat,
1615 unsigned long nr_pages)
1616{
1617 /*
1618 * Rate-limit the amount of data that is being migrated to a node.
1619 * Optimal placement is no good if the memory bus is saturated and
1620 * all the time is being spent migrating!
1621 */
1622 if (time_after(jiffies, pgdat->numabalancing_migrate_next_window)) {
1623 spin_lock(&pgdat->numabalancing_migrate_lock);
1624 pgdat->numabalancing_migrate_nr_pages = 0;
1625 pgdat->numabalancing_migrate_next_window = jiffies +
1626 msecs_to_jiffies(migrate_interval_millisecs);
1627 spin_unlock(&pgdat->numabalancing_migrate_lock);
1628 }
1629 if (pgdat->numabalancing_migrate_nr_pages > ratelimit_pages) {
1630 trace_mm_numa_migrate_ratelimit(current, pgdat->node_id,
1631 nr_pages);
1632 return true;
1633 }
1634
1635 /*
1636 * This is an unlocked non-atomic update so errors are possible.
1637 * The consequence is failing to migrate when we potentially should
1638 * have, which is not severe enough to warrant locking. If it is ever
1639 * a problem, it can be converted to a per-cpu counter.
1640 */
1641 pgdat->numabalancing_migrate_nr_pages += nr_pages;
1642 return false;
1643}
1644
1645static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
1646{
1647 int page_lru;
1648
1649 VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page);
1650
1651 /* Avoid migrating to a node that is nearly full */
1652 if (!migrate_balanced_pgdat(pgdat, 1UL << compound_order(page)))
1653 return 0;
1654
1655 if (isolate_lru_page(page))
1656 return 0;
1657
1658 /*
1659 * migrate_misplaced_transhuge_page() skips page migration's usual
1660 * check on page_count(), so we must do it here, now that the page
1661 * has been isolated: a GUP pin, or any other pin, prevents migration.
1662 * The expected page count is 3: 1 for the page's mapcount, 1 for the
1663 * caller's pin, and 1 for the reference taken by isolate_lru_page().
1664 */
1665 if (PageTransHuge(page) && page_count(page) != 3) {
1666 putback_lru_page(page);
1667 return 0;
1668 }
1669
1670 page_lru = page_is_file_cache(page);
1671 mod_zone_page_state(page_zone(page), NR_ISOLATED_ANON + page_lru,
1672 hpage_nr_pages(page));
1673
1674 /*
1675 * Isolating the page has taken another reference, so the
1676 * caller's reference can be safely dropped without the page
1677 * disappearing underneath us during migration.
1678 */
1679 put_page(page);
1680 return 1;
1681}
1682
1683bool pmd_trans_migrating(pmd_t pmd)
1684{
1685 struct page *page = pmd_page(pmd);
1686 return PageLocked(page);
1687}
1688
1689/*
1690 * Attempt to migrate a misplaced page to the specified destination
1691 * node. Caller is expected to have an elevated reference count on
1692 * the page that will be dropped by this function before returning.
1693 */
1694int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
1695 int node)
1696{
1697 pg_data_t *pgdat = NODE_DATA(node);
1698 int isolated;
1699 int nr_remaining;
1700 LIST_HEAD(migratepages);
1701
1702 /*
1703 * Don't migrate file pages that are mapped in multiple processes
1704 * with execute permissions as they are probably shared libraries.
1705 */
1706 if (page_mapcount(page) != 1 && page_is_file_cache(page) &&
1707 (vma->vm_flags & VM_EXEC))
1708 goto out;
1709
1710 /*
1711 * Rate-limit the amount of data that is being migrated to a node.
1712 * Optimal placement is no good if the memory bus is saturated and
1713 * all the time is being spent migrating!
1714 */
1715 if (numamigrate_update_ratelimit(pgdat, 1))
1716 goto out;
1717
1718 isolated = numamigrate_isolate_page(pgdat, page);
1719 if (!isolated)
1720 goto out;
1721
1722 list_add(&page->lru, &migratepages);
1723 nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
1724 NULL, node, MIGRATE_ASYNC,
1725 MR_NUMA_MISPLACED);
1726 if (nr_remaining) {
1727 if (!list_empty(&migratepages)) {
1728 list_del(&page->lru);
1729 dec_zone_page_state(page, NR_ISOLATED_ANON +
1730 page_is_file_cache(page));
1731 putback_lru_page(page);
1732 }
1733 isolated = 0;
1734 } else
1735 count_vm_numa_event(NUMA_PAGE_MIGRATE);
1736 BUG_ON(!list_empty(&migratepages));
1737 return isolated;
1738
1739out:
1740 put_page(page);
1741 return 0;
1742}
1743#endif /* CONFIG_NUMA_BALANCING */
1744
1745#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
1746/*
1747 * Migrates a THP to a given target node. page must be locked and is unlocked
1748 * before returning.
1749 */
1750int migrate_misplaced_transhuge_page(struct mm_struct *mm,
1751 struct vm_area_struct *vma,
1752 pmd_t *pmd, pmd_t entry,
1753 unsigned long address,
1754 struct page *page, int node)
1755{
1756 spinlock_t *ptl;
1757 pg_data_t *pgdat = NODE_DATA(node);
1758 int isolated = 0;
1759 struct page *new_page = NULL;
1760 int page_lru = page_is_file_cache(page);
1761 unsigned long mmun_start = address & HPAGE_PMD_MASK;
1762 unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE;
1763 pmd_t orig_entry;
1764
1765 /*
1766 * Rate-limit the amount of data that is being migrated to a node.
1767 * Optimal placement is no good if the memory bus is saturated and
1768 * all the time is being spent migrating!
1769 */
1770 if (numamigrate_update_ratelimit(pgdat, HPAGE_PMD_NR))
1771 goto out_dropref;
1772
1773 new_page = alloc_pages_node(node,
1774 (GFP_TRANSHUGE | __GFP_THISNODE) & ~__GFP_RECLAIM,
1775 HPAGE_PMD_ORDER);
1776 if (!new_page)
1777 goto out_fail;
1778 prep_transhuge_page(new_page);
1779
1780 isolated = numamigrate_isolate_page(pgdat, page);
1781 if (!isolated) {
1782 put_page(new_page);
1783 goto out_fail;
1784 }
1785 /*
1786 * We are not sure a pending tlb flush here is for a huge page
1787 * mapping or not. Hence use the tlb range variant
1788 */
1789 if (mm_tlb_flush_pending(mm))
1790 flush_tlb_range(vma, mmun_start, mmun_end);
1791
1792 /* Prepare a page as a migration target */
1793 __SetPageLocked(new_page);
1794 SetPageSwapBacked(new_page);
1795
1796 /* anon mapping, we can simply copy page->mapping to the new page: */
1797 new_page->mapping = page->mapping;
1798 new_page->index = page->index;
1799 migrate_page_copy(new_page, page);
1800 WARN_ON(PageLRU(new_page));
1801
1802 /* Recheck the target PMD */
1803 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
1804 ptl = pmd_lock(mm, pmd);
1805 if (unlikely(!pmd_same(*pmd, entry) || page_count(page) != 2)) {
1806fail_putback:
1807 spin_unlock(ptl);
1808 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1809
1810 /* Reverse changes made by migrate_page_copy() */
1811 if (TestClearPageActive(new_page))
1812 SetPageActive(page);
1813 if (TestClearPageUnevictable(new_page))
1814 SetPageUnevictable(page);
1815
1816 unlock_page(new_page);
1817 put_page(new_page); /* Free it */
1818
1819 /* Retake the callers reference and putback on LRU */
1820 get_page(page);
1821 putback_lru_page(page);
1822 mod_zone_page_state(page_zone(page),
1823 NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);
1824
1825 goto out_unlock;
1826 }
1827
1828 orig_entry = *pmd;
1829 entry = mk_pmd(new_page, vma->vm_page_prot);
1830 entry = pmd_mkhuge(entry);
1831 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1832
1833 /*
1834 * Clear the old entry under pagetable lock and establish the new PTE.
1835 * Any parallel GUP will either observe the old page blocking on the
1836 * page lock, block on the page table lock or observe the new page.
1837 * The SetPageUptodate on the new page and page_add_new_anon_rmap
1838 * guarantee the copy is visible before the pagetable update.
1839 */
1840 flush_cache_range(vma, mmun_start, mmun_end);
1841 page_add_anon_rmap(new_page, vma, mmun_start, true);
1842 pmdp_huge_clear_flush_notify(vma, mmun_start, pmd);
1843 set_pmd_at(mm, mmun_start, pmd, entry);
1844 update_mmu_cache_pmd(vma, address, &entry);
1845
1846 if (page_count(page) != 2) {
1847 set_pmd_at(mm, mmun_start, pmd, orig_entry);
1848 flush_pmd_tlb_range(vma, mmun_start, mmun_end);
1849 mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
1850 update_mmu_cache_pmd(vma, address, &entry);
1851 page_remove_rmap(new_page, true);
1852 goto fail_putback;
1853 }
1854
1855 mlock_migrate_page(new_page, page);
1856 page_remove_rmap(page, true);
1857 set_page_owner_migrate_reason(new_page, MR_NUMA_MISPLACED);
1858
1859 spin_unlock(ptl);
1860 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1861
1862 /* Take an "isolate" reference and put new page on the LRU. */
1863 get_page(new_page);
1864 putback_lru_page(new_page);
1865
1866 unlock_page(new_page);
1867 unlock_page(page);
1868 put_page(page); /* Drop the rmap reference */
1869 put_page(page); /* Drop the LRU isolation reference */
1870
1871 count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR);
1872 count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR);
1873
1874 mod_zone_page_state(page_zone(page),
1875 NR_ISOLATED_ANON + page_lru,
1876 -HPAGE_PMD_NR);
1877 return isolated;
1878
1879out_fail:
1880 count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
1881out_dropref:
1882 ptl = pmd_lock(mm, pmd);
1883 if (pmd_same(*pmd, entry)) {
1884 entry = pmd_modify(entry, vma->vm_page_prot);
1885 set_pmd_at(mm, mmun_start, pmd, entry);
1886 update_mmu_cache_pmd(vma, address, &entry);
1887 }
1888 spin_unlock(ptl);
1889
1890out_unlock:
1891 unlock_page(page);
1892 put_page(page);
1893 return 0;
1894}
1895#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE */
1896
1897#endif /* CONFIG_NUMA */
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Memory Migration functionality - linux/mm/migrate.c
4 *
5 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
6 *
7 * Page migration was first developed in the context of the memory hotplug
8 * project. The main authors of the migration code are:
9 *
10 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
11 * Hirokazu Takahashi <taka@valinux.co.jp>
12 * Dave Hansen <haveblue@us.ibm.com>
13 * Christoph Lameter
14 */
15
16#include <linux/migrate.h>
17#include <linux/export.h>
18#include <linux/swap.h>
19#include <linux/swapops.h>
20#include <linux/pagemap.h>
21#include <linux/buffer_head.h>
22#include <linux/mm_inline.h>
23#include <linux/nsproxy.h>
24#include <linux/ksm.h>
25#include <linux/rmap.h>
26#include <linux/topology.h>
27#include <linux/cpu.h>
28#include <linux/cpuset.h>
29#include <linux/writeback.h>
30#include <linux/mempolicy.h>
31#include <linux/vmalloc.h>
32#include <linux/security.h>
33#include <linux/backing-dev.h>
34#include <linux/compaction.h>
35#include <linux/syscalls.h>
36#include <linux/compat.h>
37#include <linux/hugetlb.h>
38#include <linux/hugetlb_cgroup.h>
39#include <linux/gfp.h>
40#include <linux/pfn_t.h>
41#include <linux/memremap.h>
42#include <linux/userfaultfd_k.h>
43#include <linux/balloon_compaction.h>
44#include <linux/page_idle.h>
45#include <linux/page_owner.h>
46#include <linux/sched/mm.h>
47#include <linux/ptrace.h>
48#include <linux/oom.h>
49#include <linux/memory.h>
50#include <linux/random.h>
51#include <linux/sched/sysctl.h>
52#include <linux/memory-tiers.h>
53
54#include <asm/tlbflush.h>
55
56#include <trace/events/migrate.h>
57
58#include "internal.h"
59
60bool isolate_movable_page(struct page *page, isolate_mode_t mode)
61{
62 struct folio *folio = folio_get_nontail_page(page);
63 const struct movable_operations *mops;
64
65 /*
66 * Avoid burning cycles with pages that are yet under __free_pages(),
67 * or just got freed under us.
68 *
69 * In case we 'win' a race for a movable page being freed under us and
70 * raise its refcount preventing __free_pages() from doing its job
71 * the put_page() at the end of this block will take care of
72 * release this page, thus avoiding a nasty leakage.
73 */
74 if (!folio)
75 goto out;
76
77 if (unlikely(folio_test_slab(folio)))
78 goto out_putfolio;
79 /* Pairs with smp_wmb() in slab freeing, e.g. SLUB's __free_slab() */
80 smp_rmb();
81	/*
82	 * Check the movable flag before taking the page lock because
83	 * we use non-atomic bitops on newly allocated page flags, so
84	 * unconditionally grabbing the lock would ruin the page owner's side.
85	 */
86 if (unlikely(!__folio_test_movable(folio)))
87 goto out_putfolio;
88 /* Pairs with smp_wmb() in slab allocation, e.g. SLUB's alloc_slab_page() */
89 smp_rmb();
90 if (unlikely(folio_test_slab(folio)))
91 goto out_putfolio;
92
93	/*
94	 * As movable pages are not isolated from LRU lists, concurrent
95	 * compaction threads can race against page migration functions
96	 * as well as against the release of a page.
97	 *
98	 * In order to avoid having an already isolated movable page
99	 * being (wrongly) re-isolated while it is under migration,
100	 * or to avoid attempting to isolate pages being released,
101	 * let's be sure we have the page lock
102	 * before proceeding with the movable page isolation steps.
103	 */
104 if (unlikely(!folio_trylock(folio)))
105 goto out_putfolio;
106
107 if (!folio_test_movable(folio) || folio_test_isolated(folio))
108 goto out_no_isolated;
109
110 mops = folio_movable_ops(folio);
111 VM_BUG_ON_FOLIO(!mops, folio);
112
113 if (!mops->isolate_page(&folio->page, mode))
114 goto out_no_isolated;
115
116 /* Driver shouldn't use PG_isolated bit of page->flags */
117 WARN_ON_ONCE(folio_test_isolated(folio));
118 folio_set_isolated(folio);
119 folio_unlock(folio);
120
121 return true;
122
123out_no_isolated:
124 folio_unlock(folio);
125out_putfolio:
126 folio_put(folio);
127out:
128 return false;
129}
130
131static void putback_movable_folio(struct folio *folio)
132{
133 const struct movable_operations *mops = folio_movable_ops(folio);
134
135 mops->putback_page(&folio->page);
136 folio_clear_isolated(folio);
137}
138
139/*
140 * Put previously isolated pages back onto the appropriate lists
141 * from where they were once taken off for compaction/migration.
142 *
143 * This function shall be used whenever the isolated pageset has been
144 * built from lru, balloon, hugetlbfs page. See isolate_migratepages_range()
145 * and isolate_hugetlb().
146 */
147void putback_movable_pages(struct list_head *l)
148{
149 struct folio *folio;
150 struct folio *folio2;
151
152 list_for_each_entry_safe(folio, folio2, l, lru) {
153 if (unlikely(folio_test_hugetlb(folio))) {
154 folio_putback_active_hugetlb(folio);
155 continue;
156 }
157 list_del(&folio->lru);
158		/*
159		 * We isolated a non-LRU movable folio, so here we can use
160		 * __folio_test_movable() because an LRU folio's mapping cannot
161		 * have PAGE_MAPPING_MOVABLE set.
162		 */
163 if (unlikely(__folio_test_movable(folio))) {
164 VM_BUG_ON_FOLIO(!folio_test_isolated(folio), folio);
165 folio_lock(folio);
166 if (folio_test_movable(folio))
167 putback_movable_folio(folio);
168 else
169 folio_clear_isolated(folio);
170 folio_unlock(folio);
171 folio_put(folio);
172 } else {
173 node_stat_mod_folio(folio, NR_ISOLATED_ANON +
174 folio_is_file_lru(folio), -folio_nr_pages(folio));
175 folio_putback_lru(folio);
176 }
177 }
178}
179
180/*
181 * Restore a potential migration pte to a working pte entry
182 */
183static bool remove_migration_pte(struct folio *folio,
184 struct vm_area_struct *vma, unsigned long addr, void *old)
185{
186 DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION);
187
188 while (page_vma_mapped_walk(&pvmw)) {
189 rmap_t rmap_flags = RMAP_NONE;
190 pte_t old_pte;
191 pte_t pte;
192 swp_entry_t entry;
193 struct page *new;
194 unsigned long idx = 0;
195
196 /* pgoff is invalid for ksm pages, but they are never large */
197 if (folio_test_large(folio) && !folio_test_hugetlb(folio))
198 idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff;
199 new = folio_page(folio, idx);
200
201#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
202 /* PMD-mapped THP migration entry */
203 if (!pvmw.pte) {
204 VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
205 !folio_test_pmd_mappable(folio), folio);
206 remove_migration_pmd(&pvmw, new);
207 continue;
208 }
209#endif
210
211 folio_get(folio);
212 pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
213 old_pte = ptep_get(pvmw.pte);
214
215 entry = pte_to_swp_entry(old_pte);
216 if (!is_migration_entry_young(entry))
217 pte = pte_mkold(pte);
218 if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
219 pte = pte_mkdirty(pte);
220 if (pte_swp_soft_dirty(old_pte))
221 pte = pte_mksoft_dirty(pte);
222 else
223 pte = pte_clear_soft_dirty(pte);
224
225 if (is_writable_migration_entry(entry))
226 pte = pte_mkwrite(pte, vma);
227 else if (pte_swp_uffd_wp(old_pte))
228 pte = pte_mkuffd_wp(pte);
229
230 if (folio_test_anon(folio) && !is_readable_migration_entry(entry))
231 rmap_flags |= RMAP_EXCLUSIVE;
232
233 if (unlikely(is_device_private_page(new))) {
234 if (pte_write(pte))
235 entry = make_writable_device_private_entry(
236 page_to_pfn(new));
237 else
238 entry = make_readable_device_private_entry(
239 page_to_pfn(new));
240 pte = swp_entry_to_pte(entry);
241 if (pte_swp_soft_dirty(old_pte))
242 pte = pte_swp_mksoft_dirty(pte);
243 if (pte_swp_uffd_wp(old_pte))
244 pte = pte_swp_mkuffd_wp(pte);
245 }
246
247#ifdef CONFIG_HUGETLB_PAGE
248 if (folio_test_hugetlb(folio)) {
249 struct hstate *h = hstate_vma(vma);
250 unsigned int shift = huge_page_shift(h);
251 unsigned long psize = huge_page_size(h);
252
253 pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
254 if (folio_test_anon(folio))
255 hugetlb_add_anon_rmap(folio, vma, pvmw.address,
256 rmap_flags);
257 else
258 hugetlb_add_file_rmap(folio);
259 set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte,
260 psize);
261 } else
262#endif
263 {
264 if (folio_test_anon(folio))
265 folio_add_anon_rmap_pte(folio, new, vma,
266 pvmw.address, rmap_flags);
267 else
268 folio_add_file_rmap_pte(folio, new, vma);
269 set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
270 }
271 if (vma->vm_flags & VM_LOCKED)
272 mlock_drain_local();
273
274 trace_remove_migration_pte(pvmw.address, pte_val(pte),
275 compound_order(new));
276
277 /* No need to invalidate - it was non-present before */
278 update_mmu_cache(vma, pvmw.address, pvmw.pte);
279 }
280
281 return true;
282}
283
284/*
285 * Get rid of all migration entries and replace them with
286 * references to the indicated folio.
287 */
288void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked)
289{
290 struct rmap_walk_control rwc = {
291 .rmap_one = remove_migration_pte,
292 .arg = src,
293 };
294
295 if (locked)
296 rmap_walk_locked(dst, &rwc);
297 else
298 rmap_walk(dst, &rwc);
299}
300
301/*
302 * Something used the pte of a page under migration. We need to
303 * get to the page and wait until migration is finished.
304 * When we return from this function the fault will be retried.
305 */
306void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
307 unsigned long address)
308{
309 spinlock_t *ptl;
310 pte_t *ptep;
311 pte_t pte;
312 swp_entry_t entry;
313
314 ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
315 if (!ptep)
316 return;
317
318 pte = ptep_get(ptep);
319 pte_unmap(ptep);
320
321 if (!is_swap_pte(pte))
322 goto out;
323
324 entry = pte_to_swp_entry(pte);
325 if (!is_migration_entry(entry))
326 goto out;
327
328 migration_entry_wait_on_locked(entry, ptl);
329 return;
330out:
331 spin_unlock(ptl);
332}
333
334#ifdef CONFIG_HUGETLB_PAGE
335/*
336 * The vma read lock must be held upon entry. Holding that lock prevents either
337 * the pte or the ptl from being freed.
338 *
339 * This function will release the vma lock before returning.
340 */
341void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *ptep)
342{
343 spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, ptep);
344 pte_t pte;
345
346 hugetlb_vma_assert_locked(vma);
347 spin_lock(ptl);
348 pte = huge_ptep_get(ptep);
349
350 if (unlikely(!is_hugetlb_entry_migration(pte))) {
351 spin_unlock(ptl);
352 hugetlb_vma_unlock_read(vma);
353 } else {
354 /*
355 * If migration entry existed, safe to release vma lock
356 * here because the pgtable page won't be freed without the
357 * pgtable lock released. See comment right above pgtable
358 * lock release in migration_entry_wait_on_locked().
359 */
360 hugetlb_vma_unlock_read(vma);
361 migration_entry_wait_on_locked(pte_to_swp_entry(pte), ptl);
362 }
363}
364#endif
365
366#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
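/*
 * PMD-level counterpart of migration_entry_wait() above.  Takes the pmd
 * lock; if *pmd is still a PMD migration entry, waits (handing the lock
 * over to migration_entry_wait_on_locked()) until the migration finishes,
 * otherwise simply drops the lock so the fault can be retried.
 */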
367void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
368{
369 spinlock_t *ptl;
370
371 ptl = pmd_lock(mm, pmd);
372 if (!is_pmd_migration_entry(*pmd))
373 goto unlock;
374 migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), ptl);
375 return;
376unlock:
377 spin_unlock(ptl);
378}
379#endif
380
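/*
 * Number of references the migration code expects to find on a folio that
 * is safe to migrate: one for the caller (typically taken when the folio
 * was isolated), plus one per page from the page/swap cache if the folio
 * has a mapping, plus one more if private data (e.g. buffer heads) is
 * attached.  For example, an order-2 pagecache folio with buffer heads
 * attached is expected to have 1 + 4 + 1 = 6 references; anything above
 * that means someone else still holds the folio and migration backs off
 * with -EAGAIN.
 */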
381static int folio_expected_refs(struct address_space *mapping,
382 struct folio *folio)
383{
384 int refs = 1;
385 if (!mapping)
386 return refs;
387
388 refs += folio_nr_pages(folio);
389 if (folio_test_private(folio))
390 refs++;
391
392 return refs;
393}
394
395/*
396 * Replace the folio in the mapping.
397 *
398 * The number of remaining references must be:
399 * 1 for anonymous folios without a mapping
400 * 2 for folios with a mapping (plus 1 for each extra page of a large folio)
401 * 3 for folios with a mapping and PagePrivate/PagePrivate2 set.
402 */
403int folio_migrate_mapping(struct address_space *mapping,
404 struct folio *newfolio, struct folio *folio, int extra_count)
405{
406 XA_STATE(xas, &mapping->i_pages, folio_index(folio));
407 struct zone *oldzone, *newzone;
408 int dirty;
409 int expected_count = folio_expected_refs(mapping, folio) + extra_count;
410 long nr = folio_nr_pages(folio);
411 long entries, i;
412
413 if (!mapping) {
414 /* Anonymous page without mapping */
415 if (folio_ref_count(folio) != expected_count)
416 return -EAGAIN;
417
418 /* No turning back from here */
419 newfolio->index = folio->index;
420 newfolio->mapping = folio->mapping;
421 if (folio_test_swapbacked(folio))
422 __folio_set_swapbacked(newfolio);
423
424 return MIGRATEPAGE_SUCCESS;
425 }
426
427 oldzone = folio_zone(folio);
428 newzone = folio_zone(newfolio);
429
430 xas_lock_irq(&xas);
431 if (!folio_ref_freeze(folio, expected_count)) {
432 xas_unlock_irq(&xas);
433 return -EAGAIN;
434 }
435
436 /*
437 * Now we know that no one else is looking at the folio:
438 * no turning back from here.
439 */
440 newfolio->index = folio->index;
441 newfolio->mapping = folio->mapping;
442 folio_ref_add(newfolio, nr); /* add cache reference */
443 if (folio_test_swapbacked(folio)) {
444 __folio_set_swapbacked(newfolio);
445 if (folio_test_swapcache(folio)) {
446 folio_set_swapcache(newfolio);
447 newfolio->private = folio_get_private(folio);
448 }
449 entries = nr;
450 } else {
451 VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
452 entries = 1;
453 }
454
455 /* Move dirty while page refs frozen and newpage not yet exposed */
456 dirty = folio_test_dirty(folio);
457 if (dirty) {
458 folio_clear_dirty(folio);
459 folio_set_dirty(newfolio);
460 }
461
462 /* Swap cache still stores N entries instead of a high-order entry */
463 for (i = 0; i < entries; i++) {
464 xas_store(&xas, newfolio);
465 xas_next(&xas);
466 }
467
468 /*
469 * Drop cache reference from old page by unfreezing
470 * to one less reference.
471 * We know this isn't the last reference.
472 */
473 folio_ref_unfreeze(folio, expected_count - nr);
474
475 xas_unlock(&xas);
476 /* Leave irq disabled to prevent preemption while updating stats */
477
478 /*
479 * If moved to a different zone then also account
480 * the page for that zone. Other VM counters will be
481 * taken care of when we establish references to the
482 * new page and drop references to the old page.
483 *
484 * Note that anonymous pages are accounted for
485 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
486 * are mapped to swap space.
487 */
488 if (newzone != oldzone) {
489 struct lruvec *old_lruvec, *new_lruvec;
490 struct mem_cgroup *memcg;
491
492 memcg = folio_memcg(folio);
493 old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
494 new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
495
496 __mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
497 __mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
498 if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
499 __mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
500 __mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
501
502 if (folio_test_pmd_mappable(folio)) {
503 __mod_lruvec_state(old_lruvec, NR_SHMEM_THPS, -nr);
504 __mod_lruvec_state(new_lruvec, NR_SHMEM_THPS, nr);
505 }
506 }
507#ifdef CONFIG_SWAP
508 if (folio_test_swapcache(folio)) {
509 __mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
510 __mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
511 }
512#endif
513 if (dirty && mapping_can_writeback(mapping)) {
514 __mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
515 __mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
516 __mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
517 __mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
518 }
519 }
520 local_irq_enable();
521
522 return MIGRATEPAGE_SUCCESS;
523}
524EXPORT_SYMBOL(folio_migrate_mapping);
525
526/*
527 * The expected number of remaining references is the same as that
528 * of folio_migrate_mapping().
529 */
530int migrate_huge_page_move_mapping(struct address_space *mapping,
531 struct folio *dst, struct folio *src)
532{
533 XA_STATE(xas, &mapping->i_pages, folio_index(src));
534 int expected_count;
535
536 xas_lock_irq(&xas);
537 expected_count = folio_expected_refs(mapping, src);
538 if (!folio_ref_freeze(src, expected_count)) {
539 xas_unlock_irq(&xas);
540 return -EAGAIN;
541 }
542
543 dst->index = src->index;
544 dst->mapping = src->mapping;
545
546 folio_ref_add(dst, folio_nr_pages(dst));
547
548 xas_store(&xas, dst);
549
550 folio_ref_unfreeze(src, expected_count - folio_nr_pages(src));
551
552 xas_unlock_irq(&xas);
553
554 return MIGRATEPAGE_SUCCESS;
555}
556
557/*
558 * Copy the flags and some other ancillary information
559 */
560void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
561{
562 int cpupid;
563
564 if (folio_test_error(folio))
565 folio_set_error(newfolio);
566 if (folio_test_referenced(folio))
567 folio_set_referenced(newfolio);
568 if (folio_test_uptodate(folio))
569 folio_mark_uptodate(newfolio);
570 if (folio_test_clear_active(folio)) {
571 VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio);
572 folio_set_active(newfolio);
573 } else if (folio_test_clear_unevictable(folio))
574 folio_set_unevictable(newfolio);
575 if (folio_test_workingset(folio))
576 folio_set_workingset(newfolio);
577 if (folio_test_checked(folio))
578 folio_set_checked(newfolio);
579	/*
580	 * PG_anon_exclusive (-> PG_mappedtodisk) is always migrated via
581	 * migration entries. We can still have PG_anon_exclusive set on the
582	 * effectively unmapped and unreferenced first sub-page of an
583	 * anonymous THP: we can simply copy it here via PG_mappedtodisk.
584	 */
585 if (folio_test_mappedtodisk(folio))
586 folio_set_mappedtodisk(newfolio);
587
588 /* Move dirty on pages not done by folio_migrate_mapping() */
589 if (folio_test_dirty(folio))
590 folio_set_dirty(newfolio);
591
592 if (folio_test_young(folio))
593 folio_set_young(newfolio);
594 if (folio_test_idle(folio))
595 folio_set_idle(newfolio);
596
597 /*
598 * Copy NUMA information to the new page, to prevent over-eager
599 * future migrations of this same page.
600 */
601 cpupid = folio_xchg_last_cpupid(folio, -1);
602	/*
603	 * For memory tiering mode, when migrating between slow and fast
604	 * memory nodes, reset cpupid, because it is used to record
605	 * page access time in slow memory nodes.
606	 */
607 if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) {
608 bool f_toptier = node_is_toptier(folio_nid(folio));
609 bool t_toptier = node_is_toptier(folio_nid(newfolio));
610
611 if (f_toptier != t_toptier)
612 cpupid = -1;
613 }
614 folio_xchg_last_cpupid(newfolio, cpupid);
615
616 folio_migrate_ksm(newfolio, folio);
617 /*
618 * Please do not reorder this without considering how mm/ksm.c's
619 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
620 */
621 if (folio_test_swapcache(folio))
622 folio_clear_swapcache(folio);
623 folio_clear_private(folio);
624
625 /* page->private contains hugetlb specific flags */
626 if (!folio_test_hugetlb(folio))
627 folio->private = NULL;
628
629 /*
630 * If any waiters have accumulated on the new page then
631 * wake them up.
632 */
633 if (folio_test_writeback(newfolio))
634 folio_end_writeback(newfolio);
635
636	/*
637	 * PG_readahead shares the same bit with PG_reclaim. The above
638	 * folio_end_writeback() may clear PG_readahead mistakenly, so set
639	 * the bit after that.
640	 */
641 if (folio_test_readahead(folio))
642 folio_set_readahead(newfolio);
643
644 folio_copy_owner(newfolio, folio);
645
646 mem_cgroup_migrate(folio, newfolio);
647}
648EXPORT_SYMBOL(folio_migrate_flags);
649
650void folio_migrate_copy(struct folio *newfolio, struct folio *folio)
651{
652 folio_copy(newfolio, folio);
653 folio_migrate_flags(newfolio, folio);
654}
655EXPORT_SYMBOL(folio_migrate_copy);
656
657/************************************************************
658 * Migration functions
659 ***********************************************************/
660
661int migrate_folio_extra(struct address_space *mapping, struct folio *dst,
662 struct folio *src, enum migrate_mode mode, int extra_count)
663{
664 int rc;
665
666 BUG_ON(folio_test_writeback(src)); /* Writeback must be complete */
667
668 rc = folio_migrate_mapping(mapping, dst, src, extra_count);
669
670 if (rc != MIGRATEPAGE_SUCCESS)
671 return rc;
672
673 if (mode != MIGRATE_SYNC_NO_COPY)
674 folio_migrate_copy(dst, src);
675 else
676 folio_migrate_flags(dst, src);
677 return MIGRATEPAGE_SUCCESS;
678}
679
680/**
681 * migrate_folio() - Simple folio migration.
682 * @mapping: The address_space containing the folio.
683 * @dst: The folio to migrate the data to.
684 * @src: The folio containing the current data.
685 * @mode: How to migrate the page.
686 *
687 * Common logic to directly migrate a single LRU folio suitable for
688 * folios that do not use PagePrivate/PagePrivate2.
689 *
690 * Folios are locked upon entry and exit.
691 */
692int migrate_folio(struct address_space *mapping, struct folio *dst,
693 struct folio *src, enum migrate_mode mode)
694{
695 return migrate_folio_extra(mapping, dst, src, mode, 0);
696}
697EXPORT_SYMBOL(migrate_folio);
698
699#ifdef CONFIG_BUFFER_HEAD
700/* Returns true if all buffers are successfully locked */
701static bool buffer_migrate_lock_buffers(struct buffer_head *head,
702 enum migrate_mode mode)
703{
704 struct buffer_head *bh = head;
705 struct buffer_head *failed_bh;
706
707 do {
708 if (!trylock_buffer(bh)) {
709 if (mode == MIGRATE_ASYNC)
710 goto unlock;
711 if (mode == MIGRATE_SYNC_LIGHT && !buffer_uptodate(bh))
712 goto unlock;
713 lock_buffer(bh);
714 }
715
716 bh = bh->b_this_page;
717 } while (bh != head);
718
719 return true;
720
721unlock:
722 /* We failed to lock the buffer and cannot stall. */
723 failed_bh = bh;
724 bh = head;
725 while (bh != failed_bh) {
726 unlock_buffer(bh);
727 bh = bh->b_this_page;
728 }
729
730 return false;
731}
732
733static int __buffer_migrate_folio(struct address_space *mapping,
734 struct folio *dst, struct folio *src, enum migrate_mode mode,
735 bool check_refs)
736{
737 struct buffer_head *bh, *head;
738 int rc;
739 int expected_count;
740
741 head = folio_buffers(src);
742 if (!head)
743 return migrate_folio(mapping, dst, src, mode);
744
745	/* Check that the folio does not have extra refs before we do more work */
746 expected_count = folio_expected_refs(mapping, src);
747 if (folio_ref_count(src) != expected_count)
748 return -EAGAIN;
749
750 if (!buffer_migrate_lock_buffers(head, mode))
751 return -EAGAIN;
752
753 if (check_refs) {
754 bool busy;
755 bool invalidated = false;
756
757recheck_buffers:
758 busy = false;
759 spin_lock(&mapping->i_private_lock);
760 bh = head;
761 do {
762 if (atomic_read(&bh->b_count)) {
763 busy = true;
764 break;
765 }
766 bh = bh->b_this_page;
767 } while (bh != head);
768 if (busy) {
769 if (invalidated) {
770 rc = -EAGAIN;
771 goto unlock_buffers;
772 }
773 spin_unlock(&mapping->i_private_lock);
774 invalidate_bh_lrus();
775 invalidated = true;
776 goto recheck_buffers;
777 }
778 }
779
780 rc = folio_migrate_mapping(mapping, dst, src, 0);
781 if (rc != MIGRATEPAGE_SUCCESS)
782 goto unlock_buffers;
783
784 folio_attach_private(dst, folio_detach_private(src));
785
786 bh = head;
787 do {
788 folio_set_bh(bh, dst, bh_offset(bh));
789 bh = bh->b_this_page;
790 } while (bh != head);
791
792 if (mode != MIGRATE_SYNC_NO_COPY)
793 folio_migrate_copy(dst, src);
794 else
795 folio_migrate_flags(dst, src);
796
797 rc = MIGRATEPAGE_SUCCESS;
798unlock_buffers:
799 if (check_refs)
800 spin_unlock(&mapping->i_private_lock);
801 bh = head;
802 do {
803 unlock_buffer(bh);
804 bh = bh->b_this_page;
805 } while (bh != head);
806
807 return rc;
808}
809
810/**
811 * buffer_migrate_folio() - Migration function for folios with buffers.
812 * @mapping: The address space containing @src.
813 * @dst: The folio to migrate to.
814 * @src: The folio to migrate from.
815 * @mode: How to migrate the folio.
816 *
817 * This function can only be used if the underlying filesystem guarantees
818 * that no other references to @src exist. For example attached buffer
819 * heads are accessed only under the folio lock. If your filesystem cannot
820 * provide this guarantee, buffer_migrate_folio_norefs() may be more
821 * appropriate.
822 *
823 * Return: 0 on success or a negative errno on failure.
824 */
825int buffer_migrate_folio(struct address_space *mapping,
826 struct folio *dst, struct folio *src, enum migrate_mode mode)
827{
828 return __buffer_migrate_folio(mapping, dst, src, mode, false);
829}
830EXPORT_SYMBOL(buffer_migrate_folio);
831
832/**
833 * buffer_migrate_folio_norefs() - Migration function for folios with buffers.
834 * @mapping: The address space containing @src.
835 * @dst: The folio to migrate to.
836 * @src: The folio to migrate from.
837 * @mode: How to migrate the folio.
838 *
839 * Like buffer_migrate_folio() except that this variant is more careful
840 * and checks that there are also no buffer head references. This function
841 * is the right one for mappings where buffer heads are directly looked
842 * up and referenced (such as block device mappings).
843 *
844 * Return: 0 on success or a negative errno on failure.
845 */
846int buffer_migrate_folio_norefs(struct address_space *mapping,
847 struct folio *dst, struct folio *src, enum migrate_mode mode)
848{
849 return __buffer_migrate_folio(mapping, dst, src, mode, true);
850}
851EXPORT_SYMBOL_GPL(buffer_migrate_folio_norefs);
852#endif /* CONFIG_BUFFER_HEAD */
853
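/*
 * filemap_migrate_folio() - migrate a pagecache folio, carrying folio
 * private data across without interpreting it.
 *
 * Like migrate_folio(), but also moves any value attached via
 * folio_attach_private() from @src to @dst.  Typically wired up as a
 * filesystem's ->migrate_folio address_space operation when the private
 * data needs no special handling during migration.
 */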
854int filemap_migrate_folio(struct address_space *mapping,
855 struct folio *dst, struct folio *src, enum migrate_mode mode)
856{
857 int ret;
858
859 ret = folio_migrate_mapping(mapping, dst, src, 0);
860 if (ret != MIGRATEPAGE_SUCCESS)
861 return ret;
862
863 if (folio_get_private(src))
864 folio_attach_private(dst, folio_detach_private(src));
865
866 if (mode != MIGRATE_SYNC_NO_COPY)
867 folio_migrate_copy(dst, src);
868 else
869 folio_migrate_flags(dst, src);
870 return MIGRATEPAGE_SUCCESS;
871}
872EXPORT_SYMBOL_GPL(filemap_migrate_folio);
873
874/*
875 * Writeback a folio to clean the dirty state
876 */
877static int writeout(struct address_space *mapping, struct folio *folio)
878{
879 struct writeback_control wbc = {
880 .sync_mode = WB_SYNC_NONE,
881 .nr_to_write = 1,
882 .range_start = 0,
883 .range_end = LLONG_MAX,
884 .for_reclaim = 1
885 };
886 int rc;
887
888 if (!mapping->a_ops->writepage)
889 /* No write method for the address space */
890 return -EINVAL;
891
892 if (!folio_clear_dirty_for_io(folio))
893 /* Someone else already triggered a write */
894 return -EAGAIN;
895
896 /*
897 * A dirty folio may imply that the underlying filesystem has
898 * the folio on some queue. So the folio must be clean for
899 * migration. Writeout may mean we lose the lock and the
900 * folio state is no longer what we checked for earlier.
901 * At this point we know that the migration attempt cannot
902 * be successful.
903 */
904 remove_migration_ptes(folio, folio, false);
905
906 rc = mapping->a_ops->writepage(&folio->page, &wbc);
907
908 if (rc != AOP_WRITEPAGE_ACTIVATE)
909 /* unlocked. Relock */
910 folio_lock(folio);
911
912 return (rc < 0) ? -EIO : -EAGAIN;
913}
914
915/*
916 * Default handling if a filesystem does not provide a migration function.
917 */
918static int fallback_migrate_folio(struct address_space *mapping,
919 struct folio *dst, struct folio *src, enum migrate_mode mode)
920{
921 if (folio_test_dirty(src)) {
922 /* Only writeback folios in full synchronous migration */
923 switch (mode) {
924 case MIGRATE_SYNC:
925 case MIGRATE_SYNC_NO_COPY:
926 break;
927 default:
928 return -EBUSY;
929 }
930 return writeout(mapping, src);
931 }
932
933 /*
934 * Buffers may be managed in a filesystem specific way.
935 * We must have no buffers or drop them.
936 */
937 if (!filemap_release_folio(src, GFP_KERNEL))
938 return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;
939
940 return migrate_folio(mapping, dst, src, mode);
941}
942
943/*
944 * Move a page to a newly allocated page
945 * The page is locked and all ptes have been successfully removed.
946 *
947 * The new page will have replaced the old page if this function
948 * is successful.
949 *
950 * Return value:
951 * < 0 - error code
952 * MIGRATEPAGE_SUCCESS - success
953 */
954static int move_to_new_folio(struct folio *dst, struct folio *src,
955 enum migrate_mode mode)
956{
957 int rc = -EAGAIN;
958 bool is_lru = !__folio_test_movable(src);
959
960 VM_BUG_ON_FOLIO(!folio_test_locked(src), src);
961 VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst);
962
963 if (likely(is_lru)) {
964 struct address_space *mapping = folio_mapping(src);
965
966 if (!mapping)
967 rc = migrate_folio(mapping, dst, src, mode);
968 else if (mapping_unmovable(mapping))
969 rc = -EOPNOTSUPP;
970 else if (mapping->a_ops->migrate_folio)
971 /*
972 * Most folios have a mapping and most filesystems
973 * provide a migrate_folio callback. Anonymous folios
974 * are part of swap space which also has its own
975 * migrate_folio callback. This is the most common path
976 * for page migration.
977 */
978 rc = mapping->a_ops->migrate_folio(mapping, dst, src,
979 mode);
980 else
981 rc = fallback_migrate_folio(mapping, dst, src, mode);
982 } else {
983 const struct movable_operations *mops;
984
985 /*
986 * In case of non-lru page, it could be released after
987 * isolation step. In that case, we shouldn't try migration.
988 */
989 VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
990 if (!folio_test_movable(src)) {
991 rc = MIGRATEPAGE_SUCCESS;
992 folio_clear_isolated(src);
993 goto out;
994 }
995
996 mops = folio_movable_ops(src);
997 rc = mops->migrate_page(&dst->page, &src->page, mode);
998 WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
999 !folio_test_isolated(src));
1000 }
1001
1002 /*
1003 * When successful, old pagecache src->mapping must be cleared before
1004 * src is freed; but stats require that PageAnon be left as PageAnon.
1005 */
1006 if (rc == MIGRATEPAGE_SUCCESS) {
1007 if (__folio_test_movable(src)) {
1008 VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
1009
1010 /*
1011 * We clear PG_movable under page_lock so any compactor
1012 * cannot try to migrate this page.
1013 */
1014 folio_clear_isolated(src);
1015 }
1016
1017		/*
1018		 * Anonymous and movable src->mapping will be cleared by
1019		 * free_pages_prepare(), so don't reset it here: type checks
1020		 * such as PageAnon() still need to work on src.
1021		 */
1022 if (!folio_mapping_flags(src))
1023 src->mapping = NULL;
1024
1025 if (likely(!folio_is_zone_device(dst)))
1026 flush_dcache_folio(dst);
1027 }
1028out:
1029 return rc;
1030}
1031
1032/*
1033 * To record some information during migration, we use the otherwise
1034 * unused private field of the newly allocated destination folio.
1035 * This is safe because nobody else is using it yet.
1036 */
1037enum {
1038 PAGE_WAS_MAPPED = BIT(0),
1039 PAGE_WAS_MLOCKED = BIT(1),
1040 PAGE_OLD_STATES = PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED,
1041};
1042
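/*
 * The old page state bits above are stashed in the low bits of the anon_vma
 * pointer stored in dst->private, which assumes those pointer bits are
 * always clear.  __migrate_folio_record() packs the two together and
 * __migrate_folio_extract() splits them apart again, e.g.:
 *
 *	__migrate_folio_record(dst, PAGE_WAS_MAPPED, anon_vma);
 *	...
 *	__migrate_folio_extract(dst, &old_page_state, &anon_vma);
 */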
1043static void __migrate_folio_record(struct folio *dst,
1044 int old_page_state,
1045 struct anon_vma *anon_vma)
1046{
1047 dst->private = (void *)anon_vma + old_page_state;
1048}
1049
1050static void __migrate_folio_extract(struct folio *dst,
1051 int *old_page_state,
1052 struct anon_vma **anon_vmap)
1053{
1054 unsigned long private = (unsigned long)dst->private;
1055
1056 *anon_vmap = (struct anon_vma *)(private & ~PAGE_OLD_STATES);
1057 *old_page_state = private & PAGE_OLD_STATES;
1058 dst->private = NULL;
1059}
1060
1061/* Restore the source folio to the original state upon failure */
1062static void migrate_folio_undo_src(struct folio *src,
1063 int page_was_mapped,
1064 struct anon_vma *anon_vma,
1065 bool locked,
1066 struct list_head *ret)
1067{
1068 if (page_was_mapped)
1069 remove_migration_ptes(src, src, false);
1070 /* Drop an anon_vma reference if we took one */
1071 if (anon_vma)
1072 put_anon_vma(anon_vma);
1073 if (locked)
1074 folio_unlock(src);
1075 if (ret)
1076 list_move_tail(&src->lru, ret);
1077}
1078
1079/* Restore the destination folio to the original state upon failure */
1080static void migrate_folio_undo_dst(struct folio *dst, bool locked,
1081 free_folio_t put_new_folio, unsigned long private)
1082{
1083 if (locked)
1084 folio_unlock(dst);
1085 if (put_new_folio)
1086 put_new_folio(dst, private);
1087 else
1088 folio_put(dst);
1089}
1090
1091/* Cleanup src folio upon migration success */
1092static void migrate_folio_done(struct folio *src,
1093 enum migrate_reason reason)
1094{
1095	/*
1096	 * Compaction can also migrate non-LRU pages, which are
1097	 * not accounted to NR_ISOLATED_*. They can be recognized
1098	 * via __folio_test_movable().
1099	 */
1100 if (likely(!__folio_test_movable(src)))
1101 mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON +
1102 folio_is_file_lru(src), -folio_nr_pages(src));
1103
1104 if (reason != MR_MEMORY_FAILURE)
1105 /* We release the page in page_handle_poison. */
1106 folio_put(src);
1107}
1108
1109/* Obtain the lock on the folio, remove all ptes and install migration entries. */
1110static int migrate_folio_unmap(new_folio_t get_new_folio,
1111 free_folio_t put_new_folio, unsigned long private,
1112 struct folio *src, struct folio **dstp, enum migrate_mode mode,
1113 enum migrate_reason reason, struct list_head *ret)
1114{
1115 struct folio *dst;
1116 int rc = -EAGAIN;
1117 int old_page_state = 0;
1118 struct anon_vma *anon_vma = NULL;
1119 bool is_lru = !__folio_test_movable(src);
1120 bool locked = false;
1121 bool dst_locked = false;
1122
1123 if (folio_ref_count(src) == 1) {
1124 /* Folio was freed from under us. So we are done. */
1125 folio_clear_active(src);
1126 folio_clear_unevictable(src);
1127 /* free_pages_prepare() will clear PG_isolated. */
1128 list_del(&src->lru);
1129 migrate_folio_done(src, reason);
1130 return MIGRATEPAGE_SUCCESS;
1131 }
1132
1133 dst = get_new_folio(src, private);
1134 if (!dst)
1135 return -ENOMEM;
1136 *dstp = dst;
1137
1138 dst->private = NULL;
1139
1140 if (!folio_trylock(src)) {
1141 if (mode == MIGRATE_ASYNC)
1142 goto out;
1143
1144 /*
1145 * It's not safe for direct compaction to call lock_page.
1146 * For example, during page readahead pages are added locked
1147 * to the LRU. Later, when the IO completes the pages are
1148 * marked uptodate and unlocked. However, the queueing
1149 * could be merging multiple pages for one bio (e.g.
1150 * mpage_readahead). If an allocation happens for the
1151 * second or third page, the process can end up locking
1152 * the same page twice and deadlocking. Rather than
1153 * trying to be clever about what pages can be locked,
1154 * avoid the use of lock_page for direct compaction
1155 * altogether.
1156 */
1157 if (current->flags & PF_MEMALLOC)
1158 goto out;
1159
1160 /*
1161 * In "light" mode, we can wait for transient locks (eg
1162 * inserting a page into the page table), but it's not
1163 * worth waiting for I/O.
1164 */
1165 if (mode == MIGRATE_SYNC_LIGHT && !folio_test_uptodate(src))
1166 goto out;
1167
1168 folio_lock(src);
1169 }
1170 locked = true;
1171 if (folio_test_mlocked(src))
1172 old_page_state |= PAGE_WAS_MLOCKED;
1173
1174 if (folio_test_writeback(src)) {
1175 /*
1176 * Only in the case of a full synchronous migration is it
1177 * necessary to wait for PageWriteback. In the async case,
1178 * the retry loop is too short and in the sync-light case,
1179 * the overhead of stalling is too much
1180 */
1181 switch (mode) {
1182 case MIGRATE_SYNC:
1183 case MIGRATE_SYNC_NO_COPY:
1184 break;
1185 default:
1186 rc = -EBUSY;
1187 goto out;
1188 }
1189 folio_wait_writeback(src);
1190 }
1191
1192	/*
1193	 * By try_to_migrate(), src->mapcount goes down to 0 here. In this case,
1194	 * we could not notice if the anon_vma were freed while we migrate a page.
1195	 * This folio_get_anon_vma() delays freeing the anon_vma until the end
1196	 * of migration. File cache pages are no problem because of the page
1197	 * lock: file caches may use writepage() or lock_page() during migration,
1198	 * so only anon pages need this care.
1199	 *
1200	 * Only folio_get_anon_vma() understands the subtleties of
1201	 * getting a hold on an anon_vma from outside one of its mms.
1202	 * But if we cannot get anon_vma, then we won't need it anyway,
1203	 * because that implies that the anon page is no longer mapped
1204	 * (and cannot be remapped so long as we hold the page lock).
1205	 */
1206 if (folio_test_anon(src) && !folio_test_ksm(src))
1207 anon_vma = folio_get_anon_vma(src);
1208
1209 /*
1210 * Block others from accessing the new page when we get around to
1211 * establishing additional references. We are usually the only one
1212 * holding a reference to dst at this point. We used to have a BUG
1213 * here if folio_trylock(dst) fails, but would like to allow for
1214 * cases where there might be a race with the previous use of dst.
1215 * This is much like races on refcount of oldpage: just don't BUG().
1216 */
1217 if (unlikely(!folio_trylock(dst)))
1218 goto out;
1219 dst_locked = true;
1220
1221 if (unlikely(!is_lru)) {
1222 __migrate_folio_record(dst, old_page_state, anon_vma);
1223 return MIGRATEPAGE_UNMAP;
1224 }
1225
1226 /*
1227 * Corner case handling:
1228	 * 1. When a new swap-cache page is read in, it is added to the LRU
1229 * and treated as swapcache but it has no rmap yet.
1230 * Calling try_to_unmap() against a src->mapping==NULL page will
1231 * trigger a BUG. So handle it here.
1232 * 2. An orphaned page (see truncate_cleanup_page) might have
1233 * fs-private metadata. The page can be picked up due to memory
1234 * offlining. Everywhere else except page reclaim, the page is
1235 * invisible to the vm, so the page can not be migrated. So try to
1236 * free the metadata, so the page can be freed.
1237 */
1238 if (!src->mapping) {
1239 if (folio_test_private(src)) {
1240 try_to_free_buffers(src);
1241 goto out;
1242 }
1243 } else if (folio_mapped(src)) {
1244 /* Establish migration ptes */
1245 VM_BUG_ON_FOLIO(folio_test_anon(src) &&
1246 !folio_test_ksm(src) && !anon_vma, src);
1247 try_to_migrate(src, mode == MIGRATE_ASYNC ? TTU_BATCH_FLUSH : 0);
1248 old_page_state |= PAGE_WAS_MAPPED;
1249 }
1250
1251 if (!folio_mapped(src)) {
1252 __migrate_folio_record(dst, old_page_state, anon_vma);
1253 return MIGRATEPAGE_UNMAP;
1254 }
1255
1256out:
1257	/*
1258	 * A folio that has not been unmapped will be restored to the
1259	 * right list unless we want to retry.
1260	 */
1261 if (rc == -EAGAIN)
1262 ret = NULL;
1263
1264 migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
1265 anon_vma, locked, ret);
1266 migrate_folio_undo_dst(dst, dst_locked, put_new_folio, private);
1267
1268 return rc;
1269}
1270
1271/* Migrate the folio to the newly allocated folio in dst. */
1272static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
1273 struct folio *src, struct folio *dst,
1274 enum migrate_mode mode, enum migrate_reason reason,
1275 struct list_head *ret)
1276{
1277 int rc;
1278 int old_page_state = 0;
1279 struct anon_vma *anon_vma = NULL;
1280 bool is_lru = !__folio_test_movable(src);
1281 struct list_head *prev;
1282
1283 __migrate_folio_extract(dst, &old_page_state, &anon_vma);
1284 prev = dst->lru.prev;
1285 list_del(&dst->lru);
1286
1287 rc = move_to_new_folio(dst, src, mode);
1288 if (rc)
1289 goto out;
1290
1291 if (unlikely(!is_lru))
1292 goto out_unlock_both;
1293
1294 /*
1295 * When successful, push dst to LRU immediately: so that if it
1296 * turns out to be an mlocked page, remove_migration_ptes() will
1297 * automatically build up the correct dst->mlock_count for it.
1298 *
1299 * We would like to do something similar for the old page, when
1300 * unsuccessful, and other cases when a page has been temporarily
1301 * isolated from the unevictable LRU: but this case is the easiest.
1302 */
1303 folio_add_lru(dst);
1304 if (old_page_state & PAGE_WAS_MLOCKED)
1305 lru_add_drain();
1306
1307 if (old_page_state & PAGE_WAS_MAPPED)
1308 remove_migration_ptes(src, dst, false);
1309
1310out_unlock_both:
1311 folio_unlock(dst);
1312 set_page_owner_migrate_reason(&dst->page, reason);
1313	/*
1314	 * If migration is successful, decrease the refcount of dst,
1315	 * which will not free the folio because the new owner already
1316	 * holds a reference.
1317	 */
1318 folio_put(dst);
1319
1320 /*
1321 * A folio that has been migrated has all references removed
1322 * and will be freed.
1323 */
1324 list_del(&src->lru);
1325 /* Drop an anon_vma reference if we took one */
1326 if (anon_vma)
1327 put_anon_vma(anon_vma);
1328 folio_unlock(src);
1329 migrate_folio_done(src, reason);
1330
1331 return rc;
1332out:
1333	/*
1334	 * A folio that has not been migrated will be restored to the
1335	 * right list unless we want to retry.
1336	 */
1337 if (rc == -EAGAIN) {
1338 list_add(&dst->lru, prev);
1339 __migrate_folio_record(dst, old_page_state, anon_vma);
1340 return rc;
1341 }
1342
1343 migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
1344 anon_vma, true, ret);
1345 migrate_folio_undo_dst(dst, true, put_new_folio, private);
1346
1347 return rc;
1348}
1349
1350/*
1351 * Counterpart of migrate_folio_unmap()/migrate_folio_move() for hugetlb migration.
1352 *
1353 * This function doesn't wait for the completion of hugepage I/O
1354 * because there is no race between I/O and migration for hugepages.
1355 * Note that currently hugepage I/O occurs only in direct I/O
1356 * where no lock is held and PG_writeback is irrelevant,
1357 * and writeback status of all subpages are counted in the reference
1358 * count of the head page (i.e. if all subpages of a 2MB hugepage are
1359 * under direct I/O, the reference of the head page is 512 and a bit more.)
1360 * This means that when we try to migrate hugepage whose subpages are
1361 * doing direct I/O, some references remain after try_to_unmap() and
1362 * hugepage migration fails without data corruption.
1363 *
1364 * There is also no race when direct I/O is issued on the page under migration,
1365 * because then pte is replaced with migration swap entry and direct I/O code
1366 * will wait in the page fault for migration to complete.
1367 */
1368static int unmap_and_move_huge_page(new_folio_t get_new_folio,
1369 free_folio_t put_new_folio, unsigned long private,
1370 struct folio *src, int force, enum migrate_mode mode,
1371 int reason, struct list_head *ret)
1372{
1373 struct folio *dst;
1374 int rc = -EAGAIN;
1375 int page_was_mapped = 0;
1376 struct anon_vma *anon_vma = NULL;
1377 struct address_space *mapping = NULL;
1378
1379 if (folio_ref_count(src) == 1) {
1380 /* page was freed from under us. So we are done. */
1381 folio_putback_active_hugetlb(src);
1382 return MIGRATEPAGE_SUCCESS;
1383 }
1384
1385 dst = get_new_folio(src, private);
1386 if (!dst)
1387 return -ENOMEM;
1388
1389 if (!folio_trylock(src)) {
1390 if (!force)
1391 goto out;
1392 switch (mode) {
1393 case MIGRATE_SYNC:
1394 case MIGRATE_SYNC_NO_COPY:
1395 break;
1396 default:
1397 goto out;
1398 }
1399 folio_lock(src);
1400 }
1401
1402 /*
1403 * Check for pages which are in the process of being freed. Without
1404 * folio_mapping() set, hugetlbfs specific move page routine will not
1405 * be called and we could leak usage counts for subpools.
1406 */
1407 if (hugetlb_folio_subpool(src) && !folio_mapping(src)) {
1408 rc = -EBUSY;
1409 goto out_unlock;
1410 }
1411
1412 if (folio_test_anon(src))
1413 anon_vma = folio_get_anon_vma(src);
1414
1415 if (unlikely(!folio_trylock(dst)))
1416 goto put_anon;
1417
1418 if (folio_mapped(src)) {
1419 enum ttu_flags ttu = 0;
1420
1421 if (!folio_test_anon(src)) {
1422 /*
1423 * In shared mappings, try_to_unmap could potentially
1424 * call huge_pmd_unshare. Because of this, take
1425 * semaphore in write mode here and set TTU_RMAP_LOCKED
1426 * to let lower levels know we have taken the lock.
1427 */
1428 mapping = hugetlb_page_mapping_lock_write(&src->page);
1429 if (unlikely(!mapping))
1430 goto unlock_put_anon;
1431
1432 ttu = TTU_RMAP_LOCKED;
1433 }
1434
1435 try_to_migrate(src, ttu);
1436 page_was_mapped = 1;
1437
1438 if (ttu & TTU_RMAP_LOCKED)
1439 i_mmap_unlock_write(mapping);
1440 }
1441
1442 if (!folio_mapped(src))
1443 rc = move_to_new_folio(dst, src, mode);
1444
1445 if (page_was_mapped)
1446 remove_migration_ptes(src,
1447 rc == MIGRATEPAGE_SUCCESS ? dst : src, false);
1448
1449unlock_put_anon:
1450 folio_unlock(dst);
1451
1452put_anon:
1453 if (anon_vma)
1454 put_anon_vma(anon_vma);
1455
1456 if (rc == MIGRATEPAGE_SUCCESS) {
1457 move_hugetlb_state(src, dst, reason);
1458 put_new_folio = NULL;
1459 }
1460
1461out_unlock:
1462 folio_unlock(src);
1463out:
1464 if (rc == MIGRATEPAGE_SUCCESS)
1465 folio_putback_active_hugetlb(src);
1466 else if (rc != -EAGAIN)
1467 list_move_tail(&src->lru, ret);
1468
1469	/*
1470	 * If migration was not successful and there's a freeing callback, use
1471	 * it.  Otherwise, folio_putback_active_hugetlb() will drop the
1472	 * reference grabbed during isolation.
1473	 */
1474 if (put_new_folio)
1475 put_new_folio(dst, private);
1476 else
1477 folio_putback_active_hugetlb(dst);
1478
1479 return rc;
1480}
1481
1482static inline int try_split_folio(struct folio *folio, struct list_head *split_folios)
1483{
1484 int rc;
1485
1486 folio_lock(folio);
1487 rc = split_folio_to_list(folio, split_folios);
1488 folio_unlock(folio);
1489 if (!rc)
1490 list_move_tail(&folio->lru, split_folios);
1491
1492 return rc;
1493}
1494
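/*
 * Cap on the number of base pages gathered into one migrate_pages_batch()
 * call: with THP enabled a single PMD-sized folio already fills a batch,
 * otherwise batches are limited to 512 base pages.  The retry limits below
 * bound how often a folio is retried before it is reported as failed.
 */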
1495#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1496#define NR_MAX_BATCHED_MIGRATION HPAGE_PMD_NR
1497#else
1498#define NR_MAX_BATCHED_MIGRATION 512
1499#endif
1500#define NR_MAX_MIGRATE_PAGES_RETRY 10
1501#define NR_MAX_MIGRATE_ASYNC_RETRY 3
1502#define NR_MAX_MIGRATE_SYNC_RETRY \
1503 (NR_MAX_MIGRATE_PAGES_RETRY - NR_MAX_MIGRATE_ASYNC_RETRY)
1504
1505struct migrate_pages_stats {
1506 int nr_succeeded; /* Normal and large folios migrated successfully, in
1507 units of base pages */
1508 int nr_failed_pages; /* Normal and large folios failed to be migrated, in
1509 units of base pages. Untried folios aren't counted */
1510 int nr_thp_succeeded; /* THP migrated successfully */
1511 int nr_thp_failed; /* THP failed to be migrated */
1512 int nr_thp_split; /* THP split before migrating */
1513 int nr_split; /* Large folio (include THP) split before migrating */
1514};
1515
1516/*
1517 * Returns the number of hugetlb folios that were not migrated, or an error code
1518 * after NR_MAX_MIGRATE_PAGES_RETRY attempts, or once no hugetlb folios are movable
1519 * any more because the list has become empty or no retryable hugetlb folios
1520 * exist any more. It is the caller's responsibility to call putback_movable_pages()
1521 * only if ret != 0.
1522 */
1523static int migrate_hugetlbs(struct list_head *from, new_folio_t get_new_folio,
1524 free_folio_t put_new_folio, unsigned long private,
1525 enum migrate_mode mode, int reason,
1526 struct migrate_pages_stats *stats,
1527 struct list_head *ret_folios)
1528{
1529 int retry = 1;
1530 int nr_failed = 0;
1531 int nr_retry_pages = 0;
1532 int pass = 0;
1533 struct folio *folio, *folio2;
1534 int rc, nr_pages;
1535
1536 for (pass = 0; pass < NR_MAX_MIGRATE_PAGES_RETRY && retry; pass++) {
1537 retry = 0;
1538 nr_retry_pages = 0;
1539
1540 list_for_each_entry_safe(folio, folio2, from, lru) {
1541 if (!folio_test_hugetlb(folio))
1542 continue;
1543
1544 nr_pages = folio_nr_pages(folio);
1545
1546 cond_resched();
1547
1548 /*
1549 * Migratability of hugepages depends on architectures and
1550 * their size. This check is necessary because some callers
1551 * of hugepage migration like soft offline and memory
1552 * hotremove don't walk through page tables or check whether
1553 * the hugepage is pmd-based or not before kicking migration.
1554 */
1555 if (!hugepage_migration_supported(folio_hstate(folio))) {
1556 nr_failed++;
1557 stats->nr_failed_pages += nr_pages;
1558 list_move_tail(&folio->lru, ret_folios);
1559 continue;
1560 }
1561
1562 rc = unmap_and_move_huge_page(get_new_folio,
1563 put_new_folio, private,
1564 folio, pass > 2, mode,
1565 reason, ret_folios);
1566 /*
1567 * The rules are:
1568 * Success: hugetlb folio will be put back
1569 * -EAGAIN: stay on the from list
1570 * -ENOMEM: stay on the from list
1571 * Other errno: put on ret_folios list
1572 */
1573 switch(rc) {
1574 case -ENOMEM:
1575 /*
1576 * When memory is low, don't bother to try to migrate
1577 * other folios, just exit.
1578 */
1579 stats->nr_failed_pages += nr_pages + nr_retry_pages;
1580 return -ENOMEM;
1581 case -EAGAIN:
1582 retry++;
1583 nr_retry_pages += nr_pages;
1584 break;
1585 case MIGRATEPAGE_SUCCESS:
1586 stats->nr_succeeded += nr_pages;
1587 break;
1588 default:
1589 /*
1590 * Permanent failure (-EBUSY, etc.):
1591 * unlike -EAGAIN case, the failed folio is
1592 * removed from migration folio list and not
1593 * retried in the next outer loop.
1594 */
1595 nr_failed++;
1596 stats->nr_failed_pages += nr_pages;
1597 break;
1598 }
1599 }
1600 }
1601	/*
1602	 * nr_failed is the number of hugetlb folios that failed to be migrated. After
1603	 * NR_MAX_MIGRATE_PAGES_RETRY attempts, give up and count retried hugetlb
1604	 * folios as failed.
1605	 */
1606 nr_failed += retry;
1607 stats->nr_failed_pages += nr_retry_pages;
1608
1609 return nr_failed;
1610}
1611
1612/*
1613 * migrate_pages_batch() first unmaps as many folios in the from list as
1614 * possible, then moves the unmapped folios.
1615 *
1616 * We only batch migration if mode == MIGRATE_ASYNC, to avoid waiting on a
1617 * lock or bit while we have locked more than one folio, which may cause a
1618 * deadlock (e.g., for the loop device). So, if mode != MIGRATE_ASYNC, the
1619 * length of the from list must be <= 1.
1620 */
1621static int migrate_pages_batch(struct list_head *from,
1622 new_folio_t get_new_folio, free_folio_t put_new_folio,
1623 unsigned long private, enum migrate_mode mode, int reason,
1624 struct list_head *ret_folios, struct list_head *split_folios,
1625 struct migrate_pages_stats *stats, int nr_pass)
1626{
1627 int retry = 1;
1628 int thp_retry = 1;
1629 int nr_failed = 0;
1630 int nr_retry_pages = 0;
1631 int pass = 0;
1632 bool is_thp = false;
1633 bool is_large = false;
1634 struct folio *folio, *folio2, *dst = NULL, *dst2;
1635 int rc, rc_saved = 0, nr_pages;
1636 LIST_HEAD(unmap_folios);
1637 LIST_HEAD(dst_folios);
1638 bool nosplit = (reason == MR_NUMA_MISPLACED);
1639
1640 VM_WARN_ON_ONCE(mode != MIGRATE_ASYNC &&
1641 !list_empty(from) && !list_is_singular(from));
1642
1643 for (pass = 0; pass < nr_pass && retry; pass++) {
1644 retry = 0;
1645 thp_retry = 0;
1646 nr_retry_pages = 0;
1647
1648 list_for_each_entry_safe(folio, folio2, from, lru) {
1649 is_large = folio_test_large(folio);
1650 is_thp = is_large && folio_test_pmd_mappable(folio);
1651 nr_pages = folio_nr_pages(folio);
1652
1653 cond_resched();
1654
1655			/*
1656			 * Large folio migration might be unsupported or
1657			 * the allocation might fail, so we should retry
1658			 * on the same folio with the large folio split
1659			 * into normal folios.
1660			 *
1661			 * Split folios are put in split_folios, and
1662			 * we will migrate them after the rest of the
1663			 * list is processed.
1664			 */
1665 if (!thp_migration_supported() && is_thp) {
1666 nr_failed++;
1667 stats->nr_thp_failed++;
1668 if (!try_split_folio(folio, split_folios)) {
1669 stats->nr_thp_split++;
1670 stats->nr_split++;
1671 continue;
1672 }
1673 stats->nr_failed_pages += nr_pages;
1674 list_move_tail(&folio->lru, ret_folios);
1675 continue;
1676 }
1677
1678 rc = migrate_folio_unmap(get_new_folio, put_new_folio,
1679 private, folio, &dst, mode, reason,
1680 ret_folios);
1681 /*
1682 * The rules are:
1683 * Success: folio will be freed
1684 * Unmap: folio will be put on unmap_folios list,
1685 * dst folio put on dst_folios list
1686 * -EAGAIN: stay on the from list
1687 * -ENOMEM: stay on the from list
1688 * Other errno: put on ret_folios list
1689 */
1690 switch(rc) {
1691 case -ENOMEM:
1692 /*
1693 * When memory is low, don't bother to try to migrate
1694 * other folios, move unmapped folios, then exit.
1695 */
1696 nr_failed++;
1697 stats->nr_thp_failed += is_thp;
1698 /* Large folio NUMA faulting doesn't split to retry. */
1699 if (is_large && !nosplit) {
1700 int ret = try_split_folio(folio, split_folios);
1701
1702 if (!ret) {
1703 stats->nr_thp_split += is_thp;
1704 stats->nr_split++;
1705 break;
1706 } else if (reason == MR_LONGTERM_PIN &&
1707 ret == -EAGAIN) {
1708 /*
1709 * Try again to split large folio to
1710 * mitigate the failure of longterm pinning.
1711 */
1712 retry++;
1713 thp_retry += is_thp;
1714 nr_retry_pages += nr_pages;
1715 /* Undo duplicated failure counting. */
1716 nr_failed--;
1717 stats->nr_thp_failed -= is_thp;
1718 break;
1719 }
1720 }
1721
1722 stats->nr_failed_pages += nr_pages + nr_retry_pages;
1723				/* nr_failed isn't updated because it won't be used (rc_saved is returned) */
1724 stats->nr_thp_failed += thp_retry;
1725 rc_saved = rc;
1726 if (list_empty(&unmap_folios))
1727 goto out;
1728 else
1729 goto move;
1730 case -EAGAIN:
1731 retry++;
1732 thp_retry += is_thp;
1733 nr_retry_pages += nr_pages;
1734 break;
1735 case MIGRATEPAGE_SUCCESS:
1736 stats->nr_succeeded += nr_pages;
1737 stats->nr_thp_succeeded += is_thp;
1738 break;
1739 case MIGRATEPAGE_UNMAP:
1740 list_move_tail(&folio->lru, &unmap_folios);
1741 list_add_tail(&dst->lru, &dst_folios);
1742 break;
1743 default:
1744 /*
1745 * Permanent failure (-EBUSY, etc.):
1746 * unlike -EAGAIN case, the failed folio is
1747 * removed from migration folio list and not
1748 * retried in the next outer loop.
1749 */
1750 nr_failed++;
1751 stats->nr_thp_failed += is_thp;
1752 stats->nr_failed_pages += nr_pages;
1753 break;
1754 }
1755 }
1756 }
1757 nr_failed += retry;
1758 stats->nr_thp_failed += thp_retry;
1759 stats->nr_failed_pages += nr_retry_pages;
1760move:
1761 /* Flush TLBs for all unmapped folios */
1762 try_to_unmap_flush();
1763
1764 retry = 1;
1765 for (pass = 0; pass < nr_pass && retry; pass++) {
1766 retry = 0;
1767 thp_retry = 0;
1768 nr_retry_pages = 0;
1769
1770 dst = list_first_entry(&dst_folios, struct folio, lru);
1771 dst2 = list_next_entry(dst, lru);
1772 list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
1773 is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
1774 nr_pages = folio_nr_pages(folio);
1775
1776 cond_resched();
1777
1778 rc = migrate_folio_move(put_new_folio, private,
1779 folio, dst, mode,
1780 reason, ret_folios);
1781 /*
1782 * The rules are:
1783 * Success: folio will be freed
1784 * -EAGAIN: stay on the unmap_folios list
1785 * Other errno: put on ret_folios list
1786 */
1787 switch(rc) {
1788 case -EAGAIN:
1789 retry++;
1790 thp_retry += is_thp;
1791 nr_retry_pages += nr_pages;
1792 break;
1793 case MIGRATEPAGE_SUCCESS:
1794 stats->nr_succeeded += nr_pages;
1795 stats->nr_thp_succeeded += is_thp;
1796 break;
1797 default:
1798 nr_failed++;
1799 stats->nr_thp_failed += is_thp;
1800 stats->nr_failed_pages += nr_pages;
1801 break;
1802 }
1803 dst = dst2;
1804 dst2 = list_next_entry(dst, lru);
1805 }
1806 }
1807 nr_failed += retry;
1808 stats->nr_thp_failed += thp_retry;
1809 stats->nr_failed_pages += nr_retry_pages;
1810
1811 rc = rc_saved ? : nr_failed;
1812out:
1813 /* Cleanup remaining folios */
1814 dst = list_first_entry(&dst_folios, struct folio, lru);
1815 dst2 = list_next_entry(dst, lru);
1816 list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
1817 int old_page_state = 0;
1818 struct anon_vma *anon_vma = NULL;
1819
1820 __migrate_folio_extract(dst, &old_page_state, &anon_vma);
1821 migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
1822 anon_vma, true, ret_folios);
1823 list_del(&dst->lru);
1824 migrate_folio_undo_dst(dst, true, put_new_folio, private);
1825 dst = dst2;
1826 dst2 = list_next_entry(dst, lru);
1827 }
1828
1829 return rc;
1830}
1831
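/*
 * First try to migrate the whole list in batch with cheap MIGRATE_ASYNC
 * attempts, then fall back to migrating the remaining folios one by one in
 * the caller's (synchronous) migration mode.  Returns the number of folios
 * that could not be migrated, or a negative error code.
 */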
1832static int migrate_pages_sync(struct list_head *from, new_folio_t get_new_folio,
1833 free_folio_t put_new_folio, unsigned long private,
1834 enum migrate_mode mode, int reason,
1835 struct list_head *ret_folios, struct list_head *split_folios,
1836 struct migrate_pages_stats *stats)
1837{
1838 int rc, nr_failed = 0;
1839 LIST_HEAD(folios);
1840 struct migrate_pages_stats astats;
1841
1842 memset(&astats, 0, sizeof(astats));
1843 /* Try to migrate in batch with MIGRATE_ASYNC mode firstly */
1844 rc = migrate_pages_batch(from, get_new_folio, put_new_folio, private, MIGRATE_ASYNC,
1845 reason, &folios, split_folios, &astats,
1846 NR_MAX_MIGRATE_ASYNC_RETRY);
1847 stats->nr_succeeded += astats.nr_succeeded;
1848 stats->nr_thp_succeeded += astats.nr_thp_succeeded;
1849 stats->nr_thp_split += astats.nr_thp_split;
1850 stats->nr_split += astats.nr_split;
1851 if (rc < 0) {
1852 stats->nr_failed_pages += astats.nr_failed_pages;
1853 stats->nr_thp_failed += astats.nr_thp_failed;
1854 list_splice_tail(&folios, ret_folios);
1855 return rc;
1856 }
1857 stats->nr_thp_failed += astats.nr_thp_split;
1858 /*
1859 * Do not count rc, as pages will be retried below.
1860 * Count nr_split only, since it includes nr_thp_split.
1861 */
1862 nr_failed += astats.nr_split;
1863 /*
1864 * Fall back to migrate all failed folios one by one synchronously. All
1865 * failed folios except split THPs will be retried, so their failure
1866	 * isn't counted.
1867 */
1868 list_splice_tail_init(&folios, from);
1869 while (!list_empty(from)) {
1870 list_move(from->next, &folios);
1871 rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
1872 private, mode, reason, ret_folios,
1873 split_folios, stats, NR_MAX_MIGRATE_SYNC_RETRY);
1874 list_splice_tail_init(&folios, ret_folios);
1875 if (rc < 0)
1876 return rc;
1877 nr_failed += rc;
1878 }
1879
1880 return nr_failed;
1881}
1882
1883/*
1884 * migrate_pages - migrate the folios specified in a list, to the free folios
1885 * supplied as the target for the page migration
1886 *
1887 * @from: The list of folios to be migrated.
1888 * @get_new_folio: The function used to allocate free folios to be used
1889 * as the target of the folio migration.
1890 * @put_new_folio: The function used to free target folios if migration
1891 * fails, or NULL if no special handling is necessary.
1892 * @private: Private data to be passed on to get_new_folio()
1893 * @mode: The migration mode that specifies the constraints for
1894 * folio migration, if any.
1895 * @reason: The reason for folio migration.
1896 * @ret_succeeded: Set to the number of folios migrated successfully if
1897 * the caller passes a non-NULL pointer.
1898 *
1899 * The function returns after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no folios
1900 * are movable any more because the list has become empty or no retryable folios
1901 * exist any more. It is the caller's responsibility to call putback_movable_pages()
1902 * only if ret != 0.
1903 *
1904 * Returns the number of {normal, large, hugetlb} folios that were not
1905 * migrated, or an error code. Each large folio that had to be split is
1906 * counted as one non-migrated large folio, no matter how many of its
1907 * split folios are migrated successfully.
1908 */
1909int migrate_pages(struct list_head *from, new_folio_t get_new_folio,
1910 free_folio_t put_new_folio, unsigned long private,
1911 enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
1912{
1913 int rc, rc_gather;
1914 int nr_pages;
1915 struct folio *folio, *folio2;
1916 LIST_HEAD(folios);
1917 LIST_HEAD(ret_folios);
1918 LIST_HEAD(split_folios);
1919 struct migrate_pages_stats stats;
1920
1921 trace_mm_migrate_pages_start(mode, reason);
1922
1923 memset(&stats, 0, sizeof(stats));
1924
1925 rc_gather = migrate_hugetlbs(from, get_new_folio, put_new_folio, private,
1926 mode, reason, &stats, &ret_folios);
1927 if (rc_gather < 0)
1928 goto out;
1929
1930again:
1931 nr_pages = 0;
1932 list_for_each_entry_safe(folio, folio2, from, lru) {
1933 /* Retried hugetlb folios will be kept in list */
1934 if (folio_test_hugetlb(folio)) {
1935 list_move_tail(&folio->lru, &ret_folios);
1936 continue;
1937 }
1938
1939 nr_pages += folio_nr_pages(folio);
1940 if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
1941 break;
1942 }
1943 if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
1944 list_cut_before(&folios, from, &folio2->lru);
1945 else
1946 list_splice_init(from, &folios);
1947 if (mode == MIGRATE_ASYNC)
1948 rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
1949 private, mode, reason, &ret_folios,
1950 &split_folios, &stats,
1951 NR_MAX_MIGRATE_PAGES_RETRY);
1952 else
1953 rc = migrate_pages_sync(&folios, get_new_folio, put_new_folio,
1954 private, mode, reason, &ret_folios,
1955 &split_folios, &stats);
1956 list_splice_tail_init(&folios, &ret_folios);
1957 if (rc < 0) {
1958 rc_gather = rc;
1959 list_splice_tail(&split_folios, &ret_folios);
1960 goto out;
1961 }
1962 if (!list_empty(&split_folios)) {
1963 /*
1964 * Failure isn't counted since all split folios of a large folio
1965 * are already counted as one failure. We only try to migrate
1966 * with minimal effort: force MIGRATE_ASYNC mode and retry once.
1967 */
1968 migrate_pages_batch(&split_folios, get_new_folio,
1969 put_new_folio, private, MIGRATE_ASYNC, reason,
1970 &ret_folios, NULL, &stats, 1);
1971 list_splice_tail_init(&split_folios, &ret_folios);
1972 }
1973 rc_gather += rc;
1974 if (!list_empty(from))
1975 goto again;
1976out:
1977 /*
1978 * Put the permanently failed folios back on the migration list; they
1979 * will be put back on the right list by the caller.
1980 */
1981 list_splice(&ret_folios, from);
1982
1983 /*
1984 * Return 0 if all split folios of the large folios that failed to migrate
1985 * were themselves migrated successfully.
1986 */
1987 if (list_empty(from))
1988 rc_gather = 0;
1989
1990 count_vm_events(PGMIGRATE_SUCCESS, stats.nr_succeeded);
1991 count_vm_events(PGMIGRATE_FAIL, stats.nr_failed_pages);
1992 count_vm_events(THP_MIGRATION_SUCCESS, stats.nr_thp_succeeded);
1993 count_vm_events(THP_MIGRATION_FAIL, stats.nr_thp_failed);
1994 count_vm_events(THP_MIGRATION_SPLIT, stats.nr_thp_split);
1995 trace_mm_migrate_pages(stats.nr_succeeded, stats.nr_failed_pages,
1996 stats.nr_thp_succeeded, stats.nr_thp_failed,
1997 stats.nr_thp_split, stats.nr_split, mode,
1998 reason);
1999
2000 if (ret_succeeded)
2001 *ret_succeeded = stats.nr_succeeded;
2002
2003 return rc_gather;
2004}
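
/*
 * Editor's sketch (not part of the original file): a minimal migrate_pages()
 * caller, following the same pattern as do_move_pages_to_node() below.  The
 * candidate folios are first isolated onto a private list, a
 * migration_target_control picks the destination node for
 * alloc_migration_target(), and whatever could not be migrated is handed
 * back to putback_movable_pages():
 *
 *	LIST_HEAD(pagelist);
 *	struct migration_target_control mtc = {
 *		.nid = target_nid,
 *		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
 *	};
 *	int ret;
 *
 *	// ... isolate candidate folios onto &pagelist, e.g. via
 *	// folio_isolate_lru() or isolate_hugetlb() ...
 *
 *	ret = migrate_pages(&pagelist, alloc_migration_target, NULL,
 *			    (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL,
 *			    NULL);
 *	if (ret)
 *		putback_movable_pages(&pagelist);
 *
 * target_nid is a placeholder for whatever destination node the caller has
 * chosen; the migrate mode and reason code depend on the call site.
 */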
2005
2006struct folio *alloc_migration_target(struct folio *src, unsigned long private)
2007{
2008 struct migration_target_control *mtc;
2009 gfp_t gfp_mask;
2010 unsigned int order = 0;
2011 int nid;
2012 int zidx;
2013
2014 mtc = (struct migration_target_control *)private;
2015 gfp_mask = mtc->gfp_mask;
2016 nid = mtc->nid;
2017 if (nid == NUMA_NO_NODE)
2018 nid = folio_nid(src);
2019
2020 if (folio_test_hugetlb(src)) {
2021 struct hstate *h = folio_hstate(src);
2022
2023 gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
2024 return alloc_hugetlb_folio_nodemask(h, nid,
2025 mtc->nmask, gfp_mask);
2026 }
2027
2028 if (folio_test_large(src)) {
2029 /*
2030 * clear __GFP_RECLAIM to make the migration callback
2031 * consistent with regular THP allocations.
2032 */
2033 gfp_mask &= ~__GFP_RECLAIM;
2034 gfp_mask |= GFP_TRANSHUGE;
2035 order = folio_order(src);
2036 }
2037 zidx = zone_idx(folio_zone(src));
2038 if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
2039 gfp_mask |= __GFP_HIGHMEM;
2040
2041 return __folio_alloc(gfp_mask, order, nid, mtc->nmask);
2042}
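
/*
 * Editor's sketch (assumption, not original code): alloc_migration_target()
 * is intended to be passed as the get_new_folio callback, with a pointer to
 * a migration_target_control cast into the private argument.  For instance,
 * to prefer one node but allow fallback within a nodemask:
 *
 *	nodemask_t allowed = NODE_MASK_NONE;
 *	struct migration_target_control mtc = {
 *		.nid = preferred_nid,
 *		.gfp_mask = GFP_HIGHUSER_MOVABLE,
 *		.nmask = &allowed,
 *	};
 *	int rc;
 *
 *	node_set(preferred_nid, allowed);
 *	node_set(fallback_nid, allowed);
 *	rc = migrate_pages(&pagelist, alloc_migration_target, NULL,
 *			   (unsigned long)&mtc, MIGRATE_SYNC,
 *			   MR_MEMORY_HOTPLUG, NULL);
 *
 * preferred_nid and fallback_nid are placeholders; leaving .nid as
 * NUMA_NO_NODE makes the source folio's own node the default, as the code
 * above shows.
 */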
2043
2044#ifdef CONFIG_NUMA
2045
2046static int store_status(int __user *status, int start, int value, int nr)
2047{
2048 while (nr-- > 0) {
2049 if (put_user(value, status + start))
2050 return -EFAULT;
2051 start++;
2052 }
2053
2054 return 0;
2055}
2056
2057static int do_move_pages_to_node(struct list_head *pagelist, int node)
2058{
2059 int err;
2060 struct migration_target_control mtc = {
2061 .nid = node,
2062 .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
2063 };
2064
2065 err = migrate_pages(pagelist, alloc_migration_target, NULL,
2066 (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
2067 if (err)
2068 putback_movable_pages(pagelist);
2069 return err;
2070}
2071
2072/*
2073 * Resolves the given address to a struct page, isolates it from the LRU and
2074 * puts it on the given pagelist.
2075 * Returns:
2076 * errno - if the page cannot be found/isolated
2077 * 0 - when it doesn't have to be migrated because it is already on the
2078 * target node
2079 * 1 - when it has been queued
2080 */
2081static int add_page_for_migration(struct mm_struct *mm, const void __user *p,
2082 int node, struct list_head *pagelist, bool migrate_all)
2083{
2084 struct vm_area_struct *vma;
2085 unsigned long addr;
2086 struct page *page;
2087 struct folio *folio;
2088 int err;
2089
2090 mmap_read_lock(mm);
2091 addr = (unsigned long)untagged_addr_remote(mm, p);
2092
2093 err = -EFAULT;
2094 vma = vma_lookup(mm, addr);
2095 if (!vma || !vma_migratable(vma))
2096 goto out;
2097
2098 /* FOLL_DUMP to ignore special (like zero) pages */
2099 page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
2100
2101 err = PTR_ERR(page);
2102 if (IS_ERR(page))
2103 goto out;
2104
2105 err = -ENOENT;
2106 if (!page)
2107 goto out;
2108
2109 folio = page_folio(page);
2110 if (folio_is_zone_device(folio))
2111 goto out_putfolio;
2112
2113 err = 0;
2114 if (folio_nid(folio) == node)
2115 goto out_putfolio;
2116
2117 err = -EACCES;
2118 if (page_mapcount(page) > 1 && !migrate_all)
2119 goto out_putfolio;
2120
2121 err = -EBUSY;
2122 if (folio_test_hugetlb(folio)) {
2123 if (isolate_hugetlb(folio, pagelist))
2124 err = 1;
2125 } else {
2126 if (!folio_isolate_lru(folio))
2127 goto out_putfolio;
2128
2129 err = 1;
2130 list_add_tail(&folio->lru, pagelist);
2131 node_stat_mod_folio(folio,
2132 NR_ISOLATED_ANON + folio_is_file_lru(folio),
2133 folio_nr_pages(folio));
2134 }
2135out_putfolio:
2136 /*
2137 * Either remove the duplicate refcount from folio_isolate_lru()
2138 * or drop the folio ref if it was not isolated.
2139 */
2140 folio_put(folio);
2141out:
2142 mmap_read_unlock(mm);
2143 return err;
2144}
2145
2146static int move_pages_and_store_status(int node,
2147 struct list_head *pagelist, int __user *status,
2148 int start, int i, unsigned long nr_pages)
2149{
2150 int err;
2151
2152 if (list_empty(pagelist))
2153 return 0;
2154
2155 err = do_move_pages_to_node(pagelist, node);
2156 if (err) {
2157 /*
2158 * A positive err is the number of pages that
2159 * failed to migrate. Since we are going to
2160 * abort and return the number of non-migrated
2161 * pages, we need to include the rest of the
2162 * nr_pages that have not been attempted as
2163 * well.
2164 */
2165 if (err > 0)
2166 err += nr_pages - i;
2167 return err;
2168 }
2169 return store_status(status, start, node, i - start);
2170}
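
/*
 * Editor's note (worked example, not original): suppose nr_pages == 8, the
 * current batch covers indices start..i-1, and do_move_pages_to_node()
 * reports 2 pages it could not migrate.  The function then returns
 * 2 + (8 - i): the caller aborts on a non-zero return, so the pages from
 * index i onward that were never attempted are counted as not migrated too.
 */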
2171
2172/*
2173 * Migrate an array of page addresses onto an array of nodes and fill
2174 * in the corresponding array of status values.
2175 */
2176static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
2177 unsigned long nr_pages,
2178 const void __user * __user *pages,
2179 const int __user *nodes,
2180 int __user *status, int flags)
2181{
2182 compat_uptr_t __user *compat_pages = (void __user *)pages;
2183 int current_node = NUMA_NO_NODE;
2184 LIST_HEAD(pagelist);
2185 int start, i;
2186 int err = 0, err1;
2187
2188 lru_cache_disable();
2189
2190 for (i = start = 0; i < nr_pages; i++) {
2191 const void __user *p;
2192 int node;
2193
2194 err = -EFAULT;
2195 if (in_compat_syscall()) {
2196 compat_uptr_t cp;
2197
2198 if (get_user(cp, compat_pages + i))
2199 goto out_flush;
2200
2201 p = compat_ptr(cp);
2202 } else {
2203 if (get_user(p, pages + i))
2204 goto out_flush;
2205 }
2206 if (get_user(node, nodes + i))
2207 goto out_flush;
2208
2209 err = -ENODEV;
2210 if (node < 0 || node >= MAX_NUMNODES)
2211 goto out_flush;
2212 if (!node_state(node, N_MEMORY))
2213 goto out_flush;
2214
2215 err = -EACCES;
2216 if (!node_isset(node, task_nodes))
2217 goto out_flush;
2218
2219 if (current_node == NUMA_NO_NODE) {
2220 current_node = node;
2221 start = i;
2222 } else if (node != current_node) {
2223 err = move_pages_and_store_status(current_node,
2224 &pagelist, status, start, i, nr_pages);
2225 if (err)
2226 goto out;
2227 start = i;
2228 current_node = node;
2229 }
2230
2231 /*
2232 * Errors in the page lookup or isolation are not fatal and we simply
2233 * report them via status
2234 */
2235 err = add_page_for_migration(mm, p, current_node, &pagelist,
2236 flags & MPOL_MF_MOVE_ALL);
2237
2238 if (err > 0) {
2239 /* The page is successfully queued for migration */
2240 continue;
2241 }
2242
2243 /*
2244 * The move_pages() man page does not have an -EEXIST choice, so
2245 * use -EFAULT instead.
2246 */
2247 if (err == -EEXIST)
2248 err = -EFAULT;
2249
2250 /*
2251 * If the page is already on the target node (!err), store the
2252 * node; otherwise, store the err.
2253 */
2254 err = store_status(status, i, err ? : current_node, 1);
2255 if (err)
2256 goto out_flush;
2257
2258 err = move_pages_and_store_status(current_node, &pagelist,
2259 status, start, i, nr_pages);
2260 if (err) {
2261 /* We have accounted for page i */
2262 if (err > 0)
2263 err--;
2264 goto out;
2265 }
2266 current_node = NUMA_NO_NODE;
2267 }
2268out_flush:
2269 /* Make sure we do not overwrite the existing error */
2270 err1 = move_pages_and_store_status(current_node, &pagelist,
2271 status, start, i, nr_pages);
2272 if (err >= 0)
2273 err = err1;
2274out:
2275 lru_cache_enable();
2276 return err;
2277}
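
/*
 * Editor's note (worked example, not original): requests are batched per
 * destination node.  With a nodes[] array of {1, 1, 2, 2, 1} and every page
 * queueing successfully, the first two pages are flushed to node 1 when
 * node 2 is first seen, the next two are flushed to node 2 when node 1
 * reappears, and the final page is flushed at out_flush.  A page that is
 * already on its target node is never queued; its status slot simply
 * receives the node number and the pending batch is flushed immediately.
 */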
2278
2279/*
2280 * Determine the nodes of an array of pages and store them in an array of status.
2281 */
2282static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
2283 const void __user **pages, int *status)
2284{
2285 unsigned long i;
2286
2287 mmap_read_lock(mm);
2288
2289 for (i = 0; i < nr_pages; i++) {
2290 unsigned long addr = (unsigned long)(*pages);
2291 struct vm_area_struct *vma;
2292 struct page *page;
2293 int err = -EFAULT;
2294
2295 vma = vma_lookup(mm, addr);
2296 if (!vma)
2297 goto set_status;
2298
2299 /* FOLL_DUMP to ignore special (like zero) pages */
2300 page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
2301
2302 err = PTR_ERR(page);
2303 if (IS_ERR(page))
2304 goto set_status;
2305
2306 err = -ENOENT;
2307 if (!page)
2308 goto set_status;
2309
2310 if (!is_zone_device_page(page))
2311 err = page_to_nid(page);
2312
2313 put_page(page);
2314set_status:
2315 *status = err;
2316
2317 pages++;
2318 status++;
2319 }
2320
2321 mmap_read_unlock(mm);
2322}
2323
2324static int get_compat_pages_array(const void __user *chunk_pages[],
2325 const void __user * __user *pages,
2326 unsigned long chunk_nr)
2327{
2328 compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages;
2329 compat_uptr_t p;
2330 int i;
2331
2332 for (i = 0; i < chunk_nr; i++) {
2333 if (get_user(p, pages32 + i))
2334 return -EFAULT;
2335 chunk_pages[i] = compat_ptr(p);
2336 }
2337
2338 return 0;
2339}
2340
2341/*
2342 * Determine the nodes of a user array of pages and store them in
2343 * a user array of status.
2344 */
2345static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
2346 const void __user * __user *pages,
2347 int __user *status)
2348{
2349#define DO_PAGES_STAT_CHUNK_NR 16UL
2350 const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
2351 int chunk_status[DO_PAGES_STAT_CHUNK_NR];
2352
2353 while (nr_pages) {
2354 unsigned long chunk_nr = min(nr_pages, DO_PAGES_STAT_CHUNK_NR);
2355
2356 if (in_compat_syscall()) {
2357 if (get_compat_pages_array(chunk_pages, pages,
2358 chunk_nr))
2359 break;
2360 } else {
2361 if (copy_from_user(chunk_pages, pages,
2362 chunk_nr * sizeof(*chunk_pages)))
2363 break;
2364 }
2365
2366 do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
2367
2368 if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
2369 break;
2370
2371 pages += chunk_nr;
2372 status += chunk_nr;
2373 nr_pages -= chunk_nr;
2374 }
2375 return nr_pages ? -EFAULT : 0;
2376}
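
/*
 * Editor's note (worked example, not original): the status query walks the
 * user arrays in DO_PAGES_STAT_CHUNK_NR-sized chunks, so a 40-page request
 * is handled as three passes of 16, 16 and 8 pages, each of which copies
 * the pointers in, resolves them under mmap_read_lock(), and copies the
 * status values back out before moving on.
 */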
2377
2378static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
2379{
2380 struct task_struct *task;
2381 struct mm_struct *mm;
2382
2383 /*
2384 * There is no need to check if the current process has the right to modify
2385 * the specified process when they are the same.
2386 */
2387 if (!pid) {
2388 mmget(current->mm);
2389 *mem_nodes = cpuset_mems_allowed(current);
2390 return current->mm;
2391 }
2392
2393 /* Find the mm_struct */
2394 rcu_read_lock();
2395 task = find_task_by_vpid(pid);
2396 if (!task) {
2397 rcu_read_unlock();
2398 return ERR_PTR(-ESRCH);
2399 }
2400 get_task_struct(task);
2401
2402 /*
2403 * Check if this process has the right to modify the specified
2404 * process. Use the regular "ptrace_may_access()" checks.
2405 */
2406 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
2407 rcu_read_unlock();
2408 mm = ERR_PTR(-EPERM);
2409 goto out;
2410 }
2411 rcu_read_unlock();
2412
2413 mm = ERR_PTR(security_task_movememory(task));
2414 if (IS_ERR(mm))
2415 goto out;
2416 *mem_nodes = cpuset_mems_allowed(task);
2417 mm = get_task_mm(task);
2418out:
2419 put_task_struct(task);
2420 if (!mm)
2421 mm = ERR_PTR(-EINVAL);
2422 return mm;
2423}
2424
2425/*
2426 * Move a list of pages in the address space of the currently executing
2427 * process.
2428 */
2429static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
2430 const void __user * __user *pages,
2431 const int __user *nodes,
2432 int __user *status, int flags)
2433{
2434 struct mm_struct *mm;
2435 int err;
2436 nodemask_t task_nodes;
2437
2438 /* Check flags */
2439 if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
2440 return -EINVAL;
2441
2442 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
2443 return -EPERM;
2444
2445 mm = find_mm_struct(pid, &task_nodes);
2446 if (IS_ERR(mm))
2447 return PTR_ERR(mm);
2448
2449 if (nodes)
2450 err = do_pages_move(mm, task_nodes, nr_pages, pages,
2451 nodes, status, flags);
2452 else
2453 err = do_pages_stat(mm, nr_pages, pages, status);
2454
2455 mmput(mm);
2456 return err;
2457}
2458
2459SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
2460 const void __user * __user *, pages,
2461 const int __user *, nodes,
2462 int __user *, status, int, flags)
2463{
2464 return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
2465}
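
/*
 * Editor's sketch (userspace code, an assumption rather than kernel source):
 * the syscall above backs move_pages(2), usually reached through libnuma's
 * wrapper declared in <numaif.h>.  Passing nodes == NULL turns the call into
 * a pure status query that reports the node each page currently resides on:
 *
 *	#include <numaif.h>
 *	#include <stdio.h>
 *
 *	void *pages[1] = { some_address };
 *	int nodes[1] = { 1 };
 *	int status[1];
 *
 *	// Ask for the page containing some_address to be moved to node 1.
 *	if (move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE) == 0)
 *		printf("page now on node %d\n", status[0]);
 *
 *	// Query only: which node does that page live on now?
 *	move_pages(0, 1, pages, NULL, status, 0);
 *
 * some_address stands for any address in the caller's address space; the
 * page containing it is the one acted on.  Error handling is omitted.
 */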
2466
2467#ifdef CONFIG_NUMA_BALANCING
2468/*
2469 * Returns true if this is a safe migration target node for misplaced NUMA
2470 * pages. Currently it only checks the watermarks, which is crude.
2471 */
2472static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
2473 unsigned long nr_migrate_pages)
2474{
2475 int z;
2476
2477 for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2478 struct zone *zone = pgdat->node_zones + z;
2479
2480 if (!managed_zone(zone))
2481 continue;
2482
2483 /* Avoid waking kswapd by allocating nr_migrate_pages pages. */
2484 if (!zone_watermark_ok(zone, 0,
2485 high_wmark_pages(zone) +
2486 nr_migrate_pages,
2487 ZONE_MOVABLE, 0))
2488 continue;
2489 return true;
2490 }
2491 return false;
2492}
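
/*
 * Editor's note (illustrative numbers, not original): the watermark test
 * above demands headroom beyond the high watermark.  If a zone's high
 * watermark is 1024 pages and the batch to migrate is 512 pages,
 * zone_watermark_ok() is asked for a mark of 1536 free pages; only a zone
 * that clears that bar, i.e. one that can absorb the batch without waking
 * kswapd, makes this node an acceptable target.
 */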
2493
2494static struct folio *alloc_misplaced_dst_folio(struct folio *src,
2495 unsigned long data)
2496{
2497 int nid = (int) data;
2498 int order = folio_order(src);
2499 gfp_t gfp = __GFP_THISNODE;
2500
2501 if (order > 0)
2502 gfp |= GFP_TRANSHUGE_LIGHT;
2503 else {
2504 gfp |= GFP_HIGHUSER_MOVABLE | __GFP_NOMEMALLOC | __GFP_NORETRY |
2505 __GFP_NOWARN;
2506 gfp &= ~__GFP_RECLAIM;
2507 }
2508 return __folio_alloc_node(gfp, order, nid);
2509}
2510
2511static int numamigrate_isolate_folio(pg_data_t *pgdat, struct folio *folio)
2512{
2513 int nr_pages = folio_nr_pages(folio);
2514
2515 /* Avoid migrating to a node that is nearly full */
2516 if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
2517 int z;
2518
2519 if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING))
2520 return 0;
2521 for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2522 if (managed_zone(pgdat->node_zones + z))
2523 break;
2524 }
2525
2526 /*
2527 * If there are no managed zones, it should not proceed
2528 * further.
2529 */
2530 if (z < 0)
2531 return 0;
2532
2533 wakeup_kswapd(pgdat->node_zones + z, 0,
2534 folio_order(folio), ZONE_MOVABLE);
2535 return 0;
2536 }
2537
2538 if (!folio_isolate_lru(folio))
2539 return 0;
2540
2541 node_stat_mod_folio(folio, NR_ISOLATED_ANON + folio_is_file_lru(folio),
2542 nr_pages);
2543
2544 /*
2545 * Isolating the folio has taken another reference, so the
2546 * caller's reference can be safely dropped without the folio
2547 * disappearing underneath us during migration.
2548 */
2549 folio_put(folio);
2550 return 1;
2551}
2552
2553/*
2554 * Attempt to migrate a misplaced folio to the specified destination
2555 * node. Caller is expected to have an elevated reference count on
2556 * the folio that will be dropped by this function before returning.
2557 */
2558int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
2559 int node)
2560{
2561 pg_data_t *pgdat = NODE_DATA(node);
2562 int isolated;
2563 int nr_remaining;
2564 unsigned int nr_succeeded;
2565 LIST_HEAD(migratepages);
2566 int nr_pages = folio_nr_pages(folio);
2567
2568 /*
2569 * Don't migrate file folios that are mapped in multiple processes
2570 * with execute permissions as they are probably shared libraries.
2571 * To check if the folio is shared, ideally we want to make sure
2572 * every page is mapped to the same process. Doing that is very
2573 * expensive, so check the estimated mapcount of the folio instead.
2574 */
2575 if (folio_estimated_sharers(folio) != 1 && folio_is_file_lru(folio) &&
2576 (vma->vm_flags & VM_EXEC))
2577 goto out;
2578
2579 /*
2580 * Also do not migrate dirty folios as not all filesystems can move
2581 * dirty folios in MIGRATE_ASYNC mode, which is a waste of cycles.
2582 */
2583 if (folio_is_file_lru(folio) && folio_test_dirty(folio))
2584 goto out;
2585
2586 isolated = numamigrate_isolate_folio(pgdat, folio);
2587 if (!isolated)
2588 goto out;
2589
2590 list_add(&folio->lru, &migratepages);
2591 nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio,
2592 NULL, node, MIGRATE_ASYNC,
2593 MR_NUMA_MISPLACED, &nr_succeeded);
2594 if (nr_remaining) {
2595 if (!list_empty(&migratepages)) {
2596 list_del(&folio->lru);
2597 node_stat_mod_folio(folio, NR_ISOLATED_ANON +
2598 folio_is_file_lru(folio), -nr_pages);
2599 folio_putback_lru(folio);
2600 }
2601 isolated = 0;
2602 }
2603 if (nr_succeeded) {
2604 count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
2605 if (!node_is_toptier(folio_nid(folio)) && node_is_toptier(node))
2606 mod_node_page_state(pgdat, PGPROMOTE_SUCCESS,
2607 nr_succeeded);
2608 }
2609 BUG_ON(!list_empty(&migratepages));
2610 return isolated;
2611
2612out:
2613 folio_put(folio);
2614 return 0;
2615}
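
/*
 * Editor's sketch (assumption about the caller, not original code): the
 * expected user of migrate_misplaced_folio() is the NUMA hint fault path.
 * Once a target node has been chosen for the faulting folio, the caller
 * does roughly:
 *
 *	folio_get(folio);
 *	if (migrate_misplaced_folio(folio, vma, target_nid)) {
 *		// success: the folio now resides on target_nid
 *	} else {
 *		// migration was skipped or failed
 *	}
 *
 * In every return path the reference taken above is consumed by
 * migrate_misplaced_folio(), so no folio_put() is needed afterwards.
 * target_nid is whatever node the NUMA balancing policy selected; the real
 * fault handler also re-walks the page table and fixes up PTE state, which
 * is omitted here.
 */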
2616#endif /* CONFIG_NUMA_BALANCING */
2617#endif /* CONFIG_NUMA */