// SPDX-License-Identifier: GPL-2.0
/*
 * Memory Migration functionality - linux/mm/migrate.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */

#include <linux/migrate.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
#include <linux/compaction.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/gfp.h>
#include <linux/pfn_t.h>
#include <linux/memremap.h>
#include <linux/userfaultfd_k.h>
#include <linux/balloon_compaction.h>
#include <linux/page_idle.h>
#include <linux/page_owner.h>
#include <linux/sched/mm.h>
#include <linux/ptrace.h>
#include <linux/oom.h>
#include <linux/memory.h>
#include <linux/random.h>
#include <linux/sched/sysctl.h>
#include <linux/memory-tiers.h>

#include <asm/tlbflush.h>

#include <trace/events/migrate.h>

#include "internal.h"
bool isolate_movable_page(struct page *page, isolate_mode_t mode)
{
	struct folio *folio = folio_get_nontail_page(page);
	const struct movable_operations *mops;

	/*
	 * Avoid burning cycles with pages that are yet under __free_pages(),
	 * or just got freed under us.
	 *
	 * In case we 'win' a race for a movable page being freed under us and
	 * raise its refcount preventing __free_pages() from doing its job,
	 * the folio_put() at the end of this function will take care of
	 * releasing this page, thus avoiding a nasty leakage.
	 */
	if (!folio)
		goto out;

	if (unlikely(folio_test_slab(folio)))
		goto out_putfolio;
	/* Pairs with smp_wmb() in slab freeing, e.g. SLUB's __free_slab() */
	smp_rmb();
	/*
	 * Check movable flag before taking the page lock because
	 * we use non-atomic bitops on newly allocated page flags so
	 * unconditionally grabbing the lock ruins page's owner side.
	 */
	if (unlikely(!__folio_test_movable(folio)))
		goto out_putfolio;
	/* Pairs with smp_wmb() in slab allocation, e.g. SLUB's alloc_slab_page() */
	smp_rmb();
	if (unlikely(folio_test_slab(folio)))
		goto out_putfolio;

	/*
	 * As movable pages are not isolated from LRU lists, concurrent
	 * compaction threads can race against page migration functions
	 * as well as race against releasing a page.
	 *
	 * In order to avoid having an already isolated movable page
	 * being (wrongly) re-isolated while it is under migration,
	 * or to avoid attempting to isolate pages being released,
	 * let's be sure we have the page lock
	 * before proceeding with the movable page isolation steps.
	 */
	if (unlikely(!folio_trylock(folio)))
		goto out_putfolio;

	if (!folio_test_movable(folio) || folio_test_isolated(folio))
		goto out_no_isolated;

	mops = folio_movable_ops(folio);
	VM_BUG_ON_FOLIO(!mops, folio);

	if (!mops->isolate_page(&folio->page, mode))
		goto out_no_isolated;

	/* Driver shouldn't use PG_isolated bit of page->flags */
	WARN_ON_ONCE(folio_test_isolated(folio));
	folio_set_isolated(folio);
	folio_unlock(folio);

	return true;

out_no_isolated:
	folio_unlock(folio);
out_putfolio:
	folio_put(folio);
out:
	return false;
}
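
/*
 * Illustrative sketch (hypothetical names, not kernel code): a driver that
 * wants its pages handled by the hooks used above provides a
 * movable_operations instance, roughly:
 *
 *	static bool my_isolate(struct page *page, isolate_mode_t mode)
 *	{ ... pin driver state; return true on success ... }
 *
 *	static int my_migrate(struct page *dst, struct page *src,
 *			      enum migrate_mode mode)
 *	{ ... move contents/state; return MIGRATEPAGE_SUCCESS ... }
 *
 *	static void my_putback(struct page *page)
 *	{ ... undo my_isolate() when migration is aborted ... }
 *
 *	static const struct movable_operations my_mops = {
 *		.isolate_page	= my_isolate,
 *		.migrate_page	= my_migrate,
 *		.putback_page	= my_putback,
 *	};
 *
 * isolate_movable_page() invokes ->isolate_page(); later either
 * move_to_new_folio() calls ->migrate_page() or putback_movable_folio()
 * calls ->putback_page().
 */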

static void putback_movable_folio(struct folio *folio)
{
	const struct movable_operations *mops = folio_movable_ops(folio);

	mops->putback_page(&folio->page);
	folio_clear_isolated(folio);
}

/*
 * Put previously isolated pages back onto the appropriate lists
 * from where they were once taken off for compaction/migration.
 *
 * This function shall be used whenever the isolated pageset has been
 * built from LRU, balloon or hugetlbfs pages. See isolate_migratepages_range()
 * and isolate_hugetlb().
 */
void putback_movable_pages(struct list_head *l)
{
	struct folio *folio;
	struct folio *folio2;

	list_for_each_entry_safe(folio, folio2, l, lru) {
		if (unlikely(folio_test_hugetlb(folio))) {
			folio_putback_active_hugetlb(folio);
			continue;
		}
		list_del(&folio->lru);
		/*
		 * We isolated a non-LRU movable folio, so here we can use
		 * __folio_test_movable because an LRU folio's mapping cannot
		 * have PAGE_MAPPING_MOVABLE.
		 */
		if (unlikely(__folio_test_movable(folio))) {
			VM_BUG_ON_FOLIO(!folio_test_isolated(folio), folio);
			folio_lock(folio);
			if (folio_test_movable(folio))
				putback_movable_folio(folio);
			else
				folio_clear_isolated(folio);
			folio_unlock(folio);
			folio_put(folio);
		} else {
			node_stat_mod_folio(folio, NR_ISOLATED_ANON +
					folio_is_file_lru(folio), -folio_nr_pages(folio));
			folio_putback_lru(folio);
		}
	}
}

/*
 * Restore a potential migration pte to a working pte entry
 */
static bool remove_migration_pte(struct folio *folio,
		struct vm_area_struct *vma, unsigned long addr, void *old)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION);

	while (page_vma_mapped_walk(&pvmw)) {
		rmap_t rmap_flags = RMAP_NONE;
		pte_t old_pte;
		pte_t pte;
		swp_entry_t entry;
		struct page *new;
		unsigned long idx = 0;

		/* pgoff is invalid for ksm pages, but they are never large */
		if (folio_test_large(folio) && !folio_test_hugetlb(folio))
			idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff;
		new = folio_page(folio, idx);

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
		/* PMD-mapped THP migration entry */
		if (!pvmw.pte) {
			VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
					!folio_test_pmd_mappable(folio), folio);
			remove_migration_pmd(&pvmw, new);
			continue;
		}
#endif

		folio_get(folio);
		pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
		old_pte = ptep_get(pvmw.pte);
		if (pte_swp_soft_dirty(old_pte))
			pte = pte_mksoft_dirty(pte);

		entry = pte_to_swp_entry(old_pte);
		if (!is_migration_entry_young(entry))
			pte = pte_mkold(pte);
		if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
			pte = pte_mkdirty(pte);
		if (is_writable_migration_entry(entry))
			pte = pte_mkwrite(pte, vma);
		else if (pte_swp_uffd_wp(old_pte))
			pte = pte_mkuffd_wp(pte);

		if (folio_test_anon(folio) && !is_readable_migration_entry(entry))
			rmap_flags |= RMAP_EXCLUSIVE;

		if (unlikely(is_device_private_page(new))) {
			if (pte_write(pte))
				entry = make_writable_device_private_entry(
							page_to_pfn(new));
			else
				entry = make_readable_device_private_entry(
							page_to_pfn(new));
			pte = swp_entry_to_pte(entry);
			if (pte_swp_soft_dirty(old_pte))
				pte = pte_swp_mksoft_dirty(pte);
			if (pte_swp_uffd_wp(old_pte))
				pte = pte_swp_mkuffd_wp(pte);
		}

#ifdef CONFIG_HUGETLB_PAGE
		if (folio_test_hugetlb(folio)) {
			struct hstate *h = hstate_vma(vma);
			unsigned int shift = huge_page_shift(h);
			unsigned long psize = huge_page_size(h);

			pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
			if (folio_test_anon(folio))
				hugetlb_add_anon_rmap(folio, vma, pvmw.address,
						      rmap_flags);
			else
				hugetlb_add_file_rmap(folio);
			set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte,
					psize);
		} else
#endif
		{
			if (folio_test_anon(folio))
				folio_add_anon_rmap_pte(folio, new, vma,
							pvmw.address, rmap_flags);
			else
				folio_add_file_rmap_pte(folio, new, vma);
			set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
		}
		if (vma->vm_flags & VM_LOCKED)
			mlock_drain_local();

		trace_remove_migration_pte(pvmw.address, pte_val(pte),
					   compound_order(new));

		/* No need to invalidate - it was non-present before */
		update_mmu_cache(vma, pvmw.address, pvmw.pte);
	}

	return true;
}

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked)
{
	struct rmap_walk_control rwc = {
		.rmap_one = remove_migration_pte,
		.arg = src,
	};

	if (locked)
		rmap_walk_locked(dst, &rwc);
	else
		rmap_walk(dst, &rwc);
}

/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 */
void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
			  unsigned long address)
{
	spinlock_t *ptl;
	pte_t *ptep;
	pte_t pte;
	swp_entry_t entry;

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (!ptep)
		return;

	pte = ptep_get(ptep);
	pte_unmap(ptep);

	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	migration_entry_wait_on_locked(entry, ptl);
	return;
out:
	spin_unlock(ptl);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * The vma read lock must be held upon entry. Holding that lock prevents either
 * the pte or the ptl from being freed.
 *
 * This function will release the vma lock before returning.
 */
void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *ptep)
{
	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, ptep);
	pte_t pte;

	hugetlb_vma_assert_locked(vma);
	spin_lock(ptl);
	pte = huge_ptep_get(ptep);

	if (unlikely(!is_hugetlb_entry_migration(pte))) {
		spin_unlock(ptl);
		hugetlb_vma_unlock_read(vma);
	} else {
		/*
		 * If migration entry existed, safe to release vma lock
		 * here because the pgtable page won't be freed without the
		 * pgtable lock released. See comment right above pgtable
		 * lock release in migration_entry_wait_on_locked().
		 */
		hugetlb_vma_unlock_read(vma);
		migration_entry_wait_on_locked(pte_to_swp_entry(pte), ptl);
	}
}
#endif

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl;

	ptl = pmd_lock(mm, pmd);
	if (!is_pmd_migration_entry(*pmd))
		goto unlock;
	migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), ptl);
	return;
unlock:
	spin_unlock(ptl);
}
#endif

static int folio_expected_refs(struct address_space *mapping,
		struct folio *folio)
{
	int refs = 1;
	if (!mapping)
		return refs;

	refs += folio_nr_pages(folio);
	if (folio_test_private(folio))
		refs++;

	return refs;
}
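
/*
 * Worked example (illustrative): for a single-page pagecache folio with
 * PG_private set, folio_expected_refs() returns 1 (isolation) + 1 (page
 * cache) + 1 (private) = 3, matching the "3 for pages with a mapping and
 * PagePrivate" rule documented below. An anonymous folio with no mapping
 * is expected at just 1.
 */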

/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 * 1 for anonymous pages without a mapping
 * 2 for pages with a mapping
 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
 */
int folio_migrate_mapping(struct address_space *mapping,
		struct folio *newfolio, struct folio *folio, int extra_count)
{
	XA_STATE(xas, &mapping->i_pages, folio_index(folio));
	struct zone *oldzone, *newzone;
	int dirty;
	int expected_count = folio_expected_refs(mapping, folio) + extra_count;
	long nr = folio_nr_pages(folio);
	long entries, i;

	if (!mapping) {
		/* Anonymous page without mapping */
		if (folio_ref_count(folio) != expected_count)
			return -EAGAIN;

		/* No turning back from here */
		newfolio->index = folio->index;
		newfolio->mapping = folio->mapping;
		if (folio_test_swapbacked(folio))
			__folio_set_swapbacked(newfolio);

		return MIGRATEPAGE_SUCCESS;
	}

	oldzone = folio_zone(folio);
	newzone = folio_zone(newfolio);

	xas_lock_irq(&xas);
	if (!folio_ref_freeze(folio, expected_count)) {
		xas_unlock_irq(&xas);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the folio:
	 * no turning back from here.
	 */
	newfolio->index = folio->index;
	newfolio->mapping = folio->mapping;
	folio_ref_add(newfolio, nr); /* add cache reference */
	if (folio_test_swapbacked(folio)) {
		__folio_set_swapbacked(newfolio);
		if (folio_test_swapcache(folio)) {
			folio_set_swapcache(newfolio);
			newfolio->private = folio_get_private(folio);
		}
		entries = nr;
	} else {
		VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
		entries = 1;
	}

	/* Move dirty while page refs frozen and newpage not yet exposed */
	dirty = folio_test_dirty(folio);
	if (dirty) {
		folio_clear_dirty(folio);
		folio_set_dirty(newfolio);
	}

	/* Swap cache still stores N entries instead of a high-order entry */
	for (i = 0; i < entries; i++) {
		xas_store(&xas, newfolio);
		xas_next(&xas);
	}

	/*
	 * Drop cache reference from old page by unfreezing
	 * to one less reference.
	 * We know this isn't the last reference.
	 */
	folio_ref_unfreeze(folio, expected_count - nr);

	xas_unlock(&xas);
	/* Leave irq disabled to prevent preemption while updating stats */

	/*
	 * If moved to a different zone then also account
	 * the page for that zone. Other VM counters will be
	 * taken care of when we establish references to the
	 * new page and drop references to the old page.
	 *
	 * Note that anonymous pages are accounted for
	 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
	 * are mapped to swap space.
	 */
	if (newzone != oldzone) {
		struct lruvec *old_lruvec, *new_lruvec;
		struct mem_cgroup *memcg;

		memcg = folio_memcg(folio);
		old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
		new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);

		__mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
		__mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
		if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
			__mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
			__mod_lruvec_state(new_lruvec, NR_SHMEM, nr);

			if (folio_test_pmd_mappable(folio)) {
				__mod_lruvec_state(old_lruvec, NR_SHMEM_THPS, -nr);
				__mod_lruvec_state(new_lruvec, NR_SHMEM_THPS, nr);
			}
		}
#ifdef CONFIG_SWAP
		if (folio_test_swapcache(folio)) {
			__mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
			__mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
		}
#endif
		if (dirty && mapping_can_writeback(mapping)) {
			__mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
			__mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
			__mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
			__mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
		}
	}
	local_irq_enable();

	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(folio_migrate_mapping);

/*
 * The expected number of remaining references is the same as that
 * of folio_migrate_mapping().
 */
int migrate_huge_page_move_mapping(struct address_space *mapping,
				   struct folio *dst, struct folio *src)
{
	XA_STATE(xas, &mapping->i_pages, folio_index(src));
	int expected_count;

	xas_lock_irq(&xas);
	expected_count = folio_expected_refs(mapping, src);
	if (!folio_ref_freeze(src, expected_count)) {
		xas_unlock_irq(&xas);
		return -EAGAIN;
	}

	dst->index = src->index;
	dst->mapping = src->mapping;

	folio_ref_add(dst, folio_nr_pages(dst));

	xas_store(&xas, dst);

	folio_ref_unfreeze(src, expected_count - folio_nr_pages(src));

	xas_unlock_irq(&xas);

	return MIGRATEPAGE_SUCCESS;
}

/*
 * Copy the flags and some other ancillary information
 */
void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
{
	int cpupid;

	if (folio_test_error(folio))
		folio_set_error(newfolio);
	if (folio_test_referenced(folio))
		folio_set_referenced(newfolio);
	if (folio_test_uptodate(folio))
		folio_mark_uptodate(newfolio);
	if (folio_test_clear_active(folio)) {
		VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio);
		folio_set_active(newfolio);
	} else if (folio_test_clear_unevictable(folio))
		folio_set_unevictable(newfolio);
	if (folio_test_workingset(folio))
		folio_set_workingset(newfolio);
	if (folio_test_checked(folio))
		folio_set_checked(newfolio);
	/*
	 * PG_anon_exclusive (-> PG_mappedtodisk) is always migrated via
	 * migration entries. We can still have PG_anon_exclusive set on an
	 * effectively unmapped and unreferenced first sub-page of an
	 * anonymous THP: we can simply copy it here via PG_mappedtodisk.
	 */
	if (folio_test_mappedtodisk(folio))
		folio_set_mappedtodisk(newfolio);

	/* Move dirty on pages not done by folio_migrate_mapping() */
	if (folio_test_dirty(folio))
		folio_set_dirty(newfolio);

	if (folio_test_young(folio))
		folio_set_young(newfolio);
	if (folio_test_idle(folio))
		folio_set_idle(newfolio);

	/*
	 * Copy NUMA information to the new page, to prevent over-eager
	 * future migrations of this same page.
	 */
	cpupid = folio_xchg_last_cpupid(folio, -1);
	/*
	 * For memory tiering mode, when migrating between slow and fast
	 * memory nodes, reset cpupid, because that is used to record
	 * page access time in slow memory nodes.
	 */
	if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) {
		bool f_toptier = node_is_toptier(folio_nid(folio));
		bool t_toptier = node_is_toptier(folio_nid(newfolio));

		if (f_toptier != t_toptier)
			cpupid = -1;
	}
	folio_xchg_last_cpupid(newfolio, cpupid);

	folio_migrate_ksm(newfolio, folio);
	/*
	 * Please do not reorder this without considering how mm/ksm.c's
	 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
	 */
	if (folio_test_swapcache(folio))
		folio_clear_swapcache(folio);
	folio_clear_private(folio);

	/* page->private contains hugetlb specific flags */
	if (!folio_test_hugetlb(folio))
		folio->private = NULL;

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (folio_test_writeback(newfolio))
		folio_end_writeback(newfolio);

	/*
	 * PG_readahead shares the same bit with PG_reclaim. The above
	 * folio_end_writeback() may clear PG_readahead mistakenly, so set
	 * the bit after that.
	 */
	if (folio_test_readahead(folio))
		folio_set_readahead(newfolio);

	folio_copy_owner(newfolio, folio);

	mem_cgroup_migrate(folio, newfolio);
}
EXPORT_SYMBOL(folio_migrate_flags);

void folio_migrate_copy(struct folio *newfolio, struct folio *folio)
{
	folio_copy(newfolio, folio);
	folio_migrate_flags(newfolio, folio);
}
EXPORT_SYMBOL(folio_migrate_copy);

/************************************************************
 * Migration functions
 ***********************************************************/

int migrate_folio_extra(struct address_space *mapping, struct folio *dst,
		struct folio *src, enum migrate_mode mode, int extra_count)
{
	int rc;

	BUG_ON(folio_test_writeback(src));	/* Writeback must be complete */

	rc = folio_migrate_mapping(mapping, dst, src, extra_count);

	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(dst, src);
	else
		folio_migrate_flags(dst, src);
	return MIGRATEPAGE_SUCCESS;
}

/**
 * migrate_folio() - Simple folio migration.
 * @mapping: The address_space containing the folio.
 * @dst: The folio to migrate the data to.
 * @src: The folio containing the current data.
 * @mode: How to migrate the page.
 *
 * Common logic to directly migrate a single LRU folio suitable for
 * folios that do not use PagePrivate/PagePrivate2.
 *
 * Folios are locked upon entry and exit.
 */
int migrate_folio(struct address_space *mapping, struct folio *dst,
		struct folio *src, enum migrate_mode mode)
{
	return migrate_folio_extra(mapping, dst, src, mode, 0);
}
EXPORT_SYMBOL(migrate_folio);
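
/*
 * Illustrative sketch (hypothetical "myfs" name): a filesystem whose folios
 * carry no private data can typically wire migrate_folio() straight into its
 * address_space_operations:
 *
 *	static const struct address_space_operations myfs_aops = {
 *		...
 *		.migrate_folio	= migrate_folio,
 *	};
 */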

#ifdef CONFIG_BUFFER_HEAD
/* Returns true if all buffers are successfully locked */
static bool buffer_migrate_lock_buffers(struct buffer_head *head,
		enum migrate_mode mode)
{
	struct buffer_head *bh = head;
	struct buffer_head *failed_bh;

	do {
		if (!trylock_buffer(bh)) {
			if (mode == MIGRATE_ASYNC)
				goto unlock;
			if (mode == MIGRATE_SYNC_LIGHT && !buffer_uptodate(bh))
				goto unlock;
			lock_buffer(bh);
		}

		bh = bh->b_this_page;
	} while (bh != head);

	return true;

unlock:
	/* We failed to lock the buffer and cannot stall. */
	failed_bh = bh;
	bh = head;
	while (bh != failed_bh) {
		unlock_buffer(bh);
		bh = bh->b_this_page;
	}

	return false;
}

static int __buffer_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode,
		bool check_refs)
{
	struct buffer_head *bh, *head;
	int rc;
	int expected_count;

	head = folio_buffers(src);
	if (!head)
		return migrate_folio(mapping, dst, src, mode);

	/* Check that the folio does not have extra refs before we do more work */
	expected_count = folio_expected_refs(mapping, src);
	if (folio_ref_count(src) != expected_count)
		return -EAGAIN;

	if (!buffer_migrate_lock_buffers(head, mode))
		return -EAGAIN;

	if (check_refs) {
		bool busy;
		bool invalidated = false;

recheck_buffers:
		busy = false;
		spin_lock(&mapping->i_private_lock);
		bh = head;
		do {
			if (atomic_read(&bh->b_count)) {
				busy = true;
				break;
			}
			bh = bh->b_this_page;
		} while (bh != head);
		if (busy) {
			if (invalidated) {
				rc = -EAGAIN;
				goto unlock_buffers;
			}
			spin_unlock(&mapping->i_private_lock);
			invalidate_bh_lrus();
			invalidated = true;
			goto recheck_buffers;
		}
	}

	rc = folio_migrate_mapping(mapping, dst, src, 0);
	if (rc != MIGRATEPAGE_SUCCESS)
		goto unlock_buffers;

	folio_attach_private(dst, folio_detach_private(src));

	bh = head;
	do {
		folio_set_bh(bh, dst, bh_offset(bh));
		bh = bh->b_this_page;
	} while (bh != head);

	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(dst, src);
	else
		folio_migrate_flags(dst, src);

	rc = MIGRATEPAGE_SUCCESS;
unlock_buffers:
	if (check_refs)
		spin_unlock(&mapping->i_private_lock);
	bh = head;
	do {
		unlock_buffer(bh);
		bh = bh->b_this_page;
	} while (bh != head);

	return rc;
}

/**
 * buffer_migrate_folio() - Migration function for folios with buffers.
 * @mapping: The address space containing @src.
 * @dst: The folio to migrate to.
 * @src: The folio to migrate from.
 * @mode: How to migrate the folio.
 *
 * This function can only be used if the underlying filesystem guarantees
 * that no other references to @src exist. For example attached buffer
 * heads are accessed only under the folio lock. If your filesystem cannot
 * provide this guarantee, buffer_migrate_folio_norefs() may be more
 * appropriate.
 *
 * Return: 0 on success or a negative errno on failure.
 */
int buffer_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	return __buffer_migrate_folio(mapping, dst, src, mode, false);
}
EXPORT_SYMBOL(buffer_migrate_folio);

/**
 * buffer_migrate_folio_norefs() - Migration function for folios with buffers.
 * @mapping: The address space containing @src.
 * @dst: The folio to migrate to.
 * @src: The folio to migrate from.
 * @mode: How to migrate the folio.
 *
 * Like buffer_migrate_folio() except that this variant is more careful
 * and checks that there are also no buffer head references. This function
 * is the right one for mappings where buffer heads are directly looked
 * up and referenced (such as block device mappings).
 *
 * Return: 0 on success or a negative errno on failure.
 */
int buffer_migrate_folio_norefs(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	return __buffer_migrate_folio(mapping, dst, src, mode, true);
}
EXPORT_SYMBOL_GPL(buffer_migrate_folio_norefs);
#endif /* CONFIG_BUFFER_HEAD */

int filemap_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	int ret;

	ret = folio_migrate_mapping(mapping, dst, src, 0);
	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	if (folio_get_private(src))
		folio_attach_private(dst, folio_detach_private(src));

	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(dst, src);
	else
		folio_migrate_flags(dst, src);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL_GPL(filemap_migrate_folio);

/*
 * Write back a folio to clean the dirty state.
 */
static int writeout(struct address_space *mapping, struct folio *folio)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1
	};
	int rc;

	if (!mapping->a_ops->writepage)
		/* No write method for the address space */
		return -EINVAL;

	if (!folio_clear_dirty_for_io(folio))
		/* Someone else already triggered a write */
		return -EAGAIN;

	/*
	 * A dirty folio may imply that the underlying filesystem has
	 * the folio on some queue. So the folio must be clean for
	 * migration. Writeout may mean we lose the lock and the
	 * folio state is no longer what we checked for earlier.
	 * At this point we know that the migration attempt cannot
	 * be successful.
	 */
	remove_migration_ptes(folio, folio, false);

	rc = mapping->a_ops->writepage(&folio->page, &wbc);

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		/* unlocked. Relock */
		folio_lock(folio);

	return (rc < 0) ? -EIO : -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	if (folio_test_dirty(src)) {
		/* Only write back folios in full synchronous migration */
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			return -EBUSY;
		}
		return writeout(mapping, src);
	}

	/*
	 * Buffers may be managed in a filesystem-specific way.
	 * We must have no buffers or drop them.
	 */
	if (!filemap_release_folio(src, GFP_KERNEL))
		return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;

	return migrate_folio(mapping, dst, src, mode);
}

/*
 * Move a page to a newly allocated page.
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 *
 * Return value:
 *   < 0 - error code
 *  MIGRATEPAGE_SUCCESS - success
 */
static int move_to_new_folio(struct folio *dst, struct folio *src,
				enum migrate_mode mode)
{
	int rc = -EAGAIN;
	bool is_lru = !__folio_test_movable(src);

	VM_BUG_ON_FOLIO(!folio_test_locked(src), src);
	VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst);

	if (likely(is_lru)) {
		struct address_space *mapping = folio_mapping(src);

		if (!mapping)
			rc = migrate_folio(mapping, dst, src, mode);
		else if (mapping_unmovable(mapping))
			rc = -EOPNOTSUPP;
		else if (mapping->a_ops->migrate_folio)
			/*
			 * Most folios have a mapping and most filesystems
			 * provide a migrate_folio callback. Anonymous folios
			 * are part of swap space which also has its own
			 * migrate_folio callback. This is the most common path
			 * for page migration.
			 */
			rc = mapping->a_ops->migrate_folio(mapping, dst, src,
								mode);
		else
			rc = fallback_migrate_folio(mapping, dst, src, mode);
	} else {
		const struct movable_operations *mops;

		/*
		 * In the case of a non-LRU page, it could have been released
		 * after the isolation step. In that case, we shouldn't try
		 * migration.
		 */
		VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
		if (!folio_test_movable(src)) {
			rc = MIGRATEPAGE_SUCCESS;
			folio_clear_isolated(src);
			goto out;
		}

		mops = folio_movable_ops(src);
		rc = mops->migrate_page(&dst->page, &src->page, mode);
		WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
				!folio_test_isolated(src));
	}

	/*
	 * When successful, old pagecache src->mapping must be cleared before
	 * src is freed; but stats require that PageAnon be left as PageAnon.
	 */
	if (rc == MIGRATEPAGE_SUCCESS) {
		if (__folio_test_movable(src)) {
			VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);

			/*
			 * We clear PG_movable under page_lock so any compactor
			 * cannot try to migrate this page.
			 */
			folio_clear_isolated(src);
		}

		/*
		 * Anonymous and movable src->mapping will be cleared by
		 * free_pages_prepare, so don't reset it here; this keeps
		 * the mapping type intact (so that e.g. PageAnon still works).
		 */
		if (!folio_mapping_flags(src))
			src->mapping = NULL;

		if (likely(!folio_is_zone_device(dst)))
			flush_dcache_folio(dst);
	}
out:
	return rc;
}

/*
 * To record some information during migration, we use the otherwise unused
 * private field of the struct folio of the newly allocated destination folio.
 * This is safe because nobody is using it except us.
 */
enum {
	PAGE_WAS_MAPPED = BIT(0),
	PAGE_WAS_MLOCKED = BIT(1),
	PAGE_OLD_STATES = PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED,
};

static void __migrate_folio_record(struct folio *dst,
				   int old_page_state,
				   struct anon_vma *anon_vma)
{
	dst->private = (void *)anon_vma + old_page_state;
}

static void __migrate_folio_extract(struct folio *dst,
				    int *old_page_state,
				    struct anon_vma **anon_vmap)
{
	unsigned long private = (unsigned long)dst->private;

	*anon_vmap = (struct anon_vma *)(private & ~PAGE_OLD_STATES);
	*old_page_state = private & PAGE_OLD_STATES;
	dst->private = NULL;
}
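
/*
 * Illustrative round trip (assuming a pointer-aligned anon_vma, which is
 * what lets the low PAGE_OLD_STATES bits ride in the same word):
 *
 *	__migrate_folio_record(dst, PAGE_WAS_MAPPED, anon_vma);
 *	...
 *	__migrate_folio_extract(dst, &old_page_state, &anon_vma);
 *	// now old_page_state == PAGE_WAS_MAPPED and anon_vma is as recorded
 */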

/* Restore the source folio to the original state upon failure */
static void migrate_folio_undo_src(struct folio *src,
				   int page_was_mapped,
				   struct anon_vma *anon_vma,
				   bool locked,
				   struct list_head *ret)
{
	if (page_was_mapped)
		remove_migration_ptes(src, src, false);
	/* Drop an anon_vma reference if we took one */
	if (anon_vma)
		put_anon_vma(anon_vma);
	if (locked)
		folio_unlock(src);
	if (ret)
		list_move_tail(&src->lru, ret);
}

/* Restore the destination folio to the original state upon failure */
static void migrate_folio_undo_dst(struct folio *dst, bool locked,
				   free_folio_t put_new_folio, unsigned long private)
{
	if (locked)
		folio_unlock(dst);
	if (put_new_folio)
		put_new_folio(dst, private);
	else
		folio_put(dst);
}

/* Cleanup src folio upon migration success */
static void migrate_folio_done(struct folio *src,
			       enum migrate_reason reason)
{
	/*
	 * Compaction can also migrate non-LRU pages, which are
	 * not accounted to NR_ISOLATED_*. They can be recognized
	 * via __folio_test_movable().
	 */
	if (likely(!__folio_test_movable(src)))
		mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON +
				    folio_is_file_lru(src), -folio_nr_pages(src));

	if (reason != MR_MEMORY_FAILURE)
		/* For memory failure, the folio is released in page_handle_poison() instead. */
		folio_put(src);
}

/* Obtain the lock on the folio, remove all ptes. */
static int migrate_folio_unmap(new_folio_t get_new_folio,
		free_folio_t put_new_folio, unsigned long private,
		struct folio *src, struct folio **dstp, enum migrate_mode mode,
		enum migrate_reason reason, struct list_head *ret)
{
	struct folio *dst;
	int rc = -EAGAIN;
	int old_page_state = 0;
	struct anon_vma *anon_vma = NULL;
	bool is_lru = !__folio_test_movable(src);
	bool locked = false;
	bool dst_locked = false;

	if (folio_ref_count(src) == 1) {
		/* Folio was freed from under us. So we are done. */
		folio_clear_active(src);
		folio_clear_unevictable(src);
		/* free_pages_prepare() will clear PG_isolated. */
		list_del(&src->lru);
		migrate_folio_done(src, reason);
		return MIGRATEPAGE_SUCCESS;
	}

	dst = get_new_folio(src, private);
	if (!dst)
		return -ENOMEM;
	*dstp = dst;

	dst->private = NULL;

	if (!folio_trylock(src)) {
		if (mode == MIGRATE_ASYNC)
			goto out;

		/*
		 * It's not safe for direct compaction to call lock_page.
		 * For example, during page readahead pages are added locked
		 * to the LRU. Later, when the IO completes the pages are
		 * marked uptodate and unlocked. However, the queueing
		 * could be merging multiple pages for one bio (e.g.
		 * mpage_readahead). If an allocation happens for the
		 * second or third page, the process can end up locking
		 * the same page twice and deadlocking. Rather than
		 * trying to be clever about what pages can be locked,
		 * avoid the use of lock_page for direct compaction
		 * altogether.
		 */
		if (current->flags & PF_MEMALLOC)
			goto out;

		/*
		 * In "light" mode, we can wait for transient locks (eg
		 * inserting a page into the page table), but it's not
		 * worth waiting for I/O.
		 */
		if (mode == MIGRATE_SYNC_LIGHT && !folio_test_uptodate(src))
			goto out;

		folio_lock(src);
	}
	locked = true;
	if (folio_test_mlocked(src))
		old_page_state |= PAGE_WAS_MLOCKED;

	if (folio_test_writeback(src)) {
		/*
		 * Only in the case of a full synchronous migration is it
		 * necessary to wait for PageWriteback. In the async case,
		 * the retry loop is too short and in the sync-light case,
		 * the overhead of stalling is too much.
		 */
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			rc = -EBUSY;
			goto out;
		}
		folio_wait_writeback(src);
	}

	/*
	 * By try_to_migrate(), src->mapcount goes down to 0 here. In that case,
	 * we cannot notice that anon_vma has been freed while we migrate a page.
	 * This folio_get_anon_vma() delays freeing the anon_vma pointer until
	 * the end of migration. File cache pages are no problem because of
	 * page_lock(): file caches may use writepage() or lock_page() during
	 * migration, so only anon pages need care here.
	 *
	 * Only folio_get_anon_vma() understands the subtleties of
	 * getting a hold on an anon_vma from outside one of its mms.
	 * But if we cannot get anon_vma, then we won't need it anyway,
	 * because that implies that the anon page is no longer mapped
	 * (and cannot be remapped so long as we hold the page lock).
	 */
	if (folio_test_anon(src) && !folio_test_ksm(src))
		anon_vma = folio_get_anon_vma(src);

	/*
	 * Block others from accessing the new page when we get around to
	 * establishing additional references. We are usually the only one
	 * holding a reference to dst at this point. We used to have a BUG
	 * here if folio_trylock(dst) fails, but would like to allow for
	 * cases where there might be a race with the previous use of dst.
	 * This is much like races on refcount of oldpage: just don't BUG().
	 */
	if (unlikely(!folio_trylock(dst)))
		goto out;
	dst_locked = true;

	if (unlikely(!is_lru)) {
		__migrate_folio_record(dst, old_page_state, anon_vma);
		return MIGRATEPAGE_UNMAP;
	}

	/*
	 * Corner case handling:
	 * 1. When a new swap-cache page is read in, it is added to the LRU
	 * and treated as swapcache but it has no rmap yet.
	 * Calling try_to_unmap() against a src->mapping==NULL page will
	 * trigger a BUG. So handle it here.
	 * 2. An orphaned page (see truncate_cleanup_page) might have
	 * fs-private metadata. The page can be picked up due to memory
	 * offlining. Everywhere else except page reclaim, the page is
	 * invisible to the vm, so the page cannot be migrated. So try to
	 * free the metadata, so the page can be freed.
	 */
	if (!src->mapping) {
		if (folio_test_private(src)) {
			try_to_free_buffers(src);
			goto out;
		}
	} else if (folio_mapped(src)) {
		/* Establish migration ptes */
		VM_BUG_ON_FOLIO(folio_test_anon(src) &&
			       !folio_test_ksm(src) && !anon_vma, src);
		try_to_migrate(src, mode == MIGRATE_ASYNC ? TTU_BATCH_FLUSH : 0);
		old_page_state |= PAGE_WAS_MAPPED;
	}

	if (!folio_mapped(src)) {
		__migrate_folio_record(dst, old_page_state, anon_vma);
		return MIGRATEPAGE_UNMAP;
	}

out:
	/*
	 * A folio that has not been unmapped will be restored to
	 * the right list unless we want to retry.
	 */
	if (rc == -EAGAIN)
		ret = NULL;

	migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
			       anon_vma, locked, ret);
	migrate_folio_undo_dst(dst, dst_locked, put_new_folio, private);

	return rc;
}

/* Migrate the folio to the newly allocated folio in dst. */
static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
			      struct folio *src, struct folio *dst,
			      enum migrate_mode mode, enum migrate_reason reason,
			      struct list_head *ret)
{
	int rc;
	int old_page_state = 0;
	struct anon_vma *anon_vma = NULL;
	bool is_lru = !__folio_test_movable(src);
	struct list_head *prev;

	__migrate_folio_extract(dst, &old_page_state, &anon_vma);
	prev = dst->lru.prev;
	list_del(&dst->lru);

	rc = move_to_new_folio(dst, src, mode);
	if (rc)
		goto out;

	if (unlikely(!is_lru))
		goto out_unlock_both;

	/*
	 * When successful, push dst to LRU immediately: so that if it
	 * turns out to be an mlocked page, remove_migration_ptes() will
	 * automatically build up the correct dst->mlock_count for it.
	 *
	 * We would like to do something similar for the old page, when
	 * unsuccessful, and other cases when a page has been temporarily
	 * isolated from the unevictable LRU: but this case is the easiest.
	 */
	folio_add_lru(dst);
	if (old_page_state & PAGE_WAS_MLOCKED)
		lru_add_drain();

	if (old_page_state & PAGE_WAS_MAPPED)
		remove_migration_ptes(src, dst, false);

out_unlock_both:
	folio_unlock(dst);
	set_page_owner_migrate_reason(&dst->page, reason);
	/*
	 * If migration is successful, decrease refcount of dst,
	 * which will not free the page because the new page owner increased
	 * the refcounter.
	 */
	folio_put(dst);

	/*
	 * A folio that has been migrated has all references removed
	 * and will be freed.
	 */
	list_del(&src->lru);
	/* Drop an anon_vma reference if we took one */
	if (anon_vma)
		put_anon_vma(anon_vma);
	folio_unlock(src);
	migrate_folio_done(src, reason);

	return rc;
out:
	/*
	 * A folio that has not been migrated will be restored to
	 * the right list unless we want to retry.
	 */
	if (rc == -EAGAIN) {
		list_add(&dst->lru, prev);
		__migrate_folio_record(dst, old_page_state, anon_vma);
		return rc;
	}

	migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
			       anon_vma, true, ret);
	migrate_folio_undo_dst(dst, true, put_new_folio, private);

	return rc;
}

/*
 * Counterpart of unmap_and_move_page() for hugepage migration.
 *
 * This function doesn't wait for the completion of hugepage I/O
 * because there is no race between I/O and migration for hugepage.
 * Note that currently hugepage I/O occurs only in direct I/O
 * where no lock is held and PG_writeback is irrelevant,
 * and the writeback status of all subpages is counted in the reference
 * count of the head page (i.e. if all subpages of a 2MB hugepage are
 * under direct I/O, the reference of the head page is 512 and a bit more.)
 * This means that when we try to migrate a hugepage whose subpages are
 * doing direct I/O, some references remain after try_to_unmap() and
 * hugepage migration fails without data corruption.
 *
 * There is also no race when direct I/O is issued on the page under migration,
 * because then the pte is replaced with a migration swap entry and the direct
 * I/O code will wait in the page fault for migration to complete.
 */
static int unmap_and_move_huge_page(new_folio_t get_new_folio,
		free_folio_t put_new_folio, unsigned long private,
		struct folio *src, int force, enum migrate_mode mode,
		int reason, struct list_head *ret)
{
	struct folio *dst;
	int rc = -EAGAIN;
	int page_was_mapped = 0;
	struct anon_vma *anon_vma = NULL;
	struct address_space *mapping = NULL;

	if (folio_ref_count(src) == 1) {
		/* page was freed from under us. So we are done. */
		folio_putback_active_hugetlb(src);
		return MIGRATEPAGE_SUCCESS;
	}

	dst = get_new_folio(src, private);
	if (!dst)
		return -ENOMEM;

	if (!folio_trylock(src)) {
		if (!force)
			goto out;
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			goto out;
		}
		folio_lock(src);
	}

	/*
	 * Check for pages which are in the process of being freed. Without
	 * folio_mapping() set, hugetlbfs specific move page routine will not
	 * be called and we could leak usage counts for subpools.
	 */
	if (hugetlb_folio_subpool(src) && !folio_mapping(src)) {
		rc = -EBUSY;
		goto out_unlock;
	}

	if (folio_test_anon(src))
		anon_vma = folio_get_anon_vma(src);

	if (unlikely(!folio_trylock(dst)))
		goto put_anon;

	if (folio_mapped(src)) {
		enum ttu_flags ttu = 0;

		if (!folio_test_anon(src)) {
			/*
			 * In shared mappings, try_to_unmap could potentially
			 * call huge_pmd_unshare. Because of this, take
			 * semaphore in write mode here and set TTU_RMAP_LOCKED
			 * to let lower levels know we have taken the lock.
			 */
			mapping = hugetlb_page_mapping_lock_write(&src->page);
			if (unlikely(!mapping))
				goto unlock_put_anon;

			ttu = TTU_RMAP_LOCKED;
		}

		try_to_migrate(src, ttu);
		page_was_mapped = 1;

		if (ttu & TTU_RMAP_LOCKED)
			i_mmap_unlock_write(mapping);
	}

	if (!folio_mapped(src))
		rc = move_to_new_folio(dst, src, mode);

	if (page_was_mapped)
		remove_migration_ptes(src,
			rc == MIGRATEPAGE_SUCCESS ? dst : src, false);

unlock_put_anon:
	folio_unlock(dst);

put_anon:
	if (anon_vma)
		put_anon_vma(anon_vma);

	if (rc == MIGRATEPAGE_SUCCESS) {
		move_hugetlb_state(src, dst, reason);
		put_new_folio = NULL;
	}

out_unlock:
	folio_unlock(src);
out:
	if (rc == MIGRATEPAGE_SUCCESS)
		folio_putback_active_hugetlb(src);
	else if (rc != -EAGAIN)
		list_move_tail(&src->lru, ret);

	/*
	 * If migration was not successful and there's a freeing callback, use
	 * it. Otherwise, put_page() will drop the reference grabbed during
	 * isolation.
	 */
	if (put_new_folio)
		put_new_folio(dst, private);
	else
		folio_putback_active_hugetlb(dst);

	return rc;
}

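/*
 * Split a large folio into base folios: on success (return 0), all resulting
 * folios, including @folio itself, end up on @split_folios; otherwise the
 * error from split_folio_to_list() is returned.
 */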
static inline int try_split_folio(struct folio *folio, struct list_head *split_folios)
{
	int rc;

	folio_lock(folio);
	rc = split_folio_to_list(folio, split_folios);
	folio_unlock(folio);
	if (!rc)
		list_move_tail(&folio->lru, split_folios);

	return rc;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define NR_MAX_BATCHED_MIGRATION	HPAGE_PMD_NR
#else
#define NR_MAX_BATCHED_MIGRATION	512
#endif
#define NR_MAX_MIGRATE_PAGES_RETRY	10
#define NR_MAX_MIGRATE_ASYNC_RETRY	3
#define NR_MAX_MIGRATE_SYNC_RETRY					\
	(NR_MAX_MIGRATE_PAGES_RETRY - NR_MAX_MIGRATE_ASYNC_RETRY)
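
/*
 * The total retry budget is NR_MAX_MIGRATE_PAGES_RETRY (10) passes:
 * migrate_pages_sync() first spends up to NR_MAX_MIGRATE_ASYNC_RETRY (3)
 * passes in batched MIGRATE_ASYNC mode, leaving NR_MAX_MIGRATE_SYNC_RETRY
 * (10 - 3 = 7) passes for the one-by-one synchronous fallback.
 */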

struct migrate_pages_stats {
	int nr_succeeded;	/* Normal and large folios migrated successfully, in
				   units of base pages */
	int nr_failed_pages;	/* Normal and large folios failed to be migrated, in
				   units of base pages. Untried folios aren't counted */
	int nr_thp_succeeded;	/* THP migrated successfully */
	int nr_thp_failed;	/* THP failed to be migrated */
	int nr_thp_split;	/* THP split before migrating */
	int nr_split;		/* Large folio (including THP) split before migrating */
};

/*
 * Returns the number of hugetlb folios that were not migrated, or an error code
 * after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no hugetlb folios are movable
 * any more because the list has become empty or no retryable hugetlb folios
 * exist any more. It is the caller's responsibility to call
 * putback_movable_pages() only if ret != 0.
 */
static int migrate_hugetlbs(struct list_head *from, new_folio_t get_new_folio,
			    free_folio_t put_new_folio, unsigned long private,
			    enum migrate_mode mode, int reason,
			    struct migrate_pages_stats *stats,
			    struct list_head *ret_folios)
{
	int retry = 1;
	int nr_failed = 0;
	int nr_retry_pages = 0;
	int pass = 0;
	struct folio *folio, *folio2;
	int rc, nr_pages;

	for (pass = 0; pass < NR_MAX_MIGRATE_PAGES_RETRY && retry; pass++) {
		retry = 0;
		nr_retry_pages = 0;

		list_for_each_entry_safe(folio, folio2, from, lru) {
			if (!folio_test_hugetlb(folio))
				continue;

			nr_pages = folio_nr_pages(folio);

			cond_resched();

			/*
			 * Migratability of hugepages depends on architectures and
			 * their size. This check is necessary because some callers
			 * of hugepage migration like soft offline and memory
			 * hotremove don't walk through page tables or check whether
			 * the hugepage is pmd-based or not before kicking migration.
			 */
			if (!hugepage_migration_supported(folio_hstate(folio))) {
				nr_failed++;
				stats->nr_failed_pages += nr_pages;
				list_move_tail(&folio->lru, ret_folios);
				continue;
			}

			rc = unmap_and_move_huge_page(get_new_folio,
						      put_new_folio, private,
						      folio, pass > 2, mode,
						      reason, ret_folios);
			/*
			 * The rules are:
			 *	Success: hugetlb folio will be put back
			 *	-EAGAIN: stay on the from list
			 *	-ENOMEM: stay on the from list
			 *	Other errno: put on ret_folios list
			 */
			switch(rc) {
			case -ENOMEM:
				/*
				 * When memory is low, don't bother to try to migrate
				 * other folios, just exit.
				 */
				stats->nr_failed_pages += nr_pages + nr_retry_pages;
				return -ENOMEM;
			case -EAGAIN:
				retry++;
				nr_retry_pages += nr_pages;
				break;
			case MIGRATEPAGE_SUCCESS:
				stats->nr_succeeded += nr_pages;
				break;
			default:
				/*
				 * Permanent failure (-EBUSY, etc.):
				 * unlike -EAGAIN case, the failed folio is
				 * removed from migration folio list and not
				 * retried in the next outer loop.
				 */
				nr_failed++;
				stats->nr_failed_pages += nr_pages;
				break;
			}
		}
	}
	/*
	 * nr_failed is the number of hugetlb folios that failed to be migrated.
	 * After NR_MAX_MIGRATE_PAGES_RETRY attempts, give up and count retried
	 * hugetlb folios as failed.
	 */
	nr_failed += retry;
	stats->nr_failed_pages += nr_retry_pages;

	return nr_failed;
}

/*
 * migrate_pages_batch() first unmaps as many folios in the from list as
 * possible, then moves the unmapped folios.
 *
 * We only batch migration if mode == MIGRATE_ASYNC, to avoid waiting on a
 * lock or bit while we have locked more than one folio, which may cause a
 * deadlock (e.g., for a loop device). So, if mode != MIGRATE_ASYNC, the
 * length of the from list must be <= 1.
 */
static int migrate_pages_batch(struct list_head *from,
		new_folio_t get_new_folio, free_folio_t put_new_folio,
		unsigned long private, enum migrate_mode mode, int reason,
		struct list_head *ret_folios, struct list_head *split_folios,
		struct migrate_pages_stats *stats, int nr_pass)
{
	int retry = 1;
	int thp_retry = 1;
	int nr_failed = 0;
	int nr_retry_pages = 0;
	int pass = 0;
	bool is_thp = false;
	bool is_large = false;
	struct folio *folio, *folio2, *dst = NULL, *dst2;
	int rc, rc_saved = 0, nr_pages;
	LIST_HEAD(unmap_folios);
	LIST_HEAD(dst_folios);
	bool nosplit = (reason == MR_NUMA_MISPLACED);

	VM_WARN_ON_ONCE(mode != MIGRATE_ASYNC &&
			!list_empty(from) && !list_is_singular(from));

	for (pass = 0; pass < nr_pass && retry; pass++) {
		retry = 0;
		thp_retry = 0;
		nr_retry_pages = 0;

		list_for_each_entry_safe(folio, folio2, from, lru) {
			is_large = folio_test_large(folio);
			is_thp = is_large && folio_test_pmd_mappable(folio);
			nr_pages = folio_nr_pages(folio);

			cond_resched();

			/*
			 * Large folio migration might be unsupported or
			 * the allocation might fail, so we should retry
			 * on the same folio with the large folio split
			 * into normal folios.
			 *
			 * Split folios are put in split_folios, and
			 * we will migrate them after the rest of the
			 * list is processed.
			 */
			if (!thp_migration_supported() && is_thp) {
				nr_failed++;
				stats->nr_thp_failed++;
				if (!try_split_folio(folio, split_folios)) {
					stats->nr_thp_split++;
					stats->nr_split++;
					continue;
				}
				stats->nr_failed_pages += nr_pages;
				list_move_tail(&folio->lru, ret_folios);
				continue;
			}

			rc = migrate_folio_unmap(get_new_folio, put_new_folio,
					private, folio, &dst, mode, reason,
					ret_folios);
			/*
			 * The rules are:
			 *	Success: folio will be freed
			 *	Unmap: folio will be put on unmap_folios list,
			 *	       dst folio put on dst_folios list
			 *	-EAGAIN: stay on the from list
			 *	-ENOMEM: stay on the from list
			 *	Other errno: put on ret_folios list
			 */
			switch(rc) {
			case -ENOMEM:
				/*
				 * When memory is low, don't bother to try to migrate
				 * other folios, move unmapped folios, then exit.
				 */
				nr_failed++;
				stats->nr_thp_failed += is_thp;
				/* Large folio NUMA faulting doesn't split to retry. */
				if (is_large && !nosplit) {
					int ret = try_split_folio(folio, split_folios);

					if (!ret) {
						stats->nr_thp_split += is_thp;
						stats->nr_split++;
						break;
					} else if (reason == MR_LONGTERM_PIN &&
						   ret == -EAGAIN) {
						/*
						 * Try again to split the large folio
						 * to mitigate the failure of longterm
						 * pinning.
						 */
						retry++;
						thp_retry += is_thp;
						nr_retry_pages += nr_pages;
						/* Undo duplicated failure counting. */
						nr_failed--;
						stats->nr_thp_failed -= is_thp;
						break;
					}
				}

				stats->nr_failed_pages += nr_pages + nr_retry_pages;
				/* nr_failed isn't updated: rc_saved is returned instead */
				stats->nr_thp_failed += thp_retry;
				rc_saved = rc;
				if (list_empty(&unmap_folios))
					goto out;
				else
					goto move;
			case -EAGAIN:
				retry++;
				thp_retry += is_thp;
				nr_retry_pages += nr_pages;
				break;
			case MIGRATEPAGE_SUCCESS:
				stats->nr_succeeded += nr_pages;
				stats->nr_thp_succeeded += is_thp;
				break;
			case MIGRATEPAGE_UNMAP:
				list_move_tail(&folio->lru, &unmap_folios);
				list_add_tail(&dst->lru, &dst_folios);
				break;
			default:
				/*
				 * Permanent failure (-EBUSY, etc.):
				 * unlike -EAGAIN case, the failed folio is
				 * removed from migration folio list and not
				 * retried in the next outer loop.
				 */
				nr_failed++;
				stats->nr_thp_failed += is_thp;
				stats->nr_failed_pages += nr_pages;
				break;
			}
		}
	}
	nr_failed += retry;
	stats->nr_thp_failed += thp_retry;
	stats->nr_failed_pages += nr_retry_pages;
move:
	/* Flush TLBs for all unmapped folios */
	try_to_unmap_flush();

	retry = 1;
	for (pass = 0; pass < nr_pass && retry; pass++) {
		retry = 0;
		thp_retry = 0;
		nr_retry_pages = 0;

		dst = list_first_entry(&dst_folios, struct folio, lru);
		dst2 = list_next_entry(dst, lru);
		list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
			is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
			nr_pages = folio_nr_pages(folio);

			cond_resched();

			rc = migrate_folio_move(put_new_folio, private,
						folio, dst, mode,
						reason, ret_folios);
			/*
			 * The rules are:
			 *	Success: folio will be freed
			 *	-EAGAIN: stay on the unmap_folios list
			 *	Other errno: put on ret_folios list
			 */
			switch(rc) {
			case -EAGAIN:
				retry++;
				thp_retry += is_thp;
				nr_retry_pages += nr_pages;
				break;
			case MIGRATEPAGE_SUCCESS:
				stats->nr_succeeded += nr_pages;
				stats->nr_thp_succeeded += is_thp;
				break;
			default:
				nr_failed++;
				stats->nr_thp_failed += is_thp;
				stats->nr_failed_pages += nr_pages;
				break;
			}
			dst = dst2;
			dst2 = list_next_entry(dst, lru);
		}
	}
	nr_failed += retry;
	stats->nr_thp_failed += thp_retry;
	stats->nr_failed_pages += nr_retry_pages;

	rc = rc_saved ? : nr_failed;
out:
	/* Cleanup remaining folios */
	dst = list_first_entry(&dst_folios, struct folio, lru);
	dst2 = list_next_entry(dst, lru);
	list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
		int old_page_state = 0;
		struct anon_vma *anon_vma = NULL;

		__migrate_folio_extract(dst, &old_page_state, &anon_vma);
		migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
				       anon_vma, true, ret_folios);
		list_del(&dst->lru);
		migrate_folio_undo_dst(dst, true, put_new_folio, private);
		dst = dst2;
		dst2 = list_next_entry(dst, lru);
	}

	return rc;
}

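/*
 * Try to migrate the folios in batched MIGRATE_ASYNC mode first; folios that
 * fail are then retried one by one in the caller-requested synchronous mode
 * (see the fallback loop below).
 */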
1829static int migrate_pages_sync(struct list_head *from, new_folio_t get_new_folio,
1830 free_folio_t put_new_folio, unsigned long private,
1831 enum migrate_mode mode, int reason,
1832 struct list_head *ret_folios, struct list_head *split_folios,
1833 struct migrate_pages_stats *stats)
1834{
1835 int rc, nr_failed = 0;
1836 LIST_HEAD(folios);
1837 struct migrate_pages_stats astats;
1838
1839 memset(&astats, 0, sizeof(astats));
1840 /* Try to migrate in batch with MIGRATE_ASYNC mode firstly */
1841 rc = migrate_pages_batch(from, get_new_folio, put_new_folio, private, MIGRATE_ASYNC,
1842 reason, &folios, split_folios, &astats,
1843 NR_MAX_MIGRATE_ASYNC_RETRY);
1844 stats->nr_succeeded += astats.nr_succeeded;
1845 stats->nr_thp_succeeded += astats.nr_thp_succeeded;
1846 stats->nr_thp_split += astats.nr_thp_split;
1847 stats->nr_split += astats.nr_split;
1848 if (rc < 0) {
1849 stats->nr_failed_pages += astats.nr_failed_pages;
1850 stats->nr_thp_failed += astats.nr_thp_failed;
1851 list_splice_tail(&folios, ret_folios);
1852 return rc;
1853 }
1854 stats->nr_thp_failed += astats.nr_thp_split;
1855 /*
1856 * Do not count rc, as pages will be retried below.
1857 * Count nr_split only, since it includes nr_thp_split.
1858 */
1859 nr_failed += astats.nr_split;
1860 /*
1861 * Fall back to migrate all failed folios one by one synchronously. All
1862 * failed folios except split THPs will be retried, so their failure
1863 * isn't counted
1864 */
	list_splice_tail_init(&folios, from);
	while (!list_empty(from)) {
		list_move(from->next, &folios);
		rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
					 private, mode, reason, ret_folios,
					 split_folios, stats, NR_MAX_MIGRATE_SYNC_RETRY);
		list_splice_tail_init(&folios, ret_folios);
		if (rc < 0)
			return rc;
		nr_failed += rc;
	}

	return nr_failed;
}
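
/*
 * In short, the fallback above is (illustrative pseudo-code only):
 *
 *	migrate_pages_batch(whole list, MIGRATE_ASYNC);
 *	for each folio the async pass failed to migrate:
 *		migrate_pages_batch(just that folio, the caller's sync mode);
 *
 * so the expensive synchronous modes are only ever applied one folio at a
 * time, while the common async case keeps full unmap/move batching.
 */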

/*
 * migrate_pages - migrate the folios specified in a list, to the free folios
 *		   supplied as the target for the page migration
 *
 * @from:		The list of folios to be migrated.
 * @get_new_folio:	The function used to allocate free folios to be used
 *			as the target of the folio migration.
 * @put_new_folio:	The function used to free target folios if migration
 *			fails, or NULL if no special handling is necessary.
 * @private:		Private data to be passed on to get_new_folio()
 * @mode:		The migration mode that specifies the constraints for
 *			folio migration, if any.
 * @reason:		The reason for folio migration.
 * @ret_succeeded:	Set to the number of folios migrated successfully if
 *			the caller passes a non-NULL pointer.
 *
 * The function returns after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no folios
 * are movable any more because the list has become empty or no retryable folios
 * exist any more. It is the caller's responsibility to call
 * putback_movable_pages() only if ret != 0.
 *
 * Returns the number of {normal folios, large folios, hugetlb folios} that were
 * not migrated, or an error code. A large folio that was split is counted as
 * one non-migrated large folio, no matter how many of its split folios are
 * migrated successfully.
 */
int migrate_pages(struct list_head *from, new_folio_t get_new_folio,
		free_folio_t put_new_folio, unsigned long private,
		enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
{
	int rc, rc_gather;
	int nr_pages;
	struct folio *folio, *folio2;
	LIST_HEAD(folios);
	LIST_HEAD(ret_folios);
	LIST_HEAD(split_folios);
	struct migrate_pages_stats stats;

	trace_mm_migrate_pages_start(mode, reason);

	memset(&stats, 0, sizeof(stats));

	rc_gather = migrate_hugetlbs(from, get_new_folio, put_new_folio, private,
				     mode, reason, &stats, &ret_folios);
	if (rc_gather < 0)
		goto out;

again:
	nr_pages = 0;
	list_for_each_entry_safe(folio, folio2, from, lru) {
		/* Retried hugetlb folios will be kept in the list */
		if (folio_test_hugetlb(folio)) {
			list_move_tail(&folio->lru, &ret_folios);
			continue;
		}

		nr_pages += folio_nr_pages(folio);
		if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
			break;
	}
	if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
		list_cut_before(&folios, from, &folio2->lru);
	else
		list_splice_init(from, &folios);
	if (mode == MIGRATE_ASYNC)
		rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
				private, mode, reason, &ret_folios,
				&split_folios, &stats,
				NR_MAX_MIGRATE_PAGES_RETRY);
	else
		rc = migrate_pages_sync(&folios, get_new_folio, put_new_folio,
				private, mode, reason, &ret_folios,
				&split_folios, &stats);
	list_splice_tail_init(&folios, &ret_folios);
	if (rc < 0) {
		rc_gather = rc;
		list_splice_tail(&split_folios, &ret_folios);
		goto out;
	}
	if (!list_empty(&split_folios)) {
		/*
		 * Failure isn't counted since all split folios of a large folio
		 * are counted as 1 failure already. And we only try to migrate
		 * with minimal effort, forcing MIGRATE_ASYNC mode and retrying
		 * once.
		 */
		migrate_pages_batch(&split_folios, get_new_folio,
				put_new_folio, private, MIGRATE_ASYNC, reason,
				&ret_folios, NULL, &stats, 1);
		list_splice_tail_init(&split_folios, &ret_folios);
	}
	rc_gather += rc;
	if (!list_empty(from))
		goto again;
out:
	/*
	 * Put the permanently failed folios back on the migration list;
	 * they will be put back on the right list by the caller.
	 */
	list_splice(&ret_folios, from);

	/*
	 * Return 0 in case all split folios of fail-to-migrate large folios
	 * are migrated successfully.
	 */
	if (list_empty(from))
		rc_gather = 0;

	count_vm_events(PGMIGRATE_SUCCESS, stats.nr_succeeded);
	count_vm_events(PGMIGRATE_FAIL, stats.nr_failed_pages);
	count_vm_events(THP_MIGRATION_SUCCESS, stats.nr_thp_succeeded);
	count_vm_events(THP_MIGRATION_FAIL, stats.nr_thp_failed);
	count_vm_events(THP_MIGRATION_SPLIT, stats.nr_thp_split);
	trace_mm_migrate_pages(stats.nr_succeeded, stats.nr_failed_pages,
			       stats.nr_thp_succeeded, stats.nr_thp_failed,
			       stats.nr_thp_split, stats.nr_split, mode,
			       reason);

	if (ret_succeeded)
		*ret_succeeded = stats.nr_succeeded;

	return rc_gather;
}

struct folio *alloc_migration_target(struct folio *src, unsigned long private)
{
	struct migration_target_control *mtc;
	gfp_t gfp_mask;
	unsigned int order = 0;
	int nid;
	int zidx;

	mtc = (struct migration_target_control *)private;
	gfp_mask = mtc->gfp_mask;
	nid = mtc->nid;
	if (nid == NUMA_NO_NODE)
		nid = folio_nid(src);

	if (folio_test_hugetlb(src)) {
		struct hstate *h = folio_hstate(src);

		gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
		return alloc_hugetlb_folio_nodemask(h, nid,
						mtc->nmask, gfp_mask);
	}

	if (folio_test_large(src)) {
		/*
		 * clear __GFP_RECLAIM to make the migration callback
		 * consistent with regular THP allocations.
		 */
		gfp_mask &= ~__GFP_RECLAIM;
		gfp_mask |= GFP_TRANSHUGE;
		order = folio_order(src);
	}
	zidx = zone_idx(folio_zone(src));
	if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
		gfp_mask |= __GFP_HIGHMEM;

	return __folio_alloc(gfp_mask, order, nid, mtc->nmask);
}
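
/*
 * Example: a minimal caller sketch (illustrative only; compare the real
 * caller do_move_pages_to_node() below). "target_nid" and "pagelist" are
 * placeholder names, and the isolation step is elided. Per the
 * migrate_pages() contract above, the caller must call
 * putback_movable_pages() when a non-zero value is returned.
 *
 *	struct migration_target_control mtc = {
 *		.nid = target_nid,
 *		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
 *	};
 *	int err;
 *
 *	(isolate the folios to be moved onto &pagelist)
 *	err = migrate_pages(&pagelist, alloc_migration_target, NULL,
 *			    (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL,
 *			    NULL);
 *	if (err)
 *		putback_movable_pages(&pagelist);
 */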

#ifdef CONFIG_NUMA

static int store_status(int __user *status, int start, int value, int nr)
{
	while (nr-- > 0) {
		if (put_user(value, status + start))
			return -EFAULT;
		start++;
	}

	return 0;
}

static int do_move_pages_to_node(struct list_head *pagelist, int node)
{
	int err;
	struct migration_target_control mtc = {
		.nid = node,
		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
	};

	err = migrate_pages(pagelist, alloc_migration_target, NULL,
			    (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
	if (err)
		putback_movable_pages(pagelist);
	return err;
}

/*
 * Resolves the given address to a struct page, isolates it from the LRU and
 * puts it on the given pagelist.
 * Returns:
 *     errno - if the page cannot be found/isolated
 *     0 - when it doesn't have to be migrated because it is already on the
 *         target node
 *     1 - when it has been queued
 */
static int add_page_for_migration(struct mm_struct *mm, const void __user *p,
		int node, struct list_head *pagelist, bool migrate_all)
{
	struct vm_area_struct *vma;
	unsigned long addr;
	struct page *page;
	struct folio *folio;
	int err;

	mmap_read_lock(mm);
	addr = (unsigned long)untagged_addr_remote(mm, p);

	err = -EFAULT;
	vma = vma_lookup(mm, addr);
	if (!vma || !vma_migratable(vma))
		goto out;

	/* FOLL_DUMP to ignore special (like zero) pages */
	page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);

	err = PTR_ERR(page);
	if (IS_ERR(page))
		goto out;

	err = -ENOENT;
	if (!page)
		goto out;

	folio = page_folio(page);
	if (folio_is_zone_device(folio))
		goto out_putfolio;

	err = 0;
	if (folio_nid(folio) == node)
		goto out_putfolio;

	err = -EACCES;
	if (page_mapcount(page) > 1 && !migrate_all)
		goto out_putfolio;

	err = -EBUSY;
	if (folio_test_hugetlb(folio)) {
		if (isolate_hugetlb(folio, pagelist))
			err = 1;
	} else {
		if (!folio_isolate_lru(folio))
			goto out_putfolio;

		err = 1;
		list_add_tail(&folio->lru, pagelist);
		node_stat_mod_folio(folio,
			NR_ISOLATED_ANON + folio_is_file_lru(folio),
			folio_nr_pages(folio));
	}
out_putfolio:
	/*
	 * Either remove the duplicate refcount from folio_isolate_lru()
	 * or drop the folio ref if it was not isolated.
	 */
	folio_put(folio);
out:
	mmap_read_unlock(mm);
	return err;
}

static int move_pages_and_store_status(int node,
		struct list_head *pagelist, int __user *status,
		int start, int i, unsigned long nr_pages)
{
	int err;

	if (list_empty(pagelist))
		return 0;

	err = do_move_pages_to_node(pagelist, node);
	if (err) {
		/*
		 * A positive err means the number of pages that failed
		 * to migrate. Since we are going to abort and return
		 * the number of non-migrated pages, we need to include
		 * the rest of the nr_pages that have not been attempted
		 * as well.
		 */
		if (err > 0)
			err += nr_pages - i;
		return err;
	}
	return store_status(status, start, node, i - start);
}

/*
 * Migrate an array of page addresses onto an array of nodes and fill
 * the corresponding array of status values.
 */
static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
			 unsigned long nr_pages,
			 const void __user * __user *pages,
			 const int __user *nodes,
			 int __user *status, int flags)
{
	compat_uptr_t __user *compat_pages = (void __user *)pages;
	int current_node = NUMA_NO_NODE;
	LIST_HEAD(pagelist);
	int start, i;
	int err = 0, err1;

	lru_cache_disable();

	for (i = start = 0; i < nr_pages; i++) {
		const void __user *p;
		int node;

		err = -EFAULT;
		if (in_compat_syscall()) {
			compat_uptr_t cp;

			if (get_user(cp, compat_pages + i))
				goto out_flush;

			p = compat_ptr(cp);
		} else {
			if (get_user(p, pages + i))
				goto out_flush;
		}
		if (get_user(node, nodes + i))
			goto out_flush;

		err = -ENODEV;
		if (node < 0 || node >= MAX_NUMNODES)
			goto out_flush;
		if (!node_state(node, N_MEMORY))
			goto out_flush;

		err = -EACCES;
		if (!node_isset(node, task_nodes))
			goto out_flush;

		if (current_node == NUMA_NO_NODE) {
			current_node = node;
			start = i;
		} else if (node != current_node) {
			err = move_pages_and_store_status(current_node,
					&pagelist, status, start, i, nr_pages);
			if (err)
				goto out;
			start = i;
			current_node = node;
		}

		/*
		 * Errors in the page lookup or isolation are not fatal; we
		 * simply report them via status.
		 */
		err = add_page_for_migration(mm, p, current_node, &pagelist,
					     flags & MPOL_MF_MOVE_ALL);

		if (err > 0) {
			/* The page is successfully queued for migration */
			continue;
		}

		/*
		 * The move_pages() man page does not have an -EEXIST choice, so
		 * use -EFAULT instead.
		 */
		if (err == -EEXIST)
			err = -EFAULT;

		/*
		 * If the page is already on the target node (!err), store the
		 * node, otherwise, store the err.
		 */
		err = store_status(status, i, err ? : current_node, 1);
		if (err)
			goto out_flush;

		err = move_pages_and_store_status(current_node, &pagelist,
				status, start, i, nr_pages);
		if (err) {
			/* We have accounted for page i */
			if (err > 0)
				err--;
			goto out;
		}
		current_node = NUMA_NO_NODE;
	}
out_flush:
	/* Make sure we do not overwrite the existing error */
	err1 = move_pages_and_store_status(current_node, &pagelist,
			status, start, i, nr_pages);
	if (err >= 0)
		err = err1;
out:
	lru_cache_enable();
	return err;
}

/*
 * Determine the nodes of an array of pages and store them in an array of
 * status values.
 */
static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
				const void __user **pages, int *status)
{
	unsigned long i;

	mmap_read_lock(mm);

	for (i = 0; i < nr_pages; i++) {
		unsigned long addr = (unsigned long)(*pages);
		struct vm_area_struct *vma;
		struct page *page;
		int err = -EFAULT;

		vma = vma_lookup(mm, addr);
		if (!vma)
			goto set_status;

		/* FOLL_DUMP to ignore special (like zero) pages */
		page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);

		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto set_status;

		err = -ENOENT;
		if (!page)
			goto set_status;

		if (!is_zone_device_page(page))
			err = page_to_nid(page);

		put_page(page);
set_status:
		*status = err;

		pages++;
		status++;
	}

	mmap_read_unlock(mm);
}

static int get_compat_pages_array(const void __user *chunk_pages[],
				  const void __user * __user *pages,
				  unsigned long chunk_nr)
{
	compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages;
	compat_uptr_t p;
	int i;

	for (i = 0; i < chunk_nr; i++) {
		if (get_user(p, pages32 + i))
			return -EFAULT;
		chunk_pages[i] = compat_ptr(p);
	}

	return 0;
}

/*
 * Determine the nodes of a user array of pages and store them in
 * a user array of status values.
 */
static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
			 const void __user * __user *pages,
			 int __user *status)
{
#define DO_PAGES_STAT_CHUNK_NR 16UL
	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
	int chunk_status[DO_PAGES_STAT_CHUNK_NR];

	while (nr_pages) {
		unsigned long chunk_nr = min(nr_pages, DO_PAGES_STAT_CHUNK_NR);

		if (in_compat_syscall()) {
			if (get_compat_pages_array(chunk_pages, pages,
						   chunk_nr))
				break;
		} else {
			if (copy_from_user(chunk_pages, pages,
				      chunk_nr * sizeof(*chunk_pages)))
				break;
		}

		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);

		if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
			break;

		pages += chunk_nr;
		status += chunk_nr;
		nr_pages -= chunk_nr;
	}
	return nr_pages ? -EFAULT : 0;
}
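
/*
 * Example: a hedged userspace sketch of the query mode implemented above.
 * Calling move_pages(2) with nodes == NULL fills status with the node of
 * each page (or a negative errno per page). Illustrative only; assumes
 * libnuma's <numaif.h> wrapper and a mapped address "addr".
 *
 *	#include <numaif.h>
 *	#include <stdio.h>
 *
 *	void *pages[1] = { addr };
 *	int status[1];
 *
 *	if (move_pages(0, 1, pages, NULL, status, 0) == 0 && status[0] >= 0)
 *		printf("page is on node %d\n", status[0]);
 */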

static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
{
	struct task_struct *task;
	struct mm_struct *mm;

	/*
	 * There is no need to check if the current process has the right to
	 * modify the specified process when they are the same.
	 */
	if (!pid) {
		mmget(current->mm);
		*mem_nodes = cpuset_mems_allowed(current);
		return current->mm;
	}

	/* Find the mm_struct */
	rcu_read_lock();
	task = find_task_by_vpid(pid);
	if (!task) {
		rcu_read_unlock();
		return ERR_PTR(-ESRCH);
	}
	get_task_struct(task);

	/*
	 * Check if this process has the right to modify the specified
	 * process. Use the regular "ptrace_may_access()" checks.
	 */
	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
		rcu_read_unlock();
		mm = ERR_PTR(-EPERM);
		goto out;
	}
	rcu_read_unlock();

	mm = ERR_PTR(security_task_movememory(task));
	if (IS_ERR(mm))
		goto out;
	*mem_nodes = cpuset_mems_allowed(task);
	mm = get_task_mm(task);
out:
	put_task_struct(task);
	if (!mm)
		mm = ERR_PTR(-EINVAL);
	return mm;
}

/*
 * Move a list of pages in the address space of the currently executing
 * process.
 */
static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
			     const void __user * __user *pages,
			     const int __user *nodes,
			     int __user *status, int flags)
{
	struct mm_struct *mm;
	int err;
	nodemask_t task_nodes;

	/* Check flags */
	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
		return -EINVAL;

	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	mm = find_mm_struct(pid, &task_nodes);
	if (IS_ERR(mm))
		return PTR_ERR(mm);

	if (nodes)
		err = do_pages_move(mm, task_nodes, nr_pages, pages,
				    nodes, status, flags);
	else
		err = do_pages_stat(mm, nr_pages, pages, status);

	mmput(mm);
	return err;
}

SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
		const void __user * __user *, pages,
		const int __user *, nodes,
		int __user *, status, int, flags)
{
	return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
}
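
/*
 * Example: a hedged userspace sketch of the move mode. Each page whose
 * entry in the nodes array names a valid target is queued and migrated;
 * the per-page result lands in status (the target node on success, a
 * negative errno otherwise), and a positive return value is the number of
 * pages that could not be migrated. Illustrative only; assumes libnuma's
 * <numaif.h> wrapper and a mapped address "addr".
 *
 *	#include <numaif.h>
 *
 *	void *pages[1] = { addr };
 *	int nodes[1] = { 1 };	(hypothetical target node)
 *	int status[1];
 *	long rc;
 *
 *	rc = move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE);
 *	(rc < 0: hard error; rc > 0: rc pages were not migrated)
 */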

#ifdef CONFIG_NUMA_BALANCING
/*
 * Returns true if this is a safe migration target node for misplaced NUMA
 * pages. Currently it only checks the watermarks, which is crude.
 */
static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
				   unsigned long nr_migrate_pages)
{
	int z;

	for (z = pgdat->nr_zones - 1; z >= 0; z--) {
		struct zone *zone = pgdat->node_zones + z;

		if (!managed_zone(zone))
			continue;

		/* Avoid waking kswapd by allocating pages_to_migrate pages. */
		if (!zone_watermark_ok(zone, 0,
				       high_wmark_pages(zone) +
				       nr_migrate_pages,
				       ZONE_MOVABLE, 0))
			continue;
		return true;
	}
	return false;
}
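
/*
 * Worked example with illustrative numbers: if a zone's high watermark is
 * 1024 pages and we ask whether 512 pages can be migrated in, the
 * zone_watermark_ok() check above passes only if roughly 1024 + 512 free
 * pages are available, so the migration cannot push the node below its
 * high watermark and wake kswapd.
 */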

static struct folio *alloc_misplaced_dst_folio(struct folio *src,
					   unsigned long data)
{
	int nid = (int) data;
	int order = folio_order(src);
	gfp_t gfp = __GFP_THISNODE;

	if (order > 0)
		gfp |= GFP_TRANSHUGE_LIGHT;
	else {
		gfp |= GFP_HIGHUSER_MOVABLE | __GFP_NOMEMALLOC | __GFP_NORETRY |
			__GFP_NOWARN;
		gfp &= ~__GFP_RECLAIM;
	}
	return __folio_alloc_node(gfp, order, nid);
}

static int numamigrate_isolate_folio(pg_data_t *pgdat, struct folio *folio)
{
	int nr_pages = folio_nr_pages(folio);

	/* Avoid migrating to a node that is nearly full */
	if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
		int z;

		if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING))
			return 0;
		for (z = pgdat->nr_zones - 1; z >= 0; z--) {
			if (managed_zone(pgdat->node_zones + z))
				break;
		}

		/*
		 * If there are no managed zones, do not proceed any
		 * further.
		 */
		if (z < 0)
			return 0;

		wakeup_kswapd(pgdat->node_zones + z, 0,
			      folio_order(folio), ZONE_MOVABLE);
		return 0;
	}

	if (!folio_isolate_lru(folio))
		return 0;

	node_stat_mod_folio(folio, NR_ISOLATED_ANON + folio_is_file_lru(folio),
			    nr_pages);

	/*
	 * Isolating the folio has taken another reference, so the
	 * caller's reference can be safely dropped without the folio
	 * disappearing underneath us during migration.
	 */
	folio_put(folio);
	return 1;
}

/*
 * Attempt to migrate a misplaced folio to the specified destination
 * node. Caller is expected to have an elevated reference count on
 * the folio that will be dropped by this function before returning.
 */
int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
			    int node)
{
	pg_data_t *pgdat = NODE_DATA(node);
	int isolated;
	int nr_remaining;
	unsigned int nr_succeeded;
	LIST_HEAD(migratepages);
	int nr_pages = folio_nr_pages(folio);

	/*
	 * Don't migrate file folios that are mapped in multiple processes
	 * with execute permissions as they are probably shared libraries.
	 * To check if the folio is shared, ideally we want to make sure
	 * every page is mapped to the same process. Doing that is very
	 * expensive, so check the estimated mapcount of the folio instead.
	 */
	if (folio_estimated_sharers(folio) != 1 && folio_is_file_lru(folio) &&
	    (vma->vm_flags & VM_EXEC))
		goto out;

	/*
	 * Also do not migrate dirty folios, as not all filesystems can move
	 * dirty folios in MIGRATE_ASYNC mode, which would be a waste of
	 * cycles.
	 */
	if (folio_is_file_lru(folio) && folio_test_dirty(folio))
		goto out;

	isolated = numamigrate_isolate_folio(pgdat, folio);
	if (!isolated)
		goto out;

	list_add(&folio->lru, &migratepages);
	nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio,
				     NULL, node, MIGRATE_ASYNC,
				     MR_NUMA_MISPLACED, &nr_succeeded);
	if (nr_remaining) {
		if (!list_empty(&migratepages)) {
			list_del(&folio->lru);
			node_stat_mod_folio(folio, NR_ISOLATED_ANON +
					folio_is_file_lru(folio), -nr_pages);
			folio_putback_lru(folio);
		}
		isolated = 0;
	}
	if (nr_succeeded) {
		count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
		if (!node_is_toptier(folio_nid(folio)) && node_is_toptier(node))
			mod_node_page_state(pgdat, PGPROMOTE_SUCCESS,
					    nr_succeeded);
	}
	BUG_ON(!list_empty(&migratepages));
	return isolated;

out:
	folio_put(folio);
	return 0;
}
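
/*
 * Example: a hedged sketch of the intended caller, the NUMA hinting fault
 * path (compare do_numa_page() in mm/memory.c). Details are illustrative;
 * "target_nid" is whatever node the placement policy picked, and the
 * caller's folio reference is consumed by migrate_misplaced_folio()
 * either way.
 *
 *	if (migrate_misplaced_folio(folio, vma, target_nid))
 *		nid = target_nid;	(folio now resides on target_nid)
 */
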
#endif /* CONFIG_NUMA_BALANCING */
#endif /* CONFIG_NUMA */
112
113 if (!mops->isolate_page(&folio->page, mode))
114 goto out_no_isolated;
115
116 /* Driver shouldn't use PG_isolated bit of page->flags */
117 WARN_ON_ONCE(folio_test_isolated(folio));
118 folio_set_isolated(folio);
119 folio_unlock(folio);
120
121 return true;
122
123out_no_isolated:
124 folio_unlock(folio);
125out_putfolio:
126 folio_put(folio);
127out:
128 return false;
129}
130
131static void putback_movable_folio(struct folio *folio)
132{
133 const struct movable_operations *mops = folio_movable_ops(folio);
134
135 mops->putback_page(&folio->page);
136 folio_clear_isolated(folio);
137}
138
139/*
140 * Put previously isolated pages back onto the appropriate lists
141 * from where they were once taken off for compaction/migration.
142 *
143 * This function shall be used whenever the isolated pageset has been
144 * built from lru, balloon, hugetlbfs page. See isolate_migratepages_range()
145 * and isolate_hugetlb().
146 */
147void putback_movable_pages(struct list_head *l)
148{
149 struct folio *folio;
150 struct folio *folio2;
151
152 list_for_each_entry_safe(folio, folio2, l, lru) {
153 if (unlikely(folio_test_hugetlb(folio))) {
154 folio_putback_active_hugetlb(folio);
155 continue;
156 }
157 list_del(&folio->lru);
158 /*
159 * We isolated non-lru movable folio so here we can use
160 * __folio_test_movable because LRU folio's mapping cannot
161 * have PAGE_MAPPING_MOVABLE.
162 */
163 if (unlikely(__folio_test_movable(folio))) {
164 VM_BUG_ON_FOLIO(!folio_test_isolated(folio), folio);
165 folio_lock(folio);
166 if (folio_test_movable(folio))
167 putback_movable_folio(folio);
168 else
169 folio_clear_isolated(folio);
170 folio_unlock(folio);
171 folio_put(folio);
172 } else {
173 node_stat_mod_folio(folio, NR_ISOLATED_ANON +
174 folio_is_file_lru(folio), -folio_nr_pages(folio));
175 folio_putback_lru(folio);
176 }
177 }
178}
179
180/*
181 * Restore a potential migration pte to a working pte entry
182 */
183static bool remove_migration_pte(struct folio *folio,
184 struct vm_area_struct *vma, unsigned long addr, void *old)
185{
186 DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION);
187
188 while (page_vma_mapped_walk(&pvmw)) {
189 rmap_t rmap_flags = RMAP_NONE;
190 pte_t old_pte;
191 pte_t pte;
192 swp_entry_t entry;
193 struct page *new;
194 unsigned long idx = 0;
195
196 /* pgoff is invalid for ksm pages, but they are never large */
197 if (folio_test_large(folio) && !folio_test_hugetlb(folio))
198 idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff;
199 new = folio_page(folio, idx);
200
201#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
202 /* PMD-mapped THP migration entry */
203 if (!pvmw.pte) {
204 VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
205 !folio_test_pmd_mappable(folio), folio);
206 remove_migration_pmd(&pvmw, new);
207 continue;
208 }
209#endif
210
211 folio_get(folio);
212 pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
213 old_pte = ptep_get(pvmw.pte);
214
215 entry = pte_to_swp_entry(old_pte);
216 if (!is_migration_entry_young(entry))
217 pte = pte_mkold(pte);
218 if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
219 pte = pte_mkdirty(pte);
220 if (pte_swp_soft_dirty(old_pte))
221 pte = pte_mksoft_dirty(pte);
222 else
223 pte = pte_clear_soft_dirty(pte);
224
225 if (is_writable_migration_entry(entry))
226 pte = pte_mkwrite(pte, vma);
227 else if (pte_swp_uffd_wp(old_pte))
228 pte = pte_mkuffd_wp(pte);
229
230 if (folio_test_anon(folio) && !is_readable_migration_entry(entry))
231 rmap_flags |= RMAP_EXCLUSIVE;
232
233 if (unlikely(is_device_private_page(new))) {
234 if (pte_write(pte))
235 entry = make_writable_device_private_entry(
236 page_to_pfn(new));
237 else
238 entry = make_readable_device_private_entry(
239 page_to_pfn(new));
240 pte = swp_entry_to_pte(entry);
241 if (pte_swp_soft_dirty(old_pte))
242 pte = pte_swp_mksoft_dirty(pte);
243 if (pte_swp_uffd_wp(old_pte))
244 pte = pte_swp_mkuffd_wp(pte);
245 }
246
247#ifdef CONFIG_HUGETLB_PAGE
248 if (folio_test_hugetlb(folio)) {
249 struct hstate *h = hstate_vma(vma);
250 unsigned int shift = huge_page_shift(h);
251 unsigned long psize = huge_page_size(h);
252
253 pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
254 if (folio_test_anon(folio))
255 hugetlb_add_anon_rmap(folio, vma, pvmw.address,
256 rmap_flags);
257 else
258 hugetlb_add_file_rmap(folio);
259 set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte,
260 psize);
261 } else
262#endif
263 {
264 if (folio_test_anon(folio))
265 folio_add_anon_rmap_pte(folio, new, vma,
266 pvmw.address, rmap_flags);
267 else
268 folio_add_file_rmap_pte(folio, new, vma);
269 set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
270 }
271 if (vma->vm_flags & VM_LOCKED)
272 mlock_drain_local();
273
274 trace_remove_migration_pte(pvmw.address, pte_val(pte),
275 compound_order(new));
276
277 /* No need to invalidate - it was non-present before */
278 update_mmu_cache(vma, pvmw.address, pvmw.pte);
279 }
280
281 return true;
282}
283
284/*
285 * Get rid of all migration entries and replace them by
286 * references to the indicated page.
287 */
288void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked)
289{
290 struct rmap_walk_control rwc = {
291 .rmap_one = remove_migration_pte,
292 .arg = src,
293 };
294
295 if (locked)
296 rmap_walk_locked(dst, &rwc);
297 else
298 rmap_walk(dst, &rwc);
299}
300
301/*
302 * Something used the pte of a page under migration. We need to
303 * get to the page and wait until migration is finished.
304 * When we return from this function the fault will be retried.
305 */
306void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
307 unsigned long address)
308{
309 spinlock_t *ptl;
310 pte_t *ptep;
311 pte_t pte;
312 swp_entry_t entry;
313
314 ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
315 if (!ptep)
316 return;
317
318 pte = ptep_get(ptep);
319 pte_unmap(ptep);
320
321 if (!is_swap_pte(pte))
322 goto out;
323
324 entry = pte_to_swp_entry(pte);
325 if (!is_migration_entry(entry))
326 goto out;
327
328 migration_entry_wait_on_locked(entry, ptl);
329 return;
330out:
331 spin_unlock(ptl);
332}
333
334#ifdef CONFIG_HUGETLB_PAGE
335/*
336 * The vma read lock must be held upon entry. Holding that lock prevents either
337 * the pte or the ptl from being freed.
338 *
339 * This function will release the vma lock before returning.
340 */
341void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *ptep)
342{
343 spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, ptep);
344 pte_t pte;
345
346 hugetlb_vma_assert_locked(vma);
347 spin_lock(ptl);
348 pte = huge_ptep_get(ptep);
349
350 if (unlikely(!is_hugetlb_entry_migration(pte))) {
351 spin_unlock(ptl);
352 hugetlb_vma_unlock_read(vma);
353 } else {
354 /*
355 * If migration entry existed, safe to release vma lock
356 * here because the pgtable page won't be freed without the
357 * pgtable lock released. See comment right above pgtable
358 * lock release in migration_entry_wait_on_locked().
359 */
360 hugetlb_vma_unlock_read(vma);
361 migration_entry_wait_on_locked(pte_to_swp_entry(pte), ptl);
362 }
363}
364#endif
365
366#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
367void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
368{
369 spinlock_t *ptl;
370
371 ptl = pmd_lock(mm, pmd);
372 if (!is_pmd_migration_entry(*pmd))
373 goto unlock;
374 migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), ptl);
375 return;
376unlock:
377 spin_unlock(ptl);
378}
379#endif
380
381static int folio_expected_refs(struct address_space *mapping,
382 struct folio *folio)
383{
384 int refs = 1;
385 if (!mapping)
386 return refs;
387
388 refs += folio_nr_pages(folio);
389 if (folio_test_private(folio))
390 refs++;
391
392 return refs;
393}
394
395/*
396 * Replace the page in the mapping.
397 *
398 * The number of remaining references must be:
399 * 1 for anonymous pages without a mapping
400 * 2 for pages with a mapping
401 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
402 */
403int folio_migrate_mapping(struct address_space *mapping,
404 struct folio *newfolio, struct folio *folio, int extra_count)
405{
406 XA_STATE(xas, &mapping->i_pages, folio_index(folio));
407 struct zone *oldzone, *newzone;
408 int dirty;
409 int expected_count = folio_expected_refs(mapping, folio) + extra_count;
410 long nr = folio_nr_pages(folio);
411 long entries, i;
412
413 if (!mapping) {
414 /* Anonymous page without mapping */
415 if (folio_ref_count(folio) != expected_count)
416 return -EAGAIN;
417
418 /* No turning back from here */
419 newfolio->index = folio->index;
420 newfolio->mapping = folio->mapping;
421 if (folio_test_swapbacked(folio))
422 __folio_set_swapbacked(newfolio);
423
424 return MIGRATEPAGE_SUCCESS;
425 }
426
427 oldzone = folio_zone(folio);
428 newzone = folio_zone(newfolio);
429
430 xas_lock_irq(&xas);
431 if (!folio_ref_freeze(folio, expected_count)) {
432 xas_unlock_irq(&xas);
433 return -EAGAIN;
434 }
435
436 /*
437 * Now we know that no one else is looking at the folio:
438 * no turning back from here.
439 */
440 newfolio->index = folio->index;
441 newfolio->mapping = folio->mapping;
442 folio_ref_add(newfolio, nr); /* add cache reference */
443 if (folio_test_swapbacked(folio)) {
444 __folio_set_swapbacked(newfolio);
445 if (folio_test_swapcache(folio)) {
446 folio_set_swapcache(newfolio);
447 newfolio->private = folio_get_private(folio);
448 }
449 entries = nr;
450 } else {
451 VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
452 entries = 1;
453 }
454
455 /* Move dirty while page refs frozen and newpage not yet exposed */
456 dirty = folio_test_dirty(folio);
457 if (dirty) {
458 folio_clear_dirty(folio);
459 folio_set_dirty(newfolio);
460 }
461
462 /* Swap cache still stores N entries instead of a high-order entry */
463 for (i = 0; i < entries; i++) {
464 xas_store(&xas, newfolio);
465 xas_next(&xas);
466 }
467
468 /*
469 * Drop cache reference from old page by unfreezing
470 * to one less reference.
471 * We know this isn't the last reference.
472 */
473 folio_ref_unfreeze(folio, expected_count - nr);
474
475 xas_unlock(&xas);
476 /* Leave irq disabled to prevent preemption while updating stats */
477
478 /*
479 * If moved to a different zone then also account
480 * the page for that zone. Other VM counters will be
481 * taken care of when we establish references to the
482 * new page and drop references to the old page.
483 *
484 * Note that anonymous pages are accounted for
485 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
486 * are mapped to swap space.
487 */
488 if (newzone != oldzone) {
489 struct lruvec *old_lruvec, *new_lruvec;
490 struct mem_cgroup *memcg;
491
492 memcg = folio_memcg(folio);
493 old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
494 new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
495
496 __mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
497 __mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
498 if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
499 __mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
500 __mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
501
502 if (folio_test_pmd_mappable(folio)) {
503 __mod_lruvec_state(old_lruvec, NR_SHMEM_THPS, -nr);
504 __mod_lruvec_state(new_lruvec, NR_SHMEM_THPS, nr);
505 }
506 }
507#ifdef CONFIG_SWAP
508 if (folio_test_swapcache(folio)) {
509 __mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
510 __mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
511 }
512#endif
513 if (dirty && mapping_can_writeback(mapping)) {
514 __mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
515 __mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
516 __mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
517 __mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
518 }
519 }
520 local_irq_enable();
521
522 return MIGRATEPAGE_SUCCESS;
523}
524EXPORT_SYMBOL(folio_migrate_mapping);
525
526/*
527 * The expected number of remaining references is the same as that
528 * of folio_migrate_mapping().
529 */
530int migrate_huge_page_move_mapping(struct address_space *mapping,
531 struct folio *dst, struct folio *src)
532{
533 XA_STATE(xas, &mapping->i_pages, folio_index(src));
534 int expected_count;
535
536 xas_lock_irq(&xas);
537 expected_count = folio_expected_refs(mapping, src);
538 if (!folio_ref_freeze(src, expected_count)) {
539 xas_unlock_irq(&xas);
540 return -EAGAIN;
541 }
542
543 dst->index = src->index;
544 dst->mapping = src->mapping;
545
546 folio_ref_add(dst, folio_nr_pages(dst));
547
548 xas_store(&xas, dst);
549
550 folio_ref_unfreeze(src, expected_count - folio_nr_pages(src));
551
552 xas_unlock_irq(&xas);
553
554 return MIGRATEPAGE_SUCCESS;
555}
556
557/*
558 * Copy the flags and some other ancillary information
559 */
560void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
561{
562 int cpupid;
563
564 if (folio_test_error(folio))
565 folio_set_error(newfolio);
566 if (folio_test_referenced(folio))
567 folio_set_referenced(newfolio);
568 if (folio_test_uptodate(folio))
569 folio_mark_uptodate(newfolio);
570 if (folio_test_clear_active(folio)) {
571 VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio);
572 folio_set_active(newfolio);
573 } else if (folio_test_clear_unevictable(folio))
574 folio_set_unevictable(newfolio);
575 if (folio_test_workingset(folio))
576 folio_set_workingset(newfolio);
577 if (folio_test_checked(folio))
578 folio_set_checked(newfolio);
579 /*
580 * PG_anon_exclusive (-> PG_mappedtodisk) is always migrated via
581 * migration entries. We can still have PG_anon_exclusive set on an
582 * effectively unmapped and unreferenced first sub-pages of an
583 * anonymous THP: we can simply copy it here via PG_mappedtodisk.
584 */
585 if (folio_test_mappedtodisk(folio))
586 folio_set_mappedtodisk(newfolio);
587
588 /* Move dirty on pages not done by folio_migrate_mapping() */
589 if (folio_test_dirty(folio))
590 folio_set_dirty(newfolio);
591
592 if (folio_test_young(folio))
593 folio_set_young(newfolio);
594 if (folio_test_idle(folio))
595 folio_set_idle(newfolio);
596
597 /*
598 * Copy NUMA information to the new page, to prevent over-eager
599 * future migrations of this same page.
600 */
601 cpupid = folio_xchg_last_cpupid(folio, -1);
602 /*
603 * For memory tiering mode, when migrate between slow and fast
604 * memory node, reset cpupid, because that is used to record
605 * page access time in slow memory node.
606 */
607 if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) {
608 bool f_toptier = node_is_toptier(folio_nid(folio));
609 bool t_toptier = node_is_toptier(folio_nid(newfolio));
610
611 if (f_toptier != t_toptier)
612 cpupid = -1;
613 }
614 folio_xchg_last_cpupid(newfolio, cpupid);
615
616 folio_migrate_ksm(newfolio, folio);
617 /*
618 * Please do not reorder this without considering how mm/ksm.c's
619 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
620 */
621 if (folio_test_swapcache(folio))
622 folio_clear_swapcache(folio);
623 folio_clear_private(folio);
624
625 /* page->private contains hugetlb specific flags */
626 if (!folio_test_hugetlb(folio))
627 folio->private = NULL;
628
629 /*
630 * If any waiters have accumulated on the new page then
631 * wake them up.
632 */
633 if (folio_test_writeback(newfolio))
634 folio_end_writeback(newfolio);
635
636 /*
637 * PG_readahead shares the same bit with PG_reclaim. The above
638 * end_page_writeback() may clear PG_readahead mistakenly, so set the
639 * bit after that.
640 */
641 if (folio_test_readahead(folio))
642 folio_set_readahead(newfolio);
643
644 folio_copy_owner(newfolio, folio);
645
646 mem_cgroup_migrate(folio, newfolio);
647}
648EXPORT_SYMBOL(folio_migrate_flags);
649
650void folio_migrate_copy(struct folio *newfolio, struct folio *folio)
651{
652 folio_copy(newfolio, folio);
653 folio_migrate_flags(newfolio, folio);
654}
655EXPORT_SYMBOL(folio_migrate_copy);
656
657/************************************************************
658 * Migration functions
659 ***********************************************************/
660
661int migrate_folio_extra(struct address_space *mapping, struct folio *dst,
662 struct folio *src, enum migrate_mode mode, int extra_count)
663{
664 int rc;
665
666 BUG_ON(folio_test_writeback(src)); /* Writeback must be complete */
667
668 rc = folio_migrate_mapping(mapping, dst, src, extra_count);
669
670 if (rc != MIGRATEPAGE_SUCCESS)
671 return rc;
672
673 if (mode != MIGRATE_SYNC_NO_COPY)
674 folio_migrate_copy(dst, src);
675 else
676 folio_migrate_flags(dst, src);
677 return MIGRATEPAGE_SUCCESS;
678}
679
680/**
681 * migrate_folio() - Simple folio migration.
682 * @mapping: The address_space containing the folio.
683 * @dst: The folio to migrate the data to.
684 * @src: The folio containing the current data.
685 * @mode: How to migrate the page.
686 *
687 * Common logic to directly migrate a single LRU folio suitable for
688 * folios that do not use PagePrivate/PagePrivate2.
689 *
690 * Folios are locked upon entry and exit.
691 */
692int migrate_folio(struct address_space *mapping, struct folio *dst,
693 struct folio *src, enum migrate_mode mode)
694{
695 return migrate_folio_extra(mapping, dst, src, mode, 0);
696}
697EXPORT_SYMBOL(migrate_folio);
698
699#ifdef CONFIG_BUFFER_HEAD
700/* Returns true if all buffers are successfully locked */
701static bool buffer_migrate_lock_buffers(struct buffer_head *head,
702 enum migrate_mode mode)
703{
704 struct buffer_head *bh = head;
705 struct buffer_head *failed_bh;
706
707 do {
708 if (!trylock_buffer(bh)) {
709 if (mode == MIGRATE_ASYNC)
710 goto unlock;
711 if (mode == MIGRATE_SYNC_LIGHT && !buffer_uptodate(bh))
712 goto unlock;
713 lock_buffer(bh);
714 }
715
716 bh = bh->b_this_page;
717 } while (bh != head);
718
719 return true;
720
721unlock:
722 /* We failed to lock the buffer and cannot stall. */
723 failed_bh = bh;
724 bh = head;
725 while (bh != failed_bh) {
726 unlock_buffer(bh);
727 bh = bh->b_this_page;
728 }
729
730 return false;
731}
732
733static int __buffer_migrate_folio(struct address_space *mapping,
734 struct folio *dst, struct folio *src, enum migrate_mode mode,
735 bool check_refs)
736{
737 struct buffer_head *bh, *head;
738 int rc;
739 int expected_count;
740
741 head = folio_buffers(src);
742 if (!head)
743 return migrate_folio(mapping, dst, src, mode);
744
745 /* Check whether page does not have extra refs before we do more work */
746 expected_count = folio_expected_refs(mapping, src);
747 if (folio_ref_count(src) != expected_count)
748 return -EAGAIN;
749
750 if (!buffer_migrate_lock_buffers(head, mode))
751 return -EAGAIN;
752
753 if (check_refs) {
754 bool busy;
755 bool invalidated = false;
756
757recheck_buffers:
758 busy = false;
759 spin_lock(&mapping->i_private_lock);
760 bh = head;
761 do {
762 if (atomic_read(&bh->b_count)) {
763 busy = true;
764 break;
765 }
766 bh = bh->b_this_page;
767 } while (bh != head);
768 if (busy) {
769 if (invalidated) {
770 rc = -EAGAIN;
771 goto unlock_buffers;
772 }
773 spin_unlock(&mapping->i_private_lock);
774 invalidate_bh_lrus();
775 invalidated = true;
776 goto recheck_buffers;
777 }
778 }
779
780 rc = folio_migrate_mapping(mapping, dst, src, 0);
781 if (rc != MIGRATEPAGE_SUCCESS)
782 goto unlock_buffers;
783
784 folio_attach_private(dst, folio_detach_private(src));
785
786 bh = head;
787 do {
788 folio_set_bh(bh, dst, bh_offset(bh));
789 bh = bh->b_this_page;
790 } while (bh != head);
791
792 if (mode != MIGRATE_SYNC_NO_COPY)
793 folio_migrate_copy(dst, src);
794 else
795 folio_migrate_flags(dst, src);
796
797 rc = MIGRATEPAGE_SUCCESS;
798unlock_buffers:
799 if (check_refs)
800 spin_unlock(&mapping->i_private_lock);
801 bh = head;
802 do {
803 unlock_buffer(bh);
804 bh = bh->b_this_page;
805 } while (bh != head);
806
807 return rc;
808}
809
810/**
811 * buffer_migrate_folio() - Migration function for folios with buffers.
812 * @mapping: The address space containing @src.
813 * @dst: The folio to migrate to.
814 * @src: The folio to migrate from.
815 * @mode: How to migrate the folio.
816 *
817 * This function can only be used if the underlying filesystem guarantees
818 * that no other references to @src exist. For example attached buffer
819 * heads are accessed only under the folio lock. If your filesystem cannot
820 * provide this guarantee, buffer_migrate_folio_norefs() may be more
821 * appropriate.
822 *
823 * Return: 0 on success or a negative errno on failure.
824 */
825int buffer_migrate_folio(struct address_space *mapping,
826 struct folio *dst, struct folio *src, enum migrate_mode mode)
827{
828 return __buffer_migrate_folio(mapping, dst, src, mode, false);
829}
830EXPORT_SYMBOL(buffer_migrate_folio);
831
832/**
833 * buffer_migrate_folio_norefs() - Migration function for folios with buffers.
834 * @mapping: The address space containing @src.
835 * @dst: The folio to migrate to.
836 * @src: The folio to migrate from.
837 * @mode: How to migrate the folio.
838 *
839 * Like buffer_migrate_folio() except that this variant is more careful
840 * and checks that there are also no buffer head references. This function
841 * is the right one for mappings where buffer heads are directly looked
842 * up and referenced (such as block device mappings).
843 *
844 * Return: 0 on success or a negative errno on failure.
845 */
846int buffer_migrate_folio_norefs(struct address_space *mapping,
847 struct folio *dst, struct folio *src, enum migrate_mode mode)
848{
849 return __buffer_migrate_folio(mapping, dst, src, mode, true);
850}
851EXPORT_SYMBOL_GPL(buffer_migrate_folio_norefs);
852#endif /* CONFIG_BUFFER_HEAD */
853
854int filemap_migrate_folio(struct address_space *mapping,
855 struct folio *dst, struct folio *src, enum migrate_mode mode)
856{
857 int ret;
858
859 ret = folio_migrate_mapping(mapping, dst, src, 0);
860 if (ret != MIGRATEPAGE_SUCCESS)
861 return ret;
862
863 if (folio_get_private(src))
864 folio_attach_private(dst, folio_detach_private(src));
865
866 if (mode != MIGRATE_SYNC_NO_COPY)
867 folio_migrate_copy(dst, src);
868 else
869 folio_migrate_flags(dst, src);
870 return MIGRATEPAGE_SUCCESS;
871}
872EXPORT_SYMBOL_GPL(filemap_migrate_folio);
873
874/*
875 * Writeback a folio to clean the dirty state
876 */
877static int writeout(struct address_space *mapping, struct folio *folio)
878{
879 struct writeback_control wbc = {
880 .sync_mode = WB_SYNC_NONE,
881 .nr_to_write = 1,
882 .range_start = 0,
883 .range_end = LLONG_MAX,
884 .for_reclaim = 1
885 };
886 int rc;
887
888 if (!mapping->a_ops->writepage)
889 /* No write method for the address space */
890 return -EINVAL;
891
892 if (!folio_clear_dirty_for_io(folio))
893 /* Someone else already triggered a write */
894 return -EAGAIN;
895
896 /*
897 * A dirty folio may imply that the underlying filesystem has
898 * the folio on some queue. So the folio must be clean for
899 * migration. Writeout may mean we lose the lock and the
900 * folio state is no longer what we checked for earlier.
901 * At this point we know that the migration attempt cannot
902 * be successful.
903 */
904 remove_migration_ptes(folio, folio, false);
905
906 rc = mapping->a_ops->writepage(&folio->page, &wbc);
907
908 if (rc != AOP_WRITEPAGE_ACTIVATE)
909 /* unlocked. Relock */
910 folio_lock(folio);
911
912 return (rc < 0) ? -EIO : -EAGAIN;
913}
914
915/*
916 * Default handling if a filesystem does not provide a migration function.
917 */
918static int fallback_migrate_folio(struct address_space *mapping,
919 struct folio *dst, struct folio *src, enum migrate_mode mode)
920{
921 if (folio_test_dirty(src)) {
922 /* Only writeback folios in full synchronous migration */
923 switch (mode) {
924 case MIGRATE_SYNC:
925 case MIGRATE_SYNC_NO_COPY:
926 break;
927 default:
928 return -EBUSY;
929 }
930 return writeout(mapping, src);
931 }
932
933 /*
934 * Buffers may be managed in a filesystem specific way.
935 * We must have no buffers or drop them.
936 */
937 if (!filemap_release_folio(src, GFP_KERNEL))
938 return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;
939
940 return migrate_folio(mapping, dst, src, mode);
941}
942
943/*
944 * Move a page to a newly allocated page
945 * The page is locked and all ptes have been successfully removed.
946 *
947 * The new page will have replaced the old page if this function
948 * is successful.
949 *
950 * Return value:
951 * < 0 - error code
952 * MIGRATEPAGE_SUCCESS - success
953 */
954static int move_to_new_folio(struct folio *dst, struct folio *src,
955 enum migrate_mode mode)
956{
957 int rc = -EAGAIN;
958 bool is_lru = !__folio_test_movable(src);
959
960 VM_BUG_ON_FOLIO(!folio_test_locked(src), src);
961 VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst);
962
963 if (likely(is_lru)) {
964 struct address_space *mapping = folio_mapping(src);
965
966 if (!mapping)
967 rc = migrate_folio(mapping, dst, src, mode);
968 else if (mapping_unmovable(mapping))
969 rc = -EOPNOTSUPP;
970 else if (mapping->a_ops->migrate_folio)
971 /*
972 * Most folios have a mapping and most filesystems
973 * provide a migrate_folio callback. Anonymous folios
974 * are part of swap space which also has its own
975 * migrate_folio callback. This is the most common path
976 * for page migration.
977 */
978 rc = mapping->a_ops->migrate_folio(mapping, dst, src,
979 mode);
980 else
981 rc = fallback_migrate_folio(mapping, dst, src, mode);
982 } else {
983 const struct movable_operations *mops;
984
985 /*
986 * In case of non-lru page, it could be released after
987 * isolation step. In that case, we shouldn't try migration.
988 */
989 VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
990 if (!folio_test_movable(src)) {
991 rc = MIGRATEPAGE_SUCCESS;
992 folio_clear_isolated(src);
993 goto out;
994 }
995
996 mops = folio_movable_ops(src);
997 rc = mops->migrate_page(&dst->page, &src->page, mode);
998 WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
999 !folio_test_isolated(src));
1000 }
1001
1002 /*
1003 * When successful, old pagecache src->mapping must be cleared before
1004 * src is freed; but stats require that PageAnon be left as PageAnon.
1005 */
1006 if (rc == MIGRATEPAGE_SUCCESS) {
1007 if (__folio_test_movable(src)) {
1008 VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
1009
1010 /*
1011 * We clear PG_movable under page_lock so any compactor
1012 * cannot try to migrate this page.
1013 */
1014 folio_clear_isolated(src);
1015 }
1016
1017 /*
1018 * Anonymous and movable src->mapping will be cleared by
1019 * free_pages_prepare so don't reset it here for keeping
1020 * the type to work PageAnon, for example.
1021 */
1022 if (!folio_mapping_flags(src))
1023 src->mapping = NULL;
1024
1025 if (likely(!folio_is_zone_device(dst)))
1026 flush_dcache_folio(dst);
1027 }
1028out:
1029 return rc;
1030}
1031
1032/*
1033 * To record some information during migration, we use unused private
1034 * field of struct folio of the newly allocated destination folio.
1035 * This is safe because nobody is using it except us.
1036 */
1037enum {
1038 PAGE_WAS_MAPPED = BIT(0),
1039 PAGE_WAS_MLOCKED = BIT(1),
1040 PAGE_OLD_STATES = PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED,
1041};
1042
1043static void __migrate_folio_record(struct folio *dst,
1044 int old_page_state,
1045 struct anon_vma *anon_vma)
1046{
1047 dst->private = (void *)anon_vma + old_page_state;
1048}
1049
1050static void __migrate_folio_extract(struct folio *dst,
1051 int *old_page_state,
1052 struct anon_vma **anon_vmap)
1053{
1054 unsigned long private = (unsigned long)dst->private;
1055
1056 *anon_vmap = (struct anon_vma *)(private & ~PAGE_OLD_STATES);
1057 *old_page_state = private & PAGE_OLD_STATES;
1058 dst->private = NULL;
1059}
1060
1061/* Restore the source folio to the original state upon failure */
1062static void migrate_folio_undo_src(struct folio *src,
1063 int page_was_mapped,
1064 struct anon_vma *anon_vma,
1065 bool locked,
1066 struct list_head *ret)
1067{
1068 if (page_was_mapped)
1069 remove_migration_ptes(src, src, false);
1070 /* Drop an anon_vma reference if we took one */
1071 if (anon_vma)
1072 put_anon_vma(anon_vma);
1073 if (locked)
1074 folio_unlock(src);
1075 if (ret)
1076 list_move_tail(&src->lru, ret);
1077}
1078
1079/* Restore the destination folio to the original state upon failure */
1080static void migrate_folio_undo_dst(struct folio *dst, bool locked,
1081 free_folio_t put_new_folio, unsigned long private)
1082{
1083 if (locked)
1084 folio_unlock(dst);
1085 if (put_new_folio)
1086 put_new_folio(dst, private);
1087 else
1088 folio_put(dst);
1089}
1090
1091/* Cleanup src folio upon migration success */
1092static void migrate_folio_done(struct folio *src,
1093 enum migrate_reason reason)
1094{
1095 /*
1096 * Compaction can migrate also non-LRU pages which are
1097 * not accounted to NR_ISOLATED_*. They can be recognized
1098 * as __folio_test_movable
1099 */
1100 if (likely(!__folio_test_movable(src)))
1101 mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON +
1102 folio_is_file_lru(src), -folio_nr_pages(src));
1103
1104 if (reason != MR_MEMORY_FAILURE)
1105 /* We release the page in page_handle_poison. */
1106 folio_put(src);
1107}
1108
1109/* Obtain the lock on page, remove all ptes. */
1110static int migrate_folio_unmap(new_folio_t get_new_folio,
1111 free_folio_t put_new_folio, unsigned long private,
1112 struct folio *src, struct folio **dstp, enum migrate_mode mode,
1113 enum migrate_reason reason, struct list_head *ret)
1114{
1115 struct folio *dst;
1116 int rc = -EAGAIN;
1117 int old_page_state = 0;
1118 struct anon_vma *anon_vma = NULL;
1119 bool is_lru = !__folio_test_movable(src);
1120 bool locked = false;
1121 bool dst_locked = false;
1122
1123 if (folio_ref_count(src) == 1) {
1124 /* Folio was freed from under us. So we are done. */
1125 folio_clear_active(src);
1126 folio_clear_unevictable(src);
1127 /* free_pages_prepare() will clear PG_isolated. */
1128 list_del(&src->lru);
1129 migrate_folio_done(src, reason);
1130 return MIGRATEPAGE_SUCCESS;
1131 }
1132
1133 dst = get_new_folio(src, private);
1134 if (!dst)
1135 return -ENOMEM;
1136 *dstp = dst;
1137
1138 dst->private = NULL;
1139
1140 if (!folio_trylock(src)) {
1141 if (mode == MIGRATE_ASYNC)
1142 goto out;
1143
1144 /*
1145 * It's not safe for direct compaction to call lock_page.
1146 * For example, during page readahead pages are added locked
1147 * to the LRU. Later, when the IO completes the pages are
1148 * marked uptodate and unlocked. However, the queueing
1149 * could be merging multiple pages for one bio (e.g.
1150 * mpage_readahead). If an allocation happens for the
1151 * second or third page, the process can end up locking
1152 * the same page twice and deadlocking. Rather than
1153 * trying to be clever about what pages can be locked,
1154 * avoid the use of lock_page for direct compaction
1155 * altogether.
1156 */
1157 if (current->flags & PF_MEMALLOC)
1158 goto out;
1159
1160 /*
1161 * In "light" mode, we can wait for transient locks (eg
1162 * inserting a page into the page table), but it's not
1163 * worth waiting for I/O.
1164 */
1165 if (mode == MIGRATE_SYNC_LIGHT && !folio_test_uptodate(src))
1166 goto out;
1167
1168 folio_lock(src);
1169 }
1170 locked = true;
1171 if (folio_test_mlocked(src))
1172 old_page_state |= PAGE_WAS_MLOCKED;
1173
1174 if (folio_test_writeback(src)) {
1175 /*
1176 * Only in the case of a full synchronous migration is it
1177 * necessary to wait for PageWriteback. In the async case,
1178 * the retry loop is too short and in the sync-light case,
1179 * the overhead of stalling is too much
1180 */
1181 switch (mode) {
1182 case MIGRATE_SYNC:
1183 case MIGRATE_SYNC_NO_COPY:
1184 break;
1185 default:
1186 rc = -EBUSY;
1187 goto out;
1188 }
1189 folio_wait_writeback(src);
1190 }
1191
1192 /*
1193 * By try_to_migrate(), src->mapcount goes down to 0 here. In this case,
1194 * we cannot notice that anon_vma is freed while we migrate a page.
1195 * This get_anon_vma() delays freeing anon_vma pointer until the end
1196 * of migration. File cache pages are no problem because of page_lock()
1197 * File Caches may use write_page() or lock_page() in migration, then,
1198 * just care Anon page here.
1199 *
1200 * Only folio_get_anon_vma() understands the subtleties of
1201 * getting a hold on an anon_vma from outside one of its mms.
1202 * But if we cannot get anon_vma, then we won't need it anyway,
1203 * because that implies that the anon page is no longer mapped
1204 * (and cannot be remapped so long as we hold the page lock).
1205 */
1206 if (folio_test_anon(src) && !folio_test_ksm(src))
1207 anon_vma = folio_get_anon_vma(src);
1208
1209 /*
1210 * Block others from accessing the new page when we get around to
1211 * establishing additional references. We are usually the only one
1212 * holding a reference to dst at this point. We used to have a BUG
1213 * here if folio_trylock(dst) fails, but would like to allow for
1214 * cases where there might be a race with the previous use of dst.
1215 * This is much like races on refcount of oldpage: just don't BUG().
1216 */
1217 if (unlikely(!folio_trylock(dst)))
1218 goto out;
1219 dst_locked = true;
1220
1221 if (unlikely(!is_lru)) {
1222 __migrate_folio_record(dst, old_page_state, anon_vma);
1223 return MIGRATEPAGE_UNMAP;
1224 }
1225
1226 /*
1227 * Corner case handling:
1228	 * 1. When a new swap-cache page is read in, it is added to the LRU
1229	 * and treated as swapcache, but it has no rmap yet.
1230	 * Calling try_to_unmap() against a src->mapping==NULL page will
1231	 * trigger a BUG. So handle it here.
1232	 * 2. An orphaned page (see truncate_cleanup_page) might have
1233	 * fs-private metadata. Such a page can be picked up during memory
1234	 * offlining. Everywhere else except page reclaim, the page is
1235	 * invisible to the vm, so it cannot be migrated. So try to
1236	 * free the metadata, so the page can be freed.
1237 */
1238 if (!src->mapping) {
1239 if (folio_test_private(src)) {
1240 try_to_free_buffers(src);
1241 goto out;
1242 }
1243 } else if (folio_mapped(src)) {
1244 /* Establish migration ptes */
1245 VM_BUG_ON_FOLIO(folio_test_anon(src) &&
1246 !folio_test_ksm(src) && !anon_vma, src);
1247 try_to_migrate(src, mode == MIGRATE_ASYNC ? TTU_BATCH_FLUSH : 0);
1248 old_page_state |= PAGE_WAS_MAPPED;
1249 }
1250
1251 if (!folio_mapped(src)) {
1252 __migrate_folio_record(dst, old_page_state, anon_vma);
1253 return MIGRATEPAGE_UNMAP;
1254 }
1255
1256out:
1257 /*
1258	 * A folio that has not been unmapped will be restored to the
1259	 * right list unless we want to retry.
1260 */
1261 if (rc == -EAGAIN)
1262 ret = NULL;
1263
1264 migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
1265 anon_vma, locked, ret);
1266 migrate_folio_undo_dst(dst, dst_locked, put_new_folio, private);
1267
1268 return rc;
1269}
1270
1271/* Migrate the folio to the newly allocated folio in dst. */
1272static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
1273 struct folio *src, struct folio *dst,
1274 enum migrate_mode mode, enum migrate_reason reason,
1275 struct list_head *ret)
1276{
1277 int rc;
1278 int old_page_state = 0;
1279 struct anon_vma *anon_vma = NULL;
1280 bool is_lru = !__folio_test_movable(src);
1281 struct list_head *prev;
1282
1283 __migrate_folio_extract(dst, &old_page_state, &anon_vma);
1284 prev = dst->lru.prev;
1285 list_del(&dst->lru);
1286
1287 rc = move_to_new_folio(dst, src, mode);
1288 if (rc)
1289 goto out;
1290
1291 if (unlikely(!is_lru))
1292 goto out_unlock_both;
1293
1294 /*
1295 * When successful, push dst to LRU immediately: so that if it
1296 * turns out to be an mlocked page, remove_migration_ptes() will
1297 * automatically build up the correct dst->mlock_count for it.
1298 *
1299 * We would like to do something similar for the old page, when
1300 * unsuccessful, and other cases when a page has been temporarily
1301 * isolated from the unevictable LRU: but this case is the easiest.
1302 */
1303 folio_add_lru(dst);
1304 if (old_page_state & PAGE_WAS_MLOCKED)
1305 lru_add_drain();
1306
1307 if (old_page_state & PAGE_WAS_MAPPED)
1308 remove_migration_ptes(src, dst, false);
1309
1310out_unlock_both:
1311 folio_unlock(dst);
1312 set_page_owner_migrate_reason(&dst->page, reason);
1313 /*
1314	 * If migration was successful, drop our reference to dst; the
1315	 * folio will not be freed because the new page owner took its
1316	 * own reference.
1317 */
1318 folio_put(dst);
1319
1320 /*
1321 * A folio that has been migrated has all references removed
1322 * and will be freed.
1323 */
1324 list_del(&src->lru);
1325 /* Drop an anon_vma reference if we took one */
1326 if (anon_vma)
1327 put_anon_vma(anon_vma);
1328 folio_unlock(src);
1329 migrate_folio_done(src, reason);
1330
1331 return rc;
1332out:
1333 /*
1334	 * A folio that has not been migrated will be restored to the
1335	 * right list unless we want to retry.
1336 */
1337 if (rc == -EAGAIN) {
1338 list_add(&dst->lru, prev);
1339 __migrate_folio_record(dst, old_page_state, anon_vma);
1340 return rc;
1341 }
1342
1343 migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
1344 anon_vma, true, ret);
1345 migrate_folio_undo_dst(dst, true, put_new_folio, private);
1346
1347 return rc;
1348}
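/*
 * Illustrative sketch (not part of the kernel): how the two phases above
 * pair up for a single folio. migrate_folio_unmap() stashes the old page
 * state and anon_vma in dst via __migrate_folio_record(), and
 * migrate_folio_move() retrieves them with __migrate_folio_extract():
 *
 *	struct folio *dst;
 *	int rc;
 *
 *	rc = migrate_folio_unmap(get_new_folio, put_new_folio, private,
 *				 src, &dst, mode, reason, ret);
 *	if (rc == MIGRATEPAGE_UNMAP)
 *		rc = migrate_folio_move(put_new_folio, private,
 *					src, dst, mode, reason, ret);
 *
 * migrate_pages_batch() below drives many folios through the unmap phase
 * before starting the move phase, which enables batched TLB flushing.
 */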
1349
1350/*
1351 * Counterpart of migrate_folio_unmap()/migrate_folio_move() for hugepage
1352 * migration.
1353 *
1354 * This function doesn't wait for completion of hugepage I/O because
1355 * there is no race between I/O and migration for hugepages. Currently
1356 * hugepage I/O occurs only in direct I/O, where no lock is held and
1357 * PG_writeback is irrelevant, and the writeback status of all subpages
1358 * is counted in the reference count of the head page (i.e. if all
1359 * subpages of a 2MB hugepage are under direct I/O, the head page's
1360 * reference count is 512 and a bit more.) So when we try to migrate a
1361 * hugepage whose subpages are doing direct I/O, some references remain
1362 * after try_to_unmap() and migration fails without data corruption.
1363 *
1364 * There is also no race when direct I/O is issued on a page under
1365 * migration: the pte is then replaced with a migration swap entry, and
1366 * the direct I/O code waits in the page fault for migration to complete.
1367 */
1368static int unmap_and_move_huge_page(new_folio_t get_new_folio,
1369 free_folio_t put_new_folio, unsigned long private,
1370 struct folio *src, int force, enum migrate_mode mode,
1371 int reason, struct list_head *ret)
1372{
1373 struct folio *dst;
1374 int rc = -EAGAIN;
1375 int page_was_mapped = 0;
1376 struct anon_vma *anon_vma = NULL;
1377 struct address_space *mapping = NULL;
1378
1379 if (folio_ref_count(src) == 1) {
1380 /* page was freed from under us. So we are done. */
1381 folio_putback_active_hugetlb(src);
1382 return MIGRATEPAGE_SUCCESS;
1383 }
1384
1385 dst = get_new_folio(src, private);
1386 if (!dst)
1387 return -ENOMEM;
1388
1389 if (!folio_trylock(src)) {
1390 if (!force)
1391 goto out;
1392 switch (mode) {
1393 case MIGRATE_SYNC:
1394 case MIGRATE_SYNC_NO_COPY:
1395 break;
1396 default:
1397 goto out;
1398 }
1399 folio_lock(src);
1400 }
1401
1402 /*
1403	 * Check for pages which are in the process of being freed. Without
1404	 * folio_mapping() set, the hugetlbfs-specific move-page routine will
1405	 * not be called and we could leak usage counts for subpools.
1406 */
1407 if (hugetlb_folio_subpool(src) && !folio_mapping(src)) {
1408 rc = -EBUSY;
1409 goto out_unlock;
1410 }
1411
1412 if (folio_test_anon(src))
1413 anon_vma = folio_get_anon_vma(src);
1414
1415 if (unlikely(!folio_trylock(dst)))
1416 goto put_anon;
1417
1418 if (folio_mapped(src)) {
1419 enum ttu_flags ttu = 0;
1420
1421 if (!folio_test_anon(src)) {
1422 /*
1423 * In shared mappings, try_to_unmap could potentially
1424			 * call huge_pmd_unshare. Because of this, take the
1425			 * i_mmap_rwsem in write mode here and set TTU_RMAP_LOCKED
1426 * to let lower levels know we have taken the lock.
1427 */
1428 mapping = hugetlb_page_mapping_lock_write(&src->page);
1429 if (unlikely(!mapping))
1430 goto unlock_put_anon;
1431
1432 ttu = TTU_RMAP_LOCKED;
1433 }
1434
1435 try_to_migrate(src, ttu);
1436 page_was_mapped = 1;
1437
1438 if (ttu & TTU_RMAP_LOCKED)
1439 i_mmap_unlock_write(mapping);
1440 }
1441
1442 if (!folio_mapped(src))
1443 rc = move_to_new_folio(dst, src, mode);
1444
1445 if (page_was_mapped)
1446 remove_migration_ptes(src,
1447 rc == MIGRATEPAGE_SUCCESS ? dst : src, false);
1448
1449unlock_put_anon:
1450 folio_unlock(dst);
1451
1452put_anon:
1453 if (anon_vma)
1454 put_anon_vma(anon_vma);
1455
1456 if (rc == MIGRATEPAGE_SUCCESS) {
1457 move_hugetlb_state(src, dst, reason);
1458 put_new_folio = NULL;
1459 }
1460
1461out_unlock:
1462 folio_unlock(src);
1463out:
1464 if (rc == MIGRATEPAGE_SUCCESS)
1465 folio_putback_active_hugetlb(src);
1466 else if (rc != -EAGAIN)
1467 list_move_tail(&src->lru, ret);
1468
1469 /*
1470	 * If migration was not successful and there's a freeing callback,
1471	 * use it. Otherwise, folio_putback_active_hugetlb() will drop the
1472	 * reference grabbed during isolation.
1473 */
1474 if (put_new_folio)
1475 put_new_folio(dst, private);
1476 else
1477 folio_putback_active_hugetlb(dst);
1478
1479 return rc;
1480}
1481
1482static inline int try_split_folio(struct folio *folio, struct list_head *split_folios)
1483{
1484 int rc;
1485
1486 folio_lock(folio);
1487 rc = split_folio_to_list(folio, split_folios);
1488 folio_unlock(folio);
1489 if (!rc)
1490 list_move_tail(&folio->lru, split_folios);
1491
1492 return rc;
1493}
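/*
 * Behaviour note (informational): on success, split_folio_to_list() has
 * queued the new tail folios on @split_folios, and the list_move_tail()
 * above adds @folio itself (now a base folio), so the caller finds the
 * entire former large folio on @split_folios.
 */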
1494
1495#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1496#define NR_MAX_BATCHED_MIGRATION HPAGE_PMD_NR
1497#else
1498#define NR_MAX_BATCHED_MIGRATION 512
1499#endif
1500#define NR_MAX_MIGRATE_PAGES_RETRY 10
1501#define NR_MAX_MIGRATE_ASYNC_RETRY 3
1502#define NR_MAX_MIGRATE_SYNC_RETRY \
1503 (NR_MAX_MIGRATE_PAGES_RETRY - NR_MAX_MIGRATE_ASYNC_RETRY)
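/*
 * Worked example (informational): with the values above, a MIGRATE_SYNC
 * caller of migrate_pages() gets NR_MAX_MIGRATE_PAGES_RETRY = 10 passes
 * in total, which migrate_pages_sync() splits into
 * NR_MAX_MIGRATE_ASYNC_RETRY = 3 batched async passes followed by
 * NR_MAX_MIGRATE_SYNC_RETRY = 10 - 3 = 7 one-by-one synchronous passes.
 */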
1504
1505struct migrate_pages_stats {
1506 int nr_succeeded; /* Normal and large folios migrated successfully, in
1507 units of base pages */
1508 int nr_failed_pages; /* Normal and large folios failed to be migrated, in
1509 units of base pages. Untried folios aren't counted */
1510 int nr_thp_succeeded; /* THP migrated successfully */
1511 int nr_thp_failed; /* THP failed to be migrated */
1512 int nr_thp_split; /* THP split before migrating */
1513	int nr_split;	/* Large folios (including THP) split before migrating */
1514};
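/*
 * Worked example (informational): successfully migrating one 2MB THP
 * (HPAGE_PMD_NR == 512 base pages with 4K pages) adds 512 to
 * nr_succeeded and 1 to nr_thp_succeeded: the nr_thp_* and nr_split
 * counters are in units of folios, while nr_succeeded and
 * nr_failed_pages are in units of base pages.
 */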
1515
1516/*
1517 * Returns the number of hugetlb folios that were not migrated, or an error
1518 * code after NR_MAX_MIGRATE_PAGES_RETRY attempts, or once no hugetlb folios
1519 * are movable any more because the list has become empty or no retryable
1520 * hugetlb folios exist any more. It is the caller's responsibility to call
1521 * putback_movable_pages() only if ret != 0.
1522 */
1523static int migrate_hugetlbs(struct list_head *from, new_folio_t get_new_folio,
1524 free_folio_t put_new_folio, unsigned long private,
1525 enum migrate_mode mode, int reason,
1526 struct migrate_pages_stats *stats,
1527 struct list_head *ret_folios)
1528{
1529 int retry = 1;
1530 int nr_failed = 0;
1531 int nr_retry_pages = 0;
1532 int pass = 0;
1533 struct folio *folio, *folio2;
1534 int rc, nr_pages;
1535
1536 for (pass = 0; pass < NR_MAX_MIGRATE_PAGES_RETRY && retry; pass++) {
1537 retry = 0;
1538 nr_retry_pages = 0;
1539
1540 list_for_each_entry_safe(folio, folio2, from, lru) {
1541 if (!folio_test_hugetlb(folio))
1542 continue;
1543
1544 nr_pages = folio_nr_pages(folio);
1545
1546 cond_resched();
1547
1548 /*
1549			 * Migratability of hugepages depends on the architecture
1550			 * and their size. This check is necessary because some
1551			 * callers of hugepage migration, like soft offline and
1552			 * memory hotremove, don't walk page tables or check
1553			 * whether the hugepage is pmd-based before kicking off migration.
1554 */
1555 if (!hugepage_migration_supported(folio_hstate(folio))) {
1556 nr_failed++;
1557 stats->nr_failed_pages += nr_pages;
1558 list_move_tail(&folio->lru, ret_folios);
1559 continue;
1560 }
1561
1562 rc = unmap_and_move_huge_page(get_new_folio,
1563 put_new_folio, private,
1564 folio, pass > 2, mode,
1565 reason, ret_folios);
1566 /*
1567 * The rules are:
1568 * Success: hugetlb folio will be put back
1569 * -EAGAIN: stay on the from list
1570 * -ENOMEM: stay on the from list
1571 * Other errno: put on ret_folios list
1572 */
1573 switch(rc) {
1574 case -ENOMEM:
1575 /*
1576 * When memory is low, don't bother to try to migrate
1577 * other folios, just exit.
1578 */
1579 stats->nr_failed_pages += nr_pages + nr_retry_pages;
1580 return -ENOMEM;
1581 case -EAGAIN:
1582 retry++;
1583 nr_retry_pages += nr_pages;
1584 break;
1585 case MIGRATEPAGE_SUCCESS:
1586 stats->nr_succeeded += nr_pages;
1587 break;
1588 default:
1589 /*
1590 * Permanent failure (-EBUSY, etc.):
1591 * unlike -EAGAIN case, the failed folio is
1592 * removed from migration folio list and not
1593 * retried in the next outer loop.
1594 */
1595 nr_failed++;
1596 stats->nr_failed_pages += nr_pages;
1597 break;
1598 }
1599 }
1600 }
1601 /*
1602	 * nr_failed is the number of hugetlb folios that failed to migrate.
1603	 * After NR_MAX_MIGRATE_PAGES_RETRY attempts, give up and count retried
1604	 * hugetlb folios as failed.
1605 */
1606 nr_failed += retry;
1607 stats->nr_failed_pages += nr_retry_pages;
1608
1609 return nr_failed;
1610}
1611
1612/*
1613 * migrate_pages_batch() first unmaps as many folios in the from list as
1614 * possible, then moves the unmapped folios.
1615 *
1616 * We only batch migration if mode == MIGRATE_ASYNC, to avoid waiting on a
1617 * lock or bit while we have locked more than one folio, which may cause
1618 * deadlock (e.g., for the loop device). So, if mode != MIGRATE_ASYNC, the
1619 * length of the from list must be <= 1.
1620 */
1621static int migrate_pages_batch(struct list_head *from,
1622 new_folio_t get_new_folio, free_folio_t put_new_folio,
1623 unsigned long private, enum migrate_mode mode, int reason,
1624 struct list_head *ret_folios, struct list_head *split_folios,
1625 struct migrate_pages_stats *stats, int nr_pass)
1626{
1627 int retry = 1;
1628 int thp_retry = 1;
1629 int nr_failed = 0;
1630 int nr_retry_pages = 0;
1631 int pass = 0;
1632 bool is_thp = false;
1633 bool is_large = false;
1634 struct folio *folio, *folio2, *dst = NULL, *dst2;
1635 int rc, rc_saved = 0, nr_pages;
1636 LIST_HEAD(unmap_folios);
1637 LIST_HEAD(dst_folios);
1638 bool nosplit = (reason == MR_NUMA_MISPLACED);
1639
1640 VM_WARN_ON_ONCE(mode != MIGRATE_ASYNC &&
1641 !list_empty(from) && !list_is_singular(from));
1642
1643 for (pass = 0; pass < nr_pass && retry; pass++) {
1644 retry = 0;
1645 thp_retry = 0;
1646 nr_retry_pages = 0;
1647
1648 list_for_each_entry_safe(folio, folio2, from, lru) {
1649 is_large = folio_test_large(folio);
1650 is_thp = is_large && folio_test_pmd_mappable(folio);
1651 nr_pages = folio_nr_pages(folio);
1652
1653 cond_resched();
1654
1655 /*
1656			 * Large folio migration might be unsupported or the
1657			 * allocation might fail, so we should retry on the
1658			 * same folio after splitting the large folio into
1659			 * normal folios.
1660 *
1661 * Split folios are put in split_folios, and
1662 * we will migrate them after the rest of the
1663 * list is processed.
1664 */
1665 if (!thp_migration_supported() && is_thp) {
1666 nr_failed++;
1667 stats->nr_thp_failed++;
1668 if (!try_split_folio(folio, split_folios)) {
1669 stats->nr_thp_split++;
1670 stats->nr_split++;
1671 continue;
1672 }
1673 stats->nr_failed_pages += nr_pages;
1674 list_move_tail(&folio->lru, ret_folios);
1675 continue;
1676 }
1677
1678 rc = migrate_folio_unmap(get_new_folio, put_new_folio,
1679 private, folio, &dst, mode, reason,
1680 ret_folios);
1681 /*
1682 * The rules are:
1683 * Success: folio will be freed
1684 * Unmap: folio will be put on unmap_folios list,
1685 * dst folio put on dst_folios list
1686 * -EAGAIN: stay on the from list
1687 * -ENOMEM: stay on the from list
1688 * Other errno: put on ret_folios list
1689 */
1690 switch(rc) {
1691 case -ENOMEM:
1692 /*
1693 * When memory is low, don't bother to try to migrate
1694 * other folios, move unmapped folios, then exit.
1695 */
1696 nr_failed++;
1697 stats->nr_thp_failed += is_thp;
1698				/* Large folios from NUMA hinting faults are not split for retry. */
1699 if (is_large && !nosplit) {
1700 int ret = try_split_folio(folio, split_folios);
1701
1702 if (!ret) {
1703 stats->nr_thp_split += is_thp;
1704 stats->nr_split++;
1705 break;
1706 } else if (reason == MR_LONGTERM_PIN &&
1707 ret == -EAGAIN) {
1708 /*
1709					 * Try splitting the large folio again
1710					 * to mitigate the failure of longterm pinning.
1711 */
1712 retry++;
1713 thp_retry += is_thp;
1714 nr_retry_pages += nr_pages;
1715 /* Undo duplicated failure counting. */
1716 nr_failed--;
1717 stats->nr_thp_failed -= is_thp;
1718 break;
1719 }
1720 }
1721
1722 stats->nr_failed_pages += nr_pages + nr_retry_pages;
1723				/* nr_failed isn't updated since it won't be used. */
1724 stats->nr_thp_failed += thp_retry;
1725 rc_saved = rc;
1726 if (list_empty(&unmap_folios))
1727 goto out;
1728 else
1729 goto move;
1730 case -EAGAIN:
1731 retry++;
1732 thp_retry += is_thp;
1733 nr_retry_pages += nr_pages;
1734 break;
1735 case MIGRATEPAGE_SUCCESS:
1736 stats->nr_succeeded += nr_pages;
1737 stats->nr_thp_succeeded += is_thp;
1738 break;
1739 case MIGRATEPAGE_UNMAP:
1740 list_move_tail(&folio->lru, &unmap_folios);
1741 list_add_tail(&dst->lru, &dst_folios);
1742 break;
1743 default:
1744 /*
1745 * Permanent failure (-EBUSY, etc.):
1746 * unlike -EAGAIN case, the failed folio is
1747 * removed from migration folio list and not
1748 * retried in the next outer loop.
1749 */
1750 nr_failed++;
1751 stats->nr_thp_failed += is_thp;
1752 stats->nr_failed_pages += nr_pages;
1753 break;
1754 }
1755 }
1756 }
1757 nr_failed += retry;
1758 stats->nr_thp_failed += thp_retry;
1759 stats->nr_failed_pages += nr_retry_pages;
1760move:
1761 /* Flush TLBs for all unmapped folios */
1762 try_to_unmap_flush();
1763
1764 retry = 1;
1765 for (pass = 0; pass < nr_pass && retry; pass++) {
1766 retry = 0;
1767 thp_retry = 0;
1768 nr_retry_pages = 0;
1769
1770 dst = list_first_entry(&dst_folios, struct folio, lru);
1771 dst2 = list_next_entry(dst, lru);
1772 list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
1773 is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
1774 nr_pages = folio_nr_pages(folio);
1775
1776 cond_resched();
1777
1778 rc = migrate_folio_move(put_new_folio, private,
1779 folio, dst, mode,
1780 reason, ret_folios);
1781 /*
1782 * The rules are:
1783 * Success: folio will be freed
1784 * -EAGAIN: stay on the unmap_folios list
1785 * Other errno: put on ret_folios list
1786 */
1787 switch(rc) {
1788 case -EAGAIN:
1789 retry++;
1790 thp_retry += is_thp;
1791 nr_retry_pages += nr_pages;
1792 break;
1793 case MIGRATEPAGE_SUCCESS:
1794 stats->nr_succeeded += nr_pages;
1795 stats->nr_thp_succeeded += is_thp;
1796 break;
1797 default:
1798 nr_failed++;
1799 stats->nr_thp_failed += is_thp;
1800 stats->nr_failed_pages += nr_pages;
1801 break;
1802 }
1803 dst = dst2;
1804 dst2 = list_next_entry(dst, lru);
1805 }
1806 }
1807 nr_failed += retry;
1808 stats->nr_thp_failed += thp_retry;
1809 stats->nr_failed_pages += nr_retry_pages;
1810
1811 rc = rc_saved ? : nr_failed;
1812out:
1813 /* Cleanup remaining folios */
1814 dst = list_first_entry(&dst_folios, struct folio, lru);
1815 dst2 = list_next_entry(dst, lru);
1816 list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
1817 int old_page_state = 0;
1818 struct anon_vma *anon_vma = NULL;
1819
1820 __migrate_folio_extract(dst, &old_page_state, &anon_vma);
1821 migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
1822 anon_vma, true, ret_folios);
1823 list_del(&dst->lru);
1824 migrate_folio_undo_dst(dst, true, put_new_folio, private);
1825 dst = dst2;
1826 dst2 = list_next_entry(dst, lru);
1827 }
1828
1829 return rc;
1830}
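/*
 * Illustrative sketch (not part of the kernel): the batching above,
 * reduced to its skeleton. All folios are unmapped first, with async
 * unmaps using TTU_BATCH_FLUSH so a single try_to_unmap_flush() call
 * amortizes the TLB shootdown, and only then are the folios moved:
 *
 *	list_for_each_entry_safe(folio, folio2, from, lru)
 *		migrate_folio_unmap(...);	// defers the TLB flush
 *
 *	try_to_unmap_flush();	// one batched flush for all folios
 *
 *	list_for_each_entry_safe(folio, folio2, &unmap_folios, lru)
 *		migrate_folio_move(...);	// copy + fix up migration ptes
 */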
1831
1832static int migrate_pages_sync(struct list_head *from, new_folio_t get_new_folio,
1833 free_folio_t put_new_folio, unsigned long private,
1834 enum migrate_mode mode, int reason,
1835 struct list_head *ret_folios, struct list_head *split_folios,
1836 struct migrate_pages_stats *stats)
1837{
1838 int rc, nr_failed = 0;
1839 LIST_HEAD(folios);
1840 struct migrate_pages_stats astats;
1841
1842 memset(&astats, 0, sizeof(astats));
1843	/* Try to migrate in batch with MIGRATE_ASYNC mode first */
1844 rc = migrate_pages_batch(from, get_new_folio, put_new_folio, private, MIGRATE_ASYNC,
1845 reason, &folios, split_folios, &astats,
1846 NR_MAX_MIGRATE_ASYNC_RETRY);
1847 stats->nr_succeeded += astats.nr_succeeded;
1848 stats->nr_thp_succeeded += astats.nr_thp_succeeded;
1849 stats->nr_thp_split += astats.nr_thp_split;
1850 stats->nr_split += astats.nr_split;
1851 if (rc < 0) {
1852 stats->nr_failed_pages += astats.nr_failed_pages;
1853 stats->nr_thp_failed += astats.nr_thp_failed;
1854 list_splice_tail(&folios, ret_folios);
1855 return rc;
1856 }
1857 stats->nr_thp_failed += astats.nr_thp_split;
1858 /*
1859 * Do not count rc, as pages will be retried below.
1860 * Count nr_split only, since it includes nr_thp_split.
1861 */
1862 nr_failed += astats.nr_split;
1863 /*
1864 * Fall back to migrate all failed folios one by one synchronously. All
1865 * failed folios except split THPs will be retried, so their failure
1866	 * isn't counted.
1867 */
1868 list_splice_tail_init(&folios, from);
1869 while (!list_empty(from)) {
1870 list_move(from->next, &folios);
1871 rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
1872 private, mode, reason, ret_folios,
1873 split_folios, stats, NR_MAX_MIGRATE_SYNC_RETRY);
1874 list_splice_tail_init(&folios, ret_folios);
1875 if (rc < 0)
1876 return rc;
1877 nr_failed += rc;
1878 }
1879
1880 return nr_failed;
1881}
1882
1883/*
1884 * migrate_pages - migrate the folios specified in a list, to the free folios
1885 * supplied as the target for the page migration
1886 *
1887 * @from: The list of folios to be migrated.
1888 * @get_new_folio: The function used to allocate free folios to be used
1889 * as the target of the folio migration.
1890 * @put_new_folio: The function used to free target folios if migration
1891 * fails, or NULL if no special handling is necessary.
1892 * @private: Private data to be passed on to get_new_folio()
1893 * @mode: The migration mode that specifies the constraints for
1894 * folio migration, if any.
1895 * @reason: The reason for folio migration.
1896 * @ret_succeeded: Set to the number of folios migrated successfully if
1897 * the caller passes a non-NULL pointer.
1898 *
1899 * The function returns after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no folios
1900 * are movable any more because the list has become empty or no retryable folios
1901 * exist any more. It is caller's responsibility to call putback_movable_pages()
1902 * only if ret != 0.
1903 *
1904 * Returns the number of {normal folio, large folio, hugetlb} that were not
1905 * migrated, or an error code. Each large folio that is split is counted
1906 * as one non-migrated large folio, no matter how many of its split
1907 * folios are migrated successfully.
1908 */
1909int migrate_pages(struct list_head *from, new_folio_t get_new_folio,
1910 free_folio_t put_new_folio, unsigned long private,
1911 enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
1912{
1913 int rc, rc_gather;
1914 int nr_pages;
1915 struct folio *folio, *folio2;
1916 LIST_HEAD(folios);
1917 LIST_HEAD(ret_folios);
1918 LIST_HEAD(split_folios);
1919 struct migrate_pages_stats stats;
1920
1921 trace_mm_migrate_pages_start(mode, reason);
1922
1923 memset(&stats, 0, sizeof(stats));
1924
1925 rc_gather = migrate_hugetlbs(from, get_new_folio, put_new_folio, private,
1926 mode, reason, &stats, &ret_folios);
1927 if (rc_gather < 0)
1928 goto out;
1929
1930again:
1931 nr_pages = 0;
1932 list_for_each_entry_safe(folio, folio2, from, lru) {
1933		/* Retried hugetlb folios will be kept in the list */
1934 if (folio_test_hugetlb(folio)) {
1935 list_move_tail(&folio->lru, &ret_folios);
1936 continue;
1937 }
1938
1939 nr_pages += folio_nr_pages(folio);
1940 if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
1941 break;
1942 }
1943 if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
1944 list_cut_before(&folios, from, &folio2->lru);
1945 else
1946 list_splice_init(from, &folios);
1947 if (mode == MIGRATE_ASYNC)
1948 rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
1949 private, mode, reason, &ret_folios,
1950 &split_folios, &stats,
1951 NR_MAX_MIGRATE_PAGES_RETRY);
1952 else
1953 rc = migrate_pages_sync(&folios, get_new_folio, put_new_folio,
1954 private, mode, reason, &ret_folios,
1955 &split_folios, &stats);
1956 list_splice_tail_init(&folios, &ret_folios);
1957 if (rc < 0) {
1958 rc_gather = rc;
1959 list_splice_tail(&split_folios, &ret_folios);
1960 goto out;
1961 }
1962 if (!list_empty(&split_folios)) {
1963 /*
1964		 * Failure isn't counted since all split folios of a large folio
1965		 * are already counted as one failure. And we only try to migrate
1966		 * with minimal effort: force MIGRATE_ASYNC mode and retry once.
1967 */
1968 migrate_pages_batch(&split_folios, get_new_folio,
1969 put_new_folio, private, MIGRATE_ASYNC, reason,
1970 &ret_folios, NULL, &stats, 1);
1971 list_splice_tail_init(&split_folios, &ret_folios);
1972 }
1973 rc_gather += rc;
1974 if (!list_empty(from))
1975 goto again;
1976out:
1977 /*
1978	 * Put the permanently failed folios back on the migration list;
1979	 * they will be put back on the right list by the caller.
1980 */
1981 list_splice(&ret_folios, from);
1982
1983 /*
1984	 * Return 0 in case all split folios of the fail-to-migrate large
1985	 * folios were migrated successfully.
1986 */
1987 if (list_empty(from))
1988 rc_gather = 0;
1989
1990 count_vm_events(PGMIGRATE_SUCCESS, stats.nr_succeeded);
1991 count_vm_events(PGMIGRATE_FAIL, stats.nr_failed_pages);
1992 count_vm_events(THP_MIGRATION_SUCCESS, stats.nr_thp_succeeded);
1993 count_vm_events(THP_MIGRATION_FAIL, stats.nr_thp_failed);
1994 count_vm_events(THP_MIGRATION_SPLIT, stats.nr_thp_split);
1995 trace_mm_migrate_pages(stats.nr_succeeded, stats.nr_failed_pages,
1996 stats.nr_thp_succeeded, stats.nr_thp_failed,
1997 stats.nr_thp_split, stats.nr_split, mode,
1998 reason);
1999
2000 if (ret_succeeded)
2001 *ret_succeeded = stats.nr_succeeded;
2002
2003 return rc_gather;
2004}
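/*
 * Usage sketch: a typical migrate_pages() caller, modelled on
 * do_move_pages_to_node() below; the mtc fields shown are the ones
 * alloc_migration_target() consumes:
 *
 *	struct migration_target_control mtc = {
 *		.nid = target_nid,
 *		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
 *	};
 *	int err;
 *
 *	err = migrate_pages(&pagelist, alloc_migration_target, NULL,
 *			    (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL,
 *			    NULL);
 *	if (err)
 *		putback_movable_pages(&pagelist);
 */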
2005
2006struct folio *alloc_migration_target(struct folio *src, unsigned long private)
2007{
2008 struct migration_target_control *mtc;
2009 gfp_t gfp_mask;
2010 unsigned int order = 0;
2011 int nid;
2012 int zidx;
2013
2014 mtc = (struct migration_target_control *)private;
2015 gfp_mask = mtc->gfp_mask;
2016 nid = mtc->nid;
2017 if (nid == NUMA_NO_NODE)
2018 nid = folio_nid(src);
2019
2020 if (folio_test_hugetlb(src)) {
2021 struct hstate *h = folio_hstate(src);
2022
2023 gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
2024 return alloc_hugetlb_folio_nodemask(h, nid,
2025 mtc->nmask, gfp_mask);
2026 }
2027
2028 if (folio_test_large(src)) {
2029 /*
2030 * clear __GFP_RECLAIM to make the migration callback
2031 * consistent with regular THP allocations.
2032 */
2033 gfp_mask &= ~__GFP_RECLAIM;
2034 gfp_mask |= GFP_TRANSHUGE;
2035 order = folio_order(src);
2036 }
2037 zidx = zone_idx(folio_zone(src));
2038 if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
2039 gfp_mask |= __GFP_HIGHMEM;
2040
2041 return __folio_alloc(gfp_mask, order, nid, mtc->nmask);
2042}
2043
2044#ifdef CONFIG_NUMA
2045
2046static int store_status(int __user *status, int start, int value, int nr)
2047{
2048 while (nr-- > 0) {
2049 if (put_user(value, status + start))
2050 return -EFAULT;
2051 start++;
2052 }
2053
2054 return 0;
2055}
2056
2057static int do_move_pages_to_node(struct list_head *pagelist, int node)
2058{
2059 int err;
2060 struct migration_target_control mtc = {
2061 .nid = node,
2062 .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
2063 };
2064
2065 err = migrate_pages(pagelist, alloc_migration_target, NULL,
2066 (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
2067 if (err)
2068 putback_movable_pages(pagelist);
2069 return err;
2070}
2071
2072/*
2073 * Resolves the given address to a struct page, isolates it from the LRU and
2074 * adds it to the given pagelist.
2075 * Returns:
2076 * errno - if the page cannot be found/isolated
2077 * 0 - when it doesn't have to be migrated because it is already on the
2078 * target node
2079 * 1 - when it has been queued
2080 */
2081static int add_page_for_migration(struct mm_struct *mm, const void __user *p,
2082 int node, struct list_head *pagelist, bool migrate_all)
2083{
2084 struct vm_area_struct *vma;
2085 unsigned long addr;
2086 struct page *page;
2087 struct folio *folio;
2088 int err;
2089
2090 mmap_read_lock(mm);
2091 addr = (unsigned long)untagged_addr_remote(mm, p);
2092
2093 err = -EFAULT;
2094 vma = vma_lookup(mm, addr);
2095 if (!vma || !vma_migratable(vma))
2096 goto out;
2097
2098 /* FOLL_DUMP to ignore special (like zero) pages */
2099 page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
2100
2101 err = PTR_ERR(page);
2102 if (IS_ERR(page))
2103 goto out;
2104
2105 err = -ENOENT;
2106 if (!page)
2107 goto out;
2108
2109 folio = page_folio(page);
2110 if (folio_is_zone_device(folio))
2111 goto out_putfolio;
2112
2113 err = 0;
2114 if (folio_nid(folio) == node)
2115 goto out_putfolio;
2116
2117 err = -EACCES;
2118 if (page_mapcount(page) > 1 && !migrate_all)
2119 goto out_putfolio;
2120
2121 err = -EBUSY;
2122 if (folio_test_hugetlb(folio)) {
2123 if (isolate_hugetlb(folio, pagelist))
2124 err = 1;
2125 } else {
2126 if (!folio_isolate_lru(folio))
2127 goto out_putfolio;
2128
2129 err = 1;
2130 list_add_tail(&folio->lru, pagelist);
2131 node_stat_mod_folio(folio,
2132 NR_ISOLATED_ANON + folio_is_file_lru(folio),
2133 folio_nr_pages(folio));
2134 }
2135out_putfolio:
2136 /*
2137 * Either remove the duplicate refcount from folio_isolate_lru()
2138 * or drop the folio ref if it was not isolated.
2139 */
2140 folio_put(folio);
2141out:
2142 mmap_read_unlock(mm);
2143 return err;
2144}
2145
2146static int move_pages_and_store_status(int node,
2147 struct list_head *pagelist, int __user *status,
2148 int start, int i, unsigned long nr_pages)
2149{
2150 int err;
2151
2152 if (list_empty(pagelist))
2153 return 0;
2154
2155 err = do_move_pages_to_node(pagelist, node);
2156 if (err) {
2157 /*
2158		 * A positive err means the number of pages that
2159		 * failed to migrate. Since we are going to
2160		 * abort and return the number of non-migrated
2161		 * pages, we need to include the rest of the
2162		 * nr_pages that have not been attempted as
2163		 * well.
2164 */
2165 if (err > 0)
2166 err += nr_pages - i;
2167 return err;
2168 }
2169 return store_status(status, start, node, i - start);
2170}
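/*
 * Worked example (informational): with nr_pages = 8, suppose pages 0..3
 * were queued (start = 0, i = 4) and do_move_pages_to_node() reports 2
 * failures. The function returns 2 + (8 - 4) = 6: the two folios that
 * failed plus the four pages never attempted, matching the move_pages(2)
 * convention of returning the number of non-migrated pages on a partial
 * failure.
 */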
2171
2172/*
2173 * Migrate an array of page addresses onto an array of nodes and fill
2174 * the corresponding array of status values.
2175 */
2176static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
2177 unsigned long nr_pages,
2178 const void __user * __user *pages,
2179 const int __user *nodes,
2180 int __user *status, int flags)
2181{
2182 compat_uptr_t __user *compat_pages = (void __user *)pages;
2183 int current_node = NUMA_NO_NODE;
2184 LIST_HEAD(pagelist);
2185 int start, i;
2186 int err = 0, err1;
2187
2188 lru_cache_disable();
2189
2190 for (i = start = 0; i < nr_pages; i++) {
2191 const void __user *p;
2192 int node;
2193
2194 err = -EFAULT;
2195 if (in_compat_syscall()) {
2196 compat_uptr_t cp;
2197
2198 if (get_user(cp, compat_pages + i))
2199 goto out_flush;
2200
2201 p = compat_ptr(cp);
2202 } else {
2203 if (get_user(p, pages + i))
2204 goto out_flush;
2205 }
2206 if (get_user(node, nodes + i))
2207 goto out_flush;
2208
2209 err = -ENODEV;
2210 if (node < 0 || node >= MAX_NUMNODES)
2211 goto out_flush;
2212 if (!node_state(node, N_MEMORY))
2213 goto out_flush;
2214
2215 err = -EACCES;
2216 if (!node_isset(node, task_nodes))
2217 goto out_flush;
2218
2219 if (current_node == NUMA_NO_NODE) {
2220 current_node = node;
2221 start = i;
2222 } else if (node != current_node) {
2223 err = move_pages_and_store_status(current_node,
2224 &pagelist, status, start, i, nr_pages);
2225 if (err)
2226 goto out;
2227 start = i;
2228 current_node = node;
2229 }
2230
2231 /*
2232 * Errors in the page lookup or isolation are not fatal and we simply
2233		 * report them via status.
2234 */
2235 err = add_page_for_migration(mm, p, current_node, &pagelist,
2236 flags & MPOL_MF_MOVE_ALL);
2237
2238 if (err > 0) {
2239 /* The page is successfully queued for migration */
2240 continue;
2241 }
2242
2243 /*
2244		 * The move_pages() man page does not document an -EEXIST error
2245		 * code, so use -EFAULT instead.
2246 */
2247 if (err == -EEXIST)
2248 err = -EFAULT;
2249
2250 /*
2251 * If the page is already on the target node (!err), store the
2252 * node, otherwise, store the err.
2253 */
2254 err = store_status(status, i, err ? : current_node, 1);
2255 if (err)
2256 goto out_flush;
2257
2258 err = move_pages_and_store_status(current_node, &pagelist,
2259 status, start, i, nr_pages);
2260 if (err) {
2261 /* We have accounted for page i */
2262 if (err > 0)
2263 err--;
2264 goto out;
2265 }
2266 current_node = NUMA_NO_NODE;
2267 }
2268out_flush:
2269 /* Make sure we do not overwrite the existing error */
2270 err1 = move_pages_and_store_status(current_node, &pagelist,
2271 status, start, i, nr_pages);
2272 if (err >= 0)
2273 err = err1;
2274out:
2275 lru_cache_enable();
2276 return err;
2277}
2278
2279/*
2280 * Determine the nodes of an array of pages and store them in an array of status.
2281 */
2282static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
2283 const void __user **pages, int *status)
2284{
2285 unsigned long i;
2286
2287 mmap_read_lock(mm);
2288
2289 for (i = 0; i < nr_pages; i++) {
2290 unsigned long addr = (unsigned long)(*pages);
2291 struct vm_area_struct *vma;
2292 struct page *page;
2293 int err = -EFAULT;
2294
2295 vma = vma_lookup(mm, addr);
2296 if (!vma)
2297 goto set_status;
2298
2299 /* FOLL_DUMP to ignore special (like zero) pages */
2300 page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
2301
2302 err = PTR_ERR(page);
2303 if (IS_ERR(page))
2304 goto set_status;
2305
2306 err = -ENOENT;
2307 if (!page)
2308 goto set_status;
2309
2310 if (!is_zone_device_page(page))
2311 err = page_to_nid(page);
2312
2313 put_page(page);
2314set_status:
2315 *status = err;
2316
2317 pages++;
2318 status++;
2319 }
2320
2321 mmap_read_unlock(mm);
2322}
2323
2324static int get_compat_pages_array(const void __user *chunk_pages[],
2325 const void __user * __user *pages,
2326 unsigned long chunk_nr)
2327{
2328 compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages;
2329 compat_uptr_t p;
2330 int i;
2331
2332 for (i = 0; i < chunk_nr; i++) {
2333 if (get_user(p, pages32 + i))
2334 return -EFAULT;
2335 chunk_pages[i] = compat_ptr(p);
2336 }
2337
2338 return 0;
2339}
2340
2341/*
2342 * Determine the nodes of a user array of pages and store them in
2343 * a user array of status values.
2344 */
2345static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
2346 const void __user * __user *pages,
2347 int __user *status)
2348{
2349#define DO_PAGES_STAT_CHUNK_NR 16UL
2350 const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
2351 int chunk_status[DO_PAGES_STAT_CHUNK_NR];
2352
2353 while (nr_pages) {
2354 unsigned long chunk_nr = min(nr_pages, DO_PAGES_STAT_CHUNK_NR);
2355
2356 if (in_compat_syscall()) {
2357 if (get_compat_pages_array(chunk_pages, pages,
2358 chunk_nr))
2359 break;
2360 } else {
2361 if (copy_from_user(chunk_pages, pages,
2362 chunk_nr * sizeof(*chunk_pages)))
2363 break;
2364 }
2365
2366 do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
2367
2368 if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
2369 break;
2370
2371 pages += chunk_nr;
2372 status += chunk_nr;
2373 nr_pages -= chunk_nr;
2374 }
2375 return nr_pages ? -EFAULT : 0;
2376}
2377
2378static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
2379{
2380 struct task_struct *task;
2381 struct mm_struct *mm;
2382
2383 /*
2384	 * There is no need to check if the current process has the right to
2385	 * modify the specified process when they are the same.
2386 */
2387 if (!pid) {
2388 mmget(current->mm);
2389 *mem_nodes = cpuset_mems_allowed(current);
2390 return current->mm;
2391 }
2392
2393 /* Find the mm_struct */
2394 rcu_read_lock();
2395 task = find_task_by_vpid(pid);
2396 if (!task) {
2397 rcu_read_unlock();
2398 return ERR_PTR(-ESRCH);
2399 }
2400 get_task_struct(task);
2401
2402 /*
2403 * Check if this process has the right to modify the specified
2404 * process. Use the regular "ptrace_may_access()" checks.
2405 */
2406 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
2407 rcu_read_unlock();
2408 mm = ERR_PTR(-EPERM);
2409 goto out;
2410 }
2411 rcu_read_unlock();
2412
2413 mm = ERR_PTR(security_task_movememory(task));
2414 if (IS_ERR(mm))
2415 goto out;
2416 *mem_nodes = cpuset_mems_allowed(task);
2417 mm = get_task_mm(task);
2418out:
2419 put_task_struct(task);
2420 if (!mm)
2421 mm = ERR_PTR(-EINVAL);
2422 return mm;
2423}
2424
2425/*
2426 * Move a list of pages in the address space of the currently executing
2427 * process.
2428 */
2429static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
2430 const void __user * __user *pages,
2431 const int __user *nodes,
2432 int __user *status, int flags)
2433{
2434 struct mm_struct *mm;
2435 int err;
2436 nodemask_t task_nodes;
2437
2438 /* Check flags */
2439 if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
2440 return -EINVAL;
2441
2442 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
2443 return -EPERM;
2444
2445 mm = find_mm_struct(pid, &task_nodes);
2446 if (IS_ERR(mm))
2447 return PTR_ERR(mm);
2448
2449 if (nodes)
2450 err = do_pages_move(mm, task_nodes, nr_pages, pages,
2451 nodes, status, flags);
2452 else
2453 err = do_pages_stat(mm, nr_pages, pages, status);
2454
2455 mmput(mm);
2456 return err;
2457}
2458
2459SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
2460 const void __user * __user *, pages,
2461 const int __user *, nodes,
2462 int __user *, status, int, flags)
2463{
2464 return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
2465}
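/*
 * Userspace sketch (illustrative, not part of the kernel): driving this
 * syscall through the libnuma wrapper declared in <numaif.h>. Passing
 * nodes == NULL turns the call into a pure query that fills status[]
 * with the node of each page (see do_pages_stat() above):
 *
 *	#include <numaif.h>
 *
 *	void *pages[1] = { addr };	// page-aligned address in our space
 *	int nodes[1] = { 1 };		// desired destination node
 *	int status[1];
 *
 *	// move the page to node 1 (pid 0 means the calling process)
 *	long rc = move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE);
 *
 *	// query-only: where does the page currently reside?
 *	rc = move_pages(0, 1, pages, NULL, status, 0);
 */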
2466
2467#ifdef CONFIG_NUMA_BALANCING
2468/*
2469 * Returns true if this is a safe migration target node for misplaced NUMA
2470 * pages. Currently it only checks the watermarks, which is crude.
2471 */
2472static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
2473 unsigned long nr_migrate_pages)
2474{
2475 int z;
2476
2477 for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2478 struct zone *zone = pgdat->node_zones + z;
2479
2480 if (!managed_zone(zone))
2481 continue;
2482
2483		/* Avoid waking kswapd by allocating nr_migrate_pages pages. */
2484 if (!zone_watermark_ok(zone, 0,
2485 high_wmark_pages(zone) +
2486 nr_migrate_pages,
2487 ZONE_MOVABLE, 0))
2488 continue;
2489 return true;
2490 }
2491 return false;
2492}
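/*
 * Worked example (informational, simplified): for a zone whose high
 * watermark is 1000 pages, migrating 512 pages to it is allowed only if
 * the zone could satisfy an order-0 allocation while still keeping
 * roughly 1000 + 512 pages free, i.e. accepting the incoming pages must
 * not drop the zone below its high watermark and wake kswapd.
 */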
2493
2494static struct folio *alloc_misplaced_dst_folio(struct folio *src,
2495 unsigned long data)
2496{
2497 int nid = (int) data;
2498 int order = folio_order(src);
2499 gfp_t gfp = __GFP_THISNODE;
2500
2501 if (order > 0)
2502 gfp |= GFP_TRANSHUGE_LIGHT;
2503 else {
2504 gfp |= GFP_HIGHUSER_MOVABLE | __GFP_NOMEMALLOC | __GFP_NORETRY |
2505 __GFP_NOWARN;
2506 gfp &= ~__GFP_RECLAIM;
2507 }
2508 return __folio_alloc_node(gfp, order, nid);
2509}
2510
2511static int numamigrate_isolate_folio(pg_data_t *pgdat, struct folio *folio)
2512{
2513 int nr_pages = folio_nr_pages(folio);
2514
2515 /* Avoid migrating to a node that is nearly full */
2516 if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
2517 int z;
2518
2519 if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING))
2520 return 0;
2521 for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2522 if (managed_zone(pgdat->node_zones + z))
2523 break;
2524 }
2525
2526 /*
2527 * If there are no managed zones, it should not proceed
2528 * further.
2529 */
2530 if (z < 0)
2531 return 0;
2532
2533 wakeup_kswapd(pgdat->node_zones + z, 0,
2534 folio_order(folio), ZONE_MOVABLE);
2535 return 0;
2536 }
2537
2538 if (!folio_isolate_lru(folio))
2539 return 0;
2540
2541 node_stat_mod_folio(folio, NR_ISOLATED_ANON + folio_is_file_lru(folio),
2542 nr_pages);
2543
2544 /*
2545 * Isolating the folio has taken another reference, so the
2546 * caller's reference can be safely dropped without the folio
2547 * disappearing underneath us during migration.
2548 */
2549 folio_put(folio);
2550 return 1;
2551}
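/*
 * Reference-count sketch (informational): on entry the caller holds one
 * reference from the NUMA hinting fault path. folio_isolate_lru() takes
 * a second reference for the isolation, and the folio_put() above drops
 * the caller's, so a successful return leaves exactly the isolation
 * reference for migrate_pages() to consume.
 */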
2552
2553/*
2554 * Attempt to migrate a misplaced folio to the specified destination
2555 * node. The caller is expected to hold an elevated reference count on
2556 * the folio, which will be dropped by this function before returning.
2557 */
2558int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
2559 int node)
2560{
2561 pg_data_t *pgdat = NODE_DATA(node);
2562 int isolated;
2563 int nr_remaining;
2564 unsigned int nr_succeeded;
2565 LIST_HEAD(migratepages);
2566 int nr_pages = folio_nr_pages(folio);
2567
2568 /*
2569 * Don't migrate file folios that are mapped in multiple processes
2570 * with execute permissions as they are probably shared libraries.
2571 * To check if the folio is shared, ideally we want to make sure
2572 * every page is mapped to the same process. Doing that is very
2573 * expensive, so check the estimated mapcount of the folio instead.
2574 */
2575 if (folio_estimated_sharers(folio) != 1 && folio_is_file_lru(folio) &&
2576 (vma->vm_flags & VM_EXEC))
2577 goto out;
2578
2579 /*
2580	 * Also do not migrate dirty folios, as not all filesystems can move
2581	 * dirty folios in MIGRATE_ASYNC mode, so trying is a waste of cycles.
2582 */
2583 if (folio_is_file_lru(folio) && folio_test_dirty(folio))
2584 goto out;
2585
2586 isolated = numamigrate_isolate_folio(pgdat, folio);
2587 if (!isolated)
2588 goto out;
2589
2590 list_add(&folio->lru, &migratepages);
2591 nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio,
2592 NULL, node, MIGRATE_ASYNC,
2593 MR_NUMA_MISPLACED, &nr_succeeded);
2594 if (nr_remaining) {
2595 if (!list_empty(&migratepages)) {
2596 list_del(&folio->lru);
2597 node_stat_mod_folio(folio, NR_ISOLATED_ANON +
2598 folio_is_file_lru(folio), -nr_pages);
2599 folio_putback_lru(folio);
2600 }
2601 isolated = 0;
2602 }
2603 if (nr_succeeded) {
2604 count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
2605 if (!node_is_toptier(folio_nid(folio)) && node_is_toptier(node))
2606 mod_node_page_state(pgdat, PGPROMOTE_SUCCESS,
2607 nr_succeeded);
2608 }
2609 BUG_ON(!list_empty(&migratepages));
2610 return isolated;
2611
2612out:
2613 folio_put(folio);
2614 return 0;
2615}
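/*
 * Usage sketch (illustrative): how a NUMA hinting fault handler along
 * the lines of do_numa_page() in mm/memory.c would use the helper
 * above; target_nid comes from the placement policy, and the fault
 * path is assumed to have already taken the folio reference that this
 * function consumes:
 *
 *	if (migrate_misplaced_folio(folio, vma, target_nid))
 *		page_nid = target_nid;	// folio now lives on target_nid
 *	else
 *		flags |= TNF_MIGRATE_FAIL;
 */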
2616#endif /* CONFIG_NUMA_BALANCING */
2617#endif /* CONFIG_NUMA */