1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Memory Migration functionality - linux/mm/migrate.c
4 *
5 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
6 *
7 * Page migration was first developed in the context of the memory hotplug
8 * project. The main authors of the migration code are:
9 *
10 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
11 * Hirokazu Takahashi <taka@valinux.co.jp>
12 * Dave Hansen <haveblue@us.ibm.com>
13 * Christoph Lameter
14 */
15
16#include <linux/migrate.h>
17#include <linux/export.h>
18#include <linux/swap.h>
19#include <linux/swapops.h>
20#include <linux/pagemap.h>
21#include <linux/buffer_head.h>
22#include <linux/mm_inline.h>
23#include <linux/nsproxy.h>
24#include <linux/pagevec.h>
25#include <linux/ksm.h>
26#include <linux/rmap.h>
27#include <linux/topology.h>
28#include <linux/cpu.h>
29#include <linux/cpuset.h>
30#include <linux/writeback.h>
31#include <linux/mempolicy.h>
32#include <linux/vmalloc.h>
33#include <linux/security.h>
34#include <linux/backing-dev.h>
35#include <linux/compaction.h>
36#include <linux/syscalls.h>
37#include <linux/compat.h>
38#include <linux/hugetlb.h>
39#include <linux/hugetlb_cgroup.h>
40#include <linux/gfp.h>
41#include <linux/pfn_t.h>
42#include <linux/memremap.h>
43#include <linux/userfaultfd_k.h>
44#include <linux/balloon_compaction.h>
45#include <linux/page_idle.h>
46#include <linux/page_owner.h>
47#include <linux/sched/mm.h>
48#include <linux/ptrace.h>
49#include <linux/oom.h>
50#include <linux/memory.h>
51#include <linux/random.h>
52#include <linux/sched/sysctl.h>
53#include <linux/memory-tiers.h>
54
55#include <asm/tlbflush.h>
56
57#include <trace/events/migrate.h>
58
59#include "internal.h"
60
61int isolate_movable_page(struct page *page, isolate_mode_t mode)
62{
63 const struct movable_operations *mops;
64
65 /*
66 * Avoid burning cycles with pages that are still under __free_pages(),
67 * or that have just been freed under us.
68 *
69 * In case we 'win' a race for a movable page being freed under us and
70 * raise its refcount, preventing __free_pages() from doing its job,
71 * the put_page() at the end of this block will take care of
72 * releasing this page, thus avoiding a nasty leak.
73 */
74 if (unlikely(!get_page_unless_zero(page)))
75 goto out;
76
77 if (unlikely(PageSlab(page)))
78 goto out_putpage;
79 /* Pairs with smp_wmb() in slab freeing, e.g. SLUB's __free_slab() */
80 smp_rmb();
81 /*
82 * Check movable flag before taking the page lock because
83 * we use non-atomic bitops on newly allocated page flags so
84 * unconditionally grabbing the lock ruins page's owner side.
85 */
86 if (unlikely(!__PageMovable(page)))
87 goto out_putpage;
88 /* Pairs with smp_wmb() in slab allocation, e.g. SLUB's alloc_slab_page() */
89 smp_rmb();
90 if (unlikely(PageSlab(page)))
91 goto out_putpage;
92
93 /*
94 * As movable pages are not isolated from LRU lists, concurrent
95 * compaction threads can race against page migration functions
96 * as well as race against the release of a page.
97 *
98 * In order to avoid having an already isolated movable page
99 * being (wrongly) re-isolated while it is under migration,
100 * or to avoid attempting to isolate pages being released,
101 * let's be sure we have the page lock
102 * before proceeding with the movable page isolation steps.
103 */
104 if (unlikely(!trylock_page(page)))
105 goto out_putpage;
106
107 if (!PageMovable(page) || PageIsolated(page))
108 goto out_no_isolated;
109
110 mops = page_movable_ops(page);
111 VM_BUG_ON_PAGE(!mops, page);
112
113 if (!mops->isolate_page(page, mode))
114 goto out_no_isolated;
115
116 /* Driver shouldn't use PG_isolated bit of page->flags */
117 WARN_ON_ONCE(PageIsolated(page));
118 SetPageIsolated(page);
119 unlock_page(page);
120
121 return 0;
122
123out_no_isolated:
124 unlock_page(page);
125out_putpage:
126 put_page(page);
127out:
128 return -EBUSY;
129}
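
/*
 * Illustrative sketch (not part of this file): isolate_movable_page() only
 * succeeds for pages whose driver has registered movable_operations via
 * __SetPageMovable() while holding the page lock. A hypothetical driver
 * (all "demo_*" names are made up for the example) would wire it up roughly
 * like this:
 *
 *	static bool demo_isolate_page(struct page *page, isolate_mode_t mode);
 *	static int demo_migrate_page(struct page *dst, struct page *src,
 *				     enum migrate_mode mode);
 *	static void demo_putback_page(struct page *page);
 *
 *	static const struct movable_operations demo_mops = {
 *		.isolate_page	= demo_isolate_page,
 *		.migrate_page	= demo_migrate_page,
 *		.putback_page	= demo_putback_page,
 *	};
 *
 *	lock_page(page);
 *	__SetPageMovable(page, &demo_mops);
 *	unlock_page(page);
 */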
130
131static void putback_movable_page(struct page *page)
132{
133 const struct movable_operations *mops = page_movable_ops(page);
134
135 mops->putback_page(page);
136 ClearPageIsolated(page);
137}
138
139/*
140 * Put previously isolated pages back onto the appropriate lists
141 * from where they were once taken off for compaction/migration.
142 *
143 * This function shall be used whenever the isolated pageset has been
144 * built from LRU, balloon or hugetlbfs pages. See isolate_migratepages_range()
145 * and isolate_hugetlb().
146 */
147void putback_movable_pages(struct list_head *l)
148{
149 struct page *page;
150 struct page *page2;
151
152 list_for_each_entry_safe(page, page2, l, lru) {
153 if (unlikely(PageHuge(page))) {
154 putback_active_hugepage(page);
155 continue;
156 }
157 list_del(&page->lru);
158 /*
159 * We isolated a non-LRU movable page, so here we can use
160 * __PageMovable because an LRU page's mapping cannot have
161 * PAGE_MAPPING_MOVABLE.
162 */
163 if (unlikely(__PageMovable(page))) {
164 VM_BUG_ON_PAGE(!PageIsolated(page), page);
165 lock_page(page);
166 if (PageMovable(page))
167 putback_movable_page(page);
168 else
169 ClearPageIsolated(page);
170 unlock_page(page);
171 put_page(page);
172 } else {
173 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
174 page_is_file_lru(page), -thp_nr_pages(page));
175 putback_lru_page(page);
176 }
177 }
178}
179
180/*
181 * Restore a potential migration pte to a working pte entry
182 */
183static bool remove_migration_pte(struct folio *folio,
184 struct vm_area_struct *vma, unsigned long addr, void *old)
185{
186 DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION);
187
188 while (page_vma_mapped_walk(&pvmw)) {
189 rmap_t rmap_flags = RMAP_NONE;
190 pte_t pte;
191 swp_entry_t entry;
192 struct page *new;
193 unsigned long idx = 0;
194
195 /* pgoff is invalid for ksm pages, but they are never large */
196 if (folio_test_large(folio) && !folio_test_hugetlb(folio))
197 idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff;
198 new = folio_page(folio, idx);
199
200#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
201 /* PMD-mapped THP migration entry */
202 if (!pvmw.pte) {
203 VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
204 !folio_test_pmd_mappable(folio), folio);
205 remove_migration_pmd(&pvmw, new);
206 continue;
207 }
208#endif
209
210 folio_get(folio);
211 pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
212 if (pte_swp_soft_dirty(*pvmw.pte))
213 pte = pte_mksoft_dirty(pte);
214
215 /*
216 * Recheck VMA as permissions can change since migration started
217 */
218 entry = pte_to_swp_entry(*pvmw.pte);
219 if (!is_migration_entry_young(entry))
220 pte = pte_mkold(pte);
221 if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
222 pte = pte_mkdirty(pte);
223 if (is_writable_migration_entry(entry))
224 pte = maybe_mkwrite(pte, vma);
225 else if (pte_swp_uffd_wp(*pvmw.pte))
226 pte = pte_mkuffd_wp(pte);
227 else
228 pte = pte_wrprotect(pte);
229
230 if (folio_test_anon(folio) && !is_readable_migration_entry(entry))
231 rmap_flags |= RMAP_EXCLUSIVE;
232
233 if (unlikely(is_device_private_page(new))) {
234 if (pte_write(pte))
235 entry = make_writable_device_private_entry(
236 page_to_pfn(new));
237 else
238 entry = make_readable_device_private_entry(
239 page_to_pfn(new));
240 pte = swp_entry_to_pte(entry);
241 if (pte_swp_soft_dirty(*pvmw.pte))
242 pte = pte_swp_mksoft_dirty(pte);
243 if (pte_swp_uffd_wp(*pvmw.pte))
244 pte = pte_swp_mkuffd_wp(pte);
245 }
246
247#ifdef CONFIG_HUGETLB_PAGE
248 if (folio_test_hugetlb(folio)) {
249 unsigned int shift = huge_page_shift(hstate_vma(vma));
250
251 pte = pte_mkhuge(pte);
252 pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
253 if (folio_test_anon(folio))
254 hugepage_add_anon_rmap(new, vma, pvmw.address,
255 rmap_flags);
256 else
257 page_dup_file_rmap(new, true);
258 set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
259 } else
260#endif
261 {
262 if (folio_test_anon(folio))
263 page_add_anon_rmap(new, vma, pvmw.address,
264 rmap_flags);
265 else
266 page_add_file_rmap(new, vma, false);
267 set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
268 }
269 if (vma->vm_flags & VM_LOCKED)
270 mlock_page_drain_local();
271
272 trace_remove_migration_pte(pvmw.address, pte_val(pte),
273 compound_order(new));
274
275 /* No need to invalidate - it was non-present before */
276 update_mmu_cache(vma, pvmw.address, pvmw.pte);
277 }
278
279 return true;
280}
281
282/*
283 * Get rid of all migration entries and replace them by
284 * references to the indicated page.
285 */
286void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked)
287{
288 struct rmap_walk_control rwc = {
289 .rmap_one = remove_migration_pte,
290 .arg = src,
291 };
292
293 if (locked)
294 rmap_walk_locked(dst, &rwc);
295 else
296 rmap_walk(dst, &rwc);
297}
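
/*
 * Orientation sketch (mirroring what __unmap_and_move() does further down):
 * migration entries are installed by try_to_migrate() and later replaced by
 * remove_migration_ptes(), which points them at the destination folio on
 * success or back at the source folio on failure:
 *
 *	try_to_migrate(src, 0);
 *	rc = move_to_new_folio(dst, src, mode);
 *	remove_migration_ptes(src,
 *		rc == MIGRATEPAGE_SUCCESS ? dst : src, false);
 */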
298
299/*
300 * Something used the pte of a page under migration. We need to
301 * get to the page and wait until migration is finished.
302 * When we return from this function the fault will be retried.
303 */
304void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
305 spinlock_t *ptl)
306{
307 pte_t pte;
308 swp_entry_t entry;
309
310 spin_lock(ptl);
311 pte = *ptep;
312 if (!is_swap_pte(pte))
313 goto out;
314
315 entry = pte_to_swp_entry(pte);
316 if (!is_migration_entry(entry))
317 goto out;
318
319 migration_entry_wait_on_locked(entry, ptep, ptl);
320 return;
321out:
322 pte_unmap_unlock(ptep, ptl);
323}
324
325void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
326 unsigned long address)
327{
328 spinlock_t *ptl = pte_lockptr(mm, pmd);
329 pte_t *ptep = pte_offset_map(pmd, address);
330 __migration_entry_wait(mm, ptep, ptl);
331}
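
/*
 * Typical caller (sketch based on the fault path in mm/memory.c): when a
 * fault finds a migration entry instead of a present pte, it waits here and
 * the fault is retried once migration has finished:
 *
 *	entry = pte_to_swp_entry(vmf->orig_pte);
 *	if (is_migration_entry(entry))
 *		migration_entry_wait(vma->vm_mm, vmf->pmd, vmf->address);
 */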
332
333#ifdef CONFIG_HUGETLB_PAGE
334void __migration_entry_wait_huge(pte_t *ptep, spinlock_t *ptl)
335{
336 pte_t pte;
337
338 spin_lock(ptl);
339 pte = huge_ptep_get(ptep);
340
341 if (unlikely(!is_hugetlb_entry_migration(pte)))
342 spin_unlock(ptl);
343 else
344 migration_entry_wait_on_locked(pte_to_swp_entry(pte), NULL, ptl);
345}
346
347void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte)
348{
349 spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, pte);
350
351 __migration_entry_wait_huge(pte, ptl);
352}
353#endif
354
355#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
356void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
357{
358 spinlock_t *ptl;
359
360 ptl = pmd_lock(mm, pmd);
361 if (!is_pmd_migration_entry(*pmd))
362 goto unlock;
363 migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), NULL, ptl);
364 return;
365unlock:
366 spin_unlock(ptl);
367}
368#endif
369
370static int folio_expected_refs(struct address_space *mapping,
371 struct folio *folio)
372{
373 int refs = 1;
374 if (!mapping)
375 return refs;
376
377 refs += folio_nr_pages(folio);
378 if (folio_test_private(folio))
379 refs++;
380
381 return refs;
382}
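
/*
 * Worked example for folio_expected_refs(): for a 4-page pagecache folio
 * with private data (e.g. buffer heads) attached, the expected count is
 * 1 (the reference held by the migration code) + 4 (one page cache
 * reference per page) + 1 (folio private data) = 6. An anonymous folio
 * that is not in the swap cache has no mapping here, so the expected
 * count is just 1.
 */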
383
384/*
385 * Replace the page in the mapping.
386 *
387 * The number of remaining references must be:
388 * 1 for anonymous pages without a mapping
389 * 2 for pages with a mapping
390 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
391 */
392int folio_migrate_mapping(struct address_space *mapping,
393 struct folio *newfolio, struct folio *folio, int extra_count)
394{
395 XA_STATE(xas, &mapping->i_pages, folio_index(folio));
396 struct zone *oldzone, *newzone;
397 int dirty;
398 int expected_count = folio_expected_refs(mapping, folio) + extra_count;
399 long nr = folio_nr_pages(folio);
400
401 if (!mapping) {
402 /* Anonymous page without mapping */
403 if (folio_ref_count(folio) != expected_count)
404 return -EAGAIN;
405
406 /* No turning back from here */
407 newfolio->index = folio->index;
408 newfolio->mapping = folio->mapping;
409 if (folio_test_swapbacked(folio))
410 __folio_set_swapbacked(newfolio);
411
412 return MIGRATEPAGE_SUCCESS;
413 }
414
415 oldzone = folio_zone(folio);
416 newzone = folio_zone(newfolio);
417
418 xas_lock_irq(&xas);
419 if (!folio_ref_freeze(folio, expected_count)) {
420 xas_unlock_irq(&xas);
421 return -EAGAIN;
422 }
423
424 /*
425 * Now we know that no one else is looking at the folio:
426 * no turning back from here.
427 */
428 newfolio->index = folio->index;
429 newfolio->mapping = folio->mapping;
430 folio_ref_add(newfolio, nr); /* add cache reference */
431 if (folio_test_swapbacked(folio)) {
432 __folio_set_swapbacked(newfolio);
433 if (folio_test_swapcache(folio)) {
434 folio_set_swapcache(newfolio);
435 newfolio->private = folio_get_private(folio);
436 }
437 } else {
438 VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
439 }
440
441 /* Move dirty while page refs frozen and newpage not yet exposed */
442 dirty = folio_test_dirty(folio);
443 if (dirty) {
444 folio_clear_dirty(folio);
445 folio_set_dirty(newfolio);
446 }
447
448 xas_store(&xas, newfolio);
449
450 /*
451 * Drop cache reference from old page by unfreezing
452 * to one less reference.
453 * We know this isn't the last reference.
454 */
455 folio_ref_unfreeze(folio, expected_count - nr);
456
457 xas_unlock(&xas);
458 /* Leave irq disabled to prevent preemption while updating stats */
459
460 /*
461 * If moved to a different zone then also account
462 * the page for that zone. Other VM counters will be
463 * taken care of when we establish references to the
464 * new page and drop references to the old page.
465 *
466 * Note that anonymous pages are accounted for
467 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
468 * are mapped to swap space.
469 */
470 if (newzone != oldzone) {
471 struct lruvec *old_lruvec, *new_lruvec;
472 struct mem_cgroup *memcg;
473
474 memcg = folio_memcg(folio);
475 old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
476 new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
477
478 __mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
479 __mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
480 if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
481 __mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
482 __mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
483 }
484#ifdef CONFIG_SWAP
485 if (folio_test_swapcache(folio)) {
486 __mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
487 __mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
488 }
489#endif
490 if (dirty && mapping_can_writeback(mapping)) {
491 __mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
492 __mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
493 __mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
494 __mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
495 }
496 }
497 local_irq_enable();
498
499 return MIGRATEPAGE_SUCCESS;
500}
501EXPORT_SYMBOL(folio_migrate_mapping);
502
503/*
504 * The expected number of remaining references is the same as that
505 * of folio_migrate_mapping().
506 */
507int migrate_huge_page_move_mapping(struct address_space *mapping,
508 struct folio *dst, struct folio *src)
509{
510 XA_STATE(xas, &mapping->i_pages, folio_index(src));
511 int expected_count;
512
513 xas_lock_irq(&xas);
514 expected_count = 2 + folio_has_private(src);
515 if (!folio_ref_freeze(src, expected_count)) {
516 xas_unlock_irq(&xas);
517 return -EAGAIN;
518 }
519
520 dst->index = src->index;
521 dst->mapping = src->mapping;
522
523 folio_get(dst);
524
525 xas_store(&xas, dst);
526
527 folio_ref_unfreeze(src, expected_count - 1);
528
529 xas_unlock_irq(&xas);
530
531 return MIGRATEPAGE_SUCCESS;
532}
533
534/*
535 * Copy the flags and some other ancillary information
536 */
537void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
538{
539 int cpupid;
540
541 if (folio_test_error(folio))
542 folio_set_error(newfolio);
543 if (folio_test_referenced(folio))
544 folio_set_referenced(newfolio);
545 if (folio_test_uptodate(folio))
546 folio_mark_uptodate(newfolio);
547 if (folio_test_clear_active(folio)) {
548 VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio);
549 folio_set_active(newfolio);
550 } else if (folio_test_clear_unevictable(folio))
551 folio_set_unevictable(newfolio);
552 if (folio_test_workingset(folio))
553 folio_set_workingset(newfolio);
554 if (folio_test_checked(folio))
555 folio_set_checked(newfolio);
556 /*
557 * PG_anon_exclusive (-> PG_mappedtodisk) is always migrated via
558 * migration entries. We can still have PG_anon_exclusive set on
559 * effectively unmapped and unreferenced first sub-pages of an
560 * anonymous THP: we can simply copy it here via PG_mappedtodisk.
561 */
562 if (folio_test_mappedtodisk(folio))
563 folio_set_mappedtodisk(newfolio);
564
565 /* Move dirty on pages not done by folio_migrate_mapping() */
566 if (folio_test_dirty(folio))
567 folio_set_dirty(newfolio);
568
569 if (folio_test_young(folio))
570 folio_set_young(newfolio);
571 if (folio_test_idle(folio))
572 folio_set_idle(newfolio);
573
574 /*
575 * Copy NUMA information to the new page, to prevent over-eager
576 * future migrations of this same page.
577 */
578 cpupid = page_cpupid_xchg_last(&folio->page, -1);
579 /*
580 * In memory tiering mode, when migrating between a slow and a fast
581 * memory node, reset cpupid, because it is used to record the
582 * page access time in the slow memory node.
583 */
584 if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) {
585 bool f_toptier = node_is_toptier(page_to_nid(&folio->page));
586 bool t_toptier = node_is_toptier(page_to_nid(&newfolio->page));
587
588 if (f_toptier != t_toptier)
589 cpupid = -1;
590 }
591 page_cpupid_xchg_last(&newfolio->page, cpupid);
592
593 folio_migrate_ksm(newfolio, folio);
594 /*
595 * Please do not reorder this without considering how mm/ksm.c's
596 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
597 */
598 if (folio_test_swapcache(folio))
599 folio_clear_swapcache(folio);
600 folio_clear_private(folio);
601
602 /* page->private contains hugetlb specific flags */
603 if (!folio_test_hugetlb(folio))
604 folio->private = NULL;
605
606 /*
607 * If any waiters have accumulated on the new page then
608 * wake them up.
609 */
610 if (folio_test_writeback(newfolio))
611 folio_end_writeback(newfolio);
612
613 /*
614 * PG_readahead shares the same bit with PG_reclaim. The above
615 * folio_end_writeback() may clear PG_readahead mistakenly, so set the
616 * bit after that.
617 */
618 if (folio_test_readahead(folio))
619 folio_set_readahead(newfolio);
620
621 folio_copy_owner(newfolio, folio);
622
623 if (!folio_test_hugetlb(folio))
624 mem_cgroup_migrate(folio, newfolio);
625}
626EXPORT_SYMBOL(folio_migrate_flags);
627
628void folio_migrate_copy(struct folio *newfolio, struct folio *folio)
629{
630 folio_copy(newfolio, folio);
631 folio_migrate_flags(newfolio, folio);
632}
633EXPORT_SYMBOL(folio_migrate_copy);
634
635/************************************************************
636 * Migration functions
637 ***********************************************************/
638
639int migrate_folio_extra(struct address_space *mapping, struct folio *dst,
640 struct folio *src, enum migrate_mode mode, int extra_count)
641{
642 int rc;
643
644 BUG_ON(folio_test_writeback(src)); /* Writeback must be complete */
645
646 rc = folio_migrate_mapping(mapping, dst, src, extra_count);
647
648 if (rc != MIGRATEPAGE_SUCCESS)
649 return rc;
650
651 if (mode != MIGRATE_SYNC_NO_COPY)
652 folio_migrate_copy(dst, src);
653 else
654 folio_migrate_flags(dst, src);
655 return MIGRATEPAGE_SUCCESS;
656}
657
658/**
659 * migrate_folio() - Simple folio migration.
660 * @mapping: The address_space containing the folio.
661 * @dst: The folio to migrate the data to.
662 * @src: The folio containing the current data.
663 * @mode: How to migrate the page.
664 *
665 * Common logic to directly migrate a single LRU folio suitable for
666 * folios that do not use PagePrivate/PagePrivate2.
667 *
668 * Folios are locked upon entry and exit.
669 */
670int migrate_folio(struct address_space *mapping, struct folio *dst,
671 struct folio *src, enum migrate_mode mode)
672{
673 return migrate_folio_extra(mapping, dst, src, mode, 0);
674}
675EXPORT_SYMBOL(migrate_folio);
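
/*
 * Usage sketch (hypothetical filesystem; the demo_* names are invented for
 * the example): a filesystem whose folios carry no private data can use
 * migrate_folio() directly as the migration callback in its
 * address_space_operations:
 *
 *	static const struct address_space_operations demo_aops = {
 *		.read_folio	= demo_read_folio,
 *		.writepages	= demo_writepages,
 *		.migrate_folio	= migrate_folio,
 *	};
 */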
676
677#ifdef CONFIG_BLOCK
678/* Returns true if all buffers are successfully locked */
679static bool buffer_migrate_lock_buffers(struct buffer_head *head,
680 enum migrate_mode mode)
681{
682 struct buffer_head *bh = head;
683
684 /* Simple case, sync compaction */
685 if (mode != MIGRATE_ASYNC) {
686 do {
687 lock_buffer(bh);
688 bh = bh->b_this_page;
689
690 } while (bh != head);
691
692 return true;
693 }
694
695 /* async case, we cannot block on lock_buffer so use trylock_buffer */
696 do {
697 if (!trylock_buffer(bh)) {
698 /*
699 * We failed to lock the buffer and cannot stall in
700 * async migration. Release the taken locks
701 */
702 struct buffer_head *failed_bh = bh;
703 bh = head;
704 while (bh != failed_bh) {
705 unlock_buffer(bh);
706 bh = bh->b_this_page;
707 }
708 return false;
709 }
710
711 bh = bh->b_this_page;
712 } while (bh != head);
713 return true;
714}
715
716static int __buffer_migrate_folio(struct address_space *mapping,
717 struct folio *dst, struct folio *src, enum migrate_mode mode,
718 bool check_refs)
719{
720 struct buffer_head *bh, *head;
721 int rc;
722 int expected_count;
723
724 head = folio_buffers(src);
725 if (!head)
726 return migrate_folio(mapping, dst, src, mode);
727
728 /* Check whether page does not have extra refs before we do more work */
729 expected_count = folio_expected_refs(mapping, src);
730 if (folio_ref_count(src) != expected_count)
731 return -EAGAIN;
732
733 if (!buffer_migrate_lock_buffers(head, mode))
734 return -EAGAIN;
735
736 if (check_refs) {
737 bool busy;
738 bool invalidated = false;
739
740recheck_buffers:
741 busy = false;
742 spin_lock(&mapping->private_lock);
743 bh = head;
744 do {
745 if (atomic_read(&bh->b_count)) {
746 busy = true;
747 break;
748 }
749 bh = bh->b_this_page;
750 } while (bh != head);
751 if (busy) {
752 if (invalidated) {
753 rc = -EAGAIN;
754 goto unlock_buffers;
755 }
756 spin_unlock(&mapping->private_lock);
757 invalidate_bh_lrus();
758 invalidated = true;
759 goto recheck_buffers;
760 }
761 }
762
763 rc = folio_migrate_mapping(mapping, dst, src, 0);
764 if (rc != MIGRATEPAGE_SUCCESS)
765 goto unlock_buffers;
766
767 folio_attach_private(dst, folio_detach_private(src));
768
769 bh = head;
770 do {
771 set_bh_page(bh, &dst->page, bh_offset(bh));
772 bh = bh->b_this_page;
773 } while (bh != head);
774
775 if (mode != MIGRATE_SYNC_NO_COPY)
776 folio_migrate_copy(dst, src);
777 else
778 folio_migrate_flags(dst, src);
779
780 rc = MIGRATEPAGE_SUCCESS;
781unlock_buffers:
782 if (check_refs)
783 spin_unlock(&mapping->private_lock);
784 bh = head;
785 do {
786 unlock_buffer(bh);
787 bh = bh->b_this_page;
788 } while (bh != head);
789
790 return rc;
791}
792
793/**
794 * buffer_migrate_folio() - Migration function for folios with buffers.
795 * @mapping: The address space containing @src.
796 * @dst: The folio to migrate to.
797 * @src: The folio to migrate from.
798 * @mode: How to migrate the folio.
799 *
800 * This function can only be used if the underlying filesystem guarantees
801 * that no other references to @src exist. For example attached buffer
802 * heads are accessed only under the folio lock. If your filesystem cannot
803 * provide this guarantee, buffer_migrate_folio_norefs() may be more
804 * appropriate.
805 *
806 * Return: 0 on success or a negative errno on failure.
807 */
808int buffer_migrate_folio(struct address_space *mapping,
809 struct folio *dst, struct folio *src, enum migrate_mode mode)
810{
811 return __buffer_migrate_folio(mapping, dst, src, mode, false);
812}
813EXPORT_SYMBOL(buffer_migrate_folio);
814
815/**
816 * buffer_migrate_folio_norefs() - Migration function for folios with buffers.
817 * @mapping: The address space containing @src.
818 * @dst: The folio to migrate to.
819 * @src: The folio to migrate from.
820 * @mode: How to migrate the folio.
821 *
822 * Like buffer_migrate_folio() except that this variant is more careful
823 * and checks that there are also no buffer head references. This function
824 * is the right one for mappings where buffer heads are directly looked
825 * up and referenced (such as block device mappings).
826 *
827 * Return: 0 on success or a negative errno on failure.
828 */
829int buffer_migrate_folio_norefs(struct address_space *mapping,
830 struct folio *dst, struct folio *src, enum migrate_mode mode)
831{
832 return __buffer_migrate_folio(mapping, dst, src, mode, true);
833}
834EXPORT_SYMBOL_GPL(buffer_migrate_folio_norefs);
835#endif
836
837int filemap_migrate_folio(struct address_space *mapping,
838 struct folio *dst, struct folio *src, enum migrate_mode mode)
839{
840 int ret;
841
842 ret = folio_migrate_mapping(mapping, dst, src, 0);
843 if (ret != MIGRATEPAGE_SUCCESS)
844 return ret;
845
846 if (folio_get_private(src))
847 folio_attach_private(dst, folio_detach_private(src));
848
849 if (mode != MIGRATE_SYNC_NO_COPY)
850 folio_migrate_copy(dst, src);
851 else
852 folio_migrate_flags(dst, src);
853 return MIGRATEPAGE_SUCCESS;
854}
855EXPORT_SYMBOL_GPL(filemap_migrate_folio);
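
/*
 * Usage sketch (hypothetical filesystem): filemap_migrate_folio() suits
 * mappings that keep fs-private state in folio->private but have no buffer
 * heads to worry about; the private data is simply re-attached to the
 * destination folio:
 *
 *	static const struct address_space_operations demo_aops = {
 *		.migrate_folio	= filemap_migrate_folio,
 *	};
 */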
856
857/*
858 * Writeback a folio to clean the dirty state
859 */
860static int writeout(struct address_space *mapping, struct folio *folio)
861{
862 struct writeback_control wbc = {
863 .sync_mode = WB_SYNC_NONE,
864 .nr_to_write = 1,
865 .range_start = 0,
866 .range_end = LLONG_MAX,
867 .for_reclaim = 1
868 };
869 int rc;
870
871 if (!mapping->a_ops->writepage)
872 /* No write method for the address space */
873 return -EINVAL;
874
875 if (!folio_clear_dirty_for_io(folio))
876 /* Someone else already triggered a write */
877 return -EAGAIN;
878
879 /*
880 * A dirty folio may imply that the underlying filesystem has
881 * the folio on some queue. So the folio must be clean for
882 * migration. Writeout may mean we lose the lock and the
883 * folio state is no longer what we checked for earlier.
884 * At this point we know that the migration attempt cannot
885 * be successful.
886 */
887 remove_migration_ptes(folio, folio, false);
888
889 rc = mapping->a_ops->writepage(&folio->page, &wbc);
890
891 if (rc != AOP_WRITEPAGE_ACTIVATE)
892 /* unlocked. Relock */
893 folio_lock(folio);
894
895 return (rc < 0) ? -EIO : -EAGAIN;
896}
897
898/*
899 * Default handling if a filesystem does not provide a migration function.
900 */
901static int fallback_migrate_folio(struct address_space *mapping,
902 struct folio *dst, struct folio *src, enum migrate_mode mode)
903{
904 if (folio_test_dirty(src)) {
905 /* Only writeback folios in full synchronous migration */
906 switch (mode) {
907 case MIGRATE_SYNC:
908 case MIGRATE_SYNC_NO_COPY:
909 break;
910 default:
911 return -EBUSY;
912 }
913 return writeout(mapping, src);
914 }
915
916 /*
917 * Buffers may be managed in a filesystem specific way.
918 * We must have no buffers or drop them.
919 */
920 if (folio_test_private(src) &&
921 !filemap_release_folio(src, GFP_KERNEL))
922 return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;
923
924 return migrate_folio(mapping, dst, src, mode);
925}
926
927/*
928 * Move a page to a newly allocated page
929 * The page is locked and all ptes have been successfully removed.
930 *
931 * The new page will have replaced the old page if this function
932 * is successful.
933 *
934 * Return value:
935 * < 0 - error code
936 * MIGRATEPAGE_SUCCESS - success
937 */
938static int move_to_new_folio(struct folio *dst, struct folio *src,
939 enum migrate_mode mode)
940{
941 int rc = -EAGAIN;
942 bool is_lru = !__PageMovable(&src->page);
943
944 VM_BUG_ON_FOLIO(!folio_test_locked(src), src);
945 VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst);
946
947 if (likely(is_lru)) {
948 struct address_space *mapping = folio_mapping(src);
949
950 if (!mapping)
951 rc = migrate_folio(mapping, dst, src, mode);
952 else if (mapping->a_ops->migrate_folio)
953 /*
954 * Most folios have a mapping and most filesystems
955 * provide a migrate_folio callback. Anonymous folios
956 * are part of swap space which also has its own
957 * migrate_folio callback. This is the most common path
958 * for page migration.
959 */
960 rc = mapping->a_ops->migrate_folio(mapping, dst, src,
961 mode);
962 else
963 rc = fallback_migrate_folio(mapping, dst, src, mode);
964 } else {
965 const struct movable_operations *mops;
966
967 /*
968 * In the case of a non-LRU page, it could have been released after
969 * the isolation step. In that case, we shouldn't try migration.
970 */
971 VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
972 if (!folio_test_movable(src)) {
973 rc = MIGRATEPAGE_SUCCESS;
974 folio_clear_isolated(src);
975 goto out;
976 }
977
978 mops = page_movable_ops(&src->page);
979 rc = mops->migrate_page(&dst->page, &src->page, mode);
980 WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
981 !folio_test_isolated(src));
982 }
983
984 /*
985 * When successful, old pagecache src->mapping must be cleared before
986 * src is freed; but stats require that PageAnon be left as PageAnon.
987 */
988 if (rc == MIGRATEPAGE_SUCCESS) {
989 if (__PageMovable(&src->page)) {
990 VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
991
992 /*
993 * We clear PG_movable under the page lock so that no compactor
994 * can try to migrate this page.
995 */
996 folio_clear_isolated(src);
997 }
998
999 /*
1000 * Anonymous and movable src->mapping will be cleared by
1001 * free_pages_prepare(), so don't reset it here; keeping it
1002 * allows type checks such as PageAnon() to keep working.
1003 */
1004 if (!folio_mapping_flags(src))
1005 src->mapping = NULL;
1006
1007 if (likely(!folio_is_zone_device(dst)))
1008 flush_dcache_folio(dst);
1009 }
1010out:
1011 return rc;
1012}
1013
1014static int __unmap_and_move(struct folio *src, struct folio *dst,
1015 int force, enum migrate_mode mode)
1016{
1017 int rc = -EAGAIN;
1018 bool page_was_mapped = false;
1019 struct anon_vma *anon_vma = NULL;
1020 bool is_lru = !__PageMovable(&src->page);
1021
1022 if (!folio_trylock(src)) {
1023 if (!force || mode == MIGRATE_ASYNC)
1024 goto out;
1025
1026 /*
1027 * It's not safe for direct compaction to call lock_page.
1028 * For example, during page readahead pages are added locked
1029 * to the LRU. Later, when the IO completes the pages are
1030 * marked uptodate and unlocked. However, the queueing
1031 * could be merging multiple pages for one bio (e.g.
1032 * mpage_readahead). If an allocation happens for the
1033 * second or third page, the process can end up locking
1034 * the same page twice and deadlocking. Rather than
1035 * trying to be clever about what pages can be locked,
1036 * avoid the use of lock_page for direct compaction
1037 * altogether.
1038 */
1039 if (current->flags & PF_MEMALLOC)
1040 goto out;
1041
1042 folio_lock(src);
1043 }
1044
1045 if (folio_test_writeback(src)) {
1046 /*
1047 * Only in the case of a full synchronous migration is it
1048 * necessary to wait for PageWriteback. In the async case,
1049 * the retry loop is too short and in the sync-light case,
1050 * the overhead of stalling is too much
1051 */
1052 switch (mode) {
1053 case MIGRATE_SYNC:
1054 case MIGRATE_SYNC_NO_COPY:
1055 break;
1056 default:
1057 rc = -EBUSY;
1058 goto out_unlock;
1059 }
1060 if (!force)
1061 goto out_unlock;
1062 folio_wait_writeback(src);
1063 }
1064
1065 /*
1066 * By the time try_to_migrate() is done, src->mapcount has gone down
1067 * to 0, and we would not notice if the anon_vma were freed while we
1068 * migrate the page. This get_anon_vma() delays freeing the anon_vma
1069 * pointer until the end of migration. Page cache pages are no problem
1070 * because of the page lock: they may use writepage() or lock_page()
1071 * during migration, so only anonymous pages need care here.
1072 *
1073 * Only folio_get_anon_vma() understands the subtleties of
1074 * getting a hold on an anon_vma from outside one of its mms.
1075 * But if we cannot get anon_vma, then we won't need it anyway,
1076 * because that implies that the anon page is no longer mapped
1077 * (and cannot be remapped so long as we hold the page lock).
1078 */
1079 if (folio_test_anon(src) && !folio_test_ksm(src))
1080 anon_vma = folio_get_anon_vma(src);
1081
1082 /*
1083 * Block others from accessing the new page when we get around to
1084 * establishing additional references. We are usually the only one
1085 * holding a reference to dst at this point. We used to have a BUG
1086 * here if folio_trylock(dst) fails, but would like to allow for
1087 * cases where there might be a race with the previous use of dst.
1088 * This is much like races on refcount of oldpage: just don't BUG().
1089 */
1090 if (unlikely(!folio_trylock(dst)))
1091 goto out_unlock;
1092
1093 if (unlikely(!is_lru)) {
1094 rc = move_to_new_folio(dst, src, mode);
1095 goto out_unlock_both;
1096 }
1097
1098 /*
1099 * Corner case handling:
1100 * 1. When a new swap-cache page is being read in, it is added to the LRU
1101 * and treated as swapcache but it has no rmap yet.
1102 * Calling try_to_unmap() against a src->mapping==NULL page will
1103 * trigger a BUG. So handle it here.
1104 * 2. An orphaned page (see truncate_cleanup_page) might have
1105 * fs-private metadata. The page can be picked up due to memory
1106 * offlining. Everywhere else except page reclaim, the page is
1107 * invisible to the vm, so the page can not be migrated. So try to
1108 * free the metadata, so the page can be freed.
1109 */
1110 if (!src->mapping) {
1111 if (folio_test_private(src)) {
1112 try_to_free_buffers(src);
1113 goto out_unlock_both;
1114 }
1115 } else if (folio_mapped(src)) {
1116 /* Establish migration ptes */
1117 VM_BUG_ON_FOLIO(folio_test_anon(src) &&
1118 !folio_test_ksm(src) && !anon_vma, src);
1119 try_to_migrate(src, 0);
1120 page_was_mapped = true;
1121 }
1122
1123 if (!folio_mapped(src))
1124 rc = move_to_new_folio(dst, src, mode);
1125
1126 /*
1127 * When successful, push dst to LRU immediately: so that if it
1128 * turns out to be an mlocked page, remove_migration_ptes() will
1129 * automatically build up the correct dst->mlock_count for it.
1130 *
1131 * We would like to do something similar for the old page, when
1132 * unsuccessful, and other cases when a page has been temporarily
1133 * isolated from the unevictable LRU: but this case is the easiest.
1134 */
1135 if (rc == MIGRATEPAGE_SUCCESS) {
1136 folio_add_lru(dst);
1137 if (page_was_mapped)
1138 lru_add_drain();
1139 }
1140
1141 if (page_was_mapped)
1142 remove_migration_ptes(src,
1143 rc == MIGRATEPAGE_SUCCESS ? dst : src, false);
1144
1145out_unlock_both:
1146 folio_unlock(dst);
1147out_unlock:
1148 /* Drop an anon_vma reference if we took one */
1149 if (anon_vma)
1150 put_anon_vma(anon_vma);
1151 folio_unlock(src);
1152out:
1153 /*
1154 * If migration is successful, drop our reference to dst. This
1155 * will not free the page, because the new page owner has taken
1156 * its own reference.
1157 */
1158 if (rc == MIGRATEPAGE_SUCCESS)
1159 folio_put(dst);
1160
1161 return rc;
1162}
1163
1164/*
1165 * Obtain the lock on folio, remove all ptes and migrate the folio
1166 * to the newly allocated folio in dst.
1167 */
1168static int unmap_and_move(new_page_t get_new_page,
1169 free_page_t put_new_page,
1170 unsigned long private, struct folio *src,
1171 int force, enum migrate_mode mode,
1172 enum migrate_reason reason,
1173 struct list_head *ret)
1174{
1175 struct folio *dst;
1176 int rc = MIGRATEPAGE_SUCCESS;
1177 struct page *newpage = NULL;
1178
1179 if (!thp_migration_supported() && folio_test_transhuge(src))
1180 return -ENOSYS;
1181
1182 if (folio_ref_count(src) == 1) {
1183 /* Folio was freed from under us. So we are done. */
1184 folio_clear_active(src);
1185 folio_clear_unevictable(src);
1186 /* free_pages_prepare() will clear PG_isolated. */
1187 goto out;
1188 }
1189
1190 newpage = get_new_page(&src->page, private);
1191 if (!newpage)
1192 return -ENOMEM;
1193 dst = page_folio(newpage);
1194
1195 dst->private = NULL;
1196 rc = __unmap_and_move(src, dst, force, mode);
1197 if (rc == MIGRATEPAGE_SUCCESS)
1198 set_page_owner_migrate_reason(&dst->page, reason);
1199
1200out:
1201 if (rc != -EAGAIN) {
1202 /*
1203 * A folio that has been migrated has all references
1204 * removed and will be freed. A folio that has not been
1205 * migrated will have kept its references and be restored.
1206 */
1207 list_del(&src->lru);
1208 }
1209
1210 /*
1211 * If migration is successful, release the reference grabbed during
1212 * isolation. Otherwise, restore the folio to the right list unless
1213 * we want to retry.
1214 */
1215 if (rc == MIGRATEPAGE_SUCCESS) {
1216 /*
1217 * Compaction can also migrate non-LRU folios, which are
1218 * not accounted to NR_ISOLATED_*. They can be recognized
1219 * via __folio_test_movable().
1220 */
1221 if (likely(!__folio_test_movable(src)))
1222 mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON +
1223 folio_is_file_lru(src), -folio_nr_pages(src));
1224
1225 if (reason != MR_MEMORY_FAILURE)
1226 /*
1227 * We release the folio in page_handle_poison.
1228 */
1229 folio_put(src);
1230 } else {
1231 if (rc != -EAGAIN)
1232 list_add_tail(&src->lru, ret);
1233
1234 if (put_new_page)
1235 put_new_page(&dst->page, private);
1236 else
1237 folio_put(dst);
1238 }
1239
1240 return rc;
1241}
1242
1243/*
1244 * Counterpart of unmap_and_move() for hugepage migration.
1245 *
1246 * This function doesn't wait for the completion of hugepage I/O
1247 * because there is no race between I/O and migration for hugepage.
1248 * Note that currently hugepage I/O occurs only in direct I/O
1249 * where no lock is held and PG_writeback is irrelevant,
1250 * and writeback status of all subpages are counted in the reference
1251 * count of the head page (i.e. if all subpages of a 2MB hugepage are
1252 * under direct I/O, the reference of the head page is 512 and a bit more.)
1253 * This means that when we try to migrate hugepage whose subpages are
1254 * doing direct I/O, some references remain after try_to_unmap() and
1255 * hugepage migration fails without data corruption.
1256 *
1257 * There is also no race when direct I/O is issued on the page under migration,
1258 * because then pte is replaced with migration swap entry and direct I/O code
1259 * will wait in the page fault for migration to complete.
1260 */
1261static int unmap_and_move_huge_page(new_page_t get_new_page,
1262 free_page_t put_new_page, unsigned long private,
1263 struct page *hpage, int force,
1264 enum migrate_mode mode, int reason,
1265 struct list_head *ret)
1266{
1267 struct folio *dst, *src = page_folio(hpage);
1268 int rc = -EAGAIN;
1269 int page_was_mapped = 0;
1270 struct page *new_hpage;
1271 struct anon_vma *anon_vma = NULL;
1272 struct address_space *mapping = NULL;
1273
1274 /*
1275 * Migratability of hugepages depends on architectures and their size.
1276 * This check is necessary because some callers of hugepage migration
1277 * like soft offline and memory hotremove don't walk through page
1278 * tables or check whether the hugepage is pmd-based or not before
1279 * kicking migration.
1280 */
1281 if (!hugepage_migration_supported(page_hstate(hpage)))
1282 return -ENOSYS;
1283
1284 if (folio_ref_count(src) == 1) {
1285 /* page was freed from under us. So we are done. */
1286 putback_active_hugepage(hpage);
1287 return MIGRATEPAGE_SUCCESS;
1288 }
1289
1290 new_hpage = get_new_page(hpage, private);
1291 if (!new_hpage)
1292 return -ENOMEM;
1293 dst = page_folio(new_hpage);
1294
1295 if (!folio_trylock(src)) {
1296 if (!force)
1297 goto out;
1298 switch (mode) {
1299 case MIGRATE_SYNC:
1300 case MIGRATE_SYNC_NO_COPY:
1301 break;
1302 default:
1303 goto out;
1304 }
1305 folio_lock(src);
1306 }
1307
1308 /*
1309 * Check for pages which are in the process of being freed. Without
1310 * folio_mapping() set, hugetlbfs specific move page routine will not
1311 * be called and we could leak usage counts for subpools.
1312 */
1313 if (hugetlb_folio_subpool(src) && !folio_mapping(src)) {
1314 rc = -EBUSY;
1315 goto out_unlock;
1316 }
1317
1318 if (folio_test_anon(src))
1319 anon_vma = folio_get_anon_vma(src);
1320
1321 if (unlikely(!folio_trylock(dst)))
1322 goto put_anon;
1323
1324 if (folio_mapped(src)) {
1325 enum ttu_flags ttu = 0;
1326
1327 if (!folio_test_anon(src)) {
1328 /*
1329 * In shared mappings, try_to_unmap could potentially
1330 * call huge_pmd_unshare. Because of this, take
1331 * semaphore in write mode here and set TTU_RMAP_LOCKED
1332 * to let lower levels know we have taken the lock.
1333 */
1334 mapping = hugetlb_page_mapping_lock_write(hpage);
1335 if (unlikely(!mapping))
1336 goto unlock_put_anon;
1337
1338 ttu = TTU_RMAP_LOCKED;
1339 }
1340
1341 try_to_migrate(src, ttu);
1342 page_was_mapped = 1;
1343
1344 if (ttu & TTU_RMAP_LOCKED)
1345 i_mmap_unlock_write(mapping);
1346 }
1347
1348 if (!folio_mapped(src))
1349 rc = move_to_new_folio(dst, src, mode);
1350
1351 if (page_was_mapped)
1352 remove_migration_ptes(src,
1353 rc == MIGRATEPAGE_SUCCESS ? dst : src, false);
1354
1355unlock_put_anon:
1356 folio_unlock(dst);
1357
1358put_anon:
1359 if (anon_vma)
1360 put_anon_vma(anon_vma);
1361
1362 if (rc == MIGRATEPAGE_SUCCESS) {
1363 move_hugetlb_state(src, dst, reason);
1364 put_new_page = NULL;
1365 }
1366
1367out_unlock:
1368 folio_unlock(src);
1369out:
1370 if (rc == MIGRATEPAGE_SUCCESS)
1371 putback_active_hugepage(hpage);
1372 else if (rc != -EAGAIN)
1373 list_move_tail(&src->lru, ret);
1374
1375 /*
1376 * If migration was not successful and there's a freeing callback, use
1377 * it. Otherwise, put_page() will drop the reference grabbed during
1378 * isolation.
1379 */
1380 if (put_new_page)
1381 put_new_page(new_hpage, private);
1382 else
1383 putback_active_hugepage(new_hpage);
1384
1385 return rc;
1386}
1387
1388static inline int try_split_folio(struct folio *folio, struct list_head *split_folios)
1389{
1390 int rc;
1391
1392 folio_lock(folio);
1393 rc = split_folio_to_list(folio, split_folios);
1394 folio_unlock(folio);
1395 if (!rc)
1396 list_move_tail(&folio->lru, split_folios);
1397
1398 return rc;
1399}
1400
1401/*
1402 * migrate_pages - migrate the folios specified in a list, to the free folios
1403 * supplied as the target for the page migration
1404 *
1405 * @from: The list of folios to be migrated.
1406 * @get_new_page: The function used to allocate free folios to be used
1407 * as the target of the folio migration.
1408 * @put_new_page: The function used to free target folios if migration
1409 * fails, or NULL if no special handling is necessary.
1410 * @private: Private data to be passed on to get_new_page()
1411 * @mode: The migration mode that specifies the constraints for
1412 * folio migration, if any.
1413 * @reason: The reason for folio migration.
1414 * @ret_succeeded: Set to the number of folios migrated successfully if
1415 * the caller passes a non-NULL pointer.
1416 *
1417 * The function returns after 10 attempts or when no folios are movable
1418 * any more, because the list has become empty or no retryable folios remain.
1419 * It is the caller's responsibility to call putback_movable_pages() to
1420 * return folios to the LRU or free list, but only if ret != 0.
1421 *
1422 * Returns the number of {normal, large, hugetlb} folios that were not
1423 * migrated, or an error code. A large folio that was split counts as one
1424 * non-migrated large folio, no matter how many of its split folios are
1425 * migrated successfully.
1426 */
1427int migrate_pages(struct list_head *from, new_page_t get_new_page,
1428 free_page_t put_new_page, unsigned long private,
1429 enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
1430{
1431 int retry = 1;
1432 int large_retry = 1;
1433 int thp_retry = 1;
1434 int nr_failed = 0;
1435 int nr_failed_pages = 0;
1436 int nr_retry_pages = 0;
1437 int nr_succeeded = 0;
1438 int nr_thp_succeeded = 0;
1439 int nr_large_failed = 0;
1440 int nr_thp_failed = 0;
1441 int nr_thp_split = 0;
1442 int pass = 0;
1443 bool is_large = false;
1444 bool is_thp = false;
1445 struct folio *folio, *folio2;
1446 int rc, nr_pages;
1447 LIST_HEAD(ret_folios);
1448 LIST_HEAD(split_folios);
1449 bool nosplit = (reason == MR_NUMA_MISPLACED);
1450 bool no_split_folio_counting = false;
1451
1452 trace_mm_migrate_pages_start(mode, reason);
1453
1454split_folio_migration:
1455 for (pass = 0; pass < 10 && (retry || large_retry); pass++) {
1456 retry = 0;
1457 large_retry = 0;
1458 thp_retry = 0;
1459 nr_retry_pages = 0;
1460
1461 list_for_each_entry_safe(folio, folio2, from, lru) {
1462 /*
1463 * Large folio statistics are based on the source large
1464 * folio. Capture required information that might get
1465 * lost during migration.
1466 */
1467 is_large = folio_test_large(folio) && !folio_test_hugetlb(folio);
1468 is_thp = is_large && folio_test_pmd_mappable(folio);
1469 nr_pages = folio_nr_pages(folio);
1470 cond_resched();
1471
1472 if (folio_test_hugetlb(folio))
1473 rc = unmap_and_move_huge_page(get_new_page,
1474 put_new_page, private,
1475 &folio->page, pass > 2, mode,
1476 reason,
1477 &ret_folios);
1478 else
1479 rc = unmap_and_move(get_new_page, put_new_page,
1480 private, folio, pass > 2, mode,
1481 reason, &ret_folios);
1482 /*
1483 * The rules are:
1484 * Success: non hugetlb folio will be freed, hugetlb
1485 * folio will be put back
1486 * -EAGAIN: stay on the from list
1487 * -ENOMEM: stay on the from list
1488 * -ENOSYS: stay on the from list
1489 * Other errno: put on ret_folios list then splice to
1490 * from list
1491 */
1492 switch(rc) {
1493 /*
1494 * Large folio migration might be unsupported or
1495 * the allocation could've failed so we should retry
1496 * on the same folio with the large folio split
1497 * to normal folios.
1498 *
1499 * Split folios are put in split_folios, and
1500 * we will migrate them after the rest of the
1501 * list is processed.
1502 */
1503 case -ENOSYS:
1504 /* Large folio migration is unsupported */
1505 if (is_large) {
1506 nr_large_failed++;
1507 nr_thp_failed += is_thp;
1508 if (!try_split_folio(folio, &split_folios)) {
1509 nr_thp_split += is_thp;
1510 break;
1511 }
1512 /* Hugetlb migration is unsupported */
1513 } else if (!no_split_folio_counting) {
1514 nr_failed++;
1515 }
1516
1517 nr_failed_pages += nr_pages;
1518 list_move_tail(&folio->lru, &ret_folios);
1519 break;
1520 case -ENOMEM:
1521 /*
1522 * When memory is low, don't bother to try to migrate
1523 * other folios, just exit.
1524 */
1525 if (is_large) {
1526 nr_large_failed++;
1527 nr_thp_failed += is_thp;
1528 /* Large folio NUMA faulting doesn't split to retry. */
1529 if (!nosplit) {
1530 int ret = try_split_folio(folio, &split_folios);
1531
1532 if (!ret) {
1533 nr_thp_split += is_thp;
1534 break;
1535 } else if (reason == MR_LONGTERM_PIN &&
1536 ret == -EAGAIN) {
1537 /*
1538 * Try again to split large folio to
1539 * mitigate the failure of longterm pinning.
1540 */
1541 large_retry++;
1542 thp_retry += is_thp;
1543 nr_retry_pages += nr_pages;
1544 break;
1545 }
1546 }
1547 } else if (!no_split_folio_counting) {
1548 nr_failed++;
1549 }
1550
1551 nr_failed_pages += nr_pages + nr_retry_pages;
1552 /*
1553 * There might be some split folios of failed-to-migrate large
1554 * folios left on the split_folios list. Move them back to the
1555 * migration list so that the caller can put them back on the
1556 * right list; otherwise their folio refcounts will be leaked.
1557 */
1558 list_splice_init(&split_folios, from);
1559 /* nr_failed isn't updated because it is not used after this point */
1560 nr_large_failed += large_retry;
1561 nr_thp_failed += thp_retry;
1562 goto out;
1563 case -EAGAIN:
1564 if (is_large) {
1565 large_retry++;
1566 thp_retry += is_thp;
1567 } else if (!no_split_folio_counting) {
1568 retry++;
1569 }
1570 nr_retry_pages += nr_pages;
1571 break;
1572 case MIGRATEPAGE_SUCCESS:
1573 nr_succeeded += nr_pages;
1574 nr_thp_succeeded += is_thp;
1575 break;
1576 default:
1577 /*
1578 * Permanent failure (-EBUSY, etc.):
1579 * unlike -EAGAIN case, the failed folio is
1580 * removed from migration folio list and not
1581 * retried in the next outer loop.
1582 */
1583 if (is_large) {
1584 nr_large_failed++;
1585 nr_thp_failed += is_thp;
1586 } else if (!no_split_folio_counting) {
1587 nr_failed++;
1588 }
1589
1590 nr_failed_pages += nr_pages;
1591 break;
1592 }
1593 }
1594 }
1595 nr_failed += retry;
1596 nr_large_failed += large_retry;
1597 nr_thp_failed += thp_retry;
1598 nr_failed_pages += nr_retry_pages;
1599 /*
1600 * Try to migrate the split folios of failed-to-migrate large folios; no
1601 * nr_failed counting is done in this round, since all split folios of a
1602 * large folio are counted as 1 failure in the first round.
1603 */
1604 if (!list_empty(&split_folios)) {
1605 /*
1606 * Move non-migrated folios (after 10 retries) to ret_folios
1607 * to avoid migrating them again.
1608 */
1609 list_splice_init(from, &ret_folios);
1610 list_splice_init(&split_folios, from);
1611 no_split_folio_counting = true;
1612 retry = 1;
1613 goto split_folio_migration;
1614 }
1615
1616 rc = nr_failed + nr_large_failed;
1617out:
1618 /*
1619 * Put the permanently failed folios back on the migration list; they
1620 * will be put back on the right list by the caller.
1621 */
1622 list_splice(&ret_folios, from);
1623
1624 /*
1625 * Return 0 in case all split folios of fail-to-migrate large folios
1626 * are migrated successfully.
1627 */
1628 if (list_empty(from))
1629 rc = 0;
1630
1631 count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
1632 count_vm_events(PGMIGRATE_FAIL, nr_failed_pages);
1633 count_vm_events(THP_MIGRATION_SUCCESS, nr_thp_succeeded);
1634 count_vm_events(THP_MIGRATION_FAIL, nr_thp_failed);
1635 count_vm_events(THP_MIGRATION_SPLIT, nr_thp_split);
1636 trace_mm_migrate_pages(nr_succeeded, nr_failed_pages, nr_thp_succeeded,
1637 nr_thp_failed, nr_thp_split, mode, reason);
1638
1639 if (ret_succeeded)
1640 *ret_succeeded = nr_succeeded;
1641
1642 return rc;
1643}
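
/*
 * Caller sketch (mirroring do_move_pages_to_node() below): a typical caller
 * isolates folios onto a private list and hands them to migrate_pages()
 * together with alloc_migration_target() and a migration_target_control
 * describing the destination node (target_nid is a placeholder):
 *
 *	LIST_HEAD(pagelist);
 *	struct migration_target_control mtc = {
 *		.nid = target_nid,
 *		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
 *	};
 *	unsigned int nr_succeeded;
 *	int err;
 *
 *	... isolate folios onto &pagelist ...
 *
 *	err = migrate_pages(&pagelist, alloc_migration_target, NULL,
 *			    (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL,
 *			    &nr_succeeded);
 *	if (err)
 *		putback_movable_pages(&pagelist);
 */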
1644
1645struct page *alloc_migration_target(struct page *page, unsigned long private)
1646{
1647 struct folio *folio = page_folio(page);
1648 struct migration_target_control *mtc;
1649 gfp_t gfp_mask;
1650 unsigned int order = 0;
1651 struct folio *new_folio = NULL;
1652 int nid;
1653 int zidx;
1654
1655 mtc = (struct migration_target_control *)private;
1656 gfp_mask = mtc->gfp_mask;
1657 nid = mtc->nid;
1658 if (nid == NUMA_NO_NODE)
1659 nid = folio_nid(folio);
1660
1661 if (folio_test_hugetlb(folio)) {
1662 struct hstate *h = folio_hstate(folio);
1663
1664 gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
1665 return alloc_huge_page_nodemask(h, nid, mtc->nmask, gfp_mask);
1666 }
1667
1668 if (folio_test_large(folio)) {
1669 /*
1670 * clear __GFP_RECLAIM to make the migration callback
1671 * consistent with regular THP allocations.
1672 */
1673 gfp_mask &= ~__GFP_RECLAIM;
1674 gfp_mask |= GFP_TRANSHUGE;
1675 order = folio_order(folio);
1676 }
1677 zidx = zone_idx(folio_zone(folio));
1678 if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
1679 gfp_mask |= __GFP_HIGHMEM;
1680
1681 new_folio = __folio_alloc(gfp_mask, order, nid, mtc->nmask);
1682
1683 return &new_folio->page;
1684}
1685
1686#ifdef CONFIG_NUMA
1687
1688static int store_status(int __user *status, int start, int value, int nr)
1689{
1690 while (nr-- > 0) {
1691 if (put_user(value, status + start))
1692 return -EFAULT;
1693 start++;
1694 }
1695
1696 return 0;
1697}
1698
1699static int do_move_pages_to_node(struct mm_struct *mm,
1700 struct list_head *pagelist, int node)
1701{
1702 int err;
1703 struct migration_target_control mtc = {
1704 .nid = node,
1705 .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
1706 };
1707
1708 err = migrate_pages(pagelist, alloc_migration_target, NULL,
1709 (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
1710 if (err)
1711 putback_movable_pages(pagelist);
1712 return err;
1713}
1714
1715/*
1716 * Resolves the given address to a struct page, isolates it from the LRU and
1717 * puts it on the given pagelist.
1718 * Returns:
1719 * errno - if the page cannot be found/isolated
1720 * 0 - when it doesn't have to be migrated because it is already on the
1721 * target node
1722 * 1 - when it has been queued
1723 */
1724static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
1725 int node, struct list_head *pagelist, bool migrate_all)
1726{
1727 struct vm_area_struct *vma;
1728 struct page *page;
1729 int err;
1730
1731 mmap_read_lock(mm);
1732 err = -EFAULT;
1733 vma = vma_lookup(mm, addr);
1734 if (!vma || !vma_migratable(vma))
1735 goto out;
1736
1737 /* FOLL_DUMP to ignore special (like zero) pages */
1738 page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
1739
1740 err = PTR_ERR(page);
1741 if (IS_ERR(page))
1742 goto out;
1743
1744 err = -ENOENT;
1745 if (!page)
1746 goto out;
1747
1748 if (is_zone_device_page(page))
1749 goto out_putpage;
1750
1751 err = 0;
1752 if (page_to_nid(page) == node)
1753 goto out_putpage;
1754
1755 err = -EACCES;
1756 if (page_mapcount(page) > 1 && !migrate_all)
1757 goto out_putpage;
1758
1759 if (PageHuge(page)) {
1760 if (PageHead(page)) {
1761 err = isolate_hugetlb(page, pagelist);
1762 if (!err)
1763 err = 1;
1764 }
1765 } else {
1766 struct page *head;
1767
1768 head = compound_head(page);
1769 err = isolate_lru_page(head);
1770 if (err)
1771 goto out_putpage;
1772
1773 err = 1;
1774 list_add_tail(&head->lru, pagelist);
1775 mod_node_page_state(page_pgdat(head),
1776 NR_ISOLATED_ANON + page_is_file_lru(head),
1777 thp_nr_pages(head));
1778 }
1779out_putpage:
1780 /*
1781 * Either remove the duplicate refcount from
1782 * isolate_lru_page() or drop the page ref if it was
1783 * not isolated.
1784 */
1785 put_page(page);
1786out:
1787 mmap_read_unlock(mm);
1788 return err;
1789}
1790
1791static int move_pages_and_store_status(struct mm_struct *mm, int node,
1792 struct list_head *pagelist, int __user *status,
1793 int start, int i, unsigned long nr_pages)
1794{
1795 int err;
1796
1797 if (list_empty(pagelist))
1798 return 0;
1799
1800 err = do_move_pages_to_node(mm, pagelist, node);
1801 if (err) {
1802 /*
1803 * A positive err means the number of pages that
1804 * failed to migrate. Since we are going to
1805 * abort and return the number of non-migrated
1806 * pages, we need to include the rest of the
1807 * nr_pages that have not been attempted as
1808 * well.
1809 */
1810 if (err > 0)
1811 err += nr_pages - i;
1812 return err;
1813 }
1814 return store_status(status, start, node, i - start);
1815}
1816
1817/*
1818 * Migrate an array of page addresses onto an array of nodes and fill
1819 * the corresponding array of status.
1820 */
1821static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
1822 unsigned long nr_pages,
1823 const void __user * __user *pages,
1824 const int __user *nodes,
1825 int __user *status, int flags)
1826{
1827 int current_node = NUMA_NO_NODE;
1828 LIST_HEAD(pagelist);
1829 int start, i;
1830 int err = 0, err1;
1831
1832 lru_cache_disable();
1833
1834 for (i = start = 0; i < nr_pages; i++) {
1835 const void __user *p;
1836 unsigned long addr;
1837 int node;
1838
1839 err = -EFAULT;
1840 if (get_user(p, pages + i))
1841 goto out_flush;
1842 if (get_user(node, nodes + i))
1843 goto out_flush;
1844 addr = (unsigned long)untagged_addr(p);
1845
1846 err = -ENODEV;
1847 if (node < 0 || node >= MAX_NUMNODES)
1848 goto out_flush;
1849 if (!node_state(node, N_MEMORY))
1850 goto out_flush;
1851
1852 err = -EACCES;
1853 if (!node_isset(node, task_nodes))
1854 goto out_flush;
1855
1856 if (current_node == NUMA_NO_NODE) {
1857 current_node = node;
1858 start = i;
1859 } else if (node != current_node) {
1860 err = move_pages_and_store_status(mm, current_node,
1861 &pagelist, status, start, i, nr_pages);
1862 if (err)
1863 goto out;
1864 start = i;
1865 current_node = node;
1866 }
1867
1868 /*
1869 * Errors in the page lookup or isolation are not fatal and we simply
1870 * report them via status
1871 */
1872 err = add_page_for_migration(mm, addr, current_node,
1873 &pagelist, flags & MPOL_MF_MOVE_ALL);
1874
1875 if (err > 0) {
1876 /* The page is successfully queued for migration */
1877 continue;
1878 }
1879
1880 /*
1881 * The move_pages() man page does not have an -EEXIST choice, so
1882 * use -EFAULT instead.
1883 */
1884 if (err == -EEXIST)
1885 err = -EFAULT;
1886
1887 /*
1888 * If the page is already on the target node (!err), store the
1889 * node, otherwise, store the err.
1890 */
1891 err = store_status(status, i, err ? : current_node, 1);
1892 if (err)
1893 goto out_flush;
1894
1895 err = move_pages_and_store_status(mm, current_node, &pagelist,
1896 status, start, i, nr_pages);
1897 if (err) {
1898 /* We have accounted for page i */
1899 if (err > 0)
1900 err--;
1901 goto out;
1902 }
1903 current_node = NUMA_NO_NODE;
1904 }
1905out_flush:
1906 /* Make sure we do not overwrite the existing error */
1907 err1 = move_pages_and_store_status(mm, current_node, &pagelist,
1908 status, start, i, nr_pages);
1909 if (err >= 0)
1910 err = err1;
1911out:
1912 lru_cache_enable();
1913 return err;
1914}
1915
1916/*
1917 * Determine the nodes of an array of pages and store it in an array of status.
1918 */
1919static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
1920 const void __user **pages, int *status)
1921{
1922 unsigned long i;
1923
1924 mmap_read_lock(mm);
1925
1926 for (i = 0; i < nr_pages; i++) {
1927 unsigned long addr = (unsigned long)(*pages);
1928 struct vm_area_struct *vma;
1929 struct page *page;
1930 int err = -EFAULT;
1931
1932 vma = vma_lookup(mm, addr);
1933 if (!vma)
1934 goto set_status;
1935
1936 /* FOLL_DUMP to ignore special (like zero) pages */
1937 page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
1938
1939 err = PTR_ERR(page);
1940 if (IS_ERR(page))
1941 goto set_status;
1942
1943 err = -ENOENT;
1944 if (!page)
1945 goto set_status;
1946
1947 if (!is_zone_device_page(page))
1948 err = page_to_nid(page);
1949
1950 put_page(page);
1951set_status:
1952 *status = err;
1953
1954 pages++;
1955 status++;
1956 }
1957
1958 mmap_read_unlock(mm);
1959}
1960
1961static int get_compat_pages_array(const void __user *chunk_pages[],
1962 const void __user * __user *pages,
1963 unsigned long chunk_nr)
1964{
1965 compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages;
1966 compat_uptr_t p;
1967 int i;
1968
1969 for (i = 0; i < chunk_nr; i++) {
1970 if (get_user(p, pages32 + i))
1971 return -EFAULT;
1972 chunk_pages[i] = compat_ptr(p);
1973 }
1974
1975 return 0;
1976}
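
/*
 * Why the compat path above is needed, sketched with the usual sizes
 * (4-byte compat pointers vs. 8-byte native pointers):
 *
 *	compat:  | p0 | p1 | p2 | p3 |		(4 bytes per entry)
 *	native:  |    p0   |    p1   | ...	(8 bytes per entry)
 *
 * A plain copy_from_user() of chunk_nr * sizeof(void *) bytes would
 * pair up and misread the packed compat entries, so each one is
 * fetched as a compat_uptr_t and widened with compat_ptr() instead.
 */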
1977
1978/*
1979 * Determine the nodes of a user array of pages and store them in
1980 * a user array of status values.
1981 */
1982static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
1983 const void __user * __user *pages,
1984 int __user *status)
1985{
1986#define DO_PAGES_STAT_CHUNK_NR 16UL
1987 const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
1988 int chunk_status[DO_PAGES_STAT_CHUNK_NR];
1989
1990 while (nr_pages) {
1991 unsigned long chunk_nr = min(nr_pages, DO_PAGES_STAT_CHUNK_NR);
1992
1993 if (in_compat_syscall()) {
1994 if (get_compat_pages_array(chunk_pages, pages,
1995 chunk_nr))
1996 break;
1997 } else {
1998 if (copy_from_user(chunk_pages, pages,
1999 chunk_nr * sizeof(*chunk_pages)))
2000 break;
2001 }
2002
2003 do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
2004
2005 if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
2006 break;
2007
2008 pages += chunk_nr;
2009 status += chunk_nr;
2010 nr_pages -= chunk_nr;
2011 }
2012 return nr_pages ? -EFAULT : 0;
2013}
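
/*
 * Chunking in do_pages_stat(), with assumed numbers: for nr_pages = 40
 * and DO_PAGES_STAT_CHUNK_NR = 16, the loop processes chunks of 16, 16
 * and 8 entries, copying user pointers in and status values out once
 * per chunk so that only a small, fixed amount of kernel stack is used
 * regardless of how many pages the caller asked about.
 */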
2014
2015static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
2016{
2017 struct task_struct *task;
2018 struct mm_struct *mm;
2019
2020 /*
2021 * There is no need to check whether the current process has the right
2022 * to modify the specified process when they are the same.
2023 */
2024 if (!pid) {
2025 mmget(current->mm);
2026 *mem_nodes = cpuset_mems_allowed(current);
2027 return current->mm;
2028 }
2029
2030 /* Find the mm_struct */
2031 rcu_read_lock();
2032 task = find_task_by_vpid(pid);
2033 if (!task) {
2034 rcu_read_unlock();
2035 return ERR_PTR(-ESRCH);
2036 }
2037 get_task_struct(task);
2038
2039 /*
2040 * Check if this process has the right to modify the specified
2041 * process. Use the regular "ptrace_may_access()" checks.
2042 */
2043 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
2044 rcu_read_unlock();
2045 mm = ERR_PTR(-EPERM);
2046 goto out;
2047 }
2048 rcu_read_unlock();
2049
2050 mm = ERR_PTR(security_task_movememory(task));
2051 if (IS_ERR(mm))
2052 goto out;
2053 *mem_nodes = cpuset_mems_allowed(task);
2054 mm = get_task_mm(task);
2055out:
2056 put_task_struct(task);
2057 if (!mm)
2058 mm = ERR_PTR(-EINVAL);
2059 return mm;
2060}
2061
2062/*
2063 * Move a list of pages in the address space of the currently executing
2064 * process.
2065 */
2066static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
2067 const void __user * __user *pages,
2068 const int __user *nodes,
2069 int __user *status, int flags)
2070{
2071 struct mm_struct *mm;
2072 int err;
2073 nodemask_t task_nodes;
2074
2075 /* Check flags */
2076 if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
2077 return -EINVAL;
2078
2079 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
2080 return -EPERM;
2081
2082 mm = find_mm_struct(pid, &task_nodes);
2083 if (IS_ERR(mm))
2084 return PTR_ERR(mm);
2085
2086 if (nodes)
2087 err = do_pages_move(mm, task_nodes, nr_pages, pages,
2088 nodes, status, flags);
2089 else
2090 err = do_pages_stat(mm, nr_pages, pages, status);
2091
2092 mmput(mm);
2093 return err;
2094}
2095
2096SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
2097 const void __user * __user *, pages,
2098 const int __user *, nodes,
2099 int __user *, status, int, flags)
2100{
2101 return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
2102}
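
/*
 * Illustrative userspace sketch (not part of the kernel) of how the
 * move_pages(2) syscall wired up above is typically driven via the
 * libnuma wrapper; build with -lnuma.  The sizes, node number and
 * error handling are assumptions for the example only.
 *
 *	#include <numaif.h>		// move_pages(), MPOL_MF_MOVE
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		long page_size = sysconf(_SC_PAGESIZE);
 *		void *pages[2];
 *		int nodes[2] = { 0, 0 };	// requested target node
 *		int status[2];
 *
 *		pages[0] = aligned_alloc(page_size, page_size);
 *		pages[1] = aligned_alloc(page_size, page_size);
 *		// Touch the pages so they are actually allocated.
 *		*(char *)pages[0] = 1;
 *		*(char *)pages[1] = 1;
 *
 *		// pid 0 means "the calling process"; see find_mm_struct().
 *		if (move_pages(0, 2, pages, nodes, status, MPOL_MF_MOVE) < 0)
 *			perror("move_pages");
 *		// status[i] is the node the page ended up on, or -errno.
 *		printf("status: %d %d\n", status[0], status[1]);
 *		return 0;
 *	}
 */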
2103
2104#ifdef CONFIG_NUMA_BALANCING
2105/*
2106 * Returns true if this is a safe migration target node for misplaced NUMA
2107 * pages. Currently it only checks the watermarks, which is crude.
2108 */
2109static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
2110 unsigned long nr_migrate_pages)
2111{
2112 int z;
2113
2114 for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2115 struct zone *zone = pgdat->node_zones + z;
2116
2117 if (!managed_zone(zone))
2118 continue;
2119
2120 /* Avoid waking kswapd by allocating nr_migrate_pages pages. */
2121 if (!zone_watermark_ok(zone, 0,
2122 high_wmark_pages(zone) +
2123 nr_migrate_pages,
2124 ZONE_MOVABLE, 0))
2125 continue;
2126 return true;
2127 }
2128 return false;
2129}
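
/*
 * The watermark check above, with made-up numbers for illustration: a
 * zone with a high watermark of 1024 pages is an acceptable target for
 * nr_migrate_pages = 256 only while roughly
 *
 *	free pages > high_wmark_pages(zone) + nr_migrate_pages = 1280
 *
 * holds, e.g. 1500 free pages pass but 1200 do not, so the migration
 * will not itself push the target zone below its high watermark and
 * into kswapd reclaim.
 */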
2130
2131static struct page *alloc_misplaced_dst_page(struct page *page,
2132 unsigned long data)
2133{
2134 int nid = (int) data;
2135 int order = compound_order(page);
2136 gfp_t gfp = __GFP_THISNODE;
2137 struct folio *new;
2138
2139 if (order > 0)
2140 gfp |= GFP_TRANSHUGE_LIGHT;
2141 else {
2142 gfp |= GFP_HIGHUSER_MOVABLE | __GFP_NOMEMALLOC | __GFP_NORETRY |
2143 __GFP_NOWARN;
2144 gfp &= ~__GFP_RECLAIM;
2145 }
2146 new = __folio_alloc_node(gfp, order, nid);
2147
2148 return &new->page;
2149}
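
/*
 * A note on the GFP choices above (a summary of the code, not new
 * policy): both paths are deliberately light-weight because this
 * migration is purely opportunistic.  __GFP_THISNODE requires the
 * destination page to already be free on the target node, and for
 * base pages the reclaim bits are cleared while __GFP_NORETRY and
 * __GFP_NOWARN are set, so a failed allocation simply means the
 * misplaced page stays where it is instead of triggering reclaim on
 * the target node.
 */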
2150
2151static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
2152{
2153 int nr_pages = thp_nr_pages(page);
2154 int order = compound_order(page);
2155
2156 VM_BUG_ON_PAGE(order && !PageTransHuge(page), page);
2157
2158 /* Do not migrate THP mapped by multiple processes */
2159 if (PageTransHuge(page) && total_mapcount(page) > 1)
2160 return 0;
2161
2162 /* Avoid migrating to a node that is nearly full */
2163 if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
2164 int z;
2165
2166 if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING))
2167 return 0;
2168 for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2169 if (managed_zone(pgdat->node_zones + z))
2170 break;
2171 }
2172 wakeup_kswapd(pgdat->node_zones + z, 0, order, ZONE_MOVABLE);
2173 return 0;
2174 }
2175
2176 if (isolate_lru_page(page))
2177 return 0;
2178
2179 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_is_file_lru(page),
2180 nr_pages);
2181
2182 /*
2183 * Isolating the page has taken another reference, so the
2184 * caller's reference can be safely dropped without the page
2185 * disappearing underneath us during migration.
2186 */
2187 put_page(page);
2188 return 1;
2189}
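
/*
 * Reference flow through numamigrate_isolate_page(), traced with an
 * assumed starting refcount N held by the caller: isolate_lru_page()
 * takes its own reference (N + 1) and the put_page() above drops the
 * caller's (back to N), so the isolation reference alone keeps the
 * page alive until migrate_pages() releases it.
 */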
2190
2191/*
2192 * Attempt to migrate a misplaced page to the specified destination
2193 * node. The caller is expected to hold an elevated reference count on
2194 * the page, which this function drops before returning.
2195 */
2196int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
2197 int node)
2198{
2199 pg_data_t *pgdat = NODE_DATA(node);
2200 int isolated;
2201 int nr_remaining;
2202 unsigned int nr_succeeded;
2203 LIST_HEAD(migratepages);
2204 int nr_pages = thp_nr_pages(page);
2205
2206 /*
2207 * Don't migrate file pages that are mapped in multiple processes
2208 * with execute permissions, as they are probably shared libraries.
2209 */
2210 if (page_mapcount(page) != 1 && page_is_file_lru(page) &&
2211 (vma->vm_flags & VM_EXEC))
2212 goto out;
2213
2214 /*
2215 * Also do not migrate dirty pages, as not all filesystems can move
2216 * dirty pages in MIGRATE_ASYNC mode, so trying would be a waste of cycles.
2217 */
2218 if (page_is_file_lru(page) && PageDirty(page))
2219 goto out;
2220
2221 isolated = numamigrate_isolate_page(pgdat, page);
2222 if (!isolated)
2223 goto out;
2224
2225 list_add(&page->lru, &migratepages);
2226 nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
2227 NULL, node, MIGRATE_ASYNC,
2228 MR_NUMA_MISPLACED, &nr_succeeded);
2229 if (nr_remaining) {
2230 if (!list_empty(&migratepages)) {
2231 list_del(&page->lru);
2232 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
2233 page_is_file_lru(page), -nr_pages);
2234 putback_lru_page(page);
2235 }
2236 isolated = 0;
2237 }
2238 if (nr_succeeded) {
2239 count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
2240 if (!node_is_toptier(page_to_nid(page)) && node_is_toptier(node))
2241 mod_node_page_state(pgdat, PGPROMOTE_SUCCESS,
2242 nr_succeeded);
2243 }
2244 BUG_ON(!list_empty(&migratepages));
2245 return isolated;
2246
2247out:
2248 put_page(page);
2249 return 0;
2250}
2251#endif /* CONFIG_NUMA_BALANCING */
2252#endif /* CONFIG_NUMA */
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Memory Migration functionality - linux/mm/migrate.c
4 *
5 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
6 *
7 * Page migration was first developed in the context of the memory hotplug
8 * project. The main authors of the migration code are:
9 *
10 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
11 * Hirokazu Takahashi <taka@valinux.co.jp>
12 * Dave Hansen <haveblue@us.ibm.com>
13 * Christoph Lameter
14 */
15
16#include <linux/migrate.h>
17#include <linux/export.h>
18#include <linux/swap.h>
19#include <linux/swapops.h>
20#include <linux/pagemap.h>
21#include <linux/buffer_head.h>
22#include <linux/mm_inline.h>
23#include <linux/nsproxy.h>
24#include <linux/pagevec.h>
25#include <linux/ksm.h>
26#include <linux/rmap.h>
27#include <linux/topology.h>
28#include <linux/cpu.h>
29#include <linux/cpuset.h>
30#include <linux/writeback.h>
31#include <linux/mempolicy.h>
32#include <linux/vmalloc.h>
33#include <linux/security.h>
34#include <linux/backing-dev.h>
35#include <linux/compaction.h>
36#include <linux/syscalls.h>
37#include <linux/compat.h>
38#include <linux/hugetlb.h>
39#include <linux/hugetlb_cgroup.h>
40#include <linux/gfp.h>
41#include <linux/pagewalk.h>
42#include <linux/pfn_t.h>
43#include <linux/memremap.h>
44#include <linux/userfaultfd_k.h>
45#include <linux/balloon_compaction.h>
46#include <linux/mmu_notifier.h>
47#include <linux/page_idle.h>
48#include <linux/page_owner.h>
49#include <linux/sched/mm.h>
50#include <linux/ptrace.h>
51
52#include <asm/tlbflush.h>
53
54#define CREATE_TRACE_POINTS
55#include <trace/events/migrate.h>
56
57#include "internal.h"
58
59/*
60 * migrate_prep() needs to be called before we start compiling a list of pages
61 * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
62 * undesirable, use migrate_prep_local()
63 */
64int migrate_prep(void)
65{
66 /*
67 * Clear the LRU lists so pages can be isolated.
68 * Note that pages may be moved off the LRU after we have
69 * drained them. Those pages will fail to migrate like other
70 * pages that may be busy.
71 */
72 lru_add_drain_all();
73
74 return 0;
75}
76
77/* Do the necessary work of migrate_prep but not if it involves other CPUs */
78int migrate_prep_local(void)
79{
80 lru_add_drain();
81
82 return 0;
83}
84
85int isolate_movable_page(struct page *page, isolate_mode_t mode)
86{
87 struct address_space *mapping;
88
89 /*
90 * Avoid burning cycles with pages that are yet under __free_pages(),
91 * or just got freed under us.
92 *
93 * In case we 'win' a race for a movable page being freed under us and
94 * raise its refcount preventing __free_pages() from doing its job
95 * the put_page() at the end of this block will take care of
96 * release this page, thus avoiding a nasty leakage.
97 */
98 if (unlikely(!get_page_unless_zero(page)))
99 goto out;
100
101 /*
102 * Check PageMovable before holding a PG_lock because page's owner
103 * assumes anybody doesn't touch PG_lock of newly allocated page
104 * so unconditionally grabbing the lock ruins page's owner side.
105 */
106 if (unlikely(!__PageMovable(page)))
107 goto out_putpage;
108 /*
109 * As movable pages are not isolated from LRU lists, concurrent
110 * compaction threads can race against page migration functions
111 * as well as race against the releasing a page.
112 *
113 * In order to avoid having an already isolated movable page
114 * being (wrongly) re-isolated while it is under migration,
115 * or to avoid attempting to isolate pages being released,
116 * lets be sure we have the page lock
117 * before proceeding with the movable page isolation steps.
118 */
119 if (unlikely(!trylock_page(page)))
120 goto out_putpage;
121
122 if (!PageMovable(page) || PageIsolated(page))
123 goto out_no_isolated;
124
125 mapping = page_mapping(page);
126 VM_BUG_ON_PAGE(!mapping, page);
127
128 if (!mapping->a_ops->isolate_page(page, mode))
129 goto out_no_isolated;
130
131 /* Driver shouldn't use PG_isolated bit of page->flags */
132 WARN_ON_ONCE(PageIsolated(page));
133 __SetPageIsolated(page);
134 unlock_page(page);
135
136 return 0;
137
138out_no_isolated:
139 unlock_page(page);
140out_putpage:
141 put_page(page);
142out:
143 return -EBUSY;
144}
145
146/* It should be called on page which is PG_movable */
147void putback_movable_page(struct page *page)
148{
149 struct address_space *mapping;
150
151 VM_BUG_ON_PAGE(!PageLocked(page), page);
152 VM_BUG_ON_PAGE(!PageMovable(page), page);
153 VM_BUG_ON_PAGE(!PageIsolated(page), page);
154
155 mapping = page_mapping(page);
156 mapping->a_ops->putback_page(page);
157 __ClearPageIsolated(page);
158}
159
160/*
161 * Put previously isolated pages back onto the appropriate lists
162 * from where they were once taken off for compaction/migration.
163 *
164 * This function shall be used whenever the isolated pageset has been
165 * built from lru, balloon, hugetlbfs page. See isolate_migratepages_range()
166 * and isolate_huge_page().
167 */
168void putback_movable_pages(struct list_head *l)
169{
170 struct page *page;
171 struct page *page2;
172
173 list_for_each_entry_safe(page, page2, l, lru) {
174 if (unlikely(PageHuge(page))) {
175 putback_active_hugepage(page);
176 continue;
177 }
178 list_del(&page->lru);
179 /*
180 * We isolated non-lru movable page so here we can use
181 * __PageMovable because LRU page's mapping cannot have
182 * PAGE_MAPPING_MOVABLE.
183 */
184 if (unlikely(__PageMovable(page))) {
185 VM_BUG_ON_PAGE(!PageIsolated(page), page);
186 lock_page(page);
187 if (PageMovable(page))
188 putback_movable_page(page);
189 else
190 __ClearPageIsolated(page);
191 unlock_page(page);
192 put_page(page);
193 } else {
194 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
195 page_is_file_cache(page), -hpage_nr_pages(page));
196 putback_lru_page(page);
197 }
198 }
199}
200
201/*
202 * Restore a potential migration pte to a working pte entry
203 */
204static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
205 unsigned long addr, void *old)
206{
207 struct page_vma_mapped_walk pvmw = {
208 .page = old,
209 .vma = vma,
210 .address = addr,
211 .flags = PVMW_SYNC | PVMW_MIGRATION,
212 };
213 struct page *new;
214 pte_t pte;
215 swp_entry_t entry;
216
217 VM_BUG_ON_PAGE(PageTail(page), page);
218 while (page_vma_mapped_walk(&pvmw)) {
219 if (PageKsm(page))
220 new = page;
221 else
222 new = page - pvmw.page->index +
223 linear_page_index(vma, pvmw.address);
224
225#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
226 /* PMD-mapped THP migration entry */
227 if (!pvmw.pte) {
228 VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page);
229 remove_migration_pmd(&pvmw, new);
230 continue;
231 }
232#endif
233
234 get_page(new);
235 pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
236 if (pte_swp_soft_dirty(*pvmw.pte))
237 pte = pte_mksoft_dirty(pte);
238
239 /*
240 * Recheck VMA as permissions can change since migration started
241 */
242 entry = pte_to_swp_entry(*pvmw.pte);
243 if (is_write_migration_entry(entry))
244 pte = maybe_mkwrite(pte, vma);
245
246 if (unlikely(is_zone_device_page(new))) {
247 if (is_device_private_page(new)) {
248 entry = make_device_private_entry(new, pte_write(pte));
249 pte = swp_entry_to_pte(entry);
250 }
251 }
252
253#ifdef CONFIG_HUGETLB_PAGE
254 if (PageHuge(new)) {
255 pte = pte_mkhuge(pte);
256 pte = arch_make_huge_pte(pte, vma, new, 0);
257 set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
258 if (PageAnon(new))
259 hugepage_add_anon_rmap(new, vma, pvmw.address);
260 else
261 page_dup_rmap(new, true);
262 } else
263#endif
264 {
265 set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
266
267 if (PageAnon(new))
268 page_add_anon_rmap(new, vma, pvmw.address, false);
269 else
270 page_add_file_rmap(new, false);
271 }
272 if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
273 mlock_vma_page(new);
274
275 if (PageTransHuge(page) && PageMlocked(page))
276 clear_page_mlock(page);
277
278 /* No need to invalidate - it was non-present before */
279 update_mmu_cache(vma, pvmw.address, pvmw.pte);
280 }
281
282 return true;
283}
284
285/*
286 * Get rid of all migration entries and replace them by
287 * references to the indicated page.
288 */
289void remove_migration_ptes(struct page *old, struct page *new, bool locked)
290{
291 struct rmap_walk_control rwc = {
292 .rmap_one = remove_migration_pte,
293 .arg = old,
294 };
295
296 if (locked)
297 rmap_walk_locked(new, &rwc);
298 else
299 rmap_walk(new, &rwc);
300}
301
302/*
303 * Something used the pte of a page under migration. We need to
304 * get to the page and wait until migration is finished.
305 * When we return from this function the fault will be retried.
306 */
307void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
308 spinlock_t *ptl)
309{
310 pte_t pte;
311 swp_entry_t entry;
312 struct page *page;
313
314 spin_lock(ptl);
315 pte = *ptep;
316 if (!is_swap_pte(pte))
317 goto out;
318
319 entry = pte_to_swp_entry(pte);
320 if (!is_migration_entry(entry))
321 goto out;
322
323 page = migration_entry_to_page(entry);
324
325 /*
326 * Once page cache replacement of page migration started, page_count
327 * is zero; but we must not call put_and_wait_on_page_locked() without
328 * a ref. Use get_page_unless_zero(), and just fault again if it fails.
329 */
330 if (!get_page_unless_zero(page))
331 goto out;
332 pte_unmap_unlock(ptep, ptl);
333 put_and_wait_on_page_locked(page);
334 return;
335out:
336 pte_unmap_unlock(ptep, ptl);
337}
338
339void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
340 unsigned long address)
341{
342 spinlock_t *ptl = pte_lockptr(mm, pmd);
343 pte_t *ptep = pte_offset_map(pmd, address);
344 __migration_entry_wait(mm, ptep, ptl);
345}
346
347void migration_entry_wait_huge(struct vm_area_struct *vma,
348 struct mm_struct *mm, pte_t *pte)
349{
350 spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte);
351 __migration_entry_wait(mm, pte, ptl);
352}
353
354#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
355void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
356{
357 spinlock_t *ptl;
358 struct page *page;
359
360 ptl = pmd_lock(mm, pmd);
361 if (!is_pmd_migration_entry(*pmd))
362 goto unlock;
363 page = migration_entry_to_page(pmd_to_swp_entry(*pmd));
364 if (!get_page_unless_zero(page))
365 goto unlock;
366 spin_unlock(ptl);
367 put_and_wait_on_page_locked(page);
368 return;
369unlock:
370 spin_unlock(ptl);
371}
372#endif
373
374static int expected_page_refs(struct address_space *mapping, struct page *page)
375{
376 int expected_count = 1;
377
378 /*
379 * Device public or private pages have an extra refcount as they are
380 * ZONE_DEVICE pages.
381 */
382 expected_count += is_device_private_page(page);
383 if (mapping)
384 expected_count += hpage_nr_pages(page) + page_has_private(page);
385
386 return expected_count;
387}
388
389/*
390 * Replace the page in the mapping.
391 *
392 * The number of remaining references must be:
393 * 1 for anonymous pages without a mapping
394 * 2 for pages with a mapping
395 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
396 */
397int migrate_page_move_mapping(struct address_space *mapping,
398 struct page *newpage, struct page *page, int extra_count)
399{
400 XA_STATE(xas, &mapping->i_pages, page_index(page));
401 struct zone *oldzone, *newzone;
402 int dirty;
403 int expected_count = expected_page_refs(mapping, page) + extra_count;
404
405 if (!mapping) {
406 /* Anonymous page without mapping */
407 if (page_count(page) != expected_count)
408 return -EAGAIN;
409
410 /* No turning back from here */
411 newpage->index = page->index;
412 newpage->mapping = page->mapping;
413 if (PageSwapBacked(page))
414 __SetPageSwapBacked(newpage);
415
416 return MIGRATEPAGE_SUCCESS;
417 }
418
419 oldzone = page_zone(page);
420 newzone = page_zone(newpage);
421
422 xas_lock_irq(&xas);
423 if (page_count(page) != expected_count || xas_load(&xas) != page) {
424 xas_unlock_irq(&xas);
425 return -EAGAIN;
426 }
427
428 if (!page_ref_freeze(page, expected_count)) {
429 xas_unlock_irq(&xas);
430 return -EAGAIN;
431 }
432
433 /*
434 * Now we know that no one else is looking at the page:
435 * no turning back from here.
436 */
437 newpage->index = page->index;
438 newpage->mapping = page->mapping;
439 page_ref_add(newpage, hpage_nr_pages(page)); /* add cache reference */
440 if (PageSwapBacked(page)) {
441 __SetPageSwapBacked(newpage);
442 if (PageSwapCache(page)) {
443 SetPageSwapCache(newpage);
444 set_page_private(newpage, page_private(page));
445 }
446 } else {
447 VM_BUG_ON_PAGE(PageSwapCache(page), page);
448 }
449
450 /* Move dirty while page refs frozen and newpage not yet exposed */
451 dirty = PageDirty(page);
452 if (dirty) {
453 ClearPageDirty(page);
454 SetPageDirty(newpage);
455 }
456
457 xas_store(&xas, newpage);
458 if (PageTransHuge(page)) {
459 int i;
460
461 for (i = 1; i < HPAGE_PMD_NR; i++) {
462 xas_next(&xas);
463 xas_store(&xas, newpage);
464 }
465 }
466
467 /*
468 * Drop cache reference from old page by unfreezing
469 * to one less reference.
470 * We know this isn't the last reference.
471 */
472 page_ref_unfreeze(page, expected_count - hpage_nr_pages(page));
473
474 xas_unlock(&xas);
475 /* Leave irq disabled to prevent preemption while updating stats */
476
477 /*
478 * If moved to a different zone then also account
479 * the page for that zone. Other VM counters will be
480 * taken care of when we establish references to the
481 * new page and drop references to the old page.
482 *
483 * Note that anonymous pages are accounted for
484 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
485 * are mapped to swap space.
486 */
487 if (newzone != oldzone) {
488 __dec_node_state(oldzone->zone_pgdat, NR_FILE_PAGES);
489 __inc_node_state(newzone->zone_pgdat, NR_FILE_PAGES);
490 if (PageSwapBacked(page) && !PageSwapCache(page)) {
491 __dec_node_state(oldzone->zone_pgdat, NR_SHMEM);
492 __inc_node_state(newzone->zone_pgdat, NR_SHMEM);
493 }
494 if (dirty && mapping_cap_account_dirty(mapping)) {
495 __dec_node_state(oldzone->zone_pgdat, NR_FILE_DIRTY);
496 __dec_zone_state(oldzone, NR_ZONE_WRITE_PENDING);
497 __inc_node_state(newzone->zone_pgdat, NR_FILE_DIRTY);
498 __inc_zone_state(newzone, NR_ZONE_WRITE_PENDING);
499 }
500 }
501 local_irq_enable();
502
503 return MIGRATEPAGE_SUCCESS;
504}
505EXPORT_SYMBOL(migrate_page_move_mapping);
506
507/*
508 * The expected number of remaining references is the same as that
509 * of migrate_page_move_mapping().
510 */
511int migrate_huge_page_move_mapping(struct address_space *mapping,
512 struct page *newpage, struct page *page)
513{
514 XA_STATE(xas, &mapping->i_pages, page_index(page));
515 int expected_count;
516
517 xas_lock_irq(&xas);
518 expected_count = 2 + page_has_private(page);
519 if (page_count(page) != expected_count || xas_load(&xas) != page) {
520 xas_unlock_irq(&xas);
521 return -EAGAIN;
522 }
523
524 if (!page_ref_freeze(page, expected_count)) {
525 xas_unlock_irq(&xas);
526 return -EAGAIN;
527 }
528
529 newpage->index = page->index;
530 newpage->mapping = page->mapping;
531
532 get_page(newpage);
533
534 xas_store(&xas, newpage);
535
536 page_ref_unfreeze(page, expected_count - 1);
537
538 xas_unlock_irq(&xas);
539
540 return MIGRATEPAGE_SUCCESS;
541}
542
543/*
544 * Gigantic pages are so large that we do not guarantee that page++ pointer
545 * arithmetic will work across the entire page. We need something more
546 * specialized.
547 */
548static void __copy_gigantic_page(struct page *dst, struct page *src,
549 int nr_pages)
550{
551 int i;
552 struct page *dst_base = dst;
553 struct page *src_base = src;
554
555 for (i = 0; i < nr_pages; ) {
556 cond_resched();
557 copy_highpage(dst, src);
558
559 i++;
560 dst = mem_map_next(dst, dst_base, i);
561 src = mem_map_next(src, src_base, i);
562 }
563}
564
565static void copy_huge_page(struct page *dst, struct page *src)
566{
567 int i;
568 int nr_pages;
569
570 if (PageHuge(src)) {
571 /* hugetlbfs page */
572 struct hstate *h = page_hstate(src);
573 nr_pages = pages_per_huge_page(h);
574
575 if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) {
576 __copy_gigantic_page(dst, src, nr_pages);
577 return;
578 }
579 } else {
580 /* thp page */
581 BUG_ON(!PageTransHuge(src));
582 nr_pages = hpage_nr_pages(src);
583 }
584
585 for (i = 0; i < nr_pages; i++) {
586 cond_resched();
587 copy_highpage(dst + i, src + i);
588 }
589}
590
591/*
592 * Copy the page to its new location
593 */
594void migrate_page_states(struct page *newpage, struct page *page)
595{
596 int cpupid;
597
598 if (PageError(page))
599 SetPageError(newpage);
600 if (PageReferenced(page))
601 SetPageReferenced(newpage);
602 if (PageUptodate(page))
603 SetPageUptodate(newpage);
604 if (TestClearPageActive(page)) {
605 VM_BUG_ON_PAGE(PageUnevictable(page), page);
606 SetPageActive(newpage);
607 } else if (TestClearPageUnevictable(page))
608 SetPageUnevictable(newpage);
609 if (PageWorkingset(page))
610 SetPageWorkingset(newpage);
611 if (PageChecked(page))
612 SetPageChecked(newpage);
613 if (PageMappedToDisk(page))
614 SetPageMappedToDisk(newpage);
615
616 /* Move dirty on pages not done by migrate_page_move_mapping() */
617 if (PageDirty(page))
618 SetPageDirty(newpage);
619
620 if (page_is_young(page))
621 set_page_young(newpage);
622 if (page_is_idle(page))
623 set_page_idle(newpage);
624
625 /*
626 * Copy NUMA information to the new page, to prevent over-eager
627 * future migrations of this same page.
628 */
629 cpupid = page_cpupid_xchg_last(page, -1);
630 page_cpupid_xchg_last(newpage, cpupid);
631
632 ksm_migrate_page(newpage, page);
633 /*
634 * Please do not reorder this without considering how mm/ksm.c's
635 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
636 */
637 if (PageSwapCache(page))
638 ClearPageSwapCache(page);
639 ClearPagePrivate(page);
640 set_page_private(page, 0);
641
642 /*
643 * If any waiters have accumulated on the new page then
644 * wake them up.
645 */
646 if (PageWriteback(newpage))
647 end_page_writeback(newpage);
648
649 copy_page_owner(page, newpage);
650
651 mem_cgroup_migrate(page, newpage);
652}
653EXPORT_SYMBOL(migrate_page_states);
654
655void migrate_page_copy(struct page *newpage, struct page *page)
656{
657 if (PageHuge(page) || PageTransHuge(page))
658 copy_huge_page(newpage, page);
659 else
660 copy_highpage(newpage, page);
661
662 migrate_page_states(newpage, page);
663}
664EXPORT_SYMBOL(migrate_page_copy);
665
666/************************************************************
667 * Migration functions
668 ***********************************************************/
669
670/*
671 * Common logic to directly migrate a single LRU page suitable for
672 * pages that do not use PagePrivate/PagePrivate2.
673 *
674 * Pages are locked upon entry and exit.
675 */
676int migrate_page(struct address_space *mapping,
677 struct page *newpage, struct page *page,
678 enum migrate_mode mode)
679{
680 int rc;
681
682 BUG_ON(PageWriteback(page)); /* Writeback must be complete */
683
684 rc = migrate_page_move_mapping(mapping, newpage, page, 0);
685
686 if (rc != MIGRATEPAGE_SUCCESS)
687 return rc;
688
689 if (mode != MIGRATE_SYNC_NO_COPY)
690 migrate_page_copy(newpage, page);
691 else
692 migrate_page_states(newpage, page);
693 return MIGRATEPAGE_SUCCESS;
694}
695EXPORT_SYMBOL(migrate_page);
696
697#ifdef CONFIG_BLOCK
698/* Returns true if all buffers are successfully locked */
699static bool buffer_migrate_lock_buffers(struct buffer_head *head,
700 enum migrate_mode mode)
701{
702 struct buffer_head *bh = head;
703
704 /* Simple case, sync compaction */
705 if (mode != MIGRATE_ASYNC) {
706 do {
707 lock_buffer(bh);
708 bh = bh->b_this_page;
709
710 } while (bh != head);
711
712 return true;
713 }
714
715 /* async case, we cannot block on lock_buffer so use trylock_buffer */
716 do {
717 if (!trylock_buffer(bh)) {
718 /*
719 * We failed to lock the buffer and cannot stall in
720 * async migration. Release the taken locks
721 */
722 struct buffer_head *failed_bh = bh;
723 bh = head;
724 while (bh != failed_bh) {
725 unlock_buffer(bh);
726 bh = bh->b_this_page;
727 }
728 return false;
729 }
730
731 bh = bh->b_this_page;
732 } while (bh != head);
733 return true;
734}
735
736static int __buffer_migrate_page(struct address_space *mapping,
737 struct page *newpage, struct page *page, enum migrate_mode mode,
738 bool check_refs)
739{
740 struct buffer_head *bh, *head;
741 int rc;
742 int expected_count;
743
744 if (!page_has_buffers(page))
745 return migrate_page(mapping, newpage, page, mode);
746
747 /* Check whether page does not have extra refs before we do more work */
748 expected_count = expected_page_refs(mapping, page);
749 if (page_count(page) != expected_count)
750 return -EAGAIN;
751
752 head = page_buffers(page);
753 if (!buffer_migrate_lock_buffers(head, mode))
754 return -EAGAIN;
755
756 if (check_refs) {
757 bool busy;
758 bool invalidated = false;
759
760recheck_buffers:
761 busy = false;
762 spin_lock(&mapping->private_lock);
763 bh = head;
764 do {
765 if (atomic_read(&bh->b_count)) {
766 busy = true;
767 break;
768 }
769 bh = bh->b_this_page;
770 } while (bh != head);
771 if (busy) {
772 if (invalidated) {
773 rc = -EAGAIN;
774 goto unlock_buffers;
775 }
776 spin_unlock(&mapping->private_lock);
777 invalidate_bh_lrus();
778 invalidated = true;
779 goto recheck_buffers;
780 }
781 }
782
783 rc = migrate_page_move_mapping(mapping, newpage, page, 0);
784 if (rc != MIGRATEPAGE_SUCCESS)
785 goto unlock_buffers;
786
787 ClearPagePrivate(page);
788 set_page_private(newpage, page_private(page));
789 set_page_private(page, 0);
790 put_page(page);
791 get_page(newpage);
792
793 bh = head;
794 do {
795 set_bh_page(bh, newpage, bh_offset(bh));
796 bh = bh->b_this_page;
797
798 } while (bh != head);
799
800 SetPagePrivate(newpage);
801
802 if (mode != MIGRATE_SYNC_NO_COPY)
803 migrate_page_copy(newpage, page);
804 else
805 migrate_page_states(newpage, page);
806
807 rc = MIGRATEPAGE_SUCCESS;
808unlock_buffers:
809 if (check_refs)
810 spin_unlock(&mapping->private_lock);
811 bh = head;
812 do {
813 unlock_buffer(bh);
814 bh = bh->b_this_page;
815
816 } while (bh != head);
817
818 return rc;
819}
820
821/*
822 * Migration function for pages with buffers. This function can only be used
823 * if the underlying filesystem guarantees that no other references to "page"
824 * exist. For example attached buffer heads are accessed only under page lock.
825 */
826int buffer_migrate_page(struct address_space *mapping,
827 struct page *newpage, struct page *page, enum migrate_mode mode)
828{
829 return __buffer_migrate_page(mapping, newpage, page, mode, false);
830}
831EXPORT_SYMBOL(buffer_migrate_page);
832
833/*
834 * Same as above except that this variant is more careful and checks that there
835 * are also no buffer head references. This function is the right one for
836 * mappings where buffer heads are directly looked up and referenced (such as
837 * block device mappings).
838 */
839int buffer_migrate_page_norefs(struct address_space *mapping,
840 struct page *newpage, struct page *page, enum migrate_mode mode)
841{
842 return __buffer_migrate_page(mapping, newpage, page, mode, true);
843}
844#endif
845
846/*
847 * Writeback a page to clean the dirty state
848 */
849static int writeout(struct address_space *mapping, struct page *page)
850{
851 struct writeback_control wbc = {
852 .sync_mode = WB_SYNC_NONE,
853 .nr_to_write = 1,
854 .range_start = 0,
855 .range_end = LLONG_MAX,
856 .for_reclaim = 1
857 };
858 int rc;
859
860 if (!mapping->a_ops->writepage)
861 /* No write method for the address space */
862 return -EINVAL;
863
864 if (!clear_page_dirty_for_io(page))
865 /* Someone else already triggered a write */
866 return -EAGAIN;
867
868 /*
869 * A dirty page may imply that the underlying filesystem has
870 * the page on some queue. So the page must be clean for
871 * migration. Writeout may mean we loose the lock and the
872 * page state is no longer what we checked for earlier.
873 * At this point we know that the migration attempt cannot
874 * be successful.
875 */
876 remove_migration_ptes(page, page, false);
877
878 rc = mapping->a_ops->writepage(page, &wbc);
879
880 if (rc != AOP_WRITEPAGE_ACTIVATE)
881 /* unlocked. Relock */
882 lock_page(page);
883
884 return (rc < 0) ? -EIO : -EAGAIN;
885}
886
887/*
888 * Default handling if a filesystem does not provide a migration function.
889 */
890static int fallback_migrate_page(struct address_space *mapping,
891 struct page *newpage, struct page *page, enum migrate_mode mode)
892{
893 if (PageDirty(page)) {
894 /* Only writeback pages in full synchronous migration */
895 switch (mode) {
896 case MIGRATE_SYNC:
897 case MIGRATE_SYNC_NO_COPY:
898 break;
899 default:
900 return -EBUSY;
901 }
902 return writeout(mapping, page);
903 }
904
905 /*
906 * Buffers may be managed in a filesystem specific way.
907 * We must have no buffers or drop them.
908 */
909 if (page_has_private(page) &&
910 !try_to_release_page(page, GFP_KERNEL))
911 return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;
912
913 return migrate_page(mapping, newpage, page, mode);
914}
915
916/*
917 * Move a page to a newly allocated page
918 * The page is locked and all ptes have been successfully removed.
919 *
920 * The new page will have replaced the old page if this function
921 * is successful.
922 *
923 * Return value:
924 * < 0 - error code
925 * MIGRATEPAGE_SUCCESS - success
926 */
927static int move_to_new_page(struct page *newpage, struct page *page,
928 enum migrate_mode mode)
929{
930 struct address_space *mapping;
931 int rc = -EAGAIN;
932 bool is_lru = !__PageMovable(page);
933
934 VM_BUG_ON_PAGE(!PageLocked(page), page);
935 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
936
937 mapping = page_mapping(page);
938
939 if (likely(is_lru)) {
940 if (!mapping)
941 rc = migrate_page(mapping, newpage, page, mode);
942 else if (mapping->a_ops->migratepage)
943 /*
944 * Most pages have a mapping and most filesystems
945 * provide a migratepage callback. Anonymous pages
946 * are part of swap space which also has its own
947 * migratepage callback. This is the most common path
948 * for page migration.
949 */
950 rc = mapping->a_ops->migratepage(mapping, newpage,
951 page, mode);
952 else
953 rc = fallback_migrate_page(mapping, newpage,
954 page, mode);
955 } else {
956 /*
957 * In case of non-lru page, it could be released after
958 * isolation step. In that case, we shouldn't try migration.
959 */
960 VM_BUG_ON_PAGE(!PageIsolated(page), page);
961 if (!PageMovable(page)) {
962 rc = MIGRATEPAGE_SUCCESS;
963 __ClearPageIsolated(page);
964 goto out;
965 }
966
967 rc = mapping->a_ops->migratepage(mapping, newpage,
968 page, mode);
969 WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
970 !PageIsolated(page));
971 }
972
973 /*
974 * When successful, old pagecache page->mapping must be cleared before
975 * page is freed; but stats require that PageAnon be left as PageAnon.
976 */
977 if (rc == MIGRATEPAGE_SUCCESS) {
978 if (__PageMovable(page)) {
979 VM_BUG_ON_PAGE(!PageIsolated(page), page);
980
981 /*
982 * We clear PG_movable under page_lock so any compactor
983 * cannot try to migrate this page.
984 */
985 __ClearPageIsolated(page);
986 }
987
988 /*
989 * Anonymous and movable page->mapping will be cleard by
990 * free_pages_prepare so don't reset it here for keeping
991 * the type to work PageAnon, for example.
992 */
993 if (!PageMappingFlags(page))
994 page->mapping = NULL;
995
996 if (likely(!is_zone_device_page(newpage)))
997 flush_dcache_page(newpage);
998
999 }
1000out:
1001 return rc;
1002}
1003
1004static int __unmap_and_move(struct page *page, struct page *newpage,
1005 int force, enum migrate_mode mode)
1006{
1007 int rc = -EAGAIN;
1008 int page_was_mapped = 0;
1009 struct anon_vma *anon_vma = NULL;
1010 bool is_lru = !__PageMovable(page);
1011
1012 if (!trylock_page(page)) {
1013 if (!force || mode == MIGRATE_ASYNC)
1014 goto out;
1015
1016 /*
1017 * It's not safe for direct compaction to call lock_page.
1018 * For example, during page readahead pages are added locked
1019 * to the LRU. Later, when the IO completes the pages are
1020 * marked uptodate and unlocked. However, the queueing
1021 * could be merging multiple pages for one bio (e.g.
1022 * mpage_readpages). If an allocation happens for the
1023 * second or third page, the process can end up locking
1024 * the same page twice and deadlocking. Rather than
1025 * trying to be clever about what pages can be locked,
1026 * avoid the use of lock_page for direct compaction
1027 * altogether.
1028 */
1029 if (current->flags & PF_MEMALLOC)
1030 goto out;
1031
1032 lock_page(page);
1033 }
1034
1035 if (PageWriteback(page)) {
1036 /*
1037 * Only in the case of a full synchronous migration is it
1038 * necessary to wait for PageWriteback. In the async case,
1039 * the retry loop is too short and in the sync-light case,
1040 * the overhead of stalling is too much
1041 */
1042 switch (mode) {
1043 case MIGRATE_SYNC:
1044 case MIGRATE_SYNC_NO_COPY:
1045 break;
1046 default:
1047 rc = -EBUSY;
1048 goto out_unlock;
1049 }
1050 if (!force)
1051 goto out_unlock;
1052 wait_on_page_writeback(page);
1053 }
1054
1055 /*
1056 * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
1057 * we cannot notice that anon_vma is freed while we migrates a page.
1058 * This get_anon_vma() delays freeing anon_vma pointer until the end
1059 * of migration. File cache pages are no problem because of page_lock()
1060 * File Caches may use write_page() or lock_page() in migration, then,
1061 * just care Anon page here.
1062 *
1063 * Only page_get_anon_vma() understands the subtleties of
1064 * getting a hold on an anon_vma from outside one of its mms.
1065 * But if we cannot get anon_vma, then we won't need it anyway,
1066 * because that implies that the anon page is no longer mapped
1067 * (and cannot be remapped so long as we hold the page lock).
1068 */
1069 if (PageAnon(page) && !PageKsm(page))
1070 anon_vma = page_get_anon_vma(page);
1071
1072 /*
1073 * Block others from accessing the new page when we get around to
1074 * establishing additional references. We are usually the only one
1075 * holding a reference to newpage at this point. We used to have a BUG
1076 * here if trylock_page(newpage) fails, but would like to allow for
1077 * cases where there might be a race with the previous use of newpage.
1078 * This is much like races on refcount of oldpage: just don't BUG().
1079 */
1080 if (unlikely(!trylock_page(newpage)))
1081 goto out_unlock;
1082
1083 if (unlikely(!is_lru)) {
1084 rc = move_to_new_page(newpage, page, mode);
1085 goto out_unlock_both;
1086 }
1087
1088 /*
1089 * Corner case handling:
1090 * 1. When a new swap-cache page is read into, it is added to the LRU
1091 * and treated as swapcache but it has no rmap yet.
1092 * Calling try_to_unmap() against a page->mapping==NULL page will
1093 * trigger a BUG. So handle it here.
1094 * 2. An orphaned page (see truncate_complete_page) might have
1095 * fs-private metadata. The page can be picked up due to memory
1096 * offlining. Everywhere else except page reclaim, the page is
1097 * invisible to the vm, so the page can not be migrated. So try to
1098 * free the metadata, so the page can be freed.
1099 */
1100 if (!page->mapping) {
1101 VM_BUG_ON_PAGE(PageAnon(page), page);
1102 if (page_has_private(page)) {
1103 try_to_free_buffers(page);
1104 goto out_unlock_both;
1105 }
1106 } else if (page_mapped(page)) {
1107 /* Establish migration ptes */
1108 VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
1109 page);
1110 try_to_unmap(page,
1111 TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
1112 page_was_mapped = 1;
1113 }
1114
1115 if (!page_mapped(page))
1116 rc = move_to_new_page(newpage, page, mode);
1117
1118 if (page_was_mapped)
1119 remove_migration_ptes(page,
1120 rc == MIGRATEPAGE_SUCCESS ? newpage : page, false);
1121
1122out_unlock_both:
1123 unlock_page(newpage);
1124out_unlock:
1125 /* Drop an anon_vma reference if we took one */
1126 if (anon_vma)
1127 put_anon_vma(anon_vma);
1128 unlock_page(page);
1129out:
1130 /*
1131 * If migration is successful, decrease refcount of the newpage
1132 * which will not free the page because new page owner increased
1133 * refcounter. As well, if it is LRU page, add the page to LRU
1134 * list in here. Use the old state of the isolated source page to
1135 * determine if we migrated a LRU page. newpage was already unlocked
1136 * and possibly modified by its owner - don't rely on the page
1137 * state.
1138 */
1139 if (rc == MIGRATEPAGE_SUCCESS) {
1140 if (unlikely(!is_lru))
1141 put_page(newpage);
1142 else
1143 putback_lru_page(newpage);
1144 }
1145
1146 return rc;
1147}
1148
1149/*
1150 * gcc 4.7 and 4.8 on arm get an ICEs when inlining unmap_and_move(). Work
1151 * around it.
1152 */
1153#if defined(CONFIG_ARM) && \
1154 defined(GCC_VERSION) && GCC_VERSION < 40900 && GCC_VERSION >= 40700
1155#define ICE_noinline noinline
1156#else
1157#define ICE_noinline
1158#endif
1159
1160/*
1161 * Obtain the lock on page, remove all ptes and migrate the page
1162 * to the newly allocated page in newpage.
1163 */
1164static ICE_noinline int unmap_and_move(new_page_t get_new_page,
1165 free_page_t put_new_page,
1166 unsigned long private, struct page *page,
1167 int force, enum migrate_mode mode,
1168 enum migrate_reason reason)
1169{
1170 int rc = MIGRATEPAGE_SUCCESS;
1171 struct page *newpage;
1172
1173 if (!thp_migration_supported() && PageTransHuge(page))
1174 return -ENOMEM;
1175
1176 newpage = get_new_page(page, private);
1177 if (!newpage)
1178 return -ENOMEM;
1179
1180 if (page_count(page) == 1) {
1181 /* page was freed from under us. So we are done. */
1182 ClearPageActive(page);
1183 ClearPageUnevictable(page);
1184 if (unlikely(__PageMovable(page))) {
1185 lock_page(page);
1186 if (!PageMovable(page))
1187 __ClearPageIsolated(page);
1188 unlock_page(page);
1189 }
1190 if (put_new_page)
1191 put_new_page(newpage, private);
1192 else
1193 put_page(newpage);
1194 goto out;
1195 }
1196
1197 rc = __unmap_and_move(page, newpage, force, mode);
1198 if (rc == MIGRATEPAGE_SUCCESS)
1199 set_page_owner_migrate_reason(newpage, reason);
1200
1201out:
1202 if (rc != -EAGAIN) {
1203 /*
1204 * A page that has been migrated has all references
1205 * removed and will be freed. A page that has not been
1206 * migrated will have kepts its references and be
1207 * restored.
1208 */
1209 list_del(&page->lru);
1210
1211 /*
1212 * Compaction can migrate also non-LRU pages which are
1213 * not accounted to NR_ISOLATED_*. They can be recognized
1214 * as __PageMovable
1215 */
1216 if (likely(!__PageMovable(page)))
1217 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
1218 page_is_file_cache(page), -hpage_nr_pages(page));
1219 }
1220
1221 /*
1222 * If migration is successful, releases reference grabbed during
1223 * isolation. Otherwise, restore the page to right list unless
1224 * we want to retry.
1225 */
1226 if (rc == MIGRATEPAGE_SUCCESS) {
1227 put_page(page);
1228 if (reason == MR_MEMORY_FAILURE) {
1229 /*
1230 * Set PG_HWPoison on just freed page
1231 * intentionally. Although it's rather weird,
1232 * it's how HWPoison flag works at the moment.
1233 */
1234 if (set_hwpoison_free_buddy_page(page))
1235 num_poisoned_pages_inc();
1236 }
1237 } else {
1238 if (rc != -EAGAIN) {
1239 if (likely(!__PageMovable(page))) {
1240 putback_lru_page(page);
1241 goto put_new;
1242 }
1243
1244 lock_page(page);
1245 if (PageMovable(page))
1246 putback_movable_page(page);
1247 else
1248 __ClearPageIsolated(page);
1249 unlock_page(page);
1250 put_page(page);
1251 }
1252put_new:
1253 if (put_new_page)
1254 put_new_page(newpage, private);
1255 else
1256 put_page(newpage);
1257 }
1258
1259 return rc;
1260}
1261
1262/*
1263 * Counterpart of unmap_and_move_page() for hugepage migration.
1264 *
1265 * This function doesn't wait the completion of hugepage I/O
1266 * because there is no race between I/O and migration for hugepage.
1267 * Note that currently hugepage I/O occurs only in direct I/O
1268 * where no lock is held and PG_writeback is irrelevant,
1269 * and writeback status of all subpages are counted in the reference
1270 * count of the head page (i.e. if all subpages of a 2MB hugepage are
1271 * under direct I/O, the reference of the head page is 512 and a bit more.)
1272 * This means that when we try to migrate hugepage whose subpages are
1273 * doing direct I/O, some references remain after try_to_unmap() and
1274 * hugepage migration fails without data corruption.
1275 *
1276 * There is also no race when direct I/O is issued on the page under migration,
1277 * because then pte is replaced with migration swap entry and direct I/O code
1278 * will wait in the page fault for migration to complete.
1279 */
1280static int unmap_and_move_huge_page(new_page_t get_new_page,
1281 free_page_t put_new_page, unsigned long private,
1282 struct page *hpage, int force,
1283 enum migrate_mode mode, int reason)
1284{
1285 int rc = -EAGAIN;
1286 int page_was_mapped = 0;
1287 struct page *new_hpage;
1288 struct anon_vma *anon_vma = NULL;
1289
1290 /*
1291 * Migratability of hugepages depends on architectures and their size.
1292 * This check is necessary because some callers of hugepage migration
1293 * like soft offline and memory hotremove don't walk through page
1294 * tables or check whether the hugepage is pmd-based or not before
1295 * kicking migration.
1296 */
1297 if (!hugepage_migration_supported(page_hstate(hpage))) {
1298 putback_active_hugepage(hpage);
1299 return -ENOSYS;
1300 }
1301
1302 new_hpage = get_new_page(hpage, private);
1303 if (!new_hpage)
1304 return -ENOMEM;
1305
1306 if (!trylock_page(hpage)) {
1307 if (!force)
1308 goto out;
1309 switch (mode) {
1310 case MIGRATE_SYNC:
1311 case MIGRATE_SYNC_NO_COPY:
1312 break;
1313 default:
1314 goto out;
1315 }
1316 lock_page(hpage);
1317 }
1318
1319 /*
1320 * Check for pages which are in the process of being freed. Without
1321 * page_mapping() set, hugetlbfs specific move page routine will not
1322 * be called and we could leak usage counts for subpools.
1323 */
1324 if (page_private(hpage) && !page_mapping(hpage)) {
1325 rc = -EBUSY;
1326 goto out_unlock;
1327 }
1328
1329 if (PageAnon(hpage))
1330 anon_vma = page_get_anon_vma(hpage);
1331
1332 if (unlikely(!trylock_page(new_hpage)))
1333 goto put_anon;
1334
1335 if (page_mapped(hpage)) {
1336 try_to_unmap(hpage,
1337 TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
1338 page_was_mapped = 1;
1339 }
1340
1341 if (!page_mapped(hpage))
1342 rc = move_to_new_page(new_hpage, hpage, mode);
1343
1344 if (page_was_mapped)
1345 remove_migration_ptes(hpage,
1346 rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, false);
1347
1348 unlock_page(new_hpage);
1349
1350put_anon:
1351 if (anon_vma)
1352 put_anon_vma(anon_vma);
1353
1354 if (rc == MIGRATEPAGE_SUCCESS) {
1355 move_hugetlb_state(hpage, new_hpage, reason);
1356 put_new_page = NULL;
1357 }
1358
1359out_unlock:
1360 unlock_page(hpage);
1361out:
1362 if (rc != -EAGAIN)
1363 putback_active_hugepage(hpage);
1364
1365 /*
1366 * If migration was not successful and there's a freeing callback, use
1367 * it. Otherwise, put_page() will drop the reference grabbed during
1368 * isolation.
1369 */
1370 if (put_new_page)
1371 put_new_page(new_hpage, private);
1372 else
1373 putback_active_hugepage(new_hpage);
1374
1375 return rc;
1376}
1377
1378/*
1379 * migrate_pages - migrate the pages specified in a list, to the free pages
1380 * supplied as the target for the page migration
1381 *
1382 * @from: The list of pages to be migrated.
1383 * @get_new_page: The function used to allocate free pages to be used
1384 * as the target of the page migration.
1385 * @put_new_page: The function used to free target pages if migration
1386 * fails, or NULL if no special handling is necessary.
1387 * @private: Private data to be passed on to get_new_page()
1388 * @mode: The migration mode that specifies the constraints for
1389 * page migration, if any.
1390 * @reason: The reason for page migration.
1391 *
1392 * The function returns after 10 attempts or if no pages are movable any more
1393 * because the list has become empty or no retryable pages exist any more.
1394 * The caller should call putback_movable_pages() to return pages to the LRU
1395 * or free list only if ret != 0.
1396 *
1397 * Returns the number of pages that were not migrated, or an error code.
1398 */
1399int migrate_pages(struct list_head *from, new_page_t get_new_page,
1400 free_page_t put_new_page, unsigned long private,
1401 enum migrate_mode mode, int reason)
1402{
1403 int retry = 1;
1404 int nr_failed = 0;
1405 int nr_succeeded = 0;
1406 int pass = 0;
1407 struct page *page;
1408 struct page *page2;
1409 int swapwrite = current->flags & PF_SWAPWRITE;
1410 int rc;
1411
1412 if (!swapwrite)
1413 current->flags |= PF_SWAPWRITE;
1414
1415 for(pass = 0; pass < 10 && retry; pass++) {
1416 retry = 0;
1417
1418 list_for_each_entry_safe(page, page2, from, lru) {
1419retry:
1420 cond_resched();
1421
1422 if (PageHuge(page))
1423 rc = unmap_and_move_huge_page(get_new_page,
1424 put_new_page, private, page,
1425 pass > 2, mode, reason);
1426 else
1427 rc = unmap_and_move(get_new_page, put_new_page,
1428 private, page, pass > 2, mode,
1429 reason);
1430
1431 switch(rc) {
1432 case -ENOMEM:
1433 /*
1434 * THP migration might be unsupported or the
1435 * allocation could've failed so we should
1436 * retry on the same page with the THP split
1437 * to base pages.
1438 *
1439 * Head page is retried immediately and tail
1440 * pages are added to the tail of the list so
1441 * we encounter them after the rest of the list
1442 * is processed.
1443 */
1444 if (PageTransHuge(page) && !PageHuge(page)) {
1445 lock_page(page);
1446 rc = split_huge_page_to_list(page, from);
1447 unlock_page(page);
1448 if (!rc) {
1449 list_safe_reset_next(page, page2, lru);
1450 goto retry;
1451 }
1452 }
1453 nr_failed++;
1454 goto out;
1455 case -EAGAIN:
1456 retry++;
1457 break;
1458 case MIGRATEPAGE_SUCCESS:
1459 nr_succeeded++;
1460 break;
1461 default:
1462 /*
1463 * Permanent failure (-EBUSY, -ENOSYS, etc.):
1464 * unlike -EAGAIN case, the failed page is
1465 * removed from migration page list and not
1466 * retried in the next outer loop.
1467 */
1468 nr_failed++;
1469 break;
1470 }
1471 }
1472 }
1473 nr_failed += retry;
1474 rc = nr_failed;
1475out:
1476 if (nr_succeeded)
1477 count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
1478 if (nr_failed)
1479 count_vm_events(PGMIGRATE_FAIL, nr_failed);
1480 trace_mm_migrate_pages(nr_succeeded, nr_failed, mode, reason);
1481
1482 if (!swapwrite)
1483 current->flags &= ~PF_SWAPWRITE;
1484
1485 return rc;
1486}
1487
1488#ifdef CONFIG_NUMA
1489
1490static int store_status(int __user *status, int start, int value, int nr)
1491{
1492 while (nr-- > 0) {
1493 if (put_user(value, status + start))
1494 return -EFAULT;
1495 start++;
1496 }
1497
1498 return 0;
1499}
1500
1501static int do_move_pages_to_node(struct mm_struct *mm,
1502 struct list_head *pagelist, int node)
1503{
1504 int err;
1505
1506 if (list_empty(pagelist))
1507 return 0;
1508
1509 err = migrate_pages(pagelist, alloc_new_node_page, NULL, node,
1510 MIGRATE_SYNC, MR_SYSCALL);
1511 if (err)
1512 putback_movable_pages(pagelist);
1513 return err;
1514}
1515
1516/*
1517 * Resolves the given address to a struct page, isolates it from the LRU and
1518 * puts it to the given pagelist.
1519 * Returns -errno if the page cannot be found/isolated or 0 when it has been
1520 * queued or the page doesn't need to be migrated because it is already on
1521 * the target node
1522 */
1523static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
1524 int node, struct list_head *pagelist, bool migrate_all)
1525{
1526 struct vm_area_struct *vma;
1527 struct page *page;
1528 unsigned int follflags;
1529 int err;
1530
1531 down_read(&mm->mmap_sem);
1532 err = -EFAULT;
1533 vma = find_vma(mm, addr);
1534 if (!vma || addr < vma->vm_start || !vma_migratable(vma))
1535 goto out;
1536
1537 /* FOLL_DUMP to ignore special (like zero) pages */
1538 follflags = FOLL_GET | FOLL_DUMP;
1539 page = follow_page(vma, addr, follflags);
1540
1541 err = PTR_ERR(page);
1542 if (IS_ERR(page))
1543 goto out;
1544
1545 err = -ENOENT;
1546 if (!page)
1547 goto out;
1548
1549 err = 0;
1550 if (page_to_nid(page) == node)
1551 goto out_putpage;
1552
1553 err = -EACCES;
1554 if (page_mapcount(page) > 1 && !migrate_all)
1555 goto out_putpage;
1556
1557 if (PageHuge(page)) {
1558 if (PageHead(page)) {
1559 isolate_huge_page(page, pagelist);
1560 err = 0;
1561 }
1562 } else {
1563 struct page *head;
1564
1565 head = compound_head(page);
1566 err = isolate_lru_page(head);
1567 if (err)
1568 goto out_putpage;
1569
1570 err = 0;
1571 list_add_tail(&head->lru, pagelist);
1572 mod_node_page_state(page_pgdat(head),
1573 NR_ISOLATED_ANON + page_is_file_cache(head),
1574 hpage_nr_pages(head));
1575 }
1576out_putpage:
1577 /*
1578 * Either remove the duplicate refcount from
1579 * isolate_lru_page() or drop the page ref if it was
1580 * not isolated.
1581 */
1582 put_page(page);
1583out:
1584 up_read(&mm->mmap_sem);
1585 return err;
1586}
1587
1588/*
1589 * Migrate an array of page address onto an array of nodes and fill
1590 * the corresponding array of status.
1591 */
1592static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
1593 unsigned long nr_pages,
1594 const void __user * __user *pages,
1595 const int __user *nodes,
1596 int __user *status, int flags)
1597{
1598 int current_node = NUMA_NO_NODE;
1599 LIST_HEAD(pagelist);
1600 int start, i;
1601 int err = 0, err1;
1602
1603 migrate_prep();
1604
1605 for (i = start = 0; i < nr_pages; i++) {
1606 const void __user *p;
1607 unsigned long addr;
1608 int node;
1609
1610 err = -EFAULT;
1611 if (get_user(p, pages + i))
1612 goto out_flush;
1613 if (get_user(node, nodes + i))
1614 goto out_flush;
1615 addr = (unsigned long)untagged_addr(p);
1616
1617 err = -ENODEV;
1618 if (node < 0 || node >= MAX_NUMNODES)
1619 goto out_flush;
1620 if (!node_state(node, N_MEMORY))
1621 goto out_flush;
1622
1623 err = -EACCES;
1624 if (!node_isset(node, task_nodes))
1625 goto out_flush;
1626
1627 if (current_node == NUMA_NO_NODE) {
1628 current_node = node;
1629 start = i;
1630 } else if (node != current_node) {
1631 err = do_move_pages_to_node(mm, &pagelist, current_node);
1632 if (err)
1633 goto out;
1634 err = store_status(status, start, current_node, i - start);
1635 if (err)
1636 goto out;
1637 start = i;
1638 current_node = node;
1639 }
1640
1641 /*
1642 * Errors in the page lookup or isolation are not fatal and we simply
1643 * report them via status
1644 */
1645 err = add_page_for_migration(mm, addr, current_node,
1646 &pagelist, flags & MPOL_MF_MOVE_ALL);
1647 if (!err)
1648 continue;
1649
1650 err = store_status(status, i, err, 1);
1651 if (err)
1652 goto out_flush;
1653
1654 err = do_move_pages_to_node(mm, &pagelist, current_node);
1655 if (err)
1656 goto out;
1657 if (i > start) {
1658 err = store_status(status, start, current_node, i - start);
1659 if (err)
1660 goto out;
1661 }
1662 current_node = NUMA_NO_NODE;
1663 }
1664out_flush:
1665 if (list_empty(&pagelist))
1666 return err;
1667
1668 /* Make sure we do not overwrite the existing error */
1669 err1 = do_move_pages_to_node(mm, &pagelist, current_node);
1670 if (!err1)
1671 err1 = store_status(status, start, current_node, i - start);
1672 if (!err)
1673 err = err1;
1674out:
1675 return err;
1676}
1677
1678/*
1679 * Determine the nodes of an array of pages and store it in an array of status.
1680 */
1681static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
1682 const void __user **pages, int *status)
1683{
1684 unsigned long i;
1685
1686 down_read(&mm->mmap_sem);
1687
1688 for (i = 0; i < nr_pages; i++) {
1689 unsigned long addr = (unsigned long)(*pages);
1690 struct vm_area_struct *vma;
1691 struct page *page;
1692 int err = -EFAULT;
1693
1694 vma = find_vma(mm, addr);
1695 if (!vma || addr < vma->vm_start)
1696 goto set_status;
1697
1698 /* FOLL_DUMP to ignore special (like zero) pages */
1699 page = follow_page(vma, addr, FOLL_DUMP);
1700
1701 err = PTR_ERR(page);
1702 if (IS_ERR(page))
1703 goto set_status;
1704
1705 err = page ? page_to_nid(page) : -ENOENT;
1706set_status:
1707 *status = err;
1708
1709 pages++;
1710 status++;
1711 }
1712
1713 up_read(&mm->mmap_sem);
1714}
1715
1716/*
1717 * Determine the nodes of a user array of pages and store them in
1718 * a user array of status.
1719 */
1720static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
1721 const void __user * __user *pages,
1722 int __user *status)
1723{
1724#define DO_PAGES_STAT_CHUNK_NR 16
1725 const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
1726 int chunk_status[DO_PAGES_STAT_CHUNK_NR];
1727
1728 while (nr_pages) {
1729 unsigned long chunk_nr;
1730
1731 chunk_nr = nr_pages;
1732 if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
1733 chunk_nr = DO_PAGES_STAT_CHUNK_NR;
1734
1735 if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
1736 break;
1737
1738 do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
1739
1740 if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
1741 break;
1742
1743 pages += chunk_nr;
1744 status += chunk_nr;
1745 nr_pages -= chunk_nr;
1746 }
1747 return nr_pages ? -EFAULT : 0;
1748}
1749
1750/*
1751 * Move a list of pages in the address space of the currently executing
1752 * process.
1753 */
1754static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
1755 const void __user * __user *pages,
1756 const int __user *nodes,
1757 int __user *status, int flags)
1758{
1759 struct task_struct *task;
1760 struct mm_struct *mm;
1761 int err;
1762 nodemask_t task_nodes;
1763
1764 /* Check flags */
1765 if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
1766 return -EINVAL;
1767
1768 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1769 return -EPERM;
1770
1771 /* Find the mm_struct */
1772 rcu_read_lock();
1773 task = pid ? find_task_by_vpid(pid) : current;
1774 if (!task) {
1775 rcu_read_unlock();
1776 return -ESRCH;
1777 }
1778 get_task_struct(task);
1779
1780 /*
1781 * Check if this process has the right to modify the specified
1782 * process. Use the regular "ptrace_may_access()" checks.
1783 */
1784 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
1785 rcu_read_unlock();
1786 err = -EPERM;
1787 goto out;
1788 }
1789 rcu_read_unlock();
1790
1791 err = security_task_movememory(task);
1792 if (err)
1793 goto out;
1794
1795 task_nodes = cpuset_mems_allowed(task);
1796 mm = get_task_mm(task);
1797 put_task_struct(task);
1798
1799 if (!mm)
1800 return -EINVAL;
1801
1802 if (nodes)
1803 err = do_pages_move(mm, task_nodes, nr_pages, pages,
1804 nodes, status, flags);
1805 else
1806 err = do_pages_stat(mm, nr_pages, pages, status);
1807
1808 mmput(mm);
1809 return err;
1810
1811out:
1812 put_task_struct(task);
1813 return err;
1814}
1815
1816SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
1817 const void __user * __user *, pages,
1818 const int __user *, nodes,
1819 int __user *, status, int, flags)
1820{
1821 return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
1822}
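
/*
 * Illustrative userspace sketch (not part of this file) of how the
 * move_pages(2) interface above is typically driven; the target node and
 * the error handling below are assumptions for the example only.  A
 * non-NULL "nodes" array moves the pages (do_pages_move()), a NULL one
 * only queries their current nodes (do_pages_stat()).  Each status[]
 * entry receives either the page's node id or a negative errno.
 *
 *	#include <numaif.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	static void example_move_to_node1(void *buf, size_t len)
 *	{
 *		long psz = sysconf(_SC_PAGESIZE);
 *		unsigned long i, count = len / psz;
 *		void **pages = calloc(count, sizeof(*pages));
 *		int *nodes = calloc(count, sizeof(*nodes));
 *		int *status = calloc(count, sizeof(*status));
 *
 *		for (i = 0; i < count; i++) {
 *			pages[i] = (char *)buf + i * psz;
 *			nodes[i] = 1;		// hypothetical target node
 *		}
 *		// pid == 0: operate on the calling process.
 *		if (move_pages(0, count, pages, nodes, status, MPOL_MF_MOVE) < 0)
 *			perror("move_pages");
 *		// Query-only mode: nodes == NULL fills status[] with node ids.
 *		move_pages(0, count, pages, NULL, status, 0);
 *		free(pages); free(nodes); free(status);
 *	}
 */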
1823
1824#ifdef CONFIG_COMPAT
1825COMPAT_SYSCALL_DEFINE6(move_pages, pid_t, pid, compat_ulong_t, nr_pages,
1826 compat_uptr_t __user *, pages32,
1827 const int __user *, nodes,
1828 int __user *, status,
1829 int, flags)
1830{
1831 const void __user * __user *pages;
1832 int i;
1833
1834 pages = compat_alloc_user_space(nr_pages * sizeof(void *));
1835 for (i = 0; i < nr_pages; i++) {
1836 compat_uptr_t p;
1837
1838 if (get_user(p, pages32 + i) ||
1839 put_user(compat_ptr(p), pages + i))
1840 return -EFAULT;
1841 }
1842 return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
1843}
1844#endif /* CONFIG_COMPAT */
1845
1846#ifdef CONFIG_NUMA_BALANCING
1847/*
1848 * Returns true if this is a safe migration target node for misplaced NUMA
1849 * pages. Currently it only checks the watermarks, which is a crude test.
1850 */
1851static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
1852 unsigned long nr_migrate_pages)
1853{
1854 int z;
1855
1856 for (z = pgdat->nr_zones - 1; z >= 0; z--) {
1857 struct zone *zone = pgdat->node_zones + z;
1858
1859 if (!populated_zone(zone))
1860 continue;
1861
1862 /* Avoid waking kswapd by allocating nr_migrate_pages pages. */
1863 if (!zone_watermark_ok(zone, 0,
1864 high_wmark_pages(zone) +
1865 nr_migrate_pages,
1866 0, 0))
1867 continue;
1868 return true;
1869 }
1870 return false;
1871}
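
/*
 * Worked example with illustrative numbers: if a zone's high watermark is
 * 1024 pages and 512 pages are to be migrated in, the zone needs at least
 * 1024 + 512 free pages for this node to count as a balanced target;
 * otherwise allocating the migration target pages would eat into the
 * watermark headroom and could end up waking kswapd.
 */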
1872
1873static struct page *alloc_misplaced_dst_page(struct page *page,
1874 unsigned long data)
1875{
1876 int nid = (int) data;
1877 struct page *newpage;
1878
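 /*
 * __GFP_THISNODE pins the allocation to the requested node, while
 * clearing __GFP_RECLAIM (and adding __GFP_NORETRY/__GFP_NOWARN)
 * makes it fail fast and quietly instead of reclaiming: a failed
 * NUMA hinting migration is cheap to give up on.
 */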
1879 newpage = __alloc_pages_node(nid,
1880 (GFP_HIGHUSER_MOVABLE |
1881 __GFP_THISNODE | __GFP_NOMEMALLOC |
1882 __GFP_NORETRY | __GFP_NOWARN) &
1883 ~__GFP_RECLAIM, 0);
1884
1885 return newpage;
1886}
1887
1888static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
1889{
1890 int page_lru;
1891
1892 VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page);
1893
1894 /* Avoid migrating to a node that is nearly full */
1895 if (!migrate_balanced_pgdat(pgdat, compound_nr(page)))
1896 return 0;
1897
1898 if (isolate_lru_page(page))
1899 return 0;
1900
1901 /*
1902 * migrate_misplaced_transhuge_page() skips page migration's usual
1903 * check on page_count(), so we must do it here, now that the page
1904 * has been isolated: a GUP pin, or any other pin, prevents migration.
1905 * The expected page count is 3: 1 for page's mapcount and 1 for the
1906 * caller's pin and 1 for the reference taken by isolate_lru_page().
1907 */
1908 if (PageTransHuge(page) && page_count(page) != 3) {
1909 putback_lru_page(page);
1910 return 0;
1911 }
1912
1913 page_lru = page_is_file_cache(page);
1914 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru,
1915 hpage_nr_pages(page));
1916
1917 /*
1918 * Isolating the page has taken another reference, so the
1919 * caller's reference can be safely dropped without the page
1920 * disappearing underneath us during migration.
1921 */
1922 put_page(page);
1923 return 1;
1924}
1925
1926bool pmd_trans_migrating(pmd_t pmd)
1927{
1928 struct page *page = pmd_page(pmd);
1929 return PageLocked(page);
1930}
1931
1932/*
1933 * Attempt to migrate a misplaced page to the specified destination
1934 * node. Caller is expected to have an elevated reference count on
1935 * the page that will be dropped by this function before returning.
1936 */
1937int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
1938 int node)
1939{
1940 pg_data_t *pgdat = NODE_DATA(node);
1941 int isolated;
1942 int nr_remaining;
1943 LIST_HEAD(migratepages);
1944
1945 /*
1946 * Don't migrate file pages that are mapped in multiple processes
1947 * with execute permissions as they are probably shared libraries.
1948 */
1949 if (page_mapcount(page) != 1 && page_is_file_cache(page) &&
1950 (vma->vm_flags & VM_EXEC))
1951 goto out;
1952
1953 /*
1954 * Also do not migrate dirty pages, as not all filesystems can move
1955 * dirty pages in MIGRATE_ASYNC mode and trying would be a waste of cycles.
1956 */
1957 if (page_is_file_cache(page) && PageDirty(page))
1958 goto out;
1959
1960 isolated = numamigrate_isolate_page(pgdat, page);
1961 if (!isolated)
1962 goto out;
1963
1964 list_add(&page->lru, &migratepages);
1965 nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
1966 NULL, node, MIGRATE_ASYNC,
1967 MR_NUMA_MISPLACED);
1968 if (nr_remaining) {
1969 if (!list_empty(&migratepages)) {
1970 list_del(&page->lru);
1971 dec_node_page_state(page, NR_ISOLATED_ANON +
1972 page_is_file_cache(page));
1973 putback_lru_page(page);
1974 }
1975 isolated = 0;
1976 } else
1977 count_vm_numa_event(NUMA_PAGE_MIGRATE);
1978 BUG_ON(!list_empty(&migratepages));
1979 return isolated;
1980
1981out:
1982 put_page(page);
1983 return 0;
1984}
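
/*
 * Hedged sketch of the expected call site (simplified; the real caller
 * lives in the fault path, not in this file): the NUMA hinting fault
 * handler picks a target node and hands over its page reference, which
 * migrate_misplaced_page() drops on every path.
 *
 *	// do_numa_page()-style caller, simplified and hypothetical:
 *	target_nid = numa_migrate_prep(page, vma, addr, page_nid, &flags);
 *	if (target_nid == NUMA_NO_NODE) {
 *		put_page(page);
 *		return 0;
 *	}
 *	migrated = migrate_misplaced_page(page, vma, target_nid);
 *	if (migrated)
 *		page_nid = target_nid;
 */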
1985#endif /* CONFIG_NUMA_BALANCING */
1986
1987#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
1988/*
1989 * Migrates a THP to a given target node. page must be locked and is unlocked
1990 * before returning.
1991 */
1992int migrate_misplaced_transhuge_page(struct mm_struct *mm,
1993 struct vm_area_struct *vma,
1994 pmd_t *pmd, pmd_t entry,
1995 unsigned long address,
1996 struct page *page, int node)
1997{
1998 spinlock_t *ptl;
1999 pg_data_t *pgdat = NODE_DATA(node);
2000 int isolated = 0;
2001 struct page *new_page = NULL;
2002 int page_lru = page_is_file_cache(page);
2003 unsigned long start = address & HPAGE_PMD_MASK;
2004
2005 new_page = alloc_pages_node(node,
2006 (GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
2007 HPAGE_PMD_ORDER);
2008 if (!new_page)
2009 goto out_fail;
2010 prep_transhuge_page(new_page);
2011
2012 isolated = numamigrate_isolate_page(pgdat, page);
2013 if (!isolated) {
2014 put_page(new_page);
2015 goto out_fail;
2016 }
2017
2018 /* Prepare a page as a migration target */
2019 __SetPageLocked(new_page);
2020 if (PageSwapBacked(page))
2021 __SetPageSwapBacked(new_page);
2022
2023 /* anon mapping, we can simply copy page->mapping to the new page: */
2024 new_page->mapping = page->mapping;
2025 new_page->index = page->index;
2026 /* flush the cache before copying using the kernel virtual address */
2027 flush_cache_range(vma, start, start + HPAGE_PMD_SIZE);
2028 migrate_page_copy(new_page, page);
2029 WARN_ON(PageLRU(new_page));
2030
2031 /* Recheck the target PMD */
2032 ptl = pmd_lock(mm, pmd);
2033 if (unlikely(!pmd_same(*pmd, entry) || !page_ref_freeze(page, 2))) {
2034 spin_unlock(ptl);
2035
2036 /* Reverse changes made by migrate_page_copy() */
2037 if (TestClearPageActive(new_page))
2038 SetPageActive(page);
2039 if (TestClearPageUnevictable(new_page))
2040 SetPageUnevictable(page);
2041
2042 unlock_page(new_page);
2043 put_page(new_page); /* Free it */
2044
2045 /* Retake the caller's reference and put the page back on the LRU */
2046 get_page(page);
2047 putback_lru_page(page);
2048 mod_node_page_state(page_pgdat(page),
2049 NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);
2050
2051 goto out_unlock;
2052 }
2053
2054 entry = mk_huge_pmd(new_page, vma->vm_page_prot);
2055 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
2056
2057 /*
2058 * Overwrite the old entry under pagetable lock and establish
2059 * the new PTE. Any parallel GUP will either observe the old
2060 * page blocking on the page lock, block on the page table
2061 * lock or observe the new page. The SetPageUptodate on the
2062 * new page and page_add_new_anon_rmap guarantee the copy is
2063 * visible before the pagetable update.
2064 */
2065 page_add_anon_rmap(new_page, vma, start, true);
2066 /*
2067 * At this point the pmd is numa/protnone (i.e. non present) and the TLB
2068 * has already been flushed globally. So no TLB can be currently
2069 * caching this non present pmd mapping. There's no need to clear the
2070 * pmd before doing set_pmd_at(), nor to flush the TLB after
2071 * set_pmd_at(). Clearing the pmd here would introduce a race
2072 * condition against MADV_DONTNEED, because MADV_DONTNEED only holds the
2073 * mmap_sem for reading. If the pmd is set to NULL at any given time,
2074 * MADV_DONTNEED won't wait on the pmd lock and it'll skip clearing this
2075 * pmd.
2076 */
2077 set_pmd_at(mm, start, pmd, entry);
2078 update_mmu_cache_pmd(vma, address, &entry);
2079
2080 page_ref_unfreeze(page, 2);
2081 mlock_migrate_page(new_page, page);
2082 page_remove_rmap(page, true);
2083 set_page_owner_migrate_reason(new_page, MR_NUMA_MISPLACED);
2084
2085 spin_unlock(ptl);
2086
2087 /* Take an "isolate" reference and put new page on the LRU. */
2088 get_page(new_page);
2089 putback_lru_page(new_page);
2090
2091 unlock_page(new_page);
2092 unlock_page(page);
2093 put_page(page); /* Drop the rmap reference */
2094 put_page(page); /* Drop the LRU isolation reference */
2095
2096 count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR);
2097 count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR);
2098
2099 mod_node_page_state(page_pgdat(page),
2100 NR_ISOLATED_ANON + page_lru,
2101 -HPAGE_PMD_NR);
2102 return isolated;
2103
2104out_fail:
2105 count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
2106 ptl = pmd_lock(mm, pmd);
2107 if (pmd_same(*pmd, entry)) {
2108 entry = pmd_modify(entry, vma->vm_page_prot);
2109 set_pmd_at(mm, start, pmd, entry);
2110 update_mmu_cache_pmd(vma, address, &entry);
2111 }
2112 spin_unlock(ptl);
2113
2114out_unlock:
2115 unlock_page(page);
2116 put_page(page);
2117 return 0;
2118}
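
/*
 * Hedged sketch of the THP counterpart of the call site above: a
 * do_huge_pmd_numa_page()-style handler (simplified and hypothetical
 * here) passes down the faulting pmd and the locked huge page; on
 * failure the pmd is restored by this function itself.
 *
 *	migrated = migrate_misplaced_transhuge_page(vma->vm_mm, vma,
 *						    vmf->pmd, pmd,
 *						    vmf->address, page,
 *						    target_nid);
 *	if (migrated)
 *		page_nid = target_nid;
 */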
2119#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE */
2120
2121#endif /* CONFIG_NUMA */
2122
2123#ifdef CONFIG_DEVICE_PRIVATE
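/*
 * A hole in the CPU page table (missing pmd or a pte_none() entry) has no
 * source page, but its src entry is still marked MIGRATE_PFN_MIGRATE so
 * that the driver may allocate device memory for the unbacked address;
 * see the migrate_vma_setup() description below.
 */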
2124static int migrate_vma_collect_hole(unsigned long start,
2125 unsigned long end,
2126 struct mm_walk *walk)
2127{
2128 struct migrate_vma *migrate = walk->private;
2129 unsigned long addr;
2130
2131 for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
2132 migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE;
2133 migrate->dst[migrate->npages] = 0;
2134 migrate->npages++;
2135 migrate->cpages++;
2136 }
2137
2138 return 0;
2139}
2140
2141static int migrate_vma_collect_skip(unsigned long start,
2142 unsigned long end,
2143 struct mm_walk *walk)
2144{
2145 struct migrate_vma *migrate = walk->private;
2146 unsigned long addr;
2147
2148 for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
2149 migrate->dst[migrate->npages] = 0;
2150 migrate->src[migrate->npages++] = 0;
2151 }
2152
2153 return 0;
2154}
2155
2156static int migrate_vma_collect_pmd(pmd_t *pmdp,
2157 unsigned long start,
2158 unsigned long end,
2159 struct mm_walk *walk)
2160{
2161 struct migrate_vma *migrate = walk->private;
2162 struct vm_area_struct *vma = walk->vma;
2163 struct mm_struct *mm = vma->vm_mm;
2164 unsigned long addr = start, unmapped = 0;
2165 spinlock_t *ptl;
2166 pte_t *ptep;
2167
2168again:
2169 if (pmd_none(*pmdp))
2170 return migrate_vma_collect_hole(start, end, walk);
2171
2172 if (pmd_trans_huge(*pmdp)) {
2173 struct page *page;
2174
2175 ptl = pmd_lock(mm, pmdp);
2176 if (unlikely(!pmd_trans_huge(*pmdp))) {
2177 spin_unlock(ptl);
2178 goto again;
2179 }
2180
2181 page = pmd_page(*pmdp);
2182 if (is_huge_zero_page(page)) {
2183 spin_unlock(ptl);
2184 split_huge_pmd(vma, pmdp, addr);
2185 if (pmd_trans_unstable(pmdp))
2186 return migrate_vma_collect_skip(start, end,
2187 walk);
2188 } else {
2189 int ret;
2190
2191 get_page(page);
2192 spin_unlock(ptl);
2193 if (unlikely(!trylock_page(page)))
2194 return migrate_vma_collect_skip(start, end,
2195 walk);
2196 ret = split_huge_page(page);
2197 unlock_page(page);
2198 put_page(page);
2199 if (ret)
2200 return migrate_vma_collect_skip(start, end,
2201 walk);
2202 if (pmd_none(*pmdp))
2203 return migrate_vma_collect_hole(start, end,
2204 walk);
2205 }
2206 }
2207
2208 if (unlikely(pmd_bad(*pmdp)))
2209 return migrate_vma_collect_skip(start, end, walk);
2210
2211 ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
2212 arch_enter_lazy_mmu_mode();
2213
2214 for (; addr < end; addr += PAGE_SIZE, ptep++) {
2215 unsigned long mpfn, pfn;
2216 struct page *page;
2217 swp_entry_t entry;
2218 pte_t pte;
2219
2220 pte = *ptep;
2221
2222 if (pte_none(pte)) {
2223 mpfn = MIGRATE_PFN_MIGRATE;
2224 migrate->cpages++;
2225 goto next;
2226 }
2227
2228 if (!pte_present(pte)) {
2229 mpfn = 0;
2230
2231 /*
2232 * Only care about the special page table entries of
2233 * unaddressable device pages. Other special swap entries
2234 * are not migratable, and we ignore regular swapped pages.
2235 */
2236 entry = pte_to_swp_entry(pte);
2237 if (!is_device_private_entry(entry))
2238 goto next;
2239
2240 page = device_private_entry_to_page(entry);
2241 mpfn = migrate_pfn(page_to_pfn(page)) |
2242 MIGRATE_PFN_MIGRATE;
2243 if (is_write_device_private_entry(entry))
2244 mpfn |= MIGRATE_PFN_WRITE;
2245 } else {
2246 pfn = pte_pfn(pte);
2247 if (is_zero_pfn(pfn)) {
2248 mpfn = MIGRATE_PFN_MIGRATE;
2249 migrate->cpages++;
2250 goto next;
2251 }
2252 page = vm_normal_page(migrate->vma, addr, pte);
2253 mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
2254 mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
2255 }
2256
2257 /* FIXME support THP */
2258 if (!page || !page->mapping || PageTransCompound(page)) {
2259 mpfn = 0;
2260 goto next;
2261 }
2262
2263 /*
2264 * By getting a reference on the page we pin it and that blocks
2265 * any kind of migration. Side effect is that it "freezes" the
2266 * pte.
2267 *
2268 * We drop this reference after isolating the page from the lru
2269 * for non-device pages (device pages are not on the lru and thus
2270 * can't be dropped from it).
2271 */
2272 get_page(page);
2273 migrate->cpages++;
2274
2275 /*
2276 * Optimize for the common case where page is only mapped once
2277 * in one process. If we can lock the page, then we can safely
2278 * set up a special migration page table entry now.
2279 */
2280 if (trylock_page(page)) {
2281 pte_t swp_pte;
2282
2283 mpfn |= MIGRATE_PFN_LOCKED;
2284 ptep_get_and_clear(mm, addr, ptep);
2285
2286 /* Setup special migration page table entry */
2287 entry = make_migration_entry(page, mpfn &
2288 MIGRATE_PFN_WRITE);
2289 swp_pte = swp_entry_to_pte(entry);
2290 if (pte_soft_dirty(pte))
2291 swp_pte = pte_swp_mksoft_dirty(swp_pte);
2292 set_pte_at(mm, addr, ptep, swp_pte);
2293
2294 /*
2295 * This is like regular unmap: we remove the rmap and
2296 * drop page refcount. Page won't be freed, as we took
2297 * a reference just above.
2298 */
2299 page_remove_rmap(page, false);
2300 put_page(page);
2301
2302 if (pte_present(pte))
2303 unmapped++;
2304 }
2305
2306next:
2307 migrate->dst[migrate->npages] = 0;
2308 migrate->src[migrate->npages++] = mpfn;
2309 }
2310 arch_leave_lazy_mmu_mode();
2311 pte_unmap_unlock(ptep - 1, ptl);
2312
2313 /* Only flush the TLB if we actually modified any entries */
2314 if (unmapped)
2315 flush_tlb_range(walk->vma, start, end);
2316
2317 return 0;
2318}
2319
2320static const struct mm_walk_ops migrate_vma_walk_ops = {
2321 .pmd_entry = migrate_vma_collect_pmd,
2322 .pte_hole = migrate_vma_collect_hole,
2323};
2324
2325/*
2326 * migrate_vma_collect() - collect pages over a range of virtual addresses
2327 * @migrate: migrate struct containing all migration information
2328 *
2329 * This will walk the CPU page table. For each virtual address backed by a
2330 * valid page, it updates the src array and takes a reference on the page, in
2331 * order to pin the page until we lock it and unmap it.
2332 */
2333static void migrate_vma_collect(struct migrate_vma *migrate)
2334{
2335 struct mmu_notifier_range range;
2336
2337 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL,
2338 migrate->vma->vm_mm, migrate->start, migrate->end);
2339 mmu_notifier_invalidate_range_start(&range);
2340
2341 walk_page_range(migrate->vma->vm_mm, migrate->start, migrate->end,
2342 &migrate_vma_walk_ops, migrate);
2343
2344 mmu_notifier_invalidate_range_end(&range);
2345 migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT);
2346}
2347
2348/*
2349 * migrate_vma_check_page() - check if page is pinned or not
2350 * @page: struct page to check
2351 *
2352 * Pinned pages cannot be migrated. This is the same test as in
2353 * migrate_page_move_mapping(), except that here we allow migration of a
2354 * ZONE_DEVICE page.
2355 */
2356static bool migrate_vma_check_page(struct page *page)
2357{
2358 /*
2359 * One extra ref because caller holds an extra reference, either from
2360 * isolate_lru_page() for a regular page, or migrate_vma_collect() for
2361 * a device page.
2362 */
2363 int extra = 1;
2364
2365 /*
2366 * FIXME support THP (transparent huge page), it is a bit more complex to
2367 * check them than regular pages, because they can be mapped with a pmd
2368 * or with a pte (split pte mapping).
2369 */
2370 if (PageCompound(page))
2371 return false;
2372
2373 /* Pages from ZONE_DEVICE have one extra reference */
2374 if (is_zone_device_page(page)) {
2375 /*
2376 * Private pages can never be pinned as they have no valid pte and
2377 * GUP will fail for those. Yet if there is a pending migration a
2378 * thread might try to wait on the pte migration entry and will bump
2379 * the page reference count. Sadly there is no way to differentiate
2380 * a regular pin from a migration wait. Hence, to avoid two racing
2381 * threads trying to migrate back to the CPU and entering an infinite
2382 * loop (one stopping the migration because the other is waiting on
2383 * the pte migration entry), we always return true here.
2384 *
2385 * FIXME the proper solution is to rework migration_entry_wait() so
2386 * it does not need to take a reference on the page.
2387 */
2388 return is_device_private_page(page);
2389 }
2390
2391 /* For file-backed pages */
2392 if (page_mapping(page))
2393 extra += 1 + page_has_private(page);
2394
2395 if ((page_count(page) - extra) > page_mapcount(page))
2396 return false;
2397
2398 return true;
2399}
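
/*
 * Worked example of the accounting above, with illustrative numbers: an
 * anonymous page mapped in exactly one process has page_mapcount() == 1
 * and, while it is held by the migration code, page_count() == 2 (one
 * reference from the mapping plus the one taken when it was collected or
 * isolated), so count - extra == mapcount and the page counts as
 * unpinned.  A concurrent GUP pin raises page_count() to 3 and makes the
 * check fail.
 */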
2400
2401/*
2402 * migrate_vma_prepare() - lock pages and isolate them from the lru
2403 * @migrate: migrate struct containing all migration information
2404 *
2405 * This locks pages that have been collected by migrate_vma_collect(). Once each
2406 * page is locked it is isolated from the lru (for non-device pages). Finally,
2407 * the ref taken by migrate_vma_collect() is dropped, as locked pages cannot be
2408 * migrated by concurrent kernel threads.
2409 */
2410static void migrate_vma_prepare(struct migrate_vma *migrate)
2411{
2412 const unsigned long npages = migrate->npages;
2413 const unsigned long start = migrate->start;
2414 unsigned long addr, i, restore = 0;
2415 bool allow_drain = true;
2416
2417 lru_add_drain();
2418
2419 for (i = 0; (i < npages) && migrate->cpages; i++) {
2420 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2421 bool remap = true;
2422
2423 if (!page)
2424 continue;
2425
2426 if (!(migrate->src[i] & MIGRATE_PFN_LOCKED)) {
2427 /*
2428 * Because we are migrating several pages there can be
2429 * a deadlock between two concurrent migrations where
2430 * each is waiting on the other's page lock.
2431 *
2432 * Make migrate_vma() a best-effort thing and back off
2433 * for any page we cannot lock right away.
2434 */
2435 if (!trylock_page(page)) {
2436 migrate->src[i] = 0;
2437 migrate->cpages--;
2438 put_page(page);
2439 continue;
2440 }
2441 remap = false;
2442 migrate->src[i] |= MIGRATE_PFN_LOCKED;
2443 }
2444
2445 /* ZONE_DEVICE pages are not on LRU */
2446 if (!is_zone_device_page(page)) {
2447 if (!PageLRU(page) && allow_drain) {
2448 /* Drain CPU's pagevec */
2449 lru_add_drain_all();
2450 allow_drain = false;
2451 }
2452
2453 if (isolate_lru_page(page)) {
2454 if (remap) {
2455 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2456 migrate->cpages--;
2457 restore++;
2458 } else {
2459 migrate->src[i] = 0;
2460 unlock_page(page);
2461 migrate->cpages--;
2462 put_page(page);
2463 }
2464 continue;
2465 }
2466
2467 /* Drop the reference we took in collect */
2468 put_page(page);
2469 }
2470
2471 if (!migrate_vma_check_page(page)) {
2472 if (remap) {
2473 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2474 migrate->cpages--;
2475 restore++;
2476
2477 if (!is_zone_device_page(page)) {
2478 get_page(page);
2479 putback_lru_page(page);
2480 }
2481 } else {
2482 migrate->src[i] = 0;
2483 unlock_page(page);
2484 migrate->cpages--;
2485
2486 if (!is_zone_device_page(page))
2487 putback_lru_page(page);
2488 else
2489 put_page(page);
2490 }
2491 }
2492 }
2493
2494 for (i = 0, addr = start; i < npages && restore; i++, addr += PAGE_SIZE) {
2495 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2496
2497 if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
2498 continue;
2499
2500 remove_migration_pte(page, migrate->vma, addr, page);
2501
2502 migrate->src[i] = 0;
2503 unlock_page(page);
2504 put_page(page);
2505 restore--;
2506 }
2507}
2508
2509/*
2510 * migrate_vma_unmap() - replace page mapping with special migration pte entry
2511 * @migrate: migrate struct containing all migration information
2512 *
2513 * Replace page mapping (CPU page table pte) with a special migration pte entry
2514 * and check again if it has been pinned. Pinned pages are restored because we
2515 * cannot migrate them.
2516 *
2517 * This is the last step before we call the device driver callback to allocate
2518 * destination memory and copy the contents of the original page to the new one.
2519 */
2520static void migrate_vma_unmap(struct migrate_vma *migrate)
2521{
2522 int flags = TTU_MIGRATION | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
2523 const unsigned long npages = migrate->npages;
2524 const unsigned long start = migrate->start;
2525 unsigned long addr, i, restore = 0;
2526
2527 for (i = 0; i < npages; i++) {
2528 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2529
2530 if (!page || !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
2531 continue;
2532
2533 if (page_mapped(page)) {
2534 try_to_unmap(page, flags);
2535 if (page_mapped(page))
2536 goto restore;
2537 }
2538
2539 if (migrate_vma_check_page(page))
2540 continue;
2541
2542restore:
2543 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2544 migrate->cpages--;
2545 restore++;
2546 }
2547
2548 for (addr = start, i = 0; i < npages && restore; addr += PAGE_SIZE, i++) {
2549 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2550
2551 if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
2552 continue;
2553
2554 remove_migration_ptes(page, page, false);
2555
2556 migrate->src[i] = 0;
2557 unlock_page(page);
2558 restore--;
2559
2560 if (is_zone_device_page(page))
2561 put_page(page);
2562 else
2563 putback_lru_page(page);
2564 }
2565}
2566
2567/**
2568 * migrate_vma_setup() - prepare to migrate a range of memory
2569 * @args: contains the vma, start, and pfns arrays for the migration
2570 *
2571 * Returns: negative errno on failures, 0 when 0 or more pages were migrated
2572 * without an error.
2573 *
2574 * Prepare to migrate a range of virtual addresses by collecting all the pages
2575 * backing each virtual address in the range, saving them inside the src array.
2576 * Then lock those pages and unmap them. Once the pages are locked and
2577 * unmapped, check whether each page is pinned or not. Pages that aren't
2578 * pinned have the MIGRATE_PFN_MIGRATE flag set (by this function) in the
2579 * corresponding src array entry. Any pages that turn out to be pinned are
2580 * then restored by remapping and unlocking them.
2581 *
2582 * The caller should then allocate destination memory and copy source memory to
2583 * it for all those entries (ie with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE
2584 * flag set). Once these are allocated and copied, the caller must update each
2585 * corresponding entry in the dst array with the pfn value of the destination
2586 * page and with the MIGRATE_PFN_VALID and MIGRATE_PFN_LOCKED flags set
2587 * (destination pages must have their struct pages locked, via lock_page()).
2588 *
2589 * Note that the caller does not have to migrate all the pages that are marked
2590 * with MIGRATE_PFN_MIGRATE flag in src array unless this is a migration from
2591 * device memory to system memory. If the caller cannot migrate a device page
2592 * back to system memory, then it must return VM_FAULT_SIGBUS, which has severe
2593 * consequences for the userspace process, so it must be avoided if at all
2594 * possible.
2595 *
2596 * For empty entries inside the CPU page table (pte_none() or pmd_none() is
2597 * true) we do set the MIGRATE_PFN_MIGRATE flag inside the corresponding source
2598 * array, thus allowing the caller to allocate device memory for those unbacked
2599 * virtual addresses. For this the caller simply has to allocate device memory
2600 * and properly set the destination entry like for regular migration. Note that
2601 * this can still fail, and thus the device driver must check whether the
2602 * migration was successful for those entries after calling migrate_vma_pages(),
2603 * just like for regular migration.
2604 *
2605 * After that, the caller must call migrate_vma_pages() to go over each entry
2606 * in the src array that has the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flags
2607 * set. If the corresponding entry in the dst array has the MIGRATE_PFN_VALID
2608 * flag set, then migrate_vma_pages() migrates struct page information from the
2609 * source struct page to the destination struct page. If it fails to migrate
2610 * the struct page information, then it clears the MIGRATE_PFN_MIGRATE flag in
2611 * the src array.
2612 *
2613 * At this point all successfully migrated pages have an entry in the src
2614 * array with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag set and the dst
2615 * array entry with MIGRATE_PFN_VALID flag set.
2616 *
2617 * Once migrate_vma_pages() returns the caller may inspect which pages were
2618 * successfully migrated, and which were not. Successfully migrated pages will
2619 * have the MIGRATE_PFN_MIGRATE flag set for their src array entry.
2620 *
2621 * It is safe to update device page table after migrate_vma_pages() because
2622 * both destination and source page are still locked, and the mmap_sem is held
2623 * in read mode (hence no one can unmap the range being migrated).
2624 *
2625 * Once the caller is done cleaning up things and updating its page table (if it
2626 * chose to do so, this is not an obligation) it finally calls
2627 * migrate_vma_finalize() to update the CPU page table to point to new pages
2628 * for successfully migrated pages or otherwise restore the CPU page table to
2629 * point to the original source pages.
2630 */
2631int migrate_vma_setup(struct migrate_vma *args)
2632{
2633 long nr_pages = (args->end - args->start) >> PAGE_SHIFT;
2634
2635 args->start &= PAGE_MASK;
2636 args->end &= PAGE_MASK;
2637 if (!args->vma || is_vm_hugetlb_page(args->vma) ||
2638 (args->vma->vm_flags & VM_SPECIAL) || vma_is_dax(args->vma))
2639 return -EINVAL;
2640 if (nr_pages <= 0)
2641 return -EINVAL;
2642 if (args->start < args->vma->vm_start ||
2643 args->start >= args->vma->vm_end)
2644 return -EINVAL;
2645 if (args->end <= args->vma->vm_start || args->end > args->vma->vm_end)
2646 return -EINVAL;
2647 if (!args->src || !args->dst)
2648 return -EINVAL;
2649
2650 memset(args->src, 0, sizeof(*args->src) * nr_pages);
2651 args->cpages = 0;
2652 args->npages = 0;
2653
2654 migrate_vma_collect(args);
2655
2656 if (args->cpages)
2657 migrate_vma_prepare(args);
2658 if (args->cpages)
2659 migrate_vma_unmap(args);
2660
2661 /*
2662 * At this point pages are locked and unmapped, and thus they have
2663 * stable content and can safely be copied to destination memory that
2664 * is allocated by the drivers.
2665 */
2666 return 0;
2667
2668}
2669EXPORT_SYMBOL(migrate_vma_setup);
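
/*
 * Minimal sketch of the driver-side flow documented above.  It is an
 * illustration only: the device-page allocation and copy helpers
 * (example_alloc_device_page(), example_copy_to_device()) are
 * hypothetical placeholders, not APIs provided by this file, and the
 * on-stack arrays assume the range covers at most 64 pages.
 *
 *	static int example_migrate_range(struct vm_area_struct *vma,
 *					 unsigned long start,
 *					 unsigned long end)
 *	{
 *		unsigned long src[64], dst[64];
 *		struct migrate_vma args = {
 *			.vma	= vma,
 *			.start	= start,
 *			.end	= end,
 *			.src	= src,
 *			.dst	= dst,
 *		};
 *		unsigned long i;
 *		int ret;
 *
 *		ret = migrate_vma_setup(&args);
 *		if (ret)
 *			return ret;
 *
 *		for (i = 0; i < args.npages; i++) {
 *			struct page *dpage;
 *
 *			if (!(args.src[i] & MIGRATE_PFN_MIGRATE))
 *				continue;
 *			dpage = example_alloc_device_page();	// hypothetical
 *			if (!dpage)
 *				continue;
 *			lock_page(dpage);
 *			example_copy_to_device(dpage, args.src[i]);	// hypothetical
 *			args.dst[i] = migrate_pfn(page_to_pfn(dpage)) |
 *				      MIGRATE_PFN_LOCKED;
 *		}
 *
 *		migrate_vma_pages(&args);
 *		// The device page table may be updated here; source and
 *		// destination pages are still locked at this point.
 *		migrate_vma_finalize(&args);
 *		return 0;
 *	}
 */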
2670
2671static void migrate_vma_insert_page(struct migrate_vma *migrate,
2672 unsigned long addr,
2673 struct page *page,
2674 unsigned long *src,
2675 unsigned long *dst)
2676{
2677 struct vm_area_struct *vma = migrate->vma;
2678 struct mm_struct *mm = vma->vm_mm;
2679 struct mem_cgroup *memcg;
2680 bool flush = false;
2681 spinlock_t *ptl;
2682 pte_t entry;
2683 pgd_t *pgdp;
2684 p4d_t *p4dp;
2685 pud_t *pudp;
2686 pmd_t *pmdp;
2687 pte_t *ptep;
2688
2689 /* Only allow populating anonymous memory */
2690 if (!vma_is_anonymous(vma))
2691 goto abort;
2692
2693 pgdp = pgd_offset(mm, addr);
2694 p4dp = p4d_alloc(mm, pgdp, addr);
2695 if (!p4dp)
2696 goto abort;
2697 pudp = pud_alloc(mm, p4dp, addr);
2698 if (!pudp)
2699 goto abort;
2700 pmdp = pmd_alloc(mm, pudp, addr);
2701 if (!pmdp)
2702 goto abort;
2703
2704 if (pmd_trans_huge(*pmdp) || pmd_devmap(*pmdp))
2705 goto abort;
2706
2707 /*
2708 * Use pte_alloc() instead of pte_alloc_map(). We can't run
2709 * pte_offset_map() on pmds where a huge pmd might be created
2710 * from a different thread.
2711 *
2712 * pte_alloc_map() is safe to use under down_write(mmap_sem) or when
2713 * parallel threads are excluded by other means.
2714 *
2715 * Here we only have down_read(mmap_sem).
2716 */
2717 if (pte_alloc(mm, pmdp))
2718 goto abort;
2719
2720 /* See the comment in pte_alloc_one_map() */
2721 if (unlikely(pmd_trans_unstable(pmdp)))
2722 goto abort;
2723
2724 if (unlikely(anon_vma_prepare(vma)))
2725 goto abort;
2726 if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, &memcg, false))
2727 goto abort;
2728
2729 /*
2730 * The memory barrier inside __SetPageUptodate makes sure that
2731 * preceding stores to the page contents become visible before
2732 * the set_pte_at() write.
2733 */
2734 __SetPageUptodate(page);
2735
2736 if (is_zone_device_page(page)) {
2737 if (is_device_private_page(page)) {
2738 swp_entry_t swp_entry;
2739
2740 swp_entry = make_device_private_entry(page, vma->vm_flags & VM_WRITE);
2741 entry = swp_entry_to_pte(swp_entry);
2742 }
2743 } else {
2744 entry = mk_pte(page, vma->vm_page_prot);
2745 if (vma->vm_flags & VM_WRITE)
2746 entry = pte_mkwrite(pte_mkdirty(entry));
2747 }
2748
2749 ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
2750
2751 if (pte_present(*ptep)) {
2752 unsigned long pfn = pte_pfn(*ptep);
2753
2754 if (!is_zero_pfn(pfn)) {
2755 pte_unmap_unlock(ptep, ptl);
2756 mem_cgroup_cancel_charge(page, memcg, false);
2757 goto abort;
2758 }
2759 flush = true;
2760 } else if (!pte_none(*ptep)) {
2761 pte_unmap_unlock(ptep, ptl);
2762 mem_cgroup_cancel_charge(page, memcg, false);
2763 goto abort;
2764 }
2765
2766 /*
2767 * Check for userfaultfd but do not deliver the fault. Instead,
2768 * just back off.
2769 */
2770 if (userfaultfd_missing(vma)) {
2771 pte_unmap_unlock(ptep, ptl);
2772 mem_cgroup_cancel_charge(page, memcg, false);
2773 goto abort;
2774 }
2775
2776 inc_mm_counter(mm, MM_ANONPAGES);
2777 page_add_new_anon_rmap(page, vma, addr, false);
2778 mem_cgroup_commit_charge(page, memcg, false, false);
2779 if (!is_zone_device_page(page))
2780 lru_cache_add_active_or_unevictable(page, vma);
2781 get_page(page);
2782
2783 if (flush) {
2784 flush_cache_page(vma, addr, pte_pfn(*ptep));
2785 ptep_clear_flush_notify(vma, addr, ptep);
2786 set_pte_at_notify(mm, addr, ptep, entry);
2787 update_mmu_cache(vma, addr, ptep);
2788 } else {
2789 /* No need to invalidate - it was non-present before */
2790 set_pte_at(mm, addr, ptep, entry);
2791 update_mmu_cache(vma, addr, ptep);
2792 }
2793
2794 pte_unmap_unlock(ptep, ptl);
2795 *src = MIGRATE_PFN_MIGRATE;
2796 return;
2797
2798abort:
2799 *src &= ~MIGRATE_PFN_MIGRATE;
2800}
2801
2802/**
2803 * migrate_vma_pages() - migrate meta-data from src page to dst page
2804 * @migrate: migrate struct containing all migration information
2805 *
2806 * This migrates struct page meta-data from source struct page to destination
2807 * struct page. This effectively finishes the migration from source page to the
2808 * destination page.
2809 */
2810void migrate_vma_pages(struct migrate_vma *migrate)
2811{
2812 const unsigned long npages = migrate->npages;
2813 const unsigned long start = migrate->start;
2814 struct mmu_notifier_range range;
2815 unsigned long addr, i;
2816 bool notified = false;
2817
2818 for (i = 0, addr = start; i < npages; addr += PAGE_SIZE, i++) {
2819 struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
2820 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2821 struct address_space *mapping;
2822 int r;
2823
2824 if (!newpage) {
2825 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2826 continue;
2827 }
2828
2829 if (!page) {
2830 if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE)) {
2831 continue;
2832 }
2833 if (!notified) {
2834 notified = true;
2835
2836 mmu_notifier_range_init(&range,
2837 MMU_NOTIFY_CLEAR, 0,
2838 NULL,
2839 migrate->vma->vm_mm,
2840 addr, migrate->end);
2841 mmu_notifier_invalidate_range_start(&range);
2842 }
2843 migrate_vma_insert_page(migrate, addr, newpage,
2844 &migrate->src[i],
2845 &migrate->dst[i]);
2846 continue;
2847 }
2848
2849 mapping = page_mapping(page);
2850
2851 if (is_zone_device_page(newpage)) {
2852 if (is_device_private_page(newpage)) {
2853 /*
2854 * For now only support private anonymous memory
2855 * when migrating to un-addressable device memory.
2856 */
2857 if (mapping) {
2858 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2859 continue;
2860 }
2861 } else {
2862 /*
2863 * Other types of ZONE_DEVICE page are not
2864 * supported.
2865 */
2866 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2867 continue;
2868 }
2869 }
2870
2871 r = migrate_page(mapping, newpage, page, MIGRATE_SYNC_NO_COPY);
2872 if (r != MIGRATEPAGE_SUCCESS)
2873 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2874 }
2875
2876 /*
2877 * No need to call the mmu_notifier->invalidate_range() callback again,
2878 * as the ptep_clear_flush_notify() inside migrate_vma_insert_page()
2879 * above has already called it.
2880 */
2881 if (notified)
2882 mmu_notifier_invalidate_range_only_end(&range);
2883}
2884EXPORT_SYMBOL(migrate_vma_pages);
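
/*
 * After migrate_vma_pages() a driver can tell which entries actually
 * migrated by re-checking MIGRATE_PFN_MIGRATE in the src array, e.g.
 * (illustrative snippet, example_note_migrated() is hypothetical):
 *
 *	for (i = 0; i < args.npages; i++)
 *		if (args.src[i] & MIGRATE_PFN_MIGRATE)
 *			example_note_migrated(&args, i);
 */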
2885
2886/**
2887 * migrate_vma_finalize() - restore CPU page table entry
2888 * @migrate: migrate struct containing all migration information
2889 *
2890 * This replaces the special migration pte entry with either a mapping to the
2891 * new page if migration was successful for that page, or to the original page
2892 * otherwise.
2893 *
2894 * This also unlocks the pages and puts them back on the lru, or, for device
2895 * pages, drops the extra refcount.
2896 */
2897void migrate_vma_finalize(struct migrate_vma *migrate)
2898{
2899 const unsigned long npages = migrate->npages;
2900 unsigned long i;
2901
2902 for (i = 0; i < npages; i++) {
2903 struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
2904 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2905
2906 if (!page) {
2907 if (newpage) {
2908 unlock_page(newpage);
2909 put_page(newpage);
2910 }
2911 continue;
2912 }
2913
2914 if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE) || !newpage) {
2915 if (newpage) {
2916 unlock_page(newpage);
2917 put_page(newpage);
2918 }
2919 newpage = page;
2920 }
2921
2922 remove_migration_ptes(page, newpage, false);
2923 unlock_page(page);
2924 migrate->cpages--;
2925
2926 if (is_zone_device_page(page))
2927 put_page(page);
2928 else
2929 putback_lru_page(page);
2930
2931 if (newpage != page) {
2932 unlock_page(newpage);
2933 if (is_zone_device_page(newpage))
2934 put_page(newpage);
2935 else
2936 putback_lru_page(newpage);
2937 }
2938 }
2939}
2940EXPORT_SYMBOL(migrate_vma_finalize);
2941#endif /* CONFIG_DEVICE_PRIVATE */