/* Linux v6.2 */
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Memory Migration functionality - linux/mm/migrate.c
   4 *
   5 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
   6 *
   7 * Page migration was first developed in the context of the memory hotplug
   8 * project. The main authors of the migration code are:
   9 *
  10 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
  11 * Hirokazu Takahashi <taka@valinux.co.jp>
  12 * Dave Hansen <haveblue@us.ibm.com>
  13 * Christoph Lameter
  14 */
  15
  16#include <linux/migrate.h>
  17#include <linux/export.h>
  18#include <linux/swap.h>
  19#include <linux/swapops.h>
  20#include <linux/pagemap.h>
  21#include <linux/buffer_head.h>
  22#include <linux/mm_inline.h>
  23#include <linux/nsproxy.h>
  24#include <linux/pagevec.h>
  25#include <linux/ksm.h>
  26#include <linux/rmap.h>
  27#include <linux/topology.h>
  28#include <linux/cpu.h>
  29#include <linux/cpuset.h>
  30#include <linux/writeback.h>
  31#include <linux/mempolicy.h>
  32#include <linux/vmalloc.h>
  33#include <linux/security.h>
  34#include <linux/backing-dev.h>
  35#include <linux/compaction.h>
  36#include <linux/syscalls.h>
  37#include <linux/compat.h>
  38#include <linux/hugetlb.h>
  39#include <linux/hugetlb_cgroup.h>
  40#include <linux/gfp.h>
  41#include <linux/pfn_t.h>
  42#include <linux/memremap.h>
  43#include <linux/userfaultfd_k.h>
  44#include <linux/balloon_compaction.h>
  45#include <linux/page_idle.h>
  46#include <linux/page_owner.h>
  47#include <linux/sched/mm.h>
  48#include <linux/ptrace.h>
  49#include <linux/oom.h>
  50#include <linux/memory.h>
  51#include <linux/random.h>
  52#include <linux/sched/sysctl.h>
  53#include <linux/memory-tiers.h>
  54
  55#include <asm/tlbflush.h>
  56
  57#include <trace/events/migrate.h>
  58
  59#include "internal.h"
  60
  61int isolate_movable_page(struct page *page, isolate_mode_t mode)
  62{
  63	const struct movable_operations *mops;
  64
  65	/*
   66	 * Avoid burning cycles on pages that are still under __free_pages(),
   67	 * or that just got freed under us.
   68	 *
   69	 * In case we 'win' a race for a movable page being freed under us and
   70	 * raise its refcount, preventing __free_pages() from doing its job,
   71	 * the put_page() at the end of this block will take care of
   72	 * releasing this page, thus avoiding a nasty leak.
  73	 */
  74	if (unlikely(!get_page_unless_zero(page)))
  75		goto out;
  76
  77	if (unlikely(PageSlab(page)))
  78		goto out_putpage;
  79	/* Pairs with smp_wmb() in slab freeing, e.g. SLUB's __free_slab() */
  80	smp_rmb();
  81	/*
   82	 * Check the movable flag before taking the page lock: the page's
   83	 * owner may use non-atomic bitops on newly allocated page flags, so
   84	 * unconditionally grabbing the lock would corrupt the owner's updates.
  85	 */
  86	if (unlikely(!__PageMovable(page)))
  87		goto out_putpage;
  88	/* Pairs with smp_wmb() in slab allocation, e.g. SLUB's alloc_slab_page() */
  89	smp_rmb();
  90	if (unlikely(PageSlab(page)))
  91		goto out_putpage;
  92
  93	/*
  94	 * As movable pages are not isolated from LRU lists, concurrent
  95	 * compaction threads can race against page migration functions
   96	 * as well as race against the release of a page.
  97	 *
  98	 * In order to avoid having an already isolated movable page
  99	 * being (wrongly) re-isolated while it is under migration,
 100	 * or to avoid attempting to isolate pages being released,
  101	 * let's be sure we have the page lock
 102	 * before proceeding with the movable page isolation steps.
 103	 */
 104	if (unlikely(!trylock_page(page)))
 105		goto out_putpage;
 106
 107	if (!PageMovable(page) || PageIsolated(page))
 108		goto out_no_isolated;
 109
 110	mops = page_movable_ops(page);
 111	VM_BUG_ON_PAGE(!mops, page);
 112
 113	if (!mops->isolate_page(page, mode))
 114		goto out_no_isolated;
 115
 116	/* Driver shouldn't use PG_isolated bit of page->flags */
 117	WARN_ON_ONCE(PageIsolated(page));
 118	SetPageIsolated(page);
 119	unlock_page(page);
 120
 121	return 0;
 122
 123out_no_isolated:
 124	unlock_page(page);
 125out_putpage:
 126	put_page(page);
 127out:
 128	return -EBUSY;
 129}
 130
 131static void putback_movable_page(struct page *page)
 132{
 133	const struct movable_operations *mops = page_movable_ops(page);
 134
 135	mops->putback_page(page);
 136	ClearPageIsolated(page);
 137}
 138
 139/*
 140 * Put previously isolated pages back onto the appropriate lists
 141 * from where they were once taken off for compaction/migration.
 142 *
 143 * This function shall be used whenever the isolated pageset has been
  144 * built from LRU, balloon, or hugetlbfs pages. See isolate_migratepages_range()
 145 * and isolate_hugetlb().
 146 */
 147void putback_movable_pages(struct list_head *l)
 148{
 149	struct page *page;
 150	struct page *page2;
 151
 152	list_for_each_entry_safe(page, page2, l, lru) {
 153		if (unlikely(PageHuge(page))) {
 154			putback_active_hugepage(page);
 155			continue;
 156		}
 157		list_del(&page->lru);
 158		/*
  159		 * We isolated a non-LRU movable page, so here we can use
  160		 * __PageMovable because an LRU page's mapping cannot have
  161		 * PAGE_MAPPING_MOVABLE set.
 162		 */
 163		if (unlikely(__PageMovable(page))) {
 164			VM_BUG_ON_PAGE(!PageIsolated(page), page);
 165			lock_page(page);
 166			if (PageMovable(page))
 167				putback_movable_page(page);
 168			else
 169				ClearPageIsolated(page);
 170			unlock_page(page);
 171			put_page(page);
 172		} else {
 173			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
 174					page_is_file_lru(page), -thp_nr_pages(page));
 175			putback_lru_page(page);
 176		}
 177	}
 178}
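
/*
 * Illustrative sketch, not part of migrate.c: how a driver opts its pages
 * into the isolate/migrate/putback protocol used above. The registration
 * API (struct movable_operations, __SetPageMovable() from <linux/migrate.h>)
 * is real; everything prefixed "demo_" is a hypothetical driver. Guarded
 * by #if 0 so it is never built.
 */
#if 0
static bool demo_isolate(struct page *page, isolate_mode_t mode)
{
	/* Called by isolate_movable_page() with the page locked. */
	return true;	/* accept isolation */
}

static int demo_migrate(struct page *dst, struct page *src,
			enum migrate_mode mode)
{
	/* Copy contents and driver metadata from src to dst here. */
	return MIGRATEPAGE_SUCCESS;
}

static void demo_putback(struct page *page)
{
	/* Undo demo_isolate(): requeue the page on the driver's lists. */
}

static const struct movable_operations demo_mops = {
	.isolate_page	= demo_isolate,
	.migrate_page	= demo_migrate,
	.putback_page	= demo_putback,
};

/* The driver marks a page movable while holding the page lock. */
static void demo_mark_movable(struct page *page)
{
	__SetPageMovable(page, &demo_mops);
}
#endif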
 179
 180/*
 181 * Restore a potential migration pte to a working pte entry
 182 */
 183static bool remove_migration_pte(struct folio *folio,
 184		struct vm_area_struct *vma, unsigned long addr, void *old)
 185{
 186	DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION);
 187
 188	while (page_vma_mapped_walk(&pvmw)) {
 189		rmap_t rmap_flags = RMAP_NONE;
 190		pte_t pte;
 191		swp_entry_t entry;
 192		struct page *new;
 193		unsigned long idx = 0;
 194
 195		/* pgoff is invalid for ksm pages, but they are never large */
 196		if (folio_test_large(folio) && !folio_test_hugetlb(folio))
 197			idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff;
 198		new = folio_page(folio, idx);
 199
 200#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
 201		/* PMD-mapped THP migration entry */
 202		if (!pvmw.pte) {
 203			VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
 204					!folio_test_pmd_mappable(folio), folio);
 205			remove_migration_pmd(&pvmw, new);
 206			continue;
 207		}
 208#endif
 209
 210		folio_get(folio);
 211		pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
 212		if (pte_swp_soft_dirty(*pvmw.pte))
 213			pte = pte_mksoft_dirty(pte);
 214
 215		/*
 216		 * Recheck VMA as permissions can change since migration started
 217		 */
 218		entry = pte_to_swp_entry(*pvmw.pte);
 219		if (!is_migration_entry_young(entry))
 220			pte = pte_mkold(pte);
 221		if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
 222			pte = pte_mkdirty(pte);
 223		if (is_writable_migration_entry(entry))
 224			pte = maybe_mkwrite(pte, vma);
 225		else if (pte_swp_uffd_wp(*pvmw.pte))
 226			pte = pte_mkuffd_wp(pte);
 227		else
 228			pte = pte_wrprotect(pte);
 229
 230		if (folio_test_anon(folio) && !is_readable_migration_entry(entry))
 231			rmap_flags |= RMAP_EXCLUSIVE;
 232
 233		if (unlikely(is_device_private_page(new))) {
 234			if (pte_write(pte))
 235				entry = make_writable_device_private_entry(
 236							page_to_pfn(new));
 237			else
 238				entry = make_readable_device_private_entry(
 239							page_to_pfn(new));
 240			pte = swp_entry_to_pte(entry);
 241			if (pte_swp_soft_dirty(*pvmw.pte))
 242				pte = pte_swp_mksoft_dirty(pte);
 243			if (pte_swp_uffd_wp(*pvmw.pte))
 244				pte = pte_swp_mkuffd_wp(pte);
 245		}
 246
 247#ifdef CONFIG_HUGETLB_PAGE
 248		if (folio_test_hugetlb(folio)) {
 249			unsigned int shift = huge_page_shift(hstate_vma(vma));
 250
 251			pte = pte_mkhuge(pte);
 252			pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
 253			if (folio_test_anon(folio))
 254				hugepage_add_anon_rmap(new, vma, pvmw.address,
 255						       rmap_flags);
 256			else
 257				page_dup_file_rmap(new, true);
 258			set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
 259		} else
 260#endif
 261		{
 262			if (folio_test_anon(folio))
 263				page_add_anon_rmap(new, vma, pvmw.address,
 264						   rmap_flags);
 265			else
 266				page_add_file_rmap(new, vma, false);
 267			set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
 268		}
 269		if (vma->vm_flags & VM_LOCKED)
 270			mlock_page_drain_local();
 271
 272		trace_remove_migration_pte(pvmw.address, pte_val(pte),
 273					   compound_order(new));
 274
 275		/* No need to invalidate - it was non-present before */
 276		update_mmu_cache(vma, pvmw.address, pvmw.pte);
 277	}
 278
 279	return true;
 280}
 281
 282/*
 283 * Get rid of all migration entries and replace them by
 284 * references to the indicated page.
 285 */
 286void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked)
 287{
 288	struct rmap_walk_control rwc = {
 289		.rmap_one = remove_migration_pte,
 290		.arg = src,
 291	};
 292
 293	if (locked)
 294		rmap_walk_locked(dst, &rwc);
 295	else
 296		rmap_walk(dst, &rwc);
 297}
 298
 299/*
 300 * Something used the pte of a page under migration. We need to
 301 * get to the page and wait until migration is finished.
 302 * When we return from this function the fault will be retried.
 303 */
 304void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
 305				spinlock_t *ptl)
 306{
 307	pte_t pte;
 308	swp_entry_t entry;
 309
 310	spin_lock(ptl);
 311	pte = *ptep;
 312	if (!is_swap_pte(pte))
 313		goto out;
 314
 315	entry = pte_to_swp_entry(pte);
 316	if (!is_migration_entry(entry))
 317		goto out;
 318
 319	migration_entry_wait_on_locked(entry, ptep, ptl);
 320	return;
 321out:
 322	pte_unmap_unlock(ptep, ptl);
 323}
 324
 325void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
 326				unsigned long address)
 327{
 328	spinlock_t *ptl = pte_lockptr(mm, pmd);
 329	pte_t *ptep = pte_offset_map(pmd, address);
 330	__migration_entry_wait(mm, ptep, ptl);
 331}
 332
 333#ifdef CONFIG_HUGETLB_PAGE
 334void __migration_entry_wait_huge(pte_t *ptep, spinlock_t *ptl)
 335{
 336	pte_t pte;
 337
 338	spin_lock(ptl);
 339	pte = huge_ptep_get(ptep);
 340
 341	if (unlikely(!is_hugetlb_entry_migration(pte)))
 342		spin_unlock(ptl);
 343	else
 344		migration_entry_wait_on_locked(pte_to_swp_entry(pte), NULL, ptl);
 345}
 346
 347void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte)
 348{
 349	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, pte);
 350
 351	__migration_entry_wait_huge(pte, ptl);
 352}
 353#endif
 354
 355#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
 356void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
 357{
 358	spinlock_t *ptl;
 359
 360	ptl = pmd_lock(mm, pmd);
 361	if (!is_pmd_migration_entry(*pmd))
 362		goto unlock;
 363	migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), NULL, ptl);
 364	return;
 365unlock:
 366	spin_unlock(ptl);
 367}
 368#endif
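
/*
 * Illustrative sketch, not part of migrate.c: the page fault path is the
 * typical caller of migration_entry_wait(). This mirrors the shape of
 * do_swap_page() in mm/memory.c; "demo_" names are hypothetical and the
 * snippet is guarded by #if 0 so it is never built.
 */
#if 0
static vm_fault_t demo_wait_if_migrating(struct vm_fault *vmf)
{
	swp_entry_t entry = pte_to_swp_entry(vmf->orig_pte);

	if (is_migration_entry(entry)) {
		/* Sleeps until migration completes; the fault is then retried. */
		migration_entry_wait(vmf->vma->vm_mm, vmf->pmd, vmf->address);
		return 0;
	}
	return VM_FAULT_SIGBUS;	/* demo fallback for non-migration entries */
}
#endif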
 369
 370static int folio_expected_refs(struct address_space *mapping,
 371		struct folio *folio)
 372{
 373	int refs = 1;
 374	if (!mapping)
 375		return refs;
 376
 377	refs += folio_nr_pages(folio);
 378	if (folio_test_private(folio))
 379		refs++;
 380
 381	return refs;
 382}
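
/*
 * Worked example (editorial note): for an order-0 pagecache folio with
 * buffer heads attached, folio_expected_refs() returns 1 (the migration
 * caller's ref) + 1 (the page cache ref for one page) + 1 (PagePrivate)
 * = 3, matching the "3 for pages with a mapping and PagePrivate" case
 * listed in the comment above folio_migrate_mapping() just below.
 */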
 383
 384/*
 385 * Replace the page in the mapping.
 386 *
 387 * The number of remaining references must be:
 388 * 1 for anonymous pages without a mapping
 389 * 2 for pages with a mapping
 390 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
 391 */
 392int folio_migrate_mapping(struct address_space *mapping,
 393		struct folio *newfolio, struct folio *folio, int extra_count)
 394{
 395	XA_STATE(xas, &mapping->i_pages, folio_index(folio));
 396	struct zone *oldzone, *newzone;
 397	int dirty;
 398	int expected_count = folio_expected_refs(mapping, folio) + extra_count;
 399	long nr = folio_nr_pages(folio);
 400
 401	if (!mapping) {
 402		/* Anonymous page without mapping */
 403		if (folio_ref_count(folio) != expected_count)
 404			return -EAGAIN;
 405
 406		/* No turning back from here */
 407		newfolio->index = folio->index;
 408		newfolio->mapping = folio->mapping;
 409		if (folio_test_swapbacked(folio))
 410			__folio_set_swapbacked(newfolio);
 411
 412		return MIGRATEPAGE_SUCCESS;
 413	}
 414
 415	oldzone = folio_zone(folio);
 416	newzone = folio_zone(newfolio);
 417
 418	xas_lock_irq(&xas);
 419	if (!folio_ref_freeze(folio, expected_count)) {
 420		xas_unlock_irq(&xas);
 421		return -EAGAIN;
 422	}
 423
 424	/*
 425	 * Now we know that no one else is looking at the folio:
 426	 * no turning back from here.
 427	 */
 428	newfolio->index = folio->index;
 429	newfolio->mapping = folio->mapping;
 430	folio_ref_add(newfolio, nr); /* add cache reference */
 431	if (folio_test_swapbacked(folio)) {
 432		__folio_set_swapbacked(newfolio);
 433		if (folio_test_swapcache(folio)) {
 434			folio_set_swapcache(newfolio);
 435			newfolio->private = folio_get_private(folio);
 436		}
 437	} else {
 438		VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
 439	}
 440
 441	/* Move dirty while page refs frozen and newpage not yet exposed */
 442	dirty = folio_test_dirty(folio);
 443	if (dirty) {
 444		folio_clear_dirty(folio);
 445		folio_set_dirty(newfolio);
 446	}
 447
 448	xas_store(&xas, newfolio);
 449
 450	/*
 451	 * Drop cache reference from old page by unfreezing
 452	 * to one less reference.
 453	 * We know this isn't the last reference.
 454	 */
 455	folio_ref_unfreeze(folio, expected_count - nr);
 456
 457	xas_unlock(&xas);
 458	/* Leave irq disabled to prevent preemption while updating stats */
 459
 460	/*
 461	 * If moved to a different zone then also account
 462	 * the page for that zone. Other VM counters will be
 463	 * taken care of when we establish references to the
 464	 * new page and drop references to the old page.
 465	 *
 466	 * Note that anonymous pages are accounted for
 467	 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
 468	 * are mapped to swap space.
 469	 */
 470	if (newzone != oldzone) {
 471		struct lruvec *old_lruvec, *new_lruvec;
 472		struct mem_cgroup *memcg;
 473
 474		memcg = folio_memcg(folio);
 475		old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
 476		new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
 477
 478		__mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
 479		__mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
 480		if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
 481			__mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
 482			__mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
 483		}
 484#ifdef CONFIG_SWAP
 485		if (folio_test_swapcache(folio)) {
 486			__mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
 487			__mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
 488		}
 489#endif
 490		if (dirty && mapping_can_writeback(mapping)) {
 491			__mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
 492			__mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
 493			__mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
 494			__mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
 495		}
 496	}
 497	local_irq_enable();
 498
 499	return MIGRATEPAGE_SUCCESS;
 500}
 501EXPORT_SYMBOL(folio_migrate_mapping);
 502
 503/*
 504 * The expected number of remaining references is the same as that
 505 * of folio_migrate_mapping().
 506 */
 507int migrate_huge_page_move_mapping(struct address_space *mapping,
 508				   struct folio *dst, struct folio *src)
 509{
 510	XA_STATE(xas, &mapping->i_pages, folio_index(src));
 511	int expected_count;
 512
 513	xas_lock_irq(&xas);
 514	expected_count = 2 + folio_has_private(src);
 515	if (!folio_ref_freeze(src, expected_count)) {
 516		xas_unlock_irq(&xas);
 517		return -EAGAIN;
 518	}
 519
 520	dst->index = src->index;
 521	dst->mapping = src->mapping;
 522
 523	folio_get(dst);
 524
 525	xas_store(&xas, dst);
 526
 527	folio_ref_unfreeze(src, expected_count - 1);
 528
 529	xas_unlock_irq(&xas);
 530
 531	return MIGRATEPAGE_SUCCESS;
 532}
 533
 534/*
 535 * Copy the flags and some other ancillary information
 536 */
 537void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
 538{
 539	int cpupid;
 540
 541	if (folio_test_error(folio))
 542		folio_set_error(newfolio);
 543	if (folio_test_referenced(folio))
 544		folio_set_referenced(newfolio);
 545	if (folio_test_uptodate(folio))
 546		folio_mark_uptodate(newfolio);
 547	if (folio_test_clear_active(folio)) {
 548		VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio);
 549		folio_set_active(newfolio);
 550	} else if (folio_test_clear_unevictable(folio))
 551		folio_set_unevictable(newfolio);
 552	if (folio_test_workingset(folio))
 553		folio_set_workingset(newfolio);
 554	if (folio_test_checked(folio))
 555		folio_set_checked(newfolio);
 556	/*
 557	 * PG_anon_exclusive (-> PG_mappedtodisk) is always migrated via
 558	 * migration entries. We can still have PG_anon_exclusive set on an
  559	 * effectively unmapped and unreferenced first sub-page of an
 560	 * anonymous THP: we can simply copy it here via PG_mappedtodisk.
 561	 */
 562	if (folio_test_mappedtodisk(folio))
 563		folio_set_mappedtodisk(newfolio);
 564
 565	/* Move dirty on pages not done by folio_migrate_mapping() */
 566	if (folio_test_dirty(folio))
 567		folio_set_dirty(newfolio);
 568
 569	if (folio_test_young(folio))
 570		folio_set_young(newfolio);
 571	if (folio_test_idle(folio))
 572		folio_set_idle(newfolio);
 573
 574	/*
 575	 * Copy NUMA information to the new page, to prevent over-eager
 576	 * future migrations of this same page.
 577	 */
 578	cpupid = page_cpupid_xchg_last(&folio->page, -1);
 579	/*
  580	 * In memory tiering mode, when migrating between slow and fast
  581	 * memory nodes, reset cpupid, because it is used to record
  582	 * page access time on the slow memory node.
 583	 */
 584	if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) {
 585		bool f_toptier = node_is_toptier(page_to_nid(&folio->page));
 586		bool t_toptier = node_is_toptier(page_to_nid(&newfolio->page));
 587
 588		if (f_toptier != t_toptier)
 589			cpupid = -1;
 590	}
 591	page_cpupid_xchg_last(&newfolio->page, cpupid);
 592
 593	folio_migrate_ksm(newfolio, folio);
 594	/*
 595	 * Please do not reorder this without considering how mm/ksm.c's
 596	 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
 597	 */
 598	if (folio_test_swapcache(folio))
 599		folio_clear_swapcache(folio);
 600	folio_clear_private(folio);
 601
 602	/* page->private contains hugetlb specific flags */
 603	if (!folio_test_hugetlb(folio))
 604		folio->private = NULL;
 605
 606	/*
 607	 * If any waiters have accumulated on the new page then
 608	 * wake them up.
 609	 */
 610	if (folio_test_writeback(newfolio))
 611		folio_end_writeback(newfolio);
 612
 613	/*
 614	 * PG_readahead shares the same bit with PG_reclaim.  The above
  615	 * folio_end_writeback() may clear PG_readahead mistakenly, so set the
 616	 * bit after that.
 617	 */
 618	if (folio_test_readahead(folio))
 619		folio_set_readahead(newfolio);
 620
 621	folio_copy_owner(newfolio, folio);
 622
 623	if (!folio_test_hugetlb(folio))
 624		mem_cgroup_migrate(folio, newfolio);
 625}
 626EXPORT_SYMBOL(folio_migrate_flags);
 627
 628void folio_migrate_copy(struct folio *newfolio, struct folio *folio)
 629{
 630	folio_copy(newfolio, folio);
 631	folio_migrate_flags(newfolio, folio);
 632}
 633EXPORT_SYMBOL(folio_migrate_copy);
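
/*
 * Illustrative sketch, not part of migrate.c: a minimal migrate_folio
 * implementation built from the two exported helpers above, for a
 * hypothetical mapping with no private data. This is essentially what
 * migrate_folio_extra()/migrate_folio() below do, ignoring the
 * MIGRATE_SYNC_NO_COPY special case they handle. Guarded by #if 0 so it
 * is never built.
 */
#if 0
static int demo_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	int rc;

	/* Swap the folios in the XArray-backed page cache. */
	rc = folio_migrate_mapping(mapping, dst, src, 0);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	/* Copy data and flags over to the new folio. */
	folio_migrate_copy(dst, src);
	return MIGRATEPAGE_SUCCESS;
}
#endif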
 634
 635/************************************************************
 636 *                    Migration functions
 637 ***********************************************************/
 638
 639int migrate_folio_extra(struct address_space *mapping, struct folio *dst,
 640		struct folio *src, enum migrate_mode mode, int extra_count)
 641{
 642	int rc;
 643
 644	BUG_ON(folio_test_writeback(src));	/* Writeback must be complete */
 645
 646	rc = folio_migrate_mapping(mapping, dst, src, extra_count);
 647
 648	if (rc != MIGRATEPAGE_SUCCESS)
 649		return rc;
 650
 651	if (mode != MIGRATE_SYNC_NO_COPY)
 652		folio_migrate_copy(dst, src);
 653	else
 654		folio_migrate_flags(dst, src);
 655	return MIGRATEPAGE_SUCCESS;
 656}
 657
 658/**
 659 * migrate_folio() - Simple folio migration.
 660 * @mapping: The address_space containing the folio.
 661 * @dst: The folio to migrate the data to.
 662 * @src: The folio containing the current data.
 663 * @mode: How to migrate the page.
 664 *
 665 * Common logic to directly migrate a single LRU folio suitable for
 666 * folios that do not use PagePrivate/PagePrivate2.
 667 *
 668 * Folios are locked upon entry and exit.
 669 */
 670int migrate_folio(struct address_space *mapping, struct folio *dst,
 671		struct folio *src, enum migrate_mode mode)
 672{
 673	return migrate_folio_extra(mapping, dst, src, mode, 0);
 674}
 675EXPORT_SYMBOL(migrate_folio);
 676
 677#ifdef CONFIG_BLOCK
 678/* Returns true if all buffers are successfully locked */
 679static bool buffer_migrate_lock_buffers(struct buffer_head *head,
 680							enum migrate_mode mode)
 681{
 682	struct buffer_head *bh = head;
 683
 684	/* Simple case, sync compaction */
 685	if (mode != MIGRATE_ASYNC) {
 686		do {
 687			lock_buffer(bh);
 688			bh = bh->b_this_page;
 689
 690		} while (bh != head);
 691
 692		return true;
 693	}
 694
 695	/* async case, we cannot block on lock_buffer so use trylock_buffer */
 696	do {
 697		if (!trylock_buffer(bh)) {
 698			/*
 699			 * We failed to lock the buffer and cannot stall in
  700			 * async migration. Release the locks taken so far.
 701			 */
 702			struct buffer_head *failed_bh = bh;
 703			bh = head;
 704			while (bh != failed_bh) {
 705				unlock_buffer(bh);
 706				bh = bh->b_this_page;
 707			}
 708			return false;
 709		}
 710
 711		bh = bh->b_this_page;
 712	} while (bh != head);
 713	return true;
 714}
 715
 716static int __buffer_migrate_folio(struct address_space *mapping,
 717		struct folio *dst, struct folio *src, enum migrate_mode mode,
 718		bool check_refs)
 719{
 720	struct buffer_head *bh, *head;
 721	int rc;
 722	int expected_count;
 723
 724	head = folio_buffers(src);
 725	if (!head)
 726		return migrate_folio(mapping, dst, src, mode);
 727
  728	/* Check that the folio does not have extra refs before we do more work */
 729	expected_count = folio_expected_refs(mapping, src);
 730	if (folio_ref_count(src) != expected_count)
 731		return -EAGAIN;
 732
 733	if (!buffer_migrate_lock_buffers(head, mode))
 734		return -EAGAIN;
 735
 736	if (check_refs) {
 737		bool busy;
 738		bool invalidated = false;
 739
 740recheck_buffers:
 741		busy = false;
 742		spin_lock(&mapping->private_lock);
 743		bh = head;
 744		do {
 745			if (atomic_read(&bh->b_count)) {
 746				busy = true;
 747				break;
 748			}
 749			bh = bh->b_this_page;
 750		} while (bh != head);
 751		if (busy) {
 752			if (invalidated) {
 753				rc = -EAGAIN;
 754				goto unlock_buffers;
 755			}
 756			spin_unlock(&mapping->private_lock);
 757			invalidate_bh_lrus();
 758			invalidated = true;
 759			goto recheck_buffers;
 760		}
 761	}
 762
 763	rc = folio_migrate_mapping(mapping, dst, src, 0);
 764	if (rc != MIGRATEPAGE_SUCCESS)
 765		goto unlock_buffers;
 766
 767	folio_attach_private(dst, folio_detach_private(src));
 768
 769	bh = head;
 770	do {
 771		set_bh_page(bh, &dst->page, bh_offset(bh));
 772		bh = bh->b_this_page;
 773	} while (bh != head);
 774
 775	if (mode != MIGRATE_SYNC_NO_COPY)
 776		folio_migrate_copy(dst, src);
 777	else
 778		folio_migrate_flags(dst, src);
 779
 780	rc = MIGRATEPAGE_SUCCESS;
 781unlock_buffers:
 782	if (check_refs)
 783		spin_unlock(&mapping->private_lock);
 784	bh = head;
 785	do {
 786		unlock_buffer(bh);
 787		bh = bh->b_this_page;
 788	} while (bh != head);
 789
 790	return rc;
 791}
 792
 793/**
 794 * buffer_migrate_folio() - Migration function for folios with buffers.
 795 * @mapping: The address space containing @src.
 796 * @dst: The folio to migrate to.
 797 * @src: The folio to migrate from.
 798 * @mode: How to migrate the folio.
 799 *
 800 * This function can only be used if the underlying filesystem guarantees
 801 * that no other references to @src exist. For example attached buffer
 802 * heads are accessed only under the folio lock.  If your filesystem cannot
 803 * provide this guarantee, buffer_migrate_folio_norefs() may be more
 804 * appropriate.
 805 *
 806 * Return: 0 on success or a negative errno on failure.
 807 */
 808int buffer_migrate_folio(struct address_space *mapping,
 809		struct folio *dst, struct folio *src, enum migrate_mode mode)
 810{
 811	return __buffer_migrate_folio(mapping, dst, src, mode, false);
 812}
 813EXPORT_SYMBOL(buffer_migrate_folio);
 814
 815/**
 816 * buffer_migrate_folio_norefs() - Migration function for folios with buffers.
 817 * @mapping: The address space containing @src.
 818 * @dst: The folio to migrate to.
 819 * @src: The folio to migrate from.
 820 * @mode: How to migrate the folio.
 821 *
 822 * Like buffer_migrate_folio() except that this variant is more careful
 823 * and checks that there are also no buffer head references. This function
 824 * is the right one for mappings where buffer heads are directly looked
 825 * up and referenced (such as block device mappings).
 826 *
 827 * Return: 0 on success or a negative errno on failure.
 828 */
 829int buffer_migrate_folio_norefs(struct address_space *mapping,
 830		struct folio *dst, struct folio *src, enum migrate_mode mode)
 831{
 832	return __buffer_migrate_folio(mapping, dst, src, mode, true);
 833}
 834EXPORT_SYMBOL_GPL(buffer_migrate_folio_norefs);
 835#endif
 836
 837int filemap_migrate_folio(struct address_space *mapping,
 838		struct folio *dst, struct folio *src, enum migrate_mode mode)
 839{
 840	int ret;
 841
 842	ret = folio_migrate_mapping(mapping, dst, src, 0);
 843	if (ret != MIGRATEPAGE_SUCCESS)
 844		return ret;
 845
 846	if (folio_get_private(src))
 847		folio_attach_private(dst, folio_detach_private(src));
 848
 849	if (mode != MIGRATE_SYNC_NO_COPY)
 850		folio_migrate_copy(dst, src);
 851	else
 852		folio_migrate_flags(dst, src);
 853	return MIGRATEPAGE_SUCCESS;
 854}
 855EXPORT_SYMBOL_GPL(filemap_migrate_folio);
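
/*
 * Illustrative sketch, not part of migrate.c: filesystems wire one of the
 * exported helpers above into their address_space_operations.
 * migrate_folio() suits mappings without private data,
 * filemap_migrate_folio() handles folio-private data, and
 * buffer_migrate_folio()/buffer_migrate_folio_norefs() cover buffer-head
 * based mappings. "demo_aops" is hypothetical; guarded by #if 0 so it is
 * never built.
 */
#if 0
static const struct address_space_operations demo_aops = {
	/* ... read_folio, writepages, etc. ... */
	.migrate_folio	= filemap_migrate_folio,
};
#endif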
 856
 857/*
  858 * Write back a folio to clear its dirty state
 859 */
 860static int writeout(struct address_space *mapping, struct folio *folio)
 861{
 862	struct writeback_control wbc = {
 863		.sync_mode = WB_SYNC_NONE,
 864		.nr_to_write = 1,
 865		.range_start = 0,
 866		.range_end = LLONG_MAX,
 867		.for_reclaim = 1
 868	};
 869	int rc;
 870
 871	if (!mapping->a_ops->writepage)
 872		/* No write method for the address space */
 873		return -EINVAL;
 874
 875	if (!folio_clear_dirty_for_io(folio))
 876		/* Someone else already triggered a write */
 877		return -EAGAIN;
 878
 879	/*
 880	 * A dirty folio may imply that the underlying filesystem has
 881	 * the folio on some queue. So the folio must be clean for
 882	 * migration. Writeout may mean we lose the lock and the
 883	 * folio state is no longer what we checked for earlier.
 884	 * At this point we know that the migration attempt cannot
 885	 * be successful.
 886	 */
 887	remove_migration_ptes(folio, folio, false);
 888
 889	rc = mapping->a_ops->writepage(&folio->page, &wbc);
 890
 891	if (rc != AOP_WRITEPAGE_ACTIVATE)
 892		/* unlocked. Relock */
 893		folio_lock(folio);
 894
 895	return (rc < 0) ? -EIO : -EAGAIN;
 896}
 897
 898/*
 899 * Default handling if a filesystem does not provide a migration function.
 900 */
 901static int fallback_migrate_folio(struct address_space *mapping,
 902		struct folio *dst, struct folio *src, enum migrate_mode mode)
 903{
 904	if (folio_test_dirty(src)) {
 905		/* Only writeback folios in full synchronous migration */
 906		switch (mode) {
 907		case MIGRATE_SYNC:
 908		case MIGRATE_SYNC_NO_COPY:
 909			break;
 910		default:
 911			return -EBUSY;
 912		}
 913		return writeout(mapping, src);
 914	}
 915
 916	/*
 917	 * Buffers may be managed in a filesystem specific way.
 918	 * We must have no buffers or drop them.
 919	 */
 920	if (folio_test_private(src) &&
 921	    !filemap_release_folio(src, GFP_KERNEL))
 922		return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;
 923
 924	return migrate_folio(mapping, dst, src, mode);
 925}
 926
 927/*
  928 * Move a page to a newly allocated page.
 929 * The page is locked and all ptes have been successfully removed.
 930 *
 931 * The new page will have replaced the old page if this function
 932 * is successful.
 933 *
 934 * Return value:
 935 *   < 0 - error code
 936 *  MIGRATEPAGE_SUCCESS - success
 937 */
 938static int move_to_new_folio(struct folio *dst, struct folio *src,
 939				enum migrate_mode mode)
 940{
 941	int rc = -EAGAIN;
 942	bool is_lru = !__PageMovable(&src->page);
 943
 944	VM_BUG_ON_FOLIO(!folio_test_locked(src), src);
 945	VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst);
 946
 947	if (likely(is_lru)) {
 948		struct address_space *mapping = folio_mapping(src);
 949
 950		if (!mapping)
 951			rc = migrate_folio(mapping, dst, src, mode);
 952		else if (mapping->a_ops->migrate_folio)
 953			/*
 954			 * Most folios have a mapping and most filesystems
 955			 * provide a migrate_folio callback. Anonymous folios
 956			 * are part of swap space which also has its own
 957			 * migrate_folio callback. This is the most common path
 958			 * for page migration.
 959			 */
 960			rc = mapping->a_ops->migrate_folio(mapping, dst, src,
 961								mode);
 962		else
 963			rc = fallback_migrate_folio(mapping, dst, src, mode);
 964	} else {
 965		const struct movable_operations *mops;
 966
 967		/*
  968		 * A non-LRU page could be released after the
  969		 * isolation step. In that case, we shouldn't try migration.
 970		 */
 971		VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
 972		if (!folio_test_movable(src)) {
 973			rc = MIGRATEPAGE_SUCCESS;
 974			folio_clear_isolated(src);
 975			goto out;
 976		}
 977
 978		mops = page_movable_ops(&src->page);
 979		rc = mops->migrate_page(&dst->page, &src->page, mode);
 980		WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
 981				!folio_test_isolated(src));
 982	}
 983
 984	/*
 985	 * When successful, old pagecache src->mapping must be cleared before
 986	 * src is freed; but stats require that PageAnon be left as PageAnon.
 987	 */
 988	if (rc == MIGRATEPAGE_SUCCESS) {
 989		if (__PageMovable(&src->page)) {
 990			VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
 991
 992			/*
  993			 * We clear PG_movable under the page lock so that no
  994			 * compactor can try to migrate this page.
 995			 */
 996			folio_clear_isolated(src);
 997		}
 998
 999		/*
 1000		 * Anonymous and movable src->mapping will be cleared by
 1001		 * free_pages_prepare(), so don't reset it here; keeping it
 1002		 * lets type checks such as PageAnon() keep working.
1003		 */
1004		if (!folio_mapping_flags(src))
1005			src->mapping = NULL;
1006
1007		if (likely(!folio_is_zone_device(dst)))
1008			flush_dcache_folio(dst);
1009	}
1010out:
1011	return rc;
1012}
1013
1014static int __unmap_and_move(struct folio *src, struct folio *dst,
1015				int force, enum migrate_mode mode)
1016{
1017	int rc = -EAGAIN;
1018	bool page_was_mapped = false;
1019	struct anon_vma *anon_vma = NULL;
1020	bool is_lru = !__PageMovable(&src->page);
1021
1022	if (!folio_trylock(src)) {
1023		if (!force || mode == MIGRATE_ASYNC)
1024			goto out;
1025
1026		/*
1027		 * It's not safe for direct compaction to call lock_page.
1028		 * For example, during page readahead pages are added locked
1029		 * to the LRU. Later, when the IO completes the pages are
1030		 * marked uptodate and unlocked. However, the queueing
1031		 * could be merging multiple pages for one bio (e.g.
1032		 * mpage_readahead). If an allocation happens for the
1033		 * second or third page, the process can end up locking
1034		 * the same page twice and deadlocking. Rather than
1035		 * trying to be clever about what pages can be locked,
1036		 * avoid the use of lock_page for direct compaction
1037		 * altogether.
1038		 */
1039		if (current->flags & PF_MEMALLOC)
1040			goto out;
1041
1042		folio_lock(src);
1043	}
1044
1045	if (folio_test_writeback(src)) {
1046		/*
1047		 * Only in the case of a full synchronous migration is it
1048		 * necessary to wait for PageWriteback. In the async case,
1049		 * the retry loop is too short and in the sync-light case,
1050		 * the overhead of stalling is too much
1051		 */
1052		switch (mode) {
1053		case MIGRATE_SYNC:
1054		case MIGRATE_SYNC_NO_COPY:
1055			break;
1056		default:
1057			rc = -EBUSY;
1058			goto out_unlock;
1059		}
1060		if (!force)
1061			goto out_unlock;
1062		folio_wait_writeback(src);
1063	}
1064
1065	/*
 1066	 * try_to_migrate() drops src->mapcount to 0 here, so we could fail to
 1067	 * notice that the anon_vma is freed while we migrate the page.
 1068	 * This get_anon_vma() delays freeing the anon_vma pointer until the end
 1069	 * of migration. File cache pages are no problem because they are
 1070	 * protected by the page lock during migration, so only anon pages
 1071	 * need this care here.
1072	 *
1073	 * Only folio_get_anon_vma() understands the subtleties of
1074	 * getting a hold on an anon_vma from outside one of its mms.
1075	 * But if we cannot get anon_vma, then we won't need it anyway,
1076	 * because that implies that the anon page is no longer mapped
1077	 * (and cannot be remapped so long as we hold the page lock).
1078	 */
1079	if (folio_test_anon(src) && !folio_test_ksm(src))
1080		anon_vma = folio_get_anon_vma(src);
1081
1082	/*
1083	 * Block others from accessing the new page when we get around to
1084	 * establishing additional references. We are usually the only one
1085	 * holding a reference to dst at this point. We used to have a BUG
1086	 * here if folio_trylock(dst) fails, but would like to allow for
1087	 * cases where there might be a race with the previous use of dst.
1088	 * This is much like races on refcount of oldpage: just don't BUG().
1089	 */
1090	if (unlikely(!folio_trylock(dst)))
1091		goto out_unlock;
1092
1093	if (unlikely(!is_lru)) {
1094		rc = move_to_new_folio(dst, src, mode);
1095		goto out_unlock_both;
1096	}
1097
1098	/*
1099	 * Corner case handling:
 1100	 * 1. When a new swap-cache page is read in, it is added to the LRU
1101	 * and treated as swapcache but it has no rmap yet.
1102	 * Calling try_to_unmap() against a src->mapping==NULL page will
1103	 * trigger a BUG.  So handle it here.
1104	 * 2. An orphaned page (see truncate_cleanup_page) might have
1105	 * fs-private metadata. The page can be picked up due to memory
1106	 * offlining.  Everywhere else except page reclaim, the page is
 1107	 * invisible to the VM, so the page cannot be migrated.  So try to
1108	 * free the metadata, so the page can be freed.
1109	 */
1110	if (!src->mapping) {
1111		if (folio_test_private(src)) {
1112			try_to_free_buffers(src);
1113			goto out_unlock_both;
1114		}
1115	} else if (folio_mapped(src)) {
1116		/* Establish migration ptes */
1117		VM_BUG_ON_FOLIO(folio_test_anon(src) &&
1118			       !folio_test_ksm(src) && !anon_vma, src);
1119		try_to_migrate(src, 0);
1120		page_was_mapped = true;
1121	}
1122
1123	if (!folio_mapped(src))
1124		rc = move_to_new_folio(dst, src, mode);
1125
1126	/*
1127	 * When successful, push dst to LRU immediately: so that if it
1128	 * turns out to be an mlocked page, remove_migration_ptes() will
1129	 * automatically build up the correct dst->mlock_count for it.
1130	 *
1131	 * We would like to do something similar for the old page, when
1132	 * unsuccessful, and other cases when a page has been temporarily
1133	 * isolated from the unevictable LRU: but this case is the easiest.
1134	 */
1135	if (rc == MIGRATEPAGE_SUCCESS) {
1136		folio_add_lru(dst);
1137		if (page_was_mapped)
1138			lru_add_drain();
1139	}
1140
1141	if (page_was_mapped)
1142		remove_migration_ptes(src,
1143			rc == MIGRATEPAGE_SUCCESS ? dst : src, false);
1144
1145out_unlock_both:
1146	folio_unlock(dst);
1147out_unlock:
1148	/* Drop an anon_vma reference if we took one */
1149	if (anon_vma)
1150		put_anon_vma(anon_vma);
1151	folio_unlock(src);
1152out:
1153	/*
 1154	 * If migration is successful, decrease the refcount of dst,
 1155	 * which will not free the page because the new page owner
 1156	 * holds its own reference.
1157	 */
1158	if (rc == MIGRATEPAGE_SUCCESS)
1159		folio_put(dst);
1160
1161	return rc;
1162}
1163
1164/*
1165 * Obtain the lock on folio, remove all ptes and migrate the folio
1166 * to the newly allocated folio in dst.
1167 */
1168static int unmap_and_move(new_page_t get_new_page,
1169				   free_page_t put_new_page,
1170				   unsigned long private, struct folio *src,
1171				   int force, enum migrate_mode mode,
1172				   enum migrate_reason reason,
1173				   struct list_head *ret)
1174{
1175	struct folio *dst;
1176	int rc = MIGRATEPAGE_SUCCESS;
1177	struct page *newpage = NULL;
1178
1179	if (!thp_migration_supported() && folio_test_transhuge(src))
1180		return -ENOSYS;
1181
1182	if (folio_ref_count(src) == 1) {
1183		/* Folio was freed from under us. So we are done. */
1184		folio_clear_active(src);
1185		folio_clear_unevictable(src);
1186		/* free_pages_prepare() will clear PG_isolated. */
1187		goto out;
1188	}
1189
1190	newpage = get_new_page(&src->page, private);
1191	if (!newpage)
1192		return -ENOMEM;
1193	dst = page_folio(newpage);
1194
1195	dst->private = NULL;
1196	rc = __unmap_and_move(src, dst, force, mode);
1197	if (rc == MIGRATEPAGE_SUCCESS)
1198		set_page_owner_migrate_reason(&dst->page, reason);
1199
1200out:
1201	if (rc != -EAGAIN) {
1202		/*
1203		 * A folio that has been migrated has all references
1204		 * removed and will be freed. A folio that has not been
1205		 * migrated will have kept its references and be restored.
1206		 */
1207		list_del(&src->lru);
1208	}
1209
1210	/*
 1211	 * If migration is successful, release the reference grabbed during
 1212	 * isolation. Otherwise, restore the folio to the right list unless
1213	 * we want to retry.
1214	 */
1215	if (rc == MIGRATEPAGE_SUCCESS) {
1216		/*
 1217		 * Compaction can also migrate non-LRU folios, which are
 1218		 * not accounted to NR_ISOLATED_*. They can be recognized
 1219		 * via __folio_test_movable().
1220		 */
1221		if (likely(!__folio_test_movable(src)))
1222			mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON +
1223					folio_is_file_lru(src), -folio_nr_pages(src));
1224
1225		if (reason != MR_MEMORY_FAILURE)
1226			/*
1227			 * We release the folio in page_handle_poison.
1228			 */
1229			folio_put(src);
1230	} else {
1231		if (rc != -EAGAIN)
1232			list_add_tail(&src->lru, ret);
1233
1234		if (put_new_page)
1235			put_new_page(&dst->page, private);
1236		else
1237			folio_put(dst);
1238	}
1239
1240	return rc;
1241}
1242
1243/*
 1244 * Counterpart of unmap_and_move() for hugepage migration.
 1245 *
 1246 * This function doesn't wait for the completion of hugepage I/O
 1247 * because there is no race between I/O and migration for hugepages.
 1248 * Note that currently hugepage I/O occurs only in direct I/O
 1249 * where no lock is held and PG_writeback is irrelevant,
 1250 * and the writeback status of all subpages is counted in the reference
 1251 * count of the head page (i.e. if all subpages of a 2MB hugepage are
 1252 * under direct I/O, the reference count of the head page is 512 and a bit more).
 1253 * This means that when we try to migrate a hugepage whose subpages are
 1254 * doing direct I/O, some references remain after try_to_unmap() and
 1255 * hugepage migration fails without data corruption.
 1256 *
 1257 * There is also no race when direct I/O is issued on a page under migration,
 1258 * because then the pte is replaced with a migration swap entry and the direct
 1259 * I/O code will wait in the page fault for migration to complete.
1260 */
1261static int unmap_and_move_huge_page(new_page_t get_new_page,
1262				free_page_t put_new_page, unsigned long private,
1263				struct page *hpage, int force,
1264				enum migrate_mode mode, int reason,
1265				struct list_head *ret)
1266{
1267	struct folio *dst, *src = page_folio(hpage);
1268	int rc = -EAGAIN;
1269	int page_was_mapped = 0;
1270	struct page *new_hpage;
1271	struct anon_vma *anon_vma = NULL;
1272	struct address_space *mapping = NULL;
1273
1274	/*
1275	 * Migratability of hugepages depends on architectures and their size.
1276	 * This check is necessary because some callers of hugepage migration
1277	 * like soft offline and memory hotremove don't walk through page
1278	 * tables or check whether the hugepage is pmd-based or not before
1279	 * kicking migration.
1280	 */
1281	if (!hugepage_migration_supported(page_hstate(hpage)))
1282		return -ENOSYS;
1283
1284	if (folio_ref_count(src) == 1) {
1285		/* page was freed from under us. So we are done. */
1286		putback_active_hugepage(hpage);
1287		return MIGRATEPAGE_SUCCESS;
1288	}
1289
1290	new_hpage = get_new_page(hpage, private);
1291	if (!new_hpage)
1292		return -ENOMEM;
1293	dst = page_folio(new_hpage);
1294
1295	if (!folio_trylock(src)) {
1296		if (!force)
1297			goto out;
1298		switch (mode) {
1299		case MIGRATE_SYNC:
1300		case MIGRATE_SYNC_NO_COPY:
1301			break;
1302		default:
1303			goto out;
1304		}
1305		folio_lock(src);
1306	}
1307
1308	/*
1309	 * Check for pages which are in the process of being freed.  Without
 1310	 * folio_mapping() set, the hugetlbfs-specific move page routine will not
1311	 * be called and we could leak usage counts for subpools.
1312	 */
1313	if (hugetlb_folio_subpool(src) && !folio_mapping(src)) {
1314		rc = -EBUSY;
1315		goto out_unlock;
1316	}
1317
1318	if (folio_test_anon(src))
1319		anon_vma = folio_get_anon_vma(src);
1320
1321	if (unlikely(!folio_trylock(dst)))
1322		goto put_anon;
1323
1324	if (folio_mapped(src)) {
1325		enum ttu_flags ttu = 0;
1326
1327		if (!folio_test_anon(src)) {
1328			/*
1329			 * In shared mappings, try_to_unmap could potentially
 1330			 * call huge_pmd_unshare().  Because of this, take the
 1331			 * semaphore in write mode here and set TTU_RMAP_LOCKED
1332			 * to let lower levels know we have taken the lock.
1333			 */
1334			mapping = hugetlb_page_mapping_lock_write(hpage);
1335			if (unlikely(!mapping))
1336				goto unlock_put_anon;
1337
1338			ttu = TTU_RMAP_LOCKED;
1339		}
1340
1341		try_to_migrate(src, ttu);
1342		page_was_mapped = 1;
1343
1344		if (ttu & TTU_RMAP_LOCKED)
1345			i_mmap_unlock_write(mapping);
1346	}
1347
1348	if (!folio_mapped(src))
1349		rc = move_to_new_folio(dst, src, mode);
1350
1351	if (page_was_mapped)
1352		remove_migration_ptes(src,
1353			rc == MIGRATEPAGE_SUCCESS ? dst : src, false);
1354
1355unlock_put_anon:
1356	folio_unlock(dst);
1357
1358put_anon:
1359	if (anon_vma)
1360		put_anon_vma(anon_vma);
1361
1362	if (rc == MIGRATEPAGE_SUCCESS) {
1363		move_hugetlb_state(src, dst, reason);
1364		put_new_page = NULL;
1365	}
1366
1367out_unlock:
1368	folio_unlock(src);
1369out:
1370	if (rc == MIGRATEPAGE_SUCCESS)
1371		putback_active_hugepage(hpage);
1372	else if (rc != -EAGAIN)
1373		list_move_tail(&src->lru, ret);
1374
1375	/*
1376	 * If migration was not successful and there's a freeing callback, use
1377	 * it.  Otherwise, put_page() will drop the reference grabbed during
1378	 * isolation.
1379	 */
1380	if (put_new_page)
1381		put_new_page(new_hpage, private);
1382	else
1383		putback_active_hugepage(new_hpage);
1384
1385	return rc;
1386}
1387
1388static inline int try_split_folio(struct folio *folio, struct list_head *split_folios)
1389{
1390	int rc;
1391
1392	folio_lock(folio);
1393	rc = split_folio_to_list(folio, split_folios);
1394	folio_unlock(folio);
1395	if (!rc)
1396		list_move_tail(&folio->lru, split_folios);
1397
1398	return rc;
1399}
1400
1401/*
1402 * migrate_pages - migrate the folios specified in a list, to the free folios
1403 *		   supplied as the target for the page migration
1404 *
1405 * @from:		The list of folios to be migrated.
1406 * @get_new_page:	The function used to allocate free folios to be used
1407 *			as the target of the folio migration.
1408 * @put_new_page:	The function used to free target folios if migration
1409 *			fails, or NULL if no special handling is necessary.
1410 * @private:		Private data to be passed on to get_new_page()
1411 * @mode:		The migration mode that specifies the constraints for
1412 *			folio migration, if any.
1413 * @reason:		The reason for folio migration.
1414 * @ret_succeeded:	Set to the number of folios migrated successfully if
1415 *			the caller passes a non-NULL pointer.
1416 *
 1417 * The function returns after 10 attempts or if no folios are movable any more
 1418 * (because the list has become empty or no retryable folios remain). It is
 1419 * the caller's responsibility to call putback_movable_pages() to return folios
 1420 * to the LRU or free list only if ret != 0.
 1421 *
 1422 * Returns the number of {normal folio, large folio, hugetlb} pages that were
 1423 * not migrated, or an error code. A split large folio is counted as one
 1424 * non-migrated large folio, no matter how many of its split folios are
 1425 * migrated successfully.
1426 */
1427int migrate_pages(struct list_head *from, new_page_t get_new_page,
1428		free_page_t put_new_page, unsigned long private,
1429		enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
1430{
1431	int retry = 1;
1432	int large_retry = 1;
1433	int thp_retry = 1;
1434	int nr_failed = 0;
1435	int nr_failed_pages = 0;
1436	int nr_retry_pages = 0;
1437	int nr_succeeded = 0;
1438	int nr_thp_succeeded = 0;
1439	int nr_large_failed = 0;
1440	int nr_thp_failed = 0;
1441	int nr_thp_split = 0;
1442	int pass = 0;
1443	bool is_large = false;
1444	bool is_thp = false;
1445	struct folio *folio, *folio2;
1446	int rc, nr_pages;
1447	LIST_HEAD(ret_folios);
1448	LIST_HEAD(split_folios);
1449	bool nosplit = (reason == MR_NUMA_MISPLACED);
1450	bool no_split_folio_counting = false;
1451
1452	trace_mm_migrate_pages_start(mode, reason);
1453
1454split_folio_migration:
1455	for (pass = 0; pass < 10 && (retry || large_retry); pass++) {
1456		retry = 0;
1457		large_retry = 0;
1458		thp_retry = 0;
1459		nr_retry_pages = 0;
1460
1461		list_for_each_entry_safe(folio, folio2, from, lru) {
1462			/*
 1463			 * Large folio statistics are based on the source large
1464			 * folio. Capture required information that might get
1465			 * lost during migration.
1466			 */
1467			is_large = folio_test_large(folio) && !folio_test_hugetlb(folio);
1468			is_thp = is_large && folio_test_pmd_mappable(folio);
1469			nr_pages = folio_nr_pages(folio);
1470			cond_resched();
1471
1472			if (folio_test_hugetlb(folio))
1473				rc = unmap_and_move_huge_page(get_new_page,
1474						put_new_page, private,
1475						&folio->page, pass > 2, mode,
1476						reason,
1477						&ret_folios);
1478			else
1479				rc = unmap_and_move(get_new_page, put_new_page,
1480						private, folio, pass > 2, mode,
1481						reason, &ret_folios);
1482			/*
1483			 * The rules are:
1484			 *	Success: non hugetlb folio will be freed, hugetlb
1485			 *		 folio will be put back
1486			 *	-EAGAIN: stay on the from list
1487			 *	-ENOMEM: stay on the from list
1488			 *	-ENOSYS: stay on the from list
1489			 *	Other errno: put on ret_folios list then splice to
1490			 *		     from list
1491			 */
1492			switch(rc) {
1493			/*
1494			 * Large folio migration might be unsupported or
 1495			 * the allocation could have failed, so we should
 1496			 * retry the same folio after splitting the large
 1497			 * folio into normal folios.
1498			 *
1499			 * Split folios are put in split_folios, and
1500			 * we will migrate them after the rest of the
1501			 * list is processed.
1502			 */
1503			case -ENOSYS:
1504				/* Large folio migration is unsupported */
1505				if (is_large) {
1506					nr_large_failed++;
1507					nr_thp_failed += is_thp;
1508					if (!try_split_folio(folio, &split_folios)) {
1509						nr_thp_split += is_thp;
1510						break;
1511					}
1512				/* Hugetlb migration is unsupported */
1513				} else if (!no_split_folio_counting) {
1514					nr_failed++;
1515				}
1516
1517				nr_failed_pages += nr_pages;
1518				list_move_tail(&folio->lru, &ret_folios);
1519				break;
1520			case -ENOMEM:
1521				/*
1522				 * When memory is low, don't bother to try to migrate
1523				 * other folios, just exit.
1524				 */
1525				if (is_large) {
1526					nr_large_failed++;
1527					nr_thp_failed += is_thp;
1528					/* Large folio NUMA faulting doesn't split to retry. */
1529					if (!nosplit) {
1530						int ret = try_split_folio(folio, &split_folios);
1531
1532						if (!ret) {
1533							nr_thp_split += is_thp;
1534							break;
1535						} else if (reason == MR_LONGTERM_PIN &&
1536							   ret == -EAGAIN) {
1537							/*
 1538							 * Try again to split the large folio to
 1539							 * mitigate the failure of long-term pinning.
1540							 */
1541							large_retry++;
1542							thp_retry += is_thp;
1543							nr_retry_pages += nr_pages;
1544							break;
1545						}
1546					}
1547				} else if (!no_split_folio_counting) {
1548					nr_failed++;
1549				}
1550
1551				nr_failed_pages += nr_pages + nr_retry_pages;
1552				/*
1553				 * There might be some split folios of fail-to-migrate large
 1554				 * folios left in the split_folios list. Move them back to the
 1555				 * migration list so that they can be put back to the right list
 1556				 * by the caller; otherwise the folio refcount will leak.
1557				 */
1558				list_splice_init(&split_folios, from);
 1559				/* nr_failed isn't updated: it is not used after this goto */
1560				nr_large_failed += large_retry;
1561				nr_thp_failed += thp_retry;
1562				goto out;
1563			case -EAGAIN:
1564				if (is_large) {
1565					large_retry++;
1566					thp_retry += is_thp;
1567				} else if (!no_split_folio_counting) {
1568					retry++;
1569				}
1570				nr_retry_pages += nr_pages;
1571				break;
1572			case MIGRATEPAGE_SUCCESS:
1573				nr_succeeded += nr_pages;
1574				nr_thp_succeeded += is_thp;
1575				break;
1576			default:
1577				/*
1578				 * Permanent failure (-EBUSY, etc.):
 1579				 * unlike the -EAGAIN case, the failed folio is
 1580				 * removed from the migration folio list and not
1581				 * retried in the next outer loop.
1582				 */
1583				if (is_large) {
1584					nr_large_failed++;
1585					nr_thp_failed += is_thp;
1586				} else if (!no_split_folio_counting) {
1587					nr_failed++;
1588				}
1589
1590				nr_failed_pages += nr_pages;
1591				break;
1592			}
1593		}
1594	}
1595	nr_failed += retry;
1596	nr_large_failed += large_retry;
1597	nr_thp_failed += thp_retry;
1598	nr_failed_pages += nr_retry_pages;
1599	/*
 1600	 * Try to migrate the split folios of fail-to-migrate large folios; no
 1601	 * nr_failed counting in this round, since all split folios of a
 1602	 * large folio are counted as one failure in the first round.
 1603	 */
1604	if (!list_empty(&split_folios)) {
1605		/*
1606		 * Move non-migrated folios (after 10 retries) to ret_folios
1607		 * to avoid migrating them again.
1608		 */
1609		list_splice_init(from, &ret_folios);
1610		list_splice_init(&split_folios, from);
1611		no_split_folio_counting = true;
1612		retry = 1;
1613		goto split_folio_migration;
1614	}
1615
1616	rc = nr_failed + nr_large_failed;
1617out:
1618	/*
 1619	 * Put the permanently failed folios back on the migration list; they
1620	 * will be put back to the right list by the caller.
1621	 */
1622	list_splice(&ret_folios, from);
1623
1624	/*
1625	 * Return 0 in case all split folios of fail-to-migrate large folios
1626	 * are migrated successfully.
1627	 */
1628	if (list_empty(from))
1629		rc = 0;
1630
1631	count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
1632	count_vm_events(PGMIGRATE_FAIL, nr_failed_pages);
1633	count_vm_events(THP_MIGRATION_SUCCESS, nr_thp_succeeded);
1634	count_vm_events(THP_MIGRATION_FAIL, nr_thp_failed);
1635	count_vm_events(THP_MIGRATION_SPLIT, nr_thp_split);
1636	trace_mm_migrate_pages(nr_succeeded, nr_failed_pages, nr_thp_succeeded,
1637			       nr_thp_failed, nr_thp_split, mode, reason);
1638
1639	if (ret_succeeded)
1640		*ret_succeeded = nr_succeeded;
1641
1642	return rc;
1643}
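
/*
 * Illustrative sketch, not part of migrate.c: a typical migrate_pages()
 * caller, modeled loosely on memory offlining. The list must hold
 * already-isolated pages; per the kernel-doc above, on ret != 0 the
 * caller puts back whatever was not migrated. "demo_" names are
 * hypothetical; guarded by #if 0 so it is never built.
 */
#if 0
static int demo_migrate_list_to_node(struct list_head *pagelist, int nid)
{
	struct migration_target_control mtc = {
		.nid = nid,
		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
	};
	unsigned int nr_succeeded;
	int ret;

	ret = migrate_pages(pagelist, alloc_migration_target, NULL,
			    (unsigned long)&mtc, MIGRATE_SYNC,
			    MR_MEMORY_HOTPLUG, &nr_succeeded);
	if (ret)
		/* Return the leftovers to their LRU or free lists. */
		putback_movable_pages(pagelist);
	return ret;
}
#endif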
1644
1645struct page *alloc_migration_target(struct page *page, unsigned long private)
1646{
1647	struct folio *folio = page_folio(page);
1648	struct migration_target_control *mtc;
1649	gfp_t gfp_mask;
1650	unsigned int order = 0;
1651	struct folio *new_folio = NULL;
1652	int nid;
1653	int zidx;
1654
1655	mtc = (struct migration_target_control *)private;
1656	gfp_mask = mtc->gfp_mask;
1657	nid = mtc->nid;
1658	if (nid == NUMA_NO_NODE)
1659		nid = folio_nid(folio);
1660
1661	if (folio_test_hugetlb(folio)) {
1662		struct hstate *h = folio_hstate(folio);
1663
1664		gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
1665		return alloc_huge_page_nodemask(h, nid, mtc->nmask, gfp_mask);
1666	}
1667
1668	if (folio_test_large(folio)) {
1669		/*
1670		 * clear __GFP_RECLAIM to make the migration callback
1671		 * consistent with regular THP allocations.
1672		 */
1673		gfp_mask &= ~__GFP_RECLAIM;
1674		gfp_mask |= GFP_TRANSHUGE;
1675		order = folio_order(folio);
1676	}
1677	zidx = zone_idx(folio_zone(folio));
1678	if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
1679		gfp_mask |= __GFP_HIGHMEM;
1680
1681	new_folio = __folio_alloc(gfp_mask, order, nid, mtc->nmask);
1682
1683	return &new_folio->page;
1684}
1685
1686#ifdef CONFIG_NUMA
1687
1688static int store_status(int __user *status, int start, int value, int nr)
1689{
1690	while (nr-- > 0) {
1691		if (put_user(value, status + start))
1692			return -EFAULT;
1693		start++;
1694	}
1695
1696	return 0;
1697}
1698
1699static int do_move_pages_to_node(struct mm_struct *mm,
1700		struct list_head *pagelist, int node)
1701{
1702	int err;
1703	struct migration_target_control mtc = {
1704		.nid = node,
1705		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
1706	};
1707
1708	err = migrate_pages(pagelist, alloc_migration_target, NULL,
1709		(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
1710	if (err)
1711		putback_movable_pages(pagelist);
1712	return err;
1713}
1714
1715/*
1716 * Resolves the given address to a struct page, isolates it from the LRU and
 1717 * puts it on the given pagelist.
1718 * Returns:
1719 *     errno - if the page cannot be found/isolated
1720 *     0 - when it doesn't have to be migrated because it is already on the
1721 *         target node
1722 *     1 - when it has been queued
1723 */
1724static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
1725		int node, struct list_head *pagelist, bool migrate_all)
1726{
1727	struct vm_area_struct *vma;
1728	struct page *page;
1729	int err;
1730
1731	mmap_read_lock(mm);
1732	err = -EFAULT;
1733	vma = vma_lookup(mm, addr);
1734	if (!vma || !vma_migratable(vma))
1735		goto out;
1736
1737	/* FOLL_DUMP to ignore special (like zero) pages */
1738	page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
1739
1740	err = PTR_ERR(page);
1741	if (IS_ERR(page))
1742		goto out;
1743
1744	err = -ENOENT;
1745	if (!page)
1746		goto out;
1747
1748	if (is_zone_device_page(page))
1749		goto out_putpage;
1750
1751	err = 0;
1752	if (page_to_nid(page) == node)
1753		goto out_putpage;
1754
1755	err = -EACCES;
1756	if (page_mapcount(page) > 1 && !migrate_all)
1757		goto out_putpage;
1758
1759	if (PageHuge(page)) {
1760		if (PageHead(page)) {
1761			err = isolate_hugetlb(page, pagelist);
1762			if (!err)
1763				err = 1;
1764		}
1765	} else {
1766		struct page *head;
1767
1768		head = compound_head(page);
1769		err = isolate_lru_page(head);
1770		if (err)
1771			goto out_putpage;
1772
1773		err = 1;
1774		list_add_tail(&head->lru, pagelist);
1775		mod_node_page_state(page_pgdat(head),
1776			NR_ISOLATED_ANON + page_is_file_lru(head),
1777			thp_nr_pages(head));
1778	}
1779out_putpage:
1780	/*
1781	 * Either remove the duplicate refcount from
1782	 * isolate_lru_page() or drop the page ref if it was
1783	 * not isolated.
1784	 */
1785	put_page(page);
1786out:
1787	mmap_read_unlock(mm);
1788	return err;
1789}
1790
1791static int move_pages_and_store_status(struct mm_struct *mm, int node,
1792		struct list_head *pagelist, int __user *status,
1793		int start, int i, unsigned long nr_pages)
1794{
1795	int err;
1796
1797	if (list_empty(pagelist))
1798		return 0;
1799
1800	err = do_move_pages_to_node(mm, pagelist, node);
1801	if (err) {
1802		/*
1803		 * Positive err means the number of pages that
1804		 * failed to migrate.  Since we are going to
1805		 * abort and return the number of non-migrated
1806		 * pages, we need to include the rest of the
1807		 * nr_pages that have not been attempted as
1808		 * well.
1809		 */
1810		if (err > 0)
1811			err += nr_pages - i;
1812		return err;
1813	}
1814	return store_status(status, start, node, i - start);
1815}
1816
1817/*
1818 * Migrate an array of page addresses onto an array of nodes and fill
1819 * the corresponding array of status values.
1820 */
1821static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
1822			 unsigned long nr_pages,
1823			 const void __user * __user *pages,
1824			 const int __user *nodes,
1825			 int __user *status, int flags)
1826{
1827	int current_node = NUMA_NO_NODE;
1828	LIST_HEAD(pagelist);
1829	int start, i;
1830	int err = 0, err1;
1831
1832	lru_cache_disable();
1833
1834	for (i = start = 0; i < nr_pages; i++) {
1835		const void __user *p;
1836		unsigned long addr;
1837		int node;
1838
1839		err = -EFAULT;
1840		if (get_user(p, pages + i))
1841			goto out_flush;
1842		if (get_user(node, nodes + i))
1843			goto out_flush;
1844		addr = (unsigned long)untagged_addr(p);
1845
1846		err = -ENODEV;
1847		if (node < 0 || node >= MAX_NUMNODES)
1848			goto out_flush;
1849		if (!node_state(node, N_MEMORY))
1850			goto out_flush;
1851
1852		err = -EACCES;
1853		if (!node_isset(node, task_nodes))
1854			goto out_flush;
1855
1856		if (current_node == NUMA_NO_NODE) {
1857			current_node = node;
1858			start = i;
1859		} else if (node != current_node) {
1860			err = move_pages_and_store_status(mm, current_node,
1861					&pagelist, status, start, i, nr_pages);
1862			if (err)
1863				goto out;
1864			start = i;
1865			current_node = node;
1866		}
1867
1868		/*
1869		 * Errors in the page lookup or isolation are not fatal and we simply
1870		 * report them via the status array.
1871		 */
1872		err = add_page_for_migration(mm, addr, current_node,
1873				&pagelist, flags & MPOL_MF_MOVE_ALL);
1874
1875		if (err > 0) {
1876			/* The page is successfully queued for migration */
1877			continue;
1878		}
1879
1880		/*
1881		 * The move_pages() man page does not have an -EEXIST choice, so
1882		 * use -EFAULT instead.
1883		 */
1884		if (err == -EEXIST)
1885			err = -EFAULT;
1886
1887		/*
1888		 * If the page is already on the target node (!err), store the
1889		 * node, otherwise, store the err.
1890		 */
1891		err = store_status(status, i, err ? : current_node, 1);
1892		if (err)
1893			goto out_flush;
1894
1895		err = move_pages_and_store_status(mm, current_node, &pagelist,
1896				status, start, i, nr_pages);
1897		if (err) {
1898			/* We have accounted for page i */
1899			if (err > 0)
1900				err--;
1901			goto out;
1902		}
1903		current_node = NUMA_NO_NODE;
1904	}
1905out_flush:
1906	/* Make sure we do not overwrite the existing error */
1907	err1 = move_pages_and_store_status(mm, current_node, &pagelist,
1908				status, start, i, nr_pages);
1909	if (err >= 0)
1910		err = err1;
1911out:
1912	lru_cache_enable();
1913	return err;
1914}
1915
1916/*
1917 * Determine the nodes of an array of pages and store them in an array of status values.
1918 */
1919static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
1920				const void __user **pages, int *status)
1921{
1922	unsigned long i;
1923
1924	mmap_read_lock(mm);
1925
1926	for (i = 0; i < nr_pages; i++) {
1927		unsigned long addr = (unsigned long)(*pages);
1928		struct vm_area_struct *vma;
1929		struct page *page;
1930		int err = -EFAULT;
1931
1932		vma = vma_lookup(mm, addr);
1933		if (!vma)
1934			goto set_status;
1935
1936		/* FOLL_DUMP to ignore special (like zero) pages */
1937		page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
1938
1939		err = PTR_ERR(page);
1940		if (IS_ERR(page))
1941			goto set_status;
1942
1943		err = -ENOENT;
1944		if (!page)
1945			goto set_status;
1946
1947		if (!is_zone_device_page(page))
1948			err = page_to_nid(page);
1949
1950		put_page(page);
1951set_status:
1952		*status = err;
1953
1954		pages++;
1955		status++;
1956	}
1957
1958	mmap_read_unlock(mm);
1959}
1960
1961static int get_compat_pages_array(const void __user *chunk_pages[],
1962				  const void __user * __user *pages,
1963				  unsigned long chunk_nr)
1964{
1965	compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages;
1966	compat_uptr_t p;
1967	int i;
1968
1969	for (i = 0; i < chunk_nr; i++) {
1970		if (get_user(p, pages32 + i))
1971			return -EFAULT;
1972		chunk_pages[i] = compat_ptr(p);
1973	}
1974
1975	return 0;
1976}
1977
1978/*
1979 * Determine the nodes of a user array of pages and store them in
1980 * a user array of status values.
1981 */
1982static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
1983			 const void __user * __user *pages,
1984			 int __user *status)
1985{
1986#define DO_PAGES_STAT_CHUNK_NR 16UL
1987	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
1988	int chunk_status[DO_PAGES_STAT_CHUNK_NR];
1989
1990	while (nr_pages) {
1991		unsigned long chunk_nr = min(nr_pages, DO_PAGES_STAT_CHUNK_NR);
1992
1993		if (in_compat_syscall()) {
1994			if (get_compat_pages_array(chunk_pages, pages,
1995						   chunk_nr))
1996				break;
1997		} else {
1998			if (copy_from_user(chunk_pages, pages,
1999				      chunk_nr * sizeof(*chunk_pages)))
2000				break;
2001		}
2002
2003		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
2004
2005		if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
2006			break;
2007
2008		pages += chunk_nr;
2009		status += chunk_nr;
2010		nr_pages -= chunk_nr;
2011	}
2012	return nr_pages ? -EFAULT : 0;
2013}
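/*
 * Userspace view of the query path above: move_pages(2) with a NULL node
 * array migrates nothing and only reports, per page, the node the page
 * currently resides on (or a negative errno).  A hedged sketch using the
 * libnuma wrapper from <numaif.h>; 'addr' is assumed to be a valid,
 * faulted-in user address.
 */
#if 0	/* userspace example, not kernel code */
#include <numaif.h>
#include <stdio.h>

static void print_node_of(void *addr)
{
	void *pages[1] = { addr };
	int status[1];

	if (move_pages(0 /* current process */, 1, pages, NULL, status, 0) == 0)
		printf("page at %p is on node %d\n", addr, status[0]);
}
#endif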
2014
2015static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
2016{
2017	struct task_struct *task;
2018	struct mm_struct *mm;
2019
2020	/*
2021	 * There is no need to check if the current process has the right to
2022	 * modify the specified process when they are the same.
2023	 */
2024	if (!pid) {
2025		mmget(current->mm);
2026		*mem_nodes = cpuset_mems_allowed(current);
2027		return current->mm;
2028	}
2029
2030	/* Find the mm_struct */
2031	rcu_read_lock();
2032	task = find_task_by_vpid(pid);
2033	if (!task) {
2034		rcu_read_unlock();
2035		return ERR_PTR(-ESRCH);
2036	}
2037	get_task_struct(task);
2038
2039	/*
2040	 * Check if this process has the right to modify the specified
2041	 * process. Use the regular "ptrace_may_access()" checks.
2042	 */
2043	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
2044		rcu_read_unlock();
2045		mm = ERR_PTR(-EPERM);
2046		goto out;
2047	}
2048	rcu_read_unlock();
2049
2050	mm = ERR_PTR(security_task_movememory(task));
2051	if (IS_ERR(mm))
2052		goto out;
2053	*mem_nodes = cpuset_mems_allowed(task);
2054	mm = get_task_mm(task);
2055out:
2056	put_task_struct(task);
2057	if (!mm)
2058		mm = ERR_PTR(-EINVAL);
2059	return mm;
2060}
2061
2062/*
2063 * Move a list of pages in the address space of the currently executing
2064 * process.
2065 */
2066static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
2067			     const void __user * __user *pages,
2068			     const int __user *nodes,
2069			     int __user *status, int flags)
2070{
2071	struct mm_struct *mm;
2072	int err;
2073	nodemask_t task_nodes;
2074
2075	/* Check flags */
2076	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
2077		return -EINVAL;
2078
2079	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
2080		return -EPERM;
2081
2082	mm = find_mm_struct(pid, &task_nodes);
2083	if (IS_ERR(mm))
2084		return PTR_ERR(mm);
2085
2086	if (nodes)
2087		err = do_pages_move(mm, task_nodes, nr_pages, pages,
2088				    nodes, status, flags);
2089	else
2090		err = do_pages_stat(mm, nr_pages, pages, status);
2091
2092	mmput(mm);
2093	return err;
2094}
2095
2096SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
2097		const void __user * __user *, pages,
2098		const int __user *, nodes,
2099		int __user *, status, int, flags)
2100{
2101	return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
2102}
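/*
 * Example of driving this syscall from userspace via the libnuma wrapper
 * (a hedged sketch: target node 1 is an assumption, and error handling is
 * trimmed; link with -lnuma).
 */
#if 0	/* userspace example, not kernel code */
#include <numaif.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	long pagesize = sysconf(_SC_PAGESIZE);
	void *pages[1];
	int nodes[1] = { 1 };	/* assumed destination node */
	int status[1];

	if (posix_memalign(&pages[0], pagesize, pagesize))
		return 1;
	memset(pages[0], 0, pagesize);	/* fault the page in first */

	if (move_pages(0 /* self */, 1, pages, nodes, status, MPOL_MF_MOVE) == 0)
		printf("status[0] = %d\n", status[0]);
	return 0;
}
#endif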
2103
2104#ifdef CONFIG_NUMA_BALANCING
2105/*
2106 * Returns true if this is a safe migration target node for misplaced NUMA
2107 * pages. Currently it only checks the watermarks, which is crude.
2108 */
2109static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
2110				   unsigned long nr_migrate_pages)
2111{
2112	int z;
2113
2114	for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2115		struct zone *zone = pgdat->node_zones + z;
2116
2117		if (!managed_zone(zone))
2118			continue;
2119
2120		/* Avoid waking kswapd by allocating pages_to_migrate pages. */
2121		if (!zone_watermark_ok(zone, 0,
2122				       high_wmark_pages(zone) +
2123				       nr_migrate_pages,
2124				       ZONE_MOVABLE, 0))
2125			continue;
2126		return true;
2127	}
2128	return false;
2129}
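/*
 * Worked example of the watermark test above, with assumed numbers and
 * ignoring lowmem reserves: if a zone's high watermark is 1024 pages and
 * nr_migrate_pages is 512, the zone only qualifies as a migration target
 * while it has more than 1024 + 512 free pages, i.e. the migration itself
 * can never push the zone below its high watermark and wake kswapd.
 */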
2130
2131static struct page *alloc_misplaced_dst_page(struct page *page,
2132					   unsigned long data)
2133{
2134	int nid = (int) data;
2135	int order = compound_order(page);
2136	gfp_t gfp = __GFP_THISNODE;
2137	struct folio *new;
2138
2139	if (order > 0)
2140		gfp |= GFP_TRANSHUGE_LIGHT;
2141	else {
2142		gfp |= GFP_HIGHUSER_MOVABLE | __GFP_NOMEMALLOC | __GFP_NORETRY |
2143			__GFP_NOWARN;
2144		gfp &= ~__GFP_RECLAIM;
2145	}
2146	new = __folio_alloc_node(gfp, order, nid);
2147
2148	return &new->page;
2149}
2150
2151static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
2152{
2153	int nr_pages = thp_nr_pages(page);
2154	int order = compound_order(page);
2155
2156	VM_BUG_ON_PAGE(order && !PageTransHuge(page), page);
2157
2158	/* Do not migrate THP mapped by multiple processes */
2159	if (PageTransHuge(page) && total_mapcount(page) > 1)
2160		return 0;
2161
2162	/* Avoid migrating to a node that is nearly full */
2163	if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
2164		int z;
2165
2166		if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING))
2167			return 0;
2168		for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2169			if (managed_zone(pgdat->node_zones + z))
2170				break;
2171		}
2172		wakeup_kswapd(pgdat->node_zones + z, 0, order, ZONE_MOVABLE);
2173		return 0;
2174	}
2175
2176	if (isolate_lru_page(page))
2177		return 0;
2178
2179	mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_is_file_lru(page),
2180			    nr_pages);
2181
2182	/*
2183	 * Isolating the page has taken another reference, so the
2184	 * caller's reference can be safely dropped without the page
2185	 * disappearing underneath us during migration.
2186	 */
2187	put_page(page);
2188	return 1;
2189}
2190
2191/*
2192 * Attempt to migrate a misplaced page to the specified destination
2193 * node. Caller is expected to have an elevated reference count on
2194 * the page that will be dropped by this function before returning.
2195 */
2196int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
2197			   int node)
2198{
2199	pg_data_t *pgdat = NODE_DATA(node);
2200	int isolated;
2201	int nr_remaining;
2202	unsigned int nr_succeeded;
2203	LIST_HEAD(migratepages);
2204	int nr_pages = thp_nr_pages(page);
2205
2206	/*
2207	 * Don't migrate file pages that are mapped in multiple processes
2208	 * with execute permissions as they are probably shared libraries.
2209	 */
2210	if (page_mapcount(page) != 1 && page_is_file_lru(page) &&
2211	    (vma->vm_flags & VM_EXEC))
2212		goto out;
2213
2214	/*
2215	 * Also do not migrate dirty pages as not all filesystems can move
2216	 * dirty pages in MIGRATE_ASYNC mode which is a waste of cycles.
2217	 */
2218	if (page_is_file_lru(page) && PageDirty(page))
2219		goto out;
2220
2221	isolated = numamigrate_isolate_page(pgdat, page);
2222	if (!isolated)
2223		goto out;
2224
2225	list_add(&page->lru, &migratepages);
2226	nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
2227				     NULL, node, MIGRATE_ASYNC,
2228				     MR_NUMA_MISPLACED, &nr_succeeded);
2229	if (nr_remaining) {
2230		if (!list_empty(&migratepages)) {
2231			list_del(&page->lru);
2232			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
2233					page_is_file_lru(page), -nr_pages);
2234			putback_lru_page(page);
2235		}
2236		isolated = 0;
2237	}
2238	if (nr_succeeded) {
2239		count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
2240		if (!node_is_toptier(page_to_nid(page)) && node_is_toptier(node))
2241			mod_node_page_state(pgdat, PGPROMOTE_SUCCESS,
2242					    nr_succeeded);
2243	}
2244	BUG_ON(!list_empty(&migratepages));
2245	return isolated;
2246
2247out:
2248	put_page(page);
2249	return 0;
2250}
2251#endif /* CONFIG_NUMA_BALANCING */
2252#endif /* CONFIG_NUMA */
v6.9.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Memory Migration functionality - linux/mm/migrate.c
   4 *
   5 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
   6 *
   7 * Page migration was first developed in the context of the memory hotplug
   8 * project. The main authors of the migration code are:
   9 *
  10 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
  11 * Hirokazu Takahashi <taka@valinux.co.jp>
  12 * Dave Hansen <haveblue@us.ibm.com>
  13 * Christoph Lameter
  14 */
  15
  16#include <linux/migrate.h>
  17#include <linux/export.h>
  18#include <linux/swap.h>
  19#include <linux/swapops.h>
  20#include <linux/pagemap.h>
  21#include <linux/buffer_head.h>
  22#include <linux/mm_inline.h>
  23#include <linux/nsproxy.h>
  24#include <linux/ksm.h>
  25#include <linux/rmap.h>
  26#include <linux/topology.h>
  27#include <linux/cpu.h>
  28#include <linux/cpuset.h>
  29#include <linux/writeback.h>
  30#include <linux/mempolicy.h>
  31#include <linux/vmalloc.h>
  32#include <linux/security.h>
  33#include <linux/backing-dev.h>
  34#include <linux/compaction.h>
  35#include <linux/syscalls.h>
  36#include <linux/compat.h>
  37#include <linux/hugetlb.h>
  38#include <linux/hugetlb_cgroup.h>
  39#include <linux/gfp.h>
  40#include <linux/pfn_t.h>
  41#include <linux/memremap.h>
  42#include <linux/userfaultfd_k.h>
  43#include <linux/balloon_compaction.h>
  44#include <linux/page_idle.h>
  45#include <linux/page_owner.h>
  46#include <linux/sched/mm.h>
  47#include <linux/ptrace.h>
  48#include <linux/oom.h>
  49#include <linux/memory.h>
  50#include <linux/random.h>
  51#include <linux/sched/sysctl.h>
  52#include <linux/memory-tiers.h>
  53
  54#include <asm/tlbflush.h>
  55
  56#include <trace/events/migrate.h>
  57
  58#include "internal.h"
  59
  60bool isolate_movable_page(struct page *page, isolate_mode_t mode)
  61{
  62	struct folio *folio = folio_get_nontail_page(page);
  63	const struct movable_operations *mops;
  64
  65	/*
  66	 * Avoid burning cycles with pages that are yet under __free_pages(),
  67	 * or just got freed under us.
  68	 *
  69	 * In case we 'win' a race for a movable page being freed under us and
  70 * raise its refcount, preventing __free_pages() from doing its job,
  71 * the folio_put() at the end of this block will take care of
  72 * releasing this page, thus avoiding a nasty leak.
  73	 */
  74	if (!folio)
  75		goto out;
  76
  77	if (unlikely(folio_test_slab(folio)))
  78		goto out_putfolio;
  79	/* Pairs with smp_wmb() in slab freeing, e.g. SLUB's __free_slab() */
  80	smp_rmb();
  81	/*
  82	 * Check the movable flag before taking the page lock because
  83	 * we use non-atomic bitops on newly allocated page flags, so
  84	 * unconditionally grabbing the lock would ruin the page owner's side.
  85	 */
  86	if (unlikely(!__folio_test_movable(folio)))
  87		goto out_putfolio;
  88	/* Pairs with smp_wmb() in slab allocation, e.g. SLUB's alloc_slab_page() */
  89	smp_rmb();
  90	if (unlikely(folio_test_slab(folio)))
  91		goto out_putfolio;
  92
  93	/*
  94	 * As movable pages are not isolated from LRU lists, concurrent
  95	 * compaction threads can race against page migration functions
  96 * as well as race against a page being released.
  97	 *
  98	 * In order to avoid having an already isolated movable page
  99	 * being (wrongly) re-isolated while it is under migration,
 100	 * or to avoid attempting to isolate pages being released,
 101	 * let's be sure we have the page lock
 102	 * before proceeding with the movable page isolation steps.
 103	 */
 104	if (unlikely(!folio_trylock(folio)))
 105		goto out_putfolio;
 106
 107	if (!folio_test_movable(folio) || folio_test_isolated(folio))
 108		goto out_no_isolated;
 109
 110	mops = folio_movable_ops(folio);
 111	VM_BUG_ON_FOLIO(!mops, folio);
 112
 113	if (!mops->isolate_page(&folio->page, mode))
 114		goto out_no_isolated;
 115
 116	/* Driver shouldn't use PG_isolated bit of page->flags */
 117	WARN_ON_ONCE(folio_test_isolated(folio));
 118	folio_set_isolated(folio);
 119	folio_unlock(folio);
 120
 121	return true;
 122
 123out_no_isolated:
 124	folio_unlock(folio);
 125out_putfolio:
 126	folio_put(folio);
 127out:
 128	return false;
 129}
 130
 131static void putback_movable_folio(struct folio *folio)
 132{
 133	const struct movable_operations *mops = folio_movable_ops(folio);
 134
 135	mops->putback_page(&folio->page);
 136	folio_clear_isolated(folio);
 137}
 138
 139/*
 140 * Put previously isolated pages back onto the appropriate lists
 141 * from where they were once taken off for compaction/migration.
 142 *
 143 * This function shall be used whenever the isolated pageset has been
 144 * built from LRU, balloon, or hugetlbfs pages. See
 145 * isolate_migratepages_range() and isolate_hugetlb().
 146 */
 147void putback_movable_pages(struct list_head *l)
 148{
 149	struct folio *folio;
 150	struct folio *folio2;
 151
 152	list_for_each_entry_safe(folio, folio2, l, lru) {
 153		if (unlikely(folio_test_hugetlb(folio))) {
 154			folio_putback_active_hugetlb(folio);
 155			continue;
 156		}
 157		list_del(&folio->lru);
 158		/*
 159		 * We isolated a non-LRU movable folio, so here we can use
 160		 * __folio_test_movable() because an LRU folio's mapping
 161		 * cannot have PAGE_MAPPING_MOVABLE set.
 162		 */
 163		if (unlikely(__folio_test_movable(folio))) {
 164			VM_BUG_ON_FOLIO(!folio_test_isolated(folio), folio);
 165			folio_lock(folio);
 166			if (folio_test_movable(folio))
 167				putback_movable_folio(folio);
 168			else
 169				folio_clear_isolated(folio);
 170			folio_unlock(folio);
 171			folio_put(folio);
 172		} else {
 173			node_stat_mod_folio(folio, NR_ISOLATED_ANON +
 174					folio_is_file_lru(folio), -folio_nr_pages(folio));
 175			folio_putback_lru(folio);
 176		}
 177	}
 178}
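/*
 * A hedged sketch of the isolate/putback contract from a compaction-style
 * caller: every page that isolate_movable_page() accepts must later either
 * be migrated or handed back through putback_movable_pages().  The function
 * name and the bare list handling are illustrative; the real callers are
 * the loops in mm/compaction.c and mm/memory_hotplug.c.
 */
static void example_isolate_then_putback(struct page *page)
{
	LIST_HEAD(pagelist);

	if (!isolate_movable_page(page, ISOLATE_UNEVICTABLE))
		return;	/* lost a race: page freed, reused, or already isolated */

	list_add(&page->lru, &pagelist);
	/* ... a real caller would try migrate_pages() here ... */
	putback_movable_pages(&pagelist);
}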
 179
 180/*
 181 * Restore a potential migration pte to a working pte entry
 182 */
 183static bool remove_migration_pte(struct folio *folio,
 184		struct vm_area_struct *vma, unsigned long addr, void *old)
 185{
 186	DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION);
 187
 188	while (page_vma_mapped_walk(&pvmw)) {
 189		rmap_t rmap_flags = RMAP_NONE;
 190		pte_t old_pte;
 191		pte_t pte;
 192		swp_entry_t entry;
 193		struct page *new;
 194		unsigned long idx = 0;
 195
 196		/* pgoff is invalid for ksm pages, but they are never large */
 197		if (folio_test_large(folio) && !folio_test_hugetlb(folio))
 198			idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff;
 199		new = folio_page(folio, idx);
 200
 201#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
 202		/* PMD-mapped THP migration entry */
 203		if (!pvmw.pte) {
 204			VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
 205					!folio_test_pmd_mappable(folio), folio);
 206			remove_migration_pmd(&pvmw, new);
 207			continue;
 208		}
 209#endif
 210
 211		folio_get(folio);
 212		pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
 213		old_pte = ptep_get(pvmw.pte);
 214
 215		entry = pte_to_swp_entry(old_pte);
 216		if (!is_migration_entry_young(entry))
 217			pte = pte_mkold(pte);
 218		if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
 219			pte = pte_mkdirty(pte);
 220		if (pte_swp_soft_dirty(old_pte))
 221			pte = pte_mksoft_dirty(pte);
 222		else
 223			pte = pte_clear_soft_dirty(pte);
 224
 225		if (is_writable_migration_entry(entry))
 226			pte = pte_mkwrite(pte, vma);
 227		else if (pte_swp_uffd_wp(old_pte))
 228			pte = pte_mkuffd_wp(pte);
 229
 230		if (folio_test_anon(folio) && !is_readable_migration_entry(entry))
 231			rmap_flags |= RMAP_EXCLUSIVE;
 232
 233		if (unlikely(is_device_private_page(new))) {
 234			if (pte_write(pte))
 235				entry = make_writable_device_private_entry(
 236							page_to_pfn(new));
 237			else
 238				entry = make_readable_device_private_entry(
 239							page_to_pfn(new));
 240			pte = swp_entry_to_pte(entry);
 241			if (pte_swp_soft_dirty(old_pte))
 242				pte = pte_swp_mksoft_dirty(pte);
 243			if (pte_swp_uffd_wp(old_pte))
 244				pte = pte_swp_mkuffd_wp(pte);
 245		}
 246
 247#ifdef CONFIG_HUGETLB_PAGE
 248		if (folio_test_hugetlb(folio)) {
 249			struct hstate *h = hstate_vma(vma);
 250			unsigned int shift = huge_page_shift(h);
 251			unsigned long psize = huge_page_size(h);
 252
 253			pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
 254			if (folio_test_anon(folio))
 255				hugetlb_add_anon_rmap(folio, vma, pvmw.address,
 256						      rmap_flags);
 257			else
 258				hugetlb_add_file_rmap(folio);
 259			set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte,
 260					psize);
 261		} else
 262#endif
 263		{
 264			if (folio_test_anon(folio))
 265				folio_add_anon_rmap_pte(folio, new, vma,
 266							pvmw.address, rmap_flags);
 267			else
 268				folio_add_file_rmap_pte(folio, new, vma);
 269			set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
 270		}
 271		if (vma->vm_flags & VM_LOCKED)
 272			mlock_drain_local();
 273
 274		trace_remove_migration_pte(pvmw.address, pte_val(pte),
 275					   compound_order(new));
 276
 277		/* No need to invalidate - it was non-present before */
 278		update_mmu_cache(vma, pvmw.address, pvmw.pte);
 279	}
 280
 281	return true;
 282}
 283
 284/*
 285 * Get rid of all migration entries and replace them by
 286 * references to the indicated page.
 287 */
 288void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked)
 289{
 290	struct rmap_walk_control rwc = {
 291		.rmap_one = remove_migration_pte,
 292		.arg = src,
 293	};
 294
 295	if (locked)
 296		rmap_walk_locked(dst, &rwc);
 297	else
 298		rmap_walk(dst, &rwc);
 299}
 300
 301/*
 302 * Something used the pte of a page under migration. We need to
 303 * get to the page and wait until migration is finished.
 304 * When we return from this function the fault will be retried.
 305 */
 306void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
 307			  unsigned long address)
 308{
 309	spinlock_t *ptl;
 310	pte_t *ptep;
 311	pte_t pte;
 312	swp_entry_t entry;
 313
 314	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
 315	if (!ptep)
 316		return;
 317
 318	pte = ptep_get(ptep);
 319	pte_unmap(ptep);
 320
 321	if (!is_swap_pte(pte))
 322		goto out;
 323
 324	entry = pte_to_swp_entry(pte);
 325	if (!is_migration_entry(entry))
 326		goto out;
 327
 328	migration_entry_wait_on_locked(entry, ptl);
 329	return;
 330out:
 331	spin_unlock(ptl);
 332}
 333
 334#ifdef CONFIG_HUGETLB_PAGE
 335/*
 336 * The vma read lock must be held upon entry. Holding that lock prevents either
 337 * the pte or the ptl from being freed.
 338 *
 339 * This function will release the vma lock before returning.
 340 */
 341void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *ptep)
 342{
 343	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, ptep);
 344	pte_t pte;
 345
 346	hugetlb_vma_assert_locked(vma);
 347	spin_lock(ptl);
 348	pte = huge_ptep_get(ptep);
 349
 350	if (unlikely(!is_hugetlb_entry_migration(pte))) {
 351		spin_unlock(ptl);
 352		hugetlb_vma_unlock_read(vma);
 353	} else {
 354		/*
 355		 * If a migration entry existed, it is safe to release the vma
 356		 * lock here because the pgtable page won't be freed while the
 357		 * pgtable lock is held.  See the comment right above the
 358		 * pgtable lock release in migration_entry_wait_on_locked().
 359		 */
 360		hugetlb_vma_unlock_read(vma);
 361		migration_entry_wait_on_locked(pte_to_swp_entry(pte), ptl);
 362	}
 363}
 364#endif
 365
 366#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
 367void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
 368{
 369	spinlock_t *ptl;
 370
 371	ptl = pmd_lock(mm, pmd);
 372	if (!is_pmd_migration_entry(*pmd))
 373		goto unlock;
 374	migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), ptl);
 375	return;
 376unlock:
 377	spin_unlock(ptl);
 378}
 379#endif
 380
 381static int folio_expected_refs(struct address_space *mapping,
 382		struct folio *folio)
 383{
 384	int refs = 1;
 385	if (!mapping)
 386		return refs;
 387
 388	refs += folio_nr_pages(folio);
 389	if (folio_test_private(folio))
 390		refs++;
 391
 392	return refs;
 393}
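/*
 * Worked example of the arithmetic above, for an assumed order-0 pagecache
 * folio with buffer heads attached: 1 (the migration caller's reference)
 * + folio_nr_pages() == 1 (the page cache reference) + 1 (folio private
 * data) = 3, matching the "3 for pages with a mapping and PagePrivate"
 * rule in the comment below.
 */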
 394
 395/*
 396 * Replace the page in the mapping.
 397 *
 398 * The number of remaining references must be:
 399 * 1 for anonymous pages without a mapping
 400 * 2 for pages with a mapping
 401 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
 402 */
 403int folio_migrate_mapping(struct address_space *mapping,
 404		struct folio *newfolio, struct folio *folio, int extra_count)
 405{
 406	XA_STATE(xas, &mapping->i_pages, folio_index(folio));
 407	struct zone *oldzone, *newzone;
 408	int dirty;
 409	int expected_count = folio_expected_refs(mapping, folio) + extra_count;
 410	long nr = folio_nr_pages(folio);
 411	long entries, i;
 412
 413	if (!mapping) {
 414		/* Anonymous page without mapping */
 415		if (folio_ref_count(folio) != expected_count)
 416			return -EAGAIN;
 417
 418		/* No turning back from here */
 419		newfolio->index = folio->index;
 420		newfolio->mapping = folio->mapping;
 421		if (folio_test_swapbacked(folio))
 422			__folio_set_swapbacked(newfolio);
 423
 424		return MIGRATEPAGE_SUCCESS;
 425	}
 426
 427	oldzone = folio_zone(folio);
 428	newzone = folio_zone(newfolio);
 429
 430	xas_lock_irq(&xas);
 431	if (!folio_ref_freeze(folio, expected_count)) {
 432		xas_unlock_irq(&xas);
 433		return -EAGAIN;
 434	}
 435
 436	/*
 437	 * Now we know that no one else is looking at the folio:
 438	 * no turning back from here.
 439	 */
 440	newfolio->index = folio->index;
 441	newfolio->mapping = folio->mapping;
 442	folio_ref_add(newfolio, nr); /* add cache reference */
 443	if (folio_test_swapbacked(folio)) {
 444		__folio_set_swapbacked(newfolio);
 445		if (folio_test_swapcache(folio)) {
 446			folio_set_swapcache(newfolio);
 447			newfolio->private = folio_get_private(folio);
 448		}
 449		entries = nr;
 450	} else {
 451		VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
 452		entries = 1;
 453	}
 454
 455	/* Move dirty while page refs frozen and newpage not yet exposed */
 456	dirty = folio_test_dirty(folio);
 457	if (dirty) {
 458		folio_clear_dirty(folio);
 459		folio_set_dirty(newfolio);
 460	}
 461
 462	/* Swap cache still stores N entries instead of a high-order entry */
 463	for (i = 0; i < entries; i++) {
 464		xas_store(&xas, newfolio);
 465		xas_next(&xas);
 466	}
 467
 468	/*
 469	 * Drop cache reference from old page by unfreezing
 470	 * to one less reference.
 471	 * We know this isn't the last reference.
 472	 */
 473	folio_ref_unfreeze(folio, expected_count - nr);
 474
 475	xas_unlock(&xas);
 476	/* Leave irq disabled to prevent preemption while updating stats */
 477
 478	/*
 479	 * If moved to a different zone then also account
 480	 * the page for that zone. Other VM counters will be
 481	 * taken care of when we establish references to the
 482	 * new page and drop references to the old page.
 483	 *
 484	 * Note that anonymous pages are accounted for
 485	 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
 486	 * are mapped to swap space.
 487	 */
 488	if (newzone != oldzone) {
 489		struct lruvec *old_lruvec, *new_lruvec;
 490		struct mem_cgroup *memcg;
 491
 492		memcg = folio_memcg(folio);
 493		old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
 494		new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
 495
 496		__mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
 497		__mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
 498		if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
 499			__mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
 500			__mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
 501
 502			if (folio_test_pmd_mappable(folio)) {
 503				__mod_lruvec_state(old_lruvec, NR_SHMEM_THPS, -nr);
 504				__mod_lruvec_state(new_lruvec, NR_SHMEM_THPS, nr);
 505			}
 506		}
 507#ifdef CONFIG_SWAP
 508		if (folio_test_swapcache(folio)) {
 509			__mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
 510			__mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
 511		}
 512#endif
 513		if (dirty && mapping_can_writeback(mapping)) {
 514			__mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
 515			__mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
 516			__mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
 517			__mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
 518		}
 519	}
 520	local_irq_enable();
 521
 522	return MIGRATEPAGE_SUCCESS;
 523}
 524EXPORT_SYMBOL(folio_migrate_mapping);
 525
 526/*
 527 * The expected number of remaining references is the same as that
 528 * of folio_migrate_mapping().
 529 */
 530int migrate_huge_page_move_mapping(struct address_space *mapping,
 531				   struct folio *dst, struct folio *src)
 532{
 533	XA_STATE(xas, &mapping->i_pages, folio_index(src));
 534	int expected_count;
 535
 536	xas_lock_irq(&xas);
 537	expected_count = folio_expected_refs(mapping, src);
 538	if (!folio_ref_freeze(src, expected_count)) {
 539		xas_unlock_irq(&xas);
 540		return -EAGAIN;
 541	}
 542
 543	dst->index = src->index;
 544	dst->mapping = src->mapping;
 545
 546	folio_ref_add(dst, folio_nr_pages(dst));
 547
 548	xas_store(&xas, dst);
 549
 550	folio_ref_unfreeze(src, expected_count - folio_nr_pages(src));
 551
 552	xas_unlock_irq(&xas);
 553
 554	return MIGRATEPAGE_SUCCESS;
 555}
 556
 557/*
 558 * Copy the flags and some other ancillary information
 559 */
 560void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
 561{
 562	int cpupid;
 563
 564	if (folio_test_error(folio))
 565		folio_set_error(newfolio);
 566	if (folio_test_referenced(folio))
 567		folio_set_referenced(newfolio);
 568	if (folio_test_uptodate(folio))
 569		folio_mark_uptodate(newfolio);
 570	if (folio_test_clear_active(folio)) {
 571		VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio);
 572		folio_set_active(newfolio);
 573	} else if (folio_test_clear_unevictable(folio))
 574		folio_set_unevictable(newfolio);
 575	if (folio_test_workingset(folio))
 576		folio_set_workingset(newfolio);
 577	if (folio_test_checked(folio))
 578		folio_set_checked(newfolio);
 579	/*
 580	 * PG_anon_exclusive (-> PG_mappedtodisk) is always migrated via
 581	 * migration entries. We can still have PG_anon_exclusive set on the
 582	 * effectively unmapped and unreferenced first sub-page of an
 583	 * anonymous THP: we can simply copy it here via PG_mappedtodisk.
 584	 */
 585	if (folio_test_mappedtodisk(folio))
 586		folio_set_mappedtodisk(newfolio);
 587
 588	/* Move dirty on pages not done by folio_migrate_mapping() */
 589	if (folio_test_dirty(folio))
 590		folio_set_dirty(newfolio);
 591
 592	if (folio_test_young(folio))
 593		folio_set_young(newfolio);
 594	if (folio_test_idle(folio))
 595		folio_set_idle(newfolio);
 596
 597	/*
 598	 * Copy NUMA information to the new page, to prevent over-eager
 599	 * future migrations of this same page.
 600	 */
 601	cpupid = folio_xchg_last_cpupid(folio, -1);
 602	/*
 603	 * In memory tiering mode, when migrating between slow and fast
 604	 * memory nodes, reset the cpupid, because it is used to record
 605	 * the page access time on the slow memory node.
 606	 */
 607	if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) {
 608		bool f_toptier = node_is_toptier(folio_nid(folio));
 609		bool t_toptier = node_is_toptier(folio_nid(newfolio));
 610
 611		if (f_toptier != t_toptier)
 612			cpupid = -1;
 613	}
 614	folio_xchg_last_cpupid(newfolio, cpupid);
 615
 616	folio_migrate_ksm(newfolio, folio);
 617	/*
 618	 * Please do not reorder this without considering how mm/ksm.c's
 619	 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
 620	 */
 621	if (folio_test_swapcache(folio))
 622		folio_clear_swapcache(folio);
 623	folio_clear_private(folio);
 624
 625	/* page->private contains hugetlb specific flags */
 626	if (!folio_test_hugetlb(folio))
 627		folio->private = NULL;
 628
 629	/*
 630	 * If any waiters have accumulated on the new page then
 631	 * wake them up.
 632	 */
 633	if (folio_test_writeback(newfolio))
 634		folio_end_writeback(newfolio);
 635
 636	/*
 637	 * PG_readahead shares the same bit with PG_reclaim.  The above
 638	 * folio_end_writeback() may clear PG_readahead mistakenly, so set
 639	 * the bit after that.
 640	 */
 641	if (folio_test_readahead(folio))
 642		folio_set_readahead(newfolio);
 643
 644	folio_copy_owner(newfolio, folio);
 645
 646	mem_cgroup_migrate(folio, newfolio);
 647}
 648EXPORT_SYMBOL(folio_migrate_flags);
 649
 650void folio_migrate_copy(struct folio *newfolio, struct folio *folio)
 651{
 652	folio_copy(newfolio, folio);
 653	folio_migrate_flags(newfolio, folio);
 654}
 655EXPORT_SYMBOL(folio_migrate_copy);
 656
 657/************************************************************
 658 *                    Migration functions
 659 ***********************************************************/
 660
 661int migrate_folio_extra(struct address_space *mapping, struct folio *dst,
 662		struct folio *src, enum migrate_mode mode, int extra_count)
 663{
 664	int rc;
 665
 666	BUG_ON(folio_test_writeback(src));	/* Writeback must be complete */
 667
 668	rc = folio_migrate_mapping(mapping, dst, src, extra_count);
 669
 670	if (rc != MIGRATEPAGE_SUCCESS)
 671		return rc;
 672
 673	if (mode != MIGRATE_SYNC_NO_COPY)
 674		folio_migrate_copy(dst, src);
 675	else
 676		folio_migrate_flags(dst, src);
 677	return MIGRATEPAGE_SUCCESS;
 678}
 679
 680/**
 681 * migrate_folio() - Simple folio migration.
 682 * @mapping: The address_space containing the folio.
 683 * @dst: The folio to migrate the data to.
 684 * @src: The folio containing the current data.
 685 * @mode: How to migrate the page.
 686 *
 687 * Common logic to directly migrate a single LRU folio suitable for
 688 * folios that do not use PagePrivate/PagePrivate2.
 689 *
 690 * Folios are locked upon entry and exit.
 691 */
 692int migrate_folio(struct address_space *mapping, struct folio *dst,
 693		struct folio *src, enum migrate_mode mode)
 694{
 695	return migrate_folio_extra(mapping, dst, src, mode, 0);
 696}
 697EXPORT_SYMBOL(migrate_folio);
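/*
 * Typical wiring, as a hedged example: a filesystem whose folios carry no
 * private data can point its address_space_operations directly at
 * migrate_folio (shmem does this in-tree; the struct below is
 * illustrative, not from this file).
 */
static const struct address_space_operations example_simple_aops = {
	.migrate_folio	= migrate_folio,
};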
 698
 699#ifdef CONFIG_BUFFER_HEAD
 700/* Returns true if all buffers are successfully locked */
 701static bool buffer_migrate_lock_buffers(struct buffer_head *head,
 702							enum migrate_mode mode)
 703{
 704	struct buffer_head *bh = head;
 705	struct buffer_head *failed_bh;
 706
 707	do {
 708		if (!trylock_buffer(bh)) {
 709			if (mode == MIGRATE_ASYNC)
 710				goto unlock;
 711			if (mode == MIGRATE_SYNC_LIGHT && !buffer_uptodate(bh))
 712				goto unlock;
 713			lock_buffer(bh);
 714		}
 715
 716		bh = bh->b_this_page;
 717	} while (bh != head);
 718
 719	return true;
 720
 721unlock:
 722	/* We failed to lock the buffer and cannot stall. */
 723	failed_bh = bh;
 724	bh = head;
 725	while (bh != failed_bh) {
 726		unlock_buffer(bh);
 727		bh = bh->b_this_page;
 728	}
 729
 730	return false;
 731}
 732
 733static int __buffer_migrate_folio(struct address_space *mapping,
 734		struct folio *dst, struct folio *src, enum migrate_mode mode,
 735		bool check_refs)
 736{
 737	struct buffer_head *bh, *head;
 738	int rc;
 739	int expected_count;
 740
 741	head = folio_buffers(src);
 742	if (!head)
 743		return migrate_folio(mapping, dst, src, mode);
 744
 745	/* Check that the folio does not have extra refs before we do more work */
 746	expected_count = folio_expected_refs(mapping, src);
 747	if (folio_ref_count(src) != expected_count)
 748		return -EAGAIN;
 749
 750	if (!buffer_migrate_lock_buffers(head, mode))
 751		return -EAGAIN;
 752
 753	if (check_refs) {
 754		bool busy;
 755		bool invalidated = false;
 756
 757recheck_buffers:
 758		busy = false;
 759		spin_lock(&mapping->i_private_lock);
 760		bh = head;
 761		do {
 762			if (atomic_read(&bh->b_count)) {
 763				busy = true;
 764				break;
 765			}
 766			bh = bh->b_this_page;
 767		} while (bh != head);
 768		if (busy) {
 769			if (invalidated) {
 770				rc = -EAGAIN;
 771				goto unlock_buffers;
 772			}
 773			spin_unlock(&mapping->i_private_lock);
 774			invalidate_bh_lrus();
 775			invalidated = true;
 776			goto recheck_buffers;
 777		}
 778	}
 779
 780	rc = folio_migrate_mapping(mapping, dst, src, 0);
 781	if (rc != MIGRATEPAGE_SUCCESS)
 782		goto unlock_buffers;
 783
 784	folio_attach_private(dst, folio_detach_private(src));
 785
 786	bh = head;
 787	do {
 788		folio_set_bh(bh, dst, bh_offset(bh));
 789		bh = bh->b_this_page;
 790	} while (bh != head);
 791
 792	if (mode != MIGRATE_SYNC_NO_COPY)
 793		folio_migrate_copy(dst, src);
 794	else
 795		folio_migrate_flags(dst, src);
 796
 797	rc = MIGRATEPAGE_SUCCESS;
 798unlock_buffers:
 799	if (check_refs)
 800		spin_unlock(&mapping->i_private_lock);
 801	bh = head;
 802	do {
 803		unlock_buffer(bh);
 804		bh = bh->b_this_page;
 805	} while (bh != head);
 806
 807	return rc;
 808}
 809
 810/**
 811 * buffer_migrate_folio() - Migration function for folios with buffers.
 812 * @mapping: The address space containing @src.
 813 * @dst: The folio to migrate to.
 814 * @src: The folio to migrate from.
 815 * @mode: How to migrate the folio.
 816 *
 817 * This function can only be used if the underlying filesystem guarantees
 818 * that no other references to @src exist. For example attached buffer
 819 * heads are accessed only under the folio lock.  If your filesystem cannot
 820 * provide this guarantee, buffer_migrate_folio_norefs() may be more
 821 * appropriate.
 822 *
 823 * Return: 0 on success or a negative errno on failure.
 824 */
 825int buffer_migrate_folio(struct address_space *mapping,
 826		struct folio *dst, struct folio *src, enum migrate_mode mode)
 827{
 828	return __buffer_migrate_folio(mapping, dst, src, mode, false);
 829}
 830EXPORT_SYMBOL(buffer_migrate_folio);
 831
 832/**
 833 * buffer_migrate_folio_norefs() - Migration function for folios with buffers.
 834 * @mapping: The address space containing @src.
 835 * @dst: The folio to migrate to.
 836 * @src: The folio to migrate from.
 837 * @mode: How to migrate the folio.
 838 *
 839 * Like buffer_migrate_folio() except that this variant is more careful
 840 * and checks that there are also no buffer head references. This function
 841 * is the right one for mappings where buffer heads are directly looked
 842 * up and referenced (such as block device mappings).
 843 *
 844 * Return: 0 on success or a negative errno on failure.
 845 */
 846int buffer_migrate_folio_norefs(struct address_space *mapping,
 847		struct folio *dst, struct folio *src, enum migrate_mode mode)
 848{
 849	return __buffer_migrate_folio(mapping, dst, src, mode, true);
 850}
 851EXPORT_SYMBOL_GPL(buffer_migrate_folio_norefs);
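/*
 * Hedged example of choosing between the two buffer-head variants: a block
 * device mapping, where buffer heads are looked up and referenced directly,
 * wants the careful _norefs variant, while an ordinary buffer-head
 * filesystem can use the plain one (illustrative structs; compare
 * def_blk_aops and the ext2 aops in-tree).
 */
static const struct address_space_operations example_blkdev_aops = {
	.migrate_folio	= buffer_migrate_folio_norefs,
};

static const struct address_space_operations example_bh_fs_aops = {
	.migrate_folio	= buffer_migrate_folio,
};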
 852#endif /* CONFIG_BUFFER_HEAD */
 853
 854int filemap_migrate_folio(struct address_space *mapping,
 855		struct folio *dst, struct folio *src, enum migrate_mode mode)
 856{
 857	int ret;
 858
 859	ret = folio_migrate_mapping(mapping, dst, src, 0);
 860	if (ret != MIGRATEPAGE_SUCCESS)
 861		return ret;
 862
 863	if (folio_get_private(src))
 864		folio_attach_private(dst, folio_detach_private(src));
 865
 866	if (mode != MIGRATE_SYNC_NO_COPY)
 867		folio_migrate_copy(dst, src);
 868	else
 869		folio_migrate_flags(dst, src);
 870	return MIGRATEPAGE_SUCCESS;
 871}
 872EXPORT_SYMBOL_GPL(filemap_migrate_folio);
 873
 874/*
 875 * Writeback a folio to clean the dirty state
 876 */
 877static int writeout(struct address_space *mapping, struct folio *folio)
 878{
 879	struct writeback_control wbc = {
 880		.sync_mode = WB_SYNC_NONE,
 881		.nr_to_write = 1,
 882		.range_start = 0,
 883		.range_end = LLONG_MAX,
 884		.for_reclaim = 1
 885	};
 886	int rc;
 887
 888	if (!mapping->a_ops->writepage)
 889		/* No write method for the address space */
 890		return -EINVAL;
 891
 892	if (!folio_clear_dirty_for_io(folio))
 893		/* Someone else already triggered a write */
 894		return -EAGAIN;
 895
 896	/*
 897	 * A dirty folio may imply that the underlying filesystem has
 898	 * the folio on some queue. So the folio must be clean for
 899	 * migration. Writeout may mean we lose the lock and the
 900	 * folio state is no longer what we checked for earlier.
 901	 * At this point we know that the migration attempt cannot
 902	 * be successful.
 903	 */
 904	remove_migration_ptes(folio, folio, false);
 905
 906	rc = mapping->a_ops->writepage(&folio->page, &wbc);
 907
 908	if (rc != AOP_WRITEPAGE_ACTIVATE)
 909		/* unlocked. Relock */
 910		folio_lock(folio);
 911
 912	return (rc < 0) ? -EIO : -EAGAIN;
 913}
 914
 915/*
 916 * Default handling if a filesystem does not provide a migration function.
 917 */
 918static int fallback_migrate_folio(struct address_space *mapping,
 919		struct folio *dst, struct folio *src, enum migrate_mode mode)
 920{
 921	if (folio_test_dirty(src)) {
 922		/* Only writeback folios in full synchronous migration */
 923		switch (mode) {
 924		case MIGRATE_SYNC:
 925		case MIGRATE_SYNC_NO_COPY:
 926			break;
 927		default:
 928			return -EBUSY;
 929		}
 930		return writeout(mapping, src);
 931	}
 932
 933	/*
 934	 * Buffers may be managed in a filesystem specific way.
 935	 * We must have no buffers or drop them.
 936	 */
 937	if (!filemap_release_folio(src, GFP_KERNEL))
 938		return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;
 939
 940	return migrate_folio(mapping, dst, src, mode);
 941}
 942
 943/*
 944 * Move a page to a newly allocated page
 945 * The page is locked and all ptes have been successfully removed.
 946 *
 947 * The new page will have replaced the old page if this function
 948 * is successful.
 949 *
 950 * Return value:
 951 *   < 0 - error code
 952 *  MIGRATEPAGE_SUCCESS - success
 953 */
 954static int move_to_new_folio(struct folio *dst, struct folio *src,
 955				enum migrate_mode mode)
 956{
 957	int rc = -EAGAIN;
 958	bool is_lru = !__folio_test_movable(src);
 959
 960	VM_BUG_ON_FOLIO(!folio_test_locked(src), src);
 961	VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst);
 962
 963	if (likely(is_lru)) {
 964		struct address_space *mapping = folio_mapping(src);
 965
 966		if (!mapping)
 967			rc = migrate_folio(mapping, dst, src, mode);
 968		else if (mapping_unmovable(mapping))
 969			rc = -EOPNOTSUPP;
 970		else if (mapping->a_ops->migrate_folio)
 971			/*
 972			 * Most folios have a mapping and most filesystems
 973			 * provide a migrate_folio callback. Anonymous folios
 974			 * are part of swap space which also has its own
 975			 * migrate_folio callback. This is the most common path
 976			 * for page migration.
 977			 */
 978			rc = mapping->a_ops->migrate_folio(mapping, dst, src,
 979								mode);
 980		else
 981			rc = fallback_migrate_folio(mapping, dst, src, mode);
 982	} else {
 983		const struct movable_operations *mops;
 984
 985		/*
 986		 * A non-LRU page could be released after the isolation
 987		 * step. In that case, we shouldn't try migration.
 988		 */
 989		VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
 990		if (!folio_test_movable(src)) {
 991			rc = MIGRATEPAGE_SUCCESS;
 992			folio_clear_isolated(src);
 993			goto out;
 994		}
 995
 996		mops = folio_movable_ops(src);
 997		rc = mops->migrate_page(&dst->page, &src->page, mode);
 998		WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
 999				!folio_test_isolated(src));
1000	}
1001
1002	/*
1003	 * When successful, old pagecache src->mapping must be cleared before
1004	 * src is freed; but stats require that PageAnon be left as PageAnon.
1005	 */
1006	if (rc == MIGRATEPAGE_SUCCESS) {
1007		if (__folio_test_movable(src)) {
1008			VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
1009
1010			/*
1011			 * We clear PG_movable under page_lock so any compactor
1012			 * cannot try to migrate this page.
1013			 */
1014			folio_clear_isolated(src);
1015		}
1016
1017		/*
1018		 * Anonymous and movable src->mapping will be cleared by
1019		 * free_pages_prepare(), so don't reset it here in order to
1020		 * keep type checks such as PageAnon working, for example.
1021		 */
1022		if (!folio_mapping_flags(src))
1023			src->mapping = NULL;
1024
1025		if (likely(!folio_is_zone_device(dst)))
1026			flush_dcache_folio(dst);
1027	}
1028out:
1029	return rc;
1030}
1031
1032/*
1033 * To record some information during migration, we use the otherwise
1034 * unused private field of the newly allocated destination folio.
1035 * This is safe because nobody is using it except us.
1036 */
1037enum {
1038	PAGE_WAS_MAPPED = BIT(0),
1039	PAGE_WAS_MLOCKED = BIT(1),
1040	PAGE_OLD_STATES = PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED,
1041};
1042
1043static void __migrate_folio_record(struct folio *dst,
1044				   int old_page_state,
1045				   struct anon_vma *anon_vma)
1046{
1047	dst->private = (void *)anon_vma + old_page_state;
1048}
1049
1050static void __migrate_folio_extract(struct folio *dst,
1051				   int *old_page_state,
1052				   struct anon_vma **anon_vmap)
1053{
1054	unsigned long private = (unsigned long)dst->private;
1055
1056	*anon_vmap = (struct anon_vma *)(private & ~PAGE_OLD_STATES);
1057	*old_page_state = private & PAGE_OLD_STATES;
1058	dst->private = NULL;
1059}
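/*
 * Illustration of the packing scheme above, with assumed addresses: struct
 * anon_vma objects are at least 4-byte aligned, so the two low bits of the
 * pointer are free to carry the PAGE_WAS_MAPPED / PAGE_WAS_MLOCKED flags:
 *
 *	anon_vma       = 0xffff8880c0de5400
 *	old_page_state = PAGE_WAS_MAPPED              (bit 0)
 *	dst->private   = (void *)0xffff8880c0de5401
 *
 * __migrate_folio_extract() then masks with ~PAGE_OLD_STATES to recover the
 * pointer and with PAGE_OLD_STATES to recover the state bits.
 */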
1060
1061/* Restore the source folio to the original state upon failure */
1062static void migrate_folio_undo_src(struct folio *src,
1063				   int page_was_mapped,
1064				   struct anon_vma *anon_vma,
1065				   bool locked,
1066				   struct list_head *ret)
1067{
1068	if (page_was_mapped)
1069		remove_migration_ptes(src, src, false);
1070	/* Drop an anon_vma reference if we took one */
1071	if (anon_vma)
1072		put_anon_vma(anon_vma);
1073	if (locked)
1074		folio_unlock(src);
1075	if (ret)
1076		list_move_tail(&src->lru, ret);
1077}
1078
1079/* Restore the destination folio to the original state upon failure */
1080static void migrate_folio_undo_dst(struct folio *dst, bool locked,
1081		free_folio_t put_new_folio, unsigned long private)
1082{
1083	if (locked)
1084		folio_unlock(dst);
1085	if (put_new_folio)
1086		put_new_folio(dst, private);
1087	else
1088		folio_put(dst);
1089}
1090
1091/* Cleanup src folio upon migration success */
1092static void migrate_folio_done(struct folio *src,
1093			       enum migrate_reason reason)
1094{
1095	/*
1096	 * Compaction can also migrate non-LRU pages, which are
1097	 * not accounted in NR_ISOLATED_*. They can be recognized
1098	 * via __folio_test_movable().
1099	 */
1100	if (likely(!__folio_test_movable(src)))
1101		mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON +
1102				    folio_is_file_lru(src), -folio_nr_pages(src));
1103
1104	if (reason != MR_MEMORY_FAILURE)
1105		/* We release the page in page_handle_poison. */
1106		folio_put(src);
1107}
1108
1109/* Obtain the lock on page, remove all ptes. */
1110static int migrate_folio_unmap(new_folio_t get_new_folio,
1111		free_folio_t put_new_folio, unsigned long private,
1112		struct folio *src, struct folio **dstp, enum migrate_mode mode,
1113		enum migrate_reason reason, struct list_head *ret)
1114{
1115	struct folio *dst;
1116	int rc = -EAGAIN;
1117	int old_page_state = 0;
1118	struct anon_vma *anon_vma = NULL;
1119	bool is_lru = !__folio_test_movable(src);
1120	bool locked = false;
1121	bool dst_locked = false;
1122
1123	if (folio_ref_count(src) == 1) {
1124		/* Folio was freed from under us. So we are done. */
1125		folio_clear_active(src);
1126		folio_clear_unevictable(src);
1127		/* free_pages_prepare() will clear PG_isolated. */
1128		list_del(&src->lru);
1129		migrate_folio_done(src, reason);
1130		return MIGRATEPAGE_SUCCESS;
1131	}
1132
1133	dst = get_new_folio(src, private);
1134	if (!dst)
1135		return -ENOMEM;
1136	*dstp = dst;
1137
1138	dst->private = NULL;
1139
1140	if (!folio_trylock(src)) {
1141		if (mode == MIGRATE_ASYNC)
1142			goto out;
1143
1144		/*
1145		 * It's not safe for direct compaction to call lock_page.
1146		 * For example, during page readahead pages are added locked
1147		 * to the LRU. Later, when the IO completes the pages are
1148		 * marked uptodate and unlocked. However, the queueing
1149		 * could be merging multiple pages for one bio (e.g.
1150		 * mpage_readahead). If an allocation happens for the
1151		 * second or third page, the process can end up locking
1152		 * the same page twice and deadlocking. Rather than
1153		 * trying to be clever about what pages can be locked,
1154		 * avoid the use of lock_page for direct compaction
1155		 * altogether.
1156		 */
1157		if (current->flags & PF_MEMALLOC)
1158			goto out;
1159
1160		/*
1161		 * In "light" mode, we can wait for transient locks (eg
1162		 * inserting a page into the page table), but it's not
1163		 * worth waiting for I/O.
1164		 */
1165		if (mode == MIGRATE_SYNC_LIGHT && !folio_test_uptodate(src))
1166			goto out;
1167
1168		folio_lock(src);
1169	}
1170	locked = true;
1171	if (folio_test_mlocked(src))
1172		old_page_state |= PAGE_WAS_MLOCKED;
1173
1174	if (folio_test_writeback(src)) {
1175		/*
1176		 * Only in the case of a full synchronous migration is it
1177		 * necessary to wait for PageWriteback. In the async case,
1178		 * the retry loop is too short and in the sync-light case,
1179		 * the overhead of stalling is too much.
1180		 */
1181		switch (mode) {
1182		case MIGRATE_SYNC:
1183		case MIGRATE_SYNC_NO_COPY:
1184			break;
1185		default:
1186			rc = -EBUSY;
1187			goto out;
1188		}
1189		folio_wait_writeback(src);
1190	}
1191
1192	/*
1193	 * try_to_migrate() drops src->mapcount to 0 here, so we could
1194	 * otherwise fail to notice the anon_vma being freed while we
1195	 * migrate the page. Taking a reference here delays freeing the
1196	 * anon_vma until the end of migration. File cache pages are no
1197	 * problem because of the page lock: file caches may use writepage()
1198	 * or lock_page() during migration, so only anon pages need care here.
1199	 *
1200	 * Only folio_get_anon_vma() understands the subtleties of
1201	 * getting a hold on an anon_vma from outside one of its mms.
1202	 * But if we cannot get anon_vma, then we won't need it anyway,
1203	 * because that implies that the anon page is no longer mapped
1204	 * (and cannot be remapped so long as we hold the page lock).
1205	 */
1206	if (folio_test_anon(src) && !folio_test_ksm(src))
1207		anon_vma = folio_get_anon_vma(src);
1208
1209	/*
1210	 * Block others from accessing the new page when we get around to
1211	 * establishing additional references. We are usually the only one
1212	 * holding a reference to dst at this point. We used to have a BUG
1213	 * here if folio_trylock(dst) fails, but would like to allow for
1214	 * cases where there might be a race with the previous use of dst.
1215	 * This is much like races on refcount of oldpage: just don't BUG().
1216	 */
1217	if (unlikely(!folio_trylock(dst)))
1218		goto out;
1219	dst_locked = true;
1220
1221	if (unlikely(!is_lru)) {
1222		__migrate_folio_record(dst, old_page_state, anon_vma);
1223		return MIGRATEPAGE_UNMAP;
1224	}
1225
1226	/*
1227	 * Corner case handling:
1228	 * 1. When a new swap-cache page is read in, it is added to the LRU
1229	 * and treated as swapcache but it has no rmap yet.
1230	 * Calling try_to_unmap() against a src->mapping==NULL page will
1231	 * trigger a BUG.  So handle it here.
1232	 * 2. An orphaned page (see truncate_cleanup_page) might have
1233	 * fs-private metadata. The page can be picked up due to memory
1234	 * offlining.  Everywhere else except page reclaim, the page is
1235	 * invisible to the VM, so the page cannot be migrated.  So try to
1236	 * free the metadata so that the page can be freed.
1237	 */
1238	if (!src->mapping) {
1239		if (folio_test_private(src)) {
1240			try_to_free_buffers(src);
1241			goto out;
1242		}
1243	} else if (folio_mapped(src)) {
1244		/* Establish migration ptes */
1245		VM_BUG_ON_FOLIO(folio_test_anon(src) &&
1246			       !folio_test_ksm(src) && !anon_vma, src);
1247		try_to_migrate(src, mode == MIGRATE_ASYNC ? TTU_BATCH_FLUSH : 0);
1248		old_page_state |= PAGE_WAS_MAPPED;
1249	}
1250
1251	if (!folio_mapped(src)) {
1252		__migrate_folio_record(dst, old_page_state, anon_vma);
1253		return MIGRATEPAGE_UNMAP;
1254	}
1255
1256out:
1257	/*
1258	 * A folio that has not been unmapped will be restored to the
1259	 * right list unless we want to retry.
1260	 */
1261	if (rc == -EAGAIN)
1262		ret = NULL;
1263
1264	migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
1265			       anon_vma, locked, ret);
1266	migrate_folio_undo_dst(dst, dst_locked, put_new_folio, private);
1267
1268	return rc;
1269}
1270
1271/* Migrate the folio to the newly allocated folio in dst. */
1272static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
1273			      struct folio *src, struct folio *dst,
1274			      enum migrate_mode mode, enum migrate_reason reason,
1275			      struct list_head *ret)
1276{
1277	int rc;
1278	int old_page_state = 0;
1279	struct anon_vma *anon_vma = NULL;
1280	bool is_lru = !__folio_test_movable(src);
1281	struct list_head *prev;
1282
1283	__migrate_folio_extract(dst, &old_page_state, &anon_vma);
1284	prev = dst->lru.prev;
1285	list_del(&dst->lru);
1286
1287	rc = move_to_new_folio(dst, src, mode);
1288	if (rc)
1289		goto out;
1290
1291	if (unlikely(!is_lru))
1292		goto out_unlock_both;
1293
1294	/*
1295	 * When successful, push dst to LRU immediately: so that if it
1296	 * turns out to be an mlocked page, remove_migration_ptes() will
1297	 * automatically build up the correct dst->mlock_count for it.
1298	 *
1299	 * We would like to do something similar for the old page, when
1300	 * unsuccessful, and other cases when a page has been temporarily
1301	 * isolated from the unevictable LRU: but this case is the easiest.
1302	 */
1303	folio_add_lru(dst);
1304	if (old_page_state & PAGE_WAS_MLOCKED)
1305		lru_add_drain();
1306
1307	if (old_page_state & PAGE_WAS_MAPPED)
1308		remove_migration_ptes(src, dst, false);
1309
1310out_unlock_both:
1311	folio_unlock(dst);
1312	set_page_owner_migrate_reason(&dst->page, reason);
1313	/*
1314	 * If migration is successful, decrease refcount of dst,
1315	 * which will not free the page because new page owner increased
1316	 * refcounter.
1317	 */
1318	folio_put(dst);
1319
1320	/*
1321	 * A folio that has been migrated has all references removed
1322	 * and will be freed.
1323	 */
1324	list_del(&src->lru);
1325	/* Drop an anon_vma reference if we took one */
1326	if (anon_vma)
1327		put_anon_vma(anon_vma);
1328	folio_unlock(src);
1329	migrate_folio_done(src, reason);
1330
1331	return rc;
1332out:
1333	/*
1334	 * A folio that has not been migrated will be restored to the
1335	 * right list unless we want to retry.
1336	 */
1337	if (rc == -EAGAIN) {
1338		list_add(&dst->lru, prev);
1339		__migrate_folio_record(dst, old_page_state, anon_vma);
1340		return rc;
1341	}
1342
1343	migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
1344			       anon_vma, true, ret);
1345	migrate_folio_undo_dst(dst, true, put_new_folio, private);
1346
1347	return rc;
1348}
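/*
 * A hedged sketch of the two-phase protocol these helpers implement; the
 * real consumer is the batching loop in migrate_pages_batch(), which
 * interleaves many unmap phases before the corresponding move phases and
 * adds retry and undo logic omitted here.
 */
#if 0
	rc = migrate_folio_unmap(get_new_folio, put_new_folio, private,
				 src, &dst, mode, reason, ret);
	if (rc == MIGRATEPAGE_UNMAP)
		rc = migrate_folio_move(put_new_folio, private,
					src, dst, mode, reason, ret);
#endif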
1349
1350/*
1351 * Counterpart of unmap_and_move_page() for hugepage migration.
1352 *
1353 * This function doesn't wait for the completion of hugepage I/O
1354 * because there is no race between I/O and migration for hugepages.
1355 * Note that currently hugepage I/O occurs only in direct I/O
1356 * where no lock is held and PG_writeback is irrelevant,
1357 * and the writeback status of all subpages is counted in the reference
1358 * count of the head page (i.e. if all subpages of a 2MB hugepage are
1359 * under direct I/O, the reference count of the head page is 512 and a bit more.)
1360 * This means that when we try to migrate a hugepage whose subpages are
1361 * doing direct I/O, some references remain after try_to_unmap() and
1362 * hugepage migration fails without data corruption.
1363 *
1364 * There is also no race when direct I/O is issued on the page under migration,
1365 * because then pte is replaced with migration swap entry and direct I/O code
1366 * will wait in the page fault for migration to complete.
1367 */
1368static int unmap_and_move_huge_page(new_folio_t get_new_folio,
1369		free_folio_t put_new_folio, unsigned long private,
1370		struct folio *src, int force, enum migrate_mode mode,
1371		int reason, struct list_head *ret)
1372{
1373	struct folio *dst;
1374	int rc = -EAGAIN;
1375	int page_was_mapped = 0;
1376	struct anon_vma *anon_vma = NULL;
1377	struct address_space *mapping = NULL;
1378
1379	if (folio_ref_count(src) == 1) {
1380		/* page was freed from under us. So we are done. */
1381		folio_putback_active_hugetlb(src);
1382		return MIGRATEPAGE_SUCCESS;
1383	}
1384
1385	dst = get_new_folio(src, private);
1386	if (!dst)
1387		return -ENOMEM;
1388
1389	if (!folio_trylock(src)) {
1390		if (!force)
1391			goto out;
1392		switch (mode) {
1393		case MIGRATE_SYNC:
1394		case MIGRATE_SYNC_NO_COPY:
1395			break;
1396		default:
1397			goto out;
1398		}
1399		folio_lock(src);
1400	}
1401
1402	/*
1403	 * Check for pages which are in the process of being freed.  Without
1404	 * folio_mapping() set, the hugetlbfs-specific move-page routine will not
1405	 * be called and we could leak usage counts for subpools.
1406	 */
1407	if (hugetlb_folio_subpool(src) && !folio_mapping(src)) {
1408		rc = -EBUSY;
1409		goto out_unlock;
1410	}
1411
1412	if (folio_test_anon(src))
1413		anon_vma = folio_get_anon_vma(src);
1414
1415	if (unlikely(!folio_trylock(dst)))
1416		goto put_anon;
1417
1418	if (folio_mapped(src)) {
1419		enum ttu_flags ttu = 0;
1420
1421		if (!folio_test_anon(src)) {
1422			/*
1423			 * In shared mappings, try_to_unmap could potentially
1424			 * call huge_pmd_unshare.  Because of this, take
1425			 * semaphore in write mode here and set TTU_RMAP_LOCKED
1426			 * to let lower levels know we have taken the lock.
1427			 */
1428			mapping = hugetlb_page_mapping_lock_write(&src->page);
1429			if (unlikely(!mapping))
1430				goto unlock_put_anon;
1431
1432			ttu = TTU_RMAP_LOCKED;
1433		}
1434
1435		try_to_migrate(src, ttu);
1436		page_was_mapped = 1;
1437
1438		if (ttu & TTU_RMAP_LOCKED)
1439			i_mmap_unlock_write(mapping);
1440	}
1441
1442	if (!folio_mapped(src))
1443		rc = move_to_new_folio(dst, src, mode);
1444
1445	if (page_was_mapped)
1446		remove_migration_ptes(src,
1447			rc == MIGRATEPAGE_SUCCESS ? dst : src, false);
1448
1449unlock_put_anon:
1450	folio_unlock(dst);
1451
1452put_anon:
1453	if (anon_vma)
1454		put_anon_vma(anon_vma);
1455
1456	if (rc == MIGRATEPAGE_SUCCESS) {
1457		move_hugetlb_state(src, dst, reason);
1458		put_new_folio = NULL;
1459	}
1460
1461out_unlock:
1462	folio_unlock(src);
1463out:
1464	if (rc == MIGRATEPAGE_SUCCESS)
1465		folio_putback_active_hugetlb(src);
1466	else if (rc != -EAGAIN)
1467		list_move_tail(&src->lru, ret);
1468
1469	/*
1470	 * If migration was not successful and there's a freeing callback, use
1471	 * it.  Otherwise, put_page() will drop the reference grabbed during
1472	 * isolation.
1473	 */
1474	if (put_new_folio)
1475		put_new_folio(dst, private);
1476	else
1477		folio_putback_active_hugetlb(dst);
1478
1479	return rc;
1480}
1481
1482static inline int try_split_folio(struct folio *folio, struct list_head *split_folios)
1483{
1484	int rc;
1485
1486	folio_lock(folio);
1487	rc = split_folio_to_list(folio, split_folios);
1488	folio_unlock(folio);
1489	if (!rc)
1490		list_move_tail(&folio->lru, split_folios);
1491
1492	return rc;
1493}
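/*
 * Note on try_split_folio(): it takes the folio lock because
 * split_folio_to_list() requires the folio to be locked.  On success the
 * new tail folios land on @split_folios and the now-small head folio is
 * moved there as well, so the entire former large folio is queued for the
 * follow-up migration pass.
 */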
1494
1495#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1496#define NR_MAX_BATCHED_MIGRATION	HPAGE_PMD_NR
1497#else
1498#define NR_MAX_BATCHED_MIGRATION	512
1499#endif
1500#define NR_MAX_MIGRATE_PAGES_RETRY	10
1501#define NR_MAX_MIGRATE_ASYNC_RETRY	3
1502#define NR_MAX_MIGRATE_SYNC_RETRY					\
1503	(NR_MAX_MIGRATE_PAGES_RETRY - NR_MAX_MIGRATE_ASYNC_RETRY)
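/*
 * Retry budget: an async migrate_pages() call makes up to
 * NR_MAX_MIGRATE_PAGES_RETRY (10) batched passes directly; a sync call
 * first makes NR_MAX_MIGRATE_ASYNC_RETRY (3) batched async passes and then
 * at most NR_MAX_MIGRATE_SYNC_RETRY (10 - 3 = 7) one-by-one sync passes,
 * so the total retry count is bounded by 10 either way.
 */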
1504
1505struct migrate_pages_stats {
1506	int nr_succeeded;	/* Normal and large folios migrated successfully, in
1507				   units of base pages */
1508	int nr_failed_pages;	/* Normal and large folios failed to be migrated, in
1509				   units of base pages.  Untried folios aren't counted */
1510	int nr_thp_succeeded;	/* THP migrated successfully */
1511	int nr_thp_failed;	/* THP failed to be migrated */
1512	int nr_thp_split;	/* THP split before migrating */
1513	int nr_split;	/* Large folios (including THP) split before migrating */
1514};
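/*
 * Units example: successfully migrating one PMD-sized THP (512 base pages
 * with 4KB pages) adds 512 to nr_succeeded and 1 to nr_thp_succeeded.  If
 * the THP has to be split first, nr_thp_split and nr_split each go up by
 * one, and the resulting base folios are then counted individually.
 */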
1515
1516/*
1517 * Returns the number of hugetlb folios that were not migrated, or an error
1518 * code after NR_MAX_MIGRATE_PAGES_RETRY attempts, or once no hugetlb folios
1519 * are movable any more because the list has become empty or no retryable
1520 * hugetlb folios remain. It is the caller's responsibility to call
1521 * putback_movable_pages() only if ret != 0.
1522 */
1523static int migrate_hugetlbs(struct list_head *from, new_folio_t get_new_folio,
1524			    free_folio_t put_new_folio, unsigned long private,
1525			    enum migrate_mode mode, int reason,
1526			    struct migrate_pages_stats *stats,
1527			    struct list_head *ret_folios)
1528{
1529	int retry = 1;
1530	int nr_failed = 0;
1531	int nr_retry_pages = 0;
1532	int pass = 0;
1533	struct folio *folio, *folio2;
1534	int rc, nr_pages;
1535
1536	for (pass = 0; pass < NR_MAX_MIGRATE_PAGES_RETRY && retry; pass++) {
1537		retry = 0;
1538		nr_retry_pages = 0;
1539
1540		list_for_each_entry_safe(folio, folio2, from, lru) {
1541			if (!folio_test_hugetlb(folio))
1542				continue;
1543
1544			nr_pages = folio_nr_pages(folio);
1545
1546			cond_resched();
1547
1548			/*
1549			 * Migratability of hugepages depends on the architecture
1550			 * and the hugepage size.  This check is necessary because some callers
1551			 * of hugepage migration like soft offline and memory
1552			 * hotremove don't walk through page tables or check whether
1553			 * the hugepage is pmd-based or not before kicking migration.
1554			 */
1555			if (!hugepage_migration_supported(folio_hstate(folio))) {
1556				nr_failed++;
1557				stats->nr_failed_pages += nr_pages;
1558				list_move_tail(&folio->lru, ret_folios);
1559				continue;
1560			}
1561
1562			rc = unmap_and_move_huge_page(get_new_folio,
1563						      put_new_folio, private,
1564						      folio, pass > 2, mode,
1565						      reason, ret_folios);
1566			/*
1567			 * The rules are:
1568			 *	Success: hugetlb folio will be put back
1569			 *	-EAGAIN: stay on the from list
1570			 *	-ENOMEM: stay on the from list
1571			 *	Other errno: put on ret_folios list
1572			 */
1573			switch(rc) {
1574			case -ENOMEM:
1575				/*
1576				 * When memory is low, don't bother trying to migrate
1577				 * other folios; just exit.
1578				 */
1579				stats->nr_failed_pages += nr_pages + nr_retry_pages;
1580				return -ENOMEM;
1581			case -EAGAIN:
1582				retry++;
1583				nr_retry_pages += nr_pages;
1584				break;
1585			case MIGRATEPAGE_SUCCESS:
1586				stats->nr_succeeded += nr_pages;
1587				break;
1588			default:
1589				/*
1590				 * Permanent failure (-EBUSY, etc.):
1591				 * unlike -EAGAIN case, the failed folio is
1592				 * removed from migration folio list and not
1593				 * retried in the next outer loop.
1594				 */
1595				nr_failed++;
1596				stats->nr_failed_pages += nr_pages;
1597				break;
1598			}
1599		}
1600	}
1601	/*
1602	 * nr_failed is the number of hugetlb folios that failed to migrate.  After
1603	 * NR_MAX_MIGRATE_PAGES_RETRY attempts, give up and count retried hugetlb
1604	 * folios as failed.
1605	 */
1606	nr_failed += retry;
1607	stats->nr_failed_pages += nr_retry_pages;
1608
1609	return nr_failed;
1610}
1611
1612/*
1613 * migrate_pages_batch() first unmaps as many folios in the from list as
1614 * possible, then moves the unmapped folios.
1615 *
1616 * We only batch migration if mode == MIGRATE_ASYNC, to avoid waiting on a
1617 * lock or bit while we have locked more than one folio, which may cause a
1618 * deadlock (e.g., for the loop device).  So, if mode != MIGRATE_ASYNC, the
1619 * length of the from list must be <= 1.
1620 */
1621static int migrate_pages_batch(struct list_head *from,
1622		new_folio_t get_new_folio, free_folio_t put_new_folio,
1623		unsigned long private, enum migrate_mode mode, int reason,
1624		struct list_head *ret_folios, struct list_head *split_folios,
1625		struct migrate_pages_stats *stats, int nr_pass)
1626{
1627	int retry = 1;
1628	int thp_retry = 1;
1629	int nr_failed = 0;
1630	int nr_retry_pages = 0;
1631	int pass = 0;
1632	bool is_thp = false;
1633	bool is_large = false;
1634	struct folio *folio, *folio2, *dst = NULL, *dst2;
1635	int rc, rc_saved = 0, nr_pages;
1636	LIST_HEAD(unmap_folios);
1637	LIST_HEAD(dst_folios);
1638	bool nosplit = (reason == MR_NUMA_MISPLACED);
1639
1640	VM_WARN_ON_ONCE(mode != MIGRATE_ASYNC &&
1641			!list_empty(from) && !list_is_singular(from));
1642
1643	for (pass = 0; pass < nr_pass && retry; pass++) {
1644		retry = 0;
1645		thp_retry = 0;
1646		nr_retry_pages = 0;
1647
1648		list_for_each_entry_safe(folio, folio2, from, lru) {
1649			is_large = folio_test_large(folio);
1650			is_thp = is_large && folio_test_pmd_mappable(folio);
1651			nr_pages = folio_nr_pages(folio);
1652
1653			cond_resched();
1654
1655			/*
1656			 * Large folio migration might be unsupported or
1657			 * the allocation might fail, so we should retry
1658			 * on the same folio with the large folio split
1659			 * into normal folios.
1660			 *
1661			 * Split folios are put in split_folios, and
1662			 * we will migrate them after the rest of the
1663			 * list is processed.
1664			 */
1665			if (!thp_migration_supported() && is_thp) {
1666				nr_failed++;
1667				stats->nr_thp_failed++;
1668				if (!try_split_folio(folio, split_folios)) {
1669					stats->nr_thp_split++;
1670					stats->nr_split++;
1671					continue;
1672				}
1673				stats->nr_failed_pages += nr_pages;
1674				list_move_tail(&folio->lru, ret_folios);
1675				continue;
1676			}
1677
1678			rc = migrate_folio_unmap(get_new_folio, put_new_folio,
1679					private, folio, &dst, mode, reason,
1680					ret_folios);
1681			/*
1682			 * The rules are:
1683			 *	Success: folio will be freed
1684			 *	Unmap: folio will be put on unmap_folios list,
1685			 *	       dst folio put on dst_folios list
1686			 *	-EAGAIN: stay on the from list
1687			 *	-ENOMEM: stay on the from list
1688			 *	Other errno: put on ret_folios list
1689			 */
1690			switch(rc) {
1691			case -ENOMEM:
1692				/*
1693				 * When memory is low, don't bother trying to migrate
1694				 * other folios; move the unmapped folios, then exit.
1695				 */
1696				nr_failed++;
1697				stats->nr_thp_failed += is_thp;
1698				/* Large folio NUMA faulting doesn't split to retry. */
1699				if (is_large && !nosplit) {
1700					int ret = try_split_folio(folio, split_folios);
1701
1702					if (!ret) {
1703						stats->nr_thp_split += is_thp;
1704						stats->nr_split++;
1705						break;
1706					} else if (reason == MR_LONGTERM_PIN &&
1707						   ret == -EAGAIN) {
1708						/*
1709						 * Try again to split the large folio to
1710						 * mitigate the failure of longterm pinning.
1711						 */
1712						retry++;
1713						thp_retry += is_thp;
1714						nr_retry_pages += nr_pages;
1715						/* Undo duplicated failure counting. */
1716						nr_failed--;
1717						stats->nr_thp_failed -= is_thp;
1718						break;
1719					}
1720				}
1721
1722				stats->nr_failed_pages += nr_pages + nr_retry_pages;
1723				/* nr_failed isn't updated here: it's unused once rc_saved is set */
1724				stats->nr_thp_failed += thp_retry;
1725				rc_saved = rc;
1726				if (list_empty(&unmap_folios))
1727					goto out;
1728				else
1729					goto move;
1730			case -EAGAIN:
1731				retry++;
1732				thp_retry += is_thp;
1733				nr_retry_pages += nr_pages;
1734				break;
1735			case MIGRATEPAGE_SUCCESS:
1736				stats->nr_succeeded += nr_pages;
1737				stats->nr_thp_succeeded += is_thp;
1738				break;
1739			case MIGRATEPAGE_UNMAP:
1740				list_move_tail(&folio->lru, &unmap_folios);
1741				list_add_tail(&dst->lru, &dst_folios);
1742				break;
1743			default:
1744				/*
1745				 * Permanent failure (-EBUSY, etc.):
1746				 * unlike -EAGAIN case, the failed folio is
1747				 * removed from migration folio list and not
1748				 * retried in the next outer loop.
1749				 */
1750				nr_failed++;
1751				stats->nr_thp_failed += is_thp;
1752				stats->nr_failed_pages += nr_pages;
1753				break;
1754			}
1755		}
1756	}
1757	nr_failed += retry;
1758	stats->nr_thp_failed += thp_retry;
1759	stats->nr_failed_pages += nr_retry_pages;
1760move:
1761	/* Flush TLBs for all unmapped folios */
1762	try_to_unmap_flush();
1763
1764	retry = 1;
1765	for (pass = 0; pass < nr_pass && retry; pass++) {
1766		retry = 0;
1767		thp_retry = 0;
1768		nr_retry_pages = 0;
1769
1770		dst = list_first_entry(&dst_folios, struct folio, lru);
1771		dst2 = list_next_entry(dst, lru);
1772		list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
1773			is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
1774			nr_pages = folio_nr_pages(folio);
1775
1776			cond_resched();
1777
1778			rc = migrate_folio_move(put_new_folio, private,
1779						folio, dst, mode,
1780						reason, ret_folios);
1781			/*
1782			 * The rules are:
1783			 *	Success: folio will be freed
1784			 *	-EAGAIN: stay on the unmap_folios list
1785			 *	Other errno: put on ret_folios list
1786			 */
1787			switch(rc) {
1788			case -EAGAIN:
1789				retry++;
1790				thp_retry += is_thp;
1791				nr_retry_pages += nr_pages;
1792				break;
1793			case MIGRATEPAGE_SUCCESS:
1794				stats->nr_succeeded += nr_pages;
1795				stats->nr_thp_succeeded += is_thp;
1796				break;
1797			default:
1798				nr_failed++;
1799				stats->nr_thp_failed += is_thp;
1800				stats->nr_failed_pages += nr_pages;
1801				break;
1802			}
1803			dst = dst2;
1804			dst2 = list_next_entry(dst, lru);
1805		}
1806	}
1807	nr_failed += retry;
1808	stats->nr_thp_failed += thp_retry;
1809	stats->nr_failed_pages += nr_retry_pages;
1810
1811	rc = rc_saved ? : nr_failed;
1812out:
1813	/* Cleanup remaining folios */
1814	dst = list_first_entry(&dst_folios, struct folio, lru);
1815	dst2 = list_next_entry(dst, lru);
1816	list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
1817		int old_page_state = 0;
1818		struct anon_vma *anon_vma = NULL;
1819
1820		__migrate_folio_extract(dst, &old_page_state, &anon_vma);
1821		migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
1822				       anon_vma, true, ret_folios);
1823		list_del(&dst->lru);
1824		migrate_folio_undo_dst(dst, true, put_new_folio, private);
1825		dst = dst2;
1826		dst2 = list_next_entry(dst, lru);
1827	}
1828
1829	return rc;
1830}
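/*
 * Shape of migrate_pages_batch(), for reference: pass one walks "from" and
 * calls migrate_folio_unmap() on each folio, parking unmapped folios on
 * unmap_folios and their targets on dst_folios; a single
 * try_to_unmap_flush() then batches the TLB flushes for all of them; pass
 * two walks unmap_folios and completes each migration with
 * migrate_folio_move(), retrying -EAGAIN folios for up to nr_pass passes.
 */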
1831
1832static int migrate_pages_sync(struct list_head *from, new_folio_t get_new_folio,
1833		free_folio_t put_new_folio, unsigned long private,
1834		enum migrate_mode mode, int reason,
1835		struct list_head *ret_folios, struct list_head *split_folios,
1836		struct migrate_pages_stats *stats)
1837{
1838	int rc, nr_failed = 0;
1839	LIST_HEAD(folios);
1840	struct migrate_pages_stats astats;
1841
1842	memset(&astats, 0, sizeof(astats));
1843	/* First, try to migrate in batch with MIGRATE_ASYNC mode */
1844	rc = migrate_pages_batch(from, get_new_folio, put_new_folio, private, MIGRATE_ASYNC,
1845				 reason, &folios, split_folios, &astats,
1846				 NR_MAX_MIGRATE_ASYNC_RETRY);
1847	stats->nr_succeeded += astats.nr_succeeded;
1848	stats->nr_thp_succeeded += astats.nr_thp_succeeded;
1849	stats->nr_thp_split += astats.nr_thp_split;
1850	stats->nr_split += astats.nr_split;
1851	if (rc < 0) {
1852		stats->nr_failed_pages += astats.nr_failed_pages;
1853		stats->nr_thp_failed += astats.nr_thp_failed;
1854		list_splice_tail(&folios, ret_folios);
1855		return rc;
1856	}
1857	stats->nr_thp_failed += astats.nr_thp_split;
1858	/*
1859	 * Do not count rc, as pages will be retried below.
1860	 * Count nr_split only, since it includes nr_thp_split.
1861	 */
1862	nr_failed += astats.nr_split;
1863	/*
1864	 * Fall back to migrating all failed folios one by one synchronously. All
1865	 * failed folios except split THPs will be retried, so their failure
1866	 * isn't counted.
1867	 */
1868	list_splice_tail_init(&folios, from);
1869	while (!list_empty(from)) {
1870		list_move(from->next, &folios);
1871		rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
1872					 private, mode, reason, ret_folios,
1873					 split_folios, stats, NR_MAX_MIGRATE_SYNC_RETRY);
1874		list_splice_tail_init(&folios, ret_folios);
1875		if (rc < 0)
1876			return rc;
1877		nr_failed += rc;
1878	}
1879
1880	return nr_failed;
1881}
1882
1883/*
1884 * migrate_pages - migrate the folios specified in a list, to the free folios
1885 *		   supplied as the target for the page migration
1886 *
1887 * @from:		The list of folios to be migrated.
1888 * @get_new_folio:	The function used to allocate free folios to be used
1889 *			as the target of the folio migration.
1890 * @put_new_folio:	The function used to free target folios if migration
1891 *			fails, or NULL if no special handling is necessary.
1892 * @private:		Private data to be passed on to get_new_folio()
1893 * @mode:		The migration mode that specifies the constraints for
1894 *			folio migration, if any.
1895 * @reason:		The reason for folio migration.
1896 * @ret_succeeded:	Set to the number of folios migrated successfully if
1897 *			the caller passes a non-NULL pointer.
1898 *
1899 * The function returns after NR_MAX_MIGRATE_PAGES_RETRY attempts, or once no
1900 * folios are movable any more because the list has become empty or no
1901 * retryable folios remain. It is the caller's responsibility to call
1902 * putback_movable_pages() only if ret != 0.
1903 *
1904 * Returns the number of {normal, large, hugetlb} folios that were not
1905 * migrated, or an error code. The number of large folio splits is counted
1906 * as the number of non-migrated large folios, no matter how many split
1907 * folios of the large folio are migrated successfully.
1908 */
1909int migrate_pages(struct list_head *from, new_folio_t get_new_folio,
1910		free_folio_t put_new_folio, unsigned long private,
1911		enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
1912{
1913	int rc, rc_gather;
1914	int nr_pages;
1915	struct folio *folio, *folio2;
1916	LIST_HEAD(folios);
1917	LIST_HEAD(ret_folios);
1918	LIST_HEAD(split_folios);
1919	struct migrate_pages_stats stats;
1920
1921	trace_mm_migrate_pages_start(mode, reason);
1922
1923	memset(&stats, 0, sizeof(stats));
1924
1925	rc_gather = migrate_hugetlbs(from, get_new_folio, put_new_folio, private,
1926				     mode, reason, &stats, &ret_folios);
1927	if (rc_gather < 0)
1928		goto out;
1929
1930again:
1931	nr_pages = 0;
1932	list_for_each_entry_safe(folio, folio2, from, lru) {
1933		/* Retried hugetlb folios will be kept in the list */
1934		if (folio_test_hugetlb(folio)) {
1935			list_move_tail(&folio->lru, &ret_folios);
1936			continue;
1937		}
1938
1939		nr_pages += folio_nr_pages(folio);
1940		if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
1941			break;
1942	}
1943	if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
1944		list_cut_before(&folios, from, &folio2->lru);
1945	else
1946		list_splice_init(from, &folios);
1947	if (mode == MIGRATE_ASYNC)
1948		rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
1949				private, mode, reason, &ret_folios,
1950				&split_folios, &stats,
1951				NR_MAX_MIGRATE_PAGES_RETRY);
1952	else
1953		rc = migrate_pages_sync(&folios, get_new_folio, put_new_folio,
1954				private, mode, reason, &ret_folios,
1955				&split_folios, &stats);
1956	list_splice_tail_init(&folios, &ret_folios);
1957	if (rc < 0) {
1958		rc_gather = rc;
1959		list_splice_tail(&split_folios, &ret_folios);
1960		goto out;
1961	}
1962	if (!list_empty(&split_folios)) {
1963		/*
1964		 * Failure isn't counted since all split folios of a large folio
1965		 * are counted as one failure already.  And, we only try to migrate
1966		 * with minimal effort, forcing MIGRATE_ASYNC mode and retrying once.
1967		 */
1968		migrate_pages_batch(&split_folios, get_new_folio,
1969				put_new_folio, private, MIGRATE_ASYNC, reason,
1970				&ret_folios, NULL, &stats, 1);
1971		list_splice_tail_init(&split_folios, &ret_folios);
1972	}
1973	rc_gather += rc;
1974	if (!list_empty(from))
1975		goto again;
1976out:
1977	/*
1978	 * Put the permanently failed folios back on the migration list; they
1979	 * will be put back to the right list by the caller.
1980	 */
1981	list_splice(&ret_folios, from);
1982
1983	/*
1984	 * Return 0 in case all split folios of failed-to-migrate large folios
1985	 * were migrated successfully.
1986	 */
1987	if (list_empty(from))
1988		rc_gather = 0;
1989
1990	count_vm_events(PGMIGRATE_SUCCESS, stats.nr_succeeded);
1991	count_vm_events(PGMIGRATE_FAIL, stats.nr_failed_pages);
1992	count_vm_events(THP_MIGRATION_SUCCESS, stats.nr_thp_succeeded);
1993	count_vm_events(THP_MIGRATION_FAIL, stats.nr_thp_failed);
1994	count_vm_events(THP_MIGRATION_SPLIT, stats.nr_thp_split);
1995	trace_mm_migrate_pages(stats.nr_succeeded, stats.nr_failed_pages,
1996			       stats.nr_thp_succeeded, stats.nr_thp_failed,
1997			       stats.nr_thp_split, stats.nr_split, mode,
1998			       reason);
1999
2000	if (ret_succeeded)
2001		*ret_succeeded = stats.nr_succeeded;
2002
2003	return rc_gather;
2004}
2005
2006struct folio *alloc_migration_target(struct folio *src, unsigned long private)
2007{
2008	struct migration_target_control *mtc;
2009	gfp_t gfp_mask;
2010	unsigned int order = 0;
2011	int nid;
2012	int zidx;
2013
2014	mtc = (struct migration_target_control *)private;
2015	gfp_mask = mtc->gfp_mask;
2016	nid = mtc->nid;
2017	if (nid == NUMA_NO_NODE)
2018		nid = folio_nid(src);
2019
2020	if (folio_test_hugetlb(src)) {
2021		struct hstate *h = folio_hstate(src);
2022
2023		gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
2024		return alloc_hugetlb_folio_nodemask(h, nid,
2025						mtc->nmask, gfp_mask);
2026	}
2027
2028	if (folio_test_large(src)) {
2029		/*
2030		 * clear __GFP_RECLAIM to make the migration callback
2031		 * consistent with regular THP allocations.
2032		 */
2033		gfp_mask &= ~__GFP_RECLAIM;
2034		gfp_mask |= GFP_TRANSHUGE;
2035		order = folio_order(src);
2036	}
2037	zidx = zone_idx(folio_zone(src));
2038	if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
2039		gfp_mask |= __GFP_HIGHMEM;
2040
2041	return __folio_alloc(gfp_mask, order, nid, mtc->nmask);
2042}
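/*
 * A minimal caller sketch (editorial illustration; do_move_pages_to_node()
 * below is the real in-tree example).  alloc_migration_target() expects a
 * struct migration_target_control passed through @private:
 *
 *	struct migration_target_control mtc = {
 *		.nid = target_nid,	// target_nid chosen by the caller
 *		.gfp_mask = GFP_HIGHUSER_MOVABLE,
 *	};
 *	int ret = migrate_pages(&folio_list, alloc_migration_target, NULL,
 *				(unsigned long)&mtc, MIGRATE_SYNC,
 *				MR_SYSCALL, NULL);
 *	if (ret)
 *		putback_movable_pages(&folio_list);
 */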
2043
2044#ifdef CONFIG_NUMA
2045
2046static int store_status(int __user *status, int start, int value, int nr)
2047{
2048	while (nr-- > 0) {
2049		if (put_user(value, status + start))
2050			return -EFAULT;
2051		start++;
2052	}
2053
2054	return 0;
2055}
2056
2057static int do_move_pages_to_node(struct list_head *pagelist, int node)
2058{
2059	int err;
2060	struct migration_target_control mtc = {
2061		.nid = node,
2062		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
2063	};
2064
2065	err = migrate_pages(pagelist, alloc_migration_target, NULL,
2066		(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
2067	if (err)
2068		putback_movable_pages(pagelist);
2069	return err;
2070}
2071
2072/*
2073 * Resolves the given address to a struct page, isolates it from the LRU and
2074 * adds it to the given pagelist.
2075 * Returns:
2076 *     errno - if the page cannot be found/isolated
2077 *     0 - when it doesn't have to be migrated because it is already on the
2078 *         target node
2079 *     1 - when it has been queued
2080 */
2081static int add_page_for_migration(struct mm_struct *mm, const void __user *p,
2082		int node, struct list_head *pagelist, bool migrate_all)
2083{
2084	struct vm_area_struct *vma;
2085	unsigned long addr;
2086	struct page *page;
2087	struct folio *folio;
2088	int err;
2089
2090	mmap_read_lock(mm);
2091	addr = (unsigned long)untagged_addr_remote(mm, p);
2092
2093	err = -EFAULT;
2094	vma = vma_lookup(mm, addr);
2095	if (!vma || !vma_migratable(vma))
2096		goto out;
2097
2098	/* FOLL_DUMP to ignore special (like zero) pages */
2099	page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
2100
2101	err = PTR_ERR(page);
2102	if (IS_ERR(page))
2103		goto out;
2104
2105	err = -ENOENT;
2106	if (!page)
2107		goto out;
2108
2109	folio = page_folio(page);
2110	if (folio_is_zone_device(folio))
2111		goto out_putfolio;
2112
2113	err = 0;
2114	if (folio_nid(folio) == node)
2115		goto out_putfolio;
2116
2117	err = -EACCES;
2118	if (page_mapcount(page) > 1 && !migrate_all)
2119		goto out_putfolio;
2120
2121	err = -EBUSY;
2122	if (folio_test_hugetlb(folio)) {
2123		if (isolate_hugetlb(folio, pagelist))
2124			err = 1;
2125	} else {
2126		if (!folio_isolate_lru(folio))
2127			goto out_putfolio;
2128
2129		err = 1;
2130		list_add_tail(&folio->lru, pagelist);
2131		node_stat_mod_folio(folio,
2132			NR_ISOLATED_ANON + folio_is_file_lru(folio),
2133			folio_nr_pages(folio));
2134	}
2135out_putfolio:
2136	/*
2137	 * Either remove the duplicate refcount from folio_isolate_lru()
2138	 * or drop the folio ref if it was not isolated.
2139	 */
2140	folio_put(folio);
2141out:
2142	mmap_read_unlock(mm);
2143	return err;
2144}
2145
2146static int move_pages_and_store_status(int node,
2147		struct list_head *pagelist, int __user *status,
2148		int start, int i, unsigned long nr_pages)
2149{
2150	int err;
2151
2152	if (list_empty(pagelist))
2153		return 0;
2154
2155	err = do_move_pages_to_node(pagelist, node);
2156	if (err) {
2157		/*
2158		 * A positive err means the number of pages that
2159		 * failed to migrate.  Since we are going to
2160		 * abort and return the number of non-migrated
2161		 * pages, we need to include the rest of the
2162		 * nr_pages that have not been attempted as
2163		 * well.
2164		 */
2165		if (err > 0)
2166			err += nr_pages - i;
2167		return err;
2168	}
2169	return store_status(status, start, node, i - start);
2170}
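/*
 * Worked example of the accounting above: with nr_pages = 8 and i = 5 when
 * a batch fails with two pages not migrated, err = 2 from
 * do_move_pages_to_node() becomes 2 + (8 - 5) = 5: the two failures plus
 * the three pages that were never attempted.
 */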
2171
2172/*
2173 * Migrate an array of page addresses onto an array of nodes and fill
2174 * in the corresponding array of status values.
2175 */
2176static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
2177			 unsigned long nr_pages,
2178			 const void __user * __user *pages,
2179			 const int __user *nodes,
2180			 int __user *status, int flags)
2181{
2182	compat_uptr_t __user *compat_pages = (void __user *)pages;
2183	int current_node = NUMA_NO_NODE;
2184	LIST_HEAD(pagelist);
2185	int start, i;
2186	int err = 0, err1;
2187
2188	lru_cache_disable();
2189
2190	for (i = start = 0; i < nr_pages; i++) {
2191		const void __user *p;
2192		int node;
2193
2194		err = -EFAULT;
2195		if (in_compat_syscall()) {
2196			compat_uptr_t cp;
2197
2198			if (get_user(cp, compat_pages + i))
2199				goto out_flush;
2200
2201			p = compat_ptr(cp);
2202		} else {
2203			if (get_user(p, pages + i))
2204				goto out_flush;
2205		}
2206		if (get_user(node, nodes + i))
2207			goto out_flush;
2208
2209		err = -ENODEV;
2210		if (node < 0 || node >= MAX_NUMNODES)
2211			goto out_flush;
2212		if (!node_state(node, N_MEMORY))
2213			goto out_flush;
2214
2215		err = -EACCES;
2216		if (!node_isset(node, task_nodes))
2217			goto out_flush;
2218
2219		if (current_node == NUMA_NO_NODE) {
2220			current_node = node;
2221			start = i;
2222		} else if (node != current_node) {
2223			err = move_pages_and_store_status(current_node,
2224					&pagelist, status, start, i, nr_pages);
2225			if (err)
2226				goto out;
2227			start = i;
2228			current_node = node;
2229		}
2230
2231		/*
2232		 * Errors in the page lookup or isolation are not fatal; we simply
2233		 * report them via status.
2234		 */
2235		err = add_page_for_migration(mm, p, current_node, &pagelist,
2236					     flags & MPOL_MF_MOVE_ALL);
2237
2238		if (err > 0) {
2239			/* The page is successfully queued for migration */
2240			continue;
2241		}
2242
2243		/*
2244		 * The move_pages() man page does not have an -EEXIST choice, so
2245		 * use -EFAULT instead.
2246		 */
2247		if (err == -EEXIST)
2248			err = -EFAULT;
2249
2250		/*
2251		 * If the page is already on the target node (!err), store the
2252		 * node, otherwise, store the err.
2253		 */
2254		err = store_status(status, i, err ? : current_node, 1);
2255		if (err)
2256			goto out_flush;
2257
2258		err = move_pages_and_store_status(current_node, &pagelist,
2259				status, start, i, nr_pages);
2260		if (err) {
2261			/* We have accounted for page i */
2262			if (err > 0)
2263				err--;
2264			goto out;
2265		}
2266		current_node = NUMA_NO_NODE;
2267	}
2268out_flush:
2269	/* Make sure we do not overwrite the existing error */
2270	err1 = move_pages_and_store_status(current_node, &pagelist,
2271				status, start, i, nr_pages);
2272	if (err >= 0)
2273		err = err1;
2274out:
2275	lru_cache_enable();
2276	return err;
2277}
2278
2279/*
2280 * Determine the nodes of an array of pages and store them in an array of status values.
2281 */
2282static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
2283				const void __user **pages, int *status)
2284{
2285	unsigned long i;
2286
2287	mmap_read_lock(mm);
2288
2289	for (i = 0; i < nr_pages; i++) {
2290		unsigned long addr = (unsigned long)(*pages);
2291		struct vm_area_struct *vma;
2292		struct page *page;
2293		int err = -EFAULT;
2294
2295		vma = vma_lookup(mm, addr);
2296		if (!vma)
2297			goto set_status;
2298
2299		/* FOLL_DUMP to ignore special (like zero) pages */
2300		page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
2301
2302		err = PTR_ERR(page);
2303		if (IS_ERR(page))
2304			goto set_status;
2305
2306		err = -ENOENT;
2307		if (!page)
2308			goto set_status;
2309
2310		if (!is_zone_device_page(page))
2311			err = page_to_nid(page);
2312
2313		put_page(page);
2314set_status:
2315		*status = err;
2316
2317		pages++;
2318		status++;
2319	}
2320
2321	mmap_read_unlock(mm);
2322}
2323
2324static int get_compat_pages_array(const void __user *chunk_pages[],
2325				  const void __user * __user *pages,
2326				  unsigned long chunk_nr)
2327{
2328	compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages;
2329	compat_uptr_t p;
2330	int i;
2331
2332	for (i = 0; i < chunk_nr; i++) {
2333		if (get_user(p, pages32 + i))
2334			return -EFAULT;
2335		chunk_pages[i] = compat_ptr(p);
2336	}
2337
2338	return 0;
2339}
2340
2341/*
2342 * Determine the nodes of a user array of pages and store them in
2343 * a user array of status values.
2344 */
2345static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
2346			 const void __user * __user *pages,
2347			 int __user *status)
2348{
2349#define DO_PAGES_STAT_CHUNK_NR 16UL
2350	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
2351	int chunk_status[DO_PAGES_STAT_CHUNK_NR];
2352
2353	while (nr_pages) {
2354		unsigned long chunk_nr = min(nr_pages, DO_PAGES_STAT_CHUNK_NR);
2355
2356		if (in_compat_syscall()) {
2357			if (get_compat_pages_array(chunk_pages, pages,
2358						   chunk_nr))
2359				break;
2360		} else {
2361			if (copy_from_user(chunk_pages, pages,
2362				      chunk_nr * sizeof(*chunk_pages)))
2363				break;
2364		}
2365
2366		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
2367
2368		if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
2369			break;
2370
2371		pages += chunk_nr;
2372		status += chunk_nr;
2373		nr_pages -= chunk_nr;
2374	}
2375	return nr_pages ? -EFAULT : 0;
2376}
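/*
 * Chunking example: a status query for 40 pages is processed as chunks of
 * 16, 16 and 8 entries; each chunk is copied in, resolved by
 * do_pages_stat_array(), and copied back out before the next one, so at
 * most DO_PAGES_STAT_CHUNK_NR (16) user pointers are staged on the stack
 * at a time.
 */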
2377
2378static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
2379{
2380	struct task_struct *task;
2381	struct mm_struct *mm;
2382
2383	/*
2384	 * There is no need to check if the current process has the right to
2385	 * modify the specified process when they are the same.
2386	 */
2387	if (!pid) {
2388		mmget(current->mm);
2389		*mem_nodes = cpuset_mems_allowed(current);
2390		return current->mm;
2391	}
2392
2393	/* Find the mm_struct */
2394	rcu_read_lock();
2395	task = find_task_by_vpid(pid);
2396	if (!task) {
2397		rcu_read_unlock();
2398		return ERR_PTR(-ESRCH);
2399	}
2400	get_task_struct(task);
2401
2402	/*
2403	 * Check if this process has the right to modify the specified
2404	 * process. Use the regular "ptrace_may_access()" checks.
2405	 */
2406	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
2407		rcu_read_unlock();
2408		mm = ERR_PTR(-EPERM);
2409		goto out;
2410	}
2411	rcu_read_unlock();
2412
2413	mm = ERR_PTR(security_task_movememory(task));
2414	if (IS_ERR(mm))
2415		goto out;
2416	*mem_nodes = cpuset_mems_allowed(task);
2417	mm = get_task_mm(task);
2418out:
2419	put_task_struct(task);
2420	if (!mm)
2421		mm = ERR_PTR(-EINVAL);
2422	return mm;
2423}
2424
2425/*
2426 * Move a list of pages in the address space of the currently executing
2427 * process.
2428 */
2429static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
2430			     const void __user * __user *pages,
2431			     const int __user *nodes,
2432			     int __user *status, int flags)
2433{
2434	struct mm_struct *mm;
2435	int err;
2436	nodemask_t task_nodes;
2437
2438	/* Check flags */
2439	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
2440		return -EINVAL;
2441
2442	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
2443		return -EPERM;
2444
2445	mm = find_mm_struct(pid, &task_nodes);
2446	if (IS_ERR(mm))
2447		return PTR_ERR(mm);
2448
2449	if (nodes)
2450		err = do_pages_move(mm, task_nodes, nr_pages, pages,
2451				    nodes, status, flags);
2452	else
2453		err = do_pages_stat(mm, nr_pages, pages, status);
2454
2455	mmput(mm);
2456	return err;
2457}
2458
2459SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
2460		const void __user * __user *, pages,
2461		const int __user *, nodes,
2462		int __user *, status, int, flags)
2463{
2464	return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
2465}
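/*
 * Userspace view, as a hedged sketch using the move_pages(2) wrapper from
 * libnuma's <numaif.h> (the buffer names here are illustrative):
 *
 *	void *pages[2] = { addr0, addr1 };
 *	int nodes[2] = { 1, 1 };	// request node 1 for both pages
 *	int status[2];
 *	long rc = move_pages(0, 2, pages, nodes, status, MPOL_MF_MOVE);
 *
 * rc is 0 on full success, a positive count of non-migrated pages, or -1
 * with errno set; status[i] receives the destination node on success or a
 * negative errno (e.g. -EACCES for a multiply-mapped page when
 * MPOL_MF_MOVE_ALL was not requested).  Passing nodes == NULL instead
 * queries the current node of each page without migrating anything.
 */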
2466
2467#ifdef CONFIG_NUMA_BALANCING
2468/*
2469 * Returns true if this is a safe migration target node for misplaced NUMA
2470 * pages. Currently it only checks the watermarks, which is crude.
2471 */
2472static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
2473				   unsigned long nr_migrate_pages)
2474{
2475	int z;
2476
2477	for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2478		struct zone *zone = pgdat->node_zones + z;
2479
2480		if (!managed_zone(zone))
2481			continue;
2482
2483		/* Avoid waking kswapd by allocating nr_migrate_pages pages. */
2484		if (!zone_watermark_ok(zone, 0,
2485				       high_wmark_pages(zone) +
2486				       nr_migrate_pages,
2487				       ZONE_MOVABLE, 0))
2488			continue;
2489		return true;
2490	}
2491	return false;
2492}
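/*
 * Example: with high_wmark_pages(zone) == 1024, migrating 512 base pages
 * to this node is allowed only while the zone still passes a watermark
 * check against 1024 + 512 free pages.  If no zone qualifies and memory
 * tiering is off, the misplaced folio is simply left where it is.
 */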
2493
2494static struct folio *alloc_misplaced_dst_folio(struct folio *src,
2495					   unsigned long data)
2496{
2497	int nid = (int) data;
2498	int order = folio_order(src);
2499	gfp_t gfp = __GFP_THISNODE;
2500
2501	if (order > 0)
2502		gfp |= GFP_TRANSHUGE_LIGHT;
2503	else {
2504		gfp |= GFP_HIGHUSER_MOVABLE | __GFP_NOMEMALLOC | __GFP_NORETRY |
2505			__GFP_NOWARN;
2506		gfp &= ~__GFP_RECLAIM;
2507	}
2508	return __folio_alloc_node(gfp, order, nid);
2509}
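/*
 * The gfp choices above keep NUMA-balancing allocations opportunistic:
 * __GFP_THISNODE pins the allocation to the target node, and neither
 * branch permits reclaim (GFP_TRANSHUGE_LIGHT for THPs; the base-page
 * path masks out __GFP_RECLAIM explicitly), so a busy target node just
 * fails the allocation and the folio stays on its current node.
 */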
2510
2511static int numamigrate_isolate_folio(pg_data_t *pgdat, struct folio *folio)
2512{
2513	int nr_pages = folio_nr_pages(folio);
2514
2515	/* Avoid migrating to a node that is nearly full */
2516	if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
2517		int z;
2518
2519		if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING))
2520			return 0;
2521		for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2522			if (managed_zone(pgdat->node_zones + z))
2523				break;
2524		}
2525
2526		/*
2527		 * If there are no managed zones, it should not proceed
2528		 * further.
2529		 */
2530		if (z < 0)
2531			return 0;
2532
2533		wakeup_kswapd(pgdat->node_zones + z, 0,
2534			      folio_order(folio), ZONE_MOVABLE);
2535		return 0;
2536	}
2537
2538	if (!folio_isolate_lru(folio))
2539		return 0;
2540
2541	node_stat_mod_folio(folio, NR_ISOLATED_ANON + folio_is_file_lru(folio),
2542			    nr_pages);
2543
2544	/*
2545	 * Isolating the folio has taken another reference, so the
2546	 * caller's reference can be safely dropped without the folio
2547	 * disappearing underneath us during migration.
2548	 */
2549	folio_put(folio);
2550	return 1;
2551}
2552
2553/*
2554 * Attempt to migrate a misplaced folio to the specified destination
2555 * node. Caller is expected to have an elevated reference count on
2556 * the folio that will be dropped by this function before returning.
2557 */
2558int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
2559			    int node)
2560{
2561	pg_data_t *pgdat = NODE_DATA(node);
2562	int isolated;
2563	int nr_remaining;
2564	unsigned int nr_succeeded;
2565	LIST_HEAD(migratepages);
2566	int nr_pages = folio_nr_pages(folio);
2567
2568	/*
2569	 * Don't migrate file folios that are mapped in multiple processes
2570	 * with execute permissions as they are probably shared libraries.
2571	 * To check if the folio is shared, ideally we want to make sure
2572	 * every page is mapped to the same process. Doing that is very
2573	 * expensive, so check the estimated mapcount of the folio instead.
2574	 */
2575	if (folio_estimated_sharers(folio) != 1 && folio_is_file_lru(folio) &&
2576	    (vma->vm_flags & VM_EXEC))
2577		goto out;
2578
2579	/*
2580	 * Also do not migrate dirty folios, as not all filesystems can move
2581	 * dirty folios in MIGRATE_ASYNC mode, which would be a waste of cycles.
2582	 */
2583	if (folio_is_file_lru(folio) && folio_test_dirty(folio))
2584		goto out;
2585
2586	isolated = numamigrate_isolate_folio(pgdat, folio);
2587	if (!isolated)
2588		goto out;
2589
2590	list_add(&folio->lru, &migratepages);
2591	nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio,
2592				     NULL, node, MIGRATE_ASYNC,
2593				     MR_NUMA_MISPLACED, &nr_succeeded);
2594	if (nr_remaining) {
2595		if (!list_empty(&migratepages)) {
2596			list_del(&folio->lru);
2597			node_stat_mod_folio(folio, NR_ISOLATED_ANON +
2598					folio_is_file_lru(folio), -nr_pages);
2599			folio_putback_lru(folio);
2600		}
2601		isolated = 0;
2602	}
2603	if (nr_succeeded) {
2604		count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
2605		if (!node_is_toptier(folio_nid(folio)) && node_is_toptier(node))
2606			mod_node_page_state(pgdat, PGPROMOTE_SUCCESS,
2607					    nr_succeeded);
2608	}
2609	BUG_ON(!list_empty(&migratepages));
2610	return isolated;
2611
2612out:
2613	folio_put(folio);
2614	return 0;
2615}
2616#endif /* CONFIG_NUMA_BALANCING */
2617#endif /* CONFIG_NUMA */