v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Memory Migration functionality - linux/mm/migrate.c
   4 *
   5 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
   6 *
   7 * Page migration was first developed in the context of the memory hotplug
   8 * project. The main authors of the migration code are:
   9 *
  10 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
  11 * Hirokazu Takahashi <taka@valinux.co.jp>
  12 * Dave Hansen <haveblue@us.ibm.com>
  13 * Christoph Lameter
  14 */
  15
  16#include <linux/migrate.h>
  17#include <linux/export.h>
  18#include <linux/swap.h>
  19#include <linux/swapops.h>
  20#include <linux/pagemap.h>
  21#include <linux/buffer_head.h>
  22#include <linux/mm_inline.h>
  23#include <linux/nsproxy.h>
  24#include <linux/pagevec.h>
  25#include <linux/ksm.h>
  26#include <linux/rmap.h>
  27#include <linux/topology.h>
  28#include <linux/cpu.h>
  29#include <linux/cpuset.h>
  30#include <linux/writeback.h>
  31#include <linux/mempolicy.h>
  32#include <linux/vmalloc.h>
  33#include <linux/security.h>
  34#include <linux/backing-dev.h>
  35#include <linux/compaction.h>
  36#include <linux/syscalls.h>
  37#include <linux/compat.h>
  38#include <linux/hugetlb.h>
  39#include <linux/hugetlb_cgroup.h>
  40#include <linux/gfp.h>
  41#include <linux/pagewalk.h>
  42#include <linux/pfn_t.h>
  43#include <linux/memremap.h>
  44#include <linux/userfaultfd_k.h>
  45#include <linux/balloon_compaction.h>
  46#include <linux/mmu_notifier.h>
  47#include <linux/page_idle.h>
  48#include <linux/page_owner.h>
  49#include <linux/sched/mm.h>
  50#include <linux/ptrace.h>
  51#include <linux/oom.h>
  52
  53#include <asm/tlbflush.h>
  54
  55#define CREATE_TRACE_POINTS
  56#include <trace/events/migrate.h>
  57
  58#include "internal.h"
  59
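/*
 * Attempt to isolate a non-LRU movable page for migration.  A reference is
 * taken so the page cannot be freed under us, __PageMovable() is checked
 * before touching the page lock, and the owner's isolate_page() callback is
 * invoked with the page locked.  Returns 0 and sets PG_isolated on success,
 * -EBUSY otherwise.
 */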
  60int isolate_movable_page(struct page *page, isolate_mode_t mode)
  61{
  62	struct address_space *mapping;
 
  63
  64	/*
  65	 * Avoid burning cycles with pages that are yet under __free_pages(),
  66	 * or just got freed under us.
  67	 *
  68	 * In case we 'win' a race for a movable page being freed under us and
  69	 * raise its refcount preventing __free_pages() from doing its job
  70	 * the put_page() at the end of this block will take care of
   71	 * releasing this page, thus avoiding a nasty leakage.
  72	 */
  73	if (unlikely(!get_page_unless_zero(page)))
  74		goto out;
  75
  76	/*
   77	 * Check PageMovable before taking the PG_lock, because the page's owner
   78	 * assumes nobody touches the PG_lock of a newly allocated page, so
   79	 * unconditionally grabbing the lock would break the owner's assumptions.
  80	 */
  81	if (unlikely(!__PageMovable(page)))
  82		goto out_putpage;
  83	/*
  84	 * As movable pages are not isolated from LRU lists, concurrent
  85	 * compaction threads can race against page migration functions
   86	 * as well as against the release of a page.
  87	 *
  88	 * In order to avoid having an already isolated movable page
  89	 * being (wrongly) re-isolated while it is under migration,
  90	 * or to avoid attempting to isolate pages being released,
   91	 * let's be sure we have the page lock
  92	 * before proceeding with the movable page isolation steps.
  93	 */
  94	if (unlikely(!trylock_page(page)))
  95		goto out_putpage;
  96
  97	if (!PageMovable(page) || PageIsolated(page))
  98		goto out_no_isolated;
  99
 100	mapping = page_mapping(page);
 101	VM_BUG_ON_PAGE(!mapping, page);
 102
 103	if (!mapping->a_ops->isolate_page(page, mode))
 104		goto out_no_isolated;
 105
 106	/* Driver shouldn't use PG_isolated bit of page->flags */
 107	WARN_ON_ONCE(PageIsolated(page));
 108	__SetPageIsolated(page);
 109	unlock_page(page);
 110
 111	return 0;
 112
 113out_no_isolated:
 114	unlock_page(page);
 115out_putpage:
 116	put_page(page);
 117out:
 118	return -EBUSY;
 119}
 120
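/*
 * Putback counterpart of isolate_movable_page().  Called with the page
 * locked and still PageMovable: the owner's putback_page() callback returns
 * the page to the driver, after which PG_isolated is cleared.
 */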
 121static void putback_movable_page(struct page *page)
 122{
 123	struct address_space *mapping;
 124
 125	mapping = page_mapping(page);
 126	mapping->a_ops->putback_page(page);
 127	__ClearPageIsolated(page);
 128}
 129
 130/*
 131 * Put previously isolated pages back onto the appropriate lists
 132 * from where they were once taken off for compaction/migration.
 133 *
 134 * This function shall be used whenever the isolated pageset has been
  135 * built from LRU, balloon, or hugetlbfs pages. See isolate_migratepages_range()
 136 * and isolate_huge_page().
 137 */
 138void putback_movable_pages(struct list_head *l)
 139{
 140	struct page *page;
 141	struct page *page2;
 142
 143	list_for_each_entry_safe(page, page2, l, lru) {
 144		if (unlikely(PageHuge(page))) {
 145			putback_active_hugepage(page);
 146			continue;
 147		}
 148		list_del(&page->lru);
 149		/*
 150		 * We isolated non-lru movable page so here we can use
 151		 * __PageMovable because LRU page's mapping cannot have
 152		 * PAGE_MAPPING_MOVABLE.
 153		 */
 154		if (unlikely(__PageMovable(page))) {
 155			VM_BUG_ON_PAGE(!PageIsolated(page), page);
 156			lock_page(page);
 157			if (PageMovable(page))
 158				putback_movable_page(page);
 159			else
 160				__ClearPageIsolated(page);
 161			unlock_page(page);
 162			put_page(page);
 163		} else {
 164			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
 165					page_is_file_lru(page), -thp_nr_pages(page));
 166			putback_lru_page(page);
 167		}
 168	}
 169}
 170
 171/*
 172 * Restore a potential migration pte to a working pte entry
 173 */
 174static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
 175				 unsigned long addr, void *old)
 176{
 177	struct page_vma_mapped_walk pvmw = {
 178		.page = old,
 179		.vma = vma,
 180		.address = addr,
 181		.flags = PVMW_SYNC | PVMW_MIGRATION,
 182	};
 183	struct page *new;
 184	pte_t pte;
 185	swp_entry_t entry;
 186
 187	VM_BUG_ON_PAGE(PageTail(page), page);
 188	while (page_vma_mapped_walk(&pvmw)) {
 189		if (PageKsm(page))
 190			new = page;
 191		else
 192			new = page - pvmw.page->index +
 193				linear_page_index(vma, pvmw.address);
 194
 195#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
 196		/* PMD-mapped THP migration entry */
 197		if (!pvmw.pte) {
 198			VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page);
 
 199			remove_migration_pmd(&pvmw, new);
 200			continue;
 201		}
 202#endif
 203
 204		get_page(new);
 205		pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
 206		if (pte_swp_soft_dirty(*pvmw.pte))
 207			pte = pte_mksoft_dirty(pte);
 
 
 208
 209		/*
 210		 * Recheck VMA as permissions can change since migration started
 211		 */
 212		entry = pte_to_swp_entry(*pvmw.pte);
 213		if (is_writable_migration_entry(entry))
 214			pte = maybe_mkwrite(pte, vma);
 215		else if (pte_swp_uffd_wp(*pvmw.pte))
 216			pte = pte_mkuffd_wp(pte);
 217
 218		if (unlikely(is_device_private_page(new))) {
 219			if (pte_write(pte))
 220				entry = make_writable_device_private_entry(
 221							page_to_pfn(new));
 222			else
 223				entry = make_readable_device_private_entry(
 224							page_to_pfn(new));
 225			pte = swp_entry_to_pte(entry);
 226			if (pte_swp_soft_dirty(*pvmw.pte))
 227				pte = pte_swp_mksoft_dirty(pte);
 228			if (pte_swp_uffd_wp(*pvmw.pte))
 229				pte = pte_swp_mkuffd_wp(pte);
 230		}
 231
 232#ifdef CONFIG_HUGETLB_PAGE
 233		if (PageHuge(new)) {
 234			unsigned int shift = huge_page_shift(hstate_vma(vma));
 
 
 235
 236			pte = pte_mkhuge(pte);
 237			pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
 238			set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
 239			if (PageAnon(new))
 240				hugepage_add_anon_rmap(new, vma, pvmw.address);
 241			else
 242				page_dup_rmap(new, true);
 
 
 243		} else
 244#endif
 245		{
 246			set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
 247
 248			if (PageAnon(new))
 249				page_add_anon_rmap(new, vma, pvmw.address, false);
 250			else
 251				page_add_file_rmap(new, false);
 
 252		}
 253		if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
 254			mlock_vma_page(new);
 255
 256		if (PageTransHuge(page) && PageMlocked(page))
 257			clear_page_mlock(page);
 258
 259		/* No need to invalidate - it was non-present before */
 260		update_mmu_cache(vma, pvmw.address, pvmw.pte);
 261	}
 262
 263	return true;
 264}
 265
 266/*
 267 * Get rid of all migration entries and replace them by
 268 * references to the indicated page.
 269 */
 270void remove_migration_ptes(struct page *old, struct page *new, bool locked)
 271{
 272	struct rmap_walk_control rwc = {
 273		.rmap_one = remove_migration_pte,
 274		.arg = old,
 275	};
 276
 277	if (locked)
 278		rmap_walk_locked(new, &rwc);
 
 
 279	else
 280		rmap_walk(new, &rwc);
 281}
 282
 283/*
 284 * Something used the pte of a page under migration. We need to
 285 * get to the page and wait until migration is finished.
 286 * When we return from this function the fault will be retried.
 287 */
 288void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
 289				spinlock_t *ptl)
 290{
 
 
 291	pte_t pte;
 292	swp_entry_t entry;
 293	struct page *page;
 294
 295	spin_lock(ptl);
 296	pte = *ptep;
 297	if (!is_swap_pte(pte))
 298		goto out;
 299
 300	entry = pte_to_swp_entry(pte);
 301	if (!is_migration_entry(entry))
 302		goto out;
 303
 304	page = pfn_swap_entry_to_page(entry);
 305	page = compound_head(page);
 306
 307	/*
 308	 * Once page cache replacement of page migration started, page_count
 309	 * is zero; but we must not call put_and_wait_on_page_locked() without
 310	 * a ref. Use get_page_unless_zero(), and just fault again if it fails.
 311	 */
 312	if (!get_page_unless_zero(page))
 313		goto out;
 314	pte_unmap_unlock(ptep, ptl);
 315	put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE);
 316	return;
 317out:
 318	pte_unmap_unlock(ptep, ptl);
 319}
 320
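/*
 * Wrappers that look up the page table lock for a normal PTE (or, below, a
 * hugetlb PTE) and then call __migration_entry_wait().
 */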
 321void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
 322				unsigned long address)
 323{
 324	spinlock_t *ptl = pte_lockptr(mm, pmd);
 325	pte_t *ptep = pte_offset_map(pmd, address);
 326	__migration_entry_wait(mm, ptep, ptl);
 327}
 328
 329void migration_entry_wait_huge(struct vm_area_struct *vma,
 330		struct mm_struct *mm, pte_t *pte)
 331{
 332	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte);
 333	__migration_entry_wait(mm, pte, ptl);
 334}
 
 335
 336#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
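/*
 * PMD-level counterpart of migration_entry_wait() for PMD-mapped THP
 * migration entries.
 */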
 337void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
 338{
 339	spinlock_t *ptl;
 340	struct page *page;
 341
 342	ptl = pmd_lock(mm, pmd);
 343	if (!is_pmd_migration_entry(*pmd))
 344		goto unlock;
 345	page = pfn_swap_entry_to_page(pmd_to_swp_entry(*pmd));
 346	if (!get_page_unless_zero(page))
 347		goto unlock;
 348	spin_unlock(ptl);
 349	put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE);
 350	return;
 351unlock:
 352	spin_unlock(ptl);
 353}
 354#endif
 355
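/*
 * Number of references the old page is expected to hold during migration:
 * one from the caller/isolation, one extra for ZONE_DEVICE private pages,
 * and, for pagecache pages, one per subpage plus any private data.
 */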
 356static int expected_page_refs(struct address_space *mapping, struct page *page)
 
 357{
 358	int expected_count = 1;
 
 
 359
 360	/*
 361	 * Device private pages have an extra refcount as they are
 362	 * ZONE_DEVICE pages.
 363	 */
 364	expected_count += is_device_private_page(page);
 365	if (mapping)
 366		expected_count += thp_nr_pages(page) + page_has_private(page);
 367
 368	return expected_count;
 369}
 370
 371/*
 372 * Replace the page in the mapping.
 373 *
 374 * The number of remaining references must be:
 375 * 1 for anonymous pages without a mapping
 376 * 2 for pages with a mapping
 377 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
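 * Callers can account for further references they already hold by passing
 * them in extra_count.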
 378 */
 379int migrate_page_move_mapping(struct address_space *mapping,
 380		struct page *newpage, struct page *page, int extra_count)
 381{
 382	XA_STATE(xas, &mapping->i_pages, page_index(page));
 383	struct zone *oldzone, *newzone;
 384	int dirty;
 385	int expected_count = expected_page_refs(mapping, page) + extra_count;
 386	int nr = thp_nr_pages(page);
 387
 388	if (!mapping) {
 389		/* Anonymous page without mapping */
 390		if (page_count(page) != expected_count)
 391			return -EAGAIN;
 392
 393		/* No turning back from here */
 394		newpage->index = page->index;
 395		newpage->mapping = page->mapping;
 396		if (PageSwapBacked(page))
 397			__SetPageSwapBacked(newpage);
 
 
 398
 399		return MIGRATEPAGE_SUCCESS;
 400	}
 401
 402	oldzone = page_zone(page);
 403	newzone = page_zone(newpage);
 404
 405	xas_lock_irq(&xas);
 406	if (page_count(page) != expected_count || xas_load(&xas) != page) {
 407		xas_unlock_irq(&xas);
 408		return -EAGAIN;
 409	}
 410
 411	if (!page_ref_freeze(page, expected_count)) {
 412		xas_unlock_irq(&xas);
 413		return -EAGAIN;
 414	}
 415
 416	/*
 417	 * Now we know that no one else is looking at the page:
 418	 * no turning back from here.
 419	 */
 420	newpage->index = page->index;
 421	newpage->mapping = page->mapping;
 422	page_ref_add(newpage, nr); /* add cache reference */
 423	if (PageSwapBacked(page)) {
 424		__SetPageSwapBacked(newpage);
 425		if (PageSwapCache(page)) {
 426			SetPageSwapCache(newpage);
 427			set_page_private(newpage, page_private(page));
 
 
 428		}
 
 429	} else {
 430		VM_BUG_ON_PAGE(PageSwapCache(page), page);
 
 431	}
 432
 433	/* Move dirty while page refs frozen and newpage not yet exposed */
 434	dirty = PageDirty(page);
 435	if (dirty) {
 436		ClearPageDirty(page);
 437		SetPageDirty(newpage);
 438	}
 439
 440	xas_store(&xas, newpage);
 441	if (PageTransHuge(page)) {
 442		int i;
 443
 444		for (i = 1; i < nr; i++) {
 445			xas_next(&xas);
 446			xas_store(&xas, newpage);
 447		}
 448	}
 449
 450	/*
 451	 * Drop cache reference from old page by unfreezing
 452	 * to one less reference.
 453	 * We know this isn't the last reference.
 454	 */
 455	page_ref_unfreeze(page, expected_count - nr);
 456
 457	xas_unlock(&xas);
 458	/* Leave irq disabled to prevent preemption while updating stats */
 459
 460	/*
 461	 * If moved to a different zone then also account
 462	 * the page for that zone. Other VM counters will be
 463	 * taken care of when we establish references to the
 464	 * new page and drop references to the old page.
 465	 *
 466	 * Note that anonymous pages are accounted for
 467	 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
 468	 * are mapped to swap space.
 469	 */
 470	if (newzone != oldzone) {
 471		struct lruvec *old_lruvec, *new_lruvec;
 472		struct mem_cgroup *memcg;
 473
 474		memcg = page_memcg(page);
 475		old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
 476		new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
 477
 478		__mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
 479		__mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
 480		if (PageSwapBacked(page) && !PageSwapCache(page)) {
 481			__mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
 482			__mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
 483		}
 484#ifdef CONFIG_SWAP
 485		if (PageSwapCache(page)) {
 486			__mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
 487			__mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
 488		}
 489#endif
 490		if (dirty && mapping_can_writeback(mapping)) {
 491			__mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
 492			__mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
 493			__mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
 494			__mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
 495		}
 496	}
 497	local_irq_enable();
 498
 499	return MIGRATEPAGE_SUCCESS;
 500}
 501EXPORT_SYMBOL(migrate_page_move_mapping);
 502
 503/*
 504 * The expected number of remaining references is the same as that
 505 * of migrate_page_move_mapping().
 506 */
 507int migrate_huge_page_move_mapping(struct address_space *mapping,
 508				   struct page *newpage, struct page *page)
 509{
 510	XA_STATE(xas, &mapping->i_pages, page_index(page));
 511	int expected_count;
 512
 513	xas_lock_irq(&xas);
 514	expected_count = 2 + page_has_private(page);
 515	if (page_count(page) != expected_count || xas_load(&xas) != page) {
 516		xas_unlock_irq(&xas);
 517		return -EAGAIN;
 518	}
 519
 520	if (!page_ref_freeze(page, expected_count)) {
 521		xas_unlock_irq(&xas);
 522		return -EAGAIN;
 523	}
 524
 525	newpage->index = page->index;
 526	newpage->mapping = page->mapping;
 527
 528	get_page(newpage);
 529
 530	xas_store(&xas, newpage);
 531
 532	page_ref_unfreeze(page, expected_count - 1);
 533
 534	xas_unlock_irq(&xas);
 535
 536	return MIGRATEPAGE_SUCCESS;
 537}
 538
 539/*
 540 * Copy the page to its new location
 541 */
 542void migrate_page_states(struct page *newpage, struct page *page)
 543{
 544	int cpupid;
 545
 546	if (PageError(page))
 547		SetPageError(newpage);
 548	if (PageReferenced(page))
 549		SetPageReferenced(newpage);
 550	if (PageUptodate(page))
 551		SetPageUptodate(newpage);
 552	if (TestClearPageActive(page)) {
 553		VM_BUG_ON_PAGE(PageUnevictable(page), page);
 554		SetPageActive(newpage);
 555	} else if (TestClearPageUnevictable(page))
 556		SetPageUnevictable(newpage);
 557	if (PageWorkingset(page))
 558		SetPageWorkingset(newpage);
 559	if (PageChecked(page))
 560		SetPageChecked(newpage);
 561	if (PageMappedToDisk(page))
 562		SetPageMappedToDisk(newpage);
 563
 564	/* Move dirty on pages not done by migrate_page_move_mapping() */
 565	if (PageDirty(page))
 566		SetPageDirty(newpage);
 567
 568	if (page_is_young(page))
 569		set_page_young(newpage);
 570	if (page_is_idle(page))
 571		set_page_idle(newpage);
 572
 
 573	/*
 574	 * Copy NUMA information to the new page, to prevent over-eager
 575	 * future migrations of this same page.
 576	 */
 577	cpupid = page_cpupid_xchg_last(page, -1);
 578	page_cpupid_xchg_last(newpage, cpupid);
 579
 580	ksm_migrate_page(newpage, page);
 581	/*
 582	 * Please do not reorder this without considering how mm/ksm.c's
 583	 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
 
 584	 */
 585	if (PageSwapCache(page))
 586		ClearPageSwapCache(page);
 587	ClearPagePrivate(page);
 588
 589	/* page->private contains hugetlb specific flags */
 590	if (!PageHuge(page))
 591		set_page_private(page, 0);
 592
 593	/*
 594	 * If any waiters have accumulated on the new page then
 595	 * wake them up.
 596	 */
 597	if (PageWriteback(newpage))
 598		end_page_writeback(newpage);
 599
 600	/*
 601	 * PG_readahead shares the same bit with PG_reclaim.  The above
 602	 * end_page_writeback() may clear PG_readahead mistakenly, so set the
 603	 * bit after that.
 604	 */
 605	if (PageReadahead(page))
 606		SetPageReadahead(newpage);
 607
 608	copy_page_owner(page, newpage);
 609
 610	if (!PageHuge(page))
 611		mem_cgroup_migrate(page, newpage);
 612}
 613EXPORT_SYMBOL(migrate_page_states);
 614
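/*
 * Copy both the data and the state of the old page to the new page.  Huge
 * and transparent huge pages are copied with copy_huge_page(), everything
 * else with copy_highpage().
 */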
 615void migrate_page_copy(struct page *newpage, struct page *page)
 616{
 617	if (PageHuge(page) || PageTransHuge(page))
 618		copy_huge_page(newpage, page);
 619	else
 620		copy_highpage(newpage, page);
 621
 622	migrate_page_states(newpage, page);
 623}
 624EXPORT_SYMBOL(migrate_page_copy);
 625
 626/************************************************************
 627 *                    Migration functions
 628 ***********************************************************/
 629
 630/*
 631 * Common logic to directly migrate a single LRU page suitable for
 632 * pages that do not use PagePrivate/PagePrivate2.
 633 *
 634 * Pages are locked upon entry and exit.
 635 */
 636int migrate_page(struct address_space *mapping,
 637		struct page *newpage, struct page *page,
 638		enum migrate_mode mode)
 639{
 640	int rc;
 641
 642	BUG_ON(PageWriteback(page));	/* Writeback must be complete */
 
 
 643
 644	rc = migrate_page_move_mapping(mapping, newpage, page, 0);
 
 
 645
 
 646	if (rc != MIGRATEPAGE_SUCCESS)
 647		return rc;
 648
 649	if (mode != MIGRATE_SYNC_NO_COPY)
 650		migrate_page_copy(newpage, page);
 651	else
 652		migrate_page_states(newpage, page);
 653	return MIGRATEPAGE_SUCCESS;
 654}
 655EXPORT_SYMBOL(migrate_page);
 656
 657#ifdef CONFIG_BLOCK
 658/* Returns true if all buffers are successfully locked */
 659static bool buffer_migrate_lock_buffers(struct buffer_head *head,
 660							enum migrate_mode mode)
 661{
 662	struct buffer_head *bh = head;
 
 663
 664	/* Simple case, sync compaction */
 665	if (mode != MIGRATE_ASYNC) {
 666		do {
 667			lock_buffer(bh);
 668			bh = bh->b_this_page;
 669
 670		} while (bh != head);
 671
 672		return true;
 673	}
 674
 675	/* async case, we cannot block on lock_buffer so use trylock_buffer */
 676	do {
 677		if (!trylock_buffer(bh)) {
 678			/*
 679			 * We failed to lock the buffer and cannot stall in
 680			 * async migration. Release the taken locks
 681			 */
 682			struct buffer_head *failed_bh = bh;
 683			bh = head;
 684			while (bh != failed_bh) {
 685				unlock_buffer(bh);
 686				bh = bh->b_this_page;
 687			}
 688			return false;
 689		}
 690
 691		bh = bh->b_this_page;
 692	} while (bh != head);
 
 693	return true;
 694}
 695
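/*
 * Common implementation behind buffer_migrate_page() and
 * buffer_migrate_page_norefs().  With check_refs set, the buffer heads are
 * additionally checked (under mapping->private_lock) for elevated b_count
 * references before they are moved over to the new page.
 */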
 696static int __buffer_migrate_page(struct address_space *mapping,
 697		struct page *newpage, struct page *page, enum migrate_mode mode,
 698		bool check_refs)
 699{
 700	struct buffer_head *bh, *head;
 701	int rc;
 702	int expected_count;
 703
 704	if (!page_has_buffers(page))
 705		return migrate_page(mapping, newpage, page, mode);
 
 706
 707	/* Check whether page does not have extra refs before we do more work */
 708	expected_count = expected_page_refs(mapping, page);
 709	if (page_count(page) != expected_count)
 710		return -EAGAIN;
 711
 712	head = page_buffers(page);
 713	if (!buffer_migrate_lock_buffers(head, mode))
 714		return -EAGAIN;
 715
 716	if (check_refs) {
 717		bool busy;
 718		bool invalidated = false;
 719
 720recheck_buffers:
 721		busy = false;
 722		spin_lock(&mapping->private_lock);
 723		bh = head;
 724		do {
 725			if (atomic_read(&bh->b_count)) {
 726				busy = true;
 727				break;
 728			}
 729			bh = bh->b_this_page;
 730		} while (bh != head);
 731		if (busy) {
 732			if (invalidated) {
 733				rc = -EAGAIN;
 734				goto unlock_buffers;
 735			}
 736			spin_unlock(&mapping->private_lock);
 737			invalidate_bh_lrus();
 738			invalidated = true;
 739			goto recheck_buffers;
 740		}
 741	}
 742
 743	rc = migrate_page_move_mapping(mapping, newpage, page, 0);
 744	if (rc != MIGRATEPAGE_SUCCESS)
 745		goto unlock_buffers;
 746
 747	attach_page_private(newpage, detach_page_private(page));
 748
 749	bh = head;
 750	do {
 751		set_bh_page(bh, newpage, bh_offset(bh));
 752		bh = bh->b_this_page;
 753
 754	} while (bh != head);
 755
 756	if (mode != MIGRATE_SYNC_NO_COPY)
 757		migrate_page_copy(newpage, page);
 758	else
 759		migrate_page_states(newpage, page);
 760
 761	rc = MIGRATEPAGE_SUCCESS;
 762unlock_buffers:
 763	if (check_refs)
 764		spin_unlock(&mapping->private_lock);
 765	bh = head;
 766	do {
 767		unlock_buffer(bh);
 768		bh = bh->b_this_page;
 769
 770	} while (bh != head);
 771
 772	return rc;
 773}
 774
 775/*
 776 * Migration function for pages with buffers. This function can only be used
 777 * if the underlying filesystem guarantees that no other references to "page"
 778 * exist. For example attached buffer heads are accessed only under page lock.
 779 */
 780int buffer_migrate_page(struct address_space *mapping,
 781		struct page *newpage, struct page *page, enum migrate_mode mode)
 782{
 783	return __buffer_migrate_page(mapping, newpage, page, mode, false);
 784}
 785EXPORT_SYMBOL(buffer_migrate_page);
 786
 787/*
 788 * Same as above except that this variant is more careful and checks that there
 789 * are also no buffer head references. This function is the right one for
 790 * mappings where buffer heads are directly looked up and referenced (such as
 791 * block device mappings).
 792 */
 793int buffer_migrate_page_norefs(struct address_space *mapping,
 794		struct page *newpage, struct page *page, enum migrate_mode mode)
 795{
 796	return __buffer_migrate_page(mapping, newpage, page, mode, true);
 797}
 798#endif
 799
 800/*
 801 * Writeback a page to clean the dirty state
 802 */
 803static int writeout(struct address_space *mapping, struct page *page)
 804{
 805	struct writeback_control wbc = {
 806		.sync_mode = WB_SYNC_NONE,
 807		.nr_to_write = 1,
 808		.range_start = 0,
 809		.range_end = LLONG_MAX,
 810		.for_reclaim = 1
 811	};
 812	int rc;
 813
 814	if (!mapping->a_ops->writepage)
 815		/* No write method for the address space */
 816		return -EINVAL;
 817
 818	if (!clear_page_dirty_for_io(page))
 819		/* Someone else already triggered a write */
 820		return -EAGAIN;
 821
 822	/*
 823	 * A dirty page may imply that the underlying filesystem has
 824	 * the page on some queue. So the page must be clean for
  825	 * migration. Writeout may mean we lose the lock and the
 826	 * page state is no longer what we checked for earlier.
 827	 * At this point we know that the migration attempt cannot
 828	 * be successful.
 829	 */
 830	remove_migration_ptes(page, page, false);
 831
 832	rc = mapping->a_ops->writepage(page, &wbc);
 833
 834	if (rc != AOP_WRITEPAGE_ACTIVATE)
 835		/* unlocked. Relock */
 836		lock_page(page);
 837
 838	return (rc < 0) ? -EIO : -EAGAIN;
 839}
 840
 841/*
 842 * Default handling if a filesystem does not provide a migration function.
 843 */
 844static int fallback_migrate_page(struct address_space *mapping,
 845	struct page *newpage, struct page *page, enum migrate_mode mode)
 846{
 847	if (PageDirty(page)) {
 848		/* Only writeback pages in full synchronous migration */
 849		switch (mode) {
 850		case MIGRATE_SYNC:
 851		case MIGRATE_SYNC_NO_COPY:
 852			break;
 853		default:
 854			return -EBUSY;
 855		}
 856		return writeout(mapping, page);
 857	}
 858
 859	/*
 860	 * Buffers may be managed in a filesystem specific way.
 861	 * We must have no buffers or drop them.
 862	 */
 863	if (page_has_private(page) &&
 864	    !try_to_release_page(page, GFP_KERNEL))
 865		return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;
 866
 867	return migrate_page(mapping, newpage, page, mode);
 868}
 869
 870/*
 871 * Move a page to a newly allocated page
 872 * The page is locked and all ptes have been successfully removed.
 873 *
 874 * The new page will have replaced the old page if this function
 875 * is successful.
 876 *
 877 * Return value:
 878 *   < 0 - error code
 879 *  MIGRATEPAGE_SUCCESS - success
 880 */
 881static int move_to_new_page(struct page *newpage, struct page *page,
 882				enum migrate_mode mode)
 883{
 884	struct address_space *mapping;
 885	int rc = -EAGAIN;
 886	bool is_lru = !__PageMovable(page);
 887
 888	VM_BUG_ON_PAGE(!PageLocked(page), page);
 889	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
 890
 891	mapping = page_mapping(page);
 
 892
 893	if (likely(is_lru)) {
 
 
 894		if (!mapping)
 895			rc = migrate_page(mapping, newpage, page, mode);
 896		else if (mapping->a_ops->migratepage)
 
 
 897			/*
 898			 * Most pages have a mapping and most filesystems
 899			 * provide a migratepage callback. Anonymous pages
 900			 * are part of swap space which also has its own
 901			 * migratepage callback. This is the most common path
 902			 * for page migration.
 903			 */
 904			rc = mapping->a_ops->migratepage(mapping, newpage,
 905							page, mode);
 906		else
 907			rc = fallback_migrate_page(mapping, newpage,
 908							page, mode);
 909	} else {
 
 
 910		/*
 911		 * In case of non-lru page, it could be released after
 912		 * isolation step. In that case, we shouldn't try migration.
 913		 */
 914		VM_BUG_ON_PAGE(!PageIsolated(page), page);
 915		if (!PageMovable(page)) {
 916			rc = MIGRATEPAGE_SUCCESS;
 917			__ClearPageIsolated(page);
 918			goto out;
 919		}
 920
 921		rc = mapping->a_ops->migratepage(mapping, newpage,
 922						page, mode);
 923		WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
 924			!PageIsolated(page));
 925	}
 926
 927	/*
 928	 * When successful, old pagecache page->mapping must be cleared before
 929	 * page is freed; but stats require that PageAnon be left as PageAnon.
 930	 */
 931	if (rc == MIGRATEPAGE_SUCCESS) {
 932		if (__PageMovable(page)) {
 933			VM_BUG_ON_PAGE(!PageIsolated(page), page);
 934
 935			/*
 936			 * We clear PG_movable under page_lock so any compactor
 937			 * cannot try to migrate this page.
 938			 */
 939			__ClearPageIsolated(page);
 940		}
 941
 942		/*
 943		 * Anonymous and movable page->mapping will be cleared by
 944		 * free_pages_prepare so don't reset it here for keeping
 945		 * the type to work PageAnon, for example.
 946		 */
 947		if (!PageMappingFlags(page))
 948			page->mapping = NULL;
 949
 950		if (likely(!is_zone_device_page(newpage)))
 951			flush_dcache_page(newpage);
 952
 
 
 953	}
 954out:
 955	return rc;
 956}
 957
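/*
 * Core of unmap_and_move(): lock the old and new pages, wait for writeback
 * when the migration mode allows it, replace the mapped PTEs with migration
 * entries via try_to_migrate(), move the page with move_to_new_page() and
 * finally restore working PTEs with remove_migration_ptes() pointing at
 * whichever page ended up holding the data.
 */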
 958static int __unmap_and_move(struct page *page, struct page *newpage,
 959				int force, enum migrate_mode mode)
 960{
 961	int rc = -EAGAIN;
 962	int page_was_mapped = 0;
 963	struct anon_vma *anon_vma = NULL;
 964	bool is_lru = !__PageMovable(page);
 965
 966	if (!trylock_page(page)) {
 967		if (!force || mode == MIGRATE_ASYNC)
 968			goto out;
 969
 970		/*
 971		 * It's not safe for direct compaction to call lock_page.
 972		 * For example, during page readahead pages are added locked
 973		 * to the LRU. Later, when the IO completes the pages are
 974		 * marked uptodate and unlocked. However, the queueing
 975		 * could be merging multiple pages for one bio (e.g.
 976		 * mpage_readahead). If an allocation happens for the
 977		 * second or third page, the process can end up locking
 978		 * the same page twice and deadlocking. Rather than
 979		 * trying to be clever about what pages can be locked,
 980		 * avoid the use of lock_page for direct compaction
 981		 * altogether.
 982		 */
 983		if (current->flags & PF_MEMALLOC)
 984			goto out;
 985
 986		lock_page(page);
 987	}
 988
 989	if (PageWriteback(page)) {
 990		/*
 991		 * Only in the case of a full synchronous migration is it
 992		 * necessary to wait for PageWriteback. In the async case,
 993		 * the retry loop is too short and in the sync-light case,
 994		 * the overhead of stalling is too much
 995		 */
 996		switch (mode) {
 997		case MIGRATE_SYNC:
 998		case MIGRATE_SYNC_NO_COPY:
 999			break;
1000		default:
1001			rc = -EBUSY;
1002			goto out_unlock;
1003		}
1004		if (!force)
1005			goto out_unlock;
1006		wait_on_page_writeback(page);
1007	}
1008
1009	/*
1010	 * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
 1011	 * we cannot notice that anon_vma is freed while we migrate a page.
 1012	 * This get_anon_vma() delays freeing the anon_vma pointer until the end
 1013	 * of migration. File cache pages are no problem because they are
 1014	 * protected by the page lock throughout migration, so only anonymous
 1015	 * pages need this care.
1016	 *
1017	 * Only page_get_anon_vma() understands the subtleties of
1018	 * getting a hold on an anon_vma from outside one of its mms.
1019	 * But if we cannot get anon_vma, then we won't need it anyway,
1020	 * because that implies that the anon page is no longer mapped
1021	 * (and cannot be remapped so long as we hold the page lock).
1022	 */
1023	if (PageAnon(page) && !PageKsm(page))
1024		anon_vma = page_get_anon_vma(page);
1025
1026	/*
1027	 * Block others from accessing the new page when we get around to
1028	 * establishing additional references. We are usually the only one
1029	 * holding a reference to newpage at this point. We used to have a BUG
1030	 * here if trylock_page(newpage) fails, but would like to allow for
1031	 * cases where there might be a race with the previous use of newpage.
1032	 * This is much like races on refcount of oldpage: just don't BUG().
1033	 */
1034	if (unlikely(!trylock_page(newpage)))
1035		goto out_unlock;
 
1036
1037	if (unlikely(!is_lru)) {
1038		rc = move_to_new_page(newpage, page, mode);
1039		goto out_unlock_both;
1040	}
1041
1042	/*
1043	 * Corner case handling:
1044	 * 1. When a new swap-cache page is read into, it is added to the LRU
1045	 * and treated as swapcache but it has no rmap yet.
1046	 * Calling try_to_unmap() against a page->mapping==NULL page will
1047	 * trigger a BUG.  So handle it here.
1048	 * 2. An orphaned page (see truncate_cleanup_page) might have
1049	 * fs-private metadata. The page can be picked up due to memory
1050	 * offlining.  Everywhere else except page reclaim, the page is
1051	 * invisible to the vm, so the page can not be migrated.  So try to
1052	 * free the metadata, so the page can be freed.
1053	 */
1054	if (!page->mapping) {
1055		VM_BUG_ON_PAGE(PageAnon(page), page);
1056		if (page_has_private(page)) {
1057			try_to_free_buffers(page);
1058			goto out_unlock_both;
1059		}
1060	} else if (page_mapped(page)) {
1061		/* Establish migration ptes */
1062		VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
1063				page);
1064		try_to_migrate(page, 0);
1065		page_was_mapped = 1;
1066	}
1067
1068	if (!page_mapped(page))
1069		rc = move_to_new_page(newpage, page, mode);
1070
1071	if (page_was_mapped)
1072		remove_migration_ptes(page,
1073			rc == MIGRATEPAGE_SUCCESS ? newpage : page, false);
1074
1075out_unlock_both:
1076	unlock_page(newpage);
1077out_unlock:
1078	/* Drop an anon_vma reference if we took one */
1079	if (anon_vma)
1080		put_anon_vma(anon_vma);
1081	unlock_page(page);
1082out:
1083	/*
1084	 * If migration is successful, decrease refcount of the newpage
1085	 * which will not free the page because new page owner increased
1086	 * refcounter. As well, if it is LRU page, add the page to LRU
1087	 * list in here. Use the old state of the isolated source page to
1088	 * determine if we migrated a LRU page. newpage was already unlocked
1089	 * and possibly modified by its owner - don't rely on the page
1090	 * state.
1091	 */
1092	if (rc == MIGRATEPAGE_SUCCESS) {
1093		if (unlikely(!is_lru))
1094			put_page(newpage);
1095		else
1096			putback_lru_page(newpage);
1097	}
1098
1099	return rc;
1100}
1101
1102/*
1103 * Obtain the lock on page, remove all ptes and migrate the page
1104 * to the newly allocated page in newpage.
1105 */
1106static int unmap_and_move(new_page_t get_new_page,
1107				   free_page_t put_new_page,
1108				   unsigned long private, struct page *page,
1109				   int force, enum migrate_mode mode,
1110				   enum migrate_reason reason,
1111				   struct list_head *ret)
1112{
1113	int rc = MIGRATEPAGE_SUCCESS;
1114	struct page *newpage = NULL;
1115
1116	if (!thp_migration_supported() && PageTransHuge(page))
1117		return -ENOSYS;
 
1118
1119	if (page_count(page) == 1) {
1120		/* page was freed from under us. So we are done. */
1121		ClearPageActive(page);
1122		ClearPageUnevictable(page);
1123		if (unlikely(__PageMovable(page))) {
1124			lock_page(page);
1125			if (!PageMovable(page))
1126				__ClearPageIsolated(page);
1127			unlock_page(page);
1128		}
1129		goto out;
1130	}
1131
1132	newpage = get_new_page(page, private);
1133	if (!newpage)
1134		return -ENOMEM;
1135
1136	rc = __unmap_and_move(page, newpage, force, mode);
1137	if (rc == MIGRATEPAGE_SUCCESS)
1138		set_page_owner_migrate_reason(newpage, reason);
1139
1140out:
1141	if (rc != -EAGAIN) {
1142		/*
1143		 * A page that has been migrated has all references
1144		 * removed and will be freed. A page that has not been
1145		 * migrated will have kept its references and be restored.
1146		 */
1147		list_del(&page->lru);
1148	}
1149
1150	/*
1151	 * If migration is successful, releases reference grabbed during
1152	 * isolation. Otherwise, restore the page to right list unless
1153	 * we want to retry.
1154	 */
1155	if (rc == MIGRATEPAGE_SUCCESS) {
1156		/*
1157		 * Compaction can migrate also non-LRU pages which are
1158		 * not accounted to NR_ISOLATED_*. They can be recognized
1159		 * as __PageMovable
1160		 */
1161		if (likely(!__PageMovable(page)))
1162			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
1163					page_is_file_lru(page), -thp_nr_pages(page));
1164
1165		if (reason != MR_MEMORY_FAILURE)
1166			/*
1167			 * We release the page in page_handle_poison.
1168			 */
1169			put_page(page);
1170	} else {
1171		if (rc != -EAGAIN)
1172			list_add_tail(&page->lru, ret);
 
 
1173
1174		if (put_new_page)
1175			put_new_page(newpage, private);
1176		else
1177			put_page(newpage);
1178	}
1179
1180	return rc;
1181}
1182
1183/*
 1184 * Counterpart of unmap_and_move() for hugepage migration.
 1185 *
 1186 * This function doesn't wait for the completion of hugepage I/O
1187 * because there is no race between I/O and migration for hugepage.
1188 * Note that currently hugepage I/O occurs only in direct I/O
1189 * where no lock is held and PG_writeback is irrelevant,
1190 * and writeback status of all subpages are counted in the reference
1191 * count of the head page (i.e. if all subpages of a 2MB hugepage are
1192 * under direct I/O, the reference of the head page is 512 and a bit more.)
1193 * This means that when we try to migrate hugepage whose subpages are
1194 * doing direct I/O, some references remain after try_to_unmap() and
1195 * hugepage migration fails without data corruption.
1196 *
1197 * There is also no race when direct I/O is issued on the page under migration,
1198 * because then pte is replaced with migration swap entry and direct I/O code
1199 * will wait in the page fault for migration to complete.
1200 */
1201static int unmap_and_move_huge_page(new_page_t get_new_page,
1202				free_page_t put_new_page, unsigned long private,
1203				struct page *hpage, int force,
1204				enum migrate_mode mode, int reason,
1205				struct list_head *ret)
1206{
 
1207	int rc = -EAGAIN;
1208	int page_was_mapped = 0;
1209	struct page *new_hpage;
1210	struct anon_vma *anon_vma = NULL;
1211	struct address_space *mapping = NULL;
1212
1213	/*
1214	 * Migratability of hugepages depends on architectures and their size.
1215	 * This check is necessary because some callers of hugepage migration
1216	 * like soft offline and memory hotremove don't walk through page
1217	 * tables or check whether the hugepage is pmd-based or not before
1218	 * kicking migration.
1219	 */
1220	if (!hugepage_migration_supported(page_hstate(hpage))) {
1221		list_move_tail(&hpage->lru, ret);
1222		return -ENOSYS;
1223	}
1224
1225	if (page_count(hpage) == 1) {
1226		/* page was freed from under us. So we are done. */
1227		putback_active_hugepage(hpage);
1228		return MIGRATEPAGE_SUCCESS;
1229	}
1230
1231	new_hpage = get_new_page(hpage, private);
1232	if (!new_hpage)
1233		return -ENOMEM;
1234
1235	if (!trylock_page(hpage)) {
1236		if (!force)
1237			goto out;
1238		switch (mode) {
1239		case MIGRATE_SYNC:
1240		case MIGRATE_SYNC_NO_COPY:
1241			break;
1242		default:
1243			goto out;
1244		}
1245		lock_page(hpage);
1246	}
1247
1248	/*
1249	 * Check for pages which are in the process of being freed.  Without
1250	 * page_mapping() set, hugetlbfs specific move page routine will not
1251	 * be called and we could leak usage counts for subpools.
1252	 */
1253	if (hugetlb_page_subpool(hpage) && !page_mapping(hpage)) {
1254		rc = -EBUSY;
1255		goto out_unlock;
1256	}
1257
1258	if (PageAnon(hpage))
1259		anon_vma = page_get_anon_vma(hpage);
1260
1261	if (unlikely(!trylock_page(new_hpage)))
1262		goto put_anon;
1263
1264	if (page_mapped(hpage)) {
1265		bool mapping_locked = false;
1266		enum ttu_flags ttu = 0;
1267
1268		if (!PageAnon(hpage)) {
1269			/*
1270			 * In shared mappings, try_to_unmap could potentially
1271			 * call huge_pmd_unshare.  Because of this, take
1272			 * semaphore in write mode here and set TTU_RMAP_LOCKED
1273			 * to let lower levels know we have taken the lock.
1274			 */
1275			mapping = hugetlb_page_mapping_lock_write(hpage);
1276			if (unlikely(!mapping))
1277				goto unlock_put_anon;
1278
1279			mapping_locked = true;
1280			ttu |= TTU_RMAP_LOCKED;
1281		}
1282
1283		try_to_migrate(hpage, ttu);
1284		page_was_mapped = 1;
1285
1286		if (mapping_locked)
1287			i_mmap_unlock_write(mapping);
1288	}
1289
1290	if (!page_mapped(hpage))
1291		rc = move_to_new_page(new_hpage, hpage, mode);
1292
1293	if (page_was_mapped)
1294		remove_migration_ptes(hpage,
1295			rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, false);
1296
1297unlock_put_anon:
1298	unlock_page(new_hpage);
1299
1300put_anon:
1301	if (anon_vma)
1302		put_anon_vma(anon_vma);
1303
1304	if (rc == MIGRATEPAGE_SUCCESS) {
1305		move_hugetlb_state(hpage, new_hpage, reason);
1306		put_new_page = NULL;
1307	}
1308
1309out_unlock:
1310	unlock_page(hpage);
1311out:
1312	if (rc == MIGRATEPAGE_SUCCESS)
1313		putback_active_hugepage(hpage);
1314	else if (rc != -EAGAIN)
1315		list_move_tail(&hpage->lru, ret);
1316
1317	/*
1318	 * If migration was not successful and there's a freeing callback, use
1319	 * it.  Otherwise, put_page() will drop the reference grabbed during
1320	 * isolation.
1321	 */
1322	if (put_new_page)
1323		put_new_page(new_hpage, private);
1324	else
1325		putback_active_hugepage(new_hpage);
1326
1327	return rc;
1328}
1329
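/*
 * Split a THP in place.  On success the list cursor is reset so iteration
 * stays safe: the head page is retried immediately, while the new tail
 * pages, appended to @from, are migrated as base pages once the rest of the
 * list has been processed.
 */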
1330static inline int try_split_thp(struct page *page, struct page **page2,
1331				struct list_head *from)
1332{
1333	int rc = 0;
1334
1335	lock_page(page);
1336	rc = split_huge_page_to_list(page, from);
1337	unlock_page(page);
1338	if (!rc)
1339		list_safe_reset_next(page, *page2, lru);
1340
1341	return rc;
1342}
1343
1344/*
1345 * migrate_pages - migrate the pages specified in a list, to the free pages
1346 *		   supplied as the target for the page migration
1347 *
1348 * @from:		The list of pages to be migrated.
1349 * @get_new_page:	The function used to allocate free pages to be used
1350 *			as the target of the page migration.
1351 * @put_new_page:	The function used to free target pages if migration
1352 *			fails, or NULL if no special handling is necessary.
1353 * @private:		Private data to be passed on to get_new_page()
1354 * @mode:		The migration mode that specifies the constraints for
1355 *			page migration, if any.
1356 * @reason:		The reason for page migration.
1357 *
1358 * The function returns after 10 attempts or if no pages are movable any more
1359 * because the list has become empty or no retryable pages exist any more.
 1360 * It is the caller's responsibility to call putback_movable_pages() to return pages
1361 * to the LRU or free list only if ret != 0.
1362 *
1363 * Returns the number of pages that were not migrated, or an error code.
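 *
 * A typical caller (see do_move_pages_to_node() later in this file) isolates
 * pages onto a private list, supplies an allocation callback such as
 * alloc_migration_target() together with a struct migration_target_control
 * describing the target node, and puts back whatever could not be migrated:
 *
 *	err = migrate_pages(&pagelist, alloc_migration_target, NULL,
 *			(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL);
 *	if (err)
 *		putback_movable_pages(&pagelist);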
1364 */
1365int migrate_pages(struct list_head *from, new_page_t get_new_page,
1366		free_page_t put_new_page, unsigned long private,
1367		enum migrate_mode mode, int reason)
1368{
1369	int retry = 1;
1370	int thp_retry = 1;
1371	int nr_failed = 0;
1372	int nr_succeeded = 0;
1373	int nr_thp_succeeded = 0;
1374	int nr_thp_failed = 0;
1375	int nr_thp_split = 0;
1376	int pass = 0;
1377	bool is_thp = false;
1378	struct page *page;
1379	struct page *page2;
1380	int swapwrite = current->flags & PF_SWAPWRITE;
1381	int rc, nr_subpages;
1382	LIST_HEAD(ret_pages);
1383	bool nosplit = (reason == MR_NUMA_MISPLACED);
1384
1385	trace_mm_migrate_pages_start(mode, reason);
1386
1387	if (!swapwrite)
1388		current->flags |= PF_SWAPWRITE;
1389
1390	for (pass = 0; pass < 10 && (retry || thp_retry); pass++) {
1391		retry = 0;
1392		thp_retry = 0;
1393
1394		list_for_each_entry_safe(page, page2, from, lru) {
1395retry:
1396			/*
 1397			 * THP statistics are based on the source huge page.
1398			 * Capture required information that might get lost
1399			 * during migration.
 
 
1400			 */
1401			is_thp = PageTransHuge(page) && !PageHuge(page);
1402			nr_subpages = thp_nr_pages(page);
1403			cond_resched();
1404
1405			if (PageHuge(page))
1406				rc = unmap_and_move_huge_page(get_new_page,
1407						put_new_page, private, page,
1408						pass > 2, mode, reason,
1409						&ret_pages);
1410			else
1411				rc = unmap_and_move(get_new_page, put_new_page,
1412						private, page, pass > 2, mode,
1413						reason, &ret_pages);
1414			/*
1415			 * The rules are:
1416			 *	Success: non hugetlb page will be freed, hugetlb
1417			 *		 page will be put back
1418			 *	-EAGAIN: stay on the from list
1419			 *	-ENOMEM: stay on the from list
1420			 *	Other errno: put on ret_pages list then splice to
1421			 *		     from list
1422			 */
1423			switch(rc) {
1424			/*
1425			 * THP migration might be unsupported or the
1426			 * allocation could've failed so we should
1427			 * retry on the same page with the THP split
1428			 * to base pages.
1429			 *
1430			 * Head page is retried immediately and tail
1431			 * pages are added to the tail of the list so
1432			 * we encounter them after the rest of the list
1433			 * is processed.
1434			 */
1435			case -ENOSYS:
1436				/* THP migration is unsupported */
1437				if (is_thp) {
1438					if (!try_split_thp(page, &page2, from)) {
1439						nr_thp_split++;
1440						goto retry;
1441					}
1442
1443					nr_thp_failed++;
1444					nr_failed += nr_subpages;
1445					break;
1446				}
 
1447
1448				/* Hugetlb migration is unsupported */
1449				nr_failed++;
1450				break;
1451			case -ENOMEM:
1452				/*
1453				 * When memory is low, don't bother to try to migrate
1454				 * other pages, just exit.
1455				 * THP NUMA faulting doesn't split THP to retry.
1456				 */
1457				if (is_thp && !nosplit) {
1458					if (!try_split_thp(page, &page2, from)) {
1459						nr_thp_split++;
1460						goto retry;
1461					}
 
1462
1463					nr_thp_failed++;
1464					nr_failed += nr_subpages;
1465					goto out;
1466				}
1467				nr_failed++;
1468				goto out;
1469			case -EAGAIN:
1470				if (is_thp) {
1471					thp_retry++;
1472					break;
1473				}
1474				retry++;
 
 
1475				break;
1476			case MIGRATEPAGE_SUCCESS:
1477				if (is_thp) {
1478					nr_thp_succeeded++;
1479					nr_succeeded += nr_subpages;
1480					break;
1481				}
1482				nr_succeeded++;
1483				break;
1484			default:
1485				/*
1486				 * Permanent failure (-EBUSY, etc.):
1487				 * unlike -EAGAIN case, the failed page is
1488				 * removed from migration page list and not
1489				 * retried in the next outer loop.
1490				 */
1491				if (is_thp) {
1492					nr_thp_failed++;
1493					nr_failed += nr_subpages;
1494					break;
1495				}
1496				nr_failed++;
 
 
1497				break;
1498			}
1499		}
1500	}
1501	nr_failed += retry + thp_retry;
1502	nr_thp_failed += thp_retry;
1503	rc = nr_failed;
1504out:
1505	/*
 1506	 * Put the pages that failed permanently back on the migration list; they
1507	 * will be put back to the right list by the caller.
1508	 */
1509	list_splice(&ret_pages, from);
1510
1511	count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
1512	count_vm_events(PGMIGRATE_FAIL, nr_failed);
1513	count_vm_events(THP_MIGRATION_SUCCESS, nr_thp_succeeded);
1514	count_vm_events(THP_MIGRATION_FAIL, nr_thp_failed);
1515	count_vm_events(THP_MIGRATION_SPLIT, nr_thp_split);
1516	trace_mm_migrate_pages(nr_succeeded, nr_failed, nr_thp_succeeded,
1517			       nr_thp_failed, nr_thp_split, mode, reason);
 
 
1518
1519	if (!swapwrite)
1520		current->flags &= ~PF_SWAPWRITE;
1521
1522	return rc;
1523}
1524
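/*
 * Allocation callback for migrate_pages(): @private points at a struct
 * migration_target_control carrying the target node, nodemask and GFP mask.
 * hugetlb source pages get a hugetlb target and THP sources a THP target.
 */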
1525struct page *alloc_migration_target(struct page *page, unsigned long private)
1526{
1527	struct migration_target_control *mtc;
1528	gfp_t gfp_mask;
1529	unsigned int order = 0;
1530	struct page *new_page = NULL;
1531	int nid;
1532	int zidx;
1533
1534	mtc = (struct migration_target_control *)private;
1535	gfp_mask = mtc->gfp_mask;
1536	nid = mtc->nid;
1537	if (nid == NUMA_NO_NODE)
1538		nid = page_to_nid(page);
1539
1540	if (PageHuge(page)) {
1541		struct hstate *h = page_hstate(compound_head(page));
1542
1543		gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
1544		return alloc_huge_page_nodemask(h, nid, mtc->nmask, gfp_mask);
 
 
1545	}
1546
1547	if (PageTransHuge(page)) {
1548		/*
1549		 * clear __GFP_RECLAIM to make the migration callback
1550		 * consistent with regular THP allocations.
1551		 */
1552		gfp_mask &= ~__GFP_RECLAIM;
1553		gfp_mask |= GFP_TRANSHUGE;
1554		order = HPAGE_PMD_ORDER;
1555	}
1556	zidx = zone_idx(page_zone(page));
1557	if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
1558		gfp_mask |= __GFP_HIGHMEM;
1559
1560	new_page = __alloc_pages(gfp_mask, order, nid, mtc->nmask);
1561
1562	if (new_page && PageTransHuge(new_page))
1563		prep_transhuge_page(new_page);
1564
1565	return new_page;
1566}
1567
1568#ifdef CONFIG_NUMA
1569
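/*
 * Write @value into @nr consecutive entries of the user status array,
 * starting at index @start.
 */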
1570static int store_status(int __user *status, int start, int value, int nr)
1571{
1572	while (nr-- > 0) {
1573		if (put_user(value, status + start))
1574			return -EFAULT;
1575		start++;
1576	}
1577
1578	return 0;
1579}
1580
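/*
 * Migrate everything on @pagelist to @node.  On failure the pages that were
 * not migrated are put back where they came from.  Returns 0 on success,
 * otherwise the number of pages that could not be migrated or a negative
 * errno.
 */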
1581static int do_move_pages_to_node(struct mm_struct *mm,
1582		struct list_head *pagelist, int node)
1583{
1584	int err;
1585	struct migration_target_control mtc = {
1586		.nid = node,
1587		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
 
1588	};
1589
1590	err = migrate_pages(pagelist, alloc_migration_target, NULL,
1591			(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL);
1592	if (err)
1593		putback_movable_pages(pagelist);
1594	return err;
1595}
1596
1597/*
1598 * Resolves the given address to a struct page, isolates it from the LRU and
1599 * puts it to the given pagelist.
1600 * Returns:
1601 *     errno - if the page cannot be found/isolated
1602 *     0 - when it doesn't have to be migrated because it is already on the
1603 *         target node
1604 *     1 - when it has been queued
1605 */
1606static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
1607		int node, struct list_head *pagelist, bool migrate_all)
1608{
1609	struct vm_area_struct *vma;
1610	struct page *page;
1611	unsigned int follflags;
1612	int err;
 
1613
1614	mmap_read_lock(mm);
1615	err = -EFAULT;
1616	vma = find_vma(mm, addr);
1617	if (!vma || addr < vma->vm_start || !vma_migratable(vma))
1618		goto out;
1619
1620	/* FOLL_DUMP to ignore special (like zero) pages */
1621	follflags = FOLL_GET | FOLL_DUMP;
1622	page = follow_page(vma, addr, follflags);
1623
1624	err = PTR_ERR(page);
1625	if (IS_ERR(page))
1626		goto out;
1627
1628	err = -ENOENT;
1629	if (!page)
1630		goto out;
1631
1632	err = 0;
1633	if (page_to_nid(page) == node)
1634		goto out_putpage;
1635
1636	err = -EACCES;
1637	if (page_mapcount(page) > 1 && !migrate_all)
1638		goto out_putpage;
1639
1640	if (PageHuge(page)) {
1641		if (PageHead(page)) {
1642			isolate_huge_page(page, pagelist);
1643			err = 1;
1644		}
1645	} else {
1646		struct page *head;
1647
1648		head = compound_head(page);
1649		err = isolate_lru_page(head);
1650		if (err)
1651			goto out_putpage;
1652
1653		err = 1;
1654		list_add_tail(&head->lru, pagelist);
1655		mod_node_page_state(page_pgdat(head),
1656			NR_ISOLATED_ANON + page_is_file_lru(head),
1657			thp_nr_pages(head));
1658	}
1659out_putpage:
1660	/*
1661	 * Either remove the duplicate refcount from
1662	 * isolate_lru_page() or drop the page ref if it was
1663	 * not isolated.
1664	 */
1665	put_page(page);
1666out:
1667	mmap_read_unlock(mm);
1668	return err;
1669}
1670
1671static int move_pages_and_store_status(struct mm_struct *mm, int node,
1672		struct list_head *pagelist, int __user *status,
1673		int start, int i, unsigned long nr_pages)
1674{
1675	int err;
1676
1677	if (list_empty(pagelist))
1678		return 0;
1679
1680	err = do_move_pages_to_node(mm, pagelist, node);
1681	if (err) {
1682		/*
1683		 * Positive err means the number of failed
1684		 * pages to migrate.  Since we are going to
 1685		 * pages, we need to include the rest of the
1686		 * pages, so need to include the rest of the
1687		 * nr_pages that have not been attempted as
1688		 * well.
1689		 */
1690		if (err > 0)
1691			err += nr_pages - i - 1;
1692		return err;
1693	}
1694	return store_status(status, start, node, i - start);
1695}
1696
1697/*
 1698 * Migrate an array of page addresses onto an array of nodes and fill
1699 * the corresponding array of status.
1700 */
1701static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
1702			 unsigned long nr_pages,
1703			 const void __user * __user *pages,
1704			 const int __user *nodes,
1705			 int __user *status, int flags)
1706{
 
1707	int current_node = NUMA_NO_NODE;
1708	LIST_HEAD(pagelist);
1709	int start, i;
1710	int err = 0, err1;
1711
1712	lru_cache_disable();
1713
1714	for (i = start = 0; i < nr_pages; i++) {
1715		const void __user *p;
1716		unsigned long addr;
1717		int node;
1718
1719		err = -EFAULT;
1720		if (get_user(p, pages + i))
1721			goto out_flush;
1722		if (get_user(node, nodes + i))
1723			goto out_flush;
1724		addr = (unsigned long)untagged_addr(p);
1725
1726		err = -ENODEV;
1727		if (node < 0 || node >= MAX_NUMNODES)
1728			goto out_flush;
1729		if (!node_state(node, N_MEMORY))
1730			goto out_flush;
1731
1732		err = -EACCES;
1733		if (!node_isset(node, task_nodes))
1734			goto out_flush;
1735
1736		if (current_node == NUMA_NO_NODE) {
1737			current_node = node;
1738			start = i;
1739		} else if (node != current_node) {
1740			err = move_pages_and_store_status(mm, current_node,
1741					&pagelist, status, start, i, nr_pages);
1742			if (err)
1743				goto out;
1744			start = i;
1745			current_node = node;
1746		}
1747
1748		/*
1749		 * Errors in the page lookup or isolation are not fatal and we simply
1750		 * report them via status
1751		 */
1752		err = add_page_for_migration(mm, addr, current_node,
1753				&pagelist, flags & MPOL_MF_MOVE_ALL);
1754
1755		if (err > 0) {
1756			/* The page is successfully queued for migration */
1757			continue;
1758		}
1759
1760		/*
1761		 * If the page is already on the target node (!err), store the
1762		 * node, otherwise, store the err.
1763		 */
1764		err = store_status(status, i, err ? : current_node, 1);
1765		if (err)
1766			goto out_flush;
1767
1768		err = move_pages_and_store_status(mm, current_node, &pagelist,
1769				status, start, i, nr_pages);
1770		if (err)
1771			goto out;
 
1772		current_node = NUMA_NO_NODE;
1773	}
1774out_flush:
1775	/* Make sure we do not overwrite the existing error */
1776	err1 = move_pages_and_store_status(mm, current_node, &pagelist,
1777				status, start, i, nr_pages);
1778	if (err >= 0)
1779		err = err1;
1780out:
1781	lru_cache_enable();
1782	return err;
1783}
1784
1785/*
1786 * Determine the nodes of an array of pages and store it in an array of status.
1787 */
1788static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
1789				const void __user **pages, int *status)
1790{
1791	unsigned long i;
1792
1793	mmap_read_lock(mm);
1794
1795	for (i = 0; i < nr_pages; i++) {
1796		unsigned long addr = (unsigned long)(*pages);
1797		struct vm_area_struct *vma;
1798		struct page *page;
 
1799		int err = -EFAULT;
1800
1801		vma = vma_lookup(mm, addr);
1802		if (!vma)
1803			goto set_status;
1804
1805		/* FOLL_DUMP to ignore special (like zero) pages */
1806		page = follow_page(vma, addr, FOLL_DUMP);
1807
1808		err = PTR_ERR(page);
1809		if (IS_ERR(page))
1810			goto set_status;
1811
1812		err = page ? page_to_nid(page) : -ENOENT;
1813set_status:
1814		*status = err;
1815
1816		pages++;
1817		status++;
1818	}
1819
1820	mmap_read_unlock(mm);
1821}
1822
1823/*
1824 * Determine the nodes of a user array of pages and store it in
1825 * a user array of status.
1826 */
1827static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
1828			 const void __user * __user *pages,
1829			 int __user *status)
1830{
1831#define DO_PAGES_STAT_CHUNK_NR 16
1832	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
1833	int chunk_status[DO_PAGES_STAT_CHUNK_NR];
1834
1835	while (nr_pages) {
1836		unsigned long chunk_nr;
1837
1838		chunk_nr = nr_pages;
1839		if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
1840			chunk_nr = DO_PAGES_STAT_CHUNK_NR;
1841
1842		if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
1843			break;
1844
1845		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
1846
1847		if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
1848			break;
1849
1850		pages += chunk_nr;
1851		status += chunk_nr;
1852		nr_pages -= chunk_nr;
1853	}
1854	return nr_pages ? -EFAULT : 0;
1855}
1856
1857static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
1858{
1859	struct task_struct *task;
1860	struct mm_struct *mm;
1861
1862	/*
1863	 * There is no need to check if the current process has the right to modify
1864	 * the specified process when they are the same.
1865	 */
1866	if (!pid) {
1867		mmget(current->mm);
1868		*mem_nodes = cpuset_mems_allowed(current);
1869		return current->mm;
1870	}
1871
1872	/* Find the mm_struct */
1873	rcu_read_lock();
1874	task = find_task_by_vpid(pid);
1875	if (!task) {
1876		rcu_read_unlock();
1877		return ERR_PTR(-ESRCH);
1878	}
1879	get_task_struct(task);
1880
1881	/*
1882	 * Check if this process has the right to modify the specified
1883	 * process. Use the regular "ptrace_may_access()" checks.
1884	 */
1885	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
1886		rcu_read_unlock();
1887		mm = ERR_PTR(-EPERM);
1888		goto out;
1889	}
1890	rcu_read_unlock();
1891
1892	mm = ERR_PTR(security_task_movememory(task));
1893	if (IS_ERR(mm))
1894		goto out;
1895	*mem_nodes = cpuset_mems_allowed(task);
1896	mm = get_task_mm(task);
1897out:
1898	put_task_struct(task);
1899	if (!mm)
1900		mm = ERR_PTR(-EINVAL);
1901	return mm;
1902}
1903
1904/*
1905 * Move a list of pages in the address space of the currently executing
1906 * process.
1907 */
1908static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
1909			     const void __user * __user *pages,
1910			     const int __user *nodes,
1911			     int __user *status, int flags)
1912{
1913	struct mm_struct *mm;
1914	int err;
1915	nodemask_t task_nodes;
1916
1917	/* Check flags */
1918	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
1919		return -EINVAL;
1920
1921	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1922		return -EPERM;
1923
1924	mm = find_mm_struct(pid, &task_nodes);
1925	if (IS_ERR(mm))
1926		return PTR_ERR(mm);
1927
1928	if (nodes)
1929		err = do_pages_move(mm, task_nodes, nr_pages, pages,
1930				    nodes, status, flags);
1931	else
1932		err = do_pages_stat(mm, nr_pages, pages, status);
1933
1934	mmput(mm);
1935	return err;
1936}
1937
1938SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
1939		const void __user * __user *, pages,
1940		const int __user *, nodes,
1941		int __user *, status, int, flags)
1942{
1943	return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
1944}
1945
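/*
 * Illustrative userspace sketch (not part of this file, hedged): how the
 * move_pages(2) syscall defined above is typically driven through libnuma's
 * wrapper in <numaif.h>.  Per-page results come back in status[], mirroring
 * store_status()/do_pages_stat() above; the target node chosen here is an
 * assumption about the test machine.  Build with -lnuma.
 */
#if 0	/* userspace example, excluded from kernel compilation */
#include <numaif.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	unsigned long nr = 4, i;
	void *pages[4];
	int nodes[4], status[4];
	char *buf;

	/* Allocate and touch four page-aligned pages. */
	if (posix_memalign((void **)&buf, psz, nr * psz))
		return 1;
	memset(buf, 0, nr * psz);

	for (i = 0; i < nr; i++) {
		pages[i] = buf + i * psz;
		nodes[i] = 0;			/* assumed target node */
	}

	/* Move pages of the calling process (pid == 0). */
	if (move_pages(0, nr, pages, nodes, status, MPOL_MF_MOVE) == -1)
		perror("move_pages");

	/* With nodes == NULL the call only reports current placement. */
	if (move_pages(0, nr, pages, NULL, status, 0) == 0)
		for (i = 0; i < nr; i++)
			printf("page %lu is on node %d\n", i, status[i]);

	free(buf);
	return 0;
}
#endif
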
1946#ifdef CONFIG_COMPAT
1947COMPAT_SYSCALL_DEFINE6(move_pages, pid_t, pid, compat_ulong_t, nr_pages,
1948		       compat_uptr_t __user *, pages32,
1949		       const int __user *, nodes,
1950		       int __user *, status,
1951		       int, flags)
1952{
1953	const void __user * __user *pages;
1954	int i;
1955
1956	pages = compat_alloc_user_space(nr_pages * sizeof(void *));
1957	for (i = 0; i < nr_pages; i++) {
1958		compat_uptr_t p;
1959
1960		if (get_user(p, pages32 + i) ||
1961			put_user(compat_ptr(p), pages + i))
1962			return -EFAULT;
1963	}
1964	return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
1965}
1966#endif /* CONFIG_COMPAT */
1967
1968#ifdef CONFIG_NUMA_BALANCING
1969/*
1970 * Returns true if this is a safe migration target node for misplaced NUMA
1971 * pages. Currently it only checks the watermarks, which is crude.
1972 */
1973static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
1974				   unsigned long nr_migrate_pages)
1975{
1976	int z;
1977
1978	for (z = pgdat->nr_zones - 1; z >= 0; z--) {
1979		struct zone *zone = pgdat->node_zones + z;
1980
1981		if (!populated_zone(zone))
1982			continue;
1983
1984		/* Avoid waking kswapd by allocating pages_to_migrate pages. */
1985		if (!zone_watermark_ok(zone, 0,
1986				       high_wmark_pages(zone) +
1987				       nr_migrate_pages,
1988				       ZONE_MOVABLE, 0))
1989			continue;
1990		return true;
1991	}
1992	return false;
1993}
1994
1995static struct page *alloc_misplaced_dst_page(struct page *page,
1996					   unsigned long data)
1997{
1998	int nid = (int) data;
1999	struct page *newpage;
2000
2001	newpage = __alloc_pages_node(nid,
2002					 (GFP_HIGHUSER_MOVABLE |
2003					  __GFP_THISNODE | __GFP_NOMEMALLOC |
2004					  __GFP_NORETRY | __GFP_NOWARN) &
2005					 ~__GFP_RECLAIM, 0);
2006
2007	return newpage;
2008}
2009
2010static struct page *alloc_misplaced_dst_page_thp(struct page *page,
2011						 unsigned long data)
2012{
2013	int nid = (int) data;
2014	struct page *newpage;
2015
2016	newpage = alloc_pages_node(nid, (GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
2017				   HPAGE_PMD_ORDER);
2018	if (!newpage)
2019		goto out;
2020
2021	prep_transhuge_page(newpage);
2022
2023out:
2024	return newpage;
2025}
2026
2027static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
2028{
2029	int page_lru;
2030
2031	VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page);
2032
2033	/* Do not migrate THP mapped by multiple processes */
2034	if (PageTransHuge(page) && total_mapcount(page) > 1)
2035		return 0;
2036
2037	/* Avoid migrating to a node that is nearly full */
2038	if (!migrate_balanced_pgdat(pgdat, compound_nr(page)))
2039		return 0;
2040
2041	if (isolate_lru_page(page))
2042		return 0;
2043
2044	page_lru = page_is_file_lru(page);
2045	mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru,
2046				thp_nr_pages(page));
2047
2048	/*
2049	 * Isolating the page has taken another reference, so the
2050	 * caller's reference can be safely dropped without the page
2051	 * disappearing underneath us during migration.
2052	 */
2053	put_page(page);
2054	return 1;
2055}
2056
2057/*
2058 * Attempt to migrate a misplaced page to the specified destination
2059 * node. Caller is expected to have an elevated reference count on
2060 * the page that will be dropped by this function before returning.
2061 */
2062int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
2063			   int node)
2064{
 
2065	pg_data_t *pgdat = NODE_DATA(node);
2066	int isolated;
2067	int nr_remaining;
2068	LIST_HEAD(migratepages);
2069	new_page_t *new;
2070	bool compound;
2071	int nr_pages = thp_nr_pages(page);
2072
2073	/*
2074	 * PTE mapped THP or HugeTLB page can't reach here so the page could
2075	 * be either base page or THP.  And it must be head page if it is
2076	 * THP.
2077	 */
2078	compound = PageTransHuge(page);
2079
2080	if (compound)
2081		new = alloc_misplaced_dst_page_thp;
2082	else
2083		new = alloc_misplaced_dst_page;
2084
2085	/*
2086	 * Don't migrate file pages that are mapped in multiple processes
2087	 * with execute permissions as they are probably shared libraries.
2088	 */
2089	if (page_mapcount(page) != 1 && page_is_file_lru(page) &&
2090	    (vma->vm_flags & VM_EXEC))
2091		goto out;
2092
2093	/*
2094	 * Also do not migrate dirty pages as not all filesystems can move
2095	 * dirty pages in MIGRATE_ASYNC mode which is a waste of cycles.
2096	 */
2097	if (page_is_file_lru(page) && PageDirty(page))
2098		goto out;
2099
2100	isolated = numamigrate_isolate_page(pgdat, page);
2101	if (!isolated)
2102		goto out;
2103
2104	list_add(&page->lru, &migratepages);
2105	nr_remaining = migrate_pages(&migratepages, *new, NULL, node,
2106				     MIGRATE_ASYNC, MR_NUMA_MISPLACED);
2107	if (nr_remaining) {
2108		if (!list_empty(&migratepages)) {
2109			list_del(&page->lru);
2110			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
2111					page_is_file_lru(page), -nr_pages);
2112			putback_lru_page(page);
2113		}
2114		isolated = 0;
2115	} else
2116		count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_pages);
2117	BUG_ON(!list_empty(&migratepages));
2118	return isolated;
2119
2120out:
2121	put_page(page);
2122	return 0;
2123}
2124#endif /* CONFIG_NUMA_BALANCING */
2125#endif /* CONFIG_NUMA */
2126
2127#ifdef CONFIG_DEVICE_PRIVATE
2128static int migrate_vma_collect_skip(unsigned long start,
2129				    unsigned long end,
2130				    struct mm_walk *walk)
2131{
2132	struct migrate_vma *migrate = walk->private;
2133	unsigned long addr;
2134
2135	for (addr = start; addr < end; addr += PAGE_SIZE) {
2136		migrate->dst[migrate->npages] = 0;
2137		migrate->src[migrate->npages++] = 0;
2138	}
2139
2140	return 0;
2141}
2142
2143static int migrate_vma_collect_hole(unsigned long start,
2144				    unsigned long end,
2145				    __always_unused int depth,
2146				    struct mm_walk *walk)
2147{
2148	struct migrate_vma *migrate = walk->private;
2149	unsigned long addr;
2150
2151	/* Only allow populating anonymous memory. */
2152	if (!vma_is_anonymous(walk->vma))
2153		return migrate_vma_collect_skip(start, end, walk);
2154
2155	for (addr = start; addr < end; addr += PAGE_SIZE) {
2156		migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE;
2157		migrate->dst[migrate->npages] = 0;
2158		migrate->npages++;
2159		migrate->cpages++;
2160	}
2161
2162	return 0;
2163}
2164
2165static int migrate_vma_collect_pmd(pmd_t *pmdp,
2166				   unsigned long start,
2167				   unsigned long end,
2168				   struct mm_walk *walk)
2169{
2170	struct migrate_vma *migrate = walk->private;
2171	struct vm_area_struct *vma = walk->vma;
2172	struct mm_struct *mm = vma->vm_mm;
2173	unsigned long addr = start, unmapped = 0;
2174	spinlock_t *ptl;
2175	pte_t *ptep;
2176
2177again:
2178	if (pmd_none(*pmdp))
2179		return migrate_vma_collect_hole(start, end, -1, walk);
2180
2181	if (pmd_trans_huge(*pmdp)) {
2182		struct page *page;
2183
2184		ptl = pmd_lock(mm, pmdp);
2185		if (unlikely(!pmd_trans_huge(*pmdp))) {
2186			spin_unlock(ptl);
2187			goto again;
2188		}
2189
2190		page = pmd_page(*pmdp);
2191		if (is_huge_zero_page(page)) {
2192			spin_unlock(ptl);
2193			split_huge_pmd(vma, pmdp, addr);
2194			if (pmd_trans_unstable(pmdp))
2195				return migrate_vma_collect_skip(start, end,
2196								walk);
2197		} else {
2198			int ret;
2199
2200			get_page(page);
2201			spin_unlock(ptl);
2202			if (unlikely(!trylock_page(page)))
2203				return migrate_vma_collect_skip(start, end,
2204								walk);
2205			ret = split_huge_page(page);
2206			unlock_page(page);
2207			put_page(page);
2208			if (ret)
2209				return migrate_vma_collect_skip(start, end,
2210								walk);
2211			if (pmd_none(*pmdp))
2212				return migrate_vma_collect_hole(start, end, -1,
2213								walk);
2214		}
2215	}
2216
2217	if (unlikely(pmd_bad(*pmdp)))
2218		return migrate_vma_collect_skip(start, end, walk);
2219
2220	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
2221	arch_enter_lazy_mmu_mode();
2222
2223	for (; addr < end; addr += PAGE_SIZE, ptep++) {
2224		unsigned long mpfn = 0, pfn;
2225		struct page *page;
2226		swp_entry_t entry;
2227		pte_t pte;
2228
2229		pte = *ptep;
2230
2231		if (pte_none(pte)) {
2232			if (vma_is_anonymous(vma)) {
2233				mpfn = MIGRATE_PFN_MIGRATE;
2234				migrate->cpages++;
2235			}
2236			goto next;
2237		}
2238
2239		if (!pte_present(pte)) {
2240			/*
2241			 * Only care about unaddressable device page special
2242			 * page table entry. Other special swap entries are not
2243			 * migratable, and we ignore regular swapped page.
2244			 */
2245			entry = pte_to_swp_entry(pte);
2246			if (!is_device_private_entry(entry))
2247				goto next;
2248
2249			page = pfn_swap_entry_to_page(entry);
2250			if (!(migrate->flags &
2251				MIGRATE_VMA_SELECT_DEVICE_PRIVATE) ||
2252			    page->pgmap->owner != migrate->pgmap_owner)
2253				goto next;
2254
2255			mpfn = migrate_pfn(page_to_pfn(page)) |
2256					MIGRATE_PFN_MIGRATE;
2257			if (is_writable_device_private_entry(entry))
2258				mpfn |= MIGRATE_PFN_WRITE;
2259		} else {
2260			if (!(migrate->flags & MIGRATE_VMA_SELECT_SYSTEM))
2261				goto next;
2262			pfn = pte_pfn(pte);
2263			if (is_zero_pfn(pfn)) {
2264				mpfn = MIGRATE_PFN_MIGRATE;
2265				migrate->cpages++;
2266				goto next;
2267			}
2268			page = vm_normal_page(migrate->vma, addr, pte);
2269			mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
2270			mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
2271		}
2272
2273		/* FIXME support THP */
2274		if (!page || !page->mapping || PageTransCompound(page)) {
2275			mpfn = 0;
2276			goto next;
2277		}
2278
 
2279		/*
2280		 * By getting a reference on the page we pin it and that blocks
2281		 * any kind of migration. Side effect is that it "freezes" the
2282		 * pte.
2283		 *
2284		 * We drop this reference after isolating the page from the lru
2285		 * for non-device pages (device pages are not on the lru and thus
2286		 * can't be dropped from it).
2287		 */
2288		get_page(page);
2289		migrate->cpages++;
 
2290
2291		/*
2292		 * Optimize for the common case where page is only mapped once
2293		 * in one process. If we can lock the page, then we can safely
2294		 * set up a special migration page table entry now.
2295		 */
2296		if (trylock_page(page)) {
2297			pte_t swp_pte;
2298
2299			mpfn |= MIGRATE_PFN_LOCKED;
2300			ptep_get_and_clear(mm, addr, ptep);
2301
2302			/* Setup special migration page table entry */
2303			if (mpfn & MIGRATE_PFN_WRITE)
2304				entry = make_writable_migration_entry(
2305							page_to_pfn(page));
2306			else
2307				entry = make_readable_migration_entry(
2308							page_to_pfn(page));
2309			swp_pte = swp_entry_to_pte(entry);
2310			if (pte_present(pte)) {
2311				if (pte_soft_dirty(pte))
2312					swp_pte = pte_swp_mksoft_dirty(swp_pte);
2313				if (pte_uffd_wp(pte))
2314					swp_pte = pte_swp_mkuffd_wp(swp_pte);
2315			} else {
2316				if (pte_swp_soft_dirty(pte))
2317					swp_pte = pte_swp_mksoft_dirty(swp_pte);
2318				if (pte_swp_uffd_wp(pte))
2319					swp_pte = pte_swp_mkuffd_wp(swp_pte);
2320			}
2321			set_pte_at(mm, addr, ptep, swp_pte);
2322
2323			/*
2324			 * This is like regular unmap: we remove the rmap and
2325			 * drop page refcount. Page won't be freed, as we took
2326			 * a reference just above.
2327			 */
2328			page_remove_rmap(page, false);
2329			put_page(page);
2330
2331			if (pte_present(pte))
2332				unmapped++;
2333		}
2334
2335next:
2336		migrate->dst[migrate->npages] = 0;
2337		migrate->src[migrate->npages++] = mpfn;
2338	}
2339	arch_leave_lazy_mmu_mode();
2340	pte_unmap_unlock(ptep - 1, ptl);
2341
2342	/* Only flush the TLB if we actually modified any entries */
2343	if (unmapped)
2344		flush_tlb_range(walk->vma, start, end);
2345
2346	return 0;
2347}
2348
2349static const struct mm_walk_ops migrate_vma_walk_ops = {
2350	.pmd_entry		= migrate_vma_collect_pmd,
2351	.pte_hole		= migrate_vma_collect_hole,
2352};
2353
2354/*
2355 * migrate_vma_collect() - collect pages over a range of virtual addresses
2356 * @migrate: migrate struct containing all migration information
2357 *
2358 * This will walk the CPU page table. For each virtual address backed by a
2359 * valid page, it updates the src array and takes a reference on the page, in
2360 * order to pin the page until we lock it and unmap it.
2361 */
2362static void migrate_vma_collect(struct migrate_vma *migrate)
2363{
2364	struct mmu_notifier_range range;
2365
2366	/*
2367	 * Note that the pgmap_owner is passed to the mmu notifier callback so
2368	 * that the registered device driver can skip invalidating device
2369	 * private page mappings that won't be migrated.
2370	 */
2371	mmu_notifier_range_init_owner(&range, MMU_NOTIFY_MIGRATE, 0,
2372		migrate->vma, migrate->vma->vm_mm, migrate->start, migrate->end,
2373		migrate->pgmap_owner);
2374	mmu_notifier_invalidate_range_start(&range);
2375
2376	walk_page_range(migrate->vma->vm_mm, migrate->start, migrate->end,
2377			&migrate_vma_walk_ops, migrate);
2378
2379	mmu_notifier_invalidate_range_end(&range);
2380	migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT);
2381}
2382
2383/*
2384 * migrate_vma_check_page() - check if page is pinned or not
2385 * @page: struct page to check
2386 *
2387 * Pinned pages cannot be migrated. This is the same test as in
2388 * migrate_page_move_mapping(), except that here we allow migration of a
2389 * ZONE_DEVICE page.
2390 */
2391static bool migrate_vma_check_page(struct page *page)
2392{
2393	/*
2394	 * One extra ref because caller holds an extra reference, either from
2395	 * isolate_lru_page() for a regular page, or migrate_vma_collect() for
2396	 * a device page.
2397	 */
2398	int extra = 1;
2399
2400	/*
2401	 * FIXME support THP (transparent huge page), it is bit more complex to
2402	 * check them than regular pages, because they can be mapped with a pmd
2403	 * or with a pte (split pte mapping).
2404	 */
2405	if (PageCompound(page))
2406		return false;
2407
2408	/* Pages from ZONE_DEVICE have one extra reference */
2409	if (is_zone_device_page(page)) {
2410		/*
2411		 * Private pages can never be pinned as they have no valid pte and
2412		 * GUP will fail for them. Yet if there is a pending migration, a
2413		 * thread might try to wait on the pte migration entry and will
2414		 * bump the page reference count. Sadly there is no way to
2415		 * differentiate a regular pin from a migration wait, so to avoid
2416		 * two racing threads trying to migrate back to the CPU entering an
2417		 * infinite loop (one stopping migration because the other is
2418		 * waiting on the pte migration entry), we always return true here.
2419		 *
2420		 * FIXME proper solution is to rework migration_entry_wait() so
2421		 * it does not need to take a reference on page.
2422		 */
2423		return is_device_private_page(page);
2424	}
2425
2426	/* For file-backed pages */
2427	if (page_mapping(page))
2428		extra += 1 + page_has_private(page);
2429
2430	if ((page_count(page) - extra) > page_mapcount(page))
2431		return false;
2432
2433	return true;
2434}
2435
2436/*
2437 * migrate_vma_prepare() - lock pages and isolate them from the lru
2438 * @migrate: migrate struct containing all migration information
2439 *
2440 * This locks pages that have been collected by migrate_vma_collect(). Once each
2441 * page is locked it is isolated from the lru (for non-device pages). Finally,
2442 * the ref taken by migrate_vma_collect() is dropped, as locked pages cannot be
2443 * migrated by concurrent kernel threads.
2444 */
2445static void migrate_vma_prepare(struct migrate_vma *migrate)
2446{
2447	const unsigned long npages = migrate->npages;
2448	const unsigned long start = migrate->start;
2449	unsigned long addr, i, restore = 0;
2450	bool allow_drain = true;
2451
2452	lru_add_drain();
2453
2454	for (i = 0; (i < npages) && migrate->cpages; i++) {
2455		struct page *page = migrate_pfn_to_page(migrate->src[i]);
2456		bool remap = true;
2457
2458		if (!page)
2459			continue;
2460
2461		if (!(migrate->src[i] & MIGRATE_PFN_LOCKED)) {
2462			/*
2463			 * Because we are migrating several pages there can be
2464			 * a deadlock between two concurrent migrations where
2465			 * each is waiting on the other's page lock.
2466			 *
2467			 * Make migrate_vma() a best-effort thing and back off
2468			 * for any page we cannot lock right away.
2469			 */
2470			if (!trylock_page(page)) {
2471				migrate->src[i] = 0;
2472				migrate->cpages--;
2473				put_page(page);
2474				continue;
2475			}
2476			remap = false;
2477			migrate->src[i] |= MIGRATE_PFN_LOCKED;
2478		}
2479
2480		/* ZONE_DEVICE pages are not on LRU */
2481		if (!is_zone_device_page(page)) {
2482			if (!PageLRU(page) && allow_drain) {
2483				/* Drain CPU's pagevec */
2484				lru_add_drain_all();
2485				allow_drain = false;
2486			}
2487
2488			if (isolate_lru_page(page)) {
2489				if (remap) {
2490					migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2491					migrate->cpages--;
2492					restore++;
2493				} else {
2494					migrate->src[i] = 0;
2495					unlock_page(page);
2496					migrate->cpages--;
2497					put_page(page);
2498				}
2499				continue;
2500			}
2501
2502			/* Drop the reference we took in collect */
2503			put_page(page);
2504		}
2505
2506		if (!migrate_vma_check_page(page)) {
2507			if (remap) {
2508				migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2509				migrate->cpages--;
2510				restore++;
2511
2512				if (!is_zone_device_page(page)) {
2513					get_page(page);
2514					putback_lru_page(page);
2515				}
2516			} else {
2517				migrate->src[i] = 0;
2518				unlock_page(page);
2519				migrate->cpages--;
2520
2521				if (!is_zone_device_page(page))
2522					putback_lru_page(page);
2523				else
2524					put_page(page);
2525			}
2526		}
2527	}
2528
2529	for (i = 0, addr = start; i < npages && restore; i++, addr += PAGE_SIZE) {
2530		struct page *page = migrate_pfn_to_page(migrate->src[i]);
2531
2532		if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
2533			continue;
2534
2535		remove_migration_pte(page, migrate->vma, addr, page);
2536
2537		migrate->src[i] = 0;
2538		unlock_page(page);
2539		put_page(page);
2540		restore--;
2541	}
2542}
2543
2544/*
2545 * migrate_vma_unmap() - replace page mapping with special migration pte entry
2546 * @migrate: migrate struct containing all migration information
2547 *
2548 * Replace page mapping (CPU page table pte) with a special migration pte entry
2549 * and check again if it has been pinned. Pinned pages are restored because we
2550 * cannot migrate them.
2551 *
2552 * This is the last step before we call the device driver callback to allocate
2553 * destination memory and copy contents of original page over to new page.
2554 */
2555static void migrate_vma_unmap(struct migrate_vma *migrate)
2556{
2557	const unsigned long npages = migrate->npages;
2558	const unsigned long start = migrate->start;
2559	unsigned long addr, i, restore = 0;
2560
2561	for (i = 0; i < npages; i++) {
2562		struct page *page = migrate_pfn_to_page(migrate->src[i]);
2563
2564		if (!page || !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
2565			continue;
2566
2567		if (page_mapped(page)) {
2568			try_to_migrate(page, 0);
2569			if (page_mapped(page))
2570				goto restore;
2571		}
2572
2573		if (migrate_vma_check_page(page))
2574			continue;
2575
2576restore:
2577		migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2578		migrate->cpages--;
2579		restore++;
2580	}
2581
2582	for (addr = start, i = 0; i < npages && restore; addr += PAGE_SIZE, i++) {
2583		struct page *page = migrate_pfn_to_page(migrate->src[i]);
2584
2585		if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
2586			continue;
2587
2588		remove_migration_ptes(page, page, false);
2589
2590		migrate->src[i] = 0;
2591		unlock_page(page);
2592		restore--;
2593
2594		if (is_zone_device_page(page))
2595			put_page(page);
2596		else
2597			putback_lru_page(page);
2598	}
2599}
2600
2601/**
2602 * migrate_vma_setup() - prepare to migrate a range of memory
2603 * @args: contains the vma, start, and pfns arrays for the migration
2604 *
2605 * Returns: negative errno on failures, 0 when 0 or more pages were migrated
2606 * without an error.
2607 *
2608 * Prepare to migrate a range of memory virtual address range by collecting all
2609 * the pages backing each virtual address in the range, saving them inside the
2610 * src array.  Then lock those pages and unmap them. Once the pages are locked
2611 * and unmapped, check whether each page is pinned or not.  Pages that aren't
2612 * pinned have the MIGRATE_PFN_MIGRATE flag set (by this function) in the
2613 * corresponding src array entry.  Then restores any pages that are pinned, by
2614 * remapping and unlocking those pages.
2615 *
2616 * The caller should then allocate destination memory and copy source memory to
2617 * it for all those entries (ie with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE
2618 * flag set).  Once these are allocated and copied, the caller must update each
2619 * corresponding entry in the dst array with the pfn value of the destination
2620 * page and with the MIGRATE_PFN_VALID and MIGRATE_PFN_LOCKED flags set
2621 * (destination pages must have their struct pages locked, via lock_page()).
2622 *
2623 * Note that the caller does not have to migrate all the pages that are marked
2624 * with MIGRATE_PFN_MIGRATE flag in src array unless this is a migration from
2625 * device memory to system memory.  If the caller cannot migrate a device page
2626 * back to system memory, then it must return VM_FAULT_SIGBUS, which has severe
2627 * consequences for the userspace process, so it must be avoided if at all
2628 * possible.
2629 *
2630 * For empty entries inside CPU page table (pte_none() or pmd_none() is true) we
2631 * do set MIGRATE_PFN_MIGRATE flag inside the corresponding source array thus
2632 * allowing the caller to allocate device memory for those unbacked virtual
2633 * addresses.  For this the caller simply has to allocate device memory and
2634 * properly set the destination entry like for regular migration.  Note that
2635 * this can still fail, and thus inside the device driver you must check if the
2636 * migration was successful for those entries after calling migrate_vma_pages(),
2637 * just like for regular migration.
2638 *
2639 * After that, the callers must call migrate_vma_pages() to go over each entry
2640 * in the src array that has the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag
2641 * set. If the corresponding entry in dst array has MIGRATE_PFN_VALID flag set,
2642 * then migrate_vma_pages() to migrate struct page information from the source
2643 * struct page to the destination struct page.  If it fails to migrate the
2644 * struct page information, then it clears the MIGRATE_PFN_MIGRATE flag in the
2645 * src array.
2646 *
2647 * At this point all successfully migrated pages have an entry in the src
2648 * array with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag set and the dst
2649 * array entry with MIGRATE_PFN_VALID flag set.
2650 *
2651 * Once migrate_vma_pages() returns the caller may inspect which pages were
2652 * successfully migrated, and which were not.  Successfully migrated pages will
2653 * have the MIGRATE_PFN_MIGRATE flag set for their src array entry.
2654 *
2655 * It is safe to update device page table after migrate_vma_pages() because
2656 * both destination and source page are still locked, and the mmap_lock is held
2657 * in read mode (hence no one can unmap the range being migrated).
2658 *
2659 * Once the caller is done cleaning up things and updating its page table (if it
2660 * chose to do so, this is not an obligation) it finally calls
2661 * migrate_vma_finalize() to update the CPU page table to point to new pages
2662 * for successfully migrated pages or otherwise restore the CPU page table to
2663 * point to the original source pages.
2664 */
2665int migrate_vma_setup(struct migrate_vma *args)
2666{
2667	long nr_pages = (args->end - args->start) >> PAGE_SHIFT;
2668
2669	args->start &= PAGE_MASK;
2670	args->end &= PAGE_MASK;
2671	if (!args->vma || is_vm_hugetlb_page(args->vma) ||
2672	    (args->vma->vm_flags & VM_SPECIAL) || vma_is_dax(args->vma))
2673		return -EINVAL;
2674	if (nr_pages <= 0)
2675		return -EINVAL;
2676	if (args->start < args->vma->vm_start ||
2677	    args->start >= args->vma->vm_end)
2678		return -EINVAL;
2679	if (args->end <= args->vma->vm_start || args->end > args->vma->vm_end)
2680		return -EINVAL;
2681	if (!args->src || !args->dst)
2682		return -EINVAL;
2683
2684	memset(args->src, 0, sizeof(*args->src) * nr_pages);
2685	args->cpages = 0;
2686	args->npages = 0;
2687
2688	migrate_vma_collect(args);
2689
2690	if (args->cpages)
2691		migrate_vma_prepare(args);
2692	if (args->cpages)
2693		migrate_vma_unmap(args);
2694
2695	/*
2696	 * At this point pages are locked and unmapped, and thus they have
2697	 * stable content and can safely be copied to destination memory that
2698	 * is allocated by the drivers.
2699	 */
2700	return 0;
2701
2702}
2703EXPORT_SYMBOL(migrate_vma_setup);
2704
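/*
 * Hedged driver-side sketch (not part of this file) of the workflow the
 * comment above describes: migrate_vma_setup() collects and locks the pages,
 * the caller allocates and fills destination pages, then migrate_vma_pages()
 * and migrate_vma_finalize() complete or roll back the migration.
 * example_alloc_dst_page() and example_copy() are hypothetical placeholders
 * for a real driver's allocator and copy routine.
 */
#if 0	/* illustrative only */
static int example_migrate_range(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end,
				 void *pgmap_owner)
{
	unsigned long npages = (end - start) >> PAGE_SHIFT;
	struct migrate_vma args = {};
	unsigned long *src, *dst;
	unsigned long i;
	int ret = -ENOMEM;

	src = kcalloc(npages, sizeof(*src), GFP_KERNEL);
	dst = kcalloc(npages, sizeof(*dst), GFP_KERNEL);
	if (!src || !dst)
		goto out;

	args.vma = vma;
	args.start = start;
	args.end = end;
	args.src = src;
	args.dst = dst;
	args.pgmap_owner = pgmap_owner;
	args.flags = MIGRATE_VMA_SELECT_SYSTEM;

	ret = migrate_vma_setup(&args);
	if (ret)
		goto out;

	for (i = 0; i < args.npages; i++) {
		struct page *spage = migrate_pfn_to_page(args.src[i]);
		struct page *dpage;

		if (!(args.src[i] & MIGRATE_PFN_MIGRATE))
			continue;

		dpage = example_alloc_dst_page();	/* hypothetical */
		if (!dpage)
			continue;
		lock_page(dpage);
		if (spage)
			example_copy(dpage, spage);	/* hypothetical */
		args.dst[i] = migrate_pfn(page_to_pfn(dpage)) |
			      MIGRATE_PFN_LOCKED;
	}

	migrate_vma_pages(&args);
	migrate_vma_finalize(&args);
out:
	kfree(src);
	kfree(dst);
	return ret;
}
#endif
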
2705/*
2706 * This code closely matches the code in:
2707 *   __handle_mm_fault()
2708 *     handle_pte_fault()
2709 *       do_anonymous_page()
2710 * to map in an anonymous zero page but the struct page will be a ZONE_DEVICE
2711 * private page.
2712 */
2713static void migrate_vma_insert_page(struct migrate_vma *migrate,
2714				    unsigned long addr,
2715				    struct page *page,
2716				    unsigned long *src)
2717{
2718	struct vm_area_struct *vma = migrate->vma;
2719	struct mm_struct *mm = vma->vm_mm;
2720	bool flush = false;
2721	spinlock_t *ptl;
2722	pte_t entry;
2723	pgd_t *pgdp;
2724	p4d_t *p4dp;
2725	pud_t *pudp;
2726	pmd_t *pmdp;
2727	pte_t *ptep;
2728
2729	/* Only allow populating anonymous memory */
2730	if (!vma_is_anonymous(vma))
2731		goto abort;
2732
2733	pgdp = pgd_offset(mm, addr);
2734	p4dp = p4d_alloc(mm, pgdp, addr);
2735	if (!p4dp)
2736		goto abort;
2737	pudp = pud_alloc(mm, p4dp, addr);
2738	if (!pudp)
2739		goto abort;
2740	pmdp = pmd_alloc(mm, pudp, addr);
2741	if (!pmdp)
2742		goto abort;
2743
2744	if (pmd_trans_huge(*pmdp) || pmd_devmap(*pmdp))
2745		goto abort;
2746
2747	/*
2748	 * Use pte_alloc() instead of pte_alloc_map().  We can't run
2749	 * pte_offset_map() on pmds where a huge pmd might be created
2750	 * from a different thread.
2751	 *
2752	 * pte_alloc_map() is safe to use under mmap_write_lock(mm) or when
2753	 * parallel threads are excluded by other means.
2754	 *
2755	 * Here we only have mmap_read_lock(mm).
2756	 */
2757	if (pte_alloc(mm, pmdp))
2758		goto abort;
2759
2760	/* See the comment in pte_alloc_one_map() */
2761	if (unlikely(pmd_trans_unstable(pmdp)))
2762		goto abort;
2763
2764	if (unlikely(anon_vma_prepare(vma)))
2765		goto abort;
2766	if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL))
2767		goto abort;
2768
2769	/*
2770	 * The memory barrier inside __SetPageUptodate makes sure that
2771	 * preceding stores to the page contents become visible before
2772	 * the set_pte_at() write.
2773	 */
2774	__SetPageUptodate(page);
2775
2776	if (is_zone_device_page(page)) {
2777		if (is_device_private_page(page)) {
2778			swp_entry_t swp_entry;
2779
2780			if (vma->vm_flags & VM_WRITE)
2781				swp_entry = make_writable_device_private_entry(
2782							page_to_pfn(page));
2783			else
2784				swp_entry = make_readable_device_private_entry(
2785							page_to_pfn(page));
2786			entry = swp_entry_to_pte(swp_entry);
2787		} else {
2788			/*
2789			 * For now we only support migrating to un-addressable
2790			 * device memory.
2791			 */
2792			pr_warn_once("Unsupported ZONE_DEVICE page type.\n");
2793			goto abort;
2794		}
2795	} else {
2796		entry = mk_pte(page, vma->vm_page_prot);
2797		if (vma->vm_flags & VM_WRITE)
2798			entry = pte_mkwrite(pte_mkdirty(entry));
2799	}
2800
2801	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
2802
2803	if (check_stable_address_space(mm))
2804		goto unlock_abort;
2805
2806	if (pte_present(*ptep)) {
2807		unsigned long pfn = pte_pfn(*ptep);
2808
2809		if (!is_zero_pfn(pfn))
2810			goto unlock_abort;
2811		flush = true;
2812	} else if (!pte_none(*ptep))
2813		goto unlock_abort;
2814
2815	/*
2816	 * Check for userfaultfd but do not deliver the fault. Instead,
2817	 * just back off.
2818	 */
2819	if (userfaultfd_missing(vma))
2820		goto unlock_abort;
2821
2822	inc_mm_counter(mm, MM_ANONPAGES);
2823	page_add_new_anon_rmap(page, vma, addr, false);
2824	if (!is_zone_device_page(page))
2825		lru_cache_add_inactive_or_unevictable(page, vma);
2826	get_page(page);
2827
2828	if (flush) {
2829		flush_cache_page(vma, addr, pte_pfn(*ptep));
2830		ptep_clear_flush_notify(vma, addr, ptep);
2831		set_pte_at_notify(mm, addr, ptep, entry);
2832		update_mmu_cache(vma, addr, ptep);
2833	} else {
2834		/* No need to invalidate - it was non-present before */
2835		set_pte_at(mm, addr, ptep, entry);
2836		update_mmu_cache(vma, addr, ptep);
2837	}
2838
2839	pte_unmap_unlock(ptep, ptl);
2840	*src = MIGRATE_PFN_MIGRATE;
2841	return;
2842
2843unlock_abort:
2844	pte_unmap_unlock(ptep, ptl);
2845abort:
2846	*src &= ~MIGRATE_PFN_MIGRATE;
2847}
2848
2849/**
2850 * migrate_vma_pages() - migrate meta-data from src page to dst page
2851 * @migrate: migrate struct containing all migration information
2852 *
2853 * This migrates struct page meta-data from source struct page to destination
2854 * struct page. This effectively finishes the migration from source page to the
2855 * destination page.
2856 */
2857void migrate_vma_pages(struct migrate_vma *migrate)
2858{
2859	const unsigned long npages = migrate->npages;
2860	const unsigned long start = migrate->start;
2861	struct mmu_notifier_range range;
2862	unsigned long addr, i;
2863	bool notified = false;
2864
2865	for (i = 0, addr = start; i < npages; addr += PAGE_SIZE, i++) {
2866		struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
2867		struct page *page = migrate_pfn_to_page(migrate->src[i]);
2868		struct address_space *mapping;
2869		int r;
2870
2871		if (!newpage) {
2872			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2873			continue;
2874		}
2875
2876		if (!page) {
2877			if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE))
2878				continue;
2879			if (!notified) {
2880				notified = true;
2881
2882				mmu_notifier_range_init_owner(&range,
2883					MMU_NOTIFY_MIGRATE, 0, migrate->vma,
2884					migrate->vma->vm_mm, addr, migrate->end,
2885					migrate->pgmap_owner);
2886				mmu_notifier_invalidate_range_start(&range);
2887			}
2888			migrate_vma_insert_page(migrate, addr, newpage,
2889						&migrate->src[i]);
2890			continue;
2891		}
2892
2893		mapping = page_mapping(page);
2894
2895		if (is_zone_device_page(newpage)) {
2896			if (is_device_private_page(newpage)) {
2897				/*
2898				 * For now only support private anonymous when
2899				 * migrating to un-addressable device memory.
2900				 */
2901				if (mapping) {
2902					migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2903					continue;
2904				}
2905			} else {
2906				/*
2907				 * Other types of ZONE_DEVICE page are not
2908				 * supported.
2909				 */
2910				migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2911				continue;
2912			}
2913		}
2914
2915		r = migrate_page(mapping, newpage, page, MIGRATE_SYNC_NO_COPY);
2916		if (r != MIGRATEPAGE_SUCCESS)
2917			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2918	}
2919
2920	/*
2921	 * No need to double call mmu_notifier->invalidate_range() callback as
2922	 * the above ptep_clear_flush_notify() inside migrate_vma_insert_page()
2923	 * did already call it.
2924	 */
2925	if (notified)
2926		mmu_notifier_invalidate_range_only_end(&range);
2927}
2928EXPORT_SYMBOL(migrate_vma_pages);
2929
2930/**
2931 * migrate_vma_finalize() - restore CPU page table entry
2932 * @migrate: migrate struct containing all migration information
2933 *
2934 * This replaces the special migration pte entry with either a mapping to the
2935 * new page if migration was successful for that page, or to the original page
2936 * otherwise.
2937 *
2938 * This also unlocks the pages and puts them back on the lru, or drops the extra
2939 * refcount, for device pages.
2940 */
2941void migrate_vma_finalize(struct migrate_vma *migrate)
2942{
2943	const unsigned long npages = migrate->npages;
2944	unsigned long i;
2945
2946	for (i = 0; i < npages; i++) {
2947		struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
2948		struct page *page = migrate_pfn_to_page(migrate->src[i]);
2949
2950		if (!page) {
2951			if (newpage) {
2952				unlock_page(newpage);
2953				put_page(newpage);
2954			}
2955			continue;
2956		}
2957
2958		if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE) || !newpage) {
2959			if (newpage) {
2960				unlock_page(newpage);
2961				put_page(newpage);
2962			}
2963			newpage = page;
2964		}
2965
2966		remove_migration_ptes(page, newpage, false);
2967		unlock_page(page);
2968
2969		if (is_zone_device_page(page))
2970			put_page(page);
2971		else
2972			putback_lru_page(page);
2973
2974		if (newpage != page) {
2975			unlock_page(newpage);
2976			if (is_zone_device_page(newpage))
2977				put_page(newpage);
2978			else
2979				putback_lru_page(newpage);
2980		}
2981	}
2982}
2983EXPORT_SYMBOL(migrate_vma_finalize);
2984#endif /* CONFIG_DEVICE_PRIVATE */
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Memory Migration functionality - linux/mm/migrate.c
   4 *
   5 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
   6 *
   7 * Page migration was first developed in the context of the memory hotplug
   8 * project. The main authors of the migration code are:
   9 *
  10 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
  11 * Hirokazu Takahashi <taka@valinux.co.jp>
  12 * Dave Hansen <haveblue@us.ibm.com>
  13 * Christoph Lameter
  14 */
  15
  16#include <linux/migrate.h>
  17#include <linux/export.h>
  18#include <linux/swap.h>
  19#include <linux/swapops.h>
  20#include <linux/pagemap.h>
  21#include <linux/buffer_head.h>
  22#include <linux/mm_inline.h>
  23#include <linux/ksm.h>
  24#include <linux/rmap.h>
  25#include <linux/topology.h>
  26#include <linux/cpu.h>
  27#include <linux/cpuset.h>
  28#include <linux/writeback.h>
  29#include <linux/mempolicy.h>
  30#include <linux/vmalloc.h>
  31#include <linux/security.h>
  32#include <linux/backing-dev.h>
  33#include <linux/compaction.h>
  34#include <linux/syscalls.h>
  35#include <linux/compat.h>
  36#include <linux/hugetlb.h>
 
  37#include <linux/gfp.h>
 
  38#include <linux/pfn_t.h>
  39#include <linux/page_idle.h>
  40#include <linux/page_owner.h>
  41#include <linux/sched/mm.h>
  42#include <linux/ptrace.h>
  43#include <linux/memory.h>
  44#include <linux/sched/sysctl.h>
  45#include <linux/memory-tiers.h>
  46#include <linux/pagewalk.h>
  47
  48#include <asm/tlbflush.h>
  49
 
  50#include <trace/events/migrate.h>
  51
  52#include "internal.h"
  53
  54bool isolate_movable_page(struct page *page, isolate_mode_t mode)
  55{
  56	struct folio *folio = folio_get_nontail_page(page);
  57	const struct movable_operations *mops;
  58
  59	/*
  60	 * Avoid burning cycles with pages that are yet under __free_pages(),
  61	 * or just got freed under us.
  62	 *
  63	 * In case we 'win' a race for a movable page being freed under us and
  64	 * raise its refcount preventing __free_pages() from doing its job
  65	 * the put_page() at the end of this block will take care of
  66	 * release this page, thus avoiding a nasty leakage.
  67	 */
  68	if (!folio)
  69		goto out;
  70
  71	if (unlikely(folio_test_slab(folio)))
  72		goto out_putfolio;
  73	/* Pairs with smp_wmb() in slab freeing, e.g. SLUB's __free_slab() */
  74	smp_rmb();
  75	/*
  76	 * Check movable flag before taking the page lock because
  77	 * we use non-atomic bitops on newly allocated page flags so
  78	 * unconditionally grabbing the lock ruins page's owner side.
  79	 */
  80	if (unlikely(!__folio_test_movable(folio)))
  81		goto out_putfolio;
  82	/* Pairs with smp_wmb() in slab allocation, e.g. SLUB's alloc_slab_page() */
  83	smp_rmb();
  84	if (unlikely(folio_test_slab(folio)))
  85		goto out_putfolio;
  86
  87	/*
  88	 * As movable pages are not isolated from LRU lists, concurrent
  89	 * compaction threads can race against page migration functions
   90 * as well as race against the release of a page.
  91	 *
  92	 * In order to avoid having an already isolated movable page
  93	 * being (wrongly) re-isolated while it is under migration,
  94	 * or to avoid attempting to isolate pages being released,
  95	 * lets be sure we have the page lock
  96	 * before proceeding with the movable page isolation steps.
  97	 */
  98	if (unlikely(!folio_trylock(folio)))
  99		goto out_putfolio;
 100
 101	if (!folio_test_movable(folio) || folio_test_isolated(folio))
 102		goto out_no_isolated;
 103
 104	mops = folio_movable_ops(folio);
 105	VM_BUG_ON_FOLIO(!mops, folio);
 106
 107	if (!mops->isolate_page(&folio->page, mode))
 108		goto out_no_isolated;
 109
 110	/* Driver shouldn't use the isolated flag */
 111	WARN_ON_ONCE(folio_test_isolated(folio));
 112	folio_set_isolated(folio);
 113	folio_unlock(folio);
 114
 115	return true;
 116
 117out_no_isolated:
 118	folio_unlock(folio);
 119out_putfolio:
 120	folio_put(folio);
 121out:
 122	return false;
 123}
 124
 125static void putback_movable_folio(struct folio *folio)
 126{
 127	const struct movable_operations *mops = folio_movable_ops(folio);
 128
 129	mops->putback_page(&folio->page);
 130	folio_clear_isolated(folio);
 
 131}
 132
 133/*
 134 * Put previously isolated pages back onto the appropriate lists
 135 * from where they were once taken off for compaction/migration.
 136 *
 137 * This function shall be used whenever the isolated pageset has been
 138 * built from lru, balloon, hugetlbfs page. See isolate_migratepages_range()
 139 * and isolate_hugetlb().
 140 */
 141void putback_movable_pages(struct list_head *l)
 142{
 143	struct folio *folio;
 144	struct folio *folio2;
 145
 146	list_for_each_entry_safe(folio, folio2, l, lru) {
 147		if (unlikely(folio_test_hugetlb(folio))) {
 148			folio_putback_active_hugetlb(folio);
 149			continue;
 150		}
 151		list_del(&folio->lru);
 152		/*
 153		 * We isolated non-lru movable folio so here we can use
 154		 * __folio_test_movable because LRU folio's mapping cannot
 155		 * have PAGE_MAPPING_MOVABLE.
 156		 */
 157		if (unlikely(__folio_test_movable(folio))) {
 158			VM_BUG_ON_FOLIO(!folio_test_isolated(folio), folio);
 159			folio_lock(folio);
 160			if (folio_test_movable(folio))
 161				putback_movable_folio(folio);
 162			else
 163				folio_clear_isolated(folio);
 164			folio_unlock(folio);
 165			folio_put(folio);
 166		} else {
 167			node_stat_mod_folio(folio, NR_ISOLATED_ANON +
 168					folio_is_file_lru(folio), -folio_nr_pages(folio));
 169			folio_putback_lru(folio);
 170		}
 171	}
 172}
 173
 174/* Must be called with an elevated refcount on the non-hugetlb folio */
 175bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
 176{
 177	bool isolated, lru;
 178
 179	if (folio_test_hugetlb(folio))
 180		return isolate_hugetlb(folio, list);
 181
 182	lru = !__folio_test_movable(folio);
 183	if (lru)
 184		isolated = folio_isolate_lru(folio);
 185	else
 186		isolated = isolate_movable_page(&folio->page,
 187						ISOLATE_UNEVICTABLE);
 188
 189	if (!isolated)
 190		return false;
 191
 192	list_add(&folio->lru, list);
 193	if (lru)
 194		node_stat_add_folio(folio, NR_ISOLATED_ANON +
 195				    folio_is_file_lru(folio));
 196
 197	return true;
 198}
 199
 200static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw,
 201					  struct folio *folio,
 202					  unsigned long idx)
 203{
 204	struct page *page = folio_page(folio, idx);
 205	bool contains_data;
 206	pte_t newpte;
 207	void *addr;
 208
 209	if (PageCompound(page))
 210		return false;
 211	VM_BUG_ON_PAGE(!PageAnon(page), page);
 212	VM_BUG_ON_PAGE(!PageLocked(page), page);
 213	VM_BUG_ON_PAGE(pte_present(*pvmw->pte), page);
 214
 215	if (folio_test_mlocked(folio) || (pvmw->vma->vm_flags & VM_LOCKED) ||
 216	    mm_forbids_zeropage(pvmw->vma->vm_mm))
 217		return false;
 218
 219	/*
 220	 * The pmd entry mapping the old thp was flushed and the pte mapping
  221	 * this subpage is now not present. If the subpage is only zero-filled
 222	 * then map it to the shared zeropage.
 223	 */
 224	addr = kmap_local_page(page);
 225	contains_data = memchr_inv(addr, 0, PAGE_SIZE);
 226	kunmap_local(addr);
 227
 228	if (contains_data)
 229		return false;
 230
 231	newpte = pte_mkspecial(pfn_pte(my_zero_pfn(pvmw->address),
 232					pvmw->vma->vm_page_prot));
 233	set_pte_at(pvmw->vma->vm_mm, pvmw->address, pvmw->pte, newpte);
 234
 235	dec_mm_counter(pvmw->vma->vm_mm, mm_counter(folio));
 236	return true;
 237}
 238
 239struct rmap_walk_arg {
 240	struct folio *folio;
 241	bool map_unused_to_zeropage;
 242};
 243
 244/*
 245 * Restore a potential migration pte to a working pte entry
 246 */
 247static bool remove_migration_pte(struct folio *folio,
 248		struct vm_area_struct *vma, unsigned long addr, void *arg)
 249{
 250	struct rmap_walk_arg *rmap_walk_arg = arg;
 251	DEFINE_FOLIO_VMA_WALK(pvmw, rmap_walk_arg->folio, vma, addr, PVMW_SYNC | PVMW_MIGRATION);
 252
 
 253	while (page_vma_mapped_walk(&pvmw)) {
 254		rmap_t rmap_flags = RMAP_NONE;
 255		pte_t old_pte;
 256		pte_t pte;
 257		swp_entry_t entry;
 258		struct page *new;
 259		unsigned long idx = 0;
 260
 261		/* pgoff is invalid for ksm pages, but they are never large */
 262		if (folio_test_large(folio) && !folio_test_hugetlb(folio))
 263			idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff;
 264		new = folio_page(folio, idx);
 265
 266#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
 267		/* PMD-mapped THP migration entry */
 268		if (!pvmw.pte) {
 269			VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
 270					!folio_test_pmd_mappable(folio), folio);
 271			remove_migration_pmd(&pvmw, new);
 272			continue;
 273		}
 274#endif
 275		if (rmap_walk_arg->map_unused_to_zeropage &&
 276		    try_to_map_unused_to_zeropage(&pvmw, folio, idx))
 277			continue;
 278
 279		folio_get(folio);
 280		pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
 281		old_pte = ptep_get(pvmw.pte);
 282
 283		entry = pte_to_swp_entry(old_pte);
 284		if (!is_migration_entry_young(entry))
 285			pte = pte_mkold(pte);
 286		if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
 287			pte = pte_mkdirty(pte);
 288		if (pte_swp_soft_dirty(old_pte))
 289			pte = pte_mksoft_dirty(pte);
 290		else
 291			pte = pte_clear_soft_dirty(pte);
 292
 293		if (is_writable_migration_entry(entry))
 294			pte = pte_mkwrite(pte, vma);
 295		else if (pte_swp_uffd_wp(old_pte))
 296			pte = pte_mkuffd_wp(pte);
 297
 298		if (folio_test_anon(folio) && !is_readable_migration_entry(entry))
 299			rmap_flags |= RMAP_EXCLUSIVE;
 300
 301		if (unlikely(is_device_private_page(new))) {
 302			if (pte_write(pte))
 303				entry = make_writable_device_private_entry(
 304							page_to_pfn(new));
 305			else
 306				entry = make_readable_device_private_entry(
 307							page_to_pfn(new));
 308			pte = swp_entry_to_pte(entry);
 309			if (pte_swp_soft_dirty(old_pte))
 310				pte = pte_swp_mksoft_dirty(pte);
 311			if (pte_swp_uffd_wp(old_pte))
 312				pte = pte_swp_mkuffd_wp(pte);
 313		}
 314
 315#ifdef CONFIG_HUGETLB_PAGE
 316		if (folio_test_hugetlb(folio)) {
 317			struct hstate *h = hstate_vma(vma);
 318			unsigned int shift = huge_page_shift(h);
 319			unsigned long psize = huge_page_size(h);
 320
 
 321			pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
 322			if (folio_test_anon(folio))
 323				hugetlb_add_anon_rmap(folio, vma, pvmw.address,
 324						      rmap_flags);
 325			else
 326				hugetlb_add_file_rmap(folio);
 327			set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte,
 328					psize);
 329		} else
 330#endif
 331		{
 332			if (folio_test_anon(folio))
 333				folio_add_anon_rmap_pte(folio, new, vma,
 334							pvmw.address, rmap_flags);
 
 335			else
 336				folio_add_file_rmap_pte(folio, new, vma);
 337			set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
 338		}
 339		if (vma->vm_flags & VM_LOCKED)
 340			mlock_drain_local();
 341
 342		trace_remove_migration_pte(pvmw.address, pte_val(pte),
 343					   compound_order(new));
 344
 345		/* No need to invalidate - it was non-present before */
 346		update_mmu_cache(vma, pvmw.address, pvmw.pte);
 347	}
 348
 349	return true;
 350}
 351
 352/*
 353 * Get rid of all migration entries and replace them by
 354 * references to the indicated page.
 355 */
 356void remove_migration_ptes(struct folio *src, struct folio *dst, int flags)
 357{
 358	struct rmap_walk_arg rmap_walk_arg = {
 359		.folio = src,
 360		.map_unused_to_zeropage = flags & RMP_USE_SHARED_ZEROPAGE,
 361	};
 362
 363	struct rmap_walk_control rwc = {
 364		.rmap_one = remove_migration_pte,
 365		.arg = &rmap_walk_arg,
 366	};
 367
 368	VM_BUG_ON_FOLIO((flags & RMP_USE_SHARED_ZEROPAGE) && (src != dst), src);
 369
 370	if (flags & RMP_LOCKED)
 371		rmap_walk_locked(dst, &rwc);
 372	else
 373		rmap_walk(dst, &rwc);
 374}
 375
 376/*
 377 * Something used the pte of a page under migration. We need to
 378 * get to the page and wait until migration is finished.
 379 * When we return from this function the fault will be retried.
 380 */
 381void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
 382			  unsigned long address)
 383{
 384	spinlock_t *ptl;
 385	pte_t *ptep;
 386	pte_t pte;
 387	swp_entry_t entry;
 
 388
 389	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
 390	if (!ptep)
 391		return;
 392
 393	pte = ptep_get(ptep);
 394	pte_unmap(ptep);
 395
 396	if (!is_swap_pte(pte))
 397		goto out;
 398
 399	entry = pte_to_swp_entry(pte);
 400	if (!is_migration_entry(entry))
 401		goto out;
 402
 403	migration_entry_wait_on_locked(entry, ptl);
 404	return;
 405out:
 406	spin_unlock(ptl);
 407}
 408
 409#ifdef CONFIG_HUGETLB_PAGE
 410/*
 411 * The vma read lock must be held upon entry. Holding that lock prevents either
 412 * the pte or the ptl from being freed.
 413 *
 414 * This function will release the vma lock before returning.
 415 */
 416void migration_entry_wait_huge(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
 417{
 418	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, ptep);
 419	pte_t pte;
 420
 421	hugetlb_vma_assert_locked(vma);
 422	spin_lock(ptl);
 423	pte = huge_ptep_get(vma->vm_mm, addr, ptep);
 424
 425	if (unlikely(!is_hugetlb_entry_migration(pte))) {
 426		spin_unlock(ptl);
 427		hugetlb_vma_unlock_read(vma);
 428	} else {
 429		/*
 430		 * If migration entry existed, safe to release vma lock
 431		 * here because the pgtable page won't be freed without the
 432		 * pgtable lock released.  See comment right above pgtable
 433		 * lock release in migration_entry_wait_on_locked().
 434		 */
 435		hugetlb_vma_unlock_read(vma);
 436		migration_entry_wait_on_locked(pte_to_swp_entry(pte), ptl);
 437	}
 438}
 439#endif
 440
 441#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
 442void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
 443{
 444	spinlock_t *ptl;
 
 445
 446	ptl = pmd_lock(mm, pmd);
 447	if (!is_pmd_migration_entry(*pmd))
 448		goto unlock;
 449	migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), ptl);
 450	return;
 451unlock:
 452	spin_unlock(ptl);
 453}
 454#endif
 455
 456static int folio_expected_refs(struct address_space *mapping,
 457		struct folio *folio)
 458{
 459	int refs = 1;
 460	if (!mapping)
 461		return refs;
 462
 463	refs += folio_nr_pages(folio);
 464	if (folio_test_private(folio))
 465		refs++;
 466
 467	return refs;
 468}
 469
 470/*
 471 * Replace the folio in the mapping.
 472 *
 473 * The number of remaining references must be:
 474 * 1 for anonymous folios without a mapping
 475 * 2 for folios with a mapping
 476 * 3 for folios with a mapping and the private flag set.
 477 */
 478static int __folio_migrate_mapping(struct address_space *mapping,
 479		struct folio *newfolio, struct folio *folio, int expected_count)
 480{
 481	XA_STATE(xas, &mapping->i_pages, folio_index(folio));
 482	struct zone *oldzone, *newzone;
 483	int dirty;
 484	long nr = folio_nr_pages(folio);
 485	long entries, i;
 486
 487	if (!mapping) {
 488		/* Take off deferred split queue while frozen and memcg set */
 489		if (folio_test_large(folio) &&
 490		    folio_test_large_rmappable(folio)) {
 491			if (!folio_ref_freeze(folio, expected_count))
 492				return -EAGAIN;
 493			folio_unqueue_deferred_split(folio);
 494			folio_ref_unfreeze(folio, expected_count);
 495		}
 496
 497		/* No turning back from here */
 498		newfolio->index = folio->index;
 499		newfolio->mapping = folio->mapping;
 500		if (folio_test_anon(folio) && folio_test_large(folio))
 501			mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1);
 502		if (folio_test_swapbacked(folio))
 503			__folio_set_swapbacked(newfolio);
 504
 505		return MIGRATEPAGE_SUCCESS;
 506	}
 507
 508	oldzone = folio_zone(folio);
 509	newzone = folio_zone(newfolio);
 510
 511	xas_lock_irq(&xas);
 512	if (!folio_ref_freeze(folio, expected_count)) {
 513		xas_unlock_irq(&xas);
 514		return -EAGAIN;
 515	}
 516
 517	/* Take off deferred split queue while frozen and memcg set */
 518	folio_unqueue_deferred_split(folio);
 519
 520	/*
 521	 * Now we know that no one else is looking at the folio:
 522	 * no turning back from here.
 523	 */
 524	newfolio->index = folio->index;
 525	newfolio->mapping = folio->mapping;
 526	if (folio_test_anon(folio) && folio_test_large(folio))
 527		mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1);
 528	folio_ref_add(newfolio, nr); /* add cache reference */
 529	if (folio_test_swapbacked(folio)) {
 530		__folio_set_swapbacked(newfolio);
 531		if (folio_test_swapcache(folio)) {
 532			folio_set_swapcache(newfolio);
 533			newfolio->private = folio_get_private(folio);
 534		}
 535		entries = nr;
 536	} else {
 537		VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
 538		entries = 1;
 539	}
 540
 541	/* Move dirty while folio refs frozen and newfolio not yet exposed */
 542	dirty = folio_test_dirty(folio);
 543	if (dirty) {
 544		folio_clear_dirty(folio);
 545		folio_set_dirty(newfolio);
 546	}
 547
 548	/* Swap cache still stores N entries instead of a high-order entry */
 549	for (i = 0; i < entries; i++) {
 550		xas_store(&xas, newfolio);
 551		xas_next(&xas);
 552	}
 553
 554	/*
 555	 * Drop cache reference from old folio by unfreezing
 556	 * to one less reference.
 557	 * We know this isn't the last reference.
 558	 */
 559	folio_ref_unfreeze(folio, expected_count - nr);
 560
 561	xas_unlock(&xas);
 562	/* Leave irq disabled to prevent preemption while updating stats */
 563
 564	/*
 565	 * If moved to a different zone then also account
 566	 * the folio for that zone. Other VM counters will be
 567	 * taken care of when we establish references to the
 568	 * new folio and drop references to the old folio.
 569	 *
 570	 * Note that anonymous folios are accounted for
 571	 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
 572	 * are mapped to swap space.
 573	 */
 574	if (newzone != oldzone) {
 575		struct lruvec *old_lruvec, *new_lruvec;
 576		struct mem_cgroup *memcg;
 577
 578		memcg = folio_memcg(folio);
 579		old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
 580		new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
 581
 582		__mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
 583		__mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
 584		if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
 585			__mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
 586			__mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
 587
 588			if (folio_test_pmd_mappable(folio)) {
 589				__mod_lruvec_state(old_lruvec, NR_SHMEM_THPS, -nr);
 590				__mod_lruvec_state(new_lruvec, NR_SHMEM_THPS, nr);
 591			}
 592		}
 593#ifdef CONFIG_SWAP
 594		if (folio_test_swapcache(folio)) {
 595			__mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
 596			__mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
 597		}
 598#endif
 599		if (dirty && mapping_can_writeback(mapping)) {
 600			__mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
 601			__mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
 602			__mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
 603			__mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
 604		}
 605	}
 606	local_irq_enable();
 607
 608	return MIGRATEPAGE_SUCCESS;
 609}
 610
 611int folio_migrate_mapping(struct address_space *mapping,
 612		struct folio *newfolio, struct folio *folio, int extra_count)
 613{
 614	int expected_count = folio_expected_refs(mapping, folio) + extra_count;
 615
 616	if (folio_ref_count(folio) != expected_count)
 617		return -EAGAIN;
 618
 619	return __folio_migrate_mapping(mapping, newfolio, folio, expected_count);
 620}
 621EXPORT_SYMBOL(folio_migrate_mapping);
 622
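/*
 * Hedged sketch (not part of this file): what a minimal ->migrate_folio
 * implementation for an address_space with no private data might look like,
 * built from the folio_migrate_mapping() helper above and
 * folio_migrate_flags() further down in this file.  It follows the
 * long-standing "move mapping, then copy data and flags" pattern; the
 * helpers later in this file copy the data before committing the mapping
 * instead.  Real filesystems normally just use the generic migrate_folio()
 * callback.
 */
#if 0	/* illustrative only */
static int example_migrate_folio(struct address_space *mapping,
				 struct folio *dst, struct folio *src,
				 enum migrate_mode mode)
{
	int ret;

	/* Move the index, xarray slots and zone/node accounting to dst. */
	ret = folio_migrate_mapping(mapping, dst, src, 0);
	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	/* Copy the contents, then the flags and ancillary state. */
	folio_copy(dst, src);
	folio_migrate_flags(dst, src);
	return MIGRATEPAGE_SUCCESS;
}
#endif
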
 623/*
 624 * The expected number of remaining references is the same as that
 625 * of folio_migrate_mapping().
 626 */
 627int migrate_huge_page_move_mapping(struct address_space *mapping,
 628				   struct folio *dst, struct folio *src)
 629{
 630	XA_STATE(xas, &mapping->i_pages, folio_index(src));
 631	int rc, expected_count = folio_expected_refs(mapping, src);
 632
 633	if (folio_ref_count(src) != expected_count)
 634		return -EAGAIN;
 
 635
 636	rc = folio_mc_copy(dst, src);
 637	if (unlikely(rc))
 638		return rc;
 639
 640	xas_lock_irq(&xas);
 641	if (!folio_ref_freeze(src, expected_count)) {
 642		xas_unlock_irq(&xas);
 643		return -EAGAIN;
 644	}
 645
 646	dst->index = src->index;
 647	dst->mapping = src->mapping;
 648
 649	folio_ref_add(dst, folio_nr_pages(dst));
 650
 651	xas_store(&xas, dst);
 652
 653	folio_ref_unfreeze(src, expected_count - folio_nr_pages(src));
 654
 655	xas_unlock_irq(&xas);
 656
 657	return MIGRATEPAGE_SUCCESS;
 658}
 659
 660/*
 661 * Copy the flags and some other ancillary information
 662 */
 663void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
 664{
 665	int cpupid;
 666
 667	if (folio_test_referenced(folio))
 668		folio_set_referenced(newfolio);
 669	if (folio_test_uptodate(folio))
 670		folio_mark_uptodate(newfolio);
 671	if (folio_test_clear_active(folio)) {
 672		VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio);
 673		folio_set_active(newfolio);
 674	} else if (folio_test_clear_unevictable(folio))
 675		folio_set_unevictable(newfolio);
 676	if (folio_test_workingset(folio))
 677		folio_set_workingset(newfolio);
 678	if (folio_test_checked(folio))
 679		folio_set_checked(newfolio);
 680	/*
 681	 * PG_anon_exclusive (-> PG_mappedtodisk) is always migrated via
 682	 * migration entries. We can still have PG_anon_exclusive set on the
 683	 * effectively unmapped and unreferenced first sub-pages of an
 684	 * anonymous THP: we can simply copy it here via PG_mappedtodisk.
 685	 */
 686	if (folio_test_mappedtodisk(folio))
 687		folio_set_mappedtodisk(newfolio);
 688
 689	/* Move dirty on pages not done by folio_migrate_mapping() */
 690	if (folio_test_dirty(folio))
 691		folio_set_dirty(newfolio);
 692
 693	if (folio_test_young(folio))
 694		folio_set_young(newfolio);
 695	if (folio_test_idle(folio))
 696		folio_set_idle(newfolio);
 697
 698	folio_migrate_refs(newfolio, folio);
 699	/*
 700	 * Copy NUMA information to the new page, to prevent over-eager
 701	 * future migrations of this same page.
 702	 */
 703	cpupid = folio_xchg_last_cpupid(folio, -1);
 704	/*
 705	 * For memory tiering mode, when migrate between slow and fast
 706	 * memory node, reset cpupid, because that is used to record
 707	 * page access time in slow memory node.
 708	 */
 709	if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) {
 710		bool f_toptier = node_is_toptier(folio_nid(folio));
 711		bool t_toptier = node_is_toptier(folio_nid(newfolio));
 712
 713		if (f_toptier != t_toptier)
 714			cpupid = -1;
 715	}
 716	folio_xchg_last_cpupid(newfolio, cpupid);
 717
 718	folio_migrate_ksm(newfolio, folio);
 719	/*
 720	 * Please do not reorder this without considering how mm/ksm.c's
 721	 * ksm_get_folio() depends upon ksm_migrate_page() and the
 722	 * swapcache flag.
 723	 */
 724	if (folio_test_swapcache(folio))
 725		folio_clear_swapcache(folio);
 726	folio_clear_private(folio);
 727
 728	/* folio->private contains hugetlb-specific flags */
 729	if (!folio_test_hugetlb(folio))
 730		folio->private = NULL;
 731
 732	/*
 733	 * If any waiters have accumulated on the new page then
 734	 * wake them up.
 735	 */
 736	if (folio_test_writeback(newfolio))
 737		folio_end_writeback(newfolio);
 738
 739	/*
 740	 * PG_readahead shares the same bit with PG_reclaim.  The above
 741	 * folio_end_writeback() may clear PG_readahead mistakenly, so set the
 742	 * bit after that.
 743	 */
 744	if (folio_test_readahead(folio))
 745		folio_set_readahead(newfolio);
 
 
 746
 747	folio_copy_owner(newfolio, folio);
 748	pgalloc_tag_swap(newfolio, folio);
 749
 750	mem_cgroup_migrate(folio, newfolio);
 751}
 752EXPORT_SYMBOL(folio_migrate_flags);
 753
 754/************************************************************
 755 *                    Migration functions
 756 ***********************************************************/
 757
 758static int __migrate_folio(struct address_space *mapping, struct folio *dst,
 759			   struct folio *src, void *src_private,
 760			   enum migrate_mode mode)
 761{
 762	int rc, expected_count = folio_expected_refs(mapping, src);
 763
 764	/* Check that src has no extra refs before we do more work */
 765	if (folio_ref_count(src) != expected_count)
 766		return -EAGAIN;
 767
 768	rc = folio_mc_copy(dst, src);
 769	if (unlikely(rc))
 770		return rc;
 771
 772	rc = __folio_migrate_mapping(mapping, dst, src, expected_count);
 773	if (rc != MIGRATEPAGE_SUCCESS)
 774		return rc;
 775
 776	if (src_private)
 777		folio_attach_private(dst, folio_detach_private(src));
 778
 779	folio_migrate_flags(dst, src);
 780	return MIGRATEPAGE_SUCCESS;
 781}
 
 782
 783/**
 784 * migrate_folio() - Simple folio migration.
 785 * @mapping: The address_space containing the folio.
 786 * @dst: The folio to migrate the data to.
 787 * @src: The folio containing the current data.
 788 * @mode: How to migrate the page.
 789 *
 790 * Common logic to directly migrate a single LRU folio suitable for
 791 * folios that do not have private data.
 792 *
 793 * Folios are locked upon entry and exit.
 794 */
 795int migrate_folio(struct address_space *mapping, struct folio *dst,
 796		  struct folio *src, enum migrate_mode mode)
 797{
 798	BUG_ON(folio_test_writeback(src));	/* Writeback must be complete */
 799	return __migrate_folio(mapping, dst, src, NULL, mode);
 800}
 801EXPORT_SYMBOL(migrate_folio);
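/*
 * Illustrative sketch (hypothetical names, for reference only): a mapping
 * whose folios never carry private data can point its
 * address_space_operations directly at migrate_folio():
 *
 *	static const struct address_space_operations example_aops = {
 *		...
 *		.migrate_folio	= migrate_folio,
 *	};
 */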
 802
 803#ifdef CONFIG_BUFFER_HEAD
 804/* Returns true if all buffers are successfully locked */
 805static bool buffer_migrate_lock_buffers(struct buffer_head *head,
 806							enum migrate_mode mode)
 807{
 808	struct buffer_head *bh = head;
 809	struct buffer_head *failed_bh;
 810
 811	do {
 812		if (!trylock_buffer(bh)) {
 813			if (mode == MIGRATE_ASYNC)
 814				goto unlock;
 815			if (mode == MIGRATE_SYNC_LIGHT && !buffer_uptodate(bh))
 816				goto unlock;
 817			lock_buffer(bh);
 818		}
 819
 820		bh = bh->b_this_page;
 821	} while (bh != head);
 822
 823	return true;
 824
 825unlock:
 826	/* We failed to lock the buffer and cannot stall. */
 827	failed_bh = bh;
 828	bh = head;
 829	while (bh != failed_bh) {
 830		unlock_buffer(bh);
 831		bh = bh->b_this_page;
 832	}
 833
 834	return false;
 835}
 836
 837static int __buffer_migrate_folio(struct address_space *mapping,
 838		struct folio *dst, struct folio *src, enum migrate_mode mode,
 839		bool check_refs)
 840{
 841	struct buffer_head *bh, *head;
 842	int rc;
 843	int expected_count;
 844
 845	head = folio_buffers(src);
 846	if (!head)
 847		return migrate_folio(mapping, dst, src, mode);
 848
 849	/* Check that the folio has no extra refs before we do more work */
 850	expected_count = folio_expected_refs(mapping, src);
 851	if (folio_ref_count(src) != expected_count)
 852		return -EAGAIN;
 853
 
 854	if (!buffer_migrate_lock_buffers(head, mode))
 855		return -EAGAIN;
 856
 857	if (check_refs) {
 858		bool busy;
 859		bool invalidated = false;
 860
 861recheck_buffers:
 862		busy = false;
 863		spin_lock(&mapping->i_private_lock);
 864		bh = head;
 865		do {
 866			if (atomic_read(&bh->b_count)) {
 867				busy = true;
 868				break;
 869			}
 870			bh = bh->b_this_page;
 871		} while (bh != head);
 872		if (busy) {
 873			if (invalidated) {
 874				rc = -EAGAIN;
 875				goto unlock_buffers;
 876			}
 877			spin_unlock(&mapping->i_private_lock);
 878			invalidate_bh_lrus();
 879			invalidated = true;
 880			goto recheck_buffers;
 881		}
 882	}
 883
 884	rc = filemap_migrate_folio(mapping, dst, src, mode);
 885	if (rc != MIGRATEPAGE_SUCCESS)
 886		goto unlock_buffers;
 887
 
 
 888	bh = head;
 889	do {
 890		folio_set_bh(bh, dst, bh_offset(bh));
 891		bh = bh->b_this_page;
 
 892	} while (bh != head);
 893
 894unlock_buffers:
 895	if (check_refs)
 896		spin_unlock(&mapping->i_private_lock);
 897	bh = head;
 898	do {
 899		unlock_buffer(bh);
 900		bh = bh->b_this_page;
 
 901	} while (bh != head);
 902
 903	return rc;
 904}
 905
 906/**
 907 * buffer_migrate_folio() - Migration function for folios with buffers.
 908 * @mapping: The address space containing @src.
 909 * @dst: The folio to migrate to.
 910 * @src: The folio to migrate from.
 911 * @mode: How to migrate the folio.
 912 *
 913 * This function can only be used if the underlying filesystem guarantees
 914 * that no other references to @src exist. For example attached buffer
 915 * heads are accessed only under the folio lock.  If your filesystem cannot
 916 * provide this guarantee, buffer_migrate_folio_norefs() may be more
 917 * appropriate.
 918 *
 919 * Return: 0 on success or a negative errno on failure.
 920 */
 921int buffer_migrate_folio(struct address_space *mapping,
 922		struct folio *dst, struct folio *src, enum migrate_mode mode)
 923{
 924	return __buffer_migrate_folio(mapping, dst, src, mode, false);
 925}
 926EXPORT_SYMBOL(buffer_migrate_folio);
 927
 928/**
 929 * buffer_migrate_folio_norefs() - Migration function for folios with buffers.
 930 * @mapping: The address space containing @src.
 931 * @dst: The folio to migrate to.
 932 * @src: The folio to migrate from.
 933 * @mode: How to migrate the folio.
 934 *
 935 * Like buffer_migrate_folio() except that this variant is more careful
 936 * and checks that there are also no buffer head references. This function
 937 * is the right one for mappings where buffer heads are directly looked
 938 * up and referenced (such as block device mappings).
 939 *
 940 * Return: 0 on success or a negative errno on failure.
 941 */
 942int buffer_migrate_folio_norefs(struct address_space *mapping,
 943		struct folio *dst, struct folio *src, enum migrate_mode mode)
 944{
 945	return __buffer_migrate_folio(mapping, dst, src, mode, true);
 946}
 947EXPORT_SYMBOL_GPL(buffer_migrate_folio_norefs);
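/*
 * Illustrative sketch (hypothetical names, for reference only): a
 * block-backed filesystem that only touches its buffer heads under the
 * folio lock can use the first helper:
 *
 *	static const struct address_space_operations example_fs_aops = {
 *		...
 *		.migrate_folio	= buffer_migrate_folio,
 *	};
 *
 * whereas a mapping where buffer heads are looked up and referenced
 * directly (the block device mapping being the typical case) should use
 * buffer_migrate_folio_norefs() instead.
 */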
 948#endif /* CONFIG_BUFFER_HEAD */
 949
 950int filemap_migrate_folio(struct address_space *mapping,
 951		struct folio *dst, struct folio *src, enum migrate_mode mode)
 952{
 953	return __migrate_folio(mapping, dst, src, folio_get_private(src), mode);
 954}
 955EXPORT_SYMBOL_GPL(filemap_migrate_folio);
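/*
 * Illustrative sketch (hypothetical name, for reference only): filesystems
 * whose folio private data merely needs to follow the folio to its new
 * location can use filemap_migrate_folio() as-is:
 *
 *	static const struct address_space_operations example_aops = {
 *		...
 *		.migrate_folio	= filemap_migrate_folio,
 *	};
 */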
 956
 957/*
 958 * Writeback a folio to clean the dirty state
 959 */
 960static int writeout(struct address_space *mapping, struct folio *folio)
 961{
 962	struct writeback_control wbc = {
 963		.sync_mode = WB_SYNC_NONE,
 964		.nr_to_write = 1,
 965		.range_start = 0,
 966		.range_end = LLONG_MAX,
 967		.for_reclaim = 1
 968	};
 969	int rc;
 970
 971	if (!mapping->a_ops->writepage)
 972		/* No write method for the address space */
 973		return -EINVAL;
 974
 975	if (!folio_clear_dirty_for_io(folio))
 976		/* Someone else already triggered a write */
 977		return -EAGAIN;
 978
 979	/*
 980	 * A dirty folio may imply that the underlying filesystem has
 981	 * the folio on some queue. So the folio must be clean for
 982	 * migration. Writeout may mean we lose the lock and the
 983	 * folio state is no longer what we checked for earlier.
 984	 * At this point we know that the migration attempt cannot
 985	 * be successful.
 986	 */
 987	remove_migration_ptes(folio, folio, 0);
 988
 989	rc = mapping->a_ops->writepage(&folio->page, &wbc);
 990
 991	if (rc != AOP_WRITEPAGE_ACTIVATE)
 992		/* unlocked. Relock */
 993		folio_lock(folio);
 994
 995	return (rc < 0) ? -EIO : -EAGAIN;
 996}
 997
 998/*
 999 * Default handling if a filesystem does not provide a migration function.
1000 */
1001static int fallback_migrate_folio(struct address_space *mapping,
1002		struct folio *dst, struct folio *src, enum migrate_mode mode)
1003{
1004	if (folio_test_dirty(src)) {
1005		/* Only writeback folios in full synchronous migration */
1006		switch (mode) {
1007		case MIGRATE_SYNC:
 
1008			break;
1009		default:
1010			return -EBUSY;
1011		}
1012		return writeout(mapping, src);
1013	}
1014
1015	/*
1016	 * Buffers may be managed in a filesystem-specific way.
1017	 * We must have no buffers or drop them.
1018	 */
1019	if (!filemap_release_folio(src, GFP_KERNEL))
 
1020		return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;
1021
1022	return migrate_folio(mapping, dst, src, mode);
1023}
1024
1025/*
1026 * Move a page to a newly allocated page
1027 * The page is locked and all ptes have been successfully removed.
1028 *
1029 * The new page will have replaced the old page if this function
1030 * is successful.
1031 *
1032 * Return value:
1033 *   < 0 - error code
1034 *  MIGRATEPAGE_SUCCESS - success
1035 */
1036static int move_to_new_folio(struct folio *dst, struct folio *src,
1037				enum migrate_mode mode)
1038{
 
1039	int rc = -EAGAIN;
1040	bool is_lru = !__folio_test_movable(src);
1041
1042	VM_BUG_ON_FOLIO(!folio_test_locked(src), src);
1043	VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst);
1044
1045	if (likely(is_lru)) {
1046		struct address_space *mapping = folio_mapping(src);
1047
1048		if (!mapping)
1049			rc = migrate_folio(mapping, dst, src, mode);
1050		else if (mapping_inaccessible(mapping))
1051			rc = -EOPNOTSUPP;
1052		else if (mapping->a_ops->migrate_folio)
1053			/*
1054			 * Most folios have a mapping and most filesystems
1055			 * provide a migrate_folio callback. Anonymous folios
1056			 * are part of swap space which also has its own
1057			 * migrate_folio callback. This is the most common path
1058			 * for page migration.
1059			 */
1060			rc = mapping->a_ops->migrate_folio(mapping, dst, src,
1061								mode);
1062		else
1063			rc = fallback_migrate_folio(mapping, dst, src, mode);
 
1064	} else {
1065		const struct movable_operations *mops;
1066
1067		/*
1068		 * In case of non-lru page, it could be released after
1069		 * isolation step. In that case, we shouldn't try migration.
1070		 */
1071		VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
1072		if (!folio_test_movable(src)) {
1073			rc = MIGRATEPAGE_SUCCESS;
1074			folio_clear_isolated(src);
1075			goto out;
1076		}
1077
1078		mops = folio_movable_ops(src);
1079		rc = mops->migrate_page(&dst->page, &src->page, mode);
1080		WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
1081				!folio_test_isolated(src));
1082	}
1083
1084	/*
1085	 * When successful, old pagecache src->mapping must be cleared before
1086	 * src is freed; but stats require that PageAnon be left as PageAnon.
1087	 */
1088	if (rc == MIGRATEPAGE_SUCCESS) {
1089		if (__folio_test_movable(src)) {
1090			VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
1091
1092			/*
1093			 * We clear PG_movable under page_lock so any compactor
1094			 * cannot try to migrate this page.
1095			 */
1096			folio_clear_isolated(src);
1097		}
1098
1099		/*
1100		 * Anonymous and movable src->mapping will be cleared by
1101		 * free_pages_prepare() so don't reset it here, to keep
1102		 * checks such as PageAnon working.
1103		 */
1104		if (!folio_mapping_flags(src))
1105			src->mapping = NULL;
1106
1107		if (likely(!folio_is_zone_device(dst)))
1108			flush_dcache_folio(dst);
1109	}
1110out:
1111	return rc;
1112}
1113
1114/*
1115 * To record some information during migration, we use the otherwise
1116 * unused private field of the newly allocated destination folio.
1117 * This is safe because nobody else is using it.
1118 */
1119enum {
1120	PAGE_WAS_MAPPED = BIT(0),
1121	PAGE_WAS_MLOCKED = BIT(1),
1122	PAGE_OLD_STATES = PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED,
1123};
1124
1125static void __migrate_folio_record(struct folio *dst,
1126				   int old_page_state,
1127				   struct anon_vma *anon_vma)
1128{
1129	dst->private = (void *)anon_vma + old_page_state;
1130}
1131
1132static void __migrate_folio_extract(struct folio *dst,
1133				   int *old_page_state,
1134				   struct anon_vma **anon_vmap)
1135{
1136	unsigned long private = (unsigned long)dst->private;
1137
1138	*anon_vmap = (struct anon_vma *)(private & ~PAGE_OLD_STATES);
1139	*old_page_state = private & PAGE_OLD_STATES;
1140	dst->private = NULL;
1141}
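/*
 * Note: the two helpers above assume that struct anon_vma is allocated
 * with at least 4-byte alignment, so the PAGE_WAS_MAPPED and
 * PAGE_WAS_MLOCKED bits can ride in the low bits of dst->private next to
 * the anon_vma pointer and be masked back out with PAGE_OLD_STATES.
 */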
1142
1143/* Restore the source folio to the original state upon failure */
1144static void migrate_folio_undo_src(struct folio *src,
1145				   int page_was_mapped,
1146				   struct anon_vma *anon_vma,
1147				   bool locked,
1148				   struct list_head *ret)
1149{
1150	if (page_was_mapped)
1151		remove_migration_ptes(src, src, 0);
1152	/* Drop an anon_vma reference if we took one */
1153	if (anon_vma)
1154		put_anon_vma(anon_vma);
1155	if (locked)
1156		folio_unlock(src);
1157	if (ret)
1158		list_move_tail(&src->lru, ret);
1159}
1160
1161/* Restore the destination folio to the original state upon failure */
1162static void migrate_folio_undo_dst(struct folio *dst, bool locked,
1163		free_folio_t put_new_folio, unsigned long private)
1164{
1165	if (locked)
1166		folio_unlock(dst);
1167	if (put_new_folio)
1168		put_new_folio(dst, private);
1169	else
1170		folio_put(dst);
1171}
1172
1173/* Cleanup src folio upon migration success */
1174static void migrate_folio_done(struct folio *src,
1175			       enum migrate_reason reason)
1176{
1177	/*
1178	 * Compaction can also migrate non-LRU pages, which are
1179	 * not accounted to NR_ISOLATED_*. They can be recognized
1180	 * via __folio_test_movable().
1181	 */
1182	if (likely(!__folio_test_movable(src)) && reason != MR_DEMOTION)
1183		mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON +
1184				    folio_is_file_lru(src), -folio_nr_pages(src));
1185
1186	if (reason != MR_MEMORY_FAILURE)
1187		/* We release the page in page_handle_poison. */
1188		folio_put(src);
1189}
1190
1191/* Obtain the lock on page, remove all ptes. */
1192static int migrate_folio_unmap(new_folio_t get_new_folio,
1193		free_folio_t put_new_folio, unsigned long private,
1194		struct folio *src, struct folio **dstp, enum migrate_mode mode,
1195		enum migrate_reason reason, struct list_head *ret)
1196{
1197	struct folio *dst;
1198	int rc = -EAGAIN;
1199	int old_page_state = 0;
1200	struct anon_vma *anon_vma = NULL;
1201	bool is_lru = data_race(!__folio_test_movable(src));
1202	bool locked = false;
1203	bool dst_locked = false;
1204
1205	if (folio_ref_count(src) == 1) {
1206		/* Folio was freed from under us. So we are done. */
1207		folio_clear_active(src);
1208		folio_clear_unevictable(src);
1209		/* free_pages_prepare() will clear PG_isolated. */
1210		list_del(&src->lru);
1211		migrate_folio_done(src, reason);
1212		return MIGRATEPAGE_SUCCESS;
1213	}
1214
1215	dst = get_new_folio(src, private);
1216	if (!dst)
1217		return -ENOMEM;
1218	*dstp = dst;
1219
1220	dst->private = NULL;
1221
1222	if (!folio_trylock(src)) {
1223		if (mode == MIGRATE_ASYNC)
1224			goto out;
1225
1226		/*
1227		 * It's not safe for direct compaction to call lock_page.
1228		 * For example, during page readahead pages are added locked
1229		 * to the LRU. Later, when the IO completes the pages are
1230		 * marked uptodate and unlocked. However, the queueing
1231		 * could be merging multiple pages for one bio (e.g.
1232		 * mpage_readahead). If an allocation happens for the
1233		 * second or third page, the process can end up locking
1234		 * the same page twice and deadlocking. Rather than
1235		 * trying to be clever about what pages can be locked,
1236		 * avoid the use of lock_page for direct compaction
1237		 * altogether.
1238		 */
1239		if (current->flags & PF_MEMALLOC)
1240			goto out;
1241
1242		/*
1243		 * In "light" mode, we can wait for transient locks (eg
1244		 * inserting a page into the page table), but it's not
1245		 * worth waiting for I/O.
1246		 */
1247		if (mode == MIGRATE_SYNC_LIGHT && !folio_test_uptodate(src))
1248			goto out;
1249
1250		folio_lock(src);
1251	}
1252	locked = true;
1253	if (folio_test_mlocked(src))
1254		old_page_state |= PAGE_WAS_MLOCKED;
1255
1256	if (folio_test_writeback(src)) {
1257		/*
1258		 * Only in the case of a full synchronous migration is it
1259		 * necessary to wait for PageWriteback. In the async case,
1260		 * the retry loop is too short and in the sync-light case,
1261		 * the overhead of stalling is too much
1262		 */
1263		switch (mode) {
1264		case MIGRATE_SYNC:
 
1265			break;
1266		default:
1267			rc = -EBUSY;
1268			goto out;
1269		}
1270		folio_wait_writeback(src);
 
 
1271	}
1272
1273	/*
1274	 * try_to_migrate() below will drop src->mapcount to 0. Once that happens,
1275	 * we cannot notice that the anon_vma is freed while we migrate the page.
1276	 * This folio_get_anon_vma() delays freeing the anon_vma pointer until the
1277	 * end of migration. File cache pages are no problem because of page_lock():
1278	 * file caches may use writepage() or lock_page() during migration, so
1279	 * only anon pages need this care here.
1280	 *
1281	 * Only folio_get_anon_vma() understands the subtleties of
1282	 * getting a hold on an anon_vma from outside one of its mms.
1283	 * But if we cannot get anon_vma, then we won't need it anyway,
1284	 * because that implies that the anon page is no longer mapped
1285	 * (and cannot be remapped so long as we hold the page lock).
1286	 */
1287	if (folio_test_anon(src) && !folio_test_ksm(src))
1288		anon_vma = folio_get_anon_vma(src);
1289
1290	/*
1291	 * Block others from accessing the new page when we get around to
1292	 * establishing additional references. We are usually the only one
1293	 * holding a reference to dst at this point. We used to have a BUG
1294	 * here if folio_trylock(dst) fails, but would like to allow for
1295	 * cases where there might be a race with the previous use of dst.
1296	 * This is much like races on refcount of oldpage: just don't BUG().
1297	 */
1298	if (unlikely(!folio_trylock(dst)))
1299		goto out;
1300	dst_locked = true;
1301
1302	if (unlikely(!is_lru)) {
1303		__migrate_folio_record(dst, old_page_state, anon_vma);
1304		return MIGRATEPAGE_UNMAP;
1305	}
1306
1307	/*
1308	 * Corner case handling:
1309	 * 1. When a new swap-cache page is being read in, it is added to the LRU
1310	 * and treated as swapcache but it has no rmap yet.
1311	 * Calling try_to_unmap() against a src->mapping==NULL page will
1312	 * trigger a BUG.  So handle it here.
1313	 * 2. An orphaned page (see truncate_cleanup_page) might have
1314	 * fs-private metadata. The page can be picked up due to memory
1315	 * offlining.  Everywhere else except page reclaim, the page is
1316	 * invisible to the vm, so the page can not be migrated.  So try to
1317	 * free the metadata, so the page can be freed.
1318	 */
1319	if (!src->mapping) {
1320		if (folio_test_private(src)) {
1321			try_to_free_buffers(src);
1322			goto out;
 
1323		}
1324	} else if (folio_mapped(src)) {
1325		/* Establish migration ptes */
1326		VM_BUG_ON_FOLIO(folio_test_anon(src) &&
1327			       !folio_test_ksm(src) && !anon_vma, src);
1328		try_to_migrate(src, mode == MIGRATE_ASYNC ? TTU_BATCH_FLUSH : 0);
1329		old_page_state |= PAGE_WAS_MAPPED;
1330	}
1331
1332	if (!folio_mapped(src)) {
1333		__migrate_folio_record(dst, old_page_state, anon_vma);
1334		return MIGRATEPAGE_UNMAP;
1335	}
 
 
1336
1337out:
1338	/*
1339	 * A folio that has not been unmapped will be restored to
1340	 * the right list unless we want to retry.
1341	 */
1342	if (rc == -EAGAIN)
1343		ret = NULL;
1344
1345	migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
1346			       anon_vma, locked, ret);
1347	migrate_folio_undo_dst(dst, dst_locked, put_new_folio, private);
1348
1349	return rc;
1350}
1351
1352/* Migrate the folio to the newly allocated folio in dst. */
1353static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
1354			      struct folio *src, struct folio *dst,
1355			      enum migrate_mode mode, enum migrate_reason reason,
1356			      struct list_head *ret)
1357{
1358	int rc;
1359	int old_page_state = 0;
1360	struct anon_vma *anon_vma = NULL;
1361	bool is_lru = !__folio_test_movable(src);
1362	struct list_head *prev;
1363
1364	__migrate_folio_extract(dst, &old_page_state, &anon_vma);
1365	prev = dst->lru.prev;
1366	list_del(&dst->lru);
1367
1368	rc = move_to_new_folio(dst, src, mode);
1369	if (rc)
1370		goto out;
 
1371
1372	if (unlikely(!is_lru))
1373		goto out_unlock_both;
 
1374
1375	/*
1376	 * When successful, push dst to LRU immediately: so that if it
1377	 * turns out to be an mlocked page, remove_migration_ptes() will
1378	 * automatically build up the correct dst->mlock_count for it.
1379	 *
1380	 * We would like to do something similar for the old page, when
1381	 * unsuccessful, and other cases when a page has been temporarily
1382	 * isolated from the unevictable LRU: but this case is the easiest.
1383	 */
1384	folio_add_lru(dst);
1385	if (old_page_state & PAGE_WAS_MLOCKED)
1386		lru_add_drain();
1387
1388	if (old_page_state & PAGE_WAS_MAPPED)
1389		remove_migration_ptes(src, dst, 0);
1390
1391out_unlock_both:
1392	folio_unlock(dst);
1393	set_page_owner_migrate_reason(&dst->page, reason);
1394	/*
1395	 * If migration is successful, decrease the refcount of dst,
1396	 * which will not free the page because the new page owner already
1397	 * holds a reference.
1398	 */
1399	folio_put(dst);
1400
1401	/*
1402	 * A folio that has been migrated has all references removed
1403	 * and will be freed.
1404	 */
1405	list_del(&src->lru);
1406	/* Drop an anon_vma reference if we took one */
1407	if (anon_vma)
1408		put_anon_vma(anon_vma);
1409	folio_unlock(src);
1410	migrate_folio_done(src, reason);
1411
1412	return rc;
1413out:
1414	/*
1415	 * A folio that has not been migrated will be restored to
1416	 * the right list unless we want to retry.
1417	 */
1418	if (rc == -EAGAIN) {
1419		list_add(&dst->lru, prev);
1420		__migrate_folio_record(dst, old_page_state, anon_vma);
1421		return rc;
1422	}
1423
1424	migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
1425			       anon_vma, true, ret);
1426	migrate_folio_undo_dst(dst, true, put_new_folio, private);
1427
1428	return rc;
1429}
1430
1431/*
1432 * Counterpart of unmap_and_move_page() for hugepage migration.
1433 *
1434 * This function doesn't wait for the completion of hugepage I/O
1435 * because there is no race between I/O and migration for hugepages.
1436 * Note that currently hugepage I/O occurs only in direct I/O
1437 * where no lock is held and PG_writeback is irrelevant,
1438 * and the writeback status of all subpages is counted in the reference
1439 * count of the head page (i.e. if all subpages of a 2MB hugepage are
1440 * under direct I/O, the reference of the head page is 512 and a bit more.)
1441 * This means that when we try to migrate a hugepage whose subpages are
1442 * doing direct I/O, some references remain after try_to_unmap() and
1443 * hugepage migration fails without data corruption.
1444 *
1445 * There is also no race when direct I/O is issued on the page under migration,
1446 * because the pte is then replaced with a migration swap entry and the direct
1447 * I/O code will wait in the page fault for migration to complete.
1448 */
1449static int unmap_and_move_huge_page(new_folio_t get_new_folio,
1450		free_folio_t put_new_folio, unsigned long private,
1451		struct folio *src, int force, enum migrate_mode mode,
1452		int reason, struct list_head *ret)
 
1453{
1454	struct folio *dst;
1455	int rc = -EAGAIN;
1456	int page_was_mapped = 0;
 
1457	struct anon_vma *anon_vma = NULL;
1458	struct address_space *mapping = NULL;
1459
1460	if (folio_ref_count(src) == 1) {
1461		/* page was freed from under us. So we are done. */
1462		folio_putback_active_hugetlb(src);
1463		return MIGRATEPAGE_SUCCESS;
1464	}
1465
1466	dst = get_new_folio(src, private);
1467	if (!dst)
1468		return -ENOMEM;
1469
1470	if (!folio_trylock(src)) {
1471		if (!force)
1472			goto out;
1473		switch (mode) {
1474		case MIGRATE_SYNC:
 
1475			break;
1476		default:
1477			goto out;
1478		}
1479		folio_lock(src);
1480	}
1481
1482	/*
1483	 * Check for pages which are in the process of being freed.  Without
1484	 * folio_mapping() set, hugetlbfs specific move page routine will not
1485	 * be called and we could leak usage counts for subpools.
1486	 */
1487	if (hugetlb_folio_subpool(src) && !folio_mapping(src)) {
1488		rc = -EBUSY;
1489		goto out_unlock;
1490	}
1491
1492	if (folio_test_anon(src))
1493		anon_vma = folio_get_anon_vma(src);
1494
1495	if (unlikely(!folio_trylock(dst)))
1496		goto put_anon;
1497
1498	if (folio_mapped(src)) {
 
1499		enum ttu_flags ttu = 0;
1500
1501		if (!folio_test_anon(src)) {
1502			/*
1503			 * In shared mappings, try_to_unmap could potentially
1504			 * call huge_pmd_unshare.  Because of this, take
1505			 * semaphore in write mode here and set TTU_RMAP_LOCKED
1506			 * to let lower levels know we have taken the lock.
1507			 */
1508			mapping = hugetlb_folio_mapping_lock_write(src);
1509			if (unlikely(!mapping))
1510				goto unlock_put_anon;
1511
1512			ttu = TTU_RMAP_LOCKED;
 
1513		}
1514
1515		try_to_migrate(src, ttu);
1516		page_was_mapped = 1;
1517
1518		if (ttu & TTU_RMAP_LOCKED)
1519			i_mmap_unlock_write(mapping);
1520	}
1521
1522	if (!folio_mapped(src))
1523		rc = move_to_new_folio(dst, src, mode);
1524
1525	if (page_was_mapped)
1526		remove_migration_ptes(src,
1527			rc == MIGRATEPAGE_SUCCESS ? dst : src, 0);
1528
1529unlock_put_anon:
1530	folio_unlock(dst);
1531
1532put_anon:
1533	if (anon_vma)
1534		put_anon_vma(anon_vma);
1535
1536	if (rc == MIGRATEPAGE_SUCCESS) {
1537		move_hugetlb_state(src, dst, reason);
1538		put_new_folio = NULL;
1539	}
1540
1541out_unlock:
1542	folio_unlock(src);
1543out:
1544	if (rc == MIGRATEPAGE_SUCCESS)
1545		folio_putback_active_hugetlb(src);
1546	else if (rc != -EAGAIN)
1547		list_move_tail(&src->lru, ret);
1548
1549	/*
1550	 * If migration was not successful and there's a freeing callback, use
1551	 * it.  Otherwise, put_page() will drop the reference grabbed during
1552	 * isolation.
1553	 */
1554	if (put_new_folio)
1555		put_new_folio(dst, private);
1556	else
1557		folio_putback_active_hugetlb(dst);
1558
1559	return rc;
1560}
1561
1562static inline int try_split_folio(struct folio *folio, struct list_head *split_folios,
1563				  enum migrate_mode mode)
1564{
1565	int rc;
1566
1567	if (mode == MIGRATE_ASYNC) {
1568		if (!folio_trylock(folio))
1569			return -EAGAIN;
1570	} else {
1571		folio_lock(folio);
1572	}
1573	rc = split_folio_to_list(folio, split_folios);
1574	folio_unlock(folio);
1575	if (!rc)
1576		list_move_tail(&folio->lru, split_folios);
1577
1578	return rc;
1579}
1580
1581#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1582#define NR_MAX_BATCHED_MIGRATION	HPAGE_PMD_NR
1583#else
1584#define NR_MAX_BATCHED_MIGRATION	512
1585#endif
1586#define NR_MAX_MIGRATE_PAGES_RETRY	10
1587#define NR_MAX_MIGRATE_ASYNC_RETRY	3
1588#define NR_MAX_MIGRATE_SYNC_RETRY					\
1589	(NR_MAX_MIGRATE_PAGES_RETRY - NR_MAX_MIGRATE_ASYNC_RETRY)
1590
1591struct migrate_pages_stats {
1592	int nr_succeeded;	/* Normal and large folios migrated successfully, in
1593				   units of base pages */
1594	int nr_failed_pages;	/* Normal and large folios failed to be migrated, in
1595				   units of base pages.  Untried folios aren't counted */
1596	int nr_thp_succeeded;	/* THP migrated successfully */
1597	int nr_thp_failed;	/* THP failed to be migrated */
1598	int nr_thp_split;	/* THP split before migrating */
1599	int nr_split;	/* Large folio (include THP) split before migrating */
1600};
1601
1602/*
1603 * Returns the number of hugetlb folios that were not migrated, or an error code
1604 * after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no hugetlb folios are movable
1605 * any more because the list has become empty or no retryable hugetlb folios
1606 * exist any more. It is the caller's responsibility to call putback_movable_pages()
1607 * only if ret != 0.
1608 */
1609static int migrate_hugetlbs(struct list_head *from, new_folio_t get_new_folio,
1610			    free_folio_t put_new_folio, unsigned long private,
1611			    enum migrate_mode mode, int reason,
1612			    struct migrate_pages_stats *stats,
1613			    struct list_head *ret_folios)
1614{
1615	int retry = 1;
 
1616	int nr_failed = 0;
1617	int nr_retry_pages = 0;
1618	int pass = 0;
1619	struct folio *folio, *folio2;
1620	int rc, nr_pages;
1621
1622	for (pass = 0; pass < NR_MAX_MIGRATE_PAGES_RETRY && retry; pass++) {
1623		retry = 0;
1624		nr_retry_pages = 0;
1625
1626		list_for_each_entry_safe(folio, folio2, from, lru) {
1627			if (!folio_test_hugetlb(folio))
1628				continue;
1629
1630			nr_pages = folio_nr_pages(folio);
 
1631
1632			cond_resched();
 
 
1633
 
 
1634			/*
1635			 * Migratability of hugepages depends on architectures and
1636			 * their size.  This check is necessary because some callers
1637			 * of hugepage migration like soft offline and memory
1638			 * hotremove don't walk through page tables or check whether
1639			 * the hugepage is pmd-based or not before kicking migration.
1640			 */
1641			if (!hugepage_migration_supported(folio_hstate(folio))) {
1642				nr_failed++;
1643				stats->nr_failed_pages += nr_pages;
1644				list_move_tail(&folio->lru, ret_folios);
1645				continue;
1646			}
1647
1648			rc = unmap_and_move_huge_page(get_new_folio,
1649						      put_new_folio, private,
1650						      folio, pass > 2, mode,
1651						      reason, ret_folios);
1652			/*
1653			 * The rules are:
1654			 *	Success: hugetlb folio will be put back
1655			 *	-EAGAIN: stay on the from list
1656			 *	-ENOMEM: stay on the from list
1657			 *	Other errno: put on ret_folios list
1658			 */
1659			switch(rc) {
1660			case -ENOMEM:
1661				/*
1662				 * When memory is low, don't bother to try to migrate
1663				 * other folios, just exit.
1664				 */
1665				stats->nr_failed_pages += nr_pages + nr_retry_pages;
1666				return -ENOMEM;
1667			case -EAGAIN:
1668				retry++;
1669				nr_retry_pages += nr_pages;
1670				break;
1671			case MIGRATEPAGE_SUCCESS:
1672				stats->nr_succeeded += nr_pages;
1673				break;
1674			default:
1675				/*
1676				 * Permanent failure (-EBUSY, etc.):
1677				 * unlike -EAGAIN case, the failed folio is
1678				 * removed from migration folio list and not
1679				 * retried in the next outer loop.
1680				 */
1681				nr_failed++;
1682				stats->nr_failed_pages += nr_pages;
1683				break;
1684			}
1685		}
1686	}
1687	/*
1688	 * nr_failed is number of hugetlb folios failed to be migrated.  After
1689	 * NR_MAX_MIGRATE_PAGES_RETRY attempts, give up and count retried hugetlb
1690	 * folios as failed.
1691	 */
1692	nr_failed += retry;
1693	stats->nr_failed_pages += nr_retry_pages;
1694
1695	return nr_failed;
1696}
1697
1698/*
1699 * migrate_pages_batch() first unmaps as many folios in the from list as
1700 * possible, then moves the unmapped folios.
1701 *
1702 * We only batch migration if mode == MIGRATE_ASYNC, to avoid waiting on a
1703 * lock or bit when we have locked more than one folio, which may cause a
1704 * deadlock (e.g., for the loop device).  So, if mode != MIGRATE_ASYNC, the
1705 * length of the from list must be <= 1.
1706 */
1707static int migrate_pages_batch(struct list_head *from,
1708		new_folio_t get_new_folio, free_folio_t put_new_folio,
1709		unsigned long private, enum migrate_mode mode, int reason,
1710		struct list_head *ret_folios, struct list_head *split_folios,
1711		struct migrate_pages_stats *stats, int nr_pass)
1712{
1713	int retry = 1;
1714	int thp_retry = 1;
1715	int nr_failed = 0;
1716	int nr_retry_pages = 0;
1717	int pass = 0;
1718	bool is_thp = false;
1719	bool is_large = false;
1720	struct folio *folio, *folio2, *dst = NULL, *dst2;
1721	int rc, rc_saved = 0, nr_pages;
1722	LIST_HEAD(unmap_folios);
1723	LIST_HEAD(dst_folios);
1724	bool nosplit = (reason == MR_NUMA_MISPLACED);
1725
1726	VM_WARN_ON_ONCE(mode != MIGRATE_ASYNC &&
1727			!list_empty(from) && !list_is_singular(from));
1728
1729	for (pass = 0; pass < nr_pass && retry; pass++) {
1730		retry = 0;
1731		thp_retry = 0;
1732		nr_retry_pages = 0;
1733
1734		list_for_each_entry_safe(folio, folio2, from, lru) {
1735			is_large = folio_test_large(folio);
1736			is_thp = folio_test_pmd_mappable(folio);
1737			nr_pages = folio_nr_pages(folio);
1738
1739			cond_resched();
1740
1741			/*
1742			 * The rare folio on the deferred split list should
1743			 * be split now. It should not count as a failure:
1744			 * but increment nr_failed because, without doing so,
1745			 * migrate_pages() may report success with (split but
1746			 * unmigrated) pages still on its fromlist; whereas it
1747			 * always reports success when its fromlist is empty.
1748			 * stats->nr_thp_failed should be increased too,
1749			 * otherwise stats inconsistency will happen when
1750			 * migrate_pages_batch is called via migrate_pages()
1751			 * with MIGRATE_SYNC and MIGRATE_ASYNC.
1752			 *
1753			 * Only check it, without removing it from the list,
1754			 * since the folio can be on a deferred_split_scan()
1755			 * local list and removing it here could corrupt that
1756			 * local list. The folio split process below can handle
1757			 * it with the help of folio_ref_freeze().
1758			 *
1759			 * nr_pages > 2 is needed to avoid checking order-1
1760			 * page cache folios. They exist, in contrast to
1761			 * non-existent order-1 anonymous folios, and do not
1762			 * use _deferred_list.
1763			 */
1764			if (nr_pages > 2 &&
1765			   !list_empty(&folio->_deferred_list) &&
1766			   folio_test_partially_mapped(folio)) {
1767				if (!try_split_folio(folio, split_folios, mode)) {
1768					nr_failed++;
1769					stats->nr_thp_failed += is_thp;
1770					stats->nr_thp_split += is_thp;
1771					stats->nr_split++;
1772					continue;
 
 
1773				}
1774			}
1775
1776			/*
1777			 * Large folio migration might be unsupported or
1778			 * the allocation might fail, so we should retry
1779			 * on the same folio with the large folio split
1780			 * into normal folios.
1781			 *
1782			 * Split folios are put in split_folios, and
1783			 * we will migrate them after the rest of the
1784			 * list is processed.
1785			 */
1786			if (!thp_migration_supported() && is_thp) {
1787				nr_failed++;
1788				stats->nr_thp_failed++;
1789				if (!try_split_folio(folio, split_folios, mode)) {
1790					stats->nr_thp_split++;
1791					stats->nr_split++;
1792					continue;
1793				}
1794				stats->nr_failed_pages += nr_pages;
1795				list_move_tail(&folio->lru, ret_folios);
1796				continue;
1797			}
1798
1799			rc = migrate_folio_unmap(get_new_folio, put_new_folio,
1800					private, folio, &dst, mode, reason,
1801					ret_folios);
1802			/*
1803			 * The rules are:
1804			 *	Success: folio will be freed
1805			 *	Unmap: folio will be put on unmap_folios list,
1806			 *	       dst folio put on dst_folios list
1807			 *	-EAGAIN: stay on the from list
1808			 *	-ENOMEM: stay on the from list
1809			 *	Other errno: put on ret_folios list
1810			 */
1811			switch(rc) {
1812			case -ENOMEM:
1813				/*
1814				 * When memory is low, don't bother to try to migrate
1815				 * other folios; move the already-unmapped folios, then exit.
1816				 */
1817				nr_failed++;
1818				stats->nr_thp_failed += is_thp;
1819				/* Large folio NUMA faulting doesn't split to retry. */
1820				if (is_large && !nosplit) {
1821					int ret = try_split_folio(folio, split_folios, mode);
1822
1823					if (!ret) {
1824						stats->nr_thp_split += is_thp;
1825						stats->nr_split++;
1826						break;
1827					} else if (reason == MR_LONGTERM_PIN &&
1828						   ret == -EAGAIN) {
1829						/*
1830						 * Try again to split large folio to
1831						 * mitigate the failure of longterm pinning.
1832						 */
1833						retry++;
1834						thp_retry += is_thp;
1835						nr_retry_pages += nr_pages;
1836						/* Undo duplicated failure counting. */
1837						nr_failed--;
1838						stats->nr_thp_failed -= is_thp;
1839						break;
1840					}
1841				}
1842
1843				stats->nr_failed_pages += nr_pages + nr_retry_pages;
1844				/* nr_failed isn't updated for not used */
1845				stats->nr_thp_failed += thp_retry;
1846				rc_saved = rc;
1847				if (list_empty(&unmap_folios))
1848					goto out;
1849				else
1850					goto move;
 
1851			case -EAGAIN:
1852				retry++;
1853				thp_retry += is_thp;
1854				nr_retry_pages += nr_pages;
1855				break;
1856			case MIGRATEPAGE_SUCCESS:
1857				stats->nr_succeeded += nr_pages;
1858				stats->nr_thp_succeeded += is_thp;
1859				break;
1860			case MIGRATEPAGE_UNMAP:
1861				list_move_tail(&folio->lru, &unmap_folios);
1862				list_add_tail(&dst->lru, &dst_folios);
1863				break;
1864			default:
1865				/*
1866				 * Permanent failure (-EBUSY, etc.):
1867				 * unlike -EAGAIN case, the failed folio is
1868				 * removed from migration folio list and not
1869				 * retried in the next outer loop.
1870				 */
1871				nr_failed++;
1872				stats->nr_thp_failed += is_thp;
1873				stats->nr_failed_pages += nr_pages;
1874				break;
1875			}
1876		}
1877	}
1878	nr_failed += retry;
1879	stats->nr_thp_failed += thp_retry;
1880	stats->nr_failed_pages += nr_retry_pages;
1881move:
1882	/* Flush TLBs for all unmapped folios */
1883	try_to_unmap_flush();
1884
1885	retry = 1;
1886	for (pass = 0; pass < nr_pass && retry; pass++) {
1887		retry = 0;
1888		thp_retry = 0;
1889		nr_retry_pages = 0;
1890
1891		dst = list_first_entry(&dst_folios, struct folio, lru);
1892		dst2 = list_next_entry(dst, lru);
1893		list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
1894			is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
1895			nr_pages = folio_nr_pages(folio);
1896
1897			cond_resched();
1898
1899			rc = migrate_folio_move(put_new_folio, private,
1900						folio, dst, mode,
1901						reason, ret_folios);
1902			/*
1903			 * The rules are:
1904			 *	Success: folio will be freed
1905			 *	-EAGAIN: stay on the unmap_folios list
1906			 *	Other errno: put on ret_folios list
1907			 */
1908			switch(rc) {
1909			case -EAGAIN:
1910				retry++;
1911				thp_retry += is_thp;
1912				nr_retry_pages += nr_pages;
1913				break;
1914			case MIGRATEPAGE_SUCCESS:
1915				stats->nr_succeeded += nr_pages;
1916				stats->nr_thp_succeeded += is_thp;
1917				break;
1918			default:
1919				nr_failed++;
1920				stats->nr_thp_failed += is_thp;
1921				stats->nr_failed_pages += nr_pages;
1922				break;
1923			}
1924			dst = dst2;
1925			dst2 = list_next_entry(dst, lru);
1926		}
1927	}
1928	nr_failed += retry;
1929	stats->nr_thp_failed += thp_retry;
1930	stats->nr_failed_pages += nr_retry_pages;
1931
1932	rc = rc_saved ? : nr_failed;
1933out:
1934	/* Cleanup remaining folios */
1935	dst = list_first_entry(&dst_folios, struct folio, lru);
1936	dst2 = list_next_entry(dst, lru);
1937	list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
1938		int old_page_state = 0;
1939		struct anon_vma *anon_vma = NULL;
1940
1941		__migrate_folio_extract(dst, &old_page_state, &anon_vma);
1942		migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
1943				       anon_vma, true, ret_folios);
1944		list_del(&dst->lru);
1945		migrate_folio_undo_dst(dst, true, put_new_folio, private);
1946		dst = dst2;
1947		dst2 = list_next_entry(dst, lru);
1948	}
1949
1950	return rc;
1951}
1952
1953static int migrate_pages_sync(struct list_head *from, new_folio_t get_new_folio,
1954		free_folio_t put_new_folio, unsigned long private,
1955		enum migrate_mode mode, int reason,
1956		struct list_head *ret_folios, struct list_head *split_folios,
1957		struct migrate_pages_stats *stats)
1958{
1959	int rc, nr_failed = 0;
1960	LIST_HEAD(folios);
1961	struct migrate_pages_stats astats;
1962
1963	memset(&astats, 0, sizeof(astats));
1964	/* Try to migrate in batch with MIGRATE_ASYNC mode firstly */
1965	rc = migrate_pages_batch(from, get_new_folio, put_new_folio, private, MIGRATE_ASYNC,
1966				 reason, &folios, split_folios, &astats,
1967				 NR_MAX_MIGRATE_ASYNC_RETRY);
1968	stats->nr_succeeded += astats.nr_succeeded;
1969	stats->nr_thp_succeeded += astats.nr_thp_succeeded;
1970	stats->nr_thp_split += astats.nr_thp_split;
1971	stats->nr_split += astats.nr_split;
1972	if (rc < 0) {
1973		stats->nr_failed_pages += astats.nr_failed_pages;
1974		stats->nr_thp_failed += astats.nr_thp_failed;
1975		list_splice_tail(&folios, ret_folios);
1976		return rc;
1977	}
1978	stats->nr_thp_failed += astats.nr_thp_split;
1979	/*
1980	 * Do not count rc, as pages will be retried below.
1981	 * Count nr_split only, since it includes nr_thp_split.
1982	 */
1983	nr_failed += astats.nr_split;
1984	/*
1985	 * Fall back to migrate all failed folios one by one synchronously. All
1986	 * failed folios except split THPs will be retried, so their failure
1987	 * isn't counted
1988	 */
1989	list_splice_tail_init(&folios, from);
1990	while (!list_empty(from)) {
1991		list_move(from->next, &folios);
1992		rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
1993					 private, mode, reason, ret_folios,
1994					 split_folios, stats, NR_MAX_MIGRATE_SYNC_RETRY);
1995		list_splice_tail_init(&folios, ret_folios);
1996		if (rc < 0)
1997			return rc;
1998		nr_failed += rc;
1999	}
2000
2001	return nr_failed;
2002}
2003
2004/*
2005 * migrate_pages - migrate the folios specified in a list, to the free folios
2006 *		   supplied as the target for the page migration
2007 *
2008 * @from:		The list of folios to be migrated.
2009 * @get_new_folio:	The function used to allocate free folios to be used
2010 *			as the target of the folio migration.
2011 * @put_new_folio:	The function used to free target folios if migration
2012 *			fails, or NULL if no special handling is necessary.
2013 * @private:		Private data to be passed on to get_new_folio()
2014 * @mode:		The migration mode that specifies the constraints for
2015 *			folio migration, if any.
2016 * @reason:		The reason for folio migration.
2017 * @ret_succeeded:	Set to the number of folios migrated successfully if
2018 *			the caller passes a non-NULL pointer.
2019 *
2020 * The function returns after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no folios
2021 * are movable any more because the list has become empty or no retryable folios
2022 * exist any more. It is the caller's responsibility to call putback_movable_pages()
2023 * only if ret != 0.
2024 *
2025 * Returns the number of {normal, large, hugetlb} folios that were not
2026 * migrated, or an error code. The number of large folio splits will be
2027 * counted as the number of non-migrated large folios, no matter how many
2028 * split folios of the large folio are migrated successfully.
2029 */
2030int migrate_pages(struct list_head *from, new_folio_t get_new_folio,
2031		free_folio_t put_new_folio, unsigned long private,
2032		enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
2033{
2034	int rc, rc_gather;
2035	int nr_pages;
2036	struct folio *folio, *folio2;
2037	LIST_HEAD(folios);
2038	LIST_HEAD(ret_folios);
2039	LIST_HEAD(split_folios);
2040	struct migrate_pages_stats stats;
2041
2042	trace_mm_migrate_pages_start(mode, reason);
2043
2044	memset(&stats, 0, sizeof(stats));
2045
2046	rc_gather = migrate_hugetlbs(from, get_new_folio, put_new_folio, private,
2047				     mode, reason, &stats, &ret_folios);
2048	if (rc_gather < 0)
2049		goto out;
2050
2051again:
2052	nr_pages = 0;
2053	list_for_each_entry_safe(folio, folio2, from, lru) {
2054		/* Retried hugetlb folios will be kept on the list */
2055		if (folio_test_hugetlb(folio)) {
2056			list_move_tail(&folio->lru, &ret_folios);
2057			continue;
2058		}
2059
2060		nr_pages += folio_nr_pages(folio);
2061		if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
2062			break;
2063	}
2064	if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
2065		list_cut_before(&folios, from, &folio2->lru);
2066	else
2067		list_splice_init(from, &folios);
2068	if (mode == MIGRATE_ASYNC)
2069		rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
2070				private, mode, reason, &ret_folios,
2071				&split_folios, &stats,
2072				NR_MAX_MIGRATE_PAGES_RETRY);
2073	else
2074		rc = migrate_pages_sync(&folios, get_new_folio, put_new_folio,
2075				private, mode, reason, &ret_folios,
2076				&split_folios, &stats);
2077	list_splice_tail_init(&folios, &ret_folios);
2078	if (rc < 0) {
2079		rc_gather = rc;
2080		list_splice_tail(&split_folios, &ret_folios);
2081		goto out;
2082	}
2083	if (!list_empty(&split_folios)) {
2084		/*
2085		 * Failure isn't counted since all split folios of a large folio
2086		 * are counted as 1 failure already.  And, we only try to migrate
2087		 * with minimal effort, force MIGRATE_ASYNC mode and retry once.
2088		 */
2089		migrate_pages_batch(&split_folios, get_new_folio,
2090				put_new_folio, private, MIGRATE_ASYNC, reason,
2091				&ret_folios, NULL, &stats, 1);
2092		list_splice_tail_init(&split_folios, &ret_folios);
2093	}
2094	rc_gather += rc;
2095	if (!list_empty(from))
2096		goto again;
2097out:
2098	/*
2099	 * Put the permanently failed folios back on the migration list; they
2100	 * will be moved to the right list by the caller.
2101	 */
2102	list_splice(&ret_folios, from);
2103
2104	/*
2105	 * Return 0 in case all split folios of fail-to-migrate large folios
2106	 * are migrated successfully.
2107	 */
2108	if (list_empty(from))
2109		rc_gather = 0;
2110
2111	count_vm_events(PGMIGRATE_SUCCESS, stats.nr_succeeded);
2112	count_vm_events(PGMIGRATE_FAIL, stats.nr_failed_pages);
2113	count_vm_events(THP_MIGRATION_SUCCESS, stats.nr_thp_succeeded);
2114	count_vm_events(THP_MIGRATION_FAIL, stats.nr_thp_failed);
2115	count_vm_events(THP_MIGRATION_SPLIT, stats.nr_thp_split);
2116	trace_mm_migrate_pages(stats.nr_succeeded, stats.nr_failed_pages,
2117			       stats.nr_thp_succeeded, stats.nr_thp_failed,
2118			       stats.nr_thp_split, stats.nr_split, mode,
2119			       reason);
2120
2121	if (ret_succeeded)
2122		*ret_succeeded = stats.nr_succeeded;
2123
2124	return rc_gather;
2125}
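/*
 * Illustrative sketch (hypothetical helper, for reference only): a typical
 * caller isolates folios onto a private list, lets migrate_pages() try to
 * move them using an allocation callback such as alloc_migration_target(),
 * and puts back whatever could not be migrated:
 *
 *	static void example_migrate_list(struct list_head *folio_list, int nid)
 *	{
 *		struct migration_target_control mtc = {
 *			.nid		= nid,
 *			.gfp_mask	= GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
 *			.reason		= MR_SYSCALL,
 *		};
 *
 *		if (migrate_pages(folio_list, alloc_migration_target, NULL,
 *				  (unsigned long)&mtc, MIGRATE_SYNC,
 *				  MR_SYSCALL, NULL))
 *			putback_movable_pages(folio_list);
 *	}
 *
 * do_move_pages_to_node() below follows exactly this pattern.
 */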
2126
2127struct folio *alloc_migration_target(struct folio *src, unsigned long private)
2128{
2129	struct migration_target_control *mtc;
2130	gfp_t gfp_mask;
2131	unsigned int order = 0;
 
2132	int nid;
2133	int zidx;
2134
2135	mtc = (struct migration_target_control *)private;
2136	gfp_mask = mtc->gfp_mask;
2137	nid = mtc->nid;
2138	if (nid == NUMA_NO_NODE)
2139		nid = folio_nid(src);
2140
2141	if (folio_test_hugetlb(src)) {
2142		struct hstate *h = folio_hstate(src);
2143
2144		gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
2145		return alloc_hugetlb_folio_nodemask(h, nid,
2146						mtc->nmask, gfp_mask,
2147						htlb_allow_alloc_fallback(mtc->reason));
2148	}
2149
2150	if (folio_test_large(src)) {
2151		/*
2152		 * clear __GFP_RECLAIM to make the migration callback
2153		 * consistent with regular THP allocations.
2154		 */
2155		gfp_mask &= ~__GFP_RECLAIM;
2156		gfp_mask |= GFP_TRANSHUGE;
2157		order = folio_order(src);
2158	}
2159	zidx = zone_idx(folio_zone(src));
2160	if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
2161		gfp_mask |= __GFP_HIGHMEM;
2162
2163	return __folio_alloc(gfp_mask, order, nid, mtc->nmask);
2164}
2165
2166#ifdef CONFIG_NUMA
2167
2168static int store_status(int __user *status, int start, int value, int nr)
2169{
2170	while (nr-- > 0) {
2171		if (put_user(value, status + start))
2172			return -EFAULT;
2173		start++;
2174	}
2175
2176	return 0;
2177}
2178
2179static int do_move_pages_to_node(struct list_head *pagelist, int node)
 
2180{
2181	int err;
2182	struct migration_target_control mtc = {
2183		.nid = node,
2184		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
2185		.reason = MR_SYSCALL,
2186	};
2187
2188	err = migrate_pages(pagelist, alloc_migration_target, NULL,
2189		(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
2190	if (err)
2191		putback_movable_pages(pagelist);
2192	return err;
2193}
2194
2195static int __add_folio_for_migration(struct folio *folio, int node,
2196		struct list_head *pagelist, bool migrate_all)
2197{
2198	if (is_zero_folio(folio) || is_huge_zero_folio(folio))
2199		return -EFAULT;
2200
2201	if (folio_is_zone_device(folio))
2202		return -ENOENT;
2203
2204	if (folio_nid(folio) == node)
2205		return 0;
2206
2207	if (folio_likely_mapped_shared(folio) && !migrate_all)
2208		return -EACCES;
2209
2210	if (folio_test_hugetlb(folio)) {
2211		if (isolate_hugetlb(folio, pagelist))
2212			return 1;
2213	} else if (folio_isolate_lru(folio)) {
2214		list_add_tail(&folio->lru, pagelist);
2215		node_stat_mod_folio(folio,
2216			NR_ISOLATED_ANON + folio_is_file_lru(folio),
2217			folio_nr_pages(folio));
2218		return 1;
2219	}
2220	return -EBUSY;
2221}
2222
2223/*
2224 * Resolves the given address to a struct folio, isolates it from the LRU and
2225 * puts it on the given pagelist.
2226 * Returns:
2227 *     errno - if the folio cannot be found/isolated
2228 *     0 - when it doesn't have to be migrated because it is already on the
2229 *         target node
2230 *     1 - when it has been queued
2231 */
2232static int add_folio_for_migration(struct mm_struct *mm, const void __user *p,
2233		int node, struct list_head *pagelist, bool migrate_all)
2234{
2235	struct vm_area_struct *vma;
2236	struct folio_walk fw;
2237	struct folio *folio;
2238	unsigned long addr;
2239	int err = -EFAULT;
2240
2241	mmap_read_lock(mm);
2242	addr = (unsigned long)untagged_addr_remote(mm, p);
2243
2244	vma = vma_lookup(mm, addr);
2245	if (vma && vma_migratable(vma)) {
2246		folio = folio_walk_start(&fw, vma, addr, FW_ZEROPAGE);
2247		if (folio) {
2248			err = __add_folio_for_migration(folio, node, pagelist,
2249							migrate_all);
2250			folio_walk_end(&fw, vma);
2251		} else {
2252			err = -ENOENT;
2253		}
2254	}
2255	mmap_read_unlock(mm);
2256	return err;
2257}
2258
2259static int move_pages_and_store_status(int node,
2260		struct list_head *pagelist, int __user *status,
2261		int start, int i, unsigned long nr_pages)
2262{
2263	int err;
2264
2265	if (list_empty(pagelist))
2266		return 0;
2267
2268	err = do_move_pages_to_node(pagelist, node);
2269	if (err) {
2270		/*
2271		 * A positive err is the number of pages that
2272		 * failed to migrate.  Since we are going to
2273		 * abort and return the number of non-migrated
2274		 * pages, we need to include the rest of the
2275		 * nr_pages that have not been attempted as
2276		 * well.
2277		 */
2278		if (err > 0)
2279			err += nr_pages - i;
2280		return err;
2281	}
2282	return store_status(status, start, node, i - start);
2283}
2284
2285/*
2286 * Migrate an array of page addresses onto an array of nodes and fill
2287 * the corresponding array of status values.
2288 */
2289static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
2290			 unsigned long nr_pages,
2291			 const void __user * __user *pages,
2292			 const int __user *nodes,
2293			 int __user *status, int flags)
2294{
2295	compat_uptr_t __user *compat_pages = (void __user *)pages;
2296	int current_node = NUMA_NO_NODE;
2297	LIST_HEAD(pagelist);
2298	int start, i;
2299	int err = 0, err1;
2300
2301	lru_cache_disable();
2302
2303	for (i = start = 0; i < nr_pages; i++) {
2304		const void __user *p;
 
2305		int node;
2306
2307		err = -EFAULT;
2308		if (in_compat_syscall()) {
2309			compat_uptr_t cp;
2310
2311			if (get_user(cp, compat_pages + i))
2312				goto out_flush;
2313
2314			p = compat_ptr(cp);
2315		} else {
2316			if (get_user(p, pages + i))
2317				goto out_flush;
2318		}
2319		if (get_user(node, nodes + i))
2320			goto out_flush;
 
2321
2322		err = -ENODEV;
2323		if (node < 0 || node >= MAX_NUMNODES)
2324			goto out_flush;
2325		if (!node_state(node, N_MEMORY))
2326			goto out_flush;
2327
2328		err = -EACCES;
2329		if (!node_isset(node, task_nodes))
2330			goto out_flush;
2331
2332		if (current_node == NUMA_NO_NODE) {
2333			current_node = node;
2334			start = i;
2335		} else if (node != current_node) {
2336			err = move_pages_and_store_status(current_node,
2337					&pagelist, status, start, i, nr_pages);
2338			if (err)
2339				goto out;
2340			start = i;
2341			current_node = node;
2342		}
2343
2344		/*
2345		 * Errors in the page lookup or isolation are not fatal and we simply
2346		 * report them via status
2347		 */
2348		err = add_folio_for_migration(mm, p, current_node, &pagelist,
2349					      flags & MPOL_MF_MOVE_ALL);
2350
2351		if (err > 0) {
2352			/* The page is successfully queued for migration */
2353			continue;
2354		}
2355
2356		/*
2357		 * The move_pages() man page does not have an -EEXIST choice, so
2358		 * use -EFAULT instead.
2359		 */
2360		if (err == -EEXIST)
2361			err = -EFAULT;
2362
2363		/*
2364		 * If the page is already on the target node (!err), store the
2365		 * node, otherwise, store the err.
2366		 */
2367		err = store_status(status, i, err ? : current_node, 1);
2368		if (err)
2369			goto out_flush;
2370
2371		err = move_pages_and_store_status(current_node, &pagelist,
2372				status, start, i, nr_pages);
2373		if (err) {
2374			/* We have accounted for page i */
2375			if (err > 0)
2376				err--;
2377			goto out;
2378		}
2379		current_node = NUMA_NO_NODE;
2380	}
2381out_flush:
2382	/* Make sure we do not overwrite the existing error */
2383	err1 = move_pages_and_store_status(current_node, &pagelist,
2384				status, start, i, nr_pages);
2385	if (err >= 0)
2386		err = err1;
2387out:
2388	lru_cache_enable();
2389	return err;
2390}
2391
2392/*
2393 * Determine the nodes of an array of pages and store them in an array of status values.
2394 */
2395static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
2396				const void __user **pages, int *status)
2397{
2398	unsigned long i;
2399
2400	mmap_read_lock(mm);
2401
2402	for (i = 0; i < nr_pages; i++) {
2403		unsigned long addr = (unsigned long)(*pages);
2404		struct vm_area_struct *vma;
2405		struct folio_walk fw;
2406		struct folio *folio;
2407		int err = -EFAULT;
2408
2409		vma = vma_lookup(mm, addr);
2410		if (!vma)
2411			goto set_status;
2412
2413		folio = folio_walk_start(&fw, vma, addr, FW_ZEROPAGE);
2414		if (folio) {
2415			if (is_zero_folio(folio) || is_huge_zero_folio(folio))
2416				err = -EFAULT;
2417			else if (folio_is_zone_device(folio))
2418				err = -ENOENT;
2419			else
2420				err = folio_nid(folio);
2421			folio_walk_end(&fw, vma);
2422		} else {
2423			err = -ENOENT;
2424		}
2425set_status:
2426		*status = err;
2427
2428		pages++;
2429		status++;
2430	}
2431
2432	mmap_read_unlock(mm);
2433}
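/*
 * Illustrative note (annotation; not in the original source): in this query
 * path the per-page status ends up as the node id for a present, ordinary
 * page, -ENOENT when nothing is mapped at the address (or the mapping is
 * device memory), and -EFAULT when the address lies outside any VMA or maps
 * the shared zero page.
 */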
2434
2435static int get_compat_pages_array(const void __user *chunk_pages[],
2436				  const void __user * __user *pages,
2437				  unsigned long chunk_nr)
2438{
2439	compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages;
2440	compat_uptr_t p;
2441	int i;
2442
2443	for (i = 0; i < chunk_nr; i++) {
2444		if (get_user(p, pages32 + i))
2445			return -EFAULT;
2446		chunk_pages[i] = compat_ptr(p);
2447	}
2448
2449	return 0;
2450}
2451
2452/*
2453 * Determine the nodes of a user array of pages and store them in
2454 * a user array of status values.
2455 */
2456static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
2457			 const void __user * __user *pages,
2458			 int __user *status)
2459{
2460#define DO_PAGES_STAT_CHUNK_NR 16UL
2461	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
2462	int chunk_status[DO_PAGES_STAT_CHUNK_NR];
2463
2464	while (nr_pages) {
2465		unsigned long chunk_nr = min(nr_pages, DO_PAGES_STAT_CHUNK_NR);
2466
2467		if (in_compat_syscall()) {
2468			if (get_compat_pages_array(chunk_pages, pages,
2469						   chunk_nr))
2470				break;
2471		} else {
2472			if (copy_from_user(chunk_pages, pages,
2473				      chunk_nr * sizeof(*chunk_pages)))
2474				break;
2475		}
2476
2477		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
2478
2479		if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
2480			break;
2481
2482		pages += chunk_nr;
2483		status += chunk_nr;
2484		nr_pages -= chunk_nr;
2485	}
2486	return nr_pages ? -EFAULT : 0;
2487}
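/*
 * Illustrative note (annotation; not in the original source): this path is
 * reached when userspace passes nodes == NULL to move_pages(2); nothing is
 * migrated and only the current node of each page is reported, e.g. given
 * some populated void *page in the calling process:
 *
 *	int status;
 *	move_pages(0, 1, &page, NULL, &status, 0);
 */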
2488
2489static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
2490{
2491	struct task_struct *task;
2492	struct mm_struct *mm;
2493
2494	/*
2495	 * There is no need to check whether the current process has the right
2496	 * to modify the specified process when they are the same.
2497	 */
2498	if (!pid) {
2499		mmget(current->mm);
2500		*mem_nodes = cpuset_mems_allowed(current);
2501		return current->mm;
2502	}
2503
2504	task = find_get_task_by_vpid(pid);
2505	if (!task) {
2506		return ERR_PTR(-ESRCH);
2507	}
2508
2509	/*
2510	 * Check if this process has the right to modify the specified
2511	 * process. Use the regular "ptrace_may_access()" checks.
2512	 */
2513	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
2514		mm = ERR_PTR(-EPERM);
2515		goto out;
2516	}
2517
2518	mm = ERR_PTR(security_task_movememory(task));
2519	if (IS_ERR(mm))
2520		goto out;
2521	*mem_nodes = cpuset_mems_allowed(task);
2522	mm = get_task_mm(task);
2523out:
2524	put_task_struct(task);
2525	if (!mm)
2526		mm = ERR_PTR(-EINVAL);
2527	return mm;
2528}
2529
2530/*
2531 * Move a list of pages in the address space of the process identified by
2532 * pid (or of the currently executing process when pid is 0).
2533 */
2534static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
2535			     const void __user * __user *pages,
2536			     const int __user *nodes,
2537			     int __user *status, int flags)
2538{
2539	struct mm_struct *mm;
2540	int err;
2541	nodemask_t task_nodes;
2542
2543	/* Check flags */
2544	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
2545		return -EINVAL;
2546
2547	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
2548		return -EPERM;
2549
2550	mm = find_mm_struct(pid, &task_nodes);
2551	if (IS_ERR(mm))
2552		return PTR_ERR(mm);
2553
2554	if (nodes)
2555		err = do_pages_move(mm, task_nodes, nr_pages, pages,
2556				    nodes, status, flags);
2557	else
2558		err = do_pages_stat(mm, nr_pages, pages, status);
2559
2560	mmput(mm);
2561	return err;
2562}
2563
2564SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
2565		const void __user * __user *, pages,
2566		const int __user *, nodes,
2567		int __user *, status, int, flags)
2568{
2569	return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
2570}
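/*
 * Illustrative userspace sketch (annotation; not in the original source):
 * invoking the syscall defined above through the move_pages() wrapper
 * declared in <numaif.h> (libnuma).  The target node (1) and the 4 KiB page
 * size are assumptions for the example.
 *
 *	#include <numaif.h>
 *	#include <stdlib.h>
 *
 *	void *page;
 *	int node = 1, status = -1;
 *
 *	posix_memalign(&page, 4096, 4096);
 *	((char *)page)[0] = 0;	// touch it so the page is actually present
 *	if (move_pages(0, 1, &page, &node, &status, MPOL_MF_MOVE) == 0)
 *		;	// status now holds the node id or a negative errno
 *	free(page);
 */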
2571
2572#ifdef CONFIG_NUMA_BALANCING
2573/*
2574 * Returns true if this is a safe migration target node for misplaced NUMA
2575 * pages. Currently it only checks the watermarks, which is crude.
2576 */
2577static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
2578				   unsigned long nr_migrate_pages)
2579{
2580	int z;
2581
2582	for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2583		struct zone *zone = pgdat->node_zones + z;
2584
2585		if (!managed_zone(zone))
2586			continue;
2587
2588		/* Avoid waking kswapd by allocating nr_migrate_pages pages. */
2589		if (!zone_watermark_ok(zone, 0,
2590				       high_wmark_pages(zone) +
2591				       nr_migrate_pages,
2592				       ZONE_MOVABLE, ALLOC_CMA))
2593			continue;
2594		return true;
2595	}
2596	return false;
2597}
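/*
 * Illustrative restatement (annotation; not in the original source):
 * ignoring lowmem reserves, a zone on the target node passes the check
 * above roughly when
 *
 *	free_pages > high_wmark_pages(zone) + nr_migrate_pages
 *
 * i.e. accepting the whole batch would still leave the zone above its high
 * watermark, so the migration allocation does not wake kswapd.
 */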
2598
2599static struct folio *alloc_misplaced_dst_folio(struct folio *src,
2600					   unsigned long data)
2601{
2602	int nid = (int) data;
2603	int order = folio_order(src);
2604	gfp_t gfp = __GFP_THISNODE;
2605
2606	if (order > 0)
2607		gfp |= GFP_TRANSHUGE_LIGHT;
2608	else {
2609		gfp |= GFP_HIGHUSER_MOVABLE | __GFP_NOMEMALLOC | __GFP_NORETRY |
2610			__GFP_NOWARN;
2611		gfp &= ~__GFP_RECLAIM;
2612	}
2613	return __folio_alloc_node(gfp, order, nid);
2614}
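/*
 * Illustrative note (annotation; not in the original source): the gfp mask
 * above is deliberately unforgiving.  __GFP_THISNODE pins the allocation to
 * the intended destination node, and for order-0 folios reclaim is masked
 * off (gfp &= ~__GFP_RECLAIM), so a destination node with no free memory
 * simply makes this NUMA-balancing migration fail rather than triggering
 * reclaim there.
 */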
2615
2616/*
2617 * Prepare for calling migrate_misplaced_folio() by isolating the folio if
2618 * permitted. Must be called with the PTL still held.
2619 */
2620int migrate_misplaced_folio_prepare(struct folio *folio,
2621		struct vm_area_struct *vma, int node)
2622{
2623	int nr_pages = folio_nr_pages(folio);
2624	pg_data_t *pgdat = NODE_DATA(node);
2625
2626	if (folio_is_file_lru(folio)) {
2627		/*
2628		 * Do not migrate file folios that are mapped in multiple
2629		 * processes with execute permissions as they are probably
2630		 * shared libraries.
2631		 *
2632		 * See folio_likely_mapped_shared() for possible imprecision
2633		 * when we cannot easily detect whether a folio is shared.
2634		 */
2635		if ((vma->vm_flags & VM_EXEC) &&
2636		    folio_likely_mapped_shared(folio))
2637			return -EACCES;
2638
2639		/*
2640		 * Do not migrate dirty folios: not all filesystems can move
2641		 * dirty folios in MIGRATE_ASYNC mode, so trying is a waste
2642		 * of cycles.
2643		 */
2644		if (folio_test_dirty(folio))
2645			return -EAGAIN;
2646	}
2647
2648	/* Avoid migrating to a node that is nearly full */
2649	if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
2650		int z;
2651
2652		if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING))
2653			return -EAGAIN;
2654		for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2655			if (managed_zone(pgdat->node_zones + z))
2656				break;
2657		}
2658
2659		/*
2660		 * If the target node has no managed zones, do not proceed
2661		 * any further.
2662		 */
2663		if (z < 0)
2664			return -EAGAIN;
2665
2666		wakeup_kswapd(pgdat->node_zones + z, 0,
2667			      folio_order(folio), ZONE_MOVABLE);
2668		return -EAGAIN;
2669	}
2670
2671	if (!folio_isolate_lru(folio))
2672		return -EAGAIN;
2673
2674	node_stat_mod_folio(folio, NR_ISOLATED_ANON + folio_is_file_lru(folio),
2675			    nr_pages);
2676	return 0;
2677}
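/*
 * Illustrative call pattern (annotation; not in the original source, and
 * the caller shown is an assumption based on the NUMA hinting fault path
 * with schematic names folio, vma, ptep, ptl and target_nid): the
 * preparation step runs with the page-table lock held, the lock is
 * dropped, and only then is the actual migration attempted:
 *
 *	if (migrate_misplaced_folio_prepare(folio, vma, target_nid))
 *		goto out_map;			// keep the folio where it is
 *	pte_unmap_unlock(ptep, ptl);
 *	migrate_misplaced_folio(folio, vma, target_nid);
 */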
2678
2679/*
2680 * Attempt to migrate a misplaced folio to the specified destination
2681 * node. The caller is expected to have isolated the folio by calling
2682 * migrate_misplaced_folio_prepare(), which leaves the folio with an
2683 * elevated reference count. This function un-isolates the folio and
2684 * drops that reference before returning.
2685 */
2686int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
2687			    int node)
2688{
2689	pg_data_t *pgdat = NODE_DATA(node);
2690	int nr_remaining;
2691	unsigned int nr_succeeded;
2692	LIST_HEAD(migratepages);
2693	struct mem_cgroup *memcg = get_mem_cgroup_from_folio(folio);
2694	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
2695
2696	list_add(&folio->lru, &migratepages);
2697	nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio,
2698				     NULL, node, MIGRATE_ASYNC,
2699				     MR_NUMA_MISPLACED, &nr_succeeded);
2700	if (nr_remaining && !list_empty(&migratepages))
2701		putback_movable_pages(&migratepages);
2702	if (nr_succeeded) {
2703		count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
2704		count_memcg_events(memcg, NUMA_PAGE_MIGRATE, nr_succeeded);
2705		if ((sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING)
2706		    && !node_is_toptier(folio_nid(folio))
2707		    && node_is_toptier(node))
2708			mod_lruvec_state(lruvec, PGPROMOTE_SUCCESS, nr_succeeded);
2709	}
2710	mem_cgroup_put(memcg);
2711	BUG_ON(!list_empty(&migratepages));
2712	return nr_remaining ? -EAGAIN : 0;
2713}
2714#endif /* CONFIG_NUMA_BALANCING */
2715#endif /* CONFIG_NUMA */