   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * linux/mm/compaction.c
   4 *
   5 * Memory compaction for the reduction of external fragmentation. Note that
   6 * this heavily depends upon page migration to do all the real heavy
   7 * lifting
   8 *
   9 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
  10 */
  11#include <linux/cpu.h>
  12#include <linux/swap.h>
  13#include <linux/migrate.h>
  14#include <linux/compaction.h>
  15#include <linux/mm_inline.h>
  16#include <linux/sched/signal.h>
  17#include <linux/backing-dev.h>
  18#include <linux/sysctl.h>
  19#include <linux/sysfs.h>
  20#include <linux/page-isolation.h>
  21#include <linux/kasan.h>
  22#include <linux/kthread.h>
  23#include <linux/freezer.h>
  24#include <linux/page_owner.h>
  25#include <linux/psi.h>
  26#include "internal.h"
  27
  28#ifdef CONFIG_COMPACTION
  29static inline void count_compact_event(enum vm_event_item item)
  30{
  31	count_vm_event(item);
  32}
  33
  34static inline void count_compact_events(enum vm_event_item item, long delta)
  35{
  36	count_vm_events(item, delta);
  37}
  38#else
  39#define count_compact_event(item) do { } while (0)
  40#define count_compact_events(item, delta) do { } while (0)
  41#endif
  42
  43#if defined CONFIG_COMPACTION || defined CONFIG_CMA
  44
  45#define CREATE_TRACE_POINTS
  46#include <trace/events/compaction.h>
  47
  48#define block_start_pfn(pfn, order)	round_down(pfn, 1UL << (order))
  49#define block_end_pfn(pfn, order)	ALIGN((pfn) + 1, 1UL << (order))
  50#define pageblock_start_pfn(pfn)	block_start_pfn(pfn, pageblock_order)
  51#define pageblock_end_pfn(pfn)		block_end_pfn(pfn, pageblock_order)
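/*
 * Editor's note (worked example, not part of the original source): with
 * 4KiB pages and pageblock_order == 9 (a 512-page, 2MiB pageblock, the
 * common x86-64 configuration), the helpers above give:
 *
 *	block_start_pfn(1000, 9)  == round_down(1000, 512) == 512
 *	block_end_pfn(1000, 9)    == ALIGN(1000 + 1, 512)  == 1024
 *	pageblock_start_pfn(1023) == 512
 *	pageblock_end_pfn(1023)   == 1024
 */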
  52
  53static unsigned long release_freepages(struct list_head *freelist)
  54{
  55	struct page *page, *next;
  56	unsigned long high_pfn = 0;
  57
  58	list_for_each_entry_safe(page, next, freelist, lru) {
  59		unsigned long pfn = page_to_pfn(page);
  60		list_del(&page->lru);
  61		__free_page(page);
  62		if (pfn > high_pfn)
  63			high_pfn = pfn;
  64	}
  65
  66	return high_pfn;
  67}
  68
  69static void split_map_pages(struct list_head *list)
  70{
  71	unsigned int i, order, nr_pages;
  72	struct page *page, *next;
  73	LIST_HEAD(tmp_list);
  74
  75	list_for_each_entry_safe(page, next, list, lru) {
  76		list_del(&page->lru);
  77
  78		order = page_private(page);
  79		nr_pages = 1 << order;
  80
  81		post_alloc_hook(page, order, __GFP_MOVABLE);
  82		if (order)
  83			split_page(page, order);
  84
  85		for (i = 0; i < nr_pages; i++) {
  86			list_add(&page->lru, &tmp_list);
  87			page++;
  88		}
  89	}
  90
  91	list_splice(&tmp_list, list);
  92}
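/*
 * Editor's note (illustration, not part of the original source): the free
 * scanner records the buddy order of each isolated page in page_private().
 * A single order-2 entry on the incoming list is prepared with
 * post_alloc_hook() and then split, so one list entry
 *
 *	[ page P, private == 2 ]
 *
 * becomes four order-0 entries [ P, P+1, P+2, P+3 ], ready to be handed out
 * one at a time (e.g. by compaction_alloc() further down in this file).
 */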
  93
  94#ifdef CONFIG_COMPACTION
  95
  96int PageMovable(struct page *page)
  97{
  98	struct address_space *mapping;
  99
 100	VM_BUG_ON_PAGE(!PageLocked(page), page);
 101	if (!__PageMovable(page))
 102		return 0;
 103
 104	mapping = page_mapping(page);
 105	if (mapping && mapping->a_ops && mapping->a_ops->isolate_page)
 106		return 1;
 107
 108	return 0;
 109}
 110EXPORT_SYMBOL(PageMovable);
 111
 112void __SetPageMovable(struct page *page, struct address_space *mapping)
 113{
 114	VM_BUG_ON_PAGE(!PageLocked(page), page);
 115	VM_BUG_ON_PAGE((unsigned long)mapping & PAGE_MAPPING_MOVABLE, page);
 116	page->mapping = (void *)((unsigned long)mapping | PAGE_MAPPING_MOVABLE);
 117}
 118EXPORT_SYMBOL(__SetPageMovable);
 119
 120void __ClearPageMovable(struct page *page)
 121{
 122	VM_BUG_ON_PAGE(!PageLocked(page), page);
 123	VM_BUG_ON_PAGE(!PageMovable(page), page);
 124	/*
  125	 * Clear the registered address_space value while keeping the
  126	 * PAGE_MAPPING_MOVABLE flag, so the VM can detect a page released by
  127	 * the driver after isolation and migration doesn't try to put it back.
 128	 */
 129	page->mapping = (void *)((unsigned long)page->mapping &
 130				PAGE_MAPPING_MOVABLE);
 131}
 132EXPORT_SYMBOL(__ClearPageMovable);
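/*
 * Editor's note (sketch, not part of the original source): the driver side
 * of this protocol, as used by e.g. zsmalloc or the virtio balloon, supplies
 * migration callbacks in its address_space_operations and then marks each
 * page movable under the page lock. The my_* names below are hypothetical:
 *
 *	static const struct address_space_operations my_movable_aops = {
 *		.isolate_page	= my_isolate_page,
 *		.migratepage	= my_migratepage,
 *		.putback_page	= my_putback_page,
 *	};
 *
 *	lock_page(page);
 *	__SetPageMovable(page, mapping);   (mapping->a_ops points at my_movable_aops)
 *	unlock_page(page);
 */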
 133
 134/* Do not skip compaction more than 64 times */
 135#define COMPACT_MAX_DEFER_SHIFT 6
 136
 137/*
 138 * Compaction is deferred when compaction fails to result in a page
  139 * allocation success. 1 << compact_defer_shift compactions are skipped up
  140 * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT.
 141 */
 142void defer_compaction(struct zone *zone, int order)
 143{
 144	zone->compact_considered = 0;
 145	zone->compact_defer_shift++;
 146
 147	if (order < zone->compact_order_failed)
 148		zone->compact_order_failed = order;
 149
 150	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
 151		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
 152
 153	trace_mm_compaction_defer_compaction(zone, order);
 154}
 155
 156/* Returns true if compaction should be skipped this time */
 157bool compaction_deferred(struct zone *zone, int order)
 158{
 159	unsigned long defer_limit = 1UL << zone->compact_defer_shift;
 160
 161	if (order < zone->compact_order_failed)
 162		return false;
 163
 164	/* Avoid possible overflow */
 165	if (++zone->compact_considered > defer_limit)
 166		zone->compact_considered = defer_limit;
 167
 168	if (zone->compact_considered >= defer_limit)
 169		return false;
 170
 171	trace_mm_compaction_deferred(zone, order);
 172
 173	return true;
 174}
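/*
 * Editor's note (worked example, not part of the original source), for
 * repeated failures at the same order:
 *
 *	failure #1: compact_defer_shift == 1, the next request is deferred
 *	            and the 2nd is allowed to retry
 *	failure #2: compact_defer_shift == 2, the next 3 requests are
 *	            deferred and the 4th retries
 *	...
 *	failure #6 and later: compact_defer_shift == COMPACT_MAX_DEFER_SHIFT,
 *	            up to 63 requests are deferred before the 64th retries
 */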
 175
 176/*
 177 * Update defer tracking counters after successful compaction of given order,
 178 * which means an allocation either succeeded (alloc_success == true) or is
 179 * expected to succeed.
 180 */
 181void compaction_defer_reset(struct zone *zone, int order,
 182		bool alloc_success)
 183{
 184	if (alloc_success) {
 185		zone->compact_considered = 0;
 186		zone->compact_defer_shift = 0;
 187	}
 188	if (order >= zone->compact_order_failed)
 189		zone->compact_order_failed = order + 1;
 190
 191	trace_mm_compaction_defer_reset(zone, order);
 192}
 193
 194/* Returns true if restarting compaction after many failures */
 195bool compaction_restarting(struct zone *zone, int order)
 196{
 197	if (order < zone->compact_order_failed)
 198		return false;
 199
 200	return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
 201		zone->compact_considered >= 1UL << zone->compact_defer_shift;
 202}
 203
 204/* Returns true if the pageblock should be scanned for pages to isolate. */
 205static inline bool isolation_suitable(struct compact_control *cc,
 206					struct page *page)
 207{
 208	if (cc->ignore_skip_hint)
 209		return true;
 210
 211	return !get_pageblock_skip(page);
 212}
 213
 214static void reset_cached_positions(struct zone *zone)
 215{
 216	zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
 217	zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
 218	zone->compact_cached_free_pfn =
 219				pageblock_start_pfn(zone_end_pfn(zone) - 1);
 220}
 221
 222/*
  223 * Compound pages of >= pageblock_order should consistently be skipped until
 224 * released. It is always pointless to compact pages of such order (if they are
 225 * migratable), and the pageblocks they occupy cannot contain any free pages.
 226 */
 227static bool pageblock_skip_persistent(struct page *page)
 228{
 229	if (!PageCompound(page))
 230		return false;
 231
 232	page = compound_head(page);
 233
 234	if (compound_order(page) >= pageblock_order)
 235		return true;
 236
 237	return false;
 238}
 239
 240static bool
 241__reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
 242							bool check_target)
 243{
 244	struct page *page = pfn_to_online_page(pfn);
 245	struct page *block_page;
 246	struct page *end_page;
 247	unsigned long block_pfn;
 248
 249	if (!page)
 250		return false;
 251	if (zone != page_zone(page))
 252		return false;
 253	if (pageblock_skip_persistent(page))
 254		return false;
 255
 256	/*
 257	 * If skip is already cleared do no further checking once the
 258	 * restart points have been set.
 259	 */
 260	if (check_source && check_target && !get_pageblock_skip(page))
 261		return true;
 262
 263	/*
 264	 * If clearing skip for the target scanner, do not select a
 265	 * non-movable pageblock as the starting point.
 266	 */
 267	if (!check_source && check_target &&
 268	    get_pageblock_migratetype(page) != MIGRATE_MOVABLE)
 269		return false;
 270
 271	/* Ensure the start of the pageblock or zone is online and valid */
 272	block_pfn = pageblock_start_pfn(pfn);
 273	block_pfn = max(block_pfn, zone->zone_start_pfn);
 274	block_page = pfn_to_online_page(block_pfn);
 275	if (block_page) {
 276		page = block_page;
 277		pfn = block_pfn;
 278	}
 279
 280	/* Ensure the end of the pageblock or zone is online and valid */
 281	block_pfn = pageblock_end_pfn(pfn) - 1;
 282	block_pfn = min(block_pfn, zone_end_pfn(zone) - 1);
 283	end_page = pfn_to_online_page(block_pfn);
 284	if (!end_page)
 285		return false;
 286
 287	/*
 288	 * Only clear the hint if a sample indicates there is either a
 289	 * free page or an LRU page in the block. One or other condition
 290	 * is necessary for the block to be a migration source/target.
 291	 */
 292	do {
 293		if (pfn_valid_within(pfn)) {
 294			if (check_source && PageLRU(page)) {
 295				clear_pageblock_skip(page);
 296				return true;
 297			}
 298
 299			if (check_target && PageBuddy(page)) {
 300				clear_pageblock_skip(page);
 301				return true;
 302			}
 303		}
 304
 305		page += (1 << PAGE_ALLOC_COSTLY_ORDER);
 306		pfn += (1 << PAGE_ALLOC_COSTLY_ORDER);
 307	} while (page <= end_page);
 308
 309	return false;
 310}
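/*
 * Editor's note (worked example, not part of the original source): the
 * do/while loop above advances in strides of 1 << PAGE_ALLOC_COSTLY_ORDER
 * pages. Assuming PAGE_ALLOC_COSTLY_ORDER == 3 and a 512-page pageblock
 * (pageblock_order == 9), it samples pfn, pfn + 8, pfn + 16, ... i.e. at
 * most 64 of the 512 pages, looking for a single PageLRU (source) or
 * PageBuddy (target) page before clearing the skip hint.
 */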
 311
 312/*
 313 * This function is called to clear all cached information on pageblocks that
 314 * should be skipped for page isolation when the migrate and free page scanner
 315 * meet.
 316 */
 317static void __reset_isolation_suitable(struct zone *zone)
 318{
 319	unsigned long migrate_pfn = zone->zone_start_pfn;
 320	unsigned long free_pfn = zone_end_pfn(zone) - 1;
 321	unsigned long reset_migrate = free_pfn;
 322	unsigned long reset_free = migrate_pfn;
 323	bool source_set = false;
 324	bool free_set = false;
 325
 326	if (!zone->compact_blockskip_flush)
 327		return;
 328
 329	zone->compact_blockskip_flush = false;
 330
 331	/*
 332	 * Walk the zone and update pageblock skip information. Source looks
 333	 * for PageLRU while target looks for PageBuddy. When the scanner
 334	 * is found, both PageBuddy and PageLRU are checked as the pageblock
 335	 * is suitable as both source and target.
 336	 */
 337	for (; migrate_pfn < free_pfn; migrate_pfn += pageblock_nr_pages,
 338					free_pfn -= pageblock_nr_pages) {
 339		cond_resched();
 340
 341		/* Update the migrate PFN */
 342		if (__reset_isolation_pfn(zone, migrate_pfn, true, source_set) &&
 343		    migrate_pfn < reset_migrate) {
 344			source_set = true;
 345			reset_migrate = migrate_pfn;
 346			zone->compact_init_migrate_pfn = reset_migrate;
 347			zone->compact_cached_migrate_pfn[0] = reset_migrate;
 348			zone->compact_cached_migrate_pfn[1] = reset_migrate;
 349		}
 350
 351		/* Update the free PFN */
 352		if (__reset_isolation_pfn(zone, free_pfn, free_set, true) &&
 353		    free_pfn > reset_free) {
 354			free_set = true;
 355			reset_free = free_pfn;
 356			zone->compact_init_free_pfn = reset_free;
 357			zone->compact_cached_free_pfn = reset_free;
 358		}
 359	}
 360
 361	/* Leave no distance if no suitable block was reset */
 362	if (reset_migrate >= reset_free) {
 363		zone->compact_cached_migrate_pfn[0] = migrate_pfn;
 364		zone->compact_cached_migrate_pfn[1] = migrate_pfn;
 365		zone->compact_cached_free_pfn = free_pfn;
 366	}
 367}
 368
 369void reset_isolation_suitable(pg_data_t *pgdat)
 370{
 371	int zoneid;
 372
 373	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
 374		struct zone *zone = &pgdat->node_zones[zoneid];
 375		if (!populated_zone(zone))
 376			continue;
 377
 378		/* Only flush if a full compaction finished recently */
 379		if (zone->compact_blockskip_flush)
 380			__reset_isolation_suitable(zone);
 381	}
 382}
 383
 384/*
 385 * Sets the pageblock skip bit if it was clear. Note that this is a hint as
  386 * locks are not required for readers/writers. Returns true if it was already set.
 387 */
 388static bool test_and_set_skip(struct compact_control *cc, struct page *page,
 389							unsigned long pfn)
 390{
 391	bool skip;
 392
  393	/* Do not update if skip hint is being ignored */
 394	if (cc->ignore_skip_hint)
 395		return false;
 396
 397	if (!IS_ALIGNED(pfn, pageblock_nr_pages))
 398		return false;
 399
 400	skip = get_pageblock_skip(page);
 401	if (!skip && !cc->no_set_skip_hint)
 402		set_pageblock_skip(page);
 403
 404	return skip;
 405}
 406
 407static void update_cached_migrate(struct compact_control *cc, unsigned long pfn)
 408{
 409	struct zone *zone = cc->zone;
 410
 411	pfn = pageblock_end_pfn(pfn);
 412
 413	/* Set for isolation rather than compaction */
 414	if (cc->no_set_skip_hint)
 415		return;
 416
 417	if (pfn > zone->compact_cached_migrate_pfn[0])
 418		zone->compact_cached_migrate_pfn[0] = pfn;
 419	if (cc->mode != MIGRATE_ASYNC &&
 420	    pfn > zone->compact_cached_migrate_pfn[1])
 421		zone->compact_cached_migrate_pfn[1] = pfn;
 422}
 423
 424/*
 425 * If no pages were isolated then mark this pageblock to be skipped in the
 426 * future. The information is later cleared by __reset_isolation_suitable().
 427 */
 428static void update_pageblock_skip(struct compact_control *cc,
 429			struct page *page, unsigned long pfn)
 430{
 431	struct zone *zone = cc->zone;
 432
 433	if (cc->no_set_skip_hint)
 434		return;
 435
 436	if (!page)
 437		return;
 438
 439	set_pageblock_skip(page);
 440
 441	/* Update where async and sync compaction should restart */
 442	if (pfn < zone->compact_cached_free_pfn)
 443		zone->compact_cached_free_pfn = pfn;
 444}
 445#else
 446static inline bool isolation_suitable(struct compact_control *cc,
 447					struct page *page)
 448{
 449	return true;
 450}
 451
 452static inline bool pageblock_skip_persistent(struct page *page)
 453{
 454	return false;
 455}
 456
 457static inline void update_pageblock_skip(struct compact_control *cc,
 458			struct page *page, unsigned long pfn)
 459{
 460}
 461
 462static void update_cached_migrate(struct compact_control *cc, unsigned long pfn)
 463{
 464}
 465
 466static bool test_and_set_skip(struct compact_control *cc, struct page *page,
 467							unsigned long pfn)
 468{
 469	return false;
 470}
 471#endif /* CONFIG_COMPACTION */
 472
 473/*
 474 * Compaction requires the taking of some coarse locks that are potentially
 475 * very heavily contended. For async compaction, trylock and record if the
 476 * lock is contended. The lock will still be acquired but compaction will
 477 * abort when the current block is finished regardless of success rate.
 478 * Sync compaction acquires the lock.
 479 *
 480 * Always returns true which makes it easier to track lock state in callers.
 481 */
 482static bool compact_lock_irqsave(spinlock_t *lock, unsigned long *flags,
 483						struct compact_control *cc)
 484{
 485	/* Track if the lock is contended in async mode */
 486	if (cc->mode == MIGRATE_ASYNC && !cc->contended) {
 487		if (spin_trylock_irqsave(lock, *flags))
 488			return true;
 489
 490		cc->contended = true;
 491	}
 492
 493	spin_lock_irqsave(lock, *flags);
 494	return true;
 495}
 496
 497/*
 498 * Compaction requires the taking of some coarse locks that are potentially
 499 * very heavily contended. The lock should be periodically unlocked to avoid
 500 * having disabled IRQs for a long time, even when there is nobody waiting on
 501 * the lock. It might also be that allowing the IRQs will result in
 502 * need_resched() becoming true. If scheduling is needed, async compaction
 503 * aborts. Sync compaction schedules.
 504 * Either compaction type will also abort if a fatal signal is pending.
 505 * In either case if the lock was locked, it is dropped and not regained.
 506 *
 507 * Returns true if compaction should abort due to fatal signal pending, or
 508 *		async compaction due to need_resched()
 509 * Returns false when compaction can continue (sync compaction might have
 510 *		scheduled)
 511 */
 512static bool compact_unlock_should_abort(spinlock_t *lock,
 513		unsigned long flags, bool *locked, struct compact_control *cc)
 514{
 515	if (*locked) {
 516		spin_unlock_irqrestore(lock, flags);
 517		*locked = false;
 518	}
 519
 520	if (fatal_signal_pending(current)) {
 521		cc->contended = true;
 522		return true;
 523	}
 524
 525	cond_resched();
 526
 527	return false;
 528}
 529
 530/*
 531 * Isolate free pages onto a private freelist. If @strict is true, will abort
  532 * returning 0 on any invalid PFNs or non-free pages inside the pageblock
 533 * (even though it may still end up isolating some pages).
 534 */
 535static unsigned long isolate_freepages_block(struct compact_control *cc,
 536				unsigned long *start_pfn,
 537				unsigned long end_pfn,
 538				struct list_head *freelist,
 539				unsigned int stride,
 540				bool strict)
 541{
 542	int nr_scanned = 0, total_isolated = 0;
 543	struct page *cursor;
 544	unsigned long flags = 0;
 545	bool locked = false;
 546	unsigned long blockpfn = *start_pfn;
 547	unsigned int order;
 548
 549	/* Strict mode is for isolation, speed is secondary */
 550	if (strict)
 551		stride = 1;
 552
 553	cursor = pfn_to_page(blockpfn);
 554
 555	/* Isolate free pages. */
 556	for (; blockpfn < end_pfn; blockpfn += stride, cursor += stride) {
 557		int isolated;
 558		struct page *page = cursor;
 559
 560		/*
 561		 * Periodically drop the lock (if held) regardless of its
 562		 * contention, to give chance to IRQs. Abort if fatal signal
 563		 * pending or async compaction detects need_resched()
 564		 */
 565		if (!(blockpfn % SWAP_CLUSTER_MAX)
 566		    && compact_unlock_should_abort(&cc->zone->lock, flags,
 567								&locked, cc))
 568			break;
 569
 570		nr_scanned++;
 571		if (!pfn_valid_within(blockpfn))
 572			goto isolate_fail;
 573
 574		/*
 575		 * For compound pages such as THP and hugetlbfs, we can save
 576		 * potentially a lot of iterations if we skip them at once.
 577		 * The check is racy, but we can consider only valid values
 578		 * and the only danger is skipping too much.
 579		 */
 580		if (PageCompound(page)) {
 581			const unsigned int order = compound_order(page);
 582
 583			if (likely(order < MAX_ORDER)) {
 584				blockpfn += (1UL << order) - 1;
 585				cursor += (1UL << order) - 1;
 586			}
 587			goto isolate_fail;
 588		}
 589
 590		if (!PageBuddy(page))
 591			goto isolate_fail;
 592
 593		/*
 594		 * If we already hold the lock, we can skip some rechecking.
 595		 * Note that if we hold the lock now, checked_pageblock was
 596		 * already set in some previous iteration (or strict is true),
 597		 * so it is correct to skip the suitable migration target
 598		 * recheck as well.
 599		 */
 600		if (!locked) {
 601			locked = compact_lock_irqsave(&cc->zone->lock,
 602								&flags, cc);
 603
 604			/* Recheck this is a buddy page under lock */
 605			if (!PageBuddy(page))
 606				goto isolate_fail;
 607		}
 608
 609		/* Found a free page, will break it into order-0 pages */
 610		order = page_order(page);
 611		isolated = __isolate_free_page(page, order);
 612		if (!isolated)
 613			break;
 614		set_page_private(page, order);
 615
 616		total_isolated += isolated;
 617		cc->nr_freepages += isolated;
 618		list_add_tail(&page->lru, freelist);
 619
 620		if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
 621			blockpfn += isolated;
 622			break;
 623		}
 624		/* Advance to the end of split page */
 625		blockpfn += isolated - 1;
 626		cursor += isolated - 1;
 627		continue;
 628
 629isolate_fail:
 630		if (strict)
 631			break;
 632		else
 633			continue;
 634
 635	}
 636
 637	if (locked)
 638		spin_unlock_irqrestore(&cc->zone->lock, flags);
 639
 640	/*
 641	 * There is a tiny chance that we have read bogus compound_order(),
 642	 * so be careful to not go outside of the pageblock.
 643	 */
 644	if (unlikely(blockpfn > end_pfn))
 645		blockpfn = end_pfn;
 646
 647	trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn,
 648					nr_scanned, total_isolated);
 649
 650	/* Record how far we have got within the block */
 651	*start_pfn = blockpfn;
 652
 653	/*
 654	 * If strict isolation is requested by CMA then check that all the
 655	 * pages requested were isolated. If there were any failures, 0 is
 656	 * returned and CMA will fail.
 657	 */
 658	if (strict && blockpfn < end_pfn)
 659		total_isolated = 0;
 660
 661	cc->total_free_scanned += nr_scanned;
 662	if (total_isolated)
 663		count_compact_events(COMPACTISOLATED, total_isolated);
 664	return total_isolated;
 665}
 666
 667/**
 668 * isolate_freepages_range() - isolate free pages.
 669 * @cc:        Compaction control structure.
 670 * @start_pfn: The first PFN to start isolating.
 671 * @end_pfn:   The one-past-last PFN.
 672 *
 673 * Non-free pages, invalid PFNs, or zone boundaries within the
  674 * [start_pfn, end_pfn) range are considered errors and cause the function
  675 * to undo its actions and return zero.
  676 *
  677 * Otherwise, the function returns the one-past-the-last PFN of the
  678 * isolated pages (which may be greater than end_pfn if the end fell in
  679 * the middle of a free page).
 680 */
 681unsigned long
 682isolate_freepages_range(struct compact_control *cc,
 683			unsigned long start_pfn, unsigned long end_pfn)
 684{
 685	unsigned long isolated, pfn, block_start_pfn, block_end_pfn;
 686	LIST_HEAD(freelist);
 687
 688	pfn = start_pfn;
 689	block_start_pfn = pageblock_start_pfn(pfn);
 690	if (block_start_pfn < cc->zone->zone_start_pfn)
 691		block_start_pfn = cc->zone->zone_start_pfn;
 692	block_end_pfn = pageblock_end_pfn(pfn);
 693
 694	for (; pfn < end_pfn; pfn += isolated,
 695				block_start_pfn = block_end_pfn,
 696				block_end_pfn += pageblock_nr_pages) {
 697		/* Protect pfn from changing by isolate_freepages_block */
 698		unsigned long isolate_start_pfn = pfn;
 699
 700		block_end_pfn = min(block_end_pfn, end_pfn);
 701
 702		/*
 703		 * pfn could pass the block_end_pfn if isolated freepage
 704		 * is more than pageblock order. In this case, we adjust
 705		 * scanning range to right one.
 706		 */
 707		if (pfn >= block_end_pfn) {
 708			block_start_pfn = pageblock_start_pfn(pfn);
 709			block_end_pfn = pageblock_end_pfn(pfn);
 710			block_end_pfn = min(block_end_pfn, end_pfn);
 711		}
 712
 713		if (!pageblock_pfn_to_page(block_start_pfn,
 714					block_end_pfn, cc->zone))
 715			break;
 716
 717		isolated = isolate_freepages_block(cc, &isolate_start_pfn,
 718					block_end_pfn, &freelist, 0, true);
 719
 720		/*
 721		 * In strict mode, isolate_freepages_block() returns 0 if
 722		 * there are any holes in the block (ie. invalid PFNs or
 723		 * non-free pages).
 724		 */
 725		if (!isolated)
 726			break;
 727
 728		/*
 729		 * If we managed to isolate pages, it is always (1 << n) *
 730		 * pageblock_nr_pages for some non-negative n.  (Max order
 731		 * page may span two pageblocks).
 732		 */
 733	}
 734
 735	/* __isolate_free_page() does not map the pages */
 736	split_map_pages(&freelist);
 737
 738	if (pfn < end_pfn) {
 739		/* Loop terminated early, cleanup. */
 740		release_freepages(&freelist);
 741		return 0;
 742	}
 743
 744	/* We don't use freelists for anything. */
 745	return pfn;
 746}
 747
 748/* Similar to reclaim, but different enough that they don't share logic */
 749static bool too_many_isolated(pg_data_t *pgdat)
 750{
 751	unsigned long active, inactive, isolated;
 752
 753	inactive = node_page_state(pgdat, NR_INACTIVE_FILE) +
 754			node_page_state(pgdat, NR_INACTIVE_ANON);
 755	active = node_page_state(pgdat, NR_ACTIVE_FILE) +
 756			node_page_state(pgdat, NR_ACTIVE_ANON);
 757	isolated = node_page_state(pgdat, NR_ISOLATED_FILE) +
 758			node_page_state(pgdat, NR_ISOLATED_ANON);
 759
 760	return isolated > (inactive + active) / 2;
 761}
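/*
 * Editor's note (worked example, not part of the original source): on a
 * node with 60000 inactive and 20000 active LRU pages, this returns true
 * once more than (60000 + 20000) / 2 = 40000 pages are currently isolated,
 * and isolate_migratepages_block() below then waits (or aborts, for async
 * compaction) until parallel reclaim/compaction puts some of them back.
 */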
 762
 763/**
 764 * isolate_migratepages_block() - isolate all migrate-able pages within
 765 *				  a single pageblock
 766 * @cc:		Compaction control structure.
 767 * @low_pfn:	The first PFN to isolate
 768 * @end_pfn:	The one-past-the-last PFN to isolate, within same pageblock
 769 * @isolate_mode: Isolation mode to be used.
 770 *
 771 * Isolate all pages that can be migrated from the range specified by
 772 * [low_pfn, end_pfn). The range is expected to be within same pageblock.
 773 * Returns zero if there is a fatal signal pending, otherwise PFN of the
  774 * first page that was not scanned (which may be less than, equal to, or more
 775 * than end_pfn).
 776 *
 777 * The pages are isolated on cc->migratepages list (not required to be empty),
 778 * and cc->nr_migratepages is updated accordingly. The cc->migrate_pfn field
 779 * is neither read nor updated.
 780 */
 781static unsigned long
 782isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 783			unsigned long end_pfn, isolate_mode_t isolate_mode)
 784{
 785	pg_data_t *pgdat = cc->zone->zone_pgdat;
 786	unsigned long nr_scanned = 0, nr_isolated = 0;
 787	struct lruvec *lruvec;
 788	unsigned long flags = 0;
 789	bool locked = false;
 790	struct page *page = NULL, *valid_page = NULL;
 791	unsigned long start_pfn = low_pfn;
 792	bool skip_on_failure = false;
 793	unsigned long next_skip_pfn = 0;
 794	bool skip_updated = false;
 795
 796	/*
 797	 * Ensure that there are not too many pages isolated from the LRU
 798	 * list by either parallel reclaimers or compaction. If there are,
 799	 * delay for some time until fewer pages are isolated
 800	 */
 801	while (unlikely(too_many_isolated(pgdat))) {
 802		/* async migration should just abort */
 803		if (cc->mode == MIGRATE_ASYNC)
 804			return 0;
 805
 806		congestion_wait(BLK_RW_ASYNC, HZ/10);
 807
 808		if (fatal_signal_pending(current))
 809			return 0;
 810	}
 811
 812	cond_resched();
 813
 814	if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) {
 815		skip_on_failure = true;
 816		next_skip_pfn = block_end_pfn(low_pfn, cc->order);
 817	}
 818
 819	/* Time to isolate some pages for migration */
 820	for (; low_pfn < end_pfn; low_pfn++) {
 821
 822		if (skip_on_failure && low_pfn >= next_skip_pfn) {
 823			/*
 824			 * We have isolated all migration candidates in the
 825			 * previous order-aligned block, and did not skip it due
 826			 * to failure. We should migrate the pages now and
 827			 * hopefully succeed compaction.
 828			 */
 829			if (nr_isolated)
 830				break;
 831
 832			/*
 833			 * We failed to isolate in the previous order-aligned
 834			 * block. Set the new boundary to the end of the
 835			 * current block. Note we can't simply increase
 836			 * next_skip_pfn by 1 << order, as low_pfn might have
 837			 * been incremented by a higher number due to skipping
 838			 * a compound or a high-order buddy page in the
 839			 * previous loop iteration.
 840			 */
 841			next_skip_pfn = block_end_pfn(low_pfn, cc->order);
 842		}
 843
 844		/*
 845		 * Periodically drop the lock (if held) regardless of its
 846		 * contention, to give chance to IRQs. Abort completely if
 847		 * a fatal signal is pending.
 848		 */
 849		if (!(low_pfn % SWAP_CLUSTER_MAX)
 850		    && compact_unlock_should_abort(&pgdat->lru_lock,
 851					    flags, &locked, cc)) {
 852			low_pfn = 0;
 853			goto fatal_pending;
 854		}
 855
 856		if (!pfn_valid_within(low_pfn))
 857			goto isolate_fail;
 858		nr_scanned++;
 859
 860		page = pfn_to_page(low_pfn);
 861
 862		/*
 863		 * Check if the pageblock has already been marked skipped.
 864		 * Only the aligned PFN is checked as the caller isolates
 865		 * COMPACT_CLUSTER_MAX at a time so the second call must
 866		 * not falsely conclude that the block should be skipped.
 867		 */
 868		if (!valid_page && IS_ALIGNED(low_pfn, pageblock_nr_pages)) {
 869			if (!cc->ignore_skip_hint && get_pageblock_skip(page)) {
 870				low_pfn = end_pfn;
 871				goto isolate_abort;
 872			}
 873			valid_page = page;
 874		}
 875
 876		/*
 877		 * Skip if free. We read page order here without zone lock
 878		 * which is generally unsafe, but the race window is small and
 879		 * the worst thing that can happen is that we skip some
 880		 * potential isolation targets.
 881		 */
 882		if (PageBuddy(page)) {
 883			unsigned long freepage_order = page_order_unsafe(page);
 884
 885			/*
 886			 * Without lock, we cannot be sure that what we got is
 887			 * a valid page order. Consider only values in the
 888			 * valid order range to prevent low_pfn overflow.
 889			 */
 890			if (freepage_order > 0 && freepage_order < MAX_ORDER)
 891				low_pfn += (1UL << freepage_order) - 1;
 892			continue;
 893		}
 894
 895		/*
 896		 * Regardless of being on LRU, compound pages such as THP and
 897		 * hugetlbfs are not to be compacted. We can potentially save
 898		 * a lot of iterations if we skip them at once. The check is
 899		 * racy, but we can consider only valid values and the only
 900		 * danger is skipping too much.
 901		 */
 902		if (PageCompound(page)) {
 903			const unsigned int order = compound_order(page);
 904
 905			if (likely(order < MAX_ORDER))
 906				low_pfn += (1UL << order) - 1;
 907			goto isolate_fail;
 908		}
 909
 910		/*
 911		 * Check may be lockless but that's ok as we recheck later.
 912		 * It's possible to migrate LRU and non-lru movable pages.
 913		 * Skip any other type of page
 914		 */
 915		if (!PageLRU(page)) {
 916			/*
 917			 * __PageMovable can return false positive so we need
 918			 * to verify it under page_lock.
 919			 */
 920			if (unlikely(__PageMovable(page)) &&
 921					!PageIsolated(page)) {
 922				if (locked) {
 923					spin_unlock_irqrestore(&pgdat->lru_lock,
 924									flags);
 925					locked = false;
 926				}
 927
 928				if (!isolate_movable_page(page, isolate_mode))
 929					goto isolate_success;
 930			}
 931
 932			goto isolate_fail;
 933		}
 934
 935		/*
 936		 * Migration will fail if an anonymous page is pinned in memory,
 937		 * so avoid taking lru_lock and isolating it unnecessarily in an
 938		 * admittedly racy check.
 939		 */
 940		if (!page_mapping(page) &&
 941		    page_count(page) > page_mapcount(page))
 942			goto isolate_fail;
 943
 944		/*
  945		 * Only allow migration of anonymous pages in GFP_NOFS context
 946		 * because those do not depend on fs locks.
 947		 */
 948		if (!(cc->gfp_mask & __GFP_FS) && page_mapping(page))
 949			goto isolate_fail;
 950
 951		/* If we already hold the lock, we can skip some rechecking */
 952		if (!locked) {
 953			locked = compact_lock_irqsave(&pgdat->lru_lock,
 954								&flags, cc);
 955
 956			/* Try get exclusive access under lock */
 957			if (!skip_updated) {
 958				skip_updated = true;
 959				if (test_and_set_skip(cc, page, low_pfn))
 960					goto isolate_abort;
 961			}
 962
 963			/* Recheck PageLRU and PageCompound under lock */
 964			if (!PageLRU(page))
 965				goto isolate_fail;
 966
 967			/*
  968			 * Page became compound since the non-locked check,
 969			 * and it's on LRU. It can only be a THP so the order
 970			 * is safe to read and it's 0 for tail pages.
 971			 */
 972			if (unlikely(PageCompound(page))) {
 973				low_pfn += compound_nr(page) - 1;
 974				goto isolate_fail;
 975			}
 976		}
 977
 978		lruvec = mem_cgroup_page_lruvec(page, pgdat);
 979
 980		/* Try isolate the page */
 981		if (__isolate_lru_page(page, isolate_mode) != 0)
 982			goto isolate_fail;
 983
 984		VM_BUG_ON_PAGE(PageCompound(page), page);
 985
 986		/* Successfully isolated */
 987		del_page_from_lru_list(page, lruvec, page_lru(page));
 988		inc_node_page_state(page,
 989				NR_ISOLATED_ANON + page_is_file_cache(page));
 990
 991isolate_success:
 992		list_add(&page->lru, &cc->migratepages);
 993		cc->nr_migratepages++;
 994		nr_isolated++;
 995
 996		/*
 997		 * Avoid isolating too much unless this block is being
 998		 * rescanned (e.g. dirty/writeback pages, parallel allocation)
 999		 * or a lock is contended. For contention, isolate quickly to
1000		 * potentially remove one source of contention.
1001		 */
1002		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX &&
1003		    !cc->rescan && !cc->contended) {
1004			++low_pfn;
1005			break;
1006		}
1007
1008		continue;
1009isolate_fail:
1010		if (!skip_on_failure)
1011			continue;
1012
1013		/*
1014		 * We have isolated some pages, but then failed. Release them
1015		 * instead of migrating, as we cannot form the cc->order buddy
1016		 * page anyway.
1017		 */
1018		if (nr_isolated) {
1019			if (locked) {
1020				spin_unlock_irqrestore(&pgdat->lru_lock, flags);
1021				locked = false;
1022			}
1023			putback_movable_pages(&cc->migratepages);
1024			cc->nr_migratepages = 0;
1025			nr_isolated = 0;
1026		}
1027
1028		if (low_pfn < next_skip_pfn) {
1029			low_pfn = next_skip_pfn - 1;
1030			/*
1031			 * The check near the loop beginning would have updated
1032			 * next_skip_pfn too, but this is a bit simpler.
1033			 */
1034			next_skip_pfn += 1UL << cc->order;
1035		}
1036	}
1037
1038	/*
1039	 * The PageBuddy() check could have potentially brought us outside
1040	 * the range to be scanned.
1041	 */
1042	if (unlikely(low_pfn > end_pfn))
1043		low_pfn = end_pfn;
1044
1045isolate_abort:
1046	if (locked)
1047		spin_unlock_irqrestore(&pgdat->lru_lock, flags);
1048
1049	/*
 1050	 * Update the cached scanner pfn once the pageblock has been scanned.
 1051	 * Pages will either be migrated, in which case there is no point
 1052	 * scanning in the near future, or migration failed, in which case the
1053	 * failure reason may persist. The block is marked for skipping if
1054	 * there were no pages isolated in the block or if the block is
1055	 * rescanned twice in a row.
1056	 */
1057	if (low_pfn == end_pfn && (!nr_isolated || cc->rescan)) {
1058		if (valid_page && !skip_updated)
1059			set_pageblock_skip(valid_page);
1060		update_cached_migrate(cc, low_pfn);
1061	}
1062
1063	trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
1064						nr_scanned, nr_isolated);
1065
1066fatal_pending:
1067	cc->total_migrate_scanned += nr_scanned;
1068	if (nr_isolated)
1069		count_compact_events(COMPACTISOLATED, nr_isolated);
1070
1071	return low_pfn;
1072}
1073
1074/**
1075 * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
1076 * @cc:        Compaction control structure.
1077 * @start_pfn: The first PFN to start isolating.
1078 * @end_pfn:   The one-past-last PFN.
1079 *
1080 * Returns zero if isolation fails fatally due to e.g. pending signal.
1081 * Otherwise, function returns one-past-the-last PFN of isolated page
1082 * (which may be greater than end_pfn if end fell in a middle of a THP page).
1083 */
1084unsigned long
1085isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
1086							unsigned long end_pfn)
1087{
1088	unsigned long pfn, block_start_pfn, block_end_pfn;
1089
1090	/* Scan block by block. First and last block may be incomplete */
1091	pfn = start_pfn;
1092	block_start_pfn = pageblock_start_pfn(pfn);
1093	if (block_start_pfn < cc->zone->zone_start_pfn)
1094		block_start_pfn = cc->zone->zone_start_pfn;
1095	block_end_pfn = pageblock_end_pfn(pfn);
1096
1097	for (; pfn < end_pfn; pfn = block_end_pfn,
1098				block_start_pfn = block_end_pfn,
1099				block_end_pfn += pageblock_nr_pages) {
1100
1101		block_end_pfn = min(block_end_pfn, end_pfn);
1102
1103		if (!pageblock_pfn_to_page(block_start_pfn,
1104					block_end_pfn, cc->zone))
1105			continue;
1106
1107		pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
1108							ISOLATE_UNEVICTABLE);
1109
1110		if (!pfn)
1111			break;
1112
1113		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
1114			break;
1115	}
1116
1117	return pfn;
1118}
1119
1120#endif /* CONFIG_COMPACTION || CONFIG_CMA */
1121#ifdef CONFIG_COMPACTION
1122
1123static bool suitable_migration_source(struct compact_control *cc,
1124							struct page *page)
1125{
1126	int block_mt;
1127
1128	if (pageblock_skip_persistent(page))
1129		return false;
1130
1131	if ((cc->mode != MIGRATE_ASYNC) || !cc->direct_compaction)
1132		return true;
1133
1134	block_mt = get_pageblock_migratetype(page);
1135
1136	if (cc->migratetype == MIGRATE_MOVABLE)
1137		return is_migrate_movable(block_mt);
1138	else
1139		return block_mt == cc->migratetype;
1140}
1141
1142/* Returns true if the page is within a block suitable for migration to */
1143static bool suitable_migration_target(struct compact_control *cc,
1144							struct page *page)
1145{
1146	/* If the page is a large free page, then disallow migration */
1147	if (PageBuddy(page)) {
1148		/*
1149		 * We are checking page_order without zone->lock taken. But
1150		 * the only small danger is that we skip a potentially suitable
1151		 * pageblock, so it's not worth to check order for valid range.
1152		 */
1153		if (page_order_unsafe(page) >= pageblock_order)
1154			return false;
1155	}
1156
1157	if (cc->ignore_block_suitable)
1158		return true;
1159
1160	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
1161	if (is_migrate_movable(get_pageblock_migratetype(page)))
1162		return true;
1163
1164	/* Otherwise skip the block */
1165	return false;
1166}
1167
1168static inline unsigned int
1169freelist_scan_limit(struct compact_control *cc)
1170{
1171	unsigned short shift = BITS_PER_LONG - 1;
1172
1173	return (COMPACT_CLUSTER_MAX >> min(shift, cc->fast_search_fail)) + 1;
1174}
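/*
 * Editor's note (worked values, not part of the original source), assuming
 * COMPACT_CLUSTER_MAX == SWAP_CLUSTER_MAX == 32:
 *
 *	cc->fast_search_fail:   0   1   2   3   4   5   6+
 *	freelist_scan_limit(): 33  17   9   5   3   2   1
 *
 * i.e. each consecutive fast-search failure roughly halves how many free
 * pages the fast scanners are willing to examine.
 */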
1175
1176/*
1177 * Test whether the free scanner has reached the same or lower pageblock than
1178 * the migration scanner, and compaction should thus terminate.
1179 */
1180static inline bool compact_scanners_met(struct compact_control *cc)
1181{
1182	return (cc->free_pfn >> pageblock_order)
1183		<= (cc->migrate_pfn >> pageblock_order);
1184}
1185
1186/*
1187 * Used when scanning for a suitable migration target which scans freelists
 1188 * in reverse. Reorders the list such that the unscanned pages are scanned
 1189 * first on the next iteration of the free scanner.
1190 */
1191static void
1192move_freelist_head(struct list_head *freelist, struct page *freepage)
1193{
1194	LIST_HEAD(sublist);
1195
1196	if (!list_is_last(freelist, &freepage->lru)) {
1197		list_cut_before(&sublist, freelist, &freepage->lru);
1198		if (!list_empty(&sublist))
1199			list_splice_tail(&sublist, freelist);
1200	}
1201}
1202
1203/*
1204 * Similar to move_freelist_head except used by the migration scanner
1205 * when scanning forward. It's possible for these list operations to
1206 * move against each other if they search the free list exactly in
1207 * lockstep.
1208 */
1209static void
1210move_freelist_tail(struct list_head *freelist, struct page *freepage)
1211{
1212	LIST_HEAD(sublist);
1213
1214	if (!list_is_first(freelist, &freepage->lru)) {
1215		list_cut_position(&sublist, freelist, &freepage->lru);
1216		if (!list_empty(&sublist))
1217			list_splice_tail(&sublist, freelist);
1218	}
1219}
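/*
 * Editor's illustration (not part of the original source): for a freelist
 * A -> B -> C -> D where the current walk stopped at C,
 *
 *	move_freelist_head(list, C)  ->  C -> D -> A -> B
 *	move_freelist_tail(list, C)  ->  D -> A -> B -> C
 *
 * so the next walk (reverse for the free scanner, forward for the
 * migration scanner) begins with entries it has not visited recently.
 */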
1220
1221static void
1222fast_isolate_around(struct compact_control *cc, unsigned long pfn, unsigned long nr_isolated)
1223{
1224	unsigned long start_pfn, end_pfn;
1225	struct page *page = pfn_to_page(pfn);
1226
1227	/* Do not search around if there are enough pages already */
1228	if (cc->nr_freepages >= cc->nr_migratepages)
1229		return;
1230
1231	/* Minimise scanning during async compaction */
1232	if (cc->direct_compaction && cc->mode == MIGRATE_ASYNC)
1233		return;
1234
1235	/* Pageblock boundaries */
1236	start_pfn = pageblock_start_pfn(pfn);
1237	end_pfn = min(pageblock_end_pfn(pfn), zone_end_pfn(cc->zone)) - 1;
1238
1239	/* Scan before */
1240	if (start_pfn != pfn) {
1241		isolate_freepages_block(cc, &start_pfn, pfn, &cc->freepages, 1, false);
1242		if (cc->nr_freepages >= cc->nr_migratepages)
1243			return;
1244	}
1245
1246	/* Scan after */
1247	start_pfn = pfn + nr_isolated;
1248	if (start_pfn < end_pfn)
1249		isolate_freepages_block(cc, &start_pfn, end_pfn, &cc->freepages, 1, false);
1250
1251	/* Skip this pageblock in the future as it's full or nearly full */
1252	if (cc->nr_freepages < cc->nr_migratepages)
1253		set_pageblock_skip(page);
1254}
1255
1256/* Search orders in round-robin fashion */
1257static int next_search_order(struct compact_control *cc, int order)
1258{
1259	order--;
1260	if (order < 0)
1261		order = cc->order - 1;
1262
1263	/* Search wrapped around? */
1264	if (order == cc->search_order) {
1265		cc->search_order--;
1266		if (cc->search_order < 0)
1267			cc->search_order = cc->order - 1;
1268		return -1;
1269	}
1270
1271	return order;
1272}
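/*
 * Editor's note (worked example, not part of the original source): with
 * cc->order == 4 and cc->search_order == 2, the loop in
 * fast_isolate_freepages() below visits the free lists in the order
 * 2, 1, 0, 3 and then gets -1 once the walk wraps back around to the
 * starting order.
 */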
1273
1274static unsigned long
1275fast_isolate_freepages(struct compact_control *cc)
1276{
1277	unsigned int limit = min(1U, freelist_scan_limit(cc) >> 1);
1278	unsigned int nr_scanned = 0;
1279	unsigned long low_pfn, min_pfn, high_pfn = 0, highest = 0;
1280	unsigned long nr_isolated = 0;
1281	unsigned long distance;
1282	struct page *page = NULL;
1283	bool scan_start = false;
1284	int order;
1285
1286	/* Full compaction passes in a negative order */
1287	if (cc->order <= 0)
1288		return cc->free_pfn;
1289
1290	/*
1291	 * If starting the scan, use a deeper search and use the highest
1292	 * PFN found if a suitable one is not found.
1293	 */
1294	if (cc->free_pfn >= cc->zone->compact_init_free_pfn) {
1295		limit = pageblock_nr_pages >> 1;
1296		scan_start = true;
1297	}
1298
1299	/*
1300	 * Preferred point is in the top quarter of the scan space but take
1301	 * a pfn from the top half if the search is problematic.
1302	 */
1303	distance = (cc->free_pfn - cc->migrate_pfn);
1304	low_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 2));
1305	min_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 1));
1306
1307	if (WARN_ON_ONCE(min_pfn > low_pfn))
1308		low_pfn = min_pfn;
1309
1310	/*
1311	 * Search starts from the last successful isolation order or the next
1312	 * order to search after a previous failure
1313	 */
1314	cc->search_order = min_t(unsigned int, cc->order - 1, cc->search_order);
1315
1316	for (order = cc->search_order;
1317	     !page && order >= 0;
1318	     order = next_search_order(cc, order)) {
1319		struct free_area *area = &cc->zone->free_area[order];
1320		struct list_head *freelist;
1321		struct page *freepage;
1322		unsigned long flags;
1323		unsigned int order_scanned = 0;
1324
1325		if (!area->nr_free)
1326			continue;
1327
1328		spin_lock_irqsave(&cc->zone->lock, flags);
1329		freelist = &area->free_list[MIGRATE_MOVABLE];
1330		list_for_each_entry_reverse(freepage, freelist, lru) {
1331			unsigned long pfn;
1332
1333			order_scanned++;
1334			nr_scanned++;
1335			pfn = page_to_pfn(freepage);
1336
1337			if (pfn >= highest)
1338				highest = pageblock_start_pfn(pfn);
1339
1340			if (pfn >= low_pfn) {
1341				cc->fast_search_fail = 0;
1342				cc->search_order = order;
1343				page = freepage;
1344				break;
1345			}
1346
1347			if (pfn >= min_pfn && pfn > high_pfn) {
1348				high_pfn = pfn;
1349
1350				/* Shorten the scan if a candidate is found */
1351				limit >>= 1;
1352			}
1353
1354			if (order_scanned >= limit)
1355				break;
1356		}
1357
1358		/* Use a minimum pfn if a preferred one was not found */
1359		if (!page && high_pfn) {
1360			page = pfn_to_page(high_pfn);
1361
1362			/* Update freepage for the list reorder below */
1363			freepage = page;
1364		}
1365
 1366		/* Reorder so that a future search skips recent pages */
1367		move_freelist_head(freelist, freepage);
1368
1369		/* Isolate the page if available */
1370		if (page) {
1371			if (__isolate_free_page(page, order)) {
1372				set_page_private(page, order);
1373				nr_isolated = 1 << order;
1374				cc->nr_freepages += nr_isolated;
1375				list_add_tail(&page->lru, &cc->freepages);
1376				count_compact_events(COMPACTISOLATED, nr_isolated);
1377			} else {
1378				/* If isolation fails, abort the search */
1379				order = cc->search_order + 1;
1380				page = NULL;
1381			}
1382		}
1383
1384		spin_unlock_irqrestore(&cc->zone->lock, flags);
1385
1386		/*
 1387		 * Smaller scan on next order so the total scan is related
1388		 * to freelist_scan_limit.
1389		 */
1390		if (order_scanned >= limit)
1391			limit = min(1U, limit >> 1);
1392	}
1393
1394	if (!page) {
1395		cc->fast_search_fail++;
1396		if (scan_start) {
1397			/*
1398			 * Use the highest PFN found above min. If one was
 1399			 * not found, be pessimistic for direct compaction
1400			 * and use the min mark.
1401			 */
1402			if (highest) {
1403				page = pfn_to_page(highest);
1404				cc->free_pfn = highest;
1405			} else {
1406				if (cc->direct_compaction && pfn_valid(min_pfn)) {
1407					page = pfn_to_page(min_pfn);
1408					cc->free_pfn = min_pfn;
1409				}
1410			}
1411		}
1412	}
1413
1414	if (highest && highest >= cc->zone->compact_cached_free_pfn) {
1415		highest -= pageblock_nr_pages;
1416		cc->zone->compact_cached_free_pfn = highest;
1417	}
1418
1419	cc->total_free_scanned += nr_scanned;
1420	if (!page)
1421		return cc->free_pfn;
1422
1423	low_pfn = page_to_pfn(page);
1424	fast_isolate_around(cc, low_pfn, nr_isolated);
1425	return low_pfn;
1426}
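/*
 * Editor's note (worked example, not part of the original source): if
 * cc->migrate_pfn == 0x10000 and cc->free_pfn == 0x50000, then distance is
 * 0x40000, low_pfn is about 0x40000 (the top quarter of the remaining
 * space) and min_pfn is about 0x30000 (the top half); free pages below
 * min_pfn are never used as fast-search targets.
 */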
1427
1428/*
1429 * Based on information in the current compact_control, find blocks
1430 * suitable for isolating free pages from and then isolate them.
1431 */
1432static void isolate_freepages(struct compact_control *cc)
1433{
1434	struct zone *zone = cc->zone;
1435	struct page *page;
1436	unsigned long block_start_pfn;	/* start of current pageblock */
1437	unsigned long isolate_start_pfn; /* exact pfn we start at */
1438	unsigned long block_end_pfn;	/* end of current pageblock */
1439	unsigned long low_pfn;	     /* lowest pfn scanner is able to scan */
1440	struct list_head *freelist = &cc->freepages;
1441	unsigned int stride;
1442
1443	/* Try a small search of the free lists for a candidate */
1444	isolate_start_pfn = fast_isolate_freepages(cc);
1445	if (cc->nr_freepages)
1446		goto splitmap;
1447
1448	/*
1449	 * Initialise the free scanner. The starting point is where we last
1450	 * successfully isolated from, zone-cached value, or the end of the
1451	 * zone when isolating for the first time. For looping we also need
1452	 * this pfn aligned down to the pageblock boundary, because we do
1453	 * block_start_pfn -= pageblock_nr_pages in the for loop.
 1454	 * For the ending point, take care when isolating in the last pageblock
 1455	 * of a zone which ends in the middle of a pageblock.
1456	 * The low boundary is the end of the pageblock the migration scanner
1457	 * is using.
1458	 */
1459	isolate_start_pfn = cc->free_pfn;
1460	block_start_pfn = pageblock_start_pfn(isolate_start_pfn);
1461	block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
1462						zone_end_pfn(zone));
1463	low_pfn = pageblock_end_pfn(cc->migrate_pfn);
1464	stride = cc->mode == MIGRATE_ASYNC ? COMPACT_CLUSTER_MAX : 1;
1465
1466	/*
1467	 * Isolate free pages until enough are available to migrate the
1468	 * pages on cc->migratepages. We stop searching if the migrate
1469	 * and free page scanners meet or enough free pages are isolated.
1470	 */
1471	for (; block_start_pfn >= low_pfn;
1472				block_end_pfn = block_start_pfn,
1473				block_start_pfn -= pageblock_nr_pages,
1474				isolate_start_pfn = block_start_pfn) {
1475		unsigned long nr_isolated;
1476
1477		/*
1478		 * This can iterate a massively long zone without finding any
1479		 * suitable migration targets, so periodically check resched.
1480		 */
1481		if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)))
1482			cond_resched();
1483
1484		page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
1485									zone);
1486		if (!page)
1487			continue;
1488
1489		/* Check the block is suitable for migration */
1490		if (!suitable_migration_target(cc, page))
1491			continue;
1492
1493		/* If isolation recently failed, do not retry */
1494		if (!isolation_suitable(cc, page))
1495			continue;
1496
1497		/* Found a block suitable for isolating free pages from. */
1498		nr_isolated = isolate_freepages_block(cc, &isolate_start_pfn,
1499					block_end_pfn, freelist, stride, false);
1500
1501		/* Update the skip hint if the full pageblock was scanned */
1502		if (isolate_start_pfn == block_end_pfn)
1503			update_pageblock_skip(cc, page, block_start_pfn);
1504
1505		/* Are enough freepages isolated? */
1506		if (cc->nr_freepages >= cc->nr_migratepages) {
1507			if (isolate_start_pfn >= block_end_pfn) {
1508				/*
1509				 * Restart at previous pageblock if more
1510				 * freepages can be isolated next time.
1511				 */
1512				isolate_start_pfn =
1513					block_start_pfn - pageblock_nr_pages;
1514			}
1515			break;
1516		} else if (isolate_start_pfn < block_end_pfn) {
1517			/*
1518			 * If isolation failed early, do not continue
1519			 * needlessly.
1520			 */
1521			break;
1522		}
1523
1524		/* Adjust stride depending on isolation */
1525		if (nr_isolated) {
1526			stride = 1;
1527			continue;
1528		}
1529		stride = min_t(unsigned int, COMPACT_CLUSTER_MAX, stride << 1);
1530	}
1531
1532	/*
1533	 * Record where the free scanner will restart next time. Either we
1534	 * broke from the loop and set isolate_start_pfn based on the last
1535	 * call to isolate_freepages_block(), or we met the migration scanner
1536	 * and the loop terminated due to isolate_start_pfn < low_pfn
1537	 */
1538	cc->free_pfn = isolate_start_pfn;
1539
1540splitmap:
1541	/* __isolate_free_page() does not map the pages */
1542	split_map_pages(freelist);
1543}
1544
1545/*
1546 * This is a migrate-callback that "allocates" freepages by taking pages
1547 * from the isolated freelists in the block we are migrating to.
1548 */
1549static struct page *compaction_alloc(struct page *migratepage,
1550					unsigned long data)
1551{
1552	struct compact_control *cc = (struct compact_control *)data;
1553	struct page *freepage;
1554
1555	if (list_empty(&cc->freepages)) {
1556		isolate_freepages(cc);
1557
1558		if (list_empty(&cc->freepages))
1559			return NULL;
1560	}
1561
1562	freepage = list_entry(cc->freepages.next, struct page, lru);
1563	list_del(&freepage->lru);
1564	cc->nr_freepages--;
1565
1566	return freepage;
1567}
1568
1569/*
1570 * This is a migrate-callback that "frees" freepages back to the isolated
1571 * freelist.  All pages on the freelist are from the same zone, so there is no
1572 * special handling needed for NUMA.
1573 */
1574static void compaction_free(struct page *page, unsigned long data)
1575{
1576	struct compact_control *cc = (struct compact_control *)data;
1577
1578	list_add(&page->lru, &cc->freepages);
1579	cc->nr_freepages++;
1580}
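/*
 * Editor's note (sketch, not part of the original source): compaction_alloc()
 * and compaction_free() are handed to the migration core by compact_zone(),
 * further down in this file, roughly as:
 *
 *	err = migrate_pages(&cc->migratepages, compaction_alloc,
 *			compaction_free, (unsigned long)cc, cc->mode,
 *			MR_COMPACTION);
 *
 * so each migrated page draws its destination from the private
 * cc->freepages list, and failed destinations are returned to it.
 */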
1581
1582/* possible outcome of isolate_migratepages */
1583typedef enum {
1584	ISOLATE_ABORT,		/* Abort compaction now */
1585	ISOLATE_NONE,		/* No pages isolated, continue scanning */
1586	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
1587} isolate_migrate_t;
1588
1589/*
1590 * Allow userspace to control policy on scanning the unevictable LRU for
1591 * compactable pages.
1592 */
1593int sysctl_compact_unevictable_allowed __read_mostly = 1;
1594
1595static inline void
1596update_fast_start_pfn(struct compact_control *cc, unsigned long pfn)
1597{
1598	if (cc->fast_start_pfn == ULONG_MAX)
1599		return;
1600
1601	if (!cc->fast_start_pfn)
1602		cc->fast_start_pfn = pfn;
1603
1604	cc->fast_start_pfn = min(cc->fast_start_pfn, pfn);
1605}
1606
1607static inline unsigned long
1608reinit_migrate_pfn(struct compact_control *cc)
1609{
1610	if (!cc->fast_start_pfn || cc->fast_start_pfn == ULONG_MAX)
1611		return cc->migrate_pfn;
1612
1613	cc->migrate_pfn = cc->fast_start_pfn;
1614	cc->fast_start_pfn = ULONG_MAX;
1615
1616	return cc->migrate_pfn;
1617}
1618
1619/*
1620 * Briefly search the free lists for a migration source that already has
1621 * some free pages to reduce the number of pages that need migration
1622 * before a pageblock is free.
1623 */
1624static unsigned long fast_find_migrateblock(struct compact_control *cc)
1625{
1626	unsigned int limit = freelist_scan_limit(cc);
1627	unsigned int nr_scanned = 0;
1628	unsigned long distance;
1629	unsigned long pfn = cc->migrate_pfn;
1630	unsigned long high_pfn;
1631	int order;
1632
1633	/* Skip hints are relied on to avoid repeats on the fast search */
1634	if (cc->ignore_skip_hint)
1635		return pfn;
1636
1637	/*
1638	 * If the migrate_pfn is not at the start of a zone or the start
1639	 * of a pageblock then assume this is a continuation of a previous
1640	 * scan restarted due to COMPACT_CLUSTER_MAX.
1641	 */
1642	if (pfn != cc->zone->zone_start_pfn && pfn != pageblock_start_pfn(pfn))
1643		return pfn;
1644
1645	/*
1646	 * For smaller orders, just linearly scan as the number of pages
1647	 * to migrate should be relatively small and does not necessarily
1648	 * justify freeing up a large block for a small allocation.
1649	 */
1650	if (cc->order <= PAGE_ALLOC_COSTLY_ORDER)
1651		return pfn;
1652
1653	/*
1654	 * Only allow kcompactd and direct requests for movable pages to
1655	 * quickly clear out a MOVABLE pageblock for allocation. This
1656	 * reduces the risk that a large movable pageblock is freed for
1657	 * an unmovable/reclaimable small allocation.
1658	 */
1659	if (cc->direct_compaction && cc->migratetype != MIGRATE_MOVABLE)
1660		return pfn;
1661
1662	/*
1663	 * When starting the migration scanner, pick any pageblock within the
1664	 * first half of the search space. Otherwise try and pick a pageblock
1665	 * within the first eighth to reduce the chances that a migration
1666	 * target later becomes a source.
1667	 */
1668	distance = (cc->free_pfn - cc->migrate_pfn) >> 1;
1669	if (cc->migrate_pfn != cc->zone->zone_start_pfn)
1670		distance >>= 2;
1671	high_pfn = pageblock_start_pfn(cc->migrate_pfn + distance);
1672
1673	for (order = cc->order - 1;
1674	     order >= PAGE_ALLOC_COSTLY_ORDER && pfn == cc->migrate_pfn && nr_scanned < limit;
1675	     order--) {
1676		struct free_area *area = &cc->zone->free_area[order];
1677		struct list_head *freelist;
1678		unsigned long flags;
1679		struct page *freepage;
1680
1681		if (!area->nr_free)
1682			continue;
1683
1684		spin_lock_irqsave(&cc->zone->lock, flags);
1685		freelist = &area->free_list[MIGRATE_MOVABLE];
1686		list_for_each_entry(freepage, freelist, lru) {
1687			unsigned long free_pfn;
1688
1689			nr_scanned++;
1690			free_pfn = page_to_pfn(freepage);
1691			if (free_pfn < high_pfn) {
1692				/*
1693				 * Avoid if skipped recently. Ideally it would
1694				 * move to the tail but even safe iteration of
1695				 * the list assumes an entry is deleted, not
1696				 * reordered.
1697				 */
1698				if (get_pageblock_skip(freepage)) {
1699					if (list_is_last(freelist, &freepage->lru))
1700						break;
1701
1702					continue;
1703				}
1704
 1705				/* Reorder so that a future search skips recent pages */
1706				move_freelist_tail(freelist, freepage);
1707
1708				update_fast_start_pfn(cc, free_pfn);
1709				pfn = pageblock_start_pfn(free_pfn);
1710				cc->fast_search_fail = 0;
1711				set_pageblock_skip(freepage);
1712				break;
1713			}
1714
1715			if (nr_scanned >= limit) {
1716				cc->fast_search_fail++;
1717				move_freelist_tail(freelist, freepage);
1718				break;
1719			}
1720		}
1721		spin_unlock_irqrestore(&cc->zone->lock, flags);
1722	}
1723
1724	cc->total_migrate_scanned += nr_scanned;
1725
1726	/*
1727	 * If fast scanning failed then use a cached entry for a page block
1728	 * that had free pages as the basis for starting a linear scan.
1729	 */
1730	if (pfn == cc->migrate_pfn)
1731		pfn = reinit_migrate_pfn(cc);
1732
1733	return pfn;
1734}
1735
1736/*
1737 * Isolate all pages that can be migrated from the first suitable block,
1738 * starting at the block pointed to by the migrate scanner pfn within
1739 * compact_control.
1740 */
1741static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
1742{
1743	unsigned long block_start_pfn;
1744	unsigned long block_end_pfn;
1745	unsigned long low_pfn;
1746	struct page *page;
1747	const isolate_mode_t isolate_mode =
1748		(sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) |
1749		(cc->mode != MIGRATE_SYNC ? ISOLATE_ASYNC_MIGRATE : 0);
1750	bool fast_find_block;
1751
1752	/*
1753	 * Start at where we last stopped, or beginning of the zone as
1754	 * initialized by compact_zone(). The first failure will use
1755	 * the lowest PFN as the starting point for linear scanning.
1756	 */
1757	low_pfn = fast_find_migrateblock(cc);
1758	block_start_pfn = pageblock_start_pfn(low_pfn);
1759	if (block_start_pfn < cc->zone->zone_start_pfn)
1760		block_start_pfn = cc->zone->zone_start_pfn;
1761
1762	/*
1763	 * fast_find_migrateblock() marks a pageblock as skipped, so to avoid
1764	 * the isolation_suitable check below, check whether the fast
1765	 * search was successful.
1766	 */
1767	fast_find_block = low_pfn != cc->migrate_pfn && !cc->fast_search_fail;
1768
1769	/* Only scan within a pageblock boundary */
1770	block_end_pfn = pageblock_end_pfn(low_pfn);
1771
1772	/*
1773	 * Iterate over whole pageblocks until we find the first suitable.
1774	 * Do not cross the free scanner.
1775	 */
1776	for (; block_end_pfn <= cc->free_pfn;
1777			fast_find_block = false,
1778			low_pfn = block_end_pfn,
1779			block_start_pfn = block_end_pfn,
1780			block_end_pfn += pageblock_nr_pages) {
1781
1782		/*
1783		 * This can potentially iterate a massively long zone with
1784		 * many pageblocks unsuitable, so periodically check if we
1785		 * need to schedule.
1786		 */
1787		if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)))
1788			cond_resched();
1789
1790		page = pageblock_pfn_to_page(block_start_pfn,
1791						block_end_pfn, cc->zone);
1792		if (!page)
1793			continue;
 
1794
1795		/*
1796		 * If isolation recently failed, do not retry. Only check the
1797		 * pageblock once. COMPACT_CLUSTER_MAX causes a pageblock
1798		 * to be visited multiple times. Assume skip was checked
1799		 * before making it "skip" so other compaction instances do
1800		 * not scan the same block.
1801		 */
1802		if (IS_ALIGNED(low_pfn, pageblock_nr_pages) &&
1803		    !fast_find_block && !isolation_suitable(cc, page))
1804			continue;
1805
1806		/*
1807		 * For async compaction, also only scan in MOVABLE blocks
1808		 * without huge pages. Async compaction is optimistic to see
1809		 * if the minimum amount of work satisfies the allocation.
1810		 * The cached PFN is updated as it's possible that all
1811		 * remaining blocks between source and target are unsuitable
1812		 * and the compaction scanners fail to meet.
1813		 */
1814		if (!suitable_migration_source(cc, page)) {
1815			update_cached_migrate(cc, block_end_pfn);
1816			continue;
1817		}
1818
1819		/* Perform the isolation */
1820		low_pfn = isolate_migratepages_block(cc, low_pfn,
1821						block_end_pfn, isolate_mode);
1822
1823		if (!low_pfn)
1824			return ISOLATE_ABORT;
1825
1826		/*
1827		 * Either we isolated something and will proceed with migration,
1828		 * or we failed and compact_zone should decide whether we should
1829		 * continue or not.
1830		 */
1831		break;
1832	}
1833
1834	/* Record where migration scanner will be restarted. */
1835	cc->migrate_pfn = low_pfn;
1836
1837	return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
1838}
1839
1840/*
1841 * order == -1 is expected when compacting via
1842 * /proc/sys/vm/compact_memory
1843 */
1844static inline bool is_via_compact_memory(int order)
1845{
1846	return order == -1;
1847}
1848
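/*
 * Decide whether this compaction run is finished: COMPACT_COMPLETE or
 * COMPACT_PARTIAL_SKIPPED once the scanners have met, COMPACT_SUCCESS when a
 * suitable free page for cc->order already exists, COMPACT_CONTENDED on a
 * fatal signal or contention, and COMPACT_CONTINUE (or
 * COMPACT_NO_SUITABLE_PAGE) otherwise.
 */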
1849static enum compact_result __compact_finished(struct compact_control *cc)
1850{
1851	unsigned int order;
1852	const int migratetype = cc->migratetype;
1853	int ret;
1854
1855	/* Compaction run completes if the migrate and free scanner meet */
1856	if (compact_scanners_met(cc)) {
1857		/* Let the next compaction start anew. */
1858		reset_cached_positions(cc->zone);
1859
1860		/*
1861		 * Mark that the PG_migrate_skip information should be cleared
1862		 * by kswapd when it goes to sleep. kcompactd does not set the
1863		 * flag itself as the decision to clear it should be based
1864		 * directly on an allocation request.
1865		 */
1866		if (cc->direct_compaction)
1867			cc->zone->compact_blockskip_flush = true;
1868
1869		if (cc->whole_zone)
1870			return COMPACT_COMPLETE;
1871		else
1872			return COMPACT_PARTIAL_SKIPPED;
1873	}
1874
1875	if (is_via_compact_memory(cc->order))
1876		return COMPACT_CONTINUE;
1877
1878	/*
1879	 * Always finish scanning a pageblock to reduce the possibility of
1880	 * fallbacks in the future. This is particularly important when
1881	 * migration source is unmovable/reclaimable but it's not worth
1882	 * special casing.
1883	 */
1884	if (!IS_ALIGNED(cc->migrate_pfn, pageblock_nr_pages))
1885		return COMPACT_CONTINUE;
1886
1887	/* Direct compactor: Is a suitable page free? */
1888	ret = COMPACT_NO_SUITABLE_PAGE;
1889	for (order = cc->order; order < MAX_ORDER; order++) {
1890		struct free_area *area = &cc->zone->free_area[order];
1891		bool can_steal;
1892
1893		/* Job done if page is free of the right migratetype */
1894		if (!free_area_empty(area, migratetype))
1895			return COMPACT_SUCCESS;
1896
1897#ifdef CONFIG_CMA
1898		/* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */
1899		if (migratetype == MIGRATE_MOVABLE &&
1900			!free_area_empty(area, MIGRATE_CMA))
1901			return COMPACT_SUCCESS;
1902#endif
1903		/*
1904		 * Job done if allocation would steal freepages from
1905		 * other migratetype buddy lists.
1906		 */
1907		if (find_suitable_fallback(area, order, migratetype,
1908						true, &can_steal) != -1) {
1909
1910			/* movable pages are OK in any pageblock */
1911			if (migratetype == MIGRATE_MOVABLE)
1912				return COMPACT_SUCCESS;
1913
1914			/*
1915			 * We are stealing for a non-movable allocation. Make
1916			 * sure we finish compacting the current pageblock
1917			 * first so it is as free as possible and we won't
1918			 * have to steal another one soon. This only applies
1919			 * to sync compaction, as async compaction operates
1920			 * on pageblocks of the same migratetype.
1921			 */
1922			if (cc->mode == MIGRATE_ASYNC ||
1923					IS_ALIGNED(cc->migrate_pfn,
1924							pageblock_nr_pages)) {
1925				return COMPACT_SUCCESS;
1926			}
1927
1928			ret = COMPACT_CONTINUE;
1929			break;
1930		}
1931	}
1932
 
1933	if (cc->contended || fatal_signal_pending(current))
1934		ret = COMPACT_CONTENDED;
1935
1936	return ret;
1937}
1938
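/*
 * Thin wrapper around __compact_finished() that traces the result and maps
 * COMPACT_NO_SUITABLE_PAGE back to COMPACT_CONTINUE for the callers.
 */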
1939static enum compact_result compact_finished(struct compact_control *cc)
1940{
1941	int ret;
1942
1943	ret = __compact_finished(cc);
1944	trace_mm_compaction_finished(cc->zone, cc->order, ret);
1945	if (ret == COMPACT_NO_SUITABLE_PAGE)
1946		ret = COMPACT_CONTINUE;
1947
1948	return ret;
1949}
1950
1951/*
1952 * compaction_suitable: Is this suitable to run compaction on this zone now?
1953 * Returns
1954 *   COMPACT_SKIPPED  - If there are too few free pages for compaction
1955 *   COMPACT_SUCCESS  - If the allocation would succeed without compaction
1956 *   COMPACT_CONTINUE - If compaction should run now
1957 */
1958static enum compact_result __compaction_suitable(struct zone *zone, int order,
1959					unsigned int alloc_flags,
1960					int classzone_idx,
1961					unsigned long wmark_target)
1962{
1963	unsigned long watermark;
1964
1965	if (is_via_compact_memory(order))
1966		return COMPACT_CONTINUE;
1967
1968	watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
1969	/*
1970	 * If watermarks for high-order allocation are already met, there
1971	 * should be no need for compaction at all.
1972	 */
1973	if (zone_watermark_ok(zone, order, watermark, classzone_idx,
1974								alloc_flags))
1975		return COMPACT_SUCCESS;
1976
1977	/*
1978	 * Watermarks for order-0 must be met for compaction to be able to
1979	 * isolate free pages for migration targets. This means that the
1980	 * watermark and alloc_flags have to match, or be more pessimistic than
1981	 * the check in __isolate_free_page(). We don't use the direct
1982	 * compactor's alloc_flags, as they are not relevant for freepage
1983	 * isolation. We however do use the direct compactor's classzone_idx to
1984	 * skip over zones where lowmem reserves would prevent allocation even
1985	 * if compaction succeeds.
1986	 * For costly orders, we require low watermark instead of min for
1987	 * compaction to proceed to increase its chances.
1988	 * ALLOC_CMA is used, as pages in CMA pageblocks are considered
1989	 * suitable migration targets
1990	 */
1991	watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ?
1992				low_wmark_pages(zone) : min_wmark_pages(zone);
1993	watermark += compact_gap(order);
1994	if (!__zone_watermark_ok(zone, 0, watermark, classzone_idx,
1995						ALLOC_CMA, wmark_target))
1996		return COMPACT_SKIPPED;
1997
1998	return COMPACT_CONTINUE;
1999}
2000
2001enum compact_result compaction_suitable(struct zone *zone, int order,
2002					unsigned int alloc_flags,
2003					int classzone_idx)
2004{
2005	enum compact_result ret;
2006	int fragindex;
2007
2008	ret = __compaction_suitable(zone, order, alloc_flags, classzone_idx,
2009				    zone_page_state(zone, NR_FREE_PAGES));
2010	/*
2011	 * fragmentation index determines if allocation failures are due to
2012	 * low memory or external fragmentation
2013	 *
2014	 * index of -1000 would imply allocations might succeed depending on
2015	 * watermarks, but we already failed the high-order watermark check
2016	 * index towards 0 implies failure is due to lack of memory
2017	 * index towards 1000 implies failure is due to fragmentation
2018	 *
2019	 * Only compact if a failure would be due to fragmentation. Also
2020	 * ignore fragindex for non-costly orders where the alternative to
2021	 * a successful reclaim/compaction is OOM. Fragindex and the
2022	 * vm.extfrag_threshold sysctl are meant as a heuristic to prevent
2023	 * excessive compaction for costly orders, but it should not be at the
2024	 * expense of system stability.
2025	 */
2026	if (ret == COMPACT_CONTINUE && (order > PAGE_ALLOC_COSTLY_ORDER)) {
2027		fragindex = fragmentation_index(zone, order);
2028		if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
2029			ret = COMPACT_NOT_SUITABLE_ZONE;
2030	}
2031
2032	trace_mm_compaction_suitable(zone, order, ret);
2033	if (ret == COMPACT_NOT_SUITABLE_ZONE)
2034		ret = COMPACT_SKIPPED;
2035
2036	return ret;
2037}
2038
2039bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
2040		int alloc_flags)
2041{
2042	struct zone *zone;
2043	struct zoneref *z;
2044
2045	/*
2046	 * Make sure at least one zone would pass __compaction_suitable if we continue
2047	 * retrying the reclaim.
2048	 */
2049	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
2050					ac->nodemask) {
2051		unsigned long available;
2052		enum compact_result compact_result;
2053
2054		/*
2055		 * Do not consider all the reclaimable memory because we do not
2056	 * want to thrash just for a single high-order allocation which
2057	 * is not even guaranteed to appear even if __compaction_suitable
2058		 * is happy about the watermark check.
2059		 */
2060		available = zone_reclaimable_pages(zone) / order;
2061		available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
2062		compact_result = __compaction_suitable(zone, order, alloc_flags,
2063				ac_classzone_idx(ac), available);
2064		if (compact_result != COMPACT_SKIPPED)
2065			return true;
2066	}
2067
2068	return false;
2069}
2070
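/*
 * Run one compaction attempt on a single zone: position the migration and
 * free scanners, then repeatedly isolate and migrate pages until
 * compact_finished() reports success, completion or contention, or until a
 * page of the requested order has been captured via capc.
 */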
2071static enum compact_result
2072compact_zone(struct compact_control *cc, struct capture_control *capc)
2073{
2074	enum compact_result ret;
2075	unsigned long start_pfn = cc->zone->zone_start_pfn;
2076	unsigned long end_pfn = zone_end_pfn(cc->zone);
2077	unsigned long last_migrated_pfn;
2078	const bool sync = cc->mode != MIGRATE_ASYNC;
2079	bool update_cached;
 
 
2080
2081	/*
2082	 * These counters track activities during zone compaction.  Initialize
2083	 * them before compacting a new zone.
2084	 */
2085	cc->total_migrate_scanned = 0;
2086	cc->total_free_scanned = 0;
2087	cc->nr_migratepages = 0;
2088	cc->nr_freepages = 0;
2089	INIT_LIST_HEAD(&cc->freepages);
 
2090	INIT_LIST_HEAD(&cc->migratepages);
2091
2092	cc->migratetype = gfpflags_to_migratetype(cc->gfp_mask);
2093	ret = compaction_suitable(cc->zone, cc->order, cc->alloc_flags,
2094							cc->classzone_idx);
2095	/* Compaction is likely to fail */
2096	if (ret == COMPACT_SUCCESS || ret == COMPACT_SKIPPED)
2097		return ret;
2098
2099	/* huh, compaction_suitable is returning something unexpected */
2100	VM_BUG_ON(ret != COMPACT_CONTINUE);
2101
2102	/*
2103	 * Clear pageblock skip if there were failures recently and compaction
2104	 * is about to be retried after being deferred.
2105	 */
2106	if (compaction_restarting(cc->zone, cc->order))
2107		__reset_isolation_suitable(cc->zone);
2108
2109	/*
2110	 * Set up to move all movable pages to the end of the zone. Use cached
2111	 * information on where the scanners should start (unless we explicitly
2112	 * want to compact the whole zone), but check that it is initialised
2113	 * by ensuring the values are within zone boundaries.
2114	 */
2115	cc->fast_start_pfn = 0;
2116	if (cc->whole_zone) {
2117		cc->migrate_pfn = start_pfn;
2118		cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
2119	} else {
2120		cc->migrate_pfn = cc->zone->compact_cached_migrate_pfn[sync];
2121		cc->free_pfn = cc->zone->compact_cached_free_pfn;
2122		if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) {
2123			cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
2124			cc->zone->compact_cached_free_pfn = cc->free_pfn;
2125		}
2126		if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) {
2127			cc->migrate_pfn = start_pfn;
2128			cc->zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
2129			cc->zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
2130		}
2131
2132		if (cc->migrate_pfn <= cc->zone->compact_init_migrate_pfn)
2133			cc->whole_zone = true;
2134	}
2135
2136	last_migrated_pfn = 0;
2137
2138	/*
2139	 * Migrate has separate cached PFNs for ASYNC and SYNC* migration on
2140	 * the basis that some migrations will fail in ASYNC mode. However,
2141	 * if the cached PFNs match and pageblocks are skipped due to having
2142	 * no isolation candidates, then the sync state does not matter.
2143	 * Until a pageblock with isolation candidates is found, keep the
2144	 * cached PFNs in sync to avoid revisiting the same blocks.
2145	 */
2146	update_cached = !sync &&
2147		cc->zone->compact_cached_migrate_pfn[0] == cc->zone->compact_cached_migrate_pfn[1];
2148
2149	trace_mm_compaction_begin(start_pfn, cc->migrate_pfn,
2150				cc->free_pfn, end_pfn, sync);
2151
2152	migrate_prep_local();
 
2153
2154	while ((ret = compact_finished(cc)) == COMPACT_CONTINUE) {
2155		int err;
2156		unsigned long start_pfn = cc->migrate_pfn;
2157
2158		/*
2159		 * Avoid multiple rescans which can happen if a page cannot be
2160		 * isolated (dirty/writeback in async mode) or if the migrated
2161		 * pages are being allocated before the pageblock is cleared.
2162		 * The first rescan will capture the entire pageblock for
2163		 * migration. If it fails, it'll be marked skip and scanning
2164		 * will proceed as normal.
2165		 */
2166		cc->rescan = false;
2167		if (pageblock_start_pfn(last_migrated_pfn) ==
2168		    pageblock_start_pfn(start_pfn)) {
2169			cc->rescan = true;
2170		}
2171
 
2172		switch (isolate_migratepages(cc)) {
2173		case ISOLATE_ABORT:
2174			ret = COMPACT_CONTENDED;
2175			putback_movable_pages(&cc->migratepages);
2176			cc->nr_migratepages = 0;
2177			last_migrated_pfn = 0;
2178			goto out;
2179		case ISOLATE_NONE:
2180			if (update_cached) {
2181				cc->zone->compact_cached_migrate_pfn[1] =
2182					cc->zone->compact_cached_migrate_pfn[0];
2183			}
2184
2185			/*
2186			 * We haven't isolated and migrated anything, but
2187			 * there might still be unflushed migrations from
2188			 * previous cc->order aligned block.
2189			 */
2190			goto check_drain;
2191		case ISOLATE_SUCCESS:
2192			update_cached = false;
2193			last_migrated_pfn = start_pfn;
2194			;
2195		}
2196
2197		err = migrate_pages(&cc->migratepages, compaction_alloc,
2198				compaction_free, (unsigned long)cc, cc->mode,
2199				MR_COMPACTION);
2200
2201		trace_mm_compaction_migratepages(cc->nr_migratepages, err,
2202							&cc->migratepages);
2203
2204		/* All pages were either migrated or will be released */
2205		cc->nr_migratepages = 0;
2206		if (err) {
2207			putback_movable_pages(&cc->migratepages);
2208			/*
2209			 * migrate_pages() may return -ENOMEM when scanners meet
2210			 * and we want compact_finished() to detect it
2211			 */
2212			if (err == -ENOMEM && !compact_scanners_met(cc)) {
2213				ret = COMPACT_CONTENDED;
2214				goto out;
2215			}
2216			/*
2217			 * We failed to migrate at least one page in the current
2218			 * order-aligned block, so skip the rest of it.
2219			 */
2220			if (cc->direct_compaction &&
2221						(cc->mode == MIGRATE_ASYNC)) {
2222				cc->migrate_pfn = block_end_pfn(
2223						cc->migrate_pfn - 1, cc->order);
2224				/* Draining pcplists is useless in this case */
2225				last_migrated_pfn = 0;
2226			}
2227		}
2228
2229check_drain:
2230		/*
2231		 * Has the migration scanner moved away from the previous
2232		 * cc->order aligned block where we migrated from? If yes,
2233		 * flush the pages that were freed, so that they can merge and
2234		 * compact_finished() can detect immediately if allocation
2235		 * would succeed.
2236		 */
2237		if (cc->order > 0 && last_migrated_pfn) {
2238			int cpu;
2239			unsigned long current_block_start =
2240				block_start_pfn(cc->migrate_pfn, cc->order);
2241
2242			if (last_migrated_pfn < current_block_start) {
2243				cpu = get_cpu();
2244				lru_add_drain_cpu(cpu);
2245				drain_local_pages(cc->zone);
2246				put_cpu();
2247				/* No more flushing until we migrate again */
2248				last_migrated_pfn = 0;
2249			}
2250		}
2251
2252		/* Stop if a page has been captured */
2253		if (capc && capc->page) {
2254			ret = COMPACT_SUCCESS;
2255			break;
2256		}
2257	}
2258
2259out:
2260	/*
2261	 * Release free pages and update where the free scanner should restart,
2262	 * so we don't leave any returned pages behind in the next attempt.
2263	 */
2264	if (cc->nr_freepages > 0) {
2265		unsigned long free_pfn = release_freepages(&cc->freepages);
2266
2267		cc->nr_freepages = 0;
2268		VM_BUG_ON(free_pfn == 0);
2269		/* The cached pfn is always the first in a pageblock */
2270		free_pfn = pageblock_start_pfn(free_pfn);
2271		/*
2272		 * Only go back, not forward. The cached pfn might have been
2273		 * already reset to zone end in compact_finished()
2274		 */
2275		if (free_pfn > cc->zone->compact_cached_free_pfn)
2276			cc->zone->compact_cached_free_pfn = free_pfn;
2277	}
2278
2279	count_compact_events(COMPACTMIGRATE_SCANNED, cc->total_migrate_scanned);
2280	count_compact_events(COMPACTFREE_SCANNED, cc->total_free_scanned);
2281
2282	trace_mm_compaction_end(start_pfn, cc->migrate_pfn,
2283				cc->free_pfn, end_pfn, sync, ret);
 
2284
2285	return ret;
2286}
2287
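/*
 * Set up a compact_control for one zone and run compact_zone() on it at the
 * given priority, optionally capturing a freshly freed page of the requested
 * order through current->capture_control.
 */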
2288static enum compact_result compact_zone_order(struct zone *zone, int order,
2289		gfp_t gfp_mask, enum compact_priority prio,
2290		unsigned int alloc_flags, int classzone_idx,
2291		struct page **capture)
2292{
2293	enum compact_result ret;
2294	struct compact_control cc = {
2295		.order = order,
2296		.search_order = order,
2297		.gfp_mask = gfp_mask,
2298		.zone = zone,
2299		.mode = (prio == COMPACT_PRIO_ASYNC) ?
2300					MIGRATE_ASYNC :	MIGRATE_SYNC_LIGHT,
2301		.alloc_flags = alloc_flags,
2302		.classzone_idx = classzone_idx,
2303		.direct_compaction = true,
2304		.whole_zone = (prio == MIN_COMPACT_PRIORITY),
2305		.ignore_skip_hint = (prio == MIN_COMPACT_PRIORITY),
2306		.ignore_block_suitable = (prio == MIN_COMPACT_PRIORITY)
2307	};
2308	struct capture_control capc = {
2309		.cc = &cc,
2310		.page = NULL,
2311	};
2312
2313	if (capture)
2314		current->capture_control = &capc;
2315
2316	ret = compact_zone(&cc, &capc);
2317
2318	VM_BUG_ON(!list_empty(&cc.freepages));
2319	VM_BUG_ON(!list_empty(&cc.migratepages));
2320
2321	*capture = capc.page;
2322	current->capture_control = NULL;
2323
2324	return ret;
2325}
2326
2327int sysctl_extfrag_threshold = 500;
2328
2329/**
2330 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
2331 * @gfp_mask: The GFP mask of the current allocation
2332 * @order: The order of the current allocation
2333 * @alloc_flags: The allocation flags of the current allocation
2334 * @ac: The context of current allocation
2335 * @prio: Determines how hard direct compaction should try to succeed
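 * @capture: Pointer to a free page created by compaction, if any, is stored here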
2336 *
2337 * This is the main entry point for direct page compaction.
2338 */
2339enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
2340		unsigned int alloc_flags, const struct alloc_context *ac,
2341		enum compact_priority prio, struct page **capture)
2342{
2343	int may_perform_io = gfp_mask & __GFP_IO;
2344	struct zoneref *z;
2345	struct zone *zone;
2346	enum compact_result rc = COMPACT_SKIPPED;
2347
2348	/*
2349	 * Check if the GFP flags allow compaction - GFP_NOIO is really
2350	 * tricky context because the migration might require IO
2351	 */
2352	if (!may_perform_io)
2353		return COMPACT_SKIPPED;
2354
2355	trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio);
2356
2357	/* Compact each zone in the list */
2358	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
2359								ac->nodemask) {
2360		enum compact_result status;
2361
2362		if (prio > MIN_COMPACT_PRIORITY
2363					&& compaction_deferred(zone, order)) {
2364			rc = max_t(enum compact_result, COMPACT_DEFERRED, rc);
2365			continue;
2366		}
2367
2368		status = compact_zone_order(zone, order, gfp_mask, prio,
2369				alloc_flags, ac_classzone_idx(ac), capture);
2370		rc = max(status, rc);
2371
2372		/* The allocation should succeed, stop compacting */
2373		if (status == COMPACT_SUCCESS) {
2374			/*
2375			 * We think the allocation will succeed in this zone,
2376			 * but it is not certain, hence the false. The caller
2377			 * will repeat this with true if allocation indeed
2378			 * succeeds in this zone.
2379			 */
2380			compaction_defer_reset(zone, order, false);
2381
2382			break;
2383		}
2384
2385		if (prio != COMPACT_PRIO_ASYNC && (status == COMPACT_COMPLETE ||
2386					status == COMPACT_PARTIAL_SKIPPED))
2387			/*
2388			 * We think that allocation won't succeed in this zone
2389			 * so we defer compaction there. If it ends up
2390			 * succeeding after all, it will be reset.
2391			 */
2392			defer_compaction(zone, order);
2393
2394		/*
2395		 * We might have stopped compacting due to need_resched() in
2396		 * async compaction, or due to a fatal signal detected. In that
2397		 * case do not try further zones
2398		 */
2399		if ((prio == COMPACT_PRIO_ASYNC && need_resched())
2400					|| fatal_signal_pending(current))
2401			break;
2402	}
2403
2404	return rc;
2405}
2406
2407
2408/* Compact all zones within a node */
2409static void compact_node(int nid)
2410{
2411	pg_data_t *pgdat = NODE_DATA(nid);
2412	int zoneid;
2413	struct zone *zone;
2414	struct compact_control cc = {
2415		.order = -1,
2416		.mode = MIGRATE_SYNC,
2417		.ignore_skip_hint = true,
2418		.whole_zone = true,
2419		.gfp_mask = GFP_KERNEL,
 
2420	};
2421
2422
2423	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
2424
2425		zone = &pgdat->node_zones[zoneid];
2426		if (!populated_zone(zone))
2427			continue;
2428
2429		cc.zone = zone;
2430
2431		compact_zone(&cc, NULL);
2432
2433		VM_BUG_ON(!list_empty(&cc.freepages));
2434		VM_BUG_ON(!list_empty(&cc.migratepages));
2435	}
 
 
2436}
2437
2438/* Compact all nodes in the system */
2439static void compact_nodes(void)
2440{
2441	int nid;
2442
2443	/* Flush pending updates to the LRU lists */
2444	lru_add_drain_all();
2445
2446	for_each_online_node(nid)
2447		compact_node(nid);
2448}
2449
2450/* The written value is actually unused, all memory is compacted */
2451int sysctl_compact_memory;
2452
2453/*
2454 * This is the entry point for compacting all nodes via
2455 * /proc/sys/vm/compact_memory
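 * e.g. "echo 1 > /proc/sys/vm/compact_memory" compacts all nodes; the written value is ignored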
2456 */
2457int sysctl_compaction_handler(struct ctl_table *table, int write,
2458			void __user *buffer, size_t *length, loff_t *ppos)
2459{
2460	if (write)
2461		compact_nodes();
2462
2463	return 0;
2464}
2465
2466#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
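/*
 * Per-node sysfs trigger: writing to a node's "compact" attribute
 * (/sys/devices/system/node/nodeN/compact) compacts all zones of that node.
 */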
2467static ssize_t sysfs_compact_node(struct device *dev,
2468			struct device_attribute *attr,
2469			const char *buf, size_t count)
2470{
2471	int nid = dev->id;
2472
2473	if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
2474		/* Flush pending updates to the LRU lists */
2475		lru_add_drain_all();
2476
2477		compact_node(nid);
2478	}
2479
2480	return count;
2481}
2482static DEVICE_ATTR(compact, 0200, NULL, sysfs_compact_node);
2483
2484int compaction_register_node(struct node *node)
2485{
2486	return device_create_file(&node->dev, &dev_attr_compact);
2487}
2488
2489void compaction_unregister_node(struct node *node)
2490{
2491	return device_remove_file(&node->dev, &dev_attr_compact);
2492}
2493#endif /* CONFIG_SYSFS && CONFIG_NUMA */
2494
2495static inline bool kcompactd_work_requested(pg_data_t *pgdat)
2496{
2497	return pgdat->kcompactd_max_order > 0 || kthread_should_stop();
 
2498}
2499
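/*
 * Check whether any populated zone up to the requested classzone_idx is
 * suitable for compaction at the pending kcompactd_max_order.
 */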
2500static bool kcompactd_node_suitable(pg_data_t *pgdat)
2501{
2502	int zoneid;
2503	struct zone *zone;
2504	enum zone_type classzone_idx = pgdat->kcompactd_classzone_idx;
 
2505
2506	for (zoneid = 0; zoneid <= classzone_idx; zoneid++) {
2507		zone = &pgdat->node_zones[zoneid];
2508
2509		if (!populated_zone(zone))
2510			continue;
2511
2512		if (compaction_suitable(zone, pgdat->kcompactd_max_order, 0,
2513					classzone_idx) == COMPACT_CONTINUE)
2514			return true;
2515	}
2516
2517	return false;
2518}
2519
2520static void kcompactd_do_work(pg_data_t *pgdat)
2521{
2522	/*
2523	 * With no special task, compact all zones so that a page of requested
2524	 * order is allocatable.
2525	 */
2526	int zoneid;
2527	struct zone *zone;
2528	struct compact_control cc = {
2529		.order = pgdat->kcompactd_max_order,
2530		.search_order = pgdat->kcompactd_max_order,
2531		.classzone_idx = pgdat->kcompactd_classzone_idx,
2532		.mode = MIGRATE_SYNC_LIGHT,
2533		.ignore_skip_hint = false,
2534		.gfp_mask = GFP_KERNEL,
2535	};
 
 
2536	trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order,
2537							cc.classzone_idx);
2538	count_compact_event(KCOMPACTD_WAKE);
2539
2540	for (zoneid = 0; zoneid <= cc.classzone_idx; zoneid++) {
2541		int status;
2542
2543		zone = &pgdat->node_zones[zoneid];
2544		if (!populated_zone(zone))
2545			continue;
2546
2547		if (compaction_deferred(zone, cc.order))
2548			continue;
2549
2550		if (compaction_suitable(zone, cc.order, 0, zoneid) !=
2551							COMPACT_CONTINUE)
2552			continue;
2553
2554		if (kthread_should_stop())
2555			return;
2556
2557		cc.zone = zone;
2558		status = compact_zone(&cc, NULL);
2559
2560		if (status == COMPACT_SUCCESS) {
2561			compaction_defer_reset(zone, cc.order, false);
2562		} else if (status == COMPACT_PARTIAL_SKIPPED || status == COMPACT_COMPLETE) {
2563			/*
2564			 * Buddy pages may become stranded on pcps that could
2565			 * otherwise coalesce on the zone's free area for
2566			 * order >= cc.order.  This is ratelimited by the
2567			 * upcoming deferral.
2568			 */
2569			drain_all_pages(zone);
2570
2571			/*
2572			 * We use sync migration mode here, so we defer like
2573			 * sync direct compaction does.
2574			 */
2575			defer_compaction(zone, cc.order);
2576		}
2577
2578		count_compact_events(KCOMPACTD_MIGRATE_SCANNED,
2579				     cc.total_migrate_scanned);
2580		count_compact_events(KCOMPACTD_FREE_SCANNED,
2581				     cc.total_free_scanned);
2582
2583		VM_BUG_ON(!list_empty(&cc.freepages));
2584		VM_BUG_ON(!list_empty(&cc.migratepages));
2585	}
2586
2587	/*
2588	 * Regardless of success, we are done until woken up next. But remember
2589	 * the requested order/classzone_idx in case it was higher/tighter than
2590	 * our current ones
2591	 */
2592	if (pgdat->kcompactd_max_order <= cc.order)
2593		pgdat->kcompactd_max_order = 0;
2594	if (pgdat->kcompactd_classzone_idx >= cc.classzone_idx)
2595		pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1;
2596}
2597
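/*
 * Note the highest pending order and lowest classzone_idx requested so far,
 * then wake kcompactd if it is sleeping and the node has a suitable zone.
 */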
2598void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx)
2599{
2600	if (!order)
2601		return;
2602
2603	if (pgdat->kcompactd_max_order < order)
2604		pgdat->kcompactd_max_order = order;
2605
2606	if (pgdat->kcompactd_classzone_idx > classzone_idx)
2607		pgdat->kcompactd_classzone_idx = classzone_idx;
2608
2609	/*
2610	 * Pairs with implicit barrier in wait_event_freezable()
2611	 * such that wakeups are not missed.
2612	 */
2613	if (!wq_has_sleeper(&pgdat->kcompactd_wait))
2614		return;
2615
2616	if (!kcompactd_node_suitable(pgdat))
2617		return;
2618
2619	trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order,
2620							classzone_idx);
2621	wake_up_interruptible(&pgdat->kcompactd_wait);
2622}
2623
2624/*
2625 * The background compaction daemon, started as a kernel thread
2626 * from the init process.
2627 */
2628static int kcompactd(void *p)
2629{
2630	pg_data_t *pgdat = (pg_data_t*)p;
2631	struct task_struct *tsk = current;
 
 
2632
2633	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
2634
2635	if (!cpumask_empty(cpumask))
2636		set_cpus_allowed_ptr(tsk, cpumask);
2637
2638	set_freezable();
2639
2640	pgdat->kcompactd_max_order = 0;
2641	pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1;
2642
2643	while (!kthread_should_stop()) {
2644		unsigned long pflags;
2645
2646		trace_mm_compaction_kcompactd_sleep(pgdat->node_id);
2647		wait_event_freezable(pgdat->kcompactd_wait,
2648				kcompactd_work_requested(pgdat));
2649
2650		psi_memstall_enter(&pflags);
2651		kcompactd_do_work(pgdat);
2652		psi_memstall_leave(&pflags);
2653	}
2654
2655	return 0;
2656}
2657
2658/*
2659 * This kcompactd start function will be called by init and node-hot-add.
2660 * On node-hot-add, kcompactd will be moved to the proper cpus if cpus are hot-added.
2661 */
2662int kcompactd_run(int nid)
2663{
2664	pg_data_t *pgdat = NODE_DATA(nid);
2665	int ret = 0;
2666
2667	if (pgdat->kcompactd)
2668		return 0;
2669
2670	pgdat->kcompactd = kthread_run(kcompactd, pgdat, "kcompactd%d", nid);
2671	if (IS_ERR(pgdat->kcompactd)) {
2672		pr_err("Failed to start kcompactd on node %d\n", nid);
2673		ret = PTR_ERR(pgdat->kcompactd);
2674		pgdat->kcompactd = NULL;
2675	}
2676	return ret;
2677}
2678
2679/*
2680 * Called by memory hotplug when all memory in a node is offlined. Caller must
2681 * hold mem_hotplug_begin/end().
2682 */
2683void kcompactd_stop(int nid)
2684{
2685	struct task_struct *kcompactd = NODE_DATA(nid)->kcompactd;
2686
2687	if (kcompactd) {
2688		kthread_stop(kcompactd);
2689		NODE_DATA(nid)->kcompactd = NULL;
2690	}
2691}
2692
2693/*
2694 * It's optimal to keep kcompactd on the same CPUs as their memory, but
2695 * not required for correctness. So if the last cpu in a node goes
2696 * away, we get changed to run anywhere: as the first one comes back,
2697 * restore their cpu bindings.
2698 */
2699static int kcompactd_cpu_online(unsigned int cpu)
2700{
2701	int nid;
2702
2703	for_each_node_state(nid, N_MEMORY) {
2704		pg_data_t *pgdat = NODE_DATA(nid);
2705		const struct cpumask *mask;
2706
2707		mask = cpumask_of_node(pgdat->node_id);
2708
2709		if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
2710			/* One of our CPUs online: restore mask */
2711			set_cpus_allowed_ptr(pgdat->kcompactd, mask);
 
2712	}
2713	return 0;
2714}
2715
2716static int __init kcompactd_init(void)
2717{
2718	int nid;
2719	int ret;
2720
2721	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
2722					"mm/compaction:online",
2723					kcompactd_cpu_online, NULL);
2724	if (ret < 0) {
2725		pr_err("kcompactd: failed to register hotplug callbacks.\n");
2726		return ret;
2727	}
2728
2729	for_each_node_state(nid, N_MEMORY)
2730		kcompactd_run(nid);
 
2731	return 0;
2732}
2733subsys_initcall(kcompactd_init)
2734
2735#endif /* CONFIG_COMPACTION */
v6.9.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * linux/mm/compaction.c
   4 *
   5 * Memory compaction for the reduction of external fragmentation. Note that
   6 * this heavily depends upon page migration to do all the real heavy
   7 * lifting
   8 *
   9 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
  10 */
  11#include <linux/cpu.h>
  12#include <linux/swap.h>
  13#include <linux/migrate.h>
  14#include <linux/compaction.h>
  15#include <linux/mm_inline.h>
  16#include <linux/sched/signal.h>
  17#include <linux/backing-dev.h>
  18#include <linux/sysctl.h>
  19#include <linux/sysfs.h>
  20#include <linux/page-isolation.h>
  21#include <linux/kasan.h>
  22#include <linux/kthread.h>
  23#include <linux/freezer.h>
  24#include <linux/page_owner.h>
  25#include <linux/psi.h>
  26#include "internal.h"
  27
  28#ifdef CONFIG_COMPACTION
  29/*
  30 * Fragmentation score check interval for proactive compaction purposes.
  31 */
  32#define HPAGE_FRAG_CHECK_INTERVAL_MSEC	(500)
  33
  34static inline void count_compact_event(enum vm_event_item item)
  35{
  36	count_vm_event(item);
  37}
  38
  39static inline void count_compact_events(enum vm_event_item item, long delta)
  40{
  41	count_vm_events(item, delta);
  42}
  43
  44/*
  45 * order == -1 is expected when compacting proactively via
  46 * 1. /proc/sys/vm/compact_memory
  47 * 2. /sys/devices/system/node/nodex/compact
  48 * 3. /proc/sys/vm/compaction_proactiveness
  49 */
  50static inline bool is_via_compact_memory(int order)
  51{
  52	return order == -1;
  53}
  54
  55#else
  56#define count_compact_event(item) do { } while (0)
  57#define count_compact_events(item, delta) do { } while (0)
  58static inline bool is_via_compact_memory(int order) { return false; }
  59#endif
  60
  61#if defined CONFIG_COMPACTION || defined CONFIG_CMA
  62
  63#define CREATE_TRACE_POINTS
  64#include <trace/events/compaction.h>
  65
  66#define block_start_pfn(pfn, order)	round_down(pfn, 1UL << (order))
  67#define block_end_pfn(pfn, order)	ALIGN((pfn) + 1, 1UL << (order))
 
 
  68
  69/*
  70 * Page order with-respect-to which proactive compaction
  71 * calculates external fragmentation, which is used as
  72 * the "fragmentation score" of a node/zone.
  73 */
  74#if defined CONFIG_TRANSPARENT_HUGEPAGE
  75#define COMPACTION_HPAGE_ORDER	HPAGE_PMD_ORDER
  76#elif defined CONFIG_HUGETLBFS
  77#define COMPACTION_HPAGE_ORDER	HUGETLB_PAGE_ORDER
  78#else
  79#define COMPACTION_HPAGE_ORDER	(PMD_SHIFT - PAGE_SHIFT)
  80#endif
  81
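/*
 * Prepare isolated free pages for use as migration targets: run the
 * post-allocation hook, split any high-order pages into order-0 pages and
 * collect them all on freepages[0].
 */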
  82static void split_map_pages(struct list_head *freepages)
  83{
  84	unsigned int i, order;
  85	struct page *page, *next;
  86	LIST_HEAD(tmp_list);
  87
  88	for (order = 0; order < NR_PAGE_ORDERS; order++) {
  89		list_for_each_entry_safe(page, next, &freepages[order], lru) {
  90			unsigned int nr_pages;
  91
  92			list_del(&page->lru);
  93
  94			nr_pages = 1 << order;
  95
  96			post_alloc_hook(page, order, __GFP_MOVABLE);
  97			if (order)
  98				split_page(page, order);
  99
 100			for (i = 0; i < nr_pages; i++) {
 101				list_add(&page->lru, &tmp_list);
 102				page++;
 103			}
 104		}
 105		list_splice_init(&tmp_list, &freepages[0]);
 106	}
 107}
 108
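/*
 * Return unused isolated free pages to the buddy allocator and report the
 * highest PFN released, which the caller may use to update the cached free
 * scanner position.
 */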
 109static unsigned long release_free_list(struct list_head *freepages)
 110{
 111	int order;
 112	unsigned long high_pfn = 0;
 113
 114	for (order = 0; order < NR_PAGE_ORDERS; order++) {
 115		struct page *page, *next;
 116
 117		list_for_each_entry_safe(page, next, &freepages[order], lru) {
 118			unsigned long pfn = page_to_pfn(page);
 
 119
 120			list_del(&page->lru);
 121			/*
 122			 * Convert free pages into post allocation pages, so
 123			 * that we can free them via __free_page.
 124			 */
 125			post_alloc_hook(page, order, __GFP_MOVABLE);
 126			__free_pages(page, order);
 127			if (pfn > high_pfn)
 128				high_pfn = pfn;
 129		}
 130	}
 131	return high_pfn;
 
 132}
 133
 134#ifdef CONFIG_COMPACTION
 135bool PageMovable(struct page *page)
 
 136{
 137	const struct movable_operations *mops;
 138
 139	VM_BUG_ON_PAGE(!PageLocked(page), page);
 140	if (!__PageMovable(page))
 141		return false;
 142
 143	mops = page_movable_ops(page);
 144	if (mops)
 145		return true;
 146
 147	return false;
 148}
 
 149
 150void __SetPageMovable(struct page *page, const struct movable_operations *mops)
 151{
 152	VM_BUG_ON_PAGE(!PageLocked(page), page);
 153	VM_BUG_ON_PAGE((unsigned long)mops & PAGE_MAPPING_MOVABLE, page);
 154	page->mapping = (void *)((unsigned long)mops | PAGE_MAPPING_MOVABLE);
 155}
 156EXPORT_SYMBOL(__SetPageMovable);
 157
 158void __ClearPageMovable(struct page *page)
 159{
 
 160	VM_BUG_ON_PAGE(!PageMovable(page), page);
 161	/*
 162	 * This page still has the type of a movable page, but it's
 163	 * actually not movable any more.
 
 164	 */
 165	page->mapping = (void *)PAGE_MAPPING_MOVABLE;
 
 166}
 167EXPORT_SYMBOL(__ClearPageMovable);
 168
 169/* Do not skip compaction more than 64 times */
 170#define COMPACT_MAX_DEFER_SHIFT 6
 171
 172/*
 173 * Compaction is deferred when compaction fails to result in a page
  174 * allocation success. 1 << compact_defer_shift compactions are skipped, up
  175 * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT.
 176 */
 177static void defer_compaction(struct zone *zone, int order)
 178{
 179	zone->compact_considered = 0;
 180	zone->compact_defer_shift++;
 181
 182	if (order < zone->compact_order_failed)
 183		zone->compact_order_failed = order;
 184
 185	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
 186		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
 187
 188	trace_mm_compaction_defer_compaction(zone, order);
 189}
 190
 191/* Returns true if compaction should be skipped this time */
 192static bool compaction_deferred(struct zone *zone, int order)
 193{
 194	unsigned long defer_limit = 1UL << zone->compact_defer_shift;
 195
 196	if (order < zone->compact_order_failed)
 197		return false;
 198
 199	/* Avoid possible overflow */
 200	if (++zone->compact_considered >= defer_limit) {
 201		zone->compact_considered = defer_limit;
 202		return false;
 203	}
 204
 205	trace_mm_compaction_deferred(zone, order);
 206
 207	return true;
 208}
 209
 210/*
 211 * Update defer tracking counters after successful compaction of given order,
 212 * which means an allocation either succeeded (alloc_success == true) or is
 213 * expected to succeed.
 214 */
 215void compaction_defer_reset(struct zone *zone, int order,
 216		bool alloc_success)
 217{
 218	if (alloc_success) {
 219		zone->compact_considered = 0;
 220		zone->compact_defer_shift = 0;
 221	}
 222	if (order >= zone->compact_order_failed)
 223		zone->compact_order_failed = order + 1;
 224
 225	trace_mm_compaction_defer_reset(zone, order);
 226}
 227
 228/* Returns true if restarting compaction after many failures */
 229static bool compaction_restarting(struct zone *zone, int order)
 230{
 231	if (order < zone->compact_order_failed)
 232		return false;
 233
 234	return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
 235		zone->compact_considered >= 1UL << zone->compact_defer_shift;
 236}
 237
 238/* Returns true if the pageblock should be scanned for pages to isolate. */
 239static inline bool isolation_suitable(struct compact_control *cc,
 240					struct page *page)
 241{
 242	if (cc->ignore_skip_hint)
 243		return true;
 244
 245	return !get_pageblock_skip(page);
 246}
 247
 248static void reset_cached_positions(struct zone *zone)
 249{
 250	zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
 251	zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
 252	zone->compact_cached_free_pfn =
 253				pageblock_start_pfn(zone_end_pfn(zone) - 1);
 254}
 255
 256#ifdef CONFIG_SPARSEMEM
 257/*
 258 * If the PFN falls into an offline section, return the start PFN of the
 259 * next online section. If the PFN falls into an online section or if
 260 * there is no next online section, return 0.
 261 */
 262static unsigned long skip_offline_sections(unsigned long start_pfn)
 263{
 264	unsigned long start_nr = pfn_to_section_nr(start_pfn);
 265
 266	if (online_section_nr(start_nr))
 267		return 0;
 268
 269	while (++start_nr <= __highest_present_section_nr) {
 270		if (online_section_nr(start_nr))
 271			return section_nr_to_pfn(start_nr);
 272	}
 273
 274	return 0;
 275}
 276
 277/*
 278 * If the PFN falls into an offline section, return the end PFN of the
 279 * next online section in reverse. If the PFN falls into an online section
 280 * or if there is no next online section in reverse, return 0.
 281 */
 282static unsigned long skip_offline_sections_reverse(unsigned long start_pfn)
 283{
 284	unsigned long start_nr = pfn_to_section_nr(start_pfn);
 285
 286	if (!start_nr || online_section_nr(start_nr))
 287		return 0;
 288
 289	while (start_nr-- > 0) {
 290		if (online_section_nr(start_nr))
 291			return section_nr_to_pfn(start_nr) + PAGES_PER_SECTION;
 292	}
 293
 294	return 0;
 295}
 296#else
 297static unsigned long skip_offline_sections(unsigned long start_pfn)
 298{
 299	return 0;
 300}
 301
 302static unsigned long skip_offline_sections_reverse(unsigned long start_pfn)
 303{
 304	return 0;
 305}
 306#endif
 307
 308/*
 309 * Compound pages of >= pageblock_order should consistently be skipped until
 310 * released. It is always pointless to compact pages of such order (if they are
 311 * migratable), and the pageblocks they occupy cannot contain any free pages.
 312 */
 313static bool pageblock_skip_persistent(struct page *page)
 314{
 315	if (!PageCompound(page))
 316		return false;
 317
 318	page = compound_head(page);
 319
 320	if (compound_order(page) >= pageblock_order)
 321		return true;
 322
 323	return false;
 324}
 325
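/*
 * Clear the skip hint for the pageblock around @pfn when a sample of the
 * block finds an LRU page (potential migration source) or a buddy page
 * (potential migration target). Returns true if the block is usable and its
 * skip hint is now clear.
 */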
 326static bool
 327__reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
 328							bool check_target)
 329{
 330	struct page *page = pfn_to_online_page(pfn);
 331	struct page *block_page;
 332	struct page *end_page;
 333	unsigned long block_pfn;
 334
 335	if (!page)
 336		return false;
 337	if (zone != page_zone(page))
 338		return false;
 339	if (pageblock_skip_persistent(page))
 340		return false;
 341
 342	/*
 343	 * If skip is already cleared do no further checking once the
 344	 * restart points have been set.
 345	 */
 346	if (check_source && check_target && !get_pageblock_skip(page))
 347		return true;
 348
 349	/*
 350	 * If clearing skip for the target scanner, do not select a
 351	 * non-movable pageblock as the starting point.
 352	 */
 353	if (!check_source && check_target &&
 354	    get_pageblock_migratetype(page) != MIGRATE_MOVABLE)
 355		return false;
 356
 357	/* Ensure the start of the pageblock or zone is online and valid */
 358	block_pfn = pageblock_start_pfn(pfn);
 359	block_pfn = max(block_pfn, zone->zone_start_pfn);
 360	block_page = pfn_to_online_page(block_pfn);
 361	if (block_page) {
 362		page = block_page;
 363		pfn = block_pfn;
 364	}
 365
 366	/* Ensure the end of the pageblock or zone is online and valid */
 367	block_pfn = pageblock_end_pfn(pfn) - 1;
 368	block_pfn = min(block_pfn, zone_end_pfn(zone) - 1);
 369	end_page = pfn_to_online_page(block_pfn);
 370	if (!end_page)
 371		return false;
 372
 373	/*
 374	 * Only clear the hint if a sample indicates there is either a
 375	 * free page or an LRU page in the block. One or other condition
 376	 * is necessary for the block to be a migration source/target.
 377	 */
 378	do {
 379		if (check_source && PageLRU(page)) {
 380			clear_pageblock_skip(page);
 381			return true;
 382		}
 
 383
 384		if (check_target && PageBuddy(page)) {
 385			clear_pageblock_skip(page);
 386			return true;
 
 387		}
 388
 389		page += (1 << PAGE_ALLOC_COSTLY_ORDER);
 
 390	} while (page <= end_page);
 391
 392	return false;
 393}
 394
 395/*
 396 * This function is called to clear all cached information on pageblocks that
 397 * should be skipped for page isolation when the migrate and free page scanner
 398 * meet.
 399 */
 400static void __reset_isolation_suitable(struct zone *zone)
 401{
 402	unsigned long migrate_pfn = zone->zone_start_pfn;
 403	unsigned long free_pfn = zone_end_pfn(zone) - 1;
 404	unsigned long reset_migrate = free_pfn;
 405	unsigned long reset_free = migrate_pfn;
 406	bool source_set = false;
 407	bool free_set = false;
 408
 409	/* Only flush if a full compaction finished recently */
 410	if (!zone->compact_blockskip_flush)
 411		return;
 412
 413	zone->compact_blockskip_flush = false;
 414
 415	/*
 416	 * Walk the zone and update pageblock skip information. Source looks
 417	 * for PageLRU while target looks for PageBuddy. When the scanner
 418	 * is found, both PageBuddy and PageLRU are checked as the pageblock
 419	 * is suitable as both source and target.
 420	 */
 421	for (; migrate_pfn < free_pfn; migrate_pfn += pageblock_nr_pages,
 422					free_pfn -= pageblock_nr_pages) {
 423		cond_resched();
 424
 425		/* Update the migrate PFN */
 426		if (__reset_isolation_pfn(zone, migrate_pfn, true, source_set) &&
 427		    migrate_pfn < reset_migrate) {
 428			source_set = true;
 429			reset_migrate = migrate_pfn;
 430			zone->compact_init_migrate_pfn = reset_migrate;
 431			zone->compact_cached_migrate_pfn[0] = reset_migrate;
 432			zone->compact_cached_migrate_pfn[1] = reset_migrate;
 433		}
 434
 435		/* Update the free PFN */
 436		if (__reset_isolation_pfn(zone, free_pfn, free_set, true) &&
 437		    free_pfn > reset_free) {
 438			free_set = true;
 439			reset_free = free_pfn;
 440			zone->compact_init_free_pfn = reset_free;
 441			zone->compact_cached_free_pfn = reset_free;
 442		}
 443	}
 444
 445	/* Leave no distance if no suitable block was reset */
 446	if (reset_migrate >= reset_free) {
 447		zone->compact_cached_migrate_pfn[0] = migrate_pfn;
 448		zone->compact_cached_migrate_pfn[1] = migrate_pfn;
 449		zone->compact_cached_free_pfn = free_pfn;
 450	}
 451}
 452
 453void reset_isolation_suitable(pg_data_t *pgdat)
 454{
 455	int zoneid;
 456
 457	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
 458		struct zone *zone = &pgdat->node_zones[zoneid];
 459		if (!populated_zone(zone))
 460			continue;
 461
 462		__reset_isolation_suitable(zone);
 
 
 463	}
 464}
 465
 466/*
 467 * Sets the pageblock skip bit if it was clear. Note that this is a hint as
  468 * locks are not required for readers/writers. Returns true if it was already set.
 469 */
 470static bool test_and_set_skip(struct compact_control *cc, struct page *page)
 
 471{
 472	bool skip;
 473
 474	/* Do not update if skip hint is being ignored */
 475	if (cc->ignore_skip_hint)
 476		return false;
 477
 478	skip = get_pageblock_skip(page);
 479	if (!skip && !cc->no_set_skip_hint)
 480		set_pageblock_skip(page);
 481
 482	return skip;
 483}
 484
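/*
 * Advance the cached migration scanner positions past the pageblock holding
 * @pfn. The sync position is only updated for non-async compaction, and
 * nothing is done when skip hints are not being set.
 */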
 485static void update_cached_migrate(struct compact_control *cc, unsigned long pfn)
 486{
 487	struct zone *zone = cc->zone;
 488
 
 
 489	/* Set for isolation rather than compaction */
 490	if (cc->no_set_skip_hint)
 491		return;
 492
 493	pfn = pageblock_end_pfn(pfn);
 494
 495	/* Update where async and sync compaction should restart */
 496	if (pfn > zone->compact_cached_migrate_pfn[0])
 497		zone->compact_cached_migrate_pfn[0] = pfn;
 498	if (cc->mode != MIGRATE_ASYNC &&
 499	    pfn > zone->compact_cached_migrate_pfn[1])
 500		zone->compact_cached_migrate_pfn[1] = pfn;
 501}
 502
 503/*
 504 * If no pages were isolated then mark this pageblock to be skipped in the
 505 * future. The information is later cleared by __reset_isolation_suitable().
 506 */
 507static void update_pageblock_skip(struct compact_control *cc,
 508			struct page *page, unsigned long pfn)
 509{
 510	struct zone *zone = cc->zone;
 511
 512	if (cc->no_set_skip_hint)
 513		return;
 514
 515	set_pageblock_skip(page);
 516
 
 517	if (pfn < zone->compact_cached_free_pfn)
 518		zone->compact_cached_free_pfn = pfn;
 519}
 520#else
 521static inline bool isolation_suitable(struct compact_control *cc,
 522					struct page *page)
 523{
 524	return true;
 525}
 526
 527static inline bool pageblock_skip_persistent(struct page *page)
 528{
 529	return false;
 530}
 531
 532static inline void update_pageblock_skip(struct compact_control *cc,
 533			struct page *page, unsigned long pfn)
 534{
 535}
 536
 537static void update_cached_migrate(struct compact_control *cc, unsigned long pfn)
 538{
 539}
 540
 541static bool test_and_set_skip(struct compact_control *cc, struct page *page)
 
 542{
 543	return false;
 544}
 545#endif /* CONFIG_COMPACTION */
 546
 547/*
 548 * Compaction requires the taking of some coarse locks that are potentially
 549 * very heavily contended. For async compaction, trylock and record if the
 550 * lock is contended. The lock will still be acquired but compaction will
 551 * abort when the current block is finished regardless of success rate.
 552 * Sync compaction acquires the lock.
 553 *
 554 * Always returns true which makes it easier to track lock state in callers.
 555 */
 556static bool compact_lock_irqsave(spinlock_t *lock, unsigned long *flags,
 557						struct compact_control *cc)
 558	__acquires(lock)
 559{
 560	/* Track if the lock is contended in async mode */
 561	if (cc->mode == MIGRATE_ASYNC && !cc->contended) {
 562		if (spin_trylock_irqsave(lock, *flags))
 563			return true;
 564
 565		cc->contended = true;
 566	}
 567
 568	spin_lock_irqsave(lock, *flags);
 569	return true;
 570}
 571
 572/*
 573 * Compaction requires the taking of some coarse locks that are potentially
 574 * very heavily contended. The lock should be periodically unlocked to avoid
 575 * having disabled IRQs for a long time, even when there is nobody waiting on
 576 * the lock. It might also be that allowing the IRQs will result in
 577 * need_resched() becoming true. If scheduling is needed, compaction schedules.
 
 578 * Either compaction type will also abort if a fatal signal is pending.
 579 * In either case if the lock was locked, it is dropped and not regained.
 580 *
 581 * Returns true if compaction should abort due to fatal signal pending.
 582 * Returns false when compaction can continue.
 
 
 583 */
 584static bool compact_unlock_should_abort(spinlock_t *lock,
 585		unsigned long flags, bool *locked, struct compact_control *cc)
 586{
 587	if (*locked) {
 588		spin_unlock_irqrestore(lock, flags);
 589		*locked = false;
 590	}
 591
 592	if (fatal_signal_pending(current)) {
 593		cc->contended = true;
 594		return true;
 595	}
 596
 597	cond_resched();
 598
 599	return false;
 600}
 601
 602/*
 603 * Isolate free pages onto a private freelist. If @strict is true, will abort
 604 * returning 0 on any invalid PFNs or non-free pages inside of the pageblock
 605 * (even though it may still end up isolating some pages).
 606 */
 607static unsigned long isolate_freepages_block(struct compact_control *cc,
 608				unsigned long *start_pfn,
 609				unsigned long end_pfn,
 610				struct list_head *freelist,
 611				unsigned int stride,
 612				bool strict)
 613{
 614	int nr_scanned = 0, total_isolated = 0;
 615	struct page *page;
 616	unsigned long flags = 0;
 617	bool locked = false;
 618	unsigned long blockpfn = *start_pfn;
 619	unsigned int order;
 620
 621	/* Strict mode is for isolation, speed is secondary */
 622	if (strict)
 623		stride = 1;
 624
 625	page = pfn_to_page(blockpfn);
 626
 627	/* Isolate free pages. */
 628	for (; blockpfn < end_pfn; blockpfn += stride, page += stride) {
 629		int isolated;
 
 630
 631		/*
 632		 * Periodically drop the lock (if held) regardless of its
 633		 * contention, to give chance to IRQs. Abort if fatal signal
 634		 * pending.
 635		 */
 636		if (!(blockpfn % COMPACT_CLUSTER_MAX)
 637		    && compact_unlock_should_abort(&cc->zone->lock, flags,
 638								&locked, cc))
 639			break;
 640
 641		nr_scanned++;
 
 
 642
 643		/*
 644		 * For compound pages such as THP and hugetlbfs, we can save
 645		 * potentially a lot of iterations if we skip them at once.
 646		 * The check is racy, but we can consider only valid values
 647		 * and the only danger is skipping too much.
 648		 */
 649		if (PageCompound(page)) {
 650			const unsigned int order = compound_order(page);
 651
 652			if (blockpfn + (1UL << order) <= end_pfn) {
 653				blockpfn += (1UL << order) - 1;
 654				page += (1UL << order) - 1;
 655				nr_scanned += (1UL << order) - 1;
 656			}
 657
 658			goto isolate_fail;
 659		}
 660
 661		if (!PageBuddy(page))
 662			goto isolate_fail;
 663
 664		/* If we already hold the lock, we can skip some rechecking. */
 665		if (!locked) {
 666			locked = compact_lock_irqsave(&cc->zone->lock,
 667								&flags, cc);
 668
 669			/* Recheck this is a buddy page under lock */
 670			if (!PageBuddy(page))
 671				goto isolate_fail;
 672		}
 673
 674		/* Found a free page, will break it into order-0 pages */
 675		order = buddy_order(page);
 676		isolated = __isolate_free_page(page, order);
 677		if (!isolated)
 678			break;
 679		set_page_private(page, order);
 680
 681		nr_scanned += isolated - 1;
 682		total_isolated += isolated;
 683		cc->nr_freepages += isolated;
 684		list_add_tail(&page->lru, &freelist[order]);
 685
 686		if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
 687			blockpfn += isolated;
 688			break;
 689		}
 690		/* Advance to the end of split page */
 691		blockpfn += isolated - 1;
 692		page += isolated - 1;
 693		continue;
 694
 695isolate_fail:
 696		if (strict)
 697			break;
 
 
 698
 699	}
 700
 701	if (locked)
 702		spin_unlock_irqrestore(&cc->zone->lock, flags);
 703
 704	/*
 705	 * Be careful to not go outside of the pageblock.
 
 706	 */
 707	if (unlikely(blockpfn > end_pfn))
 708		blockpfn = end_pfn;
 709
 710	trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn,
 711					nr_scanned, total_isolated);
 712
 713	/* Record how far we have got within the block */
 714	*start_pfn = blockpfn;
 715
 716	/*
 717	 * If strict isolation is requested by CMA then check that all the
 718	 * pages requested were isolated. If there were any failures, 0 is
 719	 * returned and CMA will fail.
 720	 */
 721	if (strict && blockpfn < end_pfn)
 722		total_isolated = 0;
 723
 724	cc->total_free_scanned += nr_scanned;
 725	if (total_isolated)
 726		count_compact_events(COMPACTISOLATED, total_isolated);
 727	return total_isolated;
 728}
 729
 730/**
 731 * isolate_freepages_range() - isolate free pages.
 732 * @cc:        Compaction control structure.
 733 * @start_pfn: The first PFN to start isolating.
 734 * @end_pfn:   The one-past-last PFN.
 735 *
 736 * Non-free pages, invalid PFNs, or zone boundaries within the
 737 * [start_pfn, end_pfn) range are considered errors, cause function to
 738 * undo its actions and return zero.
 739 *
 740 * Otherwise, function returns one-past-the-last PFN of isolated page
  741 * (which may be greater than end_pfn if end fell in the middle of
 742 * a free page).
 743 */
 744unsigned long
 745isolate_freepages_range(struct compact_control *cc,
 746			unsigned long start_pfn, unsigned long end_pfn)
 747{
 748	unsigned long isolated, pfn, block_start_pfn, block_end_pfn;
 749	int order;
 750	struct list_head tmp_freepages[NR_PAGE_ORDERS];
 751
 752	for (order = 0; order < NR_PAGE_ORDERS; order++)
 753		INIT_LIST_HEAD(&tmp_freepages[order]);
 754
 755	pfn = start_pfn;
 756	block_start_pfn = pageblock_start_pfn(pfn);
 757	if (block_start_pfn < cc->zone->zone_start_pfn)
 758		block_start_pfn = cc->zone->zone_start_pfn;
 759	block_end_pfn = pageblock_end_pfn(pfn);
 760
 761	for (; pfn < end_pfn; pfn += isolated,
 762				block_start_pfn = block_end_pfn,
 763				block_end_pfn += pageblock_nr_pages) {
 764		/* Protect pfn from changing by isolate_freepages_block */
 765		unsigned long isolate_start_pfn = pfn;
 766
 
 
 767		/*
 768		 * pfn could pass the block_end_pfn if isolated freepage
 769		 * is more than pageblock order. In this case, we adjust
 770		 * scanning range to right one.
 771		 */
 772		if (pfn >= block_end_pfn) {
 773			block_start_pfn = pageblock_start_pfn(pfn);
 774			block_end_pfn = pageblock_end_pfn(pfn);
 
 775		}
 776
 777		block_end_pfn = min(block_end_pfn, end_pfn);
 778
 779		if (!pageblock_pfn_to_page(block_start_pfn,
 780					block_end_pfn, cc->zone))
 781			break;
 782
 783		isolated = isolate_freepages_block(cc, &isolate_start_pfn,
 784					block_end_pfn, tmp_freepages, 0, true);
 785
 786		/*
 787		 * In strict mode, isolate_freepages_block() returns 0 if
 788		 * there are any holes in the block (ie. invalid PFNs or
 789		 * non-free pages).
 790		 */
 791		if (!isolated)
 792			break;
 793
 794		/*
 795		 * If we managed to isolate pages, it is always (1 << n) *
 796		 * pageblock_nr_pages for some non-negative n.  (Max order
 797		 * page may span two pageblocks).
 798		 */
 799	}
 800
 801	if (pfn < end_pfn) {
 802		/* Loop terminated early, cleanup. */
 803		release_free_list(tmp_freepages);
 804		return 0;
 805	}
 806
 807	/* __isolate_free_page() does not map the pages */
 808	split_map_pages(tmp_freepages);
 809
 810	/* We don't use freelists for anything. */
 811	return pfn;
 812}
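/*
 * Example: a condensed, illustrative sketch (not a verbatim copy of any
 * caller) of how an alloc_contig_range()-style user might drive
 * isolate_freepages_range() once everything in the target range has been
 * migrated away; the compact_control fields shown are assumptions based on
 * how the structure is used above:
 *
 *	struct compact_control cc = {
 *		.order = -1,
 *		.zone = page_zone(pfn_to_page(start_pfn)),
 *		.mode = MIGRATE_SYNC,
 *		.ignore_skip_hint = true,
 *		.alloc_contig = true,
 *	};
 *
 *	if (!isolate_freepages_range(&cc, start_pfn, end_pfn))
 *		return -EBUSY;
 *
 * On success, every page in [start_pfn, end_pfn) has been isolated and
 * split to order-0, ready to be handed out by the caller.
 */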
 813
 814/* Similar to reclaim, but different enough that they don't share logic */
 815static bool too_many_isolated(struct compact_control *cc)
 816{
 817	pg_data_t *pgdat = cc->zone->zone_pgdat;
 818	bool too_many;
 819
 820	unsigned long active, inactive, isolated;
 821
 822	inactive = node_page_state(pgdat, NR_INACTIVE_FILE) +
 823			node_page_state(pgdat, NR_INACTIVE_ANON);
 824	active = node_page_state(pgdat, NR_ACTIVE_FILE) +
 825			node_page_state(pgdat, NR_ACTIVE_ANON);
 826	isolated = node_page_state(pgdat, NR_ISOLATED_FILE) +
 827			node_page_state(pgdat, NR_ISOLATED_ANON);
 828
 829	/*
 830	 * Allow GFP_NOFS to isolate past the limit set for regular
 831	 * compaction runs. This prevents an ABBA deadlock when other
 832	 * compactors have already isolated to the limit, but are
 833	 * blocked on filesystem locks held by the GFP_NOFS thread.
 834	 */
 835	if (cc->gfp_mask & __GFP_FS) {
 836		inactive >>= 3;
 837		active >>= 3;
 838	}
 839
 840	too_many = isolated > (inactive + active) / 2;
 841	if (!too_many)
 842		wake_throttle_isolated(pgdat);
 843
 844	return too_many;
 845}
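/*
 * Example (illustrative numbers): on a node with inactive + active ==
 * 160000 pages, a __GFP_FS compactor is throttled once more than roughly
 * (160000 >> 3) / 2 = 10000 pages sit on the isolated counters, while a
 * GFP_NOFS compactor keeps its full 160000 / 2 = 80000 page budget and can
 * therefore still make progress when the regular compactors are blocked.
 */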
 846
 847/**
 848 * skip_isolation_on_order() - determine when to skip folio isolation based on
 849 *			       folio order and compaction target order
 850 * @order:		to-be-isolated folio order
 851 * @target_order:	compaction target order
 852 *
 853 * This avoids unnecessary folio isolations during compaction.
 854 */
 855static bool skip_isolation_on_order(int order, int target_order)
 856{
 857	/*
 858	 * Unless we are performing global compaction (i.e.,
 859	 * is_via_compact_memory), skip any folios that are larger than the
 860	 * target order: we wouldn't be here if we'd have a free folio with
 861	 * the desired target_order, so migrating this folio would likely fail
 862	 * later.
 863	 */
 864	if (!is_via_compact_memory(target_order) && order >= target_order)
 865		return true;
 866	/*
 867	 * We limit memory compaction to pageblocks and won't try
 868	 * creating free blocks of memory that are larger than that.
 869	 */
 870	return order >= pageblock_order;
 871}
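/*
 * Example: with a typical pageblock_order of 9, a target_order of 4 means
 * order-4 and larger folios are skipped (migrating them cannot help form a
 * new order-4 free page), order-0..3 folios are still isolated, and full
 * compaction (target_order == -1) only skips folios of order 9 and above.
 */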
 872
 873/**
 874 * isolate_migratepages_block() - isolate all migrate-able pages within
 875 *				  a single pageblock
 876 * @cc:		Compaction control structure.
 877 * @low_pfn:	The first PFN to isolate
 878 * @end_pfn:	The one-past-the-last PFN to isolate, within same pageblock
 879 * @mode:	Isolation mode to be used.
 880 *
 881 * Isolate all pages that can be migrated from the range specified by
 882 * [low_pfn, end_pfn). The range is expected to be within same pageblock.
 883 * Returns an errno such as -EAGAIN or -EINTR (e.g. when a signal is pending
 884 * or the node is congested), -ENOMEM if a page could not be allocated, or 0.
 885 * cc->migrate_pfn will contain the next pfn to scan.
 886 *
 887 * The pages are isolated on cc->migratepages list (not required to be empty),
 888 * and cc->nr_migratepages is updated accordingly.
 889 */
 890static int
 891isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 892			unsigned long end_pfn, isolate_mode_t mode)
 893{
 894	pg_data_t *pgdat = cc->zone->zone_pgdat;
 895	unsigned long nr_scanned = 0, nr_isolated = 0;
 896	struct lruvec *lruvec;
 897	unsigned long flags = 0;
 898	struct lruvec *locked = NULL;
 899	struct folio *folio = NULL;
 900	struct page *page = NULL, *valid_page = NULL;
 901	struct address_space *mapping;
 902	unsigned long start_pfn = low_pfn;
 903	bool skip_on_failure = false;
 904	unsigned long next_skip_pfn = 0;
 905	bool skip_updated = false;
 906	int ret = 0;
 907
 908	cc->migrate_pfn = low_pfn;
 909
 910	/*
 911	 * Ensure that there are not too many pages isolated from the LRU
 912	 * list by either parallel reclaimers or compaction. If there are,
 913	 * delay for some time until fewer pages are isolated
 914	 */
 915	while (unlikely(too_many_isolated(cc))) {
 916		/* stop isolation if there are still pages not migrated */
 917		if (cc->nr_migratepages)
 918			return -EAGAIN;
 919
 920		/* async migration should just abort */
 921		if (cc->mode == MIGRATE_ASYNC)
 922			return -EAGAIN;
 923
 924		reclaim_throttle(pgdat, VMSCAN_THROTTLE_ISOLATED);
 925
 926		if (fatal_signal_pending(current))
 927			return -EINTR;
 928	}
 929
 930	cond_resched();
 931
 932	if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) {
 933		skip_on_failure = true;
 934		next_skip_pfn = block_end_pfn(low_pfn, cc->order);
 935	}
 936
 937	/* Time to isolate some pages for migration */
 938	for (; low_pfn < end_pfn; low_pfn++) {
 939		bool is_dirty, is_unevictable;
 940
 941		if (skip_on_failure && low_pfn >= next_skip_pfn) {
 942			/*
 943			 * We have isolated all migration candidates in the
 944			 * previous order-aligned block, and did not skip it due
 945			 * to failure. We should migrate the pages now and
 946			 * hopefully succeed compaction.
 947			 */
 948			if (nr_isolated)
 949				break;
 950
 951			/*
 952			 * We failed to isolate in the previous order-aligned
 953			 * block. Set the new boundary to the end of the
 954			 * current block. Note we can't simply increase
 955			 * next_skip_pfn by 1 << order, as low_pfn might have
 956			 * been incremented by a higher number due to skipping
 957			 * a compound or a high-order buddy page in the
 958			 * previous loop iteration.
 959			 */
 960			next_skip_pfn = block_end_pfn(low_pfn, cc->order);
 961		}
 962
 963		/*
 964		 * Periodically drop the lock (if held) regardless of its
 965		 * contention, to give chance to IRQs. Abort completely if
 966		 * a fatal signal is pending.
 967		 */
 968		if (!(low_pfn % COMPACT_CLUSTER_MAX)) {
 969			if (locked) {
 970				unlock_page_lruvec_irqrestore(locked, flags);
 971				locked = NULL;
 972			}
 973
 974			if (fatal_signal_pending(current)) {
 975				cc->contended = true;
 976				ret = -EINTR;
 977
 978				goto fatal_pending;
 979			}
 980
 981			cond_resched();
 982		}
 983
 984		nr_scanned++;
 985
 986		page = pfn_to_page(low_pfn);
 987
 988		/*
 989		 * Check if the pageblock has already been marked skipped.
 990		 * Only the first PFN is checked as the caller isolates
 991		 * COMPACT_CLUSTER_MAX at a time so the second call must
 992		 * not falsely conclude that the block should be skipped.
 993		 */
 994		if (!valid_page && (pageblock_aligned(low_pfn) ||
 995				    low_pfn == cc->zone->zone_start_pfn)) {
 996			if (!isolation_suitable(cc, page)) {
 997				low_pfn = end_pfn;
 998				folio = NULL;
 999				goto isolate_abort;
1000			}
1001			valid_page = page;
1002		}
1003
1004		if (PageHuge(page)) {
1005			/*
1006			 * skip hugetlbfs if we are not compacting for pages
1007			 * bigger than its order. THPs and other compound pages
1008			 * are handled below.
1009			 */
1010			if (!cc->alloc_contig) {
1011				const unsigned int order = compound_order(page);
1012
1013				if (order <= MAX_PAGE_ORDER) {
1014					low_pfn += (1UL << order) - 1;
1015					nr_scanned += (1UL << order) - 1;
1016				}
1017				goto isolate_fail;
1018			}
1019			/* for alloc_contig case */
1020			if (locked) {
1021				unlock_page_lruvec_irqrestore(locked, flags);
1022				locked = NULL;
1023			}
1024
1025			ret = isolate_or_dissolve_huge_page(page, &cc->migratepages);
1026
1027			/*
1028			 * Fail isolation in case isolate_or_dissolve_huge_page()
1029			 * reports an error. In case of -ENOMEM, abort right away.
1030			 */
1031			if (ret < 0) {
1032				 /* Do not report -EBUSY down the chain */
1033				if (ret == -EBUSY)
1034					ret = 0;
1035				low_pfn += compound_nr(page) - 1;
1036				nr_scanned += compound_nr(page) - 1;
1037				goto isolate_fail;
1038			}
1039
1040			if (PageHuge(page)) {
1041				/*
1042				 * Hugepage was successfully isolated and placed
1043				 * on the cc->migratepages list.
1044				 */
1045				folio = page_folio(page);
1046				low_pfn += folio_nr_pages(folio) - 1;
1047				goto isolate_success_no_list;
1048			}
1049
1050			/*
1051			 * Ok, the hugepage was dissolved. Now these pages are
1052			 * Buddy and cannot be re-allocated because they are
1053			 * isolated. Fall-through as the check below handles
1054			 * Buddy pages.
1055			 */
1056		}
1057
1058		/*
1059		 * Skip if free. We read page order here without zone lock
1060		 * which is generally unsafe, but the race window is small and
1061		 * the worst thing that can happen is that we skip some
1062		 * potential isolation targets.
1063		 */
1064		if (PageBuddy(page)) {
1065			unsigned long freepage_order = buddy_order_unsafe(page);
1066
1067			/*
1068			 * Without lock, we cannot be sure that what we got is
1069			 * a valid page order. Consider only values in the
1070			 * valid order range to prevent low_pfn overflow.
1071			 */
1072			if (freepage_order > 0 && freepage_order <= MAX_PAGE_ORDER) {
1073				low_pfn += (1UL << freepage_order) - 1;
1074				nr_scanned += (1UL << freepage_order) - 1;
1075			}
1076			continue;
1077		}
1078
1079		/*
1080		 * Regardless of being on LRU, compound pages such as THP
1081		 * (hugetlbfs is handled above) are not to be compacted unless
1082		 * we are attempting an allocation larger than the compound
1083		 * page size. We can potentially save a lot of iterations if we
1084		 * skip them at once. The check is racy, but we can consider
1085		 * only valid values and the only danger is skipping too much.
1086		 */
1087		if (PageCompound(page) && !cc->alloc_contig) {
1088			const unsigned int order = compound_order(page);
1089
1090			/* Skip based on page order and compaction target order. */
1091			if (skip_isolation_on_order(order, cc->order)) {
1092				if (order <= MAX_PAGE_ORDER) {
1093					low_pfn += (1UL << order) - 1;
1094					nr_scanned += (1UL << order) - 1;
1095				}
1096				goto isolate_fail;
1097			}
1098		}
1099
1100		/*
1101		 * Check may be lockless but that's ok as we recheck later.
1102		 * It's possible to migrate LRU and non-lru movable pages.
1103		 * Skip any other type of page
1104		 */
1105		if (!PageLRU(page)) {
1106			/*
1107			 * __PageMovable can return false positive so we need
1108			 * to verify it under page_lock.
1109			 */
1110			if (unlikely(__PageMovable(page)) &&
1111					!PageIsolated(page)) {
1112				if (locked) {
1113					unlock_page_lruvec_irqrestore(locked, flags);
1114					locked = NULL;
1115				}
1116
1117				if (isolate_movable_page(page, mode)) {
1118					folio = page_folio(page);
1119					goto isolate_success;
1120				}
1121			}
1122
1123			goto isolate_fail;
1124		}
1125
1126		/*
1127		 * Be careful not to clear PageLRU until after we're
1128		 * sure the page is not being freed elsewhere -- the
1129		 * page release code relies on it.
1130		 */
1131		folio = folio_get_nontail_page(page);
1132		if (unlikely(!folio))
1133			goto isolate_fail;
1134
1135		/*
1136		 * Migration will fail if an anonymous page is pinned in memory,
1137		 * so avoid taking lru_lock and isolating it unnecessarily in an
1138		 * admittedly racy check.
1139		 */
1140		mapping = folio_mapping(folio);
1141		if (!mapping && (folio_ref_count(folio) - 1) > folio_mapcount(folio))
1142			goto isolate_fail_put;
1143
1144		/*
1145		 * Only allow to migrate anonymous pages in GFP_NOFS context
1146		 * because those do not depend on fs locks.
1147		 */
1148		if (!(cc->gfp_mask & __GFP_FS) && mapping)
1149			goto isolate_fail_put;
1150
1151		/* Only take pages on LRU: a check now makes later tests safe */
1152		if (!folio_test_lru(folio))
1153			goto isolate_fail_put;
1154
1155		is_unevictable = folio_test_unevictable(folio);
1156
1157		/* Compaction might skip unevictable pages but CMA takes them */
1158		if (!(mode & ISOLATE_UNEVICTABLE) && is_unevictable)
1159			goto isolate_fail_put;
1160
1161		/*
1162		 * To minimise LRU disruption, the caller can indicate with
1163		 * ISOLATE_ASYNC_MIGRATE that it only wants to isolate pages
1164		 * it will be able to migrate without blocking - clean pages
1165		 * for the most part.  PageWriteback would require blocking.
1166		 */
1167		if ((mode & ISOLATE_ASYNC_MIGRATE) && folio_test_writeback(folio))
1168			goto isolate_fail_put;
1169
1170		is_dirty = folio_test_dirty(folio);
1171
1172		if (((mode & ISOLATE_ASYNC_MIGRATE) && is_dirty) ||
1173		    (mapping && is_unevictable)) {
1174			bool migrate_dirty = true;
1175			bool is_unmovable;
1176
1177			/*
1178			 * Only folios without mappings or that have
1179			 * a ->migrate_folio callback are possible to migrate
1180			 * without blocking.
1181			 *
1182			 * Folios from unmovable mappings are not migratable.
1183			 *
1184			 * However, we can be racing with truncation, which can
1185			 * free the mapping that we need to check. Truncation
1186			 * holds the folio lock until after the folio is removed
1187			 * from the page cache, so holding it ourselves is sufficient.
1188			 *
1189			 * To avoid locking the folio just to check unmovable,
1190			 * assume every unmovable folio is also unevictable,
1191			 * which is a cheaper test.  If our assumption goes
1192			 * wrong, it's not a correctness bug, just potentially
1193			 * wasted cycles.
1194			 */
1195			if (!folio_trylock(folio))
1196				goto isolate_fail_put;
1197
1198			mapping = folio_mapping(folio);
1199			if ((mode & ISOLATE_ASYNC_MIGRATE) && is_dirty) {
1200				migrate_dirty = !mapping ||
1201						mapping->a_ops->migrate_folio;
1202			}
1203			is_unmovable = mapping && mapping_unmovable(mapping);
1204			folio_unlock(folio);
1205			if (!migrate_dirty || is_unmovable)
1206				goto isolate_fail_put;
1207		}
1208
1209		/* Try isolate the folio */
1210		if (!folio_test_clear_lru(folio))
1211			goto isolate_fail_put;
1212
1213		lruvec = folio_lruvec(folio);
1214
1215		/* If we already hold the lock, we can skip some rechecking */
1216		if (lruvec != locked) {
1217			if (locked)
1218				unlock_page_lruvec_irqrestore(locked, flags);
1219
1220			compact_lock_irqsave(&lruvec->lru_lock, &flags, cc);
1221			locked = lruvec;
1222
1223			lruvec_memcg_debug(lruvec, folio);
1224
1225			/*
1226			 * Try get exclusive access under lock. If marked for
1227			 * skip, the scan is aborted unless the current context
1228			 * is a rescan to reach the end of the pageblock.
1229			 */
1230			if (!skip_updated && valid_page) {
1231				skip_updated = true;
1232				if (test_and_set_skip(cc, valid_page) &&
1233				    !cc->finish_pageblock) {
1234					low_pfn = end_pfn;
1235					goto isolate_abort;
1236				}
1237			}
1238
1239			/*
1240			 * Check LRU folio order under the lock
1241			 */
1242			if (unlikely(skip_isolation_on_order(folio_order(folio),
1243							     cc->order) &&
1244				     !cc->alloc_contig)) {
1245				low_pfn += folio_nr_pages(folio) - 1;
1246				nr_scanned += folio_nr_pages(folio) - 1;
1247				folio_set_lru(folio);
1248				goto isolate_fail_put;
1249			}
1250		}
1251
1252		/* The folio is taken off the LRU */
1253		if (folio_test_large(folio))
1254			low_pfn += folio_nr_pages(folio) - 1;
1255
1256		/* Successfully isolated */
1257		lruvec_del_folio(lruvec, folio);
1258		node_stat_mod_folio(folio,
1259				NR_ISOLATED_ANON + folio_is_file_lru(folio),
1260				folio_nr_pages(folio));
1261
1262isolate_success:
1263		list_add(&folio->lru, &cc->migratepages);
1264isolate_success_no_list:
1265		cc->nr_migratepages += folio_nr_pages(folio);
1266		nr_isolated += folio_nr_pages(folio);
1267		nr_scanned += folio_nr_pages(folio) - 1;
1268
1269		/*
1270		 * Avoid isolating too much unless this block is being
1271		 * fully scanned (e.g. dirty/writeback pages, parallel allocation)
1272		 * or a lock is contended. For contention, isolate quickly to
1273		 * potentially remove one source of contention.
1274		 */
1275		if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX &&
1276		    !cc->finish_pageblock && !cc->contended) {
1277			++low_pfn;
1278			break;
1279		}
1280
1281		continue;
1282
1283isolate_fail_put:
1284		/* Avoid potential deadlock in freeing page under lru_lock */
1285		if (locked) {
1286			unlock_page_lruvec_irqrestore(locked, flags);
1287			locked = NULL;
1288		}
1289		folio_put(folio);
1290
1291isolate_fail:
1292		if (!skip_on_failure && ret != -ENOMEM)
1293			continue;
1294
1295		/*
1296		 * We have isolated some pages, but then failed. Release them
1297		 * instead of migrating, as we cannot form the cc->order buddy
1298		 * page anyway.
1299		 */
1300		if (nr_isolated) {
1301			if (locked) {
1302				unlock_page_lruvec_irqrestore(locked, flags);
1303				locked = NULL;
1304			}
1305			putback_movable_pages(&cc->migratepages);
1306			cc->nr_migratepages = 0;
1307			nr_isolated = 0;
1308		}
1309
1310		if (low_pfn < next_skip_pfn) {
1311			low_pfn = next_skip_pfn - 1;
1312			/*
1313			 * The check near the loop beginning would have updated
1314			 * next_skip_pfn too, but this is a bit simpler.
1315			 */
1316			next_skip_pfn += 1UL << cc->order;
1317		}
1318
1319		if (ret == -ENOMEM)
1320			break;
1321	}
1322
1323	/*
1324	 * The PageBuddy() check could have potentially brought us outside
1325	 * the range to be scanned.
1326	 */
1327	if (unlikely(low_pfn > end_pfn))
1328		low_pfn = end_pfn;
1329
1330	folio = NULL;
1331
1332isolate_abort:
1333	if (locked)
1334		unlock_page_lruvec_irqrestore(locked, flags);
1335	if (folio) {
1336		folio_set_lru(folio);
1337		folio_put(folio);
1338	}
1339
1340	/*
1341	 * Update the cached scanner pfn once the pageblock has been scanned.
1342	 * Pages will either be migrated in which case there is no point
1343	 * scanning in the near future or migration failed in which case the
1344	 * failure reason may persist. The block is marked for skipping if
1345	 * there were no pages isolated in the block or if the block is
1346	 * rescanned twice in a row.
1347	 */
1348	if (low_pfn == end_pfn && (!nr_isolated || cc->finish_pageblock)) {
1349		if (!cc->no_set_skip_hint && valid_page && !skip_updated)
1350			set_pageblock_skip(valid_page);
1351		update_cached_migrate(cc, low_pfn);
1352	}
1353
1354	trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
1355						nr_scanned, nr_isolated);
1356
1357fatal_pending:
1358	cc->total_migrate_scanned += nr_scanned;
1359	if (nr_isolated)
1360		count_compact_events(COMPACTISOLATED, nr_isolated);
1361
1362	cc->migrate_pfn = low_pfn;
1363
1364	return ret;
1365}
1366
1367/**
1368 * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
1369 * @cc:        Compaction control structure.
1370 * @start_pfn: The first PFN to start isolating.
1371 * @end_pfn:   The one-past-last PFN.
1372 *
1373 * Returns -EAGAIN when contended, -EINTR if a signal is pending, -ENOMEM
1374 * if a page could not be allocated, or 0.
1375 */
1376int
1377isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
1378							unsigned long end_pfn)
1379{
1380	unsigned long pfn, block_start_pfn, block_end_pfn;
1381	int ret = 0;
1382
1383	/* Scan block by block. First and last block may be incomplete */
1384	pfn = start_pfn;
1385	block_start_pfn = pageblock_start_pfn(pfn);
1386	if (block_start_pfn < cc->zone->zone_start_pfn)
1387		block_start_pfn = cc->zone->zone_start_pfn;
1388	block_end_pfn = pageblock_end_pfn(pfn);
1389
1390	for (; pfn < end_pfn; pfn = block_end_pfn,
1391				block_start_pfn = block_end_pfn,
1392				block_end_pfn += pageblock_nr_pages) {
1393
1394		block_end_pfn = min(block_end_pfn, end_pfn);
1395
1396		if (!pageblock_pfn_to_page(block_start_pfn,
1397					block_end_pfn, cc->zone))
1398			continue;
1399
1400		ret = isolate_migratepages_block(cc, pfn, block_end_pfn,
1401						 ISOLATE_UNEVICTABLE);
1402
1403		if (ret)
1404			break;
1405
1406		if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX)
1407			break;
1408	}
1409
1410	return ret;
1411}
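/*
 * Example: a rough sketch of the loop an alloc_contig-style caller builds
 * on top of isolate_migratepages_range(); the real caller also handles
 * retries, LRU draining and failure accounting:
 *
 *	while (pfn < end_pfn) {
 *		ret = isolate_migratepages_range(&cc, pfn, end_pfn);
 *		if (ret && ret != -EAGAIN)
 *			break;
 *		pfn = cc.migrate_pfn;
 *		... migrate or put back everything on cc.migratepages ...
 *	}
 */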
1412
1413#endif /* CONFIG_COMPACTION || CONFIG_CMA */
1414#ifdef CONFIG_COMPACTION
1415
1416static bool suitable_migration_source(struct compact_control *cc,
1417							struct page *page)
1418{
1419	int block_mt;
1420
1421	if (pageblock_skip_persistent(page))
1422		return false;
1423
1424	if ((cc->mode != MIGRATE_ASYNC) || !cc->direct_compaction)
1425		return true;
1426
1427	block_mt = get_pageblock_migratetype(page);
1428
1429	if (cc->migratetype == MIGRATE_MOVABLE)
1430		return is_migrate_movable(block_mt);
1431	else
1432		return block_mt == cc->migratetype;
1433}
1434
1435/* Returns true if the page is within a block suitable for migration to */
1436static bool suitable_migration_target(struct compact_control *cc,
1437							struct page *page)
1438{
1439	/* If the page is a large free page, then disallow migration */
1440	if (PageBuddy(page)) {
1441		int order = cc->order > 0 ? cc->order : pageblock_order;
1442
1443		/*
1444		 * We are checking page_order without zone->lock taken. But
1445		 * the only small danger is that we skip a potentially suitable
1446		 * pageblock, so it's not worth checking the order for a valid range.
1447		 */
1448		if (buddy_order_unsafe(page) >= order)
1449			return false;
1450	}
1451
1452	if (cc->ignore_block_suitable)
1453		return true;
1454
1455	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
1456	if (is_migrate_movable(get_pageblock_migratetype(page)))
1457		return true;
1458
1459	/* Otherwise skip the block */
1460	return false;
1461}
1462
1463static inline unsigned int
1464freelist_scan_limit(struct compact_control *cc)
1465{
1466	unsigned short shift = BITS_PER_LONG - 1;
1467
1468	return (COMPACT_CLUSTER_MAX >> min(shift, cc->fast_search_fail)) + 1;
1469}
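/*
 * Example: with COMPACT_CLUSTER_MAX at its usual value of 32, the limit is
 * 33 freelist entries while fast_search_fail is 0, then 17 after one recent
 * failure, 9 after two, and it bottoms out at 1 once fast_search_fail
 * reaches 6 (the min() against BITS_PER_LONG - 1 only guards the shift).
 */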
1470
1471/*
1472 * Test whether the free scanner has reached the same or lower pageblock than
1473 * the migration scanner, and compaction should thus terminate.
1474 */
1475static inline bool compact_scanners_met(struct compact_control *cc)
1476{
1477	return (cc->free_pfn >> pageblock_order)
1478		<= (cc->migrate_pfn >> pageblock_order);
1479}
1480
1481/*
1482 * Used when scanning for a suitable migration target which scans freelists
1483 * in reverse. Reorders the list so that the unscanned pages are scanned
1484 * first on the next iteration of the free scanner.
1485 */
1486static void
1487move_freelist_head(struct list_head *freelist, struct page *freepage)
1488{
1489	LIST_HEAD(sublist);
1490
1491	if (!list_is_first(&freepage->buddy_list, freelist)) {
1492		list_cut_before(&sublist, freelist, &freepage->buddy_list);
1493		list_splice_tail(&sublist, freelist);
1494	}
1495}
1496
1497/*
1498 * Similar to move_freelist_head except used by the migration scanner
1499 * when scanning forward. It's possible for these list operations to
1500 * move against each other if they search the free list exactly in
1501 * lockstep.
1502 */
1503static void
1504move_freelist_tail(struct list_head *freelist, struct page *freepage)
1505{
1506	LIST_HEAD(sublist);
1507
1508	if (!list_is_last(&freepage->buddy_list, freelist)) {
1509		list_cut_position(&sublist, freelist, &freepage->buddy_list);
1510		list_splice_tail(&sublist, freelist);
1511	}
1512}
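/*
 * Example: for a freelist A-B-C-D-E, move_freelist_head(list, C) cuts A-B
 * off the front and splices it to the tail, giving C-D-E-A-B, so the next
 * reverse walk of the free scanner meets the not-yet-scanned B and A first.
 * move_freelist_tail(list, C) cuts A-B-C instead, giving D-E-A-B-C, so the
 * next forward walk of the migration scanner meets D and E first.
 */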
1513
1514static void
1515fast_isolate_around(struct compact_control *cc, unsigned long pfn)
1516{
1517	unsigned long start_pfn, end_pfn;
1518	struct page *page;
1519
1520	/* Do not search around if there are enough pages already */
1521	if (cc->nr_freepages >= cc->nr_migratepages)
1522		return;
1523
1524	/* Minimise scanning during async compaction */
1525	if (cc->direct_compaction && cc->mode == MIGRATE_ASYNC)
1526		return;
1527
1528	/* Pageblock boundaries */
1529	start_pfn = max(pageblock_start_pfn(pfn), cc->zone->zone_start_pfn);
1530	end_pfn = min(pageblock_end_pfn(pfn), zone_end_pfn(cc->zone));
1531
1532	page = pageblock_pfn_to_page(start_pfn, end_pfn, cc->zone);
1533	if (!page)
1534		return;
1535
1536	isolate_freepages_block(cc, &start_pfn, end_pfn, cc->freepages, 1, false);
1537
1538	/* Skip this pageblock in the future as it's full or nearly full */
1539	if (start_pfn == end_pfn && !cc->no_set_skip_hint)
1540		set_pageblock_skip(page);
1541}
1542
1543/* Search orders in round-robin fashion */
1544static int next_search_order(struct compact_control *cc, int order)
1545{
1546	order--;
1547	if (order < 0)
1548		order = cc->order - 1;
1549
1550	/* Search wrapped around? */
1551	if (order == cc->search_order) {
1552		cc->search_order--;
1553		if (cc->search_order < 0)
1554			cc->search_order = cc->order - 1;
1555		return -1;
1556	}
1557
1558	return order;
1559}
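/*
 * Example: with cc->order == 5 and cc->search_order == 2, the caller's loop
 * visits the order-2, 1, 0, 4 and 3 free lists in that sequence before
 * next_search_order() returns -1; if the whole walk found nothing,
 * cc->search_order is left one lower so the next attempt starts elsewhere.
 */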
1560
1561static void fast_isolate_freepages(struct compact_control *cc)
1562{
1563	unsigned int limit = max(1U, freelist_scan_limit(cc) >> 1);
1564	unsigned int nr_scanned = 0, total_isolated = 0;
1565	unsigned long low_pfn, min_pfn, highest = 0;
1566	unsigned long nr_isolated = 0;
1567	unsigned long distance;
1568	struct page *page = NULL;
1569	bool scan_start = false;
1570	int order;
1571
1572	/* Full compaction passes in a negative order */
1573	if (cc->order <= 0)
1574		return;
1575
1576	/*
1577	 * If starting the scan, use a deeper search and use the highest
1578	 * PFN found if a suitable one is not found.
1579	 */
1580	if (cc->free_pfn >= cc->zone->compact_init_free_pfn) {
1581		limit = pageblock_nr_pages >> 1;
1582		scan_start = true;
1583	}
1584
1585	/*
1586	 * Preferred point is in the top quarter of the scan space but take
1587	 * a pfn from the top half if the search is problematic.
1588	 */
1589	distance = (cc->free_pfn - cc->migrate_pfn);
1590	low_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 2));
1591	min_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 1));
1592
1593	if (WARN_ON_ONCE(min_pfn > low_pfn))
1594		low_pfn = min_pfn;
1595
1596	/*
1597	 * Search starts from the last successful isolation order or the next
1598	 * order to search after a previous failure
1599	 */
1600	cc->search_order = min_t(unsigned int, cc->order - 1, cc->search_order);
1601
1602	for (order = cc->search_order;
1603	     !page && order >= 0;
1604	     order = next_search_order(cc, order)) {
1605		struct free_area *area = &cc->zone->free_area[order];
1606		struct list_head *freelist;
1607		struct page *freepage;
1608		unsigned long flags;
1609		unsigned int order_scanned = 0;
1610		unsigned long high_pfn = 0;
1611
1612		if (!area->nr_free)
1613			continue;
1614
1615		spin_lock_irqsave(&cc->zone->lock, flags);
1616		freelist = &area->free_list[MIGRATE_MOVABLE];
1617		list_for_each_entry_reverse(freepage, freelist, buddy_list) {
1618			unsigned long pfn;
1619
1620			order_scanned++;
1621			nr_scanned++;
1622			pfn = page_to_pfn(freepage);
1623
1624			if (pfn >= highest)
1625				highest = max(pageblock_start_pfn(pfn),
1626					      cc->zone->zone_start_pfn);
1627
1628			if (pfn >= low_pfn) {
1629				cc->fast_search_fail = 0;
1630				cc->search_order = order;
1631				page = freepage;
1632				break;
1633			}
1634
1635			if (pfn >= min_pfn && pfn > high_pfn) {
1636				high_pfn = pfn;
1637
1638				/* Shorten the scan if a candidate is found */
1639				limit >>= 1;
1640			}
1641
1642			if (order_scanned >= limit)
1643				break;
1644		}
1645
1646		/* Use a maximum candidate pfn if a preferred one was not found */
1647		if (!page && high_pfn) {
1648			page = pfn_to_page(high_pfn);
1649
1650			/* Update freepage for the list reorder below */
1651			freepage = page;
1652		}
1653
1654		/* Reorder so that a future search skips recent pages */
1655		move_freelist_head(freelist, freepage);
1656
1657		/* Isolate the page if available */
1658		if (page) {
1659			if (__isolate_free_page(page, order)) {
1660				set_page_private(page, order);
1661				nr_isolated = 1 << order;
1662				nr_scanned += nr_isolated - 1;
1663				total_isolated += nr_isolated;
1664				cc->nr_freepages += nr_isolated;
1665				list_add_tail(&page->lru, &cc->freepages[order]);
1666				count_compact_events(COMPACTISOLATED, nr_isolated);
1667			} else {
1668				/* If isolation fails, abort the search */
1669				order = cc->search_order + 1;
1670				page = NULL;
1671			}
1672		}
1673
1674		spin_unlock_irqrestore(&cc->zone->lock, flags);
1675
1676		/* Skip fast search if enough freepages isolated */
1677		if (cc->nr_freepages >= cc->nr_migratepages)
1678			break;
1679
1680		/*
1681		 * Smaller scan on next order so the total scan is related
1682		 * to freelist_scan_limit.
1683		 */
1684		if (order_scanned >= limit)
1685			limit = max(1U, limit >> 1);
1686	}
1687
1688	trace_mm_compaction_fast_isolate_freepages(min_pfn, cc->free_pfn,
1689						   nr_scanned, total_isolated);
1690
1691	if (!page) {
1692		cc->fast_search_fail++;
1693		if (scan_start) {
1694			/*
1695			 * Use the highest PFN found above min. If one was
1696			 * not found, be pessimistic for direct compaction
1697			 * and use the min mark.
1698			 */
1699			if (highest >= min_pfn) {
1700				page = pfn_to_page(highest);
1701				cc->free_pfn = highest;
1702			} else {
1703				if (cc->direct_compaction && pfn_valid(min_pfn)) {
1704					page = pageblock_pfn_to_page(min_pfn,
1705						min(pageblock_end_pfn(min_pfn),
1706						    zone_end_pfn(cc->zone)),
1707						cc->zone);
1708					if (page && !suitable_migration_target(cc, page))
1709						page = NULL;
1710
1711					cc->free_pfn = min_pfn;
1712				}
1713			}
1714		}
1715	}
1716
1717	if (highest && highest >= cc->zone->compact_cached_free_pfn) {
1718		highest -= pageblock_nr_pages;
1719		cc->zone->compact_cached_free_pfn = highest;
1720	}
1721
1722	cc->total_free_scanned += nr_scanned;
1723	if (!page)
1724		return;
1725
1726	low_pfn = page_to_pfn(page);
1727	fast_isolate_around(cc, low_pfn);
1728}
1729
1730/*
1731 * Based on information in the current compact_control, find blocks
1732 * suitable for isolating free pages from and then isolate them.
1733 */
1734static void isolate_freepages(struct compact_control *cc)
1735{
1736	struct zone *zone = cc->zone;
1737	struct page *page;
1738	unsigned long block_start_pfn;	/* start of current pageblock */
1739	unsigned long isolate_start_pfn; /* exact pfn we start at */
1740	unsigned long block_end_pfn;	/* end of current pageblock */
1741	unsigned long low_pfn;	     /* lowest pfn scanner is able to scan */
1742	unsigned int stride;
1743
1744	/* Try a small search of the free lists for a candidate */
1745	fast_isolate_freepages(cc);
1746	if (cc->nr_freepages)
1747		return;
1748
1749	/*
1750	 * Initialise the free scanner. The starting point is where we last
1751	 * successfully isolated from, zone-cached value, or the end of the
1752	 * zone when isolating for the first time. For looping we also need
1753	 * this pfn aligned down to the pageblock boundary, because we do
1754	 * block_start_pfn -= pageblock_nr_pages in the for loop.
1755	 * For ending point, take care when isolating in last pageblock of a
1756	 * zone which ends in the middle of a pageblock.
1757	 * The low boundary is the end of the pageblock the migration scanner
1758	 * is using.
1759	 */
1760	isolate_start_pfn = cc->free_pfn;
1761	block_start_pfn = pageblock_start_pfn(isolate_start_pfn);
1762	block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
1763						zone_end_pfn(zone));
1764	low_pfn = pageblock_end_pfn(cc->migrate_pfn);
1765	stride = cc->mode == MIGRATE_ASYNC ? COMPACT_CLUSTER_MAX : 1;
1766
1767	/*
1768	 * Isolate free pages until enough are available to migrate the
1769	 * pages on cc->migratepages. We stop searching if the migrate
1770	 * and free page scanners meet or enough free pages are isolated.
1771	 */
1772	for (; block_start_pfn >= low_pfn;
1773				block_end_pfn = block_start_pfn,
1774				block_start_pfn -= pageblock_nr_pages,
1775				isolate_start_pfn = block_start_pfn) {
1776		unsigned long nr_isolated;
1777
1778		/*
1779		 * This can iterate a massively long zone without finding any
1780		 * suitable migration targets, so periodically check resched.
1781		 */
1782		if (!(block_start_pfn % (COMPACT_CLUSTER_MAX * pageblock_nr_pages)))
1783			cond_resched();
1784
1785		page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
1786									zone);
1787		if (!page) {
1788			unsigned long next_pfn;
1789
1790			next_pfn = skip_offline_sections_reverse(block_start_pfn);
1791			if (next_pfn)
1792				block_start_pfn = max(next_pfn, low_pfn);
1793
1794			continue;
1795		}
1796
1797		/* Check the block is suitable for migration */
1798		if (!suitable_migration_target(cc, page))
1799			continue;
1800
1801		/* If isolation recently failed, do not retry */
1802		if (!isolation_suitable(cc, page))
1803			continue;
1804
1805		/* Found a block suitable for isolating free pages from. */
1806		nr_isolated = isolate_freepages_block(cc, &isolate_start_pfn,
1807					block_end_pfn, cc->freepages, stride, false);
1808
1809		/* Update the skip hint if the full pageblock was scanned */
1810		if (isolate_start_pfn == block_end_pfn)
1811			update_pageblock_skip(cc, page, block_start_pfn -
1812					      pageblock_nr_pages);
1813
1814		/* Are enough freepages isolated? */
1815		if (cc->nr_freepages >= cc->nr_migratepages) {
1816			if (isolate_start_pfn >= block_end_pfn) {
1817				/*
1818				 * Restart at previous pageblock if more
1819				 * freepages can be isolated next time.
1820				 */
1821				isolate_start_pfn =
1822					block_start_pfn - pageblock_nr_pages;
1823			}
1824			break;
1825		} else if (isolate_start_pfn < block_end_pfn) {
1826			/*
1827			 * If isolation failed early, do not continue
1828			 * needlessly.
1829			 */
1830			break;
1831		}
1832
1833		/* Adjust stride depending on isolation */
1834		if (nr_isolated) {
1835			stride = 1;
1836			continue;
1837		}
1838		stride = min_t(unsigned int, COMPACT_CLUSTER_MAX, stride << 1);
1839	}
1840
1841	/*
1842	 * Record where the free scanner will restart next time. Either we
1843	 * broke from the loop and set isolate_start_pfn based on the last
1844	 * call to isolate_freepages_block(), or we met the migration scanner
1845	 * and the loop terminated due to isolate_start_pfn < low_pfn
1846	 */
1847	cc->free_pfn = isolate_start_pfn;
1848}
1849
1850/*
1851 * This is a migrate-callback that "allocates" freepages by taking pages
1852 * from the isolated freelists in the block we are migrating to.
1853 */
1854static struct folio *compaction_alloc(struct folio *src, unsigned long data)
1855{
1856	struct compact_control *cc = (struct compact_control *)data;
1857	struct folio *dst;
1858	int order = folio_order(src);
1859	bool has_isolated_pages = false;
1860	int start_order;
1861	struct page *freepage;
1862	unsigned long size;
1863
1864again:
1865	for (start_order = order; start_order < NR_PAGE_ORDERS; start_order++)
1866		if (!list_empty(&cc->freepages[start_order]))
1867			break;
1868
1869	/* no free pages in the list */
1870	if (start_order == NR_PAGE_ORDERS) {
1871		if (has_isolated_pages)
1872			return NULL;
1873		isolate_freepages(cc);
1874		has_isolated_pages = true;
1875		goto again;
1876	}
1877
1878	freepage = list_first_entry(&cc->freepages[start_order], struct page,
1879				lru);
1880	size = 1 << start_order;
1881
1882	list_del(&freepage->lru);
1883
1884	while (start_order > order) {
1885		start_order--;
1886		size >>= 1;
1887
1888		list_add(&freepage[size].lru, &cc->freepages[start_order]);
1889		set_page_private(&freepage[size], start_order);
1890	}
1891	dst = (struct folio *)freepage;
1892
1893	post_alloc_hook(&dst->page, order, __GFP_MOVABLE);
1894	if (order)
1895		prep_compound_page(&dst->page, order);
1896	cc->nr_freepages -= 1 << order;
1897	cc->nr_migratepages -= 1 << order;
1898	return page_rmappable_folio(&dst->page);
1899}
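/*
 * Example: if the source folio is order 1 and the lowest non-empty isolated
 * list holds an order-3 free page at pfn P, the splitting loop above pushes
 * P+4..P+7 back as an order-2 entry and P+2..P+3 as an order-1 entry, then
 * preps and returns P..P+1 as the order-1 destination folio.
 */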
1900
1901/*
1902 * This is a migrate-callback that "frees" freepages back to the isolated
1903 * freelist.  All pages on the freelist are from the same zone, so there is no
1904 * special handling needed for NUMA.
1905 */
1906static void compaction_free(struct folio *dst, unsigned long data)
1907{
1908	struct compact_control *cc = (struct compact_control *)data;
1909	int order = folio_order(dst);
1910	struct page *page = &dst->page;
1911
1912	if (folio_put_testzero(dst)) {
1913		free_pages_prepare(page, order);
1914		list_add(&dst->lru, &cc->freepages[order]);
1915		cc->nr_freepages += 1 << order;
1916	}
1917	cc->nr_migratepages += 1 << order;
1918	/*
1919	 * someone else has referenced the page, we cannot take it back to our
1920	 * free list.
1921	 */
1922}
1923
1924/* possible outcome of isolate_migratepages */
1925typedef enum {
1926	ISOLATE_ABORT,		/* Abort compaction now */
1927	ISOLATE_NONE,		/* No pages isolated, continue scanning */
1928	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
1929} isolate_migrate_t;
1930
1931/*
1932 * Allow userspace to control policy on scanning the unevictable LRU for
1933 * compactable pages.
1934 */
1935static int sysctl_compact_unevictable_allowed __read_mostly = CONFIG_COMPACT_UNEVICTABLE_DEFAULT;
1936/*
1937 * Tunable for proactive compaction. It determines how
1938 * aggressively the kernel should compact memory in the
1939 * background. It takes values in the range [0, 100].
1940 */
1941static unsigned int __read_mostly sysctl_compaction_proactiveness = 20;
1942static int sysctl_extfrag_threshold = 500;
1943static int __read_mostly sysctl_compact_memory;
1944
1945static inline void
1946update_fast_start_pfn(struct compact_control *cc, unsigned long pfn)
1947{
1948	if (cc->fast_start_pfn == ULONG_MAX)
1949		return;
1950
1951	if (!cc->fast_start_pfn)
1952		cc->fast_start_pfn = pfn;
1953
1954	cc->fast_start_pfn = min(cc->fast_start_pfn, pfn);
1955}
1956
1957static inline unsigned long
1958reinit_migrate_pfn(struct compact_control *cc)
1959{
1960	if (!cc->fast_start_pfn || cc->fast_start_pfn == ULONG_MAX)
1961		return cc->migrate_pfn;
1962
1963	cc->migrate_pfn = cc->fast_start_pfn;
1964	cc->fast_start_pfn = ULONG_MAX;
1965
1966	return cc->migrate_pfn;
1967}
1968
1969/*
1970 * Briefly search the free lists for a migration source that already has
1971 * some free pages to reduce the number of pages that need migration
1972 * before a pageblock is free.
1973 */
1974static unsigned long fast_find_migrateblock(struct compact_control *cc)
1975{
1976	unsigned int limit = freelist_scan_limit(cc);
1977	unsigned int nr_scanned = 0;
1978	unsigned long distance;
1979	unsigned long pfn = cc->migrate_pfn;
1980	unsigned long high_pfn;
1981	int order;
1982	bool found_block = false;
1983
1984	/* Skip hints are relied on to avoid repeats on the fast search */
1985	if (cc->ignore_skip_hint)
1986		return pfn;
1987
1988	/*
1989	 * If the pageblock should be finished then do not select a different
1990	 * pageblock.
1991	 */
1992	if (cc->finish_pageblock)
1993		return pfn;
1994
1995	/*
1996	 * If the migrate_pfn is not at the start of a zone or the start
1997	 * of a pageblock then assume this is a continuation of a previous
1998	 * scan restarted due to COMPACT_CLUSTER_MAX.
1999	 */
2000	if (pfn != cc->zone->zone_start_pfn && pfn != pageblock_start_pfn(pfn))
2001		return pfn;
2002
2003	/*
2004	 * For smaller orders, just linearly scan as the number of pages
2005	 * to migrate should be relatively small and does not necessarily
2006	 * justify freeing up a large block for a small allocation.
2007	 */
2008	if (cc->order <= PAGE_ALLOC_COSTLY_ORDER)
2009		return pfn;
2010
2011	/*
2012	 * Only allow kcompactd and direct requests for movable pages to
2013	 * quickly clear out a MOVABLE pageblock for allocation. This
2014	 * reduces the risk that a large movable pageblock is freed for
2015	 * an unmovable/reclaimable small allocation.
2016	 */
2017	if (cc->direct_compaction && cc->migratetype != MIGRATE_MOVABLE)
2018		return pfn;
2019
2020	/*
2021	 * When starting the migration scanner, pick any pageblock within the
2022	 * first half of the search space. Otherwise try and pick a pageblock
2023	 * within the first eighth to reduce the chances that a migration
2024	 * target later becomes a source.
2025	 */
2026	distance = (cc->free_pfn - cc->migrate_pfn) >> 1;
2027	if (cc->migrate_pfn != cc->zone->zone_start_pfn)
2028		distance >>= 2;
2029	high_pfn = pageblock_start_pfn(cc->migrate_pfn + distance);
2030
2031	for (order = cc->order - 1;
2032	     order >= PAGE_ALLOC_COSTLY_ORDER && !found_block && nr_scanned < limit;
2033	     order--) {
2034		struct free_area *area = &cc->zone->free_area[order];
2035		struct list_head *freelist;
2036		unsigned long flags;
2037		struct page *freepage;
2038
2039		if (!area->nr_free)
2040			continue;
2041
2042		spin_lock_irqsave(&cc->zone->lock, flags);
2043		freelist = &area->free_list[MIGRATE_MOVABLE];
2044		list_for_each_entry(freepage, freelist, buddy_list) {
2045			unsigned long free_pfn;
2046
2047			if (nr_scanned++ >= limit) {
2048				move_freelist_tail(freelist, freepage);
2049				break;
2050			}
2051
2052			free_pfn = page_to_pfn(freepage);
2053			if (free_pfn < high_pfn) {
2054				/*
2055				 * Avoid if skipped recently. Ideally it would
2056				 * move to the tail but even safe iteration of
2057				 * the list assumes an entry is deleted, not
2058				 * reordered.
2059				 */
2060				if (get_pageblock_skip(freepage))
2061					continue;
2062
2063				/* Reorder so that a future search skips recent pages */
2064				move_freelist_tail(freelist, freepage);
2065
2066				update_fast_start_pfn(cc, free_pfn);
2067				pfn = pageblock_start_pfn(free_pfn);
2068				if (pfn < cc->zone->zone_start_pfn)
2069					pfn = cc->zone->zone_start_pfn;
2070				cc->fast_search_fail = 0;
2071				found_block = true;
2072				break;
2073			}
2074		}
2075		spin_unlock_irqrestore(&cc->zone->lock, flags);
2076	}
2077
2078	cc->total_migrate_scanned += nr_scanned;
2079
2080	/*
2081	 * If fast scanning failed then use a cached entry for a page block
2082	 * that had free pages as the basis for starting a linear scan.
2083	 */
2084	if (!found_block) {
2085		cc->fast_search_fail++;
2086		pfn = reinit_migrate_pfn(cc);
2087	}
2088	return pfn;
2089}
2090
2091/*
2092 * Isolate all pages that can be migrated from the first suitable block,
2093 * starting at the block pointed to by the migrate scanner pfn within
2094 * compact_control.
2095 */
2096static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
2097{
2098	unsigned long block_start_pfn;
2099	unsigned long block_end_pfn;
2100	unsigned long low_pfn;
2101	struct page *page;
2102	const isolate_mode_t isolate_mode =
2103		(sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) |
2104		(cc->mode != MIGRATE_SYNC ? ISOLATE_ASYNC_MIGRATE : 0);
2105	bool fast_find_block;
2106
2107	/*
2108	 * Start at where we last stopped, or beginning of the zone as
2109	 * initialized by compact_zone(). The first failure will use
2110	 * the lowest PFN as the starting point for linear scanning.
2111	 */
2112	low_pfn = fast_find_migrateblock(cc);
2113	block_start_pfn = pageblock_start_pfn(low_pfn);
2114	if (block_start_pfn < cc->zone->zone_start_pfn)
2115		block_start_pfn = cc->zone->zone_start_pfn;
2116
2117	/*
2118	 * fast_find_migrateblock() has already ensured the pageblock is not
2119	 * set with a skipped flag, so to avoid the isolation_suitable check
2120	 * below again, check whether the fast search was successful.
2121	 */
2122	fast_find_block = low_pfn != cc->migrate_pfn && !cc->fast_search_fail;
2123
2124	/* Only scan within a pageblock boundary */
2125	block_end_pfn = pageblock_end_pfn(low_pfn);
2126
2127	/*
2128	 * Iterate over whole pageblocks until we find the first suitable.
2129	 * Do not cross the free scanner.
2130	 */
2131	for (; block_end_pfn <= cc->free_pfn;
2132			fast_find_block = false,
2133			cc->migrate_pfn = low_pfn = block_end_pfn,
2134			block_start_pfn = block_end_pfn,
2135			block_end_pfn += pageblock_nr_pages) {
2136
2137		/*
2138		 * This can potentially iterate a massively long zone with
2139		 * many pageblocks unsuitable, so periodically check if we
2140		 * need to schedule.
2141		 */
2142		if (!(low_pfn % (COMPACT_CLUSTER_MAX * pageblock_nr_pages)))
2143			cond_resched();
2144
2145		page = pageblock_pfn_to_page(block_start_pfn,
2146						block_end_pfn, cc->zone);
2147		if (!page) {
2148			unsigned long next_pfn;
2149
2150			next_pfn = skip_offline_sections(block_start_pfn);
2151			if (next_pfn)
2152				block_end_pfn = min(next_pfn, cc->free_pfn);
2153			continue;
2154		}
2155
2156		/*
2157		 * If isolation recently failed, do not retry. Only check the
2158		 * pageblock once. COMPACT_CLUSTER_MAX causes a pageblock
2159		 * to be visited multiple times. Assume skip was checked
2160		 * before making it "skip" so other compaction instances do
2161		 * not scan the same block.
2162		 */
2163		if ((pageblock_aligned(low_pfn) ||
2164		     low_pfn == cc->zone->zone_start_pfn) &&
2165		    !fast_find_block && !isolation_suitable(cc, page))
2166			continue;
2167
2168		/*
2169		 * For async direct compaction, only scan the pageblocks of the
2170		 * same migratetype without huge pages. Async direct compaction
2171		 * is optimistic to see if the minimum amount of work satisfies
2172		 * the allocation. The cached PFN is updated as it's possible
2173		 * that all remaining blocks between source and target are
2174		 * unsuitable and the compaction scanners fail to meet.
2175		 */
2176		if (!suitable_migration_source(cc, page)) {
2177			update_cached_migrate(cc, block_end_pfn);
2178			continue;
2179		}
2180
2181		/* Perform the isolation */
2182		if (isolate_migratepages_block(cc, low_pfn, block_end_pfn,
2183						isolate_mode))
2184			return ISOLATE_ABORT;
2185
2186		/*
2187		 * Either we isolated something and proceed with migration. Or
2188		 * we failed and compact_zone should decide if we should
2189		 * continue or not.
2190		 */
2191		break;
2192	}
2193
2194	return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
2195}
2196
2197/*
2198 * Determine whether kswapd is (or recently was!) running on this node.
2199 *
2200 * pgdat_kswapd_lock() pins pgdat->kswapd, so a concurrent kswapd_stop() can't
2201 * zero it.
2202 */
2203static bool kswapd_is_running(pg_data_t *pgdat)
2204{
2205	bool running;
2206
2207	pgdat_kswapd_lock(pgdat);
2208	running = pgdat->kswapd && task_is_running(pgdat->kswapd);
2209	pgdat_kswapd_unlock(pgdat);
2210
2211	return running;
2212}
2213
2214/*
2215 * A zone's fragmentation score is the external fragmentation with respect to
2216 * COMPACTION_HPAGE_ORDER. It returns a value in the range [0, 100].
2217 */
2218static unsigned int fragmentation_score_zone(struct zone *zone)
2219{
2220	return extfrag_for_order(zone, COMPACTION_HPAGE_ORDER);
2221}
2222
2223/*
2224 * A weighted zone's fragmentation score is the external fragmentation
2225 * with respect to COMPACTION_HPAGE_ORDER, scaled by the zone's size. It
2226 * returns a value in the range [0, 100].
2227 *
2228 * The scaling factor ensures that proactive compaction focuses on larger
2229 * zones like ZONE_NORMAL, rather than smaller, specialized zones like
2230 * ZONE_DMA32. For smaller zones, the score value remains close to zero,
2231 * and thus never exceeds the high threshold for proactive compaction.
2232 */
2233static unsigned int fragmentation_score_zone_weighted(struct zone *zone)
2234{
2235	unsigned long score;
2236
2237	score = zone->present_pages * fragmentation_score_zone(zone);
2238	return div64_ul(score, zone->zone_pgdat->node_present_pages + 1);
2239}
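/*
 * Example: a 12GB ZONE_NORMAL on a 16GB node contributes 3/4 of its raw
 * score (a zone score of 40 becomes 30), while a 1GB ZONE_DMA32 on the same
 * node contributes only 1/16 of its score, so the node-wide score is
 * dominated by the large zone.
 */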
2240
2241/*
2242 * The per-node proactive (background) compaction process is started by its
2243 * corresponding kcompactd thread when the node's fragmentation score
2244 * exceeds the high threshold. The compaction process remains active till
2245 * the node's score falls below the low threshold, or one of the back-off
2246 * conditions is met.
2247 */
2248static unsigned int fragmentation_score_node(pg_data_t *pgdat)
2249{
2250	unsigned int score = 0;
2251	int zoneid;
2252
2253	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
2254		struct zone *zone;
2255
2256		zone = &pgdat->node_zones[zoneid];
2257		if (!populated_zone(zone))
2258			continue;
2259		score += fragmentation_score_zone_weighted(zone);
2260	}
2261
2262	return score;
2263}
2264
2265static unsigned int fragmentation_score_wmark(bool low)
2266{
2267	unsigned int wmark_low;
2268
2269	/*
2270	 * Cap the low watermark to avoid excessive compaction
2271	 * activity in case a user sets the proactiveness tunable
2272	 * close to 100 (maximum).
2273	 */
2274	wmark_low = max(100U - sysctl_compaction_proactiveness, 5U);
2275	return low ? wmark_low : min(wmark_low + 10, 100U);
2276}
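/*
 * Example: with the default proactiveness of 20, the low watermark is
 * max(100 - 20, 5) = 80 and the high watermark is 90: kcompactd starts
 * proactive work once the node score exceeds 90 and stops when it falls
 * back to 80. Even at proactiveness 100 the pair is clamped to 5 and 15.
 */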
2277
2278static bool should_proactive_compact_node(pg_data_t *pgdat)
2279{
2280	int wmark_high;
2281
2282	if (!sysctl_compaction_proactiveness || kswapd_is_running(pgdat))
2283		return false;
2284
2285	wmark_high = fragmentation_score_wmark(false);
2286	return fragmentation_score_node(pgdat) > wmark_high;
2287}
2288
2289static enum compact_result __compact_finished(struct compact_control *cc)
2290{
2291	unsigned int order;
2292	const int migratetype = cc->migratetype;
2293	int ret;
2294
2295	/* Compaction run completes if the migrate and free scanner meet */
2296	if (compact_scanners_met(cc)) {
2297		/* Let the next compaction start anew. */
2298		reset_cached_positions(cc->zone);
2299
2300		/*
2301		 * Mark that the PG_migrate_skip information should be cleared
2302		 * by kswapd when it goes to sleep. kcompactd does not set the
2303		 * flag itself as the decision to be clear should be directly
2304		 * based on an allocation request.
2305		 */
2306		if (cc->direct_compaction)
2307			cc->zone->compact_blockskip_flush = true;
2308
2309		if (cc->whole_zone)
2310			return COMPACT_COMPLETE;
2311		else
2312			return COMPACT_PARTIAL_SKIPPED;
2313	}
2314
2315	if (cc->proactive_compaction) {
2316		int score, wmark_low;
2317		pg_data_t *pgdat;
2318
2319		pgdat = cc->zone->zone_pgdat;
2320		if (kswapd_is_running(pgdat))
2321			return COMPACT_PARTIAL_SKIPPED;
2322
2323		score = fragmentation_score_zone(cc->zone);
2324		wmark_low = fragmentation_score_wmark(true);
2325
2326		if (score > wmark_low)
2327			ret = COMPACT_CONTINUE;
2328		else
2329			ret = COMPACT_SUCCESS;
2330
2331		goto out;
2332	}
2333
2334	if (is_via_compact_memory(cc->order))
2335		return COMPACT_CONTINUE;
2336
2337	/*
2338	 * Always finish scanning a pageblock to reduce the possibility of
2339	 * fallbacks in the future. This is particularly important when
2340	 * migration source is unmovable/reclaimable but it's not worth
2341	 * special casing.
2342	 */
2343	if (!pageblock_aligned(cc->migrate_pfn))
2344		return COMPACT_CONTINUE;
2345
2346	/* Direct compactor: Is a suitable page free? */
2347	ret = COMPACT_NO_SUITABLE_PAGE;
2348	for (order = cc->order; order < NR_PAGE_ORDERS; order++) {
2349		struct free_area *area = &cc->zone->free_area[order];
2350		bool can_steal;
2351
2352		/* Job done if page is free of the right migratetype */
2353		if (!free_area_empty(area, migratetype))
2354			return COMPACT_SUCCESS;
2355
2356#ifdef CONFIG_CMA
2357		/* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */
2358		if (migratetype == MIGRATE_MOVABLE &&
2359			!free_area_empty(area, MIGRATE_CMA))
2360			return COMPACT_SUCCESS;
2361#endif
2362		/*
2363		 * Job done if allocation would steal freepages from
2364		 * other migratetype buddy lists.
2365		 */
2366		if (find_suitable_fallback(area, order, migratetype,
2367						true, &can_steal) != -1)
2368			/*
2369			 * Movable pages are OK in any pageblock. If we are
2370			 * stealing for a non-movable allocation, make sure
2371			 * we finish compacting the current pageblock first
2372			 * (which is assured by the above migrate_pfn align
2373			 * check) so it is as free as possible and we won't
2374			 * have to steal another one soon.
2375			 */
2376			return COMPACT_SUCCESS;
2377	}
2378
2379out:
2380	if (cc->contended || fatal_signal_pending(current))
2381		ret = COMPACT_CONTENDED;
2382
2383	return ret;
2384}
2385
2386static enum compact_result compact_finished(struct compact_control *cc)
2387{
2388	int ret;
2389
2390	ret = __compact_finished(cc);
2391	trace_mm_compaction_finished(cc->zone, cc->order, ret);
2392	if (ret == COMPACT_NO_SUITABLE_PAGE)
2393		ret = COMPACT_CONTINUE;
2394
2395	return ret;
2396}
2397
2398static bool __compaction_suitable(struct zone *zone, int order,
2399				  int highest_zoneidx,
2400				  unsigned long wmark_target)
2401{
2402	unsigned long watermark;
2403	/*
2404	 * Watermarks for order-0 must be met for compaction to be able to
2405	 * isolate free pages for migration targets. This means that the
2406	 * watermark and alloc_flags have to match, or be more pessimistic than
2407	 * the check in __isolate_free_page(). We don't use the direct
2408	 * compactor's alloc_flags, as they are not relevant for freepage
2409	 * isolation. We however do use the direct compactor's highest_zoneidx
2410	 * to skip over zones where lowmem reserves would prevent allocation
2411	 * even if compaction succeeds.
2412	 * For costly orders, we require low watermark instead of min for
2413	 * compaction to proceed to increase its chances.
2414	 * ALLOC_CMA is used, as pages in CMA pageblocks are considered
2415	 * suitable migration targets
2416	 */
2417	watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ?
2418				low_wmark_pages(zone) : min_wmark_pages(zone);
2419	watermark += compact_gap(order);
2420	return __zone_watermark_ok(zone, 0, watermark, highest_zoneidx,
2421				   ALLOC_CMA, wmark_target);
2422}
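/*
 * Example (assuming compact_gap() is 2 << order): an order-9 request is
 * costly, so a zone with a low watermark of 16384 pages needs roughly
 * 16384 + 1024 free pages (CMA included) before compaction is deemed able
 * to make progress, whereas a non-costly order-3 request only needs
 * min_wmark + 16 pages.
 */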
2423
2424/*
2425 * compaction_suitable: Is this suitable to run compaction on this zone now?
2426 */
2427bool compaction_suitable(struct zone *zone, int order, int highest_zoneidx)
2428{
2429	enum compact_result compact_result;
2430	bool suitable;
2431
2432	suitable = __compaction_suitable(zone, order, highest_zoneidx,
2433					 zone_page_state(zone, NR_FREE_PAGES));
2434	/*
2435	 * fragmentation index determines if allocation failures are due to
2436	 * low memory or external fragmentation
2437	 *
2438	 * index of -1000 would imply allocations might succeed depending on
2439	 * watermarks, but we already failed the high-order watermark check
2440	 * index towards 0 implies failure is due to lack of memory
2441	 * index towards 1000 implies failure is due to fragmentation
2442	 *
2443	 * Only compact if a failure would be due to fragmentation. Also
2444	 * ignore fragindex for non-costly orders where the alternative to
2445	 * a successful reclaim/compaction is OOM. Fragindex and the
2446	 * vm.extfrag_threshold sysctl is meant as a heuristic to prevent
2447	 * excessive compaction for costly orders, but it should not be at the
2448	 * expense of system stability.
2449	 */
2450	if (suitable) {
2451		compact_result = COMPACT_CONTINUE;
2452		if (order > PAGE_ALLOC_COSTLY_ORDER) {
2453			int fragindex = fragmentation_index(zone, order);
2454
2455			if (fragindex >= 0 &&
2456			    fragindex <= sysctl_extfrag_threshold) {
2457				suitable = false;
2458				compact_result = COMPACT_NOT_SUITABLE_ZONE;
2459			}
2460		}
2461	} else {
2462		compact_result = COMPACT_SKIPPED;
2463	}
2464
2465	trace_mm_compaction_suitable(zone, order, compact_result);
2466
2467	return suitable;
2468}
2469
2470bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
2471		int alloc_flags)
2472{
2473	struct zone *zone;
2474	struct zoneref *z;
2475
2476	/*
2477	 * Make sure at least one zone would pass __compaction_suitable if we continue
2478	 * retrying the reclaim.
2479	 */
2480	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
2481				ac->highest_zoneidx, ac->nodemask) {
2482		unsigned long available;
2483
2484		/*
2485		 * Do not consider all the reclaimable memory because we do not
2486		 * want to thrash just for a single high-order allocation which
2487		 * is not guaranteed to appear even if __compaction_suitable
2488		 * is happy about the watermark check.
2489		 */
2490		available = zone_reclaimable_pages(zone) / order;
2491		available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
2492		if (__compaction_suitable(zone, order, ac->highest_zoneidx,
2493					  available))
2494			return true;
2495	}
2496
2497	return false;
2498}
2499
2500/*
2501 * Should we do compaction for target allocation order.
2502 * Return COMPACT_SUCCESS if allocation for target order can be already
2503 * satisfied
2504 * Return COMPACT_SKIPPED if compaction for target order is likely to fail
2505 * Return COMPACT_CONTINUE if compaction for the target order should be run
2506 */
2507static enum compact_result
2508compaction_suit_allocation_order(struct zone *zone, unsigned int order,
2509				 int highest_zoneidx, unsigned int alloc_flags)
2510{
2511	unsigned long watermark;
2512
2513	watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
2514	if (zone_watermark_ok(zone, order, watermark, highest_zoneidx,
2515			      alloc_flags))
2516		return COMPACT_SUCCESS;
2517
2518	if (!compaction_suitable(zone, order, highest_zoneidx))
2519		return COMPACT_SKIPPED;
2520
2521	return COMPACT_CONTINUE;
2522}
2523
2524static enum compact_result
2525compact_zone(struct compact_control *cc, struct capture_control *capc)
2526{
2527	enum compact_result ret;
2528	unsigned long start_pfn = cc->zone->zone_start_pfn;
2529	unsigned long end_pfn = zone_end_pfn(cc->zone);
2530	unsigned long last_migrated_pfn;
2531	const bool sync = cc->mode != MIGRATE_ASYNC;
2532	bool update_cached;
2533	unsigned int nr_succeeded = 0, nr_migratepages;
2534	int order;
2535
2536	/*
2537	 * These counters track activities during zone compaction.  Initialize
2538	 * them before compacting a new zone.
2539	 */
2540	cc->total_migrate_scanned = 0;
2541	cc->total_free_scanned = 0;
2542	cc->nr_migratepages = 0;
2543	cc->nr_freepages = 0;
2544	for (order = 0; order < NR_PAGE_ORDERS; order++)
2545		INIT_LIST_HEAD(&cc->freepages[order]);
2546	INIT_LIST_HEAD(&cc->migratepages);
2547
2548	cc->migratetype = gfp_migratetype(cc->gfp_mask);
2549
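	/*
	 * Explicit whole-zone compaction (via /proc/sys/vm/compact_memory,
	 * the per-node sysfs knob or proactive compaction) uses order == -1
	 * and bypasses the suitability check below.
	 */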
2550	if (!is_via_compact_memory(cc->order)) {
2551		ret = compaction_suit_allocation_order(cc->zone, cc->order,
2552						       cc->highest_zoneidx,
2553						       cc->alloc_flags);
2554		if (ret != COMPACT_CONTINUE)
2555			return ret;
2556	}
2557
2558	/*
2559	 * Clear pageblock skip if there were failures recently and compaction
2560	 * is about to be retried after being deferred.
2561	 */
2562	if (compaction_restarting(cc->zone, cc->order))
2563		__reset_isolation_suitable(cc->zone);
2564
2565	/*
2566	 * Set up to move all movable pages to the end of the zone. Use cached
2567	 * information on where the scanners should start (unless we explicitly
2568	 * want to compact the whole zone), but check that it is initialised
2569	 * by ensuring the values are within zone boundaries.
2570	 */
2571	cc->fast_start_pfn = 0;
2572	if (cc->whole_zone) {
2573		cc->migrate_pfn = start_pfn;
2574		cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
2575	} else {
2576		cc->migrate_pfn = cc->zone->compact_cached_migrate_pfn[sync];
2577		cc->free_pfn = cc->zone->compact_cached_free_pfn;
2578		if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) {
2579			cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
2580			cc->zone->compact_cached_free_pfn = cc->free_pfn;
2581		}
2582		if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) {
2583			cc->migrate_pfn = start_pfn;
2584			cc->zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
2585			cc->zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
2586		}
2587
2588		if (cc->migrate_pfn <= cc->zone->compact_init_migrate_pfn)
2589			cc->whole_zone = true;
2590	}
2591
2592	last_migrated_pfn = 0;
2593
2594	/*
2595	 * Migrate has separate cached PFNs for ASYNC and SYNC* migration on
2596	 * the basis that some migrations will fail in ASYNC mode. However,
2597	 * if the cached PFNs match and pageblocks are skipped due to having
2598	 * no isolation candidates, then the sync state does not matter.
2599	 * Until a pageblock with isolation candidates is found, keep the
2600	 * cached PFNs in sync to avoid revisiting the same blocks.
2601	 */
2602	update_cached = !sync &&
2603		cc->zone->compact_cached_migrate_pfn[0] == cc->zone->compact_cached_migrate_pfn[1];
2604
2605	trace_mm_compaction_begin(cc, start_pfn, end_pfn, sync);
2606
2607	/* lru_add_drain_all() could be expensive since it involves other CPUs */
2608	lru_add_drain();
2609
2610	while ((ret = compact_finished(cc)) == COMPACT_CONTINUE) {
2611		int err;
2612		unsigned long iteration_start_pfn = cc->migrate_pfn;
2613
2614		/*
2615		 * Avoid multiple rescans of the same pageblock which can
2616		 * happen if a page cannot be isolated (dirty/writeback in
2617		 * async mode) or if the migrated pages are being allocated
2618		 * before the pageblock is cleared.  The first rescan will
2619		 * capture the entire pageblock for migration. If it fails,
2620		 * it'll be marked skip and scanning will proceed as normal.
2621		 */
2622		cc->finish_pageblock = false;
2623		if (pageblock_start_pfn(last_migrated_pfn) ==
2624		    pageblock_start_pfn(iteration_start_pfn)) {
2625			cc->finish_pageblock = true;
2626		}
2627
2628rescan:
2629		switch (isolate_migratepages(cc)) {
2630		case ISOLATE_ABORT:
2631			ret = COMPACT_CONTENDED;
2632			putback_movable_pages(&cc->migratepages);
2633			cc->nr_migratepages = 0;
2634			goto out;
2635		case ISOLATE_NONE:
2636			if (update_cached) {
2637				cc->zone->compact_cached_migrate_pfn[1] =
2638					cc->zone->compact_cached_migrate_pfn[0];
2639			}
2640
2641			/*
2642			 * We haven't isolated and migrated anything, but
2643			 * there might still be unflushed migrations from
2644			 * the previous cc->order aligned block.
2645			 */
2646			goto check_drain;
2647		case ISOLATE_SUCCESS:
2648			update_cached = false;
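			/*
			 * Remember where we last isolated pages from so the
			 * check_drain path below can drain the local lists
			 * once the scanner has moved on to a different
			 * cc->order aligned block.
			 */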
2649			last_migrated_pfn = max(cc->zone->zone_start_pfn,
2650				pageblock_start_pfn(cc->migrate_pfn - 1));
2651		}
2652
2653		/*
2654		 * Record the number of pages to migrate since the
2655		 * compaction_alloc/free() will update cc->nr_migratepages
2656		 * properly.
2657		 */
2658		nr_migratepages = cc->nr_migratepages;
2659		err = migrate_pages(&cc->migratepages, compaction_alloc,
2660				compaction_free, (unsigned long)cc, cc->mode,
2661				MR_COMPACTION, &nr_succeeded);
2662
2663		trace_mm_compaction_migratepages(nr_migratepages, nr_succeeded);
2664
2665		/* All pages were either migrated or will be released */
2666		cc->nr_migratepages = 0;
2667		if (err) {
2668			putback_movable_pages(&cc->migratepages);
2669			/*
2670			 * migrate_pages() may return -ENOMEM when scanners meet
2671			 * and we want compact_finished() to detect it
2672			 */
2673			if (err == -ENOMEM && !compact_scanners_met(cc)) {
2674				ret = COMPACT_CONTENDED;
2675				goto out;
2676			}
2677			/*
2678			 * If an ASYNC or SYNC_LIGHT migration fails to migrate
2679			 * a page within the pageblock_order-aligned block and
2680			 * fast_find_migrateblock may be used, then scan the
2681			 * remainder of the pageblock. This will mark the
2682			 * pageblock "skip" to avoid rescanning in the near
2683			 * future. This isolates more pages than necessary
2684			 * for the request but avoids loops due to
2685			 * fast_find_migrateblock revisiting blocks that were
2686			 * recently partially scanned.
2687			 */
2688			if (!pageblock_aligned(cc->migrate_pfn) &&
2689			    !cc->ignore_skip_hint && !cc->finish_pageblock &&
2690			    (cc->mode < MIGRATE_SYNC)) {
2691				cc->finish_pageblock = true;
2692
2693				/*
2694				 * Draining pcplists does not help THP if
2695				 * any page failed to migrate. Even after
2696				 * drain, the pageblock will not be free.
2697				 */
2698				if (cc->order == COMPACTION_HPAGE_ORDER)
2699					last_migrated_pfn = 0;
2700
2701				goto rescan;
2702			}
2703		}
2704
2705		/* Stop if a page has been captured */
2706		if (capc && capc->page) {
2707			ret = COMPACT_SUCCESS;
2708			break;
2709		}
2710
2711check_drain:
2712		/*
2713		 * Has the migration scanner moved away from the previous
2714		 * cc->order aligned block where we migrated from? If yes,
2715		 * flush the pages that were freed, so that they can merge and
2716		 * compact_finished() can detect immediately if allocation
2717		 * would succeed.
2718		 */
2719		if (cc->order > 0 && last_migrated_pfn) {
2720			unsigned long current_block_start =
2721				block_start_pfn(cc->migrate_pfn, cc->order);
2722
2723			if (last_migrated_pfn < current_block_start) {
2724				lru_add_drain_cpu_zone(cc->zone);
2725				/* No more flushing until we migrate again */
2726				last_migrated_pfn = 0;
2727			}
2728		}
2729	}
2730
2731out:
2732	/*
2733	 * Release free pages and update where the free scanner should restart,
2734	 * so we don't leave any returned pages behind in the next attempt.
2735	 */
2736	if (cc->nr_freepages > 0) {
2737		unsigned long free_pfn = release_free_list(cc->freepages);
2738
2739		cc->nr_freepages = 0;
2740		VM_BUG_ON(free_pfn == 0);
2741		/* The cached pfn is always the first in a pageblock */
2742		free_pfn = pageblock_start_pfn(free_pfn);
2743		/*
2744		 * Only go back, not forward. The cached pfn might have been
2745		 * already reset to zone end in compact_finished()
2746		 */
2747		if (free_pfn > cc->zone->compact_cached_free_pfn)
2748			cc->zone->compact_cached_free_pfn = free_pfn;
2749	}
2750
2751	count_compact_events(COMPACTMIGRATE_SCANNED, cc->total_migrate_scanned);
2752	count_compact_events(COMPACTFREE_SCANNED, cc->total_free_scanned);
2753
2754	trace_mm_compaction_end(cc, start_pfn, end_pfn, sync, ret);
2755
2756	VM_BUG_ON(!list_empty(&cc->migratepages));
2757
2758	return ret;
2759}
2760
2761static enum compact_result compact_zone_order(struct zone *zone, int order,
2762		gfp_t gfp_mask, enum compact_priority prio,
2763		unsigned int alloc_flags, int highest_zoneidx,
2764		struct page **capture)
2765{
2766	enum compact_result ret;
2767	struct compact_control cc = {
2768		.order = order,
2769		.search_order = order,
2770		.gfp_mask = gfp_mask,
2771		.zone = zone,
2772		.mode = (prio == COMPACT_PRIO_ASYNC) ?
2773					MIGRATE_ASYNC :	MIGRATE_SYNC_LIGHT,
2774		.alloc_flags = alloc_flags,
2775		.highest_zoneidx = highest_zoneidx,
2776		.direct_compaction = true,
2777		.whole_zone = (prio == MIN_COMPACT_PRIORITY),
2778		.ignore_skip_hint = (prio == MIN_COMPACT_PRIORITY),
2779		.ignore_block_suitable = (prio == MIN_COMPACT_PRIORITY)
2780	};
2781	struct capture_control capc = {
2782		.cc = &cc,
2783		.page = NULL,
2784	};
2785
2786	/*
2787	 * Make sure the structs are really initialized before we expose the
2788	 * capture control, in case we are interrupted and the interrupt handler
2789	 * frees a page.
2790	 */
2791	barrier();
2792	WRITE_ONCE(current->capture_control, &capc);
2793
2794	ret = compact_zone(&cc, &capc);
2795
2796	/*
2797	 * Make sure we hide capture control first before we read the captured
2798	 * page pointer, otherwise an interrupt could free and capture a page
2799	 * and we would leak it.
2800	 */
2801	WRITE_ONCE(current->capture_control, NULL);
2802	*capture = READ_ONCE(capc.page);
2803	/*
2804	 * Technically, it is also possible that compaction is skipped but
2805	 * the page is still captured out of luck (an IRQ came and freed the page).
2806	 * Returning COMPACT_SUCCESS in such cases helps in properly accounting
2807	 * the COMPACT[STALL|FAIL] when compaction is skipped.
2808	 */
2809	if (*capture)
2810		ret = COMPACT_SUCCESS;
2811
2812	return ret;
2813}
2814
2815/**
2816 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
2817 * @gfp_mask: The GFP mask of the current allocation
2818 * @order: The order of the current allocation
2819 * @alloc_flags: The allocation flags of the current allocation
2820 * @ac: The context of current allocation
2821 * @prio: Determines how hard direct compaction should try to succeed
2822 * @capture: The free page created by compaction, if any, will be stored here
2823 *
2824 * This is the main entry point for direct page compaction.
2825 */
2826enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
2827		unsigned int alloc_flags, const struct alloc_context *ac,
2828		enum compact_priority prio, struct page **capture)
2829{
2830	struct zoneref *z;
2831	struct zone *zone;
2832	enum compact_result rc = COMPACT_SKIPPED;
2833
2834	if (!gfp_compaction_allowed(gfp_mask))
2835		return COMPACT_SKIPPED;
2836
2837	trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio);
2838
2839	/* Compact each zone in the list */
2840	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
2841					ac->highest_zoneidx, ac->nodemask) {
2842		enum compact_result status;
2843
2844		if (prio > MIN_COMPACT_PRIORITY
2845					&& compaction_deferred(zone, order)) {
2846			rc = max_t(enum compact_result, COMPACT_DEFERRED, rc);
2847			continue;
2848		}
2849
2850		status = compact_zone_order(zone, order, gfp_mask, prio,
2851				alloc_flags, ac->highest_zoneidx, capture);
2852		rc = max(status, rc);
2853
2854		/* The allocation should succeed, stop compacting */
2855		if (status == COMPACT_SUCCESS) {
2856			/*
2857			 * We think the allocation will succeed in this zone,
2858			 * but it is not certain, hence the false. The caller
2859			 * will repeat this with true if allocation indeed
2860			 * succeeds in this zone.
2861			 */
2862			compaction_defer_reset(zone, order, false);
2863
2864			break;
2865		}
2866
2867		if (prio != COMPACT_PRIO_ASYNC && (status == COMPACT_COMPLETE ||
2868					status == COMPACT_PARTIAL_SKIPPED))
2869			/*
2870			 * We think that allocation won't succeed in this zone
2871			 * so we defer compaction there. If it ends up
2872			 * succeeding after all, it will be reset.
2873			 */
2874			defer_compaction(zone, order);
2875
2876		/*
2877		 * We might have stopped compacting due to need_resched() in
2878		 * async compaction, or due to a fatal signal detected. In that
2879		 * case do not try further zones
2880		 */
2881		if ((prio == COMPACT_PRIO_ASYNC && need_resched())
2882					|| fatal_signal_pending(current))
2883			break;
2884	}
2885
2886	return rc;
2887}
2888
2889/*
2890 * compact_node() - compact all zones within a node
2891 * @pgdat: The node page data
2892 * @proactive: Whether the compaction is proactive
2893 *
2894 * For proactive compaction, compact until each zone's fragmentation score
2895 * falls within the proactive compaction thresholds (as determined by the
2896 * proactiveness tunable); it is possible that the function returns before
2897 * reaching the score targets due to various back-off conditions, such as
2898 * contention on per-node or per-zone locks.
2899 */
2900static int compact_node(pg_data_t *pgdat, bool proactive)
2901{
2902	int zoneid;
2903	struct zone *zone;
2904	struct compact_control cc = {
2905		.order = -1,
2906		.mode = proactive ? MIGRATE_SYNC_LIGHT : MIGRATE_SYNC,
2907		.ignore_skip_hint = true,
2908		.whole_zone = true,
2909		.gfp_mask = GFP_KERNEL,
2910		.proactive_compaction = proactive,
2911	};
2912
2913	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
2914		zone = &pgdat->node_zones[zoneid];
2915		if (!populated_zone(zone))
2916			continue;
2917
2918		if (fatal_signal_pending(current))
2919			return -EINTR;
2920
2921		cc.zone = zone;
2922
2923		compact_zone(&cc, NULL);
2924
2925		if (proactive) {
2926			count_compact_events(KCOMPACTD_MIGRATE_SCANNED,
2927					     cc.total_migrate_scanned);
2928			count_compact_events(KCOMPACTD_FREE_SCANNED,
2929					     cc.total_free_scanned);
2930		}
2931	}
2932
2933	return 0;
2934}
2935
2936/* Compact all zones of all nodes in the system */
2937static int compact_nodes(void)
2938{
2939	int ret, nid;
2940
2941	/* Flush pending updates to the LRU lists */
2942	lru_add_drain_all();
2943
2944	for_each_online_node(nid) {
2945		ret = compact_node(NODE_DATA(nid), false);
2946		if (ret)
2947			return ret;
2948	}
2949
2950	return 0;
2951}
2952
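/*
 * Writing a non-zero proactiveness value wakes kcompactd on every online
 * node so a proactive compaction pass can start without waiting for the
 * next timeout.
 */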
2953static int compaction_proactiveness_sysctl_handler(struct ctl_table *table, int write,
2954		void *buffer, size_t *length, loff_t *ppos)
2955{
2956	int rc, nid;
2957
2958	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
2959	if (rc)
2960		return rc;
2961
2962	if (write && sysctl_compaction_proactiveness) {
2963		for_each_online_node(nid) {
2964			pg_data_t *pgdat = NODE_DATA(nid);
2965
2966			if (pgdat->proactive_compact_trigger)
2967				continue;
2968
2969			pgdat->proactive_compact_trigger = true;
2970			trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, -1,
2971							     pgdat->nr_zones - 1);
2972			wake_up_interruptible(&pgdat->kcompactd_wait);
2973		}
2974	}
2975
2976	return 0;
2977}
2978
2979/*
2980 * This is the entry point for compacting all nodes via
2981 * /proc/sys/vm/compact_memory
2982 */
2983static int sysctl_compaction_handler(struct ctl_table *table, int write,
2984			void *buffer, size_t *length, loff_t *ppos)
2985{
2986	int ret;
2987
2988	ret = proc_dointvec(table, write, buffer, length, ppos);
2989	if (ret)
2990		return ret;
2991
2992	if (sysctl_compact_memory != 1)
2993		return -EINVAL;
2994
2995	if (write)
2996		ret = compact_nodes();
2997
2998	return ret;
2999}
3000
3001#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
3002static ssize_t compact_store(struct device *dev,
3003			     struct device_attribute *attr,
3004			     const char *buf, size_t count)
3005{
3006	int nid = dev->id;
3007
3008	if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
3009		/* Flush pending updates to the LRU lists */
3010		lru_add_drain_all();
3011
3012		compact_node(NODE_DATA(nid), false);
3013	}
3014
3015	return count;
3016}
3017static DEVICE_ATTR_WO(compact);
3018
3019int compaction_register_node(struct node *node)
3020{
3021	return device_create_file(&node->dev, &dev_attr_compact);
3022}
3023
3024void compaction_unregister_node(struct node *node)
3025{
3026	device_remove_file(&node->dev, &dev_attr_compact);
3027}
3028#endif /* CONFIG_SYSFS && CONFIG_NUMA */
3029
3030static inline bool kcompactd_work_requested(pg_data_t *pgdat)
3031{
3032	return pgdat->kcompactd_max_order > 0 || kthread_should_stop() ||
3033		pgdat->proactive_compact_trigger;
3034}
3035
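/*
 * Return true if at least one populated zone up to the requested zoneidx
 * would benefit from compaction at kcompactd's recorded max order.
 */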
3036static bool kcompactd_node_suitable(pg_data_t *pgdat)
3037{
3038	int zoneid;
3039	struct zone *zone;
3040	enum zone_type highest_zoneidx = pgdat->kcompactd_highest_zoneidx;
3041	enum compact_result ret;
3042
3043	for (zoneid = 0; zoneid <= highest_zoneidx; zoneid++) {
3044		zone = &pgdat->node_zones[zoneid];
3045
3046		if (!populated_zone(zone))
3047			continue;
3048
3049		ret = compaction_suit_allocation_order(zone,
3050				pgdat->kcompactd_max_order,
3051				highest_zoneidx, ALLOC_WMARK_MIN);
3052		if (ret == COMPACT_CONTINUE)
3053			return true;
3054	}
3055
3056	return false;
3057}
3058
3059static void kcompactd_do_work(pg_data_t *pgdat)
3060{
3061	/*
3062	 * With no special task, compact all zones so that a page of requested
3063	 * order is allocatable.
3064	 */
3065	int zoneid;
3066	struct zone *zone;
3067	struct compact_control cc = {
3068		.order = pgdat->kcompactd_max_order,
3069		.search_order = pgdat->kcompactd_max_order,
3070		.highest_zoneidx = pgdat->kcompactd_highest_zoneidx,
3071		.mode = MIGRATE_SYNC_LIGHT,
3072		.ignore_skip_hint = false,
3073		.gfp_mask = GFP_KERNEL,
3074	};
3075	enum compact_result ret;
3076
3077	trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order,
3078							cc.highest_zoneidx);
3079	count_compact_event(KCOMPACTD_WAKE);
3080
3081	for (zoneid = 0; zoneid <= cc.highest_zoneidx; zoneid++) {
3082		int status;
3083
3084		zone = &pgdat->node_zones[zoneid];
3085		if (!populated_zone(zone))
3086			continue;
3087
3088		if (compaction_deferred(zone, cc.order))
3089			continue;
3090
3091		ret = compaction_suit_allocation_order(zone,
3092				cc.order, zoneid, ALLOC_WMARK_MIN);
3093		if (ret != COMPACT_CONTINUE)
3094			continue;
3095
3096		if (kthread_should_stop())
3097			return;
3098
3099		cc.zone = zone;
3100		status = compact_zone(&cc, NULL);
3101
3102		if (status == COMPACT_SUCCESS) {
3103			compaction_defer_reset(zone, cc.order, false);
3104		} else if (status == COMPACT_PARTIAL_SKIPPED || status == COMPACT_COMPLETE) {
3105			/*
3106			 * Buddy pages may become stranded on pcps that could
3107			 * otherwise coalesce on the zone's free area for
3108			 * order >= cc.order.  This is ratelimited by the
3109			 * upcoming deferral.
3110			 */
3111			drain_all_pages(zone);
3112
3113			/*
3114			 * We use sync migration mode here, so we defer like
3115			 * sync direct compaction does.
3116			 */
3117			defer_compaction(zone, cc.order);
3118		}
3119
3120		count_compact_events(KCOMPACTD_MIGRATE_SCANNED,
3121				     cc.total_migrate_scanned);
3122		count_compact_events(KCOMPACTD_FREE_SCANNED,
3123				     cc.total_free_scanned);
3124	}
3125
3126	/*
3127	 * Regardless of success, we are done until woken up next. But remember
3128	 * the requested order/highest_zoneidx in case it was higher/tighter
3129	 * than our current ones
3130	 */
3131	if (pgdat->kcompactd_max_order <= cc.order)
3132		pgdat->kcompactd_max_order = 0;
3133	if (pgdat->kcompactd_highest_zoneidx >= cc.highest_zoneidx)
3134		pgdat->kcompactd_highest_zoneidx = pgdat->nr_zones - 1;
3135}
3136
3137void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx)
3138{
3139	if (!order)
3140		return;
3141
3142	if (pgdat->kcompactd_max_order < order)
3143		pgdat->kcompactd_max_order = order;
3144
3145	if (pgdat->kcompactd_highest_zoneidx > highest_zoneidx)
3146		pgdat->kcompactd_highest_zoneidx = highest_zoneidx;
3147
3148	/*
3149	 * Pairs with implicit barrier in wait_event_freezable()
3150	 * such that wakeups are not missed.
3151	 */
3152	if (!wq_has_sleeper(&pgdat->kcompactd_wait))
3153		return;
3154
3155	if (!kcompactd_node_suitable(pgdat))
3156		return;
3157
3158	trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order,
3159							highest_zoneidx);
3160	wake_up_interruptible(&pgdat->kcompactd_wait);
3161}
3162
3163/*
3164 * The background compaction daemon, started as a kernel thread
3165 * from the init process.
3166 */
3167static int kcompactd(void *p)
3168{
3169	pg_data_t *pgdat = (pg_data_t *)p;
3170	struct task_struct *tsk = current;
3171	long default_timeout = msecs_to_jiffies(HPAGE_FRAG_CHECK_INTERVAL_MSEC);
3172	long timeout = default_timeout;
3173
3174	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
3175
3176	if (!cpumask_empty(cpumask))
3177		set_cpus_allowed_ptr(tsk, cpumask);
3178
3179	set_freezable();
3180
3181	pgdat->kcompactd_max_order = 0;
3182	pgdat->kcompactd_highest_zoneidx = pgdat->nr_zones - 1;
3183
3184	while (!kthread_should_stop()) {
3185		unsigned long pflags;
3186
3187		/*
3188		 * Avoid the unnecessary wakeup for proactive compaction
3189		 * when it is disabled.
3190		 */
3191		if (!sysctl_compaction_proactiveness)
3192			timeout = MAX_SCHEDULE_TIMEOUT;
3193		trace_mm_compaction_kcompactd_sleep(pgdat->node_id);
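		/*
		 * If woken by wakeup_kcompactd() rather than by the proactive
		 * trigger or a timeout, service the requested order and go
		 * back to sleep.
		 */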
3194		if (wait_event_freezable_timeout(pgdat->kcompactd_wait,
3195			kcompactd_work_requested(pgdat), timeout) &&
3196			!pgdat->proactive_compact_trigger) {
3197
3198			psi_memstall_enter(&pflags);
3199			kcompactd_do_work(pgdat);
3200			psi_memstall_leave(&pflags);
3201			/*
3202			 * Reset the timeout value. The defer timeout from
3203			 * proactive compaction is lost here but that is fine,
3204			 * as when the condition of the zone has changed
3205			 * substantially, carrying on with the previous defer
3206			 * interval is not useful.
3207			 */
3208			timeout = default_timeout;
3209			continue;
3210		}
3211
3212		/*
3213		 * Start the proactive work with default timeout. Based
3214		 * on the fragmentation score, this timeout is updated.
3215		 */
3216		timeout = default_timeout;
3217		if (should_proactive_compact_node(pgdat)) {
3218			unsigned int prev_score, score;
3219
3220			prev_score = fragmentation_score_node(pgdat);
3221			compact_node(pgdat, true);
3222			score = fragmentation_score_node(pgdat);
3223			/*
3224			 * Defer proactive compaction if the fragmentation
3225			 * score did not go down i.e. no progress made.
3226			 */
3227			if (unlikely(score >= prev_score))
3228				timeout =
3229				   default_timeout << COMPACT_MAX_DEFER_SHIFT;
3230		}
3231		if (unlikely(pgdat->proactive_compact_trigger))
3232			pgdat->proactive_compact_trigger = false;
3233	}
3234
3235	return 0;
3236}
3237
3238/*
3239 * This kcompactd start function will be called by init and node-hot-add.
3240 * On node-hot-add, kcompactd will be moved to the proper CPUs if CPUs are hot-added.
3241 */
3242void __meminit kcompactd_run(int nid)
3243{
3244	pg_data_t *pgdat = NODE_DATA(nid);
3245
3246	if (pgdat->kcompactd)
3247		return;
3248
3249	pgdat->kcompactd = kthread_run(kcompactd, pgdat, "kcompactd%d", nid);
3250	if (IS_ERR(pgdat->kcompactd)) {
3251		pr_err("Failed to start kcompactd on node %d\n", nid);
3252		pgdat->kcompactd = NULL;
3253	}
3254}
3255
3256/*
3257 * Called by memory hotplug when all memory in a node is offlined. Caller must
3258 * be holding mem_hotplug_begin/done().
3259 */
3260void __meminit kcompactd_stop(int nid)
3261{
3262	struct task_struct *kcompactd = NODE_DATA(nid)->kcompactd;
3263
3264	if (kcompactd) {
3265		kthread_stop(kcompactd);
3266		NODE_DATA(nid)->kcompactd = NULL;
3267	}
3268}
3269
3270/*
3271 * It's optimal to keep kcompactd on the same CPUs as its node's memory, but
3272 * not required for correctness. So if the last CPU in a node goes
3273 * offline, kcompactd may run anywhere; when the first one comes back
3274 * online, restore its CPU binding.
3275 */
3276static int kcompactd_cpu_online(unsigned int cpu)
3277{
3278	int nid;
3279
3280	for_each_node_state(nid, N_MEMORY) {
3281		pg_data_t *pgdat = NODE_DATA(nid);
3282		const struct cpumask *mask;
3283
3284		mask = cpumask_of_node(pgdat->node_id);
3285
3286		if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
3287			/* One of our CPUs online: restore mask */
3288			if (pgdat->kcompactd)
3289				set_cpus_allowed_ptr(pgdat->kcompactd, mask);
3290	}
3291	return 0;
3292}
3293
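/*
 * Like proc_dointvec_minmax(), but on PREEMPT_RT kernels warn once when a
 * write actually changes the value.
 */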
3294static int proc_dointvec_minmax_warn_RT_change(struct ctl_table *table,
3295		int write, void *buffer, size_t *lenp, loff_t *ppos)
3296{
3297	int ret, old;
3298
3299	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || !write)
3300		return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
3301
3302	old = *(int *)table->data;
3303	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
3304	if (ret)
3305		return ret;
3306	if (old != *(int *)table->data)
3307		pr_warn_once("sysctl attribute %s changed by %s[%d]\n",
3308			     table->procname, current->comm,
3309			     task_pid_nr(current));
3310	return ret;
3311}
3312
3313static struct ctl_table vm_compaction[] = {
3314	{
3315		.procname	= "compact_memory",
3316		.data		= &sysctl_compact_memory,
3317		.maxlen		= sizeof(int),
3318		.mode		= 0200,
3319		.proc_handler	= sysctl_compaction_handler,
3320	},
3321	{
3322		.procname	= "compaction_proactiveness",
3323		.data		= &sysctl_compaction_proactiveness,
3324		.maxlen		= sizeof(sysctl_compaction_proactiveness),
3325		.mode		= 0644,
3326		.proc_handler	= compaction_proactiveness_sysctl_handler,
3327		.extra1		= SYSCTL_ZERO,
3328		.extra2		= SYSCTL_ONE_HUNDRED,
3329	},
3330	{
3331		.procname	= "extfrag_threshold",
3332		.data		= &sysctl_extfrag_threshold,
3333		.maxlen		= sizeof(int),
3334		.mode		= 0644,
3335		.proc_handler	= proc_dointvec_minmax,
3336		.extra1		= SYSCTL_ZERO,
3337		.extra2		= SYSCTL_ONE_THOUSAND,
3338	},
3339	{
3340		.procname	= "compact_unevictable_allowed",
3341		.data		= &sysctl_compact_unevictable_allowed,
3342		.maxlen		= sizeof(int),
3343		.mode		= 0644,
3344		.proc_handler	= proc_dointvec_minmax_warn_RT_change,
3345		.extra1		= SYSCTL_ZERO,
3346		.extra2		= SYSCTL_ONE,
3347	},
3348	{ }
3349};
3350
3351static int __init kcompactd_init(void)
3352{
3353	int nid;
3354	int ret;
3355
3356	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
3357					"mm/compaction:online",
3358					kcompactd_cpu_online, NULL);
3359	if (ret < 0) {
3360		pr_err("kcompactd: failed to register hotplug callbacks.\n");
3361		return ret;
3362	}
3363
3364	for_each_node_state(nid, N_MEMORY)
3365		kcompactd_run(nid);
3366	register_sysctl_init("vm", vm_compaction);
3367	return 0;
3368}
3369subsys_initcall(kcompactd_init)
3370
3371#endif /* CONFIG_COMPACTION */