v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * linux/mm/compaction.c
   4 *
   5 * Memory compaction for the reduction of external fragmentation. Note that
   6 * this heavily depends upon page migration to do all the real heavy
   7 * lifting
   8 *
   9 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
  10 */
  11#include <linux/cpu.h>
  12#include <linux/swap.h>
  13#include <linux/migrate.h>
  14#include <linux/compaction.h>
  15#include <linux/mm_inline.h>
  16#include <linux/sched/signal.h>
  17#include <linux/backing-dev.h>
  18#include <linux/sysctl.h>
  19#include <linux/sysfs.h>
  20#include <linux/page-isolation.h>
  21#include <linux/kasan.h>
  22#include <linux/kthread.h>
  23#include <linux/freezer.h>
  24#include <linux/page_owner.h>
  25#include <linux/psi.h>
  26#include "internal.h"
  27
  28#ifdef CONFIG_COMPACTION
  29static inline void count_compact_event(enum vm_event_item item)
  30{
  31	count_vm_event(item);
  32}
  33
  34static inline void count_compact_events(enum vm_event_item item, long delta)
  35{
  36	count_vm_events(item, delta);
  37}
  38#else
  39#define count_compact_event(item) do { } while (0)
  40#define count_compact_events(item, delta) do { } while (0)
  41#endif
  42
  43#if defined CONFIG_COMPACTION || defined CONFIG_CMA
  44
  45#define CREATE_TRACE_POINTS
  46#include <trace/events/compaction.h>
  47
  48#define block_start_pfn(pfn, order)	round_down(pfn, 1UL << (order))
  49#define block_end_pfn(pfn, order)	ALIGN((pfn) + 1, 1UL << (order))
  50#define pageblock_start_pfn(pfn)	block_start_pfn(pfn, pageblock_order)
  51#define pageblock_end_pfn(pfn)		block_end_pfn(pfn, pageblock_order)
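     /*
      * Worked example, assuming pageblock_order == 9 (512-page blocks, as on
      * x86-64 with 2MB huge pages): for pfn 0x12345,
      * block_start_pfn(0x12345, 9) == 0x12200 and
      * block_end_pfn(0x12345, 9) == 0x12400, i.e. the half-open pageblock
      * range [0x12200, 0x12400) containing the pfn.
      */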
  52
  53static unsigned long release_freepages(struct list_head *freelist)
  54{
  55	struct page *page, *next;
  56	unsigned long high_pfn = 0;
  57
  58	list_for_each_entry_safe(page, next, freelist, lru) {
  59		unsigned long pfn = page_to_pfn(page);
  60		list_del(&page->lru);
  61		__free_page(page);
  62		if (pfn > high_pfn)
  63			high_pfn = pfn;
  64	}
  65
  66	return high_pfn;
  67}
  68
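     /*
      * Split the high-order pages isolated by isolate_freepages_block() (their
      * buddy order is stashed in page_private()) into order-0 pages, running
      * the normal post-allocation hook on each buddy before the split.
      */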
  69static void split_map_pages(struct list_head *list)
  70{
  71	unsigned int i, order, nr_pages;
  72	struct page *page, *next;
  73	LIST_HEAD(tmp_list);
  74
  75	list_for_each_entry_safe(page, next, list, lru) {
  76		list_del(&page->lru);
  77
  78		order = page_private(page);
  79		nr_pages = 1 << order;
  80
  81		post_alloc_hook(page, order, __GFP_MOVABLE);
  82		if (order)
  83			split_page(page, order);
  84
  85		for (i = 0; i < nr_pages; i++) {
  86			list_add(&page->lru, &tmp_list);
  87			page++;
  88		}
  89	}
  90
  91	list_splice(&tmp_list, list);
  92}
  93
  94#ifdef CONFIG_COMPACTION
  95
  96int PageMovable(struct page *page)
  97{
  98	struct address_space *mapping;
  99
 100	VM_BUG_ON_PAGE(!PageLocked(page), page);
 101	if (!__PageMovable(page))
 102		return 0;
 103
 104	mapping = page_mapping(page);
 105	if (mapping && mapping->a_ops && mapping->a_ops->isolate_page)
 106		return 1;
 107
 108	return 0;
 109}
 110EXPORT_SYMBOL(PageMovable);
 111
 112void __SetPageMovable(struct page *page, struct address_space *mapping)
 113{
 114	VM_BUG_ON_PAGE(!PageLocked(page), page);
 115	VM_BUG_ON_PAGE((unsigned long)mapping & PAGE_MAPPING_MOVABLE, page);
 116	page->mapping = (void *)((unsigned long)mapping | PAGE_MAPPING_MOVABLE);
 117}
 118EXPORT_SYMBOL(__SetPageMovable);
 119
 120void __ClearPageMovable(struct page *page)
 121{
 122	VM_BUG_ON_PAGE(!PageLocked(page), page);
 123	VM_BUG_ON_PAGE(!PageMovable(page), page);
 124	/*
  125	 * Clear the registered address_space value while keeping the
  126	 * PAGE_MAPPING_MOVABLE flag, so that the VM can recognise a page the
  127	 * driver has released after isolation and migration won't try to put it back.
 128	 */
 129	page->mapping = (void *)((unsigned long)page->mapping &
 130				PAGE_MAPPING_MOVABLE);
 131}
 132EXPORT_SYMBOL(__ClearPageMovable);
 133
 134/* Do not skip compaction more than 64 times */
 135#define COMPACT_MAX_DEFER_SHIFT 6
 136
 137/*
 138 * Compaction is deferred when compaction fails to result in a page
  139 * allocation success. 1 << compact_defer_shift compactions are skipped up
  140 * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT.
 141 */
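     /*
      * Example: each failure bumps compact_defer_shift, so successive rounds
      * back off and skip up to 2, 4, 8, ... 64 (1 << COMPACT_MAX_DEFER_SHIFT)
      * attempts before the zone is considered again.
      */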
 142void defer_compaction(struct zone *zone, int order)
 143{
 144	zone->compact_considered = 0;
 145	zone->compact_defer_shift++;
 146
 147	if (order < zone->compact_order_failed)
 148		zone->compact_order_failed = order;
 149
 150	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
 151		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
 152
 153	trace_mm_compaction_defer_compaction(zone, order);
 154}
 155
 156/* Returns true if compaction should be skipped this time */
 157bool compaction_deferred(struct zone *zone, int order)
 158{
 159	unsigned long defer_limit = 1UL << zone->compact_defer_shift;
 160
 161	if (order < zone->compact_order_failed)
 162		return false;
 163
 164	/* Avoid possible overflow */
 165	if (++zone->compact_considered > defer_limit)
 166		zone->compact_considered = defer_limit;
 167
 168	if (zone->compact_considered >= defer_limit)
 169		return false;
 170
 171	trace_mm_compaction_deferred(zone, order);
 172
 173	return true;
 174}
 175
 176/*
 177 * Update defer tracking counters after successful compaction of given order,
 178 * which means an allocation either succeeded (alloc_success == true) or is
 179 * expected to succeed.
 180 */
 181void compaction_defer_reset(struct zone *zone, int order,
 182		bool alloc_success)
 183{
 184	if (alloc_success) {
 185		zone->compact_considered = 0;
 186		zone->compact_defer_shift = 0;
 187	}
 188	if (order >= zone->compact_order_failed)
 189		zone->compact_order_failed = order + 1;
 190
 191	trace_mm_compaction_defer_reset(zone, order);
 192}
 193
 194/* Returns true if restarting compaction after many failures */
 195bool compaction_restarting(struct zone *zone, int order)
 196{
 197	if (order < zone->compact_order_failed)
 198		return false;
 199
 200	return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
 201		zone->compact_considered >= 1UL << zone->compact_defer_shift;
 202}
 203
 204/* Returns true if the pageblock should be scanned for pages to isolate. */
 205static inline bool isolation_suitable(struct compact_control *cc,
 206					struct page *page)
 207{
 208	if (cc->ignore_skip_hint)
 209		return true;
 210
 211	return !get_pageblock_skip(page);
 212}
 213
 214static void reset_cached_positions(struct zone *zone)
 215{
 216	zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
 217	zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
 218	zone->compact_cached_free_pfn =
 219				pageblock_start_pfn(zone_end_pfn(zone) - 1);
 220}
 221
 222/*
  223 * Compound pages of >= pageblock_order should consistently be skipped until
 224 * released. It is always pointless to compact pages of such order (if they are
 225 * migratable), and the pageblocks they occupy cannot contain any free pages.
 226 */
 227static bool pageblock_skip_persistent(struct page *page)
 228{
 229	if (!PageCompound(page))
 230		return false;
 231
 232	page = compound_head(page);
 233
 234	if (compound_order(page) >= pageblock_order)
 235		return true;
 236
 237	return false;
 238}
 239
 240static bool
 241__reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
 242							bool check_target)
 243{
 244	struct page *page = pfn_to_online_page(pfn);
 245	struct page *block_page;
 246	struct page *end_page;
 247	unsigned long block_pfn;
 248
 249	if (!page)
 250		return false;
 251	if (zone != page_zone(page))
 252		return false;
 253	if (pageblock_skip_persistent(page))
 254		return false;
 255
 256	/*
 257	 * If skip is already cleared do no further checking once the
 258	 * restart points have been set.
 259	 */
 260	if (check_source && check_target && !get_pageblock_skip(page))
 261		return true;
 262
 263	/*
 264	 * If clearing skip for the target scanner, do not select a
 265	 * non-movable pageblock as the starting point.
 266	 */
 267	if (!check_source && check_target &&
 268	    get_pageblock_migratetype(page) != MIGRATE_MOVABLE)
 269		return false;
 270
 271	/* Ensure the start of the pageblock or zone is online and valid */
 272	block_pfn = pageblock_start_pfn(pfn);
 273	block_pfn = max(block_pfn, zone->zone_start_pfn);
 274	block_page = pfn_to_online_page(block_pfn);
 275	if (block_page) {
 276		page = block_page;
 277		pfn = block_pfn;
 278	}
 279
 280	/* Ensure the end of the pageblock or zone is online and valid */
 281	block_pfn = pageblock_end_pfn(pfn) - 1;
 282	block_pfn = min(block_pfn, zone_end_pfn(zone) - 1);
 283	end_page = pfn_to_online_page(block_pfn);
 284	if (!end_page)
 285		return false;
 286
 287	/*
 288	 * Only clear the hint if a sample indicates there is either a
 289	 * free page or an LRU page in the block. One or other condition
 290	 * is necessary for the block to be a migration source/target.
 291	 */
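     	/*
     	 * The sampling stride is 1 << PAGE_ALLOC_COSTLY_ORDER (8 pages), so a
     	 * typical 512-page pageblock is probed at most 64 times.
     	 */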
 292	do {
 293		if (pfn_valid_within(pfn)) {
 294			if (check_source && PageLRU(page)) {
 295				clear_pageblock_skip(page);
 296				return true;
 297			}
 298
 299			if (check_target && PageBuddy(page)) {
 300				clear_pageblock_skip(page);
 301				return true;
 302			}
 303		}
 304
 305		page += (1 << PAGE_ALLOC_COSTLY_ORDER);
 306		pfn += (1 << PAGE_ALLOC_COSTLY_ORDER);
 307	} while (page <= end_page);
 308
 309	return false;
 310}
 311
 312/*
 313 * This function is called to clear all cached information on pageblocks that
 314 * should be skipped for page isolation when the migrate and free page scanner
 315 * meet.
 316 */
 317static void __reset_isolation_suitable(struct zone *zone)
 318{
 319	unsigned long migrate_pfn = zone->zone_start_pfn;
 320	unsigned long free_pfn = zone_end_pfn(zone) - 1;
 321	unsigned long reset_migrate = free_pfn;
 322	unsigned long reset_free = migrate_pfn;
 323	bool source_set = false;
 324	bool free_set = false;
 325
 326	if (!zone->compact_blockskip_flush)
 327		return;
 328
 329	zone->compact_blockskip_flush = false;
 330
 331	/*
 332	 * Walk the zone and update pageblock skip information. Source looks
  333	 * for PageLRU while target looks for PageBuddy. Once a scanner's
  334	 * restart point has been found, both PageBuddy and PageLRU are checked
  335	 * as the pageblock may be suitable as both source and target.
 336	 */
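     	/*
     	 * The walk converges from both ends: migrate_pfn advances from the
     	 * zone start while free_pfn retreats from the zone end, and the first
     	 * suitable block found on each side becomes that scanner's restart
     	 * point.
     	 */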
 337	for (; migrate_pfn < free_pfn; migrate_pfn += pageblock_nr_pages,
 338					free_pfn -= pageblock_nr_pages) {
 339		cond_resched();
 340
 341		/* Update the migrate PFN */
 342		if (__reset_isolation_pfn(zone, migrate_pfn, true, source_set) &&
 343		    migrate_pfn < reset_migrate) {
 344			source_set = true;
 345			reset_migrate = migrate_pfn;
 346			zone->compact_init_migrate_pfn = reset_migrate;
 347			zone->compact_cached_migrate_pfn[0] = reset_migrate;
 348			zone->compact_cached_migrate_pfn[1] = reset_migrate;
 349		}
 350
 351		/* Update the free PFN */
 352		if (__reset_isolation_pfn(zone, free_pfn, free_set, true) &&
 353		    free_pfn > reset_free) {
 354			free_set = true;
 355			reset_free = free_pfn;
 356			zone->compact_init_free_pfn = reset_free;
 357			zone->compact_cached_free_pfn = reset_free;
 358		}
 359	}
 360
 361	/* Leave no distance if no suitable block was reset */
 362	if (reset_migrate >= reset_free) {
 363		zone->compact_cached_migrate_pfn[0] = migrate_pfn;
 364		zone->compact_cached_migrate_pfn[1] = migrate_pfn;
 365		zone->compact_cached_free_pfn = free_pfn;
 366	}
 367}
 368
 369void reset_isolation_suitable(pg_data_t *pgdat)
 370{
 371	int zoneid;
 372
 373	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
 374		struct zone *zone = &pgdat->node_zones[zoneid];
 375		if (!populated_zone(zone))
 376			continue;
 377
 378		/* Only flush if a full compaction finished recently */
 379		if (zone->compact_blockskip_flush)
 380			__reset_isolation_suitable(zone);
 381	}
 382}
 383
 384/*
 385 * Sets the pageblock skip bit if it was clear. Note that this is a hint as
  386 * locks are not required for readers/writers. Returns true if it was already set.
 387 */
 388static bool test_and_set_skip(struct compact_control *cc, struct page *page,
 389							unsigned long pfn)
 390{
 391	bool skip;
 392
  393	/* Do not update if the skip hint is being ignored */
 394	if (cc->ignore_skip_hint)
 395		return false;
 396
 397	if (!IS_ALIGNED(pfn, pageblock_nr_pages))
 398		return false;
 399
 400	skip = get_pageblock_skip(page);
 401	if (!skip && !cc->no_set_skip_hint)
 402		set_pageblock_skip(page);
 403
 404	return skip;
 405}
 406
 407static void update_cached_migrate(struct compact_control *cc, unsigned long pfn)
 408{
 409	struct zone *zone = cc->zone;
 410
 411	pfn = pageblock_end_pfn(pfn);
 412
 413	/* Set for isolation rather than compaction */
 414	if (cc->no_set_skip_hint)
 415		return;
 416
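     	/*
     	 * [0] is the async restart hint, [1] the sync one; a sync scan
     	 * advances both of them.
     	 */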
 417	if (pfn > zone->compact_cached_migrate_pfn[0])
 418		zone->compact_cached_migrate_pfn[0] = pfn;
 419	if (cc->mode != MIGRATE_ASYNC &&
 420	    pfn > zone->compact_cached_migrate_pfn[1])
 421		zone->compact_cached_migrate_pfn[1] = pfn;
 422}
 423
 424/*
 425 * If no pages were isolated then mark this pageblock to be skipped in the
 426 * future. The information is later cleared by __reset_isolation_suitable().
 427 */
 428static void update_pageblock_skip(struct compact_control *cc,
 429			struct page *page, unsigned long pfn)
 430{
 431	struct zone *zone = cc->zone;
 432
 433	if (cc->no_set_skip_hint)
 434		return;
 435
 436	if (!page)
 437		return;
 438
 439	set_pageblock_skip(page);
 440
 441	/* Update where async and sync compaction should restart */
 442	if (pfn < zone->compact_cached_free_pfn)
 443		zone->compact_cached_free_pfn = pfn;
 444}
 445#else
 446static inline bool isolation_suitable(struct compact_control *cc,
 447					struct page *page)
 448{
 449	return true;
 450}
 451
 452static inline bool pageblock_skip_persistent(struct page *page)
 453{
 454	return false;
 455}
 456
 457static inline void update_pageblock_skip(struct compact_control *cc,
 458			struct page *page, unsigned long pfn)
 459{
 460}
 461
 462static void update_cached_migrate(struct compact_control *cc, unsigned long pfn)
 463{
 464}
 465
 466static bool test_and_set_skip(struct compact_control *cc, struct page *page,
 467							unsigned long pfn)
 468{
 469	return false;
 470}
 471#endif /* CONFIG_COMPACTION */
 472
 473/*
 474 * Compaction requires the taking of some coarse locks that are potentially
 475 * very heavily contended. For async compaction, trylock and record if the
 476 * lock is contended. The lock will still be acquired but compaction will
 477 * abort when the current block is finished regardless of success rate.
 478 * Sync compaction acquires the lock.
 479 *
 480 * Always returns true which makes it easier to track lock state in callers.
 481 */
 482static bool compact_lock_irqsave(spinlock_t *lock, unsigned long *flags,
 483						struct compact_control *cc)
 484{
 485	/* Track if the lock is contended in async mode */
 486	if (cc->mode == MIGRATE_ASYNC && !cc->contended) {
 487		if (spin_trylock_irqsave(lock, *flags))
 488			return true;
 489
 490		cc->contended = true;
 491	}
 492
 493	spin_lock_irqsave(lock, *flags);
 494	return true;
 495}
 496
 497/*
 498 * Compaction requires the taking of some coarse locks that are potentially
 499 * very heavily contended. The lock should be periodically unlocked to avoid
 500 * having disabled IRQs for a long time, even when there is nobody waiting on
 501 * the lock. It might also be that allowing the IRQs will result in
 502 * need_resched() becoming true. If scheduling is needed, async compaction
 503 * aborts. Sync compaction schedules.
 504 * Either compaction type will also abort if a fatal signal is pending.
 505 * In either case if the lock was locked, it is dropped and not regained.
 506 *
 507 * Returns true if compaction should abort due to fatal signal pending, or
 508 *		async compaction due to need_resched()
 509 * Returns false when compaction can continue (sync compaction might have
 510 *		scheduled)
 511 */
 512static bool compact_unlock_should_abort(spinlock_t *lock,
 513		unsigned long flags, bool *locked, struct compact_control *cc)
 514{
 515	if (*locked) {
 516		spin_unlock_irqrestore(lock, flags);
 517		*locked = false;
 518	}
 519
 520	if (fatal_signal_pending(current)) {
 521		cc->contended = true;
 522		return true;
 523	}
 524
 525	cond_resched();
 526
 527	return false;
 528}
 529
 530/*
 531 * Isolate free pages onto a private freelist. If @strict is true, will abort
 532 * returning 0 on any invalid PFNs or non-free pages inside of the pageblock
 533 * (even though it may still end up isolating some pages).
 534 */
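     /*
      * Isolated pages keep their buddy order, recorded in page_private(), so
      * that split_map_pages() can later break them into order-0 pages.
      */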
 535static unsigned long isolate_freepages_block(struct compact_control *cc,
 536				unsigned long *start_pfn,
 537				unsigned long end_pfn,
 538				struct list_head *freelist,
 539				unsigned int stride,
 540				bool strict)
 541{
 542	int nr_scanned = 0, total_isolated = 0;
 543	struct page *cursor;
 544	unsigned long flags = 0;
 545	bool locked = false;
 546	unsigned long blockpfn = *start_pfn;
 547	unsigned int order;
 548
 549	/* Strict mode is for isolation, speed is secondary */
 550	if (strict)
 551		stride = 1;
 552
 553	cursor = pfn_to_page(blockpfn);
 554
 555	/* Isolate free pages. */
 556	for (; blockpfn < end_pfn; blockpfn += stride, cursor += stride) {
 557		int isolated;
 558		struct page *page = cursor;
 559
 560		/*
 561		 * Periodically drop the lock (if held) regardless of its
 562		 * contention, to give chance to IRQs. Abort if fatal signal
 563		 * pending or async compaction detects need_resched()
 564		 */
 565		if (!(blockpfn % SWAP_CLUSTER_MAX)
 566		    && compact_unlock_should_abort(&cc->zone->lock, flags,
 567								&locked, cc))
 568			break;
 569
 570		nr_scanned++;
 571		if (!pfn_valid_within(blockpfn))
 572			goto isolate_fail;
 573
 574		/*
 575		 * For compound pages such as THP and hugetlbfs, we can save
 576		 * potentially a lot of iterations if we skip them at once.
 577		 * The check is racy, but we can consider only valid values
 578		 * and the only danger is skipping too much.
 579		 */
 580		if (PageCompound(page)) {
 581			const unsigned int order = compound_order(page);
 582
 583			if (likely(order < MAX_ORDER)) {
 584				blockpfn += (1UL << order) - 1;
 585				cursor += (1UL << order) - 1;
 586			}
 587			goto isolate_fail;
 588		}
 589
 590		if (!PageBuddy(page))
 591			goto isolate_fail;
 592
 593		/*
 594		 * If we already hold the lock, we can skip some rechecking.
 595		 * Note that if we hold the lock now, checked_pageblock was
 596		 * already set in some previous iteration (or strict is true),
 597		 * so it is correct to skip the suitable migration target
 598		 * recheck as well.
 599		 */
 600		if (!locked) {
 601			locked = compact_lock_irqsave(&cc->zone->lock,
 602								&flags, cc);
 603
 604			/* Recheck this is a buddy page under lock */
 605			if (!PageBuddy(page))
 606				goto isolate_fail;
 607		}
 608
 609		/* Found a free page, will break it into order-0 pages */
 610		order = page_order(page);
 611		isolated = __isolate_free_page(page, order);
 612		if (!isolated)
 613			break;
 614		set_page_private(page, order);
 615
 616		total_isolated += isolated;
 617		cc->nr_freepages += isolated;
 618		list_add_tail(&page->lru, freelist);
 619
 620		if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
 621			blockpfn += isolated;
 622			break;
 623		}
 624		/* Advance to the end of split page */
 625		blockpfn += isolated - 1;
 626		cursor += isolated - 1;
 627		continue;
 628
 629isolate_fail:
 630		if (strict)
 631			break;
 632		else
 633			continue;
 634
 635	}
 636
 637	if (locked)
 638		spin_unlock_irqrestore(&cc->zone->lock, flags);
 639
 640	/*
 641	 * There is a tiny chance that we have read bogus compound_order(),
 642	 * so be careful to not go outside of the pageblock.
 643	 */
 644	if (unlikely(blockpfn > end_pfn))
 645		blockpfn = end_pfn;
 646
 647	trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn,
 648					nr_scanned, total_isolated);
 649
 650	/* Record how far we have got within the block */
 651	*start_pfn = blockpfn;
 652
 653	/*
 654	 * If strict isolation is requested by CMA then check that all the
 655	 * pages requested were isolated. If there were any failures, 0 is
 656	 * returned and CMA will fail.
 657	 */
 658	if (strict && blockpfn < end_pfn)
 659		total_isolated = 0;
 660
 661	cc->total_free_scanned += nr_scanned;
 662	if (total_isolated)
 663		count_compact_events(COMPACTISOLATED, total_isolated);
 664	return total_isolated;
 665}
 666
 667/**
 668 * isolate_freepages_range() - isolate free pages.
 669 * @cc:        Compaction control structure.
 670 * @start_pfn: The first PFN to start isolating.
 671 * @end_pfn:   The one-past-last PFN.
 672 *
 673 * Non-free pages, invalid PFNs, or zone boundaries within the
 674 * [start_pfn, end_pfn) range are considered errors, cause function to
 675 * undo its actions and return zero.
 676 *
  677 * Otherwise, the function returns the one-past-the-last PFN of the
  678 * isolated pages (which may be greater than end_pfn if the end fell in
  679 * the middle of a free page).
 680 */
 681unsigned long
 682isolate_freepages_range(struct compact_control *cc,
 683			unsigned long start_pfn, unsigned long end_pfn)
 684{
 685	unsigned long isolated, pfn, block_start_pfn, block_end_pfn;
 686	LIST_HEAD(freelist);
 687
 688	pfn = start_pfn;
 689	block_start_pfn = pageblock_start_pfn(pfn);
 690	if (block_start_pfn < cc->zone->zone_start_pfn)
 691		block_start_pfn = cc->zone->zone_start_pfn;
 692	block_end_pfn = pageblock_end_pfn(pfn);
 693
 694	for (; pfn < end_pfn; pfn += isolated,
 695				block_start_pfn = block_end_pfn,
 696				block_end_pfn += pageblock_nr_pages) {
 697		/* Protect pfn from changing by isolate_freepages_block */
 698		unsigned long isolate_start_pfn = pfn;
 699
 700		block_end_pfn = min(block_end_pfn, end_pfn);
 701
 702		/*
 703		 * pfn could pass the block_end_pfn if isolated freepage
 704		 * is more than pageblock order. In this case, we adjust
 705		 * scanning range to right one.
 706		 */
 707		if (pfn >= block_end_pfn) {
 708			block_start_pfn = pageblock_start_pfn(pfn);
 709			block_end_pfn = pageblock_end_pfn(pfn);
 710			block_end_pfn = min(block_end_pfn, end_pfn);
 711		}
 712
 713		if (!pageblock_pfn_to_page(block_start_pfn,
 714					block_end_pfn, cc->zone))
 715			break;
 716
 717		isolated = isolate_freepages_block(cc, &isolate_start_pfn,
 718					block_end_pfn, &freelist, 0, true);
 719
 720		/*
 721		 * In strict mode, isolate_freepages_block() returns 0 if
 722		 * there are any holes in the block (ie. invalid PFNs or
 723		 * non-free pages).
 724		 */
 725		if (!isolated)
 726			break;
 727
 728		/*
 729		 * If we managed to isolate pages, it is always (1 << n) *
 730		 * pageblock_nr_pages for some non-negative n.  (Max order
 731		 * page may span two pageblocks).
 732		 */
 733	}
 734
 735	/* __isolate_free_page() does not map the pages */
 736	split_map_pages(&freelist);
 737
 738	if (pfn < end_pfn) {
 739		/* Loop terminated early, cleanup. */
 740		release_freepages(&freelist);
 741		return 0;
 742	}
 743
 744	/* We don't use freelists for anything. */
 745	return pfn;
 746}
 747
 748/* Similar to reclaim, but different enough that they don't share logic */
 749static bool too_many_isolated(pg_data_t *pgdat)
 750{
 751	unsigned long active, inactive, isolated;
 752
 753	inactive = node_page_state(pgdat, NR_INACTIVE_FILE) +
 754			node_page_state(pgdat, NR_INACTIVE_ANON);
 755	active = node_page_state(pgdat, NR_ACTIVE_FILE) +
 756			node_page_state(pgdat, NR_ACTIVE_ANON);
 757	isolated = node_page_state(pgdat, NR_ISOLATED_FILE) +
 758			node_page_state(pgdat, NR_ISOLATED_ANON);
 759
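     	/* Back off once isolated pages exceed half of the remaining LRU pages */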
 760	return isolated > (inactive + active) / 2;
 761}
 762
 763/**
 764 * isolate_migratepages_block() - isolate all migrate-able pages within
 765 *				  a single pageblock
 766 * @cc:		Compaction control structure.
 767 * @low_pfn:	The first PFN to isolate
 768 * @end_pfn:	The one-past-the-last PFN to isolate, within same pageblock
 769 * @isolate_mode: Isolation mode to be used.
 770 *
 771 * Isolate all pages that can be migrated from the range specified by
 772 * [low_pfn, end_pfn). The range is expected to be within same pageblock.
  773 * Returns zero if there is a fatal signal pending, otherwise the PFN of the
  774 * first page that was not scanned (which may be less than, equal to, or
  775 * greater than end_pfn).
 776 *
 777 * The pages are isolated on cc->migratepages list (not required to be empty),
 778 * and cc->nr_migratepages is updated accordingly. The cc->migrate_pfn field
 779 * is neither read nor updated.
 780 */
 781static unsigned long
 782isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 783			unsigned long end_pfn, isolate_mode_t isolate_mode)
 784{
 785	pg_data_t *pgdat = cc->zone->zone_pgdat;
 786	unsigned long nr_scanned = 0, nr_isolated = 0;
 787	struct lruvec *lruvec;
 788	unsigned long flags = 0;
 789	bool locked = false;
 790	struct page *page = NULL, *valid_page = NULL;
 791	unsigned long start_pfn = low_pfn;
 792	bool skip_on_failure = false;
 793	unsigned long next_skip_pfn = 0;
 794	bool skip_updated = false;
 795
 796	/*
 797	 * Ensure that there are not too many pages isolated from the LRU
 798	 * list by either parallel reclaimers or compaction. If there are,
 799	 * delay for some time until fewer pages are isolated
 800	 */
 801	while (unlikely(too_many_isolated(pgdat))) {
 802		/* async migration should just abort */
 803		if (cc->mode == MIGRATE_ASYNC)
 804			return 0;
 805
 806		congestion_wait(BLK_RW_ASYNC, HZ/10);
 807
 808		if (fatal_signal_pending(current))
 809			return 0;
 810	}
 811
 812	cond_resched();
 813
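     	/*
     	 * For async direct compaction, give up on the current cc->order
     	 * aligned block as soon as isolation of any page in it fails,
     	 * instead of scanning the rest of the block.
     	 */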
 814	if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) {
 815		skip_on_failure = true;
 816		next_skip_pfn = block_end_pfn(low_pfn, cc->order);
 817	}
 818
 819	/* Time to isolate some pages for migration */
 820	for (; low_pfn < end_pfn; low_pfn++) {
 821
 822		if (skip_on_failure && low_pfn >= next_skip_pfn) {
 823			/*
 824			 * We have isolated all migration candidates in the
 825			 * previous order-aligned block, and did not skip it due
 826			 * to failure. We should migrate the pages now and
 827			 * hopefully succeed compaction.
 828			 */
 829			if (nr_isolated)
 830				break;
 831
 832			/*
 833			 * We failed to isolate in the previous order-aligned
 834			 * block. Set the new boundary to the end of the
 835			 * current block. Note we can't simply increase
 836			 * next_skip_pfn by 1 << order, as low_pfn might have
 837			 * been incremented by a higher number due to skipping
 838			 * a compound or a high-order buddy page in the
 839			 * previous loop iteration.
 840			 */
 841			next_skip_pfn = block_end_pfn(low_pfn, cc->order);
 842		}
 843
 844		/*
 845		 * Periodically drop the lock (if held) regardless of its
 846		 * contention, to give chance to IRQs. Abort completely if
 847		 * a fatal signal is pending.
 848		 */
 849		if (!(low_pfn % SWAP_CLUSTER_MAX)
 850		    && compact_unlock_should_abort(&pgdat->lru_lock,
 851					    flags, &locked, cc)) {
 852			low_pfn = 0;
 853			goto fatal_pending;
 854		}
 855
 856		if (!pfn_valid_within(low_pfn))
 857			goto isolate_fail;
 858		nr_scanned++;
 859
 860		page = pfn_to_page(low_pfn);
 861
 862		/*
 863		 * Check if the pageblock has already been marked skipped.
 864		 * Only the aligned PFN is checked as the caller isolates
 865		 * COMPACT_CLUSTER_MAX at a time so the second call must
 866		 * not falsely conclude that the block should be skipped.
 867		 */
 868		if (!valid_page && IS_ALIGNED(low_pfn, pageblock_nr_pages)) {
 869			if (!cc->ignore_skip_hint && get_pageblock_skip(page)) {
 870				low_pfn = end_pfn;
 871				goto isolate_abort;
 872			}
 873			valid_page = page;
 874		}
 875
 876		/*
 877		 * Skip if free. We read page order here without zone lock
 878		 * which is generally unsafe, but the race window is small and
 879		 * the worst thing that can happen is that we skip some
 880		 * potential isolation targets.
 881		 */
 882		if (PageBuddy(page)) {
 883			unsigned long freepage_order = page_order_unsafe(page);
 884
 885			/*
 886			 * Without lock, we cannot be sure that what we got is
 887			 * a valid page order. Consider only values in the
 888			 * valid order range to prevent low_pfn overflow.
 889			 */
 890			if (freepage_order > 0 && freepage_order < MAX_ORDER)
 891				low_pfn += (1UL << freepage_order) - 1;
 892			continue;
 893		}
 894
 895		/*
 896		 * Regardless of being on LRU, compound pages such as THP and
 897		 * hugetlbfs are not to be compacted. We can potentially save
 898		 * a lot of iterations if we skip them at once. The check is
 899		 * racy, but we can consider only valid values and the only
 900		 * danger is skipping too much.
 901		 */
 902		if (PageCompound(page)) {
 903			const unsigned int order = compound_order(page);
 904
 905			if (likely(order < MAX_ORDER))
 906				low_pfn += (1UL << order) - 1;
 907			goto isolate_fail;
 908		}
 909
 910		/*
 911		 * Check may be lockless but that's ok as we recheck later.
 912		 * It's possible to migrate LRU and non-lru movable pages.
 913		 * Skip any other type of page
 914		 */
 915		if (!PageLRU(page)) {
 916			/*
 917			 * __PageMovable can return false positive so we need
 918			 * to verify it under page_lock.
 919			 */
 920			if (unlikely(__PageMovable(page)) &&
 921					!PageIsolated(page)) {
 922				if (locked) {
 923					spin_unlock_irqrestore(&pgdat->lru_lock,
 924									flags);
 925					locked = false;
 926				}
 927
 928				if (!isolate_movable_page(page, isolate_mode))
 929					goto isolate_success;
 930			}
 931
 932			goto isolate_fail;
 933		}
 934
 935		/*
 936		 * Migration will fail if an anonymous page is pinned in memory,
 937		 * so avoid taking lru_lock and isolating it unnecessarily in an
 938		 * admittedly racy check.
 939		 */
 940		if (!page_mapping(page) &&
 941		    page_count(page) > page_mapcount(page))
 942			goto isolate_fail;
 943
 944		/*
 945		 * Only allow to migrate anonymous pages in GFP_NOFS context
 946		 * because those do not depend on fs locks.
 947		 */
 948		if (!(cc->gfp_mask & __GFP_FS) && page_mapping(page))
 949			goto isolate_fail;
 950
 951		/* If we already hold the lock, we can skip some rechecking */
 952		if (!locked) {
 953			locked = compact_lock_irqsave(&pgdat->lru_lock,
 954								&flags, cc);
 955
 956			/* Try get exclusive access under lock */
 957			if (!skip_updated) {
 958				skip_updated = true;
 959				if (test_and_set_skip(cc, page, low_pfn))
 960					goto isolate_abort;
 961			}
 962
 963			/* Recheck PageLRU and PageCompound under lock */
 964			if (!PageLRU(page))
 965				goto isolate_fail;
 966
 967			/*
  968			 * Page became compound since the non-locked check,
 969			 * and it's on LRU. It can only be a THP so the order
 970			 * is safe to read and it's 0 for tail pages.
 971			 */
 972			if (unlikely(PageCompound(page))) {
 973				low_pfn += compound_nr(page) - 1;
 974				goto isolate_fail;
 975			}
 976		}
 977
 978		lruvec = mem_cgroup_page_lruvec(page, pgdat);
 979
 980		/* Try isolate the page */
 981		if (__isolate_lru_page(page, isolate_mode) != 0)
 982			goto isolate_fail;
 983
 984		VM_BUG_ON_PAGE(PageCompound(page), page);
 985
 986		/* Successfully isolated */
 987		del_page_from_lru_list(page, lruvec, page_lru(page));
 988		inc_node_page_state(page,
 989				NR_ISOLATED_ANON + page_is_file_cache(page));
 990
 991isolate_success:
 992		list_add(&page->lru, &cc->migratepages);
 993		cc->nr_migratepages++;
 994		nr_isolated++;
 995
 996		/*
 997		 * Avoid isolating too much unless this block is being
 998		 * rescanned (e.g. dirty/writeback pages, parallel allocation)
 999		 * or a lock is contended. For contention, isolate quickly to
1000		 * potentially remove one source of contention.
1001		 */
1002		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX &&
1003		    !cc->rescan && !cc->contended) {
1004			++low_pfn;
1005			break;
1006		}
1007
1008		continue;
1009isolate_fail:
1010		if (!skip_on_failure)
1011			continue;
1012
1013		/*
1014		 * We have isolated some pages, but then failed. Release them
1015		 * instead of migrating, as we cannot form the cc->order buddy
1016		 * page anyway.
1017		 */
1018		if (nr_isolated) {
1019			if (locked) {
1020				spin_unlock_irqrestore(&pgdat->lru_lock, flags);
1021				locked = false;
1022			}
1023			putback_movable_pages(&cc->migratepages);
1024			cc->nr_migratepages = 0;
1025			nr_isolated = 0;
1026		}
1027
1028		if (low_pfn < next_skip_pfn) {
1029			low_pfn = next_skip_pfn - 1;
1030			/*
1031			 * The check near the loop beginning would have updated
1032			 * next_skip_pfn too, but this is a bit simpler.
1033			 */
1034			next_skip_pfn += 1UL << cc->order;
1035		}
1036	}
1037
1038	/*
1039	 * The PageBuddy() check could have potentially brought us outside
1040	 * the range to be scanned.
1041	 */
1042	if (unlikely(low_pfn > end_pfn))
1043		low_pfn = end_pfn;
1044
1045isolate_abort:
1046	if (locked)
1047		spin_unlock_irqrestore(&pgdat->lru_lock, flags);
1048
1049	/*
 1050	 * Update the cached scanner pfn once the pageblock has been scanned.
1051	 * Pages will either be migrated in which case there is no point
1052	 * scanning in the near future or migration failed in which case the
1053	 * failure reason may persist. The block is marked for skipping if
1054	 * there were no pages isolated in the block or if the block is
1055	 * rescanned twice in a row.
1056	 */
1057	if (low_pfn == end_pfn && (!nr_isolated || cc->rescan)) {
1058		if (valid_page && !skip_updated)
1059			set_pageblock_skip(valid_page);
1060		update_cached_migrate(cc, low_pfn);
1061	}
1062
1063	trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
1064						nr_scanned, nr_isolated);
1065
1066fatal_pending:
1067	cc->total_migrate_scanned += nr_scanned;
1068	if (nr_isolated)
1069		count_compact_events(COMPACTISOLATED, nr_isolated);
1070
1071	return low_pfn;
1072}
1073
1074/**
1075 * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
1076 * @cc:        Compaction control structure.
1077 * @start_pfn: The first PFN to start isolating.
1078 * @end_pfn:   The one-past-last PFN.
1079 *
1080 * Returns zero if isolation fails fatally due to e.g. pending signal.
1081 * Otherwise, function returns one-past-the-last PFN of isolated page
1082 * (which may be greater than end_pfn if end fell in a middle of a THP page).
1083 */
1084unsigned long
1085isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
1086							unsigned long end_pfn)
1087{
1088	unsigned long pfn, block_start_pfn, block_end_pfn;
1089
1090	/* Scan block by block. First and last block may be incomplete */
1091	pfn = start_pfn;
1092	block_start_pfn = pageblock_start_pfn(pfn);
1093	if (block_start_pfn < cc->zone->zone_start_pfn)
1094		block_start_pfn = cc->zone->zone_start_pfn;
1095	block_end_pfn = pageblock_end_pfn(pfn);
1096
1097	for (; pfn < end_pfn; pfn = block_end_pfn,
1098				block_start_pfn = block_end_pfn,
1099				block_end_pfn += pageblock_nr_pages) {
1100
1101		block_end_pfn = min(block_end_pfn, end_pfn);
1102
1103		if (!pageblock_pfn_to_page(block_start_pfn,
1104					block_end_pfn, cc->zone))
1105			continue;
1106
1107		pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
1108							ISOLATE_UNEVICTABLE);
1109
1110		if (!pfn)
1111			break;
1112
1113		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
1114			break;
1115	}
1116
1117	return pfn;
1118}
1119
1120#endif /* CONFIG_COMPACTION || CONFIG_CMA */
1121#ifdef CONFIG_COMPACTION
1122
1123static bool suitable_migration_source(struct compact_control *cc,
1124							struct page *page)
1125{
1126	int block_mt;
1127
1128	if (pageblock_skip_persistent(page))
1129		return false;
1130
1131	if ((cc->mode != MIGRATE_ASYNC) || !cc->direct_compaction)
1132		return true;
1133
1134	block_mt = get_pageblock_migratetype(page);
1135
1136	if (cc->migratetype == MIGRATE_MOVABLE)
1137		return is_migrate_movable(block_mt);
1138	else
1139		return block_mt == cc->migratetype;
1140}
1141
1142/* Returns true if the page is within a block suitable for migration to */
1143static bool suitable_migration_target(struct compact_control *cc,
1144							struct page *page)
1145{
1146	/* If the page is a large free page, then disallow migration */
1147	if (PageBuddy(page)) {
1148		/*
1149		 * We are checking page_order without zone->lock taken. But
1150		 * the only small danger is that we skip a potentially suitable
 1151		 * pageblock, so it's not worth checking that the order is in a valid range.
1152		 */
1153		if (page_order_unsafe(page) >= pageblock_order)
1154			return false;
1155	}
1156
1157	if (cc->ignore_block_suitable)
1158		return true;
1159
1160	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
1161	if (is_migrate_movable(get_pageblock_migratetype(page)))
1162		return true;
1163
1164	/* Otherwise skip the block */
1165	return false;
1166}
1167
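     /*
      * The scan limit shrinks with each fast search failure: with
      * COMPACT_CLUSTER_MAX (SWAP_CLUSTER_MAX) of 32, the limit goes
      * 33, 17, 9, 5, 3, 2 and then stays at 1.
      */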
1168static inline unsigned int
1169freelist_scan_limit(struct compact_control *cc)
1170{
1171	unsigned short shift = BITS_PER_LONG - 1;
1172
1173	return (COMPACT_CLUSTER_MAX >> min(shift, cc->fast_search_fail)) + 1;
1174}
1175
1176/*
1177 * Test whether the free scanner has reached the same or lower pageblock than
1178 * the migration scanner, and compaction should thus terminate.
1179 */
1180static inline bool compact_scanners_met(struct compact_control *cc)
1181{
1182	return (cc->free_pfn >> pageblock_order)
1183		<= (cc->migrate_pfn >> pageblock_order);
1184}
1185
1186/*
1187 * Used when scanning for a suitable migration target which scans freelists
 1188 * in reverse. Reorders the list so that the unscanned pages are scanned
 1189 * first on the next iteration of the free scanner.
1190 */
1191static void
1192move_freelist_head(struct list_head *freelist, struct page *freepage)
1193{
1194	LIST_HEAD(sublist);
1195
1196	if (!list_is_last(freelist, &freepage->lru)) {
1197		list_cut_before(&sublist, freelist, &freepage->lru);
1198		if (!list_empty(&sublist))
1199			list_splice_tail(&sublist, freelist);
1200	}
1201}
1202
1203/*
1204 * Similar to move_freelist_head except used by the migration scanner
1205 * when scanning forward. It's possible for these list operations to
1206 * move against each other if they search the free list exactly in
1207 * lockstep.
1208 */
1209static void
1210move_freelist_tail(struct list_head *freelist, struct page *freepage)
1211{
1212	LIST_HEAD(sublist);
1213
1214	if (!list_is_first(freelist, &freepage->lru)) {
1215		list_cut_position(&sublist, freelist, &freepage->lru);
1216		if (!list_empty(&sublist))
1217			list_splice_tail(&sublist, freelist);
1218	}
1219}
1220
1221static void
1222fast_isolate_around(struct compact_control *cc, unsigned long pfn, unsigned long nr_isolated)
1223{
1224	unsigned long start_pfn, end_pfn;
1225	struct page *page = pfn_to_page(pfn);
1226
1227	/* Do not search around if there are enough pages already */
1228	if (cc->nr_freepages >= cc->nr_migratepages)
1229		return;
1230
1231	/* Minimise scanning during async compaction */
1232	if (cc->direct_compaction && cc->mode == MIGRATE_ASYNC)
1233		return;
1234
1235	/* Pageblock boundaries */
1236	start_pfn = pageblock_start_pfn(pfn);
1237	end_pfn = min(pageblock_end_pfn(pfn), zone_end_pfn(cc->zone)) - 1;
1238
1239	/* Scan before */
1240	if (start_pfn != pfn) {
1241		isolate_freepages_block(cc, &start_pfn, pfn, &cc->freepages, 1, false);
1242		if (cc->nr_freepages >= cc->nr_migratepages)
1243			return;
1244	}
1245
1246	/* Scan after */
1247	start_pfn = pfn + nr_isolated;
1248	if (start_pfn < end_pfn)
1249		isolate_freepages_block(cc, &start_pfn, end_pfn, &cc->freepages, 1, false);
1250
1251	/* Skip this pageblock in the future as it's full or nearly full */
1252	if (cc->nr_freepages < cc->nr_migratepages)
1253		set_pageblock_skip(page);
1254}
1255
1256/* Search orders in round-robin fashion */
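     /* e.g. with cc->order == 4 and cc->search_order == 2, orders are tried 2, 1, 0, 3 */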
1257static int next_search_order(struct compact_control *cc, int order)
1258{
1259	order--;
1260	if (order < 0)
1261		order = cc->order - 1;
1262
1263	/* Search wrapped around? */
1264	if (order == cc->search_order) {
1265		cc->search_order--;
1266		if (cc->search_order < 0)
1267			cc->search_order = cc->order - 1;
1268		return -1;
1269	}
1270
1271	return order;
1272}
1273
1274static unsigned long
1275fast_isolate_freepages(struct compact_control *cc)
1276{
1277	unsigned int limit = min(1U, freelist_scan_limit(cc) >> 1);
1278	unsigned int nr_scanned = 0;
1279	unsigned long low_pfn, min_pfn, high_pfn = 0, highest = 0;
1280	unsigned long nr_isolated = 0;
1281	unsigned long distance;
1282	struct page *page = NULL;
1283	bool scan_start = false;
1284	int order;
1285
1286	/* Full compaction passes in a negative order */
1287	if (cc->order <= 0)
1288		return cc->free_pfn;
1289
1290	/*
1291	 * If starting the scan, use a deeper search and use the highest
1292	 * PFN found if a suitable one is not found.
1293	 */
1294	if (cc->free_pfn >= cc->zone->compact_init_free_pfn) {
1295		limit = pageblock_nr_pages >> 1;
1296		scan_start = true;
1297	}
1298
1299	/*
1300	 * Preferred point is in the top quarter of the scan space but take
1301	 * a pfn from the top half if the search is problematic.
1302	 */
1303	distance = (cc->free_pfn - cc->migrate_pfn);
1304	low_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 2));
1305	min_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 1));
1306
1307	if (WARN_ON_ONCE(min_pfn > low_pfn))
1308		low_pfn = min_pfn;
1309
1310	/*
1311	 * Search starts from the last successful isolation order or the next
1312	 * order to search after a previous failure
1313	 */
1314	cc->search_order = min_t(unsigned int, cc->order - 1, cc->search_order);
1315
1316	for (order = cc->search_order;
1317	     !page && order >= 0;
1318	     order = next_search_order(cc, order)) {
1319		struct free_area *area = &cc->zone->free_area[order];
1320		struct list_head *freelist;
1321		struct page *freepage;
1322		unsigned long flags;
1323		unsigned int order_scanned = 0;
1324
1325		if (!area->nr_free)
1326			continue;
1327
1328		spin_lock_irqsave(&cc->zone->lock, flags);
1329		freelist = &area->free_list[MIGRATE_MOVABLE];
1330		list_for_each_entry_reverse(freepage, freelist, lru) {
1331			unsigned long pfn;
1332
1333			order_scanned++;
1334			nr_scanned++;
1335			pfn = page_to_pfn(freepage);
1336
1337			if (pfn >= highest)
1338				highest = pageblock_start_pfn(pfn);
1339
1340			if (pfn >= low_pfn) {
1341				cc->fast_search_fail = 0;
1342				cc->search_order = order;
1343				page = freepage;
1344				break;
1345			}
1346
1347			if (pfn >= min_pfn && pfn > high_pfn) {
1348				high_pfn = pfn;
1349
1350				/* Shorten the scan if a candidate is found */
1351				limit >>= 1;
1352			}
1353
1354			if (order_scanned >= limit)
1355				break;
1356		}
1357
1358		/* Use a minimum pfn if a preferred one was not found */
1359		if (!page && high_pfn) {
1360			page = pfn_to_page(high_pfn);
1361
1362			/* Update freepage for the list reorder below */
1363			freepage = page;
1364		}
1365
 1366		/* Reorder so that a future search skips recent pages */
1367		move_freelist_head(freelist, freepage);
1368
1369		/* Isolate the page if available */
1370		if (page) {
1371			if (__isolate_free_page(page, order)) {
1372				set_page_private(page, order);
1373				nr_isolated = 1 << order;
1374				cc->nr_freepages += nr_isolated;
1375				list_add_tail(&page->lru, &cc->freepages);
1376				count_compact_events(COMPACTISOLATED, nr_isolated);
1377			} else {
1378				/* If isolation fails, abort the search */
1379				order = cc->search_order + 1;
1380				page = NULL;
1381			}
1382		}
1383
1384		spin_unlock_irqrestore(&cc->zone->lock, flags);
1385
1386		/*
 1387		 * Smaller scan on next order so the total scan is related
1388		 * to freelist_scan_limit.
1389		 */
1390		if (order_scanned >= limit)
1391			limit = min(1U, limit >> 1);
1392	}
1393
1394	if (!page) {
1395		cc->fast_search_fail++;
1396		if (scan_start) {
1397			/*
1398			 * Use the highest PFN found above min. If one was
 1399			 * not found, be pessimistic for direct compaction
1400			 * and use the min mark.
1401			 */
1402			if (highest) {
1403				page = pfn_to_page(highest);
1404				cc->free_pfn = highest;
1405			} else {
1406				if (cc->direct_compaction && pfn_valid(min_pfn)) {
1407					page = pfn_to_page(min_pfn);
1408					cc->free_pfn = min_pfn;
1409				}
1410			}
1411		}
1412	}
1413
1414	if (highest && highest >= cc->zone->compact_cached_free_pfn) {
1415		highest -= pageblock_nr_pages;
1416		cc->zone->compact_cached_free_pfn = highest;
1417	}
1418
1419	cc->total_free_scanned += nr_scanned;
1420	if (!page)
1421		return cc->free_pfn;
1422
1423	low_pfn = page_to_pfn(page);
1424	fast_isolate_around(cc, low_pfn, nr_isolated);
1425	return low_pfn;
1426}
1427
1428/*
1429 * Based on information in the current compact_control, find blocks
1430 * suitable for isolating free pages from and then isolate them.
1431 */
1432static void isolate_freepages(struct compact_control *cc)
1433{
1434	struct zone *zone = cc->zone;
1435	struct page *page;
1436	unsigned long block_start_pfn;	/* start of current pageblock */
1437	unsigned long isolate_start_pfn; /* exact pfn we start at */
1438	unsigned long block_end_pfn;	/* end of current pageblock */
1439	unsigned long low_pfn;	     /* lowest pfn scanner is able to scan */
1440	struct list_head *freelist = &cc->freepages;
1441	unsigned int stride;
1442
1443	/* Try a small search of the free lists for a candidate */
1444	isolate_start_pfn = fast_isolate_freepages(cc);
1445	if (cc->nr_freepages)
1446		goto splitmap;
1447
1448	/*
1449	 * Initialise the free scanner. The starting point is where we last
1450	 * successfully isolated from, zone-cached value, or the end of the
1451	 * zone when isolating for the first time. For looping we also need
1452	 * this pfn aligned down to the pageblock boundary, because we do
1453	 * block_start_pfn -= pageblock_nr_pages in the for loop.
 1454	 * For the ending point, take care when isolating in the last pageblock
 1455	 * of a zone which ends in the middle of a pageblock.
1456	 * The low boundary is the end of the pageblock the migration scanner
1457	 * is using.
1458	 */
1459	isolate_start_pfn = cc->free_pfn;
1460	block_start_pfn = pageblock_start_pfn(isolate_start_pfn);
1461	block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
1462						zone_end_pfn(zone));
1463	low_pfn = pageblock_end_pfn(cc->migrate_pfn);
1464	stride = cc->mode == MIGRATE_ASYNC ? COMPACT_CLUSTER_MAX : 1;
1465
1466	/*
1467	 * Isolate free pages until enough are available to migrate the
1468	 * pages on cc->migratepages. We stop searching if the migrate
1469	 * and free page scanners meet or enough free pages are isolated.
1470	 */
1471	for (; block_start_pfn >= low_pfn;
1472				block_end_pfn = block_start_pfn,
1473				block_start_pfn -= pageblock_nr_pages,
1474				isolate_start_pfn = block_start_pfn) {
1475		unsigned long nr_isolated;
1476
1477		/*
1478		 * This can iterate a massively long zone without finding any
1479		 * suitable migration targets, so periodically check resched.
1480		 */
1481		if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)))
1482			cond_resched();
1483
1484		page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
1485									zone);
1486		if (!page)
1487			continue;
1488
1489		/* Check the block is suitable for migration */
1490		if (!suitable_migration_target(cc, page))
1491			continue;
1492
1493		/* If isolation recently failed, do not retry */
1494		if (!isolation_suitable(cc, page))
1495			continue;
1496
1497		/* Found a block suitable for isolating free pages from. */
1498		nr_isolated = isolate_freepages_block(cc, &isolate_start_pfn,
1499					block_end_pfn, freelist, stride, false);
1500
1501		/* Update the skip hint if the full pageblock was scanned */
1502		if (isolate_start_pfn == block_end_pfn)
1503			update_pageblock_skip(cc, page, block_start_pfn);
1504
1505		/* Are enough freepages isolated? */
1506		if (cc->nr_freepages >= cc->nr_migratepages) {
1507			if (isolate_start_pfn >= block_end_pfn) {
1508				/*
1509				 * Restart at previous pageblock if more
1510				 * freepages can be isolated next time.
1511				 */
1512				isolate_start_pfn =
1513					block_start_pfn - pageblock_nr_pages;
1514			}
1515			break;
1516		} else if (isolate_start_pfn < block_end_pfn) {
1517			/*
1518			 * If isolation failed early, do not continue
1519			 * needlessly.
1520			 */
1521			break;
1522		}
1523
1524		/* Adjust stride depending on isolation */
1525		if (nr_isolated) {
1526			stride = 1;
1527			continue;
1528		}
1529		stride = min_t(unsigned int, COMPACT_CLUSTER_MAX, stride << 1);
1530	}
1531
1532	/*
1533	 * Record where the free scanner will restart next time. Either we
1534	 * broke from the loop and set isolate_start_pfn based on the last
1535	 * call to isolate_freepages_block(), or we met the migration scanner
1536	 * and the loop terminated due to isolate_start_pfn < low_pfn
1537	 */
1538	cc->free_pfn = isolate_start_pfn;
1539
1540splitmap:
1541	/* __isolate_free_page() does not map the pages */
1542	split_map_pages(freelist);
1543}
1544
1545/*
1546 * This is a migrate-callback that "allocates" freepages by taking pages
1547 * from the isolated freelists in the block we are migrating to.
1548 */
1549static struct page *compaction_alloc(struct page *migratepage,
1550					unsigned long data)
1551{
1552	struct compact_control *cc = (struct compact_control *)data;
1553	struct page *freepage;
1554
1555	if (list_empty(&cc->freepages)) {
1556		isolate_freepages(cc);
1557
1558		if (list_empty(&cc->freepages))
1559			return NULL;
1560	}
1561
1562	freepage = list_entry(cc->freepages.next, struct page, lru);
1563	list_del(&freepage->lru);
1564	cc->nr_freepages--;
1565
1566	return freepage;
1567}
1568
1569/*
1570 * This is a migrate-callback that "frees" freepages back to the isolated
1571 * freelist.  All pages on the freelist are from the same zone, so there is no
1572 * special handling needed for NUMA.
1573 */
1574static void compaction_free(struct page *page, unsigned long data)
1575{
1576	struct compact_control *cc = (struct compact_control *)data;
1577
1578	list_add(&page->lru, &cc->freepages);
1579	cc->nr_freepages++;
1580}
1581
1582/* possible outcome of isolate_migratepages */
1583typedef enum {
1584	ISOLATE_ABORT,		/* Abort compaction now */
1585	ISOLATE_NONE,		/* No pages isolated, continue scanning */
1586	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
1587} isolate_migrate_t;
1588
1589/*
1590 * Allow userspace to control policy on scanning the unevictable LRU for
1591 * compactable pages.
1592 */
1593int sysctl_compact_unevictable_allowed __read_mostly = 1;
1594
1595static inline void
1596update_fast_start_pfn(struct compact_control *cc, unsigned long pfn)
1597{
1598	if (cc->fast_start_pfn == ULONG_MAX)
1599		return;
1600
1601	if (!cc->fast_start_pfn)
1602		cc->fast_start_pfn = pfn;
1603
1604	cc->fast_start_pfn = min(cc->fast_start_pfn, pfn);
1605}
1606
1607static inline unsigned long
1608reinit_migrate_pfn(struct compact_control *cc)
1609{
1610	if (!cc->fast_start_pfn || cc->fast_start_pfn == ULONG_MAX)
1611		return cc->migrate_pfn;
1612
1613	cc->migrate_pfn = cc->fast_start_pfn;
1614	cc->fast_start_pfn = ULONG_MAX;
1615
1616	return cc->migrate_pfn;
1617}
1618
1619/*
1620 * Briefly search the free lists for a migration source that already has
1621 * some free pages to reduce the number of pages that need migration
1622 * before a pageblock is free.
1623 */
1624static unsigned long fast_find_migrateblock(struct compact_control *cc)
1625{
1626	unsigned int limit = freelist_scan_limit(cc);
1627	unsigned int nr_scanned = 0;
1628	unsigned long distance;
1629	unsigned long pfn = cc->migrate_pfn;
1630	unsigned long high_pfn;
1631	int order;
1632
1633	/* Skip hints are relied on to avoid repeats on the fast search */
1634	if (cc->ignore_skip_hint)
1635		return pfn;
1636
1637	/*
1638	 * If the migrate_pfn is not at the start of a zone or the start
1639	 * of a pageblock then assume this is a continuation of a previous
1640	 * scan restarted due to COMPACT_CLUSTER_MAX.
1641	 */
1642	if (pfn != cc->zone->zone_start_pfn && pfn != pageblock_start_pfn(pfn))
1643		return pfn;
1644
1645	/*
1646	 * For smaller orders, just linearly scan as the number of pages
1647	 * to migrate should be relatively small and does not necessarily
1648	 * justify freeing up a large block for a small allocation.
1649	 */
1650	if (cc->order <= PAGE_ALLOC_COSTLY_ORDER)
1651		return pfn;
1652
1653	/*
1654	 * Only allow kcompactd and direct requests for movable pages to
1655	 * quickly clear out a MOVABLE pageblock for allocation. This
1656	 * reduces the risk that a large movable pageblock is freed for
1657	 * an unmovable/reclaimable small allocation.
1658	 */
1659	if (cc->direct_compaction && cc->migratetype != MIGRATE_MOVABLE)
1660		return pfn;
1661
1662	/*
1663	 * When starting the migration scanner, pick any pageblock within the
1664	 * first half of the search space. Otherwise try and pick a pageblock
1665	 * within the first eighth to reduce the chances that a migration
1666	 * target later becomes a source.
1667	 */
1668	distance = (cc->free_pfn - cc->migrate_pfn) >> 1;
1669	if (cc->migrate_pfn != cc->zone->zone_start_pfn)
1670		distance >>= 2;
1671	high_pfn = pageblock_start_pfn(cc->migrate_pfn + distance);
1672
1673	for (order = cc->order - 1;
1674	     order >= PAGE_ALLOC_COSTLY_ORDER && pfn == cc->migrate_pfn && nr_scanned < limit;
1675	     order--) {
1676		struct free_area *area = &cc->zone->free_area[order];
1677		struct list_head *freelist;
1678		unsigned long flags;
1679		struct page *freepage;
1680
1681		if (!area->nr_free)
1682			continue;
1683
1684		spin_lock_irqsave(&cc->zone->lock, flags);
1685		freelist = &area->free_list[MIGRATE_MOVABLE];
1686		list_for_each_entry(freepage, freelist, lru) {
1687			unsigned long free_pfn;
1688
1689			nr_scanned++;
1690			free_pfn = page_to_pfn(freepage);
1691			if (free_pfn < high_pfn) {
1692				/*
1693				 * Avoid if skipped recently. Ideally it would
1694				 * move to the tail but even safe iteration of
1695				 * the list assumes an entry is deleted, not
1696				 * reordered.
1697				 */
1698				if (get_pageblock_skip(freepage)) {
1699					if (list_is_last(freelist, &freepage->lru))
1700						break;
1701
1702					continue;
1703				}
1704
 1705				/* Reorder so that a future search skips recent pages */
1706				move_freelist_tail(freelist, freepage);
1707
1708				update_fast_start_pfn(cc, free_pfn);
1709				pfn = pageblock_start_pfn(free_pfn);
1710				cc->fast_search_fail = 0;
1711				set_pageblock_skip(freepage);
1712				break;
1713			}
1714
1715			if (nr_scanned >= limit) {
1716				cc->fast_search_fail++;
1717				move_freelist_tail(freelist, freepage);
1718				break;
1719			}
1720		}
1721		spin_unlock_irqrestore(&cc->zone->lock, flags);
1722	}
1723
1724	cc->total_migrate_scanned += nr_scanned;
1725
1726	/*
1727	 * If fast scanning failed then use a cached entry for a page block
1728	 * that had free pages as the basis for starting a linear scan.
1729	 */
1730	if (pfn == cc->migrate_pfn)
1731		pfn = reinit_migrate_pfn(cc);
1732
1733	return pfn;
1734}
1735
1736/*
1737 * Isolate all pages that can be migrated from the first suitable block,
1738 * starting at the block pointed to by the migrate scanner pfn within
1739 * compact_control.
1740 */
1741static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
1742{
1743	unsigned long block_start_pfn;
1744	unsigned long block_end_pfn;
1745	unsigned long low_pfn;
1746	struct page *page;
1747	const isolate_mode_t isolate_mode =
1748		(sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) |
1749		(cc->mode != MIGRATE_SYNC ? ISOLATE_ASYNC_MIGRATE : 0);
1750	bool fast_find_block;
1751
1752	/*
1753	 * Start at where we last stopped, or beginning of the zone as
1754	 * initialized by compact_zone(). The first failure will use
1755	 * the lowest PFN as the starting point for linear scanning.
1756	 */
1757	low_pfn = fast_find_migrateblock(cc);
1758	block_start_pfn = pageblock_start_pfn(low_pfn);
1759	if (block_start_pfn < cc->zone->zone_start_pfn)
1760		block_start_pfn = cc->zone->zone_start_pfn;
1761
1762	/*
 1763	 * fast_find_migrateblock marks a pageblock skipped so, to avoid
1764	 * the isolation_suitable check below, check whether the fast
1765	 * search was successful.
1766	 */
1767	fast_find_block = low_pfn != cc->migrate_pfn && !cc->fast_search_fail;
1768
1769	/* Only scan within a pageblock boundary */
1770	block_end_pfn = pageblock_end_pfn(low_pfn);
1771
1772	/*
1773	 * Iterate over whole pageblocks until we find the first suitable.
1774	 * Do not cross the free scanner.
1775	 */
1776	for (; block_end_pfn <= cc->free_pfn;
1777			fast_find_block = false,
1778			low_pfn = block_end_pfn,
1779			block_start_pfn = block_end_pfn,
1780			block_end_pfn += pageblock_nr_pages) {
1781
1782		/*
1783		 * This can potentially iterate a massively long zone with
1784		 * many pageblocks unsuitable, so periodically check if we
1785		 * need to schedule.
1786		 */
1787		if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)))
1788			cond_resched();
1789
1790		page = pageblock_pfn_to_page(block_start_pfn,
1791						block_end_pfn, cc->zone);
1792		if (!page)
1793			continue;
1794
1795		/*
1796		 * If isolation recently failed, do not retry. Only check the
1797		 * pageblock once: COMPACT_CLUSTER_MAX can cause a pageblock
1798		 * to be visited multiple times. The fast search marks its
1799		 * chosen block "skip" so that other compaction instances do
1800		 * not scan it, so bypass this check when the fast search
1801		 * succeeded.
1802		 */
1802		if (IS_ALIGNED(low_pfn, pageblock_nr_pages) &&
1803		    !fast_find_block && !isolation_suitable(cc, page))
1804			continue;
1805
1806		/*
1807		 * For async compaction, also only scan in MOVABLE blocks
1808		 * without huge pages. Async compaction is optimistic to see
1809		 * if the minimum amount of work satisfies the allocation.
1810		 * The cached PFN is updated as it's possible that all
1811		 * remaining blocks between source and target are unsuitable
1812		 * and the compaction scanners fail to meet.
1813		 */
1814		if (!suitable_migration_source(cc, page)) {
1815			update_cached_migrate(cc, block_end_pfn);
1816			continue;
1817		}
1818
1819		/* Perform the isolation */
1820		low_pfn = isolate_migratepages_block(cc, low_pfn,
1821						block_end_pfn, isolate_mode);
1822
1823		if (!low_pfn)
1824			return ISOLATE_ABORT;
1825
1826		/*
1827		 * Either we isolated something and proceed with migration. Or
1828		 * we failed and compact_zone should decide if we should
1829		 * continue or not.
1830		 */
1831		break;
1832	}
1833
1834	/* Record where migration scanner will be restarted. */
1835	cc->migrate_pfn = low_pfn;
1836
1837	return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
1838}
1839
1840/*
1841 * order == -1 is expected when compacting via
1842 * /proc/sys/vm/compact_memory
1843 */
1844static inline bool is_via_compact_memory(int order)
1845{
1846	return order == -1;
1847}
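/*
 * The per-node sysfs trigger takes the same path: compact_node() below
 * builds a compact_control with order == -1, so both manual interfaces
 * are treated as "compact everything" requests.
 */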
1848
1849static enum compact_result __compact_finished(struct compact_control *cc)
1850{
1851	unsigned int order;
1852	const int migratetype = cc->migratetype;
1853	int ret;
1854
1855	/* Compaction run completes if the migrate and free scanner meet */
1856	if (compact_scanners_met(cc)) {
1857		/* Let the next compaction start anew. */
1858		reset_cached_positions(cc->zone);
1859
1860		/*
1861		 * Mark that the PG_migrate_skip information should be cleared
1862		 * by kswapd when it goes to sleep. kcompactd does not set the
1863		 * flag itself as the decision to clear it should be based
1864		 * directly on an allocation request.
1865		 */
1866		if (cc->direct_compaction)
1867			cc->zone->compact_blockskip_flush = true;
1868
1869		if (cc->whole_zone)
1870			return COMPACT_COMPLETE;
1871		else
1872			return COMPACT_PARTIAL_SKIPPED;
1873	}
1874
1875	if (is_via_compact_memory(cc->order))
1876		return COMPACT_CONTINUE;
1877
1878	/*
1879	 * Always finish scanning a pageblock to reduce the possibility of
1880	 * fallbacks in the future. This is particularly important when
1881	 * migration source is unmovable/reclaimable but it's not worth
1882	 * special casing.
1883	 */
1884	if (!IS_ALIGNED(cc->migrate_pfn, pageblock_nr_pages))
1885		return COMPACT_CONTINUE;
1886
1887	/* Direct compactor: Is a suitable page free? */
1888	ret = COMPACT_NO_SUITABLE_PAGE;
1889	for (order = cc->order; order < MAX_ORDER; order++) {
1890		struct free_area *area = &cc->zone->free_area[order];
1891		bool can_steal;
1892
1893		/* Job done if page is free of the right migratetype */
1894		if (!free_area_empty(area, migratetype))
1895			return COMPACT_SUCCESS;
1896
1897#ifdef CONFIG_CMA
1898		/* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */
1899		if (migratetype == MIGRATE_MOVABLE &&
1900			!free_area_empty(area, MIGRATE_CMA))
1901			return COMPACT_SUCCESS;
1902#endif
1903		/*
1904		 * Job done if allocation would steal freepages from
1905		 * other migratetype buddy lists.
1906		 */
1907		if (find_suitable_fallback(area, order, migratetype,
1908						true, &can_steal) != -1) {
1909
1910			/* movable pages are OK in any pageblock */
1911			if (migratetype == MIGRATE_MOVABLE)
1912				return COMPACT_SUCCESS;
1913
1914			/*
1915			 * We are stealing for a non-movable allocation. Make
1916			 * sure we finish compacting the current pageblock
1917			 * first so it is as free as possible and we won't
1918			 * have to steal another one soon. This only applies
1919			 * to sync compaction, as async compaction operates
1920			 * on pageblocks of the same migratetype.
1921			 */
1922			if (cc->mode == MIGRATE_ASYNC ||
1923					IS_ALIGNED(cc->migrate_pfn,
1924							pageblock_nr_pages)) {
1925				return COMPACT_SUCCESS;
1926			}
1927
1928			ret = COMPACT_CONTINUE;
1929			break;
1930		}
1931	}
1932
1933	if (cc->contended || fatal_signal_pending(current))
1934		ret = COMPACT_CONTENDED;
1935
1936	return ret;
1937}
1938
1939static enum compact_result compact_finished(struct compact_control *cc)
1940{
1941	int ret;
1942
1943	ret = __compact_finished(cc);
1944	trace_mm_compaction_finished(cc->zone, cc->order, ret);
1945	if (ret == COMPACT_NO_SUITABLE_PAGE)
1946		ret = COMPACT_CONTINUE;
1947
1948	return ret;
1949}
1950
1951/*
1952 * compaction_suitable: Is this suitable to run compaction on this zone now?
1953 * Returns
1954 *   COMPACT_SKIPPED  - If there are too few free pages for compaction
1955 *   COMPACT_SUCCESS  - If the allocation would succeed without compaction
1956 *   COMPACT_CONTINUE - If compaction should run now
1957 */
1958static enum compact_result __compaction_suitable(struct zone *zone, int order,
1959					unsigned int alloc_flags,
1960					int classzone_idx,
1961					unsigned long wmark_target)
1962{
1963	unsigned long watermark;
1964
1965	if (is_via_compact_memory(order))
1966		return COMPACT_CONTINUE;
1967
1968	watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
1969	/*
1970	 * If watermarks for high-order allocation are already met, there
1971	 * should be no need for compaction at all.
1972	 */
1973	if (zone_watermark_ok(zone, order, watermark, classzone_idx,
1974								alloc_flags))
1975		return COMPACT_SUCCESS;
1976
1977	/*
1978	 * Watermarks for order-0 must be met for compaction to be able to
1979	 * isolate free pages for migration targets. This means that the
1980	 * watermark and alloc_flags have to match, or be more pessimistic than
1981	 * the check in __isolate_free_page(). We don't use the direct
1982	 * compactor's alloc_flags, as they are not relevant for freepage
1983	 * isolation. We however do use the direct compactor's classzone_idx to
1984	 * skip over zones where lowmem reserves would prevent allocation even
1985	 * if compaction succeeds.
1986	 * For costly orders, we require low watermark instead of min for
1987	 * compaction to proceed to increase its chances.
1988	 * ALLOC_CMA is used, as pages in CMA pageblocks are considered
1989	 * suitable migration targets.
1990	 */
1991	watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ?
1992				low_wmark_pages(zone) : min_wmark_pages(zone);
1993	watermark += compact_gap(order);
1994	if (!__zone_watermark_ok(zone, 0, watermark, classzone_idx,
1995						ALLOC_CMA, wmark_target))
1996		return COMPACT_SKIPPED;
1997
1998	return COMPACT_CONTINUE;
1999}
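/*
 * Illustrative example: for an order-9 request (a costly order, e.g. a
 * THP on x86-64), the check above needs low_wmark_pages(zone) +
 * compact_gap(9) free base pages, where compact_gap() is roughly twice
 * the request size (2UL << order, i.e. 1024 pages here), so the free
 * scanner can isolate migration targets without dropping the zone below
 * its watermarks.
 */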
2000
2001enum compact_result compaction_suitable(struct zone *zone, int order,
2002					unsigned int alloc_flags,
2003					int classzone_idx)
2004{
2005	enum compact_result ret;
2006	int fragindex;
2007
2008	ret = __compaction_suitable(zone, order, alloc_flags, classzone_idx,
2009				    zone_page_state(zone, NR_FREE_PAGES));
2010	/*
2011	 * fragmentation index determines if allocation failures are due to
2012	 * low memory or external fragmentation
2013	 *
2014	 * index of -1000 would imply allocations might succeed depending on
2015	 * watermarks, but we already failed the high-order watermark check
2016	 * index towards 0 implies failure is due to lack of memory
2017	 * index towards 1000 implies failure is due to fragmentation
2018	 *
2019	 * Only compact if a failure would be due to fragmentation. Also
2020	 * ignore fragindex for non-costly orders where the alternative to
2021	 * a successful reclaim/compaction is OOM. Fragindex and the
2022	 * vm.extfrag_threshold sysctl is meant as a heuristic to prevent
2023	 * excessive compaction for costly orders, but it should not be at the
2024	 * expense of system stability.
2025	 */
2026	if (ret == COMPACT_CONTINUE && (order > PAGE_ALLOC_COSTLY_ORDER)) {
2027		fragindex = fragmentation_index(zone, order);
2028		if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
2029			ret = COMPACT_NOT_SUITABLE_ZONE;
2030	}
2031
2032	trace_mm_compaction_suitable(zone, order, ret);
2033	if (ret == COMPACT_NOT_SUITABLE_ZONE)
2034		ret = COMPACT_SKIPPED;
2035
2036	return ret;
2037}
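/*
 * With the default vm.extfrag_threshold of 500 (sysctl_extfrag_threshold
 * below), a costly-order request only proceeds to compaction when
 * fragmentation_index() reports a value above 500, i.e. when the failure
 * is judged to stem from external fragmentation rather than a plain
 * shortage of free memory.
 */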
2038
2039bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
2040		int alloc_flags)
2041{
2042	struct zone *zone;
2043	struct zoneref *z;
2044
2045	/*
2046	 * Make sure at least one zone would pass __compaction_suitable if we continue
2047	 * retrying the reclaim.
2048	 */
2049	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
2050					ac->nodemask) {
2051		unsigned long available;
2052		enum compact_result compact_result;
2053
2054		/*
2055		 * Do not consider all the reclaimable memory because we do not
2056		 * want to thrash just for a single high-order allocation which
2057		 * is not even guaranteed to appear even if __compaction_suitable
2058		 * is happy with the watermark check.
2059		 */
2060		available = zone_reclaimable_pages(zone) / order;
2061		available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
2062		compact_result = __compaction_suitable(zone, order, alloc_flags,
2063				ac_classzone_idx(ac), available);
2064		if (compact_result != COMPACT_SKIPPED)
2065			return true;
2066	}
2067
2068	return false;
2069}
2070
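/*
 * compact_zone() drives a single compaction attempt on one zone: it
 * positions the migrate and free scanners (from cached PFNs unless
 * cc->whole_zone is set), then repeatedly isolates a batch of source
 * pages, migrates them with migrate_pages(), and drains per-cpu lists
 * once the migrate scanner leaves a cc->order aligned block, until
 * compact_finished() reports success, completion or contention.
 */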
2071static enum compact_result
2072compact_zone(struct compact_control *cc, struct capture_control *capc)
2073{
2074	enum compact_result ret;
2075	unsigned long start_pfn = cc->zone->zone_start_pfn;
2076	unsigned long end_pfn = zone_end_pfn(cc->zone);
2077	unsigned long last_migrated_pfn;
2078	const bool sync = cc->mode != MIGRATE_ASYNC;
2079	bool update_cached;
2080
2081	/*
2082	 * These counters track activities during zone compaction.  Initialize
2083	 * them before compacting a new zone.
2084	 */
2085	cc->total_migrate_scanned = 0;
2086	cc->total_free_scanned = 0;
2087	cc->nr_migratepages = 0;
2088	cc->nr_freepages = 0;
2089	INIT_LIST_HEAD(&cc->freepages);
2090	INIT_LIST_HEAD(&cc->migratepages);
2091
2092	cc->migratetype = gfpflags_to_migratetype(cc->gfp_mask);
2093	ret = compaction_suitable(cc->zone, cc->order, cc->alloc_flags,
2094							cc->classzone_idx);
2095	/* Compaction is likely to fail */
2096	if (ret == COMPACT_SUCCESS || ret == COMPACT_SKIPPED)
2097		return ret;
2098
2099	/* huh, compaction_suitable is returning something unexpected */
2100	VM_BUG_ON(ret != COMPACT_CONTINUE);
2101
2102	/*
2103	 * Clear pageblock skip if there were failures recently and compaction
2104	 * is about to be retried after being deferred.
2105	 */
2106	if (compaction_restarting(cc->zone, cc->order))
2107		__reset_isolation_suitable(cc->zone);
2108
2109	/*
2110	 * Set up to move all movable pages to the end of the zone. Use cached
2111	 * information on where the scanners should start (unless we explicitly
2112	 * want to compact the whole zone), but check that it is initialised
2113	 * by ensuring the values are within zone boundaries.
2114	 */
2115	cc->fast_start_pfn = 0;
2116	if (cc->whole_zone) {
2117		cc->migrate_pfn = start_pfn;
2118		cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
2119	} else {
2120		cc->migrate_pfn = cc->zone->compact_cached_migrate_pfn[sync];
2121		cc->free_pfn = cc->zone->compact_cached_free_pfn;
2122		if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) {
2123			cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
2124			cc->zone->compact_cached_free_pfn = cc->free_pfn;
2125		}
2126		if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) {
2127			cc->migrate_pfn = start_pfn;
2128			cc->zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
2129			cc->zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
2130		}
2131
2132		if (cc->migrate_pfn <= cc->zone->compact_init_migrate_pfn)
2133			cc->whole_zone = true;
2134	}
2135
2136	last_migrated_pfn = 0;
2137
2138	/*
2139	 * Migrate has separate cached PFNs for ASYNC and SYNC* migration on
2140	 * the basis that some migrations will fail in ASYNC mode. However,
2141	 * if the cached PFNs match and pageblocks are skipped due to having
2142	 * no isolation candidates, then the sync state does not matter.
2143	 * Until a pageblock with isolation candidates is found, keep the
2144	 * cached PFNs in sync to avoid revisiting the same blocks.
2145	 */
2146	update_cached = !sync &&
2147		cc->zone->compact_cached_migrate_pfn[0] == cc->zone->compact_cached_migrate_pfn[1];
2148
2149	trace_mm_compaction_begin(start_pfn, cc->migrate_pfn,
2150				cc->free_pfn, end_pfn, sync);
2151
2152	migrate_prep_local();
2153
2154	while ((ret = compact_finished(cc)) == COMPACT_CONTINUE) {
2155		int err;
2156		unsigned long start_pfn = cc->migrate_pfn;
2157
2158		/*
2159		 * Avoid multiple rescans which can happen if a page cannot be
2160		 * isolated (dirty/writeback in async mode) or if the migrated
2161		 * pages are being allocated before the pageblock is cleared.
2162		 * The first rescan will capture the entire pageblock for
2163		 * migration. If it fails, it'll be marked skip and scanning
2164		 * will proceed as normal.
2165		 */
2166		cc->rescan = false;
2167		if (pageblock_start_pfn(last_migrated_pfn) ==
2168		    pageblock_start_pfn(start_pfn)) {
2169			cc->rescan = true;
2170		}
2171
2172		switch (isolate_migratepages(cc)) {
2173		case ISOLATE_ABORT:
2174			ret = COMPACT_CONTENDED;
2175			putback_movable_pages(&cc->migratepages);
2176			cc->nr_migratepages = 0;
2177			last_migrated_pfn = 0;
2178			goto out;
2179		case ISOLATE_NONE:
2180			if (update_cached) {
2181				cc->zone->compact_cached_migrate_pfn[1] =
2182					cc->zone->compact_cached_migrate_pfn[0];
2183			}
2184
2185			/*
2186			 * We haven't isolated and migrated anything, but
2187			 * there might still be unflushed migrations from
2188			 * previous cc->order aligned block.
2189			 */
2190			goto check_drain;
2191		case ISOLATE_SUCCESS:
2192			update_cached = false;
2193			last_migrated_pfn = start_pfn;
2194			;
2195		}
2196
2197		err = migrate_pages(&cc->migratepages, compaction_alloc,
2198				compaction_free, (unsigned long)cc, cc->mode,
2199				MR_COMPACTION);
2200
2201		trace_mm_compaction_migratepages(cc->nr_migratepages, err,
2202							&cc->migratepages);
2203
2204		/* All pages were either migrated or will be released */
2205		cc->nr_migratepages = 0;
2206		if (err) {
2207			putback_movable_pages(&cc->migratepages);
2208			/*
2209			 * migrate_pages() may return -ENOMEM when scanners meet
2210			 * and we want compact_finished() to detect it
2211			 */
2212			if (err == -ENOMEM && !compact_scanners_met(cc)) {
2213				ret = COMPACT_CONTENDED;
2214				goto out;
2215			}
2216			/*
2217			 * We failed to migrate at least one page in the current
2218			 * order-aligned block, so skip the rest of it.
2219			 */
2220			if (cc->direct_compaction &&
2221						(cc->mode == MIGRATE_ASYNC)) {
2222				cc->migrate_pfn = block_end_pfn(
2223						cc->migrate_pfn - 1, cc->order);
2224				/* Draining pcplists is useless in this case */
2225				last_migrated_pfn = 0;
2226			}
2227		}
2228
2229check_drain:
2230		/*
2231		 * Has the migration scanner moved away from the previous
2232		 * cc->order aligned block where we migrated from? If yes,
2233		 * flush the pages that were freed, so that they can merge and
2234		 * compact_finished() can detect immediately if allocation
2235		 * would succeed.
2236		 */
2237		if (cc->order > 0 && last_migrated_pfn) {
2238			int cpu;
2239			unsigned long current_block_start =
2240				block_start_pfn(cc->migrate_pfn, cc->order);
2241
2242			if (last_migrated_pfn < current_block_start) {
2243				cpu = get_cpu();
2244				lru_add_drain_cpu(cpu);
2245				drain_local_pages(cc->zone);
2246				put_cpu();
2247				/* No more flushing until we migrate again */
2248				last_migrated_pfn = 0;
2249			}
2250		}
2251
2252		/* Stop if a page has been captured */
2253		if (capc && capc->page) {
2254			ret = COMPACT_SUCCESS;
2255			break;
2256		}
2257	}
2258
2259out:
2260	/*
2261	 * Release free pages and update where the free scanner should restart,
2262	 * so we don't leave any returned pages behind in the next attempt.
2263	 */
2264	if (cc->nr_freepages > 0) {
2265		unsigned long free_pfn = release_freepages(&cc->freepages);
2266
2267		cc->nr_freepages = 0;
2268		VM_BUG_ON(free_pfn == 0);
2269		/* The cached pfn is always the first in a pageblock */
2270		free_pfn = pageblock_start_pfn(free_pfn);
2271		/*
2272		 * Only go back, not forward. The cached pfn might have been
2273		 * already reset to zone end in compact_finished()
2274		 */
2275		if (free_pfn > cc->zone->compact_cached_free_pfn)
2276			cc->zone->compact_cached_free_pfn = free_pfn;
2277	}
2278
2279	count_compact_events(COMPACTMIGRATE_SCANNED, cc->total_migrate_scanned);
2280	count_compact_events(COMPACTFREE_SCANNED, cc->total_free_scanned);
2281
2282	trace_mm_compaction_end(start_pfn, cc->migrate_pfn,
2283				cc->free_pfn, end_pfn, sync, ret);
2284
2285	return ret;
2286}
2287
2288static enum compact_result compact_zone_order(struct zone *zone, int order,
2289		gfp_t gfp_mask, enum compact_priority prio,
2290		unsigned int alloc_flags, int classzone_idx,
2291		struct page **capture)
2292{
2293	enum compact_result ret;
2294	struct compact_control cc = {
2295		.order = order,
2296		.search_order = order,
2297		.gfp_mask = gfp_mask,
2298		.zone = zone,
2299		.mode = (prio == COMPACT_PRIO_ASYNC) ?
2300					MIGRATE_ASYNC :	MIGRATE_SYNC_LIGHT,
2301		.alloc_flags = alloc_flags,
2302		.classzone_idx = classzone_idx,
2303		.direct_compaction = true,
2304		.whole_zone = (prio == MIN_COMPACT_PRIORITY),
2305		.ignore_skip_hint = (prio == MIN_COMPACT_PRIORITY),
2306		.ignore_block_suitable = (prio == MIN_COMPACT_PRIORITY)
2307	};
2308	struct capture_control capc = {
2309		.cc = &cc,
2310		.page = NULL,
2311	};
2312
2313	if (capture)
2314		current->capture_control = &capc;
2315
2316	ret = compact_zone(&cc, &capc);
2317
2318	VM_BUG_ON(!list_empty(&cc.freepages));
2319	VM_BUG_ON(!list_empty(&cc.migratepages));
2320
2321	*capture = capc.page;
2322	current->capture_control = NULL;
2323
2324	return ret;
2325}
2326
2327int sysctl_extfrag_threshold = 500;
2328
2329/**
2330 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
2331 * @gfp_mask: The GFP mask of the current allocation
2332 * @order: The order of the current allocation
2333 * @alloc_flags: The allocation flags of the current allocation
2334 * @ac: The context of current allocation
2335 * @prio: Determines how hard direct compaction should try to succeed
2336 *
2337 * This is the main entry point for direct page compaction.
2338 */
2339enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
2340		unsigned int alloc_flags, const struct alloc_context *ac,
2341		enum compact_priority prio, struct page **capture)
2342{
2343	int may_perform_io = gfp_mask & __GFP_IO;
2344	struct zoneref *z;
2345	struct zone *zone;
2346	enum compact_result rc = COMPACT_SKIPPED;
2347
2348	/*
2349	 * Check if the GFP flags allow compaction - GFP_NOIO is really
2350	 * tricky context because the migration might require IO
2351	 */
2352	if (!may_perform_io)
2353		return COMPACT_SKIPPED;
2354
2355	trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio);
2356
2357	/* Compact each zone in the list */
2358	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
2359								ac->nodemask) {
2360		enum compact_result status;
2361
2362		if (prio > MIN_COMPACT_PRIORITY
2363					&& compaction_deferred(zone, order)) {
2364			rc = max_t(enum compact_result, COMPACT_DEFERRED, rc);
2365			continue;
2366		}
2367
2368		status = compact_zone_order(zone, order, gfp_mask, prio,
2369				alloc_flags, ac_classzone_idx(ac), capture);
2370		rc = max(status, rc);
2371
2372		/* The allocation should succeed, stop compacting */
2373		if (status == COMPACT_SUCCESS) {
2374			/*
2375			 * We think the allocation will succeed in this zone,
2376			 * but it is not certain, hence the false. The caller
2377			 * will repeat this with true if allocation indeed
2378			 * succeeds in this zone.
2379			 */
2380			compaction_defer_reset(zone, order, false);
2381
2382			break;
2383		}
2384
2385		if (prio != COMPACT_PRIO_ASYNC && (status == COMPACT_COMPLETE ||
2386					status == COMPACT_PARTIAL_SKIPPED))
2387			/*
2388			 * We think that allocation won't succeed in this zone
2389			 * so we defer compaction there. If it ends up
2390			 * succeeding after all, it will be reset.
2391			 */
2392			defer_compaction(zone, order);
2393
2394		/*
2395		 * We might have stopped compacting due to need_resched() in
2396		 * async compaction, or due to a fatal signal detected. In that
2397		 * case do not try further zones
2398		 */
2399		if ((prio == COMPACT_PRIO_ASYNC && need_resched())
2400					|| fatal_signal_pending(current))
2401			break;
2402	}
2403
2404	return rc;
2405}
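/*
 * Results from individual zones are merged with max(), so the best
 * outcome wins. At MIN_COMPACT_PRIORITY the deferral check above is
 * bypassed and compact_zone_order() ignores skip hints and scans the
 * whole zone, making the final attempt as thorough as possible.
 */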
2406
2407
2408/* Compact all zones within a node */
2409static void compact_node(int nid)
2410{
2411	pg_data_t *pgdat = NODE_DATA(nid);
2412	int zoneid;
2413	struct zone *zone;
2414	struct compact_control cc = {
2415		.order = -1,
2416		.mode = MIGRATE_SYNC,
2417		.ignore_skip_hint = true,
2418		.whole_zone = true,
2419		.gfp_mask = GFP_KERNEL,
2420	};
2421
2422
2423	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
2424
2425		zone = &pgdat->node_zones[zoneid];
2426		if (!populated_zone(zone))
2427			continue;
2428
2429		cc.zone = zone;
2430
2431		compact_zone(&cc, NULL);
2432
2433		VM_BUG_ON(!list_empty(&cc.freepages));
2434		VM_BUG_ON(!list_empty(&cc.migratepages));
2435	}
2436}
2437
2438/* Compact all nodes in the system */
2439static void compact_nodes(void)
2440{
2441	int nid;
2442
2443	/* Flush pending updates to the LRU lists */
2444	lru_add_drain_all();
2445
2446	for_each_online_node(nid)
2447		compact_node(nid);
2448}
2449
2450/* The written value is actually unused, all memory is compacted */
2451int sysctl_compact_memory;
2452
2453/*
2454 * This is the entry point for compacting all nodes via
2455 * /proc/sys/vm/compact_memory
2456 */
2457int sysctl_compaction_handler(struct ctl_table *table, int write,
2458			void __user *buffer, size_t *length, loff_t *ppos)
2459{
2460	if (write)
2461		compact_nodes();
2462
2463	return 0;
2464}
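/*
 * Typical use (the written value itself is ignored):
 *
 *	echo 1 > /proc/sys/vm/compact_memory
 *
 * which synchronously compacts every zone of every online node.
 */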
2465
2466#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
2467static ssize_t sysfs_compact_node(struct device *dev,
2468			struct device_attribute *attr,
2469			const char *buf, size_t count)
2470{
2471	int nid = dev->id;
2472
2473	if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
2474		/* Flush pending updates to the LRU lists */
2475		lru_add_drain_all();
2476
2477		compact_node(nid);
2478	}
2479
2480	return count;
2481}
2482static DEVICE_ATTR(compact, 0200, NULL, sysfs_compact_node);
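/*
 * compaction_register_node() below attaches this attribute to each node
 * device, so writing any value to /sys/devices/system/node/node<N>/compact
 * compacts just that node.
 */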
2483
2484int compaction_register_node(struct node *node)
2485{
2486	return device_create_file(&node->dev, &dev_attr_compact);
2487}
2488
2489void compaction_unregister_node(struct node *node)
2490{
2491	return device_remove_file(&node->dev, &dev_attr_compact);
2492}
2493#endif /* CONFIG_SYSFS && CONFIG_NUMA */
2494
2495static inline bool kcompactd_work_requested(pg_data_t *pgdat)
2496{
2497	return pgdat->kcompactd_max_order > 0 || kthread_should_stop();
2498}
2499
2500static bool kcompactd_node_suitable(pg_data_t *pgdat)
2501{
2502	int zoneid;
2503	struct zone *zone;
2504	enum zone_type classzone_idx = pgdat->kcompactd_classzone_idx;
2505
2506	for (zoneid = 0; zoneid <= classzone_idx; zoneid++) {
2507		zone = &pgdat->node_zones[zoneid];
2508
2509		if (!populated_zone(zone))
2510			continue;
2511
2512		if (compaction_suitable(zone, pgdat->kcompactd_max_order, 0,
2513					classzone_idx) == COMPACT_CONTINUE)
2514			return true;
2515	}
2516
2517	return false;
2518}
2519
2520static void kcompactd_do_work(pg_data_t *pgdat)
2521{
2522	/*
2523	 * With no special task, compact all zones so that a page of requested
2524	 * order is allocatable.
2525	 */
2526	int zoneid;
2527	struct zone *zone;
2528	struct compact_control cc = {
2529		.order = pgdat->kcompactd_max_order,
2530		.search_order = pgdat->kcompactd_max_order,
2531		.classzone_idx = pgdat->kcompactd_classzone_idx,
2532		.mode = MIGRATE_SYNC_LIGHT,
2533		.ignore_skip_hint = false,
2534		.gfp_mask = GFP_KERNEL,
2535	};
2536	trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order,
2537							cc.classzone_idx);
2538	count_compact_event(KCOMPACTD_WAKE);
2539
2540	for (zoneid = 0; zoneid <= cc.classzone_idx; zoneid++) {
2541		int status;
2542
2543		zone = &pgdat->node_zones[zoneid];
2544		if (!populated_zone(zone))
2545			continue;
2546
2547		if (compaction_deferred(zone, cc.order))
2548			continue;
2549
2550		if (compaction_suitable(zone, cc.order, 0, zoneid) !=
2551							COMPACT_CONTINUE)
2552			continue;
2553
2554		if (kthread_should_stop())
2555			return;
2556
2557		cc.zone = zone;
2558		status = compact_zone(&cc, NULL);
2559
2560		if (status == COMPACT_SUCCESS) {
2561			compaction_defer_reset(zone, cc.order, false);
2562		} else if (status == COMPACT_PARTIAL_SKIPPED || status == COMPACT_COMPLETE) {
2563			/*
2564			 * Buddy pages may become stranded on pcps that could
2565			 * otherwise coalesce on the zone's free area for
2566			 * order >= cc.order.  This is ratelimited by the
2567			 * upcoming deferral.
2568			 */
2569			drain_all_pages(zone);
2570
2571			/*
2572			 * We use sync migration mode here, so we defer like
2573			 * sync direct compaction does.
2574			 */
2575			defer_compaction(zone, cc.order);
2576		}
2577
2578		count_compact_events(KCOMPACTD_MIGRATE_SCANNED,
2579				     cc.total_migrate_scanned);
2580		count_compact_events(KCOMPACTD_FREE_SCANNED,
2581				     cc.total_free_scanned);
2582
2583		VM_BUG_ON(!list_empty(&cc.freepages));
2584		VM_BUG_ON(!list_empty(&cc.migratepages));
2585	}
2586
2587	/*
2588	 * Regardless of success, we are done until woken up next. But remember
2589	 * the requested order/classzone_idx in case it was higher/tighter than
2590	 * our current ones
2591	 */
2592	if (pgdat->kcompactd_max_order <= cc.order)
2593		pgdat->kcompactd_max_order = 0;
2594	if (pgdat->kcompactd_classzone_idx >= cc.classzone_idx)
2595		pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1;
2596}
2597
2598void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx)
2599{
2600	if (!order)
2601		return;
2602
2603	if (pgdat->kcompactd_max_order < order)
2604		pgdat->kcompactd_max_order = order;
2605
2606	if (pgdat->kcompactd_classzone_idx > classzone_idx)
2607		pgdat->kcompactd_classzone_idx = classzone_idx;
2608
2609	/*
2610	 * Pairs with implicit barrier in wait_event_freezable()
2611	 * such that wakeups are not missed.
2612	 */
2613	if (!wq_has_sleeper(&pgdat->kcompactd_wait))
2614		return;
2615
2616	if (!kcompactd_node_suitable(pgdat))
2617		return;
2618
2619	trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order,
2620							classzone_idx);
2621	wake_up_interruptible(&pgdat->kcompactd_wait);
2622}
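/*
 * Note that a wakeup is dropped unless kcompactd_node_suitable() finds at
 * least one zone where compaction could help the requested order, so
 * kcompactd is not woken for hopeless requests. The caller is expected to
 * be the kswapd wakeup/sleep path, handing high-order compaction work to
 * kcompactd rather than doing it in the allocation context.
 */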
2623
2624/*
2625 * The background compaction daemon, started as a kernel thread
2626 * from the init process.
2627 */
2628static int kcompactd(void *p)
2629{
2630	pg_data_t *pgdat = (pg_data_t*)p;
2631	struct task_struct *tsk = current;
2632
2633	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
2634
2635	if (!cpumask_empty(cpumask))
2636		set_cpus_allowed_ptr(tsk, cpumask);
2637
2638	set_freezable();
2639
2640	pgdat->kcompactd_max_order = 0;
2641	pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1;
2642
2643	while (!kthread_should_stop()) {
2644		unsigned long pflags;
2645
2646		trace_mm_compaction_kcompactd_sleep(pgdat->node_id);
2647		wait_event_freezable(pgdat->kcompactd_wait,
2648				kcompactd_work_requested(pgdat));
2649
2650		psi_memstall_enter(&pflags);
2651		kcompactd_do_work(pgdat);
2652		psi_memstall_leave(&pflags);
2653	}
2654
2655	return 0;
2656}
2657
2658/*
2659 * This kcompactd start function will be called by init and node-hot-add.
2660 * On node-hot-add, kcompactd will be moved to the proper cpus if cpus are hot-added.
2661 */
2662int kcompactd_run(int nid)
2663{
2664	pg_data_t *pgdat = NODE_DATA(nid);
2665	int ret = 0;
2666
2667	if (pgdat->kcompactd)
2668		return 0;
2669
2670	pgdat->kcompactd = kthread_run(kcompactd, pgdat, "kcompactd%d", nid);
2671	if (IS_ERR(pgdat->kcompactd)) {
2672		pr_err("Failed to start kcompactd on node %d\n", nid);
2673		ret = PTR_ERR(pgdat->kcompactd);
2674		pgdat->kcompactd = NULL;
2675	}
2676	return ret;
2677}
2678
2679/*
2680 * Called by memory hotplug when all memory in a node is offlined. Caller must
2681 * hold mem_hotplug_begin/end().
2682 */
2683void kcompactd_stop(int nid)
2684{
2685	struct task_struct *kcompactd = NODE_DATA(nid)->kcompactd;
2686
2687	if (kcompactd) {
2688		kthread_stop(kcompactd);
2689		NODE_DATA(nid)->kcompactd = NULL;
2690	}
2691}
2692
2693/*
2694 * It's optimal to keep kcompactd on the same CPUs as its memory, but
2695 * not required for correctness. So if the last cpu in a node goes
2696 * away, kcompactd is allowed to run anywhere; as the first one comes
2697 * back, restore its cpu binding.
2698 */
2699static int kcompactd_cpu_online(unsigned int cpu)
2700{
2701	int nid;
2702
2703	for_each_node_state(nid, N_MEMORY) {
2704		pg_data_t *pgdat = NODE_DATA(nid);
2705		const struct cpumask *mask;
2706
2707		mask = cpumask_of_node(pgdat->node_id);
2708
2709		if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
2710			/* One of our CPUs online: restore mask */
2711			set_cpus_allowed_ptr(pgdat->kcompactd, mask);
2712	}
2713	return 0;
2714}
2715
2716static int __init kcompactd_init(void)
2717{
2718	int nid;
2719	int ret;
2720
2721	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
2722					"mm/compaction:online",
2723					kcompactd_cpu_online, NULL);
2724	if (ret < 0) {
2725		pr_err("kcompactd: failed to register hotplug callbacks.\n");
2726		return ret;
2727	}
2728
2729	for_each_node_state(nid, N_MEMORY)
2730		kcompactd_run(nid);
2731	return 0;
2732}
2733subsys_initcall(kcompactd_init)
2734
2735#endif /* CONFIG_COMPACTION */
v4.10.11
 
   1/*
   2 * linux/mm/compaction.c
   3 *
   4 * Memory compaction for the reduction of external fragmentation. Note that
   5 * this heavily depends upon page migration to do all the real heavy
   6 * lifting
   7 *
   8 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
   9 */
  10#include <linux/cpu.h>
  11#include <linux/swap.h>
  12#include <linux/migrate.h>
  13#include <linux/compaction.h>
  14#include <linux/mm_inline.h>
 
  15#include <linux/backing-dev.h>
  16#include <linux/sysctl.h>
  17#include <linux/sysfs.h>
  18#include <linux/page-isolation.h>
  19#include <linux/kasan.h>
  20#include <linux/kthread.h>
  21#include <linux/freezer.h>
  22#include <linux/page_owner.h>
 
  23#include "internal.h"
  24
  25#ifdef CONFIG_COMPACTION
  26static inline void count_compact_event(enum vm_event_item item)
  27{
  28	count_vm_event(item);
  29}
  30
  31static inline void count_compact_events(enum vm_event_item item, long delta)
  32{
  33	count_vm_events(item, delta);
  34}
  35#else
  36#define count_compact_event(item) do { } while (0)
  37#define count_compact_events(item, delta) do { } while (0)
  38#endif
  39
  40#if defined CONFIG_COMPACTION || defined CONFIG_CMA
  41
  42#define CREATE_TRACE_POINTS
  43#include <trace/events/compaction.h>
  44
  45#define block_start_pfn(pfn, order)	round_down(pfn, 1UL << (order))
  46#define block_end_pfn(pfn, order)	ALIGN((pfn) + 1, 1UL << (order))
  47#define pageblock_start_pfn(pfn)	block_start_pfn(pfn, pageblock_order)
  48#define pageblock_end_pfn(pfn)		block_end_pfn(pfn, pageblock_order)
  49
  50static unsigned long release_freepages(struct list_head *freelist)
  51{
  52	struct page *page, *next;
  53	unsigned long high_pfn = 0;
  54
  55	list_for_each_entry_safe(page, next, freelist, lru) {
  56		unsigned long pfn = page_to_pfn(page);
  57		list_del(&page->lru);
  58		__free_page(page);
  59		if (pfn > high_pfn)
  60			high_pfn = pfn;
  61	}
  62
  63	return high_pfn;
  64}
  65
  66static void map_pages(struct list_head *list)
  67{
  68	unsigned int i, order, nr_pages;
  69	struct page *page, *next;
  70	LIST_HEAD(tmp_list);
  71
  72	list_for_each_entry_safe(page, next, list, lru) {
  73		list_del(&page->lru);
  74
  75		order = page_private(page);
  76		nr_pages = 1 << order;
  77
  78		post_alloc_hook(page, order, __GFP_MOVABLE);
  79		if (order)
  80			split_page(page, order);
  81
  82		for (i = 0; i < nr_pages; i++) {
  83			list_add(&page->lru, &tmp_list);
  84			page++;
  85		}
  86	}
  87
  88	list_splice(&tmp_list, list);
  89}
  90
  91static inline bool migrate_async_suitable(int migratetype)
  92{
  93	return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
  94}
  95
  96#ifdef CONFIG_COMPACTION
  97
  98int PageMovable(struct page *page)
  99{
 100	struct address_space *mapping;
 101
 102	VM_BUG_ON_PAGE(!PageLocked(page), page);
 103	if (!__PageMovable(page))
 104		return 0;
 105
 106	mapping = page_mapping(page);
 107	if (mapping && mapping->a_ops && mapping->a_ops->isolate_page)
 108		return 1;
 109
 110	return 0;
 111}
 112EXPORT_SYMBOL(PageMovable);
 113
 114void __SetPageMovable(struct page *page, struct address_space *mapping)
 115{
 116	VM_BUG_ON_PAGE(!PageLocked(page), page);
 117	VM_BUG_ON_PAGE((unsigned long)mapping & PAGE_MAPPING_MOVABLE, page);
 118	page->mapping = (void *)((unsigned long)mapping | PAGE_MAPPING_MOVABLE);
 119}
 120EXPORT_SYMBOL(__SetPageMovable);
 121
 122void __ClearPageMovable(struct page *page)
 123{
 124	VM_BUG_ON_PAGE(!PageLocked(page), page);
 125	VM_BUG_ON_PAGE(!PageMovable(page), page);
 126	/*
 127	 * Clear registered address_space val with keeping PAGE_MAPPING_MOVABLE
 128	 * flag so that VM can catch up released page by driver after isolation.
 129	 * With it, VM migration doesn't try to put it back.
 130	 */
 131	page->mapping = (void *)((unsigned long)page->mapping &
 132				PAGE_MAPPING_MOVABLE);
 133}
 134EXPORT_SYMBOL(__ClearPageMovable);
 135
 136/* Do not skip compaction more than 64 times */
 137#define COMPACT_MAX_DEFER_SHIFT 6
 138
 139/*
 140 * Compaction is deferred when compaction fails to result in a page
 141 * allocation success. 1 << compact_defer_limit compactions are skipped up
 142 * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
 143 */
 144void defer_compaction(struct zone *zone, int order)
 145{
 146	zone->compact_considered = 0;
 147	zone->compact_defer_shift++;
 148
 149	if (order < zone->compact_order_failed)
 150		zone->compact_order_failed = order;
 151
 152	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
 153		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
 154
 155	trace_mm_compaction_defer_compaction(zone, order);
 156}
 157
 158/* Returns true if compaction should be skipped this time */
 159bool compaction_deferred(struct zone *zone, int order)
 160{
 161	unsigned long defer_limit = 1UL << zone->compact_defer_shift;
 162
 163	if (order < zone->compact_order_failed)
 164		return false;
 165
 166	/* Avoid possible overflow */
 167	if (++zone->compact_considered > defer_limit)
 168		zone->compact_considered = defer_limit;
 169
 170	if (zone->compact_considered >= defer_limit)
 171		return false;
 172
 173	trace_mm_compaction_deferred(zone, order);
 174
 175	return true;
 176}
 177
 178/*
 179 * Update defer tracking counters after successful compaction of given order,
 180 * which means an allocation either succeeded (alloc_success == true) or is
 181 * expected to succeed.
 182 */
 183void compaction_defer_reset(struct zone *zone, int order,
 184		bool alloc_success)
 185{
 186	if (alloc_success) {
 187		zone->compact_considered = 0;
 188		zone->compact_defer_shift = 0;
 189	}
 190	if (order >= zone->compact_order_failed)
 191		zone->compact_order_failed = order + 1;
 192
 193	trace_mm_compaction_defer_reset(zone, order);
 194}
 195
 196/* Returns true if restarting compaction after many failures */
 197bool compaction_restarting(struct zone *zone, int order)
 198{
 199	if (order < zone->compact_order_failed)
 200		return false;
 201
 202	return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
 203		zone->compact_considered >= 1UL << zone->compact_defer_shift;
 204}
 205
 206/* Returns true if the pageblock should be scanned for pages to isolate. */
 207static inline bool isolation_suitable(struct compact_control *cc,
 208					struct page *page)
 209{
 210	if (cc->ignore_skip_hint)
 211		return true;
 212
 213	return !get_pageblock_skip(page);
 214}
 215
 216static void reset_cached_positions(struct zone *zone)
 217{
 218	zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
 219	zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
 220	zone->compact_cached_free_pfn =
 221				pageblock_start_pfn(zone_end_pfn(zone) - 1);
 222}
 223
 224/*
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 225 * This function is called to clear all cached information on pageblocks that
 226 * should be skipped for page isolation when the migrate and free page scanner
 227 * meet.
 228 */
 229static void __reset_isolation_suitable(struct zone *zone)
 230{
 231	unsigned long start_pfn = zone->zone_start_pfn;
 232	unsigned long end_pfn = zone_end_pfn(zone);
 233	unsigned long pfn;
 
 
 
 
 
 
 234
 235	zone->compact_blockskip_flush = false;
 236
 237	/* Walk the zone and mark every pageblock as suitable for isolation */
 238	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
 239		struct page *page;
 240
 
 
 
 
 241		cond_resched();
 242
 243		if (!pfn_valid(pfn))
 244			continue;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 245
 246		page = pfn_to_page(pfn);
 247		if (zone != page_zone(page))
 248			continue;
 249
 250		clear_pageblock_skip(page);
 251	}
 252
 253	reset_cached_positions(zone);
 254}
 255
 256void reset_isolation_suitable(pg_data_t *pgdat)
 257{
 258	int zoneid;
 259
 260	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
 261		struct zone *zone = &pgdat->node_zones[zoneid];
 262		if (!populated_zone(zone))
 263			continue;
 264
 265		/* Only flush if a full compaction finished recently */
 266		if (zone->compact_blockskip_flush)
 267			__reset_isolation_suitable(zone);
 268	}
 269}
 270
 271/*
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 272 * If no pages were isolated then mark this pageblock to be skipped in the
 273 * future. The information is later cleared by __reset_isolation_suitable().
 274 */
 275static void update_pageblock_skip(struct compact_control *cc,
 276			struct page *page, unsigned long nr_isolated,
 277			bool migrate_scanner)
 278{
 279	struct zone *zone = cc->zone;
 280	unsigned long pfn;
 281
 282	if (cc->ignore_skip_hint)
 283		return;
 284
 285	if (!page)
 286		return;
 287
 288	if (nr_isolated)
 289		return;
 290
 291	set_pageblock_skip(page);
 292
 293	pfn = page_to_pfn(page);
 294
 295	/* Update where async and sync compaction should restart */
 296	if (migrate_scanner) {
 297		if (pfn > zone->compact_cached_migrate_pfn[0])
 298			zone->compact_cached_migrate_pfn[0] = pfn;
 299		if (cc->mode != MIGRATE_ASYNC &&
 300		    pfn > zone->compact_cached_migrate_pfn[1])
 301			zone->compact_cached_migrate_pfn[1] = pfn;
 302	} else {
 303		if (pfn < zone->compact_cached_free_pfn)
 304			zone->compact_cached_free_pfn = pfn;
 305	}
 306}
 307#else
 308static inline bool isolation_suitable(struct compact_control *cc,
 309					struct page *page)
 310{
 311	return true;
 312}
 313
 314static void update_pageblock_skip(struct compact_control *cc,
 315			struct page *page, unsigned long nr_isolated,
 316			bool migrate_scanner)
 
 
 
 
 
 
 
 
 
 
 
 
 
 317{
 
 318}
 319#endif /* CONFIG_COMPACTION */
 320
 321/*
 322 * Compaction requires the taking of some coarse locks that are potentially
 323 * very heavily contended. For async compaction, back out if the lock cannot
 324 * be taken immediately. For sync compaction, spin on the lock if needed.
 
 
 325 *
 326 * Returns true if the lock is held
 327 * Returns false if the lock is not held and compaction should abort
 328 */
 329static bool compact_trylock_irqsave(spinlock_t *lock, unsigned long *flags,
 330						struct compact_control *cc)
 331{
 332	if (cc->mode == MIGRATE_ASYNC) {
 333		if (!spin_trylock_irqsave(lock, *flags)) {
 334			cc->contended = true;
 335			return false;
 336		}
 337	} else {
 338		spin_lock_irqsave(lock, *flags);
 339	}
 340
 
 341	return true;
 342}
 343
 344/*
 345 * Compaction requires the taking of some coarse locks that are potentially
 346 * very heavily contended. The lock should be periodically unlocked to avoid
 347 * having disabled IRQs for a long time, even when there is nobody waiting on
 348 * the lock. It might also be that allowing the IRQs will result in
 349 * need_resched() becoming true. If scheduling is needed, async compaction
 350 * aborts. Sync compaction schedules.
 351 * Either compaction type will also abort if a fatal signal is pending.
 352 * In either case if the lock was locked, it is dropped and not regained.
 353 *
 354 * Returns true if compaction should abort due to fatal signal pending, or
 355 *		async compaction due to need_resched()
 356 * Returns false when compaction can continue (sync compaction might have
 357 *		scheduled)
 358 */
 359static bool compact_unlock_should_abort(spinlock_t *lock,
 360		unsigned long flags, bool *locked, struct compact_control *cc)
 361{
 362	if (*locked) {
 363		spin_unlock_irqrestore(lock, flags);
 364		*locked = false;
 365	}
 366
 367	if (fatal_signal_pending(current)) {
 368		cc->contended = true;
 369		return true;
 370	}
 371
 372	if (need_resched()) {
 373		if (cc->mode == MIGRATE_ASYNC) {
 374			cc->contended = true;
 375			return true;
 376		}
 377		cond_resched();
 378	}
 379
 380	return false;
 381}
 382
 383/*
 384 * Aside from avoiding lock contention, compaction also periodically checks
 385 * need_resched() and either schedules in sync compaction or aborts async
 386 * compaction. This is similar to what compact_unlock_should_abort() does, but
 387 * is used where no lock is concerned.
 388 *
 389 * Returns false when no scheduling was needed, or sync compaction scheduled.
 390 * Returns true when async compaction should abort.
 391 */
 392static inline bool compact_should_abort(struct compact_control *cc)
 393{
 394	/* async compaction aborts if contended */
 395	if (need_resched()) {
 396		if (cc->mode == MIGRATE_ASYNC) {
 397			cc->contended = true;
 398			return true;
 399		}
 400
 401		cond_resched();
 402	}
 403
 404	return false;
 405}
 406
 407/*
 408 * Isolate free pages onto a private freelist. If @strict is true, will abort
 409 * returning 0 on any invalid PFNs or non-free pages inside of the pageblock
 410 * (even though it may still end up isolating some pages).
 411 */
 412static unsigned long isolate_freepages_block(struct compact_control *cc,
 413				unsigned long *start_pfn,
 414				unsigned long end_pfn,
 415				struct list_head *freelist,
 
 416				bool strict)
 417{
 418	int nr_scanned = 0, total_isolated = 0;
 419	struct page *cursor, *valid_page = NULL;
 420	unsigned long flags = 0;
 421	bool locked = false;
 422	unsigned long blockpfn = *start_pfn;
 423	unsigned int order;
 424
 
 
 
 
 425	cursor = pfn_to_page(blockpfn);
 426
 427	/* Isolate free pages. */
 428	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
 429		int isolated;
 430		struct page *page = cursor;
 431
 432		/*
 433		 * Periodically drop the lock (if held) regardless of its
 434		 * contention, to give chance to IRQs. Abort if fatal signal
 435		 * pending or async compaction detects need_resched()
 436		 */
 437		if (!(blockpfn % SWAP_CLUSTER_MAX)
 438		    && compact_unlock_should_abort(&cc->zone->lock, flags,
 439								&locked, cc))
 440			break;
 441
 442		nr_scanned++;
 443		if (!pfn_valid_within(blockpfn))
 444			goto isolate_fail;
 445
 446		if (!valid_page)
 447			valid_page = page;
 448
 449		/*
 450		 * For compound pages such as THP and hugetlbfs, we can save
 451		 * potentially a lot of iterations if we skip them at once.
 452		 * The check is racy, but we can consider only valid values
 453		 * and the only danger is skipping too much.
 454		 */
 455		if (PageCompound(page)) {
 456			unsigned int comp_order = compound_order(page);
 457
 458			if (likely(comp_order < MAX_ORDER)) {
 459				blockpfn += (1UL << comp_order) - 1;
 460				cursor += (1UL << comp_order) - 1;
 461			}
 462
 463			goto isolate_fail;
 464		}
 465
 466		if (!PageBuddy(page))
 467			goto isolate_fail;
 468
 469		/*
 470		 * If we already hold the lock, we can skip some rechecking.
 471		 * Note that if we hold the lock now, checked_pageblock was
 472		 * already set in some previous iteration (or strict is true),
 473		 * so it is correct to skip the suitable migration target
 474		 * recheck as well.
 475		 */
 476		if (!locked) {
 477			/*
 478			 * The zone lock must be held to isolate freepages.
 479			 * Unfortunately this is a very coarse lock and can be
 480			 * heavily contended if there are parallel allocations
 481			 * or parallel compactions. For async compaction do not
 482			 * spin on the lock and we acquire the lock as late as
 483			 * possible.
 484			 */
 485			locked = compact_trylock_irqsave(&cc->zone->lock,
 486								&flags, cc);
 487			if (!locked)
 488				break;
 489
 490			/* Recheck this is a buddy page under lock */
 491			if (!PageBuddy(page))
 492				goto isolate_fail;
 493		}
 494
 495		/* Found a free page, will break it into order-0 pages */
 496		order = page_order(page);
 497		isolated = __isolate_free_page(page, order);
 498		if (!isolated)
 499			break;
 500		set_page_private(page, order);
 501
 502		total_isolated += isolated;
 503		cc->nr_freepages += isolated;
 504		list_add_tail(&page->lru, freelist);
 505
 506		if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
 507			blockpfn += isolated;
 508			break;
 509		}
 510		/* Advance to the end of split page */
 511		blockpfn += isolated - 1;
 512		cursor += isolated - 1;
 513		continue;
 514
 515isolate_fail:
 516		if (strict)
 517			break;
 518		else
 519			continue;
 520
 521	}
 522
 523	if (locked)
 524		spin_unlock_irqrestore(&cc->zone->lock, flags);
 525
 526	/*
 527	 * There is a tiny chance that we have read bogus compound_order(),
 528	 * so be careful to not go outside of the pageblock.
 529	 */
 530	if (unlikely(blockpfn > end_pfn))
 531		blockpfn = end_pfn;
 532
 533	trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn,
 534					nr_scanned, total_isolated);
 535
 536	/* Record how far we have got within the block */
 537	*start_pfn = blockpfn;
 538
 539	/*
 540	 * If strict isolation is requested by CMA then check that all the
 541	 * pages requested were isolated. If there were any failures, 0 is
 542	 * returned and CMA will fail.
 543	 */
 544	if (strict && blockpfn < end_pfn)
 545		total_isolated = 0;
 546
 547	/* Update the pageblock-skip if the whole pageblock was scanned */
 548	if (blockpfn == end_pfn)
 549		update_pageblock_skip(cc, valid_page, total_isolated, false);
 550
 551	count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
 552	if (total_isolated)
 553		count_compact_events(COMPACTISOLATED, total_isolated);
 554	return total_isolated;
 555}
 556
 557/**
 558 * isolate_freepages_range() - isolate free pages.
 
 559 * @start_pfn: The first PFN to start isolating.
 560 * @end_pfn:   The one-past-last PFN.
 561 *
 562 * Non-free pages, invalid PFNs, or zone boundaries within the
 563 * [start_pfn, end_pfn) range are considered errors, cause function to
 564 * undo its actions and return zero.
 565 *
 566 * Otherwise, function returns one-past-the-last PFN of isolated page
 567 * (which may be greater then end_pfn if end fell in a middle of
 568 * a free page).
 569 */
 570unsigned long
 571isolate_freepages_range(struct compact_control *cc,
 572			unsigned long start_pfn, unsigned long end_pfn)
 573{
 574	unsigned long isolated, pfn, block_start_pfn, block_end_pfn;
 575	LIST_HEAD(freelist);
 576
 577	pfn = start_pfn;
 578	block_start_pfn = pageblock_start_pfn(pfn);
 579	if (block_start_pfn < cc->zone->zone_start_pfn)
 580		block_start_pfn = cc->zone->zone_start_pfn;
 581	block_end_pfn = pageblock_end_pfn(pfn);
 582
 583	for (; pfn < end_pfn; pfn += isolated,
 584				block_start_pfn = block_end_pfn,
 585				block_end_pfn += pageblock_nr_pages) {
 586		/* Protect pfn from changing by isolate_freepages_block */
 587		unsigned long isolate_start_pfn = pfn;
 588
 589		block_end_pfn = min(block_end_pfn, end_pfn);
 590
 591		/*
 592		 * pfn could pass the block_end_pfn if isolated freepage
 593		 * is more than pageblock order. In this case, we adjust
 594		 * scanning range to right one.
 595		 */
 596		if (pfn >= block_end_pfn) {
 597			block_start_pfn = pageblock_start_pfn(pfn);
 598			block_end_pfn = pageblock_end_pfn(pfn);
 599			block_end_pfn = min(block_end_pfn, end_pfn);
 600		}
 601
 602		if (!pageblock_pfn_to_page(block_start_pfn,
 603					block_end_pfn, cc->zone))
 604			break;
 605
 606		isolated = isolate_freepages_block(cc, &isolate_start_pfn,
 607						block_end_pfn, &freelist, true);
 608
 609		/*
 610		 * In strict mode, isolate_freepages_block() returns 0 if
 611		 * there are any holes in the block (ie. invalid PFNs or
 612		 * non-free pages).
 613		 */
 614		if (!isolated)
 615			break;
 616
 617		/*
 618		 * If we managed to isolate pages, it is always (1 << n) *
 619		 * pageblock_nr_pages for some non-negative n.  (Max order
 620		 * page may span two pageblocks).
 621		 */
 622	}
 623
 624	/* __isolate_free_page() does not map the pages */
 625	map_pages(&freelist);
 626
 627	if (pfn < end_pfn) {
 628		/* Loop terminated early, cleanup. */
 629		release_freepages(&freelist);
 630		return 0;
 631	}
 632
 633	/* We don't use freelists for anything. */
 634	return pfn;
 635}
 636
 637/* Similar to reclaim, but different enough that they don't share logic */
 638static bool too_many_isolated(struct zone *zone)
 639{
 640	unsigned long active, inactive, isolated;
 641
 642	inactive = node_page_state(zone->zone_pgdat, NR_INACTIVE_FILE) +
 643			node_page_state(zone->zone_pgdat, NR_INACTIVE_ANON);
 644	active = node_page_state(zone->zone_pgdat, NR_ACTIVE_FILE) +
 645			node_page_state(zone->zone_pgdat, NR_ACTIVE_ANON);
 646	isolated = node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE) +
 647			node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON);
 648
 649	return isolated > (inactive + active) / 2;
 650}
 651
 652/**
 653 * isolate_migratepages_block() - isolate all migrate-able pages within
 654 *				  a single pageblock
 655 * @cc:		Compaction control structure.
 656 * @low_pfn:	The first PFN to isolate
 657 * @end_pfn:	The one-past-the-last PFN to isolate, within same pageblock
 658 * @isolate_mode: Isolation mode to be used.
 659 *
 660 * Isolate all pages that can be migrated from the range specified by
 661 * [low_pfn, end_pfn). The range is expected to be within same pageblock.
 662 * Returns zero if there is a fatal signal pending, otherwise PFN of the
 663 * first page that was not scanned (which may be both less, equal to or more
 664 * than end_pfn).
 665 *
 666 * The pages are isolated on cc->migratepages list (not required to be empty),
 667 * and cc->nr_migratepages is updated accordingly. The cc->migrate_pfn field
 668 * is neither read nor updated.
 669 */
 670static unsigned long
 671isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 672			unsigned long end_pfn, isolate_mode_t isolate_mode)
 673{
 674	struct zone *zone = cc->zone;
 675	unsigned long nr_scanned = 0, nr_isolated = 0;
 676	struct lruvec *lruvec;
 677	unsigned long flags = 0;
 678	bool locked = false;
 679	struct page *page = NULL, *valid_page = NULL;
 680	unsigned long start_pfn = low_pfn;
 681	bool skip_on_failure = false;
 682	unsigned long next_skip_pfn = 0;
 
 683
 684	/*
 685	 * Ensure that there are not too many pages isolated from the LRU
 686	 * list by either parallel reclaimers or compaction. If there are,
 687	 * delay for some time until fewer pages are isolated
 688	 */
 689	while (unlikely(too_many_isolated(zone))) {
 690		/* async migration should just abort */
 691		if (cc->mode == MIGRATE_ASYNC)
 692			return 0;
 693
 694		congestion_wait(BLK_RW_ASYNC, HZ/10);
 695
 696		if (fatal_signal_pending(current))
 697			return 0;
 698	}
 699
 700	if (compact_should_abort(cc))
 701		return 0;
 702
 703	if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) {
 704		skip_on_failure = true;
 705		next_skip_pfn = block_end_pfn(low_pfn, cc->order);
 706	}
 707
 708	/* Time to isolate some pages for migration */
 709	for (; low_pfn < end_pfn; low_pfn++) {
 710
 711		if (skip_on_failure && low_pfn >= next_skip_pfn) {
 712			/*
 713			 * We have isolated all migration candidates in the
 714			 * previous order-aligned block, and did not skip it due
 715			 * to failure. We should migrate the pages now and
 716			 * hopefully succeed compaction.
 717			 */
 718			if (nr_isolated)
 719				break;
 720
 721			/*
 722			 * We failed to isolate in the previous order-aligned
 723			 * block. Set the new boundary to the end of the
 724			 * current block. Note we can't simply increase
 725			 * next_skip_pfn by 1 << order, as low_pfn might have
 726			 * been incremented by a higher number due to skipping
 727			 * a compound or a high-order buddy page in the
 728			 * previous loop iteration.
 729			 */
 730			next_skip_pfn = block_end_pfn(low_pfn, cc->order);
 731		}
 732
 733		/*
 734		 * Periodically drop the lock (if held) regardless of its
 735		 * contention, to give chance to IRQs. Abort async compaction
 736		 * if contended.
 737		 */
 738		if (!(low_pfn % SWAP_CLUSTER_MAX)
 739		    && compact_unlock_should_abort(zone_lru_lock(zone), flags,
 740								&locked, cc))
 741			break;
 
 
 742
 743		if (!pfn_valid_within(low_pfn))
 744			goto isolate_fail;
 745		nr_scanned++;
 746
 747		page = pfn_to_page(low_pfn);
 748
 749		if (!valid_page)
 
 
 
 
 
 
 
 
 
 
 750			valid_page = page;
 
 751
 752		/*
 753		 * Skip if free. We read page order here without zone lock
 754		 * which is generally unsafe, but the race window is small and
 755		 * the worst thing that can happen is that we skip some
 756		 * potential isolation targets.
 757		 */
 758		if (PageBuddy(page)) {
 759			unsigned long freepage_order = page_order_unsafe(page);
 760
 761			/*
 762			 * Without lock, we cannot be sure that what we got is
 763			 * a valid page order. Consider only values in the
 764			 * valid order range to prevent low_pfn overflow.
 765			 */
 766			if (freepage_order > 0 && freepage_order < MAX_ORDER)
 767				low_pfn += (1UL << freepage_order) - 1;
 768			continue;
 769		}
 770
 771		/*
 772		 * Regardless of being on LRU, compound pages such as THP and
 773		 * hugetlbfs are not to be compacted. We can potentially save
 774		 * a lot of iterations if we skip them at once. The check is
 775		 * racy, but we can consider only valid values and the only
 776		 * danger is skipping too much.
 777		 */
 778		if (PageCompound(page)) {
 779			unsigned int comp_order = compound_order(page);
 780
 781			if (likely(comp_order < MAX_ORDER))
 782				low_pfn += (1UL << comp_order) - 1;
 783
 
 
 784			goto isolate_fail;
 785		}
 786
 787		/*
 788		 * Check may be lockless but that's ok as we recheck later.
 789		 * It's possible to migrate LRU and non-lru movable pages.
 790		 * Skip any other type of page
 791		 */
 792		if (!PageLRU(page)) {
 793			/*
 794			 * __PageMovable can return false positive so we need
 795			 * to verify it under page_lock.
 796			 */
 797			if (unlikely(__PageMovable(page)) &&
 798					!PageIsolated(page)) {
 799				if (locked) {
 800					spin_unlock_irqrestore(zone_lru_lock(zone),
 801									flags);
 802					locked = false;
 803				}
 804
 805				if (isolate_movable_page(page, isolate_mode))
 806					goto isolate_success;
 807			}
 808
 809			goto isolate_fail;
 810		}
 811
 812		/*
 813		 * Migration will fail if an anonymous page is pinned in memory,
 814		 * so avoid taking lru_lock and isolating it unnecessarily in an
 815		 * admittedly racy check.
 816		 */
 817		if (!page_mapping(page) &&
 818		    page_count(page) > page_mapcount(page))
 819			goto isolate_fail;
 820
 821		/*
 822		 * Only allow migration of anonymous pages in GFP_NOFS context
 823		 * because those do not depend on fs locks.
 824		 */
 825		if (!(cc->gfp_mask & __GFP_FS) && page_mapping(page))
 826			goto isolate_fail;
 827
 828		/* If we already hold the lock, we can skip some rechecking */
 829		if (!locked) {
 830			locked = compact_trylock_irqsave(zone_lru_lock(zone),
 831								&flags, cc);
 832			if (!locked)
 833				break;
 834
 835			/* Recheck PageLRU and PageCompound under lock */
 836			if (!PageLRU(page))
 837				goto isolate_fail;
 838
 839			/*
 840			 * Page became compound since the non-locked check,
 841			 * and it's on LRU. It can only be a THP so the order
 842			 * is safe to read and it's 0 for tail pages.
 843			 */
 844			if (unlikely(PageCompound(page))) {
 845				low_pfn += (1UL << compound_order(page)) - 1;
 846				goto isolate_fail;
 847			}
 848		}
 849
 850		lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
 851
 852		/* Try to isolate the page */
 853		if (__isolate_lru_page(page, isolate_mode) != 0)
 854			goto isolate_fail;
 855
 856		VM_BUG_ON_PAGE(PageCompound(page), page);
 857
 858		/* Successfully isolated */
 859		del_page_from_lru_list(page, lruvec, page_lru(page));
 860		inc_node_page_state(page,
 861				NR_ISOLATED_ANON + page_is_file_cache(page));
 862
 863isolate_success:
 864		list_add(&page->lru, &cc->migratepages);
 865		cc->nr_migratepages++;
 866		nr_isolated++;
 867
 868		/*
 869		 * Record where we could have freed pages by migration and not
 870		 * yet flushed them to the buddy allocator.
 871		 * - this is the lowest page that was isolated and is likely
 872		 * to be freed by migration.
 873		 */
 874		if (!cc->last_migrated_pfn)
 875			cc->last_migrated_pfn = low_pfn;
 876
 877		/* Avoid isolating too much */
 878		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
 879			++low_pfn;
 880			break;
 881		}
 882
 883		continue;
 884isolate_fail:
 885		if (!skip_on_failure)
 886			continue;
 887
 888		/*
 889		 * We have isolated some pages, but then failed. Release them
 890		 * instead of migrating, as we cannot form the cc->order buddy
 891		 * page anyway.
 892		 */
 893		if (nr_isolated) {
 894			if (locked) {
 895				spin_unlock_irqrestore(zone_lru_lock(zone), flags);
 896				locked = false;
 897			}
 898			putback_movable_pages(&cc->migratepages);
 899			cc->nr_migratepages = 0;
 900			cc->last_migrated_pfn = 0;
 901			nr_isolated = 0;
 902		}
 903
 904		if (low_pfn < next_skip_pfn) {
 905			low_pfn = next_skip_pfn - 1;
 906			/*
 907			 * The check near the loop beginning would have updated
 908			 * next_skip_pfn too, but this is a bit simpler.
 909			 */
 910			next_skip_pfn += 1UL << cc->order;
 911		}
 912	}
 913
 914	/*
 915	 * The PageBuddy() check could have potentially brought us outside
 916	 * the range to be scanned.
 917	 */
 918	if (unlikely(low_pfn > end_pfn))
 919		low_pfn = end_pfn;
 920
 921	if (locked)
 922		spin_unlock_irqrestore(zone_lru_lock(zone), flags);
 923
 924	/*
 925	 * Update the pageblock-skip information and cached scanner pfn,
 926	 * if the whole pageblock was scanned without isolating any page.
 927	 */
 928	if (low_pfn == end_pfn)
 929		update_pageblock_skip(cc, valid_page, nr_isolated, true);
 930
 931	trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
 932						nr_scanned, nr_isolated);
 933
 934	count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned);
 935	if (nr_isolated)
 936		count_compact_events(COMPACTISOLATED, nr_isolated);
 937
 938	return low_pfn;
 939}
 940
 941/**
 942 * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
 943 * @cc:        Compaction control structure.
 944 * @start_pfn: The first PFN to start isolating.
 945 * @end_pfn:   The one-past-last PFN.
 946 *
 947 * Returns zero if isolation fails fatally due to e.g. pending signal.
 948 * Otherwise, the function returns the one-past-the-last PFN of the isolated pages
 949 * (which may be greater than end_pfn if the end fell in the middle of a THP page).
 950 */
 951unsigned long
 952isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
 953							unsigned long end_pfn)
 954{
 955	unsigned long pfn, block_start_pfn, block_end_pfn;
 956
 957	/* Scan block by block. First and last block may be incomplete */
 958	pfn = start_pfn;
 959	block_start_pfn = pageblock_start_pfn(pfn);
 960	if (block_start_pfn < cc->zone->zone_start_pfn)
 961		block_start_pfn = cc->zone->zone_start_pfn;
 962	block_end_pfn = pageblock_end_pfn(pfn);
 963
 964	for (; pfn < end_pfn; pfn = block_end_pfn,
 965				block_start_pfn = block_end_pfn,
 966				block_end_pfn += pageblock_nr_pages) {
 967
 968		block_end_pfn = min(block_end_pfn, end_pfn);
 969
 970		if (!pageblock_pfn_to_page(block_start_pfn,
 971					block_end_pfn, cc->zone))
 972			continue;
 973
 974		pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
 975							ISOLATE_UNEVICTABLE);
 976
 977		if (!pfn)
 978			break;
 979
 980		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
 981			break;
 982	}
 983
 984	return pfn;
 985}
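/*
 * Note: isolate_migratepages_range() scans an explicit PFN range and is
 * intended for callers that must empty exactly that range (for example the
 * CMA / alloc_contig_range() path - an assumption, since no caller appears
 * in this file). Regular compaction instead goes through
 * isolate_migratepages() below, which picks pageblocks via the migrate
 * scanner.
 */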
 986
 987#endif /* CONFIG_COMPACTION || CONFIG_CMA */
 988#ifdef CONFIG_COMPACTION
 989
 990/* Returns true if the page is within a block suitable for migration to */
 991static bool suitable_migration_target(struct compact_control *cc,
 992							struct page *page)
 993{
 994	if (cc->ignore_block_suitable)
 995		return true;
 996
 997	/* If the page is a large free page, then disallow migration */
 998	if (PageBuddy(page)) {
 999		/*
1000		 * We are checking page_order without zone->lock taken. But
1001		 * the only small danger is that we skip a potentially suitable
1002		 * pageblock, so it's not worth checking the order against the valid range.
1003		 */
1004		if (page_order_unsafe(page) >= pageblock_order)
1005			return false;
1006	}
1007
1008	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
1009	if (migrate_async_suitable(get_pageblock_migratetype(page)))
1010		return true;
1011
1012	/* Otherwise skip the block */
1013	return false;
1014}
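/*
 * Example: a pageblock that is a single free buddy page of pageblock_order
 * (or larger) is already as defragmented as it can get, so using it as a
 * migration target would only churn pages without reducing fragmentation;
 * that is why such blocks are rejected above.
 */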
1015
1016/*
1017 * Test whether the free scanner has reached the same or lower pageblock than
1018 * the migration scanner, and compaction should thus terminate.
1019 */
1020static inline bool compact_scanners_met(struct compact_control *cc)
1021{
1022	return (cc->free_pfn >> pageblock_order)
1023		<= (cc->migrate_pfn >> pageblock_order);
1024}
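/*
 * Illustrative example (assuming pageblock_order == 9, i.e. 2MB pageblocks
 * with 4KB pages): free_pfn == 0x21f00 and migrate_pfn == 0x21e00 both shift
 * down to pageblock 0x10f, so the scanners are considered to have met.
 */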
1025
1026/*
1027 * Based on information in the current compact_control, find blocks
1028 * suitable for isolating free pages from and then isolate them.
1029 */
1030static void isolate_freepages(struct compact_control *cc)
1031{
1032	struct zone *zone = cc->zone;
1033	struct page *page;
1034	unsigned long block_start_pfn;	/* start of current pageblock */
1035	unsigned long isolate_start_pfn; /* exact pfn we start at */
1036	unsigned long block_end_pfn;	/* end of current pageblock */
1037	unsigned long low_pfn;	     /* lowest pfn scanner is able to scan */
1038	struct list_head *freelist = &cc->freepages;
1039
1040	/*
1041	 * Initialise the free scanner. The starting point is where we last
1042	 * successfully isolated from, zone-cached value, or the end of the
1043	 * zone when isolating for the first time. For looping we also need
1044	 * this pfn aligned down to the pageblock boundary, because we do
1045	 * block_start_pfn -= pageblock_nr_pages in the for loop.
1046	 * For the ending point, take care when isolating in the last pageblock of
1047	 * a zone which ends in the middle of a pageblock.
1048	 * The low boundary is the end of the pageblock the migration scanner
1049	 * is using.
1050	 */
1051	isolate_start_pfn = cc->free_pfn;
1052	block_start_pfn = pageblock_start_pfn(cc->free_pfn);
1053	block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
1054						zone_end_pfn(zone));
1055	low_pfn = pageblock_end_pfn(cc->migrate_pfn);
1056
1057	/*
1058	 * Isolate free pages until enough are available to migrate the
1059	 * pages on cc->migratepages. We stop searching if the migrate
1060	 * and free page scanners meet or enough free pages are isolated.
1061	 */
1062	for (; block_start_pfn >= low_pfn;
1063				block_end_pfn = block_start_pfn,
1064				block_start_pfn -= pageblock_nr_pages,
1065				isolate_start_pfn = block_start_pfn) {
1066		/*
1067		 * This can iterate a massively long zone without finding any
1068		 * suitable migration targets, so periodically check if we need
1069		 * to schedule, or even abort async compaction.
1070		 */
1071		if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
1072						&& compact_should_abort(cc))
1073			break;
1074
1075		page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
1076									zone);
1077		if (!page)
1078			continue;
1079
1080		/* Check the block is suitable for migration */
1081		if (!suitable_migration_target(cc, page))
1082			continue;
1083
1084		/* If isolation recently failed, do not retry */
1085		if (!isolation_suitable(cc, page))
1086			continue;
1087
1088		/* Found a block suitable for isolating free pages from. */
1089		isolate_freepages_block(cc, &isolate_start_pfn, block_end_pfn,
1090					freelist, false);
1091
1092		/*
1093		 * If we isolated enough freepages, or aborted due to lock
1094		 * contention, terminate.
1095		 */
1096		if ((cc->nr_freepages >= cc->nr_migratepages)
1097							|| cc->contended) {
1098			if (isolate_start_pfn >= block_end_pfn) {
1099				/*
1100				 * Restart at previous pageblock if more
1101				 * freepages can be isolated next time.
1102				 */
1103				isolate_start_pfn =
1104					block_start_pfn - pageblock_nr_pages;
1105			}
1106			break;
1107		} else if (isolate_start_pfn < block_end_pfn) {
1108			/*
1109			 * If isolation failed early, do not continue
1110			 * needlessly.
1111			 */
1112			break;
1113		}
1114	}
1115
1116	/* __isolate_free_page() does not map the pages */
1117	map_pages(freelist);
1118
1119	/*
1120	 * Record where the free scanner will restart next time. Either we
1121	 * broke from the loop and set isolate_start_pfn based on the last
1122	 * call to isolate_freepages_block(), or we met the migration scanner
1123	 * and the loop terminated due to isolate_start_pfn < low_pfn
1124	 * and the loop terminated due to isolate_start_pfn < low_pfn.
1125	cc->free_pfn = isolate_start_pfn;
1126}
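/*
 * Note that isolate_freepages() is not called directly from compact_zone();
 * it is invoked lazily from compaction_alloc() below, only when the list of
 * already-isolated free pages runs dry during migration.
 */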
1127
1128/*
1129 * This is a migrate-callback that "allocates" freepages by taking pages
1130 * from the isolated freelists in the block we are migrating to.
1131 */
1132static struct page *compaction_alloc(struct page *migratepage,
1133					unsigned long data,
1134					int **result)
1135{
1136	struct compact_control *cc = (struct compact_control *)data;
1137	struct page *freepage;
1138
1139	/*
1140	 * Isolate free pages if necessary, and if we are not aborting due to
1141	 * contention.
1142	 */
1143	if (list_empty(&cc->freepages)) {
1144		if (!cc->contended)
1145			isolate_freepages(cc);
1146
1147		if (list_empty(&cc->freepages))
1148			return NULL;
1149	}
1150
1151	freepage = list_entry(cc->freepages.next, struct page, lru);
1152	list_del(&freepage->lru);
1153	cc->nr_freepages--;
1154
1155	return freepage;
1156}
1157
1158/*
1159 * This is a migrate-callback that "frees" freepages back to the isolated
1160 * freelist.  All pages on the freelist are from the same zone, so there is no
1161 * special handling needed for NUMA.
1162 */
1163static void compaction_free(struct page *page, unsigned long data)
1164{
1165	struct compact_control *cc = (struct compact_control *)data;
1166
1167	list_add(&page->lru, &cc->freepages);
1168	cc->nr_freepages++;
1169}
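/*
 * compaction_alloc() and compaction_free() are handed to migrate_pages() as
 * its allocation/free callbacks, with the compact_control passed through the
 * opaque 'data' argument, as done in compact_zone() below:
 *
 *	err = migrate_pages(&cc->migratepages, compaction_alloc,
 *			compaction_free, (unsigned long)cc, cc->mode,
 *			MR_COMPACTION);
 */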
1170
1171/* possible outcome of isolate_migratepages */
1172typedef enum {
1173	ISOLATE_ABORT,		/* Abort compaction now */
1174	ISOLATE_NONE,		/* No pages isolated, continue scanning */
1175	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
1176} isolate_migrate_t;
1177
1178/*
1179 * Allow userspace to control policy on scanning the unevictable LRU for
1180 * compactable pages.
1181 */
1182int sysctl_compact_unevictable_allowed __read_mostly = 1;
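/*
 * The knob above defaults to enabled: unevictable pages (such as mlocked
 * memory) may then be scanned for compaction, and ISOLATE_UNEVICTABLE is
 * OR-ed into the isolate_mode used by isolate_migratepages() below. It is
 * presumably exposed as /proc/sys/vm/compact_unevictable_allowed (the sysctl
 * table itself lives outside this file).
 */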
1183
1184/*
1185 * Isolate all pages that can be migrated from the first suitable block,
1186 * starting at the block pointed to by the migrate scanner pfn within
1187 * compact_control.
1188 */
1189static isolate_migrate_t isolate_migratepages(struct zone *zone,
1190					struct compact_control *cc)
1191{
1192	unsigned long block_start_pfn;
1193	unsigned long block_end_pfn;
1194	unsigned long low_pfn;
1195	struct page *page;
1196	const isolate_mode_t isolate_mode =
1197		(sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) |
1198		(cc->mode != MIGRATE_SYNC ? ISOLATE_ASYNC_MIGRATE : 0);
1199
1200	/*
1201	 * Start at where we last stopped, or beginning of the zone as
1202	 * initialized by compact_zone()
1203	 */
1204	low_pfn = cc->migrate_pfn;
1205	block_start_pfn = pageblock_start_pfn(low_pfn);
1206	if (block_start_pfn < zone->zone_start_pfn)
1207		block_start_pfn = zone->zone_start_pfn;
1208
1209	/* Only scan within a pageblock boundary */
1210	block_end_pfn = pageblock_end_pfn(low_pfn);
1211
1212	/*
1213	 * Iterate over whole pageblocks until we find the first suitable.
1214	 * Do not cross the free scanner.
1215	 */
1216	for (; block_end_pfn <= cc->free_pfn;
1217			low_pfn = block_end_pfn,
1218			block_start_pfn = block_end_pfn,
1219			block_end_pfn += pageblock_nr_pages) {
1220
1221		/*
1222		 * This can potentially iterate a massively long zone with
1223		 * many pageblocks unsuitable, so periodically check if we
1224		 * need to schedule, or even abort async compaction.
1225		 */
1226		if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
1227						&& compact_should_abort(cc))
1228			break;
1229
1230		page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
1231									zone);
1232		if (!page)
1233			continue;
1234
1235		/* If isolation recently failed, do not retry */
1236		if (!isolation_suitable(cc, page))
1237			continue;
1238
1239		/*
1240		 * For async compaction, also only scan in MOVABLE blocks.
1241		 * Async compaction is optimistic to see if the minimum amount
1242		 * of work satisfies the allocation.
1243		 */
1244		if (cc->mode == MIGRATE_ASYNC &&
1245		    !migrate_async_suitable(get_pageblock_migratetype(page)))
1246			continue;
1247
1248		/* Perform the isolation */
1249		low_pfn = isolate_migratepages_block(cc, low_pfn,
1250						block_end_pfn, isolate_mode);
1251
1252		if (!low_pfn || cc->contended)
1253			return ISOLATE_ABORT;
1254
1255		/*
1256		 * Either we isolated something and proceed with migration. Or
1257		 * we failed and compact_zone should decide if we should
1258		 * continue or not.
1259		 */
1260		break;
1261	}
1262
1263	/* Record where migration scanner will be restarted. */
1264	cc->migrate_pfn = low_pfn;
1265
1266	return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
1267}
1268
1269/*
1270 * order == -1 is expected when compacting via
1271 * /proc/sys/vm/compact_memory
1272 */
1273static inline bool is_via_compact_memory(int order)
1274{
1275	return order == -1;
1276}
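/*
 * cc->order is set to -1 by compact_node() below, which is the path used by
 * the compact_memory sysctl handler and the per-node sysfs trigger, so this
 * test distinguishes "compact everything" requests from allocation-driven
 * compaction.
 */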
1277
1278static enum compact_result __compact_finished(struct zone *zone, struct compact_control *cc,
1279			    const int migratetype)
1280{
1281	unsigned int order;
1282	unsigned long watermark;
1283
1284	if (cc->contended || fatal_signal_pending(current))
1285		return COMPACT_CONTENDED;
1286
1287	/* Compaction run completes if the migrate and free scanner meet */
1288	if (compact_scanners_met(cc)) {
1289		/* Let the next compaction start anew. */
1290		reset_cached_positions(zone);
1291
1292		/*
1293		 * Mark that the PG_migrate_skip information should be cleared
1294		 * by kswapd when it goes to sleep. kcompactd does not set the
1295		 * flag itself as the decision to clear it should be based
1296		 * directly on an allocation request.
1297		 */
1298		if (cc->direct_compaction)
1299			zone->compact_blockskip_flush = true;
1300
1301		if (cc->whole_zone)
1302			return COMPACT_COMPLETE;
1303		else
1304			return COMPACT_PARTIAL_SKIPPED;
1305	}
1306
1307	if (is_via_compact_memory(cc->order))
1308		return COMPACT_CONTINUE;
1309
1310	/* Compaction run is not finished if the watermark is not met */
1311	watermark = zone->watermark[cc->alloc_flags & ALLOC_WMARK_MASK];
1312
1313	if (!zone_watermark_ok(zone, cc->order, watermark, cc->classzone_idx,
1314							cc->alloc_flags))
1315		return COMPACT_CONTINUE;
1316
1317	/* Direct compactor: Is a suitable page free? */
1318	for (order = cc->order; order < MAX_ORDER; order++) {
1319		struct free_area *area = &zone->free_area[order];
1320		bool can_steal;
1321
1322		/* Job done if page is free of the right migratetype */
1323		if (!list_empty(&area->free_list[migratetype]))
1324			return COMPACT_SUCCESS;
1325
1326#ifdef CONFIG_CMA
1327		/* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */
1328		if (migratetype == MIGRATE_MOVABLE &&
1329			!list_empty(&area->free_list[MIGRATE_CMA]))
1330			return COMPACT_SUCCESS;
1331#endif
1332		/*
1333		 * Job done if allocation would steal freepages from
1334		 * other migratetype buddy lists.
1335		 */
1336		if (find_suitable_fallback(area, order, migratetype,
1337						true, &can_steal) != -1)
1338			return COMPACT_SUCCESS;
1339	}
1340
1341	return COMPACT_NO_SUITABLE_PAGE;
1342}
1343
1344static enum compact_result compact_finished(struct zone *zone,
1345			struct compact_control *cc,
1346			const int migratetype)
1347{
1348	int ret;
1349
1350	ret = __compact_finished(zone, cc, migratetype);
1351	trace_mm_compaction_finished(zone, cc->order, ret);
1352	if (ret == COMPACT_NO_SUITABLE_PAGE)
1353		ret = COMPACT_CONTINUE;
1354
1355	return ret;
1356}
1357
1358/*
1359 * compaction_suitable: Is this suitable to run compaction on this zone now?
1360 * Returns
1361 *   COMPACT_SKIPPED  - If there are too few free pages for compaction
1362 *   COMPACT_SUCCESS  - If the allocation would succeed without compaction
1363 *   COMPACT_CONTINUE - If compaction should run now
1364 */
1365static enum compact_result __compaction_suitable(struct zone *zone, int order,
1366					unsigned int alloc_flags,
1367					int classzone_idx,
1368					unsigned long wmark_target)
1369{
1370	unsigned long watermark;
1371
1372	if (is_via_compact_memory(order))
1373		return COMPACT_CONTINUE;
1374
1375	watermark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
1376	/*
1377	 * If watermarks for high-order allocation are already met, there
1378	 * should be no need for compaction at all.
1379	 */
1380	if (zone_watermark_ok(zone, order, watermark, classzone_idx,
1381								alloc_flags))
1382		return COMPACT_SUCCESS;
1383
1384	/*
1385	 * Watermarks for order-0 must be met for compaction to be able to
1386	 * isolate free pages for migration targets. This means that the
1387	 * watermark and alloc_flags have to match, or be more pessimistic than
1388	 * the check in __isolate_free_page(). We don't use the direct
1389	 * compactor's alloc_flags, as they are not relevant for freepage
1390	 * isolation. We however do use the direct compactor's classzone_idx to
1391	 * skip over zones where lowmem reserves would prevent allocation even
1392	 * if compaction succeeds.
1393	 * For costly orders, we require low watermark instead of min for
1394	 * compaction to proceed to increase its chances.
1395	 * ALLOC_CMA is used, as pages in CMA pageblocks are considered
1396	 * suitable migration targets
1397	 */
1398	watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ?
1399				low_wmark_pages(zone) : min_wmark_pages(zone);
1400	watermark += compact_gap(order);
1401	if (!__zone_watermark_ok(zone, 0, watermark, classzone_idx,
1402						ALLOC_CMA, wmark_target))
1403		return COMPACT_SKIPPED;
1404
1405	return COMPACT_CONTINUE;
1406}
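/*
 * compact_gap() (defined in mm/internal.h) adds headroom above the chosen
 * watermark so that free pages exist for migration targets while the pages
 * being migrated have not been freed yet; it is expected to be on the order
 * of twice the requested allocation size (an assumption about its
 * definition, which is not visible in this file).
 */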
1407
1408enum compact_result compaction_suitable(struct zone *zone, int order,
1409					unsigned int alloc_flags,
1410					int classzone_idx)
1411{
1412	enum compact_result ret;
1413	int fragindex;
1414
1415	ret = __compaction_suitable(zone, order, alloc_flags, classzone_idx,
1416				    zone_page_state(zone, NR_FREE_PAGES));
1417	/*
1418	 * fragmentation index determines if allocation failures are due to
1419	 * low memory or external fragmentation
1420	 *
1421	 * index of -1000 would imply allocations might succeed depending on
1422	 * watermarks, but we already failed the high-order watermark check
1423	 * index towards 0 implies failure is due to lack of memory
1424	 * index towards 1000 implies failure is due to fragmentation
1425	 *
1426	 * Only compact if a failure would be due to fragmentation. Also
1427	 * ignore fragindex for non-costly orders where the alternative to
1428	 * a successful reclaim/compaction is OOM. Fragindex and the
1429	 * vm.extfrag_threshold sysctl is meant as a heuristic to prevent
1430	 * excessive compaction for costly orders, but it should not be at the
1431	 * expense of system stability.
1432	 */
1433	if (ret == COMPACT_CONTINUE && (order > PAGE_ALLOC_COSTLY_ORDER)) {
1434		fragindex = fragmentation_index(zone, order);
1435		if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
1436			ret = COMPACT_NOT_SUITABLE_ZONE;
1437	}
1438
1439	trace_mm_compaction_suitable(zone, order, ret);
1440	if (ret == COMPACT_NOT_SUITABLE_ZONE)
1441		ret = COMPACT_SKIPPED;
1442
1443	return ret;
1444}
1445
1446bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
1447		int alloc_flags)
1448{
1449	struct zone *zone;
1450	struct zoneref *z;
1451
1452	/*
1453	 * Make sure at least one zone would pass __compaction_suitable if we continue
1454	 * retrying the reclaim.
1455	 */
1456	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
1457					ac->nodemask) {
1458		unsigned long available;
1459		enum compact_result compact_result;
1460
1461		/*
1462		 * Do not consider all the reclaimable memory because we do not
1463		 * want to thrash just for a single high-order allocation which
1464		 * is not even guaranteed to succeed even if __compaction_suitable
1465		 * is happy about the watermark check.
1466		 */
1467		available = zone_reclaimable_pages(zone) / order;
1468		available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
1469		compact_result = __compaction_suitable(zone, order, alloc_flags,
1470				ac_classzone_idx(ac), available);
1471		if (compact_result != COMPACT_SKIPPED)
1472			return true;
1473	}
1474
1475	return false;
1476}
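/*
 * This helper has no callers in this file; it is presumably used by the page
 * allocator's should-we-retry logic to decide whether another round of
 * reclaim could make compaction viable for at least one zone.
 */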
1477
1478static enum compact_result compact_zone(struct zone *zone, struct compact_control *cc)
1479{
1480	enum compact_result ret;
1481	unsigned long start_pfn = zone->zone_start_pfn;
1482	unsigned long end_pfn = zone_end_pfn(zone);
1483	const int migratetype = gfpflags_to_migratetype(cc->gfp_mask);
1484	const bool sync = cc->mode != MIGRATE_ASYNC;
1485
1486	ret = compaction_suitable(zone, cc->order, cc->alloc_flags,
1487							cc->classzone_idx);
1488	/* Compaction is likely to fail */
1489	if (ret == COMPACT_SUCCESS || ret == COMPACT_SKIPPED)
1490		return ret;
1491
1492	/* huh, compaction_suitable is returning something unexpected */
1493	VM_BUG_ON(ret != COMPACT_CONTINUE);
1494
1495	/*
1496	 * Clear pageblock skip if there were failures recently and compaction
1497	 * is about to be retried after being deferred.
1498	 */
1499	if (compaction_restarting(zone, cc->order))
1500		__reset_isolation_suitable(zone);
1501
1502	/*
1503	 * Set up to move all movable pages to the end of the zone. Use cached
1504	 * information on where the scanners should start (unless we explicitly
1505	 * want to compact the whole zone), but check that it is initialised
1506	 * by ensuring the values are within zone boundaries.
1507	 */
1508	if (cc->whole_zone) {
1509		cc->migrate_pfn = start_pfn;
1510		cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
1511	} else {
1512		cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync];
1513		cc->free_pfn = zone->compact_cached_free_pfn;
1514		if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) {
1515			cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
1516			zone->compact_cached_free_pfn = cc->free_pfn;
1517		}
1518		if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) {
1519			cc->migrate_pfn = start_pfn;
1520			zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
1521			zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
1522		}
1523
1524		if (cc->migrate_pfn == start_pfn)
1525			cc->whole_zone = true;
1526	}
1527
1528	cc->last_migrated_pfn = 0;
1529
1530	trace_mm_compaction_begin(start_pfn, cc->migrate_pfn,
1531				cc->free_pfn, end_pfn, sync);
1532
1533	migrate_prep_local();
1534
1535	while ((ret = compact_finished(zone, cc, migratetype)) ==
1536						COMPACT_CONTINUE) {
1537		int err;
1538
1539		switch (isolate_migratepages(zone, cc)) {
1540		case ISOLATE_ABORT:
1541			ret = COMPACT_CONTENDED;
1542			putback_movable_pages(&cc->migratepages);
1543			cc->nr_migratepages = 0;
1544			goto out;
1545		case ISOLATE_NONE:
1546			/*
1547			 * We haven't isolated and migrated anything, but
1548			 * there might still be unflushed migrations from
1549			 * previous cc->order aligned block.
1550			 */
1551			goto check_drain;
1552		case ISOLATE_SUCCESS:
1553			;
1554		}
1555
1556		err = migrate_pages(&cc->migratepages, compaction_alloc,
1557				compaction_free, (unsigned long)cc, cc->mode,
1558				MR_COMPACTION);
1559
1560		trace_mm_compaction_migratepages(cc->nr_migratepages, err,
1561							&cc->migratepages);
1562
1563		/* All pages were either migrated or will be released */
1564		cc->nr_migratepages = 0;
1565		if (err) {
1566			putback_movable_pages(&cc->migratepages);
1567			/*
1568			 * migrate_pages() may return -ENOMEM when scanners meet
1569			 * and we want compact_finished() to detect it
1570			 */
1571			if (err == -ENOMEM && !compact_scanners_met(cc)) {
1572				ret = COMPACT_CONTENDED;
1573				goto out;
1574			}
1575			/*
1576			 * We failed to migrate at least one page in the current
1577			 * order-aligned block, so skip the rest of it.
1578			 */
1579			if (cc->direct_compaction &&
1580						(cc->mode == MIGRATE_ASYNC)) {
1581				cc->migrate_pfn = block_end_pfn(
1582						cc->migrate_pfn - 1, cc->order);
1583				/* Draining pcplists is useless in this case */
1584				cc->last_migrated_pfn = 0;
1585
1586			}
1587		}
1588
1589check_drain:
1590		/*
1591		 * Has the migration scanner moved away from the previous
1592		 * cc->order aligned block where we migrated from? If yes,
1593		 * flush the pages that were freed, so that they can merge and
1594		 * compact_finished() can detect immediately if allocation
1595		 * would succeed.
1596		 */
1597		if (cc->order > 0 && cc->last_migrated_pfn) {
1598			int cpu;
1599			unsigned long current_block_start =
1600				block_start_pfn(cc->migrate_pfn, cc->order);
1601
1602			if (cc->last_migrated_pfn < current_block_start) {
1603				cpu = get_cpu();
1604				lru_add_drain_cpu(cpu);
1605				drain_local_pages(zone);
1606				put_cpu();
1607				/* No more flushing until we migrate again */
1608				cc->last_migrated_pfn = 0;
1609			}
1610		}
1611
1612	}
1613
1614out:
1615	/*
1616	 * Release free pages and update where the free scanner should restart,
1617	 * so we don't leave any returned pages behind in the next attempt.
1618	 */
1619	if (cc->nr_freepages > 0) {
1620		unsigned long free_pfn = release_freepages(&cc->freepages);
1621
1622		cc->nr_freepages = 0;
1623		VM_BUG_ON(free_pfn == 0);
1624		/* The cached pfn is always the first in a pageblock */
1625		free_pfn = pageblock_start_pfn(free_pfn);
1626		/*
1627		 * Only go back, not forward. The cached pfn might have been
1628		 * already reset to zone end in compact_finished()
1629		 */
1630		if (free_pfn > zone->compact_cached_free_pfn)
1631			zone->compact_cached_free_pfn = free_pfn;
1632	}
1633
1634	trace_mm_compaction_end(start_pfn, cc->migrate_pfn,
1635				cc->free_pfn, end_pfn, sync, ret);
1636
1637	return ret;
1638}
1639
1640static enum compact_result compact_zone_order(struct zone *zone, int order,
1641		gfp_t gfp_mask, enum compact_priority prio,
1642		unsigned int alloc_flags, int classzone_idx)
1643{
1644	enum compact_result ret;
1645	struct compact_control cc = {
1646		.nr_freepages = 0,
1647		.nr_migratepages = 0,
1648		.order = order,
1649		.gfp_mask = gfp_mask,
1650		.zone = zone,
1651		.mode = (prio == COMPACT_PRIO_ASYNC) ?
1652					MIGRATE_ASYNC :	MIGRATE_SYNC_LIGHT,
1653		.alloc_flags = alloc_flags,
1654		.classzone_idx = classzone_idx,
1655		.direct_compaction = true,
1656		.whole_zone = (prio == MIN_COMPACT_PRIORITY),
1657		.ignore_skip_hint = (prio == MIN_COMPACT_PRIORITY),
1658		.ignore_block_suitable = (prio == MIN_COMPACT_PRIORITY)
1659	};
1660	INIT_LIST_HEAD(&cc.freepages);
1661	INIT_LIST_HEAD(&cc.migratepages);
1662
1663	ret = compact_zone(zone, &cc);
1664
1665	VM_BUG_ON(!list_empty(&cc.freepages));
1666	VM_BUG_ON(!list_empty(&cc.migratepages));
1667
1668	return ret;
1669}
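/*
 * Note how MIN_COMPACT_PRIORITY (the most aggressive direct-compaction
 * priority) turns off all the usual short-cuts above: the whole zone is
 * scanned, pageblock skip hints are ignored and block suitability checks
 * are bypassed.
 */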
1670
1671int sysctl_extfrag_threshold = 500;
1672
1673/**
1674 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
1675 * @gfp_mask: The GFP mask of the current allocation
1676 * @order: The order of the current allocation
1677 * @alloc_flags: The allocation flags of the current allocation
1678 * @ac: The context of current allocation
1679 * @mode: The migration mode for async, sync light, or sync migration
1680 *
1681 * This is the main entry point for direct page compaction.
1682 */
1683enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
1684		unsigned int alloc_flags, const struct alloc_context *ac,
1685		enum compact_priority prio)
1686{
1687	int may_perform_io = gfp_mask & __GFP_IO;
1688	struct zoneref *z;
1689	struct zone *zone;
1690	enum compact_result rc = COMPACT_SKIPPED;
1691
1692	/*
1693	 * Check if the GFP flags allow compaction - GFP_NOIO is really
1694	 * tricky context because the migration might require IO
1695	 */
1696	if (!may_perform_io)
1697		return COMPACT_SKIPPED;
1698
1699	trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio);
1700
1701	/* Compact each zone in the list */
1702	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
1703								ac->nodemask) {
1704		enum compact_result status;
1705
1706		if (prio > MIN_COMPACT_PRIORITY
1707					&& compaction_deferred(zone, order)) {
1708			rc = max_t(enum compact_result, COMPACT_DEFERRED, rc);
1709			continue;
1710		}
1711
1712		status = compact_zone_order(zone, order, gfp_mask, prio,
1713					alloc_flags, ac_classzone_idx(ac));
1714		rc = max(status, rc);
1715
1716		/* The allocation should succeed, stop compacting */
1717		if (status == COMPACT_SUCCESS) {
1718			/*
1719			 * We think the allocation will succeed in this zone,
1720			 * but it is not certain, hence the false. The caller
1721			 * will repeat this with true if allocation indeed
1722			 * succeeds in this zone.
1723			 */
1724			compaction_defer_reset(zone, order, false);
1725
1726			break;
1727		}
1728
1729		if (prio != COMPACT_PRIO_ASYNC && (status == COMPACT_COMPLETE ||
1730					status == COMPACT_PARTIAL_SKIPPED))
1731			/*
1732			 * We think that allocation won't succeed in this zone
1733			 * so we defer compaction there. If it ends up
1734			 * succeeding after all, it will be reset.
1735			 */
1736			defer_compaction(zone, order);
1737
1738		/*
1739		 * We might have stopped compacting due to need_resched() in
1740		 * async compaction, or due to a fatal signal detected. In that
1741		 * case do not try further zones
1742		 */
1743		if ((prio == COMPACT_PRIO_ASYNC && need_resched())
1744					|| fatal_signal_pending(current))
1745			break;
1746	}
1747
1748	return rc;
1749}
1750
1751
1752/* Compact all zones within a node */
1753static void compact_node(int nid)
1754{
1755	pg_data_t *pgdat = NODE_DATA(nid);
1756	int zoneid;
1757	struct zone *zone;
1758	struct compact_control cc = {
1759		.order = -1,
1760		.mode = MIGRATE_SYNC,
1761		.ignore_skip_hint = true,
1762		.whole_zone = true,
1763		.gfp_mask = GFP_KERNEL,
1764	};
1765
1766
1767	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
1768
1769		zone = &pgdat->node_zones[zoneid];
1770		if (!populated_zone(zone))
1771			continue;
1772
1773		cc.nr_freepages = 0;
1774		cc.nr_migratepages = 0;
1775		cc.zone = zone;
1776		INIT_LIST_HEAD(&cc.freepages);
1777		INIT_LIST_HEAD(&cc.migratepages);
1778
1779		compact_zone(zone, &cc);
1780
1781		VM_BUG_ON(!list_empty(&cc.freepages));
1782		VM_BUG_ON(!list_empty(&cc.migratepages));
1783	}
1784}
1785
1786/* Compact all nodes in the system */
1787static void compact_nodes(void)
1788{
1789	int nid;
1790
1791	/* Flush pending updates to the LRU lists */
1792	lru_add_drain_all();
1793
1794	for_each_online_node(nid)
1795		compact_node(nid);
1796}
1797
1798/* The written value is actually unused, all memory is compacted */
1799int sysctl_compact_memory;
1800
1801/*
1802 * This is the entry point for compacting all nodes via
1803 * /proc/sys/vm/compact_memory
1804 */
1805int sysctl_compaction_handler(struct ctl_table *table, int write,
1806			void __user *buffer, size_t *length, loff_t *ppos)
1807{
1808	if (write)
1809		compact_nodes();
1810
1811	return 0;
1812}
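/*
 * Example usage (any written value triggers a full compaction run):
 *
 *	echo 1 > /proc/sys/vm/compact_memory
 */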
1813
1814int sysctl_extfrag_handler(struct ctl_table *table, int write,
1815			void __user *buffer, size_t *length, loff_t *ppos)
1816{
1817	proc_dointvec_minmax(table, write, buffer, length, ppos);
1818
1819	return 0;
1820}
1821
1822#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
1823static ssize_t sysfs_compact_node(struct device *dev,
1824			struct device_attribute *attr,
1825			const char *buf, size_t count)
1826{
1827	int nid = dev->id;
1828
1829	if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
1830		/* Flush pending updates to the LRU lists */
1831		lru_add_drain_all();
1832
1833		compact_node(nid);
1834	}
1835
1836	return count;
1837}
1838static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);
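/*
 * Writing to the per-node 'compact' attribute (presumably
 * /sys/devices/system/node/nodeN/compact) compacts just that node, e.g.:
 *
 *	echo 1 > /sys/devices/system/node/node0/compact
 */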
1839
1840int compaction_register_node(struct node *node)
1841{
1842	return device_create_file(&node->dev, &dev_attr_compact);
1843}
1844
1845void compaction_unregister_node(struct node *node)
1846{
1847	return device_remove_file(&node->dev, &dev_attr_compact);
1848}
1849#endif /* CONFIG_SYSFS && CONFIG_NUMA */
1850
1851static inline bool kcompactd_work_requested(pg_data_t *pgdat)
1852{
1853	return pgdat->kcompactd_max_order > 0 || kthread_should_stop();
1854}
1855
1856static bool kcompactd_node_suitable(pg_data_t *pgdat)
1857{
1858	int zoneid;
1859	struct zone *zone;
1860	enum zone_type classzone_idx = pgdat->kcompactd_classzone_idx;
1861
1862	for (zoneid = 0; zoneid <= classzone_idx; zoneid++) {
1863		zone = &pgdat->node_zones[zoneid];
1864
1865		if (!populated_zone(zone))
1866			continue;
1867
1868		if (compaction_suitable(zone, pgdat->kcompactd_max_order, 0,
1869					classzone_idx) == COMPACT_CONTINUE)
1870			return true;
1871	}
1872
1873	return false;
1874}
1875
1876static void kcompactd_do_work(pg_data_t *pgdat)
1877{
1878	/*
1879	 * With no special task, compact all zones so that a page of requested
1880	 * order is allocatable.
1881	 */
1882	int zoneid;
1883	struct zone *zone;
1884	struct compact_control cc = {
1885		.order = pgdat->kcompactd_max_order,
1886		.classzone_idx = pgdat->kcompactd_classzone_idx,
1887		.mode = MIGRATE_SYNC_LIGHT,
1888		.ignore_skip_hint = true,
1889		.gfp_mask = GFP_KERNEL,
1890
1891	};
1892	trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order,
1893							cc.classzone_idx);
1894	count_vm_event(KCOMPACTD_WAKE);
1895
1896	for (zoneid = 0; zoneid <= cc.classzone_idx; zoneid++) {
1897		int status;
1898
1899		zone = &pgdat->node_zones[zoneid];
1900		if (!populated_zone(zone))
1901			continue;
1902
1903		if (compaction_deferred(zone, cc.order))
1904			continue;
1905
1906		if (compaction_suitable(zone, cc.order, 0, zoneid) !=
1907							COMPACT_CONTINUE)
1908			continue;
1909
1910		cc.nr_freepages = 0;
1911		cc.nr_migratepages = 0;
1912		cc.zone = zone;
1913		INIT_LIST_HEAD(&cc.freepages);
1914		INIT_LIST_HEAD(&cc.migratepages);
1915
1916		if (kthread_should_stop())
1917			return;
1918		status = compact_zone(zone, &cc);
1919
1920		if (status == COMPACT_SUCCESS) {
1921			compaction_defer_reset(zone, cc.order, false);
1922		} else if (status == COMPACT_PARTIAL_SKIPPED || status == COMPACT_COMPLETE) {
1923			/*
1924			 * We use sync migration mode here, so we defer like
1925			 * sync direct compaction does.
1926			 */
1927			defer_compaction(zone, cc.order);
1928		}
1929
1930		VM_BUG_ON(!list_empty(&cc.freepages));
1931		VM_BUG_ON(!list_empty(&cc.migratepages));
1932	}
1933
1934	/*
1935	 * Regardless of success, we are done until woken up next. But remember
1936	 * the requested order/classzone_idx in case it was higher/tighter than
1937	 * our current ones
1938	 */
1939	if (pgdat->kcompactd_max_order <= cc.order)
1940		pgdat->kcompactd_max_order = 0;
1941	if (pgdat->kcompactd_classzone_idx >= cc.classzone_idx)
1942		pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1;
1943}
1944
1945void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx)
1946{
1947	if (!order)
1948		return;
1949
1950	if (pgdat->kcompactd_max_order < order)
1951		pgdat->kcompactd_max_order = order;
1952
1953	if (pgdat->kcompactd_classzone_idx > classzone_idx)
1954		pgdat->kcompactd_classzone_idx = classzone_idx;
1955
1956	if (!waitqueue_active(&pgdat->kcompactd_wait))
1957		return;
1958
1959	if (!kcompactd_node_suitable(pgdat))
1960		return;
1961
1962	trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order,
1963							classzone_idx);
1964	wake_up_interruptible(&pgdat->kcompactd_wait);
1965}
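/*
 * wakeup_kcompactd() is expected to be called from the reclaim path (kswapd)
 * once free pages are available for an order it could not itself provide
 * contiguously - an assumption about the caller, which lives outside this
 * file.
 */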
1966
1967/*
1968 * The background compaction daemon, started as a kernel thread
1969 * from the init process.
1970 */
1971static int kcompactd(void *p)
1972{
1973	pg_data_t *pgdat = (pg_data_t*)p;
1974	struct task_struct *tsk = current;
1975
1976	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
1977
1978	if (!cpumask_empty(cpumask))
1979		set_cpus_allowed_ptr(tsk, cpumask);
1980
1981	set_freezable();
1982
1983	pgdat->kcompactd_max_order = 0;
1984	pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1;
1985
1986	while (!kthread_should_stop()) {
1987		trace_mm_compaction_kcompactd_sleep(pgdat->node_id);
1988		wait_event_freezable(pgdat->kcompactd_wait,
1989				kcompactd_work_requested(pgdat));
1990
1991		kcompactd_do_work(pgdat);
1992	}
1993
1994	return 0;
1995}
1996
1997/*
1998 * This kcompactd start function will be called by init and node-hot-add.
1999 * On node hot-add, kcompactd will be moved to the proper CPUs if CPUs are hot-added.
2000 */
2001int kcompactd_run(int nid)
2002{
2003	pg_data_t *pgdat = NODE_DATA(nid);
2004	int ret = 0;
2005
2006	if (pgdat->kcompactd)
2007		return 0;
2008
2009	pgdat->kcompactd = kthread_run(kcompactd, pgdat, "kcompactd%d", nid);
2010	if (IS_ERR(pgdat->kcompactd)) {
2011		pr_err("Failed to start kcompactd on node %d\n", nid);
2012		ret = PTR_ERR(pgdat->kcompactd);
2013		pgdat->kcompactd = NULL;
2014	}
2015	return ret;
2016}
2017
2018/*
2019 * Called by memory hotplug when all memory in a node is offlined. Caller must
2020 * hold mem_hotplug_begin/end().
2021 */
2022void kcompactd_stop(int nid)
2023{
2024	struct task_struct *kcompactd = NODE_DATA(nid)->kcompactd;
2025
2026	if (kcompactd) {
2027		kthread_stop(kcompactd);
2028		NODE_DATA(nid)->kcompactd = NULL;
2029	}
2030}
2031
2032/*
2033 * It's optimal to keep kcompactd threads on the same CPUs as their memory,
2034 * but it is not required for correctness. So if the last CPU in a node goes
2035 * away, kcompactd may run anywhere; when the first CPU of that node comes
2036 * back, restore its CPU binding.
2037 */
2038static int kcompactd_cpu_online(unsigned int cpu)
2039{
2040	int nid;
2041
2042	for_each_node_state(nid, N_MEMORY) {
2043		pg_data_t *pgdat = NODE_DATA(nid);
2044		const struct cpumask *mask;
2045
2046		mask = cpumask_of_node(pgdat->node_id);
2047
2048		if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
2049			/* One of our CPUs online: restore mask */
2050			set_cpus_allowed_ptr(pgdat->kcompactd, mask);
2051	}
2052	return 0;
2053}
2054
2055static int __init kcompactd_init(void)
2056{
2057	int nid;
2058	int ret;
2059
2060	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
2061					"mm/compaction:online",
2062					kcompactd_cpu_online, NULL);
2063	if (ret < 0) {
2064		pr_err("kcompactd: failed to register hotplug callbacks.\n");
2065		return ret;
2066	}
2067
2068	for_each_node_state(nid, N_MEMORY)
2069		kcompactd_run(nid);
2070	return 0;
2071}
2072subsys_initcall(kcompactd_init)
2073
2074#endif /* CONFIG_COMPACTION */