mm/khugepaged.c (Linux v6.2)
   1// SPDX-License-Identifier: GPL-2.0
   2#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   3
   4#include <linux/mm.h>
   5#include <linux/sched.h>
   6#include <linux/sched/mm.h>
   7#include <linux/sched/coredump.h>
   8#include <linux/mmu_notifier.h>
   9#include <linux/rmap.h>
  10#include <linux/swap.h>
  11#include <linux/mm_inline.h>
  12#include <linux/kthread.h>
  13#include <linux/khugepaged.h>
  14#include <linux/freezer.h>
  15#include <linux/mman.h>
  16#include <linux/hashtable.h>
  17#include <linux/userfaultfd_k.h>
  18#include <linux/page_idle.h>
  19#include <linux/page_table_check.h>
  20#include <linux/swapops.h>
  21#include <linux/shmem_fs.h>
  22
  23#include <asm/tlb.h>
  24#include <asm/pgalloc.h>
  25#include "internal.h"
  26#include "mm_slot.h"
  27
  28enum scan_result {
  29	SCAN_FAIL,
  30	SCAN_SUCCEED,
  31	SCAN_PMD_NULL,
  32	SCAN_PMD_NONE,
  33	SCAN_PMD_MAPPED,
  34	SCAN_EXCEED_NONE_PTE,
  35	SCAN_EXCEED_SWAP_PTE,
  36	SCAN_EXCEED_SHARED_PTE,
  37	SCAN_PTE_NON_PRESENT,
  38	SCAN_PTE_UFFD_WP,
  39	SCAN_PTE_MAPPED_HUGEPAGE,
  40	SCAN_PAGE_RO,
  41	SCAN_LACK_REFERENCED_PAGE,
  42	SCAN_PAGE_NULL,
  43	SCAN_SCAN_ABORT,
  44	SCAN_PAGE_COUNT,
  45	SCAN_PAGE_LRU,
  46	SCAN_PAGE_LOCK,
  47	SCAN_PAGE_ANON,
  48	SCAN_PAGE_COMPOUND,
  49	SCAN_ANY_PROCESS,
  50	SCAN_VMA_NULL,
  51	SCAN_VMA_CHECK,
  52	SCAN_ADDRESS_RANGE,
  53	SCAN_DEL_PAGE_LRU,
  54	SCAN_ALLOC_HUGE_PAGE_FAIL,
  55	SCAN_CGROUP_CHARGE_FAIL,
  56	SCAN_TRUNCATED,
  57	SCAN_PAGE_HAS_PRIVATE,
  58};
  59
  60#define CREATE_TRACE_POINTS
  61#include <trace/events/huge_memory.h>
  62
  63static struct task_struct *khugepaged_thread __read_mostly;
  64static DEFINE_MUTEX(khugepaged_mutex);
  65
   66/* default: scan 8*512 ptes (or vmas) every 10 seconds */
  67static unsigned int khugepaged_pages_to_scan __read_mostly;
  68static unsigned int khugepaged_pages_collapsed;
  69static unsigned int khugepaged_full_scans;
  70static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
  71/* during fragmentation poll the hugepage allocator once every minute */
  72static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
  73static unsigned long khugepaged_sleep_expire;
  74static DEFINE_SPINLOCK(khugepaged_mm_lock);
  75static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
  76/*
   77 * By default, collapse hugepages if there is at least one pte mapped the
   78 * way it would have been mapped had the vma been large enough at
   79 * page-fault time.
  80 *
  81 * Note that these are only respected if collapse was initiated by khugepaged.
  82 */
  83static unsigned int khugepaged_max_ptes_none __read_mostly;
  84static unsigned int khugepaged_max_ptes_swap __read_mostly;
  85static unsigned int khugepaged_max_ptes_shared __read_mostly;
  86
  87#define MM_SLOTS_HASH_BITS 10
  88static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
  89
  90static struct kmem_cache *mm_slot_cache __read_mostly;
  91
  92#define MAX_PTE_MAPPED_THP 8
  93
  94struct collapse_control {
  95	bool is_khugepaged;
  96
  97	/* Num pages scanned per node */
  98	u32 node_load[MAX_NUMNODES];
  99
 100	/* nodemask for allocation fallback */
 101	nodemask_t alloc_nmask;
 102};
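/*
 * Editor's note (annotation, not part of the original source):
 * collapse_control is the per-call scratchpad shared by the khugepaged
 * daemon and the MADV_COLLAPSE path.  is_khugepaged selects the
 * stricter daemon heuristics (the max_ptes_* limits and the "young
 * pte" checks below), while node_load[] records how many scanned pages
 * came from each NUMA node so the huge page allocation can target the
 * most-represented node.
 */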
 103
 104/**
 105 * struct khugepaged_mm_slot - khugepaged information per mm that is being scanned
 106 * @slot: hash lookup from mm to mm_slot
 107 * @nr_pte_mapped_thp: number of pte mapped THP
  108 * @pte_mapped_thp: addresses of the pte-mapped THPs
 109 */
 110struct khugepaged_mm_slot {
 111	struct mm_slot slot;
 112
 113	/* pte-mapped THP in this mm */
 114	int nr_pte_mapped_thp;
 115	unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
 116};
 117
 118/**
 119 * struct khugepaged_scan - cursor for scanning
 120 * @mm_head: the head of the mm list to scan
 121 * @mm_slot: the current mm_slot we are scanning
  122 * @address: the next address inside that mm_slot to be scanned
 123 *
 124 * There is only the one khugepaged_scan instance of this cursor structure.
 125 */
 126struct khugepaged_scan {
 127	struct list_head mm_head;
 128	struct khugepaged_mm_slot *mm_slot;
 129	unsigned long address;
 130};
 131
 132static struct khugepaged_scan khugepaged_scan = {
 133	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
 134};
 135
 136#ifdef CONFIG_SYSFS
 137static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
 138					 struct kobj_attribute *attr,
 139					 char *buf)
 140{
 141	return sysfs_emit(buf, "%u\n", khugepaged_scan_sleep_millisecs);
 142}
 143
 144static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
 145					  struct kobj_attribute *attr,
 146					  const char *buf, size_t count)
 147{
 148	unsigned int msecs;
 149	int err;
 150
 151	err = kstrtouint(buf, 10, &msecs);
 152	if (err)
 153		return -EINVAL;
 154
 155	khugepaged_scan_sleep_millisecs = msecs;
 156	khugepaged_sleep_expire = 0;
 157	wake_up_interruptible(&khugepaged_wait);
 158
 159	return count;
 160}
 161static struct kobj_attribute scan_sleep_millisecs_attr =
 162	__ATTR_RW(scan_sleep_millisecs);
 163
 164static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
 165					  struct kobj_attribute *attr,
 166					  char *buf)
 167{
 168	return sysfs_emit(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
 169}
 170
 171static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
 172					   struct kobj_attribute *attr,
 173					   const char *buf, size_t count)
 174{
 175	unsigned int msecs;
 176	int err;
 177
 178	err = kstrtouint(buf, 10, &msecs);
 179	if (err)
 180		return -EINVAL;
 181
 182	khugepaged_alloc_sleep_millisecs = msecs;
 183	khugepaged_sleep_expire = 0;
 184	wake_up_interruptible(&khugepaged_wait);
 185
 186	return count;
 187}
 188static struct kobj_attribute alloc_sleep_millisecs_attr =
 189	__ATTR_RW(alloc_sleep_millisecs);
 190
 191static ssize_t pages_to_scan_show(struct kobject *kobj,
 192				  struct kobj_attribute *attr,
 193				  char *buf)
 194{
 195	return sysfs_emit(buf, "%u\n", khugepaged_pages_to_scan);
 196}
 197static ssize_t pages_to_scan_store(struct kobject *kobj,
 198				   struct kobj_attribute *attr,
 199				   const char *buf, size_t count)
 200{
 201	unsigned int pages;
 202	int err;
 203
 204	err = kstrtouint(buf, 10, &pages);
 205	if (err || !pages)
 206		return -EINVAL;
 207
 208	khugepaged_pages_to_scan = pages;
 209
 210	return count;
 211}
 212static struct kobj_attribute pages_to_scan_attr =
 213	__ATTR_RW(pages_to_scan);
 214
 215static ssize_t pages_collapsed_show(struct kobject *kobj,
 216				    struct kobj_attribute *attr,
 217				    char *buf)
 218{
 219	return sysfs_emit(buf, "%u\n", khugepaged_pages_collapsed);
 220}
 221static struct kobj_attribute pages_collapsed_attr =
 222	__ATTR_RO(pages_collapsed);
 223
 224static ssize_t full_scans_show(struct kobject *kobj,
 225			       struct kobj_attribute *attr,
 226			       char *buf)
 227{
 228	return sysfs_emit(buf, "%u\n", khugepaged_full_scans);
 229}
 230static struct kobj_attribute full_scans_attr =
 231	__ATTR_RO(full_scans);
 232
 233static ssize_t defrag_show(struct kobject *kobj,
 234			   struct kobj_attribute *attr, char *buf)
 235{
 236	return single_hugepage_flag_show(kobj, attr, buf,
 237					 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
 238}
 239static ssize_t defrag_store(struct kobject *kobj,
 240			    struct kobj_attribute *attr,
 241			    const char *buf, size_t count)
 242{
 243	return single_hugepage_flag_store(kobj, attr, buf, count,
 244				 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
 245}
 246static struct kobj_attribute khugepaged_defrag_attr =
 247	__ATTR_RW(defrag);
 248
 249/*
  250 * max_ptes_none controls whether khugepaged should collapse hugepages
  251 * over unmapped ptes, which can in turn increase the memory footprint of
  252 * the vmas. When max_ptes_none is 0, khugepaged will not reduce the free
  253 * memory available in the system as it runs. Increasing max_ptes_none
  254 * instead potentially reduces the free memory in the system during the
  255 * khugepaged scan.
 256 */
 257static ssize_t max_ptes_none_show(struct kobject *kobj,
 258				  struct kobj_attribute *attr,
 259				  char *buf)
 260{
 261	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_none);
 262}
 263static ssize_t max_ptes_none_store(struct kobject *kobj,
 264				   struct kobj_attribute *attr,
 265				   const char *buf, size_t count)
 266{
 267	int err;
 268	unsigned long max_ptes_none;
 269
 270	err = kstrtoul(buf, 10, &max_ptes_none);
 271	if (err || max_ptes_none > HPAGE_PMD_NR - 1)
 272		return -EINVAL;
 273
 274	khugepaged_max_ptes_none = max_ptes_none;
 275
 276	return count;
 277}
 278static struct kobj_attribute khugepaged_max_ptes_none_attr =
 279	__ATTR_RW(max_ptes_none);
 280
 281static ssize_t max_ptes_swap_show(struct kobject *kobj,
 282				  struct kobj_attribute *attr,
 283				  char *buf)
 284{
 285	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_swap);
 286}
 287
 288static ssize_t max_ptes_swap_store(struct kobject *kobj,
 289				   struct kobj_attribute *attr,
 290				   const char *buf, size_t count)
 291{
 292	int err;
 293	unsigned long max_ptes_swap;
 294
 295	err  = kstrtoul(buf, 10, &max_ptes_swap);
 296	if (err || max_ptes_swap > HPAGE_PMD_NR - 1)
 297		return -EINVAL;
 298
 299	khugepaged_max_ptes_swap = max_ptes_swap;
 300
 301	return count;
 302}
 303
 304static struct kobj_attribute khugepaged_max_ptes_swap_attr =
 305	__ATTR_RW(max_ptes_swap);
 306
 307static ssize_t max_ptes_shared_show(struct kobject *kobj,
 308				    struct kobj_attribute *attr,
 309				    char *buf)
 310{
 311	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_shared);
 312}
 313
 314static ssize_t max_ptes_shared_store(struct kobject *kobj,
 315				     struct kobj_attribute *attr,
 316				     const char *buf, size_t count)
 317{
 318	int err;
 319	unsigned long max_ptes_shared;
 320
 321	err  = kstrtoul(buf, 10, &max_ptes_shared);
 322	if (err || max_ptes_shared > HPAGE_PMD_NR - 1)
 323		return -EINVAL;
 324
 325	khugepaged_max_ptes_shared = max_ptes_shared;
 326
 327	return count;
 328}
 329
 330static struct kobj_attribute khugepaged_max_ptes_shared_attr =
 331	__ATTR_RW(max_ptes_shared);
 332
 333static struct attribute *khugepaged_attr[] = {
 334	&khugepaged_defrag_attr.attr,
 335	&khugepaged_max_ptes_none_attr.attr,
 336	&khugepaged_max_ptes_swap_attr.attr,
 337	&khugepaged_max_ptes_shared_attr.attr,
 338	&pages_to_scan_attr.attr,
 339	&pages_collapsed_attr.attr,
 340	&full_scans_attr.attr,
 341	&scan_sleep_millisecs_attr.attr,
 342	&alloc_sleep_millisecs_attr.attr,
 343	NULL,
 344};
 345
 346struct attribute_group khugepaged_attr_group = {
 347	.attrs = khugepaged_attr,
 348	.name = "khugepaged",
 349};
 350#endif /* CONFIG_SYSFS */
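/*
 * Editor's note (annotation, not part of the original source): with
 * CONFIG_SYSFS enabled, the attribute group above appears as a
 * "khugepaged" directory under the transparent_hugepage kobject,
 * typically /sys/kernel/mm/transparent_hugepage/khugepaged/.  A rough,
 * illustrative tuning session (values are examples only):
 *
 *	echo 100  > .../khugepaged/scan_sleep_millisecs
 *	echo 4096 > .../khugepaged/pages_to_scan
 *	cat .../khugepaged/pages_collapsed
 */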
 351
 352int hugepage_madvise(struct vm_area_struct *vma,
 353		     unsigned long *vm_flags, int advice)
 354{
 355	switch (advice) {
 356	case MADV_HUGEPAGE:
 357#ifdef CONFIG_S390
 358		/*
 359		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
 360		 * can't handle this properly after s390_enable_sie, so we simply
 361		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
 362		 */
 363		if (mm_has_pgste(vma->vm_mm))
 364			return 0;
 365#endif
 366		*vm_flags &= ~VM_NOHUGEPAGE;
 367		*vm_flags |= VM_HUGEPAGE;
 368		/*
  369		 * If the vma becomes good for khugepaged to scan,
  370		 * register it here without waiting for a page fault that
 371		 * may not happen any time soon.
 372		 */
 373		khugepaged_enter_vma(vma, *vm_flags);
 374		break;
 375	case MADV_NOHUGEPAGE:
 376		*vm_flags &= ~VM_HUGEPAGE;
 377		*vm_flags |= VM_NOHUGEPAGE;
 378		/*
 379		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
 380		 * this vma even if we leave the mm registered in khugepaged if
 381		 * it got registered before VM_NOHUGEPAGE was set.
 382		 */
 383		break;
 384	}
 385
 386	return 0;
 387}
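/*
 * Editor's note (annotation, not part of the original source):
 * hugepage_madvise() is the backend for the MADV_HUGEPAGE and
 * MADV_NOHUGEPAGE advice.  A minimal userspace sketch, illustrative
 * only, with error handling omitted:
 *
 *	void *buf = mmap(NULL, 8 << 20, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	madvise(buf, 8 << 20, MADV_HUGEPAGE);
 *
 * If the vma then passes hugepage_vma_check(), khugepaged_enter_vma()
 * registers the mm so the range becomes a collapse candidate without
 * waiting for a page fault.
 */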
 388
 389int __init khugepaged_init(void)
 390{
 391	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
 392					  sizeof(struct khugepaged_mm_slot),
 393					  __alignof__(struct khugepaged_mm_slot),
 394					  0, NULL);
 395	if (!mm_slot_cache)
 396		return -ENOMEM;
 397
 398	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
 399	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
 400	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
 401	khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;
 402
 403	return 0;
 404}
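/*
 * Editor's note (annotation, not part of the original source): with
 * 4 KiB base pages and a 2 MiB PMD, HPAGE_PMD_NR is 512, so the
 * defaults above work out to pages_to_scan = 4096, max_ptes_none = 511,
 * max_ptes_swap = 64 and max_ptes_shared = 256.  When CONFIG_SYSFS is
 * enabled they remain tunable at runtime via the files defined earlier
 * in this file.
 */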
 405
 406void __init khugepaged_destroy(void)
 407{
 408	kmem_cache_destroy(mm_slot_cache);
 409}
 410
 411static inline int hpage_collapse_test_exit(struct mm_struct *mm)
 412{
 413	return atomic_read(&mm->mm_users) == 0;
 414}
 415
 416void __khugepaged_enter(struct mm_struct *mm)
 417{
 418	struct khugepaged_mm_slot *mm_slot;
 419	struct mm_slot *slot;
 420	int wakeup;
 421
 422	mm_slot = mm_slot_alloc(mm_slot_cache);
 423	if (!mm_slot)
 424		return;
 425
 426	slot = &mm_slot->slot;
 427
 428	/* __khugepaged_exit() must not run from under us */
 429	VM_BUG_ON_MM(hpage_collapse_test_exit(mm), mm);
 430	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
 431		mm_slot_free(mm_slot_cache, mm_slot);
 432		return;
 433	}
 434
 435	spin_lock(&khugepaged_mm_lock);
 436	mm_slot_insert(mm_slots_hash, mm, slot);
 437	/*
 438	 * Insert just behind the scanning cursor, to let the area settle
 439	 * down a little.
 440	 */
 441	wakeup = list_empty(&khugepaged_scan.mm_head);
 442	list_add_tail(&slot->mm_node, &khugepaged_scan.mm_head);
 443	spin_unlock(&khugepaged_mm_lock);
 444
 445	mmgrab(mm);
 446	if (wakeup)
 447		wake_up_interruptible(&khugepaged_wait);
 448}
 449
 450void khugepaged_enter_vma(struct vm_area_struct *vma,
 451			  unsigned long vm_flags)
 452{
 453	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
 454	    hugepage_flags_enabled()) {
 455		if (hugepage_vma_check(vma, vm_flags, false, false, true))
 456			__khugepaged_enter(vma->vm_mm);
 457	}
 458}
 459
 460void __khugepaged_exit(struct mm_struct *mm)
 461{
 462	struct khugepaged_mm_slot *mm_slot;
 463	struct mm_slot *slot;
 464	int free = 0;
 465
 466	spin_lock(&khugepaged_mm_lock);
 467	slot = mm_slot_lookup(mm_slots_hash, mm);
 468	mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
 469	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
 470		hash_del(&slot->hash);
 471		list_del(&slot->mm_node);
 472		free = 1;
 473	}
 474	spin_unlock(&khugepaged_mm_lock);
 475
 476	if (free) {
 477		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
 478		mm_slot_free(mm_slot_cache, mm_slot);
 479		mmdrop(mm);
 480	} else if (mm_slot) {
 481		/*
 482		 * This is required to serialize against
 483		 * hpage_collapse_test_exit() (which is guaranteed to run
 484		 * under mmap sem read mode). Stop here (after we return all
 485		 * pagetables will be destroyed) until khugepaged has finished
 486		 * working on the pagetables under the mmap_lock.
 487		 */
 488		mmap_write_lock(mm);
 489		mmap_write_unlock(mm);
 490	}
 491}
 492
 493static void release_pte_page(struct page *page)
 494{
 495	mod_node_page_state(page_pgdat(page),
 496			NR_ISOLATED_ANON + page_is_file_lru(page),
 497			-compound_nr(page));
 498	unlock_page(page);
 499	putback_lru_page(page);
 500}
 501
 502static void release_pte_pages(pte_t *pte, pte_t *_pte,
 503		struct list_head *compound_pagelist)
 504{
 505	struct page *page, *tmp;
 506
 507	while (--_pte >= pte) {
 508		pte_t pteval = *_pte;
 509
 510		page = pte_page(pteval);
 511		if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)) &&
 512				!PageCompound(page))
 513			release_pte_page(page);
 514	}
 515
 516	list_for_each_entry_safe(page, tmp, compound_pagelist, lru) {
 517		list_del(&page->lru);
 518		release_pte_page(page);
 519	}
 520}
 521
 522static bool is_refcount_suitable(struct page *page)
 523{
 524	int expected_refcount;
 525
 526	expected_refcount = total_mapcount(page);
 527	if (PageSwapCache(page))
 528		expected_refcount += compound_nr(page);
 529
 530	return page_count(page) == expected_refcount;
 531}
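/*
 * Editor's note (annotation, not part of the original source): a rough
 * worked example of the check above.  A non-compound anonymous page
 * mapped by two processes and also sitting in the swap cache has
 * total_mapcount() == 2 plus one swap-cache reference (compound_nr()
 * == 1), so the expected refcount is 3.  Any additional reference,
 * e.g. a GUP pin, makes page_count() exceed that and the collapse
 * backs off with SCAN_PAGE_COUNT.
 */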
 532
 533static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 534					unsigned long address,
 535					pte_t *pte,
 536					struct collapse_control *cc,
 537					struct list_head *compound_pagelist)
 538{
 539	struct page *page = NULL;
 540	pte_t *_pte;
 541	int none_or_zero = 0, shared = 0, result = SCAN_FAIL, referenced = 0;
 542	bool writable = false;
 543
 544	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
 545	     _pte++, address += PAGE_SIZE) {
 546		pte_t pteval = *_pte;
 547		if (pte_none(pteval) || (pte_present(pteval) &&
 548				is_zero_pfn(pte_pfn(pteval)))) {
 549			++none_or_zero;
 550			if (!userfaultfd_armed(vma) &&
 551			    (!cc->is_khugepaged ||
 552			     none_or_zero <= khugepaged_max_ptes_none)) {
 553				continue;
 554			} else {
 555				result = SCAN_EXCEED_NONE_PTE;
 556				count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
 557				goto out;
 558			}
 559		}
 560		if (!pte_present(pteval)) {
 561			result = SCAN_PTE_NON_PRESENT;
 562			goto out;
 563		}
 564		page = vm_normal_page(vma, address, pteval);
 565		if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
 566			result = SCAN_PAGE_NULL;
 567			goto out;
 568		}
 569
 570		VM_BUG_ON_PAGE(!PageAnon(page), page);
 571
 572		if (page_mapcount(page) > 1) {
 573			++shared;
 574			if (cc->is_khugepaged &&
 575			    shared > khugepaged_max_ptes_shared) {
 576				result = SCAN_EXCEED_SHARED_PTE;
 577				count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
 578				goto out;
 579			}
 580		}
 581
 582		if (PageCompound(page)) {
 583			struct page *p;
 584			page = compound_head(page);
 585
 586			/*
 587			 * Check if we have dealt with the compound page
 588			 * already
 589			 */
 590			list_for_each_entry(p, compound_pagelist, lru) {
 591				if (page == p)
 592					goto next;
 593			}
 594		}
 595
 596		/*
 597		 * We can do it before isolate_lru_page because the
 598		 * page can't be freed from under us. NOTE: PG_lock
 599		 * is needed to serialize against split_huge_page
 600		 * when invoked from the VM.
 601		 */
 602		if (!trylock_page(page)) {
 603			result = SCAN_PAGE_LOCK;
 604			goto out;
 605		}
 606
 607		/*
 608		 * Check if the page has any GUP (or other external) pins.
 609		 *
 610		 * The page table that maps the page has been already unlinked
 611		 * from the page table tree and this process cannot get
 612		 * an additional pin on the page.
 613		 *
 614		 * New pins can come later if the page is shared across fork,
 615		 * but not from this process. The other process cannot write to
 616		 * the page, only trigger CoW.
 617		 */
 618		if (!is_refcount_suitable(page)) {
 619			unlock_page(page);
 620			result = SCAN_PAGE_COUNT;
 621			goto out;
 622		}
 623
 624		/*
  625		 * Isolate the page to avoid collapsing a hugepage
 626		 * currently in use by the VM.
 627		 */
 628		if (isolate_lru_page(page)) {
 629			unlock_page(page);
 630			result = SCAN_DEL_PAGE_LRU;
 631			goto out;
 632		}
 633		mod_node_page_state(page_pgdat(page),
 634				NR_ISOLATED_ANON + page_is_file_lru(page),
 635				compound_nr(page));
 636		VM_BUG_ON_PAGE(!PageLocked(page), page);
 637		VM_BUG_ON_PAGE(PageLRU(page), page);
 638
 639		if (PageCompound(page))
 640			list_add_tail(&page->lru, compound_pagelist);
 641next:
 642		/*
 643		 * If collapse was initiated by khugepaged, check that there is
  644		 * enough young ptes to justify collapsing the page
 645		 */
 646		if (cc->is_khugepaged &&
 647		    (pte_young(pteval) || page_is_young(page) ||
 648		     PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm,
 649								     address)))
 650			referenced++;
 651
 652		if (pte_write(pteval))
 653			writable = true;
 654	}
 655
 656	if (unlikely(!writable)) {
 657		result = SCAN_PAGE_RO;
 658	} else if (unlikely(cc->is_khugepaged && !referenced)) {
 659		result = SCAN_LACK_REFERENCED_PAGE;
 660	} else {
 661		result = SCAN_SUCCEED;
 662		trace_mm_collapse_huge_page_isolate(page, none_or_zero,
 663						    referenced, writable, result);
 664		return result;
 665	}
 666out:
 667	release_pte_pages(pte, _pte, compound_pagelist);
 668	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
 669					    referenced, writable, result);
 670	return result;
 671}
 672
 673static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
 674				      struct vm_area_struct *vma,
 675				      unsigned long address,
 676				      spinlock_t *ptl,
 677				      struct list_head *compound_pagelist)
 678{
 679	struct page *src_page, *tmp;
 680	pte_t *_pte;
 681	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
 682				_pte++, page++, address += PAGE_SIZE) {
 683		pte_t pteval = *_pte;
 684
 685		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
 686			clear_user_highpage(page, address);
 687			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
 688			if (is_zero_pfn(pte_pfn(pteval))) {
 689				/*
 690				 * ptl mostly unnecessary.
 691				 */
 692				spin_lock(ptl);
 693				ptep_clear(vma->vm_mm, address, _pte);
 694				spin_unlock(ptl);
 695			}
 696		} else {
 697			src_page = pte_page(pteval);
 698			copy_user_highpage(page, src_page, address, vma);
 699			if (!PageCompound(src_page))
 700				release_pte_page(src_page);
 701			/*
 702			 * ptl mostly unnecessary, but preempt has to
 703			 * be disabled to update the per-cpu stats
 704			 * inside page_remove_rmap().
 705			 */
 706			spin_lock(ptl);
 707			ptep_clear(vma->vm_mm, address, _pte);
 708			page_remove_rmap(src_page, vma, false);
 709			spin_unlock(ptl);
 710			free_page_and_swap_cache(src_page);
 711		}
 712	}
 713
 714	list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {
 715		list_del(&src_page->lru);
 716		mod_node_page_state(page_pgdat(src_page),
 717				    NR_ISOLATED_ANON + page_is_file_lru(src_page),
 718				    -compound_nr(src_page));
 719		unlock_page(src_page);
 720		free_swap_cache(src_page);
 721		putback_lru_page(src_page);
 722	}
 723}
 724
 725static void khugepaged_alloc_sleep(void)
 726{
 727	DEFINE_WAIT(wait);
 728
 729	add_wait_queue(&khugepaged_wait, &wait);
 730	__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
 731	schedule_timeout(msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
 732	remove_wait_queue(&khugepaged_wait, &wait);
 733}
 734
 735struct collapse_control khugepaged_collapse_control = {
 736	.is_khugepaged = true,
 737};
 738
 739static bool hpage_collapse_scan_abort(int nid, struct collapse_control *cc)
 740{
 741	int i;
 742
 743	/*
 744	 * If node_reclaim_mode is disabled, then no extra effort is made to
 745	 * allocate memory locally.
 746	 */
 747	if (!node_reclaim_enabled())
 748		return false;
 749
 750	/* If there is a count for this node already, it must be acceptable */
 751	if (cc->node_load[nid])
 752		return false;
 753
 754	for (i = 0; i < MAX_NUMNODES; i++) {
 755		if (!cc->node_load[i])
 756			continue;
 757		if (node_distance(nid, i) > node_reclaim_distance)
 758			return true;
 759	}
 760	return false;
 761}
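/*
 * Editor's note (annotation, not part of the original source): as far
 * as the code above shows, the point is to abort a scan whose pages
 * span NUMA nodes farther apart than node_reclaim_distance while node
 * reclaim is enabled, since backing pages from distant nodes with one
 * huge page would defeat local reclaim.  A node already present in
 * node_load[] was accepted earlier and is not re-checked.
 */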
 762
 763#define khugepaged_defrag()					\
 764	(transparent_hugepage_flags &				\
 765	 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))
 766
 767/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
 768static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
 769{
 770	return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
 771}
 772
 773#ifdef CONFIG_NUMA
 774static int hpage_collapse_find_target_node(struct collapse_control *cc)
 775{
 776	int nid, target_node = 0, max_value = 0;
 777
 778	/* find first node with max normal pages hit */
 779	for (nid = 0; nid < MAX_NUMNODES; nid++)
 780		if (cc->node_load[nid] > max_value) {
 781			max_value = cc->node_load[nid];
 782			target_node = nid;
 783		}
 784
 785	for_each_online_node(nid) {
 786		if (max_value == cc->node_load[nid])
 787			node_set(nid, cc->alloc_nmask);
 788	}
 789
 790	return target_node;
 791}
 792#else
 793static int hpage_collapse_find_target_node(struct collapse_control *cc)
 794{
 795	return 0;
 796}
 797#endif
 798
 799static bool hpage_collapse_alloc_page(struct page **hpage, gfp_t gfp, int node,
 800				      nodemask_t *nmask)
 801{
 802	*hpage = __alloc_pages(gfp, HPAGE_PMD_ORDER, node, nmask);
 803	if (unlikely(!*hpage)) {
 804		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 805		return false;
 806	}
 807
 808	prep_transhuge_page(*hpage);
 809	count_vm_event(THP_COLLAPSE_ALLOC);
 810	return true;
 811}
 812
 813/*
 814 * If mmap_lock temporarily dropped, revalidate vma
 815 * before taking mmap_lock.
 816 * Returns enum scan_result value.
 817 */
 818
 819static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
 820				   bool expect_anon,
 821				   struct vm_area_struct **vmap,
 822				   struct collapse_control *cc)
 823{
 824	struct vm_area_struct *vma;
 825
 826	if (unlikely(hpage_collapse_test_exit(mm)))
 827		return SCAN_ANY_PROCESS;
 828
 829	*vmap = vma = find_vma(mm, address);
 830	if (!vma)
 831		return SCAN_VMA_NULL;
 832
 833	if (!transhuge_vma_suitable(vma, address))
 834		return SCAN_ADDRESS_RANGE;
 835	if (!hugepage_vma_check(vma, vma->vm_flags, false, false,
 836				cc->is_khugepaged))
 837		return SCAN_VMA_CHECK;
 838	/*
 839	 * Anon VMA expected, the address may be unmapped then
  840	 * remapped to file after khugepaged re-acquired the mmap_lock.
 841	 *
 842	 * hugepage_vma_check may return true for qualified file
 843	 * vmas.
 844	 */
 845	if (expect_anon && (!(*vmap)->anon_vma || !vma_is_anonymous(*vmap)))
 846		return SCAN_PAGE_ANON;
 847	return SCAN_SUCCEED;
 848}
 849
 850/*
 851 * See pmd_trans_unstable() for how the result may change out from
 852 * underneath us, even if we hold mmap_lock in read.
 853 */
 854static int find_pmd_or_thp_or_none(struct mm_struct *mm,
 855				   unsigned long address,
 856				   pmd_t **pmd)
 857{
 858	pmd_t pmde;
 859
 860	*pmd = mm_find_pmd(mm, address);
 861	if (!*pmd)
 862		return SCAN_PMD_NULL;
 863
 864	pmde = pmdp_get_lockless(*pmd);
 865
 866#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 867	/* See comments in pmd_none_or_trans_huge_or_clear_bad() */
 868	barrier();
 869#endif
 870	if (pmd_none(pmde))
 871		return SCAN_PMD_NONE;
 872	if (!pmd_present(pmde))
 873		return SCAN_PMD_NULL;
 874	if (pmd_trans_huge(pmde))
 875		return SCAN_PMD_MAPPED;
 876	if (pmd_devmap(pmde))
 877		return SCAN_PMD_NULL;
 878	if (pmd_bad(pmde))
 879		return SCAN_PMD_NULL;
 880	return SCAN_SUCCEED;
 881}
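/*
 * Editor's note (annotation, not part of the original source): how the
 * callers use the return values above.  SCAN_SUCCEED means a present,
 * non-huge PMD pointing at a page table; SCAN_PMD_MAPPED means the
 * range is already backed by a huge PMD; SCAN_PMD_NONE means the PMD is
 * empty, which collapse_pte_mapped_thp() treats as "just install a
 * fresh huge PMD"; SCAN_PMD_NULL covers the missing, non-present,
 * devmap and bad cases.
 */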
 882
 883static int check_pmd_still_valid(struct mm_struct *mm,
 884				 unsigned long address,
 885				 pmd_t *pmd)
 886{
 887	pmd_t *new_pmd;
 888	int result = find_pmd_or_thp_or_none(mm, address, &new_pmd);
 889
 890	if (result != SCAN_SUCCEED)
 891		return result;
 892	if (new_pmd != pmd)
 893		return SCAN_FAIL;
 894	return SCAN_SUCCEED;
 895}
 896
 897/*
 898 * Bring missing pages in from swap, to complete THP collapse.
 899 * Only done if hpage_collapse_scan_pmd believes it is worthwhile.
 900 *
 901 * Called and returns without pte mapped or spinlocks held.
  902 * Note that on failure, mmap_lock will be released.
 903 */
 904
 905static int __collapse_huge_page_swapin(struct mm_struct *mm,
 906				       struct vm_area_struct *vma,
 907				       unsigned long haddr, pmd_t *pmd,
 908				       int referenced)
 909{
 910	int swapped_in = 0;
 911	vm_fault_t ret = 0;
 912	unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);
 913
 914	for (address = haddr; address < end; address += PAGE_SIZE) {
 915		struct vm_fault vmf = {
 916			.vma = vma,
 917			.address = address,
 918			.pgoff = linear_page_index(vma, haddr),
 919			.flags = FAULT_FLAG_ALLOW_RETRY,
 920			.pmd = pmd,
 921		};
 922
 923		vmf.pte = pte_offset_map(pmd, address);
 924		vmf.orig_pte = *vmf.pte;
 925		if (!is_swap_pte(vmf.orig_pte)) {
 926			pte_unmap(vmf.pte);
 927			continue;
 928		}
 929		ret = do_swap_page(&vmf);
 930
 931		/*
 932		 * do_swap_page returns VM_FAULT_RETRY with released mmap_lock.
 933		 * Note we treat VM_FAULT_RETRY as VM_FAULT_ERROR here because
 934		 * we do not retry here and swap entry will remain in pagetable
 935		 * resulting in later failure.
 936		 */
 937		if (ret & VM_FAULT_RETRY) {
 938			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
 939			/* Likely, but not guaranteed, that page lock failed */
 940			return SCAN_PAGE_LOCK;
 941		}
 942		if (ret & VM_FAULT_ERROR) {
 943			mmap_read_unlock(mm);
 944			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
 945			return SCAN_FAIL;
 946		}
 947		swapped_in++;
 948	}
 949
 950	/* Drain LRU add pagevec to remove extra pin on the swapped in pages */
 951	if (swapped_in)
 952		lru_add_drain();
 953
 954	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
 955	return SCAN_SUCCEED;
 956}
 957
 958static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
 959			      struct collapse_control *cc)
 960{
 961	gfp_t gfp = (cc->is_khugepaged ? alloc_hugepage_khugepaged_gfpmask() :
 962		     GFP_TRANSHUGE);
 963	int node = hpage_collapse_find_target_node(cc);
 964
 965	if (!hpage_collapse_alloc_page(hpage, gfp, node, &cc->alloc_nmask))
 966		return SCAN_ALLOC_HUGE_PAGE_FAIL;
 967	if (unlikely(mem_cgroup_charge(page_folio(*hpage), mm, gfp)))
 968		return SCAN_CGROUP_CHARGE_FAIL;
 969	count_memcg_page_event(*hpage, THP_COLLAPSE_ALLOC);
 970	return SCAN_SUCCEED;
 971}
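/*
 * Editor's note (annotation, not part of the original source):
 * allocation and memcg charging are folded into one helper because
 * both the anon path (collapse_huge_page) and the file path
 * (collapse_file) need the same sequence: pick the target node from
 * cc->node_load[], allocate with the stricter or lighter gfp mask
 * depending on whether khugepaged or MADV_COLLAPSE is driving, and
 * charge the new huge page before any page table or page cache change.
 */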
 972
 973static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
 974			      int referenced, int unmapped,
 975			      struct collapse_control *cc)
 976{
 977	LIST_HEAD(compound_pagelist);
 978	pmd_t *pmd, _pmd;
 979	pte_t *pte;
 980	pgtable_t pgtable;
 981	struct page *hpage;
 982	spinlock_t *pmd_ptl, *pte_ptl;
 983	int result = SCAN_FAIL;
 984	struct vm_area_struct *vma;
 985	struct mmu_notifier_range range;
 986
 987	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 988
 989	/*
 990	 * Before allocating the hugepage, release the mmap_lock read lock.
 991	 * The allocation can take potentially a long time if it involves
 992	 * sync compaction, and we do not need to hold the mmap_lock during
 993	 * that. We will recheck the vma after taking it again in write mode.
 994	 */
 995	mmap_read_unlock(mm);
 996
 997	result = alloc_charge_hpage(&hpage, mm, cc);
 998	if (result != SCAN_SUCCEED)
 999		goto out_nolock;
1000
1001	mmap_read_lock(mm);
1002	result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
1003	if (result != SCAN_SUCCEED) {
1004		mmap_read_unlock(mm);
1005		goto out_nolock;
1006	}
1007
1008	result = find_pmd_or_thp_or_none(mm, address, &pmd);
1009	if (result != SCAN_SUCCEED) {
1010		mmap_read_unlock(mm);
1011		goto out_nolock;
1012	}
1013
1014	if (unmapped) {
1015		/*
1016		 * __collapse_huge_page_swapin will return with mmap_lock
1017		 * released when it fails. So we jump out_nolock directly in
1018		 * that case.  Continuing to collapse causes inconsistency.
1019		 */
1020		result = __collapse_huge_page_swapin(mm, vma, address, pmd,
1021						     referenced);
1022		if (result != SCAN_SUCCEED)
1023			goto out_nolock;
1024	}
1025
1026	mmap_read_unlock(mm);
1027	/*
 1028	 * Prevent all access to the pagetables. The exceptions are gup_fast,
 1029	 * which is handled later by the ptep_clear_flush, and the VM, which is
 1030	 * handled by the anon_vma lock + PG_lock.
1031	 */
1032	mmap_write_lock(mm);
1033	result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
1034	if (result != SCAN_SUCCEED)
1035		goto out_up_write;
1036	/* check if the pmd is still valid */
1037	result = check_pmd_still_valid(mm, address, pmd);
1038	if (result != SCAN_SUCCEED)
1039		goto out_up_write;
1040
1041	anon_vma_lock_write(vma->anon_vma);
1042
1043	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
1044				address, address + HPAGE_PMD_SIZE);
1045	mmu_notifier_invalidate_range_start(&range);
1046
1047	pte = pte_offset_map(pmd, address);
1048	pte_ptl = pte_lockptr(mm, pmd);
1049
1050	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
1051	/*
1052	 * This removes any huge TLB entry from the CPU so we won't allow
1053	 * huge and small TLB entries for the same virtual address to
1054	 * avoid the risk of CPU bugs in that area.
1055	 *
1056	 * Parallel fast GUP is fine since fast GUP will back off when
1057	 * it detects PMD is changed.
1058	 */
1059	_pmd = pmdp_collapse_flush(vma, address, pmd);
1060	spin_unlock(pmd_ptl);
1061	mmu_notifier_invalidate_range_end(&range);
1062	tlb_remove_table_sync_one();
1063
1064	spin_lock(pte_ptl);
1065	result =  __collapse_huge_page_isolate(vma, address, pte, cc,
1066					       &compound_pagelist);
1067	spin_unlock(pte_ptl);
1068
1069	if (unlikely(result != SCAN_SUCCEED)) {
1070		pte_unmap(pte);
1071		spin_lock(pmd_ptl);
1072		BUG_ON(!pmd_none(*pmd));
1073		/*
1074		 * We can only use set_pmd_at when establishing
1075		 * hugepmds and never for establishing regular pmds that
 1076		 * point to regular pagetables. Use pmd_populate for that.
1077		 */
1078		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
1079		spin_unlock(pmd_ptl);
1080		anon_vma_unlock_write(vma->anon_vma);
1081		goto out_up_write;
1082	}
1083
1084	/*
1085	 * All pages are isolated and locked so anon_vma rmap
1086	 * can't run anymore.
1087	 */
1088	anon_vma_unlock_write(vma->anon_vma);
1089
1090	__collapse_huge_page_copy(pte, hpage, vma, address, pte_ptl,
1091				  &compound_pagelist);
1092	pte_unmap(pte);
1093	/*
1094	 * spin_lock() below is not the equivalent of smp_wmb(), but
1095	 * the smp_wmb() inside __SetPageUptodate() can be reused to
 1096	 * keep the copy_huge_page writes from becoming visible after
1097	 * the set_pmd_at() write.
1098	 */
1099	__SetPageUptodate(hpage);
1100	pgtable = pmd_pgtable(_pmd);
1101
1102	_pmd = mk_huge_pmd(hpage, vma->vm_page_prot);
1103	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1104
1105	spin_lock(pmd_ptl);
1106	BUG_ON(!pmd_none(*pmd));
1107	page_add_new_anon_rmap(hpage, vma, address);
1108	lru_cache_add_inactive_or_unevictable(hpage, vma);
1109	pgtable_trans_huge_deposit(mm, pmd, pgtable);
1110	set_pmd_at(mm, address, pmd, _pmd);
1111	update_mmu_cache_pmd(vma, address, pmd);
1112	spin_unlock(pmd_ptl);
1113
1114	hpage = NULL;
1115
1116	result = SCAN_SUCCEED;
1117out_up_write:
1118	mmap_write_unlock(mm);
1119out_nolock:
1120	if (hpage) {
1121		mem_cgroup_uncharge(page_folio(hpage));
1122		put_page(hpage);
1123	}
1124	trace_mm_collapse_huge_page(mm, result == SCAN_SUCCEED, result);
1125	return result;
1126}
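/*
 * Editor's note (annotation, not part of the original source): a rough
 * summary of the ordering above.  Allocate and charge the huge page
 * outside the mmap_lock; revalidate the vma under mmap_lock (read) and
 * swap in any missing pages; upgrade to mmap_lock (write) plus the
 * anon_vma lock; clear the PMD with pmdp_collapse_flush() so no other
 * CPU can race through the old page table; isolate and copy the
 * HPAGE_PMD_NR small pages into the huge page; finally install the
 * huge PMD and drop the locks.  A failure after the PMD was cleared
 * repopulates it with pmd_populate() so the original ptes become
 * visible again.
 */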
1127
1128static int hpage_collapse_scan_pmd(struct mm_struct *mm,
1129				   struct vm_area_struct *vma,
1130				   unsigned long address, bool *mmap_locked,
1131				   struct collapse_control *cc)
1132{
1133	pmd_t *pmd;
1134	pte_t *pte, *_pte;
1135	int result = SCAN_FAIL, referenced = 0;
1136	int none_or_zero = 0, shared = 0;
1137	struct page *page = NULL;
1138	unsigned long _address;
1139	spinlock_t *ptl;
1140	int node = NUMA_NO_NODE, unmapped = 0;
1141	bool writable = false;
1142
1143	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1144
1145	result = find_pmd_or_thp_or_none(mm, address, &pmd);
1146	if (result != SCAN_SUCCEED)
1147		goto out;
1148
1149	memset(cc->node_load, 0, sizeof(cc->node_load));
1150	nodes_clear(cc->alloc_nmask);
1151	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
1152	for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
1153	     _pte++, _address += PAGE_SIZE) {
1154		pte_t pteval = *_pte;
1155		if (is_swap_pte(pteval)) {
1156			++unmapped;
1157			if (!cc->is_khugepaged ||
1158			    unmapped <= khugepaged_max_ptes_swap) {
1159				/*
1160				 * Always be strict with uffd-wp
1161				 * enabled swap entries.  Please see
1162				 * comment below for pte_uffd_wp().
1163				 */
1164				if (pte_swp_uffd_wp(pteval)) {
1165					result = SCAN_PTE_UFFD_WP;
1166					goto out_unmap;
1167				}
1168				continue;
1169			} else {
1170				result = SCAN_EXCEED_SWAP_PTE;
1171				count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
1172				goto out_unmap;
1173			}
1174		}
1175		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
1176			++none_or_zero;
1177			if (!userfaultfd_armed(vma) &&
1178			    (!cc->is_khugepaged ||
1179			     none_or_zero <= khugepaged_max_ptes_none)) {
1180				continue;
1181			} else {
1182				result = SCAN_EXCEED_NONE_PTE;
1183				count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
1184				goto out_unmap;
1185			}
1186		}
1187		if (pte_uffd_wp(pteval)) {
1188			/*
1189			 * Don't collapse the page if any of the small
1190			 * PTEs are armed with uffd write protection.
1191			 * Here we can also mark the new huge pmd as
1192			 * write protected if any of the small ones is
1193			 * marked but that could bring unknown
1194			 * userfault messages that falls outside of
1195			 * the registered range.  So, just be simple.
1196			 */
1197			result = SCAN_PTE_UFFD_WP;
1198			goto out_unmap;
1199		}
1200		if (pte_write(pteval))
1201			writable = true;
1202
1203		page = vm_normal_page(vma, _address, pteval);
1204		if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
1205			result = SCAN_PAGE_NULL;
1206			goto out_unmap;
1207		}
1208
1209		if (page_mapcount(page) > 1) {
1210			++shared;
1211			if (cc->is_khugepaged &&
1212			    shared > khugepaged_max_ptes_shared) {
1213				result = SCAN_EXCEED_SHARED_PTE;
1214				count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
1215				goto out_unmap;
1216			}
1217		}
1218
1219		page = compound_head(page);
1220
1221		/*
1222		 * Record which node the original page is from and save this
1223		 * information to cc->node_load[].
 1224		 * Khugepaged will allocate the hugepage from the node that has
 1225		 * the max hit record.
1226		 */
1227		node = page_to_nid(page);
1228		if (hpage_collapse_scan_abort(node, cc)) {
1229			result = SCAN_SCAN_ABORT;
1230			goto out_unmap;
1231		}
1232		cc->node_load[node]++;
1233		if (!PageLRU(page)) {
1234			result = SCAN_PAGE_LRU;
1235			goto out_unmap;
1236		}
1237		if (PageLocked(page)) {
1238			result = SCAN_PAGE_LOCK;
1239			goto out_unmap;
1240		}
1241		if (!PageAnon(page)) {
1242			result = SCAN_PAGE_ANON;
1243			goto out_unmap;
1244		}
1245
1246		/*
1247		 * Check if the page has any GUP (or other external) pins.
1248		 *
1249		 * Here the check may be racy:
 1250		 * it may see total_mapcount > refcount in some cases.
 1251		 * But such a case is ephemeral, so we could always retry the
 1252		 * collapse later.  However, it may report a false positive if
 1253		 * the page has excessive GUP pins (i.e. 512).  Anyway, the same
 1254		 * check will be done again later, so the risk seems low.
1255		 */
1256		if (!is_refcount_suitable(page)) {
1257			result = SCAN_PAGE_COUNT;
1258			goto out_unmap;
1259		}
1260
1261		/*
1262		 * If collapse was initiated by khugepaged, check that there is
 1263		 * enough young ptes to justify collapsing the page
1264		 */
1265		if (cc->is_khugepaged &&
1266		    (pte_young(pteval) || page_is_young(page) ||
1267		     PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm,
1268								     address)))
1269			referenced++;
1270	}
1271	if (!writable) {
1272		result = SCAN_PAGE_RO;
1273	} else if (cc->is_khugepaged &&
1274		   (!referenced ||
1275		    (unmapped && referenced < HPAGE_PMD_NR / 2))) {
1276		result = SCAN_LACK_REFERENCED_PAGE;
1277	} else {
1278		result = SCAN_SUCCEED;
1279	}
1280out_unmap:
1281	pte_unmap_unlock(pte, ptl);
1282	if (result == SCAN_SUCCEED) {
1283		result = collapse_huge_page(mm, address, referenced,
1284					    unmapped, cc);
1285		/* collapse_huge_page will return with the mmap_lock released */
1286		*mmap_locked = false;
1287	}
1288out:
1289	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
1290				     none_or_zero, result, unmapped);
1291	return result;
1292}
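/*
 * Editor's note (annotation, not part of the original source):
 * hpage_collapse_scan_pmd() is the read-only feasibility pass: it walks
 * the HPAGE_PMD_NR ptes under the pte lock, counting none/zero, swap,
 * shared and referenced entries against the cc limits, and only if all
 * checks pass does it call collapse_huge_page(), which returns with the
 * mmap_lock dropped (hence *mmap_locked being cleared).
 */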
1293
1294static void collect_mm_slot(struct khugepaged_mm_slot *mm_slot)
1295{
1296	struct mm_slot *slot = &mm_slot->slot;
1297	struct mm_struct *mm = slot->mm;
1298
1299	lockdep_assert_held(&khugepaged_mm_lock);
1300
1301	if (hpage_collapse_test_exit(mm)) {
1302		/* free mm_slot */
1303		hash_del(&slot->hash);
1304		list_del(&slot->mm_node);
1305
1306		/*
1307		 * Not strictly needed because the mm exited already.
1308		 *
1309		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1310		 */
1311
1312		/* khugepaged_mm_lock actually not necessary for the below */
1313		mm_slot_free(mm_slot_cache, mm_slot);
1314		mmdrop(mm);
1315	}
1316}
1317
1318#ifdef CONFIG_SHMEM
1319/*
1320 * Notify khugepaged that given addr of the mm is pte-mapped THP. Then
1321 * khugepaged should try to collapse the page table.
1322 *
1323 * Note that following race exists:
1324 * (1) khugepaged calls khugepaged_collapse_pte_mapped_thps() for mm_struct A,
 1325 *     emptying A's ->pte_mapped_thp[] array.
1326 * (2) MADV_COLLAPSE collapses some file extent with target mm_struct B, and
1327 *     retract_page_tables() finds a VMA in mm_struct A mapping the same extent
1328 *     (at virtual address X) and adds an entry (for X) into mm_struct A's
 1329 *     ->pte_mapped_thp[] array.
1330 * (3) khugepaged calls khugepaged_collapse_scan_file() for mm_struct A at X,
1331 *     sees a pte-mapped THP (SCAN_PTE_MAPPED_HUGEPAGE) and adds an entry
 1332 *     (for X) into mm_struct A's ->pte_mapped_thp[] array.
1333 * Thus, it's possible the same address is added multiple times for the same
1334 * mm_struct.  Should this happen, we'll simply attempt
1335 * collapse_pte_mapped_thp() multiple times for the same address, under the same
1336 * exclusive mmap_lock, and assuming the first call is successful, subsequent
1337 * attempts will return quickly (without grabbing any additional locks) when
1338 * a huge pmd is found in find_pmd_or_thp_or_none().  Since this is a cheap
1339 * check, and since this is a rare occurrence, the cost of preventing this
1340 * "multiple-add" is thought to be more expensive than just handling it, should
1341 * it occur.
1342 */
1343static bool khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
1344					  unsigned long addr)
1345{
1346	struct khugepaged_mm_slot *mm_slot;
1347	struct mm_slot *slot;
1348	bool ret = false;
1349
1350	VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
1351
1352	spin_lock(&khugepaged_mm_lock);
1353	slot = mm_slot_lookup(mm_slots_hash, mm);
1354	mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
1355	if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP)) {
1356		mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
1357		ret = true;
1358	}
1359	spin_unlock(&khugepaged_mm_lock);
1360	return ret;
1361}
1362
1363/* hpage must be locked, and mmap_lock must be held in write */
1364static int set_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
1365			pmd_t *pmdp, struct page *hpage)
1366{
1367	struct vm_fault vmf = {
1368		.vma = vma,
1369		.address = addr,
1370		.flags = 0,
1371		.pmd = pmdp,
1372	};
1373
1374	VM_BUG_ON(!PageTransHuge(hpage));
1375	mmap_assert_write_locked(vma->vm_mm);
1376
1377	if (do_set_pmd(&vmf, hpage))
1378		return SCAN_FAIL;
1379
1380	get_page(hpage);
1381	return SCAN_SUCCEED;
1382}
1383
1384/*
1385 * A note about locking:
1386 * Trying to take the page table spinlocks would be useless here because those
1387 * are only used to synchronize:
1388 *
1389 *  - modifying terminal entries (ones that point to a data page, not to another
1390 *    page table)
1391 *  - installing *new* non-terminal entries
1392 *
1393 * Instead, we need roughly the same kind of protection as free_pgtables() or
1394 * mm_take_all_locks() (but only for a single VMA):
1395 * The mmap lock together with this VMA's rmap locks covers all paths towards
1396 * the page table entries we're messing with here, except for hardware page
1397 * table walks and lockless_pages_from_mm().
1398 */
1399static void collapse_and_free_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
1400				  unsigned long addr, pmd_t *pmdp)
1401{
1402	pmd_t pmd;
1403	struct mmu_notifier_range range;
1404
1405	mmap_assert_write_locked(mm);
1406	if (vma->vm_file)
1407		lockdep_assert_held_write(&vma->vm_file->f_mapping->i_mmap_rwsem);
1408	/*
1409	 * All anon_vmas attached to the VMA have the same root and are
1410	 * therefore locked by the same lock.
1411	 */
1412	if (vma->anon_vma)
1413		lockdep_assert_held_write(&vma->anon_vma->root->rwsem);
1414
1415	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm, addr,
1416				addr + HPAGE_PMD_SIZE);
1417	mmu_notifier_invalidate_range_start(&range);
1418	pmd = pmdp_collapse_flush(vma, addr, pmdp);
1419	tlb_remove_table_sync_one();
1420	mmu_notifier_invalidate_range_end(&range);
1421	mm_dec_nr_ptes(mm);
1422	page_table_check_pte_clear_range(mm, addr, pmd);
1423	pte_free(mm, pmd_pgtable(pmd));
1424}
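/*
 * Editor's note (annotation, not part of the original source): this
 * helper retires a page table that now merely duplicates a PMD-sized
 * page cache THP.  The caller must hold the mmap_lock in write mode
 * plus the file rmap lock, and the anon_vma lock if one exists, which
 * is what the lockdep assertions above encode; the freed page table is
 * accounted for via mm_dec_nr_ptes().
 */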
1425
1426/**
1427 * collapse_pte_mapped_thp - Try to collapse a pte-mapped THP for mm at
1428 * address haddr.
1429 *
1430 * @mm: process address space where collapse happens
1431 * @addr: THP collapse address
1432 * @install_pmd: If a huge PMD should be installed
1433 *
1434 * This function checks whether all the PTEs in the PMD are pointing to the
 1435 * right THP. If so, retract the page table so the THP can refault in
 1436 * as pmd-mapped. Possibly install a huge PMD mapping the THP.
1437 */
1438int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
1439			    bool install_pmd)
1440{
1441	unsigned long haddr = addr & HPAGE_PMD_MASK;
1442	struct vm_area_struct *vma = vma_lookup(mm, haddr);
1443	struct page *hpage;
1444	pte_t *start_pte, *pte;
1445	pmd_t *pmd;
1446	spinlock_t *ptl;
1447	int count = 0, result = SCAN_FAIL;
1448	int i;
1449
1450	mmap_assert_write_locked(mm);
1451
1452	/* Fast check before locking page if already PMD-mapped */
1453	result = find_pmd_or_thp_or_none(mm, haddr, &pmd);
1454	if (result == SCAN_PMD_MAPPED)
1455		return result;
1456
1457	if (!vma || !vma->vm_file ||
1458	    !range_in_vma(vma, haddr, haddr + HPAGE_PMD_SIZE))
1459		return SCAN_VMA_CHECK;
1460
1461	/*
1462	 * If we are here, we've succeeded in replacing all the native pages
1463	 * in the page cache with a single hugepage. If a mm were to fault-in
1464	 * this memory (mapped by a suitably aligned VMA), we'd get the hugepage
1465	 * and map it by a PMD, regardless of sysfs THP settings. As such, let's
1466	 * analogously elide sysfs THP settings here.
1467	 */
1468	if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false))
1469		return SCAN_VMA_CHECK;
1470
1471	/* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
1472	if (userfaultfd_wp(vma))
1473		return SCAN_PTE_UFFD_WP;
1474
1475	hpage = find_lock_page(vma->vm_file->f_mapping,
1476			       linear_page_index(vma, haddr));
1477	if (!hpage)
1478		return SCAN_PAGE_NULL;
1479
1480	if (!PageHead(hpage)) {
1481		result = SCAN_FAIL;
1482		goto drop_hpage;
1483	}
1484
1485	if (compound_order(hpage) != HPAGE_PMD_ORDER) {
1486		result = SCAN_PAGE_COMPOUND;
1487		goto drop_hpage;
1488	}
1489
1490	switch (result) {
1491	case SCAN_SUCCEED:
1492		break;
1493	case SCAN_PMD_NONE:
1494		/*
1495		 * In MADV_COLLAPSE path, possible race with khugepaged where
1496		 * all pte entries have been removed and pmd cleared.  If so,
1497		 * skip all the pte checks and just update the pmd mapping.
1498		 */
1499		goto maybe_install_pmd;
1500	default:
1501		goto drop_hpage;
1502	}
1503
1504	/*
1505	 * We need to lock the mapping so that from here on, only GUP-fast and
1506	 * hardware page walks can access the parts of the page tables that
1507	 * we're operating on.
1508	 * See collapse_and_free_pmd().
1509	 */
1510	i_mmap_lock_write(vma->vm_file->f_mapping);
1511
1512	/*
1513	 * This spinlock should be unnecessary: Nobody else should be accessing
1514	 * the page tables under spinlock protection here, only
1515	 * lockless_pages_from_mm() and the hardware page walker can access page
1516	 * tables while all the high-level locks are held in write mode.
1517	 */
1518	start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
1519	result = SCAN_FAIL;
1520
1521	/* step 1: check all mapped PTEs are to the right huge page */
1522	for (i = 0, addr = haddr, pte = start_pte;
1523	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1524		struct page *page;
1525
1526		/* empty pte, skip */
1527		if (pte_none(*pte))
1528			continue;
1529
1530		/* page swapped out, abort */
1531		if (!pte_present(*pte)) {
1532			result = SCAN_PTE_NON_PRESENT;
1533			goto abort;
1534		}
1535
1536		page = vm_normal_page(vma, addr, *pte);
1537		if (WARN_ON_ONCE(page && is_zone_device_page(page)))
1538			page = NULL;
1539		/*
1540		 * Note that uprobe, debugger, or MAP_PRIVATE may change the
1541		 * page table, but the new page will not be a subpage of hpage.
1542		 */
1543		if (hpage + i != page)
1544			goto abort;
1545		count++;
1546	}
1547
1548	/* step 2: adjust rmap */
1549	for (i = 0, addr = haddr, pte = start_pte;
1550	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1551		struct page *page;
1552
1553		if (pte_none(*pte))
1554			continue;
1555		page = vm_normal_page(vma, addr, *pte);
1556		if (WARN_ON_ONCE(page && is_zone_device_page(page)))
1557			goto abort;
1558		page_remove_rmap(page, vma, false);
1559	}
1560
1561	pte_unmap_unlock(start_pte, ptl);
1562
1563	/* step 3: set proper refcount and mm_counters. */
1564	if (count) {
1565		page_ref_sub(hpage, count);
1566		add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
1567	}
1568
1569	/* step 4: remove pte entries */
1570	/* we make no change to anon, but protect concurrent anon page lookup */
1571	if (vma->anon_vma)
1572		anon_vma_lock_write(vma->anon_vma);
1573
1574	collapse_and_free_pmd(mm, vma, haddr, pmd);
1575
1576	if (vma->anon_vma)
1577		anon_vma_unlock_write(vma->anon_vma);
1578	i_mmap_unlock_write(vma->vm_file->f_mapping);
1579
1580maybe_install_pmd:
1581	/* step 5: install pmd entry */
1582	result = install_pmd
1583			? set_huge_pmd(vma, haddr, pmd, hpage)
1584			: SCAN_SUCCEED;
1585
1586drop_hpage:
1587	unlock_page(hpage);
1588	put_page(hpage);
1589	return result;
1590
1591abort:
1592	pte_unmap_unlock(start_pte, ptl);
1593	i_mmap_unlock_write(vma->vm_file->f_mapping);
1594	goto drop_hpage;
1595}
1596
1597static void khugepaged_collapse_pte_mapped_thps(struct khugepaged_mm_slot *mm_slot)
1598{
1599	struct mm_slot *slot = &mm_slot->slot;
1600	struct mm_struct *mm = slot->mm;
1601	int i;
1602
1603	if (likely(mm_slot->nr_pte_mapped_thp == 0))
1604		return;
1605
1606	if (!mmap_write_trylock(mm))
1607		return;
1608
1609	if (unlikely(hpage_collapse_test_exit(mm)))
1610		goto out;
1611
1612	for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
1613		collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i], false);
1614
1615out:
1616	mm_slot->nr_pte_mapped_thp = 0;
1617	mmap_write_unlock(mm);
1618}
1619
1620static int retract_page_tables(struct address_space *mapping, pgoff_t pgoff,
1621			       struct mm_struct *target_mm,
1622			       unsigned long target_addr, struct page *hpage,
1623			       struct collapse_control *cc)
1624{
1625	struct vm_area_struct *vma;
1626	int target_result = SCAN_FAIL;
1627
1628	i_mmap_lock_write(mapping);
1629	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
1630		int result = SCAN_FAIL;
1631		struct mm_struct *mm = NULL;
1632		unsigned long addr = 0;
1633		pmd_t *pmd;
1634		bool is_target = false;
1635
1636		/*
1637		 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
 1638		 * got written to. These VMAs are likely not worth the cost of
 1639		 * taking mmap_write_lock(mm), as the PMD-mapping is likely to be split
1640		 * later.
1641		 *
1642		 * Note that vma->anon_vma check is racy: it can be set up after
1643		 * the check but before we took mmap_lock by the fault path.
1644		 * But page lock would prevent establishing any new ptes of the
1645		 * page, so we are safe.
1646		 *
 1647		 * An alternative would be to drop the check, but check that the page
1648		 * table is clear before calling pmdp_collapse_flush() under
1649		 * ptl. It has higher chance to recover THP for the VMA, but
1650		 * has higher cost too. It would also probably require locking
1651		 * the anon_vma.
1652		 */
1653		if (READ_ONCE(vma->anon_vma)) {
1654			result = SCAN_PAGE_ANON;
1655			goto next;
1656		}
1657		addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1658		if (addr & ~HPAGE_PMD_MASK ||
1659		    vma->vm_end < addr + HPAGE_PMD_SIZE) {
1660			result = SCAN_VMA_CHECK;
1661			goto next;
1662		}
1663		mm = vma->vm_mm;
1664		is_target = mm == target_mm && addr == target_addr;
1665		result = find_pmd_or_thp_or_none(mm, addr, &pmd);
1666		if (result != SCAN_SUCCEED)
1667			goto next;
1668		/*
1669		 * We need exclusive mmap_lock to retract page table.
1670		 *
1671		 * We use trylock due to lock inversion: we need to acquire
1672		 * mmap_lock while holding page lock. Fault path does it in
1673		 * reverse order. Trylock is a way to avoid deadlock.
1674		 *
1675		 * Also, it's not MADV_COLLAPSE's job to collapse other
1676		 * mappings - let khugepaged take care of them later.
1677		 */
1678		result = SCAN_PTE_MAPPED_HUGEPAGE;
1679		if ((cc->is_khugepaged || is_target) &&
1680		    mmap_write_trylock(mm)) {
1681			/*
1682			 * Re-check whether we have an ->anon_vma, because
1683			 * collapse_and_free_pmd() requires that either no
1684			 * ->anon_vma exists or the anon_vma is locked.
1685			 * We already checked ->anon_vma above, but that check
1686			 * is racy because ->anon_vma can be populated under the
1687			 * mmap lock in read mode.
1688			 */
1689			if (vma->anon_vma) {
1690				result = SCAN_PAGE_ANON;
1691				goto unlock_next;
1692			}
1693			/*
1694			 * When a vma is registered with uffd-wp, we can't
1695			 * recycle the pmd pgtable because there can be pte
 1696			 * markers installed.  Only skip it, so the rest of the mm/vma
 1697			 * can still have the same file mapped hugely; however,
 1698			 * it'll always be mapped in small page size for uffd-wp
1699			 * registered ranges.
1700			 */
1701			if (hpage_collapse_test_exit(mm)) {
1702				result = SCAN_ANY_PROCESS;
1703				goto unlock_next;
1704			}
1705			if (userfaultfd_wp(vma)) {
1706				result = SCAN_PTE_UFFD_WP;
1707				goto unlock_next;
1708			}
1709			collapse_and_free_pmd(mm, vma, addr, pmd);
1710			if (!cc->is_khugepaged && is_target)
1711				result = set_huge_pmd(vma, addr, pmd, hpage);
1712			else
1713				result = SCAN_SUCCEED;
1714
1715unlock_next:
1716			mmap_write_unlock(mm);
1717			goto next;
1718		}
1719		/*
1720		 * Calling context will handle target mm/addr. Otherwise, let
1721		 * khugepaged try again later.
1722		 */
1723		if (!is_target) {
1724			khugepaged_add_pte_mapped_thp(mm, addr);
1725			continue;
1726		}
1727next:
1728		if (is_target)
1729			target_result = result;
1730	}
1731	i_mmap_unlock_write(mapping);
1732	return target_result;
1733}
1734
1735/**
 1736 * collapse_file - collapse filemap/tmpfs/shmem pages into a huge one.
1737 *
1738 * @mm: process address space where collapse happens
1739 * @addr: virtual collapse start address
 1740 * @file: file that the collapse operates on
1741 * @start: collapse start address
1742 * @cc: collapse context and scratchpad
1743 *
1744 * Basic scheme is simple, details are more complex:
1745 *  - allocate and lock a new huge page;
1746 *  - scan page cache replacing old pages with the new one
1747 *    + swap/gup in pages if necessary;
1748 *    + fill in gaps;
1749 *    + keep old pages around in case rollback is required;
1750 *  - if replacing succeeds:
1751 *    + copy data over;
1752 *    + free old pages;
1753 *    + unlock huge page;
 1754 *  - if replacing failed:
1755 *    + put all pages back and unfreeze them;
1756 *    + restore gaps in the page cache;
1757 *    + unlock and free huge page;
1758 */
1759static int collapse_file(struct mm_struct *mm, unsigned long addr,
1760			 struct file *file, pgoff_t start,
1761			 struct collapse_control *cc)
1762{
1763	struct address_space *mapping = file->f_mapping;
1764	struct page *hpage;
1765	pgoff_t index = 0, end = start + HPAGE_PMD_NR;
1766	LIST_HEAD(pagelist);
1767	XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
1768	int nr_none = 0, result = SCAN_SUCCEED;
1769	bool is_shmem = shmem_file(file);
1770	int nr = 0;
1771
1772	VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
1773	VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1774
1775	result = alloc_charge_hpage(&hpage, mm, cc);
1776	if (result != SCAN_SUCCEED)
1777		goto out;
1778
1779	/*
1780	 * Ensure we have slots for all the pages in the range.  This is
1781	 * almost certainly a no-op because most of the pages must be present
1782	 */
1783	do {
1784		xas_lock_irq(&xas);
1785		xas_create_range(&xas);
1786		if (!xas_error(&xas))
1787			break;
1788		xas_unlock_irq(&xas);
1789		if (!xas_nomem(&xas, GFP_KERNEL)) {
 
1790			result = SCAN_FAIL;
1791			goto out;
1792		}
1793	} while (1);
1794
1795	__SetPageLocked(hpage);
1796	if (is_shmem)
1797		__SetPageSwapBacked(hpage);
1798	hpage->index = start;
1799	hpage->mapping = mapping;
1800
1801	/*
1802	 * At this point the hpage is locked and not up-to-date.
1803	 * It's safe to insert it into the page cache, because nobody would
1804	 * be able to map it or use it in another way until we unlock it.
1805	 */
1806
1807	xas_set(&xas, start);
1808	for (index = start; index < end; index++) {
1809		struct page *page = xas_next(&xas);
1810		struct folio *folio;
1811
1812		VM_BUG_ON(index != xas.xa_index);
1813		if (is_shmem) {
1814			if (!page) {
1815				/*
1816				 * Stop if extent has been truncated or
1817				 * hole-punched, and is now completely
1818				 * empty.
1819				 */
1820				if (index == start) {
1821					if (!xas_next_entry(&xas, end - 1)) {
1822						result = SCAN_TRUNCATED;
1823						goto xa_locked;
1824					}
1825					xas_set(&xas, index);
1826				}
1827				if (!shmem_charge(mapping->host, 1)) {
1828					result = SCAN_FAIL;
1829					goto xa_locked;
1830				}
1831				xas_store(&xas, hpage);
1832				nr_none++;
1833				continue;
1834			}
1835
1836			if (xa_is_value(page) || !PageUptodate(page)) {
1837				xas_unlock_irq(&xas);
1838				/* swap in or instantiate fallocated page */
1839				if (shmem_get_folio(mapping->host, index,
1840						&folio, SGP_NOALLOC)) {
1841					result = SCAN_FAIL;
1842					goto xa_unlocked;
1843				}
1844				page = folio_file_page(folio, index);
1845			} else if (trylock_page(page)) {
1846				get_page(page);
1847				xas_unlock_irq(&xas);
1848			} else {
1849				result = SCAN_PAGE_LOCK;
1850				goto xa_locked;
1851			}
1852		} else {	/* !is_shmem */
1853			if (!page || xa_is_value(page)) {
1854				xas_unlock_irq(&xas);
1855				page_cache_sync_readahead(mapping, &file->f_ra,
1856							  file, index,
1857							  end - index);
1858				/* drain pagevecs to help isolate_lru_page() */
1859				lru_add_drain();
1860				page = find_lock_page(mapping, index);
1861				if (unlikely(page == NULL)) {
1862					result = SCAN_FAIL;
1863					goto xa_unlocked;
1864				}
1865			} else if (PageDirty(page)) {
1866				/*
1867				 * khugepaged only works on read-only fd,
1868				 * so this page is dirty because it hasn't
1869				 * been flushed since first write. There
1870				 * won't be new dirty pages.
1871				 *
1872				 * Trigger async flush here and hope the
1873				 * writeback is done when khugepaged
1874				 * revisits this page.
1875				 *
1876				 * This is a one-off situation. We are not
1877				 * forcing writeback in a loop.
1878				 */
1879				xas_unlock_irq(&xas);
1880				filemap_flush(mapping);
1881				result = SCAN_FAIL;
1882				goto xa_unlocked;
1883			} else if (PageWriteback(page)) {
1884				xas_unlock_irq(&xas);
1885				result = SCAN_FAIL;
1886				goto xa_unlocked;
1887			} else if (trylock_page(page)) {
1888				get_page(page);
1889				xas_unlock_irq(&xas);
1890			} else {
1891				result = SCAN_PAGE_LOCK;
1892				goto xa_locked;
1893			}
1894		}
1895
1896		/*
1897		 * The page must be locked, so we can drop the i_pages lock
1898		 * without racing with truncate.
1899		 */
1900		VM_BUG_ON_PAGE(!PageLocked(page), page);
1901
1902		/* make sure the page is up to date */
1903		if (unlikely(!PageUptodate(page))) {
1904			result = SCAN_FAIL;
1905			goto out_unlock;
1906		}
1907
1908		/*
1909		 * If file was truncated then extended, or hole-punched, before
1910		 * we locked the first page, then a THP might be there already.
1911		 * This will be discovered on the first iteration.
1912		 */
1913		if (PageTransCompound(page)) {
1914			struct page *head = compound_head(page);
1915
1916			result = compound_order(head) == HPAGE_PMD_ORDER &&
1917					head->index == start
1918					/* Maybe PMD-mapped */
1919					? SCAN_PTE_MAPPED_HUGEPAGE
1920					: SCAN_PAGE_COMPOUND;
1921			goto out_unlock;
1922		}
1923
1924		folio = page_folio(page);
1925
1926		if (folio_mapping(folio) != mapping) {
1927			result = SCAN_TRUNCATED;
1928			goto out_unlock;
1929		}
1930
1931		if (!is_shmem && (folio_test_dirty(folio) ||
1932				  folio_test_writeback(folio))) {
1933			/*
1934			 * khugepaged only works on read-only fd, so this
1935			 * page is dirty because it hasn't been flushed
1936			 * since first write.
1937			 */
1938			result = SCAN_FAIL;
1939			goto out_unlock;
1940		}
1941
1942		if (folio_isolate_lru(folio)) {
1943			result = SCAN_DEL_PAGE_LRU;
1944			goto out_unlock;
1945		}
1946
1947		if (folio_has_private(folio) &&
1948		    !filemap_release_folio(folio, GFP_KERNEL)) {
1949			result = SCAN_PAGE_HAS_PRIVATE;
1950			folio_putback_lru(folio);
1951			goto out_unlock;
1952		}
1953
1954		if (folio_mapped(folio))
1955			try_to_unmap(folio,
1956					TTU_IGNORE_MLOCK | TTU_BATCH_FLUSH);
1957
1958		xas_lock_irq(&xas);
1959		xas_set(&xas, index);
1960
1961		VM_BUG_ON_PAGE(page != xas_load(&xas), page);
 
1962
1963		/*
1964		 * The page is expected to have page_count() == 3:
1965		 *  - we hold a pin on it;
1966		 *  - one reference from page cache;
1967		 *  - one from isolate_lru_page;
1968		 */
1969		if (!page_ref_freeze(page, 3)) {
1970			result = SCAN_PAGE_COUNT;
1971			xas_unlock_irq(&xas);
1972			putback_lru_page(page);
1973			goto out_unlock;
1974		}
1975
1976		/*
1977		 * Add the page to the list to be able to undo the collapse if
1978		 * something goes wrong.
1979		 */
1980		list_add_tail(&page->lru, &pagelist);
1981
1982		/* Finally, replace with the new page. */
1983		xas_store(&xas, hpage);
1984		continue;
1985out_unlock:
1986		unlock_page(page);
1987		put_page(page);
1988		goto xa_unlocked;
1989	}
1990	nr = thp_nr_pages(hpage);
1991
1992	if (is_shmem)
1993		__mod_lruvec_page_state(hpage, NR_SHMEM_THPS, nr);
1994	else {
1995		__mod_lruvec_page_state(hpage, NR_FILE_THPS, nr);
1996		filemap_nr_thps_inc(mapping);
1997		/*
1998		 * Paired with smp_mb() in do_dentry_open() to ensure
1999		 * i_writecount is up to date and the update to nr_thps is
2000		 * visible. Ensures the page cache will be truncated if the
2001		 * file is opened writable.
2002		 */
2003		smp_mb();
2004		if (inode_is_open_for_write(mapping->host)) {
2005			result = SCAN_FAIL;
2006			__mod_lruvec_page_state(hpage, NR_FILE_THPS, -nr);
2007			filemap_nr_thps_dec(mapping);
2008			goto xa_locked;
2009		}
2010	}
2011
2012	if (nr_none) {
2013		__mod_lruvec_page_state(hpage, NR_FILE_PAGES, nr_none);
2014		/* nr_none is always 0 for non-shmem. */
2015		__mod_lruvec_page_state(hpage, NR_SHMEM, nr_none);
2016	}
2017
2018	/* Join all the small entries into a single multi-index entry */
2019	xas_set_order(&xas, start, HPAGE_PMD_ORDER);
2020	xas_store(&xas, hpage);
2021xa_locked:
2022	xas_unlock_irq(&xas);
2023xa_unlocked:
2024
2025	/*
2026	 * If collapse is successful, flush must be done now before copying.
2027	 * If collapse is unsuccessful, does flush actually need to be done?
2028	 * Do it anyway, to clear the state.
2029	 */
2030	try_to_unmap_flush();
2031
2032	if (result == SCAN_SUCCEED) {
2033		struct page *page, *tmp;
2034		struct folio *folio;
2035
2036		/*
2037		 * Replacing the old pages with the new one has succeeded; now we
2038		 * need to copy the content and free the old pages.
2039		 */
2040		index = start;
2041		list_for_each_entry_safe(page, tmp, &pagelist, lru) {
2042			while (index < page->index) {
2043				clear_highpage(hpage + (index % HPAGE_PMD_NR));
2044				index++;
2045			}
2046			copy_highpage(hpage + (page->index % HPAGE_PMD_NR),
2047				      page);
2048			list_del(&page->lru);
2049			page->mapping = NULL;
2050			page_ref_unfreeze(page, 1);
2051			ClearPageActive(page);
2052			ClearPageUnevictable(page);
2053			unlock_page(page);
2054			put_page(page);
2055			index++;
2056		}
2057		while (index < end) {
2058			clear_highpage(hpage + (index % HPAGE_PMD_NR));
2059			index++;
2060		}
2061
2062		folio = page_folio(hpage);
2063		folio_mark_uptodate(folio);
2064		folio_ref_add(folio, HPAGE_PMD_NR - 1);
2065
2066		if (is_shmem)
2067			folio_mark_dirty(folio);
2068		folio_add_lru(folio);
2069
2070		/*
2071		 * Remove pte page tables, so we can re-fault the page as huge.
2072		 */
2073		result = retract_page_tables(mapping, start, mm, addr, hpage,
2074					     cc);
2075		unlock_page(hpage);
2076		hpage = NULL;
2077	} else {
2078		struct page *page;
2079
2080		/* Something went wrong: roll back page cache changes */
2081		xas_lock_irq(&xas);
2082		if (nr_none) {
2083			mapping->nrpages -= nr_none;
 
2084			shmem_uncharge(mapping->host, nr_none);
2085		}
2086
2087		xas_set(&xas, start);
2088		xas_for_each(&xas, page, end - 1) {
2089			page = list_first_entry_or_null(&pagelist,
2090					struct page, lru);
2091			if (!page || xas.xa_index < page->index) {
2092				if (!nr_none)
2093					break;
2094				nr_none--;
2095				/* Put holes back where they were */
2096				xas_store(&xas, NULL);
2097				continue;
2098			}
2099
2100			VM_BUG_ON_PAGE(page->index != xas.xa_index, page);
2101
2102			/* Unfreeze the page. */
2103			list_del(&page->lru);
2104			page_ref_unfreeze(page, 2);
2105			xas_store(&xas, page);
2106			xas_pause(&xas);
2107			xas_unlock_irq(&xas);
2108			unlock_page(page);
2109			putback_lru_page(page);
2110			xas_lock_irq(&xas);
2111		}
2112		VM_BUG_ON(nr_none);
2113		xas_unlock_irq(&xas);
2114
2115		hpage->mapping = NULL;
 
2116	}
2117
2118	if (hpage)
2119		unlock_page(hpage);
2120out:
2121	VM_BUG_ON(!list_empty(&pagelist));
2122	if (hpage) {
2123		mem_cgroup_uncharge(page_folio(hpage));
2124		put_page(hpage);
2125	}
2126
2127	trace_mm_khugepaged_collapse_file(mm, hpage, index, is_shmem, addr, file, nr, result);
2128	return result;
2129}
2130
2131static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
2132				    struct file *file, pgoff_t start,
2133				    struct collapse_control *cc)
2134{
2135	struct page *page = NULL;
2136	struct address_space *mapping = file->f_mapping;
2137	XA_STATE(xas, &mapping->i_pages, start);
2138	int present, swap;
2139	int node = NUMA_NO_NODE;
2140	int result = SCAN_SUCCEED;
2141
2142	present = 0;
2143	swap = 0;
2144	memset(cc->node_load, 0, sizeof(cc->node_load));
2145	nodes_clear(cc->alloc_nmask);
2146	rcu_read_lock();
2147	xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
2148		if (xas_retry(&xas, page))
2149			continue;
2150
2151		if (xa_is_value(page)) {
2152			++swap;
2153			if (cc->is_khugepaged &&
2154			    swap > khugepaged_max_ptes_swap) {
2155				result = SCAN_EXCEED_SWAP_PTE;
2156				count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
2157				break;
2158			}
2159			continue;
2160		}
2161
2162		/*
2163		 * TODO: khugepaged should compact smaller compound pages
2164		 * into a PMD sized page
2165		 */
2166		if (PageTransCompound(page)) {
2167			struct page *head = compound_head(page);
2168
2169			result = compound_order(head) == HPAGE_PMD_ORDER &&
2170					head->index == start
2171					/* Maybe PMD-mapped */
2172					? SCAN_PTE_MAPPED_HUGEPAGE
2173					: SCAN_PAGE_COMPOUND;
2174			/*
2175			 * For SCAN_PTE_MAPPED_HUGEPAGE, further processing
2176			 * by the caller won't touch the page cache, and so
2177			 * it's safe to skip LRU and refcount checks before
2178			 * returning.
2179			 */
2180			break;
2181		}
2182
2183		node = page_to_nid(page);
2184		if (hpage_collapse_scan_abort(node, cc)) {
2185			result = SCAN_SCAN_ABORT;
2186			break;
2187		}
2188		cc->node_load[node]++;
2189
2190		if (!PageLRU(page)) {
2191			result = SCAN_PAGE_LRU;
2192			break;
2193		}
2194
2195		if (page_count(page) !=
2196		    1 + page_mapcount(page) + page_has_private(page)) {
2197			result = SCAN_PAGE_COUNT;
2198			break;
2199		}
2200
2201		/*
2202		 * We probably should check if the page is referenced here, but
2203		 * nobody would transfer pte_young() to PageReferenced() for us.
2204		 * And rmap walk here is just too costly...
2205		 */
2206
2207		present++;
2208
2209		if (need_resched()) {
2210			xas_pause(&xas);
2211			cond_resched_rcu();
2212		}
2213	}
2214	rcu_read_unlock();
2215
2216	if (result == SCAN_SUCCEED) {
2217		if (cc->is_khugepaged &&
2218		    present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
2219			result = SCAN_EXCEED_NONE_PTE;
2220			count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
2221		} else {
2222			result = collapse_file(mm, addr, file, start, cc);
 
2223		}
2224	}
2225
2226	trace_mm_khugepaged_scan_file(mm, page, file, present, swap, result);
2227	return result;
2228}
2229#else
2230static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
2231				    struct file *file, pgoff_t start,
2232				    struct collapse_control *cc)
2233{
2234	BUILD_BUG();
2235}
2236
2237static void khugepaged_collapse_pte_mapped_thps(struct khugepaged_mm_slot *mm_slot)
2238{
2239}
2240
2241static bool khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
2242					  unsigned long addr)
2243{
2244	return false;
2245}
2246#endif
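
A quick feel for the present < HPAGE_PMD_NR - khugepaged_max_ptes_none cutoff applied in hpage_collapse_scan_file() above (it is only enforced for khugepaged-initiated scans, i.e. when cc->is_khugepaged): with the usual 4 KiB base pages (HPAGE_PMD_NR == 512) and the default max_ptes_none of HPAGE_PMD_NR - 1, a single present page is already enough to attempt collapse_file(). A minimal userspace sketch of that arithmetic; the constants are assumptions, not values read from a running kernel:

#include <stdio.h>

int main(void)
{
	unsigned int hpage_pmd_nr = 512;               /* assumed: 2 MiB PMD / 4 KiB pages */
	unsigned int max_ptes_none = hpage_pmd_nr - 1; /* default set in khugepaged_init() */
	unsigned int present = 1;                      /* pages found during the scan */

	if (present < hpage_pmd_nr - max_ptes_none)
		printf("SCAN_EXCEED_NONE_PTE: range too sparse to collapse\n");
	else
		printf("dense enough: collapse_file() would be attempted\n");
	return 0;
}
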
2247
2248static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
2249					    struct collapse_control *cc)
2250	__releases(&khugepaged_mm_lock)
2251	__acquires(&khugepaged_mm_lock)
2252{
2253	struct vma_iterator vmi;
2254	struct khugepaged_mm_slot *mm_slot;
2255	struct mm_slot *slot;
2256	struct mm_struct *mm;
2257	struct vm_area_struct *vma;
2258	int progress = 0;
2259
2260	VM_BUG_ON(!pages);
2261	lockdep_assert_held(&khugepaged_mm_lock);
2262	*result = SCAN_FAIL;
2263
2264	if (khugepaged_scan.mm_slot) {
2265		mm_slot = khugepaged_scan.mm_slot;
2266		slot = &mm_slot->slot;
2267	} else {
2268		slot = list_entry(khugepaged_scan.mm_head.next,
2269				     struct mm_slot, mm_node);
2270		mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
2271		khugepaged_scan.address = 0;
2272		khugepaged_scan.mm_slot = mm_slot;
2273	}
2274	spin_unlock(&khugepaged_mm_lock);
2275	khugepaged_collapse_pte_mapped_thps(mm_slot);
2276
2277	mm = slot->mm;
2278	/*
2279	 * Don't wait for semaphore (to avoid long wait times).  Just move to
2280	 * the next mm on the list.
2281	 */
2282	vma = NULL;
2283	if (unlikely(!mmap_read_trylock(mm)))
2284		goto breakouterloop_mmap_lock;
2285
2286	progress++;
2287	if (unlikely(hpage_collapse_test_exit(mm)))
2288		goto breakouterloop;
2289
2290	vma_iter_init(&vmi, mm, khugepaged_scan.address);
2291	for_each_vma(vmi, vma) {
2292		unsigned long hstart, hend;
2293
2294		cond_resched();
2295		if (unlikely(hpage_collapse_test_exit(mm))) {
2296			progress++;
2297			break;
2298		}
2299		if (!hugepage_vma_check(vma, vma->vm_flags, false, false, true)) {
2300skip:
2301			progress++;
2302			continue;
2303		}
2304		hstart = round_up(vma->vm_start, HPAGE_PMD_SIZE);
2305		hend = round_down(vma->vm_end, HPAGE_PMD_SIZE);
2306		if (khugepaged_scan.address > hend)
2307			goto skip;
2308		if (khugepaged_scan.address < hstart)
2309			khugepaged_scan.address = hstart;
2310		VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2311
2312		while (khugepaged_scan.address < hend) {
2313			bool mmap_locked = true;
2314
2315			cond_resched();
2316			if (unlikely(hpage_collapse_test_exit(mm)))
2317				goto breakouterloop;
2318
2319			VM_BUG_ON(khugepaged_scan.address < hstart ||
2320				  khugepaged_scan.address + HPAGE_PMD_SIZE >
2321				  hend);
2322			if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
2323				struct file *file = get_file(vma->vm_file);
2324				pgoff_t pgoff = linear_page_index(vma,
2325						khugepaged_scan.address);
2326
2327				mmap_read_unlock(mm);
2328				*result = hpage_collapse_scan_file(mm,
2329								   khugepaged_scan.address,
2330								   file, pgoff, cc);
2331				mmap_locked = false;
 
 
2332				fput(file);
2333			} else {
2334				*result = hpage_collapse_scan_pmd(mm, vma,
2335								  khugepaged_scan.address,
2336								  &mmap_locked,
2337								  cc);
2338			}
2339			switch (*result) {
2340			case SCAN_PTE_MAPPED_HUGEPAGE: {
2341				pmd_t *pmd;
2342
2343				*result = find_pmd_or_thp_or_none(mm,
2344								  khugepaged_scan.address,
2345								  &pmd);
2346				if (*result != SCAN_SUCCEED)
2347					break;
2348				if (!khugepaged_add_pte_mapped_thp(mm,
2349								   khugepaged_scan.address))
2350					break;
2351			} fallthrough;
2352			case SCAN_SUCCEED:
2353				++khugepaged_pages_collapsed;
2354				break;
2355			default:
2356				break;
2357			}
2358
2359			/* move to next address */
2360			khugepaged_scan.address += HPAGE_PMD_SIZE;
2361			progress += HPAGE_PMD_NR;
2362			if (!mmap_locked)
2363				/*
2364				 * We released mmap_lock, so break the loop.  Note
2365				 * that we drop mmap_lock before all hugepage
2366				 * allocations, so if allocation fails, we are
2367				 * guaranteed to break here and report the
2368				 * correct result back to caller.
2369				 */
2370				goto breakouterloop_mmap_lock;
2371			if (progress >= pages)
2372				goto breakouterloop;
2373		}
2374	}
2375breakouterloop:
2376	mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
2377breakouterloop_mmap_lock:
2378
2379	spin_lock(&khugepaged_mm_lock);
2380	VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2381	/*
2382	 * Release the current mm_slot if this mm is about to die, or
2383	 * if we scanned all vmas of this mm.
2384	 */
2385	if (hpage_collapse_test_exit(mm) || !vma) {
2386		/*
2387		 * Make sure that if mm_users is reaching zero while
2388		 * khugepaged runs here, khugepaged_exit will find
2389		 * mm_slot not pointing to the exiting mm.
2390		 */
2391		if (slot->mm_node.next != &khugepaged_scan.mm_head) {
2392			slot = list_entry(slot->mm_node.next,
2393					  struct mm_slot, mm_node);
2394			khugepaged_scan.mm_slot =
2395				mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
2396			khugepaged_scan.address = 0;
2397		} else {
2398			khugepaged_scan.mm_slot = NULL;
2399			khugepaged_full_scans++;
2400		}
2401
2402		collect_mm_slot(mm_slot);
2403	}
2404
2405	return progress;
2406}
2407
2408static int khugepaged_has_work(void)
2409{
2410	return !list_empty(&khugepaged_scan.mm_head) &&
2411		hugepage_flags_enabled();
2412}
2413
2414static int khugepaged_wait_event(void)
2415{
2416	return !list_empty(&khugepaged_scan.mm_head) ||
2417		kthread_should_stop();
2418}
2419
2420static void khugepaged_do_scan(struct collapse_control *cc)
2421{
 
2422	unsigned int progress = 0, pass_through_head = 0;
2423	unsigned int pages = READ_ONCE(khugepaged_pages_to_scan);
2424	bool wait = true;
2425	int result = SCAN_SUCCEED;
2426
2427	lru_add_drain_all();
2428
2429	while (true) {
2430		cond_resched();
2431
2432		if (unlikely(kthread_should_stop() || try_to_freeze()))
2433			break;
2434
2435		spin_lock(&khugepaged_mm_lock);
2436		if (!khugepaged_scan.mm_slot)
2437			pass_through_head++;
2438		if (khugepaged_has_work() &&
2439		    pass_through_head < 2)
2440			progress += khugepaged_scan_mm_slot(pages - progress,
2441							    &result, cc);
2442		else
2443			progress = pages;
2444		spin_unlock(&khugepaged_mm_lock);
2445
2446		if (progress >= pages)
2447			break;
2448
2449		if (result == SCAN_ALLOC_HUGE_PAGE_FAIL) {
2450			/*
2451			 * If we fail to allocate the first time, try to sleep for
2452			 * a while.  If we hit the failure again, cancel the scan.
2453			 */
2454			if (!wait)
2455				break;
2456			wait = false;
2457			khugepaged_alloc_sleep();
2458		}
2459	}
2460}
2461
2462static bool khugepaged_should_wakeup(void)
2463{
2464	return kthread_should_stop() ||
2465	       time_after_eq(jiffies, khugepaged_sleep_expire);
2466}
2467
2468static void khugepaged_wait_work(void)
2469{
2470	if (khugepaged_has_work()) {
2471		const unsigned long scan_sleep_jiffies =
2472			msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
2473
2474		if (!scan_sleep_jiffies)
2475			return;
2476
2477		khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
2478		wait_event_freezable_timeout(khugepaged_wait,
2479					     khugepaged_should_wakeup(),
2480					     scan_sleep_jiffies);
2481		return;
2482	}
2483
2484	if (hugepage_flags_enabled())
2485		wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
2486}
2487
2488static int khugepaged(void *none)
2489{
2490	struct khugepaged_mm_slot *mm_slot;
2491
2492	set_freezable();
2493	set_user_nice(current, MAX_NICE);
2494
2495	while (!kthread_should_stop()) {
2496		khugepaged_do_scan(&khugepaged_collapse_control);
2497		khugepaged_wait_work();
2498	}
2499
2500	spin_lock(&khugepaged_mm_lock);
2501	mm_slot = khugepaged_scan.mm_slot;
2502	khugepaged_scan.mm_slot = NULL;
2503	if (mm_slot)
2504		collect_mm_slot(mm_slot);
2505	spin_unlock(&khugepaged_mm_lock);
2506	return 0;
2507}
2508
2509static void set_recommended_min_free_kbytes(void)
2510{
2511	struct zone *zone;
2512	int nr_zones = 0;
2513	unsigned long recommended_min;
2514
2515	if (!hugepage_flags_enabled()) {
2516		calculate_min_free_kbytes();
2517		goto update_wmarks;
2518	}
2519
2520	for_each_populated_zone(zone) {
2521		/*
2522		 * We don't need to worry about fragmentation of
2523		 * ZONE_MOVABLE since it only has movable pages.
2524		 */
2525		if (zone_idx(zone) > gfp_zone(GFP_USER))
2526			continue;
2527
2528		nr_zones++;
2529	}
2530
2531	/* Ensure 2 pageblocks are free to assist fragmentation avoidance */
2532	recommended_min = pageblock_nr_pages * nr_zones * 2;
2533
2534	/*
2535	 * Make sure that on average at least two pageblocks are almost free
2536	 * of another type, one for a migratetype to fall back to and a
2537	 * second to avoid subsequent fallbacks of other types.  There are 3
2538	 * MIGRATE_TYPES we care about.
2539	 */
2540	recommended_min += pageblock_nr_pages * nr_zones *
2541			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
2542
2543	/* never allow reserving more than 5% of the lowmem */
2544	recommended_min = min(recommended_min,
2545			      (unsigned long) nr_free_buffer_pages() / 20);
2546	recommended_min <<= (PAGE_SHIFT-10);
2547
2548	if (recommended_min > min_free_kbytes) {
2549		if (user_min_free_kbytes >= 0)
2550			pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
2551				min_free_kbytes, recommended_min);
2552
2553		min_free_kbytes = recommended_min;
2554	}
2555
2556update_wmarks:
2557	setup_per_zone_wmarks();
2558}
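
To make the sizing above concrete, here is an illustrative-only sketch of the same arithmetic with assumed example values (4 KiB pages, 2 MiB pageblocks so pageblock_nr_pages == 512, and two eligible zones); it is not part of the kernel source:

#include <stdio.h>

int main(void)
{
	unsigned long pageblock_nr_pages = 512;	/* assumed: 2 MiB pageblocks / 4 KiB pages */
	unsigned long nr_zones = 2;		/* assumed: e.g. DMA32 + Normal */
	unsigned long migrate_pcptypes = 3;	/* the 3 MIGRATE_TYPES mentioned above */
	unsigned long recommended_min;

	recommended_min  = pageblock_nr_pages * nr_zones * 2;		/* 2048 pages */
	recommended_min += pageblock_nr_pages * nr_zones *
			   migrate_pcptypes * migrate_pcptypes;		/* + 9216 pages */
	/* PAGE_SHIFT - 10 == 2 for 4 KiB pages: convert pages to kilobytes */
	printf("recommended_min = %lu kB (before the 5%% lowmem clamp)\n",
	       recommended_min << 2);
	return 0;
}
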
2559
2560int start_stop_khugepaged(void)
2561{
2562	int err = 0;
2563
2564	mutex_lock(&khugepaged_mutex);
2565	if (hugepage_flags_enabled()) {
2566		if (!khugepaged_thread)
2567			khugepaged_thread = kthread_run(khugepaged, NULL,
2568							"khugepaged");
2569		if (IS_ERR(khugepaged_thread)) {
2570			pr_err("khugepaged: kthread_run(khugepaged) failed\n");
2571			err = PTR_ERR(khugepaged_thread);
2572			khugepaged_thread = NULL;
2573			goto fail;
2574		}
2575
2576		if (!list_empty(&khugepaged_scan.mm_head))
2577			wake_up_interruptible(&khugepaged_wait);
2578	} else if (khugepaged_thread) {
2579		kthread_stop(khugepaged_thread);
2580		khugepaged_thread = NULL;
2581	}
2582	set_recommended_min_free_kbytes();
2583fail:
2584	mutex_unlock(&khugepaged_mutex);
2585	return err;
2586}
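
start_stop_khugepaged() is driven by hugepage_flags_enabled(), which userspace toggles through the transparent_hugepage "enabled" sysfs file. A minimal sketch of flipping that switch; it assumes the conventional sysfs mount point and sufficient privileges:

#include <stdio.h>

/* Write "always", "madvise" or "never" to the THP "enabled" knob. */
static int set_thp_enabled(const char *mode)
{
	FILE *f = fopen("/sys/kernel/mm/transparent_hugepage/enabled", "w");

	if (!f)
		return -1;
	fprintf(f, "%s\n", mode);
	return fclose(f);
}

int main(void)
{
	/* Enabling THP (re)starts the khugepaged thread; "never" stops it. */
	return set_thp_enabled("madvise") ? 1 : 0;
}
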
2587
2588void khugepaged_min_free_kbytes_update(void)
2589{
2590	mutex_lock(&khugepaged_mutex);
2591	if (hugepage_flags_enabled() && khugepaged_thread)
2592		set_recommended_min_free_kbytes();
2593	mutex_unlock(&khugepaged_mutex);
2594}
2595
2596bool current_is_khugepaged(void)
2597{
2598	return kthread_func(current) == khugepaged;
2599}
2600
2601static int madvise_collapse_errno(enum scan_result r)
2602{
2603	/*
2604	 * MADV_COLLAPSE breaks from existing madvise(2) conventions to provide
2605	 * actionable feedback to caller, so they may take an appropriate
2606	 * fallback measure depending on the nature of the failure.
2607	 */
2608	switch (r) {
2609	case SCAN_ALLOC_HUGE_PAGE_FAIL:
2610		return -ENOMEM;
2611	case SCAN_CGROUP_CHARGE_FAIL:
2612		return -EBUSY;
2613	/* Resource temporarily unavailable - trying again might succeed */
2614	case SCAN_PAGE_COUNT:
2615	case SCAN_PAGE_LOCK:
2616	case SCAN_PAGE_LRU:
2617	case SCAN_DEL_PAGE_LRU:
2618		return -EAGAIN;
2619	/*
2620	 * Other: Trying again likely not to succeed / error intrinsic to
2621	 * specified memory range. khugepaged likely won't be able to collapse
2622	 * either.
2623	 */
2624	default:
2625		return -EINVAL;
2626	}
2627}
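
From userspace these scan results surface as the errno of madvise(MADV_COLLAPSE). A hedged sketch of a caller follows; MADV_COLLAPSE comes from the uapi headers (25 on asm-generic architectures), and the buffer size and retry policy are only examples. Success (0) is reported only when every PMD-sized unit of the requested range ends up, or already was, backed by a huge page, matching the final check in madvise_collapse() below.

#define _GNU_SOURCE
#include <errno.h>
#include <sys/mman.h>

#ifndef MADV_COLLAPSE
#define MADV_COLLAPSE 25	/* uapi value on asm-generic architectures */
#endif

/* Ask the kernel to collapse a PMD-aligned region into huge pages now. */
static int try_collapse(void *addr, size_t len)
{
	if (madvise(addr, len, MADV_COLLAPSE) == 0)
		return 0;

	switch (errno) {
	case EAGAIN:	/* transient (page locked, off-LRU, ...) */
	case ENOMEM:	/* huge page allocation failed */
	case EBUSY:	/* memcg charge failed: worth retrying later */
		return 1;
	default:	/* EINVAL etc.: range unlikely to ever collapse */
		return -1;
	}
}

int main(void)
{
	size_t len = 4UL << 20;		/* 4 MiB: at least one full 2 MiB unit fits */
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	size_t i;

	if (buf == MAP_FAILED)
		return 1;
	for (i = 0; i < len; i += 4096)
		buf[i] = 1;		/* fault in small pages first */
	return try_collapse(buf, len) ? 1 : 0;
}
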
2628
2629int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
2630		     unsigned long start, unsigned long end)
2631{
2632	struct collapse_control *cc;
2633	struct mm_struct *mm = vma->vm_mm;
2634	unsigned long hstart, hend, addr;
2635	int thps = 0, last_fail = SCAN_FAIL;
2636	bool mmap_locked = true;
2637
2638	BUG_ON(vma->vm_start > start);
2639	BUG_ON(vma->vm_end < end);
2640
2641	*prev = vma;
2642
2643	if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false))
2644		return -EINVAL;
2645
2646	cc = kmalloc(sizeof(*cc), GFP_KERNEL);
2647	if (!cc)
2648		return -ENOMEM;
2649	cc->is_khugepaged = false;
2650
2651	mmgrab(mm);
2652	lru_add_drain_all();
2653
2654	hstart = (start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2655	hend = end & HPAGE_PMD_MASK;
2656
2657	for (addr = hstart; addr < hend; addr += HPAGE_PMD_SIZE) {
2658		int result = SCAN_FAIL;
2659
2660		if (!mmap_locked) {
2661			cond_resched();
2662			mmap_read_lock(mm);
2663			mmap_locked = true;
2664			result = hugepage_vma_revalidate(mm, addr, false, &vma,
2665							 cc);
2666			if (result  != SCAN_SUCCEED) {
2667				last_fail = result;
2668				goto out_nolock;
2669			}
2670
2671			hend = min(hend, vma->vm_end & HPAGE_PMD_MASK);
2672		}
2673		mmap_assert_locked(mm);
2674		memset(cc->node_load, 0, sizeof(cc->node_load));
2675		nodes_clear(cc->alloc_nmask);
2676		if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
2677			struct file *file = get_file(vma->vm_file);
2678			pgoff_t pgoff = linear_page_index(vma, addr);
2679
2680			mmap_read_unlock(mm);
2681			mmap_locked = false;
2682			result = hpage_collapse_scan_file(mm, addr, file, pgoff,
2683							  cc);
2684			fput(file);
2685		} else {
2686			result = hpage_collapse_scan_pmd(mm, vma, addr,
2687							 &mmap_locked, cc);
2688		}
2689		if (!mmap_locked)
2690			*prev = NULL;  /* Tell caller we dropped mmap_lock */
2691
2692handle_result:
2693		switch (result) {
2694		case SCAN_SUCCEED:
2695		case SCAN_PMD_MAPPED:
2696			++thps;
2697			break;
2698		case SCAN_PTE_MAPPED_HUGEPAGE:
2699			BUG_ON(mmap_locked);
2700			BUG_ON(*prev);
2701			mmap_write_lock(mm);
2702			result = collapse_pte_mapped_thp(mm, addr, true);
2703			mmap_write_unlock(mm);
2704			goto handle_result;
2705		/* Whitelisted set of results where continuing is OK */
2706		case SCAN_PMD_NULL:
2707		case SCAN_PTE_NON_PRESENT:
2708		case SCAN_PTE_UFFD_WP:
2709		case SCAN_PAGE_RO:
2710		case SCAN_LACK_REFERENCED_PAGE:
2711		case SCAN_PAGE_NULL:
2712		case SCAN_PAGE_COUNT:
2713		case SCAN_PAGE_LOCK:
2714		case SCAN_PAGE_COMPOUND:
2715		case SCAN_PAGE_LRU:
2716		case SCAN_DEL_PAGE_LRU:
2717			last_fail = result;
2718			break;
2719		default:
2720			last_fail = result;
2721			/* Other error, exit */
2722			goto out_maybelock;
2723		}
2724	}
2725
2726out_maybelock:
2727	/* Caller expects us to hold mmap_lock on return */
2728	if (!mmap_locked)
2729		mmap_read_lock(mm);
2730out_nolock:
2731	mmap_assert_locked(mm);
2732	mmdrop(mm);
2733	kfree(cc);
2734
2735	return thps == ((hend - hstart) >> HPAGE_PMD_SHIFT) ? 0
2736			: madvise_collapse_errno(last_fail);
2737}
v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   3
   4#include <linux/mm.h>
   5#include <linux/sched.h>
   6#include <linux/sched/mm.h>
   7#include <linux/sched/coredump.h>
   8#include <linux/mmu_notifier.h>
   9#include <linux/rmap.h>
  10#include <linux/swap.h>
  11#include <linux/mm_inline.h>
  12#include <linux/kthread.h>
  13#include <linux/khugepaged.h>
  14#include <linux/freezer.h>
  15#include <linux/mman.h>
  16#include <linux/hashtable.h>
  17#include <linux/userfaultfd_k.h>
  18#include <linux/page_idle.h>
 
  19#include <linux/swapops.h>
  20#include <linux/shmem_fs.h>
  21
  22#include <asm/tlb.h>
  23#include <asm/pgalloc.h>
  24#include "internal.h"
 
  25
  26enum scan_result {
  27	SCAN_FAIL,
  28	SCAN_SUCCEED,
  29	SCAN_PMD_NULL,
 
 
  30	SCAN_EXCEED_NONE_PTE,
 
 
  31	SCAN_PTE_NON_PRESENT,
 
 
  32	SCAN_PAGE_RO,
  33	SCAN_LACK_REFERENCED_PAGE,
  34	SCAN_PAGE_NULL,
  35	SCAN_SCAN_ABORT,
  36	SCAN_PAGE_COUNT,
  37	SCAN_PAGE_LRU,
  38	SCAN_PAGE_LOCK,
  39	SCAN_PAGE_ANON,
  40	SCAN_PAGE_COMPOUND,
  41	SCAN_ANY_PROCESS,
  42	SCAN_VMA_NULL,
  43	SCAN_VMA_CHECK,
  44	SCAN_ADDRESS_RANGE,
  45	SCAN_SWAP_CACHE_PAGE,
  46	SCAN_DEL_PAGE_LRU,
  47	SCAN_ALLOC_HUGE_PAGE_FAIL,
  48	SCAN_CGROUP_CHARGE_FAIL,
  49	SCAN_EXCEED_SWAP_PTE,
  50	SCAN_TRUNCATED,
  51	SCAN_PAGE_HAS_PRIVATE,
  52};
  53
  54#define CREATE_TRACE_POINTS
  55#include <trace/events/huge_memory.h>
  56
  57/* default scan 8*512 ptes (or vmas) every 30 seconds */
  58static unsigned int khugepaged_pages_to_scan __read_mostly;
  59static unsigned int khugepaged_pages_collapsed;
  60static unsigned int khugepaged_full_scans;
  61static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
  62/* during fragmentation poll the hugepage allocator once every minute */
  63static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
  64static unsigned long khugepaged_sleep_expire;
  65static DEFINE_SPINLOCK(khugepaged_mm_lock);
  66static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
  67/*
  68 * default collapse hugepages if there is at least one pte mapped like
  69 * it would have happened if the vma was large enough during page
  70 * fault.
 
 
  71 */
  72static unsigned int khugepaged_max_ptes_none __read_mostly;
  73static unsigned int khugepaged_max_ptes_swap __read_mostly;
 
  74
  75#define MM_SLOTS_HASH_BITS 10
  76static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
  77
  78static struct kmem_cache *mm_slot_cache __read_mostly;
  79
  80#define MAX_PTE_MAPPED_THP 8
  81
  82/**
  83 * struct mm_slot - hash lookup from mm to mm_slot
  84 * @hash: hash collision list
  85 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
  86 * @mm: the mm that this information is valid for
  87 */
  88struct mm_slot {
  89	struct hlist_node hash;
  90	struct list_head mm_node;
  91	struct mm_struct *mm;
  92
  93	/* pte-mapped THP in this mm */
  94	int nr_pte_mapped_thp;
  95	unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
  96};
  97
  98/**
  99 * struct khugepaged_scan - cursor for scanning
 100 * @mm_head: the head of the mm list to scan
 101 * @mm_slot: the current mm_slot we are scanning
 102 * @address: the next address inside that to be scanned
 103 *
 104 * There is only the one khugepaged_scan instance of this cursor structure.
 105 */
 106struct khugepaged_scan {
 107	struct list_head mm_head;
 108	struct mm_slot *mm_slot;
 109	unsigned long address;
 110};
 111
 112static struct khugepaged_scan khugepaged_scan = {
 113	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
 114};
 115
 116#ifdef CONFIG_SYSFS
 117static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
 118					 struct kobj_attribute *attr,
 119					 char *buf)
 120{
 121	return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
 122}
 123
 124static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
 125					  struct kobj_attribute *attr,
 126					  const char *buf, size_t count)
 127{
 128	unsigned long msecs;
 129	int err;
 130
 131	err = kstrtoul(buf, 10, &msecs);
 132	if (err || msecs > UINT_MAX)
 133		return -EINVAL;
 134
 135	khugepaged_scan_sleep_millisecs = msecs;
 136	khugepaged_sleep_expire = 0;
 137	wake_up_interruptible(&khugepaged_wait);
 138
 139	return count;
 140}
 141static struct kobj_attribute scan_sleep_millisecs_attr =
 142	__ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
 143	       scan_sleep_millisecs_store);
 144
 145static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
 146					  struct kobj_attribute *attr,
 147					  char *buf)
 148{
 149	return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
 150}
 151
 152static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
 153					   struct kobj_attribute *attr,
 154					   const char *buf, size_t count)
 155{
 156	unsigned long msecs;
 157	int err;
 158
 159	err = kstrtoul(buf, 10, &msecs);
 160	if (err || msecs > UINT_MAX)
 161		return -EINVAL;
 162
 163	khugepaged_alloc_sleep_millisecs = msecs;
 164	khugepaged_sleep_expire = 0;
 165	wake_up_interruptible(&khugepaged_wait);
 166
 167	return count;
 168}
 169static struct kobj_attribute alloc_sleep_millisecs_attr =
 170	__ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
 171	       alloc_sleep_millisecs_store);
 172
 173static ssize_t pages_to_scan_show(struct kobject *kobj,
 174				  struct kobj_attribute *attr,
 175				  char *buf)
 176{
 177	return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
 178}
 179static ssize_t pages_to_scan_store(struct kobject *kobj,
 180				   struct kobj_attribute *attr,
 181				   const char *buf, size_t count)
 182{
 
 183	int err;
 184	unsigned long pages;
 185
 186	err = kstrtoul(buf, 10, &pages);
 187	if (err || !pages || pages > UINT_MAX)
 188		return -EINVAL;
 189
 190	khugepaged_pages_to_scan = pages;
 191
 192	return count;
 193}
 194static struct kobj_attribute pages_to_scan_attr =
 195	__ATTR(pages_to_scan, 0644, pages_to_scan_show,
 196	       pages_to_scan_store);
 197
 198static ssize_t pages_collapsed_show(struct kobject *kobj,
 199				    struct kobj_attribute *attr,
 200				    char *buf)
 201{
 202	return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
 203}
 204static struct kobj_attribute pages_collapsed_attr =
 205	__ATTR_RO(pages_collapsed);
 206
 207static ssize_t full_scans_show(struct kobject *kobj,
 208			       struct kobj_attribute *attr,
 209			       char *buf)
 210{
 211	return sprintf(buf, "%u\n", khugepaged_full_scans);
 212}
 213static struct kobj_attribute full_scans_attr =
 214	__ATTR_RO(full_scans);
 215
 216static ssize_t khugepaged_defrag_show(struct kobject *kobj,
 217				      struct kobj_attribute *attr, char *buf)
 218{
 219	return single_hugepage_flag_show(kobj, attr, buf,
 220				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
 221}
 222static ssize_t khugepaged_defrag_store(struct kobject *kobj,
 223				       struct kobj_attribute *attr,
 224				       const char *buf, size_t count)
 225{
 226	return single_hugepage_flag_store(kobj, attr, buf, count,
 227				 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
 228}
 229static struct kobj_attribute khugepaged_defrag_attr =
 230	__ATTR(defrag, 0644, khugepaged_defrag_show,
 231	       khugepaged_defrag_store);
 232
 233/*
 234 * max_ptes_none controls if khugepaged should collapse hugepages over
 235 * any unmapped ptes in turn potentially increasing the memory
 236 * footprint of the vmas. When max_ptes_none is 0 khugepaged will not
 237 * reduce the available free memory in the system as it
 238 * runs. Increasing max_ptes_none will instead potentially reduce the
 239 * free memory in the system during the khugepaged scan.
 240 */
 241static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
 242					     struct kobj_attribute *attr,
 243					     char *buf)
 244{
 245	return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
 246}
 247static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
 248					      struct kobj_attribute *attr,
 249					      const char *buf, size_t count)
 250{
 251	int err;
 252	unsigned long max_ptes_none;
 253
 254	err = kstrtoul(buf, 10, &max_ptes_none);
 255	if (err || max_ptes_none > HPAGE_PMD_NR-1)
 256		return -EINVAL;
 257
 258	khugepaged_max_ptes_none = max_ptes_none;
 259
 260	return count;
 261}
 262static struct kobj_attribute khugepaged_max_ptes_none_attr =
 263	__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
 264	       khugepaged_max_ptes_none_store);
 265
 266static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
 267					     struct kobj_attribute *attr,
 268					     char *buf)
 269{
 270	return sprintf(buf, "%u\n", khugepaged_max_ptes_swap);
 271}
 272
 273static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
 274					      struct kobj_attribute *attr,
 275					      const char *buf, size_t count)
 276{
 277	int err;
 278	unsigned long max_ptes_swap;
 279
 280	err  = kstrtoul(buf, 10, &max_ptes_swap);
 281	if (err || max_ptes_swap > HPAGE_PMD_NR-1)
 282		return -EINVAL;
 283
 284	khugepaged_max_ptes_swap = max_ptes_swap;
 285
 286	return count;
 287}
 288
 289static struct kobj_attribute khugepaged_max_ptes_swap_attr =
 290	__ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
 291	       khugepaged_max_ptes_swap_store);
 292
 293static struct attribute *khugepaged_attr[] = {
 294	&khugepaged_defrag_attr.attr,
 295	&khugepaged_max_ptes_none_attr.attr,
 
 
 296	&pages_to_scan_attr.attr,
 297	&pages_collapsed_attr.attr,
 298	&full_scans_attr.attr,
 299	&scan_sleep_millisecs_attr.attr,
 300	&alloc_sleep_millisecs_attr.attr,
 301	&khugepaged_max_ptes_swap_attr.attr,
 302	NULL,
 303};
 304
 305struct attribute_group khugepaged_attr_group = {
 306	.attrs = khugepaged_attr,
 307	.name = "khugepaged",
 308};
 309#endif /* CONFIG_SYSFS */
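
The attributes above appear under /sys/kernel/mm/transparent_hugepage/khugepaged/. A minimal userspace sketch for tuning two of them; the path assumes the standard sysfs mount, and the chosen values are arbitrary examples:

#include <stdio.h>

static int write_khugepaged_knob(const char *name, unsigned long val)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/kernel/mm/transparent_hugepage/khugepaged/%s", name);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%lu\n", val);
	return fclose(f);
}

int main(void)
{
	/* Scan more pages per pass and wake every second instead of every 10 s. */
	if (write_khugepaged_knob("pages_to_scan", 4096))
		return 1;
	return write_khugepaged_knob("scan_sleep_millisecs", 1000) ? 1 : 0;
}
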
 310
 311#define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)
 312
 313int hugepage_madvise(struct vm_area_struct *vma,
 314		     unsigned long *vm_flags, int advice)
 315{
 316	switch (advice) {
 317	case MADV_HUGEPAGE:
 318#ifdef CONFIG_S390
 319		/*
 320		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
 321		 * can't handle this properly after s390_enable_sie, so we simply
 322		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
 323		 */
 324		if (mm_has_pgste(vma->vm_mm))
 325			return 0;
 326#endif
 327		*vm_flags &= ~VM_NOHUGEPAGE;
 328		*vm_flags |= VM_HUGEPAGE;
 329		/*
  330		 * If the vma becomes good for khugepaged to scan,
  331		 * register it here without waiting for a page fault that
 332		 * may not happen any time soon.
 333		 */
 334		if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
 335				khugepaged_enter_vma_merge(vma, *vm_flags))
 336			return -ENOMEM;
 337		break;
 338	case MADV_NOHUGEPAGE:
 339		*vm_flags &= ~VM_HUGEPAGE;
 340		*vm_flags |= VM_NOHUGEPAGE;
 341		/*
 342		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
 343		 * this vma even if we leave the mm registered in khugepaged if
 344		 * it got registered before VM_NOHUGEPAGE was set.
 345		 */
 346		break;
 347	}
 348
 349	return 0;
 350}
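
hugepage_madvise() above is the kernel side of the MADV_HUGEPAGE/MADV_NOHUGEPAGE advice. A minimal userspace counterpart might look like the following sketch; the buffer size and 2 MiB alignment are arbitrary examples:

#define _GNU_SOURCE
#include <stdlib.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 8UL << 20;				/* 8 MiB, example only */
	void *buf = aligned_alloc(2UL << 20, len);	/* PMD alignment helps collapse */

	if (!buf)
		return 1;
	madvise(buf, len, MADV_HUGEPAGE);	/* register the range with khugepaged */
	/* ... fault in and use buf ... */
	madvise(buf, len, MADV_NOHUGEPAGE);	/* khugepaged stops scanning it */
	free(buf);
	return 0;
}
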
 351
 352int __init khugepaged_init(void)
 353{
 354	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
 355					  sizeof(struct mm_slot),
 356					  __alignof__(struct mm_slot), 0, NULL);
 
 357	if (!mm_slot_cache)
 358		return -ENOMEM;
 359
 360	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
 361	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
 362	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
 
 363
 364	return 0;
 365}
 366
 367void __init khugepaged_destroy(void)
 368{
 369	kmem_cache_destroy(mm_slot_cache);
 370}
 371
 372static inline struct mm_slot *alloc_mm_slot(void)
 373{
 374	if (!mm_slot_cache)	/* initialization failed */
 375		return NULL;
 376	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
 377}
 378
 379static inline void free_mm_slot(struct mm_slot *mm_slot)
 380{
 381	kmem_cache_free(mm_slot_cache, mm_slot);
 382}
 383
 384static struct mm_slot *get_mm_slot(struct mm_struct *mm)
 385{
 386	struct mm_slot *mm_slot;
 387
 388	hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
 389		if (mm == mm_slot->mm)
 390			return mm_slot;
 391
 392	return NULL;
 393}
 394
 395static void insert_to_mm_slots_hash(struct mm_struct *mm,
 396				    struct mm_slot *mm_slot)
 397{
 398	mm_slot->mm = mm;
 399	hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
 400}
 401
 402static inline int khugepaged_test_exit(struct mm_struct *mm)
 403{
 404	return atomic_read(&mm->mm_users) == 0;
 405}
 406
 407static bool hugepage_vma_check(struct vm_area_struct *vma,
 408			       unsigned long vm_flags)
 409{
 410	if ((!(vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
 411	    (vm_flags & VM_NOHUGEPAGE) ||
 412	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
 413		return false;
 414
 415	if (shmem_file(vma->vm_file) ||
 416	    (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
 417	     vma->vm_file &&
 418	     (vm_flags & VM_DENYWRITE))) {
 419		if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
 420			return false;
 421		return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
 422				HPAGE_PMD_NR);
 423	}
 424	if (!vma->anon_vma || vma->vm_ops)
 425		return false;
 426	if (is_vma_temporary_stack(vma))
 427		return false;
 428	return !(vm_flags & VM_NO_KHUGEPAGED);
 429}
 430
 431int __khugepaged_enter(struct mm_struct *mm)
 432{
 433	struct mm_slot *mm_slot;
 
 434	int wakeup;
 435
 436	mm_slot = alloc_mm_slot();
 437	if (!mm_slot)
 438		return -ENOMEM;
 
 
 439
 440	/* __khugepaged_exit() must not run from under us */
 441	VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
 442	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
 443		free_mm_slot(mm_slot);
 444		return 0;
 445	}
 446
 447	spin_lock(&khugepaged_mm_lock);
 448	insert_to_mm_slots_hash(mm, mm_slot);
 449	/*
 450	 * Insert just behind the scanning cursor, to let the area settle
 451	 * down a little.
 452	 */
 453	wakeup = list_empty(&khugepaged_scan.mm_head);
 454	list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
 455	spin_unlock(&khugepaged_mm_lock);
 456
 457	mmgrab(mm);
 458	if (wakeup)
 459		wake_up_interruptible(&khugepaged_wait);
 460
 461	return 0;
 462}
 463
 464int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
 465			       unsigned long vm_flags)
 466{
 467	unsigned long hstart, hend;
 468
 469	/*
 470	 * khugepaged only supports read-only files for non-shmem files.
 471	 * khugepaged does not yet work on special mappings. And
 472	 * file-private shmem THP is not supported.
 473	 */
 474	if (!hugepage_vma_check(vma, vm_flags))
 475		return 0;
 476
 477	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
 478	hend = vma->vm_end & HPAGE_PMD_MASK;
 479	if (hstart < hend)
 480		return khugepaged_enter(vma, vm_flags);
 481	return 0;
 482}
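
The hstart/hend computation above trims a VMA to the PMD-aligned sub-range khugepaged can work on. A small sketch of the same rounding with made-up addresses, assuming a 2 MiB HPAGE_PMD_SIZE:

#include <stdio.h>

int main(void)
{
	unsigned long hpage_pmd_size = 0x200000UL;	/* assumed: 2 MiB */
	unsigned long hpage_pmd_mask = ~(hpage_pmd_size - 1);
	unsigned long vm_start = 0x7f0000123000UL;	/* hypothetical VMA bounds */
	unsigned long vm_end   = 0x7f0000923000UL;

	unsigned long hstart = (vm_start + ~hpage_pmd_mask) & hpage_pmd_mask;
	unsigned long hend   = vm_end & hpage_pmd_mask;

	/* Prints 0x7f0000200000..0x7f0000800000: three 2 MiB candidate ranges */
	printf("%#lx..%#lx\n", hstart, hend);
	return hstart < hend ? 0 : 1;
}
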
 483
 484void __khugepaged_exit(struct mm_struct *mm)
 485{
 486	struct mm_slot *mm_slot;
 
 487	int free = 0;
 488
 489	spin_lock(&khugepaged_mm_lock);
 490	mm_slot = get_mm_slot(mm);
 
 491	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
 492		hash_del(&mm_slot->hash);
 493		list_del(&mm_slot->mm_node);
 494		free = 1;
 495	}
 496	spin_unlock(&khugepaged_mm_lock);
 497
 498	if (free) {
 499		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
 500		free_mm_slot(mm_slot);
 501		mmdrop(mm);
 502	} else if (mm_slot) {
 503		/*
 504		 * This is required to serialize against
 505		 * khugepaged_test_exit() (which is guaranteed to run
 506		 * under mmap sem read mode). Stop here (after we
  507		 * return, all pagetables will be destroyed) until
 508		 * khugepaged has finished working on the pagetables
 509		 * under the mmap_sem.
 510		 */
 511		down_write(&mm->mmap_sem);
 512		up_write(&mm->mmap_sem);
 513	}
 514}
 515
 516static void release_pte_page(struct page *page)
 517{
 518	dec_node_page_state(page, NR_ISOLATED_ANON + page_is_file_cache(page));
 
 
 519	unlock_page(page);
 520	putback_lru_page(page);
 521}
 522
 523static void release_pte_pages(pte_t *pte, pte_t *_pte)
 
 524{
 
 
 525	while (--_pte >= pte) {
 526		pte_t pteval = *_pte;
 527		if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)))
 528			release_pte_page(pte_page(pteval));
 529	}
 530}
 531
 532static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 533					unsigned long address,
 534					pte_t *pte)
 
 
 535{
 536	struct page *page = NULL;
 537	pte_t *_pte;
 538	int none_or_zero = 0, result = 0, referenced = 0;
 539	bool writable = false;
 540
 541	for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
 542	     _pte++, address += PAGE_SIZE) {
 543		pte_t pteval = *_pte;
 544		if (pte_none(pteval) || (pte_present(pteval) &&
 545				is_zero_pfn(pte_pfn(pteval)))) {
 
 546			if (!userfaultfd_armed(vma) &&
 547			    ++none_or_zero <= khugepaged_max_ptes_none) {
 
 548				continue;
 549			} else {
 550				result = SCAN_EXCEED_NONE_PTE;
 
 551				goto out;
 552			}
 553		}
 554		if (!pte_present(pteval)) {
 555			result = SCAN_PTE_NON_PRESENT;
 556			goto out;
 557		}
 558		page = vm_normal_page(vma, address, pteval);
 559		if (unlikely(!page)) {
 560			result = SCAN_PAGE_NULL;
 561			goto out;
 562		}
 563
 564		/* TODO: teach khugepaged to collapse THP mapped with pte */
 565		if (PageCompound(page)) {
 566			result = SCAN_PAGE_COMPOUND;
 567			goto out;
 568		}
 569
 570		VM_BUG_ON_PAGE(!PageAnon(page), page);
 571
 572		/*
 573		 * We can do it before isolate_lru_page because the
 574		 * page can't be freed from under us. NOTE: PG_lock
 575		 * is needed to serialize against split_huge_page
 576		 * when invoked from the VM.
 577		 */
 578		if (!trylock_page(page)) {
 579			result = SCAN_PAGE_LOCK;
 580			goto out;
 581		}
 582
 583		/*
 584		 * cannot use mapcount: can't collapse if there's a gup pin.
 585		 * The page must only be referenced by the scanned process
 586		 * and page swap cache.
 587		 */
 588		if (page_count(page) != 1 + PageSwapCache(page)) {
 589			unlock_page(page);
 590			result = SCAN_PAGE_COUNT;
 591			goto out;
 592		}
 593		if (pte_write(pteval)) {
 594			writable = true;
 595		} else {
 596			if (PageSwapCache(page) &&
 597			    !reuse_swap_page(page, NULL)) {
 598				unlock_page(page);
 599				result = SCAN_SWAP_CACHE_PAGE;
 600				goto out;
 601			}
 602			/*
 603			 * Page is not in the swap cache. It can be collapsed
 604			 * into a THP.
 605			 */
 606		}
 607
 608		/*
  609		 * Isolate the page to avoid collapsing a hugepage
 610		 * currently in use by the VM.
 611		 */
 612		if (isolate_lru_page(page)) {
 613			unlock_page(page);
 614			result = SCAN_DEL_PAGE_LRU;
 615			goto out;
 616		}
 617		inc_node_page_state(page,
 618				NR_ISOLATED_ANON + page_is_file_cache(page));
 
 619		VM_BUG_ON_PAGE(!PageLocked(page), page);
 620		VM_BUG_ON_PAGE(PageLRU(page), page);
 621
  622		/* There should be enough young ptes to collapse the page */
 623		if (pte_young(pteval) ||
 624		    page_is_young(page) || PageReferenced(page) ||
 625		    mmu_notifier_test_young(vma->vm_mm, address))
 626			referenced++;
 627	}
 628	if (likely(writable)) {
 629		if (likely(referenced)) {
 630			result = SCAN_SUCCEED;
 631			trace_mm_collapse_huge_page_isolate(page, none_or_zero,
 632							    referenced, writable, result);
 633			return 1;
 634		}
 635	} else {
 636		result = SCAN_PAGE_RO;
 637	}
 638
 639out:
 640	release_pte_pages(pte, _pte);
 641	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
 642					    referenced, writable, result);
 643	return 0;
 644}
 645
 646static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
 647				      struct vm_area_struct *vma,
 648				      unsigned long address,
 649				      spinlock_t *ptl)
 
 650{
 
 651	pte_t *_pte;
 652	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
 653				_pte++, page++, address += PAGE_SIZE) {
 654		pte_t pteval = *_pte;
 655		struct page *src_page;
 656
 657		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
 658			clear_user_highpage(page, address);
 659			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
 660			if (is_zero_pfn(pte_pfn(pteval))) {
 661				/*
 662				 * ptl mostly unnecessary.
 663				 */
 664				spin_lock(ptl);
 665				/*
 666				 * paravirt calls inside pte_clear here are
 667				 * superfluous.
 668				 */
 669				pte_clear(vma->vm_mm, address, _pte);
 670				spin_unlock(ptl);
 671			}
 672		} else {
 673			src_page = pte_page(pteval);
 674			copy_user_highpage(page, src_page, address, vma);
 675			VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page);
 676			release_pte_page(src_page);
 677			/*
 678			 * ptl mostly unnecessary, but preempt has to
 679			 * be disabled to update the per-cpu stats
 680			 * inside page_remove_rmap().
 681			 */
 682			spin_lock(ptl);
 683			/*
 684			 * paravirt calls inside pte_clear here are
 685			 * superfluous.
 686			 */
 687			pte_clear(vma->vm_mm, address, _pte);
 688			page_remove_rmap(src_page, false);
 689			spin_unlock(ptl);
 690			free_page_and_swap_cache(src_page);
 691		}
 692	}
 693}
 694
 695static void khugepaged_alloc_sleep(void)
 696{
 697	DEFINE_WAIT(wait);
 698
 699	add_wait_queue(&khugepaged_wait, &wait);
 700	freezable_schedule_timeout_interruptible(
 701		msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
 702	remove_wait_queue(&khugepaged_wait, &wait);
 703}
 704
 705static int khugepaged_node_load[MAX_NUMNODES];
 
 
 706
 707static bool khugepaged_scan_abort(int nid)
 708{
 709	int i;
 710
 711	/*
 712	 * If node_reclaim_mode is disabled, then no extra effort is made to
 713	 * allocate memory locally.
 714	 */
 715	if (!node_reclaim_mode)
 716		return false;
 717
 718	/* If there is a count for this node already, it must be acceptable */
 719	if (khugepaged_node_load[nid])
 720		return false;
 721
 722	for (i = 0; i < MAX_NUMNODES; i++) {
 723		if (!khugepaged_node_load[i])
 724			continue;
 725		if (node_distance(nid, i) > node_reclaim_distance)
 726			return true;
 727	}
 728	return false;
 729}
 730
 731/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
 732static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
 733{
 734	return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
 735}
 736
 737#ifdef CONFIG_NUMA
 738static int khugepaged_find_target_node(void)
 739{
 740	static int last_khugepaged_target_node = NUMA_NO_NODE;
 741	int nid, target_node = 0, max_value = 0;
 742
 743	/* find first node with max normal pages hit */
 744	for (nid = 0; nid < MAX_NUMNODES; nid++)
 745		if (khugepaged_node_load[nid] > max_value) {
 746			max_value = khugepaged_node_load[nid];
 747			target_node = nid;
 748		}
 749
  750	/* do some balancing if several nodes have the same hit record */
 751	if (target_node <= last_khugepaged_target_node)
 752		for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
 753				nid++)
 754			if (max_value == khugepaged_node_load[nid]) {
 755				target_node = nid;
 756				break;
 757			}
 758
 759	last_khugepaged_target_node = target_node;
 760	return target_node;
 761}
 762
 763static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
 764{
 765	if (IS_ERR(*hpage)) {
 766		if (!*wait)
 767			return false;
 768
 769		*wait = false;
 770		*hpage = NULL;
 771		khugepaged_alloc_sleep();
 772	} else if (*hpage) {
 773		put_page(*hpage);
 774		*hpage = NULL;
 775	}
 776
 777	return true;
 778}
 
 779
 780static struct page *
 781khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
 782{
 783	VM_BUG_ON_PAGE(*hpage, *hpage);
 784
 785	*hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
 786	if (unlikely(!*hpage)) {
 787		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 788		*hpage = ERR_PTR(-ENOMEM);
 789		return NULL;
 790	}
 791
 792	prep_transhuge_page(*hpage);
 793	count_vm_event(THP_COLLAPSE_ALLOC);
 794	return *hpage;
 795}
 796#else
 797static int khugepaged_find_target_node(void)
 798{
 799	return 0;
 800}
 801
 802static inline struct page *alloc_khugepaged_hugepage(void)
 803{
 804	struct page *page;
 805
 806	page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
 807			   HPAGE_PMD_ORDER);
 808	if (page)
 809		prep_transhuge_page(page);
 810	return page;
 811}
 812
 813static struct page *khugepaged_alloc_hugepage(bool *wait)
 814{
 815	struct page *hpage;
 816
 817	do {
 818		hpage = alloc_khugepaged_hugepage();
 819		if (!hpage) {
 820			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 821			if (!*wait)
 822				return NULL;
 823
 824			*wait = false;
 825			khugepaged_alloc_sleep();
 826		} else
 827			count_vm_event(THP_COLLAPSE_ALLOC);
 828	} while (unlikely(!hpage) && likely(khugepaged_enabled()));
 829
 830	return hpage;
 831}
 832
 833static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
 834{
 835	if (!*hpage)
 836		*hpage = khugepaged_alloc_hugepage(wait);
 837
 838	if (unlikely(!*hpage))
 839		return false;
 840
 841	return true;
 842}
 843
 844static struct page *
 845khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
 846{
 847	VM_BUG_ON(!*hpage);
 848
 849	return  *hpage;
 850}
 851#endif
 852
 853/*
 854 * If mmap_sem temporarily dropped, revalidate vma
 855 * before taking mmap_sem.
 856 * Return 0 if succeeds, otherwise return none-zero
 857 * value (scan code).
 858 */
 859
 860static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
 861		struct vm_area_struct **vmap)
 
 
 862{
 863	struct vm_area_struct *vma;
 864	unsigned long hstart, hend;
 865
 866	if (unlikely(khugepaged_test_exit(mm)))
 867		return SCAN_ANY_PROCESS;
 868
 869	*vmap = vma = find_vma(mm, address);
 870	if (!vma)
 871		return SCAN_VMA_NULL;
 872
 873	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
 874	hend = vma->vm_end & HPAGE_PMD_MASK;
 875	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
 876		return SCAN_ADDRESS_RANGE;
 877	if (!hugepage_vma_check(vma, vma->vm_flags))
 
 878		return SCAN_VMA_CHECK;
 879	return 0;
 880}
 881
 882/*
 883 * Bring missing pages in from swap, to complete THP collapse.
 884 * Only done if khugepaged_scan_pmd believes it is worthwhile.
 885 *
 886 * Called and returns without pte mapped or spinlocks held,
 887 * but with mmap_sem held to protect against vma changes.
 888 */
 889
 890static bool __collapse_huge_page_swapin(struct mm_struct *mm,
 891					struct vm_area_struct *vma,
 892					unsigned long address, pmd_t *pmd,
 893					int referenced)
 894{
 895	int swapped_in = 0;
 896	vm_fault_t ret = 0;
 897	struct vm_fault vmf = {
 898		.vma = vma,
 899		.address = address,
 900		.flags = FAULT_FLAG_ALLOW_RETRY,
 901		.pmd = pmd,
 902		.pgoff = linear_page_index(vma, address),
 903	};
 904
  905	/* we only decide to swap in if there are enough young ptes */
 906	if (referenced < HPAGE_PMD_NR/2) {
 907		trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
 908		return false;
 909	}
 910	vmf.pte = pte_offset_map(pmd, address);
 911	for (; vmf.address < address + HPAGE_PMD_NR*PAGE_SIZE;
 912			vmf.pte++, vmf.address += PAGE_SIZE) {
 913		vmf.orig_pte = *vmf.pte;
 914		if (!is_swap_pte(vmf.orig_pte))
 
 915			continue;
 916		swapped_in++;
 917		ret = do_swap_page(&vmf);
 918
 919		/* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */
 920		if (ret & VM_FAULT_RETRY) {
 921			down_read(&mm->mmap_sem);
 922			if (hugepage_vma_revalidate(mm, address, &vmf.vma)) {
 923				/* vma is no longer available, don't continue to swapin */
 924				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
 925				return false;
 926			}
 927			/* check if the pmd is still valid */
 928			if (mm_find_pmd(mm, address) != pmd) {
 929				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
 930				return false;
 931			}
 932		}
 933		if (ret & VM_FAULT_ERROR) {
 
 934			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
 935			return false;
 936		}
 937		/* pte is unmapped now, we need to map it */
 938		vmf.pte = pte_offset_map(pmd, vmf.address);
 939	}
 940	vmf.pte--;
 941	pte_unmap(vmf.pte);
 942	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
 943	return true;
 944}
 945
 946static void collapse_huge_page(struct mm_struct *mm,
 947				   unsigned long address,
 948				   struct page **hpage,
 949				   int node, int referenced)
 950{
 
 951	pmd_t *pmd, _pmd;
 952	pte_t *pte;
 953	pgtable_t pgtable;
 954	struct page *new_page;
 955	spinlock_t *pmd_ptl, *pte_ptl;
 956	int isolated = 0, result = 0;
 957	struct mem_cgroup *memcg;
 958	struct vm_area_struct *vma;
 959	struct mmu_notifier_range range;
 960	gfp_t gfp;
 961
 962	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 963
 964	/* Only allocate from the target node */
 965	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
 966
 967	/*
 968	 * Before allocating the hugepage, release the mmap_sem read lock.
 969	 * The allocation can take potentially a long time if it involves
 970	 * sync compaction, and we do not need to hold the mmap_sem during
 971	 * that. We will recheck the vma after taking it again in write mode.
 972	 */
 973	up_read(&mm->mmap_sem);
 974	new_page = khugepaged_alloc_page(hpage, gfp, node);
 975	if (!new_page) {
 976		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
 977		goto out_nolock;
 978	}
 979
 980	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
 981		result = SCAN_CGROUP_CHARGE_FAIL;
 982		goto out_nolock;
 983	}
 984
 985	down_read(&mm->mmap_sem);
 986	result = hugepage_vma_revalidate(mm, address, &vma);
 987	if (result) {
 988		mem_cgroup_cancel_charge(new_page, memcg, true);
 989		up_read(&mm->mmap_sem);
 990		goto out_nolock;
 991	}
 992
 993	pmd = mm_find_pmd(mm, address);
 994	if (!pmd) {
 995		result = SCAN_PMD_NULL;
 996		mem_cgroup_cancel_charge(new_page, memcg, true);
 997		up_read(&mm->mmap_sem);
 998		goto out_nolock;
 999	}
1000
1001	/*
1002	 * __collapse_huge_page_swapin always returns with mmap_sem locked.
1003	 * If it fails, we release mmap_sem and jump to out_nolock.
1004	 * Continuing to collapse causes inconsistency.
1005	 */
1006	if (!__collapse_huge_page_swapin(mm, vma, address, pmd, referenced)) {
1007		mem_cgroup_cancel_charge(new_page, memcg, true);
1008		up_read(&mm->mmap_sem);
1009		goto out_nolock;
1010	}
1011
1012	up_read(&mm->mmap_sem);
1013	/*
1014	 * Prevent all access to the page tables, with the exception of
1015	 * gup_fast, which is handled later by the pmdp_collapse_flush, and
1016	 * the VM, which is handled by the anon_vma lock + PG_lock.
1017	 */
1018	down_write(&mm->mmap_sem);
1019	result = SCAN_ANY_PROCESS;
1020	if (!mmget_still_valid(mm))
1021		goto out;
1022	result = hugepage_vma_revalidate(mm, address, &vma);
1023	if (result)
1024		goto out;
1025	/* check if the pmd is still valid */
1026	if (mm_find_pmd(mm, address) != pmd)
1027		goto out;
1028
1029	anon_vma_lock_write(vma->anon_vma);
1030
1031	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
1032				address, address + HPAGE_PMD_SIZE);
1033	mmu_notifier_invalidate_range_start(&range);
1034
1035	pte = pte_offset_map(pmd, address);
1036	pte_ptl = pte_lockptr(mm, pmd);
1037
1038	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
1039	/*
1040	 * After this, gup_fast can't run anymore. This also removes
1041	 * any huge TLB entry from the CPU, so we don't end up with
1042	 * huge and small TLB entries for the same virtual address,
1043	 * avoiding the risk of CPU bugs in that area.
1044	 */
1045	_pmd = pmdp_collapse_flush(vma, address, pmd);
1046	spin_unlock(pmd_ptl);
1047	mmu_notifier_invalidate_range_end(&range);
1048
1049	spin_lock(pte_ptl);
1050	isolated = __collapse_huge_page_isolate(vma, address, pte);
1051	spin_unlock(pte_ptl);
1052
1053	if (unlikely(!isolated)) {
1054		pte_unmap(pte);
1055		spin_lock(pmd_ptl);
1056		BUG_ON(!pmd_none(*pmd));
1057		/*
1058		 * We can only use set_pmd_at when establishing
1059		 * hugepmds and never for establishing regular pmds that
1060		 * point to regular pagetables. Use pmd_populate for that.
1061		 */
1062		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
1063		spin_unlock(pmd_ptl);
1064		anon_vma_unlock_write(vma->anon_vma);
1065		result = SCAN_FAIL;
1066		goto out;
1067	}
1068
1069	/*
1070	 * All pages are isolated and locked so anon_vma rmap
1071	 * can't run anymore.
1072	 */
1073	anon_vma_unlock_write(vma->anon_vma);
1074
1075	__collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl);
1076	pte_unmap(pte);
1077	__SetPageUptodate(new_page);
1078	pgtable = pmd_pgtable(_pmd);
1079
1080	_pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
1081	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1082
1083	/*
1084	 * spin_lock() below is not the equivalent of smp_wmb(), so
1085	 * this is needed to prevent the copy_huge_page writes from becoming
1086	 * visible after the set_pmd_at() write.
1087	 */
1088	smp_wmb();
1089
1090	spin_lock(pmd_ptl);
1091	BUG_ON(!pmd_none(*pmd));
1092	page_add_new_anon_rmap(new_page, vma, address, true);
1093	mem_cgroup_commit_charge(new_page, memcg, false, true);
1094	count_memcg_events(memcg, THP_COLLAPSE_ALLOC, 1);
1095	lru_cache_add_active_or_unevictable(new_page, vma);
1096	pgtable_trans_huge_deposit(mm, pmd, pgtable);
1097	set_pmd_at(mm, address, pmd, _pmd);
1098	update_mmu_cache_pmd(vma, address, pmd);
1099	spin_unlock(pmd_ptl);
1100
1101	*hpage = NULL;
1102
1103	khugepaged_pages_collapsed++;
1104	result = SCAN_SUCCEED;
1105out_up_write:
1106	up_write(&mm->mmap_sem);
1107out_nolock:
1108	trace_mm_collapse_huge_page(mm, isolated, result);
1109	return;
1110out:
1111	mem_cgroup_cancel_charge(new_page, memcg, true);
1112	goto out_up_write;
1113}
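/*
 * The anonymous collapse path above only runs for mms and VMAs that have
 * been registered with khugepaged.  A minimal user-space sketch (not part
 * of this file) of creating such a region, assuming THP and khugepaged are
 * enabled system-wide and madvise(MADV_HUGEPAGE) is available:
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/mman.h>
 *
 *	int main(void)
 *	{
 *		size_t len = 16UL << 20;		// 16 MiB
 *		char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *		if (p == MAP_FAILED)
 *			return 1;
 *		madvise(p, len, MADV_HUGEPAGE);	// make the VMA eligible
 *		memset(p, 0x5a, len);		// populate ptes worth collapsing
 *		pause();			// keep the mm alive while khugepaged scans
 *		return 0;
 *	}
 */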
1114
1115static int khugepaged_scan_pmd(struct mm_struct *mm,
1116			       struct vm_area_struct *vma,
1117			       unsigned long address,
1118			       struct page **hpage)
1119{
1120	pmd_t *pmd;
1121	pte_t *pte, *_pte;
1122	int ret = 0, none_or_zero = 0, result = 0, referenced = 0;
1123	struct page *page = NULL;
1124	unsigned long _address;
1125	spinlock_t *ptl;
1126	int node = NUMA_NO_NODE, unmapped = 0;
1127	bool writable = false;
1128
1129	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1130
1131	pmd = mm_find_pmd(mm, address);
1132	if (!pmd) {
1133		result = SCAN_PMD_NULL;
1134		goto out;
1135	}
1136
1137	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1138	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
1139	for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
1140	     _pte++, _address += PAGE_SIZE) {
1141		pte_t pteval = *_pte;
1142		if (is_swap_pte(pteval)) {
1143			if (++unmapped <= khugepaged_max_ptes_swap) {
1144				continue;
1145			} else {
1146				result = SCAN_EXCEED_SWAP_PTE;
1147				goto out_unmap;
1148			}
1149		}
1150		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
1151			if (!userfaultfd_armed(vma) &&
1152			    ++none_or_zero <= khugepaged_max_ptes_none) {
1153				continue;
1154			} else {
1155				result = SCAN_EXCEED_NONE_PTE;
1156				goto out_unmap;
1157			}
1158		}
1159		if (!pte_present(pteval)) {
1160			result = SCAN_PTE_NON_PRESENT;
1161			goto out_unmap;
1162		}
1163		if (pte_write(pteval))
1164			writable = true;
1165
1166		page = vm_normal_page(vma, _address, pteval);
1167		if (unlikely(!page)) {
1168			result = SCAN_PAGE_NULL;
1169			goto out_unmap;
1170		}
1171
1172		/* TODO: teach khugepaged to collapse THP mapped with pte */
1173		if (PageCompound(page)) {
1174			result = SCAN_PAGE_COMPOUND;
1175			goto out_unmap;
1176		}
1177
1178		/*
1179		 * Record which node the original page is from and save this
1180		 * information to khugepaged_node_load[].
1181		 * Khugepaged will allocate the hugepage from the node with the
1182		 * most hits.
1183		 */
1184		node = page_to_nid(page);
1185		if (khugepaged_scan_abort(node)) {
1186			result = SCAN_SCAN_ABORT;
1187			goto out_unmap;
1188		}
1189		khugepaged_node_load[node]++;
1190		if (!PageLRU(page)) {
1191			result = SCAN_PAGE_LRU;
1192			goto out_unmap;
1193		}
1194		if (PageLocked(page)) {
1195			result = SCAN_PAGE_LOCK;
1196			goto out_unmap;
1197		}
1198		if (!PageAnon(page)) {
1199			result = SCAN_PAGE_ANON;
1200			goto out_unmap;
1201		}
1202
1203		/*
1204		 * cannot use mapcount: can't collapse if there's a gup pin.
1205		 * The page must only be referenced by the scanned process
1206		 * and page swap cache.
1207		 */
1208		if (page_count(page) != 1 + PageSwapCache(page)) {
1209			result = SCAN_PAGE_COUNT;
1210			goto out_unmap;
1211		}
1212		if (pte_young(pteval) ||
1213		    page_is_young(page) || PageReferenced(page) ||
1214		    mmu_notifier_test_young(vma->vm_mm, address))
1215			referenced++;
1216	}
1217	if (writable) {
1218		if (referenced) {
1219			result = SCAN_SUCCEED;
1220			ret = 1;
1221		} else {
1222			result = SCAN_LACK_REFERENCED_PAGE;
1223		}
1224	} else {
1225		result = SCAN_PAGE_RO;
1226	}
1227out_unmap:
1228	pte_unmap_unlock(pte, ptl);
1229	if (ret) {
1230		node = khugepaged_find_target_node();
1231		/* collapse_huge_page will return with the mmap_sem released */
1232		collapse_huge_page(mm, address, hpage, node, referenced);
1233	}
1234out:
1235	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
1236				     none_or_zero, result, unmapped);
1237	return ret;
1238}
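/*
 * The per-pmd limits used above (khugepaged_max_ptes_swap and
 * khugepaged_max_ptes_none) are tunable from user space.  A small
 * user-space sketch (not part of this file) that reads them back, assuming
 * the usual sysfs layout under
 * /sys/kernel/mm/transparent_hugepage/khugepaged/:
 *
 *	#include <stdio.h>
 *
 *	static unsigned int read_khugepaged_tunable(const char *name)
 *	{
 *		char path[128];
 *		unsigned int val = 0;
 *		FILE *f;
 *
 *		snprintf(path, sizeof(path),
 *			 "/sys/kernel/mm/transparent_hugepage/khugepaged/%s",
 *			 name);
 *		f = fopen(path, "r");
 *		if (f) {
 *			if (fscanf(f, "%u", &val) != 1)
 *				val = 0;
 *			fclose(f);
 *		}
 *		return val;
 *	}
 *
 *	int main(void)
 *	{
 *		printf("max_ptes_none = %u\n",
 *		       read_khugepaged_tunable("max_ptes_none"));
 *		printf("max_ptes_swap = %u\n",
 *		       read_khugepaged_tunable("max_ptes_swap"));
 *		return 0;
 *	}
 */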
1239
1240static void collect_mm_slot(struct mm_slot *mm_slot)
1241{
1242	struct mm_struct *mm = mm_slot->mm;
1243
1244	lockdep_assert_held(&khugepaged_mm_lock);
1245
1246	if (khugepaged_test_exit(mm)) {
1247		/* free mm_slot */
1248		hash_del(&mm_slot->hash);
1249		list_del(&mm_slot->mm_node);
1250
1251		/*
1252		 * Not strictly needed because the mm exited already.
1253		 *
1254		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1255		 */
1256
1257		/* khugepaged_mm_lock actually not necessary for the below */
1258		free_mm_slot(mm_slot);
1259		mmdrop(mm);
1260	}
1261}
1262
1263#if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE)
1264/*
1265 * Notify khugepaged that the given addr of the mm is a pte-mapped THP. Then
1266 * khugepaged should try to collapse the page table.
1267 */
1268static int khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
1269					 unsigned long addr)
1270{
1271	struct mm_slot *mm_slot;
1272
1273	VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
1274
1275	spin_lock(&khugepaged_mm_lock);
1276	mm_slot = get_mm_slot(mm);
1277	if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP))
1278		mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
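	/*
	 * If the per-mm array is already full the address is silently
	 * dropped here: recording pte-mapped THPs is best-effort.
	 */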
1279	spin_unlock(&khugepaged_mm_lock);
1280	return 0;
1281}
1282
1283/**
1284 * Try to collapse a pte-mapped THP for mm at address haddr.
1285 *
1286 * This function checks whether all the PTEs in the PMD are pointing to the
1287 * right THP. If so, retract the page table so the THP can be refaulted
1288 * in as pmd-mapped.
1289 */
1290void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
1291{
1292	unsigned long haddr = addr & HPAGE_PMD_MASK;
1293	struct vm_area_struct *vma = find_vma(mm, haddr);
1294	struct page *hpage = NULL;
1295	pte_t *start_pte, *pte;
1296	pmd_t *pmd, _pmd;
1297	spinlock_t *ptl;
1298	int count = 0;
1299	int i;
1300
1301	if (!vma || !vma->vm_file ||
1302	    vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE)
1303		return;
1304
1305	/*
1306	 * This vm_flags may not have VM_HUGEPAGE if the page was not
1307	 * collapsed by this mm. But we can still collapse if the page is
1308	 * a valid THP. Add extra VM_HUGEPAGE so hugepage_vma_check()
1309	 * will not fail the vma for missing VM_HUGEPAGE.
1310	 */
1311	if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE))
1312		return;
1313
1314	pmd = mm_find_pmd(mm, haddr);
1315	if (!pmd)
1316		return;
1317
1318	start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
1319
1320	/* step 1: check all mapped PTEs are to the right huge page */
1321	for (i = 0, addr = haddr, pte = start_pte;
1322	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1323		struct page *page;
1324
1325		/* empty pte, skip */
1326		if (pte_none(*pte))
1327			continue;
1328
1329		/* page swapped out, abort */
1330		if (!pte_present(*pte))
1331			goto abort;
1332
1333		page = vm_normal_page(vma, addr, *pte);
1334
1335		if (!page || !PageCompound(page))
1336			goto abort;
1337
1338		if (!hpage) {
1339			hpage = compound_head(page);
1340			/*
1341			 * The mapping of the THP should not change.
1342			 *
1343			 * Note that uprobe, debugger, or MAP_PRIVATE may
1344			 * change the page table, but the new page will
1345			 * not pass PageCompound() check.
1346			 */
1347			if (WARN_ON(hpage->mapping != vma->vm_file->f_mapping))
1348				goto abort;
1349		}
1350
1351		/*
1352		 * Confirm the page maps to the correct subpage.
1353		 *
1354		 * Note that uprobe, debugger, or MAP_PRIVATE may change
1355		 * the page table, but the new page will not pass
1356		 * PageCompound() check.
1357		 */
1358		if (WARN_ON(hpage + i != page))
1359			goto abort;
1360		count++;
1361	}
1362
1363	/* step 2: adjust rmap */
1364	for (i = 0, addr = haddr, pte = start_pte;
1365	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1366		struct page *page;
1367
1368		if (pte_none(*pte))
1369			continue;
1370		page = vm_normal_page(vma, addr, *pte);
1371		page_remove_rmap(page, false);
1372	}
1373
1374	pte_unmap_unlock(start_pte, ptl);
1375
1376	/* step 3: set proper refcount and mm_counters. */
1377	if (hpage) {
1378		page_ref_sub(hpage, count);
1379		add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
1380	}
1381
1382	/* step 4: collapse pmd */
1383	ptl = pmd_lock(vma->vm_mm, pmd);
1384	_pmd = pmdp_collapse_flush(vma, addr, pmd);
1385	spin_unlock(ptl);
1386	mm_dec_nr_ptes(mm);
1387	pte_free(mm, pmd_pgtable(_pmd));
1388	return;
1389
1390abort:
1391	pte_unmap_unlock(start_pte, ptl);
1392}
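/*
 * A pte-mapped THP typically arises when a compound page already sits in
 * the page cache (for instance after a collapse on behalf of another mm)
 * but this mm mapped it with individual ptes, e.g. because the pmd could
 * not be installed at fault time.  The walk above only retracts the page
 * table once every present pte is verified to map the expected subpage of
 * a single THP.
 */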
1393
1394static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
1395{
1396	struct mm_struct *mm = mm_slot->mm;
1397	int i;
1398
1399	if (likely(mm_slot->nr_pte_mapped_thp == 0))
1400		return 0;
1401
1402	if (!down_write_trylock(&mm->mmap_sem))
1403		return -EBUSY;
1404
1405	if (unlikely(khugepaged_test_exit(mm)))
1406		goto out;
1407
1408	for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
1409		collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i]);
1410
1411out:
1412	mm_slot->nr_pte_mapped_thp = 0;
1413	up_write(&mm->mmap_sem);
1414	return 0;
1415}
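/*
 * khugepaged_add_pte_mapped_thp() and this drain function form a small
 * producer/consumer pair: addresses are queued under khugepaged_mm_lock
 * when mmap_sem cannot be taken (see retract_page_tables()), and the
 * queued page tables are collapsed here once the scanner can take
 * mmap_sem for writing.
 */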
1416
1417static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1418{
1419	struct vm_area_struct *vma;
1420	unsigned long addr;
1421	pmd_t *pmd, _pmd;
1422
1423	i_mmap_lock_write(mapping);
1424	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
1425		/*
1426		 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
1427		 * got written to. These VMAs are likely not worth investing a
1428		 * down_write(mmap_sem) in, as the PMD-mapping is likely to be
1429		 * split later anyway.
1430		 *
1431		 * Note that the vma->anon_vma check is racy: it can be set up
1432		 * by the fault path after the check but before we take mmap_sem.
1433		 * But the page lock prevents establishing any new ptes of the
1434		 * page, so we are safe.
1435		 *
1436		 * An alternative would be to drop the check, but then check that
1437		 * the page table is clear before calling pmdp_collapse_flush()
1438		 * under ptl. That has a higher chance of recovering a THP for
1439		 * the VMA, but also a higher cost.
1440		 */
1441		if (vma->anon_vma)
1442			continue;
1443		addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1444		if (addr & ~HPAGE_PMD_MASK)
1445			continue;
1446		if (vma->vm_end < addr + HPAGE_PMD_SIZE)
1447			continue;
1448		pmd = mm_find_pmd(vma->vm_mm, addr);
1449		if (!pmd)
1450			continue;
1451		/*
1452		 * We need exclusive mmap_sem to retract page table.
1453		 *
1454		 * We use trylock due to lock inversion: we need to acquire
1455		 * mmap_sem while holding page lock. Fault path does it in
1456		 * reverse order. Trylock is a way to avoid deadlock.
1457		 */
1458		if (down_write_trylock(&vma->vm_mm->mmap_sem)) {
1459			spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd);
1460			/* assume page table is clear */
1461			_pmd = pmdp_collapse_flush(vma, addr, pmd);
1462			spin_unlock(ptl);
1463			up_write(&vma->vm_mm->mmap_sem);
1464			mm_dec_nr_ptes(vma->vm_mm);
1465			pte_free(vma->vm_mm, pmd_pgtable(_pmd));
1466		} else {
1467			/* Try again later */
1468			khugepaged_add_pte_mapped_thp(vma->vm_mm, addr);
1469		}
1470	}
1471	i_mmap_unlock_write(mapping);
1472}
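/*
 * The trylock above is about lock ordering: the fault path takes mmap_sem
 * and then the page lock, while this path is entered from collapse_file()
 * with the new huge page still locked and wants mmap_sem.  Blocking here
 * could deadlock, so on contention the retraction is deferred through
 * khugepaged_add_pte_mapped_thp() instead.
 */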
1473
1474/**
1475 * collapse_file - collapse filemap/tmpfs/shmem pages into a huge one.
1476 *
1477 * Basic scheme is simple, details are more complex:
1478 *  - allocate and lock a new huge page;
1479 *  - scan page cache replacing old pages with the new one
1480 *    + swap/gup in pages if necessary;
1481 *    + fill in gaps;
1482 *    + keep old pages around in case rollback is required;
1483 *  - if replacing succeeds:
1484 *    + copy data over;
1485 *    + free old pages;
1486 *    + unlock huge page;
1487 *  - if replacing failed:
1488 *    + put all pages back and unfreeze them;
1489 *    + restore gaps in the page cache;
1490 *    + unlock and free huge page;
1491 */
1492static void collapse_file(struct mm_struct *mm,
1493		struct file *file, pgoff_t start,
1494		struct page **hpage, int node)
1495{
1496	struct address_space *mapping = file->f_mapping;
1497	gfp_t gfp;
1498	struct page *new_page;
1499	struct mem_cgroup *memcg;
1500	pgoff_t index, end = start + HPAGE_PMD_NR;
1501	LIST_HEAD(pagelist);
1502	XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
1503	int nr_none = 0, result = SCAN_SUCCEED;
1504	bool is_shmem = shmem_file(file);
1505
1506	VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
1507	VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1508
1509	/* Only allocate from the target node */
1510	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
1511
1512	new_page = khugepaged_alloc_page(hpage, gfp, node);
1513	if (!new_page) {
1514		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
1515		goto out;
1516	}
1517
1518	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
1519		result = SCAN_CGROUP_CHARGE_FAIL;
1520		goto out;
1521	}
1522
1523	/* This will be less messy when we use multi-index entries */
1524	do {
1525		xas_lock_irq(&xas);
1526		xas_create_range(&xas);
1527		if (!xas_error(&xas))
1528			break;
1529		xas_unlock_irq(&xas);
1530		if (!xas_nomem(&xas, GFP_KERNEL)) {
1531			mem_cgroup_cancel_charge(new_page, memcg, true);
1532			result = SCAN_FAIL;
1533			goto out;
1534		}
1535	} while (1);
1536
1537	__SetPageLocked(new_page);
1538	if (is_shmem)
1539		__SetPageSwapBacked(new_page);
1540	new_page->index = start;
1541	new_page->mapping = mapping;
1542
1543	/*
1544	 * At this point the new_page is locked and not up-to-date.
1545	 * It's safe to insert it into the page cache, because nobody would
1546	 * be able to map it or use it in another way until we unlock it.
1547	 */
1548
1549	xas_set(&xas, start);
1550	for (index = start; index < end; index++) {
1551		struct page *page = xas_next(&xas);
1552
1553		VM_BUG_ON(index != xas.xa_index);
1554		if (is_shmem) {
1555			if (!page) {
1556				/*
1557				 * Stop if extent has been truncated or
1558				 * hole-punched, and is now completely
1559				 * empty.
1560				 */
1561				if (index == start) {
1562					if (!xas_next_entry(&xas, end - 1)) {
1563						result = SCAN_TRUNCATED;
1564						goto xa_locked;
1565					}
1566					xas_set(&xas, index);
1567				}
1568				if (!shmem_charge(mapping->host, 1)) {
1569					result = SCAN_FAIL;
1570					goto xa_locked;
1571				}
1572				xas_store(&xas, new_page);
1573				nr_none++;
1574				continue;
1575			}
1576
1577			if (xa_is_value(page) || !PageUptodate(page)) {
1578				xas_unlock_irq(&xas);
1579				/* swap in or instantiate fallocated page */
1580				if (shmem_getpage(mapping->host, index, &page,
1581						  SGP_NOHUGE)) {
1582					result = SCAN_FAIL;
1583					goto xa_unlocked;
1584				}
1585			} else if (trylock_page(page)) {
1586				get_page(page);
1587				xas_unlock_irq(&xas);
1588			} else {
1589				result = SCAN_PAGE_LOCK;
1590				goto xa_locked;
1591			}
1592		} else {	/* !is_shmem */
1593			if (!page || xa_is_value(page)) {
1594				xas_unlock_irq(&xas);
1595				page_cache_sync_readahead(mapping, &file->f_ra,
1596							  file, index,
1597							  PAGE_SIZE);
1598				/* drain pagevecs to help isolate_lru_page() */
1599				lru_add_drain();
1600				page = find_lock_page(mapping, index);
1601				if (unlikely(page == NULL)) {
1602					result = SCAN_FAIL;
1603					goto xa_unlocked;
1604				}
1605			} else if (trylock_page(page)) {
1606				get_page(page);
1607				xas_unlock_irq(&xas);
1608			} else {
1609				result = SCAN_PAGE_LOCK;
1610				goto xa_locked;
1611			}
1612		}
1613
1614		/*
1615		 * The page must be locked, so we can drop the i_pages lock
1616		 * without racing with truncate.
1617		 */
1618		VM_BUG_ON_PAGE(!PageLocked(page), page);
1619
1620		/* make sure the page is up to date */
1621		if (unlikely(!PageUptodate(page))) {
1622			result = SCAN_FAIL;
1623			goto out_unlock;
1624		}
1625
1626		/*
1627		 * If the file was truncated then extended, or hole-punched,
1628		 * before we locked the first page, a THP might be there already.
1629		 */
1630		if (PageTransCompound(page)) {
1631			result = SCAN_PAGE_COMPOUND;
1632			goto out_unlock;
1633		}
1634
1635		if (page_mapping(page) != mapping) {
1636			result = SCAN_TRUNCATED;
1637			goto out_unlock;
1638		}
1639
1640		if (!is_shmem && PageDirty(page)) {
1641			/*
1642			 * khugepaged only works on read-only fds, so this
1643			 * page is dirty because it hasn't been flushed
1644			 * since first write.
1645			 */
1646			result = SCAN_FAIL;
1647			goto out_unlock;
1648		}
1649
1650		if (isolate_lru_page(page)) {
1651			result = SCAN_DEL_PAGE_LRU;
1652			goto out_unlock;
1653		}
1654
1655		if (page_has_private(page) &&
1656		    !try_to_release_page(page, GFP_KERNEL)) {
1657			result = SCAN_PAGE_HAS_PRIVATE;
1658			goto out_unlock;
1659		}
1660
1661		if (page_mapped(page))
1662			unmap_mapping_pages(mapping, index, 1, false);
1663
1664		xas_lock_irq(&xas);
1665		xas_set(&xas, index);
1666
1667		VM_BUG_ON_PAGE(page != xas_load(&xas), page);
1668		VM_BUG_ON_PAGE(page_mapped(page), page);
1669
1670		/*
1671		 * The page is expected to have page_count() == 3:
1672		 *  - we hold a pin on it;
1673		 *  - one reference from page cache;
1674		 *  - one from isolate_lru_page;
1675		 */
1676		if (!page_ref_freeze(page, 3)) {
1677			result = SCAN_PAGE_COUNT;
1678			xas_unlock_irq(&xas);
1679			putback_lru_page(page);
1680			goto out_unlock;
1681		}
1682
1683		/*
1684		 * Add the page to the list to be able to undo the collapse if
1685		 * something goes wrong.
1686		 */
1687		list_add_tail(&page->lru, &pagelist);
1688
1689		/* Finally, replace with the new page. */
1690		xas_store(&xas, new_page);
1691		continue;
1692out_unlock:
1693		unlock_page(page);
1694		put_page(page);
1695		goto xa_unlocked;
1696	}
1697
1698	if (is_shmem)
1699		__inc_node_page_state(new_page, NR_SHMEM_THPS);
1700	else {
1701		__inc_node_page_state(new_page, NR_FILE_THPS);
1702		filemap_nr_thps_inc(mapping);
1703	}
1704
1705	if (nr_none) {
1706		struct zone *zone = page_zone(new_page);
1707
1708		__mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
1709		if (is_shmem)
1710			__mod_node_page_state(zone->zone_pgdat,
1711					      NR_SHMEM, nr_none);
1712	}
1713
1714xa_locked:
1715	xas_unlock_irq(&xas);
1716xa_unlocked:
1717
1718	if (result == SCAN_SUCCEED) {
1719		struct page *page, *tmp;
1720
1721		/*
1722		 * Replacing old pages with new one has succeeded, now we
1723		 * need to copy the content and free the old pages.
1724		 */
1725		index = start;
1726		list_for_each_entry_safe(page, tmp, &pagelist, lru) {
1727			while (index < page->index) {
1728				clear_highpage(new_page + (index % HPAGE_PMD_NR));
1729				index++;
1730			}
1731			copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
1732					page);
1733			list_del(&page->lru);
1734			page->mapping = NULL;
1735			page_ref_unfreeze(page, 1);
1736			ClearPageActive(page);
1737			ClearPageUnevictable(page);
1738			unlock_page(page);
1739			put_page(page);
1740			index++;
1741		}
1742		while (index < end) {
1743			clear_highpage(new_page + (index % HPAGE_PMD_NR));
1744			index++;
1745		}
1746
1747		SetPageUptodate(new_page);
1748		page_ref_add(new_page, HPAGE_PMD_NR - 1);
1749		mem_cgroup_commit_charge(new_page, memcg, false, true);
1750
1751		if (is_shmem) {
1752			set_page_dirty(new_page);
1753			lru_cache_add_anon(new_page);
1754		} else {
1755			lru_cache_add_file(new_page);
1756		}
1757		count_memcg_events(memcg, THP_COLLAPSE_ALLOC, 1);
1758
1759		/*
1760		 * Remove pte page tables, so we can re-fault the page as huge.
1761		 */
1762		retract_page_tables(mapping, start);
1763		*hpage = NULL;
1764
1765		khugepaged_pages_collapsed++;
1766	} else {
1767		struct page *page;
1768
1769		/* Something went wrong: roll back page cache changes */
1770		xas_lock_irq(&xas);
1771		mapping->nrpages -= nr_none;
1772
1773		if (is_shmem)
1774			shmem_uncharge(mapping->host, nr_none);
1775
1776		xas_set(&xas, start);
1777		xas_for_each(&xas, page, end - 1) {
1778			page = list_first_entry_or_null(&pagelist,
1779					struct page, lru);
1780			if (!page || xas.xa_index < page->index) {
1781				if (!nr_none)
1782					break;
1783				nr_none--;
1784				/* Put holes back where they were */
1785				xas_store(&xas, NULL);
1786				continue;
1787			}
1788
1789			VM_BUG_ON_PAGE(page->index != xas.xa_index, page);
1790
1791			/* Unfreeze the page. */
1792			list_del(&page->lru);
1793			page_ref_unfreeze(page, 2);
1794			xas_store(&xas, page);
1795			xas_pause(&xas);
1796			xas_unlock_irq(&xas);
1797			unlock_page(page);
1798			putback_lru_page(page);
1799			xas_lock_irq(&xas);
1800		}
1801		VM_BUG_ON(nr_none);
1802		xas_unlock_irq(&xas);
1803
1804		mem_cgroup_cancel_charge(new_page, memcg, true);
1805		new_page->mapping = NULL;
1806	}
1807
1808	unlock_page(new_page);
1809out:
1810	VM_BUG_ON(!list_empty(&pagelist));
1811	/* TODO: tracepoints */
1812}
1813
1814static void khugepaged_scan_file(struct mm_struct *mm,
1815		struct file *file, pgoff_t start, struct page **hpage)
1816{
1817	struct page *page = NULL;
1818	struct address_space *mapping = file->f_mapping;
1819	XA_STATE(xas, &mapping->i_pages, start);
1820	int present, swap;
1821	int node = NUMA_NO_NODE;
1822	int result = SCAN_SUCCEED;
1823
1824	present = 0;
1825	swap = 0;
1826	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1827	rcu_read_lock();
1828	xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
1829		if (xas_retry(&xas, page))
1830			continue;
1831
1832		if (xa_is_value(page)) {
1833			if (++swap > khugepaged_max_ptes_swap) {
1834				result = SCAN_EXCEED_SWAP_PTE;
1835				break;
1836			}
1837			continue;
1838		}
1839
1840		if (PageTransCompound(page)) {
1841			result = SCAN_PAGE_COMPOUND;
1842			break;
1843		}
1844
1845		node = page_to_nid(page);
1846		if (khugepaged_scan_abort(node)) {
1847			result = SCAN_SCAN_ABORT;
1848			break;
1849		}
1850		khugepaged_node_load[node]++;
1851
1852		if (!PageLRU(page)) {
1853			result = SCAN_PAGE_LRU;
1854			break;
1855		}
1856
1857		if (page_count(page) !=
1858		    1 + page_mapcount(page) + page_has_private(page)) {
1859			result = SCAN_PAGE_COUNT;
1860			break;
1861		}
1862
1863		/*
1864		 * We probably should check if the page is referenced here, but
1865		 * nobody would transfer pte_young() to PageReferenced() for us.
1866		 * And an rmap walk here is just too costly...
1867		 */
1868
1869		present++;
1870
1871		if (need_resched()) {
1872			xas_pause(&xas);
1873			cond_resched_rcu();
1874		}
1875	}
1876	rcu_read_unlock();
1877
1878	if (result == SCAN_SUCCEED) {
1879		if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
1880			result = SCAN_EXCEED_NONE_PTE;
1881		} else {
1882			node = khugepaged_find_target_node();
1883			collapse_file(mm, file, start, hpage, node);
1884		}
1885	}
1886
1887	/* TODO: tracepoints */
1888}
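/*
 * With the default tunable (khugepaged_max_ptes_none == HPAGE_PMD_NR - 1,
 * i.e. 511 with 4 KiB pages), the "present" check above only rejects a
 * range with no pages in the page cache at all; lowering max_ptes_none
 * makes khugepaged require a correspondingly fuller range before calling
 * collapse_file().
 */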
1889#else
1890static void khugepaged_scan_file(struct mm_struct *mm,
1891		struct file *file, pgoff_t start, struct page **hpage)
1892{
1893	BUILD_BUG();
1894}
1895
1896static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
1897{
1898	return 0;
1899}
1900#endif
1901
1902static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
1903					    struct page **hpage)
1904	__releases(&khugepaged_mm_lock)
1905	__acquires(&khugepaged_mm_lock)
1906{
1907	struct mm_slot *mm_slot;
1908	struct mm_struct *mm;
1909	struct vm_area_struct *vma;
1910	int progress = 0;
1911
1912	VM_BUG_ON(!pages);
1913	lockdep_assert_held(&khugepaged_mm_lock);
1914
1915	if (khugepaged_scan.mm_slot)
1916		mm_slot = khugepaged_scan.mm_slot;
1917	else {
1918		mm_slot = list_entry(khugepaged_scan.mm_head.next,
1919				     struct mm_slot, mm_node);
1920		khugepaged_scan.address = 0;
1921		khugepaged_scan.mm_slot = mm_slot;
1922	}
1923	spin_unlock(&khugepaged_mm_lock);
1924	khugepaged_collapse_pte_mapped_thps(mm_slot);
1925
1926	mm = mm_slot->mm;
1927	/*
1928	 * Don't wait for the semaphore (to avoid long wait times).  Just move to
1929	 * the next mm on the list.
1930	 */
1931	vma = NULL;
1932	if (unlikely(!down_read_trylock(&mm->mmap_sem)))
1933		goto breakouterloop_mmap_sem;
1934	if (likely(!khugepaged_test_exit(mm)))
1935		vma = find_vma(mm, khugepaged_scan.address);
1936
1937	progress++;
1938	for (; vma; vma = vma->vm_next) {
1939		unsigned long hstart, hend;
1940
1941		cond_resched();
1942		if (unlikely(khugepaged_test_exit(mm))) {
1943			progress++;
1944			break;
1945		}
1946		if (!hugepage_vma_check(vma, vma->vm_flags)) {
1947skip:
1948			progress++;
1949			continue;
1950		}
1951		hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1952		hend = vma->vm_end & HPAGE_PMD_MASK;
1953		if (hstart >= hend)
1954			goto skip;
1955		if (khugepaged_scan.address > hend)
1956			goto skip;
1957		if (khugepaged_scan.address < hstart)
1958			khugepaged_scan.address = hstart;
1959		VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
1960
1961		while (khugepaged_scan.address < hend) {
1962			int ret;
1963			cond_resched();
1964			if (unlikely(khugepaged_test_exit(mm)))
1965				goto breakouterloop;
1966
1967			VM_BUG_ON(khugepaged_scan.address < hstart ||
1968				  khugepaged_scan.address + HPAGE_PMD_SIZE >
1969				  hend);
1970			if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
1971				struct file *file;
1972				pgoff_t pgoff = linear_page_index(vma,
1973						khugepaged_scan.address);
1974
1975				if (shmem_file(vma->vm_file)
1976				    && !shmem_huge_enabled(vma))
1977					goto skip;
1978				file = get_file(vma->vm_file);
1979				up_read(&mm->mmap_sem);
1980				ret = 1;
1981				khugepaged_scan_file(mm, file, pgoff, hpage);
1982				fput(file);
1983			} else {
1984				ret = khugepaged_scan_pmd(mm, vma,
1985						khugepaged_scan.address,
1986						hpage);
1987			}
1988			/* move to next address */
1989			khugepaged_scan.address += HPAGE_PMD_SIZE;
1990			progress += HPAGE_PMD_NR;
1991			if (ret)
1992				/* we released mmap_sem so break loop */
1993				goto breakouterloop_mmap_sem;
1994			if (progress >= pages)
1995				goto breakouterloop;
1996		}
1997	}
1998breakouterloop:
1999	up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
2000breakouterloop_mmap_sem:
2001
2002	spin_lock(&khugepaged_mm_lock);
2003	VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2004	/*
2005	 * Release the current mm_slot if this mm is about to die, or
2006	 * if we scanned all vmas of this mm.
2007	 */
2008	if (khugepaged_test_exit(mm) || !vma) {
2009		/*
2010		 * Make sure that if mm_users is reaching zero while
2011		 * khugepaged runs here, khugepaged_exit will find
2012		 * mm_slot not pointing to the exiting mm.
2013		 */
2014		if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
2015			khugepaged_scan.mm_slot = list_entry(
2016				mm_slot->mm_node.next,
2017				struct mm_slot, mm_node);
2018			khugepaged_scan.address = 0;
2019		} else {
2020			khugepaged_scan.mm_slot = NULL;
2021			khugepaged_full_scans++;
2022		}
2023
2024		collect_mm_slot(mm_slot);
2025	}
2026
2027	return progress;
2028}
2029
2030static int khugepaged_has_work(void)
2031{
2032	return !list_empty(&khugepaged_scan.mm_head) &&
2033		khugepaged_enabled();
2034}
2035
2036static int khugepaged_wait_event(void)
2037{
2038	return !list_empty(&khugepaged_scan.mm_head) ||
2039		kthread_should_stop();
2040}
2041
2042static void khugepaged_do_scan(void)
2043{
2044	struct page *hpage = NULL;
2045	unsigned int progress = 0, pass_through_head = 0;
2046	unsigned int pages = khugepaged_pages_to_scan;
2047	bool wait = true;
2048
2049	barrier(); /* write khugepaged_pages_to_scan to local stack */
2050
2051	while (progress < pages) {
2052		if (!khugepaged_prealloc_page(&hpage, &wait))
2053			break;
2054
2055		cond_resched();
2056
2057		if (unlikely(kthread_should_stop() || try_to_freeze()))
2058			break;
2059
2060		spin_lock(&khugepaged_mm_lock);
2061		if (!khugepaged_scan.mm_slot)
2062			pass_through_head++;
2063		if (khugepaged_has_work() &&
2064		    pass_through_head < 2)
2065			progress += khugepaged_scan_mm_slot(pages - progress,
2066							    &hpage);
2067		else
2068			progress = pages;
2069		spin_unlock(&khugepaged_mm_lock);
2070	}
2071
2072	if (!IS_ERR_OR_NULL(hpage))
2073		put_page(hpage);
2074}
2075
2076static bool khugepaged_should_wakeup(void)
2077{
2078	return kthread_should_stop() ||
2079	       time_after_eq(jiffies, khugepaged_sleep_expire);
2080}
2081
2082static void khugepaged_wait_work(void)
2083{
2084	if (khugepaged_has_work()) {
2085		const unsigned long scan_sleep_jiffies =
2086			msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
2087
2088		if (!scan_sleep_jiffies)
2089			return;
2090
2091		khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
2092		wait_event_freezable_timeout(khugepaged_wait,
2093					     khugepaged_should_wakeup(),
2094					     scan_sleep_jiffies);
2095		return;
2096	}
2097
2098	if (khugepaged_enabled())
2099		wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
2100}
2101
2102static int khugepaged(void *none)
2103{
2104	struct mm_slot *mm_slot;
2105
2106	set_freezable();
2107	set_user_nice(current, MAX_NICE);
2108
2109	while (!kthread_should_stop()) {
2110		khugepaged_do_scan();
2111		khugepaged_wait_work();
2112	}
2113
2114	spin_lock(&khugepaged_mm_lock);
2115	mm_slot = khugepaged_scan.mm_slot;
2116	khugepaged_scan.mm_slot = NULL;
2117	if (mm_slot)
2118		collect_mm_slot(mm_slot);
2119	spin_unlock(&khugepaged_mm_lock);
2120	return 0;
2121}
2122
2123static void set_recommended_min_free_kbytes(void)
2124{
2125	struct zone *zone;
2126	int nr_zones = 0;
2127	unsigned long recommended_min;
2128
2129	for_each_populated_zone(zone) {
2130		/*
2131		 * We don't need to worry about fragmentation of
2132		 * ZONE_MOVABLE since it only has movable pages.
2133		 */
2134		if (zone_idx(zone) > gfp_zone(GFP_USER))
2135			continue;
2136
2137		nr_zones++;
2138	}
2139
2140	/* Ensure 2 pageblocks are free to assist fragmentation avoidance */
2141	recommended_min = pageblock_nr_pages * nr_zones * 2;
2142
2143	/*
2144	 * Make sure that on average at least two pageblocks are almost free
2145	 * of another type, one for a migratetype to fall back to and a
2146	 * second to avoid subsequent fallbacks of other types. There are 3
2147	 * MIGRATE_TYPES we care about.
2148	 */
2149	recommended_min += pageblock_nr_pages * nr_zones *
2150			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
2151
2152	/* don't ever allow reserving more than 5% of the lowmem */
2153	recommended_min = min(recommended_min,
2154			      (unsigned long) nr_free_buffer_pages() / 20);
2155	recommended_min <<= (PAGE_SHIFT-10);
2156
2157	if (recommended_min > min_free_kbytes) {
2158		if (user_min_free_kbytes >= 0)
2159			pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
2160				min_free_kbytes, recommended_min);
2161
2162		min_free_kbytes = recommended_min;
2163	}
2164	setup_per_zone_wmarks();
2165}
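/*
 * Rough numbers, assuming 4 KiB base pages (pageblock_nr_pages == 512,
 * i.e. 2 MiB pageblocks) and MIGRATE_PCPTYPES == 3: each qualifying zone
 * contributes 512 * 2 + 512 * 3 * 3 = 5632 pages, about 22 MiB, before the
 * total is capped at 5% of free lowmem and converted to kilobytes.
 */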
2166
2167int start_stop_khugepaged(void)
2168{
2169	static struct task_struct *khugepaged_thread __read_mostly;
2170	static DEFINE_MUTEX(khugepaged_mutex);
2171	int err = 0;
2172
2173	mutex_lock(&khugepaged_mutex);
2174	if (khugepaged_enabled()) {
2175		if (!khugepaged_thread)
2176			khugepaged_thread = kthread_run(khugepaged, NULL,
2177							"khugepaged");
2178		if (IS_ERR(khugepaged_thread)) {
2179			pr_err("khugepaged: kthread_run(khugepaged) failed\n");
2180			err = PTR_ERR(khugepaged_thread);
2181			khugepaged_thread = NULL;
2182			goto fail;
2183		}
2184
2185		if (!list_empty(&khugepaged_scan.mm_head))
2186			wake_up_interruptible(&khugepaged_wait);
2187
2188		set_recommended_min_free_kbytes();
2189	} else if (khugepaged_thread) {
2190		kthread_stop(khugepaged_thread);
2191		khugepaged_thread = NULL;
2192	}
2193fail:
2194	mutex_unlock(&khugepaged_mutex);
2195	return err;
2196}