v4.17
   1// SPDX-License-Identifier: GPL-2.0
   2#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   3
   4#include <linux/mm.h>
   5#include <linux/sched.h>
   6#include <linux/sched/mm.h>
   7#include <linux/sched/coredump.h>
   8#include <linux/mmu_notifier.h>
   9#include <linux/rmap.h>
  10#include <linux/swap.h>
  11#include <linux/mm_inline.h>
  12#include <linux/kthread.h>
  13#include <linux/khugepaged.h>
  14#include <linux/freezer.h>
  15#include <linux/mman.h>
  16#include <linux/hashtable.h>
  17#include <linux/userfaultfd_k.h>
  18#include <linux/page_idle.h>
  19#include <linux/swapops.h>
  20#include <linux/shmem_fs.h>
  21
  22#include <asm/tlb.h>
  23#include <asm/pgalloc.h>
  24#include "internal.h"
  25
  26enum scan_result {
  27	SCAN_FAIL,
  28	SCAN_SUCCEED,
  29	SCAN_PMD_NULL,
  30	SCAN_EXCEED_NONE_PTE,
  31	SCAN_PTE_NON_PRESENT,
  32	SCAN_PAGE_RO,
  33	SCAN_LACK_REFERENCED_PAGE,
  34	SCAN_PAGE_NULL,
  35	SCAN_SCAN_ABORT,
  36	SCAN_PAGE_COUNT,
  37	SCAN_PAGE_LRU,
  38	SCAN_PAGE_LOCK,
  39	SCAN_PAGE_ANON,
  40	SCAN_PAGE_COMPOUND,
  41	SCAN_ANY_PROCESS,
  42	SCAN_VMA_NULL,
  43	SCAN_VMA_CHECK,
  44	SCAN_ADDRESS_RANGE,
  45	SCAN_SWAP_CACHE_PAGE,
  46	SCAN_DEL_PAGE_LRU,
  47	SCAN_ALLOC_HUGE_PAGE_FAIL,
  48	SCAN_CGROUP_CHARGE_FAIL,
  49	SCAN_EXCEED_SWAP_PTE,
  50	SCAN_TRUNCATED,
  51};
  52
  53#define CREATE_TRACE_POINTS
  54#include <trace/events/huge_memory.h>
  55
   56/* default scan 8*512 pte (or vmas) every 30 seconds */
  57static unsigned int khugepaged_pages_to_scan __read_mostly;
  58static unsigned int khugepaged_pages_collapsed;
  59static unsigned int khugepaged_full_scans;
  60static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
  61/* during fragmentation poll the hugepage allocator once every minute */
  62static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
  63static unsigned long khugepaged_sleep_expire;
  64static DEFINE_SPINLOCK(khugepaged_mm_lock);
  65static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
  66/*
  67 * default collapse hugepages if there is at least one pte mapped like
   68 * it would have been if the vma had been large enough during page
  69 * fault.
  70 */
  71static unsigned int khugepaged_max_ptes_none __read_mostly;
  72static unsigned int khugepaged_max_ptes_swap __read_mostly;
  73
  74#define MM_SLOTS_HASH_BITS 10
  75static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
  76
  77static struct kmem_cache *mm_slot_cache __read_mostly;
  78
  79/**
  80 * struct mm_slot - hash lookup from mm to mm_slot
  81 * @hash: hash collision list
  82 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
  83 * @mm: the mm that this information is valid for
  84 */
  85struct mm_slot {
  86	struct hlist_node hash;
  87	struct list_head mm_node;
  88	struct mm_struct *mm;
  89};
  90
  91/**
  92 * struct khugepaged_scan - cursor for scanning
  93 * @mm_head: the head of the mm list to scan
  94 * @mm_slot: the current mm_slot we are scanning
  95 * @address: the next address inside that to be scanned
  96 *
  97 * There is only the one khugepaged_scan instance of this cursor structure.
  98 */
  99struct khugepaged_scan {
 100	struct list_head mm_head;
 101	struct mm_slot *mm_slot;
 102	unsigned long address;
 103};
 104
 105static struct khugepaged_scan khugepaged_scan = {
 106	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
 107};
 108
 109#ifdef CONFIG_SYSFS
 110static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
 111					 struct kobj_attribute *attr,
 112					 char *buf)
 113{
 114	return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
 115}
 116
 117static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
 118					  struct kobj_attribute *attr,
 119					  const char *buf, size_t count)
 120{
 121	unsigned long msecs;
 122	int err;
 123
 124	err = kstrtoul(buf, 10, &msecs);
 125	if (err || msecs > UINT_MAX)
 126		return -EINVAL;
 127
 128	khugepaged_scan_sleep_millisecs = msecs;
 129	khugepaged_sleep_expire = 0;
 130	wake_up_interruptible(&khugepaged_wait);
 131
 132	return count;
 133}
 134static struct kobj_attribute scan_sleep_millisecs_attr =
 135	__ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
 136	       scan_sleep_millisecs_store);
 137
 138static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
 139					  struct kobj_attribute *attr,
 140					  char *buf)
 141{
 142	return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
 143}
 144
 145static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
 146					   struct kobj_attribute *attr,
 147					   const char *buf, size_t count)
 148{
 149	unsigned long msecs;
 150	int err;
 151
 152	err = kstrtoul(buf, 10, &msecs);
 153	if (err || msecs > UINT_MAX)
 154		return -EINVAL;
 155
 156	khugepaged_alloc_sleep_millisecs = msecs;
 157	khugepaged_sleep_expire = 0;
 158	wake_up_interruptible(&khugepaged_wait);
 159
 160	return count;
 161}
 162static struct kobj_attribute alloc_sleep_millisecs_attr =
 163	__ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
 164	       alloc_sleep_millisecs_store);
 165
 166static ssize_t pages_to_scan_show(struct kobject *kobj,
 167				  struct kobj_attribute *attr,
 168				  char *buf)
 169{
 170	return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
 171}
 172static ssize_t pages_to_scan_store(struct kobject *kobj,
 173				   struct kobj_attribute *attr,
 174				   const char *buf, size_t count)
 175{
 176	int err;
 177	unsigned long pages;
 178
 179	err = kstrtoul(buf, 10, &pages);
 180	if (err || !pages || pages > UINT_MAX)
 181		return -EINVAL;
 182
 183	khugepaged_pages_to_scan = pages;
 184
 185	return count;
 186}
 187static struct kobj_attribute pages_to_scan_attr =
 188	__ATTR(pages_to_scan, 0644, pages_to_scan_show,
 189	       pages_to_scan_store);
 190
 191static ssize_t pages_collapsed_show(struct kobject *kobj,
 192				    struct kobj_attribute *attr,
 193				    char *buf)
 194{
 195	return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
 196}
 197static struct kobj_attribute pages_collapsed_attr =
 198	__ATTR_RO(pages_collapsed);
 199
 200static ssize_t full_scans_show(struct kobject *kobj,
 201			       struct kobj_attribute *attr,
 202			       char *buf)
 203{
 204	return sprintf(buf, "%u\n", khugepaged_full_scans);
 205}
 206static struct kobj_attribute full_scans_attr =
 207	__ATTR_RO(full_scans);
 208
 209static ssize_t khugepaged_defrag_show(struct kobject *kobj,
 210				      struct kobj_attribute *attr, char *buf)
 211{
 212	return single_hugepage_flag_show(kobj, attr, buf,
 213				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
 214}
 215static ssize_t khugepaged_defrag_store(struct kobject *kobj,
 216				       struct kobj_attribute *attr,
 217				       const char *buf, size_t count)
 218{
 219	return single_hugepage_flag_store(kobj, attr, buf, count,
 220				 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
 221}
 222static struct kobj_attribute khugepaged_defrag_attr =
 223	__ATTR(defrag, 0644, khugepaged_defrag_show,
 224	       khugepaged_defrag_store);
 225
 226/*
 227 * max_ptes_none controls if khugepaged should collapse hugepages over
  228 * any unmapped ptes, in turn potentially increasing the memory
 229 * footprint of the vmas. When max_ptes_none is 0 khugepaged will not
 230 * reduce the available free memory in the system as it
 231 * runs. Increasing max_ptes_none will instead potentially reduce the
 232 * free memory in the system during the khugepaged scan.
 233 */
 234static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
 235					     struct kobj_attribute *attr,
 236					     char *buf)
 237{
 238	return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
 239}
 240static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
 241					      struct kobj_attribute *attr,
 242					      const char *buf, size_t count)
 243{
 244	int err;
 245	unsigned long max_ptes_none;
 246
 247	err = kstrtoul(buf, 10, &max_ptes_none);
 248	if (err || max_ptes_none > HPAGE_PMD_NR-1)
 249		return -EINVAL;
 250
 251	khugepaged_max_ptes_none = max_ptes_none;
 252
 253	return count;
 254}
 255static struct kobj_attribute khugepaged_max_ptes_none_attr =
 256	__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
 257	       khugepaged_max_ptes_none_store);
 258
 259static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
 260					     struct kobj_attribute *attr,
 261					     char *buf)
 262{
 263	return sprintf(buf, "%u\n", khugepaged_max_ptes_swap);
 264}
 265
 266static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
 267					      struct kobj_attribute *attr,
 268					      const char *buf, size_t count)
 269{
 270	int err;
 271	unsigned long max_ptes_swap;
 272
 273	err  = kstrtoul(buf, 10, &max_ptes_swap);
 274	if (err || max_ptes_swap > HPAGE_PMD_NR-1)
 275		return -EINVAL;
 276
 277	khugepaged_max_ptes_swap = max_ptes_swap;
 278
 279	return count;
 280}
 281
 282static struct kobj_attribute khugepaged_max_ptes_swap_attr =
 283	__ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
 284	       khugepaged_max_ptes_swap_store);
 285
 286static struct attribute *khugepaged_attr[] = {
 287	&khugepaged_defrag_attr.attr,
 288	&khugepaged_max_ptes_none_attr.attr,
 289	&pages_to_scan_attr.attr,
 290	&pages_collapsed_attr.attr,
 291	&full_scans_attr.attr,
 292	&scan_sleep_millisecs_attr.attr,
 293	&alloc_sleep_millisecs_attr.attr,
 294	&khugepaged_max_ptes_swap_attr.attr,
 295	NULL,
 296};
 297
 298struct attribute_group khugepaged_attr_group = {
 299	.attrs = khugepaged_attr,
 300	.name = "khugepaged",
 301};
 302#endif /* CONFIG_SYSFS */
 303
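The attribute group above is exported under /sys/kernel/mm/transparent_hugepage/khugepaged/, so each tunable can be read or written from userspace. As a hedged illustration (editor-added userspace code, not part of this kernel file; the values written are arbitrary examples and writing normally requires root), a minimal sketch:

/*
 * Editor-added illustration (not kernel code): tune khugepaged through the
 * sysfs attributes defined above. Paths follow the __ATTR() names; the
 * values are arbitrary examples.
 */
#include <stdio.h>

#define KHUGEPAGED_SYSFS "/sys/kernel/mm/transparent_hugepage/khugepaged/"

static int write_tunable(const char *name, const char *val)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), KHUGEPAGED_SYSFS "%s", name);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%s\n", val);
	return fclose(f);
}

int main(void)
{
	char buf[64];
	FILE *f;

	/* scan more ptes per pass, wake up every 10 seconds */
	write_tunable("pages_to_scan", "4096");
	write_tunable("scan_sleep_millisecs", "10000");

	/* pages_collapsed and full_scans are read-only statistics */
	f = fopen(KHUGEPAGED_SYSFS "pages_collapsed", "r");
	if (f && fgets(buf, sizeof(buf), f))
		printf("pages_collapsed: %s", buf);
	if (f)
		fclose(f);
	return 0;
}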
 304#define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)
 305
 306int hugepage_madvise(struct vm_area_struct *vma,
 307		     unsigned long *vm_flags, int advice)
 308{
 309	switch (advice) {
 310	case MADV_HUGEPAGE:
 311#ifdef CONFIG_S390
 312		/*
 313		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
 314		 * can't handle this properly after s390_enable_sie, so we simply
 315		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
 316		 */
 317		if (mm_has_pgste(vma->vm_mm))
 318			return 0;
 319#endif
 320		*vm_flags &= ~VM_NOHUGEPAGE;
 321		*vm_flags |= VM_HUGEPAGE;
 322		/*
  323		 * If the vma becomes good for khugepaged to scan,
  324		 * register it here without waiting for a page fault that
 325		 * may not happen any time soon.
 326		 */
 327		if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
 328				khugepaged_enter_vma_merge(vma, *vm_flags))
 329			return -ENOMEM;
 330		break;
 331	case MADV_NOHUGEPAGE:
 332		*vm_flags &= ~VM_HUGEPAGE;
 333		*vm_flags |= VM_NOHUGEPAGE;
 334		/*
 335		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
 336		 * this vma even if we leave the mm registered in khugepaged if
 337		 * it got registered before VM_NOHUGEPAGE was set.
 338		 */
 339		break;
 340	}
 341
 342	return 0;
 343}
 344
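hugepage_madvise() above is what services madvise(MADV_HUGEPAGE) and madvise(MADV_NOHUGEPAGE) on a mapping, setting VM_HUGEPAGE/VM_NOHUGEPAGE and registering the mm with khugepaged when appropriate. A minimal userspace sketch that opts a region in (editor-added illustration; the 8 MiB length is an arbitrary assumption, and the libc headers are assumed to expose MADV_HUGEPAGE):

/*
 * Editor-added illustration: opt an anonymous mapping in to khugepaged
 * collapse with MADV_HUGEPAGE. The 8 MiB length is an arbitrary example;
 * PMD-sized huge pages are typically 2 MiB on x86-64.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 8UL << 20;		/* 8 MiB */
	size_t i;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return EXIT_FAILURE;
	}

	/* Clears VM_NOHUGEPAGE, sets VM_HUGEPAGE, registers the mm if needed. */
	if (madvise(p, len, MADV_HUGEPAGE))
		perror("madvise(MADV_HUGEPAGE)");

	/* Fault the range in; khugepaged may later collapse it to huge pages. */
	for (i = 0; i < len; i += 4096)
		p[i] = 1;

	munmap(p, len);
	return 0;
}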
 345int __init khugepaged_init(void)
 346{
 347	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
 348					  sizeof(struct mm_slot),
 349					  __alignof__(struct mm_slot), 0, NULL);
 350	if (!mm_slot_cache)
 351		return -ENOMEM;
 352
 353	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
 354	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
 355	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
 356
 357	return 0;
 358}
 359
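For concreteness, on a configuration with 4 KiB base pages and 2 MiB PMD-sized huge pages (an assumption; HPAGE_PMD_NR = 512 there), the defaults chosen in khugepaged_init() work out as in this small editor-added sketch:

/*
 * Editor-added illustration of the defaults set above, assuming 4 KiB base
 * pages and 2 MiB PMD huge pages, i.e. HPAGE_PMD_NR = 512.
 */
#include <stdio.h>

int main(void)
{
	int hpage_pmd_nr = 512;		/* assumed: 2 MiB / 4 KiB */

	printf("pages_to_scan = %d\n", hpage_pmd_nr * 8);	/* 4096 */
	printf("max_ptes_none = %d\n", hpage_pmd_nr - 1);	/* 511 */
	printf("max_ptes_swap = %d\n", hpage_pmd_nr / 8);	/* 64 */
	return 0;
}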
 360void __init khugepaged_destroy(void)
 361{
 362	kmem_cache_destroy(mm_slot_cache);
 363}
 364
 365static inline struct mm_slot *alloc_mm_slot(void)
 366{
 367	if (!mm_slot_cache)	/* initialization failed */
 368		return NULL;
 369	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
 370}
 371
 372static inline void free_mm_slot(struct mm_slot *mm_slot)
 373{
 374	kmem_cache_free(mm_slot_cache, mm_slot);
 375}
 376
 377static struct mm_slot *get_mm_slot(struct mm_struct *mm)
 378{
 379	struct mm_slot *mm_slot;
 380
 381	hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
 382		if (mm == mm_slot->mm)
 383			return mm_slot;
 384
 385	return NULL;
 386}
 387
 388static void insert_to_mm_slots_hash(struct mm_struct *mm,
 389				    struct mm_slot *mm_slot)
 390{
 391	mm_slot->mm = mm;
 392	hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
 393}
 394
 395static inline int khugepaged_test_exit(struct mm_struct *mm)
 396{
 397	return atomic_read(&mm->mm_users) == 0;
 398}
 399
 400int __khugepaged_enter(struct mm_struct *mm)
 401{
 402	struct mm_slot *mm_slot;
 403	int wakeup;
 404
 405	mm_slot = alloc_mm_slot();
 406	if (!mm_slot)
 407		return -ENOMEM;
 408
 409	/* __khugepaged_exit() must not run from under us */
 410	VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
 411	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
 412		free_mm_slot(mm_slot);
 413		return 0;
 414	}
 415
 416	spin_lock(&khugepaged_mm_lock);
 417	insert_to_mm_slots_hash(mm, mm_slot);
 418	/*
 419	 * Insert just behind the scanning cursor, to let the area settle
 420	 * down a little.
 421	 */
 422	wakeup = list_empty(&khugepaged_scan.mm_head);
 423	list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
 424	spin_unlock(&khugepaged_mm_lock);
 425
 426	mmgrab(mm);
 427	if (wakeup)
 428		wake_up_interruptible(&khugepaged_wait);
 429
 430	return 0;
 431}
 432
 433int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
 434			       unsigned long vm_flags)
 435{
 436	unsigned long hstart, hend;
 437	if (!vma->anon_vma)
 438		/*
 439		 * Not yet faulted in so we will register later in the
 440		 * page fault if needed.
 441		 */
 442		return 0;
 443	if (vma->vm_ops || (vm_flags & VM_NO_KHUGEPAGED))
 444		/* khugepaged not yet working on file or special mappings */
 445		return 0;
 446	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
 447	hend = vma->vm_end & HPAGE_PMD_MASK;
 448	if (hstart < hend)
 449		return khugepaged_enter(vma, vm_flags);
 450	return 0;
 451}
 452
 453void __khugepaged_exit(struct mm_struct *mm)
 454{
 455	struct mm_slot *mm_slot;
 456	int free = 0;
 457
 458	spin_lock(&khugepaged_mm_lock);
 459	mm_slot = get_mm_slot(mm);
 460	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
 461		hash_del(&mm_slot->hash);
 462		list_del(&mm_slot->mm_node);
 463		free = 1;
 464	}
 465	spin_unlock(&khugepaged_mm_lock);
 466
 467	if (free) {
 468		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
 469		free_mm_slot(mm_slot);
 470		mmdrop(mm);
 471	} else if (mm_slot) {
 472		/*
 473		 * This is required to serialize against
 474		 * khugepaged_test_exit() (which is guaranteed to run
 475		 * under mmap sem read mode). Stop here (after we
 476		 * return all pagetables will be destroyed) until
 477		 * khugepaged has finished working on the pagetables
 478		 * under the mmap_sem.
 479		 */
 480		down_write(&mm->mmap_sem);
 481		up_write(&mm->mmap_sem);
 482	}
 483}
 484
 485static void release_pte_page(struct page *page)
 486{
 487	dec_node_page_state(page, NR_ISOLATED_ANON + page_is_file_cache(page));
 488	unlock_page(page);
 489	putback_lru_page(page);
 490}
 491
 492static void release_pte_pages(pte_t *pte, pte_t *_pte)
 493{
 494	while (--_pte >= pte) {
 495		pte_t pteval = *_pte;
 496		if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)))
 497			release_pte_page(pte_page(pteval));
 498	}
 499}
 500
 501static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 502					unsigned long address,
 503					pte_t *pte)
 504{
 505	struct page *page = NULL;
 506	pte_t *_pte;
 507	int none_or_zero = 0, result = 0, referenced = 0;
 508	bool writable = false;
 509
 510	for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
 511	     _pte++, address += PAGE_SIZE) {
 512		pte_t pteval = *_pte;
 513		if (pte_none(pteval) || (pte_present(pteval) &&
 514				is_zero_pfn(pte_pfn(pteval)))) {
 515			if (!userfaultfd_armed(vma) &&
 516			    ++none_or_zero <= khugepaged_max_ptes_none) {
 517				continue;
 518			} else {
 519				result = SCAN_EXCEED_NONE_PTE;
 520				goto out;
 521			}
 522		}
 523		if (!pte_present(pteval)) {
 524			result = SCAN_PTE_NON_PRESENT;
 525			goto out;
 526		}
 527		page = vm_normal_page(vma, address, pteval);
 528		if (unlikely(!page)) {
 529			result = SCAN_PAGE_NULL;
 530			goto out;
 531		}
 532
 533		/* TODO: teach khugepaged to collapse THP mapped with pte */
 534		if (PageCompound(page)) {
 535			result = SCAN_PAGE_COMPOUND;
 536			goto out;
 537		}
 538
 539		VM_BUG_ON_PAGE(!PageAnon(page), page);
 540
 541		/*
 542		 * We can do it before isolate_lru_page because the
 543		 * page can't be freed from under us. NOTE: PG_lock
 544		 * is needed to serialize against split_huge_page
 545		 * when invoked from the VM.
 546		 */
 547		if (!trylock_page(page)) {
 548			result = SCAN_PAGE_LOCK;
 549			goto out;
 550		}
 551
 552		/*
 553		 * cannot use mapcount: can't collapse if there's a gup pin.
 554		 * The page must only be referenced by the scanned process
 555		 * and page swap cache.
 556		 */
 557		if (page_count(page) != 1 + PageSwapCache(page)) {
 558			unlock_page(page);
 559			result = SCAN_PAGE_COUNT;
 560			goto out;
 561		}
 562		if (pte_write(pteval)) {
 563			writable = true;
 564		} else {
 565			if (PageSwapCache(page) &&
 566			    !reuse_swap_page(page, NULL)) {
 567				unlock_page(page);
 568				result = SCAN_SWAP_CACHE_PAGE;
 569				goto out;
 570			}
 571			/*
 572			 * Page is not in the swap cache. It can be collapsed
 573			 * into a THP.
 574			 */
 575		}
 576
 577		/*
 578		 * Isolate the page to avoid collapsing an hugepage
 579		 * currently in use by the VM.
 580		 */
 581		if (isolate_lru_page(page)) {
 582			unlock_page(page);
 583			result = SCAN_DEL_PAGE_LRU;
 584			goto out;
 585		}
 586		inc_node_page_state(page,
 587				NR_ISOLATED_ANON + page_is_file_cache(page));
 588		VM_BUG_ON_PAGE(!PageLocked(page), page);
 589		VM_BUG_ON_PAGE(PageLRU(page), page);
 590
  591		/* There should be enough young ptes to collapse the page */
 592		if (pte_young(pteval) ||
 593		    page_is_young(page) || PageReferenced(page) ||
 594		    mmu_notifier_test_young(vma->vm_mm, address))
 595			referenced++;
 596	}
 597	if (likely(writable)) {
 598		if (likely(referenced)) {
 599			result = SCAN_SUCCEED;
 600			trace_mm_collapse_huge_page_isolate(page, none_or_zero,
 601							    referenced, writable, result);
 602			return 1;
 603		}
 604	} else {
 605		result = SCAN_PAGE_RO;
 606	}
 607
 608out:
 609	release_pte_pages(pte, _pte);
 610	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
 611					    referenced, writable, result);
 612	return 0;
 613}
 614
 615static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
 616				      struct vm_area_struct *vma,
 617				      unsigned long address,
 618				      spinlock_t *ptl)
 619{
 620	pte_t *_pte;
 621	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
 622				_pte++, page++, address += PAGE_SIZE) {
 623		pte_t pteval = *_pte;
 624		struct page *src_page;
 625
 626		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
 627			clear_user_highpage(page, address);
 628			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
 629			if (is_zero_pfn(pte_pfn(pteval))) {
 630				/*
 631				 * ptl mostly unnecessary.
 632				 */
 633				spin_lock(ptl);
 634				/*
 635				 * paravirt calls inside pte_clear here are
 636				 * superfluous.
 637				 */
 638				pte_clear(vma->vm_mm, address, _pte);
 639				spin_unlock(ptl);
 640			}
 641		} else {
 642			src_page = pte_page(pteval);
 643			copy_user_highpage(page, src_page, address, vma);
 644			VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page);
 645			release_pte_page(src_page);
 646			/*
 647			 * ptl mostly unnecessary, but preempt has to
 648			 * be disabled to update the per-cpu stats
 649			 * inside page_remove_rmap().
 650			 */
 651			spin_lock(ptl);
 652			/*
 653			 * paravirt calls inside pte_clear here are
 654			 * superfluous.
 655			 */
 656			pte_clear(vma->vm_mm, address, _pte);
 657			page_remove_rmap(src_page, false);
 658			spin_unlock(ptl);
 659			free_page_and_swap_cache(src_page);
 660		}
 661	}
 662}
 663
 664static void khugepaged_alloc_sleep(void)
 665{
 666	DEFINE_WAIT(wait);
 667
 668	add_wait_queue(&khugepaged_wait, &wait);
 669	freezable_schedule_timeout_interruptible(
 670		msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
 671	remove_wait_queue(&khugepaged_wait, &wait);
 672}
 673
 674static int khugepaged_node_load[MAX_NUMNODES];
 675
 676static bool khugepaged_scan_abort(int nid)
 677{
 678	int i;
 679
 680	/*
 681	 * If node_reclaim_mode is disabled, then no extra effort is made to
 682	 * allocate memory locally.
 683	 */
 684	if (!node_reclaim_mode)
 685		return false;
 686
 687	/* If there is a count for this node already, it must be acceptable */
 688	if (khugepaged_node_load[nid])
 689		return false;
 690
 691	for (i = 0; i < MAX_NUMNODES; i++) {
 692		if (!khugepaged_node_load[i])
 693			continue;
 694		if (node_distance(nid, i) > RECLAIM_DISTANCE)
 695			return true;
 696	}
 697	return false;
 698}
 699
 700/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
 701static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
 702{
 703	return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
 704}
 705
 706#ifdef CONFIG_NUMA
 707static int khugepaged_find_target_node(void)
 708{
 709	static int last_khugepaged_target_node = NUMA_NO_NODE;
 710	int nid, target_node = 0, max_value = 0;
 711
 712	/* find first node with max normal pages hit */
 713	for (nid = 0; nid < MAX_NUMNODES; nid++)
 714		if (khugepaged_node_load[nid] > max_value) {
 715			max_value = khugepaged_node_load[nid];
 716			target_node = nid;
 717		}
 718
 719	/* do some balance if several nodes have the same hit record */
 720	if (target_node <= last_khugepaged_target_node)
 721		for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
 722				nid++)
 723			if (max_value == khugepaged_node_load[nid]) {
 724				target_node = nid;
 725				break;
 726			}
 727
 728	last_khugepaged_target_node = target_node;
 729	return target_node;
 730}
 731
 732static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
 733{
 734	if (IS_ERR(*hpage)) {
 735		if (!*wait)
 736			return false;
 737
 738		*wait = false;
 739		*hpage = NULL;
 740		khugepaged_alloc_sleep();
 741	} else if (*hpage) {
 742		put_page(*hpage);
 743		*hpage = NULL;
 744	}
 745
 746	return true;
 747}
 748
 749static struct page *
 750khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
 751{
 752	VM_BUG_ON_PAGE(*hpage, *hpage);
 753
 754	*hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
 755	if (unlikely(!*hpage)) {
 756		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 757		*hpage = ERR_PTR(-ENOMEM);
 758		return NULL;
 759	}
 760
 761	prep_transhuge_page(*hpage);
 762	count_vm_event(THP_COLLAPSE_ALLOC);
 763	return *hpage;
 764}
 765#else
 766static int khugepaged_find_target_node(void)
 767{
 768	return 0;
 769}
 770
 771static inline struct page *alloc_khugepaged_hugepage(void)
 772{
 773	struct page *page;
 774
 775	page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
 776			   HPAGE_PMD_ORDER);
 777	if (page)
 778		prep_transhuge_page(page);
 779	return page;
 780}
 781
 782static struct page *khugepaged_alloc_hugepage(bool *wait)
 783{
 784	struct page *hpage;
 785
 786	do {
 787		hpage = alloc_khugepaged_hugepage();
 788		if (!hpage) {
 789			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 790			if (!*wait)
 791				return NULL;
 792
 793			*wait = false;
 794			khugepaged_alloc_sleep();
 795		} else
 796			count_vm_event(THP_COLLAPSE_ALLOC);
 797	} while (unlikely(!hpage) && likely(khugepaged_enabled()));
 798
 799	return hpage;
 800}
 801
 802static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
 803{
 804	if (!*hpage)
 805		*hpage = khugepaged_alloc_hugepage(wait);
 806
 807	if (unlikely(!*hpage))
 808		return false;
 809
 810	return true;
 811}
 812
 813static struct page *
 814khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
 815{
 816	VM_BUG_ON(!*hpage);
 817
 818	return  *hpage;
 819}
 820#endif
 821
 822static bool hugepage_vma_check(struct vm_area_struct *vma)
 823{
 824	if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
 825	    (vma->vm_flags & VM_NOHUGEPAGE) ||
 826	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
 827		return false;
 828	if (shmem_file(vma->vm_file)) {
 829		if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
 830			return false;
 831		return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
 832				HPAGE_PMD_NR);
 833	}
 834	if (!vma->anon_vma || vma->vm_ops)
 835		return false;
 836	if (is_vma_temporary_stack(vma))
 837		return false;
 838	return !(vma->vm_flags & VM_NO_KHUGEPAGED);
 839}
 840
 841/*
  842 * If mmap_sem was temporarily dropped, revalidate the vma
 843 * before taking mmap_sem.
  844 * Return 0 if it succeeds, otherwise return a non-zero
 845 * value (scan code).
 846 */
 847
 848static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
 849		struct vm_area_struct **vmap)
 850{
 851	struct vm_area_struct *vma;
 852	unsigned long hstart, hend;
 853
 854	if (unlikely(khugepaged_test_exit(mm)))
 855		return SCAN_ANY_PROCESS;
 856
 857	*vmap = vma = find_vma(mm, address);
 858	if (!vma)
 859		return SCAN_VMA_NULL;
 860
 861	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
 862	hend = vma->vm_end & HPAGE_PMD_MASK;
 863	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
 864		return SCAN_ADDRESS_RANGE;
 865	if (!hugepage_vma_check(vma))
 866		return SCAN_VMA_CHECK;
 867	return 0;
 868}
 869
 870/*
 871 * Bring missing pages in from swap, to complete THP collapse.
 872 * Only done if khugepaged_scan_pmd believes it is worthwhile.
 873 *
 874 * Called and returns without pte mapped or spinlocks held,
 875 * but with mmap_sem held to protect against vma changes.
 876 */
 877
 878static bool __collapse_huge_page_swapin(struct mm_struct *mm,
 879					struct vm_area_struct *vma,
 880					unsigned long address, pmd_t *pmd,
 881					int referenced)
 882{
 883	int swapped_in = 0, ret = 0;
 884	struct vm_fault vmf = {
 885		.vma = vma,
 886		.address = address,
 887		.flags = FAULT_FLAG_ALLOW_RETRY,
 888		.pmd = pmd,
 889		.pgoff = linear_page_index(vma, address),
 890	};
 891
  892	/* we only decide to swap in if there are enough young ptes */
 893	if (referenced < HPAGE_PMD_NR/2) {
 894		trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
 895		return false;
 896	}
 897	vmf.pte = pte_offset_map(pmd, address);
 898	for (; vmf.address < address + HPAGE_PMD_NR*PAGE_SIZE;
 899			vmf.pte++, vmf.address += PAGE_SIZE) {
 900		vmf.orig_pte = *vmf.pte;
 901		if (!is_swap_pte(vmf.orig_pte))
 902			continue;
 903		swapped_in++;
 904		ret = do_swap_page(&vmf);
 905
 906		/* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */
 907		if (ret & VM_FAULT_RETRY) {
 908			down_read(&mm->mmap_sem);
 909			if (hugepage_vma_revalidate(mm, address, &vmf.vma)) {
 910				/* vma is no longer available, don't continue to swapin */
 911				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
 912				return false;
 913			}
 914			/* check if the pmd is still valid */
 915			if (mm_find_pmd(mm, address) != pmd) {
 916				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
 917				return false;
 918			}
 919		}
 920		if (ret & VM_FAULT_ERROR) {
 921			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
 922			return false;
 923		}
 924		/* pte is unmapped now, we need to map it */
 925		vmf.pte = pte_offset_map(pmd, vmf.address);
 926	}
 927	vmf.pte--;
 928	pte_unmap(vmf.pte);
 929	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
 930	return true;
 931}
 932
 933static void collapse_huge_page(struct mm_struct *mm,
 934				   unsigned long address,
 935				   struct page **hpage,
 936				   int node, int referenced)
 937{
 938	pmd_t *pmd, _pmd;
 939	pte_t *pte;
 940	pgtable_t pgtable;
 941	struct page *new_page;
 942	spinlock_t *pmd_ptl, *pte_ptl;
 943	int isolated = 0, result = 0;
 944	struct mem_cgroup *memcg;
 945	struct vm_area_struct *vma;
 946	unsigned long mmun_start;	/* For mmu_notifiers */
 947	unsigned long mmun_end;		/* For mmu_notifiers */
 948	gfp_t gfp;
 949
 950	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 951
 952	/* Only allocate from the target node */
 953	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
 954
 955	/*
 956	 * Before allocating the hugepage, release the mmap_sem read lock.
 957	 * The allocation can take potentially a long time if it involves
 958	 * sync compaction, and we do not need to hold the mmap_sem during
 959	 * that. We will recheck the vma after taking it again in write mode.
 960	 */
 961	up_read(&mm->mmap_sem);
 962	new_page = khugepaged_alloc_page(hpage, gfp, node);
 963	if (!new_page) {
 964		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
 965		goto out_nolock;
 966	}
 967
 968	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
 969		result = SCAN_CGROUP_CHARGE_FAIL;
 970		goto out_nolock;
 971	}
 972
 973	down_read(&mm->mmap_sem);
 974	result = hugepage_vma_revalidate(mm, address, &vma);
 975	if (result) {
 976		mem_cgroup_cancel_charge(new_page, memcg, true);
 977		up_read(&mm->mmap_sem);
 978		goto out_nolock;
 979	}
 980
 981	pmd = mm_find_pmd(mm, address);
 982	if (!pmd) {
 983		result = SCAN_PMD_NULL;
 984		mem_cgroup_cancel_charge(new_page, memcg, true);
 985		up_read(&mm->mmap_sem);
 986		goto out_nolock;
 987	}
 988
 989	/*
 990	 * __collapse_huge_page_swapin always returns with mmap_sem locked.
 991	 * If it fails, we release mmap_sem and jump out_nolock.
 992	 * Continuing to collapse causes inconsistency.
 993	 */
 994	if (!__collapse_huge_page_swapin(mm, vma, address, pmd, referenced)) {
 995		mem_cgroup_cancel_charge(new_page, memcg, true);
 996		up_read(&mm->mmap_sem);
 997		goto out_nolock;
 998	}
 999
1000	up_read(&mm->mmap_sem);
1001	/*
1002	 * Prevent all access to pagetables with the exception of
1003	 * gup_fast later handled by the ptep_clear_flush and the VM
1004	 * handled by the anon_vma lock + PG_lock.
1005	 */
1006	down_write(&mm->mmap_sem);
1007	result = hugepage_vma_revalidate(mm, address, &vma);
1008	if (result)
1009		goto out;
1010	/* check if the pmd is still valid */
1011	if (mm_find_pmd(mm, address) != pmd)
1012		goto out;
1013
1014	anon_vma_lock_write(vma->anon_vma);
1015
1016	pte = pte_offset_map(pmd, address);
1017	pte_ptl = pte_lockptr(mm, pmd);
1018
1019	mmun_start = address;
1020	mmun_end   = address + HPAGE_PMD_SIZE;
1021	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
1022	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
1023	/*
1024	 * After this gup_fast can't run anymore. This also removes
1025	 * any huge TLB entry from the CPU so we won't allow
1026	 * huge and small TLB entries for the same virtual address
1027	 * to avoid the risk of CPU bugs in that area.
1028	 */
1029	_pmd = pmdp_collapse_flush(vma, address, pmd);
1030	spin_unlock(pmd_ptl);
1031	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1032
1033	spin_lock(pte_ptl);
1034	isolated = __collapse_huge_page_isolate(vma, address, pte);
1035	spin_unlock(pte_ptl);
1036
1037	if (unlikely(!isolated)) {
1038		pte_unmap(pte);
1039		spin_lock(pmd_ptl);
1040		BUG_ON(!pmd_none(*pmd));
1041		/*
1042		 * We can only use set_pmd_at when establishing
1043		 * hugepmds and never for establishing regular pmds that
1044		 * points to regular pagetables. Use pmd_populate for that
1045		 */
1046		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
1047		spin_unlock(pmd_ptl);
1048		anon_vma_unlock_write(vma->anon_vma);
1049		result = SCAN_FAIL;
1050		goto out;
1051	}
1052
1053	/*
1054	 * All pages are isolated and locked so anon_vma rmap
1055	 * can't run anymore.
1056	 */
1057	anon_vma_unlock_write(vma->anon_vma);
1058
1059	__collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl);
1060	pte_unmap(pte);
1061	__SetPageUptodate(new_page);
1062	pgtable = pmd_pgtable(_pmd);
1063
1064	_pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
1065	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1066
1067	/*
1068	 * spin_lock() below is not the equivalent of smp_wmb(), so
 1069	 * this is needed to avoid the copy_huge_page writes becoming
1070	 * visible after the set_pmd_at() write.
1071	 */
1072	smp_wmb();
1073
1074	spin_lock(pmd_ptl);
1075	BUG_ON(!pmd_none(*pmd));
1076	page_add_new_anon_rmap(new_page, vma, address, true);
1077	mem_cgroup_commit_charge(new_page, memcg, false, true);
1078	lru_cache_add_active_or_unevictable(new_page, vma);
1079	pgtable_trans_huge_deposit(mm, pmd, pgtable);
1080	set_pmd_at(mm, address, pmd, _pmd);
1081	update_mmu_cache_pmd(vma, address, pmd);
1082	spin_unlock(pmd_ptl);
1083
1084	*hpage = NULL;
1085
1086	khugepaged_pages_collapsed++;
1087	result = SCAN_SUCCEED;
1088out_up_write:
1089	up_write(&mm->mmap_sem);
1090out_nolock:
1091	trace_mm_collapse_huge_page(mm, isolated, result);
1092	return;
1093out:
1094	mem_cgroup_cancel_charge(new_page, memcg, true);
1095	goto out_up_write;
1096}
1097
1098static int khugepaged_scan_pmd(struct mm_struct *mm,
1099			       struct vm_area_struct *vma,
1100			       unsigned long address,
1101			       struct page **hpage)
1102{
1103	pmd_t *pmd;
1104	pte_t *pte, *_pte;
1105	int ret = 0, none_or_zero = 0, result = 0, referenced = 0;
1106	struct page *page = NULL;
1107	unsigned long _address;
1108	spinlock_t *ptl;
1109	int node = NUMA_NO_NODE, unmapped = 0;
1110	bool writable = false;
1111
1112	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1113
1114	pmd = mm_find_pmd(mm, address);
1115	if (!pmd) {
1116		result = SCAN_PMD_NULL;
1117		goto out;
1118	}
1119
1120	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1121	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
1122	for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
1123	     _pte++, _address += PAGE_SIZE) {
1124		pte_t pteval = *_pte;
1125		if (is_swap_pte(pteval)) {
1126			if (++unmapped <= khugepaged_max_ptes_swap) {
1127				continue;
1128			} else {
1129				result = SCAN_EXCEED_SWAP_PTE;
1130				goto out_unmap;
1131			}
1132		}
1133		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
1134			if (!userfaultfd_armed(vma) &&
1135			    ++none_or_zero <= khugepaged_max_ptes_none) {
1136				continue;
1137			} else {
1138				result = SCAN_EXCEED_NONE_PTE;
1139				goto out_unmap;
1140			}
1141		}
1142		if (!pte_present(pteval)) {
1143			result = SCAN_PTE_NON_PRESENT;
1144			goto out_unmap;
1145		}
1146		if (pte_write(pteval))
1147			writable = true;
1148
1149		page = vm_normal_page(vma, _address, pteval);
1150		if (unlikely(!page)) {
1151			result = SCAN_PAGE_NULL;
1152			goto out_unmap;
1153		}
1154
1155		/* TODO: teach khugepaged to collapse THP mapped with pte */
1156		if (PageCompound(page)) {
1157			result = SCAN_PAGE_COMPOUND;
1158			goto out_unmap;
1159		}
1160
1161		/*
1162		 * Record which node the original page is from and save this
1163		 * information to khugepaged_node_load[].
 1164		 * Khugepaged will allocate the hugepage from the node that has the max
1165		 * hit record.
1166		 */
1167		node = page_to_nid(page);
1168		if (khugepaged_scan_abort(node)) {
1169			result = SCAN_SCAN_ABORT;
1170			goto out_unmap;
1171		}
1172		khugepaged_node_load[node]++;
1173		if (!PageLRU(page)) {
1174			result = SCAN_PAGE_LRU;
1175			goto out_unmap;
1176		}
1177		if (PageLocked(page)) {
1178			result = SCAN_PAGE_LOCK;
1179			goto out_unmap;
1180		}
1181		if (!PageAnon(page)) {
1182			result = SCAN_PAGE_ANON;
1183			goto out_unmap;
1184		}
1185
1186		/*
1187		 * cannot use mapcount: can't collapse if there's a gup pin.
1188		 * The page must only be referenced by the scanned process
1189		 * and page swap cache.
1190		 */
1191		if (page_count(page) != 1 + PageSwapCache(page)) {
1192			result = SCAN_PAGE_COUNT;
1193			goto out_unmap;
1194		}
1195		if (pte_young(pteval) ||
1196		    page_is_young(page) || PageReferenced(page) ||
1197		    mmu_notifier_test_young(vma->vm_mm, address))
1198			referenced++;
1199	}
1200	if (writable) {
1201		if (referenced) {
1202			result = SCAN_SUCCEED;
1203			ret = 1;
1204		} else {
1205			result = SCAN_LACK_REFERENCED_PAGE;
1206		}
1207	} else {
1208		result = SCAN_PAGE_RO;
1209	}
1210out_unmap:
1211	pte_unmap_unlock(pte, ptl);
1212	if (ret) {
1213		node = khugepaged_find_target_node();
1214		/* collapse_huge_page will return with the mmap_sem released */
1215		collapse_huge_page(mm, address, hpage, node, referenced);
1216	}
1217out:
1218	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
1219				     none_or_zero, result, unmapped);
1220	return ret;
1221}
1222
1223static void collect_mm_slot(struct mm_slot *mm_slot)
1224{
1225	struct mm_struct *mm = mm_slot->mm;
1226
1227	VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
1228
1229	if (khugepaged_test_exit(mm)) {
1230		/* free mm_slot */
1231		hash_del(&mm_slot->hash);
1232		list_del(&mm_slot->mm_node);
1233
1234		/*
1235		 * Not strictly needed because the mm exited already.
1236		 *
1237		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1238		 */
1239
1240		/* khugepaged_mm_lock actually not necessary for the below */
1241		free_mm_slot(mm_slot);
1242		mmdrop(mm);
1243	}
1244}
1245
1246#if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE)
1247static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1248{
1249	struct vm_area_struct *vma;
1250	unsigned long addr;
1251	pmd_t *pmd, _pmd;
1252
1253	i_mmap_lock_write(mapping);
1254	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
1255		/* probably overkill */
1256		if (vma->anon_vma)
1257			continue;
1258		addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1259		if (addr & ~HPAGE_PMD_MASK)
1260			continue;
1261		if (vma->vm_end < addr + HPAGE_PMD_SIZE)
1262			continue;
1263		pmd = mm_find_pmd(vma->vm_mm, addr);
1264		if (!pmd)
1265			continue;
1266		/*
1267		 * We need exclusive mmap_sem to retract page table.
1268		 * If trylock fails we would end up with pte-mapped THP after
1269		 * re-fault. Not ideal, but it's more important to not disturb
1270		 * the system too much.
1271		 */
1272		if (down_write_trylock(&vma->vm_mm->mmap_sem)) {
1273			spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd);
1274			/* assume page table is clear */
1275			_pmd = pmdp_collapse_flush(vma, addr, pmd);
1276			spin_unlock(ptl);
1277			up_write(&vma->vm_mm->mmap_sem);
1278			mm_dec_nr_ptes(vma->vm_mm);
1279			pte_free(vma->vm_mm, pmd_pgtable(_pmd));
1280		}
1281	}
1282	i_mmap_unlock_write(mapping);
1283}
1284
1285/**
1286 * collapse_shmem - collapse small tmpfs/shmem pages into huge one.
1287 *
1288 * Basic scheme is simple, details are more complex:
1289 *  - allocate and freeze a new huge page;
 1290 *  - scan over radix tree replacing old pages with the new one
1291 *    + swap in pages if necessary;
1292 *    + fill in gaps;
1293 *    + keep old pages around in case if rollback is required;
 1294 *  - if replacing succeeds:
1295 *    + copy data over;
1296 *    + free old pages;
1297 *    + unfreeze huge page;
 1298 *  - if replacing failed:
1299 *    + put all pages back and unfreeze them;
1300 *    + restore gaps in the radix-tree;
1301 *    + free huge page;
1302 */
1303static void collapse_shmem(struct mm_struct *mm,
1304		struct address_space *mapping, pgoff_t start,
1305		struct page **hpage, int node)
1306{
1307	gfp_t gfp;
1308	struct page *page, *new_page, *tmp;
1309	struct mem_cgroup *memcg;
1310	pgoff_t index, end = start + HPAGE_PMD_NR;
1311	LIST_HEAD(pagelist);
1312	struct radix_tree_iter iter;
1313	void **slot;
1314	int nr_none = 0, result = SCAN_SUCCEED;
1315
1316	VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1317
1318	/* Only allocate from the target node */
1319	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
1320
1321	new_page = khugepaged_alloc_page(hpage, gfp, node);
1322	if (!new_page) {
1323		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
1324		goto out;
1325	}
1326
1327	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
1328		result = SCAN_CGROUP_CHARGE_FAIL;
1329		goto out;
1330	}
1331
1332	new_page->index = start;
1333	new_page->mapping = mapping;
1334	__SetPageSwapBacked(new_page);
1335	__SetPageLocked(new_page);
1336	BUG_ON(!page_ref_freeze(new_page, 1));
1337
1338
1339	/*
1340	 * At this point the new_page is 'frozen' (page_count() is zero), locked
1341	 * and not up-to-date. It's safe to insert it into radix tree, because
 1342	 * nobody would be able to map it or use it in any other way until we
1343	 * unfreeze it.
1344	 */
1345
1346	index = start;
1347	xa_lock_irq(&mapping->i_pages);
1348	radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start) {
1349		int n = min(iter.index, end) - index;
1350
1351		/*
1352		 * Handle holes in the radix tree: charge it from shmem and
1353		 * insert relevant subpage of new_page into the radix-tree.
1354		 */
1355		if (n && !shmem_charge(mapping->host, n)) {
1356			result = SCAN_FAIL;
1357			break;
1358		}
1359		nr_none += n;
1360		for (; index < min(iter.index, end); index++) {
1361			radix_tree_insert(&mapping->i_pages, index,
1362					new_page + (index % HPAGE_PMD_NR));
1363		}
1364
1365		/* We are done. */
1366		if (index >= end)
1367			break;
1368
1369		page = radix_tree_deref_slot_protected(slot,
1370				&mapping->i_pages.xa_lock);
1371		if (radix_tree_exceptional_entry(page) || !PageUptodate(page)) {
1372			xa_unlock_irq(&mapping->i_pages);
1373			/* swap in or instantiate fallocated page */
1374			if (shmem_getpage(mapping->host, index, &page,
1375						SGP_NOHUGE)) {
1376				result = SCAN_FAIL;
1377				goto tree_unlocked;
1378			}
1379			xa_lock_irq(&mapping->i_pages);
1380		} else if (trylock_page(page)) {
1381			get_page(page);
1382		} else {
1383			result = SCAN_PAGE_LOCK;
1384			break;
1385		}
1386
1387		/*
1388		 * The page must be locked, so we can drop the i_pages lock
1389		 * without racing with truncate.
1390		 */
1391		VM_BUG_ON_PAGE(!PageLocked(page), page);
1392		VM_BUG_ON_PAGE(!PageUptodate(page), page);
1393		VM_BUG_ON_PAGE(PageTransCompound(page), page);
1394
1395		if (page_mapping(page) != mapping) {
1396			result = SCAN_TRUNCATED;
1397			goto out_unlock;
1398		}
1399		xa_unlock_irq(&mapping->i_pages);
1400
1401		if (isolate_lru_page(page)) {
1402			result = SCAN_DEL_PAGE_LRU;
1403			goto out_isolate_failed;
1404		}
1405
1406		if (page_mapped(page))
1407			unmap_mapping_pages(mapping, index, 1, false);
1408
1409		xa_lock_irq(&mapping->i_pages);
1410
1411		slot = radix_tree_lookup_slot(&mapping->i_pages, index);
1412		VM_BUG_ON_PAGE(page != radix_tree_deref_slot_protected(slot,
1413					&mapping->i_pages.xa_lock), page);
1414		VM_BUG_ON_PAGE(page_mapped(page), page);
1415
1416		/*
1417		 * The page is expected to have page_count() == 3:
1418		 *  - we hold a pin on it;
1419		 *  - one reference from radix tree;
1420		 *  - one from isolate_lru_page;
1421		 */
1422		if (!page_ref_freeze(page, 3)) {
1423			result = SCAN_PAGE_COUNT;
1424			goto out_lru;
1425		}
1426
1427		/*
1428		 * Add the page to the list to be able to undo the collapse if
 1429		 * something goes wrong.
1430		 */
1431		list_add_tail(&page->lru, &pagelist);
1432
1433		/* Finally, replace with the new page. */
1434		radix_tree_replace_slot(&mapping->i_pages, slot,
1435				new_page + (index % HPAGE_PMD_NR));
1436
1437		slot = radix_tree_iter_resume(slot, &iter);
1438		index++;
1439		continue;
1440out_lru:
1441		xa_unlock_irq(&mapping->i_pages);
1442		putback_lru_page(page);
1443out_isolate_failed:
1444		unlock_page(page);
1445		put_page(page);
1446		goto tree_unlocked;
1447out_unlock:
1448		unlock_page(page);
1449		put_page(page);
1450		break;
1451	}
1452
1453	/*
1454	 * Handle hole in radix tree at the end of the range.
1455	 * This code only triggers if there's nothing in radix tree
1456	 * beyond 'end'.
1457	 */
1458	if (result == SCAN_SUCCEED && index < end) {
1459		int n = end - index;
1460
1461		if (!shmem_charge(mapping->host, n)) {
1462			result = SCAN_FAIL;
1463			goto tree_locked;
1464		}
1465
1466		for (; index < end; index++) {
1467			radix_tree_insert(&mapping->i_pages, index,
1468					new_page + (index % HPAGE_PMD_NR));
1469		}
1470		nr_none += n;
1471	}
1472
1473tree_locked:
1474	xa_unlock_irq(&mapping->i_pages);
1475tree_unlocked:
1476
1477	if (result == SCAN_SUCCEED) {
1478		unsigned long flags;
1479		struct zone *zone = page_zone(new_page);
1480
1481		/*
 1482		 * Replacing old pages with the new one has succeeded, now we need to
1483		 * copy the content and free old pages.
1484		 */
1485		list_for_each_entry_safe(page, tmp, &pagelist, lru) {
1486			copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
1487					page);
1488			list_del(&page->lru);
1489			unlock_page(page);
1490			page_ref_unfreeze(page, 1);
1491			page->mapping = NULL;
1492			ClearPageActive(page);
1493			ClearPageUnevictable(page);
1494			put_page(page);
1495		}
1496
1497		local_irq_save(flags);
1498		__inc_node_page_state(new_page, NR_SHMEM_THPS);
1499		if (nr_none) {
1500			__mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
1501			__mod_node_page_state(zone->zone_pgdat, NR_SHMEM, nr_none);
1502		}
1503		local_irq_restore(flags);
1504
1505		/*
 1506		 * Remove pte page tables, so we can re-fault
1507		 * the page as huge.
1508		 */
1509		retract_page_tables(mapping, start);
1510
1511		/* Everything is ready, let's unfreeze the new_page */
1512		set_page_dirty(new_page);
1513		SetPageUptodate(new_page);
1514		page_ref_unfreeze(new_page, HPAGE_PMD_NR);
1515		mem_cgroup_commit_charge(new_page, memcg, false, true);
1516		lru_cache_add_anon(new_page);
1517		unlock_page(new_page);
1518
1519		*hpage = NULL;
1520	} else {
1521		/* Something went wrong: rollback changes to the radix-tree */
1522		shmem_uncharge(mapping->host, nr_none);
1523		xa_lock_irq(&mapping->i_pages);
1524		radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start) {
1525			if (iter.index >= end)
1526				break;
1527			page = list_first_entry_or_null(&pagelist,
1528					struct page, lru);
1529			if (!page || iter.index < page->index) {
1530				if (!nr_none)
1531					break;
1532				nr_none--;
1533				/* Put holes back where they were */
1534				radix_tree_delete(&mapping->i_pages, iter.index);
1535				continue;
1536			}
1537
1538			VM_BUG_ON_PAGE(page->index != iter.index, page);
1539
1540			/* Unfreeze the page. */
1541			list_del(&page->lru);
1542			page_ref_unfreeze(page, 2);
1543			radix_tree_replace_slot(&mapping->i_pages, slot, page);
1544			slot = radix_tree_iter_resume(slot, &iter);
1545			xa_unlock_irq(&mapping->i_pages);
1546			putback_lru_page(page);
1547			unlock_page(page);
1548			xa_lock_irq(&mapping->i_pages);
1549		}
1550		VM_BUG_ON(nr_none);
1551		xa_unlock_irq(&mapping->i_pages);
1552
1553		/* Unfreeze new_page, caller would take care about freeing it */
1554		page_ref_unfreeze(new_page, 1);
1555		mem_cgroup_cancel_charge(new_page, memcg, true);
1556		unlock_page(new_page);
1557		new_page->mapping = NULL;
1558	}
1559out:
1560	VM_BUG_ON(!list_empty(&pagelist));
1561	/* TODO: tracepoints */
1562}
1563
1564static void khugepaged_scan_shmem(struct mm_struct *mm,
1565		struct address_space *mapping,
1566		pgoff_t start, struct page **hpage)
1567{
1568	struct page *page = NULL;
1569	struct radix_tree_iter iter;
1570	void **slot;
1571	int present, swap;
1572	int node = NUMA_NO_NODE;
1573	int result = SCAN_SUCCEED;
1574
1575	present = 0;
1576	swap = 0;
1577	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1578	rcu_read_lock();
1579	radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start) {
1580		if (iter.index >= start + HPAGE_PMD_NR)
1581			break;
1582
1583		page = radix_tree_deref_slot(slot);
1584		if (radix_tree_deref_retry(page)) {
1585			slot = radix_tree_iter_retry(&iter);
1586			continue;
1587		}
1588
1589		if (radix_tree_exception(page)) {
1590			if (++swap > khugepaged_max_ptes_swap) {
1591				result = SCAN_EXCEED_SWAP_PTE;
1592				break;
1593			}
1594			continue;
1595		}
1596
1597		if (PageTransCompound(page)) {
1598			result = SCAN_PAGE_COMPOUND;
1599			break;
1600		}
1601
1602		node = page_to_nid(page);
1603		if (khugepaged_scan_abort(node)) {
1604			result = SCAN_SCAN_ABORT;
1605			break;
1606		}
1607		khugepaged_node_load[node]++;
1608
1609		if (!PageLRU(page)) {
1610			result = SCAN_PAGE_LRU;
1611			break;
1612		}
1613
1614		if (page_count(page) != 1 + page_mapcount(page)) {
1615			result = SCAN_PAGE_COUNT;
1616			break;
1617		}
1618
1619		/*
1620		 * We probably should check if the page is referenced here, but
1621		 * nobody would transfer pte_young() to PageReferenced() for us.
1622		 * And rmap walk here is just too costly...
1623		 */
1624
1625		present++;
1626
1627		if (need_resched()) {
1628			slot = radix_tree_iter_resume(slot, &iter);
1629			cond_resched_rcu();
1630		}
1631	}
1632	rcu_read_unlock();
1633
1634	if (result == SCAN_SUCCEED) {
1635		if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
1636			result = SCAN_EXCEED_NONE_PTE;
1637		} else {
1638			node = khugepaged_find_target_node();
1639			collapse_shmem(mm, mapping, start, hpage, node);
1640		}
1641	}
1642
1643	/* TODO: tracepoints */
1644}
1645#else
1646static void khugepaged_scan_shmem(struct mm_struct *mm,
1647		struct address_space *mapping,
1648		pgoff_t start, struct page **hpage)
1649{
1650	BUILD_BUG();
1651}
1652#endif
1653
1654static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
1655					    struct page **hpage)
1656	__releases(&khugepaged_mm_lock)
1657	__acquires(&khugepaged_mm_lock)
1658{
1659	struct mm_slot *mm_slot;
1660	struct mm_struct *mm;
1661	struct vm_area_struct *vma;
1662	int progress = 0;
1663
1664	VM_BUG_ON(!pages);
1665	VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
1666
1667	if (khugepaged_scan.mm_slot)
1668		mm_slot = khugepaged_scan.mm_slot;
1669	else {
1670		mm_slot = list_entry(khugepaged_scan.mm_head.next,
1671				     struct mm_slot, mm_node);
1672		khugepaged_scan.address = 0;
1673		khugepaged_scan.mm_slot = mm_slot;
1674	}
1675	spin_unlock(&khugepaged_mm_lock);
1676
1677	mm = mm_slot->mm;
1678	/*
1679	 * Don't wait for semaphore (to avoid long wait times).  Just move to
1680	 * the next mm on the list.
1681	 */
1682	vma = NULL;
1683	if (unlikely(!down_read_trylock(&mm->mmap_sem)))
1684		goto breakouterloop_mmap_sem;
1685	if (likely(!khugepaged_test_exit(mm)))
1686		vma = find_vma(mm, khugepaged_scan.address);
1687
1688	progress++;
1689	for (; vma; vma = vma->vm_next) {
1690		unsigned long hstart, hend;
1691
1692		cond_resched();
1693		if (unlikely(khugepaged_test_exit(mm))) {
1694			progress++;
1695			break;
1696		}
1697		if (!hugepage_vma_check(vma)) {
1698skip:
1699			progress++;
1700			continue;
1701		}
1702		hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1703		hend = vma->vm_end & HPAGE_PMD_MASK;
1704		if (hstart >= hend)
1705			goto skip;
1706		if (khugepaged_scan.address > hend)
1707			goto skip;
1708		if (khugepaged_scan.address < hstart)
1709			khugepaged_scan.address = hstart;
1710		VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
1711
1712		while (khugepaged_scan.address < hend) {
1713			int ret;
1714			cond_resched();
1715			if (unlikely(khugepaged_test_exit(mm)))
1716				goto breakouterloop;
1717
1718			VM_BUG_ON(khugepaged_scan.address < hstart ||
1719				  khugepaged_scan.address + HPAGE_PMD_SIZE >
1720				  hend);
1721			if (shmem_file(vma->vm_file)) {
1722				struct file *file;
1723				pgoff_t pgoff = linear_page_index(vma,
1724						khugepaged_scan.address);
1725				if (!shmem_huge_enabled(vma))
1726					goto skip;
1727				file = get_file(vma->vm_file);
1728				up_read(&mm->mmap_sem);
1729				ret = 1;
1730				khugepaged_scan_shmem(mm, file->f_mapping,
1731						pgoff, hpage);
1732				fput(file);
1733			} else {
1734				ret = khugepaged_scan_pmd(mm, vma,
1735						khugepaged_scan.address,
1736						hpage);
1737			}
1738			/* move to next address */
1739			khugepaged_scan.address += HPAGE_PMD_SIZE;
1740			progress += HPAGE_PMD_NR;
1741			if (ret)
1742				/* we released mmap_sem so break loop */
1743				goto breakouterloop_mmap_sem;
1744			if (progress >= pages)
1745				goto breakouterloop;
1746		}
1747	}
1748breakouterloop:
1749	up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
1750breakouterloop_mmap_sem:
1751
1752	spin_lock(&khugepaged_mm_lock);
1753	VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
1754	/*
1755	 * Release the current mm_slot if this mm is about to die, or
1756	 * if we scanned all vmas of this mm.
1757	 */
1758	if (khugepaged_test_exit(mm) || !vma) {
1759		/*
1760		 * Make sure that if mm_users is reaching zero while
1761		 * khugepaged runs here, khugepaged_exit will find
1762		 * mm_slot not pointing to the exiting mm.
1763		 */
1764		if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
1765			khugepaged_scan.mm_slot = list_entry(
1766				mm_slot->mm_node.next,
1767				struct mm_slot, mm_node);
1768			khugepaged_scan.address = 0;
1769		} else {
1770			khugepaged_scan.mm_slot = NULL;
1771			khugepaged_full_scans++;
1772		}
1773
1774		collect_mm_slot(mm_slot);
1775	}
1776
1777	return progress;
1778}
1779
1780static int khugepaged_has_work(void)
1781{
1782	return !list_empty(&khugepaged_scan.mm_head) &&
1783		khugepaged_enabled();
1784}
1785
1786static int khugepaged_wait_event(void)
1787{
1788	return !list_empty(&khugepaged_scan.mm_head) ||
1789		kthread_should_stop();
1790}
1791
1792static void khugepaged_do_scan(void)
1793{
1794	struct page *hpage = NULL;
1795	unsigned int progress = 0, pass_through_head = 0;
1796	unsigned int pages = khugepaged_pages_to_scan;
1797	bool wait = true;
1798
1799	barrier(); /* write khugepaged_pages_to_scan to local stack */
1800
1801	while (progress < pages) {
1802		if (!khugepaged_prealloc_page(&hpage, &wait))
1803			break;
1804
1805		cond_resched();
1806
1807		if (unlikely(kthread_should_stop() || try_to_freeze()))
1808			break;
1809
1810		spin_lock(&khugepaged_mm_lock);
1811		if (!khugepaged_scan.mm_slot)
1812			pass_through_head++;
1813		if (khugepaged_has_work() &&
1814		    pass_through_head < 2)
1815			progress += khugepaged_scan_mm_slot(pages - progress,
1816							    &hpage);
1817		else
1818			progress = pages;
1819		spin_unlock(&khugepaged_mm_lock);
1820	}
1821
1822	if (!IS_ERR_OR_NULL(hpage))
1823		put_page(hpage);
1824}
1825
1826static bool khugepaged_should_wakeup(void)
1827{
1828	return kthread_should_stop() ||
1829	       time_after_eq(jiffies, khugepaged_sleep_expire);
1830}
1831
1832static void khugepaged_wait_work(void)
1833{
1834	if (khugepaged_has_work()) {
1835		const unsigned long scan_sleep_jiffies =
1836			msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
1837
1838		if (!scan_sleep_jiffies)
1839			return;
1840
1841		khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
1842		wait_event_freezable_timeout(khugepaged_wait,
1843					     khugepaged_should_wakeup(),
1844					     scan_sleep_jiffies);
1845		return;
1846	}
1847
1848	if (khugepaged_enabled())
1849		wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
1850}
1851
1852static int khugepaged(void *none)
1853{
1854	struct mm_slot *mm_slot;
1855
1856	set_freezable();
1857	set_user_nice(current, MAX_NICE);
1858
1859	while (!kthread_should_stop()) {
1860		khugepaged_do_scan();
1861		khugepaged_wait_work();
1862	}
1863
1864	spin_lock(&khugepaged_mm_lock);
1865	mm_slot = khugepaged_scan.mm_slot;
1866	khugepaged_scan.mm_slot = NULL;
1867	if (mm_slot)
1868		collect_mm_slot(mm_slot);
1869	spin_unlock(&khugepaged_mm_lock);
1870	return 0;
1871}
1872
1873static void set_recommended_min_free_kbytes(void)
1874{
1875	struct zone *zone;
1876	int nr_zones = 0;
1877	unsigned long recommended_min;
1878
1879	for_each_populated_zone(zone) {
1880		/*
1881		 * We don't need to worry about fragmentation of
1882		 * ZONE_MOVABLE since it only has movable pages.
1883		 */
1884		if (zone_idx(zone) > gfp_zone(GFP_USER))
1885			continue;
1886
1887		nr_zones++;
1888	}
1889
1890	/* Ensure 2 pageblocks are free to assist fragmentation avoidance */
1891	recommended_min = pageblock_nr_pages * nr_zones * 2;
1892
1893	/*
1894	 * Make sure that on average at least two pageblocks are almost free
1895	 * of another type, one for a migratetype to fall back to and a
1896	 * second to avoid subsequent fallbacks of other types. There are 3
1897	 * MIGRATE_TYPES we care about.
1898	 */
1899	recommended_min += pageblock_nr_pages * nr_zones *
1900			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
1901
1902	/* don't ever allow reserving more than 5% of the lowmem */
1903	recommended_min = min(recommended_min,
1904			      (unsigned long) nr_free_buffer_pages() / 20);
1905	recommended_min <<= (PAGE_SHIFT-10);
1906
1907	if (recommended_min > min_free_kbytes) {
1908		if (user_min_free_kbytes >= 0)
1909			pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
1910				min_free_kbytes, recommended_min);
1911
1912		min_free_kbytes = recommended_min;
1913	}
1914	setup_per_zone_wmarks();
1915}
1916
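/*
 * A minimal userspace sketch, assuming procfs is mounted at /proc: the
 * watermark that set_recommended_min_free_kbytes() may raise above is
 * exported as the vm.min_free_kbytes sysctl and can simply be read back
 * to observe the effect.
 */
#include <stdio.h>

int main(void)
{
	unsigned long kbytes;
	FILE *f = fopen("/proc/sys/vm/min_free_kbytes", "r");

	if (!f || fscanf(f, "%lu", &kbytes) != 1) {
		perror("min_free_kbytes");
		return 1;
	}
	fclose(f);
	printf("min_free_kbytes: %lu kB\n", kbytes);
	return 0;
}
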
1917int start_stop_khugepaged(void)
1918{
1919	static struct task_struct *khugepaged_thread __read_mostly;
1920	static DEFINE_MUTEX(khugepaged_mutex);
1921	int err = 0;
1922
1923	mutex_lock(&khugepaged_mutex);
1924	if (khugepaged_enabled()) {
1925		if (!khugepaged_thread)
1926			khugepaged_thread = kthread_run(khugepaged, NULL,
1927							"khugepaged");
1928		if (IS_ERR(khugepaged_thread)) {
1929			pr_err("khugepaged: kthread_run(khugepaged) failed\n");
1930			err = PTR_ERR(khugepaged_thread);
1931			khugepaged_thread = NULL;
1932			goto fail;
1933		}
1934
1935		if (!list_empty(&khugepaged_scan.mm_head))
1936			wake_up_interruptible(&khugepaged_wait);
1937
1938		set_recommended_min_free_kbytes();
1939	} else if (khugepaged_thread) {
1940		kthread_stop(khugepaged_thread);
1941		khugepaged_thread = NULL;
1942	}
1943fail:
1944	mutex_unlock(&khugepaged_mutex);
1945	return err;
1946}
v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   3
   4#include <linux/mm.h>
   5#include <linux/sched.h>
   6#include <linux/sched/mm.h>
   7#include <linux/sched/coredump.h>
   8#include <linux/mmu_notifier.h>
   9#include <linux/rmap.h>
  10#include <linux/swap.h>
  11#include <linux/mm_inline.h>
  12#include <linux/kthread.h>
  13#include <linux/khugepaged.h>
  14#include <linux/freezer.h>
  15#include <linux/mman.h>
  16#include <linux/hashtable.h>
  17#include <linux/userfaultfd_k.h>
  18#include <linux/page_idle.h>
  19#include <linux/swapops.h>
  20#include <linux/shmem_fs.h>
  21
  22#include <asm/tlb.h>
  23#include <asm/pgalloc.h>
  24#include "internal.h"
  25
  26enum scan_result {
  27	SCAN_FAIL,
  28	SCAN_SUCCEED,
  29	SCAN_PMD_NULL,
  30	SCAN_EXCEED_NONE_PTE,
  31	SCAN_PTE_NON_PRESENT,
  32	SCAN_PAGE_RO,
  33	SCAN_LACK_REFERENCED_PAGE,
  34	SCAN_PAGE_NULL,
  35	SCAN_SCAN_ABORT,
  36	SCAN_PAGE_COUNT,
  37	SCAN_PAGE_LRU,
  38	SCAN_PAGE_LOCK,
  39	SCAN_PAGE_ANON,
  40	SCAN_PAGE_COMPOUND,
  41	SCAN_ANY_PROCESS,
  42	SCAN_VMA_NULL,
  43	SCAN_VMA_CHECK,
  44	SCAN_ADDRESS_RANGE,
  45	SCAN_SWAP_CACHE_PAGE,
  46	SCAN_DEL_PAGE_LRU,
  47	SCAN_ALLOC_HUGE_PAGE_FAIL,
  48	SCAN_CGROUP_CHARGE_FAIL,
  49	SCAN_EXCEED_SWAP_PTE,
  50	SCAN_TRUNCATED,
  51	SCAN_PAGE_HAS_PRIVATE,
  52};
  53
  54#define CREATE_TRACE_POINTS
  55#include <trace/events/huge_memory.h>
  56
  57/* default scan 8*512 pte (or vmas) every 30 second */
  58static unsigned int khugepaged_pages_to_scan __read_mostly;
  59static unsigned int khugepaged_pages_collapsed;
  60static unsigned int khugepaged_full_scans;
  61static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
  62/* during fragmentation poll the hugepage allocator once every minute */
  63static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
  64static unsigned long khugepaged_sleep_expire;
  65static DEFINE_SPINLOCK(khugepaged_mm_lock);
  66static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
  67/*
  68 * default collapse hugepages if there is at least one pte mapped like
  69 * it would have happened if the vma was large enough during page
  70 * fault.
  71 */
  72static unsigned int khugepaged_max_ptes_none __read_mostly;
  73static unsigned int khugepaged_max_ptes_swap __read_mostly;
  74
  75#define MM_SLOTS_HASH_BITS 10
  76static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
  77
  78static struct kmem_cache *mm_slot_cache __read_mostly;
  79
  80#define MAX_PTE_MAPPED_THP 8
  81
  82/**
  83 * struct mm_slot - hash lookup from mm to mm_slot
  84 * @hash: hash collision list
  85 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
  86 * @mm: the mm that this information is valid for
  87 */
  88struct mm_slot {
  89	struct hlist_node hash;
  90	struct list_head mm_node;
  91	struct mm_struct *mm;
  92
  93	/* pte-mapped THP in this mm */
  94	int nr_pte_mapped_thp;
  95	unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
  96};
  97
  98/**
  99 * struct khugepaged_scan - cursor for scanning
 100 * @mm_head: the head of the mm list to scan
 101 * @mm_slot: the current mm_slot we are scanning
 102 * @address: the next address inside that to be scanned
 103 *
 104 * There is only the one khugepaged_scan instance of this cursor structure.
 105 */
 106struct khugepaged_scan {
 107	struct list_head mm_head;
 108	struct mm_slot *mm_slot;
 109	unsigned long address;
 110};
 111
 112static struct khugepaged_scan khugepaged_scan = {
 113	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
 114};
 115
 116#ifdef CONFIG_SYSFS
 117static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
 118					 struct kobj_attribute *attr,
 119					 char *buf)
 120{
 121	return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
 122}
 123
 124static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
 125					  struct kobj_attribute *attr,
 126					  const char *buf, size_t count)
 127{
 128	unsigned long msecs;
 129	int err;
 130
 131	err = kstrtoul(buf, 10, &msecs);
 132	if (err || msecs > UINT_MAX)
 133		return -EINVAL;
 134
 135	khugepaged_scan_sleep_millisecs = msecs;
 136	khugepaged_sleep_expire = 0;
 137	wake_up_interruptible(&khugepaged_wait);
 138
 139	return count;
 140}
 141static struct kobj_attribute scan_sleep_millisecs_attr =
 142	__ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
 143	       scan_sleep_millisecs_store);
 144
 145static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
 146					  struct kobj_attribute *attr,
 147					  char *buf)
 148{
 149	return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
 150}
 151
 152static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
 153					   struct kobj_attribute *attr,
 154					   const char *buf, size_t count)
 155{
 156	unsigned long msecs;
 157	int err;
 158
 159	err = kstrtoul(buf, 10, &msecs);
 160	if (err || msecs > UINT_MAX)
 161		return -EINVAL;
 162
 163	khugepaged_alloc_sleep_millisecs = msecs;
 164	khugepaged_sleep_expire = 0;
 165	wake_up_interruptible(&khugepaged_wait);
 166
 167	return count;
 168}
 169static struct kobj_attribute alloc_sleep_millisecs_attr =
 170	__ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
 171	       alloc_sleep_millisecs_store);
 172
 173static ssize_t pages_to_scan_show(struct kobject *kobj,
 174				  struct kobj_attribute *attr,
 175				  char *buf)
 176{
 177	return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
 178}
 179static ssize_t pages_to_scan_store(struct kobject *kobj,
 180				   struct kobj_attribute *attr,
 181				   const char *buf, size_t count)
 182{
 183	int err;
 184	unsigned long pages;
 185
 186	err = kstrtoul(buf, 10, &pages);
 187	if (err || !pages || pages > UINT_MAX)
 188		return -EINVAL;
 189
 190	khugepaged_pages_to_scan = pages;
 191
 192	return count;
 193}
 194static struct kobj_attribute pages_to_scan_attr =
 195	__ATTR(pages_to_scan, 0644, pages_to_scan_show,
 196	       pages_to_scan_store);
 197
 198static ssize_t pages_collapsed_show(struct kobject *kobj,
 199				    struct kobj_attribute *attr,
 200				    char *buf)
 201{
 202	return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
 203}
 204static struct kobj_attribute pages_collapsed_attr =
 205	__ATTR_RO(pages_collapsed);
 206
 207static ssize_t full_scans_show(struct kobject *kobj,
 208			       struct kobj_attribute *attr,
 209			       char *buf)
 210{
 211	return sprintf(buf, "%u\n", khugepaged_full_scans);
 212}
 213static struct kobj_attribute full_scans_attr =
 214	__ATTR_RO(full_scans);
 215
 216static ssize_t khugepaged_defrag_show(struct kobject *kobj,
 217				      struct kobj_attribute *attr, char *buf)
 218{
 219	return single_hugepage_flag_show(kobj, attr, buf,
 220				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
 221}
 222static ssize_t khugepaged_defrag_store(struct kobject *kobj,
 223				       struct kobj_attribute *attr,
 224				       const char *buf, size_t count)
 225{
 226	return single_hugepage_flag_store(kobj, attr, buf, count,
 227				 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
 228}
 229static struct kobj_attribute khugepaged_defrag_attr =
 230	__ATTR(defrag, 0644, khugepaged_defrag_show,
 231	       khugepaged_defrag_store);
 232
 233/*
234 * max_ptes_none controls whether khugepaged should collapse hugepages
235 * over any unmapped ptes, in turn potentially increasing the memory
236 * footprint of the vmas. When max_ptes_none is 0, khugepaged will not
237 * reduce the available free memory in the system as it
238 * runs. Increasing max_ptes_none will instead potentially reduce the
239 * free memory in the system during the khugepaged scan.
 240 */
 241static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
 242					     struct kobj_attribute *attr,
 243					     char *buf)
 244{
 245	return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
 246}
 247static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
 248					      struct kobj_attribute *attr,
 249					      const char *buf, size_t count)
 250{
 251	int err;
 252	unsigned long max_ptes_none;
 253
 254	err = kstrtoul(buf, 10, &max_ptes_none);
 255	if (err || max_ptes_none > HPAGE_PMD_NR-1)
 256		return -EINVAL;
 257
 258	khugepaged_max_ptes_none = max_ptes_none;
 259
 260	return count;
 261}
 262static struct kobj_attribute khugepaged_max_ptes_none_attr =
 263	__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
 264	       khugepaged_max_ptes_none_store);
 265
 266static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
 267					     struct kobj_attribute *attr,
 268					     char *buf)
 269{
 270	return sprintf(buf, "%u\n", khugepaged_max_ptes_swap);
 271}
 272
 273static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
 274					      struct kobj_attribute *attr,
 275					      const char *buf, size_t count)
 276{
 277	int err;
 278	unsigned long max_ptes_swap;
 279
 280	err  = kstrtoul(buf, 10, &max_ptes_swap);
 281	if (err || max_ptes_swap > HPAGE_PMD_NR-1)
 282		return -EINVAL;
 283
 284	khugepaged_max_ptes_swap = max_ptes_swap;
 285
 286	return count;
 287}
 288
 289static struct kobj_attribute khugepaged_max_ptes_swap_attr =
 290	__ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
 291	       khugepaged_max_ptes_swap_store);
 292
 293static struct attribute *khugepaged_attr[] = {
 294	&khugepaged_defrag_attr.attr,
 295	&khugepaged_max_ptes_none_attr.attr,
 296	&pages_to_scan_attr.attr,
 297	&pages_collapsed_attr.attr,
 298	&full_scans_attr.attr,
 299	&scan_sleep_millisecs_attr.attr,
 300	&alloc_sleep_millisecs_attr.attr,
 301	&khugepaged_max_ptes_swap_attr.attr,
 302	NULL,
 303};
 304
 305struct attribute_group khugepaged_attr_group = {
 306	.attrs = khugepaged_attr,
 307	.name = "khugepaged",
 308};
 309#endif /* CONFIG_SYSFS */
 310
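/*
 * A minimal userspace sketch for the attributes defined above, assuming
 * sysfs is mounted at /sys: with CONFIG_SYSFS they appear under
 * /sys/kernel/mm/transparent_hugepage/khugepaged/ and the 0644 ones
 * (max_ptes_none, pages_to_scan, ...) additionally accept writes,
 * typically from root.
 */
#include <stdio.h>

#define KHUGEPAGED_SYSFS "/sys/kernel/mm/transparent_hugepage/khugepaged/"

static int read_knob(const char *name, unsigned int *val)
{
	char path[256];
	FILE *f;
	int ret;

	snprintf(path, sizeof(path), KHUGEPAGED_SYSFS "%s", name);
	f = fopen(path, "r");
	if (!f)
		return -1;
	ret = (fscanf(f, "%u", val) == 1) ? 0 : -1;
	fclose(f);
	return ret;
}

int main(void)
{
	unsigned int collapsed, scans;

	if (!read_knob("pages_collapsed", &collapsed) &&
	    !read_knob("full_scans", &scans))
		printf("pages_collapsed=%u full_scans=%u\n", collapsed, scans);

	/*
	 * Writing works the same way, e.g. lowering max_ptes_none:
	 *   echo 64 > /sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_none
	 */
	return 0;
}
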
 311#define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)
 312
 313int hugepage_madvise(struct vm_area_struct *vma,
 314		     unsigned long *vm_flags, int advice)
 315{
 316	switch (advice) {
 317	case MADV_HUGEPAGE:
 318#ifdef CONFIG_S390
 319		/*
 320		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
 321		 * can't handle this properly after s390_enable_sie, so we simply
 322		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
 323		 */
 324		if (mm_has_pgste(vma->vm_mm))
 325			return 0;
 326#endif
 327		*vm_flags &= ~VM_NOHUGEPAGE;
 328		*vm_flags |= VM_HUGEPAGE;
 329		/*
330		 * If the vma becomes good for khugepaged to scan,
331		 * register it here without waiting for a page fault that
332		 * may not happen any time soon.
 333		 */
 334		if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
 335				khugepaged_enter_vma_merge(vma, *vm_flags))
 336			return -ENOMEM;
 337		break;
 338	case MADV_NOHUGEPAGE:
 339		*vm_flags &= ~VM_HUGEPAGE;
 340		*vm_flags |= VM_NOHUGEPAGE;
 341		/*
 342		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
 343		 * this vma even if we leave the mm registered in khugepaged if
 344		 * it got registered before VM_NOHUGEPAGE was set.
 345		 */
 346		break;
 347	}
 348
 349	return 0;
 350}
 351
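/*
 * A minimal userspace sketch of the call path into hugepage_madvise()
 * above, assuming a 2MiB PMD size: map an anonymous region, mark it
 * MADV_HUGEPAGE so the mm gets registered with khugepaged, then touch
 * it so there are ptes that a later scan may collapse.
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 8UL << 20;		/* 8 MiB: a few PMD-sized ranges */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	if (madvise(p, len, MADV_HUGEPAGE))	/* ends up in hugepage_madvise() */
		perror("madvise(MADV_HUGEPAGE)");

	memset(p, 0x5a, len);			/* fault in base pages */
	munmap(p, len);
	return 0;
}
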
 352int __init khugepaged_init(void)
 353{
 354	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
 355					  sizeof(struct mm_slot),
 356					  __alignof__(struct mm_slot), 0, NULL);
 357	if (!mm_slot_cache)
 358		return -ENOMEM;
 359
 360	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
 361	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
 362	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
 363
 364	return 0;
 365}
 366
 367void __init khugepaged_destroy(void)
 368{
 369	kmem_cache_destroy(mm_slot_cache);
 370}
 371
 372static inline struct mm_slot *alloc_mm_slot(void)
 373{
 374	if (!mm_slot_cache)	/* initialization failed */
 375		return NULL;
 376	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
 377}
 378
 379static inline void free_mm_slot(struct mm_slot *mm_slot)
 380{
 381	kmem_cache_free(mm_slot_cache, mm_slot);
 382}
 383
 384static struct mm_slot *get_mm_slot(struct mm_struct *mm)
 385{
 386	struct mm_slot *mm_slot;
 387
 388	hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
 389		if (mm == mm_slot->mm)
 390			return mm_slot;
 391
 392	return NULL;
 393}
 394
 395static void insert_to_mm_slots_hash(struct mm_struct *mm,
 396				    struct mm_slot *mm_slot)
 397{
 398	mm_slot->mm = mm;
 399	hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
 400}
 401
 402static inline int khugepaged_test_exit(struct mm_struct *mm)
 403{
 404	return atomic_read(&mm->mm_users) == 0;
 405}
 406
 407static bool hugepage_vma_check(struct vm_area_struct *vma,
 408			       unsigned long vm_flags)
 409{
 410	if ((!(vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
 411	    (vm_flags & VM_NOHUGEPAGE) ||
 412	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
 413		return false;
 414
 415	if (shmem_file(vma->vm_file) ||
 416	    (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
 417	     vma->vm_file &&
 418	     (vm_flags & VM_DENYWRITE))) {
 419		if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
 420			return false;
 421		return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
 422				HPAGE_PMD_NR);
 423	}
 424	if (!vma->anon_vma || vma->vm_ops)
 425		return false;
 426	if (is_vma_temporary_stack(vma))
 427		return false;
 428	return !(vm_flags & VM_NO_KHUGEPAGED);
 429}
 430
 431int __khugepaged_enter(struct mm_struct *mm)
 432{
 433	struct mm_slot *mm_slot;
 434	int wakeup;
 435
 436	mm_slot = alloc_mm_slot();
 437	if (!mm_slot)
 438		return -ENOMEM;
 439
 440	/* __khugepaged_exit() must not run from under us */
 441	VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
 442	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
 443		free_mm_slot(mm_slot);
 444		return 0;
 445	}
 446
 447	spin_lock(&khugepaged_mm_lock);
 448	insert_to_mm_slots_hash(mm, mm_slot);
 449	/*
 450	 * Insert just behind the scanning cursor, to let the area settle
 451	 * down a little.
 452	 */
 453	wakeup = list_empty(&khugepaged_scan.mm_head);
 454	list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
 455	spin_unlock(&khugepaged_mm_lock);
 456
 457	mmgrab(mm);
 458	if (wakeup)
 459		wake_up_interruptible(&khugepaged_wait);
 460
 461	return 0;
 462}
 463
 464int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
 465			       unsigned long vm_flags)
 466{
 467	unsigned long hstart, hend;
 468
 469	/*
 470	 * khugepaged only supports read-only files for non-shmem files.
 471	 * khugepaged does not yet work on special mappings. And
 472	 * file-private shmem THP is not supported.
 473	 */
 474	if (!hugepage_vma_check(vma, vm_flags))
 475		return 0;
 476
 477	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
 478	hend = vma->vm_end & HPAGE_PMD_MASK;
 479	if (hstart < hend)
 480		return khugepaged_enter(vma, vm_flags);
 481	return 0;
 482}
 483
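/*
 * A standalone sketch of the hstart/hend rounding used just above, with
 * a hypothetical 2MiB PMD size: a vma is only registered when the
 * rounded range [hstart, hend) is non-empty, i.e. it still covers at
 * least one fully aligned PMD-sized block.
 */
#include <stdio.h>

#define EX_HPAGE_PMD_SIZE	(2UL << 20)	/* assumed 2MiB */
#define EX_HPAGE_PMD_MASK	(~(EX_HPAGE_PMD_SIZE - 1))

int main(void)
{
	unsigned long vm_start = 0x00601000;	/* example, unaligned */
	unsigned long vm_end   = 0x00e02000;
	unsigned long hstart = (vm_start + ~EX_HPAGE_PMD_MASK) & EX_HPAGE_PMD_MASK;
	unsigned long hend   = vm_end & EX_HPAGE_PMD_MASK;

	if (hstart < hend)	/* here: [0x800000, 0xe00000) */
		printf("collapsible range: [%#lx, %#lx)\n", hstart, hend);
	else
		printf("vma too small or misaligned for a huge page\n");
	return 0;
}
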
 484void __khugepaged_exit(struct mm_struct *mm)
 485{
 486	struct mm_slot *mm_slot;
 487	int free = 0;
 488
 489	spin_lock(&khugepaged_mm_lock);
 490	mm_slot = get_mm_slot(mm);
 491	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
 492		hash_del(&mm_slot->hash);
 493		list_del(&mm_slot->mm_node);
 494		free = 1;
 495	}
 496	spin_unlock(&khugepaged_mm_lock);
 497
 498	if (free) {
 499		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
 500		free_mm_slot(mm_slot);
 501		mmdrop(mm);
 502	} else if (mm_slot) {
 503		/*
 504		 * This is required to serialize against
 505		 * khugepaged_test_exit() (which is guaranteed to run
506		 * under mmap sem read mode). Stop here (all pagetables
507		 * will be destroyed after we return) until khugepaged
508		 * has finished working on the pagetables under the
509		 * mmap_sem.
 510		 */
 511		down_write(&mm->mmap_sem);
 512		up_write(&mm->mmap_sem);
 513	}
 514}
 515
 516static void release_pte_page(struct page *page)
 517{
 518	dec_node_page_state(page, NR_ISOLATED_ANON + page_is_file_cache(page));
 519	unlock_page(page);
 520	putback_lru_page(page);
 521}
 522
 523static void release_pte_pages(pte_t *pte, pte_t *_pte)
 524{
 525	while (--_pte >= pte) {
 526		pte_t pteval = *_pte;
 527		if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)))
 528			release_pte_page(pte_page(pteval));
 529	}
 530}
 531
 532static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 533					unsigned long address,
 534					pte_t *pte)
 535{
 536	struct page *page = NULL;
 537	pte_t *_pte;
 538	int none_or_zero = 0, result = 0, referenced = 0;
 539	bool writable = false;
 540
 541	for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
 542	     _pte++, address += PAGE_SIZE) {
 543		pte_t pteval = *_pte;
 544		if (pte_none(pteval) || (pte_present(pteval) &&
 545				is_zero_pfn(pte_pfn(pteval)))) {
 546			if (!userfaultfd_armed(vma) &&
 547			    ++none_or_zero <= khugepaged_max_ptes_none) {
 548				continue;
 549			} else {
 550				result = SCAN_EXCEED_NONE_PTE;
 551				goto out;
 552			}
 553		}
 554		if (!pte_present(pteval)) {
 555			result = SCAN_PTE_NON_PRESENT;
 556			goto out;
 557		}
 558		page = vm_normal_page(vma, address, pteval);
 559		if (unlikely(!page)) {
 560			result = SCAN_PAGE_NULL;
 561			goto out;
 562		}
 563
 564		/* TODO: teach khugepaged to collapse THP mapped with pte */
 565		if (PageCompound(page)) {
 566			result = SCAN_PAGE_COMPOUND;
 567			goto out;
 568		}
 569
 570		VM_BUG_ON_PAGE(!PageAnon(page), page);
 571
 572		/*
 573		 * We can do it before isolate_lru_page because the
 574		 * page can't be freed from under us. NOTE: PG_lock
 575		 * is needed to serialize against split_huge_page
 576		 * when invoked from the VM.
 577		 */
 578		if (!trylock_page(page)) {
 579			result = SCAN_PAGE_LOCK;
 580			goto out;
 581		}
 582
 583		/*
 584		 * cannot use mapcount: can't collapse if there's a gup pin.
 585		 * The page must only be referenced by the scanned process
 586		 * and page swap cache.
 587		 */
 588		if (page_count(page) != 1 + PageSwapCache(page)) {
 589			unlock_page(page);
 590			result = SCAN_PAGE_COUNT;
 591			goto out;
 592		}
 593		if (pte_write(pteval)) {
 594			writable = true;
 595		} else {
 596			if (PageSwapCache(page) &&
 597			    !reuse_swap_page(page, NULL)) {
 598				unlock_page(page);
 599				result = SCAN_SWAP_CACHE_PAGE;
 600				goto out;
 601			}
 602			/*
 603			 * Page is not in the swap cache. It can be collapsed
 604			 * into a THP.
 605			 */
 606		}
 607
 608		/*
609		 * Isolate the page to avoid collapsing a hugepage
 610		 * currently in use by the VM.
 611		 */
 612		if (isolate_lru_page(page)) {
 613			unlock_page(page);
 614			result = SCAN_DEL_PAGE_LRU;
 615			goto out;
 616		}
 617		inc_node_page_state(page,
 618				NR_ISOLATED_ANON + page_is_file_cache(page));
 619		VM_BUG_ON_PAGE(!PageLocked(page), page);
 620		VM_BUG_ON_PAGE(PageLRU(page), page);
 621
622		/* There should be enough young ptes to collapse the page */
 623		if (pte_young(pteval) ||
 624		    page_is_young(page) || PageReferenced(page) ||
 625		    mmu_notifier_test_young(vma->vm_mm, address))
 626			referenced++;
 627	}
 628	if (likely(writable)) {
 629		if (likely(referenced)) {
 630			result = SCAN_SUCCEED;
 631			trace_mm_collapse_huge_page_isolate(page, none_or_zero,
 632							    referenced, writable, result);
 633			return 1;
 634		}
 635	} else {
 636		result = SCAN_PAGE_RO;
 637	}
 638
 639out:
 640	release_pte_pages(pte, _pte);
 641	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
 642					    referenced, writable, result);
 643	return 0;
 644}
 645
 646static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
 647				      struct vm_area_struct *vma,
 648				      unsigned long address,
 649				      spinlock_t *ptl)
 650{
 651	pte_t *_pte;
 652	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
 653				_pte++, page++, address += PAGE_SIZE) {
 654		pte_t pteval = *_pte;
 655		struct page *src_page;
 656
 657		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
 658			clear_user_highpage(page, address);
 659			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
 660			if (is_zero_pfn(pte_pfn(pteval))) {
 661				/*
 662				 * ptl mostly unnecessary.
 663				 */
 664				spin_lock(ptl);
 665				/*
 666				 * paravirt calls inside pte_clear here are
 667				 * superfluous.
 668				 */
 669				pte_clear(vma->vm_mm, address, _pte);
 670				spin_unlock(ptl);
 671			}
 672		} else {
 673			src_page = pte_page(pteval);
 674			copy_user_highpage(page, src_page, address, vma);
 675			VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page);
 676			release_pte_page(src_page);
 677			/*
 678			 * ptl mostly unnecessary, but preempt has to
 679			 * be disabled to update the per-cpu stats
 680			 * inside page_remove_rmap().
 681			 */
 682			spin_lock(ptl);
 683			/*
 684			 * paravirt calls inside pte_clear here are
 685			 * superfluous.
 686			 */
 687			pte_clear(vma->vm_mm, address, _pte);
 688			page_remove_rmap(src_page, false);
 689			spin_unlock(ptl);
 690			free_page_and_swap_cache(src_page);
 691		}
 692	}
 693}
 694
 695static void khugepaged_alloc_sleep(void)
 696{
 697	DEFINE_WAIT(wait);
 698
 699	add_wait_queue(&khugepaged_wait, &wait);
 700	freezable_schedule_timeout_interruptible(
 701		msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
 702	remove_wait_queue(&khugepaged_wait, &wait);
 703}
 704
 705static int khugepaged_node_load[MAX_NUMNODES];
 706
 707static bool khugepaged_scan_abort(int nid)
 708{
 709	int i;
 710
 711	/*
 712	 * If node_reclaim_mode is disabled, then no extra effort is made to
 713	 * allocate memory locally.
 714	 */
 715	if (!node_reclaim_mode)
 716		return false;
 717
 718	/* If there is a count for this node already, it must be acceptable */
 719	if (khugepaged_node_load[nid])
 720		return false;
 721
 722	for (i = 0; i < MAX_NUMNODES; i++) {
 723		if (!khugepaged_node_load[i])
 724			continue;
 725		if (node_distance(nid, i) > node_reclaim_distance)
 726			return true;
 727	}
 728	return false;
 729}
 730
 731/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
 732static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
 733{
 734	return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
 735}
 736
 737#ifdef CONFIG_NUMA
 738static int khugepaged_find_target_node(void)
 739{
 740	static int last_khugepaged_target_node = NUMA_NO_NODE;
 741	int nid, target_node = 0, max_value = 0;
 742
 743	/* find first node with max normal pages hit */
 744	for (nid = 0; nid < MAX_NUMNODES; nid++)
 745		if (khugepaged_node_load[nid] > max_value) {
 746			max_value = khugepaged_node_load[nid];
 747			target_node = nid;
 748		}
 749
750	/* do some balancing if several nodes have the same hit record */
 751	if (target_node <= last_khugepaged_target_node)
 752		for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
 753				nid++)
 754			if (max_value == khugepaged_node_load[nid]) {
 755				target_node = nid;
 756				break;
 757			}
 758
 759	last_khugepaged_target_node = target_node;
 760	return target_node;
 761}
 762
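/*
 * A standalone sketch of khugepaged_find_target_node() above, using a
 * small hypothetical load array: take the first node with the maximum
 * hit count, then rotate among equally loaded nodes relative to the
 * previous choice so repeated ties are spread across nodes.
 */
#include <stdio.h>

#define EX_MAX_NODES 4

static int ex_last_target = -1;		/* like NUMA_NO_NODE */

static int ex_find_target_node(const int load[EX_MAX_NODES])
{
	int nid, target = 0, max_value = 0;

	for (nid = 0; nid < EX_MAX_NODES; nid++)
		if (load[nid] > max_value) {
			max_value = load[nid];
			target = nid;
		}

	if (target <= ex_last_target)
		for (nid = ex_last_target + 1; nid < EX_MAX_NODES; nid++)
			if (load[nid] == max_value) {
				target = nid;
				break;
			}

	ex_last_target = target;
	return target;
}

int main(void)
{
	int load[EX_MAX_NODES] = { 200, 312, 312, 0 };

	/* prints 1 then 2: the tie between nodes 1 and 2 is rotated */
	printf("target: %d\n", ex_find_target_node(load));
	printf("target: %d\n", ex_find_target_node(load));
	return 0;
}
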
 763static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
 764{
 765	if (IS_ERR(*hpage)) {
 766		if (!*wait)
 767			return false;
 768
 769		*wait = false;
 770		*hpage = NULL;
 771		khugepaged_alloc_sleep();
 772	} else if (*hpage) {
 773		put_page(*hpage);
 774		*hpage = NULL;
 775	}
 776
 777	return true;
 778}
 779
 780static struct page *
 781khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
 782{
 783	VM_BUG_ON_PAGE(*hpage, *hpage);
 784
 785	*hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
 786	if (unlikely(!*hpage)) {
 787		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 788		*hpage = ERR_PTR(-ENOMEM);
 789		return NULL;
 790	}
 791
 792	prep_transhuge_page(*hpage);
 793	count_vm_event(THP_COLLAPSE_ALLOC);
 794	return *hpage;
 795}
 796#else
 797static int khugepaged_find_target_node(void)
 798{
 799	return 0;
 800}
 801
 802static inline struct page *alloc_khugepaged_hugepage(void)
 803{
 804	struct page *page;
 805
 806	page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
 807			   HPAGE_PMD_ORDER);
 808	if (page)
 809		prep_transhuge_page(page);
 810	return page;
 811}
 812
 813static struct page *khugepaged_alloc_hugepage(bool *wait)
 814{
 815	struct page *hpage;
 816
 817	do {
 818		hpage = alloc_khugepaged_hugepage();
 819		if (!hpage) {
 820			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 821			if (!*wait)
 822				return NULL;
 823
 824			*wait = false;
 825			khugepaged_alloc_sleep();
 826		} else
 827			count_vm_event(THP_COLLAPSE_ALLOC);
 828	} while (unlikely(!hpage) && likely(khugepaged_enabled()));
 829
 830	return hpage;
 831}
 832
 833static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
 834{
 835	if (!*hpage)
 836		*hpage = khugepaged_alloc_hugepage(wait);
 837
 838	if (unlikely(!*hpage))
 839		return false;
 840
 841	return true;
 842}
 843
 844static struct page *
 845khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
 846{
 847	VM_BUG_ON(!*hpage);
 848
 849	return  *hpage;
 850}
 851#endif
 852
 853/*
 854 * If mmap_sem temporarily dropped, revalidate vma
 855 * before taking mmap_sem.
856 * Return 0 if it succeeds; otherwise return a non-zero
857 * value (scan code).
 858 */
 859
 860static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
 861		struct vm_area_struct **vmap)
 862{
 863	struct vm_area_struct *vma;
 864	unsigned long hstart, hend;
 865
 866	if (unlikely(khugepaged_test_exit(mm)))
 867		return SCAN_ANY_PROCESS;
 868
 869	*vmap = vma = find_vma(mm, address);
 870	if (!vma)
 871		return SCAN_VMA_NULL;
 872
 873	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
 874	hend = vma->vm_end & HPAGE_PMD_MASK;
 875	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
 876		return SCAN_ADDRESS_RANGE;
 877	if (!hugepage_vma_check(vma, vma->vm_flags))
 878		return SCAN_VMA_CHECK;
 879	return 0;
 880}
 881
 882/*
 883 * Bring missing pages in from swap, to complete THP collapse.
 884 * Only done if khugepaged_scan_pmd believes it is worthwhile.
 885 *
 886 * Called and returns without pte mapped or spinlocks held,
 887 * but with mmap_sem held to protect against vma changes.
 888 */
 889
 890static bool __collapse_huge_page_swapin(struct mm_struct *mm,
 891					struct vm_area_struct *vma,
 892					unsigned long address, pmd_t *pmd,
 893					int referenced)
 894{
 895	int swapped_in = 0;
 896	vm_fault_t ret = 0;
 897	struct vm_fault vmf = {
 898		.vma = vma,
 899		.address = address,
 900		.flags = FAULT_FLAG_ALLOW_RETRY,
 901		.pmd = pmd,
 902		.pgoff = linear_page_index(vma, address),
 903	};
 904
905	/* we only decide to swap in if there are enough young ptes */
 906	if (referenced < HPAGE_PMD_NR/2) {
 907		trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
 908		return false;
 909	}
 910	vmf.pte = pte_offset_map(pmd, address);
 911	for (; vmf.address < address + HPAGE_PMD_NR*PAGE_SIZE;
 912			vmf.pte++, vmf.address += PAGE_SIZE) {
 913		vmf.orig_pte = *vmf.pte;
 914		if (!is_swap_pte(vmf.orig_pte))
 915			continue;
 916		swapped_in++;
 917		ret = do_swap_page(&vmf);
 918
 919		/* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */
 920		if (ret & VM_FAULT_RETRY) {
 921			down_read(&mm->mmap_sem);
 922			if (hugepage_vma_revalidate(mm, address, &vmf.vma)) {
 923				/* vma is no longer available, don't continue to swapin */
 924				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
 925				return false;
 926			}
 927			/* check if the pmd is still valid */
 928			if (mm_find_pmd(mm, address) != pmd) {
 929				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
 930				return false;
 931			}
 932		}
 933		if (ret & VM_FAULT_ERROR) {
 934			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
 935			return false;
 936		}
 937		/* pte is unmapped now, we need to map it */
 938		vmf.pte = pte_offset_map(pmd, vmf.address);
 939	}
 940	vmf.pte--;
 941	pte_unmap(vmf.pte);
 942	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
 943	return true;
 944}
 945
 946static void collapse_huge_page(struct mm_struct *mm,
 947				   unsigned long address,
 948				   struct page **hpage,
 949				   int node, int referenced)
 950{
 951	pmd_t *pmd, _pmd;
 952	pte_t *pte;
 953	pgtable_t pgtable;
 954	struct page *new_page;
 955	spinlock_t *pmd_ptl, *pte_ptl;
 956	int isolated = 0, result = 0;
 957	struct mem_cgroup *memcg;
 958	struct vm_area_struct *vma;
 959	struct mmu_notifier_range range;
 960	gfp_t gfp;
 961
 962	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 963
 964	/* Only allocate from the target node */
 965	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
 966
 967	/*
 968	 * Before allocating the hugepage, release the mmap_sem read lock.
 969	 * The allocation can take potentially a long time if it involves
 970	 * sync compaction, and we do not need to hold the mmap_sem during
 971	 * that. We will recheck the vma after taking it again in write mode.
 972	 */
 973	up_read(&mm->mmap_sem);
 974	new_page = khugepaged_alloc_page(hpage, gfp, node);
 975	if (!new_page) {
 976		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
 977		goto out_nolock;
 978	}
 979
 980	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
 981		result = SCAN_CGROUP_CHARGE_FAIL;
 982		goto out_nolock;
 983	}
 984
 985	down_read(&mm->mmap_sem);
 986	result = hugepage_vma_revalidate(mm, address, &vma);
 987	if (result) {
 988		mem_cgroup_cancel_charge(new_page, memcg, true);
 989		up_read(&mm->mmap_sem);
 990		goto out_nolock;
 991	}
 992
 993	pmd = mm_find_pmd(mm, address);
 994	if (!pmd) {
 995		result = SCAN_PMD_NULL;
 996		mem_cgroup_cancel_charge(new_page, memcg, true);
 997		up_read(&mm->mmap_sem);
 998		goto out_nolock;
 999	}
1000
1001	/*
1002	 * __collapse_huge_page_swapin always returns with mmap_sem locked.
1003	 * If it fails, we release mmap_sem and jump to out_nolock;
1004	 * continuing to collapse would cause inconsistency.
1005	 */
1006	if (!__collapse_huge_page_swapin(mm, vma, address, pmd, referenced)) {
1007		mem_cgroup_cancel_charge(new_page, memcg, true);
1008		up_read(&mm->mmap_sem);
1009		goto out_nolock;
1010	}
1011
1012	up_read(&mm->mmap_sem);
1013	/*
1014	 * Prevent all access to the pagetables, with the exception of
1015	 * gup_fast (handled later by the ptep_clear_flush) and the VM
1016	 * (handled by the anon_vma lock + PG_lock).
1017	 */
1018	down_write(&mm->mmap_sem);
1019	result = SCAN_ANY_PROCESS;
1020	if (!mmget_still_valid(mm))
1021		goto out;
1022	result = hugepage_vma_revalidate(mm, address, &vma);
1023	if (result)
1024		goto out;
1025	/* check if the pmd is still valid */
1026	if (mm_find_pmd(mm, address) != pmd)
1027		goto out;
1028
1029	anon_vma_lock_write(vma->anon_vma);
1030
1031	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
1032				address, address + HPAGE_PMD_SIZE);
1033	mmu_notifier_invalidate_range_start(&range);
1034
1035	pte = pte_offset_map(pmd, address);
1036	pte_ptl = pte_lockptr(mm, pmd);
1037
1038	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
1039	/*
1040	 * After this gup_fast can't run anymore. This also removes
1041	 * any huge TLB entry from the CPU so we won't allow
1042	 * huge and small TLB entries for the same virtual address
1043	 * to avoid the risk of CPU bugs in that area.
1044	 */
1045	_pmd = pmdp_collapse_flush(vma, address, pmd);
1046	spin_unlock(pmd_ptl);
1047	mmu_notifier_invalidate_range_end(&range);
1048
1049	spin_lock(pte_ptl);
1050	isolated = __collapse_huge_page_isolate(vma, address, pte);
1051	spin_unlock(pte_ptl);
1052
1053	if (unlikely(!isolated)) {
1054		pte_unmap(pte);
1055		spin_lock(pmd_ptl);
1056		BUG_ON(!pmd_none(*pmd));
1057		/*
1058		 * We can only use set_pmd_at when establishing
1059		 * hugepmds and never for establishing regular pmds that
1060		 * point to regular pagetables. Use pmd_populate for that.
1061		 */
1062		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
1063		spin_unlock(pmd_ptl);
1064		anon_vma_unlock_write(vma->anon_vma);
1065		result = SCAN_FAIL;
1066		goto out;
1067	}
1068
1069	/*
1070	 * All pages are isolated and locked so anon_vma rmap
1071	 * can't run anymore.
1072	 */
1073	anon_vma_unlock_write(vma->anon_vma);
1074
1075	__collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl);
1076	pte_unmap(pte);
1077	__SetPageUptodate(new_page);
1078	pgtable = pmd_pgtable(_pmd);
1079
1080	_pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
1081	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1082
1083	/*
1084	 * spin_lock() below is not the equivalent of smp_wmb(), so
1085	 * this is needed to avoid the copy_huge_page writes becoming
1086	 * visible after the set_pmd_at() write.
1087	 */
1088	smp_wmb();
1089
1090	spin_lock(pmd_ptl);
1091	BUG_ON(!pmd_none(*pmd));
1092	page_add_new_anon_rmap(new_page, vma, address, true);
1093	mem_cgroup_commit_charge(new_page, memcg, false, true);
1094	count_memcg_events(memcg, THP_COLLAPSE_ALLOC, 1);
1095	lru_cache_add_active_or_unevictable(new_page, vma);
1096	pgtable_trans_huge_deposit(mm, pmd, pgtable);
1097	set_pmd_at(mm, address, pmd, _pmd);
1098	update_mmu_cache_pmd(vma, address, pmd);
1099	spin_unlock(pmd_ptl);
1100
1101	*hpage = NULL;
1102
1103	khugepaged_pages_collapsed++;
1104	result = SCAN_SUCCEED;
1105out_up_write:
1106	up_write(&mm->mmap_sem);
1107out_nolock:
1108	trace_mm_collapse_huge_page(mm, isolated, result);
1109	return;
1110out:
1111	mem_cgroup_cancel_charge(new_page, memcg, true);
1112	goto out_up_write;
1113}
1114
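/*
 * A rough userspace sketch for observing the collapse path above,
 * assuming procfs at /proc, a 2MiB PMD size and khugepaged running in
 * "madvise" or "always" mode: fault a madvised region in with base
 * pages, then watch AnonHugePages in /proc/meminfo grow as khugepaged
 * collapses the ranges (this can take several scan intervals, and the
 * fault path may already have installed huge pages on its own).
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

static long anon_huge_kb(void)
{
	char line[128];
	long kb = -1;
	FILE *f = fopen("/proc/meminfo", "r");

	if (!f)
		return -1;
	while (fgets(line, sizeof(line), f))
		if (sscanf(line, "AnonHugePages: %ld kB", &kb) == 1)
			break;
	fclose(f);
	return kb;
}

int main(void)
{
	size_t len = 64UL << 20;	/* 64 MiB */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	int i;

	if (p == MAP_FAILED)
		return 1;
	madvise(p, len, MADV_HUGEPAGE);
	memset(p, 1, len);

	for (i = 0; i < 10; i++) {	/* poll while khugepaged scans */
		printf("AnonHugePages: %ld kB\n", anon_huge_kb());
		sleep(10);
	}
	munmap(p, len);
	return 0;
}
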
1115static int khugepaged_scan_pmd(struct mm_struct *mm,
1116			       struct vm_area_struct *vma,
1117			       unsigned long address,
1118			       struct page **hpage)
1119{
1120	pmd_t *pmd;
1121	pte_t *pte, *_pte;
1122	int ret = 0, none_or_zero = 0, result = 0, referenced = 0;
1123	struct page *page = NULL;
1124	unsigned long _address;
1125	spinlock_t *ptl;
1126	int node = NUMA_NO_NODE, unmapped = 0;
1127	bool writable = false;
1128
1129	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1130
1131	pmd = mm_find_pmd(mm, address);
1132	if (!pmd) {
1133		result = SCAN_PMD_NULL;
1134		goto out;
1135	}
1136
1137	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1138	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
1139	for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
1140	     _pte++, _address += PAGE_SIZE) {
1141		pte_t pteval = *_pte;
1142		if (is_swap_pte(pteval)) {
1143			if (++unmapped <= khugepaged_max_ptes_swap) {
1144				continue;
1145			} else {
1146				result = SCAN_EXCEED_SWAP_PTE;
1147				goto out_unmap;
1148			}
1149		}
1150		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
1151			if (!userfaultfd_armed(vma) &&
1152			    ++none_or_zero <= khugepaged_max_ptes_none) {
1153				continue;
1154			} else {
1155				result = SCAN_EXCEED_NONE_PTE;
1156				goto out_unmap;
1157			}
1158		}
1159		if (!pte_present(pteval)) {
1160			result = SCAN_PTE_NON_PRESENT;
1161			goto out_unmap;
1162		}
1163		if (pte_write(pteval))
1164			writable = true;
1165
1166		page = vm_normal_page(vma, _address, pteval);
1167		if (unlikely(!page)) {
1168			result = SCAN_PAGE_NULL;
1169			goto out_unmap;
1170		}
1171
1172		/* TODO: teach khugepaged to collapse THP mapped with pte */
1173		if (PageCompound(page)) {
1174			result = SCAN_PAGE_COMPOUND;
1175			goto out_unmap;
1176		}
1177
1178		/*
1179		 * Record which node the original page is from and save this
1180		 * information to khugepaged_node_load[].
1181		 * Khugepaged will allocate the hugepage from the node that
1182		 * has the max hit record.
1183		 */
1184		node = page_to_nid(page);
1185		if (khugepaged_scan_abort(node)) {
1186			result = SCAN_SCAN_ABORT;
1187			goto out_unmap;
1188		}
1189		khugepaged_node_load[node]++;
1190		if (!PageLRU(page)) {
1191			result = SCAN_PAGE_LRU;
1192			goto out_unmap;
1193		}
1194		if (PageLocked(page)) {
1195			result = SCAN_PAGE_LOCK;
1196			goto out_unmap;
1197		}
1198		if (!PageAnon(page)) {
1199			result = SCAN_PAGE_ANON;
1200			goto out_unmap;
1201		}
1202
1203		/*
1204		 * cannot use mapcount: can't collapse if there's a gup pin.
1205		 * The page must only be referenced by the scanned process
1206		 * and page swap cache.
1207		 */
1208		if (page_count(page) != 1 + PageSwapCache(page)) {
1209			result = SCAN_PAGE_COUNT;
1210			goto out_unmap;
1211		}
1212		if (pte_young(pteval) ||
1213		    page_is_young(page) || PageReferenced(page) ||
1214		    mmu_notifier_test_young(vma->vm_mm, address))
1215			referenced++;
1216	}
1217	if (writable) {
1218		if (referenced) {
1219			result = SCAN_SUCCEED;
1220			ret = 1;
1221		} else {
1222			result = SCAN_LACK_REFERENCED_PAGE;
1223		}
1224	} else {
1225		result = SCAN_PAGE_RO;
1226	}
1227out_unmap:
1228	pte_unmap_unlock(pte, ptl);
1229	if (ret) {
1230		node = khugepaged_find_target_node();
1231		/* collapse_huge_page will return with the mmap_sem released */
1232		collapse_huge_page(mm, address, hpage, node, referenced);
1233	}
1234out:
1235	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
1236				     none_or_zero, result, unmapped);
1237	return ret;
1238}
1239
1240static void collect_mm_slot(struct mm_slot *mm_slot)
1241{
1242	struct mm_struct *mm = mm_slot->mm;
1243
1244	lockdep_assert_held(&khugepaged_mm_lock);
1245
1246	if (khugepaged_test_exit(mm)) {
1247		/* free mm_slot */
1248		hash_del(&mm_slot->hash);
1249		list_del(&mm_slot->mm_node);
1250
1251		/*
1252		 * Not strictly needed because the mm exited already.
1253		 *
1254		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1255		 */
1256
1257		/* khugepaged_mm_lock actually not necessary for the below */
1258		free_mm_slot(mm_slot);
1259		mmdrop(mm);
1260	}
1261}
1262
1263#if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE)
1264/*
1265 * Notify khugepaged that the given addr of the mm is a pte-mapped THP. Then
1266 * khugepaged should try to collapse the page table.
1267 */
1268static int khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
1269					 unsigned long addr)
1270{
1271	struct mm_slot *mm_slot;
1272
1273	VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
1274
1275	spin_lock(&khugepaged_mm_lock);
1276	mm_slot = get_mm_slot(mm);
1277	if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP))
1278		mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
1279	spin_unlock(&khugepaged_mm_lock);
1280	return 0;
1281}
1282
1283/**
1284 * Try to collapse a pte-mapped THP for mm at address haddr.
1285 *
1286 * This function checks whether all the PTEs in the PMD are pointing to the
1287 * right THP. If so, retract the page table so the THP can refault in
1288 * as pmd-mapped.
1289 */
1290void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
1291{
1292	unsigned long haddr = addr & HPAGE_PMD_MASK;
1293	struct vm_area_struct *vma = find_vma(mm, haddr);
1294	struct page *hpage = NULL;
1295	pte_t *start_pte, *pte;
1296	pmd_t *pmd, _pmd;
1297	spinlock_t *ptl;
1298	int count = 0;
1299	int i;
1300
1301	if (!vma || !vma->vm_file ||
1302	    vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE)
1303		return;
1304
1305	/*
1306	 * This vm_flags may not have VM_HUGEPAGE if the page was not
1307	 * collapsed by this mm. But we can still collapse if the page is
1308	 * a valid THP. Add an extra VM_HUGEPAGE so hugepage_vma_check()
1309	 * will not fail the vma for missing VM_HUGEPAGE.
1310	 */
1311	if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE))
1312		return;
1313
1314	pmd = mm_find_pmd(mm, haddr);
1315	if (!pmd)
1316		return;
1317
1318	start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
1319
1320	/* step 1: check all mapped PTEs are to the right huge page */
1321	for (i = 0, addr = haddr, pte = start_pte;
1322	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1323		struct page *page;
1324
1325		/* empty pte, skip */
1326		if (pte_none(*pte))
1327			continue;
1328
1329		/* page swapped out, abort */
1330		if (!pte_present(*pte))
1331			goto abort;
1332
1333		page = vm_normal_page(vma, addr, *pte);
1334
1335		if (!page || !PageCompound(page))
1336			goto abort;
1337
1338		if (!hpage) {
1339			hpage = compound_head(page);
1340			/*
1341			 * The mapping of the THP should not change.
1342			 *
1343			 * Note that uprobe, debugger, or MAP_PRIVATE may
1344			 * change the page table, but the new page will
1345			 * not pass PageCompound() check.
1346			 */
1347			if (WARN_ON(hpage->mapping != vma->vm_file->f_mapping))
1348				goto abort;
1349		}
1350
1351		/*
1352		 * Confirm the page maps to the correct subpage.
1353		 *
1354		 * Note that uprobe, debugger, or MAP_PRIVATE may change
1355		 * the page table, but the new page will not pass
1356		 * PageCompound() check.
1357		 */
1358		if (WARN_ON(hpage + i != page))
1359			goto abort;
1360		count++;
1361	}
1362
1363	/* step 2: adjust rmap */
1364	for (i = 0, addr = haddr, pte = start_pte;
1365	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1366		struct page *page;
1367
1368		if (pte_none(*pte))
1369			continue;
1370		page = vm_normal_page(vma, addr, *pte);
1371		page_remove_rmap(page, false);
1372	}
1373
1374	pte_unmap_unlock(start_pte, ptl);
1375
1376	/* step 3: set proper refcount and mm_counters. */
1377	if (hpage) {
1378		page_ref_sub(hpage, count);
1379		add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
1380	}
1381
1382	/* step 4: collapse pmd */
1383	ptl = pmd_lock(vma->vm_mm, pmd);
1384	_pmd = pmdp_collapse_flush(vma, addr, pmd);
1385	spin_unlock(ptl);
1386	mm_dec_nr_ptes(mm);
1387	pte_free(mm, pmd_pgtable(_pmd));
1388	return;
1389
1390abort:
1391	pte_unmap_unlock(start_pte, ptl);
1392}
1393
1394static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
1395{
1396	struct mm_struct *mm = mm_slot->mm;
1397	int i;
1398
1399	if (likely(mm_slot->nr_pte_mapped_thp == 0))
1400		return 0;
1401
1402	if (!down_write_trylock(&mm->mmap_sem))
1403		return -EBUSY;
1404
1405	if (unlikely(khugepaged_test_exit(mm)))
1406		goto out;
1407
1408	for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
1409		collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i]);
1410
1411out:
1412	mm_slot->nr_pte_mapped_thp = 0;
1413	up_write(&mm->mmap_sem);
1414	return 0;
1415}
1416
1417static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1418{
1419	struct vm_area_struct *vma;
1420	unsigned long addr;
1421	pmd_t *pmd, _pmd;
1422
1423	i_mmap_lock_write(mapping);
1424	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
1425		/*
1426		 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
1427		 * got written to. These VMAs are likely not worth the cost of
1428		 * down_write(mmap_sem), as the PMD mapping is likely to be split
1429		 * later.
1430		 *
1431		 * Note that the vma->anon_vma check is racy: it can be set up by
1432		 * the fault path after the check but before we take mmap_sem.
1433		 * But page lock would prevent establishing any new ptes of the
1434		 * page, so we are safe.
1435		 *
1436		 * An alternative would be to drop the check, but then check that
1437		 * the page table is clear before calling pmdp_collapse_flush()
1438		 * under ptl. That has a higher chance of recovering a THP for
1439		 * the VMA, but also a higher cost.
1440		 */
1441		if (vma->anon_vma)
1442			continue;
1443		addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1444		if (addr & ~HPAGE_PMD_MASK)
1445			continue;
1446		if (vma->vm_end < addr + HPAGE_PMD_SIZE)
1447			continue;
1448		pmd = mm_find_pmd(vma->vm_mm, addr);
1449		if (!pmd)
1450			continue;
1451		/*
1452		 * We need exclusive mmap_sem to retract page table.
1453		 *
1454		 * We use trylock due to lock inversion: we need to acquire
1455		 * mmap_sem while holding page lock. Fault path does it in
1456		 * reverse order. Trylock is a way to avoid deadlock.
1457		 */
1458		if (down_write_trylock(&vma->vm_mm->mmap_sem)) {
1459			spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd);
1460			/* assume page table is clear */
1461			_pmd = pmdp_collapse_flush(vma, addr, pmd);
1462			spin_unlock(ptl);
1463			up_write(&vma->vm_mm->mmap_sem);
1464			mm_dec_nr_ptes(vma->vm_mm);
1465			pte_free(vma->vm_mm, pmd_pgtable(_pmd));
1466		} else {
1467			/* Try again later */
1468			khugepaged_add_pte_mapped_thp(vma->vm_mm, addr);
1469		}
1470	}
1471	i_mmap_unlock_write(mapping);
1472}
1473
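/*
 * A generic userspace sketch, with hypothetical pthread locks rather
 * than the kernel's, of the trylock pattern described above: the usual
 * order is mmap_sem first and the page lock second, so a path that
 * already holds the page lock may only *try* to take mmap_sem and has
 * to defer the retraction when that fails, instead of risking an ABBA
 * deadlock against the fault path.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ex_mmap_sem = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t ex_page_lock = PTHREAD_MUTEX_INITIALIZER;

static void ex_retract_like_path(void)
{
	pthread_mutex_lock(&ex_page_lock);	/* already held by the caller */

	if (pthread_mutex_trylock(&ex_mmap_sem) == 0) {
		printf("got mmap_sem: retract the page table now\n");
		pthread_mutex_unlock(&ex_mmap_sem);
	} else {
		printf("mmap_sem busy: defer (khugepaged_add_pte_mapped_thp)\n");
	}

	pthread_mutex_unlock(&ex_page_lock);
}

int main(void)
{
	ex_retract_like_path();
	return 0;
}
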
1474/**
1475 * collapse_file - collapse filemap/tmpfs/shmem pages into a huge one.
1476 *
1477 * Basic scheme is simple, details are more complex:
1478 *  - allocate and lock a new huge page;
1479 *  - scan page cache replacing old pages with the new one
1480 *    + swap/gup in pages if necessary;
1481 *    + fill in gaps;
1482 *    + keep old pages around in case rollback is required;
1483 *  - if replacing succeeds:
1484 *    + copy data over;
1485 *    + free old pages;
1486 *    + unlock huge page;
1487 *  - if replacing failed:
1488 *    + put all pages back and unfreeze them;
1489 *    + restore gaps in the page cache;
1490 *    + unlock and free huge page;
1491 */
1492static void collapse_file(struct mm_struct *mm,
1493		struct file *file, pgoff_t start,
1494		struct page **hpage, int node)
1495{
1496	struct address_space *mapping = file->f_mapping;
1497	gfp_t gfp;
1498	struct page *new_page;
1499	struct mem_cgroup *memcg;
1500	pgoff_t index, end = start + HPAGE_PMD_NR;
1501	LIST_HEAD(pagelist);
1502	XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
1503	int nr_none = 0, result = SCAN_SUCCEED;
1504	bool is_shmem = shmem_file(file);
1505
1506	VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
1507	VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1508
1509	/* Only allocate from the target node */
1510	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
1511
1512	new_page = khugepaged_alloc_page(hpage, gfp, node);
1513	if (!new_page) {
1514		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
1515		goto out;
1516	}
1517
1518	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
1519		result = SCAN_CGROUP_CHARGE_FAIL;
1520		goto out;
1521	}
1522
1523	/* This will be less messy when we use multi-index entries */
1524	do {
1525		xas_lock_irq(&xas);
1526		xas_create_range(&xas);
1527		if (!xas_error(&xas))
1528			break;
1529		xas_unlock_irq(&xas);
1530		if (!xas_nomem(&xas, GFP_KERNEL)) {
1531			mem_cgroup_cancel_charge(new_page, memcg, true);
1532			result = SCAN_FAIL;
1533			goto out;
1534		}
1535	} while (1);
1536
1537	__SetPageLocked(new_page);
1538	if (is_shmem)
1539		__SetPageSwapBacked(new_page);
1540	new_page->index = start;
1541	new_page->mapping = mapping;
1542
1543	/*
1544	 * At this point the new_page is locked and not up-to-date.
1545	 * It's safe to insert it into the page cache, because nobody would
1546	 * be able to map it or use it in another way until we unlock it.
1547	 */
1548
1549	xas_set(&xas, start);
1550	for (index = start; index < end; index++) {
1551		struct page *page = xas_next(&xas);
1552
1553		VM_BUG_ON(index != xas.xa_index);
1554		if (is_shmem) {
1555			if (!page) {
1556				/*
1557				 * Stop if extent has been truncated or
1558				 * hole-punched, and is now completely
1559				 * empty.
1560				 */
1561				if (index == start) {
1562					if (!xas_next_entry(&xas, end - 1)) {
1563						result = SCAN_TRUNCATED;
1564						goto xa_locked;
1565					}
1566					xas_set(&xas, index);
1567				}
1568				if (!shmem_charge(mapping->host, 1)) {
1569					result = SCAN_FAIL;
1570					goto xa_locked;
1571				}
1572				xas_store(&xas, new_page);
1573				nr_none++;
1574				continue;
1575			}
1576
1577			if (xa_is_value(page) || !PageUptodate(page)) {
1578				xas_unlock_irq(&xas);
1579				/* swap in or instantiate fallocated page */
1580				if (shmem_getpage(mapping->host, index, &page,
1581						  SGP_NOHUGE)) {
1582					result = SCAN_FAIL;
1583					goto xa_unlocked;
1584				}
1585			} else if (trylock_page(page)) {
1586				get_page(page);
1587				xas_unlock_irq(&xas);
1588			} else {
1589				result = SCAN_PAGE_LOCK;
1590				goto xa_locked;
1591			}
1592		} else {	/* !is_shmem */
1593			if (!page || xa_is_value(page)) {
1594				xas_unlock_irq(&xas);
1595				page_cache_sync_readahead(mapping, &file->f_ra,
1596							  file, index,
1597							  PAGE_SIZE);
1598				/* drain pagevecs to help isolate_lru_page() */
1599				lru_add_drain();
1600				page = find_lock_page(mapping, index);
1601				if (unlikely(page == NULL)) {
1602					result = SCAN_FAIL;
1603					goto xa_unlocked;
1604				}
1605			} else if (trylock_page(page)) {
1606				get_page(page);
1607				xas_unlock_irq(&xas);
1608			} else {
1609				result = SCAN_PAGE_LOCK;
1610				goto xa_locked;
1611			}
1612		}
1613
1614		/*
1615		 * The page must be locked, so we can drop the i_pages lock
1616		 * without racing with truncate.
1617		 */
1618		VM_BUG_ON_PAGE(!PageLocked(page), page);
1619
1620		/* make sure the page is up to date */
1621		if (unlikely(!PageUptodate(page))) {
1622			result = SCAN_FAIL;
1623			goto out_unlock;
1624		}
1625
1626		/*
1627		 * If file was truncated then extended, or hole-punched, before
1628		 * we locked the first page, then a THP might be there already.
1629		 */
1630		if (PageTransCompound(page)) {
1631			result = SCAN_PAGE_COMPOUND;
1632			goto out_unlock;
1633		}
1634
1635		if (page_mapping(page) != mapping) {
1636			result = SCAN_TRUNCATED;
1637			goto out_unlock;
1638		}
1639
1640		if (!is_shmem && PageDirty(page)) {
1641			/*
1642			 * khugepaged only works on read-only fd, so this
1643			 * page is dirty because it hasn't been flushed
1644			 * since first write.
1645			 */
1646			result = SCAN_FAIL;
1647			goto out_unlock;
1648		}
1649
1650		if (isolate_lru_page(page)) {
1651			result = SCAN_DEL_PAGE_LRU;
1652			goto out_unlock;
1653		}
1654
1655		if (page_has_private(page) &&
1656		    !try_to_release_page(page, GFP_KERNEL)) {
1657			result = SCAN_PAGE_HAS_PRIVATE;
1658			goto out_unlock;
1659		}
1660
1661		if (page_mapped(page))
1662			unmap_mapping_pages(mapping, index, 1, false);
1663
1664		xas_lock_irq(&xas);
1665		xas_set(&xas, index);
1666
1667		VM_BUG_ON_PAGE(page != xas_load(&xas), page);
1668		VM_BUG_ON_PAGE(page_mapped(page), page);
1669
1670		/*
1671		 * The page is expected to have page_count() == 3:
1672		 *  - we hold a pin on it;
1673		 *  - one reference from page cache;
1674		 *  - one from isolate_lru_page;
1675		 */
1676		if (!page_ref_freeze(page, 3)) {
1677			result = SCAN_PAGE_COUNT;
1678			xas_unlock_irq(&xas);
1679			putback_lru_page(page);
1680			goto out_unlock;
1681		}
1682
1683		/*
1684		 * Add the page to the list to be able to undo the collapse if
1685		 * something goes wrong.
1686		 */
1687		list_add_tail(&page->lru, &pagelist);
1688
1689		/* Finally, replace with the new page. */
1690		xas_store(&xas, new_page);
1691		continue;
1692out_unlock:
1693		unlock_page(page);
1694		put_page(page);
1695		goto xa_unlocked;
1696	}
1697
1698	if (is_shmem)
1699		__inc_node_page_state(new_page, NR_SHMEM_THPS);
1700	else {
1701		__inc_node_page_state(new_page, NR_FILE_THPS);
1702		filemap_nr_thps_inc(mapping);
1703	}
1704
1705	if (nr_none) {
1706		struct zone *zone = page_zone(new_page);
1707
1708		__mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
1709		if (is_shmem)
1710			__mod_node_page_state(zone->zone_pgdat,
1711					      NR_SHMEM, nr_none);
1712	}
1713
1714xa_locked:
1715	xas_unlock_irq(&xas);
1716xa_unlocked:
1717
1718	if (result == SCAN_SUCCEED) {
1719		struct page *page, *tmp;
1720
1721		/*
1722		 * Replacing the old pages with the new one has succeeded; now we
1723		 * need to copy the content and free the old pages.
1724		 */
1725		index = start;
1726		list_for_each_entry_safe(page, tmp, &pagelist, lru) {
1727			while (index < page->index) {
1728				clear_highpage(new_page + (index % HPAGE_PMD_NR));
1729				index++;
1730			}
1731			copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
1732					page);
1733			list_del(&page->lru);
1734			page->mapping = NULL;
1735			page_ref_unfreeze(page, 1);
1736			ClearPageActive(page);
1737			ClearPageUnevictable(page);
1738			unlock_page(page);
1739			put_page(page);
1740			index++;
1741		}
1742		while (index < end) {
1743			clear_highpage(new_page + (index % HPAGE_PMD_NR));
1744			index++;
1745		}
1746
1747		SetPageUptodate(new_page);
1748		page_ref_add(new_page, HPAGE_PMD_NR - 1);
1749		mem_cgroup_commit_charge(new_page, memcg, false, true);
1750
1751		if (is_shmem) {
1752			set_page_dirty(new_page);
1753			lru_cache_add_anon(new_page);
1754		} else {
1755			lru_cache_add_file(new_page);
1756		}
1757		count_memcg_events(memcg, THP_COLLAPSE_ALLOC, 1);
1758
1759		/*
1760		 * Remove pte page tables, so we can re-fault the page as huge.
1761		 */
1762		retract_page_tables(mapping, start);
1763		*hpage = NULL;
1764
1765		khugepaged_pages_collapsed++;
1766	} else {
1767		struct page *page;
1768
1769		/* Something went wrong: roll back page cache changes */
1770		xas_lock_irq(&xas);
1771		mapping->nrpages -= nr_none;
1772
1773		if (is_shmem)
1774			shmem_uncharge(mapping->host, nr_none);
1775
1776		xas_set(&xas, start);
1777		xas_for_each(&xas, page, end - 1) {
1778			page = list_first_entry_or_null(&pagelist,
1779					struct page, lru);
1780			if (!page || xas.xa_index < page->index) {
1781				if (!nr_none)
1782					break;
1783				nr_none--;
1784				/* Put holes back where they were */
1785				xas_store(&xas, NULL);
1786				continue;
1787			}
1788
1789			VM_BUG_ON_PAGE(page->index != xas.xa_index, page);
1790
1791			/* Unfreeze the page. */
1792			list_del(&page->lru);
1793			page_ref_unfreeze(page, 2);
1794			xas_store(&xas, page);
1795			xas_pause(&xas);
1796			xas_unlock_irq(&xas);
1797			unlock_page(page);
1798			putback_lru_page(page);
1799			xas_lock_irq(&xas);
1800		}
1801		VM_BUG_ON(nr_none);
1802		xas_unlock_irq(&xas);
1803
1804		mem_cgroup_cancel_charge(new_page, memcg, true);
1805		new_page->mapping = NULL;
1806	}
1807
1808	unlock_page(new_page);
1809out:
1810	VM_BUG_ON(!list_empty(&pagelist));
1811	/* TODO: tracepoints */
1812}
1813
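/*
 * A minimal userspace sketch, assuming procfs at /proc: collapses done
 * by collapse_file() above are reflected in the ShmemHugePages and,
 * with CONFIG_READ_ONLY_THP_FOR_FS, FileHugePages counters of
 * /proc/meminfo.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[128];
	FILE *f = fopen("/proc/meminfo", "r");

	if (!f) {
		perror("/proc/meminfo");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "ShmemHugePages:", 15) ||
		    !strncmp(line, "FileHugePages:", 14))
			fputs(line, stdout);
	fclose(f);
	return 0;
}
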
1814static void khugepaged_scan_file(struct mm_struct *mm,
1815		struct file *file, pgoff_t start, struct page **hpage)
1816{
1817	struct page *page = NULL;
1818	struct address_space *mapping = file->f_mapping;
1819	XA_STATE(xas, &mapping->i_pages, start);
1820	int present, swap;
1821	int node = NUMA_NO_NODE;
1822	int result = SCAN_SUCCEED;
1823
1824	present = 0;
1825	swap = 0;
1826	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1827	rcu_read_lock();
1828	xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
1829		if (xas_retry(&xas, page))
1830			continue;
1831
1832		if (xa_is_value(page)) {
1833			if (++swap > khugepaged_max_ptes_swap) {
1834				result = SCAN_EXCEED_SWAP_PTE;
1835				break;
1836			}
1837			continue;
1838		}
1839
1840		if (PageTransCompound(page)) {
1841			result = SCAN_PAGE_COMPOUND;
1842			break;
1843		}
1844
1845		node = page_to_nid(page);
1846		if (khugepaged_scan_abort(node)) {
1847			result = SCAN_SCAN_ABORT;
1848			break;
1849		}
1850		khugepaged_node_load[node]++;
1851
1852		if (!PageLRU(page)) {
1853			result = SCAN_PAGE_LRU;
1854			break;
1855		}
1856
1857		if (page_count(page) !=
1858		    1 + page_mapcount(page) + page_has_private(page)) {
1859			result = SCAN_PAGE_COUNT;
1860			break;
1861		}
1862
1863		/*
1864		 * We probably should check if the page is referenced here, but
1865		 * nobody would transfer pte_young() to PageReferenced() for us.
1866		 * And an rmap walk here is just too costly...
1867		 */
1868
1869		present++;
1870
1871		if (need_resched()) {
1872			xas_pause(&xas);
1873			cond_resched_rcu();
1874		}
1875	}
1876	rcu_read_unlock();
1877
1878	if (result == SCAN_SUCCEED) {
1879		if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
1880			result = SCAN_EXCEED_NONE_PTE;
1881		} else {
1882			node = khugepaged_find_target_node();
1883			collapse_file(mm, file, start, hpage, node);
1884		}
1885	}
1886
1887	/* TODO: tracepoints */
1888}
1889#else
1890static void khugepaged_scan_file(struct mm_struct *mm,
1891		struct file *file, pgoff_t start, struct page **hpage)
1892{
1893	BUILD_BUG();
1894}
1895
1896static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
1897{
1898	return 0;
1899}
1900#endif
1901
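/*
 * khugepaged_scan_mm_slot - advance the global scan cursor by up to @pages
 *
 * Resumes from the saved mm_slot and address, walks the mm's VMAs and hands
 * each pmd-aligned range to khugepaged_scan_pmd() or, for suitable file or
 * shmem mappings, to khugepaged_scan_file().  khugepaged_mm_lock is dropped
 * for the walk and retaken before returning; the mm_slot is released once
 * the mm is exiting or all of its VMAs have been scanned.
 */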
1902static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
1903					    struct page **hpage)
1904	__releases(&khugepaged_mm_lock)
1905	__acquires(&khugepaged_mm_lock)
1906{
1907	struct mm_slot *mm_slot;
1908	struct mm_struct *mm;
1909	struct vm_area_struct *vma;
1910	int progress = 0;
1911
1912	VM_BUG_ON(!pages);
1913	lockdep_assert_held(&khugepaged_mm_lock);
1914
1915	if (khugepaged_scan.mm_slot)
1916		mm_slot = khugepaged_scan.mm_slot;
1917	else {
1918		mm_slot = list_entry(khugepaged_scan.mm_head.next,
1919				     struct mm_slot, mm_node);
1920		khugepaged_scan.address = 0;
1921		khugepaged_scan.mm_slot = mm_slot;
1922	}
1923	spin_unlock(&khugepaged_mm_lock);
1924	khugepaged_collapse_pte_mapped_thps(mm_slot);
1925
1926	mm = mm_slot->mm;
1927	/*
1928	 * Don't wait for the semaphore (to avoid long wait times).  Just move to
1929	 * the next mm on the list.
1930	 */
1931	vma = NULL;
1932	if (unlikely(!down_read_trylock(&mm->mmap_sem)))
1933		goto breakouterloop_mmap_sem;
1934	if (likely(!khugepaged_test_exit(mm)))
1935		vma = find_vma(mm, khugepaged_scan.address);
1936
1937	progress++;
1938	for (; vma; vma = vma->vm_next) {
1939		unsigned long hstart, hend;
1940
1941		cond_resched();
1942		if (unlikely(khugepaged_test_exit(mm))) {
1943			progress++;
1944			break;
1945		}
1946		if (!hugepage_vma_check(vma, vma->vm_flags)) {
1947skip:
1948			progress++;
1949			continue;
1950		}
1951		hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1952		hend = vma->vm_end & HPAGE_PMD_MASK;
1953		if (hstart >= hend)
1954			goto skip;
1955		if (khugepaged_scan.address > hend)
1956			goto skip;
1957		if (khugepaged_scan.address < hstart)
1958			khugepaged_scan.address = hstart;
1959		VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
1960
1961		while (khugepaged_scan.address < hend) {
1962			int ret;
1963			cond_resched();
1964			if (unlikely(khugepaged_test_exit(mm)))
1965				goto breakouterloop;
1966
1967			VM_BUG_ON(khugepaged_scan.address < hstart ||
1968				  khugepaged_scan.address + HPAGE_PMD_SIZE >
1969				  hend);
1970			if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
1971				struct file *file;
1972				pgoff_t pgoff = linear_page_index(vma,
1973						khugepaged_scan.address);
1974
1975				if (shmem_file(vma->vm_file)
1976				    && !shmem_huge_enabled(vma))
1977					goto skip;
1978				file = get_file(vma->vm_file);
1979				up_read(&mm->mmap_sem);
1980				ret = 1;
1981				khugepaged_scan_file(mm, file, pgoff, hpage);
1982				fput(file);
1983			} else {
1984				ret = khugepaged_scan_pmd(mm, vma,
1985						khugepaged_scan.address,
1986						hpage);
1987			}
1988			/* move to next address */
1989			khugepaged_scan.address += HPAGE_PMD_SIZE;
1990			progress += HPAGE_PMD_NR;
1991			if (ret)
1992				/* we released mmap_sem so break loop */
1993				goto breakouterloop_mmap_sem;
1994			if (progress >= pages)
1995				goto breakouterloop;
1996		}
1997	}
1998breakouterloop:
1999	up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
2000breakouterloop_mmap_sem:
2001
2002	spin_lock(&khugepaged_mm_lock);
2003	VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2004	/*
2005	 * Release the current mm_slot if this mm is about to die, or
2006	 * if we scanned all vmas of this mm.
2007	 */
2008	if (khugepaged_test_exit(mm) || !vma) {
2009		/*
2010		 * Make sure that if mm_users is reaching zero while
2011		 * khugepaged runs here, khugepaged_exit will find
2012		 * mm_slot not pointing to the exiting mm.
2013		 */
2014		if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
2015			khugepaged_scan.mm_slot = list_entry(
2016				mm_slot->mm_node.next,
2017				struct mm_slot, mm_node);
2018			khugepaged_scan.address = 0;
2019		} else {
2020			khugepaged_scan.mm_slot = NULL;
2021			khugepaged_full_scans++;
2022		}
2023
2024		collect_mm_slot(mm_slot);
2025	}
2026
2027	return progress;
2028}
2029
2030static int khugepaged_has_work(void)
2031{
2032	return !list_empty(&khugepaged_scan.mm_head) &&
2033		khugepaged_enabled();
2034}
2035
2036static int khugepaged_wait_event(void)
2037{
2038	return !list_empty(&khugepaged_scan.mm_head) ||
2039		kthread_should_stop();
2040}
2041
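/*
 * khugepaged_do_scan - run one batch of background scanning
 *
 * Repeatedly calls khugepaged_scan_mm_slot() until roughly
 * khugepaged_pages_to_scan pages' worth of progress has been made, stopping
 * early if khugepaged_prealloc_page() fails or the thread is asked to stop
 * or freeze.
 */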
2042static void khugepaged_do_scan(void)
2043{
2044	struct page *hpage = NULL;
2045	unsigned int progress = 0, pass_through_head = 0;
2046	unsigned int pages = khugepaged_pages_to_scan;
2047	bool wait = true;
2048
2049	barrier(); /* write khugepaged_pages_to_scan to local stack */
2050
2051	while (progress < pages) {
2052		if (!khugepaged_prealloc_page(&hpage, &wait))
2053			break;
2054
2055		cond_resched();
2056
2057		if (unlikely(kthread_should_stop() || try_to_freeze()))
2058			break;
2059
2060		spin_lock(&khugepaged_mm_lock);
2061		if (!khugepaged_scan.mm_slot)
2062			pass_through_head++;
2063		if (khugepaged_has_work() &&
2064		    pass_through_head < 2)
2065			progress += khugepaged_scan_mm_slot(pages - progress,
2066							    &hpage);
2067		else
2068			progress = pages;
2069		spin_unlock(&khugepaged_mm_lock);
2070	}
2071
2072	if (!IS_ERR_OR_NULL(hpage))
2073		put_page(hpage);
2074}
2075
2076static bool khugepaged_should_wakeup(void)
2077{
2078	return kthread_should_stop() ||
2079	       time_after_eq(jiffies, khugepaged_sleep_expire);
2080}
2081
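/*
 * khugepaged_wait_work - sleep between scan batches
 *
 * With work pending, sleep for khugepaged_scan_sleep_millisecs (returning
 * immediately if that is zero); otherwise block until an mm is registered
 * for scanning or the thread should stop.
 */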
2082static void khugepaged_wait_work(void)
2083{
2084	if (khugepaged_has_work()) {
2085		const unsigned long scan_sleep_jiffies =
2086			msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
2087
2088		if (!scan_sleep_jiffies)
2089			return;
2090
2091		khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
2092		wait_event_freezable_timeout(khugepaged_wait,
2093					     khugepaged_should_wakeup(),
2094					     scan_sleep_jiffies);
2095		return;
2096	}
2097
2098	if (khugepaged_enabled())
2099		wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
2100}
2101
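/*
 * khugepaged - main loop of the khugepaged kernel thread
 *
 * Alternates between khugepaged_do_scan() and khugepaged_wait_work() until
 * asked to stop, then releases any mm_slot the scan cursor still holds.
 */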
2102static int khugepaged(void *none)
2103{
2104	struct mm_slot *mm_slot;
2105
2106	set_freezable();
2107	set_user_nice(current, MAX_NICE);
2108
2109	while (!kthread_should_stop()) {
2110		khugepaged_do_scan();
2111		khugepaged_wait_work();
2112	}
2113
2114	spin_lock(&khugepaged_mm_lock);
2115	mm_slot = khugepaged_scan.mm_slot;
2116	khugepaged_scan.mm_slot = NULL;
2117	if (mm_slot)
2118		collect_mm_slot(mm_slot);
2119	spin_unlock(&khugepaged_mm_lock);
2120	return 0;
2121}
2122
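/*
 * set_recommended_min_free_kbytes - raise min_free_kbytes to help THP
 *
 * For each populated zone usable by GFP_USER allocations, reserve two free
 * pageblocks plus MIGRATE_PCPTYPES * MIGRATE_PCPTYPES pageblocks to assist
 * fragmentation avoidance, capped at 5% of lowmem.  As a rough illustration
 * (assuming 4 KiB pages, 2 MiB pageblocks so pageblock_nr_pages == 512,
 * MIGRATE_PCPTYPES == 3 and a single eligible zone):
 * 512 * 1 * 2 + 512 * 1 * 3 * 3 = 5632 pages, i.e. 22528 kB before the cap.
 * min_free_kbytes is only ever raised here, never lowered, and the per-zone
 * watermarks are recomputed afterwards.
 */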
2123static void set_recommended_min_free_kbytes(void)
2124{
2125	struct zone *zone;
2126	int nr_zones = 0;
2127	unsigned long recommended_min;
2128
2129	for_each_populated_zone(zone) {
2130		/*
2131		 * We don't need to worry about fragmentation of
2132		 * ZONE_MOVABLE since it only has movable pages.
2133		 */
2134		if (zone_idx(zone) > gfp_zone(GFP_USER))
2135			continue;
2136
2137		nr_zones++;
2138	}
2139
2140	/* Ensure 2 pageblocks are free to assist fragmentation avoidance */
2141	recommended_min = pageblock_nr_pages * nr_zones * 2;
2142
2143	/*
2144	 * Make sure that on average at least two pageblocks are almost free
2145	 * of another type, one for a migratetype to fall back to and a
2146	 * second to avoid subsequent fallbacks of other types. There are 3
2147	 * MIGRATE_TYPES we care about.
2148	 */
2149	recommended_min += pageblock_nr_pages * nr_zones *
2150			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
2151
2152	/* never allow reserving more than 5% of the lowmem */
2153	recommended_min = min(recommended_min,
2154			      (unsigned long) nr_free_buffer_pages() / 20);
2155	recommended_min <<= (PAGE_SHIFT-10);
2156
2157	if (recommended_min > min_free_kbytes) {
2158		if (user_min_free_kbytes >= 0)
2159			pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
2160				min_free_kbytes, recommended_min);
2161
2162		min_free_kbytes = recommended_min;
2163	}
2164	setup_per_zone_wmarks();
2165}
2166
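/*
 * start_stop_khugepaged - start or stop the khugepaged thread
 *
 * Called when transparent hugepages are enabled or disabled: starts the
 * kthread (and raises min_free_kbytes) when khugepaged is enabled, stops it
 * otherwise.  Serialized by khugepaged_mutex.
 */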
2167int start_stop_khugepaged(void)
2168{
2169	static struct task_struct *khugepaged_thread __read_mostly;
2170	static DEFINE_MUTEX(khugepaged_mutex);
2171	int err = 0;
2172
2173	mutex_lock(&khugepaged_mutex);
2174	if (khugepaged_enabled()) {
2175		if (!khugepaged_thread)
2176			khugepaged_thread = kthread_run(khugepaged, NULL,
2177							"khugepaged");
2178		if (IS_ERR(khugepaged_thread)) {
2179			pr_err("khugepaged: kthread_run(khugepaged) failed\n");
2180			err = PTR_ERR(khugepaged_thread);
2181			khugepaged_thread = NULL;
2182			goto fail;
2183		}
2184
2185		if (!list_empty(&khugepaged_scan.mm_head))
2186			wake_up_interruptible(&khugepaged_wait);
2187
2188		set_recommended_min_free_kbytes();
2189	} else if (khugepaged_thread) {
2190		kthread_stop(khugepaged_thread);
2191		khugepaged_thread = NULL;
2192	}
2193fail:
2194	mutex_unlock(&khugepaged_mutex);
2195	return err;
2196}