   1// SPDX-License-Identifier: GPL-2.0
   2#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   3
   4#include <linux/mm.h>
   5#include <linux/sched.h>
   6#include <linux/sched/mm.h>
   7#include <linux/sched/coredump.h>
   8#include <linux/mmu_notifier.h>
   9#include <linux/rmap.h>
  10#include <linux/swap.h>
  11#include <linux/mm_inline.h>
  12#include <linux/kthread.h>
  13#include <linux/khugepaged.h>
  14#include <linux/freezer.h>
  15#include <linux/mman.h>
  16#include <linux/hashtable.h>
  17#include <linux/userfaultfd_k.h>
  18#include <linux/page_idle.h>
  19#include <linux/swapops.h>
  20#include <linux/shmem_fs.h>
  21
  22#include <asm/tlb.h>
  23#include <asm/pgalloc.h>
  24#include "internal.h"
  25
  26enum scan_result {
  27	SCAN_FAIL,
  28	SCAN_SUCCEED,
  29	SCAN_PMD_NULL,
  30	SCAN_EXCEED_NONE_PTE,
  31	SCAN_PTE_NON_PRESENT,
  32	SCAN_PAGE_RO,
  33	SCAN_LACK_REFERENCED_PAGE,
  34	SCAN_PAGE_NULL,
  35	SCAN_SCAN_ABORT,
  36	SCAN_PAGE_COUNT,
  37	SCAN_PAGE_LRU,
  38	SCAN_PAGE_LOCK,
  39	SCAN_PAGE_ANON,
  40	SCAN_PAGE_COMPOUND,
  41	SCAN_ANY_PROCESS,
  42	SCAN_VMA_NULL,
  43	SCAN_VMA_CHECK,
  44	SCAN_ADDRESS_RANGE,
  45	SCAN_SWAP_CACHE_PAGE,
  46	SCAN_DEL_PAGE_LRU,
  47	SCAN_ALLOC_HUGE_PAGE_FAIL,
  48	SCAN_CGROUP_CHARGE_FAIL,
  49	SCAN_EXCEED_SWAP_PTE,
  50	SCAN_TRUNCATED,
  51};
  52
  53#define CREATE_TRACE_POINTS
  54#include <trace/events/huge_memory.h>
  55
   56/* default: scan 8*512 ptes (or vmas) every 10 seconds */
  57static unsigned int khugepaged_pages_to_scan __read_mostly;
  58static unsigned int khugepaged_pages_collapsed;
  59static unsigned int khugepaged_full_scans;
  60static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
  61/* during fragmentation poll the hugepage allocator once every minute */
  62static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
  63static unsigned long khugepaged_sleep_expire;
  64static DEFINE_SPINLOCK(khugepaged_mm_lock);
  65static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
   66/*
   67 * By default, collapse hugepages if there is at least one pte mapped,
   68 * just as would have happened if the vma had been large enough during
   69 * the page fault.
   70 */
  71static unsigned int khugepaged_max_ptes_none __read_mostly;
  72static unsigned int khugepaged_max_ptes_swap __read_mostly;
  73
  74#define MM_SLOTS_HASH_BITS 10
  75static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
  76
  77static struct kmem_cache *mm_slot_cache __read_mostly;
  78
  79/**
  80 * struct mm_slot - hash lookup from mm to mm_slot
  81 * @hash: hash collision list
  82 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
  83 * @mm: the mm that this information is valid for
  84 */
  85struct mm_slot {
  86	struct hlist_node hash;
  87	struct list_head mm_node;
  88	struct mm_struct *mm;
  89};
  90
  91/**
  92 * struct khugepaged_scan - cursor for scanning
  93 * @mm_head: the head of the mm list to scan
  94 * @mm_slot: the current mm_slot we are scanning
  95 * @address: the next address inside that to be scanned
  96 *
  97 * There is only the one khugepaged_scan instance of this cursor structure.
  98 */
  99struct khugepaged_scan {
 100	struct list_head mm_head;
 101	struct mm_slot *mm_slot;
 102	unsigned long address;
 103};
 104
 105static struct khugepaged_scan khugepaged_scan = {
 106	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
 107};
 108
 109#ifdef CONFIG_SYSFS
 110static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
 111					 struct kobj_attribute *attr,
 112					 char *buf)
 113{
 114	return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
 115}
 116
 117static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
 118					  struct kobj_attribute *attr,
 119					  const char *buf, size_t count)
 120{
 121	unsigned long msecs;
 122	int err;
 123
 124	err = kstrtoul(buf, 10, &msecs);
 125	if (err || msecs > UINT_MAX)
 126		return -EINVAL;
 127
 128	khugepaged_scan_sleep_millisecs = msecs;
 129	khugepaged_sleep_expire = 0;
 130	wake_up_interruptible(&khugepaged_wait);
 131
 132	return count;
 133}
 134static struct kobj_attribute scan_sleep_millisecs_attr =
 135	__ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
 136	       scan_sleep_millisecs_store);
 137
 138static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
 139					  struct kobj_attribute *attr,
 140					  char *buf)
 141{
 142	return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
 143}
 144
 145static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
 146					   struct kobj_attribute *attr,
 147					   const char *buf, size_t count)
 148{
 149	unsigned long msecs;
 150	int err;
 151
 152	err = kstrtoul(buf, 10, &msecs);
 153	if (err || msecs > UINT_MAX)
 154		return -EINVAL;
 155
 156	khugepaged_alloc_sleep_millisecs = msecs;
 157	khugepaged_sleep_expire = 0;
 158	wake_up_interruptible(&khugepaged_wait);
 159
 160	return count;
 161}
 162static struct kobj_attribute alloc_sleep_millisecs_attr =
 163	__ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
 164	       alloc_sleep_millisecs_store);
 165
 166static ssize_t pages_to_scan_show(struct kobject *kobj,
 167				  struct kobj_attribute *attr,
 168				  char *buf)
 169{
 170	return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
 171}
 172static ssize_t pages_to_scan_store(struct kobject *kobj,
 173				   struct kobj_attribute *attr,
 174				   const char *buf, size_t count)
 175{
 176	int err;
 177	unsigned long pages;
 178
 179	err = kstrtoul(buf, 10, &pages);
 180	if (err || !pages || pages > UINT_MAX)
 181		return -EINVAL;
 182
 183	khugepaged_pages_to_scan = pages;
 184
 185	return count;
 186}
 187static struct kobj_attribute pages_to_scan_attr =
 188	__ATTR(pages_to_scan, 0644, pages_to_scan_show,
 189	       pages_to_scan_store);
 190
 191static ssize_t pages_collapsed_show(struct kobject *kobj,
 192				    struct kobj_attribute *attr,
 193				    char *buf)
 194{
 195	return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
 196}
 197static struct kobj_attribute pages_collapsed_attr =
 198	__ATTR_RO(pages_collapsed);
 199
 200static ssize_t full_scans_show(struct kobject *kobj,
 201			       struct kobj_attribute *attr,
 202			       char *buf)
 203{
 204	return sprintf(buf, "%u\n", khugepaged_full_scans);
 205}
 206static struct kobj_attribute full_scans_attr =
 207	__ATTR_RO(full_scans);
 208
 209static ssize_t khugepaged_defrag_show(struct kobject *kobj,
 210				      struct kobj_attribute *attr, char *buf)
 211{
 212	return single_hugepage_flag_show(kobj, attr, buf,
 213				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
 214}
 215static ssize_t khugepaged_defrag_store(struct kobject *kobj,
 216				       struct kobj_attribute *attr,
 217				       const char *buf, size_t count)
 218{
 219	return single_hugepage_flag_store(kobj, attr, buf, count,
 220				 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
 221}
 222static struct kobj_attribute khugepaged_defrag_attr =
 223	__ATTR(defrag, 0644, khugepaged_defrag_show,
 224	       khugepaged_defrag_store);
 225
  226/*
  227 * max_ptes_none controls whether khugepaged should collapse hugepages
  228 * over any unmapped ptes, in turn potentially increasing the memory
  229 * footprint of the vmas. When max_ptes_none is 0, khugepaged will not
  230 * reduce the available free memory in the system as it
  231 * runs. Increasing max_ptes_none will instead potentially reduce the
  232 * free memory in the system during the khugepaged scan.
  233 */
 234static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
 235					     struct kobj_attribute *attr,
 236					     char *buf)
 237{
 238	return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
 239}
 240static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
 241					      struct kobj_attribute *attr,
 242					      const char *buf, size_t count)
 243{
 244	int err;
 245	unsigned long max_ptes_none;
 246
 247	err = kstrtoul(buf, 10, &max_ptes_none);
 248	if (err || max_ptes_none > HPAGE_PMD_NR-1)
 249		return -EINVAL;
 250
 251	khugepaged_max_ptes_none = max_ptes_none;
 252
 253	return count;
 254}
 255static struct kobj_attribute khugepaged_max_ptes_none_attr =
 256	__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
 257	       khugepaged_max_ptes_none_store);
 258
 259static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
 260					     struct kobj_attribute *attr,
 261					     char *buf)
 262{
 263	return sprintf(buf, "%u\n", khugepaged_max_ptes_swap);
 264}
 265
 266static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
 267					      struct kobj_attribute *attr,
 268					      const char *buf, size_t count)
 269{
 270	int err;
 271	unsigned long max_ptes_swap;
 272
 273	err  = kstrtoul(buf, 10, &max_ptes_swap);
 274	if (err || max_ptes_swap > HPAGE_PMD_NR-1)
 275		return -EINVAL;
 276
 277	khugepaged_max_ptes_swap = max_ptes_swap;
 278
 279	return count;
 280}
 281
 282static struct kobj_attribute khugepaged_max_ptes_swap_attr =
 283	__ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
 284	       khugepaged_max_ptes_swap_store);
 285
 286static struct attribute *khugepaged_attr[] = {
 287	&khugepaged_defrag_attr.attr,
 288	&khugepaged_max_ptes_none_attr.attr,
 289	&pages_to_scan_attr.attr,
 290	&pages_collapsed_attr.attr,
 291	&full_scans_attr.attr,
 292	&scan_sleep_millisecs_attr.attr,
 293	&alloc_sleep_millisecs_attr.attr,
 294	&khugepaged_max_ptes_swap_attr.attr,
 295	NULL,
 296};
 297
 298struct attribute_group khugepaged_attr_group = {
 299	.attrs = khugepaged_attr,
 300	.name = "khugepaged",
 301};
 302#endif /* CONFIG_SYSFS */
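
The attribute group above is what exposes the khugepaged tunables under /sys/kernel/mm/transparent_hugepage/khugepaged/. As an illustrative aside (not part of this file), a minimal userspace sketch that dumps those tunables could look like the following, assuming sysfs is mounted at /sys and the kernel was built with THP support:

#include <stdio.h>

/* Names match the sysfs attributes declared in khugepaged_attr[] above. */
static const char * const tunables[] = {
	"defrag", "pages_to_scan", "pages_collapsed", "full_scans",
	"scan_sleep_millisecs", "alloc_sleep_millisecs",
	"max_ptes_none", "max_ptes_swap",
};

int main(void)
{
	char path[256], value[64];
	size_t i;

	for (i = 0; i < sizeof(tunables) / sizeof(tunables[0]); i++) {
		FILE *f;

		snprintf(path, sizeof(path),
			 "/sys/kernel/mm/transparent_hugepage/khugepaged/%s",
			 tunables[i]);
		f = fopen(path, "r");
		if (!f)
			continue;	/* no THP support or different layout */
		if (fgets(value, sizeof(value), f))
			printf("%-22s %s", tunables[i], value);
		fclose(f);
	}
	return 0;
}

Writes to the writable files (for example pages_to_scan) are parsed by the corresponding *_store() handlers above.
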
 303
 304#define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)
 305
 306int hugepage_madvise(struct vm_area_struct *vma,
 307		     unsigned long *vm_flags, int advice)
 308{
 309	switch (advice) {
 310	case MADV_HUGEPAGE:
 311#ifdef CONFIG_S390
 312		/*
 313		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
 314		 * can't handle this properly after s390_enable_sie, so we simply
 315		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
 316		 */
 317		if (mm_has_pgste(vma->vm_mm))
 318			return 0;
 319#endif
 320		*vm_flags &= ~VM_NOHUGEPAGE;
 321		*vm_flags |= VM_HUGEPAGE;
  322		/*
  323		 * If the vma becomes suitable for khugepaged to scan,
  324		 * register it here without waiting for a page fault that
  325		 * may not happen any time soon.
  326		 */
 327		if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
 328				khugepaged_enter_vma_merge(vma, *vm_flags))
 329			return -ENOMEM;
 330		break;
 331	case MADV_NOHUGEPAGE:
 332		*vm_flags &= ~VM_HUGEPAGE;
 333		*vm_flags |= VM_NOHUGEPAGE;
  334		/*
  335		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
  336		 * this vma, even if the mm stays registered in khugepaged
  337		 * because it was registered before VM_NOHUGEPAGE was set.
  338		 */
 339		break;
 340	}
 341
 342	return 0;
 343}
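
hugepage_madvise() is reached from userspace through madvise(2). The following is a minimal, illustrative sketch of the calling side (not part of this file): it maps anonymous memory, marks it MADV_HUGEPAGE (which ends up here, setting VM_HUGEPAGE and registering the mm with khugepaged), then touches the range so the scanner has something to collapse. The pause at the end only leaves time to inspect AnonHugePages in /proc/self/smaps while khugepaged runs.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define LEN (8UL << 20)	/* 8 MiB: several PMD-sized ranges on x86-64 */

int main(void)
{
	/* Private anonymous mapping, the case handled by this file. */
	void *p = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Ends up in hugepage_madvise() above via the MADV_HUGEPAGE case. */
	if (madvise(p, LEN, MADV_HUGEPAGE) != 0)
		perror("madvise(MADV_HUGEPAGE)");

	/* Populate the ptes so there is something worth collapsing. */
	memset(p, 0xa5, LEN);

	getchar();	/* keep the mapping alive while inspecting smaps */
	munmap(p, LEN);
	return 0;
}
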
 344
 345int __init khugepaged_init(void)
 346{
 347	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
 348					  sizeof(struct mm_slot),
 349					  __alignof__(struct mm_slot), 0, NULL);
 350	if (!mm_slot_cache)
 351		return -ENOMEM;
 352
 353	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
 354	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
 355	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
 356
 357	return 0;
 358}
 359
 360void __init khugepaged_destroy(void)
 361{
 362	kmem_cache_destroy(mm_slot_cache);
 363}
 364
 365static inline struct mm_slot *alloc_mm_slot(void)
 366{
 367	if (!mm_slot_cache)	/* initialization failed */
 368		return NULL;
 369	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
 370}
 371
 372static inline void free_mm_slot(struct mm_slot *mm_slot)
 373{
 374	kmem_cache_free(mm_slot_cache, mm_slot);
 375}
 376
 377static struct mm_slot *get_mm_slot(struct mm_struct *mm)
 378{
 379	struct mm_slot *mm_slot;
 380
 381	hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
 382		if (mm == mm_slot->mm)
 383			return mm_slot;
 384
 385	return NULL;
 386}
 387
 388static void insert_to_mm_slots_hash(struct mm_struct *mm,
 389				    struct mm_slot *mm_slot)
 390{
 391	mm_slot->mm = mm;
 392	hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
 393}
 394
 395static inline int khugepaged_test_exit(struct mm_struct *mm)
 396{
 397	return atomic_read(&mm->mm_users) == 0;
 398}
 399
 400int __khugepaged_enter(struct mm_struct *mm)
 401{
 402	struct mm_slot *mm_slot;
 403	int wakeup;
 404
 405	mm_slot = alloc_mm_slot();
 406	if (!mm_slot)
 407		return -ENOMEM;
 408
 409	/* __khugepaged_exit() must not run from under us */
 410	VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
 411	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
 412		free_mm_slot(mm_slot);
 413		return 0;
 414	}
 415
 416	spin_lock(&khugepaged_mm_lock);
 417	insert_to_mm_slots_hash(mm, mm_slot);
 418	/*
 419	 * Insert just behind the scanning cursor, to let the area settle
 420	 * down a little.
 421	 */
 422	wakeup = list_empty(&khugepaged_scan.mm_head);
 423	list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
 424	spin_unlock(&khugepaged_mm_lock);
 425
 426	mmgrab(mm);
 427	if (wakeup)
 428		wake_up_interruptible(&khugepaged_wait);
 429
 430	return 0;
 431}
 432
 433int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
 434			       unsigned long vm_flags)
 435{
 436	unsigned long hstart, hend;
 437	if (!vma->anon_vma)
 438		/*
 439		 * Not yet faulted in so we will register later in the
 440		 * page fault if needed.
 441		 */
 442		return 0;
 443	if (vma->vm_ops || (vm_flags & VM_NO_KHUGEPAGED))
 444		/* khugepaged not yet working on file or special mappings */
 445		return 0;
 446	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
 447	hend = vma->vm_end & HPAGE_PMD_MASK;
 448	if (hstart < hend)
 449		return khugepaged_enter(vma, vm_flags);
 450	return 0;
 451}
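
The hstart/hend arithmetic above rounds vm_start up and vm_end down to PMD boundaries, so the vma is only handed to khugepaged_enter() when at least one fully aligned, PMD-sized range fits inside it. For a concrete feel, the same rounding can be restated in userspace, assuming the common 2 MiB PMD size (x86-64 with 4 KiB base pages); the addresses are made up:

#include <stdio.h>

#define HPAGE_PMD_SIZE	(2UL << 20)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

int main(void)
{
	unsigned long vm_start = 0x00601000;	/* not 2 MiB aligned */
	unsigned long vm_end   = 0x00a23000;

	unsigned long hstart = (vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	unsigned long hend   = vm_end & HPAGE_PMD_MASK;

	/* hstart = 0x800000 (rounded up), hend = 0xa00000 (rounded down):
	 * one full PMD range fits, so this vma would be a candidate. */
	printf("hstart=%#lx hend=%#lx candidate=%d\n",
	       hstart, hend, hstart < hend);
	return 0;
}
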
 452
 453void __khugepaged_exit(struct mm_struct *mm)
 454{
 455	struct mm_slot *mm_slot;
 456	int free = 0;
 457
 458	spin_lock(&khugepaged_mm_lock);
 459	mm_slot = get_mm_slot(mm);
 460	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
 461		hash_del(&mm_slot->hash);
 462		list_del(&mm_slot->mm_node);
 463		free = 1;
 464	}
 465	spin_unlock(&khugepaged_mm_lock);
 466
 467	if (free) {
 468		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
 469		free_mm_slot(mm_slot);
 470		mmdrop(mm);
 471	} else if (mm_slot) {
  472		/*
  473		 * This is required to serialize against
  474		 * khugepaged_test_exit() (which is guaranteed to run
  475		 * under mmap_sem read mode). Stop here (once we
  476		 * return, all pagetables will be destroyed) until
  477		 * khugepaged has finished working on the pagetables
  478		 * under the mmap_sem.
  479		 */
 480		down_write(&mm->mmap_sem);
 481		up_write(&mm->mmap_sem);
 482	}
 483}
 484
 485static void release_pte_page(struct page *page)
 486{
 487	dec_node_page_state(page, NR_ISOLATED_ANON + page_is_file_cache(page));
 488	unlock_page(page);
 489	putback_lru_page(page);
 490}
 491
 492static void release_pte_pages(pte_t *pte, pte_t *_pte)
 493{
 494	while (--_pte >= pte) {
 495		pte_t pteval = *_pte;
 496		if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)))
 497			release_pte_page(pte_page(pteval));
 498	}
 499}
 500
 501static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 502					unsigned long address,
 503					pte_t *pte)
 504{
 505	struct page *page = NULL;
 506	pte_t *_pte;
 507	int none_or_zero = 0, result = 0, referenced = 0;
 508	bool writable = false;
 509
 510	for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
 511	     _pte++, address += PAGE_SIZE) {
 512		pte_t pteval = *_pte;
 513		if (pte_none(pteval) || (pte_present(pteval) &&
 514				is_zero_pfn(pte_pfn(pteval)))) {
 515			if (!userfaultfd_armed(vma) &&
 516			    ++none_or_zero <= khugepaged_max_ptes_none) {
 517				continue;
 518			} else {
 519				result = SCAN_EXCEED_NONE_PTE;
 520				goto out;
 521			}
 522		}
 523		if (!pte_present(pteval)) {
 524			result = SCAN_PTE_NON_PRESENT;
 525			goto out;
 526		}
 527		page = vm_normal_page(vma, address, pteval);
 528		if (unlikely(!page)) {
 529			result = SCAN_PAGE_NULL;
 530			goto out;
 531		}
 532
 533		/* TODO: teach khugepaged to collapse THP mapped with pte */
 534		if (PageCompound(page)) {
 535			result = SCAN_PAGE_COMPOUND;
 536			goto out;
 537		}
 538
 539		VM_BUG_ON_PAGE(!PageAnon(page), page);
 540
 541		/*
 542		 * We can do it before isolate_lru_page because the
 543		 * page can't be freed from under us. NOTE: PG_lock
 544		 * is needed to serialize against split_huge_page
 545		 * when invoked from the VM.
 546		 */
 547		if (!trylock_page(page)) {
 548			result = SCAN_PAGE_LOCK;
 549			goto out;
 550		}
 551
 552		/*
 553		 * cannot use mapcount: can't collapse if there's a gup pin.
 554		 * The page must only be referenced by the scanned process
 555		 * and page swap cache.
 556		 */
 557		if (page_count(page) != 1 + PageSwapCache(page)) {
 558			unlock_page(page);
 559			result = SCAN_PAGE_COUNT;
 560			goto out;
 561		}
 562		if (pte_write(pteval)) {
 563			writable = true;
 564		} else {
 565			if (PageSwapCache(page) &&
 566			    !reuse_swap_page(page, NULL)) {
 567				unlock_page(page);
 568				result = SCAN_SWAP_CACHE_PAGE;
 569				goto out;
 570			}
 571			/*
 572			 * Page is not in the swap cache. It can be collapsed
 573			 * into a THP.
 574			 */
 575		}
 576
  577		/*
  578		 * Isolate the page to avoid collapsing a hugepage
  579		 * currently in use by the VM.
  580		 */
 581		if (isolate_lru_page(page)) {
 582			unlock_page(page);
 583			result = SCAN_DEL_PAGE_LRU;
 584			goto out;
 585		}
 586		inc_node_page_state(page,
 587				NR_ISOLATED_ANON + page_is_file_cache(page));
 588		VM_BUG_ON_PAGE(!PageLocked(page), page);
 589		VM_BUG_ON_PAGE(PageLRU(page), page);
 590
  591		/* There should be enough young ptes to collapse the page */
 592		if (pte_young(pteval) ||
 593		    page_is_young(page) || PageReferenced(page) ||
 594		    mmu_notifier_test_young(vma->vm_mm, address))
 595			referenced++;
 596	}
 597	if (likely(writable)) {
 598		if (likely(referenced)) {
 599			result = SCAN_SUCCEED;
 600			trace_mm_collapse_huge_page_isolate(page, none_or_zero,
 601							    referenced, writable, result);
 602			return 1;
 603		}
 604	} else {
 605		result = SCAN_PAGE_RO;
 606	}
 607
 608out:
 609	release_pte_pages(pte, _pte);
 610	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
 611					    referenced, writable, result);
 612	return 0;
 613}
 614
 615static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
 616				      struct vm_area_struct *vma,
 617				      unsigned long address,
 618				      spinlock_t *ptl)
 619{
 620	pte_t *_pte;
 621	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
 622				_pte++, page++, address += PAGE_SIZE) {
 623		pte_t pteval = *_pte;
 624		struct page *src_page;
 625
 626		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
 627			clear_user_highpage(page, address);
 628			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
 629			if (is_zero_pfn(pte_pfn(pteval))) {
 630				/*
 631				 * ptl mostly unnecessary.
 632				 */
 633				spin_lock(ptl);
 634				/*
 635				 * paravirt calls inside pte_clear here are
 636				 * superfluous.
 637				 */
 638				pte_clear(vma->vm_mm, address, _pte);
 639				spin_unlock(ptl);
 640			}
 641		} else {
 642			src_page = pte_page(pteval);
 643			copy_user_highpage(page, src_page, address, vma);
 644			VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page);
 645			release_pte_page(src_page);
 646			/*
 647			 * ptl mostly unnecessary, but preempt has to
 648			 * be disabled to update the per-cpu stats
 649			 * inside page_remove_rmap().
 650			 */
 651			spin_lock(ptl);
 652			/*
 653			 * paravirt calls inside pte_clear here are
 654			 * superfluous.
 655			 */
 656			pte_clear(vma->vm_mm, address, _pte);
 657			page_remove_rmap(src_page, false);
 658			spin_unlock(ptl);
 659			free_page_and_swap_cache(src_page);
 660		}
 661	}
 662}
 663
 664static void khugepaged_alloc_sleep(void)
 665{
 666	DEFINE_WAIT(wait);
 667
 668	add_wait_queue(&khugepaged_wait, &wait);
 669	freezable_schedule_timeout_interruptible(
 670		msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
 671	remove_wait_queue(&khugepaged_wait, &wait);
 672}
 673
 674static int khugepaged_node_load[MAX_NUMNODES];
 675
 676static bool khugepaged_scan_abort(int nid)
 677{
 678	int i;
 679
 680	/*
 681	 * If node_reclaim_mode is disabled, then no extra effort is made to
 682	 * allocate memory locally.
 683	 */
 684	if (!node_reclaim_mode)
 685		return false;
 686
 687	/* If there is a count for this node already, it must be acceptable */
 688	if (khugepaged_node_load[nid])
 689		return false;
 690
 691	for (i = 0; i < MAX_NUMNODES; i++) {
 692		if (!khugepaged_node_load[i])
 693			continue;
 694		if (node_distance(nid, i) > RECLAIM_DISTANCE)
 695			return true;
 696	}
 697	return false;
 698}
 699
 700/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
 701static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
 702{
 703	return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
 704}
 705
 706#ifdef CONFIG_NUMA
 707static int khugepaged_find_target_node(void)
 708{
 709	static int last_khugepaged_target_node = NUMA_NO_NODE;
 710	int nid, target_node = 0, max_value = 0;
 711
 712	/* find first node with max normal pages hit */
 713	for (nid = 0; nid < MAX_NUMNODES; nid++)
 714		if (khugepaged_node_load[nid] > max_value) {
 715			max_value = khugepaged_node_load[nid];
 716			target_node = nid;
 717		}
 718
  719	/* do some balancing if several nodes have the same hit record */
 720	if (target_node <= last_khugepaged_target_node)
 721		for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
 722				nid++)
 723			if (max_value == khugepaged_node_load[nid]) {
 724				target_node = nid;
 725				break;
 726			}
 727
 728	last_khugepaged_target_node = target_node;
 729	return target_node;
 730}
 731
 732static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
 733{
 734	if (IS_ERR(*hpage)) {
 735		if (!*wait)
 736			return false;
 737
 738		*wait = false;
 739		*hpage = NULL;
 740		khugepaged_alloc_sleep();
 741	} else if (*hpage) {
 742		put_page(*hpage);
 743		*hpage = NULL;
 744	}
 745
 746	return true;
 747}
 748
 749static struct page *
 750khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
 751{
 752	VM_BUG_ON_PAGE(*hpage, *hpage);
 753
 754	*hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
 755	if (unlikely(!*hpage)) {
 756		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 757		*hpage = ERR_PTR(-ENOMEM);
 758		return NULL;
 759	}
 760
 761	prep_transhuge_page(*hpage);
 762	count_vm_event(THP_COLLAPSE_ALLOC);
 763	return *hpage;
 764}
 765#else
 766static int khugepaged_find_target_node(void)
 767{
 768	return 0;
 769}
 770
 771static inline struct page *alloc_khugepaged_hugepage(void)
 772{
 773	struct page *page;
 774
 775	page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
 776			   HPAGE_PMD_ORDER);
 777	if (page)
 778		prep_transhuge_page(page);
 779	return page;
 780}
 781
 782static struct page *khugepaged_alloc_hugepage(bool *wait)
 783{
 784	struct page *hpage;
 785
 786	do {
 787		hpage = alloc_khugepaged_hugepage();
 788		if (!hpage) {
 789			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 790			if (!*wait)
 791				return NULL;
 792
 793			*wait = false;
 794			khugepaged_alloc_sleep();
 795		} else
 796			count_vm_event(THP_COLLAPSE_ALLOC);
 797	} while (unlikely(!hpage) && likely(khugepaged_enabled()));
 798
 799	return hpage;
 800}
 801
 802static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
 803{
 804	if (!*hpage)
 805		*hpage = khugepaged_alloc_hugepage(wait);
 806
 807	if (unlikely(!*hpage))
 808		return false;
 809
 810	return true;
 811}
 812
 813static struct page *
 814khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
 815{
 816	VM_BUG_ON(!*hpage);
 817
 818	return  *hpage;
 819}
 820#endif
 821
 822static bool hugepage_vma_check(struct vm_area_struct *vma)
 823{
 824	if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
 825	    (vma->vm_flags & VM_NOHUGEPAGE) ||
 826	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
 827		return false;
 828	if (shmem_file(vma->vm_file)) {
 829		if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
 830			return false;
 831		return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
 832				HPAGE_PMD_NR);
 833	}
 834	if (!vma->anon_vma || vma->vm_ops)
 835		return false;
 836	if (is_vma_temporary_stack(vma))
 837		return false;
 838	return !(vma->vm_flags & VM_NO_KHUGEPAGED);
 839}
 840
  841/*
  842 * If the mmap_sem was temporarily dropped, revalidate the vma
  843 * before continuing.
  844 * Return 0 on success, otherwise return a non-zero
  845 * scan code.
  846 */
 847
 848static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
 849		struct vm_area_struct **vmap)
 850{
 851	struct vm_area_struct *vma;
 852	unsigned long hstart, hend;
 853
 854	if (unlikely(khugepaged_test_exit(mm)))
 855		return SCAN_ANY_PROCESS;
 856
 857	*vmap = vma = find_vma(mm, address);
 858	if (!vma)
 859		return SCAN_VMA_NULL;
 860
 861	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
 862	hend = vma->vm_end & HPAGE_PMD_MASK;
 863	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
 864		return SCAN_ADDRESS_RANGE;
 865	if (!hugepage_vma_check(vma))
 866		return SCAN_VMA_CHECK;
 867	return 0;
 868}
 869
 870/*
 871 * Bring missing pages in from swap, to complete THP collapse.
 872 * Only done if khugepaged_scan_pmd believes it is worthwhile.
 873 *
 874 * Called and returns without pte mapped or spinlocks held,
 875 * but with mmap_sem held to protect against vma changes.
 876 */
 877
 878static bool __collapse_huge_page_swapin(struct mm_struct *mm,
 879					struct vm_area_struct *vma,
 880					unsigned long address, pmd_t *pmd,
 881					int referenced)
 882{
 883	int swapped_in = 0, ret = 0;
 884	struct vm_fault vmf = {
 885		.vma = vma,
 886		.address = address,
 887		.flags = FAULT_FLAG_ALLOW_RETRY,
 888		.pmd = pmd,
 889		.pgoff = linear_page_index(vma, address),
 890	};
 891
  892	/* we only decide to swap in if there are enough young ptes */
 893	if (referenced < HPAGE_PMD_NR/2) {
 894		trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
 895		return false;
 896	}
 897	vmf.pte = pte_offset_map(pmd, address);
 898	for (; vmf.address < address + HPAGE_PMD_NR*PAGE_SIZE;
 899			vmf.pte++, vmf.address += PAGE_SIZE) {
 900		vmf.orig_pte = *vmf.pte;
 901		if (!is_swap_pte(vmf.orig_pte))
 902			continue;
 903		swapped_in++;
 904		ret = do_swap_page(&vmf);
 905
 906		/* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */
 907		if (ret & VM_FAULT_RETRY) {
 908			down_read(&mm->mmap_sem);
 909			if (hugepage_vma_revalidate(mm, address, &vmf.vma)) {
 910				/* vma is no longer available, don't continue to swapin */
 911				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
 912				return false;
 913			}
 914			/* check if the pmd is still valid */
 915			if (mm_find_pmd(mm, address) != pmd) {
 916				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
 917				return false;
 918			}
 919		}
 920		if (ret & VM_FAULT_ERROR) {
 921			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
 922			return false;
 923		}
 924		/* pte is unmapped now, we need to map it */
 925		vmf.pte = pte_offset_map(pmd, vmf.address);
 926	}
 927	vmf.pte--;
 928	pte_unmap(vmf.pte);
 929	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
 930	return true;
 931}
 932
 933static void collapse_huge_page(struct mm_struct *mm,
 934				   unsigned long address,
 935				   struct page **hpage,
 936				   int node, int referenced)
 937{
 938	pmd_t *pmd, _pmd;
 939	pte_t *pte;
 940	pgtable_t pgtable;
 941	struct page *new_page;
 942	spinlock_t *pmd_ptl, *pte_ptl;
 943	int isolated = 0, result = 0;
 944	struct mem_cgroup *memcg;
 945	struct vm_area_struct *vma;
 946	unsigned long mmun_start;	/* For mmu_notifiers */
 947	unsigned long mmun_end;		/* For mmu_notifiers */
 948	gfp_t gfp;
 949
 950	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 951
 952	/* Only allocate from the target node */
 953	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
 954
 955	/*
 956	 * Before allocating the hugepage, release the mmap_sem read lock.
 957	 * The allocation can take potentially a long time if it involves
 958	 * sync compaction, and we do not need to hold the mmap_sem during
 959	 * that. We will recheck the vma after taking it again in write mode.
 960	 */
 961	up_read(&mm->mmap_sem);
 962	new_page = khugepaged_alloc_page(hpage, gfp, node);
 963	if (!new_page) {
 964		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
 965		goto out_nolock;
 966	}
 967
 968	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
 969		result = SCAN_CGROUP_CHARGE_FAIL;
 970		goto out_nolock;
 971	}
 972
 973	down_read(&mm->mmap_sem);
 974	result = hugepage_vma_revalidate(mm, address, &vma);
 975	if (result) {
 976		mem_cgroup_cancel_charge(new_page, memcg, true);
 977		up_read(&mm->mmap_sem);
 978		goto out_nolock;
 979	}
 980
 981	pmd = mm_find_pmd(mm, address);
 982	if (!pmd) {
 983		result = SCAN_PMD_NULL;
 984		mem_cgroup_cancel_charge(new_page, memcg, true);
 985		up_read(&mm->mmap_sem);
 986		goto out_nolock;
 987	}
 988
 989	/*
 990	 * __collapse_huge_page_swapin always returns with mmap_sem locked.
 991	 * If it fails, we release mmap_sem and jump out_nolock.
 992	 * Continuing to collapse causes inconsistency.
 993	 */
 994	if (!__collapse_huge_page_swapin(mm, vma, address, pmd, referenced)) {
 995		mem_cgroup_cancel_charge(new_page, memcg, true);
 996		up_read(&mm->mmap_sem);
 997		goto out_nolock;
 998	}
 999
1000	up_read(&mm->mmap_sem);
 1001	/*
 1002	 * Prevent all access to the pagetables, with the exception of
 1003	 * gup_fast (handled later by the ptep_clear_flush) and the VM
 1004	 * (handled by the anon_vma lock + PG_lock).
 1005	 */
1006	down_write(&mm->mmap_sem);
1007	result = hugepage_vma_revalidate(mm, address, &vma);
1008	if (result)
1009		goto out;
1010	/* check if the pmd is still valid */
1011	if (mm_find_pmd(mm, address) != pmd)
1012		goto out;
1013
1014	anon_vma_lock_write(vma->anon_vma);
1015
1016	pte = pte_offset_map(pmd, address);
1017	pte_ptl = pte_lockptr(mm, pmd);
1018
1019	mmun_start = address;
1020	mmun_end   = address + HPAGE_PMD_SIZE;
1021	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
1022	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
1023	/*
1024	 * After this gup_fast can't run anymore. This also removes
1025	 * any huge TLB entry from the CPU so we won't allow
1026	 * huge and small TLB entries for the same virtual address
1027	 * to avoid the risk of CPU bugs in that area.
1028	 */
1029	_pmd = pmdp_collapse_flush(vma, address, pmd);
1030	spin_unlock(pmd_ptl);
1031	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1032
1033	spin_lock(pte_ptl);
1034	isolated = __collapse_huge_page_isolate(vma, address, pte);
1035	spin_unlock(pte_ptl);
1036
1037	if (unlikely(!isolated)) {
1038		pte_unmap(pte);
1039		spin_lock(pmd_ptl);
1040		BUG_ON(!pmd_none(*pmd));
1041		/*
1042		 * We can only use set_pmd_at when establishing
1043		 * hugepmds and never for establishing regular pmds that
1044		 * points to regular pagetables. Use pmd_populate for that
1045		 */
1046		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
1047		spin_unlock(pmd_ptl);
1048		anon_vma_unlock_write(vma->anon_vma);
1049		result = SCAN_FAIL;
1050		goto out;
1051	}
1052
1053	/*
1054	 * All pages are isolated and locked so anon_vma rmap
1055	 * can't run anymore.
1056	 */
1057	anon_vma_unlock_write(vma->anon_vma);
1058
1059	__collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl);
1060	pte_unmap(pte);
1061	__SetPageUptodate(new_page);
1062	pgtable = pmd_pgtable(_pmd);
1063
1064	_pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
1065	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1066
 1067	/*
 1068	 * spin_lock() below is not the equivalent of smp_wmb(), so
 1069	 * this barrier is needed to prevent the copy_huge_page writes
 1070	 * from becoming visible after the set_pmd_at() write.
 1071	 */
1072	smp_wmb();
1073
1074	spin_lock(pmd_ptl);
1075	BUG_ON(!pmd_none(*pmd));
1076	page_add_new_anon_rmap(new_page, vma, address, true);
1077	mem_cgroup_commit_charge(new_page, memcg, false, true);
1078	lru_cache_add_active_or_unevictable(new_page, vma);
1079	pgtable_trans_huge_deposit(mm, pmd, pgtable);
1080	set_pmd_at(mm, address, pmd, _pmd);
1081	update_mmu_cache_pmd(vma, address, pmd);
1082	spin_unlock(pmd_ptl);
1083
1084	*hpage = NULL;
1085
1086	khugepaged_pages_collapsed++;
1087	result = SCAN_SUCCEED;
1088out_up_write:
1089	up_write(&mm->mmap_sem);
1090out_nolock:
1091	trace_mm_collapse_huge_page(mm, isolated, result);
1092	return;
1093out:
1094	mem_cgroup_cancel_charge(new_page, memcg, true);
1095	goto out_up_write;
1096}
1097
1098static int khugepaged_scan_pmd(struct mm_struct *mm,
1099			       struct vm_area_struct *vma,
1100			       unsigned long address,
1101			       struct page **hpage)
1102{
1103	pmd_t *pmd;
1104	pte_t *pte, *_pte;
1105	int ret = 0, none_or_zero = 0, result = 0, referenced = 0;
1106	struct page *page = NULL;
1107	unsigned long _address;
1108	spinlock_t *ptl;
1109	int node = NUMA_NO_NODE, unmapped = 0;
1110	bool writable = false;
1111
1112	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1113
1114	pmd = mm_find_pmd(mm, address);
1115	if (!pmd) {
1116		result = SCAN_PMD_NULL;
1117		goto out;
1118	}
1119
1120	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1121	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
1122	for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
1123	     _pte++, _address += PAGE_SIZE) {
1124		pte_t pteval = *_pte;
1125		if (is_swap_pte(pteval)) {
1126			if (++unmapped <= khugepaged_max_ptes_swap) {
1127				continue;
1128			} else {
1129				result = SCAN_EXCEED_SWAP_PTE;
1130				goto out_unmap;
1131			}
1132		}
1133		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
1134			if (!userfaultfd_armed(vma) &&
1135			    ++none_or_zero <= khugepaged_max_ptes_none) {
1136				continue;
1137			} else {
1138				result = SCAN_EXCEED_NONE_PTE;
1139				goto out_unmap;
1140			}
1141		}
1142		if (!pte_present(pteval)) {
1143			result = SCAN_PTE_NON_PRESENT;
1144			goto out_unmap;
1145		}
1146		if (pte_write(pteval))
1147			writable = true;
1148
1149		page = vm_normal_page(vma, _address, pteval);
1150		if (unlikely(!page)) {
1151			result = SCAN_PAGE_NULL;
1152			goto out_unmap;
1153		}
1154
1155		/* TODO: teach khugepaged to collapse THP mapped with pte */
1156		if (PageCompound(page)) {
1157			result = SCAN_PAGE_COMPOUND;
1158			goto out_unmap;
1159		}
1160
 1161		/*
 1162		 * Record which node the original page is from and save this
 1163		 * information to khugepaged_node_load[].
 1164		 * Khugepaged will allocate the hugepage from the node that
 1165		 * has the max hit record.
 1166		 */
1167		node = page_to_nid(page);
1168		if (khugepaged_scan_abort(node)) {
1169			result = SCAN_SCAN_ABORT;
1170			goto out_unmap;
1171		}
1172		khugepaged_node_load[node]++;
1173		if (!PageLRU(page)) {
1174			result = SCAN_PAGE_LRU;
1175			goto out_unmap;
1176		}
1177		if (PageLocked(page)) {
1178			result = SCAN_PAGE_LOCK;
1179			goto out_unmap;
1180		}
1181		if (!PageAnon(page)) {
1182			result = SCAN_PAGE_ANON;
1183			goto out_unmap;
1184		}
1185
1186		/*
1187		 * cannot use mapcount: can't collapse if there's a gup pin.
1188		 * The page must only be referenced by the scanned process
1189		 * and page swap cache.
1190		 */
1191		if (page_count(page) != 1 + PageSwapCache(page)) {
1192			result = SCAN_PAGE_COUNT;
1193			goto out_unmap;
1194		}
1195		if (pte_young(pteval) ||
1196		    page_is_young(page) || PageReferenced(page) ||
1197		    mmu_notifier_test_young(vma->vm_mm, address))
1198			referenced++;
1199	}
1200	if (writable) {
1201		if (referenced) {
1202			result = SCAN_SUCCEED;
1203			ret = 1;
1204		} else {
1205			result = SCAN_LACK_REFERENCED_PAGE;
1206		}
1207	} else {
1208		result = SCAN_PAGE_RO;
1209	}
1210out_unmap:
1211	pte_unmap_unlock(pte, ptl);
1212	if (ret) {
1213		node = khugepaged_find_target_node();
1214		/* collapse_huge_page will return with the mmap_sem released */
1215		collapse_huge_page(mm, address, hpage, node, referenced);
1216	}
1217out:
1218	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
1219				     none_or_zero, result, unmapped);
1220	return ret;
1221}
1222
1223static void collect_mm_slot(struct mm_slot *mm_slot)
1224{
1225	struct mm_struct *mm = mm_slot->mm;
1226
1227	VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
1228
1229	if (khugepaged_test_exit(mm)) {
1230		/* free mm_slot */
1231		hash_del(&mm_slot->hash);
1232		list_del(&mm_slot->mm_node);
1233
1234		/*
1235		 * Not strictly needed because the mm exited already.
1236		 *
1237		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1238		 */
1239
1240		/* khugepaged_mm_lock actually not necessary for the below */
1241		free_mm_slot(mm_slot);
1242		mmdrop(mm);
1243	}
1244}
1245
1246#if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE)
1247static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1248{
1249	struct vm_area_struct *vma;
1250	unsigned long addr;
1251	pmd_t *pmd, _pmd;
1252
1253	i_mmap_lock_write(mapping);
1254	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
1255		/* probably overkill */
1256		if (vma->anon_vma)
1257			continue;
1258		addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1259		if (addr & ~HPAGE_PMD_MASK)
1260			continue;
1261		if (vma->vm_end < addr + HPAGE_PMD_SIZE)
1262			continue;
1263		pmd = mm_find_pmd(vma->vm_mm, addr);
1264		if (!pmd)
1265			continue;
1266		/*
1267		 * We need exclusive mmap_sem to retract page table.
1268		 * If trylock fails we would end up with pte-mapped THP after
1269		 * re-fault. Not ideal, but it's more important to not disturb
1270		 * the system too much.
1271		 */
1272		if (down_write_trylock(&vma->vm_mm->mmap_sem)) {
1273			spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd);
1274			/* assume page table is clear */
1275			_pmd = pmdp_collapse_flush(vma, addr, pmd);
1276			spin_unlock(ptl);
1277			up_write(&vma->vm_mm->mmap_sem);
1278			mm_dec_nr_ptes(vma->vm_mm);
1279			pte_free(vma->vm_mm, pmd_pgtable(_pmd));
1280		}
1281	}
1282	i_mmap_unlock_write(mapping);
1283}
1284
 1285/**
 1286 * collapse_shmem - collapse small tmpfs/shmem pages into a huge one.
 1287 *
 1288 * The basic scheme is simple, the details are more complex:
 1289 *  - allocate and freeze a new huge page;
 1290 *  - scan over the radix tree, replacing old pages with the new one:
 1291 *    + swap in pages if necessary;
 1292 *    + fill in gaps;
 1293 *    + keep old pages around in case rollback is required;
 1294 *  - if replacing succeeded:
 1295 *    + copy data over;
 1296 *    + free old pages;
 1297 *    + unfreeze the huge page;
 1298 *  - if replacing failed:
 1299 *    + put all pages back and unfreeze them;
 1300 *    + restore gaps in the radix tree;
 1301 *    + free the huge page;
 1302 */
1303static void collapse_shmem(struct mm_struct *mm,
1304		struct address_space *mapping, pgoff_t start,
1305		struct page **hpage, int node)
1306{
1307	gfp_t gfp;
1308	struct page *page, *new_page, *tmp;
1309	struct mem_cgroup *memcg;
1310	pgoff_t index, end = start + HPAGE_PMD_NR;
1311	LIST_HEAD(pagelist);
1312	struct radix_tree_iter iter;
1313	void **slot;
1314	int nr_none = 0, result = SCAN_SUCCEED;
1315
1316	VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1317
1318	/* Only allocate from the target node */
1319	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
1320
1321	new_page = khugepaged_alloc_page(hpage, gfp, node);
1322	if (!new_page) {
1323		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
1324		goto out;
1325	}
1326
1327	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
1328		result = SCAN_CGROUP_CHARGE_FAIL;
1329		goto out;
1330	}
1331
1332	new_page->index = start;
1333	new_page->mapping = mapping;
1334	__SetPageSwapBacked(new_page);
1335	__SetPageLocked(new_page);
1336	BUG_ON(!page_ref_freeze(new_page, 1));
1337
1338
 1339	/*
 1340	 * At this point the new_page is 'frozen' (page_count() is zero), locked
 1341	 * and not up-to-date. It's safe to insert it into the radix tree, because
 1342	 * nobody would be able to map it or use it in any other way until we
 1343	 * unfreeze it.
 1344	 */
1345
1346	index = start;
1347	xa_lock_irq(&mapping->i_pages);
1348	radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start) {
1349		int n = min(iter.index, end) - index;
1350
 1351		/*
 1352		 * Handle holes in the radix tree: charge them to shmem and
 1353		 * insert the relevant subpages of new_page into the radix tree.
 1354		 */
1355		if (n && !shmem_charge(mapping->host, n)) {
1356			result = SCAN_FAIL;
1357			break;
1358		}
1359		nr_none += n;
1360		for (; index < min(iter.index, end); index++) {
1361			radix_tree_insert(&mapping->i_pages, index,
1362					new_page + (index % HPAGE_PMD_NR));
1363		}
1364
1365		/* We are done. */
1366		if (index >= end)
1367			break;
1368
1369		page = radix_tree_deref_slot_protected(slot,
1370				&mapping->i_pages.xa_lock);
1371		if (radix_tree_exceptional_entry(page) || !PageUptodate(page)) {
1372			xa_unlock_irq(&mapping->i_pages);
1373			/* swap in or instantiate fallocated page */
1374			if (shmem_getpage(mapping->host, index, &page,
1375						SGP_NOHUGE)) {
1376				result = SCAN_FAIL;
1377				goto tree_unlocked;
1378			}
1379			xa_lock_irq(&mapping->i_pages);
1380		} else if (trylock_page(page)) {
1381			get_page(page);
1382		} else {
1383			result = SCAN_PAGE_LOCK;
1384			break;
1385		}
1386
1387		/*
1388		 * The page must be locked, so we can drop the i_pages lock
1389		 * without racing with truncate.
1390		 */
1391		VM_BUG_ON_PAGE(!PageLocked(page), page);
1392		VM_BUG_ON_PAGE(!PageUptodate(page), page);
1393		VM_BUG_ON_PAGE(PageTransCompound(page), page);
1394
1395		if (page_mapping(page) != mapping) {
1396			result = SCAN_TRUNCATED;
1397			goto out_unlock;
1398		}
1399		xa_unlock_irq(&mapping->i_pages);
1400
1401		if (isolate_lru_page(page)) {
1402			result = SCAN_DEL_PAGE_LRU;
1403			goto out_isolate_failed;
1404		}
1405
1406		if (page_mapped(page))
1407			unmap_mapping_pages(mapping, index, 1, false);
1408
1409		xa_lock_irq(&mapping->i_pages);
1410
1411		slot = radix_tree_lookup_slot(&mapping->i_pages, index);
1412		VM_BUG_ON_PAGE(page != radix_tree_deref_slot_protected(slot,
1413					&mapping->i_pages.xa_lock), page);
1414		VM_BUG_ON_PAGE(page_mapped(page), page);
1415
1416		/*
1417		 * The page is expected to have page_count() == 3:
1418		 *  - we hold a pin on it;
1419		 *  - one reference from radix tree;
1420		 *  - one from isolate_lru_page;
1421		 */
1422		if (!page_ref_freeze(page, 3)) {
1423			result = SCAN_PAGE_COUNT;
1424			goto out_lru;
1425		}
1426
 1427		/*
 1428		 * Add the page to the list to be able to undo the collapse if
 1429		 * something goes wrong.
 1430		 */
1431		list_add_tail(&page->lru, &pagelist);
1432
1433		/* Finally, replace with the new page. */
1434		radix_tree_replace_slot(&mapping->i_pages, slot,
1435				new_page + (index % HPAGE_PMD_NR));
1436
1437		slot = radix_tree_iter_resume(slot, &iter);
1438		index++;
1439		continue;
1440out_lru:
1441		xa_unlock_irq(&mapping->i_pages);
1442		putback_lru_page(page);
1443out_isolate_failed:
1444		unlock_page(page);
1445		put_page(page);
1446		goto tree_unlocked;
1447out_unlock:
1448		unlock_page(page);
1449		put_page(page);
1450		break;
1451	}
1452
1453	/*
1454	 * Handle hole in radix tree at the end of the range.
1455	 * This code only triggers if there's nothing in radix tree
1456	 * beyond 'end'.
1457	 */
1458	if (result == SCAN_SUCCEED && index < end) {
1459		int n = end - index;
1460
1461		if (!shmem_charge(mapping->host, n)) {
1462			result = SCAN_FAIL;
1463			goto tree_locked;
1464		}
1465
1466		for (; index < end; index++) {
1467			radix_tree_insert(&mapping->i_pages, index,
1468					new_page + (index % HPAGE_PMD_NR));
1469		}
1470		nr_none += n;
1471	}
1472
1473tree_locked:
1474	xa_unlock_irq(&mapping->i_pages);
1475tree_unlocked:
1476
1477	if (result == SCAN_SUCCEED) {
1478		unsigned long flags;
1479		struct zone *zone = page_zone(new_page);
1480
 1481		/*
 1482		 * Replacing old pages with the new one has succeeded, now we
 1483		 * need to copy the content and free the old pages.
 1484		 */
1485		list_for_each_entry_safe(page, tmp, &pagelist, lru) {
1486			copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
1487					page);
1488			list_del(&page->lru);
1489			unlock_page(page);
1490			page_ref_unfreeze(page, 1);
1491			page->mapping = NULL;
1492			ClearPageActive(page);
1493			ClearPageUnevictable(page);
1494			put_page(page);
1495		}
1496
1497		local_irq_save(flags);
1498		__inc_node_page_state(new_page, NR_SHMEM_THPS);
1499		if (nr_none) {
1500			__mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
1501			__mod_node_page_state(zone->zone_pgdat, NR_SHMEM, nr_none);
1502		}
1503		local_irq_restore(flags);
1504
 1505		/*
 1506		 * Remove pte page tables, so we can re-fault
 1507		 * the page as huge.
 1508		 */
1509		retract_page_tables(mapping, start);
1510
1511		/* Everything is ready, let's unfreeze the new_page */
1512		set_page_dirty(new_page);
1513		SetPageUptodate(new_page);
1514		page_ref_unfreeze(new_page, HPAGE_PMD_NR);
1515		mem_cgroup_commit_charge(new_page, memcg, false, true);
1516		lru_cache_add_anon(new_page);
1517		unlock_page(new_page);
1518
1519		*hpage = NULL;
1520	} else {
1521		/* Something went wrong: rollback changes to the radix-tree */
1522		shmem_uncharge(mapping->host, nr_none);
1523		xa_lock_irq(&mapping->i_pages);
1524		radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start) {
1525			if (iter.index >= end)
1526				break;
1527			page = list_first_entry_or_null(&pagelist,
1528					struct page, lru);
1529			if (!page || iter.index < page->index) {
1530				if (!nr_none)
1531					break;
1532				nr_none--;
1533				/* Put holes back where they were */
1534				radix_tree_delete(&mapping->i_pages, iter.index);
1535				continue;
1536			}
1537
1538			VM_BUG_ON_PAGE(page->index != iter.index, page);
1539
1540			/* Unfreeze the page. */
1541			list_del(&page->lru);
1542			page_ref_unfreeze(page, 2);
1543			radix_tree_replace_slot(&mapping->i_pages, slot, page);
1544			slot = radix_tree_iter_resume(slot, &iter);
1545			xa_unlock_irq(&mapping->i_pages);
1546			putback_lru_page(page);
1547			unlock_page(page);
1548			xa_lock_irq(&mapping->i_pages);
1549		}
1550		VM_BUG_ON(nr_none);
1551		xa_unlock_irq(&mapping->i_pages);
1552
 1553		/* Unfreeze new_page; the caller will take care of freeing it */
1554		page_ref_unfreeze(new_page, 1);
1555		mem_cgroup_cancel_charge(new_page, memcg, true);
1556		unlock_page(new_page);
1557		new_page->mapping = NULL;
1558	}
1559out:
1560	VM_BUG_ON(!list_empty(&pagelist));
1561	/* TODO: tracepoints */
1562}
1563
1564static void khugepaged_scan_shmem(struct mm_struct *mm,
1565		struct address_space *mapping,
1566		pgoff_t start, struct page **hpage)
1567{
1568	struct page *page = NULL;
1569	struct radix_tree_iter iter;
1570	void **slot;
1571	int present, swap;
1572	int node = NUMA_NO_NODE;
1573	int result = SCAN_SUCCEED;
1574
1575	present = 0;
1576	swap = 0;
1577	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1578	rcu_read_lock();
1579	radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start) {
1580		if (iter.index >= start + HPAGE_PMD_NR)
1581			break;
1582
1583		page = radix_tree_deref_slot(slot);
1584		if (radix_tree_deref_retry(page)) {
1585			slot = radix_tree_iter_retry(&iter);
1586			continue;
1587		}
1588
1589		if (radix_tree_exception(page)) {
1590			if (++swap > khugepaged_max_ptes_swap) {
1591				result = SCAN_EXCEED_SWAP_PTE;
1592				break;
1593			}
1594			continue;
1595		}
1596
1597		if (PageTransCompound(page)) {
1598			result = SCAN_PAGE_COMPOUND;
1599			break;
1600		}
1601
1602		node = page_to_nid(page);
1603		if (khugepaged_scan_abort(node)) {
1604			result = SCAN_SCAN_ABORT;
1605			break;
1606		}
1607		khugepaged_node_load[node]++;
1608
1609		if (!PageLRU(page)) {
1610			result = SCAN_PAGE_LRU;
1611			break;
1612		}
1613
1614		if (page_count(page) != 1 + page_mapcount(page)) {
1615			result = SCAN_PAGE_COUNT;
1616			break;
1617		}
1618
1619		/*
1620		 * We probably should check if the page is referenced here, but
1621		 * nobody would transfer pte_young() to PageReferenced() for us.
1622		 * And rmap walk here is just too costly...
1623		 */
1624
1625		present++;
1626
1627		if (need_resched()) {
1628			slot = radix_tree_iter_resume(slot, &iter);
1629			cond_resched_rcu();
1630		}
1631	}
1632	rcu_read_unlock();
1633
1634	if (result == SCAN_SUCCEED) {
1635		if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
1636			result = SCAN_EXCEED_NONE_PTE;
1637		} else {
1638			node = khugepaged_find_target_node();
1639			collapse_shmem(mm, mapping, start, hpage, node);
1640		}
1641	}
1642
1643	/* TODO: tracepoints */
1644}
1645#else
1646static void khugepaged_scan_shmem(struct mm_struct *mm,
1647		struct address_space *mapping,
1648		pgoff_t start, struct page **hpage)
1649{
1650	BUILD_BUG();
1651}
1652#endif
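
The shmem path above only applies to mappings backed by tmpfs/shmem files with huge pages allowed. As an illustrative aside, and assuming a kernel built with CONFIG_TRANSPARENT_HUGE_PAGECACHE, such a filesystem can be mounted from a root-privileged userspace program; the mount point below is hypothetical and must already exist:

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* Requires CAP_SYS_ADMIN; "/mnt/thp-tmpfs" must already exist. */
	if (mount("tmpfs", "/mnt/thp-tmpfs", "tmpfs", 0,
		  "huge=always,size=1G") != 0) {
		perror("mount");
		return 1;
	}
	puts("tmpfs mounted with huge=always");
	return 0;
}

Files mapped from such a mount become candidates for khugepaged_scan_shmem()/collapse_shmem() above.
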
1653
1654static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
1655					    struct page **hpage)
1656	__releases(&khugepaged_mm_lock)
1657	__acquires(&khugepaged_mm_lock)
1658{
1659	struct mm_slot *mm_slot;
1660	struct mm_struct *mm;
1661	struct vm_area_struct *vma;
1662	int progress = 0;
1663
1664	VM_BUG_ON(!pages);
1665	VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
1666
1667	if (khugepaged_scan.mm_slot)
1668		mm_slot = khugepaged_scan.mm_slot;
1669	else {
1670		mm_slot = list_entry(khugepaged_scan.mm_head.next,
1671				     struct mm_slot, mm_node);
1672		khugepaged_scan.address = 0;
1673		khugepaged_scan.mm_slot = mm_slot;
1674	}
1675	spin_unlock(&khugepaged_mm_lock);
1676
1677	mm = mm_slot->mm;
1678	/*
1679	 * Don't wait for semaphore (to avoid long wait times).  Just move to
1680	 * the next mm on the list.
1681	 */
1682	vma = NULL;
1683	if (unlikely(!down_read_trylock(&mm->mmap_sem)))
1684		goto breakouterloop_mmap_sem;
1685	if (likely(!khugepaged_test_exit(mm)))
1686		vma = find_vma(mm, khugepaged_scan.address);
1687
1688	progress++;
1689	for (; vma; vma = vma->vm_next) {
1690		unsigned long hstart, hend;
1691
1692		cond_resched();
1693		if (unlikely(khugepaged_test_exit(mm))) {
1694			progress++;
1695			break;
1696		}
1697		if (!hugepage_vma_check(vma)) {
1698skip:
1699			progress++;
1700			continue;
1701		}
1702		hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1703		hend = vma->vm_end & HPAGE_PMD_MASK;
1704		if (hstart >= hend)
1705			goto skip;
1706		if (khugepaged_scan.address > hend)
1707			goto skip;
1708		if (khugepaged_scan.address < hstart)
1709			khugepaged_scan.address = hstart;
1710		VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
1711
1712		while (khugepaged_scan.address < hend) {
1713			int ret;
1714			cond_resched();
1715			if (unlikely(khugepaged_test_exit(mm)))
1716				goto breakouterloop;
1717
1718			VM_BUG_ON(khugepaged_scan.address < hstart ||
1719				  khugepaged_scan.address + HPAGE_PMD_SIZE >
1720				  hend);
1721			if (shmem_file(vma->vm_file)) {
1722				struct file *file;
1723				pgoff_t pgoff = linear_page_index(vma,
1724						khugepaged_scan.address);
1725				if (!shmem_huge_enabled(vma))
1726					goto skip;
1727				file = get_file(vma->vm_file);
1728				up_read(&mm->mmap_sem);
1729				ret = 1;
1730				khugepaged_scan_shmem(mm, file->f_mapping,
1731						pgoff, hpage);
1732				fput(file);
1733			} else {
1734				ret = khugepaged_scan_pmd(mm, vma,
1735						khugepaged_scan.address,
1736						hpage);
1737			}
1738			/* move to next address */
1739			khugepaged_scan.address += HPAGE_PMD_SIZE;
1740			progress += HPAGE_PMD_NR;
1741			if (ret)
1742				/* we released mmap_sem so break loop */
1743				goto breakouterloop_mmap_sem;
1744			if (progress >= pages)
1745				goto breakouterloop;
1746		}
1747	}
1748breakouterloop:
1749	up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
1750breakouterloop_mmap_sem:
1751
1752	spin_lock(&khugepaged_mm_lock);
1753	VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
1754	/*
1755	 * Release the current mm_slot if this mm is about to die, or
1756	 * if we scanned all vmas of this mm.
1757	 */
1758	if (khugepaged_test_exit(mm) || !vma) {
1759		/*
1760		 * Make sure that if mm_users is reaching zero while
1761		 * khugepaged runs here, khugepaged_exit will find
1762		 * mm_slot not pointing to the exiting mm.
1763		 */
1764		if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
1765			khugepaged_scan.mm_slot = list_entry(
1766				mm_slot->mm_node.next,
1767				struct mm_slot, mm_node);
1768			khugepaged_scan.address = 0;
1769		} else {
1770			khugepaged_scan.mm_slot = NULL;
1771			khugepaged_full_scans++;
1772		}
1773
1774		collect_mm_slot(mm_slot);
1775	}
1776
1777	return progress;
1778}
1779
1780static int khugepaged_has_work(void)
1781{
1782	return !list_empty(&khugepaged_scan.mm_head) &&
1783		khugepaged_enabled();
1784}
1785
1786static int khugepaged_wait_event(void)
1787{
1788	return !list_empty(&khugepaged_scan.mm_head) ||
1789		kthread_should_stop();
1790}
1791
1792static void khugepaged_do_scan(void)
1793{
1794	struct page *hpage = NULL;
1795	unsigned int progress = 0, pass_through_head = 0;
1796	unsigned int pages = khugepaged_pages_to_scan;
1797	bool wait = true;
1798
1799	barrier(); /* write khugepaged_pages_to_scan to local stack */
1800
1801	while (progress < pages) {
1802		if (!khugepaged_prealloc_page(&hpage, &wait))
1803			break;
1804
1805		cond_resched();
1806
1807		if (unlikely(kthread_should_stop() || try_to_freeze()))
1808			break;
1809
1810		spin_lock(&khugepaged_mm_lock);
1811		if (!khugepaged_scan.mm_slot)
1812			pass_through_head++;
1813		if (khugepaged_has_work() &&
1814		    pass_through_head < 2)
1815			progress += khugepaged_scan_mm_slot(pages - progress,
1816							    &hpage);
1817		else
1818			progress = pages;
1819		spin_unlock(&khugepaged_mm_lock);
1820	}
1821
1822	if (!IS_ERR_OR_NULL(hpage))
1823		put_page(hpage);
1824}
1825
1826static bool khugepaged_should_wakeup(void)
1827{
1828	return kthread_should_stop() ||
1829	       time_after_eq(jiffies, khugepaged_sleep_expire);
1830}
1831
1832static void khugepaged_wait_work(void)
1833{
1834	if (khugepaged_has_work()) {
1835		const unsigned long scan_sleep_jiffies =
1836			msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
1837
1838		if (!scan_sleep_jiffies)
1839			return;
1840
1841		khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
1842		wait_event_freezable_timeout(khugepaged_wait,
1843					     khugepaged_should_wakeup(),
1844					     scan_sleep_jiffies);
1845		return;
1846	}
1847
1848	if (khugepaged_enabled())
1849		wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
1850}
1851
1852static int khugepaged(void *none)
1853{
1854	struct mm_slot *mm_slot;
1855
1856	set_freezable();
1857	set_user_nice(current, MAX_NICE);
1858
1859	while (!kthread_should_stop()) {
1860		khugepaged_do_scan();
1861		khugepaged_wait_work();
1862	}
1863
1864	spin_lock(&khugepaged_mm_lock);
1865	mm_slot = khugepaged_scan.mm_slot;
1866	khugepaged_scan.mm_slot = NULL;
1867	if (mm_slot)
1868		collect_mm_slot(mm_slot);
1869	spin_unlock(&khugepaged_mm_lock);
1870	return 0;
1871}
1872
1873static void set_recommended_min_free_kbytes(void)
1874{
1875	struct zone *zone;
1876	int nr_zones = 0;
1877	unsigned long recommended_min;
1878
1879	for_each_populated_zone(zone) {
1880		/*
1881		 * We don't need to worry about fragmentation of
1882		 * ZONE_MOVABLE since it only has movable pages.
1883		 */
1884		if (zone_idx(zone) > gfp_zone(GFP_USER))
1885			continue;
1886
1887		nr_zones++;
1888	}
1889
1890	/* Ensure 2 pageblocks are free to assist fragmentation avoidance */
1891	recommended_min = pageblock_nr_pages * nr_zones * 2;
1892
 1893	/*
 1894	 * Make sure that on average at least two pageblocks are almost free
 1895	 * of another type, one for a migratetype to fall back to and a
 1896	 * second to avoid subsequent fallbacks of other types. There are 3
 1897	 * MIGRATE_TYPES we care about.
 1898	 */
1899	recommended_min += pageblock_nr_pages * nr_zones *
1900			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
1901
 1902	/* don't ever allow reserving more than 5% of the lowmem */
1903	recommended_min = min(recommended_min,
1904			      (unsigned long) nr_free_buffer_pages() / 20);
1905	recommended_min <<= (PAGE_SHIFT-10);
1906
1907	if (recommended_min > min_free_kbytes) {
1908		if (user_min_free_kbytes >= 0)
1909			pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
1910				min_free_kbytes, recommended_min);
1911
1912		min_free_kbytes = recommended_min;
1913	}
1914	setup_per_zone_wmarks();
1915}
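
For a concrete feel of the numbers, the calculation above can be worked through in userspace under assumed x86-64 defaults: 4 KiB pages (PAGE_SHIFT == 12), 2 MiB pageblocks (512 pages) and MIGRATE_PCPTYPES == 3. The zone count and lowmem size below are made up:

#include <stdio.h>

int main(void)
{
	unsigned long pageblock_nr_pages = 512;
	unsigned long nr_zones = 3;		/* e.g. DMA, DMA32, Normal */
	unsigned long migrate_pcptypes = 3;
	unsigned long lowmem_pages = 4UL << 20;	/* pretend 16 GiB of lowmem */
	unsigned long min;

	/* two free pageblocks per zone ... */
	min = pageblock_nr_pages * nr_zones * 2;
	/* ... plus MIGRATE_PCPTYPES * MIGRATE_PCPTYPES more per zone */
	min += pageblock_nr_pages * nr_zones *
	       migrate_pcptypes * migrate_pcptypes;

	/* cap at 5% of lowmem, then convert pages to kilobytes */
	if (min > lowmem_pages / 20)
		min = lowmem_pages / 20;
	min <<= (12 - 10);

	printf("recommended min_free_kbytes = %lu kB\n", min);	/* 67584 kB */
	return 0;
}
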
1916
1917int start_stop_khugepaged(void)
1918{
1919	static struct task_struct *khugepaged_thread __read_mostly;
1920	static DEFINE_MUTEX(khugepaged_mutex);
1921	int err = 0;
1922
1923	mutex_lock(&khugepaged_mutex);
1924	if (khugepaged_enabled()) {
1925		if (!khugepaged_thread)
1926			khugepaged_thread = kthread_run(khugepaged, NULL,
1927							"khugepaged");
1928		if (IS_ERR(khugepaged_thread)) {
1929			pr_err("khugepaged: kthread_run(khugepaged) failed\n");
1930			err = PTR_ERR(khugepaged_thread);
1931			khugepaged_thread = NULL;
1932			goto fail;
1933		}
1934
1935		if (!list_empty(&khugepaged_scan.mm_head))
1936			wake_up_interruptible(&khugepaged_wait);
1937
1938		set_recommended_min_free_kbytes();
1939	} else if (khugepaged_thread) {
1940		kthread_stop(khugepaged_thread);
1941		khugepaged_thread = NULL;
1942	}
1943fail:
1944	mutex_unlock(&khugepaged_mutex);
1945	return err;
1946}