   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  linux/mm/memory.c
   4 *
   5 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
   6 */
   7
   8/*
   9 * demand-loading started 01.12.91 - seems it is high on the list of
  10 * things wanted, and it should be easy to implement. - Linus
  11 */
  12
  13/*
   14 * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
  15 * pages started 02.12.91, seems to work. - Linus.
  16 *
  17 * Tested sharing by executing about 30 /bin/sh: under the old kernel it
  18 * would have taken more than the 6M I have free, but it worked well as
  19 * far as I could see.
  20 *
  21 * Also corrected some "invalidate()"s - I wasn't doing enough of them.
  22 */
  23
  24/*
  25 * Real VM (paging to/from disk) started 18.12.91. Much more work and
  26 * thought has to go into this. Oh, well..
  27 * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
  28 *		Found it. Everything seems to work now.
  29 * 20.12.91  -  Ok, making the swap-device changeable like the root.
  30 */
  31
  32/*
  33 * 05.04.94  -  Multi-page memory management added for v1.1.
  34 *              Idea by Alex Bligh (alex@cconcepts.co.uk)
  35 *
  36 * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
  37 *		(Gerhard.Wichert@pdb.siemens.de)
  38 *
  39 * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
  40 */
  41
  42#include <linux/kernel_stat.h>
  43#include <linux/mm.h>
  44#include <linux/sched/mm.h>
  45#include <linux/sched/coredump.h>
  46#include <linux/sched/numa_balancing.h>
  47#include <linux/sched/task.h>
  48#include <linux/hugetlb.h>
  49#include <linux/mman.h>
  50#include <linux/swap.h>
  51#include <linux/highmem.h>
  52#include <linux/pagemap.h>
  53#include <linux/memremap.h>
  54#include <linux/ksm.h>
  55#include <linux/rmap.h>
  56#include <linux/export.h>
  57#include <linux/delayacct.h>
  58#include <linux/init.h>
  59#include <linux/pfn_t.h>
  60#include <linux/writeback.h>
  61#include <linux/memcontrol.h>
  62#include <linux/mmu_notifier.h>
  63#include <linux/swapops.h>
  64#include <linux/elf.h>
  65#include <linux/gfp.h>
  66#include <linux/migrate.h>
   67#include <linux/string.h>
  68#include <linux/debugfs.h>
  69#include <linux/userfaultfd_k.h>
  70#include <linux/dax.h>
  71#include <linux/oom.h>
  72#include <linux/numa.h>
  73#include <linux/perf_event.h>
  74#include <linux/ptrace.h>
  75#include <linux/vmalloc.h>
  76
  77#include <trace/events/kmem.h>
  78
  79#include <asm/io.h>
  80#include <asm/mmu_context.h>
  81#include <asm/pgalloc.h>
  82#include <linux/uaccess.h>
  83#include <asm/tlb.h>
  84#include <asm/tlbflush.h>
  85
  86#include "pgalloc-track.h"
  87#include "internal.h"
  88
  89#if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
  90#warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
  91#endif
  92
   93#ifndef CONFIG_NUMA
  94unsigned long max_mapnr;
  95EXPORT_SYMBOL(max_mapnr);
  96
  97struct page *mem_map;
  98EXPORT_SYMBOL(mem_map);
  99#endif
 100
 101/*
 102 * A number of key systems in x86 including ioremap() rely on the assumption
  103 * that high_memory defines the upper bound on direct map memory, i.e. the end
 104 * of ZONE_NORMAL.  Under CONFIG_DISCONTIG this means that max_low_pfn and
 105 * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
 106 * and ZONE_HIGHMEM.
 107 */
 108void *high_memory;
 109EXPORT_SYMBOL(high_memory);
 110
 111/*
 112 * Randomize the address space (stacks, mmaps, brk, etc.).
 113 *
 114 * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
 115 *   as ancient (libc5 based) binaries can segfault. )
 116 */
 117int randomize_va_space __read_mostly =
 118#ifdef CONFIG_COMPAT_BRK
 119					1;
 120#else
 121					2;
 122#endif
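
/*
 * Example (illustrative): the policy chosen above is also exposed at
 * runtime via /proc/sys/kernel/randomize_va_space:
 *
 *	$ cat /proc/sys/kernel/randomize_va_space
 *	2
 *	# echo 0 > /proc/sys/kernel/randomize_va_space
 *
 * 0 disables randomization (like booting with "norandmaps" below),
 * 1 randomizes stack/mmap/vdso placement, and 2 additionally randomizes
 * the brk base.
 */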
 123
 124#ifndef arch_faults_on_old_pte
 125static inline bool arch_faults_on_old_pte(void)
 126{
 127	/*
  128	 * Architectures that lack a hardware access-flag feature need to
  129	 * implement their own helper. By default, "true" means a page fault
  130	 * will be taken on an old pte.
 131	 */
 132	return true;
 133}
 134#endif
 135
 136#ifndef arch_wants_old_prefaulted_pte
 137static inline bool arch_wants_old_prefaulted_pte(void)
 138{
 139	/*
 140	 * Transitioning a PTE from 'old' to 'young' can be expensive on
 141	 * some architectures, even if it's performed in hardware. By
 142	 * default, "false" means prefaulted entries will be 'young'.
 143	 */
 144	return false;
 145}
 146#endif
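
/*
 * Example (illustrative sketch): an architecture whose hardware sets the
 * access flag can override these hooks from its asm/pgtable.h, along the
 * lines of:
 *
 *	#define arch_faults_on_old_pte arch_faults_on_old_pte
 *	static inline bool arch_faults_on_old_pte(void)
 *	{
 *		return !cpu_has_hw_access_flag();	// hypothetical helper
 *	}
 *
 * The #define makes the #ifndef fallbacks above compile out.
 */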
 147
 148static int __init disable_randmaps(char *s)
 149{
 150	randomize_va_space = 0;
 151	return 1;
 152}
 153__setup("norandmaps", disable_randmaps);
 154
 155unsigned long zero_pfn __read_mostly;
 156EXPORT_SYMBOL(zero_pfn);
 157
 158unsigned long highest_memmap_pfn __read_mostly;
 159
 160/*
 161 * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
 162 */
 163static int __init init_zero_pfn(void)
 164{
 165	zero_pfn = page_to_pfn(ZERO_PAGE(0));
 166	return 0;
 167}
 168early_initcall(init_zero_pfn);
 169
 170void mm_trace_rss_stat(struct mm_struct *mm, int member, long count)
 171{
 172	trace_rss_stat(mm, member, count);
 173}
 174
 175#if defined(SPLIT_RSS_COUNTING)
 176
 177void sync_mm_rss(struct mm_struct *mm)
 178{
 179	int i;
 180
 181	for (i = 0; i < NR_MM_COUNTERS; i++) {
 182		if (current->rss_stat.count[i]) {
 183			add_mm_counter(mm, i, current->rss_stat.count[i]);
 184			current->rss_stat.count[i] = 0;
 185		}
 186	}
 187	current->rss_stat.events = 0;
 188}
 189
 190static void add_mm_counter_fast(struct mm_struct *mm, int member, int val)
 191{
 192	struct task_struct *task = current;
 193
 194	if (likely(task->mm == mm))
 195		task->rss_stat.count[member] += val;
 196	else
 197		add_mm_counter(mm, member, val);
 198}
 199#define inc_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, 1)
 200#define dec_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, -1)
 201
 202/* sync counter once per 64 page faults */
 203#define TASK_RSS_EVENTS_THRESH	(64)
 204static void check_sync_rss_stat(struct task_struct *task)
 205{
 206	if (unlikely(task != current))
 207		return;
 208	if (unlikely(task->rss_stat.events++ > TASK_RSS_EVENTS_THRESH))
 209		sync_mm_rss(task->mm);
 210}
 211#else /* SPLIT_RSS_COUNTING */
 212
 213#define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member)
 214#define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member)
 215
 216static void check_sync_rss_stat(struct task_struct *task)
 217{
 218}
 219
 220#endif /* SPLIT_RSS_COUNTING */
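
/*
 * Example (illustrative): with SPLIT_RSS_COUNTING enabled, the fault path
 * does, in effect:
 *
 *	inc_mm_counter_fast(mm, MM_ANONPAGES);	// per-task delta, no atomics
 *	check_sync_rss_stat(current);		// fold back every ~64 events
 *
 * so the shared mm counters are only touched once per-task deltas have
 * accumulated, rather than atomically on every fault.
 */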
 221
 222/*
 223 * Note: this doesn't free the actual pages themselves. That
 224 * has been handled earlier when unmapping all the memory regions.
 225 */
 226static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
 227			   unsigned long addr)
 228{
 229	pgtable_t token = pmd_pgtable(*pmd);
 230	pmd_clear(pmd);
 231	pte_free_tlb(tlb, token, addr);
 232	mm_dec_nr_ptes(tlb->mm);
 233}
 234
 235static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
 236				unsigned long addr, unsigned long end,
 237				unsigned long floor, unsigned long ceiling)
 238{
 239	pmd_t *pmd;
 240	unsigned long next;
 241	unsigned long start;
 242
 243	start = addr;
 244	pmd = pmd_offset(pud, addr);
 245	do {
 246		next = pmd_addr_end(addr, end);
 247		if (pmd_none_or_clear_bad(pmd))
 248			continue;
 249		free_pte_range(tlb, pmd, addr);
 250	} while (pmd++, addr = next, addr != end);
 251
 252	start &= PUD_MASK;
 253	if (start < floor)
 254		return;
 255	if (ceiling) {
 256		ceiling &= PUD_MASK;
 257		if (!ceiling)
 258			return;
 259	}
 260	if (end - 1 > ceiling - 1)
 261		return;
 262
 263	pmd = pmd_offset(pud, start);
 264	pud_clear(pud);
 265	pmd_free_tlb(tlb, pmd, start);
 266	mm_dec_nr_pmds(tlb->mm);
 267}
 268
 269static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
 270				unsigned long addr, unsigned long end,
 271				unsigned long floor, unsigned long ceiling)
 272{
 273	pud_t *pud;
 274	unsigned long next;
 275	unsigned long start;
 276
 277	start = addr;
 278	pud = pud_offset(p4d, addr);
 279	do {
 280		next = pud_addr_end(addr, end);
 281		if (pud_none_or_clear_bad(pud))
 282			continue;
 283		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
 284	} while (pud++, addr = next, addr != end);
 285
 286	start &= P4D_MASK;
 287	if (start < floor)
 288		return;
 289	if (ceiling) {
 290		ceiling &= P4D_MASK;
 291		if (!ceiling)
 292			return;
 293	}
 294	if (end - 1 > ceiling - 1)
 295		return;
 296
 297	pud = pud_offset(p4d, start);
 298	p4d_clear(p4d);
 299	pud_free_tlb(tlb, pud, start);
 300	mm_dec_nr_puds(tlb->mm);
 301}
 302
 303static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
 304				unsigned long addr, unsigned long end,
 305				unsigned long floor, unsigned long ceiling)
 306{
 307	p4d_t *p4d;
 308	unsigned long next;
 309	unsigned long start;
 310
 311	start = addr;
 312	p4d = p4d_offset(pgd, addr);
 313	do {
 314		next = p4d_addr_end(addr, end);
 315		if (p4d_none_or_clear_bad(p4d))
 316			continue;
 317		free_pud_range(tlb, p4d, addr, next, floor, ceiling);
 318	} while (p4d++, addr = next, addr != end);
 319
 320	start &= PGDIR_MASK;
 321	if (start < floor)
 322		return;
 323	if (ceiling) {
 324		ceiling &= PGDIR_MASK;
 325		if (!ceiling)
 326			return;
 327	}
 328	if (end - 1 > ceiling - 1)
 329		return;
 330
 331	p4d = p4d_offset(pgd, start);
 332	pgd_clear(pgd);
 333	p4d_free_tlb(tlb, p4d, start);
 334}
 335
 336/*
 337 * This function frees user-level page tables of a process.
 338 */
 339void free_pgd_range(struct mmu_gather *tlb,
 340			unsigned long addr, unsigned long end,
 341			unsigned long floor, unsigned long ceiling)
 342{
 343	pgd_t *pgd;
 344	unsigned long next;
 345
 346	/*
 347	 * The next few lines have given us lots of grief...
 348	 *
 349	 * Why are we testing PMD* at this top level?  Because often
 350	 * there will be no work to do at all, and we'd prefer not to
 351	 * go all the way down to the bottom just to discover that.
 352	 *
 353	 * Why all these "- 1"s?  Because 0 represents both the bottom
 354	 * of the address space and the top of it (using -1 for the
 355	 * top wouldn't help much: the masks would do the wrong thing).
 356	 * The rule is that addr 0 and floor 0 refer to the bottom of
  357	 * the address space, but end 0 and ceiling 0 refer to the top.
 358	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
 359	 * that end 0 case should be mythical).
 360	 *
 361	 * Wherever addr is brought up or ceiling brought down, we must
 362	 * be careful to reject "the opposite 0" before it confuses the
 363	 * subsequent tests.  But what about where end is brought down
 364	 * by PMD_SIZE below? no, end can't go down to 0 there.
 365	 *
 366	 * Whereas we round start (addr) and ceiling down, by different
 367	 * masks at different levels, in order to test whether a table
 368	 * now has no other vmas using it, so can be freed, we don't
 369	 * bother to round floor or end up - the tests don't need that.
 370	 */
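	/*
	 * Worked example (illustrative, assuming a 2MB PMD_SIZE): with
	 * addr = 0x3ff000 and floor = 0x200000, "addr &= PMD_MASK" below
	 * rounds addr to 0x200000, which is not below floor, so the
	 * tables from 0x200000 upwards may be freed.  With floor =
	 * 0x300000 instead, the rounded addr would cover a table the
	 * floor still needs, so addr is bumped by PMD_SIZE to 0x400000;
	 * had that bump wrapped to 0 we would bail out, since 0 would
	 * now mean "bottom" while we came down from the top.
	 */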
 371
 372	addr &= PMD_MASK;
 373	if (addr < floor) {
 374		addr += PMD_SIZE;
 375		if (!addr)
 376			return;
 377	}
 378	if (ceiling) {
 379		ceiling &= PMD_MASK;
 380		if (!ceiling)
 381			return;
 382	}
 383	if (end - 1 > ceiling - 1)
 384		end -= PMD_SIZE;
 385	if (addr > end - 1)
 386		return;
 387	/*
  388	 * Page table cache pages are added with size PAGE_SIZE
  389	 * (see pte_free_tlb()); flush the TLB if we need to.
 390	 */
 391	tlb_change_page_size(tlb, PAGE_SIZE);
 392	pgd = pgd_offset(tlb->mm, addr);
 393	do {
 394		next = pgd_addr_end(addr, end);
 395		if (pgd_none_or_clear_bad(pgd))
 396			continue;
 397		free_p4d_range(tlb, pgd, addr, next, floor, ceiling);
 398	} while (pgd++, addr = next, addr != end);
 399}
 400
 401void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
 402		unsigned long floor, unsigned long ceiling)
 403{
 404	while (vma) {
 405		struct vm_area_struct *next = vma->vm_next;
 406		unsigned long addr = vma->vm_start;
 407
 408		/*
 409		 * Hide vma from rmap and truncate_pagecache before freeing
 410		 * pgtables
 411		 */
 412		unlink_anon_vmas(vma);
 413		unlink_file_vma(vma);
 414
 415		if (is_vm_hugetlb_page(vma)) {
 416			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
 417				floor, next ? next->vm_start : ceiling);
 418		} else {
 419			/*
 420			 * Optimization: gather nearby vmas into one call down
 421			 */
 422			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
 423			       && !is_vm_hugetlb_page(next)) {
 424				vma = next;
 425				next = vma->vm_next;
 426				unlink_anon_vmas(vma);
 427				unlink_file_vma(vma);
 428			}
 429			free_pgd_range(tlb, addr, vma->vm_end,
 430				floor, next ? next->vm_start : ceiling);
 431		}
 432		vma = next;
 433	}
 434}
 435
 436int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
 437{
 438	spinlock_t *ptl;
 439	pgtable_t new = pte_alloc_one(mm);
 440	if (!new)
 441		return -ENOMEM;
 442
 443	/*
  444	 * Ensure all pte setup (eg. pte page lock and page clearing) is
 445	 * visible before the pte is made visible to other CPUs by being
 446	 * put into page tables.
 447	 *
 448	 * The other side of the story is the pointer chasing in the page
 449	 * table walking code (when walking the page table without locking;
 450	 * ie. most of the time). Fortunately, these data accesses consist
 451	 * of a chain of data-dependent loads, meaning most CPUs (alpha
 452	 * being the notable exception) will already guarantee loads are
 453	 * seen in-order. See the alpha page table accessors for the
 454	 * smp_rmb() barriers in page table walking code.
 455	 */
 456	smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
 457
 458	ptl = pmd_lock(mm, pmd);
 459	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
 460		mm_inc_nr_ptes(mm);
 461		pmd_populate(mm, pmd, new);
 462		new = NULL;
 463	}
 464	spin_unlock(ptl);
 465	if (new)
 466		pte_free(mm, new);
 467	return 0;
 468}
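
/*
 * Example (illustrative): callers normally use the pte_alloc() wrapper
 * from <linux/mm.h>, which only drops into __pte_alloc() when the pmd is
 * still empty:
 *
 *	if (pte_alloc(mm, pmd))
 *		return VM_FAULT_OOM;
 *
 * The pmd_none() re-check under the pmd lock above then resolves the race
 * where two threads fault in the same pmd concurrently.
 */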
 469
 470int __pte_alloc_kernel(pmd_t *pmd)
 471{
 472	pte_t *new = pte_alloc_one_kernel(&init_mm);
 473	if (!new)
 474		return -ENOMEM;
 475
 476	smp_wmb(); /* See comment in __pte_alloc */
 477
 478	spin_lock(&init_mm.page_table_lock);
 479	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
 480		pmd_populate_kernel(&init_mm, pmd, new);
 481		new = NULL;
 482	}
 483	spin_unlock(&init_mm.page_table_lock);
 484	if (new)
 485		pte_free_kernel(&init_mm, new);
 486	return 0;
 487}
 488
 489static inline void init_rss_vec(int *rss)
 490{
 491	memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
 492}
 493
 494static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
 495{
 496	int i;
 497
 498	if (current->mm == mm)
 499		sync_mm_rss(mm);
 500	for (i = 0; i < NR_MM_COUNTERS; i++)
 501		if (rss[i])
 502			add_mm_counter(mm, i, rss[i]);
 503}
 504
 505/*
 506 * This function is called to print an error when a bad pte
 507 * is found. For example, we might have a PFN-mapped pte in
 508 * a region that doesn't allow it.
 509 *
 510 * The calling function must still handle the error.
 511 */
 512static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
 513			  pte_t pte, struct page *page)
 514{
 515	pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
 516	p4d_t *p4d = p4d_offset(pgd, addr);
 517	pud_t *pud = pud_offset(p4d, addr);
 518	pmd_t *pmd = pmd_offset(pud, addr);
 519	struct address_space *mapping;
 520	pgoff_t index;
 521	static unsigned long resume;
 522	static unsigned long nr_shown;
 523	static unsigned long nr_unshown;
 524
 525	/*
 526	 * Allow a burst of 60 reports, then keep quiet for that minute;
 527	 * or allow a steady drip of one report per second.
 528	 */
 529	if (nr_shown == 60) {
 530		if (time_before(jiffies, resume)) {
 531			nr_unshown++;
 532			return;
 533		}
 534		if (nr_unshown) {
 535			pr_alert("BUG: Bad page map: %lu messages suppressed\n",
 536				 nr_unshown);
 537			nr_unshown = 0;
 538		}
 539		nr_shown = 0;
 540	}
 541	if (nr_shown++ == 0)
 542		resume = jiffies + 60 * HZ;
 543
 544	mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
 545	index = linear_page_index(vma, addr);
 546
 547	pr_alert("BUG: Bad page map in process %s  pte:%08llx pmd:%08llx\n",
 548		 current->comm,
 549		 (long long)pte_val(pte), (long long)pmd_val(*pmd));
 550	if (page)
 551		dump_page(page, "bad pte");
 552	pr_alert("addr:%px vm_flags:%08lx anon_vma:%px mapping:%px index:%lx\n",
 553		 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
 554	pr_alert("file:%pD fault:%ps mmap:%ps readpage:%ps\n",
 555		 vma->vm_file,
 556		 vma->vm_ops ? vma->vm_ops->fault : NULL,
 557		 vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
 558		 mapping ? mapping->a_ops->readpage : NULL);
 559	dump_stack();
 560	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
 561}
 562
 563/*
 564 * vm_normal_page -- This function gets the "struct page" associated with a pte.
 565 *
 566 * "Special" mappings do not wish to be associated with a "struct page" (either
 567 * it doesn't exist, or it exists but they don't want to touch it). In this
 568 * case, NULL is returned here. "Normal" mappings do have a struct page.
 569 *
 570 * There are 2 broad cases. Firstly, an architecture may define a pte_special()
 571 * pte bit, in which case this function is trivial. Secondly, an architecture
 572 * may not have a spare pte bit, which requires a more complicated scheme,
 573 * described below.
 574 *
 575 * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
 576 * special mapping (even if there are underlying and valid "struct pages").
 577 * COWed pages of a VM_PFNMAP are always normal.
 578 *
 579 * The way we recognize COWed pages within VM_PFNMAP mappings is through the
 580 * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
 581 * set, and the vm_pgoff will point to the first PFN mapped: thus every special
 582 * mapping will always honor the rule
 583 *
 584 *	pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
 585 *
 586 * And for normal mappings this is false.
 587 *
 588 * This restricts such mappings to be a linear translation from virtual address
 589 * to pfn. To get around this restriction, we allow arbitrary mappings so long
 590 * as the vma is not a COW mapping; in that case, we know that all ptes are
 591 * special (because none can have been COWed).
 592 *
 593 *
 594 * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
 595 *
 596 * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
 597 * page" backing, however the difference is that _all_ pages with a struct
 598 * page (that is, those where pfn_valid is true) are refcounted and considered
 599 * normal pages by the VM. The disadvantage is that pages are refcounted
 600 * (which can be slower and simply not an option for some PFNMAP users). The
 601 * advantage is that we don't have to follow the strict linearity rule of
 602 * PFNMAP mappings in order to support COWable mappings.
 603 *
 604 */
 605struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
 606			    pte_t pte)
 607{
 608	unsigned long pfn = pte_pfn(pte);
 609
 610	if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL)) {
 611		if (likely(!pte_special(pte)))
 612			goto check_pfn;
 613		if (vma->vm_ops && vma->vm_ops->find_special_page)
 614			return vma->vm_ops->find_special_page(vma, addr);
 615		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
 616			return NULL;
 617		if (is_zero_pfn(pfn))
 618			return NULL;
 619		if (pte_devmap(pte))
 620			return NULL;
 621
 622		print_bad_pte(vma, addr, pte, NULL);
 623		return NULL;
 624	}
 625
 626	/* !CONFIG_ARCH_HAS_PTE_SPECIAL case follows: */
 627
 628	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
 629		if (vma->vm_flags & VM_MIXEDMAP) {
 630			if (!pfn_valid(pfn))
 631				return NULL;
 632			goto out;
 633		} else {
 634			unsigned long off;
 635			off = (addr - vma->vm_start) >> PAGE_SHIFT;
 636			if (pfn == vma->vm_pgoff + off)
 637				return NULL;
 638			if (!is_cow_mapping(vma->vm_flags))
 639				return NULL;
 640		}
 641	}
 642
 643	if (is_zero_pfn(pfn))
 644		return NULL;
 645
 646check_pfn:
 647	if (unlikely(pfn > highest_memmap_pfn)) {
 648		print_bad_pte(vma, addr, pte, NULL);
 649		return NULL;
 650	}
 651
 652	/*
 653	 * NOTE! We still have PageReserved() pages in the page tables.
 654	 * eg. VDSO mappings can cause them to exist.
 655	 */
 656out:
 657	return pfn_to_page(pfn);
 658}
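
/*
 * Example (illustrative): after remap_pfn_range(vma, vma->vm_start, 0x100,
 * size, prot) on a COW-able VM_PFNMAP mapping, vm_pgoff is 0x100 and the
 * pte at vm_start + PAGE_SIZE holds pfn 0x101, so the identity in the
 * comment above holds and the pte is treated as special.  Once that page
 * is COWed, the anonymous copy's pfn no longer equals vm_pgoff + off, and
 * vm_normal_page() reports it as a normal page.
 */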
 659
 660#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 661struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
 662				pmd_t pmd)
 663{
 664	unsigned long pfn = pmd_pfn(pmd);
 665
 666	/*
 667	 * There is no pmd_special() but there may be special pmds, e.g.
 668	 * in a direct-access (dax) mapping, so let's just replicate the
 669	 * !CONFIG_ARCH_HAS_PTE_SPECIAL case from vm_normal_page() here.
 670	 */
 671	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
 672		if (vma->vm_flags & VM_MIXEDMAP) {
 673			if (!pfn_valid(pfn))
 674				return NULL;
 675			goto out;
 676		} else {
 677			unsigned long off;
 678			off = (addr - vma->vm_start) >> PAGE_SHIFT;
 679			if (pfn == vma->vm_pgoff + off)
 680				return NULL;
 681			if (!is_cow_mapping(vma->vm_flags))
 682				return NULL;
 683		}
 684	}
 685
 686	if (pmd_devmap(pmd))
 687		return NULL;
 688	if (is_huge_zero_pmd(pmd))
 689		return NULL;
 690	if (unlikely(pfn > highest_memmap_pfn))
 691		return NULL;
 692
 693	/*
 694	 * NOTE! We still have PageReserved() pages in the page tables.
 695	 * eg. VDSO mappings can cause them to exist.
 696	 */
 697out:
 698	return pfn_to_page(pfn);
 699}
 700#endif
 701
 702static void restore_exclusive_pte(struct vm_area_struct *vma,
 703				  struct page *page, unsigned long address,
 704				  pte_t *ptep)
 705{
 706	pte_t pte;
 707	swp_entry_t entry;
 708
 709	pte = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot)));
 710	if (pte_swp_soft_dirty(*ptep))
 711		pte = pte_mksoft_dirty(pte);
 712
 713	entry = pte_to_swp_entry(*ptep);
 714	if (pte_swp_uffd_wp(*ptep))
 715		pte = pte_mkuffd_wp(pte);
 716	else if (is_writable_device_exclusive_entry(entry))
 717		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
 718
 719	set_pte_at(vma->vm_mm, address, ptep, pte);
 720
 721	/*
 722	 * No need to take a page reference as one was already
 723	 * created when the swap entry was made.
 724	 */
 725	if (PageAnon(page))
 726		page_add_anon_rmap(page, vma, address, false);
 727	else
 728		/*
 729		 * Currently device exclusive access only supports anonymous
 730		 * memory so the entry shouldn't point to a filebacked page.
 731		 */
 732		WARN_ON_ONCE(!PageAnon(page));
 733
 734	if (vma->vm_flags & VM_LOCKED)
 735		mlock_vma_page(page);
 736
 737	/*
 738	 * No need to invalidate - it was non-present before. However
 739	 * secondary CPUs may have mappings that need invalidating.
 740	 */
 741	update_mmu_cache(vma, address, ptep);
 742}
 743
 744/*
 745 * Tries to restore an exclusive pte if the page lock can be acquired without
 746 * sleeping.
 747 */
 748static int
 749try_restore_exclusive_pte(pte_t *src_pte, struct vm_area_struct *vma,
 750			unsigned long addr)
 751{
 752	swp_entry_t entry = pte_to_swp_entry(*src_pte);
 753	struct page *page = pfn_swap_entry_to_page(entry);
 754
 755	if (trylock_page(page)) {
 756		restore_exclusive_pte(vma, page, addr, src_pte);
 757		unlock_page(page);
 758		return 0;
 759	}
 760
 761	return -EBUSY;
 762}
 763
 764/*
  765 * Copy one vm_area from one task to the other. Assumes that the page
  766 * tables already present in the new task are cleared in the whole range
  767 * covered by this vma.
 768 */
 769
 770static unsigned long
 771copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 772		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *dst_vma,
 773		struct vm_area_struct *src_vma, unsigned long addr, int *rss)
 774{
 775	unsigned long vm_flags = dst_vma->vm_flags;
 776	pte_t pte = *src_pte;
 777	struct page *page;
 778	swp_entry_t entry = pte_to_swp_entry(pte);
 779
 780	if (likely(!non_swap_entry(entry))) {
 781		if (swap_duplicate(entry) < 0)
 782			return -EIO;
 783
 784		/* make sure dst_mm is on swapoff's mmlist. */
 785		if (unlikely(list_empty(&dst_mm->mmlist))) {
 786			spin_lock(&mmlist_lock);
 787			if (list_empty(&dst_mm->mmlist))
 788				list_add(&dst_mm->mmlist,
 789						&src_mm->mmlist);
 790			spin_unlock(&mmlist_lock);
 791		}
 792		rss[MM_SWAPENTS]++;
 793	} else if (is_migration_entry(entry)) {
 794		page = pfn_swap_entry_to_page(entry);
 795
 796		rss[mm_counter(page)]++;
 797
 798		if (is_writable_migration_entry(entry) &&
 799				is_cow_mapping(vm_flags)) {
 800			/*
 801			 * COW mappings require pages in both
  802			 * parent and child to be set to read-only.
 803			 */
 804			entry = make_readable_migration_entry(
 805							swp_offset(entry));
 806			pte = swp_entry_to_pte(entry);
 807			if (pte_swp_soft_dirty(*src_pte))
 808				pte = pte_swp_mksoft_dirty(pte);
 809			if (pte_swp_uffd_wp(*src_pte))
 810				pte = pte_swp_mkuffd_wp(pte);
 811			set_pte_at(src_mm, addr, src_pte, pte);
 812		}
 813	} else if (is_device_private_entry(entry)) {
 814		page = pfn_swap_entry_to_page(entry);
 815
 816		/*
 817		 * Update rss count even for unaddressable pages, as
  818		 * they should be treated just like normal pages in this
 819		 * respect.
 820		 *
 821		 * We will likely want to have some new rss counters
 822		 * for unaddressable pages, at some point. But for now
 823		 * keep things as they are.
 824		 */
 825		get_page(page);
 826		rss[mm_counter(page)]++;
 827		page_dup_rmap(page, false);
 828
 829		/*
 830		 * We do not preserve soft-dirty information, because so
 831		 * far, checkpoint/restore is the only feature that
 832		 * requires that. And checkpoint/restore does not work
 833		 * when a device driver is involved (you cannot easily
 834		 * save and restore device driver state).
 835		 */
 836		if (is_writable_device_private_entry(entry) &&
 837		    is_cow_mapping(vm_flags)) {
 838			entry = make_readable_device_private_entry(
 839							swp_offset(entry));
 840			pte = swp_entry_to_pte(entry);
 841			if (pte_swp_uffd_wp(*src_pte))
 842				pte = pte_swp_mkuffd_wp(pte);
 843			set_pte_at(src_mm, addr, src_pte, pte);
 844		}
 845	} else if (is_device_exclusive_entry(entry)) {
 846		/*
 847		 * Make device exclusive entries present by restoring the
 848		 * original entry then copying as for a present pte. Device
 849		 * exclusive entries currently only support private writable
 850		 * (ie. COW) mappings.
 851		 */
 852		VM_BUG_ON(!is_cow_mapping(src_vma->vm_flags));
 853		if (try_restore_exclusive_pte(src_pte, src_vma, addr))
 854			return -EBUSY;
 855		return -ENOENT;
 856	}
 857	if (!userfaultfd_wp(dst_vma))
 858		pte = pte_swp_clear_uffd_wp(pte);
 859	set_pte_at(dst_mm, addr, dst_pte, pte);
 860	return 0;
 861}
 862
 863/*
 864 * Copy a present and normal page if necessary.
 865 *
 866 * NOTE! The usual case is that this doesn't need to do
 867 * anything, and can just return a positive value. That
 868 * will let the caller know that it can just increase
 869 * the page refcount and re-use the pte the traditional
 870 * way.
 871 *
 872 * But _if_ we need to copy it because it needs to be
 873 * pinned in the parent (and the child should get its own
 874 * copy rather than just a reference to the same page),
 875 * we'll do that here and return zero to let the caller
 876 * know we're done.
 877 *
 878 * And if we need a pre-allocated page but don't yet have
 879 * one, return a negative error to let the preallocation
 880 * code know so that it can do so outside the page table
 881 * lock.
 882 */
 883static inline int
 884copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
 885		  pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
  886		  struct page **prealloc, pte_t pte, struct page *page)
 887{
 888	struct page *new_page;
 889
 
 
 
 890	/*
 891	 * What we want to do is to check whether this page may
 892	 * have been pinned by the parent process.  If so,
 893	 * instead of wrprotect the pte on both sides, we copy
 894	 * the page immediately so that we'll always guarantee
 895	 * the pinned page won't be randomly replaced in the
 896	 * future.
 897	 *
 898	 * The page pinning checks are just "has this mm ever
 899	 * seen pinning", along with the (inexact) check of
  900	 * the page count. That might give false positives
 901	 * for pinning, but it will work correctly.
 902	 */
  903	if (likely(!page_needs_cow_for_dma(src_vma, page)))
 904		return 1;
 905
 906	new_page = *prealloc;
 907	if (!new_page)
 908		return -EAGAIN;
 909
 910	/*
 911	 * We have a prealloc page, all good!  Take it
 912	 * over and copy the page & arm it.
 913	 */
 914	*prealloc = NULL;
 915	copy_user_highpage(new_page, page, addr, src_vma);
 916	__SetPageUptodate(new_page);
 917	page_add_new_anon_rmap(new_page, dst_vma, addr, false);
 918	lru_cache_add_inactive_or_unevictable(new_page, dst_vma);
 919	rss[mm_counter(new_page)]++;
 920
 921	/* All done, just insert the new page copy in the child */
 922	pte = mk_pte(new_page, dst_vma->vm_page_prot);
 923	pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma);
 924	if (userfaultfd_pte_wp(dst_vma, *src_pte))
 925		/* Uffd-wp needs to be delivered to dest pte as well */
 926		pte = pte_wrprotect(pte_mkuffd_wp(pte));
 927	set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
 928	return 0;
 929}
 930
 931/*
 932 * Copy one pte.  Returns 0 if succeeded, or -EAGAIN if one preallocated page
 933 * is required to copy this pte.
 934 */
 935static inline int
 936copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
 937		 pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
  938		 struct page **prealloc)
 939{
 940	struct mm_struct *src_mm = src_vma->vm_mm;
 941	unsigned long vm_flags = src_vma->vm_flags;
 942	pte_t pte = *src_pte;
 943	struct page *page;
 944
 945	page = vm_normal_page(src_vma, addr, pte);
 946	if (page) {
 947		int retval;
 948
 949		retval = copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
  950					   addr, rss, prealloc, pte, page);
 951		if (retval <= 0)
 952			return retval;
 953
 954		get_page(page);
 955		page_dup_rmap(page, false);
 956		rss[mm_counter(page)]++;
 957	}
 958
 959	/*
 960	 * If it's a COW mapping, write protect it both
 961	 * in the parent and the child
 962	 */
 963	if (is_cow_mapping(vm_flags) && pte_write(pte)) {
 964		ptep_set_wrprotect(src_mm, addr, src_pte);
 965		pte = pte_wrprotect(pte);
 966	}
 967
 968	/*
 969	 * If it's a shared mapping, mark it clean in
 970	 * the child
 971	 */
 972	if (vm_flags & VM_SHARED)
 973		pte = pte_mkclean(pte);
 974	pte = pte_mkold(pte);
 975
  976	if (!userfaultfd_wp(dst_vma))
 977		pte = pte_clear_uffd_wp(pte);
 978
 979	set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
 980	return 0;
 981}
 982
 983static inline struct page *
 984page_copy_prealloc(struct mm_struct *src_mm, struct vm_area_struct *vma,
 985		   unsigned long addr)
 986{
 987	struct page *new_page;
 988
 989	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, addr);
 990	if (!new_page)
 991		return NULL;
 992
 993	if (mem_cgroup_charge(new_page, src_mm, GFP_KERNEL)) {
 994		put_page(new_page);
 995		return NULL;
 996	}
 997	cgroup_throttle_swaprate(new_page, GFP_KERNEL);
 998
 999	return new_page;
1000}
1001
1002static int
1003copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1004	       pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
1005	       unsigned long end)
1006{
1007	struct mm_struct *dst_mm = dst_vma->vm_mm;
1008	struct mm_struct *src_mm = src_vma->vm_mm;
1009	pte_t *orig_src_pte, *orig_dst_pte;
1010	pte_t *src_pte, *dst_pte;
1011	spinlock_t *src_ptl, *dst_ptl;
1012	int progress, ret = 0;
1013	int rss[NR_MM_COUNTERS];
1014	swp_entry_t entry = (swp_entry_t){0};
1015	struct page *prealloc = NULL;
1016
1017again:
1018	progress = 0;
1019	init_rss_vec(rss);
1020
1021	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
1022	if (!dst_pte) {
1023		ret = -ENOMEM;
1024		goto out;
1025	}
1026	src_pte = pte_offset_map(src_pmd, addr);
1027	src_ptl = pte_lockptr(src_mm, src_pmd);
1028	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
1029	orig_src_pte = src_pte;
1030	orig_dst_pte = dst_pte;
1031	arch_enter_lazy_mmu_mode();
1032
1033	do {
1034		/*
1035		 * We are holding two locks at this point - either of them
1036		 * could generate latencies in another task on another CPU.
1037		 */
1038		if (progress >= 32) {
1039			progress = 0;
1040			if (need_resched() ||
1041			    spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
1042				break;
1043		}
1044		if (pte_none(*src_pte)) {
1045			progress++;
1046			continue;
1047		}
1048		if (unlikely(!pte_present(*src_pte))) {
1049			ret = copy_nonpresent_pte(dst_mm, src_mm,
1050						  dst_pte, src_pte,
1051						  dst_vma, src_vma,
1052						  addr, rss);
1053			if (ret == -EIO) {
1054				entry = pte_to_swp_entry(*src_pte);
1055				break;
1056			} else if (ret == -EBUSY) {
1057				break;
1058			} else if (!ret) {
1059				progress += 8;
1060				continue;
1061			}
1062
1063			/*
1064			 * Device exclusive entry restored, continue by copying
1065			 * the now present pte.
1066			 */
1067			WARN_ON_ONCE(ret != -ENOENT);
1068		}
1069		/* copy_present_pte() will clear `*prealloc' if consumed */
1070		ret = copy_present_pte(dst_vma, src_vma, dst_pte, src_pte,
1071				       addr, rss, &prealloc);
1072		/*
1073		 * If we need a pre-allocated page for this pte, drop the
1074		 * locks, allocate, and try again.
1075		 */
1076		if (unlikely(ret == -EAGAIN))
1077			break;
1078		if (unlikely(prealloc)) {
1079			/*
 1080			 * The pre-allocated page cannot be reused by the next pte,
 1081			 * so that mempolicy is strictly followed (e.g.,
 1082			 * alloc_page_vma() allocates the page according to the
 1083			 * address).  This can only happen if a pinned pte changed.
1084			 */
1085			put_page(prealloc);
1086			prealloc = NULL;
1087		}
1088		progress += 8;
1089	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
1090
1091	arch_leave_lazy_mmu_mode();
1092	spin_unlock(src_ptl);
1093	pte_unmap(orig_src_pte);
1094	add_mm_rss_vec(dst_mm, rss);
1095	pte_unmap_unlock(orig_dst_pte, dst_ptl);
1096	cond_resched();
1097
1098	if (ret == -EIO) {
1099		VM_WARN_ON_ONCE(!entry.val);
1100		if (add_swap_count_continuation(entry, GFP_KERNEL) < 0) {
1101			ret = -ENOMEM;
1102			goto out;
1103		}
1104		entry.val = 0;
1105	} else if (ret == -EBUSY) {
1106		goto out;
1107	} else if (ret ==  -EAGAIN) {
1108		prealloc = page_copy_prealloc(src_mm, src_vma, addr);
1109		if (!prealloc)
1110			return -ENOMEM;
1111	} else if (ret) {
1112		VM_WARN_ON_ONCE(1);
1113	}
1114
1115	/* We've captured and resolved the error. Reset, try again. */
1116	ret = 0;
1117
1118	if (addr != end)
1119		goto again;
1120out:
1121	if (unlikely(prealloc))
1122		put_page(prealloc);
1123	return ret;
1124}
1125
1126static inline int
1127copy_pmd_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1128	       pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
1129	       unsigned long end)
1130{
1131	struct mm_struct *dst_mm = dst_vma->vm_mm;
1132	struct mm_struct *src_mm = src_vma->vm_mm;
1133	pmd_t *src_pmd, *dst_pmd;
1134	unsigned long next;
1135
1136	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
1137	if (!dst_pmd)
1138		return -ENOMEM;
1139	src_pmd = pmd_offset(src_pud, addr);
1140	do {
1141		next = pmd_addr_end(addr, end);
1142		if (is_swap_pmd(*src_pmd) || pmd_trans_huge(*src_pmd)
1143			|| pmd_devmap(*src_pmd)) {
1144			int err;
1145			VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, src_vma);
1146			err = copy_huge_pmd(dst_mm, src_mm, dst_pmd, src_pmd,
1147					    addr, dst_vma, src_vma);
1148			if (err == -ENOMEM)
1149				return -ENOMEM;
1150			if (!err)
1151				continue;
1152			/* fall through */
1153		}
1154		if (pmd_none_or_clear_bad(src_pmd))
1155			continue;
1156		if (copy_pte_range(dst_vma, src_vma, dst_pmd, src_pmd,
1157				   addr, next))
1158			return -ENOMEM;
1159	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
1160	return 0;
1161}
1162
1163static inline int
1164copy_pud_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1165	       p4d_t *dst_p4d, p4d_t *src_p4d, unsigned long addr,
1166	       unsigned long end)
1167{
1168	struct mm_struct *dst_mm = dst_vma->vm_mm;
1169	struct mm_struct *src_mm = src_vma->vm_mm;
1170	pud_t *src_pud, *dst_pud;
1171	unsigned long next;
1172
1173	dst_pud = pud_alloc(dst_mm, dst_p4d, addr);
1174	if (!dst_pud)
1175		return -ENOMEM;
1176	src_pud = pud_offset(src_p4d, addr);
1177	do {
1178		next = pud_addr_end(addr, end);
1179		if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) {
1180			int err;
1181
1182			VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, src_vma);
1183			err = copy_huge_pud(dst_mm, src_mm,
1184					    dst_pud, src_pud, addr, src_vma);
1185			if (err == -ENOMEM)
1186				return -ENOMEM;
1187			if (!err)
1188				continue;
1189			/* fall through */
1190		}
1191		if (pud_none_or_clear_bad(src_pud))
1192			continue;
1193		if (copy_pmd_range(dst_vma, src_vma, dst_pud, src_pud,
1194				   addr, next))
1195			return -ENOMEM;
1196	} while (dst_pud++, src_pud++, addr = next, addr != end);
1197	return 0;
1198}
1199
1200static inline int
1201copy_p4d_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1202	       pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long addr,
1203	       unsigned long end)
1204{
1205	struct mm_struct *dst_mm = dst_vma->vm_mm;
1206	p4d_t *src_p4d, *dst_p4d;
1207	unsigned long next;
1208
1209	dst_p4d = p4d_alloc(dst_mm, dst_pgd, addr);
1210	if (!dst_p4d)
1211		return -ENOMEM;
1212	src_p4d = p4d_offset(src_pgd, addr);
1213	do {
1214		next = p4d_addr_end(addr, end);
1215		if (p4d_none_or_clear_bad(src_p4d))
1216			continue;
1217		if (copy_pud_range(dst_vma, src_vma, dst_p4d, src_p4d,
1218				   addr, next))
1219			return -ENOMEM;
1220	} while (dst_p4d++, src_p4d++, addr = next, addr != end);
1221	return 0;
1222}
1223
1224int
1225copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
1226{
1227	pgd_t *src_pgd, *dst_pgd;
1228	unsigned long next;
1229	unsigned long addr = src_vma->vm_start;
1230	unsigned long end = src_vma->vm_end;
1231	struct mm_struct *dst_mm = dst_vma->vm_mm;
1232	struct mm_struct *src_mm = src_vma->vm_mm;
1233	struct mmu_notifier_range range;
1234	bool is_cow;
1235	int ret;
1236
1237	/*
1238	 * Don't copy ptes where a page fault will fill them correctly.
1239	 * Fork becomes much lighter when there are big shared or private
 1240	 * readonly mappings. The tradeoff is that, for the ptes we do copy,
 1241	 * copy_page_range() is more efficient than faulting them in later.
1242	 */
1243	if (!(src_vma->vm_flags & (VM_HUGETLB | VM_PFNMAP | VM_MIXEDMAP)) &&
1244	    !src_vma->anon_vma)
1245		return 0;
1246
1247	if (is_vm_hugetlb_page(src_vma))
1248		return copy_hugetlb_page_range(dst_mm, src_mm, src_vma);
1249
1250	if (unlikely(src_vma->vm_flags & VM_PFNMAP)) {
1251		/*
1252		 * We do not free on error cases below as remove_vma
1253		 * gets called on error from higher level routine
1254		 */
1255		ret = track_pfn_copy(src_vma);
1256		if (ret)
1257			return ret;
1258	}
1259
1260	/*
1261	 * We need to invalidate the secondary MMU mappings only when
1262	 * there could be a permission downgrade on the ptes of the
1263	 * parent mm. And a permission downgrade will only happen if
1264	 * is_cow_mapping() returns true.
1265	 */
1266	is_cow = is_cow_mapping(src_vma->vm_flags);
1267
1268	if (is_cow) {
1269		mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
1270					0, src_vma, src_mm, addr, end);
1271		mmu_notifier_invalidate_range_start(&range);
1272		/*
1273		 * Disabling preemption is not needed for the write side, as
1274		 * the read side doesn't spin, but goes to the mmap_lock.
1275		 *
1276		 * Use the raw variant of the seqcount_t write API to avoid
1277		 * lockdep complaining about preemptibility.
1278		 */
1279		mmap_assert_write_locked(src_mm);
1280		raw_write_seqcount_begin(&src_mm->write_protect_seq);
1281	}
1282
1283	ret = 0;
1284	dst_pgd = pgd_offset(dst_mm, addr);
1285	src_pgd = pgd_offset(src_mm, addr);
1286	do {
1287		next = pgd_addr_end(addr, end);
1288		if (pgd_none_or_clear_bad(src_pgd))
1289			continue;
1290		if (unlikely(copy_p4d_range(dst_vma, src_vma, dst_pgd, src_pgd,
1291					    addr, next))) {
1292			ret = -ENOMEM;
1293			break;
1294		}
1295	} while (dst_pgd++, src_pgd++, addr = next, addr != end);
1296
1297	if (is_cow) {
1298		raw_write_seqcount_end(&src_mm->write_protect_seq);
1299		mmu_notifier_invalidate_range_end(&range);
1300	}
1301	return ret;
1302}
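
/*
 * Example (illustrative): the caller is dup_mmap() in kernel/fork.c, which
 * at fork() time walks the parent's vma list under the parent's mmap write
 * lock (hence the mmap_assert_write_locked() above), roughly:
 *
 *	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
 *		...
 *		retval = copy_page_range(tmp, mpnt);
 *	}
 */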
1303
1304static unsigned long zap_pte_range(struct mmu_gather *tlb,
1305				struct vm_area_struct *vma, pmd_t *pmd,
1306				unsigned long addr, unsigned long end,
1307				struct zap_details *details)
1308{
1309	struct mm_struct *mm = tlb->mm;
1310	int force_flush = 0;
1311	int rss[NR_MM_COUNTERS];
1312	spinlock_t *ptl;
1313	pte_t *start_pte;
1314	pte_t *pte;
1315	swp_entry_t entry;
1316
1317	tlb_change_page_size(tlb, PAGE_SIZE);
1318again:
1319	init_rss_vec(rss);
1320	start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
1321	pte = start_pte;
1322	flush_tlb_batched_pending(mm);
1323	arch_enter_lazy_mmu_mode();
1324	do {
1325		pte_t ptent = *pte;
1326		if (pte_none(ptent))
1327			continue;
1328
1329		if (need_resched())
1330			break;
1331
1332		if (pte_present(ptent)) {
1333			struct page *page;
1334
1335			page = vm_normal_page(vma, addr, ptent);
1336			if (unlikely(details) && page) {
1337				/*
1338				 * unmap_shared_mapping_pages() wants to
1339				 * invalidate cache without truncating:
1340				 * unmap shared but keep private pages.
1341				 */
1342				if (details->check_mapping &&
1343				    details->check_mapping != page_rmapping(page))
1344					continue;
1345			}
1346			ptent = ptep_get_and_clear_full(mm, addr, pte,
1347							tlb->fullmm);
1348			tlb_remove_tlb_entry(tlb, pte, addr);
1349			if (unlikely(!page))
1350				continue;
1351
1352			if (!PageAnon(page)) {
1353				if (pte_dirty(ptent)) {
1354					force_flush = 1;
1355					set_page_dirty(page);
1356				}
1357				if (pte_young(ptent) &&
1358				    likely(!(vma->vm_flags & VM_SEQ_READ)))
1359					mark_page_accessed(page);
1360			}
1361			rss[mm_counter(page)]--;
1362			page_remove_rmap(page, false);
1363			if (unlikely(page_mapcount(page) < 0))
1364				print_bad_pte(vma, addr, ptent, page);
1365			if (unlikely(__tlb_remove_page(tlb, page))) {
1366				force_flush = 1;
1367				addr += PAGE_SIZE;
1368				break;
1369			}
1370			continue;
1371		}
1372
1373		entry = pte_to_swp_entry(ptent);
1374		if (is_device_private_entry(entry) ||
1375		    is_device_exclusive_entry(entry)) {
1376			struct page *page = pfn_swap_entry_to_page(entry);
1377
1378			if (unlikely(details && details->check_mapping)) {
1379				/*
1380				 * unmap_shared_mapping_pages() wants to
1381				 * invalidate cache without truncating:
1382				 * unmap shared but keep private pages.
1383				 */
1384				if (details->check_mapping !=
1385				    page_rmapping(page))
1386					continue;
1387			}
1388
1389			pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
1390			rss[mm_counter(page)]--;
1391
1392			if (is_device_private_entry(entry))
1393				page_remove_rmap(page, false);
1394
1395			put_page(page);
1396			continue;
1397		}
1398
1399		/* If details->check_mapping, we leave swap entries. */
1400		if (unlikely(details))
1401			continue;
1402
1403		if (!non_swap_entry(entry))
1404			rss[MM_SWAPENTS]--;
1405		else if (is_migration_entry(entry)) {
1406			struct page *page;
1407
1408			page = pfn_swap_entry_to_page(entry);
1409			rss[mm_counter(page)]--;
1410		}
1411		if (unlikely(!free_swap_and_cache(entry)))
1412			print_bad_pte(vma, addr, ptent, NULL);
1413		pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
1414	} while (pte++, addr += PAGE_SIZE, addr != end);
1415
1416	add_mm_rss_vec(mm, rss);
1417	arch_leave_lazy_mmu_mode();
1418
1419	/* Do the actual TLB flush before dropping ptl */
1420	if (force_flush)
1421		tlb_flush_mmu_tlbonly(tlb);
1422	pte_unmap_unlock(start_pte, ptl);
1423
1424	/*
1425	 * If we forced a TLB flush (either due to running out of
1426	 * batch buffers or because we needed to flush dirty TLB
1427	 * entries before releasing the ptl), free the batched
1428	 * memory too. Restart if we didn't do everything.
1429	 */
1430	if (force_flush) {
1431		force_flush = 0;
1432		tlb_flush_mmu(tlb);
1433	}
1434
1435	if (addr != end) {
1436		cond_resched();
1437		goto again;
1438	}
1439
1440	return addr;
1441}
1442
1443static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
1444				struct vm_area_struct *vma, pud_t *pud,
1445				unsigned long addr, unsigned long end,
1446				struct zap_details *details)
1447{
1448	pmd_t *pmd;
1449	unsigned long next;
1450
1451	pmd = pmd_offset(pud, addr);
1452	do {
1453		next = pmd_addr_end(addr, end);
1454		if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
1455			if (next - addr != HPAGE_PMD_SIZE)
1456				__split_huge_pmd(vma, pmd, addr, false, NULL);
1457			else if (zap_huge_pmd(tlb, vma, pmd, addr))
1458				goto next;
1459			/* fall through */
1460		} else if (details && details->single_page &&
1461			   PageTransCompound(details->single_page) &&
1462			   next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) {
1463			spinlock_t *ptl = pmd_lock(tlb->mm, pmd);
1464			/*
1465			 * Take and drop THP pmd lock so that we cannot return
1466			 * prematurely, while zap_huge_pmd() has cleared *pmd,
1467			 * but not yet decremented compound_mapcount().
1468			 */
1469			spin_unlock(ptl);
1470		}
1471
1472		/*
1473		 * Here there can be other concurrent MADV_DONTNEED or
1474		 * trans huge page faults running, and if the pmd is
1475		 * none or trans huge it can change under us. This is
1476		 * because MADV_DONTNEED holds the mmap_lock in read
1477		 * mode.
1478		 */
1479		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
1480			goto next;
1481		next = zap_pte_range(tlb, vma, pmd, addr, next, details);
1482next:
1483		cond_resched();
1484	} while (pmd++, addr = next, addr != end);
1485
1486	return addr;
1487}
1488
1489static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
1490				struct vm_area_struct *vma, p4d_t *p4d,
1491				unsigned long addr, unsigned long end,
1492				struct zap_details *details)
1493{
1494	pud_t *pud;
1495	unsigned long next;
1496
1497	pud = pud_offset(p4d, addr);
1498	do {
1499		next = pud_addr_end(addr, end);
1500		if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
1501			if (next - addr != HPAGE_PUD_SIZE) {
1502				mmap_assert_locked(tlb->mm);
1503				split_huge_pud(vma, pud, addr);
1504			} else if (zap_huge_pud(tlb, vma, pud, addr))
1505				goto next;
1506			/* fall through */
1507		}
1508		if (pud_none_or_clear_bad(pud))
1509			continue;
1510		next = zap_pmd_range(tlb, vma, pud, addr, next, details);
1511next:
1512		cond_resched();
1513	} while (pud++, addr = next, addr != end);
1514
1515	return addr;
1516}
1517
1518static inline unsigned long zap_p4d_range(struct mmu_gather *tlb,
1519				struct vm_area_struct *vma, pgd_t *pgd,
1520				unsigned long addr, unsigned long end,
1521				struct zap_details *details)
1522{
1523	p4d_t *p4d;
1524	unsigned long next;
1525
1526	p4d = p4d_offset(pgd, addr);
1527	do {
1528		next = p4d_addr_end(addr, end);
1529		if (p4d_none_or_clear_bad(p4d))
1530			continue;
1531		next = zap_pud_range(tlb, vma, p4d, addr, next, details);
1532	} while (p4d++, addr = next, addr != end);
1533
1534	return addr;
1535}
1536
1537void unmap_page_range(struct mmu_gather *tlb,
1538			     struct vm_area_struct *vma,
1539			     unsigned long addr, unsigned long end,
1540			     struct zap_details *details)
1541{
1542	pgd_t *pgd;
1543	unsigned long next;
1544
1545	BUG_ON(addr >= end);
1546	tlb_start_vma(tlb, vma);
1547	pgd = pgd_offset(vma->vm_mm, addr);
1548	do {
1549		next = pgd_addr_end(addr, end);
1550		if (pgd_none_or_clear_bad(pgd))
1551			continue;
1552		next = zap_p4d_range(tlb, vma, pgd, addr, next, details);
1553	} while (pgd++, addr = next, addr != end);
1554	tlb_end_vma(tlb, vma);
1555}
1556
1557
1558static void unmap_single_vma(struct mmu_gather *tlb,
1559		struct vm_area_struct *vma, unsigned long start_addr,
1560		unsigned long end_addr,
1561		struct zap_details *details)
1562{
1563	unsigned long start = max(vma->vm_start, start_addr);
1564	unsigned long end;
1565
1566	if (start >= vma->vm_end)
1567		return;
1568	end = min(vma->vm_end, end_addr);
1569	if (end <= vma->vm_start)
1570		return;
1571
1572	if (vma->vm_file)
1573		uprobe_munmap(vma, start, end);
1574
1575	if (unlikely(vma->vm_flags & VM_PFNMAP))
1576		untrack_pfn(vma, 0, 0);
1577
1578	if (start != end) {
1579		if (unlikely(is_vm_hugetlb_page(vma))) {
1580			/*
1581			 * It is undesirable to test vma->vm_file as it
 1582			 * should be non-null for a valid hugetlb area.
 1583			 * However, vm_file will be NULL in the error
 1584			 * cleanup path of mmap_region. When the
 1585			 * hugetlbfs ->mmap method fails,
1586			 * mmap_region() nullifies vma->vm_file
1587			 * before calling this function to clean up.
1588			 * Since no pte has actually been setup, it is
1589			 * safe to do nothing in this case.
1590			 */
1591			if (vma->vm_file) {
1592				i_mmap_lock_write(vma->vm_file->f_mapping);
1593				__unmap_hugepage_range_final(tlb, vma, start, end, NULL);
1594				i_mmap_unlock_write(vma->vm_file->f_mapping);
1595			}
1596		} else
1597			unmap_page_range(tlb, vma, start, end, details);
1598	}
1599}
1600
1601/**
1602 * unmap_vmas - unmap a range of memory covered by a list of vma's
1603 * @tlb: address of the caller's struct mmu_gather
1604 * @vma: the starting vma
1605 * @start_addr: virtual address at which to start unmapping
1606 * @end_addr: virtual address at which to end unmapping
1607 *
1608 * Unmap all pages in the vma list.
1609 *
 1610 * Only addresses between `start_addr' and `end_addr' will be unmapped.
1611 *
1612 * The VMA list must be sorted in ascending virtual address order.
1613 *
1614 * unmap_vmas() assumes that the caller will flush the whole unmapped address
1615 * range after unmap_vmas() returns.  So the only responsibility here is to
1616 * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
1617 * drops the lock and schedules.
1618 */
1619void unmap_vmas(struct mmu_gather *tlb,
1620		struct vm_area_struct *vma, unsigned long start_addr,
1621		unsigned long end_addr)
1622{
1623	struct mmu_notifier_range range;
1624
1625	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
1626				start_addr, end_addr);
1627	mmu_notifier_invalidate_range_start(&range);
1628	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
1629		unmap_single_vma(tlb, vma, start_addr, end_addr, NULL);
1630	mmu_notifier_invalidate_range_end(&range);
1631}
1632
1633/**
1634 * zap_page_range - remove user pages in a given range
1635 * @vma: vm_area_struct holding the applicable pages
1636 * @start: starting address of pages to zap
1637 * @size: number of bytes to zap
1638 *
1639 * Caller must protect the VMA list
1640 */
1641void zap_page_range(struct vm_area_struct *vma, unsigned long start,
1642		unsigned long size)
1643{
1644	struct mmu_notifier_range range;
1645	struct mmu_gather tlb;
1646
1647	lru_add_drain();
1648	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
1649				start, start + size);
1650	tlb_gather_mmu(&tlb, vma->vm_mm);
1651	update_hiwater_rss(vma->vm_mm);
1652	mmu_notifier_invalidate_range_start(&range);
1653	for ( ; vma && vma->vm_start < range.end; vma = vma->vm_next)
1654		unmap_single_vma(&tlb, vma, start, range.end, NULL);
1655	mmu_notifier_invalidate_range_end(&range);
1656	tlb_finish_mmu(&tlb);
1657}
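
/*
 * Example (illustrative): madvise(MADV_DONTNEED) is one caller - it zaps
 * the advised range with the mmap lock held in read mode, roughly:
 *
 *	zap_page_range(vma, start, end - start);
 *
 * Later touches of the range then fault in fresh zero-filled or
 * file-backed pages.
 */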
1658
1659/**
1660 * zap_page_range_single - remove user pages in a given range
1661 * @vma: vm_area_struct holding the applicable pages
1662 * @address: starting address of pages to zap
1663 * @size: number of bytes to zap
1664 * @details: details of shared cache invalidation
1665 *
1666 * The range must fit into one VMA.
1667 */
1668static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
1669		unsigned long size, struct zap_details *details)
1670{
1671	struct mmu_notifier_range range;
1672	struct mmu_gather tlb;
1673
1674	lru_add_drain();
1675	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
1676				address, address + size);
1677	tlb_gather_mmu(&tlb, vma->vm_mm);
1678	update_hiwater_rss(vma->vm_mm);
1679	mmu_notifier_invalidate_range_start(&range);
1680	unmap_single_vma(&tlb, vma, address, range.end, details);
1681	mmu_notifier_invalidate_range_end(&range);
1682	tlb_finish_mmu(&tlb);
1683}
1684
1685/**
1686 * zap_vma_ptes - remove ptes mapping the vma
1687 * @vma: vm_area_struct holding ptes to be zapped
1688 * @address: starting address of pages to zap
1689 * @size: number of bytes to zap
1690 *
1691 * This function only unmaps ptes assigned to VM_PFNMAP vmas.
1692 *
1693 * The entire address range must be fully contained within the vma.
1694 *
1695 */
1696void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
1697		unsigned long size)
1698{
1699	if (address < vma->vm_start || address + size > vma->vm_end ||
 1700	    !(vma->vm_flags & VM_PFNMAP))
1701		return;
1702
1703	zap_page_range_single(vma, address, size, NULL);
1704}
1705EXPORT_SYMBOL_GPL(zap_vma_ptes);
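
/*
 * Example (illustrative, hypothetical driver code): a driver that set up a
 * VM_PFNMAP range with remap_pfn_range() can revoke part of it later:
 *
 *	zap_vma_ptes(vma, vma->vm_start, PAGE_SIZE);	// first page only
 *
 * If the range falls outside the vma, or the vma is not VM_PFNMAP, the
 * call silently does nothing (see the checks above).
 */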
1706
1707static pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long addr)
1708{
1709	pgd_t *pgd;
1710	p4d_t *p4d;
1711	pud_t *pud;
1712	pmd_t *pmd;
1713
1714	pgd = pgd_offset(mm, addr);
1715	p4d = p4d_alloc(mm, pgd, addr);
1716	if (!p4d)
1717		return NULL;
1718	pud = pud_alloc(mm, p4d, addr);
1719	if (!pud)
1720		return NULL;
1721	pmd = pmd_alloc(mm, pud, addr);
1722	if (!pmd)
1723		return NULL;
1724
1725	VM_BUG_ON(pmd_trans_huge(*pmd));
1726	return pmd;
1727}
1728
1729pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
1730			spinlock_t **ptl)
1731{
1732	pmd_t *pmd = walk_to_pmd(mm, addr);
1733
1734	if (!pmd)
1735		return NULL;
1736	return pte_alloc_map_lock(mm, pmd, addr, ptl);
1737}
1738
1739static int validate_page_before_insert(struct page *page)
1740{
1741	if (PageAnon(page) || PageSlab(page) || page_has_type(page))
1742		return -EINVAL;
1743	flush_dcache_page(page);
1744	return 0;
1745}
1746
1747static int insert_page_into_pte_locked(struct mm_struct *mm, pte_t *pte,
1748			unsigned long addr, struct page *page, pgprot_t prot)
1749{
1750	if (!pte_none(*pte))
1751		return -EBUSY;
1752	/* Ok, finally just insert the thing.. */
1753	get_page(page);
1754	inc_mm_counter_fast(mm, mm_counter_file(page));
1755	page_add_file_rmap(page, false);
1756	set_pte_at(mm, addr, pte, mk_pte(page, prot));
1757	return 0;
1758}
1759
1760/*
1761 * This is the old fallback for page remapping.
1762 *
1763 * For historical reasons, it only allows reserved pages. Only
1764 * old drivers should use this, and they needed to mark their
1765 * pages reserved for the old functions anyway.
1766 */
1767static int insert_page(struct vm_area_struct *vma, unsigned long addr,
1768			struct page *page, pgprot_t prot)
1769{
1770	struct mm_struct *mm = vma->vm_mm;
1771	int retval;
1772	pte_t *pte;
1773	spinlock_t *ptl;
1774
1775	retval = validate_page_before_insert(page);
1776	if (retval)
1777		goto out;
1778	retval = -ENOMEM;
1779	pte = get_locked_pte(mm, addr, &ptl);
1780	if (!pte)
1781		goto out;
1782	retval = insert_page_into_pte_locked(mm, pte, addr, page, prot);
1783	pte_unmap_unlock(pte, ptl);
1784out:
1785	return retval;
1786}
1787
1788#ifdef pte_index
1789static int insert_page_in_batch_locked(struct mm_struct *mm, pte_t *pte,
1790			unsigned long addr, struct page *page, pgprot_t prot)
1791{
1792	int err;
1793
1794	if (!page_count(page))
1795		return -EINVAL;
1796	err = validate_page_before_insert(page);
1797	if (err)
1798		return err;
1799	return insert_page_into_pte_locked(mm, pte, addr, page, prot);
1800}
1801
1802/* insert_pages() amortizes the cost of spinlock operations
1803 * when inserting pages in a loop. Arch *must* define pte_index.
1804 */
1805static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
1806			struct page **pages, unsigned long *num, pgprot_t prot)
1807{
1808	pmd_t *pmd = NULL;
1809	pte_t *start_pte, *pte;
1810	spinlock_t *pte_lock;
1811	struct mm_struct *const mm = vma->vm_mm;
1812	unsigned long curr_page_idx = 0;
1813	unsigned long remaining_pages_total = *num;
1814	unsigned long pages_to_write_in_pmd;
1815	int ret;
1816more:
1817	ret = -EFAULT;
1818	pmd = walk_to_pmd(mm, addr);
1819	if (!pmd)
1820		goto out;
1821
1822	pages_to_write_in_pmd = min_t(unsigned long,
1823		remaining_pages_total, PTRS_PER_PTE - pte_index(addr));
1824
1825	/* Allocate the PTE if necessary; takes PMD lock once only. */
1826	ret = -ENOMEM;
1827	if (pte_alloc(mm, pmd))
1828		goto out;
1829
1830	while (pages_to_write_in_pmd) {
1831		int pte_idx = 0;
1832		const int batch_size = min_t(int, pages_to_write_in_pmd, 8);
1833
1834		start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock);
1835		for (pte = start_pte; pte_idx < batch_size; ++pte, ++pte_idx) {
1836			int err = insert_page_in_batch_locked(mm, pte,
1837				addr, pages[curr_page_idx], prot);
1838			if (unlikely(err)) {
1839				pte_unmap_unlock(start_pte, pte_lock);
1840				ret = err;
1841				remaining_pages_total -= pte_idx;
1842				goto out;
1843			}
1844			addr += PAGE_SIZE;
1845			++curr_page_idx;
1846		}
1847		pte_unmap_unlock(start_pte, pte_lock);
1848		pages_to_write_in_pmd -= batch_size;
1849		remaining_pages_total -= batch_size;
1850	}
1851	if (remaining_pages_total)
1852		goto more;
1853	ret = 0;
1854out:
1855	*num = remaining_pages_total;
1856	return ret;
1857}
1858#endif  /* ifdef pte_index */
1859
1860/**
1861 * vm_insert_pages - insert multiple pages into user vma, batching the pmd lock.
1862 * @vma: user vma to map to
1863 * @addr: target start user address of these pages
1864 * @pages: source kernel pages
1865 * @num: in: number of pages to map. out: number of pages that were *not*
1866 * mapped. (0 means all pages were successfully mapped).
1867 *
1868 * Preferred over vm_insert_page() when inserting multiple pages.
1869 *
1870 * In case of error, we may have mapped a subset of the provided
1871 * pages. It is the caller's responsibility to account for this case.
1872 *
1873 * The same restrictions apply as in vm_insert_page().
1874 */
1875int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
1876			struct page **pages, unsigned long *num)
1877{
1878#ifdef pte_index
1879	const unsigned long end_addr = addr + (*num * PAGE_SIZE) - 1;
1880
1881	if (addr < vma->vm_start || end_addr >= vma->vm_end)
1882		return -EFAULT;
1883	if (!(vma->vm_flags & VM_MIXEDMAP)) {
1884		BUG_ON(mmap_read_trylock(vma->vm_mm));
1885		BUG_ON(vma->vm_flags & VM_PFNMAP);
1886		vma->vm_flags |= VM_MIXEDMAP;
1887	}
1888	/* Defer page refcount checking till we're about to map that page. */
1889	return insert_pages(vma, addr, pages, num, vma->vm_page_prot);
1890#else
1891	unsigned long idx = 0, pgcount = *num;
1892	int err = -EINVAL;
1893
1894	for (; idx < pgcount; ++idx) {
1895		err = vm_insert_page(vma, addr + (PAGE_SIZE * idx), pages[idx]);
1896		if (err)
1897			break;
1898	}
1899	*num = pgcount - idx;
1900	return err;
1901#endif  /* ifdef pte_index */
1902}
1903EXPORT_SYMBOL(vm_insert_pages);
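
/*
 * Example (illustrative sketch, not part of this file): an mmap
 * handler mapping a whole driver-allocated page array in one call.
 * "my_dev", "pages" and "npages" are hypothetical names.
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_dev *dev = file->private_data;
 *		unsigned long num = dev->npages;
 *		int err;
 *
 *		err = vm_insert_pages(vma, vma->vm_start, dev->pages, &num);
 *		// on failure, "num" pages were *not* mapped; an mmap caller
 *		// may just return err, since vma teardown removes the rest
 *		return err;
 *	}
 */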
1904
1905/**
1906 * vm_insert_page - insert single page into user vma
1907 * @vma: user vma to map to
1908 * @addr: target user address of this page
1909 * @page: source kernel page
1910 *
1911 * This allows drivers to insert individual pages they've allocated
1912 * into a user vma.
1913 *
1914 * The page has to be a nice clean _individual_ kernel allocation.
1915 * If you allocate a compound page, you need to have marked it as
1916 * such (__GFP_COMP), or manually just split the page up yourself
1917 * (see split_page()).
1918 *
1919 * NOTE! Traditionally this was done with "remap_pfn_range()" which
1920 * took an arbitrary page protection parameter. This doesn't allow
1921 * that. Your vma protection will have to be set up correctly, which
1922 * means that if you want a shared writable mapping, you'd better
1923 * ask for a shared writable mapping!
1924 *
1925 * The page does not need to be reserved.
1926 *
1927 * Usually this function is called from f_op->mmap() handler
1928 * under mm->mmap_lock write-lock, so it can change vma->vm_flags.
1929 * Caller must set VM_MIXEDMAP on vma if it wants to call this
1930 * function from other places, for example from page-fault handler.
1931 *
1932 * Return: %0 on success, negative error code otherwise.
1933 */
1934int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
1935			struct page *page)
1936{
1937	if (addr < vma->vm_start || addr >= vma->vm_end)
1938		return -EFAULT;
1939	if (!page_count(page))
1940		return -EINVAL;
1941	if (!(vma->vm_flags & VM_MIXEDMAP)) {
1942		BUG_ON(mmap_read_trylock(vma->vm_mm));
1943		BUG_ON(vma->vm_flags & VM_PFNMAP);
1944		vma->vm_flags |= VM_MIXEDMAP;
1945	}
1946	return insert_page(vma, addr, page, vma->vm_page_prot);
1947}
1948EXPORT_SYMBOL(vm_insert_page);
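
/*
 * Example (illustrative sketch, not part of this file): the minimal
 * single-page case from an f_op->mmap() handler, where mmap_lock is
 * held for write and vm_flags may still be changed.  "my_dev" and its
 * "page" member are hypothetical.
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_dev *dev = file->private_data;
 *
 *		if (vma->vm_end - vma->vm_start < PAGE_SIZE)
 *			return -EINVAL;
 *		return vm_insert_page(vma, vma->vm_start, dev->page);
 *	}
 */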
1949
1950/*
1951 * __vm_map_pages - maps range of kernel pages into user vma
1952 * @vma: user vma to map to
1953 * @pages: pointer to array of source kernel pages
1954 * @num: number of pages in page array
1955 * @offset: user's requested vm_pgoff
1956 *
1957 * This allows drivers to map a range of kernel pages into a user vma.
1958 *
1959 * Return: 0 on success and error code otherwise.
1960 */
1961static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages,
1962				unsigned long num, unsigned long offset)
1963{
1964	unsigned long count = vma_pages(vma);
1965	unsigned long uaddr = vma->vm_start;
1966	int ret, i;
1967
1968	/* Fail if the user requested offset is beyond the end of the object */
1969	if (offset >= num)
1970		return -ENXIO;
1971
1972	/* Fail if the user requested size exceeds available object size */
1973	if (count > num - offset)
1974		return -ENXIO;
1975
1976	for (i = 0; i < count; i++) {
1977		ret = vm_insert_page(vma, uaddr, pages[offset + i]);
1978		if (ret < 0)
1979			return ret;
1980		uaddr += PAGE_SIZE;
1981	}
1982
1983	return 0;
1984}
1985
1986/**
1987 * vm_map_pages - maps a range of kernel pages starting at a non-zero offset
1988 * @vma: user vma to map to
1989 * @pages: pointer to array of source kernel pages
1990 * @num: number of pages in page array
1991 *
1992 * Maps an object consisting of @num pages, catering for the user's
1993 * requested vm_pgoff.
1994 *
1995 * If we fail to insert any page into the vma, the function will return
1996 * immediately leaving any previously inserted pages present.  Callers
1997 * from the mmap handler may immediately return the error as their caller
1998 * will destroy the vma, removing any successfully inserted pages. Other
1999 * callers should make their own arrangements for calling unmap_region().
2000 *
2001 * Context: Process context. Called by mmap handlers.
2002 * Return: 0 on success and error code otherwise.
2003 */
2004int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
2005				unsigned long num)
2006{
2007	return __vm_map_pages(vma, pages, num, vma->vm_pgoff);
2008}
2009EXPORT_SYMBOL(vm_map_pages);
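
/*
 * Example (illustrative sketch, not part of this file): an mmap
 * handler letting vm_map_pages() honour the offset userspace gave to
 * mmap(2) via vma->vm_pgoff.  "my_buf" and its members are
 * hypothetical.
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_buf *buf = file->private_data;
 *
 *		// offset and size checks happen inside __vm_map_pages()
 *		return vm_map_pages(vma, buf->pages, buf->num_pages);
 *	}
 */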
2010
2011/**
2012 * vm_map_pages_zero - map a range of kernel pages starting with zero offset
2013 * @vma: user vma to map to
2014 * @pages: pointer to array of source kernel pages
2015 * @num: number of pages in page array
2016 *
2017 * Similar to vm_map_pages(), except that it explicitly sets the offset
2018 * to 0. This function is intended for drivers that did not consider
2019 * vm_pgoff.
2020 *
2021 * Context: Process context. Called by mmap handlers.
2022 * Return: 0 on success and error code otherwise.
2023 */
2024int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
2025				unsigned long num)
2026{
2027	return __vm_map_pages(vma, pages, num, 0);
2028}
2029EXPORT_SYMBOL(vm_map_pages_zero);
2030
2031static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2032			pfn_t pfn, pgprot_t prot, bool mkwrite)
2033{
2034	struct mm_struct *mm = vma->vm_mm;
2035	pte_t *pte, entry;
2036	spinlock_t *ptl;
2037
2038	pte = get_locked_pte(mm, addr, &ptl);
2039	if (!pte)
2040		return VM_FAULT_OOM;
2041	if (!pte_none(*pte)) {
2042		if (mkwrite) {
2043			/*
2044			 * For read faults on private mappings the PFN passed
2045			 * in may not match the PFN we have mapped if the
2046			 * mapped PFN is a writeable COW page.  In the mkwrite
2047			 * case we are creating a writable PTE for a shared
2048			 * mapping and we expect the PFNs to match. If they
2049			 * don't match, we are likely racing with block
2050			 * allocation and mapping invalidation so just skip the
2051			 * update.
2052			 */
2053			if (pte_pfn(*pte) != pfn_t_to_pfn(pfn)) {
2054				WARN_ON_ONCE(!is_zero_pfn(pte_pfn(*pte)));
2055				goto out_unlock;
2056			}
2057			entry = pte_mkyoung(*pte);
2058			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2059			if (ptep_set_access_flags(vma, addr, pte, entry, 1))
2060				update_mmu_cache(vma, addr, pte);
2061		}
2062		goto out_unlock;
2063	}
2064
2065	/* Ok, finally just insert the thing.. */
2066	if (pfn_t_devmap(pfn))
2067		entry = pte_mkdevmap(pfn_t_pte(pfn, prot));
2068	else
2069		entry = pte_mkspecial(pfn_t_pte(pfn, prot));
2070
2071	if (mkwrite) {
2072		entry = pte_mkyoung(entry);
2073		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2074	}
2075
2076	set_pte_at(mm, addr, pte, entry);
2077	update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
2078
2079out_unlock:
2080	pte_unmap_unlock(pte, ptl);
2081	return VM_FAULT_NOPAGE;
2082}
2083
2084/**
2085 * vmf_insert_pfn_prot - insert single pfn into user vma with specified pgprot
2086 * @vma: user vma to map to
2087 * @addr: target user address of this page
2088 * @pfn: source kernel pfn
2089 * @pgprot: pgprot flags for the inserted page
2090 *
2091 * This is exactly like vmf_insert_pfn(), except that it allows drivers
2092 * to override pgprot on a per-page basis.
2093 *
2094 * This only makes sense for IO mappings, and it makes no sense for
2095 * COW mappings.  In general, using multiple vmas is preferable;
2096 * vmf_insert_pfn_prot should only be used if using multiple VMAs is
2097 * impractical.
2098 *
2099 * See vmf_insert_mixed_prot() for a discussion of the implication of using
2100 * a value of @pgprot different from that of @vma->vm_page_prot.
2101 *
2102 * Context: Process context.  May allocate using %GFP_KERNEL.
2103 * Return: vm_fault_t value.
2104 */
2105vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
2106			unsigned long pfn, pgprot_t pgprot)
2107{
2108	/*
2109	 * Technically, architectures with pte_special can avoid all these
2110	 * restrictions (same for remap_pfn_range).  However we would like
2111	 * consistency in testing and feature parity among all, so we should
2112	 * try to keep these invariants in place for everybody.
2113	 */
2114	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
2115	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
2116						(VM_PFNMAP|VM_MIXEDMAP));
2117	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
2118	BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
2119
2120	if (addr < vma->vm_start || addr >= vma->vm_end)
2121		return VM_FAULT_SIGBUS;
2122
2123	if (!pfn_modify_allowed(pfn, pgprot))
2124		return VM_FAULT_SIGBUS;
2125
2126	track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV));
2127
2128	return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot,
2129			false);
2130}
2131EXPORT_SYMBOL(vmf_insert_pfn_prot);
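
/*
 * Example (illustrative sketch, not part of this file): a fault
 * handler asking for write-combining on a single page while the rest
 * of the vma keeps vma->vm_page_prot.  my_pfn_for() is a hypothetical
 * helper translating the fault offset into a device pfn.
 *
 *	static vm_fault_t my_fault(struct vm_fault *vmf)
 *	{
 *		unsigned long pfn = my_pfn_for(vmf->pgoff);
 *
 *		return vmf_insert_pfn_prot(vmf->vma, vmf->address, pfn,
 *			pgprot_writecombine(vmf->vma->vm_page_prot));
 *	}
 */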
2132
2133/**
2134 * vmf_insert_pfn - insert single pfn into user vma
2135 * @vma: user vma to map to
2136 * @addr: target user address of this page
2137 * @pfn: source kernel pfn
2138 *
2139 * Similar to vm_insert_page, this allows drivers to insert individual pages
2140 * they've allocated into a user vma. Same comments apply.
2141 *
2142 * This function should only be called from a vm_ops->fault handler, and
2143 * in that case the handler should return the result of this function.
2144 *
2145 * vma cannot be a COW mapping.
2146 *
2147 * As this is called only for pages that do not currently exist, we
2148 * do not need to flush old virtual caches or the TLB.
2149 *
2150 * Context: Process context.  May allocate using %GFP_KERNEL.
2151 * Return: vm_fault_t value.
2152 */
2153vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2154			unsigned long pfn)
2155{
2156	return vmf_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
2157}
2158EXPORT_SYMBOL(vmf_insert_pfn);
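
/*
 * Example (illustrative sketch, not part of this file): a
 * vm_ops->fault handler backing a VM_PFNMAP vma (set up at mmap()
 * time) with device memory.  "base_pfn" is a hypothetical field.
 *
 *	static vm_fault_t my_fault(struct vm_fault *vmf)
 *	{
 *		struct my_dev *dev = vmf->vma->vm_private_data;
 *
 *		return vmf_insert_pfn(vmf->vma, vmf->address,
 *				      dev->base_pfn + vmf->pgoff);
 *	}
 */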
2159
2160static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn)
2161{
2162	/* these checks mirror the abort conditions in vm_normal_page */
2163	if (vma->vm_flags & VM_MIXEDMAP)
2164		return true;
2165	if (pfn_t_devmap(pfn))
2166		return true;
2167	if (pfn_t_special(pfn))
2168		return true;
2169	if (is_zero_pfn(pfn_t_to_pfn(pfn)))
2170		return true;
2171	return false;
2172}
2173
2174static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
2175		unsigned long addr, pfn_t pfn, pgprot_t pgprot,
2176		bool mkwrite)
2177{
2178	int err;
2179
2180	BUG_ON(!vm_mixed_ok(vma, pfn));
2181
2182	if (addr < vma->vm_start || addr >= vma->vm_end)
2183		return VM_FAULT_SIGBUS;
2184
2185	track_pfn_insert(vma, &pgprot, pfn);
2186
2187	if (!pfn_modify_allowed(pfn_t_to_pfn(pfn), pgprot))
2188		return VM_FAULT_SIGBUS;
2189
2190	/*
2191	 * If we don't have pte special, then we have to use the pfn_valid()
2192	 * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
2193	 * refcount the page if pfn_valid is true (hence insert_page rather
2194	 * than insert_pfn).  If a zero_pfn were inserted into a VM_MIXEDMAP
2195	 * without pte special, it would then be refcounted as a normal page.
2196	 */
2197	if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) &&
2198	    !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) {
2199		struct page *page;
2200
2201		/*
2202		 * At this point we are committed to insert_page()
2203		 * regardless of whether the caller specified flags that
2204		 * result in pfn_t_has_page() == false.
2205		 */
2206		page = pfn_to_page(pfn_t_to_pfn(pfn));
2207		err = insert_page(vma, addr, page, pgprot);
2208	} else {
2209		return insert_pfn(vma, addr, pfn, pgprot, mkwrite);
2210	}
2211
2212	if (err == -ENOMEM)
2213		return VM_FAULT_OOM;
2214	if (err < 0 && err != -EBUSY)
2215		return VM_FAULT_SIGBUS;
2216
2217	return VM_FAULT_NOPAGE;
2218}
2219
2220/**
2221 * vmf_insert_mixed_prot - insert single pfn into user vma with specified pgprot
2222 * @vma: user vma to map to
2223 * @addr: target user address of this page
2224 * @pfn: source kernel pfn
2225 * @pgprot: pgprot flags for the inserted page
2226 *
2227 * This is exactly like vmf_insert_mixed(), except that it allows drivers
2228 * to override pgprot on a per-page basis.
2229 *
2230 * Typically this function should be used by drivers to set caching- and
2231 * encryption bits different than those of @vma->vm_page_prot, because
2232 * the caching- or encryption mode may not be known at mmap() time.
2233 * This is ok as long as @vma->vm_page_prot is not used by the core vm
2234 * to set caching and encryption bits for those vmas (except for COW pages).
2235 * This is ensured by core vm only modifying these page table entries using
2236 * functions that don't touch caching- or encryption bits, using pte_modify()
2237 * if needed. (See for example mprotect()).
2238 * Also when new page-table entries are created, this is only done using the
2239 * fault() callback, and never using the value of vma->vm_page_prot,
2240 * except for page-table entries that point to anonymous pages as the result
2241 * of COW.
2242 *
2243 * Context: Process context.  May allocate using %GFP_KERNEL.
2244 * Return: vm_fault_t value.
2245 */
2246vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr,
2247				 pfn_t pfn, pgprot_t pgprot)
2248{
2249	return __vm_insert_mixed(vma, addr, pfn, pgprot, false);
2250}
2251EXPORT_SYMBOL(vmf_insert_mixed_prot);
2252
2253vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
2254		pfn_t pfn)
2255{
2256	return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, false);
2257}
2258EXPORT_SYMBOL(vmf_insert_mixed);
2259
2260/*
2261 *  If the insertion of a PTE failed because someone else already added a
2262 *  different entry in the meantime, we treat that as success, as we assume
2263 *  the same entry was actually inserted.
2264 */
2265vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
2266		unsigned long addr, pfn_t pfn)
2267{
2268	return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, true);
2269}
2270EXPORT_SYMBOL(vmf_insert_mixed_mkwrite);
2271
2272/*
2273 * Maps a range of physical memory into the requested pages. The old
2274 * mappings are removed. Any references to nonexistent pages result
2275 * in null mappings (currently treated as "copy-on-access").
2276 */
2277static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
2278			unsigned long addr, unsigned long end,
2279			unsigned long pfn, pgprot_t prot)
2280{
2281	pte_t *pte, *mapped_pte;
2282	spinlock_t *ptl;
2283	int err = 0;
2284
2285	mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
2286	if (!pte)
2287		return -ENOMEM;
2288	arch_enter_lazy_mmu_mode();
2289	do {
2290		BUG_ON(!pte_none(*pte));
2291		if (!pfn_modify_allowed(pfn, prot)) {
2292			err = -EACCES;
2293			break;
2294		}
2295		set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
2296		pfn++;
2297	} while (pte++, addr += PAGE_SIZE, addr != end);
2298	arch_leave_lazy_mmu_mode();
2299	pte_unmap_unlock(mapped_pte, ptl);
2300	return err;
2301}
2302
2303static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
2304			unsigned long addr, unsigned long end,
2305			unsigned long pfn, pgprot_t prot)
2306{
2307	pmd_t *pmd;
2308	unsigned long next;
2309	int err;
2310
2311	pfn -= addr >> PAGE_SHIFT;
2312	pmd = pmd_alloc(mm, pud, addr);
2313	if (!pmd)
2314		return -ENOMEM;
2315	VM_BUG_ON(pmd_trans_huge(*pmd));
2316	do {
2317		next = pmd_addr_end(addr, end);
2318		err = remap_pte_range(mm, pmd, addr, next,
2319				pfn + (addr >> PAGE_SHIFT), prot);
2320		if (err)
2321			return err;
2322	} while (pmd++, addr = next, addr != end);
2323	return 0;
2324}
2325
2326static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d,
2327			unsigned long addr, unsigned long end,
2328			unsigned long pfn, pgprot_t prot)
2329{
2330	pud_t *pud;
2331	unsigned long next;
2332	int err;
2333
2334	pfn -= addr >> PAGE_SHIFT;
2335	pud = pud_alloc(mm, p4d, addr);
2336	if (!pud)
2337		return -ENOMEM;
2338	do {
2339		next = pud_addr_end(addr, end);
2340		err = remap_pmd_range(mm, pud, addr, next,
2341				pfn + (addr >> PAGE_SHIFT), prot);
2342		if (err)
2343			return err;
2344	} while (pud++, addr = next, addr != end);
2345	return 0;
2346}
2347
2348static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
2349			unsigned long addr, unsigned long end,
2350			unsigned long pfn, pgprot_t prot)
2351{
2352	p4d_t *p4d;
2353	unsigned long next;
2354	int err;
2355
2356	pfn -= addr >> PAGE_SHIFT;
2357	p4d = p4d_alloc(mm, pgd, addr);
2358	if (!p4d)
2359		return -ENOMEM;
2360	do {
2361		next = p4d_addr_end(addr, end);
2362		err = remap_pud_range(mm, p4d, addr, next,
2363				pfn + (addr >> PAGE_SHIFT), prot);
2364		if (err)
2365			return err;
2366	} while (p4d++, addr = next, addr != end);
2367	return 0;
2368}
2369
2370/*
2371 * Variant of remap_pfn_range that does not call track_pfn_remap.  The caller
2372 * must have pre-validated the caching bits of the pgprot_t.
2373 */
2374int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
2375		unsigned long pfn, unsigned long size, pgprot_t prot)
2376{
2377	pgd_t *pgd;
2378	unsigned long next;
2379	unsigned long end = addr + PAGE_ALIGN(size);
2380	struct mm_struct *mm = vma->vm_mm;
2381	int err;
2382
2383	if (WARN_ON_ONCE(!PAGE_ALIGNED(addr)))
2384		return -EINVAL;
2385
2386	/*
2387	 * Physically remapped pages are special. Tell the
2388	 * rest of the world about it:
2389	 *   VM_IO tells people not to look at these pages
2390	 *	(accesses can have side effects).
2391	 *   VM_PFNMAP tells the core MM that the base pages are just
2392	 *	raw PFN mappings, and do not have a "struct page" associated
2393	 *	with them.
2394	 *   VM_DONTEXPAND
2395	 *      Disable vma merging and expanding with mremap().
2396	 *   VM_DONTDUMP
2397	 *      Omit vma from core dump, even when VM_IO turned off.
2398	 *
2399	 * There's a horrible special case to handle copy-on-write
2400	 * behaviour that some programs depend on. We mark the "original"
2401	 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
2402	 * See vm_normal_page() for details.
2403	 */
2404	if (is_cow_mapping(vma->vm_flags)) {
2405		if (addr != vma->vm_start || end != vma->vm_end)
2406			return -EINVAL;
2407		vma->vm_pgoff = pfn;
2408	}
2409
2410	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
2411
2412	BUG_ON(addr >= end);
2413	pfn -= addr >> PAGE_SHIFT;
2414	pgd = pgd_offset(mm, addr);
2415	flush_cache_range(vma, addr, end);
2416	do {
2417		next = pgd_addr_end(addr, end);
2418		err = remap_p4d_range(mm, pgd, addr, next,
2419				pfn + (addr >> PAGE_SHIFT), prot);
2420		if (err)
2421			return err;
2422	} while (pgd++, addr = next, addr != end);
2423
2424	return 0;
2425}
2426
2427/**
2428 * remap_pfn_range - remap kernel memory to userspace
2429 * @vma: user vma to map to
2430 * @addr: target page aligned user address to start at
2431 * @pfn: page frame number of kernel physical memory address
2432 * @size: size of mapping area
2433 * @prot: page protection flags for this mapping
2434 *
2435 * Note: this is only safe if the mm semaphore is held when called.
2436 *
2437 * Return: %0 on success, negative error code otherwise.
2438 */
2439int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
2440		    unsigned long pfn, unsigned long size, pgprot_t prot)
2441{
2442	int err;
2443
2444	err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size));
2445	if (err)
2446		return -EINVAL;
2447
2448	err = remap_pfn_range_notrack(vma, addr, pfn, size, prot);
2449	if (err)
2450		untrack_pfn(vma, pfn, PAGE_ALIGN(size));
2451	return err;
2452}
2453EXPORT_SYMBOL(remap_pfn_range);
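
/*
 * Example (illustrative sketch, not part of this file): the classic
 * use, mapping a physical device region at mmap() time while honouring
 * the offset userspace requested.  "phys_base" is a hypothetical field.
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_dev *dev = file->private_data;
 *		unsigned long pfn = (dev->phys_base >> PAGE_SHIFT) +
 *				    vma->vm_pgoff;
 *
 *		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *		return remap_pfn_range(vma, vma->vm_start, pfn,
 *				       vma->vm_end - vma->vm_start,
 *				       vma->vm_page_prot);
 *	}
 */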
2454
2455/**
2456 * vm_iomap_memory - remap memory to userspace
2457 * @vma: user vma to map to
2458 * @start: start of the physical memory to be mapped
2459 * @len: size of area
2460 *
2461 * This is a simplified io_remap_pfn_range() for common driver use. The
2462 * driver just needs to give us the physical memory range to be mapped,
2463 * we'll figure out the rest from the vma information.
2464 *
2465 * NOTE! Some drivers might want to tweak vma->vm_page_prot first to get
2466 * whatever write-combining behaviour or similar they need.
2467 *
2468 * Return: %0 on success, negative error code otherwise.
2469 */
2470int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
2471{
2472	unsigned long vm_len, pfn, pages;
2473
2474	/* Check that the physical memory area passed in looks valid */
2475	if (start + len < start)
2476		return -EINVAL;
2477	/*
2478	 * You *really* shouldn't map things that aren't page-aligned,
2479	 * but we've historically allowed it because IO memory might
2480	 * just have smaller alignment.
2481	 */
2482	len += start & ~PAGE_MASK;
2483	pfn = start >> PAGE_SHIFT;
2484	pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
2485	if (pfn + pages < pfn)
2486		return -EINVAL;
2487
2488	/* We start the mapping 'vm_pgoff' pages into the area */
2489	if (vma->vm_pgoff > pages)
2490		return -EINVAL;
2491	pfn += vma->vm_pgoff;
2492	pages -= vma->vm_pgoff;
2493
2494	/* Can we fit all of the mapping? */
2495	vm_len = vma->vm_end - vma->vm_start;
2496	if (vm_len >> PAGE_SHIFT > pages)
2497		return -EINVAL;
2498
2499	/* Ok, let it rip */
2500	return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
2501}
2502EXPORT_SYMBOL(vm_iomap_memory);
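
/*
 * Example (illustrative sketch, not part of this file): the
 * simplified form of the remap_pfn_range() sketch above; the vm_pgoff
 * and size validation moves into vm_iomap_memory() itself.
 * "phys_base" and "phys_len" are hypothetical fields.
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_dev *dev = file->private_data;
 *
 *		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *		return vm_iomap_memory(vma, dev->phys_base, dev->phys_len);
 *	}
 */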
2503
2504static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
2505				     unsigned long addr, unsigned long end,
2506				     pte_fn_t fn, void *data, bool create,
2507				     pgtbl_mod_mask *mask)
2508{
2509	pte_t *pte, *mapped_pte;
2510	int err = 0;
2511	spinlock_t *ptl;
2512
2513	if (create) {
2514		mapped_pte = pte = (mm == &init_mm) ?
2515			pte_alloc_kernel_track(pmd, addr, mask) :
2516			pte_alloc_map_lock(mm, pmd, addr, &ptl);
2517		if (!pte)
2518			return -ENOMEM;
2519	} else {
2520		mapped_pte = pte = (mm == &init_mm) ?
2521			pte_offset_kernel(pmd, addr) :
2522			pte_offset_map_lock(mm, pmd, addr, &ptl);
2523	}
2524
2525	BUG_ON(pmd_huge(*pmd));
2526
2527	arch_enter_lazy_mmu_mode();
2528
2529	if (fn) {
2530		do {
2531			if (create || !pte_none(*pte)) {
2532				err = fn(pte++, addr, data);
2533				if (err)
2534					break;
2535			}
2536		} while (addr += PAGE_SIZE, addr != end);
2537	}
2538	*mask |= PGTBL_PTE_MODIFIED;
2539
2540	arch_leave_lazy_mmu_mode();
2541
2542	if (mm != &init_mm)
2543		pte_unmap_unlock(mapped_pte, ptl);
2544	return err;
2545}
2546
2547static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
2548				     unsigned long addr, unsigned long end,
2549				     pte_fn_t fn, void *data, bool create,
2550				     pgtbl_mod_mask *mask)
2551{
2552	pmd_t *pmd;
2553	unsigned long next;
2554	int err = 0;
2555
2556	BUG_ON(pud_huge(*pud));
2557
2558	if (create) {
2559		pmd = pmd_alloc_track(mm, pud, addr, mask);
2560		if (!pmd)
2561			return -ENOMEM;
2562	} else {
2563		pmd = pmd_offset(pud, addr);
2564	}
2565	do {
2566		next = pmd_addr_end(addr, end);
2567		if (pmd_none(*pmd) && !create)
2568			continue;
2569		if (WARN_ON_ONCE(pmd_leaf(*pmd)))
2570			return -EINVAL;
2571		if (!pmd_none(*pmd) && WARN_ON_ONCE(pmd_bad(*pmd))) {
2572			if (!create)
2573				continue;
2574			pmd_clear_bad(pmd);
2575		}
2576		err = apply_to_pte_range(mm, pmd, addr, next,
2577					 fn, data, create, mask);
2578		if (err)
2579			break;
2580	} while (pmd++, addr = next, addr != end);
2581
2582	return err;
2583}
2584
2585static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
2586				     unsigned long addr, unsigned long end,
2587				     pte_fn_t fn, void *data, bool create,
2588				     pgtbl_mod_mask *mask)
2589{
2590	pud_t *pud;
2591	unsigned long next;
2592	int err = 0;
2593
2594	if (create) {
2595		pud = pud_alloc_track(mm, p4d, addr, mask);
2596		if (!pud)
2597			return -ENOMEM;
2598	} else {
2599		pud = pud_offset(p4d, addr);
2600	}
2601	do {
2602		next = pud_addr_end(addr, end);
2603		if (pud_none(*pud) && !create)
2604			continue;
2605		if (WARN_ON_ONCE(pud_leaf(*pud)))
2606			return -EINVAL;
2607		if (!pud_none(*pud) && WARN_ON_ONCE(pud_bad(*pud))) {
2608			if (!create)
2609				continue;
2610			pud_clear_bad(pud);
2611		}
2612		err = apply_to_pmd_range(mm, pud, addr, next,
2613					 fn, data, create, mask);
2614		if (err)
2615			break;
2616	} while (pud++, addr = next, addr != end);
2617
2618	return err;
2619}
2620
2621static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
2622				     unsigned long addr, unsigned long end,
2623				     pte_fn_t fn, void *data, bool create,
2624				     pgtbl_mod_mask *mask)
2625{
2626	p4d_t *p4d;
2627	unsigned long next;
2628	int err = 0;
2629
2630	if (create) {
2631		p4d = p4d_alloc_track(mm, pgd, addr, mask);
2632		if (!p4d)
2633			return -ENOMEM;
2634	} else {
2635		p4d = p4d_offset(pgd, addr);
2636	}
2637	do {
2638		next = p4d_addr_end(addr, end);
2639		if (p4d_none(*p4d) && !create)
2640			continue;
2641		if (WARN_ON_ONCE(p4d_leaf(*p4d)))
2642			return -EINVAL;
2643		if (!p4d_none(*p4d) && WARN_ON_ONCE(p4d_bad(*p4d))) {
2644			if (!create)
2645				continue;
2646			p4d_clear_bad(p4d);
2647		}
2648		err = apply_to_pud_range(mm, p4d, addr, next,
2649					 fn, data, create, mask);
2650		if (err)
2651			break;
2652	} while (p4d++, addr = next, addr != end);
2653
2654	return err;
2655}
2656
2657static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr,
2658				 unsigned long size, pte_fn_t fn,
2659				 void *data, bool create)
2660{
2661	pgd_t *pgd;
2662	unsigned long start = addr, next;
2663	unsigned long end = addr + size;
2664	pgtbl_mod_mask mask = 0;
2665	int err = 0;
2666
2667	if (WARN_ON(addr >= end))
2668		return -EINVAL;
2669
2670	pgd = pgd_offset(mm, addr);
2671	do {
2672		next = pgd_addr_end(addr, end);
2673		if (pgd_none(*pgd) && !create)
2674			continue;
2675		if (WARN_ON_ONCE(pgd_leaf(*pgd)))
2676			return -EINVAL;
2677		if (!pgd_none(*pgd) && WARN_ON_ONCE(pgd_bad(*pgd))) {
2678			if (!create)
2679				continue;
2680			pgd_clear_bad(pgd);
2681		}
2682		err = apply_to_p4d_range(mm, pgd, addr, next,
2683					 fn, data, create, &mask);
2684		if (err)
2685			break;
2686	} while (pgd++, addr = next, addr != end);
2687
2688	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
2689		arch_sync_kernel_mappings(start, start + size);
2690
2691	return err;
2692}
2693
2694/*
2695 * Scan a region of virtual memory, filling in page tables as necessary
2696 * and calling a provided function on each leaf page table.
2697 */
2698int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
2699			unsigned long size, pte_fn_t fn, void *data)
2700{
2701	return __apply_to_page_range(mm, addr, size, fn, data, true);
2702}
2703EXPORT_SYMBOL_GPL(apply_to_page_range);
2704
2705/*
2706 * Scan a region of virtual memory, calling a provided function on
2707 * each leaf page table where it exists.
2708 *
2709 * Unlike apply_to_page_range, this does _not_ fill in page tables
2710 * where they are absent.
2711 */
2712int apply_to_existing_page_range(struct mm_struct *mm, unsigned long addr,
2713				 unsigned long size, pte_fn_t fn, void *data)
2714{
2715	return __apply_to_page_range(mm, addr, size, fn, data, false);
2716}
2717EXPORT_SYMBOL_GPL(apply_to_existing_page_range);
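
/*
 * Example (illustrative sketch, not part of this file): a pte_fn_t
 * callback counting present leaf entries over an already-populated
 * kernel range; the accumulator travels through @data.
 *
 *	static int count_present(pte_t *pte, unsigned long addr, void *data)
 *	{
 *		unsigned long *count = data;
 *
 *		if (pte_present(*pte))
 *			(*count)++;
 *		return 0;
 *	}
 *
 *	// apply_to_existing_page_range(&init_mm, addr, size,
 *	//			       count_present, &count);
 */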
2718
2719/*
2720 * handle_pte_fault chooses page fault handler according to an entry which was
2721 * read non-atomically.  Before making any commitment, on those architectures
2722 * or configurations (e.g. i386 with PAE) which might give a mix of unmatched
2723 * parts, do_swap_page must check under lock before unmapping the pte and
2724 * proceeding (but do_wp_page is only called after already making such a check;
2725 * and do_anonymous_page can safely check later on).
2726 */
2727static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
2728				pte_t *page_table, pte_t orig_pte)
2729{
2730	int same = 1;
2731#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPTION)
2732	if (sizeof(pte_t) > sizeof(unsigned long)) {
2733		spinlock_t *ptl = pte_lockptr(mm, pmd);
2734		spin_lock(ptl);
2735		same = pte_same(*page_table, orig_pte);
2736		spin_unlock(ptl);
2737	}
2738#endif
2739	pte_unmap(page_table);
2740	return same;
2741}
2742
2743static inline bool cow_user_page(struct page *dst, struct page *src,
2744				 struct vm_fault *vmf)
2745{
2746	bool ret;
2747	void *kaddr;
2748	void __user *uaddr;
2749	bool locked = false;
2750	struct vm_area_struct *vma = vmf->vma;
2751	struct mm_struct *mm = vma->vm_mm;
2752	unsigned long addr = vmf->address;
2753
2754	if (likely(src)) {
2755		copy_user_highpage(dst, src, addr, vma);
2756		return true;
2757	}
2758
2759	/*
2760	 * If the source page was a PFN mapping, we don't have
2761	 * a "struct page" for it. We do a best-effort copy by
2762	 * just copying from the original user address. If that
2763	 * fails, we just zero-fill it. Live with it.
2764	 */
2765	kaddr = kmap_atomic(dst);
2766	uaddr = (void __user *)(addr & PAGE_MASK);
2767
2768	/*
2769	 * On architectures with software "accessed" bits, we would
2770	 * take a double page fault, so mark it accessed here.
2771	 */
2772	if (arch_faults_on_old_pte() && !pte_young(vmf->orig_pte)) {
2773		pte_t entry;
2774
2775		vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
2776		locked = true;
2777		if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
2778			/*
2779			 * Another thread has already handled the fault;
2780			 * just update the local TLB.
2781			 */
2782			update_mmu_tlb(vma, addr, vmf->pte);
2783			ret = false;
2784			goto pte_unlock;
2785		}
2786
2787		entry = pte_mkyoung(vmf->orig_pte);
2788		if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0))
2789			update_mmu_cache(vma, addr, vmf->pte);
2790	}
2791
2792	/*
2793	 * This really shouldn't fail, because the page is there
2794	 * in the page tables. But it might just be unreadable,
2795	 * in which case we just give up and fill the result with
2796	 * zeroes.
2797	 */
2798	if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
2799		if (locked)
2800			goto warn;
2801
2802		/* Re-validate under PTL if the page is still mapped */
2803		vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
2804		locked = true;
2805		if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
2806			/* The PTE changed under us, update local tlb */
2807			update_mmu_tlb(vma, addr, vmf->pte);
2808			ret = false;
2809			goto pte_unlock;
2810		}
2811
2812		/*
2813		 * The same page may have been mapped back since the last copy attempt.
2814		 * Try to copy again under PTL.
2815		 */
2816		if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
2817			/*
2818			 * Warn in case some obscure use-case
2819			 * ever hits this path.
2820			 */
2821warn:
2822			WARN_ON_ONCE(1);
2823			clear_page(kaddr);
2824		}
2825	}
2826
2827	ret = true;
2828
2829pte_unlock:
2830	if (locked)
2831		pte_unmap_unlock(vmf->pte, vmf->ptl);
2832	kunmap_atomic(kaddr);
2833	flush_dcache_page(dst);
2834
2835	return ret;
2836}
2837
2838static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
2839{
2840	struct file *vm_file = vma->vm_file;
2841
2842	if (vm_file)
2843		return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO;
2844
2845	/*
2846	 * Special mappings (e.g. VDSO) do not have any file so fake
2847	 * a default GFP_KERNEL for them.
2848	 */
2849	return GFP_KERNEL;
2850}
2851
2852/*
2853 * Notify the address space that the page is about to become writable so that
2854 * it can prohibit this or wait for the page to get into an appropriate state.
2855 *
2856 * We do this without the lock held, so that it can sleep if it needs to.
2857 */
2858static vm_fault_t do_page_mkwrite(struct vm_fault *vmf)
2859{
2860	vm_fault_t ret;
2861	struct page *page = vmf->page;
2862	unsigned int old_flags = vmf->flags;
2863
2864	vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
2865
2866	if (vmf->vma->vm_file &&
2867	    IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host))
2868		return VM_FAULT_SIGBUS;
2869
2870	ret = vmf->vma->vm_ops->page_mkwrite(vmf);
2871	/* Restore original flags so that caller is not surprised */
2872	vmf->flags = old_flags;
2873	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
2874		return ret;
2875	if (unlikely(!(ret & VM_FAULT_LOCKED))) {
2876		lock_page(page);
2877		if (!page->mapping) {
2878			unlock_page(page);
2879			return 0; /* retry */
2880		}
2881		ret |= VM_FAULT_LOCKED;
2882	} else
2883		VM_BUG_ON_PAGE(!PageLocked(page), page);
2884	return ret;
2885}
2886
2887/*
2888 * Handle dirtying of a page in shared file mapping on a write fault.
2889 *
2890 * The function expects the page to be locked and unlocks it.
2891 */
2892static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf)
2893{
2894	struct vm_area_struct *vma = vmf->vma;
2895	struct address_space *mapping;
2896	struct page *page = vmf->page;
2897	bool dirtied;
2898	bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite;
2899
2900	dirtied = set_page_dirty(page);
2901	VM_BUG_ON_PAGE(PageAnon(page), page);
2902	/*
2903	 * Take a local copy of the address_space - page.mapping may be zeroed
2904	 * by truncate after unlock_page().   The address_space itself remains
2905	 * pinned by vma->vm_file's reference.  We rely on unlock_page()'s
2906	 * release semantics to prevent the compiler from undoing this copying.
2907	 */
2908	mapping = page_rmapping(page);
2909	unlock_page(page);
2910
2911	if (!page_mkwrite)
2912		file_update_time(vma->vm_file);
2913
2914	/*
2915	 * Throttle page dirtying rate down to writeback speed.
2916	 *
2917	 * mapping may be NULL here because some device drivers do not
2918	 * set page.mapping but still dirty their pages.
2919	 *
2920	 * Drop the mmap_lock before waiting on IO, if we can. The file
2921	 * is pinning the mapping, as per above.
2922	 */
2923	if ((dirtied || page_mkwrite) && mapping) {
2924		struct file *fpin;
2925
2926		fpin = maybe_unlock_mmap_for_io(vmf, NULL);
2927		balance_dirty_pages_ratelimited(mapping);
2928		if (fpin) {
2929			fput(fpin);
2930			return VM_FAULT_RETRY;
2931		}
2932	}
2933
2934	return 0;
2935}
2936
2937/*
2938 * Handle write page faults for pages that can be reused in the current vma
2939 *
2940 * This can happen either due to the mapping being with the VM_SHARED flag,
2941 * or due to us being the last reference standing to the page. In either
2942 * case, all we need to do here is to mark the page as writable and update
2943 * any related book-keeping.
2944 */
2945static inline void wp_page_reuse(struct vm_fault *vmf)
2946	__releases(vmf->ptl)
2947{
2948	struct vm_area_struct *vma = vmf->vma;
2949	struct page *page = vmf->page;
2950	pte_t entry;
2951	/*
2952	 * Clear the pages cpupid information as the existing
2953	 * information potentially belongs to a now completely
2954	 * unrelated process.
2955	 */
2956	if (page)
2957		page_cpupid_xchg_last(page, (1 << LAST_CPUPID_SHIFT) - 1);
2958
2959	flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
2960	entry = pte_mkyoung(vmf->orig_pte);
2961	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2962	if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
2963		update_mmu_cache(vma, vmf->address, vmf->pte);
2964	pte_unmap_unlock(vmf->pte, vmf->ptl);
2965	count_vm_event(PGREUSE);
2966}
2967
2968/*
2969 * Handle the case of a page which we actually need to copy to a new page.
2970 *
2971 * Called with mmap_lock locked and the old page referenced, but
2972 * without the ptl held.
2973 *
2974 * High level logic flow:
2975 *
2976 * - Allocate a page, copy the content of the old page to the new one.
2977 * - Handle book keeping and accounting - cgroups, mmu-notifiers, etc.
2978 * - Take the PTL. If the pte changed, bail out and release the allocated page
2979 * - If the pte is still the way we remember it, update the page table and all
2980 *   relevant references. This includes dropping the reference the page-table
2981 *   held to the old page, as well as updating the rmap.
2982 * - In any case, unlock the PTL and drop the reference we took to the old page.
2983 */
2984static vm_fault_t wp_page_copy(struct vm_fault *vmf)
2985{
2986	struct vm_area_struct *vma = vmf->vma;
2987	struct mm_struct *mm = vma->vm_mm;
2988	struct page *old_page = vmf->page;
2989	struct page *new_page = NULL;
2990	pte_t entry;
2991	int page_copied = 0;
2992	struct mmu_notifier_range range;
2993
2994	if (unlikely(anon_vma_prepare(vma)))
2995		goto oom;
2996
2997	if (is_zero_pfn(pte_pfn(vmf->orig_pte))) {
2998		new_page = alloc_zeroed_user_highpage_movable(vma,
2999							      vmf->address);
3000		if (!new_page)
3001			goto oom;
3002	} else {
3003		new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
3004				vmf->address);
3005		if (!new_page)
3006			goto oom;
3007
3008		if (!cow_user_page(new_page, old_page, vmf)) {
3009			/*
3010			 * COW failed; if the fault was resolved by another
3011			 * thread, that's fine. If not, userspace will
3012			 * re-fault on the same address and we will handle
3013			 * the fault on the second attempt.
3014			 */
3015			put_page(new_page);
3016			if (old_page)
3017				put_page(old_page);
3018			return 0;
3019		}
3020	}
3021
3022	if (mem_cgroup_charge(new_page, mm, GFP_KERNEL))
3023		goto oom_free_new;
3024	cgroup_throttle_swaprate(new_page, GFP_KERNEL);
3025
3026	__SetPageUptodate(new_page);
3027
3028	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
3029				vmf->address & PAGE_MASK,
3030				(vmf->address & PAGE_MASK) + PAGE_SIZE);
3031	mmu_notifier_invalidate_range_start(&range);
3032
3033	/*
3034	 * Re-check the pte - we dropped the lock
3035	 */
3036	vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
3037	if (likely(pte_same(*vmf->pte, vmf->orig_pte))) {
3038		if (old_page) {
3039			if (!PageAnon(old_page)) {
3040				dec_mm_counter_fast(mm,
3041						mm_counter_file(old_page));
3042				inc_mm_counter_fast(mm, MM_ANONPAGES);
3043			}
3044		} else {
3045			inc_mm_counter_fast(mm, MM_ANONPAGES);
3046		}
3047		flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
3048		entry = mk_pte(new_page, vma->vm_page_prot);
3049		entry = pte_sw_mkyoung(entry);
3050		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
3051
3052		/*
3053		 * Clear the pte entry and flush it first, before updating the
3054		 * pte with the new entry, to keep TLBs on different CPUs in
3055		 * sync. This code used to set the new PTE then flush TLBs, but
3056		 * that left a window where the new PTE could be loaded into
3057		 * some TLBs while the old PTE remains in others.
3058		 */
3059		ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
3060		page_add_new_anon_rmap(new_page, vma, vmf->address, false);
3061		lru_cache_add_inactive_or_unevictable(new_page, vma);
3062		/*
3063		 * We call the notify macro here because, when using secondary
3064		 * mmu page tables (such as kvm shadow page tables), we want the
3065		 * new page to be mapped directly into the secondary page table.
3066		 */
3067		set_pte_at_notify(mm, vmf->address, vmf->pte, entry);
3068		update_mmu_cache(vma, vmf->address, vmf->pte);
3069		if (old_page) {
3070			/*
3071			 * Only after switching the pte to the new page may
3072			 * we remove the mapcount here. Otherwise another
3073			 * process may come and find the rmap count decremented
3074			 * before the pte is switched to the new page, and
3075			 * "reuse" the old page writing into it while our pte
3076			 * here still points into it and can be read by other
3077			 * threads.
3078			 *
3079			 * The critical issue is to order this
3080			 * page_remove_rmap with the ptep_clear_flush above.
3081			 * Those stores are ordered by (if nothing else,)
3082			 * the barrier present in the atomic_add_negative
3083			 * in page_remove_rmap.
3084			 *
3085			 * Then the TLB flush in ptep_clear_flush ensures that
3086			 * no process can access the old page before the
3087			 * decremented mapcount is visible. And the old page
3088			 * cannot be reused until after the decremented
3089			 * mapcount is visible. So transitively, TLBs to
3090			 * old page will be flushed before it can be reused.
3091			 */
3092			page_remove_rmap(old_page, false);
3093		}
3094
3095		/* Free the old page.. */
3096		new_page = old_page;
3097		page_copied = 1;
3098	} else {
3099		update_mmu_tlb(vma, vmf->address, vmf->pte);
3100	}
3101
3102	if (new_page)
3103		put_page(new_page);
3104
3105	pte_unmap_unlock(vmf->pte, vmf->ptl);
3106	/*
3107	 * No need to double call mmu_notifier->invalidate_range() callback as
3108	 * the above ptep_clear_flush_notify() did already call it.
3109	 */
3110	mmu_notifier_invalidate_range_only_end(&range);
3111	if (old_page) {
3112		/*
3113		 * Don't let another task, with possibly unlocked vma,
3114		 * keep the mlocked page.
3115		 */
3116		if (page_copied && (vma->vm_flags & VM_LOCKED)) {
3117			lock_page(old_page);	/* LRU manipulation */
3118			if (PageMlocked(old_page))
3119				munlock_vma_page(old_page);
3120			unlock_page(old_page);
3121		}
3122		if (page_copied)
3123			free_swap_cache(old_page);
3124		put_page(old_page);
3125	}
3126	return page_copied ? VM_FAULT_WRITE : 0;
3127oom_free_new:
3128	put_page(new_page);
3129oom:
3130	if (old_page)
3131		put_page(old_page);
3132	return VM_FAULT_OOM;
3133}
3134
3135/**
3136 * finish_mkwrite_fault - finish page fault for a shared mapping, making PTE
3137 *			  writeable once the page is prepared
3138 *
3139 * @vmf: structure describing the fault
3140 *
3141 * This function handles all that is needed to finish a write page fault in a
3142 * shared mapping due to PTE being read-only once the mapped page is prepared.
3143 * It handles locking of PTE and modifying it.
3144 *
3145 * The function expects the page to be locked, or some other protection
3146 * against concurrent faults / writeback (such as DAX radix tree locks).
3147 *
3148 * Return: %0 on success, %VM_FAULT_NOPAGE when PTE got changed before
3149 * we acquired PTE lock.
3150 */
3151vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf)
3152{
3153	WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED));
3154	vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
3155				       &vmf->ptl);
3156	/*
3157	 * We might have raced with another page fault while we released the
3158	 * pte_offset_map_lock.
3159	 */
3160	if (!pte_same(*vmf->pte, vmf->orig_pte)) {
3161		update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
3162		pte_unmap_unlock(vmf->pte, vmf->ptl);
3163		return VM_FAULT_NOPAGE;
3164	}
3165	wp_page_reuse(vmf);
3166	return 0;
3167}
3168
3169/*
3170 * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED
3171 * mapping
3172 */
3173static vm_fault_t wp_pfn_shared(struct vm_fault *vmf)
3174{
3175	struct vm_area_struct *vma = vmf->vma;
3176
3177	if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) {
3178		vm_fault_t ret;
3179
3180		pte_unmap_unlock(vmf->pte, vmf->ptl);
3181		vmf->flags |= FAULT_FLAG_MKWRITE;
3182		ret = vma->vm_ops->pfn_mkwrite(vmf);
3183		if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))
3184			return ret;
3185		return finish_mkwrite_fault(vmf);
3186	}
3187	wp_page_reuse(vmf);
3188	return VM_FAULT_WRITE;
3189}
3190
3191static vm_fault_t wp_page_shared(struct vm_fault *vmf)
3192	__releases(vmf->ptl)
3193{
3194	struct vm_area_struct *vma = vmf->vma;
3195	vm_fault_t ret = VM_FAULT_WRITE;
3196
3197	get_page(vmf->page);
3198
3199	if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
3200		vm_fault_t tmp;
3201
3202		pte_unmap_unlock(vmf->pte, vmf->ptl);
3203		tmp = do_page_mkwrite(vmf);
3204		if (unlikely(!tmp || (tmp &
3205				      (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
3206			put_page(vmf->page);
3207			return tmp;
3208		}
3209		tmp = finish_mkwrite_fault(vmf);
3210		if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
3211			unlock_page(vmf->page);
3212			put_page(vmf->page);
3213			return tmp;
3214		}
3215	} else {
3216		wp_page_reuse(vmf);
3217		lock_page(vmf->page);
3218	}
3219	ret |= fault_dirty_shared_page(vmf);
3220	put_page(vmf->page);
3221
3222	return ret;
3223}
3224
3225/*
3226 * This routine handles present pages, when users try to write
3227 * to a shared page. It is done by copying the page to a new address
3228 * and decrementing the shared-page counter for the old page.
3229 *
3230 * Note that this routine assumes that the protection checks have been
3231 * done by the caller (the low-level page fault routine in most cases).
3232 * Thus we can safely just mark it writable once we've done any necessary
3233 * COW.
3234 *
3235 * We also mark the page dirty at this point even though the page will
3236 * change only once the write actually happens. This avoids a few races,
3237 * and potentially makes it more efficient.
3238 *
3239 * We enter with non-exclusive mmap_lock (to exclude vma changes,
3240 * but allow concurrent faults), with pte both mapped and locked.
3241 * We return with mmap_lock still held, but pte unmapped and unlocked.
3242 */
3243static vm_fault_t do_wp_page(struct vm_fault *vmf)
3244	__releases(vmf->ptl)
3245{
3246	struct vm_area_struct *vma = vmf->vma;
3247
3248	if (userfaultfd_pte_wp(vma, *vmf->pte)) {
3249		pte_unmap_unlock(vmf->pte, vmf->ptl);
3250		return handle_userfault(vmf, VM_UFFD_WP);
3251	}
3252
3253	/*
3254	 * Userfaultfd write-protect can defer flushes. Ensure the TLB
3255	 * is flushed in this case before copying.
3256	 */
3257	if (unlikely(userfaultfd_wp(vmf->vma) &&
3258		     mm_tlb_flush_pending(vmf->vma->vm_mm)))
3259		flush_tlb_page(vmf->vma, vmf->address);
3260
3261	vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
3262	if (!vmf->page) {
3263		/*
3264		 * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
3265		 * VM_PFNMAP VMA.
3266		 *
3267		 * We should not cow pages in a shared writeable mapping.
3268		 * Just mark the pages writable and/or call ops->pfn_mkwrite.
3269		 */
3270		if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
3271				     (VM_WRITE|VM_SHARED))
3272			return wp_pfn_shared(vmf);
3273
3274		pte_unmap_unlock(vmf->pte, vmf->ptl);
3275		return wp_page_copy(vmf);
3276	}
3277
3278	/*
3279	 * Take out anonymous pages first; anonymous shared vmas are
3280	 * not dirty accountable.
3281	 */
3282	if (PageAnon(vmf->page)) {
3283		struct page *page = vmf->page;
3284
3285		/* PageKsm() doesn't necessarily raise the page refcount */
3286		if (PageKsm(page) || page_count(page) != 1)
3287			goto copy;
3288		if (!trylock_page(page))
3289			goto copy;
3290		if (PageKsm(page) || page_mapcount(page) != 1 || page_count(page) != 1) {
3291			unlock_page(page);
3292			goto copy;
3293		}
3294		/*
3295		 * Ok, we've got the only map reference, and the only
3296		 * page count reference, and the page is locked,
3297		 * it's dark out, and we're wearing sunglasses. Hit it.
3298		 */
3299		unlock_page(page);
3300		wp_page_reuse(vmf);
3301		return VM_FAULT_WRITE;
3302	} else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
3303					(VM_WRITE|VM_SHARED))) {
3304		return wp_page_shared(vmf);
3305	}
3306copy:
3307	/*
3308	 * Ok, we need to copy. Oh, well..
3309	 */
3310	get_page(vmf->page);
3311
3312	pte_unmap_unlock(vmf->pte, vmf->ptl);
3313	return wp_page_copy(vmf);
3314}
3315
3316static void unmap_mapping_range_vma(struct vm_area_struct *vma,
3317		unsigned long start_addr, unsigned long end_addr,
3318		struct zap_details *details)
3319{
3320	zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
3321}
3322
3323static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
3324					    struct zap_details *details)
3325{
3326	struct vm_area_struct *vma;
3327	pgoff_t vba, vea, zba, zea;
3328
3329	vma_interval_tree_foreach(vma, root,
3330			details->first_index, details->last_index) {
3331
3332		vba = vma->vm_pgoff;
3333		vea = vba + vma_pages(vma) - 1;
3334		zba = details->first_index;
3335		if (zba < vba)
3336			zba = vba;
3337		zea = details->last_index;
3338		if (zea > vea)
3339			zea = vea;
3340
3341		unmap_mapping_range_vma(vma,
3342			((zba - vba) << PAGE_SHIFT) + vma->vm_start,
3343			((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
3344				details);
3345	}
3346}
3347
3348/**
3349 * unmap_mapping_page() - Unmap single page from processes.
3350 * @page: The locked page to be unmapped.
3351 *
3352 * Unmap this page from any userspace process which still has it mmaped.
3353 * Typically, for efficiency, the range of nearby pages has already been
3354 * unmapped by unmap_mapping_pages() or unmap_mapping_range().  But once
3355 * truncation or invalidation holds the lock on a page, it may find that
3356 * the page has been remapped again: and then uses unmap_mapping_page()
3357 * to unmap it finally.
3358 */
3359void unmap_mapping_page(struct page *page)
3360{
3361	struct address_space *mapping = page->mapping;
3362	struct zap_details details = { };
3363
3364	VM_BUG_ON(!PageLocked(page));
3365	VM_BUG_ON(PageTail(page));
3366
3367	details.check_mapping = mapping;
3368	details.first_index = page->index;
3369	details.last_index = page->index + thp_nr_pages(page) - 1;
3370	details.single_page = page;
3371
3372	i_mmap_lock_write(mapping);
3373	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
3374		unmap_mapping_range_tree(&mapping->i_mmap, &details);
3375	i_mmap_unlock_write(mapping);
3376}
3377
3378/**
3379 * unmap_mapping_pages() - Unmap pages from processes.
3380 * @mapping: The address space containing pages to be unmapped.
3381 * @start: Index of first page to be unmapped.
3382 * @nr: Number of pages to be unmapped.  0 to unmap to end of file.
3383 * @even_cows: Whether to unmap even private COWed pages.
3384 *
3385 * Unmap the pages in this address space from any userspace process which
3386 * has them mmaped.  Generally, you want to remove COWed pages as well when
3387 * a file is being truncated, but not when invalidating pages from the page
3388 * cache.
3389 */
3390void unmap_mapping_pages(struct address_space *mapping, pgoff_t start,
3391		pgoff_t nr, bool even_cows)
3392{
3393	struct zap_details details = { };
3394
3395	details.check_mapping = even_cows ? NULL : mapping;
3396	details.first_index = start;
3397	details.last_index = start + nr - 1;
3398	if (details.last_index < details.first_index)
3399		details.last_index = ULONG_MAX;
3400
3401	i_mmap_lock_write(mapping);
3402	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
3403		unmap_mapping_range_tree(&mapping->i_mmap, &details);
3404	i_mmap_unlock_write(mapping);
3405}
3406
3407/**
3408 * unmap_mapping_range - unmap the portion of all mmaps in the specified
3409 * address_space corresponding to the specified byte range in the underlying
3410 * file.
3411 *
3412 * @mapping: the address space containing mmaps to be unmapped.
3413 * @holebegin: byte in first page to unmap, relative to the start of
3414 * the underlying file.  This will be rounded down to a PAGE_SIZE
3415 * boundary.  Note that this is different from truncate_pagecache(), which
3416 * must keep the partial page.  In contrast, we must get rid of
3417 * partial pages.
3418 * @holelen: size of prospective hole in bytes.  This will be rounded
3419 * up to a PAGE_SIZE boundary.  A holelen of zero truncates to the
3420 * end of the file.
3421 * @even_cows: 1 when truncating a file, unmap even private COWed pages;
3422 * but 0 when invalidating pagecache, don't throw away private data.
3423 */
3424void unmap_mapping_range(struct address_space *mapping,
3425		loff_t const holebegin, loff_t const holelen, int even_cows)
3426{
3427	pgoff_t hba = holebegin >> PAGE_SHIFT;
3428	pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
3429
3430	/* Check for overflow. */
3431	if (sizeof(holelen) > sizeof(hlen)) {
3432		long long holeend =
3433			(holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
3434		if (holeend & ~(long long)ULONG_MAX)
3435			hlen = ULONG_MAX - hba + 1;
3436	}
3437
3438	unmap_mapping_pages(mapping, hba, hlen, even_cows);
3439}
3440EXPORT_SYMBOL(unmap_mapping_range);
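
/*
 * Example: this is roughly what truncate_pagecache() does at truncate
 * time - drop every mapping of the file beyond the new size, private
 * COWed pages included (even_cows == 1), before and after dropping the
 * pagecache itself:
 *
 *	loff_t holebegin = round_up(newsize, PAGE_SIZE);
 *
 *	unmap_mapping_range(mapping, holebegin, 0, 1);
 *	truncate_inode_pages(mapping, newsize);
 *	unmap_mapping_range(mapping, holebegin, 0, 1);
 */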
3441
3442/*
3443 * Restore a potential device exclusive pte to a working pte entry
3444 */
3445static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf)
3446{
3447	struct page *page = vmf->page;
3448	struct vm_area_struct *vma = vmf->vma;
3449	struct mmu_notifier_range range;
3450
3451	if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags))
3452		return VM_FAULT_RETRY;
3453	mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, vma,
3454				vma->vm_mm, vmf->address & PAGE_MASK,
3455				(vmf->address & PAGE_MASK) + PAGE_SIZE, NULL);
3456	mmu_notifier_invalidate_range_start(&range);
3457
3458	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3459				&vmf->ptl);
3460	if (likely(pte_same(*vmf->pte, vmf->orig_pte)))
3461		restore_exclusive_pte(vma, page, vmf->address, vmf->pte);
3462
3463	pte_unmap_unlock(vmf->pte, vmf->ptl);
3464	unlock_page(page);
3465
3466	mmu_notifier_invalidate_range_end(&range);
3467	return 0;
3468}
3469
3470/*
3471 * We enter with non-exclusive mmap_lock (to exclude vma changes,
3472 * but allow concurrent faults), and pte mapped but not yet locked.
3473 * We return with pte unmapped and unlocked.
3474 *
3475 * We return with the mmap_lock locked or unlocked in the same cases
3476 * as does filemap_fault().
3477 */
3478vm_fault_t do_swap_page(struct vm_fault *vmf)
3479{
3480	struct vm_area_struct *vma = vmf->vma;
3481	struct page *page = NULL, *swapcache;
3482	struct swap_info_struct *si = NULL;
3483	swp_entry_t entry;
3484	pte_t pte;
3485	int locked;
3486	int exclusive = 0;
3487	vm_fault_t ret = 0;
3488	void *shadow = NULL;
3489
3490	if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte))
3491		goto out;
3492
3493	entry = pte_to_swp_entry(vmf->orig_pte);
3494	if (unlikely(non_swap_entry(entry))) {
3495		if (is_migration_entry(entry)) {
3496			migration_entry_wait(vma->vm_mm, vmf->pmd,
3497					     vmf->address);
3498		} else if (is_device_exclusive_entry(entry)) {
3499			vmf->page = pfn_swap_entry_to_page(entry);
3500			ret = remove_device_exclusive_entry(vmf);
3501		} else if (is_device_private_entry(entry)) {
3502			vmf->page = pfn_swap_entry_to_page(entry);
3503			ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
3504		} else if (is_hwpoison_entry(entry)) {
3505			ret = VM_FAULT_HWPOISON;
3506		} else {
3507			print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL);
3508			ret = VM_FAULT_SIGBUS;
3509		}
3510		goto out;
3511	}
3512
3513	/* Prevent swapoff from happening to us. */
3514	si = get_swap_device(entry);
3515	if (unlikely(!si))
3516		goto out;
3517
3518	delayacct_set_flag(current, DELAYACCT_PF_SWAPIN);
3519	page = lookup_swap_cache(entry, vma, vmf->address);
3520	swapcache = page;
3521
3522	if (!page) {
3523		if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
3524		    __swap_count(entry) == 1) {
3525			/* skip swapcache */
3526			page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
3527							vmf->address);
3528			if (page) {
3529				__SetPageLocked(page);
3530				__SetPageSwapBacked(page);
3531
3532				if (mem_cgroup_swapin_charge_page(page,
3533					vma->vm_mm, GFP_KERNEL, entry)) {
3534					ret = VM_FAULT_OOM;
3535					goto out_page;
3536				}
3537				mem_cgroup_swapin_uncharge_swap(entry);
3538
3539				shadow = get_shadow_from_swap_cache(entry);
3540				if (shadow)
3541					workingset_refault(page, shadow);
3542
3543				lru_cache_add(page);
3544
3545				/* To provide entry to swap_readpage() */
3546				set_page_private(page, entry.val);
3547				swap_readpage(page, true);
3548				set_page_private(page, 0);
3549			}
3550		} else {
3551			page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
3552						vmf);
3553			swapcache = page;
3554		}
3555
3556		if (!page) {
3557			/*
3558			 * Back out if somebody else faulted in this pte
3559			 * while we released the pte lock.
3560			 */
3561			vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
3562					vmf->address, &vmf->ptl);
3563			if (likely(pte_same(*vmf->pte, vmf->orig_pte)))
3564				ret = VM_FAULT_OOM;
3565			delayacct_clear_flag(current, DELAYACCT_PF_SWAPIN);
3566			goto unlock;
3567		}
3568
3569		/* Had to read the page from swap area: Major fault */
3570		ret = VM_FAULT_MAJOR;
3571		count_vm_event(PGMAJFAULT);
3572		count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
3573	} else if (PageHWPoison(page)) {
3574		/*
3575		 * hwpoisoned dirty swapcache pages are kept for killing
3576		 * owner processes (which may be unknown at hwpoison time)
3577		 */
3578		ret = VM_FAULT_HWPOISON;
3579		delayacct_clear_flag(current, DELAYACCT_PF_SWAPIN);
3580		goto out_release;
3581	}
3582
3583	locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags);
3584
3585	delayacct_clear_flag(current, DELAYACCT_PF_SWAPIN);
3586	if (!locked) {
3587		ret |= VM_FAULT_RETRY;
3588		goto out_release;
3589	}
3590
3591	/*
3592	 * Make sure try_to_free_swap or reuse_swap_page or swapoff did not
3593	 * release the swapcache from under us.  The page pin, and pte_same
3594	 * test below, are not enough to exclude that.  Even if it is still
3595	 * swapcache, we need to check that the page's swap has not changed.
3596	 */
3597	if (unlikely((!PageSwapCache(page) ||
3598			page_private(page) != entry.val)) && swapcache)
3599		goto out_page;
3600
3601	page = ksm_might_need_to_copy(page, vma, vmf->address);
3602	if (unlikely(!page)) {
3603		ret = VM_FAULT_OOM;
3604		page = swapcache;
3605		goto out_page;
3606	}
3607
3608	cgroup_throttle_swaprate(page, GFP_KERNEL);
3609
3610	/*
3611	 * Back out if somebody else already faulted in this pte.
3612	 */
3613	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3614			&vmf->ptl);
3615	if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte)))
3616		goto out_nomap;
3617
3618	if (unlikely(!PageUptodate(page))) {
3619		ret = VM_FAULT_SIGBUS;
3620		goto out_nomap;
3621	}
3622
3623	/*
3624	 * The page isn't present yet, go ahead with the fault.
3625	 *
3626	 * Be careful about the sequence of operations here.
3627	 * To get its accounting right, reuse_swap_page() must be called
3628	 * while the page is counted on swap but not yet in mapcount i.e.
3629	 * before page_add_anon_rmap() and swap_free(); try_to_free_swap()
3630	 * must be called after the swap_free(), or it will never succeed.
3631	 */
3632
3633	inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
3634	dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS);
3635	pte = mk_pte(page, vma->vm_page_prot);
3636	if ((vmf->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) {
3637		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
3638		vmf->flags &= ~FAULT_FLAG_WRITE;
3639		ret |= VM_FAULT_WRITE;
3640		exclusive = RMAP_EXCLUSIVE;
3641	}
3642	flush_icache_page(vma, page);
3643	if (pte_swp_soft_dirty(vmf->orig_pte))
3644		pte = pte_mksoft_dirty(pte);
3645	if (pte_swp_uffd_wp(vmf->orig_pte)) {
3646		pte = pte_mkuffd_wp(pte);
3647		pte = pte_wrprotect(pte);
3648	}
3649	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
3650	arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte);
3651	vmf->orig_pte = pte;
3652
3653	/* ksm created a completely new copy */
3654	if (unlikely(page != swapcache && swapcache)) {
3655		page_add_new_anon_rmap(page, vma, vmf->address, false);
3656		lru_cache_add_inactive_or_unevictable(page, vma);
3657	} else {
3658		do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
3659	}
3660
3661	swap_free(entry);
3662	if (mem_cgroup_swap_full(page) ||
3663	    (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
3664		try_to_free_swap(page);
3665	unlock_page(page);
3666	if (page != swapcache && swapcache) {
3667		/*
3668		 * Hold the lock to prevent the swap entry from being
3669		 * reused until we take the PT lock for the pte_same()
3670		 * check (to avoid false positives from pte_same). For
3671		 * further safety, release the lock after the swap_free()
3672		 * so that the swap count won't change under a parallel
3673		 * locked swapcache.
3674		 */
3675		unlock_page(swapcache);
3676		put_page(swapcache);
3677	}
3678
3679	if (vmf->flags & FAULT_FLAG_WRITE) {
3680		ret |= do_wp_page(vmf);
3681		if (ret & VM_FAULT_ERROR)
3682			ret &= VM_FAULT_ERROR;
3683		goto out;
3684	}
3685
3686	/* No need to invalidate - it was non-present before */
3687	update_mmu_cache(vma, vmf->address, vmf->pte);
3688unlock:
3689	pte_unmap_unlock(vmf->pte, vmf->ptl);
3690out:
3691	if (si)
3692		put_swap_device(si);
3693	return ret;
3694out_nomap:
3695	pte_unmap_unlock(vmf->pte, vmf->ptl);
3696out_page:
3697	unlock_page(page);
3698out_release:
3699	put_page(page);
3700	if (page != swapcache && swapcache) {
3701		unlock_page(swapcache);
3702		put_page(swapcache);
3703	}
3704	if (si)
3705		put_swap_device(si);
3706	return ret;
3707}
3708
3709/*
3710 * We enter with non-exclusive mmap_lock (to exclude vma changes,
3711 * but allow concurrent faults), and pte mapped but not yet locked.
3712 * We return with mmap_lock still held, but pte unmapped and unlocked.
3713 */
3714static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
3715{
3716	struct vm_area_struct *vma = vmf->vma;
3717	struct page *page;
3718	vm_fault_t ret = 0;
3719	pte_t entry;
3720
3721	/* File mapping without ->vm_ops ? */
3722	if (vma->vm_flags & VM_SHARED)
3723		return VM_FAULT_SIGBUS;
3724
3725	/*
3726	 * Use pte_alloc() instead of pte_alloc_map().  We can't run
3727	 * pte_offset_map() on pmds where a huge pmd might be created
3728	 * from a different thread.
3729	 *
3730	 * pte_alloc_map() is safe to use under mmap_write_lock(mm) or when
3731	 * parallel threads are excluded by other means.
3732	 *
3733	 * Here we only have mmap_read_lock(mm).
3734	 */
3735	if (pte_alloc(vma->vm_mm, vmf->pmd))
3736		return VM_FAULT_OOM;
3737
3738	/* See comment in handle_pte_fault() */
3739	if (unlikely(pmd_trans_unstable(vmf->pmd)))
3740		return 0;
3741
3742	/* Use the zero-page for reads */
3743	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
3744			!mm_forbids_zeropage(vma->vm_mm)) {
3745		entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
3746						vma->vm_page_prot));
3747		vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
3748				vmf->address, &vmf->ptl);
3749		if (!pte_none(*vmf->pte)) {
3750			update_mmu_tlb(vma, vmf->address, vmf->pte);
3751			goto unlock;
3752		}
3753		ret = check_stable_address_space(vma->vm_mm);
3754		if (ret)
3755			goto unlock;
3756		/* Deliver the page fault to userland, check inside PT lock */
3757		if (userfaultfd_missing(vma)) {
3758			pte_unmap_unlock(vmf->pte, vmf->ptl);
3759			return handle_userfault(vmf, VM_UFFD_MISSING);
3760		}
3761		goto setpte;
3762	}
3763
3764	/* Allocate our own private page. */
3765	if (unlikely(anon_vma_prepare(vma)))
3766		goto oom;
3767	page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
3768	if (!page)
3769		goto oom;
3770
3771	if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL))
3772		goto oom_free_page;
3773	cgroup_throttle_swaprate(page, GFP_KERNEL);
3774
3775	/*
3776	 * The memory barrier inside __SetPageUptodate makes sure that
3777	 * preceding stores to the page contents become visible before
3778	 * the set_pte_at() write.
3779	 */
3780	__SetPageUptodate(page);
3781
3782	entry = mk_pte(page, vma->vm_page_prot);
3783	entry = pte_sw_mkyoung(entry);
3784	if (vma->vm_flags & VM_WRITE)
3785		entry = pte_mkwrite(pte_mkdirty(entry));
3786
3787	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3788			&vmf->ptl);
3789	if (!pte_none(*vmf->pte)) {
3790		update_mmu_cache(vma, vmf->address, vmf->pte);
3791		goto release;
3792	}
3793
3794	ret = check_stable_address_space(vma->vm_mm);
3795	if (ret)
3796		goto release;
3797
3798	/* Deliver the page fault to userland, check inside PT lock */
3799	if (userfaultfd_missing(vma)) {
3800		pte_unmap_unlock(vmf->pte, vmf->ptl);
3801		put_page(page);
3802		return handle_userfault(vmf, VM_UFFD_MISSING);
3803	}
3804
3805	inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
3806	page_add_new_anon_rmap(page, vma, vmf->address, false);
3807	lru_cache_add_inactive_or_unevictable(page, vma);
3808setpte:
3809	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
3810
3811	/* No need to invalidate - it was non-present before */
3812	update_mmu_cache(vma, vmf->address, vmf->pte);
3813unlock:
3814	pte_unmap_unlock(vmf->pte, vmf->ptl);
3815	return ret;
3816release:
3817	put_page(page);
3818	goto unlock;
3819oom_free_page:
3820	put_page(page);
3821oom:
3822	return VM_FAULT_OOM;
3823}
3824
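/*
 * Editorial note on the flow above: the first read fault on untouched
 * anonymous memory maps the shared zero page read-only and allocates
 * nothing; a write fault (either initially, or later via do_wp_page())
 * is what allocates a real zeroed page and charges it to the memcg.
 */
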
3825/*
3826 * The mmap_lock must have been held on entry, and may have been
3827 * released depending on flags and vma->vm_ops->fault() return value.
3828 * See filemap_fault() and __lock_page_or_retry().
3829 */
3830static vm_fault_t __do_fault(struct vm_fault *vmf)
3831{
3832	struct vm_area_struct *vma = vmf->vma;
3833	vm_fault_t ret;
3834
3835	/*
3836	 * Preallocate pte before we take page_lock because this might lead to
3837	 * deadlocks for memcg reclaim which waits for pages under writeback:
3838	 *				lock_page(A)
3839	 *				SetPageWriteback(A)
3840	 *				unlock_page(A)
3841	 * lock_page(B)
3842	 *				lock_page(B)
3843	 * pte_alloc_one
3844	 *   shrink_page_list
3845	 *     wait_on_page_writeback(A)
3846	 *				SetPageWriteback(B)
3847	 *				unlock_page(B)
3848	 *				# flush A, B to clear the writeback
3849	 */
3850	if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) {
3851		vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
3852		if (!vmf->prealloc_pte)
3853			return VM_FAULT_OOM;
3854		smp_wmb(); /* See comment in __pte_alloc() */
3855	}
3856
3857	ret = vma->vm_ops->fault(vmf);
3858	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
3859			    VM_FAULT_DONE_COW)))
3860		return ret;
3861
3862	if (unlikely(PageHWPoison(vmf->page))) {
3863		if (ret & VM_FAULT_LOCKED)
3864			unlock_page(vmf->page);
3865		put_page(vmf->page);
3866		vmf->page = NULL;
3867		return VM_FAULT_HWPOISON;
3868	}
3869
3870	if (unlikely(!(ret & VM_FAULT_LOCKED)))
3871		lock_page(vmf->page);
3872	else
3873		VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page);
3874
3875	return ret;
3876}
3877
3878#ifdef CONFIG_TRANSPARENT_HUGEPAGE
3879static void deposit_prealloc_pte(struct vm_fault *vmf)
3880{
3881	struct vm_area_struct *vma = vmf->vma;
3882
3883	pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
3884	/*
3885	 * We are going to consume the prealloc table,
3886	 * count that as nr_ptes.
3887	 */
3888	mm_inc_nr_ptes(vma->vm_mm);
3889	vmf->prealloc_pte = NULL;
3890}
3891
3892vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
3893{
3894	struct vm_area_struct *vma = vmf->vma;
3895	bool write = vmf->flags & FAULT_FLAG_WRITE;
3896	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
3897	pmd_t entry;
3898	int i;
3899	vm_fault_t ret = VM_FAULT_FALLBACK;
3900
3901	if (!transhuge_vma_suitable(vma, haddr))
3902		return ret;
3903
3904	page = compound_head(page);
3905	if (compound_order(page) != HPAGE_PMD_ORDER)
3906		return ret;
3907
3908	/*
3909	 * Archs like ppc64 need additional space to store information
3910	 * related to pte entry. Use the preallocated table for that.
3911	 */
3912	if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) {
3913		vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
3914		if (!vmf->prealloc_pte)
3915			return VM_FAULT_OOM;
3916		smp_wmb(); /* See comment in __pte_alloc() */
3917	}
3918
3919	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
3920	if (unlikely(!pmd_none(*vmf->pmd)))
3921		goto out;
3922
3923	for (i = 0; i < HPAGE_PMD_NR; i++)
3924		flush_icache_page(vma, page + i);
3925
3926	entry = mk_huge_pmd(page, vma->vm_page_prot);
3927	if (write)
3928		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
3929
3930	add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR);
3931	page_add_file_rmap(page, true);
3932	/*
3933	 * deposit and withdraw with pmd lock held
3934	 */
3935	if (arch_needs_pgtable_deposit())
3936		deposit_prealloc_pte(vmf);
3937
3938	set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
3939
3940	update_mmu_cache_pmd(vma, haddr, vmf->pmd);
3941
3942	/* fault is handled */
3943	ret = 0;
3944	count_vm_event(THP_FILE_MAPPED);
3945out:
3946	spin_unlock(vmf->ptl);
3947	return ret;
3948}
3949#else
3950vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
3951{
3952	return VM_FAULT_FALLBACK;
3953}
3954#endif
3955
3956void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr)
3957{
3958	struct vm_area_struct *vma = vmf->vma;
3959	bool write = vmf->flags & FAULT_FLAG_WRITE;
3960	bool prefault = vmf->address != addr;
3961	pte_t entry;
3962
3963	flush_icache_page(vma, page);
3964	entry = mk_pte(page, vma->vm_page_prot);
3965
3966	if (prefault && arch_wants_old_prefaulted_pte())
3967		entry = pte_mkold(entry);
3968	else
3969		entry = pte_sw_mkyoung(entry);
3970
3971	if (write)
3972		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
3973	/* copy-on-write page */
3974	if (write && !(vma->vm_flags & VM_SHARED)) {
3975		inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
3976		page_add_new_anon_rmap(page, vma, addr, false);
3977		lru_cache_add_inactive_or_unevictable(page, vma);
3978	} else {
3979		inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
3980		page_add_file_rmap(page, false);
3981	}
3982	set_pte_at(vma->vm_mm, addr, vmf->pte, entry);
3983}
3984
3985/**
3986 * finish_fault - finish page fault once we have prepared the page to fault
3987 *
3988 * @vmf: structure describing the fault
3989 *
3990 * This function handles all that is needed to finish a page fault once the
3991 * page to fault in is prepared. It handles locking of PTEs, inserts PTE for
3992 * given page, adds reverse page mapping, handles memcg charges and LRU
3993 * addition.
3994 *
3995 * The function expects the page to be locked and on success it consumes a
3996 * reference of a page being mapped (for the PTE which maps it).
3997 *
3998 * Return: %0 on success, %VM_FAULT_ code in case of error.
3999 */
4000vm_fault_t finish_fault(struct vm_fault *vmf)
4001{
4002	struct vm_area_struct *vma = vmf->vma;
4003	struct page *page;
4004	vm_fault_t ret;
4005
4006	/* Did we COW the page? */
4007	if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
4008		page = vmf->cow_page;
4009	else
4010		page = vmf->page;
4011
4012	/*
4013	 * check even for read faults because we might have lost our CoWed
4014	 * page
4015	 */
4016	if (!(vma->vm_flags & VM_SHARED)) {
4017		ret = check_stable_address_space(vma->vm_mm);
4018		if (ret)
4019			return ret;
4020	}
4021
4022	if (pmd_none(*vmf->pmd)) {
4023		if (PageTransCompound(page)) {
4024			ret = do_set_pmd(vmf, page);
4025			if (ret != VM_FAULT_FALLBACK)
4026				return ret;
4027		}
4028
4029		if (vmf->prealloc_pte) {
4030			vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
4031			if (likely(pmd_none(*vmf->pmd))) {
4032				mm_inc_nr_ptes(vma->vm_mm);
4033				pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
4034				vmf->prealloc_pte = NULL;
4035			}
4036			spin_unlock(vmf->ptl);
4037		} else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) {
4038			return VM_FAULT_OOM;
4039		}
4040	}
4041
4042	/* See comment in handle_pte_fault() */
4043	if (pmd_devmap_trans_unstable(vmf->pmd))
4044		return 0;
4045
4046	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
4047				      vmf->address, &vmf->ptl);
4048	ret = 0;
4049	/* Re-check under ptl */
4050	if (likely(pte_none(*vmf->pte)))
4051		do_set_pte(vmf, page, vmf->address);
4052	else
4053		ret = VM_FAULT_NOPAGE;
4054
4055	update_mmu_tlb(vma, vmf->address, vmf->pte);
4056	pte_unmap_unlock(vmf->pte, vmf->ptl);
4057	return ret;
4058}
4059
4060static unsigned long fault_around_bytes __read_mostly =
4061	rounddown_pow_of_two(65536);
4062
4063#ifdef CONFIG_DEBUG_FS
4064static int fault_around_bytes_get(void *data, u64 *val)
4065{
4066	*val = fault_around_bytes;
4067	return 0;
4068}
4069
4070/*
4071 * fault_around_bytes must be rounded down to the nearest page order as it's
4072 * what do_fault_around() expects to see.
4073 */
4074static int fault_around_bytes_set(void *data, u64 val)
4075{
4076	if (val / PAGE_SIZE > PTRS_PER_PTE)
4077		return -EINVAL;
4078	if (val > PAGE_SIZE)
4079		fault_around_bytes = rounddown_pow_of_two(val);
4080	else
4081		fault_around_bytes = PAGE_SIZE; /* rounddown_pow_of_two(0) is undefined */
4082	return 0;
4083}
4084DEFINE_DEBUGFS_ATTRIBUTE(fault_around_bytes_fops,
4085		fault_around_bytes_get, fault_around_bytes_set, "%llu\n");
4086
4087static int __init fault_around_debugfs(void)
4088{
4089	debugfs_create_file_unsafe("fault_around_bytes", 0644, NULL, NULL,
4090				   &fault_around_bytes_fops);
4091	return 0;
4092}
4093late_initcall(fault_around_debugfs);
4094#endif
4095
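/*
 * Illustrative example (editorial sketch; values hypothetical, 4KiB
 * pages and PTRS_PER_PTE = 512 assumed): writing 100000 stores
 * rounddown_pow_of_two(100000) = 65536; writing anything <= PAGE_SIZE
 * stores PAGE_SIZE, since rounddown_pow_of_two(0) is undefined; and
 * writing more than 512 pages' worth fails with -EINVAL.
 */
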
4096/*
4097 * do_fault_around() tries to map a few pages around the fault address. The hope
4098 * is that the pages will be needed soon and this will lower the number of
4099 * faults to handle.
4100 *
4101 * It uses vm_ops->map_pages() to map the pages, which skips the page if it's
4102 * not ready to be mapped: not up-to-date, locked, etc.
4103 *
4104 * This function is called with the page table lock taken. In the split ptlock
4105 * case the page table lock protects only those entries which belong to
4106 * the page table corresponding to the fault address.
4107 *
4108 * This function doesn't cross the VMA boundaries, in order to call map_pages()
4109 * only once.
4110 *
4111 * fault_around_bytes defines how many bytes we'll try to map.
4112 * do_fault_around() expects it to be set to a power of two less than or equal
4113 * to PTRS_PER_PTE.
4114 *
4115 * The virtual address of the area that we map is naturally aligned to
4116 * fault_around_bytes rounded down to the machine page size
4117 * (and therefore to page order).  This way it's easier to guarantee
4118 * that we don't cross page table boundaries.
4119 */
4120static vm_fault_t do_fault_around(struct vm_fault *vmf)
4121{
4122	unsigned long address = vmf->address, nr_pages, mask;
4123	pgoff_t start_pgoff = vmf->pgoff;
4124	pgoff_t end_pgoff;
4125	int off;
4126
4127	nr_pages = READ_ONCE(fault_around_bytes) >> PAGE_SHIFT;
4128	mask = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK;
4129
4130	address = max(address & mask, vmf->vma->vm_start);
4131	off = ((vmf->address - address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
4132	start_pgoff -= off;
4133
4134	/*
4135	 *  end_pgoff is either the end of the page table, the end of
4136	 *  the vma or nr_pages from start_pgoff, depending what is nearest.
4137	 */
4138	end_pgoff = start_pgoff -
4139		((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +
4140		PTRS_PER_PTE - 1;
4141	end_pgoff = min3(end_pgoff, vma_pages(vmf->vma) + vmf->vma->vm_pgoff - 1,
4142			start_pgoff + nr_pages - 1);
4143
4144	if (pmd_none(*vmf->pmd)) {
4145		vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
4146		if (!vmf->prealloc_pte)
4147			return VM_FAULT_OOM;
4148		smp_wmb(); /* See comment in __pte_alloc() */
4149	}
4150
4151	return vmf->vma->vm_ops->map_pages(vmf, start_pgoff, end_pgoff);
4152}
4153
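/*
 * Illustrative example (editorial sketch; values hypothetical, 4KiB
 * pages, PTRS_PER_PTE = 512 and fault_around_bytes = 65536 assumed):
 * nr_pages is 16 and mask is ~0xffff, so a fault at 0x7f0000012345
 * starts mapping from the 64KiB-aligned 0x7f0000010000, with
 * start_pgoff moved back by the same 2 pages.  end_pgoff is then
 * clamped to the nearest of: the end of this page table, the end of
 * the VMA, and start_pgoff + 15.  At most 16 pages reach
 * ->map_pages(), and no page-table or VMA boundary is crossed.
 */
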
4154static vm_fault_t do_read_fault(struct vm_fault *vmf)
4155{
4156	struct vm_area_struct *vma = vmf->vma;
4157	vm_fault_t ret = 0;
4158
4159	/*
4160	 * Let's call ->map_pages() first and use ->fault() as fallback
4161	 * if page by the offset is not ready to be mapped (cold cache or
4162	 * something).
4163	 */
4164	if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) {
4165		if (likely(!userfaultfd_minor(vmf->vma))) {
4166			ret = do_fault_around(vmf);
4167			if (ret)
4168				return ret;
4169		}
4170	}
4171
4172	ret = __do_fault(vmf);
4173	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4174		return ret;
4175
4176	ret |= finish_fault(vmf);
4177	unlock_page(vmf->page);
4178	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4179		put_page(vmf->page);
4180	return ret;
4181}
4182
4183static vm_fault_t do_cow_fault(struct vm_fault *vmf)
4184{
4185	struct vm_area_struct *vma = vmf->vma;
4186	vm_fault_t ret;
4187
4188	if (unlikely(anon_vma_prepare(vma)))
4189		return VM_FAULT_OOM;
4190
4191	vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address);
4192	if (!vmf->cow_page)
4193		return VM_FAULT_OOM;
4194
4195	if (mem_cgroup_charge(vmf->cow_page, vma->vm_mm, GFP_KERNEL)) {
4196		put_page(vmf->cow_page);
4197		return VM_FAULT_OOM;
4198	}
4199	cgroup_throttle_swaprate(vmf->cow_page, GFP_KERNEL);
4200
4201	ret = __do_fault(vmf);
4202	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4203		goto uncharge_out;
4204	if (ret & VM_FAULT_DONE_COW)
4205		return ret;
4206
4207	copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
4208	__SetPageUptodate(vmf->cow_page);
4209
4210	ret |= finish_fault(vmf);
4211	unlock_page(vmf->page);
4212	put_page(vmf->page);
4213	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4214		goto uncharge_out;
4215	return ret;
4216uncharge_out:
4217	put_page(vmf->cow_page);
4218	return ret;
4219}
4220
4221static vm_fault_t do_shared_fault(struct vm_fault *vmf)
4222{
4223	struct vm_area_struct *vma = vmf->vma;
4224	vm_fault_t ret, tmp;
4225
4226	ret = __do_fault(vmf);
4227	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4228		return ret;
4229
4230	/*
4231	 * Check if the backing address space wants to know that the page is
4232	 * about to become writable
4233	 */
4234	if (vma->vm_ops->page_mkwrite) {
4235		unlock_page(vmf->page);
4236		tmp = do_page_mkwrite(vmf);
4237		if (unlikely(!tmp ||
4238				(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
4239			put_page(vmf->page);
4240			return tmp;
4241		}
4242	}
4243
4244	ret |= finish_fault(vmf);
4245	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
4246					VM_FAULT_RETRY))) {
4247		unlock_page(vmf->page);
4248		put_page(vmf->page);
4249		return ret;
4250	}
4251
4252	ret |= fault_dirty_shared_page(vmf);
4253	return ret;
4254}
4255
4256/*
4257 * We enter with non-exclusive mmap_lock (to exclude vma changes,
4258 * but allow concurrent faults).
4259 * The mmap_lock may have been released depending on flags and our
4260 * return value.  See filemap_fault() and __lock_page_or_retry().
4261 * If mmap_lock is released, vma may become invalid (for example
4262 * by other thread calling munmap()).
4263 */
4264static vm_fault_t do_fault(struct vm_fault *vmf)
4265{
4266	struct vm_area_struct *vma = vmf->vma;
4267	struct mm_struct *vm_mm = vma->vm_mm;
4268	vm_fault_t ret;
4269
4270	/*
4271	 * The VMA was not fully populated on mmap() or missing VM_DONTEXPAND
4272	 */
4273	if (!vma->vm_ops->fault) {
4274		/*
4275		 * If we find a migration pmd entry or a none pmd entry, which
4276		 * should never happen, return SIGBUS
4277		 */
4278		if (unlikely(!pmd_present(*vmf->pmd)))
4279			ret = VM_FAULT_SIGBUS;
4280		else {
4281			vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm,
4282						       vmf->pmd,
4283						       vmf->address,
4284						       &vmf->ptl);
4285			/*
4286			 * Make sure this is not a temporary clearing of pte
4287			 * by holding ptl and checking again. An R/M/W update
4288			 * of the pte takes the ptl, clears the pte (so that
4289			 * hardware cannot modify it concurrently), and then
4290			 * writes the updated value.
4291			 */
4292			if (unlikely(pte_none(*vmf->pte)))
4293				ret = VM_FAULT_SIGBUS;
4294			else
4295				ret = VM_FAULT_NOPAGE;
4296
4297			pte_unmap_unlock(vmf->pte, vmf->ptl);
4298		}
4299	} else if (!(vmf->flags & FAULT_FLAG_WRITE))
4300		ret = do_read_fault(vmf);
4301	else if (!(vma->vm_flags & VM_SHARED))
4302		ret = do_cow_fault(vmf);
4303	else
4304		ret = do_shared_fault(vmf);
4305
4306	/* preallocated pagetable is unused: free it */
4307	if (vmf->prealloc_pte) {
4308		pte_free(vm_mm, vmf->prealloc_pte);
4309		vmf->prealloc_pte = NULL;
4310	}
4311	return ret;
4312}
4313
4314int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
4315		      unsigned long addr, int page_nid, int *flags)
4316{
4317	get_page(page);
4318
4319	count_vm_numa_event(NUMA_HINT_FAULTS);
4320	if (page_nid == numa_node_id()) {
4321		count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
4322		*flags |= TNF_FAULT_LOCAL;
4323	}
4324
4325	return mpol_misplaced(page, vma, addr);
4326}
4327
4328static vm_fault_t do_numa_page(struct vm_fault *vmf)
4329{
4330	struct vm_area_struct *vma = vmf->vma;
4331	struct page *page = NULL;
4332	int page_nid = NUMA_NO_NODE;
4333	int last_cpupid;
4334	int target_nid;
4335	pte_t pte, old_pte;
4336	bool was_writable = pte_savedwrite(vmf->orig_pte);
4337	int flags = 0;
4338
4339	/*
4340	 * The "pte" at this point cannot be used safely without
4341	 * validation through pte_unmap_same(). It's of NUMA type but
4342	 * the pfn may be garbage if the read is not atomic.
4343	 */
4344	vmf->ptl = pte_lockptr(vma->vm_mm, vmf->pmd);
4345	spin_lock(vmf->ptl);
4346	if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) {
4347		pte_unmap_unlock(vmf->pte, vmf->ptl);
4348		goto out;
4349	}
4350
4351	/* Get the normal PTE  */
4352	old_pte = ptep_get(vmf->pte);
4353	pte = pte_modify(old_pte, vma->vm_page_prot);
4354
4355	page = vm_normal_page(vma, vmf->address, pte);
4356	if (!page)
4357		goto out_map;
4358
4359	/* TODO: handle PTE-mapped THP */
4360	if (PageCompound(page))
4361		goto out_map;
4362
4363	/*
4364	 * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
4365	 * much anyway since they can be in shared cache state. This misses
4366	 * the case where a mapping is writable but the process never writes
4367	 * to it, while pte_write gets cleared during protection updates and
4368	 * pte_dirty has unpredictable behaviour between PTE scan updates,
4369	 * background writeback, dirty balancing and application behaviour.
4370	 */
4371	if (!was_writable)
4372		flags |= TNF_NO_GROUP;
4373
4374	/*
4375	 * Flag if the page is shared between multiple address spaces. This
4376	 * is later used when determining whether to group tasks together
4377	 */
4378	if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED))
4379		flags |= TNF_SHARED;
4380
4381	last_cpupid = page_cpupid_last(page);
4382	page_nid = page_to_nid(page);
4383	target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid,
4384			&flags);
4385	if (target_nid == NUMA_NO_NODE) {
4386		put_page(page);
4387		goto out_map;
4388	}
4389	pte_unmap_unlock(vmf->pte, vmf->ptl);
4390
4391	/* Migrate to the requested node */
4392	if (migrate_misplaced_page(page, vma, target_nid)) {
4393		page_nid = target_nid;
4394		flags |= TNF_MIGRATED;
4395	} else {
4396		flags |= TNF_MIGRATE_FAIL;
4397		vmf->pte = pte_offset_map(vmf->pmd, vmf->address);
4398		spin_lock(vmf->ptl);
4399		if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) {
4400			pte_unmap_unlock(vmf->pte, vmf->ptl);
4401			goto out;
4402		}
4403		goto out_map;
4404	}
4405
4406out:
4407	if (page_nid != NUMA_NO_NODE)
4408		task_numa_fault(last_cpupid, page_nid, 1, flags);
4409	return 0;
4410out_map:
4411	/*
4412	 * Make it present again, depending on how arch implements
4413	 * non-accessible ptes, some can allow access by kernel mode.
4414	 */
4415	old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte);
4416	pte = pte_modify(old_pte, vma->vm_page_prot);
4417	pte = pte_mkyoung(pte);
4418	if (was_writable)
4419		pte = pte_mkwrite(pte);
4420	ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte);
4421	update_mmu_cache(vma, vmf->address, vmf->pte);
4422	pte_unmap_unlock(vmf->pte, vmf->ptl);
4423	goto out;
4424}
4425
4426static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf)
4427{
4428	if (vma_is_anonymous(vmf->vma))
4429		return do_huge_pmd_anonymous_page(vmf);
4430	if (vmf->vma->vm_ops->huge_fault)
4431		return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
4432	return VM_FAULT_FALLBACK;
4433}
4434
4435/* `inline' is required to avoid gcc 4.1.2 build error */
4436static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf)
4437{
4438	if (vma_is_anonymous(vmf->vma)) {
4439		if (userfaultfd_huge_pmd_wp(vmf->vma, vmf->orig_pmd))
4440			return handle_userfault(vmf, VM_UFFD_WP);
4441		return do_huge_pmd_wp_page(vmf);
4442	}
4443	if (vmf->vma->vm_ops->huge_fault) {
4444		vm_fault_t ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
4445
4446		if (!(ret & VM_FAULT_FALLBACK))
4447			return ret;
4448	}
4449
4450	/* COW or write-notify handled on pte level: split pmd. */
4451	__split_huge_pmd(vmf->vma, vmf->pmd, vmf->address, false, NULL);
4452
4453	return VM_FAULT_FALLBACK;
4454}
4455
4456static vm_fault_t create_huge_pud(struct vm_fault *vmf)
4457{
4458#if defined(CONFIG_TRANSPARENT_HUGEPAGE) &&			\
4459	defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
4460	/* No support for anonymous transparent PUD pages yet */
4461	if (vma_is_anonymous(vmf->vma))
4462		goto split;
4463	if (vmf->vma->vm_ops->huge_fault) {
4464		vm_fault_t ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
4465
4466		if (!(ret & VM_FAULT_FALLBACK))
4467			return ret;
4468	}
4469split:
4470	/* COW or write-notify not handled on PUD level: split pud. */
4471	__split_huge_pud(vmf->vma, vmf->pud, vmf->address);
4472#endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
4473	return VM_FAULT_FALLBACK;
4474}
4475
4476static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
4477{
4478#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4479	/* No support for anonymous transparent PUD pages yet */
4480	if (vma_is_anonymous(vmf->vma))
4481		return VM_FAULT_FALLBACK;
4482	if (vmf->vma->vm_ops->huge_fault)
4483		return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
4484#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
4485	return VM_FAULT_FALLBACK;
4486}
4487
4488/*
4489 * These routines also need to handle stuff like marking pages dirty
4490 * and/or accessed for architectures that don't do it in hardware (most
4491 * RISC architectures).  The early dirtying is also good on the i386.
4492 *
4493 * There is also a hook called "update_mmu_cache()" that architectures
4494 * with external mmu caches can use to update those (ie the Sparc or
4495 * PowerPC hashed page tables that act as extended TLBs).
4496 *
4497 * We enter with non-exclusive mmap_lock (to exclude vma changes, but allow
4498 * concurrent faults).
4499 *
4500 * The mmap_lock may have been released depending on flags and our return value.
4501 * See filemap_fault() and __lock_page_or_retry().
4502 */
4503static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
4504{
4505	pte_t entry;
4506
4507	if (unlikely(pmd_none(*vmf->pmd))) {
4508		/*
4509		 * Leave __pte_alloc() until later: because vm_ops->fault may
4510		 * want to allocate huge page, and if we expose page table
4511		 * for an instant, it will be difficult to retract from
4512		 * concurrent faults and from rmap lookups.
4513		 */
4514		vmf->pte = NULL;
4515	} else {
4516		/*
4517		 * If a huge pmd materialized under us just retry later.  Use
4518		 * pmd_trans_unstable() via pmd_devmap_trans_unstable() instead
4519		 * of pmd_trans_huge() to ensure the pmd didn't become
4520		 * pmd_trans_huge under us and then back to pmd_none, as a
4521		 * result of MADV_DONTNEED running immediately after a huge pmd
4522		 * fault in a different thread of this mm, in turn leading to a
4523		 * misleading pmd_trans_huge() retval. All we have to ensure is
4524		 * that it is a regular pmd that we can walk with
4525		 * pte_offset_map() and we can do that through an atomic read
4526		 * in C, which is what pmd_trans_unstable() provides.
4527		 */
4528		if (pmd_devmap_trans_unstable(vmf->pmd))
4529			return 0;
4530		/*
4531		 * A regular pmd is established and it can't morph into a huge
4532		 * pmd from under us anymore at this point because we hold the
4533		 * mmap_lock read mode and khugepaged takes it in write mode.
4534		 * So now it's safe to run pte_offset_map().
4535		 */
4536		vmf->pte = pte_offset_map(vmf->pmd, vmf->address);
4537		vmf->orig_pte = *vmf->pte;
4538
4539		/*
4540		 * some architectures can have larger ptes than wordsize,
4541		 * e.g. ppc44x-defconfig has CONFIG_PTE_64BIT=y and
4542		 * CONFIG_32BIT=y, so READ_ONCE cannot guarantee atomic
4543		 * accesses.  The code below just needs a consistent view
4544		 * for the ifs and we later double check anyway with the
4545		 * ptl lock held. So here a barrier will do.
4546		 */
4547		barrier();
4548		if (pte_none(vmf->orig_pte)) {
4549			pte_unmap(vmf->pte);
4550			vmf->pte = NULL;
4551		}
4552	}
4553
4554	if (!vmf->pte) {
4555		if (vma_is_anonymous(vmf->vma))
4556			return do_anonymous_page(vmf);
4557		else
4558			return do_fault(vmf);
4559	}
4560
4561	if (!pte_present(vmf->orig_pte))
4562		return do_swap_page(vmf);
4563
4564	if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
4565		return do_numa_page(vmf);
4566
4567	vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
4568	spin_lock(vmf->ptl);
4569	entry = vmf->orig_pte;
4570	if (unlikely(!pte_same(*vmf->pte, entry))) {
4571		update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
4572		goto unlock;
4573	}
4574	if (vmf->flags & FAULT_FLAG_WRITE) {
4575		if (!pte_write(entry))
4576			return do_wp_page(vmf);
4577		entry = pte_mkdirty(entry);
4578	}
4579	entry = pte_mkyoung(entry);
4580	if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
4581				vmf->flags & FAULT_FLAG_WRITE)) {
4582		update_mmu_cache(vmf->vma, vmf->address, vmf->pte);
4583	} else {
4584		/* Skip spurious TLB flush for retried page fault */
4585		if (vmf->flags & FAULT_FLAG_TRIED)
4586			goto unlock;
4587		/*
4588		 * This is needed only for protection faults but the arch code
4589		 * is not yet telling us if this is a protection fault or not.
4590		 * This still avoids useless tlb flushes for .text page faults
4591		 * with threads.
4592		 */
4593		if (vmf->flags & FAULT_FLAG_WRITE)
4594			flush_tlb_fix_spurious_fault(vmf->vma, vmf->address);
4595	}
4596unlock:
4597	pte_unmap_unlock(vmf->pte, vmf->ptl);
4598	return 0;
4599}
4600
4601/*
4602 * By the time we get here, we already hold the mm semaphore
4603 *
4604 * The mmap_lock may have been released depending on flags and our
4605 * return value.  See filemap_fault() and __lock_page_or_retry().
4606 */
4607static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
4608		unsigned long address, unsigned int flags)
4609{
4610	struct vm_fault vmf = {
4611		.vma = vma,
4612		.address = address & PAGE_MASK,
4613		.flags = flags,
4614		.pgoff = linear_page_index(vma, address),
4615		.gfp_mask = __get_fault_gfp_mask(vma),
4616	};
4617	unsigned int dirty = flags & FAULT_FLAG_WRITE;
4618	struct mm_struct *mm = vma->vm_mm;
4619	pgd_t *pgd;
4620	p4d_t *p4d;
4621	vm_fault_t ret;
4622
4623	pgd = pgd_offset(mm, address);
4624	p4d = p4d_alloc(mm, pgd, address);
4625	if (!p4d)
4626		return VM_FAULT_OOM;
4627
4628	vmf.pud = pud_alloc(mm, p4d, address);
4629	if (!vmf.pud)
4630		return VM_FAULT_OOM;
4631retry_pud:
4632	if (pud_none(*vmf.pud) && __transparent_hugepage_enabled(vma)) {
4633		ret = create_huge_pud(&vmf);
4634		if (!(ret & VM_FAULT_FALLBACK))
4635			return ret;
4636	} else {
4637		pud_t orig_pud = *vmf.pud;
4638
4639		barrier();
4640		if (pud_trans_huge(orig_pud) || pud_devmap(orig_pud)) {
4641
4642			/* NUMA case for anonymous PUDs would go here */
4643
4644			if (dirty && !pud_write(orig_pud)) {
4645				ret = wp_huge_pud(&vmf, orig_pud);
4646				if (!(ret & VM_FAULT_FALLBACK))
4647					return ret;
4648			} else {
4649				huge_pud_set_accessed(&vmf, orig_pud);
4650				return 0;
4651			}
4652		}
4653	}
4654
4655	vmf.pmd = pmd_alloc(mm, vmf.pud, address);
4656	if (!vmf.pmd)
4657		return VM_FAULT_OOM;
4658
4659	/* Huge pud page fault raced with pmd_alloc? */
4660	if (pud_trans_unstable(vmf.pud))
4661		goto retry_pud;
4662
4663	if (pmd_none(*vmf.pmd) && __transparent_hugepage_enabled(vma)) {
4664		ret = create_huge_pmd(&vmf);
4665		if (!(ret & VM_FAULT_FALLBACK))
4666			return ret;
4667	} else {
4668		vmf.orig_pmd = *vmf.pmd;
4669
4670		barrier();
4671		if (unlikely(is_swap_pmd(vmf.orig_pmd))) {
4672			VM_BUG_ON(thp_migration_supported() &&
4673					  !is_pmd_migration_entry(vmf.orig_pmd));
4674			if (is_pmd_migration_entry(vmf.orig_pmd))
4675				pmd_migration_entry_wait(mm, vmf.pmd);
4676			return 0;
4677		}
4678		if (pmd_trans_huge(vmf.orig_pmd) || pmd_devmap(vmf.orig_pmd)) {
4679			if (pmd_protnone(vmf.orig_pmd) && vma_is_accessible(vma))
4680				return do_huge_pmd_numa_page(&vmf);
4681
4682			if (dirty && !pmd_write(vmf.orig_pmd)) {
4683				ret = wp_huge_pmd(&vmf);
4684				if (!(ret & VM_FAULT_FALLBACK))
4685					return ret;
4686			} else {
4687				huge_pmd_set_accessed(&vmf);
4688				return 0;
4689			}
4690		}
4691	}
4692
4693	return handle_pte_fault(&vmf);
4694}
4695
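/*
 * Editorial summary of the walk above: the fault descends
 * pgd -> p4d -> pud -> pmd, giving transparent huge pages a chance to
 * be created or serviced at the pud and pmd levels; only when no huge
 * mapping handles the fault does it fall through to handle_pte_fault().
 */
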
4696/**
4697 * mm_account_fault - Do page fault accounting
4698 *
4699 * @regs: the pt_regs struct pointer.  When set to NULL, will skip accounting
4700 *        of perf event counters, but we'll still do the per-task accounting to
4701 *        the task that triggered this page fault.
4702 * @address: the faulted address.
4703 * @flags: the fault flags.
4704 * @ret: the fault retcode.
4705 *
4706 * This will take care of most of the page fault accounting.  Meanwhile, it
4707 * will also include the PERF_COUNT_SW_PAGE_FAULTS_[MAJ|MIN] perf counter
4708 * updates.  However, note that the handling of PERF_COUNT_SW_PAGE_FAULTS should
4709 * still be in per-arch page fault handlers at the entry of page fault.
4710 */
4711static inline void mm_account_fault(struct pt_regs *regs,
4712				    unsigned long address, unsigned int flags,
4713				    vm_fault_t ret)
4714{
4715	bool major;
4716
4717	/*
4718	 * We don't do accounting for some specific faults:
4719	 *
4720	 * - Unsuccessful faults (e.g. when the address wasn't valid).  That
4721	 *   includes arch_vma_access_permitted() failing before reaching here.
4722	 *   So this is not a "this many hardware page faults" counter.  We
4723	 *   should use the hw profiling for that.
4724	 *
4725	 * - Incomplete faults (VM_FAULT_RETRY).  They will only be counted
4726	 *   once they're completed.
4727	 */
4728	if (ret & (VM_FAULT_ERROR | VM_FAULT_RETRY))
4729		return;
4730
4731	/*
4732	 * We define the fault as a major fault when the final successful fault
4733	 * is VM_FAULT_MAJOR, or if it retried (which implies that we couldn't
4734	 * handle it immediately previously).
4735	 */
4736	major = (ret & VM_FAULT_MAJOR) || (flags & FAULT_FLAG_TRIED);
4737
4738	if (major)
4739		current->maj_flt++;
4740	else
4741		current->min_flt++;
4742
4743	/*
4744	 * If the fault is done for GUP, regs will be NULL.  We only do the
4745	 * accounting for the per-thread fault counters of the task that
4746	 * triggered the fault, and we skip the perf event updates.
4747	 */
4748	if (!regs)
4749		return;
4750
4751	if (major)
4752		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
4753	else
4754		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
4755}
4756
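/*
 * Example: a fault that returns VM_FAULT_RETRY is not counted here;
 * when the retried fault completes, FAULT_FLAG_TRIED is set, so it is
 * accounted exactly once, as a major fault.
 */
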
4757/*
4758 * By the time we get here, we already hold the mm semaphore
4759 *
4760 * The mmap_lock may have been released depending on flags and our
4761 * return value.  See filemap_fault() and __lock_page_or_retry().
4762 */
4763vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
4764			   unsigned int flags, struct pt_regs *regs)
4765{
4766	vm_fault_t ret;
4767
4768	__set_current_state(TASK_RUNNING);
4769
4770	count_vm_event(PGFAULT);
4771	count_memcg_event_mm(vma->vm_mm, PGFAULT);
4772
4773	/* do counter updates before entering really critical section. */
4774	check_sync_rss_stat(current);
4775
4776	if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
4777					    flags & FAULT_FLAG_INSTRUCTION,
4778					    flags & FAULT_FLAG_REMOTE))
4779		return VM_FAULT_SIGSEGV;
4780
4781	/*
4782	 * Enable the memcg OOM handling for faults triggered in user
4783	 * space.  Kernel faults are handled more gracefully.
4784	 */
4785	if (flags & FAULT_FLAG_USER)
4786		mem_cgroup_enter_user_fault();
4787
4788	if (unlikely(is_vm_hugetlb_page(vma)))
4789		ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
4790	else
4791		ret = __handle_mm_fault(vma, address, flags);
4792
4793	if (flags & FAULT_FLAG_USER) {
4794		mem_cgroup_exit_user_fault();
4795		/*
4796		 * The task may have entered a memcg OOM situation but
4797		 * if the allocation error was handled gracefully (no
4798		 * VM_FAULT_OOM), there is no need to kill anything.
4799		 * Just clean up the OOM state peacefully.
4800		 */
4801		if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
4802			mem_cgroup_oom_synchronize(false);
4803	}
4804
4805	mm_account_fault(regs, address, flags, ret);
4806
4807	return ret;
4808}
4809EXPORT_SYMBOL_GPL(handle_mm_fault);
4810
4811#ifndef __PAGETABLE_P4D_FOLDED
4812/*
4813 * Allocate p4d page table.
4814 * We've already handled the fast-path in-line.
4815 */
4816int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
4817{
4818	p4d_t *new = p4d_alloc_one(mm, address);
4819	if (!new)
4820		return -ENOMEM;
4821
4822	smp_wmb(); /* See comment in __pte_alloc */
4823
4824	spin_lock(&mm->page_table_lock);
4825	if (pgd_present(*pgd))		/* Another has populated it */
4826		p4d_free(mm, new);
4827	else
4828		pgd_populate(mm, pgd, new);
4829	spin_unlock(&mm->page_table_lock);
4830	return 0;
4831}
4832#endif /* __PAGETABLE_P4D_FOLDED */
4833
4834#ifndef __PAGETABLE_PUD_FOLDED
4835/*
4836 * Allocate page upper directory.
4837 * We've already handled the fast-path in-line.
4838 */
4839int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address)
4840{
4841	pud_t *new = pud_alloc_one(mm, address);
4842	if (!new)
4843		return -ENOMEM;
4844
4845	smp_wmb(); /* See comment in __pte_alloc */
4846
4847	spin_lock(&mm->page_table_lock);
4848	if (!p4d_present(*p4d)) {
4849		mm_inc_nr_puds(mm);
4850		p4d_populate(mm, p4d, new);
4851	} else	/* Another has populated it */
4852		pud_free(mm, new);
4853	spin_unlock(&mm->page_table_lock);
4854	return 0;
4855}
4856#endif /* __PAGETABLE_PUD_FOLDED */
4857
4858#ifndef __PAGETABLE_PMD_FOLDED
4859/*
4860 * Allocate page middle directory.
4861 * We've already handled the fast-path in-line.
4862 */
4863int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
4864{
4865	spinlock_t *ptl;
4866	pmd_t *new = pmd_alloc_one(mm, address);
4867	if (!new)
4868		return -ENOMEM;
4869
4870	smp_wmb(); /* See comment in __pte_alloc */
4871
4872	ptl = pud_lock(mm, pud);
4873	if (!pud_present(*pud)) {
4874		mm_inc_nr_pmds(mm);
4875		pud_populate(mm, pud, new);
4876	} else	/* Another has populated it */
4877		pmd_free(mm, new);
4878	spin_unlock(ptl);
4879	return 0;
4880}
4881#endif /* __PAGETABLE_PMD_FOLDED */
4882
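/*
 * Editorial note: the three allocators above share one pattern:
 * allocate and initialise the new table outside the lock, order that
 * initialisation with smp_wmb(), then re-check under the lock and
 * free the table if another thread populated the entry first.  Losing
 * the race costs only a wasted allocation.
 */
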
4883int follow_invalidate_pte(struct mm_struct *mm, unsigned long address,
4884			  struct mmu_notifier_range *range, pte_t **ptepp,
4885			  pmd_t **pmdpp, spinlock_t **ptlp)
4886{
4887	pgd_t *pgd;
4888	p4d_t *p4d;
4889	pud_t *pud;
4890	pmd_t *pmd;
4891	pte_t *ptep;
4892
4893	pgd = pgd_offset(mm, address);
4894	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
4895		goto out;
4896
4897	p4d = p4d_offset(pgd, address);
4898	if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d)))
4899		goto out;
4900
4901	pud = pud_offset(p4d, address);
4902	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
4903		goto out;
4904
4905	pmd = pmd_offset(pud, address);
4906	VM_BUG_ON(pmd_trans_huge(*pmd));
4907
4908	if (pmd_huge(*pmd)) {
4909		if (!pmdpp)
4910			goto out;
4911
4912		if (range) {
4913			mmu_notifier_range_init(range, MMU_NOTIFY_CLEAR, 0,
4914						NULL, mm, address & PMD_MASK,
4915						(address & PMD_MASK) + PMD_SIZE);
4916			mmu_notifier_invalidate_range_start(range);
4917		}
4918		*ptlp = pmd_lock(mm, pmd);
4919		if (pmd_huge(*pmd)) {
4920			*pmdpp = pmd;
4921			return 0;
4922		}
4923		spin_unlock(*ptlp);
4924		if (range)
4925			mmu_notifier_invalidate_range_end(range);
4926	}
4927
4928	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
4929		goto out;
4930
4931	if (range) {
4932		mmu_notifier_range_init(range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
4933					address & PAGE_MASK,
4934					(address & PAGE_MASK) + PAGE_SIZE);
4935		mmu_notifier_invalidate_range_start(range);
4936	}
4937	ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
4938	if (!pte_present(*ptep))
4939		goto unlock;
4940	*ptepp = ptep;
4941	return 0;
4942unlock:
4943	pte_unmap_unlock(ptep, *ptlp);
4944	if (range)
4945		mmu_notifier_invalidate_range_end(range);
4946out:
4947	return -EINVAL;
4948}
4949
4950/**
4951 * follow_pte - look up PTE at a user virtual address
4952 * @mm: the mm_struct of the target address space
4953 * @address: user virtual address
4954 * @ptepp: location to store found PTE
4955 * @ptlp: location to store the lock for the PTE
4956 *
4957 * On a successful return, the pointer to the PTE is stored in @ptepp;
4958 * the corresponding lock is taken and its location is stored in @ptlp.
4959 * The contents of the PTE are only stable until @ptlp is released;
4960 * any further use, if any, must be protected against invalidation
4961 * with MMU notifiers.
4962 *
4963 * Only IO mappings and raw PFN mappings are allowed.  The mmap semaphore
4964 * should be taken for read.
4965 *
4966 * KVM uses this function.  While it is arguably less bad than ``follow_pfn``,
4967 * it is not a good general-purpose API.
4968 *
4969 * Return: zero on success, -ve otherwise.
4970 */
4971int follow_pte(struct mm_struct *mm, unsigned long address,
4972	       pte_t **ptepp, spinlock_t **ptlp)
4973{
4974	return follow_invalidate_pte(mm, address, NULL, ptepp, NULL, ptlp);
4975}
4976EXPORT_SYMBOL_GPL(follow_pte);
4977
4978/**
4979 * follow_pfn - look up PFN at a user virtual address
4980 * @vma: memory mapping
4981 * @address: user virtual address
4982 * @pfn: location to store found PFN
4983 *
4984 * Only IO mappings and raw PFN mappings are allowed.
4985 *
4986 * This function does not allow the caller to read the permissions
4987 * of the PTE.  Do not use it.
4988 *
4989 * Return: zero and the pfn at @pfn on success, -ve otherwise.
4990 */
4991int follow_pfn(struct vm_area_struct *vma, unsigned long address,
4992	unsigned long *pfn)
4993{
4994	int ret = -EINVAL;
4995	spinlock_t *ptl;
4996	pte_t *ptep;
4997
4998	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
4999		return ret;
5000
5001	ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
5002	if (ret)
5003		return ret;
5004	*pfn = pte_pfn(*ptep);
5005	pte_unmap_unlock(ptep, ptl);
5006	return 0;
5007}
5008EXPORT_SYMBOL(follow_pfn);
5009
5010#ifdef CONFIG_HAVE_IOREMAP_PROT
5011int follow_phys(struct vm_area_struct *vma,
5012		unsigned long address, unsigned int flags,
5013		unsigned long *prot, resource_size_t *phys)
5014{
5015	int ret = -EINVAL;
5016	pte_t *ptep, pte;
5017	spinlock_t *ptl;
5018
5019	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
5020		goto out;
5021
5022	if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
5023		goto out;
5024	pte = *ptep;
5025
5026	if ((flags & FOLL_WRITE) && !pte_write(pte))
5027		goto unlock;
5028
5029	*prot = pgprot_val(pte_pgprot(pte));
5030	*phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
5031
5032	ret = 0;
5033unlock:
5034	pte_unmap_unlock(ptep, ptl);
5035out:
5036	return ret;
5037}
5038
5039/**
5040 * generic_access_phys - generic implementation for iomem mmap access
5041 * @vma: the vma to access
5042 * @addr: userspace address, not relative offset within @vma
5043 * @buf: buffer to read/write
5044 * @len: length of transfer
5045 * @write: set to FOLL_WRITE when writing, otherwise reading
5046 *
5047 * This is a generic implementation for &vm_operations_struct.access for an
5048 * iomem mapping. This callback is used by access_process_vm() when the @vma is
5049 * not page based.
5050 */
5051int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
5052			void *buf, int len, int write)
5053{
5054	resource_size_t phys_addr;
5055	unsigned long prot = 0;
5056	void __iomem *maddr;
5057	pte_t *ptep, pte;
5058	spinlock_t *ptl;
5059	int offset = offset_in_page(addr);
5060	int ret = -EINVAL;
5061
5062	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
5063		return -EINVAL;
5064
5065retry:
5066	if (follow_pte(vma->vm_mm, addr, &ptep, &ptl))
5067		return -EINVAL;
5068	pte = *ptep;
5069	pte_unmap_unlock(ptep, ptl);
5070
5071	prot = pgprot_val(pte_pgprot(pte));
5072	phys_addr = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
5073
5074	if ((write & FOLL_WRITE) && !pte_write(pte))
5075		return -EINVAL;
5076
5077	maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
5078	if (!maddr)
5079		return -ENOMEM;
5080
5081	if (follow_pte(vma->vm_mm, addr, &ptep, &ptl))
5082		goto out_unmap;
5083
5084	if (!pte_same(pte, *ptep)) {
5085		pte_unmap_unlock(ptep, ptl);
5086		iounmap(maddr);
5087
5088		goto retry;
5089	}
5090
5091	if (write)
5092		memcpy_toio(maddr + offset, buf, len);
5093	else
5094		memcpy_fromio(buf, maddr + offset, len);
5095	ret = len;
5096	pte_unmap_unlock(ptep, ptl);
5097out_unmap:
5098	iounmap(maddr);
5099
5100	return ret;
5101}
5102EXPORT_SYMBOL_GPL(generic_access_phys);
5103#endif
5104
5105/*
5106 * Access another process' address space as given in mm.
5107 */
5108int __access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf,
5109		       int len, unsigned int gup_flags)
5110{
5111	struct vm_area_struct *vma;
5112	void *old_buf = buf;
5113	int write = gup_flags & FOLL_WRITE;
5114
5115	if (mmap_read_lock_killable(mm))
5116		return 0;
5117
5118	/* ignore errors, just check how much was successfully transferred */
5119	while (len) {
5120		int bytes, ret, offset;
5121		void *maddr;
5122		struct page *page = NULL;
5123
5124		ret = get_user_pages_remote(mm, addr, 1,
5125				gup_flags, &page, &vma, NULL);
5126		if (ret <= 0) {
5127#ifndef CONFIG_HAVE_IOREMAP_PROT
5128			break;
5129#else
5130			/*
5131			 * Check if this is a VM_IO | VM_PFNMAP VMA, which
5132			 * we can access using slightly different code.
5133			 */
5134			vma = vma_lookup(mm, addr);
5135			if (!vma)
5136				break;
5137			if (vma->vm_ops && vma->vm_ops->access)
5138				ret = vma->vm_ops->access(vma, addr, buf,
5139							  len, write);
5140			if (ret <= 0)
5141				break;
5142			bytes = ret;
5143#endif
5144		} else {
5145			bytes = len;
5146			offset = addr & (PAGE_SIZE-1);
5147			if (bytes > PAGE_SIZE-offset)
5148				bytes = PAGE_SIZE-offset;
5149
5150			maddr = kmap(page);
5151			if (write) {
5152				copy_to_user_page(vma, page, addr,
5153						  maddr + offset, buf, bytes);
5154				set_page_dirty_lock(page);
5155			} else {
5156				copy_from_user_page(vma, page, addr,
5157						    buf, maddr + offset, bytes);
5158			}
5159			kunmap(page);
5160			put_page(page);
5161		}
5162		len -= bytes;
5163		buf += bytes;
5164		addr += bytes;
5165	}
5166	mmap_read_unlock(mm);
5167
5168	return buf - old_buf;
5169}
5170
5171/**
5172 * access_remote_vm - access another process' address space
5173 * @mm:		the mm_struct of the target address space
5174 * @addr:	start address to access
5175 * @buf:	source or destination buffer
5176 * @len:	number of bytes to transfer
5177 * @gup_flags:	flags modifying lookup behaviour
5178 *
5179 * The caller must hold a reference on @mm.
5180 *
5181 * Return: number of bytes copied from source to destination.
5182 */
5183int access_remote_vm(struct mm_struct *mm, unsigned long addr,
5184		void *buf, int len, unsigned int gup_flags)
5185{
5186	return __access_remote_vm(mm, addr, buf, len, gup_flags);
5187}
5188
5189/*
5190 * Access another process' address space.
5191 * Source/target buffer must be in kernel space.
5192 * Do not walk the page table directly; use get_user_pages().
5193 */
5194int access_process_vm(struct task_struct *tsk, unsigned long addr,
5195		void *buf, int len, unsigned int gup_flags)
5196{
5197	struct mm_struct *mm;
5198	int ret;
5199
5200	mm = get_task_mm(tsk);
5201	if (!mm)
5202		return 0;
5203
5204	ret = __access_remote_vm(mm, addr, buf, len, gup_flags);
5205
5206	mmput(mm);
5207
5208	return ret;
5209}
5210EXPORT_SYMBOL_GPL(access_process_vm);
5211
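/*
 * Illustrative usage (editorial sketch; the caller, buffer size and
 * gup_flags below are hypothetical, error handling elided):
 *
 *	char buf[64];
 *	int copied;
 *
 *	copied = access_process_vm(child, user_addr, buf, sizeof(buf),
 *				   FOLL_FORCE);
 *
 * copied holds the number of bytes actually transferred; short reads
 * are possible when the range crosses an unmapped gap.
 */
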
5212/*
5213 * Print the name of a VMA.
5214 */
5215void print_vma_addr(char *prefix, unsigned long ip)
5216{
5217	struct mm_struct *mm = current->mm;
5218	struct vm_area_struct *vma;
5219
5220	/*
5221	 * We might be running from an atomic context, so we cannot sleep.
5222	 */
5223	if (!mmap_read_trylock(mm))
5224		return;
5225
5226	vma = find_vma(mm, ip);
5227	if (vma && vma->vm_file) {
5228		struct file *f = vma->vm_file;
5229		char *buf = (char *)__get_free_page(GFP_NOWAIT);
5230		if (buf) {
5231			char *p;
5232
5233			p = file_path(f, buf, PAGE_SIZE);
5234			if (IS_ERR(p))
5235				p = "?";
5236			printk("%s%s[%lx+%lx]", prefix, kbasename(p),
5237					vma->vm_start,
5238					vma->vm_end - vma->vm_start);
5239			free_page((unsigned long)buf);
5240		}
5241	}
5242	mmap_read_unlock(mm);
5243}
5244
5245#if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
5246void __might_fault(const char *file, int line)
5247{
5248	/*
5249	 * Some code (nfs/sunrpc) uses socket ops on kernel memory while
5250	 * holding the mmap_lock, this is safe because kernel memory doesn't
5251	 * get paged out, therefore we'll never actually fault, and the
5252	 * below annotations will generate false positives.
5253	 */
5254	if (uaccess_kernel())
5255		return;
5256	if (pagefault_disabled())
5257		return;
5258	__might_sleep(file, line, 0);
5259#if defined(CONFIG_DEBUG_ATOMIC_SLEEP)
5260	if (current->mm)
5261		might_lock_read(&current->mm->mmap_lock);
5262#endif
5263}
5264EXPORT_SYMBOL(__might_fault);
5265#endif
5266
5267#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
5268/*
5269 * Process all subpages of the specified huge page with the specified
5270 * operation.  The target subpage will be processed last to keep its
5271 * cache lines hot.
5272 */
5273static inline void process_huge_page(
5274	unsigned long addr_hint, unsigned int pages_per_huge_page,
5275	void (*process_subpage)(unsigned long addr, int idx, void *arg),
5276	void *arg)
5277{
5278	int i, n, base, l;
5279	unsigned long addr = addr_hint &
5280		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
5281
5282	/* Process target subpage last to keep its cache lines hot */
5283	might_sleep();
5284	n = (addr_hint - addr) / PAGE_SIZE;
5285	if (2 * n <= pages_per_huge_page) {
5286		/* If target subpage in first half of huge page */
5287		base = 0;
5288		l = n;
5289		/* Process subpages at the end of huge page */
5290		for (i = pages_per_huge_page - 1; i >= 2 * n; i--) {
5291			cond_resched();
5292			process_subpage(addr + i * PAGE_SIZE, i, arg);
5293		}
5294	} else {
5295		/* If target subpage in second half of huge page */
5296		base = pages_per_huge_page - 2 * (pages_per_huge_page - n);
5297		l = pages_per_huge_page - n;
5298		/* Process subpages at the beginning of the huge page */
5299		for (i = 0; i < base; i++) {
5300			cond_resched();
5301			process_subpage(addr + i * PAGE_SIZE, i, arg);
5302		}
5303	}
5304	/*
5305	 * Process remaining subpages in left-right-left-right pattern
5306	 * towards the target subpage
5307	 */
5308	for (i = 0; i < l; i++) {
5309		int left_idx = base + i;
5310		int right_idx = base + 2 * l - 1 - i;
5311
5312		cond_resched();
5313		process_subpage(addr + left_idx * PAGE_SIZE, left_idx, arg);
5314		cond_resched();
5315		process_subpage(addr + right_idx * PAGE_SIZE, right_idx, arg);
5316	}
5317}
5318
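/*
 * Illustrative example (editorial sketch, hypothetical values): with
 * pages_per_huge_page = 8 and target subpage n = 2 (first half, so
 * base = 0 and l = 2), the subpages are processed in the order
 *
 *	7, 6, 5, 4, 0, 3, 1, 2
 *
 * i.e. the far end first, then converging left-right pairs, so the
 * target subpage 2 is touched last and its cache lines stay hot.
 */
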
5319static void clear_gigantic_page(struct page *page,
5320				unsigned long addr,
5321				unsigned int pages_per_huge_page)
5322{
5323	int i;
5324	struct page *p = page;
5325
5326	might_sleep();
5327	for (i = 0; i < pages_per_huge_page;
5328	     i++, p = mem_map_next(p, page, i)) {
5329		cond_resched();
5330		clear_user_highpage(p, addr + i * PAGE_SIZE);
5331	}
5332}
5333
5334static void clear_subpage(unsigned long addr, int idx, void *arg)
5335{
5336	struct page *page = arg;
5337
5338	clear_user_highpage(page + idx, addr);
5339}
5340
5341void clear_huge_page(struct page *page,
5342		     unsigned long addr_hint, unsigned int pages_per_huge_page)
5343{
5344	unsigned long addr = addr_hint &
5345		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
5346
5347	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
5348		clear_gigantic_page(page, addr, pages_per_huge_page);
5349		return;
5350	}
5351
5352	process_huge_page(addr_hint, pages_per_huge_page, clear_subpage, page);
5353}
5354
5355static void copy_user_gigantic_page(struct page *dst, struct page *src,
5356				    unsigned long addr,
5357				    struct vm_area_struct *vma,
5358				    unsigned int pages_per_huge_page)
5359{
5360	int i;
5361	struct page *dst_base = dst;
5362	struct page *src_base = src;
5363
5364	for (i = 0; i < pages_per_huge_page; ) {
5365		cond_resched();
5366		copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
5367
5368		i++;
5369		dst = mem_map_next(dst, dst_base, i);
5370		src = mem_map_next(src, src_base, i);
5371	}
5372}
5373
5374struct copy_subpage_arg {
5375	struct page *dst;
5376	struct page *src;
5377	struct vm_area_struct *vma;
5378};
5379
5380static void copy_subpage(unsigned long addr, int idx, void *arg)
5381{
5382	struct copy_subpage_arg *copy_arg = arg;
5383
5384	copy_user_highpage(copy_arg->dst + idx, copy_arg->src + idx,
5385			   addr, copy_arg->vma);
5386}
5387
5388void copy_user_huge_page(struct page *dst, struct page *src,
5389			 unsigned long addr_hint, struct vm_area_struct *vma,
5390			 unsigned int pages_per_huge_page)
5391{
5392	unsigned long addr = addr_hint &
5393		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
5394	struct copy_subpage_arg arg = {
5395		.dst = dst,
5396		.src = src,
5397		.vma = vma,
5398	};
5399
5400	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
5401		copy_user_gigantic_page(dst, src, addr, vma,
5402					pages_per_huge_page);
5403		return;
5404	}
5405
5406	process_huge_page(addr_hint, pages_per_huge_page, copy_subpage, &arg);
5407}
5408
5409long copy_huge_page_from_user(struct page *dst_page,
5410				const void __user *usr_src,
5411				unsigned int pages_per_huge_page,
5412				bool allow_pagefault)
5413{
5414	void *src = (void *)usr_src;
5415	void *page_kaddr;
5416	unsigned long i, rc = 0;
5417	unsigned long ret_val = pages_per_huge_page * PAGE_SIZE;
5418	struct page *subpage = dst_page;
5419
5420	for (i = 0; i < pages_per_huge_page;
5421	     i++, subpage = mem_map_next(subpage, dst_page, i)) {
5422		if (allow_pagefault)
5423			page_kaddr = kmap(subpage);
5424		else
5425			page_kaddr = kmap_atomic(subpage);
5426		rc = copy_from_user(page_kaddr,
5427				(const void __user *)(src + i * PAGE_SIZE),
5428				PAGE_SIZE);
5429		if (allow_pagefault)
5430			kunmap(subpage);
5431		else
5432			kunmap_atomic(page_kaddr);
5433
5434		ret_val -= (PAGE_SIZE - rc);
5435		if (rc)
5436			break;
5437
5438		cond_resched();
5439	}
5440	return ret_val;
5441}
5442#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
5443
5444#if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS
5445
5446static struct kmem_cache *page_ptl_cachep;
5447
5448void __init ptlock_cache_init(void)
5449{
5450	page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
5451			SLAB_PANIC, NULL);
5452}
5453
5454bool ptlock_alloc(struct page *page)
5455{
5456	spinlock_t *ptl;
5457
5458	ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
5459	if (!ptl)
5460		return false;
5461	page->ptl = ptl;
5462	return true;
5463}
5464
5465void ptlock_free(struct page *page)
5466{
5467	kmem_cache_free(page_ptl_cachep, page->ptl);
5468}
5469#endif
/*
 * [The listing continues with the v5.9 version of mm/memory.c.  Its
 *  file prologue (the history comments, includes and config guards)
 *  repeats the v5.14.15 prologue above almost verbatim and is omitted
 *  here; the only differences are an extra #include <linux/dma-debug.h>
 *  and max_mapnr/mem_map being guarded by CONFIG_NEED_MULTIPLE_NODES
 *  rather than CONFIG_NUMA.]
 */
 110void *high_memory;
 111EXPORT_SYMBOL(high_memory);
 112
 113/*
 114 * Randomize the address space (stacks, mmaps, brk, etc.).
 115 *
 116 * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
 117 *   as ancient (libc5 based) binaries can segfault. )
 118 */
 119int randomize_va_space __read_mostly =
 120#ifdef CONFIG_COMPAT_BRK
 121					1;
 122#else
 123					2;
 124#endif
 125
 126#ifndef arch_faults_on_old_pte
 127static inline bool arch_faults_on_old_pte(void)
 128{
 129	/*
  130	 * Architectures without a hardware access-flag feature need to
  131	 * implement their own helper.  By default, "true" means a page
  132	 * fault will be taken on an old pte.
 133	 */
 134	return true;
 135}
 136#endif
 137
 138static int __init disable_randmaps(char *s)
 139{
 140	randomize_va_space = 0;
 141	return 1;
 142}
 143__setup("norandmaps", disable_randmaps);
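/*
 * Editor's usage note: booting with "norandmaps" on the kernel command
 * line has the same effect as disabling the knob at runtime:
 *
 *	# echo 0 > /proc/sys/kernel/randomize_va_space
 *
 * where 2 = full randomization, 1 = brk left unrandomized (COMPAT_BRK),
 * and 0 = randomization off.
 */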
 144
 145unsigned long zero_pfn __read_mostly;
 146EXPORT_SYMBOL(zero_pfn);
 147
 148unsigned long highest_memmap_pfn __read_mostly;
 149
 150/*
 151 * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
 152 */
 153static int __init init_zero_pfn(void)
 154{
 155	zero_pfn = page_to_pfn(ZERO_PAGE(0));
 156	return 0;
 157}
 158core_initcall(init_zero_pfn);
 159
 160void mm_trace_rss_stat(struct mm_struct *mm, int member, long count)
 161{
 162	trace_rss_stat(mm, member, count);
 163}
 164
 165#if defined(SPLIT_RSS_COUNTING)
 166
 167void sync_mm_rss(struct mm_struct *mm)
 168{
 169	int i;
 170
 171	for (i = 0; i < NR_MM_COUNTERS; i++) {
 172		if (current->rss_stat.count[i]) {
 173			add_mm_counter(mm, i, current->rss_stat.count[i]);
 174			current->rss_stat.count[i] = 0;
 175		}
 176	}
 177	current->rss_stat.events = 0;
 178}
 179
 180static void add_mm_counter_fast(struct mm_struct *mm, int member, int val)
 181{
 182	struct task_struct *task = current;
 183
 184	if (likely(task->mm == mm))
 185		task->rss_stat.count[member] += val;
 186	else
 187		add_mm_counter(mm, member, val);
 188}
 189#define inc_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, 1)
 190#define dec_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, -1)
 191
 192/* sync counter once per 64 page faults */
 193#define TASK_RSS_EVENTS_THRESH	(64)
 194static void check_sync_rss_stat(struct task_struct *task)
 195{
 196	if (unlikely(task != current))
 197		return;
 198	if (unlikely(task->rss_stat.events++ > TASK_RSS_EVENTS_THRESH))
 199		sync_mm_rss(task->mm);
 200}
 201#else /* SPLIT_RSS_COUNTING */
 202
 203#define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member)
 204#define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member)
 205
 206static void check_sync_rss_stat(struct task_struct *task)
 207{
 208}
 209
 210#endif /* SPLIT_RSS_COUNTING */
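/*
 * Editor's illustration (not part of memory.c): the SPLIT_RSS_COUNTING
 * idea above in miniature -- batch updates in a thread-private counter
 * and fold them into the shared counter only once per threshold, so the
 * shared cacheline is touched rarely.  Standalone C11 userspace sketch;
 * all names are the example's own.
 */
#include <stdatomic.h>
#include <stdio.h>

#define RSS_EVENTS_THRESH 64

static atomic_long shared_rss;		/* stands in for mm->rss_stat  */
static _Thread_local long cached_rss;	/* stands in for task->rss_stat */
static _Thread_local int rss_events;

static void rss_add(long val)
{
	cached_rss += val;			/* cheap, uncontended */
	if (++rss_events > RSS_EVENTS_THRESH) {	/* occasional sync    */
		atomic_fetch_add(&shared_rss, cached_rss);
		cached_rss = 0;
		rss_events = 0;
	}
}

int main(void)
{
	for (int i = 0; i < 1000; i++)
		rss_add(1);
	atomic_fetch_add(&shared_rss, cached_rss);	/* sync_mm_rss() */
	printf("%ld\n", atomic_load(&shared_rss));	/* prints 1000   */
	return 0;
}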
 211
 212/*
 213 * Note: this doesn't free the actual pages themselves. That
 214 * has been handled earlier when unmapping all the memory regions.
 215 */
 216static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
 217			   unsigned long addr)
 218{
 219	pgtable_t token = pmd_pgtable(*pmd);
 220	pmd_clear(pmd);
 221	pte_free_tlb(tlb, token, addr);
 222	mm_dec_nr_ptes(tlb->mm);
 223}
 224
 225static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
 226				unsigned long addr, unsigned long end,
 227				unsigned long floor, unsigned long ceiling)
 228{
 229	pmd_t *pmd;
 230	unsigned long next;
 231	unsigned long start;
 232
 233	start = addr;
 234	pmd = pmd_offset(pud, addr);
 235	do {
 236		next = pmd_addr_end(addr, end);
 237		if (pmd_none_or_clear_bad(pmd))
 238			continue;
 239		free_pte_range(tlb, pmd, addr);
 240	} while (pmd++, addr = next, addr != end);
 241
 242	start &= PUD_MASK;
 243	if (start < floor)
 244		return;
 245	if (ceiling) {
 246		ceiling &= PUD_MASK;
 247		if (!ceiling)
 248			return;
 249	}
 250	if (end - 1 > ceiling - 1)
 251		return;
 252
 253	pmd = pmd_offset(pud, start);
 254	pud_clear(pud);
 255	pmd_free_tlb(tlb, pmd, start);
 256	mm_dec_nr_pmds(tlb->mm);
 257}
 258
 259static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
 260				unsigned long addr, unsigned long end,
 261				unsigned long floor, unsigned long ceiling)
 262{
 263	pud_t *pud;
 264	unsigned long next;
 265	unsigned long start;
 266
 267	start = addr;
 268	pud = pud_offset(p4d, addr);
 269	do {
 270		next = pud_addr_end(addr, end);
 271		if (pud_none_or_clear_bad(pud))
 272			continue;
 273		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
 274	} while (pud++, addr = next, addr != end);
 275
 276	start &= P4D_MASK;
 277	if (start < floor)
 278		return;
 279	if (ceiling) {
 280		ceiling &= P4D_MASK;
 281		if (!ceiling)
 282			return;
 283	}
 284	if (end - 1 > ceiling - 1)
 285		return;
 286
 287	pud = pud_offset(p4d, start);
 288	p4d_clear(p4d);
 289	pud_free_tlb(tlb, pud, start);
 290	mm_dec_nr_puds(tlb->mm);
 291}
 292
 293static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
 294				unsigned long addr, unsigned long end,
 295				unsigned long floor, unsigned long ceiling)
 296{
 297	p4d_t *p4d;
 298	unsigned long next;
 299	unsigned long start;
 300
 301	start = addr;
 302	p4d = p4d_offset(pgd, addr);
 303	do {
 304		next = p4d_addr_end(addr, end);
 305		if (p4d_none_or_clear_bad(p4d))
 306			continue;
 307		free_pud_range(tlb, p4d, addr, next, floor, ceiling);
 308	} while (p4d++, addr = next, addr != end);
 309
 310	start &= PGDIR_MASK;
 311	if (start < floor)
 312		return;
 313	if (ceiling) {
 314		ceiling &= PGDIR_MASK;
 315		if (!ceiling)
 316			return;
 317	}
 318	if (end - 1 > ceiling - 1)
 319		return;
 320
 321	p4d = p4d_offset(pgd, start);
 322	pgd_clear(pgd);
 323	p4d_free_tlb(tlb, p4d, start);
 324}
 325
 326/*
 327 * This function frees user-level page tables of a process.
 328 */
 329void free_pgd_range(struct mmu_gather *tlb,
 330			unsigned long addr, unsigned long end,
 331			unsigned long floor, unsigned long ceiling)
 332{
 333	pgd_t *pgd;
 334	unsigned long next;
 335
 336	/*
 337	 * The next few lines have given us lots of grief...
 338	 *
 339	 * Why are we testing PMD* at this top level?  Because often
 340	 * there will be no work to do at all, and we'd prefer not to
 341	 * go all the way down to the bottom just to discover that.
 342	 *
 343	 * Why all these "- 1"s?  Because 0 represents both the bottom
 344	 * of the address space and the top of it (using -1 for the
 345	 * top wouldn't help much: the masks would do the wrong thing).
 346	 * The rule is that addr 0 and floor 0 refer to the bottom of
  347	 * the address space, but end 0 and ceiling 0 refer to the top.
 348	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
 349	 * that end 0 case should be mythical).
 350	 *
 351	 * Wherever addr is brought up or ceiling brought down, we must
 352	 * be careful to reject "the opposite 0" before it confuses the
 353	 * subsequent tests.  But what about where end is brought down
  354	 * by PMD_SIZE below? No, end can't go down to 0 there.
 355	 *
 356	 * Whereas we round start (addr) and ceiling down, by different
 357	 * masks at different levels, in order to test whether a table
 358	 * now has no other vmas using it, so can be freed, we don't
 359	 * bother to round floor or end up - the tests don't need that.
 360	 */
 361
 362	addr &= PMD_MASK;
 363	if (addr < floor) {
 364		addr += PMD_SIZE;
 365		if (!addr)
 366			return;
 367	}
 368	if (ceiling) {
 369		ceiling &= PMD_MASK;
 370		if (!ceiling)
 371			return;
 372	}
 373	if (end - 1 > ceiling - 1)
 374		end -= PMD_SIZE;
 375	if (addr > end - 1)
 376		return;
 377	/*
  378	 * Page table cache pages are added with PAGE_SIZE (see
  379	 * pte_free_tlb()), so flush the TLB if we need to.
 380	 */
 381	tlb_change_page_size(tlb, PAGE_SIZE);
 382	pgd = pgd_offset(tlb->mm, addr);
 383	do {
 384		next = pgd_addr_end(addr, end);
 385		if (pgd_none_or_clear_bad(pgd))
 386			continue;
 387		free_p4d_range(tlb, pgd, addr, next, floor, ceiling);
 388	} while (pgd++, addr = next, addr != end);
 389}
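/*
 * Editor's worked example for the "- 1" comparisons above.  On 64 bits,
 * ceiling == 0 denotes the top of the address space, and:
 *
 *	ceiling - 1 == ~0UL	(the highest possible address)
 *
 * so "end - 1 > ceiling - 1" is never true for a ceiling of 0 and
 * nothing is clipped at the very top.  With a non-zero ceiling that was
 * just rounded down by "ceiling &= PMD_MASK", end may now lie beyond it;
 * the comparison then holds and end is pulled back by PMD_SIZE, leaving
 * in place the boundary page table still covered by the region above
 * ceiling.
 */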
 390
 391void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
 392		unsigned long floor, unsigned long ceiling)
 393{
 394	while (vma) {
 395		struct vm_area_struct *next = vma->vm_next;
 396		unsigned long addr = vma->vm_start;
 397
 398		/*
 399		 * Hide vma from rmap and truncate_pagecache before freeing
 400		 * pgtables
 401		 */
 402		unlink_anon_vmas(vma);
 403		unlink_file_vma(vma);
 404
 405		if (is_vm_hugetlb_page(vma)) {
 406			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
 407				floor, next ? next->vm_start : ceiling);
 408		} else {
 409			/*
 410			 * Optimization: gather nearby vmas into one call down
 411			 */
 412			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
 413			       && !is_vm_hugetlb_page(next)) {
 414				vma = next;
 415				next = vma->vm_next;
 416				unlink_anon_vmas(vma);
 417				unlink_file_vma(vma);
 418			}
 419			free_pgd_range(tlb, addr, vma->vm_end,
 420				floor, next ? next->vm_start : ceiling);
 421		}
 422		vma = next;
 423	}
 424}
 425
 426int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
 427{
 428	spinlock_t *ptl;
 429	pgtable_t new = pte_alloc_one(mm);
 430	if (!new)
 431		return -ENOMEM;
 432
 433	/*
  434	 * Ensure all pte setup (eg. pte page lock and page clearing) is
 435	 * visible before the pte is made visible to other CPUs by being
 436	 * put into page tables.
 437	 *
 438	 * The other side of the story is the pointer chasing in the page
 439	 * table walking code (when walking the page table without locking;
 440	 * ie. most of the time). Fortunately, these data accesses consist
 441	 * of a chain of data-dependent loads, meaning most CPUs (alpha
 442	 * being the notable exception) will already guarantee loads are
 443	 * seen in-order. See the alpha page table accessors for the
 444	 * smp_rmb() barriers in page table walking code.
 445	 */
 446	smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
 447
 448	ptl = pmd_lock(mm, pmd);
  449	if (likely(pmd_none(*pmd))) {	/* Has another populated it? */
 450		mm_inc_nr_ptes(mm);
 451		pmd_populate(mm, pmd, new);
 452		new = NULL;
 453	}
 454	spin_unlock(ptl);
 455	if (new)
 456		pte_free(mm, new);
 457	return 0;
 458}
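/*
 * Editor's illustration (not part of memory.c): the publish pattern the
 * smp_wmb() above implements, restated with C11 atomics in a standalone
 * userspace sketch.  Fully initialise the object, then publish the
 * pointer with release semantics; a reader that loads the pointer with
 * acquire semantics (or, on most CPUs, merely through the data
 * dependency -- alpha being the exception noted above) sees the object
 * fully initialised.  All names are the example's own.
 */
#include <stdatomic.h>
#include <stdlib.h>

struct pt_page { long entries[512]; };

static _Atomic(struct pt_page *) published;

static void publish(void)
{
	struct pt_page *p = calloc(1, sizeof(*p));	/* "page clearing" */

	if (!p)
		abort();
	/* release store: the setup above is visible before the pointer */
	atomic_store_explicit(&published, p, memory_order_release);
}

int main(void)
{
	struct pt_page *p;

	publish();
	/* acquire load pairs with the release store */
	p = atomic_load_explicit(&published, memory_order_acquire);
	return p->entries[0] != 0;	/* exits 0: cleared page observed */
}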
 459
 460int __pte_alloc_kernel(pmd_t *pmd)
 461{
 462	pte_t *new = pte_alloc_one_kernel(&init_mm);
 463	if (!new)
 464		return -ENOMEM;
 465
 466	smp_wmb(); /* See comment in __pte_alloc */
 467
 468	spin_lock(&init_mm.page_table_lock);
  469	if (likely(pmd_none(*pmd))) {	/* Has another populated it? */
 470		pmd_populate_kernel(&init_mm, pmd, new);
 471		new = NULL;
 472	}
 473	spin_unlock(&init_mm.page_table_lock);
 474	if (new)
 475		pte_free_kernel(&init_mm, new);
 476	return 0;
 477}
 478
 479static inline void init_rss_vec(int *rss)
 480{
 481	memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
 482}
 483
 484static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
 485{
 486	int i;
 487
 488	if (current->mm == mm)
 489		sync_mm_rss(mm);
 490	for (i = 0; i < NR_MM_COUNTERS; i++)
 491		if (rss[i])
 492			add_mm_counter(mm, i, rss[i]);
 493}
 494
 495/*
 496 * This function is called to print an error when a bad pte
 497 * is found. For example, we might have a PFN-mapped pte in
 498 * a region that doesn't allow it.
 499 *
 500 * The calling function must still handle the error.
 501 */
 502static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
 503			  pte_t pte, struct page *page)
 504{
 505	pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
 506	p4d_t *p4d = p4d_offset(pgd, addr);
 507	pud_t *pud = pud_offset(p4d, addr);
 508	pmd_t *pmd = pmd_offset(pud, addr);
 509	struct address_space *mapping;
 510	pgoff_t index;
 511	static unsigned long resume;
 512	static unsigned long nr_shown;
 513	static unsigned long nr_unshown;
 514
 515	/*
 516	 * Allow a burst of 60 reports, then keep quiet for that minute;
 517	 * or allow a steady drip of one report per second.
 518	 */
 519	if (nr_shown == 60) {
 520		if (time_before(jiffies, resume)) {
 521			nr_unshown++;
 522			return;
 523		}
 524		if (nr_unshown) {
 525			pr_alert("BUG: Bad page map: %lu messages suppressed\n",
 526				 nr_unshown);
 527			nr_unshown = 0;
 528		}
 529		nr_shown = 0;
 530	}
 531	if (nr_shown++ == 0)
 532		resume = jiffies + 60 * HZ;
 533
 534	mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
 535	index = linear_page_index(vma, addr);
 536
 537	pr_alert("BUG: Bad page map in process %s  pte:%08llx pmd:%08llx\n",
 538		 current->comm,
 539		 (long long)pte_val(pte), (long long)pmd_val(*pmd));
 540	if (page)
 541		dump_page(page, "bad pte");
 542	pr_alert("addr:%px vm_flags:%08lx anon_vma:%px mapping:%px index:%lx\n",
 543		 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
 544	pr_alert("file:%pD fault:%ps mmap:%ps readpage:%ps\n",
 545		 vma->vm_file,
 546		 vma->vm_ops ? vma->vm_ops->fault : NULL,
 547		 vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
 548		 mapping ? mapping->a_ops->readpage : NULL);
 549	dump_stack();
 550	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
 551}
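/*
 * Editor's illustration (not part of memory.c): the throttle used by
 * print_bad_pte() above as a standalone userspace helper -- show a
 * burst of up to 60 reports, stay quiet for the rest of the minute,
 * and say how many were dropped once reporting resumes.  time(2)
 * stands in for jiffies; all names are the example's own.
 */
#include <stdio.h>
#include <time.h>

static int report_allowed(void)
{
	static time_t resume;
	static unsigned long nr_shown, nr_unshown;

	if (nr_shown == 60) {
		if (time(NULL) < resume) {
			nr_unshown++;			/* suppressed */
			return 0;
		}
		if (nr_unshown) {
			printf("%lu messages suppressed\n", nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = time(NULL) + 60;	/* jiffies + 60 * HZ */
	return 1;
}

int main(void)
{
	int shown = 0;

	for (int i = 0; i < 1000; i++)
		shown += report_allowed();
	printf("%d of 1000 reports shown\n", shown);	/* 60 within 1s */
	return 0;
}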
 552
 553/*
 554 * vm_normal_page -- This function gets the "struct page" associated with a pte.
 555 *
 556 * "Special" mappings do not wish to be associated with a "struct page" (either
 557 * it doesn't exist, or it exists but they don't want to touch it). In this
 558 * case, NULL is returned here. "Normal" mappings do have a struct page.
 559 *
 560 * There are 2 broad cases. Firstly, an architecture may define a pte_special()
 561 * pte bit, in which case this function is trivial. Secondly, an architecture
 562 * may not have a spare pte bit, which requires a more complicated scheme,
 563 * described below.
 564 *
 565 * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
 566 * special mapping (even if there are underlying and valid "struct pages").
 567 * COWed pages of a VM_PFNMAP are always normal.
 568 *
 569 * The way we recognize COWed pages within VM_PFNMAP mappings is through the
 570 * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
 571 * set, and the vm_pgoff will point to the first PFN mapped: thus every special
 572 * mapping will always honor the rule
 573 *
 574 *	pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
 575 *
 576 * And for normal mappings this is false.
 577 *
 578 * This restricts such mappings to be a linear translation from virtual address
 579 * to pfn. To get around this restriction, we allow arbitrary mappings so long
 580 * as the vma is not a COW mapping; in that case, we know that all ptes are
 581 * special (because none can have been COWed).
 582 *
 583 *
 584 * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
 585 *
 586 * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
 587 * page" backing, however the difference is that _all_ pages with a struct
 588 * page (that is, those where pfn_valid is true) are refcounted and considered
 589 * normal pages by the VM. The disadvantage is that pages are refcounted
 590 * (which can be slower and simply not an option for some PFNMAP users). The
 591 * advantage is that we don't have to follow the strict linearity rule of
 592 * PFNMAP mappings in order to support COWable mappings.
 593 *
 594 */
 595struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
 596			    pte_t pte)
 597{
 598	unsigned long pfn = pte_pfn(pte);
 599
 600	if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL)) {
 601		if (likely(!pte_special(pte)))
 602			goto check_pfn;
 603		if (vma->vm_ops && vma->vm_ops->find_special_page)
 604			return vma->vm_ops->find_special_page(vma, addr);
 605		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
 606			return NULL;
 607		if (is_zero_pfn(pfn))
 608			return NULL;
 609		if (pte_devmap(pte))
 610			return NULL;
 611
 612		print_bad_pte(vma, addr, pte, NULL);
 613		return NULL;
 614	}
 615
 616	/* !CONFIG_ARCH_HAS_PTE_SPECIAL case follows: */
 617
 618	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
 619		if (vma->vm_flags & VM_MIXEDMAP) {
 620			if (!pfn_valid(pfn))
 621				return NULL;
 622			goto out;
 623		} else {
 624			unsigned long off;
 625			off = (addr - vma->vm_start) >> PAGE_SHIFT;
 626			if (pfn == vma->vm_pgoff + off)
 627				return NULL;
 628			if (!is_cow_mapping(vma->vm_flags))
 629				return NULL;
 630		}
 631	}
 632
 633	if (is_zero_pfn(pfn))
 634		return NULL;
 635
 636check_pfn:
 637	if (unlikely(pfn > highest_memmap_pfn)) {
 638		print_bad_pte(vma, addr, pte, NULL);
 639		return NULL;
 640	}
 641
 642	/*
 643	 * NOTE! We still have PageReserved() pages in the page tables.
 644	 * eg. VDSO mappings can cause them to exist.
 645	 */
 646out:
 647	return pfn_to_page(pfn);
 648}
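/*
 * Editor's worked example (not part of memory.c) for the linearity rule
 * above.  Take a VM_PFNMAP vma set up by remap_pfn_range() with
 *
 *	vm_start = 0x7f0000000000, vm_pgoff = 0x100, 4KiB pages
 *
 * and addr = vm_start + 2 * 4096.  Then
 *
 *	vm_pgoff + ((addr - vm_start) >> PAGE_SHIFT) = 0x100 + 2 = 0x102
 *
 * so a pte there carrying pfn 0x102 is the raw, special mapping (no
 * struct page handling), while a pte carrying any other pfn at that
 * address can only be a COWed normal page.
 */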
 649
 650#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 651struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
 652				pmd_t pmd)
 653{
 654	unsigned long pfn = pmd_pfn(pmd);
 655
 656	/*
 657	 * There is no pmd_special() but there may be special pmds, e.g.
 658	 * in a direct-access (dax) mapping, so let's just replicate the
 659	 * !CONFIG_ARCH_HAS_PTE_SPECIAL case from vm_normal_page() here.
 660	 */
 661	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
 662		if (vma->vm_flags & VM_MIXEDMAP) {
 663			if (!pfn_valid(pfn))
 664				return NULL;
 665			goto out;
 666		} else {
 667			unsigned long off;
 668			off = (addr - vma->vm_start) >> PAGE_SHIFT;
 669			if (pfn == vma->vm_pgoff + off)
 670				return NULL;
 671			if (!is_cow_mapping(vma->vm_flags))
 672				return NULL;
 673		}
 674	}
 675
 676	if (pmd_devmap(pmd))
 677		return NULL;
 678	if (is_huge_zero_pmd(pmd))
 679		return NULL;
 680	if (unlikely(pfn > highest_memmap_pfn))
 681		return NULL;
 682
 683	/*
 684	 * NOTE! We still have PageReserved() pages in the page tables.
 685	 * eg. VDSO mappings can cause them to exist.
 686	 */
 687out:
 688	return pfn_to_page(pfn);
 689}
 690#endif
 691
 692/*
  693 * Copy one vm_area from one task to the other.  Assumes that the page
  694 * tables already present in the new task are clear in the whole range
  695 * covered by this vma.
 696 */
 697
 698static unsigned long
 699copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 700		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
 701		unsigned long addr, int *rss)
 702{
 703	unsigned long vm_flags = vma->vm_flags;
 704	pte_t pte = *src_pte;
 705	struct page *page;
 706	swp_entry_t entry = pte_to_swp_entry(pte);
 707
 708	if (likely(!non_swap_entry(entry))) {
 709		if (swap_duplicate(entry) < 0)
 710			return entry.val;
 711
 712		/* make sure dst_mm is on swapoff's mmlist. */
 713		if (unlikely(list_empty(&dst_mm->mmlist))) {
 714			spin_lock(&mmlist_lock);
 715			if (list_empty(&dst_mm->mmlist))
 716				list_add(&dst_mm->mmlist,
 717						&src_mm->mmlist);
 718			spin_unlock(&mmlist_lock);
 719		}
 720		rss[MM_SWAPENTS]++;
 721	} else if (is_migration_entry(entry)) {
 722		page = migration_entry_to_page(entry);
 723
 724		rss[mm_counter(page)]++;
 725
 726		if (is_write_migration_entry(entry) &&
 727				is_cow_mapping(vm_flags)) {
 728			/*
 729			 * COW mappings require pages in both
  730			 * parent and child to be marked read-only.
 731			 */
 732			make_migration_entry_read(&entry);
 733			pte = swp_entry_to_pte(entry);
 734			if (pte_swp_soft_dirty(*src_pte))
 735				pte = pte_swp_mksoft_dirty(pte);
 736			if (pte_swp_uffd_wp(*src_pte))
 737				pte = pte_swp_mkuffd_wp(pte);
 738			set_pte_at(src_mm, addr, src_pte, pte);
 739		}
 740	} else if (is_device_private_entry(entry)) {
 741		page = device_private_entry_to_page(entry);
 742
 743		/*
 744		 * Update rss count even for unaddressable pages, as
  745	 * they should be treated just like normal pages in this
 746		 * respect.
 747		 *
 748		 * We will likely want to have some new rss counters
 749		 * for unaddressable pages, at some point. But for now
 750		 * keep things as they are.
 751		 */
 752		get_page(page);
 753		rss[mm_counter(page)]++;
 754		page_dup_rmap(page, false);
 755
 756		/*
 757		 * We do not preserve soft-dirty information, because so
 758		 * far, checkpoint/restore is the only feature that
 759		 * requires that. And checkpoint/restore does not work
 760		 * when a device driver is involved (you cannot easily
 761		 * save and restore device driver state).
 762		 */
 763		if (is_write_device_private_entry(entry) &&
 764		    is_cow_mapping(vm_flags)) {
 765			make_device_private_entry_read(&entry);
 766			pte = swp_entry_to_pte(entry);
 767			if (pte_swp_uffd_wp(*src_pte))
 768				pte = pte_swp_mkuffd_wp(pte);
 769			set_pte_at(src_mm, addr, src_pte, pte);
 770		}
 771	}
 772	set_pte_at(dst_mm, addr, dst_pte, pte);
 773	return 0;
 774}
 775
 776/*
 777 * Copy a present and normal page if necessary.
 778 *
 779 * NOTE! The usual case is that this doesn't need to do
 780 * anything, and can just return a positive value. That
 781 * will let the caller know that it can just increase
 782 * the page refcount and re-use the pte the traditional
 783 * way.
 784 *
 785 * But _if_ we need to copy it because it needs to be
 786 * pinned in the parent (and the child should get its own
 787 * copy rather than just a reference to the same page),
 788 * we'll do that here and return zero to let the caller
 789 * know we're done.
 790 *
 791 * And if we need a pre-allocated page but don't yet have
 792 * one, return a negative error to let the preallocation
 793 * code know so that it can do so outside the page table
 794 * lock.
 795 */
 796static inline int
 797copy_present_page(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 798		pte_t *dst_pte, pte_t *src_pte,
 799		struct vm_area_struct *vma, struct vm_area_struct *new,
 800		unsigned long addr, int *rss, struct page **prealloc,
 801		pte_t pte, struct page *page)
 802{
 803	struct page *new_page;
 804
 805	if (!is_cow_mapping(vma->vm_flags))
 806		return 1;
 807
 808	/*
 809	 * What we want to do is to check whether this page may
 810	 * have been pinned by the parent process.  If so,
 811	 * instead of wrprotect the pte on both sides, we copy
 812	 * the page immediately so that we'll always guarantee
 813	 * the pinned page won't be randomly replaced in the
 814	 * future.
 815	 *
 816	 * The page pinning checks are just "has this mm ever
 817	 * seen pinning", along with the (inexact) check of
  818	 * the page count. That might give false positives
  819	 * for pinning, but it will work correctly.
 820	 */
 821	if (likely(!atomic_read(&src_mm->has_pinned)))
 822		return 1;
 823	if (likely(!page_maybe_dma_pinned(page)))
 824		return 1;
 825
 826	new_page = *prealloc;
 827	if (!new_page)
 828		return -EAGAIN;
 829
 830	/*
 831	 * We have a prealloc page, all good!  Take it
 832	 * over and copy the page & arm it.
 833	 */
 834	*prealloc = NULL;
 835	copy_user_highpage(new_page, page, addr, vma);
 836	__SetPageUptodate(new_page);
 837	page_add_new_anon_rmap(new_page, new, addr, false);
 838	lru_cache_add_inactive_or_unevictable(new_page, new);
 839	rss[mm_counter(new_page)]++;
 840
 841	/* All done, just insert the new page copy in the child */
 842	pte = mk_pte(new_page, new->vm_page_prot);
 843	pte = maybe_mkwrite(pte_mkdirty(pte), new);
 844	set_pte_at(dst_mm, addr, dst_pte, pte);
 845	return 0;
 846}
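/*
 * Editor's note (not part of memory.c): the three-way return convention
 * above, spelled out from the caller's point of view.  Hypothetical
 * names; the real caller is copy_present_pte() below.
 */
#include <errno.h>

enum copy_present_result {
	COPY_DONE      = 0,		/* page duplicated, child pte set   */
	COPY_SHARE     = 1,		/* any > 0: share the original page */
	COPY_NEED_PAGE = -EAGAIN,	/* preallocate outside the lock,    */
};					/* then retry                       */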
 847
 848/*
 849 * Copy one pte.  Returns 0 if succeeded, or -EAGAIN if one preallocated page
 850 * is required to copy this pte.
 851 */
 852static inline int
 853copy_present_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 854		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
 855		struct vm_area_struct *new,
 856		unsigned long addr, int *rss, struct page **prealloc)
 857{
 858	unsigned long vm_flags = vma->vm_flags;
 859	pte_t pte = *src_pte;
 860	struct page *page;
 861
 862	page = vm_normal_page(vma, addr, pte);
 863	if (page) {
 864		int retval;
 865
 866		retval = copy_present_page(dst_mm, src_mm,
 867			dst_pte, src_pte,
 868			vma, new,
 869			addr, rss, prealloc,
 870			pte, page);
 871		if (retval <= 0)
 872			return retval;
 873
 874		get_page(page);
 875		page_dup_rmap(page, false);
 876		rss[mm_counter(page)]++;
 877	}
 878
 879	/*
 880	 * If it's a COW mapping, write protect it both
 881	 * in the parent and the child
 882	 */
 883	if (is_cow_mapping(vm_flags) && pte_write(pte)) {
 884		ptep_set_wrprotect(src_mm, addr, src_pte);
 885		pte = pte_wrprotect(pte);
 886	}
 887
 888	/*
 889	 * If it's a shared mapping, mark it clean in
 890	 * the child
 891	 */
 892	if (vm_flags & VM_SHARED)
 893		pte = pte_mkclean(pte);
 894	pte = pte_mkold(pte);
 895
 896	/*
 897	 * Make sure the _PAGE_UFFD_WP bit is cleared if the new VMA
 898	 * does not have the VM_UFFD_WP, which means that the uffd
 899	 * fork event is not enabled.
 900	 */
 901	if (!(vm_flags & VM_UFFD_WP))
 902		pte = pte_clear_uffd_wp(pte);
 903
 904	set_pte_at(dst_mm, addr, dst_pte, pte);
 905	return 0;
 906}
 907
 908static inline struct page *
 909page_copy_prealloc(struct mm_struct *src_mm, struct vm_area_struct *vma,
 910		   unsigned long addr)
 911{
 912	struct page *new_page;
 913
 914	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, addr);
 915	if (!new_page)
 916		return NULL;
 917
 918	if (mem_cgroup_charge(new_page, src_mm, GFP_KERNEL)) {
 919		put_page(new_page);
 920		return NULL;
 921	}
 922	cgroup_throttle_swaprate(new_page, GFP_KERNEL);
 923
 924	return new_page;
 925}
 926
 927static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 928		   pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
 929		   struct vm_area_struct *new,
 930		   unsigned long addr, unsigned long end)
 931{
 932	pte_t *orig_src_pte, *orig_dst_pte;
 933	pte_t *src_pte, *dst_pte;
 934	spinlock_t *src_ptl, *dst_ptl;
 935	int progress, ret = 0;
 936	int rss[NR_MM_COUNTERS];
 937	swp_entry_t entry = (swp_entry_t){0};
 938	struct page *prealloc = NULL;
 939
 940again:
 941	progress = 0;
 942	init_rss_vec(rss);
 943
 944	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
 945	if (!dst_pte) {
 946		ret = -ENOMEM;
 947		goto out;
 948	}
 949	src_pte = pte_offset_map(src_pmd, addr);
 950	src_ptl = pte_lockptr(src_mm, src_pmd);
 951	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
 952	orig_src_pte = src_pte;
 953	orig_dst_pte = dst_pte;
 954	arch_enter_lazy_mmu_mode();
 955
 956	do {
 957		/*
 958		 * We are holding two locks at this point - either of them
 959		 * could generate latencies in another task on another CPU.
 960		 */
 961		if (progress >= 32) {
 962			progress = 0;
 963			if (need_resched() ||
 964			    spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
 965				break;
 966		}
 967		if (pte_none(*src_pte)) {
 968			progress++;
 969			continue;
 970		}
 971		if (unlikely(!pte_present(*src_pte))) {
 972			entry.val = copy_nonpresent_pte(dst_mm, src_mm,
 973							dst_pte, src_pte,
 974							vma, addr, rss);
 975			if (entry.val)
 976				break;
 977			progress += 8;
 978			continue;
 979		}
 980		/* copy_present_pte() will clear `*prealloc' if consumed */
 981		ret = copy_present_pte(dst_mm, src_mm, dst_pte, src_pte,
 982				       vma, new, addr, rss, &prealloc);
 983		/*
 984		 * If we need a pre-allocated page for this pte, drop the
 985		 * locks, allocate, and try again.
 986		 */
 987		if (unlikely(ret == -EAGAIN))
 988			break;
 989		if (unlikely(prealloc)) {
 990			/*
  991			 * The preallocated page cannot be reused the next
  992			 * time around, so as to strictly follow mempolicy
  993			 * (e.g., alloc_page_vma() allocates according to the
  994			 * address); this can only happen if a pinned pte changed.
 995			 */
 996			put_page(prealloc);
 997			prealloc = NULL;
 998		}
 999		progress += 8;
1000	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
1001
1002	arch_leave_lazy_mmu_mode();
1003	spin_unlock(src_ptl);
1004	pte_unmap(orig_src_pte);
1005	add_mm_rss_vec(dst_mm, rss);
1006	pte_unmap_unlock(orig_dst_pte, dst_ptl);
1007	cond_resched();
1008
1009	if (entry.val) {
1010		if (add_swap_count_continuation(entry, GFP_KERNEL) < 0) {
1011			ret = -ENOMEM;
1012			goto out;
1013		}
1014		entry.val = 0;
1015	} else if (ret) {
1016		WARN_ON_ONCE(ret != -EAGAIN);
1017		prealloc = page_copy_prealloc(src_mm, vma, addr);
1018		if (!prealloc)
1019			return -ENOMEM;
1020		/* We've captured and resolved the error. Reset, try again. */
1021		ret = 0;
1022	}
1023	if (addr != end)
1024		goto again;
1025out:
1026	if (unlikely(prealloc))
1027		put_page(prealloc);
1028	return ret;
1029}
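/*
 * Editor's illustration (not part of memory.c): the retry shape of
 * copy_pte_range() above -- work through a batch while "locked", bail
 * out with -EAGAIN when a step needs a fresh page (allocation may
 * sleep, so it cannot happen under the lock), allocate, and resume at
 * the recorded position.  Standalone userspace sketch; all names are
 * the example's own and malloc() stands in for page_copy_prealloc().
 */
#include <errno.h>
#include <stdlib.h>

/* one batch of "pte copies"; pretend entry 3 is a pinned page */
static int do_batch_locked(void **prealloc, int *addr, int end)
{
	while (*addr < end) {
		if (*addr == 3) {
			if (!*prealloc)
				return -EAGAIN;	/* need page, can't sleep */
			free(*prealloc);	/* consume preallocation  */
			*prealloc = NULL;
		}
		(*addr)++;
	}
	return 0;
}

int main(void)
{
	void *prealloc = NULL;
	int addr = 0, ret;

again:
	ret = do_batch_locked(&prealloc, &addr, 8);	/* "ptl held"     */
	if (ret == -EAGAIN) {
		prealloc = malloc(64);		/* sleepable, lock dropped */
		if (!prealloc)
			return -ENOMEM;
		goto again;			/* resume at same addr     */
	}
	free(prealloc);				/* unused leftover, if any */
	return ret;
}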
1030
1031static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1032		pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
1033		struct vm_area_struct *new,
1034		unsigned long addr, unsigned long end)
1035{
1036	pmd_t *src_pmd, *dst_pmd;
1037	unsigned long next;
1038
1039	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
1040	if (!dst_pmd)
1041		return -ENOMEM;
1042	src_pmd = pmd_offset(src_pud, addr);
1043	do {
1044		next = pmd_addr_end(addr, end);
1045		if (is_swap_pmd(*src_pmd) || pmd_trans_huge(*src_pmd)
1046			|| pmd_devmap(*src_pmd)) {
1047			int err;
1048			VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, vma);
1049			err = copy_huge_pmd(dst_mm, src_mm,
1050					    dst_pmd, src_pmd, addr, vma);
1051			if (err == -ENOMEM)
1052				return -ENOMEM;
1053			if (!err)
1054				continue;
1055			/* fall through */
1056		}
1057		if (pmd_none_or_clear_bad(src_pmd))
1058			continue;
1059		if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
1060				   vma, new, addr, next))
1061			return -ENOMEM;
1062	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
1063	return 0;
1064}
1065
1066static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1067		p4d_t *dst_p4d, p4d_t *src_p4d, struct vm_area_struct *vma,
1068		struct vm_area_struct *new,
1069		unsigned long addr, unsigned long end)
1070{
1071	pud_t *src_pud, *dst_pud;
1072	unsigned long next;
1073
1074	dst_pud = pud_alloc(dst_mm, dst_p4d, addr);
1075	if (!dst_pud)
1076		return -ENOMEM;
1077	src_pud = pud_offset(src_p4d, addr);
1078	do {
1079		next = pud_addr_end(addr, end);
1080		if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) {
1081			int err;
1082
1083			VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, vma);
1084			err = copy_huge_pud(dst_mm, src_mm,
1085					    dst_pud, src_pud, addr, vma);
1086			if (err == -ENOMEM)
1087				return -ENOMEM;
1088			if (!err)
1089				continue;
1090			/* fall through */
1091		}
1092		if (pud_none_or_clear_bad(src_pud))
1093			continue;
1094		if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
1095				   vma, new, addr, next))
1096			return -ENOMEM;
1097	} while (dst_pud++, src_pud++, addr = next, addr != end);
1098	return 0;
1099}
1100
1101static inline int copy_p4d_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1102		pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
1103		struct vm_area_struct *new,
1104		unsigned long addr, unsigned long end)
1105{
1106	p4d_t *src_p4d, *dst_p4d;
1107	unsigned long next;
1108
1109	dst_p4d = p4d_alloc(dst_mm, dst_pgd, addr);
1110	if (!dst_p4d)
1111		return -ENOMEM;
1112	src_p4d = p4d_offset(src_pgd, addr);
1113	do {
1114		next = p4d_addr_end(addr, end);
1115		if (p4d_none_or_clear_bad(src_p4d))
1116			continue;
1117		if (copy_pud_range(dst_mm, src_mm, dst_p4d, src_p4d,
1118				   vma, new, addr, next))
1119			return -ENOMEM;
1120	} while (dst_p4d++, src_p4d++, addr = next, addr != end);
1121	return 0;
1122}
1123
1124int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1125		    struct vm_area_struct *vma, struct vm_area_struct *new)
1126{
1127	pgd_t *src_pgd, *dst_pgd;
1128	unsigned long next;
1129	unsigned long addr = vma->vm_start;
1130	unsigned long end = vma->vm_end;
1131	struct mmu_notifier_range range;
1132	bool is_cow;
1133	int ret;
1134
1135	/*
1136	 * Don't copy ptes where a page fault will fill them correctly.
1137	 * Fork becomes much lighter when there are big shared or private
1138	 * readonly mappings. The tradeoff is that copy_page_range is more
1139	 * efficient than faulting.
1140	 */
1141	if (!(vma->vm_flags & (VM_HUGETLB | VM_PFNMAP | VM_MIXEDMAP)) &&
1142			!vma->anon_vma)
1143		return 0;
1144
1145	if (is_vm_hugetlb_page(vma))
1146		return copy_hugetlb_page_range(dst_mm, src_mm, vma);
1147
1148	if (unlikely(vma->vm_flags & VM_PFNMAP)) {
1149		/*
1150		 * We do not free on error cases below as remove_vma
 1151		 * gets called on error from a higher-level routine.
1152		 */
1153		ret = track_pfn_copy(vma);
1154		if (ret)
1155			return ret;
1156	}
1157
1158	/*
1159	 * We need to invalidate the secondary MMU mappings only when
1160	 * there could be a permission downgrade on the ptes of the
1161	 * parent mm. And a permission downgrade will only happen if
1162	 * is_cow_mapping() returns true.
1163	 */
1164	is_cow = is_cow_mapping(vma->vm_flags);
1165
1166	if (is_cow) {
1167		mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
1168					0, vma, src_mm, addr, end);
1169		mmu_notifier_invalidate_range_start(&range);
1170	}
1171
1172	ret = 0;
1173	dst_pgd = pgd_offset(dst_mm, addr);
1174	src_pgd = pgd_offset(src_mm, addr);
1175	do {
1176		next = pgd_addr_end(addr, end);
1177		if (pgd_none_or_clear_bad(src_pgd))
1178			continue;
1179		if (unlikely(copy_p4d_range(dst_mm, src_mm, dst_pgd, src_pgd,
1180					    vma, new, addr, next))) {
1181			ret = -ENOMEM;
1182			break;
1183		}
1184	} while (dst_pgd++, src_pgd++, addr = next, addr != end);
1185
1186	if (is_cow)
1187		mmu_notifier_invalidate_range_end(&range);
1188	return ret;
1189}
1190
1191static unsigned long zap_pte_range(struct mmu_gather *tlb,
1192				struct vm_area_struct *vma, pmd_t *pmd,
1193				unsigned long addr, unsigned long end,
1194				struct zap_details *details)
1195{
1196	struct mm_struct *mm = tlb->mm;
1197	int force_flush = 0;
1198	int rss[NR_MM_COUNTERS];
1199	spinlock_t *ptl;
1200	pte_t *start_pte;
1201	pte_t *pte;
1202	swp_entry_t entry;
1203
1204	tlb_change_page_size(tlb, PAGE_SIZE);
1205again:
1206	init_rss_vec(rss);
1207	start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
1208	pte = start_pte;
1209	flush_tlb_batched_pending(mm);
1210	arch_enter_lazy_mmu_mode();
1211	do {
1212		pte_t ptent = *pte;
1213		if (pte_none(ptent))
1214			continue;
1215
1216		if (need_resched())
1217			break;
1218
1219		if (pte_present(ptent)) {
1220			struct page *page;
1221
1222			page = vm_normal_page(vma, addr, ptent);
1223			if (unlikely(details) && page) {
1224				/*
1225				 * unmap_shared_mapping_pages() wants to
1226				 * invalidate cache without truncating:
1227				 * unmap shared but keep private pages.
1228				 */
1229				if (details->check_mapping &&
1230				    details->check_mapping != page_rmapping(page))
1231					continue;
1232			}
1233			ptent = ptep_get_and_clear_full(mm, addr, pte,
1234							tlb->fullmm);
1235			tlb_remove_tlb_entry(tlb, pte, addr);
1236			if (unlikely(!page))
1237				continue;
1238
1239			if (!PageAnon(page)) {
1240				if (pte_dirty(ptent)) {
1241					force_flush = 1;
1242					set_page_dirty(page);
1243				}
1244				if (pte_young(ptent) &&
1245				    likely(!(vma->vm_flags & VM_SEQ_READ)))
1246					mark_page_accessed(page);
1247			}
1248			rss[mm_counter(page)]--;
1249			page_remove_rmap(page, false);
1250			if (unlikely(page_mapcount(page) < 0))
1251				print_bad_pte(vma, addr, ptent, page);
1252			if (unlikely(__tlb_remove_page(tlb, page))) {
1253				force_flush = 1;
1254				addr += PAGE_SIZE;
1255				break;
1256			}
1257			continue;
1258		}
1259
1260		entry = pte_to_swp_entry(ptent);
1261		if (is_device_private_entry(entry)) {
1262			struct page *page = device_private_entry_to_page(entry);
1263
1264			if (unlikely(details && details->check_mapping)) {
1265				/*
1266				 * unmap_shared_mapping_pages() wants to
1267				 * invalidate cache without truncating:
1268				 * unmap shared but keep private pages.
1269				 */
1270				if (details->check_mapping !=
1271				    page_rmapping(page))
1272					continue;
1273			}
1274
1275			pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
1276			rss[mm_counter(page)]--;
1277			page_remove_rmap(page, false);
1278			put_page(page);
1279			continue;
1280		}
1281
1282		/* If details->check_mapping, we leave swap entries. */
1283		if (unlikely(details))
1284			continue;
1285
1286		if (!non_swap_entry(entry))
1287			rss[MM_SWAPENTS]--;
1288		else if (is_migration_entry(entry)) {
1289			struct page *page;
1290
1291			page = migration_entry_to_page(entry);
1292			rss[mm_counter(page)]--;
1293		}
1294		if (unlikely(!free_swap_and_cache(entry)))
1295			print_bad_pte(vma, addr, ptent, NULL);
1296		pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
1297	} while (pte++, addr += PAGE_SIZE, addr != end);
1298
1299	add_mm_rss_vec(mm, rss);
1300	arch_leave_lazy_mmu_mode();
1301
1302	/* Do the actual TLB flush before dropping ptl */
1303	if (force_flush)
1304		tlb_flush_mmu_tlbonly(tlb);
1305	pte_unmap_unlock(start_pte, ptl);
1306
1307	/*
1308	 * If we forced a TLB flush (either due to running out of
1309	 * batch buffers or because we needed to flush dirty TLB
1310	 * entries before releasing the ptl), free the batched
1311	 * memory too. Restart if we didn't do everything.
1312	 */
1313	if (force_flush) {
1314		force_flush = 0;
1315		tlb_flush_mmu(tlb);
1316	}
1317
1318	if (addr != end) {
1319		cond_resched();
1320		goto again;
1321	}
1322
1323	return addr;
1324}
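/*
 * Editor's illustration (not part of memory.c): the flush-and-restart
 * shape of zap_pte_range() above.  Cleared entries are batched; when
 * the batch fills (or dirty state forces it), flush before going on,
 * then resume from where the walk stopped.  Standalone userspace
 * sketch; all names are the example's own.
 */
#include <stdio.h>

#define BATCH 4

int main(void)
{
	int nbatch = 0;
	int addr = 0, end = 10;

again:
	while (addr < end) {
		nbatch++;		/* "clear pte, queue the page"  */
		addr++;
		if (nbatch == BATCH)
			break;		/* batch full: force a flush    */
	}
	printf("flush %d entries, stopped at %d\n", nbatch, addr);
	nbatch = 0;			/* tlb_flush_mmu() analogue     */
	if (addr != end)
		goto again;		/* restart the interrupted walk */
	return 0;
}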
1325
1326static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
1327				struct vm_area_struct *vma, pud_t *pud,
1328				unsigned long addr, unsigned long end,
1329				struct zap_details *details)
1330{
1331	pmd_t *pmd;
1332	unsigned long next;
1333
1334	pmd = pmd_offset(pud, addr);
1335	do {
1336		next = pmd_addr_end(addr, end);
1337		if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
1338			if (next - addr != HPAGE_PMD_SIZE)
1339				__split_huge_pmd(vma, pmd, addr, false, NULL);
1340			else if (zap_huge_pmd(tlb, vma, pmd, addr))
1341				goto next;
1342			/* fall through */
1343		}
1344		/*
1345		 * Here there can be other concurrent MADV_DONTNEED or
1346		 * trans huge page faults running, and if the pmd is
1347		 * none or trans huge it can change under us. This is
1348		 * because MADV_DONTNEED holds the mmap_lock in read
1349		 * mode.
1350		 */
1351		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
1352			goto next;
1353		next = zap_pte_range(tlb, vma, pmd, addr, next, details);
1354next:
1355		cond_resched();
1356	} while (pmd++, addr = next, addr != end);
1357
1358	return addr;
1359}
1360
1361static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
1362				struct vm_area_struct *vma, p4d_t *p4d,
1363				unsigned long addr, unsigned long end,
1364				struct zap_details *details)
1365{
1366	pud_t *pud;
1367	unsigned long next;
1368
1369	pud = pud_offset(p4d, addr);
1370	do {
1371		next = pud_addr_end(addr, end);
1372		if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
1373			if (next - addr != HPAGE_PUD_SIZE) {
1374				mmap_assert_locked(tlb->mm);
1375				split_huge_pud(vma, pud, addr);
1376			} else if (zap_huge_pud(tlb, vma, pud, addr))
1377				goto next;
1378			/* fall through */
1379		}
1380		if (pud_none_or_clear_bad(pud))
1381			continue;
1382		next = zap_pmd_range(tlb, vma, pud, addr, next, details);
1383next:
1384		cond_resched();
1385	} while (pud++, addr = next, addr != end);
1386
1387	return addr;
1388}
1389
1390static inline unsigned long zap_p4d_range(struct mmu_gather *tlb,
1391				struct vm_area_struct *vma, pgd_t *pgd,
1392				unsigned long addr, unsigned long end,
1393				struct zap_details *details)
1394{
1395	p4d_t *p4d;
1396	unsigned long next;
1397
1398	p4d = p4d_offset(pgd, addr);
1399	do {
1400		next = p4d_addr_end(addr, end);
1401		if (p4d_none_or_clear_bad(p4d))
1402			continue;
1403		next = zap_pud_range(tlb, vma, p4d, addr, next, details);
1404	} while (p4d++, addr = next, addr != end);
1405
1406	return addr;
1407}
1408
1409void unmap_page_range(struct mmu_gather *tlb,
1410			     struct vm_area_struct *vma,
1411			     unsigned long addr, unsigned long end,
1412			     struct zap_details *details)
1413{
1414	pgd_t *pgd;
1415	unsigned long next;
1416
1417	BUG_ON(addr >= end);
1418	tlb_start_vma(tlb, vma);
1419	pgd = pgd_offset(vma->vm_mm, addr);
1420	do {
1421		next = pgd_addr_end(addr, end);
1422		if (pgd_none_or_clear_bad(pgd))
1423			continue;
1424		next = zap_p4d_range(tlb, vma, pgd, addr, next, details);
1425	} while (pgd++, addr = next, addr != end);
1426	tlb_end_vma(tlb, vma);
1427}
1428
1429
1430static void unmap_single_vma(struct mmu_gather *tlb,
1431		struct vm_area_struct *vma, unsigned long start_addr,
1432		unsigned long end_addr,
1433		struct zap_details *details)
1434{
1435	unsigned long start = max(vma->vm_start, start_addr);
1436	unsigned long end;
1437
1438	if (start >= vma->vm_end)
1439		return;
1440	end = min(vma->vm_end, end_addr);
1441	if (end <= vma->vm_start)
1442		return;
1443
1444	if (vma->vm_file)
1445		uprobe_munmap(vma, start, end);
1446
1447	if (unlikely(vma->vm_flags & VM_PFNMAP))
1448		untrack_pfn(vma, 0, 0);
1449
1450	if (start != end) {
1451		if (unlikely(is_vm_hugetlb_page(vma))) {
1452			/*
1453			 * It is undesirable to test vma->vm_file as it
1454			 * should be non-null for valid hugetlb area.
1455			 * However, vm_file will be NULL in the error
1456			 * cleanup path of mmap_region. When
1457			 * hugetlbfs ->mmap method fails,
1458			 * mmap_region() nullifies vma->vm_file
1459			 * before calling this function to clean up.
1460			 * Since no pte has actually been setup, it is
1461			 * safe to do nothing in this case.
1462			 */
1463			if (vma->vm_file) {
1464				i_mmap_lock_write(vma->vm_file->f_mapping);
1465				__unmap_hugepage_range_final(tlb, vma, start, end, NULL);
1466				i_mmap_unlock_write(vma->vm_file->f_mapping);
1467			}
1468		} else
1469			unmap_page_range(tlb, vma, start, end, details);
1470	}
1471}
1472
1473/**
1474 * unmap_vmas - unmap a range of memory covered by a list of vma's
1475 * @tlb: address of the caller's struct mmu_gather
1476 * @vma: the starting vma
1477 * @start_addr: virtual address at which to start unmapping
1478 * @end_addr: virtual address at which to end unmapping
1479 *
1480 * Unmap all pages in the vma list.
1481 *
1482 * Only addresses between `start' and `end' will be unmapped.
1483 *
1484 * The VMA list must be sorted in ascending virtual address order.
1485 *
1486 * unmap_vmas() assumes that the caller will flush the whole unmapped address
1487 * range after unmap_vmas() returns.  So the only responsibility here is to
1488 * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
1489 * drops the lock and schedules.
1490 */
1491void unmap_vmas(struct mmu_gather *tlb,
1492		struct vm_area_struct *vma, unsigned long start_addr,
1493		unsigned long end_addr)
1494{
1495	struct mmu_notifier_range range;
1496
1497	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
1498				start_addr, end_addr);
1499	mmu_notifier_invalidate_range_start(&range);
1500	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
1501		unmap_single_vma(tlb, vma, start_addr, end_addr, NULL);
1502	mmu_notifier_invalidate_range_end(&range);
1503}
1504
1505/**
1506 * zap_page_range - remove user pages in a given range
1507 * @vma: vm_area_struct holding the applicable pages
1508 * @start: starting address of pages to zap
1509 * @size: number of bytes to zap
1510 *
1511 * Caller must protect the VMA list
1512 */
1513void zap_page_range(struct vm_area_struct *vma, unsigned long start,
1514		unsigned long size)
1515{
1516	struct mmu_notifier_range range;
1517	struct mmu_gather tlb;
1518
1519	lru_add_drain();
1520	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
1521				start, start + size);
1522	tlb_gather_mmu(&tlb, vma->vm_mm, start, range.end);
1523	update_hiwater_rss(vma->vm_mm);
1524	mmu_notifier_invalidate_range_start(&range);
1525	for ( ; vma && vma->vm_start < range.end; vma = vma->vm_next)
1526		unmap_single_vma(&tlb, vma, start, range.end, NULL);
1527	mmu_notifier_invalidate_range_end(&range);
1528	tlb_finish_mmu(&tlb, start, range.end);
1529}
1530
1531/**
1532 * zap_page_range_single - remove user pages in a given range
1533 * @vma: vm_area_struct holding the applicable pages
1534 * @address: starting address of pages to zap
1535 * @size: number of bytes to zap
1536 * @details: details of shared cache invalidation
1537 *
1538 * The range must fit into one VMA.
1539 */
1540static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
1541		unsigned long size, struct zap_details *details)
1542{
1543	struct mmu_notifier_range range;
1544	struct mmu_gather tlb;
1545
1546	lru_add_drain();
1547	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
1548				address, address + size);
1549	tlb_gather_mmu(&tlb, vma->vm_mm, address, range.end);
1550	update_hiwater_rss(vma->vm_mm);
1551	mmu_notifier_invalidate_range_start(&range);
1552	unmap_single_vma(&tlb, vma, address, range.end, details);
1553	mmu_notifier_invalidate_range_end(&range);
1554	tlb_finish_mmu(&tlb, address, range.end);
1555}
1556
1557/**
1558 * zap_vma_ptes - remove ptes mapping the vma
1559 * @vma: vm_area_struct holding ptes to be zapped
1560 * @address: starting address of pages to zap
1561 * @size: number of bytes to zap
1562 *
1563 * This function only unmaps ptes assigned to VM_PFNMAP vmas.
1564 *
1565 * The entire address range must be fully contained within the vma.
1566 *
1567 */
1568void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
1569		unsigned long size)
1570{
1571	if (address < vma->vm_start || address + size > vma->vm_end ||
 1572	    !(vma->vm_flags & VM_PFNMAP))
1573		return;
1574
1575	zap_page_range_single(vma, address, size, NULL);
1576}
1577EXPORT_SYMBOL_GPL(zap_vma_ptes);
1578
1579static pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long addr)
1580{
1581	pgd_t *pgd;
1582	p4d_t *p4d;
1583	pud_t *pud;
1584	pmd_t *pmd;
1585
1586	pgd = pgd_offset(mm, addr);
1587	p4d = p4d_alloc(mm, pgd, addr);
1588	if (!p4d)
1589		return NULL;
1590	pud = pud_alloc(mm, p4d, addr);
1591	if (!pud)
1592		return NULL;
1593	pmd = pmd_alloc(mm, pud, addr);
1594	if (!pmd)
1595		return NULL;
1596
1597	VM_BUG_ON(pmd_trans_huge(*pmd));
1598	return pmd;
1599}
1600
1601pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
1602			spinlock_t **ptl)
1603{
1604	pmd_t *pmd = walk_to_pmd(mm, addr);
1605
1606	if (!pmd)
1607		return NULL;
1608	return pte_alloc_map_lock(mm, pmd, addr, ptl);
1609}
1610
1611static int validate_page_before_insert(struct page *page)
1612{
1613	if (PageAnon(page) || PageSlab(page) || page_has_type(page))
1614		return -EINVAL;
1615	flush_dcache_page(page);
1616	return 0;
1617}
1618
1619static int insert_page_into_pte_locked(struct mm_struct *mm, pte_t *pte,
1620			unsigned long addr, struct page *page, pgprot_t prot)
1621{
1622	if (!pte_none(*pte))
1623		return -EBUSY;
1624	/* Ok, finally just insert the thing.. */
1625	get_page(page);
1626	inc_mm_counter_fast(mm, mm_counter_file(page));
1627	page_add_file_rmap(page, false);
1628	set_pte_at(mm, addr, pte, mk_pte(page, prot));
1629	return 0;
1630}
1631
1632/*
1633 * This is the old fallback for page remapping.
1634 *
1635 * For historical reasons, it only allows reserved pages. Only
1636 * old drivers should use this, and they needed to mark their
1637 * pages reserved for the old functions anyway.
1638 */
1639static int insert_page(struct vm_area_struct *vma, unsigned long addr,
1640			struct page *page, pgprot_t prot)
1641{
1642	struct mm_struct *mm = vma->vm_mm;
1643	int retval;
1644	pte_t *pte;
1645	spinlock_t *ptl;
1646
1647	retval = validate_page_before_insert(page);
1648	if (retval)
1649		goto out;
1650	retval = -ENOMEM;
1651	pte = get_locked_pte(mm, addr, &ptl);
1652	if (!pte)
1653		goto out;
1654	retval = insert_page_into_pte_locked(mm, pte, addr, page, prot);
1655	pte_unmap_unlock(pte, ptl);
1656out:
1657	return retval;
1658}
1659
1660#ifdef pte_index
1661static int insert_page_in_batch_locked(struct mm_struct *mm, pte_t *pte,
1662			unsigned long addr, struct page *page, pgprot_t prot)
1663{
1664	int err;
1665
1666	if (!page_count(page))
1667		return -EINVAL;
1668	err = validate_page_before_insert(page);
1669	if (err)
1670		return err;
1671	return insert_page_into_pte_locked(mm, pte, addr, page, prot);
1672}
1673
1674/* insert_pages() amortizes the cost of spinlock operations
1675 * when inserting pages in a loop. Arch *must* define pte_index.
1676 */
1677static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
1678			struct page **pages, unsigned long *num, pgprot_t prot)
1679{
1680	pmd_t *pmd = NULL;
1681	pte_t *start_pte, *pte;
1682	spinlock_t *pte_lock;
1683	struct mm_struct *const mm = vma->vm_mm;
1684	unsigned long curr_page_idx = 0;
1685	unsigned long remaining_pages_total = *num;
1686	unsigned long pages_to_write_in_pmd;
1687	int ret;
1688more:
1689	ret = -EFAULT;
1690	pmd = walk_to_pmd(mm, addr);
1691	if (!pmd)
1692		goto out;
1693
1694	pages_to_write_in_pmd = min_t(unsigned long,
1695		remaining_pages_total, PTRS_PER_PTE - pte_index(addr));
1696
1697	/* Allocate the PTE if necessary; takes PMD lock once only. */
1698	ret = -ENOMEM;
1699	if (pte_alloc(mm, pmd))
1700		goto out;
1701
1702	while (pages_to_write_in_pmd) {
1703		int pte_idx = 0;
1704		const int batch_size = min_t(int, pages_to_write_in_pmd, 8);
1705
1706		start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock);
1707		for (pte = start_pte; pte_idx < batch_size; ++pte, ++pte_idx) {
1708			int err = insert_page_in_batch_locked(mm, pte,
1709				addr, pages[curr_page_idx], prot);
1710			if (unlikely(err)) {
1711				pte_unmap_unlock(start_pte, pte_lock);
1712				ret = err;
1713				remaining_pages_total -= pte_idx;
1714				goto out;
1715			}
1716			addr += PAGE_SIZE;
1717			++curr_page_idx;
1718		}
1719		pte_unmap_unlock(start_pte, pte_lock);
1720		pages_to_write_in_pmd -= batch_size;
1721		remaining_pages_total -= batch_size;
1722	}
1723	if (remaining_pages_total)
1724		goto more;
1725	ret = 0;
1726out:
1727	*num = remaining_pages_total;
1728	return ret;
1729}
1730#endif  /* ifdef pte_index */
1731
1732/**
1733 * vm_insert_pages - insert multiple pages into user vma, batching the pmd lock.
1734 * @vma: user vma to map to
1735 * @addr: target start user address of these pages
1736 * @pages: source kernel pages
1737 * @num: in: number of pages to map. out: number of pages that were *not*
1738 * mapped. (0 means all pages were successfully mapped).
1739 *
1740 * Preferred over vm_insert_page() when inserting multiple pages.
1741 *
1742 * In case of error, we may have mapped a subset of the provided
1743 * pages. It is the caller's responsibility to account for this case.
1744 *
1745 * The same restrictions apply as in vm_insert_page().
1746 */
1747int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
1748			struct page **pages, unsigned long *num)
1749{
1750#ifdef pte_index
1751	const unsigned long end_addr = addr + (*num * PAGE_SIZE) - 1;
1752
1753	if (addr < vma->vm_start || end_addr >= vma->vm_end)
1754		return -EFAULT;
1755	if (!(vma->vm_flags & VM_MIXEDMAP)) {
1756		BUG_ON(mmap_read_trylock(vma->vm_mm));
1757		BUG_ON(vma->vm_flags & VM_PFNMAP);
1758		vma->vm_flags |= VM_MIXEDMAP;
1759	}
1760	/* Defer page refcount checking till we're about to map that page. */
1761	return insert_pages(vma, addr, pages, num, vma->vm_page_prot);
1762#else
1763	unsigned long idx = 0, pgcount = *num;
1764	int err = -EINVAL;
1765
1766	for (; idx < pgcount; ++idx) {
1767		err = vm_insert_page(vma, addr + (PAGE_SIZE * idx), pages[idx]);
1768		if (err)
1769			break;
1770	}
1771	*num = pgcount - idx;
1772	return err;
1773#endif  /* ifdef pte_index */
1774}
1775EXPORT_SYMBOL(vm_insert_pages);
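
/*
 * Illustrative usage sketch (not part of this file): a hypothetical
 * driver mmap path mapping a pre-allocated page array in one call,
 * amortizing the PTE lock across the batch instead of looping over
 * vm_insert_page().  "struct my_buf" and its fields are assumptions.
 */
static int my_buf_mmap(struct my_buf *buf, struct vm_area_struct *vma)
{
	unsigned long num = buf->nr_pages;
	int err;

	/* Called under mmap_lock from f_op->mmap, as vm_insert_pages expects. */
	err = vm_insert_pages(vma, vma->vm_start, buf->pages, &num);
	if (err)
		/* On failure, 'num' holds the count of pages NOT mapped. */
		pr_warn("failed to map %lu pages: %d\n", num, err);
	return err;
}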
1776
1777/**
1778 * vm_insert_page - insert single page into user vma
1779 * @vma: user vma to map to
1780 * @addr: target user address of this page
1781 * @page: source kernel page
1782 *
1783 * This allows drivers to insert individual pages they've allocated
1784 * into a user vma.
1785 *
1786 * The page has to be a nice clean _individual_ kernel allocation.
1787 * If you allocate a compound page, you need to have marked it as
1788 * such (__GFP_COMP), or manually just split the page up yourself
1789 * (see split_page()).
1790 *
1791 * NOTE! Traditionally this was done with "remap_pfn_range()" which
1792 * took an arbitrary page protection parameter. This doesn't allow
1793 * that. Your vma protection will have to be set up correctly, which
1794 * means that if you want a shared writable mapping, you'd better
1795 * ask for a shared writable mapping!
1796 *
1797 * The page does not need to be reserved.
1798 *
1799 * Usually this function is called from f_op->mmap() handler
1800 * under mm->mmap_lock write-lock, so it can change vma->vm_flags.
1801 * Caller must set VM_MIXEDMAP on vma if it wants to call this
1802 * function from other places, for example from page-fault handler.
1803 *
1804 * Return: %0 on success, negative error code otherwise.
1805 */
1806int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
1807			struct page *page)
1808{
1809	if (addr < vma->vm_start || addr >= vma->vm_end)
1810		return -EFAULT;
1811	if (!page_count(page))
1812		return -EINVAL;
1813	if (!(vma->vm_flags & VM_MIXEDMAP)) {
1814		BUG_ON(mmap_read_trylock(vma->vm_mm));
1815		BUG_ON(vma->vm_flags & VM_PFNMAP);
1816		vma->vm_flags |= VM_MIXEDMAP;
1817	}
1818	return insert_page(vma, addr, page, vma->vm_page_prot);
1819}
1820EXPORT_SYMBOL(vm_insert_page);
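
/*
 * Illustrative usage sketch (assumptions marked): the classic
 * f_op->mmap pattern for vm_insert_page().  Because it runs under the
 * mmap_lock write-lock, vm_insert_page() may set VM_MIXEDMAP on the
 * vma itself.  my_lookup_page() is a hypothetical helper returning
 * the driver's refcounted, order-0 kernel page for a given index.
 */
static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long uaddr = vma->vm_start;
	unsigned long i;

	for (i = 0; i < vma_pages(vma); i++) {
		struct page *page = my_lookup_page(file->private_data, i);
		int err = vm_insert_page(vma, uaddr, page);

		if (err)
			return err;	/* mmap() failure path unmaps the vma */
		uaddr += PAGE_SIZE;
	}
	return 0;
}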
1821
1822/*
1823 * __vm_map_pages - map a range of kernel pages into a user vma
1824 * @vma: user vma to map to
1825 * @pages: pointer to array of source kernel pages
1826 * @num: number of pages in page array
1827 * @offset: user's requested vm_pgoff
1828 *
1829 * This allows drivers to map a range of kernel pages into a user vma.
1830 *
1831 * Return: 0 on success and error code otherwise.
1832 */
1833static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages,
1834				unsigned long num, unsigned long offset)
1835{
1836	unsigned long count = vma_pages(vma);
1837	unsigned long uaddr = vma->vm_start;
1838	int ret, i;
1839
1840	/* Fail if the user requested offset is beyond the end of the object */
1841	if (offset >= num)
1842		return -ENXIO;
1843
1844	/* Fail if the user requested size exceeds available object size */
1845	if (count > num - offset)
1846		return -ENXIO;
1847
1848	for (i = 0; i < count; i++) {
1849		ret = vm_insert_page(vma, uaddr, pages[offset + i]);
1850		if (ret < 0)
1851			return ret;
1852		uaddr += PAGE_SIZE;
1853	}
1854
1855	return 0;
1856}
1857
1858/**
1859 * vm_map_pages - map a range of kernel pages starting at a non-zero offset
1860 * @vma: user vma to map to
1861 * @pages: pointer to array of source kernel pages
1862 * @num: number of pages in page array
1863 *
1864 * Maps an object consisting of @num pages, catering for the user's
1865 * requested vm_pgoff
1866 *
1867 * If we fail to insert any page into the vma, the function will return
1868 * immediately leaving any previously inserted pages present.  Callers
1869 * from the mmap handler may immediately return the error as their caller
1870 * will destroy the vma, removing any successfully inserted pages. Other
1871 * callers should make their own arrangements for calling unmap_region().
1872 *
1873 * Context: Process context. Called by mmap handlers.
1874 * Return: 0 on success and error code otherwise.
1875 */
1876int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
1877				unsigned long num)
1878{
1879	return __vm_map_pages(vma, pages, num, vma->vm_pgoff);
1880}
1881EXPORT_SYMBOL(vm_map_pages);
1882
1883/**
1884 * vm_map_pages_zero - map a range of kernel pages starting at offset zero
1885 * @vma: user vma to map to
1886 * @pages: pointer to array of source kernel pages
1887 * @num: number of pages in page array
1888 *
1889 * Similar to vm_map_pages(), except that it explicitly sets the offset
1890 * to 0. This function is intended for drivers that did not consider
1891 * vm_pgoff.
1892 *
1893 * Context: Process context. Called by mmap handlers.
1894 * Return: 0 on success and error code otherwise.
1895 */
1896int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
1897				unsigned long num)
1898{
1899	return __vm_map_pages(vma, pages, num, 0);
1900}
1901EXPORT_SYMBOL(vm_map_pages_zero);
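
/*
 * Illustrative usage sketch: V4L2/DRM-style drivers typically map a
 * whole buffer with one call, letting the helper honour the page
 * offset userspace passed via mmap()'s pgoff argument.  "struct
 * my_obj" is a hypothetical per-buffer object.
 */
static int my_obj_mmap(struct my_obj *obj, struct vm_area_struct *vma)
{
	/* Use vm_map_pages_zero() instead if vm_pgoff must be ignored. */
	return vm_map_pages(vma, obj->pages, obj->nr_pages);
}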
1902
1903static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
1904			pfn_t pfn, pgprot_t prot, bool mkwrite)
1905{
1906	struct mm_struct *mm = vma->vm_mm;
1907	pte_t *pte, entry;
1908	spinlock_t *ptl;
1909
1910	pte = get_locked_pte(mm, addr, &ptl);
1911	if (!pte)
1912		return VM_FAULT_OOM;
1913	if (!pte_none(*pte)) {
1914		if (mkwrite) {
1915			/*
1916			 * For read faults on private mappings the PFN passed
1917			 * in may not match the PFN we have mapped if the
1918			 * mapped PFN is a writeable COW page.  In the mkwrite
1919			 * case we are creating a writable PTE for a shared
1920			 * mapping and we expect the PFNs to match. If they
1921			 * don't match, we are likely racing with block
1922			 * allocation and mapping invalidation so just skip the
1923			 * update.
1924			 */
1925			if (pte_pfn(*pte) != pfn_t_to_pfn(pfn)) {
1926				WARN_ON_ONCE(!is_zero_pfn(pte_pfn(*pte)));
1927				goto out_unlock;
1928			}
1929			entry = pte_mkyoung(*pte);
1930			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
1931			if (ptep_set_access_flags(vma, addr, pte, entry, 1))
1932				update_mmu_cache(vma, addr, pte);
1933		}
1934		goto out_unlock;
1935	}
1936
1937	/* Ok, finally just insert the thing.. */
1938	if (pfn_t_devmap(pfn))
1939		entry = pte_mkdevmap(pfn_t_pte(pfn, prot));
1940	else
1941		entry = pte_mkspecial(pfn_t_pte(pfn, prot));
1942
1943	if (mkwrite) {
1944		entry = pte_mkyoung(entry);
1945		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
1946	}
1947
1948	set_pte_at(mm, addr, pte, entry);
1949	update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
1950
1951out_unlock:
1952	pte_unmap_unlock(pte, ptl);
1953	return VM_FAULT_NOPAGE;
1954}
1955
1956/**
1957 * vmf_insert_pfn_prot - insert single pfn into user vma with specified pgprot
1958 * @vma: user vma to map to
1959 * @addr: target user address of this page
1960 * @pfn: source kernel pfn
1961 * @pgprot: pgprot flags for the inserted page
1962 *
1963 * This is exactly like vmf_insert_pfn(), except that it allows drivers
1964 * to override pgprot on a per-page basis.
1965 *
1966 * This only makes sense for IO mappings, and it makes no sense for
1967 * COW mappings.  In general, using multiple vmas is preferable;
1968 * vmf_insert_pfn_prot should only be used if using multiple VMAs is
1969 * impractical.
1970 *
1971 * See vmf_insert_mixed_prot() for a discussion of the implication of using
1972 * a value of @pgprot different from that of @vma->vm_page_prot.
1973 *
1974 * Context: Process context.  May allocate using %GFP_KERNEL.
1975 * Return: vm_fault_t value.
1976 */
1977vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
1978			unsigned long pfn, pgprot_t pgprot)
1979{
1980	/*
1981	 * Technically, architectures with pte_special can avoid all these
1982	 * restrictions (same for remap_pfn_range).  However we would like
1983	 * consistency in testing and feature parity among all, so we should
1984	 * try to keep these invariants in place for everybody.
1985	 */
1986	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
1987	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
1988						(VM_PFNMAP|VM_MIXEDMAP));
1989	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
1990	BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
1991
1992	if (addr < vma->vm_start || addr >= vma->vm_end)
1993		return VM_FAULT_SIGBUS;
1994
1995	if (!pfn_modify_allowed(pfn, pgprot))
1996		return VM_FAULT_SIGBUS;
1997
1998	track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV));
1999
2000	return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot,
2001			false);
2002}
2003EXPORT_SYMBOL(vmf_insert_pfn_prot);
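
/*
 * Illustrative usage sketch: override the pgprot per page, e.g. to map
 * one pfn write-combined while vma->vm_page_prot stays as negotiated
 * at mmap() time.  my_wc_pfn() is a hypothetical pgoff-to-pfn lookup.
 */
static vm_fault_t my_wc_fault(struct vm_fault *vmf)
{
	return vmf_insert_pfn_prot(vmf->vma, vmf->address,
				   my_wc_pfn(vmf->pgoff),
				   pgprot_writecombine(vmf->vma->vm_page_prot));
}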
2004
2005/**
2006 * vmf_insert_pfn - insert single pfn into user vma
2007 * @vma: user vma to map to
2008 * @addr: target user address of this page
2009 * @pfn: source kernel pfn
2010 *
2011 * Similar to vm_insert_page, this allows drivers to insert individual pages
2012 * they've allocated into a user vma. Same comments apply.
2013 *
2014 * This function should only be called from a vm_ops->fault handler, and
2015 * in that case the handler should return the result of this function.
2016 *
2017 * vma cannot be a COW mapping.
2018 *
2019 * As this is called only for pages that do not currently exist, we
2020 * do not need to flush old virtual caches or the TLB.
2021 *
2022 * Context: Process context.  May allocate using %GFP_KERNEL.
2023 * Return: vm_fault_t value.
2024 */
2025vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2026			unsigned long pfn)
2027{
2028	return vmf_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
2029}
2030EXPORT_SYMBOL(vmf_insert_pfn);
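
/*
 * Illustrative usage sketch: vmf_insert_pfn() is designed to be called
 * from vm_ops->fault with its result returned directly; the vma is set
 * up as VM_PFNMAP at mmap() time.  "struct my_dev" is an assumption.
 */
static vm_fault_t my_dev_fault(struct vm_fault *vmf)
{
	struct my_dev *dev = vmf->vma->vm_private_data;
	unsigned long pfn = (dev->phys_base >> PAGE_SHIFT) + vmf->pgoff;

	return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
}

static const struct vm_operations_struct my_dev_vm_ops = {
	.fault = my_dev_fault,
};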
2031
2032static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn)
2033{
2034	/* these checks mirror the abort conditions in vm_normal_page */
2035	if (vma->vm_flags & VM_MIXEDMAP)
2036		return true;
2037	if (pfn_t_devmap(pfn))
2038		return true;
2039	if (pfn_t_special(pfn))
2040		return true;
2041	if (is_zero_pfn(pfn_t_to_pfn(pfn)))
2042		return true;
2043	return false;
2044}
2045
2046static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
2047		unsigned long addr, pfn_t pfn, pgprot_t pgprot,
2048		bool mkwrite)
2049{
2050	int err;
2051
2052	BUG_ON(!vm_mixed_ok(vma, pfn));
2053
2054	if (addr < vma->vm_start || addr >= vma->vm_end)
2055		return VM_FAULT_SIGBUS;
2056
2057	track_pfn_insert(vma, &pgprot, pfn);
2058
2059	if (!pfn_modify_allowed(pfn_t_to_pfn(pfn), pgprot))
2060		return VM_FAULT_SIGBUS;
2061
2062	/*
2063	 * If we don't have pte special, then we have to use the pfn_valid()
2064	 * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
2065	 * refcount the page if pfn_valid is true (hence insert_page rather
2066	 * than insert_pfn).  If a zero_pfn were inserted into a VM_MIXEDMAP
2067 * without pte special, it would then be refcounted as a normal page.
2068	 */
2069	if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) &&
2070	    !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) {
2071		struct page *page;
2072
2073		/*
2074		 * At this point we are committed to insert_page()
2075		 * regardless of whether the caller specified flags that
2076		 * result in pfn_t_has_page() == false.
2077		 */
2078		page = pfn_to_page(pfn_t_to_pfn(pfn));
2079		err = insert_page(vma, addr, page, pgprot);
2080	} else {
2081		return insert_pfn(vma, addr, pfn, pgprot, mkwrite);
2082	}
2083
2084	if (err == -ENOMEM)
2085		return VM_FAULT_OOM;
2086	if (err < 0 && err != -EBUSY)
2087		return VM_FAULT_SIGBUS;
2088
2089	return VM_FAULT_NOPAGE;
2090}
2091
2092/**
2093 * vmf_insert_mixed_prot - insert single pfn into user vma with specified pgprot
2094 * @vma: user vma to map to
2095 * @addr: target user address of this page
2096 * @pfn: source kernel pfn
2097 * @pgprot: pgprot flags for the inserted page
2098 *
2099 * This is exactly like vmf_insert_mixed(), except that it allows drivers
2100 * to override pgprot on a per-page basis.
2101 *
2102 * Typically this function should be used by drivers to set caching- and
2103 * encryption bits different from those of @vma->vm_page_prot, because
2104 * the caching- or encryption mode may not be known at mmap() time.
2105 * This is ok as long as @vma->vm_page_prot is not used by the core vm
2106 * to set caching and encryption bits for those vmas (except for COW pages).
2107 * This is ensured by core vm only modifying these page table entries using
2108 * functions that don't touch caching- or encryption bits, using pte_modify()
2109 * if needed. (See for example mprotect()).
2110 * Also when new page-table entries are created, this is only done using the
2111 * fault() callback, and never using the value of vma->vm_page_prot,
2112 * except for page-table entries that point to anonymous pages as the result
2113 * of COW.
2114 *
2115 * Context: Process context.  May allocate using %GFP_KERNEL.
2116 * Return: vm_fault_t value.
2117 */
2118vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr,
2119				 pfn_t pfn, pgprot_t pgprot)
2120{
2121	return __vm_insert_mixed(vma, addr, pfn, pgprot, false);
2122}
2123EXPORT_SYMBOL(vmf_insert_mixed_prot);
2124
2125vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
2126		pfn_t pfn)
2127{
2128	return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, false);
2129}
2130EXPORT_SYMBOL(vmf_insert_mixed);
2131
2132/*
2133 *  If the insertion of PTE failed because someone else already added a
2134 *  different entry in the mean time, we treat that as success as we assume
2135 *  the same entry was actually inserted.
2136 */
2137vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
2138		unsigned long addr, pfn_t pfn)
2139{
2140	return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, true);
2141}
2142EXPORT_SYMBOL(vmf_insert_mixed_mkwrite);
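
/*
 * Illustrative usage sketch (loosely modelled on DAX-style fault
 * handlers, with all the real locking omitted): on a VM_MIXEDMAP
 * fault, the mkwrite variant lets a write fault upgrade an
 * already-present read-only mapping of the same pfn to writable.
 */
static vm_fault_t my_mixed_fault(struct vm_fault *vmf, pfn_t pfn)
{
	if (vmf->flags & FAULT_FLAG_WRITE)
		return vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
	return vmf_insert_mixed(vmf->vma, vmf->address, pfn);
}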
2143
2144/*
2145 * Maps a range of physical memory into the requested pages. The old
2146 * mappings are removed. Any references to nonexistent pages result
2147 * in null mappings (currently treated as "copy-on-access").
2148 */
2149static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
2150			unsigned long addr, unsigned long end,
2151			unsigned long pfn, pgprot_t prot)
2152{
2153	pte_t *pte;
2154	spinlock_t *ptl;
2155	int err = 0;
2156
2157	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
2158	if (!pte)
2159		return -ENOMEM;
2160	arch_enter_lazy_mmu_mode();
2161	do {
2162		BUG_ON(!pte_none(*pte));
2163		if (!pfn_modify_allowed(pfn, prot)) {
2164			err = -EACCES;
2165			break;
2166		}
2167		set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
2168		pfn++;
2169	} while (pte++, addr += PAGE_SIZE, addr != end);
2170	arch_leave_lazy_mmu_mode();
2171	pte_unmap_unlock(pte - 1, ptl);
2172	return err;
2173}
2174
2175static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
2176			unsigned long addr, unsigned long end,
2177			unsigned long pfn, pgprot_t prot)
2178{
2179	pmd_t *pmd;
2180	unsigned long next;
2181	int err;
2182
2183	pfn -= addr >> PAGE_SHIFT;
2184	pmd = pmd_alloc(mm, pud, addr);
2185	if (!pmd)
2186		return -ENOMEM;
2187	VM_BUG_ON(pmd_trans_huge(*pmd));
2188	do {
2189		next = pmd_addr_end(addr, end);
2190		err = remap_pte_range(mm, pmd, addr, next,
2191				pfn + (addr >> PAGE_SHIFT), prot);
2192		if (err)
2193			return err;
2194	} while (pmd++, addr = next, addr != end);
2195	return 0;
2196}
2197
2198static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d,
2199			unsigned long addr, unsigned long end,
2200			unsigned long pfn, pgprot_t prot)
2201{
2202	pud_t *pud;
2203	unsigned long next;
2204	int err;
2205
2206	pfn -= addr >> PAGE_SHIFT;
2207	pud = pud_alloc(mm, p4d, addr);
2208	if (!pud)
2209		return -ENOMEM;
2210	do {
2211		next = pud_addr_end(addr, end);
2212		err = remap_pmd_range(mm, pud, addr, next,
2213				pfn + (addr >> PAGE_SHIFT), prot);
2214		if (err)
2215			return err;
2216	} while (pud++, addr = next, addr != end);
2217	return 0;
2218}
2219
2220static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
2221			unsigned long addr, unsigned long end,
2222			unsigned long pfn, pgprot_t prot)
2223{
2224	p4d_t *p4d;
2225	unsigned long next;
2226	int err;
2227
2228	pfn -= addr >> PAGE_SHIFT;
2229	p4d = p4d_alloc(mm, pgd, addr);
2230	if (!p4d)
2231		return -ENOMEM;
2232	do {
2233		next = p4d_addr_end(addr, end);
2234		err = remap_pud_range(mm, p4d, addr, next,
2235				pfn + (addr >> PAGE_SHIFT), prot);
2236		if (err)
2237			return err;
2238	} while (p4d++, addr = next, addr != end);
2239	return 0;
2240}
2241
2242/**
2243 * remap_pfn_range - remap kernel memory to userspace
2244 * @vma: user vma to map to
2245 * @addr: target page aligned user address to start at
2246 * @pfn: page frame number of kernel physical memory address
2247 * @size: size of mapping area
2248 * @prot: page protection flags for this mapping
2249 *
2250 * Note: this is only safe if the mm semaphore is held when called.
2251 *
2252 * Return: %0 on success, negative error code otherwise.
2253 */
2254int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
2255		    unsigned long pfn, unsigned long size, pgprot_t prot)
2256{
2257	pgd_t *pgd;
2258	unsigned long next;
2259	unsigned long end = addr + PAGE_ALIGN(size);
2260	struct mm_struct *mm = vma->vm_mm;
2261	unsigned long remap_pfn = pfn;
2262	int err;
2263
2264	if (WARN_ON_ONCE(!PAGE_ALIGNED(addr)))
2265		return -EINVAL;
2266
2267	/*
2268	 * Physically remapped pages are special. Tell the
2269	 * rest of the world about it:
2270	 *   VM_IO tells people not to look at these pages
2271	 *	(accesses can have side effects).
2272	 *   VM_PFNMAP tells the core MM that the base pages are just
2273	 *	raw PFN mappings, and do not have a "struct page" associated
2274	 *	with them.
2275	 *   VM_DONTEXPAND
2276	 *      Disable vma merging and expanding with mremap().
2277	 *   VM_DONTDUMP
2278	 *      Omit vma from core dump, even when VM_IO turned off.
2279	 *
2280	 * There's a horrible special case to handle copy-on-write
2281	 * behaviour that some programs depend on. We mark the "original"
2282	 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
2283	 * See vm_normal_page() for details.
2284	 */
2285	if (is_cow_mapping(vma->vm_flags)) {
2286		if (addr != vma->vm_start || end != vma->vm_end)
2287			return -EINVAL;
2288		vma->vm_pgoff = pfn;
2289	}
2290
2291	err = track_pfn_remap(vma, &prot, remap_pfn, addr, PAGE_ALIGN(size));
2292	if (err)
2293		return -EINVAL;
2294
2295	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
2296
2297	BUG_ON(addr >= end);
2298	pfn -= addr >> PAGE_SHIFT;
2299	pgd = pgd_offset(mm, addr);
2300	flush_cache_range(vma, addr, end);
2301	do {
2302		next = pgd_addr_end(addr, end);
2303		err = remap_p4d_range(mm, pgd, addr, next,
2304				pfn + (addr >> PAGE_SHIFT), prot);
2305		if (err)
2306			break;
2307	} while (pgd++, addr = next, addr != end);
2308
2309	if (err)
2310		untrack_pfn(vma, remap_pfn, PAGE_ALIGN(size));
2311
2312	return err;
2313}
2314EXPORT_SYMBOL(remap_pfn_range);
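
/*
 * Illustrative usage sketch: the classic one-shot remap of a device's
 * MMIO window at mmap() time.  "struct my_dev" and its mmio_start /
 * mmio_len fields are assumptions.
 */
static int my_mmio_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_dev *dev = file->private_data;
	unsigned long size = vma->vm_end - vma->vm_start;

	if (size > dev->mmio_len)
		return -EINVAL;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start,
			       dev->mmio_start >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}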
2315
2316/**
2317 * vm_iomap_memory - remap memory to userspace
2318 * @vma: user vma to map to
2319 * @start: start of the physical memory to be mapped
2320 * @len: size of area
2321 *
2322 * This is a simplified io_remap_pfn_range() for common driver use. The
2323 * driver just needs to give us the physical memory range to be mapped,
2324 * we'll figure out the rest from the vma information.
2325 *
2326 * NOTE! Some drivers might want to tweak vma->vm_page_prot first to get
2327 * write-combining or similar caching behaviour.
2328 *
2329 * Return: %0 on success, negative error code otherwise.
2330 */
2331int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
2332{
2333	unsigned long vm_len, pfn, pages;
2334
2335	/* Check that the physical memory area passed in looks valid */
2336	if (start + len < start)
2337		return -EINVAL;
2338	/*
2339	 * You *really* shouldn't map things that aren't page-aligned,
2340	 * but we've historically allowed it because IO memory might
2341	 * just have smaller alignment.
2342	 */
2343	len += start & ~PAGE_MASK;
2344	pfn = start >> PAGE_SHIFT;
2345	pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
2346	if (pfn + pages < pfn)
2347		return -EINVAL;
2348
2349	/* We start the mapping 'vm_pgoff' pages into the area */
2350	if (vma->vm_pgoff > pages)
2351		return -EINVAL;
2352	pfn += vma->vm_pgoff;
2353	pages -= vma->vm_pgoff;
2354
2355	/* Can we fit all of the mapping? */
2356	vm_len = vma->vm_end - vma->vm_start;
2357	if (vm_len >> PAGE_SHIFT > pages)
2358		return -EINVAL;
2359
2360	/* Ok, let it rip */
2361	return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
2362}
2363EXPORT_SYMBOL(vm_iomap_memory);
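
/*
 * Illustrative usage sketch: the same mapping as a manual
 * remap_pfn_range() call, but letting the helper derive the pfn, size
 * and offset from the vma.  The my_dev fields are assumptions.
 */
static int my_mmio_mmap_simple(struct file *file, struct vm_area_struct *vma)
{
	struct my_dev *dev = file->private_data;

	/* Per the NOTE above, tweak caching in vm_page_prot first. */
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	return vm_iomap_memory(vma, dev->mmio_start, dev->mmio_len);
}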
2364
2365static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
2366				     unsigned long addr, unsigned long end,
2367				     pte_fn_t fn, void *data, bool create,
2368				     pgtbl_mod_mask *mask)
2369{
2370	pte_t *pte;
2371	int err = 0;
2372	spinlock_t *ptl;
2373
2374	if (create) {
2375		pte = (mm == &init_mm) ?
2376			pte_alloc_kernel_track(pmd, addr, mask) :
2377			pte_alloc_map_lock(mm, pmd, addr, &ptl);
2378		if (!pte)
2379			return -ENOMEM;
2380	} else {
2381		pte = (mm == &init_mm) ?
2382			pte_offset_kernel(pmd, addr) :
2383			pte_offset_map_lock(mm, pmd, addr, &ptl);
2384	}
2385
2386	BUG_ON(pmd_huge(*pmd));
2387
2388	arch_enter_lazy_mmu_mode();
2389
2390	do {
2391		if (create || !pte_none(*pte)) {
2392			err = fn(pte++, addr, data);
2393			if (err)
2394				break;
2395		}
2396	} while (addr += PAGE_SIZE, addr != end);
2397	*mask |= PGTBL_PTE_MODIFIED;
2398
2399	arch_leave_lazy_mmu_mode();
2400
2401	if (mm != &init_mm)
2402		pte_unmap_unlock(pte-1, ptl);
2403	return err;
2404}
2405
2406static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
2407				     unsigned long addr, unsigned long end,
2408				     pte_fn_t fn, void *data, bool create,
2409				     pgtbl_mod_mask *mask)
2410{
2411	pmd_t *pmd;
2412	unsigned long next;
2413	int err = 0;
2414
2415	BUG_ON(pud_huge(*pud));
2416
2417	if (create) {
2418		pmd = pmd_alloc_track(mm, pud, addr, mask);
2419		if (!pmd)
2420			return -ENOMEM;
2421	} else {
2422		pmd = pmd_offset(pud, addr);
2423	}
2424	do {
2425		next = pmd_addr_end(addr, end);
2426		if (create || !pmd_none_or_clear_bad(pmd)) {
2427			err = apply_to_pte_range(mm, pmd, addr, next, fn, data,
2428						 create, mask);
2429			if (err)
2430				break;
2431		}
2432	} while (pmd++, addr = next, addr != end);
2433	return err;
2434}
2435
2436static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
2437				     unsigned long addr, unsigned long end,
2438				     pte_fn_t fn, void *data, bool create,
2439				     pgtbl_mod_mask *mask)
2440{
2441	pud_t *pud;
2442	unsigned long next;
2443	int err = 0;
2444
2445	if (create) {
2446		pud = pud_alloc_track(mm, p4d, addr, mask);
2447		if (!pud)
2448			return -ENOMEM;
2449	} else {
2450		pud = pud_offset(p4d, addr);
2451	}
2452	do {
2453		next = pud_addr_end(addr, end);
2454		if (create || !pud_none_or_clear_bad(pud)) {
2455			err = apply_to_pmd_range(mm, pud, addr, next, fn, data,
2456						 create, mask);
2457			if (err)
2458				break;
2459		}
2460	} while (pud++, addr = next, addr != end);
2461	return err;
2462}
2463
2464static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
2465				     unsigned long addr, unsigned long end,
2466				     pte_fn_t fn, void *data, bool create,
2467				     pgtbl_mod_mask *mask)
2468{
2469	p4d_t *p4d;
2470	unsigned long next;
2471	int err = 0;
2472
2473	if (create) {
2474		p4d = p4d_alloc_track(mm, pgd, addr, mask);
2475		if (!p4d)
2476			return -ENOMEM;
2477	} else {
2478		p4d = p4d_offset(pgd, addr);
2479	}
2480	do {
2481		next = p4d_addr_end(addr, end);
2482		if (create || !p4d_none_or_clear_bad(p4d)) {
2483			err = apply_to_pud_range(mm, p4d, addr, next, fn, data,
2484						 create, mask);
2485			if (err)
2486				break;
2487		}
2488	} while (p4d++, addr = next, addr != end);
2489	return err;
2490}
2491
2492static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr,
2493				 unsigned long size, pte_fn_t fn,
2494				 void *data, bool create)
2495{
2496	pgd_t *pgd;
2497	unsigned long start = addr, next;
2498	unsigned long end = addr + size;
2499	pgtbl_mod_mask mask = 0;
2500	int err = 0;
2501
2502	if (WARN_ON(addr >= end))
2503		return -EINVAL;
2504
2505	pgd = pgd_offset(mm, addr);
2506	do {
2507		next = pgd_addr_end(addr, end);
2508		if (!create && pgd_none_or_clear_bad(pgd))
2509			continue;
2510		err = apply_to_p4d_range(mm, pgd, addr, next, fn, data, create, &mask);
2511		if (err)
2512			break;
2513	} while (pgd++, addr = next, addr != end);
2514
2515	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
2516		arch_sync_kernel_mappings(start, start + size);
2517
2518	return err;
2519}
2520
2521/*
2522 * Scan a region of virtual memory, filling in page tables as necessary
2523 * and calling a provided function on each leaf page table.
2524 */
2525int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
2526			unsigned long size, pte_fn_t fn, void *data)
2527{
2528	return __apply_to_page_range(mm, addr, size, fn, data, true);
2529}
2530EXPORT_SYMBOL_GPL(apply_to_page_range);
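
/*
 * Illustrative usage sketch: a minimal pte_fn_t callback.  The walker
 * fills in missing page tables (apply_to_page_range) or skips them
 * (apply_to_existing_page_range) and calls back on each PTE slot; this
 * hypothetical callback just counts the entries that are present.
 */
static int count_present_ptes(pte_t *pte, unsigned long addr, void *data)
{
	unsigned long *count = data;

	if (pte_present(*pte))
		(*count)++;
	return 0;	/* a non-zero return would abort the walk */
}
/* ... apply_to_existing_page_range(mm, start, size, count_present_ptes, &count); */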
2531
2532/*
2533 * Scan a region of virtual memory, calling a provided function on
2534 * each leaf page table where it exists.
2535 *
2536 * Unlike apply_to_page_range, this does _not_ fill in page tables
2537 * where they are absent.
2538 */
2539int apply_to_existing_page_range(struct mm_struct *mm, unsigned long addr,
2540				 unsigned long size, pte_fn_t fn, void *data)
2541{
2542	return __apply_to_page_range(mm, addr, size, fn, data, false);
2543}
2544EXPORT_SYMBOL_GPL(apply_to_existing_page_range);
2545
2546/*
2547 * handle_pte_fault chooses page fault handler according to an entry which was
2548 * read non-atomically.  Before making any commitment, on those architectures
2549 * or configurations (e.g. i386 with PAE) which might give a mix of unmatched
2550 * parts, do_swap_page must check under lock before unmapping the pte and
2551 * proceeding (but do_wp_page is only called after already making such a check;
2552 * and do_anonymous_page can safely check later on).
2553 */
2554static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
2555				pte_t *page_table, pte_t orig_pte)
2556{
2557	int same = 1;
2558#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPTION)
2559	if (sizeof(pte_t) > sizeof(unsigned long)) {
2560		spinlock_t *ptl = pte_lockptr(mm, pmd);
2561		spin_lock(ptl);
2562		same = pte_same(*page_table, orig_pte);
2563		spin_unlock(ptl);
2564	}
2565#endif
2566	pte_unmap(page_table);
2567	return same;
2568}
2569
2570static inline bool cow_user_page(struct page *dst, struct page *src,
2571				 struct vm_fault *vmf)
2572{
2573	bool ret;
2574	void *kaddr;
2575	void __user *uaddr;
2576	bool locked = false;
2577	struct vm_area_struct *vma = vmf->vma;
2578	struct mm_struct *mm = vma->vm_mm;
2579	unsigned long addr = vmf->address;
2580
2581	if (likely(src)) {
2582		copy_user_highpage(dst, src, addr, vma);
2583		return true;
2584	}
2585
2586	/*
2587	 * If the source page was a PFN mapping, we don't have
2588	 * a "struct page" for it. We do a best-effort copy by
2589	 * just copying from the original user address. If that
2590	 * fails, we just zero-fill it. Live with it.
2591	 */
2592	kaddr = kmap_atomic(dst);
2593	uaddr = (void __user *)(addr & PAGE_MASK);
2594
2595	/*
2596	 * On architectures with software "accessed" bits, we would
2597	 * take a double page fault, so mark it accessed here.
2598	 */
2599	if (arch_faults_on_old_pte() && !pte_young(vmf->orig_pte)) {
2600		pte_t entry;
2601
2602		vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
2603		locked = true;
2604		if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
2605			/*
2606			 * Another thread has already handled the fault;
2607			 * just update the local TLB.
2608			 */
2609			update_mmu_tlb(vma, addr, vmf->pte);
2610			ret = false;
2611			goto pte_unlock;
2612		}
2613
2614		entry = pte_mkyoung(vmf->orig_pte);
2615		if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0))
2616			update_mmu_cache(vma, addr, vmf->pte);
2617	}
2618
2619	/*
2620	 * This really shouldn't fail, because the page is there
2621	 * in the page tables. But it might just be unreadable,
2622	 * in which case we just give up and fill the result with
2623	 * zeroes.
2624	 */
2625	if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
2626		if (locked)
2627			goto warn;
2628
2629		/* Re-validate under PTL if the page is still mapped */
2630		vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
2631		locked = true;
2632		if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
2633			/* The PTE changed under us, update local tlb */
2634			update_mmu_tlb(vma, addr, vmf->pte);
2635			ret = false;
2636			goto pte_unlock;
2637		}
2638
2639		/*
2640		 * The same page may have been mapped back in since the last
2641		 * copy attempt. Try to copy again under the PTL.
2642		 */
2643		if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
2644			/*
2645			 * Warn in case some obscure use-case
2646			 * hits this path.
2647			 */
2648warn:
2649			WARN_ON_ONCE(1);
2650			clear_page(kaddr);
2651		}
2652	}
2653
2654	ret = true;
2655
2656pte_unlock:
2657	if (locked)
2658		pte_unmap_unlock(vmf->pte, vmf->ptl);
2659	kunmap_atomic(kaddr);
2660	flush_dcache_page(dst);
2661
2662	return ret;
2663}
2664
2665static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
2666{
2667	struct file *vm_file = vma->vm_file;
2668
2669	if (vm_file)
2670		return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO;
2671
2672	/*
2673	 * Special mappings (e.g. VDSO) do not have any file so fake
2674	 * a default GFP_KERNEL for them.
2675	 */
2676	return GFP_KERNEL;
2677}
2678
2679/*
2680 * Notify the address space that the page is about to become writable so that
2681 * it can prohibit this or wait for the page to get into an appropriate state.
2682 *
2683 * We do this without the lock held, so that it can sleep if it needs to.
2684 */
2685static vm_fault_t do_page_mkwrite(struct vm_fault *vmf)
2686{
2687	vm_fault_t ret;
2688	struct page *page = vmf->page;
2689	unsigned int old_flags = vmf->flags;
2690
2691	vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
2692
2693	if (vmf->vma->vm_file &&
2694	    IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host))
2695		return VM_FAULT_SIGBUS;
2696
2697	ret = vmf->vma->vm_ops->page_mkwrite(vmf);
2698	/* Restore original flags so that caller is not surprised */
2699	vmf->flags = old_flags;
2700	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
2701		return ret;
2702	if (unlikely(!(ret & VM_FAULT_LOCKED))) {
2703		lock_page(page);
2704		if (!page->mapping) {
2705			unlock_page(page);
2706			return 0; /* retry */
2707		}
2708		ret |= VM_FAULT_LOCKED;
2709	} else
2710		VM_BUG_ON_PAGE(!PageLocked(page), page);
2711	return ret;
2712}
2713
2714/*
2715 * Handle dirtying of a page in a shared file mapping on a write fault.
2716 *
2717 * The function expects the page to be locked and unlocks it.
2718 */
2719static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf)
2720{
2721	struct vm_area_struct *vma = vmf->vma;
2722	struct address_space *mapping;
2723	struct page *page = vmf->page;
2724	bool dirtied;
2725	bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite;
2726
2727	dirtied = set_page_dirty(page);
2728	VM_BUG_ON_PAGE(PageAnon(page), page);
2729	/*
2730	 * Take a local copy of the address_space - page.mapping may be zeroed
2731	 * by truncate after unlock_page().   The address_space itself remains
2732	 * pinned by vma->vm_file's reference.  We rely on unlock_page()'s
2733	 * release semantics to prevent the compiler from undoing this copying.
2734	 */
2735	mapping = page_rmapping(page);
2736	unlock_page(page);
2737
2738	if (!page_mkwrite)
2739		file_update_time(vma->vm_file);
2740
2741	/*
2742	 * Throttle page dirtying rate down to writeback speed.
2743	 *
2744	 * mapping may be NULL here because some device drivers do not
2745	 * set page.mapping but still dirty their pages
2746	 *
2747	 * Drop the mmap_lock before waiting on IO, if we can. The file
2748	 * is pinning the mapping, as per above.
2749	 */
2750	if ((dirtied || page_mkwrite) && mapping) {
2751		struct file *fpin;
2752
2753		fpin = maybe_unlock_mmap_for_io(vmf, NULL);
2754		balance_dirty_pages_ratelimited(mapping);
2755		if (fpin) {
2756			fput(fpin);
2757			return VM_FAULT_RETRY;
2758		}
2759	}
2760
2761	return 0;
2762}
2763
2764/*
2765 * Handle write page faults for pages that can be reused in the current vma
2766 *
2767 * This can happen either due to the mapping having the VM_SHARED flag,
2768 * or due to us holding the last remaining reference to the page. In either
2769 * case, all we need to do here is to mark the page as writable and update
2770 * any related book-keeping.
2771 */
2772static inline void wp_page_reuse(struct vm_fault *vmf)
2773	__releases(vmf->ptl)
2774{
2775	struct vm_area_struct *vma = vmf->vma;
2776	struct page *page = vmf->page;
2777	pte_t entry;
2778	/*
2779	 * Clear the page's cpupid information as the existing
2780	 * information potentially belongs to a now completely
2781	 * unrelated process.
2782	 */
2783	if (page)
2784		page_cpupid_xchg_last(page, (1 << LAST_CPUPID_SHIFT) - 1);
2785
2786	flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
2787	entry = pte_mkyoung(vmf->orig_pte);
2788	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2789	if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
2790		update_mmu_cache(vma, vmf->address, vmf->pte);
2791	pte_unmap_unlock(vmf->pte, vmf->ptl);
2792	count_vm_event(PGREUSE);
2793}
2794
2795/*
2796 * Handle the case of a page which we actually need to copy to a new page.
2797 *
2798 * Called with mmap_lock locked and the old page referenced, but
2799 * without the ptl held.
2800 *
2801 * High level logic flow:
2802 *
2803 * - Allocate a page, copy the content of the old page to the new one.
2804 * - Handle book keeping and accounting - cgroups, mmu-notifiers, etc.
2805 * - Take the PTL. If the pte changed, bail out and release the allocated page
2806 * - If the pte is still the way we remember it, update the page table and all
2807 *   relevant references. This includes dropping the reference the page-table
2808 *   held to the old page, as well as updating the rmap.
2809 * - In any case, unlock the PTL and drop the reference we took to the old page.
2810 */
2811static vm_fault_t wp_page_copy(struct vm_fault *vmf)
2812{
2813	struct vm_area_struct *vma = vmf->vma;
2814	struct mm_struct *mm = vma->vm_mm;
2815	struct page *old_page = vmf->page;
2816	struct page *new_page = NULL;
2817	pte_t entry;
2818	int page_copied = 0;
2819	struct mmu_notifier_range range;
2820
2821	if (unlikely(anon_vma_prepare(vma)))
2822		goto oom;
2823
2824	if (is_zero_pfn(pte_pfn(vmf->orig_pte))) {
2825		new_page = alloc_zeroed_user_highpage_movable(vma,
2826							      vmf->address);
2827		if (!new_page)
2828			goto oom;
2829	} else {
2830		new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
2831				vmf->address);
2832		if (!new_page)
2833			goto oom;
2834
2835		if (!cow_user_page(new_page, old_page, vmf)) {
2836			/*
2837			 * COW failed; if the fault was resolved by another
2838			 * thread, that's fine. If not, userspace will re-fault
2839			 * at the same address and we will handle the fault
2840			 * on the second attempt.
2841			 */
2842			put_page(new_page);
2843			if (old_page)
2844				put_page(old_page);
2845			return 0;
2846		}
2847	}
2848
2849	if (mem_cgroup_charge(new_page, mm, GFP_KERNEL))
2850		goto oom_free_new;
2851	cgroup_throttle_swaprate(new_page, GFP_KERNEL);
2852
2853	__SetPageUptodate(new_page);
2854
2855	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
2856				vmf->address & PAGE_MASK,
2857				(vmf->address & PAGE_MASK) + PAGE_SIZE);
2858	mmu_notifier_invalidate_range_start(&range);
2859
2860	/*
2861	 * Re-check the pte - we dropped the lock
2862	 */
2863	vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
2864	if (likely(pte_same(*vmf->pte, vmf->orig_pte))) {
2865		if (old_page) {
2866			if (!PageAnon(old_page)) {
2867				dec_mm_counter_fast(mm,
2868						mm_counter_file(old_page));
2869				inc_mm_counter_fast(mm, MM_ANONPAGES);
2870			}
2871		} else {
2872			inc_mm_counter_fast(mm, MM_ANONPAGES);
2873		}
2874		flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
2875		entry = mk_pte(new_page, vma->vm_page_prot);
2876		entry = pte_sw_mkyoung(entry);
2877		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2878		/*
2879		 * Clear the pte entry and flush it first, before updating the
2880		 * pte with the new entry. This will avoid a race condition
2881		 * seen in the presence of one thread doing SMC and another
2882		 * thread doing COW.
2883		 */
2884		ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
2885		page_add_new_anon_rmap(new_page, vma, vmf->address, false);
2886		lru_cache_add_inactive_or_unevictable(new_page, vma);
2887		/*
2888		 * We call the notify macro here because, when using secondary
2889		 * mmu page tables (such as kvm shadow page tables), we want the
2890		 * new page to be mapped directly into the secondary page table.
2891		 */
2892		set_pte_at_notify(mm, vmf->address, vmf->pte, entry);
2893		update_mmu_cache(vma, vmf->address, vmf->pte);
2894		if (old_page) {
2895			/*
2896			 * Only after switching the pte to the new page may
2897			 * we remove the mapcount here. Otherwise another
2898			 * process may come and find the rmap count decremented
2899			 * before the pte is switched to the new page, and
2900			 * "reuse" the old page writing into it while our pte
2901			 * here still points into it and can be read by other
2902			 * threads.
2903			 *
2904			 * The critical issue is to order this
2905			 * page_remove_rmap with the ptep_clear_flush above.
2906			 * Those stores are ordered by (if nothing else,)
2907			 * the barrier present in the atomic_add_negative
2908			 * in page_remove_rmap.
2909			 *
2910			 * Then the TLB flush in ptep_clear_flush ensures that
2911			 * no process can access the old page before the
2912			 * decremented mapcount is visible. And the old page
2913			 * cannot be reused until after the decremented
2914			 * mapcount is visible. So transitively, TLBs to
2915			 * old page will be flushed before it can be reused.
2916			 */
2917			page_remove_rmap(old_page, false);
2918		}
2919
2920		/* Free the old page.. */
2921		new_page = old_page;
2922		page_copied = 1;
2923	} else {
2924		update_mmu_tlb(vma, vmf->address, vmf->pte);
2925	}
2926
2927	if (new_page)
2928		put_page(new_page);
2929
2930	pte_unmap_unlock(vmf->pte, vmf->ptl);
2931	/*
2932	 * No need to double call mmu_notifier->invalidate_range() callback as
2933	 * the above ptep_clear_flush_notify() did already call it.
2934	 */
2935	mmu_notifier_invalidate_range_only_end(&range);
2936	if (old_page) {
2937		/*
2938		 * Don't let another task, with possibly unlocked vma,
2939		 * keep the mlocked page.
2940		 */
2941		if (page_copied && (vma->vm_flags & VM_LOCKED)) {
2942			lock_page(old_page);	/* LRU manipulation */
2943			if (PageMlocked(old_page))
2944				munlock_vma_page(old_page);
2945			unlock_page(old_page);
2946		}
2947		put_page(old_page);
2948	}
2949	return page_copied ? VM_FAULT_WRITE : 0;
2950oom_free_new:
2951	put_page(new_page);
2952oom:
2953	if (old_page)
2954		put_page(old_page);
2955	return VM_FAULT_OOM;
2956}
2957
2958/**
2959 * finish_mkwrite_fault - finish page fault for a shared mapping, making PTE
2960 *			  writeable once the page is prepared
2961 *
2962 * @vmf: structure describing the fault
2963 *
2964 * This function handles all that is needed to finish a write page fault in a
2965 * shared mapping due to PTE being read-only once the mapped page is prepared.
2966 * It handles locking of PTE and modifying it.
2967 *
2968 * The function expects the page to be locked, or some other protection
2969 * held against concurrent faults / writeback (such as DAX radix tree locks).
2970 *
2971 * Return: %0 on success, %VM_FAULT_NOPAGE when the PTE changed before
2972 * we acquired the PTE lock.
2973 */
2974vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf)
2975{
2976	WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED));
2977	vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
2978				       &vmf->ptl);
2979	/*
2980	 * We might have raced with another page fault while we released the
2981	 * pte_offset_map_lock.
2982	 */
2983	if (!pte_same(*vmf->pte, vmf->orig_pte)) {
2984		update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
2985		pte_unmap_unlock(vmf->pte, vmf->ptl);
2986		return VM_FAULT_NOPAGE;
2987	}
2988	wp_page_reuse(vmf);
2989	return 0;
2990}
2991
2992/*
2993 * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED
2994 * mapping
2995 */
2996static vm_fault_t wp_pfn_shared(struct vm_fault *vmf)
2997{
2998	struct vm_area_struct *vma = vmf->vma;
2999
3000	if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) {
3001		vm_fault_t ret;
3002
3003		pte_unmap_unlock(vmf->pte, vmf->ptl);
3004		vmf->flags |= FAULT_FLAG_MKWRITE;
3005		ret = vma->vm_ops->pfn_mkwrite(vmf);
3006		if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))
3007			return ret;
3008		return finish_mkwrite_fault(vmf);
3009	}
3010	wp_page_reuse(vmf);
3011	return VM_FAULT_WRITE;
3012}
3013
3014static vm_fault_t wp_page_shared(struct vm_fault *vmf)
3015	__releases(vmf->ptl)
3016{
3017	struct vm_area_struct *vma = vmf->vma;
3018	vm_fault_t ret = VM_FAULT_WRITE;
3019
3020	get_page(vmf->page);
3021
3022	if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
3023		vm_fault_t tmp;
3024
3025		pte_unmap_unlock(vmf->pte, vmf->ptl);
3026		tmp = do_page_mkwrite(vmf);
3027		if (unlikely(!tmp || (tmp &
3028				      (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
3029			put_page(vmf->page);
3030			return tmp;
3031		}
3032		tmp = finish_mkwrite_fault(vmf);
3033		if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
3034			unlock_page(vmf->page);
3035			put_page(vmf->page);
3036			return tmp;
3037		}
3038	} else {
3039		wp_page_reuse(vmf);
3040		lock_page(vmf->page);
3041	}
3042	ret |= fault_dirty_shared_page(vmf);
3043	put_page(vmf->page);
3044
3045	return ret;
3046}
3047
3048/*
3049 * This routine handles present pages, when users try to write
3050 * to a shared page. It is done by copying the page to a new address
3051 * and decrementing the shared-page counter for the old page.
3052 *
3053 * Note that this routine assumes that the protection checks have been
3054 * done by the caller (the low-level page fault routine in most cases).
3055 * Thus we can safely just mark it writable once we've done any necessary
3056 * COW.
3057 *
3058 * We also mark the page dirty at this point even though the page will
3059 * change only once the write actually happens. This avoids a few races,
3060 * and potentially makes it more efficient.
3061 *
3062 * We enter with non-exclusive mmap_lock (to exclude vma changes,
3063 * but allow concurrent faults), with pte both mapped and locked.
3064 * We return with mmap_lock still held, but pte unmapped and unlocked.
3065 */
3066static vm_fault_t do_wp_page(struct vm_fault *vmf)
3067	__releases(vmf->ptl)
3068{
3069	struct vm_area_struct *vma = vmf->vma;
3070
3071	if (userfaultfd_pte_wp(vma, *vmf->pte)) {
3072		pte_unmap_unlock(vmf->pte, vmf->ptl);
3073		return handle_userfault(vmf, VM_UFFD_WP);
3074	}
3075
3076	vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
3077	if (!vmf->page) {
3078		/*
3079		 * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
3080		 * VM_PFNMAP VMA.
3081		 *
3082		 * We should not cow pages in a shared writeable mapping.
3083		 * Just mark the pages writable and/or call ops->pfn_mkwrite.
3084		 */
3085		if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
3086				     (VM_WRITE|VM_SHARED))
3087			return wp_pfn_shared(vmf);
3088
3089		pte_unmap_unlock(vmf->pte, vmf->ptl);
3090		return wp_page_copy(vmf);
3091	}
3092
3093	/*
3094	 * Take out anonymous pages first; anonymous shared vmas are
3095	 * not dirty accountable.
3096	 */
3097	if (PageAnon(vmf->page)) {
3098		struct page *page = vmf->page;
3099
3100		/* PageKsm() doesn't necessarily raise the page refcount */
3101		if (PageKsm(page) || page_count(page) != 1)
3102			goto copy;
3103		if (!trylock_page(page))
3104			goto copy;
3105		if (PageKsm(page) || page_mapcount(page) != 1 || page_count(page) != 1) {
3106			unlock_page(page);
3107			goto copy;
3108		}
3109		/*
3110		 * Ok, we've got the only map reference, and the only
3111		 * page count reference, and the page is locked,
3112		 * it's dark out, and we're wearing sunglasses. Hit it.
3113		 */
3114		unlock_page(page);
3115		wp_page_reuse(vmf);
3116		return VM_FAULT_WRITE;
3117	} else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
3118					(VM_WRITE|VM_SHARED))) {
3119		return wp_page_shared(vmf);
3120	}
3121copy:
3122	/*
3123	 * Ok, we need to copy. Oh, well..
3124	 */
3125	get_page(vmf->page);
3126
3127	pte_unmap_unlock(vmf->pte, vmf->ptl);
3128	return wp_page_copy(vmf);
3129}
3130
3131static void unmap_mapping_range_vma(struct vm_area_struct *vma,
3132		unsigned long start_addr, unsigned long end_addr,
3133		struct zap_details *details)
3134{
3135	zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
3136}
3137
3138static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
3139					    struct zap_details *details)
3140{
3141	struct vm_area_struct *vma;
3142	pgoff_t vba, vea, zba, zea;
3143
3144	vma_interval_tree_foreach(vma, root,
3145			details->first_index, details->last_index) {
3146
3147		vba = vma->vm_pgoff;
3148		vea = vba + vma_pages(vma) - 1;
3149		zba = details->first_index;
3150		if (zba < vba)
3151			zba = vba;
3152		zea = details->last_index;
3153		if (zea > vea)
3154			zea = vea;
3155
3156		unmap_mapping_range_vma(vma,
3157			((zba - vba) << PAGE_SHIFT) + vma->vm_start,
3158			((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
3159				details);
3160	}
3161}
3162
3163/**
3164 * unmap_mapping_pages() - Unmap pages from processes.
3165 * @mapping: The address space containing pages to be unmapped.
3166 * @start: Index of first page to be unmapped.
3167 * @nr: Number of pages to be unmapped.  0 to unmap to end of file.
3168 * @even_cows: Whether to unmap even private COWed pages.
3169 *
3170 * Unmap the pages in this address space from any userspace process which
3171 * has them mmaped.  Generally, you want to remove COWed pages as well when
3172 * a file is being truncated, but not when invalidating pages from the page
3173 * cache.
3174 */
3175void unmap_mapping_pages(struct address_space *mapping, pgoff_t start,
3176		pgoff_t nr, bool even_cows)
3177{
3178	struct zap_details details = { };
3179
3180	details.check_mapping = even_cows ? NULL : mapping;
3181	details.first_index = start;
3182	details.last_index = start + nr - 1;
3183	if (details.last_index < details.first_index)
3184		details.last_index = ULONG_MAX;
3185
3186	i_mmap_lock_write(mapping);
3187	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
3188		unmap_mapping_range_tree(&mapping->i_mmap, &details);
3189	i_mmap_unlock_write(mapping);
3190}
3191
3192/**
3193 * unmap_mapping_range - unmap the portion of all mmaps in the specified
3194 * address_space corresponding to the specified byte range in the underlying
3195 * file.
3196 *
3197 * @mapping: the address space containing mmaps to be unmapped.
3198 * @holebegin: byte in first page to unmap, relative to the start of
3199 * the underlying file.  This will be rounded down to a PAGE_SIZE
3200 * boundary.  Note that this is different from truncate_pagecache(), which
3201 * must keep the partial page.  In contrast, we must get rid of
3202 * partial pages.
3203 * @holelen: size of prospective hole in bytes.  This will be rounded
3204 * up to a PAGE_SIZE boundary.  A holelen of zero truncates to the
3205 * end of the file.
3206 * @even_cows: 1 when truncating a file, unmap even private COWed pages;
3207 * but 0 when invalidating pagecache, don't throw away private data.
3208 */
3209void unmap_mapping_range(struct address_space *mapping,
3210		loff_t const holebegin, loff_t const holelen, int even_cows)
3211{
3212	pgoff_t hba = holebegin >> PAGE_SHIFT;
3213	pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
3214
3215	/* Check for overflow. */
3216	if (sizeof(holelen) > sizeof(hlen)) {
3217		long long holeend =
3218			(holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
3219		if (holeend & ~(long long)ULONG_MAX)
3220			hlen = ULONG_MAX - hba + 1;
3221	}
3222
3223	unmap_mapping_pages(mapping, hba, hlen, even_cows);
3224}
3225EXPORT_SYMBOL(unmap_mapping_range);
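
/*
 * Illustrative usage sketch: a filesystem shooting down all mappings
 * beyond a new EOF on truncate.  A holelen of 0 means "to end of
 * file", and even_cows=1 also drops private COWed copies of the
 * truncated pages.
 */
static void my_truncate_mappings(struct inode *inode, loff_t newsize)
{
	unmap_mapping_range(inode->i_mapping, newsize, 0, 1);
}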
3226
3227/*
3228 * We enter with non-exclusive mmap_lock (to exclude vma changes,
3229 * but allow concurrent faults), and pte mapped but not yet locked.
3230 * We return with pte unmapped and unlocked.
3231 *
3232 * We return with the mmap_lock locked or unlocked in the same cases
3233 * as does filemap_fault().
3234 */
3235vm_fault_t do_swap_page(struct vm_fault *vmf)
3236{
3237	struct vm_area_struct *vma = vmf->vma;
3238	struct page *page = NULL, *swapcache;
3239	swp_entry_t entry;
3240	pte_t pte;
3241	int locked;
3242	int exclusive = 0;
3243	vm_fault_t ret = 0;
3244	void *shadow = NULL;
3245
3246	if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte))
3247		goto out;
3248
3249	entry = pte_to_swp_entry(vmf->orig_pte);
3250	if (unlikely(non_swap_entry(entry))) {
3251		if (is_migration_entry(entry)) {
3252			migration_entry_wait(vma->vm_mm, vmf->pmd,
3253					     vmf->address);
3254		} else if (is_device_private_entry(entry)) {
3255			vmf->page = device_private_entry_to_page(entry);
3256			ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
3257		} else if (is_hwpoison_entry(entry)) {
3258			ret = VM_FAULT_HWPOISON;
3259		} else {
3260			print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL);
3261			ret = VM_FAULT_SIGBUS;
3262		}
3263		goto out;
3264	}
3265
3266
3267	delayacct_set_flag(DELAYACCT_PF_SWAPIN);
3268	page = lookup_swap_cache(entry, vma, vmf->address);
3269	swapcache = page;
3270
3271	if (!page) {
3272		struct swap_info_struct *si = swp_swap_info(entry);
3273
3274		if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
3275		    __swap_count(entry) == 1) {
3276			/* skip swapcache */
3277			page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
3278							vmf->address);
3279			if (page) {
3280				int err;
3281
3282				__SetPageLocked(page);
3283				__SetPageSwapBacked(page);
3284				set_page_private(page, entry.val);
3285
3286				/* Tell memcg to use swap ownership records */
3287				SetPageSwapCache(page);
3288				err = mem_cgroup_charge(page, vma->vm_mm,
3289							GFP_KERNEL);
3290				ClearPageSwapCache(page);
3291				if (err) {
3292					ret = VM_FAULT_OOM;
3293					goto out_page;
3294				}
3295
3296				shadow = get_shadow_from_swap_cache(entry);
3297				if (shadow)
3298					workingset_refault(page, shadow);
3299
3300				lru_cache_add(page);
3301				swap_readpage(page, true);
3302			}
3303		} else {
3304			page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
3305						vmf);
3306			swapcache = page;
3307		}
3308
3309		if (!page) {
3310			/*
3311			 * Back out if somebody else faulted in this pte
3312			 * while we released the pte lock.
3313			 */
3314			vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
3315					vmf->address, &vmf->ptl);
3316			if (likely(pte_same(*vmf->pte, vmf->orig_pte)))
3317				ret = VM_FAULT_OOM;
3318			delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
3319			goto unlock;
3320		}
3321
3322		/* Had to read the page from swap area: Major fault */
3323		ret = VM_FAULT_MAJOR;
3324		count_vm_event(PGMAJFAULT);
3325		count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
3326	} else if (PageHWPoison(page)) {
3327		/*
3328		 * hwpoisoned dirty swapcache pages are kept for killing
3329		 * owner processes (which may be unknown at hwpoison time)
3330		 */
3331		ret = VM_FAULT_HWPOISON;
3332		delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
3333		goto out_release;
3334	}
3335
3336	locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags);
3337
3338	delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
3339	if (!locked) {
3340		ret |= VM_FAULT_RETRY;
3341		goto out_release;
3342	}
3343
3344	/*
3345	 * Make sure try_to_free_swap or reuse_swap_page or swapoff did not
3346	 * release the swapcache from under us.  The page pin, and pte_same
3347	 * test below, are not enough to exclude that.  Even if it is still
3348	 * swapcache, we need to check that the page's swap has not changed.
3349	 */
3350	if (unlikely((!PageSwapCache(page) ||
3351			page_private(page) != entry.val)) && swapcache)
3352		goto out_page;
3353
3354	page = ksm_might_need_to_copy(page, vma, vmf->address);
3355	if (unlikely(!page)) {
3356		ret = VM_FAULT_OOM;
3357		page = swapcache;
3358		goto out_page;
3359	}
3360
3361	cgroup_throttle_swaprate(page, GFP_KERNEL);
3362
3363	/*
3364	 * Back out if somebody else already faulted in this pte.
3365	 */
3366	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3367			&vmf->ptl);
3368	if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte)))
3369		goto out_nomap;
3370
3371	if (unlikely(!PageUptodate(page))) {
3372		ret = VM_FAULT_SIGBUS;
3373		goto out_nomap;
3374	}
3375
3376	/*
3377	 * The page isn't present yet, go ahead with the fault.
3378	 *
3379	 * Be careful about the sequence of operations here.
3380	 * To get its accounting right, reuse_swap_page() must be called
3381	 * while the page is counted on swap but not yet in mapcount i.e.
3382	 * before page_add_anon_rmap() and swap_free(); try_to_free_swap()
3383	 * must be called after the swap_free(), or it will never succeed.
3384	 */
3385
3386	inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
3387	dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS);
3388	pte = mk_pte(page, vma->vm_page_prot);
3389	if ((vmf->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) {
3390		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
3391		vmf->flags &= ~FAULT_FLAG_WRITE;
3392		ret |= VM_FAULT_WRITE;
3393		exclusive = RMAP_EXCLUSIVE;
3394	}
3395	flush_icache_page(vma, page);
3396	if (pte_swp_soft_dirty(vmf->orig_pte))
3397		pte = pte_mksoft_dirty(pte);
3398	if (pte_swp_uffd_wp(vmf->orig_pte)) {
3399		pte = pte_mkuffd_wp(pte);
3400		pte = pte_wrprotect(pte);
3401	}
3402	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
3403	arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte);
3404	vmf->orig_pte = pte;
3405
3406	/* ksm created a completely new copy */
3407	if (unlikely(page != swapcache && swapcache)) {
3408		page_add_new_anon_rmap(page, vma, vmf->address, false);
3409		lru_cache_add_inactive_or_unevictable(page, vma);
3410	} else {
3411		do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
3412	}
3413
3414	swap_free(entry);
3415	if (mem_cgroup_swap_full(page) ||
3416	    (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
3417		try_to_free_swap(page);
3418	unlock_page(page);
3419	if (page != swapcache && swapcache) {
3420		/*
3421		 * Hold the lock to prevent the swap entry from being
3422		 * reused until we take the PT lock for the pte_same()
3423		 * check (to avoid false positives from pte_same). For
3424		 * further safety, release the lock after the swap_free()
3425		 * so that the swap count won't change under a parallel
3426		 * locked swapcache.
3427		 */
3428		unlock_page(swapcache);
3429		put_page(swapcache);
3430	}
3431
3432	if (vmf->flags & FAULT_FLAG_WRITE) {
3433		ret |= do_wp_page(vmf);
3434		if (ret & VM_FAULT_ERROR)
3435			ret &= VM_FAULT_ERROR;
3436		goto out;
3437	}
3438
3439	/* No need to invalidate - it was non-present before */
3440	update_mmu_cache(vma, vmf->address, vmf->pte);
3441unlock:
3442	pte_unmap_unlock(vmf->pte, vmf->ptl);
3443out:
3444	return ret;
3445out_nomap:
3446	pte_unmap_unlock(vmf->pte, vmf->ptl);
3447out_page:
3448	unlock_page(page);
3449out_release:
3450	put_page(page);
3451	if (page != swapcache && swapcache) {
3452		unlock_page(swapcache);
3453		put_page(swapcache);
3454	}
3455	return ret;
3456}
3457
3458/*
3459 * We enter with non-exclusive mmap_lock (to exclude vma changes,
3460 * but allow concurrent faults), and pte mapped but not yet locked.
3461 * We return with mmap_lock still held, but pte unmapped and unlocked.
3462 */
3463static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
3464{
3465	struct vm_area_struct *vma = vmf->vma;
3466	struct page *page;
3467	vm_fault_t ret = 0;
3468	pte_t entry;
3469
3470	/* File mapping without ->vm_ops ? */
3471	if (vma->vm_flags & VM_SHARED)
3472		return VM_FAULT_SIGBUS;
3473
3474	/*
3475	 * Use pte_alloc() instead of pte_alloc_map().  We can't run
3476	 * pte_offset_map() on pmds where a huge pmd might be created
3477	 * from a different thread.
3478	 *
3479	 * pte_alloc_map() is safe to use under mmap_write_lock(mm) or when
3480	 * parallel threads are excluded by other means.
3481	 *
3482	 * Here we only have mmap_read_lock(mm).
3483	 */
3484	if (pte_alloc(vma->vm_mm, vmf->pmd))
3485		return VM_FAULT_OOM;
3486
3487	/* See the comment in pte_alloc_one_map() */
3488	if (unlikely(pmd_trans_unstable(vmf->pmd)))
3489		return 0;
3490
3491	/* Use the zero-page for reads */
3492	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
3493			!mm_forbids_zeropage(vma->vm_mm)) {
3494		entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
3495						vma->vm_page_prot));
3496		vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
3497				vmf->address, &vmf->ptl);
3498		if (!pte_none(*vmf->pte)) {
3499			update_mmu_tlb(vma, vmf->address, vmf->pte);
3500			goto unlock;
3501		}
3502		ret = check_stable_address_space(vma->vm_mm);
3503		if (ret)
3504			goto unlock;
3505		/* Deliver the page fault to userland, check inside PT lock */
3506		if (userfaultfd_missing(vma)) {
3507			pte_unmap_unlock(vmf->pte, vmf->ptl);
3508			return handle_userfault(vmf, VM_UFFD_MISSING);
3509		}
3510		goto setpte;
3511	}
3512
3513	/* Allocate our own private page. */
3514	if (unlikely(anon_vma_prepare(vma)))
3515		goto oom;
3516	page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
3517	if (!page)
3518		goto oom;
3519
3520	if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL))
3521		goto oom_free_page;
3522	cgroup_throttle_swaprate(page, GFP_KERNEL);
3523
3524	/*
3525	 * The memory barrier inside __SetPageUptodate makes sure that
3526	 * preceding stores to the page contents become visible before
3527	 * the set_pte_at() write.
3528	 */
3529	__SetPageUptodate(page);
3530
3531	entry = mk_pte(page, vma->vm_page_prot);
3532	entry = pte_sw_mkyoung(entry);
3533	if (vma->vm_flags & VM_WRITE)
3534		entry = pte_mkwrite(pte_mkdirty(entry));
3535
3536	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3537			&vmf->ptl);
3538	if (!pte_none(*vmf->pte)) {
3539		update_mmu_cache(vma, vmf->address, vmf->pte);
3540		goto release;
3541	}
3542
3543	ret = check_stable_address_space(vma->vm_mm);
3544	if (ret)
3545		goto release;
3546
3547	/* Deliver the page fault to userland, check inside PT lock */
3548	if (userfaultfd_missing(vma)) {
3549		pte_unmap_unlock(vmf->pte, vmf->ptl);
3550		put_page(page);
3551		return handle_userfault(vmf, VM_UFFD_MISSING);
3552	}
3553
3554	inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
3555	page_add_new_anon_rmap(page, vma, vmf->address, false);
3556	lru_cache_add_inactive_or_unevictable(page, vma);
3557setpte:
3558	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
3559
3560	/* No need to invalidate - it was non-present before */
3561	update_mmu_cache(vma, vmf->address, vmf->pte);
3562unlock:
3563	pte_unmap_unlock(vmf->pte, vmf->ptl);
3564	return ret;
3565release:
3566	put_page(page);
3567	goto unlock;
3568oom_free_page:
3569	put_page(page);
3570oom:
3571	return VM_FAULT_OOM;
3572}
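
/*
 * Lifecycle sketch (illustrative): the first read fault on an anonymous
 * address installs the shared zero page read-only via the "setpte" path
 * above; a subsequent write fault is then resolved by do_wp_page(),
 * which substitutes a private zeroed page. A write fault on a
 * never-touched address skips the zero page and allocates directly.
 */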
3573
3574/*
3575 * The mmap_lock must have been held on entry, and may have been
3576 * released depending on flags and vma->vm_ops->fault() return value.
3577 * See filemap_fault() and __lock_page_or_retry().
3578 */
3579static vm_fault_t __do_fault(struct vm_fault *vmf)
3580{
3581	struct vm_area_struct *vma = vmf->vma;
3582	vm_fault_t ret;
3583
3584	/*
3585	 * Preallocate pte before we take page_lock because this might lead to
3586	 * deadlocks for memcg reclaim which waits for pages under writeback:
3587	 *				lock_page(A)
3588	 *				SetPageWriteback(A)
3589	 *				unlock_page(A)
3590	 * lock_page(B)
3591	 *				lock_page(B)
3592	 * pte_alloc_pne
3593	 *   shrink_page_list
3594	 *     wait_on_page_writeback(A)
3595	 *				SetPageWriteback(B)
3596	 *				unlock_page(B)
3597	 *				# flush A, B to clear the writeback
3598	 */
3599	if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) {
3600		vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
3601		if (!vmf->prealloc_pte)
3602			return VM_FAULT_OOM;
3603		smp_wmb(); /* See comment in __pte_alloc() */
3604	}
3605
3606	ret = vma->vm_ops->fault(vmf);
3607	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
3608			    VM_FAULT_DONE_COW)))
3609		return ret;
3610
3611	if (unlikely(PageHWPoison(vmf->page))) {
3612		if (ret & VM_FAULT_LOCKED)
3613			unlock_page(vmf->page);
3614		put_page(vmf->page);
3615		vmf->page = NULL;
3616		return VM_FAULT_HWPOISON;
3617	}
3618
3619	if (unlikely(!(ret & VM_FAULT_LOCKED)))
3620		lock_page(vmf->page);
3621	else
3622		VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page);
3623
3624	return ret;
3625}
3626
3627/*
3628 * The ordering of these checks is important for pmds with _PAGE_DEVMAP set.
3629 * If we check pmd_trans_unstable() first we will trip the bad_pmd() check
3630 * inside of pmd_none_or_trans_huge_or_clear_bad(). This will end up correctly
3631 * returning 1 but not before it spams dmesg with the pmd_clear_bad() output.
3632 */
3633static int pmd_devmap_trans_unstable(pmd_t *pmd)
3634{
3635	return pmd_devmap(*pmd) || pmd_trans_unstable(pmd);
3636}
3637
3638static vm_fault_t pte_alloc_one_map(struct vm_fault *vmf)
3639{
3640	struct vm_area_struct *vma = vmf->vma;
3641
3642	if (!pmd_none(*vmf->pmd))
3643		goto map_pte;
3644	if (vmf->prealloc_pte) {
3645		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
3646		if (unlikely(!pmd_none(*vmf->pmd))) {
3647			spin_unlock(vmf->ptl);
3648			goto map_pte;
3649		}
3650
3651		mm_inc_nr_ptes(vma->vm_mm);
3652		pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
3653		spin_unlock(vmf->ptl);
3654		vmf->prealloc_pte = NULL;
3655	} else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) {
3656		return VM_FAULT_OOM;
3657	}
3658map_pte:
3659	/*
3660	 * If a huge pmd materialized under us just retry later.  Use
3661	 * pmd_trans_unstable() via pmd_devmap_trans_unstable() instead of
3662	 * pmd_trans_huge() to ensure the pmd didn't become pmd_trans_huge
3663	 * under us and then back to pmd_none, as a result of MADV_DONTNEED
3664	 * running immediately after a huge pmd fault in a different thread of
3665	 * this mm, in turn leading to a misleading pmd_trans_huge() retval.
3666	 * All we have to ensure is that it is a regular pmd that we can walk
3667	 * with pte_offset_map() and we can do that through an atomic read in
3668	 * C, which is what pmd_trans_unstable() provides.
3669	 */
3670	if (pmd_devmap_trans_unstable(vmf->pmd))
3671		return VM_FAULT_NOPAGE;
3672
3673	/*
3674	 * At this point we know that our vmf->pmd points to a page of ptes
3675	 * and it cannot become pmd_none(), pmd_devmap() or pmd_trans_huge()
3676	 * for the duration of the fault.  If a racing MADV_DONTNEED runs and
3677	 * we zap the ptes pointed to by our vmf->pmd, the vmf->ptl will still
3678	 * be valid and we will re-check to make sure the vmf->pte isn't
3679	 * pte_none() under vmf->ptl protection when we return to
3680	 * alloc_set_pte().
3681	 */
3682	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3683			&vmf->ptl);
3684	return 0;
3685}
3686
3687#ifdef CONFIG_TRANSPARENT_HUGEPAGE
3688static void deposit_prealloc_pte(struct vm_fault *vmf)
3689{
3690	struct vm_area_struct *vma = vmf->vma;
3691
3692	pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
3693	/*
3694	 * We are going to consume the prealloc table,
3695	 * count that as nr_ptes.
3696	 */
3697	mm_inc_nr_ptes(vma->vm_mm);
3698	vmf->prealloc_pte = NULL;
3699}
3700
3701static vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
3702{
3703	struct vm_area_struct *vma = vmf->vma;
3704	bool write = vmf->flags & FAULT_FLAG_WRITE;
3705	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
3706	pmd_t entry;
3707	int i;
3708	vm_fault_t ret;
3709
3710	if (!transhuge_vma_suitable(vma, haddr))
3711		return VM_FAULT_FALLBACK;
3712
3713	ret = VM_FAULT_FALLBACK;
3714	page = compound_head(page);
3715
3716	/*
3717	 * Archs like ppc64 need additional space to store information
3718	 * related to pte entry. Use the preallocated table for that.
3719	 */
3720	if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) {
3721		vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
3722		if (!vmf->prealloc_pte)
3723			return VM_FAULT_OOM;
3724		smp_wmb(); /* See comment in __pte_alloc() */
3725	}
3726
3727	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
3728	if (unlikely(!pmd_none(*vmf->pmd)))
3729		goto out;
3730
3731	for (i = 0; i < HPAGE_PMD_NR; i++)
3732		flush_icache_page(vma, page + i);
3733
3734	entry = mk_huge_pmd(page, vma->vm_page_prot);
3735	if (write)
3736		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
3737
3738	add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR);
3739	page_add_file_rmap(page, true);
3740	/*
3741	 * deposit and withdraw with pmd lock held
3742	 */
3743	if (arch_needs_pgtable_deposit())
3744		deposit_prealloc_pte(vmf);
3745
3746	set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
3747
3748	update_mmu_cache_pmd(vma, haddr, vmf->pmd);
3749
3750	/* fault is handled */
3751	ret = 0;
3752	count_vm_event(THP_FILE_MAPPED);
3753out:
3754	spin_unlock(vmf->ptl);
3755	return ret;
3756}
3757#else
3758static vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
3759{
3760	BUILD_BUG();
3761	return 0;
3762}
3763#endif
3764
3765/**
3766 * alloc_set_pte - set up a new PTE entry for the given page and add reverse page
3767 * mapping. If needed, the function allocates a page table or uses the pre-allocated one.
3768 *
3769 * @vmf: fault environment
3770 * @page: page to map
3771 *
3772 * Caller must take care of unlocking vmf->ptl, if vmf->pte is non-NULL on
3773 * return.
3774 *
3775 * Target users are the page fault handler itself and implementations of
3776 * vm_ops->map_pages.
3777 *
3778 * Return: %0 on success, %VM_FAULT_ code in case of error.
3779 */
3780vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct page *page)
3781{
3782	struct vm_area_struct *vma = vmf->vma;
3783	bool write = vmf->flags & FAULT_FLAG_WRITE;
3784	pte_t entry;
3785	vm_fault_t ret;
3786
3787	if (pmd_none(*vmf->pmd) && PageTransCompound(page)) {
3788		ret = do_set_pmd(vmf, page);
3789		if (ret != VM_FAULT_FALLBACK)
3790			return ret;
3791	}
3792
3793	if (!vmf->pte) {
3794		ret = pte_alloc_one_map(vmf);
3795		if (ret)
3796			return ret;
3797	}
3798
3799	/* Re-check under ptl */
3800	if (unlikely(!pte_none(*vmf->pte))) {
3801		update_mmu_tlb(vma, vmf->address, vmf->pte);
3802		return VM_FAULT_NOPAGE;
3803	}
3804
3805	flush_icache_page(vma, page);
3806	entry = mk_pte(page, vma->vm_page_prot);
3807	entry = pte_sw_mkyoung(entry);
3808	if (write)
3809		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
3810	/* copy-on-write page */
3811	if (write && !(vma->vm_flags & VM_SHARED)) {
3812		inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
3813		page_add_new_anon_rmap(page, vma, vmf->address, false);
3814		lru_cache_add_inactive_or_unevictable(page, vma);
3815	} else {
3816		inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
3817		page_add_file_rmap(page, false);
3818	}
3819	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
3820
3821	/* no need to invalidate: a not-present page won't be cached */
3822	update_mmu_cache(vma, vmf->address, vmf->pte);
3823
3824	return 0;
3825}
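
/*
 * Illustrative caller pattern for alloc_set_pte(): the caller must drop
 * the page table lock itself whenever vmf->pte is left mapped, e.g.
 *
 *	ret = alloc_set_pte(vmf, page);
 *	if (vmf->pte)
 *		pte_unmap_unlock(vmf->pte, vmf->ptl);
 *
 * which is exactly the pattern finish_fault() below uses.
 */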
3826
3827
3828/**
3829 * finish_fault - finish page fault once we have prepared the page to fault
3830 *
3831 * @vmf: structure describing the fault
3832 *
3833 * This function handles all that is needed to finish a page fault once the
3834 * page to fault in is prepared. It handles locking of PTEs, inserts PTE for
3835 * given page, adds reverse page mapping, handles memcg charges and LRU
3836 * addition.
3837 *
3838 * The function expects the page to be locked and on success it consumes a
3839 * reference of a page being mapped (for the PTE which maps it).
3840 *
3841 * Return: %0 on success, %VM_FAULT_ code in case of error.
3842 */
3843vm_fault_t finish_fault(struct vm_fault *vmf)
3844{
3845	struct page *page;
3846	vm_fault_t ret = 0;
3847
3848	/* Did we COW the page? */
3849	if ((vmf->flags & FAULT_FLAG_WRITE) &&
3850	    !(vmf->vma->vm_flags & VM_SHARED))
3851		page = vmf->cow_page;
3852	else
3853		page = vmf->page;
3854
3855	/*
3856	 * Check even for read faults because we might have lost our
3857	 * CoWed page.
3858	 */
3859	if (!(vmf->vma->vm_flags & VM_SHARED))
3860		ret = check_stable_address_space(vmf->vma->vm_mm);
3861	if (!ret)
3862		ret = alloc_set_pte(vmf, page);
3863	if (vmf->pte)
3864		pte_unmap_unlock(vmf->pte, vmf->ptl);
3865	return ret;
3866}
3867
3868static unsigned long fault_around_bytes __read_mostly =
3869	rounddown_pow_of_two(65536);
3870
3871#ifdef CONFIG_DEBUG_FS
3872static int fault_around_bytes_get(void *data, u64 *val)
3873{
3874	*val = fault_around_bytes;
3875	return 0;
3876}
3877
3878/*
3879 * fault_around_bytes must be rounded down to the nearest page order as it's
3880 * what do_fault_around() expects to see.
3881 */
3882static int fault_around_bytes_set(void *data, u64 val)
3883{
3884	if (val / PAGE_SIZE > PTRS_PER_PTE)
3885		return -EINVAL;
3886	if (val > PAGE_SIZE)
3887		fault_around_bytes = rounddown_pow_of_two(val);
3888	else
3889		fault_around_bytes = PAGE_SIZE; /* rounddown_pow_of_two(0) is undefined */
3890	return 0;
3891}
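
/*
 * Worked examples of the clamping above: writing 60000 stores
 * rounddown_pow_of_two(60000) == 32768; writing anything at or below
 * PAGE_SIZE stores PAGE_SIZE; writing more than PTRS_PER_PTE pages'
 * worth of bytes fails with -EINVAL.
 */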
3892DEFINE_DEBUGFS_ATTRIBUTE(fault_around_bytes_fops,
3893		fault_around_bytes_get, fault_around_bytes_set, "%llu\n");
3894
3895static int __init fault_around_debugfs(void)
3896{
3897	debugfs_create_file_unsafe("fault_around_bytes", 0644, NULL, NULL,
3898				   &fault_around_bytes_fops);
3899	return 0;
3900}
3901late_initcall(fault_around_debugfs);
3902#endif
3903
3904/*
3905 * do_fault_around() tries to map a few pages around the fault address. The hope
3906 * is that the pages will be needed soon and this will lower the number of
3907 * faults to handle.
3908 *
3909 * It uses vm_ops->map_pages() to map the pages, which skips the page if it's
3910 * not ready to be mapped: not up-to-date, locked, etc.
3911 *
3912 * This function is called with the page table lock taken. In the split ptlock
3913 * case the page table lock protects only those entries which belong to
3914 * the page table corresponding to the fault address.
3915 *
3916 * This function doesn't cross the VMA boundaries, in order to call map_pages()
3917 * only once.
3918 *
3919 * fault_around_bytes defines how many bytes we'll try to map.
3920 * do_fault_around() expects it to be set to a power of two less than or equal
3921 * to PTRS_PER_PTE.
3922 *
3923 * The virtual address of the area that we map is naturally aligned to
3924 * fault_around_bytes rounded down to the machine page size
3925 * (and therefore to page order).  This way it's easier to guarantee
3926 * that we don't cross page table boundaries.
3927 */
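/*
 * For example, with 4K pages and the default fault_around_bytes of
 * 65536, nr_pages is 16: up to 16 ptes are set up, starting from a
 * 64K-aligned address, clamped so that neither the VMA boundaries nor
 * the page table containing the fault address are crossed.
 */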
3928static vm_fault_t do_fault_around(struct vm_fault *vmf)
3929{
3930	unsigned long address = vmf->address, nr_pages, mask;
3931	pgoff_t start_pgoff = vmf->pgoff;
3932	pgoff_t end_pgoff;
3933	int off;
3934	vm_fault_t ret = 0;
3935
3936	nr_pages = READ_ONCE(fault_around_bytes) >> PAGE_SHIFT;
3937	mask = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK;
3938
3939	vmf->address = max(address & mask, vmf->vma->vm_start);
3940	off = ((address - vmf->address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
3941	start_pgoff -= off;
3942
3943	/*
3944	 *  end_pgoff is either the end of the page table, the end of
3945	 *  the vma or nr_pages from start_pgoff, depending on what is nearest.
3946	 */
3947	end_pgoff = start_pgoff -
3948		((vmf->address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +
3949		PTRS_PER_PTE - 1;
3950	end_pgoff = min3(end_pgoff, vma_pages(vmf->vma) + vmf->vma->vm_pgoff - 1,
3951			start_pgoff + nr_pages - 1);
3952
3953	if (pmd_none(*vmf->pmd)) {
3954		vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
3955		if (!vmf->prealloc_pte)
3956			goto out;
3957		smp_wmb(); /* See comment in __pte_alloc() */
3958	}
3959
3960	vmf->vma->vm_ops->map_pages(vmf, start_pgoff, end_pgoff);
3961
3962	/* Huge page is mapped? Page fault is solved */
3963	if (pmd_trans_huge(*vmf->pmd)) {
3964		ret = VM_FAULT_NOPAGE;
3965		goto out;
3966	}
3967
3968	/* ->map_pages() hasn't done anything useful. Cold page cache? */
3969	if (!vmf->pte)
3970		goto out;
3971
3972	/* check if the page fault is solved */
3973	vmf->pte -= (vmf->address >> PAGE_SHIFT) - (address >> PAGE_SHIFT);
3974	if (!pte_none(*vmf->pte))
3975		ret = VM_FAULT_NOPAGE;
3976	pte_unmap_unlock(vmf->pte, vmf->ptl);
3977out:
3978	vmf->address = address;
3979	vmf->pte = NULL;
3980	return ret;
3981}
3982
3983static vm_fault_t do_read_fault(struct vm_fault *vmf)
3984{
3985	struct vm_area_struct *vma = vmf->vma;
3986	vm_fault_t ret = 0;
3987
3988	/*
3989	 * Let's call ->map_pages() first and use ->fault() as fallback
3990	 * if the page at this offset is not ready to be mapped (cold page
3991	 * cache or something).
3992	 */
3993	if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) {
3994		ret = do_fault_around(vmf);
3995		if (ret)
3996			return ret;
3997	}
3998
3999	ret = __do_fault(vmf);
4000	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4001		return ret;
4002
4003	ret |= finish_fault(vmf);
4004	unlock_page(vmf->page);
4005	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4006		put_page(vmf->page);
4007	return ret;
4008}
4009
4010static vm_fault_t do_cow_fault(struct vm_fault *vmf)
4011{
4012	struct vm_area_struct *vma = vmf->vma;
4013	vm_fault_t ret;
4014
4015	if (unlikely(anon_vma_prepare(vma)))
4016		return VM_FAULT_OOM;
4017
4018	vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address);
4019	if (!vmf->cow_page)
4020		return VM_FAULT_OOM;
4021
4022	if (mem_cgroup_charge(vmf->cow_page, vma->vm_mm, GFP_KERNEL)) {
4023		put_page(vmf->cow_page);
4024		return VM_FAULT_OOM;
4025	}
4026	cgroup_throttle_swaprate(vmf->cow_page, GFP_KERNEL);
4027
4028	ret = __do_fault(vmf);
4029	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4030		goto uncharge_out;
4031	if (ret & VM_FAULT_DONE_COW)
4032		return ret;
4033
4034	copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
4035	__SetPageUptodate(vmf->cow_page);
4036
4037	ret |= finish_fault(vmf);
4038	unlock_page(vmf->page);
4039	put_page(vmf->page);
4040	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4041		goto uncharge_out;
4042	return ret;
4043uncharge_out:
4044	put_page(vmf->cow_page);
4045	return ret;
4046}
4047
4048static vm_fault_t do_shared_fault(struct vm_fault *vmf)
4049{
4050	struct vm_area_struct *vma = vmf->vma;
4051	vm_fault_t ret, tmp;
4052
4053	ret = __do_fault(vmf);
4054	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4055		return ret;
4056
4057	/*
4058	 * Check if the backing address space wants to know that the page is
4059	 * about to become writable
4060	 */
4061	if (vma->vm_ops->page_mkwrite) {
4062		unlock_page(vmf->page);
4063		tmp = do_page_mkwrite(vmf);
4064		if (unlikely(!tmp ||
4065				(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
4066			put_page(vmf->page);
4067			return tmp;
4068		}
4069	}
4070
4071	ret |= finish_fault(vmf);
4072	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
4073					VM_FAULT_RETRY))) {
4074		unlock_page(vmf->page);
4075		put_page(vmf->page);
4076		return ret;
4077	}
4078
4079	ret |= fault_dirty_shared_page(vmf);
4080	return ret;
4081}
4082
4083/*
4084 * We enter with non-exclusive mmap_lock (to exclude vma changes,
4085 * but allow concurrent faults).
4086 * The mmap_lock may have been released depending on flags and our
4087 * return value.  See filemap_fault() and __lock_page_or_retry().
4088 * If mmap_lock is released, vma may become invalid (for example
4089 * by other thread calling munmap()).
4090 */
4091static vm_fault_t do_fault(struct vm_fault *vmf)
4092{
4093	struct vm_area_struct *vma = vmf->vma;
4094	struct mm_struct *vm_mm = vma->vm_mm;
4095	vm_fault_t ret;
4096
4097	/*
4098	 * The VMA was not fully populated on mmap() or is missing VM_DONTEXPAND
4099	 */
4100	if (!vma->vm_ops->fault) {
4101		/*
4102		 * If we find a migration pmd entry or a none pmd entry, which
4103		 * should never happen, return SIGBUS
4104		 */
4105		if (unlikely(!pmd_present(*vmf->pmd)))
4106			ret = VM_FAULT_SIGBUS;
4107		else {
4108			vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm,
4109						       vmf->pmd,
4110						       vmf->address,
4111						       &vmf->ptl);
4112			/*
4113			 * Make sure this is not a temporary clearing of pte
4114			 * by holding ptl and checking again. A R/M/W update
4115			 * of the pte involves: taking ptl, clearing the pte
4116			 * (so that we don't have concurrent modification by
4117			 * hardware), followed by an update.
4118			 */
4119			if (unlikely(pte_none(*vmf->pte)))
4120				ret = VM_FAULT_SIGBUS;
4121			else
4122				ret = VM_FAULT_NOPAGE;
4123
4124			pte_unmap_unlock(vmf->pte, vmf->ptl);
4125		}
4126	} else if (!(vmf->flags & FAULT_FLAG_WRITE))
4127		ret = do_read_fault(vmf);
4128	else if (!(vma->vm_flags & VM_SHARED))
4129		ret = do_cow_fault(vmf);
4130	else
4131		ret = do_shared_fault(vmf);
4132
4133	/* preallocated pagetable is unused: free it */
4134	if (vmf->prealloc_pte) {
4135		pte_free(vm_mm, vmf->prealloc_pte);
4136		vmf->prealloc_pte = NULL;
4137	}
4138	return ret;
4139}
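
/*
 * Dispatch summary for do_fault() above (illustrative):
 *
 *	no ->fault handler         -> SIGBUS (or NOPAGE if a pte raced in)
 *	read fault                 -> do_read_fault()
 *	write fault, private VMA   -> do_cow_fault()
 *	write fault, shared VMA    -> do_shared_fault()
 */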
4140
4141static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
4142				unsigned long addr, int page_nid,
4143				int *flags)
4144{
4145	get_page(page);
4146
4147	count_vm_numa_event(NUMA_HINT_FAULTS);
4148	if (page_nid == numa_node_id()) {
4149		count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
4150		*flags |= TNF_FAULT_LOCAL;
4151	}
4152
4153	return mpol_misplaced(page, vma, addr);
4154}
4155
4156static vm_fault_t do_numa_page(struct vm_fault *vmf)
4157{
4158	struct vm_area_struct *vma = vmf->vma;
4159	struct page *page = NULL;
4160	int page_nid = NUMA_NO_NODE;
4161	int last_cpupid;
4162	int target_nid;
4163	bool migrated = false;
4164	pte_t pte, old_pte;
4165	bool was_writable = pte_savedwrite(vmf->orig_pte);
4166	int flags = 0;
4167
4168	/*
4169	 * The "pte" at this point cannot be used safely without
4170	 * validation through pte_unmap_same(). It's of NUMA type but
4171	 * the pfn may be bogus if the read is not atomic.
4172	 */
4173	vmf->ptl = pte_lockptr(vma->vm_mm, vmf->pmd);
4174	spin_lock(vmf->ptl);
4175	if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) {
4176		pte_unmap_unlock(vmf->pte, vmf->ptl);
4177		goto out;
4178	}
4179
4180	/*
4181	 * Make it present again. Depending on how the arch implements
4182	 * non-accessible ptes, some may allow access by kernel mode.
4183	 */
4184	old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte);
4185	pte = pte_modify(old_pte, vma->vm_page_prot);
4186	pte = pte_mkyoung(pte);
4187	if (was_writable)
4188		pte = pte_mkwrite(pte);
4189	ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte);
4190	update_mmu_cache(vma, vmf->address, vmf->pte);
4191
4192	page = vm_normal_page(vma, vmf->address, pte);
4193	if (!page) {
4194		pte_unmap_unlock(vmf->pte, vmf->ptl);
4195		return 0;
4196	}
4197
4198	/* TODO: handle PTE-mapped THP */
4199	if (PageCompound(page)) {
4200		pte_unmap_unlock(vmf->pte, vmf->ptl);
4201		return 0;
4202	}
4203
4204	/*
4205	 * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
4206	 * much anyway since they can be in shared cache state. This misses
4207	 * the case where a mapping is writable but the process never writes
4208	 * to it but pte_write gets cleared during protection updates and
4209	 * pte_dirty has unpredictable behaviour between PTE scan updates,
4210	 * background writeback, dirty balancing and application behaviour.
4211	 */
4212	if (!pte_write(pte))
4213		flags |= TNF_NO_GROUP;
4214
4215	/*
4216	 * Flag if the page is shared between multiple address spaces. This
4217	 * is later used when determining whether to group tasks together
4218	 */
4219	if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED))
4220		flags |= TNF_SHARED;
4221
4222	last_cpupid = page_cpupid_last(page);
4223	page_nid = page_to_nid(page);
4224	target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid,
4225			&flags);
4226	pte_unmap_unlock(vmf->pte, vmf->ptl);
4227	if (target_nid == NUMA_NO_NODE) {
4228		put_page(page);
4229		goto out;
4230	}
4231
4232	/* Migrate to the requested node */
4233	migrated = migrate_misplaced_page(page, vma, target_nid);
4234	if (migrated) {
4235		page_nid = target_nid;
4236		flags |= TNF_MIGRATED;
4237	} else
4238		flags |= TNF_MIGRATE_FAIL;
4239
4240out:
4241	if (page_nid != NUMA_NO_NODE)
4242		task_numa_fault(last_cpupid, page_nid, 1, flags);
4243	return 0;
4244}
4245
4246static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf)
4247{
4248	if (vma_is_anonymous(vmf->vma))
4249		return do_huge_pmd_anonymous_page(vmf);
4250	if (vmf->vma->vm_ops->huge_fault)
4251		return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
4252	return VM_FAULT_FALLBACK;
4253}
4254
4255/* `inline' is required to avoid gcc 4.1.2 build error */
4256static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd)
4257{
4258	if (vma_is_anonymous(vmf->vma)) {
4259		if (userfaultfd_huge_pmd_wp(vmf->vma, orig_pmd))
4260			return handle_userfault(vmf, VM_UFFD_WP);
4261		return do_huge_pmd_wp_page(vmf, orig_pmd);
4262	}
4263	if (vmf->vma->vm_ops->huge_fault) {
4264		vm_fault_t ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
4265
4266		if (!(ret & VM_FAULT_FALLBACK))
4267			return ret;
4268	}
4269
4270	/* COW or write-notify handled on pte level: split pmd. */
4271	__split_huge_pmd(vmf->vma, vmf->pmd, vmf->address, false, NULL);
4272
4273	return VM_FAULT_FALLBACK;
4274}
4275
4276static vm_fault_t create_huge_pud(struct vm_fault *vmf)
4277{
4278#if defined(CONFIG_TRANSPARENT_HUGEPAGE) &&			\
4279	defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
4280	/* No support for anonymous transparent PUD pages yet */
4281	if (vma_is_anonymous(vmf->vma))
4282		goto split;
4283	if (vmf->vma->vm_ops->huge_fault) {
4284		vm_fault_t ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
4285
4286		if (!(ret & VM_FAULT_FALLBACK))
4287			return ret;
4288	}
4289split:
4290	/* COW or write-notify not handled on PUD level: split pud. */
4291	__split_huge_pud(vmf->vma, vmf->pud, vmf->address);
4292#endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
4293	return VM_FAULT_FALLBACK;
4294}
4295
4296static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
4297{
4298#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4299	/* No support for anonymous transparent PUD pages yet */
4300	if (vma_is_anonymous(vmf->vma))
4301		return VM_FAULT_FALLBACK;
4302	if (vmf->vma->vm_ops->huge_fault)
4303		return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
4304#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
4305	return VM_FAULT_FALLBACK;
4306}
4307
4308/*
4309 * These routines also need to handle stuff like marking pages dirty
4310 * and/or accessed for architectures that don't do it in hardware (most
4311 * RISC architectures).  The early dirtying is also good on the i386.
4312 *
4313 * There is also a hook called "update_mmu_cache()" that architectures
4314 * with external mmu caches can use to update those (ie the Sparc or
4315 * PowerPC hashed page tables that act as extended TLBs).
4316 *
4317 * We enter with non-exclusive mmap_lock (to exclude vma changes, but allow
4318 * concurrent faults).
4319 *
4320 * The mmap_lock may have been released depending on flags and our return value.
4321 * See filemap_fault() and __lock_page_or_retry().
4322 */
4323static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
4324{
4325	pte_t entry;
4326
4327	if (unlikely(pmd_none(*vmf->pmd))) {
4328		/*
4329		 * Leave __pte_alloc() until later: because vm_ops->fault may
4330		 * want to allocate huge page, and if we expose page table
4331		 * for an instant, it will be difficult to retract from
4332		 * concurrent faults and from rmap lookups.
4333		 */
4334		vmf->pte = NULL;
4335	} else {
4336		/* See comment in pte_alloc_one_map() */
4337		if (pmd_devmap_trans_unstable(vmf->pmd))
4338			return 0;
4339		/*
4340		 * A regular pmd is established and it can't morph into a huge
4341		 * pmd from under us anymore at this point because we hold the
4342		 * mmap_lock read mode and khugepaged takes it in write mode.
4343		 * So now it's safe to run pte_offset_map().
4344		 */
4345		vmf->pte = pte_offset_map(vmf->pmd, vmf->address);
4346		vmf->orig_pte = *vmf->pte;
4347
4348		/*
4349		 * Some architectures can have larger ptes than wordsize,
4350		 * e.g. ppc44x-defconfig has CONFIG_PTE_64BIT=y and
4351		 * CONFIG_32BIT=y, so READ_ONCE cannot guarantee atomic
4352		 * accesses.  The code below just needs a consistent view
4353		 * for the ifs and we later double check anyway with the
4354		 * ptl lock held. So here a barrier will do.
4355		 */
4356		barrier();
4357		if (pte_none(vmf->orig_pte)) {
4358			pte_unmap(vmf->pte);
4359			vmf->pte = NULL;
4360		}
4361	}
4362
4363	if (!vmf->pte) {
4364		if (vma_is_anonymous(vmf->vma))
4365			return do_anonymous_page(vmf);
4366		else
4367			return do_fault(vmf);
4368	}
4369
4370	if (!pte_present(vmf->orig_pte))
4371		return do_swap_page(vmf);
4372
4373	if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
4374		return do_numa_page(vmf);
4375
4376	vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
4377	spin_lock(vmf->ptl);
4378	entry = vmf->orig_pte;
4379	if (unlikely(!pte_same(*vmf->pte, entry))) {
4380		update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
4381		goto unlock;
4382	}
4383	if (vmf->flags & FAULT_FLAG_WRITE) {
4384		if (!pte_write(entry))
4385			return do_wp_page(vmf);
4386		entry = pte_mkdirty(entry);
4387	}
4388	entry = pte_mkyoung(entry);
4389	if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
4390				vmf->flags & FAULT_FLAG_WRITE)) {
4391		update_mmu_cache(vmf->vma, vmf->address, vmf->pte);
4392	} else {
4393		/* Skip spurious TLB flush for retried page fault */
4394		if (vmf->flags & FAULT_FLAG_TRIED)
4395			goto unlock;
4396		/*
4397		 * This is needed only for protection faults but the arch code
4398		 * is not yet telling us if this is a protection fault or not.
4399		 * This still avoids useless tlb flushes for .text page faults
4400		 * with threads.
4401		 */
4402		if (vmf->flags & FAULT_FLAG_WRITE)
4403			flush_tlb_fix_spurious_fault(vmf->vma, vmf->address);
4404	}
4405unlock:
4406	pte_unmap_unlock(vmf->pte, vmf->ptl);
4407	return 0;
4408}
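
/*
 * In short, handle_pte_fault() above dispatches as follows
 * (illustrative):
 *
 *	no pte yet (or pte_none)   -> do_anonymous_page() / do_fault()
 *	pte not present            -> do_swap_page()
 *	pte_protnone               -> do_numa_page()
 *	otherwise                  -> fix up access/dirty bits in place,
 *	                              with do_wp_page() for write faults
 *	                              on read-only ptes
 */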
4409
4410/*
4411 * By the time we get here, we already hold the mm semaphore
4412 *
4413 * The mmap_lock may have been released depending on flags and our
4414 * return value.  See filemap_fault() and __lock_page_or_retry().
4415 */
4416static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
4417		unsigned long address, unsigned int flags)
4418{
4419	struct vm_fault vmf = {
4420		.vma = vma,
4421		.address = address & PAGE_MASK,
4422		.flags = flags,
4423		.pgoff = linear_page_index(vma, address),
4424		.gfp_mask = __get_fault_gfp_mask(vma),
4425	};
4426	unsigned int dirty = flags & FAULT_FLAG_WRITE;
4427	struct mm_struct *mm = vma->vm_mm;
4428	pgd_t *pgd;
4429	p4d_t *p4d;
4430	vm_fault_t ret;
4431
4432	pgd = pgd_offset(mm, address);
4433	p4d = p4d_alloc(mm, pgd, address);
4434	if (!p4d)
4435		return VM_FAULT_OOM;
4436
4437	vmf.pud = pud_alloc(mm, p4d, address);
4438	if (!vmf.pud)
4439		return VM_FAULT_OOM;
4440retry_pud:
4441	if (pud_none(*vmf.pud) && __transparent_hugepage_enabled(vma)) {
4442		ret = create_huge_pud(&vmf);
4443		if (!(ret & VM_FAULT_FALLBACK))
4444			return ret;
4445	} else {
4446		pud_t orig_pud = *vmf.pud;
4447
4448		barrier();
4449		if (pud_trans_huge(orig_pud) || pud_devmap(orig_pud)) {
4450
4451			/* NUMA case for anonymous PUDs would go here */
4452
4453			if (dirty && !pud_write(orig_pud)) {
4454				ret = wp_huge_pud(&vmf, orig_pud);
4455				if (!(ret & VM_FAULT_FALLBACK))
4456					return ret;
4457			} else {
4458				huge_pud_set_accessed(&vmf, orig_pud);
4459				return 0;
4460			}
4461		}
4462	}
4463
4464	vmf.pmd = pmd_alloc(mm, vmf.pud, address);
4465	if (!vmf.pmd)
4466		return VM_FAULT_OOM;
4467
4468	/* Huge pud page fault raced with pmd_alloc? */
4469	if (pud_trans_unstable(vmf.pud))
4470		goto retry_pud;
4471
4472	if (pmd_none(*vmf.pmd) && __transparent_hugepage_enabled(vma)) {
4473		ret = create_huge_pmd(&vmf);
4474		if (!(ret & VM_FAULT_FALLBACK))
4475			return ret;
4476	} else {
4477		pmd_t orig_pmd = *vmf.pmd;
4478
4479		barrier();
4480		if (unlikely(is_swap_pmd(orig_pmd))) {
4481			VM_BUG_ON(thp_migration_supported() &&
4482					  !is_pmd_migration_entry(orig_pmd));
4483			if (is_pmd_migration_entry(orig_pmd))
4484				pmd_migration_entry_wait(mm, vmf.pmd);
4485			return 0;
4486		}
4487		if (pmd_trans_huge(orig_pmd) || pmd_devmap(orig_pmd)) {
4488			if (pmd_protnone(orig_pmd) && vma_is_accessible(vma))
4489				return do_huge_pmd_numa_page(&vmf, orig_pmd);
4490
4491			if (dirty && !pmd_write(orig_pmd)) {
4492				ret = wp_huge_pmd(&vmf, orig_pmd);
4493				if (!(ret & VM_FAULT_FALLBACK))
4494					return ret;
4495			} else {
4496				huge_pmd_set_accessed(&vmf, orig_pmd);
4497				return 0;
4498			}
4499		}
4500	}
4501
4502	return handle_pte_fault(&vmf);
4503}
4504
4505/**
4506 * mm_account_fault - Do page fault accounting
4507 *
4508 * @regs: the pt_regs struct pointer.  When set to NULL, will skip accounting
4509 *        of perf event counters, but we'll still do the per-task accounting to
4510 *        the task that triggered this page fault.
4511 * @address: the faulted address.
4512 * @flags: the fault flags.
4513 * @ret: the fault retcode.
4514 *
4515 * This will take care of most of the page fault accounting.  Meanwhile, it
4516 * will also include the PERF_COUNT_SW_PAGE_FAULTS_[MAJ|MIN] perf counter
4517 * updates.  However note that the handling of PERF_COUNT_SW_PAGE_FAULTS should
4518 * still be in per-arch page fault handlers at the entry of page fault.
4519 */
4520static inline void mm_account_fault(struct pt_regs *regs,
4521				    unsigned long address, unsigned int flags,
4522				    vm_fault_t ret)
4523{
4524	bool major;
4525
4526	/*
4527	 * We don't do accounting for some specific faults:
4528	 *
4529	 * - Unsuccessful faults (e.g. when the address wasn't valid).  That
4530	 *   includes arch_vma_access_permitted() failing before reaching here.
4531	 *   So this is not a "this many hardware page faults" counter.  We
4532	 *   should use the hw profiling for that.
4533	 *
4534	 * - Incomplete faults (VM_FAULT_RETRY).  They will only be counted
4535	 *   once they're completed.
4536	 */
4537	if (ret & (VM_FAULT_ERROR | VM_FAULT_RETRY))
4538		return;
4539
4540	/*
4541	 * We define the fault as a major fault when the final successful fault
4542	 * is VM_FAULT_MAJOR, or if it retried (which implies that we couldn't
4543	 * handle it immediately previously).
4544	 */
4545	major = (ret & VM_FAULT_MAJOR) || (flags & FAULT_FLAG_TRIED);
4546
4547	if (major)
4548		current->maj_flt++;
4549	else
4550		current->min_flt++;
4551
4552	/*
4553	 * If the fault is done for GUP, regs will be NULL.  We only do the
4554	 * accounting for the per thread fault counters who triggered the
4555	 * fault, and we skip the perf event updates.
4556	 */
4557	if (!regs)
4558		return;
4559
4560	if (major)
4561		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
4562	else
4563		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
4564}
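
/*
 * For example, a fault that had to start I/O is reported as
 * VM_FAULT_MAJOR on the final successful attempt (or carries
 * FAULT_FLAG_TRIED after a retry) and is counted exactly once as a
 * major fault; the intermediate VM_FAULT_RETRY attempts are not
 * counted at all.
 */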
4565
4566/*
4567 * By the time we get here, we already hold the mm semaphore
4568 *
4569 * The mmap_lock may have been released depending on flags and our
4570 * return value.  See filemap_fault() and __lock_page_or_retry().
4571 */
4572vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
4573			   unsigned int flags, struct pt_regs *regs)
4574{
4575	vm_fault_t ret;
4576
4577	__set_current_state(TASK_RUNNING);
4578
4579	count_vm_event(PGFAULT);
4580	count_memcg_event_mm(vma->vm_mm, PGFAULT);
4581
4582	/* do counter updates before entering really critical section. */
4583	check_sync_rss_stat(current);
4584
4585	if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
4586					    flags & FAULT_FLAG_INSTRUCTION,
4587					    flags & FAULT_FLAG_REMOTE))
4588		return VM_FAULT_SIGSEGV;
4589
4590	/*
4591	 * Enable the memcg OOM handling for faults triggered in user
4592	 * space.  Kernel faults are handled more gracefully.
4593	 */
4594	if (flags & FAULT_FLAG_USER)
4595		mem_cgroup_enter_user_fault();
4596
4597	if (unlikely(is_vm_hugetlb_page(vma)))
4598		ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
4599	else
4600		ret = __handle_mm_fault(vma, address, flags);
4601
4602	if (flags & FAULT_FLAG_USER) {
4603		mem_cgroup_exit_user_fault();
4604		/*
4605		 * The task may have entered a memcg OOM situation but
4606		 * if the allocation error was handled gracefully (no
4607		 * VM_FAULT_OOM), there is no need to kill anything.
4608		 * Just clean up the OOM state peacefully.
4609		 */
4610		if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
4611			mem_cgroup_oom_synchronize(false);
4612	}
4613
4614	mm_account_fault(regs, address, flags, ret);
4615
4616	return ret;
4617}
4618EXPORT_SYMBOL_GPL(handle_mm_fault);
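
/*
 * Sketch of a typical arch page-fault handler calling into the code
 * above (simplified; see arch/<arch>/mm/fault.c for the real thing):
 *
 *	mmap_read_lock(mm);
 *	vma = find_vma(mm, address);
 *	...access checks...
 *	fault = handle_mm_fault(vma, address, flags, regs);
 *	if (fault & VM_FAULT_RETRY) {
 *		flags |= FAULT_FLAG_TRIED;
 *		...retake mmap_lock and retry...
 *	}
 */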
4619
4620#ifndef __PAGETABLE_P4D_FOLDED
4621/*
4622 * Allocate p4d page table.
4623 * We've already handled the fast-path in-line.
4624 */
4625int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
4626{
4627	p4d_t *new = p4d_alloc_one(mm, address);
4628	if (!new)
4629		return -ENOMEM;
4630
4631	smp_wmb(); /* See comment in __pte_alloc */
4632
4633	spin_lock(&mm->page_table_lock);
4634	if (pgd_present(*pgd))		/* Another has populated it */
4635		p4d_free(mm, new);
4636	else
4637		pgd_populate(mm, pgd, new);
4638	spin_unlock(&mm->page_table_lock);
4639	return 0;
4640}
4641#endif /* __PAGETABLE_P4D_FOLDED */
4642
4643#ifndef __PAGETABLE_PUD_FOLDED
4644/*
4645 * Allocate page upper directory.
4646 * We've already handled the fast-path in-line.
4647 */
4648int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address)
4649{
4650	pud_t *new = pud_alloc_one(mm, address);
4651	if (!new)
4652		return -ENOMEM;
4653
4654	smp_wmb(); /* See comment in __pte_alloc */
4655
4656	spin_lock(&mm->page_table_lock);
4657	if (!p4d_present(*p4d)) {
4658		mm_inc_nr_puds(mm);
4659		p4d_populate(mm, p4d, new);
4660	} else	/* Another has populated it */
4661		pud_free(mm, new);
4662	spin_unlock(&mm->page_table_lock);
4663	return 0;
4664}
4665#endif /* __PAGETABLE_PUD_FOLDED */
4666
4667#ifndef __PAGETABLE_PMD_FOLDED
4668/*
4669 * Allocate page middle directory.
4670 * We've already handled the fast-path in-line.
4671 */
4672int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
4673{
4674	spinlock_t *ptl;
4675	pmd_t *new = pmd_alloc_one(mm, address);
4676	if (!new)
4677		return -ENOMEM;
4678
4679	smp_wmb(); /* See comment in __pte_alloc */
4680
4681	ptl = pud_lock(mm, pud);
4682	if (!pud_present(*pud)) {
4683		mm_inc_nr_pmds(mm);
4684		pud_populate(mm, pud, new);
4685	} else	/* Another has populated it */
4686		pmd_free(mm, new);
4687	spin_unlock(ptl);
4688	return 0;
4689}
4690#endif /* __PAGETABLE_PMD_FOLDED */
4691
4692static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address,
4693			    struct mmu_notifier_range *range,
4694			    pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
4695{
4696	pgd_t *pgd;
4697	p4d_t *p4d;
4698	pud_t *pud;
4699	pmd_t *pmd;
4700	pte_t *ptep;
4701
4702	pgd = pgd_offset(mm, address);
4703	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
4704		goto out;
4705
4706	p4d = p4d_offset(pgd, address);
4707	if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d)))
4708		goto out;
4709
4710	pud = pud_offset(p4d, address);
4711	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
4712		goto out;
4713
4714	pmd = pmd_offset(pud, address);
4715	VM_BUG_ON(pmd_trans_huge(*pmd));
4716
4717	if (pmd_huge(*pmd)) {
4718		if (!pmdpp)
4719			goto out;
4720
4721		if (range) {
4722			mmu_notifier_range_init(range, MMU_NOTIFY_CLEAR, 0,
4723						NULL, mm, address & PMD_MASK,
4724						(address & PMD_MASK) + PMD_SIZE);
4725			mmu_notifier_invalidate_range_start(range);
4726		}
4727		*ptlp = pmd_lock(mm, pmd);
4728		if (pmd_huge(*pmd)) {
4729			*pmdpp = pmd;
4730			return 0;
4731		}
4732		spin_unlock(*ptlp);
4733		if (range)
4734			mmu_notifier_invalidate_range_end(range);
4735	}
4736
4737	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
4738		goto out;
4739
4740	if (range) {
4741		mmu_notifier_range_init(range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
4742					address & PAGE_MASK,
4743					(address & PAGE_MASK) + PAGE_SIZE);
4744		mmu_notifier_invalidate_range_start(range);
4745	}
4746	ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
4747	if (!pte_present(*ptep))
4748		goto unlock;
4749	*ptepp = ptep;
4750	return 0;
4751unlock:
4752	pte_unmap_unlock(ptep, *ptlp);
4753	if (range)
4754		mmu_notifier_invalidate_range_end(range);
4755out:
4756	return -EINVAL;
4757}
4758
4759static inline int follow_pte(struct mm_struct *mm, unsigned long address,
4760			     pte_t **ptepp, spinlock_t **ptlp)
4761{
4762	int res;
4763
4764	/* (void) is needed to make gcc happy */
4765	(void) __cond_lock(*ptlp,
4766			   !(res = __follow_pte_pmd(mm, address, NULL,
4767						    ptepp, NULL, ptlp)));
4768	return res;
4769}
4770
4771int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
4772		   struct mmu_notifier_range *range,
4773		   pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
4774{
4775	int res;
4776
4777	/* (void) is needed to make gcc happy */
4778	(void) __cond_lock(*ptlp,
4779			   !(res = __follow_pte_pmd(mm, address, range,
4780						    ptepp, pmdpp, ptlp)));
4781	return res;
4782}
4783EXPORT_SYMBOL(follow_pte_pmd);
4784
4785/**
4786 * follow_pfn - look up PFN at a user virtual address
4787 * @vma: memory mapping
4788 * @address: user virtual address
4789 * @pfn: location to store found PFN
4790 *
4791 * Only IO mappings and raw PFN mappings are allowed.
4792 *
4793 * Return: zero and the pfn at @pfn on success, a negative errno otherwise.
4794 */
4795int follow_pfn(struct vm_area_struct *vma, unsigned long address,
4796	unsigned long *pfn)
4797{
4798	int ret = -EINVAL;
4799	spinlock_t *ptl;
4800	pte_t *ptep;
4801
4802	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
4803		return ret;
4804
4805	ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
4806	if (ret)
4807		return ret;
4808	*pfn = pte_pfn(*ptep);
4809	pte_unmap_unlock(ptep, ptl);
4810	return 0;
4811}
4812EXPORT_SYMBOL(follow_pfn);
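
/*
 * Hypothetical usage sketch: with mmap_lock held, a driver that needs
 * the raw PFN backing a VM_IO/VM_PFNMAP mapping might do
 *
 *	unsigned long pfn;
 *
 *	if (follow_pfn(vma, addr, &pfn))
 *		return -EINVAL;
 *
 * The result is only stable while mmap_lock is held; the mapping may
 * change as soon as the lock is dropped.
 */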
4813
4814#ifdef CONFIG_HAVE_IOREMAP_PROT
4815int follow_phys(struct vm_area_struct *vma,
4816		unsigned long address, unsigned int flags,
4817		unsigned long *prot, resource_size_t *phys)
4818{
4819	int ret = -EINVAL;
4820	pte_t *ptep, pte;
4821	spinlock_t *ptl;
4822
4823	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
4824		goto out;
4825
4826	if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
4827		goto out;
4828	pte = *ptep;
4829
4830	if ((flags & FOLL_WRITE) && !pte_write(pte))
4831		goto unlock;
4832
4833	*prot = pgprot_val(pte_pgprot(pte));
4834	*phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
4835
4836	ret = 0;
4837unlock:
4838	pte_unmap_unlock(ptep, ptl);
4839out:
4840	return ret;
4841}
4842
4843int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
4844			void *buf, int len, int write)
4845{
4846	resource_size_t phys_addr;
4847	unsigned long prot = 0;
4848	void __iomem *maddr;
4849	int offset = addr & (PAGE_SIZE-1);
4850
4851	if (follow_phys(vma, addr, write, &prot, &phys_addr))
4852		return -EINVAL;
4853
4854	maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
4855	if (!maddr)
4856		return -ENOMEM;
4857
4858	if (write)
4859		memcpy_toio(maddr + offset, buf, len);
4860	else
4861		memcpy_fromio(buf, maddr + offset, len);
4862	iounmap(maddr);
4863
4864	return len;
4865}
4866EXPORT_SYMBOL_GPL(generic_access_phys);
4867#endif
4868
4869/*
4870 * Access another process' address space as given in mm.  If non-NULL, use the
4871 * given task for page fault accounting.
4872 */
4873int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
4874		unsigned long addr, void *buf, int len, unsigned int gup_flags)
4875{
4876	struct vm_area_struct *vma;
4877	void *old_buf = buf;
4878	int write = gup_flags & FOLL_WRITE;
4879
4880	if (mmap_read_lock_killable(mm))
4881		return 0;
4882
4883	/* ignore errors, just check how much was successfully transferred */
4884	while (len) {
4885		int bytes, ret, offset;
4886		void *maddr;
4887		struct page *page = NULL;
4888
4889		ret = get_user_pages_remote(mm, addr, 1,
4890				gup_flags, &page, &vma, NULL);
4891		if (ret <= 0) {
4892#ifndef CONFIG_HAVE_IOREMAP_PROT
4893			break;
4894#else
4895			/*
4896			 * Check if this is a VM_IO | VM_PFNMAP VMA, which
4897			 * we can access using slightly different code.
4898			 */
4899			vma = find_vma(mm, addr);
4900			if (!vma || vma->vm_start > addr)
4901				break;
4902			if (vma->vm_ops && vma->vm_ops->access)
4903				ret = vma->vm_ops->access(vma, addr, buf,
4904							  len, write);
4905			if (ret <= 0)
4906				break;
4907			bytes = ret;
4908#endif
4909		} else {
4910			bytes = len;
4911			offset = addr & (PAGE_SIZE-1);
4912			if (bytes > PAGE_SIZE-offset)
4913				bytes = PAGE_SIZE-offset;
4914
4915			maddr = kmap(page);
4916			if (write) {
4917				copy_to_user_page(vma, page, addr,
4918						  maddr + offset, buf, bytes);
4919				set_page_dirty_lock(page);
4920			} else {
4921				copy_from_user_page(vma, page, addr,
4922						    buf, maddr + offset, bytes);
4923			}
4924			kunmap(page);
4925			put_page(page);
4926		}
4927		len -= bytes;
4928		buf += bytes;
4929		addr += bytes;
4930	}
4931	mmap_read_unlock(mm);
4932
4933	return buf - old_buf;
4934}
4935
4936/**
4937 * access_remote_vm - access another process' address space
4938 * @mm:		the mm_struct of the target address space
4939 * @addr:	start address to access
4940 * @buf:	source or destination buffer
4941 * @len:	number of bytes to transfer
4942 * @gup_flags:	flags modifying lookup behaviour
4943 *
4944 * The caller must hold a reference on @mm.
4945 *
4946 * Return: number of bytes copied from source to destination.
4947 */
4948int access_remote_vm(struct mm_struct *mm, unsigned long addr,
4949		void *buf, int len, unsigned int gup_flags)
4950{
4951	return __access_remote_vm(NULL, mm, addr, buf, len, gup_flags);
4952}
4953
4954/*
4955 * Access another process' address space.
4956 * Source/target buffer must be in kernel space.
4957 * Do not walk the page table directly; use get_user_pages().
4958 */
4959int access_process_vm(struct task_struct *tsk, unsigned long addr,
4960		void *buf, int len, unsigned int gup_flags)
4961{
4962	struct mm_struct *mm;
4963	int ret;
4964
4965	mm = get_task_mm(tsk);
4966	if (!mm)
4967		return 0;
4968
4969	ret = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
4970
4971	mmput(mm);
4972
4973	return ret;
4974}
4975EXPORT_SYMBOL_GPL(access_process_vm);
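
/*
 * Illustrative ptrace-style usage of access_process_vm(): reading 64
 * bytes from another task, forcing access to read-only mappings the
 * way the ptrace code does:
 *
 *	char buf[64];
 *	int copied = access_process_vm(child, addr, buf, sizeof(buf),
 *				       FOLL_FORCE);
 *
 * A short count (copied < sizeof(buf)) means part of the range was
 * unmapped or otherwise inaccessible.
 */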
4976
4977/*
4978 * Print the name of a VMA.
4979 */
4980void print_vma_addr(char *prefix, unsigned long ip)
4981{
4982	struct mm_struct *mm = current->mm;
4983	struct vm_area_struct *vma;
4984
4985	/*
4986	 * We might be running from an atomic context, so we cannot sleep.
4987	 */
4988	if (!mmap_read_trylock(mm))
4989		return;
4990
4991	vma = find_vma(mm, ip);
4992	if (vma && vma->vm_file) {
4993		struct file *f = vma->vm_file;
4994		char *buf = (char *)__get_free_page(GFP_NOWAIT);
4995		if (buf) {
4996			char *p;
4997
4998			p = file_path(f, buf, PAGE_SIZE);
4999			if (IS_ERR(p))
5000				p = "?";
5001			printk("%s%s[%lx+%lx]", prefix, kbasename(p),
5002					vma->vm_start,
5003					vma->vm_end - vma->vm_start);
5004			free_page((unsigned long)buf);
5005		}
5006	}
5007	mmap_read_unlock(mm);
5008}
5009
5010#if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
5011void __might_fault(const char *file, int line)
5012{
5013	/*
5014	 * Some code (nfs/sunrpc) uses socket ops on kernel memory while
5015	 * holding the mmap_lock; this is safe because kernel memory doesn't
5016	 * get paged out, therefore we'll never actually fault, and the
5017	 * below annotations will generate false positives.
5018	 */
5019	if (uaccess_kernel())
5020		return;
5021	if (pagefault_disabled())
5022		return;
5023	__might_sleep(file, line, 0);
5024#if defined(CONFIG_DEBUG_ATOMIC_SLEEP)
5025	if (current->mm)
5026		might_lock_read(&current->mm->mmap_lock);
5027#endif
5028}
5029EXPORT_SYMBOL(__might_fault);
5030#endif
5031
5032#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
5033/*
5034 * Process all subpages of the specified huge page with the specified
5035 * operation.  The target subpage will be processed last to keep its
5036 * cache lines hot.
5037 */
5038static inline void process_huge_page(
5039	unsigned long addr_hint, unsigned int pages_per_huge_page,
5040	void (*process_subpage)(unsigned long addr, int idx, void *arg),
5041	void *arg)
5042{
5043	int i, n, base, l;
5044	unsigned long addr = addr_hint &
5045		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
5046
5047	/* Process target subpage last to keep its cache lines hot */
5048	might_sleep();
5049	n = (addr_hint - addr) / PAGE_SIZE;
5050	if (2 * n <= pages_per_huge_page) {
5051		/* If target subpage in first half of huge page */
5052		base = 0;
5053		l = n;
5054		/* Process subpages at the end of huge page */
5055		for (i = pages_per_huge_page - 1; i >= 2 * n; i--) {
5056			cond_resched();
5057			process_subpage(addr + i * PAGE_SIZE, i, arg);
5058		}
5059	} else {
5060		/* If target subpage in second half of huge page */
5061		base = pages_per_huge_page - 2 * (pages_per_huge_page - n);
5062		l = pages_per_huge_page - n;
5063		/* Process subpages at the begin of huge page */
5064		for (i = 0; i < base; i++) {
5065			cond_resched();
5066			process_subpage(addr + i * PAGE_SIZE, i, arg);
5067		}
5068	}
5069	/*
5070	 * Process remaining subpages in left-right-left-right pattern
5071	 * towards the target subpage
5072	 */
5073	for (i = 0; i < l; i++) {
5074		int left_idx = base + i;
5075		int right_idx = base + 2 * l - 1 - i;
5076
5077		cond_resched();
5078		process_subpage(addr + left_idx * PAGE_SIZE, left_idx, arg);
5079		cond_resched();
5080		process_subpage(addr + right_idx * PAGE_SIZE, right_idx, arg);
5081	}
5082}
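
/*
 * Illustrative ordering: with pages_per_huge_page == 8 and the target
 * at subpage index 2 (so n == 2, base == 0, l == 2), the tail subpages
 * 7, 6, 5, 4 are processed first, then the left/right pairs 0, 3 and
 * 1, 2, leaving the target subpage 2 until last.
 */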
5083
5084static void clear_gigantic_page(struct page *page,
5085				unsigned long addr,
5086				unsigned int pages_per_huge_page)
5087{
5088	int i;
5089	struct page *p = page;
5090
5091	might_sleep();
5092	for (i = 0; i < pages_per_huge_page;
5093	     i++, p = mem_map_next(p, page, i)) {
5094		cond_resched();
5095		clear_user_highpage(p, addr + i * PAGE_SIZE);
5096	}
5097}
5098
5099static void clear_subpage(unsigned long addr, int idx, void *arg)
5100{
5101	struct page *page = arg;
5102
5103	clear_user_highpage(page + idx, addr);
5104}
5105
5106void clear_huge_page(struct page *page,
5107		     unsigned long addr_hint, unsigned int pages_per_huge_page)
5108{
5109	unsigned long addr = addr_hint &
5110		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
5111
5112	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
5113		clear_gigantic_page(page, addr, pages_per_huge_page);
5114		return;
5115	}
5116
5117	process_huge_page(addr_hint, pages_per_huge_page, clear_subpage, page);
5118}
5119
5120static void copy_user_gigantic_page(struct page *dst, struct page *src,
5121				    unsigned long addr,
5122				    struct vm_area_struct *vma,
5123				    unsigned int pages_per_huge_page)
5124{
5125	int i;
5126	struct page *dst_base = dst;
5127	struct page *src_base = src;
5128
5129	for (i = 0; i < pages_per_huge_page; ) {
5130		cond_resched();
5131		copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
5132
5133		i++;
5134		dst = mem_map_next(dst, dst_base, i);
5135		src = mem_map_next(src, src_base, i);
5136	}
5137}
5138
5139struct copy_subpage_arg {
5140	struct page *dst;
5141	struct page *src;
5142	struct vm_area_struct *vma;
5143};
5144
5145static void copy_subpage(unsigned long addr, int idx, void *arg)
5146{
5147	struct copy_subpage_arg *copy_arg = arg;
5148
5149	copy_user_highpage(copy_arg->dst + idx, copy_arg->src + idx,
5150			   addr, copy_arg->vma);
5151}
5152
5153void copy_user_huge_page(struct page *dst, struct page *src,
5154			 unsigned long addr_hint, struct vm_area_struct *vma,
5155			 unsigned int pages_per_huge_page)
5156{
5157	unsigned long addr = addr_hint &
5158		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
5159	struct copy_subpage_arg arg = {
5160		.dst = dst,
5161		.src = src,
5162		.vma = vma,
5163	};
5164
5165	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
5166		copy_user_gigantic_page(dst, src, addr, vma,
5167					pages_per_huge_page);
5168		return;
5169	}
5170
5171	process_huge_page(addr_hint, pages_per_huge_page, copy_subpage, &arg);
5172}
5173
5174long copy_huge_page_from_user(struct page *dst_page,
5175				const void __user *usr_src,
5176				unsigned int pages_per_huge_page,
5177				bool allow_pagefault)
5178{
5179	void *src = (void *)usr_src;
5180	void *page_kaddr;
5181	unsigned long i, rc = 0;
5182	unsigned long ret_val = pages_per_huge_page * PAGE_SIZE;
5183
5184	for (i = 0; i < pages_per_huge_page; i++) {
5185		if (allow_pagefault)
5186			page_kaddr = kmap(dst_page + i);
5187		else
5188			page_kaddr = kmap_atomic(dst_page + i);
5189		rc = copy_from_user(page_kaddr,
5190				(const void __user *)(src + i * PAGE_SIZE),
5191				PAGE_SIZE);
5192		if (allow_pagefault)
5193			kunmap(dst_page + i);
5194		else
5195			kunmap_atomic(page_kaddr);
5196
5197		ret_val -= (PAGE_SIZE - rc);
5198		if (rc)
5199			break;
5200
5201		cond_resched();
5202	}
5203	return ret_val;
5204}
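
/*
 * Like copy_from_user(), the return value above is the number of bytes
 * that could NOT be copied: 0 when the whole huge page was copied, up
 * to pages_per_huge_page * PAGE_SIZE when the very first subpage
 * faults with nothing copied.
 */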
5205#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
5206
5207#if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS
5208
5209static struct kmem_cache *page_ptl_cachep;
5210
5211void __init ptlock_cache_init(void)
5212{
5213	page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
5214			SLAB_PANIC, NULL);
5215}
5216
5217bool ptlock_alloc(struct page *page)
5218{
5219	spinlock_t *ptl;
5220
5221	ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
5222	if (!ptl)
5223		return false;
5224	page->ptl = ptl;
5225	return true;
5226}
5227
5228void ptlock_free(struct page *page)
5229{
5230	kmem_cache_free(page_ptl_cachep, page->ptl);
5231}
5232#endif