linux/mm/memory.c — Linux v5.9
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  linux/mm/memory.c
   4 *
   5 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
   6 */
   7
   8/*
   9 * demand-loading started 01.12.91 - seems it is high on the list of
  10 * things wanted, and it should be easy to implement. - Linus
  11 */
  12
  13/*
   14 * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
  15 * pages started 02.12.91, seems to work. - Linus.
  16 *
  17 * Tested sharing by executing about 30 /bin/sh: under the old kernel it
  18 * would have taken more than the 6M I have free, but it worked well as
  19 * far as I could see.
  20 *
  21 * Also corrected some "invalidate()"s - I wasn't doing enough of them.
  22 */
  23
  24/*
  25 * Real VM (paging to/from disk) started 18.12.91. Much more work and
  26 * thought has to go into this. Oh, well..
  27 * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
  28 *		Found it. Everything seems to work now.
  29 * 20.12.91  -  Ok, making the swap-device changeable like the root.
  30 */
  31
  32/*
  33 * 05.04.94  -  Multi-page memory management added for v1.1.
  34 *              Idea by Alex Bligh (alex@cconcepts.co.uk)
  35 *
  36 * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
  37 *		(Gerhard.Wichert@pdb.siemens.de)
  38 *
  39 * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
  40 */
  41
  42#include <linux/kernel_stat.h>
  43#include <linux/mm.h>
  44#include <linux/sched/mm.h>
  45#include <linux/sched/coredump.h>
  46#include <linux/sched/numa_balancing.h>
  47#include <linux/sched/task.h>
  48#include <linux/hugetlb.h>
  49#include <linux/mman.h>
  50#include <linux/swap.h>
  51#include <linux/highmem.h>
  52#include <linux/pagemap.h>
  53#include <linux/memremap.h>
  54#include <linux/ksm.h>
  55#include <linux/rmap.h>
  56#include <linux/export.h>
  57#include <linux/delayacct.h>
  58#include <linux/init.h>
  59#include <linux/pfn_t.h>
  60#include <linux/writeback.h>
  61#include <linux/memcontrol.h>
  62#include <linux/mmu_notifier.h>
  63#include <linux/swapops.h>
  64#include <linux/elf.h>
  65#include <linux/gfp.h>
  66#include <linux/migrate.h>
  67#include <linux/string.h>
  68#include <linux/dma-debug.h>
  69#include <linux/debugfs.h>
  70#include <linux/userfaultfd_k.h>
  71#include <linux/dax.h>
  72#include <linux/oom.h>
  73#include <linux/numa.h>
  74#include <linux/perf_event.h>
  75#include <linux/ptrace.h>
  76#include <linux/vmalloc.h>
  77
  78#include <trace/events/kmem.h>
  79
  80#include <asm/io.h>
  81#include <asm/mmu_context.h>
  82#include <asm/pgalloc.h>
  83#include <linux/uaccess.h>
  84#include <asm/tlb.h>
  85#include <asm/tlbflush.h>
  86
  87#include "pgalloc-track.h"
  88#include "internal.h"
  89
  90#if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
  91#warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
  92#endif
  93
  94#ifndef CONFIG_NEED_MULTIPLE_NODES
  95/* use the per-pgdat data instead for discontigmem - mbligh */
  96unsigned long max_mapnr;
  97EXPORT_SYMBOL(max_mapnr);
  98
  99struct page *mem_map;
 100EXPORT_SYMBOL(mem_map);
 101#endif
 102
 103/*
 104 * A number of key systems in x86 including ioremap() rely on the assumption
  105 * that high_memory defines the upper bound on direct map memory, the end
 106 * of ZONE_NORMAL.  Under CONFIG_DISCONTIG this means that max_low_pfn and
 107 * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
 108 * and ZONE_HIGHMEM.
 109 */
 110void *high_memory;
 111EXPORT_SYMBOL(high_memory);
 112
 113/*
 114 * Randomize the address space (stacks, mmaps, brk, etc.).
 115 *
 116 * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
 117 *   as ancient (libc5 based) binaries can segfault. )
 118 */
 119int randomize_va_space __read_mostly =
 120#ifdef CONFIG_COMPAT_BRK
 121					1;
 122#else
 123					2;
 124#endif
 125
 126#ifndef arch_faults_on_old_pte
 127static inline bool arch_faults_on_old_pte(void)
 128{
 129	/*
  130	 * Those architectures which don't have a hardware access-flag
  131	 * feature need to implement their own helper. By default, "true"
  132	 * means a page fault will be taken on an old pte.
 133	 */
 134	return true;
 135}
 136#endif
 137
 138static int __init disable_randmaps(char *s)
 139{
 140	randomize_va_space = 0;
 141	return 1;
 142}
 143__setup("norandmaps", disable_randmaps);
 144
 145unsigned long zero_pfn __read_mostly;
 146EXPORT_SYMBOL(zero_pfn);
 147
 148unsigned long highest_memmap_pfn __read_mostly;
 149
 150/*
 151 * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
 152 */
 153static int __init init_zero_pfn(void)
 154{
 155	zero_pfn = page_to_pfn(ZERO_PAGE(0));
 156	return 0;
 157}
 158core_initcall(init_zero_pfn);
 159
 160void mm_trace_rss_stat(struct mm_struct *mm, int member, long count)
 161{
 162	trace_rss_stat(mm, member, count);
 163}
 164
 165#if defined(SPLIT_RSS_COUNTING)
 166
 167void sync_mm_rss(struct mm_struct *mm)
 168{
 169	int i;
 170
 171	for (i = 0; i < NR_MM_COUNTERS; i++) {
 172		if (current->rss_stat.count[i]) {
 173			add_mm_counter(mm, i, current->rss_stat.count[i]);
 174			current->rss_stat.count[i] = 0;
 175		}
 176	}
 177	current->rss_stat.events = 0;
 178}
 179
 180static void add_mm_counter_fast(struct mm_struct *mm, int member, int val)
 181{
 182	struct task_struct *task = current;
 183
 184	if (likely(task->mm == mm))
 185		task->rss_stat.count[member] += val;
 186	else
 187		add_mm_counter(mm, member, val);
 188}
 189#define inc_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, 1)
 190#define dec_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, -1)
 191
 192/* sync counter once per 64 page faults */
 193#define TASK_RSS_EVENTS_THRESH	(64)
 194static void check_sync_rss_stat(struct task_struct *task)
 195{
 196	if (unlikely(task != current))
 197		return;
 198	if (unlikely(task->rss_stat.events++ > TASK_RSS_EVENTS_THRESH))
 199		sync_mm_rss(task->mm);
 200}
 201#else /* SPLIT_RSS_COUNTING */
 202
 203#define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member)
 204#define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member)
 205
 206static void check_sync_rss_stat(struct task_struct *task)
 207{
 208}
 209
 210#endif /* SPLIT_RSS_COUNTING */
 211
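/*
 * Example (illustrative; not a call sequence copied from this file): on
 * the fault path, handlers bump the task-local counter with
 * inc_mm_counter_fast() (no atomic on current->mm) and rely on the
 * periodic fold-back:
 *
 *	inc_mm_counter_fast(mm, MM_ANONPAGES);
 *	...
 *	check_sync_rss_stat(current);
 *
 * After TASK_RSS_EVENTS_THRESH events the task-local deltas are folded
 * into the mm by sync_mm_rss(), so readers of the mm counters see at
 * most a small, bounded amount of per-task slack.
 */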
 212/*
 213 * Note: this doesn't free the actual pages themselves. That
 214 * has been handled earlier when unmapping all the memory regions.
 215 */
 216static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
 217			   unsigned long addr)
 218{
 219	pgtable_t token = pmd_pgtable(*pmd);
 220	pmd_clear(pmd);
 221	pte_free_tlb(tlb, token, addr);
 222	mm_dec_nr_ptes(tlb->mm);
 223}
 224
 225static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
 226				unsigned long addr, unsigned long end,
 227				unsigned long floor, unsigned long ceiling)
 228{
 229	pmd_t *pmd;
 230	unsigned long next;
 231	unsigned long start;
 232
 233	start = addr;
 234	pmd = pmd_offset(pud, addr);
 235	do {
 236		next = pmd_addr_end(addr, end);
 237		if (pmd_none_or_clear_bad(pmd))
 238			continue;
 239		free_pte_range(tlb, pmd, addr);
 240	} while (pmd++, addr = next, addr != end);
 241
 242	start &= PUD_MASK;
 243	if (start < floor)
 244		return;
 245	if (ceiling) {
 246		ceiling &= PUD_MASK;
 247		if (!ceiling)
 248			return;
 249	}
 250	if (end - 1 > ceiling - 1)
 251		return;
 252
 253	pmd = pmd_offset(pud, start);
 254	pud_clear(pud);
 255	pmd_free_tlb(tlb, pmd, start);
 256	mm_dec_nr_pmds(tlb->mm);
 257}
 258
 259static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
 260				unsigned long addr, unsigned long end,
 261				unsigned long floor, unsigned long ceiling)
 262{
 263	pud_t *pud;
 264	unsigned long next;
 265	unsigned long start;
 266
 267	start = addr;
 268	pud = pud_offset(p4d, addr);
 269	do {
 270		next = pud_addr_end(addr, end);
 271		if (pud_none_or_clear_bad(pud))
 272			continue;
 273		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
 274	} while (pud++, addr = next, addr != end);
 275
 276	start &= P4D_MASK;
 277	if (start < floor)
 278		return;
 279	if (ceiling) {
 280		ceiling &= P4D_MASK;
 281		if (!ceiling)
 282			return;
 283	}
 284	if (end - 1 > ceiling - 1)
 285		return;
 286
 287	pud = pud_offset(p4d, start);
 288	p4d_clear(p4d);
 289	pud_free_tlb(tlb, pud, start);
 290	mm_dec_nr_puds(tlb->mm);
 291}
 292
 293static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
 294				unsigned long addr, unsigned long end,
 295				unsigned long floor, unsigned long ceiling)
 296{
 297	p4d_t *p4d;
 298	unsigned long next;
 299	unsigned long start;
 300
 301	start = addr;
 302	p4d = p4d_offset(pgd, addr);
 303	do {
 304		next = p4d_addr_end(addr, end);
 305		if (p4d_none_or_clear_bad(p4d))
 306			continue;
 307		free_pud_range(tlb, p4d, addr, next, floor, ceiling);
 308	} while (p4d++, addr = next, addr != end);
 309
 310	start &= PGDIR_MASK;
 311	if (start < floor)
 312		return;
 313	if (ceiling) {
 314		ceiling &= PGDIR_MASK;
 315		if (!ceiling)
 316			return;
 317	}
 318	if (end - 1 > ceiling - 1)
 319		return;
 320
 321	p4d = p4d_offset(pgd, start);
 322	pgd_clear(pgd);
 323	p4d_free_tlb(tlb, p4d, start);
 324}
 325
 326/*
 327 * This function frees user-level page tables of a process.
 328 */
 329void free_pgd_range(struct mmu_gather *tlb,
 330			unsigned long addr, unsigned long end,
 331			unsigned long floor, unsigned long ceiling)
 332{
 333	pgd_t *pgd;
 334	unsigned long next;
 335
 336	/*
 337	 * The next few lines have given us lots of grief...
 338	 *
 339	 * Why are we testing PMD* at this top level?  Because often
 340	 * there will be no work to do at all, and we'd prefer not to
 341	 * go all the way down to the bottom just to discover that.
 342	 *
 343	 * Why all these "- 1"s?  Because 0 represents both the bottom
 344	 * of the address space and the top of it (using -1 for the
 345	 * top wouldn't help much: the masks would do the wrong thing).
 346	 * The rule is that addr 0 and floor 0 refer to the bottom of
  347	 * the address space, but end 0 and ceiling 0 refer to the top.
 348	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
 349	 * that end 0 case should be mythical).
 350	 *
 351	 * Wherever addr is brought up or ceiling brought down, we must
 352	 * be careful to reject "the opposite 0" before it confuses the
 353	 * subsequent tests.  But what about where end is brought down
 354	 * by PMD_SIZE below? no, end can't go down to 0 there.
 355	 *
 356	 * Whereas we round start (addr) and ceiling down, by different
 357	 * masks at different levels, in order to test whether a table
 358	 * now has no other vmas using it, so can be freed, we don't
 359	 * bother to round floor or end up - the tests don't need that.
 360	 */
 361
 362	addr &= PMD_MASK;
 363	if (addr < floor) {
 364		addr += PMD_SIZE;
 365		if (!addr)
 366			return;
 367	}
 368	if (ceiling) {
 369		ceiling &= PMD_MASK;
 370		if (!ceiling)
 371			return;
 372	}
 373	if (end - 1 > ceiling - 1)
 374		end -= PMD_SIZE;
 375	if (addr > end - 1)
 376		return;
 377	/*
  378	 * We add page table cache pages with PAGE_SIZE granularity
  379	 * (see pte_free_tlb()), so flush the TLB if we need to.
 380	 */
 381	tlb_change_page_size(tlb, PAGE_SIZE);
 382	pgd = pgd_offset(tlb->mm, addr);
 383	do {
 384		next = pgd_addr_end(addr, end);
 385		if (pgd_none_or_clear_bad(pgd))
 386			continue;
 387		free_p4d_range(tlb, pgd, addr, next, floor, ceiling);
 388	} while (pgd++, addr = next, addr != end);
 389}
 390
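/*
 * Worked example of the "- 1" comparisons above (illustrative):
 * ceiling == 0 denotes the top of the address space, so ceiling - 1
 * wraps to ULONG_MAX and "end - 1 > ceiling - 1" can never be true:
 * the range is not truncated.  Conversely, when masking addr down to a
 * PMD boundary drops it below floor, "addr += PMD_SIZE" may wrap to 0;
 * the "if (!addr) return" test rejects that "opposite 0" before it can
 * be mistaken for the bottom of the address space.
 */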
 391void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
 392		unsigned long floor, unsigned long ceiling)
 393{
 394	while (vma) {
 395		struct vm_area_struct *next = vma->vm_next;
 396		unsigned long addr = vma->vm_start;
 397
 398		/*
 399		 * Hide vma from rmap and truncate_pagecache before freeing
 400		 * pgtables
 401		 */
 402		unlink_anon_vmas(vma);
 403		unlink_file_vma(vma);
 404
 405		if (is_vm_hugetlb_page(vma)) {
 406			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
 407				floor, next ? next->vm_start : ceiling);
 408		} else {
 409			/*
 410			 * Optimization: gather nearby vmas into one call down
 411			 */
 412			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
 413			       && !is_vm_hugetlb_page(next)) {
 414				vma = next;
 415				next = vma->vm_next;
 416				unlink_anon_vmas(vma);
 417				unlink_file_vma(vma);
 418			}
 419			free_pgd_range(tlb, addr, vma->vm_end,
 420				floor, next ? next->vm_start : ceiling);
 421		}
 422		vma = next;
 423	}
 424}
 425
 426int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
 427{
 428	spinlock_t *ptl;
 429	pgtable_t new = pte_alloc_one(mm);
 430	if (!new)
 431		return -ENOMEM;
 432
 433	/*
 434	 * Ensure all pte setup (eg. pte page lock and page clearing) are
 435	 * visible before the pte is made visible to other CPUs by being
 436	 * put into page tables.
 437	 *
 438	 * The other side of the story is the pointer chasing in the page
 439	 * table walking code (when walking the page table without locking;
 440	 * ie. most of the time). Fortunately, these data accesses consist
 441	 * of a chain of data-dependent loads, meaning most CPUs (alpha
 442	 * being the notable exception) will already guarantee loads are
 443	 * seen in-order. See the alpha page table accessors for the
 444	 * smp_rmb() barriers in page table walking code.
 445	 */
 446	smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
 447
 448	ptl = pmd_lock(mm, pmd);
 449	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
 450		mm_inc_nr_ptes(mm);
 451		pmd_populate(mm, pmd, new);
 452		new = NULL;
 453	}
 454	spin_unlock(ptl);
 455	if (new)
 456		pte_free(mm, new);
 457	return 0;
 458}
 459
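/*
 * Callers normally reach __pte_alloc() through the pte_alloc() wrapper
 * from <linux/mm.h>, which (roughly) reads:
 *
 *	#define pte_alloc(mm, pmd) \
 *		(unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd))
 *
 * so the allocation path is only taken when the pmd is still empty,
 * and the pmd_lock() recheck above handles the race where two threads
 * pass that unlocked test at once.
 */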
 460int __pte_alloc_kernel(pmd_t *pmd)
 461{
 462	pte_t *new = pte_alloc_one_kernel(&init_mm);
 463	if (!new)
 464		return -ENOMEM;
 465
 466	smp_wmb(); /* See comment in __pte_alloc */
 467
 468	spin_lock(&init_mm.page_table_lock);
 469	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
 470		pmd_populate_kernel(&init_mm, pmd, new);
 471		new = NULL;
 472	}
 473	spin_unlock(&init_mm.page_table_lock);
 474	if (new)
 475		pte_free_kernel(&init_mm, new);
 476	return 0;
 477}
 478
 479static inline void init_rss_vec(int *rss)
 480{
 481	memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
 482}
 483
 484static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
 485{
 486	int i;
 487
 488	if (current->mm == mm)
 489		sync_mm_rss(mm);
 490	for (i = 0; i < NR_MM_COUNTERS; i++)
 491		if (rss[i])
 492			add_mm_counter(mm, i, rss[i]);
 493}
 494
 495/*
 496 * This function is called to print an error when a bad pte
 497 * is found. For example, we might have a PFN-mapped pte in
 498 * a region that doesn't allow it.
 499 *
 500 * The calling function must still handle the error.
 501 */
 502static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
 503			  pte_t pte, struct page *page)
 504{
 505	pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
 506	p4d_t *p4d = p4d_offset(pgd, addr);
 507	pud_t *pud = pud_offset(p4d, addr);
 508	pmd_t *pmd = pmd_offset(pud, addr);
 509	struct address_space *mapping;
 510	pgoff_t index;
 511	static unsigned long resume;
 512	static unsigned long nr_shown;
 513	static unsigned long nr_unshown;
 514
 515	/*
 516	 * Allow a burst of 60 reports, then keep quiet for that minute;
 517	 * or allow a steady drip of one report per second.
 518	 */
 519	if (nr_shown == 60) {
 520		if (time_before(jiffies, resume)) {
 521			nr_unshown++;
 522			return;
 523		}
 524		if (nr_unshown) {
 525			pr_alert("BUG: Bad page map: %lu messages suppressed\n",
 526				 nr_unshown);
 527			nr_unshown = 0;
 528		}
 529		nr_shown = 0;
 530	}
 531	if (nr_shown++ == 0)
 532		resume = jiffies + 60 * HZ;
 533
 534	mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
 535	index = linear_page_index(vma, addr);
 536
 537	pr_alert("BUG: Bad page map in process %s  pte:%08llx pmd:%08llx\n",
 538		 current->comm,
 539		 (long long)pte_val(pte), (long long)pmd_val(*pmd));
 540	if (page)
 541		dump_page(page, "bad pte");
 542	pr_alert("addr:%px vm_flags:%08lx anon_vma:%px mapping:%px index:%lx\n",
 543		 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
 544	pr_alert("file:%pD fault:%ps mmap:%ps readpage:%ps\n",
 545		 vma->vm_file,
 546		 vma->vm_ops ? vma->vm_ops->fault : NULL,
 547		 vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
 548		 mapping ? mapping->a_ops->readpage : NULL);
 549	dump_stack();
 550	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
 551}
 552
 553/*
 554 * vm_normal_page -- This function gets the "struct page" associated with a pte.
 555 *
 556 * "Special" mappings do not wish to be associated with a "struct page" (either
 557 * it doesn't exist, or it exists but they don't want to touch it). In this
 558 * case, NULL is returned here. "Normal" mappings do have a struct page.
 559 *
 560 * There are 2 broad cases. Firstly, an architecture may define a pte_special()
 561 * pte bit, in which case this function is trivial. Secondly, an architecture
 562 * may not have a spare pte bit, which requires a more complicated scheme,
 563 * described below.
 564 *
 565 * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
 566 * special mapping (even if there are underlying and valid "struct pages").
 567 * COWed pages of a VM_PFNMAP are always normal.
 568 *
 569 * The way we recognize COWed pages within VM_PFNMAP mappings is through the
 570 * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
 571 * set, and the vm_pgoff will point to the first PFN mapped: thus every special
 572 * mapping will always honor the rule
 573 *
 574 *	pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
 575 *
 576 * And for normal mappings this is false.
 577 *
 578 * This restricts such mappings to be a linear translation from virtual address
 579 * to pfn. To get around this restriction, we allow arbitrary mappings so long
 580 * as the vma is not a COW mapping; in that case, we know that all ptes are
 581 * special (because none can have been COWed).
 582 *
 583 *
 584 * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
 585 *
 586 * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
 587 * page" backing, however the difference is that _all_ pages with a struct
 588 * page (that is, those where pfn_valid is true) are refcounted and considered
 589 * normal pages by the VM. The disadvantage is that pages are refcounted
 590 * (which can be slower and simply not an option for some PFNMAP users). The
 591 * advantage is that we don't have to follow the strict linearity rule of
 592 * PFNMAP mappings in order to support COWable mappings.
 593 *
 594 */
 595struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
 596			    pte_t pte)
 597{
 598	unsigned long pfn = pte_pfn(pte);
 599
 600	if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL)) {
 601		if (likely(!pte_special(pte)))
 602			goto check_pfn;
 603		if (vma->vm_ops && vma->vm_ops->find_special_page)
 604			return vma->vm_ops->find_special_page(vma, addr);
 605		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
 606			return NULL;
 607		if (is_zero_pfn(pfn))
 608			return NULL;
 609		if (pte_devmap(pte))
 610			return NULL;
 611
 612		print_bad_pte(vma, addr, pte, NULL);
 613		return NULL;
 614	}
 615
 616	/* !CONFIG_ARCH_HAS_PTE_SPECIAL case follows: */
 617
 618	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
 619		if (vma->vm_flags & VM_MIXEDMAP) {
 620			if (!pfn_valid(pfn))
 621				return NULL;
 622			goto out;
 623		} else {
 624			unsigned long off;
 625			off = (addr - vma->vm_start) >> PAGE_SHIFT;
 626			if (pfn == vma->vm_pgoff + off)
 627				return NULL;
 628			if (!is_cow_mapping(vma->vm_flags))
 629				return NULL;
 630		}
 631	}
 632
 633	if (is_zero_pfn(pfn))
 634		return NULL;
 635
 636check_pfn:
 637	if (unlikely(pfn > highest_memmap_pfn)) {
 638		print_bad_pte(vma, addr, pte, NULL);
 639		return NULL;
 640	}
 641
 642	/*
 643	 * NOTE! We still have PageReserved() pages in the page tables.
 644	 * eg. VDSO mappings can cause them to exist.
 645	 */
 646out:
 647	return pfn_to_page(pfn);
 648}
 649
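/*
 * Example (illustrative; not part of this file): page-table walkers use
 * vm_normal_page() to skip special mappings.  A hypothetical helper
 * counting the normal pages in one pte page, assuming the caller mapped
 * and locked it with pte_offset_map_lock():
 */
static unsigned long example_count_normal(struct vm_area_struct *vma,
					  pte_t *pte, unsigned long addr,
					  unsigned long end)
{
	unsigned long count = 0;

	for (; addr != end; pte++, addr += PAGE_SIZE) {
		struct page *page;

		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)	/* special pte: no struct page to touch */
			continue;
		count++;
	}
	return count;
}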
 650#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 651struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
 652				pmd_t pmd)
 653{
 654	unsigned long pfn = pmd_pfn(pmd);
 655
 656	/*
 657	 * There is no pmd_special() but there may be special pmds, e.g.
 658	 * in a direct-access (dax) mapping, so let's just replicate the
 659	 * !CONFIG_ARCH_HAS_PTE_SPECIAL case from vm_normal_page() here.
 660	 */
 661	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
 662		if (vma->vm_flags & VM_MIXEDMAP) {
 663			if (!pfn_valid(pfn))
 664				return NULL;
 665			goto out;
 666		} else {
 667			unsigned long off;
 668			off = (addr - vma->vm_start) >> PAGE_SHIFT;
 669			if (pfn == vma->vm_pgoff + off)
 670				return NULL;
 671			if (!is_cow_mapping(vma->vm_flags))
 672				return NULL;
 673		}
 674	}
 675
 676	if (pmd_devmap(pmd))
 677		return NULL;
 678	if (is_huge_zero_pmd(pmd))
 679		return NULL;
 680	if (unlikely(pfn > highest_memmap_pfn))
 681		return NULL;
 682
 683	/*
 684	 * NOTE! We still have PageReserved() pages in the page tables.
 685	 * eg. VDSO mappings can cause them to exist.
 686	 */
 687out:
 688	return pfn_to_page(pfn);
 689}
 690#endif
 691
 692/*
  693 * Copy one vm_area from one task to the other. Assumes that the page
  694 * tables already present in the new task have been cleared over the
  695 * whole range covered by this vma.
 696 */
 697
 698static unsigned long
 699copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 700		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
 701		unsigned long addr, int *rss)
 702{
 703	unsigned long vm_flags = vma->vm_flags;
 704	pte_t pte = *src_pte;
 705	struct page *page;
 706	swp_entry_t entry = pte_to_swp_entry(pte);
 707
 708	if (likely(!non_swap_entry(entry))) {
 709		if (swap_duplicate(entry) < 0)
 710			return entry.val;
 711
 712		/* make sure dst_mm is on swapoff's mmlist. */
 713		if (unlikely(list_empty(&dst_mm->mmlist))) {
 714			spin_lock(&mmlist_lock);
 715			if (list_empty(&dst_mm->mmlist))
 716				list_add(&dst_mm->mmlist,
 717						&src_mm->mmlist);
 718			spin_unlock(&mmlist_lock);
 719		}
 720		rss[MM_SWAPENTS]++;
 721	} else if (is_migration_entry(entry)) {
 722		page = migration_entry_to_page(entry);
 723
 724		rss[mm_counter(page)]++;
 725
 726		if (is_write_migration_entry(entry) &&
 727				is_cow_mapping(vm_flags)) {
 728			/*
 729			 * COW mappings require pages in both
  730			 * parent and child to be set read-only.
 731			 */
 732			make_migration_entry_read(&entry);
 733			pte = swp_entry_to_pte(entry);
 734			if (pte_swp_soft_dirty(*src_pte))
 735				pte = pte_swp_mksoft_dirty(pte);
 736			if (pte_swp_uffd_wp(*src_pte))
 737				pte = pte_swp_mkuffd_wp(pte);
 738			set_pte_at(src_mm, addr, src_pte, pte);
 739		}
 740	} else if (is_device_private_entry(entry)) {
 741		page = device_private_entry_to_page(entry);
 742
 743		/*
 744		 * Update rss count even for unaddressable pages, as
  745		 * they should be treated just like normal pages in this
 746		 * respect.
 747		 *
 748		 * We will likely want to have some new rss counters
 749		 * for unaddressable pages, at some point. But for now
 750		 * keep things as they are.
 751		 */
 752		get_page(page);
 753		rss[mm_counter(page)]++;
 754		page_dup_rmap(page, false);
 755
 756		/*
 757		 * We do not preserve soft-dirty information, because so
 758		 * far, checkpoint/restore is the only feature that
 759		 * requires that. And checkpoint/restore does not work
 760		 * when a device driver is involved (you cannot easily
 761		 * save and restore device driver state).
 762		 */
 763		if (is_write_device_private_entry(entry) &&
 764		    is_cow_mapping(vm_flags)) {
 765			make_device_private_entry_read(&entry);
 766			pte = swp_entry_to_pte(entry);
 767			if (pte_swp_uffd_wp(*src_pte))
 768				pte = pte_swp_mkuffd_wp(pte);
 769			set_pte_at(src_mm, addr, src_pte, pte);
 770		}
 771	}
 772	set_pte_at(dst_mm, addr, dst_pte, pte);
 773	return 0;
 774}
 775
 776/*
 777 * Copy a present and normal page if necessary.
 778 *
 779 * NOTE! The usual case is that this doesn't need to do
 780 * anything, and can just return a positive value. That
 781 * will let the caller know that it can just increase
 782 * the page refcount and re-use the pte the traditional
 783 * way.
 784 *
 785 * But _if_ we need to copy it because it needs to be
 786 * pinned in the parent (and the child should get its own
 787 * copy rather than just a reference to the same page),
 788 * we'll do that here and return zero to let the caller
 789 * know we're done.
 790 *
 791 * And if we need a pre-allocated page but don't yet have
 792 * one, return a negative error to let the preallocation
 793 * code know so that it can do so outside the page table
 794 * lock.
 795 */
 796static inline int
 797copy_present_page(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 798		pte_t *dst_pte, pte_t *src_pte,
 799		struct vm_area_struct *vma, struct vm_area_struct *new,
 800		unsigned long addr, int *rss, struct page **prealloc,
 801		pte_t pte, struct page *page)
 802{
 803	struct page *new_page;
 804
 805	if (!is_cow_mapping(vma->vm_flags))
 806		return 1;
 807
 808	/*
 809	 * What we want to do is to check whether this page may
 810	 * have been pinned by the parent process.  If so,
  811	 * instead of wrprotecting the pte on both sides, we copy
 812	 * the page immediately so that we'll always guarantee
 813	 * the pinned page won't be randomly replaced in the
 814	 * future.
 815	 *
 816	 * The page pinning checks are just "has this mm ever
 817	 * seen pinning", along with the (inexact) check of
 818	 * the page count. That might give false positives for
  819	 * pinning, but it will work correctly.
 820	 */
 821	if (likely(!atomic_read(&src_mm->has_pinned)))
 822		return 1;
 823	if (likely(!page_maybe_dma_pinned(page)))
 824		return 1;
 825
 826	new_page = *prealloc;
 827	if (!new_page)
 828		return -EAGAIN;
 829
 830	/*
 831	 * We have a prealloc page, all good!  Take it
 832	 * over and copy the page & arm it.
 833	 */
 834	*prealloc = NULL;
 835	copy_user_highpage(new_page, page, addr, vma);
 836	__SetPageUptodate(new_page);
 837	page_add_new_anon_rmap(new_page, new, addr, false);
 838	lru_cache_add_inactive_or_unevictable(new_page, new);
 839	rss[mm_counter(new_page)]++;
 840
 841	/* All done, just insert the new page copy in the child */
 842	pte = mk_pte(new_page, new->vm_page_prot);
 843	pte = maybe_mkwrite(pte_mkdirty(pte), new);
 844	set_pte_at(dst_mm, addr, dst_pte, pte);
 845	return 0;
 846}
 847
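/*
 * Summary of the contract between copy_present_page() above and its
 * caller, copy_present_pte():
 *   ret > 0        - the page need not be copied; the caller shares it
 *                    the traditional way (get_page() + page_dup_rmap()).
 *   ret == 0       - the preallocated page was consumed and the child
 *                    pte is already armed; nothing more to do.
 *   ret == -EAGAIN - a preallocated page is required; the caller drops
 *                    the page table locks, allocates, and retries.
 */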
 848/*
 849 * Copy one pte.  Returns 0 if succeeded, or -EAGAIN if one preallocated page
 850 * is required to copy this pte.
 851 */
 852static inline int
 853copy_present_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 854		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
 855		struct vm_area_struct *new,
 856		unsigned long addr, int *rss, struct page **prealloc)
 857{
 858	unsigned long vm_flags = vma->vm_flags;
 859	pte_t pte = *src_pte;
 860	struct page *page;
 861
 862	page = vm_normal_page(vma, addr, pte);
 863	if (page) {
 864		int retval;
 865
 866		retval = copy_present_page(dst_mm, src_mm,
 867			dst_pte, src_pte,
 868			vma, new,
 869			addr, rss, prealloc,
 870			pte, page);
 871		if (retval <= 0)
 872			return retval;
 873
 874		get_page(page);
 875		page_dup_rmap(page, false);
 876		rss[mm_counter(page)]++;
 877	}
 878
 879	/*
 880	 * If it's a COW mapping, write protect it both
 881	 * in the parent and the child
 882	 */
 883	if (is_cow_mapping(vm_flags) && pte_write(pte)) {
 884		ptep_set_wrprotect(src_mm, addr, src_pte);
 885		pte = pte_wrprotect(pte);
 886	}
 887
 888	/*
 889	 * If it's a shared mapping, mark it clean in
 890	 * the child
 891	 */
 892	if (vm_flags & VM_SHARED)
 893		pte = pte_mkclean(pte);
 894	pte = pte_mkold(pte);
 895
 896	/*
 897	 * Make sure the _PAGE_UFFD_WP bit is cleared if the new VMA
  898	 * does not have VM_UFFD_WP set, which means that the uffd
 899	 * fork event is not enabled.
 900	 */
 901	if (!(vm_flags & VM_UFFD_WP))
 902		pte = pte_clear_uffd_wp(pte);
 903
 904	set_pte_at(dst_mm, addr, dst_pte, pte);
 905	return 0;
 906}
 907
 908static inline struct page *
 909page_copy_prealloc(struct mm_struct *src_mm, struct vm_area_struct *vma,
 910		   unsigned long addr)
 911{
 912	struct page *new_page;
 913
 914	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, addr);
 915	if (!new_page)
 916		return NULL;
 917
 918	if (mem_cgroup_charge(new_page, src_mm, GFP_KERNEL)) {
 919		put_page(new_page);
 920		return NULL;
 921	}
 922	cgroup_throttle_swaprate(new_page, GFP_KERNEL);
 923
 924	return new_page;
 925}
 926
 927static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 928		   pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
 929		   struct vm_area_struct *new,
 930		   unsigned long addr, unsigned long end)
 931{
 932	pte_t *orig_src_pte, *orig_dst_pte;
 933	pte_t *src_pte, *dst_pte;
 934	spinlock_t *src_ptl, *dst_ptl;
 935	int progress, ret = 0;
 936	int rss[NR_MM_COUNTERS];
 937	swp_entry_t entry = (swp_entry_t){0};
 938	struct page *prealloc = NULL;
 939
 940again:
 941	progress = 0;
 942	init_rss_vec(rss);
 943
 944	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
 945	if (!dst_pte) {
 946		ret = -ENOMEM;
 947		goto out;
 948	}
 949	src_pte = pte_offset_map(src_pmd, addr);
 950	src_ptl = pte_lockptr(src_mm, src_pmd);
 951	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
 952	orig_src_pte = src_pte;
 953	orig_dst_pte = dst_pte;
 954	arch_enter_lazy_mmu_mode();
 955
 956	do {
 957		/*
 958		 * We are holding two locks at this point - either of them
 959		 * could generate latencies in another task on another CPU.
 960		 */
 961		if (progress >= 32) {
 962			progress = 0;
 963			if (need_resched() ||
 964			    spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
 965				break;
 966		}
 967		if (pte_none(*src_pte)) {
 968			progress++;
 969			continue;
 970		}
 971		if (unlikely(!pte_present(*src_pte))) {
 972			entry.val = copy_nonpresent_pte(dst_mm, src_mm,
 973							dst_pte, src_pte,
 974							vma, addr, rss);
 975			if (entry.val)
 976				break;
 977			progress += 8;
 978			continue;
 979		}
 980		/* copy_present_pte() will clear `*prealloc' if consumed */
 981		ret = copy_present_pte(dst_mm, src_mm, dst_pte, src_pte,
 982				       vma, new, addr, rss, &prealloc);
 983		/*
 984		 * If we need a pre-allocated page for this pte, drop the
 985		 * locks, allocate, and try again.
 986		 */
 987		if (unlikely(ret == -EAGAIN))
 988			break;
 989		if (unlikely(prealloc)) {
 990			/*
  991			 * The preallocated page cannot be reused next time,
  992			 * so as to strictly follow mempolicy (e.g.,
  993			 * alloc_page_vma() allocates according to address).
  994			 * This can only happen if a pinned pte changed.
 995			 */
 996			put_page(prealloc);
 997			prealloc = NULL;
 998		}
 999		progress += 8;
1000	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
1001
1002	arch_leave_lazy_mmu_mode();
1003	spin_unlock(src_ptl);
1004	pte_unmap(orig_src_pte);
1005	add_mm_rss_vec(dst_mm, rss);
1006	pte_unmap_unlock(orig_dst_pte, dst_ptl);
1007	cond_resched();
1008
1009	if (entry.val) {
1010		if (add_swap_count_continuation(entry, GFP_KERNEL) < 0) {
1011			ret = -ENOMEM;
1012			goto out;
1013		}
1014		entry.val = 0;
1015	} else if (ret) {
1016		WARN_ON_ONCE(ret != -EAGAIN);
1017		prealloc = page_copy_prealloc(src_mm, vma, addr);
1018		if (!prealloc)
1019			return -ENOMEM;
1020		/* We've captured and resolved the error. Reset, try again. */
1021		ret = 0;
1022	}
1023	if (addr != end)
1024		goto again;
1025out:
1026	if (unlikely(prealloc))
1027		put_page(prealloc);
1028	return ret;
1029}
1030
1031static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1032		pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
1033		struct vm_area_struct *new,
1034		unsigned long addr, unsigned long end)
1035{
1036	pmd_t *src_pmd, *dst_pmd;
1037	unsigned long next;
1038
1039	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
1040	if (!dst_pmd)
1041		return -ENOMEM;
1042	src_pmd = pmd_offset(src_pud, addr);
1043	do {
1044		next = pmd_addr_end(addr, end);
1045		if (is_swap_pmd(*src_pmd) || pmd_trans_huge(*src_pmd)
1046			|| pmd_devmap(*src_pmd)) {
1047			int err;
1048			VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, vma);
1049			err = copy_huge_pmd(dst_mm, src_mm,
1050					    dst_pmd, src_pmd, addr, vma);
1051			if (err == -ENOMEM)
1052				return -ENOMEM;
1053			if (!err)
1054				continue;
1055			/* fall through */
1056		}
1057		if (pmd_none_or_clear_bad(src_pmd))
1058			continue;
1059		if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
1060				   vma, new, addr, next))
1061			return -ENOMEM;
1062	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
1063	return 0;
1064}
1065
1066static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1067		p4d_t *dst_p4d, p4d_t *src_p4d, struct vm_area_struct *vma,
1068		struct vm_area_struct *new,
1069		unsigned long addr, unsigned long end)
1070{
1071	pud_t *src_pud, *dst_pud;
1072	unsigned long next;
1073
1074	dst_pud = pud_alloc(dst_mm, dst_p4d, addr);
1075	if (!dst_pud)
1076		return -ENOMEM;
1077	src_pud = pud_offset(src_p4d, addr);
1078	do {
1079		next = pud_addr_end(addr, end);
1080		if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) {
1081			int err;
1082
1083			VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, vma);
1084			err = copy_huge_pud(dst_mm, src_mm,
1085					    dst_pud, src_pud, addr, vma);
1086			if (err == -ENOMEM)
1087				return -ENOMEM;
1088			if (!err)
1089				continue;
1090			/* fall through */
1091		}
1092		if (pud_none_or_clear_bad(src_pud))
1093			continue;
1094		if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
1095				   vma, new, addr, next))
1096			return -ENOMEM;
1097	} while (dst_pud++, src_pud++, addr = next, addr != end);
1098	return 0;
1099}
1100
1101static inline int copy_p4d_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1102		pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
1103		struct vm_area_struct *new,
1104		unsigned long addr, unsigned long end)
1105{
1106	p4d_t *src_p4d, *dst_p4d;
1107	unsigned long next;
1108
1109	dst_p4d = p4d_alloc(dst_mm, dst_pgd, addr);
1110	if (!dst_p4d)
1111		return -ENOMEM;
1112	src_p4d = p4d_offset(src_pgd, addr);
1113	do {
1114		next = p4d_addr_end(addr, end);
1115		if (p4d_none_or_clear_bad(src_p4d))
1116			continue;
1117		if (copy_pud_range(dst_mm, src_mm, dst_p4d, src_p4d,
1118				   vma, new, addr, next))
1119			return -ENOMEM;
1120	} while (dst_p4d++, src_p4d++, addr = next, addr != end);
1121	return 0;
1122}
1123
1124int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1125		    struct vm_area_struct *vma, struct vm_area_struct *new)
1126{
1127	pgd_t *src_pgd, *dst_pgd;
1128	unsigned long next;
1129	unsigned long addr = vma->vm_start;
1130	unsigned long end = vma->vm_end;
1131	struct mmu_notifier_range range;
1132	bool is_cow;
1133	int ret;
1134
1135	/*
1136	 * Don't copy ptes where a page fault will fill them correctly.
1137	 * Fork becomes much lighter when there are big shared or private
1138	 * readonly mappings. The tradeoff is that copy_page_range is more
1139	 * efficient than faulting.
1140	 */
1141	if (!(vma->vm_flags & (VM_HUGETLB | VM_PFNMAP | VM_MIXEDMAP)) &&
1142			!vma->anon_vma)
1143		return 0;
1144
1145	if (is_vm_hugetlb_page(vma))
1146		return copy_hugetlb_page_range(dst_mm, src_mm, vma);
1147
1148	if (unlikely(vma->vm_flags & VM_PFNMAP)) {
1149		/*
1150		 * We do not free on error cases below as remove_vma
1151		 * gets called on error from higher level routine
1152		 */
1153		ret = track_pfn_copy(vma);
1154		if (ret)
1155			return ret;
1156	}
1157
1158	/*
1159	 * We need to invalidate the secondary MMU mappings only when
1160	 * there could be a permission downgrade on the ptes of the
1161	 * parent mm. And a permission downgrade will only happen if
1162	 * is_cow_mapping() returns true.
1163	 */
1164	is_cow = is_cow_mapping(vma->vm_flags);
1165
1166	if (is_cow) {
1167		mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
1168					0, vma, src_mm, addr, end);
1169		mmu_notifier_invalidate_range_start(&range);
1170	}
1171
1172	ret = 0;
1173	dst_pgd = pgd_offset(dst_mm, addr);
1174	src_pgd = pgd_offset(src_mm, addr);
1175	do {
1176		next = pgd_addr_end(addr, end);
1177		if (pgd_none_or_clear_bad(src_pgd))
1178			continue;
1179		if (unlikely(copy_p4d_range(dst_mm, src_mm, dst_pgd, src_pgd,
1180					    vma, new, addr, next))) {
1181			ret = -ENOMEM;
1182			break;
1183		}
1184	} while (dst_pgd++, src_pgd++, addr = next, addr != end);
1185
1186	if (is_cow)
1187		mmu_notifier_invalidate_range_end(&range);
1188	return ret;
1189}
1190
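/*
 * Example (illustrative): copy_page_range() is reached from dup_mmap()
 * during fork(), roughly as:
 *
 *	if (!(tmp->vm_flags & VM_WIPEONFORK))
 *		retval = copy_page_range(mm, oldmm, mpnt, tmp);
 *
 * where mpnt is the parent vma and tmp is its copy in the child; see
 * kernel/fork.c for the exact call site.
 */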
1191static unsigned long zap_pte_range(struct mmu_gather *tlb,
1192				struct vm_area_struct *vma, pmd_t *pmd,
1193				unsigned long addr, unsigned long end,
1194				struct zap_details *details)
1195{
1196	struct mm_struct *mm = tlb->mm;
1197	int force_flush = 0;
1198	int rss[NR_MM_COUNTERS];
1199	spinlock_t *ptl;
1200	pte_t *start_pte;
1201	pte_t *pte;
1202	swp_entry_t entry;
1203
1204	tlb_change_page_size(tlb, PAGE_SIZE);
1205again:
1206	init_rss_vec(rss);
1207	start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
1208	pte = start_pte;
1209	flush_tlb_batched_pending(mm);
1210	arch_enter_lazy_mmu_mode();
1211	do {
1212		pte_t ptent = *pte;
1213		if (pte_none(ptent))
1214			continue;
1215
1216		if (need_resched())
1217			break;
1218
1219		if (pte_present(ptent)) {
1220			struct page *page;
1221
1222			page = vm_normal_page(vma, addr, ptent);
1223			if (unlikely(details) && page) {
1224				/*
1225				 * unmap_shared_mapping_pages() wants to
1226				 * invalidate cache without truncating:
1227				 * unmap shared but keep private pages.
1228				 */
1229				if (details->check_mapping &&
1230				    details->check_mapping != page_rmapping(page))
1231					continue;
1232			}
1233			ptent = ptep_get_and_clear_full(mm, addr, pte,
1234							tlb->fullmm);
1235			tlb_remove_tlb_entry(tlb, pte, addr);
1236			if (unlikely(!page))
1237				continue;
1238
1239			if (!PageAnon(page)) {
1240				if (pte_dirty(ptent)) {
1241					force_flush = 1;
1242					set_page_dirty(page);
1243				}
1244				if (pte_young(ptent) &&
1245				    likely(!(vma->vm_flags & VM_SEQ_READ)))
1246					mark_page_accessed(page);
1247			}
1248			rss[mm_counter(page)]--;
1249			page_remove_rmap(page, false);
1250			if (unlikely(page_mapcount(page) < 0))
1251				print_bad_pte(vma, addr, ptent, page);
1252			if (unlikely(__tlb_remove_page(tlb, page))) {
1253				force_flush = 1;
1254				addr += PAGE_SIZE;
1255				break;
1256			}
1257			continue;
1258		}
1259
1260		entry = pte_to_swp_entry(ptent);
1261		if (is_device_private_entry(entry)) {
1262			struct page *page = device_private_entry_to_page(entry);
1263
1264			if (unlikely(details && details->check_mapping)) {
1265				/*
1266				 * unmap_shared_mapping_pages() wants to
1267				 * invalidate cache without truncating:
1268				 * unmap shared but keep private pages.
1269				 */
1270				if (details->check_mapping !=
1271				    page_rmapping(page))
1272					continue;
1273			}
1274
1275			pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
1276			rss[mm_counter(page)]--;
1277			page_remove_rmap(page, false);
1278			put_page(page);
1279			continue;
1280		}
1281
1282		/* If details->check_mapping, we leave swap entries. */
1283		if (unlikely(details))
1284			continue;
1285
1286		if (!non_swap_entry(entry))
1287			rss[MM_SWAPENTS]--;
1288		else if (is_migration_entry(entry)) {
1289			struct page *page;
1290
1291			page = migration_entry_to_page(entry);
1292			rss[mm_counter(page)]--;
1293		}
1294		if (unlikely(!free_swap_and_cache(entry)))
1295			print_bad_pte(vma, addr, ptent, NULL);
1296		pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
1297	} while (pte++, addr += PAGE_SIZE, addr != end);
1298
1299	add_mm_rss_vec(mm, rss);
1300	arch_leave_lazy_mmu_mode();
1301
1302	/* Do the actual TLB flush before dropping ptl */
1303	if (force_flush)
1304		tlb_flush_mmu_tlbonly(tlb);
1305	pte_unmap_unlock(start_pte, ptl);
1306
1307	/*
1308	 * If we forced a TLB flush (either due to running out of
1309	 * batch buffers or because we needed to flush dirty TLB
1310	 * entries before releasing the ptl), free the batched
1311	 * memory too. Restart if we didn't do everything.
1312	 */
1313	if (force_flush) {
1314		force_flush = 0;
1315		tlb_flush_mmu(tlb);
1316	}
1317
1318	if (addr != end) {
1319		cond_resched();
1320		goto again;
1321	}
1322
1323	return addr;
1324}
1325
1326static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
1327				struct vm_area_struct *vma, pud_t *pud,
1328				unsigned long addr, unsigned long end,
1329				struct zap_details *details)
1330{
1331	pmd_t *pmd;
1332	unsigned long next;
1333
1334	pmd = pmd_offset(pud, addr);
1335	do {
1336		next = pmd_addr_end(addr, end);
1337		if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
1338			if (next - addr != HPAGE_PMD_SIZE)
1339				__split_huge_pmd(vma, pmd, addr, false, NULL);
1340			else if (zap_huge_pmd(tlb, vma, pmd, addr))
1341				goto next;
1342			/* fall through */
1343		}
1344		/*
1345		 * Here there can be other concurrent MADV_DONTNEED or
1346		 * trans huge page faults running, and if the pmd is
1347		 * none or trans huge it can change under us. This is
1348		 * because MADV_DONTNEED holds the mmap_lock in read
1349		 * mode.
1350		 */
1351		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
1352			goto next;
1353		next = zap_pte_range(tlb, vma, pmd, addr, next, details);
1354next:
1355		cond_resched();
1356	} while (pmd++, addr = next, addr != end);
1357
1358	return addr;
1359}
1360
1361static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
1362				struct vm_area_struct *vma, p4d_t *p4d,
1363				unsigned long addr, unsigned long end,
1364				struct zap_details *details)
1365{
1366	pud_t *pud;
1367	unsigned long next;
1368
1369	pud = pud_offset(p4d, addr);
1370	do {
1371		next = pud_addr_end(addr, end);
1372		if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
1373			if (next - addr != HPAGE_PUD_SIZE) {
1374				mmap_assert_locked(tlb->mm);
1375				split_huge_pud(vma, pud, addr);
1376			} else if (zap_huge_pud(tlb, vma, pud, addr))
1377				goto next;
1378			/* fall through */
1379		}
1380		if (pud_none_or_clear_bad(pud))
1381			continue;
1382		next = zap_pmd_range(tlb, vma, pud, addr, next, details);
1383next:
1384		cond_resched();
1385	} while (pud++, addr = next, addr != end);
1386
1387	return addr;
1388}
1389
1390static inline unsigned long zap_p4d_range(struct mmu_gather *tlb,
1391				struct vm_area_struct *vma, pgd_t *pgd,
1392				unsigned long addr, unsigned long end,
1393				struct zap_details *details)
1394{
1395	p4d_t *p4d;
1396	unsigned long next;
1397
1398	p4d = p4d_offset(pgd, addr);
1399	do {
1400		next = p4d_addr_end(addr, end);
1401		if (p4d_none_or_clear_bad(p4d))
1402			continue;
1403		next = zap_pud_range(tlb, vma, p4d, addr, next, details);
1404	} while (p4d++, addr = next, addr != end);
1405
1406	return addr;
1407}
1408
1409void unmap_page_range(struct mmu_gather *tlb,
1410			     struct vm_area_struct *vma,
1411			     unsigned long addr, unsigned long end,
1412			     struct zap_details *details)
1413{
1414	pgd_t *pgd;
1415	unsigned long next;
1416
1417	BUG_ON(addr >= end);
1418	tlb_start_vma(tlb, vma);
1419	pgd = pgd_offset(vma->vm_mm, addr);
1420	do {
1421		next = pgd_addr_end(addr, end);
1422		if (pgd_none_or_clear_bad(pgd))
1423			continue;
1424		next = zap_p4d_range(tlb, vma, pgd, addr, next, details);
1425	} while (pgd++, addr = next, addr != end);
1426	tlb_end_vma(tlb, vma);
1427}
1428
1429
1430static void unmap_single_vma(struct mmu_gather *tlb,
1431		struct vm_area_struct *vma, unsigned long start_addr,
1432		unsigned long end_addr,
1433		struct zap_details *details)
1434{
1435	unsigned long start = max(vma->vm_start, start_addr);
1436	unsigned long end;
1437
1438	if (start >= vma->vm_end)
1439		return;
1440	end = min(vma->vm_end, end_addr);
1441	if (end <= vma->vm_start)
1442		return;
1443
1444	if (vma->vm_file)
1445		uprobe_munmap(vma, start, end);
1446
1447	if (unlikely(vma->vm_flags & VM_PFNMAP))
1448		untrack_pfn(vma, 0, 0);
1449
1450	if (start != end) {
1451		if (unlikely(is_vm_hugetlb_page(vma))) {
1452			/*
1453			 * It is undesirable to test vma->vm_file as it
 1454			 * should be non-null for a valid hugetlb area.
1455			 * However, vm_file will be NULL in the error
1456			 * cleanup path of mmap_region. When
1457			 * hugetlbfs ->mmap method fails,
1458			 * mmap_region() nullifies vma->vm_file
1459			 * before calling this function to clean up.
 1460			 * Since no pte has actually been set up, it is
1461			 * safe to do nothing in this case.
1462			 */
1463			if (vma->vm_file) {
1464				i_mmap_lock_write(vma->vm_file->f_mapping);
1465				__unmap_hugepage_range_final(tlb, vma, start, end, NULL);
1466				i_mmap_unlock_write(vma->vm_file->f_mapping);
1467			}
1468		} else
1469			unmap_page_range(tlb, vma, start, end, details);
1470	}
1471}
1472
1473/**
1474 * unmap_vmas - unmap a range of memory covered by a list of vma's
1475 * @tlb: address of the caller's struct mmu_gather
1476 * @vma: the starting vma
1477 * @start_addr: virtual address at which to start unmapping
1478 * @end_addr: virtual address at which to end unmapping
1479 *
1480 * Unmap all pages in the vma list.
1481 *
1482 * Only addresses between `start' and `end' will be unmapped.
1483 *
1484 * The VMA list must be sorted in ascending virtual address order.
1485 *
1486 * unmap_vmas() assumes that the caller will flush the whole unmapped address
1487 * range after unmap_vmas() returns.  So the only responsibility here is to
1488 * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
1489 * drops the lock and schedules.
1490 */
1491void unmap_vmas(struct mmu_gather *tlb,
1492		struct vm_area_struct *vma, unsigned long start_addr,
1493		unsigned long end_addr)
1494{
1495	struct mmu_notifier_range range;
1496
1497	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
1498				start_addr, end_addr);
1499	mmu_notifier_invalidate_range_start(&range);
1500	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
1501		unmap_single_vma(tlb, vma, start_addr, end_addr, NULL);
1502	mmu_notifier_invalidate_range_end(&range);
1503}
1504
1505/**
1506 * zap_page_range - remove user pages in a given range
1507 * @vma: vm_area_struct holding the applicable pages
1508 * @start: starting address of pages to zap
1509 * @size: number of bytes to zap
1510 *
1511 * Caller must protect the VMA list
1512 */
1513void zap_page_range(struct vm_area_struct *vma, unsigned long start,
1514		unsigned long size)
1515{
1516	struct mmu_notifier_range range;
1517	struct mmu_gather tlb;
1518
1519	lru_add_drain();
1520	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
1521				start, start + size);
1522	tlb_gather_mmu(&tlb, vma->vm_mm, start, range.end);
1523	update_hiwater_rss(vma->vm_mm);
1524	mmu_notifier_invalidate_range_start(&range);
1525	for ( ; vma && vma->vm_start < range.end; vma = vma->vm_next)
1526		unmap_single_vma(&tlb, vma, start, range.end, NULL);
1527	mmu_notifier_invalidate_range_end(&range);
1528	tlb_finish_mmu(&tlb, start, range.end);
1529}
1530
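/*
 * Example (illustrative): a representative caller is MADV_DONTNEED,
 * which discards a range roughly as (see mm/madvise.c):
 *
 *	zap_page_range(vma, start, end - start);
 *
 * Subsequent touches refault: anonymous pages come back zero-filled
 * and file-backed pages are re-read from the backing store.
 */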
1531/**
1532 * zap_page_range_single - remove user pages in a given range
1533 * @vma: vm_area_struct holding the applicable pages
1534 * @address: starting address of pages to zap
1535 * @size: number of bytes to zap
1536 * @details: details of shared cache invalidation
1537 *
1538 * The range must fit into one VMA.
1539 */
1540static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
1541		unsigned long size, struct zap_details *details)
1542{
1543	struct mmu_notifier_range range;
1544	struct mmu_gather tlb;
1545
1546	lru_add_drain();
1547	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
1548				address, address + size);
1549	tlb_gather_mmu(&tlb, vma->vm_mm, address, range.end);
1550	update_hiwater_rss(vma->vm_mm);
1551	mmu_notifier_invalidate_range_start(&range);
1552	unmap_single_vma(&tlb, vma, address, range.end, details);
1553	mmu_notifier_invalidate_range_end(&range);
1554	tlb_finish_mmu(&tlb, address, range.end);
1555}
1556
1557/**
1558 * zap_vma_ptes - remove ptes mapping the vma
1559 * @vma: vm_area_struct holding ptes to be zapped
1560 * @address: starting address of pages to zap
1561 * @size: number of bytes to zap
1562 *
1563 * This function only unmaps ptes assigned to VM_PFNMAP vmas.
1564 *
1565 * The entire address range must be fully contained within the vma.
1566 *
1567 */
1568void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
1569		unsigned long size)
1570{
1571	if (address < vma->vm_start || address + size > vma->vm_end ||
 1572			!(vma->vm_flags & VM_PFNMAP))
1573		return;
1574
1575	zap_page_range_single(vma, address, size, NULL);
1576}
1577EXPORT_SYMBOL_GPL(zap_vma_ptes);
1578
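/*
 * Example (illustrative): a VM_PFNMAP driver revoking a user mapping,
 * e.g. on device removal (hypothetical vma pointer):
 *
 *	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
 */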
1579static pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long addr)
1580{
1581	pgd_t *pgd;
1582	p4d_t *p4d;
1583	pud_t *pud;
1584	pmd_t *pmd;
1585
1586	pgd = pgd_offset(mm, addr);
1587	p4d = p4d_alloc(mm, pgd, addr);
1588	if (!p4d)
1589		return NULL;
1590	pud = pud_alloc(mm, p4d, addr);
1591	if (!pud)
1592		return NULL;
1593	pmd = pmd_alloc(mm, pud, addr);
1594	if (!pmd)
1595		return NULL;
1596
1597	VM_BUG_ON(pmd_trans_huge(*pmd));
1598	return pmd;
1599}
1600
1601pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
1602			spinlock_t **ptl)
1603{
1604	pmd_t *pmd = walk_to_pmd(mm, addr);
1605
1606	if (!pmd)
1607		return NULL;
1608	return pte_alloc_map_lock(mm, pmd, addr, ptl);
1609}
1610
1611static int validate_page_before_insert(struct page *page)
1612{
1613	if (PageAnon(page) || PageSlab(page) || page_has_type(page))
1614		return -EINVAL;
1615	flush_dcache_page(page);
1616	return 0;
1617}
1618
1619static int insert_page_into_pte_locked(struct mm_struct *mm, pte_t *pte,
1620			unsigned long addr, struct page *page, pgprot_t prot)
1621{
1622	if (!pte_none(*pte))
1623		return -EBUSY;
1624	/* Ok, finally just insert the thing.. */
1625	get_page(page);
1626	inc_mm_counter_fast(mm, mm_counter_file(page));
1627	page_add_file_rmap(page, false);
1628	set_pte_at(mm, addr, pte, mk_pte(page, prot));
1629	return 0;
1630}
1631
1632/*
1633 * This is the old fallback for page remapping.
1634 *
1635 * For historical reasons, it only allows reserved pages. Only
1636 * old drivers should use this, and they needed to mark their
1637 * pages reserved for the old functions anyway.
1638 */
1639static int insert_page(struct vm_area_struct *vma, unsigned long addr,
1640			struct page *page, pgprot_t prot)
1641{
1642	struct mm_struct *mm = vma->vm_mm;
1643	int retval;
1644	pte_t *pte;
1645	spinlock_t *ptl;
1646
1647	retval = validate_page_before_insert(page);
1648	if (retval)
1649		goto out;
1650	retval = -ENOMEM;
1651	pte = get_locked_pte(mm, addr, &ptl);
1652	if (!pte)
1653		goto out;
1654	retval = insert_page_into_pte_locked(mm, pte, addr, page, prot);
1655	pte_unmap_unlock(pte, ptl);
1656out:
1657	return retval;
1658}
1659
1660#ifdef pte_index
1661static int insert_page_in_batch_locked(struct mm_struct *mm, pte_t *pte,
1662			unsigned long addr, struct page *page, pgprot_t prot)
1663{
1664	int err;
1665
1666	if (!page_count(page))
1667		return -EINVAL;
1668	err = validate_page_before_insert(page);
1669	if (err)
1670		return err;
1671	return insert_page_into_pte_locked(mm, pte, addr, page, prot);
1672}
1673
1674/* insert_pages() amortizes the cost of spinlock operations
1675 * when inserting pages in a loop. Arch *must* define pte_index.
1676 */
1677static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
1678			struct page **pages, unsigned long *num, pgprot_t prot)
1679{
1680	pmd_t *pmd = NULL;
1681	pte_t *start_pte, *pte;
1682	spinlock_t *pte_lock;
1683	struct mm_struct *const mm = vma->vm_mm;
1684	unsigned long curr_page_idx = 0;
1685	unsigned long remaining_pages_total = *num;
1686	unsigned long pages_to_write_in_pmd;
1687	int ret;
1688more:
1689	ret = -EFAULT;
1690	pmd = walk_to_pmd(mm, addr);
1691	if (!pmd)
1692		goto out;
1693
1694	pages_to_write_in_pmd = min_t(unsigned long,
1695		remaining_pages_total, PTRS_PER_PTE - pte_index(addr));
1696
1697	/* Allocate the PTE if necessary; takes PMD lock once only. */
1698	ret = -ENOMEM;
1699	if (pte_alloc(mm, pmd))
1700		goto out;
1701
1702	while (pages_to_write_in_pmd) {
1703		int pte_idx = 0;
1704		const int batch_size = min_t(int, pages_to_write_in_pmd, 8);
1705
1706		start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock);
1707		for (pte = start_pte; pte_idx < batch_size; ++pte, ++pte_idx) {
1708			int err = insert_page_in_batch_locked(mm, pte,
1709				addr, pages[curr_page_idx], prot);
1710			if (unlikely(err)) {
1711				pte_unmap_unlock(start_pte, pte_lock);
1712				ret = err;
1713				remaining_pages_total -= pte_idx;
1714				goto out;
1715			}
1716			addr += PAGE_SIZE;
1717			++curr_page_idx;
1718		}
1719		pte_unmap_unlock(start_pte, pte_lock);
1720		pages_to_write_in_pmd -= batch_size;
1721		remaining_pages_total -= batch_size;
1722	}
1723	if (remaining_pages_total)
1724		goto more;
1725	ret = 0;
1726out:
1727	*num = remaining_pages_total;
1728	return ret;
1729}
1730#endif  /* ifdef pte_index */
1731
1732/**
1733 * vm_insert_pages - insert multiple pages into user vma, batching the pmd lock.
1734 * @vma: user vma to map to
1735 * @addr: target start user address of these pages
1736 * @pages: source kernel pages
1737 * @num: in: number of pages to map. out: number of pages that were *not*
1738 * mapped. (0 means all pages were successfully mapped).
1739 *
1740 * Preferred over vm_insert_page() when inserting multiple pages.
1741 *
1742 * In case of error, we may have mapped a subset of the provided
1743 * pages. It is the caller's responsibility to account for this case.
1744 *
1745 * The same restrictions apply as in vm_insert_page().
1746 */
1747int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
1748			struct page **pages, unsigned long *num)
1749{
1750#ifdef pte_index
1751	const unsigned long end_addr = addr + (*num * PAGE_SIZE) - 1;
1752
1753	if (addr < vma->vm_start || end_addr >= vma->vm_end)
1754		return -EFAULT;
1755	if (!(vma->vm_flags & VM_MIXEDMAP)) {
1756		BUG_ON(mmap_read_trylock(vma->vm_mm));
1757		BUG_ON(vma->vm_flags & VM_PFNMAP);
1758		vma->vm_flags |= VM_MIXEDMAP;
1759	}
1760	/* Defer page refcount checking till we're about to map that page. */
1761	return insert_pages(vma, addr, pages, num, vma->vm_page_prot);
1762#else
1763	unsigned long idx = 0, pgcount = *num;
1764	int err = -EINVAL;
1765
1766	for (; idx < pgcount; ++idx) {
1767		err = vm_insert_page(vma, addr + (PAGE_SIZE * idx), pages[idx]);
1768		if (err)
1769			break;
1770	}
1771	*num = pgcount - idx;
1772	return err;
1773#endif  /* ifdef pte_index */
1774}
1775EXPORT_SYMBOL(vm_insert_pages);
1776
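/*
 * Example (illustrative): batched mapping of a driver buffer, assuming
 * a hypothetical pages[] array with nr_pages entries:
 *
 *	unsigned long n = nr_pages;
 *	int err = vm_insert_pages(vma, vma->vm_start, pages, &n);
 *
 * On return, n holds how many pages were *not* mapped; a nonzero err
 * with n < nr_pages means some leading subset was mapped successfully.
 */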
1777/**
1778 * vm_insert_page - insert single page into user vma
1779 * @vma: user vma to map to
1780 * @addr: target user address of this page
1781 * @page: source kernel page
1782 *
1783 * This allows drivers to insert individual pages they've allocated
1784 * into a user vma.
1785 *
1786 * The page has to be a nice clean _individual_ kernel allocation.
1787 * If you allocate a compound page, you need to have marked it as
1788 * such (__GFP_COMP), or manually just split the page up yourself
1789 * (see split_page()).
1790 *
1791 * NOTE! Traditionally this was done with "remap_pfn_range()" which
1792 * took an arbitrary page protection parameter. This doesn't allow
1793 * that. Your vma protection will have to be set up correctly, which
1794 * means that if you want a shared writable mapping, you'd better
1795 * ask for a shared writable mapping!
1796 *
1797 * The page does not need to be reserved.
1798 *
1799 * Usually this function is called from f_op->mmap() handler
1800 * under mm->mmap_lock write-lock, so it can change vma->vm_flags.
1801 * Caller must set VM_MIXEDMAP on vma if it wants to call this
1802 * function from other places, for example from page-fault handler.
1803 *
1804 * Return: %0 on success, negative error code otherwise.
1805 */
1806int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
1807			struct page *page)
1808{
1809	if (addr < vma->vm_start || addr >= vma->vm_end)
1810		return -EFAULT;
1811	if (!page_count(page))
1812		return -EINVAL;
1813	if (!(vma->vm_flags & VM_MIXEDMAP)) {
1814		BUG_ON(mmap_read_trylock(vma->vm_mm));
1815		BUG_ON(vma->vm_flags & VM_PFNMAP);
1816		vma->vm_flags |= VM_MIXEDMAP;
1817	}
1818	return insert_page(vma, addr, page, vma->vm_page_prot);
1819}
1820EXPORT_SYMBOL(vm_insert_page);
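/*
 * Usage sketch (illustration only): inserting a single driver-owned page
 * from an mmap handler, which runs under the mmap_lock write-lock so
 * VM_MIXEDMAP may be set here.  "my_dev_page" is an assumed driver page.
 */
static struct page *my_dev_page;

static int my_dev_mmap_one(struct file *file, struct vm_area_struct *vma)
{
	if (vma_pages(vma) != 1)
		return -EINVAL;
	return vm_insert_page(vma, vma->vm_start, my_dev_page);
}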
1821
1822/*
1823 * __vm_map_pages - maps a range of kernel pages into a user vma
1824 * @vma: user vma to map to
1825 * @pages: pointer to array of source kernel pages
1826 * @num: number of pages in page array
1827 * @offset: user's requested vm_pgoff
1828 *
1829 * This allows drivers to map a range of kernel pages into a user vma.
1830 *
1831 * Return: 0 on success and error code otherwise.
1832 */
1833static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages,
1834				unsigned long num, unsigned long offset)
1835{
1836	unsigned long count = vma_pages(vma);
1837	unsigned long uaddr = vma->vm_start;
1838	int ret, i;
1839
1840	/* Fail if the user requested offset is beyond the end of the object */
1841	if (offset >= num)
1842		return -ENXIO;
1843
1844	/* Fail if the user requested size exceeds available object size */
1845	if (count > num - offset)
1846		return -ENXIO;
1847
1848	for (i = 0; i < count; i++) {
1849		ret = vm_insert_page(vma, uaddr, pages[offset + i]);
1850		if (ret < 0)
1851			return ret;
1852		uaddr += PAGE_SIZE;
1853	}
1854
1855	return 0;
1856}
1857
1858/**
1859 * vm_map_pages - map a range of kernel pages starting at the user's vm_pgoff
1860 * @vma: user vma to map to
1861 * @pages: pointer to array of source kernel pages
1862 * @num: number of pages in page array
1863 *
1864 * Maps an object consisting of @num pages, catering for the user's
1865 * requested vm_pgoff.
1866 *
1867 * If we fail to insert any page into the vma, the function will return
1868 * immediately leaving any previously inserted pages present.  Callers
1869 * from the mmap handler may immediately return the error as their caller
1870 * will destroy the vma, removing any successfully inserted pages. Other
1871 * callers should make their own arrangements for calling unmap_region().
1872 *
1873 * Context: Process context. Called by mmap handlers.
1874 * Return: 0 on success and error code otherwise.
1875 */
1876int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
1877				unsigned long num)
1878{
1879	return __vm_map_pages(vma, pages, num, vma->vm_pgoff);
1880}
1881EXPORT_SYMBOL(vm_map_pages);
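/*
 * Usage sketch (illustration only, reusing the hypothetical my_dev_pages
 * and my_dev_npages above): unlike open-coded vm_insert_page() loops,
 * vm_map_pages() applies vma->vm_pgoff and does the bounds checks itself,
 * so an mmap handler reduces to a single call.
 */
static int my_dev_mmap_obj(struct file *file, struct vm_area_struct *vma)
{
	return vm_map_pages(vma, my_dev_pages, my_dev_npages);
}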
1882
1883/**
1884 * vm_map_pages_zero - map a range of kernel pages starting at offset zero
1885 * @vma: user vma to map to
1886 * @pages: pointer to array of source kernel pages
1887 * @num: number of pages in page array
1888 *
1889 * Similar to vm_map_pages(), except that it explicitly sets the offset
1890 * to 0. This function is intended for drivers that do not consider
1891 * vm_pgoff.
1892 *
1893 * Context: Process context. Called by mmap handlers.
1894 * Return: 0 on success and error code otherwise.
1895 */
1896int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
1897				unsigned long num)
1898{
1899	return __vm_map_pages(vma, pages, num, 0);
1900}
1901EXPORT_SYMBOL(vm_map_pages_zero);
1902
1903static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
1904			pfn_t pfn, pgprot_t prot, bool mkwrite)
1905{
1906	struct mm_struct *mm = vma->vm_mm;
1907	pte_t *pte, entry;
1908	spinlock_t *ptl;
1909
1910	pte = get_locked_pte(mm, addr, &ptl);
1911	if (!pte)
1912		return VM_FAULT_OOM;
1913	if (!pte_none(*pte)) {
1914		if (mkwrite) {
1915			/*
1916			 * For read faults on private mappings the PFN passed
1917			 * in may not match the PFN we have mapped if the
1918			 * mapped PFN is a writeable COW page.  In the mkwrite
1919			 * case we are creating a writable PTE for a shared
1920			 * mapping and we expect the PFNs to match. If they
1921			 * don't match, we are likely racing with block
1922			 * allocation and mapping invalidation so just skip the
1923			 * update.
1924			 */
1925			if (pte_pfn(*pte) != pfn_t_to_pfn(pfn)) {
1926				WARN_ON_ONCE(!is_zero_pfn(pte_pfn(*pte)));
1927				goto out_unlock;
1928			}
1929			entry = pte_mkyoung(*pte);
1930			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
1931			if (ptep_set_access_flags(vma, addr, pte, entry, 1))
1932				update_mmu_cache(vma, addr, pte);
1933		}
1934		goto out_unlock;
1935	}
1936
1937	/* Ok, finally just insert the thing.. */
1938	if (pfn_t_devmap(pfn))
1939		entry = pte_mkdevmap(pfn_t_pte(pfn, prot));
1940	else
1941		entry = pte_mkspecial(pfn_t_pte(pfn, prot));
1942
1943	if (mkwrite) {
1944		entry = pte_mkyoung(entry);
1945		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
1946	}
1947
1948	set_pte_at(mm, addr, pte, entry);
1949	update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
1950
1951out_unlock:
1952	pte_unmap_unlock(pte, ptl);
1953	return VM_FAULT_NOPAGE;
1954}
1955
1956/**
1957 * vmf_insert_pfn_prot - insert single pfn into user vma with specified pgprot
1958 * @vma: user vma to map to
1959 * @addr: target user address of this page
1960 * @pfn: source kernel pfn
1961 * @pgprot: pgprot flags for the inserted page
1962 *
1963 * This is exactly like vmf_insert_pfn(), except that it allows drivers
1964 * to override pgprot on a per-page basis.
1965 *
1966 * This only makes sense for IO mappings, and it makes no sense for
1967 * COW mappings.  In general, using multiple VMAs is preferable;
1968 * vmf_insert_pfn_prot should only be used if using multiple VMAs is
1969 * impractical.
1970 *
1971 * See vmf_insert_mixed_prot() for a discussion of the implication of using
1972 * a value of @pgprot different from that of @vma->vm_page_prot.
1973 *
1974 * Context: Process context.  May allocate using %GFP_KERNEL.
1975 * Return: vm_fault_t value.
1976 */
1977vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
1978			unsigned long pfn, pgprot_t pgprot)
1979{
1980	/*
1981	 * Technically, architectures with pte_special can avoid all these
1982	 * restrictions (same for remap_pfn_range).  However we would like
1983	 * consistency in testing and feature parity among all, so we should
1984	 * try to keep these invariants in place for everybody.
1985	 */
1986	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
1987	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
1988						(VM_PFNMAP|VM_MIXEDMAP));
1989	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
1990	BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
1991
1992	if (addr < vma->vm_start || addr >= vma->vm_end)
1993		return VM_FAULT_SIGBUS;
1994
1995	if (!pfn_modify_allowed(pfn, pgprot))
1996		return VM_FAULT_SIGBUS;
1997
1998	track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV));
1999
2000	return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot,
2001			false);
2002}
2003EXPORT_SYMBOL(vmf_insert_pfn_prot);
2004
2005/**
2006 * vmf_insert_pfn - insert single pfn into user vma
2007 * @vma: user vma to map to
2008 * @addr: target user address of this page
2009 * @pfn: source kernel pfn
2010 *
2011 * Similar to vm_insert_page, this allows drivers to insert individual pages
2012 * they've allocated into a user vma. Same comments apply.
2013 *
2014 * This function should only be called from a vm_ops->fault handler, and
2015 * in that case the handler should return the result of this function.
2016 *
2017 * vma cannot be a COW mapping.
2018 *
2019 * As this is called only for pages that do not currently exist, we
2020 * do not need to flush old virtual caches or the TLB.
2021 *
2022 * Context: Process context.  May allocate using %GFP_KERNEL.
2023 * Return: vm_fault_t value.
2024 */
2025vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2026			unsigned long pfn)
2027{
2028	return vmf_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
2029}
2030EXPORT_SYMBOL(vmf_insert_pfn);
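/*
 * Usage sketch (illustration only): a ->fault handler for a VM_PFNMAP vma
 * backed by a physically contiguous region.  "my_dev_base_pfn" is an
 * assumed device PFN; the mmap handler is assumed to have set VM_PFNMAP
 * (and typically VM_IO) on the vma.
 */
static unsigned long my_dev_base_pfn;

static vm_fault_t my_dev_fault(struct vm_fault *vmf)
{
	return vmf_insert_pfn(vmf->vma, vmf->address,
			      my_dev_base_pfn + vmf->pgoff);
}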
2031
2032static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn)
2033{
2034	/* these checks mirror the abort conditions in vm_normal_page */
2035	if (vma->vm_flags & VM_MIXEDMAP)
2036		return true;
2037	if (pfn_t_devmap(pfn))
2038		return true;
2039	if (pfn_t_special(pfn))
2040		return true;
2041	if (is_zero_pfn(pfn_t_to_pfn(pfn)))
2042		return true;
2043	return false;
2044}
2045
2046static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
2047		unsigned long addr, pfn_t pfn, pgprot_t pgprot,
2048		bool mkwrite)
2049{
2050	int err;
2051
2052	BUG_ON(!vm_mixed_ok(vma, pfn));
2053
2054	if (addr < vma->vm_start || addr >= vma->vm_end)
2055		return VM_FAULT_SIGBUS;
2056
2057	track_pfn_insert(vma, &pgprot, pfn);
2058
2059	if (!pfn_modify_allowed(pfn_t_to_pfn(pfn), pgprot))
2060		return VM_FAULT_SIGBUS;
2061
2062	/*
2063	 * If we don't have pte special, then we have to use the pfn_valid()
2064	 * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
2065	 * refcount the page if pfn_valid is true (hence insert_page rather
2066	 * than insert_pfn).  If a zero_pfn were inserted into a VM_MIXEDMAP
2067 * without pte special, it would then be refcounted as a normal page.
2068	 */
2069	if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) &&
2070	    !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) {
2071		struct page *page;
2072
2073		/*
2074		 * At this point we are committed to insert_page()
2075		 * regardless of whether the caller specified flags that
2076		 * result in pfn_t_has_page() == false.
2077		 */
2078		page = pfn_to_page(pfn_t_to_pfn(pfn));
2079		err = insert_page(vma, addr, page, pgprot);
2080	} else {
2081		return insert_pfn(vma, addr, pfn, pgprot, mkwrite);
2082	}
2083
2084	if (err == -ENOMEM)
2085		return VM_FAULT_OOM;
2086	if (err < 0 && err != -EBUSY)
2087		return VM_FAULT_SIGBUS;
2088
2089	return VM_FAULT_NOPAGE;
2090}
2091
2092/**
2093 * vmf_insert_mixed_prot - insert single pfn into user vma with specified pgprot
2094 * @vma: user vma to map to
2095 * @addr: target user address of this page
2096 * @pfn: source kernel pfn
2097 * @pgprot: pgprot flags for the inserted page
2098 *
2099 * This is exactly like vmf_insert_mixed(), except that it allows drivers
2100 * to override pgprot on a per-page basis.
2101 *
2102 * Typically this function should be used by drivers to set caching- and
2103 * encryption bits different from those of @vma->vm_page_prot, because
2104 * the caching- or encryption mode may not be known at mmap() time.
2105 * This is ok as long as @vma->vm_page_prot is not used by the core vm
2106 * to set caching and encryption bits for those vmas (except for COW pages).
2107 * This is ensured by core vm only modifying these page table entries using
2108 * functions that don't touch caching- or encryption bits, using pte_modify()
2109 * if needed. (See for example mprotect()).
2110 * Also when new page-table entries are created, this is only done using the
2111 * fault() callback, and never using the value of vma->vm_page_prot,
2112 * except for page-table entries that point to anonymous pages as the result
2113 * of COW.
2114 *
2115 * Context: Process context.  May allocate using %GFP_KERNEL.
2116 * Return: vm_fault_t value.
2117 */
2118vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr,
2119				 pfn_t pfn, pgprot_t pgprot)
2120{
2121	return __vm_insert_mixed(vma, addr, pfn, pgprot, false);
2122}
2123EXPORT_SYMBOL(vmf_insert_mixed_prot);
2124
2125vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
2126		pfn_t pfn)
2127{
2128	return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, false);
2129}
2130EXPORT_SYMBOL(vmf_insert_mixed);
2131
2132/*
2133 *  If insertion of the PTE failed because someone else already added a
2134 *  different entry in the meantime, we treat that as success, as we assume
2135 *  the same entry was actually inserted.
2136 */
2137vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
2138		unsigned long addr, pfn_t pfn)
2139{
2140	return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, true);
2141}
2142EXPORT_SYMBOL(vmf_insert_mixed_mkwrite);
2143
2144/*
2145 * Maps a range of physical memory into the requested pages. The old
2146 * mappings are removed. Any reference to a nonexistent page results
2147 * in a null mapping (currently treated as "copy-on-access").
2148 */
2149static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
2150			unsigned long addr, unsigned long end,
2151			unsigned long pfn, pgprot_t prot)
2152{
2153	pte_t *pte;
2154	spinlock_t *ptl;
2155	int err = 0;
2156
2157	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
2158	if (!pte)
2159		return -ENOMEM;
2160	arch_enter_lazy_mmu_mode();
2161	do {
2162		BUG_ON(!pte_none(*pte));
2163		if (!pfn_modify_allowed(pfn, prot)) {
2164			err = -EACCES;
2165			break;
2166		}
2167		set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
2168		pfn++;
2169	} while (pte++, addr += PAGE_SIZE, addr != end);
2170	arch_leave_lazy_mmu_mode();
2171	pte_unmap_unlock(pte - 1, ptl);
2172	return err;
2173}
2174
2175static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
2176			unsigned long addr, unsigned long end,
2177			unsigned long pfn, pgprot_t prot)
2178{
2179	pmd_t *pmd;
2180	unsigned long next;
2181	int err;
2182
2183	pfn -= addr >> PAGE_SHIFT;
2184	pmd = pmd_alloc(mm, pud, addr);
2185	if (!pmd)
2186		return -ENOMEM;
2187	VM_BUG_ON(pmd_trans_huge(*pmd));
2188	do {
2189		next = pmd_addr_end(addr, end);
2190		err = remap_pte_range(mm, pmd, addr, next,
2191				pfn + (addr >> PAGE_SHIFT), prot);
2192		if (err)
2193			return err;
2194	} while (pmd++, addr = next, addr != end);
2195	return 0;
2196}
2197
2198static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d,
2199			unsigned long addr, unsigned long end,
2200			unsigned long pfn, pgprot_t prot)
2201{
2202	pud_t *pud;
2203	unsigned long next;
2204	int err;
2205
2206	pfn -= addr >> PAGE_SHIFT;
2207	pud = pud_alloc(mm, p4d, addr);
2208	if (!pud)
2209		return -ENOMEM;
2210	do {
2211		next = pud_addr_end(addr, end);
2212		err = remap_pmd_range(mm, pud, addr, next,
2213				pfn + (addr >> PAGE_SHIFT), prot);
2214		if (err)
2215			return err;
2216	} while (pud++, addr = next, addr != end);
2217	return 0;
2218}
2219
2220static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
2221			unsigned long addr, unsigned long end,
2222			unsigned long pfn, pgprot_t prot)
2223{
2224	p4d_t *p4d;
2225	unsigned long next;
2226	int err;
2227
2228	pfn -= addr >> PAGE_SHIFT;
2229	p4d = p4d_alloc(mm, pgd, addr);
2230	if (!p4d)
2231		return -ENOMEM;
2232	do {
2233		next = p4d_addr_end(addr, end);
2234		err = remap_pud_range(mm, p4d, addr, next,
2235				pfn + (addr >> PAGE_SHIFT), prot);
2236		if (err)
2237			return err;
2238	} while (p4d++, addr = next, addr != end);
2239	return 0;
2240}
2241
2242/**
2243 * remap_pfn_range - remap kernel memory to userspace
2244 * @vma: user vma to map to
2245 * @addr: target page aligned user address to start at
2246 * @pfn: page frame number of kernel physical memory address
2247 * @size: size of mapping area
2248 * @prot: page protection flags for this mapping
2249 *
2250 * Note: this is only safe if the mm semaphore is held when called.
2251 *
2252 * Return: %0 on success, negative error code otherwise.
2253 */
2254int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
2255		    unsigned long pfn, unsigned long size, pgprot_t prot)
2256{
2257	pgd_t *pgd;
2258	unsigned long next;
2259	unsigned long end = addr + PAGE_ALIGN(size);
2260	struct mm_struct *mm = vma->vm_mm;
2261	unsigned long remap_pfn = pfn;
2262	int err;
2263
2264	if (WARN_ON_ONCE(!PAGE_ALIGNED(addr)))
2265		return -EINVAL;
2266
2267	/*
2268	 * Physically remapped pages are special. Tell the
2269	 * rest of the world about it:
2270	 *   VM_IO tells people not to look at these pages
2271	 *	(accesses can have side effects).
2272	 *   VM_PFNMAP tells the core MM that the base pages are just
2273	 *	raw PFN mappings, and do not have a "struct page" associated
2274	 *	with them.
2275	 *   VM_DONTEXPAND
2276	 *      Disable vma merging and expanding with mremap().
2277	 *   VM_DONTDUMP
2278	 *      Omit vma from core dump, even when VM_IO turned off.
2279	 *
2280	 * There's a horrible special case to handle copy-on-write
2281	 * behaviour that some programs depend on. We mark the "original"
2282	 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
2283	 * See vm_normal_page() for details.
2284	 */
2285	if (is_cow_mapping(vma->vm_flags)) {
2286		if (addr != vma->vm_start || end != vma->vm_end)
2287			return -EINVAL;
2288		vma->vm_pgoff = pfn;
2289	}
2290
2291	err = track_pfn_remap(vma, &prot, remap_pfn, addr, PAGE_ALIGN(size));
2292	if (err)
2293		return -EINVAL;
2294
2295	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
2296
2297	BUG_ON(addr >= end);
2298	pfn -= addr >> PAGE_SHIFT;
2299	pgd = pgd_offset(mm, addr);
2300	flush_cache_range(vma, addr, end);
2301	do {
2302		next = pgd_addr_end(addr, end);
2303		err = remap_p4d_range(mm, pgd, addr, next,
2304				pfn + (addr >> PAGE_SHIFT), prot);
2305		if (err)
2306			break;
2307	} while (pgd++, addr = next, addr != end);
2308
2309	if (err)
2310		untrack_pfn(vma, remap_pfn, PAGE_ALIGN(size));
2311
2312	return err;
2313}
2314EXPORT_SYMBOL(remap_pfn_range);
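/*
 * Usage sketch (illustration only): the classic one-shot variant, mapping
 * a whole physically contiguous region at mmap() time rather than
 * faulting pages in one at a time.  MY_DEV_PHYS is a hypothetical
 * physical base address.
 */
#define MY_DEV_PHYS	0x90000000UL	/* hypothetical */

static int my_dev_mmap_phys(struct file *file, struct vm_area_struct *vma)
{
	return remap_pfn_range(vma, vma->vm_start,
			       (MY_DEV_PHYS >> PAGE_SHIFT) + vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}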
2315
2316/**
2317 * vm_iomap_memory - remap memory to userspace
2318 * @vma: user vma to map to
2319 * @start: start of the physical memory to be mapped
2320 * @len: size of area
2321 *
2322 * This is a simplified io_remap_pfn_range() for common driver use. The
2323 * driver just needs to give us the physical memory range to be mapped,
2324 * we'll figure out the rest from the vma information.
2325 *
2326 * NOTE! Some drivers might want to tweak vma->vm_page_prot first to get
2327 * write-combining behaviour or similar.
2328 *
2329 * Return: %0 on success, negative error code otherwise.
2330 */
2331int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
2332{
2333	unsigned long vm_len, pfn, pages;
2334
2335	/* Check that the physical memory area passed in looks valid */
2336	if (start + len < start)
2337		return -EINVAL;
2338	/*
2339	 * You *really* shouldn't map things that aren't page-aligned,
2340	 * but we've historically allowed it because IO memory might
2341	 * just have smaller alignment.
2342	 */
2343	len += start & ~PAGE_MASK;
2344	pfn = start >> PAGE_SHIFT;
2345	pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
2346	if (pfn + pages < pfn)
2347		return -EINVAL;
2348
2349	/* We start the mapping 'vm_pgoff' pages into the area */
2350	if (vma->vm_pgoff > pages)
2351		return -EINVAL;
2352	pfn += vma->vm_pgoff;
2353	pages -= vma->vm_pgoff;
2354
2355	/* Can we fit all of the mapping? */
2356	vm_len = vma->vm_end - vma->vm_start;
2357	if (vm_len >> PAGE_SHIFT > pages)
2358		return -EINVAL;
2359
2360	/* Ok, let it rip */
2361	return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
2362}
2363EXPORT_SYMBOL(vm_iomap_memory);
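/*
 * Usage sketch (illustration only): the simplified form.  The driver
 * supplies just the physical range; vm_iomap_memory() derives the pfn,
 * offset and length checks from the vma.  MY_DEV_PHYS is the hypothetical
 * base defined above and MY_DEV_LEN an assumed region size.
 */
#define MY_DEV_LEN	0x100000UL	/* hypothetical: 1 MiB */

static int my_dev_mmap_iomap(struct file *file, struct vm_area_struct *vma)
{
	/* optionally tweak vm_page_prot first, e.g. for write-combining */
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	return vm_iomap_memory(vma, MY_DEV_PHYS, MY_DEV_LEN);
}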
2364
2365static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
2366				     unsigned long addr, unsigned long end,
2367				     pte_fn_t fn, void *data, bool create,
2368				     pgtbl_mod_mask *mask)
2369{
2370	pte_t *pte;
2371	int err = 0;
2372	spinlock_t *ptl;
2373
2374	if (create) {
2375		pte = (mm == &init_mm) ?
2376			pte_alloc_kernel_track(pmd, addr, mask) :
2377			pte_alloc_map_lock(mm, pmd, addr, &ptl);
2378		if (!pte)
2379			return -ENOMEM;
2380	} else {
2381		pte = (mm == &init_mm) ?
2382			pte_offset_kernel(pmd, addr) :
2383			pte_offset_map_lock(mm, pmd, addr, &ptl);
2384	}
2385
2386	BUG_ON(pmd_huge(*pmd));
2387
2388	arch_enter_lazy_mmu_mode();
2389
2390	do {
2391		if (create || !pte_none(*pte)) {
2392			err = fn(pte++, addr, data);
2393			if (err)
2394				break;
2395		}
2396	} while (addr += PAGE_SIZE, addr != end);
2397	*mask |= PGTBL_PTE_MODIFIED;
2398
2399	arch_leave_lazy_mmu_mode();
2400
2401	if (mm != &init_mm)
2402		pte_unmap_unlock(pte-1, ptl);
2403	return err;
2404}
2405
2406static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
2407				     unsigned long addr, unsigned long end,
2408				     pte_fn_t fn, void *data, bool create,
2409				     pgtbl_mod_mask *mask)
2410{
2411	pmd_t *pmd;
2412	unsigned long next;
2413	int err = 0;
2414
2415	BUG_ON(pud_huge(*pud));
2416
2417	if (create) {
2418		pmd = pmd_alloc_track(mm, pud, addr, mask);
2419		if (!pmd)
2420			return -ENOMEM;
2421	} else {
2422		pmd = pmd_offset(pud, addr);
2423	}
2424	do {
2425		next = pmd_addr_end(addr, end);
2426		if (create || !pmd_none_or_clear_bad(pmd)) {
2427			err = apply_to_pte_range(mm, pmd, addr, next, fn, data,
2428						 create, mask);
2429			if (err)
2430				break;
2431		}
2432	} while (pmd++, addr = next, addr != end);
2433	return err;
2434}
2435
2436static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
2437				     unsigned long addr, unsigned long end,
2438				     pte_fn_t fn, void *data, bool create,
2439				     pgtbl_mod_mask *mask)
2440{
2441	pud_t *pud;
2442	unsigned long next;
2443	int err = 0;
2444
2445	if (create) {
2446		pud = pud_alloc_track(mm, p4d, addr, mask);
2447		if (!pud)
2448			return -ENOMEM;
2449	} else {
2450		pud = pud_offset(p4d, addr);
2451	}
2452	do {
2453		next = pud_addr_end(addr, end);
2454		if (create || !pud_none_or_clear_bad(pud)) {
2455			err = apply_to_pmd_range(mm, pud, addr, next, fn, data,
2456						 create, mask);
2457			if (err)
2458				break;
2459		}
2460	} while (pud++, addr = next, addr != end);
2461	return err;
2462}
2463
2464static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
2465				     unsigned long addr, unsigned long end,
2466				     pte_fn_t fn, void *data, bool create,
2467				     pgtbl_mod_mask *mask)
2468{
2469	p4d_t *p4d;
2470	unsigned long next;
2471	int err = 0;
2472
2473	if (create) {
2474		p4d = p4d_alloc_track(mm, pgd, addr, mask);
2475		if (!p4d)
2476			return -ENOMEM;
2477	} else {
2478		p4d = p4d_offset(pgd, addr);
2479	}
2480	do {
2481		next = p4d_addr_end(addr, end);
2482		if (create || !p4d_none_or_clear_bad(p4d)) {
2483			err = apply_to_pud_range(mm, p4d, addr, next, fn, data,
2484						 create, mask);
2485			if (err)
2486				break;
2487		}
2488	} while (p4d++, addr = next, addr != end);
2489	return err;
2490}
2491
2492static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr,
2493				 unsigned long size, pte_fn_t fn,
2494				 void *data, bool create)
2495{
2496	pgd_t *pgd;
2497	unsigned long start = addr, next;
2498	unsigned long end = addr + size;
2499	pgtbl_mod_mask mask = 0;
2500	int err = 0;
2501
2502	if (WARN_ON(addr >= end))
2503		return -EINVAL;
2504
2505	pgd = pgd_offset(mm, addr);
2506	do {
2507		next = pgd_addr_end(addr, end);
2508		if (!create && pgd_none_or_clear_bad(pgd))
2509			continue;
2510		err = apply_to_p4d_range(mm, pgd, addr, next, fn, data, create, &mask);
2511		if (err)
2512			break;
2513	} while (pgd++, addr = next, addr != end);
2514
2515	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
2516		arch_sync_kernel_mappings(start, start + size);
2517
2518	return err;
2519}
2520
2521/*
2522 * Scan a region of virtual memory, filling in page tables as necessary
2523 * and calling a provided function on each leaf page table.
2524 */
2525int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
2526			unsigned long size, pte_fn_t fn, void *data)
2527{
2528	return __apply_to_page_range(mm, addr, size, fn, data, true);
2529}
2530EXPORT_SYMBOL_GPL(apply_to_page_range);
2531
2532/*
2533 * Scan a region of virtual memory, calling a provided function on
2534 * each leaf page table where it exists.
2535 *
2536 * Unlike apply_to_page_range, this does _not_ fill in page tables
2537 * where they are absent.
2538 */
2539int apply_to_existing_page_range(struct mm_struct *mm, unsigned long addr,
2540				 unsigned long size, pte_fn_t fn, void *data)
2541{
2542	return __apply_to_page_range(mm, addr, size, fn, data, false);
2543}
2544EXPORT_SYMBOL_GPL(apply_to_existing_page_range);
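/*
 * Usage sketch (illustration only): counting populated PTEs in a kernel
 * address range with the non-allocating variant, so the walk never
 * instantiates missing page tables.  The callback matches pte_fn_t and
 * would abort the walk by returning non-zero.
 */
static int count_present_pte(pte_t *pte, unsigned long addr, void *data)
{
	unsigned long *count = data;

	if (pte_present(*pte))
		(*count)++;
	return 0;
}

static unsigned long count_present_range(unsigned long addr, unsigned long size)
{
	unsigned long count = 0;

	apply_to_existing_page_range(&init_mm, addr, size,
				     count_present_pte, &count);
	return count;
}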
2545
2546/*
2547 * handle_pte_fault chooses page fault handler according to an entry which was
2548 * read non-atomically.  Before making any commitment, on those architectures
2549 * or configurations (e.g. i386 with PAE) which might give a mix of unmatched
2550 * parts, do_swap_page must check under lock before unmapping the pte and
2551 * proceeding (but do_wp_page is only called after already making such a check;
2552 * and do_anonymous_page can safely check later on).
2553 */
2554static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
2555				pte_t *page_table, pte_t orig_pte)
2556{
2557	int same = 1;
2558#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPTION)
2559	if (sizeof(pte_t) > sizeof(unsigned long)) {
2560		spinlock_t *ptl = pte_lockptr(mm, pmd);
2561		spin_lock(ptl);
2562		same = pte_same(*page_table, orig_pte);
2563		spin_unlock(ptl);
2564	}
2565#endif
2566	pte_unmap(page_table);
2567	return same;
2568}
2569
2570static inline bool cow_user_page(struct page *dst, struct page *src,
2571				 struct vm_fault *vmf)
2572{
2573	bool ret;
2574	void *kaddr;
2575	void __user *uaddr;
2576	bool locked = false;
2577	struct vm_area_struct *vma = vmf->vma;
2578	struct mm_struct *mm = vma->vm_mm;
2579	unsigned long addr = vmf->address;
2580
2581	if (likely(src)) {
2582		copy_user_highpage(dst, src, addr, vma);
2583		return true;
2584	}
2585
2586	/*
2587	 * If the source page was a PFN mapping, we don't have
2588	 * a "struct page" for it. We do a best-effort copy by
2589	 * just copying from the original user address. If that
2590	 * fails, we just zero-fill it. Live with it.
2591	 */
2592	kaddr = kmap_atomic(dst);
2593	uaddr = (void __user *)(addr & PAGE_MASK);
2594
2595	/*
2596	 * On architectures with software "accessed" bits, we would
2597	 * take a double page fault, so mark it accessed here.
2598	 */
2599	if (arch_faults_on_old_pte() && !pte_young(vmf->orig_pte)) {
2600		pte_t entry;
2601
2602		vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
2603		locked = true;
2604		if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
2605			/*
2606			 * Another thread has already handled the fault;
2607			 * just update the local TLB.
2608			 */
2609			update_mmu_tlb(vma, addr, vmf->pte);
2610			ret = false;
2611			goto pte_unlock;
2612		}
2613
2614		entry = pte_mkyoung(vmf->orig_pte);
2615		if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0))
2616			update_mmu_cache(vma, addr, vmf->pte);
2617	}
2618
2619	/*
2620	 * This really shouldn't fail, because the page is there
2621	 * in the page tables. But it might just be unreadable,
2622	 * in which case we just give up and fill the result with
2623	 * zeroes.
2624	 */
2625	if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
2626		if (locked)
2627			goto warn;
2628
2629		/* Re-validate under PTL if the page is still mapped */
2630		vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
2631		locked = true;
2632		if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
2633			/* The PTE changed under us, update local tlb */
2634			update_mmu_tlb(vma, addr, vmf->pte);
2635			ret = false;
2636			goto pte_unlock;
2637		}
2638
2639		/*
2640		 * The same page may have been mapped back in since the last
2641		 * copy attempt.  Try to copy again under the PTL.
2642		 */
2643		if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
2644			/*
2645			 * Warn in case there is some obscure
2646			 * use case.
2647			 */
2648warn:
2649			WARN_ON_ONCE(1);
2650			clear_page(kaddr);
2651		}
2652	}
2653
2654	ret = true;
2655
2656pte_unlock:
2657	if (locked)
2658		pte_unmap_unlock(vmf->pte, vmf->ptl);
2659	kunmap_atomic(kaddr);
2660	flush_dcache_page(dst);
2661
2662	return ret;
2663}
2664
2665static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
2666{
2667	struct file *vm_file = vma->vm_file;
2668
2669	if (vm_file)
2670		return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO;
2671
2672	/*
2673	 * Special mappings (e.g. VDSO) do not have any file so fake
2674	 * a default GFP_KERNEL for them.
2675	 */
2676	return GFP_KERNEL;
2677}
2678
2679/*
2680 * Notify the address space that the page is about to become writable so that
2681 * it can prohibit this or wait for the page to get into an appropriate state.
2682 *
2683 * We do this without the lock held, so that it can sleep if it needs to.
2684 */
2685static vm_fault_t do_page_mkwrite(struct vm_fault *vmf)
2686{
2687	vm_fault_t ret;
2688	struct page *page = vmf->page;
2689	unsigned int old_flags = vmf->flags;
2690
2691	vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
2692
2693	if (vmf->vma->vm_file &&
2694	    IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host))
2695		return VM_FAULT_SIGBUS;
2696
2697	ret = vmf->vma->vm_ops->page_mkwrite(vmf);
2698	/* Restore original flags so that caller is not surprised */
2699	vmf->flags = old_flags;
2700	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
2701		return ret;
2702	if (unlikely(!(ret & VM_FAULT_LOCKED))) {
2703		lock_page(page);
2704		if (!page->mapping) {
2705			unlock_page(page);
2706			return 0; /* retry */
2707		}
2708		ret |= VM_FAULT_LOCKED;
2709	} else
2710		VM_BUG_ON_PAGE(!PageLocked(page), page);
2711	return ret;
2712}
2713
2714/*
2715 * Handle dirtying of a page in shared file mapping on a write fault.
2716 *
2717 * The function expects the page to be locked and unlocks it.
2718 */
2719static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf)
2720{
2721	struct vm_area_struct *vma = vmf->vma;
2722	struct address_space *mapping;
2723	struct page *page = vmf->page;
2724	bool dirtied;
2725	bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite;
2726
2727	dirtied = set_page_dirty(page);
2728	VM_BUG_ON_PAGE(PageAnon(page), page);
2729	/*
2730	 * Take a local copy of the address_space - page.mapping may be zeroed
2731	 * by truncate after unlock_page().   The address_space itself remains
2732	 * pinned by vma->vm_file's reference.  We rely on unlock_page()'s
2733	 * release semantics to prevent the compiler from undoing this copying.
2734	 */
2735	mapping = page_rmapping(page);
2736	unlock_page(page);
2737
2738	if (!page_mkwrite)
2739		file_update_time(vma->vm_file);
2740
2741	/*
2742	 * Throttle page dirtying rate down to writeback speed.
2743	 *
2744	 * mapping may be NULL here because some device drivers do not
2745	 * set page.mapping but still dirty their pages.
2746	 *
2747	 * Drop the mmap_lock before waiting on IO, if we can. The file
2748	 * is pinning the mapping, as per above.
2749	 */
2750	if ((dirtied || page_mkwrite) && mapping) {
2751		struct file *fpin;
2752
2753		fpin = maybe_unlock_mmap_for_io(vmf, NULL);
2754		balance_dirty_pages_ratelimited(mapping);
2755		if (fpin) {
2756			fput(fpin);
2757			return VM_FAULT_RETRY;
2758		}
2759	}
2760
2761	return 0;
2762}
2763
2764/*
2765 * Handle write page faults for pages that can be reused in the current vma
2766 *
2767 * This can happen either due to the mapping being with the VM_SHARED flag,
2768 * or due to us being the last reference standing to the page. In either
2769 * case, all we need to do here is to mark the page as writable and update
2770 * any related book-keeping.
2771 */
2772static inline void wp_page_reuse(struct vm_fault *vmf)
2773	__releases(vmf->ptl)
2774{
2775	struct vm_area_struct *vma = vmf->vma;
2776	struct page *page = vmf->page;
2777	pte_t entry;
2778	/*
2779	 * Clear the page's cpupid information as the existing
2780	 * information potentially belongs to a now completely
2781	 * unrelated process.
2782	 */
2783	if (page)
2784		page_cpupid_xchg_last(page, (1 << LAST_CPUPID_SHIFT) - 1);
2785
2786	flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
2787	entry = pte_mkyoung(vmf->orig_pte);
2788	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2789	if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
2790		update_mmu_cache(vma, vmf->address, vmf->pte);
2791	pte_unmap_unlock(vmf->pte, vmf->ptl);
2792	count_vm_event(PGREUSE);
2793}
2794
2795/*
2796 * Handle the case of a page which we actually need to copy to a new page.
2797 *
2798 * Called with mmap_lock locked and the old page referenced, but
2799 * without the ptl held.
2800 *
2801 * High level logic flow:
2802 *
2803 * - Allocate a page, copy the content of the old page to the new one.
2804 * - Handle book keeping and accounting - cgroups, mmu-notifiers, etc.
2805 * - Take the PTL. If the pte changed, bail out and release the allocated page
2806 * - If the pte is still the way we remember it, update the page table and all
2807 *   relevant references. This includes dropping the reference the page-table
2808 *   held to the old page, as well as updating the rmap.
2809 * - In any case, unlock the PTL and drop the reference we took to the old page.
2810 */
2811static vm_fault_t wp_page_copy(struct vm_fault *vmf)
2812{
2813	struct vm_area_struct *vma = vmf->vma;
2814	struct mm_struct *mm = vma->vm_mm;
2815	struct page *old_page = vmf->page;
2816	struct page *new_page = NULL;
2817	pte_t entry;
2818	int page_copied = 0;
2819	struct mmu_notifier_range range;
2820
2821	if (unlikely(anon_vma_prepare(vma)))
2822		goto oom;
2823
2824	if (is_zero_pfn(pte_pfn(vmf->orig_pte))) {
2825		new_page = alloc_zeroed_user_highpage_movable(vma,
2826							      vmf->address);
2827		if (!new_page)
2828			goto oom;
2829	} else {
2830		new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
2831				vmf->address);
2832		if (!new_page)
2833			goto oom;
2834
2835		if (!cow_user_page(new_page, old_page, vmf)) {
2836			/*
2837			 * COW failed. If the fault was resolved by another
2838			 * thread, that's fine. If not, userspace will re-fault
2839			 * at the same address and we will handle the fault
2840			 * on the second attempt.
2841			 */
2842			put_page(new_page);
2843			if (old_page)
2844				put_page(old_page);
2845			return 0;
2846		}
2847	}
2848
2849	if (mem_cgroup_charge(new_page, mm, GFP_KERNEL))
2850		goto oom_free_new;
2851	cgroup_throttle_swaprate(new_page, GFP_KERNEL);
2852
2853	__SetPageUptodate(new_page);
2854
2855	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
2856				vmf->address & PAGE_MASK,
2857				(vmf->address & PAGE_MASK) + PAGE_SIZE);
2858	mmu_notifier_invalidate_range_start(&range);
2859
2860	/*
2861	 * Re-check the pte - we dropped the lock
2862	 */
2863	vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
2864	if (likely(pte_same(*vmf->pte, vmf->orig_pte))) {
2865		if (old_page) {
2866			if (!PageAnon(old_page)) {
2867				dec_mm_counter_fast(mm,
2868						mm_counter_file(old_page));
2869				inc_mm_counter_fast(mm, MM_ANONPAGES);
2870			}
2871		} else {
2872			inc_mm_counter_fast(mm, MM_ANONPAGES);
2873		}
2874		flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
2875		entry = mk_pte(new_page, vma->vm_page_prot);
2876		entry = pte_sw_mkyoung(entry);
2877		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2878		/*
2879		 * Clear the pte entry and flush it first, before updating the
2880		 * pte with the new entry. This will avoid a race condition
2881		 * seen in the presence of one thread doing SMC and another
2882		 * thread doing COW.
2883		 */
2884		ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
2885		page_add_new_anon_rmap(new_page, vma, vmf->address, false);
2886		lru_cache_add_inactive_or_unevictable(new_page, vma);
2887		/*
2888		 * We call the notify macro here because, when using secondary
2889		 * mmu page tables (such as kvm shadow page tables), we want the
2890		 * new page to be mapped directly into the secondary page table.
2891		 */
2892		set_pte_at_notify(mm, vmf->address, vmf->pte, entry);
2893		update_mmu_cache(vma, vmf->address, vmf->pte);
2894		if (old_page) {
2895			/*
2896			 * Only after switching the pte to the new page may
2897			 * we remove the mapcount here. Otherwise another
2898			 * process may come and find the rmap count decremented
2899			 * before the pte is switched to the new page, and
2900			 * "reuse" the old page writing into it while our pte
2901			 * here still points into it and can be read by other
2902			 * threads.
2903			 *
2904			 * The critical issue is to order this
2905			 * page_remove_rmap with the ptep_clear_flush above.
2906			 * Those stores are ordered by (if nothing else,)
2907			 * the barrier present in the atomic_add_negative
2908			 * in page_remove_rmap.
2909			 *
2910			 * Then the TLB flush in ptep_clear_flush ensures that
2911			 * no process can access the old page before the
2912			 * decremented mapcount is visible. And the old page
2913			 * cannot be reused until after the decremented
2914			 * mapcount is visible. So transitively, TLBs to
2915			 * old page will be flushed before it can be reused.
2916			 */
2917			page_remove_rmap(old_page, false);
2918		}
2919
2920		/* Free the old page.. */
2921		new_page = old_page;
2922		page_copied = 1;
2923	} else {
2924		update_mmu_tlb(vma, vmf->address, vmf->pte);
2925	}
2926
2927	if (new_page)
2928		put_page(new_page);
2929
2930	pte_unmap_unlock(vmf->pte, vmf->ptl);
2931	/*
2932	 * No need to double call mmu_notifier->invalidate_range() callback as
2933	 * the above ptep_clear_flush_notify() did already call it.
2934	 */
2935	mmu_notifier_invalidate_range_only_end(&range);
2936	if (old_page) {
2937		/*
2938		 * Don't let another task, with possibly unlocked vma,
2939		 * keep the mlocked page.
2940		 */
2941		if (page_copied && (vma->vm_flags & VM_LOCKED)) {
2942			lock_page(old_page);	/* LRU manipulation */
2943			if (PageMlocked(old_page))
2944				munlock_vma_page(old_page);
2945			unlock_page(old_page);
2946		}
2947		put_page(old_page);
2948	}
2949	return page_copied ? VM_FAULT_WRITE : 0;
2950oom_free_new:
2951	put_page(new_page);
2952oom:
2953	if (old_page)
2954		put_page(old_page);
2955	return VM_FAULT_OOM;
2956}
2957
2958/**
2959 * finish_mkwrite_fault - finish page fault for a shared mapping, making PTE
2960 *			  writeable once the page is prepared
2961 *
2962 * @vmf: structure describing the fault
2963 *
2964 * This function handles all that is needed to finish a write page fault in a
2965 * shared mapping due to PTE being read-only once the mapped page is prepared.
2966 * It handles locking of PTE and modifying it.
2967 *
2968 * The function expects the page to be locked or other protection against
2969 * concurrent faults / writeback (such as DAX radix tree locks).
2970 *
2971 * Return: %VM_FAULT_WRITE on success, %0 when PTE got changed before
2972 * we acquired PTE lock.
2973 */
2974vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf)
2975{
2976	WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED));
2977	vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
2978				       &vmf->ptl);
2979	/*
2980	 * We might have raced with another page fault while we released the
2981	 * pte_offset_map_lock.
2982	 */
2983	if (!pte_same(*vmf->pte, vmf->orig_pte)) {
2984		update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
2985		pte_unmap_unlock(vmf->pte, vmf->ptl);
2986		return VM_FAULT_NOPAGE;
2987	}
2988	wp_page_reuse(vmf);
2989	return 0;
2990}
2991
2992/*
2993 * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED
2994 * mapping
2995 */
2996static vm_fault_t wp_pfn_shared(struct vm_fault *vmf)
2997{
2998	struct vm_area_struct *vma = vmf->vma;
2999
3000	if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) {
3001		vm_fault_t ret;
3002
3003		pte_unmap_unlock(vmf->pte, vmf->ptl);
3004		vmf->flags |= FAULT_FLAG_MKWRITE;
3005		ret = vma->vm_ops->pfn_mkwrite(vmf);
3006		if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))
3007			return ret;
3008		return finish_mkwrite_fault(vmf);
3009	}
3010	wp_page_reuse(vmf);
3011	return VM_FAULT_WRITE;
3012}
3013
3014static vm_fault_t wp_page_shared(struct vm_fault *vmf)
3015	__releases(vmf->ptl)
3016{
3017	struct vm_area_struct *vma = vmf->vma;
3018	vm_fault_t ret = VM_FAULT_WRITE;
3019
3020	get_page(vmf->page);
3021
3022	if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
3023		vm_fault_t tmp;
3024
3025		pte_unmap_unlock(vmf->pte, vmf->ptl);
3026		tmp = do_page_mkwrite(vmf);
3027		if (unlikely(!tmp || (tmp &
3028				      (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
3029			put_page(vmf->page);
3030			return tmp;
3031		}
3032		tmp = finish_mkwrite_fault(vmf);
3033		if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
3034			unlock_page(vmf->page);
3035			put_page(vmf->page);
3036			return tmp;
3037		}
3038	} else {
3039		wp_page_reuse(vmf);
3040		lock_page(vmf->page);
3041	}
3042	ret |= fault_dirty_shared_page(vmf);
3043	put_page(vmf->page);
3044
3045	return ret;
3046}
3047
3048/*
3049 * This routine handles present pages, when users try to write
3050 * to a shared page. It is done by copying the page to a new address
3051 * and decrementing the shared-page counter for the old page.
3052 *
3053 * Note that this routine assumes that the protection checks have been
3054 * done by the caller (the low-level page fault routine in most cases).
3055 * Thus we can safely just mark it writable once we've done any necessary
3056 * COW.
3057 *
3058 * We also mark the page dirty at this point even though the page will
3059 * change only once the write actually happens. This avoids a few races,
3060 * and potentially makes it more efficient.
3061 *
3062 * We enter with non-exclusive mmap_lock (to exclude vma changes,
3063 * but allow concurrent faults), with pte both mapped and locked.
3064 * We return with mmap_lock still held, but pte unmapped and unlocked.
3065 */
3066static vm_fault_t do_wp_page(struct vm_fault *vmf)
3067	__releases(vmf->ptl)
3068{
3069	struct vm_area_struct *vma = vmf->vma;
3070
3071	if (userfaultfd_pte_wp(vma, *vmf->pte)) {
3072		pte_unmap_unlock(vmf->pte, vmf->ptl);
3073		return handle_userfault(vmf, VM_UFFD_WP);
3074	}
3075
3076	vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
3077	if (!vmf->page) {
3078		/*
3079		 * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
3080		 * VM_PFNMAP VMA.
3081		 *
3082		 * We should not cow pages in a shared writeable mapping.
3083		 * Just mark the pages writable and/or call ops->pfn_mkwrite.
3084		 */
3085		if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
3086				     (VM_WRITE|VM_SHARED))
3087			return wp_pfn_shared(vmf);
3088
3089		pte_unmap_unlock(vmf->pte, vmf->ptl);
3090		return wp_page_copy(vmf);
3091	}
3092
3093	/*
3094	 * Take out anonymous pages first; anonymous shared vmas are
3095	 * not dirty accountable.
3096	 */
3097	if (PageAnon(vmf->page)) {
3098		struct page *page = vmf->page;
3099
3100		/* PageKsm() doesn't necessarily raise the page refcount */
3101		if (PageKsm(page) || page_count(page) != 1)
3102			goto copy;
3103		if (!trylock_page(page))
3104			goto copy;
3105		if (PageKsm(page) || page_mapcount(page) != 1 || page_count(page) != 1) {
3106			unlock_page(page);
3107			goto copy;
3108		}
3109		/*
3110		 * Ok, we've got the only map reference, and the only
3111		 * page count reference, and the page is locked,
3112		 * it's dark out, and we're wearing sunglasses. Hit it.
3113		 */
3114		unlock_page(page);
3115		wp_page_reuse(vmf);
3116		return VM_FAULT_WRITE;
3117	} else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
3118					(VM_WRITE|VM_SHARED))) {
3119		return wp_page_shared(vmf);
3120	}
3121copy:
3122	/*
3123	 * Ok, we need to copy. Oh, well..
3124	 */
3125	get_page(vmf->page);
3126
3127	pte_unmap_unlock(vmf->pte, vmf->ptl);
3128	return wp_page_copy(vmf);
3129}
3130
3131static void unmap_mapping_range_vma(struct vm_area_struct *vma,
3132		unsigned long start_addr, unsigned long end_addr,
3133		struct zap_details *details)
3134{
3135	zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
3136}
3137
3138static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
3139					    struct zap_details *details)
3140{
3141	struct vm_area_struct *vma;
3142	pgoff_t vba, vea, zba, zea;
3143
3144	vma_interval_tree_foreach(vma, root,
3145			details->first_index, details->last_index) {
3146
3147		vba = vma->vm_pgoff;
3148		vea = vba + vma_pages(vma) - 1;
3149		zba = details->first_index;
3150		if (zba < vba)
3151			zba = vba;
3152		zea = details->last_index;
3153		if (zea > vea)
3154			zea = vea;
3155
3156		unmap_mapping_range_vma(vma,
3157			((zba - vba) << PAGE_SHIFT) + vma->vm_start,
3158			((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
3159				details);
3160	}
3161}
3162
3163/**
3164 * unmap_mapping_pages() - Unmap pages from processes.
3165 * @mapping: The address space containing pages to be unmapped.
3166 * @start: Index of first page to be unmapped.
3167 * @nr: Number of pages to be unmapped.  0 to unmap to end of file.
3168 * @even_cows: Whether to unmap even private COWed pages.
3169 *
3170 * Unmap the pages in this address space from any userspace process which
3171 * has them mmaped.  Generally, you want to remove COWed pages as well when
3172 * a file is being truncated, but not when invalidating pages from the page
3173 * cache.
3174 */
3175void unmap_mapping_pages(struct address_space *mapping, pgoff_t start,
3176		pgoff_t nr, bool even_cows)
3177{
3178	struct zap_details details = { };
3179
3180	details.check_mapping = even_cows ? NULL : mapping;
3181	details.first_index = start;
3182	details.last_index = start + nr - 1;
3183	if (details.last_index < details.first_index)
3184		details.last_index = ULONG_MAX;
3185
3186	i_mmap_lock_write(mapping);
3187	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
3188		unmap_mapping_range_tree(&mapping->i_mmap, &details);
3189	i_mmap_unlock_write(mapping);
3190}
3191
3192/**
3193 * unmap_mapping_range - unmap the portion of all mmaps in the specified
3194 * address_space corresponding to the specified byte range in the underlying
3195 * file.
3196 *
3197 * @mapping: the address space containing mmaps to be unmapped.
3198 * @holebegin: byte in first page to unmap, relative to the start of
3199 * the underlying file.  This will be rounded down to a PAGE_SIZE
3200 * boundary.  Note that this is different from truncate_pagecache(), which
3201 * must keep the partial page.  In contrast, we must get rid of
3202 * partial pages.
3203 * @holelen: size of prospective hole in bytes.  This will be rounded
3204 * up to a PAGE_SIZE boundary.  A holelen of zero truncates to the
3205 * end of the file.
3206 * @even_cows: 1 when truncating a file, unmap even private COWed pages;
3207 * but 0 when invalidating pagecache, don't throw away private data.
3208 */
3209void unmap_mapping_range(struct address_space *mapping,
3210		loff_t const holebegin, loff_t const holelen, int even_cows)
3211{
3212	pgoff_t hba = holebegin >> PAGE_SHIFT;
3213	pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
3214
3215	/* Check for overflow. */
3216	if (sizeof(holelen) > sizeof(hlen)) {
3217		long long holeend =
3218			(holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
3219		if (holeend & ~(long long)ULONG_MAX)
3220			hlen = ULONG_MAX - hba + 1;
3221	}
3222
3223	unmap_mapping_pages(mapping, hba, hlen, even_cows);
3224}
3225EXPORT_SYMBOL(unmap_mapping_range);
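/*
 * Usage sketch (illustration only): the truncate-side call, zapping every
 * user mapping from the new end-of-file onwards.  A holelen of 0 means
 * "to the end of the file", and even_cows is 1 because truncation must
 * drop private COWed copies as well.
 */
static void my_fs_truncate_mappings(struct inode *inode, loff_t newsize)
{
	unmap_mapping_range(inode->i_mapping, newsize, 0, 1);
}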
3226
3227/*
3228 * We enter with non-exclusive mmap_lock (to exclude vma changes,
3229 * but allow concurrent faults), and pte mapped but not yet locked.
3230 * We return with pte unmapped and unlocked.
3231 *
3232 * We return with the mmap_lock locked or unlocked in the same cases
3233 * as does filemap_fault().
3234 */
3235vm_fault_t do_swap_page(struct vm_fault *vmf)
3236{
3237	struct vm_area_struct *vma = vmf->vma;
3238	struct page *page = NULL, *swapcache;
3239	swp_entry_t entry;
3240	pte_t pte;
3241	int locked;
3242	int exclusive = 0;
3243	vm_fault_t ret = 0;
3244	void *shadow = NULL;
3245
3246	if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte))
3247		goto out;
3248
3249	entry = pte_to_swp_entry(vmf->orig_pte);
3250	if (unlikely(non_swap_entry(entry))) {
3251		if (is_migration_entry(entry)) {
3252			migration_entry_wait(vma->vm_mm, vmf->pmd,
3253					     vmf->address);
3254		} else if (is_device_private_entry(entry)) {
3255			vmf->page = device_private_entry_to_page(entry);
3256			ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
3257		} else if (is_hwpoison_entry(entry)) {
3258			ret = VM_FAULT_HWPOISON;
3259		} else {
3260			print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL);
3261			ret = VM_FAULT_SIGBUS;
3262		}
3263		goto out;
3264	}
3265
3266
3267	delayacct_set_flag(DELAYACCT_PF_SWAPIN);
3268	page = lookup_swap_cache(entry, vma, vmf->address);
3269	swapcache = page;
3270
3271	if (!page) {
3272		struct swap_info_struct *si = swp_swap_info(entry);
3273
3274		if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
3275		    __swap_count(entry) == 1) {
3276			/* skip swapcache */
3277			page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
3278							vmf->address);
3279			if (page) {
3280				int err;
3281
3282				__SetPageLocked(page);
3283				__SetPageSwapBacked(page);
3284				set_page_private(page, entry.val);
3285
3286				/* Tell memcg to use swap ownership records */
3287				SetPageSwapCache(page);
3288				err = mem_cgroup_charge(page, vma->vm_mm,
3289							GFP_KERNEL);
3290				ClearPageSwapCache(page);
3291				if (err) {
3292					ret = VM_FAULT_OOM;
3293					goto out_page;
3294				}
3295
3296				shadow = get_shadow_from_swap_cache(entry);
3297				if (shadow)
3298					workingset_refault(page, shadow);
3299
3300				lru_cache_add(page);
3301				swap_readpage(page, true);
3302			}
3303		} else {
3304			page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
3305						vmf);
3306			swapcache = page;
3307		}
3308
3309		if (!page) {
3310			/*
3311			 * Back out if somebody else faulted in this pte
3312			 * while we released the pte lock.
3313			 */
3314			vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
3315					vmf->address, &vmf->ptl);
3316			if (likely(pte_same(*vmf->pte, vmf->orig_pte)))
3317				ret = VM_FAULT_OOM;
3318			delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
3319			goto unlock;
3320		}
3321
3322		/* Had to read the page from swap area: Major fault */
3323		ret = VM_FAULT_MAJOR;
3324		count_vm_event(PGMAJFAULT);
3325		count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
3326	} else if (PageHWPoison(page)) {
3327		/*
3328		 * hwpoisoned dirty swapcache pages are kept for killing
3329		 * owner processes (which may be unknown at hwpoison time)
3330		 */
3331		ret = VM_FAULT_HWPOISON;
3332		delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
3333		goto out_release;
3334	}
3335
3336	locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags);
3337
3338	delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
3339	if (!locked) {
3340		ret |= VM_FAULT_RETRY;
3341		goto out_release;
3342	}
3343
3344	/*
3345	 * Make sure try_to_free_swap or reuse_swap_page or swapoff did not
3346	 * release the swapcache from under us.  The page pin, and pte_same
3347	 * test below, are not enough to exclude that.  Even if it is still
3348	 * swapcache, we need to check that the page's swap has not changed.
3349	 */
3350	if (unlikely((!PageSwapCache(page) ||
3351			page_private(page) != entry.val)) && swapcache)
3352		goto out_page;
3353
3354	page = ksm_might_need_to_copy(page, vma, vmf->address);
3355	if (unlikely(!page)) {
3356		ret = VM_FAULT_OOM;
3357		page = swapcache;
3358		goto out_page;
3359	}
3360
3361	cgroup_throttle_swaprate(page, GFP_KERNEL);
3362
3363	/*
3364	 * Back out if somebody else already faulted in this pte.
3365	 */
3366	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3367			&vmf->ptl);
3368	if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte)))
3369		goto out_nomap;
3370
3371	if (unlikely(!PageUptodate(page))) {
3372		ret = VM_FAULT_SIGBUS;
3373		goto out_nomap;
3374	}
3375
3376	/*
3377	 * The page isn't present yet, go ahead with the fault.
3378	 *
3379	 * Be careful about the sequence of operations here.
3380	 * To get its accounting right, reuse_swap_page() must be called
3381	 * while the page is counted on swap but not yet in mapcount i.e.
3382	 * before page_add_anon_rmap() and swap_free(); try_to_free_swap()
3383	 * must be called after the swap_free(), or it will never succeed.
3384	 */
3385
3386	inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
3387	dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS);
3388	pte = mk_pte(page, vma->vm_page_prot);
3389	if ((vmf->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) {
3390		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
3391		vmf->flags &= ~FAULT_FLAG_WRITE;
3392		ret |= VM_FAULT_WRITE;
3393		exclusive = RMAP_EXCLUSIVE;
3394	}
3395	flush_icache_page(vma, page);
3396	if (pte_swp_soft_dirty(vmf->orig_pte))
3397		pte = pte_mksoft_dirty(pte);
3398	if (pte_swp_uffd_wp(vmf->orig_pte)) {
3399		pte = pte_mkuffd_wp(pte);
3400		pte = pte_wrprotect(pte);
3401	}
3402	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
3403	arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte);
3404	vmf->orig_pte = pte;
3405
3406	/* ksm created a completely new copy */
3407	if (unlikely(page != swapcache && swapcache)) {
3408		page_add_new_anon_rmap(page, vma, vmf->address, false);
3409		lru_cache_add_inactive_or_unevictable(page, vma);
3410	} else {
3411		do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
3412	}
3413
3414	swap_free(entry);
3415	if (mem_cgroup_swap_full(page) ||
3416	    (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
3417		try_to_free_swap(page);
3418	unlock_page(page);
3419	if (page != swapcache && swapcache) {
3420		/*
3421		 * Hold the lock to prevent the swap entry from being reused
3422		 * until we take the PT lock for the pte_same() check
3423		 * (to avoid false positives from pte_same). For
3424		 * further safety, release the lock after the swap_free
3425		 * so that the swap count won't change under a
3426		 * parallel locked swapcache.
3427		 */
3428		unlock_page(swapcache);
3429		put_page(swapcache);
3430	}
3431
3432	if (vmf->flags & FAULT_FLAG_WRITE) {
3433		ret |= do_wp_page(vmf);
3434		if (ret & VM_FAULT_ERROR)
3435			ret &= VM_FAULT_ERROR;
3436		goto out;
3437	}
3438
3439	/* No need to invalidate - it was non-present before */
3440	update_mmu_cache(vma, vmf->address, vmf->pte);
3441unlock:
3442	pte_unmap_unlock(vmf->pte, vmf->ptl);
3443out:
3444	return ret;
3445out_nomap:
3446	pte_unmap_unlock(vmf->pte, vmf->ptl);
3447out_page:
3448	unlock_page(page);
3449out_release:
3450	put_page(page);
3451	if (page != swapcache && swapcache) {
3452		unlock_page(swapcache);
3453		put_page(swapcache);
3454	}
3455	return ret;
3456}
3457
3458/*
3459 * We enter with non-exclusive mmap_lock (to exclude vma changes,
3460 * but allow concurrent faults), and pte mapped but not yet locked.
3461 * We return with mmap_lock still held, but pte unmapped and unlocked.
3462 */
3463static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
3464{
3465	struct vm_area_struct *vma = vmf->vma;
3466	struct page *page;
3467	vm_fault_t ret = 0;
3468	pte_t entry;
3469
3470	/* File mapping without ->vm_ops ? */
3471	if (vma->vm_flags & VM_SHARED)
3472		return VM_FAULT_SIGBUS;
3473
3474	/*
3475	 * Use pte_alloc() instead of pte_alloc_map().  We can't run
3476	 * pte_offset_map() on pmds where a huge pmd might be created
3477	 * from a different thread.
3478	 *
3479	 * pte_alloc_map() is safe to use under mmap_write_lock(mm) or when
3480	 * parallel threads are excluded by other means.
3481	 *
3482	 * Here we only have mmap_read_lock(mm).
3483	 */
3484	if (pte_alloc(vma->vm_mm, vmf->pmd))
3485		return VM_FAULT_OOM;
3486
3487	/* See the comment in pte_alloc_one_map() */
3488	if (unlikely(pmd_trans_unstable(vmf->pmd)))
3489		return 0;
3490
3491	/* Use the zero-page for reads */
3492	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
3493			!mm_forbids_zeropage(vma->vm_mm)) {
3494		entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
3495						vma->vm_page_prot));
3496		vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
3497				vmf->address, &vmf->ptl);
3498		if (!pte_none(*vmf->pte)) {
3499			update_mmu_tlb(vma, vmf->address, vmf->pte);
3500			goto unlock;
3501		}
3502		ret = check_stable_address_space(vma->vm_mm);
3503		if (ret)
3504			goto unlock;
3505		/* Deliver the page fault to userland, check inside PT lock */
3506		if (userfaultfd_missing(vma)) {
3507			pte_unmap_unlock(vmf->pte, vmf->ptl);
3508			return handle_userfault(vmf, VM_UFFD_MISSING);
3509		}
3510		goto setpte;
3511	}
3512
3513	/* Allocate our own private page. */
3514	if (unlikely(anon_vma_prepare(vma)))
3515		goto oom;
3516	page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
3517	if (!page)
3518		goto oom;
3519
3520	if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL))
3521		goto oom_free_page;
3522	cgroup_throttle_swaprate(page, GFP_KERNEL);
3523
3524	/*
3525	 * The memory barrier inside __SetPageUptodate makes sure that
3526	 * preceding stores to the page contents become visible before
3527	 * the set_pte_at() write.
3528	 */
3529	__SetPageUptodate(page);
3530
3531	entry = mk_pte(page, vma->vm_page_prot);
3532	entry = pte_sw_mkyoung(entry);
3533	if (vma->vm_flags & VM_WRITE)
3534		entry = pte_mkwrite(pte_mkdirty(entry));
3535
3536	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3537			&vmf->ptl);
3538	if (!pte_none(*vmf->pte)) {
3539		update_mmu_cache(vma, vmf->address, vmf->pte);
3540		goto release;
3541	}
3542
3543	ret = check_stable_address_space(vma->vm_mm);
3544	if (ret)
3545		goto release;
3546
3547	/* Deliver the page fault to userland, check inside PT lock */
3548	if (userfaultfd_missing(vma)) {
3549		pte_unmap_unlock(vmf->pte, vmf->ptl);
3550		put_page(page);
3551		return handle_userfault(vmf, VM_UFFD_MISSING);
3552	}
3553
3554	inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
3555	page_add_new_anon_rmap(page, vma, vmf->address, false);
3556	lru_cache_add_inactive_or_unevictable(page, vma);
3557setpte:
3558	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
3559
3560	/* No need to invalidate - it was non-present before */
3561	update_mmu_cache(vma, vmf->address, vmf->pte);
3562unlock:
3563	pte_unmap_unlock(vmf->pte, vmf->ptl);
3564	return ret;
3565release:
3566	put_page(page);
3567	goto unlock;
3568oom_free_page:
3569	put_page(page);
3570oom:
3571	return VM_FAULT_OOM;
3572}
3573
3574/*
3575 * The mmap_lock must have been held on entry, and may have been
3576 * released depending on flags and vma->vm_ops->fault() return value.
3577 * See filemap_fault() and __lock_page_retry().
3578 */
3579static vm_fault_t __do_fault(struct vm_fault *vmf)
3580{
3581	struct vm_area_struct *vma = vmf->vma;
3582	vm_fault_t ret;
3583
3584	/*
3585	 * Preallocate pte before we take page_lock because this might lead to
3586	 * deadlocks for memcg reclaim which waits for pages under writeback:
3587	 *				lock_page(A)
3588	 *				SetPageWriteback(A)
3589	 *				unlock_page(A)
3590	 * lock_page(B)
3591	 *				lock_page(B)
3592	 * pte_alloc_one
3593	 *   shrink_page_list
3594	 *     wait_on_page_writeback(A)
3595	 *				SetPageWriteback(B)
3596	 *				unlock_page(B)
3597	 *				# flush A, B to clear the writeback
3598	 */
3599	if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) {
3600		vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
3601		if (!vmf->prealloc_pte)
3602			return VM_FAULT_OOM;
3603		smp_wmb(); /* See comment in __pte_alloc() */
3604	}
3605
3606	ret = vma->vm_ops->fault(vmf);
3607	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
3608			    VM_FAULT_DONE_COW)))
3609		return ret;
3610
3611	if (unlikely(PageHWPoison(vmf->page))) {
3612		if (ret & VM_FAULT_LOCKED)
3613			unlock_page(vmf->page);
3614		put_page(vmf->page);
3615		vmf->page = NULL;
3616		return VM_FAULT_HWPOISON;
3617	}
3618
3619	if (unlikely(!(ret & VM_FAULT_LOCKED)))
3620		lock_page(vmf->page);
3621	else
3622		VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page);
3623
3624	return ret;
3625}
3626
3627/*
3628 * The ordering of these checks is important for pmds with _PAGE_DEVMAP set.
3629 * If we check pmd_trans_unstable() first we will trip the bad_pmd() check
3630 * inside of pmd_none_or_trans_huge_or_clear_bad(). This will end up correctly
3631 * returning 1 but not before it spams dmesg with the pmd_clear_bad() output.
3632 */
3633static int pmd_devmap_trans_unstable(pmd_t *pmd)
3634{
3635	return pmd_devmap(*pmd) || pmd_trans_unstable(pmd);
3636}
3637
3638static vm_fault_t pte_alloc_one_map(struct vm_fault *vmf)
3639{
3640	struct vm_area_struct *vma = vmf->vma;
3641
3642	if (!pmd_none(*vmf->pmd))
3643		goto map_pte;
3644	if (vmf->prealloc_pte) {
3645		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
3646		if (unlikely(!pmd_none(*vmf->pmd))) {
3647			spin_unlock(vmf->ptl);
3648			goto map_pte;
3649		}
3650
3651		mm_inc_nr_ptes(vma->vm_mm);
3652		pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
3653		spin_unlock(vmf->ptl);
3654		vmf->prealloc_pte = NULL;
3655	} else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) {
3656		return VM_FAULT_OOM;
3657	}
3658map_pte:
3659	/*
3660	 * If a huge pmd materialized under us just retry later.  Use
3661	 * pmd_trans_unstable() via pmd_devmap_trans_unstable() instead of
3662	 * pmd_trans_huge() to ensure the pmd didn't become pmd_trans_huge
3663	 * under us and then back to pmd_none, as a result of MADV_DONTNEED
3664	 * running immediately after a huge pmd fault in a different thread of
3665	 * this mm, in turn leading to a misleading pmd_trans_huge() retval.
3666	 * All we have to ensure is that it is a regular pmd that we can walk
3667	 * with pte_offset_map() and we can do that through an atomic read in
3668	 * C, which is what pmd_trans_unstable() provides.
3669	 */
3670	if (pmd_devmap_trans_unstable(vmf->pmd))
3671		return VM_FAULT_NOPAGE;
3672
3673	/*
3674	 * At this point we know that our vmf->pmd points to a page of ptes
3675	 * and it cannot become pmd_none(), pmd_devmap() or pmd_trans_huge()
3676	 * for the duration of the fault.  If a racing MADV_DONTNEED runs and
3677	 * we zap the ptes pointed to by our vmf->pmd, the vmf->ptl will still
3678	 * be valid and we will re-check to make sure the vmf->pte isn't
3679	 * pte_none() under vmf->ptl protection when we return to
3680	 * alloc_set_pte().
3681	 */
3682	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3683			&vmf->ptl);
3684	return 0;
3685}
3686
3687#ifdef CONFIG_TRANSPARENT_HUGEPAGE
3688static void deposit_prealloc_pte(struct vm_fault *vmf)
3689{
3690	struct vm_area_struct *vma = vmf->vma;
3691
3692	pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
3693	/*
3694	 * We are going to consume the prealloc table,
3695	 * count that as nr_ptes.
3696	 */
3697	mm_inc_nr_ptes(vma->vm_mm);
3698	vmf->prealloc_pte = NULL;
3699}
3700
3701static vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
3702{
3703	struct vm_area_struct *vma = vmf->vma;
3704	bool write = vmf->flags & FAULT_FLAG_WRITE;
3705	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
3706	pmd_t entry;
3707	int i;
3708	vm_fault_t ret;
3709
3710	if (!transhuge_vma_suitable(vma, haddr))
3711		return VM_FAULT_FALLBACK;
3712
3713	ret = VM_FAULT_FALLBACK;
3714	page = compound_head(page);
3715
3716	/*
3717	 * Archs like ppc64 need additional space to store information
3718	 * related to pte entry. Use the preallocated table for that.
3719	 */
3720	if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) {
3721		vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
3722		if (!vmf->prealloc_pte)
3723			return VM_FAULT_OOM;
3724		smp_wmb(); /* See comment in __pte_alloc() */
3725	}
3726
3727	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
3728	if (unlikely(!pmd_none(*vmf->pmd)))
3729		goto out;
3730
3731	for (i = 0; i < HPAGE_PMD_NR; i++)
3732		flush_icache_page(vma, page + i);
3733
3734	entry = mk_huge_pmd(page, vma->vm_page_prot);
3735	if (write)
3736		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
3737
3738	add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR);
3739	page_add_file_rmap(page, true);
3740	/*
3741	 * deposit and withdraw with pmd lock held
3742	 */
3743	if (arch_needs_pgtable_deposit())
3744		deposit_prealloc_pte(vmf);
3745
3746	set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
3747
3748	update_mmu_cache_pmd(vma, haddr, vmf->pmd);
3749
3750	/* fault is handled */
3751	ret = 0;
3752	count_vm_event(THP_FILE_MAPPED);
3753out:
3754	spin_unlock(vmf->ptl);
3755	return ret;
3756}
3757#else
3758static vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
3759{
3760	BUILD_BUG();
3761	return 0;
3762}
3763#endif
3764
3765/**
3766 * alloc_set_pte - setup new PTE entry for given page and add reverse page
3767 * mapping. If needed, the function allocates a page table or uses the pre-allocated one.
3768 *
3769 * @vmf: fault environment
3770 * @page: page to map
3771 *
3772 * Caller must take care of unlocking vmf->ptl, if vmf->pte is non-NULL on
3773 * return.
3774 *
3775 * Target users are page handler itself and implementations of
3776 * vm_ops->map_pages.
3777 *
3778 * Return: %0 on success, %VM_FAULT_ code in case of error.
3779 */
3780vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct page *page)
3781{
3782	struct vm_area_struct *vma = vmf->vma;
3783	bool write = vmf->flags & FAULT_FLAG_WRITE;
3784	pte_t entry;
3785	vm_fault_t ret;
3786
3787	if (pmd_none(*vmf->pmd) && PageTransCompound(page)) {
3788		ret = do_set_pmd(vmf, page);
3789		if (ret != VM_FAULT_FALLBACK)
3790			return ret;
3791	}
3792
3793	if (!vmf->pte) {
3794		ret = pte_alloc_one_map(vmf);
3795		if (ret)
3796			return ret;
3797	}
3798
3799	/* Re-check under ptl */
3800	if (unlikely(!pte_none(*vmf->pte))) {
3801		update_mmu_tlb(vma, vmf->address, vmf->pte);
3802		return VM_FAULT_NOPAGE;
3803	}
3804
3805	flush_icache_page(vma, page);
3806	entry = mk_pte(page, vma->vm_page_prot);
3807	entry = pte_sw_mkyoung(entry);
3808	if (write)
3809		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
3810	/* copy-on-write page */
3811	if (write && !(vma->vm_flags & VM_SHARED)) {
3812		inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
3813		page_add_new_anon_rmap(page, vma, vmf->address, false);
3814		lru_cache_add_inactive_or_unevictable(page, vma);
3815	} else {
3816		inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
3817		page_add_file_rmap(page, false);
3818	}
3819	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
3820
3821	/* no need to invalidate: a not-present page won't be cached */
3822	update_mmu_cache(vma, vmf->address, vmf->pte);
3823
3824	return 0;
3825}
3826
3827
3828/**
3829 * finish_fault - finish page fault once we have prepared the page to fault
3830 *
3831 * @vmf: structure describing the fault
3832 *
3833 * This function handles all that is needed to finish a page fault once the
3834 * page to fault in is prepared. It handles locking of PTEs, inserts PTE for
3835 * given page, adds reverse page mapping, handles memcg charges and LRU
3836 * addition.
3837 *
3838 * The function expects the page to be locked and on success it consumes a
3839 * reference of a page being mapped (for the PTE which maps it).
3840 *
3841 * Return: %0 on success, %VM_FAULT_ code in case of error.
3842 */
3843vm_fault_t finish_fault(struct vm_fault *vmf)
3844{
3845	struct page *page;
3846	vm_fault_t ret = 0;
3847
3848	/* Did we COW the page? */
3849	if ((vmf->flags & FAULT_FLAG_WRITE) &&
3850	    !(vmf->vma->vm_flags & VM_SHARED))
3851		page = vmf->cow_page;
3852	else
3853		page = vmf->page;
3854
3855	/*
3856	 * check even for read faults because we might have lost our CoWed
3857	 * page
3858	 */
3859	if (!(vmf->vma->vm_flags & VM_SHARED))
3860		ret = check_stable_address_space(vmf->vma->vm_mm);
3861	if (!ret)
3862		ret = alloc_set_pte(vmf, page);
3863	if (vmf->pte)
3864		pte_unmap_unlock(vmf->pte, vmf->ptl);
3865	return ret;
3866}
3867
3868static unsigned long fault_around_bytes __read_mostly =
3869	rounddown_pow_of_two(65536);
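/* Defaults to 64KB, i.e. a 16-page fault-around window with a 4KB page size. */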
3870
3871#ifdef CONFIG_DEBUG_FS
3872static int fault_around_bytes_get(void *data, u64 *val)
3873{
3874	*val = fault_around_bytes;
3875	return 0;
3876}
3877
3878/*
3879 * fault_around_bytes must be rounded down to the nearest page order as it's
3880 * what do_fault_around() expects to see.
3881 */
3882static int fault_around_bytes_set(void *data, u64 val)
3883{
3884	if (val / PAGE_SIZE > PTRS_PER_PTE)
3885		return -EINVAL;
3886	if (val > PAGE_SIZE)
3887		fault_around_bytes = rounddown_pow_of_two(val);
3888	else
3889		fault_around_bytes = PAGE_SIZE; /* rounddown_pow_of_two(0) is undefined */
3890	return 0;
3891}
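/*
 * Example (assuming debugfs is mounted at /sys/kernel/debug): writing
 * 4096 to /sys/kernel/debug/fault_around_bytes shrinks the window to a
 * single page, which disables fault-around entirely, since
 * do_read_fault() only calls do_fault_around() when more than one page
 * would be mapped; writing 65536 restores the default.
 */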
3892DEFINE_DEBUGFS_ATTRIBUTE(fault_around_bytes_fops,
3893		fault_around_bytes_get, fault_around_bytes_set, "%llu\n");
3894
3895static int __init fault_around_debugfs(void)
3896{
3897	debugfs_create_file_unsafe("fault_around_bytes", 0644, NULL, NULL,
3898				   &fault_around_bytes_fops);
3899	return 0;
3900}
3901late_initcall(fault_around_debugfs);
3902#endif
3903
3904/*
3905 * do_fault_around() tries to map few pages around the fault address. The hope
3906 * is that the pages will be needed soon and this will lower the number of
3907 * faults to handle.
3908 *
3909 * It uses vm_ops->map_pages() to map the pages, which skips the page if it's
3910 * not ready to be mapped: not up-to-date, locked, etc.
3911 *
3912 * This function is called with the page table lock taken. In the split ptlock
3913 * case the page table lock protects only those entries which belong to
3914 * the page table corresponding to the fault address.
3915 *
3916 * This function doesn't cross the VMA boundaries, in order to call map_pages()
3917 * only once.
3918 *
3919 * fault_around_bytes defines how many bytes we'll try to map.
3920 * do_fault_around() expects it to be set to a power of two less than or equal
3921 * to PTRS_PER_PTE.
3922 *
3923 * The virtual address of the area that we map is naturally aligned to
3924 * fault_around_bytes rounded down to the machine page size
3925 * (and therefore to page order).  This way it's easier to guarantee
3926 * that we don't cross page table boundaries.
3927 */
3928static vm_fault_t do_fault_around(struct vm_fault *vmf)
3929{
3930	unsigned long address = vmf->address, nr_pages, mask;
3931	pgoff_t start_pgoff = vmf->pgoff;
3932	pgoff_t end_pgoff;
3933	int off;
3934	vm_fault_t ret = 0;
3935
3936	nr_pages = READ_ONCE(fault_around_bytes) >> PAGE_SHIFT;
3937	mask = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK;
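	/*
	 * With a 4KB page size and the default fault_around_bytes of
	 * 64KB, nr_pages is 16 and mask clears the low 16 bits, so the
	 * window starts at the enclosing 64KB boundary (clamped below to
	 * the VMA start; end_pgoff then clips the end to the VMA and to
	 * the page table).
	 */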
3938
3939	vmf->address = max(address & mask, vmf->vma->vm_start);
3940	off = ((address - vmf->address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
3941	start_pgoff -= off;
3942
3943	/*
3944	 *  end_pgoff is either the end of the page table, the end of
3945	 *  the vma or nr_pages from start_pgoff, depending on what is nearest.
3946	 */
3947	end_pgoff = start_pgoff -
3948		((vmf->address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +
3949		PTRS_PER_PTE - 1;
3950	end_pgoff = min3(end_pgoff, vma_pages(vmf->vma) + vmf->vma->vm_pgoff - 1,
3951			start_pgoff + nr_pages - 1);
3952
3953	if (pmd_none(*vmf->pmd)) {
3954		vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
3955		if (!vmf->prealloc_pte)
3956			goto out;
3957		smp_wmb(); /* See comment in __pte_alloc() */
3958	}
3959
3960	vmf->vma->vm_ops->map_pages(vmf, start_pgoff, end_pgoff);
3961
3962	/* Huge page is mapped? Page fault is solved */
3963	if (pmd_trans_huge(*vmf->pmd)) {
3964		ret = VM_FAULT_NOPAGE;
3965		goto out;
3966	}
3967
3968	/* ->map_pages() hasn't done anything useful. Cold page cache? */
3969	if (!vmf->pte)
3970		goto out;
3971
3972	/* check if the page fault is solved */
3973	vmf->pte -= (vmf->address >> PAGE_SHIFT) - (address >> PAGE_SHIFT);
3974	if (!pte_none(*vmf->pte))
3975		ret = VM_FAULT_NOPAGE;
3976	pte_unmap_unlock(vmf->pte, vmf->ptl);
3977out:
3978	vmf->address = address;
3979	vmf->pte = NULL;
3980	return ret;
3981}
3982
3983static vm_fault_t do_read_fault(struct vm_fault *vmf)
3984{
3985	struct vm_area_struct *vma = vmf->vma;
3986	vm_fault_t ret = 0;
3987
3988	/*
3989	 * Let's call ->map_pages() first and use ->fault() as fallback
3990	 * if the page at that offset is not ready to be mapped (cold cache or
3991	 * something).
3992	 */
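	/*
	 * A zero return from do_fault_around() means the faulting page
	 * itself still isn't mapped, so we fall through to __do_fault()
	 * and ->fault() below.
	 */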
3993	if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) {
3994		ret = do_fault_around(vmf);
3995		if (ret)
3996			return ret;
3997	}
3998
3999	ret = __do_fault(vmf);
4000	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4001		return ret;
4002
4003	ret |= finish_fault(vmf);
4004	unlock_page(vmf->page);
4005	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4006		put_page(vmf->page);
4007	return ret;
4008}
4009
4010static vm_fault_t do_cow_fault(struct vm_fault *vmf)
4011{
4012	struct vm_area_struct *vma = vmf->vma;
4013	vm_fault_t ret;
4014
4015	if (unlikely(anon_vma_prepare(vma)))
4016		return VM_FAULT_OOM;
4017
4018	vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address);
4019	if (!vmf->cow_page)
4020		return VM_FAULT_OOM;
4021
4022	if (mem_cgroup_charge(vmf->cow_page, vma->vm_mm, GFP_KERNEL)) {
4023		put_page(vmf->cow_page);
4024		return VM_FAULT_OOM;
4025	}
4026	cgroup_throttle_swaprate(vmf->cow_page, GFP_KERNEL);
4027
4028	ret = __do_fault(vmf);
4029	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4030		goto uncharge_out;
4031	if (ret & VM_FAULT_DONE_COW)
4032		return ret;
4033
4034	copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
4035	__SetPageUptodate(vmf->cow_page);
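	/*
	 * As in do_anonymous_page(), the memory barrier inside
	 * __SetPageUptodate() orders the copy above before the PTE that
	 * finish_fault() installs, so no thread can see the new mapping
	 * with stale contents.
	 */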
4036
4037	ret |= finish_fault(vmf);
4038	unlock_page(vmf->page);
4039	put_page(vmf->page);
4040	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4041		goto uncharge_out;
4042	return ret;
4043uncharge_out:
4044	put_page(vmf->cow_page);
4045	return ret;
4046}
4047
4048static vm_fault_t do_shared_fault(struct vm_fault *vmf)
4049{
4050	struct vm_area_struct *vma = vmf->vma;
4051	vm_fault_t ret, tmp;
4052
4053	ret = __do_fault(vmf);
4054	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4055		return ret;
4056
4057	/*
4058	 * Check if the backing address space wants to know that the page is
4059	 * about to become writable
4060	 */
4061	if (vma->vm_ops->page_mkwrite) {
4062		unlock_page(vmf->page);
4063		tmp = do_page_mkwrite(vmf);
4064		if (unlikely(!tmp ||
4065				(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
4066			put_page(vmf->page);
4067			return tmp;
4068		}
4069	}
4070
4071	ret |= finish_fault(vmf);
4072	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
4073					VM_FAULT_RETRY))) {
4074		unlock_page(vmf->page);
4075		put_page(vmf->page);
4076		return ret;
4077	}
4078
4079	ret |= fault_dirty_shared_page(vmf);
4080	return ret;
4081}
4082
4083/*
4084 * We enter with non-exclusive mmap_lock (to exclude vma changes,
4085 * but allow concurrent faults).
4086 * The mmap_lock may have been released depending on flags and our
4087 * return value.  See filemap_fault() and __lock_page_or_retry().
4088 * If mmap_lock is released, vma may become invalid (for example
4089 * by other thread calling munmap()).
4090 */
4091static vm_fault_t do_fault(struct vm_fault *vmf)
4092{
4093	struct vm_area_struct *vma = vmf->vma;
4094	struct mm_struct *vm_mm = vma->vm_mm;
4095	vm_fault_t ret;
4096
4097	/*
4098	 * The VMA was not fully populated on mmap() or missing VM_DONTEXPAND
4099	 */
4100	if (!vma->vm_ops->fault) {
4101		/*
4102		 * If we find a migration pmd entry or a none pmd entry, which
4103		 * should never happen, return SIGBUS
4104		 */
4105		if (unlikely(!pmd_present(*vmf->pmd)))
4106			ret = VM_FAULT_SIGBUS;
4107		else {
4108			vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm,
4109						       vmf->pmd,
4110						       vmf->address,
4111						       &vmf->ptl);
4112			/*
4113			 * Make sure this is not a temporary clearing of pte
4114			 * by holding ptl and checking again. A R/M/W update
4115			 * of the pte involves: taking the ptl, clearing the
4116			 * pte so that we don't have concurrent modification
4117			 * by hardware, followed by an update.
4118			 */
4119			if (unlikely(pte_none(*vmf->pte)))
4120				ret = VM_FAULT_SIGBUS;
4121			else
4122				ret = VM_FAULT_NOPAGE;
4123
4124			pte_unmap_unlock(vmf->pte, vmf->ptl);
4125		}
4126	} else if (!(vmf->flags & FAULT_FLAG_WRITE))
4127		ret = do_read_fault(vmf);
4128	else if (!(vma->vm_flags & VM_SHARED))
4129		ret = do_cow_fault(vmf);
4130	else
4131		ret = do_shared_fault(vmf);
4132
4133	/* preallocated pagetable is unused: free it */
4134	if (vmf->prealloc_pte) {
4135		pte_free(vm_mm, vmf->prealloc_pte);
4136		vmf->prealloc_pte = NULL;
4137	}
4138	return ret;
4139}
4140
4141static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
4142				unsigned long addr, int page_nid,
4143				int *flags)
4144{
4145	get_page(page);
4146
4147	count_vm_numa_event(NUMA_HINT_FAULTS);
4148	if (page_nid == numa_node_id()) {
4149		count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
4150		*flags |= TNF_FAULT_LOCAL;
4151	}
4152
4153	return mpol_misplaced(page, vma, addr);
4154}
4155
4156static vm_fault_t do_numa_page(struct vm_fault *vmf)
4157{
4158	struct vm_area_struct *vma = vmf->vma;
4159	struct page *page = NULL;
4160	int page_nid = NUMA_NO_NODE;
4161	int last_cpupid;
4162	int target_nid;
4163	bool migrated = false;
4164	pte_t pte, old_pte;
4165	bool was_writable = pte_savedwrite(vmf->orig_pte);
4166	int flags = 0;
4167
4168	/*
4169	 * The "pte" at this point cannot be used safely without
4170	 * validation through pte_unmap_same(). It's of NUMA type but
4171	 * the pfn may be screwed if the read is non-atomic.
4172	 */
4173	vmf->ptl = pte_lockptr(vma->vm_mm, vmf->pmd);
4174	spin_lock(vmf->ptl);
4175	if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) {
4176		pte_unmap_unlock(vmf->pte, vmf->ptl);
4177		goto out;
4178	}
4179
4180	/*
4181	 * Make it present again. Depending on how the arch implements
4182	 * non-accessible ptes, some can allow access by kernel mode.
4183	 */
4184	old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte);
4185	pte = pte_modify(old_pte, vma->vm_page_prot);
4186	pte = pte_mkyoung(pte);
4187	if (was_writable)
4188		pte = pte_mkwrite(pte);
4189	ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte);
4190	update_mmu_cache(vma, vmf->address, vmf->pte);
4191
4192	page = vm_normal_page(vma, vmf->address, pte);
4193	if (!page) {
4194		pte_unmap_unlock(vmf->pte, vmf->ptl);
4195		return 0;
4196	}
4197
4198	/* TODO: handle PTE-mapped THP */
4199	if (PageCompound(page)) {
4200		pte_unmap_unlock(vmf->pte, vmf->ptl);
4201		return 0;
4202	}
4203
4204	/*
4205	 * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
4206	 * much anyway since they can be in shared cache state. This misses
4207	 * the case where a mapping is writable but the process never writes
4208	 * to it but pte_write gets cleared during protection updates and
4209	 * pte_dirty has unpredictable behaviour between PTE scan updates,
4210	 * background writeback, dirty balancing and application behaviour.
4211	 */
4212	if (!pte_write(pte))
4213		flags |= TNF_NO_GROUP;
4214
4215	/*
4216	 * Flag if the page is shared between multiple address spaces. This
4217	 * is later used when determining whether to group tasks together
4218	 */
4219	if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED))
4220		flags |= TNF_SHARED;
4221
4222	last_cpupid = page_cpupid_last(page);
4223	page_nid = page_to_nid(page);
4224	target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid,
4225			&flags);
4226	pte_unmap_unlock(vmf->pte, vmf->ptl);
4227	if (target_nid == NUMA_NO_NODE) {
4228		put_page(page);
4229		goto out;
4230	}
4231
4232	/* Migrate to the requested node */
4233	migrated = migrate_misplaced_page(page, vma, target_nid);
4234	if (migrated) {
4235		page_nid = target_nid;
4236		flags |= TNF_MIGRATED;
4237	} else
4238		flags |= TNF_MIGRATE_FAIL;
4239
4240out:
4241	if (page_nid != NUMA_NO_NODE)
4242		task_numa_fault(last_cpupid, page_nid, 1, flags);
4243	return 0;
4244}
4245
4246static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf)
4247{
4248	if (vma_is_anonymous(vmf->vma))
4249		return do_huge_pmd_anonymous_page(vmf);
4250	if (vmf->vma->vm_ops->huge_fault)
4251		return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
4252	return VM_FAULT_FALLBACK;
4253}
4254
4255/* `inline' is required to avoid gcc 4.1.2 build error */
4256static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd)
4257{
4258	if (vma_is_anonymous(vmf->vma)) {
4259		if (userfaultfd_huge_pmd_wp(vmf->vma, orig_pmd))
4260			return handle_userfault(vmf, VM_UFFD_WP);
4261		return do_huge_pmd_wp_page(vmf, orig_pmd);
4262	}
4263	if (vmf->vma->vm_ops->huge_fault) {
4264		vm_fault_t ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
4265
4266		if (!(ret & VM_FAULT_FALLBACK))
4267			return ret;
4268	}
4269
4270	/* COW or write-notify handled on pte level: split pmd. */
4271	__split_huge_pmd(vmf->vma, vmf->pmd, vmf->address, false, NULL);
4272
4273	return VM_FAULT_FALLBACK;
4274}
4275
4276static vm_fault_t create_huge_pud(struct vm_fault *vmf)
4277{
4278#if defined(CONFIG_TRANSPARENT_HUGEPAGE) &&			\
4279	defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
4280	/* No support for anonymous transparent PUD pages yet */
4281	if (vma_is_anonymous(vmf->vma))
4282		goto split;
4283	if (vmf->vma->vm_ops->huge_fault) {
4284		vm_fault_t ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
4285
4286		if (!(ret & VM_FAULT_FALLBACK))
4287			return ret;
4288	}
4289split:
4290	/* COW or write-notify not handled on PUD level: split pud. */
4291	__split_huge_pud(vmf->vma, vmf->pud, vmf->address);
4292#endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
4293	return VM_FAULT_FALLBACK;
4294}
4295
4296static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
4297{
4298#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4299	/* No support for anonymous transparent PUD pages yet */
4300	if (vma_is_anonymous(vmf->vma))
4301		return VM_FAULT_FALLBACK;
4302	if (vmf->vma->vm_ops->huge_fault)
4303		return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
4304#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
4305	return VM_FAULT_FALLBACK;
4306}
4307
4308/*
4309 * These routines also need to handle stuff like marking pages dirty
4310 * and/or accessed for architectures that don't do it in hardware (most
4311 * RISC architectures).  The early dirtying is also good on the i386.
4312 *
4313 * There is also a hook called "update_mmu_cache()" that architectures
4314 * with external mmu caches can use to update those (ie the Sparc or
4315 * PowerPC hashed page tables that act as extended TLBs).
4316 *
4317 * We enter with non-exclusive mmap_lock (to exclude vma changes, but allow
4318 * concurrent faults).
4319 *
4320 * The mmap_lock may have been released depending on flags and our return value.
4321 * See filemap_fault() and __lock_page_or_retry().
4322 */
4323static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
4324{
4325	pte_t entry;
4326
4327	if (unlikely(pmd_none(*vmf->pmd))) {
4328		/*
4329		 * Leave __pte_alloc() until later: because vm_ops->fault may
4330		 * want to allocate huge page, and if we expose page table
4331		 * for an instant, it will be difficult to retract from
4332		 * concurrent faults and from rmap lookups.
4333		 */
4334		vmf->pte = NULL;
4335	} else {
4336		/* See comment in pte_alloc_one_map() */
4337		if (pmd_devmap_trans_unstable(vmf->pmd))
4338			return 0;
4339		/*
4340		 * A regular pmd is established and it can't morph into a huge
4341		 * pmd from under us anymore at this point because we hold the
4342		 * mmap_lock read mode and khugepaged takes it in write mode.
4343		 * So now it's safe to run pte_offset_map().
4344		 */
4345		vmf->pte = pte_offset_map(vmf->pmd, vmf->address);
4346		vmf->orig_pte = *vmf->pte;
4347
4348		/*
4349		 * some architectures can have larger ptes than wordsize,
4350	 * e.g. ppc44x-defconfig has CONFIG_PTE_64BIT=y and
4351		 * CONFIG_32BIT=y, so READ_ONCE cannot guarantee atomic
4352		 * accesses.  The code below just needs a consistent view
4353		 * for the ifs and we later double check anyway with the
4354		 * ptl lock held. So here a barrier will do.
4355		 */
4356		barrier();
4357		if (pte_none(vmf->orig_pte)) {
4358			pte_unmap(vmf->pte);
4359			vmf->pte = NULL;
4360		}
4361	}
4362
4363	if (!vmf->pte) {
4364		if (vma_is_anonymous(vmf->vma))
4365			return do_anonymous_page(vmf);
4366		else
4367			return do_fault(vmf);
4368	}
4369
4370	if (!pte_present(vmf->orig_pte))
4371		return do_swap_page(vmf);
4372
4373	if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
4374		return do_numa_page(vmf);
4375
4376	vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
4377	spin_lock(vmf->ptl);
4378	entry = vmf->orig_pte;
4379	if (unlikely(!pte_same(*vmf->pte, entry))) {
4380		update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
4381		goto unlock;
4382	}
4383	if (vmf->flags & FAULT_FLAG_WRITE) {
4384		if (!pte_write(entry))
4385			return do_wp_page(vmf);
4386		entry = pte_mkdirty(entry);
4387	}
4388	entry = pte_mkyoung(entry);
4389	if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
4390				vmf->flags & FAULT_FLAG_WRITE)) {
4391		update_mmu_cache(vmf->vma, vmf->address, vmf->pte);
4392	} else {
4393		/* Skip spurious TLB flush for retried page fault */
4394		if (vmf->flags & FAULT_FLAG_TRIED)
4395			goto unlock;
4396		/*
4397		 * This is needed only for protection faults but the arch code
4398		 * is not yet telling us if this is a protection fault or not.
4399		 * This still avoids useless tlb flushes for .text page faults
4400		 * with threads.
4401		 */
4402		if (vmf->flags & FAULT_FLAG_WRITE)
4403			flush_tlb_fix_spurious_fault(vmf->vma, vmf->address);
4404	}
4405unlock:
4406	pte_unmap_unlock(vmf->pte, vmf->ptl);
4407	return 0;
4408}
4409
4410/*
4411 * By the time we get here, we already hold the mm semaphore
4412 *
4413 * The mmap_lock may have been released depending on flags and our
4414 * return value.  See filemap_fault() and __lock_page_or_retry().
4415 */
4416static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
4417		unsigned long address, unsigned int flags)
4418{
4419	struct vm_fault vmf = {
4420		.vma = vma,
4421		.address = address & PAGE_MASK,
4422		.flags = flags,
4423		.pgoff = linear_page_index(vma, address),
4424		.gfp_mask = __get_fault_gfp_mask(vma),
4425	};
4426	unsigned int dirty = flags & FAULT_FLAG_WRITE;
4427	struct mm_struct *mm = vma->vm_mm;
4428	pgd_t *pgd;
4429	p4d_t *p4d;
4430	vm_fault_t ret;
4431
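	/*
	 * Walk (and allocate where needed) down the hierarchy: p4d, pud,
	 * pmd.  At the pud and pmd levels a transparent huge page may be
	 * installed or handled directly; otherwise we fall through to
	 * handle_pte_fault() for the pte level.
	 */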
4432	pgd = pgd_offset(mm, address);
4433	p4d = p4d_alloc(mm, pgd, address);
4434	if (!p4d)
4435		return VM_FAULT_OOM;
4436
4437	vmf.pud = pud_alloc(mm, p4d, address);
4438	if (!vmf.pud)
4439		return VM_FAULT_OOM;
4440retry_pud:
4441	if (pud_none(*vmf.pud) && __transparent_hugepage_enabled(vma)) {
4442		ret = create_huge_pud(&vmf);
4443		if (!(ret & VM_FAULT_FALLBACK))
4444			return ret;
4445	} else {
4446		pud_t orig_pud = *vmf.pud;
4447
4448		barrier();
4449		if (pud_trans_huge(orig_pud) || pud_devmap(orig_pud)) {
4450
4451			/* NUMA case for anonymous PUDs would go here */
4452
4453			if (dirty && !pud_write(orig_pud)) {
4454				ret = wp_huge_pud(&vmf, orig_pud);
4455				if (!(ret & VM_FAULT_FALLBACK))
4456					return ret;
4457			} else {
4458				huge_pud_set_accessed(&vmf, orig_pud);
4459				return 0;
4460			}
4461		}
4462	}
4463
4464	vmf.pmd = pmd_alloc(mm, vmf.pud, address);
4465	if (!vmf.pmd)
4466		return VM_FAULT_OOM;
4467
4468	/* Huge pud page fault raced with pmd_alloc? */
4469	if (pud_trans_unstable(vmf.pud))
4470		goto retry_pud;
4471
4472	if (pmd_none(*vmf.pmd) && __transparent_hugepage_enabled(vma)) {
4473		ret = create_huge_pmd(&vmf);
4474		if (!(ret & VM_FAULT_FALLBACK))
4475			return ret;
4476	} else {
4477		pmd_t orig_pmd = *vmf.pmd;
4478
4479		barrier();
4480		if (unlikely(is_swap_pmd(orig_pmd))) {
4481			VM_BUG_ON(thp_migration_supported() &&
4482					  !is_pmd_migration_entry(orig_pmd));
4483			if (is_pmd_migration_entry(orig_pmd))
4484				pmd_migration_entry_wait(mm, vmf.pmd);
4485			return 0;
4486		}
4487		if (pmd_trans_huge(orig_pmd) || pmd_devmap(orig_pmd)) {
4488			if (pmd_protnone(orig_pmd) && vma_is_accessible(vma))
4489				return do_huge_pmd_numa_page(&vmf, orig_pmd);
4490
4491			if (dirty && !pmd_write(orig_pmd)) {
4492				ret = wp_huge_pmd(&vmf, orig_pmd);
4493				if (!(ret & VM_FAULT_FALLBACK))
4494					return ret;
4495			} else {
4496				huge_pmd_set_accessed(&vmf, orig_pmd);
4497				return 0;
4498			}
4499		}
4500	}
4501
4502	return handle_pte_fault(&vmf);
4503}
4504
4505/**
4506 * mm_account_fault - Do page fault accounting
4507 *
4508 * @regs: the pt_regs struct pointer.  When set to NULL, will skip accounting
4509 *        of perf event counters, but we'll still do the per-task accounting to
4510 *        the task who triggered this page fault.
4511 * @address: the faulted address.
4512 * @flags: the fault flags.
4513 * @ret: the fault retcode.
4514 *
4515 * This will take care of most of the page fault accounting.  Meanwhile, it
4516 * will also include the PERF_COUNT_SW_PAGE_FAULTS_[MAJ|MIN] perf counter
4517 * updates.  However note that the handling of PERF_COUNT_SW_PAGE_FAULTS should
4518 * still be in per-arch page fault handlers at the entry of page fault.
4519 */
4520static inline void mm_account_fault(struct pt_regs *regs,
4521				    unsigned long address, unsigned int flags,
4522				    vm_fault_t ret)
4523{
4524	bool major;
4525
4526	/*
4527	 * We don't do accounting for some specific faults:
4528	 *
4529	 * - Unsuccessful faults (e.g. when the address wasn't valid).  That
4530	 *   includes arch_vma_access_permitted() failing before reaching here.
4531	 *   So this is not a "this many hardware page faults" counter.  We
4532	 *   should use the hw profiling for that.
4533	 *
4534	 * - Incomplete faults (VM_FAULT_RETRY).  They will only be counted
4535	 *   once they're completed.
4536	 */
4537	if (ret & (VM_FAULT_ERROR | VM_FAULT_RETRY))
4538		return;
4539
4540	/*
4541	 * We define the fault as a major fault when the final successful fault
4542	 * is VM_FAULT_MAJOR, or if it retried (which implies that we couldn't
4543	 * handle it immediately previously).
4544	 */
4545	major = (ret & VM_FAULT_MAJOR) || (flags & FAULT_FLAG_TRIED);
4546
4547	if (major)
4548		current->maj_flt++;
4549	else
4550		current->min_flt++;
4551
4552	/*
4553	 * If the fault is done for GUP, regs will be NULL.  We only do the
4554	 * accounting for the per-thread fault counters of the task that
4555	 * triggered the fault, and we skip the perf event updates.
4556	 */
4557	if (!regs)
4558		return;
4559
4560	if (major)
4561		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
4562	else
4563		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
4564}
4565
4566/*
4567 * By the time we get here, we already hold the mm semaphore
4568 *
4569 * The mmap_lock may have been released depending on flags and our
4570 * return value.  See filemap_fault() and __lock_page_or_retry().
4571 */
4572vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
4573			   unsigned int flags, struct pt_regs *regs)
4574{
4575	vm_fault_t ret;
4576
4577	__set_current_state(TASK_RUNNING);
4578
4579	count_vm_event(PGFAULT);
4580	count_memcg_event_mm(vma->vm_mm, PGFAULT);
4581
4582	/* do counter updates before entering really critical section. */
4583	check_sync_rss_stat(current);
4584
4585	if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
4586					    flags & FAULT_FLAG_INSTRUCTION,
4587					    flags & FAULT_FLAG_REMOTE))
4588		return VM_FAULT_SIGSEGV;
4589
4590	/*
4591	 * Enable the memcg OOM handling for faults triggered in user
4592	 * space.  Kernel faults are handled more gracefully.
4593	 */
4594	if (flags & FAULT_FLAG_USER)
4595		mem_cgroup_enter_user_fault();
4596
4597	if (unlikely(is_vm_hugetlb_page(vma)))
4598		ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
4599	else
4600		ret = __handle_mm_fault(vma, address, flags);
4601
4602	if (flags & FAULT_FLAG_USER) {
4603		mem_cgroup_exit_user_fault();
4604		/*
4605		 * The task may have entered a memcg OOM situation but
4606		 * if the allocation error was handled gracefully (no
4607		 * VM_FAULT_OOM), there is no need to kill anything.
4608		 * Just clean up the OOM state peacefully.
4609		 */
4610		if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
4611			mem_cgroup_oom_synchronize(false);
4612	}
4613
4614	mm_account_fault(regs, address, flags, ret);
4615
4616	return ret;
4617}
4618EXPORT_SYMBOL_GPL(handle_mm_fault);
4619
4620#ifndef __PAGETABLE_P4D_FOLDED
4621/*
4622 * Allocate p4d page table.
4623 * We've already handled the fast-path in-line.
4624 */
4625int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
4626{
4627	p4d_t *new = p4d_alloc_one(mm, address);
4628	if (!new)
4629		return -ENOMEM;
4630
4631	smp_wmb(); /* See comment in __pte_alloc */
4632
4633	spin_lock(&mm->page_table_lock);
4634	if (pgd_present(*pgd))		/* Another has populated it */
4635		p4d_free(mm, new);
4636	else
4637		pgd_populate(mm, pgd, new);
4638	spin_unlock(&mm->page_table_lock);
4639	return 0;
4640}
4641#endif /* __PAGETABLE_P4D_FOLDED */
4642
4643#ifndef __PAGETABLE_PUD_FOLDED
4644/*
4645 * Allocate page upper directory.
4646 * We've already handled the fast-path in-line.
4647 */
4648int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address)
4649{
4650	pud_t *new = pud_alloc_one(mm, address);
4651	if (!new)
4652		return -ENOMEM;
4653
4654	smp_wmb(); /* See comment in __pte_alloc */
4655
4656	spin_lock(&mm->page_table_lock);
4657	if (!p4d_present(*p4d)) {
4658		mm_inc_nr_puds(mm);
4659		p4d_populate(mm, p4d, new);
4660	} else	/* Another has populated it */
4661		pud_free(mm, new);
4662	spin_unlock(&mm->page_table_lock);
4663	return 0;
4664}
4665#endif /* __PAGETABLE_PUD_FOLDED */
4666
4667#ifndef __PAGETABLE_PMD_FOLDED
4668/*
4669 * Allocate page middle directory.
4670 * We've already handled the fast-path in-line.
4671 */
4672int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
4673{
4674	spinlock_t *ptl;
4675	pmd_t *new = pmd_alloc_one(mm, address);
4676	if (!new)
4677		return -ENOMEM;
4678
4679	smp_wmb(); /* See comment in __pte_alloc */
4680
4681	ptl = pud_lock(mm, pud);
4682	if (!pud_present(*pud)) {
4683		mm_inc_nr_pmds(mm);
4684		pud_populate(mm, pud, new);
4685	} else	/* Another has populated it */
4686		pmd_free(mm, new);
4687	spin_unlock(ptl);
4688	return 0;
4689}
4690#endif /* __PAGETABLE_PMD_FOLDED */
4691
4692static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address,
4693			    struct mmu_notifier_range *range,
4694			    pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
4695{
4696	pgd_t *pgd;
4697	p4d_t *p4d;
4698	pud_t *pud;
4699	pmd_t *pmd;
4700	pte_t *ptep;
4701
4702	pgd = pgd_offset(mm, address);
4703	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
4704		goto out;
4705
4706	p4d = p4d_offset(pgd, address);
4707	if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d)))
4708		goto out;
4709
4710	pud = pud_offset(p4d, address);
4711	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
4712		goto out;
4713
4714	pmd = pmd_offset(pud, address);
4715	VM_BUG_ON(pmd_trans_huge(*pmd));
4716
4717	if (pmd_huge(*pmd)) {
4718		if (!pmdpp)
4719			goto out;
4720
4721		if (range) {
4722			mmu_notifier_range_init(range, MMU_NOTIFY_CLEAR, 0,
4723						NULL, mm, address & PMD_MASK,
4724						(address & PMD_MASK) + PMD_SIZE);
4725			mmu_notifier_invalidate_range_start(range);
4726		}
4727		*ptlp = pmd_lock(mm, pmd);
4728		if (pmd_huge(*pmd)) {
4729			*pmdpp = pmd;
4730			return 0;
4731		}
4732		spin_unlock(*ptlp);
4733		if (range)
4734			mmu_notifier_invalidate_range_end(range);
4735	}
4736
4737	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
4738		goto out;
4739
4740	if (range) {
4741		mmu_notifier_range_init(range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
4742					address & PAGE_MASK,
4743					(address & PAGE_MASK) + PAGE_SIZE);
4744		mmu_notifier_invalidate_range_start(range);
4745	}
4746	ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
4747	if (!pte_present(*ptep))
4748		goto unlock;
4749	*ptepp = ptep;
4750	return 0;
4751unlock:
4752	pte_unmap_unlock(ptep, *ptlp);
4753	if (range)
4754		mmu_notifier_invalidate_range_end(range);
4755out:
4756	return -EINVAL;
4757}
4758
4759static inline int follow_pte(struct mm_struct *mm, unsigned long address,
4760			     pte_t **ptepp, spinlock_t **ptlp)
4761{
4762	int res;
4763
4764	/* (void) is needed to make gcc happy */
4765	(void) __cond_lock(*ptlp,
4766			   !(res = __follow_pte_pmd(mm, address, NULL,
4767						    ptepp, NULL, ptlp)));
4768	return res;
4769}
4770
4771int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
4772		   struct mmu_notifier_range *range,
4773		   pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
4774{
4775	int res;
4776
4777	/* (void) is needed to make gcc happy */
4778	(void) __cond_lock(*ptlp,
4779			   !(res = __follow_pte_pmd(mm, address, range,
4780						    ptepp, pmdpp, ptlp)));
4781	return res;
4782}
4783EXPORT_SYMBOL(follow_pte_pmd);
4784
4785/**
4786 * follow_pfn - look up PFN at a user virtual address
4787 * @vma: memory mapping
4788 * @address: user virtual address
4789 * @pfn: location to store found PFN
4790 *
4791 * Only IO mappings and raw PFN mappings are allowed.
4792 *
4793 * Return: zero and the pfn at @pfn on success, -ve otherwise.
4794 */
4795int follow_pfn(struct vm_area_struct *vma, unsigned long address,
4796	unsigned long *pfn)
4797{
4798	int ret = -EINVAL;
4799	spinlock_t *ptl;
4800	pte_t *ptep;
4801
4802	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
4803		return ret;
4804
4805	ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
4806	if (ret)
4807		return ret;
4808	*pfn = pte_pfn(*ptep);
4809	pte_unmap_unlock(ptep, ptl);
4810	return 0;
4811}
4812EXPORT_SYMBOL(follow_pfn);
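/*
 * Note that the pte lock is dropped before follow_pfn() returns, so the
 * pfn is only a snapshot: the underlying mapping can be changed or torn
 * down as soon as the caller sees it.
 */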
4813
4814#ifdef CONFIG_HAVE_IOREMAP_PROT
4815int follow_phys(struct vm_area_struct *vma,
4816		unsigned long address, unsigned int flags,
4817		unsigned long *prot, resource_size_t *phys)
4818{
4819	int ret = -EINVAL;
4820	pte_t *ptep, pte;
4821	spinlock_t *ptl;
4822
4823	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
4824		goto out;
4825
4826	if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
4827		goto out;
4828	pte = *ptep;
4829
4830	if ((flags & FOLL_WRITE) && !pte_write(pte))
4831		goto unlock;
4832
4833	*prot = pgprot_val(pte_pgprot(pte));
4834	*phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
4835
4836	ret = 0;
4837unlock:
4838	pte_unmap_unlock(ptep, ptl);
4839out:
4840	return ret;
4841}
4842
4843int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
4844			void *buf, int len, int write)
4845{
4846	resource_size_t phys_addr;
4847	unsigned long prot = 0;
4848	void __iomem *maddr;
4849	int offset = addr & (PAGE_SIZE-1);
4850
4851	if (follow_phys(vma, addr, write, &prot, &phys_addr))
4852		return -EINVAL;
4853
4854	maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
4855	if (!maddr)
4856		return -ENOMEM;
4857
4858	if (write)
4859		memcpy_toio(maddr + offset, buf, len);
4860	else
4861		memcpy_fromio(buf, maddr + offset, len);
4862	iounmap(maddr);
4863
4864	return len;
4865}
4866EXPORT_SYMBOL_GPL(generic_access_phys);
4867#endif
4868
4869/*
4870 * Access another process' address space as given in mm.  If non-NULL, use the
4871 * given task for page fault accounting.
4872 */
4873int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
4874		unsigned long addr, void *buf, int len, unsigned int gup_flags)
4875{
4876	struct vm_area_struct *vma;
4877	void *old_buf = buf;
4878	int write = gup_flags & FOLL_WRITE;
4879
4880	if (mmap_read_lock_killable(mm))
4881		return 0;
4882
4883	/* ignore errors, just check how much was successfully transferred */
4884	while (len) {
4885		int bytes, ret, offset;
4886		void *maddr;
4887		struct page *page = NULL;
4888
4889		ret = get_user_pages_remote(mm, addr, 1,
4890				gup_flags, &page, &vma, NULL);
4891		if (ret <= 0) {
4892#ifndef CONFIG_HAVE_IOREMAP_PROT
4893			break;
4894#else
4895			/*
4896			 * Check if this is a VM_IO | VM_PFNMAP VMA, which
4897			 * we can access using slightly different code.
4898			 */
4899			vma = find_vma(mm, addr);
4900			if (!vma || vma->vm_start > addr)
4901				break;
4902			if (vma->vm_ops && vma->vm_ops->access)
4903				ret = vma->vm_ops->access(vma, addr, buf,
4904							  len, write);
4905			if (ret <= 0)
4906				break;
4907			bytes = ret;
4908#endif
4909		} else {
4910			bytes = len;
4911			offset = addr & (PAGE_SIZE-1);
4912			if (bytes > PAGE_SIZE-offset)
4913				bytes = PAGE_SIZE-offset;
4914
4915			maddr = kmap(page);
4916			if (write) {
4917				copy_to_user_page(vma, page, addr,
4918						  maddr + offset, buf, bytes);
4919				set_page_dirty_lock(page);
4920			} else {
4921				copy_from_user_page(vma, page, addr,
4922						    buf, maddr + offset, bytes);
4923			}
4924			kunmap(page);
4925			put_page(page);
4926		}
4927		len -= bytes;
4928		buf += bytes;
4929		addr += bytes;
4930	}
4931	mmap_read_unlock(mm);
4932
4933	return buf - old_buf;
4934}
4935
4936/**
4937 * access_remote_vm - access another process' address space
4938 * @mm:		the mm_struct of the target address space
4939 * @addr:	start address to access
4940 * @buf:	source or destination buffer
4941 * @len:	number of bytes to transfer
4942 * @gup_flags:	flags modifying lookup behaviour
4943 *
4944 * The caller must hold a reference on @mm.
4945 *
4946 * Return: number of bytes copied from source to destination.
4947 */
4948int access_remote_vm(struct mm_struct *mm, unsigned long addr,
4949		void *buf, int len, unsigned int gup_flags)
4950{
4951	return __access_remote_vm(NULL, mm, addr, buf, len, gup_flags);
4952}
4953
4954/*
4955 * Access another process' address space.
4956 * Source/target buffer must be kernel space.
4957 * Do not walk the page table directly, use get_user_pages.
4958 */
4959int access_process_vm(struct task_struct *tsk, unsigned long addr,
4960		void *buf, int len, unsigned int gup_flags)
4961{
4962	struct mm_struct *mm;
4963	int ret;
4964
4965	mm = get_task_mm(tsk);
4966	if (!mm)
4967		return 0;
4968
4969	ret = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
4970
4971	mmput(mm);
4972
4973	return ret;
4974}
4975EXPORT_SYMBOL_GPL(access_process_vm);
4976
4977/*
4978 * Print the name of a VMA.
4979 */
4980void print_vma_addr(char *prefix, unsigned long ip)
4981{
4982	struct mm_struct *mm = current->mm;
4983	struct vm_area_struct *vma;
4984
4985	/*
4986	 * we might be running from an atomic context so we cannot sleep
4987	 */
4988	if (!mmap_read_trylock(mm))
4989		return;
4990
4991	vma = find_vma(mm, ip);
4992	if (vma && vma->vm_file) {
4993		struct file *f = vma->vm_file;
4994		char *buf = (char *)__get_free_page(GFP_NOWAIT);
4995		if (buf) {
4996			char *p;
4997
4998			p = file_path(f, buf, PAGE_SIZE);
4999			if (IS_ERR(p))
5000				p = "?";
5001			printk("%s%s[%lx+%lx]", prefix, kbasename(p),
5002					vma->vm_start,
5003					vma->vm_end - vma->vm_start);
5004			free_page((unsigned long)buf);
5005		}
5006	}
5007	mmap_read_unlock(mm);
5008}
5009
5010#if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
5011void __might_fault(const char *file, int line)
5012{
5013	/*
5014	 * Some code (nfs/sunrpc) uses socket ops on kernel memory while
5015	 * holding the mmap_lock, this is safe because kernel memory doesn't
5016	 * get paged out, therefore we'll never actually fault, and the
5017	 * below annotations will generate false positives.
5018	 */
5019	if (uaccess_kernel())
5020		return;
5021	if (pagefault_disabled())
5022		return;
5023	__might_sleep(file, line, 0);
5024#if defined(CONFIG_DEBUG_ATOMIC_SLEEP)
5025	if (current->mm)
5026		might_lock_read(&current->mm->mmap_lock);
5027#endif
5028}
5029EXPORT_SYMBOL(__might_fault);
5030#endif
5031
5032#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
5033/*
5034 * Process all subpages of the specified huge page with the specified
5035 * operation.  The target subpage will be processed last to keep its
5036 * cache lines hot.
5037 */
5038static inline void process_huge_page(
5039	unsigned long addr_hint, unsigned int pages_per_huge_page,
5040	void (*process_subpage)(unsigned long addr, int idx, void *arg),
5041	void *arg)
5042{
5043	int i, n, base, l;
5044	unsigned long addr = addr_hint &
5045		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
5046
5047	/* Process target subpage last to keep its cache lines hot */
5048	might_sleep();
5049	n = (addr_hint - addr) / PAGE_SIZE;
5050	if (2 * n <= pages_per_huge_page) {
5051		/* If target subpage in first half of huge page */
5052		base = 0;
5053		l = n;
5054		/* Process subpages at the end of huge page */
5055		for (i = pages_per_huge_page - 1; i >= 2 * n; i--) {
5056			cond_resched();
5057			process_subpage(addr + i * PAGE_SIZE, i, arg);
5058		}
5059	} else {
5060		/* If target subpage in second half of huge page */
5061		base = pages_per_huge_page - 2 * (pages_per_huge_page - n);
5062		l = pages_per_huge_page - n;
5063		/* Process subpages at the beginning of the huge page */
5064		for (i = 0; i < base; i++) {
5065			cond_resched();
5066			process_subpage(addr + i * PAGE_SIZE, i, arg);
5067		}
5068	}
5069	/*
5070	 * Process remaining subpages in left-right-left-right pattern
5071	 * towards the target subpage
5072	 */
5073	for (i = 0; i < l; i++) {
5074		int left_idx = base + i;
5075		int right_idx = base + 2 * l - 1 - i;
5076
5077		cond_resched();
5078		process_subpage(addr + left_idx * PAGE_SIZE, left_idx, arg);
5079		cond_resched();
5080		process_subpage(addr + right_idx * PAGE_SIZE, right_idx, arg);
5081	}
5082}
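/*
 * Example: with pages_per_huge_page == 512 (a 2MB huge page built from
 * 4KB subpages) and a target subpage index n == 100, the first loop
 * above handles subpages 511 down to 200, and the left-right loop then
 * handles the pairs (0,199), (1,198), ..., (99,100), finishing at the
 * target so its cache lines stay hot.
 */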
5083
5084static void clear_gigantic_page(struct page *page,
5085				unsigned long addr,
5086				unsigned int pages_per_huge_page)
5087{
5088	int i;
5089	struct page *p = page;
5090
5091	might_sleep();
5092	for (i = 0; i < pages_per_huge_page;
5093	     i++, p = mem_map_next(p, page, i)) {
5094		cond_resched();
5095		clear_user_highpage(p, addr + i * PAGE_SIZE);
5096	}
5097}
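/*
 * Gigantic pages may span memory sections whose struct pages are not
 * virtually contiguous, hence the mem_map_next() stepping above instead
 * of plain pointer arithmetic.
 */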
5098
5099static void clear_subpage(unsigned long addr, int idx, void *arg)
5100{
5101	struct page *page = arg;
5102
5103	clear_user_highpage(page + idx, addr);
5104}
5105
5106void clear_huge_page(struct page *page,
5107		     unsigned long addr_hint, unsigned int pages_per_huge_page)
5108{
5109	unsigned long addr = addr_hint &
5110		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
5111
5112	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
5113		clear_gigantic_page(page, addr, pages_per_huge_page);
5114		return;
5115	}
5116
5117	process_huge_page(addr_hint, pages_per_huge_page, clear_subpage, page);
5118}
5119
5120static void copy_user_gigantic_page(struct page *dst, struct page *src,
5121				    unsigned long addr,
5122				    struct vm_area_struct *vma,
5123				    unsigned int pages_per_huge_page)
5124{
5125	int i;
5126	struct page *dst_base = dst;
5127	struct page *src_base = src;
5128
5129	for (i = 0; i < pages_per_huge_page; ) {
5130		cond_resched();
5131		copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
5132
5133		i++;
5134		dst = mem_map_next(dst, dst_base, i);
5135		src = mem_map_next(src, src_base, i);
5136	}
5137}
5138
5139struct copy_subpage_arg {
5140	struct page *dst;
5141	struct page *src;
5142	struct vm_area_struct *vma;
5143};
5144
5145static void copy_subpage(unsigned long addr, int idx, void *arg)
5146{
5147	struct copy_subpage_arg *copy_arg = arg;
5148
5149	copy_user_highpage(copy_arg->dst + idx, copy_arg->src + idx,
5150			   addr, copy_arg->vma);
5151}
5152
5153void copy_user_huge_page(struct page *dst, struct page *src,
5154			 unsigned long addr_hint, struct vm_area_struct *vma,
5155			 unsigned int pages_per_huge_page)
5156{
5157	unsigned long addr = addr_hint &
5158		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
5159	struct copy_subpage_arg arg = {
5160		.dst = dst,
5161		.src = src,
5162		.vma = vma,
5163	};
5164
5165	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
5166		copy_user_gigantic_page(dst, src, addr, vma,
5167					pages_per_huge_page);
5168		return;
5169	}
5170
5171	process_huge_page(addr_hint, pages_per_huge_page, copy_subpage, &arg);
5172}
5173
5174long copy_huge_page_from_user(struct page *dst_page,
5175				const void __user *usr_src,
5176				unsigned int pages_per_huge_page,
5177				bool allow_pagefault)
5178{
5179	void *src = (void *)usr_src;
5180	void *page_kaddr;
5181	unsigned long i, rc = 0;
5182	unsigned long ret_val = pages_per_huge_page * PAGE_SIZE;
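	/*
	 * ret_val counts down as bytes are copied: it ends up as the
	 * number of bytes left uncopied, so 0 means complete success,
	 * mirroring the copy_from_user() convention.
	 */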
5183
5184	for (i = 0; i < pages_per_huge_page; i++) {
5185		if (allow_pagefault)
5186			page_kaddr = kmap(dst_page + i);
5187		else
5188			page_kaddr = kmap_atomic(dst_page + i);
5189		rc = copy_from_user(page_kaddr,
5190				(const void __user *)(src + i * PAGE_SIZE),
5191				PAGE_SIZE);
5192		if (allow_pagefault)
5193			kunmap(dst_page + i);
5194		else
5195			kunmap_atomic(page_kaddr);
5196
5197		ret_val -= (PAGE_SIZE - rc);
5198		if (rc)
5199			break;
5200
5201		cond_resched();
5202	}
5203	return ret_val;
5204}
5205#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
5206
5207#if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS
5208
5209static struct kmem_cache *page_ptl_cachep;
5210
5211void __init ptlock_cache_init(void)
5212{
5213	page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
5214			SLAB_PANIC, NULL);
5215}
5216
5217bool ptlock_alloc(struct page *page)
5218{
5219	spinlock_t *ptl;
5220
5221	ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
5222	if (!ptl)
5223		return false;
5224	page->ptl = ptl;
5225	return true;
5226}
5227
5228void ptlock_free(struct page *page)
5229{
5230	kmem_cache_free(page_ptl_cachep, page->ptl);
5231}
5232#endif
v4.6
 
   1/*
   2 *  linux/mm/memory.c
   3 *
   4 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
   5 */
   6
   7/*
   8 * demand-loading started 01.12.91 - seems it is high on the list of
   9 * things wanted, and it should be easy to implement. - Linus
  10 */
  11
  12/*
  13 * Ok, demand-loading was easy, shared pages a little bit tricker. Shared
  14 * pages started 02.12.91, seems to work. - Linus.
  15 *
  16 * Tested sharing by executing about 30 /bin/sh: under the old kernel it
  17 * would have taken more than the 6M I have free, but it worked well as
  18 * far as I could see.
  19 *
  20 * Also corrected some "invalidate()"s - I wasn't doing enough of them.
  21 */
  22
  23/*
  24 * Real VM (paging to/from disk) started 18.12.91. Much more work and
  25 * thought has to go into this. Oh, well..
  26 * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
  27 *		Found it. Everything seems to work now.
  28 * 20.12.91  -  Ok, making the swap-device changeable like the root.
  29 */
  30
  31/*
  32 * 05.04.94  -  Multi-page memory management added for v1.1.
  33 * 		Idea by Alex Bligh (alex@cconcepts.co.uk)
  34 *
  35 * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
  36 *		(Gerhard.Wichert@pdb.siemens.de)
  37 *
  38 * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
  39 */
  40
  41#include <linux/kernel_stat.h>
  42#include <linux/mm.h>
 
 
 
 
  43#include <linux/hugetlb.h>
  44#include <linux/mman.h>
  45#include <linux/swap.h>
  46#include <linux/highmem.h>
  47#include <linux/pagemap.h>
 
  48#include <linux/ksm.h>
  49#include <linux/rmap.h>
  50#include <linux/export.h>
  51#include <linux/delayacct.h>
  52#include <linux/init.h>
  53#include <linux/pfn_t.h>
  54#include <linux/writeback.h>
  55#include <linux/memcontrol.h>
  56#include <linux/mmu_notifier.h>
  57#include <linux/kallsyms.h>
  58#include <linux/swapops.h>
  59#include <linux/elf.h>
  60#include <linux/gfp.h>
  61#include <linux/migrate.h>
  62#include <linux/string.h>
  63#include <linux/dma-debug.h>
  64#include <linux/debugfs.h>
  65#include <linux/userfaultfd_k.h>
 
 
 
 
 
 
 
 
  66
  67#include <asm/io.h>
  68#include <asm/mmu_context.h>
  69#include <asm/pgalloc.h>
  70#include <asm/uaccess.h>
  71#include <asm/tlb.h>
  72#include <asm/tlbflush.h>
  73#include <asm/pgtable.h>
  74
 
  75#include "internal.h"
  76
  77#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
  78#warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
  79#endif
  80
  81#ifndef CONFIG_NEED_MULTIPLE_NODES
  82/* use the per-pgdat data instead for discontigmem - mbligh */
  83unsigned long max_mapnr;
 
 
  84struct page *mem_map;
  85
  86EXPORT_SYMBOL(max_mapnr);
  87EXPORT_SYMBOL(mem_map);
  88#endif
  89
  90/*
  91 * A number of key systems in x86 including ioremap() rely on the assumption
  92 * that high_memory defines the upper bound on direct map memory, then end
  93 * of ZONE_NORMAL.  Under CONFIG_DISCONTIG this means that max_low_pfn and
  94 * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
  95 * and ZONE_HIGHMEM.
  96 */
  97void * high_memory;
  98
  99EXPORT_SYMBOL(high_memory);
 100
 101/*
 102 * Randomize the address space (stacks, mmaps, brk, etc.).
 103 *
 104 * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
 105 *   as ancient (libc5 based) binaries can segfault. )
 106 */
 107int randomize_va_space __read_mostly =
 108#ifdef CONFIG_COMPAT_BRK
 109					1;
 110#else
 111					2;
 112#endif
 113
 
 
 
 
 
 
 
 
 
 
 
 
 114static int __init disable_randmaps(char *s)
 115{
 116	randomize_va_space = 0;
 117	return 1;
 118}
 119__setup("norandmaps", disable_randmaps);
 120
 121unsigned long zero_pfn __read_mostly;
 122unsigned long highest_memmap_pfn __read_mostly;
 123
 124EXPORT_SYMBOL(zero_pfn);
 125
 126/*
 127 * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
 128 */
 129static int __init init_zero_pfn(void)
 130{
 131	zero_pfn = page_to_pfn(ZERO_PAGE(0));
 132	return 0;
 133}
 134core_initcall(init_zero_pfn);
 135
 136
 137#if defined(SPLIT_RSS_COUNTING)
 138
 139void sync_mm_rss(struct mm_struct *mm)
 140{
 141	int i;
 142
 143	for (i = 0; i < NR_MM_COUNTERS; i++) {
 144		if (current->rss_stat.count[i]) {
 145			add_mm_counter(mm, i, current->rss_stat.count[i]);
 146			current->rss_stat.count[i] = 0;
 147		}
 148	}
 149	current->rss_stat.events = 0;
 150}
 151
 152static void add_mm_counter_fast(struct mm_struct *mm, int member, int val)
 153{
 154	struct task_struct *task = current;
 155
 156	if (likely(task->mm == mm))
 157		task->rss_stat.count[member] += val;
 158	else
 159		add_mm_counter(mm, member, val);
 160}
 161#define inc_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, 1)
 162#define dec_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, -1)
 163
 164/* sync counter once per 64 page faults */
 165#define TASK_RSS_EVENTS_THRESH	(64)
 166static void check_sync_rss_stat(struct task_struct *task)
 167{
 168	if (unlikely(task != current))
 169		return;
 170	if (unlikely(task->rss_stat.events++ > TASK_RSS_EVENTS_THRESH))
 171		sync_mm_rss(task->mm);
 172}
 173#else /* SPLIT_RSS_COUNTING */
 174
 175#define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member)
 176#define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member)
 177
 178static void check_sync_rss_stat(struct task_struct *task)
 179{
 180}
 181
 182#endif /* SPLIT_RSS_COUNTING */
 183
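/*
 * A worked note on the threshold above: with TASK_RSS_EVENTS_THRESH at
 * 64, a thread's cached rss_stat can lag the shared mm counters by up
 * to roughly 64 faults' worth of updates, which is why paths that need
 * the precise totals (e.g. add_mm_rss_vec() below) call sync_mm_rss()
 * first rather than trusting get_mm_counter() alone.
 */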
 184#ifdef HAVE_GENERIC_MMU_GATHER
 185
 186static bool tlb_next_batch(struct mmu_gather *tlb)
 187{
 188	struct mmu_gather_batch *batch;
 189
 190	batch = tlb->active;
 191	if (batch->next) {
 192		tlb->active = batch->next;
 193		return true;
 194	}
 195
 196	if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
 197		return false;
 198
 199	batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
 200	if (!batch)
 201		return false;
 202
 203	tlb->batch_count++;
 204	batch->next = NULL;
 205	batch->nr   = 0;
 206	batch->max  = MAX_GATHER_BATCH;
 207
 208	tlb->active->next = batch;
 209	tlb->active = batch;
 210
 211	return true;
 212}
 213
 214/* tlb_gather_mmu
 215 *	Called to initialize an (on-stack) mmu_gather structure for page-table
  216 *	tear-down from @mm. Full-mm teardown (start 0, end ~0) is used when
  217 *	@mm has no users left and we destroy the whole address space (exit/execve).
 218 */
 219void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 220{
 221	tlb->mm = mm;
 222
 223	/* Is it from 0 to ~0? */
 224	tlb->fullmm     = !(start | (end+1));
 225	tlb->need_flush_all = 0;
 226	tlb->local.next = NULL;
 227	tlb->local.nr   = 0;
 228	tlb->local.max  = ARRAY_SIZE(tlb->__pages);
 229	tlb->active     = &tlb->local;
 230	tlb->batch_count = 0;
 231
 232#ifdef CONFIG_HAVE_RCU_TABLE_FREE
 233	tlb->batch = NULL;
 234#endif
 235
 236	__tlb_reset_range(tlb);
 237}
 238
 239static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 240{
 241	if (!tlb->end)
 242		return;
 243
 244	tlb_flush(tlb);
 245	mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
 246#ifdef CONFIG_HAVE_RCU_TABLE_FREE
 247	tlb_table_flush(tlb);
 248#endif
 249	__tlb_reset_range(tlb);
 250}
 251
 252static void tlb_flush_mmu_free(struct mmu_gather *tlb)
 253{
 254	struct mmu_gather_batch *batch;
 255
 256	for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
 257		free_pages_and_swap_cache(batch->pages, batch->nr);
 258		batch->nr = 0;
 259	}
 260	tlb->active = &tlb->local;
 261}
 262
 263void tlb_flush_mmu(struct mmu_gather *tlb)
 264{
 265	tlb_flush_mmu_tlbonly(tlb);
 266	tlb_flush_mmu_free(tlb);
 267}
 268
 269/* tlb_finish_mmu
 270 *	Called at the end of the shootdown operation to free up any resources
 271 *	that were required.
 272 */
 273void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 274{
 275	struct mmu_gather_batch *batch, *next;
 276
 277	tlb_flush_mmu(tlb);
 278
 279	/* keep the page table cache within bounds */
 280	check_pgt_cache();
 281
 282	for (batch = tlb->local.next; batch; batch = next) {
 283		next = batch->next;
 284		free_pages((unsigned long)batch, 0);
 285	}
 286	tlb->local.next = NULL;
 287}
 288
 289/* __tlb_remove_page
  290 *	Must perform the equivalent of __free_pte(pte_get_and_clear(ptep)), while
 291 *	handling the additional races in SMP caused by other CPUs caching valid
 292 *	mappings in their TLBs. Returns the number of free page slots left.
 293 *	When out of page slots we must call tlb_flush_mmu().
 294 */
 295int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 296{
 297	struct mmu_gather_batch *batch;
 298
 299	VM_BUG_ON(!tlb->end);
 300
 301	batch = tlb->active;
 302	batch->pages[batch->nr++] = page;
 303	if (batch->nr == batch->max) {
 304		if (!tlb_next_batch(tlb))
 305			return 0;
 306		batch = tlb->active;
 307	}
 308	VM_BUG_ON_PAGE(batch->nr > batch->max, page);
 309
 310	return batch->max - batch->nr;
 311}
 312
 313#endif /* HAVE_GENERIC_MMU_GATHER */
 314
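/*
 * Usage sketch (not part of this file's API): the canonical shootdown
 * sequence around the helpers above, as run by a caller such as
 * unmap_region(); example_teardown() is a hypothetical name, and the
 * lru/hiwater bookkeeping a real caller does is trimmed.
 */
static void example_teardown(struct mm_struct *mm,
			     struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, start, end);	/* init on-stack gather */
	unmap_vmas(&tlb, vma, start, end);	/* clear ptes, batch pages */
	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
	tlb_finish_mmu(&tlb, start, end);	/* flush TLB, free batches */
}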
 315#ifdef CONFIG_HAVE_RCU_TABLE_FREE
 316
 317/*
 318 * See the comment near struct mmu_table_batch.
 319 */
 320
 321static void tlb_remove_table_smp_sync(void *arg)
 322{
 323	/* Simply deliver the interrupt */
 324}
 325
 326static void tlb_remove_table_one(void *table)
 327{
 328	/*
 329	 * This isn't an RCU grace period and hence the page-tables cannot be
 330	 * assumed to be actually RCU-freed.
 331	 *
 332	 * It is however sufficient for software page-table walkers that rely on
 333	 * IRQ disabling. See the comment near struct mmu_table_batch.
 334	 */
 335	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
 336	__tlb_remove_table(table);
 337}
 338
 339static void tlb_remove_table_rcu(struct rcu_head *head)
 340{
 341	struct mmu_table_batch *batch;
 342	int i;
 343
 344	batch = container_of(head, struct mmu_table_batch, rcu);
 345
 346	for (i = 0; i < batch->nr; i++)
 347		__tlb_remove_table(batch->tables[i]);
 348
 349	free_page((unsigned long)batch);
 350}
 351
 352void tlb_table_flush(struct mmu_gather *tlb)
 353{
 354	struct mmu_table_batch **batch = &tlb->batch;
 355
 356	if (*batch) {
 357		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
 358		*batch = NULL;
 359	}
 360}
 361
 362void tlb_remove_table(struct mmu_gather *tlb, void *table)
 363{
 364	struct mmu_table_batch **batch = &tlb->batch;
 365
 366	/*
  367	 * When there are fewer than two users of this mm there cannot be a
 368	 * concurrent page-table walk.
 369	 */
 370	if (atomic_read(&tlb->mm->mm_users) < 2) {
 371		__tlb_remove_table(table);
 372		return;
 373	}
 374
 375	if (*batch == NULL) {
 376		*batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
 377		if (*batch == NULL) {
 378			tlb_remove_table_one(table);
 379			return;
 380		}
 381		(*batch)->nr = 0;
 382	}
 383	(*batch)->tables[(*batch)->nr++] = table;
 384	if ((*batch)->nr == MAX_TABLE_BATCH)
 385		tlb_table_flush(tlb);
 386}
 387
 388#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
 389
 390/*
 391 * Note: this doesn't free the actual pages themselves. That
 392 * has been handled earlier when unmapping all the memory regions.
 393 */
 394static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
 395			   unsigned long addr)
 396{
 397	pgtable_t token = pmd_pgtable(*pmd);
 398	pmd_clear(pmd);
 399	pte_free_tlb(tlb, token, addr);
 400	atomic_long_dec(&tlb->mm->nr_ptes);
 401}
 402
 403static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
 404				unsigned long addr, unsigned long end,
 405				unsigned long floor, unsigned long ceiling)
 406{
 407	pmd_t *pmd;
 408	unsigned long next;
 409	unsigned long start;
 410
 411	start = addr;
 412	pmd = pmd_offset(pud, addr);
 413	do {
 414		next = pmd_addr_end(addr, end);
 415		if (pmd_none_or_clear_bad(pmd))
 416			continue;
 417		free_pte_range(tlb, pmd, addr);
 418	} while (pmd++, addr = next, addr != end);
 419
 420	start &= PUD_MASK;
 421	if (start < floor)
 422		return;
 423	if (ceiling) {
 424		ceiling &= PUD_MASK;
 425		if (!ceiling)
 426			return;
 427	}
 428	if (end - 1 > ceiling - 1)
 429		return;
 430
 431	pmd = pmd_offset(pud, start);
 432	pud_clear(pud);
 433	pmd_free_tlb(tlb, pmd, start);
 434	mm_dec_nr_pmds(tlb->mm);
 435}
 436
 437static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
 438				unsigned long addr, unsigned long end,
 439				unsigned long floor, unsigned long ceiling)
 440{
 441	pud_t *pud;
 442	unsigned long next;
 443	unsigned long start;
 444
 445	start = addr;
 446	pud = pud_offset(pgd, addr);
 447	do {
 448		next = pud_addr_end(addr, end);
 449		if (pud_none_or_clear_bad(pud))
 450			continue;
 451		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
 452	} while (pud++, addr = next, addr != end);
 453
 454	start &= PGDIR_MASK;
 455	if (start < floor)
 456		return;
 457	if (ceiling) {
 458		ceiling &= PGDIR_MASK;
 459		if (!ceiling)
 460			return;
 461	}
 462	if (end - 1 > ceiling - 1)
 463		return;
 464
 465	pud = pud_offset(pgd, start);
 466	pgd_clear(pgd);
 467	pud_free_tlb(tlb, pud, start);
 468}
 469
 470/*
 471 * This function frees user-level page tables of a process.
 472 */
 473void free_pgd_range(struct mmu_gather *tlb,
 474			unsigned long addr, unsigned long end,
 475			unsigned long floor, unsigned long ceiling)
 476{
 477	pgd_t *pgd;
 478	unsigned long next;
 479
 480	/*
 481	 * The next few lines have given us lots of grief...
 482	 *
 483	 * Why are we testing PMD* at this top level?  Because often
 484	 * there will be no work to do at all, and we'd prefer not to
 485	 * go all the way down to the bottom just to discover that.
 486	 *
 487	 * Why all these "- 1"s?  Because 0 represents both the bottom
 488	 * of the address space and the top of it (using -1 for the
 489	 * top wouldn't help much: the masks would do the wrong thing).
 490	 * The rule is that addr 0 and floor 0 refer to the bottom of
  491	 * the address space, but end 0 and ceiling 0 refer to the top.
 492	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
 493	 * that end 0 case should be mythical).
 494	 *
 495	 * Wherever addr is brought up or ceiling brought down, we must
 496	 * be careful to reject "the opposite 0" before it confuses the
 497	 * subsequent tests.  But what about where end is brought down
 498	 * by PMD_SIZE below? no, end can't go down to 0 there.
 499	 *
 500	 * Whereas we round start (addr) and ceiling down, by different
 501	 * masks at different levels, in order to test whether a table
  502	 * now has no other vmas using it (and so can be freed), we don't
 503	 * bother to round floor or end up - the tests don't need that.
 504	 */
 505
 506	addr &= PMD_MASK;
 507	if (addr < floor) {
 508		addr += PMD_SIZE;
 509		if (!addr)
 510			return;
 511	}
 512	if (ceiling) {
 513		ceiling &= PMD_MASK;
 514		if (!ceiling)
 515			return;
 516	}
 517	if (end - 1 > ceiling - 1)
 518		end -= PMD_SIZE;
 519	if (addr > end - 1)
 520		return;
 521
 522	pgd = pgd_offset(tlb->mm, addr);
 523	do {
 524		next = pgd_addr_end(addr, end);
 525		if (pgd_none_or_clear_bad(pgd))
 526			continue;
 527		free_pud_range(tlb, pgd, addr, next, floor, ceiling);
 528	} while (pgd++, addr = next, addr != end);
 529}
 530
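/*
 * A concrete instance of the "- 1" trick above: freeing with
 * ceiling == 0, i.e. up to the very top of the address space.  A naive
 * "end > ceiling" test would always bail out, but the wrapped
 * comparison "end - 1 > ceiling - 1" compares end - 1 against ~0UL,
 * which can never be exceeded, so the free correctly proceeds.
 */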
 531void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
 532		unsigned long floor, unsigned long ceiling)
 533{
 534	while (vma) {
 535		struct vm_area_struct *next = vma->vm_next;
 536		unsigned long addr = vma->vm_start;
 537
 538		/*
 539		 * Hide vma from rmap and truncate_pagecache before freeing
 540		 * pgtables
 541		 */
 542		unlink_anon_vmas(vma);
 543		unlink_file_vma(vma);
 544
 545		if (is_vm_hugetlb_page(vma)) {
 546			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
 547				floor, next? next->vm_start: ceiling);
 548		} else {
 549			/*
 550			 * Optimization: gather nearby vmas into one call down
 551			 */
 552			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
 553			       && !is_vm_hugetlb_page(next)) {
 554				vma = next;
 555				next = vma->vm_next;
 556				unlink_anon_vmas(vma);
 557				unlink_file_vma(vma);
 558			}
 559			free_pgd_range(tlb, addr, vma->vm_end,
 560				floor, next? next->vm_start: ceiling);
 561		}
 562		vma = next;
 563	}
 564}
 565
 566int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
 567{
 568	spinlock_t *ptl;
 569	pgtable_t new = pte_alloc_one(mm, address);
 570	if (!new)
 571		return -ENOMEM;
 572
 573	/*
 574	 * Ensure all pte setup (eg. pte page lock and page clearing) are
 575	 * visible before the pte is made visible to other CPUs by being
 576	 * put into page tables.
 577	 *
 578	 * The other side of the story is the pointer chasing in the page
 579	 * table walking code (when walking the page table without locking;
 580	 * ie. most of the time). Fortunately, these data accesses consist
 581	 * of a chain of data-dependent loads, meaning most CPUs (alpha
 582	 * being the notable exception) will already guarantee loads are
 583	 * seen in-order. See the alpha page table accessors for the
 584	 * smp_read_barrier_depends() barriers in page table walking code.
 585	 */
 586	smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
 587
 588	ptl = pmd_lock(mm, pmd);
 589	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
 590		atomic_long_inc(&mm->nr_ptes);
 591		pmd_populate(mm, pmd, new);
 592		new = NULL;
 593	}
 594	spin_unlock(ptl);
 595	if (new)
 596		pte_free(mm, new);
 597	return 0;
 598}
 599
 600int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
 601{
 602	pte_t *new = pte_alloc_one_kernel(&init_mm, address);
 603	if (!new)
 604		return -ENOMEM;
 605
 606	smp_wmb(); /* See comment in __pte_alloc */
 607
 608	spin_lock(&init_mm.page_table_lock);
 609	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
 610		pmd_populate_kernel(&init_mm, pmd, new);
 611		new = NULL;
 612	}
 613	spin_unlock(&init_mm.page_table_lock);
 614	if (new)
 615		pte_free_kernel(&init_mm, new);
 616	return 0;
 617}
 618
 619static inline void init_rss_vec(int *rss)
 620{
 621	memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
 622}
 623
 624static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
 625{
 626	int i;
 627
 628	if (current->mm == mm)
 629		sync_mm_rss(mm);
 630	for (i = 0; i < NR_MM_COUNTERS; i++)
 631		if (rss[i])
 632			add_mm_counter(mm, i, rss[i]);
 633}
 634
 635/*
 636 * This function is called to print an error when a bad pte
 637 * is found. For example, we might have a PFN-mapped pte in
 638 * a region that doesn't allow it.
 639 *
 640 * The calling function must still handle the error.
 641 */
 642static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
 643			  pte_t pte, struct page *page)
 644{
 645	pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
 646	pud_t *pud = pud_offset(pgd, addr);
 647	pmd_t *pmd = pmd_offset(pud, addr);
 648	struct address_space *mapping;
 649	pgoff_t index;
 650	static unsigned long resume;
 651	static unsigned long nr_shown;
 652	static unsigned long nr_unshown;
 653
 654	/*
 655	 * Allow a burst of 60 reports, then keep quiet for that minute;
 656	 * or allow a steady drip of one report per second.
 657	 */
 658	if (nr_shown == 60) {
 659		if (time_before(jiffies, resume)) {
 660			nr_unshown++;
 661			return;
 662		}
 663		if (nr_unshown) {
 664			pr_alert("BUG: Bad page map: %lu messages suppressed\n",
 665				 nr_unshown);
 666			nr_unshown = 0;
 667		}
 668		nr_shown = 0;
 669	}
 670	if (nr_shown++ == 0)
 671		resume = jiffies + 60 * HZ;
 672
 673	mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
 674	index = linear_page_index(vma, addr);
 675
 676	pr_alert("BUG: Bad page map in process %s  pte:%08llx pmd:%08llx\n",
 677		 current->comm,
 678		 (long long)pte_val(pte), (long long)pmd_val(*pmd));
 679	if (page)
 680		dump_page(page, "bad pte");
 681	pr_alert("addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n",
 682		 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
 683	/*
 684	 * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
 685	 */
 686	pr_alert("file:%pD fault:%pf mmap:%pf readpage:%pf\n",
 687		 vma->vm_file,
 688		 vma->vm_ops ? vma->vm_ops->fault : NULL,
 689		 vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
 690		 mapping ? mapping->a_ops->readpage : NULL);
 691	dump_stack();
 692	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
 693}
 694
 695/*
 696 * vm_normal_page -- This function gets the "struct page" associated with a pte.
 697 *
 698 * "Special" mappings do not wish to be associated with a "struct page" (either
 699 * it doesn't exist, or it exists but they don't want to touch it). In this
 700 * case, NULL is returned here. "Normal" mappings do have a struct page.
 701 *
 702 * There are 2 broad cases. Firstly, an architecture may define a pte_special()
 703 * pte bit, in which case this function is trivial. Secondly, an architecture
 704 * may not have a spare pte bit, which requires a more complicated scheme,
 705 * described below.
 706 *
 707 * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
 708 * special mapping (even if there are underlying and valid "struct pages").
 709 * COWed pages of a VM_PFNMAP are always normal.
 710 *
 711 * The way we recognize COWed pages within VM_PFNMAP mappings is through the
 712 * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
 713 * set, and the vm_pgoff will point to the first PFN mapped: thus every special
 714 * mapping will always honor the rule
 715 *
 716 *	pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
 717 *
 718 * And for normal mappings this is false.
 719 *
 720 * This restricts such mappings to be a linear translation from virtual address
 721 * to pfn. To get around this restriction, we allow arbitrary mappings so long
 722 * as the vma is not a COW mapping; in that case, we know that all ptes are
 723 * special (because none can have been COWed).
 724 *
 725 *
 726 * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
 727 *
 728 * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
 729 * page" backing, however the difference is that _all_ pages with a struct
 730 * page (that is, those where pfn_valid is true) are refcounted and considered
 731 * normal pages by the VM. The disadvantage is that pages are refcounted
 732 * (which can be slower and simply not an option for some PFNMAP users). The
 733 * advantage is that we don't have to follow the strict linearity rule of
 734 * PFNMAP mappings in order to support COWable mappings.
 735 *
 736 */
 737#ifdef __HAVE_ARCH_PTE_SPECIAL
 738# define HAVE_PTE_SPECIAL 1
 739#else
 740# define HAVE_PTE_SPECIAL 0
 741#endif
 742struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
 743				pte_t pte)
 744{
 745	unsigned long pfn = pte_pfn(pte);
 746
 747	if (HAVE_PTE_SPECIAL) {
 748		if (likely(!pte_special(pte)))
 749			goto check_pfn;
 750		if (vma->vm_ops && vma->vm_ops->find_special_page)
 751			return vma->vm_ops->find_special_page(vma, addr);
 752		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
 753			return NULL;
 754		if (!is_zero_pfn(pfn))
 755			print_bad_pte(vma, addr, pte, NULL);
 756		return NULL;
 757	}
 758
 759	/* !HAVE_PTE_SPECIAL case follows: */
 760
 761	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
 762		if (vma->vm_flags & VM_MIXEDMAP) {
 763			if (!pfn_valid(pfn))
 764				return NULL;
 765			goto out;
 766		} else {
 767			unsigned long off;
 768			off = (addr - vma->vm_start) >> PAGE_SHIFT;
 769			if (pfn == vma->vm_pgoff + off)
 770				return NULL;
 771			if (!is_cow_mapping(vma->vm_flags))
 772				return NULL;
 773		}
 774	}
 775
 776	if (is_zero_pfn(pfn))
 777		return NULL;
 778check_pfn:
 779	if (unlikely(pfn > highest_memmap_pfn)) {
 780		print_bad_pte(vma, addr, pte, NULL);
 781		return NULL;
 782	}
 783
 784	/*
 785	 * NOTE! We still have PageReserved() pages in the page tables.
 786	 * eg. VDSO mappings can cause them to exist.
 787	 */
 788out:
 789	return pfn_to_page(pfn);
 790}
 791
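/*
 * The linearity rule above, spelled out as code (a sketch only;
 * pfn_linear_in_vma() is a hypothetical helper, not kernel API).  In a
 * COWed VM_PFNMAP vma, the original remapped pfns satisfy this test and
 * stay special, while COWed replacement pages break it and so are
 * treated as normal pages.
 */
static inline bool pfn_linear_in_vma(struct vm_area_struct *vma,
				     unsigned long addr, unsigned long pfn)
{
	return pfn == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);
}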
 792#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 793struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
 794				pmd_t pmd)
 795{
 796	unsigned long pfn = pmd_pfn(pmd);
 797
 798	/*
 799	 * There is no pmd_special() but there may be special pmds, e.g.
 800	 * in a direct-access (dax) mapping, so let's just replicate the
 801	 * !HAVE_PTE_SPECIAL case from vm_normal_page() here.
 802	 */
 803	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
 804		if (vma->vm_flags & VM_MIXEDMAP) {
 805			if (!pfn_valid(pfn))
 806				return NULL;
 807			goto out;
 808		} else {
 809			unsigned long off;
 810			off = (addr - vma->vm_start) >> PAGE_SHIFT;
 811			if (pfn == vma->vm_pgoff + off)
 812				return NULL;
 813			if (!is_cow_mapping(vma->vm_flags))
 814				return NULL;
 815		}
 816	}
 817
 818	if (is_zero_pfn(pfn))
 819		return NULL;
 820	if (unlikely(pfn > highest_memmap_pfn))
 821		return NULL;
 822
 823	/*
 824	 * NOTE! We still have PageReserved() pages in the page tables.
 825	 * eg. VDSO mappings can cause them to exist.
 826	 */
 827out:
 828	return pfn_to_page(pfn);
 829}
 830#endif
 831
 832/*
 833 * copy one vm_area from one task to the other. Assumes the page tables
 834 * already present in the new task to be cleared in the whole range
 835 * covered by this vma.
 836 */
 837
 838static inline unsigned long
 839copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 840		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
 841		unsigned long addr, int *rss)
 842{
 843	unsigned long vm_flags = vma->vm_flags;
 844	pte_t pte = *src_pte;
 845	struct page *page;
 846
 847	/* pte contains position in swap or file, so copy. */
 848	if (unlikely(!pte_present(pte))) {
 849		swp_entry_t entry = pte_to_swp_entry(pte);
 850
 851		if (likely(!non_swap_entry(entry))) {
 852			if (swap_duplicate(entry) < 0)
 853				return entry.val;
 854
 855			/* make sure dst_mm is on swapoff's mmlist. */
 856			if (unlikely(list_empty(&dst_mm->mmlist))) {
 857				spin_lock(&mmlist_lock);
 858				if (list_empty(&dst_mm->mmlist))
 859					list_add(&dst_mm->mmlist,
 860							&src_mm->mmlist);
 861				spin_unlock(&mmlist_lock);
 862			}
 863			rss[MM_SWAPENTS]++;
 864		} else if (is_migration_entry(entry)) {
 865			page = migration_entry_to_page(entry);
 866
 867			rss[mm_counter(page)]++;
 868
 869			if (is_write_migration_entry(entry) &&
 870					is_cow_mapping(vm_flags)) {
 871				/*
 872				 * COW mappings require pages in both
 873				 * parent and child to be set to read.
 874				 */
 875				make_migration_entry_read(&entry);
 876				pte = swp_entry_to_pte(entry);
 877				if (pte_swp_soft_dirty(*src_pte))
 878					pte = pte_swp_mksoft_dirty(pte);
 879				set_pte_at(src_mm, addr, src_pte, pte);
 880			}
 881		}
 882		goto out_set_pte;
 883	}
 884
 885	/*
 886	 * If it's a COW mapping, write protect it both
 887	 * in the parent and the child
 888	 */
 889	if (is_cow_mapping(vm_flags)) {
 890		ptep_set_wrprotect(src_mm, addr, src_pte);
 891		pte = pte_wrprotect(pte);
 892	}
 893
 894	/*
 895	 * If it's a shared mapping, mark it clean in
 896	 * the child
 897	 */
 898	if (vm_flags & VM_SHARED)
 899		pte = pte_mkclean(pte);
 900	pte = pte_mkold(pte);
 901
 902	page = vm_normal_page(vma, addr, pte);
 903	if (page) {
 904		get_page(page);
 905		page_dup_rmap(page, false);
 906		rss[mm_counter(page)]++;
 907	}
 908
 909out_set_pte:
 910	set_pte_at(dst_mm, addr, dst_pte, pte);
 911	return 0;
 912}
 913
 914static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 915		   pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
 916		   unsigned long addr, unsigned long end)
 917{
 918	pte_t *orig_src_pte, *orig_dst_pte;
 919	pte_t *src_pte, *dst_pte;
 920	spinlock_t *src_ptl, *dst_ptl;
 921	int progress = 0;
 922	int rss[NR_MM_COUNTERS];
 923	swp_entry_t entry = (swp_entry_t){0};
 924
 925again:
 926	init_rss_vec(rss);
 927
 928	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
 929	if (!dst_pte)
 930		return -ENOMEM;
 931	src_pte = pte_offset_map(src_pmd, addr);
 932	src_ptl = pte_lockptr(src_mm, src_pmd);
 933	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
 934	orig_src_pte = src_pte;
 935	orig_dst_pte = dst_pte;
 936	arch_enter_lazy_mmu_mode();
 937
 938	do {
 939		/*
 940		 * We are holding two locks at this point - either of them
 941		 * could generate latencies in another task on another CPU.
 942		 */
 943		if (progress >= 32) {
 944			progress = 0;
 945			if (need_resched() ||
 946			    spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
 947				break;
 948		}
 949		if (pte_none(*src_pte)) {
 950			progress++;
 951			continue;
 952		}
 953		entry.val = copy_one_pte(dst_mm, src_mm, dst_pte, src_pte,
 954							vma, addr, rss);
 955		if (entry.val)
 956			break;
 957		progress += 8;
 958	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
 959
 960	arch_leave_lazy_mmu_mode();
 961	spin_unlock(src_ptl);
 962	pte_unmap(orig_src_pte);
 963	add_mm_rss_vec(dst_mm, rss);
 964	pte_unmap_unlock(orig_dst_pte, dst_ptl);
 965	cond_resched();
 966
 967	if (entry.val) {
 968		if (add_swap_count_continuation(entry, GFP_KERNEL) < 0)
 969			return -ENOMEM;
 970		progress = 0;
 971	}
 972	if (addr != end)
 973		goto again;
 974	return 0;
 975}
 976
 977static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 978		pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
 979		unsigned long addr, unsigned long end)
 980{
 981	pmd_t *src_pmd, *dst_pmd;
 982	unsigned long next;
 983
 984	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
 985	if (!dst_pmd)
 986		return -ENOMEM;
 987	src_pmd = pmd_offset(src_pud, addr);
 988	do {
 989		next = pmd_addr_end(addr, end);
 990		if (pmd_trans_huge(*src_pmd) || pmd_devmap(*src_pmd)) {
 991			int err;
 992			VM_BUG_ON(next-addr != HPAGE_PMD_SIZE);
 993			err = copy_huge_pmd(dst_mm, src_mm,
 994					    dst_pmd, src_pmd, addr, vma);
 995			if (err == -ENOMEM)
 996				return -ENOMEM;
 997			if (!err)
 998				continue;
 999			/* fall through */
1000		}
1001		if (pmd_none_or_clear_bad(src_pmd))
1002			continue;
1003		if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
1004						vma, addr, next))
1005			return -ENOMEM;
1006	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
1007	return 0;
1008}
1009
1010static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1011		pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
1012		unsigned long addr, unsigned long end)
1013{
1014	pud_t *src_pud, *dst_pud;
1015	unsigned long next;
1016
1017	dst_pud = pud_alloc(dst_mm, dst_pgd, addr);
1018	if (!dst_pud)
1019		return -ENOMEM;
1020	src_pud = pud_offset(src_pgd, addr);
1021	do {
1022		next = pud_addr_end(addr, end);
1023		if (pud_none_or_clear_bad(src_pud))
1024			continue;
1025		if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
1026						vma, addr, next))
1027			return -ENOMEM;
1028	} while (dst_pud++, src_pud++, addr = next, addr != end);
1029	return 0;
1030}
1031
1032int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1033		struct vm_area_struct *vma)
1034{
1035	pgd_t *src_pgd, *dst_pgd;
1036	unsigned long next;
1037	unsigned long addr = vma->vm_start;
1038	unsigned long end = vma->vm_end;
1039	unsigned long mmun_start;	/* For mmu_notifiers */
1040	unsigned long mmun_end;		/* For mmu_notifiers */
1041	bool is_cow;
1042	int ret;
1043
1044	/*
1045	 * Don't copy ptes where a page fault will fill them correctly.
1046	 * Fork becomes much lighter when there are big shared or private
1047	 * readonly mappings. The tradeoff is that copy_page_range is more
1048	 * efficient than faulting.
1049	 */
1050	if (!(vma->vm_flags & (VM_HUGETLB | VM_PFNMAP | VM_MIXEDMAP)) &&
1051			!vma->anon_vma)
1052		return 0;
1053
1054	if (is_vm_hugetlb_page(vma))
1055		return copy_hugetlb_page_range(dst_mm, src_mm, vma);
1056
1057	if (unlikely(vma->vm_flags & VM_PFNMAP)) {
1058		/*
1059		 * We do not free on error cases below as remove_vma
 1060		 * gets called on error from a higher-level routine.
1061		 */
1062		ret = track_pfn_copy(vma);
1063		if (ret)
1064			return ret;
1065	}
1066
1067	/*
1068	 * We need to invalidate the secondary MMU mappings only when
1069	 * there could be a permission downgrade on the ptes of the
1070	 * parent mm. And a permission downgrade will only happen if
1071	 * is_cow_mapping() returns true.
1072	 */
1073	is_cow = is_cow_mapping(vma->vm_flags);
1074	mmun_start = addr;
1075	mmun_end   = end;
1076	if (is_cow)
1077		mmu_notifier_invalidate_range_start(src_mm, mmun_start,
1078						    mmun_end);
1079
1080	ret = 0;
1081	dst_pgd = pgd_offset(dst_mm, addr);
1082	src_pgd = pgd_offset(src_mm, addr);
1083	do {
1084		next = pgd_addr_end(addr, end);
1085		if (pgd_none_or_clear_bad(src_pgd))
1086			continue;
1087		if (unlikely(copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
1088					    vma, addr, next))) {
1089			ret = -ENOMEM;
1090			break;
1091		}
1092	} while (dst_pgd++, src_pgd++, addr = next, addr != end);
1093
1094	if (is_cow)
1095		mmu_notifier_invalidate_range_end(src_mm, mmun_start, mmun_end);
1096	return ret;
1097}
1098
1099static unsigned long zap_pte_range(struct mmu_gather *tlb,
1100				struct vm_area_struct *vma, pmd_t *pmd,
1101				unsigned long addr, unsigned long end,
1102				struct zap_details *details)
1103{
1104	struct mm_struct *mm = tlb->mm;
1105	int force_flush = 0;
1106	int rss[NR_MM_COUNTERS];
1107	spinlock_t *ptl;
1108	pte_t *start_pte;
1109	pte_t *pte;
1110	swp_entry_t entry;
1111
1112again:
1113	init_rss_vec(rss);
1114	start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
1115	pte = start_pte;
1116	arch_enter_lazy_mmu_mode();
1117	do {
1118		pte_t ptent = *pte;
1119		if (pte_none(ptent)) {
1120			continue;
1121		}
1122
1123		if (pte_present(ptent)) {
1124			struct page *page;
1125
1126			page = vm_normal_page(vma, addr, ptent);
1127			if (unlikely(details) && page) {
1128				/*
1129				 * unmap_shared_mapping_pages() wants to
1130				 * invalidate cache without truncating:
1131				 * unmap shared but keep private pages.
1132				 */
1133				if (details->check_mapping &&
1134				    details->check_mapping != page->mapping)
1135					continue;
1136			}
1137			ptent = ptep_get_and_clear_full(mm, addr, pte,
1138							tlb->fullmm);
1139			tlb_remove_tlb_entry(tlb, pte, addr);
1140			if (unlikely(!page))
1141				continue;
1142
1143			if (!PageAnon(page)) {
1144				if (pte_dirty(ptent)) {
1145					/*
1146					 * oom_reaper cannot tear down dirty
1147					 * pages
1148					 */
1149					if (unlikely(details && details->ignore_dirty))
1150						continue;
1151					force_flush = 1;
1152					set_page_dirty(page);
1153				}
1154				if (pte_young(ptent) &&
1155				    likely(!(vma->vm_flags & VM_SEQ_READ)))
1156					mark_page_accessed(page);
1157			}
1158			rss[mm_counter(page)]--;
1159			page_remove_rmap(page, false);
1160			if (unlikely(page_mapcount(page) < 0))
1161				print_bad_pte(vma, addr, ptent, page);
1162			if (unlikely(!__tlb_remove_page(tlb, page))) {
1163				force_flush = 1;
1164				addr += PAGE_SIZE;
1165				break;
1166			}
1167			continue;
1168		}
1169		/* only check swap_entries if explicitly asked for in details */
1170		if (unlikely(details && !details->check_swap_entries))
1171			continue;
1172
1173		entry = pte_to_swp_entry(ptent);
1174		if (!non_swap_entry(entry))
1175			rss[MM_SWAPENTS]--;
1176		else if (is_migration_entry(entry)) {
1177			struct page *page;
1178
1179			page = migration_entry_to_page(entry);
1180			rss[mm_counter(page)]--;
1181		}
1182		if (unlikely(!free_swap_and_cache(entry)))
1183			print_bad_pte(vma, addr, ptent, NULL);
1184		pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
1185	} while (pte++, addr += PAGE_SIZE, addr != end);
1186
1187	add_mm_rss_vec(mm, rss);
1188	arch_leave_lazy_mmu_mode();
1189
1190	/* Do the actual TLB flush before dropping ptl */
1191	if (force_flush)
1192		tlb_flush_mmu_tlbonly(tlb);
1193	pte_unmap_unlock(start_pte, ptl);
1194
1195	/*
1196	 * If we forced a TLB flush (either due to running out of
1197	 * batch buffers or because we needed to flush dirty TLB
1198	 * entries before releasing the ptl), free the batched
1199	 * memory too. Restart if we didn't do everything.
1200	 */
1201	if (force_flush) {
1202		force_flush = 0;
1203		tlb_flush_mmu_free(tlb);
1204
1205		if (addr != end)
1206			goto again;
1207	}
1208
1209	return addr;
1210}
1211
1212static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
1213				struct vm_area_struct *vma, pud_t *pud,
1214				unsigned long addr, unsigned long end,
1215				struct zap_details *details)
1216{
1217	pmd_t *pmd;
1218	unsigned long next;
1219
1220	pmd = pmd_offset(pud, addr);
1221	do {
1222		next = pmd_addr_end(addr, end);
1223		if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
1224			if (next - addr != HPAGE_PMD_SIZE) {
1225				VM_BUG_ON_VMA(vma_is_anonymous(vma) &&
1226				    !rwsem_is_locked(&tlb->mm->mmap_sem), vma);
1227				split_huge_pmd(vma, pmd, addr);
1228			} else if (zap_huge_pmd(tlb, vma, pmd, addr))
1229				goto next;
1230			/* fall through */
1231		}
1232		/*
1233		 * Here there can be other concurrent MADV_DONTNEED or
1234		 * trans huge page faults running, and if the pmd is
1235		 * none or trans huge it can change under us. This is
1236		 * because MADV_DONTNEED holds the mmap_sem in read
1237		 * mode.
1238		 */
1239		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
1240			goto next;
1241		next = zap_pte_range(tlb, vma, pmd, addr, next, details);
1242next:
1243		cond_resched();
1244	} while (pmd++, addr = next, addr != end);
1245
1246	return addr;
1247}
1248
1249static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
1250				struct vm_area_struct *vma, pgd_t *pgd,
1251				unsigned long addr, unsigned long end,
1252				struct zap_details *details)
1253{
1254	pud_t *pud;
1255	unsigned long next;
1256
1257	pud = pud_offset(pgd, addr);
1258	do {
1259		next = pud_addr_end(addr, end);
1260		if (pud_none_or_clear_bad(pud))
1261			continue;
1262		next = zap_pmd_range(tlb, vma, pud, addr, next, details);
1263	} while (pud++, addr = next, addr != end);
1264
1265	return addr;
1266}
1267
1268void unmap_page_range(struct mmu_gather *tlb,
1269			     struct vm_area_struct *vma,
1270			     unsigned long addr, unsigned long end,
1271			     struct zap_details *details)
1272{
1273	pgd_t *pgd;
1274	unsigned long next;
1275
1276	BUG_ON(addr >= end);
1277	tlb_start_vma(tlb, vma);
1278	pgd = pgd_offset(vma->vm_mm, addr);
1279	do {
1280		next = pgd_addr_end(addr, end);
1281		if (pgd_none_or_clear_bad(pgd))
1282			continue;
1283		next = zap_pud_range(tlb, vma, pgd, addr, next, details);
1284	} while (pgd++, addr = next, addr != end);
1285	tlb_end_vma(tlb, vma);
1286}
1287
1288
1289static void unmap_single_vma(struct mmu_gather *tlb,
1290		struct vm_area_struct *vma, unsigned long start_addr,
1291		unsigned long end_addr,
1292		struct zap_details *details)
1293{
1294	unsigned long start = max(vma->vm_start, start_addr);
1295	unsigned long end;
1296
1297	if (start >= vma->vm_end)
1298		return;
1299	end = min(vma->vm_end, end_addr);
1300	if (end <= vma->vm_start)
1301		return;
1302
1303	if (vma->vm_file)
1304		uprobe_munmap(vma, start, end);
1305
1306	if (unlikely(vma->vm_flags & VM_PFNMAP))
1307		untrack_pfn(vma, 0, 0);
1308
1309	if (start != end) {
1310		if (unlikely(is_vm_hugetlb_page(vma))) {
1311			/*
1312			 * It is undesirable to test vma->vm_file as it
1313			 * should be non-null for valid hugetlb area.
1314			 * However, vm_file will be NULL in the error
1315			 * cleanup path of mmap_region. When
1316			 * hugetlbfs ->mmap method fails,
1317			 * mmap_region() nullifies vma->vm_file
1318			 * before calling this function to clean up.
1319			 * Since no pte has actually been setup, it is
1320			 * safe to do nothing in this case.
1321			 */
1322			if (vma->vm_file) {
1323				i_mmap_lock_write(vma->vm_file->f_mapping);
1324				__unmap_hugepage_range_final(tlb, vma, start, end, NULL);
1325				i_mmap_unlock_write(vma->vm_file->f_mapping);
1326			}
1327		} else
1328			unmap_page_range(tlb, vma, start, end, details);
1329	}
1330}
1331
1332/**
1333 * unmap_vmas - unmap a range of memory covered by a list of vma's
1334 * @tlb: address of the caller's struct mmu_gather
1335 * @vma: the starting vma
1336 * @start_addr: virtual address at which to start unmapping
1337 * @end_addr: virtual address at which to end unmapping
1338 *
1339 * Unmap all pages in the vma list.
1340 *
 1341 * Only addresses between @start_addr and @end_addr will be unmapped.
1342 *
1343 * The VMA list must be sorted in ascending virtual address order.
1344 *
1345 * unmap_vmas() assumes that the caller will flush the whole unmapped address
1346 * range after unmap_vmas() returns.  So the only responsibility here is to
1347 * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
1348 * drops the lock and schedules.
1349 */
1350void unmap_vmas(struct mmu_gather *tlb,
1351		struct vm_area_struct *vma, unsigned long start_addr,
1352		unsigned long end_addr)
1353{
1354	struct mm_struct *mm = vma->vm_mm;
1355
1356	mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
1357	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
1358		unmap_single_vma(tlb, vma, start_addr, end_addr, NULL);
1359	mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
1360}
1361
1362/**
1363 * zap_page_range - remove user pages in a given range
1364 * @vma: vm_area_struct holding the applicable pages
1365 * @start: starting address of pages to zap
1366 * @size: number of bytes to zap
1367 * @details: details of shared cache invalidation
1368 *
1369 * Caller must protect the VMA list
1370 */
1371void zap_page_range(struct vm_area_struct *vma, unsigned long start,
1372		unsigned long size, struct zap_details *details)
1373{
1374	struct mm_struct *mm = vma->vm_mm;
1375	struct mmu_gather tlb;
1376	unsigned long end = start + size;
1377
1378	lru_add_drain();
1379	tlb_gather_mmu(&tlb, mm, start, end);
1380	update_hiwater_rss(mm);
1381	mmu_notifier_invalidate_range_start(mm, start, end);
1382	for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
1383		unmap_single_vma(&tlb, vma, start, end, details);
1384	mmu_notifier_invalidate_range_end(mm, start, end);
1385	tlb_finish_mmu(&tlb, start, end);
1386}
1387
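/*
 * Usage sketch (hypothetical caller): this is essentially what
 * MADV_DONTNEED does per vma - drop every page in the range and let
 * later faults refill it on demand.
 */
static void example_drop_vma_pages(struct vm_area_struct *vma)
{
	/* details == NULL: no mapping filter, zap everything */
	zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
}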
1388/**
1389 * zap_page_range_single - remove user pages in a given range
1390 * @vma: vm_area_struct holding the applicable pages
1391 * @address: starting address of pages to zap
1392 * @size: number of bytes to zap
1393 * @details: details of shared cache invalidation
1394 *
1395 * The range must fit into one VMA.
1396 */
1397static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
1398		unsigned long size, struct zap_details *details)
1399{
1400	struct mm_struct *mm = vma->vm_mm;
1401	struct mmu_gather tlb;
1402	unsigned long end = address + size;
1403
1404	lru_add_drain();
1405	tlb_gather_mmu(&tlb, mm, address, end);
1406	update_hiwater_rss(mm);
1407	mmu_notifier_invalidate_range_start(mm, address, end);
1408	unmap_single_vma(&tlb, vma, address, end, details);
1409	mmu_notifier_invalidate_range_end(mm, address, end);
1410	tlb_finish_mmu(&tlb, address, end);
1411}
1412
1413/**
1414 * zap_vma_ptes - remove ptes mapping the vma
1415 * @vma: vm_area_struct holding ptes to be zapped
1416 * @address: starting address of pages to zap
1417 * @size: number of bytes to zap
1418 *
1419 * This function only unmaps ptes assigned to VM_PFNMAP vmas.
1420 *
1421 * The entire address range must be fully contained within the vma.
1422 *
1423 * Returns 0 if successful.
1424 */
1425int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
1426		unsigned long size)
1427{
1428	if (address < vma->vm_start || address + size > vma->vm_end ||
1429	    		!(vma->vm_flags & VM_PFNMAP))
1430		return -1;
1431	zap_page_range_single(vma, address, size, NULL);
1432	return 0;
1433}
1434EXPORT_SYMBOL_GPL(zap_vma_ptes);
1435
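/*
 * Usage sketch (hypothetical driver code): a VM_PFNMAP driver revoking
 * the ptes it inserted earlier, e.g. before its backing device memory
 * goes away; faults after this hit the driver's handler again.
 */
static int example_revoke_mapping(struct vm_area_struct *vma)
{
	return zap_vma_ptes(vma, vma->vm_start,
			    vma->vm_end - vma->vm_start);
}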
1436pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
1437			spinlock_t **ptl)
1438{
1439	pgd_t * pgd = pgd_offset(mm, addr);
1440	pud_t * pud = pud_alloc(mm, pgd, addr);
1441	if (pud) {
1442		pmd_t * pmd = pmd_alloc(mm, pud, addr);
1443		if (pmd) {
1444			VM_BUG_ON(pmd_trans_huge(*pmd));
1445			return pte_alloc_map_lock(mm, pmd, addr, ptl);
1446		}
1447	}
1448	return NULL;
1449}
1450
1451/*
1452 * This is the old fallback for page remapping.
1453 *
1454 * For historical reasons, it only allows reserved pages. Only
1455 * old drivers should use this, and they needed to mark their
1456 * pages reserved for the old functions anyway.
1457 */
1458static int insert_page(struct vm_area_struct *vma, unsigned long addr,
1459			struct page *page, pgprot_t prot)
1460{
1461	struct mm_struct *mm = vma->vm_mm;
1462	int retval;
1463	pte_t *pte;
1464	spinlock_t *ptl;
1465
1466	retval = -EINVAL;
1467	if (PageAnon(page))
1468		goto out;
1469	retval = -ENOMEM;
1470	flush_dcache_page(page);
1471	pte = get_locked_pte(mm, addr, &ptl);
1472	if (!pte)
1473		goto out;
1474	retval = -EBUSY;
1475	if (!pte_none(*pte))
1476		goto out_unlock;
1477
1478	/* Ok, finally just insert the thing.. */
1479	get_page(page);
1480	inc_mm_counter_fast(mm, mm_counter_file(page));
1481	page_add_file_rmap(page);
1482	set_pte_at(mm, addr, pte, mk_pte(page, prot));
1483
1484	retval = 0;
1485	pte_unmap_unlock(pte, ptl);
1486	return retval;
1487out_unlock:
1488	pte_unmap_unlock(pte, ptl);
1489out:
1490	return retval;
1491}
1492
1493/**
1494 * vm_insert_page - insert single page into user vma
1495 * @vma: user vma to map to
1496 * @addr: target user address of this page
1497 * @page: source kernel page
1498 *
1499 * This allows drivers to insert individual pages they've allocated
1500 * into a user vma.
1501 *
1502 * The page has to be a nice clean _individual_ kernel allocation.
1503 * If you allocate a compound page, you need to have marked it as
1504 * such (__GFP_COMP), or manually just split the page up yourself
1505 * (see split_page()).
1506 *
1507 * NOTE! Traditionally this was done with "remap_pfn_range()" which
1508 * took an arbitrary page protection parameter. This doesn't allow
1509 * that. Your vma protection will have to be set up correctly, which
1510 * means that if you want a shared writable mapping, you'd better
1511 * ask for a shared writable mapping!
1512 *
1513 * The page does not need to be reserved.
1514 *
1515 * Usually this function is called from f_op->mmap() handler
1516 * under mm->mmap_sem write-lock, so it can change vma->vm_flags.
1517 * Caller must set VM_MIXEDMAP on vma if it wants to call this
1518 * function from other places, for example from page-fault handler.
1519 */
1520int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
1521			struct page *page)
1522{
1523	if (addr < vma->vm_start || addr >= vma->vm_end)
1524		return -EFAULT;
1525	if (!page_count(page))
1526		return -EINVAL;
1527	if (!(vma->vm_flags & VM_MIXEDMAP)) {
1528		BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
1529		BUG_ON(vma->vm_flags & VM_PFNMAP);
1530		vma->vm_flags |= VM_MIXEDMAP;
1531	}
1532	return insert_page(vma, addr, page, vma->vm_page_prot);
1533}
1534EXPORT_SYMBOL(vm_insert_page);
1535
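/*
 * Usage sketch (hypothetical driver code, error paths trimmed): an
 * f_op->mmap handler inserting one kernel-allocated page.  The page's
 * refcount is raised by insert_page(), so the driver keeps its own
 * reference and frees it in ->release (not shown).
 */
static int example_mmap_one_page(struct file *file, struct vm_area_struct *vma)
{
	struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (!page)
		return -ENOMEM;
	/* under mmap_sem write-lock here, so VM_MIXEDMAP may be set */
	return vm_insert_page(vma, vma->vm_start, page);
}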
1536static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
1537			pfn_t pfn, pgprot_t prot)
1538{
1539	struct mm_struct *mm = vma->vm_mm;
1540	int retval;
1541	pte_t *pte, entry;
1542	spinlock_t *ptl;
1543
1544	retval = -ENOMEM;
1545	pte = get_locked_pte(mm, addr, &ptl);
1546	if (!pte)
1547		goto out;
1548	retval = -EBUSY;
1549	if (!pte_none(*pte))
1550		goto out_unlock;
1551
1552	/* Ok, finally just insert the thing.. */
1553	if (pfn_t_devmap(pfn))
1554		entry = pte_mkdevmap(pfn_t_pte(pfn, prot));
1555	else
1556		entry = pte_mkspecial(pfn_t_pte(pfn, prot));
1557	set_pte_at(mm, addr, pte, entry);
1558	update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
1559
1560	retval = 0;
1561out_unlock:
1562	pte_unmap_unlock(pte, ptl);
1563out:
1564	return retval;
1565}
1566
1567/**
1568 * vm_insert_pfn - insert single pfn into user vma
1569 * @vma: user vma to map to
1570 * @addr: target user address of this page
1571 * @pfn: source kernel pfn
1572 *
1573 * Similar to vm_insert_page, this allows drivers to insert individual pages
1574 * they've allocated into a user vma. Same comments apply.
1575 *
1576 * This function should only be called from a vm_ops->fault handler, and
 1577 * in that case the handler should return VM_FAULT_NOPAGE.
1578 *
1579 * vma cannot be a COW mapping.
1580 *
1581 * As this is called only for pages that do not currently exist, we
1582 * do not need to flush old virtual caches or the TLB.
1583 */
1584int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
1585			unsigned long pfn)
1586{
1587	return vm_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
1588}
1589EXPORT_SYMBOL(vm_insert_pfn);
1590
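/*
 * Usage sketch (hypothetical driver code): a vm_ops->fault handler for
 * a VM_PFNMAP | VM_IO region backed by device memory at
 * example_phys_base.  -EBUSY just means another thread installed the
 * pte first, which is fine.
 */
static phys_addr_t example_phys_base;	/* hypothetical device base */

static int example_pfn_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long pfn = (example_phys_base >> PAGE_SHIFT) + vmf->pgoff;
	int err = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);

	if (err && err != -EBUSY)
		return VM_FAULT_SIGBUS;
	return VM_FAULT_NOPAGE;
}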
1591/**
1592 * vm_insert_pfn_prot - insert single pfn into user vma with specified pgprot
1593 * @vma: user vma to map to
1594 * @addr: target user address of this page
1595 * @pfn: source kernel pfn
1596 * @pgprot: pgprot flags for the inserted page
1597 *
1598 * This is exactly like vm_insert_pfn, except that it allows drivers to
 1599 * override pgprot on a per-page basis.
1600 *
1601 * This only makes sense for IO mappings, and it makes no sense for
1602 * cow mappings.  In general, using multiple vmas is preferable;
1603 * vm_insert_pfn_prot should only be used if using multiple VMAs is
1604 * impractical.
1605 */
1606int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
1607			unsigned long pfn, pgprot_t pgprot)
1608{
1609	int ret;
1610	/*
1611	 * Technically, architectures with pte_special can avoid all these
1612	 * restrictions (same for remap_pfn_range).  However we would like
1613	 * consistency in testing and feature parity among all, so we should
1614	 * try to keep these invariants in place for everybody.
1615	 */
1616	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
1617	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
1618						(VM_PFNMAP|VM_MIXEDMAP));
1619	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
1620	BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
1621
1622	if (addr < vma->vm_start || addr >= vma->vm_end)
1623		return -EFAULT;
1624	if (track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV)))
1625		return -EINVAL;
1626
1627	ret = insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot);
1628
1629	return ret;
1630}
1631EXPORT_SYMBOL(vm_insert_pfn_prot);
1632
1633int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
1634			pfn_t pfn)
1635{
1636	BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
1637
1638	if (addr < vma->vm_start || addr >= vma->vm_end)
1639		return -EFAULT;
1640
1641	/*
1642	 * If we don't have pte special, then we have to use the pfn_valid()
1643	 * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
1644	 * refcount the page if pfn_valid is true (hence insert_page rather
1645	 * than insert_pfn).  If a zero_pfn were inserted into a VM_MIXEDMAP
 1646	 * without pte special, it would then be refcounted as a normal page.
1647	 */
1648	if (!HAVE_PTE_SPECIAL && !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) {
1649		struct page *page;
1650
1651		/*
1652		 * At this point we are committed to insert_page()
1653		 * regardless of whether the caller specified flags that
1654		 * result in pfn_t_has_page() == false.
1655		 */
1656		page = pfn_to_page(pfn_t_to_pfn(pfn));
1657		return insert_page(vma, addr, page, vma->vm_page_prot);
1658	}
1659	return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
1660}
1661EXPORT_SYMBOL(vm_insert_mixed);
1662
1663/*
 1664 * Maps a range of physical memory into the requested pages. The old
 1665 * mappings are removed. Any references to nonexistent pages result
 1666 * in null mappings (currently treated as "copy-on-access").
1667 */
1668static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
1669			unsigned long addr, unsigned long end,
1670			unsigned long pfn, pgprot_t prot)
1671{
1672	pte_t *pte;
1673	spinlock_t *ptl;
1674
1675	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
1676	if (!pte)
1677		return -ENOMEM;
1678	arch_enter_lazy_mmu_mode();
1679	do {
1680		BUG_ON(!pte_none(*pte));
1681		set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
1682		pfn++;
1683	} while (pte++, addr += PAGE_SIZE, addr != end);
1684	arch_leave_lazy_mmu_mode();
1685	pte_unmap_unlock(pte - 1, ptl);
1686	return 0;
1687}
1688
1689static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
1690			unsigned long addr, unsigned long end,
1691			unsigned long pfn, pgprot_t prot)
1692{
1693	pmd_t *pmd;
1694	unsigned long next;
1695
1696	pfn -= addr >> PAGE_SHIFT;
1697	pmd = pmd_alloc(mm, pud, addr);
1698	if (!pmd)
1699		return -ENOMEM;
1700	VM_BUG_ON(pmd_trans_huge(*pmd));
1701	do {
1702		next = pmd_addr_end(addr, end);
1703		if (remap_pte_range(mm, pmd, addr, next,
1704				pfn + (addr >> PAGE_SHIFT), prot))
1705			return -ENOMEM;
1706	} while (pmd++, addr = next, addr != end);
1707	return 0;
1708}
1709
1710static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
1711			unsigned long addr, unsigned long end,
1712			unsigned long pfn, pgprot_t prot)
1713{
1714	pud_t *pud;
1715	unsigned long next;
1716
1717	pfn -= addr >> PAGE_SHIFT;
1718	pud = pud_alloc(mm, pgd, addr);
1719	if (!pud)
1720		return -ENOMEM;
1721	do {
1722		next = pud_addr_end(addr, end);
1723		if (remap_pmd_range(mm, pud, addr, next,
1724				pfn + (addr >> PAGE_SHIFT), prot))
1725			return -ENOMEM;
1726	} while (pud++, addr = next, addr != end);
1727	return 0;
1728}
1729
1730/**
1731 * remap_pfn_range - remap kernel memory to userspace
1732 * @vma: user vma to map to
1733 * @addr: target user address to start at
1734 * @pfn: physical address of kernel memory
1735 * @size: size of map area
1736 * @prot: page protection flags for this mapping
1737 *
1738 *  Note: this is only safe if the mm semaphore is held when called.
1739 */
1740int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
1741		    unsigned long pfn, unsigned long size, pgprot_t prot)
1742{
1743	pgd_t *pgd;
1744	unsigned long next;
1745	unsigned long end = addr + PAGE_ALIGN(size);
1746	struct mm_struct *mm = vma->vm_mm;
1747	int err;
1748
1749	/*
1750	 * Physically remapped pages are special. Tell the
1751	 * rest of the world about it:
1752	 *   VM_IO tells people not to look at these pages
1753	 *	(accesses can have side effects).
1754	 *   VM_PFNMAP tells the core MM that the base pages are just
1755	 *	raw PFN mappings, and do not have a "struct page" associated
1756	 *	with them.
1757	 *   VM_DONTEXPAND
1758	 *      Disable vma merging and expanding with mremap().
1759	 *   VM_DONTDUMP
1760	 *      Omit vma from core dump, even when VM_IO turned off.
1761	 *
1762	 * There's a horrible special case to handle copy-on-write
1763	 * behaviour that some programs depend on. We mark the "original"
1764	 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
1765	 * See vm_normal_page() for details.
1766	 */
1767	if (is_cow_mapping(vma->vm_flags)) {
1768		if (addr != vma->vm_start || end != vma->vm_end)
1769			return -EINVAL;
1770		vma->vm_pgoff = pfn;
1771	}
1772
1773	err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size));
1774	if (err)
1775		return -EINVAL;
1776
1777	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
1778
1779	BUG_ON(addr >= end);
1780	pfn -= addr >> PAGE_SHIFT;
1781	pgd = pgd_offset(mm, addr);
1782	flush_cache_range(vma, addr, end);
1783	do {
1784		next = pgd_addr_end(addr, end);
1785		err = remap_pud_range(mm, pgd, addr, next,
1786				pfn + (addr >> PAGE_SHIFT), prot);
1787		if (err)
1788			break;
1789	} while (pgd++, addr = next, addr != end);
1790
1791	if (err)
1792		untrack_pfn(vma, pfn, PAGE_ALIGN(size));
1793
1794	return err;
1795}
1796EXPORT_SYMBOL(remap_pfn_range);
1797
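/*
 * Usage sketch (hypothetical driver code): the classic mmap handler
 * built on remap_pfn_range(), honouring the offset the user passed to
 * mmap() via vm_pgoff.
 */
static phys_addr_t example_phys_start;	/* hypothetical region base */

static int example_remap_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn = (example_phys_start >> PAGE_SHIFT) + vma->vm_pgoff;

	return remap_pfn_range(vma, vma->vm_start, pfn,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}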
1798/**
1799 * vm_iomap_memory - remap memory to userspace
1800 * @vma: user vma to map to
1801 * @start: start of area
1802 * @len: size of area
1803 *
1804 * This is a simplified io_remap_pfn_range() for common driver use. The
1805 * driver just needs to give us the physical memory range to be mapped,
1806 * we'll figure out the rest from the vma information.
1807 *
1808 * NOTE! Some drivers might want to tweak vma->vm_page_prot first to get
 1809 * whatever write-combining or similar behaviour they need.
1810 */
1811int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
1812{
1813	unsigned long vm_len, pfn, pages;
1814
1815	/* Check that the physical memory area passed in looks valid */
1816	if (start + len < start)
1817		return -EINVAL;
1818	/*
1819	 * You *really* shouldn't map things that aren't page-aligned,
1820	 * but we've historically allowed it because IO memory might
1821	 * just have smaller alignment.
1822	 */
1823	len += start & ~PAGE_MASK;
1824	pfn = start >> PAGE_SHIFT;
1825	pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
1826	if (pfn + pages < pfn)
1827		return -EINVAL;
1828
1829	/* We start the mapping 'vm_pgoff' pages into the area */
1830	if (vma->vm_pgoff > pages)
1831		return -EINVAL;
1832	pfn += vma->vm_pgoff;
1833	pages -= vma->vm_pgoff;
1834
1835	/* Can we fit all of the mapping? */
1836	vm_len = vma->vm_end - vma->vm_start;
1837	if (vm_len >> PAGE_SHIFT > pages)
1838		return -EINVAL;
1839
1840	/* Ok, let it rip */
1841	return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
1842}
1843EXPORT_SYMBOL(vm_iomap_memory);
1844
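/*
 * Usage sketch (hypothetical driver code): with vm_iomap_memory() the
 * handler above shrinks to a one-liner; the pgoff and length checks
 * are done for us.  example_bar_start/len stand in for real resource
 * values such as pci_resource_start()/pci_resource_len().
 */
static phys_addr_t example_bar_start;	/* hypothetical BAR base */
static unsigned long example_bar_len;	/* hypothetical BAR length */

static int example_iomap_mmap(struct file *file, struct vm_area_struct *vma)
{
	return vm_iomap_memory(vma, example_bar_start, example_bar_len);
}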
1845static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
1846				     unsigned long addr, unsigned long end,
1847				     pte_fn_t fn, void *data)
1848{
1849	pte_t *pte;
1850	int err;
1851	pgtable_t token;
1852	spinlock_t *uninitialized_var(ptl);
1853
1854	pte = (mm == &init_mm) ?
1855		pte_alloc_kernel(pmd, addr) :
1856		pte_alloc_map_lock(mm, pmd, addr, &ptl);
1857	if (!pte)
1858		return -ENOMEM;
1859
1860	BUG_ON(pmd_huge(*pmd));
1861
1862	arch_enter_lazy_mmu_mode();
1863
1864	token = pmd_pgtable(*pmd);
1865
1866	do {
1867		err = fn(pte++, token, addr, data);
1868		if (err)
1869			break;
1870	} while (addr += PAGE_SIZE, addr != end);
1871
1872	arch_leave_lazy_mmu_mode();
1873
1874	if (mm != &init_mm)
1875		pte_unmap_unlock(pte-1, ptl);
1876	return err;
1877}
1878
1879static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
1880				     unsigned long addr, unsigned long end,
1881				     pte_fn_t fn, void *data)
1882{
1883	pmd_t *pmd;
1884	unsigned long next;
1885	int err;
1886
1887	BUG_ON(pud_huge(*pud));
1888
1889	pmd = pmd_alloc(mm, pud, addr);
1890	if (!pmd)
1891		return -ENOMEM;
1892	do {
1893		next = pmd_addr_end(addr, end);
1894		err = apply_to_pte_range(mm, pmd, addr, next, fn, data);
1895		if (err)
1896			break;
1897	} while (pmd++, addr = next, addr != end);
1898	return err;
1899}
1900
1901static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
1902				     unsigned long addr, unsigned long end,
1903				     pte_fn_t fn, void *data)
1904{
1905	pud_t *pud;
1906	unsigned long next;
1907	int err;
1908
1909	pud = pud_alloc(mm, pgd, addr);
1910	if (!pud)
1911		return -ENOMEM;
1912	do {
1913		next = pud_addr_end(addr, end);
1914		err = apply_to_pmd_range(mm, pud, addr, next, fn, data);
1915		if (err)
1916			break;
1917	} while (pud++, addr = next, addr != end);
1918	return err;
1919}
1920
1921/*
1922 * Scan a region of virtual memory, filling in page tables as necessary
1923 * and calling a provided function on each leaf page table.
1924 */
1925int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
1926			unsigned long size, pte_fn_t fn, void *data)
1927{
1928	pgd_t *pgd;
1929	unsigned long next;
1930	unsigned long end = addr + size;
1931	int err;
1932
1933	if (WARN_ON(addr >= end))
1934		return -EINVAL;
1935
1936	pgd = pgd_offset(mm, addr);
1937	do {
1938		next = pgd_addr_end(addr, end);
1939		err = apply_to_pud_range(mm, pgd, addr, next, fn, data);
1940		if (err)
1941			break;
1942	} while (pgd++, addr = next, addr != end);
1943
1944	return err;
1945}
1946EXPORT_SYMBOL_GPL(apply_to_page_range);
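/*
 * Illustrative sketch (editor's addition, not part of the kernel source):
 * the pte_fn_t callback passed to apply_to_page_range() is invoked on
 * every leaf pte, as apply_to_pte_range() above shows. A hypothetical
 * callback that counts populated entries could look like:
 *
 *	static int count_present_pte(pte_t *pte, pgtable_t token,
 *				     unsigned long addr, void *data)
 *	{
 *		unsigned long *count = data;
 *
 *		if (!pte_none(*pte))
 *			(*count)++;
 *		return 0;
 *	}
 *
 *	err = apply_to_page_range(mm, addr, size, count_present_pte, &count);
 */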
1947
1948/*
1949 * handle_pte_fault chooses page fault handler according to an entry which was
1950 * read non-atomically.  Before making any commitment, on those architectures
1951 * or configurations (e.g. i386 with PAE) which might give a mix of unmatched
1952 * parts, do_swap_page must check under lock before unmapping the pte and
1953 * proceeding (but do_wp_page is only called after already making such a check;
1954 * and do_anonymous_page can safely check later on).
1955 */
1956static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
1957				pte_t *page_table, pte_t orig_pte)
1958{
1959	int same = 1;
1960#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
1961	if (sizeof(pte_t) > sizeof(unsigned long)) {
1962		spinlock_t *ptl = pte_lockptr(mm, pmd);
1963		spin_lock(ptl);
1964		same = pte_same(*page_table, orig_pte);
1965		spin_unlock(ptl);
1966	}
1967#endif
1968	pte_unmap(page_table);
1969	return same;
1970}
1971
1972static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
1973{
1974	debug_dma_assert_idle(src);
1975
1976	/*
1977	 * If the source page was a PFN mapping, we don't have
1978	 * a "struct page" for it. We do a best-effort copy by
1979	 * just copying from the original user address. If that
1980	 * fails, we just zero-fill it. Live with it.
1981	 */
1982	if (unlikely(!src)) {
1983		void *kaddr = kmap_atomic(dst);
1984		void __user *uaddr = (void __user *)(va & PAGE_MASK);
1985
1986		/*
1987		 * This really shouldn't fail, because the page is there
1988		 * in the page tables. But it might just be unreadable,
1989		 * in which case we just give up and fill the result with
1990		 * zeroes.
1991		 */
1992		if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
1993			clear_page(kaddr);
1994		kunmap_atomic(kaddr);
1995		flush_dcache_page(dst);
1996	} else
1997		copy_user_highpage(dst, src, va, vma);
1998}
1999
2000static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
2001{
2002	struct file *vm_file = vma->vm_file;
2003
2004	if (vm_file)
2005		return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO;
2006
2007	/*
2008	 * Special mappings (e.g. VDSO) do not have any file so fake
2009	 * a default GFP_KERNEL for them.
2010	 */
2011	return GFP_KERNEL;
2012}
2013
2014/*
2015 * Notify the address space that the page is about to become writable so that
2016 * it can prohibit this or wait for the page to get into an appropriate state.
2017 *
2018 * We do this without the lock held, so that it can sleep if it needs to.
2019 */
2020static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
2021	       unsigned long address)
2022{
2023	struct vm_fault vmf;
2024	int ret;
2025
2026	vmf.virtual_address = (void __user *)(address & PAGE_MASK);
2027	vmf.pgoff = page->index;
2028	vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
2029	vmf.gfp_mask = __get_fault_gfp_mask(vma);
2030	vmf.page = page;
2031	vmf.cow_page = NULL;
2032
2033	ret = vma->vm_ops->page_mkwrite(vma, &vmf);
2034	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
2035		return ret;
2036	if (unlikely(!(ret & VM_FAULT_LOCKED))) {
2037		lock_page(page);
2038		if (!page->mapping) {
2039			unlock_page(page);
2040			return 0; /* retry */
2041		}
2042		ret |= VM_FAULT_LOCKED;
2043	} else
2044		VM_BUG_ON_PAGE(!PageLocked(page), page);
2045	return ret;
2046}
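/*
 * Illustrative sketch (editor's addition, not part of the kernel source):
 * a minimal ->page_mkwrite() honouring the contract above locks the page,
 * bails out with VM_FAULT_NOPAGE on a truncate race, and reports the page
 * locked. "my_page_mkwrite" is a hypothetical name:
 *
 *	static int my_page_mkwrite(struct vm_area_struct *vma,
 *				   struct vm_fault *vmf)
 *	{
 *		struct page *page = vmf->page;
 *		struct inode *inode = file_inode(vma->vm_file);
 *
 *		lock_page(page);
 *		if (page->mapping != inode->i_mapping) {
 *			unlock_page(page);
 *			return VM_FAULT_NOPAGE;
 *		}
 *		... get backing storage ready for the write ...
 *		return VM_FAULT_LOCKED;
 *	}
 */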
2047
2048/*
2049 * Handle write page faults for pages that can be reused in the current vma
2050 *
2051	 * This can happen either due to the mapping having the VM_SHARED flag,
2052	 * or due to us holding the last remaining reference to the page. In either
2053 * case, all we need to do here is to mark the page as writable and update
2054 * any related book-keeping.
2055 */
2056static inline int wp_page_reuse(struct mm_struct *mm,
2057			struct vm_area_struct *vma, unsigned long address,
2058			pte_t *page_table, spinlock_t *ptl, pte_t orig_pte,
2059			struct page *page, int page_mkwrite,
2060			int dirty_shared)
2061	__releases(ptl)
2062{
2063	pte_t entry;
2064	/*
2065	 * Clear the pages cpupid information as the existing
2066	 * information potentially belongs to a now completely
2067	 * unrelated process.
2068	 */
2069	if (page)
2070		page_cpupid_xchg_last(page, (1 << LAST_CPUPID_SHIFT) - 1);
2071
2072	flush_cache_page(vma, address, pte_pfn(orig_pte));
2073	entry = pte_mkyoung(orig_pte);
2074	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2075	if (ptep_set_access_flags(vma, address, page_table, entry, 1))
2076		update_mmu_cache(vma, address, page_table);
2077	pte_unmap_unlock(page_table, ptl);
2078
2079	if (dirty_shared) {
2080		struct address_space *mapping;
2081		int dirtied;
2082
2083		if (!page_mkwrite)
2084			lock_page(page);
2085
2086		dirtied = set_page_dirty(page);
2087		VM_BUG_ON_PAGE(PageAnon(page), page);
2088		mapping = page->mapping;
2089		unlock_page(page);
2090		put_page(page);
2091
2092		if ((dirtied || page_mkwrite) && mapping) {
2093			/*
2094			 * Some device drivers do not set page.mapping
2095			 * but still dirty their pages
2096			 */
2097			balance_dirty_pages_ratelimited(mapping);
2098		}
2099
2100		if (!page_mkwrite)
2101			file_update_time(vma->vm_file);
2102	}
2103
2104	return VM_FAULT_WRITE;
2105}
2106
2107/*
2108 * Handle the case of a page which we actually need to copy to a new page.
2109 *
2110 * Called with mmap_sem locked and the old page referenced, but
2111 * without the ptl held.
2112 *
2113 * High level logic flow:
2114 *
2115 * - Allocate a page, copy the content of the old page to the new one.
2116	 * - Handle bookkeeping and accounting - cgroups, mmu-notifiers, etc.
2117 * - Take the PTL. If the pte changed, bail out and release the allocated page
2118 * - If the pte is still the way we remember it, update the page table and all
2119 *   relevant references. This includes dropping the reference the page-table
2120 *   held to the old page, as well as updating the rmap.
2121 * - In any case, unlock the PTL and drop the reference we took to the old page.
2122 */
2123static int wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma,
2124			unsigned long address, pte_t *page_table, pmd_t *pmd,
2125			pte_t orig_pte, struct page *old_page)
2126{
2127	struct page *new_page = NULL;
2128	spinlock_t *ptl = NULL;
2129	pte_t entry;
2130	int page_copied = 0;
2131	const unsigned long mmun_start = address & PAGE_MASK;	/* For mmu_notifiers */
2132	const unsigned long mmun_end = mmun_start + PAGE_SIZE;	/* For mmu_notifiers */
2133	struct mem_cgroup *memcg;
2134
2135	if (unlikely(anon_vma_prepare(vma)))
2136		goto oom;
2137
2138	if (is_zero_pfn(pte_pfn(orig_pte))) {
2139		new_page = alloc_zeroed_user_highpage_movable(vma, address);
2140		if (!new_page)
2141			goto oom;
2142	} else {
2143		new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
2144		if (!new_page)
2145			goto oom;
2146		cow_user_page(new_page, old_page, address, vma);
2147	}
2148
2149	if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg, false))
2150		goto oom_free_new;
2151
2152	__SetPageUptodate(new_page);
2153
2154	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
2155
2156	/*
2157	 * Re-check the pte - we dropped the lock
2158	 */
2159	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
2160	if (likely(pte_same(*page_table, orig_pte))) {
2161		if (old_page) {
2162			if (!PageAnon(old_page)) {
2163				dec_mm_counter_fast(mm,
2164						mm_counter_file(old_page));
2165				inc_mm_counter_fast(mm, MM_ANONPAGES);
2166			}
2167		} else {
2168			inc_mm_counter_fast(mm, MM_ANONPAGES);
2169		}
2170		flush_cache_page(vma, address, pte_pfn(orig_pte));
2171		entry = mk_pte(new_page, vma->vm_page_prot);
2172		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2173		/*
2174		 * Clear the pte entry and flush it first, before updating the
2175		 * pte with the new entry. This will avoid a race condition
2176		 * seen in the presence of one thread doing SMC and another
2177		 * thread doing COW.
2178		 */
2179		ptep_clear_flush_notify(vma, address, page_table);
2180		page_add_new_anon_rmap(new_page, vma, address, false);
2181		mem_cgroup_commit_charge(new_page, memcg, false, false);
2182		lru_cache_add_active_or_unevictable(new_page, vma);
2183		/*
2184		 * We call the notify macro here because, when using secondary
2185		 * mmu page tables (such as kvm shadow page tables), we want the
2186		 * new page to be mapped directly into the secondary page table.
2187		 */
2188		set_pte_at_notify(mm, address, page_table, entry);
2189		update_mmu_cache(vma, address, page_table);
2190		if (old_page) {
2191			/*
2192			 * Only after switching the pte to the new page may
2193			 * we remove the mapcount here. Otherwise another
2194			 * process may come and find the rmap count decremented
2195			 * before the pte is switched to the new page, and
2196			 * "reuse" the old page writing into it while our pte
2197			 * here still points into it and can be read by other
2198			 * threads.
2199			 *
2200			 * The critical issue is to order this
2201			 * page_remove_rmap with the ptep_clear_flush above.
2202			 * Those stores are ordered by (if nothing else,)
2203			 * the barrier present in the atomic_add_negative
2204			 * in page_remove_rmap.
2205			 *
2206			 * Then the TLB flush in ptep_clear_flush ensures that
2207			 * no process can access the old page before the
2208			 * decremented mapcount is visible. And the old page
2209			 * cannot be reused until after the decremented
2210			 * mapcount is visible. So transitively, TLBs to
2211			 * old page will be flushed before it can be reused.
2212			 */
2213			page_remove_rmap(old_page, false);
2214		}
2215
2216		/* Free the old page.. */
2217		new_page = old_page;
2218		page_copied = 1;
2219	} else {
2220		mem_cgroup_cancel_charge(new_page, memcg, false);
2221	}
2222
2223	if (new_page)
2224		put_page(new_page);
2225
2226	pte_unmap_unlock(page_table, ptl);
2227	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
2228	if (old_page) {
2229		/*
2230		 * Don't let another task, with possibly unlocked vma,
2231		 * keep the mlocked page.
2232		 */
2233		if (page_copied && (vma->vm_flags & VM_LOCKED)) {
2234			lock_page(old_page);	/* LRU manipulation */
2235			if (PageMlocked(old_page))
2236				munlock_vma_page(old_page);
2237			unlock_page(old_page);
2238		}
2239		put_page(old_page);
2240	}
2241	return page_copied ? VM_FAULT_WRITE : 0;
2242oom_free_new:
2243	put_page(new_page);
2244oom:
2245	if (old_page)
2246		put_page(old_page);
2247	return VM_FAULT_OOM;
2248}
2249
2250/*
2251 * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED
2252 * mapping
2253 */
2254static int wp_pfn_shared(struct mm_struct *mm,
2255			struct vm_area_struct *vma, unsigned long address,
2256			pte_t *page_table, spinlock_t *ptl, pte_t orig_pte,
2257			pmd_t *pmd)
2258{
2259	if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) {
2260		struct vm_fault vmf = {
2261			.page = NULL,
2262			.pgoff = linear_page_index(vma, address),
2263			.virtual_address = (void __user *)(address & PAGE_MASK),
2264			.flags = FAULT_FLAG_WRITE | FAULT_FLAG_MKWRITE,
2265		};
2266		int ret;
2267
2268		pte_unmap_unlock(page_table, ptl);
2269		ret = vma->vm_ops->pfn_mkwrite(vma, &vmf);
2270		if (ret & VM_FAULT_ERROR)
2271			return ret;
2272		page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
2273		/*
2274		 * We might have raced with another page fault while we
2275		 * released the pte_offset_map_lock.
2276		 */
2277		if (!pte_same(*page_table, orig_pte)) {
2278			pte_unmap_unlock(page_table, ptl);
2279			return 0;
2280		}
2281	}
2282	return wp_page_reuse(mm, vma, address, page_table, ptl, orig_pte,
2283			     NULL, 0, 0);
2284}
2285
2286static int wp_page_shared(struct mm_struct *mm, struct vm_area_struct *vma,
2287			  unsigned long address, pte_t *page_table,
2288			  pmd_t *pmd, spinlock_t *ptl, pte_t orig_pte,
2289			  struct page *old_page)
2290	__releases(ptl)
2291{
2292	int page_mkwrite = 0;
2293
2294	get_page(old_page);
2295
2296	if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
2297		int tmp;
2298
2299		pte_unmap_unlock(page_table, ptl);
2300		tmp = do_page_mkwrite(vma, old_page, address);
2301		if (unlikely(!tmp || (tmp &
2302				      (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
2303			put_page(old_page);
2304			return tmp;
2305		}
2306		/*
2307		 * Since we dropped the lock we need to revalidate
2308		 * the PTE as someone else may have changed it.  If
2309		 * they did, we just return, as we can count on the
2310		 * MMU to tell us if they didn't also make it writable.
2311		 */
2312		page_table = pte_offset_map_lock(mm, pmd, address,
2313						 &ptl);
2314		if (!pte_same(*page_table, orig_pte)) {
2315			unlock_page(old_page);
2316			pte_unmap_unlock(page_table, ptl);
2317			put_page(old_page);
2318			return 0;
2319		}
2320		page_mkwrite = 1;
2321	}
2322
2323	return wp_page_reuse(mm, vma, address, page_table, ptl,
2324			     orig_pte, old_page, page_mkwrite, 1);
2325}
2326
2327/*
2328 * This routine handles present pages, when users try to write
2329 * to a shared page. It is done by copying the page to a new address
2330 * and decrementing the shared-page counter for the old page.
2331 *
2332 * Note that this routine assumes that the protection checks have been
2333 * done by the caller (the low-level page fault routine in most cases).
2334 * Thus we can safely just mark it writable once we've done any necessary
2335 * COW.
2336 *
2337 * We also mark the page dirty at this point even though the page will
2338 * change only once the write actually happens. This avoids a few races,
2339 * and potentially makes it more efficient.
2340 *
2341 * We enter with non-exclusive mmap_sem (to exclude vma changes,
2342 * but allow concurrent faults), with pte both mapped and locked.
2343 * We return with mmap_sem still held, but pte unmapped and unlocked.
2344 */
2345static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
2346		unsigned long address, pte_t *page_table, pmd_t *pmd,
2347		spinlock_t *ptl, pte_t orig_pte)
2348	__releases(ptl)
2349{
2350	struct page *old_page;
2351
2352	old_page = vm_normal_page(vma, address, orig_pte);
2353	if (!old_page) {
2354		/*
2355		 * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
2356		 * VM_PFNMAP VMA.
2357		 *
2358		 * We should not cow pages in a shared writeable mapping.
2359		 * Just mark the pages writable and/or call ops->pfn_mkwrite.
2360		 */
2361		if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
2362				     (VM_WRITE|VM_SHARED))
2363			return wp_pfn_shared(mm, vma, address, page_table, ptl,
2364					     orig_pte, pmd);
2365
2366		pte_unmap_unlock(page_table, ptl);
2367		return wp_page_copy(mm, vma, address, page_table, pmd,
2368				    orig_pte, old_page);
2369	}
2370
2371	/*
2372	 * Take out anonymous pages first; anonymous shared vmas are
2373	 * not dirty accountable.
2374	 */
2375	if (PageAnon(old_page) && !PageKsm(old_page)) {
2376		int total_mapcount;
2377		if (!trylock_page(old_page)) {
2378			get_page(old_page);
2379			pte_unmap_unlock(page_table, ptl);
2380			lock_page(old_page);
2381			page_table = pte_offset_map_lock(mm, pmd, address,
2382							 &ptl);
2383			if (!pte_same(*page_table, orig_pte)) {
2384				unlock_page(old_page);
2385				pte_unmap_unlock(page_table, ptl);
2386				put_page(old_page);
2387				return 0;
2388			}
2389			put_page(old_page);
2390		}
2391		if (reuse_swap_page(old_page, &total_mapcount)) {
2392			if (total_mapcount == 1) {
2393				/*
2394				 * The page is all ours. Move it to
2395				 * our anon_vma so the rmap code will
2396				 * not search our parent or siblings.
2397				 * Protected against the rmap code by
2398				 * the page lock.
2399				 */
2400				page_move_anon_rmap(compound_head(old_page),
2401						    vma, address);
2402			}
2403			unlock_page(old_page);
2404			return wp_page_reuse(mm, vma, address, page_table, ptl,
2405					     orig_pte, old_page, 0, 0);
2406		}
2407		unlock_page(old_page);
2408	} else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
2409					(VM_WRITE|VM_SHARED))) {
2410		return wp_page_shared(mm, vma, address, page_table, pmd,
2411				      ptl, orig_pte, old_page);
2412	}
2413
2414	/*
2415	 * Ok, we need to copy. Oh, well..
2416	 */
2417	get_page(old_page);
2418
2419	pte_unmap_unlock(page_table, ptl);
2420	return wp_page_copy(mm, vma, address, page_table, pmd,
2421			    orig_pte, old_page);
2422}
2423
2424static void unmap_mapping_range_vma(struct vm_area_struct *vma,
2425		unsigned long start_addr, unsigned long end_addr,
2426		struct zap_details *details)
2427{
2428	zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
2429}
2430
2431static inline void unmap_mapping_range_tree(struct rb_root *root,
2432					    struct zap_details *details)
2433{
2434	struct vm_area_struct *vma;
2435	pgoff_t vba, vea, zba, zea;
2436
2437	vma_interval_tree_foreach(vma, root,
2438			details->first_index, details->last_index) {
2439
2440		vba = vma->vm_pgoff;
2441		vea = vba + vma_pages(vma) - 1;
2442		zba = details->first_index;
2443		if (zba < vba)
2444			zba = vba;
2445		zea = details->last_index;
2446		if (zea > vea)
2447			zea = vea;
2448
2449		unmap_mapping_range_vma(vma,
2450			((zba - vba) << PAGE_SHIFT) + vma->vm_start,
2451			((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
2452				details);
2453	}
2454}
2455
2456/**
2457 * unmap_mapping_range - unmap the portion of all mmaps in the specified
2458 * address_space corresponding to the specified page range in the underlying
2459 * file.
2460 *
2461 * @mapping: the address space containing mmaps to be unmapped.
2462 * @holebegin: byte in first page to unmap, relative to the start of
2463 * the underlying file.  This will be rounded down to a PAGE_SIZE
2464 * boundary.  Note that this is different from truncate_pagecache(), which
2465 * must keep the partial page.  In contrast, we must get rid of
2466 * partial pages.
2467 * @holelen: size of prospective hole in bytes.  This will be rounded
2468 * up to a PAGE_SIZE boundary.  A holelen of zero truncates to the
2469 * end of the file.
2470 * @even_cows: 1 when truncating a file, unmap even private COWed pages;
2471 * but 0 when invalidating pagecache, don't throw away private data.
2472 */
2473void unmap_mapping_range(struct address_space *mapping,
2474		loff_t const holebegin, loff_t const holelen, int even_cows)
2475{
2476	struct zap_details details = { };
2477	pgoff_t hba = holebegin >> PAGE_SHIFT;
2478	pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
2479
2480	/* Check for overflow. */
2481	if (sizeof(holelen) > sizeof(hlen)) {
2482		long long holeend =
2483			(holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
2484		if (holeend & ~(long long)ULONG_MAX)
2485			hlen = ULONG_MAX - hba + 1;
2486	}
2487
2488	details.check_mapping = even_cows ? NULL : mapping;
2489	details.first_index = hba;
2490	details.last_index = hba + hlen - 1;
2491	if (details.last_index < details.first_index)
2492		details.last_index = ULONG_MAX;
2493
2494
2495	/* DAX uses i_mmap_lock to serialise file truncate vs page fault */
2496	i_mmap_lock_write(mapping);
2497	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap)))
2498		unmap_mapping_range_tree(&mapping->i_mmap, &details);
2499	i_mmap_unlock_write(mapping);
2500}
2501EXPORT_SYMBOL(unmap_mapping_range);
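/*
 * Illustrative sketch (editor's addition, not part of the kernel source):
 * a filesystem truncating an inode down to "newsize" zaps every mapping
 * beyond the new end of file, much as truncate_pagecache() does. A holelen
 * of 0 extends the hole to EOF, and even_cows=1 discards private COWed
 * copies as well:
 *
 *	unmap_mapping_range(inode->i_mapping, (loff_t)newsize, 0, 1);
 */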
2502
2503/*
2504 * We enter with non-exclusive mmap_sem (to exclude vma changes,
2505 * but allow concurrent faults), and pte mapped but not yet locked.
2506 * We return with pte unmapped and unlocked.
2507 *
2508 * We return with the mmap_sem locked or unlocked in the same cases
2509 * as does filemap_fault().
2510 */
2511static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
2512		unsigned long address, pte_t *page_table, pmd_t *pmd,
2513		unsigned int flags, pte_t orig_pte)
2514{
2515	spinlock_t *ptl;
2516	struct page *page, *swapcache;
2517	struct mem_cgroup *memcg;
2518	swp_entry_t entry;
2519	pte_t pte;
2520	int locked;
2521	int exclusive = 0;
2522	int ret = 0;
2523
2524	if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
2525		goto out;
2526
2527	entry = pte_to_swp_entry(orig_pte);
2528	if (unlikely(non_swap_entry(entry))) {
2529		if (is_migration_entry(entry)) {
2530			migration_entry_wait(mm, pmd, address);
2531		} else if (is_hwpoison_entry(entry)) {
2532			ret = VM_FAULT_HWPOISON;
2533		} else {
2534			print_bad_pte(vma, address, orig_pte, NULL);
2535			ret = VM_FAULT_SIGBUS;
2536		}
2537		goto out;
2538	}
2539	delayacct_set_flag(DELAYACCT_PF_SWAPIN);
2540	page = lookup_swap_cache(entry);
2541	if (!page) {
2542		page = swapin_readahead(entry,
2543					GFP_HIGHUSER_MOVABLE, vma, address);
2544		if (!page) {
2545			/*
2546			 * Back out if somebody else faulted in this pte
2547			 * while we released the pte lock.
2548			 */
2549			page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
2550			if (likely(pte_same(*page_table, orig_pte)))
2551				ret = VM_FAULT_OOM;
2552			delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
2553			goto unlock;
2554		}
2555
2556		/* Had to read the page from swap area: Major fault */
2557		ret = VM_FAULT_MAJOR;
2558		count_vm_event(PGMAJFAULT);
2559		mem_cgroup_count_vm_event(mm, PGMAJFAULT);
2560	} else if (PageHWPoison(page)) {
2561		/*
2562		 * hwpoisoned dirty swapcache pages are kept for killing
2563		 * owner processes (which may be unknown at hwpoison time)
2564		 */
2565		ret = VM_FAULT_HWPOISON;
2566		delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
2567		swapcache = page;
2568		goto out_release;
2569	}
2570
2571	swapcache = page;
2572	locked = lock_page_or_retry(page, mm, flags);
2573
2574	delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
2575	if (!locked) {
2576		ret |= VM_FAULT_RETRY;
2577		goto out_release;
2578	}
2579
2580	/*
2581	 * Make sure try_to_free_swap or reuse_swap_page or swapoff did not
2582	 * release the swapcache from under us.  The page pin, and pte_same
2583	 * test below, are not enough to exclude that.  Even if it is still
2584	 * swapcache, we need to check that the page's swap has not changed.
2585	 */
2586	if (unlikely(!PageSwapCache(page) || page_private(page) != entry.val))
2587		goto out_page;
2588
2589	page = ksm_might_need_to_copy(page, vma, address);
2590	if (unlikely(!page)) {
2591		ret = VM_FAULT_OOM;
2592		page = swapcache;
2593		goto out_page;
2594	}
2595
2596	if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg, false)) {
2597		ret = VM_FAULT_OOM;
2598		goto out_page;
2599	}
2600
2601	/*
2602	 * Back out if somebody else already faulted in this pte.
2603	 */
2604	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
2605	if (unlikely(!pte_same(*page_table, orig_pte)))
2606		goto out_nomap;
2607
2608	if (unlikely(!PageUptodate(page))) {
2609		ret = VM_FAULT_SIGBUS;
2610		goto out_nomap;
2611	}
2612
2613	/*
2614	 * The page isn't present yet, go ahead with the fault.
2615	 *
2616	 * Be careful about the sequence of operations here.
2617	 * To get its accounting right, reuse_swap_page() must be called
2618	 * while the page is counted on swap but not yet in mapcount i.e.
2619	 * before page_add_anon_rmap() and swap_free(); try_to_free_swap()
2620	 * must be called after the swap_free(), or it will never succeed.
2621	 */
2622
2623	inc_mm_counter_fast(mm, MM_ANONPAGES);
2624	dec_mm_counter_fast(mm, MM_SWAPENTS);
2625	pte = mk_pte(page, vma->vm_page_prot);
2626	if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) {
2627		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
2628		flags &= ~FAULT_FLAG_WRITE;
2629		ret |= VM_FAULT_WRITE;
2630		exclusive = RMAP_EXCLUSIVE;
2631	}
2632	flush_icache_page(vma, page);
2633	if (pte_swp_soft_dirty(orig_pte))
2634		pte = pte_mksoft_dirty(pte);
2635	set_pte_at(mm, address, page_table, pte);
2636	if (page == swapcache) {
2637		do_page_add_anon_rmap(page, vma, address, exclusive);
2638		mem_cgroup_commit_charge(page, memcg, true, false);
2639	} else { /* ksm created a completely new copy */
2640		page_add_new_anon_rmap(page, vma, address, false);
2641		mem_cgroup_commit_charge(page, memcg, false, false);
2642		lru_cache_add_active_or_unevictable(page, vma);
2643	}
2644
2645	swap_free(entry);
2646	if (mem_cgroup_swap_full(page) ||
2647	    (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
2648		try_to_free_swap(page);
2649	unlock_page(page);
2650	if (page != swapcache) {
2651		/*
2652		 * Hold the lock to prevent the swap entry from being reused
2653		 * until we take the PT lock for the pte_same() check
2654		 * (to avoid false positives from pte_same). For
2655		 * further safety, release the lock after the swap_free
2656		 * so that the swap count won't change under a
2657		 * parallel locked swapcache.
2658		 */
2659		unlock_page(swapcache);
2660		put_page(swapcache);
2661	}
2662
2663	if (flags & FAULT_FLAG_WRITE) {
2664		ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte);
2665		if (ret & VM_FAULT_ERROR)
2666			ret &= VM_FAULT_ERROR;
2667		goto out;
2668	}
2669
2670	/* No need to invalidate - it was non-present before */
2671	update_mmu_cache(vma, address, page_table);
2672unlock:
2673	pte_unmap_unlock(page_table, ptl);
2674out:
2675	return ret;
2676out_nomap:
2677	mem_cgroup_cancel_charge(page, memcg, false);
2678	pte_unmap_unlock(page_table, ptl);
2679out_page:
2680	unlock_page(page);
2681out_release:
2682	put_page(page);
2683	if (page != swapcache) {
2684		unlock_page(swapcache);
2685		put_page(swapcache);
2686	}
2687	return ret;
2688}
2689
2690/*
2691 * This is like a special single-page "expand_{down|up}wards()",
2692 * except we must first make sure that 'address{-|+}PAGE_SIZE'
2693 * doesn't hit another vma.
2694 */
2695static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
2696{
2697	address &= PAGE_MASK;
2698	if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
2699		struct vm_area_struct *prev = vma->vm_prev;
2700
2701		/*
2702		 * Is there a mapping abutting this one below?
2703		 *
2704		 * That's only ok if it's the same stack mapping
2705		 * that has gotten split..
2706		 */
2707		if (prev && prev->vm_end == address)
2708			return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
2709
2710		return expand_downwards(vma, address - PAGE_SIZE);
2711	}
2712	if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
2713		struct vm_area_struct *next = vma->vm_next;
2714
2715		/* As VM_GROWSDOWN but s/below/above/ */
2716		if (next && next->vm_start == address + PAGE_SIZE)
2717			return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
2718
2719		return expand_upwards(vma, address + PAGE_SIZE);
2720	}
2721	return 0;
2722}
2723
2724/*
2725 * We enter with non-exclusive mmap_sem (to exclude vma changes,
2726 * but allow concurrent faults), and pte mapped but not yet locked.
2727 * We return with mmap_sem still held, but pte unmapped and unlocked.
2728 */
2729static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
2730		unsigned long address, pte_t *page_table, pmd_t *pmd,
2731		unsigned int flags)
2732{
2733	struct mem_cgroup *memcg;
2734	struct page *page;
2735	spinlock_t *ptl;
2736	pte_t entry;
2737
2738	pte_unmap(page_table);
2739
2740	/* File mapping without ->vm_ops ? */
2741	if (vma->vm_flags & VM_SHARED)
2742		return VM_FAULT_SIGBUS;
2743
2744	/* Check if we need to add a guard page to the stack */
2745	if (check_stack_guard_page(vma, address) < 0)
2746		return VM_FAULT_SIGSEGV;
2747
2748	/* Use the zero-page for reads */
2749	if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm)) {
2750		entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
2751						vma->vm_page_prot));
2752		page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
2753		if (!pte_none(*page_table))
2754			goto unlock;
2755		/* Deliver the page fault to userland, check inside PT lock */
2756		if (userfaultfd_missing(vma)) {
2757			pte_unmap_unlock(page_table, ptl);
2758			return handle_userfault(vma, address, flags,
2759						VM_UFFD_MISSING);
2760		}
2761		goto setpte;
2762	}
2763
2764	/* Allocate our own private page. */
2765	if (unlikely(anon_vma_prepare(vma)))
2766		goto oom;
2767	page = alloc_zeroed_user_highpage_movable(vma, address);
2768	if (!page)
2769		goto oom;
2770
2771	if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg, false))
2772		goto oom_free_page;
2773
2774	/*
2775	 * The memory barrier inside __SetPageUptodate makes sure that
2776	 * preceding stores to the page contents become visible before
2777	 * the set_pte_at() write.
2778	 */
2779	__SetPageUptodate(page);
2780
2781	entry = mk_pte(page, vma->vm_page_prot);
2782	if (vma->vm_flags & VM_WRITE)
2783		entry = pte_mkwrite(pte_mkdirty(entry));
2784
2785	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
2786	if (!pte_none(*page_table))
2787		goto release;
2788
2789	/* Deliver the page fault to userland, check inside PT lock */
2790	if (userfaultfd_missing(vma)) {
2791		pte_unmap_unlock(page_table, ptl);
2792		mem_cgroup_cancel_charge(page, memcg, false);
2793		put_page(page);
2794		return handle_userfault(vma, address, flags,
2795					VM_UFFD_MISSING);
2796	}
2797
2798	inc_mm_counter_fast(mm, MM_ANONPAGES);
2799	page_add_new_anon_rmap(page, vma, address, false);
2800	mem_cgroup_commit_charge(page, memcg, false, false);
2801	lru_cache_add_active_or_unevictable(page, vma);
2802setpte:
2803	set_pte_at(mm, address, page_table, entry);
2804
2805	/* No need to invalidate - it was non-present before */
2806	update_mmu_cache(vma, address, page_table);
2807unlock:
2808	pte_unmap_unlock(page_table, ptl);
2809	return 0;
2810release:
2811	mem_cgroup_cancel_charge(page, memcg, false);
2812	put_page(page);
2813	goto unlock;
2814oom_free_page:
2815	put_page(page);
2816oom:
2817	return VM_FAULT_OOM;
2818}
2819
2820/*
2821 * The mmap_sem must have been held on entry, and may have been
2822 * released depending on flags and vma->vm_ops->fault() return value.
2823 * See filemap_fault() and __lock_page_retry().
2824 */
2825static int __do_fault(struct vm_area_struct *vma, unsigned long address,
2826			pgoff_t pgoff, unsigned int flags,
2827			struct page *cow_page, struct page **page)
2828{
2829	struct vm_fault vmf;
2830	int ret;
2831
2832	vmf.virtual_address = (void __user *)(address & PAGE_MASK);
2833	vmf.pgoff = pgoff;
2834	vmf.flags = flags;
2835	vmf.page = NULL;
2836	vmf.gfp_mask = __get_fault_gfp_mask(vma);
2837	vmf.cow_page = cow_page;
2838
2839	ret = vma->vm_ops->fault(vma, &vmf);
2840	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
2841		return ret;
2842	if (!vmf.page)
2843		goto out;
2844
2845	if (unlikely(PageHWPoison(vmf.page))) {
2846		if (ret & VM_FAULT_LOCKED)
2847			unlock_page(vmf.page);
2848		put_page(vmf.page);
2849		return VM_FAULT_HWPOISON;
2850	}
2851
2852	if (unlikely(!(ret & VM_FAULT_LOCKED)))
2853		lock_page(vmf.page);
2854	else
2855		VM_BUG_ON_PAGE(!PageLocked(vmf.page), vmf.page);
2856
2857 out:
2858	*page = vmf.page;
2859	return ret;
2860}
2861
2862/**
2863	 * do_set_pte - set up new PTE entry for given page and add reverse page mapping.
2864 *
2865 * @vma: virtual memory area
2866 * @address: user virtual address
2867 * @page: page to map
2868 * @pte: pointer to target page table entry
2869 * @write: true, if new entry is writable
2870 * @anon: true, if it's anonymous page
2871 *
2872 * Caller must hold page table lock relevant for @pte.
2873 *
2874 * Target users are page handler itself and implementations of
2875 * vm_ops->map_pages.
2876 */
2877void do_set_pte(struct vm_area_struct *vma, unsigned long address,
2878		struct page *page, pte_t *pte, bool write, bool anon)
2879{
2880	pte_t entry;
2881
2882	flush_icache_page(vma, page);
2883	entry = mk_pte(page, vma->vm_page_prot);
2884	if (write)
2885		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2886	if (anon) {
2887		inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
2888		page_add_new_anon_rmap(page, vma, address, false);
2889	} else {
2890		inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
2891		page_add_file_rmap(page);
2892	}
2893	set_pte_at(vma->vm_mm, address, pte, entry);
2894
2895	/* no need to invalidate: a not-present page won't be cached */
2896	update_mmu_cache(vma, address, pte);
2897}
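/*
 * Illustrative sketch (editor's addition, not part of the kernel source):
 * a ->map_pages() implementation along the lines of filemap_map_pages()
 * walks the pages around the fault and, for each page it finds up to date
 * and stable, installs a read-only pte roughly like:
 *
 *	do_set_pte(vma, addr, page, pte, false, false);
 *
 * write=false because fault-around never maps writable, and anon=false
 * because these are file-backed pages.
 */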
2898
2899static unsigned long fault_around_bytes __read_mostly =
2900	rounddown_pow_of_two(65536);
2901
2902#ifdef CONFIG_DEBUG_FS
2903static int fault_around_bytes_get(void *data, u64 *val)
2904{
2905	*val = fault_around_bytes;
2906	return 0;
2907}
2908
2909/*
2910	 * fault_around_pages() and fault_around_mask() expect fault_around_bytes
2911	 * rounded down to the nearest page order, which is what do_fault_around()
2912	 * expects to see.
2913 */
2914static int fault_around_bytes_set(void *data, u64 val)
2915{
2916	if (val / PAGE_SIZE > PTRS_PER_PTE)
2917		return -EINVAL;
2918	if (val > PAGE_SIZE)
2919		fault_around_bytes = rounddown_pow_of_two(val);
2920	else
2921		fault_around_bytes = PAGE_SIZE; /* rounddown_pow_of_two(0) is undefined */
2922	return 0;
2923}
2924DEFINE_SIMPLE_ATTRIBUTE(fault_around_bytes_fops,
2925		fault_around_bytes_get, fault_around_bytes_set, "%llu\n");
2926
2927static int __init fault_around_debugfs(void)
2928{
2929	void *ret;
2930
2931	ret = debugfs_create_file("fault_around_bytes", 0644, NULL, NULL,
2932			&fault_around_bytes_fops);
2933	if (!ret)
2934		pr_warn("Failed to create fault_around_bytes in debugfs\n");
2935	return 0;
2936}
2937late_initcall(fault_around_debugfs);
2938#endif
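/*
 * Usage note (editor's addition): with CONFIG_DEBUG_FS the fault-around
 * window can be tuned at run time, assuming debugfs is mounted in the
 * usual place. On a 4K-page system, for example:
 *
 *	echo 4096 > /sys/kernel/debug/fault_around_bytes
 *
 * shrinks the window to a single page, which do_read_fault() below treats
 * as fault-around disabled; larger powers of two, up to PTRS_PER_PTE
 * pages, widen it.
 */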
2939
2940/*
2941	 * do_fault_around() tries to map a few pages around the fault address. The hope
2942 * is that the pages will be needed soon and this will lower the number of
2943 * faults to handle.
2944 *
2945	 * It uses vm_ops->map_pages() to map the pages, which skips a page if it's
2946	 * not ready to be mapped: not up-to-date, locked, etc.
2947 *
2948 * This function is called with the page table lock taken. In the split ptlock
2949	 * case the page table lock protects only those entries which belong to
2950 * the page table corresponding to the fault address.
2951 *
2952 * This function doesn't cross the VMA boundaries, in order to call map_pages()
2953 * only once.
2954 *
2955 * fault_around_pages() defines how many pages we'll try to map.
2956 * do_fault_around() expects it to return a power of two less than or equal to
2957 * PTRS_PER_PTE.
2958 *
2959 * The virtual address of the area that we map is naturally aligned to the
2960 * fault_around_pages() value (and therefore to page order).  This way it's
2961 * easier to guarantee that we don't cross page table boundaries.
2962 */
2963static void do_fault_around(struct vm_area_struct *vma, unsigned long address,
2964		pte_t *pte, pgoff_t pgoff, unsigned int flags)
2965{
2966	unsigned long start_addr, nr_pages, mask;
2967	pgoff_t max_pgoff;
2968	struct vm_fault vmf;
2969	int off;
2970
2971	nr_pages = READ_ONCE(fault_around_bytes) >> PAGE_SHIFT;
2972	mask = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK;
2973
2974	start_addr = max(address & mask, vma->vm_start);
2975	off = ((address - start_addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
2976	pte -= off;
2977	pgoff -= off;
2978
2979	/*
2980	 *  max_pgoff is either the end of the page table, the end of the vma,
2981	 *  or fault_around_pages() from pgoff, depending on what is nearest.
2982	 */
2983	max_pgoff = pgoff - ((start_addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +
2984		PTRS_PER_PTE - 1;
2985	max_pgoff = min3(max_pgoff, vma_pages(vma) + vma->vm_pgoff - 1,
2986			pgoff + nr_pages - 1);
2987
2988	/* Check if it makes any sense to call ->map_pages */
2989	while (!pte_none(*pte)) {
2990		if (++pgoff > max_pgoff)
2991			return;
2992		start_addr += PAGE_SIZE;
2993		if (start_addr >= vma->vm_end)
2994			return;
2995		pte++;
2996	}
2997
2998	vmf.virtual_address = (void __user *) start_addr;
2999	vmf.pte = pte;
3000	vmf.pgoff = pgoff;
3001	vmf.max_pgoff = max_pgoff;
3002	vmf.flags = flags;
3003	vmf.gfp_mask = __get_fault_gfp_mask(vma);
3004	vma->vm_ops->map_pages(vma, &vmf);
3005}
3006
3007static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3008		unsigned long address, pmd_t *pmd,
3009		pgoff_t pgoff, unsigned int flags, pte_t orig_pte)
3010{
3011	struct page *fault_page;
3012	spinlock_t *ptl;
3013	pte_t *pte;
3014	int ret = 0;
3015
3016	/*
3017	 * Let's call ->map_pages() first and use ->fault() as fallback
3018	 * if the page at the offset is not ready to be mapped (cold cache or
3019	 * something).
3020	 */
3021	if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) {
3022		pte = pte_offset_map_lock(mm, pmd, address, &ptl);
3023		do_fault_around(vma, address, pte, pgoff, flags);
3024		if (!pte_same(*pte, orig_pte))
3025			goto unlock_out;
3026		pte_unmap_unlock(pte, ptl);
3027	}
3028
3029	ret = __do_fault(vma, address, pgoff, flags, NULL, &fault_page);
3030	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
3031		return ret;
3032
3033	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
3034	if (unlikely(!pte_same(*pte, orig_pte))) {
3035		pte_unmap_unlock(pte, ptl);
3036		unlock_page(fault_page);
3037		put_page(fault_page);
3038		return ret;
3039	}
3040	do_set_pte(vma, address, fault_page, pte, false, false);
3041	unlock_page(fault_page);
3042unlock_out:
3043	pte_unmap_unlock(pte, ptl);
3044	return ret;
3045}
3046
3047static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3048		unsigned long address, pmd_t *pmd,
3049		pgoff_t pgoff, unsigned int flags, pte_t orig_pte)
3050{
3051	struct page *fault_page, *new_page;
3052	struct mem_cgroup *memcg;
3053	spinlock_t *ptl;
3054	pte_t *pte;
3055	int ret;
3056
3057	if (unlikely(anon_vma_prepare(vma)))
3058		return VM_FAULT_OOM;
3059
3060	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
3061	if (!new_page)
3062		return VM_FAULT_OOM;
3063
3064	if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg, false)) {
3065		put_page(new_page);
3066		return VM_FAULT_OOM;
3067	}
3068
3069	ret = __do_fault(vma, address, pgoff, flags, new_page, &fault_page);
3070	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
3071		goto uncharge_out;
3072
3073	if (fault_page)
3074		copy_user_highpage(new_page, fault_page, address, vma);
3075	__SetPageUptodate(new_page);
3076
3077	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
3078	if (unlikely(!pte_same(*pte, orig_pte))) {
3079		pte_unmap_unlock(pte, ptl);
3080		if (fault_page) {
3081			unlock_page(fault_page);
3082			put_page(fault_page);
3083		} else {
3084			/*
3085			 * The fault handler has no page to lock, so it holds
3086			 * i_mmap_lock for read to protect against truncate.
3087			 */
3088			i_mmap_unlock_read(vma->vm_file->f_mapping);
3089		}
3090		goto uncharge_out;
3091	}
3092	do_set_pte(vma, address, new_page, pte, true, true);
3093	mem_cgroup_commit_charge(new_page, memcg, false, false);
3094	lru_cache_add_active_or_unevictable(new_page, vma);
3095	pte_unmap_unlock(pte, ptl);
3096	if (fault_page) {
3097		unlock_page(fault_page);
3098		put_page(fault_page);
3099	} else {
3100		/*
3101		 * The fault handler has no page to lock, so it holds
3102		 * i_mmap_lock for read to protect against truncate.
3103		 */
3104		i_mmap_unlock_read(vma->vm_file->f_mapping);
3105	}
3106	return ret;
3107uncharge_out:
3108	mem_cgroup_cancel_charge(new_page, memcg, false);
3109	put_page(new_page);
3110	return ret;
3111}
3112
3113static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3114		unsigned long address, pmd_t *pmd,
3115		pgoff_t pgoff, unsigned int flags, pte_t orig_pte)
3116{
3117	struct page *fault_page;
3118	struct address_space *mapping;
3119	spinlock_t *ptl;
3120	pte_t *pte;
3121	int dirtied = 0;
3122	int ret, tmp;
3123
3124	ret = __do_fault(vma, address, pgoff, flags, NULL, &fault_page);
3125	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
3126		return ret;
3127
3128	/*
3129	 * Check if the backing address space wants to know that the page is
3130	 * about to become writable
3131	 */
3132	if (vma->vm_ops->page_mkwrite) {
3133		unlock_page(fault_page);
3134		tmp = do_page_mkwrite(vma, fault_page, address);
3135		if (unlikely(!tmp ||
3136				(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
3137			put_page(fault_page);
3138			return tmp;
3139		}
3140	}
3141
3142	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
3143	if (unlikely(!pte_same(*pte, orig_pte))) {
3144		pte_unmap_unlock(pte, ptl);
3145		unlock_page(fault_page);
3146		put_page(fault_page);
3147		return ret;
3148	}
3149	do_set_pte(vma, address, fault_page, pte, true, false);
3150	pte_unmap_unlock(pte, ptl);
3151
3152	if (set_page_dirty(fault_page))
3153		dirtied = 1;
3154	/*
3155	 * Take a local copy of the address_space - page.mapping may be zeroed
3156	 * by truncate after unlock_page().   The address_space itself remains
3157	 * pinned by vma->vm_file's reference.  We rely on unlock_page()'s
3158	 * release semantics to prevent the compiler from undoing this copying.
3159	 */
3160	mapping = page_rmapping(fault_page);
3161	unlock_page(fault_page);
3162	if ((dirtied || vma->vm_ops->page_mkwrite) && mapping) {
3163		/*
3164		 * Some device drivers do not set page.mapping but still
3165		 * dirty their pages
3166		 */
3167		balance_dirty_pages_ratelimited(mapping);
3168	}
3169
3170	if (!vma->vm_ops->page_mkwrite)
3171		file_update_time(vma->vm_file);
3172
3173	return ret;
3174}
3175
3176/*
3177 * We enter with non-exclusive mmap_sem (to exclude vma changes,
3178 * but allow concurrent faults).
3179 * The mmap_sem may have been released depending on flags and our
3180 * return value.  See filemap_fault() and __lock_page_or_retry().
3181 */
3182static int do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3183		unsigned long address, pte_t *page_table, pmd_t *pmd,
3184		unsigned int flags, pte_t orig_pte)
3185{
3186	pgoff_t pgoff = linear_page_index(vma, address);
3187
3188	pte_unmap(page_table);
3189	/* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */
3190	if (!vma->vm_ops->fault)
3191		return VM_FAULT_SIGBUS;
3192	if (!(flags & FAULT_FLAG_WRITE))
3193		return do_read_fault(mm, vma, address, pmd, pgoff, flags,
3194				orig_pte);
3195	if (!(vma->vm_flags & VM_SHARED))
3196		return do_cow_fault(mm, vma, address, pmd, pgoff, flags,
3197				orig_pte);
3198	return do_shared_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
3199}
3200
3201static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
3202				unsigned long addr, int page_nid,
3203				int *flags)
3204{
3205	get_page(page);
3206
3207	count_vm_numa_event(NUMA_HINT_FAULTS);
3208	if (page_nid == numa_node_id()) {
3209		count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
3210		*flags |= TNF_FAULT_LOCAL;
3211	}
3212
3213	return mpol_misplaced(page, vma, addr);
3214}
3215
3216static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
3217		   unsigned long addr, pte_t pte, pte_t *ptep, pmd_t *pmd)
3218{
3219	struct page *page = NULL;
3220	spinlock_t *ptl;
3221	int page_nid = -1;
3222	int last_cpupid;
3223	int target_nid;
3224	bool migrated = false;
3225	bool was_writable = pte_write(pte);
3226	int flags = 0;
3227
3228	/* A PROT_NONE fault should not end up here */
3229	BUG_ON(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)));
3230
3231	/*
3232	 * The "pte" at this point cannot be used safely without
3233	 * validation through pte_unmap_same(). It's of NUMA type but
3234	 * the pfn may be screwed if the read is non-atomic.
3235	 *
3236	 * We can safely just do a "set_pte_at()", because the old
3237	 * page table entry is not accessible, so there would be no
3238	 * concurrent hardware modifications to the PTE.
3239	 */
3240	ptl = pte_lockptr(mm, pmd);
3241	spin_lock(ptl);
3242	if (unlikely(!pte_same(*ptep, pte))) {
3243		pte_unmap_unlock(ptep, ptl);
3244		goto out;
3245	}
3246
3247	/* Make it present again */
3248	pte = pte_modify(pte, vma->vm_page_prot);
3249	pte = pte_mkyoung(pte);
3250	if (was_writable)
3251		pte = pte_mkwrite(pte);
3252	set_pte_at(mm, addr, ptep, pte);
3253	update_mmu_cache(vma, addr, ptep);
3254
3255	page = vm_normal_page(vma, addr, pte);
3256	if (!page) {
3257		pte_unmap_unlock(ptep, ptl);
3258		return 0;
3259	}
3260
3261	/* TODO: handle PTE-mapped THP */
3262	if (PageCompound(page)) {
3263		pte_unmap_unlock(ptep, ptl);
3264		return 0;
3265	}
3266
3267	/*
3268	 * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
3269	 * much anyway since they can be in shared cache state. This misses
3270	 * the case where a mapping is writable but the process never writes
3271	 * to it: pte_write gets cleared during protection updates and
3272	 * pte_dirty has unpredictable behaviour between PTE scan updates,
3273	 * background writeback, dirty balancing and application behaviour.
3274	 */
3275	if (!(vma->vm_flags & VM_WRITE))
3276		flags |= TNF_NO_GROUP;
3277
3278	/*
3279	 * Flag if the page is shared between multiple address spaces. This
3280	 * is later used when determining whether to group tasks together
3281	 */
3282	if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED))
3283		flags |= TNF_SHARED;
3284
3285	last_cpupid = page_cpupid_last(page);
3286	page_nid = page_to_nid(page);
3287	target_nid = numa_migrate_prep(page, vma, addr, page_nid, &flags);
3288	pte_unmap_unlock(ptep, ptl);
3289	if (target_nid == -1) {
3290		put_page(page);
3291		goto out;
3292	}
3293
3294	/* Migrate to the requested node */
3295	migrated = migrate_misplaced_page(page, vma, target_nid);
3296	if (migrated) {
3297		page_nid = target_nid;
3298		flags |= TNF_MIGRATED;
3299	} else
3300		flags |= TNF_MIGRATE_FAIL;
3301
3302out:
3303	if (page_nid != -1)
3304		task_numa_fault(last_cpupid, page_nid, 1, flags);
3305	return 0;
3306}
3307
3308static int create_huge_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
3309			unsigned long address, pmd_t *pmd, unsigned int flags)
3310{
3311	if (vma_is_anonymous(vma))
3312		return do_huge_pmd_anonymous_page(mm, vma, address, pmd, flags);
3313	if (vma->vm_ops->pmd_fault)
3314		return vma->vm_ops->pmd_fault(vma, address, pmd, flags);
3315	return VM_FAULT_FALLBACK;
3316}
3317
3318static int wp_huge_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
3319			unsigned long address, pmd_t *pmd, pmd_t orig_pmd,
3320			unsigned int flags)
3321{
3322	if (vma_is_anonymous(vma))
3323		return do_huge_pmd_wp_page(mm, vma, address, pmd, orig_pmd);
3324	if (vma->vm_ops->pmd_fault)
3325		return vma->vm_ops->pmd_fault(vma, address, pmd, flags);
3326	return VM_FAULT_FALLBACK;
3327}
3328
3329/*
3330 * These routines also need to handle stuff like marking pages dirty
3331 * and/or accessed for architectures that don't do it in hardware (most
3332 * RISC architectures).  The early dirtying is also good on the i386.
3333 *
3334 * There is also a hook called "update_mmu_cache()" that architectures
3335	 * with external mmu caches can use to update those (i.e. the Sparc or
3336 * PowerPC hashed page tables that act as extended TLBs).
3337 *
3338 * We enter with non-exclusive mmap_sem (to exclude vma changes,
3339 * but allow concurrent faults), and pte mapped but not yet locked.
3340 * We return with pte unmapped and unlocked.
3341 *
3342 * The mmap_sem may have been released depending on flags and our
3343 * return value.  See filemap_fault() and __lock_page_or_retry().
3344 */
3345static int handle_pte_fault(struct mm_struct *mm,
3346		     struct vm_area_struct *vma, unsigned long address,
3347		     pte_t *pte, pmd_t *pmd, unsigned int flags)
3348{
3349	pte_t entry;
3350	spinlock_t *ptl;
3351
3352	/*
3353	 * some architectures can have larger ptes than wordsize,
3354	 * e.g. ppc44x-defconfig has CONFIG_PTE_64BIT=y and CONFIG_32BIT=y,
3355	 * so READ_ONCE or ACCESS_ONCE cannot guarantee atomic accesses.
3356	 * The code below just needs a consistent view for the ifs and
3357	 * we later double check anyway with the ptl lock held. So here
3358	 * a barrier will do.
3359	 */
3360	entry = *pte;
3361	barrier();
3362	if (!pte_present(entry)) {
3363		if (pte_none(entry)) {
3364			if (vma_is_anonymous(vma))
3365				return do_anonymous_page(mm, vma, address,
3366							 pte, pmd, flags);
3367			else
3368				return do_fault(mm, vma, address, pte, pmd,
3369						flags, entry);
3370		}
3371		return do_swap_page(mm, vma, address,
3372					pte, pmd, flags, entry);
3373	}
3374
3375	if (pte_protnone(entry))
3376		return do_numa_page(mm, vma, address, entry, pte, pmd);
3377
3378	ptl = pte_lockptr(mm, pmd);
3379	spin_lock(ptl);
3380	if (unlikely(!pte_same(*pte, entry)))
3381		goto unlock;
3382	if (flags & FAULT_FLAG_WRITE) {
3383		if (!pte_write(entry))
3384			return do_wp_page(mm, vma, address,
3385					pte, pmd, ptl, entry);
3386		entry = pte_mkdirty(entry);
3387	}
3388	entry = pte_mkyoung(entry);
3389	if (ptep_set_access_flags(vma, address, pte, entry, flags & FAULT_FLAG_WRITE)) {
3390		update_mmu_cache(vma, address, pte);
3391	} else {
3392		/*
3393		 * This is needed only for protection faults but the arch code
3394		 * is not yet telling us if this is a protection fault or not.
3395		 * This still avoids useless tlb flushes for .text page faults
3396		 * with threads.
3397		 */
3398		if (flags & FAULT_FLAG_WRITE)
3399			flush_tlb_fix_spurious_fault(vma, address);
3400	}
3401unlock:
3402	pte_unmap_unlock(pte, ptl);
3403	return 0;
3404}
3405
3406/*
3407 * By the time we get here, we already hold the mm semaphore
3408 *
3409 * The mmap_sem may have been released depending on flags and our
3410 * return value.  See filemap_fault() and __lock_page_or_retry().
3411 */
3412static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3413			     unsigned long address, unsigned int flags)
3414{
3415	pgd_t *pgd;
3416	pud_t *pud;
3417	pmd_t *pmd;
3418	pte_t *pte;
3419
3420	if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
3421					    flags & FAULT_FLAG_INSTRUCTION,
3422					    flags & FAULT_FLAG_REMOTE))
3423		return VM_FAULT_SIGSEGV;
3424
3425	if (unlikely(is_vm_hugetlb_page(vma)))
3426		return hugetlb_fault(mm, vma, address, flags);
3427
3428	pgd = pgd_offset(mm, address);
3429	pud = pud_alloc(mm, pgd, address);
3430	if (!pud)
3431		return VM_FAULT_OOM;
3432	pmd = pmd_alloc(mm, pud, address);
3433	if (!pmd)
3434		return VM_FAULT_OOM;
3435	if (pmd_none(*pmd) && transparent_hugepage_enabled(vma)) {
3436		int ret = create_huge_pmd(mm, vma, address, pmd, flags);
3437		if (!(ret & VM_FAULT_FALLBACK))
3438			return ret;
3439	} else {
3440		pmd_t orig_pmd = *pmd;
3441		int ret;
3442
3443		barrier();
3444		if (pmd_trans_huge(orig_pmd) || pmd_devmap(orig_pmd)) {
3445			unsigned int dirty = flags & FAULT_FLAG_WRITE;
3446
3447			if (pmd_protnone(orig_pmd))
3448				return do_huge_pmd_numa_page(mm, vma, address,
3449							     orig_pmd, pmd);
3450
3451			if (dirty && !pmd_write(orig_pmd)) {
3452				ret = wp_huge_pmd(mm, vma, address, pmd,
3453							orig_pmd, flags);
3454				if (!(ret & VM_FAULT_FALLBACK))
3455					return ret;
3456			} else {
3457				huge_pmd_set_accessed(mm, vma, address, pmd,
3458						      orig_pmd, dirty);
3459				return 0;
3460			}
3461		}
3462	}
3463
3464	/*
3465	 * Use pte_alloc() instead of pte_alloc_map, because we can't
3466	 * run pte_offset_map on the pmd, if a huge pmd could
3467	 * materialize from under us from a different thread.
3468	 */
3469	if (unlikely(pte_alloc(mm, pmd, address)))
3470		return VM_FAULT_OOM;
3471	/*
3472	 * If a huge pmd materialized under us, just retry later.  Use
3473	 * pmd_trans_unstable() instead of pmd_trans_huge() to ensure the pmd
3474	 * didn't become pmd_trans_huge under us and then back to pmd_none, as
3475	 * a result of MADV_DONTNEED running immediately after a huge pmd fault
3476	 * in a different thread of this mm, in turn leading to a misleading
3477	 * pmd_trans_huge() retval.  All we have to ensure is that it is a
3478	 * regular pmd that we can walk with pte_offset_map() and we can do that
3479	 * through an atomic read in C, which is what pmd_trans_unstable()
3480	 * provides.
3481	 */
3482	if (unlikely(pmd_trans_unstable(pmd) || pmd_devmap(*pmd)))
3483		return 0;
3484	/*
3485	 * A regular pmd is established and it can't morph into a huge pmd
3486	 * from under us anymore at this point because we hold the mmap_sem
3487	 * read mode and khugepaged takes it in write mode. So now it's
3488	 * safe to run pte_offset_map().
3489	 */
3490	pte = pte_offset_map(pmd, address);
3491
3492	return handle_pte_fault(mm, vma, address, pte, pmd, flags);
3493}
3494
3495/*
3496 * By the time we get here, we already hold the mm semaphore
3497 *
3498 * The mmap_sem may have been released depending on flags and our
3499 * return value.  See filemap_fault() and __lock_page_or_retry().
3500 */
3501int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3502		    unsigned long address, unsigned int flags)
3503{
3504	int ret;
3505
3506	__set_current_state(TASK_RUNNING);
3507
3508	count_vm_event(PGFAULT);
3509	mem_cgroup_count_vm_event(mm, PGFAULT);
3510
3511	/* do counter updates before entering really critical section. */
3512	check_sync_rss_stat(current);
3513
3514	/*
3515	 * Enable the memcg OOM handling for faults triggered in user
3516	 * space.  Kernel faults are handled more gracefully.
3517	 */
3518	if (flags & FAULT_FLAG_USER)
3519		mem_cgroup_oom_enable();
3520
3521	ret = __handle_mm_fault(mm, vma, address, flags);
3522
3523	if (flags & FAULT_FLAG_USER) {
3524		mem_cgroup_oom_disable();
3525		/*
3526		 * The task may have entered a memcg OOM situation but
3527		 * if the allocation error was handled gracefully (no
3528		 * VM_FAULT_OOM), there is no need to kill anything.
3529		 * Just clean up the OOM state peacefully.
3530		 */
3531		if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
3532			mem_cgroup_oom_synchronize(false);
3533	}
3534
3535	return ret;
3536}
3537EXPORT_SYMBOL_GPL(handle_mm_fault);
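/*
 * Illustrative sketch (editor's addition, not part of the kernel source):
 * arch page fault handlers are the main callers. After locating the vma
 * under down_read(&mm->mmap_sem), they do roughly:
 *
 *	fault = handle_mm_fault(mm, vma, address, flags);
 *	if (fault & VM_FAULT_ERROR)
 *		... deliver SIGSEGV/SIGBUS or enter the OOM path ...
 *
 * and retry with FAULT_FLAG_TRIED set if VM_FAULT_RETRY was returned.
 */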
3538
3539#ifndef __PAGETABLE_PUD_FOLDED
3540/*
3541 * Allocate page upper directory.
3542 * We've already handled the fast-path in-line.
3543 */
3544int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
3545{
3546	pud_t *new = pud_alloc_one(mm, address);
3547	if (!new)
3548		return -ENOMEM;
3549
3550	smp_wmb(); /* See comment in __pte_alloc */
3551
3552	spin_lock(&mm->page_table_lock);
3553	if (pgd_present(*pgd))		/* Another has populated it */
3554		pud_free(mm, new);
3555	else
3556		pgd_populate(mm, pgd, new);
3557	spin_unlock(&mm->page_table_lock);
3558	return 0;
3559}
3560#endif /* __PAGETABLE_PUD_FOLDED */
3561
3562#ifndef __PAGETABLE_PMD_FOLDED
3563/*
3564 * Allocate page middle directory.
3565 * We've already handled the fast-path in-line.
3566 */
3567int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
3568{
3569	pmd_t *new = pmd_alloc_one(mm, address);
3570	if (!new)
3571		return -ENOMEM;
3572
3573	smp_wmb(); /* See comment in __pte_alloc */
3574
3575	spin_lock(&mm->page_table_lock);
3576#ifndef __ARCH_HAS_4LEVEL_HACK
3577	if (!pud_present(*pud)) {
3578		mm_inc_nr_pmds(mm);
3579		pud_populate(mm, pud, new);
3580	} else	/* Another has populated it */
3581		pmd_free(mm, new);
3582#else
3583	if (!pgd_present(*pud)) {
3584		mm_inc_nr_pmds(mm);
3585		pgd_populate(mm, pud, new);
3586	} else /* Another has populated it */
3587		pmd_free(mm, new);
3588#endif /* __ARCH_HAS_4LEVEL_HACK */
3589	spin_unlock(&mm->page_table_lock);
3590	return 0;
3591}
3592#endif /* __PAGETABLE_PMD_FOLDED */
3593
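/*
 * Illustrative sketch (not part of this file): callers normally reach
 * __pud_alloc()/__pmd_alloc() above through the pud_alloc()/pmd_alloc()
 * fast-path wrappers while walking down to a locked PTE, e.g.
 * (cf. __get_locked_pte() earlier in this file):
 */
static pte_t * __maybe_unused example_walk_to_locked_pte(struct mm_struct *mm,
		unsigned long addr, spinlock_t **ptl)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pud_t *pud = pud_alloc(mm, pgd, addr);	/* slow path: __pud_alloc() */
	pmd_t *pmd;

	if (!pud)
		return NULL;
	pmd = pmd_alloc(mm, pud, addr);		/* slow path: __pmd_alloc() */
	if (!pmd)
		return NULL;
	return pte_alloc_map_lock(mm, pmd, addr, ptl);
}
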
3594static int __follow_pte(struct mm_struct *mm, unsigned long address,
3595		pte_t **ptepp, spinlock_t **ptlp)
3596{
3597	pgd_t *pgd;
3598	pud_t *pud;
3599	pmd_t *pmd;
3600	pte_t *ptep;
3601
3602	pgd = pgd_offset(mm, address);
3603	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
3604		goto out;
3605
3606	pud = pud_offset(pgd, address);
3607	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
3608		goto out;
3609
3610	pmd = pmd_offset(pud, address);
3611	VM_BUG_ON(pmd_trans_huge(*pmd));
3612	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
3613		goto out;
3614
3615	/* We cannot handle huge page PFN maps. Luckily they don't exist. */
3616	if (pmd_huge(*pmd))
3617		goto out;
3618
3619	ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
3620	if (!ptep)
3621		goto out;
3622	if (!pte_present(*ptep))
3623		goto unlock;
3624	*ptepp = ptep;
3625	return 0;
3626unlock:
3627	pte_unmap_unlock(ptep, *ptlp);
3628out:
3629	return -EINVAL;
3630}
3631
3632static inline int follow_pte(struct mm_struct *mm, unsigned long address,
3633			     pte_t **ptepp, spinlock_t **ptlp)
3634{
3635	int res;
3636
3637	/* (void) is needed to make gcc happy */
3638	(void) __cond_lock(*ptlp,
3639			   !(res = __follow_pte(mm, address, ptepp, ptlp)));
3640	return res;
3641}
3642
3643/**
3644 * follow_pfn - look up PFN at a user virtual address
3645 * @vma: memory mapping
3646 * @address: user virtual address
3647 * @pfn: location to store found PFN
3648 *
3649 * Only IO mappings and raw PFN mappings are allowed.
3650 *
3651 * Returns zero and the pfn at @pfn on success, a negative errno otherwise.
3652 */
3653int follow_pfn(struct vm_area_struct *vma, unsigned long address,
3654	unsigned long *pfn)
3655{
3656	int ret = -EINVAL;
3657	spinlock_t *ptl;
3658	pte_t *ptep;
3659
3660	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
3661		return ret;
3662
3663	ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
3664	if (ret)
3665		return ret;
3666	*pfn = pte_pfn(*ptep);
3667	pte_unmap_unlock(ptep, ptl);
3668	return 0;
3669}
3670EXPORT_SYMBOL(follow_pfn);
3671
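/*
 * Illustrative sketch (not part of this file): a driver resolving the
 * PFN behind a user address in one of its VM_PFNMAP mappings.  The
 * helper name is hypothetical; the caller must hold mmap_sem.
 */
static int __maybe_unused example_resolve_pfn(struct mm_struct *mm,
		unsigned long uaddr, unsigned long *pfn)
{
	struct vm_area_struct *vma = find_vma(mm, uaddr);

	if (!vma || vma->vm_start > uaddr)
		return -EFAULT;
	/* Fails with -EINVAL unless the VMA is VM_IO | VM_PFNMAP. */
	return follow_pfn(vma, uaddr, pfn);
}
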
3672#ifdef CONFIG_HAVE_IOREMAP_PROT
3673int follow_phys(struct vm_area_struct *vma,
3674		unsigned long address, unsigned int flags,
3675		unsigned long *prot, resource_size_t *phys)
3676{
3677	int ret = -EINVAL;
3678	pte_t *ptep, pte;
3679	spinlock_t *ptl;
3680
3681	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
3682		goto out;
3683
3684	if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
3685		goto out;
3686	pte = *ptep;
3687
3688	if ((flags & FOLL_WRITE) && !pte_write(pte))
3689		goto unlock;
3690
3691	*prot = pgprot_val(pte_pgprot(pte));
3692	*phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
3693
3694	ret = 0;
3695unlock:
3696	pte_unmap_unlock(ptep, ptl);
3697out:
3698	return ret;
3699}
3700
3701int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
3702			void *buf, int len, int write)
3703{
3704	resource_size_t phys_addr;
3705	unsigned long prot = 0;
3706	void __iomem *maddr;
3707	int offset = addr & (PAGE_SIZE-1);
3708
3709	if (follow_phys(vma, addr, write ? FOLL_WRITE : 0, &prot, &phys_addr))
3710		return -EINVAL;
3711
3712	maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
	if (!maddr)
		return -ENOMEM;
3713	if (write)
3714		memcpy_toio(maddr + offset, buf, len);
3715	else
3716		memcpy_fromio(buf, maddr + offset, len);
3717	iounmap(maddr);
3718
3719	return len;
3720}
3721EXPORT_SYMBOL_GPL(generic_access_phys);
3722#endif
3723
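/*
 * Illustrative sketch (not part of this file): an MMIO driver can make
 * its VM_IO | VM_PFNMAP mappings reachable via ptrace()//proc/pid/mem
 * by wiring generic_access_phys() into its vm_ops; __access_remote_vm()
 * below then finds it through vma->vm_ops->access.
 */
#ifdef CONFIG_HAVE_IOREMAP_PROT
static const struct vm_operations_struct example_mmio_vm_ops __maybe_unused = {
	.access = generic_access_phys,
};
#endif
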
3724/*
3725 * Access another process' address space as given in mm.  If non-NULL, use the
3726 * given task for page fault accounting.
3727 */
3728static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
3729		unsigned long addr, void *buf, int len, int write)
3730{
3731	struct vm_area_struct *vma;
3732	void *old_buf = buf;
3733
3734	down_read(&mm->mmap_sem);
3735	/* ignore errors, just check how much was successfully transferred */
3736	while (len) {
3737		int bytes, ret, offset;
3738		void *maddr;
3739		struct page *page = NULL;
3740
3741		ret = get_user_pages_remote(tsk, mm, addr, 1,
3742				write, 1, &page, &vma);
3743		if (ret <= 0) {
3744#ifndef CONFIG_HAVE_IOREMAP_PROT
3745			break;
3746#else
3747			/*
3748			 * Check if this is a VM_IO | VM_PFNMAP VMA, which
3749			 * we can access using slightly different code.
3750			 */
3751			vma = find_vma(mm, addr);
3752			if (!vma || vma->vm_start > addr)
3753				break;
3754			if (vma->vm_ops && vma->vm_ops->access)
3755				ret = vma->vm_ops->access(vma, addr, buf,
3756							  len, write);
3757			if (ret <= 0)
3758				break;
3759			bytes = ret;
3760#endif
3761		} else {
3762			bytes = len;
3763			offset = addr & (PAGE_SIZE-1);
3764			if (bytes > PAGE_SIZE-offset)
3765				bytes = PAGE_SIZE-offset;
3766
3767			maddr = kmap(page);
3768			if (write) {
3769				copy_to_user_page(vma, page, addr,
3770						  maddr + offset, buf, bytes);
3771				set_page_dirty_lock(page);
3772			} else {
3773				copy_from_user_page(vma, page, addr,
3774						    buf, maddr + offset, bytes);
3775			}
3776			kunmap(page);
3777			put_page(page);
3778		}
3779		len -= bytes;
3780		buf += bytes;
3781		addr += bytes;
3782	}
3783	up_read(&mm->mmap_sem);
3784
3785	return buf - old_buf;
3786}
3787
3788/**
3789 * access_remote_vm - access another process' address space
3790 * @mm:		the mm_struct of the target address space
3791 * @addr:	start address to access
3792 * @buf:	source or destination buffer
3793 * @len:	number of bytes to transfer
3794 * @write:	whether the access is a write
3795 *
3796 * The caller must hold a reference on @mm.
 *
 * Returns the number of bytes transferred.
3797 */
3798int access_remote_vm(struct mm_struct *mm, unsigned long addr,
3799		void *buf, int len, int write)
3800{
3801	return __access_remote_vm(NULL, mm, addr, buf, len, write);
3802}
3803
3804/*
3805 * Access another process' address space.
3806 * Source/target buffer must be in kernel space.
3807 * Do not walk the page table directly; use get_user_pages().
3808 */
3809int access_process_vm(struct task_struct *tsk, unsigned long addr,
3810		void *buf, int len, int write)
3811{
3812	struct mm_struct *mm;
3813	int ret;
3814
3815	mm = get_task_mm(tsk);
3816	if (!mm)
3817		return 0;
3818
3819	ret = __access_remote_vm(tsk, mm, addr, buf, len, write);
3820	mmput(mm);
3821
3822	return ret;
3823}
3824
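/*
 * Illustrative sketch (not part of this file): a ptrace-style read of a
 * single word from another task via access_process_vm().  The helper
 * name is hypothetical.
 */
static int __maybe_unused example_peek_word(struct task_struct *child,
		unsigned long addr, unsigned long *val)
{
	int copied = access_process_vm(child, addr, val, sizeof(*val), 0);

	return copied == sizeof(*val) ? 0 : -EIO;
}
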
3825/*
3826 * Print the name of a VMA.
3827 */
3828void print_vma_addr(char *prefix, unsigned long ip)
3829{
3830	struct mm_struct *mm = current->mm;
3831	struct vm_area_struct *vma;
3832
3833	/*
3834	 * Do not print if we are in atomic
3835	 * context (in exception stacks, etc.):
3836	 */
3837	if (preempt_count())
3838		return;
3839
3840	down_read(&mm->mmap_sem);
3841	vma = find_vma(mm, ip);
3842	if (vma && vma->vm_file) {
3843		struct file *f = vma->vm_file;
3844		char *buf = (char *)__get_free_page(GFP_KERNEL);
3845		if (buf) {
3846			char *p;
3847
3848			p = file_path(f, buf, PAGE_SIZE);
3849			if (IS_ERR(p))
3850				p = "?";
3851			printk("%s%s[%lx+%lx]", prefix, kbasename(p),
3852					vma->vm_start,
3853					vma->vm_end - vma->vm_start);
3854			free_page((unsigned long)buf);
3855		}
3856	}
3857	up_read(&mm->mmap_sem);
3858}
3859
3860#if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
3861void __might_fault(const char *file, int line)
3862{
3863	/*
3864	 * Some code (nfs/sunrpc) uses socket ops on kernel memory while
3865	 * holding the mmap_sem; this is safe because kernel memory doesn't
3866	 * get paged out, so we'll never actually fault, and the
3867	 * annotations below would otherwise generate false positives.
3868	 */
3869	if (segment_eq(get_fs(), KERNEL_DS))
3870		return;
3871	if (pagefault_disabled())
3872		return;
3873	__might_sleep(file, line, 0);
3874#if defined(CONFIG_DEBUG_ATOMIC_SLEEP)
3875	if (current->mm)
3876		might_lock_read(&current->mm->mmap_sem);
3877#endif
3878}
3879EXPORT_SYMBOL(__might_fault);
3880#endif
3881
3882#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
3883static void clear_gigantic_page(struct page *page,
3884				unsigned long addr,
3885				unsigned int pages_per_huge_page)
3886{
3887	int i;
3888	struct page *p = page;
3889
3890	might_sleep();
3891	for (i = 0; i < pages_per_huge_page;
3892	     i++, p = mem_map_next(p, page, i)) {
3893		cond_resched();
3894		clear_user_highpage(p, addr + i * PAGE_SIZE);
3895	}
3896}
3897void clear_huge_page(struct page *page,
3898		     unsigned long addr, unsigned int pages_per_huge_page)
3899{
3900	int i;
3901
3902	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
3903		clear_gigantic_page(page, addr, pages_per_huge_page);
3904		return;
3905	}
3906
3907	might_sleep();
3908	for (i = 0; i < pages_per_huge_page; i++) {
3909		cond_resched();
3910		clear_user_highpage(page + i, addr + i * PAGE_SIZE);
3911	}
3912}
3913
3914static void copy_user_gigantic_page(struct page *dst, struct page *src,
3915				    unsigned long addr,
3916				    struct vm_area_struct *vma,
3917				    unsigned int pages_per_huge_page)
3918{
3919	int i;
3920	struct page *dst_base = dst;
3921	struct page *src_base = src;
3922
3923	for (i = 0; i < pages_per_huge_page; ) {
3924		cond_resched();
3925		copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
3926
3927		i++;
3928		dst = mem_map_next(dst, dst_base, i);
3929		src = mem_map_next(src, src_base, i);
3930	}
3931}
3932
3933void copy_user_huge_page(struct page *dst, struct page *src,
3934			 unsigned long addr, struct vm_area_struct *vma,
3935			 unsigned int pages_per_huge_page)
3936{
3937	int i;
3938
3939	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
3940		copy_user_gigantic_page(dst, src, addr, vma,
3941					pages_per_huge_page);
3942		return;
3943	}
3944
3945	might_sleep();
3946	for (i = 0; i < pages_per_huge_page; i++) {
3947		cond_resched();
3948		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
3949	}
3950}
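
/*
 * Illustrative sketch (not part of this file): how a hugetlb-style
 * no-page handler might use the helper above on a freshly allocated
 * huge page (cf. hugetlb_no_page()).  The helper name is hypothetical.
 */
static void __maybe_unused example_prep_new_huge_page(struct page *page,
		unsigned long haddr, unsigned int nr_pages)
{
	/* Zero every subpage, rescheduling between them if needed. */
	clear_huge_page(page, haddr, nr_pages);
	__SetPageUptodate(page);
}
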
3951#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
3952
3953#if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS
3954
3955static struct kmem_cache *page_ptl_cachep;
3956
3957void __init ptlock_cache_init(void)
3958{
3959	page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
3960			SLAB_PANIC, NULL);
3961}
3962
3963bool ptlock_alloc(struct page *page)
3964{
3965	spinlock_t *ptl;
3966
3967	ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
3968	if (!ptl)
3969		return false;
3970	page->ptl = ptl;
3971	return true;
3972}
3973
3974void ptlock_free(struct page *page)
3975{
3976	kmem_cache_free(page_ptl_cachep, page->ptl);
3977}
3978#endif
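
/*
 * Illustrative note (not part of this file): the page->ptl allocated
 * above is never taken directly; callers reach it through pte_lockptr(),
 * usually via the pte_offset_map_lock()/pte_unmap_unlock() pair:
 *
 *	spinlock_t *ptl;
 *	pte_t *pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *	... examine or update *pte under the split lock ...
 *	pte_unmap_unlock(pte, ptl);
 */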