arch/x86/mm/fault.c (v3.1)
   1/*
   2 *  Copyright (C) 1995  Linus Torvalds
   3 *  Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
   4 *  Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
   5 */
   6#include <linux/magic.h>		/* STACK_END_MAGIC		*/
   7#include <linux/sched.h>		/* test_thread_flag(), ...	*/
   8#include <linux/kdebug.h>		/* oops_begin/end, ...		*/
   9#include <linux/module.h>		/* search_exception_table	*/
  10#include <linux/bootmem.h>		/* max_low_pfn			*/
  11#include <linux/kprobes.h>		/* __kprobes, ...		*/
  12#include <linux/mmiotrace.h>		/* kmmio_handler, ...		*/
  13#include <linux/perf_event.h>		/* perf_sw_event		*/
  14#include <linux/hugetlb.h>		/* hstate_index_to_shift	*/
  15#include <linux/prefetch.h>		/* prefetchw			*/
  16
  17#include <asm/traps.h>			/* dotraplinkage, ...		*/
  18#include <asm/pgalloc.h>		/* pgd_*(), ...			*/
  19#include <asm/kmemcheck.h>		/* kmemcheck_*(), ...		*/
  20#include <asm/vsyscall.h>
  21
  22/*
  23 * Page fault error code bits:
  24 *
  25 *   bit 0 ==	 0: no page found	1: protection fault
  26 *   bit 1 ==	 0: read access		1: write access
  27 *   bit 2 ==	 0: kernel-mode access	1: user-mode access
  28 *   bit 3 ==				1: use of reserved bit detected
  29 *   bit 4 ==				1: fault was an instruction fetch
  30 */
  31enum x86_pf_error_code {
  32
  33	PF_PROT		=		1 << 0,
  34	PF_WRITE	=		1 << 1,
  35	PF_USER		=		1 << 2,
  36	PF_RSVD		=		1 << 3,
  37	PF_INSTR	=		1 << 4,
  38};
  39
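/*
 * [Editor's note] A minimal user-space sketch (not kernel code) of how
 * the PF_* bits above decode a hardware error code; the sample value
 * 0x07 is hypothetical.
 */
#include <stdio.h>

static void decode_pf_error_code(unsigned long code)
{
	printf("%s-mode %s, %s%s%s\n",
	       code & (1 << 2) ? "user" : "kernel",		/* PF_USER  */
	       code & (1 << 1) ? "write" : "read",		/* PF_WRITE */
	       code & (1 << 0) ? "protection fault"
			       : "page not present",		/* PF_PROT  */
	       code & (1 << 3) ? ", reserved bit set" : "",	/* PF_RSVD  */
	       code & (1 << 4) ? ", instruction fetch" : "");	/* PF_INSTR */
}

int main(void)
{
	decode_pf_error_code(0x07);	/* "user-mode write, protection fault" */
	return 0;
}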
  40/*
  41 * Returns 0 if mmiotrace is disabled, or if the fault is not
  42 * handled by mmiotrace:
  43 */
  44static inline int __kprobes
  45kmmio_fault(struct pt_regs *regs, unsigned long addr)
  46{
  47	if (unlikely(is_kmmio_active()))
  48		if (kmmio_handler(regs, addr) == 1)
  49			return -1;
  50	return 0;
  51}
  52
  53static inline int __kprobes notify_page_fault(struct pt_regs *regs)
  54{
  55	int ret = 0;
  56
  57	/* kprobe_running() needs smp_processor_id() */
  58	if (kprobes_built_in() && !user_mode_vm(regs)) {
  59		preempt_disable();
  60		if (kprobe_running() && kprobe_fault_handler(regs, 14))
  61			ret = 1;
  62		preempt_enable();
  63	}
  64
  65	return ret;
  66}
  67
  68/*
  69 * Prefetch quirks:
  70 *
  71 * 32-bit mode:
  72 *
  73 *   Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
  74 *   Check that here and ignore it.
  75 *
  76 * 64-bit mode:
  77 *
  78 *   Sometimes the CPU reports invalid exceptions on prefetch.
  79 *   Check that here and ignore it.
  80 *
  81 * Opcode checker based on code by Richard Brunner.
  82 */
  83static inline int
  84check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
  85		      unsigned char opcode, int *prefetch)
  86{
  87	unsigned char instr_hi = opcode & 0xf0;
  88	unsigned char instr_lo = opcode & 0x0f;
  89
  90	switch (instr_hi) {
  91	case 0x20:
  92	case 0x30:
  93		/*
  94		 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
  95		 * In X86_64 long mode, the CPU will signal invalid
   96		 * opcode if some of these prefixes are present, so
   97		 * X86_64 will never get here anyway.
  98		 */
  99		return ((instr_lo & 7) == 0x6);
 100#ifdef CONFIG_X86_64
 101	case 0x40:
 102		/*
 103		 * In AMD64 long mode 0x40..0x4F are valid REX prefixes
 104		 * Need to figure out under what instruction mode the
 105		 * instruction was issued. Could check the LDT for lm,
 106		 * but for now it's good enough to assume that long
 107		 * mode only uses well known segments or kernel.
 108		 */
 109		return (!user_mode(regs) || user_64bit_mode(regs));
 110#endif
 111	case 0x60:
 112		/* 0x64 thru 0x67 are valid prefixes in all modes. */
 113		return (instr_lo & 0xC) == 0x4;
 114	case 0xF0:
 115		/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
 116		return !instr_lo || (instr_lo>>1) == 1;
 117	case 0x00:
 118		/* Prefetch instruction is 0x0F0D or 0x0F18 */
 119		if (probe_kernel_address(instr, opcode))
 120			return 0;
 121
 122		*prefetch = (instr_lo == 0xF) &&
 123			(opcode == 0x0D || opcode == 0x18);
 124		return 0;
 125	default:
 126		return 0;
 127	}
 128}
 129
 130static int
 131is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
 132{
 133	unsigned char *max_instr;
 134	unsigned char *instr;
 135	int prefetch = 0;
 136
 137	/*
  138	 * If it was an exec (instruction fetch) fault on an NX page, then
 139	 * do not ignore the fault:
 140	 */
 141	if (error_code & PF_INSTR)
 142		return 0;
 143
 144	instr = (void *)convert_ip_to_linear(current, regs);
 145	max_instr = instr + 15;
 146
 147	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
 148		return 0;
 149
 150	while (instr < max_instr) {
 151		unsigned char opcode;
 152
 153		if (probe_kernel_address(instr, opcode))
 154			break;
 155
 156		instr++;
 157
 158		if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
 159			break;
 160	}
 161	return prefetch;
 162}
 163
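/*
 * [Editor's note] An illustrative user-space sketch (not kernel code) of
 * the scan above: walk at most 15 bytes, skip anything that looks like a
 * segment-override prefix, and report a prefetch if a two-byte 0x0F 0x0D
 * or 0x0F 0x18 opcode is reached.  The sample bytes are hypothetical.
 */
#include <stdio.h>
#include <stddef.h>

static int looks_like_prefetch(const unsigned char *instr, size_t len)
{
	size_t i;

	for (i = 0; i + 1 < len && i < 15; i++) {
		unsigned char op = instr[i];

		if (op == 0x0f)		/* two-byte opcode escape */
			return instr[i + 1] == 0x0d || instr[i + 1] == 0x18;

		/* skip 0x26/0x2E/0x36/0x3E segment-override prefixes */
		if (((op & 0xf0) == 0x20 || (op & 0xf0) == 0x30) &&
		    (op & 0x07) == 0x06)
			continue;

		return 0;	/* neither a prefix nor a prefetch */
	}
	return 0;
}

int main(void)
{
	unsigned char buf[] = { 0x3e, 0x0f, 0x18, 0x00 }; /* ds: prefetchnta */

	printf("prefetch: %d\n", looks_like_prefetch(buf, sizeof(buf))); /* 1 */
	return 0;
}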
 164static void
 165force_sig_info_fault(int si_signo, int si_code, unsigned long address,
 166		     struct task_struct *tsk, int fault)
 167{
 168	unsigned lsb = 0;
 169	siginfo_t info;
 170
 171	info.si_signo	= si_signo;
 172	info.si_errno	= 0;
 173	info.si_code	= si_code;
 174	info.si_addr	= (void __user *)address;
 175	if (fault & VM_FAULT_HWPOISON_LARGE)
  176		lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
 177	if (fault & VM_FAULT_HWPOISON)
 178		lsb = PAGE_SHIFT;
 179	info.si_addr_lsb = lsb;
 180
 181	force_sig_info(si_signo, &info, tsk);
 182}
 183
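/*
 * [Editor's note] si_addr_lsb tells the recipient how much of si_addr is
 * meaningful: PAGE_SHIFT (12) for a poisoned 4K page, the huge-page shift
 * (e.g. 21 for 2M) for VM_FAULT_HWPOISON_LARGE.  A hypothetical user-space
 * handler might mask the address accordingly:
 */
#include <signal.h>
#include <stdio.h>

static void sigbus_handler(int sig, siginfo_t *info, void *uctx)
{
	unsigned long mask  = ~((1UL << info->si_addr_lsb) - 1);
	unsigned long start = (unsigned long)info->si_addr & mask;

	(void)sig;
	(void)uctx;
	/* printf() is not async-signal-safe; for illustration only. */
	printf("poisoned region starts at 0x%lx\n", start);
}

int main(void)
{
	struct sigaction sa = {
		.sa_sigaction	= sigbus_handler,
		.sa_flags	= SA_SIGINFO,
	};

	sigaction(SIGBUS, &sa, NULL);
	return 0;
}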
 184DEFINE_SPINLOCK(pgd_lock);
 185LIST_HEAD(pgd_list);
 186
 187#ifdef CONFIG_X86_32
 188static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
 189{
 190	unsigned index = pgd_index(address);
 191	pgd_t *pgd_k;
 192	pud_t *pud, *pud_k;
 193	pmd_t *pmd, *pmd_k;
 194
 195	pgd += index;
 196	pgd_k = init_mm.pgd + index;
 197
 198	if (!pgd_present(*pgd_k))
 199		return NULL;
 200
 201	/*
 202	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
 203	 * and redundant with the set_pmd() on non-PAE. As would
 204	 * set_pud.
 205	 */
 206	pud = pud_offset(pgd, address);
 207	pud_k = pud_offset(pgd_k, address);
 208	if (!pud_present(*pud_k))
 209		return NULL;
 210
 211	pmd = pmd_offset(pud, address);
 212	pmd_k = pmd_offset(pud_k, address);
 213	if (!pmd_present(*pmd_k))
 214		return NULL;
 215
 216	if (!pmd_present(*pmd))
 217		set_pmd(pmd, *pmd_k);
 218	else
 219		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
 220
 221	return pmd_k;
 222}
 223
 224void vmalloc_sync_all(void)
 225{
 226	unsigned long address;
 227
 228	if (SHARED_KERNEL_PMD)
 229		return;
 230
 231	for (address = VMALLOC_START & PMD_MASK;
 232	     address >= TASK_SIZE && address < FIXADDR_TOP;
 233	     address += PMD_SIZE) {
 234		struct page *page;
 235
 236		spin_lock(&pgd_lock);
 237		list_for_each_entry(page, &pgd_list, lru) {
 238			spinlock_t *pgt_lock;
 239			pmd_t *ret;
 240
 241			/* the pgt_lock only for Xen */
 242			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
 243
 244			spin_lock(pgt_lock);
 245			ret = vmalloc_sync_one(page_address(page), address);
 246			spin_unlock(pgt_lock);
 247
 248			if (!ret)
 249				break;
 250		}
 251		spin_unlock(&pgd_lock);
 252	}
 253}
 254
 255/*
 256 * 32-bit:
 257 *
 258 *   Handle a fault on the vmalloc or module mapping area
 259 */
 260static noinline __kprobes int vmalloc_fault(unsigned long address)
 261{
 262	unsigned long pgd_paddr;
 263	pmd_t *pmd_k;
 264	pte_t *pte_k;
 265
 266	/* Make sure we are in vmalloc area: */
 267	if (!(address >= VMALLOC_START && address < VMALLOC_END))
 268		return -1;
 269
 270	WARN_ON_ONCE(in_nmi());
 271
 272	/*
 273	 * Synchronize this task's top level page-table
 274	 * with the 'reference' page table.
 275	 *
 276	 * Do _not_ use "current" here. We might be inside
 277	 * an interrupt in the middle of a task switch..
 278	 */
 279	pgd_paddr = read_cr3();
 280	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
 281	if (!pmd_k)
 282		return -1;
 283
 284	pte_k = pte_offset_kernel(pmd_k, address);
 285	if (!pte_present(*pte_k))
 286		return -1;
 287
 288	return 0;
 289}
 290
 291/*
 292 * Did it hit the DOS screen memory VA from vm86 mode?
 293 */
 294static inline void
 295check_v8086_mode(struct pt_regs *regs, unsigned long address,
 296		 struct task_struct *tsk)
 297{
 298	unsigned long bit;
 299
 300	if (!v8086_mode(regs))
 301		return;
 302
 303	bit = (address - 0xA0000) >> PAGE_SHIFT;
 304	if (bit < 32)
 305		tsk->thread.screen_bitmap |= 1 << bit;
 306}
 307
 308static bool low_pfn(unsigned long pfn)
 309{
 310	return pfn < max_low_pfn;
 311}
 312
 313static void dump_pagetable(unsigned long address)
 314{
 315	pgd_t *base = __va(read_cr3());
 316	pgd_t *pgd = &base[pgd_index(address)];
 317	pmd_t *pmd;
 318	pte_t *pte;
 319
 320#ifdef CONFIG_X86_PAE
 321	printk("*pdpt = %016Lx ", pgd_val(*pgd));
 322	if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd))
 323		goto out;
 324#endif
 325	pmd = pmd_offset(pud_offset(pgd, address), address);
 326	printk(KERN_CONT "*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd));
 327
 328	/*
 329	 * We must not directly access the pte in the highpte
 330	 * case if the page table is located in highmem.
 331	 * And let's rather not kmap-atomic the pte, just in case
 332	 * it's allocated already:
 333	 */
 334	if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd))
 335		goto out;
 336
 337	pte = pte_offset_kernel(pmd, address);
 338	printk("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte));
 339out:
 340	printk("\n");
 341}
 342
 343#else /* CONFIG_X86_64: */
 344
 345void vmalloc_sync_all(void)
 346{
 347	sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
 348}
 349
 350/*
 351 * 64-bit:
 352 *
 353 *   Handle a fault on the vmalloc area
 354 *
 355 * This assumes no large pages in there.
 356 */
 357static noinline __kprobes int vmalloc_fault(unsigned long address)
 358{
 359	pgd_t *pgd, *pgd_ref;
 360	pud_t *pud, *pud_ref;
 361	pmd_t *pmd, *pmd_ref;
 362	pte_t *pte, *pte_ref;
 363
 364	/* Make sure we are in vmalloc area: */
 365	if (!(address >= VMALLOC_START && address < VMALLOC_END))
 366		return -1;
 367
 368	WARN_ON_ONCE(in_nmi());
 369
 370	/*
 371	 * Copy kernel mappings over when needed. This can also
  372	 * happen due to a race in a page table update. In the latter
 373	 * case just flush:
 374	 */
 375	pgd = pgd_offset(current->active_mm, address);
 376	pgd_ref = pgd_offset_k(address);
 377	if (pgd_none(*pgd_ref))
 378		return -1;
 379
 380	if (pgd_none(*pgd))
 381		set_pgd(pgd, *pgd_ref);
 382	else
 383		BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
 384
 385	/*
 386	 * Below here mismatches are bugs because these lower tables
 387	 * are shared:
 388	 */
 389
 390	pud = pud_offset(pgd, address);
 391	pud_ref = pud_offset(pgd_ref, address);
 392	if (pud_none(*pud_ref))
 393		return -1;
 394
 395	if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
 396		BUG();
 397
 398	pmd = pmd_offset(pud, address);
 399	pmd_ref = pmd_offset(pud_ref, address);
 400	if (pmd_none(*pmd_ref))
 401		return -1;
 402
 403	if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
 404		BUG();
 405
 406	pte_ref = pte_offset_kernel(pmd_ref, address);
 407	if (!pte_present(*pte_ref))
 408		return -1;
 409
 410	pte = pte_offset_kernel(pmd, address);
 411
 412	/*
 413	 * Don't use pte_page here, because the mappings can point
 414	 * outside mem_map, and the NUMA hash lookup cannot handle
 415	 * that:
 416	 */
 417	if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
 418		BUG();
 419
 420	return 0;
 421}
 422
 423static const char errata93_warning[] =
  424KERN_ERR
 425"******* Your BIOS seems to not contain a fix for K8 errata #93\n"
 426"******* Working around it, but it may cause SEGVs or burn power.\n"
 427"******* Please consider a BIOS update.\n"
 428"******* Disabling USB legacy in the BIOS may also help.\n";
 429
 430/*
 431 * No vm86 mode in 64-bit mode:
 432 */
 433static inline void
 434check_v8086_mode(struct pt_regs *regs, unsigned long address,
 435		 struct task_struct *tsk)
 436{
 437}
 438
 439static int bad_address(void *p)
 440{
 441	unsigned long dummy;
 442
 443	return probe_kernel_address((unsigned long *)p, dummy);
 444}
 445
 446static void dump_pagetable(unsigned long address)
 447{
 448	pgd_t *base = __va(read_cr3() & PHYSICAL_PAGE_MASK);
 449	pgd_t *pgd = base + pgd_index(address);
 450	pud_t *pud;
 451	pmd_t *pmd;
 452	pte_t *pte;
 453
 454	if (bad_address(pgd))
 455		goto bad;
 456
 457	printk("PGD %lx ", pgd_val(*pgd));
 458
 459	if (!pgd_present(*pgd))
 460		goto out;
 461
 462	pud = pud_offset(pgd, address);
 463	if (bad_address(pud))
 464		goto bad;
 465
 466	printk("PUD %lx ", pud_val(*pud));
 467	if (!pud_present(*pud) || pud_large(*pud))
 468		goto out;
 469
 470	pmd = pmd_offset(pud, address);
 471	if (bad_address(pmd))
 472		goto bad;
 473
 474	printk("PMD %lx ", pmd_val(*pmd));
 475	if (!pmd_present(*pmd) || pmd_large(*pmd))
 476		goto out;
 477
 478	pte = pte_offset_kernel(pmd, address);
 479	if (bad_address(pte))
 480		goto bad;
 481
 482	printk("PTE %lx", pte_val(*pte));
 483out:
 484	printk("\n");
 485	return;
 486bad:
 487	printk("BAD\n");
 488}
 489
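/*
 * [Editor's note] On a healthy mapping the walk above emits a single
 * line; with hypothetical table entries it would look like:
 *
 *	PGD 1a0e067 PUD 1a0f067 PMD 1a10067 PTE 8000000001c00063
 *
 * while a missing level ends the line early (e.g. "PGD 0 ") and an
 * unreadable entry prints "BAD".
 */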
 490#endif /* CONFIG_X86_64 */
 491
 492/*
 493 * Workaround for K8 erratum #93 & buggy BIOS.
 494 *
 495 * BIOS SMM functions are required to use a specific workaround
 496 * to avoid corruption of the 64bit RIP register on C stepping K8.
 497 *
  498 * A lot of BIOSes that didn't get tested properly miss this.
  499 *
  500 * The OS sees this as a page fault with the upper 32 bits of RIP cleared.
 501 * Try to work around it here.
 502 *
 503 * Note we only handle faults in kernel here.
 504 * Does nothing on 32-bit.
 505 */
 506static int is_errata93(struct pt_regs *regs, unsigned long address)
 507{
 508#ifdef CONFIG_X86_64
 509	if (address != regs->ip)
 510		return 0;
 511
 512	if ((address >> 32) != 0)
 513		return 0;
 514
 515	address |= 0xffffffffUL << 32;
 516	if ((address >= (u64)_stext && address <= (u64)_etext) ||
 517	    (address >= MODULES_VADDR && address <= MODULES_END)) {
 518		printk_once(errata93_warning);
 519		regs->ip = address;
 520		return 1;
 521	}
 522#endif
 523	return 0;
 524}
 525
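/*
 * [Editor's note] The workaround just sign-extends the truncated RIP.
 * With a hypothetical kernel-text address:
 *
 *	faulting RIP:	0x0000000081234567   (upper 32 bits lost)
 *	address |= 0xffffffffUL << 32;
 *	fixed RIP:	0xffffffff81234567   (back inside _stext.._etext)
 */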
 526/*
  527 * Work around K8 erratum #100: K8 in compat mode occasionally jumps
 528 * to illegal addresses >4GB.
 529 *
 530 * We catch this in the page fault handler because these addresses
 531 * are not reachable. Just detect this case and return.  Any code
 532 * segment in LDT is compatibility mode.
 533 */
 534static int is_errata100(struct pt_regs *regs, unsigned long address)
 535{
 536#ifdef CONFIG_X86_64
 537	if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
 538		return 1;
 539#endif
 540	return 0;
 541}
 542
 543static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
 544{
 545#ifdef CONFIG_X86_F00F_BUG
 546	unsigned long nr;
 547
 548	/*
 549	 * Pentium F0 0F C7 C8 bug workaround:
 550	 */
 551	if (boot_cpu_data.f00f_bug) {
 552		nr = (address - idt_descr.address) >> 3;
 553
 554		if (nr == 6) {
 555			do_invalid_op(regs, 0);
 556			return 1;
 557		}
 558	}
 559#endif
 560	return 0;
 561}
 562
 563static const char nx_warning[] = KERN_CRIT
 564"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
 565
 566static void
 567show_fault_oops(struct pt_regs *regs, unsigned long error_code,
 568		unsigned long address)
 569{
 570	if (!oops_may_print())
 571		return;
 572
 573	if (error_code & PF_INSTR) {
 574		unsigned int level;
 575
 576		pte_t *pte = lookup_address(address, &level);
 577
 578		if (pte && pte_present(*pte) && !pte_exec(*pte))
 579			printk(nx_warning, current_uid());
 580	}
 581
 582	printk(KERN_ALERT "BUG: unable to handle kernel ");
 583	if (address < PAGE_SIZE)
 584		printk(KERN_CONT "NULL pointer dereference");
 585	else
 586		printk(KERN_CONT "paging request");
 587
 588	printk(KERN_CONT " at %p\n", (void *) address);
 589	printk(KERN_ALERT "IP:");
 590	printk_address(regs->ip, 1);
 591
 592	dump_pagetable(address);
 593}
 594
 595static noinline void
 596pgtable_bad(struct pt_regs *regs, unsigned long error_code,
 597	    unsigned long address)
 598{
 599	struct task_struct *tsk;
 600	unsigned long flags;
 601	int sig;
 602
 603	flags = oops_begin();
 604	tsk = current;
 605	sig = SIGKILL;
 606
 607	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
 608	       tsk->comm, address);
 609	dump_pagetable(address);
 610
 611	tsk->thread.cr2		= address;
 612	tsk->thread.trap_no	= 14;
 613	tsk->thread.error_code	= error_code;
 614
 615	if (__die("Bad pagetable", regs, error_code))
 616		sig = 0;
 617
 618	oops_end(flags, regs, sig);
 619}
 620
 621static noinline void
 622no_context(struct pt_regs *regs, unsigned long error_code,
 623	   unsigned long address)
 624{
 625	struct task_struct *tsk = current;
 626	unsigned long *stackend;
 627	unsigned long flags;
 628	int sig;
 629
 630	/* Are we prepared to handle this kernel fault? */
 631	if (fixup_exception(regs))
 632		return;
 633
 634	/*
 635	 * 32-bit:
 636	 *
 637	 *   Valid to do another page fault here, because if this fault
 638	 *   had been triggered by is_prefetch fixup_exception would have
 639	 *   handled it.
 640	 *
 641	 * 64-bit:
 642	 *
 643	 *   Hall of shame of CPU/BIOS bugs.
 644	 */
 645	if (is_prefetch(regs, error_code, address))
 646		return;
 647
 648	if (is_errata93(regs, address))
 649		return;
 650
 651	/*
 652	 * Oops. The kernel tried to access some bad page. We'll have to
 653	 * terminate things with extreme prejudice:
 654	 */
 655	flags = oops_begin();
 656
 657	show_fault_oops(regs, error_code, address);
 658
 659	stackend = end_of_stack(tsk);
 660	if (tsk != &init_task && *stackend != STACK_END_MAGIC)
 661		printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");
 662
 663	tsk->thread.cr2		= address;
 664	tsk->thread.trap_no	= 14;
 665	tsk->thread.error_code	= error_code;
 666
 667	sig = SIGKILL;
 668	if (__die("Oops", regs, error_code))
 669		sig = 0;
 670
 671	/* Executive summary in case the body of the oops scrolled away */
 672	printk(KERN_EMERG "CR2: %016lx\n", address);
 673
 674	oops_end(flags, regs, sig);
 675}
 676
 677/*
 678 * Print out info about fatal segfaults, if the show_unhandled_signals
 679 * sysctl is set:
 680 */
 681static inline void
 682show_signal_msg(struct pt_regs *regs, unsigned long error_code,
 683		unsigned long address, struct task_struct *tsk)
 684{
 685	if (!unhandled_signal(tsk, SIGSEGV))
 686		return;
 687
 688	if (!printk_ratelimit())
 689		return;
 690
 691	printk("%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
 692		task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
 693		tsk->comm, task_pid_nr(tsk), address,
 694		(void *)regs->ip, (void *)regs->sp, error_code);
 695
 696	print_vma_addr(KERN_CONT " in ", regs->ip);
 697
 698	printk(KERN_CONT "\n");
 699}
 700
 701static void
 702__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
 703		       unsigned long address, int si_code)
 704{
 705	struct task_struct *tsk = current;
 706
 707	/* User mode accesses just cause a SIGSEGV */
 708	if (error_code & PF_USER) {
 709		/*
 710		 * It's possible to have interrupts off here:
 711		 */
 712		local_irq_enable();
 713
 714		/*
 715		 * Valid to do another page fault here because this one came
 716		 * from user space:
 717		 */
 718		if (is_prefetch(regs, error_code, address))
 719			return;
 720
 721		if (is_errata100(regs, address))
 722			return;
 723
 724#ifdef CONFIG_X86_64
 725		/*
 726		 * Instruction fetch faults in the vsyscall page might need
 727		 * emulation.
 728		 */
 729		if (unlikely((error_code & PF_INSTR) &&
 730			     ((address & ~0xfff) == VSYSCALL_START))) {
 731			if (emulate_vsyscall(regs, address))
 732				return;
 733		}
 734#endif
 735
 736		if (unlikely(show_unhandled_signals))
 737			show_signal_msg(regs, error_code, address, tsk);
 738
 739		/* Kernel addresses are always protection faults: */
 740		tsk->thread.cr2		= address;
 741		tsk->thread.error_code	= error_code | (address >= TASK_SIZE);
 742		tsk->thread.trap_no	= 14;
 743
 744		force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0);
 745
 746		return;
 747	}
 748
 749	if (is_f00f_bug(regs, address))
 750		return;
 751
 752	no_context(regs, error_code, address);
 753}
 754
 755static noinline void
 756bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
 757		     unsigned long address)
 758{
 759	__bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
 760}
 761
 762static void
 763__bad_area(struct pt_regs *regs, unsigned long error_code,
 764	   unsigned long address, int si_code)
 765{
 766	struct mm_struct *mm = current->mm;
 767
 768	/*
 769	 * Something tried to access memory that isn't in our memory map..
 770	 * Fix it, but check if it's kernel or user first..
 771	 */
 772	up_read(&mm->mmap_sem);
 773
 774	__bad_area_nosemaphore(regs, error_code, address, si_code);
 775}
 776
 777static noinline void
 778bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
 779{
 780	__bad_area(regs, error_code, address, SEGV_MAPERR);
 781}
 782
 783static noinline void
 784bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
 785		      unsigned long address)
 786{
 787	__bad_area(regs, error_code, address, SEGV_ACCERR);
 788}
 789
 790/* TODO: fixup for "mm-invoke-oom-killer-from-page-fault.patch" */
 791static void
 792out_of_memory(struct pt_regs *regs, unsigned long error_code,
 793	      unsigned long address)
 794{
 795	/*
  796	 * We ran out of memory, call the OOM killer, and return to userspace
 797	 * (which will retry the fault, or kill us if we got oom-killed):
 798	 */
 799	up_read(&current->mm->mmap_sem);
 800
 801	pagefault_out_of_memory();
 802}
 803
 804static void
 805do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
 806	  unsigned int fault)
 807{
 808	struct task_struct *tsk = current;
 809	struct mm_struct *mm = tsk->mm;
 810	int code = BUS_ADRERR;
 811
 812	up_read(&mm->mmap_sem);
 813
 814	/* Kernel mode? Handle exceptions or die: */
 815	if (!(error_code & PF_USER)) {
 816		no_context(regs, error_code, address);
 817		return;
 818	}
 819
 820	/* User-space => ok to do another page fault: */
 821	if (is_prefetch(regs, error_code, address))
 822		return;
 823
 824	tsk->thread.cr2		= address;
 825	tsk->thread.error_code	= error_code;
 826	tsk->thread.trap_no	= 14;
 827
 828#ifdef CONFIG_MEMORY_FAILURE
 829	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
 830		printk(KERN_ERR
 831	"MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
 832			tsk->comm, tsk->pid, address);
 833		code = BUS_MCEERR_AR;
 834	}
 835#endif
 836	force_sig_info_fault(SIGBUS, code, address, tsk, fault);
 837}
 838
 839static noinline int
 840mm_fault_error(struct pt_regs *regs, unsigned long error_code,
 841	       unsigned long address, unsigned int fault)
 842{
 843	/*
 844	 * Pagefault was interrupted by SIGKILL. We have no reason to
  845	 * continue the pagefault.
 846	 */
 847	if (fatal_signal_pending(current)) {
 848		if (!(fault & VM_FAULT_RETRY))
 849			up_read(&current->mm->mmap_sem);
 850		if (!(error_code & PF_USER))
 851			no_context(regs, error_code, address);
 852		return 1;
 853	}
 854	if (!(fault & VM_FAULT_ERROR))
 855		return 0;
 856
 857	if (fault & VM_FAULT_OOM) {
 858		/* Kernel mode? Handle exceptions or die: */
 859		if (!(error_code & PF_USER)) {
 860			up_read(&current->mm->mmap_sem);
 861			no_context(regs, error_code, address);
 862			return 1;
 863		}
 864
 865		out_of_memory(regs, error_code, address);
 866	} else {
 867		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
 868			     VM_FAULT_HWPOISON_LARGE))
 869			do_sigbus(regs, error_code, address, fault);
 870		else
 871			BUG();
 872	}
 873	return 1;
 874}
 875
 876static int spurious_fault_check(unsigned long error_code, pte_t *pte)
 877{
 878	if ((error_code & PF_WRITE) && !pte_write(*pte))
 879		return 0;
 880
 881	if ((error_code & PF_INSTR) && !pte_exec(*pte))
 882		return 0;
 883
 884	return 1;
 885}
 886
 887/*
 888 * Handle a spurious fault caused by a stale TLB entry.
 889 *
 890 * This allows us to lazily refresh the TLB when increasing the
 891 * permissions of a kernel page (RO -> RW or NX -> X).  Doing it
 892 * eagerly is very expensive since that implies doing a full
 893 * cross-processor TLB flush, even if no stale TLB entries exist
 894 * on other processors.
 895 *
 896 * There are no security implications to leaving a stale TLB when
 897 * increasing the permissions on a page.
 898 */
 899static noinline __kprobes int
 900spurious_fault(unsigned long error_code, unsigned long address)
 901{
 902	pgd_t *pgd;
 903	pud_t *pud;
 904	pmd_t *pmd;
 905	pte_t *pte;
 906	int ret;
 907
 908	/* Reserved-bit violation or user access to kernel space? */
 909	if (error_code & (PF_USER | PF_RSVD))
 910		return 0;
 911
 912	pgd = init_mm.pgd + pgd_index(address);
 913	if (!pgd_present(*pgd))
 914		return 0;
 915
 916	pud = pud_offset(pgd, address);
 917	if (!pud_present(*pud))
 918		return 0;
 919
 920	if (pud_large(*pud))
 921		return spurious_fault_check(error_code, (pte_t *) pud);
 922
 923	pmd = pmd_offset(pud, address);
 924	if (!pmd_present(*pmd))
 925		return 0;
 926
 927	if (pmd_large(*pmd))
 928		return spurious_fault_check(error_code, (pte_t *) pmd);
 929
 930	/*
 931	 * Note: don't use pte_present() here, since it returns true
 932	 * if the _PAGE_PROTNONE bit is set.  However, this aliases the
  933	 * _PAGE_GLOBAL bit, which for kernel pages gives false positives
 934	 * when CONFIG_DEBUG_PAGEALLOC is used.
 935	 */
 936	pte = pte_offset_kernel(pmd, address);
 937	if (!(pte_flags(*pte) & _PAGE_PRESENT))
 938		return 0;
 939
 940	ret = spurious_fault_check(error_code, pte);
 941	if (!ret)
 942		return 0;
 943
 944	/*
 945	 * Make sure we have permissions in PMD.
 946	 * If not, then there's a bug in the page tables:
 947	 */
 948	ret = spurious_fault_check(error_code, (pte_t *) pmd);
 949	WARN_ONCE(!ret, "PMD has incorrect permission bits\n");
 950
 951	return ret;
 952}
 953
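/*
 * [Editor's note] A sketch of the benign race that makes such a fault
 * "spurious" (CPU numbering hypothetical):
 *
 *	CPU 0: set_memory_rw() marks a kernel page writable
 *	CPU 1: still holds the stale read-only translation in its TLB
 *	CPU 1: writes to the page			-> #PF
 *	CPU 1: spurious_fault() finds the PTE already permits the write
 *	       and returns, so the access is retried against the
 *	       up-to-date page table
 */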
 954int show_unhandled_signals = 1;
 955
 956static inline int
 957access_error(unsigned long error_code, struct vm_area_struct *vma)
 958{
 959	if (error_code & PF_WRITE) {
 960		/* write, present and write, not present: */
 961		if (unlikely(!(vma->vm_flags & VM_WRITE)))
 962			return 1;
 963		return 0;
 964	}
 965
 966	/* read, present: */
 967	if (unlikely(error_code & PF_PROT))
 968		return 1;
 969
 970	/* read, not present: */
 971	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
 972		return 1;
 973
 974	return 0;
 975}
 976
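/*
 * [Editor's note] The decision table implemented above (1 = reject via
 * the SIGSEGV/oops path, 0 = let handle_mm_fault() proceed):
 *
 *	write to a VM_WRITE vma			-> 0
 *	write to a !VM_WRITE vma		-> 1
 *	read with PF_PROT set (present page)	-> 1
 *	read from a vma with no rights at all	-> 1
 *	any other read				-> 0
 */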
 977static int fault_in_kernel_space(unsigned long address)
 978{
 979	return address >= TASK_SIZE_MAX;
 980}
 981
 982/*
 983 * This routine handles page faults.  It determines the address,
 984 * and the problem, and then passes it off to one of the appropriate
 985 * routines.
 986 */
 987dotraplinkage void __kprobes
 988do_page_fault(struct pt_regs *regs, unsigned long error_code)
 989{
 990	struct vm_area_struct *vma;
 991	struct task_struct *tsk;
 992	unsigned long address;
 993	struct mm_struct *mm;
 994	int fault;
 995	int write = error_code & PF_WRITE;
 996	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
 997					(write ? FAULT_FLAG_WRITE : 0);
 998
 999	tsk = current;
1000	mm = tsk->mm;
1001
1002	/* Get the faulting address: */
1003	address = read_cr2();
1004
1005	/*
1006	 * Detect and handle instructions that would cause a page fault for
1007	 * both a tracked kernel page and a userspace page.
1008	 */
1009	if (kmemcheck_active(regs))
1010		kmemcheck_hide(regs);
1011	prefetchw(&mm->mmap_sem);
1012
1013	if (unlikely(kmmio_fault(regs, address)))
1014		return;
1015
1016	/*
1017	 * We fault-in kernel-space virtual memory on-demand. The
1018	 * 'reference' page table is init_mm.pgd.
1019	 *
1020	 * NOTE! We MUST NOT take any locks for this case. We may
1021	 * be in an interrupt or a critical region, and should
1022	 * only copy the information from the master page table,
1023	 * nothing more.
1024	 *
1025	 * This verifies that the fault happens in kernel space
1026	 * (error_code & 4) == 0, and that the fault was not a
1027	 * protection error (error_code & 9) == 0.
1028	 */
1029	if (unlikely(fault_in_kernel_space(address))) {
1030		if (!(error_code & (PF_RSVD | PF_USER | PF_PROT))) {
1031			if (vmalloc_fault(address) >= 0)
1032				return;
1033
1034			if (kmemcheck_fault(regs, address, error_code))
1035				return;
1036		}
1037
1038		/* Can handle a stale RO->RW TLB: */
1039		if (spurious_fault(error_code, address))
1040			return;
1041
1042		/* kprobes don't want to hook the spurious faults: */
1043		if (notify_page_fault(regs))
1044			return;
1045		/*
1046		 * Don't take the mm semaphore here. If we fixup a prefetch
1047		 * fault we could otherwise deadlock:
1048		 */
1049		bad_area_nosemaphore(regs, error_code, address);
1050
1051		return;
1052	}
1053
1054	/* kprobes don't want to hook the spurious faults: */
1055	if (unlikely(notify_page_fault(regs)))
1056		return;
1057	/*
1058	 * It's safe to allow irq's after cr2 has been saved and the
1059	 * vmalloc fault has been handled.
1060	 *
1061	 * User-mode registers count as a user access even for any
1062	 * potential system fault or CPU buglet:
1063	 */
1064	if (user_mode_vm(regs)) {
1065		local_irq_enable();
1066		error_code |= PF_USER;
1067	} else {
1068		if (regs->flags & X86_EFLAGS_IF)
1069			local_irq_enable();
1070	}
1071
1072	if (unlikely(error_code & PF_RSVD))
1073		pgtable_bad(regs, error_code, address);
1074
1075	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
1076
1077	/*
1078	 * If we're in an interrupt, have no user context or are running
1079	 * in an atomic region then we must not take the fault:
1080	 */
1081	if (unlikely(in_atomic() || !mm)) {
1082		bad_area_nosemaphore(regs, error_code, address);
1083		return;
1084	}
1085
1086	/*
1087	 * When running in the kernel we expect faults to occur only to
1088	 * addresses in user space.  All other faults represent errors in
1089	 * the kernel and should generate an OOPS.  Unfortunately, in the
1090	 * case of an erroneous fault occurring in a code path which already
1091	 * holds mmap_sem we will deadlock attempting to validate the fault
1092	 * against the address space.  Luckily the kernel only validly
1093	 * references user space from well defined areas of code, which are
1094	 * listed in the exceptions table.
1095	 *
1096	 * As the vast majority of faults will be valid we will only perform
1097	 * the source reference check when there is a possibility of a
1098	 * deadlock. Attempt to lock the address space, if we cannot we then
1099	 * validate the source. If this is invalid we can skip the address
1100	 * space check, thus avoiding the deadlock:
1101	 */
1102	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
1103		if ((error_code & PF_USER) == 0 &&
1104		    !search_exception_tables(regs->ip)) {
1105			bad_area_nosemaphore(regs, error_code, address);
1106			return;
1107		}
1108retry:
1109		down_read(&mm->mmap_sem);
1110	} else {
1111		/*
1112		 * The above down_read_trylock() might have succeeded in
1113		 * which case we'll have missed the might_sleep() from
1114		 * down_read():
1115		 */
1116		might_sleep();
1117	}
1118
1119	vma = find_vma(mm, address);
1120	if (unlikely(!vma)) {
1121		bad_area(regs, error_code, address);
1122		return;
1123	}
1124	if (likely(vma->vm_start <= address))
1125		goto good_area;
1126	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
1127		bad_area(regs, error_code, address);
1128		return;
1129	}
1130	if (error_code & PF_USER) {
1131		/*
1132		 * Accessing the stack below %sp is always a bug.
1133		 * The large cushion allows instructions like enter
1134		 * and pusha to work. ("enter $65535, $31" pushes
1135		 * 32 pointers and then decrements %sp by 65535.)
1136		 */
1137		if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
1138			bad_area(regs, error_code, address);
1139			return;
1140		}
1141	}
1142	if (unlikely(expand_stack(vma, address))) {
1143		bad_area(regs, error_code, address);
1144		return;
1145	}
1146
1147	/*
1148	 * Ok, we have a good vm_area for this memory access, so
1149	 * we can handle it..
1150	 */
1151good_area:
1152	if (unlikely(access_error(error_code, vma))) {
1153		bad_area_access_error(regs, error_code, address);
1154		return;
1155	}
1156
1157	/*
1158	 * If for any reason at all we couldn't handle the fault,
1159	 * make sure we exit gracefully rather than endlessly redo
1160	 * the fault:
1161	 */
1162	fault = handle_mm_fault(mm, vma, address, flags);
1163
1164	if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
1165		if (mm_fault_error(regs, error_code, address, fault))
1166			return;
1167	}
1168
1169	/*
1170	 * Major/minor page fault accounting is only done on the
1171	 * initial attempt. If we go through a retry, it is extremely
1172	 * likely that the page will be found in page cache at that point.
1173	 */
1174	if (flags & FAULT_FLAG_ALLOW_RETRY) {
1175		if (fault & VM_FAULT_MAJOR) {
1176			tsk->maj_flt++;
1177			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
1178				      regs, address);
1179		} else {
1180			tsk->min_flt++;
1181			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
1182				      regs, address);
1183		}
1184		if (fault & VM_FAULT_RETRY) {
1185			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
1186			 * of starvation. */
1187			flags &= ~FAULT_FLAG_ALLOW_RETRY;
1188			goto retry;
1189		}
1190	}
1191
1192	check_v8086_mode(regs, address, tsk);
1193
1194	up_read(&mm->mmap_sem);
1195}
arch/x86/mm/fault.c (v5.9)
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 *  Copyright (C) 1995  Linus Torvalds
   4 *  Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
   5 *  Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
   6 */
   7#include <linux/sched.h>		/* test_thread_flag(), ...	*/
   8#include <linux/sched/task_stack.h>	/* task_stack_*(), ...		*/
   9#include <linux/kdebug.h>		/* oops_begin/end, ...		*/
  10#include <linux/extable.h>		/* search_exception_tables	*/
  11#include <linux/memblock.h>		/* max_low_pfn			*/
  12#include <linux/kprobes.h>		/* NOKPROBE_SYMBOL, ...		*/
  13#include <linux/mmiotrace.h>		/* kmmio_handler, ...		*/
  14#include <linux/perf_event.h>		/* perf_sw_event		*/
  15#include <linux/hugetlb.h>		/* hstate_index_to_shift	*/
  16#include <linux/prefetch.h>		/* prefetchw			*/
  17#include <linux/context_tracking.h>	/* exception_enter(), ...	*/
  18#include <linux/uaccess.h>		/* faulthandler_disabled()	*/
  19#include <linux/efi.h>			/* efi_recover_from_page_fault()*/
  20#include <linux/mm_types.h>
  21
  22#include <asm/cpufeature.h>		/* boot_cpu_has, ...		*/
  23#include <asm/traps.h>			/* dotraplinkage, ...		*/
  24#include <asm/fixmap.h>			/* VSYSCALL_ADDR		*/
  25#include <asm/vsyscall.h>		/* emulate_vsyscall		*/
  26#include <asm/vm86.h>			/* struct vm86			*/
  27#include <asm/mmu_context.h>		/* vma_pkey()			*/
  28#include <asm/efi.h>			/* efi_recover_from_page_fault()*/
  29#include <asm/desc.h>			/* store_idt(), ...		*/
  30#include <asm/cpu_entry_area.h>		/* exception stack		*/
  31#include <asm/pgtable_areas.h>		/* VMALLOC_START, ...		*/
  32#include <asm/kvm_para.h>		/* kvm_handle_async_pf		*/
  33
  34#define CREATE_TRACE_POINTS
  35#include <asm/trace/exceptions.h>
  36
  37/*
  38 * Returns 0 if mmiotrace is disabled, or if the fault is not
  39 * handled by mmiotrace:
  40 */
  41static nokprobe_inline int
  42kmmio_fault(struct pt_regs *regs, unsigned long addr)
  43{
  44	if (unlikely(is_kmmio_active()))
  45		if (kmmio_handler(regs, addr) == 1)
  46			return -1;
  47	return 0;
  48}
  49
  50/*
  51 * Prefetch quirks:
  52 *
  53 * 32-bit mode:
  54 *
  55 *   Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
  56 *   Check that here and ignore it.
  57 *
  58 * 64-bit mode:
  59 *
  60 *   Sometimes the CPU reports invalid exceptions on prefetch.
  61 *   Check that here and ignore it.
  62 *
  63 * Opcode checker based on code by Richard Brunner.
  64 */
  65static inline int
  66check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
  67		      unsigned char opcode, int *prefetch)
  68{
  69	unsigned char instr_hi = opcode & 0xf0;
  70	unsigned char instr_lo = opcode & 0x0f;
  71
  72	switch (instr_hi) {
  73	case 0x20:
  74	case 0x30:
  75		/*
  76		 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
  77		 * In X86_64 long mode, the CPU will signal invalid
   78		 * opcode if some of these prefixes are present, so
   79		 * X86_64 will never get here anyway.
  80		 */
  81		return ((instr_lo & 7) == 0x6);
  82#ifdef CONFIG_X86_64
  83	case 0x40:
  84		/*
  85		 * In AMD64 long mode 0x40..0x4F are valid REX prefixes
  86		 * Need to figure out under what instruction mode the
  87		 * instruction was issued. Could check the LDT for lm,
  88		 * but for now it's good enough to assume that long
  89		 * mode only uses well known segments or kernel.
  90		 */
  91		return (!user_mode(regs) || user_64bit_mode(regs));
  92#endif
  93	case 0x60:
  94		/* 0x64 thru 0x67 are valid prefixes in all modes. */
  95		return (instr_lo & 0xC) == 0x4;
  96	case 0xF0:
  97		/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
  98		return !instr_lo || (instr_lo>>1) == 1;
  99	case 0x00:
 100		/* Prefetch instruction is 0x0F0D or 0x0F18 */
 101		if (get_kernel_nofault(opcode, instr))
 102			return 0;
 103
 104		*prefetch = (instr_lo == 0xF) &&
 105			(opcode == 0x0D || opcode == 0x18);
 106		return 0;
 107	default:
 108		return 0;
 109	}
 110}
 111
 112static int
 113is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
 114{
 115	unsigned char *max_instr;
 116	unsigned char *instr;
 117	int prefetch = 0;
 118
 119	/*
  120	 * If it was an exec (instruction fetch) fault on an NX page, then
 121	 * do not ignore the fault:
 122	 */
 123	if (error_code & X86_PF_INSTR)
 124		return 0;
 125
 126	instr = (void *)convert_ip_to_linear(current, regs);
 127	max_instr = instr + 15;
 128
 129	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE_MAX)
 130		return 0;
 131
 132	while (instr < max_instr) {
 133		unsigned char opcode;
 134
 135		if (get_kernel_nofault(opcode, instr))
 136			break;
 137
 138		instr++;
 139
 140		if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
 141			break;
 142	}
 143	return prefetch;
 144}
 145
 146DEFINE_SPINLOCK(pgd_lock);
 147LIST_HEAD(pgd_list);
 148
 149#ifdef CONFIG_X86_32
 150static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
 151{
 152	unsigned index = pgd_index(address);
 153	pgd_t *pgd_k;
 154	p4d_t *p4d, *p4d_k;
 155	pud_t *pud, *pud_k;
 156	pmd_t *pmd, *pmd_k;
 157
 158	pgd += index;
 159	pgd_k = init_mm.pgd + index;
 160
 161	if (!pgd_present(*pgd_k))
 162		return NULL;
 163
 164	/*
 165	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
 166	 * and redundant with the set_pmd() on non-PAE. As would
 167	 * set_p4d/set_pud.
 168	 */
 169	p4d = p4d_offset(pgd, address);
 170	p4d_k = p4d_offset(pgd_k, address);
 171	if (!p4d_present(*p4d_k))
 172		return NULL;
 173
 174	pud = pud_offset(p4d, address);
 175	pud_k = pud_offset(p4d_k, address);
 176	if (!pud_present(*pud_k))
 177		return NULL;
 178
 179	pmd = pmd_offset(pud, address);
 180	pmd_k = pmd_offset(pud_k, address);
 181
 182	if (pmd_present(*pmd) != pmd_present(*pmd_k))
 183		set_pmd(pmd, *pmd_k);
 184
 185	if (!pmd_present(*pmd_k))
 186		return NULL;
 187	else
 188		BUG_ON(pmd_pfn(*pmd) != pmd_pfn(*pmd_k));
 189
 190	return pmd_k;
 191}
 192
 193/*
 194 *   Handle a fault on the vmalloc or module mapping area
 195 *
 196 *   This is needed because there is a race condition between the time
 197 *   when the vmalloc mapping code updates the PMD to the point in time
 198 *   where it synchronizes this update with the other page-tables in the
 199 *   system.
 200 *
 201 *   In this race window another thread/CPU can map an area on the same
 202 *   PMD, finds it already present and does not synchronize it with the
 203 *   rest of the system yet. As a result v[mz]alloc might return areas
 204 *   which are not mapped in every page-table in the system, causing an
 205 *   unhandled page-fault when they are accessed.
 206 */
 207static noinline int vmalloc_fault(unsigned long address)
 208{
 209	unsigned long pgd_paddr;
 210	pmd_t *pmd_k;
 211	pte_t *pte_k;
 212
 213	/* Make sure we are in vmalloc area: */
 214	if (!(address >= VMALLOC_START && address < VMALLOC_END))
 215		return -1;
 216
 217	/*
 218	 * Synchronize this task's top level page-table
 219	 * with the 'reference' page table.
 220	 *
 221	 * Do _not_ use "current" here. We might be inside
 222	 * an interrupt in the middle of a task switch..
 223	 */
 224	pgd_paddr = read_cr3_pa();
 225	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
 226	if (!pmd_k)
 227		return -1;
 228
 229	if (pmd_large(*pmd_k))
 230		return 0;
 231
 232	pte_k = pte_offset_kernel(pmd_k, address);
 233	if (!pte_present(*pte_k))
 234		return -1;
 235
 236	return 0;
 237}
 238NOKPROBE_SYMBOL(vmalloc_fault);
 239
 240void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
 241{
 242	unsigned long addr;
 243
 244	for (addr = start & PMD_MASK;
 245	     addr >= TASK_SIZE_MAX && addr < VMALLOC_END;
 246	     addr += PMD_SIZE) {
 247		struct page *page;
 248
 249		spin_lock(&pgd_lock);
 250		list_for_each_entry(page, &pgd_list, lru) {
 251			spinlock_t *pgt_lock;
 252
 253			/* the pgt_lock only for Xen */
 254			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
 255
 256			spin_lock(pgt_lock);
 257			vmalloc_sync_one(page_address(page), addr);
 258			spin_unlock(pgt_lock);
 259		}
 260		spin_unlock(&pgd_lock);
 261	}
 262}
 263
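/*
 * [Editor's note] A sketch of the window described above (CPU numbering
 * hypothetical):
 *
 *	CPU 0: vmalloc() installs a new PMD in init_mm
 *	CPU 1: touches the new area before CPU 0 reaches
 *	       arch_sync_kernel_mappings()	-> #PF
 *	CPU 1: vmalloc_fault() copies the PMD from init_mm via
 *	       vmalloc_sync_one() and the access is retried
 */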
 264/*
 265 * Did it hit the DOS screen memory VA from vm86 mode?
 266 */
 267static inline void
 268check_v8086_mode(struct pt_regs *regs, unsigned long address,
 269		 struct task_struct *tsk)
 270{
 271#ifdef CONFIG_VM86
 272	unsigned long bit;
 273
 274	if (!v8086_mode(regs) || !tsk->thread.vm86)
 275		return;
 276
 277	bit = (address - 0xA0000) >> PAGE_SHIFT;
 278	if (bit < 32)
 279		tsk->thread.vm86->screen_bitmap |= 1 << bit;
 280#endif
 281}
 282
 283static bool low_pfn(unsigned long pfn)
 284{
 285	return pfn < max_low_pfn;
 286}
 287
 288static void dump_pagetable(unsigned long address)
 289{
 290	pgd_t *base = __va(read_cr3_pa());
 291	pgd_t *pgd = &base[pgd_index(address)];
 292	p4d_t *p4d;
 293	pud_t *pud;
 294	pmd_t *pmd;
 295	pte_t *pte;
 296
 297#ifdef CONFIG_X86_PAE
 298	pr_info("*pdpt = %016Lx ", pgd_val(*pgd));
 299	if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd))
 300		goto out;
 301#define pr_pde pr_cont
 302#else
 303#define pr_pde pr_info
 304#endif
 305	p4d = p4d_offset(pgd, address);
 306	pud = pud_offset(p4d, address);
 307	pmd = pmd_offset(pud, address);
 308	pr_pde("*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd));
 309#undef pr_pde
 310
 311	/*
 312	 * We must not directly access the pte in the highpte
 313	 * case if the page table is located in highmem.
 314	 * And let's rather not kmap-atomic the pte, just in case
 315	 * it's allocated already:
 316	 */
 317	if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd))
 318		goto out;
 319
 320	pte = pte_offset_kernel(pmd, address);
 321	pr_cont("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte));
 322out:
 323	pr_cont("\n");
 324}
 325
 326#else /* CONFIG_X86_64: */
 327
 328#ifdef CONFIG_CPU_SUP_AMD
 329static const char errata93_warning[] =
  330KERN_ERR
 331"******* Your BIOS seems to not contain a fix for K8 errata #93\n"
 332"******* Working around it, but it may cause SEGVs or burn power.\n"
 333"******* Please consider a BIOS update.\n"
 334"******* Disabling USB legacy in the BIOS may also help.\n";
 335#endif
 336
 337/*
 338 * No vm86 mode in 64-bit mode:
 339 */
 340static inline void
 341check_v8086_mode(struct pt_regs *regs, unsigned long address,
 342		 struct task_struct *tsk)
 343{
 344}
 345
 346static int bad_address(void *p)
 347{
 348	unsigned long dummy;
 349
 350	return get_kernel_nofault(dummy, (unsigned long *)p);
 351}
 352
 353static void dump_pagetable(unsigned long address)
 354{
 355	pgd_t *base = __va(read_cr3_pa());
 356	pgd_t *pgd = base + pgd_index(address);
 357	p4d_t *p4d;
 358	pud_t *pud;
 359	pmd_t *pmd;
 360	pte_t *pte;
 361
 362	if (bad_address(pgd))
 363		goto bad;
 364
 365	pr_info("PGD %lx ", pgd_val(*pgd));
 366
 367	if (!pgd_present(*pgd))
 368		goto out;
 369
 370	p4d = p4d_offset(pgd, address);
 371	if (bad_address(p4d))
 372		goto bad;
 373
 374	pr_cont("P4D %lx ", p4d_val(*p4d));
 375	if (!p4d_present(*p4d) || p4d_large(*p4d))
 376		goto out;
 377
 378	pud = pud_offset(p4d, address);
 379	if (bad_address(pud))
 380		goto bad;
 381
 382	pr_cont("PUD %lx ", pud_val(*pud));
 383	if (!pud_present(*pud) || pud_large(*pud))
 384		goto out;
 385
 386	pmd = pmd_offset(pud, address);
 387	if (bad_address(pmd))
 388		goto bad;
 389
 390	pr_cont("PMD %lx ", pmd_val(*pmd));
 391	if (!pmd_present(*pmd) || pmd_large(*pmd))
 392		goto out;
 393
 394	pte = pte_offset_kernel(pmd, address);
 395	if (bad_address(pte))
 396		goto bad;
 397
 398	pr_cont("PTE %lx", pte_val(*pte));
 399out:
 400	pr_cont("\n");
 401	return;
 402bad:
 403	pr_info("BAD\n");
 404}
 405
 406#endif /* CONFIG_X86_64 */
 407
 408/*
 409 * Workaround for K8 erratum #93 & buggy BIOS.
 410 *
 411 * BIOS SMM functions are required to use a specific workaround
 412 * to avoid corruption of the 64bit RIP register on C stepping K8.
 413 *
  414 * A lot of BIOSes that didn't get tested properly miss this.
  415 *
  416 * The OS sees this as a page fault with the upper 32 bits of RIP cleared.
 417 * Try to work around it here.
 418 *
 419 * Note we only handle faults in kernel here.
 420 * Does nothing on 32-bit.
 421 */
 422static int is_errata93(struct pt_regs *regs, unsigned long address)
 423{
 424#if defined(CONFIG_X86_64) && defined(CONFIG_CPU_SUP_AMD)
 425	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD
 426	    || boot_cpu_data.x86 != 0xf)
 427		return 0;
 428
 429	if (address != regs->ip)
 430		return 0;
 431
 432	if ((address >> 32) != 0)
 433		return 0;
 434
 435	address |= 0xffffffffUL << 32;
 436	if ((address >= (u64)_stext && address <= (u64)_etext) ||
 437	    (address >= MODULES_VADDR && address <= MODULES_END)) {
 438		printk_once(errata93_warning);
 439		regs->ip = address;
 440		return 1;
 441	}
 442#endif
 443	return 0;
 444}
 445
 446/*
  447 * Work around K8 erratum #100: K8 in compat mode occasionally jumps
 448 * to illegal addresses >4GB.
 449 *
 450 * We catch this in the page fault handler because these addresses
 451 * are not reachable. Just detect this case and return.  Any code
 452 * segment in LDT is compatibility mode.
 453 */
 454static int is_errata100(struct pt_regs *regs, unsigned long address)
 455{
 456#ifdef CONFIG_X86_64
 457	if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
 458		return 1;
 459#endif
 460	return 0;
 461}
 462
 463/* Pentium F0 0F C7 C8 bug workaround: */
 464static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
 465{
 466#ifdef CONFIG_X86_F00F_BUG
 467	if (boot_cpu_has_bug(X86_BUG_F00F) && idt_is_f00f_address(address)) {
 468		handle_invalid_op(regs);
 469		return 1;
 470	}
 471#endif
 472	return 0;
 473}
 474
 475static void show_ldttss(const struct desc_ptr *gdt, const char *name, u16 index)
 476{
 477	u32 offset = (index >> 3) * sizeof(struct desc_struct);
 478	unsigned long addr;
 479	struct ldttss_desc desc;
 480
 481	if (index == 0) {
 482		pr_alert("%s: NULL\n", name);
 483		return;
 484	}
 485
 486	if (offset + sizeof(struct ldttss_desc) >= gdt->size) {
 487		pr_alert("%s: 0x%hx -- out of bounds\n", name, index);
 488		return;
 489	}
 490
 491	if (copy_from_kernel_nofault(&desc, (void *)(gdt->address + offset),
 492			      sizeof(struct ldttss_desc))) {
 493		pr_alert("%s: 0x%hx -- GDT entry is not readable\n",
 494			 name, index);
 495		return;
 496	}
 497
 498	addr = desc.base0 | (desc.base1 << 16) | ((unsigned long)desc.base2 << 24);
 499#ifdef CONFIG_X86_64
 500	addr |= ((u64)desc.base3 << 32);
 501#endif
 502	pr_alert("%s: 0x%hx -- base=0x%lx limit=0x%x\n",
 503		 name, index, addr, (desc.limit0 | (desc.limit1 << 16)));
 504}
 505
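/*
 * [Editor's note] A user-space sketch (hypothetical field values) of the
 * base-address reassembly above: an LDT/TSS descriptor scatters its base
 * across base0 (bits 0-15), base1 (16-23), base2 (24-31) and, on 64-bit,
 * base3 (32-63).
 */
#include <stdio.h>

int main(void)
{
	unsigned short base0 = 0x5000;			/* bits  0-15 */
	unsigned char  base1 = 0x34, base2 = 0x12;	/* bits 16-31 */
	unsigned int   base3 = 0xfffffe00;		/* bits 32-63 */
	unsigned long  addr;

	addr  = base0 | (base1 << 16) | ((unsigned long)base2 << 24);
	addr |= (unsigned long)base3 << 32;

	printf("base = 0x%lx\n", addr);	/* 0xfffffe0012345000 */
	return 0;
}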
 506static void
 507show_fault_oops(struct pt_regs *regs, unsigned long error_code, unsigned long address)
 508{
 509	if (!oops_may_print())
 510		return;
 511
 512	if (error_code & X86_PF_INSTR) {
 513		unsigned int level;
 514		pgd_t *pgd;
 515		pte_t *pte;
 516
 517		pgd = __va(read_cr3_pa());
 518		pgd += pgd_index(address);
 519
 520		pte = lookup_address_in_pgd(pgd, address, &level);
 521
 522		if (pte && pte_present(*pte) && !pte_exec(*pte))
 523			pr_crit("kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n",
 524				from_kuid(&init_user_ns, current_uid()));
 525		if (pte && pte_present(*pte) && pte_exec(*pte) &&
 526				(pgd_flags(*pgd) & _PAGE_USER) &&
 527				(__read_cr4() & X86_CR4_SMEP))
 528			pr_crit("unable to execute userspace code (SMEP?) (uid: %d)\n",
 529				from_kuid(&init_user_ns, current_uid()));
 530	}
 531
 532	if (address < PAGE_SIZE && !user_mode(regs))
 533		pr_alert("BUG: kernel NULL pointer dereference, address: %px\n",
 534			(void *)address);
 535	else
 536		pr_alert("BUG: unable to handle page fault for address: %px\n",
 537			(void *)address);
 538
 539	pr_alert("#PF: %s %s in %s mode\n",
 540		 (error_code & X86_PF_USER)  ? "user" : "supervisor",
 541		 (error_code & X86_PF_INSTR) ? "instruction fetch" :
 542		 (error_code & X86_PF_WRITE) ? "write access" :
 543					       "read access",
 544			     user_mode(regs) ? "user" : "kernel");
 545	pr_alert("#PF: error_code(0x%04lx) - %s\n", error_code,
 546		 !(error_code & X86_PF_PROT) ? "not-present page" :
 547		 (error_code & X86_PF_RSVD)  ? "reserved bit violation" :
 548		 (error_code & X86_PF_PK)    ? "protection keys violation" :
 549					       "permissions violation");
 550
 551	if (!(error_code & X86_PF_USER) && user_mode(regs)) {
 552		struct desc_ptr idt, gdt;
 553		u16 ldtr, tr;
 554
 555		/*
 556		 * This can happen for quite a few reasons.  The more obvious
 557		 * ones are faults accessing the GDT, or LDT.  Perhaps
 558		 * surprisingly, if the CPU tries to deliver a benign or
 559		 * contributory exception from user code and gets a page fault
 560		 * during delivery, the page fault can be delivered as though
 561		 * it originated directly from user code.  This could happen
 562		 * due to wrong permissions on the IDT, GDT, LDT, TSS, or
 563		 * kernel or IST stack.
 564		 */
 565		store_idt(&idt);
 566
 567		/* Usable even on Xen PV -- it's just slow. */
 568		native_store_gdt(&gdt);
 569
 570		pr_alert("IDT: 0x%lx (limit=0x%hx) GDT: 0x%lx (limit=0x%hx)\n",
 571			 idt.address, idt.size, gdt.address, gdt.size);
 572
 573		store_ldt(ldtr);
 574		show_ldttss(&gdt, "LDTR", ldtr);
 575
 576		store_tr(tr);
 577		show_ldttss(&gdt, "TR", tr);
 578	}
 579
 580	dump_pagetable(address);
 581}
 582
 583static noinline void
 584pgtable_bad(struct pt_regs *regs, unsigned long error_code,
 585	    unsigned long address)
 586{
 587	struct task_struct *tsk;
 588	unsigned long flags;
 589	int sig;
 590
 591	flags = oops_begin();
 592	tsk = current;
 593	sig = SIGKILL;
 594
 595	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
 596	       tsk->comm, address);
 597	dump_pagetable(address);
 598
 599	if (__die("Bad pagetable", regs, error_code))
 600		sig = 0;
 601
 602	oops_end(flags, regs, sig);
 603}
 604
 605static void set_signal_archinfo(unsigned long address,
 606				unsigned long error_code)
 607{
 608	struct task_struct *tsk = current;
 609
 610	/*
 611	 * To avoid leaking information about the kernel page
 612	 * table layout, pretend that user-mode accesses to
 613	 * kernel addresses are always protection faults.
 614	 *
 615	 * NB: This means that failed vsyscalls with vsyscall=none
 616	 * will have the PROT bit.  This doesn't leak any
 617	 * information and does not appear to cause any problems.
 618	 */
 619	if (address >= TASK_SIZE_MAX)
 620		error_code |= X86_PF_PROT;
 621
 622	tsk->thread.trap_nr = X86_TRAP_PF;
 623	tsk->thread.error_code = error_code | X86_PF_USER;
 624	tsk->thread.cr2 = address;
 625}
 626
 627static noinline void
 628no_context(struct pt_regs *regs, unsigned long error_code,
 629	   unsigned long address, int signal, int si_code)
 630{
 631	struct task_struct *tsk = current;
 632	unsigned long flags;
 633	int sig;
 634
 635	if (user_mode(regs)) {
 636		/*
 637		 * This is an implicit supervisor-mode access from user
 638		 * mode.  Bypass all the kernel-mode recovery code and just
 639		 * OOPS.
 640		 */
 641		goto oops;
 642	}
 643
 644	/* Are we prepared to handle this kernel fault? */
 645	if (fixup_exception(regs, X86_TRAP_PF, error_code, address)) {
 646		/*
 647		 * Any interrupt that takes a fault gets the fixup. This makes
 648		 * the below recursive fault logic only apply to a faults from
 649		 * task context.
 650		 */
 651		if (in_interrupt())
 652			return;
 653
 654		/*
 655		 * Per the above we're !in_interrupt(), aka. task context.
 656		 *
 657		 * In this case we need to make sure we're not recursively
 658		 * faulting through the emulate_vsyscall() logic.
 659		 */
 660		if (current->thread.sig_on_uaccess_err && signal) {
 661			set_signal_archinfo(address, error_code);
 662
 663			/* XXX: hwpoison faults will set the wrong code. */
 664			force_sig_fault(signal, si_code, (void __user *)address);
 665		}
 666
 667		/*
 668		 * Barring that, we can do the fixup and be happy.
 669		 */
 670		return;
 671	}
 672
 673#ifdef CONFIG_VMAP_STACK
 674	/*
 675	 * Stack overflow?  During boot, we can fault near the initial
 676	 * stack in the direct map, but that's not an overflow -- check
 677	 * that we're in vmalloc space to avoid this.
 678	 */
 679	if (is_vmalloc_addr((void *)address) &&
 680	    (((unsigned long)tsk->stack - 1 - address < PAGE_SIZE) ||
 681	     address - ((unsigned long)tsk->stack + THREAD_SIZE) < PAGE_SIZE)) {
 682		unsigned long stack = __this_cpu_ist_top_va(DF) - sizeof(void *);
 683		/*
 684		 * We're likely to be running with very little stack space
 685		 * left.  It's plausible that we'd hit this condition but
 686		 * double-fault even before we get this far, in which case
 687		 * we're fine: the double-fault handler will deal with it.
 688		 *
 689		 * We don't want to make it all the way into the oops code
 690		 * and then double-fault, though, because we're likely to
 691		 * break the console driver and lose most of the stack dump.
 692		 */
 693		asm volatile ("movq %[stack], %%rsp\n\t"
 694			      "call handle_stack_overflow\n\t"
 695			      "1: jmp 1b"
 696			      : ASM_CALL_CONSTRAINT
 697			      : "D" ("kernel stack overflow (page fault)"),
 698				"S" (regs), "d" (address),
 699				[stack] "rm" (stack));
 700		unreachable();
 701	}
 702#endif
 703
 704	/*
 705	 * 32-bit:
 706	 *
 707	 *   Valid to do another page fault here, because if this fault
 708	 *   had been triggered by is_prefetch fixup_exception would have
 709	 *   handled it.
 710	 *
 711	 * 64-bit:
 712	 *
 713	 *   Hall of shame of CPU/BIOS bugs.
 714	 */
 715	if (is_prefetch(regs, error_code, address))
 716		return;
 717
 718	if (is_errata93(regs, address))
 719		return;
 720
 721	/*
 722	 * Buggy firmware could access regions which might page fault; try to
 723	 * recover from such faults.
 724	 */
 725	if (IS_ENABLED(CONFIG_EFI))
 726		efi_recover_from_page_fault(address);
 727
 728oops:
 729	/*
 730	 * Oops. The kernel tried to access some bad page. We'll have to
 731	 * terminate things with extreme prejudice:
 732	 */
 733	flags = oops_begin();
 734
 735	show_fault_oops(regs, error_code, address);
 736
 737	if (task_stack_end_corrupted(tsk))
 738		printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
 739
 740	sig = SIGKILL;
 741	if (__die("Oops", regs, error_code))
 742		sig = 0;
 743
 744	/* Executive summary in case the body of the oops scrolled away */
 745	printk(KERN_DEFAULT "CR2: %016lx\n", address);
 746
 747	oops_end(flags, regs, sig);
 748}
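/*
 * Added sketch (hypothetical caller, not from this file): the
 * fixup_exception() path above is what lets uaccess helpers survive
 * a bad user pointer instead of oopsing:
 *
 *	int val;
 *
 *	if (get_user(val, (int __user *)user_ptr))
 *		return -EFAULT;
 *
 * When user_ptr is unmapped, the access faults in kernel mode, the
 * exception table entry rewrites regs->ip to the fixup code, and
 * get_user() returns -EFAULT rather than reaching the oops path.
 */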
 749
 750/*
 751 * Print out info about fatal segfaults, if the show_unhandled_signals
 752 * sysctl is set:
 753 */
 754static inline void
 755show_signal_msg(struct pt_regs *regs, unsigned long error_code,
 756		unsigned long address, struct task_struct *tsk)
 757{
 758	const char *loglvl = task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG;
 759
 760	if (!unhandled_signal(tsk, SIGSEGV))
 761		return;
 762
 763	if (!printk_ratelimit())
 764		return;
 765
 766	printk("%s%s[%d]: segfault at %lx ip %px sp %px error %lx",
 767		loglvl, tsk->comm, task_pid_nr(tsk), address,
 768		(void *)regs->ip, (void *)regs->sp, error_code);
 769
 770	print_vma_addr(KERN_CONT " in ", regs->ip);
 771
 772	printk(KERN_CONT "\n");
 773
 774	show_opcodes(regs, loglvl);
 775}
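/*
 * Added example (all values made up): the printk above emits a line
 * of the form
 *
 *	myprog[1234]: segfault at 10 ip 000055d0c0a01234 sp
 *	00007ffcd0ffe000 error 6 in myprog[55d0c0a00000+1000]
 *
 * where "error 6" decodes via the X86_PF_* bits as
 * X86_PF_WRITE | X86_PF_USER: a user-mode write to a not-present page.
 */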
 776
 777/*
 778 * The (legacy) vsyscall page is the lone page in the kernel portion
 779 * of the address space that has user-accessible permissions.
 780 */
 781static bool is_vsyscall_vaddr(unsigned long vaddr)
 782{
 783	return unlikely((vaddr & PAGE_MASK) == VSYSCALL_ADDR);
 784}
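/*
 * Added note (illustrative): with VSYSCALL_ADDR == 0xffffffffff600000,
 * any address on that one page matches, e.g.
 * is_vsyscall_vaddr(0xffffffffff600400) is true, while the next page
 * at 0xffffffffff601000 is not.
 */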
 785
 786static void
 787__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
 788		       unsigned long address, u32 pkey, int si_code)
 789{
 790	struct task_struct *tsk = current;
 791
 792	/* User mode accesses just cause a SIGSEGV */
 793	if (user_mode(regs) && (error_code & X86_PF_USER)) {
 794		/*
 795		 * It's possible to have interrupts off here:
 796		 */
 797		local_irq_enable();
 798
 799		/*
 800		 * Valid to do another page fault here because this one came
 801		 * from user space:
 802		 */
 803		if (is_prefetch(regs, error_code, address))
 804			return;
 805
 806		if (is_errata100(regs, address))
 807			return;
 808
 809		/*
 810		 * To avoid leaking information about the kernel page table
 811		 * layout, pretend that user-mode accesses to kernel addresses
 812		 * are always protection faults.
 813		 */
 814		if (address >= TASK_SIZE_MAX)
 815			error_code |= X86_PF_PROT;
 816
 817		if (likely(show_unhandled_signals))
 818			show_signal_msg(regs, error_code, address, tsk);
 819
 820		set_signal_archinfo(address, error_code);
 821
 822		if (si_code == SEGV_PKUERR)
 823			force_sig_pkuerr((void __user *)address, pkey);
 824
 825		force_sig_fault(SIGSEGV, si_code, (void __user *)address);
 826
 827		local_irq_disable();
 828
 829		return;
 830	}
 831
 832	if (is_f00f_bug(regs, address))
 833		return;
 834
 835	no_context(regs, error_code, address, SIGSEGV, si_code);
 836}
 837
 838static noinline void
 839bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
 840		     unsigned long address)
 841{
 842	__bad_area_nosemaphore(regs, error_code, address, 0, SEGV_MAPERR);
 843}
 844
 845static void
 846__bad_area(struct pt_regs *regs, unsigned long error_code,
 847	   unsigned long address, u32 pkey, int si_code)
 848{
 849	struct mm_struct *mm = current->mm;
 850	/*
 851	 * Something tried to access memory that isn't in our memory map..
 852	 * Fix it, but check if it's kernel or user first..
 853	 */
 854	mmap_read_unlock(mm);
 855
 856	__bad_area_nosemaphore(regs, error_code, address, pkey, si_code);
 857}
 858
 859static noinline void
 860bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
 861{
 862	__bad_area(regs, error_code, address, 0, SEGV_MAPERR);
 863}
 864
 865static inline bool bad_area_access_from_pkeys(unsigned long error_code,
 866		struct vm_area_struct *vma)
 867{
 868	/* This code is always called on the current mm */
 869	bool foreign = false;
 870
 871	if (!boot_cpu_has(X86_FEATURE_OSPKE))
 872		return false;
 873	if (error_code & X86_PF_PK)
 874		return true;
 875	/* this checks permission keys on the VMA: */
 876	if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE),
 877				       (error_code & X86_PF_INSTR), foreign))
 878		return true;
 879	return false;
 880}
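/*
 * Added note (not in the original source): the two checks above are
 * complementary.  X86_PF_PK is only set once the hardware has walked
 * to a present PTE carrying the key, while arch_vma_access_permitted()
 * catches accesses that pkeys would forbid on pages that have not been
 * populated yet, where no X86_PF_PK fault could have been raised.
 */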
 881
 882static noinline void
 883bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
 884		      unsigned long address, struct vm_area_struct *vma)
 885{
 886	/*
 887	 * This OSPKE check is not strictly necessary at runtime.
 888	 * But, doing it this way allows compiler optimizations
 889	 * if pkeys are compiled out.
 890	 */
 891	if (bad_area_access_from_pkeys(error_code, vma)) {
 892		/*
 893		 * A protection key fault means that the PKRU value did not allow
 894		 * access to some PTE.  Userspace can figure out what PKRU was
 895		 * from the XSAVE state.  This function captures the pkey from
 896		 * the vma and passes it to userspace so userspace can discover
 897		 * which protection key was set on the PTE.
 898		 *
 899		 * If we get here, we know that the hardware signaled an X86_PF_PK
 900		 * fault and that there was a VMA once we got in the fault
 901		 * handler.  It does *not* guarantee that the VMA we find here
 902		 * was the one that we faulted on.
 903		 *
 904		 * 1. T1   : mprotect_key(foo, PAGE_SIZE, pkey=4);
 905		 * 2. T1   : set PKRU to deny access to pkey=4, touches page
 906		 * 3. T1   : faults...
 907		 * 4.    T2: mprotect_key(foo, PAGE_SIZE, pkey=5);
 908		 * 5. T1   : enters fault handler, takes mmap_lock, etc...
 909		 * 6. T1   : reaches here, sees vma_pkey(vma)=5, when we really
 910		 *	     faulted on a pte with its pkey=4.
 911		 */
 912		u32 pkey = vma_pkey(vma);
 913
 914		__bad_area(regs, error_code, address, pkey, SEGV_PKUERR);
 915	} else {
 916		__bad_area(regs, error_code, address, 0, SEGV_ACCERR);
 917	}
 918}
 919
 920static void
 921do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
 922	  vm_fault_t fault)
 923{
 924	/* Kernel mode? Handle exceptions or die: */
 925	if (!(error_code & X86_PF_USER)) {
 926		no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
 927		return;
 928	}
 929
 930	/* User-space => ok to do another page fault: */
 931	if (is_prefetch(regs, error_code, address))
 932		return;
 933
 934	set_signal_archinfo(address, error_code);
 935
 936#ifdef CONFIG_MEMORY_FAILURE
 937	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
 938		struct task_struct *tsk = current;
 939		unsigned lsb = 0;
 940
 941		pr_err(
 942	"MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
 943			tsk->comm, tsk->pid, address);
 944		if (fault & VM_FAULT_HWPOISON_LARGE)
 945			lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
 946		if (fault & VM_FAULT_HWPOISON)
 947			lsb = PAGE_SHIFT;
 948		force_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb);
 949		return;
 950	}
 951#endif
 952	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
 953}
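/*
 * Added illustration (not from this file): the "lsb" handed to
 * force_sig_mceerr() becomes siginfo.si_addr_lsb and tells the signal
 * handler how much memory around si_addr is poisoned: PAGE_SHIFT (12,
 * i.e. 4KiB) for a base page, or e.g. 21 (2MiB) from
 * hstate_index_to_shift() for a 2M huge page.
 */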
 954
 955static noinline void
 956mm_fault_error(struct pt_regs *regs, unsigned long error_code,
 957	       unsigned long address, vm_fault_t fault)
 958{
 959	if (fatal_signal_pending(current) && !(error_code & X86_PF_USER)) {
 960		no_context(regs, error_code, address, 0, 0);
 961		return;
 962	}
 963
 964	if (fault & VM_FAULT_OOM) {
 965		/* Kernel mode? Handle exceptions or die: */
 966		if (!(error_code & X86_PF_USER)) {
 967			no_context(regs, error_code, address,
 968				   SIGSEGV, SEGV_MAPERR);
 969			return;
 970		}
 971
 972		/*
 973		 * We ran out of memory, call the OOM killer, and return to
 974		 * userspace (which will retry the fault, or kill us if we got
 975		 * oom-killed):
 976		 */
 977		pagefault_out_of_memory();
 978	} else {
 979		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
 980			     VM_FAULT_HWPOISON_LARGE))
 981			do_sigbus(regs, error_code, address, fault);
 982		else if (fault & VM_FAULT_SIGSEGV)
 983			bad_area_nosemaphore(regs, error_code, address);
 984		else
 985			BUG();
 986	}
 987}
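/*
 * Added sketch of the VM_FAULT_OOM user path (illustrative):
 * pagefault_out_of_memory() lets the OOM killer make progress and then
 * returns; the faulting instruction is restarted on the way back to
 * userspace and either succeeds once memory has been freed or the task
 * is OOM-killed.
 */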
 988
 989static int spurious_kernel_fault_check(unsigned long error_code, pte_t *pte)
 990{
 991	if ((error_code & X86_PF_WRITE) && !pte_write(*pte))
 992		return 0;
 993
 994	if ((error_code & X86_PF_INSTR) && !pte_exec(*pte))
 995		return 0;
 996
 997	return 1;
 998}
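/*
 * Added worked example (not part of the original source): for
 * error_code == (X86_PF_PROT | X86_PF_WRITE), a PTE with its W bit set
 * makes this return 1 (the TLB entry was stale, the fault is
 * spurious), while a genuinely read-only PTE returns 0.
 */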
 999
1000/*
1001 * Handle a spurious fault caused by a stale TLB entry.
1002 *
1003 * This allows us to lazily refresh the TLB when increasing the
1004 * permissions of a kernel page (RO -> RW or NX -> X).  Doing it
1005 * eagerly is very expensive since that implies doing a full
1006 * cross-processor TLB flush, even if no stale TLB entries exist
1007 * on other processors.
1008 *
1009 * Spurious faults may only occur if the TLB contains an entry with
1010 * fewer permissions than the page table entry.  Non-present (P = 0)
1011 * and reserved bit (R = 1) faults are never spurious.
1012 *
1013 * There are no security implications to leaving a stale TLB when
1014 * increasing the permissions on a page.
1015 *
1016 * Returns non-zero if a spurious fault was handled, zero otherwise.
1017 *
1018 * See Intel Developer's Manual Vol 3 Section 4.10.4.3, bullet 3
1019 * (Optional Invalidation).
1020 */
1021static noinline int
1022spurious_kernel_fault(unsigned long error_code, unsigned long address)
1023{
1024	pgd_t *pgd;
1025	p4d_t *p4d;
1026	pud_t *pud;
1027	pmd_t *pmd;
1028	pte_t *pte;
1029	int ret;
1030
1031	/*
1032	 * Only writes to RO or instruction fetches from NX may cause
1033	 * spurious faults.
1034	 *
1035	 * These could be from user or supervisor accesses but the TLB
1036	 * is only lazily flushed after a kernel mapping protection
1037	 * change, so user accesses are not expected to cause spurious
1038	 * faults.
1039	 */
1040	if (error_code != (X86_PF_WRITE | X86_PF_PROT) &&
1041	    error_code != (X86_PF_INSTR | X86_PF_PROT))
1042		return 0;
1043
1044	pgd = init_mm.pgd + pgd_index(address);
1045	if (!pgd_present(*pgd))
1046		return 0;
1047
1048	p4d = p4d_offset(pgd, address);
1049	if (!p4d_present(*p4d))
1050		return 0;
1051
1052	if (p4d_large(*p4d))
1053		return spurious_kernel_fault_check(error_code, (pte_t *) p4d);
1054
1055	pud = pud_offset(p4d, address);
1056	if (!pud_present(*pud))
1057		return 0;
1058
1059	if (pud_large(*pud))
1060		return spurious_kernel_fault_check(error_code, (pte_t *) pud);
1061
1062	pmd = pmd_offset(pud, address);
1063	if (!pmd_present(*pmd))
1064		return 0;
1065
1066	if (pmd_large(*pmd))
1067		return spurious_kernel_fault_check(error_code, (pte_t *) pmd);
1068
1069	pte = pte_offset_kernel(pmd, address);
1070	if (!pte_present(*pte))
1071		return 0;
1072
1073	ret = spurious_kernel_fault_check(error_code, pte);
1074	if (!ret)
1075		return 0;
1076
1077	/*
1078	 * Make sure we have permissions in PMD.
1079	 * If not, then there's a bug in the page tables:
1080	 */
1081	ret = spurious_kernel_fault_check(error_code, (pte_t *) pmd);
1082	WARN_ONCE(!ret, "PMD has incorrect permission bits\n");
1083
1084	return ret;
1085}
1086NOKPROBE_SYMBOL(spurious_kernel_fault);
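/*
 * Added scenario for the lazy-flush logic above (assumed sequence,
 * illustrative only):
 *
 *	CPU 0: raises permissions on a kernel page (RO -> RW, e.g. via
 *	       set_memory_rw()) and may skip the cross-CPU TLB flush.
 *	CPU 1: still holds the stale RO translation, writes the page,
 *	       and faults with X86_PF_PROT | X86_PF_WRITE.
 *	CPU 1: spurious_kernel_fault() walks init_mm, finds a writable
 *	       PTE, returns non-zero, and the write is retried after the
 *	       stale TLB entry has been replaced.
 */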
1087
1088int show_unhandled_signals = 1;
1089
1090static inline int
1091access_error(unsigned long error_code, struct vm_area_struct *vma)
1092{
1093	/* This is only called for the current mm, so: */
1094	bool foreign = false;
1095
1096	/*
1097	 * Read or write was blocked by protection keys.  This is
1098	 * always an unconditional error and can never result in
1099	 * a follow-up action to resolve the fault, like a COW.
1100	 */
1101	if (error_code & X86_PF_PK)
1102		return 1;
1103
1104	/*
1105	 * Make sure to check the VMA so that we do not perform
1106	 * faults just to hit an X86_PF_PK as soon as we fill in a
1107	 * page.
1108	 */
1109	if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE),
1110				       (error_code & X86_PF_INSTR), foreign))
1111		return 1;
1112
1113	if (error_code & X86_PF_WRITE) {
1114		/* write, present and write, not present: */
1115		if (unlikely(!(vma->vm_flags & VM_WRITE)))
1116			return 1;
1117		return 0;
1118	}
1119
1120	/* read, present: */
1121	if (unlikely(error_code & X86_PF_PROT))
1122		return 1;
1123
1124	/* read, not present: */
1125	if (unlikely(!vma_is_accessible(vma)))
1126		return 1;
1127
1128	return 0;
1129}
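/*
 * Added decode examples (illustrative): a write (X86_PF_WRITE) into a
 * VMA without VM_WRITE returns 1; a read that faulted with X86_PF_PROT
 * on a present page returns 1 (the protection is real, not demand
 * paging); a plain read of a not-yet-populated page in an accessible
 * VMA returns 0 and proceeds to handle_mm_fault().
 */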
1130
1131static bool fault_in_kernel_space(unsigned long address)
1132{
1133	/*
1134	 * On 64-bit systems, the vsyscall page is at an address above
1135	 * TASK_SIZE_MAX, but is not considered part of the kernel
1136	 * address space.
1137	 */
1138	if (IS_ENABLED(CONFIG_X86_64) && is_vsyscall_vaddr(address))
1139		return false;
1140
1141	return address >= TASK_SIZE_MAX;
1142}
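/*
 * Added concrete values (x86-64 with 4-level paging, illustrative):
 * TASK_SIZE_MAX is 0x00007ffffffff000, so a direct-map address such as
 * 0xffff888000000000 is treated as kernel space, while the vsyscall
 * page at 0xffffffffff600000 lies above TASK_SIZE_MAX but is
 * deliberately routed to do_user_addr_fault() for emulation.
 */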
1143
1144/*
1145 * Called for all faults where 'address' is part of the kernel address
1146 * space.  Might get called for faults that originate from *code* that
1147 * ran in userspace or the kernel.
1148 */
1149static void
1150do_kern_addr_fault(struct pt_regs *regs, unsigned long hw_error_code,
1151		   unsigned long address)
1152{
1153	/*
1154	 * Protection keys exceptions only happen on user pages.  We
1155	 * have no user pages in the kernel portion of the address
1156	 * space, so do not expect them here.
1157	 */
1158	WARN_ON_ONCE(hw_error_code & X86_PF_PK);
1159
1160#ifdef CONFIG_X86_32
1161	/*
1162	 * We can fault-in kernel-space virtual memory on-demand. The
1163	 * 'reference' page table is init_mm.pgd.
1164	 *
1165	 * NOTE! We MUST NOT take any locks for this case. We may
1166	 * be in an interrupt or a critical region, and should
1167	 * only copy the information from the master page table,
1168	 * nothing more.
1169	 *
1170	 * Before doing this on-demand faulting, ensure that the
1171	 * fault is not any of the following:
1172	 * 1. A fault on a PTE with a reserved bit set.
1173	 * 2. A fault caused by a user-mode access.  (Do not demand-
1174	 *    fault kernel memory due to user-mode accesses).
1175	 * 3. A fault caused by a page-level protection violation.
1176	 *    (A demand fault would be on a non-present page which
1177	 *     would have X86_PF_PROT==0).
1178	 *
1179	 * This is only needed to close a race condition on x86-32 in
1180	 * the vmalloc mapping/unmapping code. See the comment above
1181	 * vmalloc_fault() for details. On x86-64 the race does not
1182	 * exist as the vmalloc mappings don't need to be synchronized
1183	 * there.
1184	 */
1185	if (!(hw_error_code & (X86_PF_RSVD | X86_PF_USER | X86_PF_PROT))) {
1186		if (vmalloc_fault(address) >= 0)
1187			return;
1188	}
1189#endif
1190
1191	/* Was the fault spurious, caused by lazy TLB invalidation? */
1192	if (spurious_kernel_fault(hw_error_code, address))
1193		return;
1194
1195	/* kprobes don't want to hook the spurious faults: */
1196	if (kprobe_page_fault(regs, X86_TRAP_PF))
1197		return;
1198
1199	/*
1200	 * Note, despite being a "bad area", there are quite a few
1201	 * acceptable reasons to get here, such as erratum fixups
1202	 * and handling kernel code that can fault, like get_user().
1203	 *
1204	 * Don't take the mm semaphore here. If we fixup a prefetch
1205	 * fault we could otherwise deadlock:
1206	 */
1207	bad_area_nosemaphore(regs, hw_error_code, address);
1208}
1209NOKPROBE_SYMBOL(do_kern_addr_fault);
1210
1211/* Handle faults in the user portion of the address space */
1212static inline
1213void do_user_addr_fault(struct pt_regs *regs,
1214			unsigned long hw_error_code,
1215			unsigned long address)
1216{
1217	struct vm_area_struct *vma;
1218	struct task_struct *tsk;
1219	struct mm_struct *mm;
1220	vm_fault_t fault;
1221	unsigned int flags = FAULT_FLAG_DEFAULT;
1222
1223	tsk = current;
1224	mm = tsk->mm;
1225
1226	/* kprobes don't want to hook the spurious faults: */
1227	if (unlikely(kprobe_page_fault(regs, X86_TRAP_PF)))
1228		return;
1229
1230	/*
1231	 * Reserved bits are never expected to be set on
1232	 * entries in the user portion of the page tables.
1233	 */
1234	if (unlikely(hw_error_code & X86_PF_RSVD))
1235		pgtable_bad(regs, hw_error_code, address);
1236
1237	/*
1238	 * If SMAP is on, check for invalid kernel (supervisor) access to user
1239	 * pages in the user address space.  The odd case here is WRUSS,
1240	 * which, according to the preliminary documentation, does not respect
1241	 * SMAP and will have the USER bit set so, in all cases, SMAP
1242	 * enforcement appears to be consistent with the USER bit.
1243	 */
1244	if (unlikely(cpu_feature_enabled(X86_FEATURE_SMAP) &&
1245		     !(hw_error_code & X86_PF_USER) &&
1246		     !(regs->flags & X86_EFLAGS_AC)))
1247	{
1248		bad_area_nosemaphore(regs, hw_error_code, address);
1249		return;
1250	}
1251
1252	/*
1253	 * If we're in an interrupt, have no user context or are running
1254	 * in a region with pagefaults disabled, then we must not take the fault.
1255	 */
1256	if (unlikely(faulthandler_disabled() || !mm)) {
1257		bad_area_nosemaphore(regs, hw_error_code, address);
1258		return;
1259	}
1260
1261	/*
1262	 * It's safe to allow irq's after cr2 has been saved and the
1263	 * vmalloc fault has been handled.
1264	 *
1265	 * User-mode registers count as a user access even for any
1266	 * potential system fault or CPU buglet:
1267	 */
1268	if (user_mode(regs)) {
1269		local_irq_enable();
1270		flags |= FAULT_FLAG_USER;
1271	} else {
1272		if (regs->flags & X86_EFLAGS_IF)
1273			local_irq_enable();
1274	}
1275
1276	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
1277
1278	if (hw_error_code & X86_PF_WRITE)
1279		flags |= FAULT_FLAG_WRITE;
1280	if (hw_error_code & X86_PF_INSTR)
1281		flags |= FAULT_FLAG_INSTRUCTION;
1282
1283#ifdef CONFIG_X86_64
1284	/*
1285	 * Faults in the vsyscall page might need emulation.  The
1286	 * vsyscall page is at a high address (>PAGE_OFFSET), but is
1287	 * considered to be part of the user address space.
1288	 *
1289	 * The vsyscall page does not have a "real" VMA, so do this
1290	 * emulation before we go searching for VMAs.
1291	 *
1292	 * PKRU never rejects instruction fetches, so we don't need
1293	 * to consider the X86_PF_PK bit.
1294	 */
1295	if (is_vsyscall_vaddr(address)) {
1296		if (emulate_vsyscall(hw_error_code, regs, address))
1297			return;
1298	}
1299#endif
1300
1301	/*
1302	 * Kernel-mode access to the user address space should only occur
1303	 * on well-defined single instructions listed in the exception
1304	 * tables.  But, an erroneous kernel fault occurring outside one of
1305	 * those areas, while already holding mmap_lock, might deadlock
1306	 * attempting to validate the fault against the address space.
1307	 *
1308	 * Only do the expensive exception table search when we might be at
1309	 * risk of a deadlock.  This happens if we
1310	 * 1. Failed to acquire mmap_lock, and
1311	 * 2. The access did not originate in userspace.
1312	 */
1313	if (unlikely(!mmap_read_trylock(mm))) {
1314		if (!user_mode(regs) && !search_exception_tables(regs->ip)) {
1315			/*
1316			 * Fault from kernel code that we do
1317			 * not expect to fault.
1318			 */
1319			bad_area_nosemaphore(regs, hw_error_code, address);
1320			return;
1321		}
1322retry:
1323		mmap_read_lock(mm);
1324	} else {
1325		/*
1326		 * The above down_read_trylock() might have succeeded in
1327		 * which case we'll have missed the might_sleep() from
1328		 * down_read():
1329		 */
1330		might_sleep();
1331	}
1332
1333	vma = find_vma(mm, address);
1334	if (unlikely(!vma)) {
1335		bad_area(regs, hw_error_code, address);
1336		return;
1337	}
1338	if (likely(vma->vm_start <= address))
1339		goto good_area;
1340	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
1341		bad_area(regs, hw_error_code, address);
1342		return;
1343	}
1344	if (unlikely(expand_stack(vma, address))) {
1345		bad_area(regs, hw_error_code, address);
1346		return;
1347	}
1348
1349	/*
1350	 * Ok, we have a good vm_area for this memory access, so
1351	 * we can handle it..
1352	 */
1353good_area:
1354	if (unlikely(access_error(hw_error_code, vma))) {
1355		bad_area_access_error(regs, hw_error_code, address, vma);
1356		return;
1357	}
1358
1359	/*
1360	 * If for any reason at all we couldn't handle the fault,
1361	 * make sure we exit gracefully rather than endlessly redo
1362	 * the fault.  Since we never set FAULT_FLAG_RETRY_NOWAIT, if
1363	 * we get VM_FAULT_RETRY back, the mmap_lock has been unlocked.
1364	 *
1365	 * Note that handle_userfault() may also release and reacquire mmap_lock
1366	 * (and not return with VM_FAULT_RETRY), when returning to userland to
1367	 * repeat the page fault later with a VM_FAULT_NOPAGE retval
1368	 * (potentially after handling any pending signal during the return to
1369	 * userland). The return to userland is identified whenever
1370	 * FAULT_FLAG_USER|FAULT_FLAG_KILLABLE are both set in flags.
1371	 */
1372	fault = handle_mm_fault(vma, address, flags, regs);
1373
1374	/* Quick path to respond to signals */
1375	if (fault_signal_pending(fault, regs)) {
1376		if (!user_mode(regs))
1377			no_context(regs, hw_error_code, address, SIGBUS,
1378				   BUS_ADRERR);
1379		return;
1380	}
1381
1382	/*
1383	 * If we need to retry the mmap_lock has already been released,
1384	 * and if there is a fatal signal pending there is no guarantee
1385	 * that we made any progress. Handle this case first.
1386	 */
1387	if (unlikely((fault & VM_FAULT_RETRY) &&
1388		     (flags & FAULT_FLAG_ALLOW_RETRY))) {
1389		flags |= FAULT_FLAG_TRIED;
1390		goto retry;
1391	}
1392
1393	mmap_read_unlock(mm);
1394	if (unlikely(fault & VM_FAULT_ERROR)) {
1395		mm_fault_error(regs, hw_error_code, address, fault);
1396		return;
1397	}
1398
1399	check_v8086_mode(regs, address, tsk);
1400}
1401NOKPROBE_SYMBOL(do_user_addr_fault);
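/*
 * Added walkthrough of the retry protocol above (illustrative): the
 * first handle_mm_fault() attempt runs with FAULT_FLAG_ALLOW_RETRY
 * (part of FAULT_FLAG_DEFAULT).  If it must drop mmap_lock to wait,
 * e.g. for read-in of a file-backed page, it returns VM_FAULT_RETRY;
 * the code above then sets FAULT_FLAG_TRIED, retakes the lock and
 * retries once more, this time waiting synchronously.
 */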
1402
1403static __always_inline void
1404trace_page_fault_entries(struct pt_regs *regs, unsigned long error_code,
1405			 unsigned long address)
1406{
1407	if (!trace_pagefault_enabled())
1408		return;
1409
1410	if (user_mode(regs))
1411		trace_page_fault_user(address, regs, error_code);
1412	else
1413		trace_page_fault_kernel(address, regs, error_code);
1414}
1415
1416static __always_inline void
1417handle_page_fault(struct pt_regs *regs, unsigned long error_code,
1418			      unsigned long address)
1419{
1420	trace_page_fault_entries(regs, error_code, address);
1421
1422	if (unlikely(kmmio_fault(regs, address)))
1423		return;
1424
1425	/* Was the fault on kernel-controlled part of the address space? */
1426	if (unlikely(fault_in_kernel_space(address))) {
1427		do_kern_addr_fault(regs, error_code, address);
1428	} else {
1429		do_user_addr_fault(regs, error_code, address);
1430		/*
1431		 * User address page fault handling might have reenabled
1432		 * interrupts. Fixing up all potential exit points of
1433		 * do_user_addr_fault() and its leaf functions is just not
1434		 * doable w/o creating an unholy mess or turning the code
1435		 * upside down.
1436		 */
1437		local_irq_disable();
1438	}
1439}
1440
1441DEFINE_IDTENTRY_RAW_ERRORCODE(exc_page_fault)
1442{
1443	unsigned long address = read_cr2();
1444	irqentry_state_t state;
1445
1446	prefetchw(&current->mm->mmap_lock);
1447
1448	/*
1449	 * KVM has two types of events that are, logically, interrupts, but
1450	 * are unfortunately delivered using the #PF vector.  These events are
1451	 * "you just accessed valid memory, but the host doesn't have it right
1452	 * now, so I'll put you to sleep if you continue" and "that memory
1453	 * you tried to access earlier is available now."
1454	 *
1455	 * We are relying on the interrupted context being sane (valid RSP,
1456	 * relevant locks not held, etc.), which is fine as long as the
1457	 * interrupted context had IF=1.  We are also relying on the KVM
1458	 * async pf type field and CR2 being read consistently instead of
1459	 * getting values from real and async page faults mixed up.
1460	 *
1461	 * Fingers crossed.
1462	 *
1463	 * The async #PF handling code takes care of idtentry handling
1464	 * itself.
1465	 */
1466	if (kvm_handle_async_pf(regs, (u32)address))
1467		return;
1468
1469	/*
1470	 * Entry handling for valid #PF from kernel mode is slightly
1471	 * different: RCU is already watching and rcu_irq_enter() must not
1472	 * be invoked because a kernel fault on a user space address might
1473	 * sleep.
1474	 *
1475	 * In case the fault hit an RCU idle region, the conditional entry
1476	 * code re-enables RCU to avoid subsequent wreckage, which helps
1477	 * debuggability.
1478	 */
1479	state = irqentry_enter(regs);
1480
1481	instrumentation_begin();
1482	handle_page_fault(regs, error_code, address);
1483	instrumentation_end();
1484
1485	irqentry_exit(regs, state);
1486}
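/*
 * Added end-to-end sketch (illustrative, not part of the original
 * file): a first touch of freshly mmap()ed anonymous memory raises #PF
 * with the address in CR2; exc_page_fault() -> handle_page_fault() ->
 * do_user_addr_fault() finds the VMA, handle_mm_fault() installs a
 * zeroed page, and the iret restarts the faulting instruction, which
 * now succeeds.
 */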