v4.6
 
   1/*
   2 *  Copyright (C) 1995  Linus Torvalds
   3 *  Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
   4 *  Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
   5 */
   6#include <linux/sched.h>		/* test_thread_flag(), ...	*/
 
   7#include <linux/kdebug.h>		/* oops_begin/end, ...		*/
   8#include <linux/module.h>		/* search_exception_table	*/
   9#include <linux/bootmem.h>		/* max_low_pfn			*/
 
  10#include <linux/kprobes.h>		/* NOKPROBE_SYMBOL, ...		*/
  11#include <linux/mmiotrace.h>		/* kmmio_handler, ...		*/
  12#include <linux/perf_event.h>		/* perf_sw_event		*/
  13#include <linux/hugetlb.h>		/* hstate_index_to_shift	*/
  14#include <linux/prefetch.h>		/* prefetchw			*/
  15#include <linux/context_tracking.h>	/* exception_enter(), ...	*/
  16#include <linux/uaccess.h>		/* faulthandler_disabled()	*/
 
  17
  18#include <asm/cpufeature.h>		/* boot_cpu_has, ...		*/
  19#include <asm/traps.h>			/* dotraplinkage, ...		*/
  20#include <asm/pgalloc.h>		/* pgd_*(), ...			*/
  21#include <asm/kmemcheck.h>		/* kmemcheck_*(), ...		*/
  22#include <asm/fixmap.h>			/* VSYSCALL_ADDR		*/
  23#include <asm/vsyscall.h>		/* emulate_vsyscall		*/
  24#include <asm/vm86.h>			/* struct vm86			*/
  25#include <asm/mmu_context.h>		/* vma_pkey()			*/
  26
  27#define CREATE_TRACE_POINTS
  28#include <asm/trace/exceptions.h>
  29
  30/*
  31 * Page fault error code bits:
  32 *
  33 *   bit 0 ==	 0: no page found	1: protection fault
  34 *   bit 1 ==	 0: read access		1: write access
  35 *   bit 2 ==	 0: kernel-mode access	1: user-mode access
  36 *   bit 3 ==				1: use of reserved bit detected
  37 *   bit 4 ==				1: fault was an instruction fetch
  38 *   bit 5 ==				1: protection keys block access
  39 */
  40enum x86_pf_error_code {
  41
  42	PF_PROT		=		1 << 0,
  43	PF_WRITE	=		1 << 1,
  44	PF_USER		=		1 << 2,
  45	PF_RSVD		=		1 << 3,
  46	PF_INSTR	=		1 << 4,
  47	PF_PK		=		1 << 5,
  48};
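/*
 * For example, a user-mode write to a present page whose permissions
 * forbid it arrives with PF_PROT|PF_WRITE|PF_USER (error_code == 0x7),
 * while a user-mode read of an unmapped address arrives with just
 * PF_USER (error_code == 0x4).
 */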
  49
  50/*
  51 * Returns 0 if mmiotrace is disabled, or if the fault is not
  52 * handled by mmiotrace:
  53 */
  54static nokprobe_inline int
  55kmmio_fault(struct pt_regs *regs, unsigned long addr)
  56{
  57	if (unlikely(is_kmmio_active()))
  58		if (kmmio_handler(regs, addr) == 1)
  59			return -1;
  60	return 0;
  61}
  62
  63static nokprobe_inline int kprobes_fault(struct pt_regs *regs)
  64{
  65	int ret = 0;
  66
  67	/* kprobe_running() needs smp_processor_id() */
  68	if (kprobes_built_in() && !user_mode(regs)) {
  69		preempt_disable();
  70		if (kprobe_running() && kprobe_fault_handler(regs, 14))
  71			ret = 1;
  72		preempt_enable();
  73	}
  74
  75	return ret;
  76}
  77
  78/*
  79 * Prefetch quirks:
  80 *
  81 * 32-bit mode:
  82 *
  83 *   Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
  84 *   Check that here and ignore it.
  85 *
  86 * 64-bit mode:
  87 *
  88 *   Sometimes the CPU reports invalid exceptions on prefetch.
  89 *   Check that here and ignore it.
  90 *
  91 * Opcode checker based on code by Richard Brunner.
  92 */
  93static inline int
  94check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
  95		      unsigned char opcode, int *prefetch)
  96{
  97	unsigned char instr_hi = opcode & 0xf0;
  98	unsigned char instr_lo = opcode & 0x0f;
  99
 100	switch (instr_hi) {
 101	case 0x20:
 102	case 0x30:
 103		/*
 104		 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
 105		 * In X86_64 long mode, the CPU will signal invalid
 106		 * opcode if some of these prefixes are present so
 107		 * X86_64 will never get here anyway
 108		 */
 109		return ((instr_lo & 7) == 0x6);
 110#ifdef CONFIG_X86_64
 111	case 0x40:
 112		/*
 113		 * In AMD64 long mode 0x40..0x4F are valid REX prefixes
 114		 * Need to figure out under what instruction mode the
 115		 * instruction was issued. Could check the LDT for lm,
 116		 * but for now it's good enough to assume that long
 117		 * mode only uses well known segments or kernel.
 118		 */
 119		return (!user_mode(regs) || user_64bit_mode(regs));
 120#endif
 121	case 0x60:
 122		/* 0x64 thru 0x67 are valid prefixes in all modes. */
 123		return (instr_lo & 0xC) == 0x4;
 124	case 0xF0:
 125		/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
 126		return !instr_lo || (instr_lo>>1) == 1;
 127	case 0x00:
 128		/* Prefetch instruction is 0x0F0D or 0x0F18 */
 129		if (probe_kernel_address(instr, opcode))
 130			return 0;
 131
 132		*prefetch = (instr_lo == 0xF) &&
 133			(opcode == 0x0D || opcode == 0x18);
 134		return 0;
 135	default:
 136		return 0;
 137	}
 138}
 139
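/*
 * Scan up to 15 opcode bytes at the faulting instruction pointer; if they
 * decode to a PREFETCH instruction (0x0F 0x0D or 0x0F 0x18, possibly behind
 * valid prefixes), report the fault as one of the bogus prefetch faults
 * described above so that the caller can ignore it.
 */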
 140static int
 141is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
 142{
 143	unsigned char *max_instr;
 144	unsigned char *instr;
 145	int prefetch = 0;
 146
 147	/*
  148	 * If it was an exec (instruction fetch) fault on an NX page, then
 149	 * do not ignore the fault:
 150	 */
 151	if (error_code & PF_INSTR)
 152		return 0;
 153
 154	instr = (void *)convert_ip_to_linear(current, regs);
 155	max_instr = instr + 15;
 156
 157	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE_MAX)
 158		return 0;
 159
 160	while (instr < max_instr) {
 161		unsigned char opcode;
 162
 163		if (probe_kernel_address(instr, opcode))
 164			break;
 165
 166		instr++;
 167
 168		if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
 169			break;
 170	}
 171	return prefetch;
 172}
 173
 174/*
 175 * A protection key fault means that the PKRU value did not allow
 176 * access to some PTE.  Userspace can figure out what PKRU was
 177 * from the XSAVE state, and this function fills out a field in
 178 * siginfo so userspace can discover which protection key was set
 179 * on the PTE.
 180 *
 181 * If we get here, we know that the hardware signaled a PF_PK
 182 * fault and that there was a VMA once we got in the fault
 183 * handler.  It does *not* guarantee that the VMA we find here
 184 * was the one that we faulted on.
 185 *
 186 * 1. T1   : mprotect_key(foo, PAGE_SIZE, pkey=4);
 187 * 2. T1   : set PKRU to deny access to pkey=4, touches page
 188 * 3. T1   : faults...
 189 * 4.    T2: mprotect_key(foo, PAGE_SIZE, pkey=5);
 190 * 5. T1   : enters fault handler, takes mmap_sem, etc...
 191 * 6. T1   : reaches here, sees vma_pkey(vma)=5, when we really
 192 *	     faulted on a pte with its pkey=4.
 193 */
 194static void fill_sig_info_pkey(int si_code, siginfo_t *info,
 195		struct vm_area_struct *vma)
 196{
 197	/* This is effectively an #ifdef */
 198	if (!boot_cpu_has(X86_FEATURE_OSPKE))
 199		return;
 200
 201	/* Fault not from Protection Keys: nothing to do */
 202	if (si_code != SEGV_PKUERR)
 203		return;
 204	/*
 205	 * force_sig_info_fault() is called from a number of
 206	 * contexts, some of which have a VMA and some of which
  207	 * do not.  The PF_PK handling happens after we have a
 208	 * valid VMA, so we should never reach this without a
 209	 * valid VMA.
 210	 */
 211	if (!vma) {
 212		WARN_ONCE(1, "PKU fault with no VMA passed in");
 213		info->si_pkey = 0;
 214		return;
 215	}
 216	/*
 217	 * si_pkey should be thought of as a strong hint, but not
  218	 * absolutely guaranteed to be 100% accurate because of
 219	 * the race explained above.
 220	 */
 221	info->si_pkey = vma_pkey(vma);
 222}
 223
 224static void
 225force_sig_info_fault(int si_signo, int si_code, unsigned long address,
 226		     struct task_struct *tsk, struct vm_area_struct *vma,
 227		     int fault)
 228{
 229	unsigned lsb = 0;
 230	siginfo_t info;
 231
 232	info.si_signo	= si_signo;
 233	info.si_errno	= 0;
 234	info.si_code	= si_code;
 235	info.si_addr	= (void __user *)address;
 236	if (fault & VM_FAULT_HWPOISON_LARGE)
 237		lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault)); 
 238	if (fault & VM_FAULT_HWPOISON)
 239		lsb = PAGE_SHIFT;
 240	info.si_addr_lsb = lsb;
 241
 242	fill_sig_info_pkey(si_code, &info, vma);
 243
 244	force_sig_info(si_signo, &info, tsk);
 
 245}
 246
 247DEFINE_SPINLOCK(pgd_lock);
 248LIST_HEAD(pgd_list);
 249
 250#ifdef CONFIG_X86_32
 251static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
 252{
 253	unsigned index = pgd_index(address);
 254	pgd_t *pgd_k;
 
 255	pud_t *pud, *pud_k;
 256	pmd_t *pmd, *pmd_k;
 257
 258	pgd += index;
 259	pgd_k = init_mm.pgd + index;
 260
 261	if (!pgd_present(*pgd_k))
 262		return NULL;
 263
 264	/*
 265	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
 266	 * and redundant with the set_pmd() on non-PAE. As would
 267	 * set_pud.
 268	 */
 269	pud = pud_offset(pgd, address);
 270	pud_k = pud_offset(pgd_k, address);
 271	if (!pud_present(*pud_k))
 272		return NULL;
 273
 274	pmd = pmd_offset(pud, address);
 275	pmd_k = pmd_offset(pud_k, address);
 276	if (!pmd_present(*pmd_k))
 277		return NULL;
 278
 279	if (!pmd_present(*pmd))
 280		set_pmd(pmd, *pmd_k);
 281	else
 282		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
 283
 284	return pmd_k;
 285}
 286
 287void vmalloc_sync_all(void)
 288{
 289	unsigned long address;
 290
 291	if (SHARED_KERNEL_PMD)
 292		return;
 293
 294	for (address = VMALLOC_START & PMD_MASK;
 295	     address >= TASK_SIZE && address < FIXADDR_TOP;
 296	     address += PMD_SIZE) {
 297		struct page *page;
 298
 299		spin_lock(&pgd_lock);
 300		list_for_each_entry(page, &pgd_list, lru) {
 301			spinlock_t *pgt_lock;
 302			pmd_t *ret;
 303
 304			/* the pgt_lock only for Xen */
 305			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
 306
 307			spin_lock(pgt_lock);
 308			ret = vmalloc_sync_one(page_address(page), address);
 309			spin_unlock(pgt_lock);
 310
 311			if (!ret)
 312				break;
 313		}
 314		spin_unlock(&pgd_lock);
 315	}
 316}
 317
 318/*
 319 * 32-bit:
 320 *
 321 *   Handle a fault on the vmalloc or module mapping area
 322 */
 323static noinline int vmalloc_fault(unsigned long address)
 324{
 325	unsigned long pgd_paddr;
 326	pmd_t *pmd_k;
 327	pte_t *pte_k;
 328
 329	/* Make sure we are in vmalloc area: */
 330	if (!(address >= VMALLOC_START && address < VMALLOC_END))
 331		return -1;
 332
 333	WARN_ON_ONCE(in_nmi());
 334
 335	/*
 336	 * Synchronize this task's top level page-table
 337	 * with the 'reference' page table.
 338	 *
 339	 * Do _not_ use "current" here. We might be inside
 340	 * an interrupt in the middle of a task switch..
 341	 */
 342	pgd_paddr = read_cr3();
 343	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
 344	if (!pmd_k)
 345		return -1;
 346
 347	if (pmd_huge(*pmd_k))
 348		return 0;
 349
 350	pte_k = pte_offset_kernel(pmd_k, address);
 351	if (!pte_present(*pte_k))
 352		return -1;
 353
 354	return 0;
 355}
 356NOKPROBE_SYMBOL(vmalloc_fault);
 357
 358/*
 359 * Did it hit the DOS screen memory VA from vm86 mode?
 360 */
 361static inline void
 362check_v8086_mode(struct pt_regs *regs, unsigned long address,
 363		 struct task_struct *tsk)
 364{
 365#ifdef CONFIG_VM86
 366	unsigned long bit;
 367
 368	if (!v8086_mode(regs) || !tsk->thread.vm86)
 369		return;
 370
 371	bit = (address - 0xA0000) >> PAGE_SHIFT;
 372	if (bit < 32)
 373		tsk->thread.vm86->screen_bitmap |= 1 << bit;
 374#endif
 375}
 376
 377static bool low_pfn(unsigned long pfn)
 378{
 379	return pfn < max_low_pfn;
 380}
 381
 382static void dump_pagetable(unsigned long address)
 383{
 384	pgd_t *base = __va(read_cr3());
 385	pgd_t *pgd = &base[pgd_index(address)];
 386	pmd_t *pmd;
 387	pte_t *pte;
 388
 389#ifdef CONFIG_X86_PAE
 390	printk("*pdpt = %016Lx ", pgd_val(*pgd));
 391	if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd))
 392		goto out;
 393#endif
 394	pmd = pmd_offset(pud_offset(pgd, address), address);
 395	printk(KERN_CONT "*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd));
 396
 397	/*
 398	 * We must not directly access the pte in the highpte
 399	 * case if the page table is located in highmem.
 400	 * And let's rather not kmap-atomic the pte, just in case
 401	 * it's allocated already:
 402	 */
 403	if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd))
 404		goto out;
 405
 406	pte = pte_offset_kernel(pmd, address);
 407	printk("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte));
 408out:
 409	printk("\n");
 410}
 411
 412#else /* CONFIG_X86_64: */
 413
 414void vmalloc_sync_all(void)
 415{
 416	sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END, 0);
 417}
 418
 419/*
 420 * 64-bit:
 421 *
 422 *   Handle a fault on the vmalloc area
 423 */
 424static noinline int vmalloc_fault(unsigned long address)
 425{
 426	pgd_t *pgd, *pgd_ref;
 427	pud_t *pud, *pud_ref;
 428	pmd_t *pmd, *pmd_ref;
 429	pte_t *pte, *pte_ref;
 430
 431	/* Make sure we are in vmalloc area: */
 432	if (!(address >= VMALLOC_START && address < VMALLOC_END))
 433		return -1;
 434
 435	WARN_ON_ONCE(in_nmi());
 436
 437	/*
 438	 * Copy kernel mappings over when needed. This can also
  439	 * happen within a race in page table update. In the latter
 440	 * case just flush:
 441	 */
 442	pgd = pgd_offset(current->active_mm, address);
 443	pgd_ref = pgd_offset_k(address);
 444	if (pgd_none(*pgd_ref))
 445		return -1;
 446
 447	if (pgd_none(*pgd)) {
 448		set_pgd(pgd, *pgd_ref);
 449		arch_flush_lazy_mmu_mode();
 450	} else {
 451		BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
 452	}
 453
 454	/*
 455	 * Below here mismatches are bugs because these lower tables
 456	 * are shared:
 457	 */
 458
 459	pud = pud_offset(pgd, address);
 460	pud_ref = pud_offset(pgd_ref, address);
 461	if (pud_none(*pud_ref))
 462		return -1;
 463
 464	if (pud_none(*pud) || pud_pfn(*pud) != pud_pfn(*pud_ref))
 465		BUG();
 466
 467	if (pud_huge(*pud))
 468		return 0;
 469
 470	pmd = pmd_offset(pud, address);
 471	pmd_ref = pmd_offset(pud_ref, address);
 472	if (pmd_none(*pmd_ref))
 473		return -1;
 474
 475	if (pmd_none(*pmd) || pmd_pfn(*pmd) != pmd_pfn(*pmd_ref))
 476		BUG();
 477
 478	if (pmd_huge(*pmd))
 479		return 0;
 480
 481	pte_ref = pte_offset_kernel(pmd_ref, address);
 482	if (!pte_present(*pte_ref))
 483		return -1;
 484
 485	pte = pte_offset_kernel(pmd, address);
 486
 487	/*
 488	 * Don't use pte_page here, because the mappings can point
 489	 * outside mem_map, and the NUMA hash lookup cannot handle
 490	 * that:
 491	 */
 492	if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
 493		BUG();
 494
 495	return 0;
 496}
 497NOKPROBE_SYMBOL(vmalloc_fault);
 498
 499#ifdef CONFIG_CPU_SUP_AMD
 500static const char errata93_warning[] =
 501KERN_ERR 
 502"******* Your BIOS seems to not contain a fix for K8 errata #93\n"
 503"******* Working around it, but it may cause SEGVs or burn power.\n"
 504"******* Please consider a BIOS update.\n"
 505"******* Disabling USB legacy in the BIOS may also help.\n";
 506#endif
 507
 508/*
 509 * No vm86 mode in 64-bit mode:
 510 */
 511static inline void
 512check_v8086_mode(struct pt_regs *regs, unsigned long address,
 513		 struct task_struct *tsk)
 514{
 515}
 516
 517static int bad_address(void *p)
 518{
 519	unsigned long dummy;
 520
 521	return probe_kernel_address((unsigned long *)p, dummy);
 522}
 523
 524static void dump_pagetable(unsigned long address)
 525{
 526	pgd_t *base = __va(read_cr3() & PHYSICAL_PAGE_MASK);
 527	pgd_t *pgd = base + pgd_index(address);
 
 528	pud_t *pud;
 529	pmd_t *pmd;
 530	pte_t *pte;
 531
 532	if (bad_address(pgd))
 533		goto bad;
 534
 535	printk("PGD %lx ", pgd_val(*pgd));
 536
 537	if (!pgd_present(*pgd))
 538		goto out;
 539
 540	pud = pud_offset(pgd, address);
 541	if (bad_address(pud))
 542		goto bad;
 543
 544	printk("PUD %lx ", pud_val(*pud));
 545	if (!pud_present(*pud) || pud_large(*pud))
 546		goto out;
 547
 548	pmd = pmd_offset(pud, address);
 549	if (bad_address(pmd))
 550		goto bad;
 551
 552	printk("PMD %lx ", pmd_val(*pmd));
 553	if (!pmd_present(*pmd) || pmd_large(*pmd))
 554		goto out;
 555
 556	pte = pte_offset_kernel(pmd, address);
 557	if (bad_address(pte))
 558		goto bad;
 559
 560	printk("PTE %lx", pte_val(*pte));
 561out:
 562	printk("\n");
 563	return;
 564bad:
 565	printk("BAD\n");
 566}
 567
 568#endif /* CONFIG_X86_64 */
 569
 570/*
 571 * Workaround for K8 erratum #93 & buggy BIOS.
 572 *
 573 * BIOS SMM functions are required to use a specific workaround
 574 * to avoid corruption of the 64bit RIP register on C stepping K8.
 575 *
  576 * A lot of BIOSes that didn't get tested properly miss this.
 577 *
 578 * The OS sees this as a page fault with the upper 32bits of RIP cleared.
 579 * Try to work around it here.
 580 *
 581 * Note we only handle faults in kernel here.
 582 * Does nothing on 32-bit.
 583 */
 584static int is_errata93(struct pt_regs *regs, unsigned long address)
 585{
 586#if defined(CONFIG_X86_64) && defined(CONFIG_CPU_SUP_AMD)
 587	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD
 588	    || boot_cpu_data.x86 != 0xf)
 589		return 0;
 590
 591	if (address != regs->ip)
 592		return 0;
 593
 594	if ((address >> 32) != 0)
 595		return 0;
 596
 597	address |= 0xffffffffUL << 32;
 598	if ((address >= (u64)_stext && address <= (u64)_etext) ||
 599	    (address >= MODULES_VADDR && address <= MODULES_END)) {
 600		printk_once(errata93_warning);
 601		regs->ip = address;
 602		return 1;
 603	}
 604#endif
 605	return 0;
 606}
 607
 608/*
  609 * Work around K8 erratum #100: K8 in compat mode occasionally jumps
 610 * to illegal addresses >4GB.
 611 *
 612 * We catch this in the page fault handler because these addresses
 613 * are not reachable. Just detect this case and return.  Any code
 614 * segment in LDT is compatibility mode.
 615 */
 616static int is_errata100(struct pt_regs *regs, unsigned long address)
 617{
 618#ifdef CONFIG_X86_64
 619	if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
 620		return 1;
 621#endif
 622	return 0;
 623}
 624
 625static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
 626{
 627#ifdef CONFIG_X86_F00F_BUG
 628	unsigned long nr;
 629
 630	/*
 631	 * Pentium F0 0F C7 C8 bug workaround:
 632	 */
 633	if (boot_cpu_has_bug(X86_BUG_F00F)) {
 634		nr = (address - idt_descr.address) >> 3;
 635
 636		if (nr == 6) {
 637			do_invalid_op(regs, 0);
 638			return 1;
 639		}
 640	}
 641#endif
 642	return 0;
 643}
 644
 645static const char nx_warning[] = KERN_CRIT
 646"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
 647static const char smep_warning[] = KERN_CRIT
 648"unable to execute userspace code (SMEP?) (uid: %d)\n";
 649
 650static void
 651show_fault_oops(struct pt_regs *regs, unsigned long error_code,
 652		unsigned long address)
 653{
 654	if (!oops_may_print())
 655		return;
 656
 657	if (error_code & PF_INSTR) {
 658		unsigned int level;
 659		pgd_t *pgd;
 660		pte_t *pte;
 661
 662		pgd = __va(read_cr3() & PHYSICAL_PAGE_MASK);
 663		pgd += pgd_index(address);
 664
 665		pte = lookup_address_in_pgd(pgd, address, &level);
 666
 667		if (pte && pte_present(*pte) && !pte_exec(*pte))
 668			printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
 
 669		if (pte && pte_present(*pte) && pte_exec(*pte) &&
 670				(pgd_flags(*pgd) & _PAGE_USER) &&
 671				(__read_cr4() & X86_CR4_SMEP))
 672			printk(smep_warning, from_kuid(&init_user_ns, current_uid()));
 
 673	}
 674
 675	printk(KERN_ALERT "BUG: unable to handle kernel ");
 676	if (address < PAGE_SIZE)
 677		printk(KERN_CONT "NULL pointer dereference");
 678	else
 679		printk(KERN_CONT "paging request");
 680
 681	printk(KERN_CONT " at %p\n", (void *) address);
 682	printk(KERN_ALERT "IP:");
 683	printk_address(regs->ip);
 684
 685	dump_pagetable(address);
 686}
 687
 688static noinline void
 689pgtable_bad(struct pt_regs *regs, unsigned long error_code,
 690	    unsigned long address)
 691{
 692	struct task_struct *tsk;
 693	unsigned long flags;
 694	int sig;
 695
 696	flags = oops_begin();
 697	tsk = current;
 698	sig = SIGKILL;
 699
 700	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
 701	       tsk->comm, address);
 702	dump_pagetable(address);
 703
 704	tsk->thread.cr2		= address;
 705	tsk->thread.trap_nr	= X86_TRAP_PF;
 706	tsk->thread.error_code	= error_code;
 707
 708	if (__die("Bad pagetable", regs, error_code))
 709		sig = 0;
 710
 711	oops_end(flags, regs, sig);
 712}
 713
 714static noinline void
 715no_context(struct pt_regs *regs, unsigned long error_code,
 716	   unsigned long address, int signal, int si_code)
 717{
 718	struct task_struct *tsk = current;
 719	unsigned long flags;
 720	int sig;
 721	/* No context means no VMA to pass down */
 722	struct vm_area_struct *vma = NULL;
 723
 724	/* Are we prepared to handle this kernel fault? */
 725	if (fixup_exception(regs, X86_TRAP_PF)) {
 726		/*
 727		 * Any interrupt that takes a fault gets the fixup. This makes
  728		 * the below recursive fault logic only apply to faults from
 729		 * task context.
 730		 */
 731		if (in_interrupt())
 732			return;
 733
 734		/*
 735		 * Per the above we're !in_interrupt(), aka. task context.
 736		 *
 737		 * In this case we need to make sure we're not recursively
 738		 * faulting through the emulate_vsyscall() logic.
 
 739		 */
 740		if (current_thread_info()->sig_on_uaccess_error && signal) {
 741			tsk->thread.trap_nr = X86_TRAP_PF;
 742			tsk->thread.error_code = error_code | PF_USER;
 743			tsk->thread.cr2 = address;
 744
 745			/* XXX: hwpoison faults will set the wrong code. */
 746			force_sig_info_fault(signal, si_code, address,
 747					     tsk, vma, 0);
 748		}
 749
 750		/*
 751		 * Barring that, we can do the fixup and be happy.
 752		 */
 753		return;
 754	}
 
 755
 756	/*
 757	 * 32-bit:
 758	 *
 759	 *   Valid to do another page fault here, because if this fault
 760	 *   had been triggered by is_prefetch fixup_exception would have
 761	 *   handled it.
 762	 *
 763	 * 64-bit:
 764	 *
 765	 *   Hall of shame of CPU/BIOS bugs.
 766	 */
 767	if (is_prefetch(regs, error_code, address))
 768		return;
 769
 770	if (is_errata93(regs, address))
 771		return;
 772
 
 773	/*
 774	 * Oops. The kernel tried to access some bad page. We'll have to
 775	 * terminate things with extreme prejudice:
 776	 */
 777	flags = oops_begin();
 778
 779	show_fault_oops(regs, error_code, address);
 780
 781	if (task_stack_end_corrupted(tsk))
 782		printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
 783
 784	tsk->thread.cr2		= address;
 785	tsk->thread.trap_nr	= X86_TRAP_PF;
 786	tsk->thread.error_code	= error_code;
 787
 788	sig = SIGKILL;
 789	if (__die("Oops", regs, error_code))
 790		sig = 0;
 791
 792	/* Executive summary in case the body of the oops scrolled away */
 793	printk(KERN_DEFAULT "CR2: %016lx\n", address);
 794
 795	oops_end(flags, regs, sig);
 796}
 797
 798/*
 799 * Print out info about fatal segfaults, if the show_unhandled_signals
 800 * sysctl is set:
 801 */
 802static inline void
 803show_signal_msg(struct pt_regs *regs, unsigned long error_code,
 804		unsigned long address, struct task_struct *tsk)
 805{
 806	if (!unhandled_signal(tsk, SIGSEGV))
 807		return;
 808
 809	if (!printk_ratelimit())
 810		return;
 811
 812	printk("%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
 813		task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
 814		tsk->comm, task_pid_nr(tsk), address,
 815		(void *)regs->ip, (void *)regs->sp, error_code);
 816
 817	print_vma_addr(KERN_CONT " in ", regs->ip);
 818
 819	printk(KERN_CONT "\n");
 820}
 821
 822static void
 823__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
 824		       unsigned long address, struct vm_area_struct *vma,
 825		       int si_code)
 826{
 827	struct task_struct *tsk = current;
 828
 829	/* User mode accesses just cause a SIGSEGV */
 830	if (error_code & PF_USER) {
 831		/*
 832		 * It's possible to have interrupts off here:
 833		 */
 834		local_irq_enable();
 835
 836		/*
 837		 * Valid to do another page fault here because this one came
 838		 * from user space:
 839		 */
 840		if (is_prefetch(regs, error_code, address))
 841			return;
 842
 843		if (is_errata100(regs, address))
 844			return;
 845
 846#ifdef CONFIG_X86_64
 847		/*
 848		 * Instruction fetch faults in the vsyscall page might need
 849		 * emulation.
 850		 */
 851		if (unlikely((error_code & PF_INSTR) &&
 852			     ((address & ~0xfff) == VSYSCALL_ADDR))) {
 853			if (emulate_vsyscall(regs, address))
 854				return;
 855		}
 856#endif
 857		/* Kernel addresses are always protection faults: */
 858		if (address >= TASK_SIZE)
 859			error_code |= PF_PROT;
 860
 861		if (likely(show_unhandled_signals))
 862			show_signal_msg(regs, error_code, address, tsk);
 863
 864		tsk->thread.cr2		= address;
 865		tsk->thread.error_code	= error_code;
 866		tsk->thread.trap_nr	= X86_TRAP_PF;
 867
 868		force_sig_info_fault(SIGSEGV, si_code, address, tsk, vma, 0);
 869
 
 870		return;
 871	}
 872
 873	if (is_f00f_bug(regs, address))
 874		return;
 875
 876	no_context(regs, error_code, address, SIGSEGV, si_code);
 877}
 878
 879static noinline void
 880bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
 881		     unsigned long address, struct vm_area_struct *vma)
 882{
 883	__bad_area_nosemaphore(regs, error_code, address, vma, SEGV_MAPERR);
 884}
 885
 886static void
 887__bad_area(struct pt_regs *regs, unsigned long error_code,
 888	   unsigned long address,  struct vm_area_struct *vma, int si_code)
 889{
 890	struct mm_struct *mm = current->mm;
 891
 892	/*
 893	 * Something tried to access memory that isn't in our memory map..
 894	 * Fix it, but check if it's kernel or user first..
 895	 */
 896	up_read(&mm->mmap_sem);
 897
 898	__bad_area_nosemaphore(regs, error_code, address, vma, si_code);
 899}
 900
 901static noinline void
 902bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
 903{
 904	__bad_area(regs, error_code, address, NULL, SEGV_MAPERR);
 905}
 906
 907static inline bool bad_area_access_from_pkeys(unsigned long error_code,
 908		struct vm_area_struct *vma)
 909{
 910	/* This code is always called on the current mm */
 911	bool foreign = false;
 912
 913	if (!boot_cpu_has(X86_FEATURE_OSPKE))
 914		return false;
 915	if (error_code & PF_PK)
 916		return true;
 917	/* this checks permission keys on the VMA: */
 918	if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE),
 919				(error_code & PF_INSTR), foreign))
 920		return true;
 921	return false;
 922}
 923
 924static noinline void
 925bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
 926		      unsigned long address, struct vm_area_struct *vma)
 927{
 928	/*
 929	 * This OSPKE check is not strictly necessary at runtime.
 930	 * But, doing it this way allows compiler optimizations
 931	 * if pkeys are compiled out.
 932	 */
 933	if (bad_area_access_from_pkeys(error_code, vma))
 934		__bad_area(regs, error_code, address, vma, SEGV_PKUERR);
 935	else
 936		__bad_area(regs, error_code, address, vma, SEGV_ACCERR);
 937}
 938
 939static void
 940do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
 941	  struct vm_area_struct *vma, unsigned int fault)
 942{
 943	struct task_struct *tsk = current;
 944	int code = BUS_ADRERR;
 945
 946	/* Kernel mode? Handle exceptions or die: */
 947	if (!(error_code & PF_USER)) {
 948		no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
 
 949		return;
 950	}
 951
 952	/* User-space => ok to do another page fault: */
 953	if (is_prefetch(regs, error_code, address))
 954		return;
 955
 956	tsk->thread.cr2		= address;
 957	tsk->thread.error_code	= error_code;
 958	tsk->thread.trap_nr	= X86_TRAP_PF;
 959
 960#ifdef CONFIG_MEMORY_FAILURE
 961	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
 962		printk(KERN_ERR
 963	"MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
 964			tsk->comm, tsk->pid, address);
 965		code = BUS_MCEERR_AR;
 966	}
 967#endif
 968	force_sig_info_fault(SIGBUS, code, address, tsk, vma, fault);
 969}
 970
 971static noinline void
 972mm_fault_error(struct pt_regs *regs, unsigned long error_code,
 973	       unsigned long address, struct vm_area_struct *vma,
 974	       unsigned int fault)
 975{
 976	if (fatal_signal_pending(current) && !(error_code & PF_USER)) {
 977		no_context(regs, error_code, address, 0, 0);
 978		return;
 979	}
 980
 981	if (fault & VM_FAULT_OOM) {
 982		/* Kernel mode? Handle exceptions or die: */
 983		if (!(error_code & PF_USER)) {
 984			no_context(regs, error_code, address,
 985				   SIGSEGV, SEGV_MAPERR);
 986			return;
 987		}
 988
 989		/*
  990		 * We ran out of memory, call the OOM killer, and return to
 991		 * userspace (which will retry the fault, or kill us if we got
 992		 * oom-killed):
 993		 */
 994		pagefault_out_of_memory();
 995	} else {
 996		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
 997			     VM_FAULT_HWPOISON_LARGE))
 998			do_sigbus(regs, error_code, address, vma, fault);
 999		else if (fault & VM_FAULT_SIGSEGV)
1000			bad_area_nosemaphore(regs, error_code, address, vma);
1001		else
1002			BUG();
1003	}
1004}
1005
1006static int spurious_fault_check(unsigned long error_code, pte_t *pte)
1007{
1008	if ((error_code & PF_WRITE) && !pte_write(*pte))
1009		return 0;
1010
1011	if ((error_code & PF_INSTR) && !pte_exec(*pte))
1012		return 0;
1013	/*
1014	 * Note: We do not do lazy flushing on protection key
1015	 * changes, so no spurious fault will ever set PF_PK.
1016	 */
1017	if ((error_code & PF_PK))
1018		return 1;
1019
1020	return 1;
1021}
1022
1023/*
1024 * Handle a spurious fault caused by a stale TLB entry.
1025 *
1026 * This allows us to lazily refresh the TLB when increasing the
1027 * permissions of a kernel page (RO -> RW or NX -> X).  Doing it
1028 * eagerly is very expensive since that implies doing a full
1029 * cross-processor TLB flush, even if no stale TLB entries exist
1030 * on other processors.
1031 *
1032 * Spurious faults may only occur if the TLB contains an entry with
 1033 * fewer permissions than the page table entry.  Non-present (P = 0)
1034 * and reserved bit (R = 1) faults are never spurious.
1035 *
1036 * There are no security implications to leaving a stale TLB when
1037 * increasing the permissions on a page.
1038 *
1039 * Returns non-zero if a spurious fault was handled, zero otherwise.
1040 *
1041 * See Intel Developer's Manual Vol 3 Section 4.10.4.3, bullet 3
1042 * (Optional Invalidation).
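 *
 * Example: after set_memory_rw() upgrades a read-only kernel page, a CPU
 * still holding the stale read-only TLB entry takes a PF_PROT|PF_WRITE
 * fault on a write that the page tables now permit; the checks below
 * confirm the new permissions and the access is simply retried.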
1043 */
1044static noinline int
1045spurious_fault(unsigned long error_code, unsigned long address)
1046{
1047	pgd_t *pgd;
 
1048	pud_t *pud;
1049	pmd_t *pmd;
1050	pte_t *pte;
1051	int ret;
1052
1053	/*
1054	 * Only writes to RO or instruction fetches from NX may cause
1055	 * spurious faults.
1056	 *
1057	 * These could be from user or supervisor accesses but the TLB
1058	 * is only lazily flushed after a kernel mapping protection
1059	 * change, so user accesses are not expected to cause spurious
1060	 * faults.
1061	 */
1062	if (error_code != (PF_WRITE | PF_PROT)
1063	    && error_code != (PF_INSTR | PF_PROT))
1064		return 0;
1065
1066	pgd = init_mm.pgd + pgd_index(address);
1067	if (!pgd_present(*pgd))
1068		return 0;
1069
1070	pud = pud_offset(pgd, address);
1071	if (!pud_present(*pud))
1072		return 0;
1073
1074	if (pud_large(*pud))
1075		return spurious_fault_check(error_code, (pte_t *) pud);
1076
1077	pmd = pmd_offset(pud, address);
1078	if (!pmd_present(*pmd))
1079		return 0;
1080
1081	if (pmd_large(*pmd))
1082		return spurious_fault_check(error_code, (pte_t *) pmd);
1083
1084	pte = pte_offset_kernel(pmd, address);
1085	if (!pte_present(*pte))
1086		return 0;
1087
1088	ret = spurious_fault_check(error_code, pte);
1089	if (!ret)
1090		return 0;
1091
1092	/*
1093	 * Make sure we have permissions in PMD.
1094	 * If not, then there's a bug in the page tables:
1095	 */
1096	ret = spurious_fault_check(error_code, (pte_t *) pmd);
1097	WARN_ONCE(!ret, "PMD has incorrect permission bits\n");
1098
1099	return ret;
1100}
1101NOKPROBE_SYMBOL(spurious_fault);
1102
1103int show_unhandled_signals = 1;
1104
1105static inline int
1106access_error(unsigned long error_code, struct vm_area_struct *vma)
1107{
1108	/* This is only called for the current mm, so: */
1109	bool foreign = false;
1110	/*
1111	 * Make sure to check the VMA so that we do not perform
1112	 * faults just to hit a PF_PK as soon as we fill in a
1113	 * page.
1114	 */
1115	if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE),
1116				(error_code & PF_INSTR), foreign))
1117		return 1;
1118
1119	if (error_code & PF_WRITE) {
1120		/* write, present and write, not present: */
1121		if (unlikely(!(vma->vm_flags & VM_WRITE)))
1122			return 1;
1123		return 0;
1124	}
1125
1126	/* read, present: */
1127	if (unlikely(error_code & PF_PROT))
1128		return 1;
1129
1130	/* read, not present: */
1131	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
1132		return 1;
1133
1134	return 0;
1135}
1136
1137static int fault_in_kernel_space(unsigned long address)
1138{
1139	return address >= TASK_SIZE_MAX;
1140}
1141
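/*
 * SMAP makes supervisor-mode accesses to user pages fault unless EFLAGS.AC
 * is set.  Treat a fault as a SMAP violation only if SMAP is configured and
 * enabled, the error code shows a supervisor access, and the kernel was not
 * running with AC set (i.e. not inside a uaccess region).
 */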
1142static inline bool smap_violation(int error_code, struct pt_regs *regs)
1143{
1144	if (!IS_ENABLED(CONFIG_X86_SMAP))
1145		return false;
1146
1147	if (!static_cpu_has(X86_FEATURE_SMAP))
1148		return false;
1149
1150	if (error_code & PF_USER)
1151		return false;
1152
1153	if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
1154		return false;
1155
1156	return true;
1157}
1158
1159/*
1160 * This routine handles page faults.  It determines the address,
1161 * and the problem, and then passes it off to one of the appropriate
1162 * routines.
1163 *
1164 * This function must have noinline because both callers
1165 * {,trace_}do_page_fault() have notrace on. Having this an actual function
1166 * guarantees there's a function trace entry.
1167 */
1168static noinline void
1169__do_page_fault(struct pt_regs *regs, unsigned long error_code,
1170		unsigned long address)
1171{
1172	struct vm_area_struct *vma;
1173	struct task_struct *tsk;
1174	struct mm_struct *mm;
1175	int fault, major = 0;
1176	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
1177
1178	tsk = current;
1179	mm = tsk->mm;
1180
1181	/*
1182	 * Detect and handle instructions that would cause a page fault for
1183	 * both a tracked kernel page and a userspace page.
 
1184	 */
1185	if (kmemcheck_active(regs))
1186		kmemcheck_hide(regs);
1187	prefetchw(&mm->mmap_sem);
1188
1189	if (unlikely(kmmio_fault(regs, address)))
1190		return;
1191
 
1192	/*
1193	 * We fault-in kernel-space virtual memory on-demand. The
1194	 * 'reference' page table is init_mm.pgd.
1195	 *
1196	 * NOTE! We MUST NOT take any locks for this case. We may
1197	 * be in an interrupt or a critical region, and should
1198	 * only copy the information from the master page table,
1199	 * nothing more.
1200	 *
1201	 * This verifies that the fault happens in kernel space
1202	 * (error_code & 4) == 0, and that the fault was not a
1203	 * protection error (error_code & 9) == 0.
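	 *
	 * (With the bit definitions above: PF_USER == 1 << 2 == 4, and
	 *  PF_PROT | PF_RSVD == 1 | 8 == 9.)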
1204	 */
1205	if (unlikely(fault_in_kernel_space(address))) {
1206		if (!(error_code & (PF_RSVD | PF_USER | PF_PROT))) {
1207			if (vmalloc_fault(address) >= 0)
1208				return;
 
1209
1210			if (kmemcheck_fault(regs, address, error_code))
1211				return;
1212		}
1213
1214		/* Can handle a stale RO->RW TLB: */
1215		if (spurious_fault(error_code, address))
1216			return;
1217
1218		/* kprobes don't want to hook the spurious faults: */
1219		if (kprobes_fault(regs))
1220			return;
1221		/*
1222		 * Don't take the mm semaphore here. If we fixup a prefetch
1223		 * fault we could otherwise deadlock:
1224		 */
1225		bad_area_nosemaphore(regs, error_code, address, NULL);
 
1226
 
1227		return;
1228	}
1229
1230	/* kprobes don't want to hook the spurious faults: */
1231	if (unlikely(kprobes_fault(regs)))
1232		return;
1233
1234	if (unlikely(error_code & PF_RSVD))
1235		pgtable_bad(regs, error_code, address);
1236
1237	if (unlikely(smap_violation(error_code, regs))) {
1238		bad_area_nosemaphore(regs, error_code, address, NULL);
1239		return;
1240	}
1241
1242	/*
1243	 * If we're in an interrupt, have no user context or are running
 1244	 * in a region with pagefaults disabled, then we must not take the fault
1245	 */
1246	if (unlikely(faulthandler_disabled() || !mm)) {
1247		bad_area_nosemaphore(regs, error_code, address, NULL);
1248		return;
1249	}
1250
1251	/*
1252	 * It's safe to allow irq's after cr2 has been saved and the
1253	 * vmalloc fault has been handled.
1254	 *
1255	 * User-mode registers count as a user access even for any
1256	 * potential system fault or CPU buglet:
1257	 */
1258	if (user_mode(regs)) {
1259		local_irq_enable();
1260		error_code |= PF_USER;
1261		flags |= FAULT_FLAG_USER;
1262	} else {
1263		if (regs->flags & X86_EFLAGS_IF)
1264			local_irq_enable();
1265	}
1266
1267	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
1268
1269	if (error_code & PF_WRITE)
1270		flags |= FAULT_FLAG_WRITE;
1271	if (error_code & PF_INSTR)
1272		flags |= FAULT_FLAG_INSTRUCTION;
1273
1274	/*
1275	 * When running in the kernel we expect faults to occur only to
1276	 * addresses in user space.  All other faults represent errors in
1277	 * the kernel and should generate an OOPS.  Unfortunately, in the
1278	 * case of an erroneous fault occurring in a code path which already
1279	 * holds mmap_sem we will deadlock attempting to validate the fault
1280	 * against the address space.  Luckily the kernel only validly
1281	 * references user space from well defined areas of code, which are
1282	 * listed in the exceptions table.
1283	 *
1284	 * As the vast majority of faults will be valid we will only perform
1285	 * the source reference check when there is a possibility of a
1286	 * deadlock. Attempt to lock the address space, if we cannot we then
1287	 * validate the source. If this is invalid we can skip the address
1288	 * space check, thus avoiding the deadlock:
1289	 */
1290	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
1291		if ((error_code & PF_USER) == 0 &&
1292		    !search_exception_tables(regs->ip)) {
1293			bad_area_nosemaphore(regs, error_code, address, NULL);
1294			return;
1295		}
1296retry:
1297		down_read(&mm->mmap_sem);
1298	} else {
1299		/*
1300		 * The above down_read_trylock() might have succeeded in
1301		 * which case we'll have missed the might_sleep() from
1302		 * down_read():
1303		 */
1304		might_sleep();
1305	}
 
1306
1307	vma = find_vma(mm, address);
1308	if (unlikely(!vma)) {
1309		bad_area(regs, error_code, address);
1310		return;
1311	}
1312	if (likely(vma->vm_start <= address))
1313		goto good_area;
1314	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
1315		bad_area(regs, error_code, address);
1316		return;
1317	}
1318	if (error_code & PF_USER) {
1319		/*
1320		 * Accessing the stack below %sp is always a bug.
1321		 * The large cushion allows instructions like enter
1322		 * and pusha to work. ("enter $65535, $31" pushes
1323		 * 32 pointers and then decrements %sp by 65535.)
1324		 */
1325		if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
1326			bad_area(regs, error_code, address);
1327			return;
1328		}
1329	}
1330	if (unlikely(expand_stack(vma, address))) {
1331		bad_area(regs, error_code, address);
1332		return;
1333	}
1334
1335	/*
1336	 * Ok, we have a good vm_area for this memory access, so
1337	 * we can handle it..
1338	 */
1339good_area:
1340	if (unlikely(access_error(error_code, vma))) {
1341		bad_area_access_error(regs, error_code, address, vma);
1342		return;
1343	}
1344
1345	/*
1346	 * If for any reason at all we couldn't handle the fault,
1347	 * make sure we exit gracefully rather than endlessly redo
1348	 * the fault.  Since we never set FAULT_FLAG_RETRY_NOWAIT, if
1349	 * we get VM_FAULT_RETRY back, the mmap_sem has been unlocked.
1350	 */
1351	fault = handle_mm_fault(mm, vma, address, flags);
1352	major |= fault & VM_FAULT_MAJOR;
1353
1354	/*
1355	 * If we need to retry the mmap_sem has already been released,
1356	 * and if there is a fatal signal pending there is no guarantee
1357	 * that we made any progress. Handle this case first.
1358	 */
1359	if (unlikely(fault & VM_FAULT_RETRY)) {
1360		/* Retry at most once */
1361		if (flags & FAULT_FLAG_ALLOW_RETRY) {
1362			flags &= ~FAULT_FLAG_ALLOW_RETRY;
1363			flags |= FAULT_FLAG_TRIED;
1364			if (!fatal_signal_pending(tsk))
1365				goto retry;
1366		}
1367
1368		/* User mode? Just return to handle the fatal exception */
1369		if (flags & FAULT_FLAG_USER)
1370			return;
1371
1372		/* Not returning to user mode? Handle exceptions or die: */
1373		no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
 
1374		return;
1375	}
1376
1377	up_read(&mm->mmap_sem);
1378	if (unlikely(fault & VM_FAULT_ERROR)) {
1379		mm_fault_error(regs, error_code, address, vma, fault);
1380		return;
1381	}
1382
1383	/*
1384	 * Major/minor page fault accounting. If any of the events
1385	 * returned VM_FAULT_MAJOR, we account it as a major fault.
1386	 */
1387	if (major) {
1388		tsk->maj_flt++;
1389		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
1390	} else {
1391		tsk->min_flt++;
1392		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
1393	}
1394
1395	check_v8086_mode(regs, address, tsk);
1396}
1397NOKPROBE_SYMBOL(__do_page_fault);
1398
1399dotraplinkage void notrace
1400do_page_fault(struct pt_regs *regs, unsigned long error_code)
 
1401{
1402	unsigned long address = read_cr2(); /* Get the faulting address */
1403	enum ctx_state prev_state;
1404
1405	/*
1406	 * We must have this function tagged with __kprobes, notrace and call
 1407	 * read_cr2() before calling anything else, to avoid calling any kind
1408	 * of tracing machinery before we've observed the CR2 value.
1409	 *
1410	 * exception_{enter,exit}() contain all sorts of tracepoints.
1411	 */
1412
1413	prev_state = exception_enter();
1414	__do_page_fault(regs, error_code, address);
1415	exception_exit(prev_state);
1416}
1417NOKPROBE_SYMBOL(do_page_fault);
1418
1419#ifdef CONFIG_TRACING
1420static nokprobe_inline void
1421trace_page_fault_entries(unsigned long address, struct pt_regs *regs,
1422			 unsigned long error_code)
1423{
1424	if (user_mode(regs))
1425		trace_page_fault_user(address, regs, error_code);
1426	else
1427		trace_page_fault_kernel(address, regs, error_code);
1428}
1429
1430dotraplinkage void notrace
1431trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
 
1432{
1433	/*
1434	 * The exception_enter and tracepoint processing could
 1435	 * trigger another page fault (user space callchain
1436	 * reading) and destroy the original cr2 value, so read
1437	 * the faulting address now.
1438	 */
1439	unsigned long address = read_cr2();
1440	enum ctx_state prev_state;
1441
1442	prev_state = exception_enter();
1443	trace_page_fault_entries(address, regs, error_code);
1444	__do_page_fault(regs, error_code, address);
1445	exception_exit(prev_state);
1446}
1447NOKPROBE_SYMBOL(trace_do_page_fault);
1448#endif /* CONFIG_TRACING */
v6.9.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 *  Copyright (C) 1995  Linus Torvalds
   4 *  Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
   5 *  Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
   6 */
   7#include <linux/sched.h>		/* test_thread_flag(), ...	*/
   8#include <linux/sched/task_stack.h>	/* task_stack_*(), ...		*/
   9#include <linux/kdebug.h>		/* oops_begin/end, ...		*/
  10#include <linux/extable.h>		/* search_exception_tables	*/
  11#include <linux/memblock.h>		/* max_low_pfn			*/
  12#include <linux/kfence.h>		/* kfence_handle_page_fault	*/
  13#include <linux/kprobes.h>		/* NOKPROBE_SYMBOL, ...		*/
  14#include <linux/mmiotrace.h>		/* kmmio_handler, ...		*/
  15#include <linux/perf_event.h>		/* perf_sw_event		*/
  16#include <linux/hugetlb.h>		/* hstate_index_to_shift	*/
  17#include <linux/prefetch.h>		/* prefetchw			*/
  18#include <linux/context_tracking.h>	/* exception_enter(), ...	*/
  19#include <linux/uaccess.h>		/* faulthandler_disabled()	*/
  20#include <linux/efi.h>			/* efi_crash_gracefully_on_page_fault()*/
  21#include <linux/mm_types.h>
  22#include <linux/mm.h>			/* find_and_lock_vma() */
  23
  24#include <asm/cpufeature.h>		/* boot_cpu_has, ...		*/
  25#include <asm/traps.h>			/* dotraplinkage, ...		*/
  26#include <asm/fixmap.h>			/* VSYSCALL_ADDR		*/
  27#include <asm/vsyscall.h>		/* emulate_vsyscall		*/
  28#include <asm/vm86.h>			/* struct vm86			*/
  29#include <asm/mmu_context.h>		/* vma_pkey()			*/
  30#include <asm/efi.h>			/* efi_crash_gracefully_on_page_fault()*/
  31#include <asm/desc.h>			/* store_idt(), ...		*/
  32#include <asm/cpu_entry_area.h>		/* exception stack		*/
  33#include <asm/pgtable_areas.h>		/* VMALLOC_START, ...		*/
  34#include <asm/kvm_para.h>		/* kvm_handle_async_pf		*/
  35#include <asm/vdso.h>			/* fixup_vdso_exception()	*/
  36#include <asm/irq_stack.h>
  37#include <asm/fred.h>
  38#include <asm/sev.h>			/* snp_dump_hva_rmpentry()	*/
  39
  40#define CREATE_TRACE_POINTS
  41#include <asm/trace/exceptions.h>
  42
  43/*
  44 * Returns 0 if mmiotrace is disabled, or if the fault is not
  45 * handled by mmiotrace:
  46 */
  47static nokprobe_inline int
  48kmmio_fault(struct pt_regs *regs, unsigned long addr)
  49{
  50	if (unlikely(is_kmmio_active()))
  51		if (kmmio_handler(regs, addr) == 1)
  52			return -1;
  53	return 0;
  54}
  55
  56/*
  57 * Prefetch quirks:
  58 *
  59 * 32-bit mode:
  60 *
  61 *   Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
  62 *   Check that here and ignore it.  This is AMD erratum #91.
  63 *
  64 * 64-bit mode:
  65 *
  66 *   Sometimes the CPU reports invalid exceptions on prefetch.
  67 *   Check that here and ignore it.
  68 *
  69 * Opcode checker based on code by Richard Brunner.
  70 */
  71static inline int
  72check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
  73		      unsigned char opcode, int *prefetch)
  74{
  75	unsigned char instr_hi = opcode & 0xf0;
  76	unsigned char instr_lo = opcode & 0x0f;
  77
  78	switch (instr_hi) {
  79	case 0x20:
  80	case 0x30:
  81		/*
  82		 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
  83		 * In X86_64 long mode, the CPU will signal invalid
  84		 * opcode if some of these prefixes are present so
  85		 * X86_64 will never get here anyway
  86		 */
  87		return ((instr_lo & 7) == 0x6);
  88#ifdef CONFIG_X86_64
  89	case 0x40:
  90		/*
  91		 * In 64-bit mode 0x40..0x4F are valid REX prefixes
  92		 */
  93		return (!user_mode(regs) || user_64bit_mode(regs));
  94#endif
  95	case 0x60:
  96		/* 0x64 thru 0x67 are valid prefixes in all modes. */
  97		return (instr_lo & 0xC) == 0x4;
  98	case 0xF0:
  99		/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
 100		return !instr_lo || (instr_lo>>1) == 1;
 101	case 0x00:
 102		/* Prefetch instruction is 0x0F0D or 0x0F18 */
 103		if (get_kernel_nofault(opcode, instr))
 104			return 0;
 105
 106		*prefetch = (instr_lo == 0xF) &&
 107			(opcode == 0x0D || opcode == 0x18);
 108		return 0;
 109	default:
 110		return 0;
 111	}
 112}
 113
 114static bool is_amd_k8_pre_npt(void)
 115{
 116	struct cpuinfo_x86 *c = &boot_cpu_data;
 117
 118	return unlikely(IS_ENABLED(CONFIG_CPU_SUP_AMD) &&
 119			c->x86_vendor == X86_VENDOR_AMD &&
 120			c->x86 == 0xf && c->x86_model < 0x40);
 121}
 122
 123static int
 124is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
 125{
 126	unsigned char *max_instr;
 127	unsigned char *instr;
 128	int prefetch = 0;
 129
 130	/* Erratum #91 affects AMD K8, pre-NPT CPUs */
 131	if (!is_amd_k8_pre_npt())
 132		return 0;
 133
 134	/*
  135	 * If it was an exec (instruction fetch) fault on an NX page, then
 136	 * do not ignore the fault:
 137	 */
 138	if (error_code & X86_PF_INSTR)
 139		return 0;
 140
 141	instr = (void *)convert_ip_to_linear(current, regs);
 142	max_instr = instr + 15;
 143
 144	/*
 145	 * This code has historically always bailed out if IP points to a
 146	 * not-present page (e.g. due to a race).  No one has ever
 147	 * complained about this.
 148	 */
 149	pagefault_disable();
 150
 151	while (instr < max_instr) {
 152		unsigned char opcode;
 153
 154		if (user_mode(regs)) {
 155			if (get_user(opcode, (unsigned char __user *) instr))
 156				break;
 157		} else {
 158			if (get_kernel_nofault(opcode, instr))
 159				break;
 160		}
 161
 162		instr++;
 163
 164		if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
 165			break;
 166	}
 167
 168	pagefault_enable();
 169	return prefetch;
 170}
 171
 172DEFINE_SPINLOCK(pgd_lock);
 173LIST_HEAD(pgd_list);
 174
 175#ifdef CONFIG_X86_32
 176static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
 177{
 178	unsigned index = pgd_index(address);
 179	pgd_t *pgd_k;
 180	p4d_t *p4d, *p4d_k;
 181	pud_t *pud, *pud_k;
 182	pmd_t *pmd, *pmd_k;
 183
 184	pgd += index;
 185	pgd_k = init_mm.pgd + index;
 186
 187	if (!pgd_present(*pgd_k))
 188		return NULL;
 189
 190	/*
 191	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
 192	 * and redundant with the set_pmd() on non-PAE. As would
 193	 * set_p4d/set_pud.
 194	 */
 195	p4d = p4d_offset(pgd, address);
 196	p4d_k = p4d_offset(pgd_k, address);
 197	if (!p4d_present(*p4d_k))
 198		return NULL;
 199
 200	pud = pud_offset(p4d, address);
 201	pud_k = pud_offset(p4d_k, address);
 202	if (!pud_present(*pud_k))
 203		return NULL;
 204
 205	pmd = pmd_offset(pud, address);
 206	pmd_k = pmd_offset(pud_k, address);
 207
 208	if (pmd_present(*pmd) != pmd_present(*pmd_k))
 209		set_pmd(pmd, *pmd_k);
 210
 211	if (!pmd_present(*pmd_k))
 212		return NULL;
 213	else
 214		BUG_ON(pmd_pfn(*pmd) != pmd_pfn(*pmd_k));
 215
 216	return pmd_k;
 217}
 218
 219/*
 220 *   Handle a fault on the vmalloc or module mapping area
 221 *
 222 *   This is needed because there is a race condition between the time
 223 *   when the vmalloc mapping code updates the PMD to the point in time
 224 *   where it synchronizes this update with the other page-tables in the
 225 *   system.
 226 *
 227 *   In this race window another thread/CPU can map an area on the same
 228 *   PMD, finds it already present and does not synchronize it with the
 229 *   rest of the system yet. As a result v[mz]alloc might return areas
 230 *   which are not mapped in every page-table in the system, causing an
 231 *   unhandled page-fault when they are accessed.
 232 */
 233static noinline int vmalloc_fault(unsigned long address)
 234{
 235	unsigned long pgd_paddr;
 236	pmd_t *pmd_k;
 237	pte_t *pte_k;
 238
 239	/* Make sure we are in vmalloc area: */
 240	if (!(address >= VMALLOC_START && address < VMALLOC_END))
 241		return -1;
 242
 243	/*
 244	 * Synchronize this task's top level page-table
 245	 * with the 'reference' page table.
 246	 *
 247	 * Do _not_ use "current" here. We might be inside
 248	 * an interrupt in the middle of a task switch..
 249	 */
 250	pgd_paddr = read_cr3_pa();
 251	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
 252	if (!pmd_k)
 253		return -1;
 254
 255	if (pmd_leaf(*pmd_k))
 256		return 0;
 257
 258	pte_k = pte_offset_kernel(pmd_k, address);
 259	if (!pte_present(*pte_k))
 260		return -1;
 261
 262	return 0;
 263}
 264NOKPROBE_SYMBOL(vmalloc_fault);
 265
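/*
 * Propagate new kernel (vmalloc area) mappings in the given range from the
 * reference page table (init_mm) into every pgd on pgd_list, so the other
 * page tables in the system pick up the PMD entries without faulting.
 */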
 266void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
 267{
 268	unsigned long addr;
 
 269
 270	for (addr = start & PMD_MASK;
 271	     addr >= TASK_SIZE_MAX && addr < VMALLOC_END;
 272	     addr += PMD_SIZE) {
 273		struct page *page;
 274
 275		spin_lock(&pgd_lock);
 276		list_for_each_entry(page, &pgd_list, lru) {
 277			spinlock_t *pgt_lock;
 278
 279			/* the pgt_lock only for Xen */
 280			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
 281
 282			spin_lock(pgt_lock);
 283			vmalloc_sync_one(page_address(page), addr);
 284			spin_unlock(pgt_lock);
 285		}
 286		spin_unlock(&pgd_lock);
 287	}
 288}
 289
 290static bool low_pfn(unsigned long pfn)
 291{
 292	return pfn < max_low_pfn;
 293}
 294
 295static void dump_pagetable(unsigned long address)
 296{
 297	pgd_t *base = __va(read_cr3_pa());
 298	pgd_t *pgd = &base[pgd_index(address)];
 299	p4d_t *p4d;
 300	pud_t *pud;
 301	pmd_t *pmd;
 302	pte_t *pte;
 303
 304#ifdef CONFIG_X86_PAE
 305	pr_info("*pdpt = %016Lx ", pgd_val(*pgd));
 306	if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd))
 307		goto out;
 308#define pr_pde pr_cont
 309#else
 310#define pr_pde pr_info
 311#endif
 312	p4d = p4d_offset(pgd, address);
 313	pud = pud_offset(p4d, address);
 314	pmd = pmd_offset(pud, address);
 315	pr_pde("*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd));
 316#undef pr_pde
 317
 318	/*
 319	 * We must not directly access the pte in the highpte
 320	 * case if the page table is located in highmem.
 321	 * And let's rather not kmap-atomic the pte, just in case
 322	 * it's allocated already:
 323	 */
 324	if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_leaf(*pmd))
 325		goto out;
 326
 327	pte = pte_offset_kernel(pmd, address);
 328	pr_cont("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte));
 329out:
 330	pr_cont("\n");
 331}
 332
 333#else /* CONFIG_X86_64: */
 334
 335#ifdef CONFIG_CPU_SUP_AMD
 336static const char errata93_warning[] =
 337KERN_ERR 
 338"******* Your BIOS seems to not contain a fix for K8 errata #93\n"
 339"******* Working around it, but it may cause SEGVs or burn power.\n"
 340"******* Please consider a BIOS update.\n"
 341"******* Disabling USB legacy in the BIOS may also help.\n";
 342#endif
 343
 344static int bad_address(void *p)
 345{
 346	unsigned long dummy;
 347
 348	return get_kernel_nofault(dummy, (unsigned long *)p);
 349}
 350
 351static void dump_pagetable(unsigned long address)
 352{
 353	pgd_t *base = __va(read_cr3_pa());
 354	pgd_t *pgd = base + pgd_index(address);
 355	p4d_t *p4d;
 356	pud_t *pud;
 357	pmd_t *pmd;
 358	pte_t *pte;
 359
 360	if (bad_address(pgd))
 361		goto bad;
 362
 363	pr_info("PGD %lx ", pgd_val(*pgd));
 364
 365	if (!pgd_present(*pgd))
 366		goto out;
 367
 368	p4d = p4d_offset(pgd, address);
 369	if (bad_address(p4d))
 370		goto bad;
 371
 372	pr_cont("P4D %lx ", p4d_val(*p4d));
 373	if (!p4d_present(*p4d) || p4d_leaf(*p4d))
 374		goto out;
 375
 376	pud = pud_offset(p4d, address);
 377	if (bad_address(pud))
 378		goto bad;
 379
 380	pr_cont("PUD %lx ", pud_val(*pud));
 381	if (!pud_present(*pud) || pud_leaf(*pud))
 382		goto out;
 383
 384	pmd = pmd_offset(pud, address);
 385	if (bad_address(pmd))
 386		goto bad;
 387
 388	pr_cont("PMD %lx ", pmd_val(*pmd));
 389	if (!pmd_present(*pmd) || pmd_leaf(*pmd))
 390		goto out;
 391
 392	pte = pte_offset_kernel(pmd, address);
 393	if (bad_address(pte))
 394		goto bad;
 395
 396	pr_cont("PTE %lx", pte_val(*pte));
 397out:
 398	pr_cont("\n");
 399	return;
 400bad:
 401	pr_info("BAD\n");
 402}
 403
 404#endif /* CONFIG_X86_64 */
 405
 406/*
 407 * Workaround for K8 erratum #93 & buggy BIOS.
 408 *
 409 * BIOS SMM functions are required to use a specific workaround
 410 * to avoid corruption of the 64bit RIP register on C stepping K8.
 411 *
  412 * A lot of BIOSes that didn't get tested properly miss this.
 413 *
 414 * The OS sees this as a page fault with the upper 32bits of RIP cleared.
 415 * Try to work around it here.
 416 *
 417 * Note we only handle faults in kernel here.
 418 * Does nothing on 32-bit.
 419 */
 420static int is_errata93(struct pt_regs *regs, unsigned long address)
 421{
 422#if defined(CONFIG_X86_64) && defined(CONFIG_CPU_SUP_AMD)
 423	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD
 424	    || boot_cpu_data.x86 != 0xf)
 425		return 0;
 426
 427	if (user_mode(regs))
 428		return 0;
 429
 430	if (address != regs->ip)
 431		return 0;
 432
 433	if ((address >> 32) != 0)
 434		return 0;
 435
 436	address |= 0xffffffffUL << 32;
 437	if ((address >= (u64)_stext && address <= (u64)_etext) ||
 438	    (address >= MODULES_VADDR && address <= MODULES_END)) {
 439		printk_once(errata93_warning);
 440		regs->ip = address;
 441		return 1;
 442	}
 443#endif
 444	return 0;
 445}
 446
 447/*
  448 * Work around K8 erratum #100: K8 in compat mode occasionally jumps
 449 * to illegal addresses >4GB.
 450 *
 451 * We catch this in the page fault handler because these addresses
 452 * are not reachable. Just detect this case and return.  Any code
  453 * segment in the LDT is compatibility mode.
 454 */
 455static int is_errata100(struct pt_regs *regs, unsigned long address)
 456{
 457#ifdef CONFIG_X86_64
 458	if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
 459		return 1;
 460#endif
 461	return 0;
 462}
 463
 464/* Pentium F0 0F C7 C8 bug workaround: */
 465static int is_f00f_bug(struct pt_regs *regs, unsigned long error_code,
 466		       unsigned long address)
 467{
 468#ifdef CONFIG_X86_F00F_BUG
 469	if (boot_cpu_has_bug(X86_BUG_F00F) && !(error_code & X86_PF_USER) &&
 470	    idt_is_f00f_address(address)) {
 471		handle_invalid_op(regs);
 472		return 1;
 473	}
 474#endif
 475	return 0;
 476}
 477
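/*
 * Decode the LDTR or TR selector against the GDT and print the
 * descriptor's base and limit (or why it could not be read).
 */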
 478static void show_ldttss(const struct desc_ptr *gdt, const char *name, u16 index)
 479{
 480	u32 offset = (index >> 3) * sizeof(struct desc_struct);
 481	unsigned long addr;
 482	struct ldttss_desc desc;
 483
 484	if (index == 0) {
 485		pr_alert("%s: NULL\n", name);
 486		return;
 487	}
 488
 489	if (offset + sizeof(struct ldttss_desc) >= gdt->size) {
 490		pr_alert("%s: 0x%hx -- out of bounds\n", name, index);
 491		return;
 492	}
 493
 494	if (copy_from_kernel_nofault(&desc, (void *)(gdt->address + offset),
 495			      sizeof(struct ldttss_desc))) {
 496		pr_alert("%s: 0x%hx -- GDT entry is not readable\n",
 497			 name, index);
 498		return;
 499	}
 500
 501	addr = desc.base0 | (desc.base1 << 16) | ((unsigned long)desc.base2 << 24);
 502#ifdef CONFIG_X86_64
 503	addr |= ((u64)desc.base3 << 32);
 504#endif
 505	pr_alert("%s: 0x%hx -- base=0x%lx limit=0x%x\n",
 506		 name, index, addr, (desc.limit0 | (desc.limit1 << 16)));
 507}
 508
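/*
 * Print the oops banner for an unhandled fault: NULL-pointer/NX/SMEP
 * hints, a decoded error code, descriptor-table state for implicit
 * supervisor accesses from user mode, and a page-table dump.
 */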
 509static void
 510show_fault_oops(struct pt_regs *regs, unsigned long error_code, unsigned long address)
 511{
 512	if (!oops_may_print())
 513		return;
 514
 515	if (error_code & X86_PF_INSTR) {
 516		unsigned int level;
 517		pgd_t *pgd;
 518		pte_t *pte;
 519
 520		pgd = __va(read_cr3_pa());
 521		pgd += pgd_index(address);
 522
 523		pte = lookup_address_in_pgd(pgd, address, &level);
 524
 525		if (pte && pte_present(*pte) && !pte_exec(*pte))
 526			pr_crit("kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n",
 527				from_kuid(&init_user_ns, current_uid()));
 528		if (pte && pte_present(*pte) && pte_exec(*pte) &&
 529				(pgd_flags(*pgd) & _PAGE_USER) &&
 530				(__read_cr4() & X86_CR4_SMEP))
 531			pr_crit("unable to execute userspace code (SMEP?) (uid: %d)\n",
 532				from_kuid(&init_user_ns, current_uid()));
 533	}
 534
 535	if (address < PAGE_SIZE && !user_mode(regs))
 536		pr_alert("BUG: kernel NULL pointer dereference, address: %px\n",
 537			(void *)address);
 538	else
 539		pr_alert("BUG: unable to handle page fault for address: %px\n",
 540			(void *)address);
 541
 542	pr_alert("#PF: %s %s in %s mode\n",
 543		 (error_code & X86_PF_USER)  ? "user" : "supervisor",
 544		 (error_code & X86_PF_INSTR) ? "instruction fetch" :
 545		 (error_code & X86_PF_WRITE) ? "write access" :
 546					       "read access",
 547			     user_mode(regs) ? "user" : "kernel");
 548	pr_alert("#PF: error_code(0x%04lx) - %s\n", error_code,
 549		 !(error_code & X86_PF_PROT) ? "not-present page" :
 550		 (error_code & X86_PF_RSVD)  ? "reserved bit violation" :
 551		 (error_code & X86_PF_PK)    ? "protection keys violation" :
 552		 (error_code & X86_PF_RMP)   ? "RMP violation" :
 553					       "permissions violation");
 554
 555	if (!(error_code & X86_PF_USER) && user_mode(regs)) {
 556		struct desc_ptr idt, gdt;
 557		u16 ldtr, tr;
 558
 559		/*
 560		 * This can happen for quite a few reasons.  The more obvious
  561		 * ones are faults accessing the GDT or LDT.  Perhaps
 562		 * surprisingly, if the CPU tries to deliver a benign or
 563		 * contributory exception from user code and gets a page fault
 564		 * during delivery, the page fault can be delivered as though
 565		 * it originated directly from user code.  This could happen
 566		 * due to wrong permissions on the IDT, GDT, LDT, TSS, or
 567		 * kernel or IST stack.
 568		 */
 569		store_idt(&idt);
 570
 571		/* Usable even on Xen PV -- it's just slow. */
 572		native_store_gdt(&gdt);
 573
 574		pr_alert("IDT: 0x%lx (limit=0x%hx) GDT: 0x%lx (limit=0x%hx)\n",
 575			 idt.address, idt.size, gdt.address, gdt.size);
 576
 577		store_ldt(ldtr);
 578		show_ldttss(&gdt, "LDTR", ldtr);
 579
 580		store_tr(tr);
 581		show_ldttss(&gdt, "TR", tr);
 582	}
 583
 584	dump_pagetable(address);
 585
 586	if (error_code & X86_PF_RMP)
 587		snp_dump_hva_rmpentry(address);
 588}
 589
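/*
 * A reserved bit was set in a page-table entry: the page tables are
 * corrupted.  Dump them and terminate with an oops.
 */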
 590static noinline void
 591pgtable_bad(struct pt_regs *regs, unsigned long error_code,
 592	    unsigned long address)
 593{
 594	struct task_struct *tsk;
 595	unsigned long flags;
 596	int sig;
 597
 598	flags = oops_begin();
 599	tsk = current;
 600	sig = SIGKILL;
 601
 602	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
 603	       tsk->comm, address);
 604	dump_pagetable(address);
 605
 606	if (__die("Bad pagetable", regs, error_code))
 607		sig = 0;
 608
 609	oops_end(flags, regs, sig);
 610}
 611
 612static void sanitize_error_code(unsigned long address,
 613				unsigned long *error_code)
 614{
 615	/*
 616	 * To avoid leaking information about the kernel page
 617	 * table layout, pretend that user-mode accesses to
 618	 * kernel addresses are always protection faults.
 619	 *
 620	 * NB: This means that failed vsyscalls with vsyscall=none
 621	 * will have the PROT bit.  This doesn't leak any
 622	 * information and does not appear to cause any problems.
 623	 */
 624	if (address >= TASK_SIZE_MAX)
 625		*error_code |= X86_PF_PROT;
 626}
 627
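/*
 * Record the fault details in the task so signal delivery can report
 * the trap number, error code and faulting address.
 */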
 628static void set_signal_archinfo(unsigned long address,
 629				unsigned long error_code)
 630{
 631	struct task_struct *tsk = current;
 632
 633	tsk->thread.trap_nr = X86_TRAP_PF;
 634	tsk->thread.error_code = error_code | X86_PF_USER;
 635	tsk->thread.cr2 = address;
 636}
 637
 638static noinline void
 639page_fault_oops(struct pt_regs *regs, unsigned long error_code,
 640		unsigned long address)
 641{
 642#ifdef CONFIG_VMAP_STACK
 643	struct stack_info info;
 644#endif
 645	unsigned long flags;
 646	int sig;
 647
 648	if (user_mode(regs)) {
 649		/*
 650		 * Implicit kernel access from user mode?  Skip the stack
 651		 * overflow and EFI special cases.
 652		 */
 653		goto oops;
 654	}
 655
 656#ifdef CONFIG_VMAP_STACK
 657	/*
 658	 * Stack overflow?  During boot, we can fault near the initial
 659	 * stack in the direct map, but that's not an overflow -- check
 660	 * that we're in vmalloc space to avoid this.
 661	 */
 662	if (is_vmalloc_addr((void *)address) &&
 663	    get_stack_guard_info((void *)address, &info)) {
 664		/*
 665		 * We're likely to be running with very little stack space
 666		 * left.  It's plausible that we'd hit this condition but
 667		 * double-fault even before we get this far, in which case
 668		 * we're fine: the double-fault handler will deal with it.
 669		 *
 670		 * We don't want to make it all the way into the oops code
 671		 * and then double-fault, though, because we're likely to
 672		 * break the console driver and lose most of the stack dump.
 673		 */
 674		call_on_stack(__this_cpu_ist_top_va(DF) - sizeof(void*),
 675			      handle_stack_overflow,
 676			      ASM_CALL_ARG3,
 677			      , [arg1] "r" (regs), [arg2] "r" (address), [arg3] "r" (&info));
 678
 679		unreachable();
 680	}
 681#endif
 682
 683	/*
 684	 * Buggy firmware could access regions which might page fault.  If
 685	 * this happens, EFI has a special OOPS path that will try to
 686	 * avoid hanging the system.
 687	 */
 688	if (IS_ENABLED(CONFIG_EFI))
 689		efi_crash_gracefully_on_page_fault(address);
 690
 691	/* Only not-present faults should be handled by KFENCE. */
 692	if (!(error_code & X86_PF_PROT) &&
 693	    kfence_handle_page_fault(address, error_code & X86_PF_WRITE, regs))
 694		return;
 695
 696oops:
 697	/*
 698	 * Oops. The kernel tried to access some bad page. We'll have to
 699	 * terminate things with extreme prejudice:
 700	 */
 701	flags = oops_begin();
 702
 703	show_fault_oops(regs, error_code, address);
 704
 705	if (task_stack_end_corrupted(current))
 706		printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
 707
 708	sig = SIGKILL;
 709	if (__die("Oops", regs, error_code))
 710		sig = 0;
 711
 712	/* Executive summary in case the body of the oops scrolled away */
 713	printk(KERN_DEFAULT "CR2: %016lx\n", address);
 714
 715	oops_end(flags, regs, sig);
 716}
 717
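/*
 * Kernel-mode fault: try an exception-table fixup first; if none
 * applies and this is not a spurious AMD-prefetch fault, oops.
 */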
 718static noinline void
 719kernelmode_fixup_or_oops(struct pt_regs *regs, unsigned long error_code,
 720			 unsigned long address, int signal, int si_code,
 721			 u32 pkey)
 722{
 723	WARN_ON_ONCE(user_mode(regs));
 724
 725	/* Are we prepared to handle this kernel fault? */
 726	if (fixup_exception(regs, X86_TRAP_PF, error_code, address))
 727		return;
 728
 729	/*
 730	 * AMD erratum #91 manifests as a spurious page fault on a PREFETCH
 731	 * instruction.
 732	 */
 733	if (is_prefetch(regs, error_code, address))
 734		return;
 735
 736	page_fault_oops(regs, error_code, address);
 737}
 738
 739/*
 740 * Print out info about fatal segfaults, if the show_unhandled_signals
 741 * sysctl is set:
 742 */
 743static inline void
 744show_signal_msg(struct pt_regs *regs, unsigned long error_code,
 745		unsigned long address, struct task_struct *tsk)
 746{
 747	const char *loglvl = task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG;
 748	/* This is a racy snapshot, but it's better than nothing. */
 749	int cpu = raw_smp_processor_id();
 750
 751	if (!unhandled_signal(tsk, SIGSEGV))
 752		return;
 753
 754	if (!printk_ratelimit())
 755		return;
 756
 757	printk("%s%s[%d]: segfault at %lx ip %px sp %px error %lx",
 758		loglvl, tsk->comm, task_pid_nr(tsk), address,
 759		(void *)regs->ip, (void *)regs->sp, error_code);
 760
 761	print_vma_addr(KERN_CONT " in ", regs->ip);
 762
 763	/*
 764	 * Dump the likely CPU where the fatal segfault happened.
 765	 * This can help identify faulty hardware.
 766	 */
 767	printk(KERN_CONT " likely on CPU %d (core %d, socket %d)", cpu,
 768	       topology_core_id(cpu), topology_physical_package_id(cpu));
 769
 770
 771	printk(KERN_CONT "\n");
 772
 773	show_opcodes(regs, loglvl);
 774}
 775
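/*
 * Common SIGSEGV path, called without mmap_lock held: fix up or oops
 * for kernel-mode faults, otherwise deliver SIGSEGV (or SEGV_PKUERR)
 * to the current task.
 */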
 776static void
 777__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
 778		       unsigned long address, u32 pkey, int si_code)
 779{
 780	struct task_struct *tsk = current;
 781
 782	if (!user_mode(regs)) {
 783		kernelmode_fixup_or_oops(regs, error_code, address,
 784					 SIGSEGV, si_code, pkey);
 785		return;
 786	}
 787
 788	if (!(error_code & X86_PF_USER)) {
 789		/* Implicit user access to kernel memory -- just oops */
 790		page_fault_oops(regs, error_code, address);
 791		return;
 792	}
 793
 794	/*
 795	 * User mode accesses just cause a SIGSEGV.
 796	 * It's possible to have interrupts off here:
 797	 */
 798	local_irq_enable();
 799
 800	/*
 801	 * Valid to do another page fault here because this one came
 802	 * from user space:
 803	 */
 804	if (is_prefetch(regs, error_code, address))
 805		return;
 806
 807	if (is_errata100(regs, address))
 808		return;
 809
 810	sanitize_error_code(address, &error_code);
 811
 812	if (fixup_vdso_exception(regs, X86_TRAP_PF, error_code, address))
 813		return;
 814
 815	if (likely(show_unhandled_signals))
 816		show_signal_msg(regs, error_code, address, tsk);
 817
 818	set_signal_archinfo(address, error_code);
 819
 820	if (si_code == SEGV_PKUERR)
 821		force_sig_pkuerr((void __user *)address, pkey);
 822	else
 823		force_sig_fault(SIGSEGV, si_code, (void __user *)address);
 824
 825	local_irq_disable();
 826}
 827
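/* Report a fault on an unmapped address (SEGV_MAPERR) without mmap_lock. */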
 828static noinline void
 829bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
 830		     unsigned long address)
 831{
 832	__bad_area_nosemaphore(regs, error_code, address, 0, SEGV_MAPERR);
 833}
 834
 835static void
 836__bad_area(struct pt_regs *regs, unsigned long error_code,
 837	   unsigned long address, u32 pkey, int si_code)
 838{
 839	struct mm_struct *mm = current->mm;
 840	/*
 841	 * Something tried to access memory that isn't in our memory map..
 842	 * Fix it, but check if it's kernel or user first..
 843	 */
 844	mmap_read_unlock(mm);
 845
 846	__bad_area_nosemaphore(regs, error_code, address, pkey, si_code);
 847}
 848
 849static inline bool bad_area_access_from_pkeys(unsigned long error_code,
 850		struct vm_area_struct *vma)
 851{
 852	/* This code is always called on the current mm */
 853	bool foreign = false;
 854
 855	if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
 856		return false;
 857	if (error_code & X86_PF_PK)
 858		return true;
 859	/* this checks permission keys on the VMA: */
 860	if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE),
 861				       (error_code & X86_PF_INSTR), foreign))
 862		return true;
 863	return false;
 864}
 865
 866static noinline void
 867bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
 868		      unsigned long address, struct vm_area_struct *vma)
 869{
 870	/*
 871	 * This OSPKE check is not strictly necessary at runtime.
 872	 * But, doing it this way allows compiler optimizations
 873	 * if pkeys are compiled out.
 874	 */
 875	if (bad_area_access_from_pkeys(error_code, vma)) {
 876		/*
 877		 * A protection key fault means that the PKRU value did not allow
 878		 * access to some PTE.  Userspace can figure out what PKRU was
 879		 * from the XSAVE state.  This function captures the pkey from
 880		 * the vma and passes it to userspace so userspace can discover
 881		 * which protection key was set on the PTE.
 882		 *
 883		 * If we get here, we know that the hardware signaled a X86_PF_PK
 884		 * fault and that there was a VMA once we got in the fault
 885		 * handler.  It does *not* guarantee that the VMA we find here
 886		 * was the one that we faulted on.
 887		 *
 888		 * 1. T1   : mprotect_key(foo, PAGE_SIZE, pkey=4);
 889		 * 2. T1   : set PKRU to deny access to pkey=4, touches page
 890		 * 3. T1   : faults...
 891		 * 4.    T2: mprotect_key(foo, PAGE_SIZE, pkey=5);
 892		 * 5. T1   : enters fault handler, takes mmap_lock, etc...
 893		 * 6. T1   : reaches here, sees vma_pkey(vma)=5, when we really
 894		 *	     faulted on a pte with its pkey=4.
 895		 */
 896		u32 pkey = vma_pkey(vma);
 897
 898		__bad_area(regs, error_code, address, pkey, SEGV_PKUERR);
 899	} else {
 900		__bad_area(regs, error_code, address, 0, SEGV_ACCERR);
 901	}
 902}
 903
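/*
 * Deliver SIGBUS for a fault the VM could not service; hardware memory
 * poisoning escalates to BUS_MCEERR_AR with the affected granule size.
 */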
 904static void
 905do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
 906	  vm_fault_t fault)
 907{
 908	/* Kernel mode? Handle exceptions or die: */
 909	if (!user_mode(regs)) {
 910		kernelmode_fixup_or_oops(regs, error_code, address,
 911					 SIGBUS, BUS_ADRERR, ARCH_DEFAULT_PKEY);
 912		return;
 913	}
 914
 915	/* User-space => ok to do another page fault: */
 916	if (is_prefetch(regs, error_code, address))
 917		return;
 918
 919	sanitize_error_code(address, &error_code);
 920
 921	if (fixup_vdso_exception(regs, X86_TRAP_PF, error_code, address))
 922		return;
 923
 924	set_signal_archinfo(address, error_code);
 925
 926#ifdef CONFIG_MEMORY_FAILURE
 927	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
 928		struct task_struct *tsk = current;
 929		unsigned lsb = 0;
 930
 931		pr_err(
 932	"MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
 933			tsk->comm, tsk->pid, address);
 934		if (fault & VM_FAULT_HWPOISON_LARGE)
 935			lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
 936		if (fault & VM_FAULT_HWPOISON)
 937			lsb = PAGE_SHIFT;
 938		force_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb);
 939		return;
 940	}
 941#endif
 942	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
 943}
 944
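/*
 * Return 1 if this page-table entry already permits the attempted
 * access, i.e. the fault can only have come from a stale TLB entry.
 */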
 945static int spurious_kernel_fault_check(unsigned long error_code, pte_t *pte)
 946{
 947	if ((error_code & X86_PF_WRITE) && !pte_write(*pte))
 948		return 0;
 949
 950	if ((error_code & X86_PF_INSTR) && !pte_exec(*pte))
 951		return 0;
 952
 953	return 1;
 954}
 955
 956/*
 957 * Handle a spurious fault caused by a stale TLB entry.
 958 *
 959 * This allows us to lazily refresh the TLB when increasing the
 960 * permissions of a kernel page (RO -> RW or NX -> X).  Doing it
 961 * eagerly is very expensive since that implies doing a full
 962 * cross-processor TLB flush, even if no stale TLB entries exist
 963 * on other processors.
 964 *
 965 * Spurious faults may only occur if the TLB contains an entry with
  966 * fewer permissions than the page table entry.  Non-present (P = 0)
 967 * and reserved bit (R = 1) faults are never spurious.
 968 *
 969 * There are no security implications to leaving a stale TLB when
 970 * increasing the permissions on a page.
 971 *
 972 * Returns non-zero if a spurious fault was handled, zero otherwise.
 973 *
 974 * See Intel Developer's Manual Vol 3 Section 4.10.4.3, bullet 3
 975 * (Optional Invalidation).
 976 */
 977static noinline int
 978spurious_kernel_fault(unsigned long error_code, unsigned long address)
 979{
 980	pgd_t *pgd;
 981	p4d_t *p4d;
 982	pud_t *pud;
 983	pmd_t *pmd;
 984	pte_t *pte;
 985	int ret;
 986
 987	/*
 988	 * Only writes to RO or instruction fetches from NX may cause
 989	 * spurious faults.
 990	 *
 991	 * These could be from user or supervisor accesses but the TLB
 992	 * is only lazily flushed after a kernel mapping protection
 993	 * change, so user accesses are not expected to cause spurious
 994	 * faults.
 995	 */
 996	if (error_code != (X86_PF_WRITE | X86_PF_PROT) &&
 997	    error_code != (X86_PF_INSTR | X86_PF_PROT))
 998		return 0;
 999
1000	pgd = init_mm.pgd + pgd_index(address);
1001	if (!pgd_present(*pgd))
1002		return 0;
1003
1004	p4d = p4d_offset(pgd, address);
1005	if (!p4d_present(*p4d))
1006		return 0;
1007
1008	if (p4d_leaf(*p4d))
1009		return spurious_kernel_fault_check(error_code, (pte_t *) p4d);
1010
1011	pud = pud_offset(p4d, address);
1012	if (!pud_present(*pud))
1013		return 0;
1014
1015	if (pud_leaf(*pud))
1016		return spurious_kernel_fault_check(error_code, (pte_t *) pud);
1017
1018	pmd = pmd_offset(pud, address);
1019	if (!pmd_present(*pmd))
1020		return 0;
1021
1022	if (pmd_leaf(*pmd))
1023		return spurious_kernel_fault_check(error_code, (pte_t *) pmd);
1024
1025	pte = pte_offset_kernel(pmd, address);
1026	if (!pte_present(*pte))
1027		return 0;
1028
1029	ret = spurious_kernel_fault_check(error_code, pte);
1030	if (!ret)
1031		return 0;
1032
1033	/*
1034	 * Make sure we have permissions in PMD.
1035	 * If not, then there's a bug in the page tables:
1036	 */
1037	ret = spurious_kernel_fault_check(error_code, (pte_t *) pmd);
1038	WARN_ONCE(!ret, "PMD has incorrect permission bits\n");
1039
1040	return ret;
1041}
1042NOKPROBE_SYMBOL(spurious_kernel_fault);
1043
1044int show_unhandled_signals = 1;
1045
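/*
 * Check the faulting access against the VMA's permissions and
 * protection keys.  Returns 1 if the access must be refused, 0 if the
 * fault may legitimately be handled (e.g. demand paging or COW).
 */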
1046static inline int
1047access_error(unsigned long error_code, struct vm_area_struct *vma)
1048{
1049	/* This is only called for the current mm, so: */
1050	bool foreign = false;
1051
1052	/*
1053	 * Read or write was blocked by protection keys.  This is
1054	 * always an unconditional error and can never result in
1055	 * a follow-up action to resolve the fault, like a COW.
1056	 */
1057	if (error_code & X86_PF_PK)
1058		return 1;
1059
1060	/*
1061	 * SGX hardware blocked the access.  This usually happens
1062	 * when the enclave memory contents have been destroyed, like
1063	 * after a suspend/resume cycle. In any case, the kernel can't
1064	 * fix the cause of the fault.  Handle the fault as an access
1065	 * error even in cases where no actual access violation
1066	 * occurred.  This allows userspace to rebuild the enclave in
1067	 * response to the signal.
1068	 */
1069	if (unlikely(error_code & X86_PF_SGX))
1070		return 1;
1071
1072	/*
1073	 * Make sure to check the VMA so that we do not perform
1074	 * faults just to hit a X86_PF_PK as soon as we fill in a
1075	 * page.
1076	 */
1077	if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE),
1078				       (error_code & X86_PF_INSTR), foreign))
1079		return 1;
1080
1081	/*
1082	 * Shadow stack accesses (PF_SHSTK=1) are only permitted to
1083	 * shadow stack VMAs. All other accesses result in an error.
1084	 */
1085	if (error_code & X86_PF_SHSTK) {
1086		if (unlikely(!(vma->vm_flags & VM_SHADOW_STACK)))
1087			return 1;
1088		if (unlikely(!(vma->vm_flags & VM_WRITE)))
1089			return 1;
1090		return 0;
1091	}
1092
1093	if (error_code & X86_PF_WRITE) {
1094		/* write, present and write, not present: */
1095		if (unlikely(vma->vm_flags & VM_SHADOW_STACK))
1096			return 1;
1097		if (unlikely(!(vma->vm_flags & VM_WRITE)))
1098			return 1;
1099		return 0;
1100	}
1101
1102	/* read, present: */
1103	if (unlikely(error_code & X86_PF_PROT))
1104		return 1;
1105
1106	/* read, not present: */
1107	if (unlikely(!vma_is_accessible(vma)))
1108		return 1;
1109
1110	return 0;
1111}
1112
1113bool fault_in_kernel_space(unsigned long address)
1114{
1115	/*
1116	 * On 64-bit systems, the vsyscall page is at an address above
1117	 * TASK_SIZE_MAX, but is not considered part of the kernel
1118	 * address space.
1119	 */
1120	if (IS_ENABLED(CONFIG_X86_64) && is_vsyscall_vaddr(address))
1121		return false;
1122
1123	return address >= TASK_SIZE_MAX;
1124}
1125
1126/*
1127 * Called for all faults where 'address' is part of the kernel address
1128 * space.  Might get called for faults that originate from *code* that
1129 * ran in userspace or the kernel.
1130 */
1131static void
1132do_kern_addr_fault(struct pt_regs *regs, unsigned long hw_error_code,
1133		   unsigned long address)
1134{
1135	/*
1136	 * Protection keys exceptions only happen on user pages.  We
1137	 * have no user pages in the kernel portion of the address
1138	 * space, so do not expect them here.
1139	 */
1140	WARN_ON_ONCE(hw_error_code & X86_PF_PK);
1141
1142#ifdef CONFIG_X86_32
1143	/*
1144	 * We can fault-in kernel-space virtual memory on-demand. The
1145	 * 'reference' page table is init_mm.pgd.
1146	 *
1147	 * NOTE! We MUST NOT take any locks for this case. We may
1148	 * be in an interrupt or a critical region, and should
1149	 * only copy the information from the master page table,
1150	 * nothing more.
1151	 *
1152	 * Before doing this on-demand faulting, ensure that the
1153	 * fault is not any of the following:
1154	 * 1. A fault on a PTE with a reserved bit set.
1155	 * 2. A fault caused by a user-mode access.  (Do not demand-
1156	 *    fault kernel memory due to user-mode accesses).
1157	 * 3. A fault caused by a page-level protection violation.
1158	 *    (A demand fault would be on a non-present page which
1159	 *     would have X86_PF_PROT==0).
1160	 *
1161	 * This is only needed to close a race condition on x86-32 in
1162	 * the vmalloc mapping/unmapping code. See the comment above
1163	 * vmalloc_fault() for details. On x86-64 the race does not
1164	 * exist as the vmalloc mappings don't need to be synchronized
1165	 * there.
1166	 */
1167	if (!(hw_error_code & (X86_PF_RSVD | X86_PF_USER | X86_PF_PROT))) {
1168		if (vmalloc_fault(address) >= 0)
1169			return;
1170	}
1171#endif
1172
1173	if (is_f00f_bug(regs, hw_error_code, address))
1174		return;
1175
1176	/* Was the fault spurious, caused by lazy TLB invalidation? */
1177	if (spurious_kernel_fault(hw_error_code, address))
1178		return;
1179
1180	/* kprobes don't want to hook the spurious faults: */
1181	if (WARN_ON_ONCE(kprobe_page_fault(regs, X86_TRAP_PF)))
1182		return;
1183
1184	/*
1185	 * Note, despite being a "bad area", there are quite a few
1186	 * acceptable reasons to get here, such as erratum fixups
1187	 * and handling kernel code that can fault, like get_user().
1188	 *
1189	 * Don't take the mm semaphore here. If we fixup a prefetch
1190	 * fault we could otherwise deadlock:
1191	 */
1192	bad_area_nosemaphore(regs, hw_error_code, address);
1193}
1194NOKPROBE_SYMBOL(do_kern_addr_fault);
1195
1196/*
1197 * Handle faults in the user portion of the address space.  Nothing in here
1198 * should check X86_PF_USER without a specific justification: for almost
1199 * all purposes, we should treat a normal kernel access to user memory
1200 * (e.g. get_user(), put_user(), etc.) the same as the WRUSS instruction.
1201 * The one exception is AC flag handling, which is, per the x86
1202 * architecture, special for WRUSS.
1203 */
1204static inline
1205void do_user_addr_fault(struct pt_regs *regs,
1206			unsigned long error_code,
1207			unsigned long address)
1208{
1209	struct vm_area_struct *vma;
1210	struct task_struct *tsk;
1211	struct mm_struct *mm;
1212	vm_fault_t fault;
1213	unsigned int flags = FAULT_FLAG_DEFAULT;
1214
1215	tsk = current;
1216	mm = tsk->mm;
1217
1218	if (unlikely((error_code & (X86_PF_USER | X86_PF_INSTR)) == X86_PF_INSTR)) {
1219		/*
1220		 * Whoops, this is kernel mode code trying to execute from
1221		 * user memory.  Unless this is AMD erratum #93, which
1222		 * corrupts RIP such that it looks like a user address,
1223		 * this is unrecoverable.  Don't even try to look up the
1224		 * VMA or look for extable entries.
1225		 */
1226		if (is_errata93(regs, address))
1227			return;
1228
1229		page_fault_oops(regs, error_code, address);
1230		return;
1231	}
1232
1233	/* kprobes don't want to hook the spurious faults: */
1234	if (WARN_ON_ONCE(kprobe_page_fault(regs, X86_TRAP_PF)))
1235		return;
1236
1237	/*
1238	 * Reserved bits are never expected to be set on
1239	 * entries in the user portion of the page tables.
1240	 */
1241	if (unlikely(error_code & X86_PF_RSVD))
1242		pgtable_bad(regs, error_code, address);
1243
1244	/*
1245	 * If SMAP is on, check for invalid kernel (supervisor) access to user
1246	 * pages in the user address space.  The odd case here is WRUSS,
1247	 * which, according to the preliminary documentation, does not respect
1248	 * SMAP and will have the USER bit set so, in all cases, SMAP
1249	 * enforcement appears to be consistent with the USER bit.
1250	 */
1251	if (unlikely(cpu_feature_enabled(X86_FEATURE_SMAP) &&
1252		     !(error_code & X86_PF_USER) &&
1253		     !(regs->flags & X86_EFLAGS_AC))) {
1254		/*
1255		 * No extable entry here.  This was a kernel access to an
1256		 * invalid pointer.  get_kernel_nofault() will not get here.
1257		 */
1258		page_fault_oops(regs, error_code, address);
1259		return;
1260	}
1261
1262	/*
1263	 * If we're in an interrupt, have no user context or are running
1264	 * in a region with pagefaults disabled then we must not take the fault
1265	 */
1266	if (unlikely(faulthandler_disabled() || !mm)) {
1267		bad_area_nosemaphore(regs, error_code, address);
1268		return;
1269	}
1270
1271	/* Legacy check - remove this after verifying that it doesn't trigger */
1272	if (WARN_ON_ONCE(!(regs->flags & X86_EFLAGS_IF))) {
1273		bad_area_nosemaphore(regs, error_code, address);
1274		return;
1275	}
1276
1277	local_irq_enable();
1278
1279	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
1280
1281	/*
 1282	 * Read-only permissions cannot be expressed in shadow stack PTEs.
1283	 * Treat all shadow stack accesses as WRITE faults. This ensures
1284	 * that the MM will prepare everything (e.g., break COW) such that
1285	 * maybe_mkwrite() can create a proper shadow stack PTE.
1286	 */
1287	if (error_code & X86_PF_SHSTK)
1288		flags |= FAULT_FLAG_WRITE;
1289	if (error_code & X86_PF_WRITE)
1290		flags |= FAULT_FLAG_WRITE;
1291	if (error_code & X86_PF_INSTR)
1292		flags |= FAULT_FLAG_INSTRUCTION;
1293
1294	/*
1295	 * We set FAULT_FLAG_USER based on the register state, not
1296	 * based on X86_PF_USER. User space accesses that cause
1297	 * system page faults are still user accesses.
1298	 */
1299	if (user_mode(regs))
1300		flags |= FAULT_FLAG_USER;
1301
1302#ifdef CONFIG_X86_64
1303	/*
1304	 * Faults in the vsyscall page might need emulation.  The
1305	 * vsyscall page is at a high address (>PAGE_OFFSET), but is
1306	 * considered to be part of the user address space.
1307	 *
1308	 * The vsyscall page does not have a "real" VMA, so do this
1309	 * emulation before we go searching for VMAs.
1310	 *
1311	 * PKRU never rejects instruction fetches, so we don't need
1312	 * to consider the PF_PK bit.
1313	 */
1314	if (is_vsyscall_vaddr(address)) {
1315		if (emulate_vsyscall(error_code, regs, address))
1316			return;
1317	}
1318#endif
1319
1320	if (!(flags & FAULT_FLAG_USER))
1321		goto lock_mmap;
1322
1323	vma = lock_vma_under_rcu(mm, address);
1324	if (!vma)
1325		goto lock_mmap;
1326
1327	if (unlikely(access_error(error_code, vma))) {
1328		vma_end_read(vma);
1329		goto lock_mmap;
1330	}
1331	fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
1332	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
1333		vma_end_read(vma);
1334
1335	if (!(fault & VM_FAULT_RETRY)) {
1336		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
1337		goto done;
1338	}
1339	count_vm_vma_lock_event(VMA_LOCK_RETRY);
1340	if (fault & VM_FAULT_MAJOR)
1341		flags |= FAULT_FLAG_TRIED;
1342
1343	/* Quick path to respond to signals */
1344	if (fault_signal_pending(fault, regs)) {
1345		if (!user_mode(regs))
1346			kernelmode_fixup_or_oops(regs, error_code, address,
1347						 SIGBUS, BUS_ADRERR,
1348						 ARCH_DEFAULT_PKEY);
1349		return;
1350	}
1351lock_mmap:
1352
1353retry:
1354	vma = lock_mm_and_find_vma(mm, address, regs);
1355	if (unlikely(!vma)) {
1356		bad_area_nosemaphore(regs, error_code, address);
1357		return;
1358	}
1359
1360	/*
1361	 * Ok, we have a good vm_area for this memory access, so
1362	 * we can handle it..
1363	 */
1364	if (unlikely(access_error(error_code, vma))) {
1365		bad_area_access_error(regs, error_code, address, vma);
1366		return;
1367	}
1368
1369	/*
1370	 * If for any reason at all we couldn't handle the fault,
1371	 * make sure we exit gracefully rather than endlessly redo
1372	 * the fault.  Since we never set FAULT_FLAG_RETRY_NOWAIT, if
1373	 * we get VM_FAULT_RETRY back, the mmap_lock has been unlocked.
1374	 *
1375	 * Note that handle_userfault() may also release and reacquire mmap_lock
1376	 * (and not return with VM_FAULT_RETRY), when returning to userland to
1377	 * repeat the page fault later with a VM_FAULT_NOPAGE retval
1378	 * (potentially after handling any pending signal during the return to
1379	 * userland). The return to userland is identified whenever
1380	 * FAULT_FLAG_USER|FAULT_FLAG_KILLABLE are both set in flags.
1381	 */
1382	fault = handle_mm_fault(vma, address, flags, regs);
1383
1384	if (fault_signal_pending(fault, regs)) {
1385		/*
1386		 * Quick path to respond to signals.  The core mm code
1387		 * has unlocked the mm for us if we get here.
1388		 */
1389		if (!user_mode(regs))
1390			kernelmode_fixup_or_oops(regs, error_code, address,
1391						 SIGBUS, BUS_ADRERR,
1392						 ARCH_DEFAULT_PKEY);
1393		return;
1394	}
1395
1396	/* The fault is fully completed (including releasing mmap lock) */
1397	if (fault & VM_FAULT_COMPLETED)
1398		return;
1399
1400	/*
1401	 * If we need to retry the mmap_lock has already been released,
1402	 * and if there is a fatal signal pending there is no guarantee
1403	 * that we made any progress. Handle this case first.
1404	 */
1405	if (unlikely(fault & VM_FAULT_RETRY)) {
1406		flags |= FAULT_FLAG_TRIED;
1407		goto retry;
1408	}
1409
1410	mmap_read_unlock(mm);
1411done:
1412	if (likely(!(fault & VM_FAULT_ERROR)))
1413		return;
1414
1415	if (fatal_signal_pending(current) && !user_mode(regs)) {
1416		kernelmode_fixup_or_oops(regs, error_code, address,
1417					 0, 0, ARCH_DEFAULT_PKEY);
1418		return;
1419	}
1420
1421	if (fault & VM_FAULT_OOM) {
1422		/* Kernel mode? Handle exceptions or die: */
1423		if (!user_mode(regs)) {
1424			kernelmode_fixup_or_oops(regs, error_code, address,
1425						 SIGSEGV, SEGV_MAPERR,
1426						 ARCH_DEFAULT_PKEY);
1427			return;
1428		}
1429
1430		/*
1431		 * We ran out of memory, call the OOM killer, and return the
1432		 * userspace (which will retry the fault, or kill us if we got
1433		 * oom-killed):
1434		 */
1435		pagefault_out_of_memory();
1436	} else {
1437		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
1438			     VM_FAULT_HWPOISON_LARGE))
1439			do_sigbus(regs, error_code, address, fault);
1440		else if (fault & VM_FAULT_SIGSEGV)
1441			bad_area_nosemaphore(regs, error_code, address);
1442		else
1443			BUG();
1444	}
1445}
1446NOKPROBE_SYMBOL(do_user_addr_fault);
1447
1448static __always_inline void
1449trace_page_fault_entries(struct pt_regs *regs, unsigned long error_code,
1450			 unsigned long address)
1451{
1452	if (!trace_pagefault_enabled())
1453		return;
1454
1455	if (user_mode(regs))
1456		trace_page_fault_user(address, regs, error_code);
1457	else
1458		trace_page_fault_kernel(address, regs, error_code);
1459}
1460
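/*
 * Common #PF dispatch: emit tracepoints, let mmiotrace claim the fault,
 * then route kernel-address faults to do_kern_addr_fault() and the rest
 * to do_user_addr_fault().
 */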
1461static __always_inline void
1462handle_page_fault(struct pt_regs *regs, unsigned long error_code,
1463			      unsigned long address)
1464{
1465	trace_page_fault_entries(regs, error_code, address);
1466
1467	if (unlikely(kmmio_fault(regs, address)))
1468		return;
1469
1470	/* Was the fault on kernel-controlled part of the address space? */
1471	if (unlikely(fault_in_kernel_space(address))) {
1472		do_kern_addr_fault(regs, error_code, address);
1473	} else {
1474		do_user_addr_fault(regs, error_code, address);
1475		/*
1476		 * User address page fault handling might have reenabled
1477		 * interrupts. Fixing up all potential exit points of
1478		 * do_user_addr_fault() and its leaf functions is just not
1479		 * doable w/o creating an unholy mess or turning the code
1480		 * upside down.
1481		 */
1482		local_irq_disable();
1483	}
1484}
1485
1486DEFINE_IDTENTRY_RAW_ERRORCODE(exc_page_fault)
1487{
1488	irqentry_state_t state;
1489	unsigned long address;
1490
1491	address = cpu_feature_enabled(X86_FEATURE_FRED) ? fred_event_data(regs) : read_cr2();
1492
1493	prefetchw(&current->mm->mmap_lock);
1494
1495	/*
1496	 * KVM uses #PF vector to deliver 'page not present' events to guests
1497	 * (asynchronous page fault mechanism). The event happens when a
1498	 * userspace task is trying to access some valid (from guest's point of
1499	 * view) memory which is not currently mapped by the host (e.g. the
 1500	 * memory is swapped out). Note, the corresponding "page ready" event,
1501	 * which is injected when the memory becomes available, is delivered via
1502	 * an interrupt mechanism and not a #PF exception
1503	 * (see arch/x86/kernel/kvm.c: sysvec_kvm_asyncpf_interrupt()).
1504	 *
1505	 * We are relying on the interrupted context being sane (valid RSP,
1506	 * relevant locks not held, etc.), which is fine as long as the
1507	 * interrupted context had IF=1.  We are also relying on the KVM
1508	 * async pf type field and CR2 being read consistently instead of
1509	 * getting values from real and async page faults mixed up.
1510	 *
1511	 * Fingers crossed.
1512	 *
1513	 * The async #PF handling code takes care of idtentry handling
1514	 * itself.
1515	 */
1516	if (kvm_handle_async_pf(regs, (u32)address))
1517		return;
1518
1519	/*
1520	 * Entry handling for valid #PF from kernel mode is slightly
1521	 * different: RCU is already watching and ct_irq_enter() must not
1522	 * be invoked because a kernel fault on a user space address might
1523	 * sleep.
1524	 *
 1525	 * In case the fault hit an RCU idle region, the conditional entry
 1526	 * code re-enables RCU to avoid subsequent wreckage, which helps
1527	 * debuggability.
1528	 */
1529	state = irqentry_enter(regs);
1530
1531	instrumentation_begin();
1532	handle_page_fault(regs, error_code, address);
1533	instrumentation_end();
1534
1535	irqentry_exit(regs, state);
1536}