v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 *  Copyright (C) 1995  Linus Torvalds
   4 *  Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
   5 *  Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
   6 */
   7#include <linux/sched.h>		/* test_thread_flag(), ...	*/
   8#include <linux/sched/task_stack.h>	/* task_stack_*(), ...		*/
   9#include <linux/kdebug.h>		/* oops_begin/end, ...		*/
  10#include <linux/extable.h>		/* search_exception_tables	*/
  11#include <linux/memblock.h>		/* max_low_pfn			*/
  12#include <linux/kprobes.h>		/* NOKPROBE_SYMBOL, ...		*/
  13#include <linux/mmiotrace.h>		/* kmmio_handler, ...		*/
  14#include <linux/perf_event.h>		/* perf_sw_event		*/
  15#include <linux/hugetlb.h>		/* hstate_index_to_shift	*/
  16#include <linux/prefetch.h>		/* prefetchw			*/
  17#include <linux/context_tracking.h>	/* exception_enter(), ...	*/
  18#include <linux/uaccess.h>		/* faulthandler_disabled()	*/
  19#include <linux/efi.h>			/* efi_recover_from_page_fault()*/
  20#include <linux/mm_types.h>
  21
  22#include <asm/cpufeature.h>		/* boot_cpu_has, ...		*/
  23#include <asm/traps.h>			/* dotraplinkage, ...		*/
  24#include <asm/pgalloc.h>		/* pgd_*(), ...			*/
  25#include <asm/fixmap.h>			/* VSYSCALL_ADDR		*/
  26#include <asm/vsyscall.h>		/* emulate_vsyscall		*/
  27#include <asm/vm86.h>			/* struct vm86			*/
  28#include <asm/mmu_context.h>		/* vma_pkey()			*/
  29#include <asm/efi.h>			/* efi_recover_from_page_fault()*/
  30#include <asm/desc.h>			/* store_idt(), ...		*/
  31#include <asm/cpu_entry_area.h>		/* exception stack		*/
  32
  33#define CREATE_TRACE_POINTS
  34#include <asm/trace/exceptions.h>
  35
  36/*
  37 * Returns 0 if mmiotrace is disabled, or if the fault is not
  38 * handled by mmiotrace:
  39 */
  40static nokprobe_inline int
  41kmmio_fault(struct pt_regs *regs, unsigned long addr)
  42{
  43	if (unlikely(is_kmmio_active()))
  44		if (kmmio_handler(regs, addr) == 1)
  45			return -1;
  46	return 0;
  47}
  48
  49/*
  50 * Prefetch quirks:
  51 *
  52 * 32-bit mode:
  53 *
  54 *   Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
  55 *   Check that here and ignore it.
  56 *
  57 * 64-bit mode:
  58 *
  59 *   Sometimes the CPU reports invalid exceptions on prefetch.
  60 *   Check that here and ignore it.
  61 *
  62 * Opcode checker based on code by Richard Brunner.
  63 */
  64static inline int
  65check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
  66		      unsigned char opcode, int *prefetch)
  67{
  68	unsigned char instr_hi = opcode & 0xf0;
  69	unsigned char instr_lo = opcode & 0x0f;
  70
  71	switch (instr_hi) {
  72	case 0x20:
  73	case 0x30:
  74		/*
  75		 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
  76		 * In X86_64 long mode, the CPU will signal invalid
  77		 * opcode if some of these prefixes are present so
  78		 * X86_64 will never get here anyway
  79		 */
  80		return ((instr_lo & 7) == 0x6);
  81#ifdef CONFIG_X86_64
  82	case 0x40:
  83		/*
  84		 * In AMD64 long mode 0x40..0x4F are valid REX prefixes
  85		 * Need to figure out under what instruction mode the
  86		 * instruction was issued. Could check the LDT for lm,
  87		 * but for now it's good enough to assume that long
  88		 * mode only uses well known segments or kernel.
  89		 */
  90		return (!user_mode(regs) || user_64bit_mode(regs));
  91#endif
  92	case 0x60:
  93		/* 0x64 thru 0x67 are valid prefixes in all modes. */
  94		return (instr_lo & 0xC) == 0x4;
  95	case 0xF0:
  96		/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
  97		return !instr_lo || (instr_lo>>1) == 1;
  98	case 0x00:
  99		/* Prefetch instruction is 0x0F0D or 0x0F18 */
 100		if (probe_kernel_address(instr, opcode))
 101			return 0;
 102
 103		*prefetch = (instr_lo == 0xF) &&
 104			(opcode == 0x0D || opcode == 0x18);
 105		return 0;
 106	default:
 107		return 0;
 108	}
 109}
 110
 111static int
 112is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
 113{
 114	unsigned char *max_instr;
 115	unsigned char *instr;
 116	int prefetch = 0;
 117
 118	/*
 119	 * If it was a exec (instruction fetch) fault on NX page, then
 120	 * do not ignore the fault:
 121	 */
 122	if (error_code & X86_PF_INSTR)
 123		return 0;
 124
 125	instr = (void *)convert_ip_to_linear(current, regs);
 126	max_instr = instr + 15;
 127
 128	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE_MAX)
 129		return 0;
 130
 131	while (instr < max_instr) {
 132		unsigned char opcode;
 133
 134		if (probe_kernel_address(instr, opcode))
 135			break;
 136
 137		instr++;
 138
 139		if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
 140			break;
 141	}
 142	return prefetch;
 143}
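
The opcode walk above only has to recognize legal prefix bytes and the two-byte PREFETCH opcodes. Below is a minimal user-space sketch of the same classification, run over an in-memory byte buffer instead of the faulting instruction pointer; the helper name and buffer contents are purely illustrative, not kernel API.

/* Sketch only: mirrors check_prefetch_opcode()/is_prefetch() over a plain
 * byte buffer.  Prefix handling matches the kernel check; everything else
 * is simplified for illustration. */
#include <stdio.h>

static int is_prefetch_insn(const unsigned char *insn, int len)
{
	int i = 0;

	/* Skip the prefix bytes the kernel check accepts. */
	while (i < len) {
		unsigned char hi = insn[i] & 0xf0;
		unsigned char lo = insn[i] & 0x0f;

		if ((hi == 0x20 || hi == 0x30) && (lo & 7) == 0x6)
			i++;				/* segment overrides 0x26/0x2E/0x36/0x3E */
		else if (hi == 0x40)
			i++;				/* REX prefixes (64-bit only) */
		else if (hi == 0x60 && (lo & 0xC) == 0x4)
			i++;				/* 0x64 thru 0x67 */
		else if (hi == 0xF0 && (!lo || (lo >> 1) == 1))
			i++;				/* 0xF0, 0xF2, 0xF3 */
		else
			break;
	}

	/* PREFETCH/PREFETCHW encode as the two-byte opcodes 0x0F 0x0D / 0x0F 0x18. */
	return i + 1 < len && insn[i] == 0x0F &&
	       (insn[i + 1] == 0x0D || insn[i + 1] == 0x18);
}

int main(void)
{
	const unsigned char prefetchw_rdi[] = { 0x0F, 0x0D, 0x0F };	/* prefetchw (%rdi) */
	const unsigned char mov_rdi_rax[]   = { 0x48, 0x8B, 0x07 };	/* mov (%rdi),%rax  */

	printf("%d %d\n", is_prefetch_insn(prefetchw_rdi, 3),
			  is_prefetch_insn(mov_rdi_rax, 3));		/* prints: 1 0 */
	return 0;
}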
 144
 145DEFINE_SPINLOCK(pgd_lock);
 146LIST_HEAD(pgd_list);
 147
 148#ifdef CONFIG_X86_32
 149static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
 150{
 151	unsigned index = pgd_index(address);
 152	pgd_t *pgd_k;
 153	p4d_t *p4d, *p4d_k;
 154	pud_t *pud, *pud_k;
 155	pmd_t *pmd, *pmd_k;
 156
 157	pgd += index;
 158	pgd_k = init_mm.pgd + index;
 159
 160	if (!pgd_present(*pgd_k))
 161		return NULL;
 162
 163	/*
 164	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
 165	 * and redundant with the set_pmd() on non-PAE. As would
 166	 * set_p4d/set_pud.
 167	 */
 168	p4d = p4d_offset(pgd, address);
 169	p4d_k = p4d_offset(pgd_k, address);
 170	if (!p4d_present(*p4d_k))
 171		return NULL;
 172
 173	pud = pud_offset(p4d, address);
 174	pud_k = pud_offset(p4d_k, address);
 175	if (!pud_present(*pud_k))
 176		return NULL;
 177
 178	pmd = pmd_offset(pud, address);
 179	pmd_k = pmd_offset(pud_k, address);
 180
 181	if (pmd_present(*pmd) != pmd_present(*pmd_k))
 182		set_pmd(pmd, *pmd_k);
 183
 184	if (!pmd_present(*pmd_k))
 185		return NULL;
 186	else
 187		BUG_ON(pmd_pfn(*pmd) != pmd_pfn(*pmd_k));
 188
 189	return pmd_k;
 190}
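
vmalloc_sync_one() copies a missing kernel mapping from the reference (init_mm) tables into one process's page tables on demand. The following toy model illustrates just that idea, with flat arrays standing in for the real pgd/p4d/pud/pmd levels; nothing here is kernel API.

/* Toy model: one "top-level table" per process plus a reference table.
 * Syncing copies an entry the reference table has but the local one lacks. */
#include <stdio.h>

#define ENTRIES 8

static unsigned long reference_tbl[ENTRIES] = { [5] = 0xabc000 };	/* kernel mapping at slot 5 */
static unsigned long process_tbl[ENTRIES];				/* lazily populated copy    */

static unsigned long sync_one(unsigned long *local, unsigned int idx)
{
	if (!reference_tbl[idx])
		return 0;				/* nothing to copy: a genuine fault */
	if (!local[idx])
		local[idx] = reference_tbl[idx];	/* copy the missing kernel entry */
	return local[idx];
}

int main(void)
{
	printf("before=%#lx after=%#lx\n", process_tbl[5], sync_one(process_tbl, 5));
	return 0;
}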
 191
 192void vmalloc_sync_all(void)
 193{
 194	unsigned long address;
 195
 196	if (SHARED_KERNEL_PMD)
 197		return;
 198
 199	for (address = VMALLOC_START & PMD_MASK;
 200	     address >= TASK_SIZE_MAX && address < FIXADDR_TOP;
 201	     address += PMD_SIZE) {
 202		struct page *page;
 203
 204		spin_lock(&pgd_lock);
 205		list_for_each_entry(page, &pgd_list, lru) {
 206			spinlock_t *pgt_lock;
 207
 208			/* the pgt_lock only for Xen */
 209			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
 210
 211			spin_lock(pgt_lock);
 212			vmalloc_sync_one(page_address(page), address);
 213			spin_unlock(pgt_lock);
 214		}
 215		spin_unlock(&pgd_lock);
 216	}
 217}
 218
 219/*
 220 * 32-bit:
 221 *
 222 *   Handle a fault on the vmalloc or module mapping area
 223 */
 224static noinline int vmalloc_fault(unsigned long address)
 225{
 226	unsigned long pgd_paddr;
 227	pmd_t *pmd_k;
 228	pte_t *pte_k;
 229
 230	/* Make sure we are in vmalloc area: */
 231	if (!(address >= VMALLOC_START && address < VMALLOC_END))
 232		return -1;
 233
 234	/*
 235	 * Synchronize this task's top level page-table
 236	 * with the 'reference' page table.
 237	 *
 238	 * Do _not_ use "current" here. We might be inside
 239	 * an interrupt in the middle of a task switch..
 240	 */
 241	pgd_paddr = read_cr3_pa();
 242	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
 243	if (!pmd_k)
 244		return -1;
 245
 246	if (pmd_large(*pmd_k))
 247		return 0;
 248
 249	pte_k = pte_offset_kernel(pmd_k, address);
 250	if (!pte_present(*pte_k))
 251		return -1;
 252
 253	return 0;
 254}
 255NOKPROBE_SYMBOL(vmalloc_fault);
 256
 257/*
 258 * Did it hit the DOS screen memory VA from vm86 mode?
 259 */
 260static inline void
 261check_v8086_mode(struct pt_regs *regs, unsigned long address,
 262		 struct task_struct *tsk)
 263{
 264#ifdef CONFIG_VM86
 265	unsigned long bit;
 266
 267	if (!v8086_mode(regs) || !tsk->thread.vm86)
 268		return;
 269
 270	bit = (address - 0xA0000) >> PAGE_SHIFT;
 271	if (bit < 32)
 272		tsk->thread.vm86->screen_bitmap |= 1 << bit;
 273#endif
 274}
 275
 276static bool low_pfn(unsigned long pfn)
 277{
 278	return pfn < max_low_pfn;
 279}
 280
 281static void dump_pagetable(unsigned long address)
 282{
 283	pgd_t *base = __va(read_cr3_pa());
 284	pgd_t *pgd = &base[pgd_index(address)];
 285	p4d_t *p4d;
 286	pud_t *pud;
 287	pmd_t *pmd;
 288	pte_t *pte;
 289
 290#ifdef CONFIG_X86_PAE
 291	pr_info("*pdpt = %016Lx ", pgd_val(*pgd));
 292	if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd))
 293		goto out;
 294#define pr_pde pr_cont
 295#else
 296#define pr_pde pr_info
 297#endif
 298	p4d = p4d_offset(pgd, address);
 299	pud = pud_offset(p4d, address);
 300	pmd = pmd_offset(pud, address);
 301	pr_pde("*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd));
 302#undef pr_pde
 303
 304	/*
 305	 * We must not directly access the pte in the highpte
 306	 * case if the page table is located in highmem.
 307	 * And let's rather not kmap-atomic the pte, just in case
 308	 * it's allocated already:
 309	 */
 310	if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd))
 311		goto out;
 312
 313	pte = pte_offset_kernel(pmd, address);
 314	pr_cont("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte));
 315out:
 316	pr_cont("\n");
 317}
 318
 319#else /* CONFIG_X86_64: */
 320
 321void vmalloc_sync_all(void)
 322{
 323	sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
 324}
 325
 326/*
 327 * 64-bit:
 328 *
 329 *   Handle a fault on the vmalloc area
 330 */
 331static noinline int vmalloc_fault(unsigned long address)
 332{
 333	pgd_t *pgd, *pgd_k;
 334	p4d_t *p4d, *p4d_k;
 335	pud_t *pud;
 336	pmd_t *pmd;
 337	pte_t *pte;
 338
 339	/* Make sure we are in vmalloc area: */
 340	if (!(address >= VMALLOC_START && address < VMALLOC_END))
 341		return -1;
 342
 343	/*
 344	 * Copy kernel mappings over when needed. This can also
 345	 * happen within a race in page table update. In the later
 346	 * case just flush:
 347	 */
 348	pgd = (pgd_t *)__va(read_cr3_pa()) + pgd_index(address);
 349	pgd_k = pgd_offset_k(address);
 350	if (pgd_none(*pgd_k))
 351		return -1;
 352
 353	if (pgtable_l5_enabled()) {
 354		if (pgd_none(*pgd)) {
 355			set_pgd(pgd, *pgd_k);
 356			arch_flush_lazy_mmu_mode();
 357		} else {
 358			BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_k));
 359		}
 360	}
 361
 362	/* With 4-level paging, copying happens on the p4d level. */
 363	p4d = p4d_offset(pgd, address);
 364	p4d_k = p4d_offset(pgd_k, address);
 365	if (p4d_none(*p4d_k))
 366		return -1;
 367
 368	if (p4d_none(*p4d) && !pgtable_l5_enabled()) {
 369		set_p4d(p4d, *p4d_k);
 370		arch_flush_lazy_mmu_mode();
 371	} else {
 372		BUG_ON(p4d_pfn(*p4d) != p4d_pfn(*p4d_k));
 373	}
 374
 375	BUILD_BUG_ON(CONFIG_PGTABLE_LEVELS < 4);
 376
 377	pud = pud_offset(p4d, address);
 378	if (pud_none(*pud))
 379		return -1;
 380
 381	if (pud_large(*pud))
 382		return 0;
 383
 384	pmd = pmd_offset(pud, address);
 385	if (pmd_none(*pmd))
 386		return -1;
 387
 388	if (pmd_large(*pmd))
 389		return 0;
 390
 391	pte = pte_offset_kernel(pmd, address);
 392	if (!pte_present(*pte))
 393		return -1;
 394
 395	return 0;
 396}
 397NOKPROBE_SYMBOL(vmalloc_fault);
 398
 399#ifdef CONFIG_CPU_SUP_AMD
 400static const char errata93_warning[] =
 401KERN_ERR 
 402"******* Your BIOS seems to not contain a fix for K8 errata #93\n"
 403"******* Working around it, but it may cause SEGVs or burn power.\n"
 404"******* Please consider a BIOS update.\n"
 405"******* Disabling USB legacy in the BIOS may also help.\n";
 406#endif
 407
 408/*
 409 * No vm86 mode in 64-bit mode:
 410 */
 411static inline void
 412check_v8086_mode(struct pt_regs *regs, unsigned long address,
 413		 struct task_struct *tsk)
 414{
 415}
 416
 417static int bad_address(void *p)
 418{
 419	unsigned long dummy;
 420
 421	return probe_kernel_address((unsigned long *)p, dummy);
 422}
 423
 424static void dump_pagetable(unsigned long address)
 425{
 426	pgd_t *base = __va(read_cr3_pa());
 427	pgd_t *pgd = base + pgd_index(address);
 428	p4d_t *p4d;
 429	pud_t *pud;
 430	pmd_t *pmd;
 431	pte_t *pte;
 432
 433	if (bad_address(pgd))
 434		goto bad;
 435
 436	pr_info("PGD %lx ", pgd_val(*pgd));
 437
 438	if (!pgd_present(*pgd))
 439		goto out;
 440
 441	p4d = p4d_offset(pgd, address);
 442	if (bad_address(p4d))
 443		goto bad;
 444
 445	pr_cont("P4D %lx ", p4d_val(*p4d));
 446	if (!p4d_present(*p4d) || p4d_large(*p4d))
 447		goto out;
 448
 449	pud = pud_offset(p4d, address);
 450	if (bad_address(pud))
 451		goto bad;
 452
 453	pr_cont("PUD %lx ", pud_val(*pud));
 454	if (!pud_present(*pud) || pud_large(*pud))
 455		goto out;
 456
 457	pmd = pmd_offset(pud, address);
 458	if (bad_address(pmd))
 459		goto bad;
 460
 461	pr_cont("PMD %lx ", pmd_val(*pmd));
 462	if (!pmd_present(*pmd) || pmd_large(*pmd))
 463		goto out;
 464
 465	pte = pte_offset_kernel(pmd, address);
 466	if (bad_address(pte))
 467		goto bad;
 468
 469	pr_cont("PTE %lx", pte_val(*pte));
 470out:
 471	pr_cont("\n");
 472	return;
 473bad:
 474	pr_info("BAD\n");
 475}
 476
 477#endif /* CONFIG_X86_64 */
 478
 479/*
 480 * Workaround for K8 erratum #93 & buggy BIOS.
 481 *
 482 * BIOS SMM functions are required to use a specific workaround
 483 * to avoid corruption of the 64bit RIP register on C stepping K8.
 484 *
 485 * A lot of BIOS that didn't get tested properly miss this.
 486 *
 487 * The OS sees this as a page fault with the upper 32bits of RIP cleared.
 488 * Try to work around it here.
 489 *
 490 * Note we only handle faults in kernel here.
 491 * Does nothing on 32-bit.
 492 */
 493static int is_errata93(struct pt_regs *regs, unsigned long address)
 494{
 495#if defined(CONFIG_X86_64) && defined(CONFIG_CPU_SUP_AMD)
 496	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD
 497	    || boot_cpu_data.x86 != 0xf)
 498		return 0;
 499
 500	if (address != regs->ip)
 501		return 0;
 502
 503	if ((address >> 32) != 0)
 504		return 0;
 505
 506	address |= 0xffffffffUL << 32;
 507	if ((address >= (u64)_stext && address <= (u64)_etext) ||
 508	    (address >= MODULES_VADDR && address <= MODULES_END)) {
 509		printk_once(errata93_warning);
 510		regs->ip = address;
 511		return 1;
 512	}
 513#endif
 514	return 0;
 515}
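
Erratum #93 leaves the upper 32 bits of RIP cleared; the handler recovers the real address by OR-ing them back in and checking whether the result lands in kernel text. A small stand-alone illustration follows; the text-range values are made up, not the kernel's _stext/_etext.

#include <stdio.h>

int main(void)
{
	unsigned long stext = 0xffffffff81000000UL;	/* illustrative kernel text start */
	unsigned long etext = 0xffffffff82000000UL;	/* illustrative kernel text end   */
	unsigned long fault = 0x81234567UL;		/* RIP with upper 32 bits lost    */

	unsigned long fixed = fault | (0xffffffffUL << 32);

	printf("fixed=%#lx in_text=%d\n", fixed,
	       fixed >= stext && fixed <= etext);	/* prints: fixed=0xffffffff81234567 in_text=1 */
	return 0;
}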
 516
 517/*
 518 * Work around K8 erratum #100 K8 in compat mode occasionally jumps
 519 * to illegal addresses >4GB.
 520 *
 521 * We catch this in the page fault handler because these addresses
 522 * are not reachable. Just detect this case and return.  Any code
 523 * segment in LDT is compatibility mode.
 524 */
 525static int is_errata100(struct pt_regs *regs, unsigned long address)
 526{
 527#ifdef CONFIG_X86_64
 528	if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
 529		return 1;
 530#endif
 531	return 0;
 532}
 533
 534static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
 535{
 536#ifdef CONFIG_X86_F00F_BUG
 537	unsigned long nr;
 538
 539	/*
 540	 * Pentium F0 0F C7 C8 bug workaround:
 541	 */
 542	if (boot_cpu_has_bug(X86_BUG_F00F)) {
 543		nr = (address - idt_descr.address) >> 3;
 544
 545		if (nr == 6) {
 546			do_invalid_op(regs, 0);
 547			return 1;
 548		}
 549	}
 550#endif
 551	return 0;
 552}
 553
 554static void show_ldttss(const struct desc_ptr *gdt, const char *name, u16 index)
 555{
 556	u32 offset = (index >> 3) * sizeof(struct desc_struct);
 557	unsigned long addr;
 558	struct ldttss_desc desc;
 559
 560	if (index == 0) {
 561		pr_alert("%s: NULL\n", name);
 562		return;
 563	}
 564
 565	if (offset + sizeof(struct ldttss_desc) >= gdt->size) {
 566		pr_alert("%s: 0x%hx -- out of bounds\n", name, index);
 567		return;
 568	}
 569
 570	if (probe_kernel_read(&desc, (void *)(gdt->address + offset),
 571			      sizeof(struct ldttss_desc))) {
 572		pr_alert("%s: 0x%hx -- GDT entry is not readable\n",
 573			 name, index);
 574		return;
 575	}
 576
 577	addr = desc.base0 | (desc.base1 << 16) | ((unsigned long)desc.base2 << 24);
 578#ifdef CONFIG_X86_64
 579	addr |= ((u64)desc.base3 << 32);
 580#endif
 581	pr_alert("%s: 0x%hx -- base=0x%lx limit=0x%x\n",
 582		 name, index, addr, (desc.limit0 | (desc.limit1 << 16)));
 583}
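
show_ldttss() rebuilds a descriptor's base address from the split base0/base1/base2 fields (plus base3 on 64-bit). The same reassembly as a stand-alone snippet, with invented field values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Split base/limit fields as found in an LDT/TSS descriptor (values invented). */
	uint16_t base0 = 0x7000;
	uint8_t  base1 = 0x60, base2 = 0xfe;
	uint32_t base3 = 0xfffffe00;			/* upper half, 64-bit descriptors only */
	uint16_t limit0 = 0x2087;
	uint8_t  limit1 = 0x0;				/* bits 19:16 of the limit */

	unsigned long long base = base0 | ((unsigned long long)base1 << 16) |
				  ((unsigned long long)base2 << 24) |
				  ((unsigned long long)base3 << 32);
	unsigned int limit = limit0 | ((unsigned int)limit1 << 16);

	printf("base=%#llx limit=%#x\n", base, limit);
	return 0;
}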
 584
 585static void
 586show_fault_oops(struct pt_regs *regs, unsigned long error_code, unsigned long address)
 587{
 588	if (!oops_may_print())
 589		return;
 590
 591	if (error_code & X86_PF_INSTR) {
 592		unsigned int level;
 593		pgd_t *pgd;
 594		pte_t *pte;
 595
 596		pgd = __va(read_cr3_pa());
 597		pgd += pgd_index(address);
 598
 599		pte = lookup_address_in_pgd(pgd, address, &level);
 600
 601		if (pte && pte_present(*pte) && !pte_exec(*pte))
 602			pr_crit("kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n",
 603				from_kuid(&init_user_ns, current_uid()));
 604		if (pte && pte_present(*pte) && pte_exec(*pte) &&
 605				(pgd_flags(*pgd) & _PAGE_USER) &&
 606				(__read_cr4() & X86_CR4_SMEP))
 607			pr_crit("unable to execute userspace code (SMEP?) (uid: %d)\n",
 608				from_kuid(&init_user_ns, current_uid()));
 609	}
 610
 611	if (address < PAGE_SIZE && !user_mode(regs))
 612		pr_alert("BUG: kernel NULL pointer dereference, address: %px\n",
 613			(void *)address);
 614	else
 615		pr_alert("BUG: unable to handle page fault for address: %px\n",
 616			(void *)address);
 617
 618	pr_alert("#PF: %s %s in %s mode\n",
 619		 (error_code & X86_PF_USER)  ? "user" : "supervisor",
 620		 (error_code & X86_PF_INSTR) ? "instruction fetch" :
 621		 (error_code & X86_PF_WRITE) ? "write access" :
 622					       "read access",
 623			     user_mode(regs) ? "user" : "kernel");
 624	pr_alert("#PF: error_code(0x%04lx) - %s\n", error_code,
 625		 !(error_code & X86_PF_PROT) ? "not-present page" :
 626		 (error_code & X86_PF_RSVD)  ? "reserved bit violation" :
 627		 (error_code & X86_PF_PK)    ? "protection keys violation" :
 628					       "permissions violation");
 629
 630	if (!(error_code & X86_PF_USER) && user_mode(regs)) {
 631		struct desc_ptr idt, gdt;
 632		u16 ldtr, tr;
 633
 634		/*
 635		 * This can happen for quite a few reasons.  The more obvious
 636		 * ones are faults accessing the GDT, or LDT.  Perhaps
 637		 * surprisingly, if the CPU tries to deliver a benign or
 638		 * contributory exception from user code and gets a page fault
 639		 * during delivery, the page fault can be delivered as though
 640		 * it originated directly from user code.  This could happen
 641		 * due to wrong permissions on the IDT, GDT, LDT, TSS, or
 642		 * kernel or IST stack.
 643		 */
 644		store_idt(&idt);
 645
 646		/* Usable even on Xen PV -- it's just slow. */
 647		native_store_gdt(&gdt);
 648
 649		pr_alert("IDT: 0x%lx (limit=0x%hx) GDT: 0x%lx (limit=0x%hx)\n",
 650			 idt.address, idt.size, gdt.address, gdt.size);
 651
 652		store_ldt(ldtr);
 653		show_ldttss(&gdt, "LDTR", ldtr);
 654
 655		store_tr(tr);
 656		show_ldttss(&gdt, "TR", tr);
 657	}
 658
 659	dump_pagetable(address);
 660}
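
The #PF banner printed above is a direct decode of the hardware error code. A minimal stand-alone decoder using the same architectural bit layout (names shortened here; values are the standard page-fault error code bits):

#include <stdio.h>

#define PF_PROT		0x01	/* 0: not-present page, 1: protection violation */
#define PF_WRITE	0x02	/* access was a write */
#define PF_USER		0x04	/* access came from user mode */
#define PF_RSVD		0x08	/* reserved bit set in a paging entry */
#define PF_INSTR	0x10	/* access was an instruction fetch */
#define PF_PK		0x20	/* protection-keys violation */

static void decode_pf(unsigned long ec)
{
	printf("#PF: %s %s, %s\n",
	       (ec & PF_USER)  ? "user" : "supervisor",
	       (ec & PF_INSTR) ? "instruction fetch" :
	       (ec & PF_WRITE) ? "write access" : "read access",
	       !(ec & PF_PROT) ? "not-present page" :
	       (ec & PF_RSVD)  ? "reserved bit violation" :
	       (ec & PF_PK)    ? "protection keys violation" :
				 "permissions violation");
}

int main(void)
{
	decode_pf(0x06);	/* user write to a not-present page */
	decode_pf(0x11);	/* kernel instruction fetch blocked by NX */
	return 0;
}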
 661
 662static noinline void
 663pgtable_bad(struct pt_regs *regs, unsigned long error_code,
 664	    unsigned long address)
 665{
 666	struct task_struct *tsk;
 667	unsigned long flags;
 668	int sig;
 669
 670	flags = oops_begin();
 671	tsk = current;
 672	sig = SIGKILL;
 673
 674	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
 675	       tsk->comm, address);
 676	dump_pagetable(address);
 677
 678	if (__die("Bad pagetable", regs, error_code))
 679		sig = 0;
 680
 681	oops_end(flags, regs, sig);
 682}
 683
 684static void set_signal_archinfo(unsigned long address,
 685				unsigned long error_code)
 686{
 687	struct task_struct *tsk = current;
 688
 689	/*
 690	 * To avoid leaking information about the kernel page
 691	 * table layout, pretend that user-mode accesses to
 692	 * kernel addresses are always protection faults.
 693	 *
 694	 * NB: This means that failed vsyscalls with vsyscall=none
 695	 * will have the PROT bit.  This doesn't leak any
 696	 * information and does not appear to cause any problems.
 697	 */
 698	if (address >= TASK_SIZE_MAX)
 699		error_code |= X86_PF_PROT;
 700
 701	tsk->thread.trap_nr = X86_TRAP_PF;
 702	tsk->thread.error_code = error_code | X86_PF_USER;
 703	tsk->thread.cr2 = address;
 704}
 705
 706static noinline void
 707no_context(struct pt_regs *regs, unsigned long error_code,
 708	   unsigned long address, int signal, int si_code)
 709{
 710	struct task_struct *tsk = current;
 711	unsigned long flags;
 712	int sig;
 713
 714	if (user_mode(regs)) {
 715		/*
 716		 * This is an implicit supervisor-mode access from user
 717		 * mode.  Bypass all the kernel-mode recovery code and just
 718		 * OOPS.
 719		 */
 720		goto oops;
 721	}
 722
 723	/* Are we prepared to handle this kernel fault? */
 724	if (fixup_exception(regs, X86_TRAP_PF, error_code, address)) {
 725		/*
 726		 * Any interrupt that takes a fault gets the fixup. This makes
 727		 * the below recursive fault logic only apply to a faults from
 728		 * task context.
 729		 */
 730		if (in_interrupt())
 731			return;
 732
 733		/*
 734		 * Per the above we're !in_interrupt(), aka. task context.
 735		 *
 736		 * In this case we need to make sure we're not recursively
 737		 * faulting through the emulate_vsyscall() logic.
 738		 */
 739		if (current->thread.sig_on_uaccess_err && signal) {
 740			set_signal_archinfo(address, error_code);
 741
 742			/* XXX: hwpoison faults will set the wrong code. */
 743			force_sig_fault(signal, si_code, (void __user *)address);
 744		}
 745
 746		/*
 747		 * Barring that, we can do the fixup and be happy.
 748		 */
 749		return;
 750	}
 751
 752#ifdef CONFIG_VMAP_STACK
 753	/*
 754	 * Stack overflow?  During boot, we can fault near the initial
 755	 * stack in the direct map, but that's not an overflow -- check
 756	 * that we're in vmalloc space to avoid this.
 757	 */
 758	if (is_vmalloc_addr((void *)address) &&
 759	    (((unsigned long)tsk->stack - 1 - address < PAGE_SIZE) ||
 760	     address - ((unsigned long)tsk->stack + THREAD_SIZE) < PAGE_SIZE)) {
 761		unsigned long stack = __this_cpu_ist_top_va(DF) - sizeof(void *);
 762		/*
 763		 * We're likely to be running with very little stack space
 764		 * left.  It's plausible that we'd hit this condition but
 765		 * double-fault even before we get this far, in which case
 766		 * we're fine: the double-fault handler will deal with it.
 767		 *
 768		 * We don't want to make it all the way into the oops code
 769		 * and then double-fault, though, because we're likely to
 770		 * break the console driver and lose most of the stack dump.
 771		 */
 772		asm volatile ("movq %[stack], %%rsp\n\t"
 773			      "call handle_stack_overflow\n\t"
 774			      "1: jmp 1b"
 775			      : ASM_CALL_CONSTRAINT
 776			      : "D" ("kernel stack overflow (page fault)"),
 777				"S" (regs), "d" (address),
 778				[stack] "rm" (stack));
 779		unreachable();
 780	}
 781#endif
 782
 783	/*
 784	 * 32-bit:
 785	 *
 786	 *   Valid to do another page fault here, because if this fault
 787	 *   had been triggered by is_prefetch fixup_exception would have
 788	 *   handled it.
 789	 *
 790	 * 64-bit:
 791	 *
 792	 *   Hall of shame of CPU/BIOS bugs.
 793	 */
 794	if (is_prefetch(regs, error_code, address))
 795		return;
 796
 797	if (is_errata93(regs, address))
 798		return;
 799
 800	/*
 801	 * Buggy firmware could access regions which might page fault, try to
 802	 * recover from such faults.
 803	 */
 804	if (IS_ENABLED(CONFIG_EFI))
 805		efi_recover_from_page_fault(address);
 806
 807oops:
 808	/*
 809	 * Oops. The kernel tried to access some bad page. We'll have to
 810	 * terminate things with extreme prejudice:
 811	 */
 812	flags = oops_begin();
 813
 814	show_fault_oops(regs, error_code, address);
 815
 816	if (task_stack_end_corrupted(tsk))
 817		printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
 818
 819	sig = SIGKILL;
 820	if (__die("Oops", regs, error_code))
 821		sig = 0;
 822
 823	/* Executive summary in case the body of the oops scrolled away */
 824	printk(KERN_DEFAULT "CR2: %016lx\n", address);
 825
 826	oops_end(flags, regs, sig);
 827}
 828
 829/*
 830 * Print out info about fatal segfaults, if the show_unhandled_signals
 831 * sysctl is set:
 832 */
 833static inline void
 834show_signal_msg(struct pt_regs *regs, unsigned long error_code,
 835		unsigned long address, struct task_struct *tsk)
 836{
 837	const char *loglvl = task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG;
 838
 839	if (!unhandled_signal(tsk, SIGSEGV))
 840		return;
 841
 842	if (!printk_ratelimit())
 843		return;
 844
 845	printk("%s%s[%d]: segfault at %lx ip %px sp %px error %lx",
 846		loglvl, tsk->comm, task_pid_nr(tsk), address,
 847		(void *)regs->ip, (void *)regs->sp, error_code);
 848
 849	print_vma_addr(KERN_CONT " in ", regs->ip);
 850
 851	printk(KERN_CONT "\n");
 852
 853	show_opcodes(regs, loglvl);
 854}
 855
 856/*
 857 * The (legacy) vsyscall page is the long page in the kernel portion
 858 * of the address space that has user-accessible permissions.
 859 */
 860static bool is_vsyscall_vaddr(unsigned long vaddr)
 861{
 862	return unlikely((vaddr & PAGE_MASK) == VSYSCALL_ADDR);
 863}
 864
 865static void
 866__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
 867		       unsigned long address, u32 pkey, int si_code)
 868{
 869	struct task_struct *tsk = current;
 870
 871	/* User mode accesses just cause a SIGSEGV */
 872	if (user_mode(regs) && (error_code & X86_PF_USER)) {
 873		/*
 874		 * It's possible to have interrupts off here:
 875		 */
 876		local_irq_enable();
 877
 878		/*
 879		 * Valid to do another page fault here because this one came
 880		 * from user space:
 881		 */
 882		if (is_prefetch(regs, error_code, address))
 883			return;
 884
 885		if (is_errata100(regs, address))
 886			return;
 887
 888		/*
 889		 * To avoid leaking information about the kernel page table
 890		 * layout, pretend that user-mode accesses to kernel addresses
 891		 * are always protection faults.
 892		 */
 893		if (address >= TASK_SIZE_MAX)
 894			error_code |= X86_PF_PROT;
 895
 896		if (likely(show_unhandled_signals))
 897			show_signal_msg(regs, error_code, address, tsk);
 898
 899		set_signal_archinfo(address, error_code);
 900
 901		if (si_code == SEGV_PKUERR)
 902			force_sig_pkuerr((void __user *)address, pkey);
 903
 904		force_sig_fault(SIGSEGV, si_code, (void __user *)address);
 905
 906		return;
 907	}
 908
 909	if (is_f00f_bug(regs, address))
 910		return;
 911
 912	no_context(regs, error_code, address, SIGSEGV, si_code);
 913}
 914
 915static noinline void
 916bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
 917		     unsigned long address)
 918{
 919	__bad_area_nosemaphore(regs, error_code, address, 0, SEGV_MAPERR);
 920}
 921
 922static void
 923__bad_area(struct pt_regs *regs, unsigned long error_code,
 924	   unsigned long address, u32 pkey, int si_code)
 925{
 926	struct mm_struct *mm = current->mm;
 927	/*
 928	 * Something tried to access memory that isn't in our memory map..
 929	 * Fix it, but check if it's kernel or user first..
 930	 */
 931	up_read(&mm->mmap_sem);
 932
 933	__bad_area_nosemaphore(regs, error_code, address, pkey, si_code);
 934}
 935
 936static noinline void
 937bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
 938{
 939	__bad_area(regs, error_code, address, 0, SEGV_MAPERR);
 940}
 941
 942static inline bool bad_area_access_from_pkeys(unsigned long error_code,
 943		struct vm_area_struct *vma)
 944{
 945	/* This code is always called on the current mm */
 946	bool foreign = false;
 947
 948	if (!boot_cpu_has(X86_FEATURE_OSPKE))
 949		return false;
 950	if (error_code & X86_PF_PK)
 951		return true;
 952	/* this checks permission keys on the VMA: */
 953	if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE),
 954				       (error_code & X86_PF_INSTR), foreign))
 955		return true;
 956	return false;
 957}
 958
 959static noinline void
 960bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
 961		      unsigned long address, struct vm_area_struct *vma)
 962{
 963	/*
 964	 * This OSPKE check is not strictly necessary at runtime.
 965	 * But, doing it this way allows compiler optimizations
 966	 * if pkeys are compiled out.
 967	 */
 968	if (bad_area_access_from_pkeys(error_code, vma)) {
 969		/*
 970		 * A protection key fault means that the PKRU value did not allow
 971		 * access to some PTE.  Userspace can figure out what PKRU was
 972		 * from the XSAVE state.  This function captures the pkey from
 973		 * the vma and passes it to userspace so userspace can discover
 974		 * which protection key was set on the PTE.
 975		 *
 976		 * If we get here, we know that the hardware signaled a X86_PF_PK
 977		 * fault and that there was a VMA once we got in the fault
 978		 * handler.  It does *not* guarantee that the VMA we find here
 979		 * was the one that we faulted on.
 980		 *
 981		 * 1. T1   : mprotect_key(foo, PAGE_SIZE, pkey=4);
 982		 * 2. T1   : set PKRU to deny access to pkey=4, touches page
 983		 * 3. T1   : faults...
 984		 * 4.    T2: mprotect_key(foo, PAGE_SIZE, pkey=5);
 985		 * 5. T1   : enters fault handler, takes mmap_sem, etc...
 986		 * 6. T1   : reaches here, sees vma_pkey(vma)=5, when we really
 987		 *	     faulted on a pte with its pkey=4.
 988		 */
 989		u32 pkey = vma_pkey(vma);
 990
 991		__bad_area(regs, error_code, address, pkey, SEGV_PKUERR);
 992	} else {
 993		__bad_area(regs, error_code, address, 0, SEGV_ACCERR);
 994	}
 995}
 996
 997static void
 998do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
 999	  vm_fault_t fault)
1000{
1001	/* Kernel mode? Handle exceptions or die: */
1002	if (!(error_code & X86_PF_USER)) {
1003		no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
1004		return;
1005	}
1006
1007	/* User-space => ok to do another page fault: */
1008	if (is_prefetch(regs, error_code, address))
1009		return;
1010
1011	set_signal_archinfo(address, error_code);
1012
1013#ifdef CONFIG_MEMORY_FAILURE
1014	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
1015		struct task_struct *tsk = current;
1016		unsigned lsb = 0;
1017
1018		pr_err(
1019	"MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
1020			tsk->comm, tsk->pid, address);
1021		if (fault & VM_FAULT_HWPOISON_LARGE)
1022			lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
1023		if (fault & VM_FAULT_HWPOISON)
1024			lsb = PAGE_SHIFT;
1025		force_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb);
1026		return;
1027	}
1028#endif
1029	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
1030}
1031
1032static noinline void
1033mm_fault_error(struct pt_regs *regs, unsigned long error_code,
1034	       unsigned long address, vm_fault_t fault)
1035{
1036	if (fatal_signal_pending(current) && !(error_code & X86_PF_USER)) {
1037		no_context(regs, error_code, address, 0, 0);
1038		return;
1039	}
1040
1041	if (fault & VM_FAULT_OOM) {
1042		/* Kernel mode? Handle exceptions or die: */
1043		if (!(error_code & X86_PF_USER)) {
1044			no_context(regs, error_code, address,
1045				   SIGSEGV, SEGV_MAPERR);
1046			return;
1047		}
1048
1049		/*
1050		 * We ran out of memory, call the OOM killer, and return the
1051		 * userspace (which will retry the fault, or kill us if we got
1052		 * oom-killed):
1053		 */
1054		pagefault_out_of_memory();
1055	} else {
1056		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
1057			     VM_FAULT_HWPOISON_LARGE))
1058			do_sigbus(regs, error_code, address, fault);
1059		else if (fault & VM_FAULT_SIGSEGV)
1060			bad_area_nosemaphore(regs, error_code, address);
1061		else
1062			BUG();
1063	}
1064}
1065
1066static int spurious_kernel_fault_check(unsigned long error_code, pte_t *pte)
1067{
1068	if ((error_code & X86_PF_WRITE) && !pte_write(*pte))
1069		return 0;
1070
1071	if ((error_code & X86_PF_INSTR) && !pte_exec(*pte))
1072		return 0;
1073
1074	return 1;
1075}
1076
1077/*
1078 * Handle a spurious fault caused by a stale TLB entry.
1079 *
1080 * This allows us to lazily refresh the TLB when increasing the
1081 * permissions of a kernel page (RO -> RW or NX -> X).  Doing it
1082 * eagerly is very expensive since that implies doing a full
1083 * cross-processor TLB flush, even if no stale TLB entries exist
1084 * on other processors.
1085 *
1086 * Spurious faults may only occur if the TLB contains an entry with
1087 * fewer permission than the page table entry.  Non-present (P = 0)
1088 * and reserved bit (R = 1) faults are never spurious.
1089 *
1090 * There are no security implications to leaving a stale TLB when
1091 * increasing the permissions on a page.
1092 *
1093 * Returns non-zero if a spurious fault was handled, zero otherwise.
1094 *
1095 * See Intel Developer's Manual Vol 3 Section 4.10.4.3, bullet 3
1096 * (Optional Invalidation).
1097 */
1098static noinline int
1099spurious_kernel_fault(unsigned long error_code, unsigned long address)
1100{
1101	pgd_t *pgd;
1102	p4d_t *p4d;
1103	pud_t *pud;
1104	pmd_t *pmd;
1105	pte_t *pte;
1106	int ret;
1107
1108	/*
1109	 * Only writes to RO or instruction fetches from NX may cause
1110	 * spurious faults.
1111	 *
1112	 * These could be from user or supervisor accesses but the TLB
1113	 * is only lazily flushed after a kernel mapping protection
1114	 * change, so user accesses are not expected to cause spurious
1115	 * faults.
1116	 */
1117	if (error_code != (X86_PF_WRITE | X86_PF_PROT) &&
1118	    error_code != (X86_PF_INSTR | X86_PF_PROT))
1119		return 0;
1120
1121	pgd = init_mm.pgd + pgd_index(address);
1122	if (!pgd_present(*pgd))
1123		return 0;
1124
1125	p4d = p4d_offset(pgd, address);
1126	if (!p4d_present(*p4d))
1127		return 0;
1128
1129	if (p4d_large(*p4d))
1130		return spurious_kernel_fault_check(error_code, (pte_t *) p4d);
1131
1132	pud = pud_offset(p4d, address);
1133	if (!pud_present(*pud))
1134		return 0;
1135
1136	if (pud_large(*pud))
1137		return spurious_kernel_fault_check(error_code, (pte_t *) pud);
1138
1139	pmd = pmd_offset(pud, address);
1140	if (!pmd_present(*pmd))
1141		return 0;
1142
1143	if (pmd_large(*pmd))
1144		return spurious_kernel_fault_check(error_code, (pte_t *) pmd);
1145
1146	pte = pte_offset_kernel(pmd, address);
1147	if (!pte_present(*pte))
1148		return 0;
1149
1150	ret = spurious_kernel_fault_check(error_code, pte);
1151	if (!ret)
1152		return 0;
1153
1154	/*
1155	 * Make sure we have permissions in PMD.
1156	 * If not, then there's a bug in the page tables:
1157	 */
1158	ret = spurious_kernel_fault_check(error_code, (pte_t *) pmd);
1159	WARN_ONCE(!ret, "PMD has incorrect permission bits\n");
1160
1161	return ret;
1162}
1163NOKPROBE_SYMBOL(spurious_kernel_fault);
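
The spurious-fault test boils down to: if the page-table entry now grants the access that faulted, the fault was only caused by a stale TLB entry and can be ignored. A stand-alone sketch of that predicate, with pte_write()/pte_exec() replaced by plain booleans:

#include <stdio.h>
#include <stdbool.h>

#define PF_WRITE 0x02
#define PF_INSTR 0x10

struct perm { bool write, exec; };	/* stand-in for pte_write()/pte_exec() */

static int fault_is_spurious(unsigned long error_code, struct perm pte)
{
	if ((error_code & PF_WRITE) && !pte.write)
		return 0;
	if ((error_code & PF_INSTR) && !pte.exec)
		return 0;
	return 1;			/* PTE already allows it: stale TLB entry */
}

int main(void)
{
	struct perm now_rw   = { .write = true  };	/* page upgraded RO -> RW */
	struct perm still_ro = { .write = false };

	printf("%d %d\n", fault_is_spurious(PF_WRITE, now_rw),
			  fault_is_spurious(PF_WRITE, still_ro));	/* prints: 1 0 */
	return 0;
}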
1164
1165int show_unhandled_signals = 1;
1166
1167static inline int
1168access_error(unsigned long error_code, struct vm_area_struct *vma)
1169{
1170	/* This is only called for the current mm, so: */
1171	bool foreign = false;
1172
1173	/*
1174	 * Read or write was blocked by protection keys.  This is
1175	 * always an unconditional error and can never result in
1176	 * a follow-up action to resolve the fault, like a COW.
1177	 */
1178	if (error_code & X86_PF_PK)
1179		return 1;
1180
1181	/*
1182	 * Make sure to check the VMA so that we do not perform
1183	 * faults just to hit a X86_PF_PK as soon as we fill in a
1184	 * page.
1185	 */
1186	if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE),
1187				       (error_code & X86_PF_INSTR), foreign))
1188		return 1;
1189
1190	if (error_code & X86_PF_WRITE) {
1191		/* write, present and write, not present: */
1192		if (unlikely(!(vma->vm_flags & VM_WRITE)))
1193			return 1;
1194		return 0;
1195	}
1196
1197	/* read, present: */
1198	if (unlikely(error_code & X86_PF_PROT))
1199		return 1;
1200
1201	/* read, not present: */
1202	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
1203		return 1;
1204
1205	return 0;
1206}
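
Ignoring the protection-key paths, access_error() is a small decision table over the error code and the VMA's VM_* flags. Below is a stand-alone version of just that table; the flag values are hard-coded for illustration and the pkey/arch_vma_access_permitted() checks are left out.

#include <stdio.h>

#define PF_PROT	 0x01
#define PF_WRITE 0x02
#define VM_READ	 0x1
#define VM_WRITE 0x2
#define VM_EXEC	 0x4

static int access_denied(unsigned long error_code, unsigned long vm_flags)
{
	if (error_code & PF_WRITE)		/* write fault: VMA must be writable */
		return !(vm_flags & VM_WRITE);
	if (error_code & PF_PROT)		/* read of a present page: real protection error */
		return 1;
	return !(vm_flags & (VM_READ | VM_EXEC | VM_WRITE));	/* read of a not-present page */
}

int main(void)
{
	printf("%d %d %d\n",
	       access_denied(PF_WRITE, VM_READ),		/* write to a read-only VMA: 1   */
	       access_denied(0,        VM_READ),		/* demand fault, readable VMA: 0 */
	       access_denied(PF_PROT,  VM_READ | VM_WRITE));	/* present-page read fault: 1    */
	return 0;
}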
1207
1208static int fault_in_kernel_space(unsigned long address)
1209{
1210	/*
1211	 * On 64-bit systems, the vsyscall page is at an address above
1212	 * TASK_SIZE_MAX, but is not considered part of the kernel
1213	 * address space.
1214	 */
1215	if (IS_ENABLED(CONFIG_X86_64) && is_vsyscall_vaddr(address))
1216		return false;
1217
1218	return address >= TASK_SIZE_MAX;
1219}
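
fault_in_kernel_space() splits the address space at TASK_SIZE_MAX, with the legacy vsyscall page carved out as "user" despite its high address. A stand-alone illustration using the usual x86-64 4-level-paging constants, hard-coded here rather than taken from kernel headers:

#include <stdio.h>
#include <stdbool.h>

#define TASK_SIZE_MAX_4L	0x00007ffffffff000UL	/* 2^47 minus one guard page */
#define VSYSCALL_PAGE		0xffffffffff600000UL

static bool in_kernel_space(unsigned long addr)
{
	/* The vsyscall page sits above TASK_SIZE_MAX but is handled as user space. */
	if ((addr & ~0xfffUL) == VSYSCALL_PAGE)
		return false;
	return addr >= TASK_SIZE_MAX_4L;
}

int main(void)
{
	printf("%d %d %d\n",
	       in_kernel_space(0x00005555deadb000UL),	/* typical user mapping: 0 */
	       in_kernel_space(0xffffffffff600000UL),	/* vsyscall page:        0 */
	       in_kernel_space(0xffffffff81000000UL));	/* kernel text:          1 */
	return 0;
}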
1220
1221/*
1222 * Called for all faults where 'address' is part of the kernel address
1223 * space.  Might get called for faults that originate from *code* that
1224 * ran in userspace or the kernel.
1225 */
1226static void
1227do_kern_addr_fault(struct pt_regs *regs, unsigned long hw_error_code,
1228		   unsigned long address)
1229{
1230	/*
1231	 * Protection keys exceptions only happen on user pages.  We
1232	 * have no user pages in the kernel portion of the address
1233	 * space, so do not expect them here.
1234	 */
1235	WARN_ON_ONCE(hw_error_code & X86_PF_PK);
1236
1237	/*
1238	 * We can fault-in kernel-space virtual memory on-demand. The
1239	 * 'reference' page table is init_mm.pgd.
1240	 *
1241	 * NOTE! We MUST NOT take any locks for this case. We may
1242	 * be in an interrupt or a critical region, and should
1243	 * only copy the information from the master page table,
1244	 * nothing more.
1245	 *
1246	 * Before doing this on-demand faulting, ensure that the
1247	 * fault is not any of the following:
1248	 * 1. A fault on a PTE with a reserved bit set.
1249	 * 2. A fault caused by a user-mode access.  (Do not demand-
1250	 *    fault kernel memory due to user-mode accesses).
1251	 * 3. A fault caused by a page-level protection violation.
1252	 *    (A demand fault would be on a non-present page which
1253	 *     would have X86_PF_PROT==0).
1254	 */
1255	if (!(hw_error_code & (X86_PF_RSVD | X86_PF_USER | X86_PF_PROT))) {
1256		if (vmalloc_fault(address) >= 0)
1257			return;
1258	}
1259
1260	/* Was the fault spurious, caused by lazy TLB invalidation? */
1261	if (spurious_kernel_fault(hw_error_code, address))
1262		return;
1263
1264	/* kprobes don't want to hook the spurious faults: */
1265	if (kprobe_page_fault(regs, X86_TRAP_PF))
1266		return;
1267
1268	/*
1269	 * Note, despite being a "bad area", there are quite a few
1270	 * acceptable reasons to get here, such as erratum fixups
1271	 * and handling kernel code that can fault, like get_user().
1272	 *
1273	 * Don't take the mm semaphore here. If we fixup a prefetch
1274	 * fault we could otherwise deadlock:
1275	 */
1276	bad_area_nosemaphore(regs, hw_error_code, address);
1277}
1278NOKPROBE_SYMBOL(do_kern_addr_fault);
1279
1280/* Handle faults in the user portion of the address space */
1281static inline
1282void do_user_addr_fault(struct pt_regs *regs,
1283			unsigned long hw_error_code,
1284			unsigned long address)
1285{
1286	struct vm_area_struct *vma;
1287	struct task_struct *tsk;
1288	struct mm_struct *mm;
1289	vm_fault_t fault, major = 0;
1290	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
1291
1292	tsk = current;
1293	mm = tsk->mm;
1294
1295	/* kprobes don't want to hook the spurious faults: */
1296	if (unlikely(kprobe_page_fault(regs, X86_TRAP_PF)))
1297		return;
1298
1299	/*
1300	 * Reserved bits are never expected to be set on
1301	 * entries in the user portion of the page tables.
1302	 */
1303	if (unlikely(hw_error_code & X86_PF_RSVD))
1304		pgtable_bad(regs, hw_error_code, address);
1305
1306	/*
1307	 * If SMAP is on, check for invalid kernel (supervisor) access to user
1308	 * pages in the user address space.  The odd case here is WRUSS,
1309	 * which, according to the preliminary documentation, does not respect
1310	 * SMAP and will have the USER bit set so, in all cases, SMAP
1311	 * enforcement appears to be consistent with the USER bit.
1312	 */
1313	if (unlikely(cpu_feature_enabled(X86_FEATURE_SMAP) &&
1314		     !(hw_error_code & X86_PF_USER) &&
1315		     !(regs->flags & X86_EFLAGS_AC)))
1316	{
1317		bad_area_nosemaphore(regs, hw_error_code, address);
1318		return;
1319	}
1320
1321	/*
1322	 * If we're in an interrupt, have no user context or are running
1323	 * in a region with pagefaults disabled then we must not take the fault
1324	 */
1325	if (unlikely(faulthandler_disabled() || !mm)) {
1326		bad_area_nosemaphore(regs, hw_error_code, address);
1327		return;
1328	}
1329
1330	/*
1331	 * It's safe to allow irq's after cr2 has been saved and the
1332	 * vmalloc fault has been handled.
1333	 *
1334	 * User-mode registers count as a user access even for any
1335	 * potential system fault or CPU buglet:
1336	 */
1337	if (user_mode(regs)) {
1338		local_irq_enable();
1339		flags |= FAULT_FLAG_USER;
1340	} else {
1341		if (regs->flags & X86_EFLAGS_IF)
1342			local_irq_enable();
1343	}
1344
1345	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
1346
1347	if (hw_error_code & X86_PF_WRITE)
1348		flags |= FAULT_FLAG_WRITE;
1349	if (hw_error_code & X86_PF_INSTR)
1350		flags |= FAULT_FLAG_INSTRUCTION;
1351
1352#ifdef CONFIG_X86_64
1353	/*
1354	 * Faults in the vsyscall page might need emulation.  The
1355	 * vsyscall page is at a high address (>PAGE_OFFSET), but is
1356	 * considered to be part of the user address space.
1357	 *
1358	 * The vsyscall page does not have a "real" VMA, so do this
1359	 * emulation before we go searching for VMAs.
1360	 *
1361	 * PKRU never rejects instruction fetches, so we don't need
1362	 * to consider the PF_PK bit.
1363	 */
1364	if (is_vsyscall_vaddr(address)) {
1365		if (emulate_vsyscall(hw_error_code, regs, address))
1366			return;
1367	}
1368#endif
1369
1370	/*
1371	 * Kernel-mode access to the user address space should only occur
1372	 * on well-defined single instructions listed in the exception
1373	 * tables.  But, an erroneous kernel fault occurring outside one of
1374	 * those areas which also holds mmap_sem might deadlock attempting
1375	 * to validate the fault against the address space.
1376	 *
1377	 * Only do the expensive exception table search when we might be at
1378	 * risk of a deadlock.  This happens if we
1379	 * 1. Failed to acquire mmap_sem, and
1380	 * 2. The access did not originate in userspace.
1381	 */
1382	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
1383		if (!user_mode(regs) && !search_exception_tables(regs->ip)) {
1384			/*
1385			 * Fault from code in kernel from
1386			 * which we do not expect faults.
1387			 */
1388			bad_area_nosemaphore(regs, hw_error_code, address);
1389			return;
1390		}
1391retry:
1392		down_read(&mm->mmap_sem);
1393	} else {
1394		/*
1395		 * The above down_read_trylock() might have succeeded in
1396		 * which case we'll have missed the might_sleep() from
1397		 * down_read():
1398		 */
1399		might_sleep();
1400	}
1401
1402	vma = find_vma(mm, address);
1403	if (unlikely(!vma)) {
1404		bad_area(regs, hw_error_code, address);
1405		return;
1406	}
1407	if (likely(vma->vm_start <= address))
1408		goto good_area;
1409	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
1410		bad_area(regs, hw_error_code, address);
1411		return;
1412	}
1413	if (unlikely(expand_stack(vma, address))) {
1414		bad_area(regs, hw_error_code, address);
1415		return;
1416	}
1417
1418	/*
1419	 * Ok, we have a good vm_area for this memory access, so
1420	 * we can handle it..
1421	 */
1422good_area:
1423	if (unlikely(access_error(hw_error_code, vma))) {
1424		bad_area_access_error(regs, hw_error_code, address, vma);
1425		return;
1426	}
1427
1428	/*
1429	 * If for any reason at all we couldn't handle the fault,
1430	 * make sure we exit gracefully rather than endlessly redo
1431	 * the fault.  Since we never set FAULT_FLAG_RETRY_NOWAIT, if
1432	 * we get VM_FAULT_RETRY back, the mmap_sem has been unlocked.
1433	 *
1434	 * Note that handle_userfault() may also release and reacquire mmap_sem
1435	 * (and not return with VM_FAULT_RETRY), when returning to userland to
1436	 * repeat the page fault later with a VM_FAULT_NOPAGE retval
1437	 * (potentially after handling any pending signal during the return to
1438	 * userland). The return to userland is identified whenever
1439	 * FAULT_FLAG_USER|FAULT_FLAG_KILLABLE are both set in flags.
1440	 */
1441	fault = handle_mm_fault(vma, address, flags);
1442	major |= fault & VM_FAULT_MAJOR;
1443
1444	/*
1445	 * If we need to retry the mmap_sem has already been released,
1446	 * and if there is a fatal signal pending there is no guarantee
1447	 * that we made any progress. Handle this case first.
1448	 */
1449	if (unlikely(fault & VM_FAULT_RETRY)) {
1450		/* Retry at most once */
1451		if (flags & FAULT_FLAG_ALLOW_RETRY) {
1452			flags &= ~FAULT_FLAG_ALLOW_RETRY;
1453			flags |= FAULT_FLAG_TRIED;
1454			if (!fatal_signal_pending(tsk))
1455				goto retry;
1456		}
1457
1458		/* User mode? Just return to handle the fatal exception */
1459		if (flags & FAULT_FLAG_USER)
1460			return;
1461
1462		/* Not returning to user mode? Handle exceptions or die: */
1463		no_context(regs, hw_error_code, address, SIGBUS, BUS_ADRERR);
1464		return;
1465	}
1466
1467	up_read(&mm->mmap_sem);
1468	if (unlikely(fault & VM_FAULT_ERROR)) {
1469		mm_fault_error(regs, hw_error_code, address, fault);
1470		return;
1471	}
1472
1473	/*
1474	 * Major/minor page fault accounting. If any of the events
1475	 * returned VM_FAULT_MAJOR, we account it as a major fault.
1476	 */
1477	if (major) {
1478		tsk->maj_flt++;
1479		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
1480	} else {
1481		tsk->min_flt++;
1482		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
1483	}
1484
1485	check_v8086_mode(regs, address, tsk);
1486}
1487NOKPROBE_SYMBOL(do_user_addr_fault);
1488
1489/*
1490 * Explicitly marked noinline such that the function tracer sees this as the
1491 * page_fault entry point.
1492 */
1493static noinline void
1494__do_page_fault(struct pt_regs *regs, unsigned long hw_error_code,
1495		unsigned long address)
1496{
1497	prefetchw(&current->mm->mmap_sem);
1498
1499	if (unlikely(kmmio_fault(regs, address)))
1500		return;
1501
1502	/* Was the fault on kernel-controlled part of the address space? */
1503	if (unlikely(fault_in_kernel_space(address)))
1504		do_kern_addr_fault(regs, hw_error_code, address);
1505	else
1506		do_user_addr_fault(regs, hw_error_code, address);
1507}
1508NOKPROBE_SYMBOL(__do_page_fault);
1509
1510static __always_inline void
1511trace_page_fault_entries(struct pt_regs *regs, unsigned long error_code,
1512			 unsigned long address)
1513{
1514	if (!trace_pagefault_enabled())
1515		return;
1516
1517	if (user_mode(regs))
1518		trace_page_fault_user(address, regs, error_code);
1519	else
1520		trace_page_fault_kernel(address, regs, error_code);
1521}
1522
1523dotraplinkage void
1524do_page_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
1525{
1526	enum ctx_state prev_state;
1527
1528	prev_state = exception_enter();
1529	trace_page_fault_entries(regs, error_code, address);
1530	__do_page_fault(regs, error_code, address);
1531	exception_exit(prev_state);
1532}
1533NOKPROBE_SYMBOL(do_page_fault);
v6.9.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 *  Copyright (C) 1995  Linus Torvalds
   4 *  Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
   5 *  Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
   6 */
   7#include <linux/sched.h>		/* test_thread_flag(), ...	*/
   8#include <linux/sched/task_stack.h>	/* task_stack_*(), ...		*/
   9#include <linux/kdebug.h>		/* oops_begin/end, ...		*/
  10#include <linux/extable.h>		/* search_exception_tables	*/
  11#include <linux/memblock.h>		/* max_low_pfn			*/
  12#include <linux/kfence.h>		/* kfence_handle_page_fault	*/
  13#include <linux/kprobes.h>		/* NOKPROBE_SYMBOL, ...		*/
  14#include <linux/mmiotrace.h>		/* kmmio_handler, ...		*/
  15#include <linux/perf_event.h>		/* perf_sw_event		*/
  16#include <linux/hugetlb.h>		/* hstate_index_to_shift	*/
  17#include <linux/prefetch.h>		/* prefetchw			*/
  18#include <linux/context_tracking.h>	/* exception_enter(), ...	*/
  19#include <linux/uaccess.h>		/* faulthandler_disabled()	*/
  20#include <linux/efi.h>			/* efi_crash_gracefully_on_page_fault()*/
  21#include <linux/mm_types.h>
  22#include <linux/mm.h>			/* find_and_lock_vma() */
  23
  24#include <asm/cpufeature.h>		/* boot_cpu_has, ...		*/
  25#include <asm/traps.h>			/* dotraplinkage, ...		*/
  26#include <asm/fixmap.h>			/* VSYSCALL_ADDR		*/
  27#include <asm/vsyscall.h>		/* emulate_vsyscall		*/
  28#include <asm/vm86.h>			/* struct vm86			*/
  29#include <asm/mmu_context.h>		/* vma_pkey()			*/
  30#include <asm/efi.h>			/* efi_crash_gracefully_on_page_fault()*/
  31#include <asm/desc.h>			/* store_idt(), ...		*/
  32#include <asm/cpu_entry_area.h>		/* exception stack		*/
  33#include <asm/pgtable_areas.h>		/* VMALLOC_START, ...		*/
  34#include <asm/kvm_para.h>		/* kvm_handle_async_pf		*/
  35#include <asm/vdso.h>			/* fixup_vdso_exception()	*/
  36#include <asm/irq_stack.h>
  37#include <asm/fred.h>
  38#include <asm/sev.h>			/* snp_dump_hva_rmpentry()	*/
  39
  40#define CREATE_TRACE_POINTS
  41#include <asm/trace/exceptions.h>
  42
  43/*
  44 * Returns 0 if mmiotrace is disabled, or if the fault is not
  45 * handled by mmiotrace:
  46 */
  47static nokprobe_inline int
  48kmmio_fault(struct pt_regs *regs, unsigned long addr)
  49{
  50	if (unlikely(is_kmmio_active()))
  51		if (kmmio_handler(regs, addr) == 1)
  52			return -1;
  53	return 0;
  54}
  55
  56/*
  57 * Prefetch quirks:
  58 *
  59 * 32-bit mode:
  60 *
  61 *   Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
  62 *   Check that here and ignore it.  This is AMD erratum #91.
  63 *
  64 * 64-bit mode:
  65 *
  66 *   Sometimes the CPU reports invalid exceptions on prefetch.
  67 *   Check that here and ignore it.
  68 *
  69 * Opcode checker based on code by Richard Brunner.
  70 */
  71static inline int
  72check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
  73		      unsigned char opcode, int *prefetch)
  74{
  75	unsigned char instr_hi = opcode & 0xf0;
  76	unsigned char instr_lo = opcode & 0x0f;
  77
  78	switch (instr_hi) {
  79	case 0x20:
  80	case 0x30:
  81		/*
  82		 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
  83		 * In X86_64 long mode, the CPU will signal invalid
  84		 * opcode if some of these prefixes are present so
  85		 * X86_64 will never get here anyway
  86		 */
  87		return ((instr_lo & 7) == 0x6);
  88#ifdef CONFIG_X86_64
  89	case 0x40:
  90		/*
  91		 * In 64-bit mode 0x40..0x4F are valid REX prefixes
  92		 */
  93		return (!user_mode(regs) || user_64bit_mode(regs));
  94#endif
  95	case 0x60:
  96		/* 0x64 thru 0x67 are valid prefixes in all modes. */
  97		return (instr_lo & 0xC) == 0x4;
  98	case 0xF0:
  99		/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
 100		return !instr_lo || (instr_lo>>1) == 1;
 101	case 0x00:
 102		/* Prefetch instruction is 0x0F0D or 0x0F18 */
 103		if (get_kernel_nofault(opcode, instr))
 104			return 0;
 105
 106		*prefetch = (instr_lo == 0xF) &&
 107			(opcode == 0x0D || opcode == 0x18);
 108		return 0;
 109	default:
 110		return 0;
 111	}
 112}
 113
 114static bool is_amd_k8_pre_npt(void)
 115{
 116	struct cpuinfo_x86 *c = &boot_cpu_data;
 117
 118	return unlikely(IS_ENABLED(CONFIG_CPU_SUP_AMD) &&
 119			c->x86_vendor == X86_VENDOR_AMD &&
 120			c->x86 == 0xf && c->x86_model < 0x40);
 121}
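
v6.9.4 hoists the erratum #91 applicability test into is_amd_k8_pre_npt(): AMD, family 0xf, model below 0x40. The same family/model can be read from user space with CPUID leaf 1; a sketch follows, with the extended family/model handling shown only for the family-0xf case that matters here.

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx, family, model;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;

	family = (eax >> 8) & 0xf;
	model  = (eax >> 4) & 0xf;
	if (family == 0xf) {				/* K8 and later report extended fields */
		family += (eax >> 20) & 0xff;
		model  |= ((eax >> 16) & 0xf) << 4;
	}

	printf("family=%#x model=%#x pre-NPT K8=%d\n",
	       family, model, family == 0xf && model < 0x40);
	return 0;
}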
 122
 123static int
 124is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
 125{
 126	unsigned char *max_instr;
 127	unsigned char *instr;
 128	int prefetch = 0;
 129
 130	/* Erratum #91 affects AMD K8, pre-NPT CPUs */
 131	if (!is_amd_k8_pre_npt())
 132		return 0;
 133
 134	/*
 135	 * If it was a exec (instruction fetch) fault on NX page, then
 136	 * do not ignore the fault:
 137	 */
 138	if (error_code & X86_PF_INSTR)
 139		return 0;
 140
 141	instr = (void *)convert_ip_to_linear(current, regs);
 142	max_instr = instr + 15;
 143
 144	/*
 145	 * This code has historically always bailed out if IP points to a
 146	 * not-present page (e.g. due to a race).  No one has ever
 147	 * complained about this.
 148	 */
 149	pagefault_disable();
 150
 151	while (instr < max_instr) {
 152		unsigned char opcode;
 153
 154		if (user_mode(regs)) {
 155			if (get_user(opcode, (unsigned char __user *) instr))
 156				break;
 157		} else {
 158			if (get_kernel_nofault(opcode, instr))
 159				break;
 160		}
 161
 162		instr++;
 163
 164		if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
 165			break;
 166	}
 167
 168	pagefault_enable();
 169	return prefetch;
 170}
 171
 172DEFINE_SPINLOCK(pgd_lock);
 173LIST_HEAD(pgd_list);
 174
 175#ifdef CONFIG_X86_32
 176static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
 177{
 178	unsigned index = pgd_index(address);
 179	pgd_t *pgd_k;
 180	p4d_t *p4d, *p4d_k;
 181	pud_t *pud, *pud_k;
 182	pmd_t *pmd, *pmd_k;
 183
 184	pgd += index;
 185	pgd_k = init_mm.pgd + index;
 186
 187	if (!pgd_present(*pgd_k))
 188		return NULL;
 189
 190	/*
 191	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
 192	 * and redundant with the set_pmd() on non-PAE. As would
 193	 * set_p4d/set_pud.
 194	 */
 195	p4d = p4d_offset(pgd, address);
 196	p4d_k = p4d_offset(pgd_k, address);
 197	if (!p4d_present(*p4d_k))
 198		return NULL;
 199
 200	pud = pud_offset(p4d, address);
 201	pud_k = pud_offset(p4d_k, address);
 202	if (!pud_present(*pud_k))
 203		return NULL;
 204
 205	pmd = pmd_offset(pud, address);
 206	pmd_k = pmd_offset(pud_k, address);
 207
 208	if (pmd_present(*pmd) != pmd_present(*pmd_k))
 209		set_pmd(pmd, *pmd_k);
 210
 211	if (!pmd_present(*pmd_k))
 212		return NULL;
 213	else
 214		BUG_ON(pmd_pfn(*pmd) != pmd_pfn(*pmd_k));
 215
 216	return pmd_k;
 217}
 218
 219/*
 220 *   Handle a fault on the vmalloc or module mapping area
 221 *
 222 *   This is needed because there is a race condition between the time
 223 *   when the vmalloc mapping code updates the PMD to the point in time
 224 *   where it synchronizes this update with the other page-tables in the
 225 *   system.
 226 *
 227 *   In this race window another thread/CPU can map an area on the same
 228 *   PMD, finds it already present and does not synchronize it with the
 229 *   rest of the system yet. As a result v[mz]alloc might return areas
 230 *   which are not mapped in every page-table in the system, causing an
 231 *   unhandled page-fault when they are accessed.
 232 */
 233static noinline int vmalloc_fault(unsigned long address)
 234{
 235	unsigned long pgd_paddr;
 236	pmd_t *pmd_k;
 237	pte_t *pte_k;
 238
 239	/* Make sure we are in vmalloc area: */
 240	if (!(address >= VMALLOC_START && address < VMALLOC_END))
 241		return -1;
 242
 243	/*
 244	 * Synchronize this task's top level page-table
 245	 * with the 'reference' page table.
 246	 *
 247	 * Do _not_ use "current" here. We might be inside
 248	 * an interrupt in the middle of a task switch..
 249	 */
 250	pgd_paddr = read_cr3_pa();
 251	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
 252	if (!pmd_k)
 253		return -1;
 254
 255	if (pmd_leaf(*pmd_k))
 256		return 0;
 257
 258	pte_k = pte_offset_kernel(pmd_k, address);
 259	if (!pte_present(*pte_k))
 260		return -1;
 261
 262	return 0;
 263}
 264NOKPROBE_SYMBOL(vmalloc_fault);
 265
 266void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
 267{
 268	unsigned long addr;
 269
 270	for (addr = start & PMD_MASK;
 271	     addr >= TASK_SIZE_MAX && addr < VMALLOC_END;
 272	     addr += PMD_SIZE) {
 273		struct page *page;
 274
 275		spin_lock(&pgd_lock);
 276		list_for_each_entry(page, &pgd_list, lru) {
 277			spinlock_t *pgt_lock;
 278
 279			/* the pgt_lock only for Xen */
 280			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
 281
 282			spin_lock(pgt_lock);
 283			vmalloc_sync_one(page_address(page), addr);
 284			spin_unlock(pgt_lock);
 285		}
 286		spin_unlock(&pgd_lock);
 287	}
 288}
 289
 290static bool low_pfn(unsigned long pfn)
 291{
 292	return pfn < max_low_pfn;
 293}
 294
 295static void dump_pagetable(unsigned long address)
 296{
 297	pgd_t *base = __va(read_cr3_pa());
 298	pgd_t *pgd = &base[pgd_index(address)];
 299	p4d_t *p4d;
 300	pud_t *pud;
 301	pmd_t *pmd;
 302	pte_t *pte;
 303
 304#ifdef CONFIG_X86_PAE
 305	pr_info("*pdpt = %016Lx ", pgd_val(*pgd));
 306	if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd))
 307		goto out;
 308#define pr_pde pr_cont
 309#else
 310#define pr_pde pr_info
 311#endif
 312	p4d = p4d_offset(pgd, address);
 313	pud = pud_offset(p4d, address);
 314	pmd = pmd_offset(pud, address);
 315	pr_pde("*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd));
 316#undef pr_pde
 317
 318	/*
 319	 * We must not directly access the pte in the highpte
 320	 * case if the page table is located in highmem.
 321	 * And let's rather not kmap-atomic the pte, just in case
 322	 * it's allocated already:
 323	 */
 324	if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_leaf(*pmd))
 325		goto out;
 326
 327	pte = pte_offset_kernel(pmd, address);
 328	pr_cont("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte));
 329out:
 330	pr_cont("\n");
 331}
 332
 333#else /* CONFIG_X86_64: */
 334
 335#ifdef CONFIG_CPU_SUP_AMD
 336static const char errata93_warning[] =
 337KERN_ERR 
 338"******* Your BIOS seems to not contain a fix for K8 errata #93\n"
 339"******* Working around it, but it may cause SEGVs or burn power.\n"
 340"******* Please consider a BIOS update.\n"
 341"******* Disabling USB legacy in the BIOS may also help.\n";
 342#endif
 343
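    /*
     * Probe a page-table pointer with get_kernel_nofault(); returns non-zero
     * if reading it would fault, so that dump_pagetable() can bail out
     * before dereferencing a bogus entry.
     */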
 344static int bad_address(void *p)
 345{
 346	unsigned long dummy;
 347
 348	return get_kernel_nofault(dummy, (unsigned long *)p);
 349}
 350
 351static void dump_pagetable(unsigned long address)
 352{
 353	pgd_t *base = __va(read_cr3_pa());
 354	pgd_t *pgd = base + pgd_index(address);
 355	p4d_t *p4d;
 356	pud_t *pud;
 357	pmd_t *pmd;
 358	pte_t *pte;
 359
 360	if (bad_address(pgd))
 361		goto bad;
 362
 363	pr_info("PGD %lx ", pgd_val(*pgd));
 364
 365	if (!pgd_present(*pgd))
 366		goto out;
 367
 368	p4d = p4d_offset(pgd, address);
 369	if (bad_address(p4d))
 370		goto bad;
 371
 372	pr_cont("P4D %lx ", p4d_val(*p4d));
 373	if (!p4d_present(*p4d) || p4d_leaf(*p4d))
 374		goto out;
 375
 376	pud = pud_offset(p4d, address);
 377	if (bad_address(pud))
 378		goto bad;
 379
 380	pr_cont("PUD %lx ", pud_val(*pud));
 381	if (!pud_present(*pud) || pud_leaf(*pud))
 382		goto out;
 383
 384	pmd = pmd_offset(pud, address);
 385	if (bad_address(pmd))
 386		goto bad;
 387
 388	pr_cont("PMD %lx ", pmd_val(*pmd));
 389	if (!pmd_present(*pmd) || pmd_leaf(*pmd))
 390		goto out;
 391
 392	pte = pte_offset_kernel(pmd, address);
 393	if (bad_address(pte))
 394		goto bad;
 395
 396	pr_cont("PTE %lx", pte_val(*pte));
 397out:
 398	pr_cont("\n");
 399	return;
 400bad:
 401	pr_info("BAD\n");
 402}
 403
 404#endif /* CONFIG_X86_64 */
 405
 406/*
 407 * Workaround for K8 erratum #93 & buggy BIOS.
 408 *
 409 * BIOS SMM functions are required to use a specific workaround
 410 * to avoid corruption of the 64bit RIP register on C stepping K8.
 411 *
 412 * A lot of BIOSes that didn't get tested properly miss this.
 413 *
 414 * The OS sees this as a page fault with the upper 32bits of RIP cleared.
 415 * Try to work around it here.
 416 *
 417 * Note we only handle faults in kernel here.
 418 * Does nothing on 32-bit.
 419 */
 420static int is_errata93(struct pt_regs *regs, unsigned long address)
 421{
 422#if defined(CONFIG_X86_64) && defined(CONFIG_CPU_SUP_AMD)
 423	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD
 424	    || boot_cpu_data.x86 != 0xf)
 425		return 0;
 426
 427	if (user_mode(regs))
 428		return 0;
 429
 430	if (address != regs->ip)
 431		return 0;
 432
 433	if ((address >> 32) != 0)
 434		return 0;
 435
 436	address |= 0xffffffffUL << 32;
 437	if ((address >= (u64)_stext && address <= (u64)_etext) ||
 438	    (address >= MODULES_VADDR && address <= MODULES_END)) {
 439		printk_once(errata93_warning);
 440		regs->ip = address;
 441		return 1;
 442	}
 443#endif
 444	return 0;
 445}
 446
 447/*
 448 * Work around K8 erratum #100: K8 in compat mode occasionally jumps
 449 * to illegal addresses >4GB.
 450 *
 451 * We catch this in the page fault handler because these addresses
 452 * are not reachable. Just detect this case and return.  Any code
 453 * segment in LDT is compatibility mode.
 454 */
 455static int is_errata100(struct pt_regs *regs, unsigned long address)
 456{
 457#ifdef CONFIG_X86_64
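    	/*
    	 * Bit 2 of a segment selector is the TI bit, so (regs->cs & (1<<2))
    	 * means CS was loaded from the LDT -- which, per the comment above,
    	 * is always a compatibility-mode segment.
    	 */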
 458	if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
 459		return 1;
 460#endif
 461	return 0;
 462}
 463
 464/* Pentium F0 0F C7 C8 bug workaround: */
 465static int is_f00f_bug(struct pt_regs *regs, unsigned long error_code,
 466		       unsigned long address)
 467{
 468#ifdef CONFIG_X86_F00F_BUG
 469	if (boot_cpu_has_bug(X86_BUG_F00F) && !(error_code & X86_PF_USER) &&
 470	    idt_is_f00f_address(address)) {
 471		handle_invalid_op(regs);
 472		return 1;
 473	}
 474#endif
 475	return 0;
 476}
 477
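    /*
     * Decode the LDT or TSS descriptor that 'index' selects in the GDT and
     * print its base and limit; used by the oops path below when a fault
     * with a kernel-style error code arrives from user mode.
     */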
 478static void show_ldttss(const struct desc_ptr *gdt, const char *name, u16 index)
 479{
 480	u32 offset = (index >> 3) * sizeof(struct desc_struct);
 481	unsigned long addr;
 482	struct ldttss_desc desc;
 483
 484	if (index == 0) {
 485		pr_alert("%s: NULL\n", name);
 486		return;
 487	}
 488
 489	if (offset + sizeof(struct ldttss_desc) >= gdt->size) {
 490		pr_alert("%s: 0x%hx -- out of bounds\n", name, index);
 491		return;
 492	}
 493
 494	if (copy_from_kernel_nofault(&desc, (void *)(gdt->address + offset),
 495			      sizeof(struct ldttss_desc))) {
 496		pr_alert("%s: 0x%hx -- GDT entry is not readable\n",
 497			 name, index);
 498		return;
 499	}
 500
 501	addr = desc.base0 | (desc.base1 << 16) | ((unsigned long)desc.base2 << 24);
 502#ifdef CONFIG_X86_64
 503	addr |= ((u64)desc.base3 << 32);
 504#endif
 505	pr_alert("%s: 0x%hx -- base=0x%lx limit=0x%x\n",
 506		 name, index, addr, (desc.limit0 | (desc.limit1 << 16)));
 507}
 508
 509static void
 510show_fault_oops(struct pt_regs *regs, unsigned long error_code, unsigned long address)
 511{
 512	if (!oops_may_print())
 513		return;
 514
 515	if (error_code & X86_PF_INSTR) {
 516		unsigned int level;
 517		pgd_t *pgd;
 518		pte_t *pte;
 519
 520		pgd = __va(read_cr3_pa());
 521		pgd += pgd_index(address);
 522
 523		pte = lookup_address_in_pgd(pgd, address, &level);
 524
 525		if (pte && pte_present(*pte) && !pte_exec(*pte))
 526			pr_crit("kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n",
 527				from_kuid(&init_user_ns, current_uid()));
 528		if (pte && pte_present(*pte) && pte_exec(*pte) &&
 529				(pgd_flags(*pgd) & _PAGE_USER) &&
 530				(__read_cr4() & X86_CR4_SMEP))
 531			pr_crit("unable to execute userspace code (SMEP?) (uid: %d)\n",
 532				from_kuid(&init_user_ns, current_uid()));
 533	}
 534
 535	if (address < PAGE_SIZE && !user_mode(regs))
 536		pr_alert("BUG: kernel NULL pointer dereference, address: %px\n",
 537			(void *)address);
 538	else
 539		pr_alert("BUG: unable to handle page fault for address: %px\n",
 540			(void *)address);
 541
 542	pr_alert("#PF: %s %s in %s mode\n",
 543		 (error_code & X86_PF_USER)  ? "user" : "supervisor",
 544		 (error_code & X86_PF_INSTR) ? "instruction fetch" :
 545		 (error_code & X86_PF_WRITE) ? "write access" :
 546					       "read access",
 547			     user_mode(regs) ? "user" : "kernel");
 548	pr_alert("#PF: error_code(0x%04lx) - %s\n", error_code,
 549		 !(error_code & X86_PF_PROT) ? "not-present page" :
 550		 (error_code & X86_PF_RSVD)  ? "reserved bit violation" :
 551		 (error_code & X86_PF_PK)    ? "protection keys violation" :
 552		 (error_code & X86_PF_RMP)   ? "RMP violation" :
 553					       "permissions violation");
 554
 555	if (!(error_code & X86_PF_USER) && user_mode(regs)) {
 556		struct desc_ptr idt, gdt;
 557		u16 ldtr, tr;
 558
 559		/*
 560		 * This can happen for quite a few reasons.  The more obvious
 561		 * ones are faults accessing the GDT, or LDT.  Perhaps
 562		 * surprisingly, if the CPU tries to deliver a benign or
 563		 * contributory exception from user code and gets a page fault
 564		 * during delivery, the page fault can be delivered as though
 565		 * it originated directly from user code.  This could happen
 566		 * due to wrong permissions on the IDT, GDT, LDT, TSS, or
 567		 * kernel or IST stack.
 568		 */
 569		store_idt(&idt);
 570
 571		/* Usable even on Xen PV -- it's just slow. */
 572		native_store_gdt(&gdt);
 573
 574		pr_alert("IDT: 0x%lx (limit=0x%hx) GDT: 0x%lx (limit=0x%hx)\n",
 575			 idt.address, idt.size, gdt.address, gdt.size);
 576
 577		store_ldt(ldtr);
 578		show_ldttss(&gdt, "LDTR", ldtr);
 579
 580		store_tr(tr);
 581		show_ldttss(&gdt, "TR", tr);
 582	}
 583
 584	dump_pagetable(address);
 585
 586	if (error_code & X86_PF_RMP)
 587		snp_dump_hva_rmpentry(address);
 588}
 589
 590static noinline void
 591pgtable_bad(struct pt_regs *regs, unsigned long error_code,
 592	    unsigned long address)
 593{
 594	struct task_struct *tsk;
 595	unsigned long flags;
 596	int sig;
 597
 598	flags = oops_begin();
 599	tsk = current;
 600	sig = SIGKILL;
 601
 602	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
 603	       tsk->comm, address);
 604	dump_pagetable(address);
 605
 606	if (__die("Bad pagetable", regs, error_code))
 607		sig = 0;
 608
 609	oops_end(flags, regs, sig);
 610}
 611
 612static void sanitize_error_code(unsigned long address,
 613				unsigned long *error_code)
 614{
 615	/*
 616	 * To avoid leaking information about the kernel page
 617	 * table layout, pretend that user-mode accesses to
 618	 * kernel addresses are always protection faults.
 619	 *
 620	 * NB: This means that failed vsyscalls with vsyscall=none
 621	 * will have the PROT bit.  This doesn't leak any
 622	 * information and does not appear to cause any problems.
 623	 */
 624	if (address >= TASK_SIZE_MAX)
 625		*error_code |= X86_PF_PROT;
 626}
 627
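    /*
     * Stash the trap number, error code and faulting address in the task so
     * the signal-delivery and ptrace paths can report them; X86_PF_USER is
     * set because the fault is being reported to user space as a signal.
     */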
 628static void set_signal_archinfo(unsigned long address,
 629				unsigned long error_code)
 630{
 631	struct task_struct *tsk = current;
 632
 633	tsk->thread.trap_nr = X86_TRAP_PF;
 634	tsk->thread.error_code = error_code | X86_PF_USER;
 635	tsk->thread.cr2 = address;
 636}
 637
 638static noinline void
 639page_fault_oops(struct pt_regs *regs, unsigned long error_code,
 640		unsigned long address)
 641{
 642#ifdef CONFIG_VMAP_STACK
 643	struct stack_info info;
 644#endif
 645	unsigned long flags;
 646	int sig;
 647
 648	if (user_mode(regs)) {
 649		/*
 650		 * Implicit kernel access from user mode?  Skip the stack
 651		 * overflow and EFI special cases.
 652		 */
 653		goto oops;
 654	}
 655
 656#ifdef CONFIG_VMAP_STACK
 657	/*
 658	 * Stack overflow?  During boot, we can fault near the initial
 659	 * stack in the direct map, but that's not an overflow -- check
 660	 * that we're in vmalloc space to avoid this.
 661	 */
 662	if (is_vmalloc_addr((void *)address) &&
 663	    get_stack_guard_info((void *)address, &info)) {
 664		/*
 665		 * We're likely to be running with very little stack space
 666		 * left.  It's plausible that we'd hit this condition but
 667		 * double-fault even before we get this far, in which case
 668		 * we're fine: the double-fault handler will deal with it.
 669		 *
 670		 * We don't want to make it all the way into the oops code
 671		 * and then double-fault, though, because we're likely to
 672		 * break the console driver and lose most of the stack dump.
 673		 */
 674		call_on_stack(__this_cpu_ist_top_va(DF) - sizeof(void*),
 675			      handle_stack_overflow,
 676			      ASM_CALL_ARG3,
 677			      , [arg1] "r" (regs), [arg2] "r" (address), [arg3] "r" (&info));
 678
 679		unreachable();
 680	}
 681#endif
 682
 683	/*
 684	 * Buggy firmware could access regions which might page fault.  If
 685	 * this happens, EFI has a special OOPS path that will try to
 686	 * avoid hanging the system.
 687	 */
 688	if (IS_ENABLED(CONFIG_EFI))
 689		efi_crash_gracefully_on_page_fault(address);
 690
 691	/* Only not-present faults should be handled by KFENCE. */
 692	if (!(error_code & X86_PF_PROT) &&
 693	    kfence_handle_page_fault(address, error_code & X86_PF_WRITE, regs))
 694		return;
 695
 696oops:
 697	/*
 698	 * Oops. The kernel tried to access some bad page. We'll have to
 699	 * terminate things with extreme prejudice:
 700	 */
 701	flags = oops_begin();
 702
 703	show_fault_oops(regs, error_code, address);
 704
 705	if (task_stack_end_corrupted(current))
 706		printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
 707
 708	sig = SIGKILL;
 709	if (__die("Oops", regs, error_code))
 710		sig = 0;
 711
 712	/* Executive summary in case the body of the oops scrolled away */
 713	printk(KERN_DEFAULT "CR2: %016lx\n", address);
 714
 715	oops_end(flags, regs, sig);
 716}
 717
 718static noinline void
 719kernelmode_fixup_or_oops(struct pt_regs *regs, unsigned long error_code,
 720			 unsigned long address, int signal, int si_code,
 721			 u32 pkey)
 722{
 723	WARN_ON_ONCE(user_mode(regs));
 724
 725	/* Are we prepared to handle this kernel fault? */
 726	if (fixup_exception(regs, X86_TRAP_PF, error_code, address))
 727		return;
 728
 729	/*
 730	 * AMD erratum #91 manifests as a spurious page fault on a PREFETCH
 731	 * instruction.
 732	 */
 733	if (is_prefetch(regs, error_code, address))
 734		return;
 735
 736	page_fault_oops(regs, error_code, address);
 737}
 738
 739/*
 740 * Print out info about fatal segfaults, if the show_unhandled_signals
 741 * sysctl is set:
 742 */
 743static inline void
 744show_signal_msg(struct pt_regs *regs, unsigned long error_code,
 745		unsigned long address, struct task_struct *tsk)
 746{
 747	const char *loglvl = task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG;
 748	/* This is a racy snapshot, but it's better than nothing. */
 749	int cpu = raw_smp_processor_id();
 750
 751	if (!unhandled_signal(tsk, SIGSEGV))
 752		return;
 753
 754	if (!printk_ratelimit())
 755		return;
 756
 757	printk("%s%s[%d]: segfault at %lx ip %px sp %px error %lx",
 758		loglvl, tsk->comm, task_pid_nr(tsk), address,
 759		(void *)regs->ip, (void *)regs->sp, error_code);
 760
 761	print_vma_addr(KERN_CONT " in ", regs->ip);
 762
 763	/*
 764	 * Dump the likely CPU where the fatal segfault happened.
 765	 * This can help identify faulty hardware.
 766	 */
 767	printk(KERN_CONT " likely on CPU %d (core %d, socket %d)", cpu,
 768	       topology_core_id(cpu), topology_physical_package_id(cpu));
 769
 770
 771	printk(KERN_CONT "\n");
 772
 773	show_opcodes(regs, loglvl);
 774}
 775
 776static void
 777__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
 778		       unsigned long address, u32 pkey, int si_code)
 779{
 780	struct task_struct *tsk = current;
 781
 782	if (!user_mode(regs)) {
 783		kernelmode_fixup_or_oops(regs, error_code, address,
 784					 SIGSEGV, si_code, pkey);
 785		return;
 786	}
 787
 788	if (!(error_code & X86_PF_USER)) {
 789		/* Implicit user access to kernel memory -- just oops */
 790		page_fault_oops(regs, error_code, address);
 791		return;
 792	}
 793
 794	/*
 795	 * User mode accesses just cause a SIGSEGV.
 796	 * It's possible to have interrupts off here:
 797	 */
 798	local_irq_enable();
 799
 800	/*
 801	 * Valid to do another page fault here because this one came
 802	 * from user space:
 803	 */
 804	if (is_prefetch(regs, error_code, address))
 805		return;
 806
 807	if (is_errata100(regs, address))
 808		return;
 809
 810	sanitize_error_code(address, &error_code);
 811
 812	if (fixup_vdso_exception(regs, X86_TRAP_PF, error_code, address))
 813		return;
 814
 815	if (likely(show_unhandled_signals))
 816		show_signal_msg(regs, error_code, address, tsk);
 817
 818	set_signal_archinfo(address, error_code);
 819
 820	if (si_code == SEGV_PKUERR)
 821		force_sig_pkuerr((void __user *)address, pkey);
 822	else
 823		force_sig_fault(SIGSEGV, si_code, (void __user *)address);
 824
 825	local_irq_disable();
 826}
 827
 828static noinline void
 829bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
 830		     unsigned long address)
 831{
 832	__bad_area_nosemaphore(regs, error_code, address, 0, SEGV_MAPERR);
 833}
 834
 835static void
 836__bad_area(struct pt_regs *regs, unsigned long error_code,
 837	   unsigned long address, u32 pkey, int si_code)
 838{
 839	struct mm_struct *mm = current->mm;
 840	/*
 841	 * Something tried to access memory that isn't in our memory map..
 842	 * Fix it, but check if it's kernel or user first..
 843	 */
 844	mmap_read_unlock(mm);
 845
 846	__bad_area_nosemaphore(regs, error_code, address, pkey, si_code);
 847}
 848
 849static inline bool bad_area_access_from_pkeys(unsigned long error_code,
 850		struct vm_area_struct *vma)
 851{
 852	/* This code is always called on the current mm */
 853	bool foreign = false;
 854
 855	if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
 856		return false;
 857	if (error_code & X86_PF_PK)
 858		return true;
 859	/* this checks permission keys on the VMA: */
 860	if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE),
 861				       (error_code & X86_PF_INSTR), foreign))
 862		return true;
 863	return false;
 864}
 865
 866static noinline void
 867bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
 868		      unsigned long address, struct vm_area_struct *vma)
 869{
 870	/*
 871	 * This OSPKE check is not strictly necessary at runtime.
 872	 * But, doing it this way allows compiler optimizations
 873	 * if pkeys are compiled out.
 874	 */
 875	if (bad_area_access_from_pkeys(error_code, vma)) {
 876		/*
 877		 * A protection key fault means that the PKRU value did not allow
 878		 * access to some PTE.  Userspace can figure out what PKRU was
 879		 * from the XSAVE state.  This function captures the pkey from
 880		 * the vma and passes it to userspace so userspace can discover
 881		 * which protection key was set on the PTE.
 882		 *
 883		 * If we get here, we know that the hardware signaled a X86_PF_PK
 884		 * fault and that there was a VMA once we got in the fault
 885		 * handler.  It does *not* guarantee that the VMA we find here
 886		 * was the one that we faulted on.
 887		 *
 888		 * 1. T1   : mprotect_key(foo, PAGE_SIZE, pkey=4);
 889		 * 2. T1   : set PKRU to deny access to pkey=4, touches page
 890		 * 3. T1   : faults...
 891		 * 4.    T2: mprotect_key(foo, PAGE_SIZE, pkey=5);
 892		 * 5. T1   : enters fault handler, takes mmap_lock, etc...
 893		 * 6. T1   : reaches here, sees vma_pkey(vma)=5, when we really
 894		 *	     faulted on a pte with its pkey=4.
 895		 */
 896		u32 pkey = vma_pkey(vma);
 897
 898		__bad_area(regs, error_code, address, pkey, SEGV_PKUERR);
 899	} else {
 900		__bad_area(regs, error_code, address, 0, SEGV_ACCERR);
 901	}
 902}
 903
 904static void
 905do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
 906	  vm_fault_t fault)
 907{
 908	/* Kernel mode? Handle exceptions or die: */
 909	if (!user_mode(regs)) {
 910		kernelmode_fixup_or_oops(regs, error_code, address,
 911					 SIGBUS, BUS_ADRERR, ARCH_DEFAULT_PKEY);
 912		return;
 913	}
 914
 915	/* User-space => ok to do another page fault: */
 916	if (is_prefetch(regs, error_code, address))
 917		return;
 918
 919	sanitize_error_code(address, &error_code);
 920
 921	if (fixup_vdso_exception(regs, X86_TRAP_PF, error_code, address))
 922		return;
 923
 924	set_signal_archinfo(address, error_code);
 925
 926#ifdef CONFIG_MEMORY_FAILURE
 927	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
 928		struct task_struct *tsk = current;
 929		unsigned lsb = 0;
 930
 931		pr_err(
 932	"MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
 933			tsk->comm, tsk->pid, address);
 934		if (fault & VM_FAULT_HWPOISON_LARGE)
 935			lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
 936		if (fault & VM_FAULT_HWPOISON)
 937			lsb = PAGE_SHIFT;
 938		force_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb);
 939		return;
 940	}
 941#endif
 942	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
 943}
 944
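    /*
     * Check whether a (possibly huge) page-table entry grants the permission
     * the faulting access needed: returns 1 when the entry permits the write
     * or instruction fetch (i.e. the fault came from a stale TLB entry), 0
     * when the entry itself still forbids it.
     */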
 945static int spurious_kernel_fault_check(unsigned long error_code, pte_t *pte)
 946{
 947	if ((error_code & X86_PF_WRITE) && !pte_write(*pte))
 948		return 0;
 949
 950	if ((error_code & X86_PF_INSTR) && !pte_exec(*pte))
 951		return 0;
 952
 953	return 1;
 954}
 955
 956/*
 957 * Handle a spurious fault caused by a stale TLB entry.
 958 *
 959 * This allows us to lazily refresh the TLB when increasing the
 960 * permissions of a kernel page (RO -> RW or NX -> X).  Doing it
 961 * eagerly is very expensive since that implies doing a full
 962 * cross-processor TLB flush, even if no stale TLB entries exist
 963 * on other processors.
 964 *
 965 * Spurious faults may only occur if the TLB contains an entry with
 966 * fewer permissions than the page table entry.  Non-present (P = 0)
 967 * and reserved bit (R = 1) faults are never spurious.
 968 *
 969 * There are no security implications to leaving a stale TLB when
 970 * increasing the permissions on a page.
 971 *
 972 * Returns non-zero if a spurious fault was handled, zero otherwise.
 973 *
 974 * See Intel Developer's Manual Vol 3 Section 4.10.4.3, bullet 3
 975 * (Optional Invalidation).
 976 */
 977static noinline int
 978spurious_kernel_fault(unsigned long error_code, unsigned long address)
 979{
 980	pgd_t *pgd;
 981	p4d_t *p4d;
 982	pud_t *pud;
 983	pmd_t *pmd;
 984	pte_t *pte;
 985	int ret;
 986
 987	/*
 988	 * Only writes to RO or instruction fetches from NX may cause
 989	 * spurious faults.
 990	 *
 991	 * These could be from user or supervisor accesses but the TLB
 992	 * is only lazily flushed after a kernel mapping protection
 993	 * change, so user accesses are not expected to cause spurious
 994	 * faults.
 995	 */
 996	if (error_code != (X86_PF_WRITE | X86_PF_PROT) &&
 997	    error_code != (X86_PF_INSTR | X86_PF_PROT))
 998		return 0;
 999
1000	pgd = init_mm.pgd + pgd_index(address);
1001	if (!pgd_present(*pgd))
1002		return 0;
1003
1004	p4d = p4d_offset(pgd, address);
1005	if (!p4d_present(*p4d))
1006		return 0;
1007
1008	if (p4d_leaf(*p4d))
1009		return spurious_kernel_fault_check(error_code, (pte_t *) p4d);
1010
1011	pud = pud_offset(p4d, address);
1012	if (!pud_present(*pud))
1013		return 0;
1014
1015	if (pud_leaf(*pud))
1016		return spurious_kernel_fault_check(error_code, (pte_t *) pud);
1017
1018	pmd = pmd_offset(pud, address);
1019	if (!pmd_present(*pmd))
1020		return 0;
1021
1022	if (pmd_leaf(*pmd))
1023		return spurious_kernel_fault_check(error_code, (pte_t *) pmd);
1024
1025	pte = pte_offset_kernel(pmd, address);
1026	if (!pte_present(*pte))
1027		return 0;
1028
1029	ret = spurious_kernel_fault_check(error_code, pte);
1030	if (!ret)
1031		return 0;
1032
1033	/*
1034	 * Make sure we have permissions in PMD.
1035	 * If not, then there's a bug in the page tables:
1036	 */
1037	ret = spurious_kernel_fault_check(error_code, (pte_t *) pmd);
1038	WARN_ONCE(!ret, "PMD has incorrect permission bits\n");
1039
1040	return ret;
1041}
1042NOKPROBE_SYMBOL(spurious_kernel_fault);
1043
1044int show_unhandled_signals = 1;
1045
1046static inline int
1047access_error(unsigned long error_code, struct vm_area_struct *vma)
1048{
1049	/* This is only called for the current mm, so: */
1050	bool foreign = false;
1051
1052	/*
1053	 * Read or write was blocked by protection keys.  This is
1054	 * always an unconditional error and can never result in
1055	 * a follow-up action to resolve the fault, like a COW.
1056	 */
1057	if (error_code & X86_PF_PK)
1058		return 1;
1059
1060	/*
1061	 * SGX hardware blocked the access.  This usually happens
1062	 * when the enclave memory contents have been destroyed, like
1063	 * after a suspend/resume cycle. In any case, the kernel can't
1064	 * fix the cause of the fault.  Handle the fault as an access
1065	 * error even in cases where no actual access violation
1066	 * occurred.  This allows userspace to rebuild the enclave in
1067	 * response to the signal.
1068	 */
1069	if (unlikely(error_code & X86_PF_SGX))
1070		return 1;
1071
1072	/*
1073	 * Make sure to check the VMA so that we do not perform
1074	 * faults just to hit a X86_PF_PK as soon as we fill in a
1075	 * page.
1076	 */
1077	if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE),
1078				       (error_code & X86_PF_INSTR), foreign))
1079		return 1;
1080
1081	/*
1082	 * Shadow stack accesses (PF_SHSTK=1) are only permitted to
1083	 * shadow stack VMAs. All other accesses result in an error.
1084	 */
1085	if (error_code & X86_PF_SHSTK) {
1086		if (unlikely(!(vma->vm_flags & VM_SHADOW_STACK)))
1087			return 1;
1088		if (unlikely(!(vma->vm_flags & VM_WRITE)))
1089			return 1;
1090		return 0;
1091	}
1092
1093	if (error_code & X86_PF_WRITE) {
1094		/* write, present and write, not present: */
1095		if (unlikely(vma->vm_flags & VM_SHADOW_STACK))
1096			return 1;
1097		if (unlikely(!(vma->vm_flags & VM_WRITE)))
1098			return 1;
1099		return 0;
1100	}
1101
1102	/* read, present: */
1103	if (unlikely(error_code & X86_PF_PROT))
1104		return 1;
1105
1106	/* read, not present: */
1107	if (unlikely(!vma_is_accessible(vma)))
1108		return 1;
1109
1110	return 0;
1111}
1112
1113bool fault_in_kernel_space(unsigned long address)
1114{
1115	/*
1116	 * On 64-bit systems, the vsyscall page is at an address above
1117	 * TASK_SIZE_MAX, but is not considered part of the kernel
1118	 * address space.
1119	 */
1120	if (IS_ENABLED(CONFIG_X86_64) && is_vsyscall_vaddr(address))
1121		return false;
1122
1123	return address >= TASK_SIZE_MAX;
1124}
1125
1126/*
1127 * Called for all faults where 'address' is part of the kernel address
1128 * space.  Might get called for faults that originate from *code* that
1129 * ran in userspace or the kernel.
1130 */
1131static void
1132do_kern_addr_fault(struct pt_regs *regs, unsigned long hw_error_code,
1133		   unsigned long address)
1134{
1135	/*
1136	 * Protection keys exceptions only happen on user pages.  We
1137	 * have no user pages in the kernel portion of the address
1138	 * space, so do not expect them here.
1139	 */
1140	WARN_ON_ONCE(hw_error_code & X86_PF_PK);
1141
1142#ifdef CONFIG_X86_32
1143	/*
1144	 * We can fault-in kernel-space virtual memory on-demand. The
1145	 * 'reference' page table is init_mm.pgd.
1146	 *
1147	 * NOTE! We MUST NOT take any locks for this case. We may
1148	 * be in an interrupt or a critical region, and should
1149	 * only copy the information from the master page table,
1150	 * nothing more.
1151	 *
1152	 * Before doing this on-demand faulting, ensure that the
1153	 * fault is not any of the following:
1154	 * 1. A fault on a PTE with a reserved bit set.
1155	 * 2. A fault caused by a user-mode access.  (Do not demand-
1156	 *    fault kernel memory due to user-mode accesses).
1157	 * 3. A fault caused by a page-level protection violation.
1158	 *    (A demand fault would be on a non-present page which
1159	 *     would have X86_PF_PROT==0).
1160	 *
1161	 * This is only needed to close a race condition on x86-32 in
1162	 * the vmalloc mapping/unmapping code. See the comment above
1163	 * vmalloc_fault() for details. On x86-64 the race does not
1164	 * exist as the vmalloc mappings don't need to be synchronized
1165	 * there.
1166	 */
1167	if (!(hw_error_code & (X86_PF_RSVD | X86_PF_USER | X86_PF_PROT))) {
1168		if (vmalloc_fault(address) >= 0)
1169			return;
1170	}
1171#endif
1172
1173	if (is_f00f_bug(regs, hw_error_code, address))
1174		return;
1175
1176	/* Was the fault spurious, caused by lazy TLB invalidation? */
1177	if (spurious_kernel_fault(hw_error_code, address))
1178		return;
1179
1180	/* kprobes don't want to hook the spurious faults: */
1181	if (WARN_ON_ONCE(kprobe_page_fault(regs, X86_TRAP_PF)))
1182		return;
1183
1184	/*
1185	 * Note, despite being a "bad area", there are quite a few
1186	 * acceptable reasons to get here, such as erratum fixups
1187	 * and handling kernel code that can fault, like get_user().
1188	 *
1189	 * Don't take the mm semaphore here. If we fixup a prefetch
1190	 * fault we could otherwise deadlock:
1191	 */
1192	bad_area_nosemaphore(regs, hw_error_code, address);
1193}
1194NOKPROBE_SYMBOL(do_kern_addr_fault);
1195
1196/*
1197 * Handle faults in the user portion of the address space.  Nothing in here
1198 * should check X86_PF_USER without a specific justification: for almost
1199 * all purposes, we should treat a normal kernel access to user memory
1200 * (e.g. get_user(), put_user(), etc.) the same as the WRUSS instruction.
1201 * The one exception is AC flag handling, which is, per the x86
1202 * architecture, special for WRUSS.
1203 */
1204static inline
1205void do_user_addr_fault(struct pt_regs *regs,
1206			unsigned long error_code,
1207			unsigned long address)
1208{
1209	struct vm_area_struct *vma;
1210	struct task_struct *tsk;
1211	struct mm_struct *mm;
1212	vm_fault_t fault;
1213	unsigned int flags = FAULT_FLAG_DEFAULT;
1214
1215	tsk = current;
1216	mm = tsk->mm;
1217
1218	if (unlikely((error_code & (X86_PF_USER | X86_PF_INSTR)) == X86_PF_INSTR)) {
1219		/*
1220		 * Whoops, this is kernel mode code trying to execute from
1221		 * user memory.  Unless this is AMD erratum #93, which
1222		 * corrupts RIP such that it looks like a user address,
1223		 * this is unrecoverable.  Don't even try to look up the
1224		 * VMA or look for extable entries.
1225		 */
1226		if (is_errata93(regs, address))
1227			return;
1228
1229		page_fault_oops(regs, error_code, address);
1230		return;
1231	}
1232
1233	/* kprobes don't want to hook the spurious faults: */
1234	if (WARN_ON_ONCE(kprobe_page_fault(regs, X86_TRAP_PF)))
1235		return;
1236
1237	/*
1238	 * Reserved bits are never expected to be set on
1239	 * entries in the user portion of the page tables.
1240	 */
1241	if (unlikely(error_code & X86_PF_RSVD))
1242		pgtable_bad(regs, error_code, address);
1243
1244	/*
1245	 * If SMAP is on, check for invalid kernel (supervisor) access to user
1246	 * pages in the user address space.  The odd case here is WRUSS,
1247	 * which, according to the preliminary documentation, does not respect
1248	 * SMAP and will have the USER bit set so, in all cases, SMAP
1249	 * enforcement appears to be consistent with the USER bit.
1250	 */
1251	if (unlikely(cpu_feature_enabled(X86_FEATURE_SMAP) &&
1252		     !(error_code & X86_PF_USER) &&
1253		     !(regs->flags & X86_EFLAGS_AC))) {
1254		/*
1255		 * No extable entry here.  This was a kernel access to an
1256		 * invalid pointer.  get_kernel_nofault() will not get here.
1257		 */
1258		page_fault_oops(regs, error_code, address);
1259		return;
1260	}
1261
1262	/*
1263	 * If we're in an interrupt, have no user context or are running
1264	 * in a region with pagefaults disabled, then we must not take the fault
1265	 */
1266	if (unlikely(faulthandler_disabled() || !mm)) {
1267		bad_area_nosemaphore(regs, error_code, address);
1268		return;
1269	}
1270
1271	/* Legacy check - remove this after verifying that it doesn't trigger */
1272	if (WARN_ON_ONCE(!(regs->flags & X86_EFLAGS_IF))) {
1273		bad_area_nosemaphore(regs, error_code, address);
1274		return;
1275	}
1276
1277	local_irq_enable();
1278
1279	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
1280
1281	/*
1282	 * Read-only permissions can not be expressed in shadow stack PTEs.
1283	 * Treat all shadow stack accesses as WRITE faults. This ensures
1284	 * that the MM will prepare everything (e.g., break COW) such that
1285	 * maybe_mkwrite() can create a proper shadow stack PTE.
1286	 */
1287	if (error_code & X86_PF_SHSTK)
1288		flags |= FAULT_FLAG_WRITE;
1289	if (error_code & X86_PF_WRITE)
1290		flags |= FAULT_FLAG_WRITE;
1291	if (error_code & X86_PF_INSTR)
1292		flags |= FAULT_FLAG_INSTRUCTION;
1293
1294	/*
1295	 * We set FAULT_FLAG_USER based on the register state, not
1296	 * based on X86_PF_USER. User space accesses that cause
1297	 * system page faults are still user accesses.
1298	 */
1299	if (user_mode(regs))
1300		flags |= FAULT_FLAG_USER;
1301
1302#ifdef CONFIG_X86_64
1303	/*
1304	 * Faults in the vsyscall page might need emulation.  The
1305	 * vsyscall page is at a high address (>PAGE_OFFSET), but is
1306	 * considered to be part of the user address space.
1307	 *
1308	 * The vsyscall page does not have a "real" VMA, so do this
1309	 * emulation before we go searching for VMAs.
1310	 *
1311	 * PKRU never rejects instruction fetches, so we don't need
1312	 * to consider the PF_PK bit.
1313	 */
1314	if (is_vsyscall_vaddr(address)) {
1315		if (emulate_vsyscall(error_code, regs, address))
1316			return;
1317	}
1318#endif
1319
1320	if (!(flags & FAULT_FLAG_USER))
1321		goto lock_mmap;
1322
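    	/*
    	 * Fast path: try to resolve the fault under the per-VMA lock without
    	 * taking mmap_lock.  Any failure -- no VMA, access error, or a
    	 * VM_FAULT_RETRY result -- falls back to the mmap_lock path below.
    	 */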
1323	vma = lock_vma_under_rcu(mm, address);
1324	if (!vma)
1325		goto lock_mmap;
1326
1327	if (unlikely(access_error(error_code, vma))) {
1328		vma_end_read(vma);
1329		goto lock_mmap;
1330	}
1331	fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
1332	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
1333		vma_end_read(vma);
1334
1335	if (!(fault & VM_FAULT_RETRY)) {
1336		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
1337		goto done;
1338	}
1339	count_vm_vma_lock_event(VMA_LOCK_RETRY);
1340	if (fault & VM_FAULT_MAJOR)
1341		flags |= FAULT_FLAG_TRIED;
1342
1343	/* Quick path to respond to signals */
1344	if (fault_signal_pending(fault, regs)) {
1345		if (!user_mode(regs))
1346			kernelmode_fixup_or_oops(regs, error_code, address,
1347						 SIGBUS, BUS_ADRERR,
1348						 ARCH_DEFAULT_PKEY);
1349		return;
1350	}
1351lock_mmap:
1352
1353retry:
1354	vma = lock_mm_and_find_vma(mm, address, regs);
1355	if (unlikely(!vma)) {
1356		bad_area_nosemaphore(regs, error_code, address);
1357		return;
1358	}
1359
1360	/*
1361	 * Ok, we have a good vm_area for this memory access, so
1362	 * we can handle it..
1363	 */
1364	if (unlikely(access_error(error_code, vma))) {
1365		bad_area_access_error(regs, error_code, address, vma);
1366		return;
1367	}
1368
1369	/*
1370	 * If for any reason at all we couldn't handle the fault,
1371	 * make sure we exit gracefully rather than endlessly redo
1372	 * the fault.  Since we never set FAULT_FLAG_RETRY_NOWAIT, if
1373	 * we get VM_FAULT_RETRY back, the mmap_lock has been unlocked.
1374	 *
1375	 * Note that handle_userfault() may also release and reacquire mmap_lock
1376	 * (and not return with VM_FAULT_RETRY), when returning to userland to
1377	 * repeat the page fault later with a VM_FAULT_NOPAGE retval
1378	 * (potentially after handling any pending signal during the return to
1379	 * userland). The return to userland is identified whenever
1380	 * FAULT_FLAG_USER|FAULT_FLAG_KILLABLE are both set in flags.
1381	 */
1382	fault = handle_mm_fault(vma, address, flags, regs);
1383
1384	if (fault_signal_pending(fault, regs)) {
1385		/*
1386		 * Quick path to respond to signals.  The core mm code
1387		 * has unlocked the mm for us if we get here.
1388		 */
1389		if (!user_mode(regs))
1390			kernelmode_fixup_or_oops(regs, error_code, address,
1391						 SIGBUS, BUS_ADRERR,
1392						 ARCH_DEFAULT_PKEY);
1393		return;
1394	}
1395
1396	/* The fault is fully completed (including releasing mmap lock) */
1397	if (fault & VM_FAULT_COMPLETED)
1398		return;
1399
1400	/*
1401	 * If we need to retry the mmap_lock has already been released,
1402	 * and if there is a fatal signal pending there is no guarantee
1403	 * that we made any progress. Handle this case first.
1404	 */
1405	if (unlikely(fault & VM_FAULT_RETRY)) {
1406		flags |= FAULT_FLAG_TRIED;
1407		goto retry;
1408	}
1409
1410	mmap_read_unlock(mm);
1411done:
1412	if (likely(!(fault & VM_FAULT_ERROR)))
1413		return;
1414
1415	if (fatal_signal_pending(current) && !user_mode(regs)) {
1416		kernelmode_fixup_or_oops(regs, error_code, address,
1417					 0, 0, ARCH_DEFAULT_PKEY);
1418		return;
1419	}
1420
1421	if (fault & VM_FAULT_OOM) {
1422		/* Kernel mode? Handle exceptions or die: */
1423		if (!user_mode(regs)) {
1424			kernelmode_fixup_or_oops(regs, error_code, address,
1425						 SIGSEGV, SEGV_MAPERR,
1426						 ARCH_DEFAULT_PKEY);
1427			return;
1428		}
1429
1430		/*
1431		 * We ran out of memory, call the OOM killer, and return the
1432		 * userspace (which will retry the fault, or kill us if we got
1433		 * oom-killed):
1434		 */
1435		pagefault_out_of_memory();
1436	} else {
1437		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
1438			     VM_FAULT_HWPOISON_LARGE))
1439			do_sigbus(regs, error_code, address, fault);
1440		else if (fault & VM_FAULT_SIGSEGV)
1441			bad_area_nosemaphore(regs, error_code, address);
1442		else
1443			BUG();
1444	}
1445}
1446NOKPROBE_SYMBOL(do_user_addr_fault);
1447
1448static __always_inline void
1449trace_page_fault_entries(struct pt_regs *regs, unsigned long error_code,
1450			 unsigned long address)
1451{
1452	if (!trace_pagefault_enabled())
1453		return;
1454
1455	if (user_mode(regs))
1456		trace_page_fault_user(address, regs, error_code);
1457	else
1458		trace_page_fault_kernel(address, regs, error_code);
1459}
1460
1461static __always_inline void
1462handle_page_fault(struct pt_regs *regs, unsigned long error_code,
1463			      unsigned long address)
1464{
1465	trace_page_fault_entries(regs, error_code, address);
1466
1467	if (unlikely(kmmio_fault(regs, address)))
1468		return;
1469
1470	/* Was the fault on kernel-controlled part of the address space? */
1471	if (unlikely(fault_in_kernel_space(address))) {
1472		do_kern_addr_fault(regs, error_code, address);
1473	} else {
1474		do_user_addr_fault(regs, error_code, address);
1475		/*
1476		 * User address page fault handling might have reenabled
1477		 * interrupts. Fixing up all potential exit points of
1478		 * do_user_addr_fault() and its leaf functions is just not
1479		 * doable w/o creating an unholy mess or turning the code
1480		 * upside down.
1481		 */
1482		local_irq_disable();
1483	}
1484}
1485
1486DEFINE_IDTENTRY_RAW_ERRORCODE(exc_page_fault)
1487{
1488	irqentry_state_t state;
1489	unsigned long address;
1490
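    	/*
    	 * On FRED-enabled CPUs the faulting address is delivered as event
    	 * data in the entry stack frame rather than in CR2.
    	 */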
1491	address = cpu_feature_enabled(X86_FEATURE_FRED) ? fred_event_data(regs) : read_cr2();
1492
1493	prefetchw(&current->mm->mmap_lock);
1494
1495	/*
1496	 * KVM uses #PF vector to deliver 'page not present' events to guests
1497	 * (asynchronous page fault mechanism). The event happens when a
1498	 * userspace task is trying to access some valid (from guest's point of
1499	 * view) memory which is not currently mapped by the host (e.g. the
1500	 * memory is swapped out). Note, the corresponding "page ready" event
1501	 * which is injected when the memory becomes available, is delivered via
1502	 * an interrupt mechanism and not a #PF exception
1503	 * (see arch/x86/kernel/kvm.c: sysvec_kvm_asyncpf_interrupt()).
1504	 *
1505	 * We are relying on the interrupted context being sane (valid RSP,
1506	 * relevant locks not held, etc.), which is fine as long as the
1507	 * interrupted context had IF=1.  We are also relying on the KVM
1508	 * async pf type field and CR2 being read consistently instead of
1509	 * getting values from real and async page faults mixed up.
1510	 *
1511	 * Fingers crossed.
1512	 *
1513	 * The async #PF handling code takes care of idtentry handling
1514	 * itself.
1515	 */
1516	if (kvm_handle_async_pf(regs, (u32)address))
1517		return;
1518
1519	/*
1520	 * Entry handling for valid #PF from kernel mode is slightly
1521	 * different: RCU is already watching and ct_irq_enter() must not
1522	 * be invoked because a kernel fault on a user space address might
1523	 * sleep.
1524	 *
1525	 * In case the fault hit a RCU idle region the conditional entry
1526	 * code reenabled RCU to avoid subsequent wreckage which helps
1527	 * debuggability.
1528	 */
1529	state = irqentry_enter(regs);
1530
1531	instrumentation_begin();
1532	handle_page_fault(regs, error_code, address);
1533	instrumentation_end();
1534
1535	irqentry_exit(regs, state);
1536}