/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/context_tracking.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/kmsan.h>
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/io.h>
#include <linux/hardirq.h>
#include <linux/atomic.h>
#include <linux/iommu.h>
#include <linux/ubsan.h>

#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <asm/realmode.h>
#include <asm/text-patching.h>
#include <asm/ftrace.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/fred.h>
#include <asm/fpu/api.h>
#include <asm/cpu.h>
#include <asm/cpu_entry_area.h>
#include <asm/mce.h>
#include <asm/fixmap.h>
#include <asm/mach_traps.h>
#include <asm/alternative.h>
#include <asm/fpu/xstate.h>
#include <asm/vm86.h>
#include <asm/umip.h>
#include <asm/insn.h>
#include <asm/insn-eval.h>
#include <asm/vdso.h>
#include <asm/tdx.h>
#include <asm/cfi.h>

#ifdef CONFIG_X86_64
#include <asm/x86_init.h>
#else
#include <asm/processor-flags.h>
#include <asm/setup.h>
#endif

#include <asm/proto.h>

DECLARE_BITMAP(system_vectors, NR_VECTORS);

__always_inline int is_valid_bugaddr(unsigned long addr)
{
	if (addr < TASK_SIZE_MAX)
		return 0;

	/*
	 * We got #UD; if the text weren't readable we'd have gotten
	 * a different exception.
	 */
	return *(unsigned short *)addr == INSN_UD2;
}

/*
 * Check for UD1 or UD2, accounting for Address Size Override Prefixes.
 * If it's a UD1, get the ModRM byte to pass along to UBSan.
 */
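/*
 * For illustration: a UBSAN trap such as "ud1 0x2a(%eax),%eax" encodes
 * as 0f b9 40 2a.  0f is OPCODE_ESCAPE, b9 is SECOND_BYTE_OPCODE_UD1,
 * ModRM 0x40 has MOD=1 so an 8-bit "displacement" follows, and that
 * byte (0x2a) carries the UBSAN type value.  MOD=2 carries a 32-bit
 * value instead; RM=4 means a SIB byte precedes the displacement,
 * hence the extra addr++ in decode_bug().  A 0x67 address-size
 * override (INSN_ASOP) may prefix all of this.
 */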
__always_inline int decode_bug(unsigned long addr, u32 *imm)
{
	u8 v;

	if (addr < TASK_SIZE_MAX)
		return BUG_NONE;

	v = *(u8 *)(addr++);
	if (v == INSN_ASOP)
		v = *(u8 *)(addr++);
	if (v != OPCODE_ESCAPE)
		return BUG_NONE;

	v = *(u8 *)(addr++);
	if (v == SECOND_BYTE_OPCODE_UD2)
		return BUG_UD2;

	if (!IS_ENABLED(CONFIG_UBSAN_TRAP) || v != SECOND_BYTE_OPCODE_UD1)
		return BUG_NONE;

	/* Retrieve the immediate (type value) for the UBSAN UD1 */
	v = *(u8 *)(addr++);
	if (X86_MODRM_RM(v) == 4)
		addr++;

	*imm = 0;
	if (X86_MODRM_MOD(v) == 1)
		*imm = *(u8 *)addr;
	else if (X86_MODRM_MOD(v) == 2)
		*imm = *(u32 *)addr;
	else
		WARN_ONCE(1, "Unexpected MODRM_MOD: %u\n", X86_MODRM_MOD(v));

	return BUG_UD1;
}

static nokprobe_inline int
do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
		  struct pt_regs *regs, long error_code)
{
	if (v8086_mode(regs)) {
		/*
		 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
		 * On nmi (interrupt 2), do_trap should not be called.
		 */
		if (trapnr < X86_TRAP_UD) {
			if (!handle_vm86_trap((struct kernel_vm86_regs *) regs,
						error_code, trapnr))
				return 0;
		}
	} else if (!user_mode(regs)) {
		if (fixup_exception(regs, trapnr, error_code, 0))
			return 0;

		tsk->thread.error_code = error_code;
		tsk->thread.trap_nr = trapnr;
		die(str, regs, error_code);
	} else {
		if (fixup_vdso_exception(regs, trapnr, error_code, 0))
			return 0;
	}

	/*
	 * We want error_code and trap_nr set for userspace faults and
	 * kernelspace faults which result in die(), but not
	 * kernelspace faults which are fixed up.  die() gives the
	 * process no chance to handle the signal and notice the
	 * kernel fault information, so that won't result in polluting
	 * the information about previously queued, but not yet
	 * delivered, faults.  See also exc_general_protection below.
	 */
	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = trapnr;

	return -1;
}

static void show_signal(struct task_struct *tsk, int signr,
			const char *type, const char *desc,
			struct pt_regs *regs, long error_code)
{
	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
	    printk_ratelimit()) {
		pr_info("%s[%d] %s%s ip:%lx sp:%lx error:%lx",
			tsk->comm, task_pid_nr(tsk), type, desc,
			regs->ip, regs->sp, error_code);
		print_vma_addr(KERN_CONT " in ", regs->ip);
		pr_cont("\n");
	}
}

static void
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
	long error_code, int sicode, void __user *addr)
{
	struct task_struct *tsk = current;

	if (!do_trap_no_signal(tsk, trapnr, str, regs, error_code))
		return;

	show_signal(tsk, signr, "trap ", str, regs, error_code);

	if (!sicode)
		force_sig(signr);
	else
		force_sig_fault(signr, sicode, addr);
}
NOKPROBE_SYMBOL(do_trap);

static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
	unsigned long trapnr, int signr, int sicode, void __user *addr)
{
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");

	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
			NOTIFY_STOP) {
		cond_local_irq_enable(regs);
		do_trap(trapnr, signr, str, regs, error_code, sicode, addr);
		cond_local_irq_disable(regs);
	}
}

/*
 * POSIX requires the address of the faulting instruction to be provided
 * for SIGILL (#UD) and SIGFPE (#DE) in the si_addr member of siginfo_t.
 *
 * This address is usually regs->ip, but when an uprobe moved the code out
 * of line then regs->ip points to the XOL code which would confuse
 * anything which analyzes the fault address vs. the unmodified binary. If
 * a trap happened in XOL code then uprobe maps regs->ip back to the
 * original instruction address.
 */
static __always_inline void __user *error_get_trap_addr(struct pt_regs *regs)
{
	return (void __user *)uprobe_get_trap_addr(regs);
}

DEFINE_IDTENTRY(exc_divide_error)
{
	do_error_trap(regs, 0, "divide error", X86_TRAP_DE, SIGFPE,
		      FPE_INTDIV, error_get_trap_addr(regs));
}

DEFINE_IDTENTRY(exc_overflow)
{
	do_error_trap(regs, 0, "overflow", X86_TRAP_OF, SIGSEGV, 0, NULL);
}

#ifdef CONFIG_X86_F00F_BUG
void handle_invalid_op(struct pt_regs *regs)
#else
static inline void handle_invalid_op(struct pt_regs *regs)
#endif
{
	do_error_trap(regs, 0, "invalid opcode", X86_TRAP_UD, SIGILL,
		      ILL_ILLOPN, error_get_trap_addr(regs));
}

static noinstr bool handle_bug(struct pt_regs *regs)
{
	bool handled = false;
	int ud_type;
	u32 imm;

	ud_type = decode_bug(regs->ip, &imm);
	if (ud_type == BUG_NONE)
		return handled;

	/*
	 * All lies, just get the WARN/BUG out.
	 */
	instrumentation_begin();
	/*
	 * Normally @regs are unpoisoned by irqentry_enter(), but handle_bug()
	 * is a rare case that uses @regs without passing them to
	 * irqentry_enter().
	 */
	kmsan_unpoison_entry_regs(regs);
	/*
	 * Since we're emulating a CALL with exceptions, restore the interrupt
	 * state to what it was at the exception site.
	 */
	if (regs->flags & X86_EFLAGS_IF)
		raw_local_irq_enable();
	if (ud_type == BUG_UD2) {
		if (report_bug(regs->ip, regs) == BUG_TRAP_TYPE_WARN ||
		    handle_cfi_failure(regs) == BUG_TRAP_TYPE_WARN) {
			regs->ip += LEN_UD2;
			handled = true;
		}
	} else if (IS_ENABLED(CONFIG_UBSAN_TRAP)) {
		pr_crit("%s at %pS\n", report_ubsan_failure(regs, imm), (void *)regs->ip);
	}
	if (regs->flags & X86_EFLAGS_IF)
		raw_local_irq_disable();
	instrumentation_end();

	return handled;
}

DEFINE_IDTENTRY_RAW(exc_invalid_op)
{
	irqentry_state_t state;

	/*
	 * We use UD2 as a short encoding for 'CALL __WARN', as such
	 * handle it before exception entry to avoid recursive WARN
	 * in case exception entry is the one triggering WARNs.
	 */
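	/*
	 * (With CONFIG_GENERIC_BUG, WARN()/BUG() compile to a bare UD2;
	 * the file/line/flags live in a __bug_table entry that
	 * report_bug() looks up by regs->ip, which is how handle_bug()
	 * above tells a recoverable WARN from a fatal BUG.)
	 */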
	if (!user_mode(regs) && handle_bug(regs))
		return;

	state = irqentry_enter(regs);
	instrumentation_begin();
	handle_invalid_op(regs);
	instrumentation_end();
	irqentry_exit(regs, state);
}

DEFINE_IDTENTRY(exc_coproc_segment_overrun)
{
	do_error_trap(regs, 0, "coprocessor segment overrun",
		      X86_TRAP_OLD_MF, SIGFPE, 0, NULL);
}

DEFINE_IDTENTRY_ERRORCODE(exc_invalid_tss)
{
	do_error_trap(regs, error_code, "invalid TSS", X86_TRAP_TS, SIGSEGV,
		      0, NULL);
}

DEFINE_IDTENTRY_ERRORCODE(exc_segment_not_present)
{
	do_error_trap(regs, error_code, "segment not present", X86_TRAP_NP,
		      SIGBUS, 0, NULL);
}

DEFINE_IDTENTRY_ERRORCODE(exc_stack_segment)
{
	do_error_trap(regs, error_code, "stack segment", X86_TRAP_SS, SIGBUS,
		      0, NULL);
}

DEFINE_IDTENTRY_ERRORCODE(exc_alignment_check)
{
	char *str = "alignment check";

	if (notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_AC, SIGBUS) == NOTIFY_STOP)
		return;

	if (!user_mode(regs))
		die("Split lock detected\n", regs, error_code);

	local_irq_enable();

	if (handle_user_split_lock(regs, error_code))
		goto out;

	do_trap(X86_TRAP_AC, SIGBUS, "alignment check", regs,
		error_code, BUS_ADRALN, NULL);

out:
	local_irq_disable();
}

#ifdef CONFIG_VMAP_STACK
__visible void __noreturn handle_stack_overflow(struct pt_regs *regs,
						unsigned long fault_address,
						struct stack_info *info)
{
	const char *name = stack_type_name(info->type);

	printk(KERN_EMERG "BUG: %s stack guard page was hit at %p (stack is %p..%p)\n",
	       name, (void *)fault_address, info->begin, info->end);

	die("stack guard page", regs, 0);

	/* Be absolutely certain we don't return. */
	panic("%s stack guard hit", name);
}
#endif

/*
 * Runs on an IST stack for x86_64 and on a special task stack for x86_32.
 *
 * On x86_64, this is more or less a normal kernel entry.  Notwithstanding the
 * SDM's warnings about double faults being unrecoverable, returning works as
 * expected.  Presumably what the SDM actually means is that the CPU may get
 * the register state wrong on entry, so returning could be a bad idea.
 *
 * Various CPU engineers have promised that double faults due to an IRET fault
 * while the stack is read-only are, in fact, recoverable.
 *
 * On x86_32, this is entered through a task gate, and regs are synthesized
 * from the TSS.  Returning is, in principle, okay, but changes to regs will
 * be lost.  If, for some reason, we need to return to a context with modified
 * regs, the shim code could be adjusted to synchronize the registers.
 *
 * The 32bit #DF shim provides CR2 already as an argument. On 64bit it needs
 * to be read before doing anything else.
 */
DEFINE_IDTENTRY_DF(exc_double_fault)
{
	static const char str[] = "double fault";
	struct task_struct *tsk = current;

#ifdef CONFIG_VMAP_STACK
	unsigned long address = read_cr2();
	struct stack_info info;
#endif

#ifdef CONFIG_X86_ESPFIX64
	extern unsigned char native_irq_return_iret[];

	/*
	 * If IRET takes a non-IST fault on the espfix64 stack, then we
	 * end up promoting it to a double fault.  In that case, take
	 * advantage of the fact that we're not using the normal (TSS.sp0)
	 * stack right now.  We can write a fake #GP(0) frame at TSS.sp0
	 * and then modify our own IRET frame so that, when we return,
	 * we land directly at the #GP(0) vector with the stack already
	 * set up according to its expectations.
	 *
	 * The net result is that our #GP handler will think that we
	 * entered from usermode with the bad user context.
	 *
	 * No need for nmi_enter() here because we don't use RCU.
	 */
	if (((long)regs->sp >> P4D_SHIFT) == ESPFIX_PGD_ENTRY &&
		regs->cs == __KERNEL_CS &&
		regs->ip == (unsigned long)native_irq_return_iret)
	{
		struct pt_regs *gpregs = (struct pt_regs *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
		unsigned long *p = (unsigned long *)regs->sp;

		/*
		 * regs->sp points to the failing IRET frame on the
		 * ESPFIX64 stack.  Copy it to the entry stack.  This fills
		 * in gpregs->ss through gpregs->ip.
		 */
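		/*
		 * (The hardware IRET frame is, from low to high address,
		 * five 8-byte words: RIP, CS, RFLAGS, RSP, SS -- hence
		 * p[0] through p[4] below.)
		 */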
		gpregs->ip	= p[0];
		gpregs->cs	= p[1];
		gpregs->flags	= p[2];
		gpregs->sp	= p[3];
		gpregs->ss	= p[4];
		gpregs->orig_ax = 0;  /* Missing (lost) #GP error code */

		/*
		 * Adjust our frame so that we return straight to the #GP
		 * vector with the expected RSP value.  This is safe because
		 * we won't enable interrupts or schedule before we invoke
		 * general_protection, so nothing will clobber the stack
		 * frame we just set up.
		 *
		 * We will enter general_protection with kernel GSBASE,
		 * which is what the stub expects, given that the faulting
		 * RIP will be the IRET instruction.
		 */
		regs->ip = (unsigned long)asm_exc_general_protection;
		regs->sp = (unsigned long)&gpregs->orig_ax;

		return;
	}
#endif

	irqentry_nmi_enter(regs);
	instrumentation_begin();
	notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_DF;

#ifdef CONFIG_VMAP_STACK
	/*
	 * If we overflow the stack into a guard page, the CPU will fail
	 * to deliver #PF and will send #DF instead.  Similarly, if we
	 * take any non-IST exception while too close to the bottom of
	 * the stack, the processor will get a page fault while
	 * delivering the exception and will generate a double fault.
	 *
	 * According to the SDM (footnote in 6.15 under "Interrupt 14 -
	 * Page-Fault Exception (#PF)"):
	 *
	 *   Processors update CR2 whenever a page fault is detected. If a
	 *   second page fault occurs while an earlier page fault is being
	 *   delivered, the faulting linear address of the second fault will
	 *   overwrite the contents of CR2 (replacing the previous
	 *   address). These updates to CR2 occur even if the page fault
	 *   results in a double fault or occurs during the delivery of a
	 *   double fault.
	 *
	 * The logic below has a small possibility of incorrectly diagnosing
	 * some errors as stack overflows.  For example, if the IDT or GDT
	 * gets corrupted such that #GP delivery fails due to a bad descriptor
	 * causing #GP and we hit this condition while CR2 coincidentally
	 * points to the stack guard page, we'll think we overflowed the
	 * stack.  Given that we're going to panic one way or another
	 * if this happens, this isn't necessarily worth fixing.
	 *
	 * If necessary, we could improve the test by only diagnosing
	 * a stack overflow if the saved RSP points within 47 bytes of
	 * the bottom of the stack: if RSP == tsk_stack + 48 and we
	 * take an exception, the stack is already aligned and there
	 * will be enough room for SS, RSP, RFLAGS, CS, RIP, and a
	 * possible error code, so a stack overflow would *not* double
	 * fault.  With any less space left, exception delivery could
	 * fail, and, as a practical matter, we've overflowed the
	 * stack even if the actual trigger for the double fault was
	 * something else.
	 */
	if (get_stack_guard_info((void *)address, &info))
		handle_stack_overflow(regs, address, &info);
#endif

	pr_emerg("PANIC: double fault, error_code: 0x%lx\n", error_code);
	die("double fault", regs, error_code);
	panic("Machine halted.");
	instrumentation_end();
}

DEFINE_IDTENTRY(exc_bounds)
{
	if (notify_die(DIE_TRAP, "bounds", regs, 0,
			X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP)
		return;
	cond_local_irq_enable(regs);

	if (!user_mode(regs))
		die("bounds", regs, 0);

	do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, 0, 0, NULL);

	cond_local_irq_disable(regs);
}

enum kernel_gp_hint {
	GP_NO_HINT,
	GP_NON_CANONICAL,
	GP_CANONICAL
};

/*
 * When an uncaught #GP occurs, try to determine the memory address accessed by
 * the instruction and return that address to the caller. Also, try to figure
 * out whether any part of the access to that address was non-canonical.
 */
static enum kernel_gp_hint get_kernel_gp_address(struct pt_regs *regs,
						 unsigned long *addr)
{
	u8 insn_buf[MAX_INSN_SIZE];
	struct insn insn;
	int ret;

	if (copy_from_kernel_nofault(insn_buf, (void *)regs->ip,
			MAX_INSN_SIZE))
		return GP_NO_HINT;

	ret = insn_decode_kernel(&insn, insn_buf);
	if (ret < 0)
		return GP_NO_HINT;

	*addr = (unsigned long)insn_get_addr_ref(&insn, regs);
	if (*addr == -1UL)
		return GP_NO_HINT;

#ifdef CONFIG_X86_64
	/*
	 * Check that:
	 *  - the operand is not in the kernel half
	 *  - the last byte of the operand is not in the user canonical half
	 */
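	/*
	 * (Worked example, assuming 48-bit virtual addresses where
	 * __VIRTUAL_MASK is 0x00007fffffffffff: an 8-byte access at
	 * 0x00007ffffffffffc ends at 0x0000800000000003, so it starts in
	 * the user half but spills into the non-canonical hole and is
	 * reported as GP_NON_CANONICAL.)
	 */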
	if (*addr < ~__VIRTUAL_MASK &&
	    *addr + insn.opnd_bytes - 1 > __VIRTUAL_MASK)
		return GP_NON_CANONICAL;
#endif

	return GP_CANONICAL;
}

#define GPFSTR "general protection fault"

static bool fixup_iopl_exception(struct pt_regs *regs)
{
	struct thread_struct *t = &current->thread;
	unsigned char byte;
	unsigned long ip;

	if (!IS_ENABLED(CONFIG_X86_IOPL_IOPERM) || t->iopl_emul != 3)
		return false;

	if (insn_get_effective_ip(regs, &ip))
		return false;

	if (get_user(byte, (const char __user *)ip))
		return false;

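	/* 0xfa is CLI, 0xfb is STI; only those two get the NOP treatment. */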
	if (byte != 0xfa && byte != 0xfb)
		return false;

	if (!t->iopl_warn && printk_ratelimit()) {
		pr_err("%s[%d] attempts to use CLI/STI, pretending it's a NOP, ip:%lx",
		       current->comm, task_pid_nr(current), ip);
		print_vma_addr(KERN_CONT " in ", ip);
		pr_cont("\n");
		t->iopl_warn = 1;
	}

	regs->ip += 1;
	return true;
}

/*
 * The unprivileged ENQCMD instruction generates #GPs if the
 * IA32_PASID MSR has not been populated.  If possible, populate
 * the MSR from a PASID previously allocated to the mm.
 */
static bool try_fixup_enqcmd_gp(void)
{
#ifdef CONFIG_ARCH_HAS_CPU_PASID
	u32 pasid;

	/*
	 * MSR_IA32_PASID is managed using XSAVE.  Directly
	 * writing to the MSR is only possible when fpregs
	 * are valid and the fpstate is not.  This is
	 * guaranteed when handling a userspace exception
	 * before interrupts are re-enabled.
	 */
	lockdep_assert_irqs_disabled();

	/*
	 * Hardware without ENQCMD will not generate
	 * #GPs that can be fixed up here.
	 */
	if (!cpu_feature_enabled(X86_FEATURE_ENQCMD))
		return false;

	/*
	 * If the mm has not been allocated a
	 * PASID, the #GP can not be fixed up.
	 */
	if (!mm_valid_pasid(current->mm))
		return false;

	pasid = mm_get_enqcmd_pasid(current->mm);

	/*
	 * Did this thread already have its PASID activated?
	 * If so, the #GP must be from something else.
	 */
	if (current->pasid_activated)
		return false;

	wrmsrl(MSR_IA32_PASID, pasid | MSR_IA32_PASID_VALID);
	current->pasid_activated = 1;

	return true;
#else
	return false;
#endif
}

static bool gp_try_fixup_and_notify(struct pt_regs *regs, int trapnr,
				    unsigned long error_code, const char *str,
				    unsigned long address)
{
	if (fixup_exception(regs, trapnr, error_code, address))
		return true;

	current->thread.error_code = error_code;
	current->thread.trap_nr = trapnr;

	/*
	 * To potentially be processing a kprobe fault and to trust the
	 * result from kprobe_running(), we have to be non-preemptible.
	 */
	if (!preemptible() && kprobe_running() &&
	    kprobe_fault_handler(regs, trapnr))
		return true;

	return notify_die(DIE_GPF, str, regs, error_code, trapnr, SIGSEGV) == NOTIFY_STOP;
}

static void gp_user_force_sig_segv(struct pt_regs *regs, int trapnr,
				   unsigned long error_code, const char *str)
{
	current->thread.error_code = error_code;
	current->thread.trap_nr = trapnr;
	show_signal(current, SIGSEGV, "", str, regs, error_code);
	force_sig(SIGSEGV);
}

DEFINE_IDTENTRY_ERRORCODE(exc_general_protection)
{
	char desc[sizeof(GPFSTR) + 50 + 2*sizeof(unsigned long) + 1] = GPFSTR;
	enum kernel_gp_hint hint = GP_NO_HINT;
	unsigned long gp_addr;

	if (user_mode(regs) && try_fixup_enqcmd_gp())
		return;

	cond_local_irq_enable(regs);

	if (static_cpu_has(X86_FEATURE_UMIP)) {
		if (user_mode(regs) && fixup_umip_exception(regs))
			goto exit;
	}

	if (v8086_mode(regs)) {
		local_irq_enable();
		handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
		local_irq_disable();
		return;
	}

	if (user_mode(regs)) {
		if (fixup_iopl_exception(regs))
			goto exit;

		if (fixup_vdso_exception(regs, X86_TRAP_GP, error_code, 0))
			goto exit;

		gp_user_force_sig_segv(regs, X86_TRAP_GP, error_code, desc);
		goto exit;
	}

	if (gp_try_fixup_and_notify(regs, X86_TRAP_GP, error_code, desc, 0))
		goto exit;

	if (error_code)
		snprintf(desc, sizeof(desc), "segment-related " GPFSTR);
	else
		hint = get_kernel_gp_address(regs, &gp_addr);

	if (hint != GP_NO_HINT)
		snprintf(desc, sizeof(desc), GPFSTR ", %s 0x%lx",
			 (hint == GP_NON_CANONICAL) ? "probably for non-canonical address"
						    : "maybe for address",
			 gp_addr);

	/*
	 * KASAN is interested only in the non-canonical case; clear it
	 * otherwise.
	 */
	if (hint != GP_NON_CANONICAL)
		gp_addr = 0;

	die_addr(desc, regs, error_code, gp_addr);

exit:
	cond_local_irq_disable(regs);
}

static bool do_int3(struct pt_regs *regs)
{
	int res;

#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
	if (kgdb_ll_trap(DIE_INT3, "int3", regs, 0, X86_TRAP_BP,
			 SIGTRAP) == NOTIFY_STOP)
		return true;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */

#ifdef CONFIG_KPROBES
	if (kprobe_int3_handler(regs))
		return true;
#endif
	res = notify_die(DIE_INT3, "int3", regs, 0, X86_TRAP_BP, SIGTRAP);

	return res == NOTIFY_STOP;
}
NOKPROBE_SYMBOL(do_int3);

static void do_int3_user(struct pt_regs *regs)
{
	if (do_int3(regs))
		return;

	cond_local_irq_enable(regs);
	do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, 0, 0, NULL);
	cond_local_irq_disable(regs);
}

DEFINE_IDTENTRY_RAW(exc_int3)
{
	/*
	 * poke_int3_handler() is completely self contained code; it does (and
	 * must) *NOT* call out to anything, lest it hit upon yet another
	 * INT3.
	 */
	if (poke_int3_handler(regs))
		return;

	/*
	 * irqentry_enter_from_user_mode() uses static_branch_{,un}likely()
	 * and therefore can trigger INT3, hence poke_int3_handler() must
	 * be done before. If the entry came from kernel mode, then use
	 * nmi_enter() because the INT3 could have been hit in any context
	 * including NMI.
	 */
	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);
		instrumentation_begin();
		do_int3_user(regs);
		instrumentation_end();
		irqentry_exit_to_user_mode(regs);
	} else {
		irqentry_state_t irq_state = irqentry_nmi_enter(regs);

		instrumentation_begin();
		if (!do_int3(regs))
			die("int3", regs, 0);
		instrumentation_end();
		irqentry_nmi_exit(regs, irq_state);
	}
}

#ifdef CONFIG_X86_64
/*
 * Help a handler running on a per-cpu (IST or entry trampoline) stack
 * switch to the normal thread stack if the interrupted code was in
 * user mode. The actual stack switch is done in entry_64.S.
 */
asmlinkage __visible noinstr struct pt_regs *sync_regs(struct pt_regs *eregs)
{
	struct pt_regs *regs = (struct pt_regs *)current_top_of_stack() - 1;
	if (regs != eregs)
		*regs = *eregs;
	return regs;
}

#ifdef CONFIG_AMD_MEM_ENCRYPT
asmlinkage __visible noinstr struct pt_regs *vc_switch_off_ist(struct pt_regs *regs)
{
	unsigned long sp, *stack;
	struct stack_info info;
	struct pt_regs *regs_ret;

	/*
	 * In the SYSCALL entry path the RSP value comes from user space -
	 * don't trust it and switch to the current kernel stack.
	 */
	if (ip_within_syscall_gap(regs)) {
		sp = current_top_of_stack();
		goto sync;
	}

	/*
	 * From here on the RSP value is trusted. Now check whether entry
	 * happened from a safe stack. The entry and unknown stacks are not
	 * safe; use the fall-back stack instead in that case.
	 */
	sp    = regs->sp;
	stack = (unsigned long *)sp;

	if (!get_stack_info_noinstr(stack, current, &info) || info.type == STACK_TYPE_ENTRY ||
	    info.type > STACK_TYPE_EXCEPTION_LAST)
		sp = __this_cpu_ist_top_va(VC2);

sync:
	/*
	 * Found a safe stack - switch to it as if the entry didn't happen via
	 * the IST stack. The code below only copies pt_regs; the real switch
	 * happens in assembly code.
	 */
	sp = ALIGN_DOWN(sp, 8) - sizeof(*regs_ret);

	regs_ret = (struct pt_regs *)sp;
	*regs_ret = *regs;

	return regs_ret;
}
#endif

asmlinkage __visible noinstr struct pt_regs *fixup_bad_iret(struct pt_regs *bad_regs)
{
	struct pt_regs tmp, *new_stack;

	/*
	 * This is called from entry_64.S early in handling a fault
	 * caused by a bad iret to user mode.  To handle the fault
	 * correctly, we want to move our stack frame to where it would
	 * be had we entered directly on the entry stack (rather than
	 * just below the IRET frame) and we want to pretend that the
	 * exception came from the IRET target.
	 */
	new_stack = (struct pt_regs *)__this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;

	/* Copy the IRET target to the temporary storage. */
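	/* (5*8 bytes: the RIP, CS, RFLAGS, RSP and SS words of the IRET frame.) */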
	__memcpy(&tmp.ip, (void *)bad_regs->sp, 5*8);

	/* Copy the remainder of the stack from the current stack. */
	__memcpy(&tmp, bad_regs, offsetof(struct pt_regs, ip));

	/* Update the entry stack */
	__memcpy(new_stack, &tmp, sizeof(tmp));

	BUG_ON(!user_mode(new_stack));
	return new_stack;
}
#endif

static bool is_sysenter_singlestep(struct pt_regs *regs)
{
	/*
	 * We don't try for precision here.  If we're anywhere in the region of
	 * code that can be single-stepped in the SYSENTER entry path, then
	 * assume that this is a useless single-step trap due to SYSENTER
	 * being invoked with TF set.  (We don't know in advance exactly
	 * which instructions will be hit because BTF could plausibly
	 * be set.)
	 */
#ifdef CONFIG_X86_32
	return (regs->ip - (unsigned long)__begin_SYSENTER_singlestep_region) <
		(unsigned long)__end_SYSENTER_singlestep_region -
		(unsigned long)__begin_SYSENTER_singlestep_region;
#elif defined(CONFIG_IA32_EMULATION)
	return (regs->ip - (unsigned long)entry_SYSENTER_compat) <
		(unsigned long)__end_entry_SYSENTER_compat -
		(unsigned long)entry_SYSENTER_compat;
#else
	return false;
#endif
}

static __always_inline unsigned long debug_read_clear_dr6(void)
{
	unsigned long dr6;

	/*
	 * The Intel SDM says:
	 *
	 *   Certain debug exceptions may clear bits 0-3. The remaining
	 *   contents of the DR6 register are never cleared by the
	 *   processor. To avoid confusion in identifying debug
	 *   exceptions, debug handlers should clear the register before
	 *   returning to the interrupted task.
	 *
	 * Keep it simple: clear DR6 immediately.
	 */
	get_debugreg(dr6, 6);
	set_debugreg(DR6_RESERVED, 6);
	dr6 ^= DR6_RESERVED; /* Flip to positive polarity */

	return dr6;
}

/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 *
 * May run on IST stack.
 */

static bool notify_debug(struct pt_regs *regs, unsigned long *dr6)
{
	/*
	 * Notifiers will clear bits in @dr6 to indicate the event has been
	 * consumed - hw_breakpoint_handler(), single_stop_cont().
	 *
	 * Notifiers will set bits in @virtual_dr6 to indicate the desire
	 * for signals - ptrace_triggered(), kgdb_hw_overflow_handler().
	 */
	if (notify_die(DIE_DEBUG, "debug", regs, (long)dr6, 0, SIGTRAP) == NOTIFY_STOP)
		return true;

	return false;
}

static noinstr void exc_debug_kernel(struct pt_regs *regs, unsigned long dr6)
{
	/*
	 * Disable breakpoints during exception handling; recursive exceptions
	 * are exceedingly 'fun'.
	 *
	 * Since this function is NOKPROBE, and that also applies to
	 * HW_BREAKPOINT_X, we can't hit a breakpoint before this (XXX except a
	 * HW_BREAKPOINT_W on our stack)
	 *
	 * Entry text is excluded for HW_BP_X, and cpu_entry_area, which
	 * includes the entry stack, is excluded for everything.
	 *
	 * For FRED, nested #DB should just work fine. But when a watchpoint or
	 * breakpoint is set in the code path which is executed by the #DB
	 * handler, it results in an endless recursion and stack overflow. Thus
	 * we stay with the IDT approach, i.e., save DR7 and disable #DB.
	 */
	unsigned long dr7 = local_db_save();
	irqentry_state_t irq_state = irqentry_nmi_enter(regs);
	instrumentation_begin();

	/*
	 * If something gets miswired and we end up here for a user mode
	 * #DB, we will malfunction.
	 */
	WARN_ON_ONCE(user_mode(regs));

	if (test_thread_flag(TIF_BLOCKSTEP)) {
		/*
		 * The SDM says "The processor clears the BTF flag when it
		 * generates a debug exception."  PTRACE_BLOCKSTEP requested
		 * it for userspace, but we just took a kernel #DB, so re-set
		 * BTF.
		 */
		unsigned long debugctl;

		rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
		debugctl |= DEBUGCTLMSR_BTF;
		wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	}

	/*
	 * Catch SYSENTER with TF set and clear DR_STEP. If this hit a
	 * watchpoint at the same time then that will still be handled.
	 */
	if (!cpu_feature_enabled(X86_FEATURE_FRED) &&
	    (dr6 & DR_STEP) && is_sysenter_singlestep(regs))
		dr6 &= ~DR_STEP;

	/*
	 * The kernel doesn't use INT1.
	 */
	if (!dr6)
		goto out;

	if (notify_debug(regs, &dr6))
		goto out;

	/*
	 * The kernel doesn't use TF single-step outside of:
	 *
	 *  - Kprobes, consumed through kprobe_debug_handler()
	 *  - KGDB, consumed through notify_debug()
	 *
	 * So if we get here with DR_STEP set, something is wonky.
	 *
	 * A known way to trigger this is through QEMU's GDB stub,
	 * which leaks #DB into the guest and causes IST recursion.
	 */
	if (WARN_ON_ONCE(dr6 & DR_STEP))
		regs->flags &= ~X86_EFLAGS_TF;
out:
	instrumentation_end();
	irqentry_nmi_exit(regs, irq_state);

	local_db_restore(dr7);
}

static noinstr void exc_debug_user(struct pt_regs *regs, unsigned long dr6)
{
	bool icebp;

	/*
	 * If something gets miswired and we end up here for a kernel mode
	 * #DB, we will malfunction.
	 */
	WARN_ON_ONCE(!user_mode(regs));

	/*
	 * NB: We can't easily clear DR7 here because
	 * irqentry_exit_to_usermode() can invoke ptrace, schedule, access
	 * user memory, etc.  This means that a recursive #DB is possible.  If
	 * this happens, that #DB will hit exc_debug_kernel() and clear DR7.
	 * Since we're not on the IST stack right now, everything will be
	 * fine.
	 */

	irqentry_enter_from_user_mode(regs);
	instrumentation_begin();

	/*
	 * Start the virtual/ptrace DR6 value with just the DR_STEP mask
	 * of the real DR6. ptrace_triggered() will set the DR_TRAPn bits.
	 *
	 * Userspace expects DR_STEP to be visible in ptrace_get_debugreg(6)
	 * even if it is not the result of PTRACE_SINGLESTEP.
	 */
	current->thread.virtual_dr6 = (dr6 & DR_STEP);

	/*
	 * The SDM says "The processor clears the BTF flag when it
	 * generates a debug exception."  Clear TIF_BLOCKSTEP to keep
	 * TIF_BLOCKSTEP in sync with the hardware BTF flag.
	 */
	clear_thread_flag(TIF_BLOCKSTEP);

	/*
	 * If dr6 gives no reason for the origin of this trap, then it's
	 * very likely the result of an icebp/int01 trap. Userspace wants
	 * a sigtrap for that.
	 */
	icebp = !dr6;

	if (notify_debug(regs, &dr6))
		goto out;

	/* It's safe to allow irq's after DR6 has been saved */
	local_irq_enable();

	if (v8086_mode(regs)) {
		handle_vm86_trap((struct kernel_vm86_regs *)regs, 0, X86_TRAP_DB);
		goto out_irq;
	}

	/* #DB for bus lock can only be triggered from userspace. */
	if (dr6 & DR_BUS_LOCK)
		handle_bus_lock(regs);

	/* Add the virtual_dr6 bits for signals. */
	dr6 |= current->thread.virtual_dr6;
	if (dr6 & (DR_STEP | DR_TRAP_BITS) || icebp)
		send_sigtrap(regs, 0, get_si_code(dr6));

out_irq:
	local_irq_disable();
out:
	instrumentation_end();
	irqentry_exit_to_user_mode(regs);
}

#ifdef CONFIG_X86_64
/* IST stack entry */
DEFINE_IDTENTRY_DEBUG(exc_debug)
{
	exc_debug_kernel(regs, debug_read_clear_dr6());
}

/* User entry, runs on regular task stack */
DEFINE_IDTENTRY_DEBUG_USER(exc_debug)
{
	exc_debug_user(regs, debug_read_clear_dr6());
}

#ifdef CONFIG_X86_FRED
/*
 * Depending on the ring level it occurred at, i.e., user or kernel
 * context, #DB needs to be handled on a different stack: a user #DB on
 * the current task stack, a kernel #DB on a dedicated stack.
 *
 * This is exactly how FRED event delivery invokes an exception
 * handler: a ring 3 event on the level 0 stack, i.e., the current task
 * stack; a ring 0 event on the #DB dedicated stack specified in the
 * IA32_FRED_STKLVLS MSR. So unlike IDT, the FRED debug exception
 * entry stub doesn't do a stack switch.
 */
DEFINE_FREDENTRY_DEBUG(exc_debug)
{
	/*
	 * FRED #DB stores DR6 on the stack in the format which
	 * debug_read_clear_dr6() returns for the IDT entry points.
	 */
	unsigned long dr6 = fred_event_data(regs);

	if (user_mode(regs))
		exc_debug_user(regs, dr6);
	else
		exc_debug_kernel(regs, dr6);
}
#endif /* CONFIG_X86_FRED */

#else
/* 32 bit does not have separate entry points. */
DEFINE_IDTENTRY_RAW(exc_debug)
{
	unsigned long dr6 = debug_read_clear_dr6();

	if (user_mode(regs))
		exc_debug_user(regs, dr6);
	else
		exc_debug_kernel(regs, dr6);
}
#endif

/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour.
 */
static void math_error(struct pt_regs *regs, int trapnr)
{
	struct task_struct *task = current;
	struct fpu *fpu = &task->thread.fpu;
	int si_code;
	char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
						"simd exception";

	cond_local_irq_enable(regs);

	if (!user_mode(regs)) {
		if (fixup_exception(regs, trapnr, 0, 0))
			goto exit;

		task->thread.error_code = 0;
		task->thread.trap_nr = trapnr;

		if (notify_die(DIE_TRAP, str, regs, 0, trapnr,
			       SIGFPE) != NOTIFY_STOP)
			die(str, regs, 0);
		goto exit;
	}

	/*
	 * Synchronize the FPU register state to the memory register state
	 * if necessary. This allows the exception handler to inspect it.
	 */
	fpu_sync_fpstate(fpu);

	task->thread.trap_nr	= trapnr;
	task->thread.error_code = 0;

	si_code = fpu__exception_code(fpu, trapnr);
	/* Retry when we get spurious exceptions: */
	if (!si_code)
		goto exit;

	if (fixup_vdso_exception(regs, trapnr, 0, 0))
		goto exit;

	force_sig_fault(SIGFPE, si_code,
			(void __user *)uprobe_get_trap_addr(regs));
exit:
	cond_local_irq_disable(regs);
}

DEFINE_IDTENTRY(exc_coprocessor_error)
{
	math_error(regs, X86_TRAP_MF);
}

DEFINE_IDTENTRY(exc_simd_coprocessor_error)
{
	if (IS_ENABLED(CONFIG_X86_INVD_BUG)) {
		/* AMD 486 bug: INVD in CPL 0 raises #XF instead of #GP */
		if (!static_cpu_has(X86_FEATURE_XMM)) {
			__exc_general_protection(regs, 0);
			return;
		}
	}
	math_error(regs, X86_TRAP_XF);
}

DEFINE_IDTENTRY(exc_spurious_interrupt_bug)
{
	/*
	 * This addresses a Pentium Pro Erratum:
	 *
	 * PROBLEM: If the APIC subsystem is configured in mixed mode with
	 * Virtual Wire mode implemented through the local APIC, an
	 * interrupt vector of 0Fh (Intel reserved encoding) may be
	 * generated by the local APIC (Int 15).  This vector may be
	 * generated upon receipt of a spurious interrupt (an interrupt
	 * which is removed before the system receives the INTA sequence)
	 * instead of the programmed 8259 spurious interrupt vector.
	 *
	 * IMPLICATION: The spurious interrupt vector programmed in the
	 * 8259 is normally handled by an operating system's spurious
	 * interrupt handler. However, a vector of 0Fh is unknown to some
	 * operating systems, which would crash if this erratum occurred.
	 *
	 * In theory this could be limited to 32bit, but the handler does
	 * no harm and who knows which other CPUs suffer from this.
	 */
}

static bool handle_xfd_event(struct pt_regs *regs)
{
	u64 xfd_err;
	int err;

	if (!IS_ENABLED(CONFIG_X86_64) || !cpu_feature_enabled(X86_FEATURE_XFD))
		return false;

	rdmsrl(MSR_IA32_XFD_ERR, xfd_err);
	if (!xfd_err)
		return false;

	wrmsrl(MSR_IA32_XFD_ERR, 0);

	/* Die if that happens in kernel space */
	if (WARN_ON(!user_mode(regs)))
		return false;

	local_irq_enable();

	err = xfd_enable_feature(xfd_err);

	switch (err) {
	case -EPERM:
		force_sig_fault(SIGILL, ILL_ILLOPC, error_get_trap_addr(regs));
		break;
	case -EFAULT:
		force_sig(SIGSEGV);
		break;
	}

	local_irq_disable();
	return true;
}

DEFINE_IDTENTRY(exc_device_not_available)
{
	unsigned long cr0 = read_cr0();

	if (handle_xfd_event(regs))
		return;

#ifdef CONFIG_MATH_EMULATION
	if (!boot_cpu_has(X86_FEATURE_FPU) && (cr0 & X86_CR0_EM)) {
		struct math_emu_info info = { };

		cond_local_irq_enable(regs);

		info.regs = regs;
		math_emulate(&info);

		cond_local_irq_disable(regs);
		return;
	}
#endif

	/* This should not happen. */
	if (WARN(cr0 & X86_CR0_TS, "CR0.TS was set")) {
		/* Try to fix it up and carry on. */
		write_cr0(cr0 & ~X86_CR0_TS);
	} else {
		/*
		 * Something terrible happened, and we're better off trying
		 * to kill the task than getting stuck in a never-ending
		 * loop of #NM faults.
		 */
		die("unexpected #NM exception", regs, 0);
	}
}

#ifdef CONFIG_INTEL_TDX_GUEST

#define VE_FAULT_STR "VE fault"

static void ve_raise_fault(struct pt_regs *regs, long error_code,
			   unsigned long address)
{
	if (user_mode(regs)) {
		gp_user_force_sig_segv(regs, X86_TRAP_VE, error_code, VE_FAULT_STR);
		return;
	}

	if (gp_try_fixup_and_notify(regs, X86_TRAP_VE, error_code,
				    VE_FAULT_STR, address)) {
		return;
	}

	die_addr(VE_FAULT_STR, regs, error_code, address);
}

/*
 * Virtualization Exceptions (#VE) are delivered to TDX guests due to
 * specific guest actions which may happen in either user space or the
 * kernel:
 *
 *  * Specific instructions (WBINVD, for example)
 *  * Specific MSR accesses
 *  * Specific CPUID leaf accesses
 *  * Access to specific guest physical addresses
 *
 * In the settings that Linux will run in, virtualization exceptions are
 * never generated on accesses to normal, TD-private memory that has been
 * accepted (by BIOS or with tdx_enc_status_changed()).
 *
 * Syscall entry code has a critical window where the kernel stack is not
 * yet set up. Any exception in this window leads to hard to debug issues
 * and can be exploited for privilege escalation. Exceptions in the NMI
 * entry code also cause issues. Returning from the exception handler with
 * IRET will re-enable NMIs and a nested NMI will corrupt the NMI stack.
 *
 * For these reasons, the kernel avoids #VEs during the syscall gap and
 * the NMI entry code. Entry code paths do not access TD-shared memory,
 * MMIO regions, use #VE triggering MSRs, instructions, or CPUID leaves
 * that might generate #VE. The VMM can remove memory from the TD at any
 * point, but access to unaccepted (or missing) private memory leads to
 * VM termination, not to #VE.
 *
 * Similarly to page faults and breakpoints, #VEs are allowed in NMI
 * handlers once the kernel is ready to deal with nested NMIs.
 *
 * During #VE delivery, all interrupts, including NMIs, are blocked until
 * TDGETVEINFO is called. It prevents #VE nesting until the kernel reads
 * the VE info.
 *
 * If a guest kernel action which would normally cause a #VE occurs in
 * the interrupt-disabled region before TDGETVEINFO, a #DF (double fault)
 * is delivered to the guest, which will result in an oops.
 *
 * The entry code has been audited carefully for following these expectations.
 * Changes in the entry code have to be audited for correctness vs. this
 * aspect. Similarly to #PF, #VE in these places will expose the kernel to
 * privilege escalation or may lead to random crashes.
 */
DEFINE_IDTENTRY(exc_virtualization_exception)
{
	struct ve_info ve;

	/*
	 * NMIs/Machine-checks/Interrupts will be in a disabled state
	 * till the TDGETVEINFO TDCALL is executed. This ensures that VE
	 * info cannot be overwritten by a nested #VE.
	 */
	tdx_get_ve_info(&ve);

	cond_local_irq_enable(regs);

	/*
	 * If tdx_handle_virt_exception() could not process
	 * it successfully, treat it as #GP(0) and handle it.
	 */
	if (!tdx_handle_virt_exception(regs, &ve))
		ve_raise_fault(regs, 0, ve.gla);

	cond_local_irq_disable(regs);
}

#endif

#ifdef CONFIG_X86_32
DEFINE_IDTENTRY_SW(iret_error)
{
	local_irq_enable();
	if (notify_die(DIE_TRAP, "iret exception", regs, 0,
			X86_TRAP_IRET, SIGILL) != NOTIFY_STOP) {
		do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, 0,
			ILL_BADSTK, (void __user *)NULL);
	}
	local_irq_disable();
}
#endif

void __init trap_init(void)
{
	/* Init cpu_entry_area before IST entries are set up */
	setup_cpu_entry_areas();

	/* Init GHCB memory pages when running as an SEV-ES guest */
	sev_es_init_vc_handling();

	/* Initialize TSS before setting up traps so ISTs work */
	cpu_init_exception_handling(true);

	/* Setup traps as cpu_init() might #GP */
	if (!cpu_feature_enabled(X86_FEATURE_FRED))
		idt_setup_traps();

	cpu_init();
}
v3.1
  1/*
  2 *  Copyright (C) 1991, 1992  Linus Torvalds
  3 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
  4 *
  5 *  Pentium III FXSR, SSE support
  6 *	Gareth Hughes <gareth@valinux.com>, May 2000
  7 */
  8
  9/*
 10 * Handle hardware traps and faults.
 11 */
 
 
 
 
 12#include <linux/interrupt.h>
 13#include <linux/kallsyms.h>
 
 14#include <linux/spinlock.h>
 15#include <linux/kprobes.h>
 16#include <linux/uaccess.h>
 17#include <linux/kdebug.h>
 18#include <linux/kgdb.h>
 19#include <linux/kernel.h>
 20#include <linux/module.h>
 21#include <linux/ptrace.h>
 
 22#include <linux/string.h>
 23#include <linux/delay.h>
 24#include <linux/errno.h>
 25#include <linux/kexec.h>
 26#include <linux/sched.h>
 
 27#include <linux/timer.h>
 28#include <linux/init.h>
 29#include <linux/bug.h>
 30#include <linux/nmi.h>
 31#include <linux/mm.h>
 32#include <linux/smp.h>
 
 33#include <linux/io.h>
 
 
 
 
 34
 35#ifdef CONFIG_EISA
 36#include <linux/ioport.h>
 37#include <linux/eisa.h>
 38#endif
 39
 40#ifdef CONFIG_MCA
 41#include <linux/mca.h>
 42#endif
 43
 44#if defined(CONFIG_EDAC)
 45#include <linux/edac.h>
 46#endif
 47
 48#include <asm/kmemcheck.h>
 49#include <asm/stacktrace.h>
 50#include <asm/processor.h>
 51#include <asm/debugreg.h>
 52#include <linux/atomic.h>
 53#include <asm/system.h>
 
 54#include <asm/traps.h>
 55#include <asm/desc.h>
 56#include <asm/i387.h>
 
 
 
 57#include <asm/mce.h>
 58
 59#include <asm/mach_traps.h>
 
 
 
 
 
 
 
 
 
 60
 61#ifdef CONFIG_X86_64
 62#include <asm/x86_init.h>
 63#include <asm/pgalloc.h>
 64#include <asm/proto.h>
 65#else
 66#include <asm/processor-flags.h>
 67#include <asm/setup.h>
 
 68
 69asmlinkage int system_call(void);
 70
 71/* Do we ignore FPU interrupts ? */
 72char ignore_fpu_irq;
 73
 74/*
 75 * The IDT has to be page-aligned to simplify the Pentium
 76 * F0 0F bug workaround.
 77 */
 78gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
 79#endif
 80
 81DECLARE_BITMAP(used_vectors, NR_VECTORS);
 82EXPORT_SYMBOL_GPL(used_vectors);
 83
 84static int ignore_nmis;
 
 
 85
 86int unknown_nmi_panic;
 87/*
 88 * Prevent NMI reason port (0x61) being accessed simultaneously, can
 89 * only be used in NMI handler.
 90 */
 91static DEFINE_RAW_SPINLOCK(nmi_reason_lock);
 
 
 92
 93static inline void conditional_sti(struct pt_regs *regs)
 94{
 95	if (regs->flags & X86_EFLAGS_IF)
 96		local_irq_enable();
 97}
 98
 99static inline void preempt_conditional_sti(struct pt_regs *regs)
100{
101	inc_preempt_count();
102	if (regs->flags & X86_EFLAGS_IF)
103		local_irq_enable();
104}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
105
106static inline void conditional_cli(struct pt_regs *regs)
107{
108	if (regs->flags & X86_EFLAGS_IF)
109		local_irq_disable();
110}
111
112static inline void preempt_conditional_cli(struct pt_regs *regs)
113{
114	if (regs->flags & X86_EFLAGS_IF)
115		local_irq_disable();
116	dec_preempt_count();
117}
118
119static void __kprobes
120do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
121	long error_code, siginfo_t *info)
122{
123	struct task_struct *tsk = current;
124
125#ifdef CONFIG_X86_32
126	if (regs->flags & X86_VM_MASK) {
127		/*
128		 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
129		 * On nmi (interrupt 2), do_trap should not be called.
130		 */
131		if (trapnr < 6)
132			goto vm86_trap;
133		goto trap_signal;
 
 
 
 
 
 
 
 
 
 
 
 
134	}
135#endif
136
137	if (!user_mode(regs))
138		goto kernel_trap;
139
140#ifdef CONFIG_X86_32
141trap_signal:
142#endif
143	/*
144	 * We want error_code and trap_no set for userspace faults and
145	 * kernelspace faults which result in die(), but not
146	 * kernelspace faults which are fixed up.  die() gives the
147	 * process no chance to handle the signal and notice the
148	 * kernel fault information, so that won't result in polluting
149	 * the information about previously queued, but not yet
150	 * delivered, faults.  See also do_general_protection below.
151	 */
152	tsk->thread.error_code = error_code;
153	tsk->thread.trap_no = trapnr;
154
155#ifdef CONFIG_X86_64
 
 
 
 
 
 
156	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
157	    printk_ratelimit()) {
158		printk(KERN_INFO
159		       "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
160		       tsk->comm, tsk->pid, str,
161		       regs->ip, regs->sp, error_code);
162		print_vma_addr(" in ", regs->ip);
163		printk("\n");
164	}
165#endif
 
 
 
 
 
 
 
 
 
 
 
166
167	if (info)
168		force_sig_info(signr, info, tsk);
169	else
170		force_sig(signr, tsk);
171	return;
 
 
 
 
 
 
172
173kernel_trap:
174	if (!fixup_exception(regs)) {
175		tsk->thread.error_code = error_code;
176		tsk->thread.trap_no = trapnr;
177		die(str, regs, error_code);
178	}
179	return;
180
181#ifdef CONFIG_X86_32
182vm86_trap:
183	if (handle_vm86_trap((struct kernel_vm86_regs *) regs,
184						error_code, trapnr))
185		goto trap_signal;
186	return;
187#endif
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
188}
189
190#define DO_ERROR(trapnr, signr, str, name)				\
191dotraplinkage void do_##name(struct pt_regs *regs, long error_code)	\
192{									\
193	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)	\
194							== NOTIFY_STOP)	\
195		return;							\
196	conditional_sti(regs);						\
197	do_trap(trapnr, signr, str, regs, error_code, NULL);		\
198}
199
200#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr)		\
201dotraplinkage void do_##name(struct pt_regs *regs, long error_code)	\
202{									\
203	siginfo_t info;							\
204	info.si_signo = signr;						\
205	info.si_errno = 0;						\
206	info.si_code = sicode;						\
207	info.si_addr = (void __user *)siaddr;				\
208	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)	\
209							== NOTIFY_STOP)	\
210		return;							\
211	conditional_sti(regs);						\
212	do_trap(trapnr, signr, str, regs, error_code, &info);		\
213}
214
215DO_ERROR_INFO(0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip)
216DO_ERROR(4, SIGSEGV, "overflow", overflow)
217DO_ERROR(5, SIGSEGV, "bounds", bounds)
218DO_ERROR_INFO(6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip)
219DO_ERROR(9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
220DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
221DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
222#ifdef CONFIG_X86_32
223DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
224#endif
225DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
226
227#ifdef CONFIG_X86_64
228/* Runs on IST stack */
229dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code)
230{
231	if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
232			12, SIGBUS) == NOTIFY_STOP)
233		return;
234	preempt_conditional_sti(regs);
235	do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL);
236	preempt_conditional_cli(regs);
237}
238
239dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
240{
241	static const char str[] = "double fault";
242	struct task_struct *tsk = current;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
243
244	/* Return not checked because double check cannot be ignored */
245	notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV);
246
247	tsk->thread.error_code = error_code;
248	tsk->thread.trap_no = 8;
 
249
250	/*
251	 * This is always a kernel trap and never fixable (and thus must
252	 * never return).
 
253	 */
254	for (;;)
255		die(str, regs, error_code);
 
 
 
 
 
 
256}
257#endif
258
259dotraplinkage void __kprobes
260do_general_protection(struct pt_regs *regs, long error_code)
261{
262	struct task_struct *tsk;
 
 
263
264	conditional_sti(regs);
 
 
 
 
265
266#ifdef CONFIG_X86_32
267	if (regs->flags & X86_VM_MASK)
268		goto gp_in_vm86;
269#endif
 
270
271	tsk = current;
272	if (!user_mode(regs))
273		goto gp_in_kernel;
 
 
274
275	tsk->thread.error_code = error_code;
276	tsk->thread.trap_no = 13;
 
277
278	if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
279			printk_ratelimit()) {
280		printk(KERN_INFO
281			"%s[%d] general protection ip:%lx sp:%lx error:%lx",
282			tsk->comm, task_pid_nr(tsk),
283			regs->ip, regs->sp, error_code);
284		print_vma_addr(" in ", regs->ip);
285		printk("\n");
286	}
287
288	force_sig(SIGSEGV, tsk);
289	return;
290
291#ifdef CONFIG_X86_32
292gp_in_vm86:
293	local_irq_enable();
294	handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
295	return;
296#endif
297
298gp_in_kernel:
299	if (fixup_exception(regs))
300		return;
 
 
301
302	tsk->thread.error_code = error_code;
303	tsk->thread.trap_no = 13;
304	if (notify_die(DIE_GPF, "general protection fault", regs,
305				error_code, 13, SIGSEGV) == NOTIFY_STOP)
306		return;
307	die("general protection fault", regs, error_code);
308}
309
310static int __init setup_unknown_nmi_panic(char *str)
 
 
 
311{
312	unknown_nmi_panic = 1;
313	return 1;
 
 
 
 
 
 
 
314}
315__setup("unknown_nmi_panic", setup_unknown_nmi_panic);
316
317static notrace __kprobes void
318pci_serr_error(unsigned char reason, struct pt_regs *regs)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
319{
320	pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n",
321		 reason, smp_processor_id());
 
 
 
 
 
 
 
 
322
323	/*
324	 * On some machines, PCI SERR line is used to report memory
325	 * errors. EDAC makes use of it.
326	 */
327#if defined(CONFIG_EDAC)
328	if (edac_handler_set()) {
329		edac_atomic_assert_error();
330		return;
331	}
332#endif
333
334	if (panic_on_unrecovered_nmi)
335		panic("NMI: Not continuing");
336
337	pr_emerg("Dazed and confused, but trying to continue\n");
338
339	/* Clear and disable the PCI SERR error line. */
340	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR;
341	outb(reason, NMI_REASON_PORT);
342}
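/*
 * Hardware note (an assumption based on PC-compatible platforms, where
 * NMI_REASON_PORT is System Control Port B, I/O port 0x61): reading
 * the port yields the SERR/IOCHK reason bits, and writing it back with
 * one of the NMI_REASON_CLEAR_* bits set both clears and disables that
 * edge-triggered source; clearing the bit again re-arms it, as
 * io_check_error() below does after its delay loop.
 */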
343
344static notrace __kprobes void
345io_check_error(unsigned char reason, struct pt_regs *regs)
346{
347	unsigned long i;
348
349	pr_emerg(
350		"NMI: IOCK error (debug interrupt?) for reason %02x on CPU %d.\n",
351		 reason, smp_processor_id());
352	show_registers(regs);
353
354	if (panic_on_io_nmi)
355		panic("NMI IOCK error: Not continuing");
356
357	/* Re-enable the IOCK line and wait ~2 seconds (20000 * 100us) */
358	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK;
359	outb(reason, NMI_REASON_PORT);
360
361	i = 20000;
362	while (--i) {
363		touch_nmi_watchdog();
364		udelay(100);
365	}
366
367	reason &= ~NMI_REASON_CLEAR_IOCHK;
368	outb(reason, NMI_REASON_PORT);
369}
370
371static notrace __kprobes void
372unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
373{
374	if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) ==
375			NOTIFY_STOP)
376		return;
377#ifdef CONFIG_MCA
378	/*
379	 * Might actually be able to figure out what the guilty party
380	 * is:
381	 */
382	if (MCA_bus) {
383		mca_handle_nmi();
384		return;
385	}
386#endif
387	pr_emerg("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
388		 reason, smp_processor_id());
389
390	pr_emerg("Do you have a strange power saving mode enabled?\n");
391	if (unknown_nmi_panic || panic_on_unrecovered_nmi)
392		panic("NMI: Not continuing");
393
394	pr_emerg("Dazed and confused, but trying to continue\n");
395}
396
397static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
398{
399	unsigned char reason = 0;
400
401	/*
402	 * CPU-specific NMI must be processed before non-CPU-specific
403	 * NMI, otherwise we may lose it, because the CPU-specific
404	 * NMI can not be detected/processed on other CPUs.
405	 */
406	if (notify_die(DIE_NMI, "nmi", regs, 0, 2, SIGINT) == NOTIFY_STOP)
407		return;
408
409	/* Non-CPU-specific NMI: NMI sources can be processed on any CPU */
410	raw_spin_lock(&nmi_reason_lock);
411	reason = get_nmi_reason();
412
413	if (reason & NMI_REASON_MASK) {
414		if (reason & NMI_REASON_SERR)
415			pci_serr_error(reason, regs);
416		else if (reason & NMI_REASON_IOCHK)
417			io_check_error(reason, regs);
418#ifdef CONFIG_X86_32
419		/*
420		 * Reassert NMI in case it became active
421		 * meanwhile as it's edge-triggered:
422		 */
423		reassert_nmi();
424#endif
425		raw_spin_unlock(&nmi_reason_lock);
426		return;
427	}
428	raw_spin_unlock(&nmi_reason_lock);
429
430	unknown_nmi_error(reason, regs);
431}
432
433dotraplinkage notrace __kprobes void
434do_nmi(struct pt_regs *regs, long error_code)
435{
436	nmi_enter();
437
438	inc_irq_stat(__nmi_count);
439
440	if (!ignore_nmis)
441		default_do_nmi(regs);
442
443	nmi_exit();
444}
445
446void stop_nmi(void)
447{
448	ignore_nmis++;
449}
450
451void restart_nmi(void)
452{
453	ignore_nmis--;
454}
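/*
 * Illustrative pairing (a sketch, not a caller in this file): code
 * that cannot tolerate NMI processing brackets itself with the two
 * helpers above.  Since ignore_nmis is a counter, the pairs nest:
 *
 *	stop_nmi();
 *	...	// fragile section: do_nmi() still fires,
 *		// but skips default_do_nmi()
 *	restart_nmi();
 */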
455
456/* May run on IST stack. */
457dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
458{
459#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
460	if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
461			== NOTIFY_STOP)
462		return;
463#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
464#ifdef CONFIG_KPROBES
465	if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
466			== NOTIFY_STOP)
467		return;
468#else
469	if (notify_die(DIE_TRAP, "int3", regs, error_code, 3, SIGTRAP)
470			== NOTIFY_STOP)
471		return;
472#endif
473
474	preempt_conditional_sti(regs);
475	do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
476	preempt_conditional_cli(regs);
477}
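/*
 * Background (an illustrative sketch, not code from this file): int3
 * is the single-byte opcode 0xcc, so a debugger or kprobes can plant
 * it over the first byte of any instruction:
 *
 *	u8 orig = *(u8 *)addr;		// save the original byte
 *	*(u8 *)addr = 0xcc;		// trap now lands in do_int3()
 *
 * That is why kgdb and kprobes get first refusal on the notifier
 * chains above before the trap falls through to a plain SIGTRAP.
 */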
478
479#ifdef CONFIG_X86_64
480/*
481 * Help handler running on IST stack to switch back to user stack
482 * for scheduling or signal handling. The actual stack switch is done in
483 * entry.S
484 */
485asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
486{
487	struct pt_regs *regs = eregs;
488	/* Already synced */
489	if (eregs == (struct pt_regs *)eregs->sp)
490		;
491	/* Exception from user space */
492	else if (user_mode(eregs))
493		regs = task_pt_regs(current);
494	/*
495	 * Exception from kernel and interrupts are enabled. Move to
496	 * kernel process stack.
497	 */
498	else if (eregs->flags & X86_EFLAGS_IF)
499		regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs));
500	if (eregs != regs)
501		*regs = *eregs;
502	return regs;
503}
504#endif
505
506/*
507 * Our handling of the processor debug registers is non-trivial.
508 * We do not clear them on entry and exit from the kernel. Therefore
509 * it is possible to get a watchpoint trap here from inside the kernel.
510 * However, the code in ./ptrace.c has ensured that the user can
511 * only set watchpoints on userspace addresses. Therefore the in-kernel
512 * watchpoint trap can only occur in code which is reading/writing
513 * from user space. Such code must not hold kernel locks (since it
514 * can equally take a page fault), therefore it is safe to call
515 * force_sig_info even though that claims and releases locks.
516 *
517 * Code in ./signal.c ensures that the debug control register
518 * is restored before we deliver any signal, and therefore that
519 * user code runs with the correct debug control register even though
520 * we clear it here.
521 *
522 * Being careful here means that we don't have to be as careful in a
523 * lot of more complicated places (task switching can be a bit lazy
524 * about restoring all the debug state, and ptrace doesn't have to
525 * find every occurrence of the TF bit that could be saved away even
526	 * by user code).
527 *
528 * May run on IST stack.
529 */
530dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
531{
532	struct task_struct *tsk = current;
533	int user_icebp = 0;
534	unsigned long dr6;
535	int si_code;
536
537	get_debugreg(dr6, 6);
538
539	/* Filter out all the reserved bits which are preset to 1 */
540	dr6 &= ~DR6_RESERVED;
541
542	/*
543	 * If dr6 gives us no reason for the origin of this trap, it is
544	 * very likely the result of an icebp/int01 trap; userspace
545	 * expects a SIGTRAP for that.
546	 */
547	if (!dr6 && user_mode(regs))
548		user_icebp = 1;
549
550	/* Catch kmemcheck conditions first of all! */
551	if ((dr6 & DR_STEP) && kmemcheck_trap(regs))
552		return;
553
554	/* DR6 may or may not be cleared by the CPU */
555	set_debugreg(0, 6);
556
557	/*
558	 * The processor cleared BTF, so don't mark that we need it set.
559	 */
560	clear_tsk_thread_flag(tsk, TIF_BLOCKSTEP);
561
562	/* Store the virtualized DR6 value */
563	tsk->thread.debugreg6 = dr6;
564
565	if (notify_die(DIE_DEBUG, "debug", regs, PTR_ERR(&dr6), error_code,
566							SIGTRAP) == NOTIFY_STOP)
567		return;
568
569	/* It's safe to allow IRQs after DR6 has been saved */
570	preempt_conditional_sti(regs);
571
572	if (regs->flags & X86_VM_MASK) {
573		handle_vm86_trap((struct kernel_vm86_regs *) regs,
574				error_code, 1);
575		preempt_conditional_cli(regs);
576		return;
577	}
578
579	/*
580	 * Single-stepping through system calls: ignore any exceptions in
581	 * kernel space, but re-enable TF when returning to user mode.
582	 *
583	 * We already checked v86 mode above, so we can check for kernel mode
584	 * by just checking the CPL of CS.
585	 */
586	if ((dr6 & DR_STEP) && !user_mode(regs)) {
587		tsk->thread.debugreg6 &= ~DR_STEP;
588		set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
589		regs->flags &= ~X86_EFLAGS_TF;
590	}
591	si_code = get_si_code(tsk->thread.debugreg6);
592	if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
593		send_sigtrap(tsk, regs, error_code, si_code);
594	preempt_conditional_cli(regs);
595
596	return;
597}
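/*
 * DR6 bits referenced above (standard x86 layout): bits 0-3
 * (DR_TRAP_BITS) are B0-B3, set when the matching hardware breakpoint
 * in DR0-DR3 fires; bit 14 (DR_STEP) is BS, the single-step trap.
 * That is why a dr6 of zero from user mode is taken as an icebp/int01
 * and flagged via user_icebp above.
 */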
598
599/*
600 * Note that we play around with the 'TS' bit in an attempt to get
601 * the correct behaviour even in the presence of the asynchronous
602 * IRQ13 behaviour
603 */
604void math_error(struct pt_regs *regs, int error_code, int trapnr)
605{
606	struct task_struct *task = current;
607	siginfo_t info;
608	unsigned short err;
609	char *str = (trapnr == 16) ? "fpu exception" : "simd exception";
610
611	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
612		return;
613	conditional_sti(regs);
614
615	if (!user_mode_vm(regs)) {
617		if (!fixup_exception(regs)) {
618			task->thread.error_code = error_code;
619			task->thread.trap_no = trapnr;
620			die(str, regs, error_code);
621		}
622		return;
623	}
624
625	/*
626	 * Save the info for the exception handler and clear the error.
627	 */
628	save_init_fpu(task);
629	task->thread.trap_no = trapnr;
630	task->thread.error_code = error_code;
631	info.si_signo = SIGFPE;
632	info.si_errno = 0;
633	info.si_addr = (void __user *)regs->ip;
634	if (trapnr == 16) {
635		unsigned short cwd, swd;
636		/*
637		 * (~cwd & swd) will mask out exceptions that are not set to unmasked
638		 * status.  0x3f is the exception bits in these regs, 0x200 is the
639		 * C1 flag you need in case of a stack fault, 0x040 is the stack
640		 * fault bit.  We should only be taking one exception at a time,
641		 * so if this combination doesn't produce any single exception,
642		 * then we have a bad program that isn't synchronizing its FPU usage,
643		 * and it will suffer the consequences since we won't be able to
644		 * fully reproduce the context of the exception.
645		 */
646		cwd = get_fpu_cwd(task);
647		swd = get_fpu_swd(task);
648
649		err = swd & ~cwd;
650	} else {
651		/*
652		 * The SIMD FPU exceptions are handled a little differently, as there
653		 * is only a single status/control register.  Thus, to determine which
654		 * unmasked exception was caught we must mask the exception mask bits
655		 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
656		 */
657		unsigned short mxcsr = get_fpu_mxcsr(task);
658		err = ~(mxcsr >> 7) & mxcsr;
659	}
660
661	if (err & 0x001) {	/* Invalid op */
662		/*
663		 * swd & 0x240 == 0x040: Stack Underflow
664		 * swd & 0x240 == 0x240: Stack Overflow
665		 * User must clear the SF bit (0x40) if set
666		 */
667		info.si_code = FPE_FLTINV;
668	} else if (err & 0x004) { /* Divide by Zero */
669		info.si_code = FPE_FLTDIV;
670	} else if (err & 0x008) { /* Overflow */
671		info.si_code = FPE_FLTOVF;
672	} else if (err & 0x012) { /* Denormal, Underflow */
673		info.si_code = FPE_FLTUND;
674	} else if (err & 0x020) { /* Precision */
675		info.si_code = FPE_FLTRES;
676	} else {
677		/*
678		 * If we're using IRQ 13, or supposedly even some trap 16
679		 * implementations, it's possible we get a spurious trap...
680		 */
681		return;		/* Spurious trap, no error */
682	}
683	force_sig_info(SIGFPE, &info, task);
684}
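/*
 * Worked example for the SIMD branch above (values chosen purely for
 * illustration): MXCSR keeps the exception flags in bits 0-5 and the
 * matching mask bits in bits 7-12, so "~(mxcsr >> 7) & mxcsr" slides
 * the masks down over the flags and keeps only unmasked exceptions.
 * With divide-by-zero unmasked (ZM clear) and raised (ZE set):
 *
 *	mxcsr = 0x1d84;			// default 0x1f80, ZM cleared, ZE set
 *	err = ~(0x1d84 >> 7) & 0x1d84;	// low six bits: 0x04
 *	err & 0x004			// -> FPE_FLTDIV above
 */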
685
686dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
687{
688#ifdef CONFIG_X86_32
689	ignore_fpu_irq = 1;
690#endif
691
692	math_error(regs, error_code, 16);
693}
694
695dotraplinkage void
696do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
697{
698	math_error(regs, error_code, 19);
699}
700
701dotraplinkage void
702do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
703{
704	conditional_sti(regs);
705#if 0
706	/* No need to warn about this any longer. */
707	printk(KERN_INFO "Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
708#endif
709}
710
711asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
712{
713}
714
715asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
716{
717}
718
719/*
720 * __math_state_restore assumes that cr0.TS is already clear and the
721 * fpu state is all ready for use.  Used during context switch.
722 */
723void __math_state_restore(void)
724{
725	struct thread_info *thread = current_thread_info();
726	struct task_struct *tsk = thread->task;
727
728	/*
729	 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
730	 */
731	if (unlikely(restore_fpu_checking(tsk))) {
732		stts();
733		force_sig(SIGSEGV, tsk);
734		return;
735	}
736
737	thread->status |= TS_USEDFPU;	/* So we fnsave on switch_to() */
738	tsk->fpu_counter++;
739}
740
741/*
742 * 'math_state_restore()' saves the current math information in the
743 * old math state array, and gets the new ones from the current task
744 *
745 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
746 * Don't touch unless you *really* know how it works.
747 *
748 * Must be called with kernel preemption disabled (in this case,
749 * local interrupts are disabled at the call-site in entry.S).
750 */
751asmlinkage void math_state_restore(void)
752{
753	struct thread_info *thread = current_thread_info();
754	struct task_struct *tsk = thread->task;
755
756	if (!tsk_used_math(tsk)) {
757		local_irq_enable();
758		/*
759		 * does a slab alloc which can sleep
760		 */
761		if (init_fpu(tsk)) {
762			/*
763			 * ran out of memory!
764			 */
765			do_group_exit(SIGKILL);
766			return;
767		}
768		local_irq_disable();
769	}
770
771	clts();				/* Allow maths ops (or we recurse) */
772
773	__math_state_restore();
774}
775EXPORT_SYMBOL_GPL(math_state_restore);
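/*
 * How the pieces fit (sketch): the lazy-FPU protocol keys off CR0.TS.
 * A context switch sets TS via stts(); the first FPU instruction the
 * task then executes faults with #NM into do_device_not_available()
 * below, which lands here:
 *
 *	stts();			// switch_to(): FPU contents are stale
 *	...			// task runs an FPU insn -> #NM trap
 *	math_state_restore();	// clts() + restore; the insn is retried
 */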
776
777dotraplinkage void __kprobes
778do_device_not_available(struct pt_regs *regs, long error_code)
779{
780#ifdef CONFIG_MATH_EMULATION
781	if (read_cr0() & X86_CR0_EM) {
782		struct math_emu_info info = { };
783
784		conditional_sti(regs);
785
786		info.regs = regs;
787		math_emulate(&info);
788		return;
789	}
790#endif
791	math_state_restore(); /* interrupts still off */
792#ifdef CONFIG_X86_32
793	conditional_sti(regs);
794#endif
795}
796
797#ifdef CONFIG_X86_32
798dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
799{
800	siginfo_t info;
801	local_irq_enable();
802
803	info.si_signo = SIGILL;
804	info.si_errno = 0;
805	info.si_code = ILL_BADSTK;
806	info.si_addr = NULL;
807	if (notify_die(DIE_TRAP, "iret exception",
808			regs, error_code, 32, SIGILL) == NOTIFY_STOP)
809		return;
810	do_trap(32, SIGILL, "iret exception", regs, error_code, &info);
811}
812#endif
813
814/* Set of traps needed for early debugging. */
815void __init early_trap_init(void)
816{
817	set_intr_gate_ist(1, &debug, DEBUG_STACK);
818	/* int3 can be called from all */
819	set_system_intr_gate_ist(3, &int3, DEBUG_STACK);
820	set_intr_gate(14, &page_fault);
821	load_idt(&idt_descr);
822}
823
824void __init trap_init(void)
825{
826	int i;
827
828#ifdef CONFIG_EISA
829	void __iomem *p = early_ioremap(0x0FFFD9, 4);
830
831	if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24))
832		EISA_bus = 1;
833	early_iounmap(p, 4);
834#endif
835
836	set_intr_gate(0, &divide_error);
837	set_intr_gate_ist(2, &nmi, NMI_STACK);
838	/* int4 can be called from all */
839	set_system_intr_gate(4, &overflow);
840	set_intr_gate(5, &bounds);
841	set_intr_gate(6, &invalid_op);
842	set_intr_gate(7, &device_not_available);
843#ifdef CONFIG_X86_32
844	set_task_gate(8, GDT_ENTRY_DOUBLEFAULT_TSS);
845#else
846	set_intr_gate_ist(8, &double_fault, DOUBLEFAULT_STACK);
847#endif
848	set_intr_gate(9, &coprocessor_segment_overrun);
849	set_intr_gate(10, &invalid_TSS);
850	set_intr_gate(11, &segment_not_present);
851	set_intr_gate_ist(12, &stack_segment, STACKFAULT_STACK);
852	set_intr_gate(13, &general_protection);
853	set_intr_gate(15, &spurious_interrupt_bug);
854	set_intr_gate(16, &coprocessor_error);
855	set_intr_gate(17, &alignment_check);
856#ifdef CONFIG_X86_MCE
857	set_intr_gate_ist(18, &machine_check, MCE_STACK);
858#endif
859	set_intr_gate(19, &simd_coprocessor_error);
860
861	/* Reserve all the builtin and the syscall vector: */
862	for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
863		set_bit(i, used_vectors);
864
865#ifdef CONFIG_IA32_EMULATION
866	set_system_intr_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
867	set_bit(IA32_SYSCALL_VECTOR, used_vectors);
868#endif
869
870#ifdef CONFIG_X86_32
871	set_system_trap_gate(SYSCALL_VECTOR, &system_call);
872	set_bit(SYSCALL_VECTOR, used_vectors);
873#endif
874
875	/*
876	 * Should be a barrier for any external CPU state:
877	 */
878	cpu_init();
879
880	x86_init.irqs.trap_init();
881}