v4.10.11 (arch/x86/kernel/traps.c)
/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/context_tracking.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/io.h>

#ifdef CONFIG_EISA
#include <linux/ioport.h>
#include <linux/eisa.h>
#endif

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <asm/kmemcheck.h>
#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <linux/atomic.h>
#include <asm/text-patching.h>
#include <asm/ftrace.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/fpu/internal.h>
#include <asm/mce.h>
#include <asm/fixmap.h>
#include <asm/mach_traps.h>
#include <asm/alternative.h>
#include <asm/fpu/xstate.h>
#include <asm/trace/mpx.h>
#include <asm/mpx.h>
#include <asm/vm86.h>

#ifdef CONFIG_X86_64
#include <asm/x86_init.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>

/* No need to be aligned, but done to keep all IDTs defined the same way. */
gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
#else
#include <asm/processor-flags.h>
#include <asm/setup.h>
#include <asm/proto.h>
#endif

/* Must be page-aligned because the real IDT is used in a fixmap. */
gate_desc idt_table[NR_VECTORS] __page_aligned_bss;

DECLARE_BITMAP(used_vectors, NR_VECTORS);
EXPORT_SYMBOL_GPL(used_vectors);

static inline void cond_local_irq_enable(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void cond_local_irq_disable(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_disable();
}

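/*
 * Descriptive note: the two helpers above mirror the interrupted
 * context's EFLAGS.IF.  A trap taken from an irqs-off region must not
 * spuriously enable interrupts, so handlers bracket sleepable work with
 * cond_local_irq_enable()/cond_local_irq_disable() rather than the
 * unconditional local_irq_* variants.
 */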
/*
 * In IST context, we explicitly disable preemption.  This serves two
 * purposes: it makes it much less likely that we would accidentally
 * schedule in IST context and it will force a warning if we somehow
 * manage to schedule by accident.
 */
void ist_enter(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
	} else {
		/*
		 * We might have interrupted pretty much anything.  In
		 * fact, if we're a machine check, we can even interrupt
		 * NMI processing.  We don't want in_nmi() to return true,
		 * but we need to notify RCU.
		 */
		rcu_nmi_enter();
	}

	preempt_disable();

	/* This code is a bit fragile.  Test it. */
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "ist_enter didn't work");
}

void ist_exit(struct pt_regs *regs)
{
	preempt_enable_no_resched();

	if (!user_mode(regs))
		rcu_nmi_exit();
}

/**
 * ist_begin_non_atomic() - begin a non-atomic section in an IST exception
 * @regs:	regs passed to the IST exception handler
 *
 * IST exception handlers normally cannot schedule.  As a special
 * exception, if the exception interrupted userspace code (i.e.
 * user_mode(regs) would return true) and the exception was not
 * a double fault, it can be safe to schedule.  ist_begin_non_atomic()
 * begins a non-atomic section within an ist_enter()/ist_exit() region.
 * Callers are responsible for enabling interrupts themselves inside
 * the non-atomic section, and callers must call ist_end_non_atomic()
 * before ist_exit().
 */
void ist_begin_non_atomic(struct pt_regs *regs)
{
	BUG_ON(!user_mode(regs));

	/*
	 * Sanity check: we need to be on the normal thread stack.  This
	 * will catch asm bugs and any attempt to use ist_preempt_enable
	 * from double_fault.
	 */
	BUG_ON((unsigned long)(current_top_of_stack() -
			       current_stack_pointer()) >= THREAD_SIZE);

	preempt_enable_no_resched();
}

/**
 * ist_end_non_atomic() - end a non-atomic section in an IST exception
 *
 * Ends a non-atomic section started with ist_begin_non_atomic().
 */
void ist_end_non_atomic(void)
{
	preempt_disable();
}

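/*
 * Illustrative usage sketch, modeled on the machine-check handler
 * (do_machine_check() in cpu/mcheck/mce.c); this is not code from this
 * file:
 *
 *	ist_enter(regs);
 *	...
 *	if (user_mode(regs)) {
 *		ist_begin_non_atomic(regs);
 *		local_irq_enable();
 *		...		// may schedule here
 *		local_irq_disable();
 *		ist_end_non_atomic();
 *	}
 *	ist_exit(regs);
 */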
static nokprobe_inline int
do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
		  struct pt_regs *regs, long error_code)
{
	if (v8086_mode(regs)) {
		/*
		 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
		 * On nmi (interrupt 2), do_trap should not be called.
		 */
		if (trapnr < X86_TRAP_UD) {
			if (!handle_vm86_trap((struct kernel_vm86_regs *) regs,
						error_code, trapnr))
				return 0;
		}
		return -1;
	}

	if (!user_mode(regs)) {
		if (!fixup_exception(regs, trapnr)) {
			tsk->thread.error_code = error_code;
			tsk->thread.trap_nr = trapnr;
			die(str, regs, error_code);
		}
		return 0;
	}

	return -1;
}

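/*
 * Return convention for do_trap_no_signal() above (descriptive note):
 * 0 means the trap was fully handled here (vm86 consumed it, or a
 * kernel-mode fault was fixed up or died); -1 means the caller should
 * go on to deliver a signal to the task.
 */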
static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr,
				siginfo_t *info)
{
	unsigned long siaddr;
	int sicode;

	switch (trapnr) {
	default:
		return SEND_SIG_PRIV;

	case X86_TRAP_DE:
		sicode = FPE_INTDIV;
		siaddr = uprobe_get_trap_addr(regs);
		break;
	case X86_TRAP_UD:
		sicode = ILL_ILLOPN;
		siaddr = uprobe_get_trap_addr(regs);
		break;
	case X86_TRAP_AC:
		sicode = BUS_ADRALN;
		siaddr = 0;
		break;
	}

	info->si_signo = signr;
	info->si_errno = 0;
	info->si_code = sicode;
	info->si_addr = (void __user *)siaddr;
	return info;
}

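/*
 * Descriptive note: the returned pointer goes straight to
 * force_sig_info().  Returning SEND_SIG_PRIV instead of a filled-in
 * siginfo makes the signal look purely kernel-generated (si_code
 * SI_KERNEL) for traps that carry no useful fault details.
 */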
static void
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
	long error_code, siginfo_t *info)
{
	struct task_struct *tsk = current;

	if (!do_trap_no_signal(tsk, trapnr, str, regs, error_code))
		return;
	/*
	 * We want error_code and trap_nr set for userspace faults and
	 * kernelspace faults which result in die(), but not
	 * kernelspace faults which are fixed up.  die() gives the
	 * process no chance to handle the signal and notice the
	 * kernel fault information, so that won't result in polluting
	 * the information about previously queued, but not yet
	 * delivered, faults.  See also do_general_protection below.
	 */
	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = trapnr;

	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
	    printk_ratelimit()) {
		pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
			tsk->comm, tsk->pid, str,
			regs->ip, regs->sp, error_code);
		print_vma_addr(" in ", regs->ip);
		pr_cont("\n");
	}

	force_sig_info(signr, info ?: SEND_SIG_PRIV, tsk);
}
NOKPROBE_SYMBOL(do_trap);

static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
			  unsigned long trapnr, int signr)
{
	siginfo_t info;

	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");

	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
			NOTIFY_STOP) {
		cond_local_irq_enable(regs);
		do_trap(trapnr, signr, str, regs, error_code,
			fill_trap_info(regs, signr, trapnr, &info));
	}
}

#define DO_ERROR(trapnr, signr, str, name)				\
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)	\
{									\
	do_error_trap(regs, error_code, str, trapnr, signr);		\
}

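/*
 * For reference, the first invocation below expands to (sketch):
 *
 *	dotraplinkage void do_divide_error(struct pt_regs *regs,
 *					   long error_code)
 *	{
 *		do_error_trap(regs, error_code, "divide error",
 *			      X86_TRAP_DE, SIGFPE);
 *	}
 */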
DO_ERROR(X86_TRAP_DE,     SIGFPE,  "divide error",		divide_error)
DO_ERROR(X86_TRAP_OF,     SIGSEGV, "overflow",			overflow)
DO_ERROR(X86_TRAP_UD,     SIGILL,  "invalid opcode",		invalid_op)
DO_ERROR(X86_TRAP_OLD_MF, SIGFPE,  "coprocessor segment overrun",coprocessor_segment_overrun)
DO_ERROR(X86_TRAP_TS,     SIGSEGV, "invalid TSS",		invalid_TSS)
DO_ERROR(X86_TRAP_NP,     SIGBUS,  "segment not present",	segment_not_present)
DO_ERROR(X86_TRAP_SS,     SIGBUS,  "stack segment",		stack_segment)
DO_ERROR(X86_TRAP_AC,     SIGBUS,  "alignment check",		alignment_check)

#ifdef CONFIG_VMAP_STACK
__visible void __noreturn handle_stack_overflow(const char *message,
						struct pt_regs *regs,
						unsigned long fault_address)
{
	printk(KERN_EMERG "BUG: stack guard page was hit at %p (stack is %p..%p)\n",
		 (void *)fault_address, current->stack,
		 (char *)current->stack + THREAD_SIZE - 1);
	die(message, regs, 0);

	/* Be absolutely certain we don't return. */
	panic(message);
}
#endif

#ifdef CONFIG_X86_64
/* Runs on IST stack */
dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
{
	static const char str[] = "double fault";
	struct task_struct *tsk = current;
#ifdef CONFIG_VMAP_STACK
	unsigned long cr2;
#endif

#ifdef CONFIG_X86_ESPFIX64
	extern unsigned char native_irq_return_iret[];

	/*
	 * If IRET takes a non-IST fault on the espfix64 stack, then we
	 * end up promoting it to a doublefault.  In that case, modify
	 * the stack to make it look like we just entered the #GP
	 * handler from user space, similar to bad_iret.
	 *
	 * No need for ist_enter here because we don't use RCU.
	 */
	if (((long)regs->sp >> PGDIR_SHIFT) == ESPFIX_PGD_ENTRY &&
		regs->cs == __KERNEL_CS &&
		regs->ip == (unsigned long)native_irq_return_iret)
	{
		struct pt_regs *normal_regs = task_pt_regs(current);

		/* Fake a #GP(0) from userspace. */
		memmove(&normal_regs->ip, (void *)regs->sp, 5*8);
		normal_regs->orig_ax = 0;  /* Missing (lost) #GP error code */
		regs->ip = (unsigned long)general_protection;
		regs->sp = (unsigned long)&normal_regs->orig_ax;

		return;
	}
#endif

	ist_enter(regs);
	notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_DF;

#ifdef CONFIG_VMAP_STACK
	/*
	 * If we overflow the stack into a guard page, the CPU will fail
	 * to deliver #PF and will send #DF instead.  Similarly, if we
	 * take any non-IST exception while too close to the bottom of
	 * the stack, the processor will get a page fault while
	 * delivering the exception and will generate a double fault.
	 *
	 * According to the SDM (footnote in 6.15 under "Interrupt 14 -
	 * Page-Fault Exception (#PF)"):
	 *
	 *   Processors update CR2 whenever a page fault is detected. If a
	 *   second page fault occurs while an earlier page fault is being
	 *   delivered, the faulting linear address of the second fault will
	 *   overwrite the contents of CR2 (replacing the previous
	 *   address). These updates to CR2 occur even if the page fault
	 *   results in a double fault or occurs during the delivery of a
	 *   double fault.
	 *
	 * The logic below has a small possibility of incorrectly diagnosing
	 * some errors as stack overflows.  For example, if the IDT or GDT
	 * gets corrupted such that #GP delivery fails due to a bad descriptor
	 * causing #GP and we hit this condition while CR2 coincidentally
	 * points to the stack guard page, we'll think we overflowed the
	 * stack.  Given that we're going to panic one way or another
	 * if this happens, this isn't necessarily worth fixing.
	 *
	 * If necessary, we could improve the test by only diagnosing
	 * a stack overflow if the saved RSP points within 47 bytes of
	 * the bottom of the stack: if RSP == tsk_stack + 48 and we
	 * take an exception, the stack is already aligned and there
	 * will be enough room for SS, RSP, RFLAGS, CS, RIP, and a
	 * possible error code, so a stack overflow would *not* double
	 * fault.  With any less space left, exception delivery could
	 * fail, and, as a practical matter, we've overflowed the
	 * stack even if the actual trigger for the double fault was
	 * something else.
	 */
	cr2 = read_cr2();
	if ((unsigned long)task_stack_page(tsk) - 1 - cr2 < PAGE_SIZE)
		handle_stack_overflow("kernel stack overflow (double-fault)", regs, cr2);
#endif

#ifdef CONFIG_DOUBLEFAULT
	df_debug(regs, error_code);
#endif
	/*
	 * This is always a kernel trap and never fixable (and thus must
	 * never return).
	 */
	for (;;)
		die(str, regs, error_code);
}
#endif

dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
{
	const struct mpx_bndcsr *bndcsr;
	siginfo_t *info;

	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
	if (notify_die(DIE_TRAP, "bounds", regs, error_code,
			X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP)
		return;
	cond_local_irq_enable(regs);

	if (!user_mode(regs))
		die("bounds", regs, error_code);

	if (!cpu_feature_enabled(X86_FEATURE_MPX)) {
		/* The exception is not from Intel MPX */
		goto exit_trap;
	}

	/*
	 * We need to look at BNDSTATUS to resolve this exception.
	 * A NULL here might mean that it is in its 'init state',
	 * which is all zeros which indicates MPX was not
	 * responsible for the exception.
	 */
	bndcsr = get_xsave_field_ptr(XFEATURE_MASK_BNDCSR);
	if (!bndcsr)
		goto exit_trap;

	trace_bounds_exception_mpx(bndcsr);
	/*
	 * The error code field of the BNDSTATUS register communicates status
	 * information of a bound range exception #BR or operation involving
	 * bound directory.
	 */
	switch (bndcsr->bndstatus & MPX_BNDSTA_ERROR_CODE) {
	case 2:	/* Bound directory has invalid entry. */
		if (mpx_handle_bd_fault())
			goto exit_trap;
		break; /* Success, it was handled */
	case 1: /* Bound violation. */
		info = mpx_generate_siginfo(regs);
		if (IS_ERR(info)) {
			/*
			 * We failed to decode the MPX instruction.  Act as if
			 * the exception was not caused by MPX.
			 */
			goto exit_trap;
		}
		/*
		 * Success, we decoded the instruction and retrieved
		 * an 'info' containing the address being accessed
		 * which caused the exception.  This information
		 * allows an application to possibly handle the
		 * #BR exception itself.
		 */
		do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, info);
		kfree(info);
		break;
	case 0: /* No exception caused by Intel MPX operations. */
		goto exit_trap;
	default:
		die("bounds", regs, error_code);
	}

	return;

exit_trap:
	/*
	 * This path out is for all the cases where we could not
	 * handle the exception in some way (like allocating a
	 * table or telling userspace about it).  We will also end
	 * up here if the kernel has MPX turned off at compile
	 * time.
	 */
	do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, NULL);
}

dotraplinkage void
do_general_protection(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk;

	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
	cond_local_irq_enable(regs);

	if (v8086_mode(regs)) {
		local_irq_enable();
		handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
		return;
	}

	tsk = current;
	if (!user_mode(regs)) {
		if (fixup_exception(regs, X86_TRAP_GP))
			return;

		tsk->thread.error_code = error_code;
		tsk->thread.trap_nr = X86_TRAP_GP;
		if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
			       X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
			die("general protection fault", regs, error_code);
		return;
	}

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_GP;

	if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
			printk_ratelimit()) {
		pr_info("%s[%d] general protection ip:%lx sp:%lx error:%lx",
			tsk->comm, task_pid_nr(tsk),
			regs->ip, regs->sp, error_code);
		print_vma_addr(" in ", regs->ip);
		pr_cont("\n");
	}

	force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
}
NOKPROBE_SYMBOL(do_general_protection);

/* May run on IST stack. */
dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_DYNAMIC_FTRACE
	/*
	 * ftrace must be first, everything else may cause a recursive crash.
	 * See note by declaration of modifying_ftrace_code in ftrace.c
	 */
	if (unlikely(atomic_read(&modifying_ftrace_code)) &&
	    ftrace_int3_handler(regs))
		return;
#endif
	if (poke_int3_handler(regs))
		return;

	ist_enter(regs);
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
	if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
				SIGTRAP) == NOTIFY_STOP)
		goto exit;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */

#ifdef CONFIG_KPROBES
	if (kprobe_int3_handler(regs))
		goto exit;
#endif

	if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
			SIGTRAP) == NOTIFY_STOP)
		goto exit;

	/*
	 * Let others (NMI) know that the debug stack is in use
	 * as we may switch to the interrupt stack.
	 */
	debug_stack_usage_inc();
	preempt_disable();
	cond_local_irq_enable(regs);
	do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
	cond_local_irq_disable(regs);
	preempt_enable_no_resched();
	debug_stack_usage_dec();
exit:
	ist_exit(regs);
}
NOKPROBE_SYMBOL(do_int3);

#ifdef CONFIG_X86_64
/*
 * Help handler running on IST stack to switch off the IST stack if the
 * interrupted code was in user mode. The actual stack switch is done in
 * entry_64.S
 */
asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs)
{
	struct pt_regs *regs = task_pt_regs(current);
	*regs = *eregs;
	return regs;
}
NOKPROBE_SYMBOL(sync_regs);

struct bad_iret_stack {
	void *error_entry_ret;
	struct pt_regs regs;
};

asmlinkage __visible notrace
struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
{
	/*
	 * This is called from entry_64.S early in handling a fault
	 * caused by a bad iret to user mode.  To handle the fault
	 * correctly, we want to move our stack frame to task_pt_regs
	 * and we want to pretend that the exception came from the
	 * iret target.
	 */
	struct bad_iret_stack *new_stack =
		container_of(task_pt_regs(current),
			     struct bad_iret_stack, regs);

	/* Copy the IRET target to the new stack. */
	memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);

	/* Copy the remainder of the stack from the current stack. */
	memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));

	BUG_ON(!user_mode(&new_stack->regs));
	return new_stack;
}
NOKPROBE_SYMBOL(fixup_bad_iret);
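/*
 * Note on the 5*8 above: that is the hardware IRET frame -- RIP, CS,
 * RFLAGS, RSP, SS -- i.e. the five 8-byte words from &regs.ip to the
 * end of struct pt_regs.  The same constant appears in the espfix64
 * #GP-faking path in do_double_fault().
 */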
#endif

static bool is_sysenter_singlestep(struct pt_regs *regs)
{
	/*
	 * We don't try for precision here.  If we're anywhere in the region of
	 * code that can be single-stepped in the SYSENTER entry path, then
	 * assume that this is a useless single-step trap due to SYSENTER
	 * being invoked with TF set.  (We don't know in advance exactly
	 * which instructions will be hit because BTF could plausibly
	 * be set.)
	 */
#ifdef CONFIG_X86_32
	return (regs->ip - (unsigned long)__begin_SYSENTER_singlestep_region) <
		(unsigned long)__end_SYSENTER_singlestep_region -
		(unsigned long)__begin_SYSENTER_singlestep_region;
#elif defined(CONFIG_IA32_EMULATION)
	return (regs->ip - (unsigned long)entry_SYSENTER_compat) <
		(unsigned long)__end_entry_SYSENTER_compat -
		(unsigned long)entry_SYSENTER_compat;
#else
	return false;
#endif
}

/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code).
 *
 * May run on IST stack.
 */
dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk = current;
	int user_icebp = 0;
	unsigned long dr6;
	int si_code;

	ist_enter(regs);

	get_debugreg(dr6, 6);
	/*
	 * The Intel SDM says:
	 *
	 *   Certain debug exceptions may clear bits 0-3. The remaining
	 *   contents of the DR6 register are never cleared by the
	 *   processor. To avoid confusion in identifying debug
	 *   exceptions, debug handlers should clear the register before
	 *   returning to the interrupted task.
	 *
	 * Keep it simple: clear DR6 immediately.
	 */
	set_debugreg(0, 6);

	/* Filter out all the reserved bits which are preset to 1 */
	dr6 &= ~DR6_RESERVED;

	/*
	 * The SDM says "The processor clears the BTF flag when it
	 * generates a debug exception."  Clear TIF_BLOCKSTEP to keep
	 * TIF_BLOCKSTEP in sync with the hardware BTF flag.
	 */
	clear_tsk_thread_flag(tsk, TIF_BLOCKSTEP);

	if (unlikely(!user_mode(regs) && (dr6 & DR_STEP) &&
		     is_sysenter_singlestep(regs))) {
		dr6 &= ~DR_STEP;
		if (!dr6)
			goto exit;
		/*
		 * else we might have gotten a single-step trap and hit a
		 * watchpoint at the same time, in which case we should fall
		 * through and handle the watchpoint.
		 */
	}

	/*
	 * If DR6 gives us no other reason for this trap, then it's very
	 * likely the result of an icebp/int01 trap.  The user wants a
	 * SIGTRAP for that.
	 */
	if (!dr6 && user_mode(regs))
		user_icebp = 1;

	/* Catch kmemcheck conditions! */
	if ((dr6 & DR_STEP) && kmemcheck_trap(regs))
		goto exit;

	/* Store the virtualized DR6 value */
	tsk->thread.debugreg6 = dr6;

#ifdef CONFIG_KPROBES
	if (kprobe_debug_handler(regs))
		goto exit;
#endif

	if (notify_die(DIE_DEBUG, "debug", regs, (long)&dr6, error_code,
							SIGTRAP) == NOTIFY_STOP)
		goto exit;

	/*
	 * Let others (NMI) know that the debug stack is in use
	 * as we may switch to the interrupt stack.
	 */
	debug_stack_usage_inc();

	/* It's safe to allow irq's after DR6 has been saved */
	preempt_disable();
	cond_local_irq_enable(regs);

	if (v8086_mode(regs)) {
		handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
					X86_TRAP_DB);
		cond_local_irq_disable(regs);
		preempt_enable_no_resched();
		debug_stack_usage_dec();
		goto exit;
	}

	if (WARN_ON_ONCE((dr6 & DR_STEP) && !user_mode(regs))) {
		/*
		 * Historical junk that used to handle SYSENTER single-stepping.
		 * This should be unreachable now.  If we survive for a while
		 * without anyone hitting this warning, we'll turn this into
		 * an oops.
		 */
		tsk->thread.debugreg6 &= ~DR_STEP;
		set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
		regs->flags &= ~X86_EFLAGS_TF;
	}
	si_code = get_si_code(tsk->thread.debugreg6);
	if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
		send_sigtrap(tsk, regs, error_code, si_code);
	cond_local_irq_disable(regs);
	preempt_enable_no_resched();
	debug_stack_usage_dec();

exit:
#if defined(CONFIG_X86_32)
	/*
	 * This is the most likely code path that involves non-trivial use
	 * of the SYSENTER stack.  Check that we haven't overrun it.
	 */
	WARN(this_cpu_read(cpu_tss.SYSENTER_stack_canary) != STACK_END_MAGIC,
	     "Overran or corrupted SYSENTER stack\n");
#endif
	ist_exit(regs);
}
NOKPROBE_SYMBOL(do_debug);

/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour.
 */
static void math_error(struct pt_regs *regs, int error_code, int trapnr)
{
	struct task_struct *task = current;
	struct fpu *fpu = &task->thread.fpu;
	siginfo_t info;
	char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
						"simd exception";

	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
		return;
	cond_local_irq_enable(regs);

	if (!user_mode(regs)) {
		if (!fixup_exception(regs, trapnr)) {
			task->thread.error_code = error_code;
			task->thread.trap_nr = trapnr;
			die(str, regs, error_code);
		}
		return;
	}

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	fpu__save(fpu);

	task->thread.trap_nr	= trapnr;
	task->thread.error_code = error_code;
	info.si_signo		= SIGFPE;
	info.si_errno		= 0;
	info.si_addr		= (void __user *)uprobe_get_trap_addr(regs);

	info.si_code = fpu__exception_code(fpu, trapnr);

	/* Retry when we get spurious exceptions: */
	if (!info.si_code)
		return;

	force_sig_info(SIGFPE, &info, task);
}

dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
{
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
	math_error(regs, error_code, X86_TRAP_MF);
}

dotraplinkage void
do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
{
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
	math_error(regs, error_code, X86_TRAP_XF);
}

dotraplinkage void
do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
{
	cond_local_irq_enable(regs);
}

dotraplinkage void
do_device_not_available(struct pt_regs *regs, long error_code)
{
	unsigned long cr0;

	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");

#ifdef CONFIG_MATH_EMULATION
	if (!boot_cpu_has(X86_FEATURE_FPU) && (read_cr0() & X86_CR0_EM)) {
		struct math_emu_info info = { };

		cond_local_irq_enable(regs);

		info.regs = regs;
		math_emulate(&info);
		return;
	}
#endif

	/* This should not happen. */
	cr0 = read_cr0();
	if (WARN(cr0 & X86_CR0_TS, "CR0.TS was set")) {
		/* Try to fix it up and carry on. */
		write_cr0(cr0 & ~X86_CR0_TS);
	} else {
		/*
		 * Something terrible happened, and we're better off trying
		 * to kill the task than getting stuck in a never-ending
		 * loop of #NM faults.
		 */
		die("unexpected #NM exception", regs, error_code);
	}
}
NOKPROBE_SYMBOL(do_device_not_available);

#ifdef CONFIG_X86_32
dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
{
	siginfo_t info;

	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
	local_irq_enable();

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_BADSTK;
	info.si_addr = NULL;
	if (notify_die(DIE_TRAP, "iret exception", regs, error_code,
			X86_TRAP_IRET, SIGILL) != NOTIFY_STOP) {
		do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code,
			&info);
	}
}
#endif

/* Set of traps needed for early debugging. */
void __init early_trap_init(void)
{
	/*
	 * Don't use IST to set DEBUG_STACK as it doesn't work until TSS
	 * is ready in cpu_init() <-- trap_init(). Before trap_init(),
	 * CPU runs at ring 0 so it is impossible to hit an invalid
	 * stack.  Using the original stack works well enough at this
	 * early stage. DEBUG_STACK will be equipped after cpu_init() in
	 * trap_init().
	 *
	 * We don't need to set trace_idt_table like set_intr_gate(),
	 * since we don't have trace_debug and it will be reset to
	 * 'debug' in trap_init() by set_intr_gate_ist().
	 */
	set_intr_gate_notrace(X86_TRAP_DB, debug);
	/* int3 can be called from all */
	set_system_intr_gate(X86_TRAP_BP, &int3);
#ifdef CONFIG_X86_32
	set_intr_gate(X86_TRAP_PF, page_fault);
#endif
	load_idt(&idt_descr);
}

void __init early_trap_pf_init(void)
{
#ifdef CONFIG_X86_64
	set_intr_gate(X86_TRAP_PF, page_fault);
#endif
}

void __init trap_init(void)
{
	int i;

#ifdef CONFIG_EISA
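	/*
	 * Background note: on EISA machines the BIOS ROM carries the
	 * ASCII signature "EISA" at physical address 0x0FFFD9; readl()
	 * fetches it little-endian, hence the 'E' + ('I'<<8) + ...
	 * comparison below.
	 */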
	void __iomem *p = early_ioremap(0x0FFFD9, 4);

	if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24))
		EISA_bus = 1;
	early_iounmap(p, 4);
#endif

	set_intr_gate(X86_TRAP_DE, divide_error);
	set_intr_gate_ist(X86_TRAP_NMI, &nmi, NMI_STACK);
	/* int4 can be called from all */
	set_system_intr_gate(X86_TRAP_OF, &overflow);
	set_intr_gate(X86_TRAP_BR, bounds);
	set_intr_gate(X86_TRAP_UD, invalid_op);
	set_intr_gate(X86_TRAP_NM, device_not_available);
#ifdef CONFIG_X86_32
	set_task_gate(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS);
#else
	set_intr_gate_ist(X86_TRAP_DF, &double_fault, DOUBLEFAULT_STACK);
#endif
	set_intr_gate(X86_TRAP_OLD_MF, coprocessor_segment_overrun);
	set_intr_gate(X86_TRAP_TS, invalid_TSS);
	set_intr_gate(X86_TRAP_NP, segment_not_present);
	set_intr_gate(X86_TRAP_SS, stack_segment);
	set_intr_gate(X86_TRAP_GP, general_protection);
	set_intr_gate(X86_TRAP_SPURIOUS, spurious_interrupt_bug);
	set_intr_gate(X86_TRAP_MF, coprocessor_error);
	set_intr_gate(X86_TRAP_AC, alignment_check);
#ifdef CONFIG_X86_MCE
	set_intr_gate_ist(X86_TRAP_MC, &machine_check, MCE_STACK);
#endif
	set_intr_gate(X86_TRAP_XF, simd_coprocessor_error);

	/* Reserve all the builtin and the syscall vector: */
	for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
		set_bit(i, used_vectors);

#ifdef CONFIG_IA32_EMULATION
	set_system_intr_gate(IA32_SYSCALL_VECTOR, entry_INT80_compat);
	set_bit(IA32_SYSCALL_VECTOR, used_vectors);
#endif

#ifdef CONFIG_X86_32
	set_system_intr_gate(IA32_SYSCALL_VECTOR, entry_INT80_32);
	set_bit(IA32_SYSCALL_VECTOR, used_vectors);
#endif

	/*
	 * Set the IDT descriptor to a fixed read-only location, so that the
	 * "sidt" instruction will not leak the location of the kernel, and
	 * to defend the IDT against arbitrary memory write vulnerabilities.
	 * It will be reloaded in cpu_init().
	 */
	__set_fixmap(FIX_RO_IDT, __pa_symbol(idt_table), PAGE_KERNEL_RO);
	idt_descr.address = fix_to_virt(FIX_RO_IDT);

	/*
	 * Should be a barrier for any external CPU state:
	 */
	cpu_init();

	/*
	 * X86_TRAP_DB and X86_TRAP_BP have been set
	 * in early_trap_init(). However, IST works only after
	 * cpu_init() loads TSS. See comments in early_trap_init().
	 */
	set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK);
	/* int3 can be called from all */
	set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK);

	x86_init.irqs.trap_init();

#ifdef CONFIG_X86_64
	memcpy(&debug_idt_table, &idt_table, IDT_ENTRIES * 16);
	set_nmi_gate(X86_TRAP_DB, &debug);
	set_nmi_gate(X86_TRAP_BP, &int3);
#endif
}
v6.8 (arch/x86/kernel/traps.c)
/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/context_tracking.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/kmsan.h>
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/io.h>
#include <linux/hardirq.h>
#include <linux/atomic.h>
#include <linux/iommu.h>

#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <asm/realmode.h>
#include <asm/text-patching.h>
#include <asm/ftrace.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/fpu/api.h>
#include <asm/cpu.h>
#include <asm/cpu_entry_area.h>
#include <asm/mce.h>
#include <asm/fixmap.h>
#include <asm/mach_traps.h>
#include <asm/alternative.h>
#include <asm/fpu/xstate.h>
#include <asm/vm86.h>
#include <asm/umip.h>
#include <asm/insn.h>
#include <asm/insn-eval.h>
#include <asm/vdso.h>
#include <asm/tdx.h>
#include <asm/cfi.h>

#ifdef CONFIG_X86_64
#include <asm/x86_init.h>
#else
#include <asm/processor-flags.h>
#include <asm/setup.h>
#endif

#include <asm/proto.h>

DECLARE_BITMAP(system_vectors, NR_VECTORS);

__always_inline int is_valid_bugaddr(unsigned long addr)
{
	if (addr < TASK_SIZE_MAX)
		return 0;

	/*
	 * We got #UD, if the text isn't readable we'd have gotten
	 * a different exception.
	 */
	return *(unsigned short *)addr == INSN_UD2;
}
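/*
 * Note: INSN_UD2 is the two-byte opcode 0x0f 0x0b (0x0b0f when read
 * little-endian as a u16), which is what BUG() and WARN() plant in the
 * kernel text.
 */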

static nokprobe_inline int
do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
		  struct pt_regs *regs, long error_code)
{
	if (v8086_mode(regs)) {
		/*
		 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
		 * On nmi (interrupt 2), do_trap should not be called.
		 */
		if (trapnr < X86_TRAP_UD) {
			if (!handle_vm86_trap((struct kernel_vm86_regs *) regs,
						error_code, trapnr))
				return 0;
		}
	} else if (!user_mode(regs)) {
		if (fixup_exception(regs, trapnr, error_code, 0))
			return 0;

		tsk->thread.error_code = error_code;
		tsk->thread.trap_nr = trapnr;
		die(str, regs, error_code);
	} else {
		if (fixup_vdso_exception(regs, trapnr, error_code, 0))
			return 0;
	}

	/*
	 * We want error_code and trap_nr set for userspace faults and
	 * kernelspace faults which result in die(), but not
	 * kernelspace faults which are fixed up.  die() gives the
	 * process no chance to handle the signal and notice the
	 * kernel fault information, so that won't result in polluting
	 * the information about previously queued, but not yet
	 * delivered, faults.  See also exc_general_protection below.
	 */
	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = trapnr;

	return -1;
}

static void show_signal(struct task_struct *tsk, int signr,
			const char *type, const char *desc,
			struct pt_regs *regs, long error_code)
{
	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
	    printk_ratelimit()) {
		pr_info("%s[%d] %s%s ip:%lx sp:%lx error:%lx",
			tsk->comm, task_pid_nr(tsk), type, desc,
			regs->ip, regs->sp, error_code);
		print_vma_addr(KERN_CONT " in ", regs->ip);
		pr_cont("\n");
	}
}

static void
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
	long error_code, int sicode, void __user *addr)
{
	struct task_struct *tsk = current;

	if (!do_trap_no_signal(tsk, trapnr, str, regs, error_code))
		return;

	show_signal(tsk, signr, "trap ", str, regs, error_code);

	if (!sicode)
		force_sig(signr);
	else
		force_sig_fault(signr, sicode, addr);
}
NOKPROBE_SYMBOL(do_trap);

static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
	unsigned long trapnr, int signr, int sicode, void __user *addr)
{
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");

	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
			NOTIFY_STOP) {
		cond_local_irq_enable(regs);
		do_trap(trapnr, signr, str, regs, error_code, sicode, addr);
		cond_local_irq_disable(regs);
	}
}

/*
 * POSIX requires that the address of the faulting instruction be provided
 * for SIGILL (#UD) and SIGFPE (#DE) in the si_addr member of siginfo_t.
 *
 * This address is usually regs->ip, but when an uprobe moved the code out
 * of line then regs->ip points to the XOL code which would confuse
 * anything which analyzes the fault address vs. the unmodified binary. If
 * a trap happened in XOL code then uprobe maps regs->ip back to the
 * original instruction address.
 */
static __always_inline void __user *error_get_trap_addr(struct pt_regs *regs)
{
	return (void __user *)uprobe_get_trap_addr(regs);
}

DEFINE_IDTENTRY(exc_divide_error)
{
	do_error_trap(regs, 0, "divide error", X86_TRAP_DE, SIGFPE,
		      FPE_INTDIV, error_get_trap_addr(regs));
}
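/*
 * Rough sketch of what the DEFINE_IDTENTRY() wrapper above expands to
 * (see asm/idtentry.h for the authoritative definition; the _ERRORCODE
 * variants used further down additionally pass the hardware error code):
 *
 *	static __always_inline void __exc_divide_error(struct pt_regs *regs);
 *
 *	__visible noinstr void exc_divide_error(struct pt_regs *regs)
 *	{
 *		irqentry_state_t state = irqentry_enter(regs);
 *
 *		instrumentation_begin();
 *		__exc_divide_error(regs);
 *		instrumentation_end();
 *		irqentry_exit(regs, state);
 *	}
 *
 *	static __always_inline void __exc_divide_error(struct pt_regs *regs)
 *	{ ...the body written above... }
 */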

DEFINE_IDTENTRY(exc_overflow)
{
	do_error_trap(regs, 0, "overflow", X86_TRAP_OF, SIGSEGV, 0, NULL);
}

#ifdef CONFIG_X86_F00F_BUG
void handle_invalid_op(struct pt_regs *regs)
#else
static inline void handle_invalid_op(struct pt_regs *regs)
#endif
{
	do_error_trap(regs, 0, "invalid opcode", X86_TRAP_UD, SIGILL,
		      ILL_ILLOPN, error_get_trap_addr(regs));
}

static noinstr bool handle_bug(struct pt_regs *regs)
{
	bool handled = false;

	/*
	 * Normally @regs are unpoisoned by irqentry_enter(), but handle_bug()
	 * is a rare case that uses @regs without passing them to
	 * irqentry_enter().
	 */
	kmsan_unpoison_entry_regs(regs);
	if (!is_valid_bugaddr(regs->ip))
		return handled;

	/*
	 * All lies, just get the WARN/BUG out.
	 */
	instrumentation_begin();
	/*
	 * Since we're emulating a CALL with exceptions, restore the interrupt
	 * state to what it was at the exception site.
	 */
	if (regs->flags & X86_EFLAGS_IF)
		raw_local_irq_enable();
	if (report_bug(regs->ip, regs) == BUG_TRAP_TYPE_WARN ||
	    handle_cfi_failure(regs) == BUG_TRAP_TYPE_WARN) {
		regs->ip += LEN_UD2;
		handled = true;
	}
	if (regs->flags & X86_EFLAGS_IF)
		raw_local_irq_disable();
	instrumentation_end();

	return handled;
}

DEFINE_IDTENTRY_RAW(exc_invalid_op)
{
	irqentry_state_t state;

	/*
	 * We use UD2 as a short encoding for 'CALL __WARN', as such
	 * handle it before exception entry to avoid recursive WARN
	 * in case exception entry is the one triggering WARNs.
	 */
	if (!user_mode(regs) && handle_bug(regs))
		return;

	state = irqentry_enter(regs);
	instrumentation_begin();
	handle_invalid_op(regs);
	instrumentation_end();
	irqentry_exit(regs, state);
}

DEFINE_IDTENTRY(exc_coproc_segment_overrun)
{
	do_error_trap(regs, 0, "coprocessor segment overrun",
		      X86_TRAP_OLD_MF, SIGFPE, 0, NULL);
}

DEFINE_IDTENTRY_ERRORCODE(exc_invalid_tss)
{
	do_error_trap(regs, error_code, "invalid TSS", X86_TRAP_TS, SIGSEGV,
		      0, NULL);
}

DEFINE_IDTENTRY_ERRORCODE(exc_segment_not_present)
{
	do_error_trap(regs, error_code, "segment not present", X86_TRAP_NP,
		      SIGBUS, 0, NULL);
}

DEFINE_IDTENTRY_ERRORCODE(exc_stack_segment)
{
	do_error_trap(regs, error_code, "stack segment", X86_TRAP_SS, SIGBUS,
		      0, NULL);
}

DEFINE_IDTENTRY_ERRORCODE(exc_alignment_check)
{
	char *str = "alignment check";

	if (notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_AC, SIGBUS) == NOTIFY_STOP)
		return;

	if (!user_mode(regs))
		die("Split lock detected\n", regs, error_code);

	local_irq_enable();

	if (handle_user_split_lock(regs, error_code))
		goto out;

	do_trap(X86_TRAP_AC, SIGBUS, "alignment check", regs,
		error_code, BUS_ADRALN, NULL);

out:
	local_irq_disable();
}

#ifdef CONFIG_VMAP_STACK
__visible void __noreturn handle_stack_overflow(struct pt_regs *regs,
						unsigned long fault_address,
						struct stack_info *info)
{
	const char *name = stack_type_name(info->type);

	printk(KERN_EMERG "BUG: %s stack guard page was hit at %p (stack is %p..%p)\n",
	       name, (void *)fault_address, info->begin, info->end);

	die("stack guard page", regs, 0);

	/* Be absolutely certain we don't return. */
	panic("%s stack guard hit", name);
}
#endif

/*
 * Runs on an IST stack for x86_64 and on a special task stack for x86_32.
 *
 * On x86_64, this is more or less a normal kernel entry.  Notwithstanding the
 * SDM's warnings about double faults being unrecoverable, returning works as
 * expected.  Presumably what the SDM actually means is that the CPU may get
 * the register state wrong on entry, so returning could be a bad idea.
 *
 * Various CPU engineers have promised that double faults due to an IRET fault
 * while the stack is read-only are, in fact, recoverable.
 *
 * On x86_32, this is entered through a task gate, and regs are synthesized
 * from the TSS.  Returning is, in principle, okay, but changes to regs will
 * be lost.  If, for some reason, we need to return to a context with modified
 * regs, the shim code could be adjusted to synchronize the registers.
 *
 * The 32bit #DF shim provides CR2 already as an argument. On 64bit it needs
 * to be read before doing anything else.
 */
DEFINE_IDTENTRY_DF(exc_double_fault)
{
	static const char str[] = "double fault";
	struct task_struct *tsk = current;

#ifdef CONFIG_VMAP_STACK
	unsigned long address = read_cr2();
	struct stack_info info;
#endif

#ifdef CONFIG_X86_ESPFIX64
	extern unsigned char native_irq_return_iret[];

	/*
	 * If IRET takes a non-IST fault on the espfix64 stack, then we
	 * end up promoting it to a doublefault.  In that case, take
	 * advantage of the fact that we're not using the normal (TSS.sp0)
	 * stack right now.  We can write a fake #GP(0) frame at TSS.sp0
	 * and then modify our own IRET frame so that, when we return,
	 * we land directly at the #GP(0) vector with the stack already
	 * set up according to its expectations.
	 *
	 * The net result is that our #GP handler will think that we
	 * entered from usermode with the bad user context.
	 *
	 * No need for nmi_enter() here because we don't use RCU.
	 */
	if (((long)regs->sp >> P4D_SHIFT) == ESPFIX_PGD_ENTRY &&
		regs->cs == __KERNEL_CS &&
		regs->ip == (unsigned long)native_irq_return_iret)
	{
		struct pt_regs *gpregs = (struct pt_regs *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
		unsigned long *p = (unsigned long *)regs->sp;

		/*
		 * regs->sp points to the failing IRET frame on the
		 * ESPFIX64 stack.  Copy it to the entry stack.  This fills
		 * in gpregs->ss through gpregs->ip.
		 */
		gpregs->ip	= p[0];
		gpregs->cs	= p[1];
		gpregs->flags	= p[2];
		gpregs->sp	= p[3];
		gpregs->ss	= p[4];
		gpregs->orig_ax = 0;  /* Missing (lost) #GP error code */

		/*
		 * Adjust our frame so that we return straight to the #GP
		 * vector with the expected RSP value.  This is safe because
		 * we won't enable interrupts or schedule before we invoke
		 * general_protection, so nothing will clobber the stack
		 * frame we just set up.
		 *
		 * We will enter general_protection with kernel GSBASE,
		 * which is what the stub expects, given that the faulting
		 * RIP will be the IRET instruction.
		 */
		regs->ip = (unsigned long)asm_exc_general_protection;
		regs->sp = (unsigned long)&gpregs->orig_ax;

		return;
	}
#endif

	irqentry_nmi_enter(regs);
	instrumentation_begin();
	notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_DF;

#ifdef CONFIG_VMAP_STACK
	/*
	 * If we overflow the stack into a guard page, the CPU will fail
	 * to deliver #PF and will send #DF instead.  Similarly, if we
	 * take any non-IST exception while too close to the bottom of
	 * the stack, the processor will get a page fault while
	 * delivering the exception and will generate a double fault.
	 *
	 * According to the SDM (footnote in 6.15 under "Interrupt 14 -
	 * Page-Fault Exception (#PF)"):
	 *
	 *   Processors update CR2 whenever a page fault is detected. If a
	 *   second page fault occurs while an earlier page fault is being
	 *   delivered, the faulting linear address of the second fault will
	 *   overwrite the contents of CR2 (replacing the previous
	 *   address). These updates to CR2 occur even if the page fault
	 *   results in a double fault or occurs during the delivery of a
	 *   double fault.
	 *
	 * The logic below has a small possibility of incorrectly diagnosing
	 * some errors as stack overflows.  For example, if the IDT or GDT
	 * gets corrupted such that #GP delivery fails due to a bad descriptor
	 * causing #GP and we hit this condition while CR2 coincidentally
	 * points to the stack guard page, we'll think we overflowed the
	 * stack.  Given that we're going to panic one way or another
	 * if this happens, this isn't necessarily worth fixing.
	 *
	 * If necessary, we could improve the test by only diagnosing
	 * a stack overflow if the saved RSP points within 47 bytes of
	 * the bottom of the stack: if RSP == tsk_stack + 48 and we
	 * take an exception, the stack is already aligned and there
	 * will be enough room for SS, RSP, RFLAGS, CS, RIP, and a
	 * possible error code, so a stack overflow would *not* double
	 * fault.  With any less space left, exception delivery could
	 * fail, and, as a practical matter, we've overflowed the
	 * stack even if the actual trigger for the double fault was
	 * something else.
	 */
	if (get_stack_guard_info((void *)address, &info))
		handle_stack_overflow(regs, address, &info);
#endif

	pr_emerg("PANIC: double fault, error_code: 0x%lx\n", error_code);
	die("double fault", regs, error_code);
	panic("Machine halted.");
	instrumentation_end();
}

DEFINE_IDTENTRY(exc_bounds)
{
	if (notify_die(DIE_TRAP, "bounds", regs, 0,
			X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP)
		return;
	cond_local_irq_enable(regs);

	if (!user_mode(regs))
		die("bounds", regs, 0);

	do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, 0, 0, NULL);

	cond_local_irq_disable(regs);
}

enum kernel_gp_hint {
	GP_NO_HINT,
	GP_NON_CANONICAL,
	GP_CANONICAL
};

/*
 * When an uncaught #GP occurs, try to determine the memory address accessed by
 * the instruction and return that address to the caller. Also, try to figure
 * out whether any part of the access to that address was non-canonical.
 */
static enum kernel_gp_hint get_kernel_gp_address(struct pt_regs *regs,
						 unsigned long *addr)
{
	u8 insn_buf[MAX_INSN_SIZE];
	struct insn insn;
	int ret;

	if (copy_from_kernel_nofault(insn_buf, (void *)regs->ip,
			MAX_INSN_SIZE))
		return GP_NO_HINT;

	ret = insn_decode_kernel(&insn, insn_buf);
	if (ret < 0)
		return GP_NO_HINT;

	*addr = (unsigned long)insn_get_addr_ref(&insn, regs);
	if (*addr == -1UL)
		return GP_NO_HINT;

#ifdef CONFIG_X86_64
	/*
	 * Check that:
	 *  - the operand is not in the kernel half
	 *  - the last byte of the operand is not in the user canonical half
	 */
	if (*addr < ~__VIRTUAL_MASK &&
	    *addr + insn.opnd_bytes - 1 > __VIRTUAL_MASK)
		return GP_NON_CANONICAL;
#endif

	return GP_CANONICAL;
}
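/*
 * Worked example (assuming 4-level paging, where __VIRTUAL_MASK is
 * (1UL << 47) - 1): an 8-byte load at 0x00007fffffffffff starts inside
 * the user canonical half, but its last byte lands in the non-canonical
 * hole, so the check above reports GP_NON_CANONICAL.
 */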

#define GPFSTR "general protection fault"

static bool fixup_iopl_exception(struct pt_regs *regs)
{
	struct thread_struct *t = &current->thread;
	unsigned char byte;
	unsigned long ip;

	if (!IS_ENABLED(CONFIG_X86_IOPL_IOPERM) || t->iopl_emul != 3)
		return false;

	if (insn_get_effective_ip(regs, &ip))
		return false;

	if (get_user(byte, (const char __user *)ip))
		return false;

	if (byte != 0xfa && byte != 0xfb)
		return false;

	if (!t->iopl_warn && printk_ratelimit()) {
		pr_err("%s[%d] attempts to use CLI/STI, pretending it's a NOP, ip:%lx",
		       current->comm, task_pid_nr(current), ip);
		print_vma_addr(KERN_CONT " in ", ip);
		pr_cont("\n");
		t->iopl_warn = 1;
	}

	regs->ip += 1;
	return true;
}

/*
 * The unprivileged ENQCMD instruction generates #GPs if the
 * IA32_PASID MSR has not been populated.  If possible, populate
 * the MSR from a PASID previously allocated to the mm.
 */
static bool try_fixup_enqcmd_gp(void)
{
#ifdef CONFIG_ARCH_HAS_CPU_PASID
	u32 pasid;

	/*
	 * MSR_IA32_PASID is managed using XSAVE.  Directly
	 * writing to the MSR is only possible when fpregs
	 * are valid and the fpstate is not.  This is
	 * guaranteed when handling a userspace exception,
	 * before interrupts are re-enabled.
	 */
 579	lockdep_assert_irqs_disabled();
 580
 581	/*
 582	 * Hardware without ENQCMD will not generate
 583	 * #GPs that can be fixed up here.
 584	 */
 585	if (!cpu_feature_enabled(X86_FEATURE_ENQCMD))
 586		return false;
 587
 588	/*
 589	 * If the mm has not been allocated a
 590	 * PASID, the #GP can not be fixed up.
 591	 */
 592	if (!mm_valid_pasid(current->mm))
 593		return false;
 594
 595	pasid = mm_get_enqcmd_pasid(current->mm);
 596
 597	/*
 598	 * Did this thread already have its PASID activated?
 599	 * If so, the #GP must be from something else.
 600	 */
 601	if (current->pasid_activated)
 602		return false;
 603
 604	wrmsrl(MSR_IA32_PASID, pasid | MSR_IA32_PASID_VALID);
 605	current->pasid_activated = 1;
 606
 607	return true;
 608#else
 609	return false;
 610#endif
 611}
 612
 613static bool gp_try_fixup_and_notify(struct pt_regs *regs, int trapnr,
 614				    unsigned long error_code, const char *str,
 615				    unsigned long address)
 616{
 617	if (fixup_exception(regs, trapnr, error_code, address))
 618		return true;
 619
 620	current->thread.error_code = error_code;
 621	current->thread.trap_nr = trapnr;
 622
 623	/*
 624	 * To be potentially processing a kprobe fault and to trust the result
 625	 * from kprobe_running(), we have to be non-preemptible.
 626	 */
 627	if (!preemptible() && kprobe_running() &&
 628	    kprobe_fault_handler(regs, trapnr))
 629		return true;
 630
 631	return notify_die(DIE_GPF, str, regs, error_code, trapnr, SIGSEGV) == NOTIFY_STOP;
 632}
 633
 634static void gp_user_force_sig_segv(struct pt_regs *regs, int trapnr,
 635				   unsigned long error_code, const char *str)
 636{
 637	current->thread.error_code = error_code;
 638	current->thread.trap_nr = trapnr;
 639	show_signal(current, SIGSEGV, "", str, regs, error_code);
 640	force_sig(SIGSEGV);
 641}
 642
 643DEFINE_IDTENTRY_ERRORCODE(exc_general_protection)
 644{
 645	char desc[sizeof(GPFSTR) + 50 + 2*sizeof(unsigned long) + 1] = GPFSTR;
 646	enum kernel_gp_hint hint = GP_NO_HINT;
 647	unsigned long gp_addr;
 648
 649	if (user_mode(regs) && try_fixup_enqcmd_gp())
 650		return;
 651
 
 652	cond_local_irq_enable(regs);
 653
 654	if (static_cpu_has(X86_FEATURE_UMIP)) {
 655		if (user_mode(regs) && fixup_umip_exception(regs))
 656			goto exit;
 657	}
 658
 659	if (v8086_mode(regs)) {
 660		local_irq_enable();
 661		handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
 662		local_irq_disable();
 663		return;
 664	}
 665
 666	if (user_mode(regs)) {
 667		if (fixup_iopl_exception(regs))
 668			goto exit;
 
 669
 670		if (fixup_vdso_exception(regs, X86_TRAP_GP, error_code, 0))
 671			goto exit;
 672
 673		gp_user_force_sig_segv(regs, X86_TRAP_GP, error_code, desc);
 674		goto exit;
 
 675	}
 676
 677	if (gp_try_fixup_and_notify(regs, X86_TRAP_GP, error_code, desc, 0))
 678		goto exit;
 679
 680	if (error_code)
 681		snprintf(desc, sizeof(desc), "segment-related " GPFSTR);
 682	else
 683		hint = get_kernel_gp_address(regs, &gp_addr);
 684
 685	if (hint != GP_NO_HINT)
 686		snprintf(desc, sizeof(desc), GPFSTR ", %s 0x%lx",
 687			 (hint == GP_NON_CANONICAL) ? "probably for non-canonical address"
 688						    : "maybe for address",
 689			 gp_addr);
 690
 691	/*
 692	 * KASAN is interested only in the non-canonical case, clear it
 693	 * otherwise.
 694	 */
 695	if (hint != GP_NON_CANONICAL)
 696		gp_addr = 0;
 697
 698	die_addr(desc, regs, error_code, gp_addr);
 699
 700exit:
 701	cond_local_irq_disable(regs);
 702}
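/*
 * Worked example for the sizing of @desc in exc_general_protection()
 * above, assuming a 64-bit build: sizeof(GPFSTR) covers the base
 * string plus its NUL, the constant 50 covers the longest hint text
 * ("probably for non-canonical address" plus ", " and " 0x"),
 * 2*sizeof(unsigned long) == 16 covers the hex digits of a full
 * 64-bit address printed with "%lx", and the trailing +1 is slack.
 */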
 703
 704static bool do_int3(struct pt_regs *regs)
 705{
 706	int res;
 707
 708#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
 709	if (kgdb_ll_trap(DIE_INT3, "int3", regs, 0, X86_TRAP_BP,
 710			 SIGTRAP) == NOTIFY_STOP)
 711		return true;
 712#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
 713
 714#ifdef CONFIG_KPROBES
 715	if (kprobe_int3_handler(regs))
 716		return true;
 717#endif
 718	res = notify_die(DIE_INT3, "int3", regs, 0, X86_TRAP_BP, SIGTRAP);
 719
 720	return res == NOTIFY_STOP;
 721}
 722NOKPROBE_SYMBOL(do_int3);
 723
 724static void do_int3_user(struct pt_regs *regs)
 725{
 726	if (do_int3(regs))
 727		return;
 728
 729	cond_local_irq_enable(regs);
 730	do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, 0, 0, NULL);
 731	cond_local_irq_disable(regs);
 732}
 733
 734DEFINE_IDTENTRY_RAW(exc_int3)
 735{
 736	/*
 737	 * poke_int3_handler() is completely self-contained code; it does (and
 738	 * must) *NOT* call out to anything, lest it hit upon yet another
 739	 * INT3.
 740	 */
 741	if (poke_int3_handler(regs))
 742		return;
 743
 744	/*
 745	 * irqentry_enter_from_user_mode() uses static_branch_{,un}likely()
 746	 * and therefore can trigger INT3, hence poke_int3_handler() must
 747	 * be done before. If the entry came from kernel mode, then use
 748	 * nmi_enter() because the INT3 could have been hit in any context
 749	 * including NMI.
 750	 */
 751	if (user_mode(regs)) {
 752		irqentry_enter_from_user_mode(regs);
 753		instrumentation_begin();
 754		do_int3_user(regs);
 755		instrumentation_end();
 756		irqentry_exit_to_user_mode(regs);
 757	} else {
 758		irqentry_state_t irq_state = irqentry_nmi_enter(regs);
 759
 760		instrumentation_begin();
 761		if (!do_int3(regs))
 762			die("int3", regs, 0);
 763		instrumentation_end();
 764		irqentry_nmi_exit(regs, irq_state);
 765	}
 766}
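/*
 * Background sketch: INT3 is the single-byte opcode 0xCC, which is why
 * kprobes and text_poke can plant it over the first byte of any
 * instruction without worrying about instruction boundaries.  A
 * minimal kprobe whose pre-handler runs out of the do_int3() path
 * above (the symbol name and callback are arbitrary examples):
 *
 *	static struct kprobe kp = {
 *		.symbol_name	= "do_sys_openat2",
 *		.pre_handler	= my_pre_handler,
 *	};
 *
 *	register_kprobe(&kp);	// saves the original byte, writes 0xCC
 */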
 767
 768#ifdef CONFIG_X86_64
 769/*
 770 * Help a handler running on a per-cpu (IST or entry trampoline) stack
 771 * to switch to the normal thread stack if the interrupted code was in
 772 * user mode. The actual stack switch is done in entry_64.S
 773 */
 774asmlinkage __visible noinstr struct pt_regs *sync_regs(struct pt_regs *eregs)
 775{
 776	struct pt_regs *regs = (struct pt_regs *)this_cpu_read(pcpu_hot.top_of_stack) - 1;
 777	if (regs != eregs)
 778		*regs = *eregs;
 779	return regs;
 780}
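/*
 * Worked example for sync_regs(), with a hypothetical task-stack top
 * of 0xffffc90000004000: "- 1" in pt_regs units places the new frame
 * at 0xffffc90000004000 - sizeof(struct pt_regs), i.e. flush against
 * the top of the normal thread stack, exactly where a non-IST entry
 * from user mode would have built it.
 */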
 781
 782#ifdef CONFIG_AMD_MEM_ENCRYPT
 783asmlinkage __visible noinstr struct pt_regs *vc_switch_off_ist(struct pt_regs *regs)
 784{
 785	unsigned long sp, *stack;
 786	struct stack_info info;
 787	struct pt_regs *regs_ret;
 788
 789	/*
 790	 * In the SYSCALL entry path the RSP value comes from user-space - don't
 791	 * trust it and switch to the current kernel stack
 792	 */
 793	if (ip_within_syscall_gap(regs)) {
 794		sp = this_cpu_read(pcpu_hot.top_of_stack);
 795		goto sync;
 796	}
 797
 798	/*
 799	 * From here on the RSP value is trusted. Now check whether entry
 800	 * happened from a safe stack. The entry and unknown stacks are not
 801	 * safe; use the fall-back stack instead in that case.
 802	 */
 803	sp    = regs->sp;
 804	stack = (unsigned long *)sp;
 805
 806	if (!get_stack_info_noinstr(stack, current, &info) || info.type == STACK_TYPE_ENTRY ||
 807	    info.type > STACK_TYPE_EXCEPTION_LAST)
 808		sp = __this_cpu_ist_top_va(VC2);
 809
 810sync:
 811	/*
 812	 * Found a safe stack - switch to it as if the entry didn't happen via
 813	 * IST stack. The code below only copies pt_regs, the real switch happens
 814	 * in assembly code.
 815	 */
 816	sp = ALIGN_DOWN(sp, 8) - sizeof(*regs_ret);
 817
 818	regs_ret = (struct pt_regs *)sp;
 819	*regs_ret = *regs;
 820
 821	return regs_ret;
 822}
 823#endif
 824
 825asmlinkage __visible noinstr struct pt_regs *fixup_bad_iret(struct pt_regs *bad_regs)
 826{
 827	struct pt_regs tmp, *new_stack;
 828
 829	/*
 830	 * This is called from entry_64.S early in handling a fault
 831	 * caused by a bad iret to user mode.  To handle the fault
 832	 * correctly, we want to move our stack frame to where it would
 833	 * be had we entered directly on the entry stack (rather than
 834	 * just below the IRET frame) and we want to pretend that the
 835	 * exception came from the IRET target.
 836	 */
 837	new_stack = (struct pt_regs *)__this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
 838
 839	/* Copy the IRET target to the temporary storage. */
 840	__memcpy(&tmp.ip, (void *)bad_regs->sp, 5*8);
 841
 842	/* Copy the remainder of the stack from the current stack. */
 843	__memcpy(&tmp, bad_regs, offsetof(struct pt_regs, ip));
 844
 845	/* Update the entry stack */
 846	__memcpy(new_stack, &tmp, sizeof(tmp));
 847
 848	BUG_ON(!user_mode(new_stack));
 849	return new_stack;
 850}
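/*
 * The "5*8" copied in fixup_bad_iret() above is the hardware IRET
 * frame the CPU pushed for the failed return to user mode; it
 * overlaps the tail of struct pt_regs:
 *
 *	regs->ip	RIP
 *	regs->cs	CS
 *	regs->flags	RFLAGS
 *	regs->sp	RSP
 *	regs->ss	SS	-> 5 * 8 bytes in total
 */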
 851#endif
 852
 853static bool is_sysenter_singlestep(struct pt_regs *regs)
 854{
 855	/*
 856	 * We don't try for precision here.  If we're anywhere in the region of
 857	 * code that can be single-stepped in the SYSENTER entry path, then
 858	 * assume that this is a useless single-step trap due to SYSENTER
 859	 * being invoked with TF set.  (We don't know in advance exactly
 860	 * which instructions will be hit because BTF could plausibly
 861	 * be set.)
 862	 */
 863#ifdef CONFIG_X86_32
 864	return (regs->ip - (unsigned long)__begin_SYSENTER_singlestep_region) <
 865		(unsigned long)__end_SYSENTER_singlestep_region -
 866		(unsigned long)__begin_SYSENTER_singlestep_region;
 867#elif defined(CONFIG_IA32_EMULATION)
 868	return (regs->ip - (unsigned long)entry_SYSENTER_compat) <
 869		(unsigned long)__end_entry_SYSENTER_compat -
 870		(unsigned long)entry_SYSENTER_compat;
 871#else
 872	return false;
 873#endif
 874}
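/*
 * Note on the checks above: "(ip - begin) < (end - begin)" is the
 * usual single-comparison bounds check.  If ip is below begin, the
 * unsigned subtraction wraps to a huge value and the comparison
 * fails, so no separate "ip >= begin" test is needed.
 */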
 875
 876static __always_inline unsigned long debug_read_clear_dr6(void)
 877{
 878	unsigned long dr6;
 879
 880	/*
 881	 * The Intel SDM says:
 882	 *
 883	 *   Certain debug exceptions may clear bits 0-3. The remaining
 884	 *   contents of the DR6 register are never cleared by the
 885	 *   processor. To avoid confusion in identifying debug
 886	 *   exceptions, debug handlers should clear the register before
 887	 *   returning to the interrupted task.
 888	 *
 889	 * Keep it simple: clear DR6 immediately.
 890	 */
 891	get_debugreg(dr6, 6);
 892	set_debugreg(DR6_RESERVED, 6);
 893	dr6 ^= DR6_RESERVED; /* Flip to positive polarity */
 894
 895	return dr6;
 896}
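/*
 * Worked example, assuming the DR6_RESERVED mask of 0xFFFF0FF0: after
 * a #DB for breakpoint 0 only, DR6 reads as 0xFFFF0FF1, and XORing
 * with DR6_RESERVED leaves 0x00000001 (DR_TRAP0).  Inverted-polarity
 * bits inside the mask, such as DR_BUS_LOCK, come out as 1 only when
 * they actually asserted.
 */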
 897
 898/*
 899 * Our handling of the processor debug registers is non-trivial.
 900 * We do not clear them on entry and exit from the kernel. Therefore
 901 * it is possible to get a watchpoint trap here from inside the kernel.
 902 * However, the code in ./ptrace.c has ensured that the user can
 903 * only set watchpoints on userspace addresses. Therefore the in-kernel
 904 * watchpoint trap can only occur in code which is reading/writing
 905 * from user space. Such code must not hold kernel locks (since it
 906 * can equally take a page fault), therefore it is safe to call
 907 * force_sig_info even though that claims and releases locks.
 908 *
 909 * Code in ./signal.c ensures that the debug control register
 910 * is restored before we deliver any signal, and therefore that
 911 * user code runs with the correct debug control register even though
 912 * we clear it here.
 913 *
 914 * Being careful here means that we don't have to be as careful in a
 915 * lot of more complicated places (task switching can be a bit lazy
 916 * about restoring all the debug state, and ptrace doesn't have to
 917 * find every occurrence of the TF bit that could be saved away even
 918 * by user code)
 919 *
 920 * May run on IST stack.
 921 */
 922
 923static bool notify_debug(struct pt_regs *regs, unsigned long *dr6)
 924{
 925	/*
 926	 * Notifiers will clear bits in @dr6 to indicate the event has been
 927	 * consumed - hw_breakpoint_handler(), single_stop_cont().
 928	 *
 929	 * Notifiers will set bits in @virtual_dr6 to indicate the desire
 930	 * for signals - ptrace_triggered(), kgdb_hw_overflow_handler().
 931	 */
 932	if (notify_die(DIE_DEBUG, "debug", regs, (long)dr6, 0, SIGTRAP) == NOTIFY_STOP)
 933		return true;
 934
 935	return false;
 936}
 937
 938static __always_inline void exc_debug_kernel(struct pt_regs *regs,
 939					     unsigned long dr6)
 940{
 941	/*
 942	 * Disable breakpoints during exception handling; recursive exceptions
 943	 * are exceedingly 'fun'.
 944	 *
 945	 * Since this function is NOKPROBE, and that also applies to
 946	 * HW_BREAKPOINT_X, we can't hit a breakpoint before this (XXX except a
 947	 * HW_BREAKPOINT_W on our stack)
 948	 *
 949	 * Entry text is excluded for HW_BP_X, and cpu_entry_area, which
 950	 * includes the entry stack, is excluded for everything.
 951	 */
 952	unsigned long dr7 = local_db_save();
 953	irqentry_state_t irq_state = irqentry_nmi_enter(regs);
 954	instrumentation_begin();
 955
 956	/*
 957	 * If something gets miswired and we end up here for a user mode
 958	 * #DB, we will malfunction.
 959	 */
 960	WARN_ON_ONCE(user_mode(regs));
 961
 962	if (test_thread_flag(TIF_BLOCKSTEP)) {
 963		/*
 964		 * The SDM says "The processor clears the BTF flag when it
 965		 * generates a debug exception."  PTRACE_BLOCKSTEP requested
 966		 * it for userspace, however, and we just took a kernel #DB,
 967		 * so re-set BTF.
 968		 */
 969		unsigned long debugctl;
 970
 971		rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
 972		debugctl |= DEBUGCTLMSR_BTF;
 973		wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
 974	}
 975
 976	/*
 977	 * Catch SYSENTER with TF set and clear DR_STEP. If this hit a
 978	 * watchpoint at the same time then that will still be handled.
 979	 */
 980	if ((dr6 & DR_STEP) && is_sysenter_singlestep(regs))
 981		dr6 &= ~DR_STEP;
 982
 983	/*
 984	 * The kernel doesn't use INT1
 985	 */
 986	if (!dr6)
 987		goto out;
 988
 989	if (notify_debug(regs, &dr6))
 990		goto out;
 991
 992	/*
 993	 * The kernel doesn't use TF single-step outside of:
 994	 *
 995	 *  - Kprobes, consumed through kprobe_debug_handler()
 996	 *  - KGDB, consumed through notify_debug()
 997	 *
 998	 * So if we get here with DR_STEP set, something is wonky.
 999	 *
1000	 * A known way to trigger this is through QEMU's GDB stub,
1001	 * which leaks #DB into the guest and causes IST recursion.
1002	 */
1003	if (WARN_ON_ONCE(dr6 & DR_STEP))
1004		regs->flags &= ~X86_EFLAGS_TF;
1005out:
1006	instrumentation_end();
1007	irqentry_nmi_exit(regs, irq_state);
1008
1009	local_db_restore(dr7);
1010}
1011
1012static __always_inline void exc_debug_user(struct pt_regs *regs,
1013					   unsigned long dr6)
1014{
1015	bool icebp;
1016
1017	/*
1018	 * If something gets miswired and we end up here for a kernel mode
1019	 * #DB, we will malfunction.
1020	 */
1021	WARN_ON_ONCE(!user_mode(regs));
1022
1023	/*
1024	 * NB: We can't easily clear DR7 here because
1025	 * irqentry_exit_to_user_mode() can invoke ptrace, schedule, access
1026	 * user memory, etc.  This means that a recursive #DB is possible.  If
1027	 * this happens, that #DB will hit exc_debug_kernel() and clear DR7.
1028	 * Since we're not on the IST stack right now, everything will be
1029	 * fine.
1030	 */
1031
1032	irqentry_enter_from_user_mode(regs);
1033	instrumentation_begin();
1034
1035	/*
1036	 * Start the virtual/ptrace DR6 value with just the DR_STEP mask
1037	 * of the real DR6. ptrace_triggered() will set the DR_TRAPn bits.
1038	 *
1039	 * Userspace expects DR_STEP to be visible in ptrace_get_debugreg(6)
1040	 * even if it is not the result of PTRACE_SINGLESTEP.
1041	 */
1042	current->thread.virtual_dr6 = (dr6 & DR_STEP);
1043
1044	/*
1045	 * The SDM says "The processor clears the BTF flag when it
1046	 * generates a debug exception."  Clear TIF_BLOCKSTEP to keep
1047	 * TIF_BLOCKSTEP in sync with the hardware BTF flag.
1048	 */
1049	clear_thread_flag(TIF_BLOCKSTEP);
1050
1051	/*
1052	 * If DR6 gives us no clue about the origin of this trap,
1053	 * then it's very likely the result of an icebp/int01 trap.
1054	 * The user wants a SIGTRAP for that.
1055	 */
1056	icebp = !dr6;
1057
1058	if (notify_debug(regs, &dr6))
1059		goto out;
1060
1061	/* It's safe to allow irq's after DR6 has been saved */
1062	local_irq_enable();
1063
1064	if (v8086_mode(regs)) {
1065		handle_vm86_trap((struct kernel_vm86_regs *)regs, 0, X86_TRAP_DB);
1066		goto out_irq;
1067	}
1068
1069	/* #DB for bus lock can only be triggered from userspace. */
1070	if (dr6 & DR_BUS_LOCK)
1071		handle_bus_lock(regs);
1072
1073	/* Add the virtual_dr6 bits for signals. */
1074	dr6 |= current->thread.virtual_dr6;
1075	if (dr6 & (DR_STEP | DR_TRAP_BITS) || icebp)
1076		send_sigtrap(regs, 0, get_si_code(dr6));
1077
1078out_irq:
1079	local_irq_disable();
1080out:
1081	instrumentation_end();
1082	irqentry_exit_to_user_mode(regs);
1083}
1084
1085#ifdef CONFIG_X86_64
1086/* IST stack entry */
1087DEFINE_IDTENTRY_DEBUG(exc_debug)
1088{
1089	exc_debug_kernel(regs, debug_read_clear_dr6());
1090}
1091
1092/* User entry, runs on regular task stack */
1093DEFINE_IDTENTRY_DEBUG_USER(exc_debug)
1094{
1095	exc_debug_user(regs, debug_read_clear_dr6());
1096}
1097#else
1098/* 32 bit does not have separate entry points. */
1099DEFINE_IDTENTRY_RAW(exc_debug)
1100{
1101	unsigned long dr6 = debug_read_clear_dr6();
1102
1103	if (user_mode(regs))
1104		exc_debug_user(regs, dr6);
1105	else
1106		exc_debug_kernel(regs, dr6);
1107}
1108#endif
1109
1110/*
1111 * Note that we play around with the 'TS' bit in an attempt to get
1112 * the correct behaviour even in the presence of asynchronous
1113 * IRQ13 delivery.
1114 */
1115static void math_error(struct pt_regs *regs, int trapnr)
1116{
1117	struct task_struct *task = current;
1118	struct fpu *fpu = &task->thread.fpu;
1119	int si_code;
1120	char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
1121						"simd exception";
1122
1123	cond_local_irq_enable(regs);
1124
1125	if (!user_mode(regs)) {
1126		if (fixup_exception(regs, trapnr, 0, 0))
1127			goto exit;
1128
1129		task->thread.error_code = 0;
1130		task->thread.trap_nr = trapnr;
1131
1132		if (notify_die(DIE_TRAP, str, regs, 0, trapnr,
1133			       SIGFPE) != NOTIFY_STOP)
1134			die(str, regs, 0);
1135		goto exit;
1136	}
1137
1138	/*
1139	 * Synchronize the FPU register state to the memory register state
1140	 * if necessary. This allows the exception handler to inspect it.
1141	 */
1142	fpu_sync_fpstate(fpu);
1143
1144	task->thread.trap_nr	= trapnr;
1145	task->thread.error_code = 0;
1146
1147	si_code = fpu__exception_code(fpu, trapnr);
1148	/* Retry when we get spurious exceptions: */
1149	if (!si_code)
1150		goto exit;
1151
1152	if (fixup_vdso_exception(regs, trapnr, 0, 0))
1153		goto exit;
1154
1155	force_sig_fault(SIGFPE, si_code,
1156			(void __user *)uprobe_get_trap_addr(regs));
1157exit:
1158	cond_local_irq_disable(regs);
1159}
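/*
 * Userspace sketch (hypothetical, glibc): unmasking FP exceptions
 * turns a silently-masked divide-by-zero into a trap that lands in
 * math_error() and is delivered as SIGFPE with si_code FPE_FLTDIV
 * (#XF for SSE code, #MF for x87 code):
 *
 *	#define _GNU_SOURCE
 *	#include <fenv.h>
 *
 *	feenableexcept(FE_DIVBYZERO);
 *	volatile double zero = 0.0;
 *	double r = 1.0 / zero;		// -> #XF/#MF -> SIGFPE
 */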
1160
1161DEFINE_IDTENTRY(exc_coprocessor_error)
1162{
1163	math_error(regs, X86_TRAP_MF);
1164}
1165
1166DEFINE_IDTENTRY(exc_simd_coprocessor_error)
1167{
1168	if (IS_ENABLED(CONFIG_X86_INVD_BUG)) {
1169		/* AMD 486 bug: INVD in CPL 0 raises #XF instead of #GP */
1170		if (!static_cpu_has(X86_FEATURE_XMM)) {
1171			__exc_general_protection(regs, 0);
1172			return;
1173		}
1174	}
1175	math_error(regs, X86_TRAP_XF);
1176}
1177
1178DEFINE_IDTENTRY(exc_spurious_interrupt_bug)
1179{
1180	/*
1181	 * This addresses a Pentium Pro Erratum:
1182	 *
1183	 * PROBLEM: If the APIC subsystem is configured in mixed mode with
1184	 * Virtual Wire mode implemented through the local APIC, an
1185	 * interrupt vector of 0Fh (Intel reserved encoding) may be
1186	 * generated by the local APIC (Int 15).  This vector may be
1187	 * generated upon receipt of a spurious interrupt (an interrupt
1188	 * which is removed before the system receives the INTA sequence)
1189	 * instead of the programmed 8259 spurious interrupt vector.
1190	 *
1191	 * IMPLICATION: The spurious interrupt vector programmed in the
1192	 * 8259 is normally handled by an operating system's spurious
1193	 * interrupt handler. However, a vector of 0Fh is unknown to some
1194	 * operating systems, which would crash if this erratum occurred.
1195	 *
1196	 * In theory this could be limited to 32bit, but the handler does no
1197	 * harm and who knows which other CPUs suffer from this.
1198	 */
1199}
1200
1201static bool handle_xfd_event(struct pt_regs *regs)
1202{
1203	u64 xfd_err;
1204	int err;
1205
1206	if (!IS_ENABLED(CONFIG_X86_64) || !cpu_feature_enabled(X86_FEATURE_XFD))
1207		return false;
1208
1209	rdmsrl(MSR_IA32_XFD_ERR, xfd_err);
1210	if (!xfd_err)
1211		return false;
1212
1213	wrmsrl(MSR_IA32_XFD_ERR, 0);
1214
1215	/* Die if that happens in kernel space */
1216	if (WARN_ON(!user_mode(regs)))
1217		return false;
1218
1219	local_irq_enable();
1220
1221	err = xfd_enable_feature(xfd_err);
1222
1223	switch (err) {
1224	case -EPERM:
1225		force_sig_fault(SIGILL, ILL_ILLOPC, error_get_trap_addr(regs));
1226		break;
1227	case -EFAULT:
1228		force_sig(SIGSEGV);
1229		break;
1230	}
1231
1232	local_irq_disable();
1233	return true;
1234}
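/*
 * Usage sketch: XFD implements lazy allocation of the large AMX
 * state.  A task first asks for permission, then its first tile
 * instruction takes #NM, which lands in handle_xfd_event() above and
 * enlarges the task's fpstate.  ARCH_REQ_XCOMP_PERM is from
 * asm/prctl.h; 18 is XFEATURE_XTILEDATA; the intrinsic is an
 * arbitrary example of a first AMX instruction:
 *
 *	syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_PERM, 18);
 *	_tile_loadconfig(&cfg);		// first AMX insn: #NM -> XFD fixup
 */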
1235
1236DEFINE_IDTENTRY(exc_device_not_available)
1237{
1238	unsigned long cr0 = read_cr0();
1239
1240	if (handle_xfd_event(regs))
1241		return;
1242
1243#ifdef CONFIG_MATH_EMULATION
1244	if (!boot_cpu_has(X86_FEATURE_FPU) && (cr0 & X86_CR0_EM)) {
1245		struct math_emu_info info = { };
1246
1247		cond_local_irq_enable(regs);
1248
1249		info.regs = regs;
1250		math_emulate(&info);
1251
1252		cond_local_irq_disable(regs);
1253		return;
1254	}
1255#endif
1256
1257	/* This should not happen. */
1258	if (WARN(cr0 & X86_CR0_TS, "CR0.TS was set")) {
1259		/* Try to fix it up and carry on. */
1260		write_cr0(cr0 & ~X86_CR0_TS);
1261	} else {
1262		/*
1263		 * Something terrible happened, and we're better off trying
1264		 * to kill the task than getting stuck in a never-ending
1265		 * loop of #NM faults.
1266		 */
1267		die("unexpected #NM exception", regs, 0);
1268	}
1269}
1270
1271#ifdef CONFIG_INTEL_TDX_GUEST
1272
1273#define VE_FAULT_STR "VE fault"
1274
1275static void ve_raise_fault(struct pt_regs *regs, long error_code,
1276			   unsigned long address)
1277{
1278	if (user_mode(regs)) {
1279		gp_user_force_sig_segv(regs, X86_TRAP_VE, error_code, VE_FAULT_STR);
1280		return;
1281	}
1282
1283	if (gp_try_fixup_and_notify(regs, X86_TRAP_VE, error_code,
1284				    VE_FAULT_STR, address)) {
1285		return;
1286	}
1287
1288	die_addr(VE_FAULT_STR, regs, error_code, address);
1289}
1290
1291/*
1292 * Virtualization Exceptions (#VE) are delivered to TDX guests due to
1293 * specific guest actions which may happen in either user space or the
1294 * kernel:
1295 *
1296 *  * Specific instructions (WBINVD, for example)
1297 *  * Specific MSR accesses
1298 *  * Specific CPUID leaf accesses
1299 *  * Access to specific guest physical addresses
1300 *
1301 * In the settings that Linux will run in, virtualization exceptions are
1302 * never generated on accesses to normal, TD-private memory that has been
1303 * accepted (by BIOS or with tdx_enc_status_changed()).
1304 *
1305 * Syscall entry code has a critical window where the kernel stack is not
1306 * yet set up. Any exception in this window leads to hard to debug issues
1307 * and can be exploited for privilege escalation. Exceptions in the NMI
1308 * entry code also cause issues. Returning from the exception handler with
1309 * IRET will re-enable NMIs and nested NMI will corrupt the NMI stack.
1310 *
1311 * For these reasons, the kernel avoids #VEs during the syscall gap and
1312 * the NMI entry code. Entry code paths do not access TD-shared memory,
1313 * MMIO regions, use #VE triggering MSRs, instructions, or CPUID leaves
1314 * that might generate #VE. VMM can remove memory from TD at any point,
1315 * but access to unaccepted (or missing) private memory leads to VM
1316 * termination, not to #VE.
1317 *
1318 * Similarly to page faults and breakpoints, #VEs are allowed in NMI
1319 * handlers once the kernel is ready to deal with nested NMIs.
1320 *
1321 * During #VE delivery, all interrupts, including NMIs, are blocked until
1322 * TDGETVEINFO is called. This prevents #VE nesting until the kernel reads
1323 * the VE info.
1324 *
1325 * If a guest kernel action which would normally cause a #VE occurs in
1326 * the interrupt-disabled region before TDGETVEINFO, a #DF (double
1327 * fault) is delivered to the guest, which will result in an oops.
1328 *
1329 * The entry code has been audited carefully to follow these expectations.
1330 * Changes in the entry code have to be audited for correctness vs. this
1331 * aspect. Similarly to #PF, a #VE in these places would expose the kernel
1332 * to privilege escalation or may lead to random crashes.
1333 */
1334DEFINE_IDTENTRY(exc_virtualization_exception)
1335{
1336	struct ve_info ve;
1337
1338	/*
1339	 * NMIs/Machine-checks/Interrupts will be in a disabled state
1340	 * until the TDGETVEINFO TDCALL is executed. This ensures that VE
1341	 * info cannot be overwritten by a nested #VE.
1342	 */
1343	tdx_get_ve_info(&ve);
1344
1345	cond_local_irq_enable(regs);
1346
1347	/*
1348	 * If tdx_handle_virt_exception() could not process
1349	 * it successfully, treat it as #GP(0) and handle it.
1350	 */
1351	if (!tdx_handle_virt_exception(regs, &ve))
1352		ve_raise_fault(regs, 0, ve.gla);
1353
1354	cond_local_irq_disable(regs);
1355}
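/*
 * Illustrative example: a typical #VE in a TDX guest is an access to
 * an MSR the TDX module does not pass through.  The MSR below is an
 * arbitrary example, not a statement about any particular TDX module
 * version; such a read would arrive here with ve.exit_reason ==
 * EXIT_REASON_MSR_READ and be forwarded to the VMM by
 * tdx_handle_virt_exception():
 *
 *	u64 val;
 *
 *	rdmsrl(MSR_IA32_MPERF, val);	// may trap with #VE in a TD
 */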
1356
1357#endif
1358
1359#ifdef CONFIG_X86_32
1360DEFINE_IDTENTRY_SW(iret_error)
1361{
1362	local_irq_enable();
1363	if (notify_die(DIE_TRAP, "iret exception", regs, 0,
1364			X86_TRAP_IRET, SIGILL) != NOTIFY_STOP) {
1365		do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, 0,
1366			ILL_BADSTK, (void __user *)NULL);
1367	}
1368	local_irq_disable();
1369}
1370#endif
1371
1372void __init trap_init(void)
1373{
1374	/* Init cpu_entry_area before IST entries are set up */
1375	setup_cpu_entry_areas();
1376
1377	/* Init GHCB memory pages when running as an SEV-ES guest */
1378	sev_es_init_vc_handling();
1379
1380	/* Initialize TSS before setting up traps so ISTs work */
1381	cpu_init_exception_handling();
1382	/* Setup traps as cpu_init() might #GP */
1383	idt_setup_traps();
1384	cpu_init();
1385}