v4.10.11
   1/*
   2 *  Copyright (C) 1991, 1992  Linus Torvalds
   3 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
   4 *
   5 *  Pentium III FXSR, SSE support
   6 *	Gareth Hughes <gareth@valinux.com>, May 2000
   7 */
   8
   9/*
  10 * Handle hardware traps and faults.
  11 */
  12
  13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  14
  15#include <linux/context_tracking.h>
  16#include <linux/interrupt.h>
  17#include <linux/kallsyms.h>
  18#include <linux/spinlock.h>
  19#include <linux/kprobes.h>
  20#include <linux/uaccess.h>
  21#include <linux/kdebug.h>
  22#include <linux/kgdb.h>
  23#include <linux/kernel.h>
  24#include <linux/export.h>
  25#include <linux/ptrace.h>
  26#include <linux/uprobes.h>
  27#include <linux/string.h>
  28#include <linux/delay.h>
  29#include <linux/errno.h>
  30#include <linux/kexec.h>
  31#include <linux/sched.h>
  32#include <linux/timer.h>
  33#include <linux/init.h>
  34#include <linux/bug.h>
  35#include <linux/nmi.h>
  36#include <linux/mm.h>
  37#include <linux/smp.h>
  38#include <linux/io.h>
  39
  40#ifdef CONFIG_EISA
  41#include <linux/ioport.h>
  42#include <linux/eisa.h>
  43#endif
  44
  45#if defined(CONFIG_EDAC)
  46#include <linux/edac.h>
  47#endif
  48
  49#include <asm/kmemcheck.h>
  50#include <asm/stacktrace.h>
  51#include <asm/processor.h>
  52#include <asm/debugreg.h>
  53#include <linux/atomic.h>
  54#include <asm/text-patching.h>
  55#include <asm/ftrace.h>
  56#include <asm/traps.h>
  57#include <asm/desc.h>
  58#include <asm/fpu/internal.h>
  59#include <asm/mce.h>
  60#include <asm/fixmap.h>
  61#include <asm/mach_traps.h>
  62#include <asm/alternative.h>
  63#include <asm/fpu/xstate.h>
  64#include <asm/trace/mpx.h>
  65#include <asm/mpx.h>
  66#include <asm/vm86.h>
  67
  68#ifdef CONFIG_X86_64
  69#include <asm/x86_init.h>
  70#include <asm/pgalloc.h>
  71#include <asm/proto.h>
  72
  73/* No need to be aligned, but done to keep all IDTs defined the same way. */
  74gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
  75#else
  76#include <asm/processor-flags.h>
  77#include <asm/setup.h>
  78#include <asm/proto.h>
  79#endif
  80
  81/* Must be page-aligned because the real IDT is used in a fixmap. */
  82gate_desc idt_table[NR_VECTORS] __page_aligned_bss;
  83
  84DECLARE_BITMAP(used_vectors, NR_VECTORS);
  85EXPORT_SYMBOL_GPL(used_vectors);
  86
  87static inline void cond_local_irq_enable(struct pt_regs *regs)
  88{
  89	if (regs->flags & X86_EFLAGS_IF)
  90		local_irq_enable();
  91}
  92
  93static inline void cond_local_irq_disable(struct pt_regs *regs)
  94{
  95	if (regs->flags & X86_EFLAGS_IF)
  96		local_irq_disable();
  97}
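/*
 * Usage sketch, mirroring the handlers below: any trap-handling work
 * done with interrupts conditionally enabled is bracketed so that the
 * interrupted context's IF state is restored on the way out:
 *
 *	cond_local_irq_enable(regs);
 *	...handle the trap...
 *	cond_local_irq_disable(regs);
 */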
  98
  99/*
 100 * In IST context, we explicitly disable preemption.  This serves two
 101 * purposes: it makes it much less likely that we would accidentally
 102 * schedule in IST context and it will force a warning if we somehow
 103 * manage to schedule by accident.
 104 */
 105void ist_enter(struct pt_regs *regs)
 106{
 107	if (user_mode(regs)) {
 108		RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
 109	} else {
 110		/*
 111		 * We might have interrupted pretty much anything.  In
 112		 * fact, if we're a machine check, we can even interrupt
 113		 * NMI processing.  We don't want in_nmi() to return true,
 114		 * but we need to notify RCU.
 115		 */
 116		rcu_nmi_enter();
 117	}
 118
 119	preempt_disable();
 120
 121	/* This code is a bit fragile.  Test it. */
 122	RCU_LOCKDEP_WARN(!rcu_is_watching(), "ist_enter didn't work");
 123}
 124
 125void ist_exit(struct pt_regs *regs)
 126{
 127	preempt_enable_no_resched();
 128
 129	if (!user_mode(regs))
 130		rcu_nmi_exit();
 131}
 132
 133/**
 134 * ist_begin_non_atomic() - begin a non-atomic section in an IST exception
 135 * @regs:	regs passed to the IST exception handler
 136 *
 137 * IST exception handlers normally cannot schedule.  As a special
 138 * exception, if the exception interrupted userspace code (i.e.
 139 * user_mode(regs) would return true) and the exception was not
 140 * a double fault, it can be safe to schedule.  ist_begin_non_atomic()
 141 * begins a non-atomic section within an ist_enter()/ist_exit() region.
 142 * Callers are responsible for enabling interrupts themselves inside
 143 * the non-atomic section, and callers must call ist_end_non_atomic()
 144 * before ist_exit().
 145 */
 146void ist_begin_non_atomic(struct pt_regs *regs)
 147{
 148	BUG_ON(!user_mode(regs));
 149
 150	/*
 151	 * Sanity check: we need to be on the normal thread stack.  This
 152	 * will catch asm bugs and any attempt to use ist_preempt_enable
 153	 * from double_fault.
 154	 */
 155	BUG_ON((unsigned long)(current_top_of_stack() -
 156			       current_stack_pointer()) >= THREAD_SIZE);
 157
 158	preempt_enable_no_resched();
 159}
 160
 161/**
  162 * ist_end_non_atomic() - end a non-atomic section in an IST exception
 163 *
 164 * Ends a non-atomic section started with ist_begin_non_atomic().
 165 */
 166void ist_end_non_atomic(void)
 167{
 168	preempt_disable();
 169}
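/*
 * Taken together, the intended nesting of the ist_*() helpers above is
 * roughly:
 *
 *	ist_enter(regs);
 *	...atomic work...
 *	if (user_mode(regs)) {
 *		ist_begin_non_atomic(regs);
 *		...work that may enable IRQs and schedule...
 *		ist_end_non_atomic();
 *	}
 *	ist_exit(regs);
 */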
 170
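/*
 * Return-value contract, inferred from the callers below: 0 means the
 * trap was fully handled here (forwarded to vm86, fixed up via an
 * exception table entry, or died) and no signal should be sent; -1
 * means the caller should go on to deliver a signal.
 */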
 171static nokprobe_inline int
 172do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
 173		  struct pt_regs *regs,	long error_code)
 174{
 175	if (v8086_mode(regs)) {
 176		/*
 177		 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
  178		 * On NMI (interrupt 2), do_trap should not be called.
 179		 */
 180		if (trapnr < X86_TRAP_UD) {
 181			if (!handle_vm86_trap((struct kernel_vm86_regs *) regs,
 182						error_code, trapnr))
 183				return 0;
 184		}
 185		return -1;
 186	}
 187
 188	if (!user_mode(regs)) {
 189		if (!fixup_exception(regs, trapnr)) {
 190			tsk->thread.error_code = error_code;
 191			tsk->thread.trap_nr = trapnr;
 192			die(str, regs, error_code);
 193		}
 194		return 0;
 195	}
 196
 197	return -1;
 198}
 199
 200static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr,
 201				siginfo_t *info)
 202{
 203	unsigned long siaddr;
 204	int sicode;
 205
 206	switch (trapnr) {
 207	default:
 208		return SEND_SIG_PRIV;
 209
 210	case X86_TRAP_DE:
 211		sicode = FPE_INTDIV;
 212		siaddr = uprobe_get_trap_addr(regs);
 213		break;
 214	case X86_TRAP_UD:
 215		sicode = ILL_ILLOPN;
 216		siaddr = uprobe_get_trap_addr(regs);
 217		break;
 218	case X86_TRAP_AC:
 219		sicode = BUS_ADRALN;
 220		siaddr = 0;
 221		break;
 222	}
 223
 224	info->si_signo = signr;
 225	info->si_errno = 0;
 226	info->si_code = sicode;
 227	info->si_addr = (void __user *)siaddr;
 228	return info;
 229}
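/*
 * The SEND_SIG_PRIV returned in the default case above is a special
 * siginfo pointer: force_sig_info() recognizes it and synthesizes
 * kernel-generated siginfo (SI_KERNEL) instead of copying from a
 * caller-supplied structure.
 */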
 230
 231static void
 232do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
 233	long error_code, siginfo_t *info)
 234{
 235	struct task_struct *tsk = current;
 236
 237
 238	if (!do_trap_no_signal(tsk, trapnr, str, regs, error_code))
 239		return;
 240	/*
 241	 * We want error_code and trap_nr set for userspace faults and
 242	 * kernelspace faults which result in die(), but not
 243	 * kernelspace faults which are fixed up.  die() gives the
 244	 * process no chance to handle the signal and notice the
 245	 * kernel fault information, so that won't result in polluting
 246	 * the information about previously queued, but not yet
 247	 * delivered, faults.  See also do_general_protection below.
 248	 */
 249	tsk->thread.error_code = error_code;
 250	tsk->thread.trap_nr = trapnr;
 251
 252	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
 253	    printk_ratelimit()) {
 254		pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
 255			tsk->comm, tsk->pid, str,
 256			regs->ip, regs->sp, error_code);
 257		print_vma_addr(" in ", regs->ip);
 258		pr_cont("\n");
 259	}
 260
 261	force_sig_info(signr, info ?: SEND_SIG_PRIV, tsk);
 262}
 263NOKPROBE_SYMBOL(do_trap);
 264
 265static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
 266			  unsigned long trapnr, int signr)
 267{
 268	siginfo_t info;
 269
 270	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
 271
 272	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
 273			NOTIFY_STOP) {
 274		cond_local_irq_enable(regs);
 275		do_trap(trapnr, signr, str, regs, error_code,
 276			fill_trap_info(regs, signr, trapnr, &info));
 277	}
 278}
 279
 280#define DO_ERROR(trapnr, signr, str, name)				\
 281dotraplinkage void do_##name(struct pt_regs *regs, long error_code)	\
 282{									\
 283	do_error_trap(regs, error_code, str, trapnr, signr);		\
 284}
 285
 286DO_ERROR(X86_TRAP_DE,     SIGFPE,  "divide error",		divide_error)
 287DO_ERROR(X86_TRAP_OF,     SIGSEGV, "overflow",			overflow)
 288DO_ERROR(X86_TRAP_UD,     SIGILL,  "invalid opcode",		invalid_op)
 289DO_ERROR(X86_TRAP_OLD_MF, SIGFPE,  "coprocessor segment overrun",coprocessor_segment_overrun)
 290DO_ERROR(X86_TRAP_TS,     SIGSEGV, "invalid TSS",		invalid_TSS)
 291DO_ERROR(X86_TRAP_NP,     SIGBUS,  "segment not present",	segment_not_present)
 292DO_ERROR(X86_TRAP_SS,     SIGBUS,  "stack segment",		stack_segment)
 293DO_ERROR(X86_TRAP_AC,     SIGBUS,  "alignment check",		alignment_check)
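/*
 * For reference, each DO_ERROR() line above stamps out one small
 * handler; the first expands to roughly:
 *
 *	dotraplinkage void do_divide_error(struct pt_regs *regs, long error_code)
 *	{
 *		do_error_trap(regs, error_code, "divide error", X86_TRAP_DE, SIGFPE);
 *	}
 */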
 294
 295#ifdef CONFIG_VMAP_STACK
 296__visible void __noreturn handle_stack_overflow(const char *message,
 297						struct pt_regs *regs,
 298						unsigned long fault_address)
 299{
 300	printk(KERN_EMERG "BUG: stack guard page was hit at %p (stack is %p..%p)\n",
 301		 (void *)fault_address, current->stack,
 302		 (char *)current->stack + THREAD_SIZE - 1);
 303	die(message, regs, 0);
 304
 305	/* Be absolutely certain we don't return. */
 306	panic(message);
 307}
 308#endif
 309
 310#ifdef CONFIG_X86_64
 311/* Runs on IST stack */
 312dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
 313{
 314	static const char str[] = "double fault";
 315	struct task_struct *tsk = current;
 316#ifdef CONFIG_VMAP_STACK
 317	unsigned long cr2;
 318#endif
 319
 320#ifdef CONFIG_X86_ESPFIX64
 321	extern unsigned char native_irq_return_iret[];
 322
 323	/*
 324	 * If IRET takes a non-IST fault on the espfix64 stack, then we
 325	 * end up promoting it to a doublefault.  In that case, modify
 326	 * the stack to make it look like we just entered the #GP
 327	 * handler from user space, similar to bad_iret.
 328	 *
 329	 * No need for ist_enter here because we don't use RCU.
 330	 */
 331	if (((long)regs->sp >> PGDIR_SHIFT) == ESPFIX_PGD_ENTRY &&
 332		regs->cs == __KERNEL_CS &&
 333		regs->ip == (unsigned long)native_irq_return_iret)
 334	{
 335		struct pt_regs *normal_regs = task_pt_regs(current);
 336
 337		/* Fake a #GP(0) from userspace. */
 338		memmove(&normal_regs->ip, (void *)regs->sp, 5*8);
 339		normal_regs->orig_ax = 0;  /* Missing (lost) #GP error code */
 340		regs->ip = (unsigned long)general_protection;
 341		regs->sp = (unsigned long)&normal_regs->orig_ax;
 342
 343		return;
 344	}
 345#endif
 346
 347	ist_enter(regs);
 348	notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);
 349
 350	tsk->thread.error_code = error_code;
 351	tsk->thread.trap_nr = X86_TRAP_DF;
 352
 353#ifdef CONFIG_VMAP_STACK
 354	/*
 355	 * If we overflow the stack into a guard page, the CPU will fail
 356	 * to deliver #PF and will send #DF instead.  Similarly, if we
 357	 * take any non-IST exception while too close to the bottom of
 358	 * the stack, the processor will get a page fault while
 359	 * delivering the exception and will generate a double fault.
 360	 *
 361	 * According to the SDM (footnote in 6.15 under "Interrupt 14 -
  362	 * Page-Fault Exception (#PF)"):
 363	 *
 364	 *   Processors update CR2 whenever a page fault is detected. If a
 365	 *   second page fault occurs while an earlier page fault is being
  366	 *   delivered, the faulting linear address of the second fault will
 367	 *   overwrite the contents of CR2 (replacing the previous
 368	 *   address). These updates to CR2 occur even if the page fault
 369	 *   results in a double fault or occurs during the delivery of a
 370	 *   double fault.
 371	 *
 372	 * The logic below has a small possibility of incorrectly diagnosing
 373	 * some errors as stack overflows.  For example, if the IDT or GDT
 374	 * gets corrupted such that #GP delivery fails due to a bad descriptor
 375	 * causing #GP and we hit this condition while CR2 coincidentally
 376	 * points to the stack guard page, we'll think we overflowed the
 377	 * stack.  Given that we're going to panic one way or another
 378	 * if this happens, this isn't necessarily worth fixing.
 379	 *
 380	 * If necessary, we could improve the test by only diagnosing
 381	 * a stack overflow if the saved RSP points within 47 bytes of
 382	 * the bottom of the stack: if RSP == tsk_stack + 48 and we
 383	 * take an exception, the stack is already aligned and there
  384	 * will be enough room for SS, RSP, RFLAGS, CS, RIP, and a
 385	 * possible error code, so a stack overflow would *not* double
 386	 * fault.  With any less space left, exception delivery could
 387	 * fail, and, as a practical matter, we've overflowed the
 388	 * stack even if the actual trigger for the double fault was
 389	 * something else.
 390	 */
 391	cr2 = read_cr2();
 392	if ((unsigned long)task_stack_page(tsk) - 1 - cr2 < PAGE_SIZE)
 393		handle_stack_overflow("kernel stack overflow (double-fault)", regs, cr2);
 394#endif
 395
 396#ifdef CONFIG_DOUBLEFAULT
 397	df_debug(regs, error_code);
 398#endif
 399	/*
 400	 * This is always a kernel trap and never fixable (and thus must
 401	 * never return).
 402	 */
 403	for (;;)
 404		die(str, regs, error_code);
 405}
 406#endif
 407
 408dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
 409{
 410	const struct mpx_bndcsr *bndcsr;
 411	siginfo_t *info;
 412
 413	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
 414	if (notify_die(DIE_TRAP, "bounds", regs, error_code,
 415			X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP)
 416		return;
 417	cond_local_irq_enable(regs);
 418
 419	if (!user_mode(regs))
 420		die("bounds", regs, error_code);
 421
 422	if (!cpu_feature_enabled(X86_FEATURE_MPX)) {
 423		/* The exception is not from Intel MPX */
 424		goto exit_trap;
 425	}
 426
 427	/*
 428	 * We need to look at BNDSTATUS to resolve this exception.
 429	 * A NULL here might mean that it is in its 'init state',
  430	 * which is all zeros, indicating that MPX was not
 431	 * responsible for the exception.
 432	 */
 433	bndcsr = get_xsave_field_ptr(XFEATURE_MASK_BNDCSR);
 434	if (!bndcsr)
 435		goto exit_trap;
 436
 437	trace_bounds_exception_mpx(bndcsr);
 438	/*
 439	 * The error code field of the BNDSTATUS register communicates status
 440	 * information of a bound range exception #BR or operation involving
  441	 * information of a bound range exception #BR or an operation
  442	 * involving the bound directory.
 443	switch (bndcsr->bndstatus & MPX_BNDSTA_ERROR_CODE) {
 444	case 2:	/* Bound directory has invalid entry. */
 445		if (mpx_handle_bd_fault())
 446			goto exit_trap;
 447		break; /* Success, it was handled */
 448	case 1: /* Bound violation. */
 449		info = mpx_generate_siginfo(regs);
 450		if (IS_ERR(info)) {
 451			/*
 452			 * We failed to decode the MPX instruction.  Act as if
 453			 * the exception was not caused by MPX.
 454			 */
 455			goto exit_trap;
 456		}
 457		/*
 458		 * Success, we decoded the instruction and retrieved
 459		 * an 'info' containing the address being accessed
 460		 * which caused the exception.  This information
  461		 * allows an application to possibly handle the
 462		 * #BR exception itself.
 463		 */
 464		do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, info);
 465		kfree(info);
 466		break;
 467	case 0: /* No exception caused by Intel MPX operations. */
 468		goto exit_trap;
 469	default:
 470		die("bounds", regs, error_code);
 471	}
 472
 473	return;
 474
 475exit_trap:
 476	/*
 477	 * This path out is for all the cases where we could not
 478	 * handle the exception in some way (like allocating a
  479	 * table or telling userspace about it).  We will also end
  480	 * up here if the kernel has MPX turned off at compile
  481	 * time.
 482	 */
 483	do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, NULL);
 484}
 485
 486dotraplinkage void
 487do_general_protection(struct pt_regs *regs, long error_code)
 488{
 489	struct task_struct *tsk;
 490
 491	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
 492	cond_local_irq_enable(regs);
 493
 494	if (v8086_mode(regs)) {
 495		local_irq_enable();
 496		handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
 497		return;
 498	}
 499
 500	tsk = current;
 501	if (!user_mode(regs)) {
 502		if (fixup_exception(regs, X86_TRAP_GP))
 503			return;
 504
 505		tsk->thread.error_code = error_code;
 506		tsk->thread.trap_nr = X86_TRAP_GP;
 507		if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
 508			       X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
 509			die("general protection fault", regs, error_code);
 510		return;
 511	}
 512
 513	tsk->thread.error_code = error_code;
 514	tsk->thread.trap_nr = X86_TRAP_GP;
 515
 516	if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
 517			printk_ratelimit()) {
 518		pr_info("%s[%d] general protection ip:%lx sp:%lx error:%lx",
 519			tsk->comm, task_pid_nr(tsk),
 520			regs->ip, regs->sp, error_code);
 521		print_vma_addr(" in ", regs->ip);
 522		pr_cont("\n");
 523	}
 524
 525	force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
 526}
 527NOKPROBE_SYMBOL(do_general_protection);
 528
 529/* May run on IST stack. */
 530dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
 531{
 532#ifdef CONFIG_DYNAMIC_FTRACE
 533	/*
 534	 * ftrace must be first, everything else may cause a recursive crash.
 535	 * See note by declaration of modifying_ftrace_code in ftrace.c
 536	 */
 537	if (unlikely(atomic_read(&modifying_ftrace_code)) &&
 538	    ftrace_int3_handler(regs))
 539		return;
 540#endif
 541	if (poke_int3_handler(regs))
 542		return;
 543
 544	ist_enter(regs);
 545	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
 546#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
 547	if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
 548				SIGTRAP) == NOTIFY_STOP)
 549		goto exit;
 550#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
 551
 552#ifdef CONFIG_KPROBES
 553	if (kprobe_int3_handler(regs))
 554		goto exit;
 555#endif
 556
 557	if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
 558			SIGTRAP) == NOTIFY_STOP)
 559		goto exit;
 560
 561	/*
 562	 * Let others (NMI) know that the debug stack is in use
 563	 * as we may switch to the interrupt stack.
 564	 */
 565	debug_stack_usage_inc();
 566	preempt_disable();
 567	cond_local_irq_enable(regs);
 568	do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
 569	cond_local_irq_disable(regs);
 570	preempt_enable_no_resched();
 571	debug_stack_usage_dec();
 572exit:
 573	ist_exit(regs);
 574}
 575NOKPROBE_SYMBOL(do_int3);
 576
 577#ifdef CONFIG_X86_64
 578/*
 579 * Help handler running on IST stack to switch off the IST stack if the
 580 * interrupted code was in user mode. The actual stack switch is done in
 581 * entry_64.S
 582 */
 583asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs)
 584{
 585	struct pt_regs *regs = task_pt_regs(current);
 586	*regs = *eregs;
 587	return regs;
 588}
 589NOKPROBE_SYMBOL(sync_regs);
 590
 591struct bad_iret_stack {
 592	void *error_entry_ret;
 593	struct pt_regs regs;
 594};
 595
 596asmlinkage __visible notrace
 597struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
 598{
 599	/*
 600	 * This is called from entry_64.S early in handling a fault
 601	 * caused by a bad iret to user mode.  To handle the fault
 602	 * correctly, we want move our stack frame to task_pt_regs
 603	 * and we want to pretend that the exception came from the
 604	 * iret target.
 605	 */
 606	struct bad_iret_stack *new_stack =
 607		container_of(task_pt_regs(current),
 608			     struct bad_iret_stack, regs);
 609
 610	/* Copy the IRET target to the new stack. */
 611	memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
 612
 613	/* Copy the remainder of the stack from the current stack. */
 614	memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));
 615
 616	BUG_ON(!user_mode(&new_stack->regs));
 617	return new_stack;
 618}
 619NOKPROBE_SYMBOL(fixup_bad_iret);
 620#endif
 621
 622static bool is_sysenter_singlestep(struct pt_regs *regs)
 623{
 624	/*
 625	 * We don't try for precision here.  If we're anywhere in the region of
 626	 * code that can be single-stepped in the SYSENTER entry path, then
 627	 * assume that this is a useless single-step trap due to SYSENTER
 628	 * being invoked with TF set.  (We don't know in advance exactly
 629	 * which instructions will be hit because BTF could plausibly
 630	 * be set.)
 631	 */
 632#ifdef CONFIG_X86_32
 633	return (regs->ip - (unsigned long)__begin_SYSENTER_singlestep_region) <
 634		(unsigned long)__end_SYSENTER_singlestep_region -
 635		(unsigned long)__begin_SYSENTER_singlestep_region;
 636#elif defined(CONFIG_IA32_EMULATION)
 637	return (regs->ip - (unsigned long)entry_SYSENTER_compat) <
 638		(unsigned long)__end_entry_SYSENTER_compat -
 639		(unsigned long)entry_SYSENTER_compat;
 640#else
 641	return false;
 642#endif
 643}
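/*
 * The subtract-and-compare above is the usual unsigned range check:
 * for ip < begin the subtraction wraps to a huge value, so the single
 * comparison covers begin <= ip < end without a second branch.
 */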
 644
 645/*
 646 * Our handling of the processor debug registers is non-trivial.
 647 * We do not clear them on entry and exit from the kernel. Therefore
 648 * it is possible to get a watchpoint trap here from inside the kernel.
 649 * However, the code in ./ptrace.c has ensured that the user can
 650 * only set watchpoints on userspace addresses. Therefore the in-kernel
 651 * watchpoint trap can only occur in code which is reading/writing
 652 * from user space. Such code must not hold kernel locks (since it
 653 * can equally take a page fault), therefore it is safe to call
 654 * force_sig_info even though that claims and releases locks.
 655 *
 656 * Code in ./signal.c ensures that the debug control register
 657 * is restored before we deliver any signal, and therefore that
 658 * user code runs with the correct debug control register even though
 659 * we clear it here.
 660 *
 661 * Being careful here means that we don't have to be as careful in a
 662 * lot of more complicated places (task switching can be a bit lazy
 663 * about restoring all the debug state, and ptrace doesn't have to
 664 * find every occurrence of the TF bit that could be saved away even
 665 * by user code)
 666 *
 667 * May run on IST stack.
 668 */
 669dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
 670{
 671	struct task_struct *tsk = current;
 672	int user_icebp = 0;
 673	unsigned long dr6;
 674	int si_code;
 675
 676	ist_enter(regs);
 677
 678	get_debugreg(dr6, 6);
 679	/*
 680	 * The Intel SDM says:
 681	 *
 682	 *   Certain debug exceptions may clear bits 0-3. The remaining
 683	 *   contents of the DR6 register are never cleared by the
 684	 *   processor. To avoid confusion in identifying debug
 685	 *   exceptions, debug handlers should clear the register before
 686	 *   returning to the interrupted task.
 687	 *
 688	 * Keep it simple: clear DR6 immediately.
 689	 */
 690	set_debugreg(0, 6);
 691
 692	/* Filter out all the reserved bits which are preset to 1 */
 693	dr6 &= ~DR6_RESERVED;
 694
 695	/*
 696	 * The SDM says "The processor clears the BTF flag when it
 697	 * generates a debug exception."  Clear TIF_BLOCKSTEP to keep
 698	 * TIF_BLOCKSTEP in sync with the hardware BTF flag.
 699	 */
 700	clear_tsk_thread_flag(tsk, TIF_BLOCKSTEP);
 701
 702	if (unlikely(!user_mode(regs) && (dr6 & DR_STEP) &&
 703		     is_sysenter_singlestep(regs))) {
 704		dr6 &= ~DR_STEP;
 705		if (!dr6)
 706			goto exit;
 707		/*
 708		 * else we might have gotten a single-step trap and hit a
 709		 * watchpoint at the same time, in which case we should fall
 710		 * through and handle the watchpoint.
 711		 */
 712	}
 713
 714	/*
  715	 * If dr6 gives us no clue about the origin of this trap,
  716	 * then it's very likely the result of an icebp/int01 trap.
  717	 * The user wants a SIGTRAP for that.
 718	 */
 719	if (!dr6 && user_mode(regs))
 720		user_icebp = 1;
 721
 722	/* Catch kmemcheck conditions! */
 723	if ((dr6 & DR_STEP) && kmemcheck_trap(regs))
 724		goto exit;
 725
 726	/* Store the virtualized DR6 value */
 727	tsk->thread.debugreg6 = dr6;
 728
 729#ifdef CONFIG_KPROBES
 730	if (kprobe_debug_handler(regs))
 731		goto exit;
 732#endif
 733
 734	if (notify_die(DIE_DEBUG, "debug", regs, (long)&dr6, error_code,
 735							SIGTRAP) == NOTIFY_STOP)
 736		goto exit;
 737
 738	/*
 739	 * Let others (NMI) know that the debug stack is in use
 740	 * as we may switch to the interrupt stack.
 741	 */
 742	debug_stack_usage_inc();
 743
 744	/* It's safe to allow irq's after DR6 has been saved */
 745	preempt_disable();
 746	cond_local_irq_enable(regs);
 747
 748	if (v8086_mode(regs)) {
 749		handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
 750					X86_TRAP_DB);
 751		cond_local_irq_disable(regs);
 752		preempt_enable_no_resched();
 753		debug_stack_usage_dec();
 754		goto exit;
 755	}
 756
 757	if (WARN_ON_ONCE((dr6 & DR_STEP) && !user_mode(regs))) {
 758		/*
 759		 * Historical junk that used to handle SYSENTER single-stepping.
 760		 * This should be unreachable now.  If we survive for a while
 761		 * without anyone hitting this warning, we'll turn this into
 762		 * an oops.
 763		 */
 764		tsk->thread.debugreg6 &= ~DR_STEP;
 765		set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
 766		regs->flags &= ~X86_EFLAGS_TF;
 767	}
 768	si_code = get_si_code(tsk->thread.debugreg6);
 769	if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
 770		send_sigtrap(tsk, regs, error_code, si_code);
 771	cond_local_irq_disable(regs);
 772	preempt_enable_no_resched();
 773	debug_stack_usage_dec();
 774
 775exit:
 776#if defined(CONFIG_X86_32)
 777	/*
 778	 * This is the most likely code path that involves non-trivial use
 779	 * of the SYSENTER stack.  Check that we haven't overrun it.
 780	 */
 781	WARN(this_cpu_read(cpu_tss.SYSENTER_stack_canary) != STACK_END_MAGIC,
 782	     "Overran or corrupted SYSENTER stack\n");
 783#endif
 784	ist_exit(regs);
 785}
 786NOKPROBE_SYMBOL(do_debug);
 787
 788/*
 789 * Note that we play around with the 'TS' bit in an attempt to get
 790 * the correct behaviour even in the presence of the asynchronous
 791 * IRQ13 behaviour
 792 */
 793static void math_error(struct pt_regs *regs, int error_code, int trapnr)
 794{
 795	struct task_struct *task = current;
 796	struct fpu *fpu = &task->thread.fpu;
 797	siginfo_t info;
 798	char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
 799						"simd exception";
 800
 801	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
 802		return;
 803	cond_local_irq_enable(regs);
 804
 805	if (!user_mode(regs)) {
 806		if (!fixup_exception(regs, trapnr)) {
 807			task->thread.error_code = error_code;
 808			task->thread.trap_nr = trapnr;
 809			die(str, regs, error_code);
 810		}
 811		return;
 812	}
 813
 814	/*
 815	 * Save the info for the exception handler and clear the error.
 816	 */
 817	fpu__save(fpu);
 818
 819	task->thread.trap_nr	= trapnr;
 820	task->thread.error_code = error_code;
 821	info.si_signo		= SIGFPE;
 822	info.si_errno		= 0;
 823	info.si_addr		= (void __user *)uprobe_get_trap_addr(regs);
 824
 825	info.si_code = fpu__exception_code(fpu, trapnr);
 826
 827	/* Retry when we get spurious exceptions: */
 828	if (!info.si_code)
 829		return;
 830
 831	force_sig_info(SIGFPE, &info, task);
 832}
 833
 834dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
 835{
 836	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
 837	math_error(regs, error_code, X86_TRAP_MF);
 838}
 839
 840dotraplinkage void
 841do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
 842{
 843	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
 844	math_error(regs, error_code, X86_TRAP_XF);
 845}
 846
 847dotraplinkage void
 848do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
 849{
 850	cond_local_irq_enable(regs);
 851}
 852
 853dotraplinkage void
 854do_device_not_available(struct pt_regs *regs, long error_code)
 855{
 856	unsigned long cr0;
 857
 858	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
 859
 860#ifdef CONFIG_MATH_EMULATION
 861	if (!boot_cpu_has(X86_FEATURE_FPU) && (read_cr0() & X86_CR0_EM)) {
 862		struct math_emu_info info = { };
 863
 864		cond_local_irq_enable(regs);
 865
 866		info.regs = regs;
 867		math_emulate(&info);
 868		return;
 869	}
 870#endif
 871
 872	/* This should not happen. */
 873	cr0 = read_cr0();
 874	if (WARN(cr0 & X86_CR0_TS, "CR0.TS was set")) {
 875		/* Try to fix it up and carry on. */
 876		write_cr0(cr0 & ~X86_CR0_TS);
 877	} else {
 878		/*
 879		 * Something terrible happened, and we're better off trying
 880		 * to kill the task than getting stuck in a never-ending
 881		 * loop of #NM faults.
 882		 */
 883		die("unexpected #NM exception", regs, error_code);
 884	}
 885}
 886NOKPROBE_SYMBOL(do_device_not_available);
 887
 888#ifdef CONFIG_X86_32
 889dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
 890{
 891	siginfo_t info;
 892
 893	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
 894	local_irq_enable();
 895
 896	info.si_signo = SIGILL;
 897	info.si_errno = 0;
 898	info.si_code = ILL_BADSTK;
 899	info.si_addr = NULL;
 900	if (notify_die(DIE_TRAP, "iret exception", regs, error_code,
 901			X86_TRAP_IRET, SIGILL) != NOTIFY_STOP) {
 902		do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code,
 903			&info);
 904	}
 905}
 906#endif
 907
 908/* Set of traps needed for early debugging. */
 909void __init early_trap_init(void)
 910{
 911	/*
 912	 * Don't use IST to set DEBUG_STACK as it doesn't work until TSS
 913	 * is ready in cpu_init() <-- trap_init(). Before trap_init(),
  914	 * the CPU runs at ring 0, so it is impossible to hit an invalid
 915	 * stack.  Using the original stack works well enough at this
 916	 * early stage. DEBUG_STACK will be equipped after cpu_init() in
 917	 * trap_init().
 918	 *
 919	 * We don't need to set trace_idt_table like set_intr_gate(),
 920	 * since we don't have trace_debug and it will be reset to
 921	 * 'debug' in trap_init() by set_intr_gate_ist().
 922	 */
 923	set_intr_gate_notrace(X86_TRAP_DB, debug);
 924	/* int3 can be called from all */
 925	set_system_intr_gate(X86_TRAP_BP, &int3);
 926#ifdef CONFIG_X86_32
 927	set_intr_gate(X86_TRAP_PF, page_fault);
 928#endif
 929	load_idt(&idt_descr);
 930}
 931
 932void __init early_trap_pf_init(void)
 933{
 934#ifdef CONFIG_X86_64
 935	set_intr_gate(X86_TRAP_PF, page_fault);
 936#endif
 937}
 938
 939void __init trap_init(void)
 940{
 941	int i;
 942
 943#ifdef CONFIG_EISA
 944	void __iomem *p = early_ioremap(0x0FFFD9, 4);
 945
 946	if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24))
 947		EISA_bus = 1;
 948	early_iounmap(p, 4);
 949#endif
 950
 951	set_intr_gate(X86_TRAP_DE, divide_error);
 952	set_intr_gate_ist(X86_TRAP_NMI, &nmi, NMI_STACK);
 953	/* int4 can be called from all */
 954	set_system_intr_gate(X86_TRAP_OF, &overflow);
 955	set_intr_gate(X86_TRAP_BR, bounds);
 956	set_intr_gate(X86_TRAP_UD, invalid_op);
 957	set_intr_gate(X86_TRAP_NM, device_not_available);
 958#ifdef CONFIG_X86_32
 959	set_task_gate(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS);
 960#else
 961	set_intr_gate_ist(X86_TRAP_DF, &double_fault, DOUBLEFAULT_STACK);
 962#endif
 963	set_intr_gate(X86_TRAP_OLD_MF, coprocessor_segment_overrun);
 964	set_intr_gate(X86_TRAP_TS, invalid_TSS);
 965	set_intr_gate(X86_TRAP_NP, segment_not_present);
 966	set_intr_gate(X86_TRAP_SS, stack_segment);
 967	set_intr_gate(X86_TRAP_GP, general_protection);
 968	set_intr_gate(X86_TRAP_SPURIOUS, spurious_interrupt_bug);
 969	set_intr_gate(X86_TRAP_MF, coprocessor_error);
 970	set_intr_gate(X86_TRAP_AC, alignment_check);
 971#ifdef CONFIG_X86_MCE
 972	set_intr_gate_ist(X86_TRAP_MC, &machine_check, MCE_STACK);
 973#endif
 974	set_intr_gate(X86_TRAP_XF, simd_coprocessor_error);
 975
 976	/* Reserve all the builtin and the syscall vector: */
 977	for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
 978		set_bit(i, used_vectors);
 979
 980#ifdef CONFIG_IA32_EMULATION
 981	set_system_intr_gate(IA32_SYSCALL_VECTOR, entry_INT80_compat);
 982	set_bit(IA32_SYSCALL_VECTOR, used_vectors);
 983#endif
 984
 985#ifdef CONFIG_X86_32
 986	set_system_intr_gate(IA32_SYSCALL_VECTOR, entry_INT80_32);
 987	set_bit(IA32_SYSCALL_VECTOR, used_vectors);
 988#endif
 989
 990	/*
 991	 * Set the IDT descriptor to a fixed read-only location, so that the
 992	 * "sidt" instruction will not leak the location of the kernel, and
 993	 * to defend the IDT against arbitrary memory write vulnerabilities.
  994	 * It will be reloaded in cpu_init(). */
 995	__set_fixmap(FIX_RO_IDT, __pa_symbol(idt_table), PAGE_KERNEL_RO);
 996	idt_descr.address = fix_to_virt(FIX_RO_IDT);
 997
 998	/*
 999	 * Should be a barrier for any external CPU state:
1000	 */
1001	cpu_init();
1002
1003	/*
1004	 * X86_TRAP_DB and X86_TRAP_BP have been set
 1005	 * in early_trap_init(). However, IST works only after
1006	 * cpu_init() loads TSS. See comments in early_trap_init().
1007	 */
1008	set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK);
1009	/* int3 can be called from all */
1010	set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK);
1011
1012	x86_init.irqs.trap_init();
1013
1014#ifdef CONFIG_X86_64
1015	memcpy(&debug_idt_table, &idt_table, IDT_ENTRIES * 16);
1016	set_nmi_gate(X86_TRAP_DB, &debug);
1017	set_nmi_gate(X86_TRAP_BP, &int3);
1018#endif
1019}
v5.14.15
   1/*
   2 *  Copyright (C) 1991, 1992  Linus Torvalds
   3 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
   4 *
   5 *  Pentium III FXSR, SSE support
   6 *	Gareth Hughes <gareth@valinux.com>, May 2000
   7 */
   8
   9/*
  10 * Handle hardware traps and faults.
  11 */
  12
  13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  14
  15#include <linux/context_tracking.h>
  16#include <linux/interrupt.h>
  17#include <linux/kallsyms.h>
  18#include <linux/spinlock.h>
  19#include <linux/kprobes.h>
  20#include <linux/uaccess.h>
  21#include <linux/kdebug.h>
  22#include <linux/kgdb.h>
  23#include <linux/kernel.h>
  24#include <linux/export.h>
  25#include <linux/ptrace.h>
  26#include <linux/uprobes.h>
  27#include <linux/string.h>
  28#include <linux/delay.h>
  29#include <linux/errno.h>
  30#include <linux/kexec.h>
  31#include <linux/sched.h>
  32#include <linux/sched/task_stack.h>
  33#include <linux/timer.h>
  34#include <linux/init.h>
  35#include <linux/bug.h>
  36#include <linux/nmi.h>
  37#include <linux/mm.h>
  38#include <linux/smp.h>
  39#include <linux/io.h>
  40#include <linux/hardirq.h>
  41#include <linux/atomic.h>
  42
  43#include <asm/stacktrace.h>
  44#include <asm/processor.h>
  45#include <asm/debugreg.h>
  46#include <asm/realmode.h>
  47#include <asm/text-patching.h>
  48#include <asm/ftrace.h>
  49#include <asm/traps.h>
  50#include <asm/desc.h>
  51#include <asm/fpu/internal.h>
  52#include <asm/cpu.h>
  53#include <asm/cpu_entry_area.h>
  54#include <asm/mce.h>
  55#include <asm/fixmap.h>
  56#include <asm/mach_traps.h>
  57#include <asm/alternative.h>
  58#include <asm/fpu/xstate.h>
  59#include <asm/vm86.h>
  60#include <asm/umip.h>
  61#include <asm/insn.h>
  62#include <asm/insn-eval.h>
  63#include <asm/vdso.h>
  64
  65#ifdef CONFIG_X86_64
  66#include <asm/x86_init.h>
  67#include <asm/proto.h>
  68#else
  69#include <asm/processor-flags.h>
  70#include <asm/setup.h>
  71#include <asm/proto.h>
  72#endif
  73
  74DECLARE_BITMAP(system_vectors, NR_VECTORS);
  75
  76static inline void cond_local_irq_enable(struct pt_regs *regs)
  77{
  78	if (regs->flags & X86_EFLAGS_IF)
  79		local_irq_enable();
  80}
  81
  82static inline void cond_local_irq_disable(struct pt_regs *regs)
  83{
  84	if (regs->flags & X86_EFLAGS_IF)
  85		local_irq_disable();
  86}
  87
  88__always_inline int is_valid_bugaddr(unsigned long addr)
  89{
  90	if (addr < TASK_SIZE_MAX)
  91		return 0;
  92
  93	/*
   94	 * We got #UD; if the text isn't readable we'd have gotten
  95	 * a different exception.
  96	 */
  97	return *(unsigned short *)addr == INSN_UD2;
  98}
  99
 100static nokprobe_inline int
 101do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
 102		  struct pt_regs *regs,	long error_code)
 103{
 104	if (v8086_mode(regs)) {
 105		/*
 106		 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
  107		 * On NMI (interrupt 2), do_trap should not be called.
 108		 */
 109		if (trapnr < X86_TRAP_UD) {
 110			if (!handle_vm86_trap((struct kernel_vm86_regs *) regs,
 111						error_code, trapnr))
 112				return 0;
 113		}
 114	} else if (!user_mode(regs)) {
 115		if (fixup_exception(regs, trapnr, error_code, 0))
 116			return 0;
 117
 118		tsk->thread.error_code = error_code;
 119		tsk->thread.trap_nr = trapnr;
 120		die(str, regs, error_code);
 121	} else {
 122		if (fixup_vdso_exception(regs, trapnr, error_code, 0))
 123			return 0;
 124	}
 125
 126	/*
 127	 * We want error_code and trap_nr set for userspace faults and
 128	 * kernelspace faults which result in die(), but not
 129	 * kernelspace faults which are fixed up.  die() gives the
 130	 * process no chance to handle the signal and notice the
 131	 * kernel fault information, so that won't result in polluting
 132	 * the information about previously queued, but not yet
 133	 * delivered, faults.  See also exc_general_protection below.
 134	 */
 135	tsk->thread.error_code = error_code;
 136	tsk->thread.trap_nr = trapnr;
 137
 138	return -1;
 139}
 140
 141static void show_signal(struct task_struct *tsk, int signr,
 142			const char *type, const char *desc,
 143			struct pt_regs *regs, long error_code)
 144{
 145	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
 146	    printk_ratelimit()) {
 147		pr_info("%s[%d] %s%s ip:%lx sp:%lx error:%lx",
 148			tsk->comm, task_pid_nr(tsk), type, desc,
 149			regs->ip, regs->sp, error_code);
 150		print_vma_addr(KERN_CONT " in ", regs->ip);
 151		pr_cont("\n");
 152	}
 153}
 154
 155static void
 156do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
 157	long error_code, int sicode, void __user *addr)
 158{
 159	struct task_struct *tsk = current;
 160
 161	if (!do_trap_no_signal(tsk, trapnr, str, regs, error_code))
 162		return;
 163
 164	show_signal(tsk, signr, "trap ", str, regs, error_code);
 165
 166	if (!sicode)
 167		force_sig(signr);
 168	else
 169		force_sig_fault(signr, sicode, addr);
 170}
 171NOKPROBE_SYMBOL(do_trap);
 172
 173static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
 174	unsigned long trapnr, int signr, int sicode, void __user *addr)
 175{
 176	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
 177
 178	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
 179			NOTIFY_STOP) {
 180		cond_local_irq_enable(regs);
 181		do_trap(trapnr, signr, str, regs, error_code, sicode, addr);
 182		cond_local_irq_disable(regs);
 183	}
 184}
 185
 186/*
  187 * POSIX requires the address of the faulting instruction to be provided
  188 * for SIGILL (#UD) and SIGFPE (#DE) in the si_addr member of siginfo_t.
 189 *
 190 * This address is usually regs->ip, but when an uprobe moved the code out
 191 * of line then regs->ip points to the XOL code which would confuse
 192 * anything which analyzes the fault address vs. the unmodified binary. If
 193 * a trap happened in XOL code then uprobe maps regs->ip back to the
 194 * original instruction address.
 195 */
 196static __always_inline void __user *error_get_trap_addr(struct pt_regs *regs)
 197{
 198	return (void __user *)uprobe_get_trap_addr(regs);
 199}
 200
 201DEFINE_IDTENTRY(exc_divide_error)
 202{
 203	do_error_trap(regs, 0, "divide error", X86_TRAP_DE, SIGFPE,
 204		      FPE_INTDIV, error_get_trap_addr(regs));
 205}
 206
 207DEFINE_IDTENTRY(exc_overflow)
 208{
 209	do_error_trap(regs, 0, "overflow", X86_TRAP_OF, SIGSEGV, 0, NULL);
 210}
 211
 212#ifdef CONFIG_X86_F00F_BUG
 213void handle_invalid_op(struct pt_regs *regs)
 214#else
 215static inline void handle_invalid_op(struct pt_regs *regs)
 216#endif
 217{
 218	do_error_trap(regs, 0, "invalid opcode", X86_TRAP_UD, SIGILL,
 219		      ILL_ILLOPN, error_get_trap_addr(regs));
 220}
 221
 222static noinstr bool handle_bug(struct pt_regs *regs)
 223{
 224	bool handled = false;
 225
 226	if (!is_valid_bugaddr(regs->ip))
 227		return handled;
 228
 229	/*
 230	 * All lies, just get the WARN/BUG out.
 231	 */
 232	instrumentation_begin();
 233	/*
 234	 * Since we're emulating a CALL with exceptions, restore the interrupt
 235	 * state to what it was at the exception site.
 236	 */
 237	if (regs->flags & X86_EFLAGS_IF)
 238		raw_local_irq_enable();
 239	if (report_bug(regs->ip, regs) == BUG_TRAP_TYPE_WARN) {
 240		regs->ip += LEN_UD2;
 241		handled = true;
 242	}
 243	if (regs->flags & X86_EFLAGS_IF)
 244		raw_local_irq_disable();
 245	instrumentation_end();
 246
 247	return handled;
 248}
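/*
 * BUG() and WARN() on x86 are emitted as a UD2 instruction plus
 * bug-table metadata, which is why a #UD at a valid kernel text
 * address is offered to report_bug() above before being treated as a
 * real invalid-opcode trap.
 */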
 249
 250DEFINE_IDTENTRY_RAW(exc_invalid_op)
 251{
 252	irqentry_state_t state;
 253
 254	/*
 255	 * We use UD2 as a short encoding for 'CALL __WARN', as such
 256	 * handle it before exception entry to avoid recursive WARN
 257	 * in case exception entry is the one triggering WARNs.
 258	 */
 259	if (!user_mode(regs) && handle_bug(regs))
 260		return;
 261
 262	state = irqentry_enter(regs);
 263	instrumentation_begin();
 264	handle_invalid_op(regs);
 265	instrumentation_end();
 266	irqentry_exit(regs, state);
 267}
 268
 269DEFINE_IDTENTRY(exc_coproc_segment_overrun)
 270{
 271	do_error_trap(regs, 0, "coprocessor segment overrun",
 272		      X86_TRAP_OLD_MF, SIGFPE, 0, NULL);
 273}
 274
 275DEFINE_IDTENTRY_ERRORCODE(exc_invalid_tss)
 276{
 277	do_error_trap(regs, error_code, "invalid TSS", X86_TRAP_TS, SIGSEGV,
 278		      0, NULL);
 279}
 280
 281DEFINE_IDTENTRY_ERRORCODE(exc_segment_not_present)
 282{
 283	do_error_trap(regs, error_code, "segment not present", X86_TRAP_NP,
 284		      SIGBUS, 0, NULL);
 285}
 286
 287DEFINE_IDTENTRY_ERRORCODE(exc_stack_segment)
 288{
 289	do_error_trap(regs, error_code, "stack segment", X86_TRAP_SS, SIGBUS,
 290		      0, NULL);
 291}
 292
 293DEFINE_IDTENTRY_ERRORCODE(exc_alignment_check)
 294{
 295	char *str = "alignment check";
 296
 297	if (notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_AC, SIGBUS) == NOTIFY_STOP)
 298		return;
 299
 300	if (!user_mode(regs))
 301		die("Split lock detected\n", regs, error_code);
 302
 303	local_irq_enable();
 304
 305	if (handle_user_split_lock(regs, error_code))
 306		goto out;
 307
 308	do_trap(X86_TRAP_AC, SIGBUS, "alignment check", regs,
 309		error_code, BUS_ADRALN, NULL);
 310
 311out:
 312	local_irq_disable();
 313}
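/*
 * Note on the unconditional die() for !user_mode() above: the kernel
 * never runs with EFLAGS.AC-based alignment checking enabled (that
 * requires CPL 3), so an #AC raised in kernel mode can only come from
 * split-lock detection.
 */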
 314
 315#ifdef CONFIG_VMAP_STACK
 316__visible void __noreturn handle_stack_overflow(const char *message,
 317						struct pt_regs *regs,
 318						unsigned long fault_address)
 319{
 320	printk(KERN_EMERG "BUG: stack guard page was hit at %p (stack is %p..%p)\n",
 321		 (void *)fault_address, current->stack,
 322		 (char *)current->stack + THREAD_SIZE - 1);
 323	die(message, regs, 0);
 324
 325	/* Be absolutely certain we don't return. */
 326	panic("%s", message);
 327}
 328#endif
 329
 330/*
 331 * Runs on an IST stack for x86_64 and on a special task stack for x86_32.
 332 *
 333 * On x86_64, this is more or less a normal kernel entry.  Notwithstanding the
 334 * SDM's warnings about double faults being unrecoverable, returning works as
 335 * expected.  Presumably what the SDM actually means is that the CPU may get
 336 * the register state wrong on entry, so returning could be a bad idea.
 337 *
 338 * Various CPU engineers have promised that double faults due to an IRET fault
 339 * while the stack is read-only are, in fact, recoverable.
 340 *
 341 * On x86_32, this is entered through a task gate, and regs are synthesized
 342 * from the TSS.  Returning is, in principle, okay, but changes to regs will
 343 * be lost.  If, for some reason, we need to return to a context with modified
 344 * regs, the shim code could be adjusted to synchronize the registers.
 345 *
 346 * The 32bit #DF shim provides CR2 already as an argument. On 64bit it needs
 347 * to be read before doing anything else.
 348 */
 349DEFINE_IDTENTRY_DF(exc_double_fault)
 350{
 351	static const char str[] = "double fault";
 352	struct task_struct *tsk = current;
 353
 354#ifdef CONFIG_VMAP_STACK
 355	unsigned long address = read_cr2();
 356#endif
 357
 358#ifdef CONFIG_X86_ESPFIX64
 359	extern unsigned char native_irq_return_iret[];
 360
 361	/*
 362	 * If IRET takes a non-IST fault on the espfix64 stack, then we
 363	 * end up promoting it to a doublefault.  In that case, take
 364	 * advantage of the fact that we're not using the normal (TSS.sp0)
 365	 * stack right now.  We can write a fake #GP(0) frame at TSS.sp0
 366	 * and then modify our own IRET frame so that, when we return,
 367	 * we land directly at the #GP(0) vector with the stack already
 368	 * set up according to its expectations.
 369	 *
 370	 * The net result is that our #GP handler will think that we
 371	 * entered from usermode with the bad user context.
 372	 *
 373	 * No need for nmi_enter() here because we don't use RCU.
 374	 */
 375	if (((long)regs->sp >> P4D_SHIFT) == ESPFIX_PGD_ENTRY &&
 376		regs->cs == __KERNEL_CS &&
 377		regs->ip == (unsigned long)native_irq_return_iret)
 378	{
 379		struct pt_regs *gpregs = (struct pt_regs *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
 380		unsigned long *p = (unsigned long *)regs->sp;
 381
 382		/*
 383		 * regs->sp points to the failing IRET frame on the
 384		 * ESPFIX64 stack.  Copy it to the entry stack.  This fills
 385		 * in gpregs->ss through gpregs->ip.
 386		 *
 387		 */
 388		gpregs->ip	= p[0];
 389		gpregs->cs	= p[1];
 390		gpregs->flags	= p[2];
 391		gpregs->sp	= p[3];
 392		gpregs->ss	= p[4];
 393		gpregs->orig_ax = 0;  /* Missing (lost) #GP error code */
 394
 395		/*
 396		 * Adjust our frame so that we return straight to the #GP
 397		 * vector with the expected RSP value.  This is safe because
 398		 * we won't enable interrupts or schedule before we invoke
 399		 * general_protection, so nothing will clobber the stack
 400		 * frame we just set up.
 401		 *
 402		 * We will enter general_protection with kernel GSBASE,
 403		 * which is what the stub expects, given that the faulting
 404		 * RIP will be the IRET instruction.
 405		 */
 406		regs->ip = (unsigned long)asm_exc_general_protection;
 407		regs->sp = (unsigned long)&gpregs->orig_ax;
 408
 409		return;
 410	}
 411#endif
 412
 413	irqentry_nmi_enter(regs);
 414	instrumentation_begin();
 415	notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);
 416
 417	tsk->thread.error_code = error_code;
 418	tsk->thread.trap_nr = X86_TRAP_DF;
 419
 420#ifdef CONFIG_VMAP_STACK
 421	/*
 422	 * If we overflow the stack into a guard page, the CPU will fail
 423	 * to deliver #PF and will send #DF instead.  Similarly, if we
 424	 * take any non-IST exception while too close to the bottom of
 425	 * the stack, the processor will get a page fault while
 426	 * delivering the exception and will generate a double fault.
 427	 *
 428	 * According to the SDM (footnote in 6.15 under "Interrupt 14 -
  429	 * Page-Fault Exception (#PF)"):
 430	 *
 431	 *   Processors update CR2 whenever a page fault is detected. If a
 432	 *   second page fault occurs while an earlier page fault is being
 433	 *   delivered, the faulting linear address of the second fault will
 434	 *   overwrite the contents of CR2 (replacing the previous
 435	 *   address). These updates to CR2 occur even if the page fault
 436	 *   results in a double fault or occurs during the delivery of a
 437	 *   double fault.
 438	 *
 439	 * The logic below has a small possibility of incorrectly diagnosing
 440	 * some errors as stack overflows.  For example, if the IDT or GDT
 441	 * gets corrupted such that #GP delivery fails due to a bad descriptor
 442	 * causing #GP and we hit this condition while CR2 coincidentally
 443	 * points to the stack guard page, we'll think we overflowed the
 444	 * stack.  Given that we're going to panic one way or another
 445	 * if this happens, this isn't necessarily worth fixing.
 446	 *
 447	 * If necessary, we could improve the test by only diagnosing
 448	 * a stack overflow if the saved RSP points within 47 bytes of
 449	 * the bottom of the stack: if RSP == tsk_stack + 48 and we
 450	 * take an exception, the stack is already aligned and there
  451	 * will be enough room for SS, RSP, RFLAGS, CS, RIP, and a
 452	 * possible error code, so a stack overflow would *not* double
 453	 * fault.  With any less space left, exception delivery could
 454	 * fail, and, as a practical matter, we've overflowed the
 455	 * stack even if the actual trigger for the double fault was
 456	 * something else.
 457	 */
 458	if ((unsigned long)task_stack_page(tsk) - 1 - address < PAGE_SIZE) {
 459		handle_stack_overflow("kernel stack overflow (double-fault)",
 460				      regs, address);
 461	}
 462#endif
 463
 464	pr_emerg("PANIC: double fault, error_code: 0x%lx\n", error_code);
 465	die("double fault", regs, error_code);
 466	panic("Machine halted.");
 467	instrumentation_end();
 468}
 469
 470DEFINE_IDTENTRY(exc_bounds)
 471{
 472	if (notify_die(DIE_TRAP, "bounds", regs, 0,
 473			X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP)
 474		return;
 475	cond_local_irq_enable(regs);
 476
 477	if (!user_mode(regs))
 478		die("bounds", regs, 0);
 479
 480	do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, 0, 0, NULL);
 481
 482	cond_local_irq_disable(regs);
 483}
 484
 485enum kernel_gp_hint {
 486	GP_NO_HINT,
 487	GP_NON_CANONICAL,
 488	GP_CANONICAL
 489};
 490
 491/*
 492 * When an uncaught #GP occurs, try to determine the memory address accessed by
 493 * the instruction and return that address to the caller. Also, try to figure
 494 * out whether any part of the access to that address was non-canonical.
 495 */
 496static enum kernel_gp_hint get_kernel_gp_address(struct pt_regs *regs,
 497						 unsigned long *addr)
 498{
 499	u8 insn_buf[MAX_INSN_SIZE];
 500	struct insn insn;
 501	int ret;
 502
 503	if (copy_from_kernel_nofault(insn_buf, (void *)regs->ip,
 504			MAX_INSN_SIZE))
 505		return GP_NO_HINT;
 506
 507	ret = insn_decode_kernel(&insn, insn_buf);
 508	if (ret < 0)
 509		return GP_NO_HINT;
 510
 511	*addr = (unsigned long)insn_get_addr_ref(&insn, regs);
 512	if (*addr == -1UL)
 513		return GP_NO_HINT;
 514
 515#ifdef CONFIG_X86_64
 516	/*
 517	 * Check that:
 518	 *  - the operand is not in the kernel half
 519	 *  - the last byte of the operand is not in the user canonical half
 520	 */
 521	if (*addr < ~__VIRTUAL_MASK &&
 522	    *addr + insn.opnd_bytes - 1 > __VIRTUAL_MASK)
 523		return GP_NON_CANONICAL;
 524#endif
 525
 526	return GP_CANONICAL;
 527}
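/*
 * A #GP for a non-canonical access reports no fault address in any
 * register, so decoding the faulting instruction as above is the only
 * way to reconstruct one; the result is a best-effort hint, not an
 * architectural value.
 */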
 528
 529#define GPFSTR "general protection fault"
 530
 531DEFINE_IDTENTRY_ERRORCODE(exc_general_protection)
 532{
 533	char desc[sizeof(GPFSTR) + 50 + 2*sizeof(unsigned long) + 1] = GPFSTR;
 534	enum kernel_gp_hint hint = GP_NO_HINT;
 535	struct task_struct *tsk;
 536	unsigned long gp_addr;
 537	int ret;
 538
 539	cond_local_irq_enable(regs);
 540
 541	if (static_cpu_has(X86_FEATURE_UMIP)) {
 542		if (user_mode(regs) && fixup_umip_exception(regs))
 543			goto exit;
 544	}
 545
 546	if (v8086_mode(regs)) {
 547		local_irq_enable();
 548		handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
 549		local_irq_disable();
 550		return;
 551	}
 552
 553	tsk = current;
 554
 555	if (user_mode(regs)) {
 556		tsk->thread.error_code = error_code;
 557		tsk->thread.trap_nr = X86_TRAP_GP;
 558
 559		if (fixup_vdso_exception(regs, X86_TRAP_GP, error_code, 0))
 560			goto exit;
 561
 562		show_signal(tsk, SIGSEGV, "", desc, regs, error_code);
 563		force_sig(SIGSEGV);
 564		goto exit;
 565	}
 566
 567	if (fixup_exception(regs, X86_TRAP_GP, error_code, 0))
 568		goto exit;
 569
 570	tsk->thread.error_code = error_code;
 571	tsk->thread.trap_nr = X86_TRAP_GP;
 572
 573	/*
 574	 * To be potentially processing a kprobe fault and to trust the result
 575	 * from kprobe_running(), we have to be non-preemptible.
 576	 */
 577	if (!preemptible() &&
 578	    kprobe_running() &&
 579	    kprobe_fault_handler(regs, X86_TRAP_GP))
 580		goto exit;
 581
 582	ret = notify_die(DIE_GPF, desc, regs, error_code, X86_TRAP_GP, SIGSEGV);
 583	if (ret == NOTIFY_STOP)
 584		goto exit;
 585
 586	if (error_code)
 587		snprintf(desc, sizeof(desc), "segment-related " GPFSTR);
 588	else
 589		hint = get_kernel_gp_address(regs, &gp_addr);
 590
 591	if (hint != GP_NO_HINT)
 592		snprintf(desc, sizeof(desc), GPFSTR ", %s 0x%lx",
 593			 (hint == GP_NON_CANONICAL) ? "probably for non-canonical address"
 594						    : "maybe for address",
 595			 gp_addr);
 596
 597	/*
 598	 * KASAN is interested only in the non-canonical case, clear it
 599	 * otherwise.
 600	 */
 601	if (hint != GP_NON_CANONICAL)
 602		gp_addr = 0;
 603
 604	die_addr(desc, regs, error_code, gp_addr);
 605
 606exit:
 607	cond_local_irq_disable(regs);
 608}
 609
 610static bool do_int3(struct pt_regs *regs)
 611{
 612	int res;
 613
 614#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
 615	if (kgdb_ll_trap(DIE_INT3, "int3", regs, 0, X86_TRAP_BP,
 616			 SIGTRAP) == NOTIFY_STOP)
 617		return true;
 618#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
 619
 620#ifdef CONFIG_KPROBES
 621	if (kprobe_int3_handler(regs))
 622		return true;
 623#endif
 624	res = notify_die(DIE_INT3, "int3", regs, 0, X86_TRAP_BP, SIGTRAP);
 625
 626	return res == NOTIFY_STOP;
 627}
 628
 629static void do_int3_user(struct pt_regs *regs)
 630{
 631	if (do_int3(regs))
 632		return;
 633
 634	cond_local_irq_enable(regs);
 635	do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, 0, 0, NULL);
 636	cond_local_irq_disable(regs);
 637}
 638
 639DEFINE_IDTENTRY_RAW(exc_int3)
 640{
 641	/*
 642	 * poke_int3_handler() is completely self contained code; it does (and
 643	 * must) *NOT* call out to anything, lest it hits upon yet another
 644	 * INT3.
 645	 */
 646	if (poke_int3_handler(regs))
 647		return;
 648
 649	/*
 650	 * irqentry_enter_from_user_mode() uses static_branch_{,un}likely()
 651	 * and therefore can trigger INT3, hence poke_int3_handler() must
 652	 * be done before. If the entry came from kernel mode, then use
 653	 * nmi_enter() because the INT3 could have been hit in any context
 654	 * including NMI.
 655	 */
 656	if (user_mode(regs)) {
 657		irqentry_enter_from_user_mode(regs);
 658		instrumentation_begin();
 659		do_int3_user(regs);
 660		instrumentation_end();
 661		irqentry_exit_to_user_mode(regs);
 662	} else {
 663		irqentry_state_t irq_state = irqentry_nmi_enter(regs);
 664
 665		instrumentation_begin();
 666		if (!do_int3(regs))
 667			die("int3", regs, 0);
 668		instrumentation_end();
 669		irqentry_nmi_exit(regs, irq_state);
 670	}
 671}
 672
 673#ifdef CONFIG_X86_64
 674/*
 675 * Help handler running on a per-cpu (IST or entry trampoline) stack
 676 * to switch to the normal thread stack if the interrupted code was in
 677 * user mode. The actual stack switch is done in entry_64.S
 678 */
 679asmlinkage __visible noinstr struct pt_regs *sync_regs(struct pt_regs *eregs)
 680{
 681	struct pt_regs *regs = (struct pt_regs *)this_cpu_read(cpu_current_top_of_stack) - 1;
 682	if (regs != eregs)
 683		*regs = *eregs;
 684	return regs;
 685}
 686
 687#ifdef CONFIG_AMD_MEM_ENCRYPT
 688asmlinkage __visible noinstr struct pt_regs *vc_switch_off_ist(struct pt_regs *regs)
 689{
 690	unsigned long sp, *stack;
 691	struct stack_info info;
 692	struct pt_regs *regs_ret;
 693
 694	/*
 695	 * In the SYSCALL entry path the RSP value comes from user space - don't
 696	 * trust it and switch to the current kernel stack.
 697	 */
 698	if (ip_within_syscall_gap(regs)) {
 699		sp = this_cpu_read(cpu_current_top_of_stack);
 700		goto sync;
 701	}
 702
 703	/*
 704	 * From here on the RSP value is trusted. Now check whether entry
 705	 * happened from a safe stack. The entry and unknown stacks are not
 706	 * safe; use the fall-back stack in those cases.
 707	 */
 708	sp    = regs->sp;
 709	stack = (unsigned long *)sp;
 710
 711	if (!get_stack_info_noinstr(stack, current, &info) || info.type == STACK_TYPE_ENTRY ||
 712	    info.type >= STACK_TYPE_EXCEPTION_LAST)
 713		sp = __this_cpu_ist_top_va(VC2);
 714
 715sync:
 716	/*
 717	 * Found a safe stack - switch to it as if the entry didn't happen via
 718	 * the IST stack. The code below only copies pt_regs; the real switch
 719	 * happens in assembly code.
 720	 */
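	/*
	 * ALIGN_DOWN keeps the frame 8-byte aligned and the subtraction
	 * makes room for one struct pt_regs on the chosen stack.
	 */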
 721	sp = ALIGN_DOWN(sp, 8) - sizeof(*regs_ret);
 722
 723	regs_ret = (struct pt_regs *)sp;
 724	*regs_ret = *regs;
 725
 726	return regs_ret;
 727}
 728#endif
 729
 730struct bad_iret_stack {
 731	void *error_entry_ret;
 732	struct pt_regs regs;
 733};
 734
 735asmlinkage __visible noinstr
 736struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
 737{
 738	/*
 739	 * This is called from entry_64.S early in handling a fault
 740	 * caused by a bad iret to user mode.  To handle the fault
 741	 * correctly, we want to move our stack frame to where it would
 742	 * be had we entered directly on the entry stack (rather than
 743	 * just below the IRET frame) and we want to pretend that the
 744	 * exception came from the IRET target.
 745	 */
 746	struct bad_iret_stack tmp, *new_stack =
 747		(struct bad_iret_stack *)__this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
 748
 749	/* Copy the IRET target to the temporary storage. */
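	/*
	 * 5*8 bytes is the hardware IRET frame, i.e. the tail of pt_regs
	 * starting at &regs.ip: RIP, CS, RFLAGS, RSP and SS.
	 */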
 750	__memcpy(&tmp.regs.ip, (void *)s->regs.sp, 5*8);
 751
 752	/* Copy the remainder of the stack from the current stack. */
 753	__memcpy(&tmp, s, offsetof(struct bad_iret_stack, regs.ip));
 754
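	/*
	 * tmp is assembled first and copied over in one go because the
	 * entry stack and the current stack may overlap; copying piecewise
	 * straight into new_stack could clobber the source.
	 */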
 755	/* Update the entry stack */
 756	__memcpy(new_stack, &tmp, sizeof(tmp));
 757
 758	BUG_ON(!user_mode(&new_stack->regs));
 759	return new_stack;
 760}
 761#endif
 762
 763static bool is_sysenter_singlestep(struct pt_regs *regs)
 764{
 765	/*
 766	 * We don't try for precision here.  If we're anywhere in the region of
 767	 * code that can be single-stepped in the SYSENTER entry path, then
 768	 * assume that this is a useless single-step trap due to SYSENTER
 769	 * being invoked with TF set.  (We don't know in advance exactly
 770	 * which instructions will be hit because BTF could plausibly
 771	 * be set.)
 772	 */
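	/*
	 * Both range checks below use the unsigned-underflow idiom: with
	 * unsigned arithmetic, a single "(ip - begin) < (end - begin)"
	 * comparison covers both bounds, since an ip below 'begin' wraps
	 * around to a huge value.
	 */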
 773#ifdef CONFIG_X86_32
 774	return (regs->ip - (unsigned long)__begin_SYSENTER_singlestep_region) <
 775		(unsigned long)__end_SYSENTER_singlestep_region -
 776		(unsigned long)__begin_SYSENTER_singlestep_region;
 777#elif defined(CONFIG_IA32_EMULATION)
 778	return (regs->ip - (unsigned long)entry_SYSENTER_compat) <
 779		(unsigned long)__end_entry_SYSENTER_compat -
 780		(unsigned long)entry_SYSENTER_compat;
 781#else
 782	return false;
 783#endif
 784}
 785
 786static __always_inline unsigned long debug_read_clear_dr6(void)
 787{
 788	unsigned long dr6;
 789
 790	/*
 791	 * The Intel SDM says:
 792	 *
 793	 *   Certain debug exceptions may clear bits 0-3. The remaining
 794	 *   contents of the DR6 register are never cleared by the
 795	 *   processor. To avoid confusion in identifying debug
 796	 *   exceptions, debug handlers should clear the register before
 797	 *   returning to the interrupted task.
 798	 *
 799	 * Keep it simple: clear DR6 immediately.
 800	 */
 801	get_debugreg(dr6, 6);
 802	set_debugreg(DR6_RESERVED, 6);
 803	dr6 ^= DR6_RESERVED; /* Flip to positive polarity */
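	/*
	 * Worked example (assuming the reserved bits read back as set, per
	 * DR6_RESERVED == 0xFFFF0FF0): after a single-step trap the CPU
	 * reports dr6 == 0xFFFF4FF0; XOR-ing with DR6_RESERVED leaves
	 * 0x00004000 == DR_STEP, so a set bit now uniformly means "event
	 * occurred".
	 */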
 804
 805	return dr6;
 806}
 807
 808/*
 809 * Our handling of the processor debug registers is non-trivial.
 810 * We do not clear them on entry and exit from the kernel. Therefore
 811 * it is possible to get a watchpoint trap here from inside the kernel.
 812 * However, the code in ./ptrace.c has ensured that the user can
 813 * only set watchpoints on userspace addresses. Therefore the in-kernel
 814 * watchpoint trap can only occur in code which is reading/writing
 815 * from user space. Such code must not hold kernel locks (since it
 816 * can equally take a page fault), therefore it is safe to call
 817 * force_sig_info even though that claims and releases locks.
 818 *
 819 * Code in ./signal.c ensures that the debug control register
 820 * is restored before we deliver any signal, and therefore that
 821 * user code runs with the correct debug control register even though
 822 * we clear it here.
 823 *
 824 * Being careful here means that we don't have to be as careful in a
 825 * lot of more complicated places (task switching can be a bit lazy
 826 * about restoring all the debug state, and ptrace doesn't have to
 827 * find every occurrence of the TF bit that could be saved away even
 828 * by user code)
 829 *
 830 * May run on IST stack.
 831 */
 832
 833static bool notify_debug(struct pt_regs *regs, unsigned long *dr6)
 834{
 835	/*
 836	 * Notifiers will clear bits in @dr6 to indicate the event has been
 837	 * consumed - hw_breakpoint_handler(), single_step_cont().
 838	 *
 839	 * Notifiers will set bits in @virtual_dr6 to indicate the desire
 840	 * for signals - ptrace_triggered(), kgdb_hw_overflow_handler().
 841	 */
 842	if (notify_die(DIE_DEBUG, "debug", regs, (long)dr6, 0, SIGTRAP) == NOTIFY_STOP)
 843		return true;
 844
 845	return false;
 846}
 847
 848static __always_inline void exc_debug_kernel(struct pt_regs *regs,
 849					     unsigned long dr6)
 850{
 851	/*
 852	 * Disable breakpoints during exception handling; recursive exceptions
 853	 * are exceedingly 'fun'.
 854	 *
 855	 * Since this function is NOKPROBE, and that also applies to
 856	 * HW_BREAKPOINT_X, we can't hit a breakpoint before this (XXX except a
 857	 * HW_BREAKPOINT_W on our stack)
 858	 *
 859	 * Entry text is excluded for HW_BP_X, and cpu_entry_area, which
 860	 * includes the entry stack, is excluded for everything.
 861	 */
 862	unsigned long dr7 = local_db_save();
 863	irqentry_state_t irq_state = irqentry_nmi_enter(regs);
 864	instrumentation_begin();
 865
 866	/*
 867	 * If something gets miswired and we end up here for a user mode
 868	 * #DB, we will malfunction.
 869	 */
 870	WARN_ON_ONCE(user_mode(regs));
 871
 872	if (test_thread_flag(TIF_BLOCKSTEP)) {
 873		/*
 874		 * The SDM says "The processor clears the BTF flag when it
 875		 * generates a debug exception."  PTRACE_BLOCKSTEP, however,
 876		 * requested it for userspace, and we just took a kernel #DB,
 877		 * so re-set BTF.
 878		 */
 879		unsigned long debugctl;
 880
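		/*
		 * This is a read-modify-write of IA32_DEBUGCTL: only the BTF
		 * bit is set; all other bits are preserved.
		 */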
 881		rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
 882		debugctl |= DEBUGCTLMSR_BTF;
 883		wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
 884	}
 885
 886	/*
 887	 * Catch SYSENTER with TF set and clear DR_STEP. If this hit a
 888	 * watchpoint at the same time then that will still be handled.
 889	 */
 890	if ((dr6 & DR_STEP) && is_sysenter_singlestep(regs))
 891		dr6 &= ~DR_STEP;
 892
 893	/*
 894	 * The kernel doesn't use INT1.
 895	 */
 896	if (!dr6)
 897		goto out;
 898
 899	if (notify_debug(regs, &dr6))
 900		goto out;
 901
 902	/*
 903	 * The kernel doesn't use TF single-step outside of:
 904	 *
 905	 *  - Kprobes, consumed through kprobe_debug_handler()
 906	 *  - KGDB, consumed through notify_debug()
 907	 *
 908	 * So if we get here with DR_STEP set, something is wonky.
 909	 *
 910	 * A known way to trigger this is through QEMU's GDB stub,
 911	 * which leaks #DB into the guest and causes IST recursion.
 912	 */
 913	if (WARN_ON_ONCE(dr6 & DR_STEP))
 914		regs->flags &= ~X86_EFLAGS_TF;
 915out:
 916	instrumentation_end();
 917	irqentry_nmi_exit(regs, irq_state);
 918
 919	local_db_restore(dr7);
 920}
 921
 922static __always_inline void exc_debug_user(struct pt_regs *regs,
 923					   unsigned long dr6)
 924{
 925	bool icebp;
 926
 927	/*
 928	 * If something gets miswired and we end up here for a kernel mode
 929	 * #DB, we will malfunction.
 930	 */
 931	WARN_ON_ONCE(!user_mode(regs));
 932
 933	/*
 934	 * NB: We can't easily clear DR7 here because
 935	 * irqentry_exit_to_user_mode() can invoke ptrace, schedule, access
 936	 * user memory, etc.  This means that a recursive #DB is possible.  If
 937	 * this happens, that #DB will hit exc_debug_kernel() and clear DR7.
 938	 * Since we're not on the IST stack right now, everything will be
 939	 * fine.
 940	 */
 941
 942	irqentry_enter_from_user_mode(regs);
 943	instrumentation_begin();
 944
 945	/*
 946	 * Start the virtual/ptrace DR6 value with just the DR_STEP mask
 947	 * of the real DR6. ptrace_triggered() will set the DR_TRAPn bits.
 948	 *
 949	 * Userspace expects DR_STEP to be visible in ptrace_get_debugreg(6)
 950	 * even if it is not the result of PTRACE_SINGLESTEP.
 951	 */
 952	current->thread.virtual_dr6 = (dr6 & DR_STEP);
 953
 954	/*
 955	 * The SDM says "The processor clears the BTF flag when it
 956	 * generates a debug exception."  Clear TIF_BLOCKSTEP to keep
 957	 * TIF_BLOCKSTEP in sync with the hardware BTF flag.
 958	 */
 959	clear_thread_flag(TIF_BLOCKSTEP);
 960
 961	/*
 962	 * If dr6 gives us no clue about the origin of this trap, then it's
 963	 * very likely the result of an icebp/int01 trap.
 964	 * Userspace expects a SIGTRAP for that.
 965	 */
 966	icebp = !dr6;
 967
 968	if (notify_debug(regs, &dr6))
 969		goto out;
 970
 971	/* It's safe to allow IRQs after DR6 has been saved */
 972	local_irq_enable();
 973
 974	if (v8086_mode(regs)) {
 975		handle_vm86_trap((struct kernel_vm86_regs *)regs, 0, X86_TRAP_DB);
 976		goto out_irq;
 977	}
 978
 979	/* #DB for bus lock can only be triggered from userspace. */
 980	if (dr6 & DR_BUS_LOCK)
 981		handle_bus_lock(regs);
 982
 983	/* Add the virtual_dr6 bits for signals. */
 984	dr6 |= current->thread.virtual_dr6;
 985	if (dr6 & (DR_STEP | DR_TRAP_BITS) || icebp)
 986		send_sigtrap(regs, 0, get_si_code(dr6));
 987
 988out_irq:
 989	local_irq_disable();
 990out:
 991	instrumentation_end();
 992	irqentry_exit_to_user_mode(regs);
 993}
 994
 995#ifdef CONFIG_X86_64
 996/* IST stack entry */
 997DEFINE_IDTENTRY_DEBUG(exc_debug)
 998{
 999	exc_debug_kernel(regs, debug_read_clear_dr6());
1000}
1001
1002/* User entry, runs on regular task stack */
1003DEFINE_IDTENTRY_DEBUG_USER(exc_debug)
1004{
1005	exc_debug_user(regs, debug_read_clear_dr6());
1006}
1007#else
1008/* 32 bit does not have separate entry points. */
1009DEFINE_IDTENTRY_RAW(exc_debug)
1010{
1011	unsigned long dr6 = debug_read_clear_dr6();
1012
1013	if (user_mode(regs))
1014		exc_debug_user(regs, dr6);
1015	else
1016		exc_debug_kernel(regs, dr6);
1017}
1018#endif
1019
1020/*
1021 * Note that we play around with the 'TS' bit to try to get the
1022 * correct behaviour even in the presence of the asynchronous
1023 * IRQ13 behaviour.
1024 */
1025static void math_error(struct pt_regs *regs, int trapnr)
1026{
1027	struct task_struct *task = current;
1028	struct fpu *fpu = &task->thread.fpu;
1029	int si_code;
1030	char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
1031						"simd exception";
1032
1033	cond_local_irq_enable(regs);
1034
1035	if (!user_mode(regs)) {
1036		if (fixup_exception(regs, trapnr, 0, 0))
1037			goto exit;
1038
1039		task->thread.error_code = 0;
1040		task->thread.trap_nr = trapnr;
1041
1042		if (notify_die(DIE_TRAP, str, regs, 0, trapnr,
1043			       SIGFPE) != NOTIFY_STOP)
1044			die(str, regs, 0);
1045		goto exit;
1046	}
1047
1048	/*
1049	 * Synchronize the FPU register state to the in-memory state if
1050	 * necessary. This allows the exception handler to inspect it.
1051	 */
1052	fpu_sync_fpstate(fpu);
1053
1054	task->thread.trap_nr	= trapnr;
1055	task->thread.error_code = 0;
1056
1057	si_code = fpu__exception_code(fpu, trapnr);
1058	/* Retry when we get spurious exceptions: */
1059	if (!si_code)
1060		goto exit;
1061
1062	if (fixup_vdso_exception(regs, trapnr, 0, 0))
1063		goto exit;
1064
1065	force_sig_fault(SIGFPE, si_code,
1066			(void __user *)uprobe_get_trap_addr(regs));
1067exit:
1068	cond_local_irq_disable(regs);
1069}
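
/*
 * Illustration (hypothetical userspace counterpart, not part of this file):
 * a task can unmask FPU exceptions so that they end up in math_error(), e.g.
 *
 *	#define _GNU_SOURCE
 *	#include <fenv.h>
 *
 *	feenableexcept(FE_DIVBYZERO);
 *	volatile double zero = 0.0;
 *	volatile double r = 1.0 / zero;		// delivers SIGFPE / FPE_FLTDIV
 *
 * Compiled for SSE this arrives as #XF (X86_TRAP_XF); with x87 code it
 * arrives as #MF (X86_TRAP_MF).
 */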
1070
1071DEFINE_IDTENTRY(exc_coprocessor_error)
1072{
1073	math_error(regs, X86_TRAP_MF);
1074}
1075
1076DEFINE_IDTENTRY(exc_simd_coprocessor_error)
1077{
1078	if (IS_ENABLED(CONFIG_X86_INVD_BUG)) {
 1079		/* AMD 486 bug: INVD from userspace raises #XF (vector 19) instead of #GP */
1080		if (!static_cpu_has(X86_FEATURE_XMM)) {
1081			__exc_general_protection(regs, 0);
1082			return;
1083		}
1084	}
1085	math_error(regs, X86_TRAP_XF);
1086}
1087
1088DEFINE_IDTENTRY(exc_spurious_interrupt_bug)
1089{
1090	/*
1091	 * This addresses a Pentium Pro Erratum:
1092	 *
1093	 * PROBLEM: If the APIC subsystem is configured in mixed mode with
1094	 * Virtual Wire mode implemented through the local APIC, an
1095	 * interrupt vector of 0Fh (Intel reserved encoding) may be
1096	 * generated by the local APIC (Int 15).  This vector may be
1097	 * generated upon receipt of a spurious interrupt (an interrupt
1098	 * which is removed before the system receives the INTA sequence)
1099	 * instead of the programmed 8259 spurious interrupt vector.
1100	 *
1101	 * IMPLICATION: The spurious interrupt vector programmed in the
1102	 * 8259 is normally handled by an operating system's spurious
1103	 * interrupt handler. However, a vector of 0Fh is unknown to some
1104	 * operating systems, which would crash if this erratum occurred.
1105	 *
 1106	 * In theory this could be limited to 32-bit, but the handler does
 1107	 * no harm and who knows which other CPUs suffer from this.
1108	 */
1109}
1110
1111DEFINE_IDTENTRY(exc_device_not_available)
1112{
1113	unsigned long cr0 = read_cr0();
1114
1115#ifdef CONFIG_MATH_EMULATION
1116	if (!boot_cpu_has(X86_FEATURE_FPU) && (cr0 & X86_CR0_EM)) {
1117		struct math_emu_info info = { };
1118
1119		cond_local_irq_enable(regs);
1120
1121		info.regs = regs;
1122		math_emulate(&info);
1123
1124		cond_local_irq_disable(regs);
1125		return;
1126	}
1127#endif
1128
1129	/* This should not happen. */
1130	if (WARN(cr0 & X86_CR0_TS, "CR0.TS was set")) {
1131		/* Try to fix it up and carry on. */
1132		write_cr0(cr0 & ~X86_CR0_TS);
1133	} else {
1134		/*
1135		 * Something terrible happened, and we're better off trying
1136		 * to kill the task than getting stuck in a never-ending
1137		 * loop of #NM faults.
1138		 */
1139		die("unexpected #NM exception", regs, 0);
1140	}
1141}
1142
1143#ifdef CONFIG_X86_32
1144DEFINE_IDTENTRY_SW(iret_error)
1145{
1146	local_irq_enable();
1147	if (notify_die(DIE_TRAP, "iret exception", regs, 0,
1148			X86_TRAP_IRET, SIGILL) != NOTIFY_STOP) {
1149		do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, 0,
1150			ILL_BADSTK, (void __user *)NULL);
1151	}
1152	local_irq_disable();
1153}
1154#endif
1155
1156void __init trap_init(void)
1157{
1158	/* Init cpu_entry_area before IST entries are set up */
1159	setup_cpu_entry_areas();
1160
1161	/* Init GHCB memory pages when running as an SEV-ES guest */
1162	sev_es_init_vc_handling();
1163
1164	/* Initialize TSS before setting up traps so ISTs work */
1165	cpu_init_exception_handling();
1166	/* Setup traps as cpu_init() might #GP */
1167	idt_setup_traps();
1168	cpu_init();
1169}