v4.17
  1/*
  2 *  Copyright (C) 1991, 1992  Linus Torvalds
  3 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
  4 *
  5 *  Pentium III FXSR, SSE support
  6 *	Gareth Hughes <gareth@valinux.com>, May 2000
  7 */
  8
  9/*
 10 * Handle hardware traps and faults.
 11 */
 12
 13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 14
 15#include <linux/context_tracking.h>
 16#include <linux/interrupt.h>
 17#include <linux/kallsyms.h>
 18#include <linux/spinlock.h>
 19#include <linux/kprobes.h>
 20#include <linux/uaccess.h>
 21#include <linux/kdebug.h>
 22#include <linux/kgdb.h>
 23#include <linux/kernel.h>
 24#include <linux/export.h>
 25#include <linux/ptrace.h>
 26#include <linux/uprobes.h>
 27#include <linux/string.h>
 28#include <linux/delay.h>
 29#include <linux/errno.h>
 30#include <linux/kexec.h>
 31#include <linux/sched.h>
 32#include <linux/sched/task_stack.h>
 33#include <linux/timer.h>
 34#include <linux/init.h>
 35#include <linux/bug.h>
 36#include <linux/nmi.h>
 37#include <linux/mm.h>
 38#include <linux/smp.h>
 39#include <linux/io.h>
 40
 41#if defined(CONFIG_EDAC)
 42#include <linux/edac.h>
 43#endif
 44
 45#include <asm/stacktrace.h>
 46#include <asm/processor.h>
 47#include <asm/debugreg.h>
 48#include <linux/atomic.h>
 49#include <asm/text-patching.h>
 50#include <asm/ftrace.h>
 51#include <asm/traps.h>
 52#include <asm/desc.h>
 53#include <asm/fpu/internal.h>
 54#include <asm/cpu_entry_area.h>
 55#include <asm/mce.h>
 56#include <asm/fixmap.h>
 57#include <asm/mach_traps.h>
 58#include <asm/alternative.h>
 59#include <asm/fpu/xstate.h>
 60#include <asm/trace/mpx.h>
 61#include <asm/mpx.h>
 62#include <asm/vm86.h>
 63#include <asm/umip.h>
 64
 65#ifdef CONFIG_X86_64
 66#include <asm/x86_init.h>
 67#include <asm/pgalloc.h>
 68#include <asm/proto.h>
 69#else
 70#include <asm/processor-flags.h>
 71#include <asm/setup.h>
 72#include <asm/proto.h>
 73#endif
 74
 75DECLARE_BITMAP(system_vectors, NR_VECTORS);
 76
 77static inline void cond_local_irq_enable(struct pt_regs *regs)
 78{
 79	if (regs->flags & X86_EFLAGS_IF)
 80		local_irq_enable();
 81}
 82
 83static inline void cond_local_irq_disable(struct pt_regs *regs)
 84{
 85	if (regs->flags & X86_EFLAGS_IF)
 86		local_irq_disable();
 87}
 88
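/*
 * Usage sketch for the two helpers above (do_example_trap() and
 * handle_it() are hypothetical names, not from this file): a handler
 * re-enables interrupts only if the interrupted context had them
 * enabled, and restores that state before returning:
 *
 *	dotraplinkage void do_example_trap(struct pt_regs *regs,
 *					   long error_code)
 *	{
 *		cond_local_irq_enable(regs);
 *		handle_it(regs, error_code);
 *		cond_local_irq_disable(regs);
 *	}
 */
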
 89/*
 90 * In IST context, we explicitly disable preemption.  This serves two
 91 * purposes: it makes it much less likely that we would accidentally
 92 * schedule in IST context and it will force a warning if we somehow
 93 * manage to schedule by accident.
 94 */
 95void ist_enter(struct pt_regs *regs)
 96{
 97	if (user_mode(regs)) {
 98		RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
 99	} else {
100		/*
101		 * We might have interrupted pretty much anything.  In
102		 * fact, if we're a machine check, we can even interrupt
103		 * NMI processing.  We don't want in_nmi() to return true,
104		 * but we need to notify RCU.
105		 */
106		rcu_nmi_enter();
107	}
108
109	preempt_disable();
110
111	/* This code is a bit fragile.  Test it. */
112	RCU_LOCKDEP_WARN(!rcu_is_watching(), "ist_enter didn't work");
113}
114
115void ist_exit(struct pt_regs *regs)
116{
117	preempt_enable_no_resched();
118
119	if (!user_mode(regs))
120		rcu_nmi_exit();
121}
122
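/*
 * Minimal sketch of how the pair above brackets an IST handler.
 * do_example_ist() is hypothetical; do_debug() and do_double_fault()
 * below are the real users.  Inside the bracket, RCU is watching and
 * preemption is off, so only atomic-context work is allowed:
 *
 *	dotraplinkage void do_example_ist(struct pt_regs *regs,
 *					  long error_code)
 *	{
 *		ist_enter(regs);
 *		do_atomic_work(regs);
 *		ist_exit(regs);
 *	}
 */
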
123/**
124 * ist_begin_non_atomic() - begin a non-atomic section in an IST exception
125 * @regs:	regs passed to the IST exception handler
126 *
127 * IST exception handlers normally cannot schedule.  As a special
128 * exception, if the exception interrupted userspace code (i.e.
129 * user_mode(regs) would return true) and the exception was not
130 * a double fault, it can be safe to schedule.  ist_begin_non_atomic()
131 * begins a non-atomic section within an ist_enter()/ist_exit() region.
132 * Callers are responsible for enabling interrupts themselves inside
133 * the non-atomic section, and callers must call ist_end_non_atomic()
134 * before ist_exit().
135 */
136void ist_begin_non_atomic(struct pt_regs *regs)
137{
138	BUG_ON(!user_mode(regs));
139
140	/*
141	 * Sanity check: we need to be on the normal thread stack.  This
142	 * will catch asm bugs and any attempt to use ist_preempt_enable
143	 * from double_fault.
144	 */
145	BUG_ON(!on_thread_stack());
146
147	preempt_enable_no_resched();
148}
149
150/**
 151 * ist_end_non_atomic() - end a non-atomic section in an IST exception
152 *
153 * Ends a non-atomic section started with ist_begin_non_atomic().
154 */
155void ist_end_non_atomic(void)
156{
157	preempt_disable();
158}
159
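/*
 * Putting all four helpers together, the only legal nesting per the
 * kerneldoc above is (sketch, assuming the exception interrupted
 * userspace and is not a double fault):
 *
 *	ist_enter(regs);
 *	if (user_mode(regs)) {
 *		ist_begin_non_atomic(regs);
 *		local_irq_enable();
 *		... work that may schedule ...
 *		local_irq_disable();
 *		ist_end_non_atomic();
 *	}
 *	ist_exit(regs);
 *
 * Enabling and disabling interrupts inside the section is the
 * caller's responsibility, as documented above.
 */
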
160int is_valid_bugaddr(unsigned long addr)
161{
162	unsigned short ud;
163
164	if (addr < TASK_SIZE_MAX)
165		return 0;
166
167	if (probe_kernel_address((unsigned short *)addr, ud))
168		return 0;
169
170	return ud == INSN_UD0 || ud == INSN_UD2;
171}
172
173int fixup_bug(struct pt_regs *regs, int trapnr)
174{
175	if (trapnr != X86_TRAP_UD)
176		return 0;
177
178	switch (report_bug(regs->ip, regs)) {
179	case BUG_TRAP_TYPE_NONE:
180	case BUG_TRAP_TYPE_BUG:
181		break;
182
183	case BUG_TRAP_TYPE_WARN:
184		regs->ip += LEN_UD2;
185		return 1;
186	}
187
188	return 0;
189}
190
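/*
 * Context sketch for the two helpers above: WARN() plants a ud2
 * instruction (opcode 0f 0b, hence the two-byte read in
 * is_valid_bugaddr()) plus a struct bug_entry record.  The resulting
 * #UD trap reaches do_error_trap() below, which calls fixup_bug();
 * report_bug() matches regs->ip against the bug table, prints the
 * warning, returns BUG_TRAP_TYPE_WARN, and execution resumes at the
 * instruction after the ud2:
 *
 *	regs->ip += LEN_UD2;	with LEN_UD2 == 2 on x86
 *
 * A real BUG() takes the BUG_TRAP_TYPE_BUG leg instead and ends in
 * die().
 */
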
191static nokprobe_inline int
192do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
193		  struct pt_regs *regs,	long error_code)
194{
195	if (v8086_mode(regs)) {
196		/*
197		 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
 198		 * On NMI (interrupt 2), do_trap should not be called.
199		 */
200		if (trapnr < X86_TRAP_UD) {
201			if (!handle_vm86_trap((struct kernel_vm86_regs *) regs,
202						error_code, trapnr))
203				return 0;
204		}
205		return -1;
206	}
207
208	if (!user_mode(regs)) {
209		if (fixup_exception(regs, trapnr))
210			return 0;
211
212		tsk->thread.error_code = error_code;
213		tsk->thread.trap_nr = trapnr;
214		die(str, regs, error_code);
215	}
216
217	return -1;
218}
219
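/*
 * Return convention for do_trap_no_signal() above: 0 means the trap
 * was fully handled (forwarded to vm86 or fixed up via an exception
 * table entry) and no signal should be sent; -1 means the caller
 * should go on and deliver the signal to the task, as do_trap()
 * below does.
 */
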
220static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr,
221				siginfo_t *info)
222{
223	unsigned long siaddr;
224	int sicode;
225
226	switch (trapnr) {
227	default:
228		return SEND_SIG_PRIV;
229
230	case X86_TRAP_DE:
231		sicode = FPE_INTDIV;
232		siaddr = uprobe_get_trap_addr(regs);
233		break;
234	case X86_TRAP_UD:
235		sicode = ILL_ILLOPN;
236		siaddr = uprobe_get_trap_addr(regs);
237		break;
238	case X86_TRAP_AC:
239		sicode = BUS_ADRALN;
240		siaddr = 0;
241		break;
242	}
243
244	info->si_signo = signr;
245	info->si_errno = 0;
246	info->si_code = sicode;
247	info->si_addr = (void __user *)siaddr;
248	return info;
249}
250
251static void
252do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
253	long error_code, siginfo_t *info)
254{
255	struct task_struct *tsk = current;
256
257
258	if (!do_trap_no_signal(tsk, trapnr, str, regs, error_code))
259		return;
260	/*
261	 * We want error_code and trap_nr set for userspace faults and
262	 * kernelspace faults which result in die(), but not
263	 * kernelspace faults which are fixed up.  die() gives the
264	 * process no chance to handle the signal and notice the
265	 * kernel fault information, so that won't result in polluting
266	 * the information about previously queued, but not yet
267	 * delivered, faults.  See also do_general_protection below.
268	 */
269	tsk->thread.error_code = error_code;
270	tsk->thread.trap_nr = trapnr;
271
272	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
273	    printk_ratelimit()) {
274		pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
275			tsk->comm, tsk->pid, str,
276			regs->ip, regs->sp, error_code);
277		print_vma_addr(KERN_CONT " in ", regs->ip);
278		pr_cont("\n");
279	}
280
281	force_sig_info(signr, info ?: SEND_SIG_PRIV, tsk);
282}
283NOKPROBE_SYMBOL(do_trap);
284
285static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
286			  unsigned long trapnr, int signr)
287{
288	siginfo_t info;
289
290	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
291
292	/*
293	 * WARN*()s end up here; fix them up before we call the
294	 * notifier chain.
295	 */
296	if (!user_mode(regs) && fixup_bug(regs, trapnr))
297		return;
298
299	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
300			NOTIFY_STOP) {
301		cond_local_irq_enable(regs);
302		do_trap(trapnr, signr, str, regs, error_code,
303			fill_trap_info(regs, signr, trapnr, &info));
304	}
305}
306
307#define DO_ERROR(trapnr, signr, str, name)				\
308dotraplinkage void do_##name(struct pt_regs *regs, long error_code)	\
309{									\
310	do_error_trap(regs, error_code, str, trapnr, signr);		\
311}
312
313DO_ERROR(X86_TRAP_DE,     SIGFPE,  "divide error",		divide_error)
314DO_ERROR(X86_TRAP_OF,     SIGSEGV, "overflow",			overflow)
315DO_ERROR(X86_TRAP_UD,     SIGILL,  "invalid opcode",		invalid_op)
316DO_ERROR(X86_TRAP_OLD_MF, SIGFPE,  "coprocessor segment overrun",coprocessor_segment_overrun)
317DO_ERROR(X86_TRAP_TS,     SIGSEGV, "invalid TSS",		invalid_TSS)
318DO_ERROR(X86_TRAP_NP,     SIGBUS,  "segment not present",	segment_not_present)
319DO_ERROR(X86_TRAP_SS,     SIGBUS,  "stack segment",		stack_segment)
320DO_ERROR(X86_TRAP_AC,     SIGBUS,  "alignment check",		alignment_check)
321
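/*
 * For reference, the first DO_ERROR() invocation above expands to:
 *
 *	dotraplinkage void do_divide_error(struct pt_regs *regs,
 *					   long error_code)
 *	{
 *		do_error_trap(regs, error_code, "divide error",
 *			      X86_TRAP_DE, SIGFPE);
 *	}
 *
 * i.e. every one of these handlers is the same thin wrapper around
 * do_error_trap().
 */
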
322#ifdef CONFIG_VMAP_STACK
323__visible void __noreturn handle_stack_overflow(const char *message,
324						struct pt_regs *regs,
325						unsigned long fault_address)
326{
327	printk(KERN_EMERG "BUG: stack guard page was hit at %p (stack is %p..%p)\n",
328		 (void *)fault_address, current->stack,
329		 (char *)current->stack + THREAD_SIZE - 1);
330	die(message, regs, 0);
331
332	/* Be absolutely certain we don't return. */
333	panic(message);
334}
335#endif
336
337#ifdef CONFIG_X86_64
338/* Runs on IST stack */
339dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
340{
341	static const char str[] = "double fault";
342	struct task_struct *tsk = current;
343#ifdef CONFIG_VMAP_STACK
344	unsigned long cr2;
345#endif
346
347#ifdef CONFIG_X86_ESPFIX64
348	extern unsigned char native_irq_return_iret[];
349
350	/*
351	 * If IRET takes a non-IST fault on the espfix64 stack, then we
352	 * end up promoting it to a doublefault.  In that case, take
353	 * advantage of the fact that we're not using the normal (TSS.sp0)
354	 * stack right now.  We can write a fake #GP(0) frame at TSS.sp0
355	 * and then modify our own IRET frame so that, when we return,
356	 * we land directly at the #GP(0) vector with the stack already
357	 * set up according to its expectations.
358	 *
359	 * The net result is that our #GP handler will think that we
360	 * entered from usermode with the bad user context.
361	 *
362	 * No need for ist_enter here because we don't use RCU.
363	 */
364	if (((long)regs->sp >> P4D_SHIFT) == ESPFIX_PGD_ENTRY &&
365		regs->cs == __KERNEL_CS &&
366		regs->ip == (unsigned long)native_irq_return_iret)
367	{
368		struct pt_regs *gpregs = (struct pt_regs *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
369
370		/*
371		 * regs->sp points to the failing IRET frame on the
372		 * ESPFIX64 stack.  Copy it to the entry stack.  This fills
373		 * in gpregs->ss through gpregs->ip.
374		 *
375		 */
376		memmove(&gpregs->ip, (void *)regs->sp, 5*8);
377		gpregs->orig_ax = 0;  /* Missing (lost) #GP error code */
378
379		/*
380		 * Adjust our frame so that we return straight to the #GP
381		 * vector with the expected RSP value.  This is safe because
 382		 * we won't enable interrupts or schedule before we invoke
383		 * general_protection, so nothing will clobber the stack
384		 * frame we just set up.
385		 */
386		regs->ip = (unsigned long)general_protection;
387		regs->sp = (unsigned long)&gpregs->orig_ax;
388
389		return;
390	}
391#endif
392
393	ist_enter(regs);
394	notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);
395
396	tsk->thread.error_code = error_code;
397	tsk->thread.trap_nr = X86_TRAP_DF;
398
399#ifdef CONFIG_VMAP_STACK
400	/*
401	 * If we overflow the stack into a guard page, the CPU will fail
402	 * to deliver #PF and will send #DF instead.  Similarly, if we
403	 * take any non-IST exception while too close to the bottom of
404	 * the stack, the processor will get a page fault while
405	 * delivering the exception and will generate a double fault.
406	 *
407	 * According to the SDM (footnote in 6.15 under "Interrupt 14 -
 408	 * Page-Fault Exception (#PF)"):
409	 *
410	 *   Processors update CR2 whenever a page fault is detected. If a
411	 *   second page fault occurs while an earlier page fault is being
412	 *   delivered, the faulting linear address of the second fault will
413	 *   overwrite the contents of CR2 (replacing the previous
414	 *   address). These updates to CR2 occur even if the page fault
415	 *   results in a double fault or occurs during the delivery of a
416	 *   double fault.
417	 *
418	 * The logic below has a small possibility of incorrectly diagnosing
419	 * some errors as stack overflows.  For example, if the IDT or GDT
420	 * gets corrupted such that #GP delivery fails due to a bad descriptor
421	 * causing #GP and we hit this condition while CR2 coincidentally
422	 * points to the stack guard page, we'll think we overflowed the
423	 * stack.  Given that we're going to panic one way or another
424	 * if this happens, this isn't necessarily worth fixing.
425	 *
426	 * If necessary, we could improve the test by only diagnosing
427	 * a stack overflow if the saved RSP points within 47 bytes of
428	 * the bottom of the stack: if RSP == tsk_stack + 48 and we
429	 * take an exception, the stack is already aligned and there
 430	 * will be enough room for SS, RSP, RFLAGS, CS, RIP, and a
431	 * possible error code, so a stack overflow would *not* double
432	 * fault.  With any less space left, exception delivery could
433	 * fail, and, as a practical matter, we've overflowed the
434	 * stack even if the actual trigger for the double fault was
435	 * something else.
436	 */
437	cr2 = read_cr2();
438	if ((unsigned long)task_stack_page(tsk) - 1 - cr2 < PAGE_SIZE)
439		handle_stack_overflow("kernel stack overflow (double-fault)", regs, cr2);
440#endif
441
442#ifdef CONFIG_DOUBLEFAULT
443	df_debug(regs, error_code);
444#endif
445	/*
446	 * This is always a kernel trap and never fixable (and thus must
447	 * never return).
448	 */
449	for (;;)
450		die(str, regs, error_code);
451}
452#endif
453
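/*
 * Worked example for the CR2 test above (addresses invented for
 * illustration): the unsigned subtraction folds a two-sided range
 * check into a single compare.  Suppose
 *
 *	task_stack_page(tsk) == 0xffffc90000404000
 *
 * so the guard page spans 0xffffc90000403000..0xffffc90000403fff.
 * For cr2 == 0xffffc90000403ff8, just below the stack:
 *
 *	0xffffc90000404000 - 1 - cr2 == 0x7 < PAGE_SIZE -> overflow
 *
 * A fault well below the guard page yields a difference >= PAGE_SIZE,
 * and a cr2 above the stack base wraps around to a huge unsigned
 * value, so both correctly fail the test.
 */
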
454dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
455{
456	const struct mpx_bndcsr *bndcsr;
457	siginfo_t *info;
458
459	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
460	if (notify_die(DIE_TRAP, "bounds", regs, error_code,
461			X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP)
462		return;
463	cond_local_irq_enable(regs);
464
465	if (!user_mode(regs))
466		die("bounds", regs, error_code);
467
468	if (!cpu_feature_enabled(X86_FEATURE_MPX)) {
469		/* The exception is not from Intel MPX */
470		goto exit_trap;
471	}
472
473	/*
474	 * We need to look at BNDSTATUS to resolve this exception.
475	 * A NULL here might mean that it is in its 'init state',
 476	 * which is all zeros and indicates that MPX was not
477	 * responsible for the exception.
478	 */
479	bndcsr = get_xsave_field_ptr(XFEATURE_MASK_BNDCSR);
480	if (!bndcsr)
481		goto exit_trap;
482
483	trace_bounds_exception_mpx(bndcsr);
484	/*
485	 * The error code field of the BNDSTATUS register communicates status
 486	 * information for a bound range exception (#BR) or an operation
 487	 * involving the bound directory.
488	 */
489	switch (bndcsr->bndstatus & MPX_BNDSTA_ERROR_CODE) {
490	case 2:	/* Bound directory has invalid entry. */
491		if (mpx_handle_bd_fault())
492			goto exit_trap;
493		break; /* Success, it was handled */
494	case 1: /* Bound violation. */
495		info = mpx_generate_siginfo(regs);
496		if (IS_ERR(info)) {
497			/*
498			 * We failed to decode the MPX instruction.  Act as if
499			 * the exception was not caused by MPX.
500			 */
501			goto exit_trap;
502		}
503		/*
504		 * Success, we decoded the instruction and retrieved
505		 * an 'info' containing the address being accessed
506		 * which caused the exception.  This information
 507		 * allows an application to possibly handle the
508		 * #BR exception itself.
509		 */
510		do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, info);
511		kfree(info);
512		break;
513	case 0: /* No exception caused by Intel MPX operations. */
514		goto exit_trap;
515	default:
516		die("bounds", regs, error_code);
517	}
518
519	return;
520
521exit_trap:
522	/*
523	 * This path out is for all the cases where we could not
524	 * handle the exception in some way (like allocating a
 525	 * table or telling userspace about it).  We will also end
 526	 * up here if the kernel has MPX turned off at compile
 527	 * time.
528	 */
529	do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, NULL);
530}
531
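/*
 * Layout of BNDSTATUS as decoded above (sketch following the Intel
 * SDM; MPX_BNDSTA_ERROR_CODE masks the low two bits):
 *
 *	 63                                  2 1   0
 *	+------------------------------------+------+
 *	| address of bound directory entry   |  EC  |
 *	+------------------------------------+------+
 *
 *	EC == 0: no MPX-related exception
 *	EC == 1: bound violation
 *	EC == 2: invalid bound directory entry
 */
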
532dotraplinkage void
533do_general_protection(struct pt_regs *regs, long error_code)
534{
535	struct task_struct *tsk;
536
537	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
538	cond_local_irq_enable(regs);
539
540	if (static_cpu_has(X86_FEATURE_UMIP)) {
541		if (user_mode(regs) && fixup_umip_exception(regs))
542			return;
543	}
544
545	if (v8086_mode(regs)) {
546		local_irq_enable();
547		handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
548		return;
549	}
550
551	tsk = current;
552	if (!user_mode(regs)) {
553		if (fixup_exception(regs, X86_TRAP_GP))
554			return;
555
556		tsk->thread.error_code = error_code;
557		tsk->thread.trap_nr = X86_TRAP_GP;
558		if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
559			       X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
560			die("general protection fault", regs, error_code);
561		return;
562	}
563
564	tsk->thread.error_code = error_code;
565	tsk->thread.trap_nr = X86_TRAP_GP;
566
567	if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
568			printk_ratelimit()) {
569		pr_info("%s[%d] general protection ip:%lx sp:%lx error:%lx",
570			tsk->comm, task_pid_nr(tsk),
571			regs->ip, regs->sp, error_code);
572		print_vma_addr(KERN_CONT " in ", regs->ip);
573		pr_cont("\n");
574	}
575
576	force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
577}
578NOKPROBE_SYMBOL(do_general_protection);
579
580dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
581{
582#ifdef CONFIG_DYNAMIC_FTRACE
583	/*
584	 * ftrace must be first, everything else may cause a recursive crash.
585	 * See note by declaration of modifying_ftrace_code in ftrace.c
586	 */
587	if (unlikely(atomic_read(&modifying_ftrace_code)) &&
588	    ftrace_int3_handler(regs))
589		return;
590#endif
591	if (poke_int3_handler(regs))
592		return;
593
594	/*
595	 * Use ist_enter despite the fact that we don't use an IST stack.
596	 * We can be called from a kprobe in non-CONTEXT_KERNEL kernel
597	 * mode or even during context tracking state changes.
598	 *
599	 * This means that we can't schedule.  That's okay.
600	 */
601	ist_enter(regs);
602	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
603#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
604	if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
605				SIGTRAP) == NOTIFY_STOP)
606		goto exit;
607#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
608
609#ifdef CONFIG_KPROBES
610	if (kprobe_int3_handler(regs))
611		goto exit;
612#endif
613
614	if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
615			SIGTRAP) == NOTIFY_STOP)
616		goto exit;
617
618	cond_local_irq_enable(regs);
619	do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
620	cond_local_irq_disable(regs);
621
622exit:
623	ist_exit(regs);
624}
625NOKPROBE_SYMBOL(do_int3);
626
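/*
 * Why the ordering above matters (sketch): int3 is the one-byte
 * opcode 0xcc, so it can be written atomically over the first byte
 * of any instruction.  ftrace and text_poke_bp() rely on exactly
 * that while rewriting kernel text, e.g. (simplified):
 *
 *	unsigned char int3 = 0xcc;
 *	text_poke(addr, &int3, 1);
 *
 * A CPU executing the patched site in that window traps here, so the
 * ftrace and poke_int3_handler() checks must run before anything
 * (kgdb, kprobes, notify_die) that might itself fault or recurse.
 */
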
627#ifdef CONFIG_X86_64
628/*
629 * Help handler running on a per-cpu (IST or entry trampoline) stack
630 * to switch to the normal thread stack if the interrupted code was in
631 * user mode. The actual stack switch is done in entry_64.S
632 */
633asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs)
634{
635	struct pt_regs *regs = (struct pt_regs *)this_cpu_read(cpu_current_top_of_stack) - 1;
636	if (regs != eregs)
637		*regs = *eregs;
638	return regs;
639}
640NOKPROBE_SYMBOL(sync_regs);
641
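/*
 * The "cast, then subtract one" idiom above (also used in
 * fixup_bad_iret() and the espfix path in do_double_fault())
 * computes a pt_regs that ends exactly at the top of the stack:
 *
 *	regs = (struct pt_regs *)top_of_stack - 1;
 *	(char *)regs + sizeof(struct pt_regs) == top_of_stack
 *
 * i.e. it reserves room for one struct pt_regs at the very top.
 */
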
642struct bad_iret_stack {
643	void *error_entry_ret;
644	struct pt_regs regs;
645};
646
647asmlinkage __visible notrace
648struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
649{
650	/*
651	 * This is called from entry_64.S early in handling a fault
652	 * caused by a bad iret to user mode.  To handle the fault
653	 * correctly, we want to move our stack frame to where it would
654	 * be had we entered directly on the entry stack (rather than
655	 * just below the IRET frame) and we want to pretend that the
656	 * exception came from the IRET target.
657	 */
658	struct bad_iret_stack *new_stack =
659		(struct bad_iret_stack *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
660
661	/* Copy the IRET target to the new stack. */
662	memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
663
664	/* Copy the remainder of the stack from the current stack. */
665	memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));
666
667	BUG_ON(!user_mode(&new_stack->regs));
668	return new_stack;
669}
670NOKPROBE_SYMBOL(fixup_bad_iret);
671#endif
672
673static bool is_sysenter_singlestep(struct pt_regs *regs)
674{
675	/*
676	 * We don't try for precision here.  If we're anywhere in the region of
677	 * code that can be single-stepped in the SYSENTER entry path, then
678	 * assume that this is a useless single-step trap due to SYSENTER
679	 * being invoked with TF set.  (We don't know in advance exactly
680	 * which instructions will be hit because BTF could plausibly
681	 * be set.)
682	 */
683#ifdef CONFIG_X86_32
684	return (regs->ip - (unsigned long)__begin_SYSENTER_singlestep_region) <
685		(unsigned long)__end_SYSENTER_singlestep_region -
686		(unsigned long)__begin_SYSENTER_singlestep_region;
687#elif defined(CONFIG_IA32_EMULATION)
688	return (regs->ip - (unsigned long)entry_SYSENTER_compat) <
689		(unsigned long)__end_entry_SYSENTER_compat -
690		(unsigned long)entry_SYSENTER_compat;
691#else
692	return false;
693#endif
694}
695
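/*
 * Both comparisons above use the classic single-compare range check:
 * in unsigned arithmetic, (ip - begin) < (end - begin) is equivalent
 * to (begin <= ip && ip < end), because an ip below begin wraps to a
 * huge unsigned value.  For example, with begin = 0x1000 and
 * end = 0x1040:
 *
 *	ip = 0x0fff:	0x0fff - 0x1000 wraps to ~0UL	-> out of range
 *	ip = 0x1010:	0x10 < 0x40			-> in range
 */
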
696/*
697 * Our handling of the processor debug registers is non-trivial.
698 * We do not clear them on entry and exit from the kernel. Therefore
699 * it is possible to get a watchpoint trap here from inside the kernel.
700 * However, the code in ./ptrace.c has ensured that the user can
701 * only set watchpoints on userspace addresses. Therefore the in-kernel
702 * watchpoint trap can only occur in code which is reading/writing
703 * from user space. Such code must not hold kernel locks (since it
704 * can equally take a page fault), therefore it is safe to call
705 * force_sig_info even though that claims and releases locks.
706 *
707 * Code in ./signal.c ensures that the debug control register
708 * is restored before we deliver any signal, and therefore that
709 * user code runs with the correct debug control register even though
710 * we clear it here.
711 *
712 * Being careful here means that we don't have to be as careful in a
713 * lot of more complicated places (task switching can be a bit lazy
714 * about restoring all the debug state, and ptrace doesn't have to
715 * find every occurrence of the TF bit that could be saved away even
716 * by user code)
717 *
718 * May run on IST stack.
719 */
720dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
721{
722	struct task_struct *tsk = current;
723	int user_icebp = 0;
724	unsigned long dr6;
725	int si_code;
726
727	ist_enter(regs);
728
729	get_debugreg(dr6, 6);
730	/*
731	 * The Intel SDM says:
732	 *
733	 *   Certain debug exceptions may clear bits 0-3. The remaining
734	 *   contents of the DR6 register are never cleared by the
735	 *   processor. To avoid confusion in identifying debug
736	 *   exceptions, debug handlers should clear the register before
737	 *   returning to the interrupted task.
738	 *
739	 * Keep it simple: clear DR6 immediately.
740	 */
741	set_debugreg(0, 6);
742
743	/* Filter out all the reserved bits which are preset to 1 */
744	dr6 &= ~DR6_RESERVED;
745
746	/*
747	 * The SDM says "The processor clears the BTF flag when it
748	 * generates a debug exception."  Clear TIF_BLOCKSTEP to keep
749	 * TIF_BLOCKSTEP in sync with the hardware BTF flag.
750	 */
751	clear_tsk_thread_flag(tsk, TIF_BLOCKSTEP);
752
753	if (unlikely(!user_mode(regs) && (dr6 & DR_STEP) &&
754		     is_sysenter_singlestep(regs))) {
755		dr6 &= ~DR_STEP;
756		if (!dr6)
757			goto exit;
758		/*
759		 * else we might have gotten a single-step trap and hit a
760		 * watchpoint at the same time, in which case we should fall
761		 * through and handle the watchpoint.
762		 */
763	}
764
765	/*
 766	 * If dr6 gives us no indication of the origin of this trap,
 767	 * then it's very likely the result of an icebp/int01 trap.
 768	 * The user wants a SIGTRAP for that.
769	 */
770	if (!dr6 && user_mode(regs))
771		user_icebp = 1;
772
773	/* Store the virtualized DR6 value */
774	tsk->thread.debugreg6 = dr6;
775
776#ifdef CONFIG_KPROBES
777	if (kprobe_debug_handler(regs))
778		goto exit;
779#endif
780
781	if (notify_die(DIE_DEBUG, "debug", regs, (long)&dr6, error_code,
782							SIGTRAP) == NOTIFY_STOP)
783		goto exit;
784
785	/*
786	 * Let others (NMI) know that the debug stack is in use
787	 * as we may switch to the interrupt stack.
788	 */
789	debug_stack_usage_inc();
790
791	/* It's safe to allow irq's after DR6 has been saved */
792	cond_local_irq_enable(regs);
793
794	if (v8086_mode(regs)) {
795		handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
796					X86_TRAP_DB);
797		cond_local_irq_disable(regs);
798		debug_stack_usage_dec();
799		goto exit;
800	}
801
802	if (WARN_ON_ONCE((dr6 & DR_STEP) && !user_mode(regs))) {
803		/*
804		 * Historical junk that used to handle SYSENTER single-stepping.
805		 * This should be unreachable now.  If we survive for a while
806		 * without anyone hitting this warning, we'll turn this into
807		 * an oops.
808		 */
809		tsk->thread.debugreg6 &= ~DR_STEP;
810		set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
811		regs->flags &= ~X86_EFLAGS_TF;
812	}
813	si_code = get_si_code(tsk->thread.debugreg6);
814	if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
815		send_sigtrap(tsk, regs, error_code, si_code);
816	cond_local_irq_disable(regs);
817	debug_stack_usage_dec();
818
819exit:
820	ist_exit(regs);
821}
822NOKPROBE_SYMBOL(do_debug);
823
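/*
 * DR6 bits consulted by do_debug() above (sketch; the SDM has the
 * authoritative layout):
 *
 *	DR_TRAP_BITS	bits 3:0, B0-B3, one per triggered breakpoint
 *	DR_STEP		bit 14, BS, single-step (TF) trap
 *	DR6_RESERVED	bits that always read as 1, masked off early
 *
 * An all-zero dr6 in user mode therefore means none of the
 * architectural causes fired, which is why it is attributed to the
 * undocumented icebp/int1 instruction (opcode 0xf1).
 */
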
824/*
825 * Note that we play around with the 'TS' bit in an attempt to get
826 * the correct behaviour even in the presence of the asynchronous
827 * IRQ13 behaviour
828 */
829static void math_error(struct pt_regs *regs, int error_code, int trapnr)
830{
831	struct task_struct *task = current;
832	struct fpu *fpu = &task->thread.fpu;
833	siginfo_t info;
834	char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
835						"simd exception";
836
837	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
838		return;
839	cond_local_irq_enable(regs);
840
841	if (!user_mode(regs)) {
842		if (!fixup_exception(regs, trapnr)) {
843			task->thread.error_code = error_code;
844			task->thread.trap_nr = trapnr;
845			die(str, regs, error_code);
846		}
847		return;
848	}
849
850	/*
851	 * Save the info for the exception handler and clear the error.
852	 */
853	fpu__save(fpu);
854
855	task->thread.trap_nr	= trapnr;
856	task->thread.error_code = error_code;
857	info.si_signo		= SIGFPE;
858	info.si_errno		= 0;
859	info.si_addr		= (void __user *)uprobe_get_trap_addr(regs);
860
861	info.si_code = fpu__exception_code(fpu, trapnr);
862
863	/* Retry when we get spurious exceptions: */
864	if (!info.si_code)
865		return;
866
867	force_sig_info(SIGFPE, &info, task);
868}
869
870dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
871{
872	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
873	math_error(regs, error_code, X86_TRAP_MF);
874}
875
876dotraplinkage void
877do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
878{
879	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
880	math_error(regs, error_code, X86_TRAP_XF);
881}
882
883dotraplinkage void
884do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
885{
886	cond_local_irq_enable(regs);
887}
888
889dotraplinkage void
890do_device_not_available(struct pt_regs *regs, long error_code)
891{
892	unsigned long cr0;
893
894	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
895
896#ifdef CONFIG_MATH_EMULATION
897	if (!boot_cpu_has(X86_FEATURE_FPU) && (read_cr0() & X86_CR0_EM)) {
898		struct math_emu_info info = { };
899
900		cond_local_irq_enable(regs);
901
902		info.regs = regs;
903		math_emulate(&info);
904		return;
905	}
906#endif
907
908	/* This should not happen. */
909	cr0 = read_cr0();
910	if (WARN(cr0 & X86_CR0_TS, "CR0.TS was set")) {
911		/* Try to fix it up and carry on. */
912		write_cr0(cr0 & ~X86_CR0_TS);
913	} else {
914		/*
915		 * Something terrible happened, and we're better off trying
916		 * to kill the task than getting stuck in a never-ending
917		 * loop of #NM faults.
918		 */
919		die("unexpected #NM exception", regs, error_code);
920	}
921}
922NOKPROBE_SYMBOL(do_device_not_available);
923
924#ifdef CONFIG_X86_32
925dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
926{
927	siginfo_t info;
928
929	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
930	local_irq_enable();
931
932	info.si_signo = SIGILL;
933	info.si_errno = 0;
934	info.si_code = ILL_BADSTK;
935	info.si_addr = NULL;
936	if (notify_die(DIE_TRAP, "iret exception", regs, error_code,
937			X86_TRAP_IRET, SIGILL) != NOTIFY_STOP) {
938		do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code,
939			&info);
940	}
941}
942#endif
943
944void __init trap_init(void)
945{
946	/* Init cpu_entry_area before IST entries are set up */
947	setup_cpu_entry_areas();
948
949	idt_setup_traps();
950
951	/*
952	 * Set the IDT descriptor to a fixed read-only location, so that the
953	 * "sidt" instruction will not leak the location of the kernel, and
954	 * to defend the IDT against arbitrary memory write vulnerabilities.
955	 * It will be reloaded in cpu_init() */
956	cea_set_pte(CPU_ENTRY_AREA_RO_IDT_VADDR, __pa_symbol(idt_table),
957		    PAGE_KERNEL_RO);
958	idt_descr.address = CPU_ENTRY_AREA_RO_IDT;
959
960	/*
961	 * Should be a barrier for any external CPU state:
962	 */
963	cpu_init();
964
965	idt_setup_ist_traps();
966
967	x86_init.irqs.trap_init();
968
969	idt_setup_debugidt_traps();
970}
v4.10.11
   1/*
   2 *  Copyright (C) 1991, 1992  Linus Torvalds
   3 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
   4 *
   5 *  Pentium III FXSR, SSE support
   6 *	Gareth Hughes <gareth@valinux.com>, May 2000
   7 */
   8
   9/*
  10 * Handle hardware traps and faults.
  11 */
  12
  13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  14
  15#include <linux/context_tracking.h>
  16#include <linux/interrupt.h>
  17#include <linux/kallsyms.h>
  18#include <linux/spinlock.h>
  19#include <linux/kprobes.h>
  20#include <linux/uaccess.h>
  21#include <linux/kdebug.h>
  22#include <linux/kgdb.h>
  23#include <linux/kernel.h>
  24#include <linux/export.h>
  25#include <linux/ptrace.h>
  26#include <linux/uprobes.h>
  27#include <linux/string.h>
  28#include <linux/delay.h>
  29#include <linux/errno.h>
  30#include <linux/kexec.h>
  31#include <linux/sched.h>
 
  32#include <linux/timer.h>
  33#include <linux/init.h>
  34#include <linux/bug.h>
  35#include <linux/nmi.h>
  36#include <linux/mm.h>
  37#include <linux/smp.h>
  38#include <linux/io.h>
  39
  40#ifdef CONFIG_EISA
  41#include <linux/ioport.h>
  42#include <linux/eisa.h>
  43#endif
  44
  45#if defined(CONFIG_EDAC)
  46#include <linux/edac.h>
  47#endif
  48
  49#include <asm/kmemcheck.h>
  50#include <asm/stacktrace.h>
  51#include <asm/processor.h>
  52#include <asm/debugreg.h>
  53#include <linux/atomic.h>
  54#include <asm/text-patching.h>
  55#include <asm/ftrace.h>
  56#include <asm/traps.h>
  57#include <asm/desc.h>
  58#include <asm/fpu/internal.h>
 
  59#include <asm/mce.h>
  60#include <asm/fixmap.h>
  61#include <asm/mach_traps.h>
  62#include <asm/alternative.h>
  63#include <asm/fpu/xstate.h>
  64#include <asm/trace/mpx.h>
  65#include <asm/mpx.h>
  66#include <asm/vm86.h>
 
  67
  68#ifdef CONFIG_X86_64
  69#include <asm/x86_init.h>
  70#include <asm/pgalloc.h>
  71#include <asm/proto.h>
  72
  73/* No need to be aligned, but done to keep all IDTs defined the same way. */
  74gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
  75#else
  76#include <asm/processor-flags.h>
  77#include <asm/setup.h>
  78#include <asm/proto.h>
  79#endif
  80
  81/* Must be page-aligned because the real IDT is used in a fixmap. */
  82gate_desc idt_table[NR_VECTORS] __page_aligned_bss;
  83
  84DECLARE_BITMAP(used_vectors, NR_VECTORS);
  85EXPORT_SYMBOL_GPL(used_vectors);
  86
  87static inline void cond_local_irq_enable(struct pt_regs *regs)
  88{
  89	if (regs->flags & X86_EFLAGS_IF)
  90		local_irq_enable();
  91}
  92
  93static inline void cond_local_irq_disable(struct pt_regs *regs)
  94{
  95	if (regs->flags & X86_EFLAGS_IF)
  96		local_irq_disable();
  97}
  98
  99/*
 100 * In IST context, we explicitly disable preemption.  This serves two
 101 * purposes: it makes it much less likely that we would accidentally
 102 * schedule in IST context and it will force a warning if we somehow
 103 * manage to schedule by accident.
 104 */
 105void ist_enter(struct pt_regs *regs)
 106{
 107	if (user_mode(regs)) {
 108		RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
 109	} else {
 110		/*
 111		 * We might have interrupted pretty much anything.  In
 112		 * fact, if we're a machine check, we can even interrupt
 113		 * NMI processing.  We don't want in_nmi() to return true,
 114		 * but we need to notify RCU.
 115		 */
 116		rcu_nmi_enter();
 117	}
 118
 119	preempt_disable();
 120
 121	/* This code is a bit fragile.  Test it. */
 122	RCU_LOCKDEP_WARN(!rcu_is_watching(), "ist_enter didn't work");
 123}
 124
 125void ist_exit(struct pt_regs *regs)
 126{
 127	preempt_enable_no_resched();
 128
 129	if (!user_mode(regs))
 130		rcu_nmi_exit();
 131}
 132
 133/**
 134 * ist_begin_non_atomic() - begin a non-atomic section in an IST exception
 135 * @regs:	regs passed to the IST exception handler
 136 *
 137 * IST exception handlers normally cannot schedule.  As a special
 138 * exception, if the exception interrupted userspace code (i.e.
 139 * user_mode(regs) would return true) and the exception was not
 140 * a double fault, it can be safe to schedule.  ist_begin_non_atomic()
 141 * begins a non-atomic section within an ist_enter()/ist_exit() region.
 142 * Callers are responsible for enabling interrupts themselves inside
 143 * the non-atomic section, and callers must call ist_end_non_atomic()
 144 * before ist_exit().
 145 */
 146void ist_begin_non_atomic(struct pt_regs *regs)
 147{
 148	BUG_ON(!user_mode(regs));
 149
 150	/*
 151	 * Sanity check: we need to be on the normal thread stack.  This
 152	 * will catch asm bugs and any attempt to use ist_preempt_enable
 153	 * from double_fault.
 154	 */
 155	BUG_ON((unsigned long)(current_top_of_stack() -
 156			       current_stack_pointer()) >= THREAD_SIZE);
 157
 158	preempt_enable_no_resched();
 159}
 160
 161/**
 162 * ist_end_non_atomic() - begin a non-atomic section in an IST exception
 163 *
 164 * Ends a non-atomic section started with ist_begin_non_atomic().
 165 */
 166void ist_end_non_atomic(void)
 167{
 168	preempt_disable();
 169}
 170
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 171static nokprobe_inline int
 172do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
 173		  struct pt_regs *regs,	long error_code)
 174{
 175	if (v8086_mode(regs)) {
 176		/*
 177		 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
 178		 * On nmi (interrupt 2), do_trap should not be called.
 179		 */
 180		if (trapnr < X86_TRAP_UD) {
 181			if (!handle_vm86_trap((struct kernel_vm86_regs *) regs,
 182						error_code, trapnr))
 183				return 0;
 184		}
 185		return -1;
 186	}
 187
 188	if (!user_mode(regs)) {
 189		if (!fixup_exception(regs, trapnr)) {
 190			tsk->thread.error_code = error_code;
 191			tsk->thread.trap_nr = trapnr;
 192			die(str, regs, error_code);
 193		}
 194		return 0;
 195	}
 196
 197	return -1;
 198}
 199
 200static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr,
 201				siginfo_t *info)
 202{
 203	unsigned long siaddr;
 204	int sicode;
 205
 206	switch (trapnr) {
 207	default:
 208		return SEND_SIG_PRIV;
 209
 210	case X86_TRAP_DE:
 211		sicode = FPE_INTDIV;
 212		siaddr = uprobe_get_trap_addr(regs);
 213		break;
 214	case X86_TRAP_UD:
 215		sicode = ILL_ILLOPN;
 216		siaddr = uprobe_get_trap_addr(regs);
 217		break;
 218	case X86_TRAP_AC:
 219		sicode = BUS_ADRALN;
 220		siaddr = 0;
 221		break;
 222	}
 223
 224	info->si_signo = signr;
 225	info->si_errno = 0;
 226	info->si_code = sicode;
 227	info->si_addr = (void __user *)siaddr;
 228	return info;
 229}
 230
 231static void
 232do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
 233	long error_code, siginfo_t *info)
 234{
 235	struct task_struct *tsk = current;
 236
 237
 238	if (!do_trap_no_signal(tsk, trapnr, str, regs, error_code))
 239		return;
 240	/*
 241	 * We want error_code and trap_nr set for userspace faults and
 242	 * kernelspace faults which result in die(), but not
 243	 * kernelspace faults which are fixed up.  die() gives the
 244	 * process no chance to handle the signal and notice the
 245	 * kernel fault information, so that won't result in polluting
 246	 * the information about previously queued, but not yet
 247	 * delivered, faults.  See also do_general_protection below.
 248	 */
 249	tsk->thread.error_code = error_code;
 250	tsk->thread.trap_nr = trapnr;
 251
 252	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
 253	    printk_ratelimit()) {
 254		pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
 255			tsk->comm, tsk->pid, str,
 256			regs->ip, regs->sp, error_code);
 257		print_vma_addr(" in ", regs->ip);
 258		pr_cont("\n");
 259	}
 260
 261	force_sig_info(signr, info ?: SEND_SIG_PRIV, tsk);
 262}
 263NOKPROBE_SYMBOL(do_trap);
 264
 265static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
 266			  unsigned long trapnr, int signr)
 267{
 268	siginfo_t info;
 269
 270	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
 271
 
 
 
 
 
 
 
 272	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
 273			NOTIFY_STOP) {
 274		cond_local_irq_enable(regs);
 275		do_trap(trapnr, signr, str, regs, error_code,
 276			fill_trap_info(regs, signr, trapnr, &info));
 277	}
 278}
 279
 280#define DO_ERROR(trapnr, signr, str, name)				\
 281dotraplinkage void do_##name(struct pt_regs *regs, long error_code)	\
 282{									\
 283	do_error_trap(regs, error_code, str, trapnr, signr);		\
 284}
 285
 286DO_ERROR(X86_TRAP_DE,     SIGFPE,  "divide error",		divide_error)
 287DO_ERROR(X86_TRAP_OF,     SIGSEGV, "overflow",			overflow)
 288DO_ERROR(X86_TRAP_UD,     SIGILL,  "invalid opcode",		invalid_op)
 289DO_ERROR(X86_TRAP_OLD_MF, SIGFPE,  "coprocessor segment overrun",coprocessor_segment_overrun)
 290DO_ERROR(X86_TRAP_TS,     SIGSEGV, "invalid TSS",		invalid_TSS)
 291DO_ERROR(X86_TRAP_NP,     SIGBUS,  "segment not present",	segment_not_present)
 292DO_ERROR(X86_TRAP_SS,     SIGBUS,  "stack segment",		stack_segment)
 293DO_ERROR(X86_TRAP_AC,     SIGBUS,  "alignment check",		alignment_check)
 294
 295#ifdef CONFIG_VMAP_STACK
 296__visible void __noreturn handle_stack_overflow(const char *message,
 297						struct pt_regs *regs,
 298						unsigned long fault_address)
 299{
 300	printk(KERN_EMERG "BUG: stack guard page was hit at %p (stack is %p..%p)\n",
 301		 (void *)fault_address, current->stack,
 302		 (char *)current->stack + THREAD_SIZE - 1);
 303	die(message, regs, 0);
 304
 305	/* Be absolutely certain we don't return. */
 306	panic(message);
 307}
 308#endif
 309
 310#ifdef CONFIG_X86_64
 311/* Runs on IST stack */
 312dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
 313{
 314	static const char str[] = "double fault";
 315	struct task_struct *tsk = current;
 316#ifdef CONFIG_VMAP_STACK
 317	unsigned long cr2;
 318#endif
 319
 320#ifdef CONFIG_X86_ESPFIX64
 321	extern unsigned char native_irq_return_iret[];
 322
 323	/*
 324	 * If IRET takes a non-IST fault on the espfix64 stack, then we
 325	 * end up promoting it to a doublefault.  In that case, modify
 326	 * the stack to make it look like we just entered the #GP
 327	 * handler from user space, similar to bad_iret.
 
 
 
 
 
 
 328	 *
 329	 * No need for ist_enter here because we don't use RCU.
 330	 */
 331	if (((long)regs->sp >> PGDIR_SHIFT) == ESPFIX_PGD_ENTRY &&
 332		regs->cs == __KERNEL_CS &&
 333		regs->ip == (unsigned long)native_irq_return_iret)
 334	{
 335		struct pt_regs *normal_regs = task_pt_regs(current);
 336
 337		/* Fake a #GP(0) from userspace. */
 338		memmove(&normal_regs->ip, (void *)regs->sp, 5*8);
 339		normal_regs->orig_ax = 0;  /* Missing (lost) #GP error code */
 
 
 
 
 
 
 
 
 
 
 
 
 
 340		regs->ip = (unsigned long)general_protection;
 341		regs->sp = (unsigned long)&normal_regs->orig_ax;
 342
 343		return;
 344	}
 345#endif
 346
 347	ist_enter(regs);
 348	notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);
 349
 350	tsk->thread.error_code = error_code;
 351	tsk->thread.trap_nr = X86_TRAP_DF;
 352
 353#ifdef CONFIG_VMAP_STACK
 354	/*
 355	 * If we overflow the stack into a guard page, the CPU will fail
 356	 * to deliver #PF and will send #DF instead.  Similarly, if we
 357	 * take any non-IST exception while too close to the bottom of
 358	 * the stack, the processor will get a page fault while
 359	 * delivering the exception and will generate a double fault.
 360	 *
 361	 * According to the SDM (footnote in 6.15 under "Interrupt 14 -
 362	 * Page-Fault Exception (#PF):
 363	 *
 364	 *   Processors update CR2 whenever a page fault is detected. If a
 365	 *   second page fault occurs while an earlier page fault is being
 366	 *   deliv- ered, the faulting linear address of the second fault will
 367	 *   overwrite the contents of CR2 (replacing the previous
 368	 *   address). These updates to CR2 occur even if the page fault
 369	 *   results in a double fault or occurs during the delivery of a
 370	 *   double fault.
 371	 *
 372	 * The logic below has a small possibility of incorrectly diagnosing
 373	 * some errors as stack overflows.  For example, if the IDT or GDT
 374	 * gets corrupted such that #GP delivery fails due to a bad descriptor
 375	 * causing #GP and we hit this condition while CR2 coincidentally
 376	 * points to the stack guard page, we'll think we overflowed the
 377	 * stack.  Given that we're going to panic one way or another
 378	 * if this happens, this isn't necessarily worth fixing.
 379	 *
 380	 * If necessary, we could improve the test by only diagnosing
 381	 * a stack overflow if the saved RSP points within 47 bytes of
 382	 * the bottom of the stack: if RSP == tsk_stack + 48 and we
 383	 * take an exception, the stack is already aligned and there
 384	 * will be enough room SS, RSP, RFLAGS, CS, RIP, and a
 385	 * possible error code, so a stack overflow would *not* double
 386	 * fault.  With any less space left, exception delivery could
 387	 * fail, and, as a practical matter, we've overflowed the
 388	 * stack even if the actual trigger for the double fault was
 389	 * something else.
 390	 */
 391	cr2 = read_cr2();
 392	if ((unsigned long)task_stack_page(tsk) - 1 - cr2 < PAGE_SIZE)
 393		handle_stack_overflow("kernel stack overflow (double-fault)", regs, cr2);
 394#endif
 395
 396#ifdef CONFIG_DOUBLEFAULT
 397	df_debug(regs, error_code);
 398#endif
 399	/*
 400	 * This is always a kernel trap and never fixable (and thus must
 401	 * never return).
 402	 */
 403	for (;;)
 404		die(str, regs, error_code);
 405}
 406#endif
 407
 408dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
 409{
 410	const struct mpx_bndcsr *bndcsr;
 411	siginfo_t *info;
 412
 413	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
 414	if (notify_die(DIE_TRAP, "bounds", regs, error_code,
 415			X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP)
 416		return;
 417	cond_local_irq_enable(regs);
 418
 419	if (!user_mode(regs))
 420		die("bounds", regs, error_code);
 421
 422	if (!cpu_feature_enabled(X86_FEATURE_MPX)) {
 423		/* The exception is not from Intel MPX */
 424		goto exit_trap;
 425	}
 426
 427	/*
 428	 * We need to look at BNDSTATUS to resolve this exception.
 429	 * A NULL here might mean that it is in its 'init state',
 430	 * which is all zeros which indicates MPX was not
 431	 * responsible for the exception.
 432	 */
 433	bndcsr = get_xsave_field_ptr(XFEATURE_MASK_BNDCSR);
 434	if (!bndcsr)
 435		goto exit_trap;
 436
 437	trace_bounds_exception_mpx(bndcsr);
 438	/*
 439	 * The error code field of the BNDSTATUS register communicates status
 440	 * information of a bound range exception #BR or operation involving
 441	 * bound directory.
 442	 */
 443	switch (bndcsr->bndstatus & MPX_BNDSTA_ERROR_CODE) {
 444	case 2:	/* Bound directory has invalid entry. */
 445		if (mpx_handle_bd_fault())
 446			goto exit_trap;
 447		break; /* Success, it was handled */
 448	case 1: /* Bound violation. */
 449		info = mpx_generate_siginfo(regs);
 450		if (IS_ERR(info)) {
 451			/*
 452			 * We failed to decode the MPX instruction.  Act as if
 453			 * the exception was not caused by MPX.
 454			 */
 455			goto exit_trap;
 456		}
 457		/*
 458		 * Success, we decoded the instruction and retrieved
 459		 * an 'info' containing the address being accessed
 460		 * which caused the exception.  This information
 461		 * allows and application to possibly handle the
 462		 * #BR exception itself.
 463		 */
 464		do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, info);
 465		kfree(info);
 466		break;
 467	case 0: /* No exception caused by Intel MPX operations. */
 468		goto exit_trap;
 469	default:
 470		die("bounds", regs, error_code);
 471	}
 472
 473	return;
 474
 475exit_trap:
 476	/*
 477	 * This path out is for all the cases where we could not
 478	 * handle the exception in some way (like allocating a
 479	 * table or telling userspace about it.  We will also end
 480	 * up here if the kernel has MPX turned off at compile
 481	 * time..
 482	 */
 483	do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, NULL);
 484}
 485
 486dotraplinkage void
 487do_general_protection(struct pt_regs *regs, long error_code)
 488{
 489	struct task_struct *tsk;
 490
 491	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
 492	cond_local_irq_enable(regs);
 493
 
 
 
 
 
 494	if (v8086_mode(regs)) {
 495		local_irq_enable();
 496		handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
 497		return;
 498	}
 499
 500	tsk = current;
 501	if (!user_mode(regs)) {
 502		if (fixup_exception(regs, X86_TRAP_GP))
 503			return;
 504
 505		tsk->thread.error_code = error_code;
 506		tsk->thread.trap_nr = X86_TRAP_GP;
 507		if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
 508			       X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
 509			die("general protection fault", regs, error_code);
 510		return;
 511	}
 512
 513	tsk->thread.error_code = error_code;
 514	tsk->thread.trap_nr = X86_TRAP_GP;
 515
 516	if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
 517			printk_ratelimit()) {
 518		pr_info("%s[%d] general protection ip:%lx sp:%lx error:%lx",
 519			tsk->comm, task_pid_nr(tsk),
 520			regs->ip, regs->sp, error_code);
 521		print_vma_addr(" in ", regs->ip);
 522		pr_cont("\n");
 523	}
 524
 525	force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
 526}
 527NOKPROBE_SYMBOL(do_general_protection);
 528
 529/* May run on IST stack. */
 530dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
 531{
 532#ifdef CONFIG_DYNAMIC_FTRACE
 533	/*
 534	 * ftrace must be first, everything else may cause a recursive crash.
 535	 * See note by declaration of modifying_ftrace_code in ftrace.c
 536	 */
 537	if (unlikely(atomic_read(&modifying_ftrace_code)) &&
 538	    ftrace_int3_handler(regs))
 539		return;
 540#endif
 541	if (poke_int3_handler(regs))
 542		return;
 543
 
 
 
 
 
 
 
 544	ist_enter(regs);
 545	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
 546#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
 547	if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
 548				SIGTRAP) == NOTIFY_STOP)
 549		goto exit;
 550#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
 551
 552#ifdef CONFIG_KPROBES
 553	if (kprobe_int3_handler(regs))
 554		goto exit;
 555#endif
 556
 557	if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
 558			SIGTRAP) == NOTIFY_STOP)
 559		goto exit;
 560
 561	/*
 562	 * Let others (NMI) know that the debug stack is in use
 563	 * as we may switch to the interrupt stack.
 564	 */
 565	debug_stack_usage_inc();
 566	preempt_disable();
 567	cond_local_irq_enable(regs);
 568	do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
 569	cond_local_irq_disable(regs);
 570	preempt_enable_no_resched();
 571	debug_stack_usage_dec();
 572exit:
 573	ist_exit(regs);
 574}
 575NOKPROBE_SYMBOL(do_int3);
 576
 577#ifdef CONFIG_X86_64
 578/*
 579 * Help handler running on IST stack to switch off the IST stack if the
 580 * interrupted code was in user mode. The actual stack switch is done in
 581 * entry_64.S
 582 */
 583asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs)
 584{
 585	struct pt_regs *regs = task_pt_regs(current);
 586	*regs = *eregs;
 
 587	return regs;
 588}
 589NOKPROBE_SYMBOL(sync_regs);
 590
 591struct bad_iret_stack {
 592	void *error_entry_ret;
 593	struct pt_regs regs;
 594};
 595
 596asmlinkage __visible notrace
 597struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
 598{
 599	/*
 600	 * This is called from entry_64.S early in handling a fault
 601	 * caused by a bad iret to user mode.  To handle the fault
 602	 * correctly, we want move our stack frame to task_pt_regs
 603	 * and we want to pretend that the exception came from the
 604	 * iret target.
 
 605	 */
 606	struct bad_iret_stack *new_stack =
 607		container_of(task_pt_regs(current),
 608			     struct bad_iret_stack, regs);
 609
 610	/* Copy the IRET target to the new stack. */
 611	memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
 612
 613	/* Copy the remainder of the stack from the current stack. */
 614	memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));
 615
 616	BUG_ON(!user_mode(&new_stack->regs));
 617	return new_stack;
 618}
 619NOKPROBE_SYMBOL(fixup_bad_iret);
 620#endif
 621
 622static bool is_sysenter_singlestep(struct pt_regs *regs)
 623{
 624	/*
 625	 * We don't try for precision here.  If we're anywhere in the region of
 626	 * code that can be single-stepped in the SYSENTER entry path, then
 627	 * assume that this is a useless single-step trap due to SYSENTER
 628	 * being invoked with TF set.  (We don't know in advance exactly
 629	 * which instructions will be hit because BTF could plausibly
 630	 * be set.)
 631	 */
 632#ifdef CONFIG_X86_32
 633	return (regs->ip - (unsigned long)__begin_SYSENTER_singlestep_region) <
 634		(unsigned long)__end_SYSENTER_singlestep_region -
 635		(unsigned long)__begin_SYSENTER_singlestep_region;
 636#elif defined(CONFIG_IA32_EMULATION)
 637	return (regs->ip - (unsigned long)entry_SYSENTER_compat) <
 638		(unsigned long)__end_entry_SYSENTER_compat -
 639		(unsigned long)entry_SYSENTER_compat;
 640#else
 641	return false;
 642#endif
 643}
 644
 645/*
 646 * Our handling of the processor debug registers is non-trivial.
 647 * We do not clear them on entry and exit from the kernel. Therefore
 648 * it is possible to get a watchpoint trap here from inside the kernel.
 649 * However, the code in ./ptrace.c has ensured that the user can
 650 * only set watchpoints on userspace addresses. Therefore the in-kernel
 651 * watchpoint trap can only occur in code which is reading/writing
 652 * from user space. Such code must not hold kernel locks (since it
 653 * can equally take a page fault), therefore it is safe to call
 654 * force_sig_info even though that claims and releases locks.
 655 *
 656 * Code in ./signal.c ensures that the debug control register
 657 * is restored before we deliver any signal, and therefore that
 658 * user code runs with the correct debug control register even though
 659 * we clear it here.
 660 *
 661 * Being careful here means that we don't have to be as careful in a
 662 * lot of more complicated places (task switching can be a bit lazy
 663 * about restoring all the debug state, and ptrace doesn't have to
 664 * find every occurrence of the TF bit that could be saved away even
 665 * by user code)
 666 *
 667 * May run on IST stack.
 668 */
 669dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
 670{
 671	struct task_struct *tsk = current;
 672	int user_icebp = 0;
 673	unsigned long dr6;
 674	int si_code;
 675
 676	ist_enter(regs);
 677
 678	get_debugreg(dr6, 6);
 679	/*
 680	 * The Intel SDM says:
 681	 *
 682	 *   Certain debug exceptions may clear bits 0-3. The remaining
 683	 *   contents of the DR6 register are never cleared by the
 684	 *   processor. To avoid confusion in identifying debug
 685	 *   exceptions, debug handlers should clear the register before
 686	 *   returning to the interrupted task.
 687	 *
 688	 * Keep it simple: clear DR6 immediately.
 689	 */
 690	set_debugreg(0, 6);
 691
 692	/* Filter out all the reserved bits which are preset to 1 */
 693	dr6 &= ~DR6_RESERVED;
 694
 695	/*
 696	 * The SDM says "The processor clears the BTF flag when it
 697	 * generates a debug exception."  Clear TIF_BLOCKSTEP to keep
 698	 * TIF_BLOCKSTEP in sync with the hardware BTF flag.
 699	 */
 700	clear_tsk_thread_flag(tsk, TIF_BLOCKSTEP);
 701
 702	if (unlikely(!user_mode(regs) && (dr6 & DR_STEP) &&
 703		     is_sysenter_singlestep(regs))) {
 704		dr6 &= ~DR_STEP;
 705		if (!dr6)
 706			goto exit;
 707		/*
 708		 * else we might have gotten a single-step trap and hit a
 709		 * watchpoint at the same time, in which case we should fall
 710		 * through and handle the watchpoint.
 711		 */
 712	}
 713
 714	/*
 715	 * If dr6 has no reason to give us about the origin of this trap,
 716	 * then it's very likely the result of an icebp/int01 trap.
 717	 * User wants a sigtrap for that.
 718	 */
 719	if (!dr6 && user_mode(regs))
 720		user_icebp = 1;
 721
 722	/* Catch kmemcheck conditions! */
 723	if ((dr6 & DR_STEP) && kmemcheck_trap(regs))
 724		goto exit;
 725
 726	/* Store the virtualized DR6 value */
 727	tsk->thread.debugreg6 = dr6;
 728
 729#ifdef CONFIG_KPROBES
 730	if (kprobe_debug_handler(regs))
 731		goto exit;
 732#endif
 733
 734	if (notify_die(DIE_DEBUG, "debug", regs, (long)&dr6, error_code,
 735							SIGTRAP) == NOTIFY_STOP)
 736		goto exit;
 737
 738	/*
 739	 * Let others (NMI) know that the debug stack is in use
 740	 * as we may switch to the interrupt stack.
 741	 */
 742	debug_stack_usage_inc();
 743
 744	/* It's safe to allow irq's after DR6 has been saved */
 745	preempt_disable();
 746	cond_local_irq_enable(regs);
 747
 748	if (v8086_mode(regs)) {
 749		handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
 750					X86_TRAP_DB);
 751		cond_local_irq_disable(regs);
 752		preempt_enable_no_resched();
 753		debug_stack_usage_dec();
 754		goto exit;
 755	}
 756
 757	if (WARN_ON_ONCE((dr6 & DR_STEP) && !user_mode(regs))) {
 758		/*
 759		 * Historical junk that used to handle SYSENTER single-stepping.
 760		 * This should be unreachable now.  If we survive for a while
 761		 * without anyone hitting this warning, we'll turn this into
 762		 * an oops.
 763		 */
 764		tsk->thread.debugreg6 &= ~DR_STEP;
 765		set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
 766		regs->flags &= ~X86_EFLAGS_TF;
 767	}
 768	si_code = get_si_code(tsk->thread.debugreg6);
 769	if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
 770		send_sigtrap(tsk, regs, error_code, si_code);
 771	cond_local_irq_disable(regs);
 772	preempt_enable_no_resched();
 773	debug_stack_usage_dec();
 774
 775exit:
 776#if defined(CONFIG_X86_32)
 777	/*
 778	 * This is the most likely code path that involves non-trivial use
 779	 * of the SYSENTER stack.  Check that we haven't overrun it.
 780	 */
 781	WARN(this_cpu_read(cpu_tss.SYSENTER_stack_canary) != STACK_END_MAGIC,
 782	     "Overran or corrupted SYSENTER stack\n");
 783#endif
 784	ist_exit(regs);
 785}
 786NOKPROBE_SYMBOL(do_debug);
 787
 788/*
 789 * Note that we play around with the 'TS' bit to try to get the
 790 * correct behaviour even in the presence of the asynchronous
 791 * IRQ13 behaviour (the legacy FPU error interrupt).
 792 */
 793static void math_error(struct pt_regs *regs, int error_code, int trapnr)
 794{
 795	struct task_struct *task = current;
 796	struct fpu *fpu = &task->thread.fpu;
 797	siginfo_t info;
 798	char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
 799						"simd exception";
 800
 801	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
 802		return;
 803	cond_local_irq_enable(regs);
 804
 805	if (!user_mode(regs)) {
 806		if (!fixup_exception(regs, trapnr)) {
 807			task->thread.error_code = error_code;
 808			task->thread.trap_nr = trapnr;
 809			die(str, regs, error_code);
 810		}
 811		return;
 812	}
 813
 814	/*
 815	 * Save the info for the exception handler and clear the error.
 816	 */
 817	fpu__save(fpu);
 818
 819	task->thread.trap_nr	= trapnr;
 820	task->thread.error_code = error_code;
 821	info.si_signo		= SIGFPE;
 822	info.si_errno		= 0;
 823	info.si_addr		= (void __user *)uprobe_get_trap_addr(regs);
 824
 825	info.si_code = fpu__exception_code(fpu, trapnr);
 826
 827	/* Retry when we get spurious exceptions: */
 828	if (!info.si_code)
 829		return;
 830
 831	force_sig_info(SIGFPE, &info, task);
 832}
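/*
 * A user-space sketch (not part of this file; feenableexcept() is a
 * glibc extension) that lands in math_error(): unmask the divide-by-zero
 * exception and trigger one.  Whether it arrives as #MF or #XF depends
 * on whether the compiler emits x87 or SSE arithmetic; either way the
 * task gets SIGFPE with si_code FPE_FLTDIV.
 */
#if 0
#define _GNU_SOURCE
#include <fenv.h>

int main(void)
{
	volatile double zero = 0.0;

	feenableexcept(FE_DIVBYZERO);
	return (int)(1.0 / zero);
}
#endif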
 833
 834dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
 835{
 836	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
 837	math_error(regs, error_code, X86_TRAP_MF);
 838}
 839
 840dotraplinkage void
 841do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
 842{
 843	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
 844	math_error(regs, error_code, X86_TRAP_XF);
 845}
 846
 847dotraplinkage void
 848do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
 849{
 850	cond_local_irq_enable(regs);
 851}
 852
 853dotraplinkage void
 854do_device_not_available(struct pt_regs *regs, long error_code)
 855{
 856	unsigned long cr0;
 857
 858	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
 859
 860#ifdef CONFIG_MATH_EMULATION
 861	if (!boot_cpu_has(X86_FEATURE_FPU) && (read_cr0() & X86_CR0_EM)) {
 862		struct math_emu_info info = { };
 863
 864		cond_local_irq_enable(regs);
 865
 866		info.regs = regs;
 867		math_emulate(&info);
 868		return;
 869	}
 870#endif
 871
 872	/* This should not happen. */
 873	cr0 = read_cr0();
 874	if (WARN(cr0 & X86_CR0_TS, "CR0.TS was set")) {
 875		/* Try to fix it up and carry on. */
 876		write_cr0(cr0 & ~X86_CR0_TS);
 877	} else {
 878		/*
 879		 * Something terrible happened, and we're better off trying
 880		 * to kill the task than getting stuck in a never-ending
 881		 * loop of #NM faults.
 882		 */
 883		die("unexpected #NM exception", regs, error_code);
 884	}
 885}
 886NOKPROBE_SYMBOL(do_device_not_available);
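/*
 * For reference, the classic lazy-FPU flow that this handler served
 * before eager FPU switching (a historical sketch, not current code):
 *
 *	write_cr0(read_cr0() | X86_CR0_TS);	context switch marks FPU stale
 *	...					task runs an FPU insn -> #NM
 *	clts();					old handler cleared CR0.TS
 *	copy_kernel_to_fpregs(&fpu->state);	and restored the task's state
 */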
 887
 888#ifdef CONFIG_X86_32
 889dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
 890{
 891	siginfo_t info;
 892
 893	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
 894	local_irq_enable();
 895
 896	info.si_signo = SIGILL;
 897	info.si_errno = 0;
 898	info.si_code = ILL_BADSTK;
 899	info.si_addr = NULL;
 900	if (notify_die(DIE_TRAP, "iret exception", regs, error_code,
 901			X86_TRAP_IRET, SIGILL) != NOTIFY_STOP) {
 902		do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code,
 903			&info);
 904	}
 905}
 906#endif
 907
 908/* Set of traps needed for early debugging. */
 909void __init early_trap_init(void)
 910{
 911	/*
 912	 * Don't use IST to set DEBUG_STACK as it doesn't work until TSS
 913	 * is ready in cpu_init(), which is called from trap_init().
 914	 * Before trap_init(), the CPU runs at ring 0, so it is
 915	 * impossible to hit an invalid stack.  Using the original stack
 916	 * works well enough at this early stage.  DEBUG_STACK will be
 917	 * equipped after cpu_init() in trap_init().
 918	 *
 919	 * We don't need to set trace_idt_table like set_intr_gate(),
 920	 * since we don't have trace_debug and it will be reset to
 921	 * 'debug' in trap_init() by set_intr_gate_ist().
 922	 */
 923	set_intr_gate_notrace(X86_TRAP_DB, debug);
 924	/* int3 can be called from all privilege levels */
 925	set_system_intr_gate(X86_TRAP_BP, &int3);
 926#ifdef CONFIG_X86_32
 927	set_intr_gate(X86_TRAP_PF, page_fault);
 928#endif
 929	load_idt(&idt_descr);
 930}
 931
 932void __init early_trap_pf_init(void)
 933{
 934#ifdef CONFIG_X86_64
 935	set_intr_gate(X86_TRAP_PF, page_fault);
 936#endif
 937}
 938
 939void __init trap_init(void)
 940{
 941	int i;
 942
 943#ifdef CONFIG_EISA
 944	void __iomem *p = early_ioremap(0x0FFFD9, 4);
 945
 946	if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24))
 947		EISA_bus = 1;
 948	early_iounmap(p, 4);
 949#endif
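	/*
	 * The check above builds the little-endian dword for the ASCII
	 * string "EISA": 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24)
	 * == 0x41534945, which is what readl() returns when the bytes
	 * 'E', 'I', 'S', 'A' sit at physical 0x0FFFD9.
	 */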
 950
 951	set_intr_gate(X86_TRAP_DE, divide_error);
 952	set_intr_gate_ist(X86_TRAP_NMI, &nmi, NMI_STACK);
 953	/* int4 (into) can be called from all privilege levels */
 954	set_system_intr_gate(X86_TRAP_OF, &overflow);
 955	set_intr_gate(X86_TRAP_BR, bounds);
 956	set_intr_gate(X86_TRAP_UD, invalid_op);
 957	set_intr_gate(X86_TRAP_NM, device_not_available);
 958#ifdef CONFIG_X86_32
 959	set_task_gate(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS);
 960#else
 961	set_intr_gate_ist(X86_TRAP_DF, &double_fault, DOUBLEFAULT_STACK);
 962#endif
 963	set_intr_gate(X86_TRAP_OLD_MF, coprocessor_segment_overrun);
 964	set_intr_gate(X86_TRAP_TS, invalid_TSS);
 965	set_intr_gate(X86_TRAP_NP, segment_not_present);
 966	set_intr_gate(X86_TRAP_SS, stack_segment);
 967	set_intr_gate(X86_TRAP_GP, general_protection);
 968	set_intr_gate(X86_TRAP_SPURIOUS, spurious_interrupt_bug);
 969	set_intr_gate(X86_TRAP_MF, coprocessor_error);
 970	set_intr_gate(X86_TRAP_AC, alignment_check);
 971#ifdef CONFIG_X86_MCE
 972	set_intr_gate_ist(X86_TRAP_MC, &machine_check, MCE_STACK);
 973#endif
 974	set_intr_gate(X86_TRAP_XF, simd_coprocessor_error);
 975
 976	/* Reserve all the built-in vectors and the syscall vector: */
 977	for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
 978		set_bit(i, used_vectors);
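	/*
	 * FIRST_EXTERNAL_VECTOR is 0x20, so the loop above marks the
	 * CPU-reserved exception vectors 0-31 as used; device interrupts
	 * are allocated from 0x20 upwards.
	 */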
 979
 980#ifdef CONFIG_IA32_EMULATION
 981	set_system_intr_gate(IA32_SYSCALL_VECTOR, entry_INT80_compat);
 982	set_bit(IA32_SYSCALL_VECTOR, used_vectors);
 983#endif
 984
 985#ifdef CONFIG_X86_32
 986	set_system_intr_gate(IA32_SYSCALL_VECTOR, entry_INT80_32);
 987	set_bit(IA32_SYSCALL_VECTOR, used_vectors);
 988#endif
 989
 990	/*
 991	 * Set the IDT descriptor to a fixed read-only location, so that the
 992	 * "sidt" instruction will not leak the location of the kernel, and
 993	 * to defend the IDT against arbitrary memory write vulnerabilities.
 994	 * It will be reloaded in cpu_init(). */
 995	__set_fixmap(FIX_RO_IDT, __pa_symbol(idt_table), PAGE_KERNEL_RO);
 996	idt_descr.address = fix_to_virt(FIX_RO_IDT);
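	/*
	 * Why this matters: "sidt" is not a privileged instruction
	 * (absent UMIP), so user space can read the IDT base, e.g.:
	 *
	 *	struct { u16 limit; unsigned long base; } __packed idtr;
	 *	asm volatile("sidt %0" : "=m" (idtr));
	 *
	 * With the fixmap alias in place, idtr.base reveals only the
	 * fixed read-only address, not where the kernel image lives.
	 */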
 997
 998	/*
 999	 * cpu_init() should act as a barrier for any external CPU state:
1000	 */
1001	cpu_init();
1002
1003	/*
1004	 * X86_TRAP_DB and X86_TRAP_BP have been set
 1005	 * in early_trap_init(). However, IST works only after
1006	 * cpu_init() loads TSS. See comments in early_trap_init().
1007	 */
1008	set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK);
 1009	/* int3 can be called from all privilege levels */
1010	set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK);
1011
1012	x86_init.irqs.trap_init();
1013
1014#ifdef CONFIG_X86_64
1015	memcpy(&debug_idt_table, &idt_table, IDT_ENTRIES * 16);
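	/* Each 64-bit gate descriptor is 16 bytes: 256 entries, 4 KiB. */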
1016	set_nmi_gate(X86_TRAP_DB, &debug);
1017	set_nmi_gate(X86_TRAP_BP, &int3);
1018#endif
1019}