/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/io.h>

#ifdef CONFIG_EISA
#include <linux/ioport.h>
#include <linux/eisa.h>
#endif

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <asm/kmemcheck.h>
#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <linux/atomic.h>
#include <asm/ftrace.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#include <asm/mce.h>

#include <asm/mach_traps.h>

#ifdef CONFIG_X86_64
#include <asm/x86_init.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>
#else
#include <asm/processor-flags.h>
#include <asm/setup.h>

asmlinkage int system_call(void);

/* Do we ignore FPU interrupts ? */
char ignore_fpu_irq;

/*
 * The IDT has to be page-aligned to simplify the Pentium
 * F0 0F bug workaround.
 */
gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
#endif

DECLARE_BITMAP(used_vectors, NR_VECTORS);
EXPORT_SYMBOL_GPL(used_vectors);

static inline void conditional_sti(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void preempt_conditional_sti(struct pt_regs *regs)
{
	inc_preempt_count();
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void conditional_cli(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_disable();
}

static inline void preempt_conditional_cli(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_disable();
	dec_preempt_count();
}
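
/*
 * Illustrative sketch (editorial addition, not from the original file):
 * the conditional helpers above mirror the interrupted context's
 * interrupt flag, so a handler that may run on an IST stack typically
 * brackets its work like this:
 *
 *	preempt_conditional_sti(regs);	// irqs on iff the trapped context had them on
 *	do_trap(trapnr, signr, str, regs, error_code, NULL);
 *	preempt_conditional_cli(regs);	// restore irq and preempt state
 *
 * See do_stack_segment() and do_int3() below for the real uses.
 */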

static void __kprobes
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
	long error_code, siginfo_t *info)
{
	struct task_struct *tsk = current;

#ifdef CONFIG_X86_32
	if (regs->flags & X86_VM_MASK) {
		/*
		 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
		 * On nmi (interrupt 2), do_trap should not be called.
		 */
		if (trapnr < X86_TRAP_UD)
			goto vm86_trap;
		goto trap_signal;
	}
#endif

	if (!user_mode(regs))
		goto kernel_trap;

#ifdef CONFIG_X86_32
trap_signal:
#endif
	/*
	 * We want error_code and trap_nr set for userspace faults and
	 * kernelspace faults which result in die(), but not
	 * kernelspace faults which are fixed up. die() gives the
	 * process no chance to handle the signal and notice the
	 * kernel fault information, so that won't result in polluting
	 * the information about previously queued, but not yet
	 * delivered, faults. See also do_general_protection below.
	 */
	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = trapnr;

#ifdef CONFIG_X86_64
	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
	    printk_ratelimit()) {
		printk(KERN_INFO
		       "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
		       tsk->comm, tsk->pid, str,
		       regs->ip, regs->sp, error_code);
		print_vma_addr(" in ", regs->ip);
		printk("\n");
	}
#endif

	if (info)
		force_sig_info(signr, info, tsk);
	else
		force_sig(signr, tsk);
	return;

kernel_trap:
	if (!fixup_exception(regs)) {
		tsk->thread.error_code = error_code;
		tsk->thread.trap_nr = trapnr;
		die(str, regs, error_code);
	}
	return;

#ifdef CONFIG_X86_32
vm86_trap:
	if (handle_vm86_trap((struct kernel_vm86_regs *) regs,
			     error_code, trapnr))
		goto trap_signal;
	return;
#endif
}

#define DO_ERROR(trapnr, signr, str, name)				\
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)	\
{									\
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)	\
							== NOTIFY_STOP)	\
		return;							\
	conditional_sti(regs);						\
	do_trap(trapnr, signr, str, regs, error_code, NULL);		\
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr)		\
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)	\
{									\
	siginfo_t info;							\
	info.si_signo = signr;						\
	info.si_errno = 0;						\
	info.si_code = sicode;						\
	info.si_addr = (void __user *)siaddr;				\
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)	\
							== NOTIFY_STOP)	\
		return;							\
	conditional_sti(regs);						\
	do_trap(trapnr, signr, str, regs, error_code, &info);		\
}
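
/*
 * For illustration (assuming standard preprocessor expansion; this
 * expansion is not part of the original file):
 *
 *	DO_ERROR(X86_TRAP_OF, SIGSEGV, "overflow", overflow)
 *
 * generates:
 *
 *	dotraplinkage void do_overflow(struct pt_regs *regs, long error_code)
 *	{
 *		if (notify_die(DIE_TRAP, "overflow", regs, error_code,
 *				X86_TRAP_OF, SIGSEGV) == NOTIFY_STOP)
 *			return;
 *		conditional_sti(regs);
 *		do_trap(X86_TRAP_OF, SIGSEGV, "overflow", regs, error_code, NULL);
 *	}
 */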

DO_ERROR_INFO(X86_TRAP_DE, SIGFPE, "divide error", divide_error, FPE_INTDIV,
		regs->ip)
DO_ERROR(X86_TRAP_OF, SIGSEGV, "overflow", overflow)
DO_ERROR(X86_TRAP_BR, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN,
		regs->ip)
DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun",
		coprocessor_segment_overrun)
DO_ERROR(X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present)
#ifdef CONFIG_X86_32
DO_ERROR(X86_TRAP_SS, SIGBUS, "stack segment", stack_segment)
#endif
DO_ERROR_INFO(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check,
		BUS_ADRALN, 0)

#ifdef CONFIG_X86_64
/* Runs on IST stack */
dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code)
{
	if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
		       X86_TRAP_SS, SIGBUS) == NOTIFY_STOP)
		return;
	preempt_conditional_sti(regs);
	do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL);
	preempt_conditional_cli(regs);
}

dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
{
	static const char str[] = "double fault";
	struct task_struct *tsk = current;

	/* Return value not checked because a double fault cannot be ignored */
	notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_DF;

	/*
	 * This is always a kernel trap and never fixable (and thus must
	 * never return).
	 */
	for (;;)
		die(str, regs, error_code);
}
#endif

dotraplinkage void __kprobes
do_general_protection(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk;

	conditional_sti(regs);

#ifdef CONFIG_X86_32
	if (regs->flags & X86_VM_MASK)
		goto gp_in_vm86;
#endif

	tsk = current;
	if (!user_mode(regs))
		goto gp_in_kernel;

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_GP;

	if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
	    printk_ratelimit()) {
		printk(KERN_INFO
			"%s[%d] general protection ip:%lx sp:%lx error:%lx",
			tsk->comm, task_pid_nr(tsk),
			regs->ip, regs->sp, error_code);
		print_vma_addr(" in ", regs->ip);
		printk("\n");
	}

	force_sig(SIGSEGV, tsk);
	return;

#ifdef CONFIG_X86_32
gp_in_vm86:
	local_irq_enable();
	handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
	return;
#endif

gp_in_kernel:
	if (fixup_exception(regs))
		return;

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_GP;
	if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
		       X86_TRAP_GP, SIGSEGV) == NOTIFY_STOP)
		return;
	die("general protection fault", regs, error_code);
}

/* May run on IST stack. */
dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_DYNAMIC_FTRACE
	/*
	 * ftrace must be first, everything else may cause a recursive crash.
	 * See note by declaration of modifying_ftrace_code in ftrace.c
	 */
	if (unlikely(atomic_read(&modifying_ftrace_code)) &&
	    ftrace_int3_handler(regs))
		return;
#endif
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
	if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
			 SIGTRAP) == NOTIFY_STOP)
		return;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */

	if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
		       SIGTRAP) == NOTIFY_STOP)
		return;

	/*
	 * Let others (NMI) know that the debug stack is in use
	 * as we may switch to the interrupt stack.
	 */
	debug_stack_usage_inc();
	preempt_conditional_sti(regs);
	do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
	preempt_conditional_cli(regs);
	debug_stack_usage_dec();
}

#ifdef CONFIG_X86_64
/*
 * Help handler running on IST stack to switch back to user stack
 * for scheduling or signal handling. The actual stack switch is done in
 * entry.S
 */
asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
{
	struct pt_regs *regs = eregs;
	/* Did already sync */
	if (eregs == (struct pt_regs *)eregs->sp)
		;
	/* Exception from user space */
	else if (user_mode(eregs))
		regs = task_pt_regs(current);
	/*
	 * Exception from kernel and interrupts are enabled. Move to
	 * kernel process stack.
	 */
	else if (eregs->flags & X86_EFLAGS_IF)
		regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs));
	if (eregs != regs)
		*regs = *eregs;
	return regs;
}
#endif

/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 *
 * May run on IST stack.
 */
dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk = current;
	int user_icebp = 0;
	unsigned long dr6;
	int si_code;

	get_debugreg(dr6, 6);

	/* Filter out all the reserved bits which are preset to 1 */
	dr6 &= ~DR6_RESERVED;

	/*
	 * If dr6 has no reason to give us about the origin of this trap,
	 * then it's very likely the result of an icebp/int01 trap.
	 * User wants a sigtrap for that.
	 */
	if (!dr6 && user_mode(regs))
		user_icebp = 1;

	/* Catch kmemcheck conditions first of all! */
	if ((dr6 & DR_STEP) && kmemcheck_trap(regs))
		return;

	/* DR6 may or may not be cleared by the CPU */
	set_debugreg(0, 6);

	/*
	 * The processor cleared BTF, so don't mark that we need it set.
	 */
	clear_tsk_thread_flag(tsk, TIF_BLOCKSTEP);

	/* Store the virtualized DR6 value */
	tsk->thread.debugreg6 = dr6;

	if (notify_die(DIE_DEBUG, "debug", regs, PTR_ERR(&dr6), error_code,
						SIGTRAP) == NOTIFY_STOP)
		return;

	/*
	 * Let others (NMI) know that the debug stack is in use
	 * as we may switch to the interrupt stack.
	 */
	debug_stack_usage_inc();

	/* It's safe to allow irq's after DR6 has been saved */
	preempt_conditional_sti(regs);

	if (regs->flags & X86_VM_MASK) {
		handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
				 X86_TRAP_DB);
		preempt_conditional_cli(regs);
		debug_stack_usage_dec();
		return;
	}

	/*
	 * Single-stepping through system calls: ignore any exceptions in
	 * kernel space, but re-enable TF when returning to user mode.
	 *
	 * We already checked v86 mode above, so we can check for kernel mode
	 * by just checking the CPL of CS.
	 */
	if ((dr6 & DR_STEP) && !user_mode(regs)) {
		tsk->thread.debugreg6 &= ~DR_STEP;
		set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
		regs->flags &= ~X86_EFLAGS_TF;
	}
	si_code = get_si_code(tsk->thread.debugreg6);
	if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
		send_sigtrap(tsk, regs, error_code, si_code);
	preempt_conditional_cli(regs);
	debug_stack_usage_dec();

	return;
}

/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
void math_error(struct pt_regs *regs, int error_code, int trapnr)
{
	struct task_struct *task = current;
	siginfo_t info;
	unsigned short err;
	char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
						"simd exception";

	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
		return;
	conditional_sti(regs);

	if (!user_mode_vm(regs)) {
		if (!fixup_exception(regs)) {
			task->thread.error_code = error_code;
			task->thread.trap_nr = trapnr;
			die(str, regs, error_code);
		}
		return;
	}

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	save_init_fpu(task);
	task->thread.trap_nr = trapnr;
	task->thread.error_code = error_code;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_addr = (void __user *)regs->ip;
	if (trapnr == X86_TRAP_MF) {
		unsigned short cwd, swd;
		/*
		 * (~cwd & swd) will mask out exceptions that are not set to unmasked
		 * status. 0x3f is the exception bits in these regs, 0x200 is the
		 * C1 reg you need in case of a stack fault, 0x040 is the stack
		 * fault bit. We should only be taking one exception at a time,
		 * so if this combination doesn't produce any single exception,
		 * then we have a bad program that isn't synchronizing its FPU usage
		 * and it will suffer the consequences since we won't be able to
		 * fully reproduce the context of the exception.
		 */
		cwd = get_fpu_cwd(task);
		swd = get_fpu_swd(task);

		err = swd & ~cwd;
	} else {
		/*
		 * The SIMD FPU exceptions are handled a little differently, as there
		 * is only a single status/control register. Thus, to determine which
		 * unmasked exception was caught we must mask the exception mask bits
		 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
		 */
		unsigned short mxcsr = get_fpu_mxcsr(task);
		err = ~(mxcsr >> 7) & mxcsr;
	}

	if (err & 0x001) {	/* Invalid op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		info.si_code = FPE_FLTINV;
	} else if (err & 0x004) { /* Divide by Zero */
		info.si_code = FPE_FLTDIV;
	} else if (err & 0x008) { /* Overflow */
		info.si_code = FPE_FLTOVF;
	} else if (err & 0x012) { /* Denormal, Underflow */
		info.si_code = FPE_FLTUND;
	} else if (err & 0x020) { /* Precision */
		info.si_code = FPE_FLTRES;
	} else {
		/*
		 * If we're using IRQ 13, or supposedly even some trap
		 * X86_TRAP_MF implementations, it's possible
		 * we get a spurious trap, which is not an error.
		 */
		return;
	}
	force_sig_info(SIGFPE, &info, task);
}
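
/*
 * Worked example for the masking above (editorial sketch, not from the
 * original file): the x87 default control word is 0x037f, i.e. all
 * exceptions masked. A program that unmasks only divide-by-zero runs
 * with cwd = 0x037b; a 1.0/0.0 then sets ZE (bit 2) in the status word,
 * so with swd = 0x0004:
 *
 *	err = swd & ~cwd;	// 0x0004 & 0xfc84 == 0x0004
 *
 * and the if/else chain above maps bit 0x004 to FPE_FLTDIV.
 */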

dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_X86_32
	ignore_fpu_irq = 1;
#endif

	math_error(regs, error_code, X86_TRAP_MF);
}

dotraplinkage void
do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
{
	math_error(regs, error_code, X86_TRAP_XF);
}

dotraplinkage void
do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
{
	conditional_sti(regs);
#if 0
	/* No need to warn about this any longer. */
	printk(KERN_INFO "Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
#endif
}

asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
{
}

asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
{
}

/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled (e.g. with local
 * interrupts disabled, as in the case of do_device_not_available).
 */
void math_state_restore(void)
{
	struct task_struct *tsk = current;

	if (!tsk_used_math(tsk)) {
		local_irq_enable();
		/*
		 * does a slab alloc which can sleep
		 */
		if (init_fpu(tsk)) {
			/*
			 * ran out of memory!
			 */
			do_group_exit(SIGKILL);
			return;
		}
		local_irq_disable();
	}

	__thread_fpu_begin(tsk);
	/*
	 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
	 */
	if (unlikely(restore_fpu_checking(tsk))) {
		__thread_fpu_end(tsk);
		force_sig(SIGSEGV, tsk);
		return;
	}

	tsk->fpu_counter++;
}
EXPORT_SYMBOL_GPL(math_state_restore);

dotraplinkage void __kprobes
do_device_not_available(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_MATH_EMULATION
	if (read_cr0() & X86_CR0_EM) {
		struct math_emu_info info = { };

		conditional_sti(regs);

		info.regs = regs;
		math_emulate(&info);
		return;
	}
#endif
	math_state_restore(); /* interrupts still off */
#ifdef CONFIG_X86_32
	conditional_sti(regs);
#endif
}

#ifdef CONFIG_X86_32
dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
{
	siginfo_t info;
	local_irq_enable();

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_BADSTK;
	info.si_addr = NULL;
	if (notify_die(DIE_TRAP, "iret exception", regs, error_code,
			X86_TRAP_IRET, SIGILL) == NOTIFY_STOP)
		return;
	do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code,
		&info);
}
#endif

/* Set of traps needed for early debugging. */
void __init early_trap_init(void)
{
	set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK);
	/* int3 can be called from all */
	set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK);
	set_intr_gate(X86_TRAP_PF, &page_fault);
	load_idt(&idt_descr);
}

void __init trap_init(void)
{
	int i;

#ifdef CONFIG_EISA
	void __iomem *p = early_ioremap(0x0FFFD9, 4);

	if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24))
		EISA_bus = 1;
	early_iounmap(p, 4);
#endif

	set_intr_gate(X86_TRAP_DE, &divide_error);
	set_intr_gate_ist(X86_TRAP_NMI, &nmi, NMI_STACK);
	/* int4 can be called from all */
	set_system_intr_gate(X86_TRAP_OF, &overflow);
	set_intr_gate(X86_TRAP_BR, &bounds);
	set_intr_gate(X86_TRAP_UD, &invalid_op);
	set_intr_gate(X86_TRAP_NM, &device_not_available);
#ifdef CONFIG_X86_32
	set_task_gate(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS);
#else
	set_intr_gate_ist(X86_TRAP_DF, &double_fault, DOUBLEFAULT_STACK);
#endif
	set_intr_gate(X86_TRAP_OLD_MF, &coprocessor_segment_overrun);
	set_intr_gate(X86_TRAP_TS, &invalid_TSS);
	set_intr_gate(X86_TRAP_NP, &segment_not_present);
	set_intr_gate_ist(X86_TRAP_SS, &stack_segment, STACKFAULT_STACK);
	set_intr_gate(X86_TRAP_GP, &general_protection);
	set_intr_gate(X86_TRAP_SPURIOUS, &spurious_interrupt_bug);
	set_intr_gate(X86_TRAP_MF, &coprocessor_error);
	set_intr_gate(X86_TRAP_AC, &alignment_check);
#ifdef CONFIG_X86_MCE
	set_intr_gate_ist(X86_TRAP_MC, &machine_check, MCE_STACK);
#endif
	set_intr_gate(X86_TRAP_XF, &simd_coprocessor_error);

	/* Reserve all the builtin and the syscall vector: */
	for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
		set_bit(i, used_vectors);

#ifdef CONFIG_IA32_EMULATION
	set_system_intr_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
	set_bit(IA32_SYSCALL_VECTOR, used_vectors);
#endif

#ifdef CONFIG_X86_32
	set_system_trap_gate(SYSCALL_VECTOR, &system_call);
	set_bit(SYSCALL_VECTOR, used_vectors);
#endif

	/*
	 * Should be a barrier for any external CPU state:
	 */
	cpu_init();

	x86_init.irqs.trap_init();

#ifdef CONFIG_X86_64
	memcpy(&nmi_idt_table, &idt_table, IDT_ENTRIES * 16);
	set_nmi_gate(X86_TRAP_DB, &debug);
	set_nmi_gate(X86_TRAP_BP, &int3);
#endif
}

/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/context_tracking.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/io.h>

#ifdef CONFIG_EISA
#include <linux/ioport.h>
#include <linux/eisa.h>
#endif

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <asm/kmemcheck.h>
#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <linux/atomic.h>
#include <asm/ftrace.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/fpu/internal.h>
#include <asm/mce.h>
#include <asm/fixmap.h>
#include <asm/mach_traps.h>
#include <asm/alternative.h>
#include <asm/fpu/xstate.h>
#include <asm/trace/mpx.h>
#include <asm/mpx.h>
#include <asm/vm86.h>

#ifdef CONFIG_X86_64
#include <asm/x86_init.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>

/* No need to be aligned, but done to keep all IDTs defined the same way. */
gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
#else
#include <asm/processor-flags.h>
#include <asm/setup.h>
#include <asm/proto.h>
#endif

/* Must be page-aligned because the real IDT is used in a fixmap. */
gate_desc idt_table[NR_VECTORS] __page_aligned_bss;

DECLARE_BITMAP(used_vectors, NR_VECTORS);
EXPORT_SYMBOL_GPL(used_vectors);

static inline void cond_local_irq_enable(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void cond_local_irq_disable(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_disable();
}

void ist_enter(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
	} else {
		/*
		 * We might have interrupted pretty much anything. In
		 * fact, if we're a machine check, we can even interrupt
		 * NMI processing. We don't want in_nmi() to return true,
		 * but we need to notify RCU.
		 */
		rcu_nmi_enter();
	}

	/*
	 * We are atomic because we're on the IST stack; or we're on
	 * x86_32, in which case we still shouldn't schedule; or we're
	 * on x86_64 and entered from user mode, in which case we're
	 * still atomic unless ist_begin_non_atomic is called.
	 */
	preempt_count_add(HARDIRQ_OFFSET);

	/* This code is a bit fragile. Test it. */
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "ist_enter didn't work");
}

void ist_exit(struct pt_regs *regs)
{
	preempt_count_sub(HARDIRQ_OFFSET);

	if (!user_mode(regs))
		rcu_nmi_exit();
}

/**
 * ist_begin_non_atomic() - begin a non-atomic section in an IST exception
 * @regs:	regs passed to the IST exception handler
 *
 * IST exception handlers normally cannot schedule. As a special
 * exception, if the exception interrupted userspace code (i.e.
 * user_mode(regs) would return true) and the exception was not
 * a double fault, it can be safe to schedule. ist_begin_non_atomic()
 * begins a non-atomic section within an ist_enter()/ist_exit() region.
 * Callers are responsible for enabling interrupts themselves inside
 * the non-atomic section, and callers must call ist_end_non_atomic()
 * before ist_exit().
 */
void ist_begin_non_atomic(struct pt_regs *regs)
{
	BUG_ON(!user_mode(regs));

	/*
	 * Sanity check: we need to be on the normal thread stack. This
	 * will catch asm bugs and any attempt to use ist_preempt_enable
	 * from double_fault.
	 */
	BUG_ON((unsigned long)(current_top_of_stack() -
			       current_stack_pointer()) >= THREAD_SIZE);

	preempt_count_sub(HARDIRQ_OFFSET);
}

/**
 * ist_end_non_atomic() - end a non-atomic section in an IST exception
 *
 * Ends a non-atomic section started with ist_begin_non_atomic().
 */
void ist_end_non_atomic(void)
{
	preempt_count_add(HARDIRQ_OFFSET);
}
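
/*
 * Illustrative sketch (editorial, not from the original file): an IST
 * handler that trapped user code and needs to sleep follows this
 * protocol:
 *
 *	ist_enter(regs);
 *	if (user_mode(regs)) {
 *		ist_begin_non_atomic(regs);
 *		local_irq_enable();	// callers enable irqs themselves
 *		...			// may schedule here
 *		local_irq_disable();
 *		ist_end_non_atomic();	// must precede ist_exit()
 *	}
 *	ist_exit(regs);
 */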

static nokprobe_inline int
do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
		  struct pt_regs *regs, long error_code)
{
	if (v8086_mode(regs)) {
		/*
		 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
		 * On nmi (interrupt 2), do_trap should not be called.
		 */
		if (trapnr < X86_TRAP_UD) {
			if (!handle_vm86_trap((struct kernel_vm86_regs *) regs,
					      error_code, trapnr))
				return 0;
		}
		return -1;
	}

	if (!user_mode(regs)) {
		if (!fixup_exception(regs, trapnr)) {
			tsk->thread.error_code = error_code;
			tsk->thread.trap_nr = trapnr;
			die(str, regs, error_code);
		}
		return 0;
	}

	return -1;
}

static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr,
				 siginfo_t *info)
{
	unsigned long siaddr;
	int sicode;

	switch (trapnr) {
	default:
		return SEND_SIG_PRIV;

	case X86_TRAP_DE:
		sicode = FPE_INTDIV;
		siaddr = uprobe_get_trap_addr(regs);
		break;
	case X86_TRAP_UD:
		sicode = ILL_ILLOPN;
		siaddr = uprobe_get_trap_addr(regs);
		break;
	case X86_TRAP_AC:
		sicode = BUS_ADRALN;
		siaddr = 0;
		break;
	}

	info->si_signo = signr;
	info->si_errno = 0;
	info->si_code = sicode;
	info->si_addr = (void __user *)siaddr;
	return info;
}

static void
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
	long error_code, siginfo_t *info)
{
	struct task_struct *tsk = current;

	if (!do_trap_no_signal(tsk, trapnr, str, regs, error_code))
		return;
	/*
	 * We want error_code and trap_nr set for userspace faults and
	 * kernelspace faults which result in die(), but not
	 * kernelspace faults which are fixed up. die() gives the
	 * process no chance to handle the signal and notice the
	 * kernel fault information, so that won't result in polluting
	 * the information about previously queued, but not yet
	 * delivered, faults. See also do_general_protection below.
	 */
	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = trapnr;

	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
	    printk_ratelimit()) {
		pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
			tsk->comm, tsk->pid, str,
			regs->ip, regs->sp, error_code);
		print_vma_addr(" in ", regs->ip);
		pr_cont("\n");
	}

	force_sig_info(signr, info ?: SEND_SIG_PRIV, tsk);
}
NOKPROBE_SYMBOL(do_trap);

static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
			  unsigned long trapnr, int signr)
{
	siginfo_t info;

	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");

	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
			NOTIFY_STOP) {
		cond_local_irq_enable(regs);
		do_trap(trapnr, signr, str, regs, error_code,
			fill_trap_info(regs, signr, trapnr, &info));
	}
}

#define DO_ERROR(trapnr, signr, str, name)				\
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)	\
{									\
	do_error_trap(regs, error_code, str, trapnr, signr);		\
}

DO_ERROR(X86_TRAP_DE, SIGFPE, "divide error", divide_error)
DO_ERROR(X86_TRAP_OF, SIGSEGV, "overflow", overflow)
DO_ERROR(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op)
DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present)
DO_ERROR(X86_TRAP_SS, SIGBUS, "stack segment", stack_segment)
DO_ERROR(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check)

#ifdef CONFIG_X86_64
/* Runs on IST stack */
dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
{
	static const char str[] = "double fault";
	struct task_struct *tsk = current;

#ifdef CONFIG_X86_ESPFIX64
	extern unsigned char native_irq_return_iret[];

	/*
	 * If IRET takes a non-IST fault on the espfix64 stack, then we
	 * end up promoting it to a doublefault. In that case, modify
	 * the stack to make it look like we just entered the #GP
	 * handler from user space, similar to bad_iret.
	 *
	 * No need for ist_enter here because we don't use RCU.
	 */
	if (((long)regs->sp >> PGDIR_SHIFT) == ESPFIX_PGD_ENTRY &&
		regs->cs == __KERNEL_CS &&
		regs->ip == (unsigned long)native_irq_return_iret)
	{
		struct pt_regs *normal_regs = task_pt_regs(current);

		/* Fake a #GP(0) from userspace. */
		memmove(&normal_regs->ip, (void *)regs->sp, 5*8);
		normal_regs->orig_ax = 0;  /* Missing (lost) #GP error code */
		regs->ip = (unsigned long)general_protection;
		regs->sp = (unsigned long)&normal_regs->orig_ax;

		return;
	}
#endif

	ist_enter(regs);
	notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_DF;

#ifdef CONFIG_DOUBLEFAULT
	df_debug(regs, error_code);
#endif
	/*
	 * This is always a kernel trap and never fixable (and thus must
	 * never return).
	 */
	for (;;)
		die(str, regs, error_code);
}
#endif

dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
{
	const struct mpx_bndcsr *bndcsr;
	siginfo_t *info;

	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
	if (notify_die(DIE_TRAP, "bounds", regs, error_code,
			X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP)
		return;
	cond_local_irq_enable(regs);

	if (!user_mode(regs))
		die("bounds", regs, error_code);

	if (!cpu_feature_enabled(X86_FEATURE_MPX)) {
		/* The exception is not from Intel MPX */
		goto exit_trap;
	}

	/*
	 * We need to look at BNDSTATUS to resolve this exception.
	 * A NULL here might mean that it is in its 'init state',
	 * which is all zeros which indicates MPX was not
	 * responsible for the exception.
	 */
	bndcsr = get_xsave_field_ptr(XFEATURE_MASK_BNDCSR);
	if (!bndcsr)
		goto exit_trap;

	trace_bounds_exception_mpx(bndcsr);
	/*
	 * The error code field of the BNDSTATUS register communicates status
	 * information of a bound range exception #BR or operation involving
	 * bound directory.
	 */
	switch (bndcsr->bndstatus & MPX_BNDSTA_ERROR_CODE) {
	case 2:	/* Bound directory has invalid entry. */
		if (mpx_handle_bd_fault())
			goto exit_trap;
		break; /* Success, it was handled */
	case 1: /* Bound violation. */
		info = mpx_generate_siginfo(regs);
		if (IS_ERR(info)) {
			/*
			 * We failed to decode the MPX instruction. Act as if
			 * the exception was not caused by MPX.
			 */
			goto exit_trap;
		}
		/*
		 * Success, we decoded the instruction and retrieved
		 * an 'info' containing the address being accessed
		 * which caused the exception. This information
		 * allows an application to possibly handle the
		 * #BR exception itself.
		 */
		do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, info);
		kfree(info);
		break;
	case 0: /* No exception caused by Intel MPX operations. */
		goto exit_trap;
	default:
		die("bounds", regs, error_code);
	}

	return;

exit_trap:
	/*
	 * This path out is for all the cases where we could not
	 * handle the exception in some way (like allocating a
	 * table or telling userspace about it). We will also end
	 * up here if the kernel has MPX turned off at compile
	 * time.
	 */
	do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, NULL);
}

dotraplinkage void
do_general_protection(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk;

	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
	cond_local_irq_enable(regs);

	if (v8086_mode(regs)) {
		local_irq_enable();
		handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
		return;
	}

	tsk = current;
	if (!user_mode(regs)) {
		if (fixup_exception(regs, X86_TRAP_GP))
			return;

		tsk->thread.error_code = error_code;
		tsk->thread.trap_nr = X86_TRAP_GP;
		if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
			       X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
			die("general protection fault", regs, error_code);
		return;
	}

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_GP;

	if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
	    printk_ratelimit()) {
		pr_info("%s[%d] general protection ip:%lx sp:%lx error:%lx",
			tsk->comm, task_pid_nr(tsk),
			regs->ip, regs->sp, error_code);
		print_vma_addr(" in ", regs->ip);
		pr_cont("\n");
	}

	force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
}
NOKPROBE_SYMBOL(do_general_protection);

/* May run on IST stack. */
dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_DYNAMIC_FTRACE
	/*
	 * ftrace must be first, everything else may cause a recursive crash.
	 * See note by declaration of modifying_ftrace_code in ftrace.c
	 */
	if (unlikely(atomic_read(&modifying_ftrace_code)) &&
	    ftrace_int3_handler(regs))
		return;
#endif
	if (poke_int3_handler(regs))
		return;

	ist_enter(regs);
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
	if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
			 SIGTRAP) == NOTIFY_STOP)
		goto exit;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */

#ifdef CONFIG_KPROBES
	if (kprobe_int3_handler(regs))
		goto exit;
#endif

	if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
		       SIGTRAP) == NOTIFY_STOP)
		goto exit;

	/*
	 * Let others (NMI) know that the debug stack is in use
	 * as we may switch to the interrupt stack.
	 */
	debug_stack_usage_inc();
	preempt_disable();
	cond_local_irq_enable(regs);
	do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
	cond_local_irq_disable(regs);
	preempt_enable_no_resched();
	debug_stack_usage_dec();
exit:
	ist_exit(regs);
}
NOKPROBE_SYMBOL(do_int3);

#ifdef CONFIG_X86_64
/*
 * Help handler running on IST stack to switch off the IST stack if the
 * interrupted code was in user mode. The actual stack switch is done in
 * entry_64.S
 */
asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs)
{
	struct pt_regs *regs = task_pt_regs(current);
	*regs = *eregs;
	return regs;
}
NOKPROBE_SYMBOL(sync_regs);

struct bad_iret_stack {
	void *error_entry_ret;
	struct pt_regs regs;
};

asmlinkage __visible notrace
struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
{
	/*
	 * This is called from entry_64.S early in handling a fault
	 * caused by a bad iret to user mode. To handle the fault
	 * correctly, we want to move our stack frame to task_pt_regs
	 * and we want to pretend that the exception came from the
	 * iret target.
	 */
	struct bad_iret_stack *new_stack =
		container_of(task_pt_regs(current),
			     struct bad_iret_stack, regs);

	/* Copy the IRET target to the new stack. */
	memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);

	/* Copy the remainder of the stack from the current stack. */
	memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));

	BUG_ON(!user_mode(&new_stack->regs));
	return new_stack;
}
NOKPROBE_SYMBOL(fixup_bad_iret);
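
/*
 * Note (editorial, not from the original file): the 5*8 bytes moved
 * above are the hardware IRET frame the CPU pushes on a 64-bit fault,
 * i.e. RIP, CS, RFLAGS, RSP and SS, which is exactly the tail of
 * struct pt_regs starting at ->ip.
 */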
#endif

static bool is_sysenter_singlestep(struct pt_regs *regs)
{
	/*
	 * We don't try for precision here. If we're anywhere in the region of
	 * code that can be single-stepped in the SYSENTER entry path, then
	 * assume that this is a useless single-step trap due to SYSENTER
	 * being invoked with TF set. (We don't know in advance exactly
	 * which instructions will be hit because BTF could plausibly
	 * be set.)
	 */
#ifdef CONFIG_X86_32
	return (regs->ip - (unsigned long)__begin_SYSENTER_singlestep_region) <
		(unsigned long)__end_SYSENTER_singlestep_region -
		(unsigned long)__begin_SYSENTER_singlestep_region;
#elif defined(CONFIG_IA32_EMULATION)
	return (regs->ip - (unsigned long)entry_SYSENTER_compat) <
		(unsigned long)__end_entry_SYSENTER_compat -
		(unsigned long)entry_SYSENTER_compat;
#else
	return false;
#endif
}

/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 *
 * May run on IST stack.
 */
dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk = current;
	int user_icebp = 0;
	unsigned long dr6;
	int si_code;

	ist_enter(regs);

	get_debugreg(dr6, 6);
	/*
	 * The Intel SDM says:
	 *
	 *   Certain debug exceptions may clear bits 0-3. The remaining
	 *   contents of the DR6 register are never cleared by the
	 *   processor. To avoid confusion in identifying debug
	 *   exceptions, debug handlers should clear the register before
	 *   returning to the interrupted task.
	 *
	 * Keep it simple: clear DR6 immediately.
	 */
	set_debugreg(0, 6);

	/* Filter out all the reserved bits which are preset to 1 */
	dr6 &= ~DR6_RESERVED;

	/*
	 * The SDM says "The processor clears the BTF flag when it
	 * generates a debug exception." Clear TIF_BLOCKSTEP to keep
	 * TIF_BLOCKSTEP in sync with the hardware BTF flag.
	 */
	clear_tsk_thread_flag(tsk, TIF_BLOCKSTEP);

	if (unlikely(!user_mode(regs) && (dr6 & DR_STEP) &&
		     is_sysenter_singlestep(regs))) {
		dr6 &= ~DR_STEP;
		if (!dr6)
			goto exit;
		/*
		 * else we might have gotten a single-step trap and hit a
		 * watchpoint at the same time, in which case we should fall
		 * through and handle the watchpoint.
		 */
	}

	/*
	 * If dr6 has no reason to give us about the origin of this trap,
	 * then it's very likely the result of an icebp/int01 trap.
	 * User wants a sigtrap for that.
	 */
	if (!dr6 && user_mode(regs))
		user_icebp = 1;

	/* Catch kmemcheck conditions! */
	if ((dr6 & DR_STEP) && kmemcheck_trap(regs))
		goto exit;

	/* Store the virtualized DR6 value */
	tsk->thread.debugreg6 = dr6;

#ifdef CONFIG_KPROBES
	if (kprobe_debug_handler(regs))
		goto exit;
#endif

	if (notify_die(DIE_DEBUG, "debug", regs, (long)&dr6, error_code,
			SIGTRAP) == NOTIFY_STOP)
		goto exit;

	/*
	 * Let others (NMI) know that the debug stack is in use
	 * as we may switch to the interrupt stack.
	 */
	debug_stack_usage_inc();

	/* It's safe to allow irq's after DR6 has been saved */
	preempt_disable();
	cond_local_irq_enable(regs);

	if (v8086_mode(regs)) {
		handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
				 X86_TRAP_DB);
		cond_local_irq_disable(regs);
		preempt_enable_no_resched();
		debug_stack_usage_dec();
		goto exit;
	}

	if (WARN_ON_ONCE((dr6 & DR_STEP) && !user_mode(regs))) {
		/*
		 * Historical junk that used to handle SYSENTER single-stepping.
		 * This should be unreachable now. If we survive for a while
		 * without anyone hitting this warning, we'll turn this into
		 * an oops.
		 */
		tsk->thread.debugreg6 &= ~DR_STEP;
		set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
		regs->flags &= ~X86_EFLAGS_TF;
	}
	si_code = get_si_code(tsk->thread.debugreg6);
	if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
		send_sigtrap(tsk, regs, error_code, si_code);
	cond_local_irq_disable(regs);
	preempt_enable_no_resched();
	debug_stack_usage_dec();

exit:
#if defined(CONFIG_X86_32)
	/*
	 * This is the most likely code path that involves non-trivial use
	 * of the SYSENTER stack. Check that we haven't overrun it.
	 */
	WARN(this_cpu_read(cpu_tss.SYSENTER_stack_canary) != STACK_END_MAGIC,
	     "Overran or corrupted SYSENTER stack\n");
#endif
	ist_exit(regs);
}
NOKPROBE_SYMBOL(do_debug);

/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
static void math_error(struct pt_regs *regs, int error_code, int trapnr)
{
	struct task_struct *task = current;
	struct fpu *fpu = &task->thread.fpu;
	siginfo_t info;
	char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
						"simd exception";

	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
		return;
	cond_local_irq_enable(regs);

	if (!user_mode(regs)) {
		if (!fixup_exception(regs, trapnr)) {
			task->thread.error_code = error_code;
			task->thread.trap_nr = trapnr;
			die(str, regs, error_code);
		}
		return;
	}

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	fpu__save(fpu);

	task->thread.trap_nr = trapnr;
	task->thread.error_code = error_code;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_addr = (void __user *)uprobe_get_trap_addr(regs);

	info.si_code = fpu__exception_code(fpu, trapnr);

	/* Retry when we get spurious exceptions: */
	if (!info.si_code)
		return;

	force_sig_info(SIGFPE, &info, task);
}
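
/*
 * Note (editorial, not from the original file): the cwd/swd and MXCSR
 * bit-masking that the earlier revision of this file open-coded in
 * math_error() now lives in fpu__exception_code(), which returns 0 for
 * a spurious trap, hence the retry above instead of a signal.
 */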

dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
{
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
	math_error(regs, error_code, X86_TRAP_MF);
}

dotraplinkage void
do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
{
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
	math_error(regs, error_code, X86_TRAP_XF);
}

dotraplinkage void
do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
{
	cond_local_irq_enable(regs);
}

dotraplinkage void
do_device_not_available(struct pt_regs *regs, long error_code)
{
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");

#ifdef CONFIG_MATH_EMULATION
	if (!boot_cpu_has(X86_FEATURE_FPU) && (read_cr0() & X86_CR0_EM)) {
		struct math_emu_info info = { };

		cond_local_irq_enable(regs);

		info.regs = regs;
		math_emulate(&info);
		return;
	}
#endif
	fpu__restore(&current->thread.fpu); /* interrupts still off */
#ifdef CONFIG_X86_32
	cond_local_irq_enable(regs);
#endif
}
NOKPROBE_SYMBOL(do_device_not_available);

#ifdef CONFIG_X86_32
dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
{
	siginfo_t info;

	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
	local_irq_enable();

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_BADSTK;
	info.si_addr = NULL;
	if (notify_die(DIE_TRAP, "iret exception", regs, error_code,
			X86_TRAP_IRET, SIGILL) != NOTIFY_STOP) {
		do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code,
			&info);
	}
}
#endif

/* Set of traps needed for early debugging. */
void __init early_trap_init(void)
{
	/*
	 * Don't use IST to set DEBUG_STACK as it doesn't work until TSS
	 * is ready in cpu_init() <-- trap_init(). Before trap_init(),
	 * CPU runs at ring 0 so it is impossible to hit an invalid
	 * stack. Using the original stack works well enough at this
	 * early stage. DEBUG_STACK will be equipped after cpu_init() in
	 * trap_init().
	 *
	 * We don't need to set trace_idt_table like set_intr_gate(),
	 * since we don't have trace_debug and it will be reset to
	 * 'debug' in trap_init() by set_intr_gate_ist().
	 */
	set_intr_gate_notrace(X86_TRAP_DB, debug);
	/* int3 can be called from all */
	set_system_intr_gate(X86_TRAP_BP, &int3);
#ifdef CONFIG_X86_32
	set_intr_gate(X86_TRAP_PF, page_fault);
#endif
	load_idt(&idt_descr);
}

void __init early_trap_pf_init(void)
{
#ifdef CONFIG_X86_64
	set_intr_gate(X86_TRAP_PF, page_fault);
#endif
}

void __init trap_init(void)
{
	int i;

#ifdef CONFIG_EISA
	void __iomem *p = early_ioremap(0x0FFFD9, 4);

	if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24))
		EISA_bus = 1;
	early_iounmap(p, 4);
#endif

	set_intr_gate(X86_TRAP_DE, divide_error);
	set_intr_gate_ist(X86_TRAP_NMI, &nmi, NMI_STACK);
	/* int4 can be called from all */
	set_system_intr_gate(X86_TRAP_OF, &overflow);
	set_intr_gate(X86_TRAP_BR, bounds);
	set_intr_gate(X86_TRAP_UD, invalid_op);
	set_intr_gate(X86_TRAP_NM, device_not_available);
#ifdef CONFIG_X86_32
	set_task_gate(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS);
#else
	set_intr_gate_ist(X86_TRAP_DF, &double_fault, DOUBLEFAULT_STACK);
#endif
	set_intr_gate(X86_TRAP_OLD_MF, coprocessor_segment_overrun);
	set_intr_gate(X86_TRAP_TS, invalid_TSS);
	set_intr_gate(X86_TRAP_NP, segment_not_present);
	set_intr_gate(X86_TRAP_SS, stack_segment);
	set_intr_gate(X86_TRAP_GP, general_protection);
	set_intr_gate(X86_TRAP_SPURIOUS, spurious_interrupt_bug);
	set_intr_gate(X86_TRAP_MF, coprocessor_error);
	set_intr_gate(X86_TRAP_AC, alignment_check);
#ifdef CONFIG_X86_MCE
	set_intr_gate_ist(X86_TRAP_MC, &machine_check, MCE_STACK);
#endif
	set_intr_gate(X86_TRAP_XF, simd_coprocessor_error);

	/* Reserve all the builtin and the syscall vector: */
	for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
		set_bit(i, used_vectors);

#ifdef CONFIG_IA32_EMULATION
	set_system_intr_gate(IA32_SYSCALL_VECTOR, entry_INT80_compat);
	set_bit(IA32_SYSCALL_VECTOR, used_vectors);
#endif

#ifdef CONFIG_X86_32
	set_system_intr_gate(IA32_SYSCALL_VECTOR, entry_INT80_32);
	set_bit(IA32_SYSCALL_VECTOR, used_vectors);
#endif

	/*
	 * Set the IDT descriptor to a fixed read-only location, so that the
	 * "sidt" instruction will not leak the location of the kernel, and
	 * to defend the IDT against arbitrary memory write vulnerabilities.
	 * It will be reloaded in cpu_init().
	 */
	__set_fixmap(FIX_RO_IDT, __pa_symbol(idt_table), PAGE_KERNEL_RO);
	idt_descr.address = fix_to_virt(FIX_RO_IDT);

	/*
	 * Should be a barrier for any external CPU state:
	 */
	cpu_init();

	/*
	 * X86_TRAP_DB and X86_TRAP_BP have been set
	 * in early_trap_init(). However, IST works only after
	 * cpu_init() loads TSS. See comments in early_trap_init().
	 */
	set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK);
	/* int3 can be called from all */
	set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK);

	x86_init.irqs.trap_init();

#ifdef CONFIG_X86_64
	memcpy(&debug_idt_table, &idt_table, IDT_ENTRIES * 16);
	set_nmi_gate(X86_TRAP_DB, &debug);
	set_nmi_gate(X86_TRAP_BP, &int3);
#endif
}