arch/x86/kernel/traps.c (v5.4)
  1/*
  2 *  Copyright (C) 1991, 1992  Linus Torvalds
  3 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
  4 *
  5 *  Pentium III FXSR, SSE support
  6 *	Gareth Hughes <gareth@valinux.com>, May 2000
  7 */
  8
  9/*
 10 * Handle hardware traps and faults.
 11 */
 12
 13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 14
 15#include <linux/context_tracking.h>
 16#include <linux/interrupt.h>
 17#include <linux/kallsyms.h>
 18#include <linux/spinlock.h>
 19#include <linux/kprobes.h>
 20#include <linux/uaccess.h>
 21#include <linux/kdebug.h>
 22#include <linux/kgdb.h>
 23#include <linux/kernel.h>
 24#include <linux/export.h>
 25#include <linux/ptrace.h>
 26#include <linux/uprobes.h>
 27#include <linux/string.h>
 28#include <linux/delay.h>
 29#include <linux/errno.h>
 30#include <linux/kexec.h>
 31#include <linux/sched.h>
 32#include <linux/sched/task_stack.h>
 33#include <linux/timer.h>
 34#include <linux/init.h>
 35#include <linux/bug.h>
 36#include <linux/nmi.h>
 37#include <linux/mm.h>
 38#include <linux/smp.h>
 39#include <linux/io.h>
 40
 41#if defined(CONFIG_EDAC)
 42#include <linux/edac.h>
 43#endif
 44
 45#include <asm/stacktrace.h>
 46#include <asm/processor.h>
 47#include <asm/debugreg.h>
 48#include <linux/atomic.h>
 49#include <asm/text-patching.h>
 50#include <asm/ftrace.h>
 51#include <asm/traps.h>
 52#include <asm/desc.h>
 53#include <asm/fpu/internal.h>
 54#include <asm/cpu_entry_area.h>
 55#include <asm/mce.h>
 56#include <asm/fixmap.h>
 57#include <asm/mach_traps.h>
 58#include <asm/alternative.h>
 59#include <asm/fpu/xstate.h>
 60#include <asm/trace/mpx.h>
 61#include <asm/mpx.h>
 62#include <asm/vm86.h>
 63#include <asm/umip.h>
 64
 65#ifdef CONFIG_X86_64
 66#include <asm/x86_init.h>
 67#include <asm/pgalloc.h>
 68#include <asm/proto.h>
 69#else
 70#include <asm/processor-flags.h>
 71#include <asm/setup.h>
 72#include <asm/proto.h>
 73#endif
 74
 75DECLARE_BITMAP(system_vectors, NR_VECTORS);
 76
 77static inline void cond_local_irq_enable(struct pt_regs *regs)
 78{
 79	if (regs->flags & X86_EFLAGS_IF)
 80		local_irq_enable();
 81}
 82
 83static inline void cond_local_irq_disable(struct pt_regs *regs)
 84{
 85	if (regs->flags & X86_EFLAGS_IF)
 86		local_irq_disable();
 87}
 88
 89/*
 90 * In IST context, we explicitly disable preemption.  This serves two
 91 * purposes: it makes it much less likely that we would accidentally
 92 * schedule in IST context and it will force a warning if we somehow
 93 * manage to schedule by accident.
 94 */
 95void ist_enter(struct pt_regs *regs)
 96{
 97	if (user_mode(regs)) {
 98		RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
 99	} else {
100		/*
101		 * We might have interrupted pretty much anything.  In
102		 * fact, if we're a machine check, we can even interrupt
103		 * NMI processing.  We don't want in_nmi() to return true,
104		 * but we need to notify RCU.
105		 */
106		rcu_nmi_enter();
107	}
108
109	preempt_disable();
110
111	/* This code is a bit fragile.  Test it. */
112	RCU_LOCKDEP_WARN(!rcu_is_watching(), "ist_enter didn't work");
113}
114NOKPROBE_SYMBOL(ist_enter);
115
116void ist_exit(struct pt_regs *regs)
117{
118	preempt_enable_no_resched();
119
120	if (!user_mode(regs))
121		rcu_nmi_exit();
122}
123
124/**
125 * ist_begin_non_atomic() - begin a non-atomic section in an IST exception
126 * @regs:	regs passed to the IST exception handler
127 *
128 * IST exception handlers normally cannot schedule.  As a special
129 * exception, if the exception interrupted userspace code (i.e.
130 * user_mode(regs) would return true) and the exception was not
131 * a double fault, it can be safe to schedule.  ist_begin_non_atomic()
132 * begins a non-atomic section within an ist_enter()/ist_exit() region.
133 * Callers are responsible for enabling interrupts themselves inside
134 * the non-atomic section, and callers must call ist_end_non_atomic()
135 * before ist_exit().
136 */
137void ist_begin_non_atomic(struct pt_regs *regs)
138{
139	BUG_ON(!user_mode(regs));
140
141	/*
142	 * Sanity check: we need to be on the normal thread stack.  This
143	 * will catch asm bugs and any attempt to use ist_preempt_enable
144	 * from double_fault.
145	 */
146	BUG_ON(!on_thread_stack());
147
148	preempt_enable_no_resched();
149}
150
151/**
 152 * ist_end_non_atomic() - end a non-atomic section in an IST exception
153 *
154 * Ends a non-atomic section started with ist_begin_non_atomic().
155 */
156void ist_end_non_atomic(void)
157{
158	preempt_disable();
159}
160
161int is_valid_bugaddr(unsigned long addr)
162{
163	unsigned short ud;
164
165	if (addr < TASK_SIZE_MAX)
166		return 0;
167
168	if (probe_kernel_address((unsigned short *)addr, ud))
169		return 0;
170
171	return ud == INSN_UD0 || ud == INSN_UD2;
172}
173
174int fixup_bug(struct pt_regs *regs, int trapnr)
175{
176	if (trapnr != X86_TRAP_UD)
177		return 0;
178
179	switch (report_bug(regs->ip, regs)) {
180	case BUG_TRAP_TYPE_NONE:
181	case BUG_TRAP_TYPE_BUG:
182		break;
183
184	case BUG_TRAP_TYPE_WARN:
185		regs->ip += LEN_UD2;
186		return 1;
187	}
188
189	return 0;
190}
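
fixup_bug() makes a WARN() resumable by advancing regs->ip over the two-byte ud2. The same trick works from userspace: a SIGILL handler can bump RIP in the saved context. A minimal sketch, assuming x86-64 glibc and LEN_UD2 == 2:

#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <ucontext.h>

static void sigill_handler(int sig, siginfo_t *info, void *uc_void)
{
	ucontext_t *uc = uc_void;

	printf("SIGILL at %p, skipping the ud2\n", info->si_addr);
	uc->uc_mcontext.gregs[REG_RIP] += 2;	/* LEN_UD2 */
}

int main(void)
{
	struct sigaction sa = { .sa_sigaction = sigill_handler,
				.sa_flags = SA_SIGINFO };

	sigaction(SIGILL, &sa, NULL);
	asm volatile("ud2");
	puts("resumed after the ud2");
	return 0;
}
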
191
192static nokprobe_inline int
193do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
194		  struct pt_regs *regs,	long error_code)
195{
196	if (v8086_mode(regs)) {
197		/*
198		 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
199		 * On nmi (interrupt 2), do_trap should not be called.
200		 */
201		if (trapnr < X86_TRAP_UD) {
202			if (!handle_vm86_trap((struct kernel_vm86_regs *) regs,
203						error_code, trapnr))
204				return 0;
205		}
206	} else if (!user_mode(regs)) {
207		if (fixup_exception(regs, trapnr, error_code, 0))
208			return 0;
209
210		tsk->thread.error_code = error_code;
211		tsk->thread.trap_nr = trapnr;
212		die(str, regs, error_code);
213	}
214
215	/*
216	 * We want error_code and trap_nr set for userspace faults and
217	 * kernelspace faults which result in die(), but not
218	 * kernelspace faults which are fixed up.  die() gives the
219	 * process no chance to handle the signal and notice the
220	 * kernel fault information, so that won't result in polluting
221	 * the information about previously queued, but not yet
222	 * delivered, faults.  See also do_general_protection below.
223	 */
224	tsk->thread.error_code = error_code;
225	tsk->thread.trap_nr = trapnr;
226
227	return -1;
228}
229
230static void show_signal(struct task_struct *tsk, int signr,
231			const char *type, const char *desc,
232			struct pt_regs *regs, long error_code)
233{
234	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
235	    printk_ratelimit()) {
236		pr_info("%s[%d] %s%s ip:%lx sp:%lx error:%lx",
237			tsk->comm, task_pid_nr(tsk), type, desc,
238			regs->ip, regs->sp, error_code);
239		print_vma_addr(KERN_CONT " in ", regs->ip);
240		pr_cont("\n");
241	}
242}
243
244static void
245do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
246	long error_code, int sicode, void __user *addr)
247{
248	struct task_struct *tsk = current;
249
250
251	if (!do_trap_no_signal(tsk, trapnr, str, regs, error_code))
252		return;
253
254	show_signal(tsk, signr, "trap ", str, regs, error_code);
255
256	if (!sicode)
257		force_sig(signr);
258	else
259		force_sig_fault(signr, sicode, addr);
260}
261NOKPROBE_SYMBOL(do_trap);
262
263static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
264	unsigned long trapnr, int signr, int sicode, void __user *addr)
265{
266	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
267
268	/*
269	 * WARN*()s end up here; fix them up before we call the
270	 * notifier chain.
271	 */
272	if (!user_mode(regs) && fixup_bug(regs, trapnr))
273		return;
274
275	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
276			NOTIFY_STOP) {
277		cond_local_irq_enable(regs);
278		do_trap(trapnr, signr, str, regs, error_code, sicode, addr);
279	}
280}
281
282#define IP ((void __user *)uprobe_get_trap_addr(regs))
283#define DO_ERROR(trapnr, signr, sicode, addr, str, name)		   \
284dotraplinkage void do_##name(struct pt_regs *regs, long error_code)	   \
285{									   \
286	do_error_trap(regs, error_code, str, trapnr, signr, sicode, addr); \
287}
288
289DO_ERROR(X86_TRAP_DE,     SIGFPE,  FPE_INTDIV,   IP, "divide error",        divide_error)
290DO_ERROR(X86_TRAP_OF,     SIGSEGV,          0, NULL, "overflow",            overflow)
291DO_ERROR(X86_TRAP_UD,     SIGILL,  ILL_ILLOPN,   IP, "invalid opcode",      invalid_op)
292DO_ERROR(X86_TRAP_OLD_MF, SIGFPE,           0, NULL, "coprocessor segment overrun", coprocessor_segment_overrun)
293DO_ERROR(X86_TRAP_TS,     SIGSEGV,          0, NULL, "invalid TSS",         invalid_TSS)
294DO_ERROR(X86_TRAP_NP,     SIGBUS,           0, NULL, "segment not present", segment_not_present)
295DO_ERROR(X86_TRAP_SS,     SIGBUS,           0, NULL, "stack segment",       stack_segment)
296DO_ERROR(X86_TRAP_AC,     SIGBUS,  BUS_ADRALN, NULL, "alignment check",     alignment_check)
297#undef IP
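
Each DO_ERROR() line above pins down the signal, si_code, and fault address a trap delivers to userspace; #DE, for instance, arrives as SIGFPE with si_code FPE_INTDIV and si_addr at the faulting instruction. A small standalone check of that mapping (hypothetical demo program):

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

static void fpe_handler(int sig, siginfo_t *info, void *uc)
{
	/* Expect FPE_INTDIV, per DO_ERROR(X86_TRAP_DE, SIGFPE, ...). */
	printf("SIGFPE, si_code=%d (FPE_INTDIV=%d), addr=%p\n",
	       info->si_code, FPE_INTDIV, info->si_addr);
	exit(0);
}

int main(void)
{
	struct sigaction sa = { .sa_sigaction = fpe_handler,
				.sa_flags = SA_SIGINFO };
	volatile int zero = 0;

	sigaction(SIGFPE, &sa, NULL);
	return 1 / zero;	/* #DE -> do_divide_error() -> SIGFPE */
}
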
298
299#ifdef CONFIG_VMAP_STACK
300__visible void __noreturn handle_stack_overflow(const char *message,
301						struct pt_regs *regs,
302						unsigned long fault_address)
303{
304	printk(KERN_EMERG "BUG: stack guard page was hit at %p (stack is %p..%p)\n",
305		 (void *)fault_address, current->stack,
306		 (char *)current->stack + THREAD_SIZE - 1);
307	die(message, regs, 0);
308
309	/* Be absolutely certain we don't return. */
310	panic("%s", message);
311}
312#endif
313
314#ifdef CONFIG_X86_64
315/* Runs on IST stack */
316dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code, unsigned long cr2)
317{
318	static const char str[] = "double fault";
319	struct task_struct *tsk = current;
320
321#ifdef CONFIG_X86_ESPFIX64
322	extern unsigned char native_irq_return_iret[];
323
324	/*
325	 * If IRET takes a non-IST fault on the espfix64 stack, then we
326	 * end up promoting it to a doublefault.  In that case, take
327	 * advantage of the fact that we're not using the normal (TSS.sp0)
328	 * stack right now.  We can write a fake #GP(0) frame at TSS.sp0
329	 * and then modify our own IRET frame so that, when we return,
330	 * we land directly at the #GP(0) vector with the stack already
331	 * set up according to its expectations.
332	 *
333	 * The net result is that our #GP handler will think that we
334	 * entered from usermode with the bad user context.
335	 *
336	 * No need for ist_enter here because we don't use RCU.
337	 */
338	if (((long)regs->sp >> P4D_SHIFT) == ESPFIX_PGD_ENTRY &&
339		regs->cs == __KERNEL_CS &&
340		regs->ip == (unsigned long)native_irq_return_iret)
341	{
342		struct pt_regs *gpregs = (struct pt_regs *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
343
344		/*
345		 * regs->sp points to the failing IRET frame on the
346		 * ESPFIX64 stack.  Copy it to the entry stack.  This fills
347		 * in gpregs->ss through gpregs->ip.
348		 *
349		 */
350		memmove(&gpregs->ip, (void *)regs->sp, 5*8);
351		gpregs->orig_ax = 0;  /* Missing (lost) #GP error code */
352
353		/*
354		 * Adjust our frame so that we return straight to the #GP
355		 * vector with the expected RSP value.  This is safe because
 356		 * we won't enable interrupts or schedule before we invoke
357		 * general_protection, so nothing will clobber the stack
358		 * frame we just set up.
359		 *
360		 * We will enter general_protection with kernel GSBASE,
361		 * which is what the stub expects, given that the faulting
362		 * RIP will be the IRET instruction.
363		 */
364		regs->ip = (unsigned long)general_protection;
365		regs->sp = (unsigned long)&gpregs->orig_ax;
366
367		return;
368	}
369#endif
370
371	ist_enter(regs);
372	notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);
373
374	tsk->thread.error_code = error_code;
375	tsk->thread.trap_nr = X86_TRAP_DF;
376
377#ifdef CONFIG_VMAP_STACK
378	/*
379	 * If we overflow the stack into a guard page, the CPU will fail
380	 * to deliver #PF and will send #DF instead.  Similarly, if we
381	 * take any non-IST exception while too close to the bottom of
382	 * the stack, the processor will get a page fault while
383	 * delivering the exception and will generate a double fault.
384	 *
385	 * According to the SDM (footnote in 6.15 under "Interrupt 14 -
 386	 * Page-Fault Exception (#PF)"):
387	 *
388	 *   Processors update CR2 whenever a page fault is detected. If a
389	 *   second page fault occurs while an earlier page fault is being
390	 *   delivered, the faulting linear address of the second fault will
391	 *   overwrite the contents of CR2 (replacing the previous
392	 *   address). These updates to CR2 occur even if the page fault
393	 *   results in a double fault or occurs during the delivery of a
394	 *   double fault.
395	 *
396	 * The logic below has a small possibility of incorrectly diagnosing
397	 * some errors as stack overflows.  For example, if the IDT or GDT
398	 * gets corrupted such that #GP delivery fails due to a bad descriptor
399	 * causing #GP and we hit this condition while CR2 coincidentally
400	 * points to the stack guard page, we'll think we overflowed the
401	 * stack.  Given that we're going to panic one way or another
402	 * if this happens, this isn't necessarily worth fixing.
403	 *
404	 * If necessary, we could improve the test by only diagnosing
405	 * a stack overflow if the saved RSP points within 47 bytes of
406	 * the bottom of the stack: if RSP == tsk_stack + 48 and we
407	 * take an exception, the stack is already aligned and there
 408	 * will be enough room for SS, RSP, RFLAGS, CS, RIP, and a
409	 * possible error code, so a stack overflow would *not* double
410	 * fault.  With any less space left, exception delivery could
411	 * fail, and, as a practical matter, we've overflowed the
412	 * stack even if the actual trigger for the double fault was
413	 * something else.
414	 */
415	if ((unsigned long)task_stack_page(tsk) - 1 - cr2 < PAGE_SIZE)
416		handle_stack_overflow("kernel stack overflow (double-fault)", regs, cr2);
417#endif
418
419#ifdef CONFIG_DOUBLEFAULT
420	df_debug(regs, error_code);
421#endif
422	/*
423	 * This is always a kernel trap and never fixable (and thus must
424	 * never return).
425	 */
426	for (;;)
427		die(str, regs, error_code);
428}
429#endif
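
The guard-page diagnosis in do_double_fault() has a userspace analogue: runaway recursion runs off the thread stack into its guard page and surfaces as SIGSEGV with si_addr just past the stack. The handler must run on an alternate stack for the same reason #DF gets an IST stack: the overflowed stack has no room left for exception delivery. A sketch, assuming Linux/glibc:

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

static void segv_handler(int sig, siginfo_t *info, void *uc)
{
	printf("SIGSEGV at %p: presumably the stack guard page\n",
	       info->si_addr);
	exit(0);
}

static int recurse(int n)
{
	volatile char pad[1024];	/* burn stack space each frame */

	pad[0] = (char)n;
	return recurse(n + 1) + pad[0];	/* not a tail call */
}

int main(void)
{
	static char altstack[64 * 1024];
	stack_t ss = { .ss_sp = altstack, .ss_size = sizeof(altstack) };
	struct sigaction sa = { .sa_sigaction = segv_handler,
				.sa_flags = SA_SIGINFO | SA_ONSTACK };

	sigaltstack(&ss, NULL);
	sigaction(SIGSEGV, &sa, NULL);
	return recurse(0);
}
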
430
431dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
432{
433	const struct mpx_bndcsr *bndcsr;
434
435	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
436	if (notify_die(DIE_TRAP, "bounds", regs, error_code,
437			X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP)
438		return;
439	cond_local_irq_enable(regs);
440
441	if (!user_mode(regs))
442		die("bounds", regs, error_code);
443
444	if (!cpu_feature_enabled(X86_FEATURE_MPX)) {
445		/* The exception is not from Intel MPX */
446		goto exit_trap;
447	}
448
449	/*
450	 * We need to look at BNDSTATUS to resolve this exception.
451	 * A NULL here might mean that it is in its 'init state',
 452	 * which is all zeros, indicating MPX was not
453	 * responsible for the exception.
454	 */
455	bndcsr = get_xsave_field_ptr(XFEATURE_BNDCSR);
456	if (!bndcsr)
457		goto exit_trap;
458
459	trace_bounds_exception_mpx(bndcsr);
460	/*
 461	 * The error code field of the BNDSTATUS register communicates status
 462	 * information about a bound range exception (#BR) or an operation
 463	 * involving the bound directory.
464	 */
465	switch (bndcsr->bndstatus & MPX_BNDSTA_ERROR_CODE) {
466	case 2:	/* Bound directory has invalid entry. */
467		if (mpx_handle_bd_fault())
468			goto exit_trap;
469		break; /* Success, it was handled */
470	case 1: /* Bound violation. */
471	{
472		struct task_struct *tsk = current;
473		struct mpx_fault_info mpx;
474
475		if (mpx_fault_info(&mpx, regs)) {
476			/*
477			 * We failed to decode the MPX instruction.  Act as if
478			 * the exception was not caused by MPX.
479			 */
480			goto exit_trap;
481		}
482		/*
483		 * Success, we decoded the instruction and retrieved
484		 * an 'mpx' containing the address being accessed
485		 * which caused the exception.  This information
 486		 * allows an application to possibly handle the
487		 * #BR exception itself.
488		 */
489		if (!do_trap_no_signal(tsk, X86_TRAP_BR, "bounds", regs,
490				       error_code))
491			break;
492
493		show_signal(tsk, SIGSEGV, "trap ", "bounds", regs, error_code);
494
495		force_sig_bnderr(mpx.addr, mpx.lower, mpx.upper);
496		break;
497	}
498	case 0: /* No exception caused by Intel MPX operations. */
499		goto exit_trap;
500	default:
501		die("bounds", regs, error_code);
502	}
503
504	return;
505
506exit_trap:
507	/*
508	 * This path out is for all the cases where we could not
509	 * handle the exception in some way (like allocating a
 510	 * table or telling userspace about it).  We will also end
511	 * up here if the kernel has MPX turned off at compile
 512	 * time.
513	 */
514	do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, 0, NULL);
515}
516
517dotraplinkage void
518do_general_protection(struct pt_regs *regs, long error_code)
519{
520	const char *desc = "general protection fault";
521	struct task_struct *tsk;
522
523	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
524	cond_local_irq_enable(regs);
525
526	if (static_cpu_has(X86_FEATURE_UMIP)) {
527		if (user_mode(regs) && fixup_umip_exception(regs))
528			return;
529	}
530
531	if (v8086_mode(regs)) {
532		local_irq_enable();
533		handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
534		return;
535	}
536
537	tsk = current;
538	if (!user_mode(regs)) {
539		if (fixup_exception(regs, X86_TRAP_GP, error_code, 0))
540			return;
541
542		tsk->thread.error_code = error_code;
543		tsk->thread.trap_nr = X86_TRAP_GP;
544
545		/*
546		 * To be potentially processing a kprobe fault and to
547		 * trust the result from kprobe_running(), we have to
548		 * be non-preemptible.
549		 */
550		if (!preemptible() && kprobe_running() &&
551		    kprobe_fault_handler(regs, X86_TRAP_GP))
552			return;
553
554		if (notify_die(DIE_GPF, desc, regs, error_code,
555			       X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
556			die(desc, regs, error_code);
557		return;
558	}
559
560	tsk->thread.error_code = error_code;
561	tsk->thread.trap_nr = X86_TRAP_GP;
562
563	show_signal(tsk, SIGSEGV, "", desc, regs, error_code);
564
565	force_sig(SIGSEGV);
566}
567NOKPROBE_SYMBOL(do_general_protection);
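
For a userspace #GP the function above ends in force_sig(SIGSEGV), which delivers SIGSEGV with si_code SI_KERNEL and no fault address. A privileged instruction is an easy way to trigger it; a minimal sketch:

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

static void segv_handler(int sig, siginfo_t *info, void *uc)
{
	printf("SIGSEGV, si_code=%d (SI_KERNEL=%d)\n",
	       info->si_code, SI_KERNEL);
	exit(0);
}

int main(void)
{
	struct sigaction sa = { .sa_sigaction = segv_handler,
				.sa_flags = SA_SIGINFO };

	sigaction(SIGSEGV, &sa, NULL);
	asm volatile("hlt");	/* privileged at CPL 3 -> #GP -> SIGSEGV */
	return 1;
}
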
568
569dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
570{
571#ifdef CONFIG_DYNAMIC_FTRACE
572	/*
573	 * ftrace must be first, everything else may cause a recursive crash.
574	 * See note by declaration of modifying_ftrace_code in ftrace.c
575	 */
576	if (unlikely(atomic_read(&modifying_ftrace_code)) &&
577	    ftrace_int3_handler(regs))
578		return;
579#endif
580	if (poke_int3_handler(regs))
581		return;
582
583	/*
584	 * Use ist_enter despite the fact that we don't use an IST stack.
585	 * We can be called from a kprobe in non-CONTEXT_KERNEL kernel
586	 * mode or even during context tracking state changes.
587	 *
588	 * This means that we can't schedule.  That's okay.
589	 */
590	ist_enter(regs);
591	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
592#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
593	if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
594				SIGTRAP) == NOTIFY_STOP)
595		goto exit;
596#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
597
598#ifdef CONFIG_KPROBES
599	if (kprobe_int3_handler(regs))
600		goto exit;
601#endif
602
603	if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
604			SIGTRAP) == NOTIFY_STOP)
605		goto exit;
606
607	cond_local_irq_enable(regs);
608	do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, 0, NULL);
609	cond_local_irq_disable(regs);
610
611exit:
612	ist_exit(regs);
613}
614NOKPROBE_SYMBOL(do_int3);
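
When none of the in-kernel consumers above (ftrace, text_poke, kgdb, kprobes) claims the breakpoint, do_trap() hands userspace a SIGTRAP; that is exactly what a debugger sees for a software breakpoint it planted. A standalone sketch:

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

static void trap_handler(int sig)
{
	puts("SIGTRAP from int3");
	exit(0);
}

int main(void)
{
	signal(SIGTRAP, trap_handler);
	asm volatile("int3");	/* #BP -> do_int3() -> SIGTRAP */
	return 1;
}
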
615
616#ifdef CONFIG_X86_64
617/*
618 * Help handler running on a per-cpu (IST or entry trampoline) stack
619 * to switch to the normal thread stack if the interrupted code was in
620 * user mode. The actual stack switch is done in entry_64.S
621 */
622asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs)
623{
624	struct pt_regs *regs = (struct pt_regs *)this_cpu_read(cpu_current_top_of_stack) - 1;
625	if (regs != eregs)
626		*regs = *eregs;
627	return regs;
628}
629NOKPROBE_SYMBOL(sync_regs);
630
631struct bad_iret_stack {
632	void *error_entry_ret;
633	struct pt_regs regs;
634};
635
636asmlinkage __visible notrace
637struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
638{
639	/*
640	 * This is called from entry_64.S early in handling a fault
641	 * caused by a bad iret to user mode.  To handle the fault
642	 * correctly, we want to move our stack frame to where it would
643	 * be had we entered directly on the entry stack (rather than
644	 * just below the IRET frame) and we want to pretend that the
645	 * exception came from the IRET target.
646	 */
647	struct bad_iret_stack *new_stack =
648		(struct bad_iret_stack *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
649
650	/* Copy the IRET target to the new stack. */
651	memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
652
653	/* Copy the remainder of the stack from the current stack. */
654	memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));
655
656	BUG_ON(!user_mode(&new_stack->regs));
657	return new_stack;
658}
659NOKPROBE_SYMBOL(fixup_bad_iret);
660#endif
661
662static bool is_sysenter_singlestep(struct pt_regs *regs)
663{
664	/*
665	 * We don't try for precision here.  If we're anywhere in the region of
666	 * code that can be single-stepped in the SYSENTER entry path, then
667	 * assume that this is a useless single-step trap due to SYSENTER
668	 * being invoked with TF set.  (We don't know in advance exactly
669	 * which instructions will be hit because BTF could plausibly
670	 * be set.)
671	 */
672#ifdef CONFIG_X86_32
673	return (regs->ip - (unsigned long)__begin_SYSENTER_singlestep_region) <
674		(unsigned long)__end_SYSENTER_singlestep_region -
675		(unsigned long)__begin_SYSENTER_singlestep_region;
676#elif defined(CONFIG_IA32_EMULATION)
677	return (regs->ip - (unsigned long)entry_SYSENTER_compat) <
678		(unsigned long)__end_entry_SYSENTER_compat -
679		(unsigned long)entry_SYSENTER_compat;
680#else
681	return false;
682#endif
683}
684
685/*
686 * Our handling of the processor debug registers is non-trivial.
687 * We do not clear them on entry and exit from the kernel. Therefore
688 * it is possible to get a watchpoint trap here from inside the kernel.
689 * However, the code in ./ptrace.c has ensured that the user can
690 * only set watchpoints on userspace addresses. Therefore the in-kernel
691 * watchpoint trap can only occur in code which is reading/writing
692 * from user space. Such code must not hold kernel locks (since it
693 * can equally take a page fault), therefore it is safe to call
694 * force_sig_info even though that claims and releases locks.
695 *
696 * Code in ./signal.c ensures that the debug control register
697 * is restored before we deliver any signal, and therefore that
698 * user code runs with the correct debug control register even though
699 * we clear it here.
700 *
701 * Being careful here means that we don't have to be as careful in a
702 * lot of more complicated places (task switching can be a bit lazy
703 * about restoring all the debug state, and ptrace doesn't have to
704 * find every occurrence of the TF bit that could be saved away even
705 * by user code)
706 *
707 * May run on IST stack.
708 */
709dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
710{
711	struct task_struct *tsk = current;
712	int user_icebp = 0;
713	unsigned long dr6;
714	int si_code;
715
716	ist_enter(regs);
717
718	get_debugreg(dr6, 6);
719	/*
720	 * The Intel SDM says:
721	 *
722	 *   Certain debug exceptions may clear bits 0-3. The remaining
723	 *   contents of the DR6 register are never cleared by the
724	 *   processor. To avoid confusion in identifying debug
725	 *   exceptions, debug handlers should clear the register before
726	 *   returning to the interrupted task.
727	 *
728	 * Keep it simple: clear DR6 immediately.
729	 */
730	set_debugreg(0, 6);
731
732	/* Filter out all the reserved bits which are preset to 1 */
733	dr6 &= ~DR6_RESERVED;
734
735	/*
736	 * The SDM says "The processor clears the BTF flag when it
737	 * generates a debug exception."  Clear TIF_BLOCKSTEP to keep
738	 * TIF_BLOCKSTEP in sync with the hardware BTF flag.
739	 */
740	clear_tsk_thread_flag(tsk, TIF_BLOCKSTEP);
741
742	if (unlikely(!user_mode(regs) && (dr6 & DR_STEP) &&
743		     is_sysenter_singlestep(regs))) {
744		dr6 &= ~DR_STEP;
745		if (!dr6)
746			goto exit;
747		/*
748		 * else we might have gotten a single-step trap and hit a
749		 * watchpoint at the same time, in which case we should fall
750		 * through and handle the watchpoint.
751		 */
752	}
753
754	/*
 755	 * If DR6 gives us no clue about the origin of this trap, then
 756	 * it's very likely the result of an icebp/int01 trap.  The
 757	 * user wants a SIGTRAP for that.
758	 */
759	if (!dr6 && user_mode(regs))
760		user_icebp = 1;
761
762	/* Store the virtualized DR6 value */
763	tsk->thread.debugreg6 = dr6;
764
765#ifdef CONFIG_KPROBES
766	if (kprobe_debug_handler(regs))
767		goto exit;
768#endif
769
770	if (notify_die(DIE_DEBUG, "debug", regs, (long)&dr6, error_code,
771							SIGTRAP) == NOTIFY_STOP)
772		goto exit;
773
774	/*
775	 * Let others (NMI) know that the debug stack is in use
776	 * as we may switch to the interrupt stack.
777	 */
778	debug_stack_usage_inc();
779
780	/* It's safe to allow irq's after DR6 has been saved */
781	cond_local_irq_enable(regs);
782
783	if (v8086_mode(regs)) {
784		handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
785					X86_TRAP_DB);
786		cond_local_irq_disable(regs);
787		debug_stack_usage_dec();
788		goto exit;
789	}
790
791	if (WARN_ON_ONCE((dr6 & DR_STEP) && !user_mode(regs))) {
792		/*
793		 * Historical junk that used to handle SYSENTER single-stepping.
794		 * This should be unreachable now.  If we survive for a while
795		 * without anyone hitting this warning, we'll turn this into
796		 * an oops.
797		 */
798		tsk->thread.debugreg6 &= ~DR_STEP;
799		set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
800		regs->flags &= ~X86_EFLAGS_TF;
801	}
802	si_code = get_si_code(tsk->thread.debugreg6);
803	if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
804		send_sigtrap(regs, error_code, si_code);
805	cond_local_irq_disable(regs);
806	debug_stack_usage_dec();
807
808exit:
809	ist_exit(regs);
810}
811NOKPROBE_SYMBOL(do_debug);
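
The comment above notes that ptrace only lets users plant watchpoints on userspace addresses; a debugger arms one by writing DR0 and DR7 through PTRACE_POKEUSER, and the watched write then lands in do_debug() as a SIGTRAP for the tracee. A condensed sketch with all error handling omitted, assuming x86-64 Linux:

#include <signal.h>
#include <stddef.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/user.h>
#include <sys/wait.h>
#include <unistd.h>

static volatile int watched;	/* the word we watch for writes */

int main(void)
{
	int status;
	pid_t pid = fork();

	if (pid == 0) {				/* tracee */
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);
		watched = 42;			/* fires the DR0 watchpoint */
		_exit(0);
	}

	waitpid(pid, &status, 0);		/* tracee stopped itself */
	/* DR0 = address; DR7 = L0 set, R/W0 = write, LEN0 = 4 bytes. */
	ptrace(PTRACE_POKEUSER, pid,
	       offsetof(struct user, u_debugreg[0]), &watched);
	ptrace(PTRACE_POKEUSER, pid,
	       offsetof(struct user, u_debugreg[7]), (void *)0x000d0001);
	ptrace(PTRACE_CONT, pid, NULL, NULL);

	waitpid(pid, &status, 0);		/* stops on the write */
	printf("tracee stopped, sig=%d (SIGTRAP=%d)\n",
	       WSTOPSIG(status), SIGTRAP);
	kill(pid, SIGKILL);
	return 0;
}
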
812
813/*
814 * Note that we play around with the 'TS' bit in an attempt to get
815 * the correct behaviour even in the presence of the asynchronous
816 * IRQ13 behaviour
817 */
818static void math_error(struct pt_regs *regs, int error_code, int trapnr)
819{
820	struct task_struct *task = current;
821	struct fpu *fpu = &task->thread.fpu;
822	int si_code;
823	char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
824						"simd exception";
825
826	cond_local_irq_enable(regs);
827
828	if (!user_mode(regs)) {
829		if (fixup_exception(regs, trapnr, error_code, 0))
830			return;
831
832		task->thread.error_code = error_code;
833		task->thread.trap_nr = trapnr;
834
835		if (notify_die(DIE_TRAP, str, regs, error_code,
836					trapnr, SIGFPE) != NOTIFY_STOP)
837			die(str, regs, error_code);
838		return;
839	}
840
841	/*
842	 * Save the info for the exception handler and clear the error.
843	 */
844	fpu__save(fpu);
845
846	task->thread.trap_nr	= trapnr;
847	task->thread.error_code = error_code;
848
849	si_code = fpu__exception_code(fpu, trapnr);
850	/* Retry when we get spurious exceptions: */
851	if (!si_code)
852		return;
853
854	force_sig_fault(SIGFPE, si_code,
855			(void __user *)uprobe_get_trap_addr(regs));
856}
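
math_error() only runs for an unmasked FPU/SIMD exception, and fpu__exception_code() derives the si_code from the saved hardware state. Userspace normally never sees this path because all FP exceptions start out masked; glibc's feenableexcept() (link with -lm) unmasks one and makes it observable. A sketch:

#define _GNU_SOURCE
#include <fenv.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

static void fpe_handler(int sig, siginfo_t *info, void *uc)
{
	printf("SIGFPE, si_code=%d (FPE_FLTDIV=%d)\n",
	       info->si_code, FPE_FLTDIV);
	exit(0);
}

int main(void)
{
	struct sigaction sa = { .sa_sigaction = fpe_handler,
				.sa_flags = SA_SIGINFO };
	volatile double zero = 0.0;

	sigaction(SIGFPE, &sa, NULL);
	feenableexcept(FE_DIVBYZERO);	/* unmask the divide-by-zero trap */
	return (int)(1.0 / zero);	/* #XF -> math_error() -> SIGFPE */
}
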
857
858dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
859{
860	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
861	math_error(regs, error_code, X86_TRAP_MF);
862}
863
864dotraplinkage void
865do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
866{
867	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
868	math_error(regs, error_code, X86_TRAP_XF);
869}
870
871dotraplinkage void
872do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
873{
874	cond_local_irq_enable(regs);
875}
876
877dotraplinkage void
878do_device_not_available(struct pt_regs *regs, long error_code)
879{
880	unsigned long cr0 = read_cr0();
881
882	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
883
884#ifdef CONFIG_MATH_EMULATION
885	if (!boot_cpu_has(X86_FEATURE_FPU) && (cr0 & X86_CR0_EM)) {
886		struct math_emu_info info = { };
887
888		cond_local_irq_enable(regs);
889
890		info.regs = regs;
891		math_emulate(&info);
892		return;
893	}
894#endif
895
896	/* This should not happen. */
897	if (WARN(cr0 & X86_CR0_TS, "CR0.TS was set")) {
898		/* Try to fix it up and carry on. */
899		write_cr0(cr0 & ~X86_CR0_TS);
900	} else {
901		/*
902		 * Something terrible happened, and we're better off trying
903		 * to kill the task than getting stuck in a never-ending
904		 * loop of #NM faults.
905		 */
906		die("unexpected #NM exception", regs, error_code);
907	}
908}
909NOKPROBE_SYMBOL(do_device_not_available);
910
911#ifdef CONFIG_X86_32
912dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
913{
914	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
915	local_irq_enable();
916
917	if (notify_die(DIE_TRAP, "iret exception", regs, error_code,
918			X86_TRAP_IRET, SIGILL) != NOTIFY_STOP) {
919		do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code,
920			ILL_BADSTK, (void __user *)NULL);
921	}
922}
923#endif
924
925void __init trap_init(void)
926{
927	/* Init cpu_entry_area before IST entries are set up */
928	setup_cpu_entry_areas();
929
930	idt_setup_traps();
931
932	/*
933	 * Set the IDT descriptor to a fixed read-only location, so that the
934	 * "sidt" instruction will not leak the location of the kernel, and
935	 * to defend the IDT against arbitrary memory write vulnerabilities.
936	 * It will be reloaded in cpu_init() */
937	cea_set_pte(CPU_ENTRY_AREA_RO_IDT_VADDR, __pa_symbol(idt_table),
938		    PAGE_KERNEL_RO);
939	idt_descr.address = CPU_ENTRY_AREA_RO_IDT;
940
941	/*
942	 * Should be a barrier for any external CPU state:
943	 */
944	cpu_init();
945
946	idt_setup_ist_traps();
947
948	x86_init.irqs.trap_init();
949
950	idt_setup_debugidt_traps();
951}
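
trap_init() pins the IDT descriptor at a fixed read-only alias precisely so that an unprivileged sidt cannot leak the IDT's real kernel address; on UMIP-capable hardware, do_general_protection() goes further and has fixup_umip_exception() emulate the instruction with a dummy value. A quick sketch to read whatever the kernel chooses to expose (x86-64):

#include <stdint.h>
#include <stdio.h>

struct __attribute__((packed)) dtr {
	uint16_t limit;
	uint64_t base;
};

int main(void)
{
	struct dtr idtr = { 0 };

	asm volatile("sidt %0" : "=m"(idtr));	/* fixed alias or UMIP dummy */
	printf("IDT base=0x%016llx limit=0x%hx\n",
	       (unsigned long long)idtr.base, idtr.limit);
	return 0;
}
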
arch/x86/kernel/traps.c (v3.15)
  1/*
  2 *  Copyright (C) 1991, 1992  Linus Torvalds
  3 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
  4 *
  5 *  Pentium III FXSR, SSE support
  6 *	Gareth Hughes <gareth@valinux.com>, May 2000
  7 */
  8
  9/*
 10 * Handle hardware traps and faults.
 11 */
 12
 13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 14
 15#include <linux/context_tracking.h>
 16#include <linux/interrupt.h>
 17#include <linux/kallsyms.h>
 18#include <linux/spinlock.h>
 19#include <linux/kprobes.h>
 20#include <linux/uaccess.h>
 21#include <linux/kdebug.h>
 22#include <linux/kgdb.h>
 23#include <linux/kernel.h>
 24#include <linux/module.h>
 25#include <linux/ptrace.h>
 26#include <linux/string.h>
 27#include <linux/delay.h>
 28#include <linux/errno.h>
 29#include <linux/kexec.h>
 30#include <linux/sched.h>
 31#include <linux/timer.h>
 32#include <linux/init.h>
 33#include <linux/bug.h>
 34#include <linux/nmi.h>
 35#include <linux/mm.h>
 36#include <linux/smp.h>
 37#include <linux/io.h>
 38
 39#ifdef CONFIG_EISA
 40#include <linux/ioport.h>
 41#include <linux/eisa.h>
 42#endif
 43
 44#if defined(CONFIG_EDAC)
 45#include <linux/edac.h>
 46#endif
 47
 48#include <asm/kmemcheck.h>
 49#include <asm/stacktrace.h>
 50#include <asm/processor.h>
 51#include <asm/debugreg.h>
 52#include <linux/atomic.h>
 53#include <asm/ftrace.h>
 54#include <asm/traps.h>
 55#include <asm/desc.h>
 56#include <asm/i387.h>
 57#include <asm/fpu-internal.h>
 58#include <asm/mce.h>
 59#include <asm/fixmap.h>
 60#include <asm/mach_traps.h>
 61#include <asm/alternative.h>
 62
 63#ifdef CONFIG_X86_64
 64#include <asm/x86_init.h>
 65#include <asm/pgalloc.h>
 66#include <asm/proto.h>
 67
 68/* No need to be aligned, but done to keep all IDTs defined the same way. */
 69gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
 70#else
 71#include <asm/processor-flags.h>
 72#include <asm/setup.h>
 73
 74asmlinkage int system_call(void);
 75#endif
 76
 77/* Must be page-aligned because the real IDT is used in a fixmap. */
 78gate_desc idt_table[NR_VECTORS] __page_aligned_bss;
 79
 80DECLARE_BITMAP(used_vectors, NR_VECTORS);
 81EXPORT_SYMBOL_GPL(used_vectors);
 82
 83static inline void conditional_sti(struct pt_regs *regs)
 84{
 85	if (regs->flags & X86_EFLAGS_IF)
 86		local_irq_enable();
 87}
 88
 89static inline void preempt_conditional_sti(struct pt_regs *regs)
 90{
 91	preempt_count_inc();
 92	if (regs->flags & X86_EFLAGS_IF)
 93		local_irq_enable();
 94}
 95
 96static inline void conditional_cli(struct pt_regs *regs)
 97{
 98	if (regs->flags & X86_EFLAGS_IF)
 99		local_irq_disable();
100}
101
102static inline void preempt_conditional_cli(struct pt_regs *regs)
103{
104	if (regs->flags & X86_EFLAGS_IF)
105		local_irq_disable();
106	preempt_count_dec();
107}
108
109static int __kprobes
110do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
111		  struct pt_regs *regs,	long error_code)
112{
113#ifdef CONFIG_X86_32
114	if (regs->flags & X86_VM_MASK) {
115		/*
116		 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
117		 * On nmi (interrupt 2), do_trap should not be called.
118		 */
119		if (trapnr < X86_TRAP_UD) {
120			if (!handle_vm86_trap((struct kernel_vm86_regs *) regs,
121						error_code, trapnr))
122				return 0;
123		}
124		return -1;
125	}
126#endif
127	if (!user_mode(regs)) {
128		if (!fixup_exception(regs)) {
129			tsk->thread.error_code = error_code;
130			tsk->thread.trap_nr = trapnr;
131			die(str, regs, error_code);
132		}
133		return 0;
134	}
135
136	return -1;
137}
138
139static void __kprobes
140do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
141	long error_code, siginfo_t *info)
142{
143	struct task_struct *tsk = current;
144
145
146	if (!do_trap_no_signal(tsk, trapnr, str, regs, error_code))
147		return;
148	/*
149	 * We want error_code and trap_nr set for userspace faults and
150	 * kernelspace faults which result in die(), but not
151	 * kernelspace faults which are fixed up.  die() gives the
152	 * process no chance to handle the signal and notice the
153	 * kernel fault information, so that won't result in polluting
154	 * the information about previously queued, but not yet
155	 * delivered, faults.  See also do_general_protection below.
156	 */
157	tsk->thread.error_code = error_code;
158	tsk->thread.trap_nr = trapnr;
159
160#ifdef CONFIG_X86_64
161	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
162	    printk_ratelimit()) {
163		pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
164			tsk->comm, tsk->pid, str,
165			regs->ip, regs->sp, error_code);
166		print_vma_addr(" in ", regs->ip);
167		pr_cont("\n");
168	}
169#endif
170
171	if (info)
172		force_sig_info(signr, info, tsk);
173	else
174		force_sig(signr, tsk);
175}
176
177#define DO_ERROR(trapnr, signr, str, name)				\
178dotraplinkage void do_##name(struct pt_regs *regs, long error_code)	\
179{									\
180	enum ctx_state prev_state;					\
181									\
182	prev_state = exception_enter();					\
183	if (notify_die(DIE_TRAP, str, regs, error_code,			\
184			trapnr, signr) == NOTIFY_STOP) {		\
185		exception_exit(prev_state);				\
186		return;							\
187	}								\
188	conditional_sti(regs);						\
189	do_trap(trapnr, signr, str, regs, error_code, NULL);		\
190	exception_exit(prev_state);					\
191}
192
193#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr)		\
194dotraplinkage void do_##name(struct pt_regs *regs, long error_code)	\
195{									\
196	siginfo_t info;							\
197	enum ctx_state prev_state;					\
198									\
199	info.si_signo = signr;						\
200	info.si_errno = 0;						\
201	info.si_code = sicode;						\
202	info.si_addr = (void __user *)siaddr;				\
203	prev_state = exception_enter();					\
204	if (notify_die(DIE_TRAP, str, regs, error_code,			\
205			trapnr, signr) == NOTIFY_STOP) {		\
206		exception_exit(prev_state);				\
207		return;							\
208	}								\
209	conditional_sti(regs);						\
210	do_trap(trapnr, signr, str, regs, error_code, &info);		\
211	exception_exit(prev_state);					\
212}
213
214DO_ERROR_INFO(X86_TRAP_DE,     SIGFPE,  "divide error",			divide_error,		     FPE_INTDIV, regs->ip )
215DO_ERROR     (X86_TRAP_OF,     SIGSEGV, "overflow",			overflow					  )
216DO_ERROR     (X86_TRAP_BR,     SIGSEGV, "bounds",			bounds						  )
217DO_ERROR_INFO(X86_TRAP_UD,     SIGILL,  "invalid opcode",		invalid_op,		     ILL_ILLOPN, regs->ip )
218DO_ERROR     (X86_TRAP_OLD_MF, SIGFPE,  "coprocessor segment overrun",	coprocessor_segment_overrun			  )
219DO_ERROR     (X86_TRAP_TS,     SIGSEGV, "invalid TSS",			invalid_TSS					  )
220DO_ERROR     (X86_TRAP_NP,     SIGBUS,  "segment not present",		segment_not_present				  )
221#ifdef CONFIG_X86_32
222DO_ERROR     (X86_TRAP_SS,     SIGBUS,  "stack segment",		stack_segment					  )
223#endif
224DO_ERROR_INFO(X86_TRAP_AC,     SIGBUS,  "alignment check",		alignment_check,	     BUS_ADRALN, 0	  )
225
226#ifdef CONFIG_X86_64
227/* Runs on IST stack */
228dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code)
229{
230	enum ctx_state prev_state;
231
232	prev_state = exception_enter();
233	if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
234		       X86_TRAP_SS, SIGBUS) != NOTIFY_STOP) {
235		preempt_conditional_sti(regs);
236		do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL);
237		preempt_conditional_cli(regs);
238	}
239	exception_exit(prev_state);
240}
241
242dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
243{
244	static const char str[] = "double fault";
245	struct task_struct *tsk = current;
246
247	exception_enter();
248	/* Return not checked because a double fault cannot be ignored */
249	notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);
250
251	tsk->thread.error_code = error_code;
252	tsk->thread.trap_nr = X86_TRAP_DF;
253
254#ifdef CONFIG_DOUBLEFAULT
255	df_debug(regs, error_code);
256#endif
257	/*
258	 * This is always a kernel trap and never fixable (and thus must
259	 * never return).
260	 */
261	for (;;)
262		die(str, regs, error_code);
263}
264#endif
265
266dotraplinkage void __kprobes
267do_general_protection(struct pt_regs *regs, long error_code)
268{
269	struct task_struct *tsk;
270	enum ctx_state prev_state;
271
272	prev_state = exception_enter();
273	conditional_sti(regs);
274
275#ifdef CONFIG_X86_32
276	if (regs->flags & X86_VM_MASK) {
277		local_irq_enable();
278		handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
279		goto exit;
280	}
281#endif
282
283	tsk = current;
284	if (!user_mode(regs)) {
285		if (fixup_exception(regs))
286			goto exit;
287
288		tsk->thread.error_code = error_code;
289		tsk->thread.trap_nr = X86_TRAP_GP;
290		if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
291			       X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
292			die("general protection fault", regs, error_code);
293		goto exit;
294	}
295
296	tsk->thread.error_code = error_code;
297	tsk->thread.trap_nr = X86_TRAP_GP;
298
299	if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
300			printk_ratelimit()) {
301		pr_info("%s[%d] general protection ip:%lx sp:%lx error:%lx",
302			tsk->comm, task_pid_nr(tsk),
303			regs->ip, regs->sp, error_code);
304		print_vma_addr(" in ", regs->ip);
305		pr_cont("\n");
306	}
307
308	force_sig(SIGSEGV, tsk);
309exit:
310	exception_exit(prev_state);
311}
312
313/* May run on IST stack. */
314dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_code)
315{
316	enum ctx_state prev_state;
317
318#ifdef CONFIG_DYNAMIC_FTRACE
319	/*
320	 * ftrace must be first, everything else may cause a recursive crash.
321	 * See note by declaration of modifying_ftrace_code in ftrace.c
322	 */
323	if (unlikely(atomic_read(&modifying_ftrace_code)) &&
324	    ftrace_int3_handler(regs))
325		return;
326#endif
327	if (poke_int3_handler(regs))
328		return;
329
330	prev_state = exception_enter();
331#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
332	if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
333				SIGTRAP) == NOTIFY_STOP)
334		goto exit;
335#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
336
337	if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
338			SIGTRAP) == NOTIFY_STOP)
339		goto exit;
340
341	/*
342	 * Let others (NMI) know that the debug stack is in use
343	 * as we may switch to the interrupt stack.
344	 */
345	debug_stack_usage_inc();
346	preempt_conditional_sti(regs);
347	do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
348	preempt_conditional_cli(regs);
349	debug_stack_usage_dec();
350exit:
351	exception_exit(prev_state);
352}
353
354#ifdef CONFIG_X86_64
355/*
356 * Help handler running on IST stack to switch back to user stack
357 * for scheduling or signal handling. The actual stack switch is done in
358 * entry.S
359 */
360asmlinkage __visible __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
361{
362	struct pt_regs *regs = eregs;
 363	/* Already synced */
364	if (eregs == (struct pt_regs *)eregs->sp)
365		;
366	/* Exception from user space */
367	else if (user_mode(eregs))
368		regs = task_pt_regs(current);
369	/*
370	 * Exception from kernel and interrupts are enabled. Move to
371	 * kernel process stack.
372	 */
373	else if (eregs->flags & X86_EFLAGS_IF)
374		regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs));
375	if (eregs != regs)
376		*regs = *eregs;
377	return regs;
378}
379#endif
380
381/*
382 * Our handling of the processor debug registers is non-trivial.
383 * We do not clear them on entry and exit from the kernel. Therefore
384 * it is possible to get a watchpoint trap here from inside the kernel.
385 * However, the code in ./ptrace.c has ensured that the user can
386 * only set watchpoints on userspace addresses. Therefore the in-kernel
387 * watchpoint trap can only occur in code which is reading/writing
388 * from user space. Such code must not hold kernel locks (since it
389 * can equally take a page fault), therefore it is safe to call
390 * force_sig_info even though that claims and releases locks.
391 *
392 * Code in ./signal.c ensures that the debug control register
393 * is restored before we deliver any signal, and therefore that
394 * user code runs with the correct debug control register even though
395 * we clear it here.
396 *
397 * Being careful here means that we don't have to be as careful in a
398 * lot of more complicated places (task switching can be a bit lazy
399 * about restoring all the debug state, and ptrace doesn't have to
400 * find every occurrence of the TF bit that could be saved away even
401 * by user code)
402 *
403 * May run on IST stack.
404 */
405dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
406{
407	struct task_struct *tsk = current;
408	enum ctx_state prev_state;
409	int user_icebp = 0;
410	unsigned long dr6;
411	int si_code;
412
413	prev_state = exception_enter();
414
415	get_debugreg(dr6, 6);
416
417	/* Filter out all the reserved bits which are preset to 1 */
418	dr6 &= ~DR6_RESERVED;
419
420	/*
421	 * If DR6 gives us no clue about the origin of this trap, then
422	 * it's very likely the result of an icebp/int01 trap.  The
423	 * user wants a SIGTRAP for that.
424	 */
425	if (!dr6 && user_mode(regs))
426		user_icebp = 1;
427
428	/* Catch kmemcheck conditions first of all! */
429	if ((dr6 & DR_STEP) && kmemcheck_trap(regs))
430		goto exit;
431
432	/* DR6 may or may not be cleared by the CPU */
433	set_debugreg(0, 6);
434
435	/*
436	 * The processor cleared BTF, so don't mark that we need it set.
437	 */
438	clear_tsk_thread_flag(tsk, TIF_BLOCKSTEP);
439
440	/* Store the virtualized DR6 value */
441	tsk->thread.debugreg6 = dr6;
442
443	if (notify_die(DIE_DEBUG, "debug", regs, (long)&dr6, error_code,
444							SIGTRAP) == NOTIFY_STOP)
445		goto exit;
446
447	/*
448	 * Let others (NMI) know that the debug stack is in use
449	 * as we may switch to the interrupt stack.
450	 */
451	debug_stack_usage_inc();
452
453	/* It's safe to allow irq's after DR6 has been saved */
454	preempt_conditional_sti(regs);
455
456	if (regs->flags & X86_VM_MASK) {
457		handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
458					X86_TRAP_DB);
459		preempt_conditional_cli(regs);
460		debug_stack_usage_dec();
461		goto exit;
462	}
463
464	/*
465	 * Single-stepping through system calls: ignore any exceptions in
466	 * kernel space, but re-enable TF when returning to user mode.
467	 *
468	 * We already checked v86 mode above, so we can check for kernel mode
469	 * by just checking the CPL of CS.
470	 */
471	if ((dr6 & DR_STEP) && !user_mode(regs)) {
472		tsk->thread.debugreg6 &= ~DR_STEP;
473		set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
474		regs->flags &= ~X86_EFLAGS_TF;
475	}
476	si_code = get_si_code(tsk->thread.debugreg6);
477	if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
478		send_sigtrap(tsk, regs, error_code, si_code);
479	preempt_conditional_cli(regs);
480	debug_stack_usage_dec();
481
482exit:
483	exception_exit(prev_state);
484}
485
486/*
487 * Note that we play around with the 'TS' bit in an attempt to get
488 * the correct behaviour even in the presence of the asynchronous
489 * IRQ13 behaviour
490 */
491void math_error(struct pt_regs *regs, int error_code, int trapnr)
492{
493	struct task_struct *task = current;
494	siginfo_t info;
495	unsigned short err;
496	char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
497						"simd exception";
498
499	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
500		return;
501	conditional_sti(regs);
502
503	if (!user_mode_vm(regs))
504	{
505		if (!fixup_exception(regs)) {
506			task->thread.error_code = error_code;
507			task->thread.trap_nr = trapnr;
508			die(str, regs, error_code);
509		}
510		return;
511	}
512
513	/*
514	 * Save the info for the exception handler and clear the error.
515	 */
516	save_init_fpu(task);
517	task->thread.trap_nr = trapnr;
518	task->thread.error_code = error_code;
519	info.si_signo = SIGFPE;
520	info.si_errno = 0;
521	info.si_addr = (void __user *)regs->ip;
522	if (trapnr == X86_TRAP_MF) {
523		unsigned short cwd, swd;
524		/*
525		 * (~cwd & swd) will mask out exceptions that are not set to unmasked
526		 * status.  0x3f is the exception bits in these regs, 0x200 is the
527		 * C1 reg you need in case of a stack fault, 0x040 is the stack
528		 * fault bit.  We should only be taking one exception at a time,
529		 * so if this combination doesn't produce any single exception,
530		 * then we have a bad program that isn't synchronizing its FPU usage
531		 * and it will suffer the consequences since we won't be able to
532		 * fully reproduce the context of the exception
533		 */
534		cwd = get_fpu_cwd(task);
535		swd = get_fpu_swd(task);
536
537		err = swd & ~cwd;
538	} else {
539		/*
540		 * The SIMD FPU exceptions are handled a little differently, as there
541		 * is only a single status/control register.  Thus, to determine which
542		 * unmasked exception was caught we must mask the exception mask bits
543		 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
544		 */
545		unsigned short mxcsr = get_fpu_mxcsr(task);
546		err = ~(mxcsr >> 7) & mxcsr;
547	}
548
549	if (err & 0x001) {	/* Invalid op */
550		/*
551		 * swd & 0x240 == 0x040: Stack Underflow
552		 * swd & 0x240 == 0x240: Stack Overflow
553		 * User must clear the SF bit (0x40) if set
554		 */
555		info.si_code = FPE_FLTINV;
556	} else if (err & 0x004) { /* Divide by Zero */
557		info.si_code = FPE_FLTDIV;
558	} else if (err & 0x008) { /* Overflow */
559		info.si_code = FPE_FLTOVF;
560	} else if (err & 0x012) { /* Denormal, Underflow */
561		info.si_code = FPE_FLTUND;
562	} else if (err & 0x020) { /* Precision */
563		info.si_code = FPE_FLTRES;
564	} else {
565		/*
566		 * If we're using IRQ 13, or supposedly even some trap
567		 * X86_TRAP_MF implementations, it's possible
568		 * we get a spurious trap, which is not an error.
569		 */
570		return;
571	}
572	force_sig_info(SIGFPE, &info, task);
573}
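
The cwd/swd decode above, restated as a standalone helper: mask the status word by the exceptions the control word leaves unmasked, then map the first matching bit to a si_code, with 0 meaning a spurious trap. Mirrors the bit tests in math_error():

#include <signal.h>

static int x87_si_code(unsigned short cwd, unsigned short swd)
{
	unsigned short err = swd & ~cwd;	/* unmasked exceptions only */

	if (err & 0x001)	/* IE: invalid op, incl. stack under/overflow */
		return FPE_FLTINV;
	if (err & 0x004)	/* ZE: divide by zero */
		return FPE_FLTDIV;
	if (err & 0x008)	/* OE: overflow */
		return FPE_FLTOVF;
	if (err & 0x012)	/* DE/UE: denormal or underflow */
		return FPE_FLTUND;
	if (err & 0x020)	/* PE: precision (inexact) */
		return FPE_FLTRES;
	return 0;		/* spurious trap, not an error */
}
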
574
575dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
576{
577	enum ctx_state prev_state;
578
579	prev_state = exception_enter();
580	math_error(regs, error_code, X86_TRAP_MF);
581	exception_exit(prev_state);
582}
583
584dotraplinkage void
585do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
586{
587	enum ctx_state prev_state;
588
589	prev_state = exception_enter();
590	math_error(regs, error_code, X86_TRAP_XF);
591	exception_exit(prev_state);
592}
593
594dotraplinkage void
595do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
596{
597	conditional_sti(regs);
598#if 0
599	/* No need to warn about this any longer. */
600	pr_info("Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
601#endif
602}
603
604asmlinkage __visible void __attribute__((weak)) smp_thermal_interrupt(void)
605{
606}
607
608asmlinkage __visible void __attribute__((weak)) smp_threshold_interrupt(void)
609{
610}
611
612/*
613 * 'math_state_restore()' saves the current math information in the
614 * old math state array, and gets the new ones from the current task
615 *
616 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
617 * Don't touch unless you *really* know how it works.
618 *
619 * Must be called with kernel preemption disabled (e.g. with
620 * local interrupts disabled, as in the case of do_device_not_available).
621 */
622void math_state_restore(void)
623{
624	struct task_struct *tsk = current;
625
626	if (!tsk_used_math(tsk)) {
627		local_irq_enable();
628		/*
629		 * does a slab alloc which can sleep
630		 */
631		if (init_fpu(tsk)) {
632			/*
633			 * ran out of memory!
634			 */
635			do_group_exit(SIGKILL);
636			return;
637		}
638		local_irq_disable();
639	}
640
641	__thread_fpu_begin(tsk);
642
643	/*
644	 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
645	 */
646	if (unlikely(restore_fpu_checking(tsk))) {
647		drop_init_fpu(tsk);
648		force_sig(SIGSEGV, tsk);
649		return;
650	}
651
652	tsk->thread.fpu_counter++;
653}
654EXPORT_SYMBOL_GPL(math_state_restore);
655
656dotraplinkage void __kprobes
657do_device_not_available(struct pt_regs *regs, long error_code)
658{
659	enum ctx_state prev_state;
660
661	prev_state = exception_enter();
662	BUG_ON(use_eager_fpu());
663
664#ifdef CONFIG_MATH_EMULATION
665	if (read_cr0() & X86_CR0_EM) {
666		struct math_emu_info info = { };
667
668		conditional_sti(regs);
669
670		info.regs = regs;
671		math_emulate(&info);
672		exception_exit(prev_state);
673		return;
674	}
675#endif
676	math_state_restore(); /* interrupts still off */
677#ifdef CONFIG_X86_32
678	conditional_sti(regs);
679#endif
680	exception_exit(prev_state);
681}
682
683#ifdef CONFIG_X86_32
684dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
685{
686	siginfo_t info;
687	enum ctx_state prev_state;
688
689	prev_state = exception_enter();
690	local_irq_enable();
691
692	info.si_signo = SIGILL;
693	info.si_errno = 0;
694	info.si_code = ILL_BADSTK;
695	info.si_addr = NULL;
696	if (notify_die(DIE_TRAP, "iret exception", regs, error_code,
697			X86_TRAP_IRET, SIGILL) != NOTIFY_STOP) {
698		do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code,
699			&info);
700	}
701	exception_exit(prev_state);
702}
703#endif
704
705/* Set of traps needed for early debugging. */
706void __init early_trap_init(void)
707{
708	set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK);
709	/* int3 can be called from all */
710	set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK);
711#ifdef CONFIG_X86_32
712	set_intr_gate(X86_TRAP_PF, page_fault);
713#endif
714	load_idt(&idt_descr);
715}
716
717void __init early_trap_pf_init(void)
718{
719#ifdef CONFIG_X86_64
720	set_intr_gate(X86_TRAP_PF, page_fault);
721#endif
722}
723
724void __init trap_init(void)
725{
726	int i;
727
728#ifdef CONFIG_EISA
729	void __iomem *p = early_ioremap(0x0FFFD9, 4);
730
731	if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24))
732		EISA_bus = 1;
733	early_iounmap(p, 4);
734#endif
735
736	set_intr_gate(X86_TRAP_DE, divide_error);
737	set_intr_gate_ist(X86_TRAP_NMI, &nmi, NMI_STACK);
738	/* int4 can be called from all */
739	set_system_intr_gate(X86_TRAP_OF, &overflow);
740	set_intr_gate(X86_TRAP_BR, bounds);
741	set_intr_gate(X86_TRAP_UD, invalid_op);
742	set_intr_gate(X86_TRAP_NM, device_not_available);
743#ifdef CONFIG_X86_32
744	set_task_gate(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS);
745#else
746	set_intr_gate_ist(X86_TRAP_DF, &double_fault, DOUBLEFAULT_STACK);
747#endif
748	set_intr_gate(X86_TRAP_OLD_MF, coprocessor_segment_overrun);
749	set_intr_gate(X86_TRAP_TS, invalid_TSS);
750	set_intr_gate(X86_TRAP_NP, segment_not_present);
751	set_intr_gate_ist(X86_TRAP_SS, &stack_segment, STACKFAULT_STACK);
752	set_intr_gate(X86_TRAP_GP, general_protection);
753	set_intr_gate(X86_TRAP_SPURIOUS, spurious_interrupt_bug);
754	set_intr_gate(X86_TRAP_MF, coprocessor_error);
755	set_intr_gate(X86_TRAP_AC, alignment_check);
756#ifdef CONFIG_X86_MCE
757	set_intr_gate_ist(X86_TRAP_MC, &machine_check, MCE_STACK);
758#endif
759	set_intr_gate(X86_TRAP_XF, simd_coprocessor_error);
760
761	/* Reserve all the builtin and the syscall vector: */
762	for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
763		set_bit(i, used_vectors);
764
765#ifdef CONFIG_IA32_EMULATION
766	set_system_intr_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
767	set_bit(IA32_SYSCALL_VECTOR, used_vectors);
768#endif
769
770#ifdef CONFIG_X86_32
771	set_system_trap_gate(SYSCALL_VECTOR, &system_call);
772	set_bit(SYSCALL_VECTOR, used_vectors);
773#endif
774
775	/*
776	 * Set the IDT descriptor to a fixed read-only location, so that the
777	 * "sidt" instruction will not leak the location of the kernel, and
778	 * to defend the IDT against arbitrary memory write vulnerabilities.
779	 * It will be reloaded in cpu_init() */
780	__set_fixmap(FIX_RO_IDT, __pa_symbol(idt_table), PAGE_KERNEL_RO);
781	idt_descr.address = fix_to_virt(FIX_RO_IDT);
782
783	/*
784	 * Should be a barrier for any external CPU state:
785	 */
786	cpu_init();
787
788	x86_init.irqs.trap_init();
789
790#ifdef CONFIG_X86_64
791	memcpy(&debug_idt_table, &idt_table, IDT_ENTRIES * 16);
792	set_nmi_gate(X86_TRAP_DB, &debug);
793	set_nmi_gate(X86_TRAP_BP, &int3);
794#endif
795}