arch/x86/kernel/traps.c - v3.5.6
  1/*
  2 *  Copyright (C) 1991, 1992  Linus Torvalds
  3 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
  4 *
  5 *  Pentium III FXSR, SSE support
  6 *	Gareth Hughes <gareth@valinux.com>, May 2000
  7 */
  8
  9/*
 10 * Handle hardware traps and faults.
 11 */
 12#include <linux/interrupt.h>
 13#include <linux/kallsyms.h>
 14#include <linux/spinlock.h>
 15#include <linux/kprobes.h>
 16#include <linux/uaccess.h>
 17#include <linux/kdebug.h>
 18#include <linux/kgdb.h>
 19#include <linux/kernel.h>
 20#include <linux/module.h>
 21#include <linux/ptrace.h>
 22#include <linux/string.h>
 23#include <linux/delay.h>
 24#include <linux/errno.h>
 25#include <linux/kexec.h>
 26#include <linux/sched.h>
 27#include <linux/timer.h>
 28#include <linux/init.h>
 29#include <linux/bug.h>
 30#include <linux/nmi.h>
 31#include <linux/mm.h>
 32#include <linux/smp.h>
 33#include <linux/io.h>
 34
 35#ifdef CONFIG_EISA
 36#include <linux/ioport.h>
 37#include <linux/eisa.h>
 38#endif
 39
 40#if defined(CONFIG_EDAC)
 41#include <linux/edac.h>
 42#endif
 43
 44#include <asm/kmemcheck.h>
 45#include <asm/stacktrace.h>
 46#include <asm/processor.h>
 47#include <asm/debugreg.h>
 48#include <linux/atomic.h>
 49#include <asm/ftrace.h>
 50#include <asm/traps.h>
 51#include <asm/desc.h>
 52#include <asm/i387.h>
 53#include <asm/fpu-internal.h>
 54#include <asm/mce.h>
 55
 56#include <asm/mach_traps.h>
 57
 58#ifdef CONFIG_X86_64
 59#include <asm/x86_init.h>
 60#include <asm/pgalloc.h>
 61#include <asm/proto.h>
 62#else
 63#include <asm/processor-flags.h>
 64#include <asm/setup.h>
 65
 66asmlinkage int system_call(void);
 67
  68/* Do we ignore FPU interrupts? */
 69char ignore_fpu_irq;
 70
 71/*
 72 * The IDT has to be page-aligned to simplify the Pentium
 73 * F0 0F bug workaround.
 74 */
 75gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
 76#endif
 77
 78DECLARE_BITMAP(used_vectors, NR_VECTORS);
 79EXPORT_SYMBOL_GPL(used_vectors);
 80
 81static inline void conditional_sti(struct pt_regs *regs)
 82{
 83	if (regs->flags & X86_EFLAGS_IF)
 84		local_irq_enable();
 85}
 86
 87static inline void preempt_conditional_sti(struct pt_regs *regs)
 88{
 89	inc_preempt_count();
 90	if (regs->flags & X86_EFLAGS_IF)
 91		local_irq_enable();
 92}
 93
 94static inline void conditional_cli(struct pt_regs *regs)
 95{
 96	if (regs->flags & X86_EFLAGS_IF)
 97		local_irq_disable();
 98}
 99
100static inline void preempt_conditional_cli(struct pt_regs *regs)
101{
102	if (regs->flags & X86_EFLAGS_IF)
103		local_irq_disable();
104	dec_preempt_count();
105}
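/*
 * Illustrative note (not in the original file): handlers that may run on
 * an IST stack bracket their work with the preempt_* variants above, for
 * example:
 *
 *	preempt_conditional_sti(regs);
 *	do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL);
 *	preempt_conditional_cli(regs);
 *
 * The preempt count is raised before interrupts are re-enabled so the
 * handler cannot be preempted away while it still runs on the per-CPU
 * exception stack.
 */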
106
107static void __kprobes
108do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
109	long error_code, siginfo_t *info)
110{
111	struct task_struct *tsk = current;
112
113#ifdef CONFIG_X86_32
114	if (regs->flags & X86_VM_MASK) {
115		/*
116		 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
117		 * On nmi (interrupt 2), do_trap should not be called.
118		 */
119		if (trapnr < X86_TRAP_UD)
120			goto vm86_trap;
121		goto trap_signal;
122	}
123#endif
124
125	if (!user_mode(regs))
126		goto kernel_trap;
127
128#ifdef CONFIG_X86_32
129trap_signal:
130#endif
131	/*
132	 * We want error_code and trap_nr set for userspace faults and
133	 * kernelspace faults which result in die(), but not
134	 * kernelspace faults which are fixed up.  die() gives the
135	 * process no chance to handle the signal and notice the
136	 * kernel fault information, so that won't result in polluting
137	 * the information about previously queued, but not yet
138	 * delivered, faults.  See also do_general_protection below.
139	 */
140	tsk->thread.error_code = error_code;
141	tsk->thread.trap_nr = trapnr;
142
143#ifdef CONFIG_X86_64
144	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
145	    printk_ratelimit()) {
146		printk(KERN_INFO
147		       "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
148		       tsk->comm, tsk->pid, str,
149		       regs->ip, regs->sp, error_code);
150		print_vma_addr(" in ", regs->ip);
151		printk("\n");
152	}
153#endif
154
155	if (info)
156		force_sig_info(signr, info, tsk);
157	else
158		force_sig(signr, tsk);
159	return;
160
161kernel_trap:
162	if (!fixup_exception(regs)) {
163		tsk->thread.error_code = error_code;
164		tsk->thread.trap_nr = trapnr;
165		die(str, regs, error_code);
166	}
167	return;
168
169#ifdef CONFIG_X86_32
170vm86_trap:
171	if (handle_vm86_trap((struct kernel_vm86_regs *) regs,
172						error_code, trapnr))
173		goto trap_signal;
174	return;
175#endif
176}
177
178#define DO_ERROR(trapnr, signr, str, name)				\
179dotraplinkage void do_##name(struct pt_regs *regs, long error_code)	\
180{									\
181	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)	\
182							== NOTIFY_STOP)	\
183		return;							\
184	conditional_sti(regs);						\
185	do_trap(trapnr, signr, str, regs, error_code, NULL);		\
186}
187
188#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr)		\
189dotraplinkage void do_##name(struct pt_regs *regs, long error_code)	\
190{									\
191	siginfo_t info;							\
192	info.si_signo = signr;						\
193	info.si_errno = 0;						\
194	info.si_code = sicode;						\
195	info.si_addr = (void __user *)siaddr;				\
196	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)	\
197							== NOTIFY_STOP)	\
198		return;							\
199	conditional_sti(regs);						\
200	do_trap(trapnr, signr, str, regs, error_code, &info);		\
201}
202
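/*
 * Expansion sketch (mechanical, not in the original source): for example,
 * DO_ERROR(X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS) below
 * produces
 *
 *	dotraplinkage void do_invalid_TSS(struct pt_regs *regs, long error_code)
 *	{
 *		if (notify_die(DIE_TRAP, "invalid TSS", regs, error_code,
 *				X86_TRAP_TS, SIGSEGV) == NOTIFY_STOP)
 *			return;
 *		conditional_sti(regs);
 *		do_trap(X86_TRAP_TS, SIGSEGV, "invalid TSS", regs,
 *			error_code, NULL);
 *	}
 */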
203DO_ERROR_INFO(X86_TRAP_DE, SIGFPE, "divide error", divide_error, FPE_INTDIV,
204		regs->ip)
205DO_ERROR(X86_TRAP_OF, SIGSEGV, "overflow", overflow)
206DO_ERROR(X86_TRAP_BR, SIGSEGV, "bounds", bounds)
207DO_ERROR_INFO(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN,
208		regs->ip)
209DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun",
210		coprocessor_segment_overrun)
211DO_ERROR(X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS)
212DO_ERROR(X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present)
213#ifdef CONFIG_X86_32
214DO_ERROR(X86_TRAP_SS, SIGBUS, "stack segment", stack_segment)
215#endif
216DO_ERROR_INFO(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check,
217		BUS_ADRALN, 0)
218
219#ifdef CONFIG_X86_64
220/* Runs on IST stack */
221dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code)
222{
223	if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
224			X86_TRAP_SS, SIGBUS) == NOTIFY_STOP)
225		return;
226	preempt_conditional_sti(regs);
227	do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL);
228	preempt_conditional_cli(regs);
229}
230
231dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
232{
233	static const char str[] = "double fault";
234	struct task_struct *tsk = current;
235
 236	/* Return not checked because a double fault cannot be ignored */
237	notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);
238
239	tsk->thread.error_code = error_code;
240	tsk->thread.trap_nr = X86_TRAP_DF;
241
242	/*
243	 * This is always a kernel trap and never fixable (and thus must
244	 * never return).
245	 */
246	for (;;)
247		die(str, regs, error_code);
248}
249#endif
250
251dotraplinkage void __kprobes
252do_general_protection(struct pt_regs *regs, long error_code)
253{
254	struct task_struct *tsk;
255
256	conditional_sti(regs);
257
258#ifdef CONFIG_X86_32
259	if (regs->flags & X86_VM_MASK)
260		goto gp_in_vm86;
261#endif
262
263	tsk = current;
264	if (!user_mode(regs))
265		goto gp_in_kernel;
266
267	tsk->thread.error_code = error_code;
268	tsk->thread.trap_nr = X86_TRAP_GP;
269
270	if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
271			printk_ratelimit()) {
272		printk(KERN_INFO
273			"%s[%d] general protection ip:%lx sp:%lx error:%lx",
274			tsk->comm, task_pid_nr(tsk),
275			regs->ip, regs->sp, error_code);
276		print_vma_addr(" in ", regs->ip);
277		printk("\n");
278	}
279
280	force_sig(SIGSEGV, tsk);
281	return;
282
283#ifdef CONFIG_X86_32
284gp_in_vm86:
285	local_irq_enable();
286	handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
287	return;
288#endif
289
290gp_in_kernel:
291	if (fixup_exception(regs))
292		return;
293
294	tsk->thread.error_code = error_code;
295	tsk->thread.trap_nr = X86_TRAP_GP;
296	if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
297			X86_TRAP_GP, SIGSEGV) == NOTIFY_STOP)
298		return;
299	die("general protection fault", regs, error_code);
300}
301
302/* May run on IST stack. */
303dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_code)
304{
305#ifdef CONFIG_DYNAMIC_FTRACE
306	/*
307	 * ftrace must be first, everything else may cause a recursive crash.
308	 * See note by declaration of modifying_ftrace_code in ftrace.c
309	 */
310	if (unlikely(atomic_read(&modifying_ftrace_code)) &&
311	    ftrace_int3_handler(regs))
312		return;
313#endif
314#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
315	if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
316				SIGTRAP) == NOTIFY_STOP)
317		return;
318#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
319
320	if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
321			SIGTRAP) == NOTIFY_STOP)
322		return;
323
324	/*
325	 * Let others (NMI) know that the debug stack is in use
326	 * as we may switch to the interrupt stack.
327	 */
328	debug_stack_usage_inc();
329	preempt_conditional_sti(regs);
330	do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
331	preempt_conditional_cli(regs);
332	debug_stack_usage_dec();
333}
334
335#ifdef CONFIG_X86_64
336/*
337 * Help handler running on IST stack to switch back to user stack
338 * for scheduling or signal handling. The actual stack switch is done in
339 * entry.S
340 */
341asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
342{
343	struct pt_regs *regs = eregs;
 344	/* Already synced? */
345	if (eregs == (struct pt_regs *)eregs->sp)
346		;
347	/* Exception from user space */
348	else if (user_mode(eregs))
349		regs = task_pt_regs(current);
350	/*
351	 * Exception from kernel and interrupts are enabled. Move to
352	 * kernel process stack.
353	 */
354	else if (eregs->flags & X86_EFLAGS_IF)
355		regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs));
356	if (eregs != regs)
357		*regs = *eregs;
358	return regs;
359}
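/*
 * Illustration (not in the original source): the combined expression in
 * the "kernel with interrupts enabled" branch above is equivalent to
 *
 *	eregs->sp -= sizeof(struct pt_regs);
 *	regs = (struct pt_regs *)eregs->sp;
 *
 * i.e. it carves a fresh pt_regs immediately below the interrupted
 * kernel stack pointer and then falls through to the copy.
 */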
360#endif
361
362/*
363 * Our handling of the processor debug registers is non-trivial.
364 * We do not clear them on entry and exit from the kernel. Therefore
365 * it is possible to get a watchpoint trap here from inside the kernel.
366 * However, the code in ./ptrace.c has ensured that the user can
367 * only set watchpoints on userspace addresses. Therefore the in-kernel
368 * watchpoint trap can only occur in code which is reading/writing
369 * from user space. Such code must not hold kernel locks (since it
370 * can equally take a page fault), therefore it is safe to call
371 * force_sig_info even though that claims and releases locks.
372 *
373 * Code in ./signal.c ensures that the debug control register
374 * is restored before we deliver any signal, and therefore that
375 * user code runs with the correct debug control register even though
376 * we clear it here.
377 *
378 * Being careful here means that we don't have to be as careful in a
379 * lot of more complicated places (task switching can be a bit lazy
380 * about restoring all the debug state, and ptrace doesn't have to
381 * find every occurrence of the TF bit that could be saved away even
382 * by user code)
383 *
384 * May run on IST stack.
385 */
386dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
387{
388	struct task_struct *tsk = current;
389	int user_icebp = 0;
390	unsigned long dr6;
391	int si_code;
392
393	get_debugreg(dr6, 6);
394
395	/* Filter out all the reserved bits which are preset to 1 */
396	dr6 &= ~DR6_RESERVED;
397
398	/*
399	 * If dr6 has no reason to give us about the origin of this trap,
400	 * then it's very likely the result of an icebp/int01 trap.
401	 * User wants a sigtrap for that.
402	 */
403	if (!dr6 && user_mode(regs))
404		user_icebp = 1;
405
406	/* Catch kmemcheck conditions first of all! */
407	if ((dr6 & DR_STEP) && kmemcheck_trap(regs))
408		return;
409
410	/* DR6 may or may not be cleared by the CPU */
411	set_debugreg(0, 6);
412
413	/*
414	 * The processor cleared BTF, so don't mark that we need it set.
415	 */
416	clear_tsk_thread_flag(tsk, TIF_BLOCKSTEP);
417
418	/* Store the virtualized DR6 value */
419	tsk->thread.debugreg6 = dr6;
420
421	if (notify_die(DIE_DEBUG, "debug", regs, PTR_ERR(&dr6), error_code,
422							SIGTRAP) == NOTIFY_STOP)
423		return;
424
425	/*
426	 * Let others (NMI) know that the debug stack is in use
427	 * as we may switch to the interrupt stack.
428	 */
429	debug_stack_usage_inc();
430
431	/* It's safe to allow irq's after DR6 has been saved */
432	preempt_conditional_sti(regs);
433
434	if (regs->flags & X86_VM_MASK) {
435		handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
436					X86_TRAP_DB);
437		preempt_conditional_cli(regs);
438		debug_stack_usage_dec();
439		return;
440	}
441
442	/*
443	 * Single-stepping through system calls: ignore any exceptions in
444	 * kernel space, but re-enable TF when returning to user mode.
445	 *
446	 * We already checked v86 mode above, so we can check for kernel mode
447	 * by just checking the CPL of CS.
448	 */
449	if ((dr6 & DR_STEP) && !user_mode(regs)) {
450		tsk->thread.debugreg6 &= ~DR_STEP;
451		set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
452		regs->flags &= ~X86_EFLAGS_TF;
453	}
454	si_code = get_si_code(tsk->thread.debugreg6);
455	if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
456		send_sigtrap(tsk, regs, error_code, si_code);
457	preempt_conditional_cli(regs);
458	debug_stack_usage_dec();
459
460	return;
461}
462
463/*
464 * Note that we play around with the 'TS' bit in an attempt to get
465 * the correct behaviour even in the presence of the asynchronous
466 * IRQ13 behaviour
467 */
468void math_error(struct pt_regs *regs, int error_code, int trapnr)
469{
470	struct task_struct *task = current;
471	siginfo_t info;
472	unsigned short err;
473	char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
474						"simd exception";
475
476	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
477		return;
478	conditional_sti(regs);
479
480	if (!user_mode_vm(regs))
481	{
482		if (!fixup_exception(regs)) {
483			task->thread.error_code = error_code;
484			task->thread.trap_nr = trapnr;
485			die(str, regs, error_code);
486		}
487		return;
488	}
489
490	/*
491	 * Save the info for the exception handler and clear the error.
492	 */
493	save_init_fpu(task);
494	task->thread.trap_nr = trapnr;
495	task->thread.error_code = error_code;
496	info.si_signo = SIGFPE;
497	info.si_errno = 0;
498	info.si_addr = (void __user *)regs->ip;
499	if (trapnr == X86_TRAP_MF) {
500		unsigned short cwd, swd;
501		/*
502		 * (~cwd & swd) will mask out exceptions that are not set to unmasked
503		 * status.  0x3f is the exception bits in these regs, 0x200 is the
504		 * C1 reg you need in case of a stack fault, 0x040 is the stack
505		 * fault bit.  We should only be taking one exception at a time,
506		 * so if this combination doesn't produce any single exception,
507		 * then we have a bad program that isn't synchronizing its FPU usage
508		 * and it will suffer the consequences since we won't be able to
509		 * fully reproduce the context of the exception
510		 */
511		cwd = get_fpu_cwd(task);
512		swd = get_fpu_swd(task);
513
514		err = swd & ~cwd;
515	} else {
516		/*
517		 * The SIMD FPU exceptions are handled a little differently, as there
518		 * is only a single status/control register.  Thus, to determine which
519		 * unmasked exception was caught we must mask the exception mask bits
520		 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
521		 */
522		unsigned short mxcsr = get_fpu_mxcsr(task);
523		err = ~(mxcsr >> 7) & mxcsr;
524	}
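	/*
	 * Worked example (illustrative, not in the original source): the
	 * MXCSR exception masks sit at bits 7-12 and the sticky flags at
	 * bits 0-5.  With the reset value 0x1f80 everything is masked and
	 * err has no low bits set.  If only the divide-by-zero mask
	 * (bit 9) is clear and a divide by zero occurs, ZE (bit 2) is
	 * set: mxcsr == 0x1d84, so err = ~(0x1d84 >> 7) & 0x1d84 has
	 * bit 2 set and the tests below select FPE_FLTDIV.
	 */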
525
526	if (err & 0x001) {	/* Invalid op */
527		/*
528		 * swd & 0x240 == 0x040: Stack Underflow
529		 * swd & 0x240 == 0x240: Stack Overflow
530		 * User must clear the SF bit (0x40) if set
531		 */
532		info.si_code = FPE_FLTINV;
533	} else if (err & 0x004) { /* Divide by Zero */
534		info.si_code = FPE_FLTDIV;
535	} else if (err & 0x008) { /* Overflow */
536		info.si_code = FPE_FLTOVF;
537	} else if (err & 0x012) { /* Denormal, Underflow */
538		info.si_code = FPE_FLTUND;
539	} else if (err & 0x020) { /* Precision */
540		info.si_code = FPE_FLTRES;
541	} else {
542		/*
543		 * If we're using IRQ 13, or supposedly even some trap
544		 * X86_TRAP_MF implementations, it's possible
545		 * we get a spurious trap, which is not an error.
546		 */
547		return;
548	}
549	force_sig_info(SIGFPE, &info, task);
550}
551
552dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
553{
554#ifdef CONFIG_X86_32
555	ignore_fpu_irq = 1;
556#endif
557
558	math_error(regs, error_code, X86_TRAP_MF);
559}
560
561dotraplinkage void
562do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
563{
564	math_error(regs, error_code, X86_TRAP_XF);
565}
566
567dotraplinkage void
568do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
569{
570	conditional_sti(regs);
571#if 0
572	/* No need to warn about this any longer. */
573	printk(KERN_INFO "Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
574#endif
575}
576
577asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
578{
579}
580
581asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
582{
583}
584
585/*
586 * 'math_state_restore()' saves the current math information in the
587 * old math state array, and gets the new ones from the current task
588 *
589 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
590 * Don't touch unless you *really* know how it works.
591 *
 592 * Must be called with kernel preemption disabled (e.g. with local
 593 * interrupts disabled, as in the case of do_device_not_available).
594 */
595void math_state_restore(void)
596{
597	struct task_struct *tsk = current;
598
599	if (!tsk_used_math(tsk)) {
600		local_irq_enable();
601		/*
602		 * does a slab alloc which can sleep
603		 */
604		if (init_fpu(tsk)) {
605			/*
606			 * ran out of memory!
607			 */
608			do_group_exit(SIGKILL);
609			return;
610		}
611		local_irq_disable();
612	}
613
614	__thread_fpu_begin(tsk);
615	/*
616	 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
617	 */
618	if (unlikely(restore_fpu_checking(tsk))) {
619		__thread_fpu_end(tsk);
620		force_sig(SIGSEGV, tsk);
621		return;
622	}
623
624	tsk->fpu_counter++;
625}
626EXPORT_SYMBOL_GPL(math_state_restore);
627
628dotraplinkage void __kprobes
629do_device_not_available(struct pt_regs *regs, long error_code)
630{
631#ifdef CONFIG_MATH_EMULATION
632	if (read_cr0() & X86_CR0_EM) {
633		struct math_emu_info info = { };
634
635		conditional_sti(regs);
636
637		info.regs = regs;
638		math_emulate(&info);
639		return;
640	}
641#endif
642	math_state_restore(); /* interrupts still off */
643#ifdef CONFIG_X86_32
644	conditional_sti(regs);
645#endif
646}
647
648#ifdef CONFIG_X86_32
649dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
650{
651	siginfo_t info;
652	local_irq_enable();
653
654	info.si_signo = SIGILL;
655	info.si_errno = 0;
656	info.si_code = ILL_BADSTK;
657	info.si_addr = NULL;
658	if (notify_die(DIE_TRAP, "iret exception", regs, error_code,
659			X86_TRAP_IRET, SIGILL) == NOTIFY_STOP)
660		return;
661	do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code,
662		&info);
663}
664#endif
665
666/* Set of traps needed for early debugging. */
667void __init early_trap_init(void)
668{
669	set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK);
670	/* int3 can be called from all */
671	set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK);
672	set_intr_gate(X86_TRAP_PF, &page_fault);
673	load_idt(&idt_descr);
674}
675
676void __init trap_init(void)
677{
678	int i;
679
680#ifdef CONFIG_EISA
681	void __iomem *p = early_ioremap(0x0FFFD9, 4);
682
683	if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24))
684		EISA_bus = 1;
685	early_iounmap(p, 4);
686#endif
687
688	set_intr_gate(X86_TRAP_DE, &divide_error);
689	set_intr_gate_ist(X86_TRAP_NMI, &nmi, NMI_STACK);
690	/* int4 can be called from all */
691	set_system_intr_gate(X86_TRAP_OF, &overflow);
692	set_intr_gate(X86_TRAP_BR, &bounds);
693	set_intr_gate(X86_TRAP_UD, &invalid_op);
694	set_intr_gate(X86_TRAP_NM, &device_not_available);
695#ifdef CONFIG_X86_32
696	set_task_gate(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS);
697#else
698	set_intr_gate_ist(X86_TRAP_DF, &double_fault, DOUBLEFAULT_STACK);
699#endif
700	set_intr_gate(X86_TRAP_OLD_MF, &coprocessor_segment_overrun);
701	set_intr_gate(X86_TRAP_TS, &invalid_TSS);
702	set_intr_gate(X86_TRAP_NP, &segment_not_present);
703	set_intr_gate_ist(X86_TRAP_SS, &stack_segment, STACKFAULT_STACK);
704	set_intr_gate(X86_TRAP_GP, &general_protection);
705	set_intr_gate(X86_TRAP_SPURIOUS, &spurious_interrupt_bug);
706	set_intr_gate(X86_TRAP_MF, &coprocessor_error);
707	set_intr_gate(X86_TRAP_AC, &alignment_check);
708#ifdef CONFIG_X86_MCE
709	set_intr_gate_ist(X86_TRAP_MC, &machine_check, MCE_STACK);
710#endif
711	set_intr_gate(X86_TRAP_XF, &simd_coprocessor_error);
712
713	/* Reserve all the builtin and the syscall vector: */
714	for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
715		set_bit(i, used_vectors);
716
717#ifdef CONFIG_IA32_EMULATION
718	set_system_intr_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
719	set_bit(IA32_SYSCALL_VECTOR, used_vectors);
720#endif
721
722#ifdef CONFIG_X86_32
723	set_system_trap_gate(SYSCALL_VECTOR, &system_call);
724	set_bit(SYSCALL_VECTOR, used_vectors);
725#endif
726
727	/*
728	 * Should be a barrier for any external CPU state:
729	 */
730	cpu_init();
731
732	x86_init.irqs.trap_init();
733
734#ifdef CONFIG_X86_64
735	memcpy(&nmi_idt_table, &idt_table, IDT_ENTRIES * 16);
736	set_nmi_gate(X86_TRAP_DB, &debug);
737	set_nmi_gate(X86_TRAP_BP, &int3);
738#endif
739}
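/*
 * Summary note (not in the original file): set_intr_gate() installs a
 * DPL-0 gate that only the kernel may invoke, the set_system_* variants
 * install DPL-3 gates reachable via "int" from user space (overflow,
 * int3, the syscall vectors), and the *_ist variants additionally pick
 * an Interrupt Stack Table slot so the handler runs on a known-good
 * per-CPU stack.
 */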
arch/x86/kernel/traps.c - v6.8
   1/*
   2 *  Copyright (C) 1991, 1992  Linus Torvalds
   3 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
   4 *
   5 *  Pentium III FXSR, SSE support
   6 *	Gareth Hughes <gareth@valinux.com>, May 2000
   7 */
   8
   9/*
  10 * Handle hardware traps and faults.
  11 */
  12
  13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  14
  15#include <linux/context_tracking.h>
  16#include <linux/interrupt.h>
  17#include <linux/kallsyms.h>
  18#include <linux/kmsan.h>
  19#include <linux/spinlock.h>
  20#include <linux/kprobes.h>
  21#include <linux/uaccess.h>
  22#include <linux/kdebug.h>
  23#include <linux/kgdb.h>
  24#include <linux/kernel.h>
  25#include <linux/export.h>
  26#include <linux/ptrace.h>
  27#include <linux/uprobes.h>
  28#include <linux/string.h>
  29#include <linux/delay.h>
  30#include <linux/errno.h>
  31#include <linux/kexec.h>
  32#include <linux/sched.h>
  33#include <linux/sched/task_stack.h>
  34#include <linux/timer.h>
  35#include <linux/init.h>
  36#include <linux/bug.h>
  37#include <linux/nmi.h>
  38#include <linux/mm.h>
  39#include <linux/smp.h>
  40#include <linux/cpu.h>
  41#include <linux/io.h>
  42#include <linux/hardirq.h>
  43#include <linux/atomic.h>
  44#include <linux/iommu.h>
  45
  46#include <asm/stacktrace.h>
  47#include <asm/processor.h>
  48#include <asm/debugreg.h>
  49#include <asm/realmode.h>
  50#include <asm/text-patching.h>
  51#include <asm/ftrace.h>
  52#include <asm/traps.h>
  53#include <asm/desc.h>
  54#include <asm/fpu/api.h>
  55#include <asm/cpu.h>
  56#include <asm/cpu_entry_area.h>
  57#include <asm/mce.h>
  58#include <asm/fixmap.h>
  59#include <asm/mach_traps.h>
  60#include <asm/alternative.h>
  61#include <asm/fpu/xstate.h>
  62#include <asm/vm86.h>
  63#include <asm/umip.h>
  64#include <asm/insn.h>
  65#include <asm/insn-eval.h>
  66#include <asm/vdso.h>
  67#include <asm/tdx.h>
  68#include <asm/cfi.h>
  69
  70#ifdef CONFIG_X86_64
  71#include <asm/x86_init.h>
  72#else
  73#include <asm/processor-flags.h>
  74#include <asm/setup.h>
  75#endif
  76
  77#include <asm/proto.h>
  78
  79DECLARE_BITMAP(system_vectors, NR_VECTORS);
  80
  81__always_inline int is_valid_bugaddr(unsigned long addr)
  82{
  83	if (addr < TASK_SIZE_MAX)
  84		return 0;
  85
  86	/*
   87	 * We got #UD; if the text isn't readable we'd have gotten
  88	 * a different exception.
  89	 */
  90	return *(unsigned short *)addr == INSN_UD2;
  91}
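/*
 * Illustration (assumes the <asm/bug.h> definition INSN_UD2 == 0x0b0f):
 * UD2 encodes as the bytes 0x0f 0x0b, so read as a little-endian u16 the
 * check above is effectively
 *
 *	return *(unsigned short *)addr == 0x0b0f;
 */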
  92
  93static nokprobe_inline int
  94do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
  95		  struct pt_regs *regs,	long error_code)
  96{
  97	if (v8086_mode(regs)) {
  98		/*
  99		 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
 100		 * On nmi (interrupt 2), do_trap should not be called.
 101		 */
 102		if (trapnr < X86_TRAP_UD) {
 103			if (!handle_vm86_trap((struct kernel_vm86_regs *) regs,
 104						error_code, trapnr))
 105				return 0;
 106		}
 107	} else if (!user_mode(regs)) {
 108		if (fixup_exception(regs, trapnr, error_code, 0))
 109			return 0;
 110
 111		tsk->thread.error_code = error_code;
 112		tsk->thread.trap_nr = trapnr;
 113		die(str, regs, error_code);
 114	} else {
 115		if (fixup_vdso_exception(regs, trapnr, error_code, 0))
 116			return 0;
 117	}
 118
 119	/*
 120	 * We want error_code and trap_nr set for userspace faults and
 121	 * kernelspace faults which result in die(), but not
 122	 * kernelspace faults which are fixed up.  die() gives the
 123	 * process no chance to handle the signal and notice the
 124	 * kernel fault information, so that won't result in polluting
 125	 * the information about previously queued, but not yet
 126	 * delivered, faults.  See also exc_general_protection below.
 127	 */
 128	tsk->thread.error_code = error_code;
 129	tsk->thread.trap_nr = trapnr;
 130
 131	return -1;
 132}
 133
 134static void show_signal(struct task_struct *tsk, int signr,
 135			const char *type, const char *desc,
 136			struct pt_regs *regs, long error_code)
 137{
 138	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
 139	    printk_ratelimit()) {
 140		pr_info("%s[%d] %s%s ip:%lx sp:%lx error:%lx",
 141			tsk->comm, task_pid_nr(tsk), type, desc,
 142			regs->ip, regs->sp, error_code);
 143		print_vma_addr(KERN_CONT " in ", regs->ip);
 144		pr_cont("\n");
 145	}
 146}
 147
 148static void
 149do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
 150	long error_code, int sicode, void __user *addr)
 151{
 152	struct task_struct *tsk = current;
 153
 154	if (!do_trap_no_signal(tsk, trapnr, str, regs, error_code))
 155		return;
 156
 157	show_signal(tsk, signr, "trap ", str, regs, error_code);
 158
 159	if (!sicode)
 160		force_sig(signr);
 161	else
 162		force_sig_fault(signr, sicode, addr);
 163}
 164NOKPROBE_SYMBOL(do_trap);
 165
 166static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
 167	unsigned long trapnr, int signr, int sicode, void __user *addr)
 168{
 169	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
 170
 171	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
 172			NOTIFY_STOP) {
 173		cond_local_irq_enable(regs);
 174		do_trap(trapnr, signr, str, regs, error_code, sicode, addr);
 175		cond_local_irq_disable(regs);
 176	}
 177}
 178
 179/*
  180 * POSIX requires the address of the faulting instruction to be provided for
 181 * SIGILL (#UD) and SIGFPE (#DE) in the si_addr member of siginfo_t.
 182 *
 183 * This address is usually regs->ip, but when an uprobe moved the code out
 184 * of line then regs->ip points to the XOL code which would confuse
 185 * anything which analyzes the fault address vs. the unmodified binary. If
 186 * a trap happened in XOL code then uprobe maps regs->ip back to the
 187 * original instruction address.
 188 */
 189static __always_inline void __user *error_get_trap_addr(struct pt_regs *regs)
 190{
 191	return (void __user *)uprobe_get_trap_addr(regs);
 192}
 193
 194DEFINE_IDTENTRY(exc_divide_error)
 195{
 196	do_error_trap(regs, 0, "divide error", X86_TRAP_DE, SIGFPE,
 197		      FPE_INTDIV, error_get_trap_addr(regs));
 198}
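/*
 * Expansion sketch (simplified from <asm/idtentry.h>, not part of this
 * file): DEFINE_IDTENTRY(exc_divide_error) wraps the body above as
 *
 *	__visible noinstr void exc_divide_error(struct pt_regs *regs)
 *	{
 *		irqentry_state_t state = irqentry_enter(regs);
 *
 *		instrumentation_begin();
 *		__exc_divide_error(regs);
 *		instrumentation_end();
 *		irqentry_exit(regs, state);
 *	}
 *
 * with the matching asm_exc_divide_error stub generated in entry code.
 */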
 199
 200DEFINE_IDTENTRY(exc_overflow)
 201{
 202	do_error_trap(regs, 0, "overflow", X86_TRAP_OF, SIGSEGV, 0, NULL);
 203}
 204
 205#ifdef CONFIG_X86_F00F_BUG
 206void handle_invalid_op(struct pt_regs *regs)
 207#else
 208static inline void handle_invalid_op(struct pt_regs *regs)
 209#endif
 210{
 211	do_error_trap(regs, 0, "invalid opcode", X86_TRAP_UD, SIGILL,
 212		      ILL_ILLOPN, error_get_trap_addr(regs));
 213}
 214
 215static noinstr bool handle_bug(struct pt_regs *regs)
 216{
 217	bool handled = false;
 218
 219	/*
 220	 * Normally @regs are unpoisoned by irqentry_enter(), but handle_bug()
 221	 * is a rare case that uses @regs without passing them to
 222	 * irqentry_enter().
 223	 */
 224	kmsan_unpoison_entry_regs(regs);
 225	if (!is_valid_bugaddr(regs->ip))
 226		return handled;
 227
 228	/*
 229	 * All lies, just get the WARN/BUG out.
 230	 */
 231	instrumentation_begin();
 232	/*
 233	 * Since we're emulating a CALL with exceptions, restore the interrupt
 234	 * state to what it was at the exception site.
 235	 */
 236	if (regs->flags & X86_EFLAGS_IF)
 237		raw_local_irq_enable();
 238	if (report_bug(regs->ip, regs) == BUG_TRAP_TYPE_WARN ||
 239	    handle_cfi_failure(regs) == BUG_TRAP_TYPE_WARN) {
 240		regs->ip += LEN_UD2;
 241		handled = true;
 242	}
 243	if (regs->flags & X86_EFLAGS_IF)
 244		raw_local_irq_disable();
 245	instrumentation_end();
 246
 247	return handled;
 248}
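/*
 * Background sketch (based on the standard x86 <asm/bug.h> layout, not
 * shown here): WARN()/BUG() sites emit a ud2 plus a struct bug_entry in
 * the __bug_table section.  report_bug() looks regs->ip up in that
 * table; a WARN entry returns BUG_TRAP_TYPE_WARN, so execution resumes
 * at regs->ip += LEN_UD2, while a BUG entry leads to die().
 */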
 249
 250DEFINE_IDTENTRY_RAW(exc_invalid_op)
 251{
 252	irqentry_state_t state;
 253
 254	/*
 255	 * We use UD2 as a short encoding for 'CALL __WARN', as such
 256	 * handle it before exception entry to avoid recursive WARN
 257	 * in case exception entry is the one triggering WARNs.
 258	 */
 259	if (!user_mode(regs) && handle_bug(regs))
 260		return;
 261
 262	state = irqentry_enter(regs);
 263	instrumentation_begin();
 264	handle_invalid_op(regs);
 265	instrumentation_end();
 266	irqentry_exit(regs, state);
 267}
 268
 269DEFINE_IDTENTRY(exc_coproc_segment_overrun)
 270{
 271	do_error_trap(regs, 0, "coprocessor segment overrun",
 272		      X86_TRAP_OLD_MF, SIGFPE, 0, NULL);
 273}
 274
 275DEFINE_IDTENTRY_ERRORCODE(exc_invalid_tss)
 276{
 277	do_error_trap(regs, error_code, "invalid TSS", X86_TRAP_TS, SIGSEGV,
 278		      0, NULL);
 279}
 280
 281DEFINE_IDTENTRY_ERRORCODE(exc_segment_not_present)
 282{
 283	do_error_trap(regs, error_code, "segment not present", X86_TRAP_NP,
 284		      SIGBUS, 0, NULL);
 285}
 286
 287DEFINE_IDTENTRY_ERRORCODE(exc_stack_segment)
 288{
 289	do_error_trap(regs, error_code, "stack segment", X86_TRAP_SS, SIGBUS,
 290		      0, NULL);
 291}
 292
 293DEFINE_IDTENTRY_ERRORCODE(exc_alignment_check)
 294{
 295	char *str = "alignment check";
 296
 297	if (notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_AC, SIGBUS) == NOTIFY_STOP)
 298		return;
 299
 300	if (!user_mode(regs))
 301		die("Split lock detected\n", regs, error_code);
 302
 303	local_irq_enable();
 304
 305	if (handle_user_split_lock(regs, error_code))
 306		goto out;
 307
 308	do_trap(X86_TRAP_AC, SIGBUS, "alignment check", regs,
 309		error_code, BUS_ADRALN, NULL);
 310
 311out:
 312	local_irq_disable();
 313}
 314
 315#ifdef CONFIG_VMAP_STACK
 316__visible void __noreturn handle_stack_overflow(struct pt_regs *regs,
 317						unsigned long fault_address,
 318						struct stack_info *info)
 319{
 320	const char *name = stack_type_name(info->type);
 321
 322	printk(KERN_EMERG "BUG: %s stack guard page was hit at %p (stack is %p..%p)\n",
 323	       name, (void *)fault_address, info->begin, info->end);
 324
 325	die("stack guard page", regs, 0);
 326
 327	/* Be absolutely certain we don't return. */
 328	panic("%s stack guard hit", name);
 329}
 330#endif
 331
 332/*
 333 * Runs on an IST stack for x86_64 and on a special task stack for x86_32.
 334 *
 335 * On x86_64, this is more or less a normal kernel entry.  Notwithstanding the
 336 * SDM's warnings about double faults being unrecoverable, returning works as
 337 * expected.  Presumably what the SDM actually means is that the CPU may get
 338 * the register state wrong on entry, so returning could be a bad idea.
 339 *
 340 * Various CPU engineers have promised that double faults due to an IRET fault
 341 * while the stack is read-only are, in fact, recoverable.
 342 *
 343 * On x86_32, this is entered through a task gate, and regs are synthesized
 344 * from the TSS.  Returning is, in principle, okay, but changes to regs will
 345 * be lost.  If, for some reason, we need to return to a context with modified
 346 * regs, the shim code could be adjusted to synchronize the registers.
 347 *
 348 * The 32bit #DF shim provides CR2 already as an argument. On 64bit it needs
 349 * to be read before doing anything else.
 350 */
 351DEFINE_IDTENTRY_DF(exc_double_fault)
 352{
 353	static const char str[] = "double fault";
 354	struct task_struct *tsk = current;
 355
 356#ifdef CONFIG_VMAP_STACK
 357	unsigned long address = read_cr2();
 358	struct stack_info info;
 359#endif
 360
 361#ifdef CONFIG_X86_ESPFIX64
 362	extern unsigned char native_irq_return_iret[];
 363
 364	/*
 365	 * If IRET takes a non-IST fault on the espfix64 stack, then we
 366	 * end up promoting it to a doublefault.  In that case, take
 367	 * advantage of the fact that we're not using the normal (TSS.sp0)
 368	 * stack right now.  We can write a fake #GP(0) frame at TSS.sp0
 369	 * and then modify our own IRET frame so that, when we return,
 370	 * we land directly at the #GP(0) vector with the stack already
 371	 * set up according to its expectations.
 372	 *
 373	 * The net result is that our #GP handler will think that we
 374	 * entered from usermode with the bad user context.
 375	 *
 376	 * No need for nmi_enter() here because we don't use RCU.
 377	 */
 378	if (((long)regs->sp >> P4D_SHIFT) == ESPFIX_PGD_ENTRY &&
 379		regs->cs == __KERNEL_CS &&
 380		regs->ip == (unsigned long)native_irq_return_iret)
 381	{
 382		struct pt_regs *gpregs = (struct pt_regs *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
 383		unsigned long *p = (unsigned long *)regs->sp;
 384
 385		/*
 386		 * regs->sp points to the failing IRET frame on the
 387		 * ESPFIX64 stack.  Copy it to the entry stack.  This fills
 388		 * in gpregs->ss through gpregs->ip.
 389		 *
 390		 */
 391		gpregs->ip	= p[0];
 392		gpregs->cs	= p[1];
 393		gpregs->flags	= p[2];
 394		gpregs->sp	= p[3];
 395		gpregs->ss	= p[4];
 396		gpregs->orig_ax = 0;  /* Missing (lost) #GP error code */
 397
 398		/*
 399		 * Adjust our frame so that we return straight to the #GP
 400		 * vector with the expected RSP value.  This is safe because
 401		 * we won't enable interrupts or schedule before we invoke
 402		 * general_protection, so nothing will clobber the stack
 403		 * frame we just set up.
 404		 *
 405		 * We will enter general_protection with kernel GSBASE,
 406		 * which is what the stub expects, given that the faulting
 407		 * RIP will be the IRET instruction.
 408		 */
 409		regs->ip = (unsigned long)asm_exc_general_protection;
 410		regs->sp = (unsigned long)&gpregs->orig_ax;
 411
 412		return;
 413	}
 414#endif
 415
 416	irqentry_nmi_enter(regs);
 417	instrumentation_begin();
 418	notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);
 419
 420	tsk->thread.error_code = error_code;
 421	tsk->thread.trap_nr = X86_TRAP_DF;
 422
 423#ifdef CONFIG_VMAP_STACK
 424	/*
 425	 * If we overflow the stack into a guard page, the CPU will fail
 426	 * to deliver #PF and will send #DF instead.  Similarly, if we
 427	 * take any non-IST exception while too close to the bottom of
 428	 * the stack, the processor will get a page fault while
 429	 * delivering the exception and will generate a double fault.
 430	 *
 431	 * According to the SDM (footnote in 6.15 under "Interrupt 14 -
  432	 * Page-Fault Exception (#PF)"):
 433	 *
 434	 *   Processors update CR2 whenever a page fault is detected. If a
 435	 *   second page fault occurs while an earlier page fault is being
 436	 *   delivered, the faulting linear address of the second fault will
 437	 *   overwrite the contents of CR2 (replacing the previous
 438	 *   address). These updates to CR2 occur even if the page fault
 439	 *   results in a double fault or occurs during the delivery of a
 440	 *   double fault.
 441	 *
 442	 * The logic below has a small possibility of incorrectly diagnosing
 443	 * some errors as stack overflows.  For example, if the IDT or GDT
 444	 * gets corrupted such that #GP delivery fails due to a bad descriptor
 445	 * causing #GP and we hit this condition while CR2 coincidentally
 446	 * points to the stack guard page, we'll think we overflowed the
 447	 * stack.  Given that we're going to panic one way or another
 448	 * if this happens, this isn't necessarily worth fixing.
 449	 *
 450	 * If necessary, we could improve the test by only diagnosing
 451	 * a stack overflow if the saved RSP points within 47 bytes of
 452	 * the bottom of the stack: if RSP == tsk_stack + 48 and we
 453	 * take an exception, the stack is already aligned and there
  454	 * will be enough room for SS, RSP, RFLAGS, CS, RIP, and a
 455	 * possible error code, so a stack overflow would *not* double
 456	 * fault.  With any less space left, exception delivery could
 457	 * fail, and, as a practical matter, we've overflowed the
 458	 * stack even if the actual trigger for the double fault was
 459	 * something else.
 460	 */
 461	if (get_stack_guard_info((void *)address, &info))
 462		handle_stack_overflow(regs, address, &info);
 463#endif
 464
 465	pr_emerg("PANIC: double fault, error_code: 0x%lx\n", error_code);
 466	die("double fault", regs, error_code);
 467	panic("Machine halted.");
 468	instrumentation_end();
 469}
 470
 471DEFINE_IDTENTRY(exc_bounds)
 472{
 473	if (notify_die(DIE_TRAP, "bounds", regs, 0,
 474			X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP)
 475		return;
 476	cond_local_irq_enable(regs);
 477
 478	if (!user_mode(regs))
 479		die("bounds", regs, 0);
 480
 481	do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, 0, 0, NULL);
 482
 483	cond_local_irq_disable(regs);
 484}
 485
 486enum kernel_gp_hint {
 487	GP_NO_HINT,
 488	GP_NON_CANONICAL,
 489	GP_CANONICAL
 490};
 491
 492/*
 493 * When an uncaught #GP occurs, try to determine the memory address accessed by
 494 * the instruction and return that address to the caller. Also, try to figure
 495 * out whether any part of the access to that address was non-canonical.
 496 */
 497static enum kernel_gp_hint get_kernel_gp_address(struct pt_regs *regs,
 498						 unsigned long *addr)
 499{
 500	u8 insn_buf[MAX_INSN_SIZE];
 501	struct insn insn;
 502	int ret;
 503
 504	if (copy_from_kernel_nofault(insn_buf, (void *)regs->ip,
 505			MAX_INSN_SIZE))
 506		return GP_NO_HINT;
 507
 508	ret = insn_decode_kernel(&insn, insn_buf);
 509	if (ret < 0)
 510		return GP_NO_HINT;
 511
 512	*addr = (unsigned long)insn_get_addr_ref(&insn, regs);
 513	if (*addr == -1UL)
 514		return GP_NO_HINT;
 515
 516#ifdef CONFIG_X86_64
 517	/*
 518	 * Check that:
 519	 *  - the operand is not in the kernel half
 520	 *  - the last byte of the operand is not in the user canonical half
 521	 */
 522	if (*addr < ~__VIRTUAL_MASK &&
 523	    *addr + insn.opnd_bytes - 1 > __VIRTUAL_MASK)
 524		return GP_NON_CANONICAL;
 525#endif
 526
 527	return GP_CANONICAL;
 528}
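/*
 * Worked example (illustrative): with 4-level paging __VIRTUAL_MASK is
 * (1UL << 47) - 1.  An 8-byte access starting at 0x00007fffffffffff has
 * its last byte at 0x0000800000000006, so the start is below
 * ~__VIRTUAL_MASK while the end is above __VIRTUAL_MASK, and the fault
 * is reported as probably non-canonical.
 */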
 529
 530#define GPFSTR "general protection fault"
 531
 532static bool fixup_iopl_exception(struct pt_regs *regs)
 533{
 534	struct thread_struct *t = &current->thread;
 535	unsigned char byte;
 536	unsigned long ip;
 537
 538	if (!IS_ENABLED(CONFIG_X86_IOPL_IOPERM) || t->iopl_emul != 3)
 539		return false;
 540
 541	if (insn_get_effective_ip(regs, &ip))
 542		return false;
 543
 544	if (get_user(byte, (const char __user *)ip))
 545		return false;
 546
 547	if (byte != 0xfa && byte != 0xfb)
 548		return false;
 549
 550	if (!t->iopl_warn && printk_ratelimit()) {
 551		pr_err("%s[%d] attempts to use CLI/STI, pretending it's a NOP, ip:%lx",
 552		       current->comm, task_pid_nr(current), ip);
 553		print_vma_addr(KERN_CONT " in ", ip);
 554		pr_cont("\n");
 555		t->iopl_warn = 1;
 556	}
 557
 558	regs->ip += 1;
 559	return true;
 560}
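/*
 * Note (illustrative): 0xfa and 0xfb are the single-byte CLI and STI
 * opcodes, which is why bumping regs->ip by exactly one skips the
 * instruction being treated as a NOP.
 */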
 561
 562/*
 563 * The unprivileged ENQCMD instruction generates #GPs if the
 564 * IA32_PASID MSR has not been populated.  If possible, populate
 565 * the MSR from a PASID previously allocated to the mm.
 566 */
 567static bool try_fixup_enqcmd_gp(void)
 568{
 569#ifdef CONFIG_ARCH_HAS_CPU_PASID
 570	u32 pasid;
 571
 572	/*
 573	 * MSR_IA32_PASID is managed using XSAVE.  Directly
 574	 * writing to the MSR is only possible when fpregs
 575	 * are valid and the fpstate is not.  This is
 576	 * guaranteed when handling a userspace exception
  577	 * *before* interrupts are re-enabled.
 578	 */
 579	lockdep_assert_irqs_disabled();
 580
 581	/*
 582	 * Hardware without ENQCMD will not generate
 583	 * #GPs that can be fixed up here.
 584	 */
 585	if (!cpu_feature_enabled(X86_FEATURE_ENQCMD))
 586		return false;
 587
 588	/*
 589	 * If the mm has not been allocated a
 590	 * PASID, the #GP can not be fixed up.
 591	 */
 592	if (!mm_valid_pasid(current->mm))
 593		return false;
 594
 595	pasid = mm_get_enqcmd_pasid(current->mm);
 596
 597	/*
 598	 * Did this thread already have its PASID activated?
 599	 * If so, the #GP must be from something else.
 600	 */
 601	if (current->pasid_activated)
 602		return false;
 603
 604	wrmsrl(MSR_IA32_PASID, pasid | MSR_IA32_PASID_VALID);
 605	current->pasid_activated = 1;
 606
 607	return true;
 608#else
 609	return false;
 610#endif
 611}
 612
 613static bool gp_try_fixup_and_notify(struct pt_regs *regs, int trapnr,
 614				    unsigned long error_code, const char *str,
 615				    unsigned long address)
 616{
 617	if (fixup_exception(regs, trapnr, error_code, address))
 618		return true;
 619
 620	current->thread.error_code = error_code;
 621	current->thread.trap_nr = trapnr;
 622
 623	/*
 624	 * To be potentially processing a kprobe fault and to trust the result
 625	 * from kprobe_running(), we have to be non-preemptible.
 626	 */
 627	if (!preemptible() && kprobe_running() &&
 628	    kprobe_fault_handler(regs, trapnr))
 629		return true;
 630
 631	return notify_die(DIE_GPF, str, regs, error_code, trapnr, SIGSEGV) == NOTIFY_STOP;
 632}
 633
 634static void gp_user_force_sig_segv(struct pt_regs *regs, int trapnr,
 635				   unsigned long error_code, const char *str)
 636{
 637	current->thread.error_code = error_code;
 638	current->thread.trap_nr = trapnr;
 639	show_signal(current, SIGSEGV, "", str, regs, error_code);
 640	force_sig(SIGSEGV);
 641}
 642
 643DEFINE_IDTENTRY_ERRORCODE(exc_general_protection)
 644{
 645	char desc[sizeof(GPFSTR) + 50 + 2*sizeof(unsigned long) + 1] = GPFSTR;
 646	enum kernel_gp_hint hint = GP_NO_HINT;
 647	unsigned long gp_addr;
 648
 649	if (user_mode(regs) && try_fixup_enqcmd_gp())
 650		return;
 651
 652	cond_local_irq_enable(regs);
 653
 654	if (static_cpu_has(X86_FEATURE_UMIP)) {
 655		if (user_mode(regs) && fixup_umip_exception(regs))
 656			goto exit;
 657	}
 658
 659	if (v8086_mode(regs)) {
 660		local_irq_enable();
 661		handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
 662		local_irq_disable();
 663		return;
 664	}
 665
 666	if (user_mode(regs)) {
 667		if (fixup_iopl_exception(regs))
 668			goto exit;
 669
 670		if (fixup_vdso_exception(regs, X86_TRAP_GP, error_code, 0))
 671			goto exit;
 672
 673		gp_user_force_sig_segv(regs, X86_TRAP_GP, error_code, desc);
 674		goto exit;
 675	}
 676
 677	if (gp_try_fixup_and_notify(regs, X86_TRAP_GP, error_code, desc, 0))
 678		goto exit;
 679
 680	if (error_code)
 681		snprintf(desc, sizeof(desc), "segment-related " GPFSTR);
 682	else
 683		hint = get_kernel_gp_address(regs, &gp_addr);
 684
 685	if (hint != GP_NO_HINT)
 686		snprintf(desc, sizeof(desc), GPFSTR ", %s 0x%lx",
 687			 (hint == GP_NON_CANONICAL) ? "probably for non-canonical address"
 688						    : "maybe for address",
 689			 gp_addr);
 690
 691	/*
 692	 * KASAN is interested only in the non-canonical case, clear it
 693	 * otherwise.
 694	 */
 695	if (hint != GP_NON_CANONICAL)
 696		gp_addr = 0;
 697
 698	die_addr(desc, regs, error_code, gp_addr);
 699
 700exit:
 701	cond_local_irq_disable(regs);
 702}
 703
 704static bool do_int3(struct pt_regs *regs)
 705{
 706	int res;
 707
 708#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
 709	if (kgdb_ll_trap(DIE_INT3, "int3", regs, 0, X86_TRAP_BP,
 710			 SIGTRAP) == NOTIFY_STOP)
 711		return true;
 712#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
 713
 714#ifdef CONFIG_KPROBES
 715	if (kprobe_int3_handler(regs))
 716		return true;
 717#endif
 718	res = notify_die(DIE_INT3, "int3", regs, 0, X86_TRAP_BP, SIGTRAP);
 719
 720	return res == NOTIFY_STOP;
 721}
 722NOKPROBE_SYMBOL(do_int3);
 723
 724static void do_int3_user(struct pt_regs *regs)
 725{
 726	if (do_int3(regs))
 727		return;
 728
 729	cond_local_irq_enable(regs);
 730	do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, 0, 0, NULL);
 731	cond_local_irq_disable(regs);
 732}
 733
 734DEFINE_IDTENTRY_RAW(exc_int3)
 735{
 736	/*
 737	 * poke_int3_handler() is completely self contained code; it does (and
 738	 * must) *NOT* call out to anything, lest it hits upon yet another
 739	 * INT3.
 740	 */
 741	if (poke_int3_handler(regs))
 742		return;
 743
 744	/*
 745	 * irqentry_enter_from_user_mode() uses static_branch_{,un}likely()
 746	 * and therefore can trigger INT3, hence poke_int3_handler() must
 747	 * be done before. If the entry came from kernel mode, then use
 748	 * nmi_enter() because the INT3 could have been hit in any context
 749	 * including NMI.
 750	 */
 751	if (user_mode(regs)) {
 752		irqentry_enter_from_user_mode(regs);
 753		instrumentation_begin();
 754		do_int3_user(regs);
 755		instrumentation_end();
 756		irqentry_exit_to_user_mode(regs);
 757	} else {
 758		irqentry_state_t irq_state = irqentry_nmi_enter(regs);
 759
 760		instrumentation_begin();
 761		if (!do_int3(regs))
 762			die("int3", regs, 0);
 763		instrumentation_end();
 764		irqentry_nmi_exit(regs, irq_state);
 765	}
 766}
 767
 768#ifdef CONFIG_X86_64
 769/*
 770 * Help handler running on a per-cpu (IST or entry trampoline) stack
 771 * to switch to the normal thread stack if the interrupted code was in
 772 * user mode. The actual stack switch is done in entry_64.S
 773 */
 774asmlinkage __visible noinstr struct pt_regs *sync_regs(struct pt_regs *eregs)
 775{
 776	struct pt_regs *regs = (struct pt_regs *)this_cpu_read(pcpu_hot.top_of_stack) - 1;
 777	if (regs != eregs)
 778		*regs = *eregs;
 779	return regs;
 780}
 781
 782#ifdef CONFIG_AMD_MEM_ENCRYPT
 783asmlinkage __visible noinstr struct pt_regs *vc_switch_off_ist(struct pt_regs *regs)
 784{
 785	unsigned long sp, *stack;
 786	struct stack_info info;
 787	struct pt_regs *regs_ret;
 788
 789	/*
 790	 * In the SYSCALL entry path the RSP value comes from user-space - don't
 791	 * trust it and switch to the current kernel stack
 792	 */
 793	if (ip_within_syscall_gap(regs)) {
 794		sp = this_cpu_read(pcpu_hot.top_of_stack);
 795		goto sync;
 796	}
 797
 798	/*
 799	 * From here on the RSP value is trusted. Now check whether entry
 800	 * happened from a safe stack. Not safe are the entry or unknown stacks,
 801	 * use the fall-back stack instead in this case.
 802	 */
 803	sp    = regs->sp;
 804	stack = (unsigned long *)sp;
 805
 806	if (!get_stack_info_noinstr(stack, current, &info) || info.type == STACK_TYPE_ENTRY ||
 807	    info.type > STACK_TYPE_EXCEPTION_LAST)
 808		sp = __this_cpu_ist_top_va(VC2);
 809
 810sync:
 811	/*
 812	 * Found a safe stack - switch to it as if the entry didn't happen via
 813	 * IST stack. The code below only copies pt_regs, the real switch happens
 814	 * in assembly code.
 815	 */
 816	sp = ALIGN_DOWN(sp, 8) - sizeof(*regs_ret);
 817
 818	regs_ret = (struct pt_regs *)sp;
 819	*regs_ret = *regs;
 820
 821	return regs_ret;
 822}
 823#endif
 824
 825asmlinkage __visible noinstr struct pt_regs *fixup_bad_iret(struct pt_regs *bad_regs)
 826{
 827	struct pt_regs tmp, *new_stack;
 828
 829	/*
 830	 * This is called from entry_64.S early in handling a fault
 831	 * caused by a bad iret to user mode.  To handle the fault
 832	 * correctly, we want to move our stack frame to where it would
 833	 * be had we entered directly on the entry stack (rather than
 834	 * just below the IRET frame) and we want to pretend that the
 835	 * exception came from the IRET target.
 836	 */
 837	new_stack = (struct pt_regs *)__this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
 838
 839	/* Copy the IRET target to the temporary storage. */
 840	__memcpy(&tmp.ip, (void *)bad_regs->sp, 5*8);
 841
 842	/* Copy the remainder of the stack from the current stack. */
 843	__memcpy(&tmp, bad_regs, offsetof(struct pt_regs, ip));
 844
 845	/* Update the entry stack */
 846	__memcpy(new_stack, &tmp, sizeof(tmp));
 847
 848	BUG_ON(!user_mode(new_stack));
 849	return new_stack;
 850}
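/*
 * Layout note (illustrative): the 5*8 bytes copied into &tmp.ip above
 * are the hardware IRET frame - RIP, CS, RFLAGS, RSP, SS - which is
 * exactly the tail of struct pt_regs, so tmp ends up looking like a
 * normal entry frame for the IRET target.
 */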
 851#endif
 852
 853static bool is_sysenter_singlestep(struct pt_regs *regs)
 854{
 855	/*
 856	 * We don't try for precision here.  If we're anywhere in the region of
 857	 * code that can be single-stepped in the SYSENTER entry path, then
 858	 * assume that this is a useless single-step trap due to SYSENTER
 859	 * being invoked with TF set.  (We don't know in advance exactly
 860	 * which instructions will be hit because BTF could plausibly
 861	 * be set.)
 862	 */
 863#ifdef CONFIG_X86_32
 864	return (regs->ip - (unsigned long)__begin_SYSENTER_singlestep_region) <
 865		(unsigned long)__end_SYSENTER_singlestep_region -
 866		(unsigned long)__begin_SYSENTER_singlestep_region;
 867#elif defined(CONFIG_IA32_EMULATION)
 868	return (regs->ip - (unsigned long)entry_SYSENTER_compat) <
 869		(unsigned long)__end_entry_SYSENTER_compat -
 870		(unsigned long)entry_SYSENTER_compat;
 871#else
 872	return false;
 873#endif
 874}
 875
 876static __always_inline unsigned long debug_read_clear_dr6(void)
 877{
 878	unsigned long dr6;
 879
 880	/*
 881	 * The Intel SDM says:
 882	 *
 883	 *   Certain debug exceptions may clear bits 0-3. The remaining
 884	 *   contents of the DR6 register are never cleared by the
 885	 *   processor. To avoid confusion in identifying debug
 886	 *   exceptions, debug handlers should clear the register before
 887	 *   returning to the interrupted task.
 888	 *
 889	 * Keep it simple: clear DR6 immediately.
 890	 */
 891	get_debugreg(dr6, 6);
 892	set_debugreg(DR6_RESERVED, 6);
 893	dr6 ^= DR6_RESERVED; /* Flip to positive polarity */
 894
 895	return dr6;
 896}
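/*
 * Worked example (assumes DR6_RESERVED == 0xffff0ff0 from
 * <asm/debugreg.h>): after a single-step #DB, typical hardware reads
 * back dr6 == 0xffff4ff0; writing DR6_RESERVED re-arms the register and
 * the XOR yields 0x4000 == DR_STEP, leaving only positive-polarity
 * event bits set.
 */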
 897
 898/*
 899 * Our handling of the processor debug registers is non-trivial.
 900 * We do not clear them on entry and exit from the kernel. Therefore
 901 * it is possible to get a watchpoint trap here from inside the kernel.
 902 * However, the code in ./ptrace.c has ensured that the user can
 903 * only set watchpoints on userspace addresses. Therefore the in-kernel
 904 * watchpoint trap can only occur in code which is reading/writing
 905 * from user space. Such code must not hold kernel locks (since it
 906 * can equally take a page fault), therefore it is safe to call
 907 * force_sig_info even though that claims and releases locks.
 908 *
 909 * Code in ./signal.c ensures that the debug control register
 910 * is restored before we deliver any signal, and therefore that
 911 * user code runs with the correct debug control register even though
 912 * we clear it here.
 913 *
 914 * Being careful here means that we don't have to be as careful in a
 915 * lot of more complicated places (task switching can be a bit lazy
 916 * about restoring all the debug state, and ptrace doesn't have to
 917 * find every occurrence of the TF bit that could be saved away even
 918 * by user code)
 919 *
 920 * May run on IST stack.
 921 */
 922
 923static bool notify_debug(struct pt_regs *regs, unsigned long *dr6)
 924{
 925	/*
 926	 * Notifiers will clear bits in @dr6 to indicate the event has been
 927	 * consumed - hw_breakpoint_handler(), single_stop_cont().
 928	 *
 929	 * Notifiers will set bits in @virtual_dr6 to indicate the desire
 930	 * for signals - ptrace_triggered(), kgdb_hw_overflow_handler().
 931	 */
 932	if (notify_die(DIE_DEBUG, "debug", regs, (long)dr6, 0, SIGTRAP) == NOTIFY_STOP)
 933		return true;
 934
 935	return false;
 936}
 937
 938static __always_inline void exc_debug_kernel(struct pt_regs *regs,
 939					     unsigned long dr6)
 940{
 941	/*
 942	 * Disable breakpoints during exception handling; recursive exceptions
 943	 * are exceedingly 'fun'.
 944	 *
 945	 * Since this function is NOKPROBE, and that also applies to
 946	 * HW_BREAKPOINT_X, we can't hit a breakpoint before this (XXX except a
 947	 * HW_BREAKPOINT_W on our stack)
 948	 *
  949	 * Entry text is excluded for HW_BP_X, and cpu_entry_area, which
  950	 * includes the entry stack, is excluded for everything.
 951	 */
 952	unsigned long dr7 = local_db_save();
 953	irqentry_state_t irq_state = irqentry_nmi_enter(regs);
 954	instrumentation_begin();
 955
 956	/*
 957	 * If something gets miswired and we end up here for a user mode
 958	 * #DB, we will malfunction.
 959	 */
 960	WARN_ON_ONCE(user_mode(regs));
 961
 962	if (test_thread_flag(TIF_BLOCKSTEP)) {
 963		/*
 964		 * The SDM says "The processor clears the BTF flag when it
  965		 * generates a debug exception."  PTRACE_BLOCKSTEP requested
 966		 * it for userspace, but we just took a kernel #DB, so re-set
 967		 * BTF.
 968		 */
 969		unsigned long debugctl;
 970
 971		rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
 972		debugctl |= DEBUGCTLMSR_BTF;
 973		wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
 974	}
 975
 976	/*
 977	 * Catch SYSENTER with TF set and clear DR_STEP. If this hit a
 978	 * watchpoint at the same time then that will still be handled.
 979	 */
 980	if ((dr6 & DR_STEP) && is_sysenter_singlestep(regs))
 981		dr6 &= ~DR_STEP;
 982
 983	/*
 984	 * The kernel doesn't use INT1
 985	 */
 986	if (!dr6)
 987		goto out;
 988
 989	if (notify_debug(regs, &dr6))
 990		goto out;
 991
 992	/*
 993	 * The kernel doesn't use TF single-step outside of:
 994	 *
 995	 *  - Kprobes, consumed through kprobe_debug_handler()
 996	 *  - KGDB, consumed through notify_debug()
 997	 *
 998	 * So if we get here with DR_STEP set, something is wonky.
 999	 *
1000	 * A known way to trigger this is through QEMU's GDB stub,
1001	 * which leaks #DB into the guest and causes IST recursion.
1002	 */
1003	if (WARN_ON_ONCE(dr6 & DR_STEP))
1004		regs->flags &= ~X86_EFLAGS_TF;
1005out:
1006	instrumentation_end();
1007	irqentry_nmi_exit(regs, irq_state);
1008
1009	local_db_restore(dr7);
1010}
1011
1012static __always_inline void exc_debug_user(struct pt_regs *regs,
1013					   unsigned long dr6)
1014{
1015	bool icebp;
1016
1017	/*
1018	 * If something gets miswired and we end up here for a kernel mode
1019	 * #DB, we will malfunction.
1020	 */
1021	WARN_ON_ONCE(!user_mode(regs));
1022
1023	/*
1024	 * NB: We can't easily clear DR7 here because
1025	 * irqentry_exit_to_usermode() can invoke ptrace, schedule, access
1026	 * user memory, etc.  This means that a recursive #DB is possible.  If
1027	 * this happens, that #DB will hit exc_debug_kernel() and clear DR7.
1028	 * Since we're not on the IST stack right now, everything will be
1029	 * fine.
1030	 */
1031
1032	irqentry_enter_from_user_mode(regs);
1033	instrumentation_begin();
1034
1035	/*
1036	 * Start the virtual/ptrace DR6 value with just the DR_STEP mask
1037	 * of the real DR6. ptrace_triggered() will set the DR_TRAPn bits.
1038	 *
1039	 * Userspace expects DR_STEP to be visible in ptrace_get_debugreg(6)
1040	 * even if it is not the result of PTRACE_SINGLESTEP.
1041	 */
1042	current->thread.virtual_dr6 = (dr6 & DR_STEP);
1043
1044	/*
1045	 * The SDM says "The processor clears the BTF flag when it
1046	 * generates a debug exception."  Clear TIF_BLOCKSTEP to keep
1047	 * TIF_BLOCKSTEP in sync with the hardware BTF flag.
1048	 */
1049	clear_thread_flag(TIF_BLOCKSTEP);
1050
1051	/*
1052	 * If dr6 gives us no clue about the origin of this trap, then it's
1053	 * very likely the result of an icebp/int01 trap. The user wants a
1054	 * SIGTRAP for that.
1055	 */
1056	icebp = !dr6;
1057
1058	if (notify_debug(regs, &dr6))
1059		goto out;
1060
1061	/* It's safe to allow irq's after DR6 has been saved */
1062	local_irq_enable();
1063
1064	if (v8086_mode(regs)) {
1065		handle_vm86_trap((struct kernel_vm86_regs *)regs, 0, X86_TRAP_DB);
1066		goto out_irq;
1067	}
1068
1069	/* #DB for bus lock can only be triggered from userspace. */
1070	if (dr6 & DR_BUS_LOCK)
1071		handle_bus_lock(regs);
1072
1073	/* Add the virtual_dr6 bits for signals. */
1074	dr6 |= current->thread.virtual_dr6;
1075	if (dr6 & (DR_STEP | DR_TRAP_BITS) || icebp)
1076		send_sigtrap(regs, 0, get_si_code(dr6));
1077
1078out_irq:
1079	local_irq_disable();
1080out:
1081	instrumentation_end();
1082	irqentry_exit_to_user_mode(regs);
1083}
1084
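/*
 * [Editor's sketch, not part of traps.c] The si_code passed to
 * send_sigtrap() above is derived from DR6 roughly as follows (the
 * real get_si_code() may differ in detail between kernel versions):
 */
#if 0	/* simplified illustration */
static int sketch_si_code(unsigned long dr6)
{
	if (dr6 & DR_STEP)
		return TRAP_TRACE;	/* single-step */
	if (dr6 & (DR_TRAP0 | DR_TRAP1 | DR_TRAP2 | DR_TRAP3))
		return TRAP_HWBKPT;	/* hardware breakpoint */
	return TRAP_BRKPT;		/* icebp/int01 and the rest */
}
#endif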
1085#ifdef CONFIG_X86_64
1086/* IST stack entry */
1087DEFINE_IDTENTRY_DEBUG(exc_debug)
1088{
1089	exc_debug_kernel(regs, debug_read_clear_dr6());
1090}
1091
1092/* User entry, runs on regular task stack */
1093DEFINE_IDTENTRY_DEBUG_USER(exc_debug)
1094{
1095	exc_debug_user(regs, debug_read_clear_dr6());
1096}
1097#else
1098/* 32 bit does not have separate entry points. */
1099DEFINE_IDTENTRY_RAW(exc_debug)
1100{
1101	unsigned long dr6 = debug_read_clear_dr6();
1102
1103	if (user_mode(regs))
1104		exc_debug_user(regs, dr6);
1105	else
1106		exc_debug_kernel(regs, dr6);
1107}
1108#endif
1109
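/*
 * [Editor's note, not part of traps.c] For readers unfamiliar with the
 * DEFINE_IDTENTRY* macros used throughout this file: DEFINE_IDTENTRY()
 * roughly expands as sketched below. See asm/idtentry.h for the
 * authoritative definitions; the RAW/DEBUG variants differ in how much
 * entry/exit work is done around the handler body.
 */
#if 0	/* rough expansion of DEFINE_IDTENTRY(func) */
static __always_inline void __func(struct pt_regs *regs);

__visible noinstr void func(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	instrumentation_begin();
	__func(regs);
	instrumentation_end();
	irqentry_exit(regs, state);
}

static __always_inline void __func(struct pt_regs *regs)
/* { handler body follows } */
#endif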
1110/*
1111 * Note that we play around with the 'TS' bit in an attempt to get
1112 * the correct behaviour even in the presence of the asynchronous
1113 * IRQ13 behaviour.
1114 */
1115static void math_error(struct pt_regs *regs, int trapnr)
1116{
1117	struct task_struct *task = current;
1118	struct fpu *fpu = &task->thread.fpu;
1119	int si_code;
1120	char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
1121						"simd exception";
1122
1123	cond_local_irq_enable(regs);
1124
1125	if (!user_mode(regs)) {
1126		if (fixup_exception(regs, trapnr, 0, 0))
1127			goto exit;
1128
1129		task->thread.error_code = 0;
1130		task->thread.trap_nr = trapnr;
1131
1132		if (notify_die(DIE_TRAP, str, regs, 0, trapnr,
1133			       SIGFPE) != NOTIFY_STOP)
1134			die(str, regs, 0);
1135		goto exit;
1136	}
1137
1138	/*
1139	 * Synchronize the FPU register state to the memory register state
1140	 * if necessary. This allows the exception handler to inspect it.
1141	 */
1142	fpu_sync_fpstate(fpu);
1143
1144	task->thread.trap_nr	= trapnr;
1145	task->thread.error_code = 0;
1146
1147	si_code = fpu__exception_code(fpu, trapnr);
1148	/* Retry when we get spurious exceptions: */
1149	if (!si_code)
1150		goto exit;
1151
1152	if (fixup_vdso_exception(regs, trapnr, 0, 0))
1153		goto exit;
1154
1155	force_sig_fault(SIGFPE, si_code,
1156			(void __user *)uprobe_get_trap_addr(regs));
1157exit:
1158	cond_local_irq_disable(regs);
1159}
1160
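/*
 * [Editor's sketch, not part of traps.c] fpu__exception_code() used in
 * math_error() above maps the unmasked FPU status-word (for #MF) or
 * MXCSR (for #XF) exception bits to an FPE_* si_code, approximately:
 */
#if 0	/* simplified illustration */
static int sketch_fpe_code(unsigned short err)
{
	if (err & 0x001)	/* IE: invalid operation */
		return FPE_FLTINV;
	if (err & 0x004)	/* ZE: divide by zero */
		return FPE_FLTDIV;
	if (err & 0x008)	/* OE: overflow */
		return FPE_FLTOVF;
	if (err & 0x012)	/* DE/UE: denormal, underflow */
		return FPE_FLTUND;
	if (err & 0x020)	/* PE: precision */
		return FPE_FLTRES;

	return 0;		/* spurious; retry the instruction */
}
#endif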
1161DEFINE_IDTENTRY(exc_coprocessor_error)
1162{
1163	math_error(regs, X86_TRAP_MF);
1164}
1165
1166DEFINE_IDTENTRY(exc_simd_coprocessor_error)
1167{
1168	if (IS_ENABLED(CONFIG_X86_INVD_BUG)) {
1169		/* AMD 486 bug: INVD in CPL 0 raises #XF instead of #GP */
1170		if (!static_cpu_has(X86_FEATURE_XMM)) {
1171			__exc_general_protection(regs, 0);
1172			return;
1173		}
1174	}
1175	math_error(regs, X86_TRAP_XF);
1176}
1177
1178DEFINE_IDTENTRY(exc_spurious_interrupt_bug)
1179{
1180	/*
1181	 * This addresses a Pentium Pro Erratum:
1182	 *
1183	 * PROBLEM: If the APIC subsystem is configured in mixed mode with
1184	 * Virtual Wire mode implemented through the local APIC, an
1185	 * interrupt vector of 0Fh (Intel reserved encoding) may be
1186	 * generated by the local APIC (Int 15).  This vector may be
1187	 * generated upon receipt of a spurious interrupt (an interrupt
1188	 * which is removed before the system receives the INTA sequence)
1189	 * instead of the programmed 8259 spurious interrupt vector.
1190	 *
1191	 * IMPLICATION: The spurious interrupt vector programmed in the
1192	 * 8259 is normally handled by an operating system's spurious
1193	 * interrupt handler. However, a vector of 0Fh is unknown to some
1194	 * operating systems, which would crash if this erratum occurred.
1195	 *
1196	 * In theory this could be limited to 32-bit, but the handler does no
1197	 * harm and who knows which other CPUs suffer from this.
1198	 */
1199}
1200
1201static bool handle_xfd_event(struct pt_regs *regs)
1202{
1203	u64 xfd_err;
1204	int err;
1205
1206	if (!IS_ENABLED(CONFIG_X86_64) || !cpu_feature_enabled(X86_FEATURE_XFD))
1207		return false;
1208
1209	rdmsrl(MSR_IA32_XFD_ERR, xfd_err);
1210	if (!xfd_err)
1211		return false;
1212
1213	wrmsrl(MSR_IA32_XFD_ERR, 0);
1214
1215	/* Die if that happens in kernel space */
1216	if (WARN_ON(!user_mode(regs)))
1217		return false;
1218
1219	local_irq_enable();
1220
1221	err = xfd_enable_feature(xfd_err);
1222
1223	switch (err) {
1224	case -EPERM:
1225		force_sig_fault(SIGILL, ILL_ILLOPC, error_get_trap_addr(regs));
1226		break;
1227	case -EFAULT:
1228		force_sig(SIGSEGV);
1229		break;
1230	}
1231
1232	local_irq_disable();
1233	return true;
1234}
1235
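/*
 * [Editor's sketch, not part of traps.c] Userspace view of the XFD
 * handling above: touching an AMX tile register without prior
 * permission makes xfd_enable_feature() fail with -EPERM and the task
 * gets SIGILL; requesting permission first lets this #NM handler
 * allocate the larger fpstate transparently. ARCH_REQ_XCOMP_PERM
 * (0x1023) and XFEATURE_XTILEDATA (18) match the Linux 5.16+ uapi,
 * but verify the values against your headers.
 */
#if 0	/* userspace illustration */
#include <sys/syscall.h>
#include <unistd.h>

#define ARCH_REQ_XCOMP_PERM	0x1023
#define XFEATURE_XTILEDATA	18

static int request_amx_permission(void)
{
	return syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_PERM,
		       XFEATURE_XTILEDATA);
}
#endif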
1236DEFINE_IDTENTRY(exc_device_not_available)
1237{
1238	unsigned long cr0 = read_cr0();
1239
1240	if (handle_xfd_event(regs))
1241		return;
1242
1243#ifdef CONFIG_MATH_EMULATION
1244	if (!boot_cpu_has(X86_FEATURE_FPU) && (cr0 & X86_CR0_EM)) {
1245		struct math_emu_info info = { };
1246
1247		cond_local_irq_enable(regs);
1248
1249		info.regs = regs;
1250		math_emulate(&info);
1251
1252		cond_local_irq_disable(regs);
1253		return;
1254	}
1255#endif
1256
1257	/* This should not happen. */
1258	if (WARN(cr0 & X86_CR0_TS, "CR0.TS was set")) {
1259		/* Try to fix it up and carry on. */
1260		write_cr0(cr0 & ~X86_CR0_TS);
1261	} else {
1262		/*
1263		 * Something terrible happened, and we're better off trying
1264		 * to kill the task than getting stuck in a never-ending
1265		 * loop of #NM faults.
1266		 */
1267		die("unexpected #NM exception", regs, 0);
1268	}
1269}
1270
1271#ifdef CONFIG_INTEL_TDX_GUEST
1272
1273#define VE_FAULT_STR "VE fault"
1274
1275static void ve_raise_fault(struct pt_regs *regs, long error_code,
1276			   unsigned long address)
1277{
1278	if (user_mode(regs)) {
1279		gp_user_force_sig_segv(regs, X86_TRAP_VE, error_code, VE_FAULT_STR);
1280		return;
1281	}
1282
1283	if (gp_try_fixup_and_notify(regs, X86_TRAP_VE, error_code,
1284				    VE_FAULT_STR, address)) {
1285		return;
1286	}
1287
1288	die_addr(VE_FAULT_STR, regs, error_code, address);
1289}
1290
1291/*
1292 * Virtualization Exceptions (#VE) are delivered to TDX guests due to
1293 * specific guest actions which may happen in either user space or the
1294 * kernel:
1295 *
1296 *  * Specific instructions (WBINVD, for example)
1297 *  * Specific MSR accesses
1298 *  * Specific CPUID leaf accesses
1299 *  * Access to specific guest physical addresses
1300 *
1301 * In the settings that Linux will run in, virtualization exceptions are
1302 * never generated on accesses to normal, TD-private memory that has been
1303 * accepted (by BIOS or with tdx_enc_status_changed()).
1304 *
1305 * Syscall entry code has a critical window where the kernel stack is not
1306 * yet set up. Any exception in this window leads to hard to debug issues
1307 * and can be exploited for privilege escalation. Exceptions in the NMI
1308 * entry code also cause issues. Returning from the exception handler with
1309 * IRET will re-enable NMIs and nested NMI will corrupt the NMI stack.
1310 *
1311 * For these reasons, the kernel avoids #VEs during the syscall gap and
1312 * the NMI entry code. Entry code paths do not access TD-shared memory
1313 * or MMIO regions, and do not use #VE-triggering MSRs, instructions,
1314 * or CPUID leaves that might generate #VE. The VMM can remove memory
1315 * from the TD at any point, but access to unaccepted (or missing)
1316 * private memory leads to VM termination, not to #VE.
1317 *
1318 * Similarly to page faults and breakpoints, #VEs are allowed in NMI
1319 * handlers once the kernel is ready to deal with nested NMIs.
1320 *
1321 * During #VE delivery, all interrupts, including NMIs, are blocked until
1322 * TDGETVEINFO is called. This prevents #VE nesting until the kernel reads
1323 * the VE info.
1324 *
1325 * If a guest kernel action which would normally cause a #VE occurs in
1326 * the interrupt-disabled region before TDGETVEINFO, a #DF (double
1327 * fault) is delivered to the guest, which will result in an oops.
1328 *
1329 * The entry code has been audited carefully to follow these expectations.
1330 * Changes in the entry code have to be audited for correctness vs. this
1331 * aspect. Similarly to #PF, a #VE in these places will expose the kernel
1332 * to privilege escalation or may lead to random crashes.
1333 */
1334DEFINE_IDTENTRY(exc_virtualization_exception)
1335{
1336	struct ve_info ve;
1337
1338	/*
1339	 * NMIs/Machine-checks/Interrupts will be in a disabled state
1340	 * till TDGETVEINFO TDCALL is executed. This ensures that VE
1341	 * info cannot be overwritten by a nested #VE.
1342	 */
1343	tdx_get_ve_info(&ve);
1344
1345	cond_local_irq_enable(regs);
1346
1347	/*
1348	 * If tdx_handle_virt_exception() cannot process
1349	 * it successfully, treat it as #GP(0) and handle it.
1350	 */
1351	if (!tdx_handle_virt_exception(regs, &ve))
1352		ve_raise_fault(regs, 0, ve.gla);
1353
1354	cond_local_irq_disable(regs);
1355}
1356
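/*
 * [Editor's reference, not part of traps.c] The ve_info structure
 * filled in by tdx_get_ve_info() above carries the raw #VE exit
 * information from the TDX module. Layout per asm/tdx.h, shown here
 * for illustration; it may differ between kernel versions:
 */
#if 0
struct ve_info {
	u64 exit_reason;
	u64 exit_qual;		/* exit qualification */
	u64 gla;		/* guest linear address */
	u64 gpa;		/* guest physical address */
	u32 instr_len;		/* instruction length */
	u32 instr_info;		/* instruction information */
};
#endif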
1357#endif
1358
1359#ifdef CONFIG_X86_32
1360DEFINE_IDTENTRY_SW(iret_error)
1361{
1362	local_irq_enable();
1363	if (notify_die(DIE_TRAP, "iret exception", regs, 0,
1364			X86_TRAP_IRET, SIGILL) != NOTIFY_STOP) {
1365		do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, 0,
1366			ILL_BADSTK, (void __user *)NULL);
1367	}
1368	local_irq_disable();
1369}
1370#endif
1371
1372void __init trap_init(void)
1373{
1374	/* Init cpu_entry_area before IST entries are set up */
1375	setup_cpu_entry_areas();
1376
1377	/* Init GHCB memory pages when running as an SEV-ES guest */
1378	sev_es_init_vc_handling();
1379
1380	/* Initialize TSS before setting up traps so ISTs work */
1381	cpu_init_exception_handling();
1382	/* Setup traps as cpu_init() might #GP */
1383	idt_setup_traps();
1384	cpu_init();
1385}