v4.6
   1/*
   2 *  linux/arch/x86_64/entry.S
   3 *
   4 *  Copyright (C) 1991, 1992  Linus Torvalds
   5 *  Copyright (C) 2000, 2001, 2002  Andi Kleen SuSE Labs
   6 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
   7 *
   8 * entry.S contains the system-call and fault low-level handling routines.
   9 *
  10 * Some of this is documented in Documentation/x86/entry_64.txt
  11 *
  12 * A note on terminology:
  13 * - iret frame:	Architecture defined interrupt frame from SS to RIP
  14 *			at the top of the kernel process stack.
  15 *
  16 * Some macro usage:
  17 * - ENTRY/END:		Define functions in the symbol table.
  18 * - TRACE_IRQ_*:	Trace hardirq state for lock debugging.
  19 * - idtentry:		Define exception entry points.
  20 */
  21#include <linux/linkage.h>
  22#include <asm/segment.h>
  23#include <asm/cache.h>
  24#include <asm/errno.h>
  25#include "calling.h"
  26#include <asm/asm-offsets.h>
  27#include <asm/msr.h>
  28#include <asm/unistd.h>
  29#include <asm/thread_info.h>
  30#include <asm/hw_irq.h>
  31#include <asm/page_types.h>
  32#include <asm/irqflags.h>
  33#include <asm/paravirt.h>
  34#include <asm/percpu.h>
  35#include <asm/asm.h>
  36#include <asm/smap.h>
  37#include <asm/pgtable_types.h>
  38#include <linux/err.h>
  39
  40/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
  41#include <linux/elf-em.h>
  42#define AUDIT_ARCH_X86_64			(EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
  43#define __AUDIT_ARCH_64BIT			0x80000000
  44#define __AUDIT_ARCH_LE				0x40000000
  45
  46.code64
  47.section .entry.text, "ax"
  48
  49#ifdef CONFIG_PARAVIRT
  50ENTRY(native_usergs_sysret64)
  51	swapgs
  52	sysretq
  53ENDPROC(native_usergs_sysret64)
  54#endif /* CONFIG_PARAVIRT */
  55
  56.macro TRACE_IRQS_IRETQ
  57#ifdef CONFIG_TRACE_IRQFLAGS
  58	bt	$9, EFLAGS(%rsp)		/* interrupts off? */
  59	jnc	1f
  60	TRACE_IRQS_ON
  611:
  62#endif
  63.endm
  64
  65/*
  66 * When dynamic function tracer is enabled it will add a breakpoint
  67 * to all locations that it is about to modify, sync CPUs, update
  68 * all the code, sync CPUs, then remove the breakpoints. In this time
  69 * if lockdep is enabled, it might jump back into the debug handler
  70 * outside the updating of the IST protection. (TRACE_IRQS_ON/OFF).
  71 *
  72 * We need to change the IDT table before calling TRACE_IRQS_ON/OFF to
  73 * make sure the stack pointer does not get reset back to the top
  74 * of the debug stack, and instead just reuses the current stack.
  75 */
  76#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_TRACE_IRQFLAGS)
  77
  78.macro TRACE_IRQS_OFF_DEBUG
  79	call	debug_stack_set_zero
  80	TRACE_IRQS_OFF
  81	call	debug_stack_reset
  82.endm
  83
  84.macro TRACE_IRQS_ON_DEBUG
  85	call	debug_stack_set_zero
  86	TRACE_IRQS_ON
  87	call	debug_stack_reset
  88.endm
  89
  90.macro TRACE_IRQS_IRETQ_DEBUG
  91	bt	$9, EFLAGS(%rsp)		/* interrupts off? */
  92	jnc	1f
  93	TRACE_IRQS_ON_DEBUG
  941:
  95.endm
  96
  97#else
  98# define TRACE_IRQS_OFF_DEBUG			TRACE_IRQS_OFF
  99# define TRACE_IRQS_ON_DEBUG			TRACE_IRQS_ON
 100# define TRACE_IRQS_IRETQ_DEBUG			TRACE_IRQS_IRETQ
 101#endif
 102
 103/*
 104 * 64-bit SYSCALL instruction entry. Up to 6 arguments in registers.
 105 *
 106 * This is the only entry point used for 64-bit system calls.  The
 107 * hardware interface is reasonably well designed and the register to
 108 * argument mapping Linux uses fits well with the registers that are
 109 * available when SYSCALL is used.
 110 *
 111 * SYSCALL instructions can be found inlined in libc implementations as
 112 * well as some other programs and libraries.  There are also a handful
 113 * of SYSCALL instructions in the vDSO used, for example, as a
 114 * clock_gettimeofday fallback.
 115 *
 116 * 64-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
 117 * then loads new ss, cs, and rip from previously programmed MSRs.
 118 * rflags gets masked by a value from another MSR (so CLD and CLAC
 119 * are not needed). SYSCALL does not save anything on the stack
 120 * and does not change rsp.
 121 *
 122 * Registers on entry:
 123 * rax  system call number
 124 * rcx  return address
 125 * r11  saved rflags (note: r11 is callee-clobbered register in C ABI)
 126 * rdi  arg0
 127 * rsi  arg1
 128 * rdx  arg2
 129 * r10  arg3 (needs to be moved to rcx to conform to C ABI)
 130 * r8   arg4
 131 * r9   arg5
 132 * (note: r12-r15, rbp, rbx are callee-preserved in C ABI)
 133 *
 134 * Only called from user space.
 135 *
 136 * When the user can change pt_regs->foo, always force IRET. That is because
 137 * it deals with uncanonical addresses better. SYSRET has trouble
 138 * with them due to bugs in both AMD and Intel CPUs.
 139 */
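/*
 * For illustration, a minimal user-space caller that follows the register
 * convention above might look like this (a sketch; "msg" and "len" are
 * placeholder symbols, and 1 is the x86-64 write(2) syscall number):
 *
 *           movq	$1, %rax		# system call number (__NR_write)
 *           movq	$1, %rdi		# arg0: fd = stdout
 *           leaq	msg(%rip), %rsi		# arg1: buffer
 *           movq	$len, %rdx		# arg2: count
 *           syscall
 *           # on return: rax holds the result; rcx and r11 are clobbered
 */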
 140
 141ENTRY(entry_SYSCALL_64)
 142	/*
 143	 * Interrupts are off on entry.
 144	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
 145	 * it is too small to ever cause noticeable irq latency.
 146	 */
 147	SWAPGS_UNSAFE_STACK
 148	/*
 149	 * A hypervisor implementation might want to use a label
 150	 * after the swapgs, so that it can do the swapgs
 151	 * for the guest and jump here on syscall.
 152	 */
 153GLOBAL(entry_SYSCALL_64_after_swapgs)
 154
 155	movq	%rsp, PER_CPU_VAR(rsp_scratch)
 156	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
 157
 158	TRACE_IRQS_OFF
 159
 160	/* Construct struct pt_regs on stack */
 161	pushq	$__USER_DS			/* pt_regs->ss */
 162	pushq	PER_CPU_VAR(rsp_scratch)	/* pt_regs->sp */
 163	pushq	%r11				/* pt_regs->flags */
 164	pushq	$__USER_CS			/* pt_regs->cs */
 165	pushq	%rcx				/* pt_regs->ip */
 166	pushq	%rax				/* pt_regs->orig_ax */
 167	pushq	%rdi				/* pt_regs->di */
 168	pushq	%rsi				/* pt_regs->si */
 169	pushq	%rdx				/* pt_regs->dx */
 170	pushq	%rcx				/* pt_regs->cx */
 171	pushq	$-ENOSYS			/* pt_regs->ax */
 172	pushq	%r8				/* pt_regs->r8 */
 173	pushq	%r9				/* pt_regs->r9 */
 174	pushq	%r10				/* pt_regs->r10 */
 175	pushq	%r11				/* pt_regs->r11 */
 176	sub	$(6*8), %rsp			/* pt_regs->bp, bx, r12-15 not saved */
 177
 178	/*
 179	 * If we need to do entry work or if we guess we'll need to do
 180	 * exit work, go straight to the slow path.
 181	 */
 182	testl	$_TIF_WORK_SYSCALL_ENTRY|_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
 183	jnz	entry_SYSCALL64_slow_path
 184
 185entry_SYSCALL_64_fastpath:
 186	/*
 187	 * Easy case: enable interrupts and issue the syscall.  If the syscall
 188	 * needs pt_regs, we'll call a stub that disables interrupts again
 189	 * and jumps to the slow path.
 190	 */
 191	TRACE_IRQS_ON
 192	ENABLE_INTERRUPTS(CLBR_NONE)
 193#if __SYSCALL_MASK == ~0
 194	cmpq	$__NR_syscall_max, %rax
 195#else
 196	andl	$__SYSCALL_MASK, %eax
 197	cmpl	$__NR_syscall_max, %eax
 198#endif
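	/*
	 * Note on the masking path above: it is compiled in only when
	 * CONFIG_X86_X32_ABI is enabled, in which case __SYSCALL_MASK is
	 * ~__X32_SYSCALL_BIT (bit 30), so the x32 marker bit is stripped and
	 * x32 calls index the same range of sys_call_table slots as native
	 * 64-bit calls.
	 */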
 199	ja	1f				/* return -ENOSYS (already in pt_regs->ax) */
 200	movq	%r10, %rcx
 201
 202	/*
 203	 * This call instruction is handled specially in stub_ptregs_64.
 204	 * It might end up jumping to the slow path.  If it jumps, RAX
 205	 * and all argument registers are clobbered.
 206	 */
 207	call	*sys_call_table(, %rax, 8)
 208.Lentry_SYSCALL_64_after_fastpath_call:
 209
 210	movq	%rax, RAX(%rsp)
 2111:
 212
 213	/*
 214	 * If we get here, then we know that pt_regs is clean for SYSRET64.
 215	 * If we see that no exit work is required (which we are required
 216	 * to check with IRQs off), then we can go straight to SYSRET64.
 217	 */
 218	DISABLE_INTERRUPTS(CLBR_NONE)
 219	TRACE_IRQS_OFF
 220	testl	$_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
 221	jnz	1f
 222
 223	LOCKDEP_SYS_EXIT
 224	TRACE_IRQS_ON		/* user mode is traced as IRQs on */
 225	movq	RIP(%rsp), %rcx
 226	movq	EFLAGS(%rsp), %r11
 227	RESTORE_C_REGS_EXCEPT_RCX_R11
 228	movq	RSP(%rsp), %rsp
 229	USERGS_SYSRET64
 230
 2311:
 232	/*
 233	 * The fast path looked good when we started, but something changed
 234	 * along the way and we need to switch to the slow path.  Calling
 235	 * raise(3) will trigger this, for example.  IRQs are off.
 236	 */
 237	TRACE_IRQS_ON
 238	ENABLE_INTERRUPTS(CLBR_NONE)
 239	SAVE_EXTRA_REGS
 240	movq	%rsp, %rdi
 241	call	syscall_return_slowpath	/* returns with IRQs disabled */
 242	jmp	return_from_SYSCALL_64
 243
 244entry_SYSCALL64_slow_path:
 245	/* IRQs are off. */
 246	SAVE_EXTRA_REGS
 247	movq	%rsp, %rdi
 248	call	do_syscall_64		/* returns with IRQs disabled */
 249
 250return_from_SYSCALL_64:
 251	RESTORE_EXTRA_REGS
 252	TRACE_IRQS_IRETQ		/* we're about to change IF */
 253
 254	/*
 255	 * Try to use SYSRET instead of IRET if we're returning to
 256	 * a completely clean 64-bit userspace context.
 257	 */
 258	movq	RCX(%rsp), %rcx
 259	movq	RIP(%rsp), %r11
 260	cmpq	%rcx, %r11			/* RCX == RIP */
 261	jne	opportunistic_sysret_failed
 262
 263	/*
 264	 * On Intel CPUs, SYSRET with non-canonical RCX/RIP will #GP
 265	 * in kernel space.  This essentially lets the user take over
 266	 * the kernel, since userspace controls RSP.
 267	 *
 268	 * If width of "canonical tail" ever becomes variable, this will need
 269	 * to be updated to remain correct on both old and new CPUs.
 270	 */
 271	.ifne __VIRTUAL_MASK_SHIFT - 47
 272	.error "virtual address width changed -- SYSRET checks need update"
 273	.endif
 274
 275	/* Change top 16 bits to be the sign-extension of 47th bit */
 276	shl	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
 277	sar	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
 278
 279	/* If this changed %rcx, it was not canonical */
 280	cmpq	%rcx, %r11
 281	jne	opportunistic_sysret_failed
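	/*
	 * Rough worked example of the check above: with __VIRTUAL_MASK_SHIFT
	 * at 47 the shift count is 16, so a canonical RIP such as
	 * 0x00007ffffffff000 survives the shl/sar pair unchanged, while a
	 * non-canonical value like 0x0000800000000000 becomes
	 * 0xffff800000000000 and the cmpq catches the mismatch.
	 */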
 282
 283	cmpq	$__USER_CS, CS(%rsp)		/* CS must match SYSRET */
 284	jne	opportunistic_sysret_failed
 285
 286	movq	R11(%rsp), %r11
 287	cmpq	%r11, EFLAGS(%rsp)		/* R11 == RFLAGS */
 288	jne	opportunistic_sysret_failed
 289
 290	/*
 291	 * SYSRET can't restore RF.  SYSRET can restore TF, but unlike IRET,
 292	 * restoring TF results in a trap from userspace immediately after
 293	 * SYSRET.  This would cause an infinite loop whenever #DB happens
 294	 * with register state that satisfies the opportunistic SYSRET
 295	 * conditions.  For example, single-stepping this user code:
 296	 *
 297	 *           movq	$stuck_here, %rcx
 298	 *           pushfq
 299	 *           popq %r11
 300	 *   stuck_here:
 301	 *
 302	 * would never get past 'stuck_here'.
 303	 */
 304	testq	$(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11
 305	jnz	opportunistic_sysret_failed
 306
 307	/* nothing to check for RSP */
 308
 309	cmpq	$__USER_DS, SS(%rsp)		/* SS must match SYSRET */
 310	jne	opportunistic_sysret_failed
 311
 312	/*
 313	 * We win! This label is here just for ease of understanding
 314	 * perf profiles. Nothing jumps here.
 315	 */
 316syscall_return_via_sysret:
 317	/* rcx and r11 are already restored (see code above) */
 318	RESTORE_C_REGS_EXCEPT_RCX_R11
 319	movq	RSP(%rsp), %rsp
 320	USERGS_SYSRET64
 321
 322opportunistic_sysret_failed:
 323	SWAPGS
 324	jmp	restore_c_regs_and_iret
 325END(entry_SYSCALL_64)
 326
 327ENTRY(stub_ptregs_64)
 328	/*
 329	 * Syscalls marked as needing ptregs land here.
 330	 * If we are on the fast path, we need to save the extra regs,
 331	 * which we achieve by trying again on the slow path.  If we are on
 332	 * the slow path, the extra regs are already saved.
 333	 *
 334	 * RAX stores a pointer to the C function implementing the syscall.
 335	 * IRQs are on.
 336	 */
 337	cmpq	$.Lentry_SYSCALL_64_after_fastpath_call, (%rsp)
 338	jne	1f
 339
 340	/*
 341	 * Called from fast path -- disable IRQs again, pop return address
 342	 * and jump to slow path
 343	 */
 344	DISABLE_INTERRUPTS(CLBR_NONE)
 345	TRACE_IRQS_OFF
 346	popq	%rax
 347	jmp	entry_SYSCALL64_slow_path
 348
 3491:
 350	/* Called from C */
 351	jmp	*%rax				/* called from C */
 352END(stub_ptregs_64)
 353
 354.macro ptregs_stub func
 355ENTRY(ptregs_\func)
 356	leaq	\func(%rip), %rax
 357	jmp	stub_ptregs_64
 358END(ptregs_\func)
 359.endm
 360
 361/* Instantiate ptregs_stub for each ptregs-using syscall */
 362#define __SYSCALL_64_QUAL_(sym)
 363#define __SYSCALL_64_QUAL_ptregs(sym) ptregs_stub sym
 364#define __SYSCALL_64(nr, sym, qual) __SYSCALL_64_QUAL_##qual(sym)
 365#include <asm/syscalls_64.h>
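/*
 * A sketch of what the machinery above generates: a table entry qualified
 * with "ptregs" (sys_clone, for instance) gets a stub roughly equivalent to
 *
 *	ENTRY(ptregs_sys_clone)
 *		leaq	sys_clone(%rip), %rax
 *		jmp	stub_ptregs_64
 *	END(ptregs_sys_clone)
 *
 * and sys_call_table points at ptregs_sys_clone rather than sys_clone, so
 * such syscalls are always routed through stub_ptregs_64 and end up with a
 * full pt_regs.
 */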
 366
 367/*
 368 * A newly forked process directly context switches into this address.
 369 *
 370 * rdi: prev task we switched from
 371 */
 372ENTRY(ret_from_fork)
 373	LOCK ; btr $TIF_FORK, TI_flags(%r8)
 374
 375	pushq	$0x0002
 376	popfq					/* reset kernel eflags */
 377
 378	call	schedule_tail			/* rdi: 'prev' task parameter */
 379
 380	testb	$3, CS(%rsp)			/* from kernel_thread? */
 381	jnz	1f
 382
 383	/*
 384	 * We came from kernel_thread.  This code path is quite twisted, and
 385	 * someone should clean it up.
 386	 *
 387	 * copy_thread_tls stashes the function pointer in RBX and the
 388	 * parameter to be passed in RBP.  The called function is permitted
 389	 * to call do_execve and thereby jump to user mode.
 390	 */
 391	movq	RBP(%rsp), %rdi
 392	call	*RBX(%rsp)
 393	movl	$0, RAX(%rsp)
 394
 395	/*
 396	 * Fall through as though we're exiting a syscall.  This makes a
 397	 * twisted sort of sense if we just called do_execve.
 398	 */
 399
 4001:
 401	movq	%rsp, %rdi
 402	call	syscall_return_slowpath	/* returns with IRQs disabled */
 403	TRACE_IRQS_ON			/* user mode is traced as IRQS on */
 404	SWAPGS
 405	jmp	restore_regs_and_iret
 406END(ret_from_fork)
 407
 408/*
 409 * Build the entry stubs with some assembler magic.
 410 * We pack 1 stub into every 8-byte block.
 411 */
 412	.align 8
 413ENTRY(irq_entries_start)
 414    vector=FIRST_EXTERNAL_VECTOR
 415    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
 416	pushq	$(~vector+0x80)			/* Note: always in signed byte range */
 417    vector=vector+1
 418	jmp	common_interrupt
 419	.align	8
 420    .endr
 421END(irq_entries_start)
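/*
 * A sketch of one iteration of the .rept above, for the first external
 * vector (0x20): "pushq $(~0x20 + 0x80)" pushes 0x5f, which is in signed
 * byte range and therefore encodes as a two-byte push, so the push plus
 * the jmp fit the 8-byte slot.  common_interrupt undoes the +0x80 bias
 * with the addq $-0x80 below before handing ~vector to do_IRQ.
 */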
 422
 423/*
 424 * Interrupt entry/exit.
 425 *
 426 * Interrupt entry points save only callee clobbered registers in fast path.
 427 *
 428 * Entry runs with interrupts off.
 429 */
 430
 431/* 0(%rsp): ~(interrupt number) */
 432	.macro interrupt func
 433	cld
 434	ALLOC_PT_GPREGS_ON_STACK
 435	SAVE_C_REGS
 436	SAVE_EXTRA_REGS
 437
 438	testb	$3, CS(%rsp)
 439	jz	1f
 440
 441	/*
 442	 * IRQ from user mode.  Switch to kernel gsbase and inform context
 443	 * tracking that we're in kernel mode.
 444	 */
 445	SWAPGS
 446
 447	/*
 448	 * We need to tell lockdep that IRQs are off.  We can't do this until
 449	 * we fix gsbase, and we should do it before enter_from_user_mode
 450	 * (which can take locks).  Since TRACE_IRQS_OFF is idempotent,
 451	 * the simplest way to handle it is to just call it twice if
 452	 * we enter from user mode.  There's no reason to optimize this since
 453	 * TRACE_IRQS_OFF is a no-op if lockdep is off.
 454	 */
 455	TRACE_IRQS_OFF
 456
 457	CALL_enter_from_user_mode
 458
 4591:
 460	/*
 461	 * Save previous stack pointer, optionally switch to interrupt stack.
 462	 * irq_count is used to check if a CPU is already on an interrupt stack
 463	 * or not. While this is essentially redundant with preempt_count it is
 464	 * a little cheaper to use a separate counter in the PDA (short of
 465	 * moving irq_enter into assembly, which would be too much work)
 466	 */
 467	movq	%rsp, %rdi
 468	incl	PER_CPU_VAR(irq_count)
 469	cmovzq	PER_CPU_VAR(irq_stack_ptr), %rsp
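	/*
	 * irq_count starts out at -1 in the per-CPU data, so the incl above
	 * reaches zero only for the outermost interrupt; cmovzq then switches
	 * to the per-CPU irq stack, while nested interrupts stay on the stack
	 * they are already running on.
	 */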
 470	pushq	%rdi
 471	/* We entered an interrupt context - irqs are off: */
 472	TRACE_IRQS_OFF
 473
 474	call	\func	/* rdi points to pt_regs */
 475	.endm
 476
 477	/*
 478	 * The interrupt stubs push (~vector+0x80) onto the stack and
 479	 * then jump to common_interrupt.
 480	 */
 481	.p2align CONFIG_X86_L1_CACHE_SHIFT
 482common_interrupt:
 483	ASM_CLAC
 484	addq	$-0x80, (%rsp)			/* Adjust vector to [-256, -1] range */
 485	interrupt do_IRQ
 486	/* 0(%rsp): old RSP */
 487ret_from_intr:
 488	DISABLE_INTERRUPTS(CLBR_NONE)
 489	TRACE_IRQS_OFF
 490	decl	PER_CPU_VAR(irq_count)
 491
 492	/* Restore saved previous stack */
 493	popq	%rsp
 494
 495	testb	$3, CS(%rsp)
 496	jz	retint_kernel
 497
 498	/* Interrupt came from user space */
 499GLOBAL(retint_user)
 500	mov	%rsp,%rdi
 501	call	prepare_exit_to_usermode
 502	TRACE_IRQS_IRETQ
 503	SWAPGS
 504	jmp	restore_regs_and_iret
 505
 506/* Returning to kernel space */
 507retint_kernel:
 508#ifdef CONFIG_PREEMPT
 509	/* Interrupts are off */
 510	/* Check if we need preemption */
 511	bt	$9, EFLAGS(%rsp)		/* were interrupts off? */
 512	jnc	1f
 5130:	cmpl	$0, PER_CPU_VAR(__preempt_count)
 514	jnz	1f
 515	call	preempt_schedule_irq
 516	jmp	0b
 5171:
 518#endif
 519	/*
 520	 * The iretq could re-enable interrupts:
 521	 */
 522	TRACE_IRQS_IRETQ
 523
 524/*
 525 * At this label, code paths which return to kernel and to user,
 526 * which come from interrupts/exception and from syscalls, merge.
 527 */
 528GLOBAL(restore_regs_and_iret)
 529	RESTORE_EXTRA_REGS
 530restore_c_regs_and_iret:
 531	RESTORE_C_REGS
 532	REMOVE_PT_GPREGS_FROM_STACK 8
 533	INTERRUPT_RETURN
 534
 535ENTRY(native_iret)
 536	/*
 537	 * Are we returning to a stack segment from the LDT?  Note: in
 538	 * 64-bit mode SS:RSP on the exception stack is always valid.
 539	 */
 540#ifdef CONFIG_X86_ESPFIX64
 541	testb	$4, (SS-RIP)(%rsp)
 542	jnz	native_irq_return_ldt
 543#endif
 544
 545.global native_irq_return_iret
 546native_irq_return_iret:
 547	/*
 548	 * This may fault.  Non-paranoid faults on return to userspace are
 549	 * handled by fixup_bad_iret.  These include #SS, #GP, and #NP.
 550	 * Double-faults due to espfix64 are handled in do_double_fault.
 551	 * Other faults here are fatal.
 552	 */
 553	iretq
 554
 555#ifdef CONFIG_X86_ESPFIX64
 556native_irq_return_ldt:
 557	pushq	%rax
 558	pushq	%rdi
 559	SWAPGS
 560	movq	PER_CPU_VAR(espfix_waddr), %rdi
 561	movq	%rax, (0*8)(%rdi)		/* RAX */
 562	movq	(2*8)(%rsp), %rax		/* RIP */
 563	movq	%rax, (1*8)(%rdi)
 564	movq	(3*8)(%rsp), %rax		/* CS */
 565	movq	%rax, (2*8)(%rdi)
 566	movq	(4*8)(%rsp), %rax		/* RFLAGS */
 567	movq	%rax, (3*8)(%rdi)
 568	movq	(6*8)(%rsp), %rax		/* SS */
 569	movq	%rax, (5*8)(%rdi)
 570	movq	(5*8)(%rsp), %rax		/* RSP */
 571	movq	%rax, (4*8)(%rdi)
 572	andl	$0xffff0000, %eax
 573	popq	%rdi
 574	orq	PER_CPU_VAR(espfix_stack), %rax
 575	SWAPGS
 576	movq	%rax, %rsp
 577	popq	%rax
 578	jmp	native_irq_return_iret
 579#endif
 580END(common_interrupt)
 581
 582/*
 583 * APIC interrupts.
 584 */
 585.macro apicinterrupt3 num sym do_sym
 586ENTRY(\sym)
 587	ASM_CLAC
 588	pushq	$~(\num)
 589.Lcommon_\sym:
 590	interrupt \do_sym
 591	jmp	ret_from_intr
 592END(\sym)
 593.endm
 594
 595#ifdef CONFIG_TRACING
 596#define trace(sym) trace_##sym
 597#define smp_trace(sym) smp_trace_##sym
 598
 599.macro trace_apicinterrupt num sym
 600apicinterrupt3 \num trace(\sym) smp_trace(\sym)
 601.endm
 602#else
 603.macro trace_apicinterrupt num sym do_sym
 604.endm
 605#endif
 606
 607.macro apicinterrupt num sym do_sym
 608apicinterrupt3 \num \sym \do_sym
 609trace_apicinterrupt \num \sym
 610.endm
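/*
 * For example, "apicinterrupt LOCAL_TIMER_VECTOR apic_timer_interrupt
 * smp_apic_timer_interrupt" (used below) expands to an
 * ENTRY(apic_timer_interrupt) that pushes $~LOCAL_TIMER_VECTOR, runs the
 * "interrupt" macro around smp_apic_timer_interrupt and jumps to
 * ret_from_intr; with CONFIG_TRACING it also emits the
 * trace_apic_timer_interrupt variant via trace_apicinterrupt.
 */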
 611
 612#ifdef CONFIG_SMP
 613apicinterrupt3 IRQ_MOVE_CLEANUP_VECTOR		irq_move_cleanup_interrupt	smp_irq_move_cleanup_interrupt
 614apicinterrupt3 REBOOT_VECTOR			reboot_interrupt		smp_reboot_interrupt
 615#endif
 616
 617#ifdef CONFIG_X86_UV
 618apicinterrupt3 UV_BAU_MESSAGE			uv_bau_message_intr1		uv_bau_message_interrupt
 619#endif
 620
 621apicinterrupt LOCAL_TIMER_VECTOR		apic_timer_interrupt		smp_apic_timer_interrupt
 622apicinterrupt X86_PLATFORM_IPI_VECTOR		x86_platform_ipi		smp_x86_platform_ipi
 623
 624#ifdef CONFIG_HAVE_KVM
 625apicinterrupt3 POSTED_INTR_VECTOR		kvm_posted_intr_ipi		smp_kvm_posted_intr_ipi
 626apicinterrupt3 POSTED_INTR_WAKEUP_VECTOR	kvm_posted_intr_wakeup_ipi	smp_kvm_posted_intr_wakeup_ipi
 627#endif
 628
 629#ifdef CONFIG_X86_MCE_THRESHOLD
 630apicinterrupt THRESHOLD_APIC_VECTOR		threshold_interrupt		smp_threshold_interrupt
 631#endif
 632
 633#ifdef CONFIG_X86_MCE_AMD
 634apicinterrupt DEFERRED_ERROR_VECTOR		deferred_error_interrupt	smp_deferred_error_interrupt
 635#endif
 636
 637#ifdef CONFIG_X86_THERMAL_VECTOR
 638apicinterrupt THERMAL_APIC_VECTOR		thermal_interrupt		smp_thermal_interrupt
 639#endif
 640
 641#ifdef CONFIG_SMP
 642apicinterrupt CALL_FUNCTION_SINGLE_VECTOR	call_function_single_interrupt	smp_call_function_single_interrupt
 643apicinterrupt CALL_FUNCTION_VECTOR		call_function_interrupt		smp_call_function_interrupt
 644apicinterrupt RESCHEDULE_VECTOR			reschedule_interrupt		smp_reschedule_interrupt
 645#endif
 646
 647apicinterrupt ERROR_APIC_VECTOR			error_interrupt			smp_error_interrupt
 648apicinterrupt SPURIOUS_APIC_VECTOR		spurious_interrupt		smp_spurious_interrupt
 649
 650#ifdef CONFIG_IRQ_WORK
 651apicinterrupt IRQ_WORK_VECTOR			irq_work_interrupt		smp_irq_work_interrupt
 652#endif
 653
 654/*
 655 * Exception entry points.
 656 */
 657#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss) + (TSS_ist + ((x) - 1) * 8)
 658
 659.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
 660ENTRY(\sym)
 661	/* Sanity check */
 662	.if \shift_ist != -1 && \paranoid == 0
 663	.error "using shift_ist requires paranoid=1"
 664	.endif
 665
 666	ASM_CLAC
 667	PARAVIRT_ADJUST_EXCEPTION_FRAME
 668
 669	.ifeq \has_error_code
 670	pushq	$-1				/* ORIG_RAX: no syscall to restart */
 671	.endif
 672
 673	ALLOC_PT_GPREGS_ON_STACK
 674
 675	.if \paranoid
 676	.if \paranoid == 1
 677	testb	$3, CS(%rsp)			/* If coming from userspace, switch stacks */
 678	jnz	1f
 679	.endif
 680	call	paranoid_entry
 681	.else
 682	call	error_entry
 683	.endif
 684	/* returned flag: ebx=0: need swapgs on exit, ebx=1: don't need it */
 685
 686	.if \paranoid
 687	.if \shift_ist != -1
 688	TRACE_IRQS_OFF_DEBUG			/* reload IDT in case of recursion */
 689	.else
 690	TRACE_IRQS_OFF
 691	.endif
 692	.endif
 693
 694	movq	%rsp, %rdi			/* pt_regs pointer */
 695
 696	.if \has_error_code
 697	movq	ORIG_RAX(%rsp), %rsi		/* get error code */
 698	movq	$-1, ORIG_RAX(%rsp)		/* no syscall to restart */
 699	.else
 700	xorl	%esi, %esi			/* no error code */
 701	.endif
 702
 703	.if \shift_ist != -1
 704	subq	$EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
 705	.endif
 706
 707	call	\do_sym
 708
 709	.if \shift_ist != -1
 710	addq	$EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
 711	.endif
 712
 713	/* these procedures expect "no swapgs" flag in ebx */
 714	.if \paranoid
 715	jmp	paranoid_exit
 716	.else
 717	jmp	error_exit
 718	.endif
 719
 720	.if \paranoid == 1
 721	/*
 722	 * Paranoid entry from userspace.  Switch stacks and treat it
 723	 * as a normal entry.  This means that paranoid handlers
 724	 * run in real process context if user_mode(regs).
 725	 */
 7261:
 727	call	error_entry
 728
 729
 730	movq	%rsp, %rdi			/* pt_regs pointer */
 731	call	sync_regs
 732	movq	%rax, %rsp			/* switch stack */
 733
 734	movq	%rsp, %rdi			/* pt_regs pointer */
 735
 736	.if \has_error_code
 737	movq	ORIG_RAX(%rsp), %rsi		/* get error code */
 738	movq	$-1, ORIG_RAX(%rsp)		/* no syscall to restart */
 739	.else
 740	xorl	%esi, %esi			/* no error code */
 741	.endif
 742
 743	call	\do_sym
 744
 745	jmp	error_exit			/* %ebx: no swapgs flag */
 746	.endif
 747END(\sym)
 748.endm
 749
 750#ifdef CONFIG_TRACING
 751.macro trace_idtentry sym do_sym has_error_code:req
 752idtentry trace(\sym) trace(\do_sym) has_error_code=\has_error_code
 753idtentry \sym \do_sym has_error_code=\has_error_code
 754.endm
 755#else
 756.macro trace_idtentry sym do_sym has_error_code:req
 757idtentry \sym \do_sym has_error_code=\has_error_code
 758.endm
 759#endif
 760
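/*
 * As a sketch of a simple instantiation below: "idtentry overflow
 * do_overflow has_error_code=0" emits an ENTRY(overflow) that pushes $-1
 * as ORIG_RAX (the hardware frame carries no error code), allocates
 * pt_regs, calls error_entry, and then calls do_overflow with
 * %rdi = pt_regs pointer and %rsi = 0 before jumping to error_exit.
 */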
 761idtentry divide_error			do_divide_error			has_error_code=0
 762idtentry overflow			do_overflow			has_error_code=0
 763idtentry bounds				do_bounds			has_error_code=0
 764idtentry invalid_op			do_invalid_op			has_error_code=0
 765idtentry device_not_available		do_device_not_available		has_error_code=0
 766idtentry double_fault			do_double_fault			has_error_code=1 paranoid=2
 767idtentry coprocessor_segment_overrun	do_coprocessor_segment_overrun	has_error_code=0
 768idtentry invalid_TSS			do_invalid_TSS			has_error_code=1
 769idtentry segment_not_present		do_segment_not_present		has_error_code=1
 770idtentry spurious_interrupt_bug		do_spurious_interrupt_bug	has_error_code=0
 771idtentry coprocessor_error		do_coprocessor_error		has_error_code=0
 772idtentry alignment_check		do_alignment_check		has_error_code=1
 773idtentry simd_coprocessor_error		do_simd_coprocessor_error	has_error_code=0
 774
 775
 776	/*
 777	 * Reload gs selector with exception handling
 778	 * edi:  new selector
 779	 */
 780ENTRY(native_load_gs_index)
 781	pushfq
 782	DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
 783	SWAPGS
 784gs_change:
 785	movl	%edi, %gs
 7862:	mfence					/* workaround */
 787	SWAPGS
 788	popfq
 789	ret
 790END(native_load_gs_index)
 791
 792	_ASM_EXTABLE(gs_change, bad_gs)
 793	.section .fixup, "ax"
 794	/* running with kernelgs */
 795bad_gs:
 796	SWAPGS					/* switch back to user gs */
 797	xorl	%eax, %eax
 798	movl	%eax, %gs
 799	jmp	2b
 800	.previous
 801
 802/* Call softirq on interrupt stack. Interrupts are off. */
 803ENTRY(do_softirq_own_stack)
 804	pushq	%rbp
 805	mov	%rsp, %rbp
 806	incl	PER_CPU_VAR(irq_count)
 807	cmove	PER_CPU_VAR(irq_stack_ptr), %rsp
 808	push	%rbp				/* frame pointer backlink */
 809	call	__do_softirq
 810	leaveq
 811	decl	PER_CPU_VAR(irq_count)
 812	ret
 813END(do_softirq_own_stack)
 814
 815#ifdef CONFIG_XEN
 816idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
 817
 818/*
 819 * A note on the "critical region" in our callback handler.
 820 * We want to avoid stacking callback handlers due to events occurring
 821 * during handling of the last event. To do this, we keep events disabled
 822 * until we've done all processing. HOWEVER, we must enable events before
 823 * popping the stack frame (can't be done atomically) and so it would still
 824 * be possible to get enough handler activations to overflow the stack.
 825 * Although unlikely, bugs of that kind are hard to track down, so we'd
 826 * like to avoid the possibility.
 827 * So, on entry to the handler we detect whether we interrupted an
 828 * existing activation in its critical region -- if so, we pop the current
 829 * activation and restart the handler using the previous one.
 830 */
 831ENTRY(xen_do_hypervisor_callback)		/* do_hypervisor_callback(struct pt_regs *) */
 832
 833/*
 834 * Since we don't modify %rdi, xen_evtchn_do_upcall(struct pt_regs *) will
 835 * see the correct pointer to the pt_regs.
 836 */
 837	movq	%rdi, %rsp			/* we don't return, adjust the stack frame */
 83811:	incl	PER_CPU_VAR(irq_count)
 839	movq	%rsp, %rbp
 840	cmovzq	PER_CPU_VAR(irq_stack_ptr), %rsp
 841	pushq	%rbp				/* frame pointer backlink */
 842	call	xen_evtchn_do_upcall
 843	popq	%rsp
 844	decl	PER_CPU_VAR(irq_count)
 845#ifndef CONFIG_PREEMPT
 846	call	xen_maybe_preempt_hcall
 847#endif
 848	jmp	error_exit
 849END(xen_do_hypervisor_callback)
 850
 851/*
 852 * Hypervisor uses this for application faults while it executes.
 853 * We get here for two reasons:
 854 *  1. Fault while reloading DS, ES, FS or GS
 855 *  2. Fault while executing IRET
 856 * Category 1 we do not need to fix up as Xen has already reloaded all segment
 857 * registers that could be reloaded and zeroed the others.
 858 * Category 2 we fix up by killing the current process. We cannot use the
 859 * normal Linux return path in this case because if we use the IRET hypercall
 860 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 861 * We distinguish between categories by comparing each saved segment register
 862 * with its current contents: any discrepancy means we are in category 1.
 863 */
 864ENTRY(xen_failsafe_callback)
 865	movl	%ds, %ecx
 866	cmpw	%cx, 0x10(%rsp)
 867	jne	1f
 868	movl	%es, %ecx
 869	cmpw	%cx, 0x18(%rsp)
 870	jne	1f
 871	movl	%fs, %ecx
 872	cmpw	%cx, 0x20(%rsp)
 873	jne	1f
 874	movl	%gs, %ecx
 875	cmpw	%cx, 0x28(%rsp)
 876	jne	1f
 877	/* All segments match their saved values => Category 2 (Bad IRET). */
 878	movq	(%rsp), %rcx
 879	movq	8(%rsp), %r11
 880	addq	$0x30, %rsp
 881	pushq	$0				/* RIP */
 882	pushq	%r11
 883	pushq	%rcx
 884	jmp	general_protection
 8851:	/* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
 886	movq	(%rsp), %rcx
 887	movq	8(%rsp), %r11
 888	addq	$0x30, %rsp
 889	pushq	$-1 /* orig_ax = -1 => not a system call */
 890	ALLOC_PT_GPREGS_ON_STACK
 891	SAVE_C_REGS
 892	SAVE_EXTRA_REGS
 893	jmp	error_exit
 894END(xen_failsafe_callback)
 895
 896apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
 897	xen_hvm_callback_vector xen_evtchn_do_upcall
 898
 899#endif /* CONFIG_XEN */
 900
 901#if IS_ENABLED(CONFIG_HYPERV)
 902apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
 903	hyperv_callback_vector hyperv_vector_handler
 904#endif /* CONFIG_HYPERV */
 905
 906idtentry debug			do_debug		has_error_code=0	paranoid=1 shift_ist=DEBUG_STACK
 907idtentry int3			do_int3			has_error_code=0	paranoid=1 shift_ist=DEBUG_STACK
 908idtentry stack_segment		do_stack_segment	has_error_code=1
 909
 910#ifdef CONFIG_XEN
 911idtentry xen_debug		do_debug		has_error_code=0
 912idtentry xen_int3		do_int3			has_error_code=0
 913idtentry xen_stack_segment	do_stack_segment	has_error_code=1
 914#endif
 915
 916idtentry general_protection	do_general_protection	has_error_code=1
 917trace_idtentry page_fault	do_page_fault		has_error_code=1
 918
 919#ifdef CONFIG_KVM_GUEST
 920idtentry async_page_fault	do_async_page_fault	has_error_code=1
 921#endif
 922
 923#ifdef CONFIG_X86_MCE
 924idtentry machine_check					has_error_code=0	paranoid=1 do_sym=*machine_check_vector(%rip)
 925#endif
 926
 927/*
 928 * Save all registers in pt_regs, and switch gs if needed.
 929 * Use a slow, but surefire "are we in kernel?" check.
 930 * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
 931 */
 932ENTRY(paranoid_entry)
 933	cld
 934	SAVE_C_REGS 8
 935	SAVE_EXTRA_REGS 8
 936	movl	$1, %ebx
 937	movl	$MSR_GS_BASE, %ecx
 938	rdmsr
 939	testl	%edx, %edx
 940	js	1f				/* negative -> in kernel */
 941	SWAPGS
 942	xorl	%ebx, %ebx
 9431:	ret
 944END(paranoid_entry)
 945
 946/*
 947 * "Paranoid" exit path from exception stack.  This is invoked
 948 * only on return from non-NMI IST interrupts that came
 949 * from kernel space.
 950 *
 951 * We may be returning to very strange contexts (e.g. very early
 952 * in syscall entry), so checking for preemption here would
 953 * be complicated.  Fortunately, there's no good reason
 954 * to try to handle preemption here.
 955 *
 956 * On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it)
 957 */
 958ENTRY(paranoid_exit)
 959	DISABLE_INTERRUPTS(CLBR_NONE)
 960	TRACE_IRQS_OFF_DEBUG
 961	testl	%ebx, %ebx			/* swapgs needed? */
 962	jnz	paranoid_exit_no_swapgs
 963	TRACE_IRQS_IRETQ
 964	SWAPGS_UNSAFE_STACK
 965	jmp	paranoid_exit_restore
 966paranoid_exit_no_swapgs:
 967	TRACE_IRQS_IRETQ_DEBUG
 968paranoid_exit_restore:
 969	RESTORE_EXTRA_REGS
 970	RESTORE_C_REGS
 971	REMOVE_PT_GPREGS_FROM_STACK 8
 972	INTERRUPT_RETURN
 973END(paranoid_exit)
 974
 975/*
 976 * Save all registers in pt_regs, and switch gs if needed.
 977 * Return: EBX=0: came from user mode; EBX=1: otherwise
 978 */
 979ENTRY(error_entry)
 980	cld
 981	SAVE_C_REGS 8
 982	SAVE_EXTRA_REGS 8
 983	xorl	%ebx, %ebx
 984	testb	$3, CS+8(%rsp)
 985	jz	.Lerror_kernelspace
 986
 987.Lerror_entry_from_usermode_swapgs:
 988	/*
 989	 * We entered from user mode or we're pretending to have entered
 990	 * from user mode due to an IRET fault.
 991	 */
 992	SWAPGS
 993
 994.Lerror_entry_from_usermode_after_swapgs:
 995	/*
 996	 * We need to tell lockdep that IRQs are off.  We can't do this until
 997	 * we fix gsbase, and we should do it before enter_from_user_mode
 998	 * (which can take locks).
 999	 */
1000	TRACE_IRQS_OFF
1001	CALL_enter_from_user_mode
1002	ret
1003
1004.Lerror_entry_done:
1005	TRACE_IRQS_OFF
1006	ret
1007
1008	/*
1009	 * There are two places in the kernel that can potentially fault with
1010	 * usergs. Handle them here.  B stepping K8s sometimes report a
1011	 * truncated RIP for IRET exceptions returning to compat mode. Check
1012	 * for these here too.
1013	 */
1014.Lerror_kernelspace:
1015	incl	%ebx
1016	leaq	native_irq_return_iret(%rip), %rcx
1017	cmpq	%rcx, RIP+8(%rsp)
1018	je	.Lerror_bad_iret
1019	movl	%ecx, %eax			/* zero extend */
1020	cmpq	%rax, RIP+8(%rsp)
1021	je	.Lbstep_iret
1022	cmpq	$gs_change, RIP+8(%rsp)
1023	jne	.Lerror_entry_done
1024
1025	/*
1026	 * hack: gs_change can fail with user gsbase.  If this happens, fix up
1027	 * gsbase and proceed.  We'll fix up the exception and land in
1028	 * gs_change's error handler with kernel gsbase.
1029	 */
1030	jmp	.Lerror_entry_from_usermode_swapgs
1031
1032.Lbstep_iret:
1033	/* Fix truncated RIP */
1034	movq	%rcx, RIP+8(%rsp)
1035	/* fall through */
1036
1037.Lerror_bad_iret:
1038	/*
1039	 * We came from an IRET to user mode, so we have user gsbase.
1040	 * Switch to kernel gsbase:
1041	 */
1042	SWAPGS
1043
1044	/*
1045	 * Pretend that the exception came from user mode: set up pt_regs
1046	 * as if we faulted immediately after IRET and clear EBX so that
1047	 * error_exit knows that we will be returning to user mode.
1048	 */
1049	mov	%rsp, %rdi
1050	call	fixup_bad_iret
1051	mov	%rax, %rsp
1052	decl	%ebx
1053	jmp	.Lerror_entry_from_usermode_after_swapgs
1054END(error_entry)
1055
1056
1057/*
1058 * On entry, EBX is a "return to kernel mode" flag:
1059 *   1: already in kernel mode, don't need SWAPGS
1060 *   0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode
1061 */
1062ENTRY(error_exit)
1063	movl	%ebx, %eax
1064	DISABLE_INTERRUPTS(CLBR_NONE)
1065	TRACE_IRQS_OFF
1066	testl	%eax, %eax
1067	jnz	retint_kernel
1068	jmp	retint_user
1069END(error_exit)
1070
1071/* Runs on exception stack */
1072ENTRY(nmi)
1073	/*
1074	 * Fix up the exception frame if we're on Xen.
1075	 * PARAVIRT_ADJUST_EXCEPTION_FRAME is guaranteed to push at most
1076	 * one value to the stack on native, so it may clobber the rdx
1077	 * scratch slot, but it won't clobber any of the important
1078	 * slots past it.
1079	 *
1080	 * Xen is a different story, because the Xen frame itself overlaps
1081	 * the "NMI executing" variable.
1082	 */
1083	PARAVIRT_ADJUST_EXCEPTION_FRAME
1084
1085	/*
1086	 * We allow breakpoints in NMIs. If a breakpoint occurs, then
1087	 * the iretq it performs will take us out of NMI context.
1088	 * This means that we can have nested NMIs where the next
1089	 * NMI is using the top of the stack of the previous NMI. We
1090	 * can't let it execute because the nested NMI will corrupt the
1091	 * stack of the previous NMI. NMI handlers are not re-entrant
1092	 * anyway.
1093	 *
1094	 * To handle this case we do the following:
1095	 *  Check a special location on the stack that contains
1096	 *  a variable that is set when NMIs are executing.
1097	 *  The interrupted task's stack is also checked to see if it
1098	 *  is an NMI stack.
1099	 *  If the variable is not set and the stack is not the NMI
1100	 *  stack then:
1101	 *    o Set the special variable on the stack
1102	 *    o Copy the interrupt frame into an "outermost" location on the
1103	 *      stack
1104	 *    o Copy the interrupt frame into an "iret" location on the stack
1105	 *    o Continue processing the NMI
1106	 *  If the variable is set or the previous stack is the NMI stack:
1107	 *    o Modify the "iret" location to jump to the repeat_nmi
1108	 *    o return back to the first NMI
1109	 *
1110	 * Now on exit of the first NMI, we first clear the stack variable.
1111	 * The NMI stack will tell any nested NMIs at that point that it is
1112	 * nested. Then we pop the stack normally with iret, and if there was
1113	 * a nested NMI that updated the copy interrupt stack frame, a
1114	 * jump will be made to the repeat_nmi code that will handle the second
1115	 * NMI.
1116	 *
1117	 * However, espfix prevents us from directly returning to userspace
1118	 * with a single IRET instruction.  Similarly, IRET to user mode
1119	 * can fault.  We therefore handle NMIs from user space like
1120	 * other IST entries.
1121	 */
1122
1123	/* Use %rdx as our temp variable throughout */
1124	pushq	%rdx
1125
1126	testb	$3, CS-RIP+8(%rsp)
1127	jz	.Lnmi_from_kernel
1128
1129	/*
1130	 * NMI from user mode.  We need to run on the thread stack, but we
1131	 * can't go through the normal entry paths: NMIs are masked, and
1132	 * we don't want to enable interrupts, because then we'll end
1133	 * up in an awkward situation in which IRQs are on but NMIs
1134	 * are off.
1135	 *
1136	 * We also must not push anything to the stack before switching
1137	 * stacks lest we corrupt the "NMI executing" variable.
1138	 */
1139
1140	SWAPGS_UNSAFE_STACK
1141	cld
1142	movq	%rsp, %rdx
1143	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
1144	pushq	5*8(%rdx)	/* pt_regs->ss */
1145	pushq	4*8(%rdx)	/* pt_regs->rsp */
1146	pushq	3*8(%rdx)	/* pt_regs->flags */
1147	pushq	2*8(%rdx)	/* pt_regs->cs */
1148	pushq	1*8(%rdx)	/* pt_regs->rip */
1149	pushq   $-1		/* pt_regs->orig_ax */
1150	pushq   %rdi		/* pt_regs->di */
1151	pushq   %rsi		/* pt_regs->si */
1152	pushq   (%rdx)		/* pt_regs->dx */
1153	pushq   %rcx		/* pt_regs->cx */
1154	pushq   %rax		/* pt_regs->ax */
1155	pushq   %r8		/* pt_regs->r8 */
1156	pushq   %r9		/* pt_regs->r9 */
1157	pushq   %r10		/* pt_regs->r10 */
1158	pushq   %r11		/* pt_regs->r11 */
1159	pushq	%rbx		/* pt_regs->rbx */
1160	pushq	%rbp		/* pt_regs->rbp */
1161	pushq	%r12		/* pt_regs->r12 */
1162	pushq	%r13		/* pt_regs->r13 */
1163	pushq	%r14		/* pt_regs->r14 */
1164	pushq	%r15		/* pt_regs->r15 */
1165
1166	/*
1167	 * At this point we no longer need to worry about stack damage
1168	 * due to nesting -- we're on the normal thread stack and we're
1169	 * done with the NMI stack.
1170	 */
1171
1172	movq	%rsp, %rdi
1173	movq	$-1, %rsi
1174	call	do_nmi
1175
1176	/*
1177	 * Return back to user mode.  We must *not* do the normal exit
1178	 * work, because we don't want to enable interrupts.  Fortunately,
1179	 * do_nmi doesn't modify pt_regs.
1180	 */
1181	SWAPGS
1182	jmp	restore_c_regs_and_iret
1183
1184.Lnmi_from_kernel:
1185	/*
1186	 * Here's what our stack frame will look like:
1187	 * +---------------------------------------------------------+
1188	 * | original SS                                             |
1189	 * | original Return RSP                                     |
1190	 * | original RFLAGS                                         |
1191	 * | original CS                                             |
1192	 * | original RIP                                            |
1193	 * +---------------------------------------------------------+
1194	 * | temp storage for rdx                                    |
1195	 * +---------------------------------------------------------+
1196	 * | "NMI executing" variable                                |
1197	 * +---------------------------------------------------------+
1198	 * | iret SS          } Copied from "outermost" frame        |
1199	 * | iret Return RSP  } on each loop iteration; overwritten  |
1200	 * | iret RFLAGS      } by a nested NMI to force another     |
1201	 * | iret CS          } iteration if needed.                 |
1202	 * | iret RIP         }                                      |
1203	 * +---------------------------------------------------------+
1204	 * | outermost SS          } initialized in first_nmi;       |
1205	 * | outermost Return RSP  } will not be changed before      |
1206	 * | outermost RFLAGS      } NMI processing is done.         |
1207	 * | outermost CS          } Copied to "iret" frame on each  |
1208	 * | outermost RIP         } iteration.                      |
1209	 * +---------------------------------------------------------+
1210	 * | pt_regs                                                 |
1211	 * +---------------------------------------------------------+
1212	 *
1213	 * The "original" frame is used by hardware.  Before re-enabling
1214	 * NMIs, we need to be done with it, and we need to leave enough
1215	 * space for the asm code here.
1216	 *
1217	 * We return by executing IRET while RSP points to the "iret" frame.
1218	 * That will either return for real or it will loop back into NMI
1219	 * processing.
1220	 *
1221	 * The "outermost" frame is copied to the "iret" frame on each
1222	 * iteration of the loop, so each iteration starts with the "iret"
1223	 * frame pointing to the final return target.
1224	 */
1225
1226	/*
1227	 * Determine whether we're a nested NMI.
1228	 *
1229	 * If we interrupted kernel code between repeat_nmi and
1230	 * end_repeat_nmi, then we are a nested NMI.  We must not
1231	 * modify the "iret" frame because it's being written by
1232	 * the outer NMI.  That's okay; the outer NMI handler is
1233	 * about to call do_nmi anyway, so we can just
1234	 * resume the outer NMI.
1235	 */
1236
1237	movq	$repeat_nmi, %rdx
1238	cmpq	8(%rsp), %rdx
1239	ja	1f
1240	movq	$end_repeat_nmi, %rdx
1241	cmpq	8(%rsp), %rdx
1242	ja	nested_nmi_out
12431:
1244
1245	/*
1246	 * Now check "NMI executing".  If it's set, then we're nested.
1247	 * This will not detect if we interrupted an outer NMI just
1248	 * before IRET.
1249	 */
1250	cmpl	$1, -8(%rsp)
1251	je	nested_nmi
1252
1253	/*
1254	 * Now test if the previous stack was an NMI stack.  This covers
1255	 * the case where we interrupt an outer NMI after it clears
1256	 * "NMI executing" but before IRET.  We need to be careful, though:
1257	 * there is one case in which RSP could point to the NMI stack
1258	 * despite there being no NMI active: naughty userspace controls
1259	 * RSP at the very beginning of the SYSCALL targets.  We can
1260	 * pull a fast one on naughty userspace, though: we program
1261	 * SYSCALL to mask DF, so userspace cannot cause DF to be set
1262	 * if it controls the kernel's RSP.  We set DF before we clear
1263	 * "NMI executing".
1264	 */
1265	lea	6*8(%rsp), %rdx
1266	/* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */
1267	cmpq	%rdx, 4*8(%rsp)
1268	/* If the stack pointer is above the NMI stack, this is a normal NMI */
1269	ja	first_nmi
1270
1271	subq	$EXCEPTION_STKSZ, %rdx
1272	cmpq	%rdx, 4*8(%rsp)
1273	/* If it is below the NMI stack, it is a normal NMI */
1274	jb	first_nmi
1275
1276	/* Ah, it is within the NMI stack. */
1277
1278	testb	$(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp)
1279	jz	first_nmi	/* RSP was user controlled. */
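	/*
	 * The byte test above checks DF in the saved RFLAGS: X86_EFLAGS_DF is
	 * bit 10 (0x400), so X86_EFLAGS_DF >> 8 is the mask for byte 1 of the
	 * flags word, and 3*8(%rsp) is the saved RFLAGS here (RIP/CS/RFLAGS
	 * sit above the %rdx we pushed on entry).
	 */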
1280
1281	/* This is a nested NMI. */
1282
1283nested_nmi:
1284	/*
1285	 * Modify the "iret" frame to point to repeat_nmi, forcing another
1286	 * iteration of NMI handling.
1287	 */
1288	subq	$8, %rsp
1289	leaq	-10*8(%rsp), %rdx
1290	pushq	$__KERNEL_DS
1291	pushq	%rdx
1292	pushfq
1293	pushq	$__KERNEL_CS
1294	pushq	$repeat_nmi
1295
1296	/* Put stack back */
1297	addq	$(6*8), %rsp
1298
1299nested_nmi_out:
1300	popq	%rdx
1301
1302	/* We are returning to kernel mode, so this cannot result in a fault. */
1303	INTERRUPT_RETURN
1304
1305first_nmi:
1306	/* Restore rdx. */
1307	movq	(%rsp), %rdx
1308
1309	/* Make room for "NMI executing". */
1310	pushq	$0
1311
1312	/* Leave room for the "iret" frame */
1313	subq	$(5*8), %rsp
1314
1315	/* Copy the "original" frame to the "outermost" frame */
1316	.rept 5
1317	pushq	11*8(%rsp)
1318	.endr
1319
1320	/* Everything up to here is safe from nested NMIs */
1321
1322#ifdef CONFIG_DEBUG_ENTRY
1323	/*
1324	 * For ease of testing, unmask NMIs right away.  Disabled by
1325	 * default because IRET is very expensive.
1326	 */
1327	pushq	$0		/* SS */
1328	pushq	%rsp		/* RSP (minus 8 because of the previous push) */
1329	addq	$8, (%rsp)	/* Fix up RSP */
1330	pushfq			/* RFLAGS */
1331	pushq	$__KERNEL_CS	/* CS */
1332	pushq	$1f		/* RIP */
1333	INTERRUPT_RETURN	/* continues at repeat_nmi below */
13341:
1335#endif
1336
1337repeat_nmi:
1338	/*
1339	 * If there was a nested NMI, the first NMI's iret will return
1340	 * here. But NMIs are still enabled and we can take another
1341	 * nested NMI. The nested NMI checks the interrupted RIP to see
1342	 * if it is between repeat_nmi and end_repeat_nmi, and if so
1343	 * it will just return, as we are about to repeat an NMI anyway.
1344	 * This makes it safe to copy to the stack frame that a nested
1345	 * NMI will update.
1346	 *
1347	 * RSP is pointing to "outermost RIP".  gsbase is unknown, but, if
1348	 * we're repeating an NMI, gsbase has the same value that it had on
1349	 * the first iteration.  paranoid_entry will load the kernel
1350	 * gsbase if needed before we call do_nmi.  "NMI executing"
1351	 * is zero.
1352	 */
1353	movq	$1, 10*8(%rsp)		/* Set "NMI executing". */
1354
1355	/*
1356	 * Copy the "outermost" frame to the "iret" frame.  NMIs that nest
1357	 * here must not modify the "iret" frame while we're writing to
1358	 * it or it will end up containing garbage.
1359	 */
1360	addq	$(10*8), %rsp
1361	.rept 5
1362	pushq	-6*8(%rsp)
1363	.endr
1364	subq	$(5*8), %rsp
1365end_repeat_nmi:
1366
1367	/*
1368	 * Everything below this point can be preempted by a nested NMI.
1369	 * If this happens, then the inner NMI will change the "iret"
1370	 * frame to point back to repeat_nmi.
1371	 */
1372	pushq	$-1				/* ORIG_RAX: no syscall to restart */
1373	ALLOC_PT_GPREGS_ON_STACK
1374
1375	/*
1376	 * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit
1377	 * as we should not be calling schedule in NMI context,
1378	 * even with normal interrupts enabled: an NMI should not be
1379	 * setting NEED_RESCHED or anything that normal interrupts and
1380	 * exceptions might do.
1381	 */
1382	call	paranoid_entry
1383
1384	/* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
1385	movq	%rsp, %rdi
1386	movq	$-1, %rsi
1387	call	do_nmi
1388
1389	testl	%ebx, %ebx			/* swapgs needed? */
1390	jnz	nmi_restore
1391nmi_swapgs:
1392	SWAPGS_UNSAFE_STACK
1393nmi_restore:
1394	RESTORE_EXTRA_REGS
1395	RESTORE_C_REGS
1396
1397	/* Point RSP at the "iret" frame. */
1398	REMOVE_PT_GPREGS_FROM_STACK 6*8
1399
1400	/*
1401	 * Clear "NMI executing".  Set DF first so that we can easily
1402	 * distinguish the remaining code between here and IRET from
1403	 * the SYSCALL entry and exit paths.  On a native kernel, we
1404	 * could just inspect RIP, but, on paravirt kernels,
1405	 * INTERRUPT_RETURN can translate into a jump into a
1406	 * hypercall page.
1407	 */
1408	std
1409	movq	$0, 5*8(%rsp)		/* clear "NMI executing" */
1410
1411	/*
1412	 * INTERRUPT_RETURN reads the "iret" frame and exits the NMI
1413	 * stack in a single instruction.  We are returning to kernel
1414	 * mode, so this cannot result in a fault.
1415	 */
1416	INTERRUPT_RETURN
1417END(nmi)
1418
1419ENTRY(ignore_sysret)
1420	mov	$-ENOSYS, %eax
1421	sysret
1422END(ignore_sysret)
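/*
 * ignore_sysret is installed as the MSR_CSTAR target by syscall_init() on
 * kernels built without CONFIG_IA32_EMULATION, so a stray 32-bit SYSCALL
 * on such a kernel simply gets -ENOSYS back.
 */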
v4.10.11
   1/*
   2 *  linux/arch/x86_64/entry.S
   3 *
   4 *  Copyright (C) 1991, 1992  Linus Torvalds
   5 *  Copyright (C) 2000, 2001, 2002  Andi Kleen SuSE Labs
   6 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
   7 *
   8 * entry.S contains the system-call and fault low-level handling routines.
   9 *
  10 * Some of this is documented in Documentation/x86/entry_64.txt
  11 *
  12 * A note on terminology:
  13 * - iret frame:	Architecture defined interrupt frame from SS to RIP
  14 *			at the top of the kernel process stack.
  15 *
  16 * Some macro usage:
  17 * - ENTRY/END:		Define functions in the symbol table.
  18 * - TRACE_IRQ_*:	Trace hardirq state for lock debugging.
  19 * - idtentry:		Define exception entry points.
  20 */
  21#include <linux/linkage.h>
  22#include <asm/segment.h>
  23#include <asm/cache.h>
  24#include <asm/errno.h>
  25#include "calling.h"
  26#include <asm/asm-offsets.h>
  27#include <asm/msr.h>
  28#include <asm/unistd.h>
  29#include <asm/thread_info.h>
  30#include <asm/hw_irq.h>
  31#include <asm/page_types.h>
  32#include <asm/irqflags.h>
  33#include <asm/paravirt.h>
  34#include <asm/percpu.h>
  35#include <asm/asm.h>
  36#include <asm/smap.h>
  37#include <asm/pgtable_types.h>
  38#include <asm/export.h>
  39#include <asm/frame.h>
  40#include <linux/err.h>
  41
  42.code64
  43.section .entry.text, "ax"
  44
  45#ifdef CONFIG_PARAVIRT
  46ENTRY(native_usergs_sysret64)
  47	swapgs
  48	sysretq
  49ENDPROC(native_usergs_sysret64)
  50#endif /* CONFIG_PARAVIRT */
  51
  52.macro TRACE_IRQS_IRETQ
  53#ifdef CONFIG_TRACE_IRQFLAGS
  54	bt	$9, EFLAGS(%rsp)		/* interrupts off? */
  55	jnc	1f
  56	TRACE_IRQS_ON
  571:
  58#endif
  59.endm
  60
  61/*
  62 * When dynamic function tracer is enabled it will add a breakpoint
  63 * to all locations that it is about to modify, sync CPUs, update
  64 * all the code, sync CPUs, then remove the breakpoints. In this time
  65 * if lockdep is enabled, it might jump back into the debug handler
  66 * outside the updating of the IST protection. (TRACE_IRQS_ON/OFF).
  67 *
  68 * We need to change the IDT table before calling TRACE_IRQS_ON/OFF to
  69 * make sure the stack pointer does not get reset back to the top
  70 * of the debug stack, and instead just reuses the current stack.
  71 */
  72#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_TRACE_IRQFLAGS)
  73
  74.macro TRACE_IRQS_OFF_DEBUG
  75	call	debug_stack_set_zero
  76	TRACE_IRQS_OFF
  77	call	debug_stack_reset
  78.endm
  79
  80.macro TRACE_IRQS_ON_DEBUG
  81	call	debug_stack_set_zero
  82	TRACE_IRQS_ON
  83	call	debug_stack_reset
  84.endm
  85
  86.macro TRACE_IRQS_IRETQ_DEBUG
  87	bt	$9, EFLAGS(%rsp)		/* interrupts off? */
  88	jnc	1f
  89	TRACE_IRQS_ON_DEBUG
  901:
  91.endm
  92
  93#else
  94# define TRACE_IRQS_OFF_DEBUG			TRACE_IRQS_OFF
  95# define TRACE_IRQS_ON_DEBUG			TRACE_IRQS_ON
  96# define TRACE_IRQS_IRETQ_DEBUG			TRACE_IRQS_IRETQ
  97#endif
  98
  99/*
 100 * 64-bit SYSCALL instruction entry. Up to 6 arguments in registers.
 101 *
 102 * This is the only entry point used for 64-bit system calls.  The
 103 * hardware interface is reasonably well designed and the register to
 104 * argument mapping Linux uses fits well with the registers that are
 105 * available when SYSCALL is used.
 106 *
 107 * SYSCALL instructions can be found inlined in libc implementations as
 108 * well as some other programs and libraries.  There are also a handful
 109 * of SYSCALL instructions in the vDSO used, for example, as a
 110 * clock_gettimeofday fallback.
 111 *
 112 * 64-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
 113 * then loads new ss, cs, and rip from previously programmed MSRs.
 114 * rflags gets masked by a value from another MSR (so CLD and CLAC
 115 * are not needed). SYSCALL does not save anything on the stack
 116 * and does not change rsp.
 117 *
 118 * Registers on entry:
 119 * rax  system call number
 120 * rcx  return address
 121 * r11  saved rflags (note: r11 is callee-clobbered register in C ABI)
 122 * rdi  arg0
 123 * rsi  arg1
 124 * rdx  arg2
 125 * r10  arg3 (needs to be moved to rcx to conform to C ABI)
 126 * r8   arg4
 127 * r9   arg5
 128 * (note: r12-r15, rbp, rbx are callee-preserved in C ABI)
 129 *
 130 * Only called from user space.
 131 *
 132 * When the user can change pt_regs->foo, always force IRET. That is because
 133 * it deals with uncanonical addresses better. SYSRET has trouble
 134 * with them due to bugs in both AMD and Intel CPUs.
 135 */
 136
 137ENTRY(entry_SYSCALL_64)
 138	/*
 139	 * Interrupts are off on entry.
 140	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
 141	 * it is too small to ever cause noticeable irq latency.
 142	 */
 143	SWAPGS_UNSAFE_STACK
 144	/*
 145	 * A hypervisor implementation might want to use a label
 146	 * after the swapgs, so that it can do the swapgs
 147	 * for the guest and jump here on syscall.
 148	 */
 149GLOBAL(entry_SYSCALL_64_after_swapgs)
 150
 151	movq	%rsp, PER_CPU_VAR(rsp_scratch)
 152	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
 153
 154	TRACE_IRQS_OFF
 155
 156	/* Construct struct pt_regs on stack */
 157	pushq	$__USER_DS			/* pt_regs->ss */
 158	pushq	PER_CPU_VAR(rsp_scratch)	/* pt_regs->sp */
 159	pushq	%r11				/* pt_regs->flags */
 160	pushq	$__USER_CS			/* pt_regs->cs */
 161	pushq	%rcx				/* pt_regs->ip */
 162	pushq	%rax				/* pt_regs->orig_ax */
 163	pushq	%rdi				/* pt_regs->di */
 164	pushq	%rsi				/* pt_regs->si */
 165	pushq	%rdx				/* pt_regs->dx */
 166	pushq	%rcx				/* pt_regs->cx */
 167	pushq	$-ENOSYS			/* pt_regs->ax */
 168	pushq	%r8				/* pt_regs->r8 */
 169	pushq	%r9				/* pt_regs->r9 */
 170	pushq	%r10				/* pt_regs->r10 */
 171	pushq	%r11				/* pt_regs->r11 */
 172	sub	$(6*8), %rsp			/* pt_regs->bp, bx, r12-15 not saved */
 173
 174	/*
 175	 * If we need to do entry work or if we guess we'll need to do
 176	 * exit work, go straight to the slow path.
 177	 */
 178	movq	PER_CPU_VAR(current_task), %r11
 179	testl	$_TIF_WORK_SYSCALL_ENTRY|_TIF_ALLWORK_MASK, TASK_TI_flags(%r11)
 180	jnz	entry_SYSCALL64_slow_path
 181
 182entry_SYSCALL_64_fastpath:
 183	/*
 184	 * Easy case: enable interrupts and issue the syscall.  If the syscall
 185	 * needs pt_regs, we'll call a stub that disables interrupts again
 186	 * and jumps to the slow path.
 187	 */
 188	TRACE_IRQS_ON
 189	ENABLE_INTERRUPTS(CLBR_NONE)
 190#if __SYSCALL_MASK == ~0
 191	cmpq	$__NR_syscall_max, %rax
 192#else
 193	andl	$__SYSCALL_MASK, %eax
 194	cmpl	$__NR_syscall_max, %eax
 195#endif
 196	ja	1f				/* return -ENOSYS (already in pt_regs->ax) */
 197	movq	%r10, %rcx
 198
 199	/*
 200	 * This call instruction is handled specially in stub_ptregs_64.
 201	 * It might end up jumping to the slow path.  If it jumps, RAX
 202	 * and all argument registers are clobbered.
 203	 */
 204	call	*sys_call_table(, %rax, 8)
 205.Lentry_SYSCALL_64_after_fastpath_call:
 206
 207	movq	%rax, RAX(%rsp)
 2081:
 209
 210	/*
 211	 * If we get here, then we know that pt_regs is clean for SYSRET64.
 212	 * If we see that no exit work is required (which we are required
 213	 * to check with IRQs off), then we can go straight to SYSRET64.
 214	 */
 215	DISABLE_INTERRUPTS(CLBR_NONE)
 216	TRACE_IRQS_OFF
 217	movq	PER_CPU_VAR(current_task), %r11
 218	testl	$_TIF_ALLWORK_MASK, TASK_TI_flags(%r11)
 219	jnz	1f
 220
 221	LOCKDEP_SYS_EXIT
 222	TRACE_IRQS_ON		/* user mode is traced as IRQs on */
 223	movq	RIP(%rsp), %rcx
 224	movq	EFLAGS(%rsp), %r11
 225	RESTORE_C_REGS_EXCEPT_RCX_R11
 226	movq	RSP(%rsp), %rsp
 227	USERGS_SYSRET64
 228
 2291:
 230	/*
 231	 * The fast path looked good when we started, but something changed
 232	 * along the way and we need to switch to the slow path.  Calling
 233	 * raise(3) will trigger this, for example.  IRQs are off.
 234	 */
 235	TRACE_IRQS_ON
 236	ENABLE_INTERRUPTS(CLBR_NONE)
 237	SAVE_EXTRA_REGS
 238	movq	%rsp, %rdi
 239	call	syscall_return_slowpath	/* returns with IRQs disabled */
 240	jmp	return_from_SYSCALL_64
 241
 242entry_SYSCALL64_slow_path:
 243	/* IRQs are off. */
 244	SAVE_EXTRA_REGS
 245	movq	%rsp, %rdi
 246	call	do_syscall_64		/* returns with IRQs disabled */
 247
 248return_from_SYSCALL_64:
 249	RESTORE_EXTRA_REGS
 250	TRACE_IRQS_IRETQ		/* we're about to change IF */
 251
 252	/*
 253	 * Try to use SYSRET instead of IRET if we're returning to
 254	 * a completely clean 64-bit userspace context.
 255	 */
 256	movq	RCX(%rsp), %rcx
 257	movq	RIP(%rsp), %r11
 258	cmpq	%rcx, %r11			/* RCX == RIP */
 259	jne	opportunistic_sysret_failed
 260
 261	/*
 262	 * On Intel CPUs, SYSRET with non-canonical RCX/RIP will #GP
 263	 * in kernel space.  This essentially lets the user take over
 264	 * the kernel, since userspace controls RSP.
 265	 *
 266	 * If width of "canonical tail" ever becomes variable, this will need
 267	 * to be updated to remain correct on both old and new CPUs.
 268	 */
 269	.ifne __VIRTUAL_MASK_SHIFT - 47
 270	.error "virtual address width changed -- SYSRET checks need update"
 271	.endif
 272
 273	/* Change top 16 bits to be the sign-extension of 47th bit */
 274	shl	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
 275	sar	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
 276
 277	/* If this changed %rcx, it was not canonical */
 278	cmpq	%rcx, %r11
 279	jne	opportunistic_sysret_failed
 280
 281	cmpq	$__USER_CS, CS(%rsp)		/* CS must match SYSRET */
 282	jne	opportunistic_sysret_failed
 283
 284	movq	R11(%rsp), %r11
 285	cmpq	%r11, EFLAGS(%rsp)		/* R11 == RFLAGS */
 286	jne	opportunistic_sysret_failed
 287
 288	/*
 289	 * SYSCALL clears RF when it saves RFLAGS in R11 and SYSRET cannot
 290	 * restore RF properly. If the slowpath sets it for whatever reason, we
 291	 * need to restore it correctly.
 292	 *
 293	 * SYSRET can restore TF, but unlike IRET, restoring TF results in a
 294	 * trap from userspace immediately after SYSRET.  This would cause an
 295	 * infinite loop whenever #DB happens with register state that satisfies
 296	 * the opportunistic SYSRET conditions.  For example, single-stepping
 297	 * this user code:
 298	 *
 299	 *           movq	$stuck_here, %rcx
 300	 *           pushfq
 301	 *           popq %r11
 302	 *   stuck_here:
 303	 *
 304	 * would never get past 'stuck_here'.
 305	 */
 306	testq	$(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11
 307	jnz	opportunistic_sysret_failed
 308
 309	/* nothing to check for RSP */
 310
 311	cmpq	$__USER_DS, SS(%rsp)		/* SS must match SYSRET */
 312	jne	opportunistic_sysret_failed
 313
 314	/*
 315	 * We win! This label is here just for ease of understanding
 316	 * perf profiles. Nothing jumps here.
 317	 */
 318syscall_return_via_sysret:
 319	/* rcx and r11 are already restored (see code above) */
 320	RESTORE_C_REGS_EXCEPT_RCX_R11
 321	movq	RSP(%rsp), %rsp
 322	USERGS_SYSRET64
 323
 324opportunistic_sysret_failed:
 325	SWAPGS
 326	jmp	restore_c_regs_and_iret
 327END(entry_SYSCALL_64)
 328
 329ENTRY(stub_ptregs_64)
 330	/*
 331	 * Syscalls marked as needing ptregs land here.
 332	 * If we are on the fast path, we need to save the extra regs,
 333	 * which we achieve by trying again on the slow path.  If we are on
 334	 * the slow path, the extra regs are already saved.
 335	 *
 336	 * RAX stores a pointer to the C function implementing the syscall.
 337	 * IRQs are on.
 338	 */
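	/*
	 * A note on the check below: ptregs_\func tail-jumps here, so (%rsp)
	 * still holds the return address pushed by whoever called the
	 * syscall-table entry.  When that caller was the fast path's
	 * "call *sys_call_table(, %rax, 8)", the return address is
	 * .Lentry_SYSCALL_64_after_fastpath_call (the label placed right
	 * after that call), which is what the comparison detects.
	 */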
 339	cmpq	$.Lentry_SYSCALL_64_after_fastpath_call, (%rsp)
 340	jne	1f
 341
 342	/*
 343	 * Called from fast path -- disable IRQs again, pop return address
 344	 * and jump to slow path
 345	 */
 346	DISABLE_INTERRUPTS(CLBR_NONE)
 347	TRACE_IRQS_OFF
 348	popq	%rax
 349	jmp	entry_SYSCALL64_slow_path
 350
 3511:
 352	jmp	*%rax				/* Called from C */

 353END(stub_ptregs_64)
 354
 355.macro ptregs_stub func
 356ENTRY(ptregs_\func)
 357	leaq	\func(%rip), %rax
 358	jmp	stub_ptregs_64
 359END(ptregs_\func)
 360.endm
 361
 362/* Instantiate ptregs_stub for each ptregs-using syscall */
 363#define __SYSCALL_64_QUAL_(sym)
 364#define __SYSCALL_64_QUAL_ptregs(sym) ptregs_stub sym
 365#define __SYSCALL_64(nr, sym, qual) __SYSCALL_64_QUAL_##qual(sym)
 366#include <asm/syscalls_64.h>
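/*
 * Illustrative expansion (assuming, as in syscall_64.tbl, that fork is
 * syscall 57 and carries the "ptregs" qualifier):
 *
 *	__SYSCALL_64(57, sys_fork, ptregs)	->	ptregs_stub sys_fork
 *
 * which emits:
 *
 *	ENTRY(ptregs_sys_fork)
 *		leaq	sys_fork(%rip), %rax
 *		jmp	stub_ptregs_64
 *	END(ptregs_sys_fork)
 *
 * The C-side table build (arch/x86/entry/syscall_64.c) uses the same
 * qualifier to point slot 57 at ptregs_sys_fork rather than sys_fork.
 */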
 367
 368/*
 369 * %rdi: prev task
 370 * %rsi: next task
 371 */
 372ENTRY(__switch_to_asm)
 373	/*
 374	 * Save callee-saved registers
 375	 * This must match the order in inactive_task_frame
 376	 */
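	/*
	 * Sketch of that layout (see struct inactive_task_frame in
	 * <asm/switch_to.h>; field order here is from memory and may lag the
	 * header): from the saved RSP upwards -- r15, r14, r13, r12, rbx,
	 * rbp, then the return address pushed by the caller of
	 * __switch_to_asm.
	 */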
 377	pushq	%rbp
 378	pushq	%rbx
 379	pushq	%r12
 380	pushq	%r13
 381	pushq	%r14
 382	pushq	%r15
 383
 384	/* switch stack */
 385	movq	%rsp, TASK_threadsp(%rdi)
 386	movq	TASK_threadsp(%rsi), %rsp
 387
 388#ifdef CONFIG_CC_STACKPROTECTOR
 389	movq	TASK_stack_canary(%rsi), %rbx
 390	movq	%rbx, PER_CPU_VAR(irq_stack_union)+stack_canary_offset
 391#endif
 392
 393	/* restore callee-saved registers */
 394	popq	%r15
 395	popq	%r14
 396	popq	%r13
 397	popq	%r12
 398	popq	%rbx
 399	popq	%rbp
 400
 401	jmp	__switch_to
 402END(__switch_to_asm)
 403
 404/*
 405 * A newly forked process directly context switches into this address.
 406 *
 407 * rax: prev task we switched from
 408 * rbx: kernel thread func (NULL for user thread)
 409 * r12: kernel thread arg
 410 */
 411ENTRY(ret_from_fork)
 412	FRAME_BEGIN			/* help unwinder find end of stack */
 413	movq	%rax, %rdi
 414	call	schedule_tail		/* rdi: 'prev' task parameter */
 415
 416	testq	%rbx, %rbx		/* from kernel_thread? */
 417	jnz	1f			/* kernel threads are uncommon */
 418
 4192:
 420	leaq	FRAME_OFFSET(%rsp),%rdi	/* pt_regs pointer */
 421	call	syscall_return_slowpath	/* returns with IRQs disabled */
 422	TRACE_IRQS_ON			/* user mode is traced as IRQS on */
 423	SWAPGS
 424	FRAME_END
 425	jmp	restore_regs_and_iret
 426
 4271:
 428	/* kernel thread */
 429	movq	%r12, %rdi
 430	call	*%rbx
 431	/*
 432	 * A kernel thread is allowed to return here after successfully
 433	 * calling do_execve().  Exit to userspace to complete the execve()
 434	 * syscall.
 435	 */
 436	movq	$0, RAX(%rsp)
 437	jmp	2b
 438END(ret_from_fork)
 439
 440/*
 441 * Build the entry stubs with some assembler magic.
 442 * We pack 1 stub into every 8-byte block.
 443 */
 444	.align 8
 445ENTRY(irq_entries_start)
 446    vector=FIRST_EXTERNAL_VECTOR
 447    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
 448	pushq	$(~vector+0x80)			/* Note: always in signed byte range */
 449    vector=vector+1
 450	jmp	common_interrupt
 451	.align	8
 452    .endr
 453END(irq_entries_start)
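/*
 * Encoding example: for vector 0x20 (FIRST_EXTERNAL_VECTOR) the stub pushes
 * $(~0x20 + 0x80) = $0x5f, and for vector 0xff it pushes $-0x80, so every
 * value fits in a sign-extended 8-bit immediate and the pushq + jmp pair
 * fits in the 8-byte slot.  common_interrupt then adds -0x80, leaving
 * ~vector in the orig_ax slot, and do_IRQ recovers it as ~regs->orig_ax.
 */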
 454
 455/*
 456 * Interrupt entry/exit.
 457 *
 458 * Interrupt entry points save only callee clobbered registers in fast path.
 459 *
 460 * Entry runs with interrupts off.
 461 */
 462
 463/* 0(%rsp): ~(interrupt number) */
 464	.macro interrupt func
 465	cld
 466	ALLOC_PT_GPREGS_ON_STACK
 467	SAVE_C_REGS
 468	SAVE_EXTRA_REGS
 469	ENCODE_FRAME_POINTER
 470
 471	testb	$3, CS(%rsp)
 472	jz	1f
 473
 474	/*
 475	 * IRQ from user mode.  Switch to kernel gsbase and inform context
 476	 * tracking that we're in kernel mode.
 477	 */
 478	SWAPGS
 479
 480	/*
 481	 * We need to tell lockdep that IRQs are off.  We can't do this until
 482	 * we fix gsbase, and we should do it before enter_from_user_mode
 483	 * (which can take locks).  Since TRACE_IRQS_OFF is idempotent,
 484	 * the simplest way to handle it is to just call it twice if
 485	 * we enter from user mode.  There's no reason to optimize this since
 486	 * TRACE_IRQS_OFF is a no-op if lockdep is off.
 487	 */
 488	TRACE_IRQS_OFF
 489
 490	CALL_enter_from_user_mode
 491
 4921:
 493	/*
 494	 * Save previous stack pointer, optionally switch to interrupt stack.
 495	 * irq_count is used to check if a CPU is already on an interrupt stack
 496	 * or not. While this is essentially redundant with preempt_count it is
 497	 * a little cheaper to use a separate counter in the PDA (short of
 498	 * moving irq_enter into assembly, which would be too much work)
 499	 */
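	/*
	 * irq_count is initialized to -1, so the incl below sets ZF only when
	 * the count becomes 0, i.e. on the outermost interrupt; only then does
	 * cmovzq switch RSP to the per-cpu IRQ stack.  Nested interrupts keep
	 * running on the stack they arrived on.
	 */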
 500	movq	%rsp, %rdi
 501	incl	PER_CPU_VAR(irq_count)
 502	cmovzq	PER_CPU_VAR(irq_stack_ptr), %rsp
 503	pushq	%rdi
 504	/* We entered an interrupt context - irqs are off: */
 505	TRACE_IRQS_OFF
 506
 507	call	\func	/* rdi points to pt_regs */
 508	.endm
 509
 510	/*
 511	 * The interrupt stubs push (~vector+0x80) onto the stack and
 512	 * then jump to common_interrupt.
 513	 */
 514	.p2align CONFIG_X86_L1_CACHE_SHIFT
 515common_interrupt:
 516	ASM_CLAC
 517	addq	$-0x80, (%rsp)			/* Adjust vector to [-256, -1] range */
 518	interrupt do_IRQ
 519	/* 0(%rsp): old RSP */
 520ret_from_intr:
 521	DISABLE_INTERRUPTS(CLBR_NONE)
 522	TRACE_IRQS_OFF
 523	decl	PER_CPU_VAR(irq_count)
 524
 525	/* Restore saved previous stack */
 526	popq	%rsp
 527
 528	testb	$3, CS(%rsp)
 529	jz	retint_kernel
 530
 531	/* Interrupt came from user space */
 532GLOBAL(retint_user)
 533	mov	%rsp,%rdi
 534	call	prepare_exit_to_usermode
 535	TRACE_IRQS_IRETQ
 536	SWAPGS
 537	jmp	restore_regs_and_iret
 538
 539/* Returning to kernel space */
 540retint_kernel:
 541#ifdef CONFIG_PREEMPT
 542	/* Interrupts are off */
 543	/* Check if we need preemption */
 544	bt	$9, EFLAGS(%rsp)		/* were interrupts off? */
 545	jnc	1f
 5460:	cmpl	$0, PER_CPU_VAR(__preempt_count)
 547	jnz	1f
 548	call	preempt_schedule_irq
 549	jmp	0b
 5501:
 551#endif
 552	/*
 553	 * The iretq could re-enable interrupts:
 554	 */
 555	TRACE_IRQS_IRETQ
 556
 557/*
 558 * At this label, code paths which return to kernel and to user,
 559 * which come from interrupts/exception and from syscalls, merge.
 560 */
 561GLOBAL(restore_regs_and_iret)
 562	RESTORE_EXTRA_REGS
 563restore_c_regs_and_iret:
 564	RESTORE_C_REGS
 565	REMOVE_PT_GPREGS_FROM_STACK 8
 566	INTERRUPT_RETURN
 567
 568ENTRY(native_iret)
 569	/*
 570	 * Are we returning to a stack segment from the LDT?  Note: in
 571	 * 64-bit mode SS:RSP on the exception stack is always valid.
 572	 */
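	/*
	 * (Bit 2 of a selector is the Table Indicator bit: if it is set, the
	 * saved SS came from the LDT, which is the only case espfix must fix.)
	 */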
 573#ifdef CONFIG_X86_ESPFIX64
 574	testb	$4, (SS-RIP)(%rsp)
 575	jnz	native_irq_return_ldt
 576#endif
 577
 578.global native_irq_return_iret
 579native_irq_return_iret:
 580	/*
 581	 * This may fault.  Non-paranoid faults on return to userspace are
 582	 * handled by fixup_bad_iret.  These include #SS, #GP, and #NP.
 583	 * Double-faults due to espfix64 are handled in do_double_fault.
 584	 * Other faults here are fatal.
 585	 */
 586	iretq
 587
 588#ifdef CONFIG_X86_ESPFIX64
 589native_irq_return_ldt:
 590	/*
 591	 * We are running with user GSBASE.  All GPRs contain their user
 592	 * values.  We have a percpu ESPFIX stack that is eight slots
 593	 * long (see ESPFIX_STACK_SIZE).  espfix_waddr points to the bottom
 594	 * of the ESPFIX stack.
 595	 *
 596	 * We clobber RAX and RDI in this code.  We stash RDI on the
 597	 * normal stack and RAX on the ESPFIX stack.
 598	 *
 599	 * The ESPFIX stack layout we set up looks like this:
 600	 *
 601	 * --- top of ESPFIX stack ---
 602	 * SS
 603	 * RSP
 604	 * RFLAGS
 605	 * CS
 606	 * RIP  <-- RSP points here when we're done
 607	 * RAX  <-- espfix_waddr points here
 608	 * --- bottom of ESPFIX stack ---
 609	 */
 610
 611	pushq	%rdi				/* Stash user RDI */
 612	SWAPGS
 613	movq	PER_CPU_VAR(espfix_waddr), %rdi
 614	movq	%rax, (0*8)(%rdi)		/* user RAX */
 615	movq	(1*8)(%rsp), %rax		/* user RIP */
 616	movq	%rax, (1*8)(%rdi)
 617	movq	(2*8)(%rsp), %rax		/* user CS */
 618	movq	%rax, (2*8)(%rdi)
 619	movq	(3*8)(%rsp), %rax		/* user RFLAGS */
 620	movq	%rax, (3*8)(%rdi)
 621	movq	(5*8)(%rsp), %rax		/* user SS */
 622	movq	%rax, (5*8)(%rdi)
 623	movq	(4*8)(%rsp), %rax		/* user RSP */
 624	movq	%rax, (4*8)(%rdi)
 625	/* Now RAX == RSP. */
 626
 627	andl	$0xffff0000, %eax		/* RAX = (RSP & 0xffff0000) */
 628	popq	%rdi				/* Restore user RDI */
 629
 630	/*
 631	 * espfix_stack[31:16] == 0.  The page tables are set up such that
 632	 * (espfix_stack | (X & 0xffff0000)) points to a read-only alias of
 633	 * espfix_waddr for any X.  That is, there are 65536 RO aliases of
 634	 * the same page.  Set up RSP so that RSP[31:16] contains the
 635	 * respective 16 bits of the /userspace/ RSP and RSP nonetheless
 636	 * still points to an RO alias of the ESPFIX stack.
 637	 */
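	/*
	 * Hypothetical example: had the user RSP been 0x00007ffd1234abcd, the
	 * andl above leaves RAX = 0x12340000, and the orq below yields
	 * espfix_stack | 0x12340000 -- bits [31:16] now match the user RSP
	 * while the address still lands in an RO alias of the ESPFIX page.
	 */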
 638	orq	PER_CPU_VAR(espfix_stack), %rax
 639	SWAPGS
 640	movq	%rax, %rsp
 641
 642	/*
 643	 * At this point, we cannot write to the stack any more, but we can
 644	 * still read.
 645	 */
 646	popq	%rax				/* Restore user RAX */
 647
 648	/*
 649	 * RSP now points to an ordinary IRET frame, except that the page
 650	 * is read-only and RSP[31:16] are preloaded with the userspace
 651	 * values.  We can now IRET back to userspace.
 652	 */
 653	jmp	native_irq_return_iret
 654#endif
 655END(common_interrupt)
 656
 657/*
 658 * APIC interrupts.
 659 */
 660.macro apicinterrupt3 num sym do_sym
 661ENTRY(\sym)
 662	ASM_CLAC
 663	pushq	$~(\num)
 664.Lcommon_\sym:
 665	interrupt \do_sym
 666	jmp	ret_from_intr
 667END(\sym)
 668.endm
 669
 670#ifdef CONFIG_TRACING
 671#define trace(sym) trace_##sym
 672#define smp_trace(sym) smp_trace_##sym
 673
 674.macro trace_apicinterrupt num sym
 675apicinterrupt3 \num trace(\sym) smp_trace(\sym)
 676.endm
 677#else
 678.macro trace_apicinterrupt num sym do_sym
 679.endm
 680#endif
 681
 682/* Make sure APIC interrupt handlers end up in the irqentry section: */
 683#if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN)
 684# define PUSH_SECTION_IRQENTRY	.pushsection .irqentry.text, "ax"
 685# define POP_SECTION_IRQENTRY	.popsection
 686#else
 687# define PUSH_SECTION_IRQENTRY
 688# define POP_SECTION_IRQENTRY
 689#endif
 690
 691.macro apicinterrupt num sym do_sym
 692PUSH_SECTION_IRQENTRY
 693apicinterrupt3 \num \sym \do_sym
 694trace_apicinterrupt \num \sym
 695POP_SECTION_IRQENTRY
 696.endm
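/*
 * For example (expanding the macros above),
 *	apicinterrupt LOCAL_TIMER_VECTOR apic_timer_interrupt smp_apic_timer_interrupt
 * emits roughly:
 *
 *	ENTRY(apic_timer_interrupt)
 *		ASM_CLAC
 *		pushq	$~(LOCAL_TIMER_VECTOR)
 *		interrupt smp_apic_timer_interrupt
 *		jmp	ret_from_intr
 *	END(apic_timer_interrupt)
 *
 * plus, under CONFIG_TRACING, a trace_apic_timer_interrupt twin that calls
 * smp_trace_apic_timer_interrupt.
 */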
 697
 698#ifdef CONFIG_SMP
 699apicinterrupt3 IRQ_MOVE_CLEANUP_VECTOR		irq_move_cleanup_interrupt	smp_irq_move_cleanup_interrupt
 700apicinterrupt3 REBOOT_VECTOR			reboot_interrupt		smp_reboot_interrupt
 701#endif
 702
 703#ifdef CONFIG_X86_UV
 704apicinterrupt3 UV_BAU_MESSAGE			uv_bau_message_intr1		uv_bau_message_interrupt
 705#endif
 706
 707apicinterrupt LOCAL_TIMER_VECTOR		apic_timer_interrupt		smp_apic_timer_interrupt
 708apicinterrupt X86_PLATFORM_IPI_VECTOR		x86_platform_ipi		smp_x86_platform_ipi
 709
 710#ifdef CONFIG_HAVE_KVM
 711apicinterrupt3 POSTED_INTR_VECTOR		kvm_posted_intr_ipi		smp_kvm_posted_intr_ipi
 712apicinterrupt3 POSTED_INTR_WAKEUP_VECTOR	kvm_posted_intr_wakeup_ipi	smp_kvm_posted_intr_wakeup_ipi
 713#endif
 714
 715#ifdef CONFIG_X86_MCE_THRESHOLD
 716apicinterrupt THRESHOLD_APIC_VECTOR		threshold_interrupt		smp_threshold_interrupt
 717#endif
 718
 719#ifdef CONFIG_X86_MCE_AMD
 720apicinterrupt DEFERRED_ERROR_VECTOR		deferred_error_interrupt	smp_deferred_error_interrupt
 721#endif
 722
 723#ifdef CONFIG_X86_THERMAL_VECTOR
 724apicinterrupt THERMAL_APIC_VECTOR		thermal_interrupt		smp_thermal_interrupt
 725#endif
 726
 727#ifdef CONFIG_SMP
 728apicinterrupt CALL_FUNCTION_SINGLE_VECTOR	call_function_single_interrupt	smp_call_function_single_interrupt
 729apicinterrupt CALL_FUNCTION_VECTOR		call_function_interrupt		smp_call_function_interrupt
 730apicinterrupt RESCHEDULE_VECTOR			reschedule_interrupt		smp_reschedule_interrupt
 731#endif
 732
 733apicinterrupt ERROR_APIC_VECTOR			error_interrupt			smp_error_interrupt
 734apicinterrupt SPURIOUS_APIC_VECTOR		spurious_interrupt		smp_spurious_interrupt
 735
 736#ifdef CONFIG_IRQ_WORK
 737apicinterrupt IRQ_WORK_VECTOR			irq_work_interrupt		smp_irq_work_interrupt
 738#endif
 739
 740/*
 741 * Exception entry points.
 742 */
 743#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss) + (TSS_ist + ((x) - 1) * 8)
 744
 745.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
 746ENTRY(\sym)
 747	/* Sanity check */
 748	.if \shift_ist != -1 && \paranoid == 0
 749	.error "using shift_ist requires paranoid=1"
 750	.endif
 751
 752	ASM_CLAC
 753	PARAVIRT_ADJUST_EXCEPTION_FRAME
 754
 755	.ifeq \has_error_code
 756	pushq	$-1				/* ORIG_RAX: no syscall to restart */
 757	.endif
 758
 759	ALLOC_PT_GPREGS_ON_STACK
 760
 761	.if \paranoid
 762	.if \paranoid == 1
 763	testb	$3, CS(%rsp)			/* If coming from userspace, switch stacks */
 764	jnz	1f
 765	.endif
 766	call	paranoid_entry
 767	.else
 768	call	error_entry
 769	.endif
 770	/* returned flag: ebx=0: need swapgs on exit, ebx=1: don't need it */
 771
 772	.if \paranoid
 773	.if \shift_ist != -1
 774	TRACE_IRQS_OFF_DEBUG			/* reload IDT in case of recursion */
 775	.else
 776	TRACE_IRQS_OFF
 777	.endif
 778	.endif
 779
 780	movq	%rsp, %rdi			/* pt_regs pointer */
 781
 782	.if \has_error_code
 783	movq	ORIG_RAX(%rsp), %rsi		/* get error code */
 784	movq	$-1, ORIG_RAX(%rsp)		/* no syscall to restart */
 785	.else
 786	xorl	%esi, %esi			/* no error code */
 787	.endif
 788
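	/*
	 * For shift_ist users (e.g. #DB/#BP on DEBUG_STACK), move the IST
	 * pointer in the TSS down by one EXCEPTION_STKSZ slice around the
	 * handler call, so that a recursive exception taken while \do_sym
	 * runs lands on a fresh slice instead of clobbering this frame.
	 */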
 789	.if \shift_ist != -1
 790	subq	$EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
 791	.endif
 792
 793	call	\do_sym
 794
 795	.if \shift_ist != -1
 796	addq	$EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
 797	.endif
 798
 799	/* these procedures expect "no swapgs" flag in ebx */
 800	.if \paranoid
 801	jmp	paranoid_exit
 802	.else
 803	jmp	error_exit
 804	.endif
 805
 806	.if \paranoid == 1
 807	/*
 808	 * Paranoid entry from userspace.  Switch stacks and treat it
 809	 * as a normal entry.  This means that paranoid handlers
 810	 * run in real process context if user_mode(regs).
 811	 */
 8121:
 813	call	error_entry
 814
 815
 816	movq	%rsp, %rdi			/* pt_regs pointer */
 817	call	sync_regs
 818	movq	%rax, %rsp			/* switch stack */
 819
 820	movq	%rsp, %rdi			/* pt_regs pointer */
 821
 822	.if \has_error_code
 823	movq	ORIG_RAX(%rsp), %rsi		/* get error code */
 824	movq	$-1, ORIG_RAX(%rsp)		/* no syscall to restart */
 825	.else
 826	xorl	%esi, %esi			/* no error code */
 827	.endif
 828
 829	call	\do_sym
 830
 831	jmp	error_exit			/* %ebx: no swapgs flag */
 832	.endif
 833END(\sym)
 834.endm
 835
 836#ifdef CONFIG_TRACING
 837.macro trace_idtentry sym do_sym has_error_code:req
 838idtentry trace(\sym) trace(\do_sym) has_error_code=\has_error_code
 839idtentry \sym \do_sym has_error_code=\has_error_code
 840.endm
 841#else
 842.macro trace_idtentry sym do_sym has_error_code:req
 843idtentry \sym \do_sym has_error_code=\has_error_code
 844.endm
 845#endif
 846
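/*
 * Rough expansion of a simple entry below (no error code, not paranoid),
 * e.g. "idtentry overflow do_overflow has_error_code=0":
 *
 *	ENTRY(overflow)
 *		ASM_CLAC
 *		PARAVIRT_ADJUST_EXCEPTION_FRAME
 *		pushq	$-1			# ORIG_RAX: no syscall to restart
 *		ALLOC_PT_GPREGS_ON_STACK
 *		call	error_entry
 *		movq	%rsp, %rdi		# pt_regs pointer
 *		xorl	%esi, %esi		# no error code
 *		call	do_overflow
 *		jmp	error_exit
 *	END(overflow)
 */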
 847idtentry divide_error			do_divide_error			has_error_code=0
 848idtentry overflow			do_overflow			has_error_code=0
 849idtentry bounds				do_bounds			has_error_code=0
 850idtentry invalid_op			do_invalid_op			has_error_code=0
 851idtentry device_not_available		do_device_not_available		has_error_code=0
 852idtentry double_fault			do_double_fault			has_error_code=1 paranoid=2
 853idtentry coprocessor_segment_overrun	do_coprocessor_segment_overrun	has_error_code=0
 854idtentry invalid_TSS			do_invalid_TSS			has_error_code=1
 855idtentry segment_not_present		do_segment_not_present		has_error_code=1
 856idtentry spurious_interrupt_bug		do_spurious_interrupt_bug	has_error_code=0
 857idtentry coprocessor_error		do_coprocessor_error		has_error_code=0
 858idtentry alignment_check		do_alignment_check		has_error_code=1
 859idtentry simd_coprocessor_error		do_simd_coprocessor_error	has_error_code=0
 860
 861
 862	/*
 863	 * Reload gs selector with exception handling
 864	 * edi:  new selector
 865	 */
 866ENTRY(native_load_gs_index)
 867	pushfq
 868	DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
 869	SWAPGS
 870.Lgs_change:
 871	movl	%edi, %gs
 8722:	ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
 873	SWAPGS
 874	popfq
 875	ret
 876END(native_load_gs_index)
 877EXPORT_SYMBOL(native_load_gs_index)
 878
 879	_ASM_EXTABLE(.Lgs_change, bad_gs)
 880	.section .fixup, "ax"
 881	/* running with kernelgs */
 882bad_gs:
 883	SWAPGS					/* switch back to user gs */
 884.macro ZAP_GS
 885	/* This can't be a string because the preprocessor needs to see it. */
 886	movl $__USER_DS, %eax
 887	movl %eax, %gs
 888.endm
 889	ALTERNATIVE "", "ZAP_GS", X86_BUG_NULL_SEG
 890	xorl	%eax, %eax
 891	movl	%eax, %gs
 892	jmp	2b
 893	.previous
 894
 895/* Call softirq on interrupt stack. Interrupts are off. */
 896ENTRY(do_softirq_own_stack)
 897	pushq	%rbp
 898	mov	%rsp, %rbp
 899	incl	PER_CPU_VAR(irq_count)
 900	cmove	PER_CPU_VAR(irq_stack_ptr), %rsp
 901	push	%rbp				/* frame pointer backlink */
 902	call	__do_softirq
 903	leaveq
 904	decl	PER_CPU_VAR(irq_count)
 905	ret
 906END(do_softirq_own_stack)
 907
 908#ifdef CONFIG_XEN
 909idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
 910
 911/*
 912 * A note on the "critical region" in our callback handler.
 913 * We want to avoid stacking callback handlers due to events occurring
 914 * during handling of the last event. To do this, we keep events disabled
 915 * until we've done all processing. HOWEVER, we must enable events before
 916 * popping the stack frame (can't be done atomically) and so it would still
 917 * be possible to get enough handler activations to overflow the stack.
 918 * Although unlikely, bugs of that kind are hard to track down, so we'd
 919 * like to avoid the possibility.
 920 * So, on entry to the handler we detect whether we interrupted an
 921 * existing activation in its critical region -- if so, we pop the current
 922 * activation and restart the handler using the previous one.
 923 */
 924ENTRY(xen_do_hypervisor_callback)		/* do_hypervisor_callback(struct pt_regs *) */
 925
 926/*
 927 * Since we don't modify %rdi, xen_evtchn_do_upcall(struct pt_regs *) will
 928 * see the correct pointer to the pt_regs
 929 */
 930	movq	%rdi, %rsp			/* we don't return, adjust the stack frame */
 93111:	incl	PER_CPU_VAR(irq_count)
 932	movq	%rsp, %rbp
 933	cmovzq	PER_CPU_VAR(irq_stack_ptr), %rsp
 934	pushq	%rbp				/* frame pointer backlink */
 935	call	xen_evtchn_do_upcall
 936	popq	%rsp
 937	decl	PER_CPU_VAR(irq_count)
 938#ifndef CONFIG_PREEMPT
 939	call	xen_maybe_preempt_hcall
 940#endif
 941	jmp	error_exit
 942END(xen_do_hypervisor_callback)
 943
 944/*
 945 * Hypervisor uses this for application faults while it executes.
 946 * We get here for two reasons:
 947 *  1. Fault while reloading DS, ES, FS or GS
 948 *  2. Fault while executing IRET
 949 * Category 1 we do not need to fix up as Xen has already reloaded all segment
 950 * registers that could be reloaded and zeroed the others.
 951 * Category 2 we fix up by killing the current process. We cannot use the
 952 * normal Linux return path in this case because if we use the IRET hypercall
 953 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 954 * We distinguish between categories by comparing each saved segment register
 955 * with its current contents: any discrepancy means we are in category 1.
 956 */
 957ENTRY(xen_failsafe_callback)
 958	movl	%ds, %ecx
 959	cmpw	%cx, 0x10(%rsp)
 960	jne	1f
 961	movl	%es, %ecx
 962	cmpw	%cx, 0x18(%rsp)
 963	jne	1f
 964	movl	%fs, %ecx
 965	cmpw	%cx, 0x20(%rsp)
 966	jne	1f
 967	movl	%gs, %ecx
 968	cmpw	%cx, 0x28(%rsp)
 969	jne	1f
 970	/* All segments match their saved values => Category 2 (Bad IRET). */
 971	movq	(%rsp), %rcx
 972	movq	8(%rsp), %r11
 973	addq	$0x30, %rsp
 974	pushq	$0				/* RIP */
 975	pushq	%r11
 976	pushq	%rcx
 977	jmp	general_protection
 9781:	/* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
 979	movq	(%rsp), %rcx
 980	movq	8(%rsp), %r11
 981	addq	$0x30, %rsp
 982	pushq	$-1 /* orig_ax = -1 => not a system call */
 983	ALLOC_PT_GPREGS_ON_STACK
 984	SAVE_C_REGS
 985	SAVE_EXTRA_REGS
 986	ENCODE_FRAME_POINTER
 987	jmp	error_exit
 988END(xen_failsafe_callback)
 989
 990apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
 991	xen_hvm_callback_vector xen_evtchn_do_upcall
 992
 993#endif /* CONFIG_XEN */
 994
 995#if IS_ENABLED(CONFIG_HYPERV)
 996apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
 997	hyperv_callback_vector hyperv_vector_handler
 998#endif /* CONFIG_HYPERV */
 999
1000idtentry debug			do_debug		has_error_code=0	paranoid=1 shift_ist=DEBUG_STACK
1001idtentry int3			do_int3			has_error_code=0	paranoid=1 shift_ist=DEBUG_STACK
1002idtentry stack_segment		do_stack_segment	has_error_code=1
1003
1004#ifdef CONFIG_XEN
1005idtentry xen_debug		do_debug		has_error_code=0
1006idtentry xen_int3		do_int3			has_error_code=0
1007idtentry xen_stack_segment	do_stack_segment	has_error_code=1
1008#endif
1009
1010idtentry general_protection	do_general_protection	has_error_code=1
1011trace_idtentry page_fault	do_page_fault		has_error_code=1
1012
1013#ifdef CONFIG_KVM_GUEST
1014idtentry async_page_fault	do_async_page_fault	has_error_code=1
1015#endif
1016
1017#ifdef CONFIG_X86_MCE
1018idtentry machine_check					has_error_code=0	paranoid=1 do_sym=*machine_check_vector(%rip)
1019#endif
1020
1021/*
1022 * Save all registers in pt_regs, and switch gs if needed.
1023 * Use a slow but surefire "are we in kernel?" check.
1024 * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
1025 */
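/*
 * The check reads MSR_GS_BASE (returned in EDX:EAX by rdmsr): a kernel GS
 * base is a negative (kernel-half) address, so bit 31 of EDX is set, whereas
 * a user GS base is a positive user address.
 */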
1026ENTRY(paranoid_entry)
1027	cld
1028	SAVE_C_REGS 8
1029	SAVE_EXTRA_REGS 8
1030	ENCODE_FRAME_POINTER 8
1031	movl	$1, %ebx
1032	movl	$MSR_GS_BASE, %ecx
1033	rdmsr
1034	testl	%edx, %edx
1035	js	1f				/* negative -> in kernel */
1036	SWAPGS
1037	xorl	%ebx, %ebx
10381:	ret
1039END(paranoid_entry)
1040
1041/*
1042 * "Paranoid" exit path from exception stack.  This is invoked
1043 * only on return from non-NMI IST interrupts that came
1044 * from kernel space.
1045 *
1046 * We may be returning to very strange contexts (e.g. very early
1047 * in syscall entry), so checking for preemption here would
1048 * be complicated.  Fortunately, there's no good reason
1049 * to try to handle preemption here.
1050 *
1051 * On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it)
1052 */
1053ENTRY(paranoid_exit)
1054	DISABLE_INTERRUPTS(CLBR_NONE)
1055	TRACE_IRQS_OFF_DEBUG
1056	testl	%ebx, %ebx			/* swapgs needed? */
1057	jnz	paranoid_exit_no_swapgs
1058	TRACE_IRQS_IRETQ
1059	SWAPGS_UNSAFE_STACK
1060	jmp	paranoid_exit_restore
1061paranoid_exit_no_swapgs:
1062	TRACE_IRQS_IRETQ_DEBUG
1063paranoid_exit_restore:
1064	RESTORE_EXTRA_REGS
1065	RESTORE_C_REGS
1066	REMOVE_PT_GPREGS_FROM_STACK 8
1067	INTERRUPT_RETURN
1068END(paranoid_exit)
1069
1070/*
1071 * Save all registers in pt_regs, and switch gs if needed.
1072 * Return: EBX=0: came from user mode; EBX=1: otherwise
1073 */
1074ENTRY(error_entry)
1075	cld
1076	SAVE_C_REGS 8
1077	SAVE_EXTRA_REGS 8
1078	ENCODE_FRAME_POINTER 8
1079	xorl	%ebx, %ebx
1080	testb	$3, CS+8(%rsp)
1081	jz	.Lerror_kernelspace
1082
1083	/*
1084	 * We entered from user mode or we're pretending to have entered
1085	 * from user mode due to an IRET fault.
1086	 */
1087	SWAPGS
1088
1089.Lerror_entry_from_usermode_after_swapgs:
1090	/*
1091	 * We need to tell lockdep that IRQs are off.  We can't do this until
1092	 * we fix gsbase, and we should do it before enter_from_user_mode
1093	 * (which can take locks).
1094	 */
1095	TRACE_IRQS_OFF
1096	CALL_enter_from_user_mode
1097	ret
1098
1099.Lerror_entry_done:
1100	TRACE_IRQS_OFF
1101	ret
1102
1103	/*
1104	 * There are two places in the kernel that can potentially fault with
1105	 * usergs. Handle them here.  B stepping K8s sometimes report a
1106	 * truncated RIP for IRET exceptions returning to compat mode. Check
1107	 * for these here too.
1108	 */
1109.Lerror_kernelspace:
1110	incl	%ebx
1111	leaq	native_irq_return_iret(%rip), %rcx
1112	cmpq	%rcx, RIP+8(%rsp)
1113	je	.Lerror_bad_iret
1114	movl	%ecx, %eax			/* zero extend */
1115	cmpq	%rax, RIP+8(%rsp)
1116	je	.Lbstep_iret
1117	cmpq	$.Lgs_change, RIP+8(%rsp)
1118	jne	.Lerror_entry_done
1119
1120	/*
1121	 * hack: .Lgs_change can fail with user gsbase.  If this happens, fix up
1122	 * gsbase and proceed.  We'll fix up the exception and land in
1123	 * .Lgs_change's error handler with kernel gsbase.
1124	 */
1125	SWAPGS
1126	jmp .Lerror_entry_done
1127
1128.Lbstep_iret:
1129	/* Fix truncated RIP */
1130	movq	%rcx, RIP+8(%rsp)
1131	/* fall through */
1132
1133.Lerror_bad_iret:
1134	/*
1135	 * We came from an IRET to user mode, so we have user gsbase.
1136	 * Switch to kernel gsbase:
1137	 */
1138	SWAPGS
1139
1140	/*
1141	 * Pretend that the exception came from user mode: set up pt_regs
1142	 * as if we faulted immediately after IRET and clear EBX so that
1143	 * error_exit knows that we will be returning to user mode.
1144	 */
1145	mov	%rsp, %rdi
1146	call	fixup_bad_iret
1147	mov	%rax, %rsp
1148	decl	%ebx
1149	jmp	.Lerror_entry_from_usermode_after_swapgs
1150END(error_entry)
1151
1152
1153/*
1154 * On entry, EBX is a "return to kernel mode" flag:
1155 *   1: already in kernel mode, don't need SWAPGS
1156 *   0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode
1157 */
1158ENTRY(error_exit)
1159	movl	%ebx, %eax
1160	DISABLE_INTERRUPTS(CLBR_NONE)
1161	TRACE_IRQS_OFF
1162	testl	%eax, %eax
1163	jnz	retint_kernel
1164	jmp	retint_user
1165END(error_exit)
1166
1167/* Runs on exception stack */
1168ENTRY(nmi)
1169	/*
1170	 * Fix up the exception frame if we're on Xen.
1171	 * PARAVIRT_ADJUST_EXCEPTION_FRAME is guaranteed to push at most
1172	 * one value to the stack on native, so it may clobber the rdx
1173	 * scratch slot, but it won't clobber any of the important
1174	 * slots past it.
1175	 *
1176	 * Xen is a different story, because the Xen frame itself overlaps
1177	 * the "NMI executing" variable.
1178	 */
1179	PARAVIRT_ADJUST_EXCEPTION_FRAME
1180
1181	/*
1182	 * We allow breakpoints in NMIs. If a breakpoint occurs, then
1183	 * the iretq it performs will take us out of NMI context.
1184	 * This means that we can have nested NMIs where the next
1185	 * NMI is using the top of the stack of the previous NMI. We
1186	 * can't let it execute because the nested NMI will corrupt the
1187	 * stack of the previous NMI. NMI handlers are not re-entrant
1188	 * anyway.
1189	 *
1190	 * To handle this case we do the following:
1191	 *  Check a special location on the stack that contains
1192	 *  a variable that is set when NMIs are executing.
1193	 *  The interrupted task's stack is also checked to see if it
1194	 *  is an NMI stack.
1195	 *  If the variable is not set and the stack is not the NMI
1196	 *  stack then:
1197	 *    o Set the special variable on the stack
1198	 *    o Copy the interrupt frame into an "outermost" location on the
1199	 *      stack
1200	 *    o Copy the interrupt frame into an "iret" location on the stack
1201	 *    o Continue processing the NMI
1202	 *  If the variable is set or the previous stack is the NMI stack:
1203	 *    o Modify the "iret" location to jump to the repeat_nmi
1204	 *    o return back to the first NMI
1205	 *
1206	 * Now on exit of the first NMI, we first clear the stack variable.
1207	 * The NMI stack will tell any nested NMIs at that point that it is
1208	 * nested. Then we pop the stack normally with iret, and if there was
1209	 * a nested NMI that updated the copy interrupt stack frame, a
1210	 * jump will be made to the repeat_nmi code that will handle the second
1211	 * NMI.
1212	 *
1213	 * However, espfix prevents us from directly returning to userspace
1214	 * with a single IRET instruction.  Similarly, IRET to user mode
1215	 * can fault.  We therefore handle NMIs from user space like
1216	 * other IST entries.
1217	 */
1218
1219	/* Use %rdx as our temp variable throughout */
1220	pushq	%rdx
1221
1222	testb	$3, CS-RIP+8(%rsp)
1223	jz	.Lnmi_from_kernel
1224
1225	/*
1226	 * NMI from user mode.  We need to run on the thread stack, but we
1227	 * can't go through the normal entry paths: NMIs are masked, and
1228	 * we don't want to enable interrupts, because then we'll end
1229	 * up in an awkward situation in which IRQs are on but NMIs
1230	 * are off.
1231	 *
1232	 * We also must not push anything to the stack before switching
1233	 * stacks lest we corrupt the "NMI executing" variable.
1234	 */
1235
1236	SWAPGS_UNSAFE_STACK
1237	cld
1238	movq	%rsp, %rdx
1239	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
1240	pushq	5*8(%rdx)	/* pt_regs->ss */
1241	pushq	4*8(%rdx)	/* pt_regs->rsp */
1242	pushq	3*8(%rdx)	/* pt_regs->flags */
1243	pushq	2*8(%rdx)	/* pt_regs->cs */
1244	pushq	1*8(%rdx)	/* pt_regs->rip */
1245	pushq   $-1		/* pt_regs->orig_ax */
1246	pushq   %rdi		/* pt_regs->di */
1247	pushq   %rsi		/* pt_regs->si */
1248	pushq   (%rdx)		/* pt_regs->dx */
1249	pushq   %rcx		/* pt_regs->cx */
1250	pushq   %rax		/* pt_regs->ax */
1251	pushq   %r8		/* pt_regs->r8 */
1252	pushq   %r9		/* pt_regs->r9 */
1253	pushq   %r10		/* pt_regs->r10 */
1254	pushq   %r11		/* pt_regs->r11 */
1255	pushq	%rbx		/* pt_regs->rbx */
1256	pushq	%rbp		/* pt_regs->rbp */
1257	pushq	%r12		/* pt_regs->r12 */
1258	pushq	%r13		/* pt_regs->r13 */
1259	pushq	%r14		/* pt_regs->r14 */
1260	pushq	%r15		/* pt_regs->r15 */
1261	ENCODE_FRAME_POINTER
1262
1263	/*
1264	 * At this point we no longer need to worry about stack damage
1265	 * due to nesting -- we're on the normal thread stack and we're
1266	 * done with the NMI stack.
1267	 */
1268
1269	movq	%rsp, %rdi
1270	movq	$-1, %rsi
1271	call	do_nmi
1272
1273	/*
1274	 * Return back to user mode.  We must *not* do the normal exit
1275	 * work, because we don't want to enable interrupts.
1276	 */
1277	SWAPGS
1278	jmp	restore_regs_and_iret
1279
1280.Lnmi_from_kernel:
1281	/*
1282	 * Here's what our stack frame will look like:
1283	 * +---------------------------------------------------------+
1284	 * | original SS                                             |
1285	 * | original Return RSP                                     |
1286	 * | original RFLAGS                                         |
1287	 * | original CS                                             |
1288	 * | original RIP                                            |
1289	 * +---------------------------------------------------------+
1290	 * | temp storage for rdx                                    |
1291	 * +---------------------------------------------------------+
1292	 * | "NMI executing" variable                                |
1293	 * +---------------------------------------------------------+
1294	 * | iret SS          } Copied from "outermost" frame        |
1295	 * | iret Return RSP  } on each loop iteration; overwritten  |
1296	 * | iret RFLAGS      } by a nested NMI to force another     |
1297	 * | iret CS          } iteration if needed.                 |
1298	 * | iret RIP         }                                      |
1299	 * +---------------------------------------------------------+
1300	 * | outermost SS          } initialized in first_nmi;       |
1301	 * | outermost Return RSP  } will not be changed before      |
1302	 * | outermost RFLAGS      } NMI processing is done.         |
1303	 * | outermost CS          } Copied to "iret" frame on each  |
1304	 * | outermost RIP         } iteration.                      |
1305	 * +---------------------------------------------------------+
1306	 * | pt_regs                                                 |
1307	 * +---------------------------------------------------------+
1308	 *
1309	 * The "original" frame is used by hardware.  Before re-enabling
1310	 * NMIs, we need to be done with it, and we need to leave enough
1311	 * space for the asm code here.
1312	 *
1313	 * We return by executing IRET while RSP points to the "iret" frame.
1314	 * That will either return for real or it will loop back into NMI
1315	 * processing.
1316	 *
1317	 * The "outermost" frame is copied to the "iret" frame on each
1318	 * iteration of the loop, so each iteration starts with the "iret"
1319	 * frame pointing to the final return target.
1320	 */
1321
1322	/*
1323	 * Determine whether we're a nested NMI.
1324	 *
1325	 * If we interrupted kernel code between repeat_nmi and
1326	 * end_repeat_nmi, then we are a nested NMI.  We must not
1327	 * modify the "iret" frame because it's being written by
1328	 * the outer NMI.  That's okay; the outer NMI handler is
1329	 * about to call do_nmi anyway, so we can just
1330	 * resume the outer NMI.
1331	 */
1332
1333	movq	$repeat_nmi, %rdx
1334	cmpq	8(%rsp), %rdx
1335	ja	1f
1336	movq	$end_repeat_nmi, %rdx
1337	cmpq	8(%rsp), %rdx
1338	ja	nested_nmi_out
13391:
1340
1341	/*
1342	 * Now check "NMI executing".  If it's set, then we're nested.
1343	 * This will not detect if we interrupted an outer NMI just
1344	 * before IRET.
1345	 */
1346	cmpl	$1, -8(%rsp)
1347	je	nested_nmi
1348
1349	/*
1350	 * Now test if the previous stack was an NMI stack.  This covers
1351	 * the case where we interrupt an outer NMI after it clears
1352	 * "NMI executing" but before IRET.  We need to be careful, though:
1353	 * there is one case in which RSP could point to the NMI stack
1354	 * despite there being no NMI active: naughty userspace controls
1355	 * RSP at the very beginning of the SYSCALL targets.  We can
1356	 * pull a fast one on naughty userspace, though: we program
1357	 * SYSCALL to mask DF, so userspace cannot cause DF to be set
1358	 * if it controls the kernel's RSP.  We set DF before we clear
1359	 * "NMI executing".
1360	 */
1361	lea	6*8(%rsp), %rdx
1362	/* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */
1363	cmpq	%rdx, 4*8(%rsp)
1364	/* If the stack pointer is above the NMI stack, this is a normal NMI */
1365	ja	first_nmi
1366
1367	subq	$EXCEPTION_STKSZ, %rdx
1368	cmpq	%rdx, 4*8(%rsp)
1369	/* If it is below the NMI stack, it is a normal NMI */
1370	jb	first_nmi
1371
1372	/* Ah, it is within the NMI stack. */
1373
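	/*
	 * X86_EFLAGS_DF is bit 10 (0x0400).  3*8(%rsp) is the saved RFLAGS in
	 * the hardware frame (rdx temp at 0, then RIP/CS/RFLAGS/RSP/SS), so
	 * byte (3*8 + 1) holds RFLAGS bits 8-15 and the (0x0400 >> 8) mask
	 * tests DF there.
	 */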
1374	testb	$(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp)
1375	jz	first_nmi	/* RSP was user controlled. */
1376
1377	/* This is a nested NMI. */
1378
1379nested_nmi:
1380	/*
1381	 * Modify the "iret" frame to point to repeat_nmi, forcing another
1382	 * iteration of NMI handling.
1383	 */
1384	subq	$8, %rsp
1385	leaq	-10*8(%rsp), %rdx
1386	pushq	$__KERNEL_DS
1387	pushq	%rdx
1388	pushfq
1389	pushq	$__KERNEL_CS
1390	pushq	$repeat_nmi
1391
1392	/* Put stack back */
1393	addq	$(6*8), %rsp
1394
1395nested_nmi_out:
1396	popq	%rdx
1397
1398	/* We are returning to kernel mode, so this cannot result in a fault. */
1399	INTERRUPT_RETURN
1400
1401first_nmi:
1402	/* Restore rdx. */
1403	movq	(%rsp), %rdx
1404
1405	/* Make room for "NMI executing". */
1406	pushq	$0
1407
1408	/* Leave room for the "iret" frame */
1409	subq	$(5*8), %rsp
1410
1411	/* Copy the "original" frame to the "outermost" frame */
1412	.rept 5
1413	pushq	11*8(%rsp)
1414	.endr
1415
1416	/* Everything up to here is safe from nested NMIs */
1417
1418#ifdef CONFIG_DEBUG_ENTRY
1419	/*
1420	 * For ease of testing, unmask NMIs right away.  Disabled by
1421	 * default because IRET is very expensive.
1422	 */
1423	pushq	$0		/* SS */
1424	pushq	%rsp		/* RSP (minus 8 because of the previous push) */
1425	addq	$8, (%rsp)	/* Fix up RSP */
1426	pushfq			/* RFLAGS */
1427	pushq	$__KERNEL_CS	/* CS */
1428	pushq	$1f		/* RIP */
1429	INTERRUPT_RETURN	/* continues at repeat_nmi below */
14301:
1431#endif
1432
1433repeat_nmi:
1434	/*
1435	 * If there was a nested NMI, the first NMI's iret will return
1436	 * here. But NMIs are still enabled and we can take another
1437	 * nested NMI. The nested NMI checks the interrupted RIP to see
1438	 * if it is between repeat_nmi and end_repeat_nmi, and if so
1439	 * it will just return, as we are about to repeat an NMI anyway.
1440	 * This makes it safe to copy to the stack frame that a nested
1441	 * NMI will update.
1442	 *
1443	 * RSP is pointing to "outermost RIP".  gsbase is unknown, but, if
1444	 * we're repeating an NMI, gsbase has the same value that it had on
1445	 * the first iteration.  paranoid_entry will load the kernel
1446	 * gsbase if needed before we call do_nmi.  "NMI executing"
1447	 * is zero.
1448	 */
1449	movq	$1, 10*8(%rsp)		/* Set "NMI executing". */
1450
1451	/*
1452	 * Copy the "outermost" frame to the "iret" frame.  NMIs that nest
1453	 * here must not modify the "iret" frame while we're writing to
1454	 * it or it will end up containing garbage.
1455	 */
1456	addq	$(10*8), %rsp
1457	.rept 5
1458	pushq	-6*8(%rsp)
1459	.endr
1460	subq	$(5*8), %rsp
1461end_repeat_nmi:
1462
1463	/*
1464	 * Everything below this point can be preempted by a nested NMI.
1465	 * If this happens, then the inner NMI will change the "iret"
1466	 * frame to point back to repeat_nmi.
1467	 */
1468	pushq	$-1				/* ORIG_RAX: no syscall to restart */
1469	ALLOC_PT_GPREGS_ON_STACK
1470
1471	/*
1472	 * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit
1473	 * as we should not be calling schedule in NMI context,
1474	 * even with normal interrupts enabled.  An NMI should not be
1475	 * setting NEED_RESCHED or doing anything else that normal interrupts
1476	 * and exceptions might do.
1477	 */
1478	call	paranoid_entry
1479
1480	/* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
1481	movq	%rsp, %rdi
1482	movq	$-1, %rsi
1483	call	do_nmi
1484
1485	testl	%ebx, %ebx			/* swapgs needed? */
1486	jnz	nmi_restore
1487nmi_swapgs:
1488	SWAPGS_UNSAFE_STACK
1489nmi_restore:
1490	RESTORE_EXTRA_REGS
1491	RESTORE_C_REGS
1492
1493	/* Point RSP at the "iret" frame. */
1494	REMOVE_PT_GPREGS_FROM_STACK 6*8
1495
1496	/*
1497	 * Clear "NMI executing".  Set DF first so that we can easily
1498	 * distinguish the remaining code between here and IRET from
1499	 * the SYSCALL entry and exit paths.  On a native kernel, we
1500	 * could just inspect RIP, but, on paravirt kernels,
1501	 * INTERRUPT_RETURN can translate into a jump into a
1502	 * hypercall page.
1503	 */
1504	std
1505	movq	$0, 5*8(%rsp)		/* clear "NMI executing" */
1506
1507	/*
1508	 * INTERRUPT_RETURN reads the "iret" frame and exits the NMI
1509	 * stack in a single instruction.  We are returning to kernel
1510	 * mode, so this cannot result in a fault.
1511	 */
1512	INTERRUPT_RETURN
1513END(nmi)
1514
1515ENTRY(ignore_sysret)
1516	mov	$-ENOSYS, %eax
1517	sysret
1518END(ignore_sysret)
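/*
 * A note on ignore_sysret: when IA32 emulation is not built in, syscall_init()
 * is expected to program this as the MSR_CSTAR (32-bit SYSCALL) target, so a
 * stray compat SYSCALL simply returns -ENOSYS.
 */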
1519
1520ENTRY(rewind_stack_do_exit)
1521	/* Prevent any naive code from trying to unwind to our caller. */
1522	xorl	%ebp, %ebp
1523
1524	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rax
1525	leaq	-TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%rax), %rsp
1526
1527	call	do_exit
15281:	jmp 1b
1529END(rewind_stack_do_exit)