/* ===== entry_32.S as of Linux v4.6 ===== */
   1/*
   2 *  Copyright (C) 1991,1992  Linus Torvalds
   3 *
   4 * entry_32.S contains the system-call and low-level fault and trap handling routines.
   5 *
   6 * Stack layout while running C code:
   7 *	ptrace needs to have all registers on the stack.
   8 *	If the order here is changed, it needs to be
   9 *	updated in fork.c:copy_process(), signal.c:do_signal(),
  10 *	ptrace.c and ptrace.h
  11 *
  12 *	 0(%esp) - %ebx
  13 *	 4(%esp) - %ecx
  14 *	 8(%esp) - %edx
  15 *	 C(%esp) - %esi
  16 *	10(%esp) - %edi
  17 *	14(%esp) - %ebp
  18 *	18(%esp) - %eax
  19 *	1C(%esp) - %ds
  20 *	20(%esp) - %es
  21 *	24(%esp) - %fs
  22 *	28(%esp) - %gs		saved iff !CONFIG_X86_32_LAZY_GS
  23 *	2C(%esp) - orig_eax
  24 *	30(%esp) - %eip
  25 *	34(%esp) - %cs
  26 *	38(%esp) - %eflags
  27 *	3C(%esp) - %oldesp
  28 *	40(%esp) - %oldss
  29 */
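/*
 * A minimal illustration of that layout as assembler constants (a
 * sketch only -- the real PT_* offsets are generated from struct
 * pt_regs by asm-offsets.c, and the EX_ names here are hypothetical):
 */
#if 0
#define EX_PT_EBX	0x00
#define EX_PT_ECX	0x04
#define EX_PT_EDX	0x08
#define EX_PT_ESI	0x0c
#define EX_PT_EDI	0x10
#define EX_PT_EBP	0x14
#define EX_PT_EAX	0x18
#define EX_PT_DS	0x1c
#define EX_PT_ES	0x20
#define EX_PT_FS	0x24
#define EX_PT_GS	0x28
#define EX_PT_ORIG_EAX	0x2c
#define EX_PT_EIP	0x30
#define EX_PT_CS	0x34
#define EX_PT_EFLAGS	0x38
#define EX_PT_OLDESP	0x3c
#define EX_PT_OLDSS	0x40
#endif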
  30
  31#include <linux/linkage.h>
  32#include <linux/err.h>
  33#include <asm/thread_info.h>
  34#include <asm/irqflags.h>
  35#include <asm/errno.h>
  36#include <asm/segment.h>
  37#include <asm/smp.h>
  38#include <asm/page_types.h>
  39#include <asm/percpu.h>
  40#include <asm/processor-flags.h>
  41#include <asm/ftrace.h>
  42#include <asm/irq_vectors.h>
  43#include <asm/cpufeatures.h>
  44#include <asm/alternative-asm.h>
  45#include <asm/asm.h>
  46#include <asm/smap.h>
  47
  48	.section .entry.text, "ax"
  49
  50/*
  51 * We use macros for low-level operations which need to be overridden
  52 * for paravirtualization.  The following will never clobber any registers:
  53 *   INTERRUPT_RETURN (aka. "iret")
  54 *   GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
  55 *   ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
  56 *
  57 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
  58 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
  59 * Allowing a register to be clobbered can shrink the paravirt replacement
  60 * enough to patch inline, increasing performance.
  61 */
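/*
 * For reference, a sketch of the native (CONFIG_PARAVIRT=n) expansions
 * of those macros; the EX_ names are hypothetical:
 */
#if 0
.macro EX_DISABLE_INTERRUPTS clobbers
	cli			/* native case: nothing is clobbered */
.endm
.macro EX_ENABLE_INTERRUPTS clobbers
	sti
.endm
.macro EX_INTERRUPT_RETURN
	iret
.endm
#endif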
  62
  63#ifdef CONFIG_PREEMPT
  64# define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
  65#else
  66# define preempt_stop(clobbers)
  67# define resume_kernel		restore_all
  68#endif
  69
  70.macro TRACE_IRQS_IRET
  71#ifdef CONFIG_TRACE_IRQFLAGS
  72	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)     # interrupts off?
  73	jz	1f
  74	TRACE_IRQS_ON
  751:
  76#endif
  77.endm
  78
  79/*
  80 * User gs save/restore
  81 *
  82 * %gs is used for userland TLS and kernel only uses it for stack
  83 * canary which is required to be at %gs:20 by gcc.  Read the comment
  84 * at the top of stackprotector.h for more info.
  85 *
  86 * Local labels 98 and 99 are used.
  87 */
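/*
 * For context, roughly what gcc emits around a stack-protected C
 * function (a sketch; the local slot offset and label are made up):
 */
#if 0
	movl	%gs:20, %eax		/* prologue: load the live canary */
	movl	%eax, -12(%ebp)		/* stash a copy in the frame */
	/* ... function body ... */
	movl	-12(%ebp), %eax		/* epilogue: reload the copy */
	xorl	%gs:20, %eax		/* compare with the live canary */
	jne	.Lcanary_mismatch	/* nonzero -> __stack_chk_fail() */
#endif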
  88#ifdef CONFIG_X86_32_LAZY_GS
  89
  90 /* unfortunately push/pop can't be no-ops */
  91.macro PUSH_GS
  92	pushl	$0
  93.endm
  94.macro POP_GS pop=0
  95	addl	$(4 + \pop), %esp
  96.endm
  97.macro POP_GS_EX
  98.endm
  99
 100 /* all the rest are no-ops */
 101.macro PTGS_TO_GS
 102.endm
 103.macro PTGS_TO_GS_EX
 104.endm
 105.macro GS_TO_REG reg
 106.endm
 107.macro REG_TO_PTGS reg
 108.endm
 109.macro SET_KERNEL_GS reg
 110.endm
 111
 112#else	/* CONFIG_X86_32_LAZY_GS */
 113
 114.macro PUSH_GS
 115	pushl	%gs
 116.endm
 117
 118.macro POP_GS pop=0
 11998:	popl	%gs
 120  .if \pop <> 0
 121	add	$\pop, %esp
 122  .endif
 123.endm
 124.macro POP_GS_EX
 125.pushsection .fixup, "ax"
 12699:	movl	$0, (%esp)
 127	jmp	98b
 128.popsection
 129	_ASM_EXTABLE(98b, 99b)
 130.endm
 131
 132.macro PTGS_TO_GS
 13398:	mov	PT_GS(%esp), %gs
 134.endm
 135.macro PTGS_TO_GS_EX
 136.pushsection .fixup, "ax"
 13799:	movl	$0, PT_GS(%esp)
 138	jmp	98b
 139.popsection
 140	_ASM_EXTABLE(98b, 99b)
 141.endm
 142
 143.macro GS_TO_REG reg
 144	movl	%gs, \reg
 145.endm
 146.macro REG_TO_PTGS reg
 147	movl	\reg, PT_GS(%esp)
 148.endm
 149.macro SET_KERNEL_GS reg
 150	movl	$(__KERNEL_STACK_CANARY), \reg
 151	movl	\reg, %gs
 152.endm
 153
 154#endif /* CONFIG_X86_32_LAZY_GS */
 155
 156.macro SAVE_ALL pt_regs_ax=%eax
 157	cld
 158	PUSH_GS
 159	pushl	%fs
 160	pushl	%es
 161	pushl	%ds
 162	pushl	\pt_regs_ax
 163	pushl	%ebp
 164	pushl	%edi
 165	pushl	%esi
 166	pushl	%edx
 167	pushl	%ecx
 168	pushl	%ebx
 169	movl	$(__USER_DS), %edx
 170	movl	%edx, %ds
 171	movl	%edx, %es
 172	movl	$(__KERNEL_PERCPU), %edx
 173	movl	%edx, %fs
 174	SET_KERNEL_GS %edx
 175.endm
 176
 177.macro RESTORE_INT_REGS
 178	popl	%ebx
 179	popl	%ecx
 180	popl	%edx
 181	popl	%esi
 182	popl	%edi
 183	popl	%ebp
 184	popl	%eax
 185.endm
 186
 187.macro RESTORE_REGS pop=0
 188	RESTORE_INT_REGS
 1891:	popl	%ds
 1902:	popl	%es
 1913:	popl	%fs
 192	POP_GS \pop
 193.pushsection .fixup, "ax"
 1944:	movl	$0, (%esp)
 195	jmp	1b
 1965:	movl	$0, (%esp)
 197	jmp	2b
 1986:	movl	$0, (%esp)
 199	jmp	3b
 200.popsection
 201	_ASM_EXTABLE(1b, 4b)
 202	_ASM_EXTABLE(2b, 5b)
 203	_ASM_EXTABLE(3b, 6b)
 204	POP_GS_EX
 205.endm
 206
 207ENTRY(ret_from_fork)
 208	pushl	%eax
 209	call	schedule_tail
 210	GET_THREAD_INFO(%ebp)
 211	popl	%eax
 212	pushl	$0x0202				# Reset kernel eflags
 213	popfl
 214
 215	/* When we fork, we trace the syscall return in the child, too. */
 216	movl    %esp, %eax
 217	call    syscall_return_slowpath
 218	jmp     restore_all
 219END(ret_from_fork)
 220
 221ENTRY(ret_from_kernel_thread)
 222	pushl	%eax
 223	call	schedule_tail
 224	GET_THREAD_INFO(%ebp)
 225	popl	%eax
 226	pushl	$0x0202				# Reset kernel eflags
 227	popfl
 228	movl	PT_EBP(%esp), %eax
 229	call	*PT_EBX(%esp)
 230	movl	$0, PT_EAX(%esp)
 231
 232	/*
 233	 * Kernel threads return to userspace as if returning from a syscall.
 234	 * We should check whether anything actually uses this path and, if so,
 235	 * consider switching it over to ret_from_fork.
 236	 */
 237	movl    %esp, %eax
 238	call    syscall_return_slowpath
 239	jmp     restore_all
 240ENDPROC(ret_from_kernel_thread)
 241
 242/*
 243 * Return to user mode is not as complex as all this looks,
 244 * but we want the default path for a system call return to
 245 * go as quickly as possible, which is why some of this is
 246 * less clear than it otherwise should be.
 247 */
 248
 249	# userspace resumption stub bypassing syscall exit tracing
 250	ALIGN
 251ret_from_exception:
 252	preempt_stop(CLBR_ANY)
 253ret_from_intr:
 254	GET_THREAD_INFO(%ebp)
 255#ifdef CONFIG_VM86
 256	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS and CS
 257	movb	PT_CS(%esp), %al
 258	andl	$(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
 259#else
 260	/*
 261	 * We can be coming here from child spawned by kernel_thread().
 262	 */
 263	movl	PT_CS(%esp), %eax
 264	andl	$SEGMENT_RPL_MASK, %eax
 265#endif
 266	cmpl	$USER_RPL, %eax
 267	jb	resume_kernel			# not returning to v8086 or userspace
 268
 269ENTRY(resume_userspace)
 270	DISABLE_INTERRUPTS(CLBR_ANY)
 271	TRACE_IRQS_OFF
 272	movl	%esp, %eax
 273	call	prepare_exit_to_usermode
 274	jmp	restore_all
 275END(ret_from_exception)
 276
 277#ifdef CONFIG_PREEMPT
 278ENTRY(resume_kernel)
 279	DISABLE_INTERRUPTS(CLBR_ANY)
 280need_resched:
 281	cmpl	$0, PER_CPU_VAR(__preempt_count)
 282	jnz	restore_all
 283	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off (exception path) ?
 284	jz	restore_all
 285	call	preempt_schedule_irq
 286	jmp	need_resched
 287END(resume_kernel)
 288#endif
 289
 290GLOBAL(__begin_SYSENTER_singlestep_region)
 291/*
 292 * All code from here through __end_SYSENTER_singlestep_region is subject
 293 * to being single-stepped if a user program sets TF and executes SYSENTER.
 294 * There is absolutely nothing that we can do to prevent this from happening
 295 * (thanks Intel!).  To keep our handling of this situation as simple as
 296 * possible, we handle TF just like AC and NT, except that our #DB handler
 297 * will ignore all of the single-step traps generated in this range.
 298 */
 299
 300#ifdef CONFIG_XEN
 301/*
 302 * Xen doesn't set %esp to be precisely what the normal SYSENTER
 303 * entry point expects, so fix it up before using the normal path.
 304 */
 305ENTRY(xen_sysenter_target)
 306	addl	$5*4, %esp			/* remove xen-provided frame */
 307	jmp	sysenter_past_esp
 308#endif
 309
 310/*
 311 * 32-bit SYSENTER entry.
 312 *
 313 * 32-bit system calls through the vDSO's __kernel_vsyscall enter here
 314 * if X86_FEATURE_SEP is available.  This is the preferred system call
 315 * entry on 32-bit systems.
 316 *
 317 * The SYSENTER instruction, in principle, should *only* occur in the
 318 * vDSO.  In practice, a small number of Android devices were shipped
 319 * with a copy of Bionic that inlined a SYSENTER instruction.  This
 320 * never happened in any of Google's Bionic versions -- it only happened
 321 * in a narrow range of Intel-provided versions.
 322 *
 323 * SYSENTER loads SS, ESP, CS, and EIP from previously programmed MSRs.
 324 * IF and VM in RFLAGS are cleared (IOW: interrupts are off).
 325 * SYSENTER does not save anything on the stack,
 326 * and does not save old EIP (!!!), ESP, or EFLAGS.
 327 *
 328 * To avoid losing track of EFLAGS.VM (and thus potentially corrupting
 329 * user and/or vm86 state), we explicitly disable the SYSENTER
 330 * instruction in vm86 mode by reprogramming the MSRs.
 331 *
 332 * Arguments:
 333 * eax  system call number
 334 * ebx  arg1
 335 * ecx  arg2
 336 * edx  arg3
 337 * esi  arg4
 338 * edi  arg5
 339 * ebp  user stack
 340 * 0(%ebp) arg6
 341 */
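/*
 * For illustration, the user-mode half of this handshake looks roughly
 * like the following (a sketch of __kernel_vsyscall's job, not the
 * actual vDSO source):
 */
#if 0
	pushl	%ecx			/* SYSEXIT will clobber ecx/edx */
	pushl	%edx
	pushl	%ebp
	movl	%esp, %ebp		/* SYSENTER loses ESP: stash it in ebp */
	sysenter			/* lands at entry_SYSENTER_32 below */
	/* SYSEXIT resumes here with the kernel-chosen ecx/edx */
	popl	%ebp
	popl	%edx
	popl	%ecx
	ret
#endif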
 342ENTRY(entry_SYSENTER_32)
 343	movl	TSS_sysenter_sp0(%esp), %esp
 344sysenter_past_esp:
 345	pushl	$__USER_DS		/* pt_regs->ss */
 346	pushl	%ebp			/* pt_regs->sp (stashed in bp) */
 347	pushfl				/* pt_regs->flags (except IF = 0) */
 348	orl	$X86_EFLAGS_IF, (%esp)	/* Fix IF */
 349	pushl	$__USER_CS		/* pt_regs->cs */
 350	pushl	$0			/* pt_regs->ip = 0 (placeholder) */
 351	pushl	%eax			/* pt_regs->orig_ax */
 352	SAVE_ALL pt_regs_ax=$-ENOSYS	/* save rest */
 353
 354	/*
 355	 * SYSENTER doesn't filter flags, so we need to clear NT, AC
 356	 * and TF ourselves.  To save a few cycles, we can check whether
 357	 * either was set instead of doing an unconditional popfl.
 358	 * This needs to happen before enabling interrupts so that
 359	 * we don't get preempted with NT set.
 360	 *
 361	 * If TF is set, we will single-step all the way to here -- do_debug
 362	 * will ignore all the traps.  (Yes, this is slow, but so is
 363	 * single-stepping in general.  This allows us to avoid having
 364	 * more complicated code to handle the case where a user program
 365	 * forces us to single-step through the SYSENTER entry code.)
 366	 *
 367	 * NB.: .Lsysenter_fix_flags is a label with the code under it moved
 368	 * out-of-line as an optimization: NT is unlikely to be set in the
 369	 * majority of the cases and instead of polluting the I$ unnecessarily,
 370	 * we're keeping that code behind a branch which will predict as
 371	 * not-taken and therefore its instructions won't be fetched.
 372	 */
 373	testl	$X86_EFLAGS_NT|X86_EFLAGS_AC|X86_EFLAGS_TF, PT_EFLAGS(%esp)
 374	jnz	.Lsysenter_fix_flags
 375.Lsysenter_flags_fixed:
 376
 377	/*
 378	 * User mode is traced as though IRQs are on, and SYSENTER
 379	 * turned them off.
 380	 */
 381	TRACE_IRQS_OFF
 382
 383	movl	%esp, %eax
 384	call	do_fast_syscall_32
 385	/* XEN PV guests always use IRET path */
 386	ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
 387		    "jmp .Lsyscall_32_done", X86_FEATURE_XENPV
 388
 389/* Opportunistic SYSEXIT */
 390	TRACE_IRQS_ON			/* User mode traces as IRQs on. */
 391	movl	PT_EIP(%esp), %edx	/* pt_regs->ip */
 392	movl	PT_OLDESP(%esp), %ecx	/* pt_regs->sp */
 3931:	mov	PT_FS(%esp), %fs
 394	PTGS_TO_GS
 395	popl	%ebx			/* pt_regs->bx */
 396	addl	$2*4, %esp		/* skip pt_regs->cx and pt_regs->dx */
 397	popl	%esi			/* pt_regs->si */
 398	popl	%edi			/* pt_regs->di */
 399	popl	%ebp			/* pt_regs->bp */
 400	popl	%eax			/* pt_regs->ax */
 401
 402	/*
 403	 * Restore all flags except IF. (We restore IF separately because
 404	 * STI gives a one-instruction window in which we won't be interrupted,
 405	 * whereas POPF does not.)
 406	 */
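	/*
	 * (X86_EFLAGS_IF_BIT is 9, so the btr below strips only IF from
	 * the saved flags; the sti further down sets it for real, and its
	 * one-instruction interrupt shadow covers the sysexit.)
	 */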
 407	addl	$PT_EFLAGS-PT_DS, %esp	/* point esp at pt_regs->flags */
 408	btr	$X86_EFLAGS_IF_BIT, (%esp)
 409	popfl
 410
 411	/*
 412	 * Return back to the vDSO, which will pop ecx and edx.
 413	 * Don't bother with DS and ES (they already contain __USER_DS).
 414	 */
 415	sti
 416	sysexit
 417
 418.pushsection .fixup, "ax"
 4192:	movl	$0, PT_FS(%esp)
 420	jmp	1b
 421.popsection
 422	_ASM_EXTABLE(1b, 2b)
 423	PTGS_TO_GS_EX
 424
 425.Lsysenter_fix_flags:
 426	pushl	$X86_EFLAGS_FIXED
 427	popfl
 428	jmp	.Lsysenter_flags_fixed
 429GLOBAL(__end_SYSENTER_singlestep_region)
 430ENDPROC(entry_SYSENTER_32)
 431
 432/*
 433 * 32-bit legacy system call entry.
 434 *
 435 * 32-bit x86 Linux system calls traditionally used the INT $0x80
 436 * instruction.  INT $0x80 lands here.
 437 *
 438 * This entry point can be used by any 32-bit program to perform system calls.
 439 * Instances of INT $0x80 can be found inline in various programs and
 440 * libraries.  It is also used by the vDSO's __kernel_vsyscall
 441 * fallback for hardware that doesn't support a faster entry method.
 442 * Restarted 32-bit system calls also fall back to INT $0x80
 443 * regardless of what instruction was originally used to do the system
 444 * call.  (64-bit programs can use INT $0x80 as well, but they can
 445 * only run on 64-bit kernels and therefore land in
 446 * entry_INT80_compat.)
 447 *
 448 * This is considered a slow path.  It is not used by most libc
 449 * implementations on modern hardware except during process startup.
 450 *
 451 * Arguments:
 452 * eax  system call number
 453 * ebx  arg1
 454 * ecx  arg2
 455 * edx  arg3
 456 * esi  arg4
 457 * edi  arg5
 458 * ebp  arg6
 459 */
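/*
 * For illustration, the classic inline form of this entry (a user-mode
 * sketch; 20 is __NR_getpid in the i386 ABI):
 */
#if 0
	movl	$20, %eax		/* system call number in eax */
	int	$0x80			/* lands at entry_INT80_32 below */
	/* on return, %eax holds the result (here: the pid) */
#endif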
 460ENTRY(entry_INT80_32)
 461	ASM_CLAC
 462	pushl	%eax			/* pt_regs->orig_ax */
 463	SAVE_ALL pt_regs_ax=$-ENOSYS	/* save rest */
 464
 465	/*
 466	 * User mode is traced as though IRQs are on, and the interrupt gate
 467	 * turned them off.
 468	 */
 469	TRACE_IRQS_OFF
 470
 471	movl	%esp, %eax
 472	call	do_int80_syscall_32
 473.Lsyscall_32_done:
 474
 475restore_all:
 476	TRACE_IRQS_IRET
 477restore_all_notrace:
 478#ifdef CONFIG_X86_ESPFIX32
 479	ALTERNATIVE	"jmp restore_nocheck", "", X86_BUG_ESPFIX
 480
 481	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS, SS and CS
 482	/*
 483	 * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
 484	 * are returning to the kernel.
 485	 * See comments in process.c:copy_thread() for details.
 486	 */
 487	movb	PT_OLDSS(%esp), %ah
 488	movb	PT_CS(%esp), %al
 489	andl	$(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
 490	cmpl	$((SEGMENT_LDT << 8) | USER_RPL), %eax
 491	je ldt_ss				# returning to user-space with LDT SS
 492#endif
 493restore_nocheck:
 494	RESTORE_REGS 4				# skip orig_eax/error_code
 495irq_return:
 496	INTERRUPT_RETURN
 497.section .fixup, "ax"
 498ENTRY(iret_exc)
 499	pushl	$0				# no error code
 500	pushl	$do_iret_error
 501	jmp	error_code
 502.previous
 503	_ASM_EXTABLE(irq_return, iret_exc)
 504
 505#ifdef CONFIG_X86_ESPFIX32
 506ldt_ss:
 507/*
 508 * Setup and switch to ESPFIX stack
 509 *
 510 * We're returning to userspace with a 16 bit stack. The CPU will not
 511 * restore the high word of ESP for us on executing iret... This is an
 512 * "official" bug of all the x86-compatible CPUs, which we can work
 513 * around to make dosemu and wine happy. We do this by preloading the
 514 * high word of ESP with the high word of the userspace ESP while
 515 * compensating for the offset by changing to the ESPFIX segment with
 516 * a base address that matches for the difference.
 517 */
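/*
 * Worked example with assumed values: kernel %esp = 0xc1234a80 and
 * userspace %esp = 0x5678ab12.  The mov/sub/shr sequence below leaves
 * %eax = 0x56784a80 (user high word, kernel low word) and programs a
 * segment base of 0x6aab0000, so base + new esp = 0xc1234a80: SS:ESP
 * still addresses the kernel stack, while the ESP high word that iret
 * fails to restore now matches what userspace expects.
 */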
 518#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
 519	mov	%esp, %edx			/* load kernel esp */
 520	mov	PT_OLDESP(%esp), %eax		/* load userspace esp */
 521	mov	%dx, %ax			/* eax: new kernel esp */
 522	sub	%eax, %edx			/* offset (low word is 0) */
 523	shr	$16, %edx
 524	mov	%dl, GDT_ESPFIX_SS + 4		/* bits 16..23 */
 525	mov	%dh, GDT_ESPFIX_SS + 7		/* bits 24..31 */
 526	pushl	$__ESPFIX_SS
 527	pushl	%eax				/* new kernel esp */
 528	/*
 529	 * Disable interrupts, but do not irqtrace this section: we
 530	 * will soon execute iret and the tracer was already set to
 531	 * the irqstate after the IRET:
 532	 */
 533	DISABLE_INTERRUPTS(CLBR_EAX)
 534	lss	(%esp), %esp			/* switch to espfix segment */
 535	jmp	restore_nocheck
 536#endif
 537ENDPROC(entry_INT80_32)
 538
 539.macro FIXUP_ESPFIX_STACK
 540/*
 541 * Switch back from the ESPFIX stack to the normal zero-based stack
 542 *
 543 * We can't call C functions using the ESPFIX stack. This code reads
 544 * the high word of the segment base from the GDT and switches to the
 545 * normal stack and adjusts ESP with the matching offset.
 546 */
 547#ifdef CONFIG_X86_ESPFIX32
 548	/* fixup the stack */
 549	mov	GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
 550	mov	GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
 551	shl	$16, %eax
 552	addl	%esp, %eax			/* the adjusted stack pointer */
 553	pushl	$__KERNEL_DS
 554	pushl	%eax
 555	lss	(%esp), %esp			/* switch to the normal stack segment */
 556#endif
 557.endm
 558.macro UNWIND_ESPFIX_STACK
 559#ifdef CONFIG_X86_ESPFIX32
 560	movl	%ss, %eax
 561	/* see if on espfix stack */
 562	cmpw	$__ESPFIX_SS, %ax
 563	jne	27f
 564	movl	$__KERNEL_DS, %eax
 565	movl	%eax, %ds
 566	movl	%eax, %es
 567	/* switch to normal stack */
 568	FIXUP_ESPFIX_STACK
 56927:
 570#endif
 571.endm
 572
 573/*
 574 * Build the entry stubs with some assembler magic.
 575 * We pack 1 stub into every 8-byte block.
 576 */
 577	.align 8
 578ENTRY(irq_entries_start)
 579    vector=FIRST_EXTERNAL_VECTOR
 580    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
 581	pushl	$(~vector+0x80)			/* Note: always in signed byte range */
 582    vector=vector+1
 583	jmp	common_interrupt
 584	.align	8
 585    .endr
 586END(irq_entries_start)
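/*
 * Worked example of the encoding above: for vector 0x20 the stub
 * pushes ~0x20 + 0x80 = 0x5f, which fits a signed byte, keeping the
 * pushl at 2 bytes so each stub fits its 8-byte slot.  The addl
 * $-0x80 in common_interrupt recovers ~0x20 = -33, a value in the
 * [-256, -1] range that the IRQ code inverts to get the vector back.
 */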
 587
 588/*
 589 * the CPU automatically disables interrupts when executing an IRQ vector,
 590 * so IRQ-flags tracing has to follow that:
 591 */
 592	.p2align CONFIG_X86_L1_CACHE_SHIFT
 593common_interrupt:
 594	ASM_CLAC
 595	addl	$-0x80, (%esp)			/* Adjust vector into the [-256, -1] range */
 596	SAVE_ALL
 597	TRACE_IRQS_OFF
 598	movl	%esp, %eax
 599	call	do_IRQ
 600	jmp	ret_from_intr
 601ENDPROC(common_interrupt)
 602
 603#define BUILD_INTERRUPT3(name, nr, fn)	\
 604ENTRY(name)				\
 605	ASM_CLAC;			\
 606	pushl	$~(nr);			\
 607	SAVE_ALL;			\
 608	TRACE_IRQS_OFF			\
 609	movl	%esp, %eax;		\
 610	call	fn;			\
 611	jmp	ret_from_intr;		\
 612ENDPROC(name)
 613
 614
 615#ifdef CONFIG_TRACING
 616# define TRACE_BUILD_INTERRUPT(name, nr)	BUILD_INTERRUPT3(trace_##name, nr, smp_trace_##name)
 617#else
 618# define TRACE_BUILD_INTERRUPT(name, nr)
 619#endif
 620
 621#define BUILD_INTERRUPT(name, nr)		\
 622	BUILD_INTERRUPT3(name, nr, smp_##name);	\
 623	TRACE_BUILD_INTERRUPT(name, nr)
 624
 625/* The include is where all of the SMP etc. interrupts come from */
 626#include <asm/entry_arch.h>
 627
 628ENTRY(coprocessor_error)
 629	ASM_CLAC
 630	pushl	$0
 631	pushl	$do_coprocessor_error
 632	jmp	error_code
 633END(coprocessor_error)
 634
 635ENTRY(simd_coprocessor_error)
 636	ASM_CLAC
 637	pushl	$0
 638#ifdef CONFIG_X86_INVD_BUG
 639	/* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
 640	ALTERNATIVE "pushl	$do_general_protection",	\
 641		    "pushl	$do_simd_coprocessor_error",	\
 642		    X86_FEATURE_XMM
 643#else
 644	pushl	$do_simd_coprocessor_error
 645#endif
 646	jmp	error_code
 647END(simd_coprocessor_error)
 648
 649ENTRY(device_not_available)
 650	ASM_CLAC
 651	pushl	$-1				# mark this as an int
 652	pushl	$do_device_not_available
 653	jmp	error_code
 654END(device_not_available)
 655
 656#ifdef CONFIG_PARAVIRT
 657ENTRY(native_iret)
 658	iret
 659	_ASM_EXTABLE(native_iret, iret_exc)
 660END(native_iret)
 661#endif
 662
 663ENTRY(overflow)
 664	ASM_CLAC
 665	pushl	$0
 666	pushl	$do_overflow
 667	jmp	error_code
 668END(overflow)
 669
 670ENTRY(bounds)
 671	ASM_CLAC
 672	pushl	$0
 673	pushl	$do_bounds
 674	jmp	error_code
 675END(bounds)
 676
 677ENTRY(invalid_op)
 678	ASM_CLAC
 679	pushl	$0
 680	pushl	$do_invalid_op
 681	jmp	error_code
 682END(invalid_op)
 683
 684ENTRY(coprocessor_segment_overrun)
 685	ASM_CLAC
 686	pushl	$0
 687	pushl	$do_coprocessor_segment_overrun
 688	jmp	error_code
 689END(coprocessor_segment_overrun)
 690
 691ENTRY(invalid_TSS)
 692	ASM_CLAC
 693	pushl	$do_invalid_TSS
 694	jmp	error_code
 695END(invalid_TSS)
 696
 697ENTRY(segment_not_present)
 698	ASM_CLAC
 699	pushl	$do_segment_not_present
 700	jmp	error_code
 701END(segment_not_present)
 702
 703ENTRY(stack_segment)
 704	ASM_CLAC
 705	pushl	$do_stack_segment
 706	jmp	error_code
 707END(stack_segment)
 708
 709ENTRY(alignment_check)
 710	ASM_CLAC
 711	pushl	$do_alignment_check
 712	jmp	error_code
 713END(alignment_check)
 714
 715ENTRY(divide_error)
 716	ASM_CLAC
 717	pushl	$0				# no error code
 718	pushl	$do_divide_error
 719	jmp	error_code
 720END(divide_error)
 721
 722#ifdef CONFIG_X86_MCE
 723ENTRY(machine_check)
 724	ASM_CLAC
 725	pushl	$0
 726	pushl	machine_check_vector
 727	jmp	error_code
 728END(machine_check)
 729#endif
 730
 731ENTRY(spurious_interrupt_bug)
 732	ASM_CLAC
 733	pushl	$0
 734	pushl	$do_spurious_interrupt_bug
 735	jmp	error_code
 736END(spurious_interrupt_bug)
 737
 738#ifdef CONFIG_XEN
 739ENTRY(xen_hypervisor_callback)
 740	pushl	$-1				/* orig_ax = -1 => not a system call */
 741	SAVE_ALL
 742	TRACE_IRQS_OFF
 743
 744	/*
 745	 * Check to see if we got the event in the critical
 746	 * region in xen_iret_direct, after we've reenabled
 747	 * events and checked for pending events.  This simulates
 748	 * iret instruction's behaviour where it delivers a
 749	 * pending interrupt when enabling interrupts:
 750	 */
 751	movl	PT_EIP(%esp), %eax
 752	cmpl	$xen_iret_start_crit, %eax
 753	jb	1f
 754	cmpl	$xen_iret_end_crit, %eax
 755	jae	1f
 756
 757	jmp	xen_iret_crit_fixup
 758
 759ENTRY(xen_do_upcall)
 7601:	mov	%esp, %eax
 761	call	xen_evtchn_do_upcall
 762#ifndef CONFIG_PREEMPT
 763	call	xen_maybe_preempt_hcall
 764#endif
 765	jmp	ret_from_intr
 766ENDPROC(xen_hypervisor_callback)
 767
 768/*
 769 * Hypervisor uses this for application faults while it executes.
 770 * We get here for two reasons:
 771 *  1. Fault while reloading DS, ES, FS or GS
 772 *  2. Fault while executing IRET
 773 * Category 1 we fix up by reattempting the load, and zeroing the segment
 774 * register if the load fails.
 775 * Category 2 we fix up by jumping to do_iret_error. We cannot use the
 776 * normal Linux return path in this case because if we use the IRET hypercall
 777 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 778 * We distinguish between categories by maintaining a status value in EAX.
 779 */
 780ENTRY(xen_failsafe_callback)
 781	pushl	%eax
 782	movl	$1, %eax
 7831:	mov	4(%esp), %ds
 7842:	mov	8(%esp), %es
 7853:	mov	12(%esp), %fs
 7864:	mov	16(%esp), %gs
 787	/* EAX == 0 => Category 1 (Bad segment)
 788	   EAX != 0 => Category 2 (Bad IRET) */
 789	testl	%eax, %eax
 790	popl	%eax
 791	lea	16(%esp), %esp
 792	jz	5f
 793	jmp	iret_exc
 7945:	pushl	$-1				/* orig_ax = -1 => not a system call */
 795	SAVE_ALL
 796	jmp	ret_from_exception
 797
 798.section .fixup, "ax"
 7996:	xorl	%eax, %eax
 800	movl	%eax, 4(%esp)
 801	jmp	1b
 8027:	xorl	%eax, %eax
 803	movl	%eax, 8(%esp)
 804	jmp	2b
 8058:	xorl	%eax, %eax
 806	movl	%eax, 12(%esp)
 807	jmp	3b
 8089:	xorl	%eax, %eax
 809	movl	%eax, 16(%esp)
 810	jmp	4b
 811.previous
 812	_ASM_EXTABLE(1b, 6b)
 813	_ASM_EXTABLE(2b, 7b)
 814	_ASM_EXTABLE(3b, 8b)
 815	_ASM_EXTABLE(4b, 9b)
 816ENDPROC(xen_failsafe_callback)
 817
 818BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
 819		xen_evtchn_do_upcall)
 820
 821#endif /* CONFIG_XEN */
 822
 823#if IS_ENABLED(CONFIG_HYPERV)
 824
 825BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
 826	hyperv_vector_handler)
 827
 828#endif /* CONFIG_HYPERV */
 829
 830#ifdef CONFIG_FUNCTION_TRACER
 831#ifdef CONFIG_DYNAMIC_FTRACE
 832
 833ENTRY(mcount)
 834	ret
 835END(mcount)
 836
 837ENTRY(ftrace_caller)
 838	pushl	%eax
 839	pushl	%ecx
 840	pushl	%edx
 841	pushl	$0				/* Pass NULL as regs pointer */
 842	movl	4*4(%esp), %eax
 843	movl	0x4(%ebp), %edx
 844	movl	function_trace_op, %ecx
 845	subl	$MCOUNT_INSN_SIZE, %eax
 846
 847.globl ftrace_call
 848ftrace_call:
 849	call	ftrace_stub
 850
 851	addl	$4, %esp			/* skip NULL pointer */
 852	popl	%edx
 853	popl	%ecx
 854	popl	%eax
 855ftrace_ret:
 856#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 857.globl ftrace_graph_call
 858ftrace_graph_call:
 859	jmp	ftrace_stub
 860#endif
 861
 862.globl ftrace_stub
 863ftrace_stub:
 864	ret
 865END(ftrace_caller)
 866
 867ENTRY(ftrace_regs_caller)
 868	pushf	/* push flags before compare (in cs location) */
 869
 870	/*
 871	 * i386 does not save SS and ESP when coming from kernel.
 872	 * Instead, to get sp, &regs->sp is used (see ptrace.h).
 873	 * Unfortunately, that means eflags must be at the same location
 874	 * as the current return ip is. We move the return ip into the
 875	 * ip location, and move flags into the return ip location.
 876	 */
 877	pushl	4(%esp)				/* save return ip into ip slot */
 878
 879	pushl	$0				/* Load 0 into orig_ax */
 880	pushl	%gs
 881	pushl	%fs
 882	pushl	%es
 883	pushl	%ds
 884	pushl	%eax
 885	pushl	%ebp
 886	pushl	%edi
 887	pushl	%esi
 888	pushl	%edx
 889	pushl	%ecx
 890	pushl	%ebx
 891
 892	movl	13*4(%esp), %eax		/* Get the saved flags */
 893	movl	%eax, 14*4(%esp)		/* Move saved flags into regs->flags location */
 894						/* clobbering return ip */
 895	movl	$__KERNEL_CS, 13*4(%esp)
 896
 897	movl	12*4(%esp), %eax		/* Load ip (1st parameter) */
 898	subl	$MCOUNT_INSN_SIZE, %eax		/* Adjust ip */
 899	movl	0x4(%ebp), %edx			/* Load parent ip (2nd parameter) */
 900	movl	function_trace_op, %ecx		/* Save ftrace_pos in 3rd parameter */
 901	pushl	%esp				/* Save pt_regs as 4th parameter */
 902
 903GLOBAL(ftrace_regs_call)
 904	call	ftrace_stub
 905
 906	addl	$4, %esp			/* Skip pt_regs */
 907	movl	14*4(%esp), %eax		/* Move flags back into cs */
 908	movl	%eax, 13*4(%esp)		/* Needed to keep addl	from modifying flags */
 909	movl	12*4(%esp), %eax		/* Get return ip from regs->ip */
 910	movl	%eax, 14*4(%esp)		/* Put return ip back for ret */
 911
 912	popl	%ebx
 913	popl	%ecx
 914	popl	%edx
 915	popl	%esi
 916	popl	%edi
 917	popl	%ebp
 918	popl	%eax
 919	popl	%ds
 920	popl	%es
 921	popl	%fs
 922	popl	%gs
 923	addl	$8, %esp			/* Skip orig_ax and ip */
 924	popf					/* Pop flags at end (no addl to corrupt flags) */
 925	jmp	ftrace_ret
 926
 927	popf
 928	jmp	ftrace_stub
 929#else /* ! CONFIG_DYNAMIC_FTRACE */
 930
 931ENTRY(mcount)
 932	cmpl	$__PAGE_OFFSET, %esp
 933	jb	ftrace_stub			/* Paging not enabled yet? */
 934
 935	cmpl	$ftrace_stub, ftrace_trace_function
 936	jnz	trace
 937#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 938	cmpl	$ftrace_stub, ftrace_graph_return
 939	jnz	ftrace_graph_caller
 940
 941	cmpl	$ftrace_graph_entry_stub, ftrace_graph_entry
 942	jnz	ftrace_graph_caller
 943#endif
 944.globl ftrace_stub
 945ftrace_stub:
 946	ret
 947
 948	/* taken from glibc */
 949trace:
 950	pushl	%eax
 951	pushl	%ecx
 952	pushl	%edx
 953	movl	0xc(%esp), %eax
 954	movl	0x4(%ebp), %edx
 955	subl	$MCOUNT_INSN_SIZE, %eax
 956
 957	call	*ftrace_trace_function
 958
 959	popl	%edx
 960	popl	%ecx
 961	popl	%eax
 962	jmp	ftrace_stub
 963END(mcount)
 964#endif /* CONFIG_DYNAMIC_FTRACE */
 965#endif /* CONFIG_FUNCTION_TRACER */
 966
 967#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 968ENTRY(ftrace_graph_caller)
 969	pushl	%eax
 970	pushl	%ecx
 971	pushl	%edx
 972	movl	0xc(%esp), %eax
 973	lea	0x4(%ebp), %edx
 974	movl	(%ebp), %ecx
 975	subl	$MCOUNT_INSN_SIZE, %eax
 976	call	prepare_ftrace_return
 977	popl	%edx
 978	popl	%ecx
 979	popl	%eax
 980	ret
 981END(ftrace_graph_caller)
 982
 983.globl return_to_handler
 984return_to_handler:
 985	pushl	%eax
 986	pushl	%edx
 987	movl	%ebp, %eax
 988	call	ftrace_return_to_handler
 989	movl	%eax, %ecx
 990	popl	%edx
 991	popl	%eax
 992	jmp	*%ecx
 993#endif
 994
 995#ifdef CONFIG_TRACING
 996ENTRY(trace_page_fault)
 997	ASM_CLAC
 998	pushl	$trace_do_page_fault
 999	jmp	error_code
1000END(trace_page_fault)
1001#endif
1002
1003ENTRY(page_fault)
1004	ASM_CLAC
1005	pushl	$do_page_fault
1006	ALIGN
1007error_code:
1008	/* the function address is in %gs's slot on the stack */
1009	pushl	%fs
1010	pushl	%es
1011	pushl	%ds
1012	pushl	%eax
1013	pushl	%ebp
1014	pushl	%edi
1015	pushl	%esi
1016	pushl	%edx
1017	pushl	%ecx
1018	pushl	%ebx
1019	cld
1020	movl	$(__KERNEL_PERCPU), %ecx
1021	movl	%ecx, %fs
1022	UNWIND_ESPFIX_STACK
1023	GS_TO_REG %ecx
1024	movl	PT_GS(%esp), %edi		# get the function address
1025	movl	PT_ORIG_EAX(%esp), %edx		# get the error code
1026	movl	$-1, PT_ORIG_EAX(%esp)		# no syscall to restart
1027	REG_TO_PTGS %ecx
1028	SET_KERNEL_GS %ecx
1029	movl	$(__USER_DS), %ecx
1030	movl	%ecx, %ds
1031	movl	%ecx, %es
1032	TRACE_IRQS_OFF
1033	movl	%esp, %eax			# pt_regs pointer
1034	call	*%edi
1035	jmp	ret_from_exception
1036END(page_fault)
1037
1038ENTRY(debug)
1039	/*
1040	 * #DB can happen at the first instruction of
1041	 * entry_SYSENTER_32 or in Xen's SYSENTER prologue.  If this
1042	 * happens, then we will be running on a very small stack.  We
1043	 * need to detect this condition and switch to the thread
1044	 * stack before calling any C code at all.
1045	 *
1046	 * If you edit this code, keep in mind that NMIs can happen in here.
1047	 */
1048	ASM_CLAC
1049	pushl	$-1				# mark this as an int
1050	SAVE_ALL
1051	xorl	%edx, %edx			# error code 0
1052	movl	%esp, %eax			# pt_regs pointer
1053
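	/*
	 * The comparison below relies on unsigned arithmetic: ecx =
	 * (end of SYSENTER stack) - esp is below SIZEOF_SYSENTER_stack
	 * only if esp points into the stack; an esp above the end wraps
	 * to a huge unsigned value and the "jb" falls through.
	 */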
1054	/* Are we currently on the SYSENTER stack? */
1055	PER_CPU(cpu_tss + CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx)
1056	subl	%eax, %ecx	/* ecx = (end of SYSENTER_stack) - esp */
1057	cmpl	$SIZEOF_SYSENTER_stack, %ecx
1058	jb	.Ldebug_from_sysenter_stack
1059
1060	TRACE_IRQS_OFF
1061	call	do_debug
1062	jmp	ret_from_exception
1063
1064.Ldebug_from_sysenter_stack:
1065	/* We're on the SYSENTER stack.  Switch off. */
1066	movl	%esp, %ebp
1067	movl	PER_CPU_VAR(cpu_current_top_of_stack), %esp
1068	TRACE_IRQS_OFF
1069	call	do_debug
1070	movl	%ebp, %esp
1071	jmp	ret_from_exception
1072END(debug)
1073
1074/*
1075 * NMI is doubly nasty.  It can happen on the first instruction of
1076 * entry_SYSENTER_32 (just like #DB), but it can also interrupt the beginning
1077 * of the #DB handler even if that #DB in turn hit before entry_SYSENTER_32
1078 * switched stacks.  We handle both conditions by simply checking whether we
1079 * interrupted kernel code running on the SYSENTER stack.
1080 */
1081ENTRY(nmi)
1082	ASM_CLAC
1083#ifdef CONFIG_X86_ESPFIX32
1084	pushl	%eax
1085	movl	%ss, %eax
1086	cmpw	$__ESPFIX_SS, %ax
1087	popl	%eax
1088	je	nmi_espfix_stack
1089#endif
1090
1091	pushl	%eax				# pt_regs->orig_ax
1092	SAVE_ALL
1093	xorl	%edx, %edx			# zero error code
1094	movl	%esp, %eax			# pt_regs pointer
1095
1096	/* Are we currently on the SYSENTER stack? */
1097	PER_CPU(cpu_tss + CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx)
1098	subl	%eax, %ecx	/* ecx = (end of SYSENTER_stack) - esp */
1099	cmpl	$SIZEOF_SYSENTER_stack, %ecx
1100	jb	.Lnmi_from_sysenter_stack
1101
1102	/* Not on SYSENTER stack. */
1103	call	do_nmi
1104	jmp	restore_all_notrace
1105
1106.Lnmi_from_sysenter_stack:
1107	/*
1108	 * We're on the SYSENTER stack.  Switch off.  No one (not even debug)
1109	 * is using the thread stack right now, so it's safe for us to use it.
1110	 */
1111	movl	%esp, %ebp
1112	movl	PER_CPU_VAR(cpu_current_top_of_stack), %esp
1113	call	do_nmi
1114	movl	%ebp, %esp
1115	jmp	restore_all_notrace
1116
1117#ifdef CONFIG_X86_ESPFIX32
1118nmi_espfix_stack:
1119	/*
1120	 * Build an SS:ESP pointer for lss to switch back to the original stack
1121	 */
1122	pushl	%ss
1123	pushl	%esp
1124	addl	$4, (%esp)
1125	/* copy the iret frame of 12 bytes */
1126	.rept 3
1127	pushl	16(%esp)
1128	.endr
1129	pushl	%eax
1130	SAVE_ALL
1131	FIXUP_ESPFIX_STACK			# %eax == %esp
1132	xorl	%edx, %edx			# zero error code
1133	call	do_nmi
1134	RESTORE_REGS
1135	lss	12+4(%esp), %esp		# back to espfix stack
1136	jmp	irq_return
1137#endif
1138END(nmi)
1139
1140ENTRY(int3)
1141	ASM_CLAC
1142	pushl	$-1				# mark this as an int
1143	SAVE_ALL
1144	TRACE_IRQS_OFF
1145	xorl	%edx, %edx			# zero error code
1146	movl	%esp, %eax			# pt_regs pointer
1147	call	do_int3
1148	jmp	ret_from_exception
1149END(int3)
1150
1151ENTRY(general_protection)
1152	pushl	$do_general_protection
1153	jmp	error_code
1154END(general_protection)
1155
1156#ifdef CONFIG_KVM_GUEST
1157ENTRY(async_page_fault)
1158	ASM_CLAC
1159	pushl	$do_async_page_fault
1160	jmp	error_code
1161END(async_page_fault)
1162#endif
/* ===== entry_32.S as of Linux v5.14.15 ===== */
   1/* SPDX-License-Identifier: GPL-2.0 */
   2/*
   3 *  Copyright (C) 1991,1992  Linus Torvalds
   4 *
   5 * entry_32.S contains the system-call and low-level fault and trap handling routines.
   6 *
   7 * Stack layout while running C code:
   8 *	ptrace needs to have all registers on the stack.
   9 *	If the order here is changed, it needs to be
  10 *	updated in fork.c:copy_process(), signal.c:do_signal(),
  11 *	ptrace.c and ptrace.h
  12 *
  13 *	 0(%esp) - %ebx
  14 *	 4(%esp) - %ecx
  15 *	 8(%esp) - %edx
  16 *	 C(%esp) - %esi
  17 *	10(%esp) - %edi
  18 *	14(%esp) - %ebp
  19 *	18(%esp) - %eax
  20 *	1C(%esp) - %ds
  21 *	20(%esp) - %es
  22 *	24(%esp) - %fs
  23 *	28(%esp) - unused -- was %gs on old stackprotector kernels
  24 *	2C(%esp) - orig_eax
  25 *	30(%esp) - %eip
  26 *	34(%esp) - %cs
  27 *	38(%esp) - %eflags
  28 *	3C(%esp) - %oldesp
  29 *	40(%esp) - %oldss
  30 */
  31
  32#include <linux/linkage.h>
  33#include <linux/err.h>
  34#include <asm/thread_info.h>
  35#include <asm/irqflags.h>
  36#include <asm/errno.h>
  37#include <asm/segment.h>
  38#include <asm/smp.h>
  39#include <asm/percpu.h>
  40#include <asm/processor-flags.h>
  41#include <asm/irq_vectors.h>
  42#include <asm/cpufeatures.h>
  43#include <asm/alternative.h>
  44#include <asm/asm.h>
  45#include <asm/smap.h>
  46#include <asm/frame.h>
  47#include <asm/trapnr.h>
  48#include <asm/nospec-branch.h>
  49
  50#include "calling.h"
  51
  52	.section .entry.text, "ax"
  53
  54#define PTI_SWITCH_MASK         (1 << PAGE_SHIFT)
  55
  56/* Unconditionally switch to user cr3 */
  57.macro SWITCH_TO_USER_CR3 scratch_reg:req
  58	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
  59
  60	movl	%cr3, \scratch_reg
  61	orl	$PTI_SWITCH_MASK, \scratch_reg
  62	movl	\scratch_reg, %cr3
  63.Lend_\@:
  64.endm
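/*
 * Worked example (assumed addresses): with PAGE_SHIFT == 12 the mask
 * is 0x1000.  The kernel and user page directories are an adjacent
 * pair, so a kernel cr3 of, say, 0x35a000 has 0x35b000 as its user
 * twin -- setting or clearing bit 12 is the entire switch.
 */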
  65
  66.macro BUG_IF_WRONG_CR3 no_user_check=0
  67#ifdef CONFIG_DEBUG_ENTRY
  68	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
  69	.if \no_user_check == 0
  70	/* coming from usermode? */
  71	testl	$USER_SEGMENT_RPL_MASK, PT_CS(%esp)
  72	jz	.Lend_\@
  73	.endif
  74	/* On user-cr3? */
  75	movl	%cr3, %eax
  76	testl	$PTI_SWITCH_MASK, %eax
  77	jnz	.Lend_\@
  78	/* From userspace with kernel cr3 - BUG */
  79	ud2
  80.Lend_\@:
  81#endif
  82.endm
  83
  84/*
  85 * Switch to kernel cr3 if not already loaded and return current cr3 in
  86 * \scratch_reg
  87 */
  88.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
  89	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
  90	movl	%cr3, \scratch_reg
  91	/* Test if we are already on kernel CR3 */
  92	testl	$PTI_SWITCH_MASK, \scratch_reg
  93	jz	.Lend_\@
  94	andl	$(~PTI_SWITCH_MASK), \scratch_reg
  95	movl	\scratch_reg, %cr3
  96	/* Return original CR3 in \scratch_reg */
  97	orl	$PTI_SWITCH_MASK, \scratch_reg
  98.Lend_\@:
  99.endm
 100
 101#define CS_FROM_ENTRY_STACK	(1 << 31)
 102#define CS_FROM_USER_CR3	(1 << 30)
 103#define CS_FROM_KERNEL		(1 << 29)
 104#define CS_FROM_ESPFIX		(1 << 28)
 105
 106.macro FIXUP_FRAME
 107	/*
 108	 * The high bits of the CS dword (__csh) are used for CS_FROM_*.
 109	 * Clear them in case hardware didn't do this for us.
 110	 */
 111	andl	$0x0000ffff, 4*4(%esp)
 112
 113#ifdef CONFIG_VM86
 114	testl	$X86_EFLAGS_VM, 5*4(%esp)
 115	jnz	.Lfrom_usermode_no_fixup_\@
 116#endif
 117	testl	$USER_SEGMENT_RPL_MASK, 4*4(%esp)
 118	jnz	.Lfrom_usermode_no_fixup_\@
 119
 120	orl	$CS_FROM_KERNEL, 4*4(%esp)
 121
 122	/*
 123	 * When we're here from kernel mode, the (exception) stack looks like:
 124	 *
 125	 *  6*4(%esp) - <previous context>
 126	 *  5*4(%esp) - flags
 127	 *  4*4(%esp) - cs
 128	 *  3*4(%esp) - ip
 129	 *  2*4(%esp) - orig_eax
 130	 *  1*4(%esp) - gs / function
 131	 *  0*4(%esp) - fs
 132	 *
 133	 * Let's build a 5-entry IRET frame after that, such that struct pt_regs
 134	 * is complete and in particular regs->sp is correct. This gives us
 135	 * the original 6 entries as gap:
 136	 *
 137	 * 14*4(%esp) - <previous context>
 138	 * 13*4(%esp) - gap / flags
 139	 * 12*4(%esp) - gap / cs
 140	 * 11*4(%esp) - gap / ip
 141	 * 10*4(%esp) - gap / orig_eax
 142	 *  9*4(%esp) - gap / gs / function
 143	 *  8*4(%esp) - gap / fs
 144	 *  7*4(%esp) - ss
 145	 *  6*4(%esp) - sp
 146	 *  5*4(%esp) - flags
 147	 *  4*4(%esp) - cs
 148	 *  3*4(%esp) - ip
 149	 *  2*4(%esp) - orig_eax
 150	 *  1*4(%esp) - gs / function
 151	 *  0*4(%esp) - fs
 152	 */
 153
 154	pushl	%ss		# ss
 155	pushl	%esp		# sp (points at ss)
 156	addl	$7*4, (%esp)	# point sp back at the previous context
 157	pushl	7*4(%esp)	# flags
 158	pushl	7*4(%esp)	# cs
 159	pushl	7*4(%esp)	# ip
 160	pushl	7*4(%esp)	# orig_eax
 161	pushl	7*4(%esp)	# gs / function
 162	pushl	7*4(%esp)	# fs
 163.Lfrom_usermode_no_fixup_\@:
 164.endm
 165
 166.macro IRET_FRAME
 167	/*
 168	 * We're called with %ds, %es, %fs, and %gs from the interrupted
 169	 * frame, so we shouldn't use them.  Also, we may be in ESPFIX
 170	 * mode and therefore have a nonzero SS base and an offset ESP,
 171	 * so any attempt to access the stack needs to use SS.  (except for
 172	 * accesses through %esp, which automatically use SS.)
 173	 */
 174	testl $CS_FROM_KERNEL, 1*4(%esp)
 175	jz .Lfinished_frame_\@
 176
 177	/*
 178	 * Reconstruct the 3 entry IRET frame right after the (modified)
 179	 * regs->sp without lowering %esp in between, such that an NMI in the
 180	 * middle doesn't scribble our stack.
 181	 */
 182	pushl	%eax
 183	pushl	%ecx
 184	movl	5*4(%esp), %eax		# (modified) regs->sp
 185
 186	movl	4*4(%esp), %ecx		# flags
 187	movl	%ecx, %ss:-1*4(%eax)
 188
 189	movl	3*4(%esp), %ecx		# cs
 190	andl	$0x0000ffff, %ecx
 191	movl	%ecx, %ss:-2*4(%eax)
 192
 193	movl	2*4(%esp), %ecx		# ip
 194	movl	%ecx, %ss:-3*4(%eax)
 195
 196	movl	1*4(%esp), %ecx		# eax
 197	movl	%ecx, %ss:-4*4(%eax)
 198
 199	popl	%ecx
 200	lea	-4*4(%eax), %esp
 201	popl	%eax
 202.Lfinished_frame_\@:
 203.endm
 204
 205.macro SAVE_ALL pt_regs_ax=%eax switch_stacks=0 skip_gs=0 unwind_espfix=0
 206	cld
 207.if \skip_gs == 0
 208	pushl	$0
 209.endif
 210	pushl	%fs
 211
 212	pushl	%eax
 213	movl	$(__KERNEL_PERCPU), %eax
 214	movl	%eax, %fs
 215.if \unwind_espfix > 0
 216	UNWIND_ESPFIX_STACK
 217.endif
 218	popl	%eax
 219
 220	FIXUP_FRAME
 221	pushl	%es
 222	pushl	%ds
 223	pushl	\pt_regs_ax
 224	pushl	%ebp
 225	pushl	%edi
 226	pushl	%esi
 227	pushl	%edx
 228	pushl	%ecx
 229	pushl	%ebx
 230	movl	$(__USER_DS), %edx
 231	movl	%edx, %ds
 232	movl	%edx, %es
 233	/* Switch to kernel stack if necessary */
 234.if \switch_stacks > 0
 235	SWITCH_TO_KERNEL_STACK
 236.endif
 237.endm
 238
 239.macro SAVE_ALL_NMI cr3_reg:req unwind_espfix=0
 240	SAVE_ALL unwind_espfix=\unwind_espfix
 241
 242	BUG_IF_WRONG_CR3
 243
 244	/*
 245	 * Now switch the CR3 when PTI is enabled.
 246	 *
 247	 * We can enter with either user or kernel cr3, the code will
 248	 * store the old cr3 in \cr3_reg and switches to the kernel cr3
 249	 * if necessary.
 250	 */
 251	SWITCH_TO_KERNEL_CR3 scratch_reg=\cr3_reg
 252
 253.Lend_\@:
 254.endm
 255
 256.macro RESTORE_INT_REGS
 257	popl	%ebx
 258	popl	%ecx
 259	popl	%edx
 260	popl	%esi
 261	popl	%edi
 262	popl	%ebp
 263	popl	%eax
 264.endm
 265
 266.macro RESTORE_REGS pop=0
 267	RESTORE_INT_REGS
 2681:	popl	%ds
 2692:	popl	%es
 2703:	popl	%fs
 271	addl	$(4 + \pop), %esp	/* pop the unused "gs" slot */
 272	IRET_FRAME
 273.pushsection .fixup, "ax"
 2744:	movl	$0, (%esp)
 275	jmp	1b
 2765:	movl	$0, (%esp)
 277	jmp	2b
 2786:	movl	$0, (%esp)
 279	jmp	3b
 280.popsection
 281	_ASM_EXTABLE(1b, 4b)
 282	_ASM_EXTABLE(2b, 5b)
 283	_ASM_EXTABLE(3b, 6b)
 284.endm
 285
 286.macro RESTORE_ALL_NMI cr3_reg:req pop=0
 287	/*
 288	 * Now switch the CR3 when PTI is enabled.
 289	 *
 290	 * We enter with kernel cr3 and switch the cr3 to the value
 291	 * stored on \cr3_reg, which is either a user or a kernel cr3.
 292	 */
 293	ALTERNATIVE "jmp .Lswitched_\@", "", X86_FEATURE_PTI
 294
 295	testl	$PTI_SWITCH_MASK, \cr3_reg
 296	jz	.Lswitched_\@
 297
 298	/* User cr3 in \cr3_reg - write it to hardware cr3 */
 299	movl	\cr3_reg, %cr3
 300
 301.Lswitched_\@:
 302
 303	BUG_IF_WRONG_CR3
 304
 305	RESTORE_REGS pop=\pop
 306.endm
 307
 308.macro CHECK_AND_APPLY_ESPFIX
 309#ifdef CONFIG_X86_ESPFIX32
 310#define GDT_ESPFIX_OFFSET (GDT_ENTRY_ESPFIX_SS * 8)
 311#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + GDT_ESPFIX_OFFSET
 312
 313	ALTERNATIVE	"jmp .Lend_\@", "", X86_BUG_ESPFIX
 314
 315	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS, SS and CS
 316	/*
 317	 * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
 318	 * are returning to the kernel.
 319	 * See comments in process.c:copy_thread() for details.
 320	 */
 321	movb	PT_OLDSS(%esp), %ah
 322	movb	PT_CS(%esp), %al
 323	andl	$(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
 324	cmpl	$((SEGMENT_LDT << 8) | USER_RPL), %eax
 325	jne	.Lend_\@	# returning to user-space with LDT SS
 326
 327	/*
 328	 * Setup and switch to ESPFIX stack
 329	 *
 330	 * We're returning to userspace with a 16 bit stack. The CPU will not
 331	 * restore the high word of ESP for us on executing iret... This is an
 332	 * "official" bug of all the x86-compatible CPUs, which we can work
 333	 * around to make dosemu and wine happy. We do this by preloading the
 334	 * high word of ESP with the high word of the userspace ESP while
 335	 * compensating for the offset by changing to the ESPFIX segment with
 336	 * a base address that matches for the difference.
 337	 */
 338	mov	%esp, %edx			/* load kernel esp */
 339	mov	PT_OLDESP(%esp), %eax		/* load userspace esp */
 340	mov	%dx, %ax			/* eax: new kernel esp */
 341	sub	%eax, %edx			/* offset (low word is 0) */
 342	shr	$16, %edx
 343	mov	%dl, GDT_ESPFIX_SS + 4		/* bits 16..23 */
 344	mov	%dh, GDT_ESPFIX_SS + 7		/* bits 24..31 */
 345	pushl	$__ESPFIX_SS
 346	pushl	%eax				/* new kernel esp */
 347	/*
 348	 * Disable interrupts, but do not irqtrace this section: we
 349	 * will soon execute iret and the tracer was already set to
 350	 * the irqstate after the IRET:
 351	 */
 352	cli
 353	lss	(%esp), %esp			/* switch to espfix segment */
 354.Lend_\@:
 355#endif /* CONFIG_X86_ESPFIX32 */
 356.endm
 357
 358/*
 359 * Called with pt_regs fully populated and kernel segments loaded,
 360 * so we can access PER_CPU and use the integer registers.
 361 *
 362 * We need to be very careful here with the %esp switch, because an NMI
 363 * can happen anywhere. If the NMI handler finds itself on the
 364 * entry-stack, it will overwrite the task-stack and everything we
 365 * copied there. So allocate the stack-frame on the task-stack and
 366 * switch to it before we do any copying.
 367 */
 368
 369.macro SWITCH_TO_KERNEL_STACK
 370
 371	BUG_IF_WRONG_CR3
 372
 373	SWITCH_TO_KERNEL_CR3 scratch_reg=%eax
 374
 375	/*
 376	 * %eax now contains the entry cr3 and we carry it forward in
 377	 * that register for the time this macro runs
 378	 */
 379
 380	/* Are we on the entry stack? Bail out if not! */
 381	movl	PER_CPU_VAR(cpu_entry_area), %ecx
 382	addl	$CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
 383	subl	%esp, %ecx	/* ecx = (end of entry_stack) - esp */
 384	cmpl	$SIZEOF_entry_stack, %ecx
 385	jae	.Lend_\@
 386
 387	/* Load stack pointer into %esi and %edi */
 388	movl	%esp, %esi
 389	movl	%esi, %edi
 390
 391	/* Move %edi to the top of the entry stack */
 392	andl	$(MASK_entry_stack), %edi
 393	addl	$(SIZEOF_entry_stack), %edi
 394
 395	/* Load top of task-stack into %edi */
 396	movl	TSS_entry2task_stack(%edi), %edi
 397
 398	/* Special case - entry from kernel mode via entry stack */
 399#ifdef CONFIG_VM86
 400	movl	PT_EFLAGS(%esp), %ecx		# mix EFLAGS and CS
 401	movb	PT_CS(%esp), %cl
 402	andl	$(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %ecx
 403#else
 404	movl	PT_CS(%esp), %ecx
 405	andl	$SEGMENT_RPL_MASK, %ecx
 406#endif
 407	cmpl	$USER_RPL, %ecx
 408	jb	.Lentry_from_kernel_\@
 409
 410	/* Bytes to copy */
 411	movl	$PTREGS_SIZE, %ecx
 412
 413#ifdef CONFIG_VM86
 414	testl	$X86_EFLAGS_VM, PT_EFLAGS(%esi)
 415	jz	.Lcopy_pt_regs_\@
 416
 417	/*
 418	 * Stack-frame contains 4 additional segment registers when
 419	 * coming from VM86 mode
 420	 */
 421	addl	$(4 * 4), %ecx
 422
 423#endif
 424.Lcopy_pt_regs_\@:
 425
 426	/* Allocate frame on task-stack */
 427	subl	%ecx, %edi
 428
 429	/* Switch to task-stack */
 430	movl	%edi, %esp
 431
 432	/*
 433	 * We are now on the task-stack and can safely copy over the
 434	 * stack-frame
 435	 */
 436	shrl	$2, %ecx
 437	cld
 438	rep movsl
 439
 440	jmp .Lend_\@
 441
 442.Lentry_from_kernel_\@:
 443
 444	/*
 445	 * This handles the case when we enter the kernel from
 446	 * kernel-mode and %esp points to the entry-stack. When this
 447	 * happens we need to switch to the task-stack to run C code,
 448	 * but switch back to the entry-stack again when we approach
 449	 * iret and return to the interrupted code-path. This usually
 450	 * happens when we hit an exception while restoring user-space
 451	 * segment registers on the way back to user-space or when the
 452	 * sysenter handler runs with eflags.tf set.
 453	 *
 454	 * When we switch to the task-stack here, we can't trust the
 455	 * contents of the entry-stack anymore, as the exception handler
 456	 * might be scheduled out or moved to another CPU. Therefore we
 457	 * copy the complete entry-stack to the task-stack and set a
 458	 * marker in the iret-frame (bit 31 of the CS dword) to detect
 459	 * what we've done on the iret path.
 460	 *
 461	 * On the iret path we copy everything back and switch to the
 462	 * entry-stack, so that the interrupted kernel code-path
 463	 * continues on the same stack it was interrupted with.
 464	 *
 465	 * Be aware that an NMI can happen anytime in this code.
 466	 *
 467	 * %esi: Entry-Stack pointer (same as %esp)
 468	 * %edi: Top of the task stack
 469	 * %eax: CR3 on kernel entry
 470	 */
 471
 472	/* Calculate number of bytes on the entry stack in %ecx */
 473	movl	%esi, %ecx
 474
 475	/* %ecx to the top of entry-stack */
 476	andl	$(MASK_entry_stack), %ecx
 477	addl	$(SIZEOF_entry_stack), %ecx
 478
 479	/* Number of bytes on the entry stack to %ecx */
 480	sub	%esi, %ecx
 481
 482	/* Mark stackframe as coming from entry stack */
 483	orl	$CS_FROM_ENTRY_STACK, PT_CS(%esp)
 484
 485	/*
 486	 * Test the cr3 used to enter the kernel and add a marker
 487	 * so that we can switch back to it before iret.
 488	 */
 489	testl	$PTI_SWITCH_MASK, %eax
 490	jz	.Lcopy_pt_regs_\@
 491	orl	$CS_FROM_USER_CR3, PT_CS(%esp)
 492
 493	/*
 494	 * %esi and %edi are unchanged, %ecx contains the number of
 495	 * bytes to copy. The code at .Lcopy_pt_regs_\@ will allocate
 496	 * the stack-frame on task-stack and copy everything over
 497	 */
 498	jmp .Lcopy_pt_regs_\@
 499
 500.Lend_\@:
 501.endm
 502
 503/*
 504 * Switch back from the kernel stack to the entry stack.
 505 *
 506 * The %esp register must point to pt_regs on the task stack. It will
 507 * first calculate the size of the stack-frame to copy, depending on
 508 * whether we return to VM86 mode or not. With that it uses 'rep movsl'
 509 * to copy the contents of the stack over to the entry stack.
 510 *
 511 * We must be very careful here, as we can't trust the contents of the
 512 * task-stack once we switched to the entry-stack. When an NMI happens
 513 * while on the entry-stack, the NMI handler will switch back to the top
 514 * of the task stack, overwriting our stack-frame we are about to copy.
 515 * Therefore we switch the stack only after everything is copied over.
 516 */
 517.macro SWITCH_TO_ENTRY_STACK
 518
 519	/* Bytes to copy */
 520	movl	$PTREGS_SIZE, %ecx
 521
 522#ifdef CONFIG_VM86
 523	testl	$(X86_EFLAGS_VM), PT_EFLAGS(%esp)
 524	jz	.Lcopy_pt_regs_\@
 525
 526	/* Additional 4 registers to copy when returning to VM86 mode */
 527	addl    $(4 * 4), %ecx
 528
 529.Lcopy_pt_regs_\@:
 530#endif
 531
 532	/* Initialize source and destination for movsl */
 533	movl	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %edi
 534	subl	%ecx, %edi
 535	movl	%esp, %esi
 536
 537	/* Save future stack pointer in %ebx */
 538	movl	%edi, %ebx
 539
 540	/* Copy over the stack-frame */
 541	shrl	$2, %ecx
 542	cld
 543	rep movsl
 544
 545	/*
 546	 * Switch to entry-stack - needs to happen after everything is
 547	 * copied because the NMI handler will overwrite the task-stack
 548	 * when on entry-stack
 549	 */
 550	movl	%ebx, %esp
 551
 552.Lend_\@:
 553.endm
 554
 555/*
 556 * This macro handles the case when we return to kernel-mode on the iret
 557 * path and have to switch back to the entry stack and/or user-cr3
 558 *
 559 * See the comments below the .Lentry_from_kernel_\@ label in the
 560 * SWITCH_TO_KERNEL_STACK macro for more details.
 561 */
 562.macro PARANOID_EXIT_TO_KERNEL_MODE
 563
 564	/*
 565	 * Test if we entered the kernel with the entry-stack. Most
 566	 * likely we did not, because this code only runs on the
 567	 * return-to-kernel path.
 568	 */
 569	testl	$CS_FROM_ENTRY_STACK, PT_CS(%esp)
 570	jz	.Lend_\@
 571
 572	/* Unlikely slow-path */
 573
 574	/* Clear marker from stack-frame */
 575	andl	$(~CS_FROM_ENTRY_STACK), PT_CS(%esp)
 576
 577	/* Copy the remaining task-stack contents to entry-stack */
 578	movl	%esp, %esi
 579	movl	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %edi
 580
 581	/* Bytes on the task-stack to ecx */
 582	movl	PER_CPU_VAR(cpu_tss_rw + TSS_sp1), %ecx
 583	subl	%esi, %ecx
 584
 585	/* Allocate stack-frame on entry-stack */
 586	subl	%ecx, %edi
 587
 588	/*
 589	 * Save future stack-pointer, we must not switch until the
 590	 * copy is done, otherwise the NMI handler could destroy the
 591	 * contents of the task-stack we are about to copy.
 592	 */
 593	movl	%edi, %ebx
 594
 595	/* Do the copy */
 596	shrl	$2, %ecx
 597	cld
 598	rep movsl
 599
 600	/* Safe to switch to entry-stack now */
 601	movl	%ebx, %esp
 602
 603	/*
 604	 * We came from entry-stack and need to check if we also need to
 605	 * switch back to user cr3.
 606	 */
 607	testl	$CS_FROM_USER_CR3, PT_CS(%esp)
 608	jz	.Lend_\@
 609
 610	/* Clear marker from stack-frame */
 611	andl	$(~CS_FROM_USER_CR3), PT_CS(%esp)
 612
 613	SWITCH_TO_USER_CR3 scratch_reg=%eax
 614
 615.Lend_\@:
 616.endm
 617
 618/**
 619 * idtentry - Macro to generate entry stubs for simple IDT entries
 620 * @vector:		Vector number
 621 * @asmsym:		ASM symbol for the entry point
 622 * @cfunc:		C function to be called
 623 * @has_error_code:	Hardware pushed error code on stack
 624 */
 625.macro idtentry vector asmsym cfunc has_error_code:req
 626SYM_CODE_START(\asmsym)
 627	ASM_CLAC
 628	cld
 629
 630	.if \has_error_code == 0
 631		pushl	$0		/* Clear the error code */
 632	.endif
 633
 634	/* Push the C-function address into the GS slot */
 635	pushl	$\cfunc
 636	/* Invoke the common exception entry */
 637	jmp	handle_exception
 638SYM_CODE_END(\asmsym)
 639.endm
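/*
 * Example expansion: asm/idtentry.h instantiates this macro roughly as
 * follows for #DE, which pushes no hardware error code:
 *
 *	idtentry X86_TRAP_DE asm_exc_divide_error exc_divide_error has_error_code=0
 */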
 640
 641.macro idtentry_irq vector cfunc
 642	.p2align CONFIG_X86_L1_CACHE_SHIFT
 643SYM_CODE_START_LOCAL(asm_\cfunc)
 644	ASM_CLAC
 645	SAVE_ALL switch_stacks=1
 646	ENCODE_FRAME_POINTER
 647	movl	%esp, %eax
 648	movl	PT_ORIG_EAX(%esp), %edx		/* get the vector from stack */
 649	movl	$-1, PT_ORIG_EAX(%esp)		/* no syscall to restart */
 650	call	\cfunc
 651	jmp	handle_exception_return
 652SYM_CODE_END(asm_\cfunc)
 653.endm
 654
 655.macro idtentry_sysvec vector cfunc
 656	idtentry \vector asm_\cfunc \cfunc has_error_code=0
 657.endm
 658
 659/*
 660 * Include the defines which emit the idt entries which are shared
 661 * between 32 and 64 bit and emit the __irqentry_text_* markers
 662 * so the stacktrace boundary checks work.
 663 */
 664	.align 16
 665	.globl __irqentry_text_start
 666__irqentry_text_start:
 667
 668#include <asm/idtentry.h>
 669
 670	.align 16
 671	.globl __irqentry_text_end
 672__irqentry_text_end:
 673
 674/*
 675 * %eax: prev task
 676 * %edx: next task
 677 */
 678.pushsection .text, "ax"
 679SYM_CODE_START(__switch_to_asm)
 680	/*
 681	 * Save callee-saved registers
 682	 * This must match the order in struct inactive_task_frame
 683	 */
 684	pushl	%ebp
 685	pushl	%ebx
 686	pushl	%edi
 687	pushl	%esi
 688	/*
 689	 * Flags are saved to prevent AC leakage. This could go
 690	 * away if objtool had 32-bit support to verify
 691	 * the STAC/CLAC correctness.
 692	 */
 693	pushfl
 694
 695	/* switch stack */
 696	movl	%esp, TASK_threadsp(%eax)
 697	movl	TASK_threadsp(%edx), %esp
 698
 699#ifdef CONFIG_STACKPROTECTOR
 700	movl	TASK_stack_canary(%edx), %ebx
 701	movl	%ebx, PER_CPU_VAR(__stack_chk_guard)
 702#endif
 703
 704#ifdef CONFIG_RETPOLINE
 705	/*
 706	 * When switching from a shallower to a deeper call stack
 707	 * the RSB may either underflow or use entries populated
 708	 * with userspace addresses. On CPUs where those concerns
 709	 * exist, overwrite the RSB with entries which capture
 710	 * speculative execution to prevent attack.
 711	 */
 712	FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
 713#endif
 714
 715	/* Restore flags of the incoming task to restore AC state. */
 716	popfl
 717	/* restore callee-saved registers */
 718	popl	%esi
 719	popl	%edi
 720	popl	%ebx
 721	popl	%ebp
 722
 723	jmp	__switch_to
 724SYM_CODE_END(__switch_to_asm)
 725.popsection
 726
 727/*
 728 * The unwinder expects the last frame on the stack to always be at the same
 729 * offset from the end of the page, which allows it to validate the stack.
 730 * Calling schedule_tail() directly would break that convention because it's an
 731 * asmlinkage function so its argument has to be pushed on the stack.  This
 732 * wrapper creates a proper "end of stack" frame header before the call.
 733 */
 734.pushsection .text, "ax"
 735SYM_FUNC_START(schedule_tail_wrapper)
 736	FRAME_BEGIN
 737
 738	pushl	%eax
 739	call	schedule_tail
 740	popl	%eax
 741
 742	FRAME_END
 743	ret
 744SYM_FUNC_END(schedule_tail_wrapper)
 745.popsection
 746
 747/*
 748 * A newly forked process directly context switches into this address.
 749 *
 750 * eax: prev task we switched from
 751 * ebx: kernel thread func (NULL for user thread)
 752 * edi: kernel thread arg
 753 */
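/*
 * (Context: copy_thread() seeds this frame -- for a kernel thread it
 * stores the thread function in ebx and its argument in edi; for a
 * user fork ebx is zero, so the jnz below falls through.)
 */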
 754.pushsection .text, "ax"
 755SYM_CODE_START(ret_from_fork)
 756	call	schedule_tail_wrapper
 757
 758	testl	%ebx, %ebx
 759	jnz	1f		/* kernel threads are uncommon */
 760
 7612:
 762	/* When we fork, we trace the syscall return in the child, too. */
 763	movl    %esp, %eax
 764	call    syscall_exit_to_user_mode
 765	jmp     .Lsyscall_32_done
 766
 767	/* kernel thread */
 7681:	movl	%edi, %eax
 769	CALL_NOSPEC ebx
 770	/*
 771	 * A kernel thread is allowed to return here after successfully
 772	 * calling kernel_execve().  Exit to userspace to complete the execve()
 773	 * syscall.
 774	 */
 775	movl	$0, PT_EAX(%esp)
 776	jmp	2b
 777SYM_CODE_END(ret_from_fork)
 778.popsection
 779
 780SYM_ENTRY(__begin_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE)
 781/*
 782 * All code from here through __end_SYSENTER_singlestep_region is subject
 783 * to being single-stepped if a user program sets TF and executes SYSENTER.
 784 * There is absolutely nothing that we can do to prevent this from happening
 785 * (thanks Intel!).  To keep our handling of this situation as simple as
 786 * possible, we handle TF just like AC and NT, except that our #DB handler
 787 * will ignore all of the single-step traps generated in this range.
 788 */
 789
 790/*
 791 * 32-bit SYSENTER entry.
 792 *
 793 * 32-bit system calls through the vDSO's __kernel_vsyscall enter here
 794 * if X86_FEATURE_SEP is available.  This is the preferred system call
 795 * entry on 32-bit systems.
 796 *
 797 * The SYSENTER instruction, in principle, should *only* occur in the
 798 * vDSO.  In practice, a small number of Android devices were shipped
 799 * with a copy of Bionic that inlined a SYSENTER instruction.  This
 800 * never happened in any of Google's Bionic versions -- it only happened
 801 * in a narrow range of Intel-provided versions.
 802 *
 803 * SYSENTER loads SS, ESP, CS, and EIP from previously programmed MSRs.
 804 * IF and VM in RFLAGS are cleared (IOW: interrupts are off).
 805 * SYSENTER does not save anything on the stack,
 806 * and does not save old EIP (!!!), ESP, or EFLAGS.
 807 *
 808 * To avoid losing track of EFLAGS.VM (and thus potentially corrupting
 809 * user and/or vm86 state), we explicitly disable the SYSENTER
 810 * instruction in vm86 mode by reprogramming the MSRs.
 811 *
 812 * Arguments:
 813 * eax  system call number
 814 * ebx  arg1
 815 * ecx  arg2
 816 * edx  arg3
 817 * esi  arg4
 818 * edi  arg5
 819 * ebp  user stack
 820 * 0(%ebp) arg6
 821 */
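/*
 * For context, the vDSO side of this handshake looks roughly like the
 * following (sketch; see arch/x86/entry/vdso/vdso32/system_call.S for
 * the real thing):
 *
 *	__kernel_vsyscall:
 *		push	%ecx
 *		push	%edx
 *		push	%ebp		// arg6, hence 0(%ebp) above
 *		movl	%esp, %ebp
 *		sysenter
 *		...			// SYSEXIT resumes here
 *		pop	%ebp
 *		pop	%edx
 *		pop	%ecx
 *		ret
 */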
 822SYM_FUNC_START(entry_SYSENTER_32)
 823	/*
 824	 * We are on the entry stack with all userspace regs live - save
 825	 * and restore eflags and %eax so that %eax can be used as a
 826	 * scratch register for the cr3 switch.
 827	 */
 828	pushfl
 829	pushl	%eax
 830	BUG_IF_WRONG_CR3 no_user_check=1
 831	SWITCH_TO_KERNEL_CR3 scratch_reg=%eax
 832	popl	%eax
 833	popfl
 834
 835	/* Stack empty again, switch to task stack */
 836	movl	TSS_entry2task_stack(%esp), %esp
 837
 838.Lsysenter_past_esp:
 839	pushl	$__USER_DS		/* pt_regs->ss */
 840	pushl	$0			/* pt_regs->sp (placeholder) */
 841	pushfl				/* pt_regs->flags (except IF = 0) */
 842	pushl	$__USER_CS		/* pt_regs->cs */
 843	pushl	$0			/* pt_regs->ip = 0 (placeholder) */
 844	pushl	%eax			/* pt_regs->orig_ax */
 845	SAVE_ALL pt_regs_ax=$-ENOSYS	/* save rest, stack already switched */
 846
 847	/*
 848	 * SYSENTER doesn't filter flags, so we need to clear NT, AC
 849	 * and TF ourselves.  To save a few cycles, we can check whether
 850	 * either was set instead of doing an unconditional popfl.
 851	 * This needs to happen before enabling interrupts so that
 852	 * we don't get preempted with NT set.
 853	 *
 854	 * If TF is set, we will single-step all the way to here -- do_debug
 855	 * will ignore all the traps.  (Yes, this is slow, but so is
 856	 * single-stepping in general.  This allows us to avoid having
 857	 * a more complicated code to handle the case where a user program
 858	 * forces us to single-step through the SYSENTER entry code.)
 859	 *
 860	 * NB.: .Lsysenter_fix_flags is a label with the code under it moved
 861	 * out-of-line as an optimization: NT is unlikely to be set in the
 862	 * majority of the cases and instead of polluting the I$ unnecessarily,
 863	 * we're keeping that code behind a branch which will predict as
 864	 * not-taken and therefore its instructions won't be fetched.
 865	 */
 866	testl	$X86_EFLAGS_NT|X86_EFLAGS_AC|X86_EFLAGS_TF, PT_EFLAGS(%esp)
 867	jnz	.Lsysenter_fix_flags
 868.Lsysenter_flags_fixed:
 869
 870	movl	%esp, %eax
 871	call	do_SYSENTER_32
 872	testl	%eax, %eax
 873	jz	.Lsyscall_32_done
 874
 875	STACKLEAK_ERASE
 876
 877	/* Opportunistic SYSEXIT */
 878
 879	/*
 880	 * Setup entry stack - we keep the pointer in %eax and do the
 881	 * switch after almost all user-state is restored.
 882	 */
 883
 884	/* Load entry stack pointer and allocate frame for eflags/eax */
 885	movl	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %eax
 886	subl	$(2*4), %eax
 887
 888	/* Copy eflags and eax to entry stack */
 889	movl	PT_EFLAGS(%esp), %edi
 890	movl	PT_EAX(%esp), %esi
 891	movl	%edi, (%eax)
 892	movl	%esi, 4(%eax)
 893
 894	/* Restore user registers and segments */
 895	movl	PT_EIP(%esp), %edx	/* pt_regs->ip */
 896	movl	PT_OLDESP(%esp), %ecx	/* pt_regs->sp */
 8971:	mov	PT_FS(%esp), %fs
 898
 899	popl	%ebx			/* pt_regs->bx */
 900	addl	$2*4, %esp		/* skip pt_regs->cx and pt_regs->dx */
 901	popl	%esi			/* pt_regs->si */
 902	popl	%edi			/* pt_regs->di */
 903	popl	%ebp			/* pt_regs->bp */
 904
 905	/* Switch to entry stack */
 906	movl	%eax, %esp
 907
 908	/* Now ready to switch the cr3 */
 909	SWITCH_TO_USER_CR3 scratch_reg=%eax
 910
 911	/*
 912	 * Restore all flags except IF. (We restore IF separately because
 913	 * STI gives a one-instruction window in which we won't be interrupted,
 914	 * whereas POPF does not.)
 915	 */
 916	btrl	$X86_EFLAGS_IF_BIT, (%esp)
 917	BUG_IF_WRONG_CR3 no_user_check=1
 918	popfl
 919	popl	%eax
 920
 921	/*
 922	 * Return back to the vDSO, which will pop ecx and edx.
 923	 * Don't bother with DS and ES (they already contain __USER_DS).
 924	 */
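	/*
	 * Architecturally, SYSEXIT resumes userspace with EIP <- %edx and
	 * ESP <- %ecx (CS/SS derived from the SYSENTER MSRs), which is
	 * why pt_regs->ip and pt_regs->sp were loaded into those
	 * registers above.
	 */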
 925	sti
 926	sysexit
 927
 928.pushsection .fixup, "ax"
 9292:	movl	$0, PT_FS(%esp)
 930	jmp	1b
 931.popsection
 932	_ASM_EXTABLE(1b, 2b)
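	/*
	 * The exception-table entry above pairs the faultable segment
	 * load at 1: with the fixup at 2:, so a bad user %fs selector is
	 * replaced with 0 and the load retried instead of oopsing.
	 */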
 933
 934.Lsysenter_fix_flags:
 935	pushl	$X86_EFLAGS_FIXED
 936	popfl
 937	jmp	.Lsysenter_flags_fixed
 938SYM_ENTRY(__end_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE)
 939SYM_FUNC_END(entry_SYSENTER_32)
 940
 941/*
 942 * 32-bit legacy system call entry.
 943 *
 944 * 32-bit x86 Linux system calls traditionally used the INT $0x80
 945 * instruction.  INT $0x80 lands here.
 946 *
 947 * This entry point can be used by any 32-bit program to perform system
 948 * calls.  Instances of INT $0x80 can be found inline in various programs and
 948 * Instances of INT $0x80 can be found inline in various programs and
 949 * libraries.  It is also used by the vDSO's __kernel_vsyscall
 950 * fallback for hardware that doesn't support a faster entry method.
 951 * Restarted 32-bit system calls also fall back to INT $0x80
 952 * regardless of what instruction was originally used to do the system
 953 * call.  (64-bit programs can use INT $0x80 as well, but they can
 954 * only run on 64-bit kernels and therefore land in
 955 * entry_INT80_compat.)
 956 *
 957 * This is considered a slow path.  It is not used by most libc
 958 * implementations on modern hardware except during process startup.
 959 *
 960 * Arguments:
 961 * eax  system call number
 962 * ebx  arg1
 963 * ecx  arg2
 964 * edx  arg3
 965 * esi  arg4
 966 * edi  arg5
 967 * ebp  arg6
 968 */
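/*
 * Minimal userspace usage sketch (syscall number assumed from
 * asm/unistd_32.h):
 *
 *	movl	$20, %eax	# __NR_getpid
 *	int	$0x80
 *	# pid is now in %eax
 */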
 969SYM_FUNC_START(entry_INT80_32)
 970	ASM_CLAC
 971	pushl	%eax			/* pt_regs->orig_ax */
 972
 973	SAVE_ALL pt_regs_ax=$-ENOSYS switch_stacks=1	/* save rest */
 974
 975	movl	%esp, %eax
 976	call	do_int80_syscall_32
 977.Lsyscall_32_done:
 978	STACKLEAK_ERASE
 979
 980restore_all_switch_stack:
 981	SWITCH_TO_ENTRY_STACK
 982	CHECK_AND_APPLY_ESPFIX
 983
 984	/* Switch back to user CR3 */
 985	SWITCH_TO_USER_CR3 scratch_reg=%eax
 986
 987	BUG_IF_WRONG_CR3
 988
 989	/* Restore user state */
 990	RESTORE_REGS pop=4			# skip orig_eax/error_code
 991.Lirq_return:
 992	/*
 993	 * ARCH_HAS_MEMBARRIER_SYNC_CORE relies on IRET core serialization
 994	 * when returning from IPI handler and when returning from
 995	 * scheduler to user-space.
 996	 */
 997	iret
 998
 999.section .fixup, "ax"
1000SYM_CODE_START(asm_iret_error)
1001	pushl	$0				# no error code
1002	pushl	$iret_error
1003
1004#ifdef CONFIG_DEBUG_ENTRY
1005	/*
1006	 * The stack-frame here is the one that iret faulted on, so it's a
1007	 * return-to-user frame. We are on kernel-cr3 because we come here from
1008	 * the fixup code. This confuses the CR3 checker, so switch to user-cr3
1009	 * as the checker expects it.
1010	 */
1011	pushl	%eax
1012	SWITCH_TO_USER_CR3 scratch_reg=%eax
1013	popl	%eax
1014#endif
1015
1016	jmp	handle_exception
1017SYM_CODE_END(asm_iret_error)
1018.previous
1019	_ASM_EXTABLE(.Lirq_return, asm_iret_error)
1020SYM_FUNC_END(entry_INT80_32)
1021
1022.macro FIXUP_ESPFIX_STACK
1023/*
1024 * Switch back from the ESPFIX stack to the normal zero-based stack
1025 *
1026 * We can't call C functions using the ESPFIX stack. This code reads
1027 * the high word of the segment base from the GDT and switches to the
1028 * normal stack and adjusts ESP with the matching offset.
1029 *
1030 * We might be on user CR3 here, so percpu data is not mapped and we can't
1031 * access the GDT through the percpu segment.  Instead, use SGDT to find
1032 * the cpu_entry_area alias of the GDT.
1033 */
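/*
 * Descriptor layout reminder: in a GDT entry, base bits 0..15 live in
 * bytes 2..3, bits 16..23 in byte 4, and bits 24..31 in byte 7, which
 * is why bytes 4 and 7 are fetched below to rebuild the high word of
 * the espfix segment base.
 */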
1034#ifdef CONFIG_X86_ESPFIX32
1035	/* fixup the stack */
1036	pushl	%ecx
1037	subl	$2*4, %esp
1038	sgdt	(%esp)
1039	movl	2(%esp), %ecx				/* GDT address */
1040	/*
1041	 * Careful: ECX is a linear pointer, so we need to force base
1042	 * zero.  %cs is the only known-linear segment we have right now.
1043	 */
1044	mov	%cs:GDT_ESPFIX_OFFSET + 4(%ecx), %al	/* bits 16..23 */
1045	mov	%cs:GDT_ESPFIX_OFFSET + 7(%ecx), %ah	/* bits 24..31 */
1046	shl	$16, %eax
1047	addl	$2*4, %esp
1048	popl	%ecx
1049	addl	%esp, %eax			/* the adjusted stack pointer */
1050	pushl	$__KERNEL_DS
1051	pushl	%eax
1052	lss	(%esp), %esp			/* switch to the normal stack segment */
1053#endif
1054.endm
1055
1056.macro UNWIND_ESPFIX_STACK
1057	/* It's safe to clobber %eax; all other regs need to be preserved */
1058#ifdef CONFIG_X86_ESPFIX32
1059	movl	%ss, %eax
1060	/* see if on espfix stack */
1061	cmpw	$__ESPFIX_SS, %ax
1062	jne	.Lno_fixup_\@
1063	/* switch to normal stack */
1064	FIXUP_ESPFIX_STACK
1065.Lno_fixup_\@:
1066#endif
1067.endm
1068
1069SYM_CODE_START_LOCAL_NOALIGN(handle_exception)
1070	/* the function address is in %gs's slot on the stack */
1071	SAVE_ALL switch_stacks=1 skip_gs=1 unwind_espfix=1
1072	ENCODE_FRAME_POINTER
1073
1074	movl	PT_GS(%esp), %edi		# get the function address
1075
1076	/* fixup orig %eax */
1077	movl	PT_ORIG_EAX(%esp), %edx		# get the error code
1078	movl	$-1, PT_ORIG_EAX(%esp)		# no syscall to restart
1079
1080	movl	%esp, %eax			# pt_regs pointer
1081	CALL_NOSPEC edi
1082
1083handle_exception_return:
1084#ifdef CONFIG_VM86
1085	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS and CS
1086	movb	PT_CS(%esp), %al
1087	andl	$(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
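	/*
	 * Folding CS into the low byte while keeping EFLAGS.VM means the
	 * single comparison below also catches vm86 mode: with VM set,
	 * %eax is necessarily above USER_RPL.
	 */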
1088#else
1089	/*
1090	 * We can be coming here from a child spawned by kernel_thread().
1091	 */
1092	movl	PT_CS(%esp), %eax
1093	andl	$SEGMENT_RPL_MASK, %eax
1094#endif
1095	cmpl	$USER_RPL, %eax			# returning to v8086 or userspace ?
1096	jnb	ret_to_user
1097
1098	PARANOID_EXIT_TO_KERNEL_MODE
1099	BUG_IF_WRONG_CR3
1100	RESTORE_REGS 4
1101	jmp	.Lirq_return
1102
1103ret_to_user:
1104	movl	%esp, %eax
1105	jmp	restore_all_switch_stack
1106SYM_CODE_END(handle_exception)
1107
1108SYM_CODE_START(asm_exc_double_fault)
11091:
1110	/*
1111	 * This is a task gate handler, not an interrupt gate handler.
1112	 * The error code is on the stack, but the stack is otherwise
1113	 * empty.  Interrupts are off.  Our state is sane with the following
1114	 * exceptions:
1115	 *
1116	 *  - CR0.TS is set.  "TS" literally means "task switched".
1117	 *  - EFLAGS.NT is set because we're a "nested task".
1118	 *  - The doublefault TSS has back_link set and has been marked busy.
1119	 *  - TR points to the doublefault TSS and the normal TSS is busy.
1120	 *  - CR3 is the normal kernel PGD.  This would be delightful, except
1121	 *    that the CPU didn't bother to save the old CR3 anywhere.  This
1122	 *    would make it very awkward to return to the context we came
1123	 *    from.
1124	 *
1125	 * The rest of EFLAGS is sanitized for us, so we don't need to
1126	 * worry about AC or DF.
1127	 *
1128	 * Don't even bother popping the error code.  It's always zero,
1129	 * and ignoring it makes us a bit more robust against buggy
1130	 * hypervisor task gate implementations.
1131	 *
1132	 * We will manually undo the task switch instead of doing a
1133	 * task-switching IRET.
1134	 */
1135
1136	clts				/* clear CR0.TS */
1137	pushl	$X86_EFLAGS_FIXED
1138	popfl				/* clear EFLAGS.NT */
1139
1140	call	doublefault_shim
1141
1142	/* We don't support returning, so we have no IRET here. */
11431:
1144	hlt
1145	jmp 1b
1146SYM_CODE_END(asm_exc_double_fault)
1147
1148/*
1149 * NMI is doubly nasty.  It can happen on the first instruction of
1150 * entry_SYSENTER_32 (just like #DB), but it can also interrupt the beginning
1151 * of the #DB handler even if that #DB in turn hit before entry_SYSENTER_32
1152 * switched stacks.  We handle both conditions by simply checking whether we
1153 * interrupted kernel code running on the SYSENTER stack.
1154 */
1155SYM_CODE_START(asm_exc_nmi)
1156	ASM_CLAC
1157
1158#ifdef CONFIG_X86_ESPFIX32
1159	/*
1160	 * ESPFIX_SS is only ever set on the return to user path
1161	 * after we've switched to the entry stack.
1162	 */
1163	pushl	%eax
1164	movl	%ss, %eax
1165	cmpw	$__ESPFIX_SS, %ax
1166	popl	%eax
1167	je	.Lnmi_espfix_stack
1168#endif
1169
1170	pushl	%eax				# pt_regs->orig_ax
1171	SAVE_ALL_NMI cr3_reg=%edi
1172	ENCODE_FRAME_POINTER
1173	xorl	%edx, %edx			# zero error code
1174	movl	%esp, %eax			# pt_regs pointer
1175
1176	/* Are we currently on the SYSENTER stack? */
1177	movl	PER_CPU_VAR(cpu_entry_area), %ecx
1178	addl	$CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
1179	subl	%eax, %ecx	/* ecx = (end of entry_stack) - esp */
1180	cmpl	$SIZEOF_entry_stack, %ecx
1181	jb	.Lnmi_from_sysenter_stack
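	/*
	 * The check above is the asm form of a simple range test,
	 * roughly (C sketch):
	 *
	 *	if (top_of_entry_stack - esp < sizeof(entry_stack))
	 *		// we interrupted code running on the entry stack
	 */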
1182
1183	/* Not on SYSENTER stack. */
1184	call	exc_nmi
1185	jmp	.Lnmi_return
1186
1187.Lnmi_from_sysenter_stack:
1188	/*
1189	 * We're on the SYSENTER stack.  Switch off.  No one (not even debug)
1190	 * is using the thread stack right now, so it's safe for us to use it.
1191	 */
1192	movl	%esp, %ebx
1193	movl	PER_CPU_VAR(cpu_current_top_of_stack), %esp
1194	call	exc_nmi
1195	movl	%ebx, %esp
1196
1197.Lnmi_return:
1198#ifdef CONFIG_X86_ESPFIX32
1199	testl	$CS_FROM_ESPFIX, PT_CS(%esp)
1200	jnz	.Lnmi_from_espfix
1201#endif
1202
1203	CHECK_AND_APPLY_ESPFIX
1204	RESTORE_ALL_NMI cr3_reg=%edi pop=4
1205	jmp	.Lirq_return
1206
1207#ifdef CONFIG_X86_ESPFIX32
1208.Lnmi_espfix_stack:
1209	/*
1210	 * Create the ss:esp pointer used to LSS back to the original stack.
1211	 */
1212	pushl	%ss
1213	pushl	%esp
1214	addl	$4, (%esp)
1215
1216	/* Copy the (short) IRET frame */
1217	pushl	4*4(%esp)	# flags
1218	pushl	4*4(%esp)	# cs
1219	pushl	4*4(%esp)	# ip
1220
1221	pushl	%eax		# orig_ax
1222
1223	SAVE_ALL_NMI cr3_reg=%edi unwind_espfix=1
1224	ENCODE_FRAME_POINTER
1225
1226	/* clear CS_FROM_KERNEL, set CS_FROM_ESPFIX */
1227	xorl	$(CS_FROM_ESPFIX | CS_FROM_KERNEL), PT_CS(%esp)
1228
1229	xorl	%edx, %edx			# zero error code
1230	movl	%esp, %eax			# pt_regs pointer
1231	jmp	.Lnmi_from_sysenter_stack
1232
1233.Lnmi_from_espfix:
1234	RESTORE_ALL_NMI cr3_reg=%edi
1235	/*
1236	 * Because we cleared CS_FROM_KERNEL, IRET_FRAME 'forgot' to
1237	 * fix up the gap and long frame:
1238	 *
1239	 *  3 - original frame	(exception)
1240	 *  2 - ESPFIX block	(above)
1241	 *  6 - gap		(FIXUP_FRAME)
1242	 *  5 - long frame	(FIXUP_FRAME)
1243	 *  1 - orig_ax
1244	 */
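	/*
	 * 1 + 5 + 6 = 12 dwords (48 bytes), which lands exactly on the
	 * ESPFIX ss:sp pair saved above.
	 */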
1245	lss	(1+5+6)*4(%esp), %esp			# back to espfix stack
1246	jmp	.Lirq_return
1247#endif
1248SYM_CODE_END(asm_exc_nmi)
1249
1250.pushsection .text, "ax"
1251SYM_CODE_START(rewind_stack_do_exit)
1252	/* Prevent any naive code from trying to unwind to our caller. */
1253	xorl	%ebp, %ebp
1254
1255	movl	PER_CPU_VAR(cpu_current_top_of_stack), %esi
1256	leal	-TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp
1257
1258	call	do_exit
12591:	jmp 1b
1260SYM_CODE_END(rewind_stack_do_exit)
1261.popsection