v4.6
 
#include <linux/jump_label.h>

/*

 x86 function call convention, 64-bit:
 -------------------------------------
  arguments           |  callee-saved      | extra caller-saved | return
 [callee-clobbered]   |                    | [callee-clobbered] |
 ---------------------------------------------------------------------------
 rdi rsi rdx rcx r8-9 | rbx rbp [*] r12-15 | r10-11             | rax, rdx [**]

 ( rsp is obviously invariant across normal function calls. (gcc can 'merge'
   functions when it sees tail-call optimization possibilities) rflags is
   clobbered. Leftover arguments are passed over the stack frame.)

 [*]  In the frame-pointers case rbp is fixed to the stack frame.

 [**] for struct return values wider than 64 bits the return convention is a
      bit more complex: up to 128 bits width we return small structures
      straight in rax, rdx. For structures larger than that (3 words or
      larger) the caller puts a pointer to an on-stack return struct
      [allocated in the caller's stack frame] into the first argument - i.e.
      into rdi. All other arguments shift up by one in this case.
      Fortunately this case is rare in the kernel.

For 32-bit we have the following conventions - kernel is built with
-mregparm=3 and -freg-struct-return:

 x86 function calling convention, 32-bit:
 ----------------------------------------
  arguments         | callee-saved        | extra caller-saved | return
 [callee-clobbered] |                     | [callee-clobbered] |
 -------------------------------------------------------------------------
 eax edx ecx        | ebx edi esi ebp [*] | <none>             | eax, edx [**]

 ( here too esp is obviously invariant across normal function calls. eflags
   is clobbered. Leftover arguments are passed over the stack frame. )

 [*]  In the frame-pointers case ebp is fixed to the stack frame.

 [**] We build with -freg-struct-return, which on 32-bit means similar
      semantics as on 64-bit: edx can be used for a second return value
      (i.e. covering integer and structure sizes up to 64 bits) - after that
      it gets more complex and more expensive: 3-word or larger struct returns
      get done in the caller's frame and the pointer to the return struct goes
      into regparm0, i.e. eax - the other arguments shift up and the
      function's register parameters degenerate to regparm=2 in essence.

*/
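
/*
 * (Illustration, not from the original file: a minimal sketch of the
 * 64-bit convention above for a call like
 *	long f(long a, long b, long c);  r = f(1, 2, 3);
 * where the symbols f and r are hypothetical.)
 */
	movl	$1, %edi		/* arg1 -> rdi */
	movl	$2, %esi		/* arg2 -> rsi */
	movl	$3, %edx		/* arg3 -> rdx */
	call	f			/* clobbers arg regs, r10-r11, rflags */
	movq	%rax, r(%rip)		/* return value arrives in rax */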

#ifdef CONFIG_X86_64

/*
 * 64-bit system call stack frame layout defines and helpers,
 * for assembly code:
 */

/* The layout forms the "struct pt_regs" on the stack: */
/*
 * C ABI says these regs are callee-preserved. They aren't saved on kernel entry
 * unless syscall needs a complete, fully filled "struct pt_regs".
 */
#define R15		0*8
#define R14		1*8
#define R13		2*8
#define R12		3*8
#define RBP		4*8
#define RBX		5*8
/* These regs are callee-clobbered. Always saved on kernel entry. */
#define R11		6*8
#define R10		7*8
#define R9		8*8
#define R8		9*8
#define RAX		10*8
#define RCX		11*8
#define RDX		12*8
#define RSI		13*8
#define RDI		14*8
/*
 * On syscall entry, this is syscall#. On CPU exception, this is error code.
 * On hw interrupt, it's IRQ number:
 */
#define ORIG_RAX	15*8
/* Return frame for iretq */
#define RIP		16*8
#define CS		17*8
#define EFLAGS		18*8
#define RSP		19*8
#define SS		20*8

#define SIZEOF_PTREGS	21*8
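
/*
 * (Illustration, not from the original file: with pt_regs built at the
 * top of the kernel stack, entry code can address any saved register
 * through the offsets above.)
 */
	movq	ORIG_RAX(%rsp), %rax	/* syscall nr / error code / IRQ nr */
	movq	RIP(%rsp), %rcx		/* user return address from the iret frame */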

	.macro ALLOC_PT_GPREGS_ON_STACK addskip=0
	addq	$-(15*8+\addskip), %rsp
	.endm

	.macro SAVE_C_REGS_HELPER offset=0 rax=1 rcx=1 r8910=1 r11=1
	.if \r11
	movq %r11, 6*8+\offset(%rsp)
	.endif
	.if \r8910
	movq %r10, 7*8+\offset(%rsp)
	movq %r9,  8*8+\offset(%rsp)
	movq %r8,  9*8+\offset(%rsp)
	.endif
	.if \rax
	movq %rax, 10*8+\offset(%rsp)
	.endif
	.if \rcx
	movq %rcx, 11*8+\offset(%rsp)
	.endif
	movq %rdx, 12*8+\offset(%rsp)
	movq %rsi, 13*8+\offset(%rsp)
	movq %rdi, 14*8+\offset(%rsp)
	.endm
	.macro SAVE_C_REGS offset=0
	SAVE_C_REGS_HELPER \offset, 1, 1, 1, 1
	.endm
	.macro SAVE_C_REGS_EXCEPT_RAX_RCX offset=0
	SAVE_C_REGS_HELPER \offset, 0, 0, 1, 1
	.endm
	.macro SAVE_C_REGS_EXCEPT_R891011
	SAVE_C_REGS_HELPER 0, 1, 1, 0, 0
	.endm
	.macro SAVE_C_REGS_EXCEPT_RCX_R891011
	SAVE_C_REGS_HELPER 0, 1, 0, 0, 0
	.endm
	.macro SAVE_C_REGS_EXCEPT_RAX_RCX_R11
	SAVE_C_REGS_HELPER 0, 0, 0, 1, 0
	.endm

	.macro SAVE_EXTRA_REGS offset=0
	movq %r15, 0*8+\offset(%rsp)
	movq %r14, 1*8+\offset(%rsp)
	movq %r13, 2*8+\offset(%rsp)
	movq %r12, 3*8+\offset(%rsp)
	movq %rbp, 4*8+\offset(%rsp)
	movq %rbx, 5*8+\offset(%rsp)
	.endm

	.macro RESTORE_EXTRA_REGS offset=0
	movq 0*8+\offset(%rsp), %r15
	movq 1*8+\offset(%rsp), %r14
	movq 2*8+\offset(%rsp), %r13
	movq 3*8+\offset(%rsp), %r12
	movq 4*8+\offset(%rsp), %rbp
	movq 5*8+\offset(%rsp), %rbx
	.endm

	.macro ZERO_EXTRA_REGS
	xorl	%r15d, %r15d
	xorl	%r14d, %r14d
	xorl	%r13d, %r13d
	xorl	%r12d, %r12d
	xorl	%ebp, %ebp
	xorl	%ebx, %ebx
	.endm

	.macro RESTORE_C_REGS_HELPER rstor_rax=1, rstor_rcx=1, rstor_r11=1, rstor_r8910=1, rstor_rdx=1
	.if \rstor_r11
	movq 6*8(%rsp), %r11
	.endif
	.if \rstor_r8910
	movq 7*8(%rsp), %r10
	movq 8*8(%rsp), %r9
	movq 9*8(%rsp), %r8
	.endif
	.if \rstor_rax
	movq 10*8(%rsp), %rax
	.endif
	.if \rstor_rcx
	movq 11*8(%rsp), %rcx
	.endif
	.if \rstor_rdx
	movq 12*8(%rsp), %rdx
	.endif
	movq 13*8(%rsp), %rsi
	movq 14*8(%rsp), %rdi
	.endm
	.macro RESTORE_C_REGS
	RESTORE_C_REGS_HELPER 1,1,1,1,1
	.endm
	.macro RESTORE_C_REGS_EXCEPT_RAX
	RESTORE_C_REGS_HELPER 0,1,1,1,1
	.endm
	.macro RESTORE_C_REGS_EXCEPT_RCX
	RESTORE_C_REGS_HELPER 1,0,1,1,1
	.endm
	.macro RESTORE_C_REGS_EXCEPT_R11
	RESTORE_C_REGS_HELPER 1,1,0,1,1
	.endm
	.macro RESTORE_C_REGS_EXCEPT_RCX_R11
	RESTORE_C_REGS_HELPER 1,0,0,1,1
	.endm

	.macro REMOVE_PT_GPREGS_FROM_STACK addskip=0
	subq $-(15*8+\addskip), %rsp
	.endm
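
/*
 * (Illustration, not from the original file: the allocate/save macros
 * above pair with their restore/remove counterparts on a typical
 * entry path.)
 */
	ALLOC_PT_GPREGS_ON_STACK	/* reserve the 15*8 GP-register area */
	SAVE_C_REGS			/* fill in the callee-clobbered slots */
	SAVE_EXTRA_REGS			/* fill in the callee-saved slots */
	/* ... call into C here, with pt_regs at %rsp ... */
	RESTORE_EXTRA_REGS
	RESTORE_C_REGS
	REMOVE_PT_GPREGS_FROM_STACK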

	.macro icebp
	.byte 0xf1
	.endm
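
/*
 * (Descriptive note, not from the original file: 0xf1 is the
 * undocumented ICEBP/INT1 opcode, which raises a #DB debug exception;
 * it is emitted as a raw byte because not all assemblers provide a
 * mnemonic for it.)
 */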

#endif /* CONFIG_X86_64 */

/*
 * This does 'call enter_from_user_mode' unless we can avoid it based on
 * kernel config or using the static jump infrastructure.
 */
.macro CALL_enter_from_user_mode
#ifdef CONFIG_CONTEXT_TRACKING
#ifdef HAVE_JUMP_LABEL
	STATIC_JUMP_IF_FALSE .Lafter_call_\@, context_tracking_enabled, def=0
#endif
	call enter_from_user_mode
.Lafter_call_\@:
#endif
.endm
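
/*
 * (Descriptive note, not from the original file: the entry code
 * invokes CALL_enter_from_user_mode on paths arriving from user mode;
 * when jump labels are available, the call is patched out entirely
 * while context tracking is disabled.)
 */
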
v6.13.7
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/jump_label.h>
#include <asm/unwind_hints.h>
#include <asm/cpufeatures.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
#include <asm/asm-offsets.h>
#include <asm/processor-flags.h>
#include <asm/ptrace-abi.h>
#include <asm/msr.h>
#include <asm/nospec-branch.h>

/*

 x86 function call convention, 64-bit:
 -------------------------------------
  arguments           |  callee-saved      | extra caller-saved | return
 [callee-clobbered]   |                    | [callee-clobbered] |
 ---------------------------------------------------------------------------
 rdi rsi rdx rcx r8-9 | rbx rbp [*] r12-15 | r10-11             | rax, rdx [**]

 ( rsp is obviously invariant across normal function calls. (gcc can 'merge'
   functions when it sees tail-call optimization possibilities) rflags is
   clobbered. Leftover arguments are passed over the stack frame.)

 [*]  In the frame-pointers case rbp is fixed to the stack frame.

 [**] for struct return values wider than 64 bits the return convention is a
      bit more complex: up to 128 bits width we return small structures
      straight in rax, rdx. For structures larger than that (3 words or
      larger) the caller puts a pointer to an on-stack return struct
      [allocated in the caller's stack frame] into the first argument - i.e.
      into rdi. All other arguments shift up by one in this case.
      Fortunately this case is rare in the kernel.

For 32-bit we have the following conventions - kernel is built with
-mregparm=3 and -freg-struct-return:

 x86 function calling convention, 32-bit:
 ----------------------------------------
  arguments         | callee-saved        | extra caller-saved | return
 [callee-clobbered] |                     | [callee-clobbered] |
 -------------------------------------------------------------------------
 eax edx ecx        | ebx edi esi ebp [*] | <none>             | eax, edx [**]

 ( here too esp is obviously invariant across normal function calls. eflags
   is clobbered. Leftover arguments are passed over the stack frame. )

 [*]  In the frame-pointers case ebp is fixed to the stack frame.

 [**] We build with -freg-struct-return, which on 32-bit means similar
      semantics as on 64-bit: edx can be used for a second return value
      (i.e. covering integer and structure sizes up to 64 bits) - after that
      it gets more complex and more expensive: 3-word or larger struct returns
      get done in the caller's frame and the pointer to the return struct goes
      into regparm0, i.e. eax - the other arguments shift up and the
      function's register parameters degenerate to regparm=2 in essence.

*/

#ifdef CONFIG_X86_64

/*
 * 64-bit system call stack frame layout defines and helpers,
 * for assembly code:
 */

.macro PUSH_REGS rdx=%rdx rcx=%rcx rax=%rax save_ret=0 unwind_hint=1
	.if \save_ret
	pushq	%rsi		/* pt_regs->si */
	movq	8(%rsp), %rsi	/* temporarily store the return address in %rsi */
	movq	%rdi, 8(%rsp)	/* pt_regs->di (overwriting original return address) */
	.else
	pushq   %rdi		/* pt_regs->di */
	pushq   %rsi		/* pt_regs->si */
	.endif
	pushq	\rdx		/* pt_regs->dx */
	pushq   \rcx		/* pt_regs->cx */
	pushq   \rax		/* pt_regs->ax */
	pushq   %r8		/* pt_regs->r8 */
	pushq   %r9		/* pt_regs->r9 */
	pushq   %r10		/* pt_regs->r10 */
	pushq   %r11		/* pt_regs->r11 */
	pushq	%rbx		/* pt_regs->rbx */
	pushq	%rbp		/* pt_regs->rbp */
	pushq	%r12		/* pt_regs->r12 */
	pushq	%r13		/* pt_regs->r13 */
	pushq	%r14		/* pt_regs->r14 */
	pushq	%r15		/* pt_regs->r15 */

	.if \unwind_hint
	UNWIND_HINT_REGS
	.endif

	.if \save_ret
	pushq	%rsi		/* return address on top of stack */
	.endif
.endm
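
/*
 * (Descriptive note, not from the original file: with save_ret=1 the
 * macro is entered via a call, so the caller's return address sits on
 * top of the stack; it is parked in %rsi, the GP registers are pushed
 * in pt_regs order, and the return address is re-pushed last so the
 * invoking code can still return normally.)
 */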

.macro CLEAR_REGS clear_bp=1
	/*
	 * Sanitize registers of values that a speculation attack might
	 * otherwise want to exploit. The lower registers are likely clobbered
	 * well before they could be put to use in a speculative execution
	 * gadget.
	 */
	xorl	%esi,  %esi	/* nospec si  */
	xorl	%edx,  %edx	/* nospec dx  */
	xorl	%ecx,  %ecx	/* nospec cx  */
	xorl	%r8d,  %r8d	/* nospec r8  */
	xorl	%r9d,  %r9d	/* nospec r9  */
	xorl	%r10d, %r10d	/* nospec r10 */
	xorl	%r11d, %r11d	/* nospec r11 */
	xorl	%ebx,  %ebx	/* nospec rbx */
	.if \clear_bp
	xorl	%ebp,  %ebp	/* nospec rbp */
	.endif
	xorl	%r12d, %r12d	/* nospec r12 */
	xorl	%r13d, %r13d	/* nospec r13 */
	xorl	%r14d, %r14d	/* nospec r14 */
	xorl	%r15d, %r15d	/* nospec r15 */

.endm

.macro PUSH_AND_CLEAR_REGS rdx=%rdx rcx=%rcx rax=%rax save_ret=0 clear_bp=1 unwind_hint=1
	PUSH_REGS rdx=\rdx, rcx=\rcx, rax=\rax, save_ret=\save_ret unwind_hint=\unwind_hint
	CLEAR_REGS clear_bp=\clear_bp
.endm

.macro POP_REGS pop_rdi=1
	popq %r15
	popq %r14
	popq %r13
	popq %r12
	popq %rbp
	popq %rbx
	popq %r11
	popq %r10
	popq %r9
	popq %r8
	popq %rax
	popq %rcx
	popq %rdx
	popq %rsi
	.if \pop_rdi
	popq %rdi
	.endif
.endm
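
/*
 * (Illustration, not from the original file: a typical entry/exit
 * pairing of the macros above.)
 */
	PUSH_AND_CLEAR_REGS		/* build pt_regs, sanitize GPRs */
	/* ... call into C here ... */
	POP_REGS			/* unwind pt_regs before returning */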

#ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION

/*
 * MITIGATION_PAGE_TABLE_ISOLATION PGDs are 8k.  Flip bit 12 to switch between the two
 * halves:
 */
#define PTI_USER_PGTABLE_BIT		PAGE_SHIFT
#define PTI_USER_PGTABLE_MASK		(1 << PTI_USER_PGTABLE_BIT)
#define PTI_USER_PCID_BIT		X86_CR3_PTI_PCID_USER_BIT
#define PTI_USER_PCID_MASK		(1 << PTI_USER_PCID_BIT)
#define PTI_USER_PGTABLE_AND_PCID_MASK  (PTI_USER_PCID_MASK | PTI_USER_PGTABLE_MASK)
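
/*
 * (Descriptive note, not from the original file: with PAGE_SHIFT == 12,
 * setting PTI_USER_PGTABLE_MASK in CR3 selects the user half of the 8k
 * PGD and clearing it selects the kernel half; the PCID bit likewise
 * flips between the kernel and user ASIDs.)
 */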

.macro SET_NOFLUSH_BIT	reg:req
	bts	$X86_CR3_PCID_NOFLUSH_BIT, \reg
.endm

.macro ADJUST_KERNEL_CR3 reg:req
	ALTERNATIVE "", "SET_NOFLUSH_BIT \reg", X86_FEATURE_PCID
	/* Clear PCID and "MITIGATION_PAGE_TABLE_ISOLATION bit", point CR3 at kernel pagetables: */
	andq    $(~PTI_USER_PGTABLE_AND_PCID_MASK), \reg
.endm

.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	mov	%cr3, \scratch_reg
	ADJUST_KERNEL_CR3 \scratch_reg
	mov	\scratch_reg, %cr3
.Lend_\@:
.endm
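
/*
 * (Illustration, not from the original file: on entry from user space
 * the CR3 switch is performed with a register that is already dead at
 * that point; the register choice here is hypothetical.)
 */
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax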

#define THIS_CPU_user_pcid_flush_mask   \
	PER_CPU_VAR(cpu_tlbstate + TLB_STATE_user_pcid_flush_mask)

.macro SWITCH_TO_USER_CR3 scratch_reg:req scratch_reg2:req
	mov	%cr3, \scratch_reg

	ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID

	/*
	 * Test if the ASID needs a flush.
	 */
	movq	\scratch_reg, \scratch_reg2
	andq	$(0x7FF), \scratch_reg		/* mask ASID */
	bt	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jnc	.Lnoflush_\@

	/* Flush needed, clear the bit */
	btr	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	movq	\scratch_reg2, \scratch_reg
	jmp	.Lwrcr3_pcid_\@

.Lnoflush_\@:
	movq	\scratch_reg2, \scratch_reg
	SET_NOFLUSH_BIT \scratch_reg

.Lwrcr3_pcid_\@:
	/* Flip the ASID to the user version */
	orq	$(PTI_USER_PCID_MASK), \scratch_reg

.Lwrcr3_\@:
	/* Flip the PGD to the user version */
	orq     $(PTI_USER_PGTABLE_MASK), \scratch_reg
	mov	\scratch_reg, %cr3
.endm

.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	SWITCH_TO_USER_CR3 \scratch_reg \scratch_reg2
.Lend_\@:
.endm

.macro SWITCH_TO_USER_CR3_STACK	scratch_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	pushq	%rax
	SWITCH_TO_USER_CR3 scratch_reg=\scratch_reg scratch_reg2=%rax
	popq	%rax
.Lend_\@:
.endm

.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
	ALTERNATIVE "jmp .Ldone_\@", "", X86_FEATURE_PTI
	movq	%cr3, \scratch_reg
	movq	\scratch_reg, \save_reg
	/*
	 * Test the user pagetable bit. If set, then the user page tables
	 * are active. If clear, CR3 already has the kernel page table
	 * active.
	 */
	bt	$PTI_USER_PGTABLE_BIT, \scratch_reg
	jnc	.Ldone_\@

	ADJUST_KERNEL_CR3 \scratch_reg
	movq	\scratch_reg, %cr3

.Ldone_\@:
.endm

/* Restore CR3 from a kernel context. May restore a user CR3 value. */
.macro PARANOID_RESTORE_CR3 scratch_reg:req save_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI

	/*
	 * If CR3 contained the kernel page tables at the paranoid exception
	 * entry, then there is nothing to restore as CR3 is not modified while
	 * handling the exception.
	 */
	bt	$PTI_USER_PGTABLE_BIT, \save_reg
	jnc	.Lend_\@

	ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID

	/*
	 * Check if there's a pending flush for the user ASID we're
	 * about to set.
	 */
	movq	\save_reg, \scratch_reg
	andq	$(0x7FF), \scratch_reg
	btr	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jc	.Lwrcr3_\@

	SET_NOFLUSH_BIT \save_reg

.Lwrcr3_\@:
	movq	\save_reg, %cr3
.Lend_\@:
.endm
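
/*
 * (Illustration, not from the original file: the paranoid entry/exit
 * paths pair the save and restore variants so the original CR3 value
 * survives in a callee-saved register; the register choice shown
 * follows the pattern used by entry_64.S.)
 */
	SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
	/* ... handle the NMI/#MC/#DB ... */
	PARANOID_RESTORE_CR3 scratch_reg=%rax save_reg=%r14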

#else /* CONFIG_MITIGATION_PAGE_TABLE_ISOLATION=n: */

.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
.endm
.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req
.endm
.macro SWITCH_TO_USER_CR3_STACK scratch_reg:req
.endm
.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
.endm
.macro PARANOID_RESTORE_CR3 scratch_reg:req save_reg:req
.endm

#endif

/*
 * IBRS kernel mitigation for Spectre_v2.
 *
 * Assumes full context is established (PUSH_REGS, CR3 and GS) and it clobbers
 * the regs it uses (AX, CX, DX). Must be called before the first RET
 * instruction (NOTE! UNTRAIN_RET includes a RET instruction)
 *
 * The optional argument is used to save/restore the current value,
 * which is used on the paranoid paths.
 *
 * Assumes x86_spec_ctrl_{base,current} to have SPEC_CTRL_IBRS set.
 */
.macro IBRS_ENTER save_reg
#ifdef CONFIG_MITIGATION_IBRS_ENTRY
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS
	movl	$MSR_IA32_SPEC_CTRL, %ecx

.ifnb \save_reg
	rdmsr
	shl	$32, %rdx
	or	%rdx, %rax
	mov	%rax, \save_reg
	test	$SPEC_CTRL_IBRS, %eax
	jz	.Ldo_wrmsr_\@
	lfence
	jmp	.Lend_\@
.Ldo_wrmsr_\@:
.endif

	movq	PER_CPU_VAR(x86_spec_ctrl_current), %rdx
	movl	%edx, %eax
	shr	$32, %rdx
	wrmsr
.Lend_\@:
#endif
.endm

/*
 * Similar to IBRS_ENTER, requires KERNEL GS,CR3 and clobbers (AX, CX, DX)
 * regs. Must be called after the last RET.
 */
.macro IBRS_EXIT save_reg
#ifdef CONFIG_MITIGATION_IBRS_ENTRY
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS
	movl	$MSR_IA32_SPEC_CTRL, %ecx

.ifnb \save_reg
	mov	\save_reg, %rdx
.else
	movq	PER_CPU_VAR(x86_spec_ctrl_current), %rdx
	andl	$(~SPEC_CTRL_IBRS), %edx
.endif

	movl	%edx, %eax
	shr	$32, %rdx
	wrmsr
.Lend_\@:
#endif
.endm
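
/*
 * (Illustration, not from the original file: the common pattern
 * brackets the C-level work between IBRS_ENTER and IBRS_EXIT; the
 * paranoid paths additionally pass a save_reg to preserve the
 * previous MSR_IA32_SPEC_CTRL value.)
 */
	IBRS_ENTER			/* set SPEC_CTRL_IBRS */
	/* ... do the syscall/exception work in C ... */
	IBRS_EXIT			/* clear SPEC_CTRL_IBRS again */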

/*
 * Mitigate Spectre v1 for conditional swapgs code paths.
 *
 * FENCE_SWAPGS_USER_ENTRY is used in the user entry swapgs code path, to
 * prevent a speculative swapgs when coming from kernel space.
 *
 * FENCE_SWAPGS_KERNEL_ENTRY is used in the kernel entry non-swapgs code path,
 * to prevent the swapgs from getting speculatively skipped when coming from
 * user space.
 */
.macro FENCE_SWAPGS_USER_ENTRY
	ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_USER
.endm
.macro FENCE_SWAPGS_KERNEL_ENTRY
	ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_KERNEL
.endm

.macro STACKLEAK_ERASE_NOCLOBBER
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	PUSH_AND_CLEAR_REGS
	call stackleak_erase
	POP_REGS
#endif
.endm

.macro SAVE_AND_SET_GSBASE scratch_reg:req save_reg:req
	rdgsbase \save_reg
	GET_PERCPU_BASE \scratch_reg
	wrgsbase \scratch_reg
.endm

#else /* CONFIG_X86_64 */
# undef		UNWIND_HINT_IRET_REGS
# define	UNWIND_HINT_IRET_REGS
#endif /* !CONFIG_X86_64 */

.macro STACKLEAK_ERASE
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	call stackleak_erase
#endif
.endm

#ifdef CONFIG_SMP

/*
 * CPU/node NR is loaded from the limit (size) field of a special segment
 * descriptor entry in GDT.
 */
.macro LOAD_CPU_AND_NODE_SEG_LIMIT reg:req
	movq	$__CPUNODE_SEG, \reg
	lsl	\reg, \reg
.endm

/*
 * Fetch the per-CPU GSBASE value for this processor and put it in @reg.
 * We normally use %gs for accessing per-CPU data, but we are setting up
 * %gs here and obviously can not use %gs itself to access per-CPU data.
 *
 * Do not use RDPID, because KVM loads guest's TSC_AUX on vm-entry and
 * may not restore the host's value until the CPU returns to userspace.
 * Thus the kernel would consume a guest's TSC_AUX if an NMI arrives
 * while running KVM's run loop.
 */
.macro GET_PERCPU_BASE reg:req
	LOAD_CPU_AND_NODE_SEG_LIMIT \reg
	andq	$VDSO_CPUNODE_MASK, \reg
	movq	__per_cpu_offset(, \reg, 8), \reg
.endm
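
/*
 * (Descriptive note, not from the original file: lsl loads the limit
 * field of the __CPUNODE_SEG descriptor, in which the CPU number is
 * encoded in the low bits (extracted by VDSO_CPUNODE_MASK) with the
 * node number above it; the CPU number then indexes __per_cpu_offset
 * to obtain this CPU's per-CPU base.)
 */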

#else

.macro GET_PERCPU_BASE reg:req
	movq	pcpu_unit_offsets(%rip), \reg
.endm

#endif /* CONFIG_SMP */

#ifdef CONFIG_X86_64

/* rdi:	arg1 ... normal C conventions. rax is saved/restored. */
.macro THUNK name, func
SYM_FUNC_START(\name)
	pushq %rbp
	movq %rsp, %rbp

	pushq %rdi
	pushq %rsi
	pushq %rdx
	pushq %rcx
	pushq %rax
	pushq %r8
	pushq %r9
	pushq %r10
	pushq %r11

	call \func

	popq %r11
	popq %r10
	popq %r9
	popq %r8
	popq %rax
	popq %rcx
	popq %rdx
	popq %rsi
	popq %rdi
	popq %rbp
	RET
SYM_FUNC_END(\name)
	_ASM_NOKPROBE(\name)
.endm
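
/*
 * (Hypothetical usage, not from the original file, modeled on the
 * kernel's preempt thunks: wrap a C function so it can be called from
 * contexts that must not clobber caller-saved registers.)
 */
	THUNK preempt_schedule_thunk, preempt_schedule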

#else /* CONFIG_X86_32 */

/* put return address in eax (arg1) */
.macro THUNK name, func, put_ret_addr_in_eax=0
SYM_CODE_START_NOALIGN(\name)
	pushl %eax
	pushl %ecx
	pushl %edx

	.if \put_ret_addr_in_eax
	/* Place EIP in the arg1 */
	movl 3*4(%esp), %eax
	.endif

	call \func
	popl %edx
	popl %ecx
	popl %eax
	RET
	_ASM_NOKPROBE(\name)
SYM_CODE_END(\name)
	.endm

#endif