v6.2
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/jump_label.h>
#include <asm/unwind_hints.h>
#include <asm/cpufeatures.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
#include <asm/asm-offsets.h>
#include <asm/processor-flags.h>
#include <asm/ptrace-abi.h>
#include <asm/msr.h>
#include <asm/nospec-branch.h>

/*

 x86 function call convention, 64-bit:
 -------------------------------------
  arguments           |  callee-saved      | extra caller-saved | return
 [callee-clobbered]   |                    | [callee-clobbered] |
 ---------------------------------------------------------------------------
 rdi rsi rdx rcx r8-9 | rbx rbp [*] r12-15 | r10-11             | rax, rdx [**]

 ( rsp is obviously invariant across normal function calls. (gcc can 'merge'
   functions when it sees tail-call optimization possibilities) rflags is
   clobbered. Leftover arguments are passed over the stack frame.)

 [*]  In the frame-pointers case rbp is fixed to the stack frame.

 [**] for struct return values wider than 64 bits the return convention is a
      bit more complex: up to 128 bits width we return small structures
      straight in rax, rdx. For structures larger than that (3 words or
      larger) the caller puts a pointer to an on-stack return struct
      [allocated in the caller's stack frame] into the first argument - i.e.
      into rdi. All other arguments shift up by one in this case.
      Fortunately this case is rare in the kernel.

For 32-bit we have the following conventions - kernel is built with
-mregparm=3 and -freg-struct-return:

 x86 function calling convention, 32-bit:
 ----------------------------------------
  arguments         | callee-saved        | extra caller-saved | return
 [callee-clobbered] |                     | [callee-clobbered] |
 -------------------------------------------------------------------------
 eax edx ecx        | ebx edi esi ebp [*] | <none>             | eax, edx [**]

 ( here too esp is obviously invariant across normal function calls. eflags
   is clobbered. Leftover arguments are passed over the stack frame. )

 [*]  In the frame-pointers case ebp is fixed to the stack frame.

 [**] We build with -freg-struct-return, which on 32-bit means similar
      semantics as on 64-bit: edx can be used for a second return value
      (i.e. covering integer and structure sizes up to 64 bits) - after that
      it gets more complex and more expensive: 3-word or larger struct returns
      get done in the caller's frame and the pointer to the return struct goes
      into regparm0, i.e. eax - the other arguments shift up and the
      function's register parameters degenerate to regparm=2 in essence.

*/
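
/*
 * Illustrative example (not part of the original header): under the
 * 64-bit convention above, a hypothetical
 *
 *	long f(long a, long b, long c, long d, long e, long g, long h);
 *
 * receives its first six arguments in rdi, rsi, rdx, rcx, r8 and r9;
 * the seventh argument h spills to the stack frame, and the result
 * returns in rax.  On 32-bit, -mregparm=3 puts only the first three
 * arguments in registers (eax, edx, ecx); the rest go on the stack.
 */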

#ifdef CONFIG_X86_64

/*
 * 64-bit system call stack frame layout defines and helpers,
 * for assembly code:
 */

.macro PUSH_REGS rdx=%rdx rcx=%rcx rax=%rax save_ret=0
	.if \save_ret
	pushq	%rsi		/* pt_regs->si */
	movq	8(%rsp), %rsi	/* temporarily store the return address in %rsi */
	movq	%rdi, 8(%rsp)	/* pt_regs->di (overwriting original return address) */
	.else
	pushq   %rdi		/* pt_regs->di */
	pushq   %rsi		/* pt_regs->si */
	.endif
	pushq	\rdx		/* pt_regs->dx */
	pushq   \rcx		/* pt_regs->cx */
	pushq   \rax		/* pt_regs->ax */
	pushq   %r8		/* pt_regs->r8 */
	pushq   %r9		/* pt_regs->r9 */
	pushq   %r10		/* pt_regs->r10 */
	pushq   %r11		/* pt_regs->r11 */
	pushq	%rbx		/* pt_regs->rbx */
	pushq	%rbp		/* pt_regs->rbp */
	pushq	%r12		/* pt_regs->r12 */
	pushq	%r13		/* pt_regs->r13 */
	pushq	%r14		/* pt_regs->r14 */
	pushq	%r15		/* pt_regs->r15 */
	UNWIND_HINT_REGS

	.if \save_ret
	pushq	%rsi		/* return address on top of stack */
	.endif
.endm
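
/*
 * Illustrative stack picture (not part of the original file): with
 * save_ret=1 the macro is entered with the caller's return address on
 * top of the stack; the %rsi push and the two movq's above shuffle it
 * aside so the slots line up with struct pt_regs:
 *
 *	on entry:		after PUSH_REGS save_ret=1:
 *	[ return address ]	[ return address ]	<- %rsp
 *				[ pt_regs->r15	 ]
 *				  ...
 *				[ pt_regs->si	 ]
 *				[ pt_regs->di	 ]	(old return-address slot)
 */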

.macro CLEAR_REGS
	/*
	 * Sanitize registers of values that a speculation attack might
	 * otherwise want to exploit. The lower registers are likely clobbered
	 * well before they could be put to use in a speculative execution
	 * gadget.
	 */
	xorl	%esi,  %esi	/* nospec si  */
	xorl	%edx,  %edx	/* nospec dx  */
	xorl	%ecx,  %ecx	/* nospec cx  */
	xorl	%r8d,  %r8d	/* nospec r8  */
	xorl	%r9d,  %r9d	/* nospec r9  */
	xorl	%r10d, %r10d	/* nospec r10 */
	xorl	%r11d, %r11d	/* nospec r11 */
	xorl	%ebx,  %ebx	/* nospec rbx */
	xorl	%ebp,  %ebp	/* nospec rbp */
	xorl	%r12d, %r12d	/* nospec r12 */
	xorl	%r13d, %r13d	/* nospec r13 */
	xorl	%r14d, %r14d	/* nospec r14 */
	xorl	%r15d, %r15d	/* nospec r15 */

.endm

.macro PUSH_AND_CLEAR_REGS rdx=%rdx rcx=%rcx rax=%rax save_ret=0
	PUSH_REGS rdx=\rdx, rcx=\rcx, rax=\rax, save_ret=\save_ret
	CLEAR_REGS
.endm

.macro POP_REGS pop_rdi=1
	popq %r15
	popq %r14
	popq %r13
	popq %r12
	popq %rbp
	popq %rbx
	popq %r11
	popq %r10
	popq %r9
	popq %r8
	popq %rax
	popq %rcx
	popq %rdx
	popq %rsi
	.if \pop_rdi
	popq %rdi
	.endif
.endm
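
/*
 * Illustrative usage sketch (not from the kernel tree; the symbol
 * names example_entry_stub and example_c_handler are made up): a
 * minimal entry-style sequence built on the two macros above.
 */
#if 0
SYM_CODE_START(example_entry_stub)
	pushq	$-1			/* orig_ax slot: no error code */
	PUSH_AND_CLEAR_REGS		/* build pt_regs, zero clobberable regs */
	movq	%rsp, %rdi		/* pt_regs pointer is the first C argument */
	call	example_c_handler
	POP_REGS			/* restore registers from pt_regs */
	addq	$8, %rsp		/* drop the orig_ax slot */
	iretq
SYM_CODE_END(example_entry_stub)
#endif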

#ifdef CONFIG_PAGE_TABLE_ISOLATION

/*
 * PAGE_TABLE_ISOLATION PGDs are 8k.  Flip bit 12 to switch between the two
 * halves:
 */
#define PTI_USER_PGTABLE_BIT		PAGE_SHIFT
#define PTI_USER_PGTABLE_MASK		(1 << PTI_USER_PGTABLE_BIT)
#define PTI_USER_PCID_BIT		X86_CR3_PTI_PCID_USER_BIT
#define PTI_USER_PCID_MASK		(1 << PTI_USER_PCID_BIT)
#define PTI_USER_PGTABLE_AND_PCID_MASK  (PTI_USER_PCID_MASK | PTI_USER_PGTABLE_MASK)
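
/*
 * Worked example (illustrative, not part of the original file): with
 * PAGE_SHIFT == 12, PTI_USER_PGTABLE_MASK is 1 << 12 == 0x1000, and
 * with X86_CR3_PTI_PCID_USER_BIT == 11, PTI_USER_PCID_MASK is 0x800.
 * A kernel PGD at physical 0x1234000 thus has its user half at
 * 0x1235000, and clearing PTI_USER_PGTABLE_AND_PCID_MASK (0x1800)
 * below maps any user CR3/PCID pair back to the kernel PGD and PCID.
 */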

.macro SET_NOFLUSH_BIT	reg:req
	bts	$X86_CR3_PCID_NOFLUSH_BIT, \reg
.endm

.macro ADJUST_KERNEL_CR3 reg:req
	ALTERNATIVE "", "SET_NOFLUSH_BIT \reg", X86_FEATURE_PCID
	/* Clear PCID and "PAGE_TABLE_ISOLATION bit", point CR3 at kernel pagetables: */
	andq    $(~PTI_USER_PGTABLE_AND_PCID_MASK), \reg
.endm

.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	mov	%cr3, \scratch_reg
	ADJUST_KERNEL_CR3 \scratch_reg
	mov	\scratch_reg, %cr3
.Lend_\@:
.endm

#define THIS_CPU_user_pcid_flush_mask   \
	PER_CPU_VAR(cpu_tlbstate) + TLB_STATE_user_pcid_flush_mask

.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	mov	%cr3, \scratch_reg

	ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID

	/*
	 * Test if the ASID needs a flush.
	 */
	movq	\scratch_reg, \scratch_reg2
	andq	$(0x7FF), \scratch_reg		/* mask ASID */
	bt	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jnc	.Lnoflush_\@

	/* Flush needed, clear the bit */
	btr	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	movq	\scratch_reg2, \scratch_reg
	jmp	.Lwrcr3_pcid_\@

.Lnoflush_\@:
	movq	\scratch_reg2, \scratch_reg
	SET_NOFLUSH_BIT \scratch_reg

.Lwrcr3_pcid_\@:
	/* Flip the ASID to the user version */
	orq	$(PTI_USER_PCID_MASK), \scratch_reg

.Lwrcr3_\@:
	/* Flip the PGD to the user version */
	orq     $(PTI_USER_PGTABLE_MASK), \scratch_reg
	mov	\scratch_reg, %cr3
.Lend_\@:
.endm
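
/*
 * Worked example (illustrative): CR3 bits 11:0 hold the PCID, and bit
 * 11 of it is claimed above as the user/kernel PCID bit, so the kernel
 * ASID proper lives in bits 10:0 - hence the $(0x7FF) mask.  For a CR3
 * value of (pgd | 0x001) the masked ASID is 1, which is then used as a
 * bit index into this CPU's user_pcid_flush_mask to pick between the
 * flushing and the NOFLUSH CR3 write.
 */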

.macro SWITCH_TO_USER_CR3_STACK	scratch_reg:req
	pushq	%rax
	SWITCH_TO_USER_CR3_NOSTACK scratch_reg=\scratch_reg scratch_reg2=%rax
	popq	%rax
.endm

.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
	ALTERNATIVE "jmp .Ldone_\@", "", X86_FEATURE_PTI
	movq	%cr3, \scratch_reg
	movq	\scratch_reg, \save_reg
	/*
	 * Test the user pagetable bit. If set, then the user page tables
	 * are active. If clear CR3 already has the kernel page table
	 * active.
	 */
	bt	$PTI_USER_PGTABLE_BIT, \scratch_reg
	jnc	.Ldone_\@

	ADJUST_KERNEL_CR3 \scratch_reg
	movq	\scratch_reg, %cr3

.Ldone_\@:
.endm

.macro RESTORE_CR3 scratch_reg:req save_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI

	ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID

	/*
	 * KERNEL pages can always resume with NOFLUSH as we do
	 * explicit flushes.
	 */
	bt	$PTI_USER_PGTABLE_BIT, \save_reg
	jnc	.Lnoflush_\@

	/*
	 * Check if there's a pending flush for the user ASID we're
	 * about to set.
	 */
	movq	\save_reg, \scratch_reg
	andq	$(0x7FF), \scratch_reg
	bt	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jnc	.Lnoflush_\@

	btr	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jmp	.Lwrcr3_\@

.Lnoflush_\@:
	SET_NOFLUSH_BIT \save_reg

.Lwrcr3_\@:
	/*
	 * The CR3 write could be avoided when not changing its value,
	 * but would require a CR3 read *and* a scratch register.
	 */
	movq	\save_reg, %cr3
.Lend_\@:
.endm

#else /* CONFIG_PAGE_TABLE_ISOLATION=n: */

.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
.endm
.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req
.endm
.macro SWITCH_TO_USER_CR3_STACK scratch_reg:req
.endm
.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
.endm
.macro RESTORE_CR3 scratch_reg:req save_reg:req
.endm

#endif

/*
 * IBRS kernel mitigation for Spectre_v2.
 *
 * Assumes full context is established (PUSH_REGS, CR3 and GS) and it clobbers
 * the regs it uses (AX, CX, DX). Must be called before the first RET
 * instruction (NOTE! UNTRAIN_RET includes a RET instruction)
 *
 * The optional argument is used to save/restore the current value,
 * which is used on the paranoid paths.
 *
 * Assumes x86_spec_ctrl_{base,current} to have SPEC_CTRL_IBRS set.
 */
.macro IBRS_ENTER save_reg
#ifdef CONFIG_CPU_IBRS_ENTRY
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS
	movl	$MSR_IA32_SPEC_CTRL, %ecx

.ifnb \save_reg
	rdmsr
	shl	$32, %rdx
	or	%rdx, %rax
	mov	%rax, \save_reg
	test	$SPEC_CTRL_IBRS, %eax
	jz	.Ldo_wrmsr_\@
	lfence
	jmp	.Lend_\@
.Ldo_wrmsr_\@:
.endif

	movq	PER_CPU_VAR(x86_spec_ctrl_current), %rdx
	movl	%edx, %eax
	shr	$32, %rdx
	wrmsr
.Lend_\@:
#endif
.endm
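
/*
 * Illustrative note (not part of the original file): rdmsr/wrmsr move
 * the 64-bit MSR value through the edx:eax pair.  E.g. a value of
 * 0x0000000100000002 is read back as %edx = 0x1, %eax = 0x2; the
 * 'shl $32, %rdx; or %rdx, %rax' above rebuilds the full value in
 * \save_reg, and the 'movl %edx, %eax; shr $32, %rdx' before wrmsr
 * performs the inverse split.
 */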

/*
 * Similar to IBRS_ENTER, requires KERNEL GS,CR3 and clobbers (AX, CX, DX)
 * regs. Must be called after the last RET.
 */
.macro IBRS_EXIT save_reg
#ifdef CONFIG_CPU_IBRS_ENTRY
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS
	movl	$MSR_IA32_SPEC_CTRL, %ecx

.ifnb \save_reg
	mov	\save_reg, %rdx
.else
	movq	PER_CPU_VAR(x86_spec_ctrl_current), %rdx
	andl	$(~SPEC_CTRL_IBRS), %edx
.endif

	movl	%edx, %eax
	shr	$32, %rdx
	wrmsr
.Lend_\@:
#endif
.endm

/*
 * Mitigate Spectre v1 for conditional swapgs code paths.
 *
 * FENCE_SWAPGS_USER_ENTRY is used in the user entry swapgs code path, to
 * prevent a speculative swapgs when coming from kernel space.
 *
 * FENCE_SWAPGS_KERNEL_ENTRY is used in the kernel entry non-swapgs code path,
 * to prevent the swapgs from getting speculatively skipped when coming from
 * user space.
 */
.macro FENCE_SWAPGS_USER_ENTRY
	ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_USER
.endm
.macro FENCE_SWAPGS_KERNEL_ENTRY
	ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_KERNEL
.endm

.macro STACKLEAK_ERASE_NOCLOBBER
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	PUSH_AND_CLEAR_REGS
	call stackleak_erase
	POP_REGS
#endif
.endm

.macro SAVE_AND_SET_GSBASE scratch_reg:req save_reg:req
	rdgsbase \save_reg
	GET_PERCPU_BASE \scratch_reg
	wrgsbase \scratch_reg
.endm

#else /* CONFIG_X86_64 */
# undef		UNWIND_HINT_IRET_REGS
# define	UNWIND_HINT_IRET_REGS
#endif /* !CONFIG_X86_64 */

.macro STACKLEAK_ERASE
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	call stackleak_erase
#endif
.endm

#ifdef CONFIG_SMP

/*
 * CPU/node NR is loaded from the limit (size) field of a special segment
 * descriptor entry in GDT.
 */
.macro LOAD_CPU_AND_NODE_SEG_LIMIT reg:req
	movq	$__CPUNODE_SEG, \reg
	lsl	\reg, \reg
.endm

/*
 * Fetch the per-CPU GSBASE value for this processor and put it in @reg.
 * We normally use %gs for accessing per-CPU data, but we are setting up
 * %gs here and obviously can not use %gs itself to access per-CPU data.
 *
 * Do not use RDPID, because KVM loads guest's TSC_AUX on vm-entry and
 * may not restore the host's value until the CPU returns to userspace.
 * Thus the kernel would consume a guest's TSC_AUX if an NMI arrives
 * while running KVM's run loop.
 */
.macro GET_PERCPU_BASE reg:req
	LOAD_CPU_AND_NODE_SEG_LIMIT \reg
	andq	$VDSO_CPUNODE_MASK, \reg
	movq	__per_cpu_offset(, \reg, 8), \reg
.endm
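
/*
 * Worked example (illustrative): lsl loads the segment limit of the
 * descriptor selected by __CPUNODE_SEG.  That limit encodes the CPU
 * number in its low bits (VDSO_CPUNODE_MASK wide; 0xfff in current
 * kernels, an assumption here) with the node number above them.  On
 * CPU 3 the andq leaves 3 in \reg, and the movq then loads
 * __per_cpu_offset[3] - each array entry is 8 bytes, hence the ,8
 * scale.
 */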

#else

.macro GET_PERCPU_BASE reg:req
	movq	pcpu_unit_offsets(%rip), \reg
.endm

#endif /* CONFIG_SMP */
v4.17
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/jump_label.h>
#include <asm/unwind_hints.h>
#include <asm/cpufeatures.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
#include <asm/asm-offsets.h>
#include <asm/processor-flags.h>

/*

 x86 function call convention, 64-bit:
 -------------------------------------
  arguments           |  callee-saved      | extra caller-saved | return
 [callee-clobbered]   |                    | [callee-clobbered] |
 ---------------------------------------------------------------------------
 rdi rsi rdx rcx r8-9 | rbx rbp [*] r12-15 | r10-11             | rax, rdx [**]

 ( rsp is obviously invariant across normal function calls. (gcc can 'merge'
   functions when it sees tail-call optimization possibilities) rflags is
   clobbered. Leftover arguments are passed over the stack frame.)

 [*]  In the frame-pointers case rbp is fixed to the stack frame.

 [**] for struct return values wider than 64 bits the return convention is a
      bit more complex: up to 128 bits width we return small structures
      straight in rax, rdx. For structures larger than that (3 words or
      larger) the caller puts a pointer to an on-stack return struct
      [allocated in the caller's stack frame] into the first argument - i.e.
      into rdi. All other arguments shift up by one in this case.
      Fortunately this case is rare in the kernel.

For 32-bit we have the following conventions - kernel is built with
-mregparm=3 and -freg-struct-return:

 x86 function calling convention, 32-bit:
 ----------------------------------------
  arguments         | callee-saved        | extra caller-saved | return
 [callee-clobbered] |                     | [callee-clobbered] |
 -------------------------------------------------------------------------
 eax edx ecx        | ebx edi esi ebp [*] | <none>             | eax, edx [**]

 ( here too esp is obviously invariant across normal function calls. eflags
   is clobbered. Leftover arguments are passed over the stack frame. )

 [*]  In the frame-pointers case ebp is fixed to the stack frame.

 [**] We build with -freg-struct-return, which on 32-bit means similar
      semantics as on 64-bit: edx can be used for a second return value
      (i.e. covering integer and structure sizes up to 64 bits) - after that
      it gets more complex and more expensive: 3-word or larger struct returns
      get done in the caller's frame and the pointer to the return struct goes
      into regparm0, i.e. eax - the other arguments shift up and the
      function's register parameters degenerate to regparm=2 in essence.

*/

#ifdef CONFIG_X86_64

/*
 * 64-bit system call stack frame layout defines and helpers,
 * for assembly code:
 */

/* The layout forms the "struct pt_regs" on the stack: */
/*
 * C ABI says these regs are callee-preserved. They aren't saved on kernel entry
 * unless syscall needs a complete, fully filled "struct pt_regs".
 */
#define R15		0*8
#define R14		1*8
#define R13		2*8
#define R12		3*8
#define RBP		4*8
#define RBX		5*8
/* These regs are callee-clobbered. Always saved on kernel entry. */
#define R11		6*8
#define R10		7*8
#define R9		8*8
#define R8		9*8
#define RAX		10*8
#define RCX		11*8
#define RDX		12*8
#define RSI		13*8
#define RDI		14*8
/*
 * On syscall entry, this is syscall#. On CPU exception, this is error code.
 * On hw interrupt, it's IRQ number:
 */
#define ORIG_RAX	15*8
/* Return frame for iretq */
#define RIP		16*8
#define CS		17*8
#define EFLAGS		18*8
#define RSP		19*8
#define SS		20*8

#define SIZEOF_PTREGS	21*8
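
/*
 * Worked example (illustrative): the offsets above are byte offsets
 * into the frame, so RBP = 4*8 = 32, ORIG_RAX = 15*8 = 120, RIP =
 * 16*8 = 128 and SIZEOF_PTREGS = 21*8 = 168.  With a full pt_regs
 * frame on top of the stack, 'movq RIP(%rsp), %r11' fetches the saved
 * rip and 'addq $SIZEOF_PTREGS, %rsp' would discard the whole frame.
 */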

.macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax save_ret=0
	/*
	 * Push registers and sanitize registers of values that a
	 * speculation attack might otherwise want to exploit. The
	 * lower registers are likely clobbered well before they
	 * could be put to use in a speculative execution gadget.
	 * Interleave XOR with PUSH for better uop scheduling:
	 */
	.if \save_ret
	pushq	%rsi		/* pt_regs->si */
	movq	8(%rsp), %rsi	/* temporarily store the return address in %rsi */
	movq	%rdi, 8(%rsp)	/* pt_regs->di (overwriting original return address) */
	.else
	pushq   %rdi		/* pt_regs->di */
	pushq   %rsi		/* pt_regs->si */
	.endif
	pushq	\rdx		/* pt_regs->dx */
	xorl	%edx, %edx	/* nospec   dx */
	pushq   %rcx		/* pt_regs->cx */
	xorl	%ecx, %ecx	/* nospec   cx */
	pushq   \rax		/* pt_regs->ax */
	pushq   %r8		/* pt_regs->r8 */
	xorl	%r8d, %r8d	/* nospec   r8 */
	pushq   %r9		/* pt_regs->r9 */
	xorl	%r9d, %r9d	/* nospec   r9 */
	pushq   %r10		/* pt_regs->r10 */
	xorl	%r10d, %r10d	/* nospec   r10 */
	pushq   %r11		/* pt_regs->r11 */
	xorl	%r11d, %r11d	/* nospec   r11*/
	pushq	%rbx		/* pt_regs->rbx */
	xorl    %ebx, %ebx	/* nospec   rbx*/
	pushq	%rbp		/* pt_regs->rbp */
	xorl    %ebp, %ebp	/* nospec   rbp*/
	pushq	%r12		/* pt_regs->r12 */
	xorl	%r12d, %r12d	/* nospec   r12*/
	pushq	%r13		/* pt_regs->r13 */
	xorl	%r13d, %r13d	/* nospec   r13*/
	pushq	%r14		/* pt_regs->r14 */
	xorl	%r14d, %r14d	/* nospec   r14*/
	pushq	%r15		/* pt_regs->r15 */
	xorl	%r15d, %r15d	/* nospec   r15*/
	UNWIND_HINT_REGS
	.if \save_ret
	pushq	%rsi		/* return address on top of stack */
	.endif
.endm

.macro POP_REGS pop_rdi=1 skip_r11rcx=0
	popq %r15
	popq %r14
	popq %r13
	popq %r12
	popq %rbp
	popq %rbx
	.if \skip_r11rcx
	popq %rsi
	.else
	popq %r11
	.endif
	popq %r10
	popq %r9
	popq %r8
	popq %rax
	.if \skip_r11rcx
	popq %rsi
	.else
	popq %rcx
	.endif
	popq %rdx
	popq %rsi
	.if \pop_rdi
	popq %rdi
	.endif
.endm
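
/*
 * Illustrative note (an assumption from the sysret ABI, not original
 * file text): sysret reloads the user rip from %rcx and rflags from
 * %r11, so a syscall-exit path ending in sysret has no use for the
 * pt_regs values of rcx/r11.  'POP_REGS pop_rdi=0 skip_r11rcx=1' pops
 * those two slots into %rsi merely to discard them; the real %rsi is
 * popped later in the macro.
 */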

/*
 * This is a sneaky trick to help the unwinder find pt_regs on the stack.  The
 * frame pointer is replaced with an encoded pointer to pt_regs.  The encoding
 * is just setting the LSB, which makes it an invalid stack address and is also
 * a signal to the unwinder that it's a pt_regs pointer in disguise.
 *
 * NOTE: This macro must be used *after* PUSH_AND_CLEAR_REGS because it corrupts
 * the original rbp.
 */
.macro ENCODE_FRAME_POINTER ptregs_offset=0
#ifdef CONFIG_FRAME_POINTER
	leaq 1+\ptregs_offset(%rsp), %rbp
#endif
.endm
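
/*
 * Worked example (illustrative): with ptregs_offset=0 the leaq sets
 * %rbp = %rsp + 1.  Real stack addresses are at least 8-byte aligned,
 * so a set bit 0 can never be a genuine frame pointer; the unwinder
 * spots it and masks the bit off again to recover the pt_regs address.
 */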

#ifdef CONFIG_PAGE_TABLE_ISOLATION

/*
 * PAGE_TABLE_ISOLATION PGDs are 8k.  Flip bit 12 to switch between the two
 * halves:
 */
#define PTI_USER_PGTABLE_BIT		PAGE_SHIFT
#define PTI_USER_PGTABLE_MASK		(1 << PTI_USER_PGTABLE_BIT)
#define PTI_USER_PCID_BIT		X86_CR3_PTI_PCID_USER_BIT
#define PTI_USER_PCID_MASK		(1 << PTI_USER_PCID_BIT)
#define PTI_USER_PGTABLE_AND_PCID_MASK  (PTI_USER_PCID_MASK | PTI_USER_PGTABLE_MASK)

.macro SET_NOFLUSH_BIT	reg:req
	bts	$X86_CR3_PCID_NOFLUSH_BIT, \reg
.endm

.macro ADJUST_KERNEL_CR3 reg:req
	ALTERNATIVE "", "SET_NOFLUSH_BIT \reg", X86_FEATURE_PCID
	/* Clear PCID and "PAGE_TABLE_ISOLATION bit", point CR3 at kernel pagetables: */
	andq    $(~PTI_USER_PGTABLE_AND_PCID_MASK), \reg
.endm

.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	mov	%cr3, \scratch_reg
	ADJUST_KERNEL_CR3 \scratch_reg
	mov	\scratch_reg, %cr3
.Lend_\@:
.endm

#define THIS_CPU_user_pcid_flush_mask   \
	PER_CPU_VAR(cpu_tlbstate) + TLB_STATE_user_pcid_flush_mask

.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	mov	%cr3, \scratch_reg

	ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID

	/*
	 * Test if the ASID needs a flush.
	 */
	movq	\scratch_reg, \scratch_reg2
	andq	$(0x7FF), \scratch_reg		/* mask ASID */
	bt	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jnc	.Lnoflush_\@

	/* Flush needed, clear the bit */
	btr	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	movq	\scratch_reg2, \scratch_reg
	jmp	.Lwrcr3_pcid_\@

.Lnoflush_\@:
	movq	\scratch_reg2, \scratch_reg
	SET_NOFLUSH_BIT \scratch_reg

.Lwrcr3_pcid_\@:
	/* Flip the ASID to the user version */
	orq	$(PTI_USER_PCID_MASK), \scratch_reg

.Lwrcr3_\@:
	/* Flip the PGD to the user version */
	orq     $(PTI_USER_PGTABLE_MASK), \scratch_reg
	mov	\scratch_reg, %cr3
.Lend_\@:
.endm

.macro SWITCH_TO_USER_CR3_STACK	scratch_reg:req
	pushq	%rax
	SWITCH_TO_USER_CR3_NOSTACK scratch_reg=\scratch_reg scratch_reg2=%rax
	popq	%rax
.endm

.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
	ALTERNATIVE "jmp .Ldone_\@", "", X86_FEATURE_PTI
	movq	%cr3, \scratch_reg
	movq	\scratch_reg, \save_reg
	/*
	 * Test the user pagetable bit. If set, then the user page tables
	 * are active. If clear CR3 already has the kernel page table
	 * active.
	 */
	bt	$PTI_USER_PGTABLE_BIT, \scratch_reg
	jnc	.Ldone_\@

	ADJUST_KERNEL_CR3 \scratch_reg
	movq	\scratch_reg, %cr3

.Ldone_\@:
.endm

.macro RESTORE_CR3 scratch_reg:req save_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI

	ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID

	/*
	 * KERNEL pages can always resume with NOFLUSH as we do
	 * explicit flushes.
	 */
	bt	$PTI_USER_PGTABLE_BIT, \save_reg
	jnc	.Lnoflush_\@

	/*
	 * Check if there's a pending flush for the user ASID we're
	 * about to set.
	 */
	movq	\save_reg, \scratch_reg
	andq	$(0x7FF), \scratch_reg
	bt	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jnc	.Lnoflush_\@

	btr	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jmp	.Lwrcr3_\@

.Lnoflush_\@:
	SET_NOFLUSH_BIT \save_reg

.Lwrcr3_\@:
	/*
	 * The CR3 write could be avoided when not changing its value,
	 * but would require a CR3 read *and* a scratch register.
	 */
	movq	\save_reg, %cr3
.Lend_\@:
.endm

#else /* CONFIG_PAGE_TABLE_ISOLATION=n: */

.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
.endm
.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req
.endm
.macro SWITCH_TO_USER_CR3_STACK scratch_reg:req
.endm
.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
.endm
.macro RESTORE_CR3 scratch_reg:req save_reg:req
.endm

#endif

#endif /* CONFIG_X86_64 */

/*
 * This does 'call enter_from_user_mode' unless we can avoid it based on
 * kernel config or using the static jump infrastructure.
 */
.macro CALL_enter_from_user_mode
#ifdef CONFIG_CONTEXT_TRACKING
#ifdef HAVE_JUMP_LABEL
	STATIC_JUMP_IF_FALSE .Lafter_call_\@, context_tracking_enabled, def=0
#endif
	call enter_from_user_mode
.Lafter_call_\@:
#endif
.endm
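
/*
 * Illustrative note (an assumption about the jump-label mechanics,
 * not original file text): STATIC_JUMP_IF_FALSE emits a runtime-
 * patched branch site.  While the context_tracking_enabled static key
 * is false the site is a 'jmp .Lafter_call_\@' that skips the call
 * entirely; once the key is enabled it is patched to a NOP and the
 * 'call enter_from_user_mode' executes.
 */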