/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/jump_label.h>
#include <asm/unwind_hints.h>
#include <asm/cpufeatures.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
#include <asm/asm-offsets.h>
#include <asm/processor-flags.h>
#include <asm/inst.h>

/*

 x86 function call convention, 64-bit:
 -------------------------------------
  arguments           |  callee-saved      |  extra caller-saved | return
 [callee-clobbered]   |                    |  [callee-clobbered] |
 ---------------------------------------------------------------------------
 rdi rsi rdx rcx r8-9 | rbx rbp [*] r12-15 |  r10-11             | rax, rdx [**]

 ( rsp is obviously invariant across normal function calls. (gcc can 'merge'
   functions when it sees tail-call optimization possibilities.) rflags is
   clobbered. Leftover arguments are passed on the stack. )

 [*] In the frame-pointers case rbp is fixed to the stack frame.

 [**] For struct return values wider than 64 bits the return convention is a
      bit more complex: up to 128 bits width we return small structures
      straight in rax, rdx. For structures larger than that (3 words or
      larger) the caller puts a pointer to an on-stack return struct
      [allocated in the caller's stack frame] into the first argument - i.e.
      into rdi. All other arguments shift up by one in this case.
      Fortunately this case is rare in the kernel.

For 32-bit we have the following conventions - the kernel is built with
-mregparm=3 and -freg-struct-return:

 x86 function calling convention, 32-bit:
 ----------------------------------------
  arguments         | callee-saved        | extra caller-saved | return
 [callee-clobbered] |                     | [callee-clobbered] |
 -------------------------------------------------------------------------
 eax edx ecx        | ebx edi esi ebp [*] | <none>             | eax, edx [**]

 ( here too esp is obviously invariant across normal function calls. eflags
   is clobbered. Leftover arguments are passed on the stack. )

 [*] In the frame-pointers case ebp is fixed to the stack frame.

 [**] We build with -freg-struct-return, which on 32-bit means similar
      semantics as on 64-bit: edx can be used for a second return value
      (i.e. covering integer and structure sizes up to 64 bits) - after that
      it gets more complex and more expensive: 3-word or larger struct returns
      get done in the caller's frame and the pointer to the return struct goes
      into regparm0, i.e. eax - the other arguments shift up and the
      function's register parameters degenerate to regparm=2 in essence.

*/
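
/*
 * Illustrative C sketch of the 64-bit struct-return rule above (an
 * example, not kernel code):
 *
 *	struct pair   { long a, b;    };  // 128 bits: returned in rax, rdx
 *	struct triple { long a, b, c; };  // 3 words: returned via memory
 *
 *	struct pair   f(long x);  // x in rdi, result in rax:rdx
 *	struct triple g(long x);  // hidden &result in rdi, x shifts to rsi
 */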

#ifdef CONFIG_X86_64

/*
 * 64-bit system call stack frame layout defines and helpers,
 * for assembly code:
 */

/* The layout forms the "struct pt_regs" on the stack: */
/*
 * C ABI says these regs are callee-preserved. They aren't saved on kernel
 * entry unless the syscall needs a complete, fully filled "struct pt_regs".
 */
#define R15		0*8
#define R14		1*8
#define R13		2*8
#define R12		3*8
#define RBP		4*8
#define RBX		5*8
/* These regs are callee-clobbered. Always saved on kernel entry. */
#define R11		6*8
#define R10		7*8
#define R9		8*8
#define R8		9*8
#define RAX		10*8
#define RCX		11*8
#define RDX		12*8
#define RSI		13*8
#define RDI		14*8
/*
 * On syscall entry, this is the syscall number. On a CPU exception, this is
 * the error code. On a hw interrupt, it's the IRQ number:
 */
#define ORIG_RAX	15*8
/* Return frame for iretq */
#define RIP		16*8
#define CS		17*8
#define EFLAGS		18*8
#define RSP		19*8
#define SS		20*8

#define SIZEOF_PTREGS	21*8
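
/*
 * Illustrative sketch (hypothetical code, assuming a full pt_regs frame
 * has been built): the offsets above let entry code address any saved
 * register relative to %rsp, e.g.:
 *
 *	movq	ORIG_RAX(%rsp), %rax	# the syscall number
 *	movq	RIP(%rsp), %rcx		# the user return address
 */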

.macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax save_ret=0
	.if \save_ret
	pushq	%rsi		/* pt_regs->si */
	movq	8(%rsp), %rsi	/* temporarily store the return address in %rsi */
	movq	%rdi, 8(%rsp)	/* pt_regs->di (overwriting original return address) */
	.else
	pushq	%rdi		/* pt_regs->di */
	pushq	%rsi		/* pt_regs->si */
	.endif
	pushq	\rdx		/* pt_regs->dx */
	pushq	%rcx		/* pt_regs->cx */
	pushq	\rax		/* pt_regs->ax */
	pushq	%r8		/* pt_regs->r8 */
	pushq	%r9		/* pt_regs->r9 */
	pushq	%r10		/* pt_regs->r10 */
	pushq	%r11		/* pt_regs->r11 */
	pushq	%rbx		/* pt_regs->rbx */
	pushq	%rbp		/* pt_regs->rbp */
	pushq	%r12		/* pt_regs->r12 */
	pushq	%r13		/* pt_regs->r13 */
	pushq	%r14		/* pt_regs->r14 */
	pushq	%r15		/* pt_regs->r15 */
	UNWIND_HINT_REGS

	.if \save_ret
	pushq	%rsi		/* return address on top of stack */
	.endif

	/*
	 * Sanitize registers of values that a speculation attack might
	 * otherwise want to exploit. The lower registers are likely clobbered
	 * well before they could be put to use in a speculative execution
	 * gadget.
	 */
	xorl	%edx, %edx	/* nospec dx */
	xorl	%ecx, %ecx	/* nospec cx */
	xorl	%r8d, %r8d	/* nospec r8 */
	xorl	%r9d, %r9d	/* nospec r9 */
	xorl	%r10d, %r10d	/* nospec r10 */
	xorl	%r11d, %r11d	/* nospec r11 */
	xorl	%ebx, %ebx	/* nospec rbx */
	xorl	%ebp, %ebp	/* nospec rbp */
	xorl	%r12d, %r12d	/* nospec r12 */
	xorl	%r13d, %r13d	/* nospec r13 */
	xorl	%r14d, %r14d	/* nospec r14 */
	xorl	%r15d, %r15d	/* nospec r15 */

.endm
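
/*
 * Minimal usage sketch (hypothetical caller; some_c_handler is not a real
 * symbol): once the hardware frame and error code are on the stack,
 *
 *	PUSH_AND_CLEAR_REGS		# complete the pt_regs layout
 *	movq	%rsp, %rdi		# pt_regs pointer as first C argument
 *	call	some_c_handler
 */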

.macro POP_REGS pop_rdi=1 skip_r11rcx=0
	popq %r15
	popq %r14
	popq %r13
	popq %r12
	popq %rbp
	popq %rbx
	.if \skip_r11rcx
	popq %rsi	/* discard the r11 slot: the sysret path keeps %r11 live */
	.else
	popq %r11
	.endif
	popq %r10
	popq %r9
	popq %r8
	popq %rax
	.if \skip_r11rcx
	popq %rsi	/* discard the rcx slot: the sysret path keeps %rcx live */
	.else
	popq %rcx
	.endif
	popq %rdx
	popq %rsi
	.if \pop_rdi
	popq %rdi
	.endif
.endm
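
/*
 * Usage sketch (assumed return-to-user shape, simplified): deferring the
 * %rdi pop leaves pt_regs->di on the stack so %rdi can serve as a scratch
 * register for a late CR3 switch:
 *
 *	POP_REGS pop_rdi=0
 *	...				# %rdi usable as scratch here
 *	popq	%rdi			# restore user RDI last
 */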

#ifdef CONFIG_PAGE_TABLE_ISOLATION

/*
 * PAGE_TABLE_ISOLATION PGDs are 8k. Flip bit 12 to switch between the two
 * halves:
 */
#define PTI_USER_PGTABLE_BIT		PAGE_SHIFT
#define PTI_USER_PGTABLE_MASK		(1 << PTI_USER_PGTABLE_BIT)
#define PTI_USER_PCID_BIT		X86_CR3_PTI_PCID_USER_BIT
#define PTI_USER_PCID_MASK		(1 << PTI_USER_PCID_BIT)
#define PTI_USER_PGTABLE_AND_PCID_MASK	(PTI_USER_PCID_MASK | PTI_USER_PGTABLE_MASK)
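
/*
 * Worked example (illustrative numbers): with 4K pages PAGE_SHIFT is 12,
 * so PTI_USER_PGTABLE_MASK is 0x1000. A kernel PGD at physical 0x2000
 * yields a user CR3 of 0x2000 | 0x1000 == 0x3000, the second 4k half of
 * the 8k PGD allocation. PTI_USER_PCID_MASK additionally selects the user
 * ASID when PCIDs are in use.
 */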

.macro SET_NOFLUSH_BIT	reg:req
	bts	$X86_CR3_PCID_NOFLUSH_BIT, \reg
.endm

.macro ADJUST_KERNEL_CR3 reg:req
	ALTERNATIVE "", "SET_NOFLUSH_BIT \reg", X86_FEATURE_PCID
	/* Clear PCID and the "PAGE_TABLE_ISOLATION" bit, point CR3 at kernel pagetables: */
	andq	$(~PTI_USER_PGTABLE_AND_PCID_MASK), \reg
.endm

.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	mov	%cr3, \scratch_reg
	ADJUST_KERNEL_CR3 \scratch_reg
	mov	\scratch_reg, %cr3
.Lend_\@:
.endm
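
/*
 * Usage sketch (hypothetical entry path): the macro clobbers the scratch
 * register and, via the ALTERNATIVE, collapses to a jump over its own body
 * when X86_FEATURE_PTI is not set:
 *
 *	SWITCH_TO_KERNEL_CR3 scratch_reg=%r14	# %r14 is clobbered
 */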

#define THIS_CPU_user_pcid_flush_mask   \
	PER_CPU_VAR(cpu_tlbstate) + TLB_STATE_user_pcid_flush_mask

.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	mov	%cr3, \scratch_reg

	ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID

	/*
	 * Test if the ASID needs a flush.
	 */
	movq	\scratch_reg, \scratch_reg2
	andq	$(0x7FF), \scratch_reg		/* mask ASID */
	bt	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jnc	.Lnoflush_\@

	/* Flush needed, clear the bit */
	btr	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	movq	\scratch_reg2, \scratch_reg
	jmp	.Lwrcr3_pcid_\@

.Lnoflush_\@:
	movq	\scratch_reg2, \scratch_reg
	SET_NOFLUSH_BIT \scratch_reg

.Lwrcr3_pcid_\@:
	/* Flip the ASID to the user version */
	orq	$(PTI_USER_PCID_MASK), \scratch_reg

.Lwrcr3_\@:
	/* Flip the PGD to the user version */
	orq	$(PTI_USER_PGTABLE_MASK), \scratch_reg
	mov	\scratch_reg, %cr3
.Lend_\@:
.endm

.macro SWITCH_TO_USER_CR3_STACK	scratch_reg:req
	pushq	%rax
	SWITCH_TO_USER_CR3_NOSTACK scratch_reg=\scratch_reg scratch_reg2=%rax
	popq	%rax
.endm
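
/*
 * Hypothetical use of the _STACK variant, which borrows %rax as the second
 * scratch register via push/pop so that callers with only one free
 * register can still switch CR3:
 *
 *	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi	# %rdi is clobbered
 */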

.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
	ALTERNATIVE "jmp .Ldone_\@", "", X86_FEATURE_PTI
	movq	%cr3, \scratch_reg
	movq	\scratch_reg, \save_reg
	/*
	 * Test the user pagetable bit. If set, then the user page tables
	 * are active. If clear, CR3 already has the kernel page tables
	 * active.
	 */
	bt	$PTI_USER_PGTABLE_BIT, \scratch_reg
	jnc	.Ldone_\@

	ADJUST_KERNEL_CR3 \scratch_reg
	movq	\scratch_reg, %cr3

.Ldone_\@:
.endm

.macro RESTORE_CR3 scratch_reg:req save_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI

	ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID

	/*
	 * KERNEL pages can always resume with NOFLUSH as we do
	 * explicit flushes.
	 */
	bt	$PTI_USER_PGTABLE_BIT, \save_reg
	jnc	.Lnoflush_\@

	/*
	 * Check if there's a pending flush for the user ASID we're
	 * about to set.
	 */
	movq	\save_reg, \scratch_reg
	andq	$(0x7FF), \scratch_reg
	bt	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jnc	.Lnoflush_\@

	btr	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jmp	.Lwrcr3_\@

.Lnoflush_\@:
	SET_NOFLUSH_BIT \save_reg

.Lwrcr3_\@:
	/*
	 * The CR3 write could be avoided when not changing its value,
	 * but would require a CR3 read *and* a scratch register.
	 */
	movq	\save_reg, %cr3
.Lend_\@:
.endm
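
/*
 * Paired-usage sketch (assumed paranoid-entry style path): \save_reg must
 * survive any intervening C call, so a callee-saved register is the
 * natural choice:
 *
 *	SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
 *	...					# run on the kernel CR3
 *	RESTORE_CR3 scratch_reg=%rax save_reg=%r14
 */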

#else /* CONFIG_PAGE_TABLE_ISOLATION=n: */

.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
.endm
.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req
.endm
.macro SWITCH_TO_USER_CR3_STACK scratch_reg:req
.endm
.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
.endm
.macro RESTORE_CR3 scratch_reg:req save_reg:req
.endm

#endif

/*
 * Mitigate Spectre v1 for conditional swapgs code paths.
 *
 * FENCE_SWAPGS_USER_ENTRY is used in the user entry swapgs code path, to
 * prevent a speculative swapgs when coming from kernel space.
 *
 * FENCE_SWAPGS_KERNEL_ENTRY is used in the kernel entry non-swapgs code path,
 * to prevent the swapgs from getting speculatively skipped when coming from
 * user space.
 */
.macro FENCE_SWAPGS_USER_ENTRY
	ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_USER
.endm
.macro FENCE_SWAPGS_KERNEL_ENTRY
	ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_KERNEL
.endm
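
/*
 * Usage sketch (simplified conditional-swapgs entry; labels illustrative):
 *
 *	testb	$3, CS(%rsp)		# did we come from user mode?
 *	jz	.Lfrom_kernel
 *	swapgs
 *	FENCE_SWAPGS_USER_ENTRY		# fence after the user-mode swapgs
 *	jmp	.Lcommon
 * .Lfrom_kernel:
 *	FENCE_SWAPGS_KERNEL_ENTRY	# no swapgs: fence the branch instead
 * .Lcommon:
 */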

.macro STACKLEAK_ERASE_NOCLOBBER
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	PUSH_AND_CLEAR_REGS
	call stackleak_erase
	POP_REGS
#endif
.endm

/*
 * Save the current GSBASE in \save_reg and load this CPU's per-CPU base
 * into GSBASE (both instructions require X86_FEATURE_FSGSBASE):
 */
.macro SAVE_AND_SET_GSBASE scratch_reg:req save_reg:req
	rdgsbase \save_reg
	GET_PERCPU_BASE \scratch_reg
	wrgsbase \scratch_reg
.endm
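
/*
 * Usage sketch (assumes X86_FEATURE_FSGSBASE has already been verified,
 * since rdgsbase/wrgsbase fault otherwise):
 *
 *	SAVE_AND_SET_GSBASE scratch_reg=%rax save_reg=%rbx
 *	...				# per-CPU data reachable via %gs
 *	wrgsbase %rbx			# restore the previous GSBASE
 */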

#else /* CONFIG_X86_64 */
# undef UNWIND_HINT_IRET_REGS
# define UNWIND_HINT_IRET_REGS
#endif /* !CONFIG_X86_64 */

.macro STACKLEAK_ERASE
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	call stackleak_erase
#endif
.endm

#ifdef CONFIG_SMP

/*
 * The CPU/node NR is loaded from the limit (size) field of a special
 * segment descriptor entry in the GDT.
 */
.macro LOAD_CPU_AND_NODE_SEG_LIMIT reg:req
	movq	$__CPUNODE_SEG, \reg
	lsl	\reg, \reg
.endm

/*
 * Fetch the per-CPU GSBASE value for this processor and put it in @reg.
 * We normally use %gs for accessing per-CPU data, but we are setting up
 * %gs here and obviously cannot use %gs itself to access per-CPU data.
 *
 * Do not use RDPID, because KVM loads the guest's TSC_AUX on vm-entry and
 * may not restore the host's value until the CPU returns to userspace.
 * Thus the kernel would consume a guest's TSC_AUX if an NMI arrives
 * while running KVM's run loop.
 */
.macro GET_PERCPU_BASE reg:req
	LOAD_CPU_AND_NODE_SEG_LIMIT \reg
	andq	$VDSO_CPUNODE_MASK, \reg
	movq	__per_cpu_offset(, \reg, 8), \reg
.endm
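
/*
 * Sketch of the encoding assumed above (per the vdso getcpu convention):
 * the segment limit is (node << VDSO_CPUNODE_BITS) | cpu, so masking with
 * VDSO_CPUNODE_MASK leaves just the CPU number, which then indexes
 * __per_cpu_offset[]. Hypothetical use:
 *
 *	GET_PERCPU_BASE %rax		# %rax = this CPU's per-CPU base
 */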

#else

.macro GET_PERCPU_BASE reg:req
	movq	pcpu_unit_offsets(%rip), \reg
.endm

#endif /* CONFIG_SMP */