Linux kernel source: arch/x86/entry/vdso/vsgx.S — __vdso_sgx_enter_enclave.
Two tagged versions of the file follow, captured from a source browser.

Version: v6.2
  1/* SPDX-License-Identifier: GPL-2.0 */
  2
  3#include <linux/linkage.h>
  4#include <asm/export.h>
  5#include <asm/errno.h>
  6#include <asm/enclu.h>
  7
  8#include "extable.h"
  9
/*
 * Offset of the struct sgx_enclave_run pointer relative to %rbp after the
 * prologue: saved %rbp (8) + return address (8), i.e. the first (7th C
 * argument) stack argument.
 */
#define SGX_ENCLAVE_OFFSET_OF_RUN		16

/*
 * The offsets relative to struct sgx_enclave_run.
 * NOTE(review): these must stay in sync with the uapi layout of
 * struct sgx_enclave_run — confirm against arch/x86/include/uapi/asm/sgx.h
 * before changing any of them.
 */
#define SGX_ENCLAVE_RUN_TCS			0
#define SGX_ENCLAVE_RUN_LEAF			8
#define SGX_ENCLAVE_RUN_EXCEPTION_VECTOR	12
#define SGX_ENCLAVE_RUN_EXCEPTION_ERROR_CODE	14
#define SGX_ENCLAVE_RUN_EXCEPTION_ADDR		16
#define SGX_ENCLAVE_RUN_USER_HANDLER		24
#define SGX_ENCLAVE_RUN_USER_DATA		32	/* not used */
#define SGX_ENCLAVE_RUN_RESERVED_START		40
#define SGX_ENCLAVE_RUN_RESERVED_END		256

.code64
.section .text, "ax"
 26
SYM_FUNC_START(__vdso_sgx_enter_enclave)
	/*
	 * Enter an SGX enclave via EENTER/ERESUME and report how it exited.
	 *
	 * Register contract, as exercised by the code below:
	 *   %ecx      - requested ENCLU function; must be EENTER..ERESUME
	 *   16(%rbp)  - pointer to struct sgx_enclave_run (first stack arg)
	 *   %rdi, %rsi, %rdx, %r8, %r9 - left untouched for the enclave
	 *
	 * Returns in %eax: 0 once ENCLU was attempted, -EINVAL for a bad
	 * function or a non-zero reserved area, or the user exit handler's
	 * non-positive return value. A positive handler return re-executes
	 * ENCLU with that value as the function.
	 */
	/* Prolog */
	.cfi_startproc
	push	%rbp
	.cfi_adjust_cfa_offset	8
	.cfi_rel_offset		%rbp, 0
	mov	%rsp, %rbp
	.cfi_def_cfa_register	%rbp
	push	%rbx			/* callee-saved; scratch below */
	.cfi_rel_offset		%rbx, -8

	mov	%ecx, %eax		/* %eax = requested ENCLU function */
.Lenter_enclave:
	/* EENTER <= function <= ERESUME */
	cmp	$EENTER, %eax
	jb	.Linvalid_input
	cmp	$ERESUME, %eax
	ja	.Linvalid_input

	mov	SGX_ENCLAVE_OFFSET_OF_RUN(%rbp), %rcx

	/*
	 * Validate that the reserved area contains only zeros, 8 bytes at a
	 * time, %rbx being the running byte offset into the run struct.
	 */
	mov	$SGX_ENCLAVE_RUN_RESERVED_START, %rbx
1:
	cmpq	$0, (%rcx, %rbx)
	jne	.Linvalid_input
	add	$8, %rbx
	cmpq	$SGX_ENCLAVE_RUN_RESERVED_END, %rbx
	jne	1b

	/* Load TCS (ENCLU needs it in %rbx) and AEP (in %rcx). */
	mov	SGX_ENCLAVE_RUN_TCS(%rcx), %rbx
	lea	.Lasync_exit_pointer(%rip), %rcx

	/*
	 * Single ENCLU serving as both EENTER and AEP (ERESUME): an
	 * asynchronous enclave exit lands right back here with %eax set to
	 * ERESUME, which re-enters the enclave.
	 */
.Lasync_exit_pointer:
.Lenclu_eenter_eresume:
	enclu

	/* EEXIT jumps here unless the enclave is doing something fancy. */
	mov	SGX_ENCLAVE_OFFSET_OF_RUN(%rbp), %rbx

	/* Set exit_reason. */
	movl	$EEXIT, SGX_ENCLAVE_RUN_LEAF(%rbx)

	/* Invoke userspace's exit handler if one was provided. */
.Lhandle_exit:
	cmpq	$0, SGX_ENCLAVE_RUN_USER_HANDLER(%rbx)
	jne	.Linvoke_userspace_handler

	/* Success, in the sense that ENCLU was attempted. */
	xor	%eax, %eax

.Lout:
	pop	%rbx
	leave
	.cfi_def_cfa		%rsp, 8
	RET

	/* The out-of-line code runs with the pre-leave stack frame. */
	.cfi_def_cfa		%rbp, 16

.Linvalid_input:
	mov	$(-EINVAL), %eax
	jmp	.Lout

.Lhandle_exception:
	/*
	 * Reached via the vDSO exception fixup (registered below with
	 * _ASM_VDSO_EXTABLE_HANDLE): instead of delivering a signal, the
	 * kernel resumes userspace here. %eax still holds the attempted
	 * ENCLU function (untouched since before enclu); %di, %si and %rdx
	 * carry the vector, error code and fault address.
	 * NOTE(review): the %di/%si/%rdx contract is kernel-side — confirm
	 * against the vDSO extable fixup in arch/x86/entry/vdso/extable.c.
	 */
	mov	SGX_ENCLAVE_OFFSET_OF_RUN(%rbp), %rbx

	/* Set the exception info. */
	mov	%eax, (SGX_ENCLAVE_RUN_LEAF)(%rbx)
	mov	%di,  (SGX_ENCLAVE_RUN_EXCEPTION_VECTOR)(%rbx)
	mov	%si,  (SGX_ENCLAVE_RUN_EXCEPTION_ERROR_CODE)(%rbx)
	mov	%rdx, (SGX_ENCLAVE_RUN_EXCEPTION_ADDR)(%rbx)
	jmp	.Lhandle_exit

.Linvoke_userspace_handler:
	/* Pass the untrusted RSP (at exit) to the callback via %rcx. */
	mov	%rsp, %rcx

	/* Save the run pointer; %rbx is about to be clobbered. */
	mov	%rbx, %rax

	/* Save the untrusted RSP offset in %rbx (non-volatile register). */
	mov	%rsp, %rbx
	and	$0xf, %rbx		/* %rbx = misalignment of post-exit %rsp */

	/*
	 * Align stack per x86_64 ABI. Note, %rsp needs to be 16-byte aligned
	 * _after_ pushing the parameters on the stack, hence the bonus push.
	 */
	and	$-0x10, %rsp
	push	%rax

	/* Push the struct sgx_enclave_run pointer as a param to the callback. */
	push	%rax

	/* Clear RFLAGS.DF per x86_64 ABI */
	cld

	/*
	 * Load the callback pointer to %rax and lfence for LVI (load value
	 * injection) protection before making the call.
	 */
	mov	SGX_ENCLAVE_RUN_USER_HANDLER(%rax), %rax
	lfence
	call	*%rax

	/*
	 * Undo the post-exit %rsp adjustment: drop the two pushes (0x10) and
	 * restore the misalignment saved in %rbx.
	 */
	lea	0x10(%rsp, %rbx), %rsp

	/*
	 * If the return from callback is zero or negative, return immediately,
	 * else re-execute ENCLU with the positive return value interpreted as
	 * the requested ENCLU function.
	 */
	cmp	$0, %eax
	jle	.Lout
	jmp	.Lenter_enclave

	.cfi_endproc

_ASM_VDSO_EXTABLE_HANDLE(.Lenclu_eenter_eresume, .Lhandle_exception)

SYM_FUNC_END(__vdso_sgx_enter_enclave)
Version: v6.8 (identical code to v6.2; the only change is that <asm/export.h> is no longer included)
  1/* SPDX-License-Identifier: GPL-2.0 */
  2
  3#include <linux/linkage.h>
 
  4#include <asm/errno.h>
  5#include <asm/enclu.h>
  6
  7#include "extable.h"
  8
/*
 * Offset of the struct sgx_enclave_run pointer relative to %rbp after the
 * prologue: saved %rbp (8) + return address (8), i.e. the first (7th C
 * argument) stack argument.
 */
#define SGX_ENCLAVE_OFFSET_OF_RUN		16

/*
 * The offsets relative to struct sgx_enclave_run.
 * NOTE(review): these must stay in sync with the uapi layout of
 * struct sgx_enclave_run — confirm against arch/x86/include/uapi/asm/sgx.h
 * before changing any of them.
 */
#define SGX_ENCLAVE_RUN_TCS			0
#define SGX_ENCLAVE_RUN_LEAF			8
#define SGX_ENCLAVE_RUN_EXCEPTION_VECTOR	12
#define SGX_ENCLAVE_RUN_EXCEPTION_ERROR_CODE	14
#define SGX_ENCLAVE_RUN_EXCEPTION_ADDR		16
#define SGX_ENCLAVE_RUN_USER_HANDLER		24
#define SGX_ENCLAVE_RUN_USER_DATA		32	/* not used */
#define SGX_ENCLAVE_RUN_RESERVED_START		40
#define SGX_ENCLAVE_RUN_RESERVED_END		256

.code64
.section .text, "ax"
 25
SYM_FUNC_START(__vdso_sgx_enter_enclave)
	/*
	 * Enter an SGX enclave via EENTER/ERESUME and report how it exited.
	 *
	 * Register contract, as exercised by the code below:
	 *   %ecx      - requested ENCLU function; must be EENTER..ERESUME
	 *   16(%rbp)  - pointer to struct sgx_enclave_run (first stack arg)
	 *   %rdi, %rsi, %rdx, %r8, %r9 - left untouched for the enclave
	 *
	 * Returns in %eax: 0 once ENCLU was attempted, -EINVAL for a bad
	 * function or a non-zero reserved area, or the user exit handler's
	 * non-positive return value. A positive handler return re-executes
	 * ENCLU with that value as the function.
	 */
	/* Prolog */
	.cfi_startproc
	push	%rbp
	.cfi_adjust_cfa_offset	8
	.cfi_rel_offset		%rbp, 0
	mov	%rsp, %rbp
	.cfi_def_cfa_register	%rbp
	push	%rbx			/* callee-saved; scratch below */
	.cfi_rel_offset		%rbx, -8

	mov	%ecx, %eax		/* %eax = requested ENCLU function */
.Lenter_enclave:
	/* EENTER <= function <= ERESUME */
	cmp	$EENTER, %eax
	jb	.Linvalid_input
	cmp	$ERESUME, %eax
	ja	.Linvalid_input

	mov	SGX_ENCLAVE_OFFSET_OF_RUN(%rbp), %rcx

	/*
	 * Validate that the reserved area contains only zeros, 8 bytes at a
	 * time, %rbx being the running byte offset into the run struct.
	 */
	mov	$SGX_ENCLAVE_RUN_RESERVED_START, %rbx
1:
	cmpq	$0, (%rcx, %rbx)
	jne	.Linvalid_input
	add	$8, %rbx
	cmpq	$SGX_ENCLAVE_RUN_RESERVED_END, %rbx
	jne	1b

	/* Load TCS (ENCLU needs it in %rbx) and AEP (in %rcx). */
	mov	SGX_ENCLAVE_RUN_TCS(%rcx), %rbx
	lea	.Lasync_exit_pointer(%rip), %rcx

	/*
	 * Single ENCLU serving as both EENTER and AEP (ERESUME): an
	 * asynchronous enclave exit lands right back here with %eax set to
	 * ERESUME, which re-enters the enclave.
	 */
.Lasync_exit_pointer:
.Lenclu_eenter_eresume:
	enclu

	/* EEXIT jumps here unless the enclave is doing something fancy. */
	mov	SGX_ENCLAVE_OFFSET_OF_RUN(%rbp), %rbx

	/* Set exit_reason. */
	movl	$EEXIT, SGX_ENCLAVE_RUN_LEAF(%rbx)

	/* Invoke userspace's exit handler if one was provided. */
.Lhandle_exit:
	cmpq	$0, SGX_ENCLAVE_RUN_USER_HANDLER(%rbx)
	jne	.Linvoke_userspace_handler

	/* Success, in the sense that ENCLU was attempted. */
	xor	%eax, %eax

.Lout:
	pop	%rbx
	leave
	.cfi_def_cfa		%rsp, 8
	RET

	/* The out-of-line code runs with the pre-leave stack frame. */
	.cfi_def_cfa		%rbp, 16

.Linvalid_input:
	mov	$(-EINVAL), %eax
	jmp	.Lout

.Lhandle_exception:
	/*
	 * Reached via the vDSO exception fixup (registered below with
	 * _ASM_VDSO_EXTABLE_HANDLE): instead of delivering a signal, the
	 * kernel resumes userspace here. %eax still holds the attempted
	 * ENCLU function (untouched since before enclu); %di, %si and %rdx
	 * carry the vector, error code and fault address.
	 * NOTE(review): the %di/%si/%rdx contract is kernel-side — confirm
	 * against the vDSO extable fixup in arch/x86/entry/vdso/extable.c.
	 */
	mov	SGX_ENCLAVE_OFFSET_OF_RUN(%rbp), %rbx

	/* Set the exception info. */
	mov	%eax, (SGX_ENCLAVE_RUN_LEAF)(%rbx)
	mov	%di,  (SGX_ENCLAVE_RUN_EXCEPTION_VECTOR)(%rbx)
	mov	%si,  (SGX_ENCLAVE_RUN_EXCEPTION_ERROR_CODE)(%rbx)
	mov	%rdx, (SGX_ENCLAVE_RUN_EXCEPTION_ADDR)(%rbx)
	jmp	.Lhandle_exit

.Linvoke_userspace_handler:
	/* Pass the untrusted RSP (at exit) to the callback via %rcx. */
	mov	%rsp, %rcx

	/* Save the run pointer; %rbx is about to be clobbered. */
	mov	%rbx, %rax

	/* Save the untrusted RSP offset in %rbx (non-volatile register). */
	mov	%rsp, %rbx
	and	$0xf, %rbx		/* %rbx = misalignment of post-exit %rsp */

	/*
	 * Align stack per x86_64 ABI. Note, %rsp needs to be 16-byte aligned
	 * _after_ pushing the parameters on the stack, hence the bonus push.
	 */
	and	$-0x10, %rsp
	push	%rax

	/* Push the struct sgx_enclave_run pointer as a param to the callback. */
	push	%rax

	/* Clear RFLAGS.DF per x86_64 ABI */
	cld

	/*
	 * Load the callback pointer to %rax and lfence for LVI (load value
	 * injection) protection before making the call.
	 */
	mov	SGX_ENCLAVE_RUN_USER_HANDLER(%rax), %rax
	lfence
	call	*%rax

	/*
	 * Undo the post-exit %rsp adjustment: drop the two pushes (0x10) and
	 * restore the misalignment saved in %rbx.
	 */
	lea	0x10(%rsp, %rbx), %rsp

	/*
	 * If the return from callback is zero or negative, return immediately,
	 * else re-execute ENCLU with the positive return value interpreted as
	 * the requested ENCLU function.
	 */
	cmp	$0, %eax
	jle	.Lout
	jmp	.Lenter_enclave

	.cfi_endproc

_ASM_VDSO_EXTABLE_HANDLE(.Lenclu_eenter_eresume, .Lhandle_exception)

SYM_FUNC_END(__vdso_sgx_enter_enclave)