Files
linux-cryptodev-2.6/arch/x86/entry/vdso/vdso64/vsgx.S
H. Peter Anvin 693c819fed x86/entry/vdso: Refactor the vdso build
- Separate out the vdso sources into common, vdso32, and vdso64
  directories.
- Build the 32- and 64-bit vdsos in their respective subdirectories;
  this greatly simplifies the build flags handling.
- Unify the mangling of Makefile flags between the 32- and 64-bit
  vdso code as much as possible; all common rules are put in
  arch/x86/entry/vdso/common/Makefile.include. The remaining
  Makefile is very simple for 32 bits; the 64-bit one is only slightly more
  complicated because it contains the x32 generation rule.
- Define __DISABLE_EXPORTS when building the vdso. This need seems to
  have been masked by different ordering compile flags before.
- Change CONFIG_X86_64 to BUILD_VDSO32_64 in vdso32/system_call.S,
  to make it compatible with including fake_32bit_build.h.
- The -fcf-protection= option was "leaking" from the kernel build,
  for reasons that were not clear to me. Furthermore, several
  distributions ship with it set to a default value other than
  "-fcf-protection=none". Make it match the configuration options
  for *user space*.

Note that this patch may seem large, but the vast majority of it is
simply code movement.

Signed-off-by: H. Peter Anvin (Intel) <hpa@zytor.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Link: https://patch.msgid.link/20251216212606.1325678-4-hpa@zytor.com
2026-01-13 15:35:09 -08:00

151 lines
3.6 KiB
ArmAsm

/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/errno.h>
#include <asm/enclu.h>

#include "extable.h"

/*
 * Offset of the struct sgx_enclave_run * stack argument, relative to %rbp
 * after the standard push-%rbp/mov-%rsp,%rbp prolog below.
 */
#define SGX_ENCLAVE_OFFSET_OF_RUN 16

/*
 * The offsets relative to struct sgx_enclave_run.  These hard-code the
 * layout of the userspace-visible struct; presumably they must stay in
 * sync with the uapi declaration (arch/x86/include/uapi/asm/sgx.h) --
 * confirm against that header when changing either side.
 */
#define SGX_ENCLAVE_RUN_TCS 0
#define SGX_ENCLAVE_RUN_LEAF 8
#define SGX_ENCLAVE_RUN_EXCEPTION_VECTOR 12
#define SGX_ENCLAVE_RUN_EXCEPTION_ERROR_CODE 14
#define SGX_ENCLAVE_RUN_EXCEPTION_ADDR 16
#define SGX_ENCLAVE_RUN_USER_HANDLER 24
#define SGX_ENCLAVE_RUN_USER_DATA 32 /* not used by this code */
/* Reserved tail of the struct; validated to be all-zero before EENTER. */
#define SGX_ENCLAVE_RUN_RESERVED_START 40
#define SGX_ENCLAVE_RUN_RESERVED_END 256
.code64
.section .text, "ax"

/*
 * __vdso_sgx_enter_enclave() - Enter an SGX enclave with ENCLU[EENTER],
 * or resume it with ENCLU[ERESUME].
 *
 * SysV AMD64 ABI.  As seen from this code:
 *   - the requested ENCLU function arrives in the 4th integer argument
 *     register (%rcx) and is moved to %eax, where ENCLU expects its leaf;
 *   - the struct sgx_enclave_run pointer is passed on the stack and is
 *     re-read at SGX_ENCLAVE_OFFSET_OF_RUN(%rbp) whenever needed, since
 *     the enclave may clobber all volatile registers.
 *
 * Return value (%eax):
 *   0        ENCLU was attempted; run->leaf holds the exit reason.
 *   -EINVAL  function outside [EENTER, ERESUME], or the reserved area of
 *            struct sgx_enclave_run was not all-zero.
 *   <= 0     verbatim return of run->user_handler, when one is installed
 *            and returns non-positive; a positive handler return instead
 *            re-enters the enclave with that value as the ENCLU function.
 */
SYM_FUNC_START(__vdso_sgx_enter_enclave)
/* Prolog: frame pointer for the stack arg; %rbx (callee-saved) is scratch. */
.cfi_startproc
push %rbp
.cfi_adjust_cfa_offset 8
.cfi_rel_offset %rbp, 0
mov %rsp, %rbp
.cfi_def_cfa_register %rbp
push %rbx
.cfi_rel_offset %rbx, -8

/* ENCLU function (4th argument, %rcx) -> %eax, as ENCLU requires. */
mov %ecx, %eax
.Lenter_enclave:
/* EENTER <= function <= ERESUME */
cmp $EENTER, %eax
jb .Linvalid_input
cmp $ERESUME, %eax
ja .Linvalid_input

/* %rcx = struct sgx_enclave_run * (stack argument). */
mov SGX_ENCLAVE_OFFSET_OF_RUN(%rbp), %rcx

/* Validate that the reserved area contains only zeros. */
mov $SGX_ENCLAVE_RUN_RESERVED_START, %rbx
1:
cmpq $0, (%rcx, %rbx)
jne .Linvalid_input

/* One qword at a time; RESERVED_END - RESERVED_START is a multiple of 8. */
add $8, %rbx
cmpq $SGX_ENCLAVE_RUN_RESERVED_END, %rbx
jne 1b

/* Load TCS and AEP: ENCLU[EENTER] takes the TCS in %rbx, the AEP in %rcx. */
mov SGX_ENCLAVE_RUN_TCS(%rcx), %rbx
lea .Lasync_exit_pointer(%rip), %rcx

/*
 * Single ENCLU serving as both EENTER and AEP (ERESUME).  The AEP points
 * at the ENCLU itself, so an asynchronous enclave exit lands back here
 * with %eax = ERESUME and transparently resumes the enclave.
 */
.Lasync_exit_pointer:
.Lenclu_eenter_eresume:
enclu

/* EEXIT jumps here unless the enclave is doing something fancy. */
mov SGX_ENCLAVE_OFFSET_OF_RUN(%rbp), %rbx

/* Set exit_reason. */
movl $EEXIT, SGX_ENCLAVE_RUN_LEAF(%rbx)

/* Invoke userspace's exit handler if one was provided. */
.Lhandle_exit:
cmpq $0, SGX_ENCLAVE_RUN_USER_HANDLER(%rbx)
jne .Linvoke_userspace_handler

/* Success, in the sense that ENCLU was attempted. */
xor %eax, %eax

.Lout:
pop %rbx
leave
.cfi_def_cfa %rsp, 8
RET

/* The out-of-line code below runs with the pre-leave stack frame. */
.cfi_def_cfa %rbp, 16

.Linvalid_input:
mov $(-EINVAL), %eax
jmp .Lout

/*
 * Fault fixup target, wired to the ENCLU above by the
 * _ASM_VDSO_EXTABLE_HANDLE entry at the bottom of this file.  The fixup
 * presumably delivers the exception info in %eax/%di/%si/%rdx as stored
 * below -- confirm against extable.h and the kernel's vDSO fixup code.
 */
.Lhandle_exception:
mov SGX_ENCLAVE_OFFSET_OF_RUN(%rbp), %rbx

/* Set the exception info. */
mov %eax, (SGX_ENCLAVE_RUN_LEAF)(%rbx)
mov %di, (SGX_ENCLAVE_RUN_EXCEPTION_VECTOR)(%rbx)
mov %si, (SGX_ENCLAVE_RUN_EXCEPTION_ERROR_CODE)(%rbx)
mov %rdx, (SGX_ENCLAVE_RUN_EXCEPTION_ADDR)(%rbx)
jmp .Lhandle_exit

.Linvoke_userspace_handler:
/* Pass the untrusted RSP (at exit) to the callback via %rcx. */
mov %rsp, %rcx

/* Save the struct sgx_enclave_run pointer; %rbx is about to be clobbered. */
mov %rbx, %rax

/* Save the untrusted RSP offset in %rbx (non-volatile register). */
mov %rsp, %rbx
and $0xf, %rbx

/*
 * Align stack per x86_64 ABI. Note, %rsp needs to be 16-byte aligned
 * _after_ pushing the parameters on the stack, hence the bonus push.
 */
and $-0x10, %rsp
push %rax

/* Push the run pointer (saved in %rax) as a param to the callback. */
push %rax

/* Clear RFLAGS.DF per x86_64 ABI */
cld

/*
 * Load the callback pointer to %rax and lfence for LVI (load value
 * injection) protection before making the call.
 */
mov SGX_ENCLAVE_RUN_USER_HANDLER(%rax), %rax
lfence
call *%rax

/* Undo the post-exit %rsp adjustment (both pushes + saved misalignment). */
lea 0x10(%rsp, %rbx), %rsp

/*
 * If the return from callback is zero or negative, return immediately,
 * else re-execute ENCLU with the positive return value interpreted as
 * the requested ENCLU function.
 */
cmp $0, %eax
jle .Lout
jmp .Lenter_enclave
.cfi_endproc

/* Route faults on the ENCLU instruction to .Lhandle_exception. */
_ASM_VDSO_EXTABLE_HANDLE(.Lenclu_eenter_eresume, .Lhandle_exception)

SYM_FUNC_END(__vdso_sgx_enter_enclave)