mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/herbert/cryptodev-2.6.git
synced 2026-04-18 03:23:53 -04:00
powerpc64/bpf: Support exceptions
The modified prologue/epilogue generation code now enables the exception callback to use the stack frame of the program marked as the exception boundary, where callee-saved registers are stored. As per the ppc64 ABIv2 documentation[1], r14-r31 are callee-saved registers. BPF programs on ppc64 already save registers r26-r31. Saving the remaining set of callee-saved registers (r14-r25) is handled in the next patch. [1] https://ftp.rtems.org/pub/rtems/people/sebh/ABI64BitOpenPOWERv1.1_16July2015_pub.pdf Signed-off-by: Abhishek Dubey <adubey@linux.ibm.com> Signed-off-by: Madhavan Srinivasan <maddy@linux.ibm.com> Link: https://patch.msgid.link/20260124075223.6033-6-adubey@linux.ibm.com
This commit is contained in:
committed by
Madhavan Srinivasan
parent
b1c24f089b
commit
c169930292
@@ -179,6 +179,8 @@ struct codegen_context {
|
||||
u64 arena_vm_start;
|
||||
u64 user_vm_start;
|
||||
bool is_subprog;
|
||||
bool exception_boundary;
|
||||
bool exception_cb;
|
||||
};
|
||||
|
||||
#define bpf_to_ppc(r) (ctx->b2p[r])
|
||||
|
||||
@@ -207,6 +207,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
|
||||
cgctx.arena_vm_start = bpf_arena_get_kern_vm_start(fp->aux->arena);
|
||||
cgctx.user_vm_start = bpf_arena_get_user_vm_start(fp->aux->arena);
|
||||
cgctx.is_subprog = bpf_is_subprog(fp);
|
||||
cgctx.exception_boundary = fp->aux->exception_boundary;
|
||||
cgctx.exception_cb = fp->aux->exception_cb;
|
||||
|
||||
/* Scouting faux-generate pass 0 */
|
||||
if (bpf_jit_build_body(fp, NULL, NULL, &cgctx, addrs, 0, false)) {
|
||||
@@ -436,6 +438,11 @@ void bpf_jit_free(struct bpf_prog *fp)
|
||||
bpf_prog_unlock_free(fp);
|
||||
}
|
||||
|
||||
bool bpf_jit_supports_exceptions(void)
|
||||
{
|
||||
return IS_ENABLED(CONFIG_PPC64);
|
||||
}
|
||||
|
||||
bool bpf_jit_supports_subprog_tailcalls(void)
|
||||
{
|
||||
return IS_ENABLED(CONFIG_PPC64);
|
||||
|
||||
@@ -89,7 +89,9 @@ static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
|
||||
* - the bpf program uses its stack area
|
||||
* The latter condition is deduced from the usage of BPF_REG_FP
|
||||
*/
|
||||
return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP));
|
||||
return ctx->seen & SEEN_FUNC ||
|
||||
bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP)) ||
|
||||
ctx->exception_cb;
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -161,8 +163,13 @@ void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
|
||||
EMIT(PPC_RAW_LI(bpf_to_ppc(TMP_REG_1), 0));
|
||||
/* this goes in the redzone */
|
||||
EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, -(BPF_PPC_TAILCALL)));
|
||||
} else {
|
||||
} else if (!ctx->exception_cb) {
|
||||
/*
|
||||
* Tailcall jitting for non exception_cb progs only.
|
||||
* exception_cb won't require tail_call_info to be setup.
|
||||
*
|
||||
* tail_call_info interpretation logic:
|
||||
*
|
||||
* if tail_call_info < MAX_TAIL_CALL_CNT
|
||||
* main prog calling first subprog -> copy reference
|
||||
* else
|
||||
@@ -177,8 +184,12 @@ void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
|
||||
EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, -(BPF_PPC_TAILCALL)));
|
||||
}
|
||||
|
||||
if (bpf_has_stack_frame(ctx)) {
|
||||
if (bpf_has_stack_frame(ctx) && !ctx->exception_cb) {
|
||||
/*
|
||||
* exception_cb uses boundary frame after stack walk.
|
||||
* It can simply use redzone, this optimization reduces
|
||||
* stack walk loop by one level.
|
||||
*
|
||||
* We need a stack frame, but we don't necessarily need to
|
||||
* save/restore LR unless we call other functions
|
||||
*/
|
||||
@@ -190,23 +201,35 @@ void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
|
||||
EMIT(PPC_RAW_STDU(_R1, _R1, -(BPF_PPC_STACKFRAME + ctx->stack_size)));
|
||||
}
|
||||
|
||||
/*
|
||||
* Back up non-volatile regs -- BPF registers 6-10
|
||||
* If we haven't created our own stack frame, we save these
|
||||
* in the protected zone below the previous stack frame
|
||||
*/
|
||||
for (i = BPF_REG_6; i <= BPF_REG_10; i++)
|
||||
if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
|
||||
EMIT(PPC_RAW_STD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));
|
||||
if (!ctx->exception_cb) {
|
||||
/*
|
||||
* Back up non-volatile regs -- BPF registers 6-10
|
||||
* If we haven't created our own stack frame, we save these
|
||||
* in the protected zone below the previous stack frame
|
||||
*/
|
||||
for (i = BPF_REG_6; i <= BPF_REG_10; i++)
|
||||
if (ctx->exception_boundary || bpf_is_seen_register(ctx, bpf_to_ppc(i)))
|
||||
EMIT(PPC_RAW_STD(bpf_to_ppc(i), _R1,
|
||||
bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));
|
||||
|
||||
if (ctx->arena_vm_start)
|
||||
EMIT(PPC_RAW_STD(bpf_to_ppc(ARENA_VM_START), _R1,
|
||||
if (ctx->exception_boundary || ctx->arena_vm_start)
|
||||
EMIT(PPC_RAW_STD(bpf_to_ppc(ARENA_VM_START), _R1,
|
||||
bpf_jit_stack_offsetof(ctx, bpf_to_ppc(ARENA_VM_START))));
|
||||
} else {
|
||||
/*
|
||||
* Exception callback receives Frame Pointer of boundary
|
||||
* program(main prog) as third arg
|
||||
*/
|
||||
EMIT(PPC_RAW_MR(_R1, _R5));
|
||||
}
|
||||
|
||||
/* Setup frame pointer to point to the bpf stack area */
|
||||
/*
|
||||
* Exception_cb not restricted from using stack area or arena.
|
||||
* Setup frame pointer to point to the bpf stack area
|
||||
*/
|
||||
if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP)))
|
||||
EMIT(PPC_RAW_ADDI(bpf_to_ppc(BPF_REG_FP), _R1,
|
||||
STACK_FRAME_MIN_SIZE + ctx->stack_size));
|
||||
STACK_FRAME_MIN_SIZE + ctx->stack_size));
|
||||
|
||||
if (ctx->arena_vm_start)
|
||||
PPC_LI64(bpf_to_ppc(ARENA_VM_START), ctx->arena_vm_start);
|
||||
@@ -218,17 +241,17 @@ static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx
|
||||
|
||||
/* Restore NVRs */
|
||||
for (i = BPF_REG_6; i <= BPF_REG_10; i++)
|
||||
if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
|
||||
if (ctx->exception_cb || bpf_is_seen_register(ctx, bpf_to_ppc(i)))
|
||||
EMIT(PPC_RAW_LD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));
|
||||
|
||||
if (ctx->arena_vm_start)
|
||||
if (ctx->exception_cb || ctx->arena_vm_start)
|
||||
EMIT(PPC_RAW_LD(bpf_to_ppc(ARENA_VM_START), _R1,
|
||||
bpf_jit_stack_offsetof(ctx, bpf_to_ppc(ARENA_VM_START))));
|
||||
|
||||
/* Tear down our stack frame */
|
||||
if (bpf_has_stack_frame(ctx)) {
|
||||
EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME + ctx->stack_size));
|
||||
if (ctx->seen & SEEN_FUNC) {
|
||||
if (ctx->seen & SEEN_FUNC || ctx->exception_cb) {
|
||||
EMIT(PPC_RAW_LD(_R0, _R1, PPC_LR_STKOFF));
|
||||
EMIT(PPC_RAW_MTLR(_R0));
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user