Commit b2e9dfe5 authored by Thomas Gleixner, committed by Peter Zijlstra

x86/bpf: Emit call depth accounting if required

Ensure that calls in BPF jitted programs emit call depth accounting when it is
enabled, so that calls and returns stay balanced. The return thunk jump is
already injected due to the earlier retbleed mitigations.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20220915111148.615413406@infradead.org
parent 396e0b8e
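
To make the diff below easier to follow, here is a minimal, purely illustrative sketch of the technique in plain C. It is not the kernel code: acct_tmpl, emit_accounting() and emit_call_rel32() are made-up stand-ins for skl_call_thunk_template, x86_call_depth_emit_accounting() and emit_call()/emit_patch(). The point is simply that the accounting bytes are copied in front of the CALL, so the CALL's rel32 displacement has to be computed from the address the CALL opcode actually ends up at.

/* Illustrative sketch only, not kernel code. */
#include <stdint.h>
#include <string.h>

/* Placeholder accounting bytes; stands in for skl_call_thunk_template. */
static const uint8_t acct_tmpl[] = { 0x90 };

/* Copy the accounting template to *pprog and return how many bytes were written. */
static int emit_accounting(uint8_t **pprog)
{
	memcpy(*pprog, acct_tmpl, sizeof(acct_tmpl));
	*pprog += sizeof(acct_tmpl);
	return sizeof(acct_tmpl);
}

/* Emit "call rel32"; rel32 is relative to the first byte after the 5-byte CALL. */
static void emit_call_rel32(uint8_t **pprog, void *func, uint8_t *ip)
{
	int32_t rel = (int32_t)((uint8_t *)func - (ip + 5));

	*(*pprog)++ = 0xE8;
	memcpy(*pprog, &rel, sizeof(rel));
	*pprog += sizeof(rel);
}

/* Balanced call: accounting first, then the CALL with its ip advanced accordingly. */
static void emit_balanced_call(uint8_t **pprog, void *func, uint8_t *ip)
{
	int offs = emit_accounting(pprog);

	emit_call_rel32(pprog, func, ip + offs);
}

The BPF_JMP | BPF_CALL hunk below performs exactly this byte-count fix-up via its new offs variable, while emit_rsb_call() bundles the accounting with the call emission for the trampoline paths.
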
arch/x86/include/asm/alternative.h
@@ -93,6 +93,7 @@ extern void callthunks_patch_module_calls(struct callthunk_sites *sites,
 					  struct module *mod);
 extern void *callthunks_translate_call_dest(void *dest);
 extern bool is_callthunk(void *addr);
+extern int x86_call_depth_emit_accounting(u8 **pprog, void *func);
 #else
 static __always_inline void callthunks_patch_builtin_calls(void) {}
 static __always_inline void
@@ -106,6 +107,11 @@ static __always_inline bool is_callthunk(void *addr)
 {
 	return false;
 }
+static __always_inline int x86_call_depth_emit_accounting(u8 **pprog,
+							   void *func)
+{
+	return 0;
+}
 #endif

 #ifdef CONFIG_SMP
arch/x86/kernel/callthunks.c
@@ -306,6 +306,25 @@ bool is_callthunk(void *addr)
 	return !bcmp((void *)(dest - tmpl_size), tmpl, tmpl_size);
 }

+#ifdef CONFIG_BPF_JIT
+int x86_call_depth_emit_accounting(u8 **pprog, void *func)
+{
+	unsigned int tmpl_size = SKL_TMPL_SIZE;
+	void *tmpl = skl_call_thunk_template;
+
+	if (!thunks_initialized)
+		return 0;
+
+	/* Is function call target a thunk? */
+	if (is_callthunk(func))
+		return 0;
+
+	memcpy(*pprog, tmpl, tmpl_size);
+	*pprog += tmpl_size;
+	return tmpl_size;
+}
+#endif
+
 #ifdef CONFIG_MODULES
 void noinline callthunks_patch_module_calls(struct callthunk_sites *cs,
 					    struct module *mod)
arch/x86/net/bpf_jit_comp.c
@@ -340,6 +340,13 @@ static int emit_call(u8 **pprog, void *func, void *ip)
 	return emit_patch(pprog, func, ip, 0xE8);
 }

+static int emit_rsb_call(u8 **pprog, void *func, void *ip)
+{
+	OPTIMIZER_HIDE_VAR(func);
+	x86_call_depth_emit_accounting(pprog, func);
+	return emit_patch(pprog, func, ip, 0xE8);
+}
+
 static int emit_jump(u8 **pprog, void *func, void *ip)
 {
 	return emit_patch(pprog, func, ip, 0xE9);
@@ -1436,19 +1443,26 @@ st:			if (is_imm8(insn->off))
 			break;

 			/* call */
-		case BPF_JMP | BPF_CALL:
+		case BPF_JMP | BPF_CALL: {
+			int offs;
+
 			func = (u8 *) __bpf_call_base + imm32;
 			if (tail_call_reachable) {
 				/* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
 				EMIT3_off32(0x48, 0x8B, 0x85,
 					    -round_up(bpf_prog->aux->stack_depth, 8) - 8);
-				if (!imm32 || emit_call(&prog, func, image + addrs[i - 1] + 7))
+				if (!imm32)
 					return -EINVAL;
+				offs = 7 + x86_call_depth_emit_accounting(&prog, func);
 			} else {
-				if (!imm32 || emit_call(&prog, func, image + addrs[i - 1]))
+				if (!imm32)
 					return -EINVAL;
+				offs = x86_call_depth_emit_accounting(&prog, func);
 			}
+			if (emit_call(&prog, func, image + addrs[i - 1] + offs))
+				return -EINVAL;
 			break;
+		}

 		case BPF_JMP | BPF_TAIL_CALL:
 			if (imm32)
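
An editorial note on the offs arithmetic in the BPF_JMP | BPF_CALL hunk just above (the byte layout is spelled out here for clarity; it is not part of the patch):

  0x48 0x8B 0x85 <disp32>   mov rax, [rbp - rounded_stack_depth - 8]; 3 opcode/ModRM bytes + 4 displacement bytes = 7 (only emitted when tail_call_reachable)
  <accounting template>     bytes copied by x86_call_depth_emit_accounting(), whose size it returns (0 if nothing was emitted)
  0xE8 <rel32>              the CALL; emit_call() computes rel32 relative to the byte after this 5-byte instruction, treating its ip argument as the CALL's address

So image + addrs[i - 1] + offs must point at the CALL opcode itself, which is why offs is 7 plus the accounting size in the tail-call-reachable path and just the accounting size otherwise.
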
@@ -1854,7 +1868,7 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
 	/* arg2: lea rsi, [rbp - ctx_cookie_off] */
 	EMIT4(0x48, 0x8D, 0x75, -run_ctx_off);
-	if (emit_call(&prog, enter, prog))
+	if (emit_rsb_call(&prog, enter, prog))
 		return -EINVAL;
 	/* remember prog start time returned by __bpf_prog_enter */
 	emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
@@ -1875,7 +1889,7 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
 			  (long) p->insnsi >> 32,
 			  (u32) (long) p->insnsi);
 	/* call JITed bpf program or interpreter */
-	if (emit_call(&prog, p->bpf_func, prog))
+	if (emit_rsb_call(&prog, p->bpf_func, prog))
 		return -EINVAL;

 	/*
@@ -1899,7 +1913,7 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
 	emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
 	/* arg3: lea rdx, [rbp - run_ctx_off] */
 	EMIT4(0x48, 0x8D, 0x55, -run_ctx_off);
-	if (emit_call(&prog, exit, prog))
+	if (emit_rsb_call(&prog, exit, prog))
 		return -EINVAL;

 	*pprog = prog;
@@ -2147,7 +2161,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
 		/* arg1: mov rdi, im */
 		emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
-		if (emit_call(&prog, __bpf_tramp_enter, prog)) {
+		if (emit_rsb_call(&prog, __bpf_tramp_enter, prog)) {
 			ret = -EINVAL;
 			goto cleanup;
 		}
@@ -2179,7 +2193,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
 		EMIT2(0xff, 0xd0); /* call *rax */
 	} else {
 		/* call original function */
-		if (emit_call(&prog, orig_call, prog)) {
+		if (emit_rsb_call(&prog, orig_call, prog)) {
 			ret = -EINVAL;
 			goto cleanup;
 		}
@@ -2223,7 +2237,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
 	im->ip_epilogue = prog;
 	/* arg1: mov rdi, im */
 	emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
-	if (emit_call(&prog, __bpf_tramp_exit, prog)) {
+	if (emit_rsb_call(&prog, __bpf_tramp_exit, prog)) {
 		ret = -EINVAL;
 		goto cleanup;
 	}