Commit fdac315d authored by Alexei Starovoitov

Merge branch 'arm64-jit-fixes'

Daniel Borkmann says:

====================
This set contains a fix for the arm64 BPF JIT. The first patch generalizes
the ppc64 way of retrieving the subprog address into bpf_jit_get_func_addr()
as core code, and the second patch uses it on arm64. Tested on both arm64
and ppc64.
====================
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents 69500127 8c11ea5c
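
For orientation before the diffs: the series moves the call-target lookup for
BPF_JMP | BPF_CALL out of the arch JITs and into a core helper,
bpf_jit_get_func_addr(). Below is a minimal sketch (not a verbatim copy of
either JIT) of how an arch JIT is expected to consume that helper;
emit_call_fixed() and emit_call_indirect() are hypothetical stand-ins for the
real arch-specific emitters (emit_a64_mov_i64()/emit_addr_mov_i64() plus BLR
on arm64, bpf_jit_emit_func_call_hlp()/_rel() on ppc64).

/* Sketch only, assuming kernel JIT context: handling BPF_JMP | BPF_CALL
 * with the new core helper. emit_call_fixed() and emit_call_indirect()
 * are hypothetical placeholders for the arch emitters.
 */
static int jit_emit_bpf_call(struct jit_ctx *ctx, const struct bpf_insn *insn,
			     bool extra_pass)
{
	bool func_addr_fixed;
	u64 func_addr;
	int ret;

	/* Core code decides whether the target is a BPF helper (address
	 * fixed in every pass) or a subprog (NULL placeholder until the
	 * extra pass can read prog->aux->func[insn->off]->bpf_func).
	 */
	ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
				    &func_addr, &func_addr_fixed);
	if (ret < 0)
		return ret;

	if (func_addr_fixed)
		/* Target never changes: a size-optimized immediate load is safe. */
		emit_call_fixed(ctx, func_addr);
	else
		/* Target may change between passes: emit a fixed-size
		 * (worst-case) address load so the image size stays stable.
		 */
		emit_call_indirect(ctx, func_addr);

	return 0;
}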
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -351,7 +351,8 @@ static void build_epilogue(struct jit_ctx *ctx)
  * >0 - successfully JITed a 16-byte eBPF instruction.
  * <0 - failed to JIT.
  */
-static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
+static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
+		      bool extra_pass)
 {
 	const u8 code = insn->code;
 	const u8 dst = bpf2a64[insn->dst_reg];
@@ -625,12 +626,19 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 	case BPF_JMP | BPF_CALL:
 	{
 		const u8 r0 = bpf2a64[BPF_REG_0];
-		const u64 func = (u64)__bpf_call_base + imm;
+		bool func_addr_fixed;
+		u64 func_addr;
+		int ret;
 
-		if (ctx->prog->is_func)
-			emit_addr_mov_i64(tmp, func, ctx);
+		ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
+					    &func_addr, &func_addr_fixed);
+		if (ret < 0)
+			return ret;
+		if (func_addr_fixed)
+			/* We can use optimized emission here. */
+			emit_a64_mov_i64(tmp, func_addr, ctx);
 		else
-			emit_a64_mov_i64(tmp, func, ctx);
+			emit_addr_mov_i64(tmp, func_addr, ctx);
 		emit(A64_BLR(tmp), ctx);
 		emit(A64_MOV(1, r0, A64_R(0)), ctx);
 		break;
@@ -753,7 +761,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 	return 0;
 }
 
-static int build_body(struct jit_ctx *ctx)
+static int build_body(struct jit_ctx *ctx, bool extra_pass)
 {
 	const struct bpf_prog *prog = ctx->prog;
 	int i;
@@ -762,7 +770,7 @@ static int build_body(struct jit_ctx *ctx)
 		const struct bpf_insn *insn = &prog->insnsi[i];
 		int ret;
 
-		ret = build_insn(insn, ctx);
+		ret = build_insn(insn, ctx, extra_pass);
 		if (ret > 0) {
 			i++;
 			if (ctx->image == NULL)
@@ -858,7 +866,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	/* 1. Initial fake pass to compute ctx->idx. */
 
 	/* Fake pass to fill in ctx->offset. */
-	if (build_body(&ctx)) {
+	if (build_body(&ctx, extra_pass)) {
 		prog = orig_prog;
 		goto out_off;
 	}
@@ -888,7 +896,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 
 	build_prologue(&ctx, was_classic);
 
-	if (build_body(&ctx)) {
+	if (build_body(&ctx, extra_pass)) {
 		bpf_jit_binary_free(header);
 		prog = orig_prog;
 		goto out_off;
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -166,7 +166,33 @@ static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
 	PPC_BLR();
 }
 
-static void bpf_jit_emit_func_call(u32 *image, struct codegen_context *ctx, u64 func)
+static void bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx,
+				       u64 func)
+{
+#ifdef PPC64_ELF_ABI_v1
+	/* func points to the function descriptor */
+	PPC_LI64(b2p[TMP_REG_2], func);
+	/* Load actual entry point from function descriptor */
+	PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_2], 0);
+	/* ... and move it to LR */
+	PPC_MTLR(b2p[TMP_REG_1]);
+	/*
+	 * Load TOC from function descriptor at offset 8.
+	 * We can clobber r2 since we get called through a
+	 * function pointer (so caller will save/restore r2)
+	 * and since we don't use a TOC ourself.
+	 */
+	PPC_BPF_LL(2, b2p[TMP_REG_2], 8);
+#else
+	/* We can clobber r12 */
+	PPC_FUNC_ADDR(12, func);
+	PPC_MTLR(12);
+#endif
+	PPC_BLRL();
+}
+
+static void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx,
+				       u64 func)
 {
 	unsigned int i, ctx_idx = ctx->idx;
 
@@ -273,7 +299,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
 {
 	const struct bpf_insn *insn = fp->insnsi;
 	int flen = fp->len;
-	int i;
+	int i, ret;
 
 	/* Start of epilogue code - will only be valid 2nd pass onwards */
 	u32 exit_addr = addrs[flen];
@@ -284,8 +310,9 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
 		u32 src_reg = b2p[insn[i].src_reg];
 		s16 off = insn[i].off;
 		s32 imm = insn[i].imm;
+		bool func_addr_fixed;
+		u64 func_addr;
 		u64 imm64;
-		u8 *func;
 		u32 true_cond;
 		u32 tmp_idx;
 
@@ -711,23 +738,15 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
 		case BPF_JMP | BPF_CALL:
 			ctx->seen |= SEEN_FUNC;
 
-			/* bpf function call */
-			if (insn[i].src_reg == BPF_PSEUDO_CALL)
-				if (!extra_pass)
-					func = NULL;
-				else if (fp->aux->func && off < fp->aux->func_cnt)
-					/* use the subprog id from the off
-					 * field to lookup the callee address
-					 */
-					func = (u8 *) fp->aux->func[off]->bpf_func;
-				else
-					return -EINVAL;
-			/* kernel helper call */
-			else
-				func = (u8 *) __bpf_call_base + imm;
-
-			bpf_jit_emit_func_call(image, ctx, (u64)func);
+			ret = bpf_jit_get_func_addr(fp, &insn[i], extra_pass,
+						    &func_addr, &func_addr_fixed);
+			if (ret < 0)
+				return ret;
 
+			if (func_addr_fixed)
+				bpf_jit_emit_func_call_hlp(image, ctx, func_addr);
+			else
+				bpf_jit_emit_func_call_rel(image, ctx, func_addr);
 			/* move return value from r3 to BPF_REG_0 */
 			PPC_MR(b2p[BPF_REG_0], 3);
 			break;
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -866,6 +866,10 @@ void bpf_jit_binary_free(struct bpf_binary_header *hdr);
 void bpf_jit_free(struct bpf_prog *fp);
 
+int bpf_jit_get_func_addr(const struct bpf_prog *prog,
+			  const struct bpf_insn *insn, bool extra_pass,
+			  u64 *func_addr, bool *func_addr_fixed);
+
 struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp);
 void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other);
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -672,6 +672,40 @@ void __weak bpf_jit_free(struct bpf_prog *fp)
 	bpf_prog_unlock_free(fp);
 }
 
+int bpf_jit_get_func_addr(const struct bpf_prog *prog,
+			  const struct bpf_insn *insn, bool extra_pass,
+			  u64 *func_addr, bool *func_addr_fixed)
+{
+	s16 off = insn->off;
+	s32 imm = insn->imm;
+	u8 *addr;
+
+	*func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
+	if (!*func_addr_fixed) {
+		/* Place-holder address till the last pass has collected
+		 * all addresses for JITed subprograms in which case we
+		 * can pick them up from prog->aux.
+		 */
+		if (!extra_pass)
+			addr = NULL;
+		else if (prog->aux->func &&
+			 off >= 0 && off < prog->aux->func_cnt)
+			addr = (u8 *)prog->aux->func[off]->bpf_func;
+		else
+			return -EINVAL;
+	} else {
+		/* Address of a BPF helper call. Since part of the core
+		 * kernel, it's always at a fixed location. __bpf_call_base
+		 * and the helper with imm relative to it are both in core
+		 * kernel.
+		 */
+		addr = (u8 *)__bpf_call_base + imm;
+	}
+
+	*func_addr = (unsigned long)addr;
+	return 0;
+}
+
 static int bpf_jit_blind_insn(const struct bpf_insn *from,
 			      const struct bpf_insn *aux,
 			      struct bpf_insn *to_buff)