Commit 94880a5b authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Daniel Borkmann says:

====================
pull-request: bpf 2019-08-31

The following pull-request contains BPF updates for your *net* tree.

The main changes are:

1) Fix 32-bit zero-extension during constant blinding which
   has been causing a regression on ppc64, from Naveen.

2) Fix a latency bug in nfp driver when updating stack index
   register, from Jiong.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents d12040b6 ede7c460
...@@ -1163,7 +1163,7 @@ mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, ...@@ -1163,7 +1163,7 @@ mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
bool clr_gpr, lmem_step step) bool clr_gpr, lmem_step step)
{ {
s32 off = nfp_prog->stack_frame_depth + meta->insn.off + ptr_off; s32 off = nfp_prog->stack_frame_depth + meta->insn.off + ptr_off;
bool first = true, last; bool first = true, narrow_ld, last;
bool needs_inc = false; bool needs_inc = false;
swreg stack_off_reg; swreg stack_off_reg;
u8 prev_gpr = 255; u8 prev_gpr = 255;
...@@ -1209,13 +1209,22 @@ mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, ...@@ -1209,13 +1209,22 @@ mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
needs_inc = true; needs_inc = true;
} }
narrow_ld = clr_gpr && size < 8;
if (lm3) { if (lm3) {
unsigned int nop_cnt;
emit_csr_wr(nfp_prog, imm_b(nfp_prog), NFP_CSR_ACT_LM_ADDR3); emit_csr_wr(nfp_prog, imm_b(nfp_prog), NFP_CSR_ACT_LM_ADDR3);
/* For size < 4 one slot will be filled by zeroing of upper. */ /* For size < 4 one slot will be filled by zeroing of upper,
wrp_nops(nfp_prog, clr_gpr && size < 8 ? 2 : 3); * but be careful, that zeroing could be eliminated by zext
* optimization.
*/
nop_cnt = narrow_ld && meta->flags & FLAG_INSN_DO_ZEXT ? 2 : 3;
wrp_nops(nfp_prog, nop_cnt);
} }
if (clr_gpr && size < 8) if (narrow_ld)
wrp_zext(nfp_prog, meta, gpr); wrp_zext(nfp_prog, meta, gpr);
while (size) { while (size) {
......
...@@ -890,7 +890,8 @@ int bpf_jit_get_func_addr(const struct bpf_prog *prog, ...@@ -890,7 +890,8 @@ int bpf_jit_get_func_addr(const struct bpf_prog *prog,
static int bpf_jit_blind_insn(const struct bpf_insn *from, static int bpf_jit_blind_insn(const struct bpf_insn *from,
const struct bpf_insn *aux, const struct bpf_insn *aux,
struct bpf_insn *to_buff) struct bpf_insn *to_buff,
bool emit_zext)
{ {
struct bpf_insn *to = to_buff; struct bpf_insn *to = to_buff;
u32 imm_rnd = get_random_int(); u32 imm_rnd = get_random_int();
...@@ -1005,6 +1006,8 @@ static int bpf_jit_blind_insn(const struct bpf_insn *from, ...@@ -1005,6 +1006,8 @@ static int bpf_jit_blind_insn(const struct bpf_insn *from,
case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */ case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm); *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
if (emit_zext)
*to++ = BPF_ZEXT_REG(BPF_REG_AX);
*to++ = BPF_ALU64_REG(BPF_OR, aux[0].dst_reg, BPF_REG_AX); *to++ = BPF_ALU64_REG(BPF_OR, aux[0].dst_reg, BPF_REG_AX);
break; break;
...@@ -1088,7 +1091,8 @@ struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog) ...@@ -1088,7 +1091,8 @@ struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
insn[1].code == 0) insn[1].code == 0)
memcpy(aux, insn, sizeof(aux)); memcpy(aux, insn, sizeof(aux));
rewritten = bpf_jit_blind_insn(insn, aux, insn_buff); rewritten = bpf_jit_blind_insn(insn, aux, insn_buff,
clone->aux->verifier_zext);
if (!rewritten) if (!rewritten)
continue; continue;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment