Commit 485b7778 authored by David S. Miller

Merge branch 'bpf-blinding'

Daniel Borkmann says:

====================
BPF updates

This set implements constant blinding for BPF. The first couple of
patches are preparatory cleanups, followed by the blinding itself.
Please see the individual patches for details.

Thanks a lot!
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 553eb544 d93a47f7
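
Background for the diff below: the blinding pre-step, bpf_jit_blind_constants(), rewrites every eBPF instruction that carries a user-supplied immediate so the constant never appears verbatim in the JITed image; the JIT only ever sees immediates that have been folded with a per-instruction random value. A rough sketch of the rewrite for "r0 += imm" (illustrative only, not the in-tree implementation; the helper name below is made up, while BPF_REG_AX and the insn macros are the ones used by this series):

    /* Hypothetical illustration: blind the immediate of "r0 += imm".
     * rnd is a fresh random 32-bit value chosen for this one rewrite.
     */
    static void example_blind_add_imm(struct bpf_insn *to, u32 imm, u32 rnd)
    {
        to[0] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, rnd ^ imm); /* ax = rnd ^ imm */
        to[1] = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, rnd);       /* ax ^= rnd, so ax == imm */
        to[2] = BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_AX); /* r0 += ax */
    }

The hidden BPF_REG_AX register used for this rewrite is what most of the per-architecture JIT changes below have to accommodate.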
@@ -43,6 +43,17 @@ Values :
 	1 - enable the JIT
 	2 - enable the JIT and ask the compiler to emit traces on kernel log.
 
+bpf_jit_harden
+--------------
+
+This enables hardening for the Berkeley Packet Filter Just in Time compiler.
+Supported are eBPF JIT backends. Enabling hardening trades off performance,
+but can mitigate JIT spraying.
+Values :
+	0 - disable JIT hardening (default value)
+	1 - enable JIT hardening for unprivileged users only
+	2 - enable JIT hardening for all users
+
 dev_weight
 --------------
...
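With the series applied, both knobs documented above are runtime sysctls. As a usage example (assuming a privileged shell; the bpf_jit_harden file only exists on eBPF JIT backends, see the CONFIG_HAVE_EBPF_JIT guard in the sysctl table change at the end of this diff):

    echo 1 > /proc/sys/net/core/bpf_jit_enable
    echo 2 > /proc/sys/net/core/bpf_jit_harden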
@@ -41,7 +41,7 @@ config ARM
 	select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_ARM_SMCCC if CPU_V7
-	select HAVE_BPF_JIT
+	select HAVE_CBPF_JIT
 	select HAVE_CC_STACKPROTECTOR
 	select HAVE_CONTEXT_TRACKING
 	select HAVE_C_RECORDMCOUNT
...
@@ -58,7 +58,7 @@ config ARM64
 	select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
 	select HAVE_ARCH_SECCOMP_FILTER
 	select HAVE_ARCH_TRACEHOOK
-	select HAVE_BPF_JIT
+	select HAVE_EBPF_JIT
 	select HAVE_C_RECORDMCOUNT
 	select HAVE_CC_STACKPROTECTOR
 	select HAVE_CMPXCHG_DOUBLE
...
@@ -31,8 +31,8 @@
 
 int bpf_jit_enable __read_mostly;
 
-#define TMP_REG_1 (MAX_BPF_REG + 0)
-#define TMP_REG_2 (MAX_BPF_REG + 1)
+#define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
+#define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
 
 /* Map BPF registers to A64 registers */
 static const int bpf2a64[] = {
@@ -54,6 +54,8 @@ static const int bpf2a64[] = {
 	/* temporary register for internal BPF JIT */
 	[TMP_REG_1] = A64_R(23),
 	[TMP_REG_2] = A64_R(24),
+	/* temporary register for blinding constants */
+	[BPF_REG_AX] = A64_R(9),
 };
 
 struct jit_ctx {
@@ -762,31 +764,45 @@ void bpf_jit_compile(struct bpf_prog *prog)
 	/* Nothing to do here. We support Internal BPF. */
 }
 
-void bpf_int_jit_compile(struct bpf_prog *prog)
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 {
+	struct bpf_prog *tmp, *orig_prog = prog;
 	struct bpf_binary_header *header;
+	bool tmp_blinded = false;
 	struct jit_ctx ctx;
 	int image_size;
 	u8 *image_ptr;
 
 	if (!bpf_jit_enable)
-		return;
+		return orig_prog;
 
-	if (!prog || !prog->len)
-		return;
+	tmp = bpf_jit_blind_constants(prog);
+	/* If blinding was requested and we failed during blinding,
+	 * we must fall back to the interpreter.
+	 */
+	if (IS_ERR(tmp))
+		return orig_prog;
+	if (tmp != prog) {
+		tmp_blinded = true;
+		prog = tmp;
+	}
 
 	memset(&ctx, 0, sizeof(ctx));
 	ctx.prog = prog;
 
 	ctx.offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
-	if (ctx.offset == NULL)
-		return;
+	if (ctx.offset == NULL) {
+		prog = orig_prog;
+		goto out;
+	}
 
 	/* 1. Initial fake pass to compute ctx->idx. */
 
 	/* Fake pass to fill in ctx->offset and ctx->tmp_used. */
-	if (build_body(&ctx))
-		goto out;
+	if (build_body(&ctx)) {
+		prog = orig_prog;
+		goto out_off;
+	}
 
 	build_prologue(&ctx);
@@ -797,8 +813,10 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
 	image_size = sizeof(u32) * ctx.idx;
 	header = bpf_jit_binary_alloc(image_size, &image_ptr,
 				      sizeof(u32), jit_fill_hole);
-	if (header == NULL)
-		goto out;
+	if (header == NULL) {
+		prog = orig_prog;
+		goto out_off;
+	}
 
 	/* 2. Now, the actual pass. */
@@ -809,7 +827,8 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
 
 	if (build_body(&ctx)) {
 		bpf_jit_binary_free(header);
-		goto out;
+		prog = orig_prog;
+		goto out_off;
 	}
 
 	build_epilogue(&ctx);
@@ -817,7 +836,8 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
 	/* 3. Extra pass to validate JITed code. */
 	if (validate_code(&ctx)) {
 		bpf_jit_binary_free(header);
-		goto out;
+		prog = orig_prog;
+		goto out_off;
 	}
 
 	/* And we're done. */
@@ -829,8 +849,14 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
 	set_memory_ro((unsigned long)header, header->pages);
 	prog->bpf_func = (void *)ctx.image;
 	prog->jited = 1;
-out:
+
+out_off:
 	kfree(ctx.offset);
+out:
+	if (tmp_blinded)
+		bpf_jit_prog_release_other(prog, prog == orig_prog ?
+					   tmp : orig_prog);
+	return prog;
 }
 
 void bpf_jit_free(struct bpf_prog *prog)
...
@@ -15,7 +15,7 @@ config MIPS
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_SECCOMP_FILTER
 	select HAVE_ARCH_TRACEHOOK
-	select HAVE_BPF_JIT if !CPU_MICROMIPS
+	select HAVE_CBPF_JIT if !CPU_MICROMIPS
 	select HAVE_FUNCTION_TRACER
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FTRACE_MCOUNT_RECORD
...
@@ -126,7 +126,7 @@ config PPC
 	select IRQ_FORCED_THREADING
 	select HAVE_RCU_TABLE_FREE if SMP
 	select HAVE_SYSCALL_TRACEPOINTS
-	select HAVE_BPF_JIT
+	select HAVE_CBPF_JIT
 	select HAVE_ARCH_JUMP_LABEL
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select ARCH_HAS_GCOV_PROFILE_ALL
...
@@ -126,7 +126,7 @@ config S390
 	select HAVE_ARCH_SOFT_DIRTY
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_ARCH_TRANSPARENT_HUGEPAGE
-	select HAVE_BPF_JIT if PACK_STACK && HAVE_MARCH_Z196_FEATURES
+	select HAVE_EBPF_JIT if PACK_STACK && HAVE_MARCH_Z196_FEATURES
 	select HAVE_CMPXCHG_DOUBLE
 	select HAVE_CMPXCHG_LOCAL
 	select HAVE_DEBUG_KMEMLEAK
...
@@ -54,16 +54,17 @@ struct bpf_jit {
 #define SEEN_FUNC	16	/* calls C functions */
 #define SEEN_TAIL_CALL	32	/* code uses tail calls */
 #define SEEN_SKB_CHANGE	64	/* code changes skb data */
+#define SEEN_REG_AX	128	/* code uses constant blinding */
 #define SEEN_STACK	(SEEN_FUNC | SEEN_MEM | SEEN_SKB)
 
 /*
  * s390 registers
  */
-#define REG_W0		(__MAX_BPF_REG+0)	/* Work register 1 (even) */
-#define REG_W1		(__MAX_BPF_REG+1)	/* Work register 2 (odd) */
-#define REG_SKB_DATA	(__MAX_BPF_REG+2)	/* SKB data register */
-#define REG_L		(__MAX_BPF_REG+3)	/* Literal pool register */
-#define REG_15		(__MAX_BPF_REG+4)	/* Register 15 */
+#define REG_W0		(MAX_BPF_JIT_REG + 0)	/* Work register 1 (even) */
+#define REG_W1		(MAX_BPF_JIT_REG + 1)	/* Work register 2 (odd) */
+#define REG_SKB_DATA	(MAX_BPF_JIT_REG + 2)	/* SKB data register */
+#define REG_L		(MAX_BPF_JIT_REG + 3)	/* Literal pool register */
+#define REG_15		(MAX_BPF_JIT_REG + 4)	/* Register 15 */
 #define REG_0		REG_W0			/* Register 0 */
 #define REG_1		REG_W1			/* Register 1 */
 #define REG_2		BPF_REG_1		/* Register 2 */
@@ -88,6 +89,8 @@ static const int reg2hex[] = {
 	[BPF_REG_9]	= 10,
 	/* BPF stack pointer */
 	[BPF_REG_FP]	= 13,
+	/* Register for blinding (shared with REG_SKB_DATA) */
+	[BPF_REG_AX]	= 12,
 	/* SKB data pointer */
 	[REG_SKB_DATA]	= 12,
 	/* Work registers for s390x backend */
@@ -385,7 +388,7 @@ static void save_restore_regs(struct bpf_jit *jit, int op)
 /*
  * For SKB access %b1 contains the SKB pointer. For "bpf_jit.S"
  * we store the SKB header length on the stack and the SKB data
- * pointer in REG_SKB_DATA.
+ * pointer in REG_SKB_DATA if BPF_REG_AX is not used.
  */
 static void emit_load_skb_data_hlen(struct bpf_jit *jit)
 {
@@ -397,9 +400,10 @@ static void emit_load_skb_data_hlen(struct bpf_jit *jit)
 		      offsetof(struct sk_buff, data_len));
 	/* stg %w1,ST_OFF_HLEN(%r0,%r15) */
 	EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, REG_15, STK_OFF_HLEN);
-	/* lg %skb_data,data_off(%b1) */
-	EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0,
-		      BPF_REG_1, offsetof(struct sk_buff, data));
+	if (!(jit->seen & SEEN_REG_AX))
+		/* lg %skb_data,data_off(%b1) */
+		EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0,
+			      BPF_REG_1, offsetof(struct sk_buff, data));
 }
 
 /*
@@ -487,6 +491,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
 	s32 imm = insn->imm;
 	s16 off = insn->off;
 
+	if (dst_reg == BPF_REG_AX || src_reg == BPF_REG_AX)
+		jit->seen |= SEEN_REG_AX;
 	switch (insn->code) {
 	/*
 	 * BPF_MOV
@@ -1188,7 +1194,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
 		/*
 		 * Implicit input:
 		 *  BPF_REG_6	 (R7) : skb pointer
-		 *  REG_SKB_DATA (R12): skb data pointer
+		 *  REG_SKB_DATA (R12): skb data pointer (if no BPF_REG_AX)
 		 *
 		 * Calculated input:
 		 *  BPF_REG_2	 (R3) : offset of byte(s) to fetch in skb
@@ -1209,6 +1215,11 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
 			/* agfr %b2,%src (%src is s32 here) */
 			EMIT4(0xb9180000, BPF_REG_2, src_reg);
 
+		/* Reload REG_SKB_DATA if BPF_REG_AX is used */
+		if (jit->seen & SEEN_REG_AX)
+			/* lg %skb_data,data_off(%b6) */
+			EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0,
+				      BPF_REG_6, offsetof(struct sk_buff, data));
 		/* basr %b5,%w1 (%b5 is call saved) */
 		EMIT2(0x0d00, BPF_REG_5, REG_W1);
@@ -1262,37 +1273,62 @@ void bpf_jit_compile(struct bpf_prog *fp)
 /*
  * Compile eBPF program "fp"
  */
-void bpf_int_jit_compile(struct bpf_prog *fp)
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 {
+	struct bpf_prog *tmp, *orig_fp = fp;
 	struct bpf_binary_header *header;
+	bool tmp_blinded = false;
 	struct bpf_jit jit;
 	int pass;
 
 	if (!bpf_jit_enable)
-		return;
+		return orig_fp;
+
+	tmp = bpf_jit_blind_constants(fp);
+	/*
+	 * If blinding was requested and we failed during blinding,
+	 * we must fall back to the interpreter.
+	 */
+	if (IS_ERR(tmp))
+		return orig_fp;
+	if (tmp != fp) {
+		tmp_blinded = true;
+		fp = tmp;
+	}
+
 	memset(&jit, 0, sizeof(jit));
 	jit.addrs = kcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL);
-	if (jit.addrs == NULL)
-		return;
+	if (jit.addrs == NULL) {
+		fp = orig_fp;
+		goto out;
+	}
 	/*
 	 * Three initial passes:
 	 *   - 1/2: Determine clobbered registers
 	 *   - 3:   Calculate program size and addrs arrray
 	 */
 	for (pass = 1; pass <= 3; pass++) {
-		if (bpf_jit_prog(&jit, fp))
+		if (bpf_jit_prog(&jit, fp)) {
+			fp = orig_fp;
 			goto free_addrs;
+		}
 	}
 	/*
 	 * Final pass: Allocate and generate program
 	 */
-	if (jit.size >= BPF_SIZE_MAX)
+	if (jit.size >= BPF_SIZE_MAX) {
+		fp = orig_fp;
 		goto free_addrs;
+	}
 	header = bpf_jit_binary_alloc(jit.size, &jit.prg_buf, 2, jit_fill_hole);
-	if (!header)
+	if (!header) {
+		fp = orig_fp;
 		goto free_addrs;
-	if (bpf_jit_prog(&jit, fp))
+	}
+	if (bpf_jit_prog(&jit, fp)) {
+		fp = orig_fp;
 		goto free_addrs;
+	}
 	if (bpf_jit_enable > 1) {
 		bpf_jit_dump(fp->len, jit.size, pass, jit.prg_buf);
 		if (jit.prg_buf)
@@ -1305,6 +1341,11 @@ void bpf_int_jit_compile(struct bpf_prog *fp)
 	}
 free_addrs:
 	kfree(jit.addrs);
+out:
+	if (tmp_blinded)
+		bpf_jit_prog_release_other(fp, fp == orig_fp ?
+					   tmp : orig_fp);
+	return fp;
 }
 
 /*
...
@@ -32,7 +32,7 @@ config SPARC
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select GENERIC_PCI_IOMAP
 	select HAVE_NMI_WATCHDOG if SPARC64
-	select HAVE_BPF_JIT
+	select HAVE_CBPF_JIT
 	select HAVE_DEBUG_BUGVERBOSE
 	select GENERIC_SMP_IDLE_THREAD
 	select GENERIC_CLOCKEVENTS
...
@@ -91,7 +91,7 @@ config X86
 	select HAVE_ARCH_SOFT_DIRTY if X86_64
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_ARCH_TRANSPARENT_HUGEPAGE
-	select HAVE_BPF_JIT if X86_64
+	select HAVE_EBPF_JIT if X86_64
 	select HAVE_CC_STACKPROTECTOR
 	select HAVE_CMPXCHG_DOUBLE
 	select HAVE_CMPXCHG_LOCAL
...
@@ -110,11 +110,16 @@ static void bpf_flush_icache(void *start, void *end)
 	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
 
 /* pick a register outside of BPF range for JIT internal work */
-#define AUX_REG (MAX_BPF_REG + 1)
+#define AUX_REG (MAX_BPF_JIT_REG + 1)
 
-/* the following table maps BPF registers to x64 registers.
- * x64 register r12 is unused, since if used as base address register
- * in load/store instructions, it always needs an extra byte of encoding
+/* The following table maps BPF registers to x64 registers.
+ *
+ * x64 register r12 is unused, since if used as base address
+ * register in load/store instructions, it always needs an
+ * extra byte of encoding and is callee saved.
+ *
+ * r9 caches skb->len - skb->data_len
+ * r10 caches skb->data, and used for blinding (if enabled)
  */
 static const int reg2hex[] = {
 	[BPF_REG_0] = 0,  /* rax */
@@ -128,6 +133,7 @@ static const int reg2hex[] = {
 	[BPF_REG_8] = 6,  /* r14 callee saved */
 	[BPF_REG_9] = 7,  /* r15 callee saved */
 	[BPF_REG_FP] = 5, /* rbp readonly */
+	[BPF_REG_AX] = 2, /* r10 temp register */
 	[AUX_REG] = 3,    /* r11 temp register */
 };
 
@@ -141,7 +147,8 @@ static bool is_ereg(u32 reg)
 			     BIT(AUX_REG) |
 			     BIT(BPF_REG_7) |
 			     BIT(BPF_REG_8) |
-			     BIT(BPF_REG_9));
+			     BIT(BPF_REG_9) |
+			     BIT(BPF_REG_AX));
 }
 
 /* add modifiers if 'reg' maps to x64 registers r8..r15 */
@@ -182,6 +189,7 @@ static void jit_fill_hole(void *area, unsigned int size)
 struct jit_context {
 	int cleanup_addr; /* epilogue code offset */
 	bool seen_ld_abs;
+	bool seen_ax_reg;
 };
 
 /* maximum number of bytes emitted while JITing one eBPF insn */
@@ -345,6 +353,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 	struct bpf_insn *insn = bpf_prog->insnsi;
 	int insn_cnt = bpf_prog->len;
 	bool seen_ld_abs = ctx->seen_ld_abs | (oldproglen == 0);
+	bool seen_ax_reg = ctx->seen_ax_reg | (oldproglen == 0);
 	bool seen_exit = false;
 	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
 	int i, cnt = 0;
@@ -367,6 +376,9 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 		int ilen;
 		u8 *func;
 
+		if (dst_reg == BPF_REG_AX || src_reg == BPF_REG_AX)
+			ctx->seen_ax_reg = seen_ax_reg = true;
+
 		switch (insn->code) {
 			/* ALU */
 		case BPF_ALU | BPF_ADD | BPF_X:
@@ -1002,6 +1014,10 @@ xadd:			if (is_imm8(insn->off))
 			 * sk_load_* helpers also use %r10 and %r9d.
 			 * See bpf_jit.S
 			 */
+			if (seen_ax_reg)
+				/* r10 = skb->data, mov %r10, off32(%rbx) */
+				EMIT3_off32(0x4c, 0x8b, 0x93,
+					    offsetof(struct sk_buff, data));
 			EMIT1_off32(0xE8, jmp_offset); /* call */
 			break;
@@ -1073,25 +1089,37 @@ void bpf_jit_compile(struct bpf_prog *prog)
 {
 }
 
-void bpf_int_jit_compile(struct bpf_prog *prog)
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 {
 	struct bpf_binary_header *header = NULL;
+	struct bpf_prog *tmp, *orig_prog = prog;
 	int proglen, oldproglen = 0;
 	struct jit_context ctx = {};
+	bool tmp_blinded = false;
 	u8 *image = NULL;
 	int *addrs;
 	int pass;
 	int i;
 
 	if (!bpf_jit_enable)
-		return;
+		return orig_prog;
 
-	if (!prog || !prog->len)
-		return;
+	tmp = bpf_jit_blind_constants(prog);
+	/* If blinding was requested and we failed during blinding,
+	 * we must fall back to the interpreter.
+	 */
+	if (IS_ERR(tmp))
+		return orig_prog;
+	if (tmp != prog) {
+		tmp_blinded = true;
+		prog = tmp;
+	}
 
 	addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL);
-	if (!addrs)
-		return;
+	if (!addrs) {
+		prog = orig_prog;
+		goto out;
+	}
 
 	/* Before first pass, make a rough estimation of addrs[]
 	 * each bpf instruction is translated to less than 64 bytes
@@ -1113,21 +1141,25 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
 			image = NULL;
 			if (header)
 				bpf_jit_binary_free(header);
-			goto out;
+			prog = orig_prog;
+			goto out_addrs;
 		}
 		if (image) {
 			if (proglen != oldproglen) {
 				pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
 				       proglen, oldproglen);
-				goto out;
+				prog = orig_prog;
+				goto out_addrs;
 			}
 			break;
 		}
 		if (proglen == oldproglen) {
 			header = bpf_jit_binary_alloc(proglen, &image,
 						      1, jit_fill_hole);
-			if (!header)
-				goto out;
+			if (!header) {
+				prog = orig_prog;
+				goto out_addrs;
+			}
 		}
 		oldproglen = proglen;
 	}
@@ -1141,8 +1173,14 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
 		prog->bpf_func = (void *)image;
 		prog->jited = 1;
 	}
-out:
+
+out_addrs:
 	kfree(addrs);
+out:
+	if (tmp_blinded)
+		bpf_jit_prog_release_other(prog, prog == orig_prog ?
+					   tmp : orig_prog);
+	return prog;
 }
 
 void bpf_jit_free(struct bpf_prog *fp)
...
@@ -13,6 +13,8 @@
 #include <linux/printk.h>
 #include <linux/workqueue.h>
 #include <linux/sched.h>
+#include <linux/capability.h>
+
 #include <net/sch_generic.h>
 
 #include <asm/cacheflush.h>
@@ -42,6 +44,15 @@ struct bpf_prog_aux;
 #define BPF_REG_X	BPF_REG_7
 #define BPF_REG_TMP	BPF_REG_8
 
+/* Kernel hidden auxiliary/helper register for hardening step.
+ * Only used by eBPF JITs. It's nothing more than a temporary
+ * register that JITs use internally, only that here it's part
+ * of eBPF instructions that have been rewritten for blinding
+ * constants. See JIT pre-step in bpf_jit_blind_constants().
+ */
+#define BPF_REG_AX		MAX_BPF_REG
+#define MAX_BPF_JIT_REG		(MAX_BPF_REG + 1)
+
 /* BPF program can access up to 512 bytes of stack space. */
 #define MAX_BPF_STACK	512
@@ -458,7 +469,7 @@ static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
 
 int sk_filter(struct sock *sk, struct sk_buff *skb);
 
-int bpf_prog_select_runtime(struct bpf_prog *fp);
+struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err);
 void bpf_prog_free(struct bpf_prog *fp);
 
 struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
@@ -492,10 +503,17 @@ bool sk_filter_charge(struct sock *sk, struct sk_filter *fp);
 void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
 
 u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
-void bpf_int_jit_compile(struct bpf_prog *fp);
+
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
 bool bpf_helper_changes_skb_data(void *func);
 
+struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
+				       const struct bpf_insn *patch, u32 len);
+
 #ifdef CONFIG_BPF_JIT
+extern int bpf_jit_enable;
+extern int bpf_jit_harden;
+
 typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
 
 struct bpf_binary_header *
@@ -507,6 +525,9 @@ void bpf_jit_binary_free(struct bpf_binary_header *hdr);
 void bpf_jit_compile(struct bpf_prog *fp);
 void bpf_jit_free(struct bpf_prog *fp);
 
+struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp);
+void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other);
+
 static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
 				u32 pass, void *image)
 {
@@ -517,6 +538,33 @@ static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
 	print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
 		       16, 1, image, proglen, false);
 }
+
+static inline bool bpf_jit_is_ebpf(void)
+{
+# ifdef CONFIG_HAVE_EBPF_JIT
+	return true;
+# else
+	return false;
+# endif
+}
+
+static inline bool bpf_jit_blinding_enabled(void)
+{
+	/* These are the prerequisites, should someone ever have the
+	 * idea to call blinding outside of them, we make sure to
+	 * bail out.
+	 */
+	if (!bpf_jit_is_ebpf())
+		return false;
+	if (!bpf_jit_enable)
+		return false;
+	if (!bpf_jit_harden)
+		return false;
+	if (bpf_jit_harden == 1 && capable(CAP_SYS_ADMIN))
+		return false;
+
+	return true;
+}
 #else
 static inline void bpf_jit_compile(struct bpf_prog *fp)
 {
...
@@ -3759,7 +3759,6 @@ void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
 extern int netdev_max_backlog;
 extern int netdev_tstamp_prequeue;
 extern int weight_p;
-extern int bpf_jit_enable;
 
 bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
...
This diff is collapsed.
@@ -762,7 +762,7 @@ static int bpf_prog_load(union bpf_attr *attr)
 	fixup_bpf_calls(prog);
 
 	/* eBPF program is ready to be JITed */
-	err = bpf_prog_select_runtime(prog);
+	prog = bpf_prog_select_runtime(prog, &err);
 	if (err < 0)
 		goto free_used_maps;
...
@@ -2587,26 +2587,6 @@ static void convert_pseudo_ld_imm64(struct verifier_env *env)
 		insn->src_reg = 0;
 }
 
-static void adjust_branches(struct bpf_prog *prog, int pos, int delta)
-{
-	struct bpf_insn *insn = prog->insnsi;
-	int insn_cnt = prog->len;
-	int i;
-
-	for (i = 0; i < insn_cnt; i++, insn++) {
-		if (BPF_CLASS(insn->code) != BPF_JMP ||
-		    BPF_OP(insn->code) == BPF_CALL ||
-		    BPF_OP(insn->code) == BPF_EXIT)
-			continue;
-
-		/* adjust offset of jmps if necessary */
-		if (i < pos && i + insn->off + 1 > pos)
-			insn->off += delta;
-		else if (i > pos + delta && i + insn->off + 1 <= pos + delta)
-			insn->off -= delta;
-	}
-}
-
 /* convert load instructions that access fields of 'struct __sk_buff'
  * into sequence of instructions that access fields of 'struct sk_buff'
  */
@@ -2616,14 +2596,15 @@ static int convert_ctx_accesses(struct verifier_env *env)
 	int insn_cnt = env->prog->len;
 	struct bpf_insn insn_buf[16];
 	struct bpf_prog *new_prog;
-	u32 cnt;
-	int i;
 	enum bpf_access_type type;
+	int i;
 
 	if (!env->prog->aux->ops->convert_ctx_access)
 		return 0;
 
 	for (i = 0; i < insn_cnt; i++, insn++) {
+		u32 insn_delta, cnt;
+
 		if (insn->code == (BPF_LDX | BPF_MEM | BPF_W))
 			type = BPF_READ;
 		else if (insn->code == (BPF_STX | BPF_MEM | BPF_W))
@@ -2645,34 +2626,18 @@ static int convert_ctx_accesses(struct verifier_env *env)
 			return -EINVAL;
 		}
 
-		if (cnt == 1) {
-			memcpy(insn, insn_buf, sizeof(*insn));
-			continue;
-		}
-
-		/* several new insns need to be inserted. Make room for them */
-		insn_cnt += cnt - 1;
-		new_prog = bpf_prog_realloc(env->prog,
-					    bpf_prog_size(insn_cnt),
-					    GFP_USER);
+		new_prog = bpf_patch_insn_single(env->prog, i, insn_buf, cnt);
 		if (!new_prog)
 			return -ENOMEM;
 
-		new_prog->len = insn_cnt;
-
-		memmove(new_prog->insnsi + i + cnt, new_prog->insns + i + 1,
-			sizeof(*insn) * (insn_cnt - i - cnt));
-
-		/* copy substitute insns in place of load instruction */
-		memcpy(new_prog->insnsi + i, insn_buf, sizeof(*insn) * cnt);
-
-		/* adjust branches in the whole program */
-		adjust_branches(new_prog, i, cnt - 1);
+		insn_delta = cnt - 1;
 
 		/* keep walking new program and skip insns we just inserted */
 		env->prog = new_prog;
-		insn = new_prog->insnsi + i + cnt - 1;
-		i += cnt - 1;
+		insn = new_prog->insnsi + i + insn_delta;
+
+		insn_cnt += insn_delta;
+		i += insn_delta;
 	}
 
 	return 0;
...
@@ -5621,7 +5621,10 @@ static struct bpf_prog *generate_filter(int which, int *err)
 		fp->type = BPF_PROG_TYPE_SOCKET_FILTER;
 		memcpy(fp->insnsi, fptr, fp->len * sizeof(struct bpf_insn));
 
-		bpf_prog_select_runtime(fp);
+		/* We cannot error here as we don't need type compatibility
+		 * checks.
+		 */
+		fp = bpf_prog_select_runtime(fp, err);
 		break;
 	}
...
@@ -289,14 +289,17 @@ config BQL
 config BPF_JIT
 	bool "enable BPF Just In Time compiler"
-	depends on HAVE_BPF_JIT
+	depends on HAVE_CBPF_JIT || HAVE_EBPF_JIT
 	depends on MODULES
 	---help---
 	  Berkeley Packet Filter filtering capabilities are normally handled
 	  by an interpreter. This option allows kernel to generate a native
 	  code when filter is loaded in memory. This should speedup
-	  packet sniffing (libpcap/tcpdump). Note : Admin should enable
-	  this feature changing /proc/sys/net/core/bpf_jit_enable
+	  packet sniffing (libpcap/tcpdump).
+
+	  Note, admin should enable this feature changing:
+	  /proc/sys/net/core/bpf_jit_enable
+	  /proc/sys/net/core/bpf_jit_harden (optional)
 
 config NET_FLOW_LIMIT
 	bool
@@ -419,6 +422,14 @@ config MAY_USE_DEVLINK
 endif   # if NET
 
-# Used by archs to tell that they support BPF_JIT
-config HAVE_BPF_JIT
+# Used by archs to tell that they support BPF JIT compiler plus which flavour.
+# Only one of the two can be selected for a specific arch since eBPF JIT supersedes
+# the cBPF JIT.
+
+# Classic BPF JIT (cBPF)
+config HAVE_CBPF_JIT
+	bool
+
+# Extended BPF JIT (eBPF)
+config HAVE_EBPF_JIT
 	bool
@@ -994,7 +994,11 @@ static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
 		 */
 		goto out_err_free;
 
-	bpf_prog_select_runtime(fp);
+	/* We are guaranteed to never error here with cBPF to eBPF
+	 * transitions, since there's no issue with type compatibility
+	 * checks on program arrays.
+	 */
+	fp = bpf_prog_select_runtime(fp, &err);
 
 	kfree(old_prog);
 	return fp;
@@ -2069,16 +2073,12 @@ tc_cls_act_func_proto(enum bpf_func_id func_id)
 static bool __is_valid_access(int off, int size, enum bpf_access_type type)
 {
-	/* check bounds */
 	if (off < 0 || off >= sizeof(struct __sk_buff))
 		return false;
-
-	/* disallow misaligned access */
+	/* The verifier guarantees that size > 0. */
 	if (off % size != 0)
 		return false;
-
-	/* all __sk_buff fields are __u32 */
-	if (size != 4)
+	if (size != sizeof(__u32))
 		return false;
 
 	return true;
@@ -2097,7 +2097,7 @@ static bool sk_filter_is_valid_access(int off, int size,
 	if (type == BPF_WRITE) {
 		switch (off) {
 		case offsetof(struct __sk_buff, cb[0]) ...
 		     offsetof(struct __sk_buff, cb[4]):
 			break;
 		default:
 			return false;
@@ -2278,30 +2278,30 @@ static u32 bpf_net_convert_ctx_access(enum bpf_access_type type, int dst_reg,
 }
 
 static const struct bpf_verifier_ops sk_filter_ops = {
 	.get_func_proto = sk_filter_func_proto,
 	.is_valid_access = sk_filter_is_valid_access,
 	.convert_ctx_access = bpf_net_convert_ctx_access,
 };
 
 static const struct bpf_verifier_ops tc_cls_act_ops = {
 	.get_func_proto = tc_cls_act_func_proto,
 	.is_valid_access = tc_cls_act_is_valid_access,
 	.convert_ctx_access = bpf_net_convert_ctx_access,
 };
 
 static struct bpf_prog_type_list sk_filter_type __read_mostly = {
 	.ops = &sk_filter_ops,
 	.type = BPF_PROG_TYPE_SOCKET_FILTER,
 };
 
 static struct bpf_prog_type_list sched_cls_type __read_mostly = {
 	.ops = &tc_cls_act_ops,
 	.type = BPF_PROG_TYPE_SCHED_CLS,
 };
 
 static struct bpf_prog_type_list sched_act_type __read_mostly = {
 	.ops = &tc_cls_act_ops,
 	.type = BPF_PROG_TYPE_SCHED_ACT,
 };
 
 static int __init register_sk_filter_ops(void)
...
@@ -294,6 +294,15 @@ static struct ctl_table net_core_table[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec
 	},
+# ifdef CONFIG_HAVE_EBPF_JIT
+	{
+		.procname	= "bpf_jit_harden",
+		.data		= &bpf_jit_harden,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= proc_dointvec,
+	},
+# endif
 #endif
 	{
 		.procname	= "netdev_tstamp_prequeue",
...