Commit 6944caad authored by Christophe Leroy, committed by Michael Ellerman

powerpc/bpf: Remove classical BPF support for PPC32

At present, PPC32 has classical BPF support.

The test_bpf module exhibits some failures:

	test_bpf: #298 LD_IND byte frag jited:1 ret 202 != 66 FAIL (1 times)
	test_bpf: #299 LD_IND halfword frag jited:1 ret 51958 != 17220 FAIL (1 times)
	test_bpf: #301 LD_IND halfword mixed head/frag jited:1 ret 51958 != 1305 FAIL (1 times)
	test_bpf: #303 LD_ABS byte frag jited:1 ret 202 != 66 FAIL (1 times)
	test_bpf: #304 LD_ABS halfword frag jited:1 ret 51958 != 17220 FAIL (1 times)
	test_bpf: #306 LD_ABS halfword mixed head/frag jited:1 ret 51958 != 1305 FAIL (1 times)

	test_bpf: Summary: 371 PASSED, 7 FAILED, [119/366 JIT'ed]

Fixing this is not worth the effort. Instead, remove classical BPF
support and prepare for adding extended BPF support.
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/fbc3e4fcc9c8f6131d6c705212530b2aa50149ee.1616430991.git.christophe.leroy@csgroup.eu
parent c7393a71
arch/powerpc/Kconfig
@@ -195,7 +195,6 @@ config PPC
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_ASM_MODVERSIONS
 	select HAVE_C_RECORDMCOUNT
-	select HAVE_CBPF_JIT			if !PPC64
 	select HAVE_STACKPROTECTOR		if PPC64 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r13)
 	select HAVE_STACKPROTECTOR		if PPC32 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r2)
 	select HAVE_CONTEXT_TRACKING		if PPC64
arch/powerpc/net/Makefile
@@ -2,8 +2,4 @@
 #
 # Arch-specific network modules
 #
-ifdef CONFIG_PPC64
 obj-$(CONFIG_BPF_JIT) += bpf_jit_comp64.o
-else
-obj-$(CONFIG_BPF_JIT) += bpf_jit_asm.o bpf_jit_comp.o
-endif
arch/powerpc/net/bpf_jit32.h (deleted)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* bpf_jit32.h: BPF JIT compiler for PPC
*
* Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
*
* Split from bpf_jit.h
*/
#ifndef _BPF_JIT32_H
#define _BPF_JIT32_H
#include <asm/asm-compat.h>
#include "bpf_jit.h"
#ifdef CONFIG_PPC64
#define BPF_PPC_STACK_R3_OFF 48
#define BPF_PPC_STACK_LOCALS 32
#define BPF_PPC_STACK_BASIC (48+64)
#define BPF_PPC_STACK_SAVE (18*8)
#define BPF_PPC_STACKFRAME (BPF_PPC_STACK_BASIC+BPF_PPC_STACK_LOCALS+ \
BPF_PPC_STACK_SAVE)
#define BPF_PPC_SLOWPATH_FRAME (48+64)
#else
#define BPF_PPC_STACK_R3_OFF 24
#define BPF_PPC_STACK_LOCALS 16
#define BPF_PPC_STACK_BASIC (24+32)
#define BPF_PPC_STACK_SAVE (18*4)
#define BPF_PPC_STACKFRAME (BPF_PPC_STACK_BASIC+BPF_PPC_STACK_LOCALS+ \
BPF_PPC_STACK_SAVE)
#define BPF_PPC_SLOWPATH_FRAME (24+32)
#endif
#define REG_SZ (BITS_PER_LONG/8)
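For reference, these constants work out to a 144-byte frame on PPC32 (56 + 16 + 72) and a 288-byte frame on PPC64 (112 + 32 + 144). A minimal compile-time check of that arithmetic, as a standalone sketch rather than part of the original file:

	/* Standalone sketch: the frame-size arithmetic from the macros above. */
	_Static_assert((24 + 32) + 16 + (18 * 4) == 144,
		       "PPC32 BPF_PPC_STACKFRAME is 144 bytes");
	_Static_assert((48 + 64) + 32 + (18 * 8) == 288,
		       "PPC64 BPF_PPC_STACKFRAME is 288 bytes");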
/*
* Generated code register usage:
*
* As normal PPC C ABI (e.g. r1=sp, r2=TOC), with:
*
* skb r3 (Entry parameter)
* A register r4
* X register r5
* addr param r6
* r7-r10 scratch
* skb->data r14
* skb headlen r15 (skb->len - skb->data_len)
* m[0] r16
* m[...] ...
* m[15] r31
*/
#define r_skb 3
#define r_ret 3
#define r_A 4
#define r_X 5
#define r_addr 6
#define r_scratch1 7
#define r_scratch2 8
#define r_D 14
#define r_HL 15
#define r_M 16
#ifndef __ASSEMBLY__
/*
* Assembly helpers from arch/powerpc/net/bpf_jit.S:
*/
#define DECLARE_LOAD_FUNC(func) \
extern u8 func[], func##_negative_offset[], func##_positive_offset[]
DECLARE_LOAD_FUNC(sk_load_word);
DECLARE_LOAD_FUNC(sk_load_half);
DECLARE_LOAD_FUNC(sk_load_byte);
DECLARE_LOAD_FUNC(sk_load_byte_msh);
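For instance, DECLARE_LOAD_FUNC(sk_load_word) above expands to:

	extern u8 sk_load_word[], sk_load_word_negative_offset[],
		  sk_load_word_positive_offset[];

Each symbol names an entry point in bpf_jit_asm.S rather than a callable C function, hence the u8[] type.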
#define PPC_LBZ_OFFS(r, base, i) do { if ((i) < 32768) EMIT(PPC_RAW_LBZ(r, base, i)); \
else { EMIT(PPC_RAW_ADDIS(r, base, IMM_HA(i))); \
EMIT(PPC_RAW_LBZ(r, r, IMM_L(i))); } } while(0)
#define PPC_LD_OFFS(r, base, i) do { if ((i) < 32768) EMIT(PPC_RAW_LD(r, base, i)); \
else { EMIT(PPC_RAW_ADDIS(r, base, IMM_HA(i))); \
EMIT(PPC_RAW_LD(r, r, IMM_L(i))); } } while(0)
#define PPC_LWZ_OFFS(r, base, i) do { if ((i) < 32768) EMIT(PPC_RAW_LWZ(r, base, i)); \
else { EMIT(PPC_RAW_ADDIS(r, base, IMM_HA(i))); \
EMIT(PPC_RAW_LWZ(r, r, IMM_L(i))); } } while(0)
#define PPC_LHZ_OFFS(r, base, i) do { if ((i) < 32768) EMIT(PPC_RAW_LHZ(r, base, i)); \
else { EMIT(PPC_RAW_ADDIS(r, base, IMM_HA(i))); \
EMIT(PPC_RAW_LHZ(r, r, IMM_L(i))); } } while(0)
#ifdef CONFIG_PPC64
#define PPC_LL_OFFS(r, base, i) do { PPC_LD_OFFS(r, base, i); } while(0)
#else
#define PPC_LL_OFFS(r, base, i) do { PPC_LWZ_OFFS(r, base, i); } while(0)
#endif
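For offsets of 32768 and above, these macros split the offset with IMM_HA()/IMM_L() (from bpf_jit.h, included above): addis contributes IMM_HA(i) << 16 and the load's signed 16-bit displacement contributes (s16)IMM_L(i). A minimal sketch checking that the two halves recombine to the original offset (the macro bodies are copied from bpf_jit.h; the test value is illustrative):

	#include <stdint.h>
	#include <stdio.h>

	#define IMM_L(i)	((uintptr_t)(i) & 0xffff)
	#define IMM_HA(i)	(((uintptr_t)(i) >> 16) +		\
				 (((uintptr_t)(i) & 0x8000) ? 1 : 0))

	int main(void)
	{
		uint32_t off = 0x18004;	/* illustrative offset >= 32768 */
		/* addis adds IMM_HA << 16; the load adds (s16)IMM_L */
		int32_t sum = (int32_t)(IMM_HA(off) << 16) + (int16_t)IMM_L(off);

		printf("%#x -> ha=%#lx l=%#lx -> %#x\n", off,
		       (unsigned long)IMM_HA(off), (unsigned long)IMM_L(off),
		       (unsigned)sum);
		return sum == (int32_t)off ? 0 : 1;
	}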
#ifdef CONFIG_SMP
#ifdef CONFIG_PPC64
#define PPC_BPF_LOAD_CPU(r) \
do { BUILD_BUG_ON(sizeof_field(struct paca_struct, paca_index) != 2); \
PPC_LHZ_OFFS(r, 13, offsetof(struct paca_struct, paca_index)); \
} while (0)
#else
#define PPC_BPF_LOAD_CPU(r) \
do { BUILD_BUG_ON(sizeof_field(struct task_struct, cpu) != 4); \
PPC_LHZ_OFFS(r, 2, offsetof(struct task_struct, cpu)); \
} while(0)
#endif
#else
#define PPC_BPF_LOAD_CPU(r) do { EMIT(PPC_RAW_LI(r, 0)); } while(0)
#endif
#define PPC_LHBRX_OFFS(r, base, i) \
do { PPC_LI32(r, i); EMIT(PPC_RAW_LHBRX(r, r, base)); } while(0)
#ifdef __LITTLE_ENDIAN__
#define PPC_NTOHS_OFFS(r, base, i) PPC_LHBRX_OFFS(r, base, i)
#else
#define PPC_NTOHS_OFFS(r, base, i) PPC_LHZ_OFFS(r, base, i)
#endif
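Both variants leave the halfword in host order: on little-endian, lhbrx byte-swaps during the load, while on big-endian, lhz already matches network order. A portable C model of the value PPC_NTOHS_OFFS produces (a sketch, not generated code):

	#include <stdint.h>

	/* Read a 16-bit network-order (big-endian) field as a host-order
	 * value; this is what both the lhbrx and lhz variants compute.
	 */
	static uint16_t ntohs_offs_model(const uint8_t *base, long i)
	{
		return (uint16_t)((base[i] << 8) | base[i + 1]);
	}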
#define PPC_BPF_LL(r, base, i) do { EMIT(PPC_RAW_LWZ(r, base, i)); } while(0)
#define PPC_BPF_STL(r, base, i) do { EMIT(PPC_RAW_STW(r, base, i)); } while(0)
#define PPC_BPF_STLU(r, base, i) do { EMIT(PPC_RAW_STWU(r, base, i)); } while(0)
#define SEEN_DATAREF 0x10000 /* might call external helpers */
#define SEEN_XREG 0x20000 /* X reg is used */
#define SEEN_MEM 0x40000 /* SEEN_MEM+(1<<n) = use mem[n] for temporary
* storage */
#define SEEN_MEM_MSK 0x0ffff
struct codegen_context {
unsigned int seen;
unsigned int idx;
int pc_ret0; /* bpf index of first RET #0 instruction (if any) */
};
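The low 16 bits of seen record which of the m[0]..m[15] scratch slots (registers r16..r31) a program uses, so the prologue and epilogue only save and restore what is needed. A hypothetical helper illustrating the intended bookkeeping (the compiler itself, bpf_jit_comp.c, is not shown in this excerpt):

	/* Hypothetical sketch: mark mem[n], kept in r16 + n, as used. */
	static inline void bpf_note_mem_slot(struct codegen_context *ctx, int n)
	{
		ctx->seen |= SEEN_MEM | (1 << n);
	}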
#endif
#endif
arch/powerpc/net/bpf_jit_asm.S (deleted)
/* SPDX-License-Identifier: GPL-2.0-only */
/* bpf_jit.S: Packet/header access helper functions
* for PPC64 BPF compiler.
*
* Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
*/
#include <asm/ppc_asm.h>
#include <asm/asm-compat.h>
#include "bpf_jit32.h"
/*
* All of these routines are called directly from generated code,
* whose register usage is:
*
* r3 skb
* r4,r5 A,X
* r6 *** address parameter to helper ***
* r7-r10 scratch
* r14 skb->data
* r15 skb headlen
* r16-31 M[]
*/
/*
* To consider: These helpers are so small it could be better to just
* generate them inline. Inline code can do the simple headlen check
* then branch directly to slow_path_XXX if required. (In fact, could
* load a spare GPR with the address of slow_path_generic and pass size
 * as an argument, making the call site a mtlr, li and blrl.)
*/
.globl sk_load_word
sk_load_word:
PPC_LCMPI r_addr, 0
blt bpf_slow_path_word_neg
.globl sk_load_word_positive_offset
sk_load_word_positive_offset:
/* Are we accessing past headlen? */
subi r_scratch1, r_HL, 4
PPC_LCMP r_scratch1, r_addr
blt bpf_slow_path_word
/* Nope, just hitting the header. cr0 here is eq or gt! */
#ifdef __LITTLE_ENDIAN__
lwbrx r_A, r_D, r_addr
#else
lwzx r_A, r_D, r_addr
#endif
blr /* Return success, cr0 != LT */
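The fast path takes the load only when the whole word fits in the linear header, i.e. when r_HL - 4 >= r_addr with a non-negative offset; the half and byte helpers below do the same with their own sizes. A C model of that bounds test (a sketch, assuming offsets and headlen fit in a signed long):

	#include <stdbool.h>

	/* Model of the fast-path check: offset >= 0 and
	 * offset + size <= headlen, written as headlen - size >= offset
	 * to match the subi/cmp sequence above.
	 */
	static bool in_linear_header(long headlen, long offset, long size)
	{
		return offset >= 0 && headlen - size >= offset;
	}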
.globl sk_load_half
sk_load_half:
PPC_LCMPI r_addr, 0
blt bpf_slow_path_half_neg
.globl sk_load_half_positive_offset
sk_load_half_positive_offset:
subi r_scratch1, r_HL, 2
PPC_LCMP r_scratch1, r_addr
blt bpf_slow_path_half
#ifdef __LITTLE_ENDIAN__
lhbrx r_A, r_D, r_addr
#else
lhzx r_A, r_D, r_addr
#endif
blr
.globl sk_load_byte
sk_load_byte:
PPC_LCMPI r_addr, 0
blt bpf_slow_path_byte_neg
.globl sk_load_byte_positive_offset
sk_load_byte_positive_offset:
PPC_LCMP r_HL, r_addr
ble bpf_slow_path_byte
lbzx r_A, r_D, r_addr
blr
/*
* BPF_LDX | BPF_B | BPF_MSH: ldxb 4*([offset]&0xf)
* r_addr is the offset value
*/
.globl sk_load_byte_msh
sk_load_byte_msh:
PPC_LCMPI r_addr, 0
blt bpf_slow_path_byte_msh_neg
.globl sk_load_byte_msh_positive_offset
sk_load_byte_msh_positive_offset:
PPC_LCMP r_HL, r_addr
ble bpf_slow_path_byte_msh
lbzx r_X, r_D, r_addr
rlwinm r_X, r_X, 2, 32-4-2, 31-2
blr
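The mask arguments 32-4-2 and 31-2 select bits 26..29 (IBM numbering), so the rlwinm rotates left by 2 and keeps mask 0x3c, computing 4 * (byte & 0xf) as BPF_MSH requires. A throwaway C check of that identity (a sketch, not part of the original file):

	#include <assert.h>
	#include <stdint.h>

	/* rlwinm rX, rX, 2, 26, 29: rotate left 2, keep mask 0x3c */
	static uint32_t msh_scale(uint32_t x)
	{
		return ((x << 2) | (x >> 30)) & 0x3c;
	}

	int main(void)
	{
		uint32_t b;

		for (b = 0; b < 256; b++)
			assert(msh_scale(b) == 4 * (b & 0xf));
		return 0;
	}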
/* Call out to skb_copy_bits:
* We'll need to back up our volatile regs first; we have
* local variable space at r1+(BPF_PPC_STACK_BASIC).
* Allocate a new stack frame here to remain ABI-compliant in
* stashing LR.
*/
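With the PPC32 constants from bpf_jit32.h (BASIC = 56, STACKFRAME = 144, R3_OFF = 24, SLOWPATH_FRAME = 56, and PPC_LR_STKOFF = 4), the stores below land as sketched here, relative to r1 before the PPC_STLU; the PPC64 layout is analogous with the 64-bit constants:

	/*
	 * r1 + 168 : r_skb spill (R3 slot in the frame above the BPF one)
	 * r1 +  64 : copy buffer handed to skb_copy_bits() via r5
	 * r1 +  60 : r_X spill (BPF_PPC_STACK_BASIC + 1*REG_SZ)
	 * r1 +  56 : r_A spill (BPF_PPC_STACK_BASIC + 0*REG_SZ)
	 * r1 +   4 : saved LR  (PPC_LR_STKOFF)
	 * r1 -  56 : new frame created by PPC_STLU around the call
	 */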
#define bpf_slow_path_common(SIZE) \
mflr r0; \
PPC_STL r0, PPC_LR_STKOFF(r1); \
/* R3 goes in parameter space of caller's frame */ \
PPC_STL r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1); \
PPC_STL r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1); \
PPC_STL r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1); \
addi r5, r1, BPF_PPC_STACK_BASIC+(2*REG_SZ); \
PPC_STLU r1, -BPF_PPC_SLOWPATH_FRAME(r1); \
/* R3 = r_skb, as passed */ \
mr r4, r_addr; \
li r6, SIZE; \
bl skb_copy_bits; \
nop; \
/* R3 = 0 on success */ \
addi r1, r1, BPF_PPC_SLOWPATH_FRAME; \
PPC_LL r0, PPC_LR_STKOFF(r1); \
PPC_LL r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1); \
PPC_LL r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1); \
mtlr r0; \
PPC_LCMPI r3, 0; \
blt bpf_error; /* cr0 = LT */ \
PPC_LL r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1); \
/* Great success! */
bpf_slow_path_word:
bpf_slow_path_common(4)
/* Data value is on stack, and cr0 != LT */
lwz r_A, BPF_PPC_STACK_BASIC+(2*REG_SZ)(r1)
blr
bpf_slow_path_half:
bpf_slow_path_common(2)
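	/* NB: 2*8 here and in the byte paths below, versus the 2*REG_SZ
	 * slot set up in bpf_slow_path_common; on PPC32, where REG_SZ is 4,
	 * this loads from beyond the copy buffer.
	 */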
lhz r_A, BPF_PPC_STACK_BASIC+(2*8)(r1)
blr
bpf_slow_path_byte:
bpf_slow_path_common(1)
lbz r_A, BPF_PPC_STACK_BASIC+(2*8)(r1)
blr
bpf_slow_path_byte_msh:
bpf_slow_path_common(1)
lbz r_X, BPF_PPC_STACK_BASIC+(2*8)(r1)
rlwinm r_X, r_X, 2, 32-4-2, 31-2
blr
/* Call out to bpf_internal_load_pointer_neg_helper:
* We'll need to back up our volatile regs first; we have
* local variable space at r1+(BPF_PPC_STACK_BASIC).
* Allocate a new stack frame here to remain ABI-compliant in
* stashing LR.
*/
#define sk_negative_common(SIZE) \
mflr r0; \
PPC_STL r0, PPC_LR_STKOFF(r1); \
/* R3 goes in parameter space of caller's frame */ \
PPC_STL r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1); \
PPC_STL r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1); \
PPC_STL r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1); \
PPC_STLU r1, -BPF_PPC_SLOWPATH_FRAME(r1); \
/* R3 = r_skb, as passed */ \
mr r4, r_addr; \
li r5, SIZE; \
bl bpf_internal_load_pointer_neg_helper; \
nop; \
/* R3 != 0 on success */ \
addi r1, r1, BPF_PPC_SLOWPATH_FRAME; \
PPC_LL r0, PPC_LR_STKOFF(r1); \
PPC_LL r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1); \
PPC_LL r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1); \
mtlr r0; \
PPC_LCMPLI r3, 0; \
beq bpf_error_slow; /* cr0 = EQ */ \
mr r_addr, r3; \
PPC_LL r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1); \
/* Great success! */
bpf_slow_path_word_neg:
lis r_scratch1,-32 /* SKF_LL_OFF */
PPC_LCMP r_addr, r_scratch1 /* addr < SKF_* */
blt bpf_error /* cr0 = LT */
.globl sk_load_word_negative_offset
sk_load_word_negative_offset:
sk_negative_common(4)
lwz r_A, 0(r_addr)
blr
bpf_slow_path_half_neg:
lis r_scratch1,-32 /* SKF_LL_OFF */
PPC_LCMP r_addr, r_scratch1 /* addr < SKF_* */
blt bpf_error /* cr0 = LT */
.globl sk_load_half_negative_offset
sk_load_half_negative_offset:
sk_negative_common(2)
lhz r_A, 0(r_addr)
blr
bpf_slow_path_byte_neg:
lis r_scratch1,-32 /* SKF_LL_OFF */
PPC_LCMP r_addr, r_scratch1 /* addr < SKF_* */
blt bpf_error /* cr0 = LT */
.globl sk_load_byte_negative_offset
sk_load_byte_negative_offset:
sk_negative_common(1)
lbz r_A, 0(r_addr)
blr
bpf_slow_path_byte_msh_neg:
lis r_scratch1,-32 /* SKF_LL_OFF */
PPC_LCMP r_addr, r_scratch1 /* addr < SKF_* */
blt bpf_error /* cr0 = LT */
.globl sk_load_byte_msh_negative_offset
sk_load_byte_msh_negative_offset:
sk_negative_common(1)
lbz r_X, 0(r_addr)
rlwinm r_X, r_X, 2, 32-4-2, 31-2
blr
bpf_error_slow:
/* fabricate a cr0 = lt */
li r_scratch1, -1
PPC_LCMPI r_scratch1, 0
bpf_error:
/* Entered with cr0 = lt */
li r3, 0
/* Generated code will 'blt epilogue', returning 0. */
blr
[remainder of the diff is collapsed]