Commit b0d3df48 authored by Alexei Starovoitov

Merge branch 'PROG_TEST_RUN support for sk_lookup programs'

Lorenz Bauer says:

====================

We don't have PROG_TEST_RUN support for sk_lookup programs at the
moment. So far this hasn't been a problem, since we can run our
tests in a separate network namespace. For benchmarking it's nice
to have PROG_TEST_RUN, so I've gone and implemented it.

Based on discussion on the v1, I've dropped support for testing multiple
programs at once.

Changes since v3:
- Use bpf_test_timer prefix (Andrii)

Changes since v2:
- Fix test_verifier failure (Alexei)

Changes since v1:
- Add sparse annotations to the t_* functions
- Add appropriate type casts in bpf_prog_test_run_sk_lookup
- Drop running multiple programs
====================
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents 2374e0f1 b4f89463
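
For reference, user space drives the new interface much like the run_sk_assign() selftest further down: fill in a struct bpf_sk_lookup, pass it as ctx_in/ctx_out, and read back the socket cookie the kernel reports through the new union. A minimal sketch, with error handling trimmed; prog_fd is assumed to be a loaded BPF_PROG_TYPE_SK_LOOKUP program that picks a socket via bpf_sk_lookup_assign():

#include <arpa/inet.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

static int sk_lookup_test_run(int prog_fd)
{
        struct bpf_sk_lookup ctx = {
                .family      = AF_INET,
                .protocol    = IPPROTO_TCP,
                .remote_ip4  = htonl(0x7f000002),       /* 127.0.0.2 */
                .remote_port = htons(8008),             /* network byte order */
                .local_ip4   = htonl(0x7f000001),       /* 127.0.0.1 */
                .local_port  = 7007,                    /* host byte order */
        };
        DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
                .ctx_in = &ctx,
                .ctx_size_in = sizeof(ctx),
                .ctx_out = &ctx,
                .ctx_size_out = sizeof(ctx),
        );
        int err;

        err = bpf_prog_test_run_opts(prog_fd, &opts);
        if (err)
                return err;

        /* Non-zero cookie means the program selected a socket. */
        return ctx.cookie ? 0 : -1;
}
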
......@@ -1491,6 +1491,9 @@ int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
const union bpf_attr *kattr,
union bpf_attr __user *uattr);
int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
const union bpf_attr *kattr,
union bpf_attr __user *uattr);
bool btf_ctx_access(int off, int size, enum bpf_access_type type,
const struct bpf_prog *prog,
struct bpf_insn_access_aux *info);
......@@ -1692,6 +1695,13 @@ static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
return -ENOTSUPP;
}
static inline int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
const union bpf_attr *kattr,
union bpf_attr __user *uattr)
{
return -ENOTSUPP;
}
static inline void bpf_map_put(struct bpf_map *map)
{
}
......
......@@ -5953,7 +5953,10 @@ struct bpf_pidns_info {
/* User accessible data for SK_LOOKUP programs. Add new fields at the end. */
struct bpf_sk_lookup {
union {
__bpf_md_ptr(struct bpf_sock *, sk); /* Selected socket */
__u64 cookie; /* Non-zero if socket was selected in PROG_TEST_RUN */
};
__u32 family; /* Protocol family (AF_INET, AF_INET6) */
__u32 protocol; /* IP protocol (IPPROTO_TCP, IPPROTO_UDP) */
......
......@@ -10,20 +10,86 @@
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <net/net_namespace.h>
#include <linux/error-injection.h>
#include <linux/smp.h>
#include <linux/sock_diag.h>
#define CREATE_TRACE_POINTS
#include <trace/events/bpf_test_run.h>
struct bpf_test_timer {
enum { NO_PREEMPT, NO_MIGRATE } mode;
u32 i;
u64 time_start, time_spent;
};
static void bpf_test_timer_enter(struct bpf_test_timer *t)
__acquires(rcu)
{
rcu_read_lock();
if (t->mode == NO_PREEMPT)
preempt_disable();
else
migrate_disable();
t->time_start = ktime_get_ns();
}
static void bpf_test_timer_leave(struct bpf_test_timer *t)
__releases(rcu)
{
t->time_start = 0;
if (t->mode == NO_PREEMPT)
preempt_enable();
else
migrate_enable();
rcu_read_unlock();
}
static bool bpf_test_timer_continue(struct bpf_test_timer *t, u32 repeat, int *err, u32 *duration)
__must_hold(rcu)
{
t->i++;
if (t->i >= repeat) {
/* We're done. */
t->time_spent += ktime_get_ns() - t->time_start;
do_div(t->time_spent, t->i);
*duration = t->time_spent > U32_MAX ? U32_MAX : (u32)t->time_spent;
*err = 0;
goto reset;
}
if (signal_pending(current)) {
/* During iteration: we've been cancelled, abort. */
*err = -EINTR;
goto reset;
}
if (need_resched()) {
/* During iteration: we need to reschedule between runs. */
t->time_spent += ktime_get_ns() - t->time_start;
bpf_test_timer_leave(t);
cond_resched();
bpf_test_timer_enter(t);
}
/* Do another round. */
return true;
reset:
t->i = 0;
return false;
}
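/* Not part of the diff, for orientation: the helpers above bracket a
 * measurement loop, and the callers converted below all follow this shape:
 *
 *	struct bpf_test_timer t = { NO_PREEMPT };	(or NO_MIGRATE)
 *
 *	bpf_test_timer_enter(&t);
 *	do {
 *		... run the program once ...
 *	} while (bpf_test_timer_continue(&t, repeat, &ret, &duration));
 *	bpf_test_timer_leave(&t);
 *
 * On exit, ret is 0 (all repetitions done) or -EINTR, and duration holds
 * the mean runtime per repetition in nanoseconds, clamped to U32_MAX.
 */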
static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
u32 *retval, u32 *time, bool xdp)
{
struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { NULL };
struct bpf_test_timer t = { NO_MIGRATE };
enum bpf_cgroup_storage_type stype;
-u64 time_start, time_spent = 0;
-int ret = 0;
-u32 i;
int ret;
for_each_cgroup_storage_type(stype) {
storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
......@@ -38,40 +104,16 @@ static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
if (!repeat)
repeat = 1;
-rcu_read_lock();
-migrate_disable();
-time_start = ktime_get_ns();
-for (i = 0; i < repeat; i++) {
bpf_test_timer_enter(&t);
do {
bpf_cgroup_storage_set(storage);
if (xdp)
*retval = bpf_prog_run_xdp(prog, ctx);
else
*retval = BPF_PROG_RUN(prog, ctx);
-if (signal_pending(current)) {
-ret = -EINTR;
-break;
-}
-if (need_resched()) {
-time_spent += ktime_get_ns() - time_start;
-migrate_enable();
-rcu_read_unlock();
-cond_resched();
-rcu_read_lock();
-migrate_disable();
-time_start = ktime_get_ns();
-}
-}
-time_spent += ktime_get_ns() - time_start;
-migrate_enable();
-rcu_read_unlock();
-do_div(time_spent, repeat);
-*time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;
} while (bpf_test_timer_continue(&t, repeat, &ret, time));
bpf_test_timer_leave(&t);
for_each_cgroup_storage_type(stype)
bpf_cgroup_storage_free(storage[stype]);
......@@ -674,18 +716,17 @@ int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
const union bpf_attr *kattr,
union bpf_attr __user *uattr)
{
struct bpf_test_timer t = { NO_PREEMPT };
u32 size = kattr->test.data_size_in;
struct bpf_flow_dissector ctx = {};
u32 repeat = kattr->test.repeat;
struct bpf_flow_keys *user_ctx;
struct bpf_flow_keys flow_keys;
-u64 time_start, time_spent = 0;
const struct ethhdr *eth;
unsigned int flags = 0;
u32 retval, duration;
void *data;
int ret;
-u32 i;
if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
return -EINVAL;
......@@ -721,48 +762,127 @@ int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
ctx.data = data;
ctx.data_end = (__u8 *)data + size;
-rcu_read_lock();
-preempt_disable();
-time_start = ktime_get_ns();
-for (i = 0; i < repeat; i++) {
bpf_test_timer_enter(&t);
do {
retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
size, flags);
} while (bpf_test_timer_continue(&t, repeat, &ret, &duration));
bpf_test_timer_leave(&t);
-if (signal_pending(current)) {
-preempt_enable();
-rcu_read_unlock();
-ret = -EINTR;
if (ret < 0)
goto out;
ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
retval, duration);
if (!ret)
ret = bpf_ctx_finish(kattr, uattr, user_ctx,
sizeof(struct bpf_flow_keys));
out:
kfree(user_ctx);
kfree(data);
return ret;
}
int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kattr,
union bpf_attr __user *uattr)
{
struct bpf_test_timer t = { NO_PREEMPT };
struct bpf_prog_array *progs = NULL;
struct bpf_sk_lookup_kern ctx = {};
u32 repeat = kattr->test.repeat;
struct bpf_sk_lookup *user_ctx;
u32 retval, duration;
int ret = -EINVAL;
if (prog->type != BPF_PROG_TYPE_SK_LOOKUP)
return -EINVAL;
if (kattr->test.flags || kattr->test.cpu)
return -EINVAL;
if (kattr->test.data_in || kattr->test.data_size_in || kattr->test.data_out ||
kattr->test.data_size_out)
return -EINVAL;
if (!repeat)
repeat = 1;
user_ctx = bpf_ctx_init(kattr, sizeof(*user_ctx));
if (IS_ERR(user_ctx))
return PTR_ERR(user_ctx);
if (!user_ctx)
return -EINVAL;
if (user_ctx->sk)
goto out;
if (!range_is_zero(user_ctx, offsetofend(typeof(*user_ctx), local_port), sizeof(*user_ctx)))
goto out;
if (user_ctx->local_port > U16_MAX || user_ctx->remote_port > U16_MAX) {
ret = -ERANGE;
goto out;
}
-if (need_resched()) {
-time_spent += ktime_get_ns() - time_start;
-preempt_enable();
-rcu_read_unlock();
ctx.family = (u16)user_ctx->family;
ctx.protocol = (u16)user_ctx->protocol;
ctx.dport = (u16)user_ctx->local_port;
ctx.sport = (__force __be16)user_ctx->remote_port;
-cond_resched();
switch (ctx.family) {
case AF_INET:
ctx.v4.daddr = (__force __be32)user_ctx->local_ip4;
ctx.v4.saddr = (__force __be32)user_ctx->remote_ip4;
break;
-rcu_read_lock();
-preempt_disable();
-time_start = ktime_get_ns();
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
ctx.v6.daddr = (struct in6_addr *)user_ctx->local_ip6;
ctx.v6.saddr = (struct in6_addr *)user_ctx->remote_ip6;
break;
#endif
default:
ret = -EAFNOSUPPORT;
goto out;
}
progs = bpf_prog_array_alloc(1, GFP_KERNEL);
if (!progs) {
ret = -ENOMEM;
goto out;
}
-time_spent += ktime_get_ns() - time_start;
-preempt_enable();
-rcu_read_unlock();
-do_div(time_spent, repeat);
-duration = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;
progs->items[0].prog = prog;
-ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
-retval, duration);
bpf_test_timer_enter(&t);
do {
ctx.selected_sk = NULL;
retval = BPF_PROG_SK_LOOKUP_RUN_ARRAY(progs, ctx, BPF_PROG_RUN);
} while (bpf_test_timer_continue(&t, repeat, &ret, &duration));
bpf_test_timer_leave(&t);
if (ret < 0)
goto out;
user_ctx->cookie = 0;
if (ctx.selected_sk) {
if (ctx.selected_sk->sk_reuseport && !ctx.no_reuseport) {
ret = -EOPNOTSUPP;
goto out;
}
user_ctx->cookie = sock_gen_cookie(ctx.selected_sk);
}
ret = bpf_test_finish(kattr, uattr, NULL, 0, retval, duration);
if (!ret)
-ret = bpf_ctx_finish(kattr, uattr, user_ctx,
-sizeof(struct bpf_flow_keys));
ret = bpf_ctx_finish(kattr, uattr, user_ctx, sizeof(*user_ctx));
out:
bpf_prog_array_free(progs);
kfree(user_ctx);
-kfree(data);
return ret;
}
......@@ -10457,6 +10457,7 @@ static u32 sk_lookup_convert_ctx_access(enum bpf_access_type type,
}
const struct bpf_prog_ops sk_lookup_prog_ops = {
.test_run = bpf_prog_test_run_sk_lookup,
};
const struct bpf_verifier_ops sk_lookup_verifier_ops = {
......
......@@ -5953,7 +5953,10 @@ struct bpf_pidns_info {
/* User accessible data for SK_LOOKUP programs. Add new fields at the end. */
struct bpf_sk_lookup {
union {
__bpf_md_ptr(struct bpf_sock *, sk); /* Selected socket */
__u64 cookie; /* Non-zero if socket was selected in PROG_TEST_RUN */
};
__u32 family; /* Protocol family (AF_INET, AF_INET6) */
__u32 protocol; /* IP protocol (IPPROTO_TCP, IPPROTO_UDP) */
......
......@@ -2,12 +2,31 @@
#include <test_progs.h>
#include <network_helpers.h>
-void test_prog_run_xattr(void)
#include "test_pkt_access.skel.h"
static const __u32 duration;
static void check_run_cnt(int prog_fd, __u64 run_cnt)
{
-const char *file = "./test_pkt_access.o";
-struct bpf_object *obj;
-char buf[10];
struct bpf_prog_info info = {};
__u32 info_len = sizeof(info);
int err;
err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
if (CHECK(err, "get_prog_info", "failed to get bpf_prog_info for fd %d\n", prog_fd))
return;
CHECK(run_cnt != info.run_cnt, "run_cnt",
"incorrect number of repetitions, want %llu have %llu\n", run_cnt, info.run_cnt);
}
void test_prog_run_xattr(void)
{
struct test_pkt_access *skel;
int err, stats_fd = -1;
char buf[10] = {};
__u64 run_cnt = 0;
struct bpf_prog_test_run_attr tattr = {
.repeat = 1,
.data_in = &pkt_v4,
......@@ -16,12 +35,15 @@ void test_prog_run_xattr(void)
.data_size_out = 5,
};
-err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj,
-&tattr.prog_fd);
-if (CHECK_ATTR(err, "load", "err %d errno %d\n", err, errno))
stats_fd = bpf_enable_stats(BPF_STATS_RUN_TIME);
if (CHECK_ATTR(stats_fd < 0, "enable_stats", "failed %d\n", errno))
return;
-memset(buf, 0, sizeof(buf));
skel = test_pkt_access__open_and_load();
if (CHECK_ATTR(!skel, "open_and_load", "failed\n"))
goto cleanup;
tattr.prog_fd = bpf_program__fd(skel->progs.test_pkt_access);
err = bpf_prog_test_run_xattr(&tattr);
CHECK_ATTR(err != -1 || errno != ENOSPC || tattr.retval, "run",
......@@ -34,8 +56,12 @@ void test_prog_run_xattr(void)
CHECK_ATTR(buf[5] != 0, "overflow",
"BPF_PROG_TEST_RUN ignored size hint\n");
run_cnt += tattr.repeat;
check_run_cnt(tattr.prog_fd, run_cnt);
tattr.data_out = NULL;
tattr.data_size_out = 0;
tattr.repeat = 2;
errno = 0;
err = bpf_prog_test_run_xattr(&tattr);
......@@ -46,5 +72,12 @@ void test_prog_run_xattr(void)
err = bpf_prog_test_run_xattr(&tattr);
CHECK_ATTR(err != -EINVAL, "run_wrong_size_out", "err %d\n", err);
-bpf_object__close(obj);
run_cnt += tattr.repeat;
check_run_cnt(tattr.prog_fd, run_cnt);
cleanup:
if (skel)
test_pkt_access__destroy(skel);
if (stats_fd != -1)
close(stats_fd);
}
......@@ -241,6 +241,48 @@ static int make_client(int sotype, const char *ip, int port)
return -1;
}
static __u64 socket_cookie(int fd)
{
__u64 cookie;
socklen_t cookie_len = sizeof(cookie);
if (CHECK(getsockopt(fd, SOL_SOCKET, SO_COOKIE, &cookie, &cookie_len) < 0,
"getsockopt(SO_COOKIE)", "%s\n", strerror(errno)))
return 0;
return cookie;
}
static int fill_sk_lookup_ctx(struct bpf_sk_lookup *ctx, const char *local_ip, __u16 local_port,
const char *remote_ip, __u16 remote_port)
{
void *local, *remote;
int err;
memset(ctx, 0, sizeof(*ctx));
ctx->local_port = local_port;
ctx->remote_port = htons(remote_port);
if (is_ipv6(local_ip)) {
ctx->family = AF_INET6;
local = &ctx->local_ip6[0];
remote = &ctx->remote_ip6[0];
} else {
ctx->family = AF_INET;
local = &ctx->local_ip4;
remote = &ctx->remote_ip4;
}
err = inet_pton(ctx->family, local_ip, local);
if (CHECK(err != 1, "inet_pton", "local_ip failed\n"))
return 1;
err = inet_pton(ctx->family, remote_ip, remote);
if (CHECK(err != 1, "inet_pton", "remote_ip failed\n"))
return 1;
return 0;
}
static int send_byte(int fd)
{
ssize_t n;
......@@ -1009,18 +1051,27 @@ static void test_drop_on_reuseport(struct test_sk_lookup *skel)
static void run_sk_assign(struct test_sk_lookup *skel,
struct bpf_program *lookup_prog,
-const char *listen_ip, const char *connect_ip)
const char *remote_ip, const char *local_ip)
{
-int client_fd, peer_fd, server_fds[MAX_SERVERS] = { -1 };
-struct bpf_link *lookup_link;
int server_fds[MAX_SERVERS] = { -1 };
struct bpf_sk_lookup ctx;
__u64 server_cookie;
int i, err;
-lookup_link = attach_lookup_prog(lookup_prog);
-if (!lookup_link)
DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
.ctx_in = &ctx,
.ctx_size_in = sizeof(ctx),
.ctx_out = &ctx,
.ctx_size_out = sizeof(ctx),
);
if (fill_sk_lookup_ctx(&ctx, local_ip, EXT_PORT, remote_ip, INT_PORT))
return;
ctx.protocol = IPPROTO_TCP;
for (i = 0; i < ARRAY_SIZE(server_fds); i++) {
-server_fds[i] = make_server(SOCK_STREAM, listen_ip, 0, NULL);
server_fds[i] = make_server(SOCK_STREAM, local_ip, 0, NULL);
if (server_fds[i] < 0)
goto close_servers;
......@@ -1030,23 +1081,25 @@ static void run_sk_assign(struct test_sk_lookup *skel,
goto close_servers;
}
-client_fd = make_client(SOCK_STREAM, connect_ip, EXT_PORT);
-if (client_fd < 0)
server_cookie = socket_cookie(server_fds[SERVER_B]);
if (!server_cookie)
return;
err = bpf_prog_test_run_opts(bpf_program__fd(lookup_prog), &opts);
if (CHECK(err, "test_run", "failed with error %d\n", errno))
goto close_servers;
if (CHECK(ctx.cookie == 0, "ctx.cookie", "no socket selected\n"))
goto close_servers;
-peer_fd = accept(server_fds[SERVER_B], NULL, NULL);
-if (CHECK(peer_fd < 0, "accept", "failed\n"))
-goto close_client;
CHECK(ctx.cookie != server_cookie, "ctx.cookie",
"selected sk %llu instead of %llu\n", ctx.cookie, server_cookie);
-close(peer_fd);
-close_client:
-close(client_fd);
close_servers:
for (i = 0; i < ARRAY_SIZE(server_fds); i++) {
if (server_fds[i] != -1)
close(server_fds[i]);
}
-bpf_link__destroy(lookup_link);
}
static void run_sk_assign_v4(struct test_sk_lookup *skel,
......
......@@ -64,6 +64,10 @@ static const int PROG_DONE = 1;
static const __u32 KEY_SERVER_A = SERVER_A;
static const __u32 KEY_SERVER_B = SERVER_B;
static const __u16 SRC_PORT = bpf_htons(8008);
static const __u32 SRC_IP4 = IP4(127, 0, 0, 2);
static const __u32 SRC_IP6[] = IP6(0xfd000000, 0x0, 0x0, 0x00000002);
static const __u16 DST_PORT = 7007; /* Host byte order */
static const __u32 DST_IP4 = IP4(127, 0, 0, 1);
static const __u32 DST_IP6[] = IP6(0xfd000000, 0x0, 0x0, 0x00000001);
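/* For reference, not in this hunk: the IP4()/IP6() helpers used above are
 * defined earlier in this file, roughly as:
 *
 *	#define IP4(a, b, c, d) \
 *		bpf_htonl((((__u32)(a) << 24) | ((__u32)(b) << 16) | \
 *			   ((__u32)(c) << 8) | (__u32)(d)))
 *	#define IP6(aaaa, bbbb, cccc, dddd) \
 *		{ bpf_htonl(aaaa), bpf_htonl(bbbb), bpf_htonl(cccc), \
 *		  bpf_htonl(dddd) }
 */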
......@@ -398,11 +402,12 @@ int ctx_narrow_access(struct bpf_sk_lookup *ctx)
if (LSW(ctx->protocol, 0) != IPPROTO_TCP)
return SK_DROP;
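/* For reference, not in this hunk: LSB()/LSW() force single-byte and
 * single-word narrow loads from a context field; earlier in this file
 * they are defined roughly as:
 *
 *	#define LSB(value, index) (((__u8 *)&(value))[index] & 0xff)
 *	#define LSW(value, index) (((__u16 *)&(value))[index] & 0xffff)
 *
 * so index selects the byte (or 16-bit word) in host byte order.
 */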
-/* Narrow loads from remote_port field. Expect non-0 value. */
-if (LSB(ctx->remote_port, 0) == 0 && LSB(ctx->remote_port, 1) == 0 &&
-LSB(ctx->remote_port, 2) == 0 && LSB(ctx->remote_port, 3) == 0)
/* Narrow loads from remote_port field. Expect SRC_PORT. */
if (LSB(ctx->remote_port, 0) != ((SRC_PORT >> 0) & 0xff) ||
LSB(ctx->remote_port, 1) != ((SRC_PORT >> 8) & 0xff) ||
LSB(ctx->remote_port, 2) != 0 || LSB(ctx->remote_port, 3) != 0)
return SK_DROP;
-if (LSW(ctx->remote_port, 0) == 0)
if (LSW(ctx->remote_port, 0) != SRC_PORT)
return SK_DROP;
/* Narrow loads from local_port field. Expect DST_PORT. */
......@@ -415,11 +420,14 @@ int ctx_narrow_access(struct bpf_sk_lookup *ctx)
/* Narrow loads from IPv4 fields */
if (v4) {
-/* Expect non-0.0.0.0 in remote_ip4 */
-if (LSB(ctx->remote_ip4, 0) == 0 && LSB(ctx->remote_ip4, 1) == 0 &&
-LSB(ctx->remote_ip4, 2) == 0 && LSB(ctx->remote_ip4, 3) == 0)
/* Expect SRC_IP4 in remote_ip4 */
if (LSB(ctx->remote_ip4, 0) != ((SRC_IP4 >> 0) & 0xff) ||
LSB(ctx->remote_ip4, 1) != ((SRC_IP4 >> 8) & 0xff) ||
LSB(ctx->remote_ip4, 2) != ((SRC_IP4 >> 16) & 0xff) ||
LSB(ctx->remote_ip4, 3) != ((SRC_IP4 >> 24) & 0xff))
return SK_DROP;
-if (LSW(ctx->remote_ip4, 0) == 0 && LSW(ctx->remote_ip4, 1) == 0)
if (LSW(ctx->remote_ip4, 0) != ((SRC_IP4 >> 0) & 0xffff) ||
LSW(ctx->remote_ip4, 1) != ((SRC_IP4 >> 16) & 0xffff))
return SK_DROP;
/* Expect DST_IP4 in local_ip4 */
......@@ -448,20 +456,32 @@ int ctx_narrow_access(struct bpf_sk_lookup *ctx)
/* Narrow loads from IPv6 fields */
if (!v4) {
-/* Expect non-:: IP in remote_ip6 */
-if (LSB(ctx->remote_ip6[0], 0) == 0 && LSB(ctx->remote_ip6[0], 1) == 0 &&
-LSB(ctx->remote_ip6[0], 2) == 0 && LSB(ctx->remote_ip6[0], 3) == 0 &&
-LSB(ctx->remote_ip6[1], 0) == 0 && LSB(ctx->remote_ip6[1], 1) == 0 &&
-LSB(ctx->remote_ip6[1], 2) == 0 && LSB(ctx->remote_ip6[1], 3) == 0 &&
-LSB(ctx->remote_ip6[2], 0) == 0 && LSB(ctx->remote_ip6[2], 1) == 0 &&
-LSB(ctx->remote_ip6[2], 2) == 0 && LSB(ctx->remote_ip6[2], 3) == 0 &&
-LSB(ctx->remote_ip6[3], 0) == 0 && LSB(ctx->remote_ip6[3], 1) == 0 &&
-LSB(ctx->remote_ip6[3], 2) == 0 && LSB(ctx->remote_ip6[3], 3) == 0)
/* Expect SRC_IP6 in remote_ip6 */
if (LSB(ctx->remote_ip6[0], 0) != ((SRC_IP6[0] >> 0) & 0xff) ||
LSB(ctx->remote_ip6[0], 1) != ((SRC_IP6[0] >> 8) & 0xff) ||
LSB(ctx->remote_ip6[0], 2) != ((SRC_IP6[0] >> 16) & 0xff) ||
LSB(ctx->remote_ip6[0], 3) != ((SRC_IP6[0] >> 24) & 0xff) ||
LSB(ctx->remote_ip6[1], 0) != ((SRC_IP6[1] >> 0) & 0xff) ||
LSB(ctx->remote_ip6[1], 1) != ((SRC_IP6[1] >> 8) & 0xff) ||
LSB(ctx->remote_ip6[1], 2) != ((SRC_IP6[1] >> 16) & 0xff) ||
LSB(ctx->remote_ip6[1], 3) != ((SRC_IP6[1] >> 24) & 0xff) ||
LSB(ctx->remote_ip6[2], 0) != ((SRC_IP6[2] >> 0) & 0xff) ||
LSB(ctx->remote_ip6[2], 1) != ((SRC_IP6[2] >> 8) & 0xff) ||
LSB(ctx->remote_ip6[2], 2) != ((SRC_IP6[2] >> 16) & 0xff) ||
LSB(ctx->remote_ip6[2], 3) != ((SRC_IP6[2] >> 24) & 0xff) ||
LSB(ctx->remote_ip6[3], 0) != ((SRC_IP6[3] >> 0) & 0xff) ||
LSB(ctx->remote_ip6[3], 1) != ((SRC_IP6[3] >> 8) & 0xff) ||
LSB(ctx->remote_ip6[3], 2) != ((SRC_IP6[3] >> 16) & 0xff) ||
LSB(ctx->remote_ip6[3], 3) != ((SRC_IP6[3] >> 24) & 0xff))
return SK_DROP;
-if (LSW(ctx->remote_ip6[0], 0) == 0 && LSW(ctx->remote_ip6[0], 1) == 0 &&
-LSW(ctx->remote_ip6[1], 0) == 0 && LSW(ctx->remote_ip6[1], 1) == 0 &&
-LSW(ctx->remote_ip6[2], 0) == 0 && LSW(ctx->remote_ip6[2], 1) == 0 &&
-LSW(ctx->remote_ip6[3], 0) == 0 && LSW(ctx->remote_ip6[3], 1) == 0)
if (LSW(ctx->remote_ip6[0], 0) != ((SRC_IP6[0] >> 0) & 0xffff) ||
LSW(ctx->remote_ip6[0], 1) != ((SRC_IP6[0] >> 16) & 0xffff) ||
LSW(ctx->remote_ip6[1], 0) != ((SRC_IP6[1] >> 0) & 0xffff) ||
LSW(ctx->remote_ip6[1], 1) != ((SRC_IP6[1] >> 16) & 0xffff) ||
LSW(ctx->remote_ip6[2], 0) != ((SRC_IP6[2] >> 0) & 0xffff) ||
LSW(ctx->remote_ip6[2], 1) != ((SRC_IP6[2] >> 16) & 0xffff) ||
LSW(ctx->remote_ip6[3], 0) != ((SRC_IP6[3] >> 0) & 0xffff) ||
LSW(ctx->remote_ip6[3], 1) != ((SRC_IP6[3] >> 16) & 0xffff))
return SK_DROP;
/* Expect DST_IP6 in local_ip6 */
if (LSB(ctx->local_ip6[0], 0) != ((DST_IP6[0] >> 0) & 0xff) ||
......
......@@ -105,7 +105,7 @@ struct bpf_test {
enum bpf_prog_type prog_type;
uint8_t flags;
void (*fill_helper)(struct bpf_test *self);
-uint8_t runs;
int runs;
#define bpf_testdata_struct_t \
struct { \
uint32_t retval, retval_unpriv; \
......@@ -1165,7 +1165,7 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
run_errs = 0;
run_successes = 0;
-if (!alignment_prevented_execution && fd_prog >= 0) {
if (!alignment_prevented_execution && fd_prog >= 0 && test->runs >= 0) {
uint32_t expected_val;
int i;
......
......@@ -239,6 +239,7 @@
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SK_LOOKUP,
.expected_attach_type = BPF_SK_LOOKUP,
.runs = -1,
},
/* invalid 8-byte reads from a 4-byte fields in bpf_sk_lookup */
{
......