Commit ad2f8eb0 authored by Martin KaFai Lau's avatar Martin KaFai Lau Committed by Alexei Starovoitov

bpf: selftests: Tcp header options

This patch adds tests for the new bpf tcp header option feature.

test_tcp_hdr_options.c:
- It tests header option writing and parsing in 3WHS: regular
  connection establishment, fastopen, and syncookie.
- In syncookie, the passive side's bpf prog is asking the active side
  to resend its bpf header option by specifying a RESEND bit in the
  outgoing SYNACK. handle_active_estab() and write_nodata_opt() have
  some details.
- handle_passive_estab() has comments on fastopen.
- It also has test for header writing and parsing in FIN packet.
- Most of the tests write an experimental option 254 with magic 0xeB9F.
- The no_exprm_estab() also tests writing a regular TCP option
  without any magic.

test_misc_tcp_options.c:
- It is a one-directional test.  The active side writes options and
  the passive side parses them.  The focus is to exercise
  the new helpers and API.
- Testing the new helper: bpf_load_hdr_opt() and bpf_store_hdr_opt().
- Testing the bpf_getsockopt(TCP_BPF_SYN).
- Negative tests for the above helpers.
- Testing the sock_ops->skb_data.
Signed-off-by: default avatarMartin KaFai Lau <kafai@fb.com>
Signed-off-by: default avatarAlexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20200820190117.2886749-1-kafai@fb.com
parent 8085e1dc
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/socket.h>
#include <linux/compiler.h>
#include "test_progs.h"
#include "cgroup_helpers.h"
#include "network_helpers.h"
#include "test_tcp_hdr_options.h"
#include "test_tcp_hdr_options.skel.h"
#include "test_misc_tcp_hdr_options.skel.h"
#define LO_ADDR6 "::eB9F"
#define CG_NAME "/tcpbpf-hdr-opt-test"
struct bpf_test_option exp_passive_estab_in;
struct bpf_test_option exp_active_estab_in;
struct bpf_test_option exp_passive_fin_in;
struct bpf_test_option exp_active_fin_in;
struct hdr_stg exp_passive_hdr_stg;
struct hdr_stg exp_active_hdr_stg = { .active = true, };
static struct test_misc_tcp_hdr_options *misc_skel;
static struct test_tcp_hdr_options *skel;
static int lport_linum_map_fd;
static int hdr_stg_map_fd;
static __u32 duration;
static int cg_fd;
struct sk_fds {
int srv_fd;
int passive_fd;
int active_fd;
int passive_lport;
int active_lport;
};
/* Add LO_ADDR6 as a host-scope address on lo so the test can
 * bind/connect to it.  Returns 0 on success, -1 on failure.
 */
static int add_lo_addr(void)
{
	char ip_addr_cmd[256];
	int cmdlen;

	cmdlen = snprintf(ip_addr_cmd, sizeof(ip_addr_cmd),
			  "ip -6 addr add %s/128 dev lo scope host",
			  LO_ADDR6);

	/* snprintf() may also fail outright (cmdlen < 0), not only
	 * truncate; treat both as an unusable command string.
	 */
	if (CHECK(cmdlen < 0 || cmdlen >= sizeof(ip_addr_cmd), "compile ip cmd",
		  "failed to add host addr %s to lo. ip cmdlen is too long\n",
		  LO_ADDR6))
		return -1;

	if (CHECK(system(ip_addr_cmd), "run ip cmd",
		  "failed to add host addr %s to lo\n", LO_ADDR6))
		return -1;

	return 0;
}
/* Move this process into a fresh network namespace, bring lo up and
 * install the test address.  Keeps sysctl/route changes local to the
 * test.  Returns 0 on success, -1 on failure.
 */
static int create_netns(void)
{
	if (CHECK(unshare(CLONE_NEWNET), "create netns",
		  "unshare(CLONE_NEWNET): %s (%d)",
		  strerror(errno), errno))
		return -1;

	if (CHECK(system("ip link set dev lo up"), "run ip cmd",
		  "failed to bring lo link up\n"))
		return -1;

	/* add_lo_addr() already returns 0/-1 */
	return add_lo_addr();
}
/* Write @value to the sysctl file @sysctl.
 * Returns 0 on success, -1 on failure (error already logged via CHECK).
 */
static int write_sysctl(const char *sysctl, const char *value)
{
	int fd, err, len, saved_errno;

	fd = open(sysctl, O_WRONLY);
	if (CHECK(fd == -1, "open sysctl", "open(%s): %s (%d)\n",
		  sysctl, strerror(errno), errno))
		return -1;

	len = strlen(value);
	err = write(fd, value, len);
	/* close() may clobber errno; preserve the write() error for
	 * the message below.
	 */
	saved_errno = errno;
	close(fd);
	if (CHECK(err != len, "write sysctl",
		  "write(%s, %s): err:%d %s (%d)\n",
		  sysctl, value, err, strerror(saved_errno), saved_errno))
		return -1;

	return 0;
}
/* Dump a struct hdr_stg to stderr, optionally prefixed. */
static void print_hdr_stg(const struct hdr_stg *hdr_stg, const char *prefix)
{
	if (!prefix)
		prefix = "";

	fprintf(stderr, "%s{active:%u, resend_syn:%u, syncookie:%u, fastopen:%u}\n",
		prefix, hdr_stg->active, hdr_stg->resend_syn,
		hdr_stg->syncookie, hdr_stg->fastopen);
}
/* Dump a struct bpf_test_option to stderr, optionally prefixed. */
static void print_option(const struct bpf_test_option *opt, const char *prefix)
{
	if (!prefix)
		prefix = "";

	fprintf(stderr, "%s{flags:0x%x, max_delack_ms:%u, rand:0x%x}\n",
		prefix, opt->flags, opt->max_delack_ms, opt->rand);
}
/* Close all three sockets of a test connection. */
static void sk_fds_close(struct sk_fds *sk_fds)
{
	close(sk_fds->active_fd);
	close(sk_fds->passive_fd);
	close(sk_fds->srv_fd);
}
/* Gracefully close both directions: half-close the active side and
 * expect EOF on the passive side, then the reverse.  This makes the
 * FIN packets (and any bpf-written FIN options) flow in both
 * directions.  Returns 0 on success, -1 on unexpected read result.
 */
static int sk_fds_shutdown(struct sk_fds *sk_fds)
{
	int nread, drain;

	shutdown(sk_fds->active_fd, SHUT_WR);
	nread = read(sk_fds->passive_fd, &drain, sizeof(drain));
	if (CHECK(nread != 0, "read-after-shutdown(passive_fd):",
		  "ret:%d %s (%d)\n",
		  nread, strerror(errno), errno))
		return -1;

	shutdown(sk_fds->passive_fd, SHUT_WR);
	nread = read(sk_fds->active_fd, &drain, sizeof(drain));
	if (CHECK(nread != 0, "read-after-shutdown(active_fd):",
		  "ret:%d %s (%d)\n",
		  nread, strerror(errno), errno))
		return -1;

	return 0;
}
/* Set up a full loopback connection: server, active (connecting) and
 * passive (accepted) sockets, recording both local ports.  With
 * @fast_open, the connect carries "FAST!!!" as TFO SYN data which is
 * then drained from the passive side.
 * On failure all fds in @sk_fds are set to -1 and -1 is returned.
 */
static int sk_fds_connect(struct sk_fds *sk_fds, bool fast_open)
{
	const char fast[] = "FAST!!!"; /* payload sent on the SYN for TFO */
	struct sockaddr_in6 addr6;
	socklen_t len;

	sk_fds->srv_fd = start_server(AF_INET6, SOCK_STREAM, LO_ADDR6, 0, 0);
	if (CHECK(sk_fds->srv_fd == -1, "start_server", "%s (%d)\n",
		  strerror(errno), errno))
		goto error;

	if (fast_open)
		sk_fds->active_fd = fastopen_connect(sk_fds->srv_fd, fast,
						     sizeof(fast), 0);
	else
		sk_fds->active_fd = connect_to_fd(sk_fds->srv_fd, 0);

	if (CHECK_FAIL(sk_fds->active_fd == -1)) {
		close(sk_fds->srv_fd);
		goto error;
	}

	/* Record both local ports: the bpf progs key their error map
	 * by lport (see check_error_linum()).
	 */
	len = sizeof(addr6);
	if (CHECK(getsockname(sk_fds->srv_fd, (struct sockaddr *)&addr6,
			      &len), "getsockname(srv_fd)", "%s (%d)\n",
		  strerror(errno), errno))
		goto error_close;
	sk_fds->passive_lport = ntohs(addr6.sin6_port);

	len = sizeof(addr6);
	if (CHECK(getsockname(sk_fds->active_fd, (struct sockaddr *)&addr6,
			      &len), "getsockname(active_fd)", "%s (%d)\n",
		  strerror(errno), errno))
		goto error_close;
	sk_fds->active_lport = ntohs(addr6.sin6_port);

	sk_fds->passive_fd = accept(sk_fds->srv_fd, NULL, 0);
	if (CHECK(sk_fds->passive_fd == -1, "accept(srv_fd)", "%s (%d)\n",
		  strerror(errno), errno))
		goto error_close;

	if (fast_open) {
		/* Drain the TFO SYN data so later test traffic starts
		 * from a clean stream.
		 */
		char bytes_in[sizeof(fast)];
		int ret;

		ret = read(sk_fds->passive_fd, bytes_in, sizeof(bytes_in));
		if (CHECK(ret != sizeof(fast), "read fastopen syn data",
			  "expected=%lu actual=%d\n", sizeof(fast), ret)) {
			close(sk_fds->passive_fd);
			goto error_close;
		}
	}

	return 0;

error_close:
	close(sk_fds->active_fd);
	close(sk_fds->srv_fd);

error:
	/* memset with -1 makes every int fd/port read back as -1 */
	memset(sk_fds, -1, sizeof(*sk_fds));
	return -1;
}
/* Compare an expected option against what the bpf prog captured.
 * On mismatch both are dumped to stderr and -1 is returned.
 */
static int check_hdr_opt(const struct bpf_test_option *exp,
			 const struct bpf_test_option *act,
			 const char *hdr_desc)
{
	bool mismatch = memcmp(exp, act, sizeof(*exp)) != 0;

	if (CHECK(mismatch, "expected-vs-actual", "unexpected %s\n",
		  hdr_desc)) {
		print_option(exp, "expected: ");
		print_option(act, " actual: ");
		return -1;
	}

	return 0;
}
/* Look up the sk_storage the bpf prog attached to @fd and compare it
 * against the expected hdr_stg.  Dumps both and returns -1 on
 * mismatch or lookup failure.
 */
static int check_hdr_stg(const struct hdr_stg *exp, int fd,
			 const char *stg_desc)
{
	struct hdr_stg act;

	if (CHECK(bpf_map_lookup_elem(hdr_stg_map_fd, &fd, &act),
		  "map_lookup(hdr_stg_map_fd)", "%s %s (%d)\n",
		  stg_desc, strerror(errno), errno))
		return -1;

	if (!memcmp(exp, &act, sizeof(*exp)))
		return 0;

	if (CHECK(true, "expected-vs-actual", "unexpected %s\n", stg_desc)) {
		print_hdr_stg(exp, "expected: ");
		print_hdr_stg(&act, " actual: ");
	}

	return -1;
}
static int check_error_linum(const struct sk_fds *sk_fds)
{
unsigned int nr_errors = 0;
struct linum_err linum_err;
int lport;
lport = sk_fds->passive_lport;
if (!bpf_map_lookup_elem(lport_linum_map_fd, &lport, &linum_err)) {
fprintf(stderr,
"bpf prog error out at lport:passive(%d), linum:%u err:%d\n",
lport, linum_err.linum, linum_err.err);
nr_errors++;
}
lport = sk_fds->active_lport;
if (!bpf_map_lookup_elem(lport_linum_map_fd, &lport, &linum_err)) {
fprintf(stderr,
"bpf prog error out at lport:active(%d), linum:%u err:%d\n",
lport, linum_err.linum, linum_err.err);
nr_errors++;
}
return nr_errors;
}
/* Shut the connection down (to trigger the FIN option path), then
 * verify every expectation: sk_storage on both sockets and the four
 * captured in-options.  Regardless of which check fails first, always
 * fall through to check_error_linum() and close the fds.
 */
static void check_hdr_and_close_fds(struct sk_fds *sk_fds)
{
	if (sk_fds_shutdown(sk_fds))
		goto check_linum;

	if (check_hdr_stg(&exp_passive_hdr_stg, sk_fds->passive_fd,
			  "passive_hdr_stg"))
		goto check_linum;

	if (check_hdr_stg(&exp_active_hdr_stg, sk_fds->active_fd,
			  "active_hdr_stg"))
		goto check_linum;

	if (check_hdr_opt(&exp_passive_estab_in, &skel->bss->passive_estab_in,
			  "passive_estab_in"))
		goto check_linum;

	if (check_hdr_opt(&exp_active_estab_in, &skel->bss->active_estab_in,
			  "active_estab_in"))
		goto check_linum;

	if (check_hdr_opt(&exp_passive_fin_in, &skel->bss->passive_fin_in,
			  "passive_fin_in"))
		goto check_linum;

	check_hdr_opt(&exp_active_fin_in, &skel->bss->active_fin_in,
		      "active_fin_in");

check_linum:
	CHECK_FAIL(check_error_linum(sk_fds));
	sk_fds_close(sk_fds);
}
/* Tell the bpf prog what to send: whatever one side writes out is
 * exactly what the other side is expected to receive.
 */
static void prepare_out(void)
{
	skel->bss->passive_synack_out = exp_active_estab_in;
	skel->bss->passive_fin_out = exp_active_fin_in;

	skel->bss->active_syn_out = exp_passive_estab_in;
	skel->bss->active_fin_out = exp_passive_fin_in;
}
/* Reset all shared state between subtests: the skeleton's in/out
 * option buffers, the userspace expectations, and the bpf error map.
 */
static void reset_test(void)
{
	size_t optsize = sizeof(struct bpf_test_option);
	int lport, err;

	/* Clear the skel's option buffers */
	memset(&skel->bss->active_syn_out, 0, optsize);
	memset(&skel->bss->active_fin_out, 0, optsize);
	memset(&skel->bss->active_estab_in, 0, optsize);
	memset(&skel->bss->active_fin_in, 0, optsize);
	memset(&skel->bss->passive_synack_out, 0, optsize);
	memset(&skel->bss->passive_fin_out, 0, optsize);
	memset(&skel->bss->passive_estab_in, 0, optsize);
	memset(&skel->bss->passive_fin_in, 0, optsize);

	/* Back to the default experimental kind/magic */
	skel->data->test_kind = TCPOPT_EXP;
	skel->data->test_magic = 0xeB9F;

	/* Clear the userspace expectations */
	memset(&exp_active_estab_in, 0, optsize);
	memset(&exp_active_fin_in, 0, optsize);
	memset(&exp_passive_estab_in, 0, optsize);
	memset(&exp_passive_fin_in, 0, optsize);
	memset(&exp_active_hdr_stg, 0, sizeof(exp_active_hdr_stg));
	memset(&exp_passive_hdr_stg, 0, sizeof(exp_passive_hdr_stg));
	exp_active_hdr_stg.active = true;

	/* Drain the error map.  Getting the next key of a just-deleted
	 * key restarts from the first remaining entry.
	 */
	for (err = bpf_map_get_next_key(lport_linum_map_fd, NULL, &lport);
	     !err;
	     err = bpf_map_get_next_key(lport_linum_map_fd, &lport, &lport))
		bpf_map_delete_elem(lport_linum_map_fd, &lport);
}
/* Subtest: option exchange during a TCP Fast Open 3WHS.  The data
 * rides on the SYN, and the passive side's sk_storage is expected to
 * have fastopen set.
 */
static void fastopen_estab(void)
{
	struct bpf_link *link;
	struct sk_fds sk_fds;

	hdr_stg_map_fd = bpf_map__fd(skel->maps.hdr_stg_map);
	lport_linum_map_fd = bpf_map__fd(skel->maps.lport_linum_map);

	/* What each side should receive (prepare_out() mirrors these
	 * into the peer's outgoing buffers).
	 */
	exp_passive_estab_in.flags = OPTION_F_RAND | OPTION_F_MAX_DELACK_MS;
	exp_passive_estab_in.rand = 0xfa;
	exp_passive_estab_in.max_delack_ms = 11;
	exp_active_estab_in.flags = OPTION_F_RAND | OPTION_F_MAX_DELACK_MS;
	exp_active_estab_in.rand = 0xce;
	exp_active_estab_in.max_delack_ms = 22;
	exp_passive_hdr_stg.fastopen = true;

	prepare_out();

	/* Allow fastopen without fastopen cookie */
	if (write_sysctl("/proc/sys/net/ipv4/tcp_fastopen", "1543"))
		return;

	link = bpf_program__attach_cgroup(skel->progs.estab, cg_fd);
	if (CHECK(IS_ERR(link), "attach_cgroup(estab)", "err: %ld\n",
		  PTR_ERR(link)))
		return;

	if (sk_fds_connect(&sk_fds, true)) {
		bpf_link__destroy(link);
		return;
	}

	check_hdr_and_close_fds(&sk_fds);
	bpf_link__destroy(link);
}
/* Subtest: option exchange while the listener is forced into
 * syncookie mode.  The passive side cannot save the SYN, so its bpf
 * prog asks the active side to RESEND its option in the ACK.
 */
static void syncookie_estab(void)
{
	struct bpf_link *link;
	struct sk_fds sk_fds;

	hdr_stg_map_fd = bpf_map__fd(skel->maps.hdr_stg_map);
	lport_linum_map_fd = bpf_map__fd(skel->maps.lport_linum_map);

	exp_passive_estab_in.flags = OPTION_F_RAND | OPTION_F_MAX_DELACK_MS;
	exp_passive_estab_in.rand = 0xfa;
	exp_passive_estab_in.max_delack_ms = 11;
	/* The active side additionally sees the RESEND request */
	exp_active_estab_in.flags = OPTION_F_RAND | OPTION_F_MAX_DELACK_MS |
					OPTION_F_RESEND;
	exp_active_estab_in.rand = 0xce;
	exp_active_estab_in.max_delack_ms = 22;
	exp_passive_hdr_stg.syncookie = true;
	/* Fix: the original ended this statement with a stray ','
	 * (comma operator), silently gluing it to prepare_out().
	 */
	exp_active_hdr_stg.resend_syn = true;

	prepare_out();

	/* Clear the RESEND to ensure the bpf prog can learn
	 * want_cookie and set the RESEND by itself.
	 */
	skel->bss->passive_synack_out.flags &= ~OPTION_F_RESEND;

	/* Enforce syncookie mode */
	if (write_sysctl("/proc/sys/net/ipv4/tcp_syncookies", "2"))
		return;

	link = bpf_program__attach_cgroup(skel->progs.estab, cg_fd);
	if (CHECK(IS_ERR(link), "attach_cgroup(estab)", "err: %ld\n",
		  PTR_ERR(link)))
		return;

	if (sk_fds_connect(&sk_fds, false)) {
		bpf_link__destroy(link);
		return;
	}

	check_hdr_and_close_fds(&sk_fds);
	bpf_link__destroy(link);
}
/* Subtest: header option written and parsed on the FIN packets in
 * both directions (no estab-time expectations are set here).
 */
static void fin(void)
{
	struct bpf_link *link;
	struct sk_fds sk_fds;

	hdr_stg_map_fd = bpf_map__fd(skel->maps.hdr_stg_map);
	lport_linum_map_fd = bpf_map__fd(skel->maps.lport_linum_map);

	/* Only FIN options are expected in this subtest */
	exp_passive_fin_in.flags = OPTION_F_RAND;
	exp_passive_fin_in.rand = 0xfa;
	exp_active_fin_in.flags = OPTION_F_RAND;
	exp_active_fin_in.rand = 0xce;

	prepare_out();

	if (write_sysctl("/proc/sys/net/ipv4/tcp_syncookies", "1"))
		return;

	link = bpf_program__attach_cgroup(skel->progs.estab, cg_fd);
	if (CHECK(IS_ERR(link), "attach_cgroup(estab)", "err: %ld\n",
		  PTR_ERR(link)))
		return;

	if (sk_fds_connect(&sk_fds, false)) {
		bpf_link__destroy(link);
		return;
	}

	/* sk_fds_shutdown() inside will make the FINs flow */
	check_hdr_and_close_fds(&sk_fds);
	bpf_link__destroy(link);
}
/* Common body for simple_estab()/no_exprm_estab(): a regular 3WHS
 * with options in SYN and SYNACK.  With @exprm the experimental
 * kind 254 + magic 0xeB9F is used; without it a plain kind 0xB9
 * option with no magic.
 */
static void __simple_estab(bool exprm)
{
	struct bpf_link *link;
	struct sk_fds sk_fds;

	hdr_stg_map_fd = bpf_map__fd(skel->maps.hdr_stg_map);
	lport_linum_map_fd = bpf_map__fd(skel->maps.lport_linum_map);

	exp_passive_estab_in.flags = OPTION_F_RAND | OPTION_F_MAX_DELACK_MS;
	exp_passive_estab_in.rand = 0xfa;
	exp_passive_estab_in.max_delack_ms = 11;
	exp_active_estab_in.flags = OPTION_F_RAND | OPTION_F_MAX_DELACK_MS;
	exp_active_estab_in.rand = 0xce;
	exp_active_estab_in.max_delack_ms = 22;

	prepare_out();

	if (!exprm) {
		/* Switch the bpf prog to a regular (non-experimental)
		 * option kind; reset_test() restores the defaults.
		 */
		skel->data->test_kind = 0xB9;
		skel->data->test_magic = 0;
	}

	if (write_sysctl("/proc/sys/net/ipv4/tcp_syncookies", "1"))
		return;

	link = bpf_program__attach_cgroup(skel->progs.estab, cg_fd);
	if (CHECK(IS_ERR(link), "attach_cgroup(estab)", "err: %ld\n",
		  PTR_ERR(link)))
		return;

	if (sk_fds_connect(&sk_fds, false)) {
		bpf_link__destroy(link);
		return;
	}

	check_hdr_and_close_fds(&sk_fds);
	bpf_link__destroy(link);
}
/* Subtest: regular option kind 0xB9 without any magic. */
static void no_exprm_estab(void)
{
	__simple_estab(false);
}
/* Subtest: experimental option 254 with magic 0xeB9F. */
static void simple_estab(void)
{
	__simple_estab(true);
}
/* Subtest for test_misc_tcp_hdr_options.c: one-directional traffic
 * (active writes, passive parses) exercising bpf_load_hdr_opt(),
 * bpf_store_hdr_opt(), bpf_getsockopt(TCP_BPF_SYN*) and
 * skops->skb_data.  Verifies the packet-type counters kept by the
 * bpf prog.
 */
static void misc(void)
{
	const char send_msg[] = "MISC!!!";
	char recv_msg[sizeof(send_msg)];
	const unsigned int nr_data = 2;
	struct bpf_link *link;
	struct sk_fds sk_fds;
	int i, ret;

	lport_linum_map_fd = bpf_map__fd(misc_skel->maps.lport_linum_map);

	if (write_sysctl("/proc/sys/net/ipv4/tcp_syncookies", "1"))
		return;

	link = bpf_program__attach_cgroup(misc_skel->progs.misc_estab, cg_fd);
	if (CHECK(IS_ERR(link), "attach_cgroup(misc_estab)", "err: %ld\n",
		  PTR_ERR(link)))
		return;

	if (sk_fds_connect(&sk_fds, false)) {
		bpf_link__destroy(link);
		return;
	}

	for (i = 0; i < nr_data; i++) {
		/* MSG_EOR to ensure skb will not be combined */
		ret = send(sk_fds.active_fd, send_msg, sizeof(send_msg),
			   MSG_EOR);
		if (CHECK(ret != sizeof(send_msg), "send(msg)", "ret:%d\n",
			  ret))
			goto check_linum;

		ret = read(sk_fds.passive_fd, recv_msg, sizeof(recv_msg));
		if (CHECK(ret != sizeof(send_msg), "read(msg)", "ret:%d\n",
			  ret))
			goto check_linum;
	}

	if (sk_fds_shutdown(&sk_fds))
		goto check_linum;

	/* Counters are maintained in check_active_hdr_in() and
	 * handle_passive_estab() of the misc bpf prog.
	 */
	CHECK(misc_skel->bss->nr_syn != 1, "unexpected nr_syn",
	      "expected (1) != actual (%u)\n",
	      misc_skel->bss->nr_syn);

	CHECK(misc_skel->bss->nr_data != nr_data, "unexpected nr_data",
	      "expected (%u) != actual (%u)\n",
	      nr_data, misc_skel->bss->nr_data);

	/* The last ACK may have been delayed, so it is either 1 or 2. */
	CHECK(misc_skel->bss->nr_pure_ack != 1 &&
	      misc_skel->bss->nr_pure_ack != 2,
	      "unexpected nr_pure_ack",
	      "expected (1 or 2) != actual (%u)\n",
	      misc_skel->bss->nr_pure_ack);

	CHECK(misc_skel->bss->nr_fin != 1, "unexpected nr_fin",
	      "expected (1) != actual (%u)\n",
	      misc_skel->bss->nr_fin);

check_linum:
	CHECK_FAIL(check_error_linum(&sk_fds));
	sk_fds_close(&sk_fds);
	bpf_link__destroy(link);
}
/* Subtest descriptor: name shown by test__start_subtest() and the
 * function that runs it.
 */
struct test {
	const char *desc;
	void (*run)(void);
};

/* Use the function name as the subtest name */
#define DEF_TEST(name) { #name, name }

static struct test tests[] = {
	DEF_TEST(simple_estab),
	DEF_TEST(no_exprm_estab),
	DEF_TEST(syncookie_estab),
	DEF_TEST(fastopen_estab),
	DEF_TEST(fin),
	DEF_TEST(misc),
};
/* Entry point: load both skeletons, join the test cgroup and run each
 * subtest in a fresh netns, resetting shared state in between.
 */
void test_tcp_hdr_options(void)
{
	int i;

	skel = test_tcp_hdr_options__open_and_load();
	/* Fix: terminate the log messages with '\n' like every other
	 * CHECK format string in this file.
	 */
	if (CHECK(!skel, "open and load skel", "failed\n"))
		return;

	misc_skel = test_misc_tcp_hdr_options__open_and_load();
	if (CHECK(!misc_skel, "open and load misc test skel", "failed\n"))
		goto skel_destroy;

	cg_fd = test__join_cgroup(CG_NAME);
	if (CHECK_FAIL(cg_fd < 0))
		goto skel_destroy;

	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		if (!test__start_subtest(tests[i].desc))
			continue;

		/* Each subtest gets its own netns */
		if (create_netns())
			break;

		tests[i].run();

		reset_test();
	}

	close(cg_fd);
skel_destroy:
	test_misc_tcp_hdr_options__destroy(misc_skel);
	test_tcp_hdr_options__destroy(skel);
}
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <stddef.h>
#include <errno.h>
#include <stdbool.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/socket.h>
#include <linux/bpf.h>
#include <linux/types.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#define BPF_PROG_TEST_TCP_HDR_OPTIONS
#include "test_tcp_hdr_options.h"
/* Last 16-bit group of LO_ADDR6 ("::eB9F"), network byte order;
 * compared against the saved SYN's IPv6 saddr/daddr below.
 */
__u16 last_addr16_n = __bpf_htons(0xeB9F);
/* Local ports of each side; _n = network byte order (from th->source),
 * _h = host byte order (from skops->local_port).
 */
__u16 active_lport_n = 0;
__u16 active_lport_h = 0;
__u16 passive_lport_n = 0;
__u16 passive_lport_h = 0;

/* options received at passive side */
unsigned int nr_pure_ack = 0;
unsigned int nr_data = 0;
unsigned int nr_syn = 0;
unsigned int nr_fin = 0;
/* Check the header received from the active side */
/* Check the header received from the active side.
 *
 * Runs a sequence of negative and positive bpf_load_hdr_opt() tests
 * (short buffer, invalid search keys, then successful lookups of the
 * regular 0xB9 and experimental 254/0xeB9F options).  When @check_syn,
 * additionally exercises bpf_getsockopt(TCP_BPF_SYN_IP/TCP_BPF_SYN)
 * against the saved SYN.
 */
static int __check_active_hdr_in(struct bpf_sock_ops *skops, bool check_syn)
{
	union {
		struct tcphdr th;
		struct ipv6hdr ip6;
		struct tcp_exprm_opt exprm_opt;
		struct tcp_opt reg_opt;
		__u8 data[100]; /* IPv6 (40) + Max TCP hdr (60) */
	} hdr = {};
	__u64 load_flags = check_syn ? BPF_LOAD_HDR_OPT_TCP_SYN : 0;
	struct tcphdr *pth;
	int ret;

	hdr.reg_opt.kind = 0xB9;

	/* The option is 4 bytes long instead of 2 bytes */
	ret = bpf_load_hdr_opt(skops, &hdr.reg_opt, 2, load_flags);
	if (ret != -ENOSPC)
		RET_CG_ERR(ret);

	/* Test searching magic with regular kind */
	hdr.reg_opt.len = 4;
	ret = bpf_load_hdr_opt(skops, &hdr.reg_opt, sizeof(hdr.reg_opt),
			       load_flags);
	if (ret != -EINVAL)
		RET_CG_ERR(ret);

	/* len == 0: search by kind only; expect the 4-byte option
	 * 0xB9 with data 0xfa 0xce written by write_active_opt().
	 */
	hdr.reg_opt.len = 0;
	ret = bpf_load_hdr_opt(skops, &hdr.reg_opt, sizeof(hdr.reg_opt),
			       load_flags);
	if (ret != 4 || hdr.reg_opt.len != 4 || hdr.reg_opt.kind != 0xB9 ||
	    hdr.reg_opt.data[0] != 0xfa || hdr.reg_opt.data[1] != 0xce)
		RET_CG_ERR(ret);

	/* Test searching experimental option with invalid kind length */
	hdr.exprm_opt.kind = TCPOPT_EXP;
	hdr.exprm_opt.len = 5;
	hdr.exprm_opt.magic = 0;
	ret = bpf_load_hdr_opt(skops, &hdr.exprm_opt, sizeof(hdr.exprm_opt),
			       load_flags);
	if (ret != -EINVAL)
		RET_CG_ERR(ret);

	/* Test searching experimental option with 0 magic value */
	hdr.exprm_opt.len = 4;
	ret = bpf_load_hdr_opt(skops, &hdr.exprm_opt, sizeof(hdr.exprm_opt),
			       load_flags);
	if (ret != -ENOMSG)
		RET_CG_ERR(ret);

	/* Correct magic: expect the 4-byte experimental option back */
	hdr.exprm_opt.magic = __bpf_htons(0xeB9F);
	ret = bpf_load_hdr_opt(skops, &hdr.exprm_opt, sizeof(hdr.exprm_opt),
			       load_flags);
	if (ret != 4 || hdr.exprm_opt.len != 4 ||
	    hdr.exprm_opt.kind != TCPOPT_EXP ||
	    hdr.exprm_opt.magic != __bpf_htons(0xeB9F))
		RET_CG_ERR(ret);

	if (!check_syn)
		return CG_OK;

	/* Test loading from skops->syn_skb if sk_state == TCP_NEW_SYN_RECV
	 *
	 * Test loading from tp->saved_syn for other sk_state.
	 */
	ret = bpf_getsockopt(skops, SOL_TCP, TCP_BPF_SYN_IP, &hdr.ip6,
			     sizeof(hdr.ip6));
	/* Buffer only fits the IPv6 header: expect -ENOSPC, but the
	 * header itself must still have been copied out.
	 */
	if (ret != -ENOSPC)
		RET_CG_ERR(ret);

	if (hdr.ip6.saddr.s6_addr16[7] != last_addr16_n ||
	    hdr.ip6.daddr.s6_addr16[7] != last_addr16_n)
		RET_CG_ERR(0);

	/* Full buffer: IPv6 header followed by the TCP header */
	ret = bpf_getsockopt(skops, SOL_TCP, TCP_BPF_SYN_IP, &hdr, sizeof(hdr));
	if (ret < 0)
		RET_CG_ERR(ret);

	pth = (struct tcphdr *)(&hdr.ip6 + 1);
	if (pth->dest != passive_lport_n || pth->source != active_lport_n)
		RET_CG_ERR(0);

	/* TCP_BPF_SYN starts at the TCP header */
	ret = bpf_getsockopt(skops, SOL_TCP, TCP_BPF_SYN, &hdr, sizeof(hdr));
	if (ret < 0)
		RET_CG_ERR(ret);

	if (hdr.th.dest != passive_lport_n || hdr.th.source != active_lport_n)
		RET_CG_ERR(0);

	return CG_OK;
}
/* Validate the active side's SYN (loaded from the saved/queued SYN). */
static int check_active_syn_in(struct bpf_sock_ops *skops)
{
	return __check_active_hdr_in(skops, true);
}
/* Validate a non-SYN packet from the active side and classify it via
 * skops->skb_data: data segment, FIN, or pure ACK, bumping the
 * matching counter.
 */
static int check_active_hdr_in(struct bpf_sock_ops *skops)
{
	struct tcphdr *th;

	if (__check_active_hdr_in(skops, false) == CG_ERR)
		return CG_ERR;

	/* Bounds check required before dereferencing skb_data */
	th = skops->skb_data;
	if (th + 1 > skops->skb_data_end)
		RET_CG_ERR(0);

	/* skb_len > header length => the segment carries payload */
	if (tcp_hdrlen(th) < skops->skb_len)
		nr_data++;

	if (th->fin)
		nr_fin++;

	if (th->ack && !th->fin && tcp_hdrlen(th) == skops->skb_len)
		nr_pure_ack++;

	return CG_OK;
}
/* HDR_OPT_LEN_CB on the active side: reserve header space.
 *
 * 12 bytes is more than the options written later actually need; the
 * slack lets write_active_opt() exercise its -EEXIST re-store tests.
 */
static int active_opt_len(struct bpf_sock_ops *skops)
{
	int reserve_err;

	reserve_err = bpf_reserve_hdr_opt(skops, 12, 0);
	if (reserve_err)
		RET_CG_ERR(reserve_err);

	return CG_OK;
}
/* WRITE_HDR_OPT_CB on the active side: store one experimental and one
 * regular option, verify duplicate stores fail with -EEXIST, read both
 * back, and on the SYN also probe/attempt-to-rewrite the kernel's own
 * window scale option.
 */
static int write_active_opt(struct bpf_sock_ops *skops)
{
	struct tcp_exprm_opt exprm_opt = {};
	struct tcp_opt win_scale_opt = {};
	struct tcp_opt reg_opt = {};
	struct tcphdr *th;
	int err, ret;

	exprm_opt.kind = TCPOPT_EXP;
	exprm_opt.len = 4;
	exprm_opt.magic = __bpf_htons(0xeB9F);

	reg_opt.kind = 0xB9;
	reg_opt.len = 4;
	reg_opt.data[0] = 0xfa;
	reg_opt.data[1] = 0xce;

	win_scale_opt.kind = TCPOPT_WINDOW;

	err = bpf_store_hdr_opt(skops, &exprm_opt, sizeof(exprm_opt), 0);
	if (err)
		RET_CG_ERR(err);

	/* Store the same exprm option */
	err = bpf_store_hdr_opt(skops, &exprm_opt, sizeof(exprm_opt), 0);
	if (err != -EEXIST)
		RET_CG_ERR(err);

	err = bpf_store_hdr_opt(skops, &reg_opt, sizeof(reg_opt), 0);
	if (err)
		RET_CG_ERR(err);
	err = bpf_store_hdr_opt(skops, &reg_opt, sizeof(reg_opt), 0);
	if (err != -EEXIST)
		RET_CG_ERR(err);

	/* Check the option has been written and can be searched */
	ret = bpf_load_hdr_opt(skops, &exprm_opt, sizeof(exprm_opt), 0);
	if (ret != 4 || exprm_opt.len != 4 || exprm_opt.kind != TCPOPT_EXP ||
	    exprm_opt.magic != __bpf_htons(0xeB9F))
		RET_CG_ERR(ret);

	/* len == 0: search by kind only */
	reg_opt.len = 0;
	ret = bpf_load_hdr_opt(skops, &reg_opt, sizeof(reg_opt), 0);
	if (ret != 4 || reg_opt.len != 4 || reg_opt.kind != 0xB9 ||
	    reg_opt.data[0] != 0xfa || reg_opt.data[1] != 0xce)
		RET_CG_ERR(ret);

	th = skops->skb_data;
	if (th + 1 > skops->skb_data_end)
		RET_CG_ERR(0);

	if (th->syn) {
		/* Remember the active side's local port in both byte
		 * orders for later callbacks.
		 */
		active_lport_h = skops->local_port;
		active_lport_n = th->source;

		/* Search the win scale option written by kernel
		 * in the SYN packet.
		 */
		ret = bpf_load_hdr_opt(skops, &win_scale_opt,
				       sizeof(win_scale_opt), 0);
		if (ret != 3 || win_scale_opt.len != 3 ||
		    win_scale_opt.kind != TCPOPT_WINDOW)
			RET_CG_ERR(ret);

		/* Write the win scale option that kernel
		 * has already written.
		 */
		err = bpf_store_hdr_opt(skops, &win_scale_opt,
					sizeof(win_scale_opt), 0);
		if (err != -EEXIST)
			RET_CG_ERR(err);
	}

	return CG_OK;
}
/* HDR_OPT_LEN_CB dispatcher: on the SYNACK, validate the incoming SYN
 * instead of reserving space; otherwise only the active side should
 * ever get here.
 */
static int handle_hdr_opt_len(struct bpf_sock_ops *skops)
{
	__u8 tcp_flags = skops_tcp_flags(skops);

	if ((tcp_flags & TCPHDR_SYNACK) == TCPHDR_SYNACK)
		/* Check the SYN from bpf_sock_ops_kern->syn_skb */
		return check_active_syn_in(skops);

	/* Passive side should have cleared the write hdr cb by now */
	if (skops->local_port == passive_lport_h)
		RET_CG_ERR(0);

	return active_opt_len(skops);
}
/* WRITE_HDR_OPT_CB dispatcher: only the active side writes options in
 * this one-directional test.
 */
static int handle_write_hdr_opt(struct bpf_sock_ops *skops)
{
	if (skops->local_port == passive_lport_h)
		RET_CG_ERR(0);

	return write_active_opt(skops);
}
/* PARSE_HDR_OPT_CB dispatcher: only the passive side should parse. */
static int handle_parse_hdr(struct bpf_sock_ops *skops)
{
	/* Passive side is not writing any non-standard/unknown
	 * option, so the active side should never be called.
	 */
	if (skops->local_port == active_lport_h)
		RET_CG_ERR(0);

	return check_active_hdr_in(skops);
}
/* PASSIVE_ESTABLISHED_CB: stop writing options, re-validate the SYN
 * (now via tp->saved_syn), count it, and check the 3WHS ACK's options.
 */
static int handle_passive_estab(struct bpf_sock_ops *skops)
{
	int err;

	/* No more write hdr cb */
	bpf_sock_ops_cb_flags_set(skops,
				  skops->bpf_sock_ops_cb_flags &
				  ~BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG);

	/* Recheck the SYN but check the tp->saved_syn this time */
	err = check_active_syn_in(skops);
	if (err == CG_ERR)
		return err;

	nr_syn++;

	/* The ack has header option written by the active side also */
	return check_active_hdr_in(skops);
}
SEC("sockops/misc_estab")
/* Main sockops program: route each callback to its handler.  The
 * listener turns on TCP_SAVE_SYN so the SYN can be re-read later via
 * bpf_getsockopt(TCP_BPF_SYN*).
 */
int misc_estab(struct bpf_sock_ops *skops)
{
	int true_val = 1;

	switch (skops->op) {
	case BPF_SOCK_OPS_TCP_LISTEN_CB:
		passive_lport_h = skops->local_port;
		passive_lport_n = __bpf_htons(passive_lport_h);
		bpf_setsockopt(skops, SOL_TCP, TCP_SAVE_SYN,
			       &true_val, sizeof(true_val));
		set_hdr_cb_flags(skops);
		break;
	case BPF_SOCK_OPS_TCP_CONNECT_CB:
		set_hdr_cb_flags(skops);
		break;
	case BPF_SOCK_OPS_PARSE_HDR_OPT_CB:
		return handle_parse_hdr(skops);
	case BPF_SOCK_OPS_HDR_OPT_LEN_CB:
		return handle_hdr_opt_len(skops);
	case BPF_SOCK_OPS_WRITE_HDR_OPT_CB:
		return handle_write_hdr_opt(skops);
	case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
		return handle_passive_estab(skops);
	}

	return CG_OK;
}
char _license[] SEC("license") = "GPL";
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <stddef.h>
#include <errno.h>
#include <stdbool.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <linux/tcp.h>
#include <linux/socket.h>
#include <linux/bpf.h>
#include <linux/types.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#define BPF_PROG_TEST_TCP_HDR_OPTIONS
#include "test_tcp_hdr_options.h"
#ifndef sizeof_field
#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
#endif

/* Option kind/magic under test; userspace flips these to a regular
 * (non-experimental) kind for the no_exprm_estab subtest.
 */
__u8 test_kind = TCPOPT_EXP;
__u16 test_magic = 0xeB9F;

/* *_out: what each side should write (filled in by userspace).
 * *_in:  what each side actually parsed (read back by userspace).
 */
struct bpf_test_option passive_synack_out = {};
struct bpf_test_option passive_fin_out = {};

struct bpf_test_option passive_estab_in = {};
struct bpf_test_option passive_fin_in = {};

struct bpf_test_option active_syn_out = {};
struct bpf_test_option active_fin_out = {};

struct bpf_test_option active_estab_in = {};
struct bpf_test_option active_fin_in = {};

/* Per-socket state (active/resend_syn/syncookie/fastopen flags) */
struct {
	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, struct hdr_stg);
} hdr_stg_map SEC(".maps");
static bool skops_want_cookie(const struct bpf_sock_ops *skops)
{
return skops->args[0] == BPF_WRITE_HDR_TCP_SYNACK_COOKIE;
}
static bool skops_current_mss(const struct bpf_sock_ops *skops)
{
return skops->args[0] == BPF_WRITE_HDR_TCP_CURRENT_MSS;
}
/* Total on-the-wire length of the test option for the given flag set:
 * one flags byte, one byte per payload-carrying flag, plus the
 * kind/len (and magic, for the experimental kind) overhead.
 * Returns 0 when no flags are set (nothing to write).
 */
static __u8 option_total_len(__u8 flags)
{
	__u8 i, len = 1; /* +1 for flags */

	if (!flags)
		return 0;

	/* RESEND bit does not use a byte */
	for (i = OPTION_RESEND + 1; i < __NR_OPTION_FLAGS; i++)
		if (TEST_OPTION_FLAGS(flags, i))
			len++;

	if (test_kind != TCPOPT_EXP)
		return len + 2; /* +1 kind, +1 kind-len */

	return len + TCP_BPF_EXPOPT_BASE_LEN;
}
/* Serialize a bpf_test_option into @data: the flags byte first, then
 * one byte each for max_delack_ms and rand when their flags are set.
 */
static void write_test_option(const struct bpf_test_option *test_opt,
			      __u8 *data)
{
	__u8 idx = 0;

	data[idx] = test_opt->flags;
	idx++;

	if (TEST_OPTION_FLAGS(test_opt->flags, OPTION_MAX_DELACK_MS)) {
		data[idx] = test_opt->max_delack_ms;
		idx++;
	}

	if (TEST_OPTION_FLAGS(test_opt->flags, OPTION_RAND)) {
		data[idx] = test_opt->rand;
		idx++;
	}
}
/* Serialize @test_opt and write it into the outgoing packet with
 * bpf_store_hdr_opt(), as an experimental option (kind 254 + magic)
 * or a regular option depending on test_kind.
 */
static int store_option(struct bpf_sock_ops *skops,
			const struct bpf_test_option *test_opt)
{
	union {
		struct tcp_exprm_opt exprm;
		struct tcp_opt regular;
	} write_opt;
	int err;

	if (test_kind == TCPOPT_EXP) {
		write_opt.exprm.kind = TCPOPT_EXP;
		write_opt.exprm.len = option_total_len(test_opt->flags);
		write_opt.exprm.magic = __bpf_htons(test_magic);
		write_opt.exprm.data32 = 0; /* zero the payload area first */
		write_test_option(test_opt, write_opt.exprm.data);
		err = bpf_store_hdr_opt(skops, &write_opt.exprm,
					sizeof(write_opt.exprm), 0);
	} else {
		write_opt.regular.kind = test_kind;
		write_opt.regular.len = option_total_len(test_opt->flags);
		write_opt.regular.data32 = 0;
		write_test_option(test_opt, write_opt.regular.data);
		err = bpf_store_hdr_opt(skops, &write_opt.regular,
					sizeof(write_opt.regular), 0);
	}

	if (err)
		RET_CG_ERR(err);

	return CG_OK;
}
/* Deserialize a test option from @start (the wire format produced by
 * write_test_option()) into @opt.  Always returns 0.
 */
static int parse_test_option(struct bpf_test_option *opt, const __u8 *start)
{
	const __u8 *cur = start;

	opt->flags = *cur;
	cur++;

	if (TEST_OPTION_FLAGS(opt->flags, OPTION_MAX_DELACK_MS)) {
		opt->max_delack_ms = *cur;
		cur++;
	}

	if (TEST_OPTION_FLAGS(opt->flags, OPTION_RAND)) {
		opt->rand = *cur;
		cur++;
	}

	return 0;
}
/* Search the packet (or the saved SYN when @from_syn) for the test
 * option and parse it into @test_opt.  Returns 0 on success or the
 * negative error from bpf_load_hdr_opt() (e.g. -ENOMSG: not found).
 */
static int load_option(struct bpf_sock_ops *skops,
		       struct bpf_test_option *test_opt, bool from_syn)
{
	union {
		struct tcp_exprm_opt exprm;
		struct tcp_opt regular;
	} search_opt;
	int ret, load_flags = from_syn ? BPF_LOAD_HDR_OPT_TCP_SYN : 0;

	if (test_kind == TCPOPT_EXP) {
		search_opt.exprm.kind = TCPOPT_EXP;
		search_opt.exprm.len = 4; /* kind+len+magic: search by magic */
		search_opt.exprm.magic = __bpf_htons(test_magic);
		search_opt.exprm.data32 = 0;
		ret = bpf_load_hdr_opt(skops, &search_opt.exprm,
				       sizeof(search_opt.exprm), load_flags);
		if (ret < 0)
			return ret;
		return parse_test_option(test_opt, search_opt.exprm.data);
	} else {
		search_opt.regular.kind = test_kind;
		search_opt.regular.len = 0; /* search by kind only */
		search_opt.regular.data32 = 0;
		ret = bpf_load_hdr_opt(skops, &search_opt.regular,
				       sizeof(search_opt.regular), load_flags);
		if (ret < 0)
			return ret;
		return parse_test_option(test_opt, search_opt.regular.data);
	}
}
/* HDR_OPT_LEN_CB for the SYNACK: only reserve space when the incoming
 * SYN actually carried the test option.
 */
static int synack_opt_len(struct bpf_sock_ops *skops)
{
	struct bpf_test_option test_opt = {};
	__u8 optlen;
	int err;

	if (!passive_synack_out.flags)
		return CG_OK;

	/* Peek at the SYN (saved or queued) for the peer's option */
	err = load_option(skops, &test_opt, true);

	/* bpf_test_option is not found */
	if (err == -ENOMSG)
		return CG_OK;

	if (err)
		RET_CG_ERR(err);

	optlen = option_total_len(passive_synack_out.flags);
	if (optlen) {
		err = bpf_reserve_hdr_opt(skops, optlen, 0);
		if (err)
			RET_CG_ERR(err);
	}

	return CG_OK;
}
/* WRITE_HDR_OPT_CB for the SYNACK: write passive_synack_out; in
 * syncookie mode additionally set RESEND so the active side re-sends
 * its option in the ACK (the cookie SYNACK cannot save the SYN).
 */
static int write_synack_opt(struct bpf_sock_ops *skops)
{
	struct bpf_test_option opt;

	if (!passive_synack_out.flags)
		/* We should not even be called since no header
		 * space has been reserved.
		 */
		RET_CG_ERR(0);

	opt = passive_synack_out;
	if (skops_want_cookie(skops))
		SET_OPTION_FLAGS(opt.flags, OPTION_RESEND);

	return store_option(skops, &opt);
}
/* HDR_OPT_LEN_CB for the SYN: reserve space for active_syn_out when
 * it has any flags to send.
 */
static int syn_opt_len(struct bpf_sock_ops *skops)
{
	__u8 optlen;
	int err;

	if (!active_syn_out.flags)
		return CG_OK;

	optlen = option_total_len(active_syn_out.flags);
	if (!optlen)
		return CG_OK;

	err = bpf_reserve_hdr_opt(skops, optlen, 0);
	if (err)
		RET_CG_ERR(err);

	return CG_OK;
}
/* WRITE_HDR_OPT_CB for the SYN (and for the resend-in-ACK path). */
static int write_syn_opt(struct bpf_sock_ops *skops)
{
	/* Space is only reserved when active_syn_out has flags */
	if (!active_syn_out.flags)
		RET_CG_ERR(0);

	return store_option(skops, &active_syn_out);
}
/* HDR_OPT_LEN_CB for the FIN: pick the active/passive fin-out buffer
 * based on this socket's hdr_stg and reserve space accordingly.
 */
static int fin_opt_len(struct bpf_sock_ops *skops)
{
	struct bpf_test_option *opt;
	struct hdr_stg *hdr_stg;
	__u8 optlen;
	int err;

	if (!skops->sk)
		RET_CG_ERR(0);

	/* Established sockets must already have storage (created at
	 * the *_ESTABLISHED_CB).
	 */
	hdr_stg = bpf_sk_storage_get(&hdr_stg_map, skops->sk, NULL, 0);
	if (!hdr_stg)
		RET_CG_ERR(0);

	if (hdr_stg->active)
		opt = &active_fin_out;
	else
		opt = &passive_fin_out;

	optlen = option_total_len(opt->flags);
	if (optlen) {
		err = bpf_reserve_hdr_opt(skops, optlen, 0);
		if (err)
			RET_CG_ERR(err);
	}

	return CG_OK;
}
/* WRITE_HDR_OPT_CB for the FIN: write the side-appropriate fin-out
 * option selected via this socket's hdr_stg.
 */
static int write_fin_opt(struct bpf_sock_ops *skops)
{
	struct bpf_test_option *opt;
	struct hdr_stg *hdr_stg;

	if (!skops->sk)
		RET_CG_ERR(0);

	hdr_stg = bpf_sk_storage_get(&hdr_stg_map, skops->sk, NULL, 0);
	if (!hdr_stg)
		RET_CG_ERR(0);

	if (hdr_stg->active)
		opt = &active_fin_out;
	else
		opt = &passive_fin_out;

	/* Space was only reserved when there are flags to send */
	if (!opt->flags)
		RET_CG_ERR(0);

	return store_option(skops, opt);
}
/* Should this (active) socket resend its SYN option in the ACK?
 * Returns 1/0 from the socket's hdr_stg, or -1 when the storage is
 * unavailable.
 */
static int resend_in_ack(struct bpf_sock_ops *skops)
{
	struct hdr_stg *stg;

	if (!skops->sk)
		return -1;

	stg = bpf_sk_storage_get(&hdr_stg_map, skops->sk, NULL, 0);
	if (!stg)
		return -1;

	return stg->resend_syn ? 1 : 0;
}
/* HDR_OPT_LEN_CB for a dataless packet: only the resend-syn case
 * needs header space (same size as the original SYN option).
 */
static int nodata_opt_len(struct bpf_sock_ops *skops)
{
	int resend = resend_in_ack(skops);

	if (resend < 0)
		RET_CG_ERR(0);

	return resend ? syn_opt_len(skops) : CG_OK;
}
/* WRITE_HDR_OPT_CB for a dataless packet: re-write the SYN option
 * when a resend was requested, otherwise nothing.
 */
static int write_nodata_opt(struct bpf_sock_ops *skops)
{
	int resend = resend_in_ack(skops);

	if (resend < 0)
		RET_CG_ERR(0);

	return resend ? write_syn_opt(skops) : CG_OK;
}
/* HDR_OPT_LEN_CB for a data packet.  Identical to the nodata case;
 * kept separate mostly to demonstrate dispatching on skops->skb_len.
 */
static int data_opt_len(struct bpf_sock_ops *skops)
{
	return nodata_opt_len(skops);
}
/* WRITE_HDR_OPT_CB for a data packet; same behavior as nodata. */
static int write_data_opt(struct bpf_sock_ops *skops)
{
	return write_nodata_opt(skops);
}
/* HDR_OPT_LEN_CB while the kernel computes the MSS: reserve the
 * largest option this prog could ever write (all flags set).
 */
static int current_mss_opt_len(struct bpf_sock_ops *skops)
{
	int err = bpf_reserve_hdr_opt(skops, option_total_len(OPTION_MASK), 0);

	if (err)
		RET_CG_ERR(err);

	return CG_OK;
}
/* HDR_OPT_LEN_CB dispatcher, ordered SYNACK > SYN > FIN > MSS probe >
 * data > nodata (SYNACK must be tested before SYN since it carries
 * both flags).
 */
static int handle_hdr_opt_len(struct bpf_sock_ops *skops)
{
	__u8 tcp_flags = skops_tcp_flags(skops);

	if ((tcp_flags & TCPHDR_SYNACK) == TCPHDR_SYNACK)
		return synack_opt_len(skops);

	if (tcp_flags & TCPHDR_SYN)
		return syn_opt_len(skops);

	if (tcp_flags & TCPHDR_FIN)
		return fin_opt_len(skops);

	if (skops_current_mss(skops))
		/* The kernel is calculating the MSS */
		return current_mss_opt_len(skops);

	if (skops->skb_len)
		return data_opt_len(skops);

	return nodata_opt_len(skops);
}
/* WRITE_HDR_OPT_CB dispatcher, mirroring handle_hdr_opt_len() but
 * distinguishing data from pure-ACK packets via skb_data.
 */
static int handle_write_hdr_opt(struct bpf_sock_ops *skops)
{
	__u8 tcp_flags = skops_tcp_flags(skops);
	struct tcphdr *th;

	if ((tcp_flags & TCPHDR_SYNACK) == TCPHDR_SYNACK)
		return write_synack_opt(skops);

	if (tcp_flags & TCPHDR_SYN)
		return write_syn_opt(skops);

	if (tcp_flags & TCPHDR_FIN)
		return write_fin_opt(skops);

	/* Bounds check required before dereferencing skb_data */
	th = skops->skb_data;
	if (th + 1 > skops->skb_data_end)
		RET_CG_ERR(0);

	if (skops->skb_len > tcp_hdrlen(th))
		return write_data_opt(skops);

	return write_nodata_opt(skops);
}
/* Cap this sock's maximum delayed-ACK time (TCP_BPF_DELACK_MAX,
 * microseconds) from a millisecond hint carried in the bpf option.
 */
static int set_delack_max(struct bpf_sock_ops *skops, __u8 max_delack_ms)
{
	__u32 delack_us = (__u32)max_delack_ms * 1000;

	return bpf_setsockopt(skops, SOL_TCP, TCP_BPF_DELACK_MAX,
			      &delack_us, sizeof(delack_us));
}
/* Raise this sock's minimum RTO (TCP_BPF_RTO_MIN, microseconds) to
 * match the peer's advertised max delayed-ACK time, so a delayed ACK
 * is not mistaken for a loss.
 */
static int set_rto_min(struct bpf_sock_ops *skops, __u8 peer_max_delack_ms)
{
	__u32 rto_min_us = (__u32)peer_max_delack_ms * 1000;

	return bpf_setsockopt(skops, SOL_TCP, TCP_BPF_RTO_MIN,
			      &rto_min_us, sizeof(rto_min_us));
}
/* BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB: the connecting side finished the
 * 3WHS.  Parse the peer's bpf option, create the per-sk hdr_stg state,
 * and apply the delack/rto hints exchanged in the handshake.
 */
static int handle_active_estab(struct bpf_sock_ops *skops)
{
	struct hdr_stg init_stg = {
		.active = true,
	};
	int err;

	/* false: read the option from the received pkt, not a saved syn
	 * (cf. the 'true' call in handle_passive_estab())
	 */
	err = load_option(skops, &active_estab_in, false);
	/* -ENOMSG (option absent) is fine; any other error is fatal */
	if (err && err != -ENOMSG)
		RET_CG_ERR(err);

	/* Peer asked us to resend our option in the ACK (syncookie mode) */
	init_stg.resend_syn = TEST_OPTION_FLAGS(active_estab_in.flags,
						OPTION_RESEND);
	if (!skops->sk || !bpf_sk_storage_get(&hdr_stg_map, skops->sk,
					      &init_stg,
					      BPF_SK_STORAGE_GET_F_CREATE))
		RET_CG_ERR(0);

	if (init_stg.resend_syn)
		/* Don't clear the write_hdr cb now because
		 * the ACK may get lost and retransmit may
		 * be needed.
		 *
		 * PARSE_ALL_HDR cb flag is set to learn if this
		 * resend_syn option has received by the peer.
		 *
		 * The header option will be resent until a valid
		 * packet is received at handle_parse_hdr()
		 * and all hdr cb flags will be cleared in
		 * handle_parse_hdr().
		 */
		set_parse_all_hdr_cb_flags(skops);
	else if (!active_fin_out.flags)
		/* No options will be written from now */
		clear_hdr_cb_flags(skops);

	if (active_syn_out.max_delack_ms) {
		/* Honour the delack limit we advertised in our SYN */
		err = set_delack_max(skops, active_syn_out.max_delack_ms);
		if (err)
			RET_CG_ERR(err);
	}

	if (active_estab_in.max_delack_ms) {
		/* Peer may delay its ACKs: raise our min RTO accordingly */
		err = set_rto_min(skops, active_estab_in.max_delack_ms);
		if (err)
			RET_CG_ERR(err);
	}

	return CG_OK;
}
/* BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB: the listening side has a newly
 * established connection.  Parse the peer's bpf option from the saved
 * SYN (or from the ACK in syncookie mode), create the per-sk hdr_stg
 * state, and apply the delack/rto hints.
 */
static int handle_passive_estab(struct bpf_sock_ops *skops)
{
	struct hdr_stg init_stg = {};
	struct tcphdr *th;
	int err;

	/* true: read the option from the kernel-saved SYN (TCP_SAVE_SYN) */
	err = load_option(skops, &passive_estab_in, true);
	if (err == -ENOENT) {
		/* saved_syn is not found. It was in syncookie mode.
		 * We have asked the active side to resend the options
		 * in ACK, so try to find the bpf_test_option from ACK now.
		 */
		err = load_option(skops, &passive_estab_in, false);
		init_stg.syncookie = true;
	}

	/* ENOMSG: The bpf_test_option is not found which is fine.
	 * Bail out now for all other errors.
	 */
	if (err && err != -ENOMSG)
		RET_CG_ERR(err);

	th = skops->skb_data;
	/* Bounds check required by the BPF verifier before deref of th */
	if (th + 1 > skops->skb_data_end)
		RET_CG_ERR(0);

	/* SYN bit on the pkt that established us => syn-data (fastopen) */
	if (th->syn) {
		/* Fastopen */

		/* Cannot clear cb_flags to stop write_hdr cb.
		 * synack is not sent yet for fast open.
		 * Even it was, the synack may need to be retransmitted.
		 *
		 * PARSE_ALL_HDR cb flag is set to learn
		 * if synack has reached the peer.
		 * All cb_flags will be cleared in handle_parse_hdr().
		 */
		set_parse_all_hdr_cb_flags(skops);
		init_stg.fastopen = true;
	} else if (!passive_fin_out.flags) {
		/* No options will be written from now */
		clear_hdr_cb_flags(skops);
	}

	if (!skops->sk ||
	    !bpf_sk_storage_get(&hdr_stg_map, skops->sk, &init_stg,
				BPF_SK_STORAGE_GET_F_CREATE))
		RET_CG_ERR(0);

	if (passive_synack_out.max_delack_ms) {
		/* Honour the delack limit we advertised in our SYNACK */
		err = set_delack_max(skops, passive_synack_out.max_delack_ms);
		if (err)
			RET_CG_ERR(err);
	}

	if (passive_estab_in.max_delack_ms) {
		/* Peer may delay its ACKs: raise our min RTO accordingly */
		err = set_rto_min(skops, passive_estab_in.max_delack_ms);
		if (err)
			RET_CG_ERR(err);
	}

	return CG_OK;
}
/* BPF_SOCK_OPS_PARSE_HDR_OPT_CB: a pkt arrived after the connection is
 * established.  Used to (a) learn that previously-written options have
 * reached the peer so the cb flags can be cleared, and (b) parse the
 * bpf option out of an incoming FIN.
 */
static int handle_parse_hdr(struct bpf_sock_ops *skops)
{
	struct hdr_stg *hdr_stg;
	struct tcphdr *th;

	if (!skops->sk)
		RET_CG_ERR(0);

	th = skops->skb_data;
	/* Bounds check required by the BPF verifier before deref of th */
	if (th + 1 > skops->skb_data_end)
		RET_CG_ERR(0);

	hdr_stg = bpf_sk_storage_get(&hdr_stg_map, skops->sk, NULL, 0);
	if (!hdr_stg)
		RET_CG_ERR(0);

	if (hdr_stg->resend_syn || hdr_stg->fastopen)
		/* The PARSE_ALL_HDR cb flag was turned on
		 * to ensure that the previously written
		 * options have reached the peer.
		 * Those previously written option includes:
		 *     - Active side: resend_syn in ACK during syncookie
		 *       or
		 *     - Passive side: SYNACK during fastopen
		 *
		 * A valid packet has been received here after
		 * the 3WHS, so the PARSE_ALL_HDR cb flag
		 * can be cleared now.
		 */
		clear_parse_all_hdr_cb_flags(skops);

	if (hdr_stg->resend_syn && !active_fin_out.flags)
		/* Active side resent the syn option in ACK
		 * because the server was in syncookie mode.
		 * A valid packet has been received, so
		 * clear header cb flags if there is no
		 * more option to send.
		 */
		clear_hdr_cb_flags(skops);

	if (hdr_stg->fastopen && !passive_fin_out.flags)
		/* Passive side was in fastopen.
		 * A valid packet has been received, so
		 * the SYNACK has reached the peer.
		 * Clear header cb flags if there is no more
		 * option to send.
		 */
		clear_hdr_cb_flags(skops);

	/* FIN may carry a bpf option: record it for the userspace test */
	if (th->fin) {
		struct bpf_test_option *fin_opt;
		int err;

		if (hdr_stg->active)
			fin_opt = &active_fin_in;
		else
			fin_opt = &passive_fin_in;

		err = load_option(skops, fin_opt, false);
		/* -ENOMSG (option absent) is fine; anything else is fatal */
		if (err && err != -ENOMSG)
			RET_CG_ERR(err);
	}

	return CG_OK;
}
/* The sockops entry point: dispatch each sock_ops callback op to its
 * handler above.
 */
SEC("sockops/estab")
int estab(struct bpf_sock_ops *skops)
{
	int true_val = 1;

	switch (skops->op) {
	case BPF_SOCK_OPS_TCP_LISTEN_CB:
		/* Save incoming SYNs so the passive side can parse the
		 * bpf option out of the saved syn in
		 * handle_passive_estab().  Return value is intentionally
		 * not checked in this test.
		 */
		bpf_setsockopt(skops, SOL_TCP, TCP_SAVE_SYN,
			       &true_val, sizeof(true_val));
		set_hdr_cb_flags(skops);
		break;
	case BPF_SOCK_OPS_TCP_CONNECT_CB:
		/* Active side: start writing/parsing header options */
		set_hdr_cb_flags(skops);
		break;
	case BPF_SOCK_OPS_PARSE_HDR_OPT_CB:
		return handle_parse_hdr(skops);
	case BPF_SOCK_OPS_HDR_OPT_LEN_CB:
		return handle_hdr_opt_len(skops);
	case BPF_SOCK_OPS_WRITE_HDR_OPT_CB:
		return handle_write_hdr_opt(skops);
	case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
		return handle_passive_estab(skops);
	case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
		return handle_active_estab(skops);
	}

	return CG_OK;
}
char _license[] SEC("license") = "GPL";
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2020 Facebook */
#ifndef _TEST_TCP_HDR_OPTIONS_H
#define _TEST_TCP_HDR_OPTIONS_H
/* Payload of the test's TCP header option.  Kept packed because it is
 * copied byte-for-byte into/out of the TCP header option space.
 */
struct bpf_test_option {
	__u8 flags;		/* bitmask of OPTION_F_* bits */
	__u8 max_delack_ms;	/* delayed-ACK hint, in milliseconds */
	__u8 rand;		/* arbitrary byte echoed for verification */
} __attribute__((packed));
/* Bit positions used in bpf_test_option.flags */
enum {
	OPTION_RESEND,		/* ask the peer to resend its option in the ACK */
	OPTION_MAX_DELACK_MS,	/* the max_delack_ms field is carried */
	OPTION_RAND,		/* the rand field is carried */
	__NR_OPTION_FLAGS,	/* number of flag bits; keep last */
};
/* Single-bit masks derived from the enum above */
#define OPTION_F_RESEND		(1 << OPTION_RESEND)
#define OPTION_F_MAX_DELACK_MS	(1 << OPTION_MAX_DELACK_MS)
#define OPTION_F_RAND		(1 << OPTION_RAND)
/* All flag bits set */
#define OPTION_MASK		((1 << __NR_OPTION_FLAGS) - 1)

/* Test / set one OPTION_* bit in a flags byte */
#define TEST_OPTION_FLAGS(flags, option) (1 & ((flags) >> (option)))
#define SET_OPTION_FLAGS(flags, option)	((flags) |= (1 << (option)))
/* Per-connection handshake state.  Store in bpf_sk_storage */
struct hdr_stg {
	bool active;	 /* true on the connecting (active) side */
	bool resend_syn; /* active side only */
	bool syncookie;	 /* passive side only */
	bool fastopen;	 /* passive side only */
};
/* First failure reported by the bpf prog (via RET_CG_ERR):
 * the source line number and the error code.
 */
struct linum_err {
	unsigned int linum;	/* __LINE__ at the failure site */
	int err;		/* error value passed to RET_CG_ERR() */
};
/* TCP header flag bits (same values as the on-wire flags byte) */
#define TCPHDR_FIN 0x01
#define TCPHDR_SYN 0x02
#define TCPHDR_RST 0x04
#define TCPHDR_PSH 0x08
#define TCPHDR_ACK 0x10
#define TCPHDR_URG 0x20
#define TCPHDR_ECE 0x40
#define TCPHDR_CWR 0x80
#define TCPHDR_SYNACK (TCPHDR_SYN | TCPHDR_ACK)

/* TCP option kinds used by the tests */
#define TCPOPT_EOL 0
#define TCPOPT_NOP 1
#define TCPOPT_WINDOW 3
#define TCPOPT_EXP 254	/* experimental option (carries a magic) */

/* kind(1) + len(1) + 16-bit magic for the experimental option */
#define TCP_BPF_EXPOPT_BASE_LEN 4
#define MAX_TCP_HDR_LEN 60
#define MAX_TCP_OPTION_SPACE 40

/* The remainder of this header is only for the bpf progs */
#ifdef BPF_PROG_TEST_TCP_HDR_OPTIONS

/* cgroup bpf prog return values */
#define CG_OK 1
#define CG_ERR 0

#ifndef SOL_TCP
#define SOL_TCP 6
#endif
/* On-wire layout of the experimental TCP option (kind 254):
 * kind, total length, 16-bit magic, then up to 4 payload bytes.
 */
struct tcp_exprm_opt {
	__u8 kind;
	__u8 len;
	__u16 magic;
	union {
		__u8 data[4];
		__u32 data32;
	};
} __attribute__((packed));
/* On-wire layout of a regular TCP option (no experimental magic) */
struct tcp_opt {
	__u8 kind;
	__u8 len;
	union {
		__u8 data[4];
		__u32 data32;
	};
} __attribute__((packed));
/* Keyed by local port (see RET_CG_ERR); records the first bpf-side
 * failure so the userspace test can report where the prog bailed out.
 */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 2);
	__type(key, int);
	__type(value, struct linum_err);
} lport_linum_map SEC(".maps");
/* TCP header length in bytes: doff counts 32-bit words */
static inline unsigned int tcp_hdrlen(const struct tcphdr *th)
{
	return th->doff * 4;
}
/* TCP flags byte (TCPHDR_*) of the pkt the callback is running for */
static inline __u8 skops_tcp_flags(const struct bpf_sock_ops *skops)
{
	return skops->skb_tcp_flags;
}
/* Stop both the write-hdr-opt and parse-unknown-hdr-opt callbacks,
 * keeping any other cb flags untouched.
 */
static inline void clear_hdr_cb_flags(struct bpf_sock_ops *skops)
{
	bpf_sock_ops_cb_flags_set(skops,
				  skops->bpf_sock_ops_cb_flags &
				  ~(BPF_SOCK_OPS_PARSE_UNKNOWN_HDR_OPT_CB_FLAG |
				    BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG));
}
/* Enable the write-hdr-opt and parse-unknown-hdr-opt callbacks,
 * keeping any other cb flags untouched.
 */
static inline void set_hdr_cb_flags(struct bpf_sock_ops *skops)
{
	bpf_sock_ops_cb_flags_set(skops,
				  skops->bpf_sock_ops_cb_flags |
				  BPF_SOCK_OPS_PARSE_UNKNOWN_HDR_OPT_CB_FLAG |
				  BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG);
}
/* Stop parsing every incoming pkt's header options (keep other flags) */
static inline void
clear_parse_all_hdr_cb_flags(struct bpf_sock_ops *skops)
{
	bpf_sock_ops_cb_flags_set(skops,
				  skops->bpf_sock_ops_cb_flags &
				  ~BPF_SOCK_OPS_PARSE_ALL_HDR_OPT_CB_FLAG);
}
/* Start parsing every incoming pkt's header options (keep other flags) */
static inline void
set_parse_all_hdr_cb_flags(struct bpf_sock_ops *skops)
{
	bpf_sock_ops_cb_flags_set(skops,
				  skops->bpf_sock_ops_cb_flags |
				  BPF_SOCK_OPS_PARSE_ALL_HDR_OPT_CB_FLAG);
}
/* Record the failure site (__LINE__ and __err) in lport_linum_map
 * keyed by the local port, disable all further header-option
 * callbacks, and return CG_ERR from the enclosing function.
 *
 * NOTE: this macro expands to a 'return' statement and relies on a
 * variable named 'skops' being in scope, so it may only be used
 * inside a callback handler that returns CG_OK/CG_ERR.
 * BPF_NOEXIST keeps the FIRST failure for a given port from being
 * overwritten by later ones.
 */
#define RET_CG_ERR(__err) ({			\
	struct linum_err __linum_err;		\
	int __lport;				\
						\
	__linum_err.linum = __LINE__;		\
	__linum_err.err = __err;		\
	__lport = skops->local_port;		\
	bpf_map_update_elem(&lport_linum_map, &__lport, &__linum_err, BPF_NOEXIST); \
	clear_hdr_cb_flags(skops);		\
	clear_parse_all_hdr_cb_flags(skops);	\
	return CG_ERR;				\
})
#endif /* BPF_PROG_TEST_TCP_HDR_OPTIONS */
#endif /* _TEST_TCP_HDR_OPTIONS_H */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment