Commit 99cacdc6 authored by Alexei Starovoitov

Merge branch 'replace-cg_bpf-prog'

Andrey Ignatov says:

====================
v3->v4:
- use OPTS_VALID and OPTS_GET to handle bpf_prog_attach_opts.

v2->v3:
- rely on DECLARE_LIBBPF_OPTS from libbpf_common.h;
- separate "required" and "optional" arguments in bpf_prog_attach_xattr;
- convert test_cgroup_attach to prog_tests;
- move the new selftest to prog_tests/cgroup_attach_multi.

v1->v2:
- move DECLARE_LIBBPF_OPTS from libbpf.h to bpf.h (patch 4);
- switch new libbpf API to OPTS framework;
- switch selftest to libbpf OPTS framework.

This patch set adds support for replacing cgroup-bpf programs attached with
the BPF_F_ALLOW_MULTI flag, so that any program in the list can be updated to
a new version without service interruption while the order of programs is
preserved.

Please see patch 3 for details on the use-case and API changes.

Other patches:
Patch 1 is preliminary refactoring of __cgroup_bpf_attach to simplify it.
Patch 2 is minor cleanup of hierarchy_allows_attach.
Patch 4 extends libbpf API to support new set of attach attributes.
Patch 5 converts test_cgroup_attach to prog_tests.
Patch 6 adds selftest coverage for the new API.
====================
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents c92bbaa0 06ac0186
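
For reference, a minimal userspace sketch of replacing one program in a
multi-prog list with the libbpf API added by this series (cg_fd, old_prog_fd
and new_prog_fd are hypothetical descriptors obtained elsewhere):

#include <linux/bpf.h>
#include <bpf/bpf.h>

static int replace_egress_prog(int cg_fd, int old_prog_fd, int new_prog_fd)
{
	DECLARE_LIBBPF_OPTS(bpf_prog_attach_opts, opts,
		.flags = BPF_F_ALLOW_MULTI | BPF_F_REPLACE,
		.replace_prog_fd = old_prog_fd,
	);

	/* new_prog_fd takes old_prog_fd's slot in the cgroup's prog list */
	return bpf_prog_attach_xattr(new_prog_fd, cg_fd,
				     BPF_CGROUP_INET_EGRESS, &opts);
}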
......@@ -85,6 +85,7 @@ int cgroup_bpf_inherit(struct cgroup *cgrp);
void cgroup_bpf_offline(struct cgroup *cgrp);
int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
struct bpf_prog *replace_prog,
enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
enum bpf_attach_type type);
......@@ -93,7 +94,8 @@ int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
/* Wrapper for __cgroup_bpf_*() protected by cgroup_mutex */
int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
enum bpf_attach_type type, u32 flags);
struct bpf_prog *replace_prog, enum bpf_attach_type type,
u32 flags);
int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
enum bpf_attach_type type, u32 flags);
int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
......
......@@ -231,6 +231,11 @@ enum bpf_attach_type {
* When a child program makes a decision (like picking TCP CA or sock bind),
* the parent program has a chance to override it.
*
* With BPF_F_ALLOW_MULTI a new program is added to the end of the list of
* programs for a cgroup. An old program at any position can also be replaced
* by specifying the BPF_F_REPLACE flag and the program to replace in the
* replace_bpf_fd attribute. The old program at that position will be released.
*
* A cgroup with MULTI or OVERRIDE flag allows any attach flags in sub-cgroups.
* A cgroup with NONE doesn't allow any programs in sub-cgroups.
* Ex1:
......@@ -249,6 +254,7 @@ enum bpf_attach_type {
*/
#define BPF_F_ALLOW_OVERRIDE (1U << 0)
#define BPF_F_ALLOW_MULTI (1U << 1)
#define BPF_F_REPLACE (1U << 2)
/* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the
* verifier will perform strict alignment checking as if the kernel
......@@ -442,6 +448,10 @@ union bpf_attr {
__u32 attach_bpf_fd; /* eBPF program to attach */
__u32 attach_type;
__u32 attach_flags;
__u32 replace_bpf_fd; /* previously attached eBPF
* program to replace if
* BPF_F_REPLACE is used
*/
};
struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */
......
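
To illustrate how the new replace_bpf_fd field would be consumed, a hedged
sketch of the raw BPF_PROG_ATTACH syscall (fds are hypothetical placeholders;
real users would normally go through libbpf as in patch 4):

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int prog_attach_replace_raw(int cg_fd, int old_prog_fd, int new_prog_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.target_fd = cg_fd;
	attr.attach_bpf_fd = new_prog_fd;
	attr.attach_type = BPF_CGROUP_INET_EGRESS;
	attr.attach_flags = BPF_F_ALLOW_MULTI | BPF_F_REPLACE;
	attr.replace_bpf_fd = old_prog_fd;	/* program being replaced */

	return syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
}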
......@@ -103,8 +103,7 @@ static u32 prog_list_length(struct list_head *head)
* if parent has overridable or multi-prog, allow attaching
*/
static bool hierarchy_allows_attach(struct cgroup *cgrp,
enum bpf_attach_type type,
u32 new_flags)
enum bpf_attach_type type)
{
struct cgroup *p;
......@@ -283,31 +282,34 @@ static int update_effective_progs(struct cgroup *cgrp,
* propagate the change to descendants
* @cgrp: The cgroup which descendants to traverse
* @prog: A program to attach
* @replace_prog: Previously attached program to replace if BPF_F_REPLACE is set
* @type: Type of attach operation
* @flags: Option flags
*
* Must be called with cgroup_mutex held.
*/
int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
struct bpf_prog *replace_prog,
enum bpf_attach_type type, u32 flags)
{
u32 saved_flags = (flags & (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI));
struct list_head *progs = &cgrp->bpf.progs[type];
struct bpf_prog *old_prog = NULL;
struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE],
*old_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {NULL};
struct bpf_prog_list *pl, *replace_pl = NULL;
enum bpf_cgroup_storage_type stype;
struct bpf_prog_list *pl;
bool pl_was_allocated;
int err;
if ((flags & BPF_F_ALLOW_OVERRIDE) && (flags & BPF_F_ALLOW_MULTI))
if (((flags & BPF_F_ALLOW_OVERRIDE) && (flags & BPF_F_ALLOW_MULTI)) ||
((flags & BPF_F_REPLACE) && !(flags & BPF_F_ALLOW_MULTI)))
/* invalid combination */
return -EINVAL;
if (!hierarchy_allows_attach(cgrp, type, flags))
if (!hierarchy_allows_attach(cgrp, type))
return -EPERM;
if (!list_empty(progs) && cgrp->bpf.flags[type] != flags)
if (!list_empty(progs) && cgrp->bpf.flags[type] != saved_flags)
/* Disallow attaching non-overridable on top
* of existing overridable in this cgroup.
* Disallow attaching multi-prog if overridable or none
......@@ -317,6 +319,21 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS)
return -E2BIG;
if (flags & BPF_F_ALLOW_MULTI) {
list_for_each_entry(pl, progs, node) {
if (pl->prog == prog)
/* disallow attaching the same prog twice */
return -EINVAL;
if (pl->prog == replace_prog)
replace_pl = pl;
}
if ((flags & BPF_F_REPLACE) && !replace_pl)
/* prog to replace not found for cgroup */
return -ENOENT;
} else if (!list_empty(progs)) {
replace_pl = list_first_entry(progs, typeof(*pl), node);
}
for_each_cgroup_storage_type(stype) {
storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
if (IS_ERR(storage[stype])) {
......@@ -327,53 +344,28 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
}
}
if (flags & BPF_F_ALLOW_MULTI) {
list_for_each_entry(pl, progs, node) {
if (pl->prog == prog) {
/* disallow attaching the same prog twice */
for_each_cgroup_storage_type(stype)
bpf_cgroup_storage_free(storage[stype]);
return -EINVAL;
}
if (replace_pl) {
pl = replace_pl;
old_prog = pl->prog;
for_each_cgroup_storage_type(stype) {
old_storage[stype] = pl->storage[stype];
bpf_cgroup_storage_unlink(old_storage[stype]);
}
} else {
pl = kmalloc(sizeof(*pl), GFP_KERNEL);
if (!pl) {
for_each_cgroup_storage_type(stype)
bpf_cgroup_storage_free(storage[stype]);
return -ENOMEM;
}
pl_was_allocated = true;
pl->prog = prog;
for_each_cgroup_storage_type(stype)
pl->storage[stype] = storage[stype];
list_add_tail(&pl->node, progs);
} else {
if (list_empty(progs)) {
pl = kmalloc(sizeof(*pl), GFP_KERNEL);
if (!pl) {
for_each_cgroup_storage_type(stype)
bpf_cgroup_storage_free(storage[stype]);
return -ENOMEM;
}
pl_was_allocated = true;
list_add_tail(&pl->node, progs);
} else {
pl = list_first_entry(progs, typeof(*pl), node);
old_prog = pl->prog;
for_each_cgroup_storage_type(stype) {
old_storage[stype] = pl->storage[stype];
bpf_cgroup_storage_unlink(old_storage[stype]);
}
pl_was_allocated = false;
}
pl->prog = prog;
for_each_cgroup_storage_type(stype)
pl->storage[stype] = storage[stype];
}
cgrp->bpf.flags[type] = flags;
pl->prog = prog;
for_each_cgroup_storage_type(stype)
pl->storage[stype] = storage[stype];
cgrp->bpf.flags[type] = saved_flags;
err = update_effective_progs(cgrp, type);
if (err)
......@@ -401,7 +393,7 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
pl->storage[stype] = old_storage[stype];
bpf_cgroup_storage_link(old_storage[stype], cgrp, type);
}
if (pl_was_allocated) {
if (!replace_pl) {
list_del(&pl->node);
kfree(pl);
}
......@@ -539,6 +531,7 @@ int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
int cgroup_bpf_prog_attach(const union bpf_attr *attr,
enum bpf_prog_type ptype, struct bpf_prog *prog)
{
struct bpf_prog *replace_prog = NULL;
struct cgroup *cgrp;
int ret;
......@@ -546,8 +539,20 @@ int cgroup_bpf_prog_attach(const union bpf_attr *attr,
if (IS_ERR(cgrp))
return PTR_ERR(cgrp);
ret = cgroup_bpf_attach(cgrp, prog, attr->attach_type,
if ((attr->attach_flags & BPF_F_ALLOW_MULTI) &&
(attr->attach_flags & BPF_F_REPLACE)) {
replace_prog = bpf_prog_get_type(attr->replace_bpf_fd, ptype);
if (IS_ERR(replace_prog)) {
cgroup_put(cgrp);
return PTR_ERR(replace_prog);
}
}
ret = cgroup_bpf_attach(cgrp, prog, replace_prog, attr->attach_type,
attr->attach_flags);
if (replace_prog)
bpf_prog_put(replace_prog);
cgroup_put(cgrp);
return ret;
}
......
......@@ -2073,10 +2073,10 @@ static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
}
}
#define BPF_PROG_ATTACH_LAST_FIELD attach_flags
#define BPF_PROG_ATTACH_LAST_FIELD replace_bpf_fd
#define BPF_F_ATTACH_MASK \
(BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI)
(BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI | BPF_F_REPLACE)
static int bpf_prog_attach(const union bpf_attr *attr)
{
......
......@@ -6288,12 +6288,13 @@ void cgroup_sk_free(struct sock_cgroup_data *skcd)
#ifdef CONFIG_CGROUP_BPF
int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
enum bpf_attach_type type, u32 flags)
struct bpf_prog *replace_prog, enum bpf_attach_type type,
u32 flags)
{
int ret;
mutex_lock(&cgroup_mutex);
ret = __cgroup_bpf_attach(cgrp, prog, type, flags);
ret = __cgroup_bpf_attach(cgrp, prog, replace_prog, type, flags);
mutex_unlock(&cgroup_mutex);
return ret;
}
......
......@@ -231,6 +231,11 @@ enum bpf_attach_type {
* When a child program makes a decision (like picking TCP CA or sock bind),
* the parent program has a chance to override it.
*
* With BPF_F_ALLOW_MULTI a new program is added to the end of the list of
* programs for a cgroup. An old program at any position can also be replaced
* by specifying the BPF_F_REPLACE flag and the program to replace in the
* replace_bpf_fd attribute. The old program at that position will be released.
*
* A cgroup with MULTI or OVERRIDE flag allows any attach flags in sub-cgroups.
* A cgroup with NONE doesn't allow any programs in sub-cgroups.
* Ex1:
......@@ -249,6 +254,7 @@ enum bpf_attach_type {
*/
#define BPF_F_ALLOW_OVERRIDE (1U << 0)
#define BPF_F_ALLOW_MULTI (1U << 1)
#define BPF_F_REPLACE (1U << 2)
/* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the
* verifier will perform strict alignment checking as if the kernel
......@@ -442,6 +448,10 @@ union bpf_attr {
__u32 attach_bpf_fd; /* eBPF program to attach */
__u32 attach_type;
__u32 attach_flags;
__u32 replace_bpf_fd; /* previously attached eBPF
* program to replace if
* BPF_F_REPLACE is used
*/
};
struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */
......
......@@ -466,14 +466,29 @@ int bpf_obj_get(const char *pathname)
int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type,
unsigned int flags)
{
DECLARE_LIBBPF_OPTS(bpf_prog_attach_opts, opts,
.flags = flags,
);
return bpf_prog_attach_xattr(prog_fd, target_fd, type, &opts);
}
int bpf_prog_attach_xattr(int prog_fd, int target_fd,
enum bpf_attach_type type,
const struct bpf_prog_attach_opts *opts)
{
union bpf_attr attr;
if (!OPTS_VALID(opts, bpf_prog_attach_opts))
return -EINVAL;
memset(&attr, 0, sizeof(attr));
attr.target_fd = target_fd;
attr.attach_bpf_fd = prog_fd;
attr.attach_type = type;
attr.attach_flags = flags;
attr.attach_flags = OPTS_GET(opts, flags, 0);
attr.replace_bpf_fd = OPTS_GET(opts, replace_prog_fd, 0);
return sys_bpf(BPF_PROG_ATTACH, &attr, sizeof(attr));
}
......
......@@ -126,8 +126,19 @@ LIBBPF_API int bpf_map_get_next_key(int fd, const void *key, void *next_key);
LIBBPF_API int bpf_map_freeze(int fd);
LIBBPF_API int bpf_obj_pin(int fd, const char *pathname);
LIBBPF_API int bpf_obj_get(const char *pathname);
struct bpf_prog_attach_opts {
size_t sz; /* size of this struct for forward/backward compatibility */
unsigned int flags;
int replace_prog_fd;
};
#define bpf_prog_attach_opts__last_field replace_prog_fd
LIBBPF_API int bpf_prog_attach(int prog_fd, int attachable_fd,
enum bpf_attach_type type, unsigned int flags);
LIBBPF_API int bpf_prog_attach_xattr(int prog_fd, int attachable_fd,
enum bpf_attach_type type,
const struct bpf_prog_attach_opts *opts);
LIBBPF_API int bpf_prog_detach(int attachable_fd, enum bpf_attach_type type);
LIBBPF_API int bpf_prog_detach2(int prog_fd, int attachable_fd,
enum bpf_attach_type type);
......
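
A hedged sketch of filling bpf_prog_attach_opts by hand, equivalent to what
DECLARE_LIBBPF_OPTS does above; the sz field is what OPTS_VALID() in
bpf_prog_attach_xattr() checks for forward/backward compatibility (fds are
hypothetical placeholders):

#include <string.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>

static int attach_with_replace(int cg_fd, int old_prog_fd, int new_prog_fd)
{
	struct bpf_prog_attach_opts opts;

	memset(&opts, 0, sizeof(opts));
	opts.sz = sizeof(opts);		/* lets libbpf validate the struct size */
	opts.flags = BPF_F_ALLOW_MULTI | BPF_F_REPLACE;
	opts.replace_prog_fd = old_prog_fd;

	return bpf_prog_attach_xattr(new_prog_fd, cg_fd,
				     BPF_CGROUP_INET_EGRESS, &opts);
}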
......@@ -219,6 +219,7 @@ LIBBPF_0.0.7 {
bpf_object__detach_skeleton;
bpf_object__load_skeleton;
bpf_object__open_skeleton;
bpf_prog_attach_xattr;
bpf_program__attach;
bpf_program__name;
btf__align_of;
......
......@@ -21,7 +21,6 @@ test_lirc_mode2_user
get_cgroup_id_user
test_skb_cgroup_id_user
test_socket_cookie
test_cgroup_attach
test_cgroup_storage
test_select_reuseport
test_flow_dissector
......
......@@ -32,7 +32,7 @@ TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test
test_sock test_btf test_sockmap get_cgroup_id_user test_socket_cookie \
test_cgroup_storage \
test_netcnt test_tcpnotify_user test_sock_fields test_sysctl test_hashmap \
test_cgroup_attach test_progs-no_alu32
test_progs-no_alu32
# Also test bpf-gcc, if present
ifneq ($(BPF_GCC),)
......@@ -136,7 +136,6 @@ $(OUTPUT)/test_cgroup_storage: cgroup_helpers.c
$(OUTPUT)/test_netcnt: cgroup_helpers.c
$(OUTPUT)/test_sock_fields: cgroup_helpers.c
$(OUTPUT)/test_sysctl: cgroup_helpers.c
$(OUTPUT)/test_cgroup_attach: cgroup_helpers.c
.PHONY: force
......
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "cgroup_helpers.h"
#define PING_CMD "ping -q -c1 -w1 127.0.0.1 > /dev/null"
char bpf_log_buf[BPF_LOG_BUF_SIZE];
static int prog_load(void)
{
struct bpf_insn prog[] = {
BPF_MOV64_IMM(BPF_REG_0, 1), /* r0 = 1 */
BPF_EXIT_INSN(),
};
size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);
return bpf_load_program(BPF_PROG_TYPE_CGROUP_SKB,
prog, insns_cnt, "GPL", 0,
bpf_log_buf, BPF_LOG_BUF_SIZE);
}
void test_cgroup_attach_autodetach(void)
{
__u32 duration = 0, prog_cnt = 4, attach_flags;
int allow_prog[2] = {-1};
__u32 prog_ids[2] = {0};
void *ptr = NULL;
int cg = 0, i;
int attempts;
for (i = 0; i < ARRAY_SIZE(allow_prog); i++) {
allow_prog[i] = prog_load();
if (CHECK(allow_prog[i] < 0, "prog_load",
"verifier output:\n%s\n-------\n", bpf_log_buf))
goto err;
}
if (CHECK_FAIL(setup_cgroup_environment()))
goto err;
/* create a cgroup, attach two programs and remember their ids */
cg = create_and_get_cgroup("/cg_autodetach");
if (CHECK_FAIL(cg < 0))
goto err;
if (CHECK_FAIL(join_cgroup("/cg_autodetach")))
goto err;
for (i = 0; i < ARRAY_SIZE(allow_prog); i++)
if (CHECK(bpf_prog_attach(allow_prog[i], cg,
BPF_CGROUP_INET_EGRESS,
BPF_F_ALLOW_MULTI),
"prog_attach", "prog[%d], errno=%d\n", i, errno))
goto err;
/* make sure that programs are attached and run some traffic */
if (CHECK(bpf_prog_query(cg, BPF_CGROUP_INET_EGRESS, 0, &attach_flags,
prog_ids, &prog_cnt),
"prog_query", "errno=%d\n", errno))
goto err;
if (CHECK_FAIL(system(PING_CMD)))
goto err;
/* allocate some memory (4MB) to pin the original cgroup */
ptr = malloc(4 * (1 << 20));
if (CHECK_FAIL(!ptr))
goto err;
/* close programs and cgroup fd */
for (i = 0; i < ARRAY_SIZE(allow_prog); i++) {
close(allow_prog[i]);
allow_prog[i] = -1;
}
close(cg);
cg = 0;
/* leave the cgroup and remove it. don't detach programs */
cleanup_cgroup_environment();
/* wait for the asynchronous auto-detachment;
 * give up after no more than 5 seconds.
*/
for (i = 0; i < ARRAY_SIZE(prog_ids); i++) {
for (attempts = 5; attempts >= 0; attempts--) {
int fd = bpf_prog_get_fd_by_id(prog_ids[i]);
if (fd < 0)
break;
/* don't leave the fd open */
close(fd);
if (CHECK_FAIL(!attempts))
goto err;
sleep(1);
}
}
err:
for (i = 0; i < ARRAY_SIZE(allow_prog); i++)
if (allow_prog[i] >= 0)
close(allow_prog[i]);
if (cg)
close(cg);
free(ptr);
cleanup_cgroup_environment();
}
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "cgroup_helpers.h"
#define PING_CMD "ping -q -c1 -w1 127.0.0.1 > /dev/null"
char bpf_log_buf[BPF_LOG_BUF_SIZE];
static int map_fd = -1;
static int prog_load_cnt(int verdict, int val)
{
int cgroup_storage_fd, percpu_cgroup_storage_fd;
if (map_fd < 0)
map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 1, 0);
if (map_fd < 0) {
printf("failed to create map '%s'\n", strerror(errno));
return -1;
}
cgroup_storage_fd = bpf_create_map(BPF_MAP_TYPE_CGROUP_STORAGE,
sizeof(struct bpf_cgroup_storage_key), 8, 0, 0);
if (cgroup_storage_fd < 0) {
printf("failed to create map '%s'\n", strerror(errno));
return -1;
}
percpu_cgroup_storage_fd = bpf_create_map(
BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
sizeof(struct bpf_cgroup_storage_key), 8, 0, 0);
if (percpu_cgroup_storage_fd < 0) {
printf("failed to create map '%s'\n", strerror(errno));
return -1;
}
struct bpf_insn prog[] = {
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), /* *(u32 *)(fp - 4) = r0 */
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), /* r2 = fp - 4 */
BPF_LD_MAP_FD(BPF_REG_1, map_fd),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
BPF_MOV64_IMM(BPF_REG_1, val), /* r1 = val */
BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_0, BPF_REG_1, 0, 0), /* xadd r0 += r1 */
BPF_LD_MAP_FD(BPF_REG_1, cgroup_storage_fd),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
BPF_MOV64_IMM(BPF_REG_1, val),
BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_0, BPF_REG_1, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, percpu_cgroup_storage_fd),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 0x1),
BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */
BPF_EXIT_INSN(),
};
size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);
int ret;
ret = bpf_load_program(BPF_PROG_TYPE_CGROUP_SKB,
prog, insns_cnt, "GPL", 0,
bpf_log_buf, BPF_LOG_BUF_SIZE);
close(cgroup_storage_fd);
return ret;
}
void test_cgroup_attach_multi(void)
{
__u32 prog_ids[4], prog_cnt = 0, attach_flags, saved_prog_id;
int cg1 = 0, cg2 = 0, cg3 = 0, cg4 = 0, cg5 = 0, key = 0;
DECLARE_LIBBPF_OPTS(bpf_prog_attach_opts, attach_opts);
int allow_prog[7] = {-1};
unsigned long long value;
__u32 duration = 0;
int i = 0;
for (i = 0; i < ARRAY_SIZE(allow_prog); i++) {
allow_prog[i] = prog_load_cnt(1, 1 << i);
if (CHECK(allow_prog[i] < 0, "prog_load",
"verifier output:\n%s\n-------\n", bpf_log_buf))
goto err;
}
if (CHECK_FAIL(setup_cgroup_environment()))
goto err;
cg1 = create_and_get_cgroup("/cg1");
if (CHECK_FAIL(cg1 < 0))
goto err;
cg2 = create_and_get_cgroup("/cg1/cg2");
if (CHECK_FAIL(cg2 < 0))
goto err;
cg3 = create_and_get_cgroup("/cg1/cg2/cg3");
if (CHECK_FAIL(cg3 < 0))
goto err;
cg4 = create_and_get_cgroup("/cg1/cg2/cg3/cg4");
if (CHECK_FAIL(cg4 < 0))
goto err;
cg5 = create_and_get_cgroup("/cg1/cg2/cg3/cg4/cg5");
if (CHECK_FAIL(cg5 < 0))
goto err;
if (CHECK_FAIL(join_cgroup("/cg1/cg2/cg3/cg4/cg5")))
goto err;
if (CHECK(bpf_prog_attach(allow_prog[0], cg1, BPF_CGROUP_INET_EGRESS,
BPF_F_ALLOW_MULTI),
"prog0_attach_to_cg1_multi", "errno=%d\n", errno))
goto err;
if (CHECK(!bpf_prog_attach(allow_prog[0], cg1, BPF_CGROUP_INET_EGRESS,
BPF_F_ALLOW_MULTI),
"fail_same_prog_attach_to_cg1", "unexpected success\n"))
goto err;
if (CHECK(bpf_prog_attach(allow_prog[1], cg1, BPF_CGROUP_INET_EGRESS,
BPF_F_ALLOW_MULTI),
"prog1_attach_to_cg1_multi", "errno=%d\n", errno))
goto err;
if (CHECK(bpf_prog_attach(allow_prog[2], cg2, BPF_CGROUP_INET_EGRESS,
BPF_F_ALLOW_OVERRIDE),
"prog2_attach_to_cg2_override", "errno=%d\n", errno))
goto err;
if (CHECK(bpf_prog_attach(allow_prog[3], cg3, BPF_CGROUP_INET_EGRESS,
BPF_F_ALLOW_MULTI),
"prog3_attach_to_cg3_multi", "errno=%d\n", errno))
goto err;
if (CHECK(bpf_prog_attach(allow_prog[4], cg4, BPF_CGROUP_INET_EGRESS,
BPF_F_ALLOW_OVERRIDE),
"prog4_attach_to_cg4_override", "errno=%d\n", errno))
goto err;
if (CHECK(bpf_prog_attach(allow_prog[5], cg5, BPF_CGROUP_INET_EGRESS, 0),
"prog5_attach_to_cg5_none", "errno=%d\n", errno))
goto err;
CHECK_FAIL(system(PING_CMD));
CHECK_FAIL(bpf_map_lookup_elem(map_fd, &key, &value));
CHECK_FAIL(value != 1 + 2 + 8 + 32);
/* query the number of effective progs in cg5 */
CHECK_FAIL(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS,
BPF_F_QUERY_EFFECTIVE, NULL, NULL, &prog_cnt));
CHECK_FAIL(prog_cnt != 4);
/* retrieve prog_ids of effective progs in cg5 */
CHECK_FAIL(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS,
BPF_F_QUERY_EFFECTIVE, &attach_flags,
prog_ids, &prog_cnt));
CHECK_FAIL(prog_cnt != 4);
CHECK_FAIL(attach_flags != 0);
saved_prog_id = prog_ids[0];
/* check enospc handling */
prog_ids[0] = 0;
prog_cnt = 2;
CHECK_FAIL(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS,
BPF_F_QUERY_EFFECTIVE, &attach_flags,
prog_ids, &prog_cnt) != -1);
CHECK_FAIL(errno != ENOSPC);
CHECK_FAIL(prog_cnt != 4);
/* check that prog_ids are returned even when buffer is too small */
CHECK_FAIL(prog_ids[0] != saved_prog_id);
/* retrieve prog_id of single attached prog in cg5 */
prog_ids[0] = 0;
CHECK_FAIL(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, 0, NULL,
prog_ids, &prog_cnt));
CHECK_FAIL(prog_cnt != 1);
CHECK_FAIL(prog_ids[0] != saved_prog_id);
/* detach bottom program and ping again */
if (CHECK(bpf_prog_detach2(-1, cg5, BPF_CGROUP_INET_EGRESS),
"prog_detach_from_cg5", "errno=%d\n", errno))
goto err;
value = 0;
CHECK_FAIL(bpf_map_update_elem(map_fd, &key, &value, 0));
CHECK_FAIL(system(PING_CMD));
CHECK_FAIL(bpf_map_lookup_elem(map_fd, &key, &value));
CHECK_FAIL(value != 1 + 2 + 8 + 16);
/* test replace */
attach_opts.flags = BPF_F_ALLOW_OVERRIDE | BPF_F_REPLACE;
attach_opts.replace_prog_fd = allow_prog[0];
if (CHECK(!bpf_prog_attach_xattr(allow_prog[6], cg1,
BPF_CGROUP_INET_EGRESS, &attach_opts),
"fail_prog_replace_override", "unexpected success\n"))
goto err;
CHECK_FAIL(errno != EINVAL);
attach_opts.flags = BPF_F_REPLACE;
if (CHECK(!bpf_prog_attach_xattr(allow_prog[6], cg1,
BPF_CGROUP_INET_EGRESS, &attach_opts),
"fail_prog_replace_no_multi", "unexpected success\n"))
goto err;
CHECK_FAIL(errno != EINVAL);
attach_opts.flags = BPF_F_ALLOW_MULTI | BPF_F_REPLACE;
attach_opts.replace_prog_fd = -1;
if (CHECK(!bpf_prog_attach_xattr(allow_prog[6], cg1,
BPF_CGROUP_INET_EGRESS, &attach_opts),
"fail_prog_replace_bad_fd", "unexpected success\n"))
goto err;
CHECK_FAIL(errno != EBADF);
/* replacing a program that is not attached to cgroup should fail */
attach_opts.replace_prog_fd = allow_prog[3];
if (CHECK(!bpf_prog_attach_xattr(allow_prog[6], cg1,
BPF_CGROUP_INET_EGRESS, &attach_opts),
"fail_prog_replace_no_ent", "unexpected success\n"))
goto err;
CHECK_FAIL(errno != ENOENT);
/* replace 1st from the top program */
attach_opts.replace_prog_fd = allow_prog[0];
if (CHECK(bpf_prog_attach_xattr(allow_prog[6], cg1,
BPF_CGROUP_INET_EGRESS, &attach_opts),
"prog_replace", "errno=%d\n", errno))
goto err;
value = 0;
CHECK_FAIL(bpf_map_update_elem(map_fd, &key, &value, 0));
CHECK_FAIL(system(PING_CMD));
CHECK_FAIL(bpf_map_lookup_elem(map_fd, &key, &value));
CHECK_FAIL(value != 64 + 2 + 8 + 16);
/* detach 3rd from bottom program and ping again */
if (CHECK(!bpf_prog_detach2(0, cg3, BPF_CGROUP_INET_EGRESS),
"fail_prog_detach_from_cg3", "unexpected success\n"))
goto err;
if (CHECK(bpf_prog_detach2(allow_prog[3], cg3, BPF_CGROUP_INET_EGRESS),
"prog3_detach_from_cg3", "errno=%d\n", errno))
goto err;
value = 0;
CHECK_FAIL(bpf_map_update_elem(map_fd, &key, &value, 0));
CHECK_FAIL(system(PING_CMD));
CHECK_FAIL(bpf_map_lookup_elem(map_fd, &key, &value));
CHECK_FAIL(value != 64 + 2 + 16);
/* detach 2nd from bottom program and ping again */
if (CHECK(bpf_prog_detach2(-1, cg4, BPF_CGROUP_INET_EGRESS),
"prog_detach_from_cg4", "errno=%d\n", errno))
goto err;
value = 0;
CHECK_FAIL(bpf_map_update_elem(map_fd, &key, &value, 0));
CHECK_FAIL(system(PING_CMD));
CHECK_FAIL(bpf_map_lookup_elem(map_fd, &key, &value));
CHECK_FAIL(value != 64 + 2 + 4);
prog_cnt = 4;
CHECK_FAIL(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS,
BPF_F_QUERY_EFFECTIVE, &attach_flags,
prog_ids, &prog_cnt));
CHECK_FAIL(prog_cnt != 3);
CHECK_FAIL(attach_flags != 0);
CHECK_FAIL(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, 0, NULL,
prog_ids, &prog_cnt));
CHECK_FAIL(prog_cnt != 0);
err:
for (i = 0; i < ARRAY_SIZE(allow_prog); i++)
if (allow_prog[i] >= 0)
close(allow_prog[i]);
close(cg1);
close(cg2);
close(cg3);
close(cg4);
close(cg5);
cleanup_cgroup_environment();
}
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "cgroup_helpers.h"
#define FOO "/foo"
#define BAR "/foo/bar/"
#define PING_CMD "ping -q -c1 -w1 127.0.0.1 > /dev/null"
char bpf_log_buf[BPF_LOG_BUF_SIZE];
static int prog_load(int verdict)
{
struct bpf_insn prog[] = {
BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */
BPF_EXIT_INSN(),
};
size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);
return bpf_load_program(BPF_PROG_TYPE_CGROUP_SKB,
prog, insns_cnt, "GPL", 0,
bpf_log_buf, BPF_LOG_BUF_SIZE);
}
void test_cgroup_attach_override(void)
{
int drop_prog = -1, allow_prog = -1, foo = -1, bar = -1;
__u32 duration = 0;
allow_prog = prog_load(1);
if (CHECK(allow_prog < 0, "prog_load_allow",
"verifier output:\n%s\n-------\n", bpf_log_buf))
goto err;
drop_prog = prog_load(0);
if (CHECK(drop_prog < 0, "prog_load_drop",
"verifier output:\n%s\n-------\n", bpf_log_buf))
goto err;
foo = test__join_cgroup(FOO);
if (CHECK(foo < 0, "cgroup_join_foo", "cgroup setup failed\n"))
goto err;
if (CHECK(bpf_prog_attach(drop_prog, foo, BPF_CGROUP_INET_EGRESS,
BPF_F_ALLOW_OVERRIDE),
"prog_attach_drop_foo_override",
"attach prog to %s failed, errno=%d\n", FOO, errno))
goto err;
if (CHECK(!system(PING_CMD), "ping_fail",
"ping unexpectedly succeeded\n"))
goto err;
bar = test__join_cgroup(BAR);
if (CHECK(bar < 0, "cgroup_join_bar", "cgroup setup failed\n"))
goto err;
if (CHECK(!system(PING_CMD), "ping_fail",
"ping unexpectedly succeeded\n"))
goto err;
if (CHECK(bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS,
BPF_F_ALLOW_OVERRIDE),
"prog_attach_allow_bar_override",
"attach prog to %s failed, errno=%d\n", BAR, errno))
goto err;
if (CHECK(system(PING_CMD), "ping_ok", "ping failed\n"))
goto err;
if (CHECK(bpf_prog_detach(bar, BPF_CGROUP_INET_EGRESS),
"prog_detach_bar",
"detach prog from %s failed, errno=%d\n", BAR, errno))
goto err;
if (CHECK(!system(PING_CMD), "ping_fail",
"ping unexpectedly succeeded\n"))
goto err;
if (CHECK(bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS,
BPF_F_ALLOW_OVERRIDE),
"prog_attach_allow_bar_override",
"attach prog to %s failed, errno=%d\n", BAR, errno))
goto err;
if (CHECK(bpf_prog_detach(foo, BPF_CGROUP_INET_EGRESS),
"prog_detach_foo",
"detach prog from %s failed, errno=%d\n", FOO, errno))
goto err;
if (CHECK(system(PING_CMD), "ping_ok", "ping failed\n"))
goto err;
if (CHECK(bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS,
BPF_F_ALLOW_OVERRIDE),
"prog_attach_allow_bar_override",
"attach prog to %s failed, errno=%d\n", BAR, errno))
goto err;
if (CHECK(!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 0),
"fail_prog_attach_allow_bar_none",
"attach prog to %s unexpectedly succeeded\n", BAR))
goto err;
if (CHECK(bpf_prog_detach(bar, BPF_CGROUP_INET_EGRESS),
"prog_detach_bar",
"detach prog from %s failed, errno=%d\n", BAR, errno))
goto err;
if (CHECK(!bpf_prog_detach(foo, BPF_CGROUP_INET_EGRESS),
"fail_prog_detach_foo",
"double detach from %s unexpectedly succeeded\n", FOO))
goto err;
if (CHECK(bpf_prog_attach(allow_prog, foo, BPF_CGROUP_INET_EGRESS, 0),
"prog_attach_allow_foo_none",
"attach prog to %s failed, errno=%d\n", FOO, errno))
goto err;
if (CHECK(!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 0),
"fail_prog_attach_allow_bar_none",
"attach prog to %s unexpectedly succeeded\n", BAR))
goto err;
if (CHECK(!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS,
BPF_F_ALLOW_OVERRIDE),
"fail_prog_attach_allow_bar_override",
"attach prog to %s unexpectedly succeeded\n", BAR))
goto err;
if (CHECK(!bpf_prog_attach(allow_prog, foo, BPF_CGROUP_INET_EGRESS,
BPF_F_ALLOW_OVERRIDE),
"fail_prog_attach_allow_foo_override",
"attach prog to %s unexpectedly succeeded\n", FOO))
goto err;
if (CHECK(bpf_prog_attach(drop_prog, foo, BPF_CGROUP_INET_EGRESS, 0),
"prog_attach_drop_foo_none",
"attach prog to %s failed, errno=%d\n", FOO, errno))
goto err;
err:
close(foo);
close(bar);
close(allow_prog);
close(drop_prog);
}
// SPDX-License-Identifier: GPL-2.0
/* eBPF example program:
*
* - Creates arraymap in kernel with 4 byte keys and 8 byte values
*
* - Loads eBPF program
*
* The eBPF program accesses the map passed in to store two pieces of
* information. The number of invocations of the program, which maps
* to the number of packets received, is stored to key 0. Key 1 is
* incremented on each iteration by the number of bytes stored in
* the skb. The program also stores the number of received bytes
* in the cgroup storage.
*
* - Attaches the new program to a cgroup using BPF_PROG_ATTACH
*
* - Every second, reads map[0] and map[1] to see how many bytes and
* packets were seen on any socket of tasks in the given cgroup.
*/
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <sys/resource.h>
#include <sys/time.h>
#include <unistd.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>
#include "bpf_util.h"
#include "bpf_rlimit.h"
#include "cgroup_helpers.h"
#define FOO "/foo"
#define BAR "/foo/bar/"
#define PING_CMD "ping -q -c1 -w1 127.0.0.1 > /dev/null"
char bpf_log_buf[BPF_LOG_BUF_SIZE];
#ifdef DEBUG
#define debug(args...) printf(args)
#else
#define debug(args...)
#endif
static int prog_load(int verdict)
{
int ret;
struct bpf_insn prog[] = {
BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */
BPF_EXIT_INSN(),
};
size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);
ret = bpf_load_program(BPF_PROG_TYPE_CGROUP_SKB,
prog, insns_cnt, "GPL", 0,
bpf_log_buf, BPF_LOG_BUF_SIZE);
if (ret < 0) {
log_err("Loading program");
printf("Output from verifier:\n%s\n-------\n", bpf_log_buf);
return 0;
}
return ret;
}
static int test_foo_bar(void)
{
int drop_prog, allow_prog, foo = 0, bar = 0, rc = 0;
allow_prog = prog_load(1);
if (!allow_prog)
goto err;
drop_prog = prog_load(0);
if (!drop_prog)
goto err;
if (setup_cgroup_environment())
goto err;
/* Create cgroup /foo, get fd, and join it */
foo = create_and_get_cgroup(FOO);
if (foo < 0)
goto err;
if (join_cgroup(FOO))
goto err;
if (bpf_prog_attach(drop_prog, foo, BPF_CGROUP_INET_EGRESS,
BPF_F_ALLOW_OVERRIDE)) {
log_err("Attaching prog to /foo");
goto err;
}
debug("Attached DROP prog. This ping in cgroup /foo should fail...\n");
assert(system(PING_CMD) != 0);
/* Create cgroup /foo/bar, get fd, and join it */
bar = create_and_get_cgroup(BAR);
if (bar < 0)
goto err;
if (join_cgroup(BAR))
goto err;
debug("Attached DROP prog. This ping in cgroup /foo/bar should fail...\n");
assert(system(PING_CMD) != 0);
if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS,
BPF_F_ALLOW_OVERRIDE)) {
log_err("Attaching prog to /foo/bar");
goto err;
}
debug("Attached PASS prog. This ping in cgroup /foo/bar should pass...\n");
assert(system(PING_CMD) == 0);
if (bpf_prog_detach(bar, BPF_CGROUP_INET_EGRESS)) {
log_err("Detaching program from /foo/bar");
goto err;
}
debug("Detached PASS from /foo/bar while DROP is attached to /foo.\n"
"This ping in cgroup /foo/bar should fail...\n");
assert(system(PING_CMD) != 0);
if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS,
BPF_F_ALLOW_OVERRIDE)) {
log_err("Attaching prog to /foo/bar");
goto err;
}
if (bpf_prog_detach(foo, BPF_CGROUP_INET_EGRESS)) {
log_err("Detaching program from /foo");
goto err;
}
debug("Attached PASS from /foo/bar and detached DROP from /foo.\n"
"This ping in cgroup /foo/bar should pass...\n");
assert(system(PING_CMD) == 0);
if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS,
BPF_F_ALLOW_OVERRIDE)) {
log_err("Attaching prog to /foo/bar");
goto err;
}
if (!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 0)) {
errno = 0;
log_err("Unexpected success attaching prog to /foo/bar");
goto err;
}
if (bpf_prog_detach(bar, BPF_CGROUP_INET_EGRESS)) {
log_err("Detaching program from /foo/bar");
goto err;
}
if (!bpf_prog_detach(foo, BPF_CGROUP_INET_EGRESS)) {
errno = 0;
log_err("Unexpected success in double detach from /foo");
goto err;
}
if (bpf_prog_attach(allow_prog, foo, BPF_CGROUP_INET_EGRESS, 0)) {
log_err("Attaching non-overridable prog to /foo");
goto err;
}
if (!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 0)) {
errno = 0;
log_err("Unexpected success attaching non-overridable prog to /foo/bar");
goto err;
}
if (!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS,
BPF_F_ALLOW_OVERRIDE)) {
errno = 0;
log_err("Unexpected success attaching overridable prog to /foo/bar");
goto err;
}
if (!bpf_prog_attach(allow_prog, foo, BPF_CGROUP_INET_EGRESS,
BPF_F_ALLOW_OVERRIDE)) {
errno = 0;
log_err("Unexpected success attaching overridable prog to /foo");
goto err;
}
if (bpf_prog_attach(drop_prog, foo, BPF_CGROUP_INET_EGRESS, 0)) {
log_err("Attaching different non-overridable prog to /foo");
goto err;
}
goto out;
err:
rc = 1;
out:
close(foo);
close(bar);
cleanup_cgroup_environment();
if (!rc)
printf("#override:PASS\n");
else
printf("#override:FAIL\n");
return rc;
}
static int map_fd = -1;
static int prog_load_cnt(int verdict, int val)
{
int cgroup_storage_fd, percpu_cgroup_storage_fd;
if (map_fd < 0)
map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 1, 0);
if (map_fd < 0) {
printf("failed to create map '%s'\n", strerror(errno));
return -1;
}
cgroup_storage_fd = bpf_create_map(BPF_MAP_TYPE_CGROUP_STORAGE,
sizeof(struct bpf_cgroup_storage_key), 8, 0, 0);
if (cgroup_storage_fd < 0) {
printf("failed to create map '%s'\n", strerror(errno));
return -1;
}
percpu_cgroup_storage_fd = bpf_create_map(
BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
sizeof(struct bpf_cgroup_storage_key), 8, 0, 0);
if (percpu_cgroup_storage_fd < 0) {
printf("failed to create map '%s'\n", strerror(errno));
return -1;
}
struct bpf_insn prog[] = {
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), /* *(u32 *)(fp - 4) = r0 */
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), /* r2 = fp - 4 */
BPF_LD_MAP_FD(BPF_REG_1, map_fd),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
BPF_MOV64_IMM(BPF_REG_1, val), /* r1 = val */
BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_0, BPF_REG_1, 0, 0), /* xadd r0 += r1 */
BPF_LD_MAP_FD(BPF_REG_1, cgroup_storage_fd),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
BPF_MOV64_IMM(BPF_REG_1, val),
BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_0, BPF_REG_1, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, percpu_cgroup_storage_fd),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 0x1),
BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */
BPF_EXIT_INSN(),
};
size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);
int ret;
ret = bpf_load_program(BPF_PROG_TYPE_CGROUP_SKB,
prog, insns_cnt, "GPL", 0,
bpf_log_buf, BPF_LOG_BUF_SIZE);
if (ret < 0) {
log_err("Loading program");
printf("Output from verifier:\n%s\n-------\n", bpf_log_buf);
return 0;
}
close(cgroup_storage_fd);
return ret;
}
static int test_multiprog(void)
{
__u32 prog_ids[4], prog_cnt = 0, attach_flags, saved_prog_id;
int cg1 = 0, cg2 = 0, cg3 = 0, cg4 = 0, cg5 = 0, key = 0;
int drop_prog, allow_prog[6] = {}, rc = 0;
unsigned long long value;
int i = 0;
for (i = 0; i < 6; i++) {
allow_prog[i] = prog_load_cnt(1, 1 << i);
if (!allow_prog[i])
goto err;
}
drop_prog = prog_load_cnt(0, 1);
if (!drop_prog)
goto err;
if (setup_cgroup_environment())
goto err;
cg1 = create_and_get_cgroup("/cg1");
if (cg1 < 0)
goto err;
cg2 = create_and_get_cgroup("/cg1/cg2");
if (cg2 < 0)
goto err;
cg3 = create_and_get_cgroup("/cg1/cg2/cg3");
if (cg3 < 0)
goto err;
cg4 = create_and_get_cgroup("/cg1/cg2/cg3/cg4");
if (cg4 < 0)
goto err;
cg5 = create_and_get_cgroup("/cg1/cg2/cg3/cg4/cg5");
if (cg5 < 0)
goto err;
if (join_cgroup("/cg1/cg2/cg3/cg4/cg5"))
goto err;
if (bpf_prog_attach(allow_prog[0], cg1, BPF_CGROUP_INET_EGRESS,
BPF_F_ALLOW_MULTI)) {
log_err("Attaching prog to cg1");
goto err;
}
if (!bpf_prog_attach(allow_prog[0], cg1, BPF_CGROUP_INET_EGRESS,
BPF_F_ALLOW_MULTI)) {
log_err("Unexpected success attaching the same prog to cg1");
goto err;
}
if (bpf_prog_attach(allow_prog[1], cg1, BPF_CGROUP_INET_EGRESS,
BPF_F_ALLOW_MULTI)) {
log_err("Attaching prog2 to cg1");
goto err;
}
if (bpf_prog_attach(allow_prog[2], cg2, BPF_CGROUP_INET_EGRESS,
BPF_F_ALLOW_OVERRIDE)) {
log_err("Attaching prog to cg2");
goto err;
}
if (bpf_prog_attach(allow_prog[3], cg3, BPF_CGROUP_INET_EGRESS,
BPF_F_ALLOW_MULTI)) {
log_err("Attaching prog to cg3");
goto err;
}
if (bpf_prog_attach(allow_prog[4], cg4, BPF_CGROUP_INET_EGRESS,
BPF_F_ALLOW_OVERRIDE)) {
log_err("Attaching prog to cg4");
goto err;
}
if (bpf_prog_attach(allow_prog[5], cg5, BPF_CGROUP_INET_EGRESS, 0)) {
log_err("Attaching prog to cg5");
goto err;
}
assert(system(PING_CMD) == 0);
assert(bpf_map_lookup_elem(map_fd, &key, &value) == 0);
assert(value == 1 + 2 + 8 + 32);
/* query the number of effective progs in cg5 */
assert(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, BPF_F_QUERY_EFFECTIVE,
NULL, NULL, &prog_cnt) == 0);
assert(prog_cnt == 4);
/* retrieve prog_ids of effective progs in cg5 */
assert(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, BPF_F_QUERY_EFFECTIVE,
&attach_flags, prog_ids, &prog_cnt) == 0);
assert(prog_cnt == 4);
assert(attach_flags == 0);
saved_prog_id = prog_ids[0];
/* check enospc handling */
prog_ids[0] = 0;
prog_cnt = 2;
assert(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, BPF_F_QUERY_EFFECTIVE,
&attach_flags, prog_ids, &prog_cnt) == -1 &&
errno == ENOSPC);
assert(prog_cnt == 4);
/* check that prog_ids are returned even when buffer is too small */
assert(prog_ids[0] == saved_prog_id);
/* retrieve prog_id of single attached prog in cg5 */
prog_ids[0] = 0;
assert(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, 0,
NULL, prog_ids, &prog_cnt) == 0);
assert(prog_cnt == 1);
assert(prog_ids[0] == saved_prog_id);
/* detach bottom program and ping again */
if (bpf_prog_detach2(-1, cg5, BPF_CGROUP_INET_EGRESS)) {
log_err("Detaching prog from cg5");
goto err;
}
value = 0;
assert(bpf_map_update_elem(map_fd, &key, &value, 0) == 0);
assert(system(PING_CMD) == 0);
assert(bpf_map_lookup_elem(map_fd, &key, &value) == 0);
assert(value == 1 + 2 + 8 + 16);
/* detach 3rd from bottom program and ping again */
errno = 0;
if (!bpf_prog_detach2(0, cg3, BPF_CGROUP_INET_EGRESS)) {
log_err("Unexpected success on detach from cg3");
goto err;
}
if (bpf_prog_detach2(allow_prog[3], cg3, BPF_CGROUP_INET_EGRESS)) {
log_err("Detaching from cg3");
goto err;
}
value = 0;
assert(bpf_map_update_elem(map_fd, &key, &value, 0) == 0);
assert(system(PING_CMD) == 0);
assert(bpf_map_lookup_elem(map_fd, &key, &value) == 0);
assert(value == 1 + 2 + 16);
/* detach 2nd from bottom program and ping again */
if (bpf_prog_detach2(-1, cg4, BPF_CGROUP_INET_EGRESS)) {
log_err("Detaching prog from cg4");
goto err;
}
value = 0;
assert(bpf_map_update_elem(map_fd, &key, &value, 0) == 0);
assert(system(PING_CMD) == 0);
assert(bpf_map_lookup_elem(map_fd, &key, &value) == 0);
assert(value == 1 + 2 + 4);
prog_cnt = 4;
assert(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, BPF_F_QUERY_EFFECTIVE,
&attach_flags, prog_ids, &prog_cnt) == 0);
assert(prog_cnt == 3);
assert(attach_flags == 0);
assert(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, 0,
NULL, prog_ids, &prog_cnt) == 0);
assert(prog_cnt == 0);
goto out;
err:
rc = 1;
out:
for (i = 0; i < 6; i++)
if (allow_prog[i] > 0)
close(allow_prog[i]);
close(cg1);
close(cg2);
close(cg3);
close(cg4);
close(cg5);
cleanup_cgroup_environment();
if (!rc)
printf("#multi:PASS\n");
else
printf("#multi:FAIL\n");
return rc;
}
static int test_autodetach(void)
{
__u32 prog_cnt = 4, attach_flags;
int allow_prog[2] = {0};
__u32 prog_ids[2] = {0};
int cg = 0, i, rc = -1;
void *ptr = NULL;
int attempts;
for (i = 0; i < ARRAY_SIZE(allow_prog); i++) {
allow_prog[i] = prog_load_cnt(1, 1 << i);
if (!allow_prog[i])
goto err;
}
if (setup_cgroup_environment())
goto err;
/* create a cgroup, attach two programs and remember their ids */
cg = create_and_get_cgroup("/cg_autodetach");
if (cg < 0)
goto err;
if (join_cgroup("/cg_autodetach"))
goto err;
for (i = 0; i < ARRAY_SIZE(allow_prog); i++) {
if (bpf_prog_attach(allow_prog[i], cg, BPF_CGROUP_INET_EGRESS,
BPF_F_ALLOW_MULTI)) {
log_err("Attaching prog[%d] to cg:egress", i);
goto err;
}
}
/* make sure that programs are attached and run some traffic */
assert(bpf_prog_query(cg, BPF_CGROUP_INET_EGRESS, 0, &attach_flags,
prog_ids, &prog_cnt) == 0);
assert(system(PING_CMD) == 0);
/* allocate some memory (4MB) to pin the original cgroup */
ptr = malloc(4 * (1 << 20));
if (!ptr)
goto err;
/* close programs and cgroup fd */
for (i = 0; i < ARRAY_SIZE(allow_prog); i++) {
close(allow_prog[i]);
allow_prog[i] = 0;
}
close(cg);
cg = 0;
/* leave the cgroup and remove it. don't detach programs */
cleanup_cgroup_environment();
/* wait for the asynchronous auto-detachment;
 * give up after no more than 5 seconds.
*/
for (i = 0; i < ARRAY_SIZE(prog_ids); i++) {
for (attempts = 5; attempts >= 0; attempts--) {
int fd = bpf_prog_get_fd_by_id(prog_ids[i]);
if (fd < 0)
break;
/* don't leave the fd open */
close(fd);
if (!attempts)
goto err;
sleep(1);
}
}
rc = 0;
err:
for (i = 0; i < ARRAY_SIZE(allow_prog); i++)
if (allow_prog[i] > 0)
close(allow_prog[i]);
if (cg)
close(cg);
free(ptr);
cleanup_cgroup_environment();
if (!rc)
printf("#autodetach:PASS\n");
else
printf("#autodetach:FAIL\n");
return rc;
}
int main(void)
{
int (*tests[])(void) = {
test_foo_bar,
test_multiprog,
test_autodetach,
};
int errors = 0;
int i;
for (i = 0; i < ARRAY_SIZE(tests); i++)
if (tests[i]())
errors++;
if (errors)
printf("test_cgroup_attach:FAIL\n");
else
printf("test_cgroup_attach:PASS\n");
return errors ? EXIT_FAILURE : EXIT_SUCCESS;
}