Commit b79c9fc9 authored by Stanislav Fomichev's avatar Stanislav Fomichev Committed by Alexei Starovoitov

bpf: implement BPF_PROG_QUERY for BPF_LSM_CGROUP

We have two options:
1. Treat all BPF_LSM_CGROUP the same, regardless of attach_btf_id
2. Treat BPF_LSM_CGROUP+attach_btf_id as a separate hook point

I was doing (2) in the original patch, but switching to (1) here:

* bpf_prog_query returns all attached BPF_LSM_CGROUP programs
regardless of attach_btf_id
* attach_btf_id is exported via bpf_prog_info
Reviewed-by: Martin KaFai Lau <kafai@fb.com>
Signed-off-by: Stanislav Fomichev <sdf@google.com>
Link: https://lore.kernel.org/r/20220628174314.1216643-6-sdf@google.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent c0e19f2c
...@@ -1432,6 +1432,7 @@ union bpf_attr { ...@@ -1432,6 +1432,7 @@ union bpf_attr {
__u32 attach_flags; __u32 attach_flags;
__aligned_u64 prog_ids; __aligned_u64 prog_ids;
__u32 prog_cnt; __u32 prog_cnt;
__aligned_u64 prog_attach_flags; /* output: per-program attach_flags */
} query; } query;
struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */ struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */
...@@ -6076,6 +6077,8 @@ struct bpf_prog_info { ...@@ -6076,6 +6077,8 @@ struct bpf_prog_info {
__u64 run_cnt; __u64 run_cnt;
__u64 recursion_misses; __u64 recursion_misses;
__u32 verified_insns; __u32 verified_insns;
__u32 attach_btf_obj_id;
__u32 attach_btf_id;
} __attribute__((aligned(8))); } __attribute__((aligned(8)));
struct bpf_map_info { struct bpf_map_info {
......
...@@ -1017,57 +1017,90 @@ static int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, ...@@ -1017,57 +1017,90 @@ static int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
/*
 * __cgroup_bpf_query - fill in the BPF_PROG_QUERY reply for one cgroup.
 *
 * Copies the number of attached programs, the attach flags, the program
 * ids and (optionally) the per-program attach flags into the user-space
 * @uattr.  For BPF_LSM_CGROUP every LSM attach slot in
 * [CGROUP_LSM_START, CGROUP_LSM_END] is walked, regardless of
 * attach_btf_id; all other attach types map to exactly one slot via
 * to_cgroup_bpf_attach_type().
 *
 * Returns 0 on success, -EINVAL on a bad request, -EFAULT on a failed
 * copy_to_user(), or -ENOSPC when the user buffer is smaller than the
 * number of attached programs (the buffer is still filled as far as it
 * goes).  Caller must hold cgroup_mutex (see the
 * lockdep_is_held(&cgroup_mutex) annotations below).
 */
static int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
			      union bpf_attr __user *uattr)
{
	__u32 __user *prog_attach_flags = u64_to_user_ptr(attr->query.prog_attach_flags);
	__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
	enum bpf_attach_type type = attr->query.attach_type;
	enum cgroup_bpf_attach_type from_atype, to_atype;
	enum cgroup_bpf_attach_type atype;
	struct bpf_prog_array *effective;
	int cnt, ret = 0, i;
	int total_cnt = 0;
	u32 flags;

	if (type == BPF_LSM_CGROUP) {
		/*
		 * When program ids are requested for BPF_LSM_CGROUP, the
		 * per-program attach flags array is mandatory: a single
		 * aggregate flags value cannot describe multiple LSM slots.
		 */
		if (attr->query.prog_cnt && prog_ids && !prog_attach_flags)
			return -EINVAL;

		from_atype = CGROUP_LSM_START;
		to_atype = CGROUP_LSM_END;
		/* aggregate attach_flags is meaningless across LSM slots */
		flags = 0;
	} else {
		from_atype = to_cgroup_bpf_attach_type(type);
		if (from_atype < 0)
			return -EINVAL;
		to_atype = from_atype;
		flags = cgrp->bpf.flags[from_atype];
	}

	/* First pass: count the programs over every selected slot. */
	for (atype = from_atype; atype <= to_atype; atype++) {
		if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
			effective = rcu_dereference_protected(cgrp->bpf.effective[atype],
							      lockdep_is_held(&cgroup_mutex));
			total_cnt += bpf_prog_array_length(effective);
		} else {
			total_cnt += prog_list_length(&cgrp->bpf.progs[atype]);
		}
	}

	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
		return -EFAULT;
	if (copy_to_user(&uattr->query.prog_cnt, &total_cnt, sizeof(total_cnt)))
		return -EFAULT;
	if (attr->query.prog_cnt == 0 || !prog_ids || !total_cnt)
		/* return early if user requested only program count + flags */
		return 0;

	if (attr->query.prog_cnt < total_cnt) {
		/* Truncate to the user's buffer, but still report overflow. */
		total_cnt = attr->query.prog_cnt;
		ret = -ENOSPC;
	}

	/* Second pass: copy out ids (and flags) until the budget runs out. */
	for (atype = from_atype; atype <= to_atype && total_cnt; atype++) {
		if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
			effective = rcu_dereference_protected(cgrp->bpf.effective[atype],
							      lockdep_is_held(&cgroup_mutex));
			cnt = min_t(int, bpf_prog_array_length(effective), total_cnt);
			ret = bpf_prog_array_copy_to_user(effective, prog_ids, cnt);
		} else {
			struct hlist_head *progs;
			struct bpf_prog_list *pl;
			struct bpf_prog *prog;
			u32 id;

			progs = &cgrp->bpf.progs[atype];
			cnt = min_t(int, prog_list_length(progs), total_cnt);
			i = 0;
			hlist_for_each_entry(pl, progs, node) {
				prog = prog_list_prog(pl);
				id = prog->aux->id;
				if (copy_to_user(prog_ids + i, &id, sizeof(id)))
					return -EFAULT;
				if (++i == cnt)
					break;
			}
		}

		if (prog_attach_flags) {
			/* One flags word per program copied for this slot. */
			flags = cgrp->bpf.flags[atype];

			for (i = 0; i < cnt; i++)
				if (copy_to_user(prog_attach_flags + i, &flags, sizeof(flags)))
					return -EFAULT;
			prog_attach_flags += cnt;
		}

		prog_ids += cnt;
		total_cnt -= cnt;
	}

	return ret;
}
......
...@@ -3520,7 +3520,7 @@ static int bpf_prog_detach(const union bpf_attr *attr) ...@@ -3520,7 +3520,7 @@ static int bpf_prog_detach(const union bpf_attr *attr)
} }
} }
#define BPF_PROG_QUERY_LAST_FIELD query.prog_cnt #define BPF_PROG_QUERY_LAST_FIELD query.prog_attach_flags
static int bpf_prog_query(const union bpf_attr *attr, static int bpf_prog_query(const union bpf_attr *attr,
union bpf_attr __user *uattr) union bpf_attr __user *uattr)
...@@ -3556,6 +3556,7 @@ static int bpf_prog_query(const union bpf_attr *attr, ...@@ -3556,6 +3556,7 @@ static int bpf_prog_query(const union bpf_attr *attr,
case BPF_CGROUP_SYSCTL: case BPF_CGROUP_SYSCTL:
case BPF_CGROUP_GETSOCKOPT: case BPF_CGROUP_GETSOCKOPT:
case BPF_CGROUP_SETSOCKOPT: case BPF_CGROUP_SETSOCKOPT:
case BPF_LSM_CGROUP:
return cgroup_bpf_prog_query(attr, uattr); return cgroup_bpf_prog_query(attr, uattr);
case BPF_LIRC_MODE2: case BPF_LIRC_MODE2:
return lirc_prog_query(attr, uattr); return lirc_prog_query(attr, uattr);
...@@ -4066,6 +4067,11 @@ static int bpf_prog_get_info_by_fd(struct file *file, ...@@ -4066,6 +4067,11 @@ static int bpf_prog_get_info_by_fd(struct file *file,
if (prog->aux->btf) if (prog->aux->btf)
info.btf_id = btf_obj_id(prog->aux->btf); info.btf_id = btf_obj_id(prog->aux->btf);
info.attach_btf_id = prog->aux->attach_btf_id;
if (prog->aux->attach_btf)
info.attach_btf_obj_id = btf_obj_id(prog->aux->attach_btf);
else if (prog->aux->dst_prog)
info.attach_btf_obj_id = btf_obj_id(prog->aux->dst_prog->aux->attach_btf);
ulen = info.nr_func_info; ulen = info.nr_func_info;
info.nr_func_info = prog->aux->func_info_cnt; info.nr_func_info = prog->aux->func_info_cnt;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment