Commit d9d31cf8 authored by Stanislav Fomichev's avatar Stanislav Fomichev Committed by Alexei Starovoitov

bpf: Use bpf_prog_run_array_cg_flags everywhere

Rename bpf_prog_run_array_cg_flags to bpf_prog_run_array_cg and
use it everywhere. check_return_code already enforces sane
return ranges for all cgroup types. (Only the egress and bind hooks have
non-canonical return ranges; the rest use [0, 1].)

No functional changes.

v2:
- 'func_ret & 1' under explicit test (Andrii & Martin)
Suggested-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Stanislav Fomichev <sdf@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20220425220448.3669032-1-sdf@google.com
parent 246bdfa5
...@@ -225,24 +225,20 @@ static inline bool cgroup_bpf_sock_enabled(struct sock *sk, ...@@ -225,24 +225,20 @@ static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, atype) \ #define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, atype) \
({ \ ({ \
u32 __unused_flags; \
int __ret = 0; \ int __ret = 0; \
if (cgroup_bpf_enabled(atype)) \ if (cgroup_bpf_enabled(atype)) \
__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \ __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \
NULL, \ NULL, NULL); \
&__unused_flags); \
__ret; \ __ret; \
}) })
#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx) \ #define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx) \
({ \ ({ \
u32 __unused_flags; \
int __ret = 0; \ int __ret = 0; \
if (cgroup_bpf_enabled(atype)) { \ if (cgroup_bpf_enabled(atype)) { \
lock_sock(sk); \ lock_sock(sk); \
__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \ __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \
t_ctx, \ t_ctx, NULL); \
&__unused_flags); \
release_sock(sk); \ release_sock(sk); \
} \ } \
__ret; \ __ret; \
......
...@@ -25,50 +25,18 @@ EXPORT_SYMBOL(cgroup_bpf_enabled_key); ...@@ -25,50 +25,18 @@ EXPORT_SYMBOL(cgroup_bpf_enabled_key);
/* __always_inline is necessary to prevent indirect call through run_prog /* __always_inline is necessary to prevent indirect call through run_prog
* function pointer. * function pointer.
*/ */
/*
 * Run every BPF program attached to cgroup @cgrp for attach point @atype
 * against @ctx, folding each program's auxiliary flag bits into *@ret_flags.
 *
 * @cgrp:      cgroup whose effective program array for @atype is executed
 * @atype:     cgroup BPF attach type selecting the effective[] slot
 * @ctx:       opaque context handed unchanged to @run_prog for each program
 * @run_prog:  per-program runner (e.g. bpf_prog_run); __always_inline on this
 *             function prevents an indirect call through this pointer
 * @retval:    initial value seeded into run_ctx.retval before any program runs
 * @ret_flags: out-parameter OR-accumulator for flag bits; must be non-NULL
 *             and pre-initialized by the caller (this function only ORs into it)
 *
 * Return: run_ctx.retval — @retval if every program returned an odd value
 * (bit 0 set) and no program overwrote it via the run context, otherwise
 * -EPERM once any program clears bit 0 (unless retval already holds an
 * error value, which is preserved).
 *
 * Each program's u32 return value is split: bit 0 is the verdict
 * (0 = reject -> -EPERM), the remaining bits (func_ret >> 1) are
 * accumulated into *ret_flags.
 */
static __always_inline int
bpf_prog_run_array_cg_flags(const struct cgroup_bpf *cgrp,
			    enum cgroup_bpf_attach_type atype,
			    const void *ctx, bpf_prog_run_fn run_prog,
			    int retval, u32 *ret_flags)
{
	const struct bpf_prog_array_item *item;
	const struct bpf_prog *prog;
	const struct bpf_prog_array *array;
	struct bpf_run_ctx *old_run_ctx;
	struct bpf_cg_run_ctx run_ctx;
	u32 func_ret;
	/* Seed the aggregate return value; programs observe/modify it via run_ctx. */
	run_ctx.retval = retval;
	/* Pin to this CPU and enter an RCU read section before dereferencing
	 * the effective program array — NOTE(review): ordering here mirrors the
	 * other cgroup BPF runners and must not be rearranged.
	 */
	migrate_disable();
	rcu_read_lock();
	array = rcu_dereference(cgrp->effective[atype]);
	item = &array->items[0];
	/* Install our run context so helpers called by the programs see it. */
	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
	/* The items array is NULL-prog terminated. */
	while ((prog = READ_ONCE(item->prog))) {
		run_ctx.prog_item = item;
		func_ret = run_prog(prog, ctx);
		/* Bit 0 clear means "reject": force -EPERM unless retval
		 * already carries an error value (keep the first error).
		 */
		if (!(func_ret & 1) && !IS_ERR_VALUE((long)run_ctx.retval))
			run_ctx.retval = -EPERM;
		/* Upper 31 bits are per-program flags, OR-ed across all programs. */
		*(ret_flags) |= (func_ret >> 1);
		item++;
	}
	bpf_reset_run_ctx(old_run_ctx);
	rcu_read_unlock();
	migrate_enable();
	return run_ctx.retval;
}
static __always_inline int static __always_inline int
bpf_prog_run_array_cg(const struct cgroup_bpf *cgrp, bpf_prog_run_array_cg(const struct cgroup_bpf *cgrp,
enum cgroup_bpf_attach_type atype, enum cgroup_bpf_attach_type atype,
const void *ctx, bpf_prog_run_fn run_prog, const void *ctx, bpf_prog_run_fn run_prog,
int retval) int retval, u32 *ret_flags)
{ {
const struct bpf_prog_array_item *item; const struct bpf_prog_array_item *item;
const struct bpf_prog *prog; const struct bpf_prog *prog;
const struct bpf_prog_array *array; const struct bpf_prog_array *array;
struct bpf_run_ctx *old_run_ctx; struct bpf_run_ctx *old_run_ctx;
struct bpf_cg_run_ctx run_ctx; struct bpf_cg_run_ctx run_ctx;
u32 func_ret;
run_ctx.retval = retval; run_ctx.retval = retval;
migrate_disable(); migrate_disable();
...@@ -78,7 +46,12 @@ bpf_prog_run_array_cg(const struct cgroup_bpf *cgrp, ...@@ -78,7 +46,12 @@ bpf_prog_run_array_cg(const struct cgroup_bpf *cgrp,
old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
while ((prog = READ_ONCE(item->prog))) { while ((prog = READ_ONCE(item->prog))) {
run_ctx.prog_item = item; run_ctx.prog_item = item;
if (!run_prog(prog, ctx) && !IS_ERR_VALUE((long)run_ctx.retval)) func_ret = run_prog(prog, ctx);
if (ret_flags) {
*(ret_flags) |= (func_ret >> 1);
func_ret &= 1;
}
if (!func_ret && !IS_ERR_VALUE((long)run_ctx.retval))
run_ctx.retval = -EPERM; run_ctx.retval = -EPERM;
item++; item++;
} }
...@@ -1144,9 +1117,8 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk, ...@@ -1144,9 +1117,8 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk,
u32 flags = 0; u32 flags = 0;
bool cn; bool cn;
ret = bpf_prog_run_array_cg_flags( ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, skb,
&cgrp->bpf, atype, __bpf_prog_run_save_cb, 0, &flags);
skb, __bpf_prog_run_save_cb, 0, &flags);
/* Return values of CGROUP EGRESS BPF programs are: /* Return values of CGROUP EGRESS BPF programs are:
* 0: drop packet * 0: drop packet
...@@ -1172,7 +1144,8 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk, ...@@ -1172,7 +1144,8 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk,
ret = (cn ? NET_XMIT_DROP : ret); ret = (cn ? NET_XMIT_DROP : ret);
} else { } else {
ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, ret = bpf_prog_run_array_cg(&cgrp->bpf, atype,
skb, __bpf_prog_run_save_cb, 0); skb, __bpf_prog_run_save_cb, 0,
NULL);
if (ret && !IS_ERR_VALUE((long)ret)) if (ret && !IS_ERR_VALUE((long)ret))
ret = -EFAULT; ret = -EFAULT;
} }
...@@ -1202,7 +1175,8 @@ int __cgroup_bpf_run_filter_sk(struct sock *sk, ...@@ -1202,7 +1175,8 @@ int __cgroup_bpf_run_filter_sk(struct sock *sk,
{ {
struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data); struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
return bpf_prog_run_array_cg(&cgrp->bpf, atype, sk, bpf_prog_run, 0); return bpf_prog_run_array_cg(&cgrp->bpf, atype, sk, bpf_prog_run, 0,
NULL);
} }
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk); EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);
...@@ -1247,8 +1221,8 @@ int __cgroup_bpf_run_filter_sock_addr(struct sock *sk, ...@@ -1247,8 +1221,8 @@ int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
} }
cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data); cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
return bpf_prog_run_array_cg_flags(&cgrp->bpf, atype, return bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run,
&ctx, bpf_prog_run, 0, flags); 0, flags);
} }
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr); EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr);
...@@ -1275,7 +1249,7 @@ int __cgroup_bpf_run_filter_sock_ops(struct sock *sk, ...@@ -1275,7 +1249,7 @@ int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data); struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
return bpf_prog_run_array_cg(&cgrp->bpf, atype, sock_ops, bpf_prog_run, return bpf_prog_run_array_cg(&cgrp->bpf, atype, sock_ops, bpf_prog_run,
0); 0, NULL);
} }
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops); EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops);
...@@ -1292,7 +1266,8 @@ int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor, ...@@ -1292,7 +1266,8 @@ int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
rcu_read_lock(); rcu_read_lock();
cgrp = task_dfl_cgroup(current); cgrp = task_dfl_cgroup(current);
ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run, 0); ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run, 0,
NULL);
rcu_read_unlock(); rcu_read_unlock();
return ret; return ret;
...@@ -1457,7 +1432,8 @@ int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head, ...@@ -1457,7 +1432,8 @@ int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
rcu_read_lock(); rcu_read_lock();
cgrp = task_dfl_cgroup(current); cgrp = task_dfl_cgroup(current);
ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run, 0); ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run, 0,
NULL);
rcu_read_unlock(); rcu_read_unlock();
kfree(ctx.cur_val); kfree(ctx.cur_val);
...@@ -1550,7 +1526,7 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level, ...@@ -1550,7 +1526,7 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
lock_sock(sk); lock_sock(sk);
ret = bpf_prog_run_array_cg(&cgrp->bpf, CGROUP_SETSOCKOPT, ret = bpf_prog_run_array_cg(&cgrp->bpf, CGROUP_SETSOCKOPT,
&ctx, bpf_prog_run, 0); &ctx, bpf_prog_run, 0, NULL);
release_sock(sk); release_sock(sk);
if (ret) if (ret)
...@@ -1650,7 +1626,7 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level, ...@@ -1650,7 +1626,7 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
lock_sock(sk); lock_sock(sk);
ret = bpf_prog_run_array_cg(&cgrp->bpf, CGROUP_GETSOCKOPT, ret = bpf_prog_run_array_cg(&cgrp->bpf, CGROUP_GETSOCKOPT,
&ctx, bpf_prog_run, retval); &ctx, bpf_prog_run, retval, NULL);
release_sock(sk); release_sock(sk);
if (ret < 0) if (ret < 0)
...@@ -1699,7 +1675,7 @@ int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level, ...@@ -1699,7 +1675,7 @@ int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
*/ */
ret = bpf_prog_run_array_cg(&cgrp->bpf, CGROUP_GETSOCKOPT, ret = bpf_prog_run_array_cg(&cgrp->bpf, CGROUP_GETSOCKOPT,
&ctx, bpf_prog_run, retval); &ctx, bpf_prog_run, retval, NULL);
if (ret < 0) if (ret < 0)
return ret; return ret;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment