Commit f10d0596 authored by YiFei Zhu, committed by Alexei Starovoitov

bpf: Make BPF_PROG_RUN_ARRAY return -err instead of allow boolean

Right now BPF_PROG_RUN_ARRAY and related macros return 1 or 0
to indicate whether the prog array allows or rejects whatever is
being hooked. The callers of these macros then return -EPERM or
continue processing based on the macro's return value. Unfortunately
this is inflexible, since -EPERM is the only error that can be
returned.

This patch should be a no-op; it prepares for the next patch. The
-EPERM return is moved inside the macros, so the outer functions
now directly return whatever the macros returned when it is
non-zero.
Signed-off-by: YiFei Zhu <zhuyifei@google.com>
Reviewed-by: Stanislav Fomichev <sdf@google.com>
Link: https://lore.kernel.org/r/788abcdca55886d1f43274c918eaa9f792a9f33b.1639619851.git.zhuyifei@google.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent d81283d2
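
As a rough, hypothetical sketch of the calling-convention change described in the commit message (not part of the patch; example_run_array_* and example_hook_* are made-up names standing in for BPF_PROG_RUN_ARRAY_CG and its callers):

/* Hypothetical, self-contained sketch; illustrative names only. */
#include <errno.h>

/* Old-style helper: returns 1 (allow) or 0 (reject). */
static unsigned int example_run_array_old(void) { return 0; }

/* New-style helper: returns 0 on success or -EPERM directly. */
static int example_run_array_new(void) { return -EPERM; }

/* Before this patch: each caller converts the boolean into an errno. */
static int example_hook_before(void)
{
	unsigned int allow = example_run_array_old();

	return allow == 1 ? 0 : -EPERM;
}

/* After this patch: callers propagate the helper's return value, so a
 * later patch can surface errors other than -EPERM without touching
 * every call site.
 */
static int example_hook_after(void)
{
	return example_run_array_new();
}
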
@@ -1277,7 +1277,7 @@ static inline void bpf_reset_run_ctx(struct bpf_run_ctx *old_ctx)
 typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *prog, const void *ctx);
-static __always_inline u32
+static __always_inline int
 BPF_PROG_RUN_ARRAY_CG_FLAGS(const struct bpf_prog_array __rcu *array_rcu,
 			    const void *ctx, bpf_prog_run_fn run_prog,
 			    u32 *ret_flags)
@@ -1287,7 +1287,7 @@ BPF_PROG_RUN_ARRAY_CG_FLAGS(const struct bpf_prog_array __rcu *array_rcu,
 	const struct bpf_prog_array *array;
 	struct bpf_run_ctx *old_run_ctx;
 	struct bpf_cg_run_ctx run_ctx;
-	u32 ret = 1;
+	int ret = 0;
 	u32 func_ret;
 	migrate_disable();
@@ -1298,7 +1298,8 @@ BPF_PROG_RUN_ARRAY_CG_FLAGS(const struct bpf_prog_array __rcu *array_rcu,
 	while ((prog = READ_ONCE(item->prog))) {
 		run_ctx.prog_item = item;
 		func_ret = run_prog(prog, ctx);
-		ret &= (func_ret & 1);
+		if (!(func_ret & 1))
+			ret = -EPERM;
 		*(ret_flags) |= (func_ret >> 1);
 		item++;
 	}
@@ -1308,7 +1309,7 @@ BPF_PROG_RUN_ARRAY_CG_FLAGS(const struct bpf_prog_array __rcu *array_rcu,
 	return ret;
 }
-static __always_inline u32
+static __always_inline int
 BPF_PROG_RUN_ARRAY_CG(const struct bpf_prog_array __rcu *array_rcu,
 		      const void *ctx, bpf_prog_run_fn run_prog)
 {
@@ -1317,7 +1318,7 @@ BPF_PROG_RUN_ARRAY_CG(const struct bpf_prog_array __rcu *array_rcu,
 	const struct bpf_prog_array *array;
 	struct bpf_run_ctx *old_run_ctx;
 	struct bpf_cg_run_ctx run_ctx;
-	u32 ret = 1;
+	int ret = 0;
 	migrate_disable();
 	rcu_read_lock();
@@ -1326,7 +1327,8 @@ BPF_PROG_RUN_ARRAY_CG(const struct bpf_prog_array __rcu *array_rcu,
 	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
 	while ((prog = READ_ONCE(item->prog))) {
 		run_ctx.prog_item = item;
-		ret &= run_prog(prog, ctx);
+		if (!run_prog(prog, ctx))
+			ret = -EPERM;
 		item++;
 	}
 	bpf_reset_run_ctx(old_run_ctx);
@@ -1394,7 +1396,7 @@ BPF_PROG_RUN_ARRAY(const struct bpf_prog_array __rcu *array_rcu,
 		u32 _ret; \
 		_ret = BPF_PROG_RUN_ARRAY_CG_FLAGS(array, ctx, func, &_flags); \
 		_cn = _flags & BPF_RET_SET_CN; \
-		if (_ret) \
+		if (!_ret) \
 			_ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS); \
 		else \
 			_ret = (_cn ? NET_XMIT_DROP : -EPERM); \
...
@@ -1080,7 +1080,6 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk,
 	} else {
 		ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], skb,
 					    __bpf_prog_run_save_cb);
-		ret = (ret == 1 ? 0 : -EPERM);
 	}
 	bpf_restore_data_end(skb, saved_data_end);
 	__skb_pull(skb, offset);
@@ -1107,10 +1106,9 @@ int __cgroup_bpf_run_filter_sk(struct sock *sk,
 			       enum cgroup_bpf_attach_type atype)
 {
 	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
-	int ret;
-	ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], sk, bpf_prog_run);
-	return ret == 1 ? 0 : -EPERM;
+	return BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], sk,
+				     bpf_prog_run);
 }
 EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);
@@ -1142,7 +1140,6 @@ int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
 	};
 	struct sockaddr_storage unspec;
 	struct cgroup *cgrp;
-	int ret;
 	/* Check socket family since not all sockets represent network
 	 * endpoint (e.g. AF_UNIX).
@@ -1156,10 +1153,8 @@ int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
 	}
 	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
-	ret = BPF_PROG_RUN_ARRAY_CG_FLAGS(cgrp->bpf.effective[atype], &ctx,
-					  bpf_prog_run, flags);
-	return ret == 1 ? 0 : -EPERM;
+	return BPF_PROG_RUN_ARRAY_CG_FLAGS(cgrp->bpf.effective[atype], &ctx,
+					   bpf_prog_run, flags);
 }
 EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr);
@@ -1184,11 +1179,9 @@ int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
 				     enum cgroup_bpf_attach_type atype)
 {
 	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
-	int ret;
-	ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], sock_ops,
-				    bpf_prog_run);
-	return ret == 1 ? 0 : -EPERM;
+	return BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], sock_ops,
+				     bpf_prog_run);
 }
 EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops);
@@ -1201,15 +1194,15 @@ int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
 		.major = major,
 		.minor = minor,
 	};
-	int allow;
+	int ret;
 	rcu_read_lock();
 	cgrp = task_dfl_cgroup(current);
-	allow = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], &ctx,
-				      bpf_prog_run);
+	ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], &ctx,
+				    bpf_prog_run);
 	rcu_read_unlock();
-	return !allow;
+	return ret;
 }
 static const struct bpf_func_proto *
@@ -1350,7 +1343,7 @@ int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
 		kfree(ctx.new_val);
 	}
-	return ret == 1 ? 0 : -EPERM;
+	return ret;
 }
 #ifdef CONFIG_NET
@@ -1455,10 +1448,8 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
 					  &ctx, bpf_prog_run);
 	release_sock(sk);
-	if (!ret) {
-		ret = -EPERM;
+	if (ret)
 		goto out;
-	}
 	if (ctx.optlen == -1) {
 		/* optlen set to -1, bypass kernel */
@@ -1565,10 +1556,8 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
 					  &ctx, bpf_prog_run);
 	release_sock(sk);
-	if (!ret) {
-		ret = -EPERM;
+	if (ret)
 		goto out;
-	}
 	if (ctx.optlen > max_optlen || ctx.optlen < 0) {
 		ret = -EFAULT;
@@ -1624,8 +1613,8 @@ int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
 	ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[CGROUP_GETSOCKOPT],
 				    &ctx, bpf_prog_run);
-	if (!ret)
-		return -EPERM;
+	if (ret)
+		return ret;
 	if (ctx.optlen > *optlen)
 		return -EFAULT;
...
@@ -838,7 +838,7 @@ int devcgroup_check_permission(short type, u32 major, u32 minor, short access)
 	int rc = BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access);
 	if (rc)
-		return -EPERM;
+		return rc;
 #ifdef CONFIG_CGROUP_DEVICE
 	return devcgroup_legacy_check_permission(type, major, minor, access);
...