Commit 78e60bbb authored by Martin KaFai Lau, committed by Alexei Starovoitov

bpf: selftests: Bpf_cubic and bpf_dctcp calling kernel functions

This patch removes the bpf implementations of tcp_slow_start()
and tcp_cong_avoid_ai() and instead calls the kernel
implementations directly.

It also replaces the bpf_cubic_undo_cwnd implementation with a direct
call to tcp_reno_undo_cwnd().  bpf_dctcp likewise calls
tcp_reno_cong_avoid() directly.
Signed-off-by: Martin KaFai Lau <kafai@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20210325015246.1551062-1-kafai@fb.com
parent 39cd9e0f
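The diff below switches these callbacks to the pattern sketched here: declare the kernel function as an extern marked __ksym and call it from the BPF struct_ops program instead of carrying a BPF copy of it. The sketch is illustrative only and not part of the patch; it assumes the selftests' bpf_tcp_helpers.h header (which pulls in BPF_STRUCT_OPS, SEC(), __ksym and the minimal struct sock/tcp_sock definitions), and example_undo_cwnd is a hypothetical callback name.

/* Illustrative only: forward a congestion-ops callback to the kernel's
 * tcp_reno_undo_cwnd() instead of re-implementing it in BPF.
 */
#include "bpf_tcp_helpers.h"

char _license[] SEC("license") = "GPL";

/* Kernel function declaration; resolved against kernel BTF at load time. */
extern __u32 tcp_reno_undo_cwnd(struct sock *sk) __ksym;

/* Hypothetical struct_ops callback that simply calls the kernel helper. */
__u32 BPF_STRUCT_OPS(example_undo_cwnd, struct sock *sk)
{
        return tcp_reno_undo_cwnd(sk);
}

Such a call only loads on kernels that allow the named function to be invoked from BPF congestion-control programs; the selftest changes below rely on exactly that.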
@@ -187,16 +187,6 @@ struct tcp_congestion_ops {
         typeof(y) __y = (y);                    \
         __x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })
 
-static __always_inline __u32 tcp_slow_start(struct tcp_sock *tp, __u32 acked)
-{
-        __u32 cwnd = min(tp->snd_cwnd + acked, tp->snd_ssthresh);
-
-        acked -= cwnd - tp->snd_cwnd;
-        tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);
-
-        return acked;
-}
-
 static __always_inline bool tcp_in_slow_start(const struct tcp_sock *tp)
 {
         return tp->snd_cwnd < tp->snd_ssthresh;
@@ -213,22 +203,7 @@ static __always_inline bool tcp_is_cwnd_limited(const struct sock *sk)
         return !!BPF_CORE_READ_BITFIELD(tp, is_cwnd_limited);
 }
 
-static __always_inline void tcp_cong_avoid_ai(struct tcp_sock *tp, __u32 w, __u32 acked)
-{
-        /* If credits accumulated at a higher w, apply them gently now. */
-        if (tp->snd_cwnd_cnt >= w) {
-                tp->snd_cwnd_cnt = 0;
-                tp->snd_cwnd++;
-        }
-
-        tp->snd_cwnd_cnt += acked;
-        if (tp->snd_cwnd_cnt >= w) {
-                __u32 delta = tp->snd_cwnd_cnt / w;
-
-                tp->snd_cwnd_cnt -= delta * w;
-                tp->snd_cwnd += delta;
-        }
-        tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_cwnd_clamp);
-}
+extern __u32 tcp_slow_start(struct tcp_sock *tp, __u32 acked) __ksym;
+extern void tcp_cong_avoid_ai(struct tcp_sock *tp, __u32 w, __u32 acked) __ksym;
 
 #endif
@@ -525,11 +525,11 @@ void BPF_STRUCT_OPS(bpf_cubic_acked, struct sock *sk,
                 hystart_update(sk, delay);
 }
 
+extern __u32 tcp_reno_undo_cwnd(struct sock *sk) __ksym;
+
 __u32 BPF_STRUCT_OPS(bpf_cubic_undo_cwnd, struct sock *sk)
 {
-        const struct tcp_sock *tp = tcp_sk(sk);
-
-        return max(tp->snd_cwnd, tp->prior_cwnd);
+        return tcp_reno_undo_cwnd(sk);
 }
 
 SEC(".struct_ops")
...
@@ -194,22 +194,12 @@ __u32 BPF_PROG(dctcp_cwnd_undo, struct sock *sk)
         return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
 }
 
-SEC("struct_ops/tcp_reno_cong_avoid")
-void BPF_PROG(tcp_reno_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
-{
-        struct tcp_sock *tp = tcp_sk(sk);
-
-        if (!tcp_is_cwnd_limited(sk))
-                return;
+extern void tcp_reno_cong_avoid(struct sock *sk, __u32 ack, __u32 acked) __ksym;
 
-        /* In "safe" area, increase. */
-        if (tcp_in_slow_start(tp)) {
-                acked = tcp_slow_start(tp, acked);
-                if (!acked)
-                        return;
-        }
-        /* In dangerous area, increase slowly. */
-        tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked);
+SEC("struct_ops/dctcp_reno_cong_avoid")
+void BPF_PROG(dctcp_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
+{
+        tcp_reno_cong_avoid(sk, ack, acked);
 }
 
 SEC(".struct_ops")
@@ -226,7 +216,7 @@ struct tcp_congestion_ops dctcp = {
         .in_ack_event = (void *)dctcp_update_alpha,
         .cwnd_event = (void *)dctcp_cwnd_event,
         .ssthresh = (void *)dctcp_ssthresh,
-        .cong_avoid = (void *)tcp_reno_cong_avoid,
+        .cong_avoid = (void *)dctcp_cong_avoid,
         .undo_cwnd = (void *)dctcp_cwnd_undo,
         .set_state = (void *)dctcp_state,
         .flags = TCP_CONG_NEEDS_ECN,
...