Commit e60402d0 authored by Ilpo Järvinen's avatar Ilpo Järvinen Committed by David S. Miller

[TCP]: Move sack_ok access to obviously named funcs & cleanup

Previously code had IsReno/IsFack defined as macros that were
local to tcp_input.c though the sack_ok field has users elsewhere too
for the same purpose. This changes them to static inlines as
preferred according to the current coding style and unifies the
access to sack_ok across multiple files. Magic bitops of sack_ok
for FACK and DSACK are also abstracted to functions with
appropriate names.

Note:
- One sack_ok = 1 remains but that's self-explanatory, i.e., it
  enables sack
- Couple of !IsReno cases are changed to tcp_is_sack
- There were no users for IsDSack => I dropped it
Signed-off-by: default avatarIlpo Järvinen <ilpo.jarvinen@helsinki.fi>
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parent b9c4595b
...@@ -719,6 +719,34 @@ static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event) ...@@ -719,6 +719,34 @@ static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
icsk->icsk_ca_ops->cwnd_event(sk, event); icsk->icsk_ca_ops->cwnd_event(sk, event);
} }
/* These functions determine how the current flow behaves in respect of SACK
* handling. SACK is negotiated with the peer, and therefore it can vary
* between different flows.
*
* tcp_is_sack - SACK enabled
* tcp_is_reno - No SACK
* tcp_is_fack - FACK enabled, implies SACK enabled
*/
/* Nonzero when SACK was negotiated for this flow (returns the raw
 * sack_ok bits, so FACK-enabled flows yield 3 rather than 1).
 */
static inline int tcp_is_sack(const struct tcp_sock *tp)
{
	const int sack_bits = tp->rx_opt.sack_ok;

	return sack_bits;
}
/* Nonzero when the flow runs without SACK, i.e. plain (New)Reno
 * loss recovery must be used.
 */
static inline int tcp_is_reno(const struct tcp_sock *tp)
{
	return tcp_is_sack(tp) == 0;
}
/* Nonzero (the bit value 2, not normalized to 1) when FACK is
 * enabled for this flow; FACK implies SACK is enabled as well.
 */
static inline int tcp_is_fack(const struct tcp_sock *tp)
{
	const int sack_flags = tp->rx_opt.sack_ok;

	return sack_flags & 2;
}
/* Switch FACK on by setting bit 2 of sack_ok; any bits already
 * set (e.g. the SACK-enabled bit) are left untouched.
 */
static inline void tcp_enable_fack(struct tcp_sock *tp)
{
	tp->rx_opt.sack_ok = tp->rx_opt.sack_ok | 2;
}
static inline unsigned int tcp_left_out(const struct tcp_sock *tp) static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
{ {
return tp->sacked_out + tp->lost_out; return tp->sacked_out + tp->lost_out;
......
...@@ -2014,7 +2014,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info) ...@@ -2014,7 +2014,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
if (tp->rx_opt.tstamp_ok) if (tp->rx_opt.tstamp_ok)
info->tcpi_options |= TCPI_OPT_TIMESTAMPS; info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
if (tp->rx_opt.sack_ok) if (tcp_is_sack(tp))
info->tcpi_options |= TCPI_OPT_SACK; info->tcpi_options |= TCPI_OPT_SACK;
if (tp->rx_opt.wscale_ok) { if (tp->rx_opt.wscale_ok) {
info->tcpi_options |= TCPI_OPT_WSCALE; info->tcpi_options |= TCPI_OPT_WSCALE;
......
This diff is collapsed.
...@@ -445,7 +445,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, ...@@ -445,7 +445,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
newtp->rx_opt.tstamp_ok = ireq->tstamp_ok; newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
if ((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) { if ((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
if (sysctl_tcp_fack) if (sysctl_tcp_fack)
newtp->rx_opt.sack_ok |= 2; tcp_enable_fack(newtp);
} }
newtp->window_clamp = req->window_clamp; newtp->window_clamp = req->window_clamp;
newtp->rcv_ssthresh = req->rcv_wnd; newtp->rcv_ssthresh = req->rcv_wnd;
......
...@@ -737,7 +737,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss ...@@ -737,7 +737,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss
if (diff > 0) { if (diff > 0) {
/* Adjust Reno SACK estimate. */ /* Adjust Reno SACK estimate. */
if (!tp->rx_opt.sack_ok) { if (tcp_is_reno(tp)) {
tcp_dec_pcount_approx_int(&tp->sacked_out, diff); tcp_dec_pcount_approx_int(&tp->sacked_out, diff);
tcp_verify_left_out(tp); tcp_verify_left_out(tp);
} }
...@@ -1728,7 +1728,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int m ...@@ -1728,7 +1728,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int m
if (TCP_SKB_CB(next_skb)->sacked&TCPCB_LOST) if (TCP_SKB_CB(next_skb)->sacked&TCPCB_LOST)
tp->lost_out -= tcp_skb_pcount(next_skb); tp->lost_out -= tcp_skb_pcount(next_skb);
/* Reno case is special. Sigh... */ /* Reno case is special. Sigh... */
if (!tp->rx_opt.sack_ok && tp->sacked_out) if (tcp_is_reno(tp) && tp->sacked_out)
tcp_dec_pcount_approx(&tp->sacked_out, next_skb); tcp_dec_pcount_approx(&tp->sacked_out, next_skb);
/* Not quite right: it can be > snd.fack, but /* Not quite right: it can be > snd.fack, but
...@@ -1976,7 +1976,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk) ...@@ -1976,7 +1976,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
return; return;
/* No forward retransmissions in Reno are possible. */ /* No forward retransmissions in Reno are possible. */
if (!tp->rx_opt.sack_ok) if (tcp_is_reno(tp))
return; return;
/* Yeah, we have to make difficult choice between forward transmission /* Yeah, we have to make difficult choice between forward transmission
......
...@@ -315,7 +315,7 @@ static void tcp_retransmit_timer(struct sock *sk) ...@@ -315,7 +315,7 @@ static void tcp_retransmit_timer(struct sock *sk)
if (icsk->icsk_retransmits == 0) { if (icsk->icsk_retransmits == 0) {
if (icsk->icsk_ca_state == TCP_CA_Disorder || if (icsk->icsk_ca_state == TCP_CA_Disorder ||
icsk->icsk_ca_state == TCP_CA_Recovery) { icsk->icsk_ca_state == TCP_CA_Recovery) {
if (tp->rx_opt.sack_ok) { if (tcp_is_sack(tp)) {
if (icsk->icsk_ca_state == TCP_CA_Recovery) if (icsk->icsk_ca_state == TCP_CA_Recovery)
NET_INC_STATS_BH(LINUX_MIB_TCPSACKRECOVERYFAIL); NET_INC_STATS_BH(LINUX_MIB_TCPSACKRECOVERYFAIL);
else else
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment