Commit 4389dded authored by Adam Langley, committed by David S. Miller

tcp: Remove redundant checks when setting eff_sacks

Remove redundant checks when setting eff_sacks and make the number of SACKs a
compile time constant. Now that the options code knows how many SACK blocks can
fit in the header, we don't need to have the SACK code guessing at it.
Signed-off-by: Adam Langley <agl@imperialviolet.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 33ad798c
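
To make the size argument in the patch concrete: a TCP header offers at most 40 bytes of option space (a 60-byte maximum header minus 20 fixed bytes), so size = 4 + n * 8 stays within the limit only for n <= 4 (36 bytes), while a fifth block would need 44. Below is a minimal standalone sketch of that arithmetic; it is not part of the commit, and the 40-byte figure corresponds to the kernel's MAX_TCP_OPTION_SPACE.

#include <stdio.h>

/* Constants quoted from the new comment; 40 is the TCP option-space
 * limit (MAX_TCP_OPTION_SPACE in the kernel). */
#define MAX_TCP_OPTION_SPACE		40
#define TCPOLEN_SACK_BASE_ALIGNED	4
#define TCPOLEN_SACK_PERBLOCK		8

int main(void)
{
	int n;

	for (n = 1; n <= 5; n++) {
		int size = TCPOLEN_SACK_BASE_ALIGNED +
			   n * TCPOLEN_SACK_PERBLOCK;

		printf("%d SACK block(s) -> %2d option bytes (%s)\n",
		       n, size,
		       size <= MAX_TCP_OPTION_SPACE ? "fits" : "does not fit");
	}
	return 0;
}
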
@@ -224,6 +224,12 @@ struct tcp_options_received {
 	u16	mss_clamp;	/* Maximal mss, negotiated at connection setup */
 };
 
+/* This is the max number of SACKS that we'll generate and process. It's safe
+ * to increase this, although since:
+ *   size = TCPOLEN_SACK_BASE_ALIGNED (4) + n * TCPOLEN_SACK_PERBLOCK (8)
+ * only four options will fit in a standard TCP header */
+#define TCP_NUM_SACKS 4
+
 struct tcp_request_sock {
 	struct inet_request_sock	req;
 #ifdef CONFIG_TCP_MD5SIG
@@ -1423,10 +1423,10 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
 	unsigned char *ptr = (skb_transport_header(ack_skb) +
 			      TCP_SKB_CB(ack_skb)->sacked);
 	struct tcp_sack_block_wire *sp_wire = (struct tcp_sack_block_wire *)(ptr+2);
-	struct tcp_sack_block sp[4];
+	struct tcp_sack_block sp[TCP_NUM_SACKS];
 	struct tcp_sack_block *cache;
 	struct sk_buff *skb;
-	int num_sacks = (ptr[1] - TCPOLEN_SACK_BASE) >> 3;
+	int num_sacks = min(TCP_NUM_SACKS, (ptr[1] - TCPOLEN_SACK_BASE) >> 3);
 	int used_sacks;
 	int reord = tp->packets_out;
 	int flag = 0;
@@ -3735,8 +3735,7 @@ static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
 		tp->rx_opt.dsack = 1;
 		tp->duplicate_sack[0].start_seq = seq;
 		tp->duplicate_sack[0].end_seq = end_seq;
-		tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + 1,
-					   4 - tp->rx_opt.tstamp_ok);
+		tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks + 1;
 	}
 }
@@ -3791,9 +3790,8 @@ static void tcp_sack_maybe_coalesce(struct tcp_sock *tp)
 			 * Decrease num_sacks.
 			 */
 			tp->rx_opt.num_sacks--;
-			tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks +
-						   tp->rx_opt.dsack,
-						   4 - tp->rx_opt.tstamp_ok);
+			tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks +
+					       tp->rx_opt.dsack;
 			for (i = this_sack; i < tp->rx_opt.num_sacks; i++)
 				sp[i] = sp[i + 1];
 			continue;
@@ -3843,7 +3841,7 @@ static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
 	 *
 	 * If the sack array is full, forget about the last one.
 	 */
-	if (this_sack >= 4) {
+	if (this_sack >= TCP_NUM_SACKS) {
 		this_sack--;
 		tp->rx_opt.num_sacks--;
 		sp--;
@@ -3856,8 +3854,7 @@ static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
 	sp->start_seq = seq;
 	sp->end_seq = end_seq;
 	tp->rx_opt.num_sacks++;
-	tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + tp->rx_opt.dsack,
-				   4 - tp->rx_opt.tstamp_ok);
+	tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
 }
 
 /* RCV.NXT advances, some SACKs should be eaten. */
@@ -3894,9 +3891,8 @@ static void tcp_sack_remove(struct tcp_sock *tp)
 	}
 	if (num_sacks != tp->rx_opt.num_sacks) {
 		tp->rx_opt.num_sacks = num_sacks;
-		tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks +
-					   tp->rx_opt.dsack,
-					   4 - tp->rx_opt.tstamp_ok);
+		tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks +
+				       tp->rx_opt.dsack;
 	}
 }
@@ -3975,8 +3971,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 		if (tp->rx_opt.dsack) {
 			tp->rx_opt.dsack = 0;
-			tp->rx_opt.eff_sacks = min_t(unsigned int, tp->rx_opt.num_sacks,
-						     4 - tp->rx_opt.tstamp_ok);
+			tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks;
 		}
 
 		/* Queue data for delivery to the user.
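
For context on the removed expressions: the old min(..., 4 - tp->rx_opt.tstamp_ok) clamp held eff_sacks to 3 blocks whenever timestamps are negotiated, because the 12-byte aligned timestamp option leaves only 28 of the 40 option bytes, enough for 4 + 3 * 8. After this patch eff_sacks simply counts the pending SACK/D-SACK blocks, and deciding how many actually fit in a given segment is left to the option-writing code referred to in the commit message. A rough, hypothetical sketch of that per-segment calculation follows; the helper name and its standalone form are not taken from the kernel.

#define MAX_TCP_OPTION_SPACE		40
#define TCPOLEN_TSTAMP_ALIGNED		12
#define TCPOLEN_SACK_BASE_ALIGNED	4
#define TCPOLEN_SACK_PERBLOCK		8

/* Hypothetical helper: how many of the eff_sacks blocks can actually
 * be emitted in one segment's option space. */
static int sack_blocks_that_fit(int eff_sacks, int tstamp_ok)
{
	int remaining = MAX_TCP_OPTION_SPACE
			- (tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0)
			- TCPOLEN_SACK_BASE_ALIGNED;
	int max_blocks = remaining / TCPOLEN_SACK_PERBLOCK;

	/* max_blocks is 3 with timestamps and 4 without: the same bound
	 * the removed "4 - tp->rx_opt.tstamp_ok" expressions applied. */
	return eff_sacks < max_blocks ? eff_sacks : max_blocks;
}
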