Commit cabeccbd authored by Ilpo Järvinen, committed by David S. Miller

tcp: kill eff_sacks "cache", the sole user can calculate itself

Also fixes an insignificant bug that would cause a stale SACK block to be
sent in some corner cases.
Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 758ce5c8
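The idea of the change, as a minimal standalone C sketch (the struct and helper below are illustrative stand-ins, not the kernel's own definitions): the cached eff_sacks field always had to equal num_sacks + dsack, so every place that touched either field had to refresh the cache; after this patch the one consumer, tcp_established_options(), simply computes the sum when it builds the option block.

/* Illustrative only: a simplified stand-in for struct tcp_options_received. */
#include <stdio.h>

struct sack_state {
	unsigned int num_sacks;		/* SACK blocks currently queued */
	unsigned int dsack;		/* 1 if a D-SACK block is pending */
};

/*
 * The sole user computes the value on demand instead of relying on a
 * cached eff_sacks field that could go stale.
 */
static unsigned int eff_sacks(const struct sack_state *s)
{
	return s->num_sacks + s->dsack;
}

int main(void)
{
	struct sack_state s = { .num_sacks = 2, .dsack = 1 };

	printf("SACK blocks to send with the next packet: %u\n", eff_sacks(&s));
	return 0;
}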
@@ -218,7 +218,6 @@ struct tcp_options_received {
 		snd_wscale : 4,	/* Window scaling received from sender	*/
 		rcv_wscale : 4;	/* Window scaling to send to receiver	*/
 /*	SACKs data	*/
-	u8	eff_sacks;	/* Size of SACK array to send with next packet */
 	u8	num_sacks;	/* Number of SACK blocks		*/
 	u16	user_mss;	/* mss requested by user in ioctl	*/
 	u16	mss_clamp;	/* Maximal mss, negotiated at connection setup */
@@ -926,7 +926,6 @@ extern void tcp_done(struct sock *sk);
 static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
 {
 	rx_opt->dsack = 0;
-	rx_opt->eff_sacks = 0;
 	rx_opt->num_sacks = 0;
 }
@@ -4099,7 +4099,6 @@ static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
 		tp->rx_opt.dsack = 1;
 		tp->duplicate_sack[0].start_seq = seq;
 		tp->duplicate_sack[0].end_seq = end_seq;
-		tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks + 1;
 	}
 }
@@ -4154,8 +4153,6 @@ static void tcp_sack_maybe_coalesce(struct tcp_sock *tp)
 			 * Decrease num_sacks.
 			 */
 			tp->rx_opt.num_sacks--;
-			tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks +
-					       tp->rx_opt.dsack;
 			for (i = this_sack; i < tp->rx_opt.num_sacks; i++)
 				sp[i] = sp[i + 1];
 			continue;
@@ -4218,7 +4215,6 @@ static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
 	sp->start_seq = seq;
 	sp->end_seq = end_seq;
 	tp->rx_opt.num_sacks++;
-	tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
 }
 /* RCV.NXT advances, some SACKs should be eaten. */
@@ -4232,7 +4228,6 @@ static void tcp_sack_remove(struct tcp_sock *tp)
 	/* Empty ofo queue, hence, all the SACKs are eaten. Clear. */
 	if (skb_queue_empty(&tp->out_of_order_queue)) {
 		tp->rx_opt.num_sacks = 0;
-		tp->rx_opt.eff_sacks = tp->rx_opt.dsack;
 		return;
 	}
@@ -4253,11 +4248,8 @@ static void tcp_sack_remove(struct tcp_sock *tp)
 		this_sack++;
 		sp++;
 	}
-	if (num_sacks != tp->rx_opt.num_sacks) {
+	if (num_sacks != tp->rx_opt.num_sacks)
 		tp->rx_opt.num_sacks = num_sacks;
-		tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks +
-				       tp->rx_opt.dsack;
-	}
 }
 /* This one checks to see if we can put data from the
@@ -4333,10 +4325,8 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 	TCP_ECN_accept_cwr(tp, skb);
-	if (tp->rx_opt.dsack) {
+	if (tp->rx_opt.dsack)
 		tp->rx_opt.dsack = 0;
-		tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks;
-	}
 	/* Queue data for delivery to the user.
 	 * Packets in sequence go to the receive queue.
@@ -4456,7 +4446,6 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 		if (tcp_is_sack(tp)) {
 			tp->rx_opt.num_sacks = 1;
 			tp->rx_opt.dsack = 0;
-			tp->rx_opt.eff_sacks = 1;
 			tp->selective_acks[0].start_seq = TCP_SKB_CB(skb)->seq;
 			tp->selective_acks[0].end_seq =
 						TCP_SKB_CB(skb)->end_seq;
@@ -434,9 +434,8 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
 		newtp->rx_opt.saw_tstamp = 0;
 		newtp->rx_opt.dsack = 0;
-		newtp->rx_opt.eff_sacks = 0;
 		newtp->rx_opt.num_sacks = 0;
 		newtp->urg_data = 0;
 		if (sock_flag(newsk, SOCK_KEEPOPEN))
@@ -441,10 +441,8 @@ static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
 			*ptr++ = htonl(sp[this_sack].end_seq);
 		}
-		if (tp->rx_opt.dsack) {
+		if (tp->rx_opt.dsack)
 			tp->rx_opt.dsack = 0;
-			tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks;
-		}
 	}
 }
@@ -550,6 +548,7 @@ static unsigned tcp_established_options(struct sock *sk, struct sk_buff *skb,
 	struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL;
 	struct tcp_sock *tp = tcp_sk(sk);
 	unsigned size = 0;
+	unsigned int eff_sacks;
 #ifdef CONFIG_TCP_MD5SIG
 	*md5 = tp->af_specific->md5_lookup(sk, sk);
@@ -568,10 +567,11 @@ static unsigned tcp_established_options(struct sock *sk, struct sk_buff *skb,
 		size += TCPOLEN_TSTAMP_ALIGNED;
 	}
-	if (unlikely(tp->rx_opt.eff_sacks)) {
+	eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
+	if (unlikely(eff_sacks)) {
 		const unsigned remaining = MAX_TCP_OPTION_SPACE - size;
 		opts->num_sack_blocks =
-			min_t(unsigned, tp->rx_opt.eff_sacks,
+			min_t(unsigned, eff_sacks,
 			      (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
 			      TCPOLEN_SACK_PERBLOCK);
 		size += TCPOLEN_SACK_BASE_ALIGNED +
@@ -1418,7 +1418,7 @@ static int tcp_mtu_probe(struct sock *sk)
 	    icsk->icsk_mtup.probe_size ||
 	    inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
 	    tp->snd_cwnd < 11 ||
-	    tp->rx_opt.eff_sacks)
+	    tp->rx_opt.num_sacks || tp->rx_opt.dsack)
 		return -1;
 	/* Very simple search strategy: just double the MSS. */
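For reference, the clamp applied in the tcp_established_options() hunk above can be reproduced outside the kernel. The sketch below assumes the option-length constants as the kernel headers define them (40 bytes of option space, a 4-byte aligned SACK header, 8 bytes per block); it is an illustration of the arithmetic, not the kernel function itself.

#include <stdio.h>

#define MAX_TCP_OPTION_SPACE		40	/* total TCP option bytes */
#define TCPOLEN_SACK_BASE_ALIGNED	4	/* SACK kind+length, padded */
#define TCPOLEN_SACK_PERBLOCK		8	/* two 32-bit sequence numbers */

/*
 * How many of the eff_sacks blocks actually fit into the option space
 * still free after other options (e.g. timestamps) were accounted for;
 * this mirrors the min_t() clamp in the hunk above.
 */
static unsigned int sack_blocks_that_fit(unsigned int eff_sacks,
					 unsigned int size_used)
{
	unsigned int remaining = MAX_TCP_OPTION_SPACE - size_used;
	unsigned int fit = (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
			   TCPOLEN_SACK_PERBLOCK;

	return eff_sacks < fit ? eff_sacks : fit;
}

int main(void)
{
	/* With 12 bytes already used by the timestamp option, only 3 of
	 * the 4 pending SACK blocks fit: prints 3. */
	printf("%u\n", sack_blocks_that_fit(4, 12));
	return 0;
}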