Commit 5d746e6a authored by Arnaldo Carvalho de Melo, committed by Arnaldo Carvalho de Melo

[NET] generalize some simple tcp sk_ack_backlog handling routines

Will be used by the poor cousins, starting with LLC.
Signed-off-by: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
parent fdaea996
......@@ -398,6 +398,21 @@ static inline int sock_flag(struct sock *sk, enum sock_flags flag)
return test_bit(flag, &sk->sk_flags);
}
/* A connection was taken off the accept queue: shrink the backlog count. */
static inline void sk_acceptq_removed(struct sock *sk)
{
	--sk->sk_ack_backlog;
}
/* A connection was placed on the accept queue: grow the backlog count. */
static inline void sk_acceptq_added(struct sock *sk)
{
	sk->sk_ack_backlog += 1;
}
/*
 * Nonzero when the pending-accept backlog has gone past the limit the
 * listener configured (sk_max_ack_backlog).
 */
static inline int sk_acceptq_is_full(struct sock *sk)
{
	if (sk->sk_ack_backlog > sk->sk_max_ack_backlog)
		return 1;
	return 0;
}
/* The per-socket spinlock must be held here. */
#define sk_add_backlog(__sk, __skb) \
do { if (!(__sk)->sk_backlog.tail) { \
......
......@@ -1794,28 +1794,13 @@ static inline int tcp_full_space( struct sock *sk)
return tcp_win_from_space(sk->sk_rcvbuf);
}
/* TCP-private accept-backlog decrement; this patch replaces it with the
 * generic sk_acceptq_removed() so other protocols (LLC) can share it. */
static inline void tcp_acceptq_removed(struct sock *sk)
{
sk->sk_ack_backlog--;
}
/* TCP-private accept-backlog increment; this patch replaces it with the
 * generic sk_acceptq_added() so other protocols (LLC) can share it. */
static inline void tcp_acceptq_added(struct sock *sk)
{
sk->sk_ack_backlog++;
}
/* TCP-private accept-backlog limit check; this patch replaces it with the
 * generic sk_acceptq_is_full() so other protocols (LLC) can share it. */
static inline int tcp_acceptq_is_full(struct sock *sk)
{
return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
}
static inline void tcp_acceptq_queue(struct sock *sk, struct open_request *req,
struct sock *child)
{
struct tcp_opt *tp = tcp_sk(sk);
req->sk = child;
tcp_acceptq_added(sk);
sk_acceptq_added(sk);
if (!tp->accept_queue_tail) {
tp->accept_queue = req;
......
......@@ -648,7 +648,7 @@ static void tcp_listen_stop (struct sock *sk)
local_bh_enable();
sock_put(child);
tcp_acceptq_removed(sk);
sk_acceptq_removed(sk);
tcp_openreq_fastfree(req);
}
BUG_TRAP(!sk->sk_ack_backlog);
......@@ -2225,7 +2225,7 @@ struct sock *tcp_accept(struct sock *sk, int flags, int *err)
tp->accept_queue_tail = NULL;
newsk = req->sk;
tcp_acceptq_removed(sk);
sk_acceptq_removed(sk);
tcp_openreq_fastfree(req);
BUG_TRAP(newsk->sk_state != TCP_SYN_RECV);
release_sock(sk);
......
......@@ -1442,7 +1442,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
* clogging syn queue with openreqs with exponentially increasing
* timeout.
*/
if (tcp_acceptq_is_full(sk) && tcp_synq_young(sk) > 1)
if (sk_acceptq_is_full(sk) && tcp_synq_young(sk) > 1)
goto drop;
req = tcp_openreq_alloc();
......@@ -1567,7 +1567,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
struct tcp_opt *newtp;
struct sock *newsk;
if (tcp_acceptq_is_full(sk))
if (sk_acceptq_is_full(sk))
goto exit_overflow;
if (!dst && (dst = tcp_v4_route_req(sk, req)) == NULL)
......
......@@ -1183,7 +1183,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
goto drop;
}
if (tcp_acceptq_is_full(sk) && tcp_synq_young(sk) > 1)
if (sk_acceptq_is_full(sk) && tcp_synq_young(sk) > 1)
goto drop;
req = tcp_openreq_alloc();
......@@ -1300,7 +1300,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
opt = np->opt;
if (tcp_acceptq_is_full(sk))
if (sk_acceptq_is_full(sk))
goto out_overflow;
if (np->rxopt.bits.srcrt == 2 &&
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment