Commit 0a5578cf authored by Arnaldo Carvalho de Melo, committed by David S. Miller

[ICSK]: Generalise tcp_listen_{start,stop}

This also moved inet_iif from tcp to inet_hashtables.h, as it is
needed by the inet_lookup callers. Perhaps this needs a bit of
polishing, but for now it seems fine.
Signed-off-by: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 9f1d2604
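
The diff below renames tcp_listen_start()/tcp_listen_stop() to inet_csk_listen_start()/inet_csk_listen_stop() and tcp_destroy_sock() to inet_csk_destroy_sock(), makes the request-sock hash size a parameter, and routes the disconnect call and orphan accounting through struct proto (sk->sk_prot->disconnect, sk->sk_prot->orphan_count) so the code is no longer TCP-specific. A minimal sketch of how another connection-oriented protocol could hook into the generalised helpers; everything named foo_* and FOO_SYNQ_HSIZE below is hypothetical and not part of this commit:

/* Illustrative only: a hypothetical struct proto wired up to the
 * generalised listen helpers.  None of the foo_* symbols exist in
 * the kernel; they stand in for a future inet_connection_sock user. */
#define FOO_SYNQ_HSIZE	64	/* hypothetical request hash size */

static atomic_t foo_orphan_count = ATOMIC_INIT(0);

static int foo_disconnect(struct sock *sk, int flags);

static int foo_listen_start(struct sock *sk)
{
	/* The request_sock hash size is now passed in instead of
	 * being hard-coded to TCP_SYNQ_HSIZE. */
	return inet_csk_listen_start(sk, FOO_SYNQ_HSIZE);
}

static struct proto foo_prot = {
	.name		= "FOO",
	/* inet_csk_listen_stop() disconnects not-yet-accepted children
	 * through sk->sk_prot->disconnect ... */
	.disconnect	= foo_disconnect,
	/* ... and accounts orphans through sk->sk_prot->orphan_count
	 * instead of touching tcp_orphan_count directly. */
	.orphan_count	= &foo_orphan_count,
};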

include/net/inet_hashtables.h
@@ -26,6 +26,7 @@
 #include <linux/wait.h>
 
 #include <net/inet_connection_sock.h>
+#include <net/route.h>
 #include <net/sock.h>
 #include <net/tcp_states.h>
@@ -280,6 +281,11 @@ static inline void inet_unhash(struct inet_hashinfo *hashinfo, struct sock *sk)
 	wake_up(&hashinfo->lhash_wait);
 }
 
+static inline int inet_iif(const struct sk_buff *skb)
+{
+	return ((struct rtable *)skb->dst)->rt_iif;
+}
+
 extern struct sock *__inet_lookup_listener(const struct hlist_head *head,
 					   const u32 daddr,
 					   const unsigned short hnum,
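
For context, the inet_iif() helper added above just pulls the input interface index out of the route cached on the skb, so inet_lookup callers can pass it as the dif argument. A rough sketch of such a caller, based on the __inet_lookup_listener() declaration in this header; find_listener() itself is hypothetical, and the listening-hash locking done by the real callers is omitted:

/* Illustrative only: combining inet_iif() with __inet_lookup_listener().
 * find_listener() is not a kernel function, and the read lock on
 * hashinfo->lhash_lock taken by real callers is left out for brevity. */
static struct sock *find_listener(struct inet_hashinfo *hashinfo,
				  const struct sk_buff *skb,
				  const u32 daddr,
				  const unsigned short hnum)
{
	const struct hlist_head *head =
		&hashinfo->listening_hash[inet_lhashfn(hnum)];

	/* dif: the interface the packet arrived on, recovered from
	 * the skb's route by inet_iif(). */
	return __inet_lookup_listener(head, daddr, hnum, inet_iif(skb));
}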

include/net/sock.h
@@ -558,6 +558,7 @@ struct proto {
 	kmem_cache_t		*twsk_slab;
 	unsigned int		twsk_obj_size;
+	atomic_t		*orphan_count;
 	struct request_sock_ops	*rsk_prot;

include/net/tcp.h
@@ -860,7 +860,7 @@ static __inline__ void tcp_update_wl(struct tcp_sock *tp, u32 ack, u32 seq)
 	tp->snd_wl1 = seq;
 }
 
-extern void tcp_destroy_sock(struct sock *sk);
+extern void inet_csk_destroy_sock(struct sock *sk);
 
 /*
@@ -987,7 +987,7 @@ static __inline__ void tcp_done(struct sock *sk)
 	if (!sock_flag(sk, SOCK_DEAD))
 		sk->sk_state_change(sk);
 	else
-		tcp_destroy_sock(sk);
+		inet_csk_destroy_sock(sk);
 }
 
 static __inline__ void tcp_sack_reset(struct tcp_options_received *rx_opt)

net/ipv4/af_inet.c
@@ -202,7 +202,7 @@ int inet_listen(struct socket *sock, int backlog)
 	 * we can only allow the backlog to be adjusted.
 	 */
 	if (old_state != TCP_LISTEN) {
-		err = tcp_listen_start(sk);
+		err = inet_csk_listen_start(sk, TCP_SYNQ_HSIZE);
 		if (err)
 			goto out;
 	}

net/ipv4/tcp.c
@@ -273,6 +273,8 @@ DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics);
 atomic_t tcp_orphan_count = ATOMIC_INIT(0);
 
+EXPORT_SYMBOL_GPL(tcp_orphan_count);
+
 int sysctl_tcp_mem[3];
 int sysctl_tcp_wmem[3] = { 4 * 1024, 16 * 1024, 128 * 1024 };
 int sysctl_tcp_rmem[3] = { 4 * 1024, 87380, 87380 * 2 };
@@ -454,12 +456,11 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
 	return put_user(answ, (int __user *)arg);
 }
 
-int tcp_listen_start(struct sock *sk)
+int inet_csk_listen_start(struct sock *sk, const int nr_table_entries)
 {
 	struct inet_sock *inet = inet_sk(sk);
 	struct inet_connection_sock *icsk = inet_csk(sk);
-	int rc = reqsk_queue_alloc(&icsk->icsk_accept_queue, TCP_SYNQ_HSIZE);
+	int rc = reqsk_queue_alloc(&icsk->icsk_accept_queue, nr_table_entries);
 
 	if (rc != 0)
 		return rc;
@@ -488,12 +489,13 @@ int tcp_listen_start(struct sock *sk)
 	return -EADDRINUSE;
 }
 
+EXPORT_SYMBOL_GPL(inet_csk_listen_start);
+
 /*
  * This routine closes sockets which have been at least partially
  * opened, but not yet accepted.
  */
-static void tcp_listen_stop (struct sock *sk)
+static void inet_csk_listen_stop(struct sock *sk)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct request_sock *acc_req;
@@ -524,13 +526,13 @@ static void tcp_listen_stop (struct sock *sk)
 		BUG_TRAP(!sock_owned_by_user(child));
 		sock_hold(child);
 
-		tcp_disconnect(child, O_NONBLOCK);
+		sk->sk_prot->disconnect(child, O_NONBLOCK);
 
 		sock_orphan(child);
 
-		atomic_inc(&tcp_orphan_count);
+		atomic_inc(sk->sk_prot->orphan_count);
 
-		tcp_destroy_sock(child);
+		inet_csk_destroy_sock(child);
 
 		bh_unlock_sock(child);
 		local_bh_enable();
@@ -542,6 +544,8 @@ static void tcp_listen_stop (struct sock *sk)
 	BUG_TRAP(!sk->sk_ack_backlog);
 }
 
+EXPORT_SYMBOL_GPL(inet_csk_listen_stop);
+
 static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
 {
 	TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
@@ -1561,7 +1565,7 @@ void tcp_shutdown(struct sock *sk, int how)
  * can assume the socket waitqueue is inactive and nobody will
  * try to jump onto it.
  */
-void tcp_destroy_sock(struct sock *sk)
+void inet_csk_destroy_sock(struct sock *sk)
 {
 	BUG_TRAP(sk->sk_state == TCP_CLOSE);
 	BUG_TRAP(sock_flag(sk, SOCK_DEAD));
@@ -1580,7 +1584,7 @@ void tcp_destroy_sock(struct sock *sk)
 	sk_refcnt_debug_release(sk);
 
-	atomic_dec(&tcp_orphan_count);
+	atomic_dec(sk->sk_prot->orphan_count);
 	sock_put(sk);
 }
@@ -1596,7 +1600,7 @@ void tcp_close(struct sock *sk, long timeout)
 		tcp_set_state(sk, TCP_CLOSE);
 
 		/* Special case. */
-		tcp_listen_stop(sk);
+		inet_csk_listen_stop(sk);
 
 		goto adjudge_to_death;
 	}
@@ -1704,7 +1708,7 @@ void tcp_close(struct sock *sk, long timeout)
 		if (tmo > TCP_TIMEWAIT_LEN) {
 			inet_csk_reset_keepalive_timer(sk, tcp_fin_time(sk));
 		} else {
-			atomic_inc(&tcp_orphan_count);
+			atomic_inc(sk->sk_prot->orphan_count);
 			tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
 			goto out;
 		}
@@ -1712,7 +1716,7 @@ void tcp_close(struct sock *sk, long timeout)
 	}
 	if (sk->sk_state != TCP_CLOSE) {
 		sk_stream_mem_reclaim(sk);
-		if (atomic_read(&tcp_orphan_count) > sysctl_tcp_max_orphans ||
+		if (atomic_read(sk->sk_prot->orphan_count) > sysctl_tcp_max_orphans ||
 		    (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
 		     atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])) {
 			if (net_ratelimit())
@@ -1723,10 +1727,10 @@ void tcp_close(struct sock *sk, long timeout)
 			NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
 		}
 	}
-	atomic_inc(&tcp_orphan_count);
+	atomic_inc(sk->sk_prot->orphan_count);
 
 	if (sk->sk_state == TCP_CLOSE)
-		tcp_destroy_sock(sk);
+		inet_csk_destroy_sock(sk);
 	/* Otherwise, socket is reprieved until protocol close. */
 
 out:
@@ -1757,7 +1761,7 @@ int tcp_disconnect(struct sock *sk, int flags)
 	/* ABORT function of RFC793 */
 	if (old_state == TCP_LISTEN) {
-		tcp_listen_stop(sk);
+		inet_csk_listen_stop(sk);
 	} else if (tcp_need_reset(old_state) ||
 		   (tp->snd_nxt != tp->write_seq &&
 		    (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
@@ -2253,7 +2257,7 @@ void __init tcp_init(void)
 }
 
 EXPORT_SYMBOL(tcp_close);
-EXPORT_SYMBOL(tcp_destroy_sock);
+EXPORT_SYMBOL(inet_csk_destroy_sock);
 EXPORT_SYMBOL(tcp_disconnect);
 EXPORT_SYMBOL(tcp_getsockopt);
 EXPORT_SYMBOL(tcp_ioctl);

net/ipv4/tcp_ipv4.c
@@ -431,11 +431,6 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 	return err;
 }
 
-static inline int inet_iif(const struct sk_buff *skb)
-{
-	return ((struct rtable *)skb->dst)->rt_iif;
-}
-
 /*
  * This routine does path mtu discovery as defined in RFC1191.
  */
@@ -1993,6 +1988,7 @@ struct proto tcp_prot = {
 	.get_port		= tcp_v4_get_port,
 	.enter_memory_pressure	= tcp_enter_memory_pressure,
 	.sockets_allocated	= &tcp_sockets_allocated,
+	.orphan_count		= &tcp_orphan_count,
 	.memory_allocated	= &tcp_memory_allocated,
 	.memory_pressure	= &tcp_memory_pressure,
 	.sysctl_mem		= sysctl_tcp_mem,

net/ipv6/tcp_ipv6.c
@@ -2248,6 +2248,7 @@ struct proto tcpv6_prot = {
 	.sockets_allocated	= &tcp_sockets_allocated,
 	.memory_allocated	= &tcp_memory_allocated,
 	.memory_pressure	= &tcp_memory_pressure,
+	.orphan_count		= &tcp_orphan_count,
 	.sysctl_mem		= sysctl_tcp_mem,
 	.sysctl_wmem		= sysctl_tcp_wmem,
 	.sysctl_rmem		= sysctl_tcp_rmem,