Commit b21dd450 authored by Sowmini Varadhan's avatar Sowmini Varadhan Committed by David S. Miller

rds: tcp: Sequence teardown of listen and acceptor sockets to avoid races

Commit a93d01f5 ("RDS: TCP: avoid bad page reference in
rds_tcp_listen_data_ready") added the function
rds_tcp_listen_sock_def_readable() to handle the case when a
partially set-up acceptor socket drops into rds_tcp_listen_data_ready().
However, if the listen socket (rtn->rds_tcp_listen_sock) is itself going
through a tear-down via rds_tcp_listen_stop(), the (*ready)() will be
null and we would hit a panic of the form
  BUG: unable to handle kernel NULL pointer dereference at   (null)
  IP:           (null)
   :
  ? rds_tcp_listen_data_ready+0x59/0xb0 [rds_tcp]
  tcp_data_queue+0x39d/0x5b0
  tcp_rcv_established+0x2e5/0x660
  tcp_v4_do_rcv+0x122/0x220
  tcp_v4_rcv+0x8b7/0x980
    :
In the above case, it is not fatal to encounter a NULL value for
ready — we should just drop the packet and let the flush of the
acceptor thread finish gracefully.

In general, the tear-down sequence for listen() and accept() socket
that is ensured by this commit is:
     rtn->rds_tcp_listen_sock = NULL; /* prevent any new accepts */
     In rds_tcp_listen_stop():
         serialize with, and prevent, further callbacks using lock_sock()
         flush rds_wq
         flush acceptor workq
         sock_release(listen socket)
Signed-off-by: Sowmini Varadhan <sowmini.varadhan@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 16c09b1c
...@@ -484,9 +484,10 @@ static void __net_exit rds_tcp_exit_net(struct net *net) ...@@ -484,9 +484,10 @@ static void __net_exit rds_tcp_exit_net(struct net *net)
* we do need to clean up the listen socket here. * we do need to clean up the listen socket here.
*/ */
if (rtn->rds_tcp_listen_sock) { if (rtn->rds_tcp_listen_sock) {
rds_tcp_listen_stop(rtn->rds_tcp_listen_sock); struct socket *lsock = rtn->rds_tcp_listen_sock;
rtn->rds_tcp_listen_sock = NULL; rtn->rds_tcp_listen_sock = NULL;
flush_work(&rtn->rds_tcp_accept_w); rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w);
} }
} }
...@@ -523,10 +524,10 @@ static void rds_tcp_kill_sock(struct net *net) ...@@ -523,10 +524,10 @@ static void rds_tcp_kill_sock(struct net *net)
struct rds_tcp_connection *tc, *_tc; struct rds_tcp_connection *tc, *_tc;
LIST_HEAD(tmp_list); LIST_HEAD(tmp_list);
struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid); struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
struct socket *lsock = rtn->rds_tcp_listen_sock;
rds_tcp_listen_stop(rtn->rds_tcp_listen_sock);
rtn->rds_tcp_listen_sock = NULL; rtn->rds_tcp_listen_sock = NULL;
flush_work(&rtn->rds_tcp_accept_w); rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w);
spin_lock_irq(&rds_tcp_conn_lock); spin_lock_irq(&rds_tcp_conn_lock);
list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) { list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
struct net *c_net = tc->t_cpath->cp_conn->c_net; struct net *c_net = tc->t_cpath->cp_conn->c_net;
...@@ -546,8 +547,12 @@ static void rds_tcp_kill_sock(struct net *net) ...@@ -546,8 +547,12 @@ static void rds_tcp_kill_sock(struct net *net)
void *rds_tcp_listen_sock_def_readable(struct net *net) void *rds_tcp_listen_sock_def_readable(struct net *net)
{ {
struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid); struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
struct socket *lsock = rtn->rds_tcp_listen_sock;
if (!lsock)
return NULL;
return rtn->rds_tcp_listen_sock->sk->sk_user_data; return lsock->sk->sk_user_data;
} }
static int rds_tcp_dev_event(struct notifier_block *this, static int rds_tcp_dev_event(struct notifier_block *this,
......
...@@ -66,7 +66,7 @@ void rds_tcp_state_change(struct sock *sk); ...@@ -66,7 +66,7 @@ void rds_tcp_state_change(struct sock *sk);
/* tcp_listen.c */ /* tcp_listen.c */
struct socket *rds_tcp_listen_init(struct net *); struct socket *rds_tcp_listen_init(struct net *);
void rds_tcp_listen_stop(struct socket *); void rds_tcp_listen_stop(struct socket *sock, struct work_struct *acceptor);
void rds_tcp_listen_data_ready(struct sock *sk); void rds_tcp_listen_data_ready(struct sock *sk);
int rds_tcp_accept_one(struct socket *sock); int rds_tcp_accept_one(struct socket *sock);
int rds_tcp_keepalive(struct socket *sock); int rds_tcp_keepalive(struct socket *sock);
......
...@@ -223,6 +223,9 @@ void rds_tcp_listen_data_ready(struct sock *sk) ...@@ -223,6 +223,9 @@ void rds_tcp_listen_data_ready(struct sock *sk)
* before it has been accepted and the accepter has set up their * before it has been accepted and the accepter has set up their
* data_ready.. we only want to queue listen work for our listening * data_ready.. we only want to queue listen work for our listening
* socket * socket
*
* (*ready)() may be null if we are racing with netns delete, and
* the listen socket is being torn down.
*/ */
if (sk->sk_state == TCP_LISTEN) if (sk->sk_state == TCP_LISTEN)
rds_tcp_accept_work(sk); rds_tcp_accept_work(sk);
...@@ -231,7 +234,8 @@ void rds_tcp_listen_data_ready(struct sock *sk) ...@@ -231,7 +234,8 @@ void rds_tcp_listen_data_ready(struct sock *sk)
out: out:
read_unlock_bh(&sk->sk_callback_lock); read_unlock_bh(&sk->sk_callback_lock);
ready(sk); if (ready)
ready(sk);
} }
struct socket *rds_tcp_listen_init(struct net *net) struct socket *rds_tcp_listen_init(struct net *net)
...@@ -271,7 +275,7 @@ struct socket *rds_tcp_listen_init(struct net *net) ...@@ -271,7 +275,7 @@ struct socket *rds_tcp_listen_init(struct net *net)
return NULL; return NULL;
} }
void rds_tcp_listen_stop(struct socket *sock) void rds_tcp_listen_stop(struct socket *sock, struct work_struct *acceptor)
{ {
struct sock *sk; struct sock *sk;
...@@ -292,5 +296,6 @@ void rds_tcp_listen_stop(struct socket *sock) ...@@ -292,5 +296,6 @@ void rds_tcp_listen_stop(struct socket *sock)
/* wait for accepts to stop and close the socket */ /* wait for accepts to stop and close the socket */
flush_workqueue(rds_wq); flush_workqueue(rds_wq);
flush_work(acceptor);
sock_release(sock); sock_release(sock);
} }
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment