Commit 38036629 authored by Eric Dumazet, committed by David S. Miller

rds: tcp: block BH in TCP callbacks

The TCP stack can now run from process context.

Use the read_lock_bh(&sk->sk_callback_lock) variant to restore the
previous assumption that these callbacks run with BHs disabled.

Fixes: 5413d1ba ("net: do not block BH while processing socket backlog")
Fixes: d41a69f1 ("tcp: make tcp_sendmsg() aware of socket backlog")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent e1daca28
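
For context, all four hunks below apply the same pattern: an RDS/TCP socket callback dereferences sk->sk_user_data under sk->sk_callback_lock, and since TCP may now invoke these callbacks from process context (not only from softirq), the plain read_lock() no longer implies that BHs are disabled while the lock is held. The following sketch is illustrative only and is not part of the commit; the function name rds_tcp_example_data_ready is hypothetical, while sk_callback_lock, sk_user_data, sk_data_ready and the *_bh lock variants are the real kernel fields and APIs touched by the diff.

/* Illustrative sketch (not part of the commit): the common shape of the
 * RDS/TCP socket callbacks after this change.
 */
#include <net/sock.h>

static void rds_tcp_example_data_ready(struct sock *sk)
{
	void (*ready)(struct sock *sk);
	void *conn;

	/* TCP can now call this from process context as well as from
	 * softirq, so take the lock with BHs disabled to restore the old
	 * assumption that the callback runs with BHs off.
	 */
	read_lock_bh(&sk->sk_callback_lock);
	conn = sk->sk_user_data;
	if (!conn) {			/* check for teardown race */
		ready = sk->sk_data_ready;
		read_unlock_bh(&sk->sk_callback_lock);
		ready(sk);
		return;
	}

	/* ... transport-specific work on conn goes here ... */

	ready = sk->sk_data_ready;	/* stand-in for the saved original callback */
	read_unlock_bh(&sk->sk_callback_lock);
	ready(sk);
}
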
net/rds/tcp_connect.c
@@ -43,7 +43,7 @@ void rds_tcp_state_change(struct sock *sk)
 	struct rds_connection *conn;
 	struct rds_tcp_connection *tc;
 
-	read_lock(&sk->sk_callback_lock);
+	read_lock_bh(&sk->sk_callback_lock);
 	conn = sk->sk_user_data;
 	if (!conn) {
 		state_change = sk->sk_state_change;
@@ -69,7 +69,7 @@ void rds_tcp_state_change(struct sock *sk)
 		break;
 	}
 out:
-	read_unlock(&sk->sk_callback_lock);
+	read_unlock_bh(&sk->sk_callback_lock);
 	state_change(sk);
 }
net/rds/tcp_listen.c
@@ -166,7 +166,7 @@ void rds_tcp_listen_data_ready(struct sock *sk)
 	rdsdebug("listen data ready sk %p\n", sk);
 
-	read_lock(&sk->sk_callback_lock);
+	read_lock_bh(&sk->sk_callback_lock);
 	ready = sk->sk_user_data;
 	if (!ready) { /* check for teardown race */
 		ready = sk->sk_data_ready;
@@ -183,7 +183,7 @@ void rds_tcp_listen_data_ready(struct sock *sk)
 		rds_tcp_accept_work(sk);
 
 out:
-	read_unlock(&sk->sk_callback_lock);
+	read_unlock_bh(&sk->sk_callback_lock);
 	ready(sk);
 }
net/rds/tcp_recv.c
@@ -301,7 +301,7 @@ void rds_tcp_data_ready(struct sock *sk)
 	rdsdebug("data ready sk %p\n", sk);
 
-	read_lock(&sk->sk_callback_lock);
+	read_lock_bh(&sk->sk_callback_lock);
 	conn = sk->sk_user_data;
 	if (!conn) { /* check for teardown race */
 		ready = sk->sk_data_ready;
@@ -315,7 +315,7 @@ void rds_tcp_data_ready(struct sock *sk)
 	if (rds_tcp_read_sock(conn, GFP_ATOMIC) == -ENOMEM)
 		queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
 out:
-	read_unlock(&sk->sk_callback_lock);
+	read_unlock_bh(&sk->sk_callback_lock);
 	ready(sk);
 }
net/rds/tcp_send.c
@@ -180,7 +180,7 @@ void rds_tcp_write_space(struct sock *sk)
 	struct rds_connection *conn;
 	struct rds_tcp_connection *tc;
 
-	read_lock(&sk->sk_callback_lock);
+	read_lock_bh(&sk->sk_callback_lock);
 	conn = sk->sk_user_data;
 	if (!conn) {
 		write_space = sk->sk_write_space;
@@ -200,7 +200,7 @@ void rds_tcp_write_space(struct sock *sk)
 		queue_delayed_work(rds_wq, &conn->c_send_w, 0);
 out:
-	read_unlock(&sk->sk_callback_lock);
+	read_unlock_bh(&sk->sk_callback_lock);
 
 	/*
 	 * write_space is only called when data leaves tcp's send queue if