Commit 6b88af83 authored by Dust Li, committed by David S. Miller

net/smc: don't send in the BH context if sock_owned_by_user

Sending data all the way down to the RDMA device is a time-consuming
operation (getting a new slot, possibly doing an RDMA Write and sending
a CDC message, etc.). Moving those operations from the BH to the user
context is good for performance.

If the sock_lock is held by the user, we don't try to send data out
in the BH context, but just mark that we need to send. Since the user
will release the sock_lock soon, we can do the sending there.

Add smc_release_cb(), which will be called in release_sock(), and try
to send in the callback if needed.

This patch moves the sending part out of the BH context when the sock
lock is held by the user. In my testing environment, this saves about
20% of softirq time on the sender side in the qperf 4K tcp_bw test,
with no noticeable throughput drop.
Signed-off-by: Dust Li <dust.li@linux.alibaba.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent a505cce6
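
For context on where the new callback fires, below is a simplified sketch of
release_sock() from net/core/sock.c (not part of this patch; the exact body
varies between kernel versions). The relevant point is that the protocol's
->release_cb() runs after the backlog is drained but before socket ownership
is dropped, so smc_release_cb() can flush the deferred transmit while the
socket is still owned by the user context.

void release_sock(struct sock *sk)
{
	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_backlog.tail)
		__release_sock(sk);	/* process packets queued while the user held the lock */

	if (sk->sk_prot->release_cb)	/* -> smc_release_cb() for SMC sockets */
		sk->sk_prot->release_cb(sk);

	sock_release_ownership(sk);	/* from here on, BH context may send directly again */
	if (waitqueue_active(&sk->sk_lock.wq))
		wake_up(&sk->sk_lock.wq);
	spin_unlock_bh(&sk->sk_lock.slock);
}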
net/smc/af_smc.c
@@ -193,12 +193,27 @@ void smc_unhash_sk(struct sock *sk)
 }
 EXPORT_SYMBOL_GPL(smc_unhash_sk);
 
+/* This will be called before the user really releases the sock_lock. So
+ * do the work here that we skipped in the BH context because the user
+ * held the sock_lock.
+ */
+static void smc_release_cb(struct sock *sk)
+{
+	struct smc_sock *smc = smc_sk(sk);
+
+	if (smc->conn.tx_in_release_sock) {
+		smc_tx_pending(&smc->conn);
+		smc->conn.tx_in_release_sock = false;
+	}
+}
+
 struct proto smc_proto = {
 	.name		= "SMC",
 	.owner		= THIS_MODULE,
 	.keepalive	= smc_set_keepalive,
 	.hash		= smc_hash_sk,
 	.unhash		= smc_unhash_sk,
+	.release_cb	= smc_release_cb,
 	.obj_size	= sizeof(struct smc_sock),
 	.h.smc_hash	= &smc_v4_hashinfo,
 	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
@@ -211,6 +226,7 @@ struct proto smc_proto6 = {
 	.keepalive	= smc_set_keepalive,
 	.hash		= smc_hash_sk,
 	.unhash		= smc_unhash_sk,
+	.release_cb	= smc_release_cb,
 	.obj_size	= sizeof(struct smc_sock),
 	.h.smc_hash	= &smc_v6_hashinfo,
 	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
net/smc/smc.h
@@ -213,6 +213,10 @@ struct smc_connection {
						 * data still pending
						 */
 	char			urg_rx_byte;	/* urgent byte */
+	bool			tx_in_release_sock;
+						/* flush pending tx data in
+						 * sock release_cb()
+						 */
 	atomic_t		bytes_to_rcv;	/* arrived data,
						 * not yet received
						 */
net/smc/smc_cdc.c
@@ -49,10 +49,15 @@ static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd,
 	}
 
 	if (atomic_dec_and_test(&conn->cdc_pend_tx_wr)) {
-		/* If this is the last pending WR complete, we must push to
-		 * prevent hang when autocork enabled.
+		/* If the user owns the sock_lock, mark that the connection
+		 * needs sending. The user context will try to send when it
+		 * releases the sock_lock, in smc_release_cb().
 		 */
-		smc_tx_sndbuf_nonempty(conn);
+		if (sock_owned_by_user(&smc->sk))
+			conn->tx_in_release_sock = true;
+		else
+			smc_tx_pending(conn);
+
 		if (unlikely(wq_has_sleeper(&conn->cdc_pend_tx_wq)))
 			wake_up(&conn->cdc_pend_tx_wq);
 	}
@@ -355,8 +360,12 @@ static void smc_cdc_msg_recv_action(struct smc_sock *smc,
 	/* trigger sndbuf consumer: RDMA write into peer RMBE and CDC */
 	if ((diff_cons && smc_tx_prepared_sends(conn)) ||
 	    conn->local_rx_ctrl.prod_flags.cons_curs_upd_req ||
-	    conn->local_rx_ctrl.prod_flags.urg_data_pending)
-		smc_tx_sndbuf_nonempty(conn);
+	    conn->local_rx_ctrl.prod_flags.urg_data_pending) {
+		if (!sock_owned_by_user(&smc->sk))
+			smc_tx_pending(conn);
+		else
+			conn->tx_in_release_sock = true;
+	}
 
 	if (diff_cons && conn->urg_tx_pend &&
 	    atomic_read(&conn->peer_rmbe_space) == conn->peer_rmbe_size) {