Commit 9e29db0e authored by Chris Mason, committed by Andy Grover

RDS: Use a generation counter to avoid rds_send_xmit loop

rds_send_xmit is required to loop around after it releases the lock
because someone else could have done a trylock, found someone working
on the list, and backed off.

But once we drop our lock, it is possible that someone else comes
in and makes progress on the list.  We should detect this and not loop
around if another process is actually working on the list.

This patch adds a generation counter that is bumped every time we
take the lock and do some send work.  If a retry notices that someone
else has bumped the generation counter, it does not need to loop around
and continue working.
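
The pattern is small enough to model outside the kernel.  Below is a
compilable userspace sketch of the same generation-counter idea, using
C11 atomics and a pthread trylock in place of the kernel's atomic_t and
c_send_lock; the struct and function names are hypothetical stand-ins,
not the actual RDS code.

#include <stdatomic.h>
#include <pthread.h>

struct conn {
	pthread_mutex_t send_lock;	/* stands in for c_send_lock */
	atomic_int send_generation;	/* stands in for c_send_generation */
	atomic_bool queue_nonempty;	/* stands in for !list_empty(&c_send_queue) */
};

void send_xmit(struct conn *c)
{
	int gen;

restart:
	if (pthread_mutex_trylock(&c->send_lock) != 0)
		return;		/* another sender is active; it will see our work */

	/* Bump the generation so racing senders can tell we made progress. */
	gen = atomic_fetch_add(&c->send_generation, 1) + 1;

	/* ... drain the send queue here ... */
	atomic_store(&c->queue_nonempty, false);

	pthread_mutex_unlock(&c->send_lock);

	/* A message may have been queued after we stopped draining. */
	atomic_thread_fence(memory_order_seq_cst);	/* models smp_mb() */
	if (atomic_load(&c->queue_nonempty)) {
		/*
		 * Retry only if nobody bumped the generation behind us; if
		 * it moved, another sender already took the lock after we
		 * did and will drain the queue itself.
		 */
		if (gen == atomic_load(&c->send_generation))
			goto restart;
	}
}
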
Signed-off-by: Chris Mason <chris.mason@oracle.com>
Signed-off-by: Andy Grover <andy.grover@oracle.com>
parent acfcd4d4
@@ -147,6 +147,7 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
 	conn->c_next_tx_seq = 1;
 	spin_lock_init(&conn->c_send_lock);
+	atomic_set(&conn->c_send_generation, 1);
 	INIT_LIST_HEAD(&conn->c_send_queue);
 	INIT_LIST_HEAD(&conn->c_retrans);
...
@@ -92,6 +92,7 @@ struct rds_connection {
 	struct rds_cong_map	*c_fcong;
 	spinlock_t		c_send_lock;	/* protect send ring */
+	atomic_t		c_send_generation;
 	struct rds_message	*c_xmit_rm;
 	unsigned long		c_xmit_sg;
 	unsigned int		c_xmit_hdr_off;
...
@@ -112,6 +112,7 @@ int rds_send_xmit(struct rds_connection *conn)
 	unsigned int tmp;
 	struct scatterlist *sg;
 	int ret = 0;
+	int gen = 0;
 	LIST_HEAD(to_be_dropped);

 restart:
@@ -134,6 +135,8 @@ int rds_send_xmit(struct rds_connection *conn)
 	if (conn->c_trans->xmit_prepare)
 		conn->c_trans->xmit_prepare(conn);

+	gen = atomic_inc_return(&conn->c_send_generation);
+
 	/*
 	 * spin trying to push headers and data down the connection until
 	 * the connection doesn't make forward progress.
@@ -359,13 +362,13 @@ int rds_send_xmit(struct rds_connection *conn)
 	if (ret == 0) {
 		/* A simple bit test would be way faster than taking the
 		 * spin lock */
-		spin_lock_irqsave(&conn->c_lock, flags);
+		smp_mb();
 		if (!list_empty(&conn->c_send_queue)) {
 			rds_stats_inc(s_send_lock_queue_raced);
-			spin_unlock_irqrestore(&conn->c_lock, flags);
-			goto restart;
+			if (gen == atomic_read(&conn->c_send_generation)) {
+				goto restart;
+			}
 		}
-		spin_unlock_irqrestore(&conn->c_lock, flags);
 	}
 out:
 	return ret;
...
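
Dropping the c_lock round trip in the last hunk is only safe if the
queuing side pairs with the sender's smp_mb(): whoever adds a message
must either take the send lock themselves or be seen by the sender's
final queue check.  Continuing the userspace model above (again with
hypothetical names, not the actual RDS enqueue path):

/* Enqueue side: publish the message, full barrier, then try to send.
 * Paired with the fence before send_xmit()'s final queue check, at
 * least one of the two threads is guaranteed to see the other's
 * update, so a message cannot be stranded with no sender running.
 */
void queue_message(struct conn *c)
{
	atomic_store(&c->queue_nonempty, true);	/* models list_add_tail() */
	atomic_thread_fence(memory_order_seq_cst);
	send_xmit(c);	/* trylock; on failure the lock holder re-checks */
}
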