Commit d4700e20 authored by Chris Leech, committed by Greg Kroah-Hartman

scsi: libiscsi: add lock around task lists to fix list corruption regression

commit 6f8830f5 upstream.

There's a rather long-standing regression from the commit "libiscsi:
Reduce locking contention in fast path".

Depending on iSCSI target behavior, it's possible to hit the case in
iscsi_complete_task where the task is still on a pending list
(!list_empty(&task->running)). When that happens, the task is removed
from the list while holding the session back_lock, but other task list
modifications occur under the frwd_lock. That leads to linked list
corruption and, eventually, a panicked system.
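
The failure mode is the classic one for a doubly linked list mutated
under two different locks. As a minimal userspace sketch of that
anti-pattern (not kernel code; the names queue, task_running, back_lock,
and frwd_lock are hypothetical stand-ins for the libiscsi structures,
with pthread mutexes in place of spinlocks), each thread below takes a
lock around its list operation, but not the same lock, so the pointer
updates still race:

#include <pthread.h>
#include <stdio.h>

struct node { struct node *prev, *next; };

/* list head, analogous to conn->cmdqueue */
static struct node queue = { &queue, &queue };
/* list entry, analogous to task->running; self-linked means "not queued" */
static struct node task_running = { &task_running, &task_running };

static pthread_mutex_t back_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t frwd_lock = PTHREAD_MUTEX_INITIALIZER;

static int list_empty(const struct node *n) { return n->next == n; }

static void list_add_tail(struct node *n, struct node *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

static void list_del_init(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	n->prev = n->next = n;
}

/* completion path analogue: unlinks the entry under back_lock */
static void *completer(void *unused)
{
	for (int i = 0; i < 1000000; i++) {
		pthread_mutex_lock(&back_lock);
		if (!list_empty(&task_running))
			list_del_init(&task_running);
		pthread_mutex_unlock(&back_lock);
	}
	return NULL;
}

/* queueing path analogue: links the entry under frwd_lock, so the two
 * threads never actually exclude each other */
static void *queuer(void *unused)
{
	for (int i = 0; i < 1000000; i++) {
		pthread_mutex_lock(&frwd_lock);
		if (list_empty(&task_running))
			list_add_tail(&task_running, &queue);
		pthread_mutex_unlock(&frwd_lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, completer, NULL);
	pthread_create(&b, NULL, queuer, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/* with two separate locks the head's linkage is routinely torn */
	if (queue.next->prev != &queue || queue.prev->next != &queue)
		printf("list corrupted\n");
	return 0;
}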

Rather than back out the session lock split entirely, and in order to
keep some of the performance gains, this patch adds another lock to
maintain the integrity of the task lists.
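
One design note worth calling out (as read from the hunks below, not
stated elsewhere in the message): the new taskqueuelock is taken
strictly innermost under either session lock and is always dropped
before transmit or PDU-preparation work, which is what preserves most
of the fast-path parallelism:

/* Lock nesting after this patch, as read from the diff below:
 *
 *   session->frwd_lock                 (iscsi_data_xmit, iscsi_queuecommand)
 *       conn->taskqueuelock            (mgmtqueue/cmdqueue/requeue list ops)
 *
 *   session->back_lock                 (completion/RX path)
 *       conn->taskqueuelock            (unlinking task->running)
 *
 * taskqueuelock is never held across xmit_task() or
 * iscsi_prep_scsi_cmd_pdu(), so queue manipulation is serialized while
 * transmission itself still runs outside the new lock.
 */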

Major enterprise-supported kernels have been backing out the lock split
for a while now, thanks to the efforts at IBM, where a lab setup has the
most reliable reproducer I've seen for this issue. This patch has been
tested there successfully.
Signed-off-by: Chris Leech <cleech@redhat.com>
Fixes: 659743b0 ("[SCSI] libiscsi: Reduce locking contention in fast path")
Reported-by: Prashantha Subbarao <psubbara@us.ibm.com>
Reviewed-by: Guilherme G. Piccoli <gpiccoli@linux.vnet.ibm.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 5b769ee1
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -560,8 +560,12 @@ static void iscsi_complete_task(struct iscsi_task *task, int state)
 	WARN_ON_ONCE(task->state == ISCSI_TASK_FREE);
 	task->state = state;
 
-	if (!list_empty(&task->running))
+	spin_lock_bh(&conn->taskqueuelock);
+	if (!list_empty(&task->running)) {
+		pr_debug_once("%s while task on list", __func__);
 		list_del_init(&task->running);
+	}
+	spin_unlock_bh(&conn->taskqueuelock);
 
 	if (conn->task == task)
 		conn->task = NULL;
@@ -783,7 +787,9 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 		if (session->tt->xmit_task(task))
 			goto free_task;
 	} else {
+		spin_lock_bh(&conn->taskqueuelock);
 		list_add_tail(&task->running, &conn->mgmtqueue);
+		spin_unlock_bh(&conn->taskqueuelock);
 		iscsi_conn_queue_work(conn);
 	}
 
@@ -1474,8 +1480,10 @@ void iscsi_requeue_task(struct iscsi_task *task)
 	 * this may be on the requeue list already if the xmit_task callout
 	 * is handling the r2ts while we are adding new ones
 	 */
+	spin_lock_bh(&conn->taskqueuelock);
 	if (list_empty(&task->running))
 		list_add_tail(&task->running, &conn->requeue);
+	spin_unlock_bh(&conn->taskqueuelock);
 	iscsi_conn_queue_work(conn);
 }
 EXPORT_SYMBOL_GPL(iscsi_requeue_task);
@@ -1512,22 +1520,26 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
 	 * only have one nop-out as a ping from us and targets should not
 	 * overflow us with nop-ins
 	 */
+	spin_lock_bh(&conn->taskqueuelock);
 check_mgmt:
 	while (!list_empty(&conn->mgmtqueue)) {
 		conn->task = list_entry(conn->mgmtqueue.next,
 					struct iscsi_task, running);
 		list_del_init(&conn->task->running);
+		spin_unlock_bh(&conn->taskqueuelock);
 		if (iscsi_prep_mgmt_task(conn, conn->task)) {
 			/* regular RX path uses back_lock */
 			spin_lock_bh(&conn->session->back_lock);
 			__iscsi_put_task(conn->task);
 			spin_unlock_bh(&conn->session->back_lock);
 			conn->task = NULL;
+			spin_lock_bh(&conn->taskqueuelock);
 			continue;
 		}
 		rc = iscsi_xmit_task(conn);
 		if (rc)
 			goto done;
+		spin_lock_bh(&conn->taskqueuelock);
 	}
 
 	/* process pending command queue */
@@ -1535,19 +1547,24 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
 		conn->task = list_entry(conn->cmdqueue.next, struct iscsi_task,
 					running);
 		list_del_init(&conn->task->running);
+		spin_unlock_bh(&conn->taskqueuelock);
 		if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
 			fail_scsi_task(conn->task, DID_IMM_RETRY);
+			spin_lock_bh(&conn->taskqueuelock);
 			continue;
 		}
 		rc = iscsi_prep_scsi_cmd_pdu(conn->task);
 		if (rc) {
 			if (rc == -ENOMEM || rc == -EACCES) {
+				spin_lock_bh(&conn->taskqueuelock);
 				list_add_tail(&conn->task->running,
 					      &conn->cmdqueue);
 				conn->task = NULL;
+				spin_unlock_bh(&conn->taskqueuelock);
 				goto done;
 			} else
 				fail_scsi_task(conn->task, DID_ABORT);
+			spin_lock_bh(&conn->taskqueuelock);
 			continue;
 		}
 		rc = iscsi_xmit_task(conn);
@@ -1558,6 +1575,7 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
 		 * we need to check the mgmt queue for nops that need to
 		 * be sent to aviod starvation
 		 */
+		spin_lock_bh(&conn->taskqueuelock);
 		if (!list_empty(&conn->mgmtqueue))
 			goto check_mgmt;
 	}
@@ -1577,12 +1595,15 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
 		conn->task = task;
 		list_del_init(&conn->task->running);
 		conn->task->state = ISCSI_TASK_RUNNING;
+		spin_unlock_bh(&conn->taskqueuelock);
 		rc = iscsi_xmit_task(conn);
 		if (rc)
 			goto done;
+		spin_lock_bh(&conn->taskqueuelock);
 		if (!list_empty(&conn->mgmtqueue))
 			goto check_mgmt;
 	}
+	spin_unlock_bh(&conn->taskqueuelock);
 	spin_unlock_bh(&conn->session->frwd_lock);
 	return -ENODATA;
 
@@ -1738,7 +1759,9 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
 			goto prepd_reject;
 		}
 	} else {
+		spin_lock_bh(&conn->taskqueuelock);
 		list_add_tail(&task->running, &conn->cmdqueue);
+		spin_unlock_bh(&conn->taskqueuelock);
 		iscsi_conn_queue_work(conn);
 	}
 
@@ -2897,6 +2920,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
 	INIT_LIST_HEAD(&conn->mgmtqueue);
 	INIT_LIST_HEAD(&conn->cmdqueue);
 	INIT_LIST_HEAD(&conn->requeue);
+	spin_lock_init(&conn->taskqueuelock);
 	INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
 
 	/* allocate login_task used for the login/text sequences */
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -196,6 +196,7 @@ struct iscsi_conn {
 	struct iscsi_task	*task;		/* xmit task in progress */
 
 	/* xmit */
+	spinlock_t		taskqueuelock;  /* protects the next three lists */
 	struct list_head	mgmtqueue;	/* mgmt (control) xmit queue */
 	struct list_head	cmdqueue;	/* data-path cmd queue */
 	struct list_head	requeue;	/* tasks needing another run */