Commit f2b7daf5 authored by Asias He, committed by Nicholas Bellinger

tcm_vhost: Refactor the lock nesting rule

We want to use tcm_vhost_mutex to make sure hotplug/hotunplug will not
happen while set_endpoint/clear_endpoint is in progress.

Signed-off-by: Asias He <asias@redhat.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
parent d3682b1a
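As a purely illustrative aside (not part of the commit), the nesting rule the patch documents can be sketched in a minimal, self-contained userspace program. pthread mutexes stand in for tcm_vhost_mutex, vs->dev.mutex, tpg->tv_tpg_mutex and vq->mutex, and the function names are hypothetical; the point is the acquire/release order and why taking tcm_vhost_mutex outermost keeps hotplug/hotunplug from interleaving with endpoint setup.

/*
 * Hypothetical illustration only; nothing below appears in the patch.
 * Acquire order mirrors the comment the commit adds:
 *   tcm_vhost_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t tcm_vhost_mutex = PTHREAD_MUTEX_INITIALIZER; /* subsystem-wide    */
static pthread_mutex_t dev_mutex       = PTHREAD_MUTEX_INITIALIZER; /* vs->dev.mutex     */
static pthread_mutex_t tv_tpg_mutex    = PTHREAD_MUTEX_INITIALIZER; /* tpg->tv_tpg_mutex */
static pthread_mutex_t vq_mutex        = PTHREAD_MUTEX_INITIALIZER; /* vq->mutex         */

/* set_endpoint-style path: outermost lock first, innermost last. */
static void set_endpoint_like(void)
{
	pthread_mutex_lock(&tcm_vhost_mutex);
	pthread_mutex_lock(&dev_mutex);
	pthread_mutex_lock(&tv_tpg_mutex);
	pthread_mutex_lock(&vq_mutex);

	puts("endpoint configured under the full lock chain");

	pthread_mutex_unlock(&vq_mutex);        /* release in strict reverse order */
	pthread_mutex_unlock(&tv_tpg_mutex);
	pthread_mutex_unlock(&dev_mutex);
	pthread_mutex_unlock(&tcm_vhost_mutex);
}

/* hotplug-style path: also starts from tcm_vhost_mutex, so it cannot
 * interleave with set_endpoint_like() above. */
static void hotplug_like(void)
{
	pthread_mutex_lock(&tcm_vhost_mutex);
	pthread_mutex_lock(&tv_tpg_mutex);

	puts("target portal group added/removed");

	pthread_mutex_unlock(&tv_tpg_mutex);
	pthread_mutex_unlock(&tcm_vhost_mutex);
}

int main(void)
{
	set_endpoint_like();
	hotplug_like();
	return 0;
}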
@@ -808,6 +808,9 @@ static void vhost_scsi_flush(struct vhost_scsi *vs)
 /*
  * Called from vhost_scsi_ioctl() context to walk the list of available
  * tcm_vhost_tpg with an active struct tcm_vhost_nexus
+ *
+ * The lock nesting rule is:
+ *    tcm_vhost_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
  */
 static int vhost_scsi_set_endpoint(
 	struct vhost_scsi *vs,
@@ -820,26 +823,27 @@ static int vhost_scsi_set_endpoint(
 	int index, ret, i, len;
 	bool match = false;
 
+	mutex_lock(&tcm_vhost_mutex);
 	mutex_lock(&vs->dev.mutex);
+
 	/* Verify that ring has been setup correctly. */
 	for (index = 0; index < vs->dev.nvqs; ++index) {
 		/* Verify that ring has been setup correctly. */
 		if (!vhost_vq_access_ok(&vs->vqs[index])) {
-			mutex_unlock(&vs->dev.mutex);
-			return -EFAULT;
+			ret = -EFAULT;
+			goto out;
 		}
 	}
 
 	len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
 	vs_tpg = kzalloc(len, GFP_KERNEL);
 	if (!vs_tpg) {
-		mutex_unlock(&vs->dev.mutex);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto out;
 	}
 	if (vs->vs_tpg)
 		memcpy(vs_tpg, vs->vs_tpg, len);
 
-	mutex_lock(&tcm_vhost_mutex);
 	list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) {
 		mutex_lock(&tv_tpg->tv_tpg_mutex);
 		if (!tv_tpg->tpg_nexus) {
@@ -854,11 +858,10 @@ static int vhost_scsi_set_endpoint(
 		if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
 			if (vs->vs_tpg && vs->vs_tpg[tv_tpg->tport_tpgt]) {
-				mutex_unlock(&tv_tpg->tv_tpg_mutex);
-				mutex_unlock(&tcm_vhost_mutex);
-				mutex_unlock(&vs->dev.mutex);
 				kfree(vs_tpg);
-				return -EEXIST;
+				mutex_unlock(&tv_tpg->tv_tpg_mutex);
+				ret = -EEXIST;
+				goto out;
 			}
 			tv_tpg->tv_tpg_vhost_count++;
 			vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
@@ -867,7 +870,6 @@ static int vhost_scsi_set_endpoint(
 		}
 		mutex_unlock(&tv_tpg->tv_tpg_mutex);
 	}
-	mutex_unlock(&tcm_vhost_mutex);
 
 	if (match) {
 		memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
@@ -893,7 +895,9 @@ static int vhost_scsi_set_endpoint(
 	kfree(vs->vs_tpg);
 	vs->vs_tpg = vs_tpg;
 
+out:
 	mutex_unlock(&vs->dev.mutex);
+	mutex_unlock(&tcm_vhost_mutex);
 	return ret;
 }
 
@@ -908,6 +912,7 @@ static int vhost_scsi_clear_endpoint(
 	int index, ret, i;
 	u8 target;
 
+	mutex_lock(&tcm_vhost_mutex);
 	mutex_lock(&vs->dev.mutex);
 	/* Verify that ring has been setup correctly. */
 	for (index = 0; index < vs->dev.nvqs; ++index) {
@@ -918,8 +923,8 @@ static int vhost_scsi_clear_endpoint(
 	}
 
 	if (!vs->vs_tpg) {
-		mutex_unlock(&vs->dev.mutex);
-		return 0;
+		ret = 0;
+		goto err_dev;
 	}
 
 	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
@@ -965,13 +970,14 @@ static int vhost_scsi_clear_endpoint(
 	kfree(vs->vs_tpg);
 	vs->vs_tpg = NULL;
 	mutex_unlock(&vs->dev.mutex);
+	mutex_unlock(&tcm_vhost_mutex);
 
 	return 0;
 
 err_tpg:
 	mutex_unlock(&tv_tpg->tv_tpg_mutex);
 err_dev:
 	mutex_unlock(&vs->dev.mutex);
+	mutex_unlock(&tcm_vhost_mutex);
 	return ret;
 }
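The mechanical side of the same change, visible throughout the diff above, is that set_endpoint and clear_endpoint no longer unlock at every failure site; they record an error code and jump to a single label that releases the locks in reverse order. Below is a hypothetical, self-contained userspace sketch of that pattern; the pthread mutexes and function names are made up, and only the ret/goto-out idiom itself comes from the patch.

/*
 * Hypothetical sketch of the single-exit error path: set `ret` and
 * jump to `out:` instead of unlocking and returning at each failure.
 */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t outer_lock = PTHREAD_MUTEX_INITIALIZER; /* e.g. tcm_vhost_mutex */
static pthread_mutex_t inner_lock = PTHREAD_MUTEX_INITIALIZER; /* e.g. vs->dev.mutex   */

static int set_endpoint_like(bool rings_ok, bool alloc_ok)
{
	int ret = 0;

	pthread_mutex_lock(&outer_lock);
	pthread_mutex_lock(&inner_lock);

	if (!rings_ok) {		/* was: unlock + return -EFAULT */
		ret = -EFAULT;
		goto out;
	}
	if (!alloc_ok) {		/* was: unlock + return -ENOMEM */
		ret = -ENOMEM;
		goto out;
	}

	/* ... configure the endpoint ... */

out:
	/* single exit point: every path drops both locks exactly once */
	pthread_mutex_unlock(&inner_lock);
	pthread_mutex_unlock(&outer_lock);
	return ret;
}

int main(void)
{
	printf("good path -> %d\n", set_endpoint_like(true, true));
	printf("bad ring  -> %d\n", set_endpoint_like(false, true));
	printf("bad alloc -> %d\n", set_endpoint_like(true, false));
	return 0;
}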