Commit 0db6ca8a authored by Bart Van Assche, committed by Martin K. Petersen

scsi: Protect SCSI device state changes with a mutex

Serializing SCSI device state changes prevents two state changes from
occurring concurrently, e.g. the state changes in scsi_target_block() and
__scsi_remove_device(). This serialization is essential to make the patch
"Make __scsi_remove_device go straight from BLOCKED to DEL" work
reliably.

Enable this mechanism for all scsi_target_*block() callers but not for
the scsi_internal_device_unblock() calls from the mpt3sas driver because
that driver can call scsi_internal_device_unblock() from atomic context.
Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Hannes Reinecke <hare@suse.com>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 43f7571b
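
The pattern the diff below introduces is the same at every call site: take the
new per-device state_mutex around scsi_device_set_state() (or around the
*_nowait() block/unblock helpers) so that no two state transitions can
interleave. As a minimal sketch of that pattern, assuming only the symbols
used by this patch (sdev->state_mutex, scsi_device_set_state(), SDEV_OFFLINE);
the helper name example_set_offline() is hypothetical:

#include <linux/mutex.h>
#include <scsi/scsi_device.h>

/* Illustrative helper, not part of the patch: serialize one state change. */
static int example_set_offline(struct scsi_device *sdev)
{
	int ret;

	mutex_lock(&sdev->state_mutex);		/* block concurrent state changes */
	ret = scsi_device_set_state(sdev, SDEV_OFFLINE);
	mutex_unlock(&sdev->state_mutex);

	return ret;	/* non-zero if the transition was rejected */
}

The *_nowait() variants intentionally stay outside the mutex so that callers
such as mpt3sas, which may run in atomic context, can keep using them.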
@@ -1628,11 +1628,17 @@ static void scsi_eh_offline_sdevs(struct list_head *work_q,
 				  struct list_head *done_q)
 {
 	struct scsi_cmnd *scmd, *next;
+	struct scsi_device *sdev;
 
 	list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
 		sdev_printk(KERN_INFO, scmd->device, "Device offlined - "
 			    "not ready after error recovery\n");
-		scsi_device_set_state(scmd->device, SDEV_OFFLINE);
+		sdev = scmd->device;
+		mutex_lock(&sdev->state_mutex);
+		scsi_device_set_state(sdev, SDEV_OFFLINE);
+		mutex_unlock(&sdev->state_mutex);
 		scsi_eh_finish_cmd(scmd, done_q);
 	}
 	return;
...
@@ -2882,7 +2882,12 @@ static void scsi_wait_for_queuecommand(struct scsi_device *sdev)
 int
 scsi_device_quiesce(struct scsi_device *sdev)
 {
-	int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
+	int err;
+
+	mutex_lock(&sdev->state_mutex);
+	err = scsi_device_set_state(sdev, SDEV_QUIESCE);
+	mutex_unlock(&sdev->state_mutex);
+
 	if (err)
 		return err;
@@ -2910,10 +2915,11 @@ void scsi_device_resume(struct scsi_device *sdev)
 	 * so assume the state is being managed elsewhere (for example
 	 * device deleted during suspend)
 	 */
-	if (sdev->sdev_state != SDEV_QUIESCE ||
-	    scsi_device_set_state(sdev, SDEV_RUNNING))
-		return;
-	scsi_run_queue(sdev->request_queue);
+	mutex_lock(&sdev->state_mutex);
+	if (sdev->sdev_state == SDEV_QUIESCE &&
+	    scsi_device_set_state(sdev, SDEV_RUNNING) == 0)
+		scsi_run_queue(sdev->request_queue);
+	mutex_unlock(&sdev->state_mutex);
 }
 EXPORT_SYMBOL(scsi_device_resume);
@@ -3012,6 +3018,7 @@ static int scsi_internal_device_block(struct scsi_device *sdev)
 	struct request_queue *q = sdev->request_queue;
 	int err;
 
+	mutex_lock(&sdev->state_mutex);
 	err = scsi_internal_device_block_nowait(sdev);
 	if (err == 0) {
 		if (q->mq_ops)
@@ -3019,6 +3026,8 @@ static int scsi_internal_device_block(struct scsi_device *sdev)
 		else
 			scsi_wait_for_queuecommand(sdev);
 	}
+	mutex_unlock(&sdev->state_mutex);
+
 	return err;
 }
@@ -3089,7 +3098,13 @@ EXPORT_SYMBOL_GPL(scsi_internal_device_unblock_nowait);
 static int scsi_internal_device_unblock(struct scsi_device *sdev,
 					enum scsi_device_state new_state)
 {
-	return scsi_internal_device_unblock_nowait(sdev, new_state);
+	int ret;
+
+	mutex_lock(&sdev->state_mutex);
+	ret = scsi_internal_device_unblock_nowait(sdev, new_state);
+	mutex_unlock(&sdev->state_mutex);
+
+	return ret;
 }
 
 static void
...
@@ -231,6 +231,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
 	sdev->id = starget->id;
 	sdev->lun = lun;
 	sdev->channel = starget->channel;
+	mutex_init(&sdev->state_mutex);
 	sdev->sdev_state = SDEV_CREATED;
 	INIT_LIST_HEAD(&sdev->siblings);
 	INIT_LIST_HEAD(&sdev->same_target_siblings);
@@ -943,9 +944,11 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
 
 	/* set the device running here so that slave configure
 	 * may do I/O */
+	mutex_lock(&sdev->state_mutex);
 	ret = scsi_device_set_state(sdev, SDEV_RUNNING);
-	if (ret) {
+	if (ret)
 		ret = scsi_device_set_state(sdev, SDEV_BLOCK);
+	mutex_unlock(&sdev->state_mutex);
 
 	if (ret) {
 		sdev_printk(KERN_ERR, sdev,
@@ -953,7 +956,6 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
 			    scsi_device_state_name(sdev->sdev_state));
 		return SCSI_SCAN_NO_RESPONSE;
 	}
-	}
 
 	if (*bflags & BLIST_MS_192_BYTES_FOR_3F)
 		sdev->use_192_bytes_for_3f = 1;
...
@@ -719,7 +719,7 @@ static ssize_t
 store_state_field(struct device *dev, struct device_attribute *attr,
 		  const char *buf, size_t count)
 {
-	int i;
+	int i, ret;
 	struct scsi_device *sdev = to_scsi_device(dev);
 	enum scsi_device_state state = 0;
@@ -734,9 +734,11 @@ store_state_field(struct device *dev, struct device_attribute *attr,
 	if (!state)
 		return -EINVAL;
 
-	if (scsi_device_set_state(sdev, state))
-		return -EINVAL;
-	return count;
+	mutex_lock(&sdev->state_mutex);
+	ret = scsi_device_set_state(sdev, state);
+	mutex_unlock(&sdev->state_mutex);
+
+	return ret == 0 ? count : -EINVAL;
 }
 
 static ssize_t
@@ -1272,6 +1274,7 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
 void __scsi_remove_device(struct scsi_device *sdev)
 {
 	struct device *dev = &sdev->sdev_gendev;
+	int res;
 
 	/*
 	 * This cleanup path is not reentrant and while it is impossible
@@ -1282,7 +1285,15 @@ void __scsi_remove_device(struct scsi_device *sdev)
 		return;
 
 	if (sdev->is_visible) {
-		if (scsi_device_set_state(sdev, SDEV_CANCEL) != 0)
+		/*
+		 * If scsi_internal_target_block() is running concurrently,
+		 * wait until it has finished before changing the device state.
+		 */
+		mutex_lock(&sdev->state_mutex);
+		res = scsi_device_set_state(sdev, SDEV_CANCEL);
+		mutex_unlock(&sdev->state_mutex);
+
+		if (res != 0)
 			return;
 
 		bsg_unregister_queue(sdev->request_queue);
@@ -1298,7 +1309,10 @@ void __scsi_remove_device(struct scsi_device *sdev)
 	 * scsi_run_queue() invocations have finished before tearing down the
 	 * device.
 	 */
+	mutex_lock(&sdev->state_mutex);
 	scsi_device_set_state(sdev, SDEV_DEL);
+	mutex_unlock(&sdev->state_mutex);
+
 	blk_cleanup_queue(sdev->request_queue);
 	cancel_work_sync(&sdev->requeue_work);
...
@@ -554,11 +554,12 @@ int srp_reconnect_rport(struct srp_rport *rport)
 		 * invoking scsi_target_unblock() won't change the state of
 		 * these devices into running so do that explicitly.
 		 */
-		spin_lock_irq(shost->host_lock);
-		__shost_for_each_device(sdev, shost)
+		shost_for_each_device(sdev, shost) {
+			mutex_lock(&sdev->state_mutex);
 			if (sdev->sdev_state == SDEV_OFFLINE)
 				sdev->sdev_state = SDEV_RUNNING;
-		spin_unlock_irq(shost->host_lock);
+			mutex_unlock(&sdev->state_mutex);
+		}
 	} else if (rport->state == SRP_RPORT_RUNNING) {
 		/*
 		 * srp_reconnect_rport() has been invoked with fast_io_fail
...
@@ -1818,8 +1818,9 @@ static void sd_eh_reset(struct scsi_cmnd *scmd)
 static int sd_eh_action(struct scsi_cmnd *scmd, int eh_disp)
 {
 	struct scsi_disk *sdkp = scsi_disk(scmd->request->rq_disk);
+	struct scsi_device *sdev = scmd->device;
 
-	if (!scsi_device_online(scmd->device) ||
+	if (!scsi_device_online(sdev) ||
 	    !scsi_medium_access_command(scmd) ||
 	    host_byte(scmd->result) != DID_TIME_OUT ||
 	    eh_disp != SUCCESS)
@@ -1845,7 +1846,9 @@ static int sd_eh_action(struct scsi_cmnd *scmd, int eh_disp)
 	if (sdkp->medium_access_timed_out >= sdkp->max_medium_access_timeouts) {
 		scmd_printk(KERN_ERR, scmd,
 			    "Medium access timeout failure. Offlining disk!\n");
-		scsi_device_set_state(scmd->device, SDEV_OFFLINE);
+		mutex_lock(&sdev->state_mutex);
+		scsi_device_set_state(sdev, SDEV_OFFLINE);
+		mutex_unlock(&sdev->state_mutex);
 
 		return SUCCESS;
 	}
...
@@ -207,6 +207,7 @@ struct scsi_device {
 	void			*handler_data;
 
 	unsigned char		access_state;
+	struct mutex		state_mutex;
 	enum scsi_device_state	sdev_state;
 	unsigned long		sdev_data[0];
 } __attribute__((aligned(sizeof(unsigned long))));
...