Commit f2088267 authored by Dan Williams

isci: kill isci_remote_device_change_state()

Now that "stopping/stopped" are one in the same and signalled by a NULL device
pointer the rest of the device status infrastructure can be removed (->status
and ->state_lock).  The "not ready for i/o state" is replaced with a state
flag, and is evaluated under scic_lock so that we don't see transients from
taking the device reference to submitting the i/o.

This also fixes a potential leakage of can_queue slots in the rare case that
SAS_TASK_ABORTED is set at submission.
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent 209fae14
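
The core of the change is the submission path in isci_task_execute_task(): the device lookup and the IDEV_IO_READY test are both made under ihost->scic_lock, and every refused task returns its can_queue slot via isci_host_can_dequeue(), including the SAS_TASK_ABORTED case that previously leaked a slot. The stand-alone C sketch below only models that pattern; the fake_* names, the plain pthread mutex standing in for scic_lock, and the simplified slot counter are illustrative assumptions, not the driver's actual API.

```c
/* Simplified, self-contained model of the submission-path pattern described
 * in the commit message.  The fake_* helpers and the plain mutex standing in
 * for scic_lock are illustrative assumptions, not the driver's real API. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define IDEV_IO_READY_BIT  (1UL << 5)   /* models the IDEV_IO_READY flag bit */

struct fake_remote_device {
	unsigned long flags;            /* atomic bitops in the real driver */
};

struct fake_host {
	pthread_mutex_t scic_lock;      /* serializes lookup + readiness test */
	int can_queue;                  /* free submission slots (model) */
	struct fake_remote_device *dev; /* NULL once the device is stopping/stopped */
};

/* Refuse a task and give its can_queue slot back; the real driver pairs
 * isci_task_refuse() with isci_host_can_dequeue() for this. */
static void refuse_and_dequeue(struct fake_host *h, const char *why)
{
	printf("refused: %s\n", why);
	h->can_queue++;
}

static void submit_task(struct fake_host *h, bool task_aborted)
{
	struct fake_remote_device *idev;
	bool io_ready;

	/* Lookup and readiness check under one lock, so the submission path
	 * never acts on a half-transitioned device state. */
	pthread_mutex_lock(&h->scic_lock);
	idev = h->dev;                   /* NULL == stopping/stopped */
	io_ready = idev && (idev->flags & IDEV_IO_READY_BIT);
	h->can_queue--;                  /* slot consumed at submission (model) */
	pthread_mutex_unlock(&h->scic_lock);

	if (!idev)
		refuse_and_dequeue(h, "device unknown");
	else if (!io_ready)
		refuse_and_dequeue(h, "queue full, retry later");
	else if (task_aborted)
		/* The leak the commit fixes: an I/O aborted at submission
		 * must also release its slot. */
		refuse_and_dequeue(h, "task aborted at submission");
	else
		printf("submitted (slots left: %d)\n", h->can_queue);
}

int main(void)
{
	struct fake_remote_device dev = { .flags = IDEV_IO_READY_BIT };
	struct fake_host host = {
		.scic_lock = PTHREAD_MUTEX_INITIALIZER,
		.can_queue = 4,
		.dev = &dev,
	};

	submit_task(&host, false);  /* normal I/O */
	submit_task(&host, true);   /* aborted at submission: slot refunded */
	host.dev = NULL;            /* device stopped */
	submit_task(&host, false);  /* refused: device unknown */
	return 0;
}
```

In the actual driver the flag is manipulated with set_bit()/clear_bit()/test_bit() on idev->flags, as the diff below shows.
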
@@ -2504,7 +2504,6 @@ int isci_host_init(struct isci_host *isci_host)
INIT_LIST_HEAD(&idev->reqs_in_process);
INIT_LIST_HEAD(&idev->node);
spin_lock_init(&idev->state_lock);
}
return 0;
...
@@ -61,24 +61,6 @@
#include "scu_event_codes.h"
#include "task.h"
/**
* isci_remote_device_change_state() - This function gets the status of the
* remote_device object.
* @isci_device: This parameter points to the isci_remote_device object
*
* status of the object as a isci_status enum.
*/
void isci_remote_device_change_state(
struct isci_remote_device *isci_device,
enum isci_status status)
{
unsigned long flags;
spin_lock_irqsave(&isci_device->state_lock, flags);
isci_device->status = status;
spin_unlock_irqrestore(&isci_device->state_lock, flags);
}
/**
* isci_remote_device_not_ready() - This function is called by the scic when
* the remote device is not ready. We mark the isci device as ready (not
@@ -96,8 +78,7 @@ static void isci_remote_device_not_ready(struct isci_host *ihost,
if (reason == SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED)
set_bit(IDEV_GONE, &idev->flags);
else
/* device ready is actually a "not ready for io" state. */
isci_remote_device_change_state(idev, isci_ready);
clear_bit(IDEV_IO_READY, &idev->flags);
}
/**
@@ -113,7 +94,7 @@ static void isci_remote_device_ready(struct isci_host *ihost, struct isci_remote
dev_dbg(&ihost->pdev->dev,
"%s: idev = %p\n", __func__, idev);
isci_remote_device_change_state(idev, isci_ready_for_io);
set_bit(IDEV_IO_READY, &idev->flags);
if (test_and_clear_bit(IDEV_START_PENDING, &idev->flags))
wake_up(&ihost->eventq);
}
@@ -871,26 +852,6 @@ static void isci_remote_device_deconstruct(struct isci_host *ihost, struct isci_
isci_put_device(idev);
}
/**
* isci_remote_device_stop_complete() - This function is called by the scic
* when the remote device stop has completed. We mark the isci device as not
* ready and remove the isci remote device.
* @ihost: This parameter specifies the isci host object.
* @idev: This parameter specifies the remote device.
* @status: This parameter specifies status of the completion.
*
*/
static void isci_remote_device_stop_complete(struct isci_host *ihost,
struct isci_remote_device *idev)
{
dev_dbg(&ihost->pdev->dev, "%s: complete idev = %p\n", __func__, idev);
isci_remote_device_change_state(idev, isci_stopped);
/* after stop, we can tear down resources. */
isci_remote_device_deconstruct(ihost, idev);
}
static void scic_sds_remote_device_stopped_state_enter(struct sci_base_state_machine *sm)
{
struct scic_sds_remote_device *sci_dev = container_of(sm, typeof(*sci_dev), sm);
@@ -903,7 +864,7 @@ static void scic_sds_remote_device_stopped_state_enter(struct sci_base_state_mac
*/
prev_state = sci_dev->sm.previous_state_id;
if (prev_state == SCI_DEV_STOPPING)
isci_remote_device_stop_complete(scic_to_ihost(scic), idev);
isci_remote_device_deconstruct(scic_to_ihost(scic), idev);
scic_sds_controller_remote_device_stopped(scic, sci_dev);
}
@@ -1301,8 +1262,6 @@ isci_remote_device_alloc(struct isci_host *ihost, struct isci_port *iport)
if (WARN_ONCE(!list_empty(&idev->node), "found non-idle remote device\n"))
return NULL;
isci_remote_device_change_state(idev, isci_freed);
return idev;
}
@@ -1315,6 +1274,7 @@ void isci_remote_device_release(struct kref *kref)
idev->isci_port = NULL;
clear_bit(IDEV_START_PENDING, &idev->flags);
clear_bit(IDEV_STOP_PENDING, &idev->flags);
clear_bit(IDEV_IO_READY, &idev->flags);
clear_bit(IDEV_GONE, &idev->flags);
clear_bit(IDEV_EH, &idev->flags);
smp_mb__before_clear_bit();
@@ -1341,7 +1301,6 @@ enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_rem
spin_lock_irqsave(&ihost->scic_lock, flags);
idev->domain_dev->lldd_dev = NULL; /* disable new lookups */
set_bit(IDEV_GONE, &idev->flags);
isci_remote_device_change_state(idev, isci_stopping);
spin_unlock_irqrestore(&ihost->scic_lock, flags);
/* Kill all outstanding requests. */
@@ -1430,7 +1389,6 @@ int isci_remote_device_found(struct domain_device *domain_dev)
spin_lock_irq(&isci_host->scic_lock);
isci_device->domain_dev = domain_dev;
isci_device->isci_port = isci_port;
isci_remote_device_change_state(isci_device, isci_starting);
list_add_tail(&isci_device->node, &isci_port->remote_dev_list);
set_bit(IDEV_START_PENDING, &isci_device->flags);
...
@@ -130,19 +130,18 @@ struct scic_sds_remote_device {
};
struct isci_remote_device {
enum isci_status status;
#define IDEV_START_PENDING 0
#define IDEV_STOP_PENDING 1
#define IDEV_ALLOCATED 2
#define IDEV_EH 3
#define IDEV_GONE 4
#define IDEV_IO_READY 5
unsigned long flags;
struct kref kref;
struct isci_port *isci_port;
struct domain_device *domain_dev;
struct list_head node;
struct list_head reqs_in_process;
spinlock_t state_lock;
struct scic_sds_remote_device sci;
};
@@ -178,8 +177,6 @@ bool isci_device_is_reset_pending(struct isci_host *ihost,
struct isci_remote_device *idev);
void isci_device_clear_reset_pending(struct isci_host *ihost,
struct isci_remote_device *idev);
void isci_remote_device_change_state(struct isci_remote_device *idev,
enum isci_status status);
/**
* scic_remote_device_stop() - This method will stop both transmission and
* reception of link activity for the supplied remote device. This method
...
@@ -147,10 +147,10 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
{
struct isci_host *ihost = dev_to_ihost(task->dev);
struct isci_remote_device *idev;
enum sci_status status;
unsigned long flags;
bool io_ready;
int ret;
enum sci_status status;
enum isci_status device_status;
dev_dbg(&ihost->pdev->dev, "%s: num=%d\n", __func__, num);
@@ -163,64 +163,40 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
}
for_each_sas_task(num, task) {
dev_dbg(&ihost->pdev->dev,
"task = %p, num = %d; dev = %p; cmd = %p\n",
task, num, task->dev, task->uldd_task);
spin_lock_irqsave(&ihost->scic_lock, flags);
idev = isci_lookup_device(task->dev);
io_ready = idev ? test_bit(IDEV_IO_READY, &idev->flags) : 0;
spin_unlock_irqrestore(&ihost->scic_lock, flags);
if (idev)
device_status = idev->status;
else
device_status = isci_freed;
/* From this point onward, any process that needs to guarantee
* that there is no kernel I/O being started will have to wait
* for the quiesce spinlock.
*/
if (device_status != isci_ready_for_io) {
dev_dbg(&ihost->pdev->dev,
"task: %p, num: %d dev: %p idev: %p:%#lx cmd = %p\n",
task, num, task->dev, idev, idev ? idev->flags : 0,
task->uldd_task);
/* Forces a retry from scsi mid layer. */
dev_dbg(&ihost->pdev->dev,
"%s: task %p: isci_host->status = %d, "
"device = %p; device_status = 0x%x\n\n",
__func__,
task,
isci_host_get_state(ihost),
idev,
device_status);
if (device_status == isci_ready) {
/* Indicate QUEUE_FULL so that the scsi midlayer
* retries.
*/
isci_task_refuse(ihost, task,
SAS_TASK_COMPLETE,
SAS_QUEUE_FULL);
} else {
/* Else, the device is going down. */
isci_task_refuse(ihost, task,
SAS_TASK_UNDELIVERED,
SAS_DEVICE_UNKNOWN);
}
if (!idev) {
isci_task_refuse(ihost, task, SAS_TASK_UNDELIVERED,
SAS_DEVICE_UNKNOWN);
isci_host_can_dequeue(ihost, 1);
} else if (!io_ready) {
/* Indicate QUEUE_FULL so that the scsi midlayer
* retries.
*/
isci_task_refuse(ihost, task, SAS_TASK_COMPLETE,
SAS_QUEUE_FULL);
isci_host_can_dequeue(ihost, 1);
} else {
/* There is a device and it's ready for I/O. */
spin_lock_irqsave(&task->task_state_lock, flags);
if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
/* The I/O was aborted. */
spin_unlock_irqrestore(&task->task_state_lock,
flags);
isci_task_refuse(ihost, task,
SAS_TASK_UNDELIVERED,
SAM_STAT_TASK_ABORTED);
/* The I/O was aborted. */
isci_host_can_dequeue(ihost, 1);
} else {
task->task_state_flags |= SAS_TASK_AT_INITIATOR;
spin_unlock_irqrestore(&task->task_state_lock, flags);
@@ -323,11 +299,11 @@ int isci_task_execute_tmf(struct isci_host *ihost,
/* sanity check, return TMF_RESP_FUNC_FAILED
* if the device is not there and ready.
*/
if (!isci_device || isci_device->status != isci_ready_for_io) {
if (!isci_device || !test_bit(IDEV_IO_READY, &isci_device->flags)) {
dev_dbg(&ihost->pdev->dev,
"%s: isci_device = %p not ready (%d)\n",
"%s: isci_device = %p not ready (%#lx)\n",
__func__,
isci_device, isci_device->status);
isci_device, isci_device ? isci_device->flags : 0);
return TMF_RESP_FUNC_FAILED;
} else
dev_dbg(&ihost->pdev->dev,
...