Commit 1077a574 authored by Dan Williams

isci: fix incorrect assumptions about task->dev and task->dev->port being NULL

A domain_device has the same lifetime as its related scsi_target.  The
scsi_target is reference counted based on outstanding commands, so it is
safe to assume that if we have a valid sas_task, its ->dev pointer is
also valid.

The asd_sas_port of a domain_device has the same lifetime as the driver,
so it can also never be NULL as long as the sas_task is valid and the
driver is loaded.

This also cleans up isci_task_complete_for_upper_layer(): it is renamed
to isci_task_refuse(), and its isci_completion_selection parameter is
dropped since every caller passed isci_perform_normal_io_completion.
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent 34cad85d
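
The lookup these lifetime guarantees enable shows up as the first change to
isci_task_execute_task() below: the isci_host is now derived directly from
task->dev->port->ha->lldd_ha, with no NULL checks along the way.  A
standalone sketch of that pointer chain, using simplified stand-ins for the
libsas structures (the real definitions live in include/scsi/libsas.h, and
the helper name here is only illustrative, not part of this patch):

    struct isci_host;                                     /* opaque LLDD host state       */
    struct sas_ha_struct  { void *lldd_ha; };             /* LLDD-private pointer         */
    struct asd_sas_port   { struct sas_ha_struct *ha; };  /* lives as long as the driver  */
    struct domain_device  { struct asd_sas_port *port; }; /* lives as long as the target  */
    struct sas_task       { struct domain_device *dev; }; /* valid task => valid ->dev    */

    /* Illustrative helper: with the lifetimes above, a valid sas_task
     * implies every link in the chain is non-NULL, so no checks are needed. */
    static inline struct isci_host *task_to_ihost(struct sas_task *task)
    {
            return (struct isci_host *)task->dev->port->ha->lldd_ha;
    }
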
@@ -67,39 +67,36 @@
 #include "task.h"
 
 /**
- * isci_task_complete_for_upper_layer() - This function completes the request
- *    to the upper layer driver in the case where an I/O needs to be completed
- *    back in the submit path.
- * @host: This parameter is a pointer to the host on which the the request
- *    should be queued (either as an error or success).
- * @task: This parameter is the completed request.
- * @response: This parameter is the response code for the completed task.
- * @status: This parameter is the status code for the completed task.
+ * isci_task_refuse() - complete the request to the upper layer driver in
+ *     the case where an I/O needs to be completed back in the submit path.
+ * @ihost: host on which the the request was queued
+ * @task: request to complete
+ * @response: response code for the completed task.
+ * @status: status code for the completed task.
  *
- * none.
  */
-static void isci_task_complete_for_upper_layer(struct sas_task *task,
-        enum service_response response,
-        enum exec_status status,
-        enum isci_completion_selection task_notification_selection)
+static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task,
+                             enum service_response response,
+                             enum exec_status status)
 {
-        unsigned long flags = 0;
-        struct Scsi_Host *host = NULL;
+        enum isci_completion_selection disposition;
 
-        task_notification_selection
-                = isci_task_set_completion_status(task, response, status,
-                                                  task_notification_selection);
+        disposition = isci_perform_normal_io_completion;
+        disposition = isci_task_set_completion_status(task, response, status,
+                                                      disposition);
 
         /* Tasks aborted specifically by a call to the lldd_abort_task
          * function should not be completed to the host in the regular path.
          */
-        switch (task_notification_selection) {
+        switch (disposition) {
         case isci_perform_normal_io_completion:
                 /* Normal notification (task_done) */
-                dev_dbg(task->dev->port->ha->dev,
+                dev_dbg(&ihost->pdev->dev,
                         "%s: Normal - task = %p, response=%d, status=%d\n",
                         __func__, task, response, status);
 
-                task->lldd_task = NULL;
-
                 if (dev_is_sata(task->dev)) {
                         /* Since we are still in the submit path, and since
                          * libsas takes the host lock on behalf of SATA
@@ -107,44 +104,36 @@ static void isci_task_complete_for_upper_layer(struct sas_task *task,
                          * before we can call back and report the I/O
                          * submission error.
                          */
-                        if (task->dev
-                            && task->dev->port
-                            && task->dev->port->ha) {
-                                host = task->dev->port->ha->core.shost;
-                                raw_local_irq_save(flags);
-                                spin_unlock(host->host_lock);
-                        }
+                        unsigned long flags;
+
+                        raw_local_irq_save(flags);
+                        spin_unlock(ihost->shost->host_lock);
                         task->task_done(task);
-                        if (host) {
-                                spin_lock(host->host_lock);
-                                raw_local_irq_restore(flags);
-                        }
+                        spin_lock(ihost->shost->host_lock);
+                        raw_local_irq_restore(flags);
                 } else
                         task->task_done(task);
+                task->lldd_task = NULL;
                 break;
 
         case isci_perform_aborted_io_completion:
                 /* No notification because this request is already in the
                  * abort path.
                  */
-                dev_warn(task->dev->port->ha->dev,
+                dev_warn(&ihost->pdev->dev,
                          "%s: Aborted - task = %p, response=%d, status=%d\n",
                          __func__, task, response, status);
                 break;
 
         case isci_perform_error_io_completion:
                 /* Use sas_task_abort */
-                dev_warn(task->dev->port->ha->dev,
+                dev_warn(&ihost->pdev->dev,
                          "%s: Error - task = %p, response=%d, status=%d\n",
                          __func__, task, response, status);
                 sas_task_abort(task);
                 break;
 
         default:
-                dev_warn(task->dev->port->ha->dev,
+                dev_warn(&ihost->pdev->dev,
                          "%s: isci task notification default case!",
                          __func__);
                 sas_task_abort(task);
@@ -152,6 +141,10 @@ static void isci_task_complete_for_upper_layer(struct sas_task *task,
         }
 }
 
+#define for_each_sas_task(num, task) \
+        for (; num > 0; num--,\
+             task = list_entry(task->list.next, struct sas_task, list))
+
 /**
  * isci_task_execute_task() - This function is one of the SAS Domain Template
  *    functions. This function is called by libsas to send a task down to
@@ -164,7 +157,7 @@ static void isci_task_complete_for_upper_layer(struct sas_task *task,
  */
 int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
 {
-        struct isci_host *isci_host;
+        struct isci_host *ihost = task->dev->port->ha->lldd_ha;
         struct isci_request *request = NULL;
         struct isci_remote_device *device;
         unsigned long flags;
@@ -172,60 +165,23 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
         enum sci_status status;
         enum isci_status device_status;
 
-        dev_dbg(task->dev->port->ha->dev, "%s: num=%d\n", __func__, num);
-
-        if ((task->dev == NULL) || (task->dev->port == NULL)) {
-
-                /* Indicate SAS_TASK_UNDELIVERED, so that the scsi midlayer
-                 * removes the target.
-                 */
-                isci_task_complete_for_upper_layer(
-                        task,
-                        SAS_TASK_UNDELIVERED,
-                        SAS_DEVICE_UNKNOWN,
-                        isci_perform_normal_io_completion
-                        );
-                return 0;  /* The I/O was accepted (and failed). */
-        }
-
-        isci_host = isci_host_from_sas_ha(task->dev->port->ha);
+        dev_dbg(&ihost->pdev->dev, "%s: num=%d\n", __func__, num);
 
         /* Check if we have room for more tasks */
-        ret = isci_host_can_queue(isci_host, num);
+        ret = isci_host_can_queue(ihost, num);
 
         if (ret) {
-                dev_warn(task->dev->port->ha->dev, "%s: queue full\n", __func__);
+                dev_warn(&ihost->pdev->dev, "%s: queue full\n", __func__);
                 return ret;
         }
 
-        do {
-                dev_dbg(task->dev->port->ha->dev,
+        for_each_sas_task(num, task) {
+                dev_dbg(&ihost->pdev->dev,
                         "task = %p, num = %d; dev = %p; cmd = %p\n",
                         task, num, task->dev, task->uldd_task);
-
-                if ((task->dev == NULL) || (task->dev->port == NULL)) {
-                        dev_warn(task->dev->port->ha->dev,
-                                 "%s: task %p's port or dev == NULL!\n",
-                                 __func__, task);
-
-                        /* Indicate SAS_TASK_UNDELIVERED, so that the scsi
-                         * midlayer removes the target.
-                         */
-                        isci_task_complete_for_upper_layer(
-                                task,
-                                SAS_TASK_UNDELIVERED,
-                                SAS_DEVICE_UNKNOWN,
-                                isci_perform_normal_io_completion
-                                );
-                        /* We don't have a valid host reference, so we
-                         * can't control the host queueing condition.
-                         */
-                        goto next_task;
-                }
-
                 device = isci_dev_from_domain_dev(task->dev);
-                isci_host = isci_host_from_sas_ha(task->dev->port->ha);
 
                 if (device)
                         device_status = device->status;
                 else
@@ -239,34 +195,28 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
                 if (device_status != isci_ready_for_io) {
 
                         /* Forces a retry from scsi mid layer. */
-                        dev_warn(task->dev->port->ha->dev,
+                        dev_warn(&ihost->pdev->dev,
                                  "%s: task %p: isci_host->status = %d, "
                                  "device = %p; device_status = 0x%x\n\n",
                                  __func__,
                                  task,
-                                 isci_host_get_state(isci_host),
+                                 isci_host_get_state(ihost),
                                  device, device_status);
 
                         if (device_status == isci_ready) {
                                 /* Indicate QUEUE_FULL so that the scsi midlayer
                                  * retries.
                                  */
-                                isci_task_complete_for_upper_layer(
-                                        task,
-                                        SAS_TASK_COMPLETE,
-                                        SAS_QUEUE_FULL,
-                                        isci_perform_normal_io_completion
-                                        );
+                                isci_task_refuse(ihost, task,
+                                                 SAS_TASK_COMPLETE,
+                                                 SAS_QUEUE_FULL);
                         } else {
                                 /* Else, the device is going down. */
-                                isci_task_complete_for_upper_layer(
-                                        task,
-                                        SAS_TASK_UNDELIVERED,
-                                        SAS_DEVICE_UNKNOWN,
-                                        isci_perform_normal_io_completion
-                                        );
+                                isci_task_refuse(ihost, task,
+                                                 SAS_TASK_UNDELIVERED,
+                                                 SAS_DEVICE_UNKNOWN);
                         }
-                        isci_host_can_dequeue(isci_host, 1);
+                        isci_host_can_dequeue(ihost, 1);
                 } else {
                         /* There is a device and it's ready for I/O. */
                         spin_lock_irqsave(&task->task_state_lock, flags);
@@ -276,12 +226,9 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
                                 spin_unlock_irqrestore(&task->task_state_lock,
                                                        flags);
 
-                                isci_task_complete_for_upper_layer(
-                                        task,
-                                        SAS_TASK_UNDELIVERED,
-                                        SAM_STAT_TASK_ABORTED,
-                                        isci_perform_normal_io_completion
-                                        );
+                                isci_task_refuse(ihost, task,
+                                                 SAS_TASK_UNDELIVERED,
+                                                 SAM_STAT_TASK_ABORTED);
 
                                 /* The I/O was aborted. */
@@ -290,7 +237,7 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
                         spin_unlock_irqrestore(&task->task_state_lock, flags);
 
                         /* build and send the request. */
-                        status = isci_request_execute(isci_host, task, &request,
+                        status = isci_request_execute(ihost, task, &request,
                                                       gfp_flags);
 
                         if (status != SCI_SUCCESS) {
@@ -307,19 +254,14 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
                                  * SAS_TASK_UNDELIVERED next time
                                  * through.
                                  */
-                                isci_task_complete_for_upper_layer(
-                                        task,
-                                        SAS_TASK_COMPLETE,
-                                        SAS_QUEUE_FULL,
-                                        isci_perform_normal_io_completion
-                                        );
-                                isci_host_can_dequeue(isci_host, 1);
+                                isci_task_refuse(ihost, task,
+                                                 SAS_TASK_COMPLETE,
+                                                 SAS_QUEUE_FULL);
+                                isci_host_can_dequeue(ihost, 1);
                         }
                 }
-
-next_task:
-                task = list_entry(task->list.next, struct sas_task, list);
-        } while (--num > 0);
+        }
 
         return 0;
 }
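
A side note on the for_each_sas_task() helper added by this patch: it has no
initializer clause, so it consumes the caller's num and task arguments in
place, walking the task->list linkage that libsas builds when it hands the
LLDD a batch of tasks.  Below is a standalone, user-space sketch of that
iteration pattern; the struct definitions and the minimal list_entry() are
stand-ins for the kernel's, and the three statically chained tasks exist only
to make the example runnable:

    #include <stdio.h>
    #include <stddef.h>

    struct list_head { struct list_head *next, *prev; };
    struct sas_task  { int tag; struct list_head list; };

    /* Minimal container_of-style lookup, mirroring the kernel's list_entry(). */
    #define list_entry(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    /* The macro from the patch: count 'num' down and follow task->list.next. */
    #define for_each_sas_task(num, task) \
            for (; num > 0; num--,\
                 task = list_entry(task->list.next, struct sas_task, list))

    int main(void)
    {
            struct sas_task t[3] = { { .tag = 0 }, { .tag = 1 }, { .tag = 2 } };
            struct sas_task *task = &t[0];
            int num = 3;
            int i;

            /* Chain the tasks into a circular list, as libsas would. */
            for (i = 0; i < 3; i++) {
                    t[i].list.next = &t[(i + 1) % 3].list;
                    t[i].list.prev = &t[(i + 2) % 3].list;
            }

            for_each_sas_task(num, task)
                    printf("submitting task %d\n", task->tag);  /* 0, 1, 2 */

            return 0;
    }
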