Commit 312e0c24 authored by Dan Williams

isci: unify can_queue tracking on the tci_pool, uplevel tag assignment

The tci_pool tracks our outstanding command slots, which are also the 'index'
portion of our tags.  Grabbing the tag early in ->lldd_execute_task lets us
drop the isci_host_can_queue() and ->was_tag_assigned_by_user infrastructure.
->was_tag_assigned_by_user required the task context to be duplicated in
request-local buffer.  With the tci established early we can build the
task_context directly into its final location and skip a memcpy.

With the task context buffer at a known address at request construction we
have the opportunity/obligation to also fix sgl handling.  This rework feels
like it belongs in another patch but the sgl handling and task_context are too
intertwined.
1/ fix the 'ab' pair embedded in the task context to point to the 'cd' pair in
   the task context (previously we were prematurely linking to the staging
   buffer).
2/ fix the broken iteration of pio sgls that assumes all sgls are relative to
   the request, and does a dangerous looking reverse lookup of physical
   address to virtual address.
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent 9274f45e
This diff is collapsed.
...@@ -192,6 +192,7 @@ struct scic_sds_controller { ...@@ -192,6 +192,7 @@ struct scic_sds_controller {
* context table. This data is shared between the hardware and software. * context table. This data is shared between the hardware and software.
*/ */
struct scu_task_context *task_context_table; struct scu_task_context *task_context_table;
dma_addr_t task_context_dma;
/** /**
* This field is a pointer to the memory allocated by the driver for the * This field is a pointer to the memory allocated by the driver for the
...@@ -302,12 +303,8 @@ struct isci_host { ...@@ -302,12 +303,8 @@ struct isci_host {
struct isci_port ports[SCI_MAX_PORTS + 1]; /* includes dummy port */ struct isci_port ports[SCI_MAX_PORTS + 1]; /* includes dummy port */
struct sas_ha_struct sas_ha; struct sas_ha_struct sas_ha;
int can_queue;
spinlock_t queue_lock;
spinlock_t state_lock; spinlock_t state_lock;
struct pci_dev *pdev; struct pci_dev *pdev;
enum isci_status status; enum isci_status status;
#define IHOST_START_PENDING 0 #define IHOST_START_PENDING 0
#define IHOST_STOP_PENDING 1 #define IHOST_STOP_PENDING 1
...@@ -451,36 +448,6 @@ static inline void isci_host_change_state(struct isci_host *isci_host, ...@@ -451,36 +448,6 @@ static inline void isci_host_change_state(struct isci_host *isci_host,
} }
static inline int isci_host_can_queue(struct isci_host *isci_host, int num)
{
int ret = 0;
unsigned long flags;
spin_lock_irqsave(&isci_host->queue_lock, flags);
if ((isci_host->can_queue - num) < 0) {
dev_dbg(&isci_host->pdev->dev,
"%s: isci_host->can_queue = %d\n",
__func__,
isci_host->can_queue);
ret = -SAS_QUEUE_FULL;
} else
isci_host->can_queue -= num;
spin_unlock_irqrestore(&isci_host->queue_lock, flags);
return ret;
}
static inline void isci_host_can_dequeue(struct isci_host *isci_host, int num)
{
unsigned long flags;
spin_lock_irqsave(&isci_host->queue_lock, flags);
isci_host->can_queue += num;
spin_unlock_irqrestore(&isci_host->queue_lock, flags);
}
static inline void wait_for_start(struct isci_host *ihost) static inline void wait_for_start(struct isci_host *ihost)
{ {
wait_event(ihost->eventq, !test_bit(IHOST_START_PENDING, &ihost->flags)); wait_event(ihost->eventq, !test_bit(IHOST_START_PENDING, &ihost->flags));
...@@ -646,10 +613,6 @@ union scu_remote_node_context *scic_sds_controller_get_remote_node_context_buffe ...@@ -646,10 +613,6 @@ union scu_remote_node_context *scic_sds_controller_get_remote_node_context_buffe
struct scic_sds_request *scic_request_by_tag(struct scic_sds_controller *scic, struct scic_sds_request *scic_request_by_tag(struct scic_sds_controller *scic,
u16 io_tag); u16 io_tag);
struct scu_task_context *scic_sds_controller_get_task_context_buffer(
struct scic_sds_controller *scic,
u16 io_tag);
void scic_sds_controller_power_control_queue_insert( void scic_sds_controller_power_control_queue_insert(
struct scic_sds_controller *scic, struct scic_sds_controller *scic,
struct scic_sds_phy *sci_phy); struct scic_sds_phy *sci_phy);
...@@ -681,6 +644,9 @@ void scic_sds_controller_register_setup(struct scic_sds_controller *scic); ...@@ -681,6 +644,9 @@ void scic_sds_controller_register_setup(struct scic_sds_controller *scic);
enum sci_status scic_controller_continue_io(struct scic_sds_request *sci_req); enum sci_status scic_controller_continue_io(struct scic_sds_request *sci_req);
int isci_host_scan_finished(struct Scsi_Host *, unsigned long); int isci_host_scan_finished(struct Scsi_Host *, unsigned long);
void isci_host_scan_start(struct Scsi_Host *); void isci_host_scan_start(struct Scsi_Host *);
u16 isci_alloc_tag(struct isci_host *ihost);
enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag);
void isci_tci_free(struct isci_host *ihost, u16 tci);
int isci_host_init(struct isci_host *); int isci_host_init(struct isci_host *);
...@@ -708,14 +674,12 @@ void scic_controller_disable_interrupts( ...@@ -708,14 +674,12 @@ void scic_controller_disable_interrupts(
enum sci_status scic_controller_start_io( enum sci_status scic_controller_start_io(
struct scic_sds_controller *scic, struct scic_sds_controller *scic,
struct scic_sds_remote_device *remote_device, struct scic_sds_remote_device *remote_device,
struct scic_sds_request *io_request, struct scic_sds_request *io_request);
u16 io_tag);
enum sci_task_status scic_controller_start_task( enum sci_task_status scic_controller_start_task(
struct scic_sds_controller *scic, struct scic_sds_controller *scic,
struct scic_sds_remote_device *remote_device, struct scic_sds_remote_device *remote_device,
struct scic_sds_request *task_request, struct scic_sds_request *task_request);
u16 io_tag);
enum sci_status scic_controller_terminate_request( enum sci_status scic_controller_terminate_request(
struct scic_sds_controller *scic, struct scic_sds_controller *scic,
...@@ -727,13 +691,6 @@ enum sci_status scic_controller_complete_io( ...@@ -727,13 +691,6 @@ enum sci_status scic_controller_complete_io(
struct scic_sds_remote_device *remote_device, struct scic_sds_remote_device *remote_device,
struct scic_sds_request *io_request); struct scic_sds_request *io_request);
u16 scic_controller_allocate_io_tag(
struct scic_sds_controller *scic);
enum sci_status scic_controller_free_io_tag(
struct scic_sds_controller *scic,
u16 io_tag);
void scic_sds_port_configuration_agent_construct( void scic_sds_port_configuration_agent_construct(
struct scic_sds_port_configuration_agent *port_agent); struct scic_sds_port_configuration_agent *port_agent);
......
...@@ -695,35 +695,21 @@ static void scic_sds_port_construct_dummy_rnc(struct scic_sds_port *sci_port, u1 ...@@ -695,35 +695,21 @@ static void scic_sds_port_construct_dummy_rnc(struct scic_sds_port *sci_port, u1
*/ */
static void scic_sds_port_construct_dummy_task(struct scic_sds_port *sci_port, u16 tag) static void scic_sds_port_construct_dummy_task(struct scic_sds_port *sci_port, u16 tag)
{ {
struct scic_sds_controller *scic = sci_port->owning_controller;
struct scu_task_context *task_context; struct scu_task_context *task_context;
task_context = scic_sds_controller_get_task_context_buffer(sci_port->owning_controller, tag); task_context = &scic->task_context_table[ISCI_TAG_TCI(tag)];
memset(task_context, 0, sizeof(struct scu_task_context)); memset(task_context, 0, sizeof(struct scu_task_context));
task_context->abort = 0;
task_context->priority = 0;
task_context->initiator_request = 1; task_context->initiator_request = 1;
task_context->connection_rate = 1; task_context->connection_rate = 1;
task_context->protocol_engine_index = 0;
task_context->logical_port_index = sci_port->physical_port_index; task_context->logical_port_index = sci_port->physical_port_index;
task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP; task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
task_context->task_index = ISCI_TAG_TCI(tag); task_context->task_index = ISCI_TAG_TCI(tag);
task_context->valid = SCU_TASK_CONTEXT_VALID; task_context->valid = SCU_TASK_CONTEXT_VALID;
task_context->context_type = SCU_TASK_CONTEXT_TYPE; task_context->context_type = SCU_TASK_CONTEXT_TYPE;
task_context->remote_node_index = sci_port->reserved_rni; task_context->remote_node_index = sci_port->reserved_rni;
task_context->command_code = 0;
task_context->link_layer_control = 0;
task_context->do_not_dma_ssp_good_response = 1; task_context->do_not_dma_ssp_good_response = 1;
task_context->strict_ordering = 0;
task_context->control_frame = 0;
task_context->timeout_enable = 0;
task_context->block_guard_enable = 0;
task_context->address_modifier = 0;
task_context->task_phase = 0x01; task_context->task_phase = 0x01;
} }
...@@ -731,15 +717,15 @@ static void scic_sds_port_destroy_dummy_resources(struct scic_sds_port *sci_port ...@@ -731,15 +717,15 @@ static void scic_sds_port_destroy_dummy_resources(struct scic_sds_port *sci_port
{ {
struct scic_sds_controller *scic = sci_port->owning_controller; struct scic_sds_controller *scic = sci_port->owning_controller;
if (sci_port->reserved_tci != SCU_DUMMY_INDEX) if (sci_port->reserved_tag != SCI_CONTROLLER_INVALID_IO_TAG)
scic_controller_free_io_tag(scic, sci_port->reserved_tci); isci_free_tag(scic_to_ihost(scic), sci_port->reserved_tag);
if (sci_port->reserved_rni != SCU_DUMMY_INDEX) if (sci_port->reserved_rni != SCU_DUMMY_INDEX)
scic_sds_remote_node_table_release_remote_node_index(&scic->available_remote_nodes, scic_sds_remote_node_table_release_remote_node_index(&scic->available_remote_nodes,
1, sci_port->reserved_rni); 1, sci_port->reserved_rni);
sci_port->reserved_rni = SCU_DUMMY_INDEX; sci_port->reserved_rni = SCU_DUMMY_INDEX;
sci_port->reserved_tci = SCU_DUMMY_INDEX; sci_port->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG;
} }
/** /**
...@@ -1119,18 +1105,17 @@ scic_sds_port_suspend_port_task_scheduler(struct scic_sds_port *port) ...@@ -1119,18 +1105,17 @@ scic_sds_port_suspend_port_task_scheduler(struct scic_sds_port *port)
*/ */
static void scic_sds_port_post_dummy_request(struct scic_sds_port *sci_port) static void scic_sds_port_post_dummy_request(struct scic_sds_port *sci_port)
{ {
u32 command;
struct scu_task_context *task_context;
struct scic_sds_controller *scic = sci_port->owning_controller; struct scic_sds_controller *scic = sci_port->owning_controller;
u16 tci = sci_port->reserved_tci; u16 tag = sci_port->reserved_tag;
struct scu_task_context *tc;
task_context = scic_sds_controller_get_task_context_buffer(scic, tci); u32 command;
task_context->abort = 0; tc = &scic->task_context_table[ISCI_TAG_TCI(tag)];
tc->abort = 0;
command = SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | command = SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
sci_port->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | sci_port->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT |
tci; ISCI_TAG_TCI(tag);
scic_sds_controller_post_request(scic, command); scic_sds_controller_post_request(scic, command);
} }
...@@ -1145,17 +1130,16 @@ static void scic_sds_port_post_dummy_request(struct scic_sds_port *sci_port) ...@@ -1145,17 +1130,16 @@ static void scic_sds_port_post_dummy_request(struct scic_sds_port *sci_port)
static void scic_sds_port_abort_dummy_request(struct scic_sds_port *sci_port) static void scic_sds_port_abort_dummy_request(struct scic_sds_port *sci_port)
{ {
struct scic_sds_controller *scic = sci_port->owning_controller; struct scic_sds_controller *scic = sci_port->owning_controller;
u16 tci = sci_port->reserved_tci; u16 tag = sci_port->reserved_tag;
struct scu_task_context *tc; struct scu_task_context *tc;
u32 command; u32 command;
tc = scic_sds_controller_get_task_context_buffer(scic, tci); tc = &scic->task_context_table[ISCI_TAG_TCI(tag)];
tc->abort = 1; tc->abort = 1;
command = SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT | command = SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT |
sci_port->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | sci_port->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT |
tci; ISCI_TAG_TCI(tag);
scic_sds_controller_post_request(scic, command); scic_sds_controller_post_request(scic, command);
} }
...@@ -1333,15 +1317,16 @@ enum sci_status scic_sds_port_start(struct scic_sds_port *sci_port) ...@@ -1333,15 +1317,16 @@ enum sci_status scic_sds_port_start(struct scic_sds_port *sci_port)
sci_port->reserved_rni = rni; sci_port->reserved_rni = rni;
} }
if (sci_port->reserved_tci == SCU_DUMMY_INDEX) { if (sci_port->reserved_tag == SCI_CONTROLLER_INVALID_IO_TAG) {
/* Allocate a TCI and remove the sequence nibble */ struct isci_host *ihost = scic_to_ihost(scic);
u16 tci = scic_controller_allocate_io_tag(scic); u16 tag;
if (tci != SCU_DUMMY_INDEX) tag = isci_alloc_tag(ihost);
scic_sds_port_construct_dummy_task(sci_port, tci); if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
else
status = SCI_FAILURE_INSUFFICIENT_RESOURCES; status = SCI_FAILURE_INSUFFICIENT_RESOURCES;
sci_port->reserved_tci = tci; else
scic_sds_port_construct_dummy_task(sci_port, tag);
sci_port->reserved_tag = tag;
} }
if (status == SCI_SUCCESS) { if (status == SCI_SUCCESS) {
...@@ -1859,7 +1844,7 @@ void scic_sds_port_construct(struct scic_sds_port *sci_port, u8 index, ...@@ -1859,7 +1844,7 @@ void scic_sds_port_construct(struct scic_sds_port *sci_port, u8 index,
sci_port->assigned_device_count = 0; sci_port->assigned_device_count = 0;
sci_port->reserved_rni = SCU_DUMMY_INDEX; sci_port->reserved_rni = SCU_DUMMY_INDEX;
sci_port->reserved_tci = SCU_DUMMY_INDEX; sci_port->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG;
sci_init_timer(&sci_port->timer, port_timeout); sci_init_timer(&sci_port->timer, port_timeout);
......
...@@ -108,7 +108,7 @@ struct scic_sds_port { ...@@ -108,7 +108,7 @@ struct scic_sds_port {
u8 active_phy_mask; u8 active_phy_mask;
u16 reserved_rni; u16 reserved_rni;
u16 reserved_tci; u16 reserved_tag;
/** /**
* This field contains the count of the io requests started on this port * This field contains the count of the io requests started on this port
......
This diff is collapsed.
...@@ -136,7 +136,7 @@ struct scic_sds_stp_request { ...@@ -136,7 +136,7 @@ struct scic_sds_stp_request {
u8 ending_error; u8 ending_error;
struct scic_sds_request_pio_sgl { struct scic_sds_request_pio_sgl {
struct scu_sgl_element_pair *sgl_pair; int sgl_index;
u8 sgl_set; u8 sgl_set;
u32 sgl_offset; u32 sgl_offset;
} request_current; } request_current;
...@@ -171,12 +171,6 @@ struct scic_sds_request { ...@@ -171,12 +171,6 @@ struct scic_sds_request {
*/ */
struct scic_sds_remote_device *target_device; struct scic_sds_remote_device *target_device;
/*
* This field is utilized to determine if the SCI user is managing
* the IO tag for this request or if the core is managing it.
*/
bool was_tag_assigned_by_user;
/* /*
* This field indicates the IO tag for this request. The IO tag is * This field indicates the IO tag for this request. The IO tag is
* comprised of the task_index and a sequence count. The sequence count * comprised of the task_index and a sequence count. The sequence count
...@@ -209,8 +203,7 @@ struct scic_sds_request { ...@@ -209,8 +203,7 @@ struct scic_sds_request {
*/ */
u32 post_context; u32 post_context;
struct scu_task_context *task_context_buffer; struct scu_task_context *tc;
struct scu_task_context tc ____cacheline_aligned;
/* could be larger with sg chaining */ /* could be larger with sg chaining */
#define SCU_SGL_SIZE ((SCI_MAX_SCATTER_GATHER_ELEMENTS + 1) / 2) #define SCU_SGL_SIZE ((SCI_MAX_SCATTER_GATHER_ELEMENTS + 1) / 2)
...@@ -465,35 +458,6 @@ enum sci_base_request_states { ...@@ -465,35 +458,6 @@ enum sci_base_request_states {
(request)->sci_status = (sci_status_code); \ (request)->sci_status = (sci_status_code); \
} }
/**
* SCU_SGL_ZERO() -
*
* This macro zeros the hardware SGL element data
*/
#define SCU_SGL_ZERO(scu_sge) \
{ \
(scu_sge).length = 0; \
(scu_sge).address_lower = 0; \
(scu_sge).address_upper = 0; \
(scu_sge).address_modifier = 0; \
}
/**
* SCU_SGL_COPY() -
*
* This macro copys the SGL Element data from the host os to the hardware SGL
* elment data
*/
#define SCU_SGL_COPY(scu_sge, os_sge) \
{ \
(scu_sge).length = sg_dma_len(sg); \
(scu_sge).address_upper = \
upper_32_bits(sg_dma_address(sg)); \
(scu_sge).address_lower = \
lower_32_bits(sg_dma_address(sg)); \
(scu_sge).address_modifier = 0; \
}
enum sci_status scic_sds_request_start(struct scic_sds_request *sci_req); enum sci_status scic_sds_request_start(struct scic_sds_request *sci_req);
enum sci_status scic_sds_io_request_terminate(struct scic_sds_request *sci_req); enum sci_status scic_sds_io_request_terminate(struct scic_sds_request *sci_req);
enum sci_status enum sci_status
...@@ -509,22 +473,6 @@ scic_sds_request_complete(struct scic_sds_request *sci_req); ...@@ -509,22 +473,6 @@ scic_sds_request_complete(struct scic_sds_request *sci_req);
extern enum sci_status extern enum sci_status
scic_sds_io_request_tc_completion(struct scic_sds_request *sci_req, u32 code); scic_sds_io_request_tc_completion(struct scic_sds_request *sci_req, u32 code);
/* XXX open code in caller */
static inline void *scic_request_get_virt_addr(struct scic_sds_request *sci_req,
dma_addr_t phys_addr)
{
struct isci_request *ireq = sci_req_to_ireq(sci_req);
dma_addr_t offset;
BUG_ON(phys_addr < ireq->request_daddr);
offset = phys_addr - ireq->request_daddr;
BUG_ON(offset >= sizeof(*ireq));
return (char *)ireq + offset;
}
/* XXX open code in caller */ /* XXX open code in caller */
static inline dma_addr_t static inline dma_addr_t
scic_io_request_get_dma_addr(struct scic_sds_request *sci_req, void *virt_addr) scic_io_request_get_dma_addr(struct scic_sds_request *sci_req, void *virt_addr)
...@@ -672,7 +620,7 @@ struct isci_request *isci_request_alloc_tmf(struct isci_host *ihost, ...@@ -672,7 +620,7 @@ struct isci_request *isci_request_alloc_tmf(struct isci_host *ihost,
struct isci_tmf *isci_tmf, struct isci_tmf *isci_tmf,
gfp_t gfp_flags); gfp_t gfp_flags);
int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev, int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
struct sas_task *task, gfp_t gfp_flags); struct sas_task *task, u16 tag, gfp_t gfp_flags);
void isci_terminate_pending_requests(struct isci_host *ihost, void isci_terminate_pending_requests(struct isci_host *ihost,
struct isci_remote_device *idev); struct isci_remote_device *idev);
enum sci_status enum sci_status
......
...@@ -63,6 +63,7 @@ ...@@ -63,6 +63,7 @@
#include "request.h" #include "request.h"
#include "sata.h" #include "sata.h"
#include "task.h" #include "task.h"
#include "host.h"
/** /**
* isci_task_refuse() - complete the request to the upper layer driver in * isci_task_refuse() - complete the request to the upper layer driver in
...@@ -156,25 +157,19 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags) ...@@ -156,25 +157,19 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
{ {
struct isci_host *ihost = dev_to_ihost(task->dev); struct isci_host *ihost = dev_to_ihost(task->dev);
struct isci_remote_device *idev; struct isci_remote_device *idev;
enum sci_status status;
unsigned long flags; unsigned long flags;
bool io_ready; bool io_ready;
int ret; u16 tag;
dev_dbg(&ihost->pdev->dev, "%s: num=%d\n", __func__, num); dev_dbg(&ihost->pdev->dev, "%s: num=%d\n", __func__, num);
/* Check if we have room for more tasks */
ret = isci_host_can_queue(ihost, num);
if (ret) {
dev_warn(&ihost->pdev->dev, "%s: queue full\n", __func__);
return ret;
}
for_each_sas_task(num, task) { for_each_sas_task(num, task) {
enum sci_status status = SCI_FAILURE;
spin_lock_irqsave(&ihost->scic_lock, flags); spin_lock_irqsave(&ihost->scic_lock, flags);
idev = isci_lookup_device(task->dev); idev = isci_lookup_device(task->dev);
io_ready = isci_device_io_ready(idev, task); io_ready = isci_device_io_ready(idev, task);
tag = isci_alloc_tag(ihost);
spin_unlock_irqrestore(&ihost->scic_lock, flags); spin_unlock_irqrestore(&ihost->scic_lock, flags);
dev_dbg(&ihost->pdev->dev, dev_dbg(&ihost->pdev->dev,
...@@ -185,15 +180,12 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags) ...@@ -185,15 +180,12 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
if (!idev) { if (!idev) {
isci_task_refuse(ihost, task, SAS_TASK_UNDELIVERED, isci_task_refuse(ihost, task, SAS_TASK_UNDELIVERED,
SAS_DEVICE_UNKNOWN); SAS_DEVICE_UNKNOWN);
isci_host_can_dequeue(ihost, 1); } else if (!io_ready || tag == SCI_CONTROLLER_INVALID_IO_TAG) {
} else if (!io_ready) {
/* Indicate QUEUE_FULL so that the scsi midlayer /* Indicate QUEUE_FULL so that the scsi midlayer
* retries. * retries.
*/ */
isci_task_refuse(ihost, task, SAS_TASK_COMPLETE, isci_task_refuse(ihost, task, SAS_TASK_COMPLETE,
SAS_QUEUE_FULL); SAS_QUEUE_FULL);
isci_host_can_dequeue(ihost, 1);
} else { } else {
/* There is a device and it's ready for I/O. */ /* There is a device and it's ready for I/O. */
spin_lock_irqsave(&task->task_state_lock, flags); spin_lock_irqsave(&task->task_state_lock, flags);
...@@ -206,13 +198,12 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags) ...@@ -206,13 +198,12 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
isci_task_refuse(ihost, task, isci_task_refuse(ihost, task,
SAS_TASK_UNDELIVERED, SAS_TASK_UNDELIVERED,
SAM_STAT_TASK_ABORTED); SAM_STAT_TASK_ABORTED);
isci_host_can_dequeue(ihost, 1);
} else { } else {
task->task_state_flags |= SAS_TASK_AT_INITIATOR; task->task_state_flags |= SAS_TASK_AT_INITIATOR;
spin_unlock_irqrestore(&task->task_state_lock, flags); spin_unlock_irqrestore(&task->task_state_lock, flags);
/* build and send the request. */ /* build and send the request. */
status = isci_request_execute(ihost, idev, task, gfp_flags); status = isci_request_execute(ihost, idev, task, tag, gfp_flags);
if (status != SCI_SUCCESS) { if (status != SCI_SUCCESS) {
...@@ -231,10 +222,17 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags) ...@@ -231,10 +222,17 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
isci_task_refuse(ihost, task, isci_task_refuse(ihost, task,
SAS_TASK_COMPLETE, SAS_TASK_COMPLETE,
SAS_QUEUE_FULL); SAS_QUEUE_FULL);
isci_host_can_dequeue(ihost, 1);
} }
} }
} }
if (status != SCI_SUCCESS && tag != SCI_CONTROLLER_INVALID_IO_TAG) {
spin_lock_irqsave(&ihost->scic_lock, flags);
/* command never hit the device, so just free
* the tci and skip the sequence increment
*/
isci_tci_free(ihost, ISCI_TAG_TCI(tag));
spin_unlock_irqrestore(&ihost->scic_lock, flags);
}
isci_put_device(idev); isci_put_device(idev);
} }
return 0; return 0;
...@@ -242,7 +240,7 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags) ...@@ -242,7 +240,7 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
static struct isci_request *isci_task_request_build(struct isci_host *ihost, static struct isci_request *isci_task_request_build(struct isci_host *ihost,
struct isci_remote_device *idev, struct isci_remote_device *idev,
struct isci_tmf *isci_tmf) u16 tag, struct isci_tmf *isci_tmf)
{ {
enum sci_status status = SCI_FAILURE; enum sci_status status = SCI_FAILURE;
struct isci_request *ireq = NULL; struct isci_request *ireq = NULL;
...@@ -259,8 +257,7 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost, ...@@ -259,8 +257,7 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost,
return NULL; return NULL;
/* let the core do it's construct. */ /* let the core do it's construct. */
status = scic_task_request_construct(&ihost->sci, &idev->sci, status = scic_task_request_construct(&ihost->sci, &idev->sci, tag,
SCI_CONTROLLER_INVALID_IO_TAG,
&ireq->sci); &ireq->sci);
if (status != SCI_SUCCESS) { if (status != SCI_SUCCESS) {
...@@ -290,8 +287,7 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost, ...@@ -290,8 +287,7 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost,
return ireq; return ireq;
errout: errout:
isci_request_free(ihost, ireq); isci_request_free(ihost, ireq);
ireq = NULL; return NULL;
return ireq;
} }
int isci_task_execute_tmf(struct isci_host *ihost, int isci_task_execute_tmf(struct isci_host *ihost,
...@@ -305,6 +301,14 @@ int isci_task_execute_tmf(struct isci_host *ihost, ...@@ -305,6 +301,14 @@ int isci_task_execute_tmf(struct isci_host *ihost,
int ret = TMF_RESP_FUNC_FAILED; int ret = TMF_RESP_FUNC_FAILED;
unsigned long flags; unsigned long flags;
unsigned long timeleft; unsigned long timeleft;
u16 tag;
spin_lock_irqsave(&ihost->scic_lock, flags);
tag = isci_alloc_tag(ihost);
spin_unlock_irqrestore(&ihost->scic_lock, flags);
if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
return ret;
/* sanity check, return TMF_RESP_FUNC_FAILED /* sanity check, return TMF_RESP_FUNC_FAILED
* if the device is not there and ready. * if the device is not there and ready.
...@@ -316,7 +320,7 @@ int isci_task_execute_tmf(struct isci_host *ihost, ...@@ -316,7 +320,7 @@ int isci_task_execute_tmf(struct isci_host *ihost,
"%s: isci_device = %p not ready (%#lx)\n", "%s: isci_device = %p not ready (%#lx)\n",
__func__, __func__,
isci_device, isci_device ? isci_device->flags : 0); isci_device, isci_device ? isci_device->flags : 0);
return TMF_RESP_FUNC_FAILED; goto err_tci;
} else } else
dev_dbg(&ihost->pdev->dev, dev_dbg(&ihost->pdev->dev,
"%s: isci_device = %p\n", "%s: isci_device = %p\n",
...@@ -327,22 +331,16 @@ int isci_task_execute_tmf(struct isci_host *ihost, ...@@ -327,22 +331,16 @@ int isci_task_execute_tmf(struct isci_host *ihost,
/* Assign the pointer to the TMF's completion kernel wait structure. */ /* Assign the pointer to the TMF's completion kernel wait structure. */
tmf->complete = &completion; tmf->complete = &completion;
ireq = isci_task_request_build(ihost, isci_device, tmf); ireq = isci_task_request_build(ihost, isci_device, tag, tmf);
if (!ireq) { if (!ireq)
dev_warn(&ihost->pdev->dev, goto err_tci;
"%s: isci_task_request_build failed\n",
__func__);
return TMF_RESP_FUNC_FAILED;
}
spin_lock_irqsave(&ihost->scic_lock, flags); spin_lock_irqsave(&ihost->scic_lock, flags);
/* start the TMF io. */ /* start the TMF io. */
status = scic_controller_start_task( status = scic_controller_start_task(&ihost->sci,
&ihost->sci, sci_device,
sci_device, &ireq->sci);
&ireq->sci,
SCI_CONTROLLER_INVALID_IO_TAG);
if (status != SCI_TASK_SUCCESS) { if (status != SCI_TASK_SUCCESS) {
dev_warn(&ihost->pdev->dev, dev_warn(&ihost->pdev->dev,
...@@ -351,8 +349,7 @@ int isci_task_execute_tmf(struct isci_host *ihost, ...@@ -351,8 +349,7 @@ int isci_task_execute_tmf(struct isci_host *ihost,
status, status,
ireq); ireq);
spin_unlock_irqrestore(&ihost->scic_lock, flags); spin_unlock_irqrestore(&ihost->scic_lock, flags);
isci_request_free(ihost, ireq); goto err_ireq;
return ret;
} }
if (tmf->cb_state_func != NULL) if (tmf->cb_state_func != NULL)
...@@ -403,6 +400,15 @@ int isci_task_execute_tmf(struct isci_host *ihost, ...@@ -403,6 +400,15 @@ int isci_task_execute_tmf(struct isci_host *ihost,
ireq); ireq);
return ret; return ret;
err_ireq:
isci_request_free(ihost, ireq);
err_tci:
spin_lock_irqsave(&ihost->scic_lock, flags);
isci_tci_free(ihost, ISCI_TAG_TCI(tag));
spin_unlock_irqrestore(&ihost->scic_lock, flags);
return ret;
} }
void isci_task_build_tmf( void isci_task_build_tmf(
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment