Commit a1ea04b3 authored by Uma Krishnan's avatar Uma Krishnan Committed by Martin K. Petersen

scsi: cxlflash: Flush pending commands in cleanup path

When the AFU is reset in an error path, pending SCSI commands can be
silently dropped without completion or a formal abort. This puts the onus
on the cxlflash driver to notify the mid-layer and indicate that the
commands can be retried.

Once the card has been quiesced, the hardware send queue lock is acquired
to prevent any data movement while the pending commands are processed.
Signed-off-by: default avatarUma Krishnan <ukrishn@linux.vnet.ibm.com>
Acked-by: default avatarMatthew R. Ochs <mrochs@linux.vnet.ibm.com>
Signed-off-by: default avatarMartin K. Petersen <martin.petersen@oracle.com>
parent a002bf83
...@@ -157,7 +157,9 @@ struct afu_cmd { ...@@ -157,7 +157,9 @@ struct afu_cmd {
struct list_head queue; struct list_head queue;
u32 hwq_index; u32 hwq_index;
u8 cmd_tmf:1; u8 cmd_tmf:1,
cmd_aborted:1;
struct list_head list; /* Pending commands link */ struct list_head list; /* Pending commands link */
/* As per the SISLITE spec the IOARCB EA has to be 16-byte aligned. /* As per the SISLITE spec the IOARCB EA has to be 16-byte aligned.
...@@ -176,6 +178,7 @@ static inline struct afu_cmd *sc_to_afucz(struct scsi_cmnd *sc) ...@@ -176,6 +178,7 @@ static inline struct afu_cmd *sc_to_afucz(struct scsi_cmnd *sc)
struct afu_cmd *afuc = sc_to_afuc(sc); struct afu_cmd *afuc = sc_to_afuc(sc);
memset(afuc, 0, sizeof(*afuc)); memset(afuc, 0, sizeof(*afuc));
INIT_LIST_HEAD(&afuc->queue);
return afuc; return afuc;
} }
......
...@@ -193,6 +193,36 @@ static void cmd_complete(struct afu_cmd *cmd) ...@@ -193,6 +193,36 @@ static void cmd_complete(struct afu_cmd *cmd)
complete(&cmd->cevent); complete(&cmd->cevent);
} }
/**
 * flush_pending_cmds() - flush all pending commands on this hardware queue
 * @hwq: Hardware queue to flush.
 *
 * The hardware send queue lock associated with this hardware queue must be
 * held when calling this routine.
 *
 * SCSI commands on the pending list are completed back to the mid-layer
 * with DID_IMM_RETRY so that they are retried; internal AFU commands are
 * flagged as aborted and their waiters are woken via the command's
 * completion. Commands already sitting on a done queue are left for
 * cmd_complete() to finish.
 */
static void flush_pending_cmds(struct hwq *hwq)
{
struct afu_cmd *cmd, *tmp;
struct scsi_cmnd *scp;
/* _safe variant: entries are deleted from the list while iterating */
list_for_each_entry_safe(cmd, tmp, &hwq->pending_cmds, list) {
/* Bypass command when on a doneq, cmd_complete() will handle */
if (!list_empty(&cmd->queue))
continue;
list_del(&cmd->list);
if (cmd->scp) {
/* SCSI command: hand back to the mid-layer for retry */
scp = cmd->scp;
scp->result = (DID_IMM_RETRY << 16);
scp->scsi_done(scp);
} else {
/* Internal command: mark aborted and wake any waiter */
cmd->cmd_aborted = true;
complete(&cmd->cevent);
}
}
}
/** /**
* context_reset() - reset context via specified register * context_reset() - reset context via specified register
* @hwq: Hardware queue owning the context to be reset. * @hwq: Hardware queue owning the context to be reset.
...@@ -357,6 +387,9 @@ static int wait_resp(struct afu *afu, struct afu_cmd *cmd) ...@@ -357,6 +387,9 @@ static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
if (!timeout) if (!timeout)
rc = -ETIMEDOUT; rc = -ETIMEDOUT;
if (cmd->cmd_aborted)
rc = -EAGAIN;
if (unlikely(cmd->sa.ioasc != 0)) { if (unlikely(cmd->sa.ioasc != 0)) {
dev_err(dev, "%s: cmd %02x failed, ioasc=%08x\n", dev_err(dev, "%s: cmd %02x failed, ioasc=%08x\n",
__func__, cmd->rcb.cdb[0], cmd->sa.ioasc); __func__, cmd->rcb.cdb[0], cmd->sa.ioasc);
...@@ -702,6 +735,7 @@ static void term_mc(struct cxlflash_cfg *cfg, u32 index) ...@@ -702,6 +735,7 @@ static void term_mc(struct cxlflash_cfg *cfg, u32 index)
struct afu *afu = cfg->afu; struct afu *afu = cfg->afu;
struct device *dev = &cfg->dev->dev; struct device *dev = &cfg->dev->dev;
struct hwq *hwq; struct hwq *hwq;
ulong lock_flags;
if (!afu) { if (!afu) {
dev_err(dev, "%s: returning with NULL afu\n", __func__); dev_err(dev, "%s: returning with NULL afu\n", __func__);
...@@ -719,6 +753,10 @@ static void term_mc(struct cxlflash_cfg *cfg, u32 index) ...@@ -719,6 +753,10 @@ static void term_mc(struct cxlflash_cfg *cfg, u32 index)
if (index != PRIMARY_HWQ) if (index != PRIMARY_HWQ)
WARN_ON(cxl_release_context(hwq->ctx)); WARN_ON(cxl_release_context(hwq->ctx));
hwq->ctx = NULL; hwq->ctx = NULL;
spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
flush_pending_cmds(hwq);
spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
} }
/** /**
...@@ -2155,7 +2193,7 @@ int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u, ...@@ -2155,7 +2193,7 @@ int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
mutex_lock(&sync_active); mutex_lock(&sync_active);
atomic_inc(&afu->cmds_active); atomic_inc(&afu->cmds_active);
buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL); buf = kmalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
if (unlikely(!buf)) { if (unlikely(!buf)) {
dev_err(dev, "%s: no memory for command\n", __func__); dev_err(dev, "%s: no memory for command\n", __func__);
rc = -ENOMEM; rc = -ENOMEM;
...@@ -2165,6 +2203,8 @@ int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u, ...@@ -2165,6 +2203,8 @@ int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd)); cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
retry: retry:
memset(cmd, 0, sizeof(*cmd));
INIT_LIST_HEAD(&cmd->queue);
init_completion(&cmd->cevent); init_completion(&cmd->cevent);
cmd->parent = afu; cmd->parent = afu;
cmd->hwq_index = hwq->index; cmd->hwq_index = hwq->index;
...@@ -2191,11 +2231,20 @@ int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u, ...@@ -2191,11 +2231,20 @@ int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
} }
rc = wait_resp(afu, cmd); rc = wait_resp(afu, cmd);
if (rc == -ETIMEDOUT) { switch (rc) {
case -ETIMEDOUT:
rc = afu->context_reset(hwq); rc = afu->context_reset(hwq);
if (!rc && ++nretry < 2) if (rc) {
cxlflash_schedule_async_reset(cfg);
break;
}
/* fall through to retry */
case -EAGAIN:
if (++nretry < 2)
goto retry; goto retry;
cxlflash_schedule_async_reset(cfg); /* fall through to exit */
default:
break;
} }
out: out:
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment