Commit c00f62e6 authored by James Smart, committed by Martin K. Petersen

scsi: lpfc: Merge per-protocol WQ/CQ pairs into single per-cpu pair

Currently, each hardware queue, typically allocated per CPU, consists of
a WQ/CQ pair per protocol. This means that if both SCSI and NVMe are
supported, two WQ/CQ pairs exist for each hardware queue. Separate
queues are unnecessary. The current implementation wastes memory backing
the second set of queues, and using double the SLI-4 WQ/CQs means fewer
hardware queues can be supported, so there may not always be enough to
have a pair per CPU. If each hardware queue uses only a single pair,
more CPUs may get their own WQ/CQ.

Rework the implementation so that a single WQ/CQ pair is shared by both
protocols.
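
A minimal sketch of the resulting data-structure change, with field
names and comments taken from the lpfc_sli4.h hunk below (surrounding
members elided; the "_before" tag is added here only so both layouts
can coexist in one example — the real tag is lpfc_sli4_hdw_queue in
both cases):

    struct lpfc_queue;

    /* Before: one WQ/CQ pair per protocol in each per-cpu hdwq */
    struct lpfc_sli4_hdw_queue_before {
            struct lpfc_queue *hba_eq;  /* Event queue for HBA */
            struct lpfc_queue *fcp_cq;  /* Fast-path FCP compl queue */
            struct lpfc_queue *nvme_cq; /* Fast-path NVME compl queue */
            struct lpfc_queue *fcp_wq;  /* Fast-path FCP work queue */
            struct lpfc_queue *nvme_wq; /* Fast-path NVME work queue */
            uint16_t fcp_cq_map;
            uint16_t nvme_cq_map;
            /* ... */
    };

    /* After: a single WQ/CQ pair shared by FCP and NVME */
    struct lpfc_sli4_hdw_queue {
            struct lpfc_queue *hba_eq; /* Event queue for HBA */
            struct lpfc_queue *io_cq;  /* Fast-path FCP & NVME compl queue */
            struct lpfc_queue *io_wq;  /* Fast-path FCP & NVME work queue */
            uint16_t io_cq_map;
            /* ... */
    };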
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 0d8af096
......@@ -734,14 +734,13 @@ struct lpfc_hba {
#define HBA_AER_ENABLED 0x1000 /* AER enabled with HBA */
#define HBA_DEVLOSS_TMO 0x2000 /* HBA in devloss timeout */
#define HBA_RRQ_ACTIVE 0x4000 /* process the rrq active list */
#define HBA_FCP_IOQ_FLUSH 0x8000 /* FCP I/O queues being flushed */
#define HBA_IOQ_FLUSH 0x8000 /* FCP/NVME I/O queues being flushed */
#define HBA_FW_DUMP_OP 0x10000 /* Skips fn reset before FW dump */
#define HBA_RECOVERABLE_UE 0x20000 /* Firmware supports recoverable UE */
#define HBA_FORCED_LINK_SPEED 0x40000 /*
* Firmware supports Forced Link Speed
* capability
*/
#define HBA_NVME_IOQ_FLUSH 0x80000 /* NVME IO queues flushed. */
#define HBA_FLOGI_ISSUED 0x100000 /* FLOGI was issued */
uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
......
......@@ -326,7 +326,7 @@ void lpfc_sli_bemem_bcopy(void *, void *, uint32_t);
void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
void lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba);
void lpfc_sli_hba_iocb_abort(struct lpfc_hba *);
void lpfc_sli_flush_fcp_rings(struct lpfc_hba *);
void lpfc_sli_flush_io_rings(struct lpfc_hba *phba);
int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_dmabuf *);
struct lpfc_dmabuf *lpfc_sli_ringpostbuf_get(struct lpfc_hba *,
......
......@@ -416,8 +416,7 @@ lpfc_debugfs_commonxripools_data(struct lpfc_hba *phba, char *buf, int size)
qp = &phba->sli4_hba.hdwq[lpfc_debugfs_last_xripool];
len += scnprintf(buf + len, size - len, "HdwQ %d Info ", i);
spin_lock_irqsave(&qp->abts_scsi_buf_list_lock, iflag);
spin_lock(&qp->abts_nvme_buf_list_lock);
spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
spin_lock(&qp->io_buf_list_get_lock);
spin_lock(&qp->io_buf_list_put_lock);
out = qp->total_io_bufs - (qp->get_io_bufs + qp->put_io_bufs +
......@@ -430,8 +429,7 @@ lpfc_debugfs_commonxripools_data(struct lpfc_hba *phba, char *buf, int size)
qp->abts_nvme_io_bufs, out);
spin_unlock(&qp->io_buf_list_put_lock);
spin_unlock(&qp->io_buf_list_get_lock);
spin_unlock(&qp->abts_nvme_buf_list_lock);
spin_unlock_irqrestore(&qp->abts_scsi_buf_list_lock, iflag);
spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
lpfc_debugfs_last_xripool++;
if (lpfc_debugfs_last_xripool >= phba->cfg_hdw_queue)
......@@ -533,9 +531,7 @@ lpfc_debugfs_multixripools_data(struct lpfc_hba *phba, char *buf, int size)
continue;
pbl_pool = &multixri_pool->pbl_pool;
pvt_pool = &multixri_pool->pvt_pool;
txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt;
if (qp->nvme_wq)
txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt;
txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
scnprintf(tmp, sizeof(tmp),
"%03d: %4d %4d %4d %4d | %10d %10d ",
......@@ -3786,23 +3782,13 @@ lpfc_idiag_wqs_for_cq(struct lpfc_hba *phba, char *wqtype, char *pbuffer,
int qidx;
for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
qp = phba->sli4_hba.hdwq[qidx].fcp_wq;
qp = phba->sli4_hba.hdwq[qidx].io_wq;
if (qp->assoc_qid != cq_id)
continue;
*len = __lpfc_idiag_print_wq(qp, wqtype, pbuffer, *len);
if (*len >= max_cnt)
return 1;
}
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
qp = phba->sli4_hba.hdwq[qidx].nvme_wq;
if (qp->assoc_qid != cq_id)
continue;
*len = __lpfc_idiag_print_wq(qp, wqtype, pbuffer, *len);
if (*len >= max_cnt)
return 1;
}
}
return 0;
}
......@@ -3868,9 +3854,9 @@ lpfc_idiag_cqs_for_eq(struct lpfc_hba *phba, char *pbuffer,
struct lpfc_queue *qp;
int rc;
qp = phba->sli4_hba.hdwq[eqidx].fcp_cq;
qp = phba->sli4_hba.hdwq[eqidx].io_cq;
*len = __lpfc_idiag_print_cq(qp, "FCP", pbuffer, *len);
*len = __lpfc_idiag_print_cq(qp, "IO", pbuffer, *len);
/* Reset max counter */
qp->CQ_max_cqe = 0;
......@@ -3878,28 +3864,11 @@ lpfc_idiag_cqs_for_eq(struct lpfc_hba *phba, char *pbuffer,
if (*len >= max_cnt)
return 1;
rc = lpfc_idiag_wqs_for_cq(phba, "FCP", pbuffer, len,
rc = lpfc_idiag_wqs_for_cq(phba, "IO", pbuffer, len,
max_cnt, qp->queue_id);
if (rc)
return 1;
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
qp = phba->sli4_hba.hdwq[eqidx].nvme_cq;
*len = __lpfc_idiag_print_cq(qp, "NVME", pbuffer, *len);
/* Reset max counter */
qp->CQ_max_cqe = 0;
if (*len >= max_cnt)
return 1;
rc = lpfc_idiag_wqs_for_cq(phba, "NVME", pbuffer, len,
max_cnt, qp->queue_id);
if (rc)
return 1;
}
if ((eqidx < phba->cfg_nvmet_mrq) && phba->nvmet_support) {
/* NVMET CQset */
qp = phba->sli4_hba.nvmet_cqset[eqidx];
......@@ -4348,7 +4317,7 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
if (phba->sli4_hba.hdwq) {
for (qidx = 0; qidx < phba->cfg_hdw_queue;
qidx++) {
qp = phba->sli4_hba.hdwq[qidx].fcp_cq;
qp = phba->sli4_hba.hdwq[qidx].io_cq;
if (qp && qp->queue_id == queid) {
/* Sanity check */
rc = lpfc_idiag_que_param_check(
......@@ -4360,22 +4329,6 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
}
}
}
/* NVME complete queue */
if (phba->sli4_hba.hdwq) {
qidx = 0;
do {
qp = phba->sli4_hba.hdwq[qidx].nvme_cq;
if (qp && qp->queue_id == queid) {
/* Sanity check */
rc = lpfc_idiag_que_param_check(
qp, index, count);
if (rc)
goto error_out;
idiag.ptr_private = qp;
goto pass_check;
}
} while (++qidx < phba->cfg_hdw_queue);
}
goto error_out;
break;
case LPFC_IDIAG_MQ:
......@@ -4419,20 +4372,7 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
if (phba->sli4_hba.hdwq) {
/* FCP/SCSI work queue */
for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
qp = phba->sli4_hba.hdwq[qidx].fcp_wq;
if (qp && qp->queue_id == queid) {
/* Sanity check */
rc = lpfc_idiag_que_param_check(
qp, index, count);
if (rc)
goto error_out;
idiag.ptr_private = qp;
goto pass_check;
}
}
/* NVME work queue */
for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
qp = phba->sli4_hba.hdwq[qidx].nvme_wq;
qp = phba->sli4_hba.hdwq[qidx].io_wq;
if (qp && qp->queue_id == queid) {
/* Sanity check */
rc = lpfc_idiag_que_param_check(
......@@ -6442,12 +6382,7 @@ lpfc_debug_dump_all_queues(struct lpfc_hba *phba)
lpfc_debug_dump_wq(phba, DUMP_NVMELS, 0);
for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
lpfc_debug_dump_wq(phba, DUMP_FCP, idx);
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
lpfc_debug_dump_wq(phba, DUMP_NVME, idx);
}
lpfc_debug_dump_wq(phba, DUMP_IO, idx);
lpfc_debug_dump_hdr_rq(phba);
lpfc_debug_dump_dat_rq(phba);
......@@ -6459,12 +6394,7 @@ lpfc_debug_dump_all_queues(struct lpfc_hba *phba)
lpfc_debug_dump_cq(phba, DUMP_NVMELS, 0);
for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
lpfc_debug_dump_cq(phba, DUMP_FCP, idx);
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
lpfc_debug_dump_cq(phba, DUMP_NVME, idx);
}
lpfc_debug_dump_cq(phba, DUMP_IO, idx);
/*
* Dump Event Queues (EQs)
......
......@@ -291,8 +291,7 @@ struct lpfc_idiag {
#define LPFC_DUMP_MULTIXRIPOOL_SIZE 8192
enum {
DUMP_FCP,
DUMP_NVME,
DUMP_IO,
DUMP_MBX,
DUMP_ELS,
DUMP_NVMELS,
......@@ -415,12 +414,9 @@ lpfc_debug_dump_wq(struct lpfc_hba *phba, int qtype, int wqidx)
struct lpfc_queue *wq;
char *qtypestr;
if (qtype == DUMP_FCP) {
wq = phba->sli4_hba.hdwq[wqidx].fcp_wq;
qtypestr = "FCP";
} else if (qtype == DUMP_NVME) {
wq = phba->sli4_hba.hdwq[wqidx].nvme_wq;
qtypestr = "NVME";
if (qtype == DUMP_IO) {
wq = phba->sli4_hba.hdwq[wqidx].io_wq;
qtypestr = "IO";
} else if (qtype == DUMP_MBX) {
wq = phba->sli4_hba.mbx_wq;
qtypestr = "MBX";
......@@ -433,7 +429,7 @@ lpfc_debug_dump_wq(struct lpfc_hba *phba, int qtype, int wqidx)
} else
return;
if (qtype == DUMP_FCP || qtype == DUMP_NVME)
if (qtype == DUMP_IO)
pr_err("%s WQ: WQ[Idx:%d|Qid:%d]\n",
qtypestr, wqidx, wq->queue_id);
else
......@@ -459,17 +455,13 @@ lpfc_debug_dump_cq(struct lpfc_hba *phba, int qtype, int wqidx)
char *qtypestr;
int eqidx;
/* fcp/nvme wq and cq are 1:1, thus same indexes */
/* io wq and cq are 1:1, thus same indexes */
eq = NULL;
if (qtype == DUMP_FCP) {
wq = phba->sli4_hba.hdwq[wqidx].fcp_wq;
cq = phba->sli4_hba.hdwq[wqidx].fcp_cq;
qtypestr = "FCP";
} else if (qtype == DUMP_NVME) {
wq = phba->sli4_hba.hdwq[wqidx].nvme_wq;
cq = phba->sli4_hba.hdwq[wqidx].nvme_cq;
qtypestr = "NVME";
if (qtype == DUMP_IO) {
wq = phba->sli4_hba.hdwq[wqidx].io_wq;
cq = phba->sli4_hba.hdwq[wqidx].io_cq;
qtypestr = "IO";
} else if (qtype == DUMP_MBX) {
wq = phba->sli4_hba.mbx_wq;
cq = phba->sli4_hba.mbx_cq;
......@@ -496,7 +488,7 @@ lpfc_debug_dump_cq(struct lpfc_hba *phba, int qtype, int wqidx)
eq = phba->sli4_hba.hdwq[0].hba_eq;
}
if (qtype == DUMP_FCP || qtype == DUMP_NVME)
if (qtype == DUMP_IO)
pr_err("%s CQ: WQ[Idx:%d|Qid%d]->CQ[Idx%d|Qid%d]"
"->EQ[Idx:%d|Qid:%d]:\n",
qtypestr, wqidx, wq->queue_id, wqidx, cq->queue_id,
......@@ -572,20 +564,11 @@ lpfc_debug_dump_wq_by_id(struct lpfc_hba *phba, int qid)
int wq_idx;
for (wq_idx = 0; wq_idx < phba->cfg_hdw_queue; wq_idx++)
if (phba->sli4_hba.hdwq[wq_idx].fcp_wq->queue_id == qid)
if (phba->sli4_hba.hdwq[wq_idx].io_wq->queue_id == qid)
break;
if (wq_idx < phba->cfg_hdw_queue) {
pr_err("FCP WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
lpfc_debug_dump_q(phba->sli4_hba.hdwq[wq_idx].fcp_wq);
return;
}
for (wq_idx = 0; wq_idx < phba->cfg_hdw_queue; wq_idx++)
if (phba->sli4_hba.hdwq[wq_idx].nvme_wq->queue_id == qid)
break;
if (wq_idx < phba->cfg_hdw_queue) {
pr_err("NVME WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
lpfc_debug_dump_q(phba->sli4_hba.hdwq[wq_idx].nvme_wq);
pr_err("IO WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
lpfc_debug_dump_q(phba->sli4_hba.hdwq[wq_idx].io_wq);
return;
}
......@@ -654,22 +637,12 @@ lpfc_debug_dump_cq_by_id(struct lpfc_hba *phba, int qid)
int cq_idx;
for (cq_idx = 0; cq_idx < phba->cfg_hdw_queue; cq_idx++)
if (phba->sli4_hba.hdwq[cq_idx].fcp_cq->queue_id == qid)
break;
if (cq_idx < phba->cfg_hdw_queue) {
pr_err("FCP CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
lpfc_debug_dump_q(phba->sli4_hba.hdwq[cq_idx].fcp_cq);
return;
}
for (cq_idx = 0; cq_idx < phba->cfg_hdw_queue; cq_idx++)
if (phba->sli4_hba.hdwq[cq_idx].nvme_cq->queue_id == qid)
if (phba->sli4_hba.hdwq[cq_idx].io_cq->queue_id == qid)
break;
if (cq_idx < phba->cfg_hdw_queue) {
pr_err("NVME CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
lpfc_debug_dump_q(phba->sli4_hba.hdwq[cq_idx].nvme_cq);
pr_err("IO CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
lpfc_debug_dump_q(phba->sli4_hba.hdwq[cq_idx].io_cq);
return;
}
......
......@@ -1830,7 +1830,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
*/
spin_lock_irqsave(&phba->hbalock, flags);
/* driver queued commands are in process of being flushed */
if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
if (phba->hba_flag & HBA_IOQ_FLUSH) {
spin_unlock_irqrestore(&phba->hbalock, flags);
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
"6139 Driver in reset cleanup - flushing "
......@@ -2091,11 +2091,11 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd)
lpfc_ncmd->cur_iocbq.sli4_xritag,
lpfc_ncmd->cur_iocbq.iotag);
spin_lock_irqsave(&qp->abts_nvme_buf_list_lock, iflag);
spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
list_add_tail(&lpfc_ncmd->list,
&qp->lpfc_abts_nvme_buf_list);
&qp->lpfc_abts_io_buf_list);
qp->abts_nvme_io_bufs++;
spin_unlock_irqrestore(&qp->abts_nvme_buf_list_lock, iflag);
spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
} else
lpfc_release_io_buf(phba, (struct lpfc_io_buf *)lpfc_ncmd, qp);
}
......@@ -2220,7 +2220,7 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
if (unlikely(!ret)) {
pending = 0;
for (i = 0; i < phba->cfg_hdw_queue; i++) {
pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
pring = phba->sli4_hba.hdwq[i].io_wq->pring;
if (!pring)
continue;
if (pring->txcmplq_cnt)
......@@ -2624,6 +2624,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort
* @phba: pointer to lpfc hba data structure.
* @axri: pointer to the fcp xri abort wcqe structure.
* @lpfc_ncmd: The nvme job structure for the request being aborted.
*
* This routine is invoked by the worker thread to process a SLI4 fast-path
* NVME aborted xri. Aborted NVME IO commands are completed to the transport
......@@ -2631,37 +2632,20 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
**/
void
lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
struct sli4_wcqe_xri_aborted *axri, int idx)
struct sli4_wcqe_xri_aborted *axri,
struct lpfc_io_buf *lpfc_ncmd)
{
uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
struct lpfc_io_buf *lpfc_ncmd, *next_lpfc_ncmd;
struct nvmefc_fcp_req *nvme_cmd = NULL;
struct lpfc_nodelist *ndlp;
struct lpfc_sli4_hdw_queue *qp;
unsigned long iflag = 0;
struct lpfc_nodelist *ndlp = lpfc_ncmd->ndlp;
if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
return;
qp = &phba->sli4_hba.hdwq[idx];
spin_lock_irqsave(&phba->hbalock, iflag);
spin_lock(&qp->abts_nvme_buf_list_lock);
list_for_each_entry_safe(lpfc_ncmd, next_lpfc_ncmd,
&qp->lpfc_abts_nvme_buf_list, list) {
if (lpfc_ncmd->cur_iocbq.sli4_xritag == xri) {
list_del_init(&lpfc_ncmd->list);
qp->abts_nvme_io_bufs--;
lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
lpfc_ncmd->status = IOSTAT_SUCCESS;
spin_unlock(&qp->abts_nvme_buf_list_lock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
ndlp = lpfc_ncmd->ndlp;
if (ndlp)
lpfc_sli4_abts_err_handler(phba, ndlp, axri);
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6311 nvme_cmd x%px xri x%x tag x%x "
"abort complete and xri released\n",
"6311 nvme_cmd %p xri x%x tag x%x abort complete and "
"xri released\n",
lpfc_ncmd->nvmeCmd, xri,
lpfc_ncmd->cur_iocbq.iotag);
......@@ -2675,15 +2659,6 @@ lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
lpfc_ncmd->nvmeCmd = NULL;
}
lpfc_release_nvme_buf(phba, lpfc_ncmd);
return;
}
}
spin_unlock(&qp->abts_nvme_buf_list_lock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6312 XRI Aborted xri x%x not found\n", xri);
}
/**
......@@ -2705,13 +2680,13 @@ lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.hdwq)
return;
/* Cycle through all NVME rings and make sure all outstanding
/* Cycle through all IO rings and make sure all outstanding
* WQEs have been removed from the txcmplqs.
*/
for (i = 0; i < phba->cfg_hdw_queue; i++) {
if (!phba->sli4_hba.hdwq[i].nvme_wq)
if (!phba->sli4_hba.hdwq[i].io_wq)
continue;
pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
pring = phba->sli4_hba.hdwq[i].io_wq->pring;
if (!pring)
continue;
......
......@@ -1026,7 +1026,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
* WQE release CQE
*/
ctxp->flag |= LPFC_NVMET_DEFER_WQFULL;
wq = ctxp->hdwq->nvme_wq;
wq = ctxp->hdwq->io_wq;
pring = wq->pring;
spin_lock_irqsave(&pring->ring_lock, iflags);
list_add_tail(&nvmewqeq->list, &wq->wqfull_list);
......@@ -1104,7 +1104,7 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
ctxp->oxid);
wq = ctxp->hdwq->nvme_wq;
wq = ctxp->hdwq->io_wq;
lpfc_nvmet_wqfull_flush(phba, wq, ctxp);
return;
}
......@@ -1918,7 +1918,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
if (phba->targetport) {
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
wq = phba->sli4_hba.hdwq[qidx].nvme_wq;
wq = phba->sli4_hba.hdwq[qidx].io_wq;
lpfc_nvmet_wqfull_flush(phba, wq, NULL);
}
tgtp->tport_unreg_cmp = &tport_unreg_cmp;
......@@ -3295,7 +3295,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
*/
spin_lock_irqsave(&phba->hbalock, flags);
/* driver queued commands are in process of being flushed */
if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
if (phba->hba_flag & HBA_IOQ_FLUSH) {
spin_unlock_irqrestore(&phba->hbalock, flags);
atomic_inc(&tgtp->xmt_abort_rsp_error);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
......
......@@ -537,28 +537,31 @@ lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
qp = &phba->sli4_hba.hdwq[idx];
spin_lock(&qp->abts_scsi_buf_list_lock);
spin_lock(&qp->abts_io_buf_list_lock);
list_for_each_entry_safe(psb, next_psb,
&qp->lpfc_abts_scsi_buf_list, list) {
&qp->lpfc_abts_io_buf_list, list) {
if (psb->cur_iocbq.iocb_flag == LPFC_IO_NVME)
continue;
if (psb->rdata && psb->rdata->pnode &&
psb->rdata->pnode->vport == vport)
psb->rdata = NULL;
}
spin_unlock(&qp->abts_scsi_buf_list_lock);
spin_unlock(&qp->abts_io_buf_list_lock);
}
spin_unlock_irqrestore(&phba->hbalock, iflag);
}
/**
* lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort
* lpfc_sli4_io_xri_aborted - Fast-path process of fcp xri abort
* @phba: pointer to lpfc hba data structure.
* @axri: pointer to the fcp xri abort wcqe structure.
*
* This routine is invoked by the worker thread to process a SLI4 fast-path
* FCP aborted xri.
* FCP or NVME aborted xri.
**/
void
lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
struct sli4_wcqe_xri_aborted *axri, int idx)
{
uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
......@@ -577,16 +580,25 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
qp = &phba->sli4_hba.hdwq[idx];
spin_lock_irqsave(&phba->hbalock, iflag);
spin_lock(&qp->abts_scsi_buf_list_lock);
spin_lock(&qp->abts_io_buf_list_lock);
list_for_each_entry_safe(psb, next_psb,
&qp->lpfc_abts_scsi_buf_list, list) {
&qp->lpfc_abts_io_buf_list, list) {
if (psb->cur_iocbq.sli4_xritag == xri) {
list_del(&psb->list);
qp->abts_scsi_io_bufs--;
list_del_init(&psb->list);
psb->exch_busy = 0;
psb->status = IOSTAT_SUCCESS;
spin_unlock(
&qp->abts_scsi_buf_list_lock);
#ifdef BUILD_NVME
if (psb->cur_iocbq.iocb_flag == LPFC_IO_NVME) {
qp->abts_nvme_io_bufs--;
spin_unlock(&qp->abts_io_buf_list_lock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
lpfc_sli4_nvme_xri_aborted(phba, axri, psb);
return;
}
#endif
qp->abts_scsi_io_bufs--;
spin_unlock(&qp->abts_io_buf_list_lock);
if (psb->rdata && psb->rdata->pnode)
ndlp = psb->rdata->pnode;
else
......@@ -605,7 +617,7 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
return;
}
}
spin_unlock(&qp->abts_scsi_buf_list_lock);
spin_unlock(&qp->abts_io_buf_list_lock);
for (i = 1; i <= phba->sli.last_iotag; i++) {
iocbq = phba->sli.iocbq_lookup[i];
......@@ -836,11 +848,11 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
qp = psb->hdwq;
if (psb->exch_busy) {
spin_lock_irqsave(&qp->abts_scsi_buf_list_lock, iflag);
spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
psb->pCmd = NULL;
list_add_tail(&psb->list, &qp->lpfc_abts_scsi_buf_list);
list_add_tail(&psb->list, &qp->lpfc_abts_io_buf_list);
qp->abts_scsi_io_bufs++;
spin_unlock_irqrestore(&qp->abts_scsi_buf_list_lock, iflag);
spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
} else {
lpfc_release_io_buf(phba, (struct lpfc_io_buf *)psb, qp);
}
......@@ -4800,7 +4812,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
spin_lock_irqsave(&phba->hbalock, flags);
/* driver queued commands are in process of being flushed */
if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
if (phba->hba_flag & HBA_IOQ_FLUSH) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"3168 SCSI Layer abort requested I/O has been "
"flushed by LLD.\n");
......@@ -4821,7 +4833,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
iocb = &lpfc_cmd->cur_iocbq;
if (phba->sli_rev == LPFC_SLI_REV4) {
pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].fcp_wq->pring;
pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring;
if (!pring_s4) {
ret = FAILED;
goto out_unlock_buf;
......
......@@ -109,9 +109,8 @@ enum lpfc_sli4_queue_type {
enum lpfc_sli4_queue_subtype {
LPFC_NONE,
LPFC_MBOX,
LPFC_FCP,
LPFC_IO,
LPFC_ELS,
LPFC_NVME,
LPFC_NVMET,
LPFC_NVME_LS,
LPFC_USOL
......@@ -641,22 +640,17 @@ struct lpfc_eq_intr_info {
struct lpfc_sli4_hdw_queue {
/* Pointers to the constructed SLI4 queues */
struct lpfc_queue *hba_eq; /* Event queues for HBA */
struct lpfc_queue *fcp_cq; /* Fast-path FCP compl queue */
struct lpfc_queue *nvme_cq; /* Fast-path NVME compl queue */
struct lpfc_queue *fcp_wq; /* Fast-path FCP work queue */
struct lpfc_queue *nvme_wq; /* Fast-path NVME work queue */
uint16_t fcp_cq_map;
uint16_t nvme_cq_map;
struct lpfc_queue *io_cq; /* Fast-path FCP & NVME compl queue */
struct lpfc_queue *io_wq; /* Fast-path FCP & NVME work queue */
uint16_t io_cq_map;
/* Keep track of IO buffers for this hardware queue */
spinlock_t io_buf_list_get_lock; /* Common buf alloc list lock */
struct list_head lpfc_io_buf_list_get;
spinlock_t io_buf_list_put_lock; /* Common buf free list lock */
struct list_head lpfc_io_buf_list_put;
spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
struct list_head lpfc_abts_scsi_buf_list;
spinlock_t abts_nvme_buf_list_lock; /* list of aborted NVME IOs */
struct list_head lpfc_abts_nvme_buf_list;
spinlock_t abts_io_buf_list_lock; /* list of aborted IOs */
struct list_head lpfc_abts_io_buf_list;
uint32_t total_io_bufs;
uint32_t get_io_bufs;
uint32_t put_io_bufs;
......@@ -852,8 +846,8 @@ struct lpfc_sli4_hba {
struct lpfc_queue **cq_lookup;
struct list_head lpfc_els_sgl_list;
struct list_head lpfc_abts_els_sgl_list;
spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
struct list_head lpfc_abts_scsi_buf_list;
spinlock_t abts_io_buf_list_lock; /* list of aborted SCSI IOs */
struct list_head lpfc_abts_io_buf_list;
struct list_head lpfc_nvmet_sgl_list;
spinlock_t abts_nvmet_buf_list_lock; /* list of aborted NVMET IOs */
struct list_head lpfc_abts_nvmet_ctx_list;
......@@ -1058,9 +1052,10 @@ int lpfc_sli4_resume_rpi(struct lpfc_nodelist *,
void (*)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *);
void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);
void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *,
struct sli4_wcqe_xri_aborted *, int);
void lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
struct sli4_wcqe_xri_aborted *axri,
struct lpfc_io_buf *lpfc_ncmd);
void lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
struct sli4_wcqe_xri_aborted *axri, int idx);
void lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
struct sli4_wcqe_xri_aborted *axri);
......