Commit c00f62e6 authored by James Smart, committed by Martin K. Petersen

scsi: lpfc: Merge per-protocol WQ/CQ pairs into single per-cpu pair

Currently, each hardware queue, typically allocated per-cpu, consists of a
WQ/CQ pair per protocol. Meaning if both SCSI and NVMe are supported 2
WQ/CQ pairs will exist for the hardware queue. Separate queues are
unnecessary. The current implementation wastes memory backing the 2nd set
of queues, and the use of double the SLI-4 WQ/CQ's means less hardware
queues can be supported, which means there may not always be enough to have
a pair per CPU. If there is only 1 pair per CPU, more CPUs may get their
own WQ/CQ.

Rework the implementation to use a single WQ/CQ pair shared by both protocols.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 0d8af096
...@@ -734,14 +734,13 @@ struct lpfc_hba { ...@@ -734,14 +734,13 @@ struct lpfc_hba {
#define HBA_AER_ENABLED 0x1000 /* AER enabled with HBA */ #define HBA_AER_ENABLED 0x1000 /* AER enabled with HBA */
#define HBA_DEVLOSS_TMO 0x2000 /* HBA in devloss timeout */ #define HBA_DEVLOSS_TMO 0x2000 /* HBA in devloss timeout */
#define HBA_RRQ_ACTIVE 0x4000 /* process the rrq active list */ #define HBA_RRQ_ACTIVE 0x4000 /* process the rrq active list */
#define HBA_FCP_IOQ_FLUSH 0x8000 /* FCP I/O queues being flushed */ #define HBA_IOQ_FLUSH 0x8000 /* FCP/NVME I/O queues being flushed */
#define HBA_FW_DUMP_OP 0x10000 /* Skips fn reset before FW dump */ #define HBA_FW_DUMP_OP 0x10000 /* Skips fn reset before FW dump */
#define HBA_RECOVERABLE_UE 0x20000 /* Firmware supports recoverable UE */ #define HBA_RECOVERABLE_UE 0x20000 /* Firmware supports recoverable UE */
#define HBA_FORCED_LINK_SPEED 0x40000 /* #define HBA_FORCED_LINK_SPEED 0x40000 /*
* Firmware supports Forced Link Speed * Firmware supports Forced Link Speed
* capability * capability
*/ */
#define HBA_NVME_IOQ_FLUSH 0x80000 /* NVME IO queues flushed. */
#define HBA_FLOGI_ISSUED 0x100000 /* FLOGI was issued */ #define HBA_FLOGI_ISSUED 0x100000 /* FLOGI was issued */
uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/ uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
......
...@@ -326,7 +326,7 @@ void lpfc_sli_bemem_bcopy(void *, void *, uint32_t); ...@@ -326,7 +326,7 @@ void lpfc_sli_bemem_bcopy(void *, void *, uint32_t);
void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *); void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
void lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba); void lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba);
void lpfc_sli_hba_iocb_abort(struct lpfc_hba *); void lpfc_sli_hba_iocb_abort(struct lpfc_hba *);
void lpfc_sli_flush_fcp_rings(struct lpfc_hba *); void lpfc_sli_flush_io_rings(struct lpfc_hba *phba);
int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *, int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_dmabuf *); struct lpfc_dmabuf *);
struct lpfc_dmabuf *lpfc_sli_ringpostbuf_get(struct lpfc_hba *, struct lpfc_dmabuf *lpfc_sli_ringpostbuf_get(struct lpfc_hba *,
......
...@@ -416,8 +416,7 @@ lpfc_debugfs_commonxripools_data(struct lpfc_hba *phba, char *buf, int size) ...@@ -416,8 +416,7 @@ lpfc_debugfs_commonxripools_data(struct lpfc_hba *phba, char *buf, int size)
qp = &phba->sli4_hba.hdwq[lpfc_debugfs_last_xripool]; qp = &phba->sli4_hba.hdwq[lpfc_debugfs_last_xripool];
len += scnprintf(buf + len, size - len, "HdwQ %d Info ", i); len += scnprintf(buf + len, size - len, "HdwQ %d Info ", i);
spin_lock_irqsave(&qp->abts_scsi_buf_list_lock, iflag); spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
spin_lock(&qp->abts_nvme_buf_list_lock);
spin_lock(&qp->io_buf_list_get_lock); spin_lock(&qp->io_buf_list_get_lock);
spin_lock(&qp->io_buf_list_put_lock); spin_lock(&qp->io_buf_list_put_lock);
out = qp->total_io_bufs - (qp->get_io_bufs + qp->put_io_bufs + out = qp->total_io_bufs - (qp->get_io_bufs + qp->put_io_bufs +
...@@ -430,8 +429,7 @@ lpfc_debugfs_commonxripools_data(struct lpfc_hba *phba, char *buf, int size) ...@@ -430,8 +429,7 @@ lpfc_debugfs_commonxripools_data(struct lpfc_hba *phba, char *buf, int size)
qp->abts_nvme_io_bufs, out); qp->abts_nvme_io_bufs, out);
spin_unlock(&qp->io_buf_list_put_lock); spin_unlock(&qp->io_buf_list_put_lock);
spin_unlock(&qp->io_buf_list_get_lock); spin_unlock(&qp->io_buf_list_get_lock);
spin_unlock(&qp->abts_nvme_buf_list_lock); spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
spin_unlock_irqrestore(&qp->abts_scsi_buf_list_lock, iflag);
lpfc_debugfs_last_xripool++; lpfc_debugfs_last_xripool++;
if (lpfc_debugfs_last_xripool >= phba->cfg_hdw_queue) if (lpfc_debugfs_last_xripool >= phba->cfg_hdw_queue)
...@@ -533,9 +531,7 @@ lpfc_debugfs_multixripools_data(struct lpfc_hba *phba, char *buf, int size) ...@@ -533,9 +531,7 @@ lpfc_debugfs_multixripools_data(struct lpfc_hba *phba, char *buf, int size)
continue; continue;
pbl_pool = &multixri_pool->pbl_pool; pbl_pool = &multixri_pool->pbl_pool;
pvt_pool = &multixri_pool->pvt_pool; pvt_pool = &multixri_pool->pvt_pool;
txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt; txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
if (qp->nvme_wq)
txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt;
scnprintf(tmp, sizeof(tmp), scnprintf(tmp, sizeof(tmp),
"%03d: %4d %4d %4d %4d | %10d %10d ", "%03d: %4d %4d %4d %4d | %10d %10d ",
...@@ -3786,23 +3782,13 @@ lpfc_idiag_wqs_for_cq(struct lpfc_hba *phba, char *wqtype, char *pbuffer, ...@@ -3786,23 +3782,13 @@ lpfc_idiag_wqs_for_cq(struct lpfc_hba *phba, char *wqtype, char *pbuffer,
int qidx; int qidx;
for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
qp = phba->sli4_hba.hdwq[qidx].fcp_wq; qp = phba->sli4_hba.hdwq[qidx].io_wq;
if (qp->assoc_qid != cq_id) if (qp->assoc_qid != cq_id)
continue; continue;
*len = __lpfc_idiag_print_wq(qp, wqtype, pbuffer, *len); *len = __lpfc_idiag_print_wq(qp, wqtype, pbuffer, *len);
if (*len >= max_cnt) if (*len >= max_cnt)
return 1; return 1;
} }
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
qp = phba->sli4_hba.hdwq[qidx].nvme_wq;
if (qp->assoc_qid != cq_id)
continue;
*len = __lpfc_idiag_print_wq(qp, wqtype, pbuffer, *len);
if (*len >= max_cnt)
return 1;
}
}
return 0; return 0;
} }
...@@ -3868,9 +3854,9 @@ lpfc_idiag_cqs_for_eq(struct lpfc_hba *phba, char *pbuffer, ...@@ -3868,9 +3854,9 @@ lpfc_idiag_cqs_for_eq(struct lpfc_hba *phba, char *pbuffer,
struct lpfc_queue *qp; struct lpfc_queue *qp;
int rc; int rc;
qp = phba->sli4_hba.hdwq[eqidx].fcp_cq; qp = phba->sli4_hba.hdwq[eqidx].io_cq;
*len = __lpfc_idiag_print_cq(qp, "FCP", pbuffer, *len); *len = __lpfc_idiag_print_cq(qp, "IO", pbuffer, *len);
/* Reset max counter */ /* Reset max counter */
qp->CQ_max_cqe = 0; qp->CQ_max_cqe = 0;
...@@ -3878,28 +3864,11 @@ lpfc_idiag_cqs_for_eq(struct lpfc_hba *phba, char *pbuffer, ...@@ -3878,28 +3864,11 @@ lpfc_idiag_cqs_for_eq(struct lpfc_hba *phba, char *pbuffer,
if (*len >= max_cnt) if (*len >= max_cnt)
return 1; return 1;
rc = lpfc_idiag_wqs_for_cq(phba, "FCP", pbuffer, len, rc = lpfc_idiag_wqs_for_cq(phba, "IO", pbuffer, len,
max_cnt, qp->queue_id); max_cnt, qp->queue_id);
if (rc) if (rc)
return 1; return 1;
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
qp = phba->sli4_hba.hdwq[eqidx].nvme_cq;
*len = __lpfc_idiag_print_cq(qp, "NVME", pbuffer, *len);
/* Reset max counter */
qp->CQ_max_cqe = 0;
if (*len >= max_cnt)
return 1;
rc = lpfc_idiag_wqs_for_cq(phba, "NVME", pbuffer, len,
max_cnt, qp->queue_id);
if (rc)
return 1;
}
if ((eqidx < phba->cfg_nvmet_mrq) && phba->nvmet_support) { if ((eqidx < phba->cfg_nvmet_mrq) && phba->nvmet_support) {
/* NVMET CQset */ /* NVMET CQset */
qp = phba->sli4_hba.nvmet_cqset[eqidx]; qp = phba->sli4_hba.nvmet_cqset[eqidx];
...@@ -4348,7 +4317,7 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf, ...@@ -4348,7 +4317,7 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
if (phba->sli4_hba.hdwq) { if (phba->sli4_hba.hdwq) {
for (qidx = 0; qidx < phba->cfg_hdw_queue; for (qidx = 0; qidx < phba->cfg_hdw_queue;
qidx++) { qidx++) {
qp = phba->sli4_hba.hdwq[qidx].fcp_cq; qp = phba->sli4_hba.hdwq[qidx].io_cq;
if (qp && qp->queue_id == queid) { if (qp && qp->queue_id == queid) {
/* Sanity check */ /* Sanity check */
rc = lpfc_idiag_que_param_check( rc = lpfc_idiag_que_param_check(
...@@ -4360,22 +4329,6 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf, ...@@ -4360,22 +4329,6 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
} }
} }
} }
/* NVME complete queue */
if (phba->sli4_hba.hdwq) {
qidx = 0;
do {
qp = phba->sli4_hba.hdwq[qidx].nvme_cq;
if (qp && qp->queue_id == queid) {
/* Sanity check */
rc = lpfc_idiag_que_param_check(
qp, index, count);
if (rc)
goto error_out;
idiag.ptr_private = qp;
goto pass_check;
}
} while (++qidx < phba->cfg_hdw_queue);
}
goto error_out; goto error_out;
break; break;
case LPFC_IDIAG_MQ: case LPFC_IDIAG_MQ:
...@@ -4419,20 +4372,7 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf, ...@@ -4419,20 +4372,7 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
if (phba->sli4_hba.hdwq) { if (phba->sli4_hba.hdwq) {
/* FCP/SCSI work queue */ /* FCP/SCSI work queue */
for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
qp = phba->sli4_hba.hdwq[qidx].fcp_wq; qp = phba->sli4_hba.hdwq[qidx].io_wq;
if (qp && qp->queue_id == queid) {
/* Sanity check */
rc = lpfc_idiag_que_param_check(
qp, index, count);
if (rc)
goto error_out;
idiag.ptr_private = qp;
goto pass_check;
}
}
/* NVME work queue */
for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
qp = phba->sli4_hba.hdwq[qidx].nvme_wq;
if (qp && qp->queue_id == queid) { if (qp && qp->queue_id == queid) {
/* Sanity check */ /* Sanity check */
rc = lpfc_idiag_que_param_check( rc = lpfc_idiag_que_param_check(
...@@ -6442,12 +6382,7 @@ lpfc_debug_dump_all_queues(struct lpfc_hba *phba) ...@@ -6442,12 +6382,7 @@ lpfc_debug_dump_all_queues(struct lpfc_hba *phba)
lpfc_debug_dump_wq(phba, DUMP_NVMELS, 0); lpfc_debug_dump_wq(phba, DUMP_NVMELS, 0);
for (idx = 0; idx < phba->cfg_hdw_queue; idx++) for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
lpfc_debug_dump_wq(phba, DUMP_FCP, idx); lpfc_debug_dump_wq(phba, DUMP_IO, idx);
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
lpfc_debug_dump_wq(phba, DUMP_NVME, idx);
}
lpfc_debug_dump_hdr_rq(phba); lpfc_debug_dump_hdr_rq(phba);
lpfc_debug_dump_dat_rq(phba); lpfc_debug_dump_dat_rq(phba);
...@@ -6459,12 +6394,7 @@ lpfc_debug_dump_all_queues(struct lpfc_hba *phba) ...@@ -6459,12 +6394,7 @@ lpfc_debug_dump_all_queues(struct lpfc_hba *phba)
lpfc_debug_dump_cq(phba, DUMP_NVMELS, 0); lpfc_debug_dump_cq(phba, DUMP_NVMELS, 0);
for (idx = 0; idx < phba->cfg_hdw_queue; idx++) for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
lpfc_debug_dump_cq(phba, DUMP_FCP, idx); lpfc_debug_dump_cq(phba, DUMP_IO, idx);
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
lpfc_debug_dump_cq(phba, DUMP_NVME, idx);
}
/* /*
* Dump Event Queues (EQs) * Dump Event Queues (EQs)
......
...@@ -291,8 +291,7 @@ struct lpfc_idiag { ...@@ -291,8 +291,7 @@ struct lpfc_idiag {
#define LPFC_DUMP_MULTIXRIPOOL_SIZE 8192 #define LPFC_DUMP_MULTIXRIPOOL_SIZE 8192
enum { enum {
DUMP_FCP, DUMP_IO,
DUMP_NVME,
DUMP_MBX, DUMP_MBX,
DUMP_ELS, DUMP_ELS,
DUMP_NVMELS, DUMP_NVMELS,
...@@ -415,12 +414,9 @@ lpfc_debug_dump_wq(struct lpfc_hba *phba, int qtype, int wqidx) ...@@ -415,12 +414,9 @@ lpfc_debug_dump_wq(struct lpfc_hba *phba, int qtype, int wqidx)
struct lpfc_queue *wq; struct lpfc_queue *wq;
char *qtypestr; char *qtypestr;
if (qtype == DUMP_FCP) { if (qtype == DUMP_IO) {
wq = phba->sli4_hba.hdwq[wqidx].fcp_wq; wq = phba->sli4_hba.hdwq[wqidx].io_wq;
qtypestr = "FCP"; qtypestr = "IO";
} else if (qtype == DUMP_NVME) {
wq = phba->sli4_hba.hdwq[wqidx].nvme_wq;
qtypestr = "NVME";
} else if (qtype == DUMP_MBX) { } else if (qtype == DUMP_MBX) {
wq = phba->sli4_hba.mbx_wq; wq = phba->sli4_hba.mbx_wq;
qtypestr = "MBX"; qtypestr = "MBX";
...@@ -433,7 +429,7 @@ lpfc_debug_dump_wq(struct lpfc_hba *phba, int qtype, int wqidx) ...@@ -433,7 +429,7 @@ lpfc_debug_dump_wq(struct lpfc_hba *phba, int qtype, int wqidx)
} else } else
return; return;
if (qtype == DUMP_FCP || qtype == DUMP_NVME) if (qtype == DUMP_IO)
pr_err("%s WQ: WQ[Idx:%d|Qid:%d]\n", pr_err("%s WQ: WQ[Idx:%d|Qid:%d]\n",
qtypestr, wqidx, wq->queue_id); qtypestr, wqidx, wq->queue_id);
else else
...@@ -459,17 +455,13 @@ lpfc_debug_dump_cq(struct lpfc_hba *phba, int qtype, int wqidx) ...@@ -459,17 +455,13 @@ lpfc_debug_dump_cq(struct lpfc_hba *phba, int qtype, int wqidx)
char *qtypestr; char *qtypestr;
int eqidx; int eqidx;
/* fcp/nvme wq and cq are 1:1, thus same indexes */ /* io wq and cq are 1:1, thus same indexes */
eq = NULL; eq = NULL;
if (qtype == DUMP_FCP) { if (qtype == DUMP_IO) {
wq = phba->sli4_hba.hdwq[wqidx].fcp_wq; wq = phba->sli4_hba.hdwq[wqidx].io_wq;
cq = phba->sli4_hba.hdwq[wqidx].fcp_cq; cq = phba->sli4_hba.hdwq[wqidx].io_cq;
qtypestr = "FCP"; qtypestr = "IO";
} else if (qtype == DUMP_NVME) {
wq = phba->sli4_hba.hdwq[wqidx].nvme_wq;
cq = phba->sli4_hba.hdwq[wqidx].nvme_cq;
qtypestr = "NVME";
} else if (qtype == DUMP_MBX) { } else if (qtype == DUMP_MBX) {
wq = phba->sli4_hba.mbx_wq; wq = phba->sli4_hba.mbx_wq;
cq = phba->sli4_hba.mbx_cq; cq = phba->sli4_hba.mbx_cq;
...@@ -496,7 +488,7 @@ lpfc_debug_dump_cq(struct lpfc_hba *phba, int qtype, int wqidx) ...@@ -496,7 +488,7 @@ lpfc_debug_dump_cq(struct lpfc_hba *phba, int qtype, int wqidx)
eq = phba->sli4_hba.hdwq[0].hba_eq; eq = phba->sli4_hba.hdwq[0].hba_eq;
} }
if (qtype == DUMP_FCP || qtype == DUMP_NVME) if (qtype == DUMP_IO)
pr_err("%s CQ: WQ[Idx:%d|Qid%d]->CQ[Idx%d|Qid%d]" pr_err("%s CQ: WQ[Idx:%d|Qid%d]->CQ[Idx%d|Qid%d]"
"->EQ[Idx:%d|Qid:%d]:\n", "->EQ[Idx:%d|Qid:%d]:\n",
qtypestr, wqidx, wq->queue_id, wqidx, cq->queue_id, qtypestr, wqidx, wq->queue_id, wqidx, cq->queue_id,
...@@ -572,20 +564,11 @@ lpfc_debug_dump_wq_by_id(struct lpfc_hba *phba, int qid) ...@@ -572,20 +564,11 @@ lpfc_debug_dump_wq_by_id(struct lpfc_hba *phba, int qid)
int wq_idx; int wq_idx;
for (wq_idx = 0; wq_idx < phba->cfg_hdw_queue; wq_idx++) for (wq_idx = 0; wq_idx < phba->cfg_hdw_queue; wq_idx++)
if (phba->sli4_hba.hdwq[wq_idx].fcp_wq->queue_id == qid) if (phba->sli4_hba.hdwq[wq_idx].io_wq->queue_id == qid)
break; break;
if (wq_idx < phba->cfg_hdw_queue) { if (wq_idx < phba->cfg_hdw_queue) {
pr_err("FCP WQ[Idx:%d|Qid:%d]\n", wq_idx, qid); pr_err("IO WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
lpfc_debug_dump_q(phba->sli4_hba.hdwq[wq_idx].fcp_wq); lpfc_debug_dump_q(phba->sli4_hba.hdwq[wq_idx].io_wq);
return;
}
for (wq_idx = 0; wq_idx < phba->cfg_hdw_queue; wq_idx++)
if (phba->sli4_hba.hdwq[wq_idx].nvme_wq->queue_id == qid)
break;
if (wq_idx < phba->cfg_hdw_queue) {
pr_err("NVME WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
lpfc_debug_dump_q(phba->sli4_hba.hdwq[wq_idx].nvme_wq);
return; return;
} }
...@@ -654,22 +637,12 @@ lpfc_debug_dump_cq_by_id(struct lpfc_hba *phba, int qid) ...@@ -654,22 +637,12 @@ lpfc_debug_dump_cq_by_id(struct lpfc_hba *phba, int qid)
int cq_idx; int cq_idx;
for (cq_idx = 0; cq_idx < phba->cfg_hdw_queue; cq_idx++) for (cq_idx = 0; cq_idx < phba->cfg_hdw_queue; cq_idx++)
if (phba->sli4_hba.hdwq[cq_idx].fcp_cq->queue_id == qid) if (phba->sli4_hba.hdwq[cq_idx].io_cq->queue_id == qid)
break;
if (cq_idx < phba->cfg_hdw_queue) {
pr_err("FCP CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
lpfc_debug_dump_q(phba->sli4_hba.hdwq[cq_idx].fcp_cq);
return;
}
for (cq_idx = 0; cq_idx < phba->cfg_hdw_queue; cq_idx++)
if (phba->sli4_hba.hdwq[cq_idx].nvme_cq->queue_id == qid)
break; break;
if (cq_idx < phba->cfg_hdw_queue) { if (cq_idx < phba->cfg_hdw_queue) {
pr_err("NVME CQ[Idx:%d|Qid:%d]\n", cq_idx, qid); pr_err("IO CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
lpfc_debug_dump_q(phba->sli4_hba.hdwq[cq_idx].nvme_cq); lpfc_debug_dump_q(phba->sli4_hba.hdwq[cq_idx].io_cq);
return; return;
} }
......
This diff is collapsed.
...@@ -1830,7 +1830,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, ...@@ -1830,7 +1830,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
*/ */
spin_lock_irqsave(&phba->hbalock, flags); spin_lock_irqsave(&phba->hbalock, flags);
/* driver queued commands are in process of being flushed */ /* driver queued commands are in process of being flushed */
if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) { if (phba->hba_flag & HBA_IOQ_FLUSH) {
spin_unlock_irqrestore(&phba->hbalock, flags); spin_unlock_irqrestore(&phba->hbalock, flags);
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
"6139 Driver in reset cleanup - flushing " "6139 Driver in reset cleanup - flushing "
...@@ -2091,11 +2091,11 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd) ...@@ -2091,11 +2091,11 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd)
lpfc_ncmd->cur_iocbq.sli4_xritag, lpfc_ncmd->cur_iocbq.sli4_xritag,
lpfc_ncmd->cur_iocbq.iotag); lpfc_ncmd->cur_iocbq.iotag);
spin_lock_irqsave(&qp->abts_nvme_buf_list_lock, iflag); spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
list_add_tail(&lpfc_ncmd->list, list_add_tail(&lpfc_ncmd->list,
&qp->lpfc_abts_nvme_buf_list); &qp->lpfc_abts_io_buf_list);
qp->abts_nvme_io_bufs++; qp->abts_nvme_io_bufs++;
spin_unlock_irqrestore(&qp->abts_nvme_buf_list_lock, iflag); spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
} else } else
lpfc_release_io_buf(phba, (struct lpfc_io_buf *)lpfc_ncmd, qp); lpfc_release_io_buf(phba, (struct lpfc_io_buf *)lpfc_ncmd, qp);
} }
...@@ -2220,7 +2220,7 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport, ...@@ -2220,7 +2220,7 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
if (unlikely(!ret)) { if (unlikely(!ret)) {
pending = 0; pending = 0;
for (i = 0; i < phba->cfg_hdw_queue; i++) { for (i = 0; i < phba->cfg_hdw_queue; i++) {
pring = phba->sli4_hba.hdwq[i].nvme_wq->pring; pring = phba->sli4_hba.hdwq[i].io_wq->pring;
if (!pring) if (!pring)
continue; continue;
if (pring->txcmplq_cnt) if (pring->txcmplq_cnt)
...@@ -2624,6 +2624,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) ...@@ -2624,6 +2624,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort * lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort
* @phba: pointer to lpfc hba data structure. * @phba: pointer to lpfc hba data structure.
* @axri: pointer to the fcp xri abort wcqe structure. * @axri: pointer to the fcp xri abort wcqe structure.
* @lpfc_ncmd: The nvme job structure for the request being aborted.
* *
* This routine is invoked by the worker thread to process a SLI4 fast-path * This routine is invoked by the worker thread to process a SLI4 fast-path
* NVME aborted xri. Aborted NVME IO commands are completed to the transport * NVME aborted xri. Aborted NVME IO commands are completed to the transport
...@@ -2631,59 +2632,33 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) ...@@ -2631,59 +2632,33 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
**/ **/
void void
lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba, lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
struct sli4_wcqe_xri_aborted *axri, int idx) struct sli4_wcqe_xri_aborted *axri,
struct lpfc_io_buf *lpfc_ncmd)
{ {
uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
struct lpfc_io_buf *lpfc_ncmd, *next_lpfc_ncmd;
struct nvmefc_fcp_req *nvme_cmd = NULL; struct nvmefc_fcp_req *nvme_cmd = NULL;
struct lpfc_nodelist *ndlp; struct lpfc_nodelist *ndlp = lpfc_ncmd->ndlp;
struct lpfc_sli4_hdw_queue *qp;
unsigned long iflag = 0;
if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
return;
qp = &phba->sli4_hba.hdwq[idx];
spin_lock_irqsave(&phba->hbalock, iflag);
spin_lock(&qp->abts_nvme_buf_list_lock);
list_for_each_entry_safe(lpfc_ncmd, next_lpfc_ncmd,
&qp->lpfc_abts_nvme_buf_list, list) {
if (lpfc_ncmd->cur_iocbq.sli4_xritag == xri) {
list_del_init(&lpfc_ncmd->list);
qp->abts_nvme_io_bufs--;
lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
lpfc_ncmd->status = IOSTAT_SUCCESS;
spin_unlock(&qp->abts_nvme_buf_list_lock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
ndlp = lpfc_ncmd->ndlp;
if (ndlp)
lpfc_sli4_abts_err_handler(phba, ndlp, axri);
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6311 nvme_cmd x%px xri x%x tag x%x "
"abort complete and xri released\n",
lpfc_ncmd->nvmeCmd, xri,
lpfc_ncmd->cur_iocbq.iotag);
/* Aborted NVME commands are required to not complete
* before the abort exchange command fully completes.
* Once completed, it is available via the put list.
*/
if (lpfc_ncmd->nvmeCmd) {
nvme_cmd = lpfc_ncmd->nvmeCmd;
nvme_cmd->done(nvme_cmd);
lpfc_ncmd->nvmeCmd = NULL;
}
lpfc_release_nvme_buf(phba, lpfc_ncmd);
return;
}
}
spin_unlock(&qp->abts_nvme_buf_list_lock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, if (ndlp)
"6312 XRI Aborted xri x%x not found\n", xri); lpfc_sli4_abts_err_handler(phba, ndlp, axri);
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6311 nvme_cmd %p xri x%x tag x%x abort complete and "
"xri released\n",
lpfc_ncmd->nvmeCmd, xri,
lpfc_ncmd->cur_iocbq.iotag);
/* Aborted NVME commands are required to not complete
* before the abort exchange command fully completes.
* Once completed, it is available via the put list.
*/
if (lpfc_ncmd->nvmeCmd) {
nvme_cmd = lpfc_ncmd->nvmeCmd;
nvme_cmd->done(nvme_cmd);
lpfc_ncmd->nvmeCmd = NULL;
}
lpfc_release_nvme_buf(phba, lpfc_ncmd);
} }
/** /**
...@@ -2705,13 +2680,13 @@ lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba) ...@@ -2705,13 +2680,13 @@ lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.hdwq) if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.hdwq)
return; return;
/* Cycle through all NVME rings and make sure all outstanding /* Cycle through all IO rings and make sure all outstanding
* WQEs have been removed from the txcmplqs. * WQEs have been removed from the txcmplqs.
*/ */
for (i = 0; i < phba->cfg_hdw_queue; i++) { for (i = 0; i < phba->cfg_hdw_queue; i++) {
if (!phba->sli4_hba.hdwq[i].nvme_wq) if (!phba->sli4_hba.hdwq[i].io_wq)
continue; continue;
pring = phba->sli4_hba.hdwq[i].nvme_wq->pring; pring = phba->sli4_hba.hdwq[i].io_wq->pring;
if (!pring) if (!pring)
continue; continue;
......
...@@ -1026,7 +1026,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport, ...@@ -1026,7 +1026,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
* WQE release CQE * WQE release CQE
*/ */
ctxp->flag |= LPFC_NVMET_DEFER_WQFULL; ctxp->flag |= LPFC_NVMET_DEFER_WQFULL;
wq = ctxp->hdwq->nvme_wq; wq = ctxp->hdwq->io_wq;
pring = wq->pring; pring = wq->pring;
spin_lock_irqsave(&pring->ring_lock, iflags); spin_lock_irqsave(&pring->ring_lock, iflags);
list_add_tail(&nvmewqeq->list, &wq->wqfull_list); list_add_tail(&nvmewqeq->list, &wq->wqfull_list);
...@@ -1104,7 +1104,7 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport, ...@@ -1104,7 +1104,7 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
spin_unlock_irqrestore(&ctxp->ctxlock, flags); spin_unlock_irqrestore(&ctxp->ctxlock, flags);
lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
ctxp->oxid); ctxp->oxid);
wq = ctxp->hdwq->nvme_wq; wq = ctxp->hdwq->io_wq;
lpfc_nvmet_wqfull_flush(phba, wq, ctxp); lpfc_nvmet_wqfull_flush(phba, wq, ctxp);
return; return;
} }
...@@ -1918,7 +1918,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba) ...@@ -1918,7 +1918,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
if (phba->targetport) { if (phba->targetport) {
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
wq = phba->sli4_hba.hdwq[qidx].nvme_wq; wq = phba->sli4_hba.hdwq[qidx].io_wq;
lpfc_nvmet_wqfull_flush(phba, wq, NULL); lpfc_nvmet_wqfull_flush(phba, wq, NULL);
} }
tgtp->tport_unreg_cmp = &tport_unreg_cmp; tgtp->tport_unreg_cmp = &tport_unreg_cmp;
...@@ -3295,7 +3295,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, ...@@ -3295,7 +3295,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
*/ */
spin_lock_irqsave(&phba->hbalock, flags); spin_lock_irqsave(&phba->hbalock, flags);
/* driver queued commands are in process of being flushed */ /* driver queued commands are in process of being flushed */
if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) { if (phba->hba_flag & HBA_IOQ_FLUSH) {
spin_unlock_irqrestore(&phba->hbalock, flags); spin_unlock_irqrestore(&phba->hbalock, flags);
atomic_inc(&tgtp->xmt_abort_rsp_error); atomic_inc(&tgtp->xmt_abort_rsp_error);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME, lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
......
...@@ -537,29 +537,32 @@ lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport) ...@@ -537,29 +537,32 @@ lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
qp = &phba->sli4_hba.hdwq[idx]; qp = &phba->sli4_hba.hdwq[idx];
spin_lock(&qp->abts_scsi_buf_list_lock); spin_lock(&qp->abts_io_buf_list_lock);
list_for_each_entry_safe(psb, next_psb, list_for_each_entry_safe(psb, next_psb,
&qp->lpfc_abts_scsi_buf_list, list) { &qp->lpfc_abts_io_buf_list, list) {
if (psb->cur_iocbq.iocb_flag == LPFC_IO_NVME)
continue;
if (psb->rdata && psb->rdata->pnode && if (psb->rdata && psb->rdata->pnode &&
psb->rdata->pnode->vport == vport) psb->rdata->pnode->vport == vport)
psb->rdata = NULL; psb->rdata = NULL;
} }
spin_unlock(&qp->abts_scsi_buf_list_lock); spin_unlock(&qp->abts_io_buf_list_lock);
} }
spin_unlock_irqrestore(&phba->hbalock, iflag); spin_unlock_irqrestore(&phba->hbalock, iflag);
} }
/** /**
* lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort * lpfc_sli4_io_xri_aborted - Fast-path process of fcp xri abort
* @phba: pointer to lpfc hba data structure. * @phba: pointer to lpfc hba data structure.
* @axri: pointer to the fcp xri abort wcqe structure. * @axri: pointer to the fcp xri abort wcqe structure.
* *
* This routine is invoked by the worker thread to process a SLI4 fast-path * This routine is invoked by the worker thread to process a SLI4 fast-path
* FCP aborted xri. * FCP or NVME aborted xri.
**/ **/
void void
lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba, lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
struct sli4_wcqe_xri_aborted *axri, int idx) struct sli4_wcqe_xri_aborted *axri, int idx)
{ {
uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri); uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
...@@ -577,16 +580,25 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba, ...@@ -577,16 +580,25 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
qp = &phba->sli4_hba.hdwq[idx]; qp = &phba->sli4_hba.hdwq[idx];
spin_lock_irqsave(&phba->hbalock, iflag); spin_lock_irqsave(&phba->hbalock, iflag);
spin_lock(&qp->abts_scsi_buf_list_lock); spin_lock(&qp->abts_io_buf_list_lock);
list_for_each_entry_safe(psb, next_psb, list_for_each_entry_safe(psb, next_psb,
&qp->lpfc_abts_scsi_buf_list, list) { &qp->lpfc_abts_io_buf_list, list) {
if (psb->cur_iocbq.sli4_xritag == xri) { if (psb->cur_iocbq.sli4_xritag == xri) {
list_del(&psb->list); list_del_init(&psb->list);
qp->abts_scsi_io_bufs--;
psb->exch_busy = 0; psb->exch_busy = 0;
psb->status = IOSTAT_SUCCESS; psb->status = IOSTAT_SUCCESS;
spin_unlock( #ifdef BUILD_NVME
&qp->abts_scsi_buf_list_lock); if (psb->cur_iocbq.iocb_flag == LPFC_IO_NVME) {
qp->abts_nvme_io_bufs--;
spin_unlock(&qp->abts_io_buf_list_lock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
lpfc_sli4_nvme_xri_aborted(phba, axri, psb);
return;
}
#endif
qp->abts_scsi_io_bufs--;
spin_unlock(&qp->abts_io_buf_list_lock);
if (psb->rdata && psb->rdata->pnode) if (psb->rdata && psb->rdata->pnode)
ndlp = psb->rdata->pnode; ndlp = psb->rdata->pnode;
else else
...@@ -605,12 +617,12 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba, ...@@ -605,12 +617,12 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
return; return;
} }
} }
spin_unlock(&qp->abts_scsi_buf_list_lock); spin_unlock(&qp->abts_io_buf_list_lock);
for (i = 1; i <= phba->sli.last_iotag; i++) { for (i = 1; i <= phba->sli.last_iotag; i++) {
iocbq = phba->sli.iocbq_lookup[i]; iocbq = phba->sli.iocbq_lookup[i];
if (!(iocbq->iocb_flag & LPFC_IO_FCP) || if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
(iocbq->iocb_flag & LPFC_IO_LIBDFC)) (iocbq->iocb_flag & LPFC_IO_LIBDFC))
continue; continue;
if (iocbq->sli4_xritag != xri) if (iocbq->sli4_xritag != xri)
continue; continue;
...@@ -836,11 +848,11 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb) ...@@ -836,11 +848,11 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
qp = psb->hdwq; qp = psb->hdwq;
if (psb->exch_busy) { if (psb->exch_busy) {
spin_lock_irqsave(&qp->abts_scsi_buf_list_lock, iflag); spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
psb->pCmd = NULL; psb->pCmd = NULL;
list_add_tail(&psb->list, &qp->lpfc_abts_scsi_buf_list); list_add_tail(&psb->list, &qp->lpfc_abts_io_buf_list);
qp->abts_scsi_io_bufs++; qp->abts_scsi_io_bufs++;
spin_unlock_irqrestore(&qp->abts_scsi_buf_list_lock, iflag); spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
} else { } else {
lpfc_release_io_buf(phba, (struct lpfc_io_buf *)psb, qp); lpfc_release_io_buf(phba, (struct lpfc_io_buf *)psb, qp);
} }
...@@ -4800,7 +4812,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) ...@@ -4800,7 +4812,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
spin_lock_irqsave(&phba->hbalock, flags); spin_lock_irqsave(&phba->hbalock, flags);
/* driver queued commands are in process of being flushed */ /* driver queued commands are in process of being flushed */
if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) { if (phba->hba_flag & HBA_IOQ_FLUSH) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"3168 SCSI Layer abort requested I/O has been " "3168 SCSI Layer abort requested I/O has been "
"flushed by LLD.\n"); "flushed by LLD.\n");
...@@ -4821,7 +4833,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) ...@@ -4821,7 +4833,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
iocb = &lpfc_cmd->cur_iocbq; iocb = &lpfc_cmd->cur_iocbq;
if (phba->sli_rev == LPFC_SLI_REV4) { if (phba->sli_rev == LPFC_SLI_REV4) {
pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].fcp_wq->pring; pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring;
if (!pring_s4) { if (!pring_s4) {
ret = FAILED; ret = FAILED;
goto out_unlock_buf; goto out_unlock_buf;
......
This diff is collapsed.
...@@ -109,9 +109,8 @@ enum lpfc_sli4_queue_type { ...@@ -109,9 +109,8 @@ enum lpfc_sli4_queue_type {
enum lpfc_sli4_queue_subtype { enum lpfc_sli4_queue_subtype {
LPFC_NONE, LPFC_NONE,
LPFC_MBOX, LPFC_MBOX,
LPFC_FCP, LPFC_IO,
LPFC_ELS, LPFC_ELS,
LPFC_NVME,
LPFC_NVMET, LPFC_NVMET,
LPFC_NVME_LS, LPFC_NVME_LS,
LPFC_USOL LPFC_USOL
...@@ -641,22 +640,17 @@ struct lpfc_eq_intr_info { ...@@ -641,22 +640,17 @@ struct lpfc_eq_intr_info {
struct lpfc_sli4_hdw_queue { struct lpfc_sli4_hdw_queue {
/* Pointers to the constructed SLI4 queues */ /* Pointers to the constructed SLI4 queues */
struct lpfc_queue *hba_eq; /* Event queues for HBA */ struct lpfc_queue *hba_eq; /* Event queues for HBA */
struct lpfc_queue *fcp_cq; /* Fast-path FCP compl queue */ struct lpfc_queue *io_cq; /* Fast-path FCP & NVME compl queue */
struct lpfc_queue *nvme_cq; /* Fast-path NVME compl queue */ struct lpfc_queue *io_wq; /* Fast-path FCP & NVME work queue */
struct lpfc_queue *fcp_wq; /* Fast-path FCP work queue */ uint16_t io_cq_map;
struct lpfc_queue *nvme_wq; /* Fast-path NVME work queue */
uint16_t fcp_cq_map;
uint16_t nvme_cq_map;
/* Keep track of IO buffers for this hardware queue */ /* Keep track of IO buffers for this hardware queue */
spinlock_t io_buf_list_get_lock; /* Common buf alloc list lock */ spinlock_t io_buf_list_get_lock; /* Common buf alloc list lock */
struct list_head lpfc_io_buf_list_get; struct list_head lpfc_io_buf_list_get;
spinlock_t io_buf_list_put_lock; /* Common buf free list lock */ spinlock_t io_buf_list_put_lock; /* Common buf free list lock */
struct list_head lpfc_io_buf_list_put; struct list_head lpfc_io_buf_list_put;
spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */ spinlock_t abts_io_buf_list_lock; /* list of aborted IOs */
struct list_head lpfc_abts_scsi_buf_list; struct list_head lpfc_abts_io_buf_list;
spinlock_t abts_nvme_buf_list_lock; /* list of aborted NVME IOs */
struct list_head lpfc_abts_nvme_buf_list;
uint32_t total_io_bufs; uint32_t total_io_bufs;
uint32_t get_io_bufs; uint32_t get_io_bufs;
uint32_t put_io_bufs; uint32_t put_io_bufs;
...@@ -852,8 +846,8 @@ struct lpfc_sli4_hba { ...@@ -852,8 +846,8 @@ struct lpfc_sli4_hba {
struct lpfc_queue **cq_lookup; struct lpfc_queue **cq_lookup;
struct list_head lpfc_els_sgl_list; struct list_head lpfc_els_sgl_list;
struct list_head lpfc_abts_els_sgl_list; struct list_head lpfc_abts_els_sgl_list;
spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */ spinlock_t abts_io_buf_list_lock; /* list of aborted SCSI IOs */
struct list_head lpfc_abts_scsi_buf_list; struct list_head lpfc_abts_io_buf_list;
struct list_head lpfc_nvmet_sgl_list; struct list_head lpfc_nvmet_sgl_list;
spinlock_t abts_nvmet_buf_list_lock; /* list of aborted NVMET IOs */ spinlock_t abts_nvmet_buf_list_lock; /* list of aborted NVMET IOs */
struct list_head lpfc_abts_nvmet_ctx_list; struct list_head lpfc_abts_nvmet_ctx_list;
...@@ -1058,10 +1052,11 @@ int lpfc_sli4_resume_rpi(struct lpfc_nodelist *, ...@@ -1058,10 +1052,11 @@ int lpfc_sli4_resume_rpi(struct lpfc_nodelist *,
void (*)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *); void (*)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *);
void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *); void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *); void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);
void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *,
struct sli4_wcqe_xri_aborted *, int);
void lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba, void lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
struct sli4_wcqe_xri_aborted *axri, int idx); struct sli4_wcqe_xri_aborted *axri,
struct lpfc_io_buf *lpfc_ncmd);
void lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
struct sli4_wcqe_xri_aborted *axri, int idx);
void lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba, void lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
struct sli4_wcqe_xri_aborted *axri); struct sli4_wcqe_xri_aborted *axri);
void lpfc_sli4_els_xri_aborted(struct lpfc_hba *, void lpfc_sli4_els_xri_aborted(struct lpfc_hba *,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment