Commit a680a929 authored by James Smart, committed by Martin K. Petersen

scsi: lpfc: SLI path split: Refactor lpfc_iocbq

Currently, the SLI3 and SLI4 data paths use the same lpfc_iocbq structure.
This is a "common" structure, but many of its members refer to SLI-rev
specific entities, which can lead the developer astray as to what they
actually mean, what they should be set to, or when they should be used.

This first patch prepares the lpfc_iocbq structure so that elements common
to both SLI3 and SLI4 data paths are more appropriately named, making it
clear they apply generically.

Field names based on 'iocb' (SLI3) or 'wqe' (SLI4) that are actually
generic to both paths are renamed to use 'cmd' (a simplified sketch of the
result follows the list):

 - iocb_flag is renamed to cmd_flag

 - lpfc_vmid_iocb_tag is renamed to lpfc_vmid_tag

 - fabric_iocb_cmpl is renamed to fabric_cmd_cmpl

 - wait_iocb_cmpl is renamed to wait_cmd_cmpl

 - iocb_cmpl and wqe_cmpl are combined and renamed to cmd_cmpl

 - rsvd2 member is renamed to num_bdes due to pre-existing usage
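
A simplified, abridged sketch of the renamed common members (only the
renamed fields are shown here; the lpfc_sli.h hunk at the end of the diff
is authoritative):

	struct lpfc_iocbq {
		/* ... SLI-rev specific and unrelated members omitted ... */
		u8  num_bdes;                  /* was rsvd2; BDE count for the request */
		u32 cmd_flag;                  /* was iocb_flag; LPFC_IO_* state bits */
		union lpfc_vmid_tag vmid_tag;  /* was union lpfc_vmid_iocb_tag */

		/* Completion callbacks now share one SLI-rev agnostic prototype:
		 * command iocbq in, response iocbq out.
		 */
		void (*fabric_cmd_cmpl)(struct lpfc_hba *phba, struct lpfc_iocbq *cmd,
					struct lpfc_iocbq *rsp);  /* was fabric_iocb_cmpl */
		void (*wait_cmd_cmpl)(struct lpfc_hba *phba, struct lpfc_iocbq *cmd,
				      struct lpfc_iocbq *rsp);    /* was wait_iocb_cmpl */
		void (*cmd_cmpl)(struct lpfc_hba *phba, struct lpfc_iocbq *cmd,
				 struct lpfc_iocbq *rsp);         /* replaces iocb_cmpl/wqe_cmpl */
	};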

The structure name itself retains the "iocb" reference, as changing it to a
more relevant "job" or "cmd" title would induce many hundreds of line
changes for only a name change.

lpfc_post_buffer is also renamed to lpfc_sli3_post_buffer to indicate use
in the SLI3 path only.
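
The practical effect on SLI-4 completion handlers (visible in the
lpfc_nvme.c hunks below) is that the WCQE is no longer passed as a separate
argument; it now rides inside the response iocbq as wcqe_cmpl. A hedged
sketch of the resulting handler shape, using a hypothetical handler name:

	static void
	example_cmd_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			 struct lpfc_iocbq *rspwqe)
	{
		/* SLI-4 completion status is pulled out of the response iocbq */
		struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
		uint32_t status = bf_get(lpfc_wcqe_c_status, wcqe) &
				  LPFC_IOCB_STATUS_MASK;

		/* ... handler-specific completion processing using status ... */
	}

Issuing paths then assign the single cmd_cmpl pointer (for example,
pwqeq->cmd_cmpl = lpfc_nvme_io_cmd_cmpl in lpfc_get_nvme_buf()) instead of
choosing between iocb_cmpl and wqe_cmpl.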

Link: https://lore.kernel.org/r/20220225022308.16486-2-jsmart2021@gmail.com
Co-developed-by: Justin Tee <justin.tee@broadcom.com>
Signed-off-by: Justin Tee <justin.tee@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent dc264641
@@ -325,7 +325,7 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
/* Close the timeout handler abort window */
spin_lock_irqsave(&phba->hbalock, flags);
cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
cmdiocbq->cmd_flag &= ~LPFC_IO_CMD_OUTSTANDING;
spin_unlock_irqrestore(&phba->hbalock, flags);
iocb = &dd_data->context_un.iocb;
@@ -481,11 +481,11 @@ lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
cmd->ulpOwner = OWN_CHIP;
cmdiocbq->vport = phba->pport;
cmdiocbq->context3 = bmp;
cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
timeout = phba->fc_ratov * 2;
cmd->ulpTimeout = timeout;
cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
cmdiocbq->cmd_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
cmdiocbq->context1 = dd_data;
cmdiocbq->context2 = cmp;
cmdiocbq->context3 = bmp;
@@ -516,9 +516,9 @@ lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
if (iocb_stat == IOCB_SUCCESS) {
spin_lock_irqsave(&phba->hbalock, flags);
/* make sure the I/O had not been completed yet */
if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
if (cmdiocbq->cmd_flag & LPFC_IO_LIBDFC) {
/* open up abort window to timeout handler */
cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
cmdiocbq->cmd_flag |= LPFC_IO_CMD_OUTSTANDING;
}
spin_unlock_irqrestore(&phba->hbalock, flags);
return 0; /* done for now */
@@ -600,7 +600,7 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
/* Close the timeout handler abort window */
spin_lock_irqsave(&phba->hbalock, flags);
cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
cmdiocbq->cmd_flag &= ~LPFC_IO_CMD_OUTSTANDING;
spin_unlock_irqrestore(&phba->hbalock, flags);
rsp = &rspiocbq->iocb;
@@ -726,10 +726,10 @@ lpfc_bsg_rport_els(struct bsg_job *job)
cmdiocbq->iocb.ulpContext = phba->sli4_hba.rpi_ids[rpi];
else
cmdiocbq->iocb.ulpContext = rpi;
cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
cmdiocbq->context1 = dd_data;
cmdiocbq->context_un.ndlp = ndlp;
cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
cmdiocbq->cmd_cmpl = lpfc_bsg_rport_els_cmp;
dd_data->type = TYPE_IOCB;
dd_data->set_job = job;
dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
@@ -757,9 +757,9 @@ lpfc_bsg_rport_els(struct bsg_job *job)
if (rc == IOCB_SUCCESS) {
spin_lock_irqsave(&phba->hbalock, flags);
/* make sure the I/O had not been completed/released */
if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
if (cmdiocbq->cmd_flag & LPFC_IO_LIBDFC) {
/* open up abort window to timeout handler */
cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
cmdiocbq->cmd_flag |= LPFC_IO_CMD_OUTSTANDING;
}
spin_unlock_irqrestore(&phba->hbalock, flags);
return 0; /* done for now */
@@ -1053,7 +1053,7 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
lpfc_in_buf_free(phba,
dmabuf);
} else {
lpfc_post_buffer(phba,
lpfc_sli3_post_buffer(phba,
pring,
1);
}
@@ -1061,7 +1061,7 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
default:
if (!(phba->sli3_options &
LPFC_SLI3_HBQ_ENABLED))
lpfc_post_buffer(phba,
lpfc_sli3_post_buffer(phba,
pring,
1);
break;
@@ -1395,7 +1395,7 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
/* Close the timeout handler abort window */
spin_lock_irqsave(&phba->hbalock, flags);
cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
cmdiocbq->cmd_flag &= ~LPFC_IO_CMD_OUTSTANDING;
spin_unlock_irqrestore(&phba->hbalock, flags);
ndlp = dd_data->context_un.iocb.ndlp;
@@ -1549,13 +1549,13 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag,
"2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n",
icmd->ulpContext, icmd->ulpIoTag, tag, phba->link_state);
ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
ctiocb->cmd_flag |= LPFC_IO_LIBDFC;
ctiocb->vport = phba->pport;
ctiocb->context1 = dd_data;
ctiocb->context2 = cmp;
ctiocb->context3 = bmp;
ctiocb->context_un.ndlp = ndlp;
ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;
ctiocb->cmd_cmpl = lpfc_issue_ct_rsp_cmp;
dd_data->type = TYPE_IOCB;
dd_data->set_job = job;
@@ -1582,9 +1582,9 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag,
if (rc == IOCB_SUCCESS) {
spin_lock_irqsave(&phba->hbalock, flags);
/* make sure the I/O had not been completed/released */
if (ctiocb->iocb_flag & LPFC_IO_LIBDFC) {
if (ctiocb->cmd_flag & LPFC_IO_LIBDFC) {
/* open up abort window to timeout handler */
ctiocb->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
ctiocb->cmd_flag |= LPFC_IO_CMD_OUTSTANDING;
}
spin_unlock_irqrestore(&phba->hbalock, flags);
return 0; /* done for now */
@@ -2713,9 +2713,9 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
cmd->ulpClass = CLASS3;
cmd->ulpContext = rpi;
cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
cmdiocbq->vport = phba->pport;
cmdiocbq->iocb_cmpl = NULL;
cmdiocbq->cmd_cmpl = NULL;
iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
rspiocbq,
@@ -3286,10 +3286,10 @@ lpfc_bsg_diag_loopback_run(struct bsg_job *job)
cmdiocbq->sli4_xritag = NO_XRI;
cmd->unsli3.rcvsli3.ox_id = 0xffff;
}
cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
cmdiocbq->iocb_flag |= LPFC_IO_LOOPBACK;
cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
cmdiocbq->cmd_flag |= LPFC_IO_LOOPBACK;
cmdiocbq->vport = phba->pport;
cmdiocbq->iocb_cmpl = NULL;
cmdiocbq->cmd_cmpl = NULL;
iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
rspiocbq, (phba->fc_ratov * 2) +
LPFC_DRVR_TIMEOUT);
@@ -5273,11 +5273,11 @@ lpfc_menlo_cmd(struct bsg_job *job)
cmd->ulpClass = CLASS3;
cmd->ulpOwner = OWN_CHIP;
cmd->ulpLe = 1; /* Limited Edition */
cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
cmdiocbq->vport = phba->pport;
/* We want the firmware to timeout before we do */
cmd->ulpTimeout = MENLO_TIMEOUT - 5;
cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp;
cmdiocbq->cmd_cmpl = lpfc_bsg_menlo_cmd_cmp;
cmdiocbq->context1 = dd_data;
cmdiocbq->context2 = cmp;
cmdiocbq->context3 = bmp;
@@ -6001,7 +6001,7 @@ lpfc_bsg_timeout(struct bsg_job *job)
spin_lock_irqsave(&phba->hbalock, flags);
/* make sure the I/O abort window is still open */
if (!(cmdiocb->iocb_flag & LPFC_IO_CMD_OUTSTANDING)) {
if (!(cmdiocb->cmd_flag & LPFC_IO_CMD_OUTSTANDING)) {
spin_unlock_irqrestore(&phba->hbalock, flags);
return -EAGAIN;
}
@@ -211,7 +211,7 @@ int lpfc_sli4_refresh_params(struct lpfc_hba *phba);
int lpfc_hba_down_prep(struct lpfc_hba *);
int lpfc_hba_down_post(struct lpfc_hba *);
void lpfc_hba_init(struct lpfc_hba *, uint32_t *);
int lpfc_post_buffer(struct lpfc_hba *, struct lpfc_sli_ring *, int);
int lpfc_sli3_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt);
void lpfc_decode_firmware_rev(struct lpfc_hba *, char *, int);
int lpfc_online(struct lpfc_hba *);
void lpfc_unblock_mgmt_io(struct lpfc_hba *);
@@ -239,7 +239,7 @@ lpfc_ct_reject_event(struct lpfc_nodelist *ndlp,
cmdiocbq->context1 = lpfc_nlp_get(ndlp);
cmdiocbq->context2 = (uint8_t *)mp;
cmdiocbq->context3 = (uint8_t *)bmp;
cmdiocbq->iocb_cmpl = lpfc_ct_unsol_cmpl;
cmdiocbq->cmd_cmpl = lpfc_ct_unsol_cmpl;
icmd->ulpContext = rx_id; /* Xri / rx_id */
icmd->unsli3.rcvsli3.ox_id = ox_id;
icmd->un.ulpWord[3] =
@@ -370,7 +370,7 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/* Not enough posted buffers; Try posting more buffers */
phba->fc_stat.NoRcvBuf++;
if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
lpfc_post_buffer(phba, pring, 2);
lpfc_sli3_post_buffer(phba, pring, 2);
return;
}
@@ -447,7 +447,7 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
lpfc_ct_unsol_buffer(phba, iocbq, mp, size);
lpfc_in_buf_free(phba, mp);
}
lpfc_post_buffer(phba, pring, i);
lpfc_sli3_post_buffer(phba, pring, i);
}
list_del(&head);
}
@@ -652,7 +652,7 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
"Data: x%x x%x\n",
ndlp->nlp_DID, icmd->ulpIoTag,
vport->port_state);
geniocb->iocb_cmpl = cmpl;
geniocb->cmd_cmpl = cmpl;
geniocb->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT;
geniocb->vport = vport;
geniocb->retry = retry;
@@ -1017,7 +1017,7 @@ lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
spin_lock_irq(&pring->ring_lock);
list_for_each_entry_safe(piocb, next_iocb,
&pring->txcmplq, list)
piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
list_splice_init(&pring->txcmplq, &completions);
pring->txcmplq_cnt = 0;
spin_unlock_irq(&pring->ring_lock);
@@ -2678,7 +2678,7 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
}
/**
* lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring
* lpfc_sli3_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring
* @phba: pointer to lpfc hba data structure.
* @pring: pointer to a IOCB ring.
* @cnt: the number of IOCBs to be posted to the IOCB ring.
@@ -2690,7 +2690,7 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
* The number of IOCBs NOT able to be posted to the IOCB ring.
**/
int
lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
lpfc_sli3_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
{
IOCB_t *icmd;
struct lpfc_iocbq *iocb;
@@ -2796,7 +2796,7 @@ lpfc_post_rcv_buf(struct lpfc_hba *phba)
struct lpfc_sli *psli = &phba->sli;
/* Ring 0, ELS / CT buffers */
lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
lpfc_sli3_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
/* Ring 2 - FCP no buffers needed */
return 0;
@@ -4250,8 +4250,7 @@ lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
qp = &phba->sli4_hba.hdwq[idx];
lpfc_cmd->hdwq_no = idx;
lpfc_cmd->hdwq = qp;
lpfc_cmd->cur_iocbq.wqe_cmpl = NULL;
lpfc_cmd->cur_iocbq.iocb_cmpl = NULL;
lpfc_cmd->cur_iocbq.cmd_cmpl = NULL;
spin_lock(&qp->io_buf_list_put_lock);
list_add_tail(&lpfc_cmd->list,
&qp->lpfc_io_buf_list_put);
@@ -2137,9 +2137,9 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
npr = NULL;
nvpr = NULL;
temp_ptr = lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
if (cmdiocb->iocb_flag & LPFC_PRLI_FCP_REQ)
if (cmdiocb->cmd_flag & LPFC_PRLI_FCP_REQ)
npr = (PRLI *) temp_ptr;
else if (cmdiocb->iocb_flag & LPFC_PRLI_NVME_REQ)
else if (cmdiocb->cmd_flag & LPFC_PRLI_NVME_REQ)
nvpr = (struct lpfc_nvme_prli *) temp_ptr;
irsp = &rspiocb->iocb;
@@ -352,11 +352,12 @@ __lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport,
static void
lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
struct lpfc_wcqe_complete *wcqe)
struct lpfc_iocbq *rspwqe)
{
struct lpfc_vport *vport = cmdwqe->vport;
struct lpfc_nvme_lport *lport;
uint32_t status;
struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
@@ -380,7 +381,7 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
struct lpfc_dmabuf *inp,
struct nvmefc_ls_req *pnvme_lsreq,
void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_wcqe_complete *),
struct lpfc_iocbq *),
struct lpfc_nodelist *ndlp, uint32_t num_entry,
uint32_t tmo, uint8_t retry)
{
@@ -401,7 +402,7 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
memset(wqe, 0, sizeof(union lpfc_wqe));
genwqe->context3 = (uint8_t *)bmp;
genwqe->iocb_flag |= LPFC_IO_NVME_LS;
genwqe->cmd_flag |= LPFC_IO_NVME_LS;
/* Save for completion so we can release these resources */
genwqe->context1 = lpfc_nlp_get(ndlp);
@@ -432,7 +433,7 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
first_len = xmit_len;
}
genwqe->rsvd2 = num_entry;
genwqe->num_bdes = num_entry;
genwqe->hba_wqidx = 0;
/* Words 0 - 2 */
@@ -483,8 +484,7 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
/* Issue GEN REQ WQE for NPORT <did> */
genwqe->wqe_cmpl = cmpl;
genwqe->iocb_cmpl = NULL;
genwqe->cmd_cmpl = cmpl;
genwqe->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;
genwqe->vport = vport;
genwqe->retry = retry;
@@ -534,7 +534,7 @@ __lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct nvmefc_ls_req *pnvme_lsreq,
void (*gen_req_cmp)(struct lpfc_hba *phba,
struct lpfc_iocbq *cmdwqe,
struct lpfc_wcqe_complete *wcqe))
struct lpfc_iocbq *rspwqe))
{
struct lpfc_dmabuf *bmp;
struct ulp_bde64 *bpl;
@@ -722,7 +722,7 @@ __lpfc_nvme_ls_abort(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
spin_lock(&pring->ring_lock);
list_for_each_entry_safe(wqe, next_wqe, &pring->txcmplq, list) {
if (wqe->context2 == pnvme_lsreq) {
wqe->iocb_flag |= LPFC_DRIVER_ABORTED;
wqe->cmd_flag |= LPFC_DRIVER_ABORTED;
foundit = true;
break;
}
@@ -906,7 +906,7 @@ lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
/*
* lpfc_nvme_io_cmd_wqe_cmpl - Complete an NVME-over-FCP IO
* lpfc_nvme_io_cmd_cmpl - Complete an NVME-over-FCP IO
*
* Driver registers this routine as it io request handler. This
* routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq
@@ -917,11 +917,12 @@ lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
* TODO: What are the failure codes.
**/
static void
lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
struct lpfc_wcqe_complete *wcqe)
lpfc_nvme_io_cmd_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
struct lpfc_iocbq *pwqeOut)
{
struct lpfc_io_buf *lpfc_ncmd =
(struct lpfc_io_buf *)pwqeIn->context1;
struct lpfc_wcqe_complete *wcqe = &pwqeOut->wcqe_cmpl;
struct lpfc_vport *vport = pwqeIn->vport;
struct nvmefc_fcp_req *nCmd;
struct nvme_fc_ersp_iu *ep;
@@ -1866,7 +1867,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
}
/* Don't abort IOs no longer on the pending queue. */
if (!(nvmereq_wqe->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
if (!(nvmereq_wqe->cmd_flag & LPFC_IO_ON_TXCMPLQ)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6142 NVME IO req x%px not queued - skipping "
"abort req xri x%x\n",
@@ -1880,7 +1881,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
nvmereq_wqe->hba_wqidx, pnvme_rport->port_id);
/* Outstanding abort is in progress */
if (nvmereq_wqe->iocb_flag & LPFC_DRIVER_ABORTED) {
if (nvmereq_wqe->cmd_flag & LPFC_DRIVER_ABORTED) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6144 Outstanding NVME I/O Abort Request "
"still pending on nvme_fcreq x%px, "
@@ -1975,8 +1976,8 @@ lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
/* Setup key fields in buffer that may have been changed
* if other protocols used this buffer.
*/
pwqeq->iocb_flag = LPFC_IO_NVME;
pwqeq->wqe_cmpl = lpfc_nvme_io_cmd_wqe_cmpl;
pwqeq->cmd_flag = LPFC_IO_NVME;
pwqeq->cmd_cmpl = lpfc_nvme_io_cmd_cmpl;
lpfc_ncmd->start_time = jiffies;
lpfc_ncmd->flags = 0;
@@ -2742,6 +2743,7 @@ lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
bf_set(lpfc_wcqe_c_xb, wcqep, 1);
(pwqeIn->wqe_cmpl)(phba, pwqeIn, wcqep);
memcpy(&pwqeIn->wcqe_cmpl, wcqep, sizeof(*wcqep));
(pwqeIn->cmd_cmpl)(phba, pwqeIn, pwqeIn);
#endif
}
@@ -234,7 +234,7 @@ int __lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct nvmefc_ls_req *pnvme_lsreq,
void (*gen_req_cmp)(struct lpfc_hba *phba,
struct lpfc_iocbq *cmdwqe,
struct lpfc_wcqe_complete *wcqe));
struct lpfc_iocbq *rspwqe));
void __lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport,
struct lpfc_iocbq *cmdwqe, struct lpfc_wcqe_complete *wcqe);
int __lpfc_nvme_ls_abort(struct lpfc_vport *vport,
@@ -248,6 +248,6 @@ int __lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
struct nvmefc_ls_rsp *ls_rsp,
void (*xmt_ls_rsp_cmp)(struct lpfc_hba *phba,
struct lpfc_iocbq *cmdwqe,
struct lpfc_wcqe_complete *wcqe));
struct lpfc_iocbq *rspwqe));
void __lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba,
struct lpfc_iocbq *cmdwqe, struct lpfc_wcqe_complete *wcqe);
struct lpfc_iocbq *cmdwqe, struct lpfc_iocbq *rspwqe);
@@ -35,7 +35,7 @@ typedef enum _lpfc_ctx_cmd {
LPFC_CTX_HOST
} lpfc_ctx_cmd;
union lpfc_vmid_iocb_tag {
union lpfc_vmid_tag {
uint32_t app_id;
uint8_t cs_ctl_vmid;
struct lpfc_vmid_context *vmid_context; /* UVEM context information */
@@ -69,16 +69,16 @@ struct lpfc_iocbq {
uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
uint16_t hba_wqidx; /* index to HBA work queue */
struct lpfc_cq_event cq_event;
struct lpfc_wcqe_complete wcqe_cmpl; /* WQE cmpl */
uint64_t isr_timestamp;
union lpfc_wqe128 wqe; /* SLI-4 */
IOCB_t iocb; /* SLI-3 */
struct lpfc_wcqe_complete wcqe_cmpl; /* WQE cmpl */
uint8_t rsvd2;
uint8_t num_bdes;
uint8_t priority; /* OAS priority */
uint8_t retry; /* retry counter for IOCB cmd - if needed */
uint32_t iocb_flag;
u32 cmd_flag;
#define LPFC_IO_LIBDFC 1 /* libdfc iocb */
#define LPFC_IO_WAKE 2 /* Synchronous I/O completed */
#define LPFC_IO_WAKE_TMO LPFC_IO_WAKE /* Synchronous I/O timed out */
@@ -123,15 +123,13 @@ struct lpfc_iocbq {
struct lpfc_node_rrq *rrq;
} context_un;
union lpfc_vmid_iocb_tag vmid_tag;
void (*fabric_iocb_cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_iocbq *);
void (*wait_iocb_cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_iocbq *);
void (*iocb_cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_iocbq *);
void (*wqe_cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_wcqe_complete *);
union lpfc_vmid_tag vmid_tag;
void (*fabric_cmd_cmpl)(struct lpfc_hba *phba, struct lpfc_iocbq *cmd,
struct lpfc_iocbq *rsp);
void (*wait_cmd_cmpl)(struct lpfc_hba *phba, struct lpfc_iocbq *cmd,
struct lpfc_iocbq *rsp);
void (*cmd_cmpl)(struct lpfc_hba *phba, struct lpfc_iocbq *cmd,
struct lpfc_iocbq *rsp);
};
#define SLI_IOCB_RET_IOCB 1 /* Return IOCB if cmd ring full */