Commit 7370d10a authored by James Smart, committed by Martin K. Petersen

scsi: lpfc: Remove extra vector and SLI4 queue for Expresslane

There is an extra queue and MSI-X vector for Expresslane. Now that the driver
will be doing queues per CPU, this oddball queue is no longer needed.
Expresslane will utilize the normal per-CPU queues.

Updated the debugfs SLI4 queue output to go along with the change.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 0794d601
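
For context, a minimal sketch of the resulting routing (hypothetical helper name; the structures are the driver's own, as seen in the hunks below): with the dedicated FOF/OAS queue gone, an Expresslane/OAS command is steered through the same per-CPU work-queue lookup as any other FCP command.

/*
 * Illustrative sketch only, not part of this commit: after the change,
 * WQ selection no longer special-cases OAS. hba_wqidx is derived from
 * the submitting CPU, so Expresslane I/O rides the normal per-CPU WQs.
 */
static struct lpfc_queue *select_fcp_wq(struct lpfc_hba *phba,
					struct lpfc_iocbq *piocb)
{
	return phba->sli4_hba.fcp_wq[piocb->hba_wqidx];
}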
@@ -199,11 +199,6 @@ void lpfc_reset_hba(struct lpfc_hba *);
 int lpfc_emptyq_wait(struct lpfc_hba *phba, struct list_head *hd,
 			spinlock_t *slock);
-int lpfc_fof_queue_create(struct lpfc_hba *);
-int lpfc_fof_queue_setup(struct lpfc_hba *);
-int lpfc_fof_queue_destroy(struct lpfc_hba *);
-irqreturn_t lpfc_sli4_fof_intr_handler(int, void *);
 int lpfc_sli_setup(struct lpfc_hba *);
 int lpfc_sli4_setup(struct lpfc_hba *phba);
 void lpfc_sli_queue_init(struct lpfc_hba *phba);
...
@@ -3390,14 +3390,9 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
 	if (phba->sli4_hba.hba_eq && phba->io_channel_irqs) {
 		x = phba->lpfc_idiag_last_eq;
-		if (phba->cfg_fof && (x >= phba->io_channel_irqs)) {
-			phba->lpfc_idiag_last_eq = 0;
-			goto fof;
-		}
 		phba->lpfc_idiag_last_eq++;
 		if (phba->lpfc_idiag_last_eq >= phba->io_channel_irqs)
-			if (phba->cfg_fof == 0)
-				phba->lpfc_idiag_last_eq = 0;
+			phba->lpfc_idiag_last_eq = 0;
 
 		len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
 					"EQ %d out of %d HBA EQs\n",
@@ -3479,35 +3474,6 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
 		goto out;
 	}
 
-fof:
-	if (phba->cfg_fof) {
-		/* FOF EQ */
-		qp = phba->sli4_hba.fof_eq;
-		len = __lpfc_idiag_print_eq(qp, "FOF", pbuffer, len);
-
-		/* Reset max counter */
-		if (qp)
-			qp->EQ_max_eqe = 0;
-
-		if (len >= max_cnt)
-			goto too_big;
-
-		/* OAS CQ */
-		qp = phba->sli4_hba.oas_cq;
-		len = __lpfc_idiag_print_cq(qp, "OAS", pbuffer, len);
-
-		/* Reset max counter */
-		if (qp)
-			qp->CQ_max_cqe = 0;
-
-		if (len >= max_cnt)
-			goto too_big;
-
-		/* OAS WQ */
-		qp = phba->sli4_hba.oas_wq;
-		len = __lpfc_idiag_print_wq(qp, "OAS", pbuffer, len);
-
-		if (len >= max_cnt)
-			goto too_big;
-	}
-
 	spin_unlock_irq(&phba->hbalock);
 	return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
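
The net effect on the debugfs cursor above is a plain round-robin over the per-CPU EQs; roughly this standalone sketch (illustrative only, hypothetical name):

/*
 * Illustrative only: the idiag EQ cursor now simply wraps across the
 * io_channel_irqs per-CPU EQs, with no trailing FOF slot to jump to.
 */
static unsigned int next_idiag_eq(unsigned int last_eq, unsigned int nr_irqs)
{
	return (last_eq + 1 >= nr_irqs) ? 0 : last_eq + 1;
}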
...
@@ -6059,7 +6059,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 	uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
 	struct lpfc_mqe *mqe;
 	int longs;
-	int fof_vectors = 0;
 	int extra;
 	uint64_t wwn;
 	u32 if_type;
@@ -6433,8 +6432,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 	/* Verify OAS is supported */
 	lpfc_sli4_oas_verify(phba);
-	if (phba->cfg_fof)
-		fof_vectors = 1;
 
 	/* Verify RAS support on adapter */
 	lpfc_sli4_ras_init(phba);
@@ -6478,7 +6475,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 		goto out_remove_rpi_hdrs;
 	}
 
-	phba->sli4_hba.hba_eq_hdl = kcalloc(fof_vectors + phba->io_channel_irqs,
+	phba->sli4_hba.hba_eq_hdl = kcalloc(phba->io_channel_irqs,
 					    sizeof(struct lpfc_hba_eq_hdl),
 					    GFP_KERNEL);
 	if (!phba->sli4_hba.hba_eq_hdl) {
@@ -8048,7 +8045,7 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
 			/*
 			 * Whats left after this can go toward NVME.
 			 * The minus 6 accounts for ELS, NVME LS, MBOX
-			 * fof plus a couple extra. When configured for
+			 * plus a couple extra. When configured for
 			 * NVMET, FCP io channel WQs are not created.
 			 */
 			length -= 6;
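
As a rough illustration of the comment above (hypothetical helper, not the driver's code): the subtraction itself is unchanged by this patch, only its description is, since there is no longer a FOF WQ to account for.

/*
 * Illustrative only: six WQs stay reserved for ELS, NVME LS, MBOX and
 * a couple of spares; the remainder can go toward NVME.
 */
static int wqs_after_fixed_overhead(int wq_count)
{
	return wq_count - 6;
}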
@@ -8280,7 +8277,6 @@ static int
 lpfc_sli4_queue_verify(struct lpfc_hba *phba)
 {
 	int io_channel;
-	int fof_vectors = phba->cfg_fof ? 1 : 0;
 
 	/*
 	 * Sanity check for configured queue parameters against the run-time
@@ -8299,13 +8295,13 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
 		io_channel = phba->sli4_hba.num_online_cpu;
 	}
 
-	if (io_channel + fof_vectors > phba->sli4_hba.max_cfg_param.max_eq) {
+	if (io_channel > phba->sli4_hba.max_cfg_param.max_eq) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"2575 Reducing IO channels to match number of "
 				"available EQs: from %d to %d\n",
 				io_channel,
 				phba->sli4_hba.max_cfg_param.max_eq);
-		io_channel = phba->sli4_hba.max_cfg_param.max_eq - fof_vectors;
+		io_channel = phba->sli4_hba.max_cfg_param.max_eq;
 	}
 
 	/* The actual number of FCP / NVME event queues adopted */
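
Said differently (standalone sketch, hypothetical name): without a reserved FOF vector, the IO channel count only has to fit under the adapter's EQ limit itself.

/* Illustrative only: EQ budget check with no extra FOF vector reserved. */
static int clamp_io_channels(int io_channel, int max_eq)
{
	return (io_channel > max_eq) ? max_eq : io_channel;
}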
@@ -8769,10 +8765,6 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
 			phba->sli4_hba.nvmet_mrq_data[idx] = qdesc;
 		}
 	}
 
-	/* Create the Queues needed for Flash Optimized Fabric operations */
-	if (phba->cfg_fof)
-		lpfc_fof_queue_create(phba);
-
 	return 0;
 
 out_error:
@@ -8828,9 +8820,6 @@ lpfc_sli4_release_queue_map(uint16_t **qmap)
 void
 lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
 {
-	if (phba->cfg_fof)
-		lpfc_fof_queue_destroy(phba);
-
 	/* Release HBA eqs */
 	lpfc_sli4_release_queues(&phba->sli4_hba.hba_eq, phba->io_channel_irqs);
@@ -9331,16 +9320,6 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 			phba->sli4_hba.dat_rq->queue_id,
 			phba->sli4_hba.els_cq->queue_id);
 
-	if (phba->cfg_fof) {
-		rc = lpfc_fof_queue_setup(phba);
-		if (rc) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-					"0549 Failed setup of FOF Queues: "
-					"rc = 0x%x\n", rc);
-			goto out_destroy;
-		}
-	}
-
 	for (qidx = 0; qidx < io_channel; qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
 		lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT,
 					 phba->cfg_fcp_imax);
@@ -9370,10 +9349,6 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
 {
 	int qidx;
 
-	/* Unset the queues created for Flash Optimized Fabric operations */
-	if (phba->cfg_fof)
-		lpfc_fof_queue_destroy(phba);
-
 	/* Unset mailbox command work queue */
 	if (phba->sli4_hba.mbx_wq)
 		lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
@@ -10297,8 +10272,6 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
 	/* Set up MSI-X multi-message vectors */
 	vectors = phba->io_channel_irqs;
-	if (phba->cfg_fof)
-		vectors++;
 
 	rc = pci_alloc_irq_vectors(phba->pcidev,
 				(phba->nvmet_support) ? 1 : 2,
@@ -10320,16 +10293,10 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
 		phba->sli4_hba.hba_eq_hdl[index].idx = index;
 		phba->sli4_hba.hba_eq_hdl[index].phba = phba;
 		atomic_set(&phba->sli4_hba.hba_eq_hdl[index].hba_eq_in_use, 1);
-		if (phba->cfg_fof && (index == (vectors - 1)))
-			rc = request_irq(pci_irq_vector(phba->pcidev, index),
-				 &lpfc_sli4_fof_intr_handler, 0,
-				 name,
-				 &phba->sli4_hba.hba_eq_hdl[index]);
-		else
-			rc = request_irq(pci_irq_vector(phba->pcidev, index),
-				 &lpfc_sli4_hba_intr_handler, 0,
-				 name,
-				 &phba->sli4_hba.hba_eq_hdl[index]);
+		rc = request_irq(pci_irq_vector(phba->pcidev, index),
+			 &lpfc_sli4_hba_intr_handler, 0,
+			 name,
+			 &phba->sli4_hba.hba_eq_hdl[index]);
 		if (rc) {
 			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
 					"0486 MSI-X fast-path (%d) "
@@ -10338,9 +10305,6 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
 		}
 	}
 
-	if (phba->cfg_fof)
-		vectors--;
-
 	if (vectors != phba->io_channel_irqs) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"3238 Reducing IO channels to match number of "
@@ -10415,10 +10379,6 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)
 		phba->sli4_hba.hba_eq_hdl[index].phba = phba;
 	}
 
-	if (phba->cfg_fof) {
-		phba->sli4_hba.hba_eq_hdl[index].idx = index;
-		phba->sli4_hba.hba_eq_hdl[index].phba = phba;
-	}
 	return 0;
 }
@@ -10485,12 +10445,6 @@ lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
 				eqhdl->phba = phba;
 				atomic_set(&eqhdl->hba_eq_in_use, 1);
 			}
-			if (phba->cfg_fof) {
-				eqhdl = &phba->sli4_hba.hba_eq_hdl[idx];
-				eqhdl->idx = idx;
-				eqhdl->phba = phba;
-				atomic_set(&eqhdl->hba_eq_in_use, 1);
-			}
 		}
 	}
 	return intr_mode;
@@ -10516,10 +10470,6 @@ lpfc_sli4_disable_intr(struct lpfc_hba *phba)
 		for (index = 0; index < phba->io_channel_irqs; index++)
 			free_irq(pci_irq_vector(phba->pcidev, index),
 					&phba->sli4_hba.hba_eq_hdl[index]);
-
-		if (phba->cfg_fof)
-			free_irq(pci_irq_vector(phba->pcidev, index),
-					&phba->sli4_hba.hba_eq_hdl[index]);
 	} else {
 		free_irq(phba->pcidev->irq, phba);
 	}
@@ -12692,165 +12642,6 @@ lpfc_sli4_ras_init(struct lpfc_hba *phba)
 	}
 }
 
-/**
- * lpfc_fof_queue_setup - Set up all the fof queues
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked to set up all the fof queues for the FC HBA
- * operation.
- *
- * Return codes
- *      0 - successful
- *      -ENOMEM - No available memory
- **/
-int
-lpfc_fof_queue_setup(struct lpfc_hba *phba)
-{
-	struct lpfc_sli_ring *pring;
-	int rc;
-
-	rc = lpfc_eq_create(phba, phba->sli4_hba.fof_eq, LPFC_MAX_IMAX);
-	if (rc)
-		return -ENOMEM;
-
-	if (phba->cfg_fof) {
-		rc = lpfc_cq_create(phba, phba->sli4_hba.oas_cq,
-				    phba->sli4_hba.fof_eq, LPFC_WCQ, LPFC_FCP);
-		if (rc)
-			goto out_oas_cq;
-
-		rc = lpfc_wq_create(phba, phba->sli4_hba.oas_wq,
-				    phba->sli4_hba.oas_cq, LPFC_FCP);
-		if (rc)
-			goto out_oas_wq;
-
-		/* Bind this CQ/WQ to the NVME ring */
-		pring = phba->sli4_hba.oas_wq->pring;
-		pring->sli.sli4.wqp =
-			(void *)phba->sli4_hba.oas_wq;
-		phba->sli4_hba.oas_cq->pring = pring;
-	}
-
-	return 0;
-
-out_oas_wq:
-	lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq);
-out_oas_cq:
-	lpfc_eq_destroy(phba, phba->sli4_hba.fof_eq);
-	return rc;
-}
-
-/**
- * lpfc_fof_queue_create - Create all the fof queues
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked to allocate all the fof queues for the FC HBA
- * operation. For each SLI4 queue type, the parameters such as queue entry
- * count (queue depth) shall be taken from the module parameter. For now,
- * we just use some constant number as place holder.
- *
- * Return codes
- *      0 - successful
- *      -ENOMEM - No availble memory
- *      -EIO - The mailbox failed to complete successfully.
- **/
-int
-lpfc_fof_queue_create(struct lpfc_hba *phba)
-{
-	struct lpfc_queue *qdesc;
-	uint32_t wqesize;
-
-	/* Create FOF EQ */
-	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
-				      phba->sli4_hba.eq_esize,
-				      phba->sli4_hba.eq_ecount);
-	if (!qdesc)
-		goto out_error;
-
-	qdesc->qe_valid = 1;
-	phba->sli4_hba.fof_eq = qdesc;
-
-	if (phba->cfg_fof) {
-		/* Create OAS CQ */
-		if (phba->enab_exp_wqcq_pages)
-			qdesc = lpfc_sli4_queue_alloc(phba,
-						      LPFC_EXPANDED_PAGE_SIZE,
-						      phba->sli4_hba.cq_esize,
-						      LPFC_CQE_EXP_COUNT);
-		else
-			qdesc = lpfc_sli4_queue_alloc(phba,
-						      LPFC_DEFAULT_PAGE_SIZE,
-						      phba->sli4_hba.cq_esize,
-						      phba->sli4_hba.cq_ecount);
-		if (!qdesc)
-			goto out_error;
-
-		qdesc->qe_valid = 1;
-		phba->sli4_hba.oas_cq = qdesc;
-
-		/* Create OAS WQ */
-		if (phba->enab_exp_wqcq_pages) {
-			wqesize = (phba->fcp_embed_io) ?
-				LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
-			qdesc = lpfc_sli4_queue_alloc(phba,
-						      LPFC_EXPANDED_PAGE_SIZE,
-						      wqesize,
-						      LPFC_WQE_EXP_COUNT);
-		} else
-			qdesc = lpfc_sli4_queue_alloc(phba,
-						      LPFC_DEFAULT_PAGE_SIZE,
-						      phba->sli4_hba.wq_esize,
-						      phba->sli4_hba.wq_ecount);
-
-		if (!qdesc)
-			goto out_error;
-
-		phba->sli4_hba.oas_wq = qdesc;
-		list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
-	}
-	return 0;
-
-out_error:
-	lpfc_fof_queue_destroy(phba);
-	return -ENOMEM;
-}
-
-/**
- * lpfc_fof_queue_destroy - Destroy all the fof queues
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked to release all the SLI4 queues with the FC HBA
- * operation.
- *
- * Return codes
- *      0 - successful
- **/
-int
-lpfc_fof_queue_destroy(struct lpfc_hba *phba)
-{
-	/* Release FOF Event queue */
-	if (phba->sli4_hba.fof_eq != NULL) {
-		lpfc_sli4_queue_free(phba->sli4_hba.fof_eq);
-		phba->sli4_hba.fof_eq = NULL;
-	}
-
-	/* Release OAS Completion queue */
-	if (phba->sli4_hba.oas_cq != NULL) {
-		lpfc_sli4_queue_free(phba->sli4_hba.oas_cq);
-		phba->sli4_hba.oas_cq = NULL;
-	}
-
-	/* Release OAS Work queue */
-	if (phba->sli4_hba.oas_wq != NULL) {
-		lpfc_sli4_queue_free(phba->sli4_hba.oas_wq);
-		phba->sli4_hba.oas_wq = NULL;
-	}
-	return 0;
-}
-
 MODULE_DEVICE_TABLE(pci, lpfc_id_table);
...
@@ -4598,14 +4598,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
 	iocb = &lpfc_cmd->cur_iocbq;
 	if (phba->sli_rev == LPFC_SLI_REV4) {
-		if (!(phba->cfg_fof) ||
-		    (!(iocb->iocb_flag & LPFC_IO_FOF))) {
-			pring_s4 =
-				phba->sli4_hba.fcp_wq[iocb->hba_wqidx]->pring;
-		} else {
-			iocb->hba_wqidx = 0;
-			pring_s4 = phba->sli4_hba.oas_wq->pring;
-		}
+		pring_s4 = phba->sli4_hba.fcp_wq[iocb->hba_wqidx]->pring;
 		if (!pring_s4) {
 			ret = FAILED;
 			goto out_unlock;
...
@@ -5581,9 +5581,6 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
 			sli4_hba->sli4_cq_release(sli4_hba->nvme_cq[qidx],
 						LPFC_QUEUE_REARM);
 
-	if (phba->cfg_fof)
-		sli4_hba->sli4_cq_release(sli4_hba->oas_cq, LPFC_QUEUE_REARM);
-
 	if (sli4_hba->hba_eq)
 		for (qidx = 0; qidx < phba->io_channel_irqs; qidx++)
 			sli4_hba->sli4_eq_release(sli4_hba->hba_eq[qidx],
@@ -5596,9 +5593,6 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
 							LPFC_QUEUE_REARM);
 		}
 	}
-
-	if (phba->cfg_fof)
-		sli4_hba->sli4_eq_release(sli4_hba->fof_eq, LPFC_QUEUE_REARM);
 }
 
 /**
@@ -9872,10 +9866,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
 	/* Get the WQ */
 	if ((piocb->iocb_flag & LPFC_IO_FCP) ||
 	    (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
-		if (!phba->cfg_fof || (!(piocb->iocb_flag & LPFC_IO_OAS)))
-			wq = phba->sli4_hba.fcp_wq[piocb->hba_wqidx];
-		else
-			wq = phba->sli4_hba.oas_wq;
+		wq = phba->sli4_hba.fcp_wq[piocb->hba_wqidx];
 	} else {
 		wq = phba->sli4_hba.els_wq;
 	}
@@ -10010,28 +10001,20 @@ struct lpfc_sli_ring *
 lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
 {
 	if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
-		if (!(phba->cfg_fof) ||
-		    (!(piocb->iocb_flag & LPFC_IO_FOF))) {
-			if (unlikely(!phba->sli4_hba.fcp_wq))
-				return NULL;
-			/*
-			 * for abort iocb hba_wqidx should already
-			 * be setup based on what work queue we used.
-			 */
-			if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
-				piocb->hba_wqidx =
-					lpfc_sli4_scmd_to_wqidx_distr(phba,
-							piocb->context1);
-				piocb->hba_wqidx = piocb->hba_wqidx %
-					phba->cfg_fcp_io_channel;
-			}
-			return phba->sli4_hba.fcp_wq[piocb->hba_wqidx]->pring;
-		} else {
-			if (unlikely(!phba->sli4_hba.oas_wq))
-				return NULL;
-			piocb->hba_wqidx = 0;
-			return phba->sli4_hba.oas_wq->pring;
-		}
+		if (unlikely(!phba->sli4_hba.fcp_wq))
+			return NULL;
+		/*
+		 * for abort iocb hba_wqidx should already
+		 * be setup based on what work queue we used.
+		 */
+		if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
+			piocb->hba_wqidx =
+				lpfc_sli4_scmd_to_wqidx_distr(
+						phba, piocb->context1);
+			piocb->hba_wqidx = piocb->hba_wqidx %
+				phba->cfg_fcp_io_channel;
+		}
+		return phba->sli4_hba.fcp_wq[piocb->hba_wqidx]->pring;
 	} else {
 		if (unlikely(!phba->sli4_hba.els_wq))
 			return NULL;
@@ -10550,16 +10533,6 @@ lpfc_sli4_queue_init(struct lpfc_hba *phba)
 		spin_lock_init(&pring->ring_lock);
 	}
 
-	if (phba->cfg_fof) {
-		pring = phba->sli4_hba.oas_wq->pring;
-		pring->flag = 0;
-		pring->ringno = LPFC_FCP_RING;
-		INIT_LIST_HEAD(&pring->txq);
-		INIT_LIST_HEAD(&pring->txcmplq);
-		INIT_LIST_HEAD(&pring->iocb_continueq);
-		spin_lock_init(&pring->ring_lock);
-	}
-
 	spin_unlock_irq(&phba->hbalock);
 }
@@ -14219,154 +14192,6 @@ lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
 }
 
-/**
- * lpfc_sli4_fof_handle_eqe - Process a Flash Optimized Fabric event queue
- *			     entry
- * @phba: Pointer to HBA context object.
- * @eqe: Pointer to fast-path event queue entry.
- *
- * This routine process a event queue entry from the Flash Optimized Fabric
- * event queue. It will check the MajorCode and MinorCode to determine this
- * is for a completion event on a completion queue, if not, an error shall be
- * logged and just return. Otherwise, it will get to the corresponding
- * completion queue and process all the entries on the completion queue, rearm
- * the completion queue, and then return.
- **/
-static void
-lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
-{
-	struct lpfc_queue *cq;
-	uint16_t cqid;
-
-	if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-				"9147 Not a valid completion "
-				"event: majorcode=x%x, minorcode=x%x\n",
-				bf_get_le32(lpfc_eqe_major_code, eqe),
-				bf_get_le32(lpfc_eqe_minor_code, eqe));
-		return;
-	}
-
-	/* Get the reference to the corresponding CQ */
-	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
-
-	/* Next check for OAS */
-	cq = phba->sli4_hba.oas_cq;
-	if (unlikely(!cq)) {
-		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
-			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-					"9148 OAS completion queue "
-					"does not exist\n");
-		return;
-	}
-
-	if (unlikely(cqid != cq->queue_id)) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-				"9149 Miss-matched fast-path compl "
-				"queue id: eqcqid=%d, fcpcqid=%d\n",
-				cqid, cq->queue_id);
-		return;
-	}
-
-	/* Save EQ associated with this CQ */
-	cq->assoc_qp = phba->sli4_hba.fof_eq;
-
-	/* CQ work will be processed on CPU affinitized to this IRQ */
-	if (!queue_work(phba->wq, &cq->irqwork))
-		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-				"0367 Cannot schedule soft IRQ "
-				"for CQ eqcqid=%d, cqid=%d on CPU %d\n",
-				cqid, cq->queue_id, smp_processor_id());
-}
-
-/**
- * lpfc_sli4_fof_intr_handler - HBA interrupt handler to SLI-4 device
- * @irq: Interrupt number.
- * @dev_id: The device context pointer.
- *
- * This function is directly called from the PCI layer as an interrupt
- * service routine when device with SLI-4 interface spec is enabled with
- * MSI-X multi-message interrupt mode and there is a Flash Optimized Fabric
- * IOCB ring event in the HBA. However, when the device is enabled with either
- * MSI or Pin-IRQ interrupt mode, this function is called as part of the
- * device-level interrupt handler. When the PCI slot is in error recovery
- * or the HBA is undergoing initialization, the interrupt handler will not
- * process the interrupt. The Flash Optimized Fabric ring event are handled in
- * the intrrupt context. This function is called without any lock held.
- * It gets the hbalock to access and update SLI data structures. Note that,
- * the EQ to CQ are one-to-one map such that the EQ index is
- * equal to that of CQ index.
- *
- * This function returns IRQ_HANDLED when interrupt is handled else it
- * returns IRQ_NONE.
- **/
-irqreturn_t
-lpfc_sli4_fof_intr_handler(int irq, void *dev_id)
-{
-	struct lpfc_hba *phba;
-	struct lpfc_hba_eq_hdl *hba_eq_hdl;
-	struct lpfc_queue *eq;
-	struct lpfc_eqe *eqe;
-	unsigned long iflag;
-	int ecount = 0;
-
-	/* Get the driver's phba structure from the dev_id */
-	hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
-	phba = hba_eq_hdl->phba;
-
-	if (unlikely(!phba))
-		return IRQ_NONE;
-
-	/* Get to the EQ struct associated with this vector */
-	eq = phba->sli4_hba.fof_eq;
-	if (unlikely(!eq))
-		return IRQ_NONE;
-
-	/* Check device state for handling interrupt */
-	if (unlikely(lpfc_intr_state_check(phba))) {
-		/* Check again for link_state with lock held */
-		spin_lock_irqsave(&phba->hbalock, iflag);
-		if (phba->link_state < LPFC_LINK_DOWN)
-			/* Flush, clear interrupt, and rearm the EQ */
-			lpfc_sli4_eq_flush(phba, eq);
-		spin_unlock_irqrestore(&phba->hbalock, iflag);
-		return IRQ_NONE;
-	}
-
-	/*
-	 * Process all the event on FCP fast-path EQ
-	 */
-	while ((eqe = lpfc_sli4_eq_get(eq))) {
-		lpfc_sli4_fof_handle_eqe(phba, eqe);
-		if (!(++ecount % eq->entry_repost))
-			break;
-		eq->EQ_processed++;
-	}
-
-	/* Track the max number of EQEs processed in 1 intr */
-	if (ecount > eq->EQ_max_eqe)
-		eq->EQ_max_eqe = ecount;
-
-	if (unlikely(ecount == 0)) {
-		eq->EQ_no_entry++;
-
-		if (phba->intr_type == MSIX)
-			/* MSI-X treated interrupt served as no EQ share INT */
-			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
-					"9145 MSI-X interrupt with no EQE\n");
-		else {
-			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-					"9146 ISR interrupt with no EQE\n");
-			/* Non MSI-X treated on interrupt as EQ share INT */
-			return IRQ_NONE;
-		}
-	}
-
-	/* Always clear and re-arm the fast-path EQ */
-	phba->sli4_hba.sli4_eq_release(eq, LPFC_QUEUE_REARM);
-	return IRQ_HANDLED;
-}
-
 /**
  * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
  * @irq: Interrupt number.
@@ -14522,13 +14347,6 @@ lpfc_sli4_intr_handler(int irq, void *dev_id)
 			hba_handled |= true;
 	}
 
-	if (phba->cfg_fof) {
-		hba_irq_rc = lpfc_sli4_fof_intr_handler(irq,
-					&phba->sli4_hba.hba_eq_hdl[qidx]);
-		if (hba_irq_rc == IRQ_HANDLED)
-			hba_handled |= true;
-	}
-
 	return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
 } /* lpfc_sli4_intr_handler */
...
@@ -634,13 +634,7 @@ struct lpfc_sli4_hba {
 	uint32_t ulp0_mode;	/* ULP0 protocol mode */
 	uint32_t ulp1_mode;	/* ULP1 protocol mode */
 
-	struct lpfc_queue *fof_eq; /* Flash Optimized Fabric Event queue */
-
 	/* Optimized Access Storage specific queues/structures */
-	struct lpfc_queue *oas_cq; /* OAS completion queue */
-	struct lpfc_queue *oas_wq; /* OAS Work queue */
-	struct lpfc_sli_ring *oas_ring;
 	uint64_t oas_next_lun;
 	uint8_t oas_next_tgt_wwpn[8];
 	uint8_t oas_next_vpt_wwpn[8];
...