Commit cdb42bec authored by James Smart, committed by Martin K. Petersen

scsi: lpfc: Replace io_channels for nvme and fcp with general hdw_queues per cpu

Currently, nvme and fcp each have their own concept of an io_channel, which
is a wq/cq pair with an associated msix vector.  Multiple cpus share an
io_channel.

The driver is now moving to per-cpu wq/cq pairs and msix vectors.  The
driver will still use separate wq/cq pairs per protocol on each cpu, but
the protocols will share the msix vector.

Given the elimination of the nvme and fcp io channels, their module
parameters will be removed.  A new parameter, lpfc_hdw_queue, is added which
allows the per-cpu wq/cq pair allocation to be overridden and reduced to a
lesser value. If lpfc_hdw_queue is zero, the number of pairs allocated will
be based on the number of cpus. If non-zero, the parameter specifies the
number of queues to allocate. At this time, the maximum non-zero value is
64.
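
The zero/non-zero handling reduces to roughly the following, using the field
names visible in the diff below. This is a condensed sketch of the
lpfc_get_cfgparam()/lpfc_nvme_mod_param_dep() behavior, not the verbatim code:

	/* Sketch: resolve the effective hardware queue count.
	 * 0 means one wq/cq pair per present cpu; a non-zero value is
	 * taken as-is but never allowed to exceed the present cpu count.
	 */
	if (phba->cfg_hdw_queue == 0)
		phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu;
	else if (phba->cfg_hdw_queue > phba->sli4_hba.num_present_cpu)
		phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu;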

To manage this new paradigm, a new hardware queue structure is created to
track queue activity and relationships.
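
The structure, added to lpfc_sli4.h (full definition in the diff below),
bundles the per-queue pointers so fast-path code indexes a single hdwq array
instead of parallel per-protocol arrays; a trimmed view:

	struct lpfc_sli4_hdw_queue {
		struct lpfc_queue *hba_eq;  /* event queue shared by both protocols */
		struct lpfc_queue *fcp_cq;  /* fast-path FCP completion queue */
		struct lpfc_queue *nvme_cq; /* fast-path NVME completion queue */
		struct lpfc_queue *fcp_wq;  /* fast-path FCP work queue */
		struct lpfc_queue *nvme_wq; /* fast-path NVME work queue */
		uint16_t fcp_cq_map;
		uint16_t nvme_cq_map;
	};

Lookups such as phba->sli4_hba.fcp_wq[qidx] therefore become
phba->sli4_hba.hdwq[qidx].fcp_wq, as seen throughout the hunks below.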

As MSIX vector allocation must be known before setting up the
relationships, msix allocation now occurs before the queue data structures
are allocated. If the number of vectors allocated is less than the desired
number of hardware queues, the hardware queue count will be reduced to the
number of vectors.
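
The init-path hunks that implement this are not shown here (their file diffs
are collapsed below), so the following is only an illustrative sketch of the
ordering and clamp; the helper call is an assumption, not lifted from this
commit:

	/* Illustrative only: allocate MSI-X first, then size the hardware
	 * queue set to what was actually granted.
	 */
	vectors = pci_alloc_irq_vectors(phba->pcidev, 1, phba->cfg_hdw_queue,
					PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
	if (vectors > 0 && vectors < phba->cfg_hdw_queue)
		phba->cfg_hdw_queue = vectors;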
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 7370d10a
...@@ -810,11 +810,10 @@ struct lpfc_hba { ...@@ -810,11 +810,10 @@ struct lpfc_hba {
uint32_t cfg_auto_imax; uint32_t cfg_auto_imax;
uint32_t cfg_fcp_imax; uint32_t cfg_fcp_imax;
uint32_t cfg_fcp_cpu_map; uint32_t cfg_fcp_cpu_map;
uint32_t cfg_fcp_io_channel; uint32_t cfg_hdw_queue;
uint32_t cfg_suppress_rsp; uint32_t cfg_suppress_rsp;
uint32_t cfg_nvme_oas; uint32_t cfg_nvme_oas;
uint32_t cfg_nvme_embed_cmd; uint32_t cfg_nvme_embed_cmd;
uint32_t cfg_nvme_io_channel;
uint32_t cfg_nvmet_mrq_post; uint32_t cfg_nvmet_mrq_post;
uint32_t cfg_nvmet_mrq; uint32_t cfg_nvmet_mrq;
uint32_t cfg_enable_nvmet; uint32_t cfg_enable_nvmet;
...@@ -877,7 +876,6 @@ struct lpfc_hba { ...@@ -877,7 +876,6 @@ struct lpfc_hba {
#define LPFC_ENABLE_NVME 2 #define LPFC_ENABLE_NVME 2
#define LPFC_ENABLE_BOTH 3 #define LPFC_ENABLE_BOTH 3
uint32_t cfg_enable_pbde; uint32_t cfg_enable_pbde;
uint32_t io_channel_irqs; /* number of irqs for io channels */
struct nvmet_fc_target_port *targetport; struct nvmet_fc_target_port *targetport;
lpfc_vpd_t vpd; /* vital product data */ lpfc_vpd_t vpd; /* vital product data */
...
...@@ -456,7 +456,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, ...@@ -456,7 +456,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
totin = 0; totin = 0;
totout = 0; totout = 0;
for (i = 0; i < phba->cfg_nvme_io_channel; i++) { for (i = 0; i < phba->cfg_hdw_queue; i++) {
cstat = &lport->cstat[i]; cstat = &lport->cstat[i];
tot = atomic_read(&cstat->fc4NvmeIoCmpls); tot = atomic_read(&cstat->fc4NvmeIoCmpls);
totin += tot; totin += tot;
...@@ -4909,7 +4909,7 @@ lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr, ...@@ -4909,7 +4909,7 @@ lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr,
phba->cfg_fcp_imax = (uint32_t)val; phba->cfg_fcp_imax = (uint32_t)val;
phba->initial_imax = phba->cfg_fcp_imax; phba->initial_imax = phba->cfg_fcp_imax;
for (i = 0; i < phba->io_channel_irqs; i += LPFC_MAX_EQ_DELAY_EQID_CNT) for (i = 0; i < phba->cfg_hdw_queue; i += LPFC_MAX_EQ_DELAY_EQID_CNT)
lpfc_modify_hba_eq_delay(phba, i, LPFC_MAX_EQ_DELAY_EQID_CNT, lpfc_modify_hba_eq_delay(phba, i, LPFC_MAX_EQ_DELAY_EQID_CNT,
val); val);
...@@ -5398,41 +5398,23 @@ LPFC_ATTR_RW(nvme_embed_cmd, 1, 0, 2, ...@@ -5398,41 +5398,23 @@ LPFC_ATTR_RW(nvme_embed_cmd, 1, 0, 2,
"Embed NVME Command in WQE"); "Embed NVME Command in WQE");
/* /*
* lpfc_fcp_io_channel: Set the number of FCP IO channels the driver * lpfc_hdw_queue: Set the number of IO channels the driver
* will advertise it supports to the SCSI layer. This also will map to * will advertise it supports to the NVME and SCSI layers. This also
* the number of WQs the driver will create. * will map to the number of EQ/CQ/WQs the driver will create.
*
* 0 = Configure the number of io channels to the number of active CPUs.
* 1,32 = Manually specify how many io channels to use.
*
* Value range is [0,32]. Default value is 4.
*/
LPFC_ATTR_R(fcp_io_channel,
LPFC_FCP_IO_CHAN_DEF,
LPFC_HBA_IO_CHAN_MIN, LPFC_HBA_IO_CHAN_MAX,
"Set the number of FCP I/O channels");
/*
* lpfc_nvme_io_channel: Set the number of IO hardware queues the driver
* will advertise it supports to the NVME layer. This also will map to
* the number of WQs the driver will create.
*
* This module parameter is valid when lpfc_enable_fc4_type is set
* to support NVME.
* *
* The NVME Layer will try to create this many, plus 1 administrative * The NVME Layer will try to create this many, plus 1 administrative
* hardware queue. The administrative queue will always map to WQ 0 * hardware queue. The administrative queue will always map to WQ 0
* A hardware IO queue maps (qidx) to a specific driver WQ. * A hardware IO queue maps (qidx) to a specific driver WQ.
* *
* 0 = Configure the number of io channels to the number of active CPUs. * 0 = Configure the number of hdw queues to the number of active CPUs.
* 1,32 = Manually specify how many io channels to use. * 1,64 = Manually specify how many hdw queues to use.
* *
* Value range is [0,32]. Default value is 0. * Value range is [0,64]. Default value is 0.
*/ */
LPFC_ATTR_R(nvme_io_channel, LPFC_ATTR_R(hdw_queue,
LPFC_NVME_IO_CHAN_DEF, LPFC_HBA_HDWQ_DEF,
LPFC_HBA_IO_CHAN_MIN, LPFC_HBA_IO_CHAN_MAX, LPFC_HBA_HDWQ_MIN, LPFC_HBA_HDWQ_MAX,
"Set the number of NVME I/O channels"); "Set the number of I/O Hardware Queues");
/* /*
# lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware. # lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware.
...@@ -5727,9 +5709,8 @@ struct device_attribute *lpfc_hba_attrs[] = { ...@@ -5727,9 +5709,8 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_auto_imax, &dev_attr_lpfc_auto_imax,
&dev_attr_lpfc_fcp_imax, &dev_attr_lpfc_fcp_imax,
&dev_attr_lpfc_fcp_cpu_map, &dev_attr_lpfc_fcp_cpu_map,
&dev_attr_lpfc_fcp_io_channel, &dev_attr_lpfc_hdw_queue,
&dev_attr_lpfc_suppress_rsp, &dev_attr_lpfc_suppress_rsp,
&dev_attr_lpfc_nvme_io_channel,
&dev_attr_lpfc_nvmet_mrq, &dev_attr_lpfc_nvmet_mrq,
&dev_attr_lpfc_nvmet_mrq_post, &dev_attr_lpfc_nvmet_mrq_post,
&dev_attr_lpfc_nvme_enable_fb, &dev_attr_lpfc_nvme_enable_fb,
...@@ -6806,8 +6787,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) ...@@ -6806,8 +6787,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
/* Initialize first burst. Target vs Initiator are different. */ /* Initialize first burst. Target vs Initiator are different. */
lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb); lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb);
lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size); lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size);
lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel); lpfc_hdw_queue_init(phba, lpfc_hdw_queue);
lpfc_nvme_io_channel_init(phba, lpfc_nvme_io_channel);
lpfc_enable_bbcr_init(phba, lpfc_enable_bbcr); lpfc_enable_bbcr_init(phba, lpfc_enable_bbcr);
lpfc_enable_dpp_init(phba, lpfc_enable_dpp); lpfc_enable_dpp_init(phba, lpfc_enable_dpp);
...@@ -6829,21 +6809,8 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) ...@@ -6829,21 +6809,8 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
phba->cfg_enable_pbde = 0; phba->cfg_enable_pbde = 0;
/* A value of 0 means use the number of CPUs found in the system */ /* A value of 0 means use the number of CPUs found in the system */
if (phba->cfg_fcp_io_channel == 0) if (phba->cfg_hdw_queue == 0)
phba->cfg_fcp_io_channel = phba->sli4_hba.num_present_cpu; phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu;
if (phba->cfg_nvme_io_channel == 0)
phba->cfg_nvme_io_channel = phba->sli4_hba.num_present_cpu;
if (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
phba->cfg_fcp_io_channel = 0;
if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP)
phba->cfg_nvme_io_channel = 0;
if (phba->cfg_fcp_io_channel > phba->cfg_nvme_io_channel)
phba->io_channel_irqs = phba->cfg_fcp_io_channel;
else
phba->io_channel_irqs = phba->cfg_nvme_io_channel;
phba->cfg_soft_wwnn = 0L; phba->cfg_soft_wwnn = 0L;
phba->cfg_soft_wwpn = 0L; phba->cfg_soft_wwpn = 0L;
...@@ -6884,16 +6851,12 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) ...@@ -6884,16 +6851,12 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
void void
lpfc_nvme_mod_param_dep(struct lpfc_hba *phba) lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
{ {
if (phba->cfg_nvme_io_channel > phba->sli4_hba.num_present_cpu) if (phba->cfg_hdw_queue > phba->sli4_hba.num_present_cpu)
phba->cfg_nvme_io_channel = phba->sli4_hba.num_present_cpu; phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu;
if (phba->cfg_fcp_io_channel > phba->sli4_hba.num_present_cpu)
phba->cfg_fcp_io_channel = phba->sli4_hba.num_present_cpu;
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME && if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
phba->nvmet_support) { phba->nvmet_support) {
phba->cfg_enable_fc4_type &= ~LPFC_ENABLE_FCP; phba->cfg_enable_fc4_type &= ~LPFC_ENABLE_FCP;
phba->cfg_fcp_io_channel = 0;
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
"6013 %s x%x fb_size x%x, fb_max x%x\n", "6013 %s x%x fb_size x%x, fb_max x%x\n",
...@@ -6910,11 +6873,11 @@ lpfc_nvme_mod_param_dep(struct lpfc_hba *phba) ...@@ -6910,11 +6873,11 @@ lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
} }
if (!phba->cfg_nvmet_mrq) if (!phba->cfg_nvmet_mrq)
phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel; phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
/* Adjust lpfc_nvmet_mrq to avoid running out of WQE slots */ /* Adjust lpfc_nvmet_mrq to avoid running out of WQE slots */
if (phba->cfg_nvmet_mrq > phba->cfg_nvme_io_channel) { if (phba->cfg_nvmet_mrq > phba->cfg_hdw_queue) {
phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel; phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
"6018 Adjust lpfc_nvmet_mrq to %d\n", "6018 Adjust lpfc_nvmet_mrq to %d\n",
phba->cfg_nvmet_mrq); phba->cfg_nvmet_mrq);
...@@ -6928,11 +6891,6 @@ lpfc_nvme_mod_param_dep(struct lpfc_hba *phba) ...@@ -6928,11 +6891,6 @@ lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_OFF; phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_OFF;
phba->cfg_nvmet_fb_size = 0; phba->cfg_nvmet_fb_size = 0;
} }
if (phba->cfg_fcp_io_channel > phba->cfg_nvme_io_channel)
phba->io_channel_irqs = phba->cfg_fcp_io_channel;
else
phba->io_channel_irqs = phba->cfg_nvme_io_channel;
} }
/** /**
...
...@@ -919,13 +919,13 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size) ...@@ -919,13 +919,13 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
atomic_read(&lport->fc4NvmeLsRequests), atomic_read(&lport->fc4NvmeLsRequests),
atomic_read(&lport->fc4NvmeLsCmpls)); atomic_read(&lport->fc4NvmeLsCmpls));
if (phba->cfg_nvme_io_channel < 32) if (phba->cfg_hdw_queue < LPFC_HBA_HDWQ_MAX)
maxch = phba->cfg_nvme_io_channel; maxch = phba->cfg_hdw_queue;
else else
maxch = 32; maxch = LPFC_HBA_HDWQ_MAX;
totin = 0; totin = 0;
totout = 0; totout = 0;
for (i = 0; i < phba->cfg_nvme_io_channel; i++) { for (i = 0; i < phba->cfg_hdw_queue; i++) {
cstat = &lport->cstat[i]; cstat = &lport->cstat[i];
tot = atomic_read(&cstat->fc4NvmeIoCmpls); tot = atomic_read(&cstat->fc4NvmeIoCmpls);
totin += tot; totin += tot;
...@@ -3182,21 +3182,23 @@ lpfc_idiag_wqs_for_cq(struct lpfc_hba *phba, char *wqtype, char *pbuffer, ...@@ -3182,21 +3182,23 @@ lpfc_idiag_wqs_for_cq(struct lpfc_hba *phba, char *wqtype, char *pbuffer,
struct lpfc_queue *qp; struct lpfc_queue *qp;
int qidx; int qidx;
for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) { for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
qp = phba->sli4_hba.fcp_wq[qidx]; qp = phba->sli4_hba.hdwq[qidx].fcp_wq;
if (qp->assoc_qid != cq_id) if (qp->assoc_qid != cq_id)
continue; continue;
*len = __lpfc_idiag_print_wq(qp, wqtype, pbuffer, *len); *len = __lpfc_idiag_print_wq(qp, wqtype, pbuffer, *len);
if (*len >= max_cnt) if (*len >= max_cnt)
return 1; return 1;
} }
for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) { if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
qp = phba->sli4_hba.nvme_wq[qidx]; for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
if (qp->assoc_qid != cq_id) qp = phba->sli4_hba.hdwq[qidx].nvme_wq;
continue; if (qp->assoc_qid != cq_id)
*len = __lpfc_idiag_print_wq(qp, wqtype, pbuffer, *len); continue;
if (*len >= max_cnt) *len = __lpfc_idiag_print_wq(qp, wqtype, pbuffer, *len);
return 1; if (*len >= max_cnt)
return 1;
}
} }
return 0; return 0;
} }
...@@ -3262,8 +3264,8 @@ lpfc_idiag_cqs_for_eq(struct lpfc_hba *phba, char *pbuffer, ...@@ -3262,8 +3264,8 @@ lpfc_idiag_cqs_for_eq(struct lpfc_hba *phba, char *pbuffer,
struct lpfc_queue *qp; struct lpfc_queue *qp;
int qidx, rc; int qidx, rc;
for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) { for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
qp = phba->sli4_hba.fcp_cq[qidx]; qp = phba->sli4_hba.hdwq[qidx].fcp_cq;
if (qp->assoc_qid != eq_id) if (qp->assoc_qid != eq_id)
continue; continue;
...@@ -3281,23 +3283,25 @@ lpfc_idiag_cqs_for_eq(struct lpfc_hba *phba, char *pbuffer, ...@@ -3281,23 +3283,25 @@ lpfc_idiag_cqs_for_eq(struct lpfc_hba *phba, char *pbuffer,
return 1; return 1;
} }
for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) { if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
qp = phba->sli4_hba.nvme_cq[qidx]; for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
if (qp->assoc_qid != eq_id) qp = phba->sli4_hba.hdwq[qidx].nvme_cq;
continue; if (qp->assoc_qid != eq_id)
continue;
*len = __lpfc_idiag_print_cq(qp, "NVME", pbuffer, *len); *len = __lpfc_idiag_print_cq(qp, "NVME", pbuffer, *len);
/* Reset max counter */ /* Reset max counter */
qp->CQ_max_cqe = 0; qp->CQ_max_cqe = 0;
if (*len >= max_cnt) if (*len >= max_cnt)
return 1; return 1;
rc = lpfc_idiag_wqs_for_cq(phba, "NVME", pbuffer, len, rc = lpfc_idiag_wqs_for_cq(phba, "NVME", pbuffer, len,
max_cnt, qp->queue_id); max_cnt, qp->queue_id);
if (rc) if (rc)
return 1; return 1;
}
} }
if ((eqidx < phba->cfg_nvmet_mrq) && phba->nvmet_support) { if ((eqidx < phba->cfg_nvmet_mrq) && phba->nvmet_support) {
...@@ -3387,19 +3391,19 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes, ...@@ -3387,19 +3391,19 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
spin_lock_irq(&phba->hbalock); spin_lock_irq(&phba->hbalock);
/* Fast-path event queue */ /* Fast-path event queue */
if (phba->sli4_hba.hba_eq && phba->io_channel_irqs) { if (phba->sli4_hba.hdwq && phba->cfg_hdw_queue) {
x = phba->lpfc_idiag_last_eq; x = phba->lpfc_idiag_last_eq;
phba->lpfc_idiag_last_eq++; phba->lpfc_idiag_last_eq++;
if (phba->lpfc_idiag_last_eq >= phba->io_channel_irqs) if (phba->lpfc_idiag_last_eq >= phba->cfg_hdw_queue)
phba->lpfc_idiag_last_eq = 0; phba->lpfc_idiag_last_eq = 0;
len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
"EQ %d out of %d HBA EQs\n", "EQ %d out of %d HBA EQs\n",
x, phba->io_channel_irqs); x, phba->cfg_hdw_queue);
/* Fast-path EQ */ /* Fast-path EQ */
qp = phba->sli4_hba.hba_eq[x]; qp = phba->sli4_hba.hdwq[x].hba_eq;
if (!qp) if (!qp)
goto out; goto out;
...@@ -3691,9 +3695,9 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf, ...@@ -3691,9 +3695,9 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
switch (quetp) { switch (quetp) {
case LPFC_IDIAG_EQ: case LPFC_IDIAG_EQ:
/* HBA event queue */ /* HBA event queue */
if (phba->sli4_hba.hba_eq) { if (phba->sli4_hba.hdwq) {
for (qidx = 0; qidx < phba->io_channel_irqs; qidx++) { for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
qp = phba->sli4_hba.hba_eq[qidx]; qp = phba->sli4_hba.hdwq[qidx].hba_eq;
if (qp && qp->queue_id == queid) { if (qp && qp->queue_id == queid) {
/* Sanity check */ /* Sanity check */
rc = lpfc_idiag_que_param_check(qp, rc = lpfc_idiag_que_param_check(qp,
...@@ -3742,10 +3746,10 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf, ...@@ -3742,10 +3746,10 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
goto pass_check; goto pass_check;
} }
/* FCP complete queue */ /* FCP complete queue */
if (phba->sli4_hba.fcp_cq) { if (phba->sli4_hba.hdwq) {
for (qidx = 0; qidx < phba->cfg_fcp_io_channel; for (qidx = 0; qidx < phba->cfg_hdw_queue;
qidx++) { qidx++) {
qp = phba->sli4_hba.fcp_cq[qidx]; qp = phba->sli4_hba.hdwq[qidx].fcp_cq;
if (qp && qp->queue_id == queid) { if (qp && qp->queue_id == queid) {
/* Sanity check */ /* Sanity check */
rc = lpfc_idiag_que_param_check( rc = lpfc_idiag_que_param_check(
...@@ -3758,23 +3762,20 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf, ...@@ -3758,23 +3762,20 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
} }
} }
/* NVME complete queue */ /* NVME complete queue */
if (phba->sli4_hba.nvme_cq) { if (phba->sli4_hba.hdwq) {
qidx = 0; qidx = 0;
do { do {
if (phba->sli4_hba.nvme_cq[qidx] && qp = phba->sli4_hba.hdwq[qidx].nvme_cq;
phba->sli4_hba.nvme_cq[qidx]->queue_id == if (qp && qp->queue_id == queid) {
queid) {
/* Sanity check */ /* Sanity check */
rc = lpfc_idiag_que_param_check( rc = lpfc_idiag_que_param_check(
phba->sli4_hba.nvme_cq[qidx], qp, index, count);
index, count);
if (rc) if (rc)
goto error_out; goto error_out;
idiag.ptr_private = idiag.ptr_private = qp;
phba->sli4_hba.nvme_cq[qidx];
goto pass_check; goto pass_check;
} }
} while (++qidx < phba->cfg_nvme_io_channel); } while (++qidx < phba->cfg_hdw_queue);
} }
goto error_out; goto error_out;
break; break;
...@@ -3815,11 +3816,11 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf, ...@@ -3815,11 +3816,11 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
idiag.ptr_private = phba->sli4_hba.nvmels_wq; idiag.ptr_private = phba->sli4_hba.nvmels_wq;
goto pass_check; goto pass_check;
} }
/* FCP work queue */
if (phba->sli4_hba.fcp_wq) { if (phba->sli4_hba.hdwq) {
for (qidx = 0; qidx < phba->cfg_fcp_io_channel; /* FCP/SCSI work queue */
qidx++) { for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
qp = phba->sli4_hba.fcp_wq[qidx]; qp = phba->sli4_hba.hdwq[qidx].fcp_wq;
if (qp && qp->queue_id == queid) { if (qp && qp->queue_id == queid) {
/* Sanity check */ /* Sanity check */
rc = lpfc_idiag_que_param_check( rc = lpfc_idiag_que_param_check(
...@@ -3830,12 +3831,9 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf, ...@@ -3830,12 +3831,9 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
goto pass_check; goto pass_check;
} }
} }
} /* NVME work queue */
/* NVME work queue */ for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
if (phba->sli4_hba.nvme_wq) { qp = phba->sli4_hba.hdwq[qidx].nvme_wq;
for (qidx = 0; qidx < phba->cfg_nvme_io_channel;
qidx++) {
qp = phba->sli4_hba.nvme_wq[qidx];
if (qp && qp->queue_id == queid) { if (qp && qp->queue_id == queid) {
/* Sanity check */ /* Sanity check */
rc = lpfc_idiag_que_param_check( rc = lpfc_idiag_que_param_check(
...@@ -3848,26 +3846,6 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf, ...@@ -3848,26 +3846,6 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
} }
} }
/* NVME work queues */
if (phba->sli4_hba.nvme_wq) {
for (qidx = 0; qidx < phba->cfg_nvme_io_channel;
qidx++) {
if (!phba->sli4_hba.nvme_wq[qidx])
continue;
if (phba->sli4_hba.nvme_wq[qidx]->queue_id ==
queid) {
/* Sanity check */
rc = lpfc_idiag_que_param_check(
phba->sli4_hba.nvme_wq[qidx],
index, count);
if (rc)
goto error_out;
idiag.ptr_private =
phba->sli4_hba.nvme_wq[qidx];
goto pass_check;
}
}
}
goto error_out; goto error_out;
break; break;
case LPFC_IDIAG_RQ: case LPFC_IDIAG_RQ:
...@@ -5784,11 +5762,13 @@ lpfc_debug_dump_all_queues(struct lpfc_hba *phba) ...@@ -5784,11 +5762,13 @@ lpfc_debug_dump_all_queues(struct lpfc_hba *phba)
lpfc_debug_dump_wq(phba, DUMP_ELS, 0); lpfc_debug_dump_wq(phba, DUMP_ELS, 0);
lpfc_debug_dump_wq(phba, DUMP_NVMELS, 0); lpfc_debug_dump_wq(phba, DUMP_NVMELS, 0);
for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
lpfc_debug_dump_wq(phba, DUMP_FCP, idx); lpfc_debug_dump_wq(phba, DUMP_FCP, idx);
for (idx = 0; idx < phba->cfg_nvme_io_channel; idx++) if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
lpfc_debug_dump_wq(phba, DUMP_NVME, idx); for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
lpfc_debug_dump_wq(phba, DUMP_NVME, idx);
}
lpfc_debug_dump_hdr_rq(phba); lpfc_debug_dump_hdr_rq(phba);
lpfc_debug_dump_dat_rq(phba); lpfc_debug_dump_dat_rq(phba);
...@@ -5799,15 +5779,17 @@ lpfc_debug_dump_all_queues(struct lpfc_hba *phba) ...@@ -5799,15 +5779,17 @@ lpfc_debug_dump_all_queues(struct lpfc_hba *phba)
lpfc_debug_dump_cq(phba, DUMP_ELS, 0); lpfc_debug_dump_cq(phba, DUMP_ELS, 0);
lpfc_debug_dump_cq(phba, DUMP_NVMELS, 0); lpfc_debug_dump_cq(phba, DUMP_NVMELS, 0);
for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
lpfc_debug_dump_cq(phba, DUMP_FCP, idx); lpfc_debug_dump_cq(phba, DUMP_FCP, idx);
for (idx = 0; idx < phba->cfg_nvme_io_channel; idx++) if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
lpfc_debug_dump_cq(phba, DUMP_NVME, idx); for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
lpfc_debug_dump_cq(phba, DUMP_NVME, idx);
}
/* /*
* Dump Event Queues (EQs) * Dump Event Queues (EQs)
*/ */
for (idx = 0; idx < phba->io_channel_irqs; idx++) for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
lpfc_debug_dump_hba_eq(phba, idx); lpfc_debug_dump_hba_eq(phba, idx);
} }
...@@ -410,10 +410,10 @@ lpfc_debug_dump_wq(struct lpfc_hba *phba, int qtype, int wqidx) ...@@ -410,10 +410,10 @@ lpfc_debug_dump_wq(struct lpfc_hba *phba, int qtype, int wqidx)
char *qtypestr; char *qtypestr;
if (qtype == DUMP_FCP) { if (qtype == DUMP_FCP) {
wq = phba->sli4_hba.fcp_wq[wqidx]; wq = phba->sli4_hba.hdwq[wqidx].fcp_wq;
qtypestr = "FCP"; qtypestr = "FCP";
} else if (qtype == DUMP_NVME) { } else if (qtype == DUMP_NVME) {
wq = phba->sli4_hba.nvme_wq[wqidx]; wq = phba->sli4_hba.hdwq[wqidx].nvme_wq;
qtypestr = "NVME"; qtypestr = "NVME";
} else if (qtype == DUMP_MBX) { } else if (qtype == DUMP_MBX) {
wq = phba->sli4_hba.mbx_wq; wq = phba->sli4_hba.mbx_wq;
...@@ -454,14 +454,15 @@ lpfc_debug_dump_cq(struct lpfc_hba *phba, int qtype, int wqidx) ...@@ -454,14 +454,15 @@ lpfc_debug_dump_cq(struct lpfc_hba *phba, int qtype, int wqidx)
int eqidx; int eqidx;
/* fcp/nvme wq and cq are 1:1, thus same indexes */ /* fcp/nvme wq and cq are 1:1, thus same indexes */
eq = NULL;
if (qtype == DUMP_FCP) { if (qtype == DUMP_FCP) {
wq = phba->sli4_hba.fcp_wq[wqidx]; wq = phba->sli4_hba.hdwq[wqidx].fcp_wq;
cq = phba->sli4_hba.fcp_cq[wqidx]; cq = phba->sli4_hba.hdwq[wqidx].fcp_cq;
qtypestr = "FCP"; qtypestr = "FCP";
} else if (qtype == DUMP_NVME) { } else if (qtype == DUMP_NVME) {
wq = phba->sli4_hba.nvme_wq[wqidx]; wq = phba->sli4_hba.hdwq[wqidx].nvme_wq;
cq = phba->sli4_hba.nvme_cq[wqidx]; cq = phba->sli4_hba.hdwq[wqidx].nvme_cq;
qtypestr = "NVME"; qtypestr = "NVME";
} else if (qtype == DUMP_MBX) { } else if (qtype == DUMP_MBX) {
wq = phba->sli4_hba.mbx_wq; wq = phba->sli4_hba.mbx_wq;
...@@ -478,17 +479,17 @@ lpfc_debug_dump_cq(struct lpfc_hba *phba, int qtype, int wqidx) ...@@ -478,17 +479,17 @@ lpfc_debug_dump_cq(struct lpfc_hba *phba, int qtype, int wqidx)
} else } else
return; return;
for (eqidx = 0; eqidx < phba->io_channel_irqs; eqidx++) { for (eqidx = 0; eqidx < phba->cfg_hdw_queue; eqidx++) {
if (cq->assoc_qid == phba->sli4_hba.hba_eq[eqidx]->queue_id) eq = phba->sli4_hba.hdwq[eqidx].hba_eq;
if (cq->assoc_qid == eq->queue_id)
break; break;
} }
if (eqidx == phba->io_channel_irqs) { if (eqidx == phba->cfg_hdw_queue) {
pr_err("Couldn't find EQ for CQ. Using EQ[0]\n"); pr_err("Couldn't find EQ for CQ. Using EQ[0]\n");
eqidx = 0; eqidx = 0;
eq = phba->sli4_hba.hdwq[0].hba_eq;
} }
eq = phba->sli4_hba.hba_eq[eqidx];
if (qtype == DUMP_FCP || qtype == DUMP_NVME) if (qtype == DUMP_FCP || qtype == DUMP_NVME)
pr_err("%s CQ: WQ[Idx:%d|Qid%d]->CQ[Idx%d|Qid%d]" pr_err("%s CQ: WQ[Idx:%d|Qid%d]->CQ[Idx%d|Qid%d]"
"->EQ[Idx:%d|Qid:%d]:\n", "->EQ[Idx:%d|Qid:%d]:\n",
...@@ -516,7 +517,7 @@ lpfc_debug_dump_hba_eq(struct lpfc_hba *phba, int qidx) ...@@ -516,7 +517,7 @@ lpfc_debug_dump_hba_eq(struct lpfc_hba *phba, int qidx)
{ {
struct lpfc_queue *qp; struct lpfc_queue *qp;
qp = phba->sli4_hba.hba_eq[qidx]; qp = phba->sli4_hba.hdwq[qidx].hba_eq;
pr_err("EQ[Idx:%d|Qid:%d]\n", qidx, qp->queue_id); pr_err("EQ[Idx:%d|Qid:%d]\n", qidx, qp->queue_id);
...@@ -564,21 +565,21 @@ lpfc_debug_dump_wq_by_id(struct lpfc_hba *phba, int qid) ...@@ -564,21 +565,21 @@ lpfc_debug_dump_wq_by_id(struct lpfc_hba *phba, int qid)
{ {
int wq_idx; int wq_idx;
for (wq_idx = 0; wq_idx < phba->cfg_fcp_io_channel; wq_idx++) for (wq_idx = 0; wq_idx < phba->cfg_hdw_queue; wq_idx++)
if (phba->sli4_hba.fcp_wq[wq_idx]->queue_id == qid) if (phba->sli4_hba.hdwq[wq_idx].fcp_wq->queue_id == qid)
break; break;
if (wq_idx < phba->cfg_fcp_io_channel) { if (wq_idx < phba->cfg_hdw_queue) {
pr_err("FCP WQ[Idx:%d|Qid:%d]\n", wq_idx, qid); pr_err("FCP WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
lpfc_debug_dump_q(phba->sli4_hba.fcp_wq[wq_idx]); lpfc_debug_dump_q(phba->sli4_hba.hdwq[wq_idx].fcp_wq);
return; return;
} }
for (wq_idx = 0; wq_idx < phba->cfg_nvme_io_channel; wq_idx++) for (wq_idx = 0; wq_idx < phba->cfg_hdw_queue; wq_idx++)
if (phba->sli4_hba.nvme_wq[wq_idx]->queue_id == qid) if (phba->sli4_hba.hdwq[wq_idx].nvme_wq->queue_id == qid)
break; break;
if (wq_idx < phba->cfg_nvme_io_channel) { if (wq_idx < phba->cfg_hdw_queue) {
pr_err("NVME WQ[Idx:%d|Qid:%d]\n", wq_idx, qid); pr_err("NVME WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
lpfc_debug_dump_q(phba->sli4_hba.nvme_wq[wq_idx]); lpfc_debug_dump_q(phba->sli4_hba.hdwq[wq_idx].nvme_wq);
return; return;
} }
...@@ -646,23 +647,23 @@ lpfc_debug_dump_cq_by_id(struct lpfc_hba *phba, int qid) ...@@ -646,23 +647,23 @@ lpfc_debug_dump_cq_by_id(struct lpfc_hba *phba, int qid)
{ {
int cq_idx; int cq_idx;
for (cq_idx = 0; cq_idx < phba->cfg_fcp_io_channel; cq_idx++) for (cq_idx = 0; cq_idx < phba->cfg_hdw_queue; cq_idx++)
if (phba->sli4_hba.fcp_cq[cq_idx]->queue_id == qid) if (phba->sli4_hba.hdwq[cq_idx].fcp_cq->queue_id == qid)
break; break;
if (cq_idx < phba->cfg_fcp_io_channel) { if (cq_idx < phba->cfg_hdw_queue) {
pr_err("FCP CQ[Idx:%d|Qid:%d]\n", cq_idx, qid); pr_err("FCP CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
lpfc_debug_dump_q(phba->sli4_hba.fcp_cq[cq_idx]); lpfc_debug_dump_q(phba->sli4_hba.hdwq[cq_idx].fcp_cq);
return; return;
} }
for (cq_idx = 0; cq_idx < phba->cfg_nvme_io_channel; cq_idx++) for (cq_idx = 0; cq_idx < phba->cfg_hdw_queue; cq_idx++)
if (phba->sli4_hba.nvme_cq[cq_idx]->queue_id == qid) if (phba->sli4_hba.hdwq[cq_idx].nvme_cq->queue_id == qid)
break; break;
if (cq_idx < phba->cfg_nvme_io_channel) { if (cq_idx < phba->cfg_hdw_queue) {
pr_err("NVME CQ[Idx:%d|Qid:%d]\n", cq_idx, qid); pr_err("NVME CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
lpfc_debug_dump_q(phba->sli4_hba.nvme_cq[cq_idx]); lpfc_debug_dump_q(phba->sli4_hba.hdwq[cq_idx].nvme_cq);
return; return;
} }
...@@ -697,13 +698,13 @@ lpfc_debug_dump_eq_by_id(struct lpfc_hba *phba, int qid) ...@@ -697,13 +698,13 @@ lpfc_debug_dump_eq_by_id(struct lpfc_hba *phba, int qid)
{ {
int eq_idx; int eq_idx;
for (eq_idx = 0; eq_idx < phba->io_channel_irqs; eq_idx++) for (eq_idx = 0; eq_idx < phba->cfg_hdw_queue; eq_idx++)
if (phba->sli4_hba.hba_eq[eq_idx]->queue_id == qid) if (phba->sli4_hba.hdwq[eq_idx].hba_eq->queue_id == qid)
break; break;
if (eq_idx < phba->io_channel_irqs) { if (eq_idx < phba->cfg_hdw_queue) {
printk(KERN_ERR "FCP EQ[Idx:%d|Qid:%d]\n", eq_idx, qid); printk(KERN_ERR "FCP EQ[Idx:%d|Qid:%d]\n", eq_idx, qid);
lpfc_debug_dump_q(phba->sli4_hba.hba_eq[eq_idx]); lpfc_debug_dump_q(phba->sli4_hba.hdwq[eq_idx].hba_eq);
return; return;
} }
} }
......
This diff is collapsed.
...@@ -239,7 +239,7 @@ lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport, ...@@ -239,7 +239,7 @@ lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
if (qidx) { if (qidx) {
str = "IO "; /* IO queue */ str = "IO "; /* IO queue */
qhandle->index = ((qidx - 1) % qhandle->index = ((qidx - 1) %
vport->phba->cfg_nvme_io_channel); vport->phba->cfg_hdw_queue);
} else { } else {
str = "ADM"; /* Admin queue */ str = "ADM"; /* Admin queue */
qhandle->index = qidx; qhandle->index = qidx;
...@@ -247,7 +247,7 @@ lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport, ...@@ -247,7 +247,7 @@ lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
"6073 Binding %s HdwQueue %d (cpu %d) to " "6073 Binding %s HdwQueue %d (cpu %d) to "
"io_channel %d qhandle %p\n", str, "hdw_queue %d qhandle %p\n", str,
qidx, qhandle->cpu_id, qhandle->index, qhandle); qidx, qhandle->cpu_id, qhandle->index, qhandle);
*handle = (void *)qhandle; *handle = (void *)qhandle;
return 0; return 0;
...@@ -2083,10 +2083,10 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport) ...@@ -2083,10 +2083,10 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
* allocate + 3, one for cmd, one for rsp and one for this alignment * allocate + 3, one for cmd, one for rsp and one for this alignment
*/ */
lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1; lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
lpfc_nvme_template.max_hw_queues = phba->cfg_nvme_io_channel; lpfc_nvme_template.max_hw_queues = phba->cfg_hdw_queue;
cstat = kmalloc((sizeof(struct lpfc_nvme_ctrl_stat) * cstat = kmalloc((sizeof(struct lpfc_nvme_ctrl_stat) *
phba->cfg_nvme_io_channel), GFP_KERNEL); phba->cfg_hdw_queue), GFP_KERNEL);
if (!cstat) if (!cstat)
return -ENOMEM; return -ENOMEM;
...@@ -2130,7 +2130,7 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport) ...@@ -2130,7 +2130,7 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
atomic_set(&lport->fc4NvmeLsRequests, 0); atomic_set(&lport->fc4NvmeLsRequests, 0);
atomic_set(&lport->fc4NvmeLsCmpls, 0); atomic_set(&lport->fc4NvmeLsCmpls, 0);
for (i = 0; i < phba->cfg_nvme_io_channel; i++) { for (i = 0; i < phba->cfg_hdw_queue; i++) {
cstat = &lport->cstat[i]; cstat = &lport->cstat[i];
atomic_set(&cstat->fc4NvmeInputRequests, 0); atomic_set(&cstat->fc4NvmeInputRequests, 0);
atomic_set(&cstat->fc4NvmeOutputRequests, 0); atomic_set(&cstat->fc4NvmeOutputRequests, 0);
...@@ -2587,14 +2587,14 @@ lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba) ...@@ -2587,14 +2587,14 @@ lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
struct lpfc_sli_ring *pring; struct lpfc_sli_ring *pring;
u32 i, wait_cnt = 0; u32 i, wait_cnt = 0;
if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.nvme_wq) if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.hdwq)
return; return;
/* Cycle through all NVME rings and make sure all outstanding /* Cycle through all NVME rings and make sure all outstanding
* WQEs have been removed from the txcmplqs. * WQEs have been removed from the txcmplqs.
*/ */
for (i = 0; i < phba->cfg_nvme_io_channel; i++) { for (i = 0; i < phba->cfg_hdw_queue; i++) {
pring = phba->sli4_hba.nvme_wq[i]->pring; pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
if (!pring) if (!pring)
continue; continue;
...
...@@ -973,7 +973,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport, ...@@ -973,7 +973,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
* WQE release CQE * WQE release CQE
*/ */
ctxp->flag |= LPFC_NVMET_DEFER_WQFULL; ctxp->flag |= LPFC_NVMET_DEFER_WQFULL;
wq = phba->sli4_hba.nvme_wq[rsp->hwqid]; wq = phba->sli4_hba.hdwq[rsp->hwqid].nvme_wq;
pring = wq->pring; pring = wq->pring;
spin_lock_irqsave(&pring->ring_lock, iflags); spin_lock_irqsave(&pring->ring_lock, iflags);
list_add_tail(&nvmewqeq->list, &wq->wqfull_list); list_add_tail(&nvmewqeq->list, &wq->wqfull_list);
...@@ -1047,7 +1047,7 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport, ...@@ -1047,7 +1047,7 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
if (ctxp->flag & LPFC_NVMET_DEFER_WQFULL) { if (ctxp->flag & LPFC_NVMET_DEFER_WQFULL) {
lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
ctxp->oxid); ctxp->oxid);
wq = phba->sli4_hba.nvme_wq[ctxp->wqeq->hba_wqidx]; wq = phba->sli4_hba.hdwq[ctxp->wqeq->hba_wqidx].nvme_wq;
spin_unlock_irqrestore(&ctxp->ctxlock, flags); spin_unlock_irqrestore(&ctxp->ctxlock, flags);
lpfc_nvmet_wqfull_flush(phba, wq, ctxp); lpfc_nvmet_wqfull_flush(phba, wq, ctxp);
return; return;
...@@ -1377,7 +1377,7 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba) ...@@ -1377,7 +1377,7 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
* allocate + 3, one for cmd, one for rsp and one for this alignment * allocate + 3, one for cmd, one for rsp and one for this alignment
*/ */
lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1; lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel; lpfc_tgttemplate.max_hw_queues = phba->cfg_hdw_queue;
lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP; lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP;
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
...@@ -1697,8 +1697,8 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba) ...@@ -1697,8 +1697,8 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
return; return;
if (phba->targetport) { if (phba->targetport) {
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) { for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
wq = phba->sli4_hba.nvme_wq[qidx]; wq = phba->sli4_hba.hdwq[qidx].nvme_wq;
lpfc_nvmet_wqfull_flush(phba, wq, NULL); lpfc_nvmet_wqfull_flush(phba, wq, NULL);
} }
init_completion(&tgtp->tport_unreg_done); init_completion(&tgtp->tport_unreg_done);
...
...@@ -3661,8 +3661,8 @@ int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba, ...@@ -3661,8 +3661,8 @@ int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba,
return hwq; return hwq;
} }
if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU &&
&& phba->cfg_fcp_io_channel > 1) { phba->cfg_hdw_queue > 1) {
cpu = lpfc_cmd->cpu; cpu = lpfc_cmd->cpu;
if (cpu < phba->sli4_hba.num_present_cpu) { if (cpu < phba->sli4_hba.num_present_cpu) {
cpup = phba->sli4_hba.cpu_map; cpup = phba->sli4_hba.cpu_map;
...@@ -3671,7 +3671,7 @@ int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba, ...@@ -3671,7 +3671,7 @@ int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba,
} }
} }
chann = atomic_add_return(1, &phba->fcp_qidx); chann = atomic_add_return(1, &phba->fcp_qidx);
chann = chann % phba->cfg_fcp_io_channel; chann = chann % phba->cfg_hdw_queue;
return chann; return chann;
} }
...@@ -4598,7 +4598,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) ...@@ -4598,7 +4598,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
iocb = &lpfc_cmd->cur_iocbq; iocb = &lpfc_cmd->cur_iocbq;
if (phba->sli_rev == LPFC_SLI_REV4) { if (phba->sli_rev == LPFC_SLI_REV4) {
pring_s4 = phba->sli4_hba.fcp_wq[iocb->hba_wqidx]->pring; pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].fcp_wq->pring;
if (!pring_s4) { if (!pring_s4) {
ret = FAILED; ret = FAILED;
goto out_unlock; goto out_unlock;
...
This diff is collapsed.
...@@ -36,18 +36,13 @@ ...@@ -36,18 +36,13 @@
#define LPFC_NEMBED_MBOX_SGL_CNT 254 #define LPFC_NEMBED_MBOX_SGL_CNT 254
/* Multi-queue arrangement for FCP EQ/CQ/WQ tuples */ /* Multi-queue arrangement for FCP EQ/CQ/WQ tuples */
#define LPFC_HBA_IO_CHAN_MIN 0 #define LPFC_HBA_HDWQ_MIN 0
#define LPFC_HBA_IO_CHAN_MAX 32 #define LPFC_HBA_HDWQ_MAX 64
#define LPFC_FCP_IO_CHAN_DEF 4 #define LPFC_HBA_HDWQ_DEF 0
#define LPFC_NVME_IO_CHAN_DEF 0
/* Common buffer size to accomidate SCSI and NVME IO buffers */ /* Common buffer size to accomidate SCSI and NVME IO buffers */
#define LPFC_COMMON_IO_BUF_SZ 768 #define LPFC_COMMON_IO_BUF_SZ 768
/* Number of channels used for Flash Optimized Fabric (FOF) operations */
#define LPFC_FOF_IO_CHAN_NUM 1
/* /*
* Provide the default FCF Record attributes used by the driver * Provide the default FCF Record attributes used by the driver
* when nonFIP mode is configured and there is no other default * when nonFIP mode is configured and there is no other default
...@@ -534,6 +529,17 @@ struct lpfc_vector_map_info { ...@@ -534,6 +529,17 @@ struct lpfc_vector_map_info {
#define LPFC_VECTOR_MAP_EMPTY 0xffff #define LPFC_VECTOR_MAP_EMPTY 0xffff
/* SLI4 HBA data structure entries */ /* SLI4 HBA data structure entries */
struct lpfc_sli4_hdw_queue {
/* Pointers to the constructed SLI4 queues */
struct lpfc_queue *hba_eq; /* Event queues for HBA */
struct lpfc_queue *fcp_cq; /* Fast-path FCP compl queue */
struct lpfc_queue *nvme_cq; /* Fast-path NVME compl queue */
struct lpfc_queue *fcp_wq; /* Fast-path FCP work queue */
struct lpfc_queue *nvme_wq; /* Fast-path NVME work queue */
uint16_t fcp_cq_map;
uint16_t nvme_cq_map;
};
struct lpfc_sli4_hba { struct lpfc_sli4_hba {
void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
* config space registers * config space registers
...@@ -606,17 +612,13 @@ struct lpfc_sli4_hba { ...@@ -606,17 +612,13 @@ struct lpfc_sli4_hba {
uint32_t (*sli4_cq_release)(struct lpfc_queue *q, bool arm); uint32_t (*sli4_cq_release)(struct lpfc_queue *q, bool arm);
/* Pointers to the constructed SLI4 queues */ /* Pointers to the constructed SLI4 queues */
struct lpfc_queue **hba_eq; /* Event queues for HBA */ struct lpfc_sli4_hdw_queue *hdwq;
struct lpfc_queue **fcp_cq; /* Fast-path FCP compl queue */ struct list_head lpfc_wq_list;
struct lpfc_queue **nvme_cq; /* Fast-path NVME compl queue */
/* Pointers to the constructed SLI4 queues for NVMET */
struct lpfc_queue **nvmet_cqset; /* Fast-path NVMET CQ Set queues */ struct lpfc_queue **nvmet_cqset; /* Fast-path NVMET CQ Set queues */
struct lpfc_queue **nvmet_mrq_hdr; /* Fast-path NVMET hdr MRQs */ struct lpfc_queue **nvmet_mrq_hdr; /* Fast-path NVMET hdr MRQs */
struct lpfc_queue **nvmet_mrq_data; /* Fast-path NVMET data MRQs */ struct lpfc_queue **nvmet_mrq_data; /* Fast-path NVMET data MRQs */
struct lpfc_queue **fcp_wq; /* Fast-path FCP work queue */
struct lpfc_queue **nvme_wq; /* Fast-path NVME work queue */
uint16_t *fcp_cq_map;
uint16_t *nvme_cq_map;
struct list_head lpfc_wq_list;
struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */ struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */
struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */ struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */
...@@ -817,7 +819,7 @@ int lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq, ...@@ -817,7 +819,7 @@ int lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
int lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *, int lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *,
struct lpfc_queue *, uint32_t, uint32_t); struct lpfc_queue *, uint32_t, uint32_t);
int lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp, int lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
struct lpfc_queue **eqp, uint32_t type, struct lpfc_sli4_hdw_queue *hdwq, uint32_t type,
uint32_t subtype); uint32_t subtype);
int32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *, int32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *,
struct lpfc_queue *, uint32_t); struct lpfc_queue *, uint32_t);
...