Commit 895427bd authored by James Smart, committed by Martin K. Petersen

scsi: lpfc: NVME Initiator: Base modifications

NVME Initiator: Base modifications

This patch adds base modifications for NVME initiator support.

The base modifications consist of:
- Formal split of SLI3 rings from SLI-4 WQs (sometimes referred to as
  rings as well) as implementation now widely varies between the two.
- Addition of configuration modes:
   SCSI initiator only; NVME initiator only; NVME target only; and
   SCSI and NVME initiator.
   The configuration mode drives overall adapter configuration,
   offloads enabled, and resource splits.
   NVME support is only available on SLI-4 devices and newer fw.
- Implements the following based on configuration mode:
  - Exchange resources are split by protocol; Obviously, if only
     1 mode, then no split occurs. Default is 50/50. module attribute
     allows tuning.
  - Pools and config parameters are separated per-protocol
  - Each protocol has its own set of queues, but the protocols share
    interrupt vectors.
     SCSI:
       SLI3 devices have few queues and the original style of queue
         allocation remains.
       SLI4 devices piggy back on an "io-channel" concept that
         eventually needs to merge with scsi-mq/blk-mq support (it is
	 underway).  For now, the paradigm continues as it existed
	 prior. io channel allocates N msix and N WQs (N=4 default)
	 and either round robins or uses cpu # modulo N for scheduling.
	 A bunch of module parameters allow the configuration to be
	 tuned.
     NVME (initiator):
       Allocates an msix per cpu (or whatever pci_alloc_irq_vectors
         gets)
       Allocates a WQ per cpu, and maps the WQs to msix on a WQ #
         modulo msix vector count basis.
       Module parameters exist to cap/control the config if desired.
  - Each protocol has its own buffer and dma pools.

I apologize for the size of the patch.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>

----
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 1d9d5a98
...@@ -20,6 +20,7 @@ ...@@ -20,6 +20,7 @@
*******************************************************************/ *******************************************************************/
#include <scsi/scsi_host.h> #include <scsi/scsi_host.h>
#include <linux/ktime.h>
#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_SCSI_LPFC_DEBUG_FS) #if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_SCSI_LPFC_DEBUG_FS)
#define CONFIG_SCSI_LPFC_DEBUG_FS #define CONFIG_SCSI_LPFC_DEBUG_FS
...@@ -53,6 +54,7 @@ struct lpfc_sli2_slim; ...@@ -53,6 +54,7 @@ struct lpfc_sli2_slim;
#define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */ #define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */
#define LPFC_MAX_SGL_SEG_CNT 512 /* SGL element count per scsi cmnd */ #define LPFC_MAX_SGL_SEG_CNT 512 /* SGL element count per scsi cmnd */
#define LPFC_MAX_BPL_SEG_CNT 4096 /* BPL element count per scsi cmnd */ #define LPFC_MAX_BPL_SEG_CNT 4096 /* BPL element count per scsi cmnd */
#define LPFC_MIN_NVME_SEG_CNT 254
#define LPFC_MAX_SGE_SIZE 0x80000000 /* Maximum data allowed in a SGE */ #define LPFC_MAX_SGE_SIZE 0x80000000 /* Maximum data allowed in a SGE */
#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */ #define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */
...@@ -114,6 +116,13 @@ enum lpfc_polling_flags { ...@@ -114,6 +116,13 @@ enum lpfc_polling_flags {
DISABLE_FCP_RING_INT = 0x2 DISABLE_FCP_RING_INT = 0x2
}; };
/*
 * Per-queue/per-CPU performance profiling counters.
 * The fixed size of 40 slots presumably bounds the number of CPUs /
 * WQ indexes tracked -- TODO confirm against the code that fills these.
 */
struct perf_prof {
uint16_t cmd_cpu[40];	/* command submissions counted per CPU */
uint16_t rsp_cpu[40];	/* responses/completions counted per CPU */
uint16_t qh_cpu[40];	/* queue-handler activity per CPU -- assumption, verify */
uint16_t wqidx[40];	/* work-queue index usage counts */
};
/* Provide DMA memory definitions the driver uses per port instance. */ /* Provide DMA memory definitions the driver uses per port instance. */
struct lpfc_dmabuf { struct lpfc_dmabuf {
struct list_head list; struct list_head list;
...@@ -131,10 +140,24 @@ struct lpfc_dma_pool { ...@@ -131,10 +140,24 @@ struct lpfc_dma_pool {
struct hbq_dmabuf { struct hbq_dmabuf {
struct lpfc_dmabuf hbuf; struct lpfc_dmabuf hbuf;
struct lpfc_dmabuf dbuf; struct lpfc_dmabuf dbuf;
uint32_t size; uint16_t total_size;
uint16_t bytes_recv;
uint32_t tag; uint32_t tag;
struct lpfc_cq_event cq_event; struct lpfc_cq_event cq_event;
unsigned long time_stamp; unsigned long time_stamp;
void *context;
};
struct rqb_dmabuf {
struct lpfc_dmabuf hbuf;
struct lpfc_dmabuf dbuf;
uint16_t total_size;
uint16_t bytes_recv;
void *context;
struct lpfc_iocbq *iocbq;
struct lpfc_sglq *sglq;
struct lpfc_queue *hrq; /* ptr to associated Header RQ */
struct lpfc_queue *drq; /* ptr to associated Data RQ */
}; };
/* Priority bit. Set value to exceed low water mark in lpfc_mem. */ /* Priority bit. Set value to exceed low water mark in lpfc_mem. */
...@@ -442,6 +465,11 @@ struct lpfc_vport { ...@@ -442,6 +465,11 @@ struct lpfc_vport {
uint16_t fdmi_num_disc; uint16_t fdmi_num_disc;
uint32_t fdmi_hba_mask; uint32_t fdmi_hba_mask;
uint32_t fdmi_port_mask; uint32_t fdmi_port_mask;
/* There is a single nvme instance per vport. */
struct nvme_fc_local_port *localport;
uint8_t nvmei_support; /* driver supports NVME Initiator */
uint32_t last_fcp_wqidx;
}; };
struct hbq_s { struct hbq_s {
...@@ -459,10 +487,9 @@ struct hbq_s { ...@@ -459,10 +487,9 @@ struct hbq_s {
struct hbq_dmabuf *); struct hbq_dmabuf *);
}; };
#define LPFC_MAX_HBQS 4
/* this matches the position in the lpfc_hbq_defs array */ /* this matches the position in the lpfc_hbq_defs array */
#define LPFC_ELS_HBQ 0 #define LPFC_ELS_HBQ 0
#define LPFC_EXTRA_HBQ 1 #define LPFC_MAX_HBQS 1
enum hba_temp_state { enum hba_temp_state {
HBA_NORMAL_TEMP, HBA_NORMAL_TEMP,
...@@ -652,6 +679,8 @@ struct lpfc_hba { ...@@ -652,6 +679,8 @@ struct lpfc_hba {
* Firmware supports Forced Link Speed * Firmware supports Forced Link Speed
* capability * capability
*/ */
#define HBA_NVME_IOQ_FLUSH 0x80000 /* NVME IO queues flushed. */
uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/ uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
struct lpfc_dmabuf slim2p; struct lpfc_dmabuf slim2p;
...@@ -700,6 +729,8 @@ struct lpfc_hba { ...@@ -700,6 +729,8 @@ struct lpfc_hba {
uint8_t wwpn[8]; uint8_t wwpn[8];
uint32_t RandomData[7]; uint32_t RandomData[7];
uint8_t fcp_embed_io; uint8_t fcp_embed_io;
uint8_t nvme_support; /* Firmware supports NVME */
uint8_t nvmet_support; /* driver supports NVMET */
uint8_t mds_diags_support; uint8_t mds_diags_support;
/* HBA Config Parameters */ /* HBA Config Parameters */
...@@ -725,6 +756,9 @@ struct lpfc_hba { ...@@ -725,6 +756,9 @@ struct lpfc_hba {
uint32_t cfg_fcp_imax; uint32_t cfg_fcp_imax;
uint32_t cfg_fcp_cpu_map; uint32_t cfg_fcp_cpu_map;
uint32_t cfg_fcp_io_channel; uint32_t cfg_fcp_io_channel;
uint32_t cfg_nvme_oas;
uint32_t cfg_nvme_io_channel;
uint32_t cfg_nvme_enable_fb;
uint32_t cfg_total_seg_cnt; uint32_t cfg_total_seg_cnt;
uint32_t cfg_sg_seg_cnt; uint32_t cfg_sg_seg_cnt;
uint32_t cfg_sg_dma_buf_size; uint32_t cfg_sg_dma_buf_size;
...@@ -770,6 +804,12 @@ struct lpfc_hba { ...@@ -770,6 +804,12 @@ struct lpfc_hba {
#define LPFC_FDMI_SUPPORT 1 /* FDMI supported? */ #define LPFC_FDMI_SUPPORT 1 /* FDMI supported? */
uint32_t cfg_enable_SmartSAN; uint32_t cfg_enable_SmartSAN;
uint32_t cfg_enable_mds_diags; uint32_t cfg_enable_mds_diags;
uint32_t cfg_enable_fc4_type;
uint32_t cfg_xri_split;
#define LPFC_ENABLE_FCP 1
#define LPFC_ENABLE_NVME 2
#define LPFC_ENABLE_BOTH 3
uint32_t io_channel_irqs; /* number of irqs for io channels */
lpfc_vpd_t vpd; /* vital product data */ lpfc_vpd_t vpd; /* vital product data */
struct pci_dev *pcidev; struct pci_dev *pcidev;
...@@ -784,11 +824,11 @@ struct lpfc_hba { ...@@ -784,11 +824,11 @@ struct lpfc_hba {
unsigned long data_flags; unsigned long data_flags;
uint32_t hbq_in_use; /* HBQs in use flag */ uint32_t hbq_in_use; /* HBQs in use flag */
struct list_head rb_pend_list; /* Received buffers to be processed */
uint32_t hbq_count; /* Count of configured HBQs */ uint32_t hbq_count; /* Count of configured HBQs */
struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */ struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */
atomic_t fcp_qidx; /* next work queue to post work to */ atomic_t fcp_qidx; /* next FCP WQ (RR Policy) */
atomic_t nvme_qidx; /* next NVME WQ (RR Policy) */
phys_addr_t pci_bar0_map; /* Physical address for PCI BAR0 */ phys_addr_t pci_bar0_map; /* Physical address for PCI BAR0 */
phys_addr_t pci_bar1_map; /* Physical address for PCI BAR1 */ phys_addr_t pci_bar1_map; /* Physical address for PCI BAR1 */
...@@ -843,9 +883,17 @@ struct lpfc_hba { ...@@ -843,9 +883,17 @@ struct lpfc_hba {
/* /*
* stat counters * stat counters
*/ */
uint64_t fc4InputRequests; uint64_t fc4ScsiInputRequests;
uint64_t fc4OutputRequests; uint64_t fc4ScsiOutputRequests;
uint64_t fc4ControlRequests; uint64_t fc4ScsiControlRequests;
uint64_t fc4ScsiIoCmpls;
uint64_t fc4NvmeInputRequests;
uint64_t fc4NvmeOutputRequests;
uint64_t fc4NvmeControlRequests;
uint64_t fc4NvmeIoCmpls;
uint64_t fc4NvmeLsRequests;
uint64_t fc4NvmeLsCmpls;
uint64_t bg_guard_err_cnt; uint64_t bg_guard_err_cnt;
uint64_t bg_apptag_err_cnt; uint64_t bg_apptag_err_cnt;
uint64_t bg_reftag_err_cnt; uint64_t bg_reftag_err_cnt;
...@@ -856,17 +904,23 @@ struct lpfc_hba { ...@@ -856,17 +904,23 @@ struct lpfc_hba {
struct list_head lpfc_scsi_buf_list_get; struct list_head lpfc_scsi_buf_list_get;
struct list_head lpfc_scsi_buf_list_put; struct list_head lpfc_scsi_buf_list_put;
uint32_t total_scsi_bufs; uint32_t total_scsi_bufs;
spinlock_t nvme_buf_list_get_lock; /* NVME buf alloc list lock */
spinlock_t nvme_buf_list_put_lock; /* NVME buf free list lock */
struct list_head lpfc_nvme_buf_list_get;
struct list_head lpfc_nvme_buf_list_put;
uint32_t total_nvme_bufs;
struct list_head lpfc_iocb_list; struct list_head lpfc_iocb_list;
uint32_t total_iocbq_bufs; uint32_t total_iocbq_bufs;
struct list_head active_rrq_list; struct list_head active_rrq_list;
spinlock_t hbalock; spinlock_t hbalock;
/* pci_mem_pools */ /* pci_mem_pools */
struct pci_pool *lpfc_scsi_dma_buf_pool; struct pci_pool *lpfc_sg_dma_buf_pool;
struct pci_pool *lpfc_mbuf_pool; struct pci_pool *lpfc_mbuf_pool;
struct pci_pool *lpfc_hrb_pool; /* header receive buffer pool */ struct pci_pool *lpfc_hrb_pool; /* header receive buffer pool */
struct pci_pool *lpfc_drb_pool; /* data receive buffer pool */ struct pci_pool *lpfc_drb_pool; /* data receive buffer pool */
struct pci_pool *lpfc_hbq_pool; /* SLI3 hbq buffer pool */ struct pci_pool *lpfc_hbq_pool; /* SLI3 hbq buffer pool */
struct pci_pool *txrdy_payload_pool;
struct lpfc_dma_pool lpfc_mbuf_safety_pool; struct lpfc_dma_pool lpfc_mbuf_safety_pool;
mempool_t *mbox_mem_pool; mempool_t *mbox_mem_pool;
...@@ -1092,3 +1146,11 @@ lpfc_sli_read_hs(struct lpfc_hba *phba) ...@@ -1092,3 +1146,11 @@ lpfc_sli_read_hs(struct lpfc_hba *phba)
return 0; return 0;
} }
/*
 * lpfc_phba_elsring - return the ELS ring for this adapter.
 * @phba: pointer to the HBA context object.
 *
 * On SLI-4 hardware the ELS ring is the pring hung off the ELS work
 * queue; on earlier (SLI-3) hardware it is the fixed LPFC_ELS_RING
 * slot in the sli3_ring array.
 * NOTE(review): assumes sli4_hba.els_wq is non-NULL whenever
 * sli_rev == LPFC_SLI_REV4 -- confirm callers cannot hit this before
 * queue setup.
 */
static inline struct lpfc_sli_ring *
lpfc_phba_elsring(struct lpfc_hba *phba)
{
if (phba->sli_rev == LPFC_SLI_REV4)
return phba->sli4_hba.els_wq->pring;
return &phba->sli.sli3_ring[LPFC_ELS_RING];
}
This diff is collapsed.
...@@ -1704,6 +1704,7 @@ lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba) ...@@ -1704,6 +1704,7 @@ lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba)
struct lpfc_vport **vports; struct lpfc_vport **vports;
struct Scsi_Host *shost; struct Scsi_Host *shost;
struct lpfc_sli *psli; struct lpfc_sli *psli;
struct lpfc_queue *qp = NULL;
struct lpfc_sli_ring *pring; struct lpfc_sli_ring *pring;
int i = 0; int i = 0;
...@@ -1711,9 +1712,6 @@ lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba) ...@@ -1711,9 +1712,6 @@ lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba)
if (!psli) if (!psli)
return -ENODEV; return -ENODEV;
pring = &psli->ring[LPFC_FCP_RING];
if (!pring)
return -ENODEV;
if ((phba->link_state == LPFC_HBA_ERROR) || if ((phba->link_state == LPFC_HBA_ERROR) ||
(psli->sli_flag & LPFC_BLOCK_MGMT_IO) || (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
...@@ -1732,10 +1730,18 @@ lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba) ...@@ -1732,10 +1730,18 @@ lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba)
scsi_block_requests(shost); scsi_block_requests(shost);
} }
while (!list_empty(&pring->txcmplq)) { if (phba->sli_rev != LPFC_SLI_REV4) {
if (i++ > 500) /* wait up to 5 seconds */ pring = &psli->sli3_ring[LPFC_FCP_RING];
lpfc_emptyq_wait(phba, &pring->txcmplq, &phba->hbalock);
return 0;
}
list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
pring = qp->pring;
if (!pring || (pring->ringno != LPFC_FCP_RING))
continue;
if (!lpfc_emptyq_wait(phba, &pring->txcmplq,
&pring->ring_lock))
break; break;
msleep(10);
} }
return 0; return 0;
} }
...@@ -2875,8 +2881,7 @@ diag_cmd_data_alloc(struct lpfc_hba *phba, ...@@ -2875,8 +2881,7 @@ diag_cmd_data_alloc(struct lpfc_hba *phba,
static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri, static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
size_t len) size_t len)
{ {
struct lpfc_sli *psli = &phba->sli; struct lpfc_sli_ring *pring;
struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
struct lpfc_iocbq *cmdiocbq; struct lpfc_iocbq *cmdiocbq;
IOCB_t *cmd = NULL; IOCB_t *cmd = NULL;
struct list_head head, *curr, *next; struct list_head head, *curr, *next;
...@@ -2890,6 +2895,8 @@ static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri, ...@@ -2890,6 +2895,8 @@ static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
int iocb_stat; int iocb_stat;
int i = 0; int i = 0;
pring = lpfc_phba_elsring(phba);
cmdiocbq = lpfc_sli_get_iocbq(phba); cmdiocbq = lpfc_sli_get_iocbq(phba);
rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (rxbmp != NULL) { if (rxbmp != NULL) {
...@@ -5403,13 +5410,15 @@ lpfc_bsg_timeout(struct bsg_job *job) ...@@ -5403,13 +5410,15 @@ lpfc_bsg_timeout(struct bsg_job *job)
struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
struct lpfc_hba *phba = vport->phba; struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocb; struct lpfc_iocbq *cmdiocb;
struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; struct lpfc_sli_ring *pring;
struct bsg_job_data *dd_data; struct bsg_job_data *dd_data;
unsigned long flags; unsigned long flags;
int rc = 0; int rc = 0;
LIST_HEAD(completions); LIST_HEAD(completions);
struct lpfc_iocbq *check_iocb, *next_iocb; struct lpfc_iocbq *check_iocb, *next_iocb;
pring = lpfc_phba_elsring(phba);
/* if job's driver data is NULL, the command completed or is in the /* if job's driver data is NULL, the command completed or is in the
* the process of completing. In this case, return status to request * the process of completing. In this case, return status to request
* so the timeout is retried. This avoids double completion issues * so the timeout is retried. This avoids double completion issues
......
...@@ -21,6 +21,7 @@ ...@@ -21,6 +21,7 @@
typedef int (*node_filter)(struct lpfc_nodelist *, void *); typedef int (*node_filter)(struct lpfc_nodelist *, void *);
struct fc_rport; struct fc_rport;
struct fc_frame_header;
void lpfc_down_link(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_down_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_sli_read_link_ste(struct lpfc_hba *); void lpfc_sli_read_link_ste(struct lpfc_hba *);
void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t, uint16_t); void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t, uint16_t);
...@@ -167,6 +168,8 @@ void lpfc_hb_timeout_handler(struct lpfc_hba *); ...@@ -167,6 +168,8 @@ void lpfc_hb_timeout_handler(struct lpfc_hba *);
void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *, void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_iocbq *); struct lpfc_iocbq *);
int lpfc_ct_handle_unsol_abort(struct lpfc_hba *, struct hbq_dmabuf *); int lpfc_ct_handle_unsol_abort(struct lpfc_hba *, struct hbq_dmabuf *);
int lpfc_issue_gidft(struct lpfc_vport *vport);
int lpfc_get_gidft_type(struct lpfc_vport *vport, struct lpfc_iocbq *iocbq);
int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t); int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t);
int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int, uint32_t); int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int, uint32_t);
void lpfc_fdmi_num_disc_check(struct lpfc_vport *); void lpfc_fdmi_num_disc_check(struct lpfc_vport *);
...@@ -186,6 +189,8 @@ void lpfc_unblock_mgmt_io(struct lpfc_hba *); ...@@ -186,6 +189,8 @@ void lpfc_unblock_mgmt_io(struct lpfc_hba *);
void lpfc_offline_prep(struct lpfc_hba *, int); void lpfc_offline_prep(struct lpfc_hba *, int);
void lpfc_offline(struct lpfc_hba *); void lpfc_offline(struct lpfc_hba *);
void lpfc_reset_hba(struct lpfc_hba *); void lpfc_reset_hba(struct lpfc_hba *);
int lpfc_emptyq_wait(struct lpfc_hba *phba, struct list_head *hd,
spinlock_t *slock);
int lpfc_fof_queue_create(struct lpfc_hba *); int lpfc_fof_queue_create(struct lpfc_hba *);
int lpfc_fof_queue_setup(struct lpfc_hba *); int lpfc_fof_queue_setup(struct lpfc_hba *);
...@@ -193,7 +198,11 @@ int lpfc_fof_queue_destroy(struct lpfc_hba *); ...@@ -193,7 +198,11 @@ int lpfc_fof_queue_destroy(struct lpfc_hba *);
irqreturn_t lpfc_sli4_fof_intr_handler(int, void *); irqreturn_t lpfc_sli4_fof_intr_handler(int, void *);
int lpfc_sli_setup(struct lpfc_hba *); int lpfc_sli_setup(struct lpfc_hba *);
int lpfc_sli_queue_setup(struct lpfc_hba *); int lpfc_sli4_setup(struct lpfc_hba *phba);
void lpfc_sli_queue_init(struct lpfc_hba *phba);
void lpfc_sli4_queue_init(struct lpfc_hba *phba);
struct lpfc_sli_ring *lpfc_sli4_calc_ring(struct lpfc_hba *phba,
struct lpfc_iocbq *iocbq);
void lpfc_handle_eratt(struct lpfc_hba *); void lpfc_handle_eratt(struct lpfc_hba *);
void lpfc_handle_latt(struct lpfc_hba *); void lpfc_handle_latt(struct lpfc_hba *);
...@@ -233,6 +242,11 @@ struct hbq_dmabuf *lpfc_sli4_rb_alloc(struct lpfc_hba *); ...@@ -233,6 +242,11 @@ struct hbq_dmabuf *lpfc_sli4_rb_alloc(struct lpfc_hba *);
void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *); void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *);
void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *, void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *,
uint16_t); uint16_t);
int lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe);
int lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hq,
struct lpfc_queue *dq, int count);
int lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hq);
void lpfc_unregister_fcf(struct lpfc_hba *); void lpfc_unregister_fcf(struct lpfc_hba *);
void lpfc_unregister_fcf_rescan(struct lpfc_hba *); void lpfc_unregister_fcf_rescan(struct lpfc_hba *);
void lpfc_unregister_unused_fcf(struct lpfc_hba *); void lpfc_unregister_unused_fcf(struct lpfc_hba *);
...@@ -287,6 +301,9 @@ void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); ...@@ -287,6 +301,9 @@ void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t, int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
struct lpfc_iocbq *, uint32_t); struct lpfc_iocbq *, uint32_t);
int lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t rnum,
struct lpfc_iocbq *iocbq);
struct lpfc_sglq *__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xri);
void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t); void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t);
void lpfc_sli_bemem_bcopy(void *, void *, uint32_t); void lpfc_sli_bemem_bcopy(void *, void *, uint32_t);
void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *); void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
...@@ -356,6 +373,7 @@ extern struct device_attribute *lpfc_hba_attrs[]; ...@@ -356,6 +373,7 @@ extern struct device_attribute *lpfc_hba_attrs[];
extern struct device_attribute *lpfc_vport_attrs[]; extern struct device_attribute *lpfc_vport_attrs[];
extern struct scsi_host_template lpfc_template; extern struct scsi_host_template lpfc_template;
extern struct scsi_host_template lpfc_template_s3; extern struct scsi_host_template lpfc_template_s3;
extern struct scsi_host_template lpfc_template_nvme;
extern struct scsi_host_template lpfc_vport_template; extern struct scsi_host_template lpfc_vport_template;
extern struct fc_function_template lpfc_transport_functions; extern struct fc_function_template lpfc_transport_functions;
extern struct fc_function_template lpfc_vport_transport_functions; extern struct fc_function_template lpfc_vport_transport_functions;
...@@ -471,7 +489,9 @@ int lpfc_issue_unreg_vfi(struct lpfc_vport *); ...@@ -471,7 +489,9 @@ int lpfc_issue_unreg_vfi(struct lpfc_vport *);
int lpfc_selective_reset(struct lpfc_hba *); int lpfc_selective_reset(struct lpfc_hba *);
int lpfc_sli4_read_config(struct lpfc_hba *); int lpfc_sli4_read_config(struct lpfc_hba *);
void lpfc_sli4_node_prep(struct lpfc_hba *); void lpfc_sli4_node_prep(struct lpfc_hba *);
int lpfc_sli4_xri_sgl_update(struct lpfc_hba *); int lpfc_sli4_els_sgl_update(struct lpfc_hba *phba);
int lpfc_sli4_scsi_sgl_update(struct lpfc_hba *phba);
int lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba);
void lpfc_free_sgl_list(struct lpfc_hba *, struct list_head *); void lpfc_free_sgl_list(struct lpfc_hba *, struct list_head *);
uint32_t lpfc_sli_port_speed_get(struct lpfc_hba *); uint32_t lpfc_sli_port_speed_get(struct lpfc_hba *);
int lpfc_sli4_request_firmware_update(struct lpfc_hba *, uint8_t); int lpfc_sli4_request_firmware_update(struct lpfc_hba *, uint8_t);
...@@ -496,3 +516,6 @@ bool lpfc_find_next_oas_lun(struct lpfc_hba *, struct lpfc_name *, ...@@ -496,3 +516,6 @@ bool lpfc_find_next_oas_lun(struct lpfc_hba *, struct lpfc_name *,
uint32_t *, uint32_t *); uint32_t *, uint32_t *);
int lpfc_sli4_dump_page_a0(struct lpfc_hba *phba, struct lpfcMboxq *mbox); int lpfc_sli4_dump_page_a0(struct lpfc_hba *phba, struct lpfcMboxq *mbox);
void lpfc_mbx_cmpl_rdp_page_a0(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb); void lpfc_mbx_cmpl_rdp_page_a0(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb);
/* NVME interfaces. */
void lpfc_nvme_mod_param_dep(struct lpfc_hba *phba);
...@@ -484,20 +484,23 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size) ...@@ -484,20 +484,23 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
off += (8 * sizeof(uint32_t)); off += (8 * sizeof(uint32_t));
} }
if (phba->sli_rev <= LPFC_SLI_REV3) {
for (i = 0; i < 4; i++) { for (i = 0; i < 4; i++) {
pgpp = &phba->port_gp[i]; pgpp = &phba->port_gp[i];
pring = &psli->ring[i]; pring = &psli->sli3_ring[i];
len += snprintf(buf+len, size-len, len += snprintf(buf+len, size-len,
"Ring %d: CMD GetInx:%d (Max:%d Next:%d " "Ring %d: CMD GetInx:%d "
"Local:%d flg:x%x) RSP PutInx:%d Max:%d\n", "(Max:%d Next:%d "
i, pgpp->cmdGetInx, pring->sli.sli3.numCiocb, "Local:%d flg:x%x) "
"RSP PutInx:%d Max:%d\n",
i, pgpp->cmdGetInx,
pring->sli.sli3.numCiocb,
pring->sli.sli3.next_cmdidx, pring->sli.sli3.next_cmdidx,
pring->sli.sli3.local_getidx, pring->sli.sli3.local_getidx,
pring->flag, pgpp->rspPutInx, pring->flag, pgpp->rspPutInx,
pring->sli.sli3.numRiocb); pring->sli.sli3.numRiocb);
} }
if (phba->sli_rev <= LPFC_SLI_REV3) {
word0 = readl(phba->HAregaddr); word0 = readl(phba->HAregaddr);
word1 = readl(phba->CAregaddr); word1 = readl(phba->CAregaddr);
word2 = readl(phba->HSregaddr); word2 = readl(phba->HSregaddr);
...@@ -535,6 +538,7 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size) ...@@ -535,6 +538,7 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
cnt = (LPFC_NODELIST_SIZE / LPFC_NODELIST_ENTRY_SIZE); cnt = (LPFC_NODELIST_SIZE / LPFC_NODELIST_ENTRY_SIZE);
len += snprintf(buf+len, size-len, "\nFCP Nodelist Entries ...\n");
spin_lock_irq(shost->host_lock); spin_lock_irq(shost->host_lock);
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
if (!cnt) { if (!cnt) {
...@@ -2011,6 +2015,14 @@ lpfc_idiag_wqs_for_cq(struct lpfc_hba *phba, char *wqtype, char *pbuffer, ...@@ -2011,6 +2015,14 @@ lpfc_idiag_wqs_for_cq(struct lpfc_hba *phba, char *wqtype, char *pbuffer,
if (*len >= max_cnt) if (*len >= max_cnt)
return 1; return 1;
} }
for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) {
qp = phba->sli4_hba.nvme_wq[qidx];
if (qp->assoc_qid != cq_id)
continue;
*len = __lpfc_idiag_print_wq(qp, wqtype, pbuffer, *len);
if (*len >= max_cnt)
return 1;
}
return 0; return 0;
} }
...@@ -2096,6 +2108,25 @@ lpfc_idiag_cqs_for_eq(struct lpfc_hba *phba, char *pbuffer, ...@@ -2096,6 +2108,25 @@ lpfc_idiag_cqs_for_eq(struct lpfc_hba *phba, char *pbuffer,
return 1; return 1;
} }
for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) {
qp = phba->sli4_hba.nvme_cq[qidx];
if (qp->assoc_qid != eq_id)
continue;
*len = __lpfc_idiag_print_cq(qp, "NVME", pbuffer, *len);
/* Reset max counter */
qp->CQ_max_cqe = 0;
if (*len >= max_cnt)
return 1;
rc = lpfc_idiag_wqs_for_cq(phba, "NVME", pbuffer, len,
max_cnt, qp->queue_id);
if (rc)
return 1;
}
return 0; return 0;
} }
...@@ -2162,21 +2193,21 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes, ...@@ -2162,21 +2193,21 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
spin_lock_irq(&phba->hbalock); spin_lock_irq(&phba->hbalock);
/* Fast-path event queue */ /* Fast-path event queue */
if (phba->sli4_hba.hba_eq && phba->cfg_fcp_io_channel) { if (phba->sli4_hba.hba_eq && phba->io_channel_irqs) {
x = phba->lpfc_idiag_last_eq; x = phba->lpfc_idiag_last_eq;
if (phba->cfg_fof && (x >= phba->cfg_fcp_io_channel)) { if (phba->cfg_fof && (x >= phba->io_channel_irqs)) {
phba->lpfc_idiag_last_eq = 0; phba->lpfc_idiag_last_eq = 0;
goto fof; goto fof;
} }
phba->lpfc_idiag_last_eq++; phba->lpfc_idiag_last_eq++;
if (phba->lpfc_idiag_last_eq >= phba->cfg_fcp_io_channel) if (phba->lpfc_idiag_last_eq >= phba->io_channel_irqs)
if (phba->cfg_fof == 0) if (phba->cfg_fof == 0)
phba->lpfc_idiag_last_eq = 0; phba->lpfc_idiag_last_eq = 0;
len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
"EQ %d out of %d HBA EQs\n", "EQ %d out of %d HBA EQs\n",
x, phba->cfg_fcp_io_channel); x, phba->io_channel_irqs);
/* Fast-path EQ */ /* Fast-path EQ */
qp = phba->sli4_hba.hba_eq[x]; qp = phba->sli4_hba.hba_eq[x];
...@@ -2191,6 +2222,7 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes, ...@@ -2191,6 +2222,7 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
if (len >= max_cnt) if (len >= max_cnt)
goto too_big; goto too_big;
/* will dump both fcp and nvme cqs/wqs for the eq */
rc = lpfc_idiag_cqs_for_eq(phba, pbuffer, &len, rc = lpfc_idiag_cqs_for_eq(phba, pbuffer, &len,
max_cnt, qp->queue_id); max_cnt, qp->queue_id);
if (rc) if (rc)
...@@ -2227,6 +2259,23 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes, ...@@ -2227,6 +2259,23 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
if (len >= max_cnt) if (len >= max_cnt)
goto too_big; goto too_big;
/* Slow-path NVME LS response CQ */
qp = phba->sli4_hba.nvmels_cq;
len = __lpfc_idiag_print_cq(qp, "NVME LS",
pbuffer, len);
/* Reset max counter */
if (qp)
qp->CQ_max_cqe = 0;
if (len >= max_cnt)
goto too_big;
/* Slow-path NVME LS WQ */
qp = phba->sli4_hba.nvmels_wq;
len = __lpfc_idiag_print_wq(qp, "NVME LS",
pbuffer, len);
if (len >= max_cnt)
goto too_big;
qp = phba->sli4_hba.hdr_rq; qp = phba->sli4_hba.hdr_rq;
len = __lpfc_idiag_print_rqpair(qp, phba->sli4_hba.dat_rq, len = __lpfc_idiag_print_rqpair(qp, phba->sli4_hba.dat_rq,
"RQpair", pbuffer, len); "RQpair", pbuffer, len);
...@@ -2447,7 +2496,7 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf, ...@@ -2447,7 +2496,7 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
uint32_t qidx, quetp, queid, index, count, offset, value; uint32_t qidx, quetp, queid, index, count, offset, value;
uint32_t *pentry; uint32_t *pentry;
struct lpfc_queue *pque; struct lpfc_queue *pque, *qp;
int rc; int rc;
/* This is a user write operation */ /* This is a user write operation */
...@@ -2483,19 +2532,15 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf, ...@@ -2483,19 +2532,15 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
case LPFC_IDIAG_EQ: case LPFC_IDIAG_EQ:
/* HBA event queue */ /* HBA event queue */
if (phba->sli4_hba.hba_eq) { if (phba->sli4_hba.hba_eq) {
for (qidx = 0; qidx < phba->cfg_fcp_io_channel; for (qidx = 0; qidx < phba->io_channel_irqs; qidx++) {
qidx++) { qp = phba->sli4_hba.hba_eq[qidx];
if (phba->sli4_hba.hba_eq[qidx] && if (qp && qp->queue_id == queid) {
phba->sli4_hba.hba_eq[qidx]->queue_id ==
queid) {
/* Sanity check */ /* Sanity check */
rc = lpfc_idiag_que_param_check( rc = lpfc_idiag_que_param_check(qp,
phba->sli4_hba.hba_eq[qidx],
index, count); index, count);
if (rc) if (rc)
goto error_out; goto error_out;
idiag.ptr_private = idiag.ptr_private = qp;
phba->sli4_hba.hba_eq[qidx];
goto pass_check; goto pass_check;
} }
} }
...@@ -2525,24 +2570,32 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf, ...@@ -2525,24 +2570,32 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
idiag.ptr_private = phba->sli4_hba.els_cq; idiag.ptr_private = phba->sli4_hba.els_cq;
goto pass_check; goto pass_check;
} }
/* NVME LS complete queue */
if (phba->sli4_hba.nvmels_cq &&
phba->sli4_hba.nvmels_cq->queue_id == queid) {
/* Sanity check */
rc = lpfc_idiag_que_param_check(
phba->sli4_hba.nvmels_cq, index, count);
if (rc)
goto error_out;
idiag.ptr_private = phba->sli4_hba.nvmels_cq;
goto pass_check;
}
/* FCP complete queue */ /* FCP complete queue */
if (phba->sli4_hba.fcp_cq) { if (phba->sli4_hba.fcp_cq) {
qidx = 0; for (qidx = 0; qidx < phba->cfg_fcp_io_channel;
do { qidx++) {
if (phba->sli4_hba.fcp_cq[qidx] && qp = phba->sli4_hba.fcp_cq[qidx];
phba->sli4_hba.fcp_cq[qidx]->queue_id == if (qp && qp->queue_id == queid) {
queid) {
/* Sanity check */ /* Sanity check */
rc = lpfc_idiag_que_param_check( rc = lpfc_idiag_que_param_check(
phba->sli4_hba.fcp_cq[qidx], qp, index, count);
index, count);
if (rc) if (rc)
goto error_out; goto error_out;
idiag.ptr_private = idiag.ptr_private = qp;
phba->sli4_hba.fcp_cq[qidx];
goto pass_check; goto pass_check;
} }
} while (++qidx < phba->cfg_fcp_io_channel); }
} }
goto error_out; goto error_out;
break; break;
...@@ -2572,22 +2625,45 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf, ...@@ -2572,22 +2625,45 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
idiag.ptr_private = phba->sli4_hba.els_wq; idiag.ptr_private = phba->sli4_hba.els_wq;
goto pass_check; goto pass_check;
} }
/* NVME LS work queue */
if (phba->sli4_hba.nvmels_wq &&
phba->sli4_hba.nvmels_wq->queue_id == queid) {
/* Sanity check */
rc = lpfc_idiag_que_param_check(
phba->sli4_hba.nvmels_wq, index, count);
if (rc)
goto error_out;
idiag.ptr_private = phba->sli4_hba.nvmels_wq;
goto pass_check;
}
/* FCP work queue */ /* FCP work queue */
if (phba->sli4_hba.fcp_wq) { if (phba->sli4_hba.fcp_wq) {
for (qidx = 0; qidx < phba->cfg_fcp_io_channel; for (qidx = 0; qidx < phba->cfg_fcp_io_channel;
qidx++) { qidx++) {
if (!phba->sli4_hba.fcp_wq[qidx]) qp = phba->sli4_hba.fcp_wq[qidx];
continue; if (qp && qp->queue_id == queid) {
if (phba->sli4_hba.fcp_wq[qidx]->queue_id ==
queid) {
/* Sanity check */ /* Sanity check */
rc = lpfc_idiag_que_param_check( rc = lpfc_idiag_que_param_check(
phba->sli4_hba.fcp_wq[qidx], qp, index, count);
index, count); if (rc)
goto error_out;
idiag.ptr_private = qp;
goto pass_check;
}
}
}
/* NVME work queue */
if (phba->sli4_hba.nvme_wq) {
for (qidx = 0; qidx < phba->cfg_nvme_io_channel;
qidx++) {
qp = phba->sli4_hba.nvme_wq[qidx];
if (qp && qp->queue_id == queid) {
/* Sanity check */
rc = lpfc_idiag_que_param_check(
qp, index, count);
if (rc) if (rc)
goto error_out; goto error_out;
idiag.ptr_private = idiag.ptr_private = qp;
phba->sli4_hba.fcp_wq[qidx];
goto pass_check; goto pass_check;
} }
} }
...@@ -4562,10 +4638,14 @@ lpfc_debug_dump_all_queues(struct lpfc_hba *phba) ...@@ -4562,10 +4638,14 @@ lpfc_debug_dump_all_queues(struct lpfc_hba *phba)
*/ */
lpfc_debug_dump_wq(phba, DUMP_MBX, 0); lpfc_debug_dump_wq(phba, DUMP_MBX, 0);
lpfc_debug_dump_wq(phba, DUMP_ELS, 0); lpfc_debug_dump_wq(phba, DUMP_ELS, 0);
lpfc_debug_dump_wq(phba, DUMP_NVMELS, 0);
for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++)
lpfc_debug_dump_wq(phba, DUMP_FCP, idx); lpfc_debug_dump_wq(phba, DUMP_FCP, idx);
for (idx = 0; idx < phba->cfg_nvme_io_channel; idx++)
lpfc_debug_dump_wq(phba, DUMP_NVME, idx);
lpfc_debug_dump_hdr_rq(phba); lpfc_debug_dump_hdr_rq(phba);
lpfc_debug_dump_dat_rq(phba); lpfc_debug_dump_dat_rq(phba);
/* /*
...@@ -4573,13 +4653,17 @@ lpfc_debug_dump_all_queues(struct lpfc_hba *phba) ...@@ -4573,13 +4653,17 @@ lpfc_debug_dump_all_queues(struct lpfc_hba *phba)
*/ */
lpfc_debug_dump_cq(phba, DUMP_MBX, 0); lpfc_debug_dump_cq(phba, DUMP_MBX, 0);
lpfc_debug_dump_cq(phba, DUMP_ELS, 0); lpfc_debug_dump_cq(phba, DUMP_ELS, 0);
lpfc_debug_dump_cq(phba, DUMP_NVMELS, 0);
for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++)
lpfc_debug_dump_cq(phba, DUMP_FCP, idx); lpfc_debug_dump_cq(phba, DUMP_FCP, idx);
for (idx = 0; idx < phba->cfg_nvme_io_channel; idx++)
lpfc_debug_dump_cq(phba, DUMP_NVME, idx);
/* /*
* Dump Event Queues (EQs) * Dump Event Queues (EQs)
*/ */
for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) for (idx = 0; idx < phba->io_channel_irqs; idx++)
lpfc_debug_dump_hba_eq(phba, idx); lpfc_debug_dump_hba_eq(phba, idx);
} }
...@@ -44,8 +44,10 @@ ...@@ -44,8 +44,10 @@
enum { enum {
DUMP_FCP, DUMP_FCP,
DUMP_NVME,
DUMP_MBX, DUMP_MBX,
DUMP_ELS, DUMP_ELS,
DUMP_NVMELS,
}; };
/* /*
...@@ -364,11 +366,11 @@ lpfc_debug_dump_q(struct lpfc_queue *q) ...@@ -364,11 +366,11 @@ lpfc_debug_dump_q(struct lpfc_queue *q)
} }
/** /**
* lpfc_debug_dump_wq - dump all entries from the fcp work queue * lpfc_debug_dump_wq - dump all entries from the fcp or nvme work queue
* @phba: Pointer to HBA context object. * @phba: Pointer to HBA context object.
* @wqidx: Index to a FCP work queue. * @wqidx: Index to a FCP or NVME work queue.
* *
* This function dumps all entries from a FCP work queue specified * This function dumps all entries from a FCP or NVME work queue specified
* by the wqidx. * by the wqidx.
**/ **/
static inline void static inline void
...@@ -380,16 +382,22 @@ lpfc_debug_dump_wq(struct lpfc_hba *phba, int qtype, int wqidx) ...@@ -380,16 +382,22 @@ lpfc_debug_dump_wq(struct lpfc_hba *phba, int qtype, int wqidx)
if (qtype == DUMP_FCP) { if (qtype == DUMP_FCP) {
wq = phba->sli4_hba.fcp_wq[wqidx]; wq = phba->sli4_hba.fcp_wq[wqidx];
qtypestr = "FCP"; qtypestr = "FCP";
} else if (qtype == DUMP_NVME) {
wq = phba->sli4_hba.nvme_wq[wqidx];
qtypestr = "NVME";
} else if (qtype == DUMP_MBX) { } else if (qtype == DUMP_MBX) {
wq = phba->sli4_hba.mbx_wq; wq = phba->sli4_hba.mbx_wq;
qtypestr = "MBX"; qtypestr = "MBX";
} else if (qtype == DUMP_ELS) { } else if (qtype == DUMP_ELS) {
wq = phba->sli4_hba.els_wq; wq = phba->sli4_hba.els_wq;
qtypestr = "ELS"; qtypestr = "ELS";
} else if (qtype == DUMP_NVMELS) {
wq = phba->sli4_hba.nvmels_wq;
qtypestr = "NVMELS";
} else } else
return; return;
if (qtype == DUMP_FCP) if (qtype == DUMP_FCP || qtype == DUMP_NVME)
pr_err("%s WQ: WQ[Idx:%d|Qid:%d]\n", pr_err("%s WQ: WQ[Idx:%d|Qid:%d]\n",
qtypestr, wqidx, wq->queue_id); qtypestr, wqidx, wq->queue_id);
else else
...@@ -400,12 +408,12 @@ lpfc_debug_dump_wq(struct lpfc_hba *phba, int qtype, int wqidx) ...@@ -400,12 +408,12 @@ lpfc_debug_dump_wq(struct lpfc_hba *phba, int qtype, int wqidx)
} }
/** /**
* lpfc_debug_dump_cq - dump all entries from a fcp work queue's * lpfc_debug_dump_cq - dump all entries from a fcp or nvme work queue's
* cmpl queue * cmpl queue
* @phba: Pointer to HBA context object. * @phba: Pointer to HBA context object.
* @wqidx: Index to a FCP work queue. * @wqidx: Index to a FCP work queue.
* *
* This function dumps all entries from a FCP completion queue * This function dumps all entries from a FCP or NVME completion queue
* which is associated to the work queue specified by the @wqidx. * which is associated to the work queue specified by the @wqidx.
**/ **/
static inline void static inline void
...@@ -415,12 +423,16 @@ lpfc_debug_dump_cq(struct lpfc_hba *phba, int qtype, int wqidx) ...@@ -415,12 +423,16 @@ lpfc_debug_dump_cq(struct lpfc_hba *phba, int qtype, int wqidx)
char *qtypestr; char *qtypestr;
int eqidx; int eqidx;
/* fcp wq and cq are 1:1, thus same indexes */ /* fcp/nvme wq and cq are 1:1, thus same indexes */
if (qtype == DUMP_FCP) { if (qtype == DUMP_FCP) {
wq = phba->sli4_hba.fcp_wq[wqidx]; wq = phba->sli4_hba.fcp_wq[wqidx];
cq = phba->sli4_hba.fcp_cq[wqidx]; cq = phba->sli4_hba.fcp_cq[wqidx];
qtypestr = "FCP"; qtypestr = "FCP";
} else if (qtype == DUMP_NVME) {
wq = phba->sli4_hba.nvme_wq[wqidx];
cq = phba->sli4_hba.nvme_cq[wqidx];
qtypestr = "NVME";
} else if (qtype == DUMP_MBX) { } else if (qtype == DUMP_MBX) {
wq = phba->sli4_hba.mbx_wq; wq = phba->sli4_hba.mbx_wq;
cq = phba->sli4_hba.mbx_cq; cq = phba->sli4_hba.mbx_cq;
...@@ -429,21 +441,25 @@ lpfc_debug_dump_cq(struct lpfc_hba *phba, int qtype, int wqidx) ...@@ -429,21 +441,25 @@ lpfc_debug_dump_cq(struct lpfc_hba *phba, int qtype, int wqidx)
wq = phba->sli4_hba.els_wq; wq = phba->sli4_hba.els_wq;
cq = phba->sli4_hba.els_cq; cq = phba->sli4_hba.els_cq;
qtypestr = "ELS"; qtypestr = "ELS";
} else if (qtype == DUMP_NVMELS) {
wq = phba->sli4_hba.nvmels_wq;
cq = phba->sli4_hba.nvmels_cq;
qtypestr = "NVMELS";
} else } else
return; return;
for (eqidx = 0; eqidx < phba->cfg_fcp_io_channel; eqidx++) { for (eqidx = 0; eqidx < phba->io_channel_irqs; eqidx++) {
eq = phba->sli4_hba.hba_eq[eqidx]; eq = phba->sli4_hba.hba_eq[eqidx];
if (cq->assoc_qid == eq->queue_id) if (cq->assoc_qid == eq->queue_id)
break; break;
} }
if (eqidx == phba->cfg_fcp_io_channel) { if (eqidx == phba->io_channel_irqs) {
pr_err("Couldn't find EQ for CQ. Using EQ[0]\n"); pr_err("Couldn't find EQ for CQ. Using EQ[0]\n");
eqidx = 0; eqidx = 0;
eq = phba->sli4_hba.hba_eq[0]; eq = phba->sli4_hba.hba_eq[0];
} }
if (qtype == DUMP_FCP) if (qtype == DUMP_FCP || qtype == DUMP_NVME)
pr_err("%s CQ: WQ[Idx:%d|Qid%d]->CQ[Idx%d|Qid%d]" pr_err("%s CQ: WQ[Idx:%d|Qid%d]->CQ[Idx%d|Qid%d]"
"->EQ[Idx:%d|Qid:%d]:\n", "->EQ[Idx:%d|Qid:%d]:\n",
qtypestr, wqidx, wq->queue_id, wqidx, cq->queue_id, qtypestr, wqidx, wq->queue_id, wqidx, cq->queue_id,
...@@ -527,11 +543,25 @@ lpfc_debug_dump_wq_by_id(struct lpfc_hba *phba, int qid) ...@@ -527,11 +543,25 @@ lpfc_debug_dump_wq_by_id(struct lpfc_hba *phba, int qid)
return; return;
} }
for (wq_idx = 0; wq_idx < phba->cfg_nvme_io_channel; wq_idx++)
if (phba->sli4_hba.nvme_wq[wq_idx]->queue_id == qid)
break;
if (wq_idx < phba->cfg_nvme_io_channel) {
pr_err("NVME WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
lpfc_debug_dump_q(phba->sli4_hba.nvme_wq[wq_idx]);
return;
}
if (phba->sli4_hba.els_wq->queue_id == qid) { if (phba->sli4_hba.els_wq->queue_id == qid) {
pr_err("ELS WQ[Qid:%d]\n", qid); pr_err("ELS WQ[Qid:%d]\n", qid);
lpfc_debug_dump_q(phba->sli4_hba.els_wq); lpfc_debug_dump_q(phba->sli4_hba.els_wq);
return; return;
} }
if (phba->sli4_hba.nvmels_wq->queue_id == qid) {
pr_err("NVME LS WQ[Qid:%d]\n", qid);
lpfc_debug_dump_q(phba->sli4_hba.nvmels_wq);
}
} }
/** /**
...@@ -596,12 +626,28 @@ lpfc_debug_dump_cq_by_id(struct lpfc_hba *phba, int qid) ...@@ -596,12 +626,28 @@ lpfc_debug_dump_cq_by_id(struct lpfc_hba *phba, int qid)
return; return;
} }
for (cq_idx = 0; cq_idx < phba->cfg_nvme_io_channel; cq_idx++)
if (phba->sli4_hba.nvme_cq[cq_idx]->queue_id == qid)
break;
if (cq_idx < phba->cfg_nvme_io_channel) {
pr_err("NVME CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
lpfc_debug_dump_q(phba->sli4_hba.nvme_cq[cq_idx]);
return;
}
if (phba->sli4_hba.els_cq->queue_id == qid) { if (phba->sli4_hba.els_cq->queue_id == qid) {
pr_err("ELS CQ[Qid:%d]\n", qid); pr_err("ELS CQ[Qid:%d]\n", qid);
lpfc_debug_dump_q(phba->sli4_hba.els_cq); lpfc_debug_dump_q(phba->sli4_hba.els_cq);
return; return;
} }
if (phba->sli4_hba.nvmels_cq->queue_id == qid) {
pr_err("NVME LS CQ[Qid:%d]\n", qid);
lpfc_debug_dump_q(phba->sli4_hba.nvmels_cq);
return;
}
if (phba->sli4_hba.mbx_cq->queue_id == qid) { if (phba->sli4_hba.mbx_cq->queue_id == qid) {
pr_err("MBX CQ[Qid:%d]\n", qid); pr_err("MBX CQ[Qid:%d]\n", qid);
lpfc_debug_dump_q(phba->sli4_hba.mbx_cq); lpfc_debug_dump_q(phba->sli4_hba.mbx_cq);
...@@ -621,17 +667,15 @@ lpfc_debug_dump_eq_by_id(struct lpfc_hba *phba, int qid) ...@@ -621,17 +667,15 @@ lpfc_debug_dump_eq_by_id(struct lpfc_hba *phba, int qid)
{ {
int eq_idx; int eq_idx;
for (eq_idx = 0; eq_idx < phba->cfg_fcp_io_channel; eq_idx++) { for (eq_idx = 0; eq_idx < phba->io_channel_irqs; eq_idx++)
if (phba->sli4_hba.hba_eq[eq_idx]->queue_id == qid) if (phba->sli4_hba.hba_eq[eq_idx]->queue_id == qid)
break; break;
}
if (eq_idx < phba->cfg_fcp_io_channel) { if (eq_idx < phba->io_channel_irqs) {
printk(KERN_ERR "FCP EQ[Idx:%d|Qid:%d]\n", eq_idx, qid); printk(KERN_ERR "FCP EQ[Idx:%d|Qid:%d]\n", eq_idx, qid);
lpfc_debug_dump_q(phba->sli4_hba.hba_eq[eq_idx]); lpfc_debug_dump_q(phba->sli4_hba.hba_eq[eq_idx]);
return; return;
} }
} }
void lpfc_debug_dump_all_queues(struct lpfc_hba *); void lpfc_debug_dump_all_queues(struct lpfc_hba *);
...@@ -1323,7 +1323,7 @@ lpfc_els_abort_flogi(struct lpfc_hba *phba) ...@@ -1323,7 +1323,7 @@ lpfc_els_abort_flogi(struct lpfc_hba *phba)
"0201 Abort outstanding I/O on NPort x%x\n", "0201 Abort outstanding I/O on NPort x%x\n",
Fabric_DID); Fabric_DID);
pring = &phba->sli.ring[LPFC_ELS_RING]; pring = lpfc_phba_elsring(phba);
/* /*
* Check the txcmplq for an iocb that matches the nport the driver is * Check the txcmplq for an iocb that matches the nport the driver is
...@@ -7155,7 +7155,8 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport) ...@@ -7155,7 +7155,8 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
timeout = (uint32_t)(phba->fc_ratov << 1); timeout = (uint32_t)(phba->fc_ratov << 1);
pring = &phba->sli.ring[LPFC_ELS_RING]; pring = lpfc_phba_elsring(phba);
if ((phba->pport->load_flag & FC_UNLOADING)) if ((phba->pport->load_flag & FC_UNLOADING))
return; return;
spin_lock_irq(&phba->hbalock); spin_lock_irq(&phba->hbalock);
...@@ -7224,7 +7225,7 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport) ...@@ -7224,7 +7225,7 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
spin_unlock_irq(&phba->hbalock); spin_unlock_irq(&phba->hbalock);
} }
if (!list_empty(&phba->sli.ring[LPFC_ELS_RING].txcmplq)) if (!list_empty(&pring->txcmplq))
if (!(phba->pport->load_flag & FC_UNLOADING)) if (!(phba->pport->load_flag & FC_UNLOADING))
mod_timer(&vport->els_tmofunc, mod_timer(&vport->els_tmofunc,
jiffies + msecs_to_jiffies(1000 * timeout)); jiffies + msecs_to_jiffies(1000 * timeout));
...@@ -7255,7 +7256,7 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport) ...@@ -7255,7 +7256,7 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
{ {
LIST_HEAD(abort_list); LIST_HEAD(abort_list);
struct lpfc_hba *phba = vport->phba; struct lpfc_hba *phba = vport->phba;
struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; struct lpfc_sli_ring *pring;
struct lpfc_iocbq *tmp_iocb, *piocb; struct lpfc_iocbq *tmp_iocb, *piocb;
IOCB_t *cmd = NULL; IOCB_t *cmd = NULL;
...@@ -7267,6 +7268,7 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport) ...@@ -7267,6 +7268,7 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
* a working list and release the locks before calling the abort. * a working list and release the locks before calling the abort.
*/ */
spin_lock_irq(&phba->hbalock); spin_lock_irq(&phba->hbalock);
pring = lpfc_phba_elsring(phba);
if (phba->sli_rev == LPFC_SLI_REV4) if (phba->sli_rev == LPFC_SLI_REV4)
spin_lock(&pring->ring_lock); spin_lock(&pring->ring_lock);
...@@ -9013,7 +9015,9 @@ void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp) ...@@ -9013,7 +9015,9 @@ void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
LIST_HEAD(completions); LIST_HEAD(completions);
struct lpfc_hba *phba = ndlp->phba; struct lpfc_hba *phba = ndlp->phba;
struct lpfc_iocbq *tmp_iocb, *piocb; struct lpfc_iocbq *tmp_iocb, *piocb;
struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; struct lpfc_sli_ring *pring;
pring = lpfc_phba_elsring(phba);
spin_lock_irq(&phba->hbalock); spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list, list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
...@@ -9069,13 +9073,13 @@ lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport) ...@@ -9069,13 +9073,13 @@ lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport)
unsigned long iflag = 0; unsigned long iflag = 0;
spin_lock_irqsave(&phba->hbalock, iflag); spin_lock_irqsave(&phba->hbalock, iflag);
spin_lock(&phba->sli4_hba.abts_sgl_list_lock); spin_lock(&phba->sli4_hba.sgl_list_lock);
list_for_each_entry_safe(sglq_entry, sglq_next, list_for_each_entry_safe(sglq_entry, sglq_next,
&phba->sli4_hba.lpfc_abts_els_sgl_list, list) { &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport) if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport)
sglq_entry->ndlp = NULL; sglq_entry->ndlp = NULL;
} }
spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); spin_unlock(&phba->sli4_hba.sgl_list_lock);
spin_unlock_irqrestore(&phba->hbalock, iflag); spin_unlock_irqrestore(&phba->hbalock, iflag);
return; return;
} }
...@@ -9099,22 +9103,22 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba, ...@@ -9099,22 +9103,22 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
unsigned long iflag = 0; unsigned long iflag = 0;
struct lpfc_nodelist *ndlp; struct lpfc_nodelist *ndlp;
struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; struct lpfc_sli_ring *pring;
pring = lpfc_phba_elsring(phba);
spin_lock_irqsave(&phba->hbalock, iflag); spin_lock_irqsave(&phba->hbalock, iflag);
spin_lock(&phba->sli4_hba.abts_sgl_list_lock); spin_lock(&phba->sli4_hba.sgl_list_lock);
list_for_each_entry_safe(sglq_entry, sglq_next, list_for_each_entry_safe(sglq_entry, sglq_next,
&phba->sli4_hba.lpfc_abts_els_sgl_list, list) { &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
if (sglq_entry->sli4_xritag == xri) { if (sglq_entry->sli4_xritag == xri) {
list_del(&sglq_entry->list); list_del(&sglq_entry->list);
ndlp = sglq_entry->ndlp; ndlp = sglq_entry->ndlp;
sglq_entry->ndlp = NULL; sglq_entry->ndlp = NULL;
spin_lock(&pring->ring_lock);
list_add_tail(&sglq_entry->list, list_add_tail(&sglq_entry->list,
&phba->sli4_hba.lpfc_sgl_list); &phba->sli4_hba.lpfc_els_sgl_list);
sglq_entry->state = SGL_FREED; sglq_entry->state = SGL_FREED;
spin_unlock(&pring->ring_lock); spin_unlock(&phba->sli4_hba.sgl_list_lock);
spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
spin_unlock_irqrestore(&phba->hbalock, iflag); spin_unlock_irqrestore(&phba->hbalock, iflag);
lpfc_set_rrq_active(phba, ndlp, lpfc_set_rrq_active(phba, ndlp,
sglq_entry->sli4_lxritag, sglq_entry->sli4_lxritag,
...@@ -9126,21 +9130,21 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba, ...@@ -9126,21 +9130,21 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
return; return;
} }
} }
spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); spin_unlock(&phba->sli4_hba.sgl_list_lock);
lxri = lpfc_sli4_xri_inrange(phba, xri); lxri = lpfc_sli4_xri_inrange(phba, xri);
if (lxri == NO_XRI) { if (lxri == NO_XRI) {
spin_unlock_irqrestore(&phba->hbalock, iflag); spin_unlock_irqrestore(&phba->hbalock, iflag);
return; return;
} }
spin_lock(&pring->ring_lock); spin_lock(&phba->sli4_hba.sgl_list_lock);
sglq_entry = __lpfc_get_active_sglq(phba, lxri); sglq_entry = __lpfc_get_active_sglq(phba, lxri);
if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) { if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) {
spin_unlock(&pring->ring_lock); spin_unlock(&phba->sli4_hba.sgl_list_lock);
spin_unlock_irqrestore(&phba->hbalock, iflag); spin_unlock_irqrestore(&phba->hbalock, iflag);
return; return;
} }
sglq_entry->state = SGL_XRI_ABORTED; sglq_entry->state = SGL_XRI_ABORTED;
spin_unlock(&pring->ring_lock); spin_unlock(&phba->sli4_hba.sgl_list_lock);
spin_unlock_irqrestore(&phba->hbalock, iflag); spin_unlock_irqrestore(&phba->hbalock, iflag);
return; return;
} }
......
This diff is collapsed.
...@@ -44,8 +44,6 @@ ...@@ -44,8 +44,6 @@
#define LPFC_FCP_RING 0 /* ring 0 for FCP initiator commands */ #define LPFC_FCP_RING 0 /* ring 0 for FCP initiator commands */
#define LPFC_EXTRA_RING 1 /* ring 1 for other protocols */ #define LPFC_EXTRA_RING 1 /* ring 1 for other protocols */
#define LPFC_ELS_RING 2 /* ring 2 for ELS commands */ #define LPFC_ELS_RING 2 /* ring 2 for ELS commands */
#define LPFC_FCP_NEXT_RING 3
#define LPFC_FCP_OAS_RING 3
#define SLI2_IOCB_CMD_R0_ENTRIES 172 /* SLI-2 FCP command ring entries */ #define SLI2_IOCB_CMD_R0_ENTRIES 172 /* SLI-2 FCP command ring entries */
#define SLI2_IOCB_RSP_R0_ENTRIES 134 /* SLI-2 FCP response ring entries */ #define SLI2_IOCB_RSP_R0_ENTRIES 134 /* SLI-2 FCP response ring entries */
...@@ -1791,6 +1789,7 @@ typedef struct { /* FireFly BIU registers */ ...@@ -1791,6 +1789,7 @@ typedef struct { /* FireFly BIU registers */
#define MBX_INIT_VFI 0xA3 #define MBX_INIT_VFI 0xA3
#define MBX_INIT_VPI 0xA4 #define MBX_INIT_VPI 0xA4
#define MBX_ACCESS_VDATA 0xA5 #define MBX_ACCESS_VDATA 0xA5
#define MBX_REG_FCFI_MRQ 0xAF
#define MBX_AUTH_PORT 0xF8 #define MBX_AUTH_PORT 0xF8
#define MBX_SECURITY_MGMT 0xF9 #define MBX_SECURITY_MGMT 0xF9
......
...@@ -108,6 +108,7 @@ struct lpfc_sli_intf { ...@@ -108,6 +108,7 @@ struct lpfc_sli_intf {
#define LPFC_MAX_MQ_PAGE 8 #define LPFC_MAX_MQ_PAGE 8
#define LPFC_MAX_WQ_PAGE_V0 4 #define LPFC_MAX_WQ_PAGE_V0 4
#define LPFC_MAX_WQ_PAGE 8 #define LPFC_MAX_WQ_PAGE 8
#define LPFC_MAX_RQ_PAGE 8
#define LPFC_MAX_CQ_PAGE 4 #define LPFC_MAX_CQ_PAGE 4
#define LPFC_MAX_EQ_PAGE 8 #define LPFC_MAX_EQ_PAGE 8
...@@ -198,7 +199,7 @@ struct lpfc_sli_intf { ...@@ -198,7 +199,7 @@ struct lpfc_sli_intf {
/* Configuration of Interrupts / sec for entire HBA port */ /* Configuration of Interrupts / sec for entire HBA port */
#define LPFC_MIN_IMAX 5000 #define LPFC_MIN_IMAX 5000
#define LPFC_MAX_IMAX 5000000 #define LPFC_MAX_IMAX 5000000
#define LPFC_DEF_IMAX 50000 #define LPFC_DEF_IMAX 150000
#define LPFC_MIN_CPU_MAP 0 #define LPFC_MIN_CPU_MAP 0
#define LPFC_MAX_CPU_MAP 2 #define LPFC_MAX_CPU_MAP 2
...@@ -348,6 +349,7 @@ struct lpfc_cqe { ...@@ -348,6 +349,7 @@ struct lpfc_cqe {
#define CQE_CODE_RECEIVE 0x4 #define CQE_CODE_RECEIVE 0x4
#define CQE_CODE_XRI_ABORTED 0x5 #define CQE_CODE_XRI_ABORTED 0x5
#define CQE_CODE_RECEIVE_V1 0x9 #define CQE_CODE_RECEIVE_V1 0x9
#define CQE_CODE_NVME_ERSP 0xd
/* /*
* Define mask value for xri_aborted and wcqe completed CQE extended status. * Define mask value for xri_aborted and wcqe completed CQE extended status.
...@@ -367,6 +369,9 @@ struct lpfc_wcqe_complete { ...@@ -367,6 +369,9 @@ struct lpfc_wcqe_complete {
#define lpfc_wcqe_c_hw_status_SHIFT 0 #define lpfc_wcqe_c_hw_status_SHIFT 0
#define lpfc_wcqe_c_hw_status_MASK 0x000000FF #define lpfc_wcqe_c_hw_status_MASK 0x000000FF
#define lpfc_wcqe_c_hw_status_WORD word0 #define lpfc_wcqe_c_hw_status_WORD word0
#define lpfc_wcqe_c_ersp0_SHIFT 0
#define lpfc_wcqe_c_ersp0_MASK 0x0000FFFF
#define lpfc_wcqe_c_ersp0_WORD word0
uint32_t total_data_placed; uint32_t total_data_placed;
uint32_t parameter; uint32_t parameter;
#define lpfc_wcqe_c_bg_edir_SHIFT 5 #define lpfc_wcqe_c_bg_edir_SHIFT 5
...@@ -400,6 +405,9 @@ struct lpfc_wcqe_complete { ...@@ -400,6 +405,9 @@ struct lpfc_wcqe_complete {
#define lpfc_wcqe_c_code_SHIFT lpfc_cqe_code_SHIFT #define lpfc_wcqe_c_code_SHIFT lpfc_cqe_code_SHIFT
#define lpfc_wcqe_c_code_MASK lpfc_cqe_code_MASK #define lpfc_wcqe_c_code_MASK lpfc_cqe_code_MASK
#define lpfc_wcqe_c_code_WORD lpfc_cqe_code_WORD #define lpfc_wcqe_c_code_WORD lpfc_cqe_code_WORD
#define lpfc_wcqe_c_sqhead_SHIFT 0
#define lpfc_wcqe_c_sqhead_MASK 0x0000FFFF
#define lpfc_wcqe_c_sqhead_WORD word3
}; };
/* completion queue entry for wqe release */ /* completion queue entry for wqe release */
...@@ -2841,12 +2849,18 @@ struct lpfc_sli4_parameters { ...@@ -2841,12 +2849,18 @@ struct lpfc_sli4_parameters {
#define cfg_mqv_WORD word6 #define cfg_mqv_WORD word6
uint32_t word7; uint32_t word7;
uint32_t word8; uint32_t word8;
#define cfg_wqpcnt_SHIFT 0
#define cfg_wqpcnt_MASK 0x0000000f
#define cfg_wqpcnt_WORD word8
#define cfg_wqsize_SHIFT 8 #define cfg_wqsize_SHIFT 8
#define cfg_wqsize_MASK 0x0000000f #define cfg_wqsize_MASK 0x0000000f
#define cfg_wqsize_WORD word8 #define cfg_wqsize_WORD word8
#define cfg_wqv_SHIFT 14 #define cfg_wqv_SHIFT 14
#define cfg_wqv_MASK 0x00000003 #define cfg_wqv_MASK 0x00000003
#define cfg_wqv_WORD word8 #define cfg_wqv_WORD word8
#define cfg_wqpsize_SHIFT 16
#define cfg_wqpsize_MASK 0x000000ff
#define cfg_wqpsize_WORD word8
uint32_t word9; uint32_t word9;
uint32_t word10; uint32_t word10;
#define cfg_rqv_SHIFT 14 #define cfg_rqv_SHIFT 14
...@@ -2897,6 +2911,12 @@ struct lpfc_sli4_parameters { ...@@ -2897,6 +2911,12 @@ struct lpfc_sli4_parameters {
#define cfg_mds_diags_SHIFT 1 #define cfg_mds_diags_SHIFT 1
#define cfg_mds_diags_MASK 0x00000001 #define cfg_mds_diags_MASK 0x00000001
#define cfg_mds_diags_WORD word19 #define cfg_mds_diags_WORD word19
#define cfg_nvme_SHIFT 3
#define cfg_nvme_MASK 0x00000001
#define cfg_nvme_WORD word19
#define cfg_xib_SHIFT 4
#define cfg_xib_MASK 0x00000001
#define cfg_xib_WORD word19
}; };
#define LPFC_SET_UE_RECOVERY 0x10 #define LPFC_SET_UE_RECOVERY 0x10
...@@ -3659,6 +3679,9 @@ struct wqe_common { ...@@ -3659,6 +3679,9 @@ struct wqe_common {
#define wqe_ebde_cnt_SHIFT 0 #define wqe_ebde_cnt_SHIFT 0
#define wqe_ebde_cnt_MASK 0x0000000f #define wqe_ebde_cnt_MASK 0x0000000f
#define wqe_ebde_cnt_WORD word10 #define wqe_ebde_cnt_WORD word10
#define wqe_nvme_SHIFT 4
#define wqe_nvme_MASK 0x00000001
#define wqe_nvme_WORD word10
#define wqe_oas_SHIFT 6 #define wqe_oas_SHIFT 6
#define wqe_oas_MASK 0x00000001 #define wqe_oas_MASK 0x00000001
#define wqe_oas_WORD word10 #define wqe_oas_WORD word10
...@@ -4017,11 +4040,39 @@ struct lpfc_grp_hdr { ...@@ -4017,11 +4040,39 @@ struct lpfc_grp_hdr {
uint8_t revision[32]; uint8_t revision[32];
}; };
/* Defines for WQE command type */
#define FCP_COMMAND 0x0 #define FCP_COMMAND 0x0
#define NVME_READ_CMD 0x0
#define FCP_COMMAND_DATA_OUT 0x1 #define FCP_COMMAND_DATA_OUT 0x1
#define NVME_WRITE_CMD 0x1
#define FCP_COMMAND_TRECEIVE 0x2
#define FCP_COMMAND_TRSP 0x3
#define FCP_COMMAND_TSEND 0x7
#define OTHER_COMMAND 0x8
#define ELS_COMMAND_NON_FIP 0xC #define ELS_COMMAND_NON_FIP 0xC
#define ELS_COMMAND_FIP 0xD #define ELS_COMMAND_FIP 0xD
#define OTHER_COMMAND 0x8
#define LPFC_NVME_EMBED_CMD 0x0
#define LPFC_NVME_EMBED_WRITE 0x1
#define LPFC_NVME_EMBED_READ 0x2
/* WQE Commands */
#define CMD_ABORT_XRI_WQE 0x0F
#define CMD_XMIT_SEQUENCE64_WQE 0x82
#define CMD_XMIT_BCAST64_WQE 0x84
#define CMD_ELS_REQUEST64_WQE 0x8A
#define CMD_XMIT_ELS_RSP64_WQE 0x95
#define CMD_XMIT_BLS_RSP64_WQE 0x97
#define CMD_FCP_IWRITE64_WQE 0x98
#define CMD_FCP_IREAD64_WQE 0x9A
#define CMD_FCP_ICMND64_WQE 0x9C
#define CMD_FCP_TSEND64_WQE 0x9F
#define CMD_FCP_TRECEIVE64_WQE 0xA1
#define CMD_FCP_TRSP64_WQE 0xA3
#define CMD_GEN_REQUEST64_WQE 0xC2
#define CMD_WQE_MASK 0xff
#define LPFC_FW_DUMP 1 #define LPFC_FW_DUMP 1
#define LPFC_FW_RESET 2 #define LPFC_FW_RESET 2
......
This diff is collapsed.
...@@ -38,6 +38,10 @@ ...@@ -38,6 +38,10 @@
#define LOG_FIP 0x00020000 /* FIP events */ #define LOG_FIP 0x00020000 /* FIP events */
#define LOG_FCP_UNDER 0x00040000 /* FCP underruns errors */ #define LOG_FCP_UNDER 0x00040000 /* FCP underruns errors */
#define LOG_SCSI_CMD 0x00080000 /* ALL SCSI commands */ #define LOG_SCSI_CMD 0x00080000 /* ALL SCSI commands */
#define LOG_NVME 0x00100000 /* NVME general events. */
#define LOG_NVME_DISC 0x00200000 /* NVME Discovery/Connect events. */
#define LOG_NVME_ABTS 0x00400000 /* NVME ABTS events. */
#define LOG_NVME_IOERR 0x00800000 /* NVME IO Error events. */
#define LOG_ALL_MSG 0xffffffff /* LOG all messages */ #define LOG_ALL_MSG 0xffffffff /* LOG all messages */
#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \ #define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \
......
...@@ -954,7 +954,7 @@ lpfc_config_pcb_setup(struct lpfc_hba * phba) ...@@ -954,7 +954,7 @@ lpfc_config_pcb_setup(struct lpfc_hba * phba)
pcbp->maxRing = (psli->num_rings - 1); pcbp->maxRing = (psli->num_rings - 1);
for (i = 0; i < psli->num_rings; i++) { for (i = 0; i < psli->num_rings; i++) {
pring = &psli->ring[i]; pring = &psli->sli3_ring[i];
pring->sli.sli3.sizeCiocb = pring->sli.sli3.sizeCiocb =
phba->sli_rev == 3 ? SLI3_IOCB_CMD_SIZE : phba->sli_rev == 3 ? SLI3_IOCB_CMD_SIZE :
...@@ -1217,7 +1217,7 @@ lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb) ...@@ -1217,7 +1217,7 @@ lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
mb->un.varCfgRing.recvNotify = 1; mb->un.varCfgRing.recvNotify = 1;
psli = &phba->sli; psli = &phba->sli;
pring = &psli->ring[ring]; pring = &psli->sli3_ring[ring];
mb->un.varCfgRing.numMask = pring->num_mask; mb->un.varCfgRing.numMask = pring->num_mask;
mb->mbxCommand = MBX_CONFIG_RING; mb->mbxCommand = MBX_CONFIG_RING;
mb->mbxOwner = OWN_HOST; mb->mbxOwner = OWN_HOST;
...@@ -2434,14 +2434,25 @@ lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox) ...@@ -2434,14 +2434,25 @@ lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
memset(mbox, 0, sizeof(*mbox)); memset(mbox, 0, sizeof(*mbox));
reg_fcfi = &mbox->u.mqe.un.reg_fcfi; reg_fcfi = &mbox->u.mqe.un.reg_fcfi;
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_FCFI); bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_FCFI);
bf_set(lpfc_reg_fcfi_rq_id0, reg_fcfi, phba->sli4_hba.hdr_rq->queue_id); if (phba->nvmet_support == 0) {
bf_set(lpfc_reg_fcfi_rq_id0, reg_fcfi,
phba->sli4_hba.hdr_rq->queue_id);
/* Match everything - rq_id0 */
bf_set(lpfc_reg_fcfi_type_match0, reg_fcfi, 0);
bf_set(lpfc_reg_fcfi_type_mask0, reg_fcfi, 0);
bf_set(lpfc_reg_fcfi_rctl_match0, reg_fcfi, 0);
bf_set(lpfc_reg_fcfi_rctl_mask0, reg_fcfi, 0);
bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID); bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID);
/* addr mode is bit wise inverted value of fcf addr_mode */
bf_set(lpfc_reg_fcfi_mam, reg_fcfi,
(~phba->fcf.addr_mode) & 0x3);
}
bf_set(lpfc_reg_fcfi_rq_id2, reg_fcfi, REG_FCF_INVALID_QID); bf_set(lpfc_reg_fcfi_rq_id2, reg_fcfi, REG_FCF_INVALID_QID);
bf_set(lpfc_reg_fcfi_rq_id3, reg_fcfi, REG_FCF_INVALID_QID); bf_set(lpfc_reg_fcfi_rq_id3, reg_fcfi, REG_FCF_INVALID_QID);
bf_set(lpfc_reg_fcfi_info_index, reg_fcfi, bf_set(lpfc_reg_fcfi_info_index, reg_fcfi,
phba->fcf.current_rec.fcf_indx); phba->fcf.current_rec.fcf_indx);
/* reg_fcf addr mode is bit wise inverted value of fcf addr_mode */
bf_set(lpfc_reg_fcfi_mam, reg_fcfi, (~phba->fcf.addr_mode) & 0x3);
if (phba->fcf.current_rec.vlan_id != LPFC_FCOE_NULL_VID) { if (phba->fcf.current_rec.vlan_id != LPFC_FCOE_NULL_VID) {
bf_set(lpfc_reg_fcfi_vv, reg_fcfi, 1); bf_set(lpfc_reg_fcfi_vv, reg_fcfi, 1);
bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi, bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi,
......
This diff is collapsed.
...@@ -204,10 +204,11 @@ int ...@@ -204,10 +204,11 @@ int
lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{ {
LIST_HEAD(abort_list); LIST_HEAD(abort_list);
struct lpfc_sli *psli = &phba->sli; struct lpfc_sli_ring *pring;
struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
struct lpfc_iocbq *iocb, *next_iocb; struct lpfc_iocbq *iocb, *next_iocb;
pring = lpfc_phba_elsring(phba);
/* Abort outstanding I/O on NPort <nlp_DID> */ /* Abort outstanding I/O on NPort <nlp_DID> */
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY, lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY,
"2819 Abort outstanding I/O on NPort x%x " "2819 Abort outstanding I/O on NPort x%x "
...@@ -2104,7 +2105,7 @@ lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ...@@ -2104,7 +2105,7 @@ lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
/* flush the target */ /* flush the target */
lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring], lpfc_sli_abort_iocb(vport, &phba->sli.sli3_ring[LPFC_FCP_RING],
ndlp->nlp_sid, 0, LPFC_CTX_TGT); ndlp->nlp_sid, 0, LPFC_CTX_TGT);
/* Treat like rcv logo */ /* Treat like rcv logo */
......
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of version 2 of the GNU General *
* Public License as published by the Free Software Foundation. *
* This program is distributed in the hope that it will be useful. *
* ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
* WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
* FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
* DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
* TO BE LEGALLY INVALID. See the GNU General Public License for *
* more details, a copy of which can be found in the file COPYING *
* included with this package. *
********************************************************************/
/*
 * Scatter-gather segment (SGE) limits for NVME initiator I/O.
 * min/default/max number of segments per I/O request.
 */
#define LPFC_NVME_MIN_SEGS 16
#define LPFC_NVME_DEFAULT_SEGS 66 /* 256K IOs - 64 + 2 */
#define LPFC_NVME_MAX_SEGS 510
/*
 * Limits on the number of receive buffers posted for NVME target
 * (nvmet) mode.
 */
#define LPFC_NVMET_MIN_POSTBUF 16
#define LPFC_NVMET_DEFAULT_POSTBUF 1024
#define LPFC_NVMET_MAX_POSTBUF 4096
/* NVME work queue depth, in entries -- confirm against WQ setup code */
#define LPFC_NVME_WQSIZE 256
/* Length in bytes of an NVME extended response (ERSP) payload;
 * presumably paired with CQE_CODE_NVME_ERSP completions -- confirm.
 */
#define LPFC_NVME_ERSP_LEN 0x20
/* Declare nvme-based local and remote port definitions. */

/*
 * Driver-private context for an NVME-over-FC local port.
 * Anchors the list of remote ports discovered through this local port
 * and carries the completion used during unregistration.
 */
struct lpfc_nvme_lport {
	struct lpfc_vport *vport;	/* back-pointer to the owning lpfc vport */
	struct list_head rport_list;	/* list of lpfc_nvme_rport entries */
	/* completed when local port unregistration finishes
	 * (per name; confirm in the unreg path)
	 */
	struct completion lport_unreg_done;
	/* Add stats counters here */
};
/*
 * Driver-private context for an NVME-over-FC remote port, pairing the
 * nvme-fc transport's remote port handle with the lpfc FC node it maps to.
 */
struct lpfc_nvme_rport {
	struct list_head list;		/* linkage on lport->rport_list */
	struct lpfc_nvme_lport *lport;	/* owning local port */
	struct nvme_fc_remote_port *remoteport; /* handle from the nvme-fc transport */
	struct lpfc_nodelist *ndlp;	/* associated FC nodelist entry (NPort) */
	/* completed when remote port unregistration finishes
	 * (per name; confirm in the unreg path)
	 */
	struct completion rport_unreg_done;
};
/*
 * Per-command I/O context for an NVME initiator request: ties the
 * upper-layer nvmefc_fcp_req to the driver's iocb, DMA-able command
 * buffer and SGL, and records completion status from the WCQE/IOCB.
 */
struct lpfc_nvme_buf {
	struct list_head list;		/* driver list linkage (e.g. free list -- confirm) */
	struct nvmefc_fcp_req *nvmeCmd;	/* upper-layer (nvme-fc) request being serviced */
	struct lpfc_nvme_rport *nrport;	/* remote port this command targets */

	uint32_t timeout;		/* command timeout value; units not shown here -- confirm */

	uint16_t flags; /* TBD convert exch_busy to flags */
#define LPFC_SBUF_XBUSY 0x1 /* SLI4 hba reported XB on WCQE cmpl */
	uint16_t exch_busy; /* SLI4 hba reported XB on complete WCQE */
	uint16_t status; /* From IOCB Word 7- ulpStatus */
	uint16_t cpu;			/* cpu associated with this I/O -- confirm semantics */
	uint16_t qidx;			/* queue index used for this I/O -- confirm */
	uint16_t sqid;			/* NVME submission queue id -- confirm */
	uint32_t result; /* From IOCB Word 4. */

	uint32_t seg_cnt; /* Number of scatter-gather segments returned by
			   * dma_map_sg. The driver needs this for calls
			   * to dma_unmap_sg.
			   */
	dma_addr_t nonsg_phys; /* Non scatter-gather physical address. */

	/*
	 * data and dma_handle are the kernel virtual and bus address of the
	 * dma-able buffer containing the fcp_cmd, fcp_rsp and a scatter
	 * gather bde list that supports the sg_tablesize value.
	 */
	void *data;
	dma_addr_t dma_handle;

	struct sli4_sge *nvme_sgl;	/* kernel virtual address of the SGL */
	dma_addr_t dma_phys_sgl;	/* bus address of the SGL */

	/* cur_iocbq has phys of the dma-able buffer.
	 * Iotag is in here
	 */
	struct lpfc_iocbq cur_iocbq;

	wait_queue_head_t *waitq;	/* waiter to wake on completion, if any -- confirm */
	unsigned long start_time;	/* when the I/O was started (jiffies -- confirm) */
};
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
...@@ -403,6 +403,8 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable) ...@@ -403,6 +403,8 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
vport->fdmi_port_mask = phba->pport->fdmi_port_mask; vport->fdmi_port_mask = phba->pport->fdmi_port_mask;
} }
/* todo: init: register port with nvme */
/* /*
* In SLI4, the vpi must be activated before it can be used * In SLI4, the vpi must be activated before it can be used
* by the port. * by the port.
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment