Commit e84067d7 authored by Duane Grigsby, committed by Martin K. Petersen

scsi: qla2xxx: Add FC-NVMe F/W initialization and transport registration

This code provides the interfaces to register remote and local ports of
FC4 type 0x28 with the FC-NVMe transport and transports the requests
(FC-NVMe FC link services and FC-NVMe command IUs) to the fabric. It
also provides support for allocating hardware queues and aborting
FC-NVMe FC requests.
Signed-off-by: Darren Trapp <darren.trapp@cavium.com>
Signed-off-by: Duane Grigsby <duane.grigsby@cavium.com>
Signed-off-by: Anil Gurumurthy <anil.gurumurhty@cavium.com>
Signed-off-by: Giridhar Malavali <giridhar.malavali@cavium.com>
Signed-off-by: Himanshu Madhani <himanshu.madhani@cavium.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 7401bc18
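
For orientation before the per-file hunks: an FC low-level driver plugs into the FC-NVMe transport by filling a struct nvme_fc_port_template with its callbacks (LS request/abort, FCP I/O/abort, hardware-queue create/delete) and limits, then registering local and remote ports against it. The sketch below shows only the shape of such a template; every callback name and numeric limit is an assumption, not the contents of the driver's qla_nvme.c (whose diff is collapsed further down).

#include <linux/nvme-fc-driver.h>

/* Illustrative template only -- callback names and limits are assumptions;
 * the real template lives in the collapsed qla_nvme.c. */
static struct nvme_fc_port_template qla_nvme_fc_transport_sketch = {
	.localport_delete	= qla_nvme_localport_delete,	/* hypothetical */
	.remoteport_delete	= qla_nvme_remoteport_delete,	/* hypothetical */
	.create_queue		= qla_nvme_alloc_queue,		/* hypothetical */
	.delete_queue		= qla_nvme_free_queue,		/* hypothetical */
	.ls_req			= qla_nvme_ls_req,		/* hypothetical */
	.ls_abort		= qla_nvme_ls_abort,		/* hypothetical */
	.fcp_io			= qla_nvme_post_cmd,		/* hypothetical */
	.fcp_abort		= qla_nvme_fcp_abort,		/* hypothetical */
	.max_hw_queues		= 8,			/* assumed limit */
	.max_sgl_segments	= QLA_MAX_FC_SEGMENTS,	/* from qla_nvme.h below */
	.max_dif_sgl_segments	= QLA_MAX_FC_SEGMENTS,
	.dma_boundary		= 0xFFFFFFFF,
	.local_priv_sz		= 0,
	.remote_priv_sz		= 0,
	.lsrqst_priv_sz		= sizeof(struct nvme_private),
	.fcprqst_priv_sz	= sizeof(struct nvme_private),
};
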
drivers/scsi/qla2xxx/Makefile
qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \
qla_nx.o qla_mr.o qla_nx2.o qla_target.o qla_tmpl.o
qla_nx.o qla_mr.o qla_nx2.o qla_target.o qla_tmpl.o qla_nvme.o
obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o
obj-$(CONFIG_TCM_QLA2XXX) += tcm_qla2xxx.o
drivers/scsi/qla2xxx/qla_dbg.c
@@ -15,7 +15,7 @@
* | | | 0x015b-0x0160 |
* | | | 0x016e |
* | Mailbox commands | 0x1199 | 0x1193 |
* | Device Discovery | 0x2131 | 0x210e-0x2116 |
* | Device Discovery | 0x2134 | 0x210e-0x2116 |
* | | | 0x211a |
* | | | 0x211c-0x2128 |
* | | | 0x212a-0x2130 |
drivers/scsi/qla2xxx/qla_def.h
@@ -37,6 +37,7 @@
#include "qla_bsg.h"
#include "qla_nx.h"
#include "qla_nx2.h"
#include "qla_nvme.h"
#define QLA2XXX_DRIVER_NAME "qla2xxx"
#define QLA2XXX_APIDEV "ql2xapidev"
#define QLA2XXX_MANUFACTURER "QLogic Corporation"
@@ -423,6 +424,7 @@ struct srb_iocb {
int rsp_len;
dma_addr_t cmd_dma;
dma_addr_t rsp_dma;
enum nvmefc_fcp_datadir dir;
uint32_t dl;
uint32_t timeout_sec;
} nvme;
@@ -452,6 +454,7 @@ struct srb_iocb {
#define SRB_NACK_PRLI 17
#define SRB_NACK_LOGO 18
#define SRB_NVME_CMD 19
#define SRB_NVME_LS 20
#define SRB_PRLI_CMD 21
enum {
@@ -467,6 +470,7 @@ typedef struct srb {
uint8_t cmd_type;
uint8_t pad[3];
atomic_t ref_count;
wait_queue_head_t nvme_ls_waitQ;
struct fc_port *fcport;
struct scsi_qla_host *vha;
uint32_t handle;
@@ -2298,6 +2302,7 @@ typedef struct fc_port {
struct work_struct nvme_del_work;
atomic_t nvme_ref_count;
wait_queue_head_t nvme_waitQ;
uint32_t nvme_prli_service_param;
#define NVME_PRLI_SP_CONF BIT_7
#define NVME_PRLI_SP_INITIATOR BIT_5
@@ -4124,6 +4129,7 @@ typedef struct scsi_qla_host {
struct nvme_fc_local_port *nvme_local_port;
atomic_t nvme_ref_count;
wait_queue_head_t nvme_waitQ;
struct list_head nvme_rport_list;
atomic_t nvme_active_aen_cnt;
uint16_t nvme_last_rptd_aen;
drivers/scsi/qla2xxx/qla_gbl.h
@@ -9,6 +9,16 @@
#include <linux/interrupt.h>
/*
* Global functions prototype in qla_nvme.c source file.
*/
extern void qla_nvme_register_hba(scsi_qla_host_t *);
extern int qla_nvme_register_remote(scsi_qla_host_t *, fc_port_t *);
extern void qla_nvme_delete(scsi_qla_host_t *);
extern void qla_nvme_abort(struct qla_hw_data *, srb_t *sp);
extern void qla24xx_nvme_ls4_iocb(scsi_qla_host_t *, struct pt_ls4_request *,
struct req_que *);
/*
* Global Function Prototypes in qla_init.c source file.
*/
@@ -141,6 +151,7 @@ extern int ql2xiniexchg;
extern int ql2xfwholdabts;
extern int ql2xmvasynctoatio;
extern int ql2xuctrlirq;
extern int ql2xnvmeenable;
extern int qla2x00_loop_reset(scsi_qla_host_t *);
extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
drivers/scsi/qla2xxx/qla_init.c
@@ -4520,6 +4520,11 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
fcport->deleted = 0;
fcport->logout_on_delete = 1;
if (fcport->fc4f_nvme) {
qla_nvme_register_remote(vha, fcport);
return;
}
qla2x00_set_fcport_state(fcport, FCS_ONLINE);
qla2x00_iidma_fcport(vha, fcport);
qla24xx_update_fcport_fcp_prio(vha, fcport);
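
qla_nvme_register_remote() itself is defined in the collapsed qla_nvme.c. As a hedged sketch of what such a helper typically does, it would translate the fcport into a struct nvme_fc_port_info and hand it to nvme_fc_register_remoteport(); the role mapping and the log message id below are assumptions, not the shipped implementation.

/* Sketch only -- the body is illustrative; the real function is in qla_nvme.c. */
int qla_nvme_register_remote(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	struct nvme_fc_port_info req;
	struct nvme_fc_remote_port *rport;
	int ret;

	memset(&req, 0, sizeof(req));
	req.port_name = wwn_to_u64(fcport->port_name);
	req.node_name = wwn_to_u64(fcport->node_name);
	req.port_id   = fcport->d_id.b24;
	/* The PRLI service parameters (fcport->nvme_prli_service_param) would
	 * normally inform the role bits; a fixed mapping is assumed here. */
	req.port_role = FC_PORT_ROLE_NVME_INITIATOR |
			FC_PORT_ROLE_NVME_TARGET;

	ret = nvme_fc_register_remoteport(vha->nvme_local_port, &req, &rport);
	if (ret)
		ql_log(ql_log_warn, vha, 0x2100,	/* message id illustrative */
		    "NVMe remote port registration failed, ret=%d\n", ret);
	return ret;
}
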
@@ -4669,6 +4674,9 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
break;
} while (0);
if (!vha->nvme_local_port && vha->flags.nvme_enabled)
qla_nvme_register_hba(vha);
if (rval)
ql_dbg(ql_dbg_disc, vha, 0x2068,
"Configure fabric error exit rval=%d.\n", rval);
drivers/scsi/qla2xxx/qla_iocb.c
@@ -3155,6 +3155,39 @@ static void qla2x00_send_notify_ack_iocb(srb_t *sp,
nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
}
/*
* Build NVME LS request
*/
static int
qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
{
struct srb_iocb *nvme;
int rval = QLA_SUCCESS;
nvme = &sp->u.iocb_cmd;
cmd_pkt->entry_type = PT_LS4_REQUEST;
cmd_pkt->entry_count = 1;
cmd_pkt->control_flags = CF_LS4_ORIGINATOR << CF_LS4_SHIFT;
cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
cmd_pkt->tx_dseg_count = 1;
cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len;
cmd_pkt->dseg0_len = nvme->u.nvme.cmd_len;
cmd_pkt->dseg0_address[0] = cpu_to_le32(LSD(nvme->u.nvme.cmd_dma));
cmd_pkt->dseg0_address[1] = cpu_to_le32(MSD(nvme->u.nvme.cmd_dma));
cmd_pkt->rx_dseg_count = 1;
cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len;
cmd_pkt->dseg1_len = nvme->u.nvme.rsp_len;
cmd_pkt->dseg1_address[0] = cpu_to_le32(LSD(nvme->u.nvme.rsp_dma));
cmd_pkt->dseg1_address[1] = cpu_to_le32(MSD(nvme->u.nvme.rsp_dma));
return rval;
}
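
qla_nvme_ls() above only packs the PT_LS4_REQUEST IOCB; the DMA addresses, lengths and timeout it reads from sp->u.iocb_cmd.u.nvme must be staged beforehand by the transport's .ls_req callback. A hedged sketch of that callback follows (the real one is in the collapsed qla_nvme.c; qla_fcport_from_rport() is a hypothetical lookup helper).

/* Sketch only -- illustrates how an .ls_req callback would stage the fields
 * consumed by qla_nvme_ls() and then queue the srb. */
static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	struct nvme_private *priv = fd->private;	/* lsrqst_priv_sz area */
	fc_port_t *fcport = qla_fcport_from_rport(rport); /* hypothetical lookup */
	struct srb_iocb *nvme;
	srb_t *sp;

	sp = qla2x00_get_sp(fcport->vha, fcport, GFP_ATOMIC);
	if (!sp)
		return -ENOMEM;

	sp->type = SRB_NVME_LS;
	/* sp->done would point at a completion handler; see the sketch after
	 * qla24xx_nvme_ls4_iocb() below. */
	priv->sp = sp;
	priv->fd = fd;

	nvme = &sp->u.iocb_cmd;
	nvme->u.nvme.cmd_dma = fd->rqstdma;
	nvme->u.nvme.cmd_len = fd->rqstlen;
	nvme->u.nvme.rsp_dma = fd->rspdma;
	nvme->u.nvme.rsp_len = fd->rsplen;
	nvme->u.nvme.timeout_sec = fd->timeout;

	if (qla2x00_start_sp(sp) != QLA_SUCCESS)	/* lands in qla_nvme_ls() */
		return -EIO;

	return 0;
}
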
int
qla2x00_start_sp(srb_t *sp)
{
@@ -3211,6 +3244,9 @@ qla2x00_start_sp(srb_t *sp)
case SRB_FXIOCB_BCMD:
qlafx00_fxdisc_iocb(sp, pkt);
break;
case SRB_NVME_LS:
qla_nvme_ls(sp, pkt);
break;
case SRB_ABT_CMD:
IS_QLAFX00(ha) ?
qlafx00_abort_iocb(sp, pkt) :
drivers/scsi/qla2xxx/qla_isr.c
@@ -2828,6 +2828,21 @@ qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
sp->done(sp, 0);
}
void qla24xx_nvme_ls4_iocb(scsi_qla_host_t *vha, struct pt_ls4_request *pkt,
struct req_que *req)
{
srb_t *sp;
const char func[] = "LS4_IOCB";
uint16_t comp_status;
sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
if (!sp)
return;
comp_status = le16_to_cpu(pkt->status);
sp->done(sp, comp_status);
}
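
The sp->done handler invoked above with the IOCB completion status is expected to hand that status back to the FC-NVMe transport. struct nvme_private (in qla_nvme.h below) carries a work item and comp_status for exactly this: the done handler records the status and schedules ls_work, and the worker calls the transport's done callback from process context. A sketch of that worker, assuming this pattern:

/* Sketch only -- assumes sp->done stores the status in priv->comp_status and
 * does schedule_work(&priv->ls_work); the real pair is in qla_nvme.c. */
static void qla_nvme_ls_complete(struct work_struct *work)
{
	struct nvme_private *priv =
	    container_of(work, struct nvme_private, ls_work);
	struct nvmefc_ls_req *fd = priv->fd;

	/* Report the LS completion status back to the FC-NVMe transport. */
	fd->done(fd, priv->comp_status);
}
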
/**
* qla24xx_process_response_queue() - Process response queue entries.
* @ha: SCSI driver HA context
@@ -2901,6 +2916,10 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
case CTIO_CRC2:
qlt_response_pkt_all_vps(vha, rsp, (response_t *)pkt);
break;
case PT_LS4_REQUEST:
qla24xx_nvme_ls4_iocb(vha, (struct pt_ls4_request *)pkt,
rsp->req);
break;
case NOTIFY_ACK_TYPE:
if (pkt->handle == QLA_TGT_SKIP_HANDLE)
qlt_response_pkt_all_vps(vha, rsp,
drivers/scsi/qla2xxx/qla_mbx.c
@@ -560,6 +560,8 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
}
#define EXTENDED_BB_CREDITS BIT_0
#define NVME_ENABLE_FLAG BIT_3
/*
* qla2x00_execute_fw
* Start adapter firmware.
@@ -601,6 +603,9 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
} else
mcp->mb[4] = 0;
if (ql2xnvmeenable && IS_QLA27XX(ha))
mcp->mb[4] |= NVME_ENABLE_FLAG;
if (ha->flags.exlogins_enabled)
mcp->mb[4] |= ENABLE_EXTENDED_LOGIN;
@@ -941,6 +946,22 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1191,
"%s: Firmware supports Exchange Offload 0x%x\n",
__func__, ha->fw_attributes_h);
/* bit 26 of fw_attributes */
if ((ha->fw_attributes_h & 0x400) && ql2xnvmeenable) {
struct init_cb_24xx *icb;
icb = (struct init_cb_24xx *)ha->init_cb;
/*
* fw supports nvme and driver load
* parameter requested nvme
*/
vha->flags.nvme_enabled = 1;
icb->firmware_options_2 &= cpu_to_le32(~0xf);
ha->zio_mode = 0;
ha->zio_timer = 0;
}
}
if (IS_QLA27XX(ha)) {
This diff is collapsed (drivers/scsi/qla2xxx/qla_nvme.c, the large new source file added by this commit, is not expanded in this view).
drivers/scsi/qla2xxx/qla_nvme.h (new file)
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2017 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
#ifndef __QLA_NVME_H
#define __QLA_NVME_H
#include <linux/blk-mq.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>
#include <linux/nvme-fc-driver.h>
#define NVME_ATIO_CMD_OFF 32
#define NVME_FIRST_PACKET_CMDLEN (64 - NVME_ATIO_CMD_OFF)
#define Q2T_NVME_NUM_TAGS 2048
#define QLA_MAX_FC_SEGMENTS 64
struct srb;
struct nvme_private {
struct srb *sp;
struct nvmefc_ls_req *fd;
struct work_struct ls_work;
int comp_status;
};
struct nvme_rport {
struct nvme_fc_port_info req;
struct list_head list;
struct fc_port *fcport;
};
#define COMMAND_NVME 0x88 /* Command Type FC-NVMe IOCB */
struct cmd_nvme {
uint8_t entry_type; /* Entry type. */
uint8_t entry_count; /* Entry count. */
uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System handle. */
uint16_t nport_handle; /* N_PORT handle. */
uint16_t timeout; /* Command timeout. */
uint16_t dseg_count; /* Data segment count. */
uint16_t nvme_rsp_dsd_len; /* NVMe RSP DSD length */
uint64_t rsvd;
uint16_t control_flags; /* Control Flags */
#define CF_NVME_ENABLE BIT_9
#define CF_DIF_SEG_DESCR_ENABLE BIT_3
#define CF_DATA_SEG_DESCR_ENABLE BIT_2
#define CF_READ_DATA BIT_1
#define CF_WRITE_DATA BIT_0
uint16_t nvme_cmnd_dseg_len; /* Data segment length. */
uint32_t nvme_cmnd_dseg_address[2]; /* Data segment address. */
uint32_t nvme_rsp_dseg_address[2]; /* Data segment address. */
uint32_t byte_count; /* Total byte count. */
uint8_t port_id[3]; /* PortID of destination port. */
uint8_t vp_index;
uint32_t nvme_data_dseg_address[2]; /* Data segment address. */
uint32_t nvme_data_dseg_len; /* Data segment length. */
};
#define PT_LS4_REQUEST 0x89 /* Link Service pass-through IOCB (request) */
struct pt_ls4_request {
uint8_t entry_type;
uint8_t entry_count;
uint8_t sys_define;
uint8_t entry_status;
uint32_t handle;
uint16_t status;
uint16_t nport_handle;
uint16_t tx_dseg_count;
uint8_t vp_index;
uint8_t rsvd;
uint16_t timeout;
uint16_t control_flags;
#define CF_LS4_SHIFT 13
#define CF_LS4_ORIGINATOR 0
#define CF_LS4_RESPONDER 1
#define CF_LS4_RESPONDER_TERM 2
uint16_t rx_dseg_count;
uint16_t rsvd2;
uint32_t exchange_address;
uint32_t rsvd3;
uint32_t rx_byte_count;
uint32_t tx_byte_count;
uint32_t dseg0_address[2];
uint32_t dseg0_len;
uint32_t dseg1_address[2];
uint32_t dseg1_len;
};
#define PT_LS4_UNSOL 0x56 /* pass-up unsolicited rec FC-NVMe request */
struct pt_ls4_rx_unsol {
uint8_t entry_type;
uint8_t entry_count;
uint16_t rsvd0;
uint16_t rsvd1;
uint8_t vp_index;
uint8_t rsvd2;
uint16_t rsvd3;
uint16_t nport_handle;
uint16_t frame_size;
uint16_t rsvd4;
uint32_t exchange_address;
uint8_t d_id[3];
uint8_t r_ctl;
uint8_t s_id[3];
uint8_t cs_ctl;
uint8_t f_ctl[3];
uint8_t type;
uint16_t seq_cnt;
uint8_t df_ctl;
uint8_t seq_id;
uint16_t rx_id;
uint16_t ox_id;
uint32_t param;
uint32_t desc0;
#define PT_LS4_PAYLOAD_OFFSET 0x2c
#define PT_LS4_FIRST_PACKET_LEN 20
uint32_t desc_len;
uint32_t payload[3];
};
#endif
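
struct cmd_nvme above is the IOCB that the (collapsed) qla_nvme.c builds for each FC-NVMe command delivered through the transport's .fcp_io callback. Below is a hedged sketch of the mapping from struct nvmefc_fcp_req, restricted to a single data segment and no DIF; the real builder also walks the scatterlist, emits continuation IOCBs, and sets the handle and timeout.

/* Sketch only (single data segment, no DIF): shows how an nvmefc_fcp_req
 * maps onto struct cmd_nvme.  Helper name and field choices are illustrative. */
static void qla_nvme_build_cmd_sketch(srb_t *sp, struct nvmefc_fcp_req *fd,
    struct cmd_nvme *cmd_pkt)
{
	struct scatterlist *sgl = fd->first_sgl;

	memset(cmd_pkt, 0, sizeof(*cmd_pkt));
	cmd_pkt->entry_type = COMMAND_NVME;
	cmd_pkt->entry_count = 1;
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	/* FC-NVMe command IU (SQE) and response IU buffers. */
	cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen);
	cmd_pkt->nvme_cmnd_dseg_address[0] = cpu_to_le32(LSD(fd->cmddma));
	cmd_pkt->nvme_cmnd_dseg_address[1] = cpu_to_le32(MSD(fd->cmddma));
	cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen);
	cmd_pkt->nvme_rsp_dseg_address[0] = cpu_to_le32(LSD(fd->rspdma));
	cmd_pkt->nvme_rsp_dseg_address[1] = cpu_to_le32(MSD(fd->rspdma));

	/* Data direction selects the read/write control flag. */
	cmd_pkt->control_flags = cpu_to_le16(CF_NVME_ENABLE |
	    (fd->io_dir == NVMEFC_FCP_WRITE ? CF_WRITE_DATA :
	     fd->io_dir == NVMEFC_FCP_READ  ? CF_READ_DATA : 0));

	/* First (and here, only) data segment. */
	cmd_pkt->dseg_count = cpu_to_le16(1);
	cmd_pkt->byte_count = cpu_to_le32(fd->payload_length);
	cmd_pkt->nvme_data_dseg_address[0] = cpu_to_le32(LSD(sg_dma_address(sgl)));
	cmd_pkt->nvme_data_dseg_address[1] = cpu_to_le32(MSD(sg_dma_address(sgl)));
	cmd_pkt->nvme_data_dseg_len = cpu_to_le32(sg_dma_len(sgl));
}
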
drivers/scsi/qla2xxx/qla_os.c
@@ -120,7 +120,11 @@ MODULE_PARM_DESC(ql2xmaxqdepth,
"Maximum queue depth to set for each LUN. "
"Default is 32.");
#if (IS_ENABLED(CONFIG_NVME_FC))
int ql2xenabledif;
#else
int ql2xenabledif = 2;
#endif
module_param(ql2xenabledif, int, S_IRUGO);
MODULE_PARM_DESC(ql2xenabledif,
" Enable T10-CRC-DIF:\n"
@@ -129,6 +133,16 @@ MODULE_PARM_DESC(ql2xenabledif,
" 1 -- Enable DIF for all types\n"
" 2 -- Enable DIF for all types, except Type 0.\n");
#if (IS_ENABLED(CONFIG_NVME_FC))
int ql2xnvmeenable = 1;
#else
int ql2xnvmeenable;
#endif
module_param(ql2xnvmeenable, int, 0644);
MODULE_PARM_DESC(ql2xnvmeenable,
"Enables NVME support. "
"0 - no NVMe. Default is Y");
int ql2xenablehba_err_chk = 2;
module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xenablehba_err_chk,
@@ -267,6 +281,7 @@ static void qla2x00_clear_drv_active(struct qla_hw_data *);
static void qla2x00_free_device(scsi_qla_host_t *);
static void qla83xx_disable_laser(scsi_qla_host_t *vha);
static int qla2xxx_map_queues(struct Scsi_Host *shost);
static void qla2x00_destroy_deferred_work(struct qla_hw_data *);
struct scsi_host_template qla2xxx_driver_template = {
.module = THIS_MODULE,
@@ -695,7 +710,7 @@ qla2x00_sp_free_dma(void *ptr)
}
end:
if (sp->type != SRB_NVME_CMD) {
if ((sp->type != SRB_NVME_CMD) && (sp->type != SRB_NVME_LS)) {
CMD_SP(cmd) = NULL;
qla2x00_rel_sp(sp);
}
@@ -1700,15 +1715,23 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
if (sp) {
req->outstanding_cmds[cnt] = NULL;
if (sp->cmd_type == TYPE_SRB) {
/*
* Don't abort commands in adapter
* during EEH recovery as it's not
* accessible/responding.
*/
if (GET_CMD_SP(sp) &&
if ((sp->type == SRB_NVME_CMD) ||
(sp->type == SRB_NVME_LS)) {
sp_get(sp);
spin_unlock_irqrestore(
&ha->hardware_lock, flags);
qla_nvme_abort(ha, sp);
spin_lock_irqsave(
&ha->hardware_lock, flags);
} else if (GET_CMD_SP(sp) &&
!ha->flags.eeh_busy &&
(sp->type == SRB_SCSI_CMD)) {
/*
* Don't abort commands in
* adapter during EEH
* recovery as it's not
* accessible/responding.
*
* Get a reference to the sp
* and drop the lock. The
* reference ensures this
@@ -3534,6 +3557,9 @@ qla2x00_remove_one(struct pci_dev *pdev)
return;
set_bit(UNLOADING, &base_vha->dpc_flags);
qla_nvme_delete(base_vha);
dma_free_coherent(&ha->pdev->dev,
base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma);
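
qla_nvme_delete(), called just above during qla2x00_remove_one(), is defined in the collapsed qla_nvme.c. A sketch of the expected teardown, assuming the nvme_rport_list/nvme_ref_count/nvme_waitQ fields added in qla_def.h are used to unwind registrations and wait out the transport's references; the nvme_remote_port member and the exact wait pattern are assumptions.

/* Sketch only -- unregisters the remote ports, then the local port, and waits
 * for the transport to drop its references.  Freeing of the per-rport
 * bookkeeping struct is omitted here. */
void qla_nvme_delete(scsi_qla_host_t *vha)
{
	struct nvme_rport *rport, *trport;

	list_for_each_entry_safe(rport, trport, &vha->nvme_rport_list, list) {
		list_del(&rport->list);
		/* nvme_remote_port as an fc_port member is assumed. */
		nvme_fc_unregister_remoteport(rport->fcport->nvme_remote_port);
	}

	if (vha->nvme_local_port) {
		nvme_fc_unregister_localport(vha->nvme_local_port);
		wait_event(vha->nvme_waitQ,
		    atomic_read(&vha->nvme_ref_count) == 0);
	}
}
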