Commit 9e522cd8 authored by Arun Easi, committed by James Bottomley

[SCSI] qla2xxx: T10 DIF - ISP83xx changes.

Signed-off-by: Arun Easi <arun.easi@qlogic.com>
Signed-off-by: Chad Dupuis <chad.dupuis@qlogic.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
parent cb860bbd
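
The behavioral core of this commit is the guard-type negotiation with the SCSI midlayer: CRC guard is always offered, and IP-checksum guard is additionally advertised on IP-guard-capable (ISP83xx) HBAs when ql2xenabledif > 1. The stand-alone sketch below condenses that logic from the qla_attr.c/qla_os.c hunks that follow; it is an illustration only, not driver code: the SHOST_DIX_GUARD_* bit values mirror the SCSI host API, while the capability flags and the main() harness are hypothetical stand-ins for the IS_PI_IPGUARD_CAPABLE()/IS_PI_DIFB_DIX0_CAPABLE() macros added in qla_def.h.

/* Stand-alone illustration of the guard selection added in this commit.
 * Not driver code: the capability flags below are stand-ins for the
 * IS_PI_IPGUARD_CAPABLE()/IS_PI_DIFB_DIX0_CAPABLE() macros. */
#include <stdio.h>

#define SHOST_DIX_GUARD_CRC 0x1   /* T10 CRC guard */
#define SHOST_DIX_GUARD_IP  0x2   /* IP-checksum guard */

static int ipguard_capable = 1;    /* ISP83xx: IP guard supported */
static int difb_dix0_capable = 0;  /* hard-wired to 0 in this commit */

/* Mirrors the new guard selection in qla24xx_vport_create()/probe. */
static int select_guard(int ql2xenabledif)
{
    int guard = SHOST_DIX_GUARD_CRC;          /* CRC is always offered */

    /* IP guard is advertised only on capable HBAs and only when the
     * module parameter asks for it (ql2xenabledif > 1). */
    if (ipguard_capable && (ql2xenabledif > 1 || difb_dix0_capable))
        guard |= SHOST_DIX_GUARD_IP;

    return guard;
}

int main(void)
{
    printf("ql2xenabledif=1 -> guard mask 0x%x\n", select_guard(1)); /* 0x1 */
    printf("ql2xenabledif=2 -> guard mask 0x%x\n", select_guard(2)); /* 0x3 */
    return 0;
}

Since the module parameter default also changes to ql2xenabledif = 2 in the qla_os.c hunk below, ISP83xx parts end up advertising both guard types out of the box.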
@@ -1812,7 +1812,7 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
     if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
         if (ha->fw_attributes & BIT_4) {
-            int prot = 0;
+            int prot = 0, guard;
             vha->flags.difdix_supported = 1;
             ql_dbg(ql_dbg_user, vha, 0x7082,
                 "Registered for DIF/DIX type 1 and 3 protection.\n");
@@ -1825,7 +1825,14 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
                 | SHOST_DIX_TYPE1_PROTECTION
                 | SHOST_DIX_TYPE2_PROTECTION
                 | SHOST_DIX_TYPE3_PROTECTION);
-            scsi_host_set_guard(vha->host, SHOST_DIX_GUARD_CRC);
+            guard = SHOST_DIX_GUARD_CRC;
+            if (IS_PI_IPGUARD_CAPABLE(ha) &&
+                (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha)))
+                guard |= SHOST_DIX_GUARD_IP;
+            scsi_host_set_guard(vha->host, guard);
         } else
             vha->flags.difdix_supported = 0;
     }
...
@@ -17,7 +17,8 @@
  * |                              |                    | 0x113a         |
  * | Device Discovery             |       0x2087       | 0x2020-0x2022, |
  * |                              |                    | 0x2016         |
- * | Queue Command and IO tracing |       0x3030       | 0x3006,0x3008  |
+ * | Queue Command and IO tracing |       0x3030       | 0x3006-0x300b  |
+ * |                              |                    | 0x3027-0x3028  |
  * |                              |                    | 0x302d-0x302e  |
  * | DPC Thread                   |       0x401d       | 0x4002,0x4013  |
  * | Async Events                 |       0x5071       | 0x502b-0x502f  |
...
@@ -1468,9 +1468,10 @@ typedef struct {
 } cont_a64_entry_t;
 #define PO_MODE_DIF_INSERT        0
-#define PO_MODE_DIF_REMOVE        BIT_0
-#define PO_MODE_DIF_PASS          BIT_1
-#define PO_MODE_DIF_REPLACE       (BIT_0 + BIT_1)
+#define PO_MODE_DIF_REMOVE        1
+#define PO_MODE_DIF_PASS          2
+#define PO_MODE_DIF_REPLACE       3
+#define PO_MODE_DIF_TCP_CKSUM     6
 #define PO_ENABLE_DIF_BUNDLING    BIT_8
 #define PO_ENABLE_INCR_GUARD_SEED BIT_3
 #define PO_DISABLE_INCR_REF_TAG   BIT_5
@@ -2781,6 +2782,12 @@ struct qla_hw_data {
 /* Bit 21 of fw_attributes decides the MCTP capabilities */
 #define IS_MCTP_CAPABLE(ha)     (IS_QLA2031(ha) && \
                                 ((ha)->fw_attributes_ext[0] & BIT_0))
+#define IS_PI_UNINIT_CAPABLE(ha)        (IS_QLA83XX(ha))
+#define IS_PI_IPGUARD_CAPABLE(ha)       (IS_QLA83XX(ha))
+#define IS_PI_DIFB_DIX0_CAPABLE(ha)     (0)
+#define IS_PI_SPLIT_DET_CAPABLE_HBA(ha) (IS_QLA83XX(ha))
+#define IS_PI_SPLIT_DET_CAPABLE(ha)     (IS_PI_SPLIT_DET_CAPABLE_HBA(ha) && \
+    (((ha)->fw_attributes_h << 16 | (ha)->fw_attributes) & BIT_22))
 /* HBA serial number */
 uint8_t serial0;
...
@@ -5498,6 +5498,9 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
         rval = 1;
     }
+    if (IS_T10_PI_CAPABLE(ha))
+        nv->frame_payload_size &= ~7;
     /* Reset Initialization control block */
     memset(icb, 0, ha->init_cb_size);
...
@@ -147,13 +147,6 @@ qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
     struct scsi_cmnd *cmd = GET_CMD_SP(sp);
     uint8_t guard = scsi_host_get_guard(cmd->device->host);
-    /* We only support T10 DIF right now */
-    if (guard != SHOST_DIX_GUARD_CRC) {
-        ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3007,
-            "Unsupported guard: %d for cmd=%p.\n", guard, cmd);
-        return 0;
-    }
     /* We always use DIFF Bundling for best performance */
     *fw_prot_opts = 0;
@@ -172,9 +165,10 @@ qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
         *fw_prot_opts |= PO_MODE_DIF_REMOVE;
         break;
     case SCSI_PROT_READ_PASS:
-        *fw_prot_opts |= PO_MODE_DIF_PASS;
-        break;
     case SCSI_PROT_WRITE_PASS:
+        if (guard & SHOST_DIX_GUARD_IP)
+            *fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
+        else
             *fw_prot_opts |= PO_MODE_DIF_PASS;
         break;
     default:    /* Normal Request */
@@ -821,7 +815,6 @@ qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
     unsigned int protcnt)
 {
     struct scsi_cmnd *cmd = GET_CMD_SP(sp);
-    scsi_qla_host_t *vha = shost_priv(cmd->device->host);
     switch (scsi_get_prot_type(cmd)) {
     case SCSI_PROT_DIF_TYPE0:
@@ -891,12 +884,6 @@ qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
         pkt->ref_tag_mask[3] = 0xff;
         break;
     }
-    ql_dbg(ql_dbg_io, vha, 0x3009,
-        "Setting protection Tags: (BIG) ref tag = 0x%x, app tag = 0x%x, "
-        "prot SG count %d, cmd lba 0x%x, prot_type=%u cmd=%p.\n",
-        pkt->ref_tag, pkt->app_tag, protcnt, (int)scsi_get_lba(cmd),
-        scsi_get_prot_type(cmd), cmd);
 }
 struct qla2_sgx {
@@ -1068,9 +1055,6 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
     int i;
     uint16_t used_dsds = tot_dsds;
     struct scsi_cmnd *cmd = GET_CMD_SP(sp);
-    scsi_qla_host_t *vha = shost_priv(cmd->device->host);
-    uint8_t *cp;
     scsi_for_each_sg(cmd, sg, tot_dsds, i) {
         dma_addr_t sle_dma;
@@ -1113,19 +1097,12 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
             cur_dsd = (uint32_t *)next_dsd;
         }
         sle_dma = sg_dma_address(sg);
-        ql_dbg(ql_dbg_io, vha, 0x300a,
-            "sg entry %d - addr=0x%x 0x%x, " "len=%d for cmd=%p.\n",
-            i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg), cmd);
         *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
         *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
         *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
         avail_dsds--;
-        if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
-            cp = page_address(sg_page(sg)) + sg->offset;
-            ql_dbg(ql_dbg_io, vha, 0x300b,
-                "User data buffer=%p for cmd=%p.\n", cp, cmd);
-        }
     }
     /* Null termination */
     *cur_dsd++ = 0;
@@ -1148,8 +1125,6 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
     struct scsi_cmnd *cmd;
     uint32_t *cur_dsd = dsd;
     uint16_t used_dsds = tot_dsds;
-    scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
-    uint8_t *cp;
     cmd = GET_CMD_SP(sp);
     scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
@@ -1193,23 +1168,11 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
             cur_dsd = (uint32_t *)next_dsd;
         }
         sle_dma = sg_dma_address(sg);
-        if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
-            ql_dbg(ql_dbg_io, vha, 0x3027,
-                "%s(): %p, sg_entry %d - "
-                "addr=0x%x0x%x, len=%d.\n",
-                __func__, cur_dsd, i,
-                LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg));
-        }
         *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
         *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
         *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
-        if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
-            cp = page_address(sg_page(sg)) + sg->offset;
-            ql_dbg(ql_dbg_io, vha, 0x3028,
-                "%s(): Protection Data buffer = %p.\n", __func__,
-                cp);
-        }
         avail_dsds--;
     }
     /* Null termination */
@@ -1386,6 +1349,16 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
     if (!qla2x00_hba_err_chk_enabled(sp))
         fw_prot_opts |= 0x10; /* Disable Guard tag checking */
+    /* HBA error checking enabled */
+    else if (IS_PI_UNINIT_CAPABLE(ha)) {
+        if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
+            || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
+                SCSI_PROT_DIF_TYPE2))
+            fw_prot_opts |= BIT_10;
+        else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
+            SCSI_PROT_DIF_TYPE3)
+            fw_prot_opts |= BIT_11;
+    }
     if (!bundling) {
         cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
...
@@ -2163,6 +2163,19 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
     case CS_DIF_ERROR:
         logit = qla2x00_handle_dif_error(sp, sts24);
         break;
+    case CS_TRANSPORT:
+        res = DID_ERROR << 16;
+        if (!IS_PI_SPLIT_DET_CAPABLE(ha))
+            break;
+        if (state_flags & BIT_4)
+            scmd_printk(KERN_WARNING, cp,
+                "Unsupported device '%s' found.\n",
+                cp->device->vendor);
+        break;
     default:
         res = DID_ERROR << 16;
         break;
...
@@ -116,9 +116,8 @@ MODULE_PARM_DESC(ql2xmaxqdepth,
     "Maximum queue depth to set for each LUN. "
     "Default is 32.");
-/* Do not change the value of this after module load */
-int ql2xenabledif = 0;
-module_param(ql2xenabledif, int, S_IRUGO|S_IWUSR);
+int ql2xenabledif = 2;
+module_param(ql2xenabledif, int, S_IRUGO);
 MODULE_PARM_DESC(ql2xenabledif,
     " Enable T10-CRC-DIF "
     " Default is 0 - No DIF Support. 1 - Enable it"
@@ -1358,6 +1357,9 @@ qla2xxx_slave_configure(struct scsi_device *sdev)
     scsi_qla_host_t *vha = shost_priv(sdev->host);
     struct req_que *req = vha->req;
+    if (IS_T10_PI_CAPABLE(vha->hw))
+        blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
     if (sdev->tagged_supported)
         scsi_activate_tcq(sdev, req->max_q_depth);
     else
@@ -2574,7 +2576,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
     if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
         if (ha->fw_attributes & BIT_4) {
-            int prot = 0;
+            int prot = 0, guard;
             base_vha->flags.difdix_supported = 1;
             ql_dbg(ql_dbg_init, base_vha, 0x00f1,
                 "Registering for DIF/DIX type 1 and 3 protection.\n");
@@ -2587,7 +2589,14 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
                 | SHOST_DIX_TYPE1_PROTECTION
                 | SHOST_DIX_TYPE2_PROTECTION
                 | SHOST_DIX_TYPE3_PROTECTION);
-            scsi_host_set_guard(host, SHOST_DIX_GUARD_CRC);
+            guard = SHOST_DIX_GUARD_CRC;
+            if (IS_PI_IPGUARD_CAPABLE(ha) &&
+                (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha)))
+                guard |= SHOST_DIX_GUARD_IP;
+            scsi_host_set_guard(host, guard);
         } else
             base_vha->flags.difdix_supported = 0;
     }
...
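
A note on the data path side: once the midlayer may hand down IP-checksum guards, qla24xx_configure_prot_mode() has to pick between PO_MODE_DIF_PASS and the new PO_MODE_DIF_TCP_CKSUM for READ_PASS/WRITE_PASS commands. The small sketch below reproduces just that decision; it is an illustration under the values defined in the qla_def.h hunk above, with the guard constants and the test harness as stand-ins rather than qla2xxx code.

/* Illustration of the new PASS-mode handling in qla24xx_configure_prot_mode().
 * PO_MODE_* values come from the qla_def.h hunk above; the guard flags and
 * the harness are stand-ins, not qla2xxx code. */
#include <stdio.h>

#define PO_MODE_DIF_PASS      2
#define PO_MODE_DIF_TCP_CKSUM 6

#define SHOST_DIX_GUARD_CRC 0x1
#define SHOST_DIX_GUARD_IP  0x2

/* For a READ_PASS/WRITE_PASS command, choose the firmware DIF mode from
 * the guard mask the midlayer negotiated for this host. */
static unsigned short pass_mode(unsigned char guard)
{
    return (guard & SHOST_DIX_GUARD_IP) ?
        PO_MODE_DIF_TCP_CKSUM :   /* IP checksum guard in the DIX buffer */
        PO_MODE_DIF_PASS;         /* plain T10 CRC pass-through */
}

int main(void)
{
    printf("CRC-only guard -> fw mode %u\n", pass_mode(SHOST_DIX_GUARD_CRC));
    printf("CRC+IP guard   -> fw mode %u\n",
           pass_mode(SHOST_DIX_GUARD_CRC | SHOST_DIX_GUARD_IP));
    return 0;
}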