Commit 8f9b5654 authored by Nicholas Bellinger's avatar Nicholas Bellinger

target/qla2xxx: Honor max_data_sg_nents I/O transfer limit

This patch adds an optional fabric driver provided SGL limit
that target-core will honor as its own internal I/O maximum
transfer length limit, as exposed by EVPD=0xb0 block limits
parameters.

This is required for handling cases when host I/O transfer
length exceeds the requested EVPD block limits maximum
transfer length. The initial user of this logic is qla2xxx,
so that we can avoid having to reject I/Os from some legacy
FC hosts where EVPD=0xb0 parameters are not honored.

When se_cmd payload length exceeds the provided limit in
target_check_max_data_sg_nents() code, se_cmd->data_length +
se_cmd->prot_length are reset with se_cmd->residual_count
plus underflow bit for outgoing TFO response callbacks.
It also checks for existing CDB level underflow + overflow
and recalculates final residual_count as necessary.

Note this patch currently assumes 1:1 mapping of PAGE_SIZE
per struct scatterlist entry.
Reported-by: Craig Watson <craig.watson@vanguard-rugged.com>
Cc: Craig Watson <craig.watson@vanguard-rugged.com>
Tested-by: Himanshu Madhani <himanshu.madhani@qlogic.com>
Cc: Roland Dreier <roland@purestorage.com>
Cc: Arun Easi <arun.easi@qlogic.com>
Cc: Giridhar Malavali <giridhar.malavali@qlogic.com>
Cc: Andrew Vasquez <andrew.vasquez@qlogic.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Hannes Reinecke <hare@suse.de>
Cc: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
parent 13a3cf08
...@@ -1808,6 +1808,11 @@ static const struct target_core_fabric_ops tcm_qla2xxx_ops = { ...@@ -1808,6 +1808,11 @@ static const struct target_core_fabric_ops tcm_qla2xxx_ops = {
.module = THIS_MODULE, .module = THIS_MODULE,
.name = "qla2xxx", .name = "qla2xxx",
.node_acl_size = sizeof(struct tcm_qla2xxx_nacl), .node_acl_size = sizeof(struct tcm_qla2xxx_nacl),
/*
* XXX: Limit assumes single page per scatter-gather-list entry.
* Current maximum is ~4.9 MB per se_cmd->t_data_sg with PAGE_SIZE=4096
*/
.max_data_sg_nents = 1200,
.get_fabric_name = tcm_qla2xxx_get_fabric_name, .get_fabric_name = tcm_qla2xxx_get_fabric_name,
.tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn, .tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn,
.tpg_get_tag = tcm_qla2xxx_get_tag, .tpg_get_tag = tcm_qla2xxx_get_tag,
......
...@@ -477,8 +477,8 @@ static sense_reason_t ...@@ -477,8 +477,8 @@ static sense_reason_t
spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
{ {
struct se_device *dev = cmd->se_dev; struct se_device *dev = cmd->se_dev;
int have_tp = 0; u32 mtl = 0;
int opt, min; int have_tp = 0, opt, min;
/* /*
* Following spc3r22 section 6.5.3 Block Limits VPD page, when * Following spc3r22 section 6.5.3 Block Limits VPD page, when
...@@ -509,8 +509,15 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) ...@@ -509,8 +509,15 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
/* /*
* Set MAXIMUM TRANSFER LENGTH * Set MAXIMUM TRANSFER LENGTH
*
* XXX: Currently assumes single PAGE_SIZE per scatterlist for fabrics
* enforcing maximum HW scatter-gather-list entry limit
*/ */
put_unaligned_be32(dev->dev_attrib.hw_max_sectors, &buf[8]); if (cmd->se_tfo->max_data_sg_nents) {
mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE) /
dev->dev_attrib.block_size;
}
put_unaligned_be32(min_not_zero(mtl, dev->dev_attrib.hw_max_sectors), &buf[8]);
/* /*
* Set OPTIMAL TRANSFER LENGTH * Set OPTIMAL TRANSFER LENGTH
......
...@@ -1075,6 +1075,55 @@ transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83) ...@@ -1075,6 +1075,55 @@ transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
} }
EXPORT_SYMBOL(transport_set_vpd_ident); EXPORT_SYMBOL(transport_set_vpd_ident);
/*
 * Enforce an optional fabric-provided scatter-gather entry limit on a
 * single I/O descriptor.
 *
 * When the fabric sets se_tfo->max_data_sg_nents, cap cmd->data_length
 * at (max_data_sg_nents * PAGE_SIZE) and fold the truncated bytes into
 * cmd->residual_count, setting SCF_UNDERFLOW_BIT when no CDB-level
 * residual was already pending.  Assumes a 1:1 mapping of PAGE_SIZE per
 * scatterlist entry.
 *
 * @cmd:  descriptor being checked
 * @dev:  backing se_device (supplies block_size / prot_length)
 * @size: byte count requested by the CDB, as passed to
 *        target_cmd_size_check()
 *
 * Always returns TCM_NO_SENSE; truncation is reported to the initiator
 * via the residual count, not via a check condition.
 */
static sense_reason_t
target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev,
			       unsigned int size)
{
	u32 fabric_max_len;

	/* No fabric limit configured: nothing to enforce. */
	if (!cmd->se_tfo->max_data_sg_nents)
		return TCM_NO_SENSE;

	/* XXX: assumes a single PAGE_SIZE per scatterlist entry. */
	fabric_max_len = cmd->se_tfo->max_data_sg_nents * PAGE_SIZE;
	if (cmd->data_length <= fabric_max_len)
		return TCM_NO_SENSE;

	/*
	 * The payload exceeds the fabric maximum; recompute the residual
	 * relative to any residual target_cmd_size_check() already set:
	 *
	 *  - existing OVERFLOW:  residual = CDB size - fabric maximum
	 *  - existing UNDERFLOW: residual = original CDB length
	 *                        (size + old residual) - fabric maximum
	 *  - neither:            flag UNDERFLOW, residual =
	 *                        data_length - fabric maximum
	 */
	if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
		cmd->residual_count = size - fabric_max_len;
	} else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
		u32 orig_dl = size + cmd->residual_count;

		cmd->residual_count = orig_dl - fabric_max_len;
	} else {
		cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
		cmd->residual_count = cmd->data_length - fabric_max_len;
	}
	cmd->data_length = fabric_max_len;

	/*
	 * Re-derive the sbc_check_prot() protection payload length from
	 * the reduced transfer length.
	 */
	if (cmd->prot_length) {
		u32 sectors = fabric_max_len / dev->dev_attrib.block_size;

		cmd->prot_length = dev->prot_length * sectors;
	}

	return TCM_NO_SENSE;
}
sense_reason_t sense_reason_t
target_cmd_size_check(struct se_cmd *cmd, unsigned int size) target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
{ {
...@@ -1120,7 +1169,7 @@ target_cmd_size_check(struct se_cmd *cmd, unsigned int size) ...@@ -1120,7 +1169,7 @@ target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
} }
} }
return 0; return target_check_max_data_sg_nents(cmd, dev, size);
} }
......
...@@ -5,6 +5,19 @@ struct target_core_fabric_ops { ...@@ -5,6 +5,19 @@ struct target_core_fabric_ops {
struct module *module; struct module *module;
const char *name; const char *name;
size_t node_acl_size; size_t node_acl_size;
/*
* Limits number of scatterlist entries per SCF_SCSI_DATA_CDB payload.
* Setting this value tells target-core to enforce this limit, and
* report as INQUIRY EVPD=b0 MAXIMUM TRANSFER LENGTH.
*
* target-core will currently reset se_cmd->data_length to this
* maximum size, and set UNDERFLOW residual count if length exceeds
* this limit.
*
* XXX: Not all initiator hosts honor this block-limit EVPD
* XXX: Currently assumes single PAGE_SIZE per scatterlist entry
*/
u32 max_data_sg_nents;
char *(*get_fabric_name)(void); char *(*get_fabric_name)(void);
char *(*tpg_get_wwn)(struct se_portal_group *); char *(*tpg_get_wwn)(struct se_portal_group *);
u16 (*tpg_get_tag)(struct se_portal_group *); u16 (*tpg_get_tag)(struct se_portal_group *);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment