Commit 84905dfe authored by Quinn Tran's avatar Quinn Tran Committed by Martin K. Petersen

scsi: qla2xxx: Fix TMF and Multi-Queue config

For target mode, a task management command is queued to the specific CPU
based on where the corresponding SCSI command is residing.  This prevents
a race condition in which the task management command could get ahead of
the regular SCSI command.
Signed-off-by: Quinn Tran <quinn.tran@cavium.com>
Signed-off-by: Himanshu Madhani <himanshu.madhani@cavium.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent fc31b7a8
...@@ -1924,13 +1924,84 @@ static void abort_cmds_for_lun(struct scsi_qla_host *vha, ...@@ -1924,13 +1924,84 @@ static void abort_cmds_for_lun(struct scsi_qla_host *vha,
spin_unlock_irqrestore(&vha->cmd_list_lock, flags); spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
} }
/*
 * qlt_find_qphint - resolve the qpair hint to use for a given LUN.
 *
 * When multiple qpairs are available, consult the per-target LUN ->
 * qpair btree mapping; a LUN with no entry, or a configuration with no
 * extra qpairs, falls back to the default hint qphints[0].
 */
static struct qla_qpair_hint *qlt_find_qphint(struct scsi_qla_host *vha,
	uint64_t unpacked_lun)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_qpair_hint *h;

	/* No multiqueue: everything runs on the default qpair. */
	if (!vha->flags.qpairs_available)
		return &tgt->qphints[0];

	/* Unmapped LUNs also fall back to the default qpair hint. */
	h = btree_lookup64(&tgt->lun_qpair_map, unpacked_lun);
	return h ? h : &tgt->qphints[0];
}
/*
 * qlt_do_tmr_work - deferred-work handler that hands a task management
 * command (TMR) to the target core via ->handle_tmr().
 *
 * Runs from qla_tgt_wq on the CPU chosen when the work was queued, so
 * the TMR executes behind any regular SCSI commands queued to the same
 * CPU.  On ->handle_tmr() failure, the TMR-specific rejection response
 * is sent back under the qpair lock and the mcmd is returned to its
 * mempool; on success the target core owns the mcmd.
 */
static void qlt_do_tmr_work(struct work_struct *work)
{
	struct qla_tgt_mgmt_cmd *mcmd =
		container_of(work, struct qla_tgt_mgmt_cmd, work);
	struct qla_hw_data *ha = mcmd->vha->hw;
	int rc;	/* was "int rc = EIO;" — dead store, and positive errno */
	uint32_t tag;
	unsigned long flags;

	/* Only an ABTS carries an exchange address identifying the victim. */
	switch (mcmd->tmr_func) {
	case QLA_TGT_ABTS:
		tag = mcmd->orig_iocb.abts.exchange_addr_to_abort;
		break;
	default:
		tag = 0;
		break;
	}

	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, mcmd->unpacked_lun,
	    mcmd->tmr_func, tag);

	if (rc != 0) {
		/* Pick the rejection format that matches the TMR type. */
		spin_lock_irqsave(mcmd->qpair->qp_lock_ptr, flags);
		switch (mcmd->tmr_func) {
		case QLA_TGT_ABTS:
			qlt_24xx_send_abts_resp(mcmd->qpair,
			    &mcmd->orig_iocb.abts,
			    FCP_TMF_REJECTED, false);
			break;
		case QLA_TGT_LUN_RESET:
		case QLA_TGT_CLEAR_TS:
		case QLA_TGT_ABORT_TS:
		case QLA_TGT_CLEAR_ACA:
		case QLA_TGT_TARGET_RESET:
			qlt_send_busy(mcmd->qpair, &mcmd->orig_iocb.atio,
			    qla_sam_status);
			break;
		case QLA_TGT_ABORT_ALL:
		case QLA_TGT_NEXUS_LOSS_SESS:
		case QLA_TGT_NEXUS_LOSS:
			qlt_send_notify_ack(mcmd->qpair,
			    &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0);
			break;
		}
		spin_unlock_irqrestore(mcmd->qpair->qp_lock_ptr, flags);

		ql_dbg(ql_dbg_tgt_mgt, mcmd->vha, 0xf052,
		    "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
		    mcmd->vha->vp_idx, rc);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
	}
}
/* ha->hardware_lock supposed to be held on entry */ /* ha->hardware_lock supposed to be held on entry */
static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha, static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
struct abts_recv_from_24xx *abts, struct fc_port *sess) struct abts_recv_from_24xx *abts, struct fc_port *sess)
{ {
struct qla_hw_data *ha = vha->hw; struct qla_hw_data *ha = vha->hw;
struct qla_tgt_mgmt_cmd *mcmd; struct qla_tgt_mgmt_cmd *mcmd;
int rc; struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0];
if (abort_cmd_for_tag(vha, abts->exchange_addr_to_abort)) { if (abort_cmd_for_tag(vha, abts->exchange_addr_to_abort)) {
/* send TASK_ABORT response immediately */ /* send TASK_ABORT response immediately */
...@@ -1955,22 +2026,28 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha, ...@@ -1955,22 +2026,28 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts)); memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
mcmd->reset_count = ha->base_qpair->chip_reset; mcmd->reset_count = ha->base_qpair->chip_reset;
mcmd->tmr_func = QLA_TGT_ABTS; mcmd->tmr_func = QLA_TGT_ABTS;
mcmd->qpair = ha->base_qpair; mcmd->qpair = h->qpair;
mcmd->vha = vha; mcmd->vha = vha;
/* /*
* LUN is looked up by target-core internally based on the passed * LUN is looked up by target-core internally based on the passed
* abts->exchange_addr_to_abort tag. * abts->exchange_addr_to_abort tag.
*/ */
rc = ha->tgt.tgt_ops->handle_tmr(mcmd, 0, mcmd->tmr_func, mcmd->se_cmd.cpuid = h->cpuid;
if (ha->tgt.tgt_ops->find_cmd_by_tag) {
struct qla_tgt_cmd *abort_cmd;
abort_cmd = ha->tgt.tgt_ops->find_cmd_by_tag(sess,
abts->exchange_addr_to_abort); abts->exchange_addr_to_abort);
if (rc != 0) { if (abort_cmd && abort_cmd->qpair) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052, mcmd->qpair = abort_cmd->qpair;
"qla_target(%d): tgt_ops->handle_tmr()" mcmd->se_cmd.cpuid = abort_cmd->se_cmd.cpuid;
" failed: %d", vha->vp_idx, rc);
mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
return -EFAULT;
} }
}
INIT_WORK(&mcmd->work, qlt_do_tmr_work);
queue_work_on(mcmd->se_cmd.cpuid, qla_tgt_wq, &mcmd->work);
return 0; return 0;
} }
...@@ -4320,7 +4397,7 @@ static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun, ...@@ -4320,7 +4397,7 @@ static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
struct qla_hw_data *ha = vha->hw; struct qla_hw_data *ha = vha->hw;
struct qla_tgt_mgmt_cmd *mcmd; struct qla_tgt_mgmt_cmd *mcmd;
struct atio_from_isp *a = (struct atio_from_isp *)iocb; struct atio_from_isp *a = (struct atio_from_isp *)iocb;
int res; struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0];
mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC); mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
if (!mcmd) { if (!mcmd) {
...@@ -4340,24 +4417,36 @@ static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun, ...@@ -4340,24 +4417,36 @@ static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
mcmd->tmr_func = fn; mcmd->tmr_func = fn;
mcmd->flags = flags; mcmd->flags = flags;
mcmd->reset_count = ha->base_qpair->chip_reset; mcmd->reset_count = ha->base_qpair->chip_reset;
mcmd->qpair = ha->base_qpair; mcmd->qpair = h->qpair;
mcmd->vha = vha; mcmd->vha = vha;
mcmd->se_cmd.cpuid = h->cpuid;
mcmd->unpacked_lun = lun;
switch (fn) { switch (fn) {
case QLA_TGT_LUN_RESET: case QLA_TGT_LUN_RESET:
case QLA_TGT_CLEAR_TS:
case QLA_TGT_ABORT_TS:
abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id); abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
/* drop through */
case QLA_TGT_CLEAR_ACA:
h = qlt_find_qphint(vha, mcmd->unpacked_lun);
mcmd->qpair = h->qpair;
mcmd->se_cmd.cpuid = h->cpuid;
break; break;
}
res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, mcmd->tmr_func, 0); case QLA_TGT_TARGET_RESET:
if (res != 0) { case QLA_TGT_NEXUS_LOSS_SESS:
ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000b, case QLA_TGT_NEXUS_LOSS:
"qla_target(%d): tgt.tgt_ops->handle_tmr() failed: %d\n", case QLA_TGT_ABORT_ALL:
sess->vha->vp_idx, res); default:
mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool); /* no-op */
return -EFAULT; break;
} }
INIT_WORK(&mcmd->work, qlt_do_tmr_work);
queue_work_on(mcmd->se_cmd.cpuid, qla_tgt_wq,
&mcmd->work);
return 0; return 0;
} }
...@@ -5097,8 +5186,6 @@ static void qlt_handle_imm_notify(struct scsi_qla_host *vha, ...@@ -5097,8 +5186,6 @@ static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038, ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
"qla_target(%d): Immediate notify task %x\n", "qla_target(%d): Immediate notify task %x\n",
vha->vp_idx, iocb->u.isp2x.task_flags); vha->vp_idx, iocb->u.isp2x.task_flags);
if (qlt_handle_task_mgmt(vha, iocb) == 0)
send_notify_ack = 0;
break; break;
case IMM_NTFY_ELS: case IMM_NTFY_ELS:
......
...@@ -682,7 +682,7 @@ struct qla_tgt_cmd; ...@@ -682,7 +682,7 @@ struct qla_tgt_cmd;
* target module (tcm_qla2xxx). * target module (tcm_qla2xxx).
*/ */
struct qla_tgt_func_tmpl { struct qla_tgt_func_tmpl {
struct qla_tgt_cmd *(*find_cmd_by_tag)(struct fc_port *, uint64_t);
int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *, int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *,
unsigned char *, uint32_t, int, int, int); unsigned char *, uint32_t, int, int, int);
void (*handle_data)(struct qla_tgt_cmd *); void (*handle_data)(struct qla_tgt_cmd *);
...@@ -966,6 +966,8 @@ struct qla_tgt_mgmt_cmd { ...@@ -966,6 +966,8 @@ struct qla_tgt_mgmt_cmd {
unsigned int flags; unsigned int flags;
uint32_t reset_count; uint32_t reset_count;
#define QLA24XX_MGMT_SEND_NACK 1 #define QLA24XX_MGMT_SEND_NACK 1
struct work_struct work;
uint64_t unpacked_lun;
union { union {
struct atio_from_isp atio; struct atio_from_isp atio;
struct imm_ntfy_from_isp imm_ntfy; struct imm_ntfy_from_isp imm_ntfy;
......
...@@ -630,6 +630,32 @@ static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, u64 lun, ...@@ -630,6 +630,32 @@ static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, u64 lun,
transl_tmr_func, GFP_ATOMIC, tag, flags); transl_tmr_func, GFP_ATOMIC, tag, flags);
} }
/*
 * tcm_qla2xxx_find_cmd_by_tag - locate an outstanding SCSI command on a
 * session by its exchange tag.
 *
 * Walks the session command list under sess_cmd_lock, skipping TMR
 * commands (a tmr->task_cmd would otherwise match the same tag).
 * Returns the matching qla_tgt_cmd, or NULL when the session has no
 * se_session or no command with that tag is outstanding.
 */
static struct qla_tgt_cmd *tcm_qla2xxx_find_cmd_by_tag(struct fc_port *sess,
    uint64_t tag)
{
	struct qla_tgt_cmd *found = NULL;
	struct se_cmd *iter;
	unsigned long flags;

	if (!sess->se_sess)
		return NULL;

	spin_lock_irqsave(&sess->se_sess->sess_cmd_lock, flags);
	list_for_each_entry(iter, &sess->se_sess->sess_cmd_list, se_cmd_list) {
		/* Skip task management functions, including tmr->task_cmd. */
		if (iter->se_cmd_flags & SCF_SCSI_TMR_CDB)
			continue;

		if (iter->tag == tag) {
			found = container_of(iter, struct qla_tgt_cmd, se_cmd);
			break;
		}
	}
	spin_unlock_irqrestore(&sess->se_sess->sess_cmd_lock, flags);

	return found;
}
static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd) static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
{ {
struct qla_tgt_cmd *cmd = container_of(se_cmd, struct qla_tgt_cmd *cmd = container_of(se_cmd,
...@@ -1608,6 +1634,7 @@ static void tcm_qla2xxx_update_sess(struct fc_port *sess, port_id_t s_id, ...@@ -1608,6 +1634,7 @@ static void tcm_qla2xxx_update_sess(struct fc_port *sess, port_id_t s_id,
* Calls into tcm_qla2xxx used by qla2xxx LLD I/O path. * Calls into tcm_qla2xxx used by qla2xxx LLD I/O path.
*/ */
static struct qla_tgt_func_tmpl tcm_qla2xxx_template = { static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
.find_cmd_by_tag = tcm_qla2xxx_find_cmd_by_tag,
.handle_cmd = tcm_qla2xxx_handle_cmd, .handle_cmd = tcm_qla2xxx_handle_cmd,
.handle_data = tcm_qla2xxx_handle_data, .handle_data = tcm_qla2xxx_handle_data,
.handle_tmr = tcm_qla2xxx_handle_tmr, .handle_tmr = tcm_qla2xxx_handle_tmr,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment