Commit 8abfa9e2 authored by Quinn Tran, committed by Martin K. Petersen

scsi: qla2xxx: Add function call to qpair for door bell

Add a doorbell callback to the qpair. This helps reduce access to the
qla_hw_data structure, in order to reduce cache thrashing.
Signed-off-by: Quinn Tran <quinn.tran@cavium.com>
Signed-off-by: Himanshu Madhani <himanshu.madhani@cavium.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent af7bb382
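The pattern the hunks below repeat at every submit site is: each queue pair caches what it needs (its request queue, its pci_dev, and now a reqq_start_iocbs doorbell callback), and callers ring the doorbell through the per-qpair hook when it is set, falling back to qla2x00_start_iocbs() otherwise. The following standalone C sketch only illustrates that dispatch idea; the simplified struct fields, the demo_* helper names, and the printf "doorbells" are stand-ins for the real code in drivers/scsi/qla2xxx, not the driver's implementation (ring_ptr handling and the register write are omitted).

/* Illustrative sketch only -- not the qla2xxx source. */
#include <stdio.h>
#include <stdint.h>

struct req_que {
	uint16_t ring_index;	/* producer index into the request ring */
	uint16_t length;	/* number of IOCB slots in the ring */
};

struct qla_qpair {
	struct req_que *req;
	/* Optional per-qpair doorbell hook; NULL means "use the generic path". */
	void (*reqq_start_iocbs)(struct qla_qpair *);
};

/* Stand-in for the ISP83xx/27xx fast path: advance the ring and "ring the
 * doorbell" using only data cached in the qpair, never touching hw data. */
static void demo_83xx_start_iocbs(struct qla_qpair *qpair)
{
	struct req_que *req = qpair->req;

	req->ring_index++;
	if (req->ring_index == req->length)
		req->ring_index = 0;
	printf("83xx doorbell: ring_index=%u\n", (unsigned)req->ring_index);
}

/* Stand-in for the legacy qla2x00_start_iocbs() fallback. */
static void demo_generic_start_iocbs(struct qla_qpair *qpair)
{
	printf("generic doorbell path: ring_index=%u\n",
	       (unsigned)qpair->req->ring_index);
}

/* Every submit site in the patch follows this two-branch shape. */
static void submit_iocb(struct qla_qpair *qpair)
{
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);	/* per-qpair fast path */
	else
		demo_generic_start_iocbs(qpair);	/* fallback */
}

int main(void)
{
	struct req_que req = { .ring_index = 0, .length = 4 };
	struct qla_qpair qpair = { .req = &req, .reqq_start_iocbs = NULL };

	submit_iocb(&qpair);				/* generic path */
	qpair.reqq_start_iocbs = demo_83xx_start_iocbs;
	submit_iocb(&qpair);				/* per-qpair hook */
	return 0;
}

Because the callback is simply left NULL on ISPs that lack the fast path, every submit site can adopt the same two-branch pattern without changing behavior on older hardware.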
@@ -3272,6 +3272,9 @@ struct qla_qpair {
uint16_t vp_idx; /* vport ID */
mempool_t *srb_mempool;
struct pci_dev *pdev;
void (*reqq_start_iocbs)(struct qla_qpair *);
/* to do: New driver: move queues to here instead of pointers */
struct req_que *req;
struct rsp_que *rsp;
@@ -7653,6 +7653,9 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
qpair->msix->in_use = 1;
list_add_tail(&qpair->qp_list_elem, &vha->qp_list);
qpair->pdev = ha->pdev;
if (IS_QLA27XX(ha) || IS_QLA83XX(ha))
qpair->reqq_start_iocbs = qla_83xx_start_iocbs;
mutex_unlock(&ha->mq_lock);
@@ -352,3 +352,18 @@ qla_qpair_to_hint(struct qla_tgt *tgt, struct qla_qpair *qpair)
return NULL;
}
static inline void
qla_83xx_start_iocbs(struct qla_qpair *qpair)
{
struct req_que *req = qpair->req;
req->ring_index++;
if (req->ring_index == req->length) {
req->ring_index = 0;
req->ring_ptr = req->ring;
} else
req->ring_ptr++;
WRT_REG_DWORD(req->req_q_in, req->ring_index);
}
@@ -351,6 +351,28 @@ int qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
struct qla_qpair *qpair);
/* -------------------------------------------------------------------------- */
static void qla_init_base_qpair(struct scsi_qla_host *vha, struct req_que *req,
struct rsp_que *rsp)
{
struct qla_hw_data *ha = vha->hw;
rsp->qpair = ha->base_qpair;
rsp->req = req;
ha->base_qpair->req = req;
ha->base_qpair->rsp = rsp;
ha->base_qpair->vha = vha;
ha->base_qpair->qp_lock_ptr = &ha->hardware_lock;
ha->base_qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0;
ha->base_qpair->msix = &ha->msix_entries[QLA_MSIX_RSP_Q];
INIT_LIST_HEAD(&ha->base_qpair->hints_list);
ha->base_qpair->enable_class_2 = ql2xenableclass2;
/* init qpair to this cpu. Will adjust at run time. */
qla_cpu_update(rsp->qpair, smp_processor_id());
ha->base_qpair->pdev = ha->pdev;
if (IS_QLA27XX(ha) || IS_QLA83XX(ha))
ha->base_qpair->reqq_start_iocbs = qla_83xx_start_iocbs;
}
static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
struct rsp_que *rsp)
{
@@ -378,18 +400,7 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
goto fail_base_qpair;
}
rsp->qpair = ha->base_qpair;
rsp->req = req;
ha->base_qpair->req = req;
ha->base_qpair->rsp = rsp;
ha->base_qpair->vha = vha;
ha->base_qpair->qp_lock_ptr = &ha->hardware_lock;
ha->base_qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0;
/* init qpair to this cpu. Will adjust at run time. */
ha->base_qpair->msix = &ha->msix_entries[QLA_MSIX_RSP_Q];
INIT_LIST_HEAD(&ha->base_qpair->hints_list);
ha->base_qpair->enable_class_2 = ql2xenableclass2;
qla_cpu_update(rsp->qpair, smp_processor_id());
qla_init_base_qpair(vha, req, rsp);
if (ql2xmqsupport && ha->max_qpairs) {
ha->queue_pair_map = kcalloc(ha->max_qpairs, sizeof(struct qla_qpair *),
@@ -1728,7 +1728,10 @@ static void qlt_24xx_send_abts_resp(struct qla_qpair *qpair,
/* Memory Barrier */
wmb();
qla2x00_start_iocbs(vha, qpair->req);
if (qpair->reqq_start_iocbs)
qpair->reqq_start_iocbs(qpair);
else
qla2x00_start_iocbs(vha, qpair->req);
}
/*
@@ -2058,7 +2061,10 @@ static void qlt_24xx_send_task_mgmt_ctio(struct qla_qpair *qpair,
/* Memory Barrier */
wmb();
qla2x00_start_iocbs(ha, qpair->req);
if (qpair->reqq_start_iocbs)
qpair->reqq_start_iocbs(qpair);
else
qla2x00_start_iocbs(ha, qpair->req);
}
void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
@@ -2071,12 +2077,13 @@ EXPORT_SYMBOL(qlt_free_mcmd);
* ha->hardware_lock supposed to be held on entry. Might drop it, then
* reacquire
*/
void qlt_send_resp_ctio(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
void qlt_send_resp_ctio(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
uint8_t scsi_status, uint8_t sense_key, uint8_t asc, uint8_t ascq)
{
struct atio_from_isp *atio = &cmd->atio;
struct ctio7_to_24xx *ctio;
uint16_t temp;
struct scsi_qla_host *vha = cmd->vha;
ql_dbg(ql_dbg_tgt_dif, vha, 0x3066,
"Sending response CTIO7 (vha=%p, atio=%p, scsi_status=%02x, "
@@ -2127,7 +2134,11 @@ void qlt_send_resp_ctio(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
/* Memory Barrier */
wmb();
qla2x00_start_iocbs(vha, vha->req);
if (qpair->reqq_start_iocbs)
qpair->reqq_start_iocbs(qpair);
else
qla2x00_start_iocbs(vha, qpair->req);
out:
return;
}
@@ -2205,7 +2216,7 @@ static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
BUG_ON(cmd->sg_cnt == 0);
prm->sg = (struct scatterlist *)cmd->sg;
prm->seg_cnt = pci_map_sg(prm->tgt->ha->pdev, cmd->sg,
prm->seg_cnt = pci_map_sg(cmd->qpair->pdev, cmd->sg,
cmd->sg_cnt, cmd->dma_data_direction);
if (unlikely(prm->seg_cnt == 0))
goto out_err;
@@ -2232,7 +2243,7 @@ static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
if (cmd->prot_sg_cnt) {
prm->prot_sg = cmd->prot_sg;
prm->prot_seg_cnt = pci_map_sg(prm->tgt->ha->pdev,
prm->prot_seg_cnt = pci_map_sg(cmd->qpair->pdev,
cmd->prot_sg, cmd->prot_sg_cnt,
cmd->dma_data_direction);
if (unlikely(prm->prot_seg_cnt == 0))
@@ -2260,21 +2271,24 @@ static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
{
struct qla_hw_data *ha = vha->hw;
struct qla_hw_data *ha;
struct qla_qpair *qpair;
if (!cmd->sg_mapped)
return;
pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
qpair = cmd->qpair;
pci_unmap_sg(qpair->pdev, cmd->sg, cmd->sg_cnt,
cmd->dma_data_direction);
cmd->sg_mapped = 0;
if (cmd->prot_sg_cnt)
pci_unmap_sg(ha->pdev, cmd->prot_sg, cmd->prot_sg_cnt,
pci_unmap_sg(qpair->pdev, cmd->prot_sg, cmd->prot_sg_cnt,
cmd->dma_data_direction);
if (!cmd->ctx)
return;
ha = vha->hw;
if (cmd->ctx_dsd_alloced)
qla2x00_clean_dsd_pool(ha, cmd->ctx);
@@ -2324,7 +2338,6 @@ static inline void *qlt_get_req_pkt(struct req_que *req)
/* ha->hardware_lock supposed to be held on entry */
static inline uint32_t qlt_make_handle(struct qla_qpair *qpair)
{
struct scsi_qla_host *vha = qpair->vha;
uint32_t h;
int index;
uint8_t found = 0;
@@ -2349,9 +2362,9 @@ static inline uint32_t qlt_make_handle(struct qla_qpair *qpair)
if (found) {
req->current_outstanding_cmd = h;
} else {
ql_dbg(ql_dbg_io, vha, 0x305b,
"qla_target(%d): Ran out of empty cmd slots\n",
vha->vp_idx);
ql_dbg(ql_dbg_io, qpair->vha, 0x305b,
"qla_target(%d): Ran out of empty cmd slots\n",
qpair->vha->vp_idx);
h = QLA_TGT_NULL_HANDLE;
}
@@ -3189,7 +3202,10 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
/* Memory Barrier */
wmb();
qla2x00_start_iocbs(vha, qpair->req);
if (qpair->reqq_start_iocbs)
qpair->reqq_start_iocbs(qpair);
else
qla2x00_start_iocbs(vha, qpair->req);
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
return 0;
@@ -3264,7 +3280,10 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
/* Memory Barrier */
wmb();
qla2x00_start_iocbs(vha, qpair->req);
if (qpair->reqq_start_iocbs)
qpair->reqq_start_iocbs(qpair);
else
qla2x00_start_iocbs(vha, qpair->req);
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
return res;
@@ -3282,7 +3301,7 @@ EXPORT_SYMBOL(qlt_rdy_to_xfer);
* it is assumed either hardware_lock or qpair lock is held.
*/
static void
qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd,
qlt_handle_dif_error(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
struct ctio_crc_from_fw *sts)
{
uint8_t *ap = &sts->actual_dif[0];
@@ -3290,6 +3309,7 @@ qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd,
uint64_t lba = cmd->se_cmd.t_task_lba;
uint8_t scsi_status, sense_key, asc, ascq;
unsigned long flags;
struct scsi_qla_host *vha = cmd->vha;
cmd->trc_flags |= TRC_DIF_ERR;
@@ -3370,7 +3390,8 @@ qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd,
}
spin_unlock_irqrestore(&cmd->cmd_lock, flags);
qlt_send_resp_ctio(vha, cmd, scsi_status, sense_key, asc, ascq);
qlt_send_resp_ctio(qpair, cmd, scsi_status, sense_key, asc,
ascq);
/* assume scsi status gets out on the wire.
* Will not wait for completion.
*/
@@ -3525,7 +3546,10 @@ static int __qlt_send_term_exchange(struct qla_qpair *qpair,
/* Memory Barrier */
wmb();
qla2x00_start_iocbs(vha, qpair->req);
if (qpair->reqq_start_iocbs)
qpair->reqq_start_iocbs(qpair);
else
qla2x00_start_iocbs(vha, qpair->req);
return ret;
}
@@ -3883,7 +3907,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
*((u64 *)&crc->actual_dif[0]),
*((u64 *)&crc->expected_dif[0]));
qlt_handle_dif_error(vha, cmd, ctio);
qlt_handle_dif_error(qpair, cmd, ctio);
return;
}
default:
@@ -5121,7 +5145,10 @@ static int __qlt_send_busy(struct qla_qpair *qpair,
ctio24->u.status1.scsi_status = cpu_to_le16(status);
/* Memory Barrier */
wmb();
qla2x00_start_iocbs(vha, qpair->req);
if (qpair->reqq_start_iocbs)
qpair->reqq_start_iocbs(qpair);
else
qla2x00_start_iocbs(vha, qpair->req);
return 0;
}
@@ -1097,7 +1097,7 @@ extern int qlt_free_qfull_cmds(struct qla_qpair *);
extern void qlt_logo_completion_handler(fc_port_t *, int);
extern void qlt_do_generation_tick(struct scsi_qla_host *, int *);
void qlt_send_resp_ctio(scsi_qla_host_t *, struct qla_tgt_cmd *, uint8_t,
void qlt_send_resp_ctio(struct qla_qpair *, struct qla_tgt_cmd *, uint8_t,
uint8_t, uint8_t, uint8_t);
extern void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *,
struct qla_tgt_cmd *);