Commit fb3269ba authored by Quinn Tran, committed by Nicholas Bellinger

qla2xxx: Add selective command queuing

Queue the work element on a specific processor to reduce cache misses.
Signed-off-by: Quinn Tran <quinn.tran@qlogic.com>
Signed-off-by: Himanshu Madhani <himanshu.madhani@qlogic.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
parent cdb898c5
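
For context, the pattern the patch applies is the standard workqueue affinity idiom: the command records a preferred CPU (here, the CPU tied to the response-queue MSI-X vector, with -1 meaning "no preference"), and submitters use queue_work_on() to place completion work on that CPU, falling back to queue_work() when no preference exists. The sketch below is illustrative only and not part of the patch; example_cmd, example_queue_cmd and example_wq are hypothetical names, while queue_work_on(), queue_work() and smp_processor_id() are the kernel APIs the patch itself uses.

/*
 * Illustrative sketch only (not from the patch): queue a command's work
 * item on a recorded CPU when one is known, otherwise fall back to an
 * unbound queue_work().  All example_* identifiers are hypothetical.
 */
#include <linux/workqueue.h>
#include <linux/smp.h>

struct example_cmd {
	struct work_struct work;
	int cpuid;			/* -1 means "no CPU preference" */
};

static struct workqueue_struct *example_wq;

static void example_queue_cmd(struct example_cmd *cmd, bool keep_local)
{
	if (keep_local)
		/* Run on the submitting CPU so the command's data stays cache-hot. */
		queue_work_on(smp_processor_id(), example_wq, &cmd->work);
	else if (cmd->cpuid != -1)
		/* Follow the CPU recorded for the response-queue interrupt. */
		queue_work_on(cmd->cpuid, example_wq, &cmd->work);
	else
		/* No affinity information: let the workqueue pick a CPU. */
		queue_work(example_wq, &cmd->work);
}
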
@@ -3302,7 +3302,7 @@ static void qla_irq_affinity_notify(struct irq_affinity_notify *notify,
 	}
 }
-void qla_irq_affinity_release(struct kref *ref)
+static void qla_irq_affinity_release(struct kref *ref)
 {
 	struct irq_affinity_notify *notify =
 		container_of(ref, struct irq_affinity_notify, kref);
...
@@ -3982,13 +3982,24 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
 	cmd->cmd_in_wq = 1;
 	cmd->cmd_flags |= BIT_0;
+	cmd->se_cmd.cpuid = -1;
 	spin_lock(&vha->cmd_list_lock);
 	list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
 	spin_unlock(&vha->cmd_list_lock);
 	INIT_WORK(&cmd->work, qlt_do_work);
-	queue_work(qla_tgt_wq, &cmd->work);
+	if (ha->msix_count) {
+		cmd->se_cmd.cpuid = ha->tgt.rspq_vector_cpuid;
+		if (cmd->atio.u.isp24.fcp_cmnd.rddata)
+			queue_work_on(smp_processor_id(), qla_tgt_wq,
+			    &cmd->work);
+		else
+			queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq,
+			    &cmd->work);
+	} else {
+		queue_work(qla_tgt_wq, &cmd->work);
+	}
 	return 0;
 }
...
@@ -299,7 +299,7 @@ static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd)
 	cmd->vha->tgt_counters.core_qla_free_cmd++;
 	cmd->cmd_in_wq = 1;
 	INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free);
-	queue_work(tcm_qla2xxx_free_wq, &cmd->work);
+	queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work);
 }
 /*
@@ -504,7 +504,7 @@ static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
 	cmd->cmd_flags |= BIT_10;
 	cmd->cmd_in_wq = 1;
 	INIT_WORK(&cmd->work, tcm_qla2xxx_handle_data_work);
-	queue_work(tcm_qla2xxx_free_wq, &cmd->work);
+	queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work);
 }
 static void tcm_qla2xxx_handle_dif_work(struct work_struct *work)
...
@@ -715,7 +715,10 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
 	cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-	queue_work(target_completion_wq, &cmd->work);
+	if (cmd->cpuid == -1)
+		queue_work(target_completion_wq, &cmd->work);
+	else
+		queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work);
 }
 EXPORT_SYMBOL(target_complete_cmd);
...
@@ -528,6 +528,7 @@ struct se_cmd {
 	unsigned int		t_prot_nents;
 	sense_reason_t		pi_err;
 	sector_t		bad_sector;
+	int			cpuid;
 };
 struct se_ua {
...