Commit 2ef23e4b authored by Martin K. Petersen

Merge patch series "ufs: Do not requeue while ungating the clock"

Bart Van Assche <bvanassche@acm.org> says:

In the traces we recorded while testing zoned storage, we noticed that UFS
commands are requeued while the clock is being ungated. Command requeueing
makes it harder than necessary to preserve the command order. Hence this
patch series, which modifies the SCSI core and the UFS driver so that
clock ungating no longer triggers command requeueing.

Link: https://lore.kernel.org/r/20230529202640.11883-1-bvanassche@acm.org
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parents 0e5e41ee 078f4f4b
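
The mechanism, in brief: a SCSI host template opts in with a new flag, the SCSI core then marks the tag set BLK_MQ_F_BLOCKING, and ->queuecommand() is allowed to sleep instead of returning SCSI_MLQUEUE_HOST_BUSY. A minimal sketch of the opt-in from a driver's point of view (all example_* names are hypothetical, not part of this series):

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

/* With .queuecommand_may_block set, scsi_mq_setup_tags() adds
 * BLK_MQ_F_BLOCKING to the tag set, so this callback may sleep,
 * e.g. while waiting for clocks to be ungated. */
static int example_queuecommand(struct Scsi_Host *shost,
				struct scsi_cmnd *cmd)
{
	/* May block here instead of requeueing the command. */
	scsi_done(cmd);		/* complete immediately in this sketch */
	return 0;
}

static const struct scsi_host_template example_sht = {
	.name			= "example",
	.queuecommand		= example_queuecommand,
	.queuecommand_may_block	= 1,
};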
@@ -441,6 +441,7 @@ struct Scsi_Host *scsi_host_alloc(const struct scsi_host_template *sht, int priv
 	shost->cmd_per_lun = sht->cmd_per_lun;
 	shost->no_write_same = sht->no_write_same;
 	shost->host_tagset = sht->host_tagset;
+	shost->queuecommand_may_block = sht->queuecommand_may_block;
 
 	if (shost_eh_deadline == -1 || !sht->eh_host_reset_handler)
 		shost->eh_deadline = -1;
...
@@ -1984,6 +1984,8 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost)
 	tag_set->flags = BLK_MQ_F_SHOULD_MERGE;
 	tag_set->flags |=
 		BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy);
+	if (shost->queuecommand_may_block)
+		tag_set->flags |= BLK_MQ_F_BLOCKING;
 	tag_set->driver_data = shost;
 	if (shost->host_tagset)
 		tag_set->flags |= BLK_MQ_F_TAG_HCTX_SHARED;
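
A hedged sketch of what the new flag buys at the blk-mq level, assuming a stand-alone driver (the example_* names are hypothetical): with BLK_MQ_F_BLOCKING in the tag set, blk-mq dispatches ->queue_rq() from a context where sleeping is allowed and brackets each call in an SRCU read-side section instead of plain RCU.

#include <linux/blk-mq.h>

static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
				     const struct blk_mq_queue_data *bd)
{
	might_sleep();	/* permitted only because of BLK_MQ_F_BLOCKING */
	blk_mq_start_request(bd->rq);
	/* ... issue the request, possibly waiting for resources ... */
	blk_mq_end_request(bd->rq, BLK_STS_OK);
	return BLK_STS_OK;
}

static const struct blk_mq_ops example_mq_ops = {
	.queue_rq = example_queue_rq,
};

static struct blk_mq_tag_set example_tag_set = {
	.ops		= &example_mq_ops,
	.nr_hw_queues	= 1,
	.queue_depth	= 32,
	.numa_node	= NUMA_NO_NODE,
	.flags		= BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING,
};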
@@ -2943,11 +2945,20 @@ scsi_target_unblock(struct device *dev, enum scsi_device_state new_state)
 }
 EXPORT_SYMBOL_GPL(scsi_target_unblock);
 
+/**
+ * scsi_host_block - Try to transition all logical units to the SDEV_BLOCK state
+ * @shost: device to block
+ *
+ * Pause SCSI command processing for all logical units associated with the SCSI
+ * host and wait until pending scsi_queue_rq() calls have finished.
+ *
+ * Returns zero if successful or a negative error code upon failure.
+ */
 int
 scsi_host_block(struct Scsi_Host *shost)
 {
 	struct scsi_device *sdev;
-	int ret = 0;
+	int ret;
 
 	/*
 	 * Call scsi_internal_device_block_nowait so we can avoid
@@ -2959,20 +2970,14 @@ scsi_host_block(struct Scsi_Host *shost)
 		mutex_unlock(&sdev->state_mutex);
 		if (ret) {
 			scsi_device_put(sdev);
-			break;
+			return ret;
 		}
 	}
 
-	/*
-	 * SCSI never enables blk-mq's BLK_MQ_F_BLOCKING flag so
-	 * calling synchronize_rcu() once is enough.
-	 */
-	WARN_ON_ONCE(shost->tag_set.flags & BLK_MQ_F_BLOCKING);
-
-	if (!ret)
-		synchronize_rcu();
-	return ret;
+	/* Wait for ongoing scsi_queue_rq() calls to finish. */
+	blk_mq_wait_quiesce_done(&shost->tag_set);
+
+	return 0;
 }
 EXPORT_SYMBOL_GPL(scsi_host_block);
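
Why a single call now covers both cases: blk_mq_wait_quiesce_done() picks the grace-period primitive that matches the tag set, SRCU for BLK_MQ_F_BLOCKING hosts and plain RCU otherwise. Roughly, paraphrasing the block layer of the same era (an assumption about block/blk-mq.c, not part of this diff):

void blk_mq_wait_quiesce_done(struct blk_mq_tag_set *set)
{
	/* SRCU for blocking ->queue_rq(), classic RCU otherwise. */
	if (set->flags & BLK_MQ_F_BLOCKING)
		synchronize_srcu(set->srcu);
	else
		synchronize_rcu();
}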
...
@@ -168,7 +168,7 @@ static ssize_t auto_hibern8_show(struct device *dev,
 	}
 
 	pm_runtime_get_sync(hba->dev);
-	ufshcd_hold(hba, false);
+	ufshcd_hold(hba);
 	ahit = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER);
 	ufshcd_release(hba);
 	pm_runtime_put_sync(hba->dev);
...
@@ -24,7 +24,7 @@ static int ufshcd_program_key(struct ufs_hba *hba,
 	u32 slot_offset = hba->crypto_cfg_register + slot * sizeof(*cfg);
 	int err = 0;
 
-	ufshcd_hold(hba, false);
+	ufshcd_hold(hba);
 
 	if (hba->vops && hba->vops->program_key) {
 		err = hba->vops->program_key(hba, cfg, slot);
...
@@ -84,9 +84,6 @@ unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
 int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
 			    u8 **buf, bool ascii);
-int ufshcd_hold(struct ufs_hba *hba, bool async);
-void ufshcd_release(struct ufs_hba *hba);
-
 int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);
 
 int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
...
@@ -1189,7 +1189,7 @@ static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
 	bool timeout = false, do_last_check = false;
 	ktime_t start;
 
-	ufshcd_hold(hba, false);
+	ufshcd_hold(hba);
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	/*
 	 * Wait for all the outstanding tasks/transfer requests.
@@ -1310,7 +1310,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
 	}
 
 	/* let's not get into low power until clock scaling is completed */
-	ufshcd_hold(hba, false);
+	ufshcd_hold(hba);
 
 out:
 	return ret;
@@ -1640,7 +1640,7 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
 		goto out;
 
 	ufshcd_rpm_get_sync(hba);
-	ufshcd_hold(hba, false);
+	ufshcd_hold(hba);
 
 	hba->clk_scaling.is_enabled = value;
@@ -1723,7 +1723,7 @@ static void ufshcd_ungate_work(struct work_struct *work)
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	if (hba->clk_gating.state == CLKS_ON) {
 		spin_unlock_irqrestore(hba->host->host_lock, flags);
-		goto unblock_reqs;
+		return;
 	}
 
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -1746,25 +1746,21 @@ static void ufshcd_ungate_work(struct work_struct *work)
 		}
 		hba->clk_gating.is_suspended = false;
 	}
-unblock_reqs:
-	ufshcd_scsi_unblock_requests(hba);
 }
 
 /**
  * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
  * Also, exit from hibern8 mode and set the link as active.
  * @hba: per adapter instance
- * @async: This indicates whether caller should ungate clocks asynchronously.
  */
-int ufshcd_hold(struct ufs_hba *hba, bool async)
+void ufshcd_hold(struct ufs_hba *hba)
 {
-	int rc = 0;
 	bool flush_result;
 	unsigned long flags;
 
 	if (!ufshcd_is_clkgating_allowed(hba) ||
 	    !hba->clk_gating.is_initialized)
-		goto out;
+		return;
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	hba->clk_gating.active_reqs++;
@@ -1781,15 +1777,10 @@ int ufshcd_hold(struct ufs_hba *hba, bool async)
 		 */
 		if (ufshcd_can_hibern8_during_gating(hba) &&
 		    ufshcd_is_link_hibern8(hba)) {
-			if (async) {
-				rc = -EAGAIN;
-				hba->clk_gating.active_reqs--;
-				break;
-			}
 			spin_unlock_irqrestore(hba->host->host_lock, flags);
 			flush_result = flush_work(&hba->clk_gating.ungate_work);
 			if (hba->clk_gating.is_suspended && !flush_result)
-				goto out;
+				return;
 			spin_lock_irqsave(hba->host->host_lock, flags);
 			goto start;
 		}
@@ -1811,21 +1802,14 @@ int ufshcd_hold(struct ufs_hba *hba, bool async)
 		hba->clk_gating.state = REQ_CLKS_ON;
 		trace_ufshcd_clk_gating(dev_name(hba->dev),
 					hba->clk_gating.state);
-		if (queue_work(hba->clk_gating.clk_gating_workq,
-			       &hba->clk_gating.ungate_work))
-			ufshcd_scsi_block_requests(hba);
+		queue_work(hba->clk_gating.clk_gating_workq,
+			   &hba->clk_gating.ungate_work);
 		/*
 		 * fall through to check if we should wait for this
 		 * work to be done or not.
 		 */
 		fallthrough;
 	case REQ_CLKS_ON:
-		if (async) {
-			rc = -EAGAIN;
-			hba->clk_gating.active_reqs--;
-			break;
-		}
 		spin_unlock_irqrestore(hba->host->host_lock, flags);
 		flush_work(&hba->clk_gating.ungate_work);
 		/* Make sure state is CLKS_ON before returning */
@@ -1837,8 +1821,6 @@ int ufshcd_hold(struct ufs_hba *hba, bool async)
 		break;
 	}
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
-out:
-	return rc;
 }
 EXPORT_SYMBOL_GPL(ufshcd_hold);
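
Every remaining caller follows the same synchronous pattern; a minimal sketch of the pairing (example_read_reg is hypothetical, the other identifiers come from the driver):

#include <ufs/ufshcd.h>

/* Keep the controller clocks on across a register access. After this
 * series ufshcd_hold() returns void and may sleep until the ungate
 * work has finished, so it must not be called from atomic context. */
static u32 example_read_reg(struct ufs_hba *hba, u32 reg)
{
	u32 val;

	ufshcd_hold(hba);	/* void now: no -EAGAIN/requeue path */
	val = ufshcd_readl(hba, reg);
	ufshcd_release(hba);
	return val;
}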
@@ -2070,7 +2052,7 @@ static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
 	ufshcd_remove_clk_gating_sysfs(hba);
 
 	/* Ungate the clock if necessary. */
-	ufshcd_hold(hba, false);
+	ufshcd_hold(hba);
 	hba->clk_gating.is_initialized = false;
 	ufshcd_release(hba);
@@ -2468,7 +2450,7 @@ int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
 	if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
 		return 0;
 
-	ufshcd_hold(hba, false);
+	ufshcd_hold(hba);
 	mutex_lock(&hba->uic_cmd_mutex);
 	ufshcd_add_delay_before_dme_cmd(hba);
@@ -2871,12 +2853,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 	WARN_ONCE(tag < 0 || tag >= hba->nutrs, "Invalid tag %d\n", tag);
 
-	/*
-	 * Allows the UFS error handler to wait for prior ufshcd_queuecommand()
-	 * calls.
-	 */
-	rcu_read_lock();
-
 	switch (hba->ufshcd_state) {
 	case UFSHCD_STATE_OPERATIONAL:
 		break;
@@ -2922,13 +2898,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 	hba->req_abort_count = 0;
 
-	err = ufshcd_hold(hba, true);
-	if (err) {
-		err = SCSI_MLQUEUE_HOST_BUSY;
-		goto out;
-	}
-	WARN_ON(ufshcd_is_clkgating_allowed(hba) &&
-		(hba->clk_gating.state != CLKS_ON));
+	ufshcd_hold(hba);
 
 	lrbp = &hba->lrb[tag];
 	WARN_ON(lrbp->cmd);
@@ -2958,8 +2928,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 	ufshcd_send_command(hba, tag, hwq);
 
 out:
-	rcu_read_unlock();
-
 	if (ufs_trigger_eh()) {
 		unsigned long flags;
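
The removed rcu_read_lock()/rcu_read_unlock() pair is not replaced inside ufshcd_queuecommand(): with BLK_MQ_F_BLOCKING, blk-mq itself brackets each ->queuecommand() invocation in an SRCU read-side section, and the error handler waits it out instead (see the ufshcd_err_handling_prepare() hunk below). A condensed sketch of the new drain, with a hypothetical helper name:

static void example_drain_queuecommand(struct ufs_hba *hba)
{
	/* Called after ufshcd_scsi_block_requests(); waits until every
	 * ufshcd_queuecommand() call that already entered blk-mq's
	 * SRCU read-side section has returned. */
	blk_mq_wait_quiesce_done(&hba->host->tag_set);
}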
@@ -3253,7 +3221,7 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
 	BUG_ON(!hba);
 
-	ufshcd_hold(hba, false);
+	ufshcd_hold(hba);
 	mutex_lock(&hba->dev_cmd.lock);
 	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
 			selector);
@@ -3327,7 +3295,7 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
 		return -EINVAL;
 	}
 
-	ufshcd_hold(hba, false);
+	ufshcd_hold(hba);
 
 	mutex_lock(&hba->dev_cmd.lock);
 	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
@@ -3423,7 +3391,7 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
 		return -EINVAL;
 	}
 
-	ufshcd_hold(hba, false);
+	ufshcd_hold(hba);
 
 	mutex_lock(&hba->dev_cmd.lock);
 	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
@@ -4241,7 +4209,7 @@ int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
 	uic_cmd.command = UIC_CMD_DME_SET;
 	uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
 	uic_cmd.argument3 = mode;
-	ufshcd_hold(hba, false);
+	ufshcd_hold(hba);
 	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
 	ufshcd_release(hba);
@@ -4348,7 +4316,7 @@ void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
 	if (update &&
 	    !pm_runtime_suspended(&hba->ufs_device_wlun->sdev_gendev)) {
 		ufshcd_rpm_get_sync(hba);
-		ufshcd_hold(hba, false);
+		ufshcd_hold(hba);
 		ufshcd_auto_hibern8_enable(hba);
 		ufshcd_release(hba);
 		ufshcd_rpm_put_sync(hba);
@@ -4941,7 +4909,7 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba)
 	int err = 0;
 	int retries;
 
-	ufshcd_hold(hba, false);
+	ufshcd_hold(hba);
 	mutex_lock(&hba->dev_cmd.lock);
 	for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
 		err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
@@ -6227,22 +6195,22 @@ static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
 		ufshcd_setup_vreg(hba, true);
 		ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
 		ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
-		ufshcd_hold(hba, false);
+		ufshcd_hold(hba);
 		if (!ufshcd_is_clkgating_allowed(hba))
 			ufshcd_setup_clocks(hba, true);
 		ufshcd_release(hba);
 		pm_op = hba->is_sys_suspended ? UFS_SYSTEM_PM : UFS_RUNTIME_PM;
 		ufshcd_vops_resume(hba, pm_op);
 	} else {
-		ufshcd_hold(hba, false);
+		ufshcd_hold(hba);
 		if (ufshcd_is_clkscaling_supported(hba) &&
 		    hba->clk_scaling.is_enabled)
 			ufshcd_suspend_clkscaling(hba);
 		ufshcd_clk_scaling_allow(hba, false);
 	}
 	ufshcd_scsi_block_requests(hba);
-	/* Drain ufshcd_queuecommand() */
-	synchronize_rcu();
+	/* Wait for ongoing ufshcd_queuecommand() calls to finish. */
+	blk_mq_wait_quiesce_done(&hba->host->tag_set);
 	cancel_work_sync(&hba->eeh_work);
 }
@@ -6887,7 +6855,7 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
 		return PTR_ERR(req);
 
 	req->end_io_data = &wait;
-	ufshcd_hold(hba, false);
+	ufshcd_hold(hba);
 
 	spin_lock_irqsave(host->host_lock, flags);
@@ -7124,7 +7092,7 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
 		cmd_type = DEV_CMD_TYPE_NOP;
 		fallthrough;
 	case UPIU_TRANSACTION_QUERY_REQ:
-		ufshcd_hold(hba, false);
+		ufshcd_hold(hba);
 		mutex_lock(&hba->dev_cmd.lock);
 		err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
 						   desc_buff, buff_len,
@@ -7190,7 +7158,7 @@ int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *r
 	u16 ehs_len;
 
 	/* Protects use of hba->reserved_slot. */
-	ufshcd_hold(hba, false);
+	ufshcd_hold(hba);
 	mutex_lock(&hba->dev_cmd.lock);
 	down_read(&hba->clk_scaling_lock);
@@ -7425,7 +7393,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
 	WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
 
-	ufshcd_hold(hba, false);
+	ufshcd_hold(hba);
 	reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
 	/* If command is already aborted/completed, return FAILED. */
 	if (!(test_bit(tag, &hba->outstanding_reqs))) {
@@ -9416,7 +9384,7 @@ static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 	 * If we can't transition into any of the low power modes
 	 * just gate the clocks.
 	 */
-	ufshcd_hold(hba, false);
+	ufshcd_hold(hba);
 	hba->clk_gating.is_suspended = true;
 
 	if (ufshcd_is_clkscaling_supported(hba))
@@ -10204,6 +10172,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 	host->max_channel = UFSHCD_MAX_CHANNEL;
 	host->unique_id = host->host_no;
 	host->max_cmd_len = UFS_CDB_SIZE;
+	host->queuecommand_may_block = !!(hba->caps & UFSHCD_CAP_CLK_GATING);
 
 	hba->max_pwr_info.is_valid = false;
...
@@ -458,6 +458,9 @@ struct scsi_host_template {
 	/* True if the host uses host-wide tagspace */
 	unsigned host_tagset:1;
 
+	/* The queuecommand callback may block. See also BLK_MQ_F_BLOCKING. */
+	unsigned queuecommand_may_block:1;
+
 	/*
 	 * Countdown for host blocking with no commands outstanding.
 	 */
@@ -653,6 +656,9 @@ struct Scsi_Host {
 	/* True if the host uses host-wide tagspace */
 	unsigned host_tagset:1;
 
+	/* The queuecommand callback may block. See also BLK_MQ_F_BLOCKING. */
+	unsigned queuecommand_may_block:1;
+
 	/* Host responded with short (<36 bytes) INQUIRY result */
 	unsigned short_inquiry:1;
...
@@ -1358,7 +1358,7 @@ void ufshcd_fixup_dev_quirks(struct ufs_hba *hba,
 int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
 			    u8 **buf, bool ascii);
-int ufshcd_hold(struct ufs_hba *hba, bool async);
+void ufshcd_hold(struct ufs_hba *hba);
 void ufshcd_release(struct ufs_hba *hba);
 void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value);
...