Commit 51dd905b authored by Stanley Chu, committed by Martin K. Petersen

scsi: ufs: Fix WriteBooster flush during runtime suspend

Currently the UFS host driver keeps the VCC supply on if the UFS device needs
to do a WriteBooster flush during runtime suspend.

However, the UFS specification mentions:

"While the flushing operation is in progress, the device is in Active power
mode."

Therefore the UFS host driver needs to promise more: keep the UFS device in
Active power mode, since the device will not do any flush if it enters the
Sleep or PowerDown power mode. The same promise shall apply if the device
needs urgent BKOP during runtime suspend.

Fix this by not changing the device power mode in ufshcd_suspend() if a
WriteBooster flush or urgent BKOP is required.

Now, if the device finishes its job but is not resumed for a very long time,
the system suffers unnecessary power drain because VCC is still supplied. A
method to re-check the threshold for keeping the VCC supply on is required to
fix this power drain. However, the threshold re-check needs to re-activate
the link first, because the decision depends on the latest device status.

Also introduce a delayed work to force runtime resume after a certain delay
during runtime suspend. This makes the threshold re-check happen naturally on
entry to the next runtime suspend. The device can continue its WriteBooster
flush or urgent BKOP jobs soon after being resumed if it has no upcoming
requests and the link enters the hibern8 state, either by Auto-Hibern8 or by
hibern8 in the clk-gating scheme. This solution not only prevents power drain
but also makes as much use of the time as possible for the device's
background jobs.
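In outline, the mechanism added here boils down to the following condensed
sketch, assembled from the hunks in this patch (error paths and surrounding
code omitted):

	/* ufshcd_suspend(): the device still needs a flush or BKOP, so its
	 * power mode was left untouched; arm a recheck so that VCC is not
	 * held indefinitely. */
	if (hba->dev_info.b_rpm_dev_flush_capable)
		schedule_delayed_work(&hba->rpm_dev_flush_recheck_work,
			msecs_to_jiffies(RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS));

	/* The delayed work simply bounces runtime PM: get_sync() resumes the
	 * device, put_sync() lets it runtime-suspend again, and that suspend
	 * re-evaluates ufshcd_wb_need_flush() against fresh device status. */
	pm_runtime_get_sync(hba->dev);
	pm_runtime_put_sync(hba->dev);

	/* ufshcd_resume(): a real resume supersedes the recheck. */
	if (hba->dev_info.b_rpm_dev_flush_capable) {
		hba->dev_info.b_rpm_dev_flush_capable = false;
		cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);
	}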

Link: https://lore.kernel.org/r/20200522083212.4008-5-stanley.chu@mediatek.com
Reviewed-by: Asutosh Das <asutoshd@codeaurora.org>
Signed-off-by: Stanley Chu <stanley.chu@mediatek.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent e31011ab
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
@@ -574,7 +574,7 @@ struct ufs_dev_info {
 	u32 d_ext_ufs_feature_sup;
 	u8 b_wb_buffer_type;
 	u32 d_wb_alloc_units;
-	bool keep_vcc_on;
+	bool b_rpm_dev_flush_capable;
 	u8 b_presrv_uspc_en;
 };
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
@@ -94,6 +94,9 @@
 /* default delay of autosuspend: 2000 ms */
 #define RPM_AUTOSUSPEND_DELAY_MS	 2000

+/* Default delay of RPM device flush delayed work */
+#define RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS	 5000
+
 /* Default value of wait time before gating device ref clock */
 #define UFSHCD_REF_CLK_GATING_WAIT_US	 0xFF /* microsecs */
@@ -5314,7 +5317,7 @@ static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
 	return false;
 }

-static bool ufshcd_wb_keep_vcc_on(struct ufs_hba *hba)
+static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
 {
 	int ret;
 	u32 avail_buf;
@@ -5352,6 +5355,21 @@ static bool ufshcd_wb_keep_vcc_on(struct ufs_hba *hba)
 	return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf);
 }

+static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work)
+{
+	struct ufs_hba *hba = container_of(to_delayed_work(work),
+					   struct ufs_hba,
+					   rpm_dev_flush_recheck_work);
+
+	/*
+	 * To prevent unnecessary VCC power drain after device finishes
+	 * WriteBooster buffer flush or Auto BKOPs, force runtime resume
+	 * after a certain delay to recheck the threshold by next runtime
+	 * suspend.
+	 */
+	pm_runtime_get_sync(hba->dev);
+	pm_runtime_put_sync(hba->dev);
+}
+
 /**
  * ufshcd_exception_event_handler - handle exceptions raised by device
  * @work: pointer to work data
@@ -8099,7 +8117,6 @@ static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
 	    !hba->dev_info.is_lu_power_on_wp) {
 		ufshcd_setup_vreg(hba, false);
 	} else if (!ufshcd_is_ufs_dev_active(hba)) {
-		if (!hba->dev_info.keep_vcc_on)
-			ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
+		ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
 		if (!ufshcd_is_link_active(hba)) {
 			ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
@@ -8225,28 +8242,31 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 			ufshcd_disable_auto_bkops(hba);
 		}
 		/*
-		 * With wb enabled, if the bkops is enabled or if the
-		 * configured WB type is 70% full, keep vcc ON
-		 * for the device to flush the wb buffer
+		 * If device needs to do BKOP or WB buffer flush during
+		 * Hibern8, keep device power mode as "active power mode"
+		 * and VCC supply.
 		 */
-		if ((hba->auto_bkops_enabled && ufshcd_is_wb_allowed(hba)) ||
-		    ufshcd_wb_keep_vcc_on(hba))
-			hba->dev_info.keep_vcc_on = true;
-		else
-			hba->dev_info.keep_vcc_on = false;
-	} else {
-		hba->dev_info.keep_vcc_on = false;
+		hba->dev_info.b_rpm_dev_flush_capable =
+			hba->auto_bkops_enabled ||
+			(((req_link_state == UIC_LINK_HIBERN8_STATE) ||
+			((req_link_state == UIC_LINK_ACTIVE_STATE) &&
+			ufshcd_is_auto_hibern8_enabled(hba))) &&
+			ufshcd_wb_need_flush(hba));
 	}

-	if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
-	    ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
-	     !ufshcd_is_runtime_pm(pm_op))) {
-		/* ensure that bkops is disabled */
-		ufshcd_disable_auto_bkops(hba);
-		ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
-		if (ret)
-			goto enable_gating;
+	if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) {
+		if ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
+		    !ufshcd_is_runtime_pm(pm_op)) {
+			/* ensure that bkops is disabled */
+			ufshcd_disable_auto_bkops(hba);
+		}
+
+		if (!hba->dev_info.b_rpm_dev_flush_capable) {
+			ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
+			if (ret)
+				goto enable_gating;
+		}
 	}

 	flush_work(&hba->eeh_work);
 	ret = ufshcd_link_state_transition(hba, req_link_state, 1);
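For readability, the new condition computed in ufshcd_suspend() above can be
restated as below; link_will_be_in_hibern8 is a hypothetical local used only
for this illustration, not part of the patch:

	/* Illustration only: the device may keep flushing during runtime
	 * suspend only if BKOPs is enabled, or if WriteBooster needs a flush
	 * and the link will rest in hibern8, where the device can remain in
	 * Active power mode. */
	bool link_will_be_in_hibern8 =
		(req_link_state == UIC_LINK_HIBERN8_STATE) ||
		((req_link_state == UIC_LINK_ACTIVE_STATE) &&
		 ufshcd_is_auto_hibern8_enabled(hba));

	hba->dev_info.b_rpm_dev_flush_capable =
		hba->auto_bkops_enabled ||
		(link_will_be_in_hibern8 && ufshcd_wb_need_flush(hba));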
@@ -8298,9 +8318,16 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 	if (hba->clk_scaling.is_allowed)
 		ufshcd_resume_clkscaling(hba);
 	hba->clk_gating.is_suspended = false;
+	hba->dev_info.b_rpm_dev_flush_capable = false;
 	ufshcd_release(hba);
 out:
+	if (hba->dev_info.b_rpm_dev_flush_capable) {
+		schedule_delayed_work(&hba->rpm_dev_flush_recheck_work,
+			msecs_to_jiffies(RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS));
+	}
+
 	hba->pm_op_in_progress = 0;
+
 	if (ret)
 		ufshcd_update_reg_hist(&hba->ufs_stats.suspend_err, (u32)ret);
 	return ret;
@@ -8389,6 +8416,11 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 	/* Enable Auto-Hibernate if configured */
 	ufshcd_auto_hibern8_enable(hba);

+	if (hba->dev_info.b_rpm_dev_flush_capable) {
+		hba->dev_info.b_rpm_dev_flush_capable = false;
+		cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);
+	}
+
 	/* Schedule clock gating in case of no access to UFS device yet */
 	ufshcd_release(hba);
@@ -8862,6 +8894,9 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 					  UFS_SLEEP_PWR_MODE,
 					  UIC_LINK_HIBERN8_STATE);

+	INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work,
+			  ufshcd_rpm_dev_flush_recheck_work);
+
 	/* Set the default auto-hiberate idle timer value to 150 ms */
 	if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) {
 		hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
@@ -745,6 +745,7 @@ struct ufs_hba {
 	struct request_queue	*bsg_queue;
 	bool wb_buf_flush_enabled;
 	bool wb_enabled;
+	struct delayed_work rpm_dev_flush_recheck_work;
 };

 /* Returns true if clocks can be gated. Otherwise false */