Commit 0e0a4da3 authored by Martin K. Petersen

Merge patch series "scsi: ufs: Remove overzealous memory barriers"

Andrew Halaney <ahalaney@redhat.com> says:

Please review with care as I'm not all that confident in this subject.
UFS uses a lot of mb() variants, most of them with comments along the lines
of "ensure this takes effect before continuing". An mb() isn't really the
way to guarantee that; reading the register back is the proper method.
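
To illustrate the conversion pattern, here is a rough sketch (not taken
from the series itself; the helper name is made up, but the
ufshcd_readl()/ufshcd_writel() accessors and REG_INTERRUPT_ENABLE are the
ones touched by the patches below):

    /* Illustrative sketch only: clear some interrupt enable bits and make
     * sure the write has actually reached the controller before carrying on.
     */
    static void example_disable_intr(struct ufs_hba *hba, u32 mask)
    {
            u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

            set &= ~mask;
            ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);

            /*
             * A wmb() here would only order the write against later writes;
             * it does not force it out to the device. Reading the same
             * register back does, because the read cannot complete until
             * the preceding write has reached the controller.
             */
            ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
    }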

Some of these, though, I think can go a step further and drop the mb()
variant without adding a read back. As far as I can tell there's no real
reason to ensure the write takes effect in most of those cases: there's no
delay() or similar afterwards, and eventually another readl()/writel()
happens, which is ordered by definition. Some of the patches in this series
do that where I was confident it was safe (or where a reviewer had
previously pointed out that they thought it was safe).
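
As a sketch of that second case (again hypothetical; the helper name is
made up, but the doorbell register and __set_bit() call are the ones in the
__ufshcd_issue_tm_cmd hunk below):

    /*
     * Illustrative sketch only: ring the task management doorbell. No
     * barrier or read back is needed here, because nothing that follows
     * depends on the write having reached the device at this exact point,
     * and MMIO accessors to the same device are ordered, so any later
     * readl()/writel() (e.g. polling for completion) is observed by the
     * hardware after this write.
     */
    static void example_ring_tm_doorbell(struct ufs_hba *hba, int task_tag)
    {
            __set_bit(task_tag, &hba->outstanding_tasks);
            ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL);
    }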

Thanks in advance for the help,
Andrew

Link: https://lore.kernel.org/r/20240329-ufs-reset-ensure-effect-before-delay-v5-0-181252004586@redhat.com
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parents 9282899e 356a8ce7
@@ -4305,7 +4305,7 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
 		 * Make sure UIC command completion interrupt is disabled before
 		 * issuing UIC command.
 		 */
-		wmb();
+		ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
 		reenable_intr = true;
 	}
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -4787,12 +4787,6 @@ int ufshcd_make_hba_operational(struct ufs_hba *hba)
 	ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
 			REG_UTP_TASK_REQ_LIST_BASE_H);
 
-	/*
-	 * Make sure base address and interrupt setup are updated before
-	 * enabling the run/stop registers below.
-	 */
-	wmb();
-
 	/*
 	 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
 	 */
@@ -7108,10 +7102,7 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
 	/* send command to the controller */
 	__set_bit(task_tag, &hba->outstanding_tasks);
 
 	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL);
-	/* Make sure that doorbell is committed immediately */
-	wmb();
-
 	spin_unlock_irqrestore(host->host_lock, flags);
@@ -10363,7 +10354,7 @@ int ufshcd_system_restore(struct device *dev)
 	 * are updated with the latest queue addresses. Only after
 	 * updating these addresses, we can queue the new commands.
 	 */
-	mb();
+	ufshcd_readl(hba, REG_UTP_TASK_REQ_LIST_BASE_H);
 
 	/* Resuming from hibernate, assume that link was OFF */
 	ufshcd_set_link_off(hba);
@@ -10584,7 +10575,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 	 * Make sure that UFS interrupts are disabled and any pending interrupt
 	 * status is cleared before registering UFS interrupt handler.
 	 */
-	mb();
+	ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
 
 	/* IRQ registration */
 	err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
......
@@ -136,7 +136,7 @@ static int cdns_ufs_set_hclkdiv(struct ufs_hba *hba)
 	 * Make sure the register was updated,
 	 * UniPro layer will not work with an incorrect value.
 	 */
-	mb();
+	ufshcd_readl(hba, CDNS_UFS_REG_HCLKDIV);
 
 	return 0;
 }
......
@@ -278,9 +278,6 @@ static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
 
 	if (host->hw_ver.major >= 0x05)
 		ufshcd_rmwl(host->hba, QUNIPRO_G4_SEL, 0, REG_UFS_CFG0);
-
-	/* make sure above configuration is applied before we return */
-	mb();
 }
/* /*
@@ -409,7 +406,7 @@ static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
 		    REG_UFS_CFG2);
 
 	/* Ensure that HW clock gating is enabled before next operations */
-	mb();
+	ufshcd_readl(hba, REG_UFS_CFG2);
 }
 
 static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
@@ -501,7 +498,7 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
 		 * make sure above write gets applied before we return from
 		 * this function.
 		 */
-		mb();
+		ufshcd_readl(hba, REG_UFS_SYS1CLK_1US);
 	}
 
 	return 0;
@@ -1445,11 +1442,6 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
 			    (u32)host->testbus.select_minor << offset,
 			    reg);
 	ufs_qcom_enable_test_bus(host);
-	/*
-	 * Make sure the test bus configuration is
-	 * committed before returning.
-	 */
-	mb();
 
 	return 0;
 }
......
@@ -151,10 +151,10 @@ static inline void ufs_qcom_assert_reset(struct ufs_hba *hba)
 	ufshcd_rmwl(hba, UFS_PHY_SOFT_RESET, UFS_PHY_SOFT_RESET, REG_UFS_CFG1);
 
 	/*
-	 * Make sure assertion of ufs phy reset is written to
-	 * register before returning
+	 * Dummy read to ensure the write takes effect before doing any sort
+	 * of delay
 	 */
-	mb();
+	ufshcd_readl(hba, REG_UFS_CFG1);
 }
 
 static inline void ufs_qcom_deassert_reset(struct ufs_hba *hba)
@@ -162,10 +162,10 @@ static inline void ufs_qcom_deassert_reset(struct ufs_hba *hba)
 	ufshcd_rmwl(hba, UFS_PHY_SOFT_RESET, 0, REG_UFS_CFG1);
 
 	/*
-	 * Make sure de-assertion of ufs phy reset is written to
-	 * register before returning
+	 * Dummy read to ensure the write takes effect before doing any sort
+	 * of delay
 	 */
-	mb();
+	ufshcd_readl(hba, REG_UFS_CFG1);
 }
 
 /* Host controller hardware version: major.minor.step */
......