Commit 104cd58d authored by Manivannan Sadhasivam, committed by Martin K. Petersen

scsi: ufs: qcom: Remove support for host controllers older than v2.0

Legacy platforms using host controllers older than version 2.0 are not
supported upstream, so there is no need to carry code to support them.
Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
Link: https://lore.kernel.org/r/20231208065902.11006-16-manivannan.sadhasivam@linaro.org
Tested-by: Andrew Halaney <ahalaney@redhat.com> # sa8775p-ride
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 6b481af2
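
For a quick view of the result, here is how two of the touched functions read once the patch is applied, reconstructed from the hunks below (a sketch assembled from the diff, not a verbatim tree dump): QUniPro mode is now selected unconditionally, and the HCI version is always reported as 2.0.

static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
{
	/* QUniPro is the only supported mode left; select it unconditionally */
	ufshcd_rmwl(host->hba, QUNIPRO_SEL, QUNIPRO_SEL, REG_UFS_CFG1);

	if (host->hw_ver.major >= 0x05)
		ufshcd_rmwl(host->hba, QUNIPRO_G4_SEL, 0, REG_UFS_CFG0);
}

static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
{
	/* Controllers older than v2.0, which reported UFSHCI 1.1, are gone */
	return ufshci_version(2, 0);
}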
drivers/ufs/host/ufs-qcom.c
@@ -274,9 +274,7 @@ static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
 
 static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
 {
-	ufshcd_rmwl(host->hba, QUNIPRO_SEL,
-		    ufs_qcom_cap_qunipro(host) ? QUNIPRO_SEL : 0,
-		    REG_UFS_CFG1);
+	ufshcd_rmwl(host->hba, QUNIPRO_SEL, QUNIPRO_SEL, REG_UFS_CFG1);
 
 	if (host->hw_ver.major >= 0x05)
 		ufshcd_rmwl(host->hba, QUNIPRO_G4_SEL, 0, REG_UFS_CFG0);
@@ -333,18 +331,8 @@ static u32 ufs_qcom_get_hs_gear(struct ufs_hba *hba)
 {
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 
-	if (host->hw_ver.major == 0x1) {
-		/*
-		 * HS-G3 operations may not reliably work on legacy QCOM
-		 * UFS host controller hardware even though capability
-		 * exchange during link startup phase may end up
-		 * negotiating maximum supported gear as G3.
-		 * Hence downgrade the maximum supported gear to HS-G2.
-		 */
-		return UFS_HS_G2;
-	} else if (host->hw_ver.major >= 0x4) {
+	if (host->hw_ver.major >= 0x4)
 		return UFS_QCOM_MAX_GEAR(ufshcd_readl(hba, REG_UFS_PARAM0));
-	}
 
 	/* Default is HS-G3 */
 	return UFS_HS_G3;
@@ -457,41 +445,16 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
 {
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 	struct ufs_clk_info *clki;
-	u32 core_clk_period_in_ns;
-	u32 tx_clk_cycles_per_us = 0;
 	unsigned long core_clk_rate = 0;
 	u32 core_clk_cycles_per_us;
 
-	static u32 pwm_fr_table[][2] = {
-		{UFS_PWM_G1, 0x1},
-		{UFS_PWM_G2, 0x1},
-		{UFS_PWM_G3, 0x1},
-		{UFS_PWM_G4, 0x1},
-	};
-
-	static u32 hs_fr_table_rA[][2] = {
-		{UFS_HS_G1, 0x1F},
-		{UFS_HS_G2, 0x3e},
-		{UFS_HS_G3, 0x7D},
-	};
-
-	static u32 hs_fr_table_rB[][2] = {
-		{UFS_HS_G1, 0x24},
-		{UFS_HS_G2, 0x49},
-		{UFS_HS_G3, 0x92},
-	};
-
 	/*
-	 * The Qunipro controller does not use following registers:
-	 * SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG &
-	 * UFS_REG_PA_LINK_STARTUP_TIMER.
-	 * However UTP controller uses SYS1CLK_1US_REG register for Interrupt
+	 * UTP controller uses SYS1CLK_1US_REG register for Interrupt
 	 * Aggregation logic.
 	 * It is mandatory to write SYS1CLK_1US_REG register on UFS host
 	 * controller V4.0.0 onwards.
 	 */
-	if (host->hw_ver.major < 4 && ufs_qcom_cap_qunipro(host) &&
-	    !ufshcd_is_intr_aggr_allowed(hba))
+	if (host->hw_ver.major < 4 && !ufshcd_is_intr_aggr_allowed(hba))
 		return 0;
 
 	if (gear == 0) {
@@ -524,79 +487,6 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
 		mb();
 	}
 
-	if (ufs_qcom_cap_qunipro(host))
-		return 0;
-
-	core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate;
-	core_clk_period_in_ns <<= OFFSET_CLK_NS_REG;
-	core_clk_period_in_ns &= MASK_CLK_NS_REG;
-
-	switch (hs) {
-	case FASTAUTO_MODE:
-	case FAST_MODE:
-		if (rate == PA_HS_MODE_A) {
-			if (gear > ARRAY_SIZE(hs_fr_table_rA)) {
-				dev_err(hba->dev,
-					"%s: index %d exceeds table size %zu\n",
-					__func__, gear,
-					ARRAY_SIZE(hs_fr_table_rA));
-				return -EINVAL;
-			}
-			tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1];
-		} else if (rate == PA_HS_MODE_B) {
-			if (gear > ARRAY_SIZE(hs_fr_table_rB)) {
-				dev_err(hba->dev,
-					"%s: index %d exceeds table size %zu\n",
-					__func__, gear,
-					ARRAY_SIZE(hs_fr_table_rB));
-				return -EINVAL;
-			}
-			tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1];
-		} else {
-			dev_err(hba->dev, "%s: invalid rate = %d\n",
-				__func__, rate);
-			return -EINVAL;
-		}
-		break;
-	case SLOWAUTO_MODE:
-	case SLOW_MODE:
-		if (gear > ARRAY_SIZE(pwm_fr_table)) {
-			dev_err(hba->dev,
-				"%s: index %d exceeds table size %zu\n",
-				__func__, gear,
-				ARRAY_SIZE(pwm_fr_table));
-			return -EINVAL;
-		}
-		tx_clk_cycles_per_us = pwm_fr_table[gear-1][1];
-		break;
-	case UNCHANGED:
-	default:
-		dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs);
-		return -EINVAL;
-	}
-
-	if (ufshcd_readl(hba, REG_UFS_TX_SYMBOL_CLK_NS_US) !=
-	    (core_clk_period_in_ns | tx_clk_cycles_per_us)) {
-		/* this register 2 fields shall be written at once */
-		ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us,
-			      REG_UFS_TX_SYMBOL_CLK_NS_US);
-		/*
-		 * make sure above write gets applied before we return from
-		 * this function.
-		 */
-		mb();
-	}
-
-	if (update_link_startup_timer && host->hw_ver.major != 0x5) {
-		ufshcd_writel(hba, ((core_clk_rate / MSEC_PER_SEC) * 100),
-			      REG_UFS_CFG0);
-		/*
-		 * make sure that this configuration is applied before
-		 * we return
-		 */
-		mb();
-	}
-
 	return 0;
 }
@@ -604,7 +494,6 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
 				       enum ufs_notify_change_status status)
 {
 	int err = 0;
-	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 
 	switch (status) {
 	case PRE_CHANGE:
@@ -615,11 +504,9 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
 			return -EINVAL;
 		}
 
-		if (ufs_qcom_cap_qunipro(host)) {
-			err = ufs_qcom_set_core_clk_ctrl(hba, true);
-			if (err)
-				dev_err(hba->dev, "cfg core clk ctrl failed\n");
-		}
+		err = ufs_qcom_set_core_clk_ctrl(hba, true);
+		if (err)
+			dev_err(hba->dev, "cfg core clk ctrl failed\n");
 		/*
 		 * Some UFS devices (and may be host) have issues if LCC is
 		 * enabled. So we are setting PA_Local_TX_LCC_Enable to 0
@@ -918,12 +805,7 @@ static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
 
 static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
 {
-	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-
-	if (host->hw_ver.major == 0x1)
-		return ufshci_version(1, 1);
-	else
-		return ufshci_version(2, 0);
+	return ufshci_version(2, 0);
 }
 
 /**
@@ -939,46 +821,21 @@ static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
 {
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 
-	if (host->hw_ver.major == 0x01) {
-		hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
-			    | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP
-			    | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE;
-
-		if (host->hw_ver.minor == 0x0001 && host->hw_ver.step == 0x0001)
-			hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR;
-
-		hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
-	}
-
-	if (host->hw_ver.major == 0x2) {
+	if (host->hw_ver.major == 0x2)
 		hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;
 
-		if (!ufs_qcom_cap_qunipro(host))
-			/* Legacy UniPro mode still need following quirks */
-			hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
-				| UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
-				| UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP);
-	}
-
 	if (host->hw_ver.major > 0x3)
 		hba->quirks |= UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH;
 }
 
 static void ufs_qcom_set_caps(struct ufs_hba *hba)
 {
-	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-
 	hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
 	hba->caps |= UFSHCD_CAP_CLK_SCALING | UFSHCD_CAP_WB_WITH_CLK_SCALING;
 	hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
 	hba->caps |= UFSHCD_CAP_WB_EN;
 	hba->caps |= UFSHCD_CAP_AGGR_POWER_COLLAPSE;
 	hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
-
-	if (host->hw_ver.major >= 0x2) {
-		host->caps = UFS_QCOM_CAP_QUNIPRO |
-			     UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE;
-	}
 }
 
 /**
@@ -1101,9 +958,7 @@ static int ufs_qcom_init(struct ufs_hba *hba)
 {
 	int err;
 	struct device *dev = hba->dev;
-	struct platform_device *pdev = to_platform_device(dev);
 	struct ufs_qcom_host *host;
-	struct resource *res;
 	struct ufs_clk_info *clki;
 
 	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
@@ -1154,25 +1009,8 @@ static int ufs_qcom_init(struct ufs_hba *hba)
 	ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
 		&host->hw_ver.minor, &host->hw_ver.step);
 
-	/*
-	 * for newer controllers, device reference clock control bit has
-	 * moved inside UFS controller register address space itself.
-	 */
-	if (host->hw_ver.major >= 0x02) {
-		host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1;
-		host->dev_ref_clk_en_mask = BIT(26);
-	} else {
-		/* "dev_ref_clk_ctrl_mem" is optional resource */
-		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
-						   "dev_ref_clk_ctrl_mem");
-		if (res) {
-			host->dev_ref_clk_ctrl_mmio =
-					devm_ioremap_resource(dev, res);
-			if (IS_ERR(host->dev_ref_clk_ctrl_mmio))
-				host->dev_ref_clk_ctrl_mmio = NULL;
-			host->dev_ref_clk_en_mask = BIT(5);
-		}
-	}
+	host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1;
+	host->dev_ref_clk_en_mask = BIT(26);
 
 	list_for_each_entry(clki, &hba->clk_list_head, list) {
 		if (!strcmp(clki->name, "core_clk_unipro"))
@@ -1351,9 +1189,6 @@ static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
 	struct ufs_pa_layer_attr *attr = &host->dev_req_params;
 	int ret;
 
-	if (!ufs_qcom_cap_qunipro(host))
-		return 0;
-
 	ret = ufs_qcom_cfg_timers(hba, attr->gear_rx, attr->pwr_rx,
 				  attr->hs_rate, false, true);
 	if (ret) {
@@ -1371,13 +1206,9 @@ static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
 
 static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
 {
-	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 	int err;
 	u32 core_clk_ctrl_reg;
 
-	if (!ufs_qcom_cap_qunipro(host))
-		return 0;
-
 	err = ufshcd_dme_get(hba,
 			UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
 			&core_clk_ctrl_reg);
@@ -1396,11 +1227,6 @@ static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
 
 static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba)
 {
-	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-
-	if (!ufs_qcom_cap_qunipro(host))
-		return 0;
-
 	/* set unipro core clock attributes and clear clock divider */
 	return ufs_qcom_set_core_clk_ctrl(hba, false);
 }
drivers/ufs/host/ufs-qcom.h
@@ -114,13 +114,6 @@ enum {
 	DFC_HW_CGC_EN | TRLUT_HW_CGC_EN |\
 	TMRLUT_HW_CGC_EN | OCSC_HW_CGC_EN)
 
-/* bit offset */
-#define OFFSET_CLK_NS_REG		0xa
-
-/* bit masks */
-#define MASK_TX_SYMBOL_CLK_1US_REG	GENMASK(9, 0)
-#define MASK_CLK_NS_REG			GENMASK(23, 10)
-
 /* QUniPro Vendor specific attributes */
 #define PA_VS_CONFIG_REG1	0x9000
 #define DME_VS_CORE_CLK_CTRL	0xD002
@@ -189,21 +182,6 @@ struct ufs_qcom_testbus {
 struct gpio_desc;
 
 struct ufs_qcom_host {
-	/*
-	 * Set this capability if host controller supports the QUniPro mode
-	 * and if driver wants the Host controller to operate in QUniPro mode.
-	 * Note: By default this capability will be kept enabled if host
-	 * controller supports the QUniPro mode.
-	 */
-#define UFS_QCOM_CAP_QUNIPRO	0x1
-
-	/*
-	 * Set this capability if host controller can retain the secure
-	 * configuration even after UFS controller core power collapse.
-	 */
-#define UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE	0x2
-	u32 caps;
-
 	struct phy *generic_phy;
 	struct ufs_hba *hba;
 	struct ufs_pa_layer_attr dev_req_params;
@@ -253,9 +231,4 @@ ufs_qcom_get_debug_reg_offset(struct ufs_qcom_host *host, u32 reg)
 
 int ufs_qcom_testbus_config(struct ufs_qcom_host *host);
 
-static inline bool ufs_qcom_cap_qunipro(struct ufs_qcom_host *host)
-{
-	return host->caps & UFS_QCOM_CAP_QUNIPRO;
-}
-
 #endif /* UFS_QCOM_H_ */
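
The net effect on the clock-scaling path, again reconstructed from the hunks above as a sketch: with ufs_qcom_cap_qunipro() and the UFS_QCOM_CAP_* flags removed, the scaling hooks no longer bail out early and always program the UniPro core clock attributes, for example:

static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba)
{
	/* No ufs_qcom_cap_qunipro() early return anymore: QUniPro is assumed */
	/* set unipro core clock attributes and clear clock divider */
	return ufs_qcom_set_core_clk_ctrl(hba, false);
}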