Commit b86c75fd
Authored Mar 15, 2016 by James Bottomley

    Merge remote-tracking branch 'mkp-scsi/4.6/scsi-queue' into misc

Parents: a7dee8f4 cdc43ae3

Showing 16 changed files with 1143 additions and 135 deletions (+1143 / -135)
Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt   +3    -0
drivers/scsi/device_handler/scsi_dh_alua.c                +1    -1
drivers/scsi/megaraid/megaraid_sas.h                      +1    -1
drivers/scsi/megaraid/megaraid_sas_base.c                 +10   -4
drivers/scsi/scsi_sas_internal.h                          +1    -1
drivers/scsi/scsi_sysfs.c                                 +1    -1
drivers/scsi/scsi_transport_sas.c                         +2    -0
drivers/scsi/ufs/ufs-qcom.c                               +131  -24
drivers/scsi/ufs/ufs-qcom.h                               +9    -0
drivers/scsi/ufs/ufs.h                                    +33   -0
drivers/scsi/ufs/ufs_quirks.h                             +151  -0
drivers/scsi/ufs/ufshcd-pltfrm.c                          +19   -0
drivers/scsi/ufs/ufshcd.c                                 +724  -94
drivers/scsi/ufs/ufshcd.h                                 +31   -9
drivers/scsi/ufs/ufshci.h                                 +4    -0
drivers/scsi/ufs/unipro.h                                 +22   -0
Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt (view file @ b86c75fd)

...
@@ -38,6 +38,9 @@ Optional properties:
                           defined or a value in the array is "0" then it is assumed
                           that the frequency is set by the parent clock or a
                           fixed rate clock source.
+- lanes-per-direction   : number of lanes available per direction - either 1 or 2.
+                          Note that it is assumed that the same number of lanes is used in both
+                          directions at once. If not specified, the default is 2 lanes per direction.
 
 Note: If above properties are not defined it can be assumed that the supply
 regulators or clocks are always on.
...
drivers/scsi/device_handler/scsi_dh_alua.c (view file @ b86c75fd)

...
@@ -332,7 +332,7 @@ static int alua_check_vpd(struct scsi_device *sdev, struct alua_dh_data *h,
 {
     int rel_port = -1, group_id;
     struct alua_port_group *pg, *old_pg = NULL;
-    bool pg_updated;
+    bool pg_updated = false;
     unsigned long flags;
 
     group_id = scsi_vpd_tpg_id(sdev, &rel_port);
...
drivers/scsi/megaraid/megaraid_sas.h (view file @ b86c75fd)

...
@@ -2097,7 +2097,7 @@ struct megasas_instance {
     u8 UnevenSpanSupport;
 
     u8 supportmax256vd;
-    u8 allow_fw_scan;
+    u8 pd_list_not_supported;
     u16 fw_supported_vd_count;
     u16 fw_supported_pd_count;
...
drivers/scsi/megaraid/megaraid_sas_base.c (view file @ b86c75fd)

...
@@ -1838,7 +1838,7 @@ static int megasas_slave_configure(struct scsi_device *sdev)
     struct megasas_instance *instance;
 
     instance = megasas_lookup_instance(sdev->host->host_no);
-    if (instance->allow_fw_scan) {
+    if (instance->pd_list_not_supported) {
         if (sdev->channel < MEGASAS_MAX_PD_CHANNELS &&
             sdev->type == TYPE_DISK) {
             pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
...
@@ -1874,7 +1874,8 @@ static int megasas_slave_alloc(struct scsi_device *sdev)
     pd_index =
         (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
-    if ((instance->allow_fw_scan || instance->pd_list[pd_index].driveState ==
+    if ((instance->pd_list_not_supported ||
+        instance->pd_list[pd_index].driveState ==
         MR_PD_STATE_SYSTEM)) {
         goto scan_target;
     }
...
@@ -4087,7 +4088,13 @@ megasas_get_pd_list(struct megasas_instance *instance)
     switch (ret) {
     case DCMD_FAILED:
-        megaraid_sas_kill_hba(instance);
+        dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
+            "failed/not supported by firmware\n");
+
+        if (instance->ctrl_context)
+            megaraid_sas_kill_hba(instance);
+        else
+            instance->pd_list_not_supported = 1;
         break;
     case DCMD_TIMEOUT:
...
@@ -5034,7 +5041,6 @@ static int megasas_init_fw(struct megasas_instance *instance)
     case PCI_DEVICE_ID_DELL_PERC5:
     default:
         instance->instancet = &megasas_instance_template_xscale;
-        instance->allow_fw_scan = 1;
         break;
     }
...
drivers/scsi/scsi_sas_internal.h (view file @ b86c75fd)

...
@@ -4,7 +4,7 @@
 #define SAS_HOST_ATTRS      0
 #define SAS_PHY_ATTRS       17
 #define SAS_PORT_ATTRS      1
-#define SAS_RPORT_ATTRS     7
+#define SAS_RPORT_ATTRS     8
 #define SAS_END_DEV_ATTRS   5
 #define SAS_EXPANDER_ATTRS  7
...
drivers/scsi/scsi_sysfs.c (view file @ b86c75fd)

...
@@ -1105,7 +1105,7 @@ static umode_t scsi_sdev_bin_attr_is_visible(struct kobject *kobj,
     if (attr == &dev_attr_vpd_pg80 && !sdev->vpd_pg80)
         return 0;
 
-    if (attr == &dev_attr_vpd_pg83 && sdev->vpd_pg83)
+    if (attr == &dev_attr_vpd_pg83 && !sdev->vpd_pg83)
         return 0;
 
     return S_IRUGO;
...
drivers/scsi/scsi_transport_sas.c (view file @ b86c75fd)

...
@@ -1286,6 +1286,7 @@ sas_rphy_protocol_attr(identify.target_port_protocols, target_port_protocols);
 sas_rphy_simple_attr(identify.sas_address, sas_address, "0x%016llx\n",
         unsigned long long);
 sas_rphy_simple_attr(identify.phy_identifier, phy_identifier, "%d\n", u8);
+sas_rphy_simple_attr(scsi_target_id, scsi_target_id, "%d\n", u32);
 
 /* only need 8 bytes of data plus header (4 or 8) */
 #define BUF_SIZE 64
...
@@ -1886,6 +1887,7 @@ sas_attach_transport(struct sas_function_template *ft)
     SETUP_RPORT_ATTRIBUTE(rphy_device_type);
     SETUP_RPORT_ATTRIBUTE(rphy_sas_address);
     SETUP_RPORT_ATTRIBUTE(rphy_phy_identifier);
+    SETUP_RPORT_ATTRIBUTE(rphy_scsi_target_id);
     SETUP_OPTIONAL_RPORT_ATTRIBUTE(rphy_enclosure_identifier,
                    get_enclosure_identifier);
     SETUP_OPTIONAL_RPORT_ATTRIBUTE(rphy_bay_identifier,
...
drivers/scsi/ufs/ufs-qcom.c (view file @ b86c75fd)

 /*
- * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2016, Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
...
@@ -16,8 +16,8 @@
 #include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/phy/phy.h>
 #include <linux/phy/phy-qcom-ufs.h>
 
 #include "ufshcd.h"
 #include "ufshcd-pltfrm.h"
 #include "unipro.h"
...
@@ -58,6 +58,12 @@ static void ufs_qcom_dump_regs(struct ufs_hba *hba, int offset, int len,
             len * 4, false);
 }
 
+static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len,
+        char *prefix, void *priv)
+{
+    ufs_qcom_dump_regs(hba, offset, len, prefix);
+}
+
 static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes)
 {
     int err = 0;
...
@@ -106,9 +112,11 @@ static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
     if (!host->is_lane_clks_enabled)
         return;
 
-    clk_disable_unprepare(host->tx_l1_sync_clk);
+    if (host->hba->lanes_per_direction > 1)
+        clk_disable_unprepare(host->tx_l1_sync_clk);
     clk_disable_unprepare(host->tx_l0_sync_clk);
-    clk_disable_unprepare(host->rx_l1_sync_clk);
+    if (host->hba->lanes_per_direction > 1)
+        clk_disable_unprepare(host->rx_l1_sync_clk);
     clk_disable_unprepare(host->rx_l0_sync_clk);
 
     host->is_lane_clks_enabled = false;
...
@@ -132,21 +140,24 @@ static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
     if (err)
         goto disable_rx_l0;
 
-    err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
-        host->rx_l1_sync_clk);
-    if (err)
-        goto disable_tx_l0;
+    if (host->hba->lanes_per_direction > 1) {
+        err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
+            host->rx_l1_sync_clk);
+        if (err)
+            goto disable_tx_l0;
 
-    err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
-        host->tx_l1_sync_clk);
-    if (err)
-        goto disable_rx_l1;
+        err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
+            host->tx_l1_sync_clk);
+        if (err)
+            goto disable_rx_l1;
+    }
 
     host->is_lane_clks_enabled = true;
     goto out;
 
 disable_rx_l1:
-    clk_disable_unprepare(host->rx_l1_sync_clk);
+    if (host->hba->lanes_per_direction > 1)
+        clk_disable_unprepare(host->rx_l1_sync_clk);
 disable_tx_l0:
     clk_disable_unprepare(host->tx_l0_sync_clk);
 disable_rx_l0:
...
@@ -170,14 +181,16 @@ static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
     if (err)
         goto out;
 
-    err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
-        &host->rx_l1_sync_clk);
-    if (err)
-        goto out;
-
-    err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
-        &host->tx_l1_sync_clk);
+    /* In case of single lane per direction, don't read lane1 clocks */
+    if (host->hba->lanes_per_direction > 1) {
+        err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
+            &host->rx_l1_sync_clk);
+        if (err)
+            goto out;
 
+        err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
+            &host->tx_l1_sync_clk);
+    }
 out:
     return err;
 }
...
@@ -267,9 +280,8 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
     ret = ufs_qcom_phy_calibrate_phy(phy, is_rate_B);
-
     if (ret) {
-        dev_err(hba->dev, "%s: ufs_qcom_phy_calibrate_phy()failed, ret = %d\n",
-            __func__, ret);
+        dev_err(hba->dev,
+            "%s: ufs_qcom_phy_calibrate_phy() failed, ret = %d\n",
+            __func__, ret);
         goto out;
     }
...
@@ -519,6 +531,18 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
         err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150);
 
+        /*
+         * Some UFS devices (and may be host) have issues if LCC is
+         * enabled. So we are setting PA_Local_TX_LCC_Enable to 0
+         * before link startup which will make sure that both host
+         * and device TX LCC are disabled once link startup is
+         * completed.
+         */
+        if (ufshcd_get_local_unipro_ver(hba) != UFS_UNIPRO_VER_1_41)
+            err = ufshcd_dme_set(hba,
+                    UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE),
+                    0);
+
         break;
     case POST_CHANGE:
         ufs_qcom_link_startup_post_change(hba);
...
@@ -962,6 +986,10 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
             goto out;
         }
 
+        /* enable the device ref clock before changing to HS mode */
+        if (!ufshcd_is_hs_mode(&hba->pwr_info) &&
+            ufshcd_is_hs_mode(dev_req_params))
+            ufs_qcom_dev_ref_clk_ctrl(host, true);
         break;
     case POST_CHANGE:
         if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
...
@@ -989,6 +1017,11 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
         memcpy(&host->dev_req_params,
                 dev_req_params, sizeof(*dev_req_params));
         ufs_qcom_update_bus_bw_vote(host);
+
+        /* disable the device ref clock if entered PWM mode */
+        if (ufshcd_is_hs_mode(&hba->pwr_info) &&
+            !ufshcd_is_hs_mode(dev_req_params))
+            ufs_qcom_dev_ref_clk_ctrl(host, false);
         break;
     default:
         ret = -EINVAL;
...
@@ -1090,6 +1123,9 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on)
             ufs_qcom_phy_disable_iface_clk(host->generic_phy);
             goto out;
         }
+        /* enable the device ref clock for HS mode*/
+        if (ufshcd_is_hs_mode(&hba->pwr_info))
+            ufs_qcom_dev_ref_clk_ctrl(host, true);
         vote = host->bus_vote.saved_vote;
         if (vote == host->bus_vote.min_bw_vote)
             ufs_qcom_update_bus_bw_vote(host);
...
@@ -1367,6 +1403,74 @@ static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
     return err;
 }
 
+static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba,
+        void *priv, void (*print_fn)(struct ufs_hba *hba,
+        int offset, int num_regs, char *str, void *priv))
+{
+    u32 reg;
+    struct ufs_qcom_host *host;
+
+    if (unlikely(!hba)) {
+        pr_err("%s: hba is NULL\n", __func__);
+        return;
+    }
+    if (unlikely(!print_fn)) {
+        dev_err(hba->dev, "%s: print_fn is NULL\n", __func__);
+        return;
+    }
+
+    host = ufshcd_get_variant(hba);
+    if (!(host->dbg_print_en & UFS_QCOM_DBG_PRINT_REGS_EN))
+        return;
+
+    reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_REG_OCSC);
+    print_fn(hba, reg, 44, "UFS_UFS_DBG_RD_REG_OCSC ", priv);
+
+    reg = ufshcd_readl(hba, REG_UFS_CFG1);
+    reg |= UFS_BIT(17);
+    ufshcd_writel(hba, reg, REG_UFS_CFG1);
+
+    reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM);
+    print_fn(hba, reg, 32, "UFS_UFS_DBG_RD_EDTL_RAM ", priv);
+
+    reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_DESC_RAM);
+    print_fn(hba, reg, 128, "UFS_UFS_DBG_RD_DESC_RAM ", priv);
+
+    reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM);
+    print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv);
+
+    ufshcd_writel(hba, (reg & ~UFS_BIT(17)), REG_UFS_CFG1);
+
+    reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
+    print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv);
+
+    reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UARM);
+    print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UARM ", priv);
+
+    reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TXUC);
+    print_fn(hba, reg, 48, "UFS_DBG_RD_REG_TXUC ", priv);
+
+    reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_RXUC);
+    print_fn(hba, reg, 27, "UFS_DBG_RD_REG_RXUC ", priv);
+
+    reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_DFC);
+    print_fn(hba, reg, 19, "UFS_DBG_RD_REG_DFC ", priv);
+
+    reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TRLUT);
+    print_fn(hba, reg, 34, "UFS_DBG_RD_REG_TRLUT ", priv);
+
+    reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT);
+    print_fn(hba, reg, 9, "UFS_DBG_RD_REG_TMRLUT ", priv);
+}
+
+static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host)
+{
+    if (host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
+        ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1);
+    else
+        ufshcd_rmwl(host->hba, TEST_BUS_EN, 0, REG_UFS_CFG1);
+}
+
 static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host)
 {
     /* provide a legal default configuration */
...
@@ -1475,6 +1579,7 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
     ufshcd_rmwl(host->hba, mask,
         (u32)host->testbus.select_minor << offset,
         reg);
+    ufs_qcom_enable_test_bus(host);
     ufshcd_release(host->hba);
     pm_runtime_put_sync(host->hba->dev);
...
@@ -1491,8 +1596,10 @@ static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
     ufs_qcom_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16,
             "HCI Vendor Specific Registers ");
 
+    ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper);
     ufs_qcom_testbus_read(hba);
 }
+
 /**
  * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
  *
...
@@ -1537,7 +1644,7 @@ static int ufs_qcom_probe(struct platform_device *pdev)
  * ufs_qcom_remove - set driver_data of the device to NULL
  * @pdev: pointer to platform device handle
  *
- * Always return 0
+ * Always returns 0
  */
 static int ufs_qcom_remove(struct platform_device *pdev)
 {
...
drivers/scsi/ufs/ufs-qcom.h (view file @ b86c75fd)

...
@@ -241,6 +241,15 @@ struct ufs_qcom_host {
     struct ufs_qcom_testbus testbus;
 };
 
+static inline u32
+ufs_qcom_get_debug_reg_offset(struct ufs_qcom_host *host, u32 reg)
+{
+    if (host->hw_ver.major <= 0x02)
+        return UFS_CNTLR_2_x_x_VEN_REGS_OFFSET(reg);
+
+    return UFS_CNTLR_3_x_x_VEN_REGS_OFFSET(reg);
+};
+
 #define ufs_qcom_is_link_off(hba) ufshcd_is_link_off(hba)
 #define ufs_qcom_is_link_active(hba) ufshcd_is_link_active(hba)
 #define ufs_qcom_is_link_hibern8(hba) ufshcd_is_link_hibern8(hba)
...
drivers/scsi/ufs/ufs.h (view file @ b86c75fd)

...
@@ -43,6 +43,7 @@
 #define GENERAL_UPIU_REQUEST_SIZE 32
 #define QUERY_DESC_MAX_SIZE       255
 #define QUERY_DESC_MIN_SIZE       2
+#define QUERY_DESC_HDR_SIZE       2
 #define QUERY_OSF_SIZE            (GENERAL_UPIU_REQUEST_SIZE - \
                     (sizeof(struct utp_upiu_header)))
...
@@ -195,6 +196,37 @@ enum unit_desc_param {
     UNIT_DESC_PARAM_LARGE_UNIT_SIZE_M1  = 0x22,
 };
 
+/* Device descriptor parameters offsets in bytes*/
+enum device_desc_param {
+    DEVICE_DESC_PARAM_LEN               = 0x0,
+    DEVICE_DESC_PARAM_TYPE              = 0x1,
+    DEVICE_DESC_PARAM_DEVICE_TYPE       = 0x2,
+    DEVICE_DESC_PARAM_DEVICE_CLASS      = 0x3,
+    DEVICE_DESC_PARAM_DEVICE_SUB_CLASS  = 0x4,
+    DEVICE_DESC_PARAM_PRTCL             = 0x5,
+    DEVICE_DESC_PARAM_NUM_LU            = 0x6,
+    DEVICE_DESC_PARAM_NUM_WLU           = 0x7,
+    DEVICE_DESC_PARAM_BOOT_ENBL         = 0x8,
+    DEVICE_DESC_PARAM_DESC_ACCSS_ENBL   = 0x9,
+    DEVICE_DESC_PARAM_INIT_PWR_MODE     = 0xA,
+    DEVICE_DESC_PARAM_HIGH_PR_LUN       = 0xB,
+    DEVICE_DESC_PARAM_SEC_RMV_TYPE      = 0xC,
+    DEVICE_DESC_PARAM_SEC_LU            = 0xD,
+    DEVICE_DESC_PARAM_BKOP_TERM_LT      = 0xE,
+    DEVICE_DESC_PARAM_ACTVE_ICC_LVL     = 0xF,
+    DEVICE_DESC_PARAM_SPEC_VER          = 0x10,
+    DEVICE_DESC_PARAM_MANF_DATE         = 0x12,
+    DEVICE_DESC_PARAM_MANF_NAME         = 0x14,
+    DEVICE_DESC_PARAM_PRDCT_NAME        = 0x15,
+    DEVICE_DESC_PARAM_SN                = 0x16,
+    DEVICE_DESC_PARAM_OEM_ID            = 0x17,
+    DEVICE_DESC_PARAM_MANF_ID           = 0x18,
+    DEVICE_DESC_PARAM_UD_OFFSET         = 0x1A,
+    DEVICE_DESC_PARAM_UD_LEN            = 0x1B,
+    DEVICE_DESC_PARAM_RTT_CAP           = 0x1C,
+    DEVICE_DESC_PARAM_FRQ_RTC           = 0x1D,
+};
+
 /*
  * Logical Unit Write Protect
  * 00h: LU not write protected
...
@@ -469,6 +501,7 @@ struct ufs_vreg {
     struct regulator *reg;
     const char *name;
     bool enabled;
+    bool unused;
     int min_uV;
     int max_uV;
     int min_uA;
...
drivers/scsi/ufs/ufs_quirks.h (new file, 0 → 100644, view file @ b86c75fd)

/*
 * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#ifndef _UFS_QUIRKS_H_
#define _UFS_QUIRKS_H_

/* return true if s1 is a prefix of s2 */
#define STR_PRFX_EQUAL(s1, s2) !strncmp(s1, s2, strlen(s1))

#define UFS_ANY_VENDOR 0xFFFF
#define UFS_ANY_MODEL  "ANY_MODEL"

#define MAX_MODEL_LEN 16

#define UFS_VENDOR_TOSHIBA     0x198
#define UFS_VENDOR_SAMSUNG     0x1CE

/**
 * ufs_device_info - ufs device details
 * @wmanufacturerid: card details
 * @model: card model
 */
struct ufs_device_info {
    u16 wmanufacturerid;
    char model[MAX_MODEL_LEN + 1];
};

/**
 * ufs_dev_fix - ufs device quirk info
 * @card: ufs card details
 * @quirk: device quirk
 */
struct ufs_dev_fix {
    struct ufs_device_info card;
    unsigned int quirk;
};

#define END_FIX { { 0 }, 0 }

/* add specific device quirk */
#define UFS_FIX(_vendor, _model, _quirk) \
        {                                 \
            .card.wmanufacturerid = (_vendor),\
            .card.model = (_model),       \
            .quirk = (_quirk),            \
        }

/*
 * If UFS device is having issue in processing LCC (Line Control
 * Command) coming from UFS host controller then enable this quirk.
 * When this quirk is enabled, host controller driver should disable
 * the LCC transmission on UFS host controller (by clearing
 * TX_LCC_ENABLE attribute of host to 0).
 */
#define UFS_DEVICE_QUIRK_BROKEN_LCC (1 << 0)

/*
 * Some UFS devices don't need VCCQ rail for device operations. Enabling this
 * quirk for such devices will make sure that VCCQ rail is not voted.
 */
#define UFS_DEVICE_NO_VCCQ (1 << 1)

/*
 * Some vendor's UFS device sends back to back NACs for the DL data frames
 * causing the host controller to raise the DFES error status. Sometimes
 * such UFS devices send back to back NAC without waiting for new
 * retransmitted DL frame from the host and in such cases it might be possible
 * the Host UniPro goes into bad state without raising the DFES error
 * interrupt. If this happens then all the pending commands would timeout
 * only after respective SW command (which is generally too large).
 *
 * We can workaround such device behaviour like this:
 * - As soon as SW sees the DL NAC error, it should schedule the error handler
 * - Error handler would sleep for 50ms to see if there are any fatal errors
 *   raised by UFS controller.
 * - If there are fatal errors then SW does normal error recovery.
 * - If there are no fatal errors then SW sends the NOP command to device
 *   to check if link is alive.
 * - If NOP command times out, SW does normal error recovery
 * - If NOP command succeed, skip the error handling.
 *
 * If DL NAC error is seen multiple times with some vendor's UFS devices then
 * enable this quirk to initiate quick error recovery and also silence related
 * error logs to reduce spamming of kernel logs.
 */
#define UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS (1 << 2)

/*
 * Some UFS devices may not work properly after resume if the link was kept
 * in off state during suspend. Enabling this quirk will not allow the
 * link to be kept in off state during suspend.
 */
#define UFS_DEVICE_QUIRK_NO_LINK_OFF (1 << 3)

/*
 * Few Toshiba UFS device models advertise RX_MIN_ACTIVATETIME_CAPABILITY as
 * 600us which may not be enough for reliable hibern8 exit hardware sequence
 * from UFS device.
 * To workaround this issue, host should set its PA_TACTIVATE time to 1ms even
 * if device advertises RX_MIN_ACTIVATETIME_CAPABILITY less than 1ms.
 */
#define UFS_DEVICE_QUIRK_PA_TACTIVATE (1 << 4)

/*
 * Some UFS memory devices may have really low read/write throughput in
 * FAST AUTO mode, enable this quirk to make sure that FAST AUTO mode is
 * never enabled for such devices.
 */
#define UFS_DEVICE_NO_FASTAUTO (1 << 5)

/*
 * It seems some UFS devices may keep drawing more than sleep current
 * (at least for 500us) from UFS rails (especially from VCCQ rail).
 * To avoid this situation, add 2ms delay before putting these UFS
 * rails in LPM mode.
 */
#define UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM   (1 << 6)

struct ufs_hba;
void ufs_advertise_fixup_device(struct ufs_hba *hba);

static struct ufs_dev_fix ufs_fixups[] = {
    /* UFS cards deviations table */
    UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
        UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
    UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
    UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
        UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
    UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_FASTAUTO),
    UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
        UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
    UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
        UFS_DEVICE_QUIRK_PA_TACTIVATE),
    UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
        UFS_DEVICE_QUIRK_PA_TACTIVATE),

    END_FIX
};
#endif /* UFS_QUIRKS_H_ */
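The quirk table above is matched by manufacturer ID plus model prefix, and the winning entries are OR-ed into hba->dev_quirks (see ufs_advertise_fixup_device() in the ufshcd.c diff below). A hedged sketch of a consumer check follows; the wrapping function is hypothetical, but the quirk bit, ufshcd_dme_set() and PA_LOCAL_TX_LCC_ENABLE are all part of this merge, and ufs-qcom performs the equivalent LCC disable at link startup:

/* Hedged sketch: how a host driver might honor a device quirk bit. */
static int example_link_startup_pre_change(struct ufs_hba *hba)
{
    int err = 0;

    /* BROKEN_LCC devices misbehave on Line Control Commands, so clear
     * the host's TX LCC attribute before link startup. */
    if (hba->dev_quirks & UFS_DEVICE_QUIRK_BROKEN_LCC)
        err = ufshcd_dme_set(hba,
                UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0);

    return err;
}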
drivers/scsi/ufs/ufshcd-pltfrm.c (view file @ b86c75fd)

...
@@ -40,6 +40,8 @@
 #include "ufshcd.h"
 #include "ufshcd-pltfrm.h"
 
+#define UFSHCD_DEFAULT_LANES_PER_DIRECTION      2
+
 static int ufshcd_parse_clock_info(struct ufs_hba *hba)
 {
     int ret = 0;
...
@@ -277,6 +279,21 @@ void ufshcd_pltfrm_shutdown(struct platform_device *pdev)
 }
 EXPORT_SYMBOL_GPL(ufshcd_pltfrm_shutdown);
 
+static void ufshcd_init_lanes_per_dir(struct ufs_hba *hba)
+{
+    struct device *dev = hba->dev;
+    int ret;
+
+    ret = of_property_read_u32(dev->of_node, "lanes-per-direction",
+        &hba->lanes_per_direction);
+    if (ret) {
+        dev_dbg(hba->dev,
+            "%s: failed to read lanes-per-direction, ret=%d\n",
+            __func__, ret);
+        hba->lanes_per_direction = UFSHCD_DEFAULT_LANES_PER_DIRECTION;
+    }
+}
+
 /**
  * ufshcd_pltfrm_init - probe routine of the driver
  * @pdev: pointer to Platform device handle
...
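The parser above falls back to two lanes whenever the property is absent. For orientation, a hedged sketch of the device-tree fragment it consumes, written as a C comment since no example node ships in this diff; the node name and value are hypothetical, only the lanes-per-direction property name comes from the binding change above:

/* Hypothetical DT fragment consumed by ufshcd_init_lanes_per_dir():
 *
 *    ufshc@fake0000 {
 *        ...
 *        lanes-per-direction = <1>;
 *    };
 *
 * Without the property, of_property_read_u32() returns an error and
 * hba->lanes_per_direction defaults to UFSHCD_DEFAULT_LANES_PER_DIRECTION (2).
 */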
@@ -331,6 +348,8 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
     pm_runtime_set_active(&pdev->dev);
     pm_runtime_enable(&pdev->dev);
 
+    ufshcd_init_lanes_per_dir(hba);
+
     err = ufshcd_init(hba, mmio_base, irq);
     if (err) {
         dev_err(dev, "Initialization failed\n");
...
drivers/scsi/ufs/ufshcd.c (view file @ b86c75fd)

...
@@ -39,8 +39,10 @@
 #include <linux/async.h>
 #include <linux/devfreq.h>
+#include <linux/nls.h>
+#include <linux/of.h>
 #include "ufshcd.h"
+#include "ufs_quirks.h"
 #include "unipro.h"
 
 #define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
...
@@ -131,9 +133,11 @@ enum {
 /* UFSHCD UIC layer error flags */
 enum {
     UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
-    UFSHCD_UIC_NL_ERROR = (1 << 1), /* Network layer error */
-    UFSHCD_UIC_TL_ERROR = (1 << 2), /* Transport Layer error */
-    UFSHCD_UIC_DME_ERROR = (1 << 3), /* DME error */
+    UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
+    UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
+    UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
+    UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
+    UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
 };
 
 /* Interrupt configuration options */
...
@@ -193,6 +197,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba);
 static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
                  bool skip_ref_clk);
 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
+static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused);
 static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
 static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
...
@@ -231,6 +236,16 @@ static inline void ufshcd_disable_irq(struct ufs_hba *hba)
     }
 }
 
+/* replace non-printable or non-ASCII characters with spaces */
+static inline void ufshcd_remove_non_printable(char *val)
+{
+    if (!val)
+        return;
+
+    if (*val < 0x20 || *val > 0x7e)
+        *val = ' ';
+}
+
 /*
  * ufshcd_wait_for_register - wait for register value to change
  * @hba - per-adapter interface
...
@@ -239,11 +254,13 @@ static inline void ufshcd_disable_irq(struct ufs_hba *hba)
  * @val - wait condition
  * @interval_us - polling interval in microsecs
  * @timeout_ms - timeout in millisecs
+ * @can_sleep - perform sleep or just spin
  *
  * Returns -ETIMEDOUT on error, zero on success
  */
-static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
-        u32 val, unsigned long interval_us, unsigned long timeout_ms)
+int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
+                u32 val, unsigned long interval_us,
+                unsigned long timeout_ms, bool can_sleep)
 {
     int err = 0;
     unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
...
@@ -252,9 +269,10 @@ static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
     val = val & mask;
 
     while ((ufshcd_readl(hba, reg) & mask) != val) {
-        /* wakeup within 50us of expiry */
-        usleep_range(interval_us, interval_us + 50);
-
+        if (can_sleep)
+            usleep_range(interval_us, interval_us + 50);
+        else
+            udelay(interval_us);
         if (time_after(jiffies, timeout)) {
             if ((ufshcd_readl(hba, reg) & mask) != val)
                 err = -ETIMEDOUT;
...
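The new can_sleep flag lets atomic-context callers spin with udelay() instead of sleeping. A hedged sketch of the two calling styles, using arguments taken verbatim from call sites updated later in this diff (ufshcd_clear_cmd() sleeps; ufshcd_hba_stop() is invoked under the host lock during host reset and must not):

/* Process context: sleep between polls (as in ufshcd_clear_cmd()). */
err = ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
                   mask, ~mask, 1000, 1000, true);

/* Atomic context: busy-wait with udelay() (as ufshcd_hba_stop() does
 * when its caller holds hba->host->host_lock and passes can_sleep == false). */
err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
                   CONTROLLER_ENABLE, CONTROLLER_DISABLE,
                   10, 1, false);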
@@ -552,6 +570,34 @@ static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
     return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
 }
 
+u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
+{
+    /* HCI version 1.0 and 1.1 supports UniPro 1.41 */
+    if ((hba->ufs_version == UFSHCI_VERSION_10) ||
+        (hba->ufs_version == UFSHCI_VERSION_11))
+        return UFS_UNIPRO_VER_1_41;
+    else
+        return UFS_UNIPRO_VER_1_6;
+}
+EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
+
+static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
+{
+    /*
+     * If both host and device support UniPro ver1.6 or later, PA layer
+     * parameters tuning happens during link startup itself.
+     *
+     * We can manually tune PA layer parameters if either host or device
+     * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
+     * logic simple, we will only do manual tuning if local unipro version
+     * doesn't support ver1.6 or later.
+     */
+    if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
+        return true;
+    else
+        return false;
+}
+
 static void ufshcd_ungate_work(struct work_struct *work)
 {
     int ret;
...
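A hedged worked illustration of the version gate above, following the mapping the function itself defines:

/* Hedged illustration (values per ufshcd_get_local_unipro_ver() above):
 *   UFSHCI_VERSION_10/11 -> UFS_UNIPRO_VER_1_41 -> 1.41 < 1.6, so
 *       ufshcd_is_unipro_pa_params_tuning_req() returns true and the
 *       driver tunes PA_TACTIVATE/PA_Hibern8Time manually (see
 *       ufshcd_tune_unipro_params() later in this diff).
 *   newer HCI            -> UFS_UNIPRO_VER_1_6  -> tuning happens during
 *       link startup, no manual intervention.
 */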
@@ -1458,7 +1504,7 @@ ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
      */
     err = ufshcd_wait_for_register(hba,
             REG_UTP_TRANSFER_REQ_DOOR_BELL,
-            mask, ~mask, 1000, 1000);
+            mask, ~mask, 1000, 1000, true);
 
     return err;
 }
...
@@ -1857,21 +1903,7 @@ static int ufshcd_query_attr_retry(struct ufs_hba *hba,
     return ret;
 }
 
-/**
- * ufshcd_query_descriptor - API function for sending descriptor requests
- * hba: per-adapter instance
- * opcode: attribute opcode
- * idn: attribute idn to access
- * index: index field
- * selector: selector field
- * desc_buf: the buffer that contains the descriptor
- * buf_len: length parameter passed to the device
- *
- * Returns 0 for success, non-zero in case of failure.
- * The buf_len parameter will contain, on return, the length parameter
- * received on the response.
- */
-static int ufshcd_query_descriptor(struct ufs_hba *hba,
+static int __ufshcd_query_descriptor(struct ufs_hba *hba,
         enum query_opcode opcode, enum desc_idn idn, u8 index,
         u8 selector, u8 *desc_buf, int *buf_len)
 {
...
@@ -1935,6 +1967,39 @@ static int ufshcd_query_descriptor(struct ufs_hba *hba,
     return err;
 }
 
+/**
+ * ufshcd_query_descriptor_retry - API function for sending descriptor
+ * requests
+ * hba: per-adapter instance
+ * opcode: attribute opcode
+ * idn: attribute idn to access
+ * index: index field
+ * selector: selector field
+ * desc_buf: the buffer that contains the descriptor
+ * buf_len: length parameter passed to the device
+ *
+ * Returns 0 for success, non-zero in case of failure.
+ * The buf_len parameter will contain, on return, the length parameter
+ * received on the response.
+ */
+int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
+            enum query_opcode opcode, enum desc_idn idn, u8 index,
+            u8 selector, u8 *desc_buf, int *buf_len)
+{
+    int err;
+    int retries;
+
+    for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+        err = __ufshcd_query_descriptor(hba, opcode, idn, index,
+                        selector, desc_buf, buf_len);
+        if (!err || err == -EINVAL)
+            break;
+    }
+
+    return err;
+}
+EXPORT_SYMBOL(ufshcd_query_descriptor_retry);
+
 /**
  * ufshcd_read_desc_param - read the specified descriptor parameter
  * @hba: Pointer to adapter instance
...
@@ -1977,9 +2042,9 @@ static int ufshcd_read_desc_param(struct ufs_hba *hba,
         return -ENOMEM;
     }
 
-    ret = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC,
-                      desc_id, desc_index, 0, desc_buf,
-                      &buff_len);
+    ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
+                    desc_id, desc_index, 0, desc_buf,
+                    &buff_len);
 
     if (ret || (buff_len < ufs_query_desc_max_size[desc_id]) ||
         (desc_buf[QUERY_DESC_LENGTH_OFFSET] !=
...
@@ -2017,6 +2082,82 @@ static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
     return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
 }
 
+int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
+{
+    return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
+}
+EXPORT_SYMBOL(ufshcd_read_device_desc);
+
+/**
+ * ufshcd_read_string_desc - read string descriptor
+ * @hba: pointer to adapter instance
+ * @desc_index: descriptor index
+ * @buf: pointer to buffer where descriptor would be read
+ * @size: size of buf
+ * @ascii: if true convert from unicode to ascii characters
+ *
+ * Return 0 in case of success, non-zero otherwise
+ */
+int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf,
+                u32 size, bool ascii)
+{
+    int err = 0;
+
+    err = ufshcd_read_desc(hba,
+                QUERY_DESC_IDN_STRING, desc_index, buf, size);
+
+    if (err) {
+        dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n",
+            __func__, QUERY_REQ_RETRIES, err);
+        goto out;
+    }
+
+    if (ascii) {
+        int desc_len;
+        int ascii_len;
+        int i;
+        char *buff_ascii;
+
+        desc_len = buf[0];
+        /* remove header and divide by 2 to move from UTF16 to UTF8 */
+        ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;
+        if (size < ascii_len + QUERY_DESC_HDR_SIZE) {
+            dev_err(hba->dev, "%s: buffer allocated size is too small\n",
+                    __func__);
+            err = -ENOMEM;
+            goto out;
+        }
+
+        buff_ascii = kmalloc(ascii_len, GFP_KERNEL);
+        if (!buff_ascii) {
+            err = -ENOMEM;
+            goto out_free_buff;
+        }
+
+        /*
+         * the descriptor contains string in UTF16 format
+         * we need to convert to utf-8 so it can be displayed
+         */
+        utf16s_to_utf8s((wchar_t *)&buf[QUERY_DESC_HDR_SIZE],
+                desc_len - QUERY_DESC_HDR_SIZE,
+                UTF16_BIG_ENDIAN, buff_ascii, ascii_len);
+
+        /* replace non-printable or non-ASCII characters with spaces */
+        for (i = 0; i < ascii_len; i++)
+            ufshcd_remove_non_printable(&buff_ascii[i]);
+
+        memset(buf + QUERY_DESC_HDR_SIZE, 0,
+                size - QUERY_DESC_HDR_SIZE);
+        memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
+        buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
+out_free_buff:
+        kfree(buff_ascii);
+    }
+out:
+    return err;
+}
+EXPORT_SYMBOL(ufshcd_read_string_desc);
+
 /**
  * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
  * @hba: Pointer to adapter instance
...
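A hedged usage sketch of the new string-descriptor reader; this mirrors the call that ufs_get_device_info() makes later in this diff (which passes ASCII_STD for the last argument), with a hypothetical model_index:

/* Hedged usage: read the product-name string descriptor as ASCII. */
u8 str_buf[QUERY_DESC_STRING_MAX_SIZE + 1] = {0};
int err;

err = ufshcd_read_string_desc(hba, model_index, str_buf,
                  QUERY_DESC_STRING_MAX_SIZE, true /* ascii */);

For a descriptor of length desc_len = 34, the conversion reserves ascii_len = (34 - QUERY_DESC_HDR_SIZE) / 2 + 1 = 17 bytes, since each UTF-16 code unit collapses to (at most) one ASCII byte plus a terminator slot.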
@@ -2813,6 +2954,23 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba)
     return err;
 }
 
+/**
+ * ufshcd_hba_stop - Send controller to reset state
+ * @hba: per adapter instance
+ * @can_sleep: perform sleep or just spin
+ */
+static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
+{
+    int err;
+
+    ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
+    err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
+                    CONTROLLER_ENABLE, CONTROLLER_DISABLE,
+                    10, 1, can_sleep);
+    if (err)
+        dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
+}
+
 /**
  * ufshcd_hba_enable - initialize the controller
  * @hba: per adapter instance
...
@@ -2833,18 +2991,9 @@ static int ufshcd_hba_enable(struct ufs_hba *hba)
      * development and testing of this driver. msleep can be changed to
      * mdelay and retry count can be reduced based on the controller.
      */
-    if (!ufshcd_is_hba_active(hba)) {
-
+    if (!ufshcd_is_hba_active(hba))
         /* change controller state to "reset state" */
-        ufshcd_hba_stop(hba);
-
-        /*
-         * This delay is based on the testing done with UFS host
-         * controller FPGA. The delay can be changed based on the
-         * host controller used.
-         */
-        msleep(5);
-    }
+        ufshcd_hba_stop(hba, true);
 
     /* UniPro link is disabled at this point */
     ufshcd_set_link_off(hba);
...
@@ -3365,31 +3514,18 @@ static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
 }
 
 /**
- * ufshcd_transfer_req_compl - handle SCSI and query command completion
+ * __ufshcd_transfer_req_compl - handle SCSI and query command completion
  * @hba: per adapter instance
+ * @completed_reqs: requests to complete
  */
-static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
+static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
+                    unsigned long completed_reqs)
 {
     struct ufshcd_lrb *lrbp;
     struct scsi_cmnd *cmd;
-    unsigned long completed_reqs;
-    u32 tr_doorbell;
     int result;
     int index;
 
-    /* Resetting interrupt aggregation counters first and reading the
-     * DOOR_BELL afterward allows us to handle all the completed requests.
-     * In order to prevent other interrupts starvation the DB is read once
-     * after reset. The down side of this solution is the possibility of
-     * false interrupt if device completes another request after resetting
-     * aggregation and before reading the DB.
-     */
-    if (ufshcd_is_intr_aggr_allowed(hba))
-        ufshcd_reset_intr_aggr(hba);
-
-    tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
-    completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
-
     for_each_set_bit(index, &completed_reqs, hba->nutrs) {
         lrbp = &hba->lrb[index];
         cmd = lrbp->cmd;
...
@@ -3418,6 +3554,31 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba)
     wake_up(&hba->dev_cmd.tag_wq);
 }
 
+/**
+ * ufshcd_transfer_req_compl - handle SCSI and query command completion
+ * @hba: per adapter instance
+ */
+static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
+{
+    unsigned long completed_reqs;
+    u32 tr_doorbell;
+
+    /* Resetting interrupt aggregation counters first and reading the
+     * DOOR_BELL afterward allows us to handle all the completed requests.
+     * In order to prevent other interrupts starvation the DB is read once
+     * after reset. The down side of this solution is the possibility of
+     * false interrupt if device completes another request after resetting
+     * aggregation and before reading the DB.
+     */
+    if (ufshcd_is_intr_aggr_allowed(hba))
+        ufshcd_reset_intr_aggr(hba);
+
+    tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+    completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
+
+    __ufshcd_transfer_req_compl(hba, completed_reqs);
+}
+
 /**
  * ufshcd_disable_ee - disable exception event
  * @hba: per-adapter instance
...
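The completion mask in the wrapper above falls out of a single XOR against the hardware doorbell. A hedged worked example with invented bit values:

/* Hedged illustration of the doorbell XOR in ufshcd_transfer_req_compl():
 *   hba->outstanding_reqs = 0b1011   (tags 0, 1 and 3 were issued)
 *   tr_doorbell           = 0b0010   (hardware still owns tag 1)
 *   completed_reqs        = 0b1011 ^ 0b0010 = 0b1001
 * so tags 0 and 3 are completed; tag 1 remains outstanding.
 */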
@@ -3630,7 +3791,7 @@ static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
  */
 static int ufshcd_urgent_bkops(struct ufs_hba *hba)
 {
-    return ufshcd_bkops_ctrl(hba, BKOPS_STATUS_PERF_IMPACT);
+    return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
 }
 
 static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
...
@@ -3639,6 +3800,43 @@ static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
             QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
 }
 
+static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
+{
+    int err;
+    u32 curr_status = 0;
+
+    if (hba->is_urgent_bkops_lvl_checked)
+        goto enable_auto_bkops;
+
+    err = ufshcd_get_bkops_status(hba, &curr_status);
+    if (err) {
+        dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
+                __func__, err);
+        goto out;
+    }
+
+    /*
+     * We are seeing that some devices are raising the urgent bkops
+     * exception events even when BKOPS status doesn't indicate performance
+     * impacted or critical. Handle these devices by determining their urgent
+     * bkops status at runtime.
+     */
+    if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
+        dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
+                __func__, curr_status);
+        /* update the current status as the urgent bkops level */
+        hba->urgent_bkops_lvl = curr_status;
+        hba->is_urgent_bkops_lvl_checked = true;
+    }
+
+enable_auto_bkops:
+    err = ufshcd_enable_auto_bkops(hba);
+out:
+    if (err < 0)
+        dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
+                __func__, err);
+}
+
 /**
  * ufshcd_exception_event_handler - handle exceptions raised by device
  * @work: pointer to work data
...
@@ -3662,17 +3860,95 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
     }
 
     status &= hba->ee_ctrl_mask;
-    if (status & MASK_EE_URGENT_BKOPS) {
-        err = ufshcd_urgent_bkops(hba);
-        if (err < 0)
-            dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
-                    __func__, err);
-    }
+
+    if (status & MASK_EE_URGENT_BKOPS)
+        ufshcd_bkops_exception_event_handler(hba);
+
 out:
     pm_runtime_put_sync(hba->dev);
     return;
 }
 
+/* Complete requests that have door-bell cleared */
+static void ufshcd_complete_requests(struct ufs_hba *hba)
+{
+    ufshcd_transfer_req_compl(hba);
+    ufshcd_tmc_handler(hba);
+}
+
+/**
+ * ufshcd_quirk_dl_nac_errors - This function checks if error handling is
+ *              to recover from the DL NAC errors or not.
+ * @hba: per-adapter instance
+ *
+ * Returns true if error handling is required, false otherwise
+ */
+static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
+{
+    unsigned long flags;
+    bool err_handling = true;
+
+    spin_lock_irqsave(hba->host->host_lock, flags);
+    /*
+     * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only workaround the
+     * device fatal error and/or DL NAC & REPLAY timeout errors.
+     */
+    if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
+        goto out;
+
+    if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
+        ((hba->saved_err & UIC_ERROR) &&
+        (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
+        goto out;
+
+    if ((hba->saved_err & UIC_ERROR) &&
+        (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
+        int err;
+        /*
+         * wait for 50ms to see if we can get any other errors or not.
+         */
+        spin_unlock_irqrestore(hba->host->host_lock, flags);
+        msleep(50);
+        spin_lock_irqsave(hba->host->host_lock, flags);
+
+        /*
+         * now check if we have got any other severe errors other than
+         * DL NAC error?
+         */
+        if ((hba->saved_err & INT_FATAL_ERRORS) ||
+            ((hba->saved_err & UIC_ERROR) &&
+            (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
+            goto out;
+
+        /*
+         * As DL NAC is the only error received so far, send out NOP
+         * command to confirm if link is still active or not.
+         *   - If we don't get any response then do error recovery.
+         *   - If we get response then clear the DL NAC error bit.
+         */
+
+        spin_unlock_irqrestore(hba->host->host_lock, flags);
+        err = ufshcd_verify_dev_init(hba);
+        spin_lock_irqsave(hba->host->host_lock, flags);
+
+        if (err)
+            goto out;
+
+        /* Link seems to be alive hence ignore the DL NAC errors */
+        if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
+            hba->saved_err &= ~UIC_ERROR;
+        /* clear NAC error */
+        hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
+        if (!hba->saved_uic_err) {
+            err_handling = false;
+            goto out;
+        }
+    }
+out:
+    spin_unlock_irqrestore(hba->host->host_lock, flags);
+    return err_handling;
+}
+
 /**
  * ufshcd_err_handler - handle UFS errors that require s/w attention
  * @work: pointer to work structure
...
@@ -3685,6 +3961,7 @@ static void ufshcd_err_handler(struct work_struct *work)
     u32 err_tm = 0;
     int err = 0;
     int tag;
+    bool needs_reset = false;
 
     hba = container_of(work, struct ufs_hba, eh_work);
...
@@ -3692,40 +3969,86 @@ static void ufshcd_err_handler(struct work_struct *work)
     ufshcd_hold(hba, false);
 
     spin_lock_irqsave(hba->host->host_lock, flags);
-    if (hba->ufshcd_state == UFSHCD_STATE_RESET) {
-        spin_unlock_irqrestore(hba->host->host_lock, flags);
+    if (hba->ufshcd_state == UFSHCD_STATE_RESET)
         goto out;
-    }
 
     hba->ufshcd_state = UFSHCD_STATE_RESET;
     ufshcd_set_eh_in_progress(hba);
 
     /* Complete requests that have door-bell cleared by h/w */
-    ufshcd_transfer_req_compl(hba);
-    ufshcd_tmc_handler(hba);
-    spin_unlock_irqrestore(hba->host->host_lock, flags);
+    ufshcd_complete_requests(hba);
+
+    if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
+        bool ret;
+
+        spin_unlock_irqrestore(hba->host->host_lock, flags);
+        /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
+        ret = ufshcd_quirk_dl_nac_errors(hba);
+        spin_lock_irqsave(hba->host->host_lock, flags);
+        if (!ret)
+            goto skip_err_handling;
+    }
+    if ((hba->saved_err & INT_FATAL_ERRORS) ||
+        ((hba->saved_err & UIC_ERROR) &&
+        (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
+                   UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
+                   UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
+        needs_reset = true;
+
+    /*
+     * if host reset is required then skip clearing the pending
+     * transfers forcefully because they will automatically get
+     * cleared after link startup.
+     */
+    if (needs_reset)
+        goto skip_pending_xfer_clear;
+
+    /* release lock as clear command might sleep */
+    spin_unlock_irqrestore(hba->host->host_lock, flags);
 
     /* Clear pending transfer requests */
-    for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs)
-        if (ufshcd_clear_cmd(hba, tag))
-            err_xfer |= 1 << tag;
+    for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
+        if (ufshcd_clear_cmd(hba, tag)) {
+            err_xfer = true;
+            goto lock_skip_pending_xfer_clear;
+        }
+    }
 
     /* Clear pending task management requests */
-    for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs)
-        if (ufshcd_clear_tm_cmd(hba, tag))
-            err_tm |= 1 << tag;
+    for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
+        if (ufshcd_clear_tm_cmd(hba, tag)) {
+            err_tm = true;
+            goto lock_skip_pending_xfer_clear;
+        }
+    }
 
-    /* Complete the requests that are cleared by s/w */
+lock_skip_pending_xfer_clear:
     spin_lock_irqsave(hba->host->host_lock, flags);
-    ufshcd_transfer_req_compl(hba);
-    ufshcd_tmc_handler(hba);
-    spin_unlock_irqrestore(hba->host->host_lock, flags);
 
+    /* Complete the requests that are cleared by s/w */
+    ufshcd_complete_requests(hba);
+
+    if (err_xfer || err_tm)
+        needs_reset = true;
+
+skip_pending_xfer_clear:
     /* Fatal errors need reset */
-    if (err_xfer || err_tm || (hba->saved_err & INT_FATAL_ERRORS) ||
-            ((hba->saved_err & UIC_ERROR) &&
-            (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR))) {
+    if (needs_reset) {
+        unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
+
+        /*
+         * ufshcd_reset_and_restore() does the link reinitialization
+         * which will need at least one empty doorbell slot to send the
+         * device management commands (NOP and query commands).
+         * If there is no slot empty at this moment then free up last
+         * slot forcefully.
+         */
+        if (hba->outstanding_reqs == max_doorbells)
+            __ufshcd_transfer_req_compl(hba,
+                            (1UL << (hba->nutrs - 1)));
+
+        spin_unlock_irqrestore(hba->host->host_lock, flags);
         err = ufshcd_reset_and_restore(hba);
+        spin_lock_irqsave(hba->host->host_lock, flags);
         if (err) {
             dev_err(hba->dev, "%s: reset and restore failed\n",
                     __func__);
...
@@ -3739,9 +4062,19 @@ static void ufshcd_err_handler(struct work_struct *work)
     hba->saved_err = 0;
     hba->saved_uic_err = 0;
     }
+
+skip_err_handling:
+    if (!needs_reset) {
+        hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+        if (hba->saved_err || hba->saved_uic_err)
+            dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
+                __func__, hba->saved_err, hba->saved_uic_err);
+    }
+
     ufshcd_clear_eh_in_progress(hba);
 
 out:
+    spin_unlock_irqrestore(hba->host->host_lock, flags);
     scsi_unblock_requests(hba->host);
     ufshcd_release(hba);
     pm_runtime_put_sync(hba->dev);
...
@@ -3759,6 +4092,14 @@ static void ufshcd_update_uic_error(struct ufs_hba *hba)
     reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
     if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
         hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
+    else if (hba->dev_quirks &
+           UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
+        if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
+            hba->uic_error |=
+                UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
+        else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
+            hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
+    }
 
     /* UIC NL/TL/DME errors needs software retry */
     reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
...
@@ -3796,15 +4137,18 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
     }
 
     if (queue_eh_work) {
+        /*
+         * update the transfer error masks to sticky bits, let's do this
+         * irrespective of current ufshcd_state.
+         */
+        hba->saved_err |= hba->errors;
+        hba->saved_uic_err |= hba->uic_error;
+
         /* handle fatal errors only when link is functional */
         if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
             /* block commands from scsi mid-layer */
             scsi_block_requests(hba->host);
 
-            /* transfer error masks to sticky bits */
-            hba->saved_err |= hba->errors;
-            hba->saved_uic_err |= hba->uic_error;
-
             hba->ufshcd_state = UFSHCD_STATE_ERROR;
             schedule_work(&hba->eh_work);
         }
...
@@ -3897,7 +4241,7 @@ static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
     /* poll for max. 1 sec to clear door bell register by h/w */
     err = ufshcd_wait_for_register(hba,
             REG_UTP_TASK_REQ_DOOR_BELL,
-            mask, 0, 1000, 1000);
+            mask, 0, 1000, 1000, true);
 out:
     return err;
 }
...
@@ -4179,7 +4523,7 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
     /* Reset the host controller */
     spin_lock_irqsave(hba->host->host_lock, flags);
-    ufshcd_hba_stop(hba);
+    ufshcd_hba_stop(hba, false);
     spin_unlock_irqrestore(hba->host->host_lock, flags);
 
     err = ufshcd_hba_enable(hba);
...
@@ -4466,6 +4810,164 @@ static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
     return ret;
 }
 
+static int ufs_get_device_info(struct ufs_hba *hba,
+                struct ufs_device_info *card_data)
+{
+    int err;
+    u8 model_index;
+    u8 str_desc_buf[QUERY_DESC_STRING_MAX_SIZE + 1] = {0};
+    u8 desc_buf[QUERY_DESC_DEVICE_MAX_SIZE];
+
+    err = ufshcd_read_device_desc(hba, desc_buf,
+                    QUERY_DESC_DEVICE_MAX_SIZE);
+    if (err) {
+        dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
+            __func__, err);
+        goto out;
+    }
+
+    /*
+     * getting vendor (manufacturerID) and Bank Index in big endian
+     * format
+     */
+    card_data->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
+                     desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
+
+    model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
+
+    err = ufshcd_read_string_desc(hba, model_index, str_desc_buf,
+                    QUERY_DESC_STRING_MAX_SIZE, ASCII_STD);
+    if (err) {
+        dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
+            __func__, err);
+        goto out;
+    }
+
+    str_desc_buf[QUERY_DESC_STRING_MAX_SIZE] = '\0';
+    strlcpy(card_data->model, (str_desc_buf + QUERY_DESC_HDR_SIZE),
+        min_t(u8, str_desc_buf[QUERY_DESC_LENGTH_OFFSET],
+              MAX_MODEL_LEN));
+
+    /* Null terminate the model string */
+    card_data->model[MAX_MODEL_LEN] = '\0';
+
+out:
+    return err;
+}
+
+void ufs_advertise_fixup_device(struct ufs_hba *hba)
+{
+    int err;
+    struct ufs_dev_fix *f;
+    struct ufs_device_info card_data;
+
+    card_data.wmanufacturerid = 0;
+
+    err = ufs_get_device_info(hba, &card_data);
+    if (err) {
+        dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
+            __func__, err);
+        return;
+    }
+
+    for (f = ufs_fixups; f->quirk; f++) {
+        if (((f->card.wmanufacturerid == card_data.wmanufacturerid) ||
+            (f->card.wmanufacturerid == UFS_ANY_VENDOR)) &&
+            (STR_PRFX_EQUAL(f->card.model, card_data.model) ||
+             !strcmp(f->card.model, UFS_ANY_MODEL)))
+            hba->dev_quirks |= f->quirk;
+    }
+}
+
+/**
+ * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
+ * @hba: per-adapter instance
+ *
+ * PA_TActivate parameter can be tuned manually if UniPro version is less than
+ * 1.61. PA_TActivate needs to be greater than or equal to peerM-PHY's
+ * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
+ * the hibern8 exit latency.
+ *
+ * Returns zero on success, non-zero error value on failure.
+ */
+static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
+{
+    int ret = 0;
+    u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
+
+    ret = ufshcd_dme_peer_get(hba,
+                  UIC_ARG_MIB_SEL(
+                    RX_MIN_ACTIVATETIME_CAPABILITY,
+                    UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
+                  &peer_rx_min_activatetime);
+    if (ret)
+        goto out;
+
+    /* make sure proper unit conversion is applied */
+    tuned_pa_tactivate =
+        ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
+         / PA_TACTIVATE_TIME_UNIT_US);
+    ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
+                 tuned_pa_tactivate);
+
+out:
+    return ret;
+}
+
+/**
+ * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
+ * @hba: per-adapter instance
+ *
+ * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than
+ * 1.61. PA_Hibern8Time needs to be maximum of local M-PHY's
+ * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
+ * This optimal value can help reduce the hibern8 exit latency.
+ *
+ * Returns zero on success, non-zero error value on failure.
+ */
+static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
+{
+    int ret = 0;
+    u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
+    u32 max_hibern8_time, tuned_pa_hibern8time;
+
+    ret = ufshcd_dme_get(hba,
+                 UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
+                    UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
+                 &local_tx_hibern8_time_cap);
+    if (ret)
+        goto out;
+
+    ret = ufshcd_dme_peer_get(hba,
+                  UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
+                    UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
+                  &peer_rx_hibern8_time_cap);
+    if (ret)
+        goto out;
+
+    max_hibern8_time = max(local_tx_hibern8_time_cap,
+                   peer_rx_hibern8_time_cap);
+    /* make sure proper unit conversion is applied */
+    tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
+                / PA_HIBERN8_TIME_UNIT_US);
+    ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
+                 tuned_pa_hibern8time);
+out:
+    return ret;
+}
+
+static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
+{
+    if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
+        ufshcd_tune_pa_tactivate(hba);
+        ufshcd_tune_pa_hibern8time(hba);
+    }
+
+    if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
+        /* set 1ms timeout for PA_TACTIVATE */
+        ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
+}
+
 /**
  * ufshcd_probe_hba - probe hba to detect device and initialize
  * @hba: per-adapter instance
...
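ufs_get_device_info() assembles the manufacturer ID from two big-endian descriptor bytes. A hedged worked example using values from this merge's own quirk table (the raw byte values are invented for illustration):

/* Hedged illustration of the big-endian assembly above:
 *   desc_buf[DEVICE_DESC_PARAM_MANF_ID]     = 0x01   (offset 0x18)
 *   desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1] = 0xCE   (offset 0x19)
 *   wmanufacturerid = 0x01 << 8 | 0xCE = 0x1CE  -> UFS_VENDOR_SAMSUNG,
 * so every UFS_FIX(UFS_VENDOR_SAMSUNG, ...) entry whose model matches
 * (or is UFS_ANY_MODEL) contributes its bit to hba->dev_quirks.
 */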
@@ -4482,6 +4984,10 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
ufshcd_init_pwr_info
(
hba
);
/* set the default level for urgent bkops */
hba
->
urgent_bkops_lvl
=
BKOPS_STATUS_PERF_IMPACT
;
hba
->
is_urgent_bkops_lvl_checked
=
false
;
/* UniPro link is active now */
ufshcd_set_link_active
(
hba
);
...
...
@@ -4493,6 +4999,14 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
if
(
ret
)
goto
out
;
ufs_advertise_fixup_device
(
hba
);
ufshcd_tune_unipro_params
(
hba
);
ret
=
ufshcd_set_vccq_rail_unused
(
hba
,
(
hba
->
dev_quirks
&
UFS_DEVICE_NO_VCCQ
)
?
true
:
false
);
if
(
ret
)
goto
out
;
/* UFS device is also active now */
ufshcd_set_ufs_dev_active
(
hba
);
ufshcd_force_reset_auto_bkops
(
hba
);
...
...
@@ -4567,6 +5081,41 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie)
ufshcd_probe_hba
(
hba
);
}
static
enum
blk_eh_timer_return
ufshcd_eh_timed_out
(
struct
scsi_cmnd
*
scmd
)
{
unsigned
long
flags
;
struct
Scsi_Host
*
host
;
struct
ufs_hba
*
hba
;
int
index
;
bool
found
=
false
;
if
(
!
scmd
||
!
scmd
->
device
||
!
scmd
->
device
->
host
)
return
BLK_EH_NOT_HANDLED
;
host
=
scmd
->
device
->
host
;
hba
=
shost_priv
(
host
);
if
(
!
hba
)
return
BLK_EH_NOT_HANDLED
;
spin_lock_irqsave
(
host
->
host_lock
,
flags
);
for_each_set_bit
(
index
,
&
hba
->
outstanding_reqs
,
hba
->
nutrs
)
{
if
(
hba
->
lrb
[
index
].
cmd
==
scmd
)
{
found
=
true
;
break
;
}
}
spin_unlock_irqrestore
(
host
->
host_lock
,
flags
);
/*
* Bypass SCSI error handling and reset the block layer timer if this
* SCSI command was not actually dispatched to UFS driver, otherwise
* let SCSI layer handle the error as usual.
*/
return
found
?
BLK_EH_NOT_HANDLED
:
BLK_EH_RESET_TIMER
;
}
static
struct
scsi_host_template
ufshcd_driver_template
=
{
.
module
=
THIS_MODULE
,
.
name
=
UFSHCD
,
...
...
@@ -4579,6 +5128,7 @@ static struct scsi_host_template ufshcd_driver_template = {
.
eh_abort_handler
=
ufshcd_abort
,
.
eh_device_reset_handler
=
ufshcd_eh_device_reset_handler
,
.
eh_host_reset_handler
=
ufshcd_eh_host_reset_handler
,
.
eh_timed_out
=
ufshcd_eh_timed_out
,
.
this_id
=
-
1
,
.
sg_tablesize
=
SG_ALL
,
.
cmd_per_lun
=
UFSHCD_CMD_PER_LUN
,
...
...
@@ -4607,13 +5157,24 @@ static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
static
inline
int
ufshcd_config_vreg_lpm
(
struct
ufs_hba
*
hba
,
struct
ufs_vreg
*
vreg
)
{
return
ufshcd_config_vreg_load
(
hba
->
dev
,
vreg
,
UFS_VREG_LPM_LOAD_UA
);
if
(
!
vreg
)
return
0
;
else
if
(
vreg
->
unused
)
return
0
;
else
return
ufshcd_config_vreg_load
(
hba
->
dev
,
vreg
,
UFS_VREG_LPM_LOAD_UA
);
}
static
inline
int
ufshcd_config_vreg_hpm
(
struct
ufs_hba
*
hba
,
struct
ufs_vreg
*
vreg
)
{
return
ufshcd_config_vreg_load
(
hba
->
dev
,
vreg
,
vreg
->
max_uA
);
if
(
!
vreg
)
return
0
;
else
if
(
vreg
->
unused
)
return
0
;
else
return
ufshcd_config_vreg_load
(
hba
->
dev
,
vreg
,
vreg
->
max_uA
);
}
static
int
ufshcd_config_vreg
(
struct
device
*
dev
,
...
...
@@ -4648,7 +5209,9 @@ static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
{
int
ret
=
0
;
if
(
!
vreg
||
vreg
->
enabled
)
if
(
!
vreg
)
goto
out
;
else
if
(
vreg
->
enabled
||
vreg
->
unused
)
goto
out
;
ret
=
ufshcd_config_vreg
(
dev
,
vreg
,
true
);
...
...
@@ -4668,7 +5231,9 @@ static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
{
int
ret
=
0
;
if
(
!
vreg
||
!
vreg
->
enabled
)
if
(
!
vreg
)
goto
out
;
else
if
(
!
vreg
->
enabled
||
vreg
->
unused
)
goto
out
;
ret
=
regulator_disable
(
vreg
->
reg
);
...
...
@@ -4774,6 +5339,36 @@ static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
return
0
;
}
static
int
ufshcd_set_vccq_rail_unused
(
struct
ufs_hba
*
hba
,
bool
unused
)
{
int
ret
=
0
;
struct
ufs_vreg_info
*
info
=
&
hba
->
vreg_info
;
if
(
!
info
)
goto
out
;
else
if
(
!
info
->
vccq
)
goto
out
;
if
(
unused
)
{
/* shut off the rail here */
ret
=
ufshcd_toggle_vreg
(
hba
->
dev
,
info
->
vccq
,
false
);
/*
* Mark this rail as no longer used, so it doesn't get enabled
* later by mistake
*/
if
(
!
ret
)
info
->
vccq
->
unused
=
true
;
}
else
{
/*
* rail should have been already enabled hence just make sure
* that unused flag is cleared.
*/
info
->
vccq
->
unused
=
false
;
}
out:
return
ret
;
}
static
int
__ufshcd_setup_clocks
(
struct
ufs_hba
*
hba
,
bool
on
,
bool
skip_ref_clk
)
{
...
...
@@ -5092,11 +5687,21 @@ static int ufshcd_link_state_transition(struct ufs_hba *hba,
 	else if ((req_link_state == UIC_LINK_OFF_STATE) &&
 		 (!check_for_bkops || (check_for_bkops &&
 		  !hba->auto_bkops_enabled))) {
+		/*
+		 * Let's make sure that link is in low power mode, we are doing
+		 * this currently by putting the link in Hibern8. Another way
+		 * to put the link in low power mode is to send the DME end
+		 * point to device and then send the DME reset command to local
+		 * unipro. But putting the link in Hibern8 is much faster.
+		 */
+		ret = ufshcd_uic_hibern8_enter(hba);
+		if (ret)
+			goto out;
 		/*
 		 * Change controller state to "reset state" which
 		 * should also put the link in off/reset state
 		 */
-		ufshcd_hba_stop(hba);
+		ufshcd_hba_stop(hba, true);
 		/*
 		 * TODO: Check if we need any delay to make sure that
 		 * controller is reset
...
@@ -5110,6 +5715,16 @@ static int ufshcd_link_state_transition(struct ufs_hba *hba,
 static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
 {
+	/*
+	 * It seems some UFS devices may keep drawing more than sleep current
+	 * (at least for 500us) from UFS rails (especially from the VCCQ rail).
+	 * To avoid this situation, add a 2ms delay before putting these UFS
+	 * rails in LPM mode.
+	 */
+	if (!ufshcd_is_link_active(hba) &&
+	    hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
+		usleep_range(2000, 2100);
+
 	/*
 	 * If UFS device is either in UFS_Sleep turn off VCC rail to save some
 	 * power.
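UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM comes from the ufs_quirks.h added by this commit. A plausible fixup-table entry that would turn it on for one vendor's parts (the UFS_FIX()/END_FIX macros and the vendor constant are assumptions based on that header's naming):

static struct ufs_dev_fix ufs_fixups[] = {
	/* Illustrative only: delay before LPM for one vendor's devices. */
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),

	END_FIX
};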
...
@@ -5572,7 +6187,7 @@ void ufshcd_remove(struct ufs_hba *hba)
 	scsi_remove_host(hba->host);
 	/* disable interrupts */
 	ufshcd_disable_intr(hba, hba->intr_mask);
-	ufshcd_hba_stop(hba);
+	ufshcd_hba_stop(hba, true);
 
 	scsi_host_put(hba->host);
...
@@ -5836,6 +6451,21 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 	init_waitqueue_head(&hba->dev_cmd.tag_wq);
 
 	ufshcd_init_clk_gating(hba);
+
+	/*
+	 * In order to avoid any spurious interrupt immediately after
+	 * registering UFS controller interrupt handler, clear any pending UFS
+	 * interrupt status and disable all the UFS interrupts.
+	 */
+	ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
+		      REG_INTERRUPT_STATUS);
+	ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
+	/*
+	 * Make sure that UFS interrupts are disabled and any pending interrupt
+	 * status is cleared before registering UFS interrupt handler.
+	 */
+	mb();
+
 	/* IRQ registration */
 	err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
 	if (err) {
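As an aside on the hunk above: ufshcd_readl()/ufshcd_writel() are thin MMIO wrappers already defined in ufshcd.h (reproduced from the tree, not from this diff). Writing the read-back value of REG_INTERRUPT_STATUS clears its write-1-to-clear bits before the handler is registered:

#define ufshcd_writel(hba, val, reg)	\
	writel((val), (hba)->mmio_base + (reg))
#define ufshcd_readl(hba, reg)	\
	readl((hba)->mmio_base + (reg))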
...

drivers/scsi/ufs/ufshcd.h  View file @ b86c75fd

@@ -54,6 +54,7 @@
 #include <linux/clk.h>
 #include <linux/completion.h>
 #include <linux/regulator/consumer.h>
+#include "unipro.h"
 
 #include <asm/irq.h>
 #include <asm/byteorder.h>
...
@@ -383,6 +384,9 @@ struct ufs_init_prefetch {
  * @clk_list_head: UFS host controller clocks list node head
  * @pwr_info: holds current power mode
  * @max_pwr_info: keeps the device max valid pwm
+ * @urgent_bkops_lvl: keeps track of urgent bkops level for device
+ * @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for
+ *  device is known or not.
  */
 struct ufs_hba {
 	void __iomem *mmio_base;
...
@@ -470,6 +474,9 @@ struct ufs_hba {
 	unsigned int quirks;	/* Deviations from standard UFSHCI spec. */
 
+	/* Device deviations from standard UFS device spec. */
+	unsigned int dev_quirks;
+
 	wait_queue_head_t tm_wq;
 	wait_queue_head_t tm_tag_wq;
 	unsigned long tm_condition;
...
@@ -509,6 +516,8 @@ struct ufs_hba {
 	bool wlun_dev_clr_ua;
 
+	/* Number of lanes available (1 or 2) for Rx/Tx */
+	u32 lanes_per_direction;
 	struct ufs_pa_layer_attr pwr_info;
 	struct ufs_pwr_mode_info max_pwr_info;
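The platform glue presumably fills this field in from the "lanes-per-direction" device-tree property; a minimal sketch of the parsing (the helper name and the default of 2 are assumptions consistent with the binding text):

#include <linux/of.h>

/* Sketch: read "lanes-per-direction", falling back to 2 lanes. */
static void ufshcd_parse_lanes_per_direction(struct ufs_hba *hba)
{
	struct device *dev = hba->dev;

	if (of_property_read_u32(dev->of_node, "lanes-per-direction",
				 &hba->lanes_per_direction)) {
		dev_dbg(dev, "lanes-per-direction not set, assuming 2\n");
		hba->lanes_per_direction = 2;
	}
}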
...
@@ -533,6 +542,9 @@ struct ufs_hba {
 	struct devfreq *devfreq;
 	struct ufs_clk_scaling clk_scaling;
 	bool is_sys_suspended;
+
+	enum bkops_status urgent_bkops_lvl;
+	bool is_urgent_bkops_lvl_checked;
 };
 
 /* Returns true if clocks can be gated. Otherwise false */
...
@@ -588,15 +600,9 @@ int ufshcd_alloc_host(struct device *, struct ufs_hba **);
 void ufshcd_dealloc_host(struct ufs_hba *);
 int ufshcd_init(struct ufs_hba *, void __iomem *, unsigned int);
 void ufshcd_remove(struct ufs_hba *);
-
-/**
- * ufshcd_hba_stop - Send controller to reset state
- * @hba: per adapter instance
- */
-static inline void ufshcd_hba_stop(struct ufs_hba *hba)
-{
-	ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
-}
+int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
+			     u32 val, unsigned long interval_us,
+			     unsigned long timeout_ms, bool can_sleep);
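The inline ufshcd_hba_stop() moves out of this header so the driver can poll for the controller actually reaching the disabled state. A sketch of the replacement, consistent with the new two-argument callers in the ufshcd.c hunks above and the ufshcd_wait_for_register() prototype (the poll interval and timeout values are assumptions):

static void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
{
	int err;

	ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
	/* Poll until the enable bit reads back as disabled. */
	err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
				       CONTROLLER_ENABLE, CONTROLLER_DISABLE,
				       10, 1, can_sleep);
	if (err)
		dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
}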
 
 static inline void check_upiu_size(void)
 {
...
@@ -682,11 +688,27 @@ static inline int ufshcd_dme_peer_get(struct ufs_hba *hba,
 	return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER);
 }
 
+int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size);
+
+static inline bool ufshcd_is_hs_mode(struct ufs_pa_layer_attr *pwr_info)
+{
+	return (pwr_info->pwr_rx == FAST_MODE ||
+		pwr_info->pwr_rx == FASTAUTO_MODE) &&
+	       (pwr_info->pwr_tx == FAST_MODE ||
+		pwr_info->pwr_tx == FASTAUTO_MODE);
+}
+
+#define ASCII_STD true
+
+int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf,
+			    u32 size, bool ascii);
+
 /* Expose Query-Request API */
 int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
 	enum flag_idn idn, bool *flag_res);
 int ufshcd_hold(struct ufs_hba *hba, bool async);
 void ufshcd_release(struct ufs_hba *hba);
 
+u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);
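A hedged usage sketch for the new string-descriptor API; the descriptor index variable and the buffer-size macro are assumptions (the macro is named after the QUERY_DESC_*_MAX_SIZE convention in ufs.h):

	/* Hypothetical: read the string descriptor at model_index as ASCII. */
	u8 model[QUERY_DESC_STRING_MAX_SIZE];

	err = ufshcd_read_string_desc(hba, model_index, model,
				      QUERY_DESC_STRING_MAX_SIZE, ASCII_STD);
	if (!err)
		dev_info(hba->dev, "device model: %s\n", model);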
 /* Wrapper functions for safely calling variant operations */
 static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)
...

drivers/scsi/ufs/ufshci.h  View file @ b86c75fd

@@ -92,6 +92,7 @@ enum {
 	UFSHCI_VERSION_10 = 0x00010000, /* 1.0 */
 	UFSHCI_VERSION_11 = 0x00010100, /* 1.1 */
 	UFSHCI_VERSION_20 = 0x00000200, /* 2.0 */
+	UFSHCI_VERSION_21 = 0x00000210, /* 2.1 */
 };
 /*
...
@@ -170,6 +171,8 @@ enum {
 #define UIC_DATA_LINK_LAYER_ERROR		UFS_BIT(31)
 #define UIC_DATA_LINK_LAYER_ERROR_CODE_MASK	0x7FFF
 #define UIC_DATA_LINK_LAYER_ERROR_PA_INIT	0x2000
+#define UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED	0x0001
+#define UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT 0x0002
 
 /* UECN - Host UIC Error Code Network Layer 40h */
 #define UIC_NETWORK_LAYER_ERROR		UFS_BIT(31)
...
@@ -209,6 +212,7 @@ enum {
 /* GenSelectorIndex calculation macros for M-PHY attributes */
 #define UIC_ARG_MPHY_TX_GEN_SEL_INDEX(lane)	(lane)
+#define UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane)	(PA_MAXDATALANES + (lane))
 
 #define UIC_ARG_MIB_SEL(attr, sel)	((((attr) & 0xFFFF) << 16) |\
 					 ((sel) & 0xFFFF))
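These macros build the GenSelectorIndex half of a DME attribute address. A hypothetical read of a per-lane M-PHY attribute through the existing ufshcd_dme_get() wrapper (the attribute choice and lane number are illustrative, not from this diff):

	u32 tx_gear;

	/* Read TX_HSGEAR for local TX lane 0. */
	err = ufshcd_dme_get(hba,
			     UIC_ARG_MIB_SEL(TX_HSGEAR,
					     UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
			     &tx_gear);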
...

drivers/scsi/ufs/unipro.h  View file @ b86c75fd

@@ -15,6 +15,7 @@
 /*
  * M-TX Configuration Attributes
  */
+#define TX_HIBERN8TIME_CAPABILITY	0x000F
 #define TX_MODE				0x0021
 #define TX_HSRATE_SERIES		0x0022
 #define TX_HSGEAR			0x0023
...
@@ -48,8 +49,12 @@
 #define RX_ENTER_HIBERN8		0x00A7
 #define RX_BYPASS_8B10B_ENABLE		0x00A8
 #define RX_TERMINATION_FORCE_ENABLE	0x0089
+#define RX_MIN_ACTIVATETIME_CAPABILITY	0x008F
+#define RX_HIBERN8TIME_CAPABILITY	0x0092
 
 #define is_mphy_tx_attr(attr)		(attr < RX_MODE)
+#define RX_MIN_ACTIVATETIME_UNIT_US	100
+#define HIBERN8TIME_UNIT_US		100
 /*
  * PHY Adapter attributes
  */
...
@@ -70,6 +75,7 @@
 #define PA_MAXRXSPEEDFAST	0x1541
 #define PA_MAXRXSPEEDSLOW	0x1542
 #define PA_TXLINKSTARTUPHS	0x1544
+#define PA_LOCAL_TX_LCC_ENABLE	0x155E
 #define PA_TXSPEEDFAST		0x1565
 #define PA_TXSPEEDSLOW		0x1566
 #define PA_REMOTEVERINFO	0x15A0
...
@@ -110,6 +116,12 @@
 #define PA_STALLNOCONFIGTIME	0x15A3
 #define PA_SAVECONFIGTIME	0x15A4
 
+#define PA_TACTIVATE_TIME_UNIT_US	10
+#define PA_HIBERN8_TIME_UNIT_US		100
+
+/* PHY Adapter Protocol Constants */
+#define PA_MAXDATALANES	4
+
 /* PA power modes */
 enum {
 	FAST_MODE	= 1,
...
@@ -143,6 +155,16 @@ enum ufs_hs_gear_tag {
 	UFS_HS_G3,		/* HS Gear 3 */
 };
 
+enum ufs_unipro_ver {
+	UFS_UNIPRO_VER_RESERVED = 0,
+	UFS_UNIPRO_VER_1_40 = 1, /* UniPro version 1.40 */
+	UFS_UNIPRO_VER_1_41 = 2, /* UniPro version 1.41 */
+	UFS_UNIPRO_VER_1_6 = 3,  /* UniPro version 1.6 */
+	UFS_UNIPRO_VER_MAX = 4,  /* UniPro unsupported version */
+	/* UniPro version field mask in PA_LOCALVERINFO */
+	UFS_UNIPRO_VER_MASK = 0xF,
+};
+
 /*
  * Data Link Layer Attributes
  */
...
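UFS_UNIPRO_VER_MASK suggests how the version is extracted from the PA_LOCALVERINFO attribute. A minimal sketch of the ufshcd_get_local_unipro_ver() helper declared in the ufshcd.h hunk above, under that assumption (the real implementation may instead derive the version from the HCI version register):

u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
{
	u32 ver = 0;

	/* Assumed: PA_LOCALVERINFO carries the version in its low nibble. */
	if (ufshcd_dme_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &ver))
		return UFS_UNIPRO_VER_RESERVED;

	return ver & UFS_UNIPRO_VER_MASK;
}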