Commit e30618a4 authored by Martin K. Petersen

Merge patch series "UFS patches for kernel 6.11"

Bart Van Assche <bvanassche@acm.org> says:

Hi Martin,

Please consider this series of UFS driver patches for the next merge window.

Thank you,

Bart.

Link: https://lore.kernel.org/r/20240708211716.2827751-1-bvanassche@acm.org
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parents 5e9a522b af568c7e
@@ -137,7 +137,6 @@ EXPORT_SYMBOL_GPL(ufshcd_mcq_queue_cfg_addr);
 *
 * MAC - Max Active Commands of the Host Controller (HC)
 * The HC won't send more than this many commands to the device.
 * It is mandatory to implement get_hba_mac() to enable MCQ mode.
 * Calculates and adjusts the queue depth based on the depth
 * supported by the HC and ufs device.
 */
@@ -145,12 +144,21 @@ int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba)
{
	int mac;

	/* Mandatory to implement get_hba_mac() */
	mac = ufshcd_mcq_vops_get_hba_mac(hba);
	if (mac < 0) {
		dev_err(hba->dev, "Failed to get mac, err=%d\n", mac);
		return mac;
	if (!hba->vops || !hba->vops->get_hba_mac) {
		/*
		 * Extract the maximum number of active transfer tasks value
		 * from the host controller capabilities register. This value is
		 * 0-based.
		 */
		hba->capabilities =
			ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
		mac = hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS_MCQ;
		mac++;
	} else {
		mac = hba->vops->get_hba_mac(hba);
	}
	if (mac < 0)
		goto err;

	WARN_ON_ONCE(!hba->dev_info.bqueuedepth);
	/*
@@ -159,6 +167,10 @@ int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba)
	 * shared queuing architecture is enabled.
	 */
	return min_t(int, mac, hba->dev_info.bqueuedepth);

err:
	dev_err(hba->dev, "Failed to get mac, err=%d\n", mac);
	return mac;
}
static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba)
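With .get_hba_mac() now optional, a host driver only needs to implement it when its controller misreports the MCQ command slot count in the capabilities register. Below is a minimal sketch of such an override; the "acme" driver name and the fixed value are hypothetical, while struct ufs_hba_variant_ops and its .get_hba_mac member come from the interface changed in this series:

/*
 * Hypothetical host driver: report the real maximum number of active
 * commands instead of the (incorrect) value in the capabilities register.
 */
static int ufs_acme_get_hba_mac(struct ufs_hba *hba)
{
	/* This controller supports 64 outstanding commands. */
	return 64;
}

static const struct ufs_hba_variant_ops ufs_acme_vops = {
	.name		= "acme",
	.get_hba_mac	= ufs_acme_get_hba_mac,
};

Controllers that advertise the slot count correctly can leave .get_hba_mac NULL and let ufshcd_mcq_decide_queue_depth() derive MAC from REG_CONTROLLER_CAPABILITIES, as shown in the hunk above.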
@@ -415,9 +427,16 @@ EXPORT_SYMBOL_GPL(ufshcd_mcq_enable_esi);
void ufshcd_mcq_enable(struct ufs_hba *hba)
{
	ufshcd_rmwl(hba, MCQ_MODE_SELECT, MCQ_MODE_SELECT, REG_UFS_MEM_CFG);
	hba->mcq_enabled = true;
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_enable);
void ufshcd_mcq_disable(struct ufs_hba *hba)
{
	ufshcd_rmwl(hba, MCQ_MODE_SELECT, 0, REG_UFS_MEM_CFG);
	hba->mcq_enabled = false;
}
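ufshcd_mcq_enable() and ufshcd_mcq_disable() now record the current mode in hba->mcq_enabled, which is what allows the is_mcq_enabled() helper to be removed further down in this series. A hedged sketch of how a caller might use the pair around a controller reset (the function below is illustrative, not part of this series):

/*
 * Illustrative only: fall back to single doorbell mode across a reset,
 * then re-enter MCQ mode. Both helpers keep hba->mcq_enabled in sync.
 */
static void example_reset_in_sdb_mode(struct ufs_hba *hba)
{
	ufshcd_mcq_disable(hba);	/* clears hba->mcq_enabled */

	/* ... reset and reinitialize the host controller here ... */

	ufshcd_mcq_enable(hba);		/* sets hba->mcq_enabled */
}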
void ufshcd_mcq_config_esi(struct ufs_hba *hba, struct msi_msg *msg)
{
	ufshcd_writel(hba, msg->address_lo, REG_UFS_ESILBA);
@@ -64,16 +64,11 @@ void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
			  struct cq_entry *cqe);
int ufshcd_mcq_init(struct ufs_hba *hba);
void ufshcd_mcq_disable(struct ufs_hba *hba);
int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba);
int ufshcd_mcq_memory_alloc(struct ufs_hba *hba);
void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba);
void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds);
u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i);
void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i);
struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
					   struct request *req);
unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
				       struct ufs_hw_queue *hwq);
void ufshcd_mcq_compl_all_cqes_lock(struct ufs_hba *hba,
				    struct ufs_hw_queue *hwq);
bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd);
@@ -255,14 +250,6 @@ static inline int ufshcd_vops_mcq_config_resource(struct ufs_hba *hba)
	return -EOPNOTSUPP;
}
static inline int ufshcd_mcq_vops_get_hba_mac(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->get_hba_mac)
		return hba->vops->get_hba_mac(hba);

	return -EOPNOTSUPP;
}
static inline int ufshcd_mcq_vops_op_runtime_config(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->op_runtime_config
@@ -693,7 +693,7 @@ static void ufs_mtk_mcq_disable_irq(struct ufs_hba *hba)
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	u32 irq, i;

	if (!is_mcq_enabled(hba))
	if (!hba->mcq_enabled)
		return;

	if (host->mcq_nr_intr == 0)
@@ -711,7 +711,7 @@ static void ufs_mtk_mcq_enable_irq(struct ufs_hba *hba)
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	u32 irq, i;

	if (!is_mcq_enabled(hba))
	if (!hba->mcq_enabled)
		return;

	if (host->mcq_nr_intr == 0)
@@ -1308,7 +1308,7 @@ static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
	if (err)
		return err;

	if (is_mcq_enabled(hba)) {
	if (hba->mcq_enabled) {
		ufs_mtk_config_mcq(hba, false);
		ufshcd_mcq_make_queues_operational(hba);
		ufshcd_mcq_config_mac(hba, hba->nutrs);
@@ -73,8 +73,8 @@ enum ufs_event_type {
* @done: UIC command completion
*/
struct uic_command {
	u32 command;
	u32 argument1;
	const u32 command;
	const u32 argument1;
	u32 argument2;
	u32 argument3;
	int cmd_active;
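Since .command and .argument1 are now const, they must be set when a struct uic_command is defined, typically with a designated initializer, and cannot be reassigned afterwards. A minimal sketch of the resulting pattern (UIC_CMD_DME_GET, UIC_ARG_MIB(), and PA_TACTIVATE come from the existing UFS headers; the variable itself is illustrative):

/* Illustrative only: the const members are fixed at initialization time. */
struct uic_command uic_cmd = {
	.command	= UIC_CMD_DME_GET,
	.argument1	= UIC_ARG_MIB(PA_TACTIVATE),
};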
@@ -325,7 +325,9 @@ struct ufs_pwr_mode_info {
* @event_notify: called to notify important events
* @reinit_notify: called to notify reinit of UFSHCD during max gear switch
* @mcq_config_resource: called to configure MCQ platform resources
* @get_hba_mac: called to get vendor specific mac value, mandatory for mcq mode
* @get_hba_mac: reports maximum number of outstanding commands supported by
* the controller. Should be implemented for UFSHCI 4.0 or later
* controllers that are not compliant with the UFSHCI 4.0 specification.
* @op_runtime_config: called to configure operation and runtime register pointers
* @get_outstanding_cqs: called to get outstanding completion queues
* @config_esi: called to configure the Event Specific Interrupt
@@ -1133,11 +1135,6 @@ struct ufs_hw_queue {
#define MCQ_QCFG_SIZE 0x40
static inline bool is_mcq_enabled(struct ufs_hba *hba)
{
	return hba->mcq_enabled;
}
static inline unsigned int ufshcd_mcq_opr_offset(struct ufs_hba *hba,
						 enum ufshcd_mcq_opr opr, int idx)
{
@@ -67,7 +67,8 @@ enum {
/* Controller capability masks */
enum {
	MASK_TRANSFER_REQUESTS_SLOTS = 0x0000001F,
	MASK_TRANSFER_REQUESTS_SLOTS_SDB = 0x0000001F,
	MASK_TRANSFER_REQUESTS_SLOTS_MCQ = 0x000000FF,
	MASK_NUMBER_OUTSTANDING_RTT = 0x0000FF00,
	MASK_TASK_MANAGEMENT_REQUEST_SLOTS = 0x00070000,
	MASK_EHSLUTRD_SUPPORTED = 0x00400000,
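The old MASK_TRANSFER_REQUESTS_SLOTS is split because the slot-count field is wider in MCQ mode (8 bits, 0xFF) than in single doorbell mode (5 bits, 0x1F). A hedged sketch of decoding both 0-based fields, mirroring the fallback added to ufshcd_mcq_decide_queue_depth() above (the helper name is made up):

/* Illustrative helper: command slots advertised by the host controller. */
static u32 example_hc_slots(struct ufs_hba *hba, bool mcq)
{
	u32 cap = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
	u32 mask = mcq ? MASK_TRANSFER_REQUESTS_SLOTS_MCQ :
			 MASK_TRANSFER_REQUESTS_SLOTS_SDB;

	return (cap & mask) + 1;	/* the register field is 0-based */
}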