Commit bc5fef01 authored by Martin K. Petersen

Merge patch series "ufs: core: mcq: Add ufshcd_abort() and error handler support in MCQ mode"

Bao D. Nguyen <quic_nguyenb@quicinc.com> says:

This patch series enables support for ufshcd_abort() and error handler
in MCQ mode.

Link: https://lore.kernel.org/r/cover.1685396241.git.quic_nguyenb@quicinc.com
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parents 14ce2c26 ab248643
@@ -12,6 +12,10 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include "ufshcd-priv.h"
+#include <linux/delay.h>
+#include <scsi/scsi_cmnd.h>
+#include <linux/bitfield.h>
+#include <linux/iopoll.h>

 #define MAX_QUEUE_SUP GENMASK(7, 0)
 #define UFS_MCQ_MIN_RW_QUEUES 2
@@ -27,6 +31,9 @@
 #define MCQ_ENTRY_SIZE_IN_DWORD 8
 #define CQE_UCD_BA GENMASK_ULL(63, 7)

+/* Max mcq register polling time in microseconds */
+#define MCQ_POLL_US 500000
+
 static int rw_queue_count_set(const char *val, const struct kernel_param *kp)
 {
        return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_RW_QUEUES,
@@ -269,16 +276,38 @@ static int ufshcd_mcq_get_tag(struct ufs_hba *hba,
 }

 static void ufshcd_mcq_process_cqe(struct ufs_hba *hba,
                                    struct ufs_hw_queue *hwq)
 {
        struct cq_entry *cqe = ufshcd_mcq_cur_cqe(hwq);
        int tag = ufshcd_mcq_get_tag(hba, hwq, cqe);

-       ufshcd_compl_one_cqe(hba, tag, cqe);
+       if (cqe->command_desc_base_addr) {
+               ufshcd_compl_one_cqe(hba, tag, cqe);
+               /* After processing the cqe, mark it as an empty (invalid) entry */
+               cqe->command_desc_base_addr = 0;
+       }
 }

-unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
-                                        struct ufs_hw_queue *hwq)
+void ufshcd_mcq_compl_all_cqes_lock(struct ufs_hba *hba,
+                                   struct ufs_hw_queue *hwq)
+{
+       unsigned long flags;
+       u32 entries = hwq->max_entries;
+
+       spin_lock_irqsave(&hwq->cq_lock, flags);
+       while (entries > 0) {
+               ufshcd_mcq_process_cqe(hba, hwq);
+               ufshcd_mcq_inc_cq_head_slot(hwq);
+               entries--;
+       }
+
+       ufshcd_mcq_update_cq_tail_slot(hwq);
+       hwq->cq_head_slot = hwq->cq_tail_slot;
+       spin_unlock_irqrestore(&hwq->cq_lock, flags);
+}
+
+static unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
+                                               struct ufs_hw_queue *hwq)
 {
        unsigned long completed_reqs = 0;
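Why ufshcd_mcq_process_cqe() now checks command_desc_base_addr: ufshcd_mcq_compl_all_cqes_lock() sweeps every slot of the ring, not just the slots between head and tail, so already-completed (or never-used) entries are zeroed and skipped on the sweep. A minimal sketch of how error handling could drain every hardware queue with the new helper (illustrative only; hba->uhq and hba->nr_hw_queues are the queue-array fields this series uses):

static void example_drain_all_cqs(struct ufs_hba *hba)
{
        int i;

        for (i = 0; i < hba->nr_hw_queues; i++)
                /* Full-ring sweep; zeroed (invalid) CQEs are skipped */
                ufshcd_mcq_compl_all_cqes_lock(hba, &hba->uhq[i]);
}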
@@ -294,7 +323,6 @@ unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,

        return completed_reqs;
 }
-EXPORT_SYMBOL_GPL(ufshcd_mcq_poll_cqe_nolock);

 unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
                                       struct ufs_hw_queue *hwq)
@@ -307,6 +335,7 @@ unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,

        return completed_reqs;
 }
+EXPORT_SYMBOL_GPL(ufshcd_mcq_poll_cqe_lock);

 void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
 {
@@ -419,6 +448,7 @@ int ufshcd_mcq_init(struct ufs_hba *hba)
                hwq->max_entries = hba->nutrs;
                spin_lock_init(&hwq->sq_lock);
                spin_lock_init(&hwq->cq_lock);
+               mutex_init(&hwq->sq_mutex);
        }

        /* The very first HW queue serves device commands */
@@ -429,3 +459,222 @@ int ufshcd_mcq_init(struct ufs_hba *hba)
        host->host_tagset = 1;
        return 0;
 }
+
+static int ufshcd_mcq_sq_stop(struct ufs_hba *hba, struct ufs_hw_queue *hwq)
+{
+       void __iomem *reg;
+       u32 id = hwq->id, val;
+       int err;
+
+       writel(SQ_STOP, mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTC);
+       reg = mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTS;
+       err = read_poll_timeout(readl, val, val & SQ_STS, 20,
+                               MCQ_POLL_US, false, reg);
+       if (err)
+               dev_err(hba->dev, "%s: failed. hwq-id=%d, err=%d\n",
+                       __func__, id, err);
+       return err;
+}
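The stop/start helpers share one idiom: write a run-time control bit, then poll the matching status register. read_poll_timeout() from <linux/iopoll.h> re-evaluates val = readl(reg) every sleep_us microseconds (20 here) until the condition holds or timeout_us (MCQ_POLL_US, 500 ms) expires, returning 0 or -ETIMEDOUT. Because it may sleep between reads, callers must be in sleepable context, which is one reason the SQ is guarded by a mutex rather than a spinlock. A self-contained sketch of the idiom (EXAMPLE_READY is a made-up bit, not part of this series):

#include <linux/iopoll.h>

#define EXAMPLE_READY  BIT(0)

static int example_wait_ready(struct ufs_hba *hba, void __iomem *reg)
{
        u32 val;
        int err;

        /* Re-read 'reg' every 20 us, for at most MCQ_POLL_US total */
        err = read_poll_timeout(readl, val, val & EXAMPLE_READY, 20,
                                MCQ_POLL_US, false, reg);
        if (err)        /* -ETIMEDOUT; 'val' holds the last value read */
                dev_err(hba->dev, "%s: not ready, sts=0x%x\n", __func__, val);
        return err;
}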
+
+static int ufshcd_mcq_sq_start(struct ufs_hba *hba, struct ufs_hw_queue *hwq)
+{
+       void __iomem *reg;
+       u32 id = hwq->id, val;
+       int err;
+
+       writel(SQ_START, mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTC);
+       reg = mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTS;
+       err = read_poll_timeout(readl, val, !(val & SQ_STS), 20,
+                               MCQ_POLL_US, false, reg);
+       if (err)
+               dev_err(hba->dev, "%s: failed. hwq-id=%d, err=%d\n",
+                       __func__, id, err);
+       return err;
+}
+
+/**
+ * ufshcd_mcq_sq_cleanup - Clean up submission queue resources
+ * associated with the pending command.
+ * @hba: per adapter instance.
+ * @task_tag: The command's task tag.
+ *
+ * Returns 0 for success; error code otherwise.
+ */
+int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag)
+{
+       struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
+       struct scsi_cmnd *cmd = lrbp->cmd;
+       struct ufs_hw_queue *hwq;
+       void __iomem *reg, *opr_sqd_base;
+       u32 nexus, id, val;
+       int err;
+
+       if (task_tag != hba->nutrs - UFSHCD_NUM_RESERVED) {
+               if (!cmd)
+                       return -EINVAL;
+               hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
+       } else {
+               hwq = hba->dev_cmd_queue;
+       }
+
+       id = hwq->id;
+
+       mutex_lock(&hwq->sq_mutex);
+
+       /* stop the SQ fetching before working on it */
+       err = ufshcd_mcq_sq_stop(hba, hwq);
+       if (err)
+               goto unlock;
+
+       /* SQCTI = EXT_IID, IID, LUN, Task Tag */
+       nexus = lrbp->lun << 8 | task_tag;
+       opr_sqd_base = mcq_opr_base(hba, OPR_SQD, id);
+       writel(nexus, opr_sqd_base + REG_SQCTI);
+
+       /* SQRTCy.ICU = 1 */
+       writel(SQ_ICU, opr_sqd_base + REG_SQRTC);
+
+       /* Poll SQRTSy.CUS = 1. Return result from SQRTSy.RTC */
+       reg = opr_sqd_base + REG_SQRTS;
+       err = read_poll_timeout(readl, val, val & SQ_CUS, 20,
+                               MCQ_POLL_US, false, reg);
+       if (err)
+               dev_err(hba->dev, "%s: failed. hwq=%d, tag=%d err=%ld\n",
+                       __func__, id, task_tag,
+                       FIELD_GET(SQ_ICU_ERR_CODE_MASK, readl(reg)));
+
+       if (ufshcd_mcq_sq_start(hba, hwq))
+               err = -ETIMEDOUT;
+
+unlock:
+       mutex_unlock(&hwq->sq_mutex);
+       return err;
+}
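ufshcd_mcq_sq_cleanup() is the MCQ counterpart of clearing a pending task via the legacy UTRLCLR doorbell; the actual call site lives in a part of this series collapsed in this view. A hedged sketch of how the dispatch could look (the legacy helper name is illustrative, not real API):

static int example_legacy_clear_cmd(struct ufs_hba *hba, int task_tag)
{
        return -EOPNOTSUPP;     /* stand-in for the SDB doorbell path */
}

static int example_clear_cmd(struct ufs_hba *hba, int task_tag)
{
        if (is_mcq_enabled(hba))
                /* MCQ: stop SQ, program SQCTI, issue ICU, restart SQ */
                return ufshcd_mcq_sq_cleanup(hba, task_tag);

        return example_legacy_clear_cmd(hba, task_tag);
}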
+
+/**
+ * ufshcd_mcq_nullify_sqe - Nullify the submission queue entry.
+ * Write the sqe's Command Type to 0xF. The host controller will not
+ * fetch any sqe with Command Type = 0xF.
+ *
+ * @utrd: UTP Transfer Request Descriptor to be nullified.
+ */
+static void ufshcd_mcq_nullify_sqe(struct utp_transfer_req_desc *utrd)
+{
+       u32 dword_0;
+
+       dword_0 = le32_to_cpu(utrd->header.dword_0);
+       dword_0 &= ~UPIU_COMMAND_TYPE_MASK;
+       dword_0 |= FIELD_PREP(UPIU_COMMAND_TYPE_MASK, 0xF);
+       utrd->header.dword_0 = cpu_to_le32(dword_0);
+}
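The UTRD's DW0 carries the Command Type in bits 31:28 (UPIU_COMMAND_TYPE_MASK); 0xF is not a valid type, so the controller leaves such an entry unfetched. Worked example of the DW0 update, using a hypothetical starting value:

u32 dword_0 = 0x10000022;       /* hypothetical DW0: Command Type = 0x1 */

dword_0 &= ~UPIU_COMMAND_TYPE_MASK;                     /* 0x00000022 */
dword_0 |= FIELD_PREP(UPIU_COMMAND_TYPE_MASK, 0xF);     /* 0xF0000022 */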
+
+/**
+ * ufshcd_mcq_sqe_search - Search for the command in the submission queue
+ * If the command is in the submission queue and not issued to the device yet,
+ * nullify the sqe so the host controller will skip fetching the sqe.
+ *
+ * @hba: per adapter instance.
+ * @hwq: Hardware Queue to be searched.
+ * @task_tag: The command's task tag.
+ *
+ * Returns true if the SQE containing the command is present in the SQ
+ * (not fetched by the controller); returns false if the SQE is not in the SQ.
+ */
+static bool ufshcd_mcq_sqe_search(struct ufs_hba *hba,
+                                 struct ufs_hw_queue *hwq, int task_tag)
+{
+       struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
+       struct utp_transfer_req_desc *utrd;
+       u32 mask = hwq->max_entries - 1;
+       __le64 cmd_desc_base_addr;
+       bool ret = false;
+       u64 addr, match;
+       u32 sq_head_slot;
+
+       mutex_lock(&hwq->sq_mutex);
+
+       ufshcd_mcq_sq_stop(hba, hwq);
+       sq_head_slot = ufshcd_mcq_get_sq_head_slot(hwq);
+       if (sq_head_slot == hwq->sq_tail_slot)
+               goto out;
+
+       cmd_desc_base_addr = lrbp->utr_descriptor_ptr->command_desc_base_addr;
+       addr = le64_to_cpu(cmd_desc_base_addr) & CQE_UCD_BA;
+
+       while (sq_head_slot != hwq->sq_tail_slot) {
+               utrd = hwq->sqe_base_addr +
+                       sq_head_slot * sizeof(struct utp_transfer_req_desc);
+               match = le64_to_cpu(utrd->command_desc_base_addr) & CQE_UCD_BA;
+               if (addr == match) {
+                       ufshcd_mcq_nullify_sqe(utrd);
+                       ret = true;
+                       goto out;
+               }
+               sq_head_slot = (sq_head_slot + 1) & mask;
+       }
+
+out:
+       ufshcd_mcq_sq_start(hba, hwq);
+       mutex_unlock(&hwq->sq_mutex);
+       return ret;
+}
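The walk runs from the hardware head (the next slot the controller would fetch) to the driver tail, matching on the UCD base address since that uniquely identifies the command's descriptor. The wrap step (sq_head_slot + 1) & mask assumes max_entries is a power of two, so max_entries - 1 is an all-ones mask and the AND implements modulo wrap-around. For example, with a queue depth of 32:

u32 max_entries = 32, mask = max_entries - 1;   /* mask = 0b11111 */
u32 a = (31 + 1) & mask;        /* wraps to slot 0 */
u32 b = (5 + 1) & mask;         /* advances to slot 6 */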
+
+/**
+ * ufshcd_mcq_abort - Abort the command in MCQ.
+ * @cmd: The command to be aborted.
+ *
+ * Returns SUCCESS or FAILED error codes
+ */
+int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
+{
+       struct Scsi_Host *host = cmd->device->host;
+       struct ufs_hba *hba = shost_priv(host);
+       int tag = scsi_cmd_to_rq(cmd)->tag;
+       struct ufshcd_lrb *lrbp = &hba->lrb[tag];
+       struct ufs_hw_queue *hwq;
+       int err = FAILED;
+
+       if (!ufshcd_cmd_inflight(lrbp->cmd)) {
+               dev_err(hba->dev,
+                       "%s: skip abort. cmd at tag %d already completed.\n",
+                       __func__, tag);
+               goto out;
+       }
+
+       /* Skip task abort in case previous aborts failed and report failure */
+       if (lrbp->req_abort_skip) {
+               dev_err(hba->dev, "%s: skip abort. tag %d failed earlier\n",
+                       __func__, tag);
+               goto out;
+       }
+
+       hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
+
+       if (ufshcd_mcq_sqe_search(hba, hwq, tag)) {
+               /*
+                * Failure. The command should not be "stuck" in SQ for
+                * a long time which resulted in command being aborted.
+                */
+               dev_err(hba->dev, "%s: cmd found in sq. hwq=%d, tag=%d\n",
+                       __func__, hwq->id, tag);
+               goto out;
+       }
+
+       /*
+        * The command is not in the submission queue, and it is not
+        * in the completion queue either. Query the device to see if
+        * the command is being processed in the device.
+        */
+       if (ufshcd_try_to_abort_task(hba, tag)) {
+               dev_err(hba->dev, "%s: device abort failed %d\n", __func__, err);
+               lrbp->req_abort_skip = true;
+               goto out;
+       }
+
+       err = SUCCESS;
+       if (ufshcd_cmd_inflight(lrbp->cmd))
+               ufshcd_release_scsi_cmd(hba, lrbp);
+
+out:
+       return err;
+}
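The abort path thus has three outcomes: the SQE is still sitting in the SQ (nullified, but reported as FAILED, since a command should never sit unfetched long enough to trigger an abort); the SQE was already fetched, so the device is asked to abort it via ufshcd_try_to_abort_task(); or the command completed in the meantime. A hedged sketch of the hand-off from the generic SCSI EH callback (the real hook-up is in a part of the series collapsed in this view):

static int example_eh_abort(struct scsi_cmnd *cmd)
{
        struct ufs_hba *hba = shost_priv(cmd->device->host);

        if (is_mcq_enabled(hba))
                return ufshcd_mcq_abort(cmd);

        return FAILED;  /* legacy single-doorbell abort path elided */
}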
@@ -71,12 +71,18 @@ void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds);
 void ufshcd_mcq_select_mcq_mode(struct ufs_hba *hba);
 u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i);
 void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i);
-unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
-                                        struct ufs_hw_queue *hwq);
 struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
                                           struct request *req);
 unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
                                       struct ufs_hw_queue *hwq);
+void ufshcd_mcq_compl_all_cqes_lock(struct ufs_hba *hba,
+                                   struct ufs_hw_queue *hwq);
+bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd);
+int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag);
+int ufshcd_mcq_abort(struct scsi_cmnd *cmd);
+int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag);
+void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
+                            struct ufshcd_lrb *lrbp);

 #define UFSHCD_MCQ_IO_QUEUE_OFFSET 1
 #define SD_ASCII_STD true
@@ -401,4 +407,12 @@ static inline struct cq_entry *ufshcd_mcq_cur_cqe(struct ufs_hw_queue *q)
        return cqe + q->cq_head_slot;
 }

+static inline u32 ufshcd_mcq_get_sq_head_slot(struct ufs_hw_queue *q)
+{
+       u32 val = readl(q->mcq_sq_head);
+
+       return val / sizeof(struct utp_transfer_req_desc);
+}
+
 #endif /* _UFSHCD_PRIV_H_ */
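SQHP holds a byte offset into the SQ ring, and a UTRD is 32 bytes (DW0-DW7), so the division converts the register value into a slot index. For instance, with a hypothetical register read:

u32 head_offset = 0x80; /* hypothetical SQHP value */
u32 slot = head_offset / sizeof(struct utp_transfer_req_desc);  /* 4 */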
This diff is collapsed.
@@ -1556,7 +1556,7 @@ static irqreturn_t ufs_qcom_mcq_esi_handler(int irq, void *__hba)
                struct ufs_hw_queue *hwq = &hba->uhq[id];

                ufshcd_mcq_write_cqis(hba, 0x1, id);
-               ufshcd_mcq_poll_cqe_nolock(hba, hwq);
+               ufshcd_mcq_poll_cqe_lock(hba, hwq);

                return IRQ_HANDLED;
        }
...
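The ESI handler switches to the locked poll because completion queues can now also be drained from the error handler, so unsynchronized polling is no longer safe (and the nolock variant is no longer exported). For reference, the exported locked variant is essentially the (now static) unlocked walk taken under cq_lock:

unsigned long example_poll_cqe_lock(struct ufs_hba *hba,
                                    struct ufs_hw_queue *hwq)
{
        unsigned long completed_reqs, flags;

        spin_lock_irqsave(&hwq->cq_lock, flags);
        completed_reqs = ufshcd_mcq_poll_cqe_nolock(hba, hwq);
        spin_unlock_irqrestore(&hwq->cq_lock, flags);

        return completed_reqs;
}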
@@ -1087,6 +1087,7 @@ struct ufs_hba {
  * @cq_tail_slot: current slot to which CQ tail pointer is pointing
  * @cq_head_slot: current slot to which CQ head pointer is pointing
  * @cq_lock: Synchronize between multiple polling instances
+ * @sq_mutex: prevent submission queue concurrent access
  */
 struct ufs_hw_queue {
        void __iomem *mcq_sq_head;
@@ -1105,6 +1106,8 @@ struct ufs_hw_queue {
        u32 cq_tail_slot;
        u32 cq_head_slot;
        spinlock_t cq_lock;
+       /* prevent concurrent access to submission queue */
+       struct mutex sq_mutex;
 };

 static inline bool is_mcq_enabled(struct ufs_hba *hba)
@@ -1240,7 +1243,7 @@ void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val);
 void ufshcd_hba_stop(struct ufs_hba *hba);
 void ufshcd_schedule_eh_work(struct ufs_hba *hba);
 void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i);
-unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
+unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
                                        struct ufs_hw_queue *hwq);
 void ufshcd_mcq_enable_esi(struct ufs_hba *hba);
 void ufshcd_mcq_config_esi(struct ufs_hba *hba, struct msi_msg *msg);
...
@@ -99,6 +99,9 @@ enum {
 enum {
        REG_SQHP        = 0x0,
        REG_SQTP        = 0x4,
+       REG_SQRTC       = 0x8,
+       REG_SQCTI       = 0xC,
+       REG_SQRTS       = 0x10,
 };

 enum {
@@ -111,12 +114,26 @@
        REG_CQIE        = 0x4,
 };

+enum {
+       SQ_START        = 0x0,
+       SQ_STOP         = 0x1,
+       SQ_ICU          = 0x2,
+};
+
+enum {
+       SQ_STS  = 0x1,
+       SQ_CUS  = 0x2,
+};
+
+#define SQ_ICU_ERR_CODE_MASK   GENMASK(7, 4)
+#define UPIU_COMMAND_TYPE_MASK GENMASK(31, 28)
+
 #define UFS_MASK(mask, offset) ((mask) << (offset))

 /* UFS Version 08h */
 #define MINOR_VERSION_NUM_MASK UFS_MASK(0xFFFF, 0)
 #define MAJOR_VERSION_NUM_MASK UFS_MASK(0xFFFF, 16)

+#define UFSHCD_NUM_RESERVED    1
+
 /*
  * Controller UFSHCI version
  * - 2.x and newer use the following scheme:
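SQRTS reports the ICU outcome: SQ_CUS (bit 1) signals completion, and bits 7:4 (SQ_ICU_ERR_CODE_MASK) carry the result code that ufshcd_mcq_sq_cleanup() logs. Extraction with FIELD_GET, using a hypothetical register value:

u32 sqrts = 0x32;       /* hypothetical: CUS set, result code 0x3 */
u32 code = FIELD_GET(SQ_ICU_ERR_CODE_MASK, sqrts);      /* 0x3 */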
@@ -503,8 +520,7 @@ struct request_desc_header {
 /**
  * struct utp_transfer_req_desc - UTP Transfer Request Descriptor (UTRD)
  * @header: UTRD header DW-0 to DW-3
- * @command_desc_base_addr_lo: UCD base address low DW-4
- * @command_desc_base_addr_hi: UCD base address high DW-5
+ * @command_desc_base_addr: UCD base address DW 4-5
  * @response_upiu_length: response UPIU length DW-6
  * @response_upiu_offset: response UPIU offset DW-6
  * @prd_table_length: Physical region descriptor length DW-7
@@ -516,8 +532,7 @@ struct utp_transfer_req_desc {
        struct request_desc_header header;

        /* DW 4-5*/
-       __le32 command_desc_base_addr_lo;
-       __le32 command_desc_base_addr_hi;
+       __le64 command_desc_base_addr;

        /* DW 6 */
        __le16 response_upiu_length;
...