Commit ccb23dc3 authored by Po-Wen Kao, committed by Martin K. Petersen

scsi: ufs: core: Remove dedicated hwq for dev command

This commit depends on "scsi: ufs: core: mcq: Fix the incorrect OCS value
for the device command" which takes care of the OCS value of dev commands
in MCQ mode.

With that fix in place, it is now safe to share the first hwq between device commands and I/O requests.
Tested-by: Po-Wen Kao <powen.kao@mediatek.com>
Signed-off-by: Po-Wen Kao <powen.kao@mediatek.com>
Link: https://lore.kernel.org/r/20230610021553.1213-3-powen.kao@mediatek.com
Reviewed-by: Stanley Chu <stanley.chu@mediatek.com>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 0fef6bb7
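
To illustrate the change described in the commit message above: before this patch, uhq[0] was reserved for device commands and block-layer hardware-queue index i was translated to uhq[i + 1] via UFSHCD_MCQ_IO_QUEUE_OFFSET; afterwards uhq[0] is shared by device commands and I/O, and index i maps straight to uhq[i]. The following is a minimal stand-alone sketch of that mapping, not kernel code; the array size and helper names are made up for illustration.

/* Stand-alone sketch of the hwq index mapping (not kernel code). */
#include <stdio.h>

#define NR_HW_QUEUES 4                  /* example value only */

struct ufs_hw_queue { int id; };
static struct ufs_hw_queue uhq[NR_HW_QUEUES];

/* Before: uhq[0] was dedicated to device commands, so block-layer
 * hwq index i mapped to uhq[i + UFSHCD_MCQ_IO_QUEUE_OFFSET], i.e. +1. */
static struct ufs_hw_queue *req_to_hwq_old(int blk_hwq)
{
	return &uhq[blk_hwq + 1];
}

/* After: uhq[0] is shared by device commands and I/O, so the
 * block-layer index maps directly onto the queue array. */
static struct ufs_hw_queue *req_to_hwq_new(int blk_hwq)
{
	return &uhq[blk_hwq];
}

int main(void)
{
	for (int i = 0; i < NR_HW_QUEUES; i++)
		uhq[i].id = i;

	printf("blk hwq 0 -> uhq[%d] before, uhq[%d] after\n",
	       req_to_hwq_old(0)->id, req_to_hwq_new(0)->id);
	return 0;
}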
drivers/ufs/core/ufs-mcq.c
@@ -20,12 +20,10 @@
 #define MAX_QUEUE_SUP GENMASK(7, 0)
 #define UFS_MCQ_MIN_RW_QUEUES 2
 #define UFS_MCQ_MIN_READ_QUEUES 0
-#define UFS_MCQ_NUM_DEV_CMD_QUEUES 1
 #define UFS_MCQ_MIN_POLL_QUEUES 0
 #define QUEUE_EN_OFFSET 31
 #define QUEUE_ID_OFFSET 16
-#define MAX_DEV_CMD_ENTRIES 2
 #define MCQ_CFG_MAC_MASK GENMASK(16, 8)
 #define MCQ_QCFG_SIZE 0x40
 #define MCQ_ENTRY_SIZE_IN_DWORD 8
@@ -115,8 +113,7 @@ struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
 	u32 utag = blk_mq_unique_tag(req);
 	u32 hwq = blk_mq_unique_tag_to_hwq(utag);
 
-	/* uhq[0] is used to serve device commands */
-	return &hba->uhq[hwq + UFSHCD_MCQ_IO_QUEUE_OFFSET];
+	return &hba->uhq[hwq];
 }
 
 /**
@@ -159,8 +156,7 @@ static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba)
 
 	hba_maxq = FIELD_GET(MAX_QUEUE_SUP, hba->mcq_capabilities);
 
-	tot_queues = UFS_MCQ_NUM_DEV_CMD_QUEUES + read_queues + poll_queues +
-		     rw_queues;
+	tot_queues = read_queues + poll_queues + rw_queues;
 
 	if (hba_maxq < tot_queues) {
 		dev_err(hba->dev, "Total queues (%d) exceeds HC capacity (%d)\n",
@@ -168,7 +164,7 @@ static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba)
 		return -EOPNOTSUPP;
 	}
 
-	rem = hba_maxq - UFS_MCQ_NUM_DEV_CMD_QUEUES;
+	rem = hba_maxq;
 
 	if (rw_queues) {
 		hba->nr_queues[HCTX_TYPE_DEFAULT] = rw_queues;
@@ -194,7 +190,7 @@ static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba)
 	for (i = 0; i < HCTX_MAX_TYPES; i++)
 		host->nr_hw_queues += hba->nr_queues[i];
 
-	hba->nr_hw_queues = host->nr_hw_queues + UFS_MCQ_NUM_DEV_CMD_QUEUES;
+	hba->nr_hw_queues = host->nr_hw_queues;
 	return 0;
 }
 
@@ -444,8 +440,6 @@ int ufshcd_mcq_init(struct ufs_hba *hba)
 
 	/* The very first HW queue serves device commands */
 	hba->dev_cmd_queue = &hba->uhq[0];
-	/* Give dev_cmd_queue the minimal number of entries */
-	hba->dev_cmd_queue->max_entries = MAX_DEV_CMD_ENTRIES;
 
 	host->host_tagset = 1;
 	return 0;
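
A small worked example of the queue accounting change in ufshcd_mcq_config_nr_queues() above, with made-up numbers (a controller exposing 8 hardware queues, rw_queues=6, read_queues=0, poll_queues=2); the point is that the queue formerly reserved for device commands becomes available for I/O:

/* Worked example of the MCQ queue accounting; numbers are made up. */
#include <stdio.h>

int main(void)
{
	int hba_maxq = 8;               /* queues supported by the controller */
	int rw_queues = 6, read_queues = 0, poll_queues = 2;

	/* Before: one queue was reserved for device commands. */
	int tot_old = 1 + read_queues + poll_queues + rw_queues;        /* = 9 */
	/* After: the first I/O queue also serves device commands. */
	int tot_new = read_queues + poll_queues + rw_queues;            /* = 8 */

	printf("old: %d > %d -> -EOPNOTSUPP; new: %d <= %d -> accepted\n",
	       tot_old, hba_maxq, tot_new, hba_maxq);
	return 0;
}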
drivers/ufs/core/ufshcd-priv.h
@@ -84,7 +84,6 @@ int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag);
 void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
 			     struct ufshcd_lrb *lrbp);
 
-#define UFSHCD_MCQ_IO_QUEUE_OFFSET 1
 #define SD_ASCII_STD true
 #define SD_RAW false
 int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
drivers/ufs/core/ufshcd.c
@@ -5503,7 +5503,7 @@ static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
 	struct ufs_hw_queue *hwq;
 
 	if (is_mcq_enabled(hba)) {
-		hwq = &hba->uhq[queue_num + UFSHCD_MCQ_IO_QUEUE_OFFSET];
+		hwq = &hba->uhq[queue_num];
 
 		return ufshcd_mcq_poll_cqe_lock(hba, hwq);
 	}
@@ -5557,7 +5557,7 @@ static void ufshcd_mcq_compl_pending_transfer(struct ufs_hba *hba,
 
 		utag = blk_mq_unique_tag(scsi_cmd_to_rq(cmd));
 		hwq_num = blk_mq_unique_tag_to_hwq(utag);
-		hwq = &hba->uhq[hwq_num + UFSHCD_MCQ_IO_QUEUE_OFFSET];
+		hwq = &hba->uhq[hwq_num];
 
 		if (force_compl) {
 			ufshcd_mcq_compl_all_cqes_lock(hba, hwq);
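
For context on the indexing in the ufshcd.c hunks above: the block layer packs the hardware-queue number into the upper 16 bits of a request's unique tag (blk_mq_unique_tag()/blk_mq_unique_tag_to_hwq() in include/linux/blk-mq.h), which is why the decoded hwq number can now index hba->uhq[] directly with no +1 offset. A simplified stand-alone sketch of that encoding; make_unique_tag() is a made-up stand-in mirroring the real helpers:

/* Simplified stand-in for the blk-mq unique-tag encoding. */
#include <stdio.h>

#define BLK_MQ_UNIQUE_TAG_BITS 16
#define BLK_MQ_UNIQUE_TAG_MASK ((1U << BLK_MQ_UNIQUE_TAG_BITS) - 1)

static unsigned int make_unique_tag(unsigned int hwq, unsigned int tag)
{
	return (hwq << BLK_MQ_UNIQUE_TAG_BITS) | (tag & BLK_MQ_UNIQUE_TAG_MASK);
}

int main(void)
{
	unsigned int utag = make_unique_tag(3, 42);

	/* With the dedicated dev-cmd queue gone, this hwq number is a
	 * direct index into hba->uhq[]. */
	printf("hwq=%u tag=%u\n",
	       utag >> BLK_MQ_UNIQUE_TAG_BITS,
	       utag & BLK_MQ_UNIQUE_TAG_MASK);
	return 0;
}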