Commit 7252a360 authored by Bart Van Assche, committed by Martin K. Petersen

scsi: ufs: Avoid busy-waiting by eliminating tag conflicts

Instead of tracking which tags are in use in the ufs_hba.lrb_in_use
bitmask, rely on the block layer tag allocation mechanism. This patch
removes the following busy-waiting loop if ufshcd_issue_devman_upiu_cmd()
and the block layer accidentally allocate the same tag for a SCSI request:

 * ufshcd_queuecommand() returns SCSI_MLQUEUE_HOST_BUSY.

 * The SCSI core requeues the SCSI command.

Cc: Can Guo <cang@codeaurora.org>
Cc: Stanley Chu <stanley.chu@mediatek.com>
Cc: Avri Altman <avri.altman@wdc.com>
Cc: Tomas Winkler <tomas.winkler@intel.com>
Link: https://lore.kernel.org/r/20191209181309.196233-2-bvanassche@acm.org
Tested-by: Bean Huo <beanhuo@micron.com>
Reviewed-by: Avri Altman <avri.altman@wdc.com>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent bd407261
...@@ -497,8 +497,8 @@ static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap) ...@@ -497,8 +497,8 @@ static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
static void ufshcd_print_host_state(struct ufs_hba *hba) static void ufshcd_print_host_state(struct ufs_hba *hba)
{ {
dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state); dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
dev_err(hba->dev, "lrb in use=0x%lx, outstanding reqs=0x%lx tasks=0x%lx\n", dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
hba->lrb_in_use, hba->outstanding_reqs, hba->outstanding_tasks); hba->outstanding_reqs, hba->outstanding_tasks);
dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n", dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
hba->saved_err, hba->saved_uic_err); hba->saved_err, hba->saved_uic_err);
dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n", dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
...@@ -1273,6 +1273,24 @@ static int ufshcd_devfreq_target(struct device *dev, ...@@ -1273,6 +1273,24 @@ static int ufshcd_devfreq_target(struct device *dev,
return ret; return ret;
} }
/*
 * blk_mq_tagset_busy_iter() callback: count every in-flight request.
 *
 * @req:      the busy request being visited (not inspected here)
 * @priv:     pointer to an int accumulator supplied by the caller
 * @reserved: true for reserved tags; none are expected on this tag set
 *
 * Always returns false so the iteration continues over all busy tags.
 */
static bool ufshcd_is_busy(struct request *req, void *priv, bool reserved)
{
	int *count = priv;

	/* The UFS tag set has no reserved tags; seeing one is a driver bug. */
	WARN_ON_ONCE(reserved);
	++*count;
	return false;
}
/*
 * ufshcd_any_tag_in_use - test whether any command tag is currently allocated
 * @hba: per-adapter instance
 *
 * Walks the busy requests of the tag set backing hba->cmd_queue and reports
 * whether at least one request is in progress. Replaces the old
 * hba->lrb_in_use bitmask check.
 */
static bool ufshcd_any_tag_in_use(struct ufs_hba *hba)
{
	int count = 0;

	blk_mq_tagset_busy_iter(hba->cmd_queue->tag_set, ufshcd_is_busy,
				&count);
	return count > 0;
}
static int ufshcd_devfreq_get_dev_status(struct device *dev, static int ufshcd_devfreq_get_dev_status(struct device *dev,
struct devfreq_dev_status *stat) struct devfreq_dev_status *stat)
...@@ -1619,7 +1637,7 @@ static void ufshcd_gate_work(struct work_struct *work) ...@@ -1619,7 +1637,7 @@ static void ufshcd_gate_work(struct work_struct *work)
if (hba->clk_gating.active_reqs if (hba->clk_gating.active_reqs
|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
|| hba->lrb_in_use || hba->outstanding_tasks || ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks
|| hba->active_uic_cmd || hba->uic_async_done) || hba->active_uic_cmd || hba->uic_async_done)
goto rel_lock; goto rel_lock;
...@@ -1673,7 +1691,7 @@ static void __ufshcd_release(struct ufs_hba *hba) ...@@ -1673,7 +1691,7 @@ static void __ufshcd_release(struct ufs_hba *hba)
if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
|| hba->lrb_in_use || hba->outstanding_tasks || ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks
|| hba->active_uic_cmd || hba->uic_async_done || hba->active_uic_cmd || hba->uic_async_done
|| ufshcd_eh_in_progress(hba)) || ufshcd_eh_in_progress(hba))
return; return;
...@@ -2443,22 +2461,9 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) ...@@ -2443,22 +2461,9 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
hba->req_abort_count = 0; hba->req_abort_count = 0;
/* acquire the tag to make sure device cmds don't use it */
if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
/*
* Dev manage command in progress, requeue the command.
* Requeuing the command helps in cases where the request *may*
* find different tag instead of waiting for dev manage command
* completion.
*/
err = SCSI_MLQUEUE_HOST_BUSY;
goto out;
}
err = ufshcd_hold(hba, true); err = ufshcd_hold(hba, true);
if (err) { if (err) {
err = SCSI_MLQUEUE_HOST_BUSY; err = SCSI_MLQUEUE_HOST_BUSY;
clear_bit_unlock(tag, &hba->lrb_in_use);
goto out; goto out;
} }
WARN_ON(hba->clk_gating.state != CLKS_ON); WARN_ON(hba->clk_gating.state != CLKS_ON);
...@@ -2479,7 +2484,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) ...@@ -2479,7 +2484,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
err = ufshcd_map_sg(hba, lrbp); err = ufshcd_map_sg(hba, lrbp);
if (err) { if (err) {
lrbp->cmd = NULL; lrbp->cmd = NULL;
clear_bit_unlock(tag, &hba->lrb_in_use);
goto out; goto out;
} }
/* Make sure descriptors are ready before ringing the doorbell */ /* Make sure descriptors are ready before ringing the doorbell */
...@@ -2626,44 +2630,6 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba, ...@@ -2626,44 +2630,6 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
return err; return err;
} }
/**
 * ufshcd_get_dev_cmd_tag - Get device management command tag
 * @hba: per-adapter instance
 * @tag_out: pointer to variable with available slot value
 *
 * Get a free slot and lock it until device management command
 * completes.
 *
 * Returns false if free slot is unavailable for locking, else
 * return true with tag value in @tag.
 */
static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
{
	unsigned long free_slots;
	int tag;

	if (!tag_out)
		return false;

	/*
	 * Pick the highest-numbered free slot and try to lock it. A racing
	 * allocator may grab the same bit first, in which case
	 * test_and_set_bit_lock() fails and we rescan the bitmask.
	 */
	do {
		free_slots = ~hba->lrb_in_use;
		tag = find_last_bit(&free_slots, hba->nutrs);
		if (tag >= hba->nutrs)
			return false;
	} while (test_and_set_bit_lock(tag, &hba->lrb_in_use));

	*tag_out = tag;
	return true;
}
/* Release a device management command tag taken by ufshcd_get_dev_cmd_tag().
 * clear_bit_unlock() provides the release ordering that pairs with the
 * acquire in test_and_set_bit_lock().
 */
static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
{
clear_bit_unlock(tag, &hba->lrb_in_use);
}
/** /**
* ufshcd_exec_dev_cmd - API for sending device management requests * ufshcd_exec_dev_cmd - API for sending device management requests
* @hba: UFS hba * @hba: UFS hba
...@@ -2676,6 +2642,8 @@ static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag) ...@@ -2676,6 +2642,8 @@ static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
enum dev_cmd_type cmd_type, int timeout) enum dev_cmd_type cmd_type, int timeout)
{ {
struct request_queue *q = hba->cmd_queue;
struct request *req;
struct ufshcd_lrb *lrbp; struct ufshcd_lrb *lrbp;
int err; int err;
int tag; int tag;
...@@ -2689,7 +2657,11 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, ...@@ -2689,7 +2657,11 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
* Even though we use wait_event() which sleeps indefinitely, * Even though we use wait_event() which sleeps indefinitely,
* the maximum wait time is bounded by SCSI request timeout. * the maximum wait time is bounded by SCSI request timeout.
*/ */
wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag)); req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
if (IS_ERR(req))
return PTR_ERR(req);
tag = req->tag;
WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
init_completion(&wait); init_completion(&wait);
lrbp = &hba->lrb[tag]; lrbp = &hba->lrb[tag];
...@@ -2714,8 +2686,7 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, ...@@ -2714,8 +2686,7 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
err ? "query_complete_err" : "query_complete"); err ? "query_complete_err" : "query_complete");
out_put_tag: out_put_tag:
ufshcd_put_dev_cmd_tag(hba, tag); blk_put_request(req);
wake_up(&hba->dev_cmd.tag_wq);
up_read(&hba->clk_scaling_lock); up_read(&hba->clk_scaling_lock);
return err; return err;
} }
...@@ -4856,7 +4827,6 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, ...@@ -4856,7 +4827,6 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
cmd->result = result; cmd->result = result;
/* Mark completed command as NULL in LRB */ /* Mark completed command as NULL in LRB */
lrbp->cmd = NULL; lrbp->cmd = NULL;
clear_bit_unlock(index, &hba->lrb_in_use);
/* Do not touch lrbp after scsi done */ /* Do not touch lrbp after scsi done */
cmd->scsi_done(cmd); cmd->scsi_done(cmd);
__ufshcd_release(hba); __ufshcd_release(hba);
...@@ -4878,9 +4848,6 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, ...@@ -4878,9 +4848,6 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
hba->outstanding_reqs ^= completed_reqs; hba->outstanding_reqs ^= completed_reqs;
ufshcd_clk_scaling_update_busy(hba); ufshcd_clk_scaling_update_busy(hba);
/* we might have free'd some tags above */
wake_up(&hba->dev_cmd.tag_wq);
} }
/** /**
...@@ -5863,6 +5830,8 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba, ...@@ -5863,6 +5830,8 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
enum dev_cmd_type cmd_type, enum dev_cmd_type cmd_type,
enum query_opcode desc_op) enum query_opcode desc_op)
{ {
struct request_queue *q = hba->cmd_queue;
struct request *req;
struct ufshcd_lrb *lrbp; struct ufshcd_lrb *lrbp;
int err = 0; int err = 0;
int tag; int tag;
...@@ -5872,7 +5841,11 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba, ...@@ -5872,7 +5841,11 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
down_read(&hba->clk_scaling_lock); down_read(&hba->clk_scaling_lock);
wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag)); req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
if (IS_ERR(req))
return PTR_ERR(req);
tag = req->tag;
WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
init_completion(&wait); init_completion(&wait);
lrbp = &hba->lrb[tag]; lrbp = &hba->lrb[tag];
...@@ -5948,8 +5921,7 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba, ...@@ -5948,8 +5921,7 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
} }
} }
ufshcd_put_dev_cmd_tag(hba, tag); blk_put_request(req);
wake_up(&hba->dev_cmd.tag_wq);
up_read(&hba->clk_scaling_lock); up_read(&hba->clk_scaling_lock);
return err; return err;
} }
...@@ -6244,9 +6216,6 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) ...@@ -6244,9 +6216,6 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
hba->lrb[tag].cmd = NULL; hba->lrb[tag].cmd = NULL;
spin_unlock_irqrestore(host->host_lock, flags); spin_unlock_irqrestore(host->host_lock, flags);
clear_bit_unlock(tag, &hba->lrb_in_use);
wake_up(&hba->dev_cmd.tag_wq);
out: out:
if (!err) { if (!err) {
err = SUCCESS; err = SUCCESS;
...@@ -8250,6 +8219,7 @@ void ufshcd_remove(struct ufs_hba *hba) ...@@ -8250,6 +8219,7 @@ void ufshcd_remove(struct ufs_hba *hba)
{ {
ufs_bsg_remove(hba); ufs_bsg_remove(hba);
ufs_sysfs_remove_nodes(hba->dev); ufs_sysfs_remove_nodes(hba->dev);
blk_cleanup_queue(hba->cmd_queue);
scsi_remove_host(hba->host); scsi_remove_host(hba->host);
/* disable interrupts */ /* disable interrupts */
ufshcd_disable_intr(hba, hba->intr_mask); ufshcd_disable_intr(hba, hba->intr_mask);
...@@ -8413,9 +8383,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) ...@@ -8413,9 +8383,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
init_rwsem(&hba->clk_scaling_lock); init_rwsem(&hba->clk_scaling_lock);
/* Initialize device management tag acquire wait queue */
init_waitqueue_head(&hba->dev_cmd.tag_wq);
ufshcd_init_clk_gating(hba); ufshcd_init_clk_gating(hba);
ufshcd_init_clk_scaling(hba); ufshcd_init_clk_scaling(hba);
...@@ -8449,6 +8416,12 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) ...@@ -8449,6 +8416,12 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
goto exit_gating; goto exit_gating;
} }
hba->cmd_queue = blk_mq_init_queue(&hba->host->tag_set);
if (IS_ERR(hba->cmd_queue)) {
err = PTR_ERR(hba->cmd_queue);
goto out_remove_scsi_host;
}
/* Reset the attached device */ /* Reset the attached device */
ufshcd_vops_device_reset(hba); ufshcd_vops_device_reset(hba);
...@@ -8458,7 +8431,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) ...@@ -8458,7 +8431,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
dev_err(hba->dev, "Host controller enable failed\n"); dev_err(hba->dev, "Host controller enable failed\n");
ufshcd_print_host_regs(hba); ufshcd_print_host_regs(hba);
ufshcd_print_host_state(hba); ufshcd_print_host_state(hba);
goto out_remove_scsi_host; goto free_cmd_queue;
} }
/* /*
...@@ -8495,6 +8468,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) ...@@ -8495,6 +8468,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
return 0; return 0;
free_cmd_queue:
blk_cleanup_queue(hba->cmd_queue);
out_remove_scsi_host: out_remove_scsi_host:
scsi_remove_host(hba->host); scsi_remove_host(hba->host);
exit_gating: exit_gating:
......
...@@ -212,13 +212,11 @@ struct ufs_query { ...@@ -212,13 +212,11 @@ struct ufs_query {
* @type: device management command type - Query, NOP OUT * @type: device management command type - Query, NOP OUT
* @lock: lock to allow one command at a time * @lock: lock to allow one command at a time
* @complete: internal commands completion * @complete: internal commands completion
* @tag_wq: wait queue until free command slot is available
*/ */
struct ufs_dev_cmd { struct ufs_dev_cmd {
enum dev_cmd_type type; enum dev_cmd_type type;
struct mutex lock; struct mutex lock;
struct completion *complete; struct completion *complete;
wait_queue_head_t tag_wq;
struct ufs_query query; struct ufs_query query;
}; };
...@@ -483,7 +481,7 @@ struct ufs_stats { ...@@ -483,7 +481,7 @@ struct ufs_stats {
* @host: Scsi_Host instance of the driver * @host: Scsi_Host instance of the driver
* @dev: device handle * @dev: device handle
* @lrb: local reference block * @lrb: local reference block
* @lrb_in_use: lrb in use * @cmd_queue: Used to allocate command tags from hba->host->tag_set.
* @outstanding_tasks: Bits representing outstanding task requests * @outstanding_tasks: Bits representing outstanding task requests
* @outstanding_reqs: Bits representing outstanding transfer requests * @outstanding_reqs: Bits representing outstanding transfer requests
* @capabilities: UFS Controller Capabilities * @capabilities: UFS Controller Capabilities
...@@ -541,6 +539,7 @@ struct ufs_hba { ...@@ -541,6 +539,7 @@ struct ufs_hba {
struct Scsi_Host *host; struct Scsi_Host *host;
struct device *dev; struct device *dev;
struct request_queue *cmd_queue;
/* /*
* This field is to keep a reference to "scsi_device" corresponding to * This field is to keep a reference to "scsi_device" corresponding to
* "UFS device" W-LU. * "UFS device" W-LU.
...@@ -561,7 +560,6 @@ struct ufs_hba { ...@@ -561,7 +560,6 @@ struct ufs_hba {
u32 ahit; u32 ahit;
struct ufshcd_lrb *lrb; struct ufshcd_lrb *lrb;
unsigned long lrb_in_use;
unsigned long outstanding_tasks; unsigned long outstanding_tasks;
unsigned long outstanding_reqs; unsigned long outstanding_reqs;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment