Commit 52abca64 authored by Alan Stern, committed by Martin K. Petersen

scsi: block: Do not accept any requests while suspended

blk_queue_enter() accepts BLK_MQ_REQ_PM requests independent of the runtime
power management state. Now that SCSI domain validation no longer depends
on this behavior, modify the behavior of blk_queue_enter() as follows:

   - Do not accept any requests while suspended.

   - Only process power management requests while suspending or resuming.

Submitting BLK_MQ_REQ_PM requests to a device that is runtime suspended
causes runtime-suspended devices not to resume as they should. The request
which should cause a runtime resume instead gets issued directly, without
resuming the device first. Of course the device can't handle it properly,
the I/O fails, and the device remains suspended.

The problem is fixed by checking that the queue's runtime-PM status isn't
RPM_SUSPENDED before allowing a request to be issued, and queuing a
runtime-resume request if it is.  In particular, the inline
blk_pm_request_resume() routine is renamed blk_pm_resume_queue() and the
code is unified by merging the surrounding checks into the routine.  If the
queue isn't set up for runtime PM, or there currently is no restriction on
allowed requests, the request is allowed.  Likewise if the BLK_MQ_REQ_PM
flag is set and the status isn't RPM_SUSPENDED.  Otherwise a runtime resume
is queued and the request is blocked until conditions are more suitable.

[ bvanassche: modified commit message and removed Cc: stable because
  without the previous patches from this series this patch would break
  parallel SCSI domain validation + introduced queue_rpm_status() ]

Link: https://lore.kernel.org/r/20201209052951.16136-9-bvanassche@acm.org
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Hannes Reinecke <hare@suse.de>
Cc: Can Guo <cang@codeaurora.org>
Cc: Stanley Chu <stanley.chu@mediatek.com>
Cc: Ming Lei <ming.lei@redhat.com>
Cc: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Reported-and-tested-by: Martin Kepplinger <martin.kepplinger@puri.sm>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Can Guo <cang@codeaurora.org>
Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent a4d34da7
...@@ -18,6 +18,7 @@ ...@@ -18,6 +18,7 @@
#include <linux/bio.h> #include <linux/bio.h>
#include <linux/blkdev.h> #include <linux/blkdev.h>
#include <linux/blk-mq.h> #include <linux/blk-mq.h>
#include <linux/blk-pm.h>
#include <linux/highmem.h> #include <linux/highmem.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/pagemap.h> #include <linux/pagemap.h>
...@@ -440,7 +441,8 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags) ...@@ -440,7 +441,8 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
* responsible for ensuring that that counter is * responsible for ensuring that that counter is
* globally visible before the queue is unfrozen. * globally visible before the queue is unfrozen.
*/ */
if (pm || !blk_queue_pm_only(q)) { if ((pm && queue_rpm_status(q) != RPM_SUSPENDED) ||
!blk_queue_pm_only(q)) {
success = true; success = true;
} else { } else {
percpu_ref_put(&q->q_usage_counter); percpu_ref_put(&q->q_usage_counter);
...@@ -465,8 +467,7 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags) ...@@ -465,8 +467,7 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
wait_event(q->mq_freeze_wq, wait_event(q->mq_freeze_wq,
(!q->mq_freeze_depth && (!q->mq_freeze_depth &&
(pm || (blk_pm_request_resume(q), blk_pm_resume_queue(pm, q)) ||
!blk_queue_pm_only(q)))) ||
blk_queue_dying(q)); blk_queue_dying(q));
if (blk_queue_dying(q)) if (blk_queue_dying(q))
return -ENODEV; return -ENODEV;
......
...@@ -6,11 +6,14 @@ ...@@ -6,11 +6,14 @@
#include <linux/pm_runtime.h> #include <linux/pm_runtime.h>
#ifdef CONFIG_PM #ifdef CONFIG_PM
static inline void blk_pm_request_resume(struct request_queue *q) static inline int blk_pm_resume_queue(const bool pm, struct request_queue *q)
{ {
if (q->dev && (q->rpm_status == RPM_SUSPENDED || if (!q->dev || !blk_queue_pm_only(q))
q->rpm_status == RPM_SUSPENDING)) return 1; /* Nothing to do */
pm_request_resume(q->dev); if (pm && q->rpm_status != RPM_SUSPENDED)
return 1; /* Request allowed */
pm_request_resume(q->dev);
return 0;
} }
static inline void blk_pm_mark_last_busy(struct request *rq) static inline void blk_pm_mark_last_busy(struct request *rq)
...@@ -44,8 +47,9 @@ static inline void blk_pm_put_request(struct request *rq) ...@@ -44,8 +47,9 @@ static inline void blk_pm_put_request(struct request *rq)
--rq->q->nr_pending; --rq->q->nr_pending;
} }
#else #else
/*
 * !CONFIG_PM stub: without runtime PM there is no suspended state to
 * guard against, so every request is always allowed through.
 */
static inline int blk_pm_resume_queue(const bool pm, struct request_queue *q)
{
	return 1;
}
static inline void blk_pm_mark_last_busy(struct request *rq) static inline void blk_pm_mark_last_busy(struct request *rq)
......
...@@ -692,6 +692,18 @@ static inline bool queue_is_mq(struct request_queue *q) ...@@ -692,6 +692,18 @@ static inline bool queue_is_mq(struct request_queue *q)
return q->mq_ops; return q->mq_ops;
} }
#ifdef CONFIG_PM
/*
 * queue_rpm_status - read the runtime-PM status of a request queue.
 *
 * Callers (e.g. blk_queue_enter()) use this to refuse BLK_MQ_REQ_PM
 * requests while the queue's device is RPM_SUSPENDED.
 */
static inline enum rpm_status queue_rpm_status(struct request_queue *q)
{
	return q->rpm_status;
}
#else
/* Without CONFIG_PM the device is always considered active. */
static inline enum rpm_status queue_rpm_status(struct request_queue *q)
{
	return RPM_ACTIVE;
}
#endif
static inline enum blk_zoned_model static inline enum blk_zoned_model
blk_queue_zoned_model(struct request_queue *q) blk_queue_zoned_model(struct request_queue *q)
{ {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment