Commit a4d34da7 authored by Bart Van Assche, committed by Martin K. Petersen

scsi: block: Remove RQF_PREEMPT and BLK_MQ_REQ_PREEMPT

Remove the flags RQF_PREEMPT and BLK_MQ_REQ_PREEMPT, since they are no
longer used by any kernel code.

Link: https://lore.kernel.org/r/20201209052951.16136-8-bvanassche@acm.org
Cc: Can Guo <cang@codeaurora.org>
Cc: Stanley Chu <stanley.chu@mediatek.com>
Cc: Alan Stern <stern@rowland.harvard.edu>
Cc: Ming Lei <ming.lei@redhat.com>
Cc: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Cc: Martin Kepplinger <martin.kepplinger@puri.sm>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Jens Axboe <axboe@kernel.dk>
Reviewed-by: Can Guo <cang@codeaurora.org>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent e6044f71
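
The diff touches the block core (blk-core.c, blk-mq.c), the blk-mq debugfs
helpers, and the public headers. For context, a before/after sketch of a
caller that needs a power-management request; this fragment is illustrative
only and not part of this commit (earlier patches in this series already
converted the real callers, and q is assumed to be a valid struct
request_queue pointer):

	/* before the series: blk_get_request(q, REQ_OP_DRV_IN, BLK_MQ_REQ_PREEMPT) */
	struct request *rq = blk_get_request(q, REQ_OP_DRV_IN, BLK_MQ_REQ_PM);

	if (IS_ERR(rq))
		return PTR_ERR(rq);
	/* at_head = true: PM requests go ahead of queued normal I/O */
	blk_execute_rq(q, NULL, rq, true);
	blk_put_request(rq);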
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -424,11 +424,11 @@ EXPORT_SYMBOL(blk_cleanup_queue);
 /**
  * blk_queue_enter() - try to increase q->q_usage_counter
  * @q: request queue pointer
- * @flags: BLK_MQ_REQ_NOWAIT, BLK_MQ_REQ_PM and/or BLK_MQ_REQ_PREEMPT
+ * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PM
  */
 int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 {
-	const bool pm = flags & (BLK_MQ_REQ_PM | BLK_MQ_REQ_PREEMPT);
+	const bool pm = flags & BLK_MQ_REQ_PM;
 
 	while (true) {
 		bool success = false;
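
With RQF_PREEMPT gone, whether a request may enter a pm-only queue now
depends on BLK_MQ_REQ_PM alone. A condensed model of the admission check
that follows in this function (an illustrative paraphrase, not verbatim
kernel source, and ignoring the runtime-suspend status test that sits
alongside it):

	/* illustrative: admission check for a queue in pm-only mode */
	static bool may_enter(blk_mq_req_flags_t flags, bool queue_pm_only)
	{
		const bool pm = flags & BLK_MQ_REQ_PM;

		/* only BLK_MQ_REQ_PM requests bypass blk_set_pm_only() gating */
		return pm || !queue_pm_only;
	}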
@@ -630,8 +630,7 @@ struct request *blk_get_request(struct request_queue *q, unsigned int op,
 	struct request *req;
 
 	WARN_ON_ONCE(op & REQ_NOWAIT);
-	WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PM |
-			       BLK_MQ_REQ_PREEMPT));
+	WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PM));
 
 	req = blk_mq_alloc_request(q, op, flags);
 	if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
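
The tightened mask means an unconverted caller that still passes the old
bit 3 now trips the warning. A minimal sketch (the mask macro is
hypothetical, introduced here only for illustration):

	/* hypothetical name for the two bits blk_get_request() still accepts */
	#define GET_REQUEST_VALID_FLAGS	(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PM)

	/* a stale (1 << 3) from an unconverted caller would now warn: */
	WARN_ON_ONCE(((__force blk_mq_req_flags_t)(1 << 3)) &
		     ~GET_REQUEST_VALID_FLAGS);	/* fires once */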
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -297,7 +297,6 @@ static const char *const rqf_name[] = {
 	RQF_NAME(MIXED_MERGE),
 	RQF_NAME(MQ_INFLIGHT),
 	RQF_NAME(DONTPREP),
-	RQF_NAME(PREEMPT),
 	RQF_NAME(FAILED),
 	RQF_NAME(QUIET),
 	RQF_NAME(ELVPRIV),
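
rqf_name[] is the table debugfs uses to decode rq->rq_flags into names, so
it has to stay in sync with the flag definitions. For reference, the
RQF_NAME() macro above this table expands along these lines (as defined in
blk-mq-debugfs.c around the time of this commit):

	/* index the table by the flag's bit number, store its name */
	#define RQF_NAME(name) [ilog2((__force u32)RQF_##name)] = #name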
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -294,8 +294,6 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 	rq->cmd_flags = data->cmd_flags;
 	if (data->flags & BLK_MQ_REQ_PM)
 		rq->rq_flags |= RQF_PM;
-	if (data->flags & BLK_MQ_REQ_PREEMPT)
-		rq->rq_flags |= RQF_PREEMPT;
 	if (blk_queue_io_stat(data->q))
 		rq->rq_flags |= RQF_IO_STAT;
 	INIT_LIST_HEAD(&rq->queuelist);
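
The allocation-flag to request-flag translation thus collapses to a single
power-management case. Condensed as a standalone helper (this helper does
not exist in the tree; it only restates the two lines that remain above):

	static req_flags_t pm_rq_flags(blk_mq_req_flags_t flags)
	{
		/* BLK_MQ_REQ_PM is now the only PM marker carried over */
		return (flags & BLK_MQ_REQ_PM) ? RQF_PM : 0;
	}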
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -446,8 +446,6 @@ enum {
 	BLK_MQ_REQ_RESERVED	= (__force blk_mq_req_flags_t)(1 << 1),
 	/* set RQF_PM */
 	BLK_MQ_REQ_PM		= (__force blk_mq_req_flags_t)(1 << 2),
-	/* set RQF_PREEMPT */
-	BLK_MQ_REQ_PREEMPT	= (__force blk_mq_req_flags_t)(1 << 3),
 };
 
 struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
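
After this hunk the public allocation-flag space occupies three bits; a
summary of the resulting layout (semantics paraphrased from the
definitions, not quoted comments):

	BLK_MQ_REQ_NOWAIT	/* 1 << 0: fail instead of blocking for a tag */
	BLK_MQ_REQ_RESERVED	/* 1 << 1: allocate from the reserved tag pool */
	BLK_MQ_REQ_PM		/* 1 << 2: set RQF_PM on the request */
	/* 1 << 3, formerly BLK_MQ_REQ_PREEMPT, is free for reuse */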
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -79,9 +79,6 @@ typedef __u32 __bitwise req_flags_t;
 #define RQF_MQ_INFLIGHT		((__force req_flags_t)(1 << 6))
 /* don't call prep for this one */
 #define RQF_DONTPREP		((__force req_flags_t)(1 << 7))
-/* set for "ide_preempt" requests and also for requests for which the SCSI
-   "quiesce" state must be ignored. */
-#define RQF_PREEMPT		((__force req_flags_t)(1 << 8))
 /* vaguely specified driver internal error. Ignored by the block layer */
 #define RQF_FAILED		((__force req_flags_t)(1 << 10))
 /* don't warn about errors */
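
As the jump from 1 << 7 straight to 1 << 10 shows, bit 9 already had no
user, so this removal leaves bits 8 and 9 both unallocated in req_flags_t.
An illustrative compile-time check of that invariant (not part of the
tree):

	/* illustrative: document the gap left between DONTPREP and FAILED */
	static_assert((__force u32)RQF_DONTPREP == (1 << 7) &&
		      (__force u32)RQF_FAILED == (1 << 10),
		      "req_flags_t bits 8 and 9 are currently unused");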
@@ -430,8 +427,7 @@ struct request_queue {
 	unsigned long		queue_flags;
 
 	/*
 	 * Number of contexts that have called blk_set_pm_only(). If this
-	 * counter is above zero then only RQF_PM and RQF_PREEMPT requests are
-	 * processed.
+	 * counter is above zero then only RQF_PM requests are processed.
 	 */
 	atomic_t		pm_only;
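
The pm_only counter itself is driven by blk_set_pm_only() and
blk_clear_pm_only(). A condensed sketch of the usage pattern (modeled
loosely on the SCSI quiesce/resume path; simplified, not verbatim):

	/*
	 * Enter pm-only mode: new requests without RQF_PM now block in
	 * blk_queue_enter() until the state is cleared.
	 */
	blk_set_pm_only(q);

	/* ... issue RQF_PM requests, e.g. to suspend the device ... */

	/* leave pm-only mode: blocked submitters are woken up */
	blk_clear_pm_only(q);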