Commit 6deacb3b authored by Christoph Hellwig, committed by Jens Axboe

block: simplify blk_mq_plug

Drop the unused q argument, and invert the check to move the exception
into a branch and the regular path as the normal return.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Reviewed-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Link: https://lore.kernel.org/r/20220706070350.1703384-5-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent edd1dbc8
...@@ -719,7 +719,7 @@ void submit_bio_noacct(struct bio *bio) ...@@ -719,7 +719,7 @@ void submit_bio_noacct(struct bio *bio)
might_sleep(); might_sleep();
plug = blk_mq_plug(q, bio); plug = blk_mq_plug(bio);
if (plug && plug->nowait) if (plug && plug->nowait)
bio->bi_opf |= REQ_NOWAIT; bio->bi_opf |= REQ_NOWAIT;
......
...@@ -1051,7 +1051,7 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio, ...@@ -1051,7 +1051,7 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
struct blk_plug *plug; struct blk_plug *plug;
struct request *rq; struct request *rq;
plug = blk_mq_plug(q, bio); plug = blk_mq_plug(bio);
if (!plug || rq_list_empty(plug->mq_list)) if (!plug || rq_list_empty(plug->mq_list))
return false; return false;
......
...@@ -2808,7 +2808,7 @@ static void bio_set_ioprio(struct bio *bio) ...@@ -2808,7 +2808,7 @@ static void bio_set_ioprio(struct bio *bio)
void blk_mq_submit_bio(struct bio *bio) void blk_mq_submit_bio(struct bio *bio)
{ {
struct request_queue *q = bdev_get_queue(bio->bi_bdev); struct request_queue *q = bdev_get_queue(bio->bi_bdev);
struct blk_plug *plug = blk_mq_plug(q, bio); struct blk_plug *plug = blk_mq_plug(bio);
const int is_sync = op_is_sync(bio->bi_opf); const int is_sync = op_is_sync(bio->bi_opf);
struct request *rq; struct request *rq;
unsigned int nr_segs = 1; unsigned int nr_segs = 1;
......
...@@ -294,7 +294,6 @@ static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap) ...@@ -294,7 +294,6 @@ static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
/* /*
* blk_mq_plug() - Get caller context plug * blk_mq_plug() - Get caller context plug
* @q: request queue
* @bio : the bio being submitted by the caller context * @bio : the bio being submitted by the caller context
* *
* Plugging, by design, may delay the insertion of BIOs into the elevator in * Plugging, by design, may delay the insertion of BIOs into the elevator in
...@@ -305,23 +304,22 @@ static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap) ...@@ -305,23 +304,22 @@ static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
* order. While this is not a problem with regular block devices, this ordering * order. While this is not a problem with regular block devices, this ordering
* change can cause write BIO failures with zoned block devices as these * change can cause write BIO failures with zoned block devices as these
* require sequential write patterns to zones. Prevent this from happening by * require sequential write patterns to zones. Prevent this from happening by
* ignoring the plug state of a BIO issuing context if the target request queue * ignoring the plug state of a BIO issuing context if it is for a zoned block
* is for a zoned block device and the BIO to plug is a write operation. * device and the BIO to plug is a write operation.
* *
* Return current->plug if the bio can be plugged and NULL otherwise * Return current->plug if the bio can be plugged and NULL otherwise
*/ */
static inline struct blk_plug *blk_mq_plug(struct request_queue *q, static inline struct blk_plug *blk_mq_plug( struct bio *bio)
struct bio *bio)
{ {
/* Zoned block device write operation case: do not plug the BIO */
if (bdev_is_zoned(bio->bi_bdev) && op_is_write(bio_op(bio)))
return NULL;
/* /*
* For regular block devices or read operations, use the context plug * For regular block devices or read operations, use the context plug
* which may be NULL if blk_start_plug() was not executed. * which may be NULL if blk_start_plug() was not executed.
*/ */
if (!bdev_is_zoned(bio->bi_bdev) || !op_is_write(bio_op(bio))) return current->plug;
return current->plug;
/* Zoned block device write operation case: do not plug the BIO */
return NULL;
} }
/* Free all requests on the list */ /* Free all requests on the list */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment