Commit 8cafdb5a authored by Pankaj Raghav's avatar Pankaj Raghav Committed by Jens Axboe

block: adapt blk_mq_plug() to not plug for writes that require a zone lock

The current implementation of blk_mq_plug() disables plugging for all
operations that involve a transfer to the device, as we only check the
last bit via the op_is_write() function.

Modify blk_mq_plug() to disable plugging only for REQ_OP_WRITE and
REQ_OP_WRITE_ZEROES as they might require a zone lock.
Suggested-by: Christoph Hellwig <hch@lst.de>
Suggested-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Pankaj Raghav <p.raghav@samsung.com>
Reviewed-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Link: https://lore.kernel.org/r/20220929074745.103073-2-p.raghav@samsung.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent dfdcbf1f
...@@ -312,7 +312,8 @@ static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap) ...@@ -312,7 +312,8 @@ static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
static inline struct blk_plug *blk_mq_plug( struct bio *bio) static inline struct blk_plug *blk_mq_plug( struct bio *bio)
{ {
/* Zoned block device write operation case: do not plug the BIO */ /* Zoned block device write operation case: do not plug the BIO */
if (bdev_is_zoned(bio->bi_bdev) && op_is_write(bio_op(bio))) if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
bdev_op_is_zoned_write(bio->bi_bdev, bio_op(bio)))
return NULL; return NULL;
/* /*
......
...@@ -63,13 +63,10 @@ bool blk_req_needs_zone_write_lock(struct request *rq) ...@@ -63,13 +63,10 @@ bool blk_req_needs_zone_write_lock(struct request *rq)
if (!rq->q->disk->seq_zones_wlock) if (!rq->q->disk->seq_zones_wlock)
return false; return false;
switch (req_op(rq)) { if (bdev_op_is_zoned_write(rq->q->disk->part0, req_op(rq)))
case REQ_OP_WRITE_ZEROES:
case REQ_OP_WRITE:
return blk_rq_zone_is_seq(rq); return blk_rq_zone_is_seq(rq);
default:
return false; return false;
}
} }
EXPORT_SYMBOL_GPL(blk_req_needs_zone_write_lock); EXPORT_SYMBOL_GPL(blk_req_needs_zone_write_lock);
......
...@@ -1304,6 +1304,15 @@ static inline bool bdev_is_zoned(struct block_device *bdev) ...@@ -1304,6 +1304,15 @@ static inline bool bdev_is_zoned(struct block_device *bdev)
return false; return false;
} }
/*
 * bdev_op_is_zoned_write() - check if @op is a write to a zoned device
 * @bdev: target block device
 * @op:   operation of the BIO/request being issued
 *
 * Returns true only when @bdev is zoned and @op is one of the write
 * operations that the commit singles out (REQ_OP_WRITE and
 * REQ_OP_WRITE_ZEROES), i.e. the operations that might require a zone
 * write lock; false for every other device/operation combination.
 */
static inline bool bdev_op_is_zoned_write(struct block_device *bdev,
					  blk_opf_t op)
{
	/* Non-zoned devices never need zone write serialization. */
	if (!bdev_is_zoned(bdev))
		return false;

	switch (op) {
	case REQ_OP_WRITE:
	case REQ_OP_WRITE_ZEROES:
		return true;
	default:
		return false;
	}
}
static inline sector_t bdev_zone_sectors(struct block_device *bdev) static inline sector_t bdev_zone_sectors(struct block_device *bdev)
{ {
struct request_queue *q = bdev_get_queue(bdev); struct request_queue *q = bdev_get_queue(bdev);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment