Commit a557e82e authored by Christoph Hellwig, committed by Jens Axboe

block: add a bdev_fua helper

Add a helper to check the FUA flag based on the block_device instead of
having to poke into the block layer internal request_queue.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Link: https://lore.kernel.org/r/20220415045258.199825-14-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 08e688fd
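In practice the conversion is a one-line simplification at each call site. A minimal before/after sketch (example_can_fua is a hypothetical name, not part of the patch):

#include <linux/blkdev.h>

static bool example_can_fua(struct block_device *bdev)
{
	/* Before: reach into the request_queue internals. */
	/* return test_bit(QUEUE_FLAG_FUA, &bdev_get_queue(bdev)->queue_flags); */

	/* After: ask the block_device directly. */
	return bdev_fua(bdev);
}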
--- a/drivers/block/rnbd/rnbd-srv.c
+++ b/drivers/block/rnbd/rnbd-srv.c
@@ -533,7 +533,6 @@ static void rnbd_srv_fill_msg_open_rsp(struct rnbd_msg_open_rsp *rsp,
 					struct rnbd_srv_sess_dev *sess_dev)
 {
 	struct rnbd_dev *rnbd_dev = sess_dev->rnbd_dev;
-	struct request_queue *q = bdev_get_queue(rnbd_dev->bdev);
 
 	rsp->hdr.type = cpu_to_le16(RNBD_MSG_OPEN_RSP);
 	rsp->device_id =
@@ -560,7 +559,7 @@ static void rnbd_srv_fill_msg_open_rsp(struct rnbd_msg_open_rsp *rsp,
 
 	rsp->cache_policy = 0;
 	if (bdev_write_cache(rnbd_dev->bdev))
 		rsp->cache_policy |= RNBD_WRITEBACK;
-	if (blk_queue_fua(q))
+	if (bdev_fua(rnbd_dev->bdev))
 		rsp->cache_policy |= RNBD_FUA;
 }
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -727,14 +727,13 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
 	if (data_direction == DMA_TO_DEVICE) {
 		struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
-		struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd);
 
 		/*
 		 * Force writethrough using REQ_FUA if a volatile write cache
 		 * is not enabled, or if initiator set the Force Unit Access bit.
 		 */
 		opf = REQ_OP_WRITE;
 		miter_dir = SG_MITER_TO_SG;
-		if (test_bit(QUEUE_FLAG_FUA, &q->queue_flags)) {
+		if (bdev_fua(ib_dev->ibd_bd)) {
 			if (cmd->se_cmd_flags & SCF_FUA)
 				opf |= REQ_FUA;
 			else if (!bdev_write_cache(ib_dev->ibd_bd))
--- a/fs/iomap/direct-io.c
+++ b/fs/iomap/direct-io.c
@@ -265,8 +265,7 @@ static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter,
 		 * the underlying device supports FUA. This allows us to avoid
 		 * cache flushes on IO completion.
 		 */
 		if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
-		    (dio->flags & IOMAP_DIO_WRITE_FUA) &&
-		    blk_queue_fua(bdev_get_queue(iomap->bdev)))
+		    (dio->flags & IOMAP_DIO_WRITE_FUA) && bdev_fua(iomap->bdev))
 			use_fua = true;
 	}
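For context on the iomap hunk above: a REQ_FUA write is on stable media by the time it completes, so a pure overwrite with datasync semantics needs no follow-up cache flush. A minimal sketch of that trade-off, assuming an already-built bio (example_datasync_write is a hypothetical name, not kernel API):

#include <linux/bio.h>
#include <linux/blkdev.h>

static int example_datasync_write(struct block_device *bdev, struct bio *bio)
{
	int ret;

	if (bdev_fua(bdev)) {
		/* One I/O: the device persists the data before completing. */
		bio->bi_opf = REQ_OP_WRITE | REQ_FUA;
		return submit_bio_wait(bio);
	}

	/* Two steps: write, then flush the volatile write cache. */
	bio->bi_opf = REQ_OP_WRITE;
	ret = submit_bio_wait(bio);
	if (ret)
		return ret;
	return blkdev_issue_flush(bdev);
}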
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -602,7 +602,6 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
 			     REQ_FAILFAST_DRIVER))
 #define blk_queue_quiesced(q)	test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
 #define blk_queue_pm_only(q)	atomic_read(&(q)->pm_only)
-#define blk_queue_fua(q)	test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags)
 #define blk_queue_registered(q)	test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
 #define blk_queue_nowait(q)	test_bit(QUEUE_FLAG_NOWAIT, &(q)->queue_flags)
 
@@ -1336,6 +1335,11 @@ static inline bool bdev_write_cache(struct block_device *bdev)
 	return test_bit(QUEUE_FLAG_WC, &bdev_get_queue(bdev)->queue_flags);
 }
 
+static inline bool bdev_fua(struct block_device *bdev)
+{
+	return test_bit(QUEUE_FLAG_FUA, &bdev_get_queue(bdev)->queue_flags);
+}
+
 static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev)
 {
 	struct request_queue *q = bdev_get_queue(bdev);
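The new helper reads the same QUEUE_FLAG_FUA bit the removed macro did; it only moves the bdev_get_queue() lookup behind an interface consistent with bdev_write_cache(). A hypothetical consumer combining the two (example_report_cache is not part of the patch):

#include <linux/blkdev.h>
#include <linux/printk.h>

static void example_report_cache(struct block_device *bdev)
{
	pr_info("%pg: write cache %s, FUA %s\n", bdev,
		bdev_write_cache(bdev) ? "enabled" : "disabled",
		bdev_fua(bdev) ? "supported" : "not supported");
}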