Commit 0a3140ea authored by Chaitanya Kulkarni, committed by Jens Axboe

block: pass a block_device and opf to blk_next_bio

All callers need to set the block_device and operation, so lift that into
the common code.
Signed-off-by: Chaitanya Kulkarni <kch@nvidia.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20220124091107.642561-15-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 3b005bf6
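Before the diff that follows, every caller repeated the same two statements after each allocation: bio_set_dev(bio, bdev) and an assignment to bi_opf (or the equivalent bio_set_op_attrs() call). Lifting both into blk_next_bio() removes the repetition and makes the pair impossible to forget. As a rough illustration of the pattern, here is a freestanding C model, not kernel code: the types are stubbed, gfp_t and real bio chaining are omitted, and the opf value is arbitrary.

/*
 * Freestanding model of the refactoring -- stubbed types and a
 * malloc-backed allocator, not the kernel's implementations.
 */
#include <stdio.h>
#include <stdlib.h>

struct block_device { const char *name; };

struct bio {
	struct block_device *bi_bdev;
	unsigned int bi_opf;
};

/* Stand-ins for bio_alloc(), bio_set_dev() and submit_bio(). */
static struct bio *bio_alloc(unsigned int nr_pages)
{
	(void)nr_pages;
	return calloc(1, sizeof(struct bio));
}

static void bio_set_dev(struct bio *bio, struct block_device *bdev)
{
	bio->bi_bdev = bdev;
}

static void submit_bio(struct bio *bio)
{
	printf("submit: dev=%s opf=%#x\n", bio->bi_bdev->name, bio->bi_opf);
	free(bio);
}

/*
 * After the commit: the helper owns the device and opf assignments,
 * so no caller can allocate a chained bio and forget to set either one.
 */
static struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
		unsigned int nr_pages, unsigned int opf)
{
	struct bio *new = bio_alloc(nr_pages);

	bio_set_dev(new, bdev);
	new->bi_opf = opf;

	if (bio)
		submit_bio(bio);	/* models chain-and-submit of the old bio */
	return new;
}

int main(void)
{
	struct block_device bdev = { "sda" };
	struct bio *bio = NULL;

	/* A caller loop now passes bdev and opf once per allocation ... */
	for (int i = 0; i < 3; i++)
		bio = blk_next_bio(bio, &bdev, 0, 0x3 /* e.g. a discard op */);

	/* ... and, as in the kernel callers, still submits the last bio itself. */
	submit_bio(bio);
	return 0;
}

The model keeps the helper's contract intact: a non-NULL bio argument is submitted immediately, and the caller remains responsible for submitting the final bio.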
--- a/block/bio.c
+++ b/block/bio.c
@@ -344,10 +344,14 @@ void bio_chain(struct bio *bio, struct bio *parent)
 }
 EXPORT_SYMBOL(bio_chain);
 
-struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp)
+struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
+		unsigned int nr_pages, unsigned int opf, gfp_t gfp)
 {
 	struct bio *new = bio_alloc(gfp, nr_pages);
 
+	bio_set_dev(new, bdev);
+	new->bi_opf = opf;
+
 	if (bio) {
 		bio_chain(bio, new);
 		submit_bio(bio);
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -82,11 +82,8 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 
 		WARN_ON_ONCE((req_sects << 9) > UINT_MAX);
 
-		bio = blk_next_bio(bio, 0, gfp_mask);
+		bio = blk_next_bio(bio, bdev, 0, op, gfp_mask);
 		bio->bi_iter.bi_sector = sector;
-		bio_set_dev(bio, bdev);
-		bio_set_op_attrs(bio, op, 0);
-
 		bio->bi_iter.bi_size = req_sects << 9;
 		sector += req_sects;
 		nr_sects -= req_sects;
@@ -176,14 +173,12 @@ static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
 	max_write_same_sectors = bio_allowed_max_sectors(q);
 
 	while (nr_sects) {
-		bio = blk_next_bio(bio, 1, gfp_mask);
+		bio = blk_next_bio(bio, bdev, 1, REQ_OP_WRITE_SAME, gfp_mask);
 		bio->bi_iter.bi_sector = sector;
-		bio_set_dev(bio, bdev);
 		bio->bi_vcnt = 1;
 		bio->bi_io_vec->bv_page = page;
 		bio->bi_io_vec->bv_offset = 0;
 		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
-		bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);
 
 		if (nr_sects > max_write_same_sectors) {
 			bio->bi_iter.bi_size = max_write_same_sectors << 9;
@@ -252,10 +247,8 @@ static int __blkdev_issue_write_zeroes(struct block_device *bdev,
 		return -EOPNOTSUPP;
 
 	while (nr_sects) {
-		bio = blk_next_bio(bio, 0, gfp_mask);
+		bio = blk_next_bio(bio, bdev, 0, REQ_OP_WRITE_ZEROES, gfp_mask);
 		bio->bi_iter.bi_sector = sector;
-		bio_set_dev(bio, bdev);
-		bio->bi_opf = REQ_OP_WRITE_ZEROES;
 		if (flags & BLKDEV_ZERO_NOUNMAP)
 			bio->bi_opf |= REQ_NOUNMAP;
@@ -303,11 +296,9 @@ static int __blkdev_issue_zero_pages(struct block_device *bdev,
 		return -EPERM;
 
 	while (nr_sects != 0) {
-		bio = blk_next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
-				   gfp_mask);
+		bio = blk_next_bio(bio, bdev, __blkdev_sectors_to_bio_pages(nr_sects),
+				   REQ_OP_WRITE, gfp_mask);
 		bio->bi_iter.bi_sector = sector;
-		bio_set_dev(bio, bdev);
-		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 
 		while (nr_sects != 0) {
 			sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -215,9 +215,8 @@ static int blkdev_zone_reset_all_emulated(struct block_device *bdev,
 			continue;
 		}
 
-		bio = blk_next_bio(bio, 0, gfp_mask);
-		bio_set_dev(bio, bdev);
-		bio->bi_opf = REQ_OP_ZONE_RESET | REQ_SYNC;
+		bio = blk_next_bio(bio, bdev, 0, REQ_OP_ZONE_RESET | REQ_SYNC,
+				   gfp_mask);
 		bio->bi_iter.bi_sector = sector;
 		sector += zone_sectors;
@@ -306,9 +305,7 @@ int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,
 	}
 
 	while (sector < end_sector) {
-		bio = blk_next_bio(bio, 0, gfp_mask);
-		bio_set_dev(bio, bdev);
-		bio->bi_opf = op | REQ_SYNC;
+		bio = blk_next_bio(bio, bdev, 0, op | REQ_SYNC, gfp_mask);
 		bio->bi_iter.bi_sector = sector;
 		sector += zone_sectors;
--- a/block/blk.h
+++ b/block/blk.h
@@ -406,8 +406,6 @@ extern int blk_iolatency_init(struct request_queue *q);
 static inline int blk_iolatency_init(struct request_queue *q) { return 0; }
 #endif
 
-struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp);
-
 #ifdef CONFIG_BLK_DEV_ZONED
 void blk_queue_free_zone_bitmaps(struct request_queue *q);
 void blk_queue_clear_zone_settings(struct request_queue *q);
--- a/drivers/nvme/target/zns.c
+++ b/drivers/nvme/target/zns.c
@@ -412,10 +412,10 @@ static u16 nvmet_bdev_zone_mgmt_emulate_all(struct nvmet_req *req)
 	while (sector < get_capacity(bdev->bd_disk)) {
 		if (test_bit(blk_queue_zone_no(q, sector), d.zbitmap)) {
-			bio = blk_next_bio(bio, 0, GFP_KERNEL);
-			bio->bi_opf = zsa_req_op(req->cmd->zms.zsa) | REQ_SYNC;
+			bio = blk_next_bio(bio, bdev, 0,
+					zsa_req_op(req->cmd->zms.zsa) | REQ_SYNC,
+					GFP_KERNEL);
 			bio->bi_iter.bi_sector = sector;
-			bio_set_dev(bio, bdev);
 			/* This may take a while, so be nice to others */
 			cond_resched();
 		}
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -790,6 +790,7 @@ static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
 		bio->bi_opf |= REQ_NOWAIT;
 }
 
-struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp);
+struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
+		unsigned int nr_pages, unsigned int opf, gfp_t gfp);
 
 #endif /* __LINUX_BIO_H */
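One detail worth noting across the hunks: operation modifiers travel two different ways after this change. Modifiers that are fixed at the call site, like REQ_SYNC in the zone-management paths, are folded straight into the new opf argument, while conditional ones, like REQ_NOUNMAP in __blkdev_issue_write_zeroes, are still ORed onto bi_opf after the call. A small self-contained sketch of the distinction; the macro values here are illustrative stand-ins, not taken from the kernel's headers:

/* Illustrative bit values only -- not the kernel's blk_types.h. */
#include <stdio.h>

#define REQ_OP_ZONE_RESET    15u
#define REQ_OP_WRITE_ZEROES   9u
#define REQ_SYNC             (1u << 11)
#define REQ_NOUNMAP          (1u << 23)

int main(void)
{
	/* Zone reset: the modifier is unconditional, so it rides in opf. */
	unsigned int zone_opf = REQ_OP_ZONE_RESET | REQ_SYNC;

	/*
	 * Write-zeroes: REQ_NOUNMAP depends on the caller's
	 * BLKDEV_ZERO_NOUNMAP flag, so it is still ORed in afterwards.
	 */
	unsigned int wz_opf = REQ_OP_WRITE_ZEROES;
	int nounmap = 1;

	if (nounmap)
		wz_opf |= REQ_NOUNMAP;

	printf("zone opf=%#x, write-zeroes opf=%#x\n", zone_opf, wz_opf);
	return 0;
}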