Commit 4317ff00 authored by Qu Wenruo, committed by David Sterba

btrfs: introduce btrfs_bio::fs_info member

Currently we're doing a lot of work for btrfs_bio:

- Checksum verification for data read bios
- Bio splits if it crosses stripe boundary
- Read repair for data read bios

However for the incoming scrub patches, we don't want this extra
functionality at all, just plain logical + mirror -> physical mapping
ability.

Thus here we do the following changes:

- Introduce btrfs_bio::fs_info
  This is for the new scrub specific btrfs_bio, which would not populate
  btrfs_bio::inode.
  Thus we need such a new member to grab the fs_info

  This new member will always be populated.

- Replace @inode argument with @fs_info for btrfs_bio_init() and its
  caller
  Since @inode is no longer a mandatory member, replace it with
  @fs_info, and let involved users populate @inode.

- Skip checksum verification and generation if @bbio->inode is NULL

- Add extra ASSERT()s
  To make sure:

  * bbio->inode is properly set for involved read repair path
  * if @file_offset is set, bbio->inode is also populated

- Grab @fs_info from @bbio directly
  We can no longer go @bbio->inode->root->fs_info, as bbio->inode can be
  NULL. This involves:

  * btrfs_simple_end_io()
  * should_async_write()
  * btrfs_wq_submit_bio()
  * btrfs_use_zone_append()
Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 2a2dc22f
...@@ -31,11 +31,11 @@ struct btrfs_failed_bio { ...@@ -31,11 +31,11 @@ struct btrfs_failed_bio {
* Initialize a btrfs_bio structure. This skips the embedded bio itself as it * Initialize a btrfs_bio structure. This skips the embedded bio itself as it
* is already initialized by the block layer. * is already initialized by the block layer.
*/ */
void btrfs_bio_init(struct btrfs_bio *bbio, struct btrfs_inode *inode, void btrfs_bio_init(struct btrfs_bio *bbio, struct btrfs_fs_info *fs_info,
btrfs_bio_end_io_t end_io, void *private) btrfs_bio_end_io_t end_io, void *private)
{ {
memset(bbio, 0, offsetof(struct btrfs_bio, bio)); memset(bbio, 0, offsetof(struct btrfs_bio, bio));
bbio->inode = inode; bbio->fs_info = fs_info;
bbio->end_io = end_io; bbio->end_io = end_io;
bbio->private = private; bbio->private = private;
atomic_set(&bbio->pending_ios, 1); atomic_set(&bbio->pending_ios, 1);
...@@ -49,7 +49,7 @@ void btrfs_bio_init(struct btrfs_bio *bbio, struct btrfs_inode *inode, ...@@ -49,7 +49,7 @@ void btrfs_bio_init(struct btrfs_bio *bbio, struct btrfs_inode *inode,
* a mempool. * a mempool.
*/ */
struct btrfs_bio *btrfs_bio_alloc(unsigned int nr_vecs, blk_opf_t opf, struct btrfs_bio *btrfs_bio_alloc(unsigned int nr_vecs, blk_opf_t opf,
struct btrfs_inode *inode, struct btrfs_fs_info *fs_info,
btrfs_bio_end_io_t end_io, void *private) btrfs_bio_end_io_t end_io, void *private)
{ {
struct btrfs_bio *bbio; struct btrfs_bio *bbio;
...@@ -57,7 +57,7 @@ struct btrfs_bio *btrfs_bio_alloc(unsigned int nr_vecs, blk_opf_t opf, ...@@ -57,7 +57,7 @@ struct btrfs_bio *btrfs_bio_alloc(unsigned int nr_vecs, blk_opf_t opf,
bio = bio_alloc_bioset(NULL, nr_vecs, opf, GFP_NOFS, &btrfs_bioset); bio = bio_alloc_bioset(NULL, nr_vecs, opf, GFP_NOFS, &btrfs_bioset);
bbio = btrfs_bio(bio); bbio = btrfs_bio(bio);
btrfs_bio_init(bbio, inode, end_io, private); btrfs_bio_init(bbio, fs_info, end_io, private);
return bbio; return bbio;
} }
...@@ -92,8 +92,8 @@ static struct btrfs_bio *btrfs_split_bio(struct btrfs_fs_info *fs_info, ...@@ -92,8 +92,8 @@ static struct btrfs_bio *btrfs_split_bio(struct btrfs_fs_info *fs_info,
GFP_NOFS, &btrfs_clone_bioset); GFP_NOFS, &btrfs_clone_bioset);
} }
bbio = btrfs_bio(bio); bbio = btrfs_bio(bio);
btrfs_bio_init(bbio, orig_bbio->inode, NULL, orig_bbio); btrfs_bio_init(bbio, fs_info, NULL, orig_bbio);
bbio->inode = orig_bbio->inode;
bbio->file_offset = orig_bbio->file_offset; bbio->file_offset = orig_bbio->file_offset;
if (!(orig_bbio->bio.bi_opf & REQ_BTRFS_ONE_ORDERED)) if (!(orig_bbio->bio.bi_opf & REQ_BTRFS_ONE_ORDERED))
orig_bbio->file_offset += map_length; orig_bbio->file_offset += map_length;
...@@ -244,7 +244,8 @@ static struct btrfs_failed_bio *repair_one_sector(struct btrfs_bio *failed_bbio, ...@@ -244,7 +244,8 @@ static struct btrfs_failed_bio *repair_one_sector(struct btrfs_bio *failed_bbio,
__bio_add_page(repair_bio, bv->bv_page, bv->bv_len, bv->bv_offset); __bio_add_page(repair_bio, bv->bv_page, bv->bv_len, bv->bv_offset);
repair_bbio = btrfs_bio(repair_bio); repair_bbio = btrfs_bio(repair_bio);
btrfs_bio_init(repair_bbio, failed_bbio->inode, NULL, fbio); btrfs_bio_init(repair_bbio, fs_info, NULL, fbio);
repair_bbio->inode = failed_bbio->inode;
repair_bbio->file_offset = failed_bbio->file_offset + bio_offset; repair_bbio->file_offset = failed_bbio->file_offset + bio_offset;
mirror = next_repair_mirror(fbio, failed_bbio->mirror_num); mirror = next_repair_mirror(fbio, failed_bbio->mirror_num);
...@@ -263,6 +264,9 @@ static void btrfs_check_read_bio(struct btrfs_bio *bbio, struct btrfs_device *de ...@@ -263,6 +264,9 @@ static void btrfs_check_read_bio(struct btrfs_bio *bbio, struct btrfs_device *de
struct btrfs_failed_bio *fbio = NULL; struct btrfs_failed_bio *fbio = NULL;
u32 offset = 0; u32 offset = 0;
/* Read-repair requires the inode field to be set by the submitter. */
ASSERT(inode);
/* /*
* Hand off repair bios to the repair code as there is no upper level * Hand off repair bios to the repair code as there is no upper level
* submitter for them. * submitter for them.
...@@ -323,17 +327,17 @@ static void btrfs_end_bio_work(struct work_struct *work) ...@@ -323,17 +327,17 @@ static void btrfs_end_bio_work(struct work_struct *work)
struct btrfs_bio *bbio = container_of(work, struct btrfs_bio, end_io_work); struct btrfs_bio *bbio = container_of(work, struct btrfs_bio, end_io_work);
/* Metadata reads are checked and repaired by the submitter. */ /* Metadata reads are checked and repaired by the submitter. */
if (bbio->bio.bi_opf & REQ_META) if (bbio->inode && !(bbio->bio.bi_opf & REQ_META))
bbio->end_io(bbio);
else
btrfs_check_read_bio(bbio, bbio->bio.bi_private); btrfs_check_read_bio(bbio, bbio->bio.bi_private);
else
bbio->end_io(bbio);
} }
static void btrfs_simple_end_io(struct bio *bio) static void btrfs_simple_end_io(struct bio *bio)
{ {
struct btrfs_bio *bbio = btrfs_bio(bio); struct btrfs_bio *bbio = btrfs_bio(bio);
struct btrfs_device *dev = bio->bi_private; struct btrfs_device *dev = bio->bi_private;
struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info; struct btrfs_fs_info *fs_info = bbio->fs_info;
btrfs_bio_counter_dec(fs_info); btrfs_bio_counter_dec(fs_info);
...@@ -357,7 +361,8 @@ static void btrfs_raid56_end_io(struct bio *bio) ...@@ -357,7 +361,8 @@ static void btrfs_raid56_end_io(struct bio *bio)
btrfs_bio_counter_dec(bioc->fs_info); btrfs_bio_counter_dec(bioc->fs_info);
bbio->mirror_num = bioc->mirror_num; bbio->mirror_num = bioc->mirror_num;
if (bio_op(bio) == REQ_OP_READ && !(bbio->bio.bi_opf & REQ_META)) if (bio_op(bio) == REQ_OP_READ && bbio->inode &&
!(bbio->bio.bi_opf & REQ_META))
btrfs_check_read_bio(bbio, NULL); btrfs_check_read_bio(bbio, NULL);
else else
btrfs_orig_bbio_end_io(bbio); btrfs_orig_bbio_end_io(bbio);
...@@ -583,7 +588,7 @@ static bool should_async_write(struct btrfs_bio *bbio) ...@@ -583,7 +588,7 @@ static bool should_async_write(struct btrfs_bio *bbio)
* in order. * in order.
*/ */
if (bbio->bio.bi_opf & REQ_META) { if (bbio->bio.bi_opf & REQ_META) {
struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info; struct btrfs_fs_info *fs_info = bbio->fs_info;
if (btrfs_is_zoned(fs_info)) if (btrfs_is_zoned(fs_info))
return false; return false;
...@@ -603,7 +608,7 @@ static bool btrfs_wq_submit_bio(struct btrfs_bio *bbio, ...@@ -603,7 +608,7 @@ static bool btrfs_wq_submit_bio(struct btrfs_bio *bbio,
struct btrfs_io_context *bioc, struct btrfs_io_context *bioc,
struct btrfs_io_stripe *smap, int mirror_num) struct btrfs_io_stripe *smap, int mirror_num)
{ {
struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info; struct btrfs_fs_info *fs_info = bbio->fs_info;
struct async_submit_bio *async; struct async_submit_bio *async;
async = kmalloc(sizeof(*async), GFP_NOFS); async = kmalloc(sizeof(*async), GFP_NOFS);
...@@ -627,7 +632,7 @@ static bool btrfs_wq_submit_bio(struct btrfs_bio *bbio, ...@@ -627,7 +632,7 @@ static bool btrfs_wq_submit_bio(struct btrfs_bio *bbio,
static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num) static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
{ {
struct btrfs_inode *inode = bbio->inode; struct btrfs_inode *inode = bbio->inode;
struct btrfs_fs_info *fs_info = inode->root->fs_info; struct btrfs_fs_info *fs_info = bbio->fs_info;
struct btrfs_bio *orig_bbio = bbio; struct btrfs_bio *orig_bbio = bbio;
struct bio *bio = &bbio->bio; struct bio *bio = &bbio->bio;
u64 logical = bio->bi_iter.bi_sector << 9; u64 logical = bio->bi_iter.bi_sector << 9;
...@@ -660,7 +665,7 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num) ...@@ -660,7 +665,7 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
* Save the iter for the end_io handler and preload the checksums for * Save the iter for the end_io handler and preload the checksums for
* data reads. * data reads.
*/ */
if (bio_op(bio) == REQ_OP_READ && !(bio->bi_opf & REQ_META)) { if (bio_op(bio) == REQ_OP_READ && inode && !(bio->bi_opf & REQ_META)) {
bbio->saved_iter = bio->bi_iter; bbio->saved_iter = bio->bi_iter;
ret = btrfs_lookup_bio_sums(bbio); ret = btrfs_lookup_bio_sums(bbio);
if (ret) if (ret)
...@@ -680,7 +685,7 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num) ...@@ -680,7 +685,7 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
* Csum items for reloc roots have already been cloned at this * Csum items for reloc roots have already been cloned at this
* point, so they are handled as part of the no-checksum case. * point, so they are handled as part of the no-checksum case.
*/ */
if (!(inode->flags & BTRFS_INODE_NODATASUM) && if (inode && !(inode->flags & BTRFS_INODE_NODATASUM) &&
!test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state) && !test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state) &&
!btrfs_is_data_reloc_root(inode->root)) { !btrfs_is_data_reloc_root(inode->root)) {
if (should_async_write(bbio) && if (should_async_write(bbio) &&
...@@ -709,6 +714,9 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num) ...@@ -709,6 +714,9 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
void btrfs_submit_bio(struct btrfs_bio *bbio, int mirror_num) void btrfs_submit_bio(struct btrfs_bio *bbio, int mirror_num)
{ {
/* If bbio->inode is not populated, its file_offset must be 0. */
ASSERT(bbio->inode || bbio->file_offset == 0);
while (!btrfs_submit_chunk(bbio, mirror_num)) while (!btrfs_submit_chunk(bbio, mirror_num))
; ;
} }
......
...@@ -30,7 +30,10 @@ typedef void (*btrfs_bio_end_io_t)(struct btrfs_bio *bbio); ...@@ -30,7 +30,10 @@ typedef void (*btrfs_bio_end_io_t)(struct btrfs_bio *bbio);
* passed to btrfs_submit_bio for mapping to the physical devices. * passed to btrfs_submit_bio for mapping to the physical devices.
*/ */
struct btrfs_bio { struct btrfs_bio {
/* Inode and offset into it that this I/O operates on. */ /*
* Inode and offset into it that this I/O operates on.
* Only set for data I/O.
*/
struct btrfs_inode *inode; struct btrfs_inode *inode;
u64 file_offset; u64 file_offset;
...@@ -58,6 +61,9 @@ struct btrfs_bio { ...@@ -58,6 +61,9 @@ struct btrfs_bio {
atomic_t pending_ios; atomic_t pending_ios;
struct work_struct end_io_work; struct work_struct end_io_work;
/* File system that this I/O operates on. */
struct btrfs_fs_info *fs_info;
/* /*
* This member must come last, bio_alloc_bioset will allocate enough * This member must come last, bio_alloc_bioset will allocate enough
* bytes for entire btrfs_bio but relies on bio being last. * bytes for entire btrfs_bio but relies on bio being last.
...@@ -73,10 +79,10 @@ static inline struct btrfs_bio *btrfs_bio(struct bio *bio) ...@@ -73,10 +79,10 @@ static inline struct btrfs_bio *btrfs_bio(struct bio *bio)
int __init btrfs_bioset_init(void); int __init btrfs_bioset_init(void);
void __cold btrfs_bioset_exit(void); void __cold btrfs_bioset_exit(void);
void btrfs_bio_init(struct btrfs_bio *bbio, struct btrfs_inode *inode, void btrfs_bio_init(struct btrfs_bio *bbio, struct btrfs_fs_info *fs_info,
btrfs_bio_end_io_t end_io, void *private); btrfs_bio_end_io_t end_io, void *private);
struct btrfs_bio *btrfs_bio_alloc(unsigned int nr_vecs, blk_opf_t opf, struct btrfs_bio *btrfs_bio_alloc(unsigned int nr_vecs, blk_opf_t opf,
struct btrfs_inode *inode, struct btrfs_fs_info *fs_info,
btrfs_bio_end_io_t end_io, void *private); btrfs_bio_end_io_t end_io, void *private);
static inline void btrfs_bio_end_io(struct btrfs_bio *bbio, blk_status_t status) static inline void btrfs_bio_end_io(struct btrfs_bio *bbio, blk_status_t status)
......
...@@ -69,7 +69,8 @@ static struct compressed_bio *alloc_compressed_bio(struct btrfs_inode *inode, ...@@ -69,7 +69,8 @@ static struct compressed_bio *alloc_compressed_bio(struct btrfs_inode *inode,
bbio = btrfs_bio(bio_alloc_bioset(NULL, BTRFS_MAX_COMPRESSED_PAGES, op, bbio = btrfs_bio(bio_alloc_bioset(NULL, BTRFS_MAX_COMPRESSED_PAGES, op,
GFP_NOFS, &btrfs_compressed_bioset)); GFP_NOFS, &btrfs_compressed_bioset));
btrfs_bio_init(bbio, inode, end_io, NULL); btrfs_bio_init(bbio, inode->root->fs_info, end_io, NULL);
bbio->inode = inode;
bbio->file_offset = start; bbio->file_offset = start;
return to_compressed_bio(bbio); return to_compressed_bio(bbio);
} }
......
...@@ -898,9 +898,10 @@ static void alloc_new_bio(struct btrfs_inode *inode, ...@@ -898,9 +898,10 @@ static void alloc_new_bio(struct btrfs_inode *inode,
struct btrfs_fs_info *fs_info = inode->root->fs_info; struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct btrfs_bio *bbio; struct btrfs_bio *bbio;
bbio = btrfs_bio_alloc(BIO_MAX_VECS, bio_ctrl->opf, inode, bbio = btrfs_bio_alloc(BIO_MAX_VECS, bio_ctrl->opf, fs_info,
bio_ctrl->end_io_func, NULL); bio_ctrl->end_io_func, NULL);
bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT; bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
bbio->inode = inode;
bbio->file_offset = file_offset; bbio->file_offset = file_offset;
bio_ctrl->bbio = bbio; bio_ctrl->bbio = bbio;
bio_ctrl->len_to_oe_boundary = U32_MAX; bio_ctrl->len_to_oe_boundary = U32_MAX;
......
...@@ -7711,7 +7711,9 @@ static void btrfs_dio_submit_io(const struct iomap_iter *iter, struct bio *bio, ...@@ -7711,7 +7711,9 @@ static void btrfs_dio_submit_io(const struct iomap_iter *iter, struct bio *bio,
container_of(bbio, struct btrfs_dio_private, bbio); container_of(bbio, struct btrfs_dio_private, bbio);
struct btrfs_dio_data *dio_data = iter->private; struct btrfs_dio_data *dio_data = iter->private;
btrfs_bio_init(bbio, BTRFS_I(iter->inode), btrfs_dio_end_io, bio->bi_private); btrfs_bio_init(bbio, BTRFS_I(iter->inode)->root->fs_info,
btrfs_dio_end_io, bio->bi_private);
bbio->inode = BTRFS_I(iter->inode);
bbio->file_offset = file_offset; bbio->file_offset = file_offset;
dip->file_offset = file_offset; dip->file_offset = file_offset;
...@@ -9899,6 +9901,7 @@ int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode, ...@@ -9899,6 +9901,7 @@ int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
u64 file_offset, u64 disk_bytenr, u64 file_offset, u64 disk_bytenr,
u64 disk_io_size, struct page **pages) u64 disk_io_size, struct page **pages)
{ {
struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct btrfs_encoded_read_private priv = { struct btrfs_encoded_read_private priv = {
.pending = ATOMIC_INIT(1), .pending = ATOMIC_INIT(1),
}; };
...@@ -9907,9 +9910,10 @@ int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode, ...@@ -9907,9 +9910,10 @@ int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
init_waitqueue_head(&priv.wait); init_waitqueue_head(&priv.wait);
bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, inode, bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info,
btrfs_encoded_read_endio, &priv); btrfs_encoded_read_endio, &priv);
bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT; bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
bbio->inode = inode;
do { do {
size_t bytes = min_t(u64, disk_io_size, PAGE_SIZE); size_t bytes = min_t(u64, disk_io_size, PAGE_SIZE);
...@@ -9918,9 +9922,10 @@ int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode, ...@@ -9918,9 +9922,10 @@ int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
atomic_inc(&priv.pending); atomic_inc(&priv.pending);
btrfs_submit_bio(bbio, 0); btrfs_submit_bio(bbio, 0);
bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, inode, bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info,
btrfs_encoded_read_endio, &priv); btrfs_encoded_read_endio, &priv);
bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT; bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
bbio->inode = inode;
continue; continue;
} }
......
...@@ -1640,14 +1640,14 @@ bool btrfs_use_zone_append(struct btrfs_bio *bbio) ...@@ -1640,14 +1640,14 @@ bool btrfs_use_zone_append(struct btrfs_bio *bbio)
{ {
u64 start = (bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT); u64 start = (bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT);
struct btrfs_inode *inode = bbio->inode; struct btrfs_inode *inode = bbio->inode;
struct btrfs_fs_info *fs_info = inode->root->fs_info; struct btrfs_fs_info *fs_info = bbio->fs_info;
struct btrfs_block_group *cache; struct btrfs_block_group *cache;
bool ret = false; bool ret = false;
if (!btrfs_is_zoned(fs_info)) if (!btrfs_is_zoned(fs_info))
return false; return false;
if (!is_data_inode(&inode->vfs_inode)) if (!inode || !is_data_inode(&inode->vfs_inode))
return false; return false;
if (btrfs_op(&bbio->bio) != BTRFS_MAP_WRITE) if (btrfs_op(&bbio->bio) != BTRFS_MAP_WRITE)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment