Commit d45cfb88 authored by Christoph Hellwig, committed by David Sterba

btrfs: move btrfs_bio allocation to volumes.c

volumes.c is the place that implements the storage layer using the
btrfs_bio structure, so move the bio_set and allocation helpers there
as well.

To make up for the new initialization boilerplate, merge the two
init/exit helpers in extent_io.c into a single one.
Reviewed-by: Nikolay Borisov <nborisov@suse.com>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Reviewed-by: Anand Jain <anand.jain@oracle.com>
Tested-by: Nikolay Borisov <nborisov@suse.com>
Tested-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 1e408af3
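The hunks below are easier to follow with the end state in mind: extent_io.c ends up with a single init helper covering both slab caches, while the bio_set moves behind btrfs_bioset_init()/btrfs_bioset_exit() in volumes.c. The following is a condensed sketch of the merged helper reconstructed from the hunks in this commit; the file paths are taken from the commit message, and the exact formatting of the upstream result may differ.

/* Sketch of fs/btrfs/extent_io.c after the patch: one init helper for both caches. */
int __init extent_io_init(void)
{
	extent_state_cache = kmem_cache_create("btrfs_extent_state",
			sizeof(struct extent_state), 0,
			SLAB_MEM_SPREAD, NULL);
	if (!extent_state_cache)
		return -ENOMEM;

	extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
			sizeof(struct extent_buffer), 0,
			SLAB_MEM_SPREAD, NULL);
	if (!extent_buffer_cache) {
		/* Unwind the first cache instead of the old goto-based cleanup. */
		kmem_cache_destroy(extent_state_cache);
		return -ENOMEM;
	}
	return 0;
}

/*
 * extent_io_exit() correspondingly tears down both caches, and the bioset
 * setup that used to live here becomes btrfs_bioset_init()/btrfs_bioset_exit()
 * in volumes.c (see the last hunks below).
 */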
@@ -96,9 +96,6 @@ struct extent_state {
#endif
};
int __init extent_state_cache_init(void);
void __cold extent_state_cache_exit(void);
void extent_io_tree_init(struct btrfs_fs_info *fs_info,
struct extent_io_tree *tree, unsigned int owner,
void *private_data);
@@ -33,7 +33,6 @@
static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;
static struct bio_set btrfs_bioset;
static inline bool extent_state_in_tree(const struct extent_state *state)
{
@@ -232,41 +231,23 @@ static void submit_write_bio(struct extent_page_data *epd, int ret)
}
}
int __init extent_state_cache_init(void)
int __init extent_io_init(void)
{
extent_state_cache = kmem_cache_create("btrfs_extent_state",
sizeof(struct extent_state), 0,
SLAB_MEM_SPREAD, NULL);
if (!extent_state_cache)
return -ENOMEM;
return 0;
}
int __init extent_io_init(void)
{
extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
sizeof(struct extent_buffer), 0,
SLAB_MEM_SPREAD, NULL);
if (!extent_buffer_cache)
if (!extent_buffer_cache) {
kmem_cache_destroy(extent_state_cache);
return -ENOMEM;
if (bioset_init(&btrfs_bioset, BIO_POOL_SIZE,
offsetof(struct btrfs_bio, bio),
BIOSET_NEED_BVECS))
goto free_buffer_cache;
}
return 0;
free_buffer_cache:
kmem_cache_destroy(extent_buffer_cache);
extent_buffer_cache = NULL;
return -ENOMEM;
}
void __cold extent_state_cache_exit(void)
{
btrfs_extent_state_leak_debug_check();
kmem_cache_destroy(extent_state_cache);
}
void __cold extent_io_exit(void)
@@ -277,7 +258,8 @@ void __cold extent_io_exit(void)
*/
rcu_barrier();
kmem_cache_destroy(extent_buffer_cache);
bioset_exit(&btrfs_bioset);
btrfs_extent_state_leak_debug_check();
kmem_cache_destroy(extent_state_cache);
}
/*
@@ -3153,50 +3135,6 @@ int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array)
return 0;
}
/*
* Initialize the members up to but not including 'bio'. Use after allocating a
* new bio by bio_alloc_bioset as it does not initialize the bytes outside of
* 'bio' because use of __GFP_ZERO is not supported.
*/
static inline void btrfs_bio_init(struct btrfs_bio *bbio)
{
memset(bbio, 0, offsetof(struct btrfs_bio, bio));
}
/*
* Allocate a btrfs_io_bio, with @nr_iovecs as maximum number of iovecs.
*
* The bio allocation is backed by bioset and does not fail.
*/
struct bio *btrfs_bio_alloc(unsigned int nr_iovecs)
{
struct bio *bio;
ASSERT(0 < nr_iovecs && nr_iovecs <= BIO_MAX_VECS);
bio = bio_alloc_bioset(NULL, nr_iovecs, 0, GFP_NOFS, &btrfs_bioset);
btrfs_bio_init(btrfs_bio(bio));
return bio;
}
struct bio *btrfs_bio_clone_partial(struct bio *orig, u64 offset, u64 size)
{
struct bio *bio;
struct btrfs_bio *bbio;
ASSERT(offset <= UINT_MAX && size <= UINT_MAX);
/* this will never fail when it's backed by a bioset */
bio = bio_alloc_clone(orig->bi_bdev, orig, GFP_NOFS, &btrfs_bioset);
ASSERT(bio);
bbio = btrfs_bio(bio);
btrfs_bio_init(bbio);
bio_trim(bio, offset >> 9, size >> 9);
bbio->iter = bio->bi_iter;
return bio;
}
/**
* Attempt to add a page to bio
*
@@ -60,7 +60,6 @@ enum {
struct btrfs_bio;
struct btrfs_root;
struct btrfs_inode;
struct btrfs_io_bio;
struct btrfs_fs_info;
struct io_failure_record;
struct extent_io_tree;
@@ -242,8 +241,6 @@ void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
u32 bits_to_clear, unsigned long page_ops);
int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array);
struct bio *btrfs_bio_alloc(unsigned int nr_iovecs);
struct bio *btrfs_bio_clone_partial(struct bio *orig, u64 offset, u64 size);
void end_extent_writepage(struct page *page, int err, u64 start, u64 end);
int btrfs_repair_eb_io_failure(const struct extent_buffer *eb, int mirror_num);
@@ -2669,13 +2669,13 @@ static int __init init_btrfs_fs(void)
if (err)
goto free_cachep;
err = extent_state_cache_init();
err = btrfs_bioset_init();
if (err)
goto free_extent_io;
err = extent_map_init();
if (err)
goto free_extent_state_cache;
goto free_bioset;
err = ordered_data_init();
if (err)
@@ -2727,8 +2727,8 @@ static int __init init_btrfs_fs(void)
ordered_data_exit();
free_extent_map:
extent_map_exit();
free_extent_state_cache:
extent_state_cache_exit();
free_bioset:
btrfs_bioset_exit();
free_extent_io:
extent_io_exit();
free_cachep:
@@ -2749,7 +2749,7 @@ static void __exit exit_btrfs_fs(void)
btrfs_prelim_ref_exit();
ordered_data_exit();
extent_map_exit();
extent_state_cache_exit();
btrfs_bioset_exit();
extent_io_exit();
btrfs_interface_exit();
unregister_filesystem(&btrfs_fs_type);
@@ -34,6 +34,8 @@
#include "discard.h"
#include "zoned.h"
static struct bio_set btrfs_bioset;
#define BTRFS_BLOCK_GROUP_STRIPE_MASK (BTRFS_BLOCK_GROUP_RAID0 | \
BTRFS_BLOCK_GROUP_RAID10 | \
BTRFS_BLOCK_GROUP_RAID56_MASK)
@@ -6612,6 +6614,48 @@ int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
return __btrfs_map_block(fs_info, op, logical, length, bioc_ret, 0, 1);
}
/*
* Initialize a btrfs_bio structure. This skips the embedded bio itself as it
* is already initialized by the block layer.
*/
static inline void btrfs_bio_init(struct btrfs_bio *bbio)
{
memset(bbio, 0, offsetof(struct btrfs_bio, bio));
}
/*
* Allocate a btrfs_bio structure. The btrfs_bio is the main I/O container for
* btrfs, and is used for all I/O submitted through btrfs_submit_bio.
*
* Just like the underlying bio_alloc_bioset it will not fail as it is backed by
* a mempool.
*/
struct bio *btrfs_bio_alloc(unsigned int nr_vecs)
{
struct bio *bio;
bio = bio_alloc_bioset(NULL, nr_vecs, 0, GFP_NOFS, &btrfs_bioset);
btrfs_bio_init(btrfs_bio(bio));
return bio;
}
struct bio *btrfs_bio_clone_partial(struct bio *orig, u64 offset, u64 size)
{
struct bio *bio;
struct btrfs_bio *bbio;
ASSERT(offset <= UINT_MAX && size <= UINT_MAX);
bio = bio_alloc_clone(orig->bi_bdev, orig, GFP_NOFS, &btrfs_bioset);
bbio = btrfs_bio(bio);
btrfs_bio_init(bbio);
bio_trim(bio, offset >> 9, size >> 9);
bbio->iter = bio->bi_iter;
return bio;
}
static struct workqueue_struct *btrfs_end_io_wq(struct btrfs_io_context *bioc)
{
if (bioc->orig_bio->bi_opf & REQ_META)
@@ -8289,3 +8333,17 @@ bool btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical)
return true;
}
int __init btrfs_bioset_init(void)
{
if (bioset_init(&btrfs_bioset, BIO_POOL_SIZE,
offsetof(struct btrfs_bio, bio),
BIOSET_NEED_BVECS))
return -ENOMEM;
return 0;
}
void __cold btrfs_bioset_exit(void)
{
bioset_exit(&btrfs_bioset);
}
@@ -393,6 +393,12 @@ static inline struct btrfs_bio *btrfs_bio(struct bio *bio)
return container_of(bio, struct btrfs_bio, bio);
}
int __init btrfs_bioset_init(void);
void __cold btrfs_bioset_exit(void);
struct bio *btrfs_bio_alloc(unsigned int nr_vecs);
struct bio *btrfs_bio_clone_partial(struct bio *orig, u64 offset, u64 size);
static inline void btrfs_bio_free_csum(struct btrfs_bio *bbio)
{
if (bbio->csum != bbio->csum_inline) {
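As a quick orientation for callers, here is a minimal, hypothetical usage sketch of the two allocators whose declarations move into this header. Only btrfs_bio_alloc(), btrfs_bio_clone_partial() and btrfs_bio() come from this patch; the function name, the header path and the submission details are illustrative assumptions.

/* Hypothetical caller, assuming the declarations above live in volumes.h. */
#include <linux/bio.h>
#include "volumes.h"

static void demo_partial_resubmit(struct bio *orig, u64 done_bytes)
{
	struct bio *bio;

	/* Backed by btrfs_bioset, so this allocation does not fail (mempool guarantee). */
	bio = btrfs_bio_alloc(BIO_MAX_VECS);
	/* btrfs_bio(bio) gives access to the btrfs-private part of the allocation. */
	/* ... fill in pages/bi_iter and submit as usual ... */
	bio_put(bio);

	/* Clone only the tail of 'orig' that still needs to be resubmitted. */
	bio = btrfs_bio_clone_partial(orig, done_bytes,
				      orig->bi_iter.bi_size - done_bytes);
	/* ... resubmit the trimmed clone ... */
	bio_put(bio);
}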