Commit 390ed29b authored by Qu Wenruo, committed by David Sterba

btrfs: refactor submit_extent_page() to make bio and its flag tracing easier

There is a lot of code inside extent_io.c that needs both "struct bio
**bio_ret" and "unsigned long prev_bio_flags", along with some
parameters like "unsigned long bio_flags".

Such strange parameters are here for bio assembly.

For example, we have such inode page layout:

  0       4K      8K      12K
  |<-- Extent A-->|<- EB->|

Then what we do is:

- Page [0, 4K)
  *bio_ret = NULL
  So we allocate a new bio to bio_ret,
  Add page [0, 4K) to *bio_ret.

- Page [4K, 8K)
  *bio_ret != NULL
  We find this page is contiguous with *bio_ret,
  and if we're not at a stripe boundary, we
  add page [4K, 8K) to *bio_ret.

- Page [8K, 12K)
  *bio_ret != NULL
  But we find this page is not contiguous, so
  we submit *bio_ret, then allocate a new bio,
  and add page [8K, 12K) to the new bio.

This means we need to record both the bio and its bio_flag, but we
record them manually using that strange parameter list, rather than
encapsulating them into their own structure.

So this patch will introduce a new structure, btrfs_bio_ctrl, to record
both the bio, and its bio_flags.

Also, in above case, for all pages added to the bio, we need to check if
the new page crosses stripe boundary.  This check itself can be time
consuming, and we don't really need to do that for each page.

This patch also integrates the stripe boundary check into btrfs_bio_ctrl.
When a new bio is allocated, the stripe and ordered extent boundary is
also calculated, so no matter how large the bio will be, we only
calculate the boundaries once, to save some CPU time.

The following functions/structures are affected:

- struct extent_page_data
  Replace its bio pointer with structure btrfs_bio_ctrl (embedded
  structure, not pointer)

- end_write_bio()
- flush_write_bio()
  Just change how bio is fetched

- btrfs_bio_add_page()
  Use pre-calculated boundaries instead of re-calculating them.
  And use @bio_ctrl to replace @bio and @prev_bio_flags.

- calc_bio_boundaries()
  New function

- submit_extent_page() callers
- btrfs_do_readpage() callers
- contiguous_readpages() callers
  Use @bio_ctrl to replace @bio and @prev_bio_flags, and change how
  the bio is grabbed.

- btrfs_bio_fits_in_ordered_extent()
  Removed, as now the ordered extent size limit is done at bio
  allocation time, no need to check for each page range.
Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 1a0b5c4d
...@@ -3157,8 +3157,6 @@ void btrfs_split_delalloc_extent(struct inode *inode, ...@@ -3157,8 +3157,6 @@ void btrfs_split_delalloc_extent(struct inode *inode,
struct extent_state *orig, u64 split); struct extent_state *orig, u64 split);
int btrfs_bio_fits_in_stripe(struct page *page, size_t size, struct bio *bio, int btrfs_bio_fits_in_stripe(struct page *page, size_t size, struct bio *bio,
unsigned long bio_flags); unsigned long bio_flags);
bool btrfs_bio_fits_in_ordered_extent(struct page *page, struct bio *bio,
unsigned int size);
void btrfs_set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end); void btrfs_set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end);
vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf); vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf);
int btrfs_readpage(struct file *file, struct page *page); int btrfs_readpage(struct file *file, struct page *page);
......
...@@ -136,7 +136,7 @@ struct tree_entry { ...@@ -136,7 +136,7 @@ struct tree_entry {
}; };
struct extent_page_data { struct extent_page_data {
struct bio *bio; struct btrfs_bio_ctrl bio_ctrl;
/* tells writepage not to lock the state bits for this range /* tells writepage not to lock the state bits for this range
* it still does the unlocking * it still does the unlocking
*/ */
...@@ -185,10 +185,12 @@ int __must_check submit_one_bio(struct bio *bio, int mirror_num, ...@@ -185,10 +185,12 @@ int __must_check submit_one_bio(struct bio *bio, int mirror_num,
/* Cleanup unsubmitted bios */ /* Cleanup unsubmitted bios */
static void end_write_bio(struct extent_page_data *epd, int ret) static void end_write_bio(struct extent_page_data *epd, int ret)
{ {
if (epd->bio) { struct bio *bio = epd->bio_ctrl.bio;
epd->bio->bi_status = errno_to_blk_status(ret);
bio_endio(epd->bio); if (bio) {
epd->bio = NULL; bio->bi_status = errno_to_blk_status(ret);
bio_endio(bio);
epd->bio_ctrl.bio = NULL;
} }
} }
...@@ -201,9 +203,10 @@ static void end_write_bio(struct extent_page_data *epd, int ret) ...@@ -201,9 +203,10 @@ static void end_write_bio(struct extent_page_data *epd, int ret)
static int __must_check flush_write_bio(struct extent_page_data *epd) static int __must_check flush_write_bio(struct extent_page_data *epd)
{ {
int ret = 0; int ret = 0;
struct bio *bio = epd->bio_ctrl.bio;
if (epd->bio) { if (bio) {
ret = submit_one_bio(epd->bio, 0, 0); ret = submit_one_bio(bio, 0, 0);
/* /*
* Clean up of epd->bio is handled by its endio function. * Clean up of epd->bio is handled by its endio function.
* And endio is either triggered by successful bio execution * And endio is either triggered by successful bio execution
...@@ -211,7 +214,7 @@ static int __must_check flush_write_bio(struct extent_page_data *epd) ...@@ -211,7 +214,7 @@ static int __must_check flush_write_bio(struct extent_page_data *epd)
* So at this point, no matter what happened, we don't need * So at this point, no matter what happened, we don't need
* to clean up epd->bio. * to clean up epd->bio.
*/ */
epd->bio = NULL; epd->bio_ctrl.bio = NULL;
} }
return ret; return ret;
} }
...@@ -3163,42 +3166,99 @@ struct bio *btrfs_bio_clone_partial(struct bio *orig, int offset, int size) ...@@ -3163,42 +3166,99 @@ struct bio *btrfs_bio_clone_partial(struct bio *orig, int offset, int size)
* *
* Return true if successfully page added. Otherwise, return false. * Return true if successfully page added. Otherwise, return false.
*/ */
static bool btrfs_bio_add_page(struct bio *bio, struct page *page, static bool btrfs_bio_add_page(struct btrfs_bio_ctrl *bio_ctrl,
struct page *page,
u64 disk_bytenr, unsigned int size, u64 disk_bytenr, unsigned int size,
unsigned int pg_offset, unsigned int pg_offset,
unsigned long prev_bio_flags,
unsigned long bio_flags) unsigned long bio_flags)
{ {
struct bio *bio = bio_ctrl->bio;
u32 bio_size = bio->bi_iter.bi_size;
const sector_t sector = disk_bytenr >> SECTOR_SHIFT; const sector_t sector = disk_bytenr >> SECTOR_SHIFT;
bool contig; bool contig;
int ret; int ret;
if (prev_bio_flags != bio_flags) ASSERT(bio);
/* The limit should be calculated when bio_ctrl->bio is allocated */
ASSERT(bio_ctrl->len_to_oe_boundary && bio_ctrl->len_to_stripe_boundary);
if (bio_ctrl->bio_flags != bio_flags)
return false; return false;
if (prev_bio_flags & EXTENT_BIO_COMPRESSED) if (bio_ctrl->bio_flags & EXTENT_BIO_COMPRESSED)
contig = bio->bi_iter.bi_sector == sector; contig = bio->bi_iter.bi_sector == sector;
else else
contig = bio_end_sector(bio) == sector; contig = bio_end_sector(bio) == sector;
if (!contig) if (!contig)
return false; return false;
if (btrfs_bio_fits_in_stripe(page, size, bio, bio_flags)) if (bio_size + size > bio_ctrl->len_to_oe_boundary ||
bio_size + size > bio_ctrl->len_to_stripe_boundary)
return false; return false;
if (bio_op(bio) == REQ_OP_ZONE_APPEND) { if (bio_op(bio) == REQ_OP_ZONE_APPEND)
struct page *first_page = bio_first_bvec_all(bio)->bv_page;
if (!btrfs_bio_fits_in_ordered_extent(first_page, bio, size))
return false;
ret = bio_add_zone_append_page(bio, page, size, pg_offset); ret = bio_add_zone_append_page(bio, page, size, pg_offset);
} else { else
ret = bio_add_page(bio, page, size, pg_offset); ret = bio_add_page(bio, page, size, pg_offset);
}
return ret == size; return ret == size;
} }
static int calc_bio_boundaries(struct btrfs_bio_ctrl *bio_ctrl,
struct btrfs_inode *inode)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct btrfs_io_geometry geom;
struct btrfs_ordered_extent *ordered;
struct extent_map *em;
u64 logical = (bio_ctrl->bio->bi_iter.bi_sector << SECTOR_SHIFT);
int ret;
/*
* Pages for compressed extent are never submitted to disk directly,
* thus it has no real boundary, just set them to U32_MAX.
*
* The split happens for real compressed bio, which happens in
* btrfs_submit_compressed_read/write().
*/
if (bio_ctrl->bio_flags & EXTENT_BIO_COMPRESSED) {
bio_ctrl->len_to_oe_boundary = U32_MAX;
bio_ctrl->len_to_stripe_boundary = U32_MAX;
return 0;
}
em = btrfs_get_chunk_map(fs_info, logical, fs_info->sectorsize);
if (IS_ERR(em))
return PTR_ERR(em);
ret = btrfs_get_io_geometry(fs_info, em, btrfs_op(bio_ctrl->bio),
logical, &geom);
free_extent_map(em);
if (ret < 0) {
return ret;
}
if (geom.len > U32_MAX)
bio_ctrl->len_to_stripe_boundary = U32_MAX;
else
bio_ctrl->len_to_stripe_boundary = (u32)geom.len;
if (!btrfs_is_zoned(fs_info) ||
bio_op(bio_ctrl->bio) != REQ_OP_ZONE_APPEND) {
bio_ctrl->len_to_oe_boundary = U32_MAX;
return 0;
}
ASSERT(fs_info->max_zone_append_size > 0);
/* Ordered extent not yet created, so we're good */
ordered = btrfs_lookup_ordered_extent(inode, logical);
if (!ordered) {
bio_ctrl->len_to_oe_boundary = U32_MAX;
return 0;
}
bio_ctrl->len_to_oe_boundary = min_t(u32, U32_MAX,
ordered->disk_bytenr + ordered->disk_num_bytes - logical);
btrfs_put_ordered_extent(ordered);
return 0;
}
/* /*
* @opf: bio REQ_OP_* and REQ_* flags as one value * @opf: bio REQ_OP_* and REQ_* flags as one value
* @wbc: optional writeback control for io accounting * @wbc: optional writeback control for io accounting
...@@ -3215,12 +3275,11 @@ static bool btrfs_bio_add_page(struct bio *bio, struct page *page, ...@@ -3215,12 +3275,11 @@ static bool btrfs_bio_add_page(struct bio *bio, struct page *page,
*/ */
static int submit_extent_page(unsigned int opf, static int submit_extent_page(unsigned int opf,
struct writeback_control *wbc, struct writeback_control *wbc,
struct btrfs_bio_ctrl *bio_ctrl,
struct page *page, u64 disk_bytenr, struct page *page, u64 disk_bytenr,
size_t size, unsigned long pg_offset, size_t size, unsigned long pg_offset,
struct bio **bio_ret,
bio_end_io_t end_io_func, bio_end_io_t end_io_func,
int mirror_num, int mirror_num,
unsigned long prev_bio_flags,
unsigned long bio_flags, unsigned long bio_flags,
bool force_bio_submit) bool force_bio_submit)
{ {
...@@ -3231,19 +3290,19 @@ static int submit_extent_page(unsigned int opf, ...@@ -3231,19 +3290,19 @@ static int submit_extent_page(unsigned int opf,
struct extent_io_tree *tree = &inode->io_tree; struct extent_io_tree *tree = &inode->io_tree;
struct btrfs_fs_info *fs_info = inode->root->fs_info; struct btrfs_fs_info *fs_info = inode->root->fs_info;
ASSERT(bio_ret); ASSERT(bio_ctrl);
if (*bio_ret) { ASSERT(pg_offset < PAGE_SIZE && size <= PAGE_SIZE &&
bio = *bio_ret; pg_offset + size <= PAGE_SIZE);
if (bio_ctrl->bio) {
bio = bio_ctrl->bio;
if (force_bio_submit || if (force_bio_submit ||
!btrfs_bio_add_page(bio, page, disk_bytenr, io_size, !btrfs_bio_add_page(bio_ctrl, page, disk_bytenr, io_size,
pg_offset, prev_bio_flags, bio_flags)) { pg_offset, bio_flags)) {
ret = submit_one_bio(bio, mirror_num, prev_bio_flags); ret = submit_one_bio(bio, mirror_num, bio_ctrl->bio_flags);
if (ret < 0) { bio_ctrl->bio = NULL;
*bio_ret = NULL; if (ret < 0)
return ret; return ret;
}
bio = NULL;
} else { } else {
if (wbc) if (wbc)
wbc_account_cgroup_owner(wbc, page, io_size); wbc_account_cgroup_owner(wbc, page, io_size);
...@@ -3275,7 +3334,9 @@ static int submit_extent_page(unsigned int opf, ...@@ -3275,7 +3334,9 @@ static int submit_extent_page(unsigned int opf,
btrfs_io_bio(bio)->device = device; btrfs_io_bio(bio)->device = device;
} }
*bio_ret = bio; bio_ctrl->bio = bio;
bio_ctrl->bio_flags = bio_flags;
ret = calc_bio_boundaries(bio_ctrl, inode);
return ret; return ret;
} }
...@@ -3388,7 +3449,7 @@ __get_extent_map(struct inode *inode, struct page *page, size_t pg_offset, ...@@ -3388,7 +3449,7 @@ __get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
* return 0 on success, otherwise return error * return 0 on success, otherwise return error
*/ */
int btrfs_do_readpage(struct page *page, struct extent_map **em_cached, int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
struct bio **bio, unsigned long *bio_flags, struct btrfs_bio_ctrl *bio_ctrl,
unsigned int read_flags, u64 *prev_em_start) unsigned int read_flags, u64 *prev_em_start)
{ {
struct inode *inode = page->mapping->host; struct inode *inode = page->mapping->host;
...@@ -3564,15 +3625,13 @@ int btrfs_do_readpage(struct page *page, struct extent_map **em_cached, ...@@ -3564,15 +3625,13 @@ int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
} }
ret = submit_extent_page(REQ_OP_READ | read_flags, NULL, ret = submit_extent_page(REQ_OP_READ | read_flags, NULL,
page, disk_bytenr, iosize, bio_ctrl, page, disk_bytenr, iosize,
pg_offset, bio, pg_offset,
end_bio_extent_readpage, 0, end_bio_extent_readpage, 0,
*bio_flags,
this_bio_flag, this_bio_flag,
force_bio_submit); force_bio_submit);
if (!ret) { if (!ret) {
nr++; nr++;
*bio_flags = this_bio_flag;
} else { } else {
unlock_extent(tree, cur, cur + iosize - 1); unlock_extent(tree, cur, cur + iosize - 1);
end_page_read(page, false, cur, iosize); end_page_read(page, false, cur, iosize);
...@@ -3588,8 +3647,7 @@ int btrfs_do_readpage(struct page *page, struct extent_map **em_cached, ...@@ -3588,8 +3647,7 @@ int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
static inline void contiguous_readpages(struct page *pages[], int nr_pages, static inline void contiguous_readpages(struct page *pages[], int nr_pages,
u64 start, u64 end, u64 start, u64 end,
struct extent_map **em_cached, struct extent_map **em_cached,
struct bio **bio, struct btrfs_bio_ctrl *bio_ctrl,
unsigned long *bio_flags,
u64 *prev_em_start) u64 *prev_em_start)
{ {
struct btrfs_inode *inode = BTRFS_I(pages[0]->mapping->host); struct btrfs_inode *inode = BTRFS_I(pages[0]->mapping->host);
...@@ -3598,7 +3656,7 @@ static inline void contiguous_readpages(struct page *pages[], int nr_pages, ...@@ -3598,7 +3656,7 @@ static inline void contiguous_readpages(struct page *pages[], int nr_pages,
btrfs_lock_and_flush_ordered_range(inode, start, end, NULL); btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
for (index = 0; index < nr_pages; index++) { for (index = 0; index < nr_pages; index++) {
btrfs_do_readpage(pages[index], em_cached, bio, bio_flags, btrfs_do_readpage(pages[index], em_cached, bio_ctrl,
REQ_RAHEAD, prev_em_start); REQ_RAHEAD, prev_em_start);
put_page(pages[index]); put_page(pages[index]);
} }
...@@ -3787,11 +3845,12 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode, ...@@ -3787,11 +3845,12 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
page->index, cur, end); page->index, cur, end);
} }
ret = submit_extent_page(opf | write_flags, wbc, page, ret = submit_extent_page(opf | write_flags, wbc,
&epd->bio_ctrl, page,
disk_bytenr, iosize, disk_bytenr, iosize,
cur - page_offset(page), &epd->bio, cur - page_offset(page),
end_bio_extent_writepage, end_bio_extent_writepage,
0, 0, 0, false); 0, 0, false);
if (ret) { if (ret) {
SetPageError(page); SetPageError(page);
if (PageWriteback(page)) if (PageWriteback(page))
...@@ -4222,10 +4281,10 @@ static int write_one_subpage_eb(struct extent_buffer *eb, ...@@ -4222,10 +4281,10 @@ static int write_one_subpage_eb(struct extent_buffer *eb,
if (no_dirty_ebs) if (no_dirty_ebs)
clear_page_dirty_for_io(page); clear_page_dirty_for_io(page);
ret = submit_extent_page(REQ_OP_WRITE | write_flags, wbc, page, ret = submit_extent_page(REQ_OP_WRITE | write_flags, wbc,
eb->start, eb->len, eb->start - page_offset(page), &epd->bio_ctrl, page, eb->start, eb->len,
&epd->bio, end_bio_extent_buffer_writepage, 0, 0, 0, eb->start - page_offset(page),
false); end_bio_extent_buffer_writepage, 0, 0, false);
if (ret) { if (ret) {
btrfs_subpage_clear_writeback(fs_info, page, eb->start, eb->len); btrfs_subpage_clear_writeback(fs_info, page, eb->start, eb->len);
set_btree_ioerr(page, eb); set_btree_ioerr(page, eb);
...@@ -4285,10 +4344,10 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb, ...@@ -4285,10 +4344,10 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
clear_page_dirty_for_io(p); clear_page_dirty_for_io(p);
set_page_writeback(p); set_page_writeback(p);
ret = submit_extent_page(REQ_OP_WRITE | write_flags, wbc, ret = submit_extent_page(REQ_OP_WRITE | write_flags, wbc,
p, disk_bytenr, PAGE_SIZE, 0, &epd->bio_ctrl, p, disk_bytenr,
&epd->bio, PAGE_SIZE, 0,
end_bio_extent_buffer_writepage, end_bio_extent_buffer_writepage,
0, 0, 0, false); 0, 0, false);
if (ret) { if (ret) {
set_btree_ioerr(p, eb); set_btree_ioerr(p, eb);
if (PageWriteback(p)) if (PageWriteback(p))
...@@ -4504,7 +4563,7 @@ int btree_write_cache_pages(struct address_space *mapping, ...@@ -4504,7 +4563,7 @@ int btree_write_cache_pages(struct address_space *mapping,
{ {
struct extent_buffer *eb_context = NULL; struct extent_buffer *eb_context = NULL;
struct extent_page_data epd = { struct extent_page_data epd = {
.bio = NULL, .bio_ctrl = { 0 },
.extent_locked = 0, .extent_locked = 0,
.sync_io = wbc->sync_mode == WB_SYNC_ALL, .sync_io = wbc->sync_mode == WB_SYNC_ALL,
}; };
...@@ -4786,7 +4845,7 @@ int extent_write_full_page(struct page *page, struct writeback_control *wbc) ...@@ -4786,7 +4845,7 @@ int extent_write_full_page(struct page *page, struct writeback_control *wbc)
{ {
int ret; int ret;
struct extent_page_data epd = { struct extent_page_data epd = {
.bio = NULL, .bio_ctrl = { 0 },
.extent_locked = 0, .extent_locked = 0,
.sync_io = wbc->sync_mode == WB_SYNC_ALL, .sync_io = wbc->sync_mode == WB_SYNC_ALL,
}; };
...@@ -4813,7 +4872,7 @@ int extent_write_locked_range(struct inode *inode, u64 start, u64 end, ...@@ -4813,7 +4872,7 @@ int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
PAGE_SHIFT; PAGE_SHIFT;
struct extent_page_data epd = { struct extent_page_data epd = {
.bio = NULL, .bio_ctrl = { 0 },
.extent_locked = 1, .extent_locked = 1,
.sync_io = mode == WB_SYNC_ALL, .sync_io = mode == WB_SYNC_ALL,
}; };
...@@ -4856,7 +4915,7 @@ int extent_writepages(struct address_space *mapping, ...@@ -4856,7 +4915,7 @@ int extent_writepages(struct address_space *mapping,
{ {
int ret = 0; int ret = 0;
struct extent_page_data epd = { struct extent_page_data epd = {
.bio = NULL, .bio_ctrl = { 0 },
.extent_locked = 0, .extent_locked = 0,
.sync_io = wbc->sync_mode == WB_SYNC_ALL, .sync_io = wbc->sync_mode == WB_SYNC_ALL,
}; };
...@@ -4873,8 +4932,7 @@ int extent_writepages(struct address_space *mapping, ...@@ -4873,8 +4932,7 @@ int extent_writepages(struct address_space *mapping,
void extent_readahead(struct readahead_control *rac) void extent_readahead(struct readahead_control *rac)
{ {
struct bio *bio = NULL; struct btrfs_bio_ctrl bio_ctrl = { 0 };
unsigned long bio_flags = 0;
struct page *pagepool[16]; struct page *pagepool[16];
struct extent_map *em_cached = NULL; struct extent_map *em_cached = NULL;
u64 prev_em_start = (u64)-1; u64 prev_em_start = (u64)-1;
...@@ -4885,14 +4943,14 @@ void extent_readahead(struct readahead_control *rac) ...@@ -4885,14 +4943,14 @@ void extent_readahead(struct readahead_control *rac)
u64 contig_end = contig_start + readahead_batch_length(rac) - 1; u64 contig_end = contig_start + readahead_batch_length(rac) - 1;
contiguous_readpages(pagepool, nr, contig_start, contig_end, contiguous_readpages(pagepool, nr, contig_start, contig_end,
&em_cached, &bio, &bio_flags, &prev_em_start); &em_cached, &bio_ctrl, &prev_em_start);
} }
if (em_cached) if (em_cached)
free_extent_map(em_cached); free_extent_map(em_cached);
if (bio) { if (bio_ctrl.bio) {
if (submit_one_bio(bio, 0, bio_flags)) if (submit_one_bio(bio_ctrl.bio, 0, bio_ctrl.bio_flags))
return; return;
} }
} }
...@@ -6182,7 +6240,7 @@ static int read_extent_buffer_subpage(struct extent_buffer *eb, int wait, ...@@ -6182,7 +6240,7 @@ static int read_extent_buffer_subpage(struct extent_buffer *eb, int wait,
struct btrfs_fs_info *fs_info = eb->fs_info; struct btrfs_fs_info *fs_info = eb->fs_info;
struct extent_io_tree *io_tree; struct extent_io_tree *io_tree;
struct page *page = eb->pages[0]; struct page *page = eb->pages[0];
struct bio *bio = NULL; struct btrfs_bio_ctrl bio_ctrl = { 0 };
int ret = 0; int ret = 0;
ASSERT(!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags)); ASSERT(!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags));
...@@ -6213,9 +6271,10 @@ static int read_extent_buffer_subpage(struct extent_buffer *eb, int wait, ...@@ -6213,9 +6271,10 @@ static int read_extent_buffer_subpage(struct extent_buffer *eb, int wait,
check_buffer_tree_ref(eb); check_buffer_tree_ref(eb);
btrfs_subpage_clear_error(fs_info, page, eb->start, eb->len); btrfs_subpage_clear_error(fs_info, page, eb->start, eb->len);
ret = submit_extent_page(REQ_OP_READ | REQ_META, NULL, page, eb->start, ret = submit_extent_page(REQ_OP_READ | REQ_META, NULL, &bio_ctrl,
eb->len, eb->start - page_offset(page), &bio, page, eb->start, eb->len,
end_bio_extent_readpage, mirror_num, 0, 0, eb->start - page_offset(page),
end_bio_extent_readpage, mirror_num, 0,
true); true);
if (ret) { if (ret) {
/* /*
...@@ -6225,10 +6284,11 @@ static int read_extent_buffer_subpage(struct extent_buffer *eb, int wait, ...@@ -6225,10 +6284,11 @@ static int read_extent_buffer_subpage(struct extent_buffer *eb, int wait,
*/ */
atomic_dec(&eb->io_pages); atomic_dec(&eb->io_pages);
} }
if (bio) { if (bio_ctrl.bio) {
int tmp; int tmp;
tmp = submit_one_bio(bio, mirror_num, 0); tmp = submit_one_bio(bio_ctrl.bio, mirror_num, 0);
bio_ctrl.bio = NULL;
if (tmp < 0) if (tmp < 0)
return tmp; return tmp;
} }
...@@ -6251,8 +6311,7 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num) ...@@ -6251,8 +6311,7 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num)
int all_uptodate = 1; int all_uptodate = 1;
int num_pages; int num_pages;
unsigned long num_reads = 0; unsigned long num_reads = 0;
struct bio *bio = NULL; struct btrfs_bio_ctrl bio_ctrl = { 0 };
unsigned long bio_flags = 0;
if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags)) if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
return 0; return 0;
...@@ -6316,9 +6375,9 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num) ...@@ -6316,9 +6375,9 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num)
ClearPageError(page); ClearPageError(page);
err = submit_extent_page(REQ_OP_READ | REQ_META, NULL, err = submit_extent_page(REQ_OP_READ | REQ_META, NULL,
page, page_offset(page), PAGE_SIZE, 0, &bio_ctrl, page, page_offset(page),
&bio, end_bio_extent_readpage, PAGE_SIZE, 0, end_bio_extent_readpage,
mirror_num, 0, 0, false); mirror_num, 0, false);
if (err) { if (err) {
/* /*
* We failed to submit the bio so it's the * We failed to submit the bio so it's the
...@@ -6335,8 +6394,9 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num) ...@@ -6335,8 +6394,9 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num)
} }
} }
if (bio) { if (bio_ctrl.bio) {
err = submit_one_bio(bio, mirror_num, bio_flags); err = submit_one_bio(bio_ctrl.bio, mirror_num, bio_ctrl.bio_flags);
bio_ctrl.bio = NULL;
if (err) if (err)
return err; return err;
} }
......
...@@ -101,6 +101,17 @@ struct extent_buffer { ...@@ -101,6 +101,17 @@ struct extent_buffer {
#endif #endif
}; };
/*
* Structure to record info about the bio being assembled, and other info like
* how many bytes are there before stripe/ordered extent boundary.
*/
struct btrfs_bio_ctrl {
struct bio *bio;
unsigned long bio_flags;
u32 len_to_stripe_boundary;
u32 len_to_oe_boundary;
};
/* /*
* Structure to record how many bytes and which ranges are set/cleared * Structure to record how many bytes and which ranges are set/cleared
*/ */
...@@ -169,7 +180,7 @@ int try_release_extent_buffer(struct page *page); ...@@ -169,7 +180,7 @@ int try_release_extent_buffer(struct page *page);
int __must_check submit_one_bio(struct bio *bio, int mirror_num, int __must_check submit_one_bio(struct bio *bio, int mirror_num,
unsigned long bio_flags); unsigned long bio_flags);
int btrfs_do_readpage(struct page *page, struct extent_map **em_cached, int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
struct bio **bio, unsigned long *bio_flags, struct btrfs_bio_ctrl *bio_ctrl,
unsigned int read_flags, u64 *prev_em_start); unsigned int read_flags, u64 *prev_em_start);
int extent_write_full_page(struct page *page, struct writeback_control *wbc); int extent_write_full_page(struct page *page, struct writeback_control *wbc);
int extent_write_locked_range(struct inode *inode, u64 start, u64 end, int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
......
...@@ -2229,33 +2229,6 @@ static blk_status_t btrfs_submit_bio_start(struct inode *inode, struct bio *bio, ...@@ -2229,33 +2229,6 @@ static blk_status_t btrfs_submit_bio_start(struct inode *inode, struct bio *bio,
return btrfs_csum_one_bio(BTRFS_I(inode), bio, 0, 0); return btrfs_csum_one_bio(BTRFS_I(inode), bio, 0, 0);
} }
bool btrfs_bio_fits_in_ordered_extent(struct page *page, struct bio *bio,
unsigned int size)
{
struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct btrfs_ordered_extent *ordered;
u64 len = bio->bi_iter.bi_size + size;
bool ret = true;
ASSERT(btrfs_is_zoned(fs_info));
ASSERT(fs_info->max_zone_append_size > 0);
ASSERT(bio_op(bio) == REQ_OP_ZONE_APPEND);
/* Ordered extent not yet created, so we're good */
ordered = btrfs_lookup_ordered_extent(inode, page_offset(page));
if (!ordered)
return ret;
if ((bio->bi_iter.bi_sector << SECTOR_SHIFT) + len >
ordered->disk_bytenr + ordered->disk_num_bytes)
ret = false;
btrfs_put_ordered_extent(ordered);
return ret;
}
static blk_status_t extract_ordered_extent(struct btrfs_inode *inode, static blk_status_t extract_ordered_extent(struct btrfs_inode *inode,
struct bio *bio, loff_t file_offset) struct bio *bio, loff_t file_offset)
{ {
...@@ -8297,15 +8270,14 @@ int btrfs_readpage(struct file *file, struct page *page) ...@@ -8297,15 +8270,14 @@ int btrfs_readpage(struct file *file, struct page *page)
struct btrfs_inode *inode = BTRFS_I(page->mapping->host); struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
u64 start = page_offset(page); u64 start = page_offset(page);
u64 end = start + PAGE_SIZE - 1; u64 end = start + PAGE_SIZE - 1;
unsigned long bio_flags = 0; struct btrfs_bio_ctrl bio_ctrl = { 0 };
struct bio *bio = NULL;
int ret; int ret;
btrfs_lock_and_flush_ordered_range(inode, start, end, NULL); btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
ret = btrfs_do_readpage(page, NULL, &bio, &bio_flags, 0, NULL); ret = btrfs_do_readpage(page, NULL, &bio_ctrl, 0, NULL);
if (bio) if (bio_ctrl.bio)
ret = submit_one_bio(bio, 0, bio_flags); ret = submit_one_bio(bio_ctrl.bio, 0, bio_ctrl.bio_flags);
return ret; return ret;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment