Commit c9583ada authored by Qu Wenruo, committed by David Sterba

btrfs: avoid double clean up when submit_one_bio() failed

[BUG]
When running generic/475 with 64K page size and 4K sector size, there is
a very high chance (almost 100%) of a hang, with most data pages left
locked and no one left to unlock them.

[CAUSE]
Since commit 1784b7d5 ("btrfs: handle csum lookup errors properly on
reads"), if the checksum lookup fails due to a metadata IO error, we
return an error from btrfs_submit_data_bio().

This will cause the page to be unlocked twice in btrfs_do_readpage():

 btrfs_do_readpage()
 |- submit_extent_page()
 |  |- submit_one_bio()
 |     |- btrfs_submit_data_bio()
 |        |- if (ret) {
 |        |-     bio->bi_status = ret;
 |        |-     bio_endio(bio); }
 |               In the endio function, we will call end_page_read()
 |               and unlock_extent() to clean up the subpage range.
 |
 |- if (ret) {
 |-        unlock_extent(); end_page_read() }
           Here we unlock the extent and clean up the subpage range
           again.
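
A tiny userspace model of this double-cleanup pattern (all names here,
such as submit_bio_model() and end_page_read_model(), are invented for
illustration and are not kernel APIs): the submission path cleans up on
failure yet still reports the failure, so the caller cleans up once more:

  #include <stdio.h>

  #define EIO 5

  static int page_locks = 1;      /* the page starts locked, as in readpage */

  static void end_page_read_model(void)
  {
          page_locks--;           /* "unlock" the page range once */
  }

  static int submit_bio_model(int fail)
  {
          if (fail) {
                  end_page_read_model();  /* the endio path cleans up ... */
                  return -EIO;            /* ... yet the error is returned too */
          }
          return 0;
  }

  int main(void)
  {
          if (submit_bio_model(1) < 0)
                  end_page_read_model();  /* the caller cleans up again */

          printf("page_locks = %d\n", page_locks);        /* prints -1 */
          return 0;
  }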

For unlock_extent(), a double unlock is mostly harmless.

But for end_page_read() it is not, especially in the subpage case: there
we call btrfs_subpage_end_reader() to decrease the reader count, and use
that number to determine whether we need to unlock the full page.

If the same range is accounted twice, the reader count can underflow,
leaving the page locked with no one left to unlock it.
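
A matching userspace sketch of the underflow (simplified, with invented
names; in the kernel the counter lives in struct btrfs_subpage and the
unlock happens only on the exact transition to zero): once a range is
ended twice, the count skips past zero and the unlock never fires:

  #include <stdio.h>

  static int readers = 3;         /* sectors of the page still under read */
  static int page_locked = 1;

  static void subpage_end_reader_model(int nr_sectors)
  {
          readers -= nr_sectors;
          if (readers == 0)       /* unlock only when we hit exactly zero */
                  page_locked = 0;
  }

  int main(void)
  {
          subpage_end_reader_model(2);    /* endio ends a 2-sector range */
          subpage_end_reader_model(2);    /* caller ends the same range again */
          subpage_end_reader_model(1);    /* last reader: count is -2, not 0 */

          /* prints readers=-2 page_locked=1: the page stays locked forever */
          printf("readers=%d page_locked=%d\n", readers, page_locked);
          return 0;
  }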

[FIX]
Commit 1784b7d5 ("btrfs: handle csum lookup errors properly on reads")
itself is completely fine; the problem is that our existing code does not
handle errors from the bio submission hooks properly.

This patch makes submit_one_bio() return void, so that callers can never
do cleanup of their own when a bio submission hook fails.
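
Reduced to the same userspace model (names again invented for
illustration), the fixed convention looks like this: the submit helper
owns the failure path and returns nothing, so the single cleanup cannot
be repeated by the caller:

  #include <stdio.h>

  static int page_locks = 1;

  static void end_page_read_model(void)
  {
          page_locks--;
  }

  /* Returns void: failure is handled entirely in the "endio" path */
  static void submit_bio_model(int fail)
  {
          if (fail)
                  end_page_read_model();
  }

  int main(void)
  {
          submit_bio_model(1);    /* nothing returned, nothing to re-clean */

          printf("page_locks = %d\n", page_locks);        /* prints 0 */
          return 0;
  }
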
Signed-off-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent dd7382a2
fs/btrfs/extent_io.c:

@@ -166,24 +166,27 @@ static int add_extent_changeset(struct extent_state *state, u32 bits,
 	return ret;
 }
 
-int __must_check submit_one_bio(struct bio *bio, int mirror_num,
-				unsigned long bio_flags)
+void submit_one_bio(struct bio *bio, int mirror_num, unsigned long bio_flags)
 {
-	blk_status_t ret = 0;
 	struct extent_io_tree *tree = bio->bi_private;
 
 	bio->bi_private = NULL;
 
 	/* Caller should ensure the bio has at least some range added */
 	ASSERT(bio->bi_iter.bi_size);
 
 	if (is_data_inode(tree->private_data))
-		ret = btrfs_submit_data_bio(tree->private_data, bio, mirror_num,
-					    bio_flags);
+		btrfs_submit_data_bio(tree->private_data, bio, mirror_num,
+				      bio_flags);
 	else
-		ret = btrfs_submit_metadata_bio(tree->private_data, bio,
-						mirror_num, bio_flags);
-
-	return blk_status_to_errno(ret);
+		btrfs_submit_metadata_bio(tree->private_data, bio,
+					  mirror_num, bio_flags);
+	/*
+	 * Above submission hooks will handle the error by ending the bio,
+	 * which will do the cleanup properly. So here we should not return
+	 * any error, or the caller of submit_extent_page() will do cleanup
+	 * again, causing problems.
+	 */
 }
 
 /* Cleanup unsubmitted bios */
@@ -204,13 +207,12 @@ static void end_write_bio(struct extent_page_data *epd, int ret)
  * Return 0 if everything is OK.
  * Return <0 for error.
  */
-static int __must_check flush_write_bio(struct extent_page_data *epd)
+static void flush_write_bio(struct extent_page_data *epd)
 {
-	int ret = 0;
 	struct bio *bio = epd->bio_ctrl.bio;
 
 	if (bio) {
-		ret = submit_one_bio(bio, 0, 0);
+		submit_one_bio(bio, 0, 0);
 		/*
 		 * Clean up of epd->bio is handled by its endio function.
 		 * And endio is either triggered by successful bio execution
@@ -220,7 +222,6 @@ static int __must_check flush_write_bio(struct extent_page_data *epd)
 		 */
 		epd->bio_ctrl.bio = NULL;
 	}
-	return ret;
 }
 
 int __init extent_state_cache_init(void)
@@ -3446,10 +3447,8 @@ static int submit_extent_page(unsigned int opf,
 	ASSERT(pg_offset < PAGE_SIZE && size <= PAGE_SIZE &&
 	       pg_offset + size <= PAGE_SIZE);
 	if (force_bio_submit && bio_ctrl->bio) {
-		ret = submit_one_bio(bio_ctrl->bio, mirror_num, bio_ctrl->bio_flags);
+		submit_one_bio(bio_ctrl->bio, mirror_num, bio_ctrl->bio_flags);
 		bio_ctrl->bio = NULL;
-		if (ret < 0)
-			return ret;
 	}
 
 	while (cur < pg_offset + size) {
@@ -3490,11 +3489,8 @@ static int submit_extent_page(unsigned int opf,
 		if (added < size - offset) {
 			/* The bio should contain some page(s) */
 			ASSERT(bio_ctrl->bio->bi_iter.bi_size);
-			ret = submit_one_bio(bio_ctrl->bio, mirror_num,
-					bio_ctrl->bio_flags);
+			submit_one_bio(bio_ctrl->bio, mirror_num, bio_ctrl->bio_flags);
 			bio_ctrl->bio = NULL;
-			if (ret < 0)
-				return ret;
 		}
 		cur += added;
 	}
@@ -4242,14 +4238,12 @@ static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb
 			  struct extent_page_data *epd)
 {
 	struct btrfs_fs_info *fs_info = eb->fs_info;
-	int i, num_pages, failed_page_nr;
+	int i, num_pages;
 	int flush = 0;
 	int ret = 0;
 
 	if (!btrfs_try_tree_write_lock(eb)) {
-		ret = flush_write_bio(epd);
-		if (ret < 0)
-			return ret;
+		flush_write_bio(epd);
 		flush = 1;
 		btrfs_tree_lock(eb);
 	}
@@ -4259,9 +4253,7 @@ static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb
 		if (!epd->sync_io)
 			return 0;
 		if (!flush) {
-			ret = flush_write_bio(epd);
-			if (ret < 0)
-				return ret;
+			flush_write_bio(epd);
 			flush = 1;
 		}
 		while (1) {
@@ -4308,39 +4300,13 @@ static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb
 		if (!trylock_page(p)) {
 			if (!flush) {
-				int err;
-
-				err = flush_write_bio(epd);
-				if (err < 0) {
-					ret = err;
-					failed_page_nr = i;
-					goto err_unlock;
-				}
+				flush_write_bio(epd);
 				flush = 1;
 			}
 			lock_page(p);
 		}
 	}
 
-	return ret;
-err_unlock:
-	/* Unlock already locked pages */
-	for (i = 0; i < failed_page_nr; i++)
-		unlock_page(eb->pages[i]);
-	/*
-	 * Clear EXTENT_BUFFER_WRITEBACK and wake up anyone waiting on it.
-	 * Also set back EXTENT_BUFFER_DIRTY so future attempts to this eb can
-	 * be made and undo everything done before.
-	 */
-	btrfs_tree_lock(eb);
-	spin_lock(&eb->refs_lock);
-	set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
-	end_extent_buffer_writeback(eb);
-	spin_unlock(&eb->refs_lock);
-	percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, eb->len,
-				 fs_info->dirty_metadata_batch);
-	btrfs_clear_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
-	btrfs_tree_unlock(eb);
-
 	return ret;
 }
@@ -4962,13 +4928,19 @@ int btree_write_cache_pages(struct address_space *mapping,
 	 * if the fs already has error.
 	 */
 	if (!BTRFS_FS_ERROR(fs_info)) {
-		ret = flush_write_bio(&epd);
+		flush_write_bio(&epd);
 	} else {
 		ret = -EROFS;
 		end_write_bio(&epd, ret);
 	}
 out:
 	btrfs_zoned_meta_io_unlock(fs_info);
+	/*
+	 * We can get ret > 0 from submit_extent_page() indicating how many ebs
+	 * were submitted. Reset it to 0 to avoid false alerts for the caller.
+	 */
+	if (ret > 0)
+		ret = 0;
 	return ret;
 }
@@ -5070,8 +5042,7 @@ static int extent_write_cache_pages(struct address_space *mapping,
 			 * tmpfs file mapping
 			 */
 			if (!trylock_page(page)) {
-				ret = flush_write_bio(epd);
-				BUG_ON(ret < 0);
+				flush_write_bio(epd);
 				lock_page(page);
 			}
@@ -5081,10 +5052,8 @@ static int extent_write_cache_pages(struct address_space *mapping,
 			}
 
 			if (wbc->sync_mode != WB_SYNC_NONE) {
-				if (PageWriteback(page)) {
-					ret = flush_write_bio(epd);
-					BUG_ON(ret < 0);
-				}
+				if (PageWriteback(page))
+					flush_write_bio(epd);
 				wait_on_page_writeback(page);
 			}
@@ -5124,8 +5093,7 @@ static int extent_write_cache_pages(struct address_space *mapping,
 		 * page in our current bio, and thus deadlock, so flush the
 		 * write bio here.
 		 */
-		ret = flush_write_bio(epd);
-		if (!ret)
-			goto retry;
+		flush_write_bio(epd);
+		goto retry;
 	}
@@ -5152,8 +5120,7 @@ int extent_write_full_page(struct page *page, struct writeback_control *wbc)
 		return ret;
 	}
 
-	ret = flush_write_bio(&epd);
-	ASSERT(ret <= 0);
+	flush_write_bio(&epd);
 	return ret;
 }
@@ -5215,7 +5182,7 @@ int extent_write_locked_range(struct inode *inode, u64 start, u64 end)
 	}
 
 	if (!found_error)
-		ret = flush_write_bio(&epd);
+		flush_write_bio(&epd);
 	else
 		end_write_bio(&epd, ret);
@@ -5248,7 +5215,7 @@ int extent_writepages(struct address_space *mapping,
 		end_write_bio(&epd, ret);
 		return ret;
 	}
-	ret = flush_write_bio(&epd);
+	flush_write_bio(&epd);
 	return ret;
 }
@@ -5271,10 +5238,8 @@ void extent_readahead(struct readahead_control *rac)
 	if (em_cached)
 		free_extent_map(em_cached);
 
-	if (bio_ctrl.bio) {
-		if (submit_one_bio(bio_ctrl.bio, 0, bio_ctrl.bio_flags))
-			return;
-	}
+	if (bio_ctrl.bio)
+		submit_one_bio(bio_ctrl.bio, 0, bio_ctrl.bio_flags);
 }
 
 /*
@@ -6654,12 +6619,8 @@ static int read_extent_buffer_subpage(struct extent_buffer *eb, int wait,
 			atomic_dec(&eb->io_pages);
 	}
 	if (bio_ctrl.bio) {
-		int tmp;
-
-		tmp = submit_one_bio(bio_ctrl.bio, mirror_num, 0);
+		submit_one_bio(bio_ctrl.bio, mirror_num, 0);
 		bio_ctrl.bio = NULL;
-		if (tmp < 0)
-			return tmp;
 	}
 
 	if (ret || wait != WAIT_COMPLETE)
 		return ret;
@@ -6772,10 +6733,8 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num)
 	}
 
 	if (bio_ctrl.bio) {
-		err = submit_one_bio(bio_ctrl.bio, mirror_num, bio_ctrl.bio_flags);
+		submit_one_bio(bio_ctrl.bio, mirror_num, bio_ctrl.bio_flags);
 		bio_ctrl.bio = NULL;
-		if (err)
-			return err;
 	}
 
 	if (ret || wait != WAIT_COMPLETE)

fs/btrfs/extent_io.h:

@@ -178,8 +178,7 @@ typedef struct extent_map *(get_extent_t)(struct btrfs_inode *inode,
 int try_release_extent_mapping(struct page *page, gfp_t mask);
 int try_release_extent_buffer(struct page *page);
 
-int __must_check submit_one_bio(struct bio *bio, int mirror_num,
-				unsigned long bio_flags);
+void submit_one_bio(struct bio *bio, int mirror_num, unsigned long bio_flags);
 int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
 		      struct btrfs_bio_ctrl *bio_ctrl,
 		      unsigned int read_flags, u64 *prev_em_start);

fs/btrfs/inode.c:

@@ -8141,13 +8141,12 @@ int btrfs_readpage(struct file *file, struct page *page)
 	btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
 
 	ret = btrfs_do_readpage(page, NULL, &bio_ctrl, 0, NULL);
-	if (bio_ctrl.bio) {
-		int ret2;
-
-		ret2 = submit_one_bio(bio_ctrl.bio, 0, bio_ctrl.bio_flags);
-		if (ret == 0)
-			ret = ret2;
-	}
+	/*
+	 * If btrfs_do_readpage() failed we will want to submit the assembled
+	 * bio to do the cleanup.
+	 */
+	if (bio_ctrl.bio)
+		submit_one_bio(bio_ctrl.bio, 0, bio_ctrl.bio_flags);
 	return ret;
 }