Commit 9783e4de authored by Christoph Hellwig, committed by David Sterba

btrfs: remove end_extent_writepage

end_extent_writepage is a small helper that combines a call to
btrfs_mark_ordered_io_finished with conditional error-only calls to
btrfs_page_clear_uptodate and mapping_set_error, using a somewhat
unfortunate calling convention that passes an inclusive end instead
of the len expected by the underlying functions.

Remove end_extent_writepage and open code it in the 4 callers. Of
those, two are already error-only and thus don't need the extra
conditional, and one already has the mapping_set_error call, so a
duplicate call can be avoided.
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 6648cedd
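
The "unfortunate calling convention" mentioned above is simply an inclusive end
offset where the underlying helpers want a length. A minimal userspace sketch of
that conversion (illustration only, not kernel code; the 4096-byte PAGE_SIZE and
the example offsets are assumptions) mirrors the len = end + 1 - start
computation and the U32_MAX assertion in the removed function:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096u

    int main(void)
    {
            /* Inclusive end of one page, the form end_extent_writepage() took. */
            uint64_t start = 8 * (uint64_t)PAGE_SIZE;
            uint64_t end = start + PAGE_SIZE - 1;

            /*
             * Length, the form helpers such as btrfs_mark_ordered_io_finished()
             * and btrfs_page_clear_uptodate() expect.
             */
            assert(end + 1 - start <= UINT32_MAX);
            uint32_t len = (uint32_t)(end + 1 - start);

            printf("start=%llu end=%llu len=%u\n",
                   (unsigned long long)start, (unsigned long long)end,
                   (unsigned)len);
            return 0;
    }

With the helper gone, the callers in the diff below do this conversion directly,
passing PAGE_SIZE or cur_len instead of an inclusive end.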
@@ -464,29 +464,6 @@ static void end_page_read(struct page *page, bool uptodate, u64 start, u32 len)
 	btrfs_subpage_end_reader(fs_info, page, start, len);
 }
-/* lots and lots of room for performance fixes in the end_bio funcs */
-void end_extent_writepage(struct page *page, int err, u64 start, u64 end)
-{
-	struct btrfs_inode *inode;
-	const bool uptodate = (err == 0);
-	int ret = 0;
-	u32 len = end + 1 - start;
-
-	ASSERT(end + 1 - start <= U32_MAX);
-	ASSERT(page && page->mapping);
-
-	inode = BTRFS_I(page->mapping->host);
-	btrfs_mark_ordered_io_finished(inode, page, start, len, uptodate);
-
-	if (!uptodate) {
-		const struct btrfs_fs_info *fs_info = inode->root->fs_info;
-
-		btrfs_page_clear_uptodate(fs_info, page, start, len);
-		ret = err < 0 ? err : -EIO;
-		mapping_set_error(page->mapping, ret);
-	}
-}
 /*
  * after a writepage IO is done, we need to:
  * clear the uptodate bits on error
@@ -1452,7 +1429,6 @@ static int __extent_writepage(struct page *page, struct btrfs_bio_ctrl *bio_ctrl
 	struct folio *folio = page_folio(page);
 	struct inode *inode = page->mapping->host;
 	const u64 page_start = page_offset(page);
-	const u64 page_end = page_start + PAGE_SIZE - 1;
 	int ret;
 	int nr = 0;
 	size_t pg_offset;
@@ -1496,8 +1472,13 @@ static int __extent_writepage(struct page *page, struct btrfs_bio_ctrl *bio_ctrl
 		set_page_writeback(page);
 		end_page_writeback(page);
 	}
-	if (ret)
-		end_extent_writepage(page, ret, page_start, page_end);
+	if (ret) {
+		btrfs_mark_ordered_io_finished(BTRFS_I(inode), page, page_start,
+					       PAGE_SIZE, !ret);
+		btrfs_page_clear_uptodate(btrfs_sb(inode->i_sb), page,
+					  page_start, PAGE_SIZE);
+		mapping_set_error(page->mapping, ret);
+	}
 	unlock_page(page);
 	ASSERT(ret <= 0);
 	return ret;
@@ -2222,6 +2203,7 @@ int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
 	while (cur <= end) {
 		u64 cur_end = min(round_down(cur, PAGE_SIZE) + PAGE_SIZE - 1, end);
+		u32 cur_len = cur_end + 1 - cur;
 		struct page *page;
 		int nr = 0;
@@ -2245,9 +2227,13 @@ int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
 			set_page_writeback(page);
 			end_page_writeback(page);
 		}
-		if (ret)
-			end_extent_writepage(page, ret, cur, cur_end);
-		btrfs_page_unlock_writer(fs_info, page, cur, cur_end + 1 - cur);
+		if (ret) {
+			btrfs_mark_ordered_io_finished(BTRFS_I(inode), page,
+						       cur, cur_len, !ret);
+			btrfs_page_clear_uptodate(fs_info, page, cur, cur_len);
+			mapping_set_error(page->mapping, ret);
+		}
+		btrfs_page_unlock_writer(fs_info, page, cur, cur_len);
 		if (ret < 0) {
 			found_error = true;
 			first_error = ret;
......
@@ -276,8 +276,6 @@ void btrfs_clear_buffer_dirty(struct btrfs_trans_handle *trans,
 int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array);
-void end_extent_writepage(struct page *page, int err, u64 start, u64 end);
 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
 bool find_lock_delalloc_range(struct inode *inode,
 			      struct page *locked_page, u64 *start,
......
@@ -424,11 +424,10 @@ static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode,
 	while (index <= end_index) {
 		/*
-		 * For locked page, we will call end_extent_writepage() on it
-		 * in run_delalloc_range() for the error handling. That
-		 * end_extent_writepage() function will call
-		 * btrfs_mark_ordered_io_finished() to clear page Ordered and
-		 * run the ordered extent accounting.
+		 * For locked page, we will call btrfs_mark_ordered_io_finished
+		 * through btrfs_mark_ordered_io_finished() on it
+		 * in run_delalloc_range() for the error handling, which will
+		 * clear page Ordered and run the ordered extent accounting.
 		 *
 		 * Here we can't just clear the Ordered bit, or
 		 * btrfs_mark_ordered_io_finished() would skip the accounting
@@ -1158,11 +1157,16 @@ static int submit_uncompressed_range(struct btrfs_inode *inode,
 		btrfs_cleanup_ordered_extents(inode, locked_page, start, end - start + 1);
 		if (locked_page) {
 			const u64 page_start = page_offset(locked_page);
-			const u64 page_end = page_start + PAGE_SIZE - 1;
 
 			set_page_writeback(locked_page);
 			end_page_writeback(locked_page);
-			end_extent_writepage(locked_page, ret, page_start, page_end);
+			btrfs_mark_ordered_io_finished(inode, locked_page,
+						       page_start, PAGE_SIZE,
+						       !ret);
+			btrfs_page_clear_uptodate(inode->root->fs_info,
+						  locked_page, page_start,
+						  PAGE_SIZE);
+			mapping_set_error(locked_page->mapping, ret);
 			unlock_page(locked_page);
 		}
 		return ret;
@@ -2837,23 +2841,19 @@ struct btrfs_writepage_fixup {
 static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
 {
-	struct btrfs_writepage_fixup *fixup;
+	struct btrfs_writepage_fixup *fixup =
+		container_of(work, struct btrfs_writepage_fixup, work);
 	struct btrfs_ordered_extent *ordered;
 	struct extent_state *cached_state = NULL;
 	struct extent_changeset *data_reserved = NULL;
-	struct page *page;
-	struct btrfs_inode *inode;
-	u64 page_start;
-	u64 page_end;
+	struct page *page = fixup->page;
+	struct btrfs_inode *inode = fixup->inode;
+	struct btrfs_fs_info *fs_info = inode->root->fs_info;
+	u64 page_start = page_offset(page);
+	u64 page_end = page_offset(page) + PAGE_SIZE - 1;
 	int ret = 0;
 	bool free_delalloc_space = true;
 
-	fixup = container_of(work, struct btrfs_writepage_fixup, work);
-	page = fixup->page;
-	inode = fixup->inode;
-	page_start = page_offset(page);
-	page_end = page_offset(page) + PAGE_SIZE - 1;
 	/*
 	 * This is similar to page_mkwrite, we need to reserve the space before
 	 * we take the page lock.
@@ -2946,10 +2946,12 @@ static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
 		 * to reflect the errors and clean the page.
 		 */
 		mapping_set_error(page->mapping, ret);
-		end_extent_writepage(page, ret, page_start, page_end);
+		btrfs_mark_ordered_io_finished(inode, page, page_start,
+					       PAGE_SIZE, !ret);
+		btrfs_page_clear_uptodate(fs_info, page, page_start, PAGE_SIZE);
 		clear_page_dirty_for_io(page);
 	}
-	btrfs_page_clear_checked(inode->root->fs_info, page, page_start, PAGE_SIZE);
+	btrfs_page_clear_checked(fs_info, page, page_start, PAGE_SIZE);
 	unlock_page(page);
 	put_page(page);
 	kfree(fixup);
......