Commit 6648cedd authored by Christoph Hellwig, committed by David Sterba

btrfs: remove btrfs_writepage_endio_finish_ordered

btrfs_writepage_endio_finish_ordered is a small wrapper around
btrfs_mark_ordered_io_finished that just changes the argument passing
slightly and adds a tracepoint.

Move the tracepoint to btrfs_mark_ordered_io_finished, which means it
now also covers the error handling in btrfs_cleanup_ordered_extent, and
switch all callers to just call btrfs_mark_ordered_io_finished directly.
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent ef4e88e6
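
For callers, the conversion is mechanical: instead of handing the wrapper an inclusive [start, end] range, they now pass start plus a byte length directly to btrfs_mark_ordered_io_finished, and the tracepoint fires inside that helper. A minimal before/after sketch of a generic call site (illustrative only, not a verbatim hunk from this commit):

        /* Before: the wrapper took an inclusive end offset and emitted the
         * tracepoint itself before forwarding to btrfs_mark_ordered_io_finished.
         */
        btrfs_writepage_endio_finish_ordered(inode, page, start, end, uptodate);

        /* After: callers pass a length (end + 1 - start); the tracepoint now
         * fires inside btrfs_mark_ordered_io_finished, so it also covers the
         * error handling in btrfs_cleanup_ordered_extent.
         */
        btrfs_mark_ordered_io_finished(inode, page, start, end + 1 - start, uptodate);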
@@ -501,9 +501,6 @@ int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page
 			       u64 start, u64 end, int *page_started,
 			       unsigned long *nr_written, struct writeback_control *wbc);
 int btrfs_writepage_cow_fixup(struct page *page);
-void btrfs_writepage_endio_finish_ordered(struct btrfs_inode *inode,
-					  struct page *page, u64 start,
-					  u64 end, bool uptodate);
 int btrfs_encoded_io_compression_from_extent(struct btrfs_fs_info *fs_info,
 					     int compress_type);
 int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
@@ -471,17 +471,15 @@ void end_extent_writepage(struct page *page, int err, u64 start, u64 end)
 	struct btrfs_inode *inode;
 	const bool uptodate = (err == 0);
 	int ret = 0;
+	u32 len = end + 1 - start;
 
+	ASSERT(end + 1 - start <= U32_MAX);
 	ASSERT(page && page->mapping);
 	inode = BTRFS_I(page->mapping->host);
-	btrfs_writepage_endio_finish_ordered(inode, page, start, end, uptodate);
+	btrfs_mark_ordered_io_finished(inode, page, start, len, uptodate);
 
 	if (!uptodate) {
 		const struct btrfs_fs_info *fs_info = inode->root->fs_info;
-		u32 len;
 
-		ASSERT(end + 1 - start <= U32_MAX);
-		len = end + 1 - start;
-
 		btrfs_page_clear_uptodate(fs_info, page, start, len);
 		ret = err < 0 ? err : -EIO;
@@ -1349,6 +1347,7 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
 
 	bio_ctrl->end_io_func = end_bio_extent_writepage;
 	while (cur <= end) {
+		u32 len = end - cur + 1;
 		u64 disk_bytenr;
 		u64 em_end;
 		u64 dirty_range_start = cur;
@@ -1356,8 +1355,8 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
 		u32 iosize;
 
 		if (cur >= i_size) {
-			btrfs_writepage_endio_finish_ordered(inode, page, cur,
-							     end, true);
+			btrfs_mark_ordered_io_finished(inode, page, cur, len,
+						       true);
 			/*
 			 * This range is beyond i_size, thus we don't need to
 			 * bother writing back.
@@ -1366,7 +1365,7 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
 			 * writeback the sectors with subpage dirty bits,
 			 * causing writeback without ordered extent.
 			 */
-			btrfs_page_clear_dirty(fs_info, page, cur, end + 1 - cur);
+			btrfs_page_clear_dirty(fs_info, page, cur, len);
 			break;
 		}
@@ -1377,7 +1376,7 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
 			continue;
 		}
 
-		em = btrfs_get_extent(inode, NULL, 0, cur, end - cur + 1);
+		em = btrfs_get_extent(inode, NULL, 0, cur, len);
 		if (IS_ERR(em)) {
 			ret = PTR_ERR_OR_ZERO(em);
 			goto out_error;
@@ -3388,15 +3388,6 @@ int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered)
 	return btrfs_finish_one_ordered(ordered);
 }
 
-void btrfs_writepage_endio_finish_ordered(struct btrfs_inode *inode,
-					  struct page *page, u64 start,
-					  u64 end, bool uptodate)
-{
-	trace_btrfs_writepage_end_io_hook(inode, start, end, uptodate);
-	btrfs_mark_ordered_io_finished(inode, page, start, end + 1 - start,
-				       uptodate);
-}
-
 /*
  * Verify the checksum for a single sector without any extra action that depend
  * on the type of I/O.
@@ -410,6 +410,10 @@ void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
 	unsigned long flags;
 	u64 cur = file_offset;
 
+	trace_btrfs_writepage_end_io_hook(inode, file_offset,
+					  file_offset + num_bytes - 1,
+					  uptodate);
+
 	spin_lock_irqsave(&tree->lock, flags);
 	while (cur < file_offset + num_bytes) {
 		u64 entry_end;