Commit a7922801 authored by Josef Bacik, committed by David Sterba

btrfs: convert btrfs_mark_ordered_io_finished() to take a folio

We only need a folio now, make it take a folio as an argument and update
all of the callers.
Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent aef665d6
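
In short, the function's declaration changes from taking a struct page to taking a struct folio; the full hunks follow below:

    /* before */
    void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
                                        struct page *page, u64 file_offset,
                                        u64 num_bytes, bool uptodate);

    /* after */
    void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
                                        struct folio *folio, u64 file_offset,
                                        u64 num_bytes, bool uptodate);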
@@ -1428,8 +1428,8 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
 		u32 iosize;
 
 		if (cur >= i_size) {
-			btrfs_mark_ordered_io_finished(inode, &folio->page, cur,
-						       len, true);
+			btrfs_mark_ordered_io_finished(inode, folio, cur, len,
+						       true);
 			/*
 			 * This range is beyond i_size, thus we don't need to
 			 * bother writing back.
@@ -1568,7 +1568,7 @@ static int __extent_writepage(struct folio *folio, struct btrfs_bio_ctrl *bio_ct
 		folio_end_writeback(folio);
 	}
 	if (ret) {
-		btrfs_mark_ordered_io_finished(BTRFS_I(inode), &folio->page,
+		btrfs_mark_ordered_io_finished(BTRFS_I(inode), folio,
 					       page_start, PAGE_SIZE, !ret);
 		mapping_set_error(folio->mapping, ret);
 	}
@@ -2330,7 +2330,7 @@ void extent_write_locked_range(struct inode *inode, const struct page *locked_pa
 			btrfs_folio_clear_writeback(fs_info, folio, cur, cur_len);
 		}
 		if (ret) {
-			btrfs_mark_ordered_io_finished(BTRFS_I(inode), &folio->page,
+			btrfs_mark_ordered_io_finished(BTRFS_I(inode), folio,
 						       cur, cur_len, !ret);
 			mapping_set_error(mapping, ret);
 		}
@@ -1144,7 +1144,8 @@ static void submit_uncompressed_range(struct btrfs_inode *inode,
 			set_page_writeback(locked_page);
 			end_page_writeback(locked_page);
-			btrfs_mark_ordered_io_finished(inode, locked_page,
+			btrfs_mark_ordered_io_finished(inode,
+						       page_folio(locked_page),
 						       page_start, PAGE_SIZE,
 						       !ret);
 			mapping_set_error(locked_page->mapping, ret);
@@ -2802,8 +2803,8 @@ static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
 		 * to reflect the errors and clean the page.
 		 */
 		mapping_set_error(page->mapping, ret);
-		btrfs_mark_ordered_io_finished(inode, page, page_start,
-					       PAGE_SIZE, !ret);
+		btrfs_mark_ordered_io_finished(inode, page_folio(page),
+					       page_start, PAGE_SIZE, !ret);
 		clear_page_dirty_for_io(page);
 	}
 	btrfs_folio_clear_checked(fs_info, page_folio(page), page_start, PAGE_SIZE);
@@ -449,8 +449,8 @@ void btrfs_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
 /*
  * Mark all ordered extents io inside the specified range finished.
  *
- * @page:	The involved page for the operation.
- *		For uncompressed buffered IO, the page status also needs to be
+ * @folio:	The involved folio for the operation.
+ *		For uncompressed buffered IO, the folio status also needs to be
  *		updated to indicate whether the pending ordered io is finished.
  *		Can be NULL for direct IO and compressed write.
  *		For these cases, callers are ensured they won't execute the
@@ -460,7 +460,7 @@ void btrfs_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
  *		extent(s) covering it.
  */
 void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
-				    struct page *page, u64 file_offset,
+				    struct folio *folio, u64 file_offset,
 				    u64 num_bytes, bool uptodate)
 {
 	struct rb_node *node;
@@ -524,8 +524,7 @@ void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
 		ASSERT(end + 1 - cur < U32_MAX);
 		len = end + 1 - cur;
 
-		if (can_finish_ordered_extent(entry, page_folio(page), cur, len,
-					      uptodate)) {
+		if (can_finish_ordered_extent(entry, folio, cur, len, uptodate)) {
 			spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
 			btrfs_queue_ordered_fn(entry);
 			spin_lock_irqsave(&inode->ordered_tree_lock, flags);
@@ -166,8 +166,8 @@ void btrfs_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
 			      struct folio *folio, u64 file_offset, u64 len,
 			      bool uptodate);
 void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
-				    struct page *page, u64 file_offset,
-				    u64 num_bytes, bool uptodate);
+				    struct folio *folio, u64 file_offset,
+				    u64 num_bytes, bool uptodate);
 bool btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
 				    struct btrfs_ordered_extent **cached,
 				    u64 file_offset, u64 io_size);
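
For reference, the caller-side pattern after this change: callers that already track a folio (e.g. __extent_writepage_io above) pass it directly, while callers that still only hold a struct page (e.g. submit_uncompressed_range and btrfs_writepage_fixup_worker) wrap it with page_folio(); per the updated comment, the folio may also be NULL for direct IO and compressed writes. A minimal sketch of the two call shapes taken from the hunks above (the surrounding variables are illustrative only):

    /* folio-based caller */
    btrfs_mark_ordered_io_finished(inode, folio, cur, len, true);

    /* page-based caller: convert with page_folio() */
    btrfs_mark_ordered_io_finished(inode, page_folio(page),
                                   page_start, PAGE_SIZE, !ret);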