Commit 2609c928 authored by Josef Bacik, committed by David Sterba

btrfs: convert btrfs_run_delalloc_range() to take a folio

Now that every function that btrfs_run_delalloc_range calls takes a
folio, update it to take a folio and update the callers.
Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent d9c75027
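
The conversion relies on the standard page-to-folio helper mapping: folio_pos() stands in for page_offset(), folio_size() replaces the hard-coded PAGE_SIZE (and also covers large folios), and the page_folio() wrappers at the call sites become unnecessary once the argument is already a folio. A minimal sketch of that equivalence, assuming kernel context; the two helpers below are illustrative only and not part of this commit:

    #include <linux/mm.h>
    #include <linux/pagemap.h>

    /*
     * Illustrative only: the same "does [start, end] overlap the locked
     * unit?" check written against a page and against a folio.  For an
     * order-0 folio the two are identical; the folio form also handles
     * large folios, where folio_size() > PAGE_SIZE.
     */
    static inline bool range_overlaps_page(struct page *page, u64 start, u64 end)
    {
    	return !(end <= page_offset(page) ||
    		 start >= page_offset(page) + PAGE_SIZE);
    }

    static inline bool range_overlaps_folio(struct folio *folio, u64 start, u64 end)
    {
    	return !(end <= folio_pos(folio) ||
    		 start >= folio_pos(folio) + folio_size(folio));
    }
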
fs/btrfs/btrfs_inode.h
@@ -596,7 +596,7 @@ int btrfs_prealloc_file_range_trans(struct inode *inode,
 				    struct btrfs_trans_handle *trans, int mode,
 				    u64 start, u64 num_bytes, u64 min_size,
 				    loff_t actual_len, u64 *alloc_hint);
-int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page,
+int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct folio *locked_folio,
 			     u64 start, u64 end, struct writeback_control *wbc);
 int btrfs_writepage_cow_fixup(struct page *page);
 int btrfs_encoded_io_compression_from_extent(struct btrfs_fs_info *fs_info,
fs/btrfs/extent_io.c
@@ -1254,7 +1254,7 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
 		if (ret >= 0) {
 			/* No errors hit so far, run the current delalloc range. */
-			ret = btrfs_run_delalloc_range(inode, &folio->page,
+			ret = btrfs_run_delalloc_range(inode, folio,
 						       found_start,
 						       found_start + found_len - 1,
 						       wbc);
fs/btrfs/inode.c
@@ -2287,42 +2287,40 @@ static bool should_nocow(struct btrfs_inode *inode, u64 start, u64 end)
  * Function to process delayed allocation (create CoW) for ranges which are
  * being touched for the first time.
  */
-int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page,
+int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct folio *locked_folio,
 			     u64 start, u64 end, struct writeback_control *wbc)
 {
 	const bool zoned = btrfs_is_zoned(inode->root->fs_info);
 	int ret;
 
 	/*
-	 * The range must cover part of the @locked_page, or a return of 1
+	 * The range must cover part of the @locked_folio, or a return of 1
 	 * can confuse the caller.
 	 */
-	ASSERT(!(end <= page_offset(locked_page) ||
-		 start >= page_offset(locked_page) + PAGE_SIZE));
+	ASSERT(!(end <= folio_pos(locked_folio) ||
+		 start >= folio_pos(locked_folio) + folio_size(locked_folio)));
 
 	if (should_nocow(inode, start, end)) {
-		ret = run_delalloc_nocow(inode, page_folio(locked_page), start,
-					 end);
+		ret = run_delalloc_nocow(inode, locked_folio, start, end);
 		goto out;
 	}
 
 	if (btrfs_inode_can_compress(inode) &&
 	    inode_need_compress(inode, start, end) &&
-	    run_delalloc_compressed(inode, page_folio(locked_page), start, end,
-				    wbc))
+	    run_delalloc_compressed(inode, locked_folio, start, end, wbc))
		return 1;
 
 	if (zoned)
-		ret = run_delalloc_cow(inode, page_folio(locked_page), start,
-				       end, wbc, true);
+		ret = run_delalloc_cow(inode, locked_folio, start, end, wbc,
+				       true);
 	else
-		ret = cow_file_range(inode, page_folio(locked_page), start, end,
-				     NULL, false, false);
+		ret = cow_file_range(inode, locked_folio, start, end, NULL,
+				     false, false);
 
 out:
 	if (ret < 0)
-		btrfs_cleanup_ordered_extents(inode, page_folio(locked_page),
-					      start, end - start + 1);
+		btrfs_cleanup_ordered_extents(inode, locked_folio, start,
+					      end - start + 1);
 	return ret;
 }
 
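
For reference, a call site that has not yet been converted and still only holds a struct page can bridge to the new signature with page_folio(), the same helper the pre-conversion call sites above were using. A hedged sketch, assuming inode, locked_page, start, end and wbc are already in scope:

    /* Hypothetical unconverted caller: pass the folio that contains the
     * locked page straight through. */
    ret = btrfs_run_delalloc_range(inode, page_folio(locked_page),
    				   start, end, wbc);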