Commit 0145aa38 authored by Li Zetao's avatar Li Zetao Committed by David Sterba

btrfs: convert try_release_subpage_extent_buffer() to take a folio

The old page API is being gradually replaced and converted to use folio
to improve code readability and avoid repeated conversion between page
and folio. Use folio_pos() instead of page_offset(), which is more
consistent with folio usage. At the same time, folio_test_private() can
handle a folio directly without converting from page to folio first.
Signed-off-by: Li Zetao <lizetao1@huawei.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent d4aeb5f7
...@@ -4077,11 +4077,11 @@ static struct extent_buffer *get_next_extent_buffer( ...@@ -4077,11 +4077,11 @@ static struct extent_buffer *get_next_extent_buffer(
return found; return found;
} }
static int try_release_subpage_extent_buffer(struct page *page) static int try_release_subpage_extent_buffer(struct folio *folio)
{ {
struct btrfs_fs_info *fs_info = page_to_fs_info(page); struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
u64 cur = page_offset(page); u64 cur = folio_pos(folio);
const u64 end = page_offset(page) + PAGE_SIZE; const u64 end = cur + PAGE_SIZE;
int ret; int ret;
while (cur < end) { while (cur < end) {
...@@ -4096,7 +4096,7 @@ static int try_release_subpage_extent_buffer(struct page *page) ...@@ -4096,7 +4096,7 @@ static int try_release_subpage_extent_buffer(struct page *page)
* with spinlock rather than RCU. * with spinlock rather than RCU.
*/ */
spin_lock(&fs_info->buffer_lock); spin_lock(&fs_info->buffer_lock);
eb = get_next_extent_buffer(fs_info, page_folio(page), cur); eb = get_next_extent_buffer(fs_info, folio, cur);
if (!eb) { if (!eb) {
/* No more eb in the page range after or at cur */ /* No more eb in the page range after or at cur */
spin_unlock(&fs_info->buffer_lock); spin_unlock(&fs_info->buffer_lock);
...@@ -4137,12 +4137,12 @@ static int try_release_subpage_extent_buffer(struct page *page) ...@@ -4137,12 +4137,12 @@ static int try_release_subpage_extent_buffer(struct page *page)
* Finally to check if we have cleared folio private, as if we have * Finally to check if we have cleared folio private, as if we have
* released all ebs in the page, the folio private should be cleared now. * released all ebs in the page, the folio private should be cleared now.
*/ */
spin_lock(&page->mapping->i_private_lock); spin_lock(&folio->mapping->i_private_lock);
if (!folio_test_private(page_folio(page))) if (!folio_test_private(folio))
ret = 1; ret = 1;
else else
ret = 0; ret = 0;
spin_unlock(&page->mapping->i_private_lock); spin_unlock(&folio->mapping->i_private_lock);
return ret; return ret;
} }
...@@ -4153,7 +4153,7 @@ int try_release_extent_buffer(struct page *page) ...@@ -4153,7 +4153,7 @@ int try_release_extent_buffer(struct page *page)
struct extent_buffer *eb; struct extent_buffer *eb;
if (page_to_fs_info(page)->nodesize < PAGE_SIZE) if (page_to_fs_info(page)->nodesize < PAGE_SIZE)
return try_release_subpage_extent_buffer(page); return try_release_subpage_extent_buffer(page_folio(page));
/* /*
* We need to make sure nobody is changing folio private, as we rely on * We need to make sure nobody is changing folio private, as we rely on
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment