Commit 55151ea9 authored by Qu Wenruo, committed by David Sterba

btrfs: migrate subpage code to folio interfaces

Although subpage itself conflicts with higher order folios, since subpage
(sectorsize < PAGE_SIZE and nodesize < PAGE_SIZE) means we will never
need a higher order folio, there is a hidden pitfall:

- btrfs_page_*() helpers

Those helpers are an abstraction to handle both subpage and non-subpage
cases, which means we're going to pass page pointers to those helpers.

And since those helpers are shared between data and metadata paths, it's
unavoidable to let them handle folios, including higher order folios.
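
The conversion pattern is mechanical and visible throughout the diff
below: the helper now takes a folio, and call sites that still only hold
a struct page wrap it with page_folio() at the boundary. For example,
from the defrag hunk below:

	/* Before: the helper accepted a raw page pointer. */
	btrfs_page_clamp_set_dirty(fs_info, pages[i], start, len);

	/* After: the helper is folio based; page-only call sites convert
	 * with page_folio() to reach the page's backing folio. */
	btrfs_folio_clamp_set_dirty(fs_info, page_folio(pages[i]), start, len);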

Meanwhile for the true subpage case, we should only have single page
backed folios anyway, thus add a new ASSERT() in btrfs_subpage_assert()
to ensure that.
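
The subpage.c changes themselves are not expanded in this view, so the
following is only a minimal sketch of where such a check would sit, not
the verbatim patch:

	static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info,
					 struct folio *folio, u64 start, u32 len)
	{
		/*
		 * True subpage (sectorsize < PAGE_SIZE) never needs a higher
		 * order folio, so the backing folio must be a single page.
		 */
		ASSERT(folio_order(folio) == 0);

		/* The existing range checks on start/len follow here. */
	}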

Also since those helpers are shared between data and metadata, add some
extra ASSERT()s in the data path to make sure we only get single page
backed folios for now.
Signed-off-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 8d993618
@@ -306,8 +306,8 @@ static noinline void end_compressed_writeback(const struct compressed_bio *cb)
 	for (i = 0; i < ret; i++) {
 		struct folio *folio = fbatch.folios[i];
 
-		btrfs_page_clamp_clear_writeback(fs_info, &folio->page,
-						 cb->start, cb->len);
+		btrfs_folio_clamp_clear_writeback(fs_info, folio,
+						  cb->start, cb->len);
 	}
 	folio_batch_release(&fbatch);
 }
@@ -541,7 +541,8 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 		 * subpage::readers and to unlock the page.
 		 */
 		if (fs_info->sectorsize < PAGE_SIZE)
-			btrfs_subpage_start_reader(fs_info, page, cur, add_size);
+			btrfs_subpage_start_reader(fs_info, page_folio(page),
+						   cur, add_size);
 		put_page(page);
 		cur += add_size;
 	}
...
@@ -1189,7 +1189,7 @@ static int defrag_one_locked_target(struct btrfs_inode *inode,
 	/* Update the page status */
 	for (i = start_index - first_index; i <= last_index - first_index; i++) {
 		ClearPageChecked(pages[i]);
-		btrfs_page_clamp_set_dirty(fs_info, pages[i], start, len);
+		btrfs_folio_clamp_set_dirty(fs_info, page_folio(pages[i]), start, len);
 	}
 	btrfs_delalloc_release_extents(inode, len);
 	extent_changeset_free(data_reserved);
...
@@ -284,8 +284,8 @@ blk_status_t btree_csum_one_bio(struct btrfs_bio *bbio)
 	if (WARN_ON_ONCE(found_start != eb->start))
 		return BLK_STS_IOERR;
-	if (WARN_ON(!btrfs_page_test_uptodate(fs_info, folio_page(eb->folios[0], 0),
-					      eb->start, eb->len)))
+	if (WARN_ON(!btrfs_folio_test_uptodate(fs_info, eb->folios[0],
+					       eb->start, eb->len)))
 		return BLK_STS_IOERR;
 
 	ASSERT(memcmp_extent_buffer(eb, fs_info->fs_devices->metadata_uuid,
...
[diff collapsed, contents not shown]
@@ -111,8 +111,8 @@ static void btrfs_drop_pages(struct btrfs_fs_info *fs_info,
 		 * accessed as prepare_pages should have marked them accessed
 		 * in prepare_pages via find_or_create_page()
 		 */
-		btrfs_page_clamp_clear_checked(fs_info, pages[i], block_start,
-					       block_len);
+		btrfs_folio_clamp_clear_checked(fs_info, page_folio(pages[i]),
+						block_start, block_len);
 		unlock_page(pages[i]);
 		put_page(pages[i]);
 	}
@@ -168,9 +168,12 @@ int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages,
 	for (i = 0; i < num_pages; i++) {
 		struct page *p = pages[i];
 
-		btrfs_page_clamp_set_uptodate(fs_info, p, start_pos, num_bytes);
-		btrfs_page_clamp_clear_checked(fs_info, p, start_pos, num_bytes);
-		btrfs_page_clamp_set_dirty(fs_info, p, start_pos, num_bytes);
+		btrfs_folio_clamp_set_uptodate(fs_info, page_folio(p),
+					       start_pos, num_bytes);
+		btrfs_folio_clamp_clear_checked(fs_info, page_folio(p),
+						start_pos, num_bytes);
+		btrfs_folio_clamp_set_dirty(fs_info, page_folio(p),
+					    start_pos, num_bytes);
 	}
 
 	/*
...
@@ -439,8 +439,8 @@ static void io_ctl_drop_pages(struct btrfs_io_ctl *io_ctl)
 	for (i = 0; i < io_ctl->num_pages; i++) {
 		if (io_ctl->pages[i]) {
-			btrfs_page_clear_checked(io_ctl->fs_info,
-						 io_ctl->pages[i],
+			btrfs_folio_clear_checked(io_ctl->fs_info,
+						  page_folio(io_ctl->pages[i]),
 						  page_offset(io_ctl->pages[i]),
 						  PAGE_SIZE);
 			unlock_page(io_ctl->pages[i]);
...
@@ -456,8 +456,8 @@ static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode,
 		 * range, then btrfs_mark_ordered_io_finished() will handle
 		 * the ordered extent accounting for the range.
 		 */
-		btrfs_page_clamp_clear_ordered(inode->root->fs_info, page,
-					       offset, bytes);
+		btrfs_folio_clamp_clear_ordered(inode->root->fs_info,
+						page_folio(page), offset, bytes);
 		put_page(page);
 	}
@@ -2802,7 +2802,7 @@ static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
 					    PAGE_SIZE, !ret);
 		clear_page_dirty_for_io(page);
 	}
-	btrfs_page_clear_checked(fs_info, page, page_start, PAGE_SIZE);
+	btrfs_folio_clear_checked(fs_info, page_folio(page), page_start, PAGE_SIZE);
 	unlock_page(page);
 	put_page(page);
 	kfree(fixup);
@@ -2857,7 +2857,7 @@ int btrfs_writepage_cow_fixup(struct page *page)
 	 * page->mapping outside of the page lock.
 	 */
 	ihold(inode);
-	btrfs_page_set_checked(fs_info, page, page_offset(page), PAGE_SIZE);
+	btrfs_folio_set_checked(fs_info, page_folio(page), page_offset(page), PAGE_SIZE);
 	get_page(page);
 	btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL);
 	fixup->page = page;
@@ -4776,9 +4776,10 @@ int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len,
 		memzero_page(page, (block_start - page_offset(page)) + offset,
 			     len);
 	}
-	btrfs_page_clear_checked(fs_info, page, block_start,
-				 block_end + 1 - block_start);
-	btrfs_page_set_dirty(fs_info, page, block_start, block_end + 1 - block_start);
+	btrfs_folio_clear_checked(fs_info, page_folio(page), block_start,
+				  block_end + 1 - block_start);
+	btrfs_folio_set_dirty(fs_info, page_folio(page), block_start,
+			      block_end + 1 - block_start);
 	unlock_extent(io_tree, block_start, block_end, &cached_state);
 
 	if (only_release_metadata)
@@ -8009,7 +8010,7 @@ static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
 							page_end);
 		ASSERT(range_end + 1 - cur < U32_MAX);
 		range_len = range_end + 1 - cur;
-		if (!btrfs_page_test_ordered(fs_info, &folio->page, cur, range_len)) {
+		if (!btrfs_folio_test_ordered(fs_info, folio, cur, range_len)) {
 			/*
 			 * If Ordered (Private2) is cleared, it means endio has
 			 * already been executed for the range.
@@ -8018,7 +8019,7 @@ static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
 			 */
 			goto next;
 		}
-		btrfs_page_clear_ordered(fs_info, &folio->page, cur, range_len);
+		btrfs_folio_clear_ordered(fs_info, folio, cur, range_len);
 		/*
 		 * IO on this page will never be started, so we need to account
@@ -8088,7 +8089,7 @@ static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
 	 * did something wrong.
 	 */
 	ASSERT(!folio_test_ordered(folio));
-	btrfs_page_clear_checked(fs_info, &folio->page, folio_pos(folio), folio_size(folio));
+	btrfs_folio_clear_checked(fs_info, folio, folio_pos(folio), folio_size(folio));
 	if (!inode_evicting)
 		__btrfs_release_folio(folio, GFP_NOFS);
 	clear_page_extent_mapped(&folio->page);
@@ -8112,6 +8113,7 @@ static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
 vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
 {
 	struct page *page = vmf->page;
+	struct folio *folio = page_folio(page);
 	struct inode *inode = file_inode(vmf->vma->vm_file);
 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
@@ -8128,6 +8130,8 @@ vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
 	u64 page_end;
 	u64 end;
 
+	ASSERT(folio_order(folio) == 0);
+
 	reserved_space = PAGE_SIZE;
 
 	sb_start_pagefault(inode->i_sb);
@@ -8231,9 +8235,9 @@ vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
 	if (zero_start != PAGE_SIZE)
 		memzero_page(page, zero_start, PAGE_SIZE - zero_start);
 
-	btrfs_page_clear_checked(fs_info, page, page_start, PAGE_SIZE);
-	btrfs_page_set_dirty(fs_info, page, page_start, end + 1 - page_start);
-	btrfs_page_set_uptodate(fs_info, page, page_start, end + 1 - page_start);
+	btrfs_folio_clear_checked(fs_info, folio, page_start, PAGE_SIZE);
+	btrfs_folio_set_dirty(fs_info, folio, page_start, end + 1 - page_start);
+	btrfs_folio_set_uptodate(fs_info, folio, page_start, end + 1 - page_start);
 
 	btrfs_set_inode_last_sub_trans(BTRFS_I(inode));
@@ -9819,7 +9823,9 @@ void btrfs_set_range_writeback(struct btrfs_inode *inode, u64 start, u64 end)
 		page = find_get_page(inode->vfs_inode.i_mapping, index);
 		ASSERT(page); /* Pages should be in the extent_io_tree */
 
-		btrfs_page_set_writeback(fs_info, page, start, len);
+		/* This is for data, which doesn't yet support larger folio. */
+		ASSERT(folio_order(page_folio(page)) == 0);
+		btrfs_folio_set_writeback(fs_info, page_folio(page), start, len);
 		put_page(page);
 		index++;
 	}
...
@@ -323,9 +323,10 @@ static bool can_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
 		 *
 		 * If there's no such bit, we need to skip to next range.
 		 */
-		if (!btrfs_page_test_ordered(fs_info, page, file_offset, len))
+		if (!btrfs_folio_test_ordered(fs_info, page_folio(page),
+					      file_offset, len))
 			return false;
-		btrfs_page_clear_ordered(fs_info, page, file_offset, len);
+		btrfs_folio_clear_ordered(fs_info, page_folio(page), file_offset, len);
 	}
 
 	/* Now we're fine to update the accounting. */
...
@@ -141,9 +141,9 @@ static int copy_inline_to_page(struct btrfs_inode *inode,
 	if (datal < block_size)
 		memzero_page(page, datal, block_size - datal);
 
-	btrfs_page_set_uptodate(fs_info, page, file_offset, block_size);
-	btrfs_page_clear_checked(fs_info, page, file_offset, block_size);
-	btrfs_page_set_dirty(fs_info, page, file_offset, block_size);
+	btrfs_folio_set_uptodate(fs_info, page_folio(page), file_offset, block_size);
+	btrfs_folio_clear_checked(fs_info, page_folio(page), file_offset, block_size);
+	btrfs_folio_set_dirty(fs_info, page_folio(page), file_offset, block_size);
 out_unlock:
 	if (page) {
 		unlock_page(page);
...
@@ -2895,7 +2895,7 @@ static noinline_for_stack int prealloc_file_extent_cluster(
 	 * will re-read the whole page anyway.
 	 */
 	if (page) {
-		btrfs_subpage_clear_uptodate(fs_info, page, i_size,
+		btrfs_subpage_clear_uptodate(fs_info, page_folio(page), i_size,
 				round_up(i_size, PAGE_SIZE) - i_size);
 		unlock_page(page);
 		put_page(page);
@@ -3070,7 +3070,8 @@ static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
 				      clamped_len);
 			goto release_page;
 		}
-		btrfs_page_set_dirty(fs_info, page, clamped_start, clamped_len);
+		btrfs_folio_set_dirty(fs_info, page_folio(page),
+				      clamped_start, clamped_len);
 
 		/*
 		 * Set the boundary if it's inside the page.
...
[diff collapsed, contents not shown]
@@ -77,9 +77,8 @@ bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info, struct address_space
 void btrfs_init_subpage_info(struct btrfs_subpage_info *subpage_info, u32 sectorsize);
 int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
-			 struct page *page, enum btrfs_subpage_type type);
-void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info,
-			  struct page *page);
+			 struct folio *folio, enum btrfs_subpage_type type);
+void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info, struct folio *folio);
 
 /* Allocate additional data where page represents more than one sector */
 struct btrfs_subpage *btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
@@ -90,52 +89,52 @@ void btrfs_folio_inc_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *
 void btrfs_folio_dec_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio);
 
 void btrfs_subpage_start_reader(const struct btrfs_fs_info *fs_info,
-				struct page *page, u64 start, u32 len);
+				struct folio *folio, u64 start, u32 len);
 void btrfs_subpage_end_reader(const struct btrfs_fs_info *fs_info,
-			      struct page *page, u64 start, u32 len);
+			      struct folio *folio, u64 start, u32 len);
 void btrfs_subpage_start_writer(const struct btrfs_fs_info *fs_info,
-				struct page *page, u64 start, u32 len);
+				struct folio *folio, u64 start, u32 len);
 bool btrfs_subpage_end_and_test_writer(const struct btrfs_fs_info *fs_info,
-				       struct page *page, u64 start, u32 len);
-int btrfs_page_start_writer_lock(const struct btrfs_fs_info *fs_info,
-				 struct page *page, u64 start, u32 len);
-void btrfs_page_end_writer_lock(const struct btrfs_fs_info *fs_info,
-				struct page *page, u64 start, u32 len);
+				       struct folio *folio, u64 start, u32 len);
+int btrfs_folio_start_writer_lock(const struct btrfs_fs_info *fs_info,
+				  struct folio *folio, u64 start, u32 len);
+void btrfs_folio_end_writer_lock(const struct btrfs_fs_info *fs_info,
+				 struct folio *folio, u64 start, u32 len);
 
 /*
  * Template for subpage related operations.
  *
- * btrfs_subpage_*() are for call sites where the page has subpage attached and
- * the range is ensured to be inside the page.
+ * btrfs_subpage_*() are for call sites where the folio has subpage attached and
+ * the range is ensured to be inside the folio's single page.
  *
- * btrfs_page_*() are for call sites where the page can either be subpage
- * specific or regular page. The function will handle both cases.
- * But the range still needs to be inside the page.
+ * btrfs_folio_*() are for call sites where the page can either be subpage
+ * specific or regular folios. The function will handle both cases.
+ * But the range still needs to be inside one single page.
 *
- * btrfs_page_clamp_*() are similar to btrfs_page_*(), except the range doesn't
+ * btrfs_folio_clamp_*() are similar to btrfs_folio_*(), except the range doesn't
  * need to be inside the page. Those functions will truncate the range
  * automatically.
  */
 #define DECLARE_BTRFS_SUBPAGE_OPS(name) \
 void btrfs_subpage_set_##name(const struct btrfs_fs_info *fs_info, \
-		struct page *page, u64 start, u32 len); \
+		struct folio *folio, u64 start, u32 len); \
 void btrfs_subpage_clear_##name(const struct btrfs_fs_info *fs_info, \
-		struct page *page, u64 start, u32 len); \
+		struct folio *folio, u64 start, u32 len); \
 bool btrfs_subpage_test_##name(const struct btrfs_fs_info *fs_info, \
-		struct page *page, u64 start, u32 len); \
-void btrfs_page_set_##name(const struct btrfs_fs_info *fs_info, \
-		struct page *page, u64 start, u32 len); \
-void btrfs_page_clear_##name(const struct btrfs_fs_info *fs_info, \
-		struct page *page, u64 start, u32 len); \
-bool btrfs_page_test_##name(const struct btrfs_fs_info *fs_info, \
-		struct page *page, u64 start, u32 len); \
-void btrfs_page_clamp_set_##name(const struct btrfs_fs_info *fs_info, \
-		struct page *page, u64 start, u32 len); \
-void btrfs_page_clamp_clear_##name(const struct btrfs_fs_info *fs_info, \
-		struct page *page, u64 start, u32 len); \
-bool btrfs_page_clamp_test_##name(const struct btrfs_fs_info *fs_info, \
-		struct page *page, u64 start, u32 len);
+		struct folio *folio, u64 start, u32 len); \
+void btrfs_folio_set_##name(const struct btrfs_fs_info *fs_info, \
+		struct folio *folio, u64 start, u32 len); \
+void btrfs_folio_clear_##name(const struct btrfs_fs_info *fs_info, \
+		struct folio *folio, u64 start, u32 len); \
+bool btrfs_folio_test_##name(const struct btrfs_fs_info *fs_info, \
+		struct folio *folio, u64 start, u32 len); \
+void btrfs_folio_clamp_set_##name(const struct btrfs_fs_info *fs_info, \
+		struct folio *folio, u64 start, u32 len); \
+void btrfs_folio_clamp_clear_##name(const struct btrfs_fs_info *fs_info, \
+		struct folio *folio, u64 start, u32 len); \
+bool btrfs_folio_clamp_test_##name(const struct btrfs_fs_info *fs_info, \
+		struct folio *folio, u64 start, u32 len);
 
 DECLARE_BTRFS_SUBPAGE_OPS(uptodate);
 DECLARE_BTRFS_SUBPAGE_OPS(dirty);
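
To make the template comment above concrete, here is a short usage
sketch; the delalloc_* range variables are illustrative and not from
this patch. The plain btrfs_folio_*() variants require a range already
contained in one page, while the btrfs_folio_clamp_*() variants truncate
the range to the folio themselves:

	/* Caller has already clamped the range to this folio's page. */
	btrfs_folio_set_dirty(fs_info, folio, start, len);

	/* Range may extend past the folio (e.g. a whole delalloc range);
	 * the clamp variant truncates it to the folio boundaries. */
	btrfs_folio_clamp_set_dirty(fs_info, folio, delalloc_start, delalloc_len);
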
@@ -144,13 +143,12 @@ DECLARE_BTRFS_SUBPAGE_OPS(ordered);
 DECLARE_BTRFS_SUBPAGE_OPS(checked);
 
 bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
-					struct page *page, u64 start, u32 len);
-void btrfs_page_assert_not_dirty(const struct btrfs_fs_info *fs_info,
-				 struct page *page);
-void btrfs_page_unlock_writer(struct btrfs_fs_info *fs_info, struct page *page,
-			      u64 start, u32 len);
+					struct folio *folio, u64 start, u32 len);
+void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info, struct folio *folio);
+void btrfs_folio_unlock_writer(struct btrfs_fs_info *fs_info,
+			       struct folio *folio, u64 start, u32 len);
 void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info,
-				      struct page *page, u64 start, u32 len);
+				      struct folio *folio, u64 start, u32 len);
 
 #endif