Commit fbca46eb authored by Qu Wenruo, committed by David Sterba

btrfs: make nodesize >= PAGE_SIZE case to reuse the non-subpage routine

The reason we only support 64K page size for subpage is that, with a 64K
page size, we can ensure the metadata fits into one page no matter what
the nodesize is.

For other page sizes, especially 16K, this restriction becomes a real
limitation.

To remove that limitation, allow the nodesize >= PAGE_SIZE case to go
through the non-subpage routine.  With this, we can support 4K
sectorsize on 16K page size.

This introduces a smaller limitation of its own: the metadata can not
cross a page boundary.  That requirement is already met by recent mkfs
versions.

Another small improvement is that we can avoid the subpage overhead for
metadata when nodesize >= PAGE_SIZE.
For 4K sector size with 64K page size and node size, or 4K sector size
with 16K page size and node size, we no longer need to allocate extra
memory for the metadata pages.
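
A minimal user-space sketch of the resulting routing rule (illustration
only, not kernel code; the helper names data_subpage() and
meta_subpage() are hypothetical):

  /* Hypothetical illustration: data follows sectorsize, metadata follows nodesize. */
  #include <stdbool.h>
  #include <stdio.h>

  static bool data_subpage(unsigned int sectorsize, unsigned int page_size)
  {
          return sectorsize < page_size;  /* data keeps using the subpage routine */
  }

  static bool meta_subpage(unsigned int nodesize, unsigned int page_size)
  {
          return nodesize < page_size;    /* metadata now follows nodesize */
  }

  int main(void)
  {
          /* 4K sectorsize, 16K nodesize on a 16K page size system: */
          printf("data: %s\n", data_subpage(4096, 16384) ? "subpage" : "regular");
          printf("meta: %s\n", meta_subpage(16384, 16384) ? "subpage" : "regular");
          return 0;
  }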

Please note that this patch does not yet enable support for other page
sizes.
Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent e959d3c1
@@ -519,7 +519,7 @@ static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct bio_vec *bvec
 	u64 found_start;
 	struct extent_buffer *eb;
 
-	if (fs_info->sectorsize < PAGE_SIZE)
+	if (fs_info->nodesize < PAGE_SIZE)
 		return csum_dirty_subpage_buffers(fs_info, bvec);
 
 	eb = (struct extent_buffer *)page->private;
@@ -704,7 +704,7 @@ int btrfs_validate_metadata_buffer(struct btrfs_bio *bbio,
 	ASSERT(page->private);
 
-	if (btrfs_sb(page->mapping->host->i_sb)->sectorsize < PAGE_SIZE)
+	if (btrfs_sb(page->mapping->host->i_sb)->nodesize < PAGE_SIZE)
 		return validate_subpage_buffer(page, start, end, mirror);
 
 	eb = (struct extent_buffer *)page->private;
...
@@ -2711,7 +2711,7 @@ static void end_page_read(struct page *page, bool uptodate, u64 start, u32 len)
 		btrfs_page_set_error(fs_info, page, start, len);
 	}
 
-	if (fs_info->sectorsize == PAGE_SIZE)
+	if (!btrfs_is_subpage(fs_info, page))
 		unlock_page(page);
 	else
 		btrfs_subpage_end_reader(fs_info, page, start, len);
@@ -2944,7 +2944,7 @@ static void endio_readpage_release_extent(struct processed_extent *processed,
 static void begin_page_read(struct btrfs_fs_info *fs_info, struct page *page)
 {
 	ASSERT(PageLocked(page));
-	if (fs_info->sectorsize == PAGE_SIZE)
+	if (!btrfs_is_subpage(fs_info, page))
 		return;
 
 	ASSERT(PagePrivate(page));
@@ -2966,7 +2966,7 @@ static struct extent_buffer *find_extent_buffer_readpage(
 	 * For regular sectorsize, we can use page->private to grab extent
 	 * buffer
 	 */
-	if (fs_info->sectorsize == PAGE_SIZE) {
+	if (fs_info->nodesize >= PAGE_SIZE) {
 		ASSERT(PagePrivate(page) && page->private);
 		return (struct extent_buffer *)page->private;
 	}
@@ -3479,7 +3479,7 @@ static int attach_extent_buffer_page(struct extent_buffer *eb,
 	if (page->mapping)
 		lockdep_assert_held(&page->mapping->private_lock);
 
-	if (fs_info->sectorsize == PAGE_SIZE) {
+	if (fs_info->nodesize >= PAGE_SIZE) {
 		if (!PagePrivate(page))
 			attach_page_private(page, eb);
 		else
@@ -3514,7 +3514,7 @@ int set_page_extent_mapped(struct page *page)
 	fs_info = btrfs_sb(page->mapping->host->i_sb);
 
-	if (fs_info->sectorsize < PAGE_SIZE)
+	if (btrfs_is_subpage(fs_info, page))
 		return btrfs_attach_subpage(fs_info, page, BTRFS_SUBPAGE_DATA);
 
 	attach_page_private(page, (void *)EXTENT_PAGE_PRIVATE);
@@ -3531,7 +3531,7 @@ void clear_page_extent_mapped(struct page *page)
 		return;
 
 	fs_info = btrfs_sb(page->mapping->host->i_sb);
-	if (fs_info->sectorsize < PAGE_SIZE)
+	if (btrfs_is_subpage(fs_info, page))
 		return btrfs_detach_subpage(fs_info, page);
 
 	detach_page_private(page);
@@ -3878,7 +3878,7 @@ static void find_next_dirty_byte(struct btrfs_fs_info *fs_info,
 	 * For regular sector size == page size case, since one page only
 	 * contains one sector, we return the page offset directly.
 	 */
-	if (fs_info->sectorsize == PAGE_SIZE) {
+	if (!btrfs_is_subpage(fs_info, page)) {
 		*start = page_offset(page);
 		*end = page_offset(page) + PAGE_SIZE;
 		return;
@@ -4261,7 +4261,7 @@ static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb
 	 * Subpage metadata doesn't use page locking at all, so we can skip
 	 * the page locking.
 	 */
-	if (!ret || fs_info->sectorsize < PAGE_SIZE)
+	if (!ret || fs_info->nodesize < PAGE_SIZE)
 		return ret;
 
 	num_pages = num_extent_pages(eb);
@@ -4421,7 +4421,7 @@ static void end_bio_subpage_eb_writepage(struct bio *bio)
 	struct bvec_iter_all iter_all;
 
 	fs_info = btrfs_sb(bio_first_page_all(bio)->mapping->host->i_sb);
-	ASSERT(fs_info->sectorsize < PAGE_SIZE);
+	ASSERT(fs_info->nodesize < PAGE_SIZE);
 
 	ASSERT(!bio_flagged(bio, BIO_CLONED));
 	bio_for_each_segment_all(bvec, bio, iter_all) {
@@ -4748,7 +4748,7 @@ static int submit_eb_page(struct page *page, struct writeback_control *wbc,
 	if (!PagePrivate(page))
 		return 0;
 
-	if (btrfs_sb(page->mapping->host->i_sb)->sectorsize < PAGE_SIZE)
+	if (btrfs_sb(page->mapping->host->i_sb)->nodesize < PAGE_SIZE)
 		return submit_eb_subpage(page, wbc, epd);
 
 	spin_lock(&mapping->private_lock);
@@ -5805,7 +5805,7 @@ static void detach_extent_buffer_page(struct extent_buffer *eb, struct page *pag
 		return;
 	}
 
-	if (fs_info->sectorsize == PAGE_SIZE) {
+	if (fs_info->nodesize >= PAGE_SIZE) {
 		/*
 		 * We do this since we'll remove the pages after we've
 		 * removed the eb from the radix tree, so we could race
@@ -6125,7 +6125,7 @@ static struct extent_buffer *grab_extent_buffer(
 	 * don't try to insert two ebs for the same bytenr. So here we always
 	 * return NULL and just continue.
 	 */
-	if (fs_info->sectorsize < PAGE_SIZE)
+	if (fs_info->nodesize < PAGE_SIZE)
 		return NULL;
 
 	/* Page not yet attached to an extent buffer */
@@ -6147,6 +6147,30 @@ static struct extent_buffer *grab_extent_buffer(
 	return NULL;
 }
 
+static int check_eb_alignment(struct btrfs_fs_info *fs_info, u64 start)
+{
+	if (!IS_ALIGNED(start, fs_info->sectorsize)) {
+		btrfs_err(fs_info, "bad tree block start %llu", start);
+		return -EINVAL;
+	}
+
+	if (fs_info->nodesize < PAGE_SIZE &&
+	    offset_in_page(start) + fs_info->nodesize > PAGE_SIZE) {
+		btrfs_err(fs_info,
+		"tree block crosses page boundary, start %llu nodesize %u",
+			  start, fs_info->nodesize);
+		return -EINVAL;
+	}
+	if (fs_info->nodesize >= PAGE_SIZE &&
+	    !IS_ALIGNED(start, PAGE_SIZE)) {
+		btrfs_err(fs_info,
+		"tree block is not page aligned, start %llu nodesize %u",
+			  start, fs_info->nodesize);
+		return -EINVAL;
+	}
+	return 0;
+}
+
 struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
 					  u64 start, u64 owner_root, int level)
 {
@@ -6161,10 +6185,8 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
 	int uptodate = 1;
 	int ret;
 
-	if (!IS_ALIGNED(start, fs_info->sectorsize)) {
-		btrfs_err(fs_info, "bad tree block start %llu", start);
+	if (check_eb_alignment(fs_info, start))
 		return ERR_PTR(-EINVAL);
-	}
 
 #if BITS_PER_LONG == 32
 	if (start >= MAX_LFS_FILESIZE) {
@@ -6177,14 +6199,6 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
 		btrfs_warn_32bit_limit(fs_info);
 #endif
 
-	if (fs_info->sectorsize < PAGE_SIZE &&
-	    offset_in_page(start) + len > PAGE_SIZE) {
-		btrfs_err(fs_info,
-		"tree block crosses page boundary, start %llu nodesize %lu",
-			  start, len);
-		return ERR_PTR(-EINVAL);
-	}
-
 	eb = find_extent_buffer(fs_info, start);
 	if (eb)
 		return eb;
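
To make the alignment rules enforced by check_eb_alignment() concrete,
here is a stand-alone user-space sketch (illustration only; the
eb_start_ok() helper and the test values are hypothetical):

  #include <assert.h>

  /* Mirrors check_eb_alignment(): start offset and sizes in bytes. */
  static int eb_start_ok(unsigned long long start, unsigned int nodesize,
                         unsigned int page_size, unsigned int sectorsize)
  {
          if (start % sectorsize)
                  return 0;       /* bad tree block start */
          if (nodesize < page_size &&
              (start % page_size) + nodesize > page_size)
                  return 0;       /* tree block crosses page boundary */
          if (nodesize >= page_size && start % page_size)
                  return 0;       /* tree block is not page aligned */
          return 1;
  }

  int main(void)
  {
          /* 16K nodesize on 16K pages: must be page aligned. */
          assert(eb_start_ok(0x4000, 0x4000, 0x4000, 0x1000));
          assert(!eb_start_ok(0x1000, 0x4000, 0x4000, 0x1000));
          /* 16K nodesize on 64K pages: must only fit within one page. */
          assert(eb_start_ok(0xc000, 0x4000, 0x10000, 0x1000));
          assert(!eb_start_ok(0xf000, 0x4000, 0x10000, 0x1000));
          return 0;
  }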
@@ -6214,7 +6228,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
 	 * page, but it may change in the future for 16K page size
 	 * support, so we still preallocate the memory in the loop.
 	 */
-	if (fs_info->sectorsize < PAGE_SIZE) {
+	if (fs_info->nodesize < PAGE_SIZE) {
 		prealloc = btrfs_alloc_subpage(fs_info, BTRFS_SUBPAGE_METADATA);
 		if (IS_ERR(prealloc)) {
 			ret = PTR_ERR(prealloc);
@@ -6433,7 +6447,7 @@ void clear_extent_buffer_dirty(const struct extent_buffer *eb)
 	int num_pages;
 	struct page *page;
 
-	if (eb->fs_info->sectorsize < PAGE_SIZE)
+	if (eb->fs_info->nodesize < PAGE_SIZE)
 		return clear_subpage_extent_buffer_dirty(eb);
 
 	num_pages = num_extent_pages(eb);
@@ -6465,7 +6479,7 @@ bool set_extent_buffer_dirty(struct extent_buffer *eb)
 	WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
 
 	if (!was_dirty) {
-		bool subpage = eb->fs_info->sectorsize < PAGE_SIZE;
+		bool subpage = eb->fs_info->nodesize < PAGE_SIZE;
 
 		/*
 		 * For subpage case, we can have other extent buffers in the
@@ -6505,9 +6519,18 @@ void clear_extent_buffer_uptodate(struct extent_buffer *eb)
 	num_pages = num_extent_pages(eb);
 	for (i = 0; i < num_pages; i++) {
 		page = eb->pages[i];
-		if (page)
-			btrfs_page_clear_uptodate(fs_info, page,
-						  eb->start, eb->len);
+		if (!page)
+			continue;
+
+		/*
+		 * This is special handling for metadata subpage, as regular
+		 * btrfs_is_subpage() can not handle cloned/dummy metadata.
+		 */
+		if (fs_info->nodesize >= PAGE_SIZE)
+			ClearPageUptodate(page);
+		else
+			btrfs_subpage_clear_uptodate(fs_info, page, eb->start,
+						     eb->len);
 	}
 }
@@ -6522,7 +6545,16 @@ void set_extent_buffer_uptodate(struct extent_buffer *eb)
 	num_pages = num_extent_pages(eb);
 	for (i = 0; i < num_pages; i++) {
 		page = eb->pages[i];
-		btrfs_page_set_uptodate(fs_info, page, eb->start, eb->len);
+
+		/*
+		 * This is special handling for metadata subpage, as regular
+		 * btrfs_is_subpage() can not handle cloned/dummy metadata.
+		 */
+		if (fs_info->nodesize >= PAGE_SIZE)
+			SetPageUptodate(page);
+		else
+			btrfs_subpage_set_uptodate(fs_info, page, eb->start,
+						   eb->len);
 	}
 }
@@ -6617,7 +6649,7 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num)
 	if (unlikely(test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)))
 		return -EIO;
 
-	if (eb->fs_info->sectorsize < PAGE_SIZE)
+	if (eb->fs_info->nodesize < PAGE_SIZE)
 		return read_extent_buffer_subpage(eb, wait, mirror_num);
 
 	num_pages = num_extent_pages(eb);
@@ -6872,7 +6904,7 @@ static void assert_eb_page_uptodate(const struct extent_buffer *eb,
 	 * would have !PageUptodate && !PageError, as we clear PageError before
 	 * reading.
 	 */
-	if (fs_info->sectorsize < PAGE_SIZE) {
+	if (fs_info->nodesize < PAGE_SIZE) {
 		bool uptodate, error;
 
 		uptodate = btrfs_subpage_test_uptodate(fs_info, page,
@@ -6974,7 +7006,7 @@ void copy_extent_buffer_full(const struct extent_buffer *dst,
 	ASSERT(dst->len == src->len);
 
-	if (dst->fs_info->sectorsize == PAGE_SIZE) {
+	if (dst->fs_info->nodesize >= PAGE_SIZE) {
 		num_pages = num_extent_pages(dst);
 		for (i = 0; i < num_pages; i++)
 			copy_page(page_address(dst->pages[i]),
@@ -6983,7 +7015,7 @@ void copy_extent_buffer_full(const struct extent_buffer *dst,
 		size_t src_offset = get_eb_offset_in_page(src, 0);
 		size_t dst_offset = get_eb_offset_in_page(dst, 0);
 
-		ASSERT(src->fs_info->sectorsize < PAGE_SIZE);
+		ASSERT(src->fs_info->nodesize < PAGE_SIZE);
 		memcpy(page_address(dst->pages[0]) + dst_offset,
 		       page_address(src->pages[0]) + src_offset,
 		       src->len);
@@ -7376,7 +7408,7 @@ int try_release_extent_buffer(struct page *page)
 {
 	struct extent_buffer *eb;
 
-	if (btrfs_sb(page->mapping->host->i_sb)->sectorsize < PAGE_SIZE)
+	if (btrfs_sb(page->mapping->host->i_sb)->nodesize < PAGE_SIZE)
 		return try_release_subpage_extent_buffer(page);
 
 	/*
...
@@ -8200,7 +8200,7 @@ static void wait_subpage_spinlock(struct page *page)
 	struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
 	struct btrfs_subpage *subpage;
 
-	if (fs_info->sectorsize == PAGE_SIZE)
+	if (!btrfs_is_subpage(fs_info, page))
 		return;
 
 	ASSERT(PagePrivate(page) && page->private);
...
@@ -63,6 +63,29 @@
  * This means a slightly higher tree locking latency.
  */
 
+bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info, struct page *page)
+{
+	if (fs_info->sectorsize >= PAGE_SIZE)
+		return false;
+
+	/*
+	 * Only data pages (either through DIO or compression) can have no
+	 * mapping. And if page->mapping->host is a data inode, it's subpage,
+	 * as we have ruled out the sectorsize >= PAGE_SIZE case already.
+	 */
+	if (!page->mapping || !page->mapping->host ||
+	    is_data_inode(page->mapping->host))
+		return true;
+
+	/*
+	 * Now the only remaining case is metadata, which goes the subpage
+	 * routine only if nodesize < PAGE_SIZE.
+	 */
+	if (fs_info->nodesize < PAGE_SIZE)
+		return true;
+	return false;
+}
+
 void btrfs_init_subpage_info(struct btrfs_subpage_info *subpage_info, u32 sectorsize)
 {
 	unsigned int cur = 0;
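
The three cases the new helper distinguishes can be mocked in user
space (illustration only; all mock_* names and MOCK_PAGE_SIZE are
hypothetical stand-ins for the kernel structures):

  #include <stdbool.h>
  #include <stddef.h>

  #define MOCK_PAGE_SIZE 65536u   /* stand-in for PAGE_SIZE */

  struct mock_inode { bool is_data; };
  struct mock_page  { struct mock_inode *host; /* NULL: no mapping (DIO/compression) */ };
  struct mock_fs    { unsigned int sectorsize; unsigned int nodesize; };

  static bool mock_is_subpage(const struct mock_fs *fs, const struct mock_page *page)
  {
          if (fs->sectorsize >= MOCK_PAGE_SIZE)
                  return false;
          /* No mapping, or a data inode host: follows sectorsize, so subpage. */
          if (!page->host || page->host->is_data)
                  return true;
          /* The remaining case is metadata: follows nodesize. */
          return fs->nodesize < MOCK_PAGE_SIZE;
  }

  int main(void)
  {
          struct mock_fs fs = { .sectorsize = 4096, .nodesize = 65536 };
          struct mock_inode meta = { .is_data = false };
          struct mock_page meta_page = { .host = &meta };
          struct mock_page dio_page = { .host = NULL };

          /* Data without a mapping stays subpage; 64K-node metadata does not. */
          return (mock_is_subpage(&fs, &dio_page) &&
                  !mock_is_subpage(&fs, &meta_page)) ? 0 : 1;
  }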
@@ -107,7 +130,7 @@ int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
 	ASSERT(PageLocked(page));
 
 	/* Either not subpage, or the page already has private attached */
-	if (fs_info->sectorsize == PAGE_SIZE || PagePrivate(page))
+	if (!btrfs_is_subpage(fs_info, page) || PagePrivate(page))
 		return 0;
 
 	subpage = btrfs_alloc_subpage(fs_info, type);
@@ -124,7 +147,7 @@ void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info,
 	struct btrfs_subpage *subpage;
 
 	/* Either not subpage, or already detached */
-	if (fs_info->sectorsize == PAGE_SIZE || !PagePrivate(page))
+	if (!btrfs_is_subpage(fs_info, page) || !PagePrivate(page))
 		return;
 
 	subpage = (struct btrfs_subpage *)detach_page_private(page);
@@ -175,7 +198,7 @@ void btrfs_page_inc_eb_refs(const struct btrfs_fs_info *fs_info,
 {
 	struct btrfs_subpage *subpage;
 
-	if (fs_info->sectorsize == PAGE_SIZE)
+	if (!btrfs_is_subpage(fs_info, page))
 		return;
 
 	ASSERT(PagePrivate(page) && page->mapping);
@@ -190,7 +213,7 @@ void btrfs_page_dec_eb_refs(const struct btrfs_fs_info *fs_info,
 {
 	struct btrfs_subpage *subpage;
 
-	if (fs_info->sectorsize == PAGE_SIZE)
+	if (!btrfs_is_subpage(fs_info, page))
 		return;
 
 	ASSERT(PagePrivate(page) && page->mapping);
@@ -319,7 +342,7 @@ bool btrfs_subpage_end_and_test_writer(const struct btrfs_fs_info *fs_info,
 int btrfs_page_start_writer_lock(const struct btrfs_fs_info *fs_info,
 				 struct page *page, u64 start, u32 len)
 {
-	if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) {
+	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) {
 		lock_page(page);
 		return 0;
 	}
@@ -336,7 +359,7 @@ int btrfs_page_start_writer_lock(const struct btrfs_fs_info *fs_info,
 void btrfs_page_end_writer_lock(const struct btrfs_fs_info *fs_info,
 				struct page *page, u64 start, u32 len)
 {
-	if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE)
+	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page))
 		return unlock_page(page);
 
 	btrfs_subpage_clamp_range(page, &start, &len);
 	if (btrfs_subpage_end_and_test_writer(fs_info, page, start, len))
@@ -620,7 +643,7 @@ IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(checked);
 void btrfs_page_set_##name(const struct btrfs_fs_info *fs_info, \
 		struct page *page, u64 start, u32 len) \
 { \
-	if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) { \
+	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) { \
 		set_page_func(page); \
 		return; \
 	} \
@@ -629,7 +652,7 @@ void btrfs_page_set_##name(const struct btrfs_fs_info *fs_info, \
 void btrfs_page_clear_##name(const struct btrfs_fs_info *fs_info, \
 		struct page *page, u64 start, u32 len) \
 { \
-	if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) { \
+	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) { \
 		clear_page_func(page); \
 		return; \
 	} \
@@ -638,14 +661,14 @@ void btrfs_page_clear_##name(const struct btrfs_fs_info *fs_info, \
 bool btrfs_page_test_##name(const struct btrfs_fs_info *fs_info, \
 		struct page *page, u64 start, u32 len) \
 { \
-	if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) \
+	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) \
 		return test_page_func(page); \
 	return btrfs_subpage_test_##name(fs_info, page, start, len); \
 } \
 void btrfs_page_clamp_set_##name(const struct btrfs_fs_info *fs_info, \
 		struct page *page, u64 start, u32 len) \
 { \
-	if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) { \
+	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) { \
 		set_page_func(page); \
 		return; \
 	} \
@@ -655,7 +678,7 @@ void btrfs_page_clamp_set_##name(const struct btrfs_fs_info *fs_info, \
 void btrfs_page_clamp_clear_##name(const struct btrfs_fs_info *fs_info, \
 		struct page *page, u64 start, u32 len) \
 { \
-	if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) { \
+	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) { \
 		clear_page_func(page); \
 		return; \
 	} \
@@ -665,7 +688,7 @@ void btrfs_page_clamp_clear_##name(const struct btrfs_fs_info *fs_info, \
 bool btrfs_page_clamp_test_##name(const struct btrfs_fs_info *fs_info, \
 		struct page *page, u64 start, u32 len) \
 { \
-	if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) \
+	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) \
 		return test_page_func(page); \
 	btrfs_subpage_clamp_range(page, &start, &len); \
 	return btrfs_subpage_test_##name(fs_info, page, start, len); \
@@ -694,7 +717,7 @@ void btrfs_page_assert_not_dirty(const struct btrfs_fs_info *fs_info,
 		return;
 
 	ASSERT(!PageDirty(page));
-	if (fs_info->sectorsize == PAGE_SIZE)
+	if (!btrfs_is_subpage(fs_info, page))
 		return;
 
 	ASSERT(PagePrivate(page) && page->private);
@@ -722,8 +745,8 @@ void btrfs_page_unlock_writer(struct btrfs_fs_info *fs_info, struct page *page,
 	struct btrfs_subpage *subpage;
 
 	ASSERT(PageLocked(page));
-	/* For regular page size case, we just unlock the page */
-	if (fs_info->sectorsize == PAGE_SIZE)
+	/* For non-subpage case, we just unlock the page */
+	if (!btrfs_is_subpage(fs_info, page))
 		return unlock_page(page);
 
 	ASSERT(PagePrivate(page) && page->private);
...
@@ -74,6 +74,8 @@ enum btrfs_subpage_type {
 	BTRFS_SUBPAGE_DATA,
 };
 
+bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info, struct page *page);
+
 void btrfs_init_subpage_info(struct btrfs_subpage_info *subpage_info, u32 sectorsize);
 int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
 		struct page *page, enum btrfs_subpage_type type);
...