Commit 81891974 authored by Qu Wenruo, committed by David Sterba

btrfs: refactor __extent_writepage_io() to do sector-by-sector submission

Unlike the bitmap usage inside raid56, __extent_writepage_io() handles the
subpage submission not sector-by-sector, but per dirty range found.

This is not a big deal normally, as the complex subpage code is already
mostly optimized out by the compiler for x86_64.

However, for the sake of consistency and for future sector-perfect subpage
compression support, this patch does the following:

- Extract the sector submission code into submit_one_sector()

- Add the code needed to extract the dirty bitmap for the subpage case
  There is a small pitfall for the non-subpage case: since we cleared the
  page dirty flag before starting writeback, we have to manually set the
  default dirty_bitmap to 1 there.

- Use bitmap_and() to calculate the target sectors we need to submit
  This is done for both the subpage and non-subpage cases, and will later
  be expanded to skip inline/compressed ranges (see the sketch after this
  list).
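
As a rough standalone illustration of that intersection step, the sketch
below builds a bitmap for the requested byte range, ANDs it with the dirty
bitmap, and submits only the surviving sectors. All names and sizes are
invented for the example; the kernel code does the same thing with
bitmap_and() and for_each_set_bit() on the real subpage bitmap:

/* Standalone sketch of the dirty-bitmap intersection; not kernel code.
 * Build with: cc -std=c99 -o bitmap_demo bitmap_demo.c
 */
#include <stdio.h>

int main(void)
{
        const unsigned int sectorsize_bits = 12;  /* 4K sectors */
        const unsigned int bitmap_size = 16;      /* 64K page -> 16 sectors */

        /* Sectors 2-5 and 9 are dirty (what the subpage bitmap would say). */
        unsigned long dirty_bitmap = 0x23cUL;
        unsigned long range_bitmap = 0;

        /* The writeback request covers sectors 4..11 of the folio. */
        unsigned long start = 4UL << sectorsize_bits;
        unsigned long len = 8UL << sectorsize_bits;

        /* Set one bit per sector of the requested range... */
        for (unsigned long cur = start; cur < start + len;
             cur += 1UL << sectorsize_bits)
                range_bitmap |= 1UL << (cur >> sectorsize_bits);

        /* ...and keep only sectors that are both dirty and in range,
         * the equivalent of bitmap_and() in the patch. */
        dirty_bitmap &= range_bitmap;

        for (unsigned int bit = 0; bit < bitmap_size; bit++)
                if (dirty_bitmap & (1UL << bit))
                        printf("submit sector %u\n", bit); /* 4, 5 and 9 */
        return 0;
}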

For x86_64, the dirty bitmap will be fixed to 1, with a length of 1,
so we're still doing the same workload per sector.

For larger page sizes the overhead will be a little larger, as previously
we only needed one extent_map lookup per dirty range, but now it is one
extent_map lookup per sector (e.g. with a 64K page and 4K sectors, a fully
dirty page now takes 16 lookups instead of 1).

But that is the same frequency as on x86_64, so we're just aligning the
behavior to x86_64 (see the sketch below).
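
A quick way to see the x86_64 claim above: with dirty_bitmap preset to 1
and bitmap_size fixed at 1, the per-sector loop collapses to a single
iteration, i.e. one submission covering the whole page, exactly as before.
A trivial sketch under those assumptions (names hypothetical):

/* Sketch: the non-subpage defaults make the sector loop run once. */
#include <stdio.h>

int main(void)
{
        unsigned long dirty_bitmap = 1; /* non-subpage default: page dirty */
        unsigned int bitmap_size = 1;   /* one sector == one page */

        for (unsigned int bit = 0; bit < bitmap_size; bit++) {
                if (!(dirty_bitmap & (1UL << bit)))
                        continue;
                /* would call submit_one_sector() here; runs exactly once */
                printf("submit bit %u\n", bit);
        }
        return 0;
}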
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 77b0b98b
fs/btrfs/extent_io.c
@@ -1333,56 +1333,68 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
 }
 
 /*
- * Find the first byte we need to write.
- *
- * For subpage, one page can contain several sectors, and
- * __extent_writepage_io() will just grab all extent maps in the page
- * range and try to submit all non-inline/non-compressed extents.
- *
- * This is a big problem for subpage, we shouldn't re-submit already written
- * data at all.
- * This function will lookup subpage dirty bit to find which range we really
- * need to submit.
- *
- * Return the next dirty range in [@start, @end).
- * If no dirty range is found, @start will be page_offset(page) + PAGE_SIZE.
+ * Return 0 if we have submitted or queued the sector for submission.
+ * Return <0 for critical errors.
+ *
+ * Caller should make sure filepos < i_size and handle filepos >= i_size case.
  */
-static void find_next_dirty_byte(const struct btrfs_fs_info *fs_info,
-                                 struct folio *folio, u64 *start, u64 *end)
+static int submit_one_sector(struct btrfs_inode *inode,
+                             struct folio *folio,
+                             u64 filepos, struct btrfs_bio_ctrl *bio_ctrl,
+                             loff_t i_size)
 {
-        struct btrfs_subpage *subpage = folio_get_private(folio);
-        struct btrfs_subpage_info *spi = fs_info->subpage_info;
-        u64 orig_start = *start;
-        /* Declare as unsigned long so we can use bitmap ops */
-        unsigned long flags;
-        int range_start_bit;
-        int range_end_bit;
+        struct btrfs_fs_info *fs_info = inode->root->fs_info;
+        struct extent_map *em;
+        u64 block_start;
+        u64 disk_bytenr;
+        u64 extent_offset;
+        u64 em_end;
+        const u32 sectorsize = fs_info->sectorsize;
 
-        /*
-         * For regular sector size == page size case, since one page only
-         * contains one sector, we return the page offset directly.
-         */
-        if (!btrfs_is_subpage(fs_info, folio->mapping)) {
-                *start = folio_pos(folio);
-                *end = folio_pos(folio) + folio_size(folio);
-                return;
-        }
+        ASSERT(IS_ALIGNED(filepos, sectorsize));
 
-        range_start_bit = spi->dirty_offset +
-                          (offset_in_folio(folio, orig_start) >>
-                           fs_info->sectorsize_bits);
+        /* @filepos >= i_size case should be handled by the caller. */
+        ASSERT(filepos < i_size);
 
-        /* We should have the page locked, but just in case */
-        spin_lock_irqsave(&subpage->lock, flags);
-        bitmap_next_set_region(subpage->bitmaps, &range_start_bit, &range_end_bit,
-                               spi->dirty_offset + spi->bitmap_nr_bits);
-        spin_unlock_irqrestore(&subpage->lock, flags);
+        em = btrfs_get_extent(inode, NULL, filepos, sectorsize);
+        if (IS_ERR(em))
+                return PTR_ERR_OR_ZERO(em);
 
-        range_start_bit -= spi->dirty_offset;
-        range_end_bit -= spi->dirty_offset;
+        extent_offset = filepos - em->start;
+        em_end = extent_map_end(em);
+        ASSERT(filepos <= em_end);
+        ASSERT(IS_ALIGNED(em->start, sectorsize));
+        ASSERT(IS_ALIGNED(em->len, sectorsize));
 
-        *start = folio_pos(folio) + range_start_bit * fs_info->sectorsize;
-        *end = folio_pos(folio) + range_end_bit * fs_info->sectorsize;
+        block_start = extent_map_block_start(em);
+        disk_bytenr = extent_map_block_start(em) + extent_offset;
+
+        ASSERT(!extent_map_is_compressed(em));
+        ASSERT(block_start != EXTENT_MAP_HOLE);
+        ASSERT(block_start != EXTENT_MAP_INLINE);
+
+        free_extent_map(em);
+        em = NULL;
+
+        btrfs_set_range_writeback(inode, filepos, filepos + sectorsize - 1);
+        /*
+         * Above call should set the whole folio with writeback flag, even
+         * just for a single subpage sector.
+         * As long as the folio is properly locked and the range is correct,
+         * we should always get the folio with writeback flag.
+         */
+        ASSERT(folio_test_writeback(folio));
+
+        /*
+         * Although the PageDirty bit is cleared before entering this
+         * function, subpage dirty bit is not cleared.
+         * So clear subpage dirty bit here so next time we won't submit
+         * folio for range already written to disk.
+         */
+        btrfs_folio_clear_dirty(fs_info, folio, filepos, sectorsize);
+
+        submit_extent_folio(bio_ctrl, disk_bytenr, folio,
+                            sectorsize, filepos - folio_pos(folio));
+        return 0;
 }
 
 /*
@@ -1400,16 +1412,24 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
                         loff_t i_size, int *nr_ret)
 {
         struct btrfs_fs_info *fs_info = inode->root->fs_info;
-        u64 cur = start;
-        u64 end = start + len - 1;
-        u64 extent_offset;
-        u64 block_start;
-        struct extent_map *em;
+        unsigned long range_bitmap = 0;
+        /*
+         * This is the default value for sectorsize == PAGE_SIZE case.
+         * We know we need to write the dirty sector (aka the page),
+         * even if the page is not dirty (we cleared it before entering).
+         *
+         * For subpage cases we will get the correct bitmap later.
+         */
+        unsigned long dirty_bitmap = 1;
+        unsigned int bitmap_size = 1;
+        const u64 folio_start = folio_pos(folio);
+        u64 cur;
+        int bit;
         int ret = 0;
         int nr = 0;
 
-        ASSERT(start >= folio_pos(folio) &&
-               start + len <= folio_pos(folio) + folio_size(folio));
+        ASSERT(start >= folio_start &&
+               start + len <= folio_start + folio_size(folio));
 
         ret = btrfs_writepage_cow_fixup(folio);
         if (ret) {
@@ -1419,18 +1439,23 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
                 return 1;
         }
 
+        if (btrfs_is_subpage(fs_info, inode->vfs_inode.i_mapping)) {
+                ASSERT(fs_info->subpage_info);
+                btrfs_get_subpage_dirty_bitmap(fs_info, folio, &dirty_bitmap);
+                bitmap_size = fs_info->subpage_info->bitmap_nr_bits;
+        }
+        for (cur = start; cur < start + len; cur += fs_info->sectorsize)
+                set_bit((cur - folio_start) >> fs_info->sectorsize_bits, &range_bitmap);
+        bitmap_and(&dirty_bitmap, &dirty_bitmap, &range_bitmap, bitmap_size);
+
         bio_ctrl->end_io_func = end_bbio_data_write;
-        while (cur <= end) {
-                u32 len = end - cur + 1;
-                u64 disk_bytenr;
-                u64 em_end;
-                u64 dirty_range_start = cur;
-                u64 dirty_range_end;
-                u32 iosize;
+
+        for_each_set_bit(bit, &dirty_bitmap, bitmap_size) {
+                cur = folio_pos(folio) + (bit << fs_info->sectorsize_bits);
 
                 if (cur >= i_size) {
-                        btrfs_mark_ordered_io_finished(inode, folio, cur, len,
-                                                       true);
+                        btrfs_mark_ordered_io_finished(inode, folio, cur,
+                                                       start + len - cur, true);
                         /*
                          * This range is beyond i_size, thus we don't need to
                          * bother writing back.
@@ -1439,62 +1464,13 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
                          * writeback the sectors with subpage dirty bits,
                          * causing writeback without ordered extent.
                          */
-                        btrfs_folio_clear_dirty(fs_info, folio, cur, len);
+                        btrfs_folio_clear_dirty(fs_info, folio, cur,
+                                                start + len - cur);
                         break;
                 }
-
-                find_next_dirty_byte(fs_info, folio, &dirty_range_start,
-                                     &dirty_range_end);
-                if (cur < dirty_range_start) {
-                        cur = dirty_range_start;
-                        continue;
-                }
-
-                em = btrfs_get_extent(inode, NULL, cur, len);
-                if (IS_ERR(em)) {
-                        ret = PTR_ERR_OR_ZERO(em);
+                ret = submit_one_sector(inode, folio, cur, bio_ctrl, i_size);
+                if (ret < 0)
                         goto out_error;
-                }
-
-                extent_offset = cur - em->start;
-                em_end = extent_map_end(em);
-                ASSERT(cur <= em_end);
-                ASSERT(cur < end);
-                ASSERT(IS_ALIGNED(em->start, fs_info->sectorsize));
-                ASSERT(IS_ALIGNED(em->len, fs_info->sectorsize));
-
-                block_start = extent_map_block_start(em);
-                disk_bytenr = extent_map_block_start(em) + extent_offset;
-
-                ASSERT(!extent_map_is_compressed(em));
-                ASSERT(block_start != EXTENT_MAP_HOLE);
-                ASSERT(block_start != EXTENT_MAP_INLINE);
-
-                /*
-                 * Note that em_end from extent_map_end() and dirty_range_end from
-                 * find_next_dirty_byte() are all exclusive
-                 */
-                iosize = min(min(em_end, end + 1), dirty_range_end) - cur;
-                free_extent_map(em);
-                em = NULL;
-
-                /*
-                 * Although the PageDirty bit is cleared before entering this
-                 * function, subpage dirty bit is not cleared.
-                 * So clear subpage dirty bit here so next time we won't submit
-                 * folio for range already written to disk.
-                 */
-                btrfs_folio_clear_dirty(fs_info, folio, cur, iosize);
-                btrfs_set_range_writeback(inode, cur, cur + iosize - 1);
-                if (!folio_test_writeback(folio)) {
-                        btrfs_err(inode->root->fs_info,
-                                  "folio %lu not writeback, cur %llu end %llu",
-                                  folio->index, cur, end);
-                }
-
-                submit_extent_folio(bio_ctrl, disk_bytenr, folio,
-                                    iosize, cur - folio_pos(folio));
-                cur += iosize;
                 nr++;
         }
fs/btrfs/subpage.c
@@ -946,3 +946,20 @@ void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info,
                       subpage_info->bitmap_nr_bits, &ordered_bitmap,
                       subpage_info->bitmap_nr_bits, &checked_bitmap);
 }
+
+void btrfs_get_subpage_dirty_bitmap(struct btrfs_fs_info *fs_info,
+                                    struct folio *folio,
+                                    unsigned long *ret_bitmap)
+{
+        struct btrfs_subpage_info *subpage_info = fs_info->subpage_info;
+        struct btrfs_subpage *subpage;
+        unsigned long flags;
+
+        ASSERT(folio_test_private(folio) && folio_get_private(folio));
+        ASSERT(subpage_info);
+
+        subpage = folio_get_private(folio);
+        spin_lock_irqsave(&subpage->lock, flags);
+        GET_SUBPAGE_BITMAP(subpage, subpage_info, dirty, ret_bitmap);
+        spin_unlock_irqrestore(&subpage->lock, flags);
+}
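
For illustration, here is a minimal userspace sketch of the
snapshot-then-iterate pattern the new helper enables: copy the dirty bitmap
once under the lock, then walk the set bits without holding it. The pthread
spinlock and every name below are stand-ins for the example, not kernel API:

/* Userspace sketch; build with: cc -o snap_demo snap_demo.c -lpthread */
#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t lock;
static unsigned long subpage_dirty = 0x0f0UL; /* sectors 4-7 dirty */

/* Rough analogue of btrfs_get_subpage_dirty_bitmap(): snapshot under lock. */
static unsigned long get_dirty_snapshot(void)
{
        unsigned long snap;

        pthread_spin_lock(&lock);
        snap = subpage_dirty;
        pthread_spin_unlock(&lock);
        return snap;
}

int main(void)
{
        pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);

        unsigned long snap = get_dirty_snapshot();

        /* Equivalent of for_each_set_bit() over the snapshot. */
        for (unsigned int bit = 0; bit < 16; bit++)
                if (snap & (1UL << bit))
                        printf("sector %u needs submission\n", bit);

        pthread_spin_destroy(&lock);
        return 0;
}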
fs/btrfs/subpage.h
@@ -175,6 +175,9 @@ void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info,
                                   struct folio *folio, u64 start, u32 len);
 void btrfs_folio_unlock_writer(struct btrfs_fs_info *fs_info,
                                struct folio *folio, u64 start, u32 len);
+void btrfs_get_subpage_dirty_bitmap(struct btrfs_fs_info *fs_info,
+                                    struct folio *folio,
+                                    unsigned long *ret_bitmap);
 void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info,
                                       struct folio *folio, u64 start, u32 len);