Commit eb34dcea authored by Christoph Hellwig, committed by David Sterba

btrfs: only call __extent_writepage_io from extent_write_locked_range

__extent_writepage does a lot of things that make no sense for
extent_write_locked_range, given that extent_write_locked_range itself is
called from __extent_writepage either directly or through a workqueue,
and all this work has already been done in the first invocation and the
pages haven't been unlocked since.  Call __extent_writepage_io directly
instead and open code the logic tracked in
btrfs_bio_ctrl::extent_locked.
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 9ecdbee8
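For readers unfamiliar with the pattern, the sketch below is a toy userspace C illustration (not btrfs code, all names invented) of the refactoring the patch performs: a shared entry point used to branch on a "range already locked" flag, and after the change the locked-range caller invokes the inner helper directly and open-codes its own unlock step, so the flag can be dropped.

#include <stdbool.h>
#include <stdio.h>

/* Inner helper: only the work common to both paths. */
static int write_one_page_io(int pgno)
{
        printf("submit I/O for page %d\n", pgno);
        return 0;
}

/* Ordinary writeback path: run delalloc, do the I/O, unlock the page. */
static int writepage(int pgno)
{
        printf("run delalloc for page %d\n", pgno);
        int ret = write_one_page_io(pgno);
        printf("unlock page %d\n", pgno);
        return ret;
}

/*
 * Locked-range path: delalloc already ran and the pages are still locked,
 * so call the inner helper directly and open-code the range unlock instead
 * of threading a flag through writepage().
 */
static int write_locked_range(int first, int last)
{
        int ret = 0;

        for (int pgno = first; pgno <= last && !ret; pgno++) {
                ret = write_one_page_io(pgno);
                printf("unlock writer range for page %d\n", pgno);
        }
        return ret;
}

int main(void)
{
        writepage(0);
        return write_locked_range(1, 3);
}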
fs/btrfs/extent_io.c
@@ -103,12 +103,6 @@ struct btrfs_bio_ctrl {
 	blk_opf_t opf;
 	btrfs_bio_end_io_t end_io_func;
 	struct writeback_control *wbc;
-
-	/*
-	 * Tell writepage not to lock the state bits for this range, it still
-	 * does the unlocking.
-	 */
-	bool extent_locked;
 };
 
 static void submit_one_bio(struct btrfs_bio_ctrl *bio_ctrl)
@@ -1475,7 +1469,6 @@ static int __extent_writepage(struct page *page, struct btrfs_bio_ctrl *bio_ctrl
 {
 	struct folio *folio = page_folio(page);
 	struct inode *inode = page->mapping->host;
-	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	const u64 page_start = page_offset(page);
 	const u64 page_end = page_start + PAGE_SIZE - 1;
 	int ret;
@@ -1503,13 +1496,11 @@ static int __extent_writepage(struct page *page, struct btrfs_bio_ctrl *bio_ctrl
 	if (ret < 0)
 		goto done;
 
-	if (!bio_ctrl->extent_locked) {
-		ret = writepage_delalloc(BTRFS_I(inode), page, bio_ctrl->wbc);
-		if (ret == 1)
-			return 0;
-		if (ret)
-			goto done;
-	}
+	ret = writepage_delalloc(BTRFS_I(inode), page, bio_ctrl->wbc);
+	if (ret == 1)
+		return 0;
+	if (ret)
+		goto done;
 
 	ret = __extent_writepage_io(BTRFS_I(inode), page, bio_ctrl, i_size, &nr);
 	if (ret == 1)
@@ -1525,21 +1516,7 @@ static int __extent_writepage(struct page *page, struct btrfs_bio_ctrl *bio_ctrl
 	}
 	if (ret)
 		end_extent_writepage(page, ret, page_start, page_end);
-	if (bio_ctrl->extent_locked) {
-		struct writeback_control *wbc = bio_ctrl->wbc;
-
-		/*
-		 * If bio_ctrl->extent_locked, it's from extent_write_locked_range(),
-		 * the page can either be locked by lock_page() or
-		 * process_one_page().
-		 * Let btrfs_page_unlock_writer() handle both cases.
-		 */
-		ASSERT(wbc);
-		btrfs_page_unlock_writer(fs_info, page, wbc->range_start,
-					 wbc->range_end + 1 - wbc->range_start);
-	} else {
-		unlock_page(page);
-	}
+	unlock_page(page);
 	ASSERT(ret <= 0);
 	return ret;
 }
@@ -2239,10 +2216,10 @@ int extent_write_locked_range(struct inode *inode, u64 start, u64 end)
 	int first_error = 0;
 	int ret = 0;
 	struct address_space *mapping = inode->i_mapping;
-	struct page *page;
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+	const u32 sectorsize = fs_info->sectorsize;
+	loff_t i_size = i_size_read(inode);
 	u64 cur = start;
-	unsigned long nr_pages;
-	const u32 sectorsize = btrfs_sb(inode->i_sb)->sectorsize;
 	struct writeback_control wbc_writepages = {
 		.sync_mode = WB_SYNC_ALL,
 		.range_start = start,
@@ -2254,17 +2231,15 @@ int extent_write_locked_range(struct inode *inode, u64 start, u64 end)
 		/* We're called from an async helper function */
 		.opf = REQ_OP_WRITE | REQ_BTRFS_CGROUP_PUNT |
 			wbc_to_write_flags(&wbc_writepages),
-		.extent_locked = 1,
 	};
 
 	ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(end + 1, sectorsize));
-	nr_pages = (round_up(end, PAGE_SIZE) - round_down(start, PAGE_SIZE)) >>
-		   PAGE_SHIFT;
-	wbc_writepages.nr_to_write = nr_pages * 2;
 
 	wbc_attach_fdatawrite_inode(&wbc_writepages, inode);
 	while (cur <= end) {
 		u64 cur_end = min(round_down(cur, PAGE_SIZE) + PAGE_SIZE - 1, end);
+		struct page *page;
+		int nr = 0;
 
 		page = find_get_page(mapping, cur >> PAGE_SHIFT);
 		/*
@@ -2275,12 +2250,25 @@ int extent_write_locked_range(struct inode *inode, u64 start, u64 end)
 		ASSERT(PageLocked(page));
 		ASSERT(PageDirty(page));
 		clear_page_dirty_for_io(page);
-		ret = __extent_writepage(page, &bio_ctrl);
-		ASSERT(ret <= 0);
+
+		ret = __extent_writepage_io(BTRFS_I(inode), page, &bio_ctrl,
+					    i_size, &nr);
+		if (ret == 1)
+			goto next_page;
+
+		/* Make sure the mapping tag for page dirty gets cleared. */
+		if (nr == 0) {
+			set_page_writeback(page);
+			end_page_writeback(page);
+		}
+		if (ret)
+			end_extent_writepage(page, ret, cur, cur_end);
+		btrfs_page_unlock_writer(fs_info, page, cur, cur_end + 1 - cur);
 		if (ret < 0) {
 			found_error = true;
 			first_error = ret;
 		}
+next_page:
 		put_page(page);
 		cur = cur_end + 1;
 	}
@@ -2301,7 +2289,6 @@ int extent_writepages(struct address_space *mapping,
 	struct btrfs_bio_ctrl bio_ctrl = {
 		.wbc = wbc,
 		.opf = REQ_OP_WRITE | wbc_to_write_flags(wbc),
-		.extent_locked = 0,
 	};
 
 	/*