Commit 00d31d17 authored by Christoph Hellwig's avatar Christoph Hellwig Committed by David Sterba

btrfs: merge submit_compressed_extents and async_cow_submit

The code in submit_compressed_extents just loops over the async_extents,
and doesn't need to be conditional on an inode being present, as there
won't be any async_extent in the list if we created an inline extent.
Merge the two functions to simplify the logic.
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent c15d8cf2
...@@ -1285,25 +1285,6 @@ static void submit_one_async_extent(struct async_chunk *async_chunk, ...@@ -1285,25 +1285,6 @@ static void submit_one_async_extent(struct async_chunk *async_chunk,
kfree(async_extent); kfree(async_extent);
} }
/*
 * Phase two of compressed writeback. This is the ordered portion of the code,
 * which only gets called in the order the work was queued. We walk all the
 * async extents created by compress_file_range and send them down to the disk.
 */
static noinline void submit_compressed_extents(struct async_chunk *async_chunk)
{
	u64 alloc_hint = 0;

	/* Drain the chunk's extent list, submitting each entry in queue order. */
	while (!list_empty(&async_chunk->extents)) {
		struct async_extent *async_extent =
			list_entry(async_chunk->extents.next,
				   struct async_extent, list);

		list_del(&async_extent->list);
		submit_one_async_extent(async_chunk, async_extent, &alloc_hint);
	}
}
static u64 get_extent_allocation_hint(struct btrfs_inode *inode, u64 start, static u64 get_extent_allocation_hint(struct btrfs_inode *inode, u64 start,
u64 num_bytes) u64 num_bytes)
{ {
...@@ -1646,24 +1627,24 @@ static noinline int cow_file_range(struct btrfs_inode *inode, ...@@ -1646,24 +1627,24 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
* which only gets called in the order the work was queued. We walk all the * which only gets called in the order the work was queued. We walk all the
* async extents created by compress_file_range and send them down to the disk. * async extents created by compress_file_range and send them down to the disk.
*/ */
static noinline void async_cow_submit(struct btrfs_work *work) static noinline void submit_compressed_extents(struct btrfs_work *work)
{ {
struct async_chunk *async_chunk = container_of(work, struct async_chunk, struct async_chunk *async_chunk = container_of(work, struct async_chunk,
work); work);
struct btrfs_fs_info *fs_info = btrfs_work_owner(work); struct btrfs_fs_info *fs_info = btrfs_work_owner(work);
struct async_extent *async_extent;
unsigned long nr_pages; unsigned long nr_pages;
u64 alloc_hint = 0;
nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >> nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >>
PAGE_SHIFT; PAGE_SHIFT;
/* while (!list_empty(&async_chunk->extents)) {
* ->inode could be NULL if async_chunk_start has failed to compress, async_extent = list_entry(async_chunk->extents.next,
* in which case we don't have anything to submit, yet we need to struct async_extent, list);
* always adjust ->async_delalloc_pages as its paired with the init list_del(&async_extent->list);
* happening in run_delalloc_compressed submit_one_async_extent(async_chunk, async_extent, &alloc_hint);
*/ }
if (async_chunk->inode)
submit_compressed_extents(async_chunk);
/* atomic_sub_return implies a barrier */ /* atomic_sub_return implies a barrier */
if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) < if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
...@@ -1763,7 +1744,7 @@ static bool run_delalloc_compressed(struct btrfs_inode *inode, ...@@ -1763,7 +1744,7 @@ static bool run_delalloc_compressed(struct btrfs_inode *inode,
} }
btrfs_init_work(&async_chunk[i].work, compress_file_range, btrfs_init_work(&async_chunk[i].work, compress_file_range,
async_cow_submit, async_cow_free); submit_compressed_extents, async_cow_free);
nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE); nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE);
atomic_add(nr_pages, &fs_info->async_delalloc_pages); atomic_add(nr_pages, &fs_info->async_delalloc_pages);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment