Commit 973fb26e authored by Christoph Hellwig, committed by David Sterba

btrfs: don't fail writeback when allocating the compression context fails

If cow_file_range_async fails to allocate the asynchronous writeback
context, it currently returns an error and entirely fails the writeback.
This is not a good idea as a writeback failure is a non-temporary error
condition that will make the file system unusable.  Just fall back to
synchronous uncompressed writeback instead.  This requires us to delay
setting the BTRFS_INODE_HAS_ASYNC_EXTENT flag until we've committed to
the async writeback.

The compression checks INODE_NOCOMPRESS and FORCE_COMPRESS are moved
from cow_file_range_async() to the preceding checks in btrfs_run_delalloc_range().

Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 57201ddd
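
At its core the patch changes a calling convention: cow_file_range_async() now returns a bool meaning "we committed to async writeback" instead of an errno, so the caller can treat allocation failure as a cue to fall back to the synchronous path rather than as a writeback error. Below is a minimal, self-contained C sketch of just that control flow; every name in it (inode_stub, try_async_writeback, run_delalloc_range_sketch) is a hypothetical stand-in for illustration, not the kernel code shown in the diff that follows.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for the btrfs inode state touched here. */
struct inode_stub {
	bool can_compress;     /* models the btrfs_inode_can_compress() checks */
	bool has_async_extent; /* models BTRFS_INODE_HAS_ASYNC_EXTENT */
};

/*
 * Models the new cow_file_range_async() contract: return true only once
 * the async context is allocated and we are committed to async writeback.
 * On allocation failure, return false with no cleanup and no errno so
 * the caller can simply fall back.
 */
static bool try_async_writeback(struct inode_stub *inode)
{
	void *ctx = malloc(64); /* stands in for the kvmalloc() of the context */

	if (!ctx)
		return false;
	/* Only now is the async-extent state set, as the message requires. */
	inode->has_async_extent = true;
	printf("async compressed writeback queued\n");
	free(ctx);
	return true;
}

static int run_delalloc_range_sketch(struct inode_stub *inode)
{
	int ret = 0;

	/* A false return is not an error; it means "do it synchronously". */
	if (inode->can_compress && try_async_writeback(inode))
		goto out;

	printf("falling back to synchronous uncompressed writeback\n");
out:
	return ret;
}

int main(void)
{
	struct inode_stub inode = { .can_compress = true };

	return run_delalloc_range_sketch(&inode);
}

Returning a bool instead of -ENOMEM is also what lets the patch delete the old error-path cleanup (the extent_clear_unlock_delalloc() call with PAGE_SET_ERROR) from cow_file_range_async().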
fs/btrfs/inode.c

@@ -1720,7 +1720,7 @@ static noinline void async_cow_free(struct btrfs_work *work)
 		kvfree(async_cow);
 }
 
-static int cow_file_range_async(struct btrfs_inode *inode,
+static bool cow_file_range_async(struct btrfs_inode *inode,
 				struct writeback_control *wbc,
 				struct page *locked_page,
 				u64 start, u64 end, int *page_started,
@@ -1731,47 +1731,25 @@ static int cow_file_range_async(struct btrfs_inode *inode,
 	struct async_cow *ctx;
 	struct async_chunk *async_chunk;
 	unsigned long nr_pages;
-	u64 cur_end;
 	u64 num_chunks = DIV_ROUND_UP(end - start, SZ_512K);
 	int i;
-	bool should_compress;
 	unsigned nofs_flag;
 	const blk_opf_t write_flags = wbc_to_write_flags(wbc);
 
-	unlock_extent(&inode->io_tree, start, end, NULL);
-
-	if (inode->flags & BTRFS_INODE_NOCOMPRESS &&
-	    !btrfs_test_opt(fs_info, FORCE_COMPRESS)) {
-		num_chunks = 1;
-		should_compress = false;
-	} else {
-		should_compress = true;
-	}
-
 	nofs_flag = memalloc_nofs_save();
 	ctx = kvmalloc(struct_size(ctx, chunks, num_chunks), GFP_KERNEL);
 	memalloc_nofs_restore(nofs_flag);
+	if (!ctx)
+		return false;
 
-	if (!ctx) {
-		unsigned clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC |
-			EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
-			EXTENT_DO_ACCOUNTING;
-		unsigned long page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK |
-					 PAGE_END_WRITEBACK | PAGE_SET_ERROR;
-
-		extent_clear_unlock_delalloc(inode, start, end, locked_page,
-					     clear_bits, page_ops);
-		return -ENOMEM;
-	}
+	unlock_extent(&inode->io_tree, start, end, NULL);
+	set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags);
 
 	async_chunk = ctx->chunks;
 	atomic_set(&ctx->num_chunks, num_chunks);
 
 	for (i = 0; i < num_chunks; i++) {
-		if (should_compress)
-			cur_end = min(end, start + SZ_512K - 1);
-		else
-			cur_end = end;
+		u64 cur_end = min(end, start + SZ_512K - 1);
 
 		/*
 		 * igrab is called higher up in the call chain, take only the
@@ -1832,7 +1810,7 @@ static int cow_file_range_async(struct btrfs_inode *inode,
 		start = cur_end + 1;
 	}
 	*page_started = 1;
-	return 0;
+	return true;
 }
 
 static noinline int run_delalloc_zoned(struct btrfs_inode *inode,
@@ -2413,7 +2391,7 @@ int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page
 		u64 start, u64 end, int *page_started, unsigned long *nr_written,
 		struct writeback_control *wbc)
 {
-	int ret;
+	int ret = 0;
 	const bool zoned = btrfs_is_zoned(inode->root->fs_info);
 
 	/*
@@ -2434,19 +2412,23 @@ int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page
 		ASSERT(!zoned || btrfs_is_data_reloc_root(inode->root));
 		ret = run_delalloc_nocow(inode, locked_page, start, end,
 					 page_started, nr_written);
-	} else if (!btrfs_inode_can_compress(inode) ||
-		   !inode_need_compress(inode, start, end)) {
-		if (zoned)
-			ret = run_delalloc_zoned(inode, locked_page, start, end,
-						 page_started, nr_written);
-		else
-			ret = cow_file_range(inode, locked_page, start, end,
-					     page_started, nr_written, 1, NULL);
-	} else {
-		set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags);
-		ret = cow_file_range_async(inode, wbc, locked_page, start, end,
-					   page_started, nr_written);
+		goto out;
 	}
+
+	if (btrfs_inode_can_compress(inode) &&
+	    inode_need_compress(inode, start, end) &&
+	    cow_file_range_async(inode, wbc, locked_page, start,
+				 end, page_started, nr_written))
+		goto out;
+
+	if (zoned)
+		ret = run_delalloc_zoned(inode, locked_page, start, end,
+					 page_started, nr_written);
+	else
+		ret = cow_file_range(inode, locked_page, start, end,
+				     page_started, nr_written, 1, NULL);
+out:
 	ASSERT(ret <= 0);
 	if (ret)
 		btrfs_cleanup_ordered_extents(inode, locked_page, start,