Commit 80f9d241 authored by Josef Bacik's avatar Josef Bacik Committed by David Sterba

btrfs: make btrfs_check_nocow_lock nowait compatible

Now that all the helpers used by btrfs_check_nocow_lock handle nowait, add
a nowait flag to btrfs_check_nocow_lock so it can be used by the write
path.
Reviewed-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Stefan Roesch <shr@fb.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent d2c7a19f
...@@ -3525,7 +3525,7 @@ int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages, ...@@ -3525,7 +3525,7 @@ int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages,
struct extent_state **cached, bool noreserve); struct extent_state **cached, bool noreserve);
int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end); int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end);
int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos, int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos,
size_t *write_bytes); size_t *write_bytes, bool nowait);
void btrfs_check_nocow_unlock(struct btrfs_inode *inode); void btrfs_check_nocow_unlock(struct btrfs_inode *inode);
bool btrfs_find_delalloc_in_range(struct btrfs_inode *inode, u64 start, u64 end, bool btrfs_find_delalloc_in_range(struct btrfs_inode *inode, u64 start, u64 end,
u64 *delalloc_start_ret, u64 *delalloc_end_ret); u64 *delalloc_start_ret, u64 *delalloc_end_ret);
......
...@@ -1480,7 +1480,7 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages, ...@@ -1480,7 +1480,7 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
* NOTE: Callers need to call btrfs_check_nocow_unlock() if we return > 0. * NOTE: Callers need to call btrfs_check_nocow_unlock() if we return > 0.
*/ */
int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos, int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos,
size_t *write_bytes) size_t *write_bytes, bool nowait)
{ {
struct btrfs_fs_info *fs_info = inode->root->fs_info; struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct btrfs_root *root = inode->root; struct btrfs_root *root = inode->root;
...@@ -1499,16 +1499,21 @@ int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos, ...@@ -1499,16 +1499,21 @@ int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos,
fs_info->sectorsize) - 1; fs_info->sectorsize) - 1;
num_bytes = lockend - lockstart + 1; num_bytes = lockend - lockstart + 1;
if (nowait) {
if (!btrfs_try_lock_ordered_range(inode, lockstart, lockend)) {
btrfs_drew_write_unlock(&root->snapshot_lock);
return -EAGAIN;
}
} else {
btrfs_lock_and_flush_ordered_range(inode, lockstart, lockend, NULL); btrfs_lock_and_flush_ordered_range(inode, lockstart, lockend, NULL);
}
ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes, ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes,
NULL, NULL, NULL, false, false); NULL, NULL, NULL, nowait, false);
if (ret <= 0) { if (ret <= 0)
ret = 0;
btrfs_drew_write_unlock(&root->snapshot_lock); btrfs_drew_write_unlock(&root->snapshot_lock);
} else { else
*write_bytes = min_t(size_t, *write_bytes , *write_bytes = min_t(size_t, *write_bytes ,
num_bytes - pos + lockstart); num_bytes - pos + lockstart);
}
unlock_extent(&inode->io_tree, lockstart, lockend, NULL); unlock_extent(&inode->io_tree, lockstart, lockend, NULL);
return ret; return ret;
...@@ -1665,16 +1670,22 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb, ...@@ -1665,16 +1670,22 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
&data_reserved, pos, &data_reserved, pos,
write_bytes, false); write_bytes, false);
if (ret < 0) { if (ret < 0) {
int can_nocow;
/* /*
* If we don't have to COW at the offset, reserve * If we don't have to COW at the offset, reserve
* metadata only. write_bytes may get smaller than * metadata only. write_bytes may get smaller than
* requested here. * requested here.
*/ */
if (btrfs_check_nocow_lock(BTRFS_I(inode), pos, can_nocow = btrfs_check_nocow_lock(BTRFS_I(inode), pos,
&write_bytes) > 0) &write_bytes, false);
only_release_metadata = true; if (can_nocow < 0)
else ret = can_nocow;
if (can_nocow > 0)
ret = 0;
if (ret)
break; break;
only_release_metadata = true;
} }
num_pages = DIV_ROUND_UP(write_bytes + offset, PAGE_SIZE); num_pages = DIV_ROUND_UP(write_bytes + offset, PAGE_SIZE);
......
...@@ -4887,7 +4887,7 @@ int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len, ...@@ -4887,7 +4887,7 @@ int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len,
ret = btrfs_check_data_free_space(inode, &data_reserved, block_start, ret = btrfs_check_data_free_space(inode, &data_reserved, block_start,
blocksize, false); blocksize, false);
if (ret < 0) { if (ret < 0) {
if (btrfs_check_nocow_lock(inode, block_start, &write_bytes) > 0) { if (btrfs_check_nocow_lock(inode, block_start, &write_bytes, false) > 0) {
/* For nocow case, no need to reserve data space */ /* For nocow case, no need to reserve data space */
only_release_metadata = true; only_release_metadata = true;
} else { } else {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment