Commit 23d31bd4 authored by Nikolay Borisov, committed by David Sterba

btrfs: Use newly introduced btrfs_lock_and_flush_ordered_range

There are several functions which open code
btrfs_lock_and_flush_ordered_range; just replace them with a call to the
function. No functional changes.
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Nikolay Borisov <nborisov@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent ffa87214
...@@ -3204,21 +3204,10 @@ static inline void contiguous_readpages(struct extent_io_tree *tree, ...@@ -3204,21 +3204,10 @@ static inline void contiguous_readpages(struct extent_io_tree *tree,
unsigned long *bio_flags, unsigned long *bio_flags,
u64 *prev_em_start) u64 *prev_em_start)
{ {
struct inode *inode; struct btrfs_inode *inode = BTRFS_I(pages[0]->mapping->host);
struct btrfs_ordered_extent *ordered;
int index; int index;
inode = pages[0]->mapping->host; btrfs_lock_and_flush_ordered_range(tree, inode, start, end, NULL);
while (1) {
lock_extent(tree, start, end);
ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), start,
end - start + 1);
if (!ordered)
break;
unlock_extent(tree, start, end);
btrfs_start_ordered_extent(inode, ordered, 1);
btrfs_put_ordered_extent(ordered);
}
for (index = 0; index < nr_pages; index++) { for (index = 0; index < nr_pages; index++) {
__do_readpage(tree, pages[index], btrfs_get_extent, em_cached, __do_readpage(tree, pages[index], btrfs_get_extent, em_cached,
...@@ -3234,22 +3223,12 @@ static int __extent_read_full_page(struct extent_io_tree *tree, ...@@ -3234,22 +3223,12 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
unsigned long *bio_flags, unsigned long *bio_flags,
unsigned int read_flags) unsigned int read_flags)
{ {
struct inode *inode = page->mapping->host; struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
struct btrfs_ordered_extent *ordered;
u64 start = page_offset(page); u64 start = page_offset(page);
u64 end = start + PAGE_SIZE - 1; u64 end = start + PAGE_SIZE - 1;
int ret; int ret;
while (1) { btrfs_lock_and_flush_ordered_range(tree, inode, start, end, NULL);
lock_extent(tree, start, end);
ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), start,
PAGE_SIZE);
if (!ordered)
break;
unlock_extent(tree, start, end);
btrfs_start_ordered_extent(inode, ordered, 1);
btrfs_put_ordered_extent(ordered);
}
ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num, ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num,
bio_flags, read_flags, NULL); bio_flags, read_flags, NULL);
......
...@@ -1550,7 +1550,6 @@ static noinline int check_can_nocow(struct btrfs_inode *inode, loff_t pos, ...@@ -1550,7 +1550,6 @@ static noinline int check_can_nocow(struct btrfs_inode *inode, loff_t pos,
{ {
struct btrfs_fs_info *fs_info = inode->root->fs_info; struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct btrfs_root *root = inode->root; struct btrfs_root *root = inode->root;
struct btrfs_ordered_extent *ordered;
u64 lockstart, lockend; u64 lockstart, lockend;
u64 num_bytes; u64 num_bytes;
int ret; int ret;
...@@ -1563,17 +1562,8 @@ static noinline int check_can_nocow(struct btrfs_inode *inode, loff_t pos, ...@@ -1563,17 +1562,8 @@ static noinline int check_can_nocow(struct btrfs_inode *inode, loff_t pos,
lockend = round_up(pos + *write_bytes, lockend = round_up(pos + *write_bytes,
fs_info->sectorsize) - 1; fs_info->sectorsize) - 1;
while (1) { btrfs_lock_and_flush_ordered_range(&inode->io_tree, inode, lockstart,
lock_extent(&inode->io_tree, lockstart, lockend); lockend, NULL);
ordered = btrfs_lookup_ordered_range(inode, lockstart,
lockend - lockstart + 1);
if (!ordered) {
break;
}
unlock_extent(&inode->io_tree, lockstart, lockend);
btrfs_start_ordered_extent(&inode->vfs_inode, ordered, 1);
btrfs_put_ordered_extent(ordered);
}
num_bytes = lockend - lockstart + 1; num_bytes = lockend - lockstart + 1;
ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes, ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes,
......
...@@ -5004,21 +5004,8 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size) ...@@ -5004,21 +5004,8 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
if (size <= hole_start) if (size <= hole_start)
return 0; return 0;
while (1) { btrfs_lock_and_flush_ordered_range(io_tree, BTRFS_I(inode), hole_start,
struct btrfs_ordered_extent *ordered; block_end - 1, &cached_state);
lock_extent_bits(io_tree, hole_start, block_end - 1,
&cached_state);
ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), hole_start,
block_end - hole_start);
if (!ordered)
break;
unlock_extent_cached(io_tree, hole_start, block_end - 1,
&cached_state);
btrfs_start_ordered_extent(inode, ordered, 1);
btrfs_put_ordered_extent(ordered);
}
cur_offset = hole_start; cur_offset = hole_start;
while (1) { while (1) {
em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset, em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment