Commit 99be1a66 authored by David Sterba

btrfs: add specific helper for range bit test exists

The existing helper test_range_bit works in two ways: it either checks that the
whole range contains all the bits, or it stops on the first occurrence.  By
adding a specific helper for the latter case, the inner loop can be simplified
and contain fewer conditionals, making it a bit faster.

There's no caller that uses the cached state pointer, so this reduces the
argument count further.
Signed-off-by: David Sterba <dsterba@suse.com>
parent 6422b4cd
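
Before the diff, a short illustration of what "stop on the first occurrence" means in practice. This is a self-contained userspace sketch, not kernel code: the names toy_state and toy_range_bit_exists are invented for this example, and it walks a plain sorted array instead of the extent io tree under tree->lock. It returns true as soon as any state overlapping the queried range carries the bit, which is the behaviour the new helper implements with a simpler inner loop.

/*
 * Toy model of "does this bit exist anywhere in the range?" semantics.
 * Not kernel code; identifiers are made up for illustration.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_state {
	uint64_t start;
	uint64_t end;	/* inclusive */
	uint32_t bits;
};

static bool toy_range_bit_exists(const struct toy_state *states, size_t nr,
				 uint64_t start, uint64_t end, uint32_t bit)
{
	/* States are sorted by start; stop once we are past the range. */
	for (size_t i = 0; i < nr && states[i].start <= end; i++) {
		if (states[i].end < start)
			continue;	/* state lies entirely before the range */
		if (states[i].bits & bit)
			return true;	/* first occurrence is enough */
	}
	return false;
}

int main(void)
{
	const struct toy_state states[] = {
		{ 0,    4095, 0x1 },	/* stands in for a locked range    */
		{ 4096, 8191, 0x2 },	/* stands in for a delalloc range  */
	};

	/* Bit 0x2 exists somewhere in [0, 8191] -> prints 1. */
	printf("%d\n", toy_range_bit_exists(states, 2, 0, 8191, 0x2));
	/* Bit 0x2 does not exist in [0, 4095] -> prints 0. */
	printf("%d\n", toy_range_bit_exists(states, 2, 0, 4095, 0x2));
	return 0;
}

Note that the real helper below is restricted to a single bit, enforced by ASSERT(is_power_of_2(bit)).
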
@@ -1035,8 +1035,8 @@ static int defrag_collect_targets(struct btrfs_inode *inode,
 		 * very likely resulting in a larger extent after writeback is
 		 * triggered (except in a case of free space fragmentation).
 		 */
-		if (test_range_bit(&inode->io_tree, cur, cur + range_len - 1,
-				   EXTENT_DELALLOC, 0, NULL))
+		if (test_range_bit_exists(&inode->io_tree, cur, cur + range_len - 1,
+					  EXTENT_DELALLOC))
 			goto next;
 
 		/*
@@ -1639,6 +1639,37 @@ u64 count_range_bits(struct extent_io_tree *tree,
 	return total_bytes;
 }
 
+/*
+ * Check if the single @bit exists in the given range.
+ */
+bool test_range_bit_exists(struct extent_io_tree *tree, u64 start, u64 end, u32 bit)
+{
+	struct extent_state *state = NULL;
+	bool bitset = false;
+
+	ASSERT(is_power_of_2(bit));
+
+	spin_lock(&tree->lock);
+	state = tree_search(tree, start);
+	while (state && start <= end) {
+		if (state->start > end)
+			break;
+
+		if (state->state & bit) {
+			bitset = true;
+			break;
+		}
+
+		/* If state->end is (u64)-1, start will overflow to 0 */
+		start = state->end + 1;
+		if (start > end || start == 0)
+			break;
+		state = next_state(state);
+	}
+	spin_unlock(&tree->lock);
+	return bitset;
+}
+
 /*
  * Search a range in the state tree for a given mask.  If 'filled' == 1, this
  * returns 1 only if every extent in the tree has the bits set.  Otherwise, 1
@@ -133,6 +133,7 @@ u64 count_range_bits(struct extent_io_tree *tree,
 void free_extent_state(struct extent_state *state);
 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
 		   u32 bits, int filled, struct extent_state *cached_state);
+bool test_range_bit_exists(struct extent_io_tree *tree, u64 start, u64 end, u32 bit);
 int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
 			     u32 bits, struct extent_changeset *changeset);
 int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
@@ -2293,7 +2293,7 @@ static int try_release_extent_state(struct extent_io_tree *tree,
 	u64 end = start + PAGE_SIZE - 1;
 	int ret = 1;
 
-	if (test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL)) {
+	if (test_range_bit_exists(tree, start, end, EXTENT_LOCKED)) {
 		ret = 0;
 	} else {
 		u32 clear_bits = ~(EXTENT_LOCKED | EXTENT_NODATASUM |
@@ -2352,9 +2352,9 @@ int try_release_extent_mapping(struct page *page, gfp_t mask)
 				free_extent_map(em);
 				break;
 			}
-			if (test_range_bit(tree, em->start,
-					   extent_map_end(em) - 1,
-					   EXTENT_LOCKED, 0, NULL))
+			if (test_range_bit_exists(tree, em->start,
+						  extent_map_end(em) - 1,
+						  EXTENT_LOCKED))
 				goto next;
 			/*
 			 * If it's not in the list of modified extents, used
@@ -2239,8 +2239,7 @@ static bool should_nocow(struct btrfs_inode *inode, u64 start, u64 end)
 {
 	if (inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)) {
 		if (inode->defrag_bytes &&
-		    test_range_bit(&inode->io_tree, start, end, EXTENT_DEFRAG,
-				   0, NULL))
+		    test_range_bit_exists(&inode->io_tree, start, end, EXTENT_DEFRAG))
 			return false;
 		return true;
 	}
@@ -7111,8 +7110,7 @@ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
 		range_end = round_up(offset + nocow_args.num_bytes,
 				     root->fs_info->sectorsize) - 1;
-		ret = test_range_bit(io_tree, offset, range_end,
-				     EXTENT_DELALLOC, 0, NULL);
+		ret = test_range_bit_exists(io_tree, offset, range_end, EXTENT_DELALLOC);
 		if (ret) {
 			ret = -EAGAIN;
 			goto out;