Commit 40431d6c authored by Chris Mason

Btrfs: optimize set extent bit

The Btrfs set_extent_bit call currently searches the rbtree
every time it needs to find the next extent_state object while
filling the requested operation.

This adds a simple test with rb_next to see if the next object
in the tree is adjacent to the one we just found.  If so,
we skip the search and just use the next object.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
parent 9042846b
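
For context, here is a minimal user-space sketch of the fast path this
commit adds. It is not the kernel code: a sorted singly linked list
stands in for the rbtree, find_range() stands in for the tree search,
and the identifiers (range, find_range, walk_ranges) are illustrative,
not kernel names. The shape mirrors the patch below: after handling one
range, peek at the successor; if it begins exactly where we left off,
jump straight to it instead of searching the whole structure again.

#include <stdio.h>
#include <stdint.h>

struct range {
	uint64_t start;
	uint64_t end;		/* inclusive */
	struct range *next;	/* stands in for rb_next() */
};

/* Stand-in for the rbtree lookup: O(n) here, O(log n) in the kernel. */
static struct range *find_range(struct range *head, uint64_t start)
{
	struct range *r;

	printf("  full search for %llu\n", (unsigned long long)start);
	for (r = head; r; r = r->next)
		if (r->start <= start && start <= r->end)
			return r;
	return NULL;
}

/* Visit every range covering [start, end], re-searching only when
 * the successor is not adjacent to the range we just handled. */
static void walk_ranges(struct range *head, uint64_t start, uint64_t end)
{
	struct range *r;

search_again:
	r = find_range(head, start);	/* the search we want to skip */
	if (!r)
		return;
hit_next:
	printf("  handling [%llu, %llu]\n",
	       (unsigned long long)r->start, (unsigned long long)r->end);
	start = r->end + 1;
	if (start > end)
		return;
	/* The optimization: if the next object begins exactly where
	 * this one ended, use it directly and skip the search. */
	if (r->next && r->next->start == start) {
		r = r->next;
		goto hit_next;
	}
	goto search_again;
}

int main(void)
{
	struct range c = { 20, 29, NULL };
	struct range b = { 10, 19, &c };
	struct range a = { 0, 9, &b };

	/* Only one full search happens; b and c are reached through
	 * the adjacency test. */
	walk_ranges(&a, 0, 29);
	return 0;
}

Note that the kernel patch guards its shortcut with
start < end && prealloc && !need_resched(): it stays on the fast path
only while more of the requested range remains, a preallocated
extent_state is still in hand, and no reschedule is pending; otherwise
it falls back to search_again as before.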
fs/btrfs/extent_io.c
@@ -694,8 +694,8 @@ static int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 		BUG_ON(err == -EEXIST);
 		goto out;
 	}
-
 	state = rb_entry(node, struct extent_state, rb_node);
+hit_next:
 	last_start = state->start;
 	last_end = state->end;
@@ -706,6 +706,7 @@ static int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 	 * Just lock what we found and keep going
 	 */
 	if (state->start == start && state->end <= end) {
+		struct rb_node *next_node;
 		set = state->state & bits;
 		if (set && exclusive) {
 			*failed_start = state->start;
@@ -716,7 +717,17 @@ static int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 		merge_state(tree, state);
 		if (last_end == (u64)-1)
 			goto out;
+
 		start = last_end + 1;
+		if (start < end && prealloc && !need_resched()) {
+			next_node = rb_next(node);
+			if (next_node) {
+				state = rb_entry(next_node, struct extent_state,
+						 rb_node);
+				if (state->start == start)
+					goto hit_next;
+			}
+		}
 		goto search_again;
 	}
@@ -852,7 +863,7 @@ int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
 		       gfp_t mask)
 {
 	return set_extent_bit(tree, start, end,
-			      EXTENT_DELALLOC | EXTENT_DIRTY,
+			      EXTENT_DELALLOC | EXTENT_DIRTY | EXTENT_UPTODATE,
 			      0, NULL, mask);
 }

fs/btrfs/file.c
@@ -136,8 +136,6 @@ static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
 	btrfs_set_trans_block_group(trans, inode);
 	hint_byte = 0;

-	set_extent_uptodate(io_tree, start_pos, end_of_last_block, GFP_NOFS);
-
 	/* check for reserved extents on each page, we don't want
 	 * to reset the delalloc bit on things that already have
 	 * extents reserved.
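
(This removal and the EXTENT_UPTODATE hunk above go together: once
set_extent_delalloc() sets EXTENT_UPTODATE in the same call, the
separate set_extent_uptodate() walk over the same range becomes
redundant, so one tree walk replaces two.)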