Commit ee39b432 authored by David Sterba

btrfs: remove unlikely from data-dependent branches and slow paths

These branch hints are on conditions that obviously depend on the data being
processed, where the CPU's branch predictor will do a better job according to
the actual load. It also does not make sense to use the hints in slow paths
that do a lot of other operations like locking, waiting or IO.
Signed-off-by: David Sterba <dsterba@suse.cz>
parent 5d99a998
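For illustration only (this sketch is not part of the commit): in the kernel,
likely() and unlikely() wrap GCC's __builtin_expect(), a static hint that
biases code layout toward one branch outcome. When the outcome follows the
data being processed, a fixed hint is as likely to be wrong as right, whereas
the CPU's dynamic branch predictor adapts to the actual load. The helper
below and its condition are hypothetical.

/* Kernel-style branch hints, as defined in include/linux/compiler.h. */
#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

/*
 * Hypothetical data-dependent check: whether a copy came up short
 * depends entirely on the incoming data, so no static hint is given
 * and the dynamic predictor decides from the observed pattern.
 */
static int copy_was_short(unsigned long copied, unsigned long wanted)
{
	return copied < wanted;	/* data-dependent: no unlikely() */
}

A hint still pays off where one outcome is rare regardless of input, e.g.
if (unlikely(!ptr)) after an allocation. In the slow paths touched below,
even a correct hint is dwarfed by the locking, waiting and IO that follow.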
@@ -9694,7 +9694,7 @@ void btrfs_end_nocow_write(struct btrfs_root *root)
 int btrfs_start_nocow_write(struct btrfs_root *root)
 {
-	if (unlikely(atomic_read(&root->will_be_snapshoted)))
+	if (atomic_read(&root->will_be_snapshoted))
 		return 0;
 	percpu_counter_inc(&root->subv_writers->counter);
@@ -9702,7 +9702,7 @@ int btrfs_start_nocow_write(struct btrfs_root *root)
 	 * Make sure counter is updated before we check for snapshot creation.
 	 */
 	smp_mb();
-	if (unlikely(atomic_read(&root->will_be_snapshoted))) {
+	if (atomic_read(&root->will_be_snapshoted)) {
 		btrfs_end_nocow_write(root);
 		return 0;
 	}

@@ -452,7 +452,7 @@ static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
 		if (unlikely(copied == 0))
 			break;
-		if (unlikely(copied < PAGE_CACHE_SIZE - offset)) {
+		if (copied < PAGE_CACHE_SIZE - offset) {
 			offset += copied;
 		} else {
 			pg++;

@@ -1792,7 +1792,7 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
 	if (sync)
 		atomic_inc(&BTRFS_I(inode)->sync_writers);
-	if (unlikely(file->f_flags & O_DIRECT)) {
+	if (file->f_flags & O_DIRECT) {
 		num_written = __btrfs_direct_write(iocb, from, pos);
 	} else {
 		num_written = __btrfs_buffered_write(file, from, pos);

@@ -7802,9 +7802,9 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
 	atomic_inc(&dip->pending_bios);
 	while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) {
-		if (unlikely(map_length < submit_len + bvec->bv_len ||
-		    bio_add_page(bio, bvec->bv_page, bvec->bv_len,
-				 bvec->bv_offset) < bvec->bv_len)) {
+		if (map_length < submit_len + bvec->bv_len ||
+		    bio_add_page(bio, bvec->bv_page, bvec->bv_len,
+				 bvec->bv_offset) < bvec->bv_len) {
 			/*
 			 * inc the count before we submit the bio so
 			 * we know the end IO handler won't happen before

@@ -8017,8 +8017,8 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
 		ret = btrfs_delalloc_reserve_space(inode, count);
 		if (ret)
 			goto out;
-	} else if (unlikely(test_bit(BTRFS_INODE_READDIO_NEED_LOCK,
-				     &BTRFS_I(inode)->runtime_flags))) {
+	} else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK,
+			    &BTRFS_I(inode)->runtime_flags)) {
 		inode_dio_done(inode);
 		flags = DIO_LOCKING | DIO_SKIP_HOLES;
 		wakeup = false;

@@ -3167,7 +3167,7 @@ static void clone_update_extent_map(struct inode *inode,
 				em->start + em->len - 1, 0);
 	}
-	if (unlikely(ret))
+	if (ret)
 		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
 			&BTRFS_I(inode)->runtime_flags);
 }

@@ -418,7 +418,7 @@ start_transaction(struct btrfs_root *root, u64 num_items, unsigned int type,
 	/*
 	 * Do the reservation for the relocation root creation
 	 */
-	if (unlikely(need_reserve_reloc_root(root))) {
+	if (need_reserve_reloc_root(root)) {
 		num_bytes += root->nodesize;
 		reloc_reserved = true;
 	}