Commit fe5ecbe8 authored by David Sterba

btrfs: precalculate checksums per leaf once

btrfs_csum_bytes_to_leaves shows up in system profiles, which makes it a
candidate for optimization. Now that the 64bit division has been replaced
by a shift, there's still a calculation done each time the function is
called: the number of checksums per leaf.

As this is a constant for the entire lifetime of the filesystem, we can
calculate it once at mount time and reuse it. This also allows reducing
the division to 64bit/32bit, as we know the constant will always fit the
32bit type.
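
To make the arithmetic concrete, here is a minimal, self-contained userspace
sketch of the calculation; the 16KiB leaf payload, 4-byte crc32c checksum size
and 4KiB sector size are assumed example values, not figures taken from this
commit:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* Assumed example values; the kernel derives these from the superblock. */
		uint32_t max_item_size = 16 * 1024;	/* roughly BTRFS_MAX_ITEM_SIZE() for a 16KiB nodesize */
		uint32_t csum_size = 4;		/* crc32c */
		uint32_t sectorsize_bits = 12;	/* ilog2(4096) */

		/* Mount-time precalculation: constant for the filesystem lifetime, fits in u32. */
		uint32_t csums_per_leaf = max_item_size / csum_size;

		/* Per-call work: one shift plus one round-up division by a 32bit constant. */
		uint64_t csum_bytes = 1024 * 1024;	/* data range covered by checksums */
		uint64_t num_csums = csum_bytes >> sectorsize_bits;
		uint64_t leaves = (num_csums + csums_per_leaf - 1) / csums_per_leaf;

		printf("csums_per_leaf=%u num_csums=%llu leaves=%llu\n",
		       csums_per_leaf, (unsigned long long)num_csums,
		       (unsigned long long)leaves);
		return 0;
	}

With these numbers, checksums for 1MiB of data amount to 256 csum entries,
which round up to a single leaf.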

Replace the open-coded rounding up with a macro that handles the 64bit
division internally and, as it's now a short function, make it static
inline (slight code size increase, slight stack usage reduction).
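
As a sketch of just the rounding step (the identifiers mirror the old and new
helpers shown in the diff below; this is not a complete function):

	/* Old: open-coded round-up, both operands are u64. */
	num_csums += num_csums_per_leaf - 1;
	num_csums = div64_u64(num_csums, num_csums_per_leaf);

	/* New: macro-based round-up, u64 dividend divided by a u32 divisor. */
	num_csums = DIV_ROUND_UP_ULL(num_csums, fs_info->csums_per_leaf);
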
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 22b6331d
@@ -938,6 +938,7 @@ struct btrfs_fs_info {
 	/* ilog2 of sectorsize, use to avoid 64bit division */
 	u32 sectorsize_bits;
 	u32 csum_size;
+	u32 csums_per_leaf;
 	u32 stripesize;

 	/* Block groups and devices containing active swapfiles. */
@@ -2525,7 +2526,17 @@ int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
 				     enum btrfs_inline_ref_type is_data);
 u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset);

-u64 btrfs_csum_bytes_to_leaves(struct btrfs_fs_info *fs_info, u64 csum_bytes);
+/*
+ * Take the number of bytes to be checksummed and figure out how many leaves
+ * it would require to store the csums for that many bytes.
+ */
+static inline u64 btrfs_csum_bytes_to_leaves(
+			const struct btrfs_fs_info *fs_info, u64 csum_bytes)
+{
+	const u64 num_csums = csum_bytes >> fs_info->sectorsize_bits;
+
+	return DIV_ROUND_UP_ULL(num_csums, fs_info->csums_per_leaf);
+}

 /*
  * Use this if we would be adding new items, as we could split nodes as we cow
...
@@ -3081,6 +3081,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
 	fs_info->sectorsize = sectorsize;
 	fs_info->sectorsize_bits = ilog2(sectorsize);
 	fs_info->csum_size = btrfs_super_csum_size(disk_super);
+	fs_info->csums_per_leaf = BTRFS_MAX_ITEM_SIZE(fs_info) / fs_info->csum_size;
 	fs_info->stripesize = stripesize;

 	/*
...
@@ -2132,25 +2132,6 @@ static u64 find_middle(struct rb_root *root)
 }
 #endif

-/*
- * Takes the number of bytes to be csumm'ed and figures out how many leaves it
- * would require to store the csums for that many bytes.
- */
-u64 btrfs_csum_bytes_to_leaves(struct btrfs_fs_info *fs_info, u64 csum_bytes)
-{
-	u64 csum_size;
-	u64 num_csums_per_leaf;
-	u64 num_csums;
-
-	csum_size = BTRFS_MAX_ITEM_SIZE(fs_info);
-	num_csums_per_leaf = div64_u64(csum_size,
-			(u64)btrfs_super_csum_size(fs_info->super_copy));
-	num_csums = csum_bytes >> fs_info->sectorsize_bits;
-	num_csums += num_csums_per_leaf - 1;
-	num_csums = div64_u64(num_csums, num_csums_per_leaf);
-	return num_csums;
-}
-
 /*
  * this starts processing the delayed reference count updates and
  * extent insertions we have queued up so far. count can be
...
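
For context, one plausible usage pattern for the new inline helper, modeled
loosely on existing reservation call sites; the pairing with
btrfs_calc_insert_metadata_size() is illustrative only and not part of this
diff:

	/*
	 * Illustrative only: size a metadata reservation for the checksum
	 * items covering csum_bytes worth of data.
	 */
	u64 csum_leaves = btrfs_csum_bytes_to_leaves(fs_info, csum_bytes);
	u64 to_reserve = btrfs_calc_insert_metadata_size(fs_info, csum_leaves);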