Commit 4fec3fc0 authored by Chao Yu, committed by Jaegeuk Kim

f2fs: use round_up to enhance calculation

.i_cluster_size should be a power of 2, so we can use round_up() instead
of roundup() to simplify the calculation.
Signed-off-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
parent c75488fb
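
For context, the kernel's round_up() aligns to a power-of-two boundary with mask arithmetic, while roundup() handles any multiple at the cost of a division. Below is a minimal user-space sketch with simplified stand-ins modelled on the kernel macros (the macro bodies here are reimplementations for illustration, not the kernel headers); the 4KB page size and 4-page cluster are hypothetical values:

#include <assert.h>
#include <stdio.h>

/*
 * Simplified user-space stand-ins, for illustration only:
 * round_up() assumes a power-of-two alignment and uses a mask,
 * roundup() works for any alignment but needs a division.
 */
#define round_up(x, y)	((((x) - 1) | ((y) - 1)) + 1)
#define roundup(x, y)	((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	/* Hypothetical values: 4KB pages, cluster of 4 pages (both powers of 2). */
	unsigned long long cluster_bytes = 4ULL << 12;	/* i_cluster_size << PAGE_SHIFT */
	unsigned long long from = 5000;			/* arbitrary truncation offset */

	/* For a power-of-two alignment the two macros agree ... */
	assert(round_up(from, cluster_bytes) == roundup(from, cluster_bytes));

	/* ... but round_up() gets there with a mask instead of a divide. */
	printf("round_up(%llu, %llu) = %llu\n",
	       from, cluster_bytes, round_up(from, cluster_bytes));
	return 0;
}
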
@@ -742,16 +742,9 @@ int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
 	 * for compressed file, only support cluster size
 	 * aligned truncation.
 	 */
-	if (f2fs_compressed_file(inode)) {
-		size_t cluster_shift = PAGE_SHIFT +
-					F2FS_I(inode)->i_log_cluster_size;
-		size_t cluster_mask = (1 << cluster_shift) - 1;
-
-		free_from = from >> cluster_shift;
-		if (from & cluster_mask)
-			free_from++;
-		free_from <<= cluster_shift;
-	}
+	if (f2fs_compressed_file(inode))
+		free_from = round_up(from,
+				F2FS_I(inode)->i_cluster_size << PAGE_SHIFT);
 #endif
 
 	err = f2fs_do_truncate_blocks(inode, free_from, lock);
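
The removed open-coded shift/mask sequence and the new round_up() call compute the same cluster-aligned free_from whenever the cluster size is a power of two. A quick self-contained check of that equivalence, using hypothetical values for PAGE_SHIFT and i_log_cluster_size and the simplified round_up() stand-in from the sketch above:

#include <assert.h>

/* Same simplified stand-in as above; assumes a power-of-two alignment. */
#define round_up(x, y)	((((x) - 1) | ((y) - 1)) + 1)

int main(void)
{
	/* Hypothetical values: PAGE_SHIFT = 12, i_log_cluster_size = 2,
	 * so i_cluster_size = 1 << 2 = 4 pages. */
	const unsigned int page_shift = 12;
	const unsigned int log_cluster_size = 2;
	const unsigned long long cluster_size = 1ULL << log_cluster_size;

	for (unsigned long long from = 0; from < (1ULL << 20); from += 4093) {
		/* Old open-coded rounding: shift down, bump on a remainder, shift back up. */
		unsigned long long cluster_shift = page_shift + log_cluster_size;
		unsigned long long cluster_mask = (1ULL << cluster_shift) - 1;
		unsigned long long old_free_from = from >> cluster_shift;

		if (from & cluster_mask)
			old_free_from++;
		old_free_from <<= cluster_shift;

		/* New form from the hunk above. */
		unsigned long long new_free_from =
			round_up(from, cluster_size << page_shift);

		assert(old_free_from == new_free_from);
	}
	return 0;
}
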
......@@ -3563,7 +3556,7 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
count = roundup(count, F2FS_I(inode)->i_cluster_size);
count = round_up(count, F2FS_I(inode)->i_cluster_size);
ret = release_compress_blocks(&dn, count);
@@ -3715,7 +3708,7 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
 		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
 		count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
-		count = roundup(count, F2FS_I(inode)->i_cluster_size);
+		count = round_up(count, F2FS_I(inode)->i_cluster_size);
 
 		ret = reserve_compress_blocks(&dn, count);