Commit e15d54d5 authored by Yunlei He, committed by Jaegeuk Kim

f2fs: Allocate and stat mem used by free nid bitmap more accurately

This patch uses the f2fs_bitmap_size macro to calculate the memory used by the
free nid bitmap, and makes the stats account for the used memory including the aligned part.
Signed-off-by: Yunlei He <heyunlei@huawei.com>
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
parent 9dc956b2
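
For context, here is a minimal user-space sketch (not part of this patch) comparing the
removed NAT_ENTRY_BITMAP_SIZE_ALIGNED macro with the size f2fs_bitmap_size() yields.
It assumes f2fs_bitmap_size(nr) expands to BITS_TO_LONGS(nr) * sizeof(unsigned long),
as defined in fs/f2fs/f2fs.h, and hard-codes a 4 KiB page and a 9-byte packed
struct f2fs_nat_entry; NAT_ENTRY_SIZE is a stand-in name used only in this sketch.

#include <stdio.h>

#define PAGE_SIZE		4096UL
#define NAT_ENTRY_SIZE		9UL	/* assumed: __u8 version + __le32 ino + __le32 block_addr, packed */
#define BITS_PER_LONG		(8 * sizeof(unsigned long))

#define NAT_ENTRY_PER_BLOCK	(PAGE_SIZE / NAT_ENTRY_SIZE)

/* old macros, removed from f2fs_fs.h by this patch */
#define NAT_ENTRY_BITMAP_SIZE	((NAT_ENTRY_PER_BLOCK + 7) / 8)
#define NAT_ENTRY_BITMAP_SIZE_ALIGNED				\
	((NAT_ENTRY_BITMAP_SIZE + BITS_PER_LONG - 1) /		\
	BITS_PER_LONG * BITS_PER_LONG)

/* assumed expansion of the helper used after this patch (fs/f2fs/f2fs.h) */
#define BITS_TO_LONGS(nr)	(((nr) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define f2fs_bitmap_size(nr)	(BITS_TO_LONGS(nr) * sizeof(unsigned long))

int main(void)
{
	printf("NAT entries per block:                 %lu\n",
			(unsigned long)NAT_ENTRY_PER_BLOCK);
	printf("old NAT_ENTRY_BITMAP_SIZE:             %lu bytes\n",
			(unsigned long)NAT_ENTRY_BITMAP_SIZE);
	printf("old NAT_ENTRY_BITMAP_SIZE_ALIGNED:     %lu bytes\n",
			(unsigned long)NAT_ENTRY_BITMAP_SIZE_ALIGNED);
	printf("f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK): %lu bytes\n",
			(unsigned long)f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK));
	return 0;
}

Under these assumptions both the aligned macro and f2fs_bitmap_size() come to 64 bytes
per NAT block on a 64-bit build. The point of the patch, visible in the diff below, is that
debug.c previously counted only the unaligned NAT_ENTRY_BITMAP_SIZE (57 bytes here) while
node.c allocated the aligned size, so switching both sites to f2fs_bitmap_size() keeps
allocation and accounting consistent.
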
@@ -215,7 +215,8 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
 	si->base_mem += sizeof(struct f2fs_nm_info);
 	si->base_mem += __bitmap_size(sbi, NAT_BITMAP);
 	si->base_mem += (NM_I(sbi)->nat_bits_blocks << F2FS_BLKSIZE_BITS);
-	si->base_mem += NM_I(sbi)->nat_blocks * NAT_ENTRY_BITMAP_SIZE;
+	si->base_mem += NM_I(sbi)->nat_blocks *
+				f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK);
 	si->base_mem += NM_I(sbi)->nat_blocks / 8;
 	si->base_mem += NM_I(sbi)->nat_blocks * sizeof(unsigned short);

@@ -2797,7 +2797,7 @@ static int init_free_nid_cache(struct f2fs_sb_info *sbi)
 	for (i = 0; i < nm_i->nat_blocks; i++) {
 		nm_i->free_nid_bitmap[i] = f2fs_kvzalloc(sbi,
-				NAT_ENTRY_BITMAP_SIZE_ALIGNED, GFP_KERNEL);
+				f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK), GFP_KERNEL);
 		if (!nm_i->free_nid_bitmap)
 			return -ENOMEM;
 	}

@@ -304,11 +304,6 @@ struct f2fs_node {
  * For NAT entries
  */
 #define NAT_ENTRY_PER_BLOCK	(PAGE_SIZE / sizeof(struct f2fs_nat_entry))
-#define NAT_ENTRY_BITMAP_SIZE	((NAT_ENTRY_PER_BLOCK + 7) / 8)
-#define NAT_ENTRY_BITMAP_SIZE_ALIGNED				\
-	((NAT_ENTRY_BITMAP_SIZE + BITS_PER_LONG - 1) /		\
-	BITS_PER_LONG * BITS_PER_LONG)
 struct f2fs_nat_entry {
 	__u8 version;	/* latest version of cached nat entry */