Commit 9954e7af authored by Ryusuke Konishi's avatar Ryusuke Konishi

nilfs2: add free entries count only if clear bit operation succeeded

Three functions of the current persistent object allocator —
nilfs_palloc_commit_free_entry, nilfs_palloc_abort_alloc_entry, and
nilfs_palloc_freev — unconditionally update a free-entries counter after
performing a clear bit operation on a bitmap block.

If the clear bit operation fails because the bit was already cleared
(i.e. the entry was already freed), the counter will no longer add up.
This patch fixes the issue by making the counter updates conditional on
the result of the clear bit operation.
Signed-off-by: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
parent 25b18d39
@@ -521,7 +521,7 @@ void nilfs_palloc_commit_free_entry(struct inode *inode,
 				    group_offset, bitmap))
 		printk(KERN_WARNING "%s: entry number %llu already freed\n",
 		       __func__, (unsigned long long)req->pr_entry_nr);
+	else
 		nilfs_palloc_group_desc_add_entries(inode, group, desc, 1);
 	kunmap(req->pr_bitmap_bh->b_page);
@@ -558,7 +558,7 @@ void nilfs_palloc_abort_alloc_entry(struct inode *inode,
 				    group_offset, bitmap))
 		printk(KERN_WARNING "%s: entry number %llu already freed\n",
 		       __func__, (unsigned long long)req->pr_entry_nr);
+	else
 		nilfs_palloc_group_desc_add_entries(inode, group, desc, 1);
 	kunmap(req->pr_bitmap_bh->b_page);
@@ -665,7 +665,7 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems)
 		for (j = i, n = 0;
 		     (j < nitems) && nilfs_palloc_group_is_in(inode, group,
 							      entry_nrs[j]);
-		     j++, n++) {
+		     j++) {
 			nilfs_palloc_group(inode, entry_nrs[j], &group_offset);
 			if (!nilfs_clear_bit_atomic(
 				    nilfs_mdt_bgl_lock(inode, group),
@@ -674,6 +674,8 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems)
 					"%s: entry number %llu already freed\n",
 					__func__,
 					(unsigned long long)entry_nrs[j]);
+			} else {
+				n++;
 			}
 		}
 		nilfs_palloc_group_desc_add_entries(inode, group, desc, n);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment