Commit 5fc7b141 authored by Ryusuke Konishi's avatar Ryusuke Konishi

nilfs2: use mark_buffer_dirty to mark btnode or meta data dirty

This replaces the nilfs_mdt_mark_buffer_dirty and nilfs_btnode_mark_dirty
macros with mark_buffer_dirty, and gets rid of nilfs_mark_buffer_dirty,
nilfs2's own buffer-dirtying function.
Signed-off-by: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
parent aa405b1f
...@@ -489,8 +489,8 @@ int nilfs_palloc_prepare_alloc_entry(struct inode *inode, ...@@ -489,8 +489,8 @@ int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
void nilfs_palloc_commit_alloc_entry(struct inode *inode, void nilfs_palloc_commit_alloc_entry(struct inode *inode,
struct nilfs_palloc_req *req) struct nilfs_palloc_req *req)
{ {
nilfs_mdt_mark_buffer_dirty(req->pr_bitmap_bh); mark_buffer_dirty(req->pr_bitmap_bh);
nilfs_mdt_mark_buffer_dirty(req->pr_desc_bh); mark_buffer_dirty(req->pr_desc_bh);
nilfs_mdt_mark_dirty(inode); nilfs_mdt_mark_dirty(inode);
brelse(req->pr_bitmap_bh); brelse(req->pr_bitmap_bh);
...@@ -527,8 +527,8 @@ void nilfs_palloc_commit_free_entry(struct inode *inode, ...@@ -527,8 +527,8 @@ void nilfs_palloc_commit_free_entry(struct inode *inode,
kunmap(req->pr_bitmap_bh->b_page); kunmap(req->pr_bitmap_bh->b_page);
kunmap(req->pr_desc_bh->b_page); kunmap(req->pr_desc_bh->b_page);
nilfs_mdt_mark_buffer_dirty(req->pr_desc_bh); mark_buffer_dirty(req->pr_desc_bh);
nilfs_mdt_mark_buffer_dirty(req->pr_bitmap_bh); mark_buffer_dirty(req->pr_bitmap_bh);
nilfs_mdt_mark_dirty(inode); nilfs_mdt_mark_dirty(inode);
brelse(req->pr_bitmap_bh); brelse(req->pr_bitmap_bh);
...@@ -683,8 +683,8 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems) ...@@ -683,8 +683,8 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems)
kunmap(bitmap_bh->b_page); kunmap(bitmap_bh->b_page);
kunmap(desc_bh->b_page); kunmap(desc_bh->b_page);
nilfs_mdt_mark_buffer_dirty(desc_bh); mark_buffer_dirty(desc_bh);
nilfs_mdt_mark_buffer_dirty(bitmap_bh); mark_buffer_dirty(bitmap_bh);
nilfs_mdt_mark_dirty(inode); nilfs_mdt_mark_dirty(inode);
brelse(bitmap_bh); brelse(bitmap_bh);
......
...@@ -254,7 +254,7 @@ void nilfs_btnode_commit_change_key(struct address_space *btnc, ...@@ -254,7 +254,7 @@ void nilfs_btnode_commit_change_key(struct address_space *btnc,
"invalid oldkey %lld (newkey=%lld)", "invalid oldkey %lld (newkey=%lld)",
(unsigned long long)oldkey, (unsigned long long)oldkey,
(unsigned long long)newkey); (unsigned long long)newkey);
nilfs_btnode_mark_dirty(obh); mark_buffer_dirty(obh);
spin_lock_irq(&btnc->tree_lock); spin_lock_irq(&btnc->tree_lock);
radix_tree_delete(&btnc->page_tree, oldkey); radix_tree_delete(&btnc->page_tree, oldkey);
...@@ -266,7 +266,7 @@ void nilfs_btnode_commit_change_key(struct address_space *btnc, ...@@ -266,7 +266,7 @@ void nilfs_btnode_commit_change_key(struct address_space *btnc,
unlock_page(opage); unlock_page(opage);
} else { } else {
nilfs_copy_buffer(nbh, obh); nilfs_copy_buffer(nbh, obh);
nilfs_btnode_mark_dirty(nbh); mark_buffer_dirty(nbh);
nbh->b_blocknr = newkey; nbh->b_blocknr = newkey;
ctxt->bh = nbh; ctxt->bh = nbh;
......
...@@ -50,7 +50,4 @@ void nilfs_btnode_commit_change_key(struct address_space *, ...@@ -50,7 +50,4 @@ void nilfs_btnode_commit_change_key(struct address_space *,
void nilfs_btnode_abort_change_key(struct address_space *, void nilfs_btnode_abort_change_key(struct address_space *,
struct nilfs_btnode_chkey_ctxt *); struct nilfs_btnode_chkey_ctxt *);
#define nilfs_btnode_mark_dirty(bh) nilfs_mark_buffer_dirty(bh)
#endif /* _NILFS_BTNODE_H */ #endif /* _NILFS_BTNODE_H */
...@@ -714,7 +714,7 @@ static void nilfs_btree_promote_key(struct nilfs_bmap *btree, ...@@ -714,7 +714,7 @@ static void nilfs_btree_promote_key(struct nilfs_bmap *btree,
nilfs_btree_get_nonroot_node(path, level), nilfs_btree_get_nonroot_node(path, level),
path[level].bp_index, key); path[level].bp_index, key);
if (!buffer_dirty(path[level].bp_bh)) if (!buffer_dirty(path[level].bp_bh))
nilfs_btnode_mark_dirty(path[level].bp_bh); mark_buffer_dirty(path[level].bp_bh);
} while ((path[level].bp_index == 0) && } while ((path[level].bp_index == 0) &&
(++level < nilfs_btree_height(btree) - 1)); (++level < nilfs_btree_height(btree) - 1));
} }
...@@ -739,7 +739,7 @@ static void nilfs_btree_do_insert(struct nilfs_bmap *btree, ...@@ -739,7 +739,7 @@ static void nilfs_btree_do_insert(struct nilfs_bmap *btree,
nilfs_btree_node_insert(node, path[level].bp_index, nilfs_btree_node_insert(node, path[level].bp_index,
*keyp, *ptrp, ncblk); *keyp, *ptrp, ncblk);
if (!buffer_dirty(path[level].bp_bh)) if (!buffer_dirty(path[level].bp_bh))
nilfs_btnode_mark_dirty(path[level].bp_bh); mark_buffer_dirty(path[level].bp_bh);
if (path[level].bp_index == 0) if (path[level].bp_index == 0)
nilfs_btree_promote_key(btree, path, level + 1, nilfs_btree_promote_key(btree, path, level + 1,
...@@ -777,9 +777,9 @@ static void nilfs_btree_carry_left(struct nilfs_bmap *btree, ...@@ -777,9 +777,9 @@ static void nilfs_btree_carry_left(struct nilfs_bmap *btree,
nilfs_btree_node_move_left(left, node, n, ncblk, ncblk); nilfs_btree_node_move_left(left, node, n, ncblk, ncblk);
if (!buffer_dirty(path[level].bp_bh)) if (!buffer_dirty(path[level].bp_bh))
nilfs_btnode_mark_dirty(path[level].bp_bh); mark_buffer_dirty(path[level].bp_bh);
if (!buffer_dirty(path[level].bp_sib_bh)) if (!buffer_dirty(path[level].bp_sib_bh))
nilfs_btnode_mark_dirty(path[level].bp_sib_bh); mark_buffer_dirty(path[level].bp_sib_bh);
nilfs_btree_promote_key(btree, path, level + 1, nilfs_btree_promote_key(btree, path, level + 1,
nilfs_btree_node_get_key(node, 0)); nilfs_btree_node_get_key(node, 0));
...@@ -823,9 +823,9 @@ static void nilfs_btree_carry_right(struct nilfs_bmap *btree, ...@@ -823,9 +823,9 @@ static void nilfs_btree_carry_right(struct nilfs_bmap *btree,
nilfs_btree_node_move_right(node, right, n, ncblk, ncblk); nilfs_btree_node_move_right(node, right, n, ncblk, ncblk);
if (!buffer_dirty(path[level].bp_bh)) if (!buffer_dirty(path[level].bp_bh))
nilfs_btnode_mark_dirty(path[level].bp_bh); mark_buffer_dirty(path[level].bp_bh);
if (!buffer_dirty(path[level].bp_sib_bh)) if (!buffer_dirty(path[level].bp_sib_bh))
nilfs_btnode_mark_dirty(path[level].bp_sib_bh); mark_buffer_dirty(path[level].bp_sib_bh);
path[level + 1].bp_index++; path[level + 1].bp_index++;
nilfs_btree_promote_key(btree, path, level + 1, nilfs_btree_promote_key(btree, path, level + 1,
...@@ -870,9 +870,9 @@ static void nilfs_btree_split(struct nilfs_bmap *btree, ...@@ -870,9 +870,9 @@ static void nilfs_btree_split(struct nilfs_bmap *btree,
nilfs_btree_node_move_right(node, right, n, ncblk, ncblk); nilfs_btree_node_move_right(node, right, n, ncblk, ncblk);
if (!buffer_dirty(path[level].bp_bh)) if (!buffer_dirty(path[level].bp_bh))
nilfs_btnode_mark_dirty(path[level].bp_bh); mark_buffer_dirty(path[level].bp_bh);
if (!buffer_dirty(path[level].bp_sib_bh)) if (!buffer_dirty(path[level].bp_sib_bh))
nilfs_btnode_mark_dirty(path[level].bp_sib_bh); mark_buffer_dirty(path[level].bp_sib_bh);
newkey = nilfs_btree_node_get_key(right, 0); newkey = nilfs_btree_node_get_key(right, 0);
newptr = path[level].bp_newreq.bpr_ptr; newptr = path[level].bp_newreq.bpr_ptr;
...@@ -919,7 +919,7 @@ static void nilfs_btree_grow(struct nilfs_bmap *btree, ...@@ -919,7 +919,7 @@ static void nilfs_btree_grow(struct nilfs_bmap *btree,
nilfs_btree_node_set_level(root, level + 1); nilfs_btree_node_set_level(root, level + 1);
if (!buffer_dirty(path[level].bp_sib_bh)) if (!buffer_dirty(path[level].bp_sib_bh))
nilfs_btnode_mark_dirty(path[level].bp_sib_bh); mark_buffer_dirty(path[level].bp_sib_bh);
path[level].bp_bh = path[level].bp_sib_bh; path[level].bp_bh = path[level].bp_sib_bh;
path[level].bp_sib_bh = NULL; path[level].bp_sib_bh = NULL;
...@@ -1194,7 +1194,7 @@ static void nilfs_btree_do_delete(struct nilfs_bmap *btree, ...@@ -1194,7 +1194,7 @@ static void nilfs_btree_do_delete(struct nilfs_bmap *btree,
nilfs_btree_node_delete(node, path[level].bp_index, nilfs_btree_node_delete(node, path[level].bp_index,
keyp, ptrp, ncblk); keyp, ptrp, ncblk);
if (!buffer_dirty(path[level].bp_bh)) if (!buffer_dirty(path[level].bp_bh))
nilfs_btnode_mark_dirty(path[level].bp_bh); mark_buffer_dirty(path[level].bp_bh);
if (path[level].bp_index == 0) if (path[level].bp_index == 0)
nilfs_btree_promote_key(btree, path, level + 1, nilfs_btree_promote_key(btree, path, level + 1,
nilfs_btree_node_get_key(node, 0)); nilfs_btree_node_get_key(node, 0));
...@@ -1226,9 +1226,9 @@ static void nilfs_btree_borrow_left(struct nilfs_bmap *btree, ...@@ -1226,9 +1226,9 @@ static void nilfs_btree_borrow_left(struct nilfs_bmap *btree,
nilfs_btree_node_move_right(left, node, n, ncblk, ncblk); nilfs_btree_node_move_right(left, node, n, ncblk, ncblk);
if (!buffer_dirty(path[level].bp_bh)) if (!buffer_dirty(path[level].bp_bh))
nilfs_btnode_mark_dirty(path[level].bp_bh); mark_buffer_dirty(path[level].bp_bh);
if (!buffer_dirty(path[level].bp_sib_bh)) if (!buffer_dirty(path[level].bp_sib_bh))
nilfs_btnode_mark_dirty(path[level].bp_sib_bh); mark_buffer_dirty(path[level].bp_sib_bh);
nilfs_btree_promote_key(btree, path, level + 1, nilfs_btree_promote_key(btree, path, level + 1,
nilfs_btree_node_get_key(node, 0)); nilfs_btree_node_get_key(node, 0));
...@@ -1258,9 +1258,9 @@ static void nilfs_btree_borrow_right(struct nilfs_bmap *btree, ...@@ -1258,9 +1258,9 @@ static void nilfs_btree_borrow_right(struct nilfs_bmap *btree,
nilfs_btree_node_move_left(node, right, n, ncblk, ncblk); nilfs_btree_node_move_left(node, right, n, ncblk, ncblk);
if (!buffer_dirty(path[level].bp_bh)) if (!buffer_dirty(path[level].bp_bh))
nilfs_btnode_mark_dirty(path[level].bp_bh); mark_buffer_dirty(path[level].bp_bh);
if (!buffer_dirty(path[level].bp_sib_bh)) if (!buffer_dirty(path[level].bp_sib_bh))
nilfs_btnode_mark_dirty(path[level].bp_sib_bh); mark_buffer_dirty(path[level].bp_sib_bh);
path[level + 1].bp_index++; path[level + 1].bp_index++;
nilfs_btree_promote_key(btree, path, level + 1, nilfs_btree_promote_key(btree, path, level + 1,
...@@ -1289,7 +1289,7 @@ static void nilfs_btree_concat_left(struct nilfs_bmap *btree, ...@@ -1289,7 +1289,7 @@ static void nilfs_btree_concat_left(struct nilfs_bmap *btree,
nilfs_btree_node_move_left(left, node, n, ncblk, ncblk); nilfs_btree_node_move_left(left, node, n, ncblk, ncblk);
if (!buffer_dirty(path[level].bp_sib_bh)) if (!buffer_dirty(path[level].bp_sib_bh))
nilfs_btnode_mark_dirty(path[level].bp_sib_bh); mark_buffer_dirty(path[level].bp_sib_bh);
nilfs_btnode_delete(path[level].bp_bh); nilfs_btnode_delete(path[level].bp_bh);
path[level].bp_bh = path[level].bp_sib_bh; path[level].bp_bh = path[level].bp_sib_bh;
...@@ -1315,7 +1315,7 @@ static void nilfs_btree_concat_right(struct nilfs_bmap *btree, ...@@ -1315,7 +1315,7 @@ static void nilfs_btree_concat_right(struct nilfs_bmap *btree,
nilfs_btree_node_move_left(node, right, n, ncblk, ncblk); nilfs_btree_node_move_left(node, right, n, ncblk, ncblk);
if (!buffer_dirty(path[level].bp_bh)) if (!buffer_dirty(path[level].bp_bh))
nilfs_btnode_mark_dirty(path[level].bp_bh); mark_buffer_dirty(path[level].bp_bh);
nilfs_btnode_delete(path[level].bp_sib_bh); nilfs_btnode_delete(path[level].bp_sib_bh);
path[level].bp_sib_bh = NULL; path[level].bp_sib_bh = NULL;
...@@ -1709,7 +1709,7 @@ nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *btree, ...@@ -1709,7 +1709,7 @@ nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *btree,
nilfs_btree_node_init(node, 0, 1, n, ncblk, keys, ptrs); nilfs_btree_node_init(node, 0, 1, n, ncblk, keys, ptrs);
nilfs_btree_node_insert(node, n, key, dreq->bpr_ptr, ncblk); nilfs_btree_node_insert(node, n, key, dreq->bpr_ptr, ncblk);
if (!buffer_dirty(bh)) if (!buffer_dirty(bh))
nilfs_btnode_mark_dirty(bh); mark_buffer_dirty(bh);
if (!nilfs_bmap_dirty(btree)) if (!nilfs_bmap_dirty(btree))
nilfs_bmap_set_dirty(btree); nilfs_bmap_set_dirty(btree);
...@@ -1787,7 +1787,7 @@ static int nilfs_btree_propagate_p(struct nilfs_bmap *btree, ...@@ -1787,7 +1787,7 @@ static int nilfs_btree_propagate_p(struct nilfs_bmap *btree,
{ {
while ((++level < nilfs_btree_height(btree) - 1) && while ((++level < nilfs_btree_height(btree) - 1) &&
!buffer_dirty(path[level].bp_bh)) !buffer_dirty(path[level].bp_bh))
nilfs_btnode_mark_dirty(path[level].bp_bh); mark_buffer_dirty(path[level].bp_bh);
return 0; return 0;
} }
...@@ -2229,7 +2229,7 @@ static int nilfs_btree_mark(struct nilfs_bmap *btree, __u64 key, int level) ...@@ -2229,7 +2229,7 @@ static int nilfs_btree_mark(struct nilfs_bmap *btree, __u64 key, int level)
} }
if (!buffer_dirty(bh)) if (!buffer_dirty(bh))
nilfs_btnode_mark_dirty(bh); mark_buffer_dirty(bh);
brelse(bh); brelse(bh);
if (!nilfs_bmap_dirty(btree)) if (!nilfs_bmap_dirty(btree))
nilfs_bmap_set_dirty(btree); nilfs_bmap_set_dirty(btree);
......
...@@ -216,14 +216,14 @@ int nilfs_cpfile_get_checkpoint(struct inode *cpfile, ...@@ -216,14 +216,14 @@ int nilfs_cpfile_get_checkpoint(struct inode *cpfile,
if (!nilfs_cpfile_is_in_first(cpfile, cno)) if (!nilfs_cpfile_is_in_first(cpfile, cno))
nilfs_cpfile_block_add_valid_checkpoints(cpfile, cp_bh, nilfs_cpfile_block_add_valid_checkpoints(cpfile, cp_bh,
kaddr, 1); kaddr, 1);
nilfs_mdt_mark_buffer_dirty(cp_bh); mark_buffer_dirty(cp_bh);
kaddr = kmap_atomic(header_bh->b_page, KM_USER0); kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
header = nilfs_cpfile_block_get_header(cpfile, header_bh, header = nilfs_cpfile_block_get_header(cpfile, header_bh,
kaddr); kaddr);
le64_add_cpu(&header->ch_ncheckpoints, 1); le64_add_cpu(&header->ch_ncheckpoints, 1);
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr, KM_USER0);
nilfs_mdt_mark_buffer_dirty(header_bh); mark_buffer_dirty(header_bh);
nilfs_mdt_mark_dirty(cpfile); nilfs_mdt_mark_dirty(cpfile);
} }
...@@ -326,7 +326,7 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile, ...@@ -326,7 +326,7 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
} }
if (nicps > 0) { if (nicps > 0) {
tnicps += nicps; tnicps += nicps;
nilfs_mdt_mark_buffer_dirty(cp_bh); mark_buffer_dirty(cp_bh);
nilfs_mdt_mark_dirty(cpfile); nilfs_mdt_mark_dirty(cpfile);
if (!nilfs_cpfile_is_in_first(cpfile, cno)) { if (!nilfs_cpfile_is_in_first(cpfile, cno)) {
count = count =
...@@ -358,7 +358,7 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile, ...@@ -358,7 +358,7 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
header = nilfs_cpfile_block_get_header(cpfile, header_bh, header = nilfs_cpfile_block_get_header(cpfile, header_bh,
kaddr); kaddr);
le64_add_cpu(&header->ch_ncheckpoints, -(u64)tnicps); le64_add_cpu(&header->ch_ncheckpoints, -(u64)tnicps);
nilfs_mdt_mark_buffer_dirty(header_bh); mark_buffer_dirty(header_bh);
nilfs_mdt_mark_dirty(cpfile); nilfs_mdt_mark_dirty(cpfile);
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr, KM_USER0);
} }
...@@ -671,10 +671,10 @@ static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno) ...@@ -671,10 +671,10 @@ static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno)
le64_add_cpu(&header->ch_nsnapshots, 1); le64_add_cpu(&header->ch_nsnapshots, 1);
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr, KM_USER0);
nilfs_mdt_mark_buffer_dirty(prev_bh); mark_buffer_dirty(prev_bh);
nilfs_mdt_mark_buffer_dirty(curr_bh); mark_buffer_dirty(curr_bh);
nilfs_mdt_mark_buffer_dirty(cp_bh); mark_buffer_dirty(cp_bh);
nilfs_mdt_mark_buffer_dirty(header_bh); mark_buffer_dirty(header_bh);
nilfs_mdt_mark_dirty(cpfile); nilfs_mdt_mark_dirty(cpfile);
brelse(prev_bh); brelse(prev_bh);
...@@ -774,10 +774,10 @@ static int nilfs_cpfile_clear_snapshot(struct inode *cpfile, __u64 cno) ...@@ -774,10 +774,10 @@ static int nilfs_cpfile_clear_snapshot(struct inode *cpfile, __u64 cno)
le64_add_cpu(&header->ch_nsnapshots, -1); le64_add_cpu(&header->ch_nsnapshots, -1);
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr, KM_USER0);
nilfs_mdt_mark_buffer_dirty(next_bh); mark_buffer_dirty(next_bh);
nilfs_mdt_mark_buffer_dirty(prev_bh); mark_buffer_dirty(prev_bh);
nilfs_mdt_mark_buffer_dirty(cp_bh); mark_buffer_dirty(cp_bh);
nilfs_mdt_mark_buffer_dirty(header_bh); mark_buffer_dirty(header_bh);
nilfs_mdt_mark_dirty(cpfile); nilfs_mdt_mark_dirty(cpfile);
brelse(prev_bh); brelse(prev_bh);
......
...@@ -54,7 +54,7 @@ static int nilfs_dat_prepare_entry(struct inode *dat, ...@@ -54,7 +54,7 @@ static int nilfs_dat_prepare_entry(struct inode *dat,
static void nilfs_dat_commit_entry(struct inode *dat, static void nilfs_dat_commit_entry(struct inode *dat,
struct nilfs_palloc_req *req) struct nilfs_palloc_req *req)
{ {
nilfs_mdt_mark_buffer_dirty(req->pr_entry_bh); mark_buffer_dirty(req->pr_entry_bh);
nilfs_mdt_mark_dirty(dat); nilfs_mdt_mark_dirty(dat);
brelse(req->pr_entry_bh); brelse(req->pr_entry_bh);
} }
...@@ -361,7 +361,7 @@ int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr) ...@@ -361,7 +361,7 @@ int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr)
entry->de_blocknr = cpu_to_le64(blocknr); entry->de_blocknr = cpu_to_le64(blocknr);
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr, KM_USER0);
nilfs_mdt_mark_buffer_dirty(entry_bh); mark_buffer_dirty(entry_bh);
nilfs_mdt_mark_dirty(dat); nilfs_mdt_mark_dirty(dat);
brelse(entry_bh); brelse(entry_bh);
......
...@@ -157,15 +157,11 @@ int nilfs_gccache_wait_and_mark_dirty(struct buffer_head *bh) ...@@ -157,15 +157,11 @@ int nilfs_gccache_wait_and_mark_dirty(struct buffer_head *bh)
if (buffer_dirty(bh)) if (buffer_dirty(bh))
return -EEXIST; return -EEXIST;
if (buffer_nilfs_node(bh)) { if (buffer_nilfs_node(bh) && nilfs_btree_broken_node_block(bh)) {
if (nilfs_btree_broken_node_block(bh)) { clear_buffer_uptodate(bh);
clear_buffer_uptodate(bh); return -EIO;
return -EIO;
}
nilfs_btnode_mark_dirty(bh);
} else {
nilfs_mark_buffer_dirty(bh);
} }
mark_buffer_dirty(bh);
return 0; return 0;
} }
......
...@@ -80,7 +80,7 @@ int nilfs_ifile_create_inode(struct inode *ifile, ino_t *out_ino, ...@@ -80,7 +80,7 @@ int nilfs_ifile_create_inode(struct inode *ifile, ino_t *out_ino,
return ret; return ret;
} }
nilfs_palloc_commit_alloc_entry(ifile, &req); nilfs_palloc_commit_alloc_entry(ifile, &req);
nilfs_mdt_mark_buffer_dirty(req.pr_entry_bh); mark_buffer_dirty(req.pr_entry_bh);
nilfs_mdt_mark_dirty(ifile); nilfs_mdt_mark_dirty(ifile);
*out_ino = (ino_t)req.pr_entry_nr; *out_ino = (ino_t)req.pr_entry_nr;
*out_bh = req.pr_entry_bh; *out_bh = req.pr_entry_bh;
...@@ -128,7 +128,7 @@ int nilfs_ifile_delete_inode(struct inode *ifile, ino_t ino) ...@@ -128,7 +128,7 @@ int nilfs_ifile_delete_inode(struct inode *ifile, ino_t ino)
raw_inode->i_flags = 0; raw_inode->i_flags = 0;
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr, KM_USER0);
nilfs_mdt_mark_buffer_dirty(req.pr_entry_bh); mark_buffer_dirty(req.pr_entry_bh);
brelse(req.pr_entry_bh); brelse(req.pr_entry_bh);
nilfs_palloc_commit_free_entry(ifile, &req); nilfs_palloc_commit_free_entry(ifile, &req);
......
...@@ -901,7 +901,7 @@ int nilfs_mark_inode_dirty(struct inode *inode) ...@@ -901,7 +901,7 @@ int nilfs_mark_inode_dirty(struct inode *inode)
return err; return err;
} }
nilfs_update_inode(inode, ibh); nilfs_update_inode(inode, ibh);
nilfs_mdt_mark_buffer_dirty(ibh); mark_buffer_dirty(ibh);
nilfs_mdt_mark_dirty(NILFS_I(inode)->i_root->ifile); nilfs_mdt_mark_dirty(NILFS_I(inode)->i_root->ifile);
brelse(ibh); brelse(ibh);
return 0; return 0;
......
...@@ -66,7 +66,7 @@ nilfs_mdt_insert_new_block(struct inode *inode, unsigned long block, ...@@ -66,7 +66,7 @@ nilfs_mdt_insert_new_block(struct inode *inode, unsigned long block,
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr, KM_USER0);
set_buffer_uptodate(bh); set_buffer_uptodate(bh);
nilfs_mark_buffer_dirty(bh); mark_buffer_dirty(bh);
nilfs_mdt_mark_dirty(inode); nilfs_mdt_mark_dirty(inode);
return 0; return 0;
} }
...@@ -355,7 +355,7 @@ int nilfs_mdt_mark_block_dirty(struct inode *inode, unsigned long block) ...@@ -355,7 +355,7 @@ int nilfs_mdt_mark_block_dirty(struct inode *inode, unsigned long block)
err = nilfs_mdt_read_block(inode, block, 0, &bh); err = nilfs_mdt_read_block(inode, block, 0, &bh);
if (unlikely(err)) if (unlikely(err))
return err; return err;
nilfs_mark_buffer_dirty(bh); mark_buffer_dirty(bh);
nilfs_mdt_mark_dirty(inode); nilfs_mdt_mark_dirty(inode);
brelse(bh); brelse(bh);
return 0; return 0;
......
...@@ -88,8 +88,6 @@ int nilfs_mdt_freeze_buffer(struct inode *inode, struct buffer_head *bh); ...@@ -88,8 +88,6 @@ int nilfs_mdt_freeze_buffer(struct inode *inode, struct buffer_head *bh);
struct buffer_head *nilfs_mdt_get_frozen_buffer(struct inode *inode, struct buffer_head *nilfs_mdt_get_frozen_buffer(struct inode *inode,
struct buffer_head *bh); struct buffer_head *bh);
#define nilfs_mdt_mark_buffer_dirty(bh) nilfs_mark_buffer_dirty(bh)
static inline void nilfs_mdt_mark_dirty(struct inode *inode) static inline void nilfs_mdt_mark_dirty(struct inode *inode)
{ {
if (!test_bit(NILFS_I_DIRTY, &NILFS_I(inode)->i_state)) if (!test_bit(NILFS_I_DIRTY, &NILFS_I(inode)->i_state))
......
...@@ -58,19 +58,6 @@ __nilfs_get_page_block(struct page *page, unsigned long block, pgoff_t index, ...@@ -58,19 +58,6 @@ __nilfs_get_page_block(struct page *page, unsigned long block, pgoff_t index,
return bh; return bh;
} }
/*
* Since the page cache of B-tree node pages or data page cache of pseudo
* inodes does not have a valid mapping->host pointer, calling
* mark_buffer_dirty() for their buffers causes a NULL pointer dereference;
* it calls __mark_inode_dirty(NULL) through __set_page_dirty().
* To avoid this problem, the old style mark_buffer_dirty() is used instead.
*/
void nilfs_mark_buffer_dirty(struct buffer_head *bh)
{
if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
__set_page_dirty_nobuffers(bh->b_page);
}
struct buffer_head *nilfs_grab_buffer(struct inode *inode, struct buffer_head *nilfs_grab_buffer(struct inode *inode,
struct address_space *mapping, struct address_space *mapping,
unsigned long blkoff, unsigned long blkoff,
......
...@@ -44,7 +44,6 @@ BUFFER_FNS(NILFS_Checked, nilfs_checked) /* buffer is verified */ ...@@ -44,7 +44,6 @@ BUFFER_FNS(NILFS_Checked, nilfs_checked) /* buffer is verified */
BUFFER_FNS(NILFS_Redirected, nilfs_redirected) /* redirected to a copy */ BUFFER_FNS(NILFS_Redirected, nilfs_redirected) /* redirected to a copy */
void nilfs_mark_buffer_dirty(struct buffer_head *bh);
int __nilfs_clear_page_dirty(struct page *); int __nilfs_clear_page_dirty(struct page *);
struct buffer_head *nilfs_grab_buffer(struct inode *, struct address_space *, struct buffer_head *nilfs_grab_buffer(struct inode *, struct address_space *,
......
...@@ -806,7 +806,7 @@ static int nilfs_segctor_create_checkpoint(struct nilfs_sc_info *sci) ...@@ -806,7 +806,7 @@ static int nilfs_segctor_create_checkpoint(struct nilfs_sc_info *sci)
/* The following code is duplicated with cpfile. But, it is /* The following code is duplicated with cpfile. But, it is
needed to collect the checkpoint even if it was not newly needed to collect the checkpoint even if it was not newly
created */ created */
nilfs_mdt_mark_buffer_dirty(bh_cp); mark_buffer_dirty(bh_cp);
nilfs_mdt_mark_dirty(nilfs->ns_cpfile); nilfs_mdt_mark_dirty(nilfs->ns_cpfile);
nilfs_cpfile_put_checkpoint( nilfs_cpfile_put_checkpoint(
nilfs->ns_cpfile, nilfs->ns_cno, bh_cp); nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
...@@ -1865,7 +1865,7 @@ static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci, ...@@ -1865,7 +1865,7 @@ static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
"failed to get inode block.\n"); "failed to get inode block.\n");
return err; return err;
} }
nilfs_mdt_mark_buffer_dirty(ibh); mark_buffer_dirty(ibh);
nilfs_mdt_mark_dirty(ifile); nilfs_mdt_mark_dirty(ifile);
spin_lock(&nilfs->ns_inode_lock); spin_lock(&nilfs->ns_inode_lock);
if (likely(!ii->i_bh)) if (likely(!ii->i_bh))
......
...@@ -117,7 +117,7 @@ static void nilfs_sufile_mod_counter(struct buffer_head *header_bh, ...@@ -117,7 +117,7 @@ static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd); le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd);
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr, KM_USER0);
nilfs_mdt_mark_buffer_dirty(header_bh); mark_buffer_dirty(header_bh);
} }
/** /**
...@@ -377,8 +377,8 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump) ...@@ -377,8 +377,8 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr, KM_USER0);
sui->ncleansegs--; sui->ncleansegs--;
nilfs_mdt_mark_buffer_dirty(header_bh); mark_buffer_dirty(header_bh);
nilfs_mdt_mark_buffer_dirty(su_bh); mark_buffer_dirty(su_bh);
nilfs_mdt_mark_dirty(sufile); nilfs_mdt_mark_dirty(sufile);
brelse(su_bh); brelse(su_bh);
*segnump = segnum; *segnump = segnum;
...@@ -421,7 +421,7 @@ void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum, ...@@ -421,7 +421,7 @@ void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
nilfs_sufile_mod_counter(header_bh, -1, 1); nilfs_sufile_mod_counter(header_bh, -1, 1);
NILFS_SUI(sufile)->ncleansegs--; NILFS_SUI(sufile)->ncleansegs--;
nilfs_mdt_mark_buffer_dirty(su_bh); mark_buffer_dirty(su_bh);
nilfs_mdt_mark_dirty(sufile); nilfs_mdt_mark_dirty(sufile);
} }
...@@ -452,7 +452,7 @@ void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum, ...@@ -452,7 +452,7 @@ void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1); nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
NILFS_SUI(sufile)->ncleansegs -= clean; NILFS_SUI(sufile)->ncleansegs -= clean;
nilfs_mdt_mark_buffer_dirty(su_bh); mark_buffer_dirty(su_bh);
nilfs_mdt_mark_dirty(sufile); nilfs_mdt_mark_dirty(sufile);
} }
...@@ -478,7 +478,7 @@ void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum, ...@@ -478,7 +478,7 @@ void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
sudirty = nilfs_segment_usage_dirty(su); sudirty = nilfs_segment_usage_dirty(su);
nilfs_segment_usage_set_clean(su); nilfs_segment_usage_set_clean(su);
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr, KM_USER0);
nilfs_mdt_mark_buffer_dirty(su_bh); mark_buffer_dirty(su_bh);
nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0); nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
NILFS_SUI(sufile)->ncleansegs++; NILFS_SUI(sufile)->ncleansegs++;
...@@ -498,7 +498,7 @@ int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum) ...@@ -498,7 +498,7 @@ int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh); ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
if (!ret) { if (!ret) {
nilfs_mdt_mark_buffer_dirty(bh); mark_buffer_dirty(bh);
nilfs_mdt_mark_dirty(sufile); nilfs_mdt_mark_dirty(sufile);
brelse(bh); brelse(bh);
} }
...@@ -533,7 +533,7 @@ int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum, ...@@ -533,7 +533,7 @@ int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
su->su_nblocks = cpu_to_le32(nblocks); su->su_nblocks = cpu_to_le32(nblocks);
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr, KM_USER0);
nilfs_mdt_mark_buffer_dirty(bh); mark_buffer_dirty(bh);
nilfs_mdt_mark_dirty(sufile); nilfs_mdt_mark_dirty(sufile);
brelse(bh); brelse(bh);
...@@ -612,7 +612,7 @@ void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum, ...@@ -612,7 +612,7 @@ void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
nilfs_sufile_mod_counter(header_bh, -1, 0); nilfs_sufile_mod_counter(header_bh, -1, 0);
NILFS_SUI(sufile)->ncleansegs--; NILFS_SUI(sufile)->ncleansegs--;
} }
nilfs_mdt_mark_buffer_dirty(su_bh); mark_buffer_dirty(su_bh);
nilfs_mdt_mark_dirty(sufile); nilfs_mdt_mark_dirty(sufile);
} }
...@@ -698,7 +698,7 @@ static int nilfs_sufile_truncate_range(struct inode *sufile, ...@@ -698,7 +698,7 @@ static int nilfs_sufile_truncate_range(struct inode *sufile,
} }
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr, KM_USER0);
if (nc > 0) { if (nc > 0) {
nilfs_mdt_mark_buffer_dirty(su_bh); mark_buffer_dirty(su_bh);
ncleaned += nc; ncleaned += nc;
} }
brelse(su_bh); brelse(su_bh);
...@@ -777,7 +777,7 @@ int nilfs_sufile_resize(struct inode *sufile, __u64 newnsegs) ...@@ -777,7 +777,7 @@ int nilfs_sufile_resize(struct inode *sufile, __u64 newnsegs)
header->sh_ncleansegs = cpu_to_le64(sui->ncleansegs); header->sh_ncleansegs = cpu_to_le64(sui->ncleansegs);
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr, KM_USER0);
nilfs_mdt_mark_buffer_dirty(header_bh); mark_buffer_dirty(header_bh);
nilfs_mdt_mark_dirty(sufile); nilfs_mdt_mark_dirty(sufile);
nilfs_set_nsegments(nilfs, newnsegs); nilfs_set_nsegments(nilfs, newnsegs);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment