Commit caebc160 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ryusuke/nilfs2

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ryusuke/nilfs2:
  nilfs2: use mark_buffer_dirty to mark btnode or meta data dirty
  nilfs2: always set back pointer to host inode in mapping->host
  nilfs2: get rid of NILFS_I_NILFS
  nilfs2: use list_first_entry
  nilfs2: use empty_aops for gc-inodes
  nilfs2: implement resize ioctl
  nilfs2: add truncation routine of segment usage file
  nilfs2: add routine to move secondary super block
  nilfs2: add ioctl which limits range of segment to be allocated
  nilfs2: zero fill unused portion of super root block
  nilfs2: super root size should change depending on inode size
  nilfs2: get rid of private page allocator
  nilfs2: merge list_del()/list_add_tail() to list_move_tail()
parents d798f7f0 5fc7b141
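Most of this series hinges on one invariant: every nilfs2 page cache, including the B-tree node caches and the shadow caches, now sets mapping->host to its host inode. Before that, those mappings had a NULL host, and the generic mark_buffer_dirty() would oops in __mark_inode_dirty(NULL) via __set_page_dirty(), so nilfs carried a private helper (removed from fs/nilfs2/page.c further down, and reproduced here for reference):

void nilfs_mark_buffer_dirty(struct buffer_head *bh)
{
        if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
                __set_page_dirty_nobuffers(bh->b_page);  /* skips the inode */
}

Once nilfs_mapping_init() records the inode in mapping->host, the generic helper is safe everywhere, and the nilfs_mdt_mark_buffer_dirty()/nilfs_btnode_mark_dirty() aliases disappear with it.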
fs/nilfs2/alloc.c
@@ -489,8 +489,8 @@ int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
 void nilfs_palloc_commit_alloc_entry(struct inode *inode,
                                      struct nilfs_palloc_req *req)
 {
-        nilfs_mdt_mark_buffer_dirty(req->pr_bitmap_bh);
-        nilfs_mdt_mark_buffer_dirty(req->pr_desc_bh);
+        mark_buffer_dirty(req->pr_bitmap_bh);
+        mark_buffer_dirty(req->pr_desc_bh);
         nilfs_mdt_mark_dirty(inode);
         brelse(req->pr_bitmap_bh);
@@ -527,8 +527,8 @@ void nilfs_palloc_commit_free_entry(struct inode *inode,
         kunmap(req->pr_bitmap_bh->b_page);
         kunmap(req->pr_desc_bh->b_page);
-        nilfs_mdt_mark_buffer_dirty(req->pr_desc_bh);
-        nilfs_mdt_mark_buffer_dirty(req->pr_bitmap_bh);
+        mark_buffer_dirty(req->pr_desc_bh);
+        mark_buffer_dirty(req->pr_bitmap_bh);
         nilfs_mdt_mark_dirty(inode);
         brelse(req->pr_bitmap_bh);
@@ -683,8 +683,8 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems)
         kunmap(bitmap_bh->b_page);
         kunmap(desc_bh->b_page);
-        nilfs_mdt_mark_buffer_dirty(desc_bh);
-        nilfs_mdt_mark_buffer_dirty(bitmap_bh);
+        mark_buffer_dirty(desc_bh);
+        mark_buffer_dirty(bitmap_bh);
         nilfs_mdt_mark_dirty(inode);
         brelse(bitmap_bh);
fs/nilfs2/bmap.c
@@ -34,7 +34,9 @@
 struct inode *nilfs_bmap_get_dat(const struct nilfs_bmap *bmap)
 {
-        return NILFS_I_NILFS(bmap->b_inode)->ns_dat;
+        struct the_nilfs *nilfs = bmap->b_inode->i_sb->s_fs_info;
+
+        return nilfs->ns_dat;
 }
 
 static int nilfs_bmap_convert_error(struct nilfs_bmap *bmap,
fs/nilfs2/btnode.c
@@ -34,12 +34,6 @@
 #include "page.h"
 #include "btnode.h"
 
-void nilfs_btnode_cache_init(struct address_space *btnc,
-                             struct backing_dev_info *bdi)
-{
-        nilfs_mapping_init(btnc, bdi);
-}
-
 void nilfs_btnode_cache_clear(struct address_space *btnc)
 {
         invalidate_mapping_pages(btnc, 0, -1);
@@ -62,7 +56,7 @@ nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr)
                 BUG();
         }
         memset(bh->b_data, 0, 1 << inode->i_blkbits);
-        bh->b_bdev = NILFS_I_NILFS(inode)->ns_bdev;
+        bh->b_bdev = inode->i_sb->s_bdev;
         bh->b_blocknr = blocknr;
         set_buffer_mapped(bh);
         set_buffer_uptodate(bh);
@@ -94,10 +88,11 @@ int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
         if (pblocknr == 0) {
                 pblocknr = blocknr;
                 if (inode->i_ino != NILFS_DAT_INO) {
-                        struct inode *dat = NILFS_I_NILFS(inode)->ns_dat;
+                        struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
 
                         /* blocknr is a virtual block number */
-                        err = nilfs_dat_translate(dat, blocknr, &pblocknr);
+                        err = nilfs_dat_translate(nilfs->ns_dat, blocknr,
+                                                  &pblocknr);
                         if (unlikely(err)) {
                                 brelse(bh);
                                 goto out_locked;
@@ -120,7 +115,7 @@ int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
                         goto found;
         }
         set_buffer_mapped(bh);
-        bh->b_bdev = NILFS_I_NILFS(inode)->ns_bdev;
+        bh->b_bdev = inode->i_sb->s_bdev;
         bh->b_blocknr = pblocknr; /* set block address for read */
         bh->b_end_io = end_buffer_read_sync;
         get_bh(bh);
@@ -259,7 +254,7 @@ void nilfs_btnode_commit_change_key(struct address_space *btnc,
                                "invalid oldkey %lld (newkey=%lld)",
                                (unsigned long long)oldkey,
                                (unsigned long long)newkey);
-                nilfs_btnode_mark_dirty(obh);
+                mark_buffer_dirty(obh);
 
                 spin_lock_irq(&btnc->tree_lock);
                 radix_tree_delete(&btnc->page_tree, oldkey);
@@ -271,7 +266,7 @@ void nilfs_btnode_commit_change_key(struct address_space *btnc,
                 unlock_page(opage);
         } else {
                 nilfs_copy_buffer(nbh, obh);
-                nilfs_btnode_mark_dirty(nbh);
+                mark_buffer_dirty(nbh);
 
                 nbh->b_blocknr = newkey;
                 ctxt->bh = nbh;
fs/nilfs2/btnode.h
@@ -37,7 +37,6 @@ struct nilfs_btnode_chkey_ctxt {
         struct buffer_head *newbh;
 };
 
-void nilfs_btnode_cache_init(struct address_space *, struct backing_dev_info *);
 void nilfs_btnode_cache_clear(struct address_space *);
 struct buffer_head *nilfs_btnode_create_block(struct address_space *btnc,
                                               __u64 blocknr);
@@ -51,7 +50,4 @@ void nilfs_btnode_commit_change_key(struct address_space *,
 void nilfs_btnode_abort_change_key(struct address_space *,
                                    struct nilfs_btnode_chkey_ctxt *);
 
-#define nilfs_btnode_mark_dirty(bh)        nilfs_mark_buffer_dirty(bh)
-
 #endif        /* _NILFS_BTNODE_H */
fs/nilfs2/btree.c
@@ -714,7 +714,7 @@ static void nilfs_btree_promote_key(struct nilfs_bmap *btree,
                         nilfs_btree_get_nonroot_node(path, level),
                         path[level].bp_index, key);
                 if (!buffer_dirty(path[level].bp_bh))
-                        nilfs_btnode_mark_dirty(path[level].bp_bh);
+                        mark_buffer_dirty(path[level].bp_bh);
         } while ((path[level].bp_index == 0) &&
                  (++level < nilfs_btree_height(btree) - 1));
 }
@@ -739,7 +739,7 @@ static void nilfs_btree_do_insert(struct nilfs_bmap *btree,
                 nilfs_btree_node_insert(node, path[level].bp_index,
                                         *keyp, *ptrp, ncblk);
                 if (!buffer_dirty(path[level].bp_bh))
-                        nilfs_btnode_mark_dirty(path[level].bp_bh);
+                        mark_buffer_dirty(path[level].bp_bh);
 
                 if (path[level].bp_index == 0)
                         nilfs_btree_promote_key(btree, path, level + 1,
@@ -777,9 +777,9 @@ static void nilfs_btree_carry_left(struct nilfs_bmap *btree,
         nilfs_btree_node_move_left(left, node, n, ncblk, ncblk);
 
         if (!buffer_dirty(path[level].bp_bh))
-                nilfs_btnode_mark_dirty(path[level].bp_bh);
+                mark_buffer_dirty(path[level].bp_bh);
         if (!buffer_dirty(path[level].bp_sib_bh))
-                nilfs_btnode_mark_dirty(path[level].bp_sib_bh);
+                mark_buffer_dirty(path[level].bp_sib_bh);
 
         nilfs_btree_promote_key(btree, path, level + 1,
                                 nilfs_btree_node_get_key(node, 0));
@@ -823,9 +823,9 @@ static void nilfs_btree_carry_right(struct nilfs_bmap *btree,
         nilfs_btree_node_move_right(node, right, n, ncblk, ncblk);
 
         if (!buffer_dirty(path[level].bp_bh))
-                nilfs_btnode_mark_dirty(path[level].bp_bh);
+                mark_buffer_dirty(path[level].bp_bh);
         if (!buffer_dirty(path[level].bp_sib_bh))
-                nilfs_btnode_mark_dirty(path[level].bp_sib_bh);
+                mark_buffer_dirty(path[level].bp_sib_bh);
 
         path[level + 1].bp_index++;
         nilfs_btree_promote_key(btree, path, level + 1,
@@ -870,9 +870,9 @@ static void nilfs_btree_split(struct nilfs_bmap *btree,
         nilfs_btree_node_move_right(node, right, n, ncblk, ncblk);
 
         if (!buffer_dirty(path[level].bp_bh))
-                nilfs_btnode_mark_dirty(path[level].bp_bh);
+                mark_buffer_dirty(path[level].bp_bh);
         if (!buffer_dirty(path[level].bp_sib_bh))
-                nilfs_btnode_mark_dirty(path[level].bp_sib_bh);
+                mark_buffer_dirty(path[level].bp_sib_bh);
 
         newkey = nilfs_btree_node_get_key(right, 0);
         newptr = path[level].bp_newreq.bpr_ptr;
@@ -919,7 +919,7 @@ static void nilfs_btree_grow(struct nilfs_bmap *btree,
         nilfs_btree_node_set_level(root, level + 1);
 
         if (!buffer_dirty(path[level].bp_sib_bh))
-                nilfs_btnode_mark_dirty(path[level].bp_sib_bh);
+                mark_buffer_dirty(path[level].bp_sib_bh);
 
         path[level].bp_bh = path[level].bp_sib_bh;
         path[level].bp_sib_bh = NULL;
@@ -1194,7 +1194,7 @@ static void nilfs_btree_do_delete(struct nilfs_bmap *btree,
                 nilfs_btree_node_delete(node, path[level].bp_index,
                                         keyp, ptrp, ncblk);
                 if (!buffer_dirty(path[level].bp_bh))
-                        nilfs_btnode_mark_dirty(path[level].bp_bh);
+                        mark_buffer_dirty(path[level].bp_bh);
                 if (path[level].bp_index == 0)
                         nilfs_btree_promote_key(btree, path, level + 1,
                                 nilfs_btree_node_get_key(node, 0));
@@ -1226,9 +1226,9 @@ static void nilfs_btree_borrow_left(struct nilfs_bmap *btree,
         nilfs_btree_node_move_right(left, node, n, ncblk, ncblk);
 
         if (!buffer_dirty(path[level].bp_bh))
-                nilfs_btnode_mark_dirty(path[level].bp_bh);
+                mark_buffer_dirty(path[level].bp_bh);
         if (!buffer_dirty(path[level].bp_sib_bh))
-                nilfs_btnode_mark_dirty(path[level].bp_sib_bh);
+                mark_buffer_dirty(path[level].bp_sib_bh);
 
         nilfs_btree_promote_key(btree, path, level + 1,
                                 nilfs_btree_node_get_key(node, 0));
@@ -1258,9 +1258,9 @@ static void nilfs_btree_borrow_right(struct nilfs_bmap *btree,
         nilfs_btree_node_move_left(node, right, n, ncblk, ncblk);
 
         if (!buffer_dirty(path[level].bp_bh))
-                nilfs_btnode_mark_dirty(path[level].bp_bh);
+                mark_buffer_dirty(path[level].bp_bh);
         if (!buffer_dirty(path[level].bp_sib_bh))
-                nilfs_btnode_mark_dirty(path[level].bp_sib_bh);
+                mark_buffer_dirty(path[level].bp_sib_bh);
 
         path[level + 1].bp_index++;
         nilfs_btree_promote_key(btree, path, level + 1,
@@ -1289,7 +1289,7 @@ static void nilfs_btree_concat_left(struct nilfs_bmap *btree,
         nilfs_btree_node_move_left(left, node, n, ncblk, ncblk);
 
         if (!buffer_dirty(path[level].bp_sib_bh))
-                nilfs_btnode_mark_dirty(path[level].bp_sib_bh);
+                mark_buffer_dirty(path[level].bp_sib_bh);
 
         nilfs_btnode_delete(path[level].bp_bh);
         path[level].bp_bh = path[level].bp_sib_bh;
@@ -1315,7 +1315,7 @@ static void nilfs_btree_concat_right(struct nilfs_bmap *btree,
         nilfs_btree_node_move_left(node, right, n, ncblk, ncblk);
 
         if (!buffer_dirty(path[level].bp_bh))
-                nilfs_btnode_mark_dirty(path[level].bp_bh);
+                mark_buffer_dirty(path[level].bp_bh);
 
         nilfs_btnode_delete(path[level].bp_sib_bh);
         path[level].bp_sib_bh = NULL;
@@ -1709,7 +1709,7 @@ nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *btree,
                 nilfs_btree_node_init(node, 0, 1, n, ncblk, keys, ptrs);
                 nilfs_btree_node_insert(node, n, key, dreq->bpr_ptr, ncblk);
                 if (!buffer_dirty(bh))
-                        nilfs_btnode_mark_dirty(bh);
+                        mark_buffer_dirty(bh);
                 if (!nilfs_bmap_dirty(btree))
                         nilfs_bmap_set_dirty(btree);
@@ -1787,7 +1787,7 @@ static int nilfs_btree_propagate_p(struct nilfs_bmap *btree,
 {
         while ((++level < nilfs_btree_height(btree) - 1) &&
                !buffer_dirty(path[level].bp_bh))
-                nilfs_btnode_mark_dirty(path[level].bp_bh);
+                mark_buffer_dirty(path[level].bp_bh);
 
         return 0;
 }
@@ -2229,7 +2229,7 @@ static int nilfs_btree_mark(struct nilfs_bmap *btree, __u64 key, int level)
         }
 
         if (!buffer_dirty(bh))
-                nilfs_btnode_mark_dirty(bh);
+                mark_buffer_dirty(bh);
         brelse(bh);
         if (!nilfs_bmap_dirty(btree))
                 nilfs_bmap_set_dirty(btree);
fs/nilfs2/cpfile.c
@@ -216,14 +216,14 @@ int nilfs_cpfile_get_checkpoint(struct inode *cpfile,
                 if (!nilfs_cpfile_is_in_first(cpfile, cno))
                         nilfs_cpfile_block_add_valid_checkpoints(cpfile, cp_bh,
                                                                  kaddr, 1);
-                nilfs_mdt_mark_buffer_dirty(cp_bh);
+                mark_buffer_dirty(cp_bh);
 
                 kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
                 header = nilfs_cpfile_block_get_header(cpfile, header_bh,
                                                        kaddr);
                 le64_add_cpu(&header->ch_ncheckpoints, 1);
                 kunmap_atomic(kaddr, KM_USER0);
-                nilfs_mdt_mark_buffer_dirty(header_bh);
+                mark_buffer_dirty(header_bh);
                 nilfs_mdt_mark_dirty(cpfile);
         }
@@ -326,7 +326,7 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
                 }
                 if (nicps > 0) {
                         tnicps += nicps;
-                        nilfs_mdt_mark_buffer_dirty(cp_bh);
+                        mark_buffer_dirty(cp_bh);
                         nilfs_mdt_mark_dirty(cpfile);
                         if (!nilfs_cpfile_is_in_first(cpfile, cno)) {
                                 count =
@@ -358,7 +358,7 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
                 header = nilfs_cpfile_block_get_header(cpfile, header_bh,
                                                        kaddr);
                 le64_add_cpu(&header->ch_ncheckpoints, -(u64)tnicps);
-                nilfs_mdt_mark_buffer_dirty(header_bh);
+                mark_buffer_dirty(header_bh);
                 nilfs_mdt_mark_dirty(cpfile);
                 kunmap_atomic(kaddr, KM_USER0);
         }
@@ -671,10 +671,10 @@ static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno)
         le64_add_cpu(&header->ch_nsnapshots, 1);
         kunmap_atomic(kaddr, KM_USER0);
 
-        nilfs_mdt_mark_buffer_dirty(prev_bh);
-        nilfs_mdt_mark_buffer_dirty(curr_bh);
-        nilfs_mdt_mark_buffer_dirty(cp_bh);
-        nilfs_mdt_mark_buffer_dirty(header_bh);
+        mark_buffer_dirty(prev_bh);
+        mark_buffer_dirty(curr_bh);
+        mark_buffer_dirty(cp_bh);
+        mark_buffer_dirty(header_bh);
         nilfs_mdt_mark_dirty(cpfile);
 
         brelse(prev_bh);
@@ -774,10 +774,10 @@ static int nilfs_cpfile_clear_snapshot(struct inode *cpfile, __u64 cno)
         le64_add_cpu(&header->ch_nsnapshots, -1);
         kunmap_atomic(kaddr, KM_USER0);
 
-        nilfs_mdt_mark_buffer_dirty(next_bh);
-        nilfs_mdt_mark_buffer_dirty(prev_bh);
-        nilfs_mdt_mark_buffer_dirty(cp_bh);
-        nilfs_mdt_mark_buffer_dirty(header_bh);
+        mark_buffer_dirty(next_bh);
+        mark_buffer_dirty(prev_bh);
+        mark_buffer_dirty(cp_bh);
+        mark_buffer_dirty(header_bh);
         nilfs_mdt_mark_dirty(cpfile);
 
         brelse(prev_bh);
fs/nilfs2/dat.c
@@ -54,7 +54,7 @@ static int nilfs_dat_prepare_entry(struct inode *dat,
 static void nilfs_dat_commit_entry(struct inode *dat,
                                    struct nilfs_palloc_req *req)
 {
-        nilfs_mdt_mark_buffer_dirty(req->pr_entry_bh);
+        mark_buffer_dirty(req->pr_entry_bh);
         nilfs_mdt_mark_dirty(dat);
         brelse(req->pr_entry_bh);
 }
@@ -361,7 +361,7 @@ int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr)
         entry->de_blocknr = cpu_to_le64(blocknr);
         kunmap_atomic(kaddr, KM_USER0);
 
-        nilfs_mdt_mark_buffer_dirty(entry_bh);
+        mark_buffer_dirty(entry_bh);
         nilfs_mdt_mark_dirty(dat);
 
         brelse(entry_bh);
fs/nilfs2/file.c
@@ -111,7 +111,6 @@ static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
         nilfs_transaction_commit(inode->i_sb);
 
 mapped:
-        SetPageChecked(page);
         wait_on_page_writeback(page);
         return VM_FAULT_LOCKED;
 }
fs/nilfs2/gcinode.c
@@ -48,9 +48,6 @@
 #include "dat.h"
 #include "ifile.h"
 
-static const struct address_space_operations def_gcinode_aops = {
-};
-
 /*
  * nilfs_gccache_submit_read_data() - add data buffer and submit read request
  * @inode - gc inode
@@ -87,9 +84,9 @@ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff,
                 goto out;
 
         if (pbn == 0) {
-                struct inode *dat_inode = NILFS_I_NILFS(inode)->ns_dat;
-                                          /* use original dat, not gc dat. */
-                err = nilfs_dat_translate(dat_inode, vbn, &pbn);
+                struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
+
+                err = nilfs_dat_translate(nilfs->ns_dat, vbn, &pbn);
                 if (unlikely(err)) { /* -EIO, -ENOMEM, -ENOENT */
                         brelse(bh);
                         goto failed;
@@ -103,7 +100,7 @@ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff,
         }
 
         if (!buffer_mapped(bh)) {
-                bh->b_bdev = NILFS_I_NILFS(inode)->ns_bdev;
+                bh->b_bdev = inode->i_sb->s_bdev;
                 set_buffer_mapped(bh);
         }
         bh->b_blocknr = pbn;
@@ -160,15 +157,11 @@ int nilfs_gccache_wait_and_mark_dirty(struct buffer_head *bh)
         if (buffer_dirty(bh))
                 return -EEXIST;
 
-        if (buffer_nilfs_node(bh)) {
-                if (nilfs_btree_broken_node_block(bh)) {
-                        clear_buffer_uptodate(bh);
-                        return -EIO;
-                }
-                nilfs_btnode_mark_dirty(bh);
-        } else {
-                nilfs_mark_buffer_dirty(bh);
-        }
+        if (buffer_nilfs_node(bh) && nilfs_btree_broken_node_block(bh)) {
+                clear_buffer_uptodate(bh);
+                return -EIO;
+        }
+        mark_buffer_dirty(bh);
         return 0;
 }
@@ -178,7 +171,7 @@ int nilfs_init_gcinode(struct inode *inode)
 
         inode->i_mode = S_IFREG;
         mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
-        inode->i_mapping->a_ops = &def_gcinode_aops;
+        inode->i_mapping->a_ops = &empty_aops;
         inode->i_mapping->backing_dev_info = inode->i_sb->s_bdi;
 
         ii->i_flags = 0;
fs/nilfs2/ifile.c
@@ -80,7 +80,7 @@ int nilfs_ifile_create_inode(struct inode *ifile, ino_t *out_ino,
                 return ret;
         }
         nilfs_palloc_commit_alloc_entry(ifile, &req);
-        nilfs_mdt_mark_buffer_dirty(req.pr_entry_bh);
+        mark_buffer_dirty(req.pr_entry_bh);
         nilfs_mdt_mark_dirty(ifile);
         *out_ino = (ino_t)req.pr_entry_nr;
         *out_bh = req.pr_entry_bh;
@@ -128,7 +128,7 @@ int nilfs_ifile_delete_inode(struct inode *ifile, ino_t ino)
         raw_inode->i_flags = 0;
         kunmap_atomic(kaddr, KM_USER0);
 
-        nilfs_mdt_mark_buffer_dirty(req.pr_entry_bh);
+        mark_buffer_dirty(req.pr_entry_bh);
         brelse(req.pr_entry_bh);
 
         nilfs_palloc_commit_free_entry(ifile, &req);
fs/nilfs2/inode.c
@@ -74,14 +74,14 @@ int nilfs_get_block(struct inode *inode, sector_t blkoff,
                     struct buffer_head *bh_result, int create)
 {
         struct nilfs_inode_info *ii = NILFS_I(inode);
+        struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
         __u64 blknum = 0;
         int err = 0, ret;
-        struct inode *dat = NILFS_I_NILFS(inode)->ns_dat;
         unsigned maxblocks = bh_result->b_size >> inode->i_blkbits;
 
-        down_read(&NILFS_MDT(dat)->mi_sem);
+        down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
         ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
-        up_read(&NILFS_MDT(dat)->mi_sem);
+        up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
         if (ret >= 0) {        /* found */
                 map_bh(bh_result, inode->i_sb, blknum);
                 if (ret > 0)
@@ -596,6 +596,16 @@ void nilfs_write_inode_common(struct inode *inode,
         raw_inode->i_flags = cpu_to_le32(ii->i_flags);
         raw_inode->i_generation = cpu_to_le32(inode->i_generation);
 
+        if (NILFS_ROOT_METADATA_FILE(inode->i_ino)) {
+                struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
+
+                /* zero-fill unused portion in the case of super root block */
+                raw_inode->i_xattr = 0;
+                raw_inode->i_pad = 0;
+                memset((void *)raw_inode + sizeof(*raw_inode), 0,
+                       nilfs->ns_inode_size - sizeof(*raw_inode));
+        }
+
         if (has_bmap)
                 nilfs_bmap_write(ii->i_bmap, raw_inode);
         else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
@@ -872,8 +882,7 @@ int nilfs_set_file_dirty(struct inode *inode, unsigned nr_dirty)
                         return -EINVAL; /* NILFS_I_DIRTY may remain for
                                            freeing inode */
                 }
-                list_del(&ii->i_dirty);
-                list_add_tail(&ii->i_dirty, &nilfs->ns_dirty_files);
+                list_move_tail(&ii->i_dirty, &nilfs->ns_dirty_files);
                 set_bit(NILFS_I_QUEUED, &ii->i_state);
         }
         spin_unlock(&nilfs->ns_inode_lock);
@@ -892,7 +901,7 @@ int nilfs_mark_inode_dirty(struct inode *inode)
                 return err;
         }
         nilfs_update_inode(inode, ibh);
-        nilfs_mdt_mark_buffer_dirty(ibh);
+        mark_buffer_dirty(ibh);
         nilfs_mdt_mark_dirty(NILFS_I(inode)->i_root->ifile);
         brelse(ibh);
         return 0;
@@ -931,7 +940,7 @@ void nilfs_dirty_inode(struct inode *inode)
 int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                  __u64 start, __u64 len)
 {
-        struct the_nilfs *nilfs = NILFS_I_NILFS(inode);
+        struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
         __u64 logical = 0, phys = 0, size = 0;
         __u32 flags = 0;
         loff_t isize;
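The list_del()/list_add_tail() pairs collapsed above (and again in segment.c further down) are exactly the operation list_move_tail() performs; a sketch of the helper as defined in <linux/list.h> of this era:

static inline void list_move_tail(struct list_head *list,
                                  struct list_head *head)
{
        __list_del_entry(list);         /* unlink from its current list */
        list_add_tail(list, head);      /* append at the tail of @head */
}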
fs/nilfs2/ioctl.c
@@ -698,6 +698,63 @@ static int nilfs_ioctl_sync(struct inode *inode, struct file *filp,
         return 0;
 }
 
+static int nilfs_ioctl_resize(struct inode *inode, struct file *filp,
+                              void __user *argp)
+{
+        __u64 newsize;
+        int ret = -EPERM;
+
+        if (!capable(CAP_SYS_ADMIN))
+                goto out;
+
+        ret = mnt_want_write(filp->f_path.mnt);
+        if (ret)
+                goto out;
+
+        ret = -EFAULT;
+        if (copy_from_user(&newsize, argp, sizeof(newsize)))
+                goto out_drop_write;
+
+        ret = nilfs_resize_fs(inode->i_sb, newsize);
+
+out_drop_write:
+        mnt_drop_write(filp->f_path.mnt);
+out:
+        return ret;
+}
+
+static int nilfs_ioctl_set_alloc_range(struct inode *inode, void __user *argp)
+{
+        struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
+        __u64 range[2];
+        __u64 minseg, maxseg;
+        unsigned long segbytes;
+        int ret = -EPERM;
+
+        if (!capable(CAP_SYS_ADMIN))
+                goto out;
+
+        ret = -EFAULT;
+        if (copy_from_user(range, argp, sizeof(__u64[2])))
+                goto out;
+
+        ret = -ERANGE;
+        if (range[1] > i_size_read(inode->i_sb->s_bdev->bd_inode))
+                goto out;
+
+        segbytes = nilfs->ns_blocks_per_segment * nilfs->ns_blocksize;
+
+        minseg = range[0] + segbytes - 1;
+        do_div(minseg, segbytes);
+        maxseg = NILFS_SB2_OFFSET_BYTES(range[1]);
+        do_div(maxseg, segbytes);
+        maxseg--;
+
+        ret = nilfs_sufile_set_alloc_range(nilfs->ns_sufile, minseg, maxseg);
+out:
+        return ret;
+}
+
 static int nilfs_ioctl_get_info(struct inode *inode, struct file *filp,
                                 unsigned int cmd, void __user *argp,
                                 size_t membsz,
@@ -763,6 +820,10 @@ long nilfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
                 return nilfs_ioctl_clean_segments(inode, filp, cmd, argp);
         case NILFS_IOCTL_SYNC:
                 return nilfs_ioctl_sync(inode, filp, cmd, argp);
+        case NILFS_IOCTL_RESIZE:
+                return nilfs_ioctl_resize(inode, filp, argp);
+        case NILFS_IOCTL_SET_ALLOC_RANGE:
+                return nilfs_ioctl_set_alloc_range(inode, argp);
         default:
                 return -ENOTTY;
         }
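For context, a minimal sketch of how userspace would drive the new resize ioctl. This is an assumption-laden illustration, not part of the patch: the mount path is hypothetical, and NILFS_IOCTL_RESIZE is assumed to be exported by <linux/nilfs2_fs.h> as part of this series. As in nilfs_ioctl_resize() above, the argument is a single __u64 byte count and the caller needs CAP_SYS_ADMIN:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include <linux/nilfs2_fs.h>        /* assumed home of NILFS_IOCTL_RESIZE */

/* Resize the nilfs2 volume mounted at mntpath to newsize bytes. */
static int resize_nilfs(const char *mntpath, __u64 newsize)
{
        int fd = open(mntpath, O_RDONLY);  /* any file on the fs will do */
        int ret;

        if (fd < 0)
                return -1;
        ret = ioctl(fd, NILFS_IOCTL_RESIZE, &newsize);
        close(fd);
        return ret;
}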
fs/nilfs2/mdt.c
@@ -66,7 +66,7 @@ nilfs_mdt_insert_new_block(struct inode *inode, unsigned long block,
         kunmap_atomic(kaddr, KM_USER0);
 
         set_buffer_uptodate(bh);
-        nilfs_mark_buffer_dirty(bh);
+        mark_buffer_dirty(bh);
         nilfs_mdt_mark_dirty(inode);
         return 0;
 }
@@ -355,7 +355,7 @@ int nilfs_mdt_mark_block_dirty(struct inode *inode, unsigned long block)
         err = nilfs_mdt_read_block(inode, block, 0, &bh);
         if (unlikely(err))
                 return err;
-        nilfs_mark_buffer_dirty(bh);
+        mark_buffer_dirty(bh);
         nilfs_mdt_mark_dirty(inode);
         brelse(bh);
         return 0;
@@ -450,9 +450,9 @@ int nilfs_mdt_setup_shadow_map(struct inode *inode,
         INIT_LIST_HEAD(&shadow->frozen_buffers);
         address_space_init_once(&shadow->frozen_data);
-        nilfs_mapping_init(&shadow->frozen_data, bdi);
+        nilfs_mapping_init(&shadow->frozen_data, inode, bdi);
         address_space_init_once(&shadow->frozen_btnodes);
-        nilfs_mapping_init(&shadow->frozen_btnodes, bdi);
+        nilfs_mapping_init(&shadow->frozen_btnodes, inode, bdi);
         mi->mi_shadow = shadow;
         return 0;
 }
fs/nilfs2/mdt.h
@@ -64,11 +64,6 @@ static inline struct nilfs_mdt_info *NILFS_MDT(const struct inode *inode)
         return inode->i_private;
 }
 
-static inline struct the_nilfs *NILFS_I_NILFS(struct inode *inode)
-{
-        return inode->i_sb->s_fs_info;
-}
-
 /* Default GFP flags using highmem */
 #define NILFS_MDT_GFP (__GFP_WAIT | __GFP_IO | __GFP_HIGHMEM)
@@ -93,8 +88,6 @@ int nilfs_mdt_freeze_buffer(struct inode *inode, struct buffer_head *bh);
 struct buffer_head *nilfs_mdt_get_frozen_buffer(struct inode *inode,
                                                 struct buffer_head *bh);
 
-#define nilfs_mdt_mark_buffer_dirty(bh)        nilfs_mark_buffer_dirty(bh)
-
 static inline void nilfs_mdt_mark_dirty(struct inode *inode)
 {
         if (!test_bit(NILFS_I_DIRTY, &NILFS_I(inode)->i_state))
@@ -108,7 +101,7 @@ static inline void nilfs_mdt_clear_dirty(struct inode *inode)
 
 static inline __u64 nilfs_mdt_cno(struct inode *inode)
 {
-        return NILFS_I_NILFS(inode)->ns_cno;
+        return ((struct the_nilfs *)inode->i_sb->s_fs_info)->ns_cno;
 }
 
 #define nilfs_mdt_bgl_lock(inode, bg) \
fs/nilfs2/nilfs.h
@@ -80,12 +80,6 @@ static inline struct inode *NILFS_BTNC_I(struct address_space *btnc)
         return &ii->vfs_inode;
 }
 
-static inline struct inode *NILFS_AS_I(struct address_space *mapping)
-{
-        return (mapping->host) ? :
-                container_of(mapping, struct inode, i_data);
-}
-
 /*
  * Dynamic state flags of NILFS on-memory inode (i_state)
  */
@@ -298,6 +292,7 @@ struct nilfs_super_block **nilfs_prepare_super(struct super_block *sb,
                                                int flip);
 int nilfs_commit_super(struct super_block *sb, int flag);
 int nilfs_cleanup_super(struct super_block *sb);
+int nilfs_resize_fs(struct super_block *sb, __u64 newsize);
 int nilfs_attach_checkpoint(struct super_block *sb, __u64 cno, int curr_mnt,
                             struct nilfs_root **root);
 int nilfs_checkpoint_is_mounted(struct super_block *sb, __u64 cno);
fs/nilfs2/page.c
@@ -37,8 +37,7 @@
 #define NILFS_BUFFER_INHERENT_BITS  \
         ((1UL << BH_Uptodate) | (1UL << BH_Mapped) | (1UL << BH_NILFS_Node) | \
-         (1UL << BH_NILFS_Volatile) | (1UL << BH_NILFS_Allocated) | \
-         (1UL << BH_NILFS_Checked))
+         (1UL << BH_NILFS_Volatile) | (1UL << BH_NILFS_Checked))
 
 static struct buffer_head *
 __nilfs_get_page_block(struct page *page, unsigned long block, pgoff_t index,
@@ -59,19 +58,6 @@ __nilfs_get_page_block(struct page *page, unsigned long block, pgoff_t index,
         return bh;
 }
 
-/*
- * Since the page cache of B-tree node pages or data page cache of pseudo
- * inodes does not have a valid mapping->host pointer, calling
- * mark_buffer_dirty() for their buffers causes a NULL pointer dereference;
- * it calls __mark_inode_dirty(NULL) through __set_page_dirty().
- * To avoid this problem, the old style mark_buffer_dirty() is used instead.
- */
-void nilfs_mark_buffer_dirty(struct buffer_head *bh)
-{
-        if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
-                __set_page_dirty_nobuffers(bh->b_page);
-}
-
 struct buffer_head *nilfs_grab_buffer(struct inode *inode,
                                       struct address_space *mapping,
                                       unsigned long blkoff,
@@ -183,7 +169,7 @@ int nilfs_page_buffers_clean(struct page *page)
 void nilfs_page_bug(struct page *page)
 {
         struct address_space *m;
-        unsigned long ino = 0;
+        unsigned long ino;
 
         if (unlikely(!page)) {
                 printk(KERN_CRIT "NILFS_PAGE_BUG(NULL)\n");
@@ -191,11 +177,8 @@ void nilfs_page_bug(struct page *page)
         }
 
         m = page->mapping;
-        if (m) {
-                struct inode *inode = NILFS_AS_I(m);
-                if (inode != NULL)
-                        ino = inode->i_ino;
-        }
+        ino = m ? m->host->i_ino : 0;
+
         printk(KERN_CRIT "NILFS_PAGE_BUG(%p): cnt=%d index#=%llu flags=0x%lx "
                "mapping=%p ino=%lu\n",
                page, atomic_read(&page->_count),
@@ -216,56 +199,6 @@ void nilfs_page_bug(struct page *page)
         }
 }
 
-/**
- * nilfs_alloc_private_page - allocate a private page with buffer heads
- *
- * Return Value: On success, a pointer to the allocated page is returned.
- * On error, NULL is returned.
- */
-struct page *nilfs_alloc_private_page(struct block_device *bdev, int size,
-                                      unsigned long state)
-{
-        struct buffer_head *bh, *head, *tail;
-        struct page *page;
-
-        page = alloc_page(GFP_NOFS); /* page_count of the returned page is 1 */
-        if (unlikely(!page))
-                return NULL;
-
-        lock_page(page);
-        head = alloc_page_buffers(page, size, 0);
-        if (unlikely(!head)) {
-                unlock_page(page);
-                __free_page(page);
-                return NULL;
-        }
-
-        bh = head;
-        do {
-                bh->b_state = (1UL << BH_NILFS_Allocated) | state;
-                tail = bh;
-                bh->b_bdev = bdev;
-                bh = bh->b_this_page;
-        } while (bh);
-
-        tail->b_this_page = head;
-        attach_page_buffers(page, head);
-
-        return page;
-}
-
-void nilfs_free_private_page(struct page *page)
-{
-        BUG_ON(!PageLocked(page));
-        BUG_ON(page->mapping);
-
-        if (page_has_buffers(page) && !try_to_free_buffers(page))
-                NILFS_PAGE_BUG(page, "failed to free page");
-
-        unlock_page(page);
-        __free_page(page);
-}
-
 /**
  * nilfs_copy_page -- copy the page with buffers
  * @dst: destination page
@@ -492,10 +425,10 @@ unsigned nilfs_page_count_clean_buffers(struct page *page,
         return nc;
 }
 
-void nilfs_mapping_init(struct address_space *mapping,
+void nilfs_mapping_init(struct address_space *mapping, struct inode *inode,
                         struct backing_dev_info *bdi)
 {
-        mapping->host = NULL;
+        mapping->host = inode;
         mapping->flags = 0;
         mapping_set_gfp_mask(mapping, GFP_NOFS);
         mapping->assoc_mapping = NULL;
fs/nilfs2/page.h
@@ -38,14 +38,12 @@ enum {
         BH_NILFS_Redirected,
 };
 
-BUFFER_FNS(NILFS_Allocated, nilfs_allocated)        /* nilfs private buffers */
 BUFFER_FNS(NILFS_Node, nilfs_node)                  /* nilfs node buffers */
 BUFFER_FNS(NILFS_Volatile, nilfs_volatile)
 BUFFER_FNS(NILFS_Checked, nilfs_checked)            /* buffer is verified */
 BUFFER_FNS(NILFS_Redirected, nilfs_redirected)      /* redirected to a copy */
 
-void nilfs_mark_buffer_dirty(struct buffer_head *bh);
 int __nilfs_clear_page_dirty(struct page *);
 
 struct buffer_head *nilfs_grab_buffer(struct inode *, struct address_space *,
@@ -54,14 +52,11 @@ void nilfs_forget_buffer(struct buffer_head *);
 void nilfs_copy_buffer(struct buffer_head *, struct buffer_head *);
 int nilfs_page_buffers_clean(struct page *);
 void nilfs_page_bug(struct page *);
-struct page *nilfs_alloc_private_page(struct block_device *, int,
-                                      unsigned long);
-void nilfs_free_private_page(struct page *);
 
 int nilfs_copy_dirty_pages(struct address_space *, struct address_space *);
 void nilfs_copy_back_pages(struct address_space *, struct address_space *);
 void nilfs_clear_dirty_pages(struct address_space *);
-void nilfs_mapping_init(struct address_space *mapping,
+void nilfs_mapping_init(struct address_space *mapping, struct inode *inode,
                         struct backing_dev_info *bdi);
 unsigned nilfs_page_count_clean_buffers(struct page *, unsigned, unsigned);
 unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
fs/nilfs2/recovery.c
@@ -387,9 +387,9 @@ static int nilfs_scan_dsync_log(struct the_nilfs *nilfs, sector_t start_blocknr,
 static void dispose_recovery_list(struct list_head *head)
 {
         while (!list_empty(head)) {
-                struct nilfs_recovery_block *rb
-                        = list_entry(head->next,
-                                     struct nilfs_recovery_block, list);
+                struct nilfs_recovery_block *rb;
+
+                rb = list_first_entry(head, struct nilfs_recovery_block, list);
                 list_del(&rb->list);
                 kfree(rb);
         }
@@ -416,9 +416,9 @@ static int nilfs_segment_list_add(struct list_head *head, __u64 segnum)
 void nilfs_dispose_segment_list(struct list_head *head)
 {
         while (!list_empty(head)) {
-                struct nilfs_segment_entry *ent
-                        = list_entry(head->next,
-                                     struct nilfs_segment_entry, list);
+                struct nilfs_segment_entry *ent;
+
+                ent = list_first_entry(head, struct nilfs_segment_entry, list);
                 list_del(&ent->list);
                 kfree(ent);
         }
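The conversions above are mechanical: list_first_entry() is thin sugar over list_entry() applied to head->next, per <linux/list.h>:

#define list_first_entry(ptr, type, member) \
        list_entry((ptr)->next, type, member)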
fs/nilfs2/segbuf.c
@@ -239,12 +239,15 @@ nilfs_segbuf_fill_in_super_root_crc(struct nilfs_segment_buffer *segbuf,
                                     u32 seed)
 {
         struct nilfs_super_root *raw_sr;
+        struct the_nilfs *nilfs = segbuf->sb_super->s_fs_info;
+        unsigned srsize;
         u32 crc;
 
         raw_sr = (struct nilfs_super_root *)segbuf->sb_super_root->b_data;
+        srsize = NILFS_SR_BYTES(nilfs->ns_inode_size);
         crc = crc32_le(seed,
                        (unsigned char *)raw_sr + sizeof(raw_sr->sr_sum),
-                       NILFS_SR_BYTES - sizeof(raw_sr->sr_sum));
+                       srsize - sizeof(raw_sr->sr_sum));
         raw_sr->sr_sum = cpu_to_le32(crc);
 }
@@ -254,18 +257,6 @@ static void nilfs_release_buffers(struct list_head *list)
         list_for_each_entry_safe(bh, n, list, b_assoc_buffers) {
                 list_del_init(&bh->b_assoc_buffers);
-                if (buffer_nilfs_allocated(bh)) {
-                        struct page *clone_page = bh->b_page;
-
-                        /* remove clone page */
-                        brelse(bh);
-                        page_cache_release(clone_page); /* for each bh */
-                        if (page_count(clone_page) <= 2) {
-                                lock_page(clone_page);
-                                nilfs_free_private_page(clone_page);
-                        }
-                        continue;
-                }
                 brelse(bh);
         }
 }
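Why the CRC length changes: the super root block is a fixed header followed by three on-disk inodes (DAT, cpfile, sufile), so its payload size depends on the filesystem's inode size rather than being a compile-time constant. A sketch of the offset arithmetic that nilfs_segbuf_fill_in_super_root_crc() above and nilfs_segctor_fill_in_super_root() below rely on; these macro bodies are quoted from the nilfs2 disk-format header as an assumption, not from this diff:

/* assumed definitions, matching the usage in segbuf.c/segment.c */
#define NILFS_SR_MDT_OFFSET(inode_size, i) \
        ((unsigned long)&((struct nilfs_super_root *)0)->sr_dat + \
         (inode_size) * (i))
#define NILFS_SR_DAT_OFFSET(inode_size)    NILFS_SR_MDT_OFFSET(inode_size, 0)
#define NILFS_SR_CPFILE_OFFSET(inode_size) NILFS_SR_MDT_OFFSET(inode_size, 1)
#define NILFS_SR_SUFILE_OFFSET(inode_size) NILFS_SR_MDT_OFFSET(inode_size, 2)
#define NILFS_SR_BYTES(inode_size) \
        (NILFS_SR_SUFILE_OFFSET(inode_size) + (inode_size))

The memset added in nilfs_segctor_fill_in_super_root() then zero-fills everything from NILFS_SR_BYTES(isz) to the end of the block, so stale memory never reaches the disk.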
...@@ -655,13 +655,10 @@ static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode, ...@@ -655,13 +655,10 @@ static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
if (unlikely(page->index > last)) if (unlikely(page->index > last))
break; break;
if (mapping->host) {
lock_page(page); lock_page(page);
if (!page_has_buffers(page)) if (!page_has_buffers(page))
create_empty_buffers(page, create_empty_buffers(page, 1 << inode->i_blkbits, 0);
1 << inode->i_blkbits, 0);
unlock_page(page); unlock_page(page);
}
bh = head = page_buffers(page); bh = head = page_buffers(page);
do { do {
...@@ -809,7 +806,7 @@ static int nilfs_segctor_create_checkpoint(struct nilfs_sc_info *sci) ...@@ -809,7 +806,7 @@ static int nilfs_segctor_create_checkpoint(struct nilfs_sc_info *sci)
/* The following code is duplicated with cpfile. But, it is /* The following code is duplicated with cpfile. But, it is
needed to collect the checkpoint even if it was not newly needed to collect the checkpoint even if it was not newly
created */ created */
nilfs_mdt_mark_buffer_dirty(bh_cp); mark_buffer_dirty(bh_cp);
nilfs_mdt_mark_dirty(nilfs->ns_cpfile); nilfs_mdt_mark_dirty(nilfs->ns_cpfile);
nilfs_cpfile_put_checkpoint( nilfs_cpfile_put_checkpoint(
nilfs->ns_cpfile, nilfs->ns_cno, bh_cp); nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
...@@ -889,12 +886,14 @@ static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci, ...@@ -889,12 +886,14 @@ static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
{ {
struct buffer_head *bh_sr; struct buffer_head *bh_sr;
struct nilfs_super_root *raw_sr; struct nilfs_super_root *raw_sr;
unsigned isz = nilfs->ns_inode_size; unsigned isz, srsz;
bh_sr = NILFS_LAST_SEGBUF(&sci->sc_segbufs)->sb_super_root; bh_sr = NILFS_LAST_SEGBUF(&sci->sc_segbufs)->sb_super_root;
raw_sr = (struct nilfs_super_root *)bh_sr->b_data; raw_sr = (struct nilfs_super_root *)bh_sr->b_data;
isz = nilfs->ns_inode_size;
srsz = NILFS_SR_BYTES(isz);
raw_sr->sr_bytes = cpu_to_le16(NILFS_SR_BYTES); raw_sr->sr_bytes = cpu_to_le16(srsz);
raw_sr->sr_nongc_ctime raw_sr->sr_nongc_ctime
= cpu_to_le64(nilfs_doing_gc() ? = cpu_to_le64(nilfs_doing_gc() ?
nilfs->ns_nongc_ctime : sci->sc_seg_ctime); nilfs->ns_nongc_ctime : sci->sc_seg_ctime);
...@@ -906,6 +905,7 @@ static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci, ...@@ -906,6 +905,7 @@ static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
NILFS_SR_CPFILE_OFFSET(isz), 1); NILFS_SR_CPFILE_OFFSET(isz), 1);
nilfs_write_inode_common(nilfs->ns_sufile, (void *)raw_sr + nilfs_write_inode_common(nilfs->ns_sufile, (void *)raw_sr +
NILFS_SR_SUFILE_OFFSET(isz), 1); NILFS_SR_SUFILE_OFFSET(isz), 1);
memset((void *)raw_sr + srsz, 0, nilfs->ns_blocksize - srsz);
} }
static void nilfs_redirty_inodes(struct list_head *head) static void nilfs_redirty_inodes(struct list_head *head)
...@@ -954,7 +954,7 @@ static int nilfs_segctor_apply_buffers(struct nilfs_sc_info *sci, ...@@ -954,7 +954,7 @@ static int nilfs_segctor_apply_buffers(struct nilfs_sc_info *sci,
dispose_buffers: dispose_buffers:
while (!list_empty(listp)) { while (!list_empty(listp)) {
bh = list_entry(listp->next, struct buffer_head, bh = list_first_entry(listp, struct buffer_head,
b_assoc_buffers); b_assoc_buffers);
list_del_init(&bh->b_assoc_buffers); list_del_init(&bh->b_assoc_buffers);
brelse(bh); brelse(bh);
...@@ -1500,10 +1500,7 @@ nilfs_segctor_update_payload_blocknr(struct nilfs_sc_info *sci, ...@@ -1500,10 +1500,7 @@ nilfs_segctor_update_payload_blocknr(struct nilfs_sc_info *sci,
nblocks = le32_to_cpu(finfo->fi_nblocks); nblocks = le32_to_cpu(finfo->fi_nblocks);
ndatablk = le32_to_cpu(finfo->fi_ndatablk); ndatablk = le32_to_cpu(finfo->fi_ndatablk);
if (buffer_nilfs_node(bh)) inode = bh->b_page->mapping->host;
inode = NILFS_BTNC_I(bh->b_page->mapping);
else
inode = NILFS_AS_I(bh->b_page->mapping);
if (mode == SC_LSEG_DSYNC) if (mode == SC_LSEG_DSYNC)
sc_op = &nilfs_sc_dsync_ops; sc_op = &nilfs_sc_dsync_ops;
...@@ -1556,83 +1553,24 @@ static int nilfs_segctor_assign(struct nilfs_sc_info *sci, int mode) ...@@ -1556,83 +1553,24 @@ static int nilfs_segctor_assign(struct nilfs_sc_info *sci, int mode)
return 0; return 0;
} }
static int static void nilfs_begin_page_io(struct page *page)
nilfs_copy_replace_page_buffers(struct page *page, struct list_head *out)
{
struct page *clone_page;
struct buffer_head *bh, *head, *bh2;
void *kaddr;
bh = head = page_buffers(page);
clone_page = nilfs_alloc_private_page(bh->b_bdev, bh->b_size, 0);
if (unlikely(!clone_page))
return -ENOMEM;
bh2 = page_buffers(clone_page);
kaddr = kmap_atomic(page, KM_USER0);
do {
if (list_empty(&bh->b_assoc_buffers))
continue;
get_bh(bh2);
page_cache_get(clone_page); /* for each bh */
memcpy(bh2->b_data, kaddr + bh_offset(bh), bh2->b_size);
bh2->b_blocknr = bh->b_blocknr;
list_replace(&bh->b_assoc_buffers, &bh2->b_assoc_buffers);
list_add_tail(&bh->b_assoc_buffers, out);
} while (bh = bh->b_this_page, bh2 = bh2->b_this_page, bh != head);
kunmap_atomic(kaddr, KM_USER0);
if (!TestSetPageWriteback(clone_page))
account_page_writeback(clone_page);
unlock_page(clone_page);
return 0;
}
static int nilfs_test_page_to_be_frozen(struct page *page)
{
struct address_space *mapping = page->mapping;
if (!mapping || !mapping->host || S_ISDIR(mapping->host->i_mode))
return 0;
if (page_mapped(page)) {
ClearPageChecked(page);
return 1;
}
return PageChecked(page);
}
static int nilfs_begin_page_io(struct page *page, struct list_head *out)
{ {
if (!page || PageWriteback(page)) if (!page || PageWriteback(page))
/* For split b-tree node pages, this function may be called /* For split b-tree node pages, this function may be called
twice. We ignore the 2nd or later calls by this check. */ twice. We ignore the 2nd or later calls by this check. */
return 0; return;
lock_page(page); lock_page(page);
clear_page_dirty_for_io(page); clear_page_dirty_for_io(page);
set_page_writeback(page); set_page_writeback(page);
unlock_page(page); unlock_page(page);
if (nilfs_test_page_to_be_frozen(page)) {
int err = nilfs_copy_replace_page_buffers(page, out);
if (unlikely(err))
return err;
}
return 0;
} }
static int nilfs_segctor_prepare_write(struct nilfs_sc_info *sci, static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
struct page **failed_page)
{ {
struct nilfs_segment_buffer *segbuf; struct nilfs_segment_buffer *segbuf;
struct page *bd_page = NULL, *fs_page = NULL; struct page *bd_page = NULL, *fs_page = NULL;
struct list_head *list = &sci->sc_copied_buffers;
int err;
*failed_page = NULL;
list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) { list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
struct buffer_head *bh; struct buffer_head *bh;
...@@ -1662,11 +1600,7 @@ static int nilfs_segctor_prepare_write(struct nilfs_sc_info *sci, ...@@ -1662,11 +1600,7 @@ static int nilfs_segctor_prepare_write(struct nilfs_sc_info *sci,
break; break;
} }
if (bh->b_page != fs_page) { if (bh->b_page != fs_page) {
err = nilfs_begin_page_io(fs_page, list); nilfs_begin_page_io(fs_page);
if (unlikely(err)) {
*failed_page = fs_page;
goto out;
}
fs_page = bh->b_page; fs_page = bh->b_page;
} }
} }
...@@ -1677,11 +1611,7 @@ static int nilfs_segctor_prepare_write(struct nilfs_sc_info *sci, ...@@ -1677,11 +1611,7 @@ static int nilfs_segctor_prepare_write(struct nilfs_sc_info *sci,
set_page_writeback(bd_page); set_page_writeback(bd_page);
unlock_page(bd_page); unlock_page(bd_page);
} }
err = nilfs_begin_page_io(fs_page, list); nilfs_begin_page_io(fs_page);
if (unlikely(err))
*failed_page = fs_page;
out:
return err;
} }
static int nilfs_segctor_write(struct nilfs_sc_info *sci, static int nilfs_segctor_write(struct nilfs_sc_info *sci,
...@@ -1694,24 +1624,6 @@ static int nilfs_segctor_write(struct nilfs_sc_info *sci, ...@@ -1694,24 +1624,6 @@ static int nilfs_segctor_write(struct nilfs_sc_info *sci,
return ret; return ret;
} }
static void __nilfs_end_page_io(struct page *page, int err)
{
if (!err) {
if (!nilfs_page_buffers_clean(page))
__set_page_dirty_nobuffers(page);
ClearPageError(page);
} else {
__set_page_dirty_nobuffers(page);
SetPageError(page);
}
if (buffer_nilfs_allocated(page_buffers(page))) {
if (TestClearPageWriteback(page))
dec_zone_page_state(page, NR_WRITEBACK);
} else
end_page_writeback(page);
}
static void nilfs_end_page_io(struct page *page, int err) static void nilfs_end_page_io(struct page *page, int err)
{ {
if (!page) if (!page)
...@@ -1738,40 +1650,19 @@ static void nilfs_end_page_io(struct page *page, int err) ...@@ -1738,40 +1650,19 @@ static void nilfs_end_page_io(struct page *page, int err)
return; return;
} }
__nilfs_end_page_io(page, err);
}
static void nilfs_clear_copied_buffers(struct list_head *list, int err)
{
struct buffer_head *bh, *head;
struct page *page;
while (!list_empty(list)) {
bh = list_entry(list->next, struct buffer_head,
b_assoc_buffers);
page = bh->b_page;
page_cache_get(page);
head = bh = page_buffers(page);
do {
if (!list_empty(&bh->b_assoc_buffers)) {
list_del_init(&bh->b_assoc_buffers);
if (!err) { if (!err) {
set_buffer_uptodate(bh); if (!nilfs_page_buffers_clean(page))
clear_buffer_dirty(bh); __set_page_dirty_nobuffers(page);
clear_buffer_delay(bh); ClearPageError(page);
clear_buffer_nilfs_volatile(bh); } else {
} __set_page_dirty_nobuffers(page);
brelse(bh); /* for b_assoc_buffers */ SetPageError(page);
} }
} while ((bh = bh->b_this_page) != head);
__nilfs_end_page_io(page, err); end_page_writeback(page);
page_cache_release(page);
}
} }
static void nilfs_abort_logs(struct list_head *logs, struct page *failed_page, static void nilfs_abort_logs(struct list_head *logs, int err)
int err)
{ {
struct nilfs_segment_buffer *segbuf; struct nilfs_segment_buffer *segbuf;
struct page *bd_page = NULL, *fs_page = NULL; struct page *bd_page = NULL, *fs_page = NULL;
...@@ -1801,8 +1692,6 @@ static void nilfs_abort_logs(struct list_head *logs, struct page *failed_page, ...@@ -1801,8 +1692,6 @@ static void nilfs_abort_logs(struct list_head *logs, struct page *failed_page,
} }
if (bh->b_page != fs_page) { if (bh->b_page != fs_page) {
nilfs_end_page_io(fs_page, err); nilfs_end_page_io(fs_page, err);
if (fs_page && fs_page == failed_page)
return;
fs_page = bh->b_page; fs_page = bh->b_page;
} }
} }
@@ -1821,12 +1710,11 @@ static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci,
 	list_splice_tail_init(&sci->sc_write_logs, &logs);
 	ret = nilfs_wait_on_logs(&logs);
-	nilfs_abort_logs(&logs, NULL, ret ? : err);
+	nilfs_abort_logs(&logs, ret ? : err);
 
 	list_splice_tail_init(&sci->sc_segbufs, &logs);
 	nilfs_cancel_segusage(&logs, nilfs->ns_sufile);
 	nilfs_free_incomplete_logs(&logs, nilfs);
-	nilfs_clear_copied_buffers(&sci->sc_copied_buffers, err);
 
 	if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
 		ret = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
@@ -1920,8 +1808,6 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
 	nilfs_end_page_io(fs_page, 0);
 
-	nilfs_clear_copied_buffers(&sci->sc_copied_buffers, 0);
-
 	nilfs_drop_collected_inodes(&sci->sc_dirty_files);
 
 	if (nilfs_doing_gc())
@@ -1979,7 +1865,7 @@ static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
 					   "failed to get inode block.\n");
 				return err;
 			}
-			nilfs_mdt_mark_buffer_dirty(ibh);
+			mark_buffer_dirty(ibh);
 			nilfs_mdt_mark_dirty(ifile);
 			spin_lock(&nilfs->ns_inode_lock);
 			if (likely(!ii->i_bh))
@@ -1991,8 +1877,7 @@ static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
 		clear_bit(NILFS_I_QUEUED, &ii->i_state);
 		set_bit(NILFS_I_BUSY, &ii->i_state);
-		list_del(&ii->i_dirty);
-		list_add_tail(&ii->i_dirty, &sci->sc_dirty_files);
+		list_move_tail(&ii->i_dirty, &sci->sc_dirty_files);
 	}
 	spin_unlock(&nilfs->ns_inode_lock);
@@ -2014,8 +1899,7 @@ static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
 		clear_bit(NILFS_I_BUSY, &ii->i_state);
 		brelse(ii->i_bh);
 		ii->i_bh = NULL;
-		list_del(&ii->i_dirty);
-		list_add_tail(&ii->i_dirty, &ti->ti_garbage);
+		list_move_tail(&ii->i_dirty, &ti->ti_garbage);
 	}
 	spin_unlock(&nilfs->ns_inode_lock);
 }
@@ -2026,7 +1910,6 @@ static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
 static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
 {
 	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
-	struct page *failed_page;
 	int err;
 
 	sci->sc_stage.scnt = NILFS_ST_INIT;
@@ -2081,11 +1964,7 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
 		nilfs_segctor_update_segusage(sci, nilfs->ns_sufile);
 
 		/* Write partial segments */
-		err = nilfs_segctor_prepare_write(sci, &failed_page);
-		if (err) {
-			nilfs_abort_logs(&sci->sc_segbufs, failed_page, err);
-			goto failed_to_write;
-		}
+		nilfs_segctor_prepare_write(sci);
 
 		nilfs_add_checksums_on_logs(&sci->sc_segbufs,
 					    nilfs->ns_crc_seed);
@@ -2687,7 +2566,6 @@ static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb,
 	INIT_LIST_HEAD(&sci->sc_segbufs);
 	INIT_LIST_HEAD(&sci->sc_write_logs);
 	INIT_LIST_HEAD(&sci->sc_gc_inodes);
-	INIT_LIST_HEAD(&sci->sc_copied_buffers);
 
 	init_timer(&sci->sc_timer);
 	sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
@@ -2741,8 +2619,6 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
 	if (flag || !nilfs_segctor_confirm(sci))
 		nilfs_segctor_write_out(sci);
 
-	WARN_ON(!list_empty(&sci->sc_copied_buffers));
-
 	if (!list_empty(&sci->sc_dirty_files)) {
 		nilfs_warning(sci->sc_super, __func__,
 			      "dirty file(s) after the final construction\n");
......
@@ -92,7 +92,6 @@ struct nilfs_segsum_pointer {
  * @sc_nblk_inc: Block count of current generation
  * @sc_dirty_files: List of files to be written
  * @sc_gc_inodes: List of GC inodes having blocks to be written
- * @sc_copied_buffers: List of copied buffers (buffer heads) to freeze data
  * @sc_freesegs: array of segment numbers to be freed
  * @sc_nfreesegs: number of segments on @sc_freesegs
  * @sc_dsync_inode: inode whose data pages are written for a sync operation
@@ -136,7 +135,6 @@ struct nilfs_sc_info {
 	struct list_head	sc_dirty_files;
 	struct list_head	sc_gc_inodes;
-	struct list_head	sc_copied_buffers;
 
 	__u64		       *sc_freesegs;
 	size_t			sc_nfreesegs;
......
@@ -33,7 +33,9 @@
 struct nilfs_sufile_info {
 	struct nilfs_mdt_info mi;
-	unsigned long ncleansegs;
+	unsigned long ncleansegs;	/* number of clean segments */
+	__u64 allocmin;			/* lower limit of allocatable segment range */
+	__u64 allocmax;			/* upper limit of allocatable segment range */
 };
 
 static inline struct nilfs_sufile_info *NILFS_SUI(struct inode *sufile)
@@ -96,6 +98,13 @@ nilfs_sufile_get_segment_usage_block(struct inode *sufile, __u64 segnum,
 				     create, NULL, bhp);
 }
 
+static int nilfs_sufile_delete_segment_usage_block(struct inode *sufile,
+						   __u64 segnum)
+{
+	return nilfs_mdt_delete_block(sufile,
+				      nilfs_sufile_get_blkoff(sufile, segnum));
+}
+
 static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
 				     u64 ncleanadd, u64 ndirtyadd)
 {
@@ -108,7 +117,7 @@ static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
 	le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd);
 	kunmap_atomic(kaddr, KM_USER0);
 
-	nilfs_mdt_mark_buffer_dirty(header_bh);
+	mark_buffer_dirty(header_bh);
 }
 
 /**
@@ -247,6 +256,35 @@ int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create,
 	return ret;
 }
/**
* nilfs_sufile_set_alloc_range - limit range of segment to be allocated
* @sufile: inode of segment usage file
* @start: minimum segment number of allocatable region (inclusive)
* @end: maximum segment number of allocatable region (inclusive)
*
* Return Value: On success, 0 is returned. On error, one of the
* following negative error codes is returned.
*
* %-ERANGE - invalid segment region
*/
int nilfs_sufile_set_alloc_range(struct inode *sufile, __u64 start, __u64 end)
{
struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
__u64 nsegs;
int ret = -ERANGE;
down_write(&NILFS_MDT(sufile)->mi_sem);
nsegs = nilfs_sufile_get_nsegments(sufile);
if (start <= end && end < nsegs) {
sui->allocmin = start;
sui->allocmax = end;
ret = 0;
}
up_write(&NILFS_MDT(sufile)->mi_sem);
return ret;
}
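nilfs_sufile_set_alloc_range() is the in-kernel back end of the new NILFS_IOCTL_SET_ALLOC_RANGE ioctl. A minimal sketch of a caller, loosely modeled on the ioctl handler this series adds elsewhere; the function name and the omitted permission/size checks are illustrative, not the verbatim patch:

static int example_set_alloc_range(struct inode *inode, void __user *argp)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	__u64 range[2];

	/* userland passes an inclusive [start, end] pair of segment numbers */
	if (copy_from_user(range, argp, sizeof(range)))
		return -EFAULT;

	/* returns -ERANGE if start > end or end is beyond the last segment */
	return nilfs_sufile_set_alloc_range(nilfs->ns_sufile,
					    range[0], range[1]);
}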
 /**
  * nilfs_sufile_alloc - allocate a segment
  * @sufile: inode of segment usage file
@@ -269,11 +307,12 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
 	struct buffer_head *header_bh, *su_bh;
 	struct nilfs_sufile_header *header;
 	struct nilfs_segment_usage *su;
+	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
 	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
 	__u64 segnum, maxsegnum, last_alloc;
 	void *kaddr;
-	unsigned long nsegments, ncleansegs, nsus;
-	int ret, i, j;
+	unsigned long nsegments, ncleansegs, nsus, cnt;
+	int ret, j;
 
 	down_write(&NILFS_MDT(sufile)->mi_sem);
@@ -287,13 +326,31 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
 	kunmap_atomic(kaddr, KM_USER0);
 
 	nsegments = nilfs_sufile_get_nsegments(sufile);
+	maxsegnum = sui->allocmax;
 	segnum = last_alloc + 1;
-	maxsegnum = nsegments - 1;
-	for (i = 0; i < nsegments; i += nsus) {
-		if (segnum >= nsegments) {
-			/* wrap around */
-			segnum = 0;
-			maxsegnum = last_alloc;
+	if (segnum < sui->allocmin || segnum > sui->allocmax)
+		segnum = sui->allocmin;
+
+	for (cnt = 0; cnt < nsegments; cnt += nsus) {
+		if (segnum > maxsegnum) {
+			if (cnt < sui->allocmax - sui->allocmin + 1) {
+				/*
+				 * wrap around in the limited region.
+				 * if allocation started from
+				 * sui->allocmin, this never happens.
+				 */
+				segnum = sui->allocmin;
+				maxsegnum = last_alloc;
+			} else if (segnum > sui->allocmin &&
+				   sui->allocmax + 1 < nsegments) {
+				segnum = sui->allocmax + 1;
+				maxsegnum = nsegments - 1;
+			} else if (sui->allocmin > 0) {
+				segnum = 0;
+				maxsegnum = sui->allocmin - 1;
+			} else {
+				break; /* never happens */
+			}
 		}
 		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1,
 							   &su_bh);
@@ -319,9 +376,9 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
 			header->sh_last_alloc = cpu_to_le64(segnum);
 			kunmap_atomic(kaddr, KM_USER0);
 
-			NILFS_SUI(sufile)->ncleansegs--;
-			nilfs_mdt_mark_buffer_dirty(header_bh);
-			nilfs_mdt_mark_buffer_dirty(su_bh);
+			sui->ncleansegs--;
+			mark_buffer_dirty(header_bh);
+			mark_buffer_dirty(su_bh);
 			nilfs_mdt_mark_dirty(sufile);
 			brelse(su_bh);
 			*segnump = segnum;
@@ -364,7 +421,7 @@ void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
 	nilfs_sufile_mod_counter(header_bh, -1, 1);
 	NILFS_SUI(sufile)->ncleansegs--;
 
-	nilfs_mdt_mark_buffer_dirty(su_bh);
+	mark_buffer_dirty(su_bh);
 	nilfs_mdt_mark_dirty(sufile);
 }
@@ -395,7 +452,7 @@ void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
 	nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
 	NILFS_SUI(sufile)->ncleansegs -= clean;
 
-	nilfs_mdt_mark_buffer_dirty(su_bh);
+	mark_buffer_dirty(su_bh);
 	nilfs_mdt_mark_dirty(sufile);
 }
@@ -421,7 +478,7 @@ void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
 	sudirty = nilfs_segment_usage_dirty(su);
 	nilfs_segment_usage_set_clean(su);
 	kunmap_atomic(kaddr, KM_USER0);
-	nilfs_mdt_mark_buffer_dirty(su_bh);
+	mark_buffer_dirty(su_bh);
 
 	nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
 	NILFS_SUI(sufile)->ncleansegs++;
@@ -441,7 +498,7 @@ int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
 	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
 	if (!ret) {
-		nilfs_mdt_mark_buffer_dirty(bh);
+		mark_buffer_dirty(bh);
 		nilfs_mdt_mark_dirty(sufile);
 		brelse(bh);
 	}
@@ -476,7 +533,7 @@ int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
 	su->su_nblocks = cpu_to_le32(nblocks);
 	kunmap_atomic(kaddr, KM_USER0);
 
-	nilfs_mdt_mark_buffer_dirty(bh);
+	mark_buffer_dirty(bh);
 	nilfs_mdt_mark_dirty(sufile);
 	brelse(bh);
@@ -505,7 +562,7 @@ int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
 {
 	struct buffer_head *header_bh;
 	struct nilfs_sufile_header *header;
-	struct the_nilfs *nilfs = NILFS_I_NILFS(sufile);
+	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
 	void *kaddr;
 	int ret;
@@ -555,10 +612,182 @@ void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
 		nilfs_sufile_mod_counter(header_bh, -1, 0);
 		NILFS_SUI(sufile)->ncleansegs--;
 	}
-	nilfs_mdt_mark_buffer_dirty(su_bh);
+	mark_buffer_dirty(su_bh);
 	nilfs_mdt_mark_dirty(sufile);
 }
/**
* nilfs_sufile_truncate_range - truncate range of segment array
* @sufile: inode of segment usage file
* @start: start segment number (inclusive)
* @end: end segment number (inclusive)
*
* Return Value: On success, 0 is returned. On error, one of the
* following negative error codes is returned.
*
* %-EIO - I/O error.
*
* %-ENOMEM - Insufficient amount of memory available.
*
* %-EINVAL - Invalid number of segments specified
*
* %-EBUSY - Dirty or active segments are present in the range
*/
static int nilfs_sufile_truncate_range(struct inode *sufile,
__u64 start, __u64 end)
{
struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
struct buffer_head *header_bh;
struct buffer_head *su_bh;
struct nilfs_segment_usage *su, *su2;
size_t susz = NILFS_MDT(sufile)->mi_entry_size;
unsigned long segusages_per_block;
unsigned long nsegs, ncleaned;
__u64 segnum;
void *kaddr;
ssize_t n, nc;
int ret;
int j;
nsegs = nilfs_sufile_get_nsegments(sufile);
ret = -EINVAL;
if (start > end || start >= nsegs)
goto out;
ret = nilfs_sufile_get_header_block(sufile, &header_bh);
if (ret < 0)
goto out;
segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
ncleaned = 0;
for (segnum = start; segnum <= end; segnum += n) {
n = min_t(unsigned long,
segusages_per_block -
nilfs_sufile_get_offset(sufile, segnum),
end - segnum + 1);
ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
&su_bh);
if (ret < 0) {
if (ret != -ENOENT)
goto out_header;
/* hole */
continue;
}
kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
su = nilfs_sufile_block_get_segment_usage(
sufile, segnum, su_bh, kaddr);
su2 = su;
for (j = 0; j < n; j++, su = (void *)su + susz) {
if ((le32_to_cpu(su->su_flags) &
~(1UL << NILFS_SEGMENT_USAGE_ERROR)) ||
nilfs_segment_is_active(nilfs, segnum + j)) {
ret = -EBUSY;
kunmap_atomic(kaddr, KM_USER0);
brelse(su_bh);
goto out_header;
}
}
nc = 0;
for (su = su2, j = 0; j < n; j++, su = (void *)su + susz) {
if (nilfs_segment_usage_error(su)) {
nilfs_segment_usage_set_clean(su);
nc++;
}
}
kunmap_atomic(kaddr, KM_USER0);
if (nc > 0) {
mark_buffer_dirty(su_bh);
ncleaned += nc;
}
brelse(su_bh);
if (n == segusages_per_block) {
/* make hole */
nilfs_sufile_delete_segment_usage_block(sufile, segnum);
}
}
ret = 0;
out_header:
if (ncleaned > 0) {
NILFS_SUI(sufile)->ncleansegs += ncleaned;
nilfs_sufile_mod_counter(header_bh, ncleaned, 0);
nilfs_mdt_mark_dirty(sufile);
}
brelse(header_bh);
out:
return ret;
}
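To make the hole-punching above concrete: with 4 KiB blocks and 16-byte segment usage entries (typical values, assumed here), segusages_per_block is 256, so truncating segments 512-1279 covers exactly three whole sufile blocks; each hits the n == segusages_per_block case and is deleted outright, whereas a range that only partially covers its first or last block leaves that block in place and merely cleans any error entries in it.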
/**
* nilfs_sufile_resize - resize segment array
* @sufile: inode of segment usage file
* @newnsegs: new number of segments
*
* Return Value: On success, 0 is returned. On error, one of the
* following negative error codes is returned.
*
* %-EIO - I/O error.
*
* %-ENOMEM - Insufficient amount of memory available.
*
* %-ENOSPC - Enough free space is not left for shrinking
*
* %-EBUSY - Dirty or active segments exist in the region to be truncated
*/
int nilfs_sufile_resize(struct inode *sufile, __u64 newnsegs)
{
struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
struct buffer_head *header_bh;
struct nilfs_sufile_header *header;
struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
void *kaddr;
unsigned long nsegs, nrsvsegs;
int ret = 0;
down_write(&NILFS_MDT(sufile)->mi_sem);
nsegs = nilfs_sufile_get_nsegments(sufile);
if (nsegs == newnsegs)
goto out;
ret = -ENOSPC;
nrsvsegs = nilfs_nrsvsegs(nilfs, newnsegs);
if (newnsegs < nsegs && nsegs - newnsegs + nrsvsegs > sui->ncleansegs)
goto out;
ret = nilfs_sufile_get_header_block(sufile, &header_bh);
if (ret < 0)
goto out;
if (newnsegs > nsegs) {
sui->ncleansegs += newnsegs - nsegs;
} else /* newnsegs < nsegs */ {
ret = nilfs_sufile_truncate_range(sufile, newnsegs, nsegs - 1);
if (ret < 0)
goto out_header;
sui->ncleansegs -= nsegs - newnsegs;
}
kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
header = kaddr + bh_offset(header_bh);
header->sh_ncleansegs = cpu_to_le64(sui->ncleansegs);
kunmap_atomic(kaddr, KM_USER0);
mark_buffer_dirty(header_bh);
nilfs_mdt_mark_dirty(sufile);
nilfs_set_nsegments(nilfs, newnsegs);
out_header:
brelse(header_bh);
out:
up_write(&NILFS_MDT(sufile)->mi_sem);
return ret;
}
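As a worked example of the shrink check above: going from nsegs = 10000 to newnsegs = 8000 with ns_r_segments_percentage = 5 (a typical mkfs default, assumed here) requires nsegs - newnsegs + nilfs_nrsvsegs(nilfs, 8000) = 2000 + 400 = 2400 clean segments; with fewer, the function returns -ENOSPC before touching any segment usage entry.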
 /**
  * nilfs_sufile_get_suinfo -
  * @sufile: inode of segment usage file
@@ -583,7 +812,7 @@ ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
 	struct nilfs_segment_usage *su;
 	struct nilfs_suinfo *si = buf;
 	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
-	struct the_nilfs *nilfs = NILFS_I_NILFS(sufile);
+	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
 	void *kaddr;
 	unsigned long nsegs, segusages_per_block;
 	ssize_t n;
@@ -679,6 +908,9 @@ int nilfs_sufile_read(struct super_block *sb, size_t susize,
 	kunmap_atomic(kaddr, KM_USER0);
 	brelse(header_bh);
 
+	sui->allocmax = nilfs_sufile_get_nsegments(sufile) - 1;
+	sui->allocmin = 0;
+
 	unlock_new_inode(sufile);
  out:
 	*inodep = sufile;

......
@@ -31,11 +31,12 @@
 static inline unsigned long nilfs_sufile_get_nsegments(struct inode *sufile)
 {
-	return NILFS_I_NILFS(sufile)->ns_nsegments;
+	return ((struct the_nilfs *)sufile->i_sb->s_fs_info)->ns_nsegments;
 }
 
 unsigned long nilfs_sufile_get_ncleansegs(struct inode *sufile);
 
+int nilfs_sufile_set_alloc_range(struct inode *sufile, __u64 start, __u64 end);
 int nilfs_sufile_alloc(struct inode *, __u64 *);
 int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum);
 int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
@@ -61,6 +62,7 @@ void nilfs_sufile_do_cancel_free(struct inode *, __u64, struct buffer_head *,
 void nilfs_sufile_do_set_error(struct inode *, __u64, struct buffer_head *,
 			       struct buffer_head *);
 
+int nilfs_sufile_resize(struct inode *sufile, __u64 newnsegs);
 int nilfs_sufile_read(struct super_block *sb, size_t susize,
 		      struct nilfs_inode *raw_inode, struct inode **inodep);
......
@@ -56,6 +56,7 @@
 #include "btnode.h"
 #include "page.h"
 #include "cpfile.h"
+#include "sufile.h" /* nilfs_sufile_resize(), nilfs_sufile_set_alloc_range() */
 #include "ifile.h"
 #include "dat.h"
 #include "segment.h"
@@ -165,7 +166,7 @@ struct inode *nilfs_alloc_inode(struct super_block *sb)
 	ii->i_state = 0;
 	ii->i_cno = 0;
 	ii->vfs_inode.i_version = 1;
-	nilfs_btnode_cache_init(&ii->i_btnode_cache, sb->s_bdi);
+	nilfs_mapping_init(&ii->i_btnode_cache, &ii->vfs_inode, sb->s_bdi);
 	return &ii->vfs_inode;
 }
@@ -347,6 +348,134 @@ int nilfs_cleanup_super(struct super_block *sb)
 	return ret;
 }
/**
* nilfs_move_2nd_super - relocate secondary super block
* @sb: super block instance
* @sb2off: new offset of the secondary super block (in bytes)
*/
static int nilfs_move_2nd_super(struct super_block *sb, loff_t sb2off)
{
struct the_nilfs *nilfs = sb->s_fs_info;
struct buffer_head *nsbh;
struct nilfs_super_block *nsbp;
sector_t blocknr, newblocknr;
unsigned long offset;
int sb2i = -1; /* array index of the secondary superblock */
int ret = 0;
/* nilfs->ns_sem must be locked by the caller. */
if (nilfs->ns_sbh[1] &&
nilfs->ns_sbh[1]->b_blocknr > nilfs->ns_first_data_block) {
sb2i = 1;
blocknr = nilfs->ns_sbh[1]->b_blocknr;
} else if (nilfs->ns_sbh[0]->b_blocknr > nilfs->ns_first_data_block) {
sb2i = 0;
blocknr = nilfs->ns_sbh[0]->b_blocknr;
}
if (sb2i >= 0 && (u64)blocknr << nilfs->ns_blocksize_bits == sb2off)
goto out; /* super block location is unchanged */
/* Get new super block buffer */
newblocknr = sb2off >> nilfs->ns_blocksize_bits;
offset = sb2off & (nilfs->ns_blocksize - 1);
nsbh = sb_getblk(sb, newblocknr);
if (!nsbh) {
printk(KERN_WARNING
"NILFS warning: unable to move secondary superblock "
"to block %llu\n", (unsigned long long)newblocknr);
ret = -EIO;
goto out;
}
nsbp = (void *)nsbh->b_data + offset;
memset(nsbp, 0, nilfs->ns_blocksize);
if (sb2i >= 0) {
memcpy(nsbp, nilfs->ns_sbp[sb2i], nilfs->ns_sbsize);
brelse(nilfs->ns_sbh[sb2i]);
nilfs->ns_sbh[sb2i] = nsbh;
nilfs->ns_sbp[sb2i] = nsbp;
} else if (nilfs->ns_sbh[0]->b_blocknr < nilfs->ns_first_data_block) {
/* secondary super block will be restored to index 1 */
nilfs->ns_sbh[1] = nsbh;
nilfs->ns_sbp[1] = nsbp;
} else {
brelse(nsbh);
}
out:
return ret;
}
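A note on the helper above: NILFS_SB2_OFFSET_BYTES(), which the resize path feeds into it, points at the last whole 4 KiB block below the given device size (per its definition in the nilfs on-disk header), so this relocation is what keeps a secondary superblock at the tail of the volume across both shrinking, where the old location would fall off the device, and expansion, where the old location would no longer be the tail.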
/**
* nilfs_resize_fs - resize the filesystem
* @sb: super block instance
* @newsize: new size of the filesystem (in bytes)
*/
int nilfs_resize_fs(struct super_block *sb, __u64 newsize)
{
struct the_nilfs *nilfs = sb->s_fs_info;
struct nilfs_super_block **sbp;
__u64 devsize, newnsegs;
loff_t sb2off;
int ret;
ret = -ERANGE;
devsize = i_size_read(sb->s_bdev->bd_inode);
if (newsize > devsize)
goto out;
/*
* Write lock is required to protect some functions depending
* on the number of segments, the number of reserved segments,
* and so forth.
*/
down_write(&nilfs->ns_segctor_sem);
sb2off = NILFS_SB2_OFFSET_BYTES(newsize);
newnsegs = sb2off >> nilfs->ns_blocksize_bits;
do_div(newnsegs, nilfs->ns_blocks_per_segment);
ret = nilfs_sufile_resize(nilfs->ns_sufile, newnsegs);
up_write(&nilfs->ns_segctor_sem);
if (ret < 0)
goto out;
ret = nilfs_construct_segment(sb);
if (ret < 0)
goto out;
down_write(&nilfs->ns_sem);
nilfs_move_2nd_super(sb, sb2off);
ret = -EIO;
sbp = nilfs_prepare_super(sb, 0);
if (likely(sbp)) {
nilfs_set_log_cursor(sbp[0], nilfs);
/*
* Drop NILFS_RESIZE_FS flag for compatibility with
* mount-time resize which may be implemented in a
* future release.
*/
sbp[0]->s_state = cpu_to_le16(le16_to_cpu(sbp[0]->s_state) &
~NILFS_RESIZE_FS);
sbp[0]->s_dev_size = cpu_to_le64(newsize);
sbp[0]->s_nsegments = cpu_to_le64(nilfs->ns_nsegments);
if (sbp[1])
memcpy(sbp[1], sbp[0], nilfs->ns_sbsize);
ret = nilfs_commit_super(sb, NILFS_SB_COMMIT_ALL);
}
up_write(&nilfs->ns_sem);
/*
* Reset the range of allocatable segments last. This order
* is important in the case of expansion because the secondary
* superblock must be protected from log write until migration
* completes.
*/
if (!ret)
nilfs_sufile_set_alloc_range(nilfs->ns_sufile, 0, newnsegs - 1);
out:
return ret;
}
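Following the arithmetic at the top of this function with 4 KiB blocks (ns_blocksize_bits = 12) and 2048 blocks per segment (both common defaults, assumed here): newsize = 8 GiB gives sb2off = 8 GiB - 4 KiB, i.e. 2097151 whole blocks below the relocated secondary superblock, and do_div() truncates that to newnsegs = 1023 segments, so the block holding the new superblock copy never lands inside an allocatable segment.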
 static void nilfs_put_super(struct super_block *sb)
 {
 	struct the_nilfs *nilfs = sb->s_fs_info;
......
@@ -363,6 +363,24 @@ static unsigned long long nilfs_max_size(unsigned int blkbits)
 	return res;
 }
/**
* nilfs_nrsvsegs - calculate the number of reserved segments
* @nilfs: nilfs object
* @nsegs: total number of segments
*/
unsigned long nilfs_nrsvsegs(struct the_nilfs *nilfs, unsigned long nsegs)
{
return max_t(unsigned long, NILFS_MIN_NRSVSEGS,
DIV_ROUND_UP(nsegs * nilfs->ns_r_segments_percentage,
100));
}
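For instance, with ns_r_segments_percentage = 5 (a common default, assumed here), a 100000-segment volume reserves max(NILFS_MIN_NRSVSEGS, DIV_ROUND_UP(100000 * 5, 100)) = 5000 segments; the NILFS_MIN_NRSVSEGS floor only bites on very small volumes.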
void nilfs_set_nsegments(struct the_nilfs *nilfs, unsigned long nsegs)
{
nilfs->ns_nsegments = nsegs;
nilfs->ns_nrsvsegs = nilfs_nrsvsegs(nilfs, nsegs);
}
 static int nilfs_store_disk_layout(struct the_nilfs *nilfs,
 				   struct nilfs_super_block *sbp)
 {
@@ -389,13 +407,9 @@ static int nilfs_store_disk_layout(struct the_nilfs *nilfs,
 	}
 
 	nilfs->ns_first_data_block = le64_to_cpu(sbp->s_first_data_block);
-	nilfs->ns_nsegments = le64_to_cpu(sbp->s_nsegments);
 	nilfs->ns_r_segments_percentage =
 		le32_to_cpu(sbp->s_r_segments_percentage);
-	nilfs->ns_nrsvsegs =
-		max_t(unsigned long, NILFS_MIN_NRSVSEGS,
-		      DIV_ROUND_UP(nilfs->ns_nsegments *
-				   nilfs->ns_r_segments_percentage, 100));
+	nilfs_set_nsegments(nilfs, le64_to_cpu(sbp->s_nsegments));
 	nilfs->ns_crc_seed = le32_to_cpu(sbp->s_crc_seed);
 	return 0;
 }
......
@@ -268,6 +268,8 @@ struct the_nilfs *alloc_nilfs(struct block_device *bdev);
 void destroy_nilfs(struct the_nilfs *nilfs);
 int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data);
 int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb);
+unsigned long nilfs_nrsvsegs(struct the_nilfs *nilfs, unsigned long nsegs);
+void nilfs_set_nsegments(struct the_nilfs *nilfs, unsigned long nsegs);
 int nilfs_discard_segments(struct the_nilfs *, __u64 *, size_t);
 int nilfs_count_free_blocks(struct the_nilfs *, sector_t *);
 struct nilfs_root *nilfs_lookup_root(struct the_nilfs *nilfs, __u64 cno);
......
@@ -107,7 +107,7 @@ struct nilfs_super_root {
 #define NILFS_SR_DAT_OFFSET(inode_size)     NILFS_SR_MDT_OFFSET(inode_size, 0)
 #define NILFS_SR_CPFILE_OFFSET(inode_size)  NILFS_SR_MDT_OFFSET(inode_size, 1)
 #define NILFS_SR_SUFILE_OFFSET(inode_size)  NILFS_SR_MDT_OFFSET(inode_size, 2)
-#define NILFS_SR_BYTES			    (sizeof(struct nilfs_super_root))
+#define NILFS_SR_BYTES(inode_size)	    NILFS_SR_MDT_OFFSET(inode_size, 3)
 
 /*
  * Maximal mount counts
@@ -845,5 +845,7 @@ struct nilfs_bdesc {
 	_IOR(NILFS_IOCTL_IDENT, 0x8A, __u64)
 #define NILFS_IOCTL_RESIZE  \
 	_IOW(NILFS_IOCTL_IDENT, 0x8B, __u64)
+#define NILFS_IOCTL_SET_ALLOC_RANGE  \
+	_IOW(NILFS_IOCTL_IDENT, 0x8C, __u64[2])
 
 #endif	/* _LINUX_NILFS_FS_H */
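The two ioctls above are what a userland resizer would drive. A minimal sketch, assuming a nilfs2 volume mounted at /mnt/nilfs; the path, sizes, and bare-bones error handling are placeholders rather than part of this patch set, and NILFS_IOCTL_RESIZE typically requires CAP_SYS_ADMIN:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include <linux/nilfs2_fs.h>	/* ioctl numbers shown above */

int main(void)
{
	__u64 newsize = 4ULL << 30;	/* new filesystem size: 4 GiB */
	__u64 range[2] = { 0, 499 };	/* confine allocation to segments 0-499 */
	int fd = open("/mnt/nilfs", O_RDONLY);	/* any file on the mounted fs */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, NILFS_IOCTL_SET_ALLOC_RANGE, range) < 0)
		perror("NILFS_IOCTL_SET_ALLOC_RANGE");
	if (ioctl(fd, NILFS_IOCTL_RESIZE, &newsize) < 0)
		perror("NILFS_IOCTL_RESIZE");
	close(fd);
	return 0;
}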