Commit 4e2ccdb0 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ryusuke/nilfs2

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ryusuke/nilfs2: (49 commits)
  nilfs2: separate wait function from nilfs_segctor_write
  nilfs2: add iterator for segment buffers
  nilfs2: hide nilfs_write_info struct in segment buffer code
  nilfs2: relocate io status variables to segment buffer
  nilfs2: do not return io error for bio allocation failure
  nilfs2: use list_splice_tail or list_splice_tail_init
  nilfs2: replace mark_inode_dirty as nilfs_mark_inode_dirty
  nilfs2: delete mark_inode_dirty in nilfs_delete_entry
  nilfs2: delete mark_inode_dirty in nilfs_commit_chunk
  nilfs2: change return type of nilfs_commit_chunk
  nilfs2: split nilfs_unlink as nilfs_do_unlink and nilfs_unlink
  nilfs2: delete redundant mark_inode_dirty
  nilfs2: expand inode_inc_link_count and inode_dec_link_count
  nilfs2: delete mark_inode_dirty from nilfs_set_link
  nilfs2: delete mark_inode_dirty in nilfs_new_inode
  nilfs2: add norecovery mount option
  nilfs2: add helper to get if volume is in a valid state
  nilfs2: move recovery completion into load_nilfs function
  nilfs2: apply readahead for recovery on mount
  nilfs2: clean up get/put function of a segment usage
  ...
parents 3ef884b4 a694291a
...@@ -49,8 +49,7 @@ Mount options
 NILFS2 supports the following mount options:
 (*) == default
-barrier=on(*)		This enables/disables barriers. barrier=off disables
-			it, barrier=on enables it.
+nobarrier		Disables barriers.
 errors=continue(*)	Keep going on a filesystem error.
 errors=remount-ro	Remount the filesystem read-only on an error.
 errors=panic		Panic and halt the machine if an error occurs.
...@@ -71,6 +70,10 @@ order=strict	Apply strict in-order semantics that preserves sequence
 			blocks. That means, it is guaranteed that no
 			overtaking of events occurs in the recovered file
 			system after a crash.
+norecovery		Disable recovery of the filesystem on mount.
+			This disables every write access on the device for
+			read-only mounts or snapshots. This option will fail
+			for r/w mounts on an unclean volume.
 NILFS2 usage
 ============
......
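As a quick illustration of the documented behaviour, here is a minimal userspace sketch that mounts a NILFS2 volume read-only with the new norecovery option via mount(2). It is not part of the patch, and the device and mount-point paths are placeholders.

```c
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/*
	 * Hypothetical paths -- adjust for your system.  norecovery is
	 * meant to be combined with a read-only mount (or a snapshot);
	 * a read/write mount of an unclean volume is expected to fail.
	 */
	const char *dev = "/dev/sdb1";
	const char *mnt = "/mnt/nilfs";

	if (mount(dev, mnt, "nilfs2", MS_RDONLY, "norecovery") < 0) {
		perror("mount nilfs2 -o ro,norecovery");
		return 1;
	}
	return 0;
}
```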
...@@ -142,29 +142,75 @@ static void nilfs_palloc_desc_block_init(struct inode *inode, ...@@ -142,29 +142,75 @@ static void nilfs_palloc_desc_block_init(struct inode *inode,
} }
} }
static int nilfs_palloc_get_block(struct inode *inode, unsigned long blkoff,
int create,
void (*init_block)(struct inode *,
struct buffer_head *,
void *),
struct buffer_head **bhp,
struct nilfs_bh_assoc *prev,
spinlock_t *lock)
{
int ret;
spin_lock(lock);
if (prev->bh && blkoff == prev->blkoff) {
get_bh(prev->bh);
*bhp = prev->bh;
spin_unlock(lock);
return 0;
}
spin_unlock(lock);
ret = nilfs_mdt_get_block(inode, blkoff, create, init_block, bhp);
if (!ret) {
spin_lock(lock);
/*
* The following code must be safe for change of the
* cache contents during the get block call.
*/
brelse(prev->bh);
get_bh(*bhp);
prev->bh = *bhp;
prev->blkoff = blkoff;
spin_unlock(lock);
}
return ret;
}
 static int nilfs_palloc_get_desc_block(struct inode *inode,
 				       unsigned long group,
 				       int create, struct buffer_head **bhp)
 {
-	return nilfs_mdt_get_block(inode,
-				   nilfs_palloc_desc_blkoff(inode, group),
-				   create, nilfs_palloc_desc_block_init, bhp);
+	struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache;
+
+	return nilfs_palloc_get_block(inode,
+				      nilfs_palloc_desc_blkoff(inode, group),
+				      create, nilfs_palloc_desc_block_init,
+				      bhp, &cache->prev_desc, &cache->lock);
 }
 static int nilfs_palloc_get_bitmap_block(struct inode *inode,
 					 unsigned long group,
 					 int create, struct buffer_head **bhp)
 {
-	return nilfs_mdt_get_block(inode,
-				   nilfs_palloc_bitmap_blkoff(inode, group),
-				   create, NULL, bhp);
+	struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache;
+
+	return nilfs_palloc_get_block(inode,
+				      nilfs_palloc_bitmap_blkoff(inode, group),
+				      create, NULL, bhp,
+				      &cache->prev_bitmap, &cache->lock);
 }
 int nilfs_palloc_get_entry_block(struct inode *inode, __u64 nr,
 				 int create, struct buffer_head **bhp)
 {
-	return nilfs_mdt_get_block(inode, nilfs_palloc_entry_blkoff(inode, nr),
-				   create, NULL, bhp);
+	struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache;
+
+	return nilfs_palloc_get_block(inode,
+				      nilfs_palloc_entry_blkoff(inode, nr),
+				      create, NULL, bhp,
+				      &cache->prev_entry, &cache->lock);
 }
static struct nilfs_palloc_group_desc * static struct nilfs_palloc_group_desc *
...@@ -176,13 +222,6 @@ nilfs_palloc_block_get_group_desc(const struct inode *inode, ...@@ -176,13 +222,6 @@ nilfs_palloc_block_get_group_desc(const struct inode *inode,
group % nilfs_palloc_groups_per_desc_block(inode); group % nilfs_palloc_groups_per_desc_block(inode);
} }
static unsigned char *
nilfs_palloc_block_get_bitmap(const struct inode *inode,
const struct buffer_head *bh, void *kaddr)
{
return (unsigned char *)(kaddr + bh_offset(bh));
}
void *nilfs_palloc_block_get_entry(const struct inode *inode, __u64 nr, void *nilfs_palloc_block_get_entry(const struct inode *inode, __u64 nr,
const struct buffer_head *bh, void *kaddr) const struct buffer_head *bh, void *kaddr)
{ {
...@@ -289,8 +328,7 @@ int nilfs_palloc_prepare_alloc_entry(struct inode *inode, ...@@ -289,8 +328,7 @@ int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
if (ret < 0) if (ret < 0)
goto out_desc; goto out_desc;
bitmap_kaddr = kmap(bitmap_bh->b_page); bitmap_kaddr = kmap(bitmap_bh->b_page);
bitmap = nilfs_palloc_block_get_bitmap( bitmap = bitmap_kaddr + bh_offset(bitmap_bh);
inode, bitmap_bh, bitmap_kaddr);
pos = nilfs_palloc_find_available_slot( pos = nilfs_palloc_find_available_slot(
inode, group, group_offset, bitmap, inode, group, group_offset, bitmap,
entries_per_group); entries_per_group);
...@@ -351,8 +389,7 @@ void nilfs_palloc_commit_free_entry(struct inode *inode, ...@@ -351,8 +389,7 @@ void nilfs_palloc_commit_free_entry(struct inode *inode,
desc = nilfs_palloc_block_get_group_desc(inode, group, desc = nilfs_palloc_block_get_group_desc(inode, group,
req->pr_desc_bh, desc_kaddr); req->pr_desc_bh, desc_kaddr);
bitmap_kaddr = kmap(req->pr_bitmap_bh->b_page); bitmap_kaddr = kmap(req->pr_bitmap_bh->b_page);
bitmap = nilfs_palloc_block_get_bitmap(inode, req->pr_bitmap_bh, bitmap = bitmap_kaddr + bh_offset(req->pr_bitmap_bh);
bitmap_kaddr);
if (!nilfs_clear_bit_atomic(nilfs_mdt_bgl_lock(inode, group), if (!nilfs_clear_bit_atomic(nilfs_mdt_bgl_lock(inode, group),
group_offset, bitmap)) group_offset, bitmap))
...@@ -385,8 +422,7 @@ void nilfs_palloc_abort_alloc_entry(struct inode *inode, ...@@ -385,8 +422,7 @@ void nilfs_palloc_abort_alloc_entry(struct inode *inode,
desc = nilfs_palloc_block_get_group_desc(inode, group, desc = nilfs_palloc_block_get_group_desc(inode, group,
req->pr_desc_bh, desc_kaddr); req->pr_desc_bh, desc_kaddr);
bitmap_kaddr = kmap(req->pr_bitmap_bh->b_page); bitmap_kaddr = kmap(req->pr_bitmap_bh->b_page);
bitmap = nilfs_palloc_block_get_bitmap(inode, req->pr_bitmap_bh, bitmap = bitmap_kaddr + bh_offset(req->pr_bitmap_bh);
bitmap_kaddr);
if (!nilfs_clear_bit_atomic(nilfs_mdt_bgl_lock(inode, group), if (!nilfs_clear_bit_atomic(nilfs_mdt_bgl_lock(inode, group),
group_offset, bitmap)) group_offset, bitmap))
printk(KERN_WARNING "%s: entry numer %llu already freed\n", printk(KERN_WARNING "%s: entry numer %llu already freed\n",
...@@ -472,8 +508,7 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems) ...@@ -472,8 +508,7 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems)
desc = nilfs_palloc_block_get_group_desc( desc = nilfs_palloc_block_get_group_desc(
inode, group, desc_bh, desc_kaddr); inode, group, desc_bh, desc_kaddr);
bitmap_kaddr = kmap(bitmap_bh->b_page); bitmap_kaddr = kmap(bitmap_bh->b_page);
bitmap = nilfs_palloc_block_get_bitmap( bitmap = bitmap_kaddr + bh_offset(bitmap_bh);
inode, bitmap_bh, bitmap_kaddr);
for (j = i, n = 0; for (j = i, n = 0;
(j < nitems) && nilfs_palloc_group_is_in(inode, group, (j < nitems) && nilfs_palloc_group_is_in(inode, group,
entry_nrs[j]); entry_nrs[j]);
...@@ -502,3 +537,30 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems) ...@@ -502,3 +537,30 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems)
} }
return 0; return 0;
} }
void nilfs_palloc_setup_cache(struct inode *inode,
struct nilfs_palloc_cache *cache)
{
NILFS_MDT(inode)->mi_palloc_cache = cache;
spin_lock_init(&cache->lock);
}
void nilfs_palloc_clear_cache(struct inode *inode)
{
struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache;
spin_lock(&cache->lock);
brelse(cache->prev_desc.bh);
brelse(cache->prev_bitmap.bh);
brelse(cache->prev_entry.bh);
cache->prev_desc.bh = NULL;
cache->prev_bitmap.bh = NULL;
cache->prev_entry.bh = NULL;
spin_unlock(&cache->lock);
}
void nilfs_palloc_destroy_cache(struct inode *inode)
{
nilfs_palloc_clear_cache(inode);
NILFS_MDT(inode)->mi_palloc_cache = NULL;
}
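The three helpers above define the cache lifecycle: setup_cache at creation, clear_cache when cached buffers must be dropped, destroy_cache at teardown. A hedged sketch of how a metadata file is expected to wire this up, mirroring the dat/ifile constructors that appear later in this series; struct example_mdt_info, example_mdt_create and its parameters are placeholders, not code from the patch.

```c
/* Illustrative sketch only -- not part of the patch. */
struct example_mdt_info {
	struct nilfs_mdt_info mi;		/* must be the first member */
	struct nilfs_palloc_cache palloc_cache;
};

static struct inode *example_mdt_create(struct the_nilfs *nilfs, ino_t ino,
					size_t entry_size)
{
	struct inode *inode;
	struct example_mdt_info *info;

	/* Reserve room for the private info struct behind nilfs_mdt_info. */
	inode = nilfs_mdt_new(nilfs, NULL, ino, sizeof(*info));
	if (!inode)
		return NULL;

	if (nilfs_palloc_init_blockgroup(inode, entry_size)) {
		/* mi_palloc_cache is still NULL here, so this is safe. */
		nilfs_mdt_destroy(inode);
		return NULL;
	}

	info = (struct example_mdt_info *)NILFS_MDT(inode);
	nilfs_palloc_setup_cache(inode, &info->palloc_cache);
	return inode;
}
```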
...@@ -69,4 +69,25 @@ int nilfs_palloc_freev(struct inode *, __u64 *, size_t); ...@@ -69,4 +69,25 @@ int nilfs_palloc_freev(struct inode *, __u64 *, size_t);
#define nilfs_clear_bit_atomic ext2_clear_bit_atomic #define nilfs_clear_bit_atomic ext2_clear_bit_atomic
#define nilfs_find_next_zero_bit ext2_find_next_zero_bit #define nilfs_find_next_zero_bit ext2_find_next_zero_bit
/*
* persistent object allocator cache
*/
struct nilfs_bh_assoc {
unsigned long blkoff;
struct buffer_head *bh;
};
struct nilfs_palloc_cache {
spinlock_t lock;
struct nilfs_bh_assoc prev_desc;
struct nilfs_bh_assoc prev_bitmap;
struct nilfs_bh_assoc prev_entry;
};
void nilfs_palloc_setup_cache(struct inode *inode,
struct nilfs_palloc_cache *cache);
void nilfs_palloc_clear_cache(struct inode *inode);
void nilfs_palloc_destroy_cache(struct inode *inode);
#endif /* _NILFS_ALLOC_H */ #endif /* _NILFS_ALLOC_H */
...@@ -402,19 +402,11 @@ int nilfs_bmap_test_and_clear_dirty(struct nilfs_bmap *bmap) ...@@ -402,19 +402,11 @@ int nilfs_bmap_test_and_clear_dirty(struct nilfs_bmap *bmap)
void nilfs_bmap_add_blocks(const struct nilfs_bmap *bmap, int n) void nilfs_bmap_add_blocks(const struct nilfs_bmap *bmap, int n)
{ {
inode_add_bytes(bmap->b_inode, (1 << bmap->b_inode->i_blkbits) * n); inode_add_bytes(bmap->b_inode, (1 << bmap->b_inode->i_blkbits) * n);
if (NILFS_MDT(bmap->b_inode))
nilfs_mdt_mark_dirty(bmap->b_inode);
else
mark_inode_dirty(bmap->b_inode);
} }
void nilfs_bmap_sub_blocks(const struct nilfs_bmap *bmap, int n) void nilfs_bmap_sub_blocks(const struct nilfs_bmap *bmap, int n)
{ {
inode_sub_bytes(bmap->b_inode, (1 << bmap->b_inode->i_blkbits) * n); inode_sub_bytes(bmap->b_inode, (1 << bmap->b_inode->i_blkbits) * n);
if (NILFS_MDT(bmap->b_inode))
nilfs_mdt_mark_dirty(bmap->b_inode);
else
mark_inode_dirty(bmap->b_inode);
} }
__u64 nilfs_bmap_data_get_key(const struct nilfs_bmap *bmap, __u64 nilfs_bmap_data_get_key(const struct nilfs_bmap *bmap,
......
...@@ -68,9 +68,34 @@ void nilfs_btnode_cache_clear(struct address_space *btnc) ...@@ -68,9 +68,34 @@ void nilfs_btnode_cache_clear(struct address_space *btnc)
truncate_inode_pages(btnc, 0); truncate_inode_pages(btnc, 0);
} }
struct buffer_head *
nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr)
{
struct inode *inode = NILFS_BTNC_I(btnc);
struct buffer_head *bh;
bh = nilfs_grab_buffer(inode, btnc, blocknr, 1 << BH_NILFS_Node);
if (unlikely(!bh))
return NULL;
if (unlikely(buffer_mapped(bh) || buffer_uptodate(bh) ||
buffer_dirty(bh))) {
brelse(bh);
BUG();
}
memset(bh->b_data, 0, 1 << inode->i_blkbits);
bh->b_bdev = NILFS_I_NILFS(inode)->ns_bdev;
bh->b_blocknr = blocknr;
set_buffer_mapped(bh);
set_buffer_uptodate(bh);
unlock_page(bh->b_page);
page_cache_release(bh->b_page);
return bh;
}
int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr, int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
sector_t pblocknr, struct buffer_head **pbh, sector_t pblocknr, struct buffer_head **pbh)
int newblk)
{ {
struct buffer_head *bh; struct buffer_head *bh;
struct inode *inode = NILFS_BTNC_I(btnc); struct inode *inode = NILFS_BTNC_I(btnc);
...@@ -81,19 +106,6 @@ int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr, ...@@ -81,19 +106,6 @@ int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
return -ENOMEM; return -ENOMEM;
err = -EEXIST; /* internal code */ err = -EEXIST; /* internal code */
if (newblk) {
if (unlikely(buffer_mapped(bh) || buffer_uptodate(bh) ||
buffer_dirty(bh))) {
brelse(bh);
BUG();
}
memset(bh->b_data, 0, 1 << inode->i_blkbits);
bh->b_bdev = NILFS_I_NILFS(inode)->ns_bdev;
bh->b_blocknr = blocknr;
set_buffer_mapped(bh);
set_buffer_uptodate(bh);
goto found;
}
if (buffer_uptodate(bh) || buffer_dirty(bh)) if (buffer_uptodate(bh) || buffer_dirty(bh))
goto found; goto found;
...@@ -135,27 +147,6 @@ int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr, ...@@ -135,27 +147,6 @@ int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
return err; return err;
} }
int nilfs_btnode_get(struct address_space *btnc, __u64 blocknr,
sector_t pblocknr, struct buffer_head **pbh, int newblk)
{
struct buffer_head *bh;
int err;
err = nilfs_btnode_submit_block(btnc, blocknr, pblocknr, pbh, newblk);
if (err == -EEXIST) /* internal code (cache hit) */
return 0;
if (unlikely(err))
return err;
bh = *pbh;
wait_on_buffer(bh);
if (!buffer_uptodate(bh)) {
brelse(bh);
return -EIO;
}
return 0;
}
/** /**
* nilfs_btnode_delete - delete B-tree node buffer * nilfs_btnode_delete - delete B-tree node buffer
* @bh: buffer to be deleted * @bh: buffer to be deleted
...@@ -244,12 +235,13 @@ int nilfs_btnode_prepare_change_key(struct address_space *btnc, ...@@ -244,12 +235,13 @@ int nilfs_btnode_prepare_change_key(struct address_space *btnc,
unlock_page(obh->b_page); unlock_page(obh->b_page);
} }
-	err = nilfs_btnode_get(btnc, newkey, 0, &nbh, 1);
-	if (likely(!err)) {
-		BUG_ON(nbh == obh);
-		ctxt->newbh = nbh;
-	}
-	return err;
+	nbh = nilfs_btnode_create_block(btnc, newkey);
+	if (!nbh)
+		return -ENOMEM;
+
+	BUG_ON(nbh == obh);
+	ctxt->newbh = nbh;
+	return 0;
failed_unlock: failed_unlock:
unlock_page(obh->b_page); unlock_page(obh->b_page);
......
...@@ -40,10 +40,10 @@ struct nilfs_btnode_chkey_ctxt { ...@@ -40,10 +40,10 @@ struct nilfs_btnode_chkey_ctxt {
void nilfs_btnode_cache_init_once(struct address_space *); void nilfs_btnode_cache_init_once(struct address_space *);
void nilfs_btnode_cache_init(struct address_space *, struct backing_dev_info *); void nilfs_btnode_cache_init(struct address_space *, struct backing_dev_info *);
void nilfs_btnode_cache_clear(struct address_space *); void nilfs_btnode_cache_clear(struct address_space *);
struct buffer_head *nilfs_btnode_create_block(struct address_space *btnc,
__u64 blocknr);
int nilfs_btnode_submit_block(struct address_space *, __u64, sector_t, int nilfs_btnode_submit_block(struct address_space *, __u64, sector_t,
struct buffer_head **, int); struct buffer_head **);
int nilfs_btnode_get(struct address_space *, __u64, sector_t,
struct buffer_head **, int);
void nilfs_btnode_delete(struct buffer_head *); void nilfs_btnode_delete(struct buffer_head *);
int nilfs_btnode_prepare_change_key(struct address_space *, int nilfs_btnode_prepare_change_key(struct address_space *,
struct nilfs_btnode_chkey_ctxt *); struct nilfs_btnode_chkey_ctxt *);
......
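nilfs_btnode_get() used to bundle the submit-and-wait sequence; with it removed, a caller that needs a fully read node combines nilfs_btnode_submit_block() with wait_on_buffer() itself. A hedged sketch modelled directly on the deleted helper; example_btnode_read is a placeholder name, and since the btree.c part of the diff is collapsed above, the exact upstream caller may differ.

```c
/* Illustrative sketch, modelled on the removed nilfs_btnode_get(). */
static int example_btnode_read(struct address_space *btnc, __u64 blocknr,
			       sector_t pblocknr, struct buffer_head **pbh)
{
	struct buffer_head *bh;
	int err;

	err = nilfs_btnode_submit_block(btnc, blocknr, pblocknr, pbh);
	if (err == -EEXIST)		/* internal code: cache hit */
		return 0;
	if (unlikely(err))
		return err;

	bh = *pbh;
	wait_on_buffer(bh);
	if (!buffer_uptodate(bh)) {
		brelse(bh);
		return -EIO;
	}
	return 0;
}
```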
This diff is collapsed.
...@@ -33,28 +33,6 @@ ...@@ -33,28 +33,6 @@
struct nilfs_btree; struct nilfs_btree;
struct nilfs_btree_path; struct nilfs_btree_path;
/**
* struct nilfs_btree_node - B-tree node
* @bn_flags: flags
* @bn_level: level
* @bn_nchildren: number of children
* @bn_pad: padding
*/
struct nilfs_btree_node {
__u8 bn_flags;
__u8 bn_level;
__le16 bn_nchildren;
__le32 bn_pad;
};
/* flags */
#define NILFS_BTREE_NODE_ROOT 0x01
/* level */
#define NILFS_BTREE_LEVEL_DATA 0
#define NILFS_BTREE_LEVEL_NODE_MIN (NILFS_BTREE_LEVEL_DATA + 1)
#define NILFS_BTREE_LEVEL_MAX 14
/** /**
* struct nilfs_btree - B-tree structure * struct nilfs_btree - B-tree structure
* @bt_bmap: bmap base structure * @bt_bmap: bmap base structure
......
...@@ -926,3 +926,29 @@ int nilfs_cpfile_get_stat(struct inode *cpfile, struct nilfs_cpstat *cpstat) ...@@ -926,3 +926,29 @@ int nilfs_cpfile_get_stat(struct inode *cpfile, struct nilfs_cpstat *cpstat)
up_read(&NILFS_MDT(cpfile)->mi_sem); up_read(&NILFS_MDT(cpfile)->mi_sem);
return ret; return ret;
} }
/**
* nilfs_cpfile_read - read cpfile inode
* @cpfile: cpfile inode
* @raw_inode: on-disk cpfile inode
*/
int nilfs_cpfile_read(struct inode *cpfile, struct nilfs_inode *raw_inode)
{
return nilfs_read_inode_common(cpfile, raw_inode);
}
/**
* nilfs_cpfile_new - create cpfile
* @nilfs: nilfs object
* @cpsize: size of a checkpoint entry
*/
struct inode *nilfs_cpfile_new(struct the_nilfs *nilfs, size_t cpsize)
{
struct inode *cpfile;
cpfile = nilfs_mdt_new(nilfs, NULL, NILFS_CPFILE_INO, 0);
if (cpfile)
nilfs_mdt_set_entry_size(cpfile, cpsize,
sizeof(struct nilfs_cpfile_header));
return cpfile;
}
...@@ -40,4 +40,7 @@ int nilfs_cpfile_get_stat(struct inode *, struct nilfs_cpstat *); ...@@ -40,4 +40,7 @@ int nilfs_cpfile_get_stat(struct inode *, struct nilfs_cpstat *);
ssize_t nilfs_cpfile_get_cpinfo(struct inode *, __u64 *, int, void *, unsigned, ssize_t nilfs_cpfile_get_cpinfo(struct inode *, __u64 *, int, void *, unsigned,
size_t); size_t);
int nilfs_cpfile_read(struct inode *cpfile, struct nilfs_inode *raw_inode);
struct inode *nilfs_cpfile_new(struct the_nilfs *nilfs, size_t cpsize);
#endif /* _NILFS_CPFILE_H */ #endif /* _NILFS_CPFILE_H */
...@@ -33,6 +33,16 @@ ...@@ -33,6 +33,16 @@
#define NILFS_CNO_MIN ((__u64)1) #define NILFS_CNO_MIN ((__u64)1)
#define NILFS_CNO_MAX (~(__u64)0) #define NILFS_CNO_MAX (~(__u64)0)
struct nilfs_dat_info {
struct nilfs_mdt_info mi;
struct nilfs_palloc_cache palloc_cache;
};
static inline struct nilfs_dat_info *NILFS_DAT_I(struct inode *dat)
{
return (struct nilfs_dat_info *)NILFS_MDT(dat);
}
static int nilfs_dat_prepare_entry(struct inode *dat, static int nilfs_dat_prepare_entry(struct inode *dat,
struct nilfs_palloc_req *req, int create) struct nilfs_palloc_req *req, int create)
{ {
...@@ -425,3 +435,40 @@ ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned visz, ...@@ -425,3 +435,40 @@ ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned visz,
return nvi; return nvi;
} }
/**
* nilfs_dat_read - read dat inode
* @dat: dat inode
* @raw_inode: on-disk dat inode
*/
int nilfs_dat_read(struct inode *dat, struct nilfs_inode *raw_inode)
{
return nilfs_read_inode_common(dat, raw_inode);
}
/**
* nilfs_dat_new - create dat file
* @nilfs: nilfs object
* @entry_size: size of a dat entry
*/
struct inode *nilfs_dat_new(struct the_nilfs *nilfs, size_t entry_size)
{
static struct lock_class_key dat_lock_key;
struct inode *dat;
struct nilfs_dat_info *di;
int err;
dat = nilfs_mdt_new(nilfs, NULL, NILFS_DAT_INO, sizeof(*di));
if (dat) {
err = nilfs_palloc_init_blockgroup(dat, entry_size);
if (unlikely(err)) {
nilfs_mdt_destroy(dat);
return NULL;
}
di = NILFS_DAT_I(dat);
lockdep_set_class(&di->mi.mi_sem, &dat_lock_key);
nilfs_palloc_setup_cache(dat, &di->palloc_cache);
}
return dat;
}
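With nilfs_dat_new()/nilfs_dat_read() in place (and the analogous cpfile and ifile helpers), bringing up a metadata file reduces to a create-then-read pattern. A hedged sketch of the intended call sequence; example_load_dat and the source of raw_inode are placeholders and error handling is trimmed, so this is not the actual load path.

```c
/* Illustrative sketch only -- not part of the patch. */
static struct inode *example_load_dat(struct the_nilfs *nilfs,
				      size_t entry_size,
				      struct nilfs_inode *raw_inode)
{
	struct inode *dat;

	dat = nilfs_dat_new(nilfs, entry_size);	/* allocate + set up cache */
	if (!dat)
		return NULL;

	if (nilfs_dat_read(dat, raw_inode)) {	/* fill in from disk inode */
		nilfs_mdt_destroy(dat);
		return NULL;
	}
	return dat;
}
```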
...@@ -53,4 +53,7 @@ int nilfs_dat_freev(struct inode *, __u64 *, size_t); ...@@ -53,4 +53,7 @@ int nilfs_dat_freev(struct inode *, __u64 *, size_t);
int nilfs_dat_move(struct inode *, __u64, sector_t); int nilfs_dat_move(struct inode *, __u64, sector_t);
ssize_t nilfs_dat_get_vinfo(struct inode *, void *, unsigned, size_t); ssize_t nilfs_dat_get_vinfo(struct inode *, void *, unsigned, size_t);
int nilfs_dat_read(struct inode *dat, struct nilfs_inode *raw_inode);
struct inode *nilfs_dat_new(struct the_nilfs *nilfs, size_t entry_size);
#endif /* _NILFS_DAT_H */ #endif /* _NILFS_DAT_H */
...@@ -99,9 +99,9 @@ static int nilfs_prepare_chunk(struct page *page, ...@@ -99,9 +99,9 @@ static int nilfs_prepare_chunk(struct page *page,
NULL, nilfs_get_block); NULL, nilfs_get_block);
} }
static int nilfs_commit_chunk(struct page *page, static void nilfs_commit_chunk(struct page *page,
struct address_space *mapping, struct address_space *mapping,
unsigned from, unsigned to) unsigned from, unsigned to)
{ {
struct inode *dir = mapping->host; struct inode *dir = mapping->host;
struct nilfs_sb_info *sbi = NILFS_SB(dir->i_sb); struct nilfs_sb_info *sbi = NILFS_SB(dir->i_sb);
...@@ -112,15 +112,13 @@ static int nilfs_commit_chunk(struct page *page, ...@@ -112,15 +112,13 @@ static int nilfs_commit_chunk(struct page *page,
nr_dirty = nilfs_page_count_clean_buffers(page, from, to); nr_dirty = nilfs_page_count_clean_buffers(page, from, to);
copied = block_write_end(NULL, mapping, pos, len, len, page, NULL); copied = block_write_end(NULL, mapping, pos, len, len, page, NULL);
if (pos + copied > dir->i_size) { if (pos + copied > dir->i_size)
i_size_write(dir, pos + copied); i_size_write(dir, pos + copied);
mark_inode_dirty(dir);
}
if (IS_DIRSYNC(dir)) if (IS_DIRSYNC(dir))
nilfs_set_transaction_flag(NILFS_TI_SYNC); nilfs_set_transaction_flag(NILFS_TI_SYNC);
err = nilfs_set_file_dirty(sbi, dir, nr_dirty); err = nilfs_set_file_dirty(sbi, dir, nr_dirty);
WARN_ON(err); /* do not happen */
unlock_page(page); unlock_page(page);
return err;
} }
static void nilfs_check_page(struct page *page) static void nilfs_check_page(struct page *page)
...@@ -455,11 +453,10 @@ void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de, ...@@ -455,11 +453,10 @@ void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
BUG_ON(err); BUG_ON(err);
de->inode = cpu_to_le64(inode->i_ino); de->inode = cpu_to_le64(inode->i_ino);
nilfs_set_de_type(de, inode); nilfs_set_de_type(de, inode);
err = nilfs_commit_chunk(page, mapping, from, to); nilfs_commit_chunk(page, mapping, from, to);
nilfs_put_page(page); nilfs_put_page(page);
dir->i_mtime = dir->i_ctime = CURRENT_TIME; dir->i_mtime = dir->i_ctime = CURRENT_TIME;
/* NILFS_I(dir)->i_flags &= ~NILFS_BTREE_FL; */ /* NILFS_I(dir)->i_flags &= ~NILFS_BTREE_FL; */
mark_inode_dirty(dir);
} }
/* /*
...@@ -548,10 +545,10 @@ int nilfs_add_link(struct dentry *dentry, struct inode *inode) ...@@ -548,10 +545,10 @@ int nilfs_add_link(struct dentry *dentry, struct inode *inode)
memcpy(de->name, name, namelen); memcpy(de->name, name, namelen);
de->inode = cpu_to_le64(inode->i_ino); de->inode = cpu_to_le64(inode->i_ino);
nilfs_set_de_type(de, inode); nilfs_set_de_type(de, inode);
err = nilfs_commit_chunk(page, page->mapping, from, to); nilfs_commit_chunk(page, page->mapping, from, to);
dir->i_mtime = dir->i_ctime = CURRENT_TIME; dir->i_mtime = dir->i_ctime = CURRENT_TIME;
/* NILFS_I(dir)->i_flags &= ~NILFS_BTREE_FL; */ /* NILFS_I(dir)->i_flags &= ~NILFS_BTREE_FL; */
mark_inode_dirty(dir); nilfs_mark_inode_dirty(dir);
/* OFFSET_CACHE */ /* OFFSET_CACHE */
out_put: out_put:
nilfs_put_page(page); nilfs_put_page(page);
...@@ -595,10 +592,9 @@ int nilfs_delete_entry(struct nilfs_dir_entry *dir, struct page *page) ...@@ -595,10 +592,9 @@ int nilfs_delete_entry(struct nilfs_dir_entry *dir, struct page *page)
if (pde) if (pde)
pde->rec_len = cpu_to_le16(to - from); pde->rec_len = cpu_to_le16(to - from);
dir->inode = 0; dir->inode = 0;
err = nilfs_commit_chunk(page, mapping, from, to); nilfs_commit_chunk(page, mapping, from, to);
inode->i_ctime = inode->i_mtime = CURRENT_TIME; inode->i_ctime = inode->i_mtime = CURRENT_TIME;
/* NILFS_I(inode)->i_flags &= ~NILFS_BTREE_FL; */ /* NILFS_I(inode)->i_flags &= ~NILFS_BTREE_FL; */
mark_inode_dirty(inode);
out: out:
nilfs_put_page(page); nilfs_put_page(page);
return err; return err;
...@@ -640,7 +636,7 @@ int nilfs_make_empty(struct inode *inode, struct inode *parent) ...@@ -640,7 +636,7 @@ int nilfs_make_empty(struct inode *inode, struct inode *parent)
memcpy(de->name, "..\0", 4); memcpy(de->name, "..\0", 4);
nilfs_set_de_type(de, inode); nilfs_set_de_type(de, inode);
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr, KM_USER0);
err = nilfs_commit_chunk(page, mapping, 0, chunk_size); nilfs_commit_chunk(page, mapping, 0, chunk_size);
fail: fail:
page_cache_release(page); page_cache_release(page);
return err; return err;
......
...@@ -61,6 +61,8 @@ void nilfs_commit_gcdat_inode(struct the_nilfs *nilfs) ...@@ -61,6 +61,8 @@ void nilfs_commit_gcdat_inode(struct the_nilfs *nilfs)
nilfs_bmap_commit_gcdat(gii->i_bmap, dii->i_bmap); nilfs_bmap_commit_gcdat(gii->i_bmap, dii->i_bmap);
nilfs_palloc_clear_cache(dat);
nilfs_palloc_clear_cache(gcdat);
nilfs_clear_dirty_pages(mapping); nilfs_clear_dirty_pages(mapping);
nilfs_copy_back_pages(mapping, gmapping); nilfs_copy_back_pages(mapping, gmapping);
/* note: mdt dirty flags should be cleared by segctor. */ /* note: mdt dirty flags should be cleared by segctor. */
...@@ -79,6 +81,7 @@ void nilfs_clear_gcdat_inode(struct the_nilfs *nilfs) ...@@ -79,6 +81,7 @@ void nilfs_clear_gcdat_inode(struct the_nilfs *nilfs)
gcdat->i_state = I_CLEAR; gcdat->i_state = I_CLEAR;
gii->i_flags = 0; gii->i_flags = 0;
nilfs_palloc_clear_cache(gcdat);
truncate_inode_pages(gcdat->i_mapping, 0); truncate_inode_pages(gcdat->i_mapping, 0);
truncate_inode_pages(&gii->i_btnode_cache, 0); truncate_inode_pages(&gii->i_btnode_cache, 0);
} }
...@@ -149,7 +149,7 @@ int nilfs_gccache_submit_read_node(struct inode *inode, sector_t pbn, ...@@ -149,7 +149,7 @@ int nilfs_gccache_submit_read_node(struct inode *inode, sector_t pbn,
__u64 vbn, struct buffer_head **out_bh) __u64 vbn, struct buffer_head **out_bh)
{ {
int ret = nilfs_btnode_submit_block(&NILFS_I(inode)->i_btnode_cache, int ret = nilfs_btnode_submit_block(&NILFS_I(inode)->i_btnode_cache,
vbn ? : pbn, pbn, out_bh, 0); vbn ? : pbn, pbn, out_bh);
if (ret == -EEXIST) /* internal code (cache hit) */ if (ret == -EEXIST) /* internal code (cache hit) */
ret = 0; ret = 0;
return ret; return ret;
...@@ -212,9 +212,10 @@ void nilfs_destroy_gccache(struct the_nilfs *nilfs) ...@@ -212,9 +212,10 @@ void nilfs_destroy_gccache(struct the_nilfs *nilfs)
static struct inode *alloc_gcinode(struct the_nilfs *nilfs, ino_t ino, static struct inode *alloc_gcinode(struct the_nilfs *nilfs, ino_t ino,
__u64 cno) __u64 cno)
{ {
struct inode *inode = nilfs_mdt_new_common(nilfs, NULL, ino, GFP_NOFS); struct inode *inode;
struct nilfs_inode_info *ii; struct nilfs_inode_info *ii;
inode = nilfs_mdt_new_common(nilfs, NULL, ino, GFP_NOFS, 0);
if (!inode) if (!inode)
return NULL; return NULL;
...@@ -265,7 +266,6 @@ struct inode *nilfs_gc_iget(struct the_nilfs *nilfs, ino_t ino, __u64 cno) ...@@ -265,7 +266,6 @@ struct inode *nilfs_gc_iget(struct the_nilfs *nilfs, ino_t ino, __u64 cno)
*/ */
void nilfs_clear_gcinode(struct inode *inode) void nilfs_clear_gcinode(struct inode *inode)
{ {
nilfs_mdt_clear(inode);
nilfs_mdt_destroy(inode); nilfs_mdt_destroy(inode);
} }
......
...@@ -29,6 +29,17 @@ ...@@ -29,6 +29,17 @@
#include "alloc.h" #include "alloc.h"
#include "ifile.h" #include "ifile.h"
struct nilfs_ifile_info {
struct nilfs_mdt_info mi;
struct nilfs_palloc_cache palloc_cache;
};
static inline struct nilfs_ifile_info *NILFS_IFILE_I(struct inode *ifile)
{
return (struct nilfs_ifile_info *)NILFS_MDT(ifile);
}
/** /**
* nilfs_ifile_create_inode - create a new disk inode * nilfs_ifile_create_inode - create a new disk inode
* @ifile: ifile inode * @ifile: ifile inode
...@@ -148,3 +159,27 @@ int nilfs_ifile_get_inode_block(struct inode *ifile, ino_t ino, ...@@ -148,3 +159,27 @@ int nilfs_ifile_get_inode_block(struct inode *ifile, ino_t ino,
} }
return err; return err;
} }
/**
* nilfs_ifile_new - create inode file
* @sbi: nilfs_sb_info struct
* @inode_size: size of an inode
*/
struct inode *nilfs_ifile_new(struct nilfs_sb_info *sbi, size_t inode_size)
{
struct inode *ifile;
int err;
ifile = nilfs_mdt_new(sbi->s_nilfs, sbi->s_super, NILFS_IFILE_INO,
sizeof(struct nilfs_ifile_info));
if (ifile) {
err = nilfs_palloc_init_blockgroup(ifile, inode_size);
if (unlikely(err)) {
nilfs_mdt_destroy(ifile);
return NULL;
}
nilfs_palloc_setup_cache(ifile,
&NILFS_IFILE_I(ifile)->palloc_cache);
}
return ifile;
}
...@@ -49,4 +49,6 @@ int nilfs_ifile_create_inode(struct inode *, ino_t *, struct buffer_head **); ...@@ -49,4 +49,6 @@ int nilfs_ifile_create_inode(struct inode *, ino_t *, struct buffer_head **);
int nilfs_ifile_delete_inode(struct inode *, ino_t); int nilfs_ifile_delete_inode(struct inode *, ino_t);
int nilfs_ifile_get_inode_block(struct inode *, ino_t, struct buffer_head **); int nilfs_ifile_get_inode_block(struct inode *, ino_t, struct buffer_head **);
struct inode *nilfs_ifile_new(struct nilfs_sb_info *sbi, size_t inode_size);
#endif /* _NILFS_IFILE_H */ #endif /* _NILFS_IFILE_H */
...@@ -97,6 +97,7 @@ int nilfs_get_block(struct inode *inode, sector_t blkoff, ...@@ -97,6 +97,7 @@ int nilfs_get_block(struct inode *inode, sector_t blkoff,
nilfs_transaction_abort(inode->i_sb); nilfs_transaction_abort(inode->i_sb);
goto out; goto out;
} }
nilfs_mark_inode_dirty(inode);
nilfs_transaction_commit(inode->i_sb); /* never fails */ nilfs_transaction_commit(inode->i_sb); /* never fails */
/* Error handling should be detailed */ /* Error handling should be detailed */
set_buffer_new(bh_result); set_buffer_new(bh_result);
...@@ -322,7 +323,6 @@ struct inode *nilfs_new_inode(struct inode *dir, int mode) ...@@ -322,7 +323,6 @@ struct inode *nilfs_new_inode(struct inode *dir, int mode)
nilfs_init_acl(), proper cancellation of nilfs_init_acl(), proper cancellation of
above jobs should be considered */ above jobs should be considered */
mark_inode_dirty(inode);
return inode; return inode;
failed_acl: failed_acl:
...@@ -525,7 +525,6 @@ void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh) ...@@ -525,7 +525,6 @@ void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh)
raw_inode = nilfs_ifile_map_inode(sbi->s_ifile, ino, ibh); raw_inode = nilfs_ifile_map_inode(sbi->s_ifile, ino, ibh);
/* The buffer is guarded with lock_buffer() by the caller */
if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state)) if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
memset(raw_inode, 0, NILFS_MDT(sbi->s_ifile)->mi_entry_size); memset(raw_inode, 0, NILFS_MDT(sbi->s_ifile)->mi_entry_size);
set_bit(NILFS_I_INODE_DIRTY, &ii->i_state); set_bit(NILFS_I_INODE_DIRTY, &ii->i_state);
...@@ -599,6 +598,7 @@ void nilfs_truncate(struct inode *inode) ...@@ -599,6 +598,7 @@ void nilfs_truncate(struct inode *inode)
if (IS_SYNC(inode)) if (IS_SYNC(inode))
nilfs_set_transaction_flag(NILFS_TI_SYNC); nilfs_set_transaction_flag(NILFS_TI_SYNC);
nilfs_mark_inode_dirty(inode);
nilfs_set_file_dirty(NILFS_SB(sb), inode, 0); nilfs_set_file_dirty(NILFS_SB(sb), inode, 0);
nilfs_transaction_commit(sb); nilfs_transaction_commit(sb);
/* May construct a logical segment and may fail in sync mode. /* May construct a logical segment and may fail in sync mode.
...@@ -623,6 +623,7 @@ void nilfs_delete_inode(struct inode *inode) ...@@ -623,6 +623,7 @@ void nilfs_delete_inode(struct inode *inode)
truncate_inode_pages(&inode->i_data, 0); truncate_inode_pages(&inode->i_data, 0);
nilfs_truncate_bmap(ii, 0); nilfs_truncate_bmap(ii, 0);
nilfs_mark_inode_dirty(inode);
nilfs_free_inode(inode); nilfs_free_inode(inode);
/* nilfs_free_inode() marks inode buffer dirty */ /* nilfs_free_inode() marks inode buffer dirty */
if (IS_SYNC(inode)) if (IS_SYNC(inode))
...@@ -745,9 +746,7 @@ int nilfs_mark_inode_dirty(struct inode *inode) ...@@ -745,9 +746,7 @@ int nilfs_mark_inode_dirty(struct inode *inode)
"failed to reget inode block.\n"); "failed to reget inode block.\n");
return err; return err;
} }
lock_buffer(ibh);
nilfs_update_inode(inode, ibh); nilfs_update_inode(inode, ibh);
unlock_buffer(ibh);
nilfs_mdt_mark_buffer_dirty(ibh); nilfs_mdt_mark_buffer_dirty(ibh);
nilfs_mdt_mark_dirty(sbi->s_ifile); nilfs_mdt_mark_dirty(sbi->s_ifile);
brelse(ibh); brelse(ibh);
......
...@@ -186,7 +186,7 @@ nilfs_mdt_submit_block(struct inode *inode, unsigned long blkoff, ...@@ -186,7 +186,7 @@ nilfs_mdt_submit_block(struct inode *inode, unsigned long blkoff,
} }
static int nilfs_mdt_read_block(struct inode *inode, unsigned long block, static int nilfs_mdt_read_block(struct inode *inode, unsigned long block,
struct buffer_head **out_bh) int readahead, struct buffer_head **out_bh)
{ {
struct buffer_head *first_bh, *bh; struct buffer_head *first_bh, *bh;
unsigned long blkoff; unsigned long blkoff;
...@@ -200,16 +200,18 @@ static int nilfs_mdt_read_block(struct inode *inode, unsigned long block, ...@@ -200,16 +200,18 @@ static int nilfs_mdt_read_block(struct inode *inode, unsigned long block,
if (unlikely(err)) if (unlikely(err))
goto failed; goto failed;
blkoff = block + 1; if (readahead) {
for (i = 0; i < nr_ra_blocks; i++, blkoff++) { blkoff = block + 1;
err = nilfs_mdt_submit_block(inode, blkoff, READA, &bh); for (i = 0; i < nr_ra_blocks; i++, blkoff++) {
if (likely(!err || err == -EEXIST)) err = nilfs_mdt_submit_block(inode, blkoff, READA, &bh);
brelse(bh); if (likely(!err || err == -EEXIST))
else if (err != -EBUSY) brelse(bh);
break; /* abort readahead if bmap lookup failed */ else if (err != -EBUSY)
break;
if (!buffer_locked(first_bh)) /* abort readahead if bmap lookup failed */
goto out_no_wait; if (!buffer_locked(first_bh))
goto out_no_wait;
}
} }
wait_on_buffer(first_bh); wait_on_buffer(first_bh);
...@@ -263,7 +265,7 @@ int nilfs_mdt_get_block(struct inode *inode, unsigned long blkoff, int create, ...@@ -263,7 +265,7 @@ int nilfs_mdt_get_block(struct inode *inode, unsigned long blkoff, int create,
/* Should be rewritten with merging nilfs_mdt_read_block() */ /* Should be rewritten with merging nilfs_mdt_read_block() */
retry: retry:
ret = nilfs_mdt_read_block(inode, blkoff, out_bh); ret = nilfs_mdt_read_block(inode, blkoff, !create, out_bh);
if (!create || ret != -ENOENT) if (!create || ret != -ENOENT)
return ret; return ret;
...@@ -371,7 +373,7 @@ int nilfs_mdt_mark_block_dirty(struct inode *inode, unsigned long block) ...@@ -371,7 +373,7 @@ int nilfs_mdt_mark_block_dirty(struct inode *inode, unsigned long block)
struct buffer_head *bh; struct buffer_head *bh;
int err; int err;
err = nilfs_mdt_read_block(inode, block, &bh); err = nilfs_mdt_read_block(inode, block, 0, &bh);
if (unlikely(err)) if (unlikely(err))
return err; return err;
nilfs_mark_buffer_dirty(bh); nilfs_mark_buffer_dirty(bh);
...@@ -445,9 +447,17 @@ static const struct file_operations def_mdt_fops; ...@@ -445,9 +447,17 @@ static const struct file_operations def_mdt_fops;
* longer than those of the super block structs; they may continue for * longer than those of the super block structs; they may continue for
* several consecutive mounts/umounts. This would need discussions. * several consecutive mounts/umounts. This would need discussions.
*/ */
/**
* nilfs_mdt_new_common - allocate a pseudo inode for metadata file
* @nilfs: nilfs object
* @sb: super block instance the metadata file belongs to
* @ino: inode number
* @gfp_mask: gfp mask for data pages
* @objsz: size of the private object attached to inode->i_private
*/
struct inode * struct inode *
nilfs_mdt_new_common(struct the_nilfs *nilfs, struct super_block *sb, nilfs_mdt_new_common(struct the_nilfs *nilfs, struct super_block *sb,
ino_t ino, gfp_t gfp_mask) ino_t ino, gfp_t gfp_mask, size_t objsz)
{ {
struct inode *inode = nilfs_alloc_inode_common(nilfs); struct inode *inode = nilfs_alloc_inode_common(nilfs);
...@@ -455,8 +465,9 @@ nilfs_mdt_new_common(struct the_nilfs *nilfs, struct super_block *sb, ...@@ -455,8 +465,9 @@ nilfs_mdt_new_common(struct the_nilfs *nilfs, struct super_block *sb,
return NULL; return NULL;
else { else {
struct address_space * const mapping = &inode->i_data; struct address_space * const mapping = &inode->i_data;
struct nilfs_mdt_info *mi = kzalloc(sizeof(*mi), GFP_NOFS); struct nilfs_mdt_info *mi;
mi = kzalloc(max(sizeof(*mi), objsz), GFP_NOFS);
if (!mi) { if (!mi) {
nilfs_destroy_inode(inode); nilfs_destroy_inode(inode);
return NULL; return NULL;
...@@ -513,11 +524,11 @@ nilfs_mdt_new_common(struct the_nilfs *nilfs, struct super_block *sb, ...@@ -513,11 +524,11 @@ nilfs_mdt_new_common(struct the_nilfs *nilfs, struct super_block *sb,
} }
struct inode *nilfs_mdt_new(struct the_nilfs *nilfs, struct super_block *sb, struct inode *nilfs_mdt_new(struct the_nilfs *nilfs, struct super_block *sb,
ino_t ino) ino_t ino, size_t objsz)
{ {
struct inode *inode = nilfs_mdt_new_common(nilfs, sb, ino, struct inode *inode;
NILFS_MDT_GFP);
inode = nilfs_mdt_new_common(nilfs, sb, ino, NILFS_MDT_GFP, objsz);
if (!inode) if (!inode)
return NULL; return NULL;
...@@ -544,14 +555,15 @@ void nilfs_mdt_set_shadow(struct inode *orig, struct inode *shadow) ...@@ -544,14 +555,15 @@ void nilfs_mdt_set_shadow(struct inode *orig, struct inode *shadow)
&NILFS_I(orig)->i_btnode_cache; &NILFS_I(orig)->i_btnode_cache;
} }
void nilfs_mdt_clear(struct inode *inode) static void nilfs_mdt_clear(struct inode *inode)
{ {
struct nilfs_inode_info *ii = NILFS_I(inode); struct nilfs_inode_info *ii = NILFS_I(inode);
invalidate_mapping_pages(inode->i_mapping, 0, -1); invalidate_mapping_pages(inode->i_mapping, 0, -1);
truncate_inode_pages(inode->i_mapping, 0); truncate_inode_pages(inode->i_mapping, 0);
nilfs_bmap_clear(ii->i_bmap); if (test_bit(NILFS_I_BMAP, &ii->i_state))
nilfs_bmap_clear(ii->i_bmap);
nilfs_btnode_cache_clear(&ii->i_btnode_cache); nilfs_btnode_cache_clear(&ii->i_btnode_cache);
} }
...@@ -559,6 +571,10 @@ void nilfs_mdt_destroy(struct inode *inode) ...@@ -559,6 +571,10 @@ void nilfs_mdt_destroy(struct inode *inode)
{ {
struct nilfs_mdt_info *mdi = NILFS_MDT(inode); struct nilfs_mdt_info *mdi = NILFS_MDT(inode);
if (mdi->mi_palloc_cache)
nilfs_palloc_destroy_cache(inode);
nilfs_mdt_clear(inode);
kfree(mdi->mi_bgl); /* kfree(NULL) is safe */ kfree(mdi->mi_bgl); /* kfree(NULL) is safe */
kfree(mdi); kfree(mdi);
nilfs_destroy_inode(inode); nilfs_destroy_inode(inode);
......
...@@ -36,6 +36,7 @@ ...@@ -36,6 +36,7 @@
* @mi_entry_size: size of an entry * @mi_entry_size: size of an entry
* @mi_first_entry_offset: offset to the first entry * @mi_first_entry_offset: offset to the first entry
* @mi_entries_per_block: number of entries in a block * @mi_entries_per_block: number of entries in a block
* @mi_palloc_cache: persistent object allocator cache
* @mi_blocks_per_group: number of blocks in a group * @mi_blocks_per_group: number of blocks in a group
* @mi_blocks_per_desc_block: number of blocks per descriptor block * @mi_blocks_per_desc_block: number of blocks per descriptor block
*/ */
...@@ -46,6 +47,7 @@ struct nilfs_mdt_info { ...@@ -46,6 +47,7 @@ struct nilfs_mdt_info {
unsigned mi_entry_size; unsigned mi_entry_size;
unsigned mi_first_entry_offset; unsigned mi_first_entry_offset;
unsigned long mi_entries_per_block; unsigned long mi_entries_per_block;
struct nilfs_palloc_cache *mi_palloc_cache;
unsigned long mi_blocks_per_group; unsigned long mi_blocks_per_group;
unsigned long mi_blocks_per_desc_block; unsigned long mi_blocks_per_desc_block;
}; };
...@@ -74,11 +76,11 @@ int nilfs_mdt_forget_block(struct inode *, unsigned long); ...@@ -74,11 +76,11 @@ int nilfs_mdt_forget_block(struct inode *, unsigned long);
int nilfs_mdt_mark_block_dirty(struct inode *, unsigned long); int nilfs_mdt_mark_block_dirty(struct inode *, unsigned long);
int nilfs_mdt_fetch_dirty(struct inode *); int nilfs_mdt_fetch_dirty(struct inode *);
struct inode *nilfs_mdt_new(struct the_nilfs *, struct super_block *, ino_t); struct inode *nilfs_mdt_new(struct the_nilfs *, struct super_block *, ino_t,
size_t);
struct inode *nilfs_mdt_new_common(struct the_nilfs *, struct super_block *, struct inode *nilfs_mdt_new_common(struct the_nilfs *, struct super_block *,
ino_t, gfp_t); ino_t, gfp_t, size_t);
void nilfs_mdt_destroy(struct inode *); void nilfs_mdt_destroy(struct inode *);
void nilfs_mdt_clear(struct inode *);
void nilfs_mdt_set_entry_size(struct inode *, unsigned, unsigned); void nilfs_mdt_set_entry_size(struct inode *, unsigned, unsigned);
void nilfs_mdt_set_shadow(struct inode *, struct inode *); void nilfs_mdt_set_shadow(struct inode *, struct inode *);
...@@ -104,21 +106,4 @@ static inline __u64 nilfs_mdt_cno(struct inode *inode) ...@@ -104,21 +106,4 @@ static inline __u64 nilfs_mdt_cno(struct inode *inode)
#define nilfs_mdt_bgl_lock(inode, bg) \ #define nilfs_mdt_bgl_lock(inode, bg) \
(&NILFS_MDT(inode)->mi_bgl->locks[(bg) & (NR_BG_LOCKS-1)].lock) (&NILFS_MDT(inode)->mi_bgl->locks[(bg) & (NR_BG_LOCKS-1)].lock)
static inline int
nilfs_mdt_read_inode_direct(struct inode *inode, struct buffer_head *bh,
unsigned n)
{
return nilfs_read_inode_common(
inode, (struct nilfs_inode *)(bh->b_data + n));
}
static inline void
nilfs_mdt_write_inode_direct(struct inode *inode, struct buffer_head *bh,
unsigned n)
{
nilfs_write_inode_common(
inode, (struct nilfs_inode *)(bh->b_data + n), 1);
}
#endif /* _NILFS_MDT_H */ #endif /* _NILFS_MDT_H */
...@@ -120,7 +120,7 @@ static int nilfs_create(struct inode *dir, struct dentry *dentry, int mode, ...@@ -120,7 +120,7 @@ static int nilfs_create(struct inode *dir, struct dentry *dentry, int mode,
inode->i_op = &nilfs_file_inode_operations; inode->i_op = &nilfs_file_inode_operations;
inode->i_fop = &nilfs_file_operations; inode->i_fop = &nilfs_file_operations;
inode->i_mapping->a_ops = &nilfs_aops; inode->i_mapping->a_ops = &nilfs_aops;
mark_inode_dirty(inode); nilfs_mark_inode_dirty(inode);
err = nilfs_add_nondir(dentry, inode); err = nilfs_add_nondir(dentry, inode);
} }
if (!err) if (!err)
...@@ -148,7 +148,7 @@ nilfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev) ...@@ -148,7 +148,7 @@ nilfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev)
err = PTR_ERR(inode); err = PTR_ERR(inode);
if (!IS_ERR(inode)) { if (!IS_ERR(inode)) {
init_special_inode(inode, inode->i_mode, rdev); init_special_inode(inode, inode->i_mode, rdev);
mark_inode_dirty(inode); nilfs_mark_inode_dirty(inode);
err = nilfs_add_nondir(dentry, inode); err = nilfs_add_nondir(dentry, inode);
} }
if (!err) if (!err)
...@@ -188,7 +188,7 @@ static int nilfs_symlink(struct inode *dir, struct dentry *dentry, ...@@ -188,7 +188,7 @@ static int nilfs_symlink(struct inode *dir, struct dentry *dentry,
goto out_fail; goto out_fail;
/* mark_inode_dirty(inode); */ /* mark_inode_dirty(inode); */
/* nilfs_new_inode() and page_symlink() do this */ /* page_symlink() do this */
err = nilfs_add_nondir(dentry, inode); err = nilfs_add_nondir(dentry, inode);
out: out:
...@@ -200,7 +200,8 @@ static int nilfs_symlink(struct inode *dir, struct dentry *dentry, ...@@ -200,7 +200,8 @@ static int nilfs_symlink(struct inode *dir, struct dentry *dentry,
return err; return err;
out_fail: out_fail:
inode_dec_link_count(inode); drop_nlink(inode);
nilfs_mark_inode_dirty(inode);
iput(inode); iput(inode);
goto out; goto out;
} }
...@@ -245,7 +246,7 @@ static int nilfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) ...@@ -245,7 +246,7 @@ static int nilfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
if (err) if (err)
return err; return err;
inode_inc_link_count(dir); inc_nlink(dir);
inode = nilfs_new_inode(dir, S_IFDIR | mode); inode = nilfs_new_inode(dir, S_IFDIR | mode);
err = PTR_ERR(inode); err = PTR_ERR(inode);
...@@ -256,7 +257,7 @@ static int nilfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) ...@@ -256,7 +257,7 @@ static int nilfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
inode->i_fop = &nilfs_dir_operations; inode->i_fop = &nilfs_dir_operations;
inode->i_mapping->a_ops = &nilfs_aops; inode->i_mapping->a_ops = &nilfs_aops;
inode_inc_link_count(inode); inc_nlink(inode);
err = nilfs_make_empty(inode, dir); err = nilfs_make_empty(inode, dir);
if (err) if (err)
...@@ -266,6 +267,7 @@ static int nilfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) ...@@ -266,6 +267,7 @@ static int nilfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
if (err) if (err)
goto out_fail; goto out_fail;
nilfs_mark_inode_dirty(inode);
d_instantiate(dentry, inode); d_instantiate(dentry, inode);
out: out:
if (!err) if (!err)
...@@ -276,26 +278,23 @@ static int nilfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) ...@@ -276,26 +278,23 @@ static int nilfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
return err; return err;
out_fail: out_fail:
inode_dec_link_count(inode); drop_nlink(inode);
inode_dec_link_count(inode); drop_nlink(inode);
nilfs_mark_inode_dirty(inode);
iput(inode); iput(inode);
out_dir: out_dir:
inode_dec_link_count(dir); drop_nlink(dir);
nilfs_mark_inode_dirty(dir);
goto out; goto out;
} }
static int nilfs_unlink(struct inode *dir, struct dentry *dentry) static int nilfs_do_unlink(struct inode *dir, struct dentry *dentry)
{ {
struct inode *inode; struct inode *inode;
struct nilfs_dir_entry *de; struct nilfs_dir_entry *de;
struct page *page; struct page *page;
struct nilfs_transaction_info ti;
int err; int err;
err = nilfs_transaction_begin(dir->i_sb, &ti, 0);
if (err)
return err;
err = -ENOENT; err = -ENOENT;
de = nilfs_find_entry(dir, dentry, &page); de = nilfs_find_entry(dir, dentry, &page);
if (!de) if (!de)
...@@ -317,12 +316,28 @@ static int nilfs_unlink(struct inode *dir, struct dentry *dentry) ...@@ -317,12 +316,28 @@ static int nilfs_unlink(struct inode *dir, struct dentry *dentry)
goto out; goto out;
inode->i_ctime = dir->i_ctime; inode->i_ctime = dir->i_ctime;
inode_dec_link_count(inode); drop_nlink(inode);
err = 0; err = 0;
out: out:
if (!err) return err;
}
static int nilfs_unlink(struct inode *dir, struct dentry *dentry)
{
struct nilfs_transaction_info ti;
int err;
err = nilfs_transaction_begin(dir->i_sb, &ti, 0);
if (err)
return err;
err = nilfs_do_unlink(dir, dentry);
if (!err) {
nilfs_mark_inode_dirty(dir);
nilfs_mark_inode_dirty(dentry->d_inode);
err = nilfs_transaction_commit(dir->i_sb); err = nilfs_transaction_commit(dir->i_sb);
else } else
nilfs_transaction_abort(dir->i_sb); nilfs_transaction_abort(dir->i_sb);
return err; return err;
...@@ -340,11 +355,13 @@ static int nilfs_rmdir(struct inode *dir, struct dentry *dentry) ...@@ -340,11 +355,13 @@ static int nilfs_rmdir(struct inode *dir, struct dentry *dentry)
err = -ENOTEMPTY; err = -ENOTEMPTY;
if (nilfs_empty_dir(inode)) { if (nilfs_empty_dir(inode)) {
err = nilfs_unlink(dir, dentry); err = nilfs_do_unlink(dir, dentry);
if (!err) { if (!err) {
inode->i_size = 0; inode->i_size = 0;
inode_dec_link_count(inode); drop_nlink(inode);
inode_dec_link_count(dir); nilfs_mark_inode_dirty(inode);
drop_nlink(dir);
nilfs_mark_inode_dirty(dir);
} }
} }
if (!err) if (!err)
...@@ -395,42 +412,48 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry, ...@@ -395,42 +412,48 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry,
new_de = nilfs_find_entry(new_dir, new_dentry, &new_page); new_de = nilfs_find_entry(new_dir, new_dentry, &new_page);
if (!new_de) if (!new_de)
goto out_dir; goto out_dir;
inode_inc_link_count(old_inode); inc_nlink(old_inode);
nilfs_set_link(new_dir, new_de, new_page, old_inode); nilfs_set_link(new_dir, new_de, new_page, old_inode);
nilfs_mark_inode_dirty(new_dir);
new_inode->i_ctime = CURRENT_TIME; new_inode->i_ctime = CURRENT_TIME;
if (dir_de) if (dir_de)
drop_nlink(new_inode); drop_nlink(new_inode);
inode_dec_link_count(new_inode); drop_nlink(new_inode);
nilfs_mark_inode_dirty(new_inode);
} else { } else {
if (dir_de) { if (dir_de) {
err = -EMLINK; err = -EMLINK;
if (new_dir->i_nlink >= NILFS_LINK_MAX) if (new_dir->i_nlink >= NILFS_LINK_MAX)
goto out_dir; goto out_dir;
} }
inode_inc_link_count(old_inode); inc_nlink(old_inode);
err = nilfs_add_link(new_dentry, old_inode); err = nilfs_add_link(new_dentry, old_inode);
if (err) { if (err) {
inode_dec_link_count(old_inode); drop_nlink(old_inode);
nilfs_mark_inode_dirty(old_inode);
goto out_dir; goto out_dir;
} }
if (dir_de) if (dir_de) {
inode_inc_link_count(new_dir); inc_nlink(new_dir);
nilfs_mark_inode_dirty(new_dir);
}
} }
/* /*
* Like most other Unix systems, set the ctime for inodes on a * Like most other Unix systems, set the ctime for inodes on a
* rename. * rename.
* inode_dec_link_count() will mark the inode dirty.
*/ */
old_inode->i_ctime = CURRENT_TIME; old_inode->i_ctime = CURRENT_TIME;
nilfs_delete_entry(old_de, old_page); nilfs_delete_entry(old_de, old_page);
inode_dec_link_count(old_inode); drop_nlink(old_inode);
if (dir_de) { if (dir_de) {
nilfs_set_link(old_inode, dir_de, dir_page, new_dir); nilfs_set_link(old_inode, dir_de, dir_page, new_dir);
inode_dec_link_count(old_dir); drop_nlink(old_dir);
} }
nilfs_mark_inode_dirty(old_dir);
nilfs_mark_inode_dirty(old_inode);
err = nilfs_transaction_commit(old_dir->i_sb); err = nilfs_transaction_commit(old_dir->i_sb);
return err; return err;
......
...@@ -770,14 +770,8 @@ int nilfs_recover_logical_segments(struct the_nilfs *nilfs, ...@@ -770,14 +770,8 @@ int nilfs_recover_logical_segments(struct the_nilfs *nilfs,
nilfs_finish_roll_forward(nilfs, sbi, ri); nilfs_finish_roll_forward(nilfs, sbi, ri);
} }
nilfs_detach_checkpoint(sbi);
return 0;
failed: failed:
nilfs_detach_checkpoint(sbi); nilfs_detach_checkpoint(sbi);
nilfs_mdt_clear(nilfs->ns_cpfile);
nilfs_mdt_clear(nilfs->ns_sufile);
nilfs_mdt_clear(nilfs->ns_dat);
return err; return err;
} }
...@@ -804,6 +798,7 @@ int nilfs_search_super_root(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi, ...@@ -804,6 +798,7 @@ int nilfs_search_super_root(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi,
struct nilfs_segsum_info ssi; struct nilfs_segsum_info ssi;
sector_t pseg_start, pseg_end, sr_pseg_start = 0; sector_t pseg_start, pseg_end, sr_pseg_start = 0;
sector_t seg_start, seg_end; /* range of full segment (block number) */ sector_t seg_start, seg_end; /* range of full segment (block number) */
sector_t b, end;
u64 seg_seq; u64 seg_seq;
__u64 segnum, nextnum = 0; __u64 segnum, nextnum = 0;
__u64 cno; __u64 cno;
...@@ -819,6 +814,11 @@ int nilfs_search_super_root(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi, ...@@ -819,6 +814,11 @@ int nilfs_search_super_root(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi,
/* Calculate range of segment */ /* Calculate range of segment */
nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end); nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);
/* Read ahead segment */
b = seg_start;
while (b <= seg_end)
sb_breadahead(sbi->s_super, b++);
for (;;) { for (;;) {
/* Load segment summary */ /* Load segment summary */
ret = load_segment_summary(sbi, pseg_start, seg_seq, &ssi, 1); ret = load_segment_summary(sbi, pseg_start, seg_seq, &ssi, 1);
...@@ -841,14 +841,20 @@ int nilfs_search_super_root(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi, ...@@ -841,14 +841,20 @@ int nilfs_search_super_root(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi,
ri->ri_nextnum = nextnum; ri->ri_nextnum = nextnum;
empty_seg = 0; empty_seg = 0;
if (!NILFS_SEG_HAS_SR(&ssi) && !scan_newer) {
/* This will never happen because a superblock
(last_segment) always points to a pseg
having a super root. */
ret = NILFS_SEG_FAIL_CONSISTENCY;
goto failed;
}
if (pseg_start == seg_start) {
nilfs_get_segment_range(nilfs, nextnum, &b, &end);
while (b <= end)
sb_breadahead(sbi->s_super, b++);
}
if (!NILFS_SEG_HAS_SR(&ssi)) { if (!NILFS_SEG_HAS_SR(&ssi)) {
if (!scan_newer) {
/* This will never happen because a superblock
(last_segment) always points to a pseg
having a super root. */
ret = NILFS_SEG_FAIL_CONSISTENCY;
goto failed;
}
if (!ri->ri_lsegs_start && NILFS_SEG_LOGBGN(&ssi)) { if (!ri->ri_lsegs_start && NILFS_SEG_LOGBGN(&ssi)) {
ri->ri_lsegs_start = pseg_start; ri->ri_lsegs_start = pseg_start;
ri->ri_lsegs_start_seq = seg_seq; ri->ri_lsegs_start_seq = seg_seq;
...@@ -919,7 +925,7 @@ int nilfs_search_super_root(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi, ...@@ -919,7 +925,7 @@ int nilfs_search_super_root(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi,
super_root_found: super_root_found:
/* Updating pointers relating to the latest checkpoint */ /* Updating pointers relating to the latest checkpoint */
list_splice(&segments, ri->ri_used_segments.prev); list_splice_tail(&segments, &ri->ri_used_segments);
nilfs->ns_last_pseg = sr_pseg_start; nilfs->ns_last_pseg = sr_pseg_start;
nilfs->ns_last_seq = nilfs->ns_seg_seq; nilfs->ns_last_seq = nilfs->ns_seg_seq;
nilfs->ns_last_cno = ri->ri_cno; nilfs->ns_last_cno = ri->ri_cno;
......
...@@ -24,10 +24,22 @@ ...@@ -24,10 +24,22 @@
#include <linux/buffer_head.h> #include <linux/buffer_head.h>
#include <linux/writeback.h> #include <linux/writeback.h>
#include <linux/crc32.h> #include <linux/crc32.h>
#include <linux/backing-dev.h>
#include "page.h" #include "page.h"
#include "segbuf.h" #include "segbuf.h"
struct nilfs_write_info {
struct the_nilfs *nilfs;
struct bio *bio;
int start, end; /* The region to be submitted */
int rest_blocks;
int max_pages;
int nr_vecs;
sector_t blocknr;
};
static struct kmem_cache *nilfs_segbuf_cachep; static struct kmem_cache *nilfs_segbuf_cachep;
static void nilfs_segbuf_init_once(void *obj) static void nilfs_segbuf_init_once(void *obj)
...@@ -63,6 +75,11 @@ struct nilfs_segment_buffer *nilfs_segbuf_new(struct super_block *sb) ...@@ -63,6 +75,11 @@ struct nilfs_segment_buffer *nilfs_segbuf_new(struct super_block *sb)
INIT_LIST_HEAD(&segbuf->sb_list); INIT_LIST_HEAD(&segbuf->sb_list);
INIT_LIST_HEAD(&segbuf->sb_segsum_buffers); INIT_LIST_HEAD(&segbuf->sb_segsum_buffers);
INIT_LIST_HEAD(&segbuf->sb_payload_buffers); INIT_LIST_HEAD(&segbuf->sb_payload_buffers);
init_completion(&segbuf->sb_bio_event);
atomic_set(&segbuf->sb_err, 0);
segbuf->sb_nbio = 0;
return segbuf; return segbuf;
} }
...@@ -83,6 +100,22 @@ void nilfs_segbuf_map(struct nilfs_segment_buffer *segbuf, __u64 segnum, ...@@ -83,6 +100,22 @@ void nilfs_segbuf_map(struct nilfs_segment_buffer *segbuf, __u64 segnum,
segbuf->sb_fseg_end - segbuf->sb_pseg_start + 1; segbuf->sb_fseg_end - segbuf->sb_pseg_start + 1;
} }
/**
* nilfs_segbuf_map_cont - map a new log behind a given log
* @segbuf: new segment buffer
* @prev: segment buffer containing a log to be continued
*/
void nilfs_segbuf_map_cont(struct nilfs_segment_buffer *segbuf,
struct nilfs_segment_buffer *prev)
{
segbuf->sb_segnum = prev->sb_segnum;
segbuf->sb_fseg_start = prev->sb_fseg_start;
segbuf->sb_fseg_end = prev->sb_fseg_end;
segbuf->sb_pseg_start = prev->sb_pseg_start + prev->sb_sum.nblocks;
segbuf->sb_rest_blocks =
segbuf->sb_fseg_end - segbuf->sb_pseg_start + 1;
}
void nilfs_segbuf_set_next_segnum(struct nilfs_segment_buffer *segbuf, void nilfs_segbuf_set_next_segnum(struct nilfs_segment_buffer *segbuf,
__u64 nextnum, struct the_nilfs *nilfs) __u64 nextnum, struct the_nilfs *nilfs)
{ {
...@@ -132,8 +165,6 @@ int nilfs_segbuf_reset(struct nilfs_segment_buffer *segbuf, unsigned flags, ...@@ -132,8 +165,6 @@ int nilfs_segbuf_reset(struct nilfs_segment_buffer *segbuf, unsigned flags,
segbuf->sb_sum.sumbytes = sizeof(struct nilfs_segment_summary); segbuf->sb_sum.sumbytes = sizeof(struct nilfs_segment_summary);
segbuf->sb_sum.nfinfo = segbuf->sb_sum.nfileblk = 0; segbuf->sb_sum.nfinfo = segbuf->sb_sum.nfileblk = 0;
segbuf->sb_sum.ctime = ctime; segbuf->sb_sum.ctime = ctime;
segbuf->sb_io_error = 0;
return 0; return 0;
} }
...@@ -219,7 +250,7 @@ void nilfs_segbuf_fill_in_data_crc(struct nilfs_segment_buffer *segbuf, ...@@ -219,7 +250,7 @@ void nilfs_segbuf_fill_in_data_crc(struct nilfs_segment_buffer *segbuf,
raw_sum->ss_datasum = cpu_to_le32(crc); raw_sum->ss_datasum = cpu_to_le32(crc);
} }
void nilfs_release_buffers(struct list_head *list) static void nilfs_release_buffers(struct list_head *list)
{ {
struct buffer_head *bh, *n; struct buffer_head *bh, *n;
...@@ -241,13 +272,56 @@ void nilfs_release_buffers(struct list_head *list) ...@@ -241,13 +272,56 @@ void nilfs_release_buffers(struct list_head *list)
} }
} }
static void nilfs_segbuf_clear(struct nilfs_segment_buffer *segbuf)
{
nilfs_release_buffers(&segbuf->sb_segsum_buffers);
nilfs_release_buffers(&segbuf->sb_payload_buffers);
}
/*
* Iterators for segment buffers
*/
void nilfs_clear_logs(struct list_head *logs)
{
struct nilfs_segment_buffer *segbuf;
list_for_each_entry(segbuf, logs, sb_list)
nilfs_segbuf_clear(segbuf);
}
void nilfs_truncate_logs(struct list_head *logs,
struct nilfs_segment_buffer *last)
{
struct nilfs_segment_buffer *n, *segbuf;
segbuf = list_prepare_entry(last, logs, sb_list);
list_for_each_entry_safe_continue(segbuf, n, logs, sb_list) {
list_del_init(&segbuf->sb_list);
nilfs_segbuf_clear(segbuf);
nilfs_segbuf_free(segbuf);
}
}
int nilfs_wait_on_logs(struct list_head *logs)
{
struct nilfs_segment_buffer *segbuf;
int err;
list_for_each_entry(segbuf, logs, sb_list) {
err = nilfs_segbuf_wait(segbuf);
if (err)
return err;
}
return 0;
}
/* /*
* BIO operations * BIO operations
*/ */
static void nilfs_end_bio_write(struct bio *bio, int err) static void nilfs_end_bio_write(struct bio *bio, int err)
{ {
const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
struct nilfs_write_info *wi = bio->bi_private; struct nilfs_segment_buffer *segbuf = bio->bi_private;
if (err == -EOPNOTSUPP) { if (err == -EOPNOTSUPP) {
set_bit(BIO_EOPNOTSUPP, &bio->bi_flags); set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
...@@ -256,21 +330,22 @@ static void nilfs_end_bio_write(struct bio *bio, int err) ...@@ -256,21 +330,22 @@ static void nilfs_end_bio_write(struct bio *bio, int err)
} }
if (!uptodate) if (!uptodate)
atomic_inc(&wi->err); atomic_inc(&segbuf->sb_err);
bio_put(bio); bio_put(bio);
complete(&wi->bio_event); complete(&segbuf->sb_bio_event);
} }
static int nilfs_submit_seg_bio(struct nilfs_write_info *wi, int mode) static int nilfs_segbuf_submit_bio(struct nilfs_segment_buffer *segbuf,
struct nilfs_write_info *wi, int mode)
{ {
struct bio *bio = wi->bio; struct bio *bio = wi->bio;
int err; int err;
if (wi->nbio > 0 && bdi_write_congested(wi->bdi)) { if (segbuf->sb_nbio > 0 && bdi_write_congested(wi->nilfs->ns_bdi)) {
wait_for_completion(&wi->bio_event); wait_for_completion(&segbuf->sb_bio_event);
wi->nbio--; segbuf->sb_nbio--;
if (unlikely(atomic_read(&wi->err))) { if (unlikely(atomic_read(&segbuf->sb_err))) {
bio_put(bio); bio_put(bio);
err = -EIO; err = -EIO;
goto failed; goto failed;
...@@ -278,7 +353,7 @@ static int nilfs_submit_seg_bio(struct nilfs_write_info *wi, int mode) ...@@ -278,7 +353,7 @@ static int nilfs_submit_seg_bio(struct nilfs_write_info *wi, int mode)
} }
bio->bi_end_io = nilfs_end_bio_write; bio->bi_end_io = nilfs_end_bio_write;
bio->bi_private = wi; bio->bi_private = segbuf;
bio_get(bio); bio_get(bio);
submit_bio(mode, bio); submit_bio(mode, bio);
if (bio_flagged(bio, BIO_EOPNOTSUPP)) { if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
...@@ -286,7 +361,7 @@ static int nilfs_submit_seg_bio(struct nilfs_write_info *wi, int mode) ...@@ -286,7 +361,7 @@ static int nilfs_submit_seg_bio(struct nilfs_write_info *wi, int mode)
err = -EOPNOTSUPP; err = -EOPNOTSUPP;
goto failed; goto failed;
} }
wi->nbio++; segbuf->sb_nbio++;
bio_put(bio); bio_put(bio);
wi->bio = NULL; wi->bio = NULL;
...@@ -301,17 +376,15 @@ static int nilfs_submit_seg_bio(struct nilfs_write_info *wi, int mode) ...@@ -301,17 +376,15 @@ static int nilfs_submit_seg_bio(struct nilfs_write_info *wi, int mode)
} }
/** /**
* nilfs_alloc_seg_bio - allocate a bio for writing segment. * nilfs_alloc_seg_bio - allocate a new bio for writing log
* @sb: super block * @nilfs: nilfs object
* @start: beginning disk block number of this BIO. * @start: start block number of the bio
* @nr_vecs: request size of page vector. * @nr_vecs: request size of page vector.
* *
* alloc_seg_bio() allocates a new BIO structure and initialize it.
*
* Return Value: On success, pointer to the struct bio is returned. * Return Value: On success, pointer to the struct bio is returned.
* On error, NULL is returned. * On error, NULL is returned.
*/ */
static struct bio *nilfs_alloc_seg_bio(struct super_block *sb, sector_t start, static struct bio *nilfs_alloc_seg_bio(struct the_nilfs *nilfs, sector_t start,
int nr_vecs) int nr_vecs)
{ {
struct bio *bio; struct bio *bio;
...@@ -322,36 +395,33 @@ static struct bio *nilfs_alloc_seg_bio(struct super_block *sb, sector_t start, ...@@ -322,36 +395,33 @@ static struct bio *nilfs_alloc_seg_bio(struct super_block *sb, sector_t start,
bio = bio_alloc(GFP_NOIO, nr_vecs); bio = bio_alloc(GFP_NOIO, nr_vecs);
} }
if (likely(bio)) { if (likely(bio)) {
bio->bi_bdev = sb->s_bdev; bio->bi_bdev = nilfs->ns_bdev;
bio->bi_sector = (sector_t)start << (sb->s_blocksize_bits - 9); bio->bi_sector = start << (nilfs->ns_blocksize_bits - 9);
} }
return bio; return bio;
} }
void nilfs_segbuf_prepare_write(struct nilfs_segment_buffer *segbuf, static void nilfs_segbuf_prepare_write(struct nilfs_segment_buffer *segbuf,
struct nilfs_write_info *wi) struct nilfs_write_info *wi)
{ {
wi->bio = NULL; wi->bio = NULL;
wi->rest_blocks = segbuf->sb_sum.nblocks; wi->rest_blocks = segbuf->sb_sum.nblocks;
wi->max_pages = bio_get_nr_vecs(wi->sb->s_bdev); wi->max_pages = bio_get_nr_vecs(wi->nilfs->ns_bdev);
wi->nr_vecs = min(wi->max_pages, wi->rest_blocks); wi->nr_vecs = min(wi->max_pages, wi->rest_blocks);
wi->start = wi->end = 0; wi->start = wi->end = 0;
wi->nbio = 0;
wi->blocknr = segbuf->sb_pseg_start; wi->blocknr = segbuf->sb_pseg_start;
atomic_set(&wi->err, 0);
init_completion(&wi->bio_event);
} }
static int nilfs_submit_bh(struct nilfs_write_info *wi, struct buffer_head *bh, static int nilfs_segbuf_submit_bh(struct nilfs_segment_buffer *segbuf,
int mode) struct nilfs_write_info *wi,
struct buffer_head *bh, int mode)
{ {
int len, err; int len, err;
BUG_ON(wi->nr_vecs <= 0); BUG_ON(wi->nr_vecs <= 0);
repeat: repeat:
if (!wi->bio) { if (!wi->bio) {
wi->bio = nilfs_alloc_seg_bio(wi->sb, wi->blocknr + wi->end, wi->bio = nilfs_alloc_seg_bio(wi->nilfs, wi->blocknr + wi->end,
wi->nr_vecs); wi->nr_vecs);
if (unlikely(!wi->bio)) if (unlikely(!wi->bio))
return -ENOMEM; return -ENOMEM;
...@@ -363,76 +433,83 @@ static int nilfs_submit_bh(struct nilfs_write_info *wi, struct buffer_head *bh, ...@@ -363,76 +433,83 @@ static int nilfs_submit_bh(struct nilfs_write_info *wi, struct buffer_head *bh,
return 0; return 0;
} }
/* bio is FULL */ /* bio is FULL */
err = nilfs_submit_seg_bio(wi, mode); err = nilfs_segbuf_submit_bio(segbuf, wi, mode);
/* never submit current bh */ /* never submit current bh */
if (likely(!err)) if (likely(!err))
goto repeat; goto repeat;
return err; return err;
} }
/**
* nilfs_segbuf_write - submit write requests of a log
* @segbuf: buffer storing a log to be written
* @nilfs: nilfs object
*
* Return Value: On Success, 0 is returned. On Error, one of the following
* negative error code is returned.
*
* %-EIO - I/O error
*
* %-ENOMEM - Insufficient memory available.
*/
int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf, int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
struct nilfs_write_info *wi) struct the_nilfs *nilfs)
{ {
struct nilfs_write_info wi;
struct buffer_head *bh; struct buffer_head *bh;
int res, rw = WRITE; int res = 0, rw = WRITE;
wi.nilfs = nilfs;
nilfs_segbuf_prepare_write(segbuf, &wi);
list_for_each_entry(bh, &segbuf->sb_segsum_buffers, b_assoc_buffers) { list_for_each_entry(bh, &segbuf->sb_segsum_buffers, b_assoc_buffers) {
res = nilfs_submit_bh(wi, bh, rw); res = nilfs_segbuf_submit_bh(segbuf, &wi, bh, rw);
if (unlikely(res)) if (unlikely(res))
goto failed_bio; goto failed_bio;
} }
list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) { list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
res = nilfs_submit_bh(wi, bh, rw); res = nilfs_segbuf_submit_bh(segbuf, &wi, bh, rw);
if (unlikely(res)) if (unlikely(res))
goto failed_bio; goto failed_bio;
} }
if (wi->bio) { if (wi.bio) {
/* /*
* Last BIO is always sent through the following * Last BIO is always sent through the following
* submission. * submission.
*/ */
rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG); rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
res = nilfs_submit_seg_bio(wi, rw); res = nilfs_segbuf_submit_bio(segbuf, &wi, rw);
if (unlikely(res))
goto failed_bio;
} }
res = 0;
out:
return res;
failed_bio: failed_bio:
atomic_inc(&wi->err); return res;
goto out;
} }
/** /**
* nilfs_segbuf_wait - wait for completion of requested BIOs * nilfs_segbuf_wait - wait for completion of requested BIOs
* @wi: nilfs_write_info * @segbuf: segment buffer
* *
* Return Value: On Success, 0 is returned. On Error, one of the following * Return Value: On Success, 0 is returned. On Error, one of the following
* negative error code is returned. * negative error code is returned.
* *
* %-EIO - I/O error * %-EIO - I/O error
*/ */
int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf, int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf)
struct nilfs_write_info *wi)
{ {
int err = 0; int err = 0;
if (!wi->nbio) if (!segbuf->sb_nbio)
return 0; return 0;
do { do {
wait_for_completion(&wi->bio_event); wait_for_completion(&segbuf->sb_bio_event);
} while (--wi->nbio > 0); } while (--segbuf->sb_nbio > 0);
if (unlikely(atomic_read(&wi->err) > 0)) { if (unlikely(atomic_read(&segbuf->sb_err) > 0)) {
printk(KERN_ERR "NILFS: IO error writing segment\n"); printk(KERN_ERR "NILFS: IO error writing segment\n");
err = -EIO; err = -EIO;
segbuf->sb_io_error = 1;
} }
return err; return err;
} }
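With the I/O state (sb_nbio, sb_err, sb_bio_event) folded into the segment buffer, a writer no longer threads a nilfs_write_info through submit and wait; nilfs_segbuf_write() builds one on its own stack. A minimal caller sketch of the reworked pair, using only the signatures introduced in this hunk (the helper name is hypothetical, not part of the series):

static int example_write_one_log(struct nilfs_segment_buffer *segbuf,
				 struct the_nilfs *nilfs)
{
	int err;

	err = nilfs_segbuf_write(segbuf, nilfs);  /* submit summary + payload bios */
	if (!err)
		err = nilfs_segbuf_wait(segbuf);  /* drain sb_bio_event; sb_err becomes -EIO */
	return err;
}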
@@ -27,7 +27,6 @@
#include <linux/buffer_head.h>
#include <linux/bio.h>
#include <linux/completion.h>
-#include <linux/backing-dev.h>

/**
 * struct nilfs_segsum_info - On-memory segment summary

@@ -77,7 +76,9 @@ struct nilfs_segsum_info {
 * @sb_rest_blocks: Number of residual blocks in the current segment
 * @sb_segsum_buffers: List of buffers for segment summaries
 * @sb_payload_buffers: List of buffers for segment payload
- * @sb_io_error: I/O error status
+ * @sb_nbio: Number of flying bio requests
+ * @sb_err: I/O error status
+ * @sb_bio_event: Completion event of log writing
 */
struct nilfs_segment_buffer {
	struct super_block     *sb_super;

@@ -96,7 +97,9 @@ struct nilfs_segment_buffer {
	struct list_head	sb_payload_buffers; /* including super root */

	/* io status */
-	int			sb_io_error;
+	int			sb_nbio;
+	atomic_t		sb_err;
+	struct completion	sb_bio_event;
};

#define NILFS_LIST_SEGBUF(head)  \

@@ -125,6 +128,8 @@ struct nilfs_segment_buffer *nilfs_segbuf_new(struct super_block *);
void nilfs_segbuf_free(struct nilfs_segment_buffer *);
void nilfs_segbuf_map(struct nilfs_segment_buffer *, __u64, unsigned long,
		      struct the_nilfs *);
+void nilfs_segbuf_map_cont(struct nilfs_segment_buffer *segbuf,
+			   struct nilfs_segment_buffer *prev);
void nilfs_segbuf_set_next_segnum(struct nilfs_segment_buffer *, __u64,
				  struct the_nilfs *);
int nilfs_segbuf_reset(struct nilfs_segment_buffer *, unsigned, time_t);

@@ -161,41 +166,18 @@ nilfs_segbuf_add_file_buffer(struct nilfs_segment_buffer *segbuf,
	segbuf->sb_sum.nfileblk++;
}

-void nilfs_release_buffers(struct list_head *);
+int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
+		       struct the_nilfs *nilfs);
+int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf);

-static inline void nilfs_segbuf_clear(struct nilfs_segment_buffer *segbuf)
+void nilfs_clear_logs(struct list_head *logs);
+void nilfs_truncate_logs(struct list_head *logs,
+			 struct nilfs_segment_buffer *last);
+int nilfs_wait_on_logs(struct list_head *logs);
+
+static inline void nilfs_destroy_logs(struct list_head *logs)
{
-	nilfs_release_buffers(&segbuf->sb_segsum_buffers);
-	nilfs_release_buffers(&segbuf->sb_payload_buffers);
+	nilfs_truncate_logs(logs, NULL);
}

-struct nilfs_write_info {
-	struct bio	       *bio;
-	int			start, end; /* The region to be submitted */
-	int			rest_blocks;
-	int			max_pages;
-	int			nr_vecs;
-	sector_t		blocknr;
-	int			nbio;
-	atomic_t		err;
-	struct completion	bio_event;
-				/* completion event of segment write */
-	/*
-	 * The following fields must be set explicitly
-	 */
-	struct super_block     *sb;
-	struct backing_dev_info *bdi;	/* backing dev info */
-	struct buffer_head     *bh_sr;
-};
-
-void nilfs_segbuf_prepare_write(struct nilfs_segment_buffer *,
-				struct nilfs_write_info *);
-int nilfs_segbuf_write(struct nilfs_segment_buffer *,
-		       struct nilfs_write_info *);
-int nilfs_segbuf_wait(struct nilfs_segment_buffer *,
-		      struct nilfs_write_info *);
-
#endif /* _NILFS_SEGBUF_H */
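The new list helpers let the segment constructor treat a chain of logs as one unit. A sketch of the intended pattern, assuming "logs" is a list of segment buffers built by the caller (the helper name is hypothetical):

static int example_flush_logs(struct list_head *logs)
{
	int err = nilfs_wait_on_logs(logs);	/* stops at the first I/O error */

	nilfs_destroy_logs(logs);		/* truncate from the head: free every segbuf */
	return err;
}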
This diff is collapsed.
@@ -97,6 +97,7 @@ struct nilfs_segsum_pointer {
 * @sc_dsync_start: start byte offset of data pages
 * @sc_dsync_end: end byte offset of data pages (inclusive)
 * @sc_segbufs: List of segment buffers
+ * @sc_write_logs: List of segment buffers to hold logs under writing
 * @sc_segbuf_nblocks: Number of available blocks in segment buffers.
 * @sc_curseg: Current segment buffer
 * @sc_super_root: Pointer to the super root buffer

@@ -143,6 +144,7 @@ struct nilfs_sc_info {
	/* Segment buffers */
	struct list_head	sc_segbufs;
+	struct list_head	sc_write_logs;
	unsigned long		sc_segbuf_nblocks;
	struct nilfs_segment_buffer *sc_curseg;
	struct buffer_head     *sc_super_root;
......
@@ -31,6 +31,16 @@
#include "sufile.h"

+struct nilfs_sufile_info {
+	struct nilfs_mdt_info mi;
+	unsigned long ncleansegs;
+};
+
+static inline struct nilfs_sufile_info *NILFS_SUI(struct inode *sufile)
+{
+	return (struct nilfs_sufile_info *)NILFS_MDT(sufile);
+}
+
static inline unsigned long
nilfs_sufile_segment_usages_per_block(const struct inode *sufile)
{

@@ -62,14 +72,6 @@ nilfs_sufile_segment_usages_in_block(const struct inode *sufile, __u64 curr,
		    max - curr + 1);
}

-static inline struct nilfs_sufile_header *
-nilfs_sufile_block_get_header(const struct inode *sufile,
-			      struct buffer_head *bh,
-			      void *kaddr)
-{
-	return kaddr + bh_offset(bh);
-}
-
static struct nilfs_segment_usage *
nilfs_sufile_block_get_segment_usage(const struct inode *sufile, __u64 segnum,
				     struct buffer_head *bh, void *kaddr)

@@ -109,6 +111,15 @@ static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
	nilfs_mdt_mark_buffer_dirty(header_bh);
}

+/**
+ * nilfs_sufile_get_ncleansegs - return the number of clean segments
+ * @sufile: inode of segment usage file
+ */
+unsigned long nilfs_sufile_get_ncleansegs(struct inode *sufile)
+{
+	return NILFS_SUI(sufile)->ncleansegs;
+}
+
/**
 * nilfs_sufile_updatev - modify multiple segment usages at a time
 * @sufile: inode of segment usage file

@@ -270,7 +281,7 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
	if (ret < 0)
		goto out_sem;

	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
-	header = nilfs_sufile_block_get_header(sufile, header_bh, kaddr);
+	header = kaddr + bh_offset(header_bh);
	ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	last_alloc = le64_to_cpu(header->sh_last_alloc);
	kunmap_atomic(kaddr, KM_USER0);

@@ -302,13 +313,13 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
			kunmap_atomic(kaddr, KM_USER0);

			kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
-			header = nilfs_sufile_block_get_header(
-				sufile, header_bh, kaddr);
+			header = kaddr + bh_offset(header_bh);
			le64_add_cpu(&header->sh_ncleansegs, -1);
			le64_add_cpu(&header->sh_ndirtysegs, 1);
			header->sh_last_alloc = cpu_to_le64(segnum);
			kunmap_atomic(kaddr, KM_USER0);

+			NILFS_SUI(sufile)->ncleansegs--;
			nilfs_mdt_mark_buffer_dirty(header_bh);
			nilfs_mdt_mark_buffer_dirty(su_bh);
			nilfs_mdt_mark_dirty(sufile);

@@ -351,6 +362,8 @@ void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_sufile_mod_counter(header_bh, -1, 1);
+	NILFS_SUI(sufile)->ncleansegs--;
+
	nilfs_mdt_mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}

@@ -380,6 +393,8 @@ void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
+	NILFS_SUI(sufile)->ncleansegs -= clean;
+
	nilfs_mdt_mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}

@@ -409,79 +424,65 @@ void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
	nilfs_mdt_mark_buffer_dirty(su_bh);
	nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
+	NILFS_SUI(sufile)->ncleansegs++;
+
	nilfs_mdt_mark_dirty(sufile);
}

/**
- * nilfs_sufile_get_segment_usage - get a segment usage
+ * nilfs_sufile_mark_dirty - mark the buffer having a segment usage dirty
 * @sufile: inode of segment usage file
 * @segnum: segment number
- * @sup: pointer to segment usage
- * @bhp: pointer to buffer head
- *
- * Description: nilfs_sufile_get_segment_usage() acquires the segment usage
- * specified by @segnum.
- *
- * Return Value: On success, 0 is returned, and the segment usage and the
- * buffer head of the buffer on which the segment usage is located are stored
- * in the place pointed by @sup and @bhp, respectively. On error, one of the
- * following negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-EINVAL - Invalid segment usage number.
 */
-int nilfs_sufile_get_segment_usage(struct inode *sufile, __u64 segnum,
-				   struct nilfs_segment_usage **sup,
-				   struct buffer_head **bhp)
+int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
{
	struct buffer_head *bh;
-	struct nilfs_segment_usage *su;
-	void *kaddr;
	int ret;

-	/* segnum is 0 origin */
-	if (segnum >= nilfs_sufile_get_nsegments(sufile))
-		return -EINVAL;
-	down_write(&NILFS_MDT(sufile)->mi_sem);
-	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1, &bh);
-	if (ret < 0)
-		goto out_sem;
-	kaddr = kmap(bh->b_page);
-	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
-	if (nilfs_segment_usage_error(su)) {
-		kunmap(bh->b_page);
+	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
+	if (!ret) {
+		nilfs_mdt_mark_buffer_dirty(bh);
+		nilfs_mdt_mark_dirty(sufile);
		brelse(bh);
-		ret = -EINVAL;
-		goto out_sem;
	}
-
-	if (sup != NULL)
-		*sup = su;
-	*bhp = bh;
-
- out_sem:
-	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

/**
- * nilfs_sufile_put_segment_usage - put a segment usage
+ * nilfs_sufile_set_segment_usage - set usage of a segment
 * @sufile: inode of segment usage file
 * @segnum: segment number
- * @bh: buffer head
- *
- * Description: nilfs_sufile_put_segment_usage() releases the segment usage
- * specified by @segnum. @bh must be the buffer head which have been returned
- * by a previous call to nilfs_sufile_get_segment_usage() with @segnum.
+ * @nblocks: number of live blocks in the segment
+ * @modtime: modification time (option)
 */
-void nilfs_sufile_put_segment_usage(struct inode *sufile, __u64 segnum,
-				    struct buffer_head *bh)
+int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
+				   unsigned long nblocks, time_t modtime)
{
-	kunmap(bh->b_page);
+	struct buffer_head *bh;
+	struct nilfs_segment_usage *su;
+	void *kaddr;
+	int ret;
+
+	down_write(&NILFS_MDT(sufile)->mi_sem);
+	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
+	if (ret < 0)
+		goto out_sem;
+
+	kaddr = kmap_atomic(bh->b_page, KM_USER0);
+	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
+	WARN_ON(nilfs_segment_usage_error(su));
+	if (modtime)
+		su->su_lastmod = cpu_to_le64(modtime);
+	su->su_nblocks = cpu_to_le32(nblocks);
+	kunmap_atomic(kaddr, KM_USER0);
+
+	nilfs_mdt_mark_buffer_dirty(bh);
+	nilfs_mdt_mark_dirty(sufile);
	brelse(bh);
+
+ out_sem:
+	up_write(&NILFS_MDT(sufile)->mi_sem);
+	return ret;
}

/**

@@ -515,7 +516,7 @@ int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
		goto out_sem;

	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
-	header = nilfs_sufile_block_get_header(sufile, header_bh, kaddr);
+	header = kaddr + bh_offset(header_bh);
	sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile);
	sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	sustat->ss_ndirtysegs = le64_to_cpu(header->sh_ndirtysegs);

@@ -532,33 +533,6 @@ int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
	return ret;
}

-/**
- * nilfs_sufile_get_ncleansegs - get the number of clean segments
- * @sufile: inode of segment usage file
- * @nsegsp: pointer to the number of clean segments
- *
- * Description: nilfs_sufile_get_ncleansegs() acquires the number of clean
- * segments.
- *
- * Return Value: On success, 0 is returned and the number of clean segments is
- * stored in the place pointed by @nsegsp. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- */
-int nilfs_sufile_get_ncleansegs(struct inode *sufile, unsigned long *nsegsp)
-{
-	struct nilfs_sustat sustat;
-	int ret;
-
-	ret = nilfs_sufile_get_stat(sufile, &sustat);
-	if (ret == 0)
-		*nsegsp = sustat.ss_ncleansegs;
-	return ret;
-}
-
void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
			       struct buffer_head *header_bh,
			       struct buffer_head *su_bh)

@@ -577,8 +551,10 @@ void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
	nilfs_segment_usage_set_error(su);
	kunmap_atomic(kaddr, KM_USER0);

-	if (suclean)
+	if (suclean) {
		nilfs_sufile_mod_counter(header_bh, -1, 0);
+		NILFS_SUI(sufile)->ncleansegs--;
+	}
	nilfs_mdt_mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}

@@ -657,3 +633,48 @@ ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
	up_read(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

+/**
+ * nilfs_sufile_read - read sufile inode
+ * @sufile: sufile inode
+ * @raw_inode: on-disk sufile inode
+ */
+int nilfs_sufile_read(struct inode *sufile, struct nilfs_inode *raw_inode)
+{
+	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
+	struct buffer_head *header_bh;
+	struct nilfs_sufile_header *header;
+	void *kaddr;
+	int ret;
+
+	ret = nilfs_read_inode_common(sufile, raw_inode);
+	if (ret < 0)
+		return ret;
+
+	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
+	if (!ret) {
+		kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+		header = kaddr + bh_offset(header_bh);
+		sui->ncleansegs = le64_to_cpu(header->sh_ncleansegs);
+		kunmap_atomic(kaddr, KM_USER0);
+		brelse(header_bh);
+	}
+	return ret;
+}
+
+/**
+ * nilfs_sufile_new - create sufile
+ * @nilfs: nilfs object
+ * @susize: size of a segment usage entry
+ */
+struct inode *nilfs_sufile_new(struct the_nilfs *nilfs, size_t susize)
+{
+	struct inode *sufile;
+
+	sufile = nilfs_mdt_new(nilfs, NULL, NILFS_SUFILE_INO,
+			       sizeof(struct nilfs_sufile_info));
+	if (sufile)
+		nilfs_mdt_set_entry_size(sufile, susize,
+					 sizeof(struct nilfs_sufile_header));
+	return sufile;
+}
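Since the clean-segment count is now cached in nilfs_sufile_info at nilfs_sufile_read() time and kept current by the alloc/free/scrap paths above, callers can read it without touching the header block, and per-segment updates go through a single setter instead of the old get/put pair. A sketch of the replacement usage (the wrapper name is made up for illustration):

static int example_update_usage(struct inode *sufile, __u64 segnum,
				unsigned long nblocks, time_t mtime)
{
	/* cheap read of the cached counter, no block lookup */
	unsigned long nclean = nilfs_sufile_get_ncleansegs(sufile);

	pr_debug("nilfs: %lu clean segments\n", nclean);

	/* one call replaces get_segment_usage()/put_segment_usage() */
	return nilfs_sufile_set_segment_usage(sufile, segnum, nblocks, mtime);
}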
@@ -34,14 +34,13 @@ static inline unsigned long nilfs_sufile_get_nsegments(struct inode *sufile)
	return NILFS_MDT(sufile)->mi_nilfs->ns_nsegments;
}

+unsigned long nilfs_sufile_get_ncleansegs(struct inode *sufile);
+
int nilfs_sufile_alloc(struct inode *, __u64 *);
-int nilfs_sufile_get_segment_usage(struct inode *, __u64,
-				   struct nilfs_segment_usage **,
-				   struct buffer_head **);
-void nilfs_sufile_put_segment_usage(struct inode *, __u64,
-				    struct buffer_head *);
+int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum);
+int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
+				   unsigned long nblocks, time_t modtime);
int nilfs_sufile_get_stat(struct inode *, struct nilfs_sustat *);
-int nilfs_sufile_get_ncleansegs(struct inode *, unsigned long *);
ssize_t nilfs_sufile_get_suinfo(struct inode *, __u64, void *, unsigned,
				size_t);

@@ -62,6 +61,9 @@ void nilfs_sufile_do_cancel_free(struct inode *, __u64, struct buffer_head *,
void nilfs_sufile_do_set_error(struct inode *, __u64, struct buffer_head *,
			       struct buffer_head *);

+int nilfs_sufile_read(struct inode *sufile, struct nilfs_inode *raw_inode);
+struct inode *nilfs_sufile_new(struct the_nilfs *nilfs, size_t susize);
+
/**
 * nilfs_sufile_scrap - make a segment garbage
 * @sufile: inode of segment usage file
......
@@ -363,14 +363,10 @@ int nilfs_attach_checkpoint(struct nilfs_sb_info *sbi, __u64 cno)
	list_add(&sbi->s_list, &nilfs->ns_supers);
	up_write(&nilfs->ns_super_sem);

-	sbi->s_ifile = nilfs_mdt_new(nilfs, sbi->s_super, NILFS_IFILE_INO);
+	sbi->s_ifile = nilfs_ifile_new(sbi, nilfs->ns_inode_size);
	if (!sbi->s_ifile)
		return -ENOMEM;

-	err = nilfs_palloc_init_blockgroup(sbi->s_ifile, nilfs->ns_inode_size);
-	if (unlikely(err))
-		goto failed;
-
	down_read(&nilfs->ns_segctor_sem);
	err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, cno, 0, &raw_cp,
					  &bh_cp);

@@ -411,7 +407,6 @@ void nilfs_detach_checkpoint(struct nilfs_sb_info *sbi)
{
	struct the_nilfs *nilfs = sbi->s_nilfs;

-	nilfs_mdt_clear(sbi->s_ifile);
	nilfs_mdt_destroy(sbi->s_ifile);
	sbi->s_ifile = NULL;
	down_write(&nilfs->ns_super_sem);

@@ -419,22 +414,6 @@ void nilfs_detach_checkpoint(struct nilfs_sb_info *sbi)
	up_write(&nilfs->ns_super_sem);
}

-static int nilfs_mark_recovery_complete(struct nilfs_sb_info *sbi)
-{
-	struct the_nilfs *nilfs = sbi->s_nilfs;
-	int err = 0;
-
-	down_write(&nilfs->ns_sem);
-	if (!(nilfs->ns_mount_state & NILFS_VALID_FS)) {
-		nilfs->ns_mount_state |= NILFS_VALID_FS;
-		err = nilfs_commit_super(sbi, 1);
-		if (likely(!err))
-			printk(KERN_INFO "NILFS: recovery complete.\n");
-	}
-	up_write(&nilfs->ns_sem);
-	return err;
-}
-
static int nilfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;

@@ -490,7 +469,7 @@ static int nilfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
	struct nilfs_sb_info *sbi = NILFS_SB(sb);

	if (!nilfs_test_opt(sbi, BARRIER))
-		seq_printf(seq, ",barrier=off");
+		seq_printf(seq, ",nobarrier");
	if (nilfs_test_opt(sbi, SNAPSHOT))
		seq_printf(seq, ",cp=%llu",
			   (unsigned long long int)sbi->s_snapshot_cno);

@@ -500,6 +479,8 @@ static int nilfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
		seq_printf(seq, ",errors=panic");
	if (nilfs_test_opt(sbi, STRICT_ORDER))
		seq_printf(seq, ",order=strict");
+	if (nilfs_test_opt(sbi, NORECOVERY))
+		seq_printf(seq, ",norecovery");

	return 0;
}

@@ -568,7 +549,7 @@ static const struct export_operations nilfs_export_ops = {
enum {
	Opt_err_cont, Opt_err_panic, Opt_err_ro,
-	Opt_barrier, Opt_snapshot, Opt_order,
+	Opt_nobarrier, Opt_snapshot, Opt_order, Opt_norecovery,
	Opt_err,
};

@@ -576,25 +557,13 @@ static match_table_t tokens = {
	{Opt_err_cont, "errors=continue"},
	{Opt_err_panic, "errors=panic"},
	{Opt_err_ro, "errors=remount-ro"},
-	{Opt_barrier, "barrier=%s"},
+	{Opt_nobarrier, "nobarrier"},
	{Opt_snapshot, "cp=%u"},
	{Opt_order, "order=%s"},
+	{Opt_norecovery, "norecovery"},
	{Opt_err, NULL}
};

-static int match_bool(substring_t *s, int *result)
-{
-	int len = s->to - s->from;
-
-	if (strncmp(s->from, "on", len) == 0)
-		*result = 1;
-	else if (strncmp(s->from, "off", len) == 0)
-		*result = 0;
-	else
-		return 1;
-	return 0;
-}
-
static int parse_options(char *options, struct super_block *sb)
{
	struct nilfs_sb_info *sbi = NILFS_SB(sb);

@@ -612,13 +581,8 @@ static int parse_options(char *options, struct super_block *sb)
		token = match_token(p, tokens, args);
		switch (token) {
-		case Opt_barrier:
-			if (match_bool(&args[0], &option))
-				return 0;
-			if (option)
-				nilfs_set_opt(sbi, BARRIER);
-			else
-				nilfs_clear_opt(sbi, BARRIER);
+		case Opt_nobarrier:
+			nilfs_clear_opt(sbi, BARRIER);
			break;
		case Opt_order:
			if (strcmp(args[0].from, "relaxed") == 0)

@@ -647,6 +611,9 @@ static int parse_options(char *options, struct super_block *sb)
			sbi->s_snapshot_cno = option;
			nilfs_set_opt(sbi, SNAPSHOT);
			break;
+		case Opt_norecovery:
+			nilfs_set_opt(sbi, NORECOVERY);
+			break;
		default:
			printk(KERN_ERR
			       "NILFS: Unrecognized mount option \"%s\"\n", p);

@@ -672,9 +639,7 @@ static int nilfs_setup_super(struct nilfs_sb_info *sbi)
	int mnt_count = le16_to_cpu(sbp->s_mnt_count);

	/* nilfs->sem must be locked by the caller. */
-	if (!(nilfs->ns_mount_state & NILFS_VALID_FS)) {
-		printk(KERN_WARNING "NILFS warning: mounting unchecked fs\n");
-	} else if (nilfs->ns_mount_state & NILFS_ERROR_FS) {
+	if (nilfs->ns_mount_state & NILFS_ERROR_FS) {
		printk(KERN_WARNING
		       "NILFS warning: mounting fs with errors\n");
#if 0

@@ -782,11 +747,10 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent,
	sb->s_root = NULL;
	sb->s_time_gran = 1;

-	if (!nilfs_loaded(nilfs)) {
-		err = load_nilfs(nilfs, sbi);
-		if (err)
-			goto failed_sbi;
-	}
+	err = load_nilfs(nilfs, sbi);
+	if (err)
+		goto failed_sbi;
+
	cno = nilfs_last_cno(nilfs);

	if (sb->s_flags & MS_RDONLY) {

@@ -854,12 +818,6 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent,
		up_write(&nilfs->ns_sem);
	}

-	err = nilfs_mark_recovery_complete(sbi);
-	if (unlikely(err)) {
-		printk(KERN_ERR "NILFS: recovery failed.\n");
-		goto failed_root;
-	}
-
	down_write(&nilfs->ns_super_sem);
	if (!nilfs_test_opt(sbi, SNAPSHOT))
		nilfs->ns_current = sbi;

@@ -867,10 +825,6 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent,
	return 0;

- failed_root:
-	dput(sb->s_root);
-	sb->s_root = NULL;
-
 failed_segctor:
	nilfs_detach_segment_constructor(sbi);

@@ -915,6 +869,14 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data)
		goto restore_opts;
	}

+	if (!nilfs_valid_fs(nilfs)) {
+		printk(KERN_WARNING "NILFS (device %s): couldn't "
+		       "remount because the filesystem is in an "
+		       "incomplete recovery state.\n", sb->s_id);
+		err = -EINVAL;
+		goto restore_opts;
+	}
+
	if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
		goto out;
	if (*flags & MS_RDONLY) {
......
@@ -146,13 +146,9 @@ void put_nilfs(struct the_nilfs *nilfs)
	might_sleep();
	if (nilfs_loaded(nilfs)) {
-		nilfs_mdt_clear(nilfs->ns_sufile);
		nilfs_mdt_destroy(nilfs->ns_sufile);
-		nilfs_mdt_clear(nilfs->ns_cpfile);
		nilfs_mdt_destroy(nilfs->ns_cpfile);
-		nilfs_mdt_clear(nilfs->ns_dat);
		nilfs_mdt_destroy(nilfs->ns_dat);
-		/* XXX: how and when to clear nilfs->ns_gc_dat? */
		nilfs_mdt_destroy(nilfs->ns_gc_dat);
	}
	if (nilfs_init(nilfs)) {

@@ -166,7 +162,6 @@ void put_nilfs(struct the_nilfs *nilfs)
static int nilfs_load_super_root(struct the_nilfs *nilfs,
				 struct nilfs_sb_info *sbi, sector_t sr_block)
{
-	static struct lock_class_key dat_lock_key;
	struct buffer_head *bh_sr;
	struct nilfs_super_root *raw_sr;
	struct nilfs_super_block **sbp = nilfs->ns_sbp;

@@ -187,51 +182,36 @@ static int nilfs_load_super_root(struct the_nilfs *nilfs,
	inode_size = nilfs->ns_inode_size;

	err = -ENOMEM;
-	nilfs->ns_dat = nilfs_mdt_new(nilfs, NULL, NILFS_DAT_INO);
+	nilfs->ns_dat = nilfs_dat_new(nilfs, dat_entry_size);
	if (unlikely(!nilfs->ns_dat))
		goto failed;

-	nilfs->ns_gc_dat = nilfs_mdt_new(nilfs, NULL, NILFS_DAT_INO);
+	nilfs->ns_gc_dat = nilfs_dat_new(nilfs, dat_entry_size);
	if (unlikely(!nilfs->ns_gc_dat))
		goto failed_dat;

-	nilfs->ns_cpfile = nilfs_mdt_new(nilfs, NULL, NILFS_CPFILE_INO);
+	nilfs->ns_cpfile = nilfs_cpfile_new(nilfs, checkpoint_size);
	if (unlikely(!nilfs->ns_cpfile))
		goto failed_gc_dat;

-	nilfs->ns_sufile = nilfs_mdt_new(nilfs, NULL, NILFS_SUFILE_INO);
+	nilfs->ns_sufile = nilfs_sufile_new(nilfs, segment_usage_size);
	if (unlikely(!nilfs->ns_sufile))
		goto failed_cpfile;

-	err = nilfs_palloc_init_blockgroup(nilfs->ns_dat, dat_entry_size);
-	if (unlikely(err))
-		goto failed_sufile;
-
-	err = nilfs_palloc_init_blockgroup(nilfs->ns_gc_dat, dat_entry_size);
-	if (unlikely(err))
-		goto failed_sufile;
-
-	lockdep_set_class(&NILFS_MDT(nilfs->ns_dat)->mi_sem, &dat_lock_key);
-	lockdep_set_class(&NILFS_MDT(nilfs->ns_gc_dat)->mi_sem, &dat_lock_key);
-
	nilfs_mdt_set_shadow(nilfs->ns_dat, nilfs->ns_gc_dat);
-	nilfs_mdt_set_entry_size(nilfs->ns_cpfile, checkpoint_size,
-				 sizeof(struct nilfs_cpfile_header));
-	nilfs_mdt_set_entry_size(nilfs->ns_sufile, segment_usage_size,
-				 sizeof(struct nilfs_sufile_header));

-	err = nilfs_mdt_read_inode_direct(
-		nilfs->ns_dat, bh_sr, NILFS_SR_DAT_OFFSET(inode_size));
+	err = nilfs_dat_read(nilfs->ns_dat, (void *)bh_sr->b_data +
+			     NILFS_SR_DAT_OFFSET(inode_size));
	if (unlikely(err))
		goto failed_sufile;

-	err = nilfs_mdt_read_inode_direct(
-		nilfs->ns_cpfile, bh_sr, NILFS_SR_CPFILE_OFFSET(inode_size));
+	err = nilfs_cpfile_read(nilfs->ns_cpfile, (void *)bh_sr->b_data +
+				NILFS_SR_CPFILE_OFFSET(inode_size));
	if (unlikely(err))
		goto failed_sufile;

-	err = nilfs_mdt_read_inode_direct(
-		nilfs->ns_sufile, bh_sr, NILFS_SR_SUFILE_OFFSET(inode_size));
+	err = nilfs_sufile_read(nilfs->ns_sufile, (void *)bh_sr->b_data +
+				NILFS_SR_SUFILE_OFFSET(inode_size));
	if (unlikely(err))
		goto failed_sufile;

@@ -281,29 +261,30 @@ int load_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi)
	struct nilfs_recovery_info ri;
	unsigned int s_flags = sbi->s_super->s_flags;
	int really_read_only = bdev_read_only(nilfs->ns_bdev);
-	unsigned valid_fs;
-	int err = 0;
-
-	nilfs_init_recovery_info(&ri);
-
-	down_write(&nilfs->ns_sem);
-	valid_fs = (nilfs->ns_mount_state & NILFS_VALID_FS);
-	up_write(&nilfs->ns_sem);
+	int valid_fs = nilfs_valid_fs(nilfs);
+	int err;
+
+	if (nilfs_loaded(nilfs)) {
+		if (valid_fs ||
+		    ((s_flags & MS_RDONLY) && nilfs_test_opt(sbi, NORECOVERY)))
+			return 0;
+		printk(KERN_ERR "NILFS: the filesystem is in an incomplete "
+		       "recovery state.\n");
+		return -EINVAL;
+	}

-	if (!valid_fs && (s_flags & MS_RDONLY)) {
-		printk(KERN_INFO "NILFS: INFO: recovery "
-		       "required for readonly filesystem.\n");
-		if (really_read_only) {
-			printk(KERN_ERR "NILFS: write access "
-			       "unavailable, cannot proceed.\n");
-			err = -EROFS;
-			goto failed;
+	if (!valid_fs) {
+		printk(KERN_WARNING "NILFS warning: mounting unchecked fs\n");
+		if (s_flags & MS_RDONLY) {
+			printk(KERN_INFO "NILFS: INFO: recovery "
+			       "required for readonly filesystem.\n");
+			printk(KERN_INFO "NILFS: write access will "
+			       "be enabled during recovery.\n");
		}
-		printk(KERN_INFO "NILFS: write access will "
-		       "be enabled during recovery.\n");
-		sbi->s_super->s_flags &= ~MS_RDONLY;
	}

+	nilfs_init_recovery_info(&ri);
+
	err = nilfs_search_super_root(nilfs, sbi, &ri);
	if (unlikely(err)) {
		printk(KERN_ERR "NILFS: error searching super root.\n");

@@ -316,19 +297,56 @@ int load_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi)
		goto failed;
	}

-	if (!valid_fs) {
-		err = nilfs_recover_logical_segments(nilfs, sbi, &ri);
-		if (unlikely(err)) {
-			nilfs_mdt_destroy(nilfs->ns_cpfile);
-			nilfs_mdt_destroy(nilfs->ns_sufile);
-			nilfs_mdt_destroy(nilfs->ns_dat);
-			goto failed;
-		}
-		if (ri.ri_need_recovery == NILFS_RECOVERY_SR_UPDATED)
-			sbi->s_super->s_dirt = 1;
+	if (valid_fs)
+		goto skip_recovery;
+
+	if (s_flags & MS_RDONLY) {
+		if (nilfs_test_opt(sbi, NORECOVERY)) {
+			printk(KERN_INFO "NILFS: norecovery option specified. "
+			       "skipping roll-forward recovery\n");
+			goto skip_recovery;
+		}
+		if (really_read_only) {
+			printk(KERN_ERR "NILFS: write access "
+			       "unavailable, cannot proceed.\n");
+			err = -EROFS;
+			goto failed_unload;
+		}
+		sbi->s_super->s_flags &= ~MS_RDONLY;
+	} else if (nilfs_test_opt(sbi, NORECOVERY)) {
+		printk(KERN_ERR "NILFS: recovery cancelled because norecovery "
+		       "option was specified for a read/write mount\n");
+		err = -EINVAL;
+		goto failed_unload;
	}

+	err = nilfs_recover_logical_segments(nilfs, sbi, &ri);
+	if (err)
+		goto failed_unload;
+
+	down_write(&nilfs->ns_sem);
+	nilfs->ns_mount_state |= NILFS_VALID_FS;
+	nilfs->ns_sbp[0]->s_state = cpu_to_le16(nilfs->ns_mount_state);
+	err = nilfs_commit_super(sbi, 1);
+	up_write(&nilfs->ns_sem);
+
+	if (err) {
+		printk(KERN_ERR "NILFS: failed to update super block. "
+		       "recovery unfinished.\n");
+		goto failed_unload;
+	}
+	printk(KERN_INFO "NILFS: recovery complete.\n");
+
+ skip_recovery:
	set_nilfs_loaded(nilfs);
+	nilfs_clear_recovery_info(&ri);
+	sbi->s_super->s_flags = s_flags;
+	return 0;
+
+ failed_unload:
+	nilfs_mdt_destroy(nilfs->ns_cpfile);
+	nilfs_mdt_destroy(nilfs->ns_sufile);
+	nilfs_mdt_destroy(nilfs->ns_dat);

 failed:
	nilfs_clear_recovery_info(&ri);

@@ -632,30 +650,23 @@ int nilfs_count_free_blocks(struct the_nilfs *nilfs, sector_t *nblocks)
{
	struct inode *dat = nilfs_dat_inode(nilfs);
	unsigned long ncleansegs;
-	int err;

	down_read(&NILFS_MDT(dat)->mi_sem);	/* XXX */
-	err = nilfs_sufile_get_ncleansegs(nilfs->ns_sufile, &ncleansegs);
+	ncleansegs = nilfs_sufile_get_ncleansegs(nilfs->ns_sufile);
	up_read(&NILFS_MDT(dat)->mi_sem);	/* XXX */
-	if (likely(!err))
-		*nblocks = (sector_t)ncleansegs * nilfs->ns_blocks_per_segment;
-	return err;
+	*nblocks = (sector_t)ncleansegs * nilfs->ns_blocks_per_segment;
+	return 0;
}

int nilfs_near_disk_full(struct the_nilfs *nilfs)
{
-	struct inode *sufile = nilfs->ns_sufile;
	unsigned long ncleansegs, nincsegs;
-	int ret;

-	ret = nilfs_sufile_get_ncleansegs(sufile, &ncleansegs);
-	if (likely(!ret)) {
-		nincsegs = atomic_read(&nilfs->ns_ndirtyblks) /
-			nilfs->ns_blocks_per_segment + 1;
-		if (ncleansegs <= nilfs->ns_nrsvsegs + nincsegs)
-			ret++;
-	}
-	return ret;
+	ncleansegs = nilfs_sufile_get_ncleansegs(nilfs->ns_sufile);
+	nincsegs = atomic_read(&nilfs->ns_ndirtyblks) /
+		nilfs->ns_blocks_per_segment + 1;
+
+	return ncleansegs <= nilfs->ns_nrsvsegs + nincsegs;
}

/**
......
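The rewritten nilfs_near_disk_full() is now a pure computation on the cached counter: with, say, 2048 blocks per segment and 5000 dirty blocks pending, nincsegs = 5000 / 2048 + 1 = 3, so the filesystem is reported as near full once the clean-segment count drops to the reserved count plus 3. A standalone restatement of that check (names and numbers are illustrative only, not kernel API):

static int example_near_full(unsigned long ncleansegs,
			     unsigned long ndirtyblks,
			     unsigned long blocks_per_segment,
			     unsigned long nrsvsegs)
{
	/* segments the pending dirty blocks could consume, rounded up by one */
	unsigned long nincsegs = ndirtyblks / blocks_per_segment + 1;

	return ncleansegs <= nrsvsegs + nincsegs;
}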
@@ -258,6 +258,16 @@ static inline void nilfs_put_sbinfo(struct nilfs_sb_info *sbi)
		kfree(sbi);
}

+static inline int nilfs_valid_fs(struct the_nilfs *nilfs)
+{
+	unsigned valid_fs;
+
+	down_read(&nilfs->ns_sem);
+	valid_fs = (nilfs->ns_mount_state & NILFS_VALID_FS);
+	up_read(&nilfs->ns_sem);
+	return valid_fs;
+}
+
static inline void
nilfs_get_segment_range(struct the_nilfs *nilfs, __u64 segnum,
			sector_t *seg_start, sector_t *seg_end)
......
@@ -151,6 +151,8 @@ struct nilfs_super_root {
#define NILFS_MOUNT_BARRIER		0x1000  /* Use block barriers */
#define NILFS_MOUNT_STRICT_ORDER	0x2000  /* Apply strict in-order
						   semantics also for data */
+#define NILFS_MOUNT_NORECOVERY		0x4000  /* Disable write access during
+						   mount-time recovery */

/**

@@ -402,6 +404,28 @@ struct nilfs_segment_summary {
#define NILFS_SS_SYNDT   0x0008  /* includes data only updates */
#define NILFS_SS_GC      0x0010  /* segment written for cleaner operation */

+/**
+ * struct nilfs_btree_node - B-tree node
+ * @bn_flags: flags
+ * @bn_level: level
+ * @bn_nchildren: number of children
+ * @bn_pad: padding
+ */
+struct nilfs_btree_node {
+	__u8 bn_flags;
+	__u8 bn_level;
+	__le16 bn_nchildren;
+	__le32 bn_pad;
+};
+
+/* flags */
+#define NILFS_BTREE_NODE_ROOT	0x01
+
+/* level */
+#define NILFS_BTREE_LEVEL_DATA		0
+#define NILFS_BTREE_LEVEL_NODE_MIN	(NILFS_BTREE_LEVEL_DATA + 1)
+#define NILFS_BTREE_LEVEL_MAX		14
+
/**
 * struct nilfs_palloc_group_desc - block group descriptor
 * @pg_nfrees: number of free entries in block group
......
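struct nilfs_btree_node is only the 8-byte header of an on-disk B-tree node; in the in-kernel btree code the keys and the pointers follow it as two __le64 arrays. Under that assumption (it is not stated in this header), the fan-out of a node block can be estimated as in the sketch below:

/* rough per-block capacity, assuming __le64 keys and pointers packed
 * right after the 8-byte nilfs_btree_node header */
static inline int example_btree_node_nchildren_max(unsigned int blocksize)
{
	return (blocksize - sizeof(struct nilfs_btree_node)) /
		(sizeof(__le64) /* key */ + sizeof(__le64) /* ptr */);
}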