Commit aa405b1f authored by Ryusuke Konishi

nilfs2: always set back pointer to host inode in mapping->host

In the current nilfs, page cache for btree nodes and meta data files
do not set a valid back pointer to the host inode in mapping->host.

This will change it so that every address space in nilfs uses
mapping->host to hold its host inode.
Signed-off-by: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
parent 0ef28f9a
...@@ -34,12 +34,6 @@ ...@@ -34,12 +34,6 @@
#include "page.h" #include "page.h"
#include "btnode.h" #include "btnode.h"
/*
 * nilfs_btnode_cache_init - initialize a btree-node page cache
 * @btnc: address space used as the btree node cache
 * @bdi: backing device info to associate with the cache
 *
 * Thin wrapper that forwards to nilfs_mapping_init(); it performs no
 * additional setup of its own.  (Removed by this commit in favor of
 * calling nilfs_mapping_init() directly with the host inode.)
 */
void nilfs_btnode_cache_init(struct address_space *btnc,
struct backing_dev_info *bdi)
{
nilfs_mapping_init(btnc, bdi);
}
void nilfs_btnode_cache_clear(struct address_space *btnc) void nilfs_btnode_cache_clear(struct address_space *btnc)
{ {
invalidate_mapping_pages(btnc, 0, -1); invalidate_mapping_pages(btnc, 0, -1);
......
...@@ -37,7 +37,6 @@ struct nilfs_btnode_chkey_ctxt { ...@@ -37,7 +37,6 @@ struct nilfs_btnode_chkey_ctxt {
struct buffer_head *newbh; struct buffer_head *newbh;
}; };
void nilfs_btnode_cache_init(struct address_space *, struct backing_dev_info *);
void nilfs_btnode_cache_clear(struct address_space *); void nilfs_btnode_cache_clear(struct address_space *);
struct buffer_head *nilfs_btnode_create_block(struct address_space *btnc, struct buffer_head *nilfs_btnode_create_block(struct address_space *btnc,
__u64 blocknr); __u64 blocknr);
......
...@@ -450,9 +450,9 @@ int nilfs_mdt_setup_shadow_map(struct inode *inode, ...@@ -450,9 +450,9 @@ int nilfs_mdt_setup_shadow_map(struct inode *inode,
INIT_LIST_HEAD(&shadow->frozen_buffers); INIT_LIST_HEAD(&shadow->frozen_buffers);
address_space_init_once(&shadow->frozen_data); address_space_init_once(&shadow->frozen_data);
nilfs_mapping_init(&shadow->frozen_data, bdi); nilfs_mapping_init(&shadow->frozen_data, inode, bdi);
address_space_init_once(&shadow->frozen_btnodes); address_space_init_once(&shadow->frozen_btnodes);
nilfs_mapping_init(&shadow->frozen_btnodes, bdi); nilfs_mapping_init(&shadow->frozen_btnodes, inode, bdi);
mi->mi_shadow = shadow; mi->mi_shadow = shadow;
return 0; return 0;
} }
......
...@@ -80,12 +80,6 @@ static inline struct inode *NILFS_BTNC_I(struct address_space *btnc) ...@@ -80,12 +80,6 @@ static inline struct inode *NILFS_BTNC_I(struct address_space *btnc)
return &ii->vfs_inode; return &ii->vfs_inode;
} }
static inline struct inode *NILFS_AS_I(struct address_space *mapping)
{
return (mapping->host) ? :
container_of(mapping, struct inode, i_data);
}
/* /*
* Dynamic state flags of NILFS on-memory inode (i_state) * Dynamic state flags of NILFS on-memory inode (i_state)
*/ */
......
...@@ -182,7 +182,7 @@ int nilfs_page_buffers_clean(struct page *page) ...@@ -182,7 +182,7 @@ int nilfs_page_buffers_clean(struct page *page)
void nilfs_page_bug(struct page *page) void nilfs_page_bug(struct page *page)
{ {
struct address_space *m; struct address_space *m;
unsigned long ino = 0; unsigned long ino;
if (unlikely(!page)) { if (unlikely(!page)) {
printk(KERN_CRIT "NILFS_PAGE_BUG(NULL)\n"); printk(KERN_CRIT "NILFS_PAGE_BUG(NULL)\n");
...@@ -190,11 +190,8 @@ void nilfs_page_bug(struct page *page) ...@@ -190,11 +190,8 @@ void nilfs_page_bug(struct page *page)
} }
m = page->mapping; m = page->mapping;
if (m) { ino = m ? m->host->i_ino : 0;
struct inode *inode = NILFS_AS_I(m);
if (inode != NULL)
ino = inode->i_ino;
}
printk(KERN_CRIT "NILFS_PAGE_BUG(%p): cnt=%d index#=%llu flags=0x%lx " printk(KERN_CRIT "NILFS_PAGE_BUG(%p): cnt=%d index#=%llu flags=0x%lx "
"mapping=%p ino=%lu\n", "mapping=%p ino=%lu\n",
page, atomic_read(&page->_count), page, atomic_read(&page->_count),
...@@ -441,10 +438,10 @@ unsigned nilfs_page_count_clean_buffers(struct page *page, ...@@ -441,10 +438,10 @@ unsigned nilfs_page_count_clean_buffers(struct page *page,
return nc; return nc;
} }
void nilfs_mapping_init(struct address_space *mapping, void nilfs_mapping_init(struct address_space *mapping, struct inode *inode,
struct backing_dev_info *bdi) struct backing_dev_info *bdi)
{ {
mapping->host = NULL; mapping->host = inode;
mapping->flags = 0; mapping->flags = 0;
mapping_set_gfp_mask(mapping, GFP_NOFS); mapping_set_gfp_mask(mapping, GFP_NOFS);
mapping->assoc_mapping = NULL; mapping->assoc_mapping = NULL;
......
...@@ -57,7 +57,7 @@ void nilfs_page_bug(struct page *); ...@@ -57,7 +57,7 @@ void nilfs_page_bug(struct page *);
int nilfs_copy_dirty_pages(struct address_space *, struct address_space *); int nilfs_copy_dirty_pages(struct address_space *, struct address_space *);
void nilfs_copy_back_pages(struct address_space *, struct address_space *); void nilfs_copy_back_pages(struct address_space *, struct address_space *);
void nilfs_clear_dirty_pages(struct address_space *); void nilfs_clear_dirty_pages(struct address_space *);
void nilfs_mapping_init(struct address_space *mapping, void nilfs_mapping_init(struct address_space *mapping, struct inode *inode,
struct backing_dev_info *bdi); struct backing_dev_info *bdi);
unsigned nilfs_page_count_clean_buffers(struct page *, unsigned, unsigned); unsigned nilfs_page_count_clean_buffers(struct page *, unsigned, unsigned);
unsigned long nilfs_find_uncommitted_extent(struct inode *inode, unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
......
...@@ -655,13 +655,10 @@ static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode, ...@@ -655,13 +655,10 @@ static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
if (unlikely(page->index > last)) if (unlikely(page->index > last))
break; break;
if (mapping->host) { lock_page(page);
lock_page(page); if (!page_has_buffers(page))
if (!page_has_buffers(page)) create_empty_buffers(page, 1 << inode->i_blkbits, 0);
create_empty_buffers(page, unlock_page(page);
1 << inode->i_blkbits, 0);
unlock_page(page);
}
bh = head = page_buffers(page); bh = head = page_buffers(page);
do { do {
...@@ -1503,10 +1500,7 @@ nilfs_segctor_update_payload_blocknr(struct nilfs_sc_info *sci, ...@@ -1503,10 +1500,7 @@ nilfs_segctor_update_payload_blocknr(struct nilfs_sc_info *sci,
nblocks = le32_to_cpu(finfo->fi_nblocks); nblocks = le32_to_cpu(finfo->fi_nblocks);
ndatablk = le32_to_cpu(finfo->fi_ndatablk); ndatablk = le32_to_cpu(finfo->fi_ndatablk);
if (buffer_nilfs_node(bh)) inode = bh->b_page->mapping->host;
inode = NILFS_BTNC_I(bh->b_page->mapping);
else
inode = NILFS_AS_I(bh->b_page->mapping);
if (mode == SC_LSEG_DSYNC) if (mode == SC_LSEG_DSYNC)
sc_op = &nilfs_sc_dsync_ops; sc_op = &nilfs_sc_dsync_ops;
......
...@@ -166,7 +166,7 @@ struct inode *nilfs_alloc_inode(struct super_block *sb) ...@@ -166,7 +166,7 @@ struct inode *nilfs_alloc_inode(struct super_block *sb)
ii->i_state = 0; ii->i_state = 0;
ii->i_cno = 0; ii->i_cno = 0;
ii->vfs_inode.i_version = 1; ii->vfs_inode.i_version = 1;
nilfs_btnode_cache_init(&ii->i_btnode_cache, sb->s_bdi); nilfs_mapping_init(&ii->i_btnode_cache, &ii->vfs_inode, sb->s_bdi);
return &ii->vfs_inode; return &ii->vfs_inode;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment