Commit 6e211930 authored by Ryusuke Konishi, committed by Linus Torvalds

nilfs2: fix lockdep warnings during disk space reclamation

During disk space reclamation, nilfs2 still emits the following lockdep
warning due to page/folio operations on the shadowed page caches that
nilfs2 uses to get a snapshot of the DAT file in memory:

  WARNING: CPU: 0 PID: 2643 at include/linux/backing-dev.h:272 __folio_mark_dirty+0x645/0x670
  ...
  RIP: 0010:__folio_mark_dirty+0x645/0x670
  ...
  Call Trace:
    filemap_dirty_folio+0x74/0xd0
    __set_page_dirty_nobuffers+0x85/0xb0
    nilfs_copy_dirty_pages+0x288/0x510 [nilfs2]
    nilfs_mdt_save_to_shadow_map+0x50/0xe0 [nilfs2]
    nilfs_clean_segments+0xee/0x5d0 [nilfs2]
    nilfs_ioctl_clean_segments.isra.19+0xb08/0xf40 [nilfs2]
    nilfs_ioctl+0xc52/0xfb0 [nilfs2]
    __x64_sys_ioctl+0x11d/0x170

This fixes the remaining warning by using inode objects to hold those
page caches.
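
Concretely, struct nilfs_shadow_map drops its two embedded address_space
fields in favor of a dedicated shadow inode (whose attached b-tree node
cache inode holds the shadowed btnode pages), and the call sites switch
from the bare mappings to that inode's page cache. A condensed
before/after view of the hunks below:

  /* before: bare page caches embedded in the shadow map */
  struct nilfs_shadow_map {
  	struct nilfs_bmap_store bmap_store;
  	struct address_space frozen_data;
  	struct address_space frozen_btnodes;
  	struct list_head frozen_buffers;
  };
  ret = nilfs_copy_dirty_pages(&shadow->frozen_data, inode->i_mapping);

  /* after: page caches held by a shadow inode from nilfs_iget_for_shadow() */
  struct nilfs_shadow_map {
  	struct nilfs_bmap_store bmap_store;
  	struct inode *inode;
  	struct list_head frozen_buffers;
  };
  ret = nilfs_copy_dirty_pages(shadow->inode->i_mapping, inode->i_mapping);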

Link: https://lkml.kernel.org/r/1647867427-30498-3-git-send-email-konishi.ryusuke@gmail.com
Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
Tested-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Hao Sun <sunhao.th@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent e897be17
@@ -497,7 +497,9 @@ int nilfs_dat_read(struct super_block *sb, size_t entry_size,
 	di = NILFS_DAT_I(dat);
 	lockdep_set_class(&di->mi.mi_sem, &dat_lock_key);
 	nilfs_palloc_setup_cache(dat, &di->palloc_cache);
-	nilfs_mdt_setup_shadow_map(dat, &di->shadow);
+	err = nilfs_mdt_setup_shadow_map(dat, &di->shadow);
+	if (err)
+		goto failed;
 
 	err = nilfs_read_inode_common(dat, raw_inode);
 	if (err)
...
@@ -30,6 +30,7 @@
  * @root: pointer on NILFS root object (mounted checkpoint)
  * @for_gc: inode for GC flag
  * @for_btnc: inode for B-tree node cache flag
+ * @for_shadow: inode for shadowed page cache flag
  */
 struct nilfs_iget_args {
 	u64 ino;
@@ -37,6 +38,7 @@ struct nilfs_iget_args {
 	struct nilfs_root *root;
 	bool for_gc;
 	bool for_btnc;
+	bool for_shadow;
 };
 
 static int nilfs_iget_test(struct inode *inode, void *opaque);
@@ -315,7 +317,7 @@ static int nilfs_insert_inode_locked(struct inode *inode,
 {
 	struct nilfs_iget_args args = {
 		.ino = ino, .root = root, .cno = 0, .for_gc = false,
-		.for_btnc = false
+		.for_btnc = false, .for_shadow = false
 	};
 
 	return insert_inode_locked4(inode, ino, nilfs_iget_test, &args);
@@ -534,6 +536,12 @@ static int nilfs_iget_test(struct inode *inode, void *opaque)
 	} else if (args->for_btnc) {
 		return 0;
 	}
+	if (test_bit(NILFS_I_SHADOW, &ii->i_state)) {
+		if (!args->for_shadow)
+			return 0;
+	} else if (args->for_shadow) {
+		return 0;
+	}
 
 	if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
 		return !args->for_gc;
@@ -555,6 +563,8 @@ static int nilfs_iget_set(struct inode *inode, void *opaque)
 		NILFS_I(inode)->i_state = BIT(NILFS_I_GCINODE);
 	if (args->for_btnc)
 		NILFS_I(inode)->i_state |= BIT(NILFS_I_BTNC);
+	if (args->for_shadow)
+		NILFS_I(inode)->i_state |= BIT(NILFS_I_SHADOW);
 	return 0;
 }
 
@@ -563,7 +573,7 @@ struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
 {
 	struct nilfs_iget_args args = {
 		.ino = ino, .root = root, .cno = 0, .for_gc = false,
-		.for_btnc = false
+		.for_btnc = false, .for_shadow = false
 	};
 
 	return ilookup5(sb, ino, nilfs_iget_test, &args);
@@ -574,7 +584,7 @@ struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
 {
 	struct nilfs_iget_args args = {
 		.ino = ino, .root = root, .cno = 0, .for_gc = false,
-		.for_btnc = false
+		.for_btnc = false, .for_shadow = false
 	};
 
 	return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
@@ -606,7 +616,7 @@ struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
 {
 	struct nilfs_iget_args args = {
 		.ino = ino, .root = NULL, .cno = cno, .for_gc = true,
-		.for_btnc = false
+		.for_btnc = false, .for_shadow = false
 	};
 	struct inode *inode;
 	int err;
@@ -653,6 +663,7 @@ int nilfs_attach_btree_node_cache(struct inode *inode)
 	args.cno = ii->i_cno;
 	args.for_gc = test_bit(NILFS_I_GCINODE, &ii->i_state) != 0;
 	args.for_btnc = true;
+	args.for_shadow = test_bit(NILFS_I_SHADOW, &ii->i_state) != 0;
 	btnc_inode = iget5_locked(inode->i_sb, inode->i_ino, nilfs_iget_test,
 				  nilfs_iget_set, &args);
 	if (unlikely(!btnc_inode))
@@ -688,6 +699,50 @@ void nilfs_detach_btree_node_cache(struct inode *inode)
 	}
 }
 
+/**
+ * nilfs_iget_for_shadow - obtain inode for shadow mapping
+ * @inode: inode object that uses shadow mapping
+ *
+ * nilfs_iget_for_shadow() allocates a pair of inodes that holds page
+ * caches for shadow mapping.  The page cache for data pages is set up
+ * in one inode and the one for b-tree node pages is set up in the
+ * other inode, which is attached to the former inode.
+ *
+ * Return Value: On success, a pointer to the inode for data pages is
+ * returned.  On errors, one of the following negative error code is returned
+ * in a pointer type.
+ *
+ * %-ENOMEM - Insufficient memory available.
+ */
+struct inode *nilfs_iget_for_shadow(struct inode *inode)
+{
+	struct nilfs_iget_args args = {
+		.ino = inode->i_ino, .root = NULL, .cno = 0, .for_gc = false,
+		.for_btnc = false, .for_shadow = true
+	};
+	struct inode *s_inode;
+	int err;
+
+	s_inode = iget5_locked(inode->i_sb, inode->i_ino, nilfs_iget_test,
+			       nilfs_iget_set, &args);
+	if (unlikely(!s_inode))
+		return ERR_PTR(-ENOMEM);
+	if (!(s_inode->i_state & I_NEW))
+		return inode;
+
+	NILFS_I(s_inode)->i_flags = 0;
+	memset(NILFS_I(s_inode)->i_bmap, 0, sizeof(struct nilfs_bmap));
+	mapping_set_gfp_mask(s_inode->i_mapping, GFP_NOFS);
+
+	err = nilfs_attach_btree_node_cache(s_inode);
+	if (unlikely(err)) {
+		iget_failed(s_inode);
+		return ERR_PTR(err);
+	}
+	unlock_new_inode(s_inode);
+	return s_inode;
+}
+
 void nilfs_write_inode_common(struct inode *inode,
 			      struct nilfs_inode *raw_inode, int has_bmap)
 {
...
@@ -471,9 +471,18 @@ int nilfs_mdt_init(struct inode *inode, gfp_t gfp_mask, size_t objsz)
 void nilfs_mdt_clear(struct inode *inode)
 {
 	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);
+	struct nilfs_shadow_map *shadow = mdi->mi_shadow;
 
 	if (mdi->mi_palloc_cache)
 		nilfs_palloc_destroy_cache(inode);
+
+	if (shadow) {
+		struct inode *s_inode = shadow->inode;
+
+		shadow->inode = NULL;
+		iput(s_inode);
+		mdi->mi_shadow = NULL;
+	}
 }
 
 /**
@@ -507,12 +516,15 @@ int nilfs_mdt_setup_shadow_map(struct inode *inode,
 			       struct nilfs_shadow_map *shadow)
 {
 	struct nilfs_mdt_info *mi = NILFS_MDT(inode);
+	struct inode *s_inode;
 
 	INIT_LIST_HEAD(&shadow->frozen_buffers);
-	address_space_init_once(&shadow->frozen_data);
-	nilfs_mapping_init(&shadow->frozen_data, inode);
-	address_space_init_once(&shadow->frozen_btnodes);
-	nilfs_mapping_init(&shadow->frozen_btnodes, inode);
+
+	s_inode = nilfs_iget_for_shadow(inode);
+	if (IS_ERR(s_inode))
+		return PTR_ERR(s_inode);
+
+	shadow->inode = s_inode;
 	mi->mi_shadow = shadow;
 	return 0;
 }
@@ -526,13 +538,14 @@ int nilfs_mdt_save_to_shadow_map(struct inode *inode)
 	struct nilfs_mdt_info *mi = NILFS_MDT(inode);
 	struct nilfs_inode_info *ii = NILFS_I(inode);
 	struct nilfs_shadow_map *shadow = mi->mi_shadow;
+	struct inode *s_inode = shadow->inode;
 	int ret;
 
-	ret = nilfs_copy_dirty_pages(&shadow->frozen_data, inode->i_mapping);
+	ret = nilfs_copy_dirty_pages(s_inode->i_mapping, inode->i_mapping);
 	if (ret)
 		goto out;
 
-	ret = nilfs_copy_dirty_pages(&shadow->frozen_btnodes,
+	ret = nilfs_copy_dirty_pages(NILFS_I(s_inode)->i_assoc_inode->i_mapping,
 				     ii->i_assoc_inode->i_mapping);
 	if (ret)
 		goto out;
@@ -549,7 +562,7 @@ int nilfs_mdt_freeze_buffer(struct inode *inode, struct buffer_head *bh)
 	struct page *page;
 	int blkbits = inode->i_blkbits;
 
-	page = grab_cache_page(&shadow->frozen_data, bh->b_page->index);
+	page = grab_cache_page(shadow->inode->i_mapping, bh->b_page->index);
 	if (!page)
 		return -ENOMEM;
 
@@ -581,7 +594,7 @@ nilfs_mdt_get_frozen_buffer(struct inode *inode, struct buffer_head *bh)
 	struct page *page;
 	int n;
 
-	page = find_lock_page(&shadow->frozen_data, bh->b_page->index);
+	page = find_lock_page(shadow->inode->i_mapping, bh->b_page->index);
 	if (page) {
 		if (page_has_buffers(page)) {
 			n = bh_offset(bh) >> inode->i_blkbits;
@@ -622,11 +635,11 @@ void nilfs_mdt_restore_from_shadow_map(struct inode *inode)
 		nilfs_palloc_clear_cache(inode);
 
 	nilfs_clear_dirty_pages(inode->i_mapping, true);
-	nilfs_copy_back_pages(inode->i_mapping, &shadow->frozen_data);
+	nilfs_copy_back_pages(inode->i_mapping, shadow->inode->i_mapping);
 
 	nilfs_clear_dirty_pages(ii->i_assoc_inode->i_mapping, true);
 	nilfs_copy_back_pages(ii->i_assoc_inode->i_mapping,
-			      &shadow->frozen_btnodes);
+			      NILFS_I(shadow->inode)->i_assoc_inode->i_mapping);
 
 	nilfs_bmap_restore(ii->i_bmap, &shadow->bmap_store);
 
@@ -641,10 +654,11 @@ void nilfs_mdt_clear_shadow_map(struct inode *inode)
 {
 	struct nilfs_mdt_info *mi = NILFS_MDT(inode);
 	struct nilfs_shadow_map *shadow = mi->mi_shadow;
+	struct inode *shadow_btnc_inode = NILFS_I(shadow->inode)->i_assoc_inode;
 
 	down_write(&mi->mi_sem);
 	nilfs_release_frozen_buffers(shadow);
-	truncate_inode_pages(&shadow->frozen_data, 0);
-	truncate_inode_pages(&shadow->frozen_btnodes, 0);
+	truncate_inode_pages(shadow->inode->i_mapping, 0);
+	truncate_inode_pages(shadow_btnc_inode->i_mapping, 0);
 	up_write(&mi->mi_sem);
 }
@@ -18,14 +18,12 @@
 /**
  * struct nilfs_shadow_map - shadow mapping of meta data file
  * @bmap_store: shadow copy of bmap state
- * @frozen_data: shadowed dirty data pages
- * @frozen_btnodes: shadowed dirty b-tree nodes' pages
+ * @inode: holder of page caches used in shadow mapping
  * @frozen_buffers: list of frozen buffers
  */
 struct nilfs_shadow_map {
 	struct nilfs_bmap_store bmap_store;
-	struct address_space frozen_data;
-	struct address_space frozen_btnodes;
+	struct inode *inode;
 	struct list_head frozen_buffers;
 };
 
...
@@ -92,6 +92,7 @@ enum {
 	NILFS_I_BMAP,			/* has bmap and btnode_cache */
 	NILFS_I_GCINODE,		/* inode for GC, on memory only */
 	NILFS_I_BTNC,			/* inode for btree node cache */
+	NILFS_I_SHADOW,			/* inode for shadowed page cache */
 };
 
 /*
@@ -263,6 +264,7 @@ extern struct inode *nilfs_iget_for_gc(struct super_block *sb,
 					unsigned long ino, __u64 cno);
 int nilfs_attach_btree_node_cache(struct inode *inode);
 void nilfs_detach_btree_node_cache(struct inode *inode);
+struct inode *nilfs_iget_for_shadow(struct inode *inode);
 extern void nilfs_update_inode(struct inode *, struct buffer_head *, int);
 extern void nilfs_truncate(struct inode *);
 extern void nilfs_evict_inode(struct inode *);
...