Commit cf2271e7 authored by Jaegeuk Kim

f2fs: avoid retrying wrong recovery routine when error was occurred

This patch eliminates the propagation of recovery errors to the next mount.
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
parent 61e0f2d0
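In short, the fix has two parts: discard_next_dnode() now takes the block address to discard as an argument, and recover_fsync_data() samples NEXT_FREE_BLKADDR(sbi, curseg) before recovery starts, so that a failed recovery can clean up after itself instead of leaving stale state for the next mount. Condensed from the hunks below (surrounding code elided), the error path in recover_fsync_data() ends up roughly as follows:

        if (err) {
                /* drop cached node/meta pages left over from the failed recovery */
                truncate_inode_pages_final(NODE_MAPPING(sbi));
                truncate_inode_pages_final(META_MAPPING(sbi));
        }

        sbi->por_doing = false;

        if (err) {
                /* discard the dnode block address sampled before recovery,
                 * so the next mount does not retry the same roll-forward */
                discard_next_dnode(sbi, blkaddr);

                /* Flush all the NAT/SIT pages */
                while (get_pages(sbi, F2FS_DIRTY_META))
                        sync_meta_pages(sbi, META, LONG_MAX);
        } else if (need_writecp) {
                write_checkpoint(sbi, false);
        }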
@@ -796,6 +796,7 @@ static void wait_on_all_pages_writeback(struct f2fs_sb_info *sbi)
 static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
 {
         struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
+        struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
         nid_t last_nid = 0;
         block_t start_blk;
         struct page *cp_page;
@@ -809,7 +810,7 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
          * This avoids to conduct wrong roll-forward operations and uses
          * metapages, so should be called prior to sync_meta_pages below.
          */
-        discard_next_dnode(sbi);
+        discard_next_dnode(sbi, NEXT_FREE_BLKADDR(sbi, curseg));

         /* Flush all the NAT/SIT pages */
         while (get_pages(sbi, F2FS_DIRTY_META))
...
@@ -1225,7 +1225,7 @@ void destroy_flush_cmd_control(struct f2fs_sb_info *);
 void invalidate_blocks(struct f2fs_sb_info *, block_t);
 void refresh_sit_entry(struct f2fs_sb_info *, block_t, block_t);
 void clear_prefree_segments(struct f2fs_sb_info *);
-void discard_next_dnode(struct f2fs_sb_info *);
+void discard_next_dnode(struct f2fs_sb_info *, block_t);
 int npages_for_summary_flush(struct f2fs_sb_info *);
 void allocate_new_segments(struct f2fs_sb_info *);
 struct page *get_sum_page(struct f2fs_sb_info *, unsigned int);
...
@@ -434,7 +434,9 @@ static int recover_data(struct f2fs_sb_info *sbi,

 int recover_fsync_data(struct f2fs_sb_info *sbi)
 {
+        struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
         struct list_head inode_list;
+        block_t blkaddr;
         int err;
         bool need_writecp = false;

@@ -447,6 +449,9 @@ int recover_fsync_data(struct f2fs_sb_info *sbi)

         /* step #1: find fsynced inode numbers */
         sbi->por_doing = true;
+
+        blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
+
         err = find_fsync_dnodes(sbi, &inode_list);
         if (err)
                 goto out;
@@ -462,8 +467,21 @@ int recover_fsync_data(struct f2fs_sb_info *sbi)
 out:
         destroy_fsync_dnodes(&inode_list);
         kmem_cache_destroy(fsync_entry_slab);
+
+        if (err) {
+                truncate_inode_pages_final(NODE_MAPPING(sbi));
+                truncate_inode_pages_final(META_MAPPING(sbi));
+        }
+
         sbi->por_doing = false;
-        if (!err && need_writecp)
+        if (err) {
+                discard_next_dnode(sbi, blkaddr);
+
+                /* Flush all the NAT/SIT pages */
+                while (get_pages(sbi, F2FS_DIRTY_META))
+                        sync_meta_pages(sbi, META, LONG_MAX);
+        } else if (need_writecp) {
                 write_checkpoint(sbi, false);
+        }
         return err;
 }
...
@@ -379,11 +379,8 @@ static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
         return blkdev_issue_discard(sbi->sb->s_bdev, start, len, GFP_NOFS, 0);
 }

-void discard_next_dnode(struct f2fs_sb_info *sbi)
+void discard_next_dnode(struct f2fs_sb_info *sbi, block_t blkaddr)
 {
-        struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
-        block_t blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
-
         if (f2fs_issue_discard(sbi, blkaddr, 1)) {
                 struct page *page = grab_meta_page(sbi, blkaddr);
                 /* zero-filled page */
...