Commit 4fc29c1a authored by Linus Torvalds

Merge tag 'for-f2fs-4.8' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs

Pull f2fs updates from Jaegeuk Kim:
 "The major change in this version is mitigating cpu overheads on write
  paths by replacing redundant inode page updates with mark_inode_dirty
  calls.  And we tried to reduce lock contentions as well to improve
  filesystem scalability.  Other feature is setting F2FS automatically
  when detecting host-managed SMR.

  Enhancements:
   - ioctl to move a range of data between files
   - inject orphan inode errors
   - avoid flush commands congestion
   - support lazytime

  Bug fixes:
   - return proper results for some dentry operations
   - fix deadlock in add_link failure
   - disable extent_cache for fcollapse/finsert"
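
A minimal sketch of the inode-dirtying pattern described above (simplified; the real helpers in the diffs below also record the inode on the superblock's dirty-inode list before calling into the VFS):

#include <linux/fs.h>

/* Simplified kernel-side sketch, not the verbatim f2fs code: metadata
 * setters now only touch the in-core inode and mark it dirty, instead of
 * calling update_inode_page() to rewrite the inode block right away; the
 * node page is written back later by fsync, writeback or checkpoint. */
void f2fs_mark_inode_dirty_sync(struct inode *inode)
{
	mark_inode_dirty_sync(inode);	/* let the VFS schedule ->write_inode */
}

static inline void f2fs_i_size_write(struct inode *inode, loff_t i_size)
{
	i_size_write(inode, i_size);		/* in-core update only */
	f2fs_mark_inode_dirty_sync(inode);	/* defer the on-disk inode update */
}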

* tag 'for-f2fs-4.8' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs: (68 commits)
  f2fs: clean up coding style and redundancy
  f2fs: get victim segment again after new cp
  f2fs: handle error case with f2fs_bug_on
  f2fs: avoid data race when deciding checkpoin in f2fs_sync_file
  f2fs: support an ioctl to move a range of data blocks
  f2fs: fix to report error number of f2fs_find_entry
  f2fs: avoid memory allocation failure due to a long length
  f2fs: reset default idle interval value
  f2fs: use blk_plug in all the possible paths
  f2fs: fix to avoid data update racing between GC and DIO
  f2fs: add maximum prefree segments
  f2fs: disable extent_cache for fcollapse/finsert inodes
  f2fs: refactor __exchange_data_block for speed up
  f2fs: fix ERR_PTR returned by bio
  f2fs: avoid mark_inode_dirty
  f2fs: move i_size_write in f2fs_write_end
  f2fs: fix to avoid redundant discard during fstrim
  f2fs: avoid mismatching block range for discard
  f2fs: fix incorrect f_bfree calculation in ->statfs
  f2fs: use percpu_rw_semaphore
  ...
parents 0e6acf02 5302fb00
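
The "f2fs: support an ioctl to move a range of data blocks" entry above adds F2FS_IOC_MOVE_RANGE. Below is a hedged userspace sketch of how it is meant to be driven; the magic number, command number and structure layout mirror fs/f2fs/f2fs.h as of this merge and should be verified against the headers actually in use, and the file paths are placeholders.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>

/* Mirrors struct f2fs_move_range in fs/f2fs/f2fs.h (verify before use). */
struct f2fs_move_range {
	uint32_t dst_fd;	/* destination file descriptor */
	uint64_t pos_in;	/* start offset in the source file */
	uint64_t pos_out;	/* start offset in the destination file */
	uint64_t len;		/* number of bytes to move */
};

#define F2FS_IOCTL_MAGIC	0xf5
#define F2FS_IOC_MOVE_RANGE	_IOWR(F2FS_IOCTL_MAGIC, 9, struct f2fs_move_range)

int main(void)
{
	int src = open("/mnt/f2fs/src", O_RDWR);	/* placeholder paths */
	int dst = open("/mnt/f2fs/dst", O_RDWR);
	struct f2fs_move_range mr = {
		.dst_fd = (uint32_t)dst,
		.pos_in = 0,
		.pos_out = 0,
		.len = 4096,
	};

	if (src < 0 || dst < 0 || ioctl(src, F2FS_IOC_MOVE_RANGE, &mr) < 0)
		perror("F2FS_IOC_MOVE_RANGE");
	return 0;
}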
@@ -109,7 +109,9 @@ background_gc=%s       Turn on/off cleaning operations, namely garbage
 disable_roll_forward   Disable the roll-forward recovery routine
 norecovery             Disable the roll-forward recovery routine, mounted read-
                        only (i.e., -o ro,disable_roll_forward)
-discard                Issue discard/TRIM commands when a segment is cleaned.
+discard/nodiscard      Enable/disable real-time discard in f2fs, if discard is
+                       enabled, f2fs will issue discard/TRIM commands when a
+                       segment is cleaned.
 no_heap                Disable heap-style segment allocation which finds free
                        segments for data from the beginning of main area, while
                        for node from the end of main area.
@@ -151,6 +153,9 @@ noinline_data           Disable the inline data feature, inline data feature is
                        enabled by default.
 data_flush             Enable data flushing before checkpoint in order to
                        persist data of regular and symlink.
+mode=%s                Control block allocation mode which supports "adaptive"
+                       and "lfs". In "lfs" mode, there should be no random
+                       writes towards main area.
 ================================================================================
 DEBUGFS ENTRIES
......
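
A small illustration of the mount options documented in the hunk above, using mount(2); the device and mount point below are placeholders:

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* "mode=lfs" disallows random writes to the main area (useful for
	 * host-managed SMR devices); "nodiscard" turns real-time discard off. */
	if (mount("/dev/sdb1", "/mnt/f2fs", "f2fs", 0, "mode=lfs,nodiscard"))
		perror("mount");
	return 0;
}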
...@@ -201,7 +201,6 @@ struct posix_acl *f2fs_get_acl(struct inode *inode, int type) ...@@ -201,7 +201,6 @@ struct posix_acl *f2fs_get_acl(struct inode *inode, int type)
static int __f2fs_set_acl(struct inode *inode, int type, static int __f2fs_set_acl(struct inode *inode, int type,
struct posix_acl *acl, struct page *ipage) struct posix_acl *acl, struct page *ipage)
{ {
struct f2fs_inode_info *fi = F2FS_I(inode);
int name_index; int name_index;
void *value = NULL; void *value = NULL;
size_t size = 0; size_t size = 0;
...@@ -214,7 +213,7 @@ static int __f2fs_set_acl(struct inode *inode, int type, ...@@ -214,7 +213,7 @@ static int __f2fs_set_acl(struct inode *inode, int type,
error = posix_acl_equiv_mode(acl, &inode->i_mode); error = posix_acl_equiv_mode(acl, &inode->i_mode);
if (error < 0) if (error < 0)
return error; return error;
set_acl_inode(fi, inode->i_mode); set_acl_inode(inode, inode->i_mode);
if (error == 0) if (error == 0)
acl = NULL; acl = NULL;
} }
...@@ -233,7 +232,7 @@ static int __f2fs_set_acl(struct inode *inode, int type, ...@@ -233,7 +232,7 @@ static int __f2fs_set_acl(struct inode *inode, int type,
if (acl) { if (acl) {
value = f2fs_acl_to_disk(acl, &size); value = f2fs_acl_to_disk(acl, &size);
if (IS_ERR(value)) { if (IS_ERR(value)) {
clear_inode_flag(fi, FI_ACL_MODE); clear_inode_flag(inode, FI_ACL_MODE);
return (int)PTR_ERR(value); return (int)PTR_ERR(value);
} }
} }
...@@ -244,7 +243,7 @@ static int __f2fs_set_acl(struct inode *inode, int type, ...@@ -244,7 +243,7 @@ static int __f2fs_set_acl(struct inode *inode, int type,
if (!error) if (!error)
set_cached_acl(inode, type, acl); set_cached_acl(inode, type, acl);
clear_inode_flag(fi, FI_ACL_MODE); clear_inode_flag(inode, FI_ACL_MODE);
return error; return error;
} }
...@@ -385,6 +384,8 @@ int f2fs_init_acl(struct inode *inode, struct inode *dir, struct page *ipage, ...@@ -385,6 +384,8 @@ int f2fs_init_acl(struct inode *inode, struct inode *dir, struct page *ipage,
if (error) if (error)
return error; return error;
f2fs_mark_inode_dirty_sync(inode);
if (default_acl) { if (default_acl) {
error = __f2fs_set_acl(inode, ACL_TYPE_DEFAULT, default_acl, error = __f2fs_set_acl(inode, ACL_TYPE_DEFAULT, default_acl,
ipage); ipage);
......
...@@ -37,7 +37,7 @@ struct f2fs_acl_header { ...@@ -37,7 +37,7 @@ struct f2fs_acl_header {
#ifdef CONFIG_F2FS_FS_POSIX_ACL #ifdef CONFIG_F2FS_FS_POSIX_ACL
extern struct posix_acl *f2fs_get_acl(struct inode *, int); extern struct posix_acl *f2fs_get_acl(struct inode *, int);
extern int f2fs_set_acl(struct inode *inode, struct posix_acl *acl, int type); extern int f2fs_set_acl(struct inode *, struct posix_acl *, int);
extern int f2fs_init_acl(struct inode *, struct inode *, struct page *, extern int f2fs_init_acl(struct inode *, struct inode *, struct page *,
struct page *); struct page *);
#else #else
......
...@@ -48,7 +48,8 @@ struct page *grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index) ...@@ -48,7 +48,8 @@ struct page *grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
goto repeat; goto repeat;
} }
f2fs_wait_on_page_writeback(page, META, true); f2fs_wait_on_page_writeback(page, META, true);
SetPageUptodate(page); if (!PageUptodate(page))
SetPageUptodate(page);
return page; return page;
} }
...@@ -266,6 +267,7 @@ static int f2fs_write_meta_pages(struct address_space *mapping, ...@@ -266,6 +267,7 @@ static int f2fs_write_meta_pages(struct address_space *mapping,
struct writeback_control *wbc) struct writeback_control *wbc)
{ {
struct f2fs_sb_info *sbi = F2FS_M_SB(mapping); struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
struct blk_plug plug;
long diff, written; long diff, written;
/* collect a number of dirty meta pages and write together */ /* collect a number of dirty meta pages and write together */
...@@ -278,7 +280,9 @@ static int f2fs_write_meta_pages(struct address_space *mapping, ...@@ -278,7 +280,9 @@ static int f2fs_write_meta_pages(struct address_space *mapping,
/* if mounting is failed, skip writing node pages */ /* if mounting is failed, skip writing node pages */
mutex_lock(&sbi->cp_mutex); mutex_lock(&sbi->cp_mutex);
diff = nr_pages_to_write(sbi, META, wbc); diff = nr_pages_to_write(sbi, META, wbc);
blk_start_plug(&plug);
written = sync_meta_pages(sbi, META, wbc->nr_to_write); written = sync_meta_pages(sbi, META, wbc->nr_to_write);
blk_finish_plug(&plug);
mutex_unlock(&sbi->cp_mutex); mutex_unlock(&sbi->cp_mutex);
wbc->nr_to_write = max((long)0, wbc->nr_to_write - written - diff); wbc->nr_to_write = max((long)0, wbc->nr_to_write - written - diff);
return 0; return 0;
...@@ -366,9 +370,10 @@ static int f2fs_set_meta_page_dirty(struct page *page) ...@@ -366,9 +370,10 @@ static int f2fs_set_meta_page_dirty(struct page *page)
{ {
trace_f2fs_set_page_dirty(page, META); trace_f2fs_set_page_dirty(page, META);
SetPageUptodate(page); if (!PageUptodate(page))
SetPageUptodate(page);
if (!PageDirty(page)) { if (!PageDirty(page)) {
__set_page_dirty_nobuffers(page); f2fs_set_page_dirty_nobuffers(page);
inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_META); inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_META);
SetPagePrivate(page); SetPagePrivate(page);
f2fs_trace_pid(page); f2fs_trace_pid(page);
@@ -510,10 +515,11 @@ void release_orphan_inode(struct f2fs_sb_info *sbi)
 	spin_unlock(&im->ino_lock);
 }
 
-void add_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
+void add_orphan_inode(struct inode *inode)
 {
 	/* add new orphan ino entry into list */
-	__add_ino_entry(sbi, ino, ORPHAN_INO);
+	__add_ino_entry(F2FS_I_SB(inode), inode->i_ino, ORPHAN_INO);
+	update_inode_page(inode);
 }
 
 void remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
@@ -761,28 +767,25 @@ int get_valid_checkpoint(struct f2fs_sb_info *sbi)
 static void __add_dirty_inode(struct inode *inode, enum inode_type type)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-	struct f2fs_inode_info *fi = F2FS_I(inode);
 	int flag = (type == DIR_INODE) ? FI_DIRTY_DIR : FI_DIRTY_FILE;
 
-	if (is_inode_flag_set(fi, flag))
+	if (is_inode_flag_set(inode, flag))
 		return;
 
-	set_inode_flag(fi, flag);
-	list_add_tail(&fi->dirty_list, &sbi->inode_list[type]);
+	set_inode_flag(inode, flag);
+	list_add_tail(&F2FS_I(inode)->dirty_list, &sbi->inode_list[type]);
 	stat_inc_dirty_inode(sbi, type);
 }
 
 static void __remove_dirty_inode(struct inode *inode, enum inode_type type)
 {
-	struct f2fs_inode_info *fi = F2FS_I(inode);
 	int flag = (type == DIR_INODE) ? FI_DIRTY_DIR : FI_DIRTY_FILE;
 
-	if (get_dirty_pages(inode) ||
-			!is_inode_flag_set(F2FS_I(inode), flag))
+	if (get_dirty_pages(inode) || !is_inode_flag_set(inode, flag))
 		return;
 
-	list_del_init(&fi->dirty_list);
-	clear_inode_flag(fi, flag);
+	list_del_init(&F2FS_I(inode)->dirty_list);
+	clear_inode_flag(inode, flag);
 	stat_dec_dirty_inode(F2FS_I_SB(inode), type);
 }
 
@@ -795,13 +798,12 @@ void update_dirty_page(struct inode *inode, struct page *page)
 			!S_ISLNK(inode->i_mode))
 		return;
 
-	if (type != FILE_INODE || test_opt(sbi, DATA_FLUSH)) {
-		spin_lock(&sbi->inode_lock[type]);
+	spin_lock(&sbi->inode_lock[type]);
+	if (type != FILE_INODE || test_opt(sbi, DATA_FLUSH))
 		__add_dirty_inode(inode, type);
-		spin_unlock(&sbi->inode_lock[type]);
-	}
-
 	inode_inc_dirty_pages(inode);
+	spin_unlock(&sbi->inode_lock[type]);
+
 	SetPagePrivate(page);
 	f2fs_trace_pid(page);
 }
...@@ -864,6 +866,34 @@ int sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type) ...@@ -864,6 +866,34 @@ int sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type)
goto retry; goto retry;
} }
int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi)
{
struct list_head *head = &sbi->inode_list[DIRTY_META];
struct inode *inode;
struct f2fs_inode_info *fi;
s64 total = get_pages(sbi, F2FS_DIRTY_IMETA);
while (total--) {
if (unlikely(f2fs_cp_error(sbi)))
return -EIO;
spin_lock(&sbi->inode_lock[DIRTY_META]);
if (list_empty(head)) {
spin_unlock(&sbi->inode_lock[DIRTY_META]);
return 0;
}
fi = list_entry(head->next, struct f2fs_inode_info,
gdirty_list);
inode = igrab(&fi->vfs_inode);
spin_unlock(&sbi->inode_lock[DIRTY_META]);
if (inode) {
update_inode_page(inode);
iput(inode);
}
};
return 0;
}
/* /*
* Freeze all the FS-operations for checkpoint. * Freeze all the FS-operations for checkpoint.
*/ */
...@@ -890,6 +920,14 @@ static int block_operations(struct f2fs_sb_info *sbi) ...@@ -890,6 +920,14 @@ static int block_operations(struct f2fs_sb_info *sbi)
goto retry_flush_dents; goto retry_flush_dents;
} }
if (get_pages(sbi, F2FS_DIRTY_IMETA)) {
f2fs_unlock_all(sbi);
err = f2fs_sync_inode_meta(sbi);
if (err)
goto out;
goto retry_flush_dents;
}
/* /*
* POR: we should ensure that there are no dirty node pages * POR: we should ensure that there are no dirty node pages
* until finishing nat/sit flush. * until finishing nat/sit flush.
...@@ -914,6 +952,8 @@ static int block_operations(struct f2fs_sb_info *sbi) ...@@ -914,6 +952,8 @@ static int block_operations(struct f2fs_sb_info *sbi)
static void unblock_operations(struct f2fs_sb_info *sbi) static void unblock_operations(struct f2fs_sb_info *sbi)
{ {
up_write(&sbi->node_write); up_write(&sbi->node_write);
build_free_nids(sbi);
f2fs_unlock_all(sbi); f2fs_unlock_all(sbi);
} }
...@@ -954,7 +994,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) ...@@ -954,7 +994,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
* This avoids to conduct wrong roll-forward operations and uses * This avoids to conduct wrong roll-forward operations and uses
* metapages, so should be called prior to sync_meta_pages below. * metapages, so should be called prior to sync_meta_pages below.
*/ */
if (discard_next_dnode(sbi, discard_blk)) if (!test_opt(sbi, LFS) && discard_next_dnode(sbi, discard_blk))
invalidate = true; invalidate = true;
/* Flush all the NAT/SIT pages */ /* Flush all the NAT/SIT pages */
......
...@@ -47,6 +47,7 @@ static void update_general_status(struct f2fs_sb_info *sbi) ...@@ -47,6 +47,7 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->ndirty_data = get_pages(sbi, F2FS_DIRTY_DATA); si->ndirty_data = get_pages(sbi, F2FS_DIRTY_DATA);
si->ndirty_dirs = sbi->ndirty_inode[DIR_INODE]; si->ndirty_dirs = sbi->ndirty_inode[DIR_INODE];
si->ndirty_files = sbi->ndirty_inode[FILE_INODE]; si->ndirty_files = sbi->ndirty_inode[FILE_INODE];
si->ndirty_all = sbi->ndirty_inode[DIRTY_META];
si->inmem_pages = get_pages(sbi, F2FS_INMEM_PAGES); si->inmem_pages = get_pages(sbi, F2FS_INMEM_PAGES);
si->wb_bios = atomic_read(&sbi->nr_wb_bios); si->wb_bios = atomic_read(&sbi->nr_wb_bios);
si->total_count = (int)sbi->user_block_count / sbi->blocks_per_seg; si->total_count = (int)sbi->user_block_count / sbi->blocks_per_seg;
...@@ -304,8 +305,8 @@ static int stat_show(struct seq_file *s, void *v) ...@@ -304,8 +305,8 @@ static int stat_show(struct seq_file *s, void *v)
si->inmem_pages, si->wb_bios); si->inmem_pages, si->wb_bios);
seq_printf(s, " - nodes: %4lld in %4d\n", seq_printf(s, " - nodes: %4lld in %4d\n",
si->ndirty_node, si->node_pages); si->ndirty_node, si->node_pages);
seq_printf(s, " - dents: %4lld in dirs:%4d\n", seq_printf(s, " - dents: %4lld in dirs:%4d (%4d)\n",
si->ndirty_dent, si->ndirty_dirs); si->ndirty_dent, si->ndirty_dirs, si->ndirty_all);
seq_printf(s, " - datas: %4lld in files:%4d\n", seq_printf(s, " - datas: %4lld in files:%4d\n",
si->ndirty_data, si->ndirty_files); si->ndirty_data, si->ndirty_files);
seq_printf(s, " - meta: %4lld in %4d\n", seq_printf(s, " - meta: %4lld in %4d\n",
......
@@ -185,8 +185,13 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir,
 		/* no need to allocate new dentry pages to all the indices */
 		dentry_page = find_data_page(dir, bidx);
 		if (IS_ERR(dentry_page)) {
-			room = true;
-			continue;
+			if (PTR_ERR(dentry_page) == -ENOENT) {
+				room = true;
+				continue;
+			} else {
+				*res_page = dentry_page;
+				break;
+			}
 		}
 
 		de = find_in_block(dentry_page, fname, namehash, &max_slots,
@@ -223,19 +228,22 @@ struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
 	struct fscrypt_name fname;
 	int err;
 
-	*res_page = NULL;
-
 	err = fscrypt_setup_filename(dir, child, 1, &fname);
-	if (err)
+	if (err) {
+		*res_page = ERR_PTR(err);
 		return NULL;
+	}
 
 	if (f2fs_has_inline_dentry(dir)) {
+		*res_page = NULL;
 		de = find_in_inline_dir(dir, &fname, res_page);
 		goto out;
 	}
 
-	if (npages == 0)
+	if (npages == 0) {
+		*res_page = NULL;
 		goto out;
+	}
 
 	max_depth = F2FS_I(dir)->i_current_depth;
 	if (unlikely(max_depth > MAX_DIR_HASH_DEPTH)) {
@@ -243,13 +251,13 @@ struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
 				"Corrupted max_depth of %lu: %u",
 				dir->i_ino, max_depth);
 		max_depth = MAX_DIR_HASH_DEPTH;
-		F2FS_I(dir)->i_current_depth = max_depth;
-		mark_inode_dirty(dir);
+		f2fs_i_depth_write(dir, max_depth);
 	}
 
 	for (level = 0; level < max_depth; level++) {
+		*res_page = NULL;
 		de = find_in_level(dir, level, &fname, res_page);
-		if (de)
+		if (de || IS_ERR(*res_page))
 			break;
 	}
 out:
@@ -259,35 +267,22 @@ struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
 
 struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p)
 {
-	struct page *page;
-	struct f2fs_dir_entry *de;
-	struct f2fs_dentry_block *dentry_blk;
-
-	if (f2fs_has_inline_dentry(dir))
-		return f2fs_parent_inline_dir(dir, p);
-
-	page = get_lock_data_page(dir, 0, false);
-	if (IS_ERR(page))
-		return NULL;
+	struct qstr dotdot = QSTR_INIT("..", 2);
 
-	dentry_blk = kmap(page);
-	de = &dentry_blk->dentry[1];
-	*p = page;
-	unlock_page(page);
-	return de;
+	return f2fs_find_entry(dir, &dotdot, p);
 }
 
-ino_t f2fs_inode_by_name(struct inode *dir, struct qstr *qstr)
+ino_t f2fs_inode_by_name(struct inode *dir, struct qstr *qstr,
+							struct page **page)
 {
 	ino_t res = 0;
 	struct f2fs_dir_entry *de;
-	struct page *page;
 
-	de = f2fs_find_entry(dir, qstr, &page);
+	de = f2fs_find_entry(dir, qstr, page);
 	if (de) {
 		res = le32_to_cpu(de->ino);
-		f2fs_dentry_kunmap(dir, page);
-		f2fs_put_page(page, 0);
+		f2fs_dentry_kunmap(dir, *page);
+		f2fs_put_page(*page, 0);
 	}
 
 	return res;
...@@ -303,9 +298,9 @@ void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de, ...@@ -303,9 +298,9 @@ void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
set_de_type(de, inode->i_mode); set_de_type(de, inode->i_mode);
f2fs_dentry_kunmap(dir, page); f2fs_dentry_kunmap(dir, page);
set_page_dirty(page); set_page_dirty(page);
dir->i_mtime = dir->i_ctime = CURRENT_TIME;
mark_inode_dirty(dir);
dir->i_mtime = dir->i_ctime = CURRENT_TIME;
f2fs_mark_inode_dirty_sync(dir);
f2fs_put_page(page, 1); f2fs_put_page(page, 1);
} }
...@@ -385,7 +380,7 @@ struct page *init_inode_metadata(struct inode *inode, struct inode *dir, ...@@ -385,7 +380,7 @@ struct page *init_inode_metadata(struct inode *inode, struct inode *dir,
struct page *page; struct page *page;
int err; int err;
if (is_inode_flag_set(F2FS_I(inode), FI_NEW_INODE)) { if (is_inode_flag_set(inode, FI_NEW_INODE)) {
page = new_inode_page(inode); page = new_inode_page(inode);
if (IS_ERR(page)) if (IS_ERR(page))
return page; return page;
...@@ -429,7 +424,7 @@ struct page *init_inode_metadata(struct inode *inode, struct inode *dir, ...@@ -429,7 +424,7 @@ struct page *init_inode_metadata(struct inode *inode, struct inode *dir,
* This file should be checkpointed during fsync. * This file should be checkpointed during fsync.
* We lost i_pino from now on. * We lost i_pino from now on.
*/ */
if (is_inode_flag_set(F2FS_I(inode), FI_INC_LINK)) { if (is_inode_flag_set(inode, FI_INC_LINK)) {
file_lost_pino(inode); file_lost_pino(inode);
/* /*
* If link the tmpfile to alias through linkat path, * If link the tmpfile to alias through linkat path,
...@@ -437,14 +432,11 @@ struct page *init_inode_metadata(struct inode *inode, struct inode *dir, ...@@ -437,14 +432,11 @@ struct page *init_inode_metadata(struct inode *inode, struct inode *dir,
*/ */
if (inode->i_nlink == 0) if (inode->i_nlink == 0)
remove_orphan_inode(F2FS_I_SB(dir), inode->i_ino); remove_orphan_inode(F2FS_I_SB(dir), inode->i_ino);
inc_nlink(inode); f2fs_i_links_write(inode, true);
} }
return page; return page;
put_error: put_error:
/* truncate empty dir pages */
truncate_inode_pages(&inode->i_data, 0);
clear_nlink(inode); clear_nlink(inode);
update_inode(inode, page); update_inode(inode, page);
f2fs_put_page(page, 1); f2fs_put_page(page, 1);
@@ -454,23 +446,19 @@ struct page *init_inode_metadata(struct inode *inode, struct inode *dir,
 void update_parent_metadata(struct inode *dir, struct inode *inode,
 						unsigned int current_depth)
 {
-	if (inode && is_inode_flag_set(F2FS_I(inode), FI_NEW_INODE)) {
-		if (S_ISDIR(inode->i_mode)) {
-			inc_nlink(dir);
-			set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
-		}
-		clear_inode_flag(F2FS_I(inode), FI_NEW_INODE);
+	if (inode && is_inode_flag_set(inode, FI_NEW_INODE)) {
+		if (S_ISDIR(inode->i_mode))
+			f2fs_i_links_write(dir, true);
+		clear_inode_flag(inode, FI_NEW_INODE);
 	}
 
 	dir->i_mtime = dir->i_ctime = CURRENT_TIME;
-	mark_inode_dirty(dir);
+	f2fs_mark_inode_dirty_sync(dir);
 
-	if (F2FS_I(dir)->i_current_depth != current_depth) {
-		F2FS_I(dir)->i_current_depth = current_depth;
-		set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
-	}
+	if (F2FS_I(dir)->i_current_depth != current_depth)
+		f2fs_i_depth_write(dir, current_depth);
 
-	if (inode && is_inode_flag_set(F2FS_I(inode), FI_INC_LINK))
-		clear_inode_flag(F2FS_I(inode), FI_INC_LINK);
+	if (inode && is_inode_flag_set(inode, FI_INC_LINK))
+		clear_inode_flag(inode, FI_INC_LINK);
 }
int room_for_filename(const void *bitmap, int slots, int max_slots) int room_for_filename(const void *bitmap, int slots, int max_slots)
...@@ -596,9 +584,7 @@ int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name, ...@@ -596,9 +584,7 @@ int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name,
set_page_dirty(dentry_page); set_page_dirty(dentry_page);
if (inode) { if (inode) {
/* we don't need to mark_inode_dirty now */ f2fs_i_pino_write(inode, dir->i_ino);
F2FS_I(inode)->i_pino = dir->i_ino;
update_inode(inode, page);
f2fs_put_page(page, 1); f2fs_put_page(page, 1);
} }
...@@ -607,10 +593,6 @@ int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name, ...@@ -607,10 +593,6 @@ int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name,
if (inode) if (inode)
up_write(&F2FS_I(inode)->i_sem); up_write(&F2FS_I(inode)->i_sem);
if (is_inode_flag_set(F2FS_I(dir), FI_UPDATE_DIR)) {
update_inode_page(dir);
clear_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
}
kunmap(dentry_page); kunmap(dentry_page);
f2fs_put_page(dentry_page, 1); f2fs_put_page(dentry_page, 1);
@@ -657,42 +639,34 @@ int f2fs_do_tmpfile(struct inode *inode, struct inode *dir)
 		err = PTR_ERR(page);
 		goto fail;
 	}
-	/* we don't need to mark_inode_dirty now */
-	update_inode(inode, page);
 	f2fs_put_page(page, 1);
 
-	clear_inode_flag(F2FS_I(inode), FI_NEW_INODE);
+	clear_inode_flag(inode, FI_NEW_INODE);
 fail:
 	up_write(&F2FS_I(inode)->i_sem);
 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
 	return err;
 }
 
-void f2fs_drop_nlink(struct inode *dir, struct inode *inode, struct page *page)
+void f2fs_drop_nlink(struct inode *dir, struct inode *inode)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
 
 	down_write(&F2FS_I(inode)->i_sem);
 
-	if (S_ISDIR(inode->i_mode)) {
-		drop_nlink(dir);
-		if (page)
-			update_inode(dir, page);
-		else
-			update_inode_page(dir);
-	}
+	if (S_ISDIR(inode->i_mode))
+		f2fs_i_links_write(dir, false);
 	inode->i_ctime = CURRENT_TIME;
 
-	drop_nlink(inode);
+	f2fs_i_links_write(inode, false);
 	if (S_ISDIR(inode->i_mode)) {
-		drop_nlink(inode);
-		i_size_write(inode, 0);
+		f2fs_i_links_write(inode, false);
+		f2fs_i_size_write(inode, 0);
 	}
 	up_write(&F2FS_I(inode)->i_sem);
-	update_inode_page(inode);
 
 	if (inode->i_nlink == 0)
-		add_orphan_inode(sbi, inode->i_ino);
+		add_orphan_inode(inode);
 	else
 		release_orphan_inode(sbi);
 }
...@@ -730,9 +704,10 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page, ...@@ -730,9 +704,10 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
set_page_dirty(page); set_page_dirty(page);
dir->i_ctime = dir->i_mtime = CURRENT_TIME; dir->i_ctime = dir->i_mtime = CURRENT_TIME;
f2fs_mark_inode_dirty_sync(dir);
if (inode) if (inode)
f2fs_drop_nlink(dir, inode, NULL); f2fs_drop_nlink(dir, inode);
if (bit_pos == NR_DENTRY_IN_BLOCK && if (bit_pos == NR_DENTRY_IN_BLOCK &&
!truncate_hole(dir, page->index, page->index + 1)) { !truncate_hole(dir, page->index, page->index + 1)) {
......
...@@ -170,8 +170,10 @@ static void __drop_largest_extent(struct inode *inode, ...@@ -170,8 +170,10 @@ static void __drop_largest_extent(struct inode *inode,
{ {
struct extent_info *largest = &F2FS_I(inode)->extent_tree->largest; struct extent_info *largest = &F2FS_I(inode)->extent_tree->largest;
if (fofs < largest->fofs + largest->len && fofs + len > largest->fofs) if (fofs < largest->fofs + largest->len && fofs + len > largest->fofs) {
largest->len = 0; largest->len = 0;
f2fs_mark_inode_dirty_sync(inode);
}
} }
/* return true, if inode page is changed */ /* return true, if inode page is changed */
...@@ -335,11 +337,12 @@ static struct extent_node *__lookup_extent_tree_ret(struct extent_tree *et, ...@@ -335,11 +337,12 @@ static struct extent_node *__lookup_extent_tree_ret(struct extent_tree *et,
return en; return en;
} }
static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi, static struct extent_node *__try_merge_extent_node(struct inode *inode,
struct extent_tree *et, struct extent_info *ei, struct extent_tree *et, struct extent_info *ei,
struct extent_node *prev_ex, struct extent_node *prev_ex,
struct extent_node *next_ex) struct extent_node *next_ex)
{ {
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct extent_node *en = NULL; struct extent_node *en = NULL;
if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei)) { if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei)) {
...@@ -360,7 +363,7 @@ static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi, ...@@ -360,7 +363,7 @@ static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
if (!en) if (!en)
return NULL; return NULL;
__try_update_largest_extent(et, en); __try_update_largest_extent(inode, et, en);
spin_lock(&sbi->extent_lock); spin_lock(&sbi->extent_lock);
if (!list_empty(&en->list)) { if (!list_empty(&en->list)) {
...@@ -371,11 +374,12 @@ static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi, ...@@ -371,11 +374,12 @@ static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
return en; return en;
} }
static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi, static struct extent_node *__insert_extent_tree(struct inode *inode,
struct extent_tree *et, struct extent_info *ei, struct extent_tree *et, struct extent_info *ei,
struct rb_node **insert_p, struct rb_node **insert_p,
struct rb_node *insert_parent) struct rb_node *insert_parent)
{ {
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct rb_node **p = &et->root.rb_node; struct rb_node **p = &et->root.rb_node;
struct rb_node *parent = NULL; struct rb_node *parent = NULL;
struct extent_node *en = NULL; struct extent_node *en = NULL;
...@@ -402,7 +406,7 @@ static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi, ...@@ -402,7 +406,7 @@ static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
if (!en) if (!en)
return NULL; return NULL;
__try_update_largest_extent(et, en); __try_update_largest_extent(inode, et, en);
/* update in global extent list */ /* update in global extent list */
spin_lock(&sbi->extent_lock); spin_lock(&sbi->extent_lock);
...@@ -431,7 +435,7 @@ static unsigned int f2fs_update_extent_tree_range(struct inode *inode, ...@@ -431,7 +435,7 @@ static unsigned int f2fs_update_extent_tree_range(struct inode *inode,
write_lock(&et->lock); write_lock(&et->lock);
if (is_inode_flag_set(F2FS_I(inode), FI_NO_EXTENT)) { if (is_inode_flag_set(inode, FI_NO_EXTENT)) {
write_unlock(&et->lock); write_unlock(&et->lock);
return false; return false;
} }
...@@ -473,7 +477,7 @@ static unsigned int f2fs_update_extent_tree_range(struct inode *inode, ...@@ -473,7 +477,7 @@ static unsigned int f2fs_update_extent_tree_range(struct inode *inode,
set_extent_info(&ei, end, set_extent_info(&ei, end,
end - dei.fofs + dei.blk, end - dei.fofs + dei.blk,
org_end - end); org_end - end);
en1 = __insert_extent_tree(sbi, et, &ei, en1 = __insert_extent_tree(inode, et, &ei,
NULL, NULL); NULL, NULL);
next_en = en1; next_en = en1;
} else { } else {
...@@ -494,7 +498,7 @@ static unsigned int f2fs_update_extent_tree_range(struct inode *inode, ...@@ -494,7 +498,7 @@ static unsigned int f2fs_update_extent_tree_range(struct inode *inode,
} }
if (parts) if (parts)
__try_update_largest_extent(et, en); __try_update_largest_extent(inode, et, en);
else else
__release_extent_node(sbi, et, en); __release_extent_node(sbi, et, en);
...@@ -514,20 +518,20 @@ static unsigned int f2fs_update_extent_tree_range(struct inode *inode, ...@@ -514,20 +518,20 @@ static unsigned int f2fs_update_extent_tree_range(struct inode *inode,
if (blkaddr) { if (blkaddr) {
set_extent_info(&ei, fofs, blkaddr, len); set_extent_info(&ei, fofs, blkaddr, len);
if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en)) if (!__try_merge_extent_node(inode, et, &ei, prev_en, next_en))
__insert_extent_tree(sbi, et, &ei, __insert_extent_tree(inode, et, &ei,
insert_p, insert_parent); insert_p, insert_parent);
/* give up extent_cache, if split and small updates happen */ /* give up extent_cache, if split and small updates happen */
if (dei.len >= 1 && if (dei.len >= 1 &&
prev.len < F2FS_MIN_EXTENT_LEN && prev.len < F2FS_MIN_EXTENT_LEN &&
et->largest.len < F2FS_MIN_EXTENT_LEN) { et->largest.len < F2FS_MIN_EXTENT_LEN) {
et->largest.len = 0; __drop_largest_extent(inode, 0, UINT_MAX);
set_inode_flag(F2FS_I(inode), FI_NO_EXTENT); set_inode_flag(inode, FI_NO_EXTENT);
} }
} }
if (is_inode_flag_set(F2FS_I(inode), FI_NO_EXTENT)) if (is_inode_flag_set(inode, FI_NO_EXTENT))
__free_extent_tree(sbi, et); __free_extent_tree(sbi, et);
write_unlock(&et->lock); write_unlock(&et->lock);
...@@ -627,6 +631,19 @@ unsigned int f2fs_destroy_extent_node(struct inode *inode) ...@@ -627,6 +631,19 @@ unsigned int f2fs_destroy_extent_node(struct inode *inode)
return node_cnt; return node_cnt;
} }
void f2fs_drop_extent_tree(struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct extent_tree *et = F2FS_I(inode)->extent_tree;
set_inode_flag(inode, FI_NO_EXTENT);
write_lock(&et->lock);
__free_extent_tree(sbi, et);
__drop_largest_extent(inode, 0, UINT_MAX);
write_unlock(&et->lock);
}
void f2fs_destroy_extent_tree(struct inode *inode) void f2fs_destroy_extent_tree(struct inode *inode)
{ {
struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
...@@ -685,9 +702,7 @@ void f2fs_update_extent_cache(struct dnode_of_data *dn) ...@@ -685,9 +702,7 @@ void f2fs_update_extent_cache(struct dnode_of_data *dn)
fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) + fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
dn->ofs_in_node; dn->ofs_in_node;
f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, 1);
if (f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, 1))
sync_inode_page(dn);
} }
void f2fs_update_extent_cache_range(struct dnode_of_data *dn, void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
...@@ -697,8 +712,7 @@ void f2fs_update_extent_cache_range(struct dnode_of_data *dn, ...@@ -697,8 +712,7 @@ void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
if (!f2fs_may_extent_tree(dn->inode)) if (!f2fs_may_extent_tree(dn->inode))
return; return;
if (f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, len)) f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, len);
sync_inode_page(dn);
} }
void init_extent_cache_info(struct f2fs_sb_info *sbi) void init_extent_cache_info(struct f2fs_sb_info *sbi)
......
...@@ -594,11 +594,11 @@ static void move_encrypted_block(struct inode *inode, block_t bidx) ...@@ -594,11 +594,11 @@ static void move_encrypted_block(struct inode *inode, block_t bidx)
/* write page */ /* write page */
lock_page(fio.encrypted_page); lock_page(fio.encrypted_page);
if (unlikely(!PageUptodate(fio.encrypted_page))) { if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) {
err = -EIO; err = -EIO;
goto put_page_out; goto put_page_out;
} }
if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) { if (unlikely(!PageUptodate(fio.encrypted_page))) {
err = -EIO; err = -EIO;
goto put_page_out; goto put_page_out;
} }
...@@ -619,9 +619,9 @@ static void move_encrypted_block(struct inode *inode, block_t bidx) ...@@ -619,9 +619,9 @@ static void move_encrypted_block(struct inode *inode, block_t bidx)
f2fs_submit_page_mbio(&fio); f2fs_submit_page_mbio(&fio);
f2fs_update_data_blkaddr(&dn, newaddr); f2fs_update_data_blkaddr(&dn, newaddr);
set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE); set_inode_flag(inode, FI_APPEND_WRITE);
if (page->index == 0) if (page->index == 0)
set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN); set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
put_page_out: put_page_out:
f2fs_put_page(fio.encrypted_page, 1); f2fs_put_page(fio.encrypted_page, 1);
recover_block: recover_block:
@@ -656,12 +656,23 @@ static void move_data_page(struct inode *inode, block_t bidx, int gc_type)
 			.page = page,
 			.encrypted_page = NULL,
 		};
+		bool is_dirty = PageDirty(page);
+		int err;
+
+retry:
 		set_page_dirty(page);
 		f2fs_wait_on_page_writeback(page, DATA, true);
 		if (clear_page_dirty_for_io(page))
 			inode_dec_dirty_pages(inode);
+
 		set_cold_data(page);
-		do_write_data_page(&fio);
+
+		err = do_write_data_page(&fio);
+		if (err == -ENOMEM && is_dirty) {
+			congestion_wait(BLK_RW_ASYNC, HZ/50);
+			goto retry;
+		}
+
 		clear_cold_data(page);
 	}
 out:
...@@ -748,12 +759,32 @@ static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, ...@@ -748,12 +759,32 @@ static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
/* phase 3 */ /* phase 3 */
inode = find_gc_inode(gc_list, dni.ino); inode = find_gc_inode(gc_list, dni.ino);
if (inode) { if (inode) {
struct f2fs_inode_info *fi = F2FS_I(inode);
bool locked = false;
if (S_ISREG(inode->i_mode)) {
if (!down_write_trylock(&fi->dio_rwsem[READ]))
continue;
if (!down_write_trylock(
&fi->dio_rwsem[WRITE])) {
up_write(&fi->dio_rwsem[READ]);
continue;
}
locked = true;
}
start_bidx = start_bidx_of_node(nofs, inode) start_bidx = start_bidx_of_node(nofs, inode)
+ ofs_in_node; + ofs_in_node;
if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
move_encrypted_block(inode, start_bidx); move_encrypted_block(inode, start_bidx);
else else
move_data_page(inode, start_bidx, gc_type); move_data_page(inode, start_bidx, gc_type);
if (locked) {
up_write(&fi->dio_rwsem[WRITE]);
up_write(&fi->dio_rwsem[READ]);
}
stat_inc_data_blk_count(sbi, 1, gc_type); stat_inc_data_blk_count(sbi, 1, gc_type);
} }
} }
...@@ -802,6 +833,10 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi, ...@@ -802,6 +833,10 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
blk_start_plug(&plug); blk_start_plug(&plug);
for (segno = start_segno; segno < end_segno; segno++) { for (segno = start_segno; segno < end_segno; segno++) {
if (get_valid_blocks(sbi, segno, 1) == 0)
continue;
/* find segment summary of victim */ /* find segment summary of victim */
sum_page = find_get_page(META_MAPPING(sbi), sum_page = find_get_page(META_MAPPING(sbi),
GET_SUM_BLOCK(sbi, segno)); GET_SUM_BLOCK(sbi, segno));
@@ -877,10 +912,13 @@ int f2fs_gc(struct f2fs_sb_info *sbi, bool sync)
 		 * enough free sections, we should flush dent/node blocks and do
 		 * garbage collections.
 		 */
-		if (__get_victim(sbi, &segno, gc_type) || prefree_segments(sbi))
+		if (__get_victim(sbi, &segno, gc_type) ||
+				prefree_segments(sbi)) {
 			write_checkpoint(sbi, &cpc);
-		else if (has_not_enough_free_secs(sbi, 0))
+			segno = NULL_SEGNO;
+		} else if (has_not_enough_free_secs(sbi, 0)) {
 			write_checkpoint(sbi, &cpc);
+		}
 	}
if (segno == NULL_SEGNO && !__get_victim(sbi, &segno, gc_type)) if (segno == NULL_SEGNO && !__get_victim(sbi, &segno, gc_type))
......
...@@ -59,7 +59,8 @@ void read_inline_data(struct page *page, struct page *ipage) ...@@ -59,7 +59,8 @@ void read_inline_data(struct page *page, struct page *ipage)
memcpy(dst_addr, src_addr, MAX_INLINE_DATA); memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
flush_dcache_page(page); flush_dcache_page(page);
kunmap_atomic(dst_addr); kunmap_atomic(dst_addr);
SetPageUptodate(page); if (!PageUptodate(page))
SetPageUptodate(page);
} }
bool truncate_inline_inode(struct page *ipage, u64 from) bool truncate_inline_inode(struct page *ipage, u64 from)
...@@ -73,7 +74,7 @@ bool truncate_inline_inode(struct page *ipage, u64 from) ...@@ -73,7 +74,7 @@ bool truncate_inline_inode(struct page *ipage, u64 from)
f2fs_wait_on_page_writeback(ipage, NODE, true); f2fs_wait_on_page_writeback(ipage, NODE, true);
memset(addr + from, 0, MAX_INLINE_DATA - from); memset(addr + from, 0, MAX_INLINE_DATA - from);
set_page_dirty(ipage);
return true; return true;
} }
...@@ -97,7 +98,8 @@ int f2fs_read_inline_data(struct inode *inode, struct page *page) ...@@ -97,7 +98,8 @@ int f2fs_read_inline_data(struct inode *inode, struct page *page)
else else
read_inline_data(page, ipage); read_inline_data(page, ipage);
SetPageUptodate(page); if (!PageUptodate(page))
SetPageUptodate(page);
f2fs_put_page(ipage, 1); f2fs_put_page(ipage, 1);
unlock_page(page); unlock_page(page);
return 0; return 0;
...@@ -139,7 +141,7 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page) ...@@ -139,7 +141,7 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
inode_dec_dirty_pages(dn->inode); inode_dec_dirty_pages(dn->inode);
/* this converted inline_data should be recovered. */ /* this converted inline_data should be recovered. */
set_inode_flag(F2FS_I(dn->inode), FI_APPEND_WRITE); set_inode_flag(dn->inode, FI_APPEND_WRITE);
/* clear inline data and flag after data writeback */ /* clear inline data and flag after data writeback */
truncate_inline_inode(dn->inode_page, 0); truncate_inline_inode(dn->inode_page, 0);
...@@ -147,7 +149,6 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page) ...@@ -147,7 +149,6 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
clear_out: clear_out:
stat_dec_inline_inode(dn->inode); stat_dec_inline_inode(dn->inode);
f2fs_clear_inline_inode(dn->inode); f2fs_clear_inline_inode(dn->inode);
sync_inode_page(dn);
f2fs_put_dnode(dn); f2fs_put_dnode(dn);
return 0; return 0;
} }
...@@ -213,11 +214,11 @@ int f2fs_write_inline_data(struct inode *inode, struct page *page) ...@@ -213,11 +214,11 @@ int f2fs_write_inline_data(struct inode *inode, struct page *page)
dst_addr = inline_data_addr(dn.inode_page); dst_addr = inline_data_addr(dn.inode_page);
memcpy(dst_addr, src_addr, MAX_INLINE_DATA); memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
kunmap_atomic(src_addr); kunmap_atomic(src_addr);
set_page_dirty(dn.inode_page);
set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE); set_inode_flag(inode, FI_APPEND_WRITE);
set_inode_flag(F2FS_I(inode), FI_DATA_EXIST); set_inode_flag(inode, FI_DATA_EXIST);
sync_inode_page(&dn);
clear_inline_node(dn.inode_page); clear_inline_node(dn.inode_page);
f2fs_put_dnode(&dn); f2fs_put_dnode(&dn);
return 0; return 0;
...@@ -253,10 +254,10 @@ bool recover_inline_data(struct inode *inode, struct page *npage) ...@@ -253,10 +254,10 @@ bool recover_inline_data(struct inode *inode, struct page *npage)
dst_addr = inline_data_addr(ipage); dst_addr = inline_data_addr(ipage);
memcpy(dst_addr, src_addr, MAX_INLINE_DATA); memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
set_inode_flag(F2FS_I(inode), FI_INLINE_DATA); set_inode_flag(inode, FI_INLINE_DATA);
set_inode_flag(F2FS_I(inode), FI_DATA_EXIST); set_inode_flag(inode, FI_DATA_EXIST);
update_inode(inode, ipage); set_page_dirty(ipage);
f2fs_put_page(ipage, 1); f2fs_put_page(ipage, 1);
return true; return true;
} }
...@@ -267,7 +268,6 @@ bool recover_inline_data(struct inode *inode, struct page *npage) ...@@ -267,7 +268,6 @@ bool recover_inline_data(struct inode *inode, struct page *npage)
if (!truncate_inline_inode(ipage, 0)) if (!truncate_inline_inode(ipage, 0))
return false; return false;
f2fs_clear_inline_inode(inode); f2fs_clear_inline_inode(inode);
update_inode(inode, ipage);
f2fs_put_page(ipage, 1); f2fs_put_page(ipage, 1);
} else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) { } else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) {
if (truncate_blocks(inode, 0, false)) if (truncate_blocks(inode, 0, false))
...@@ -289,8 +289,10 @@ struct f2fs_dir_entry *find_in_inline_dir(struct inode *dir, ...@@ -289,8 +289,10 @@ struct f2fs_dir_entry *find_in_inline_dir(struct inode *dir,
f2fs_hash_t namehash; f2fs_hash_t namehash;
ipage = get_node_page(sbi, dir->i_ino); ipage = get_node_page(sbi, dir->i_ino);
if (IS_ERR(ipage)) if (IS_ERR(ipage)) {
*res_page = ipage;
return NULL; return NULL;
}
namehash = f2fs_dentry_hash(&name); namehash = f2fs_dentry_hash(&name);
...@@ -307,25 +309,6 @@ struct f2fs_dir_entry *find_in_inline_dir(struct inode *dir, ...@@ -307,25 +309,6 @@ struct f2fs_dir_entry *find_in_inline_dir(struct inode *dir,
return de; return de;
} }
struct f2fs_dir_entry *f2fs_parent_inline_dir(struct inode *dir,
struct page **p)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
struct page *ipage;
struct f2fs_dir_entry *de;
struct f2fs_inline_dentry *dentry_blk;
ipage = get_node_page(sbi, dir->i_ino);
if (IS_ERR(ipage))
return NULL;
dentry_blk = inline_data_addr(ipage);
de = &dentry_blk->dentry[1];
*p = ipage;
unlock_page(ipage);
return de;
}
int make_empty_inline_dir(struct inode *inode, struct inode *parent, int make_empty_inline_dir(struct inode *inode, struct inode *parent,
struct page *ipage) struct page *ipage)
{ {
...@@ -340,10 +323,8 @@ int make_empty_inline_dir(struct inode *inode, struct inode *parent, ...@@ -340,10 +323,8 @@ int make_empty_inline_dir(struct inode *inode, struct inode *parent,
set_page_dirty(ipage); set_page_dirty(ipage);
/* update i_size to MAX_INLINE_DATA */ /* update i_size to MAX_INLINE_DATA */
if (i_size_read(inode) < MAX_INLINE_DATA) { if (i_size_read(inode) < MAX_INLINE_DATA)
i_size_write(inode, MAX_INLINE_DATA); f2fs_i_size_write(inode, MAX_INLINE_DATA);
set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
}
return 0; return 0;
} }
...@@ -392,22 +373,19 @@ static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage, ...@@ -392,22 +373,19 @@ static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage,
NR_INLINE_DENTRY * F2FS_SLOT_LEN); NR_INLINE_DENTRY * F2FS_SLOT_LEN);
kunmap_atomic(dentry_blk); kunmap_atomic(dentry_blk);
SetPageUptodate(page); if (!PageUptodate(page))
SetPageUptodate(page);
set_page_dirty(page); set_page_dirty(page);
/* clear inline dir and flag after data writeback */ /* clear inline dir and flag after data writeback */
truncate_inline_inode(ipage, 0); truncate_inline_inode(ipage, 0);
stat_dec_inline_dir(dir); stat_dec_inline_dir(dir);
clear_inode_flag(F2FS_I(dir), FI_INLINE_DENTRY); clear_inode_flag(dir, FI_INLINE_DENTRY);
F2FS_I(dir)->i_current_depth = 1; f2fs_i_depth_write(dir, 1);
if (i_size_read(dir) < PAGE_SIZE) { if (i_size_read(dir) < PAGE_SIZE)
i_size_write(dir, PAGE_SIZE); f2fs_i_size_write(dir, PAGE_SIZE);
set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
}
sync_inode_page(&dn);
out: out:
f2fs_put_page(page, 1); f2fs_put_page(page, 1);
return err; return err;
...@@ -465,7 +443,6 @@ static int f2fs_move_rehashed_dirents(struct inode *dir, struct page *ipage, ...@@ -465,7 +443,6 @@ static int f2fs_move_rehashed_dirents(struct inode *dir, struct page *ipage,
struct f2fs_inline_dentry *inline_dentry) struct f2fs_inline_dentry *inline_dentry)
{ {
struct f2fs_inline_dentry *backup_dentry; struct f2fs_inline_dentry *backup_dentry;
struct f2fs_inode_info *fi = F2FS_I(dir);
int err; int err;
backup_dentry = f2fs_kmalloc(sizeof(struct f2fs_inline_dentry), backup_dentry = f2fs_kmalloc(sizeof(struct f2fs_inline_dentry),
...@@ -487,16 +464,15 @@ static int f2fs_move_rehashed_dirents(struct inode *dir, struct page *ipage, ...@@ -487,16 +464,15 @@ static int f2fs_move_rehashed_dirents(struct inode *dir, struct page *ipage,
lock_page(ipage); lock_page(ipage);
stat_dec_inline_dir(dir); stat_dec_inline_dir(dir);
clear_inode_flag(fi, FI_INLINE_DENTRY); clear_inode_flag(dir, FI_INLINE_DENTRY);
update_inode(dir, ipage);
kfree(backup_dentry); kfree(backup_dentry);
return 0; return 0;
recover: recover:
lock_page(ipage); lock_page(ipage);
memcpy(inline_dentry, backup_dentry, MAX_INLINE_DATA); memcpy(inline_dentry, backup_dentry, MAX_INLINE_DATA);
fi->i_current_depth = 0; f2fs_i_depth_write(dir, 0);
i_size_write(dir, MAX_INLINE_DATA); f2fs_i_size_write(dir, MAX_INLINE_DATA);
update_inode(dir, ipage); set_page_dirty(ipage);
f2fs_put_page(ipage, 1); f2fs_put_page(ipage, 1);
kfree(backup_dentry); kfree(backup_dentry);
...@@ -560,8 +536,7 @@ int f2fs_add_inline_entry(struct inode *dir, const struct qstr *name, ...@@ -560,8 +536,7 @@ int f2fs_add_inline_entry(struct inode *dir, const struct qstr *name,
/* we don't need to mark_inode_dirty now */ /* we don't need to mark_inode_dirty now */
if (inode) { if (inode) {
F2FS_I(inode)->i_pino = dir->i_ino; f2fs_i_pino_write(inode, dir->i_ino);
update_inode(inode, page);
f2fs_put_page(page, 1); f2fs_put_page(page, 1);
} }
...@@ -569,11 +544,6 @@ int f2fs_add_inline_entry(struct inode *dir, const struct qstr *name, ...@@ -569,11 +544,6 @@ int f2fs_add_inline_entry(struct inode *dir, const struct qstr *name,
fail: fail:
if (inode) if (inode)
up_write(&F2FS_I(inode)->i_sem); up_write(&F2FS_I(inode)->i_sem);
if (is_inode_flag_set(F2FS_I(dir), FI_UPDATE_DIR)) {
update_inode(dir, ipage);
clear_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
}
out: out:
f2fs_put_page(ipage, 1); f2fs_put_page(ipage, 1);
return err; return err;
...@@ -597,13 +567,13 @@ void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page, ...@@ -597,13 +567,13 @@ void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page,
&inline_dentry->dentry_bitmap); &inline_dentry->dentry_bitmap);
set_page_dirty(page); set_page_dirty(page);
f2fs_put_page(page, 1);
dir->i_ctime = dir->i_mtime = CURRENT_TIME; dir->i_ctime = dir->i_mtime = CURRENT_TIME;
f2fs_mark_inode_dirty_sync(dir);
if (inode) if (inode)
f2fs_drop_nlink(dir, inode, page); f2fs_drop_nlink(dir, inode);
f2fs_put_page(page, 1);
} }
bool f2fs_empty_inline_dir(struct inode *dir) bool f2fs_empty_inline_dir(struct inode *dir)
......
...@@ -18,6 +18,13 @@ ...@@ -18,6 +18,13 @@
#include <trace/events/f2fs.h> #include <trace/events/f2fs.h>
void f2fs_mark_inode_dirty_sync(struct inode *inode)
{
if (f2fs_inode_dirtied(inode))
return;
mark_inode_dirty_sync(inode);
}
void f2fs_set_inode_flags(struct inode *inode) void f2fs_set_inode_flags(struct inode *inode)
{ {
unsigned int flags = F2FS_I(inode)->i_flags; unsigned int flags = F2FS_I(inode)->i_flags;
...@@ -35,6 +42,7 @@ void f2fs_set_inode_flags(struct inode *inode) ...@@ -35,6 +42,7 @@ void f2fs_set_inode_flags(struct inode *inode)
new_fl |= S_DIRSYNC; new_fl |= S_DIRSYNC;
inode_set_flags(inode, new_fl, inode_set_flags(inode, new_fl,
S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC); S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
f2fs_mark_inode_dirty_sync(inode);
} }
static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri) static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
...@@ -85,8 +93,8 @@ static void __recover_inline_status(struct inode *inode, struct page *ipage) ...@@ -85,8 +93,8 @@ static void __recover_inline_status(struct inode *inode, struct page *ipage)
if (*start++) { if (*start++) {
f2fs_wait_on_page_writeback(ipage, NODE, true); f2fs_wait_on_page_writeback(ipage, NODE, true);
set_inode_flag(F2FS_I(inode), FI_DATA_EXIST); set_inode_flag(inode, FI_DATA_EXIST);
set_raw_inline(F2FS_I(inode), F2FS_INODE(ipage)); set_raw_inline(inode, F2FS_INODE(ipage));
set_page_dirty(ipage); set_page_dirty(ipage);
return; return;
} }
...@@ -141,7 +149,7 @@ static int do_read_inode(struct inode *inode) ...@@ -141,7 +149,7 @@ static int do_read_inode(struct inode *inode)
if (f2fs_init_extent_tree(inode, &ri->i_ext)) if (f2fs_init_extent_tree(inode, &ri->i_ext))
set_page_dirty(node_page); set_page_dirty(node_page);
get_inline_info(fi, ri); get_inline_info(inode, ri);
/* check data exist */ /* check data exist */
if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode)) if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
...@@ -151,7 +159,10 @@ static int do_read_inode(struct inode *inode) ...@@ -151,7 +159,10 @@ static int do_read_inode(struct inode *inode)
__get_inode_rdev(inode, ri); __get_inode_rdev(inode, ri);
if (__written_first_block(ri)) if (__written_first_block(ri))
set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN); set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
if (!need_inode_block_update(sbi, inode->i_ino))
fi->last_disk_size = inode->i_size;
f2fs_put_page(node_page, 1); f2fs_put_page(node_page, 1);
...@@ -227,6 +238,8 @@ int update_inode(struct inode *inode, struct page *node_page) ...@@ -227,6 +238,8 @@ int update_inode(struct inode *inode, struct page *node_page)
{ {
struct f2fs_inode *ri; struct f2fs_inode *ri;
f2fs_inode_synced(inode);
f2fs_wait_on_page_writeback(node_page, NODE, true); f2fs_wait_on_page_writeback(node_page, NODE, true);
ri = F2FS_INODE(node_page); ri = F2FS_INODE(node_page);
...@@ -244,7 +257,7 @@ int update_inode(struct inode *inode, struct page *node_page) ...@@ -244,7 +257,7 @@ int update_inode(struct inode *inode, struct page *node_page)
&ri->i_ext); &ri->i_ext);
else else
memset(&ri->i_ext, 0, sizeof(ri->i_ext)); memset(&ri->i_ext, 0, sizeof(ri->i_ext));
set_raw_inline(F2FS_I(inode), ri); set_raw_inline(inode, ri);
ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec); ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
ri->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec); ri->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
...@@ -261,7 +274,6 @@ int update_inode(struct inode *inode, struct page *node_page) ...@@ -261,7 +274,6 @@ int update_inode(struct inode *inode, struct page *node_page)
__set_inode_rdev(inode, ri); __set_inode_rdev(inode, ri);
set_cold_node(inode, node_page); set_cold_node(inode, node_page);
clear_inode_flag(F2FS_I(inode), FI_DIRTY_INODE);
/* deleted inode */ /* deleted inode */
if (inode->i_nlink == 0) if (inode->i_nlink == 0)
...@@ -285,6 +297,7 @@ int update_inode_page(struct inode *inode) ...@@ -285,6 +297,7 @@ int update_inode_page(struct inode *inode)
} else if (err != -ENOENT) { } else if (err != -ENOENT) {
f2fs_stop_checkpoint(sbi, false); f2fs_stop_checkpoint(sbi, false);
} }
f2fs_inode_synced(inode);
return 0; return 0;
} }
ret = update_inode(inode, node_page); ret = update_inode(inode, node_page);
...@@ -300,7 +313,7 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc) ...@@ -300,7 +313,7 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
inode->i_ino == F2FS_META_INO(sbi)) inode->i_ino == F2FS_META_INO(sbi))
return 0; return 0;
if (!is_inode_flag_set(F2FS_I(inode), FI_DIRTY_INODE)) if (!is_inode_flag_set(inode, FI_DIRTY_INODE))
return 0; return 0;
/* /*
...@@ -318,8 +331,7 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc) ...@@ -318,8 +331,7 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
void f2fs_evict_inode(struct inode *inode) void f2fs_evict_inode(struct inode *inode)
{ {
struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct f2fs_inode_info *fi = F2FS_I(inode); nid_t xnid = F2FS_I(inode)->i_xattr_nid;
nid_t xnid = fi->i_xattr_nid;
int err = 0; int err = 0;
/* some remained atomic pages should discarded */ /* some remained atomic pages should discarded */
...@@ -341,12 +353,17 @@ void f2fs_evict_inode(struct inode *inode) ...@@ -341,12 +353,17 @@ void f2fs_evict_inode(struct inode *inode)
if (inode->i_nlink || is_bad_inode(inode)) if (inode->i_nlink || is_bad_inode(inode))
goto no_delete; goto no_delete;
#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(FAULT_EVICT_INODE))
goto no_delete;
#endif
sb_start_intwrite(inode->i_sb); sb_start_intwrite(inode->i_sb);
set_inode_flag(fi, FI_NO_ALLOC); set_inode_flag(inode, FI_NO_ALLOC);
i_size_write(inode, 0); i_size_write(inode, 0);
retry: retry:
if (F2FS_HAS_BLOCKS(inode)) if (F2FS_HAS_BLOCKS(inode))
err = f2fs_truncate(inode, true); err = f2fs_truncate(inode);
if (!err) { if (!err) {
f2fs_lock_op(sbi); f2fs_lock_op(sbi);
...@@ -360,6 +377,8 @@ void f2fs_evict_inode(struct inode *inode) ...@@ -360,6 +377,8 @@ void f2fs_evict_inode(struct inode *inode)
goto retry; goto retry;
} }
if (err)
update_inode_page(inode);
sb_end_intwrite(inode->i_sb); sb_end_intwrite(inode->i_sb);
no_delete: no_delete:
stat_dec_inline_xattr(inode); stat_dec_inline_xattr(inode);
...@@ -369,13 +388,13 @@ void f2fs_evict_inode(struct inode *inode) ...@@ -369,13 +388,13 @@ void f2fs_evict_inode(struct inode *inode)
invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino, inode->i_ino); invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino, inode->i_ino);
if (xnid) if (xnid)
invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid); invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
if (is_inode_flag_set(fi, FI_APPEND_WRITE)) if (is_inode_flag_set(inode, FI_APPEND_WRITE))
add_ino_entry(sbi, inode->i_ino, APPEND_INO); add_ino_entry(sbi, inode->i_ino, APPEND_INO);
if (is_inode_flag_set(fi, FI_UPDATE_WRITE)) if (is_inode_flag_set(inode, FI_UPDATE_WRITE))
add_ino_entry(sbi, inode->i_ino, UPDATE_INO); add_ino_entry(sbi, inode->i_ino, UPDATE_INO);
if (is_inode_flag_set(fi, FI_FREE_NID)) { if (is_inode_flag_set(inode, FI_FREE_NID)) {
alloc_nid_failed(sbi, inode->i_ino); alloc_nid_failed(sbi, inode->i_ino);
clear_inode_flag(fi, FI_FREE_NID); clear_inode_flag(inode, FI_FREE_NID);
} }
f2fs_bug_on(sbi, err && f2fs_bug_on(sbi, err &&
!exist_written_data(sbi, inode->i_ino, ORPHAN_INO)); !exist_written_data(sbi, inode->i_ino, ORPHAN_INO));
...@@ -407,11 +426,11 @@ void handle_failed_inode(struct inode *inode) ...@@ -407,11 +426,11 @@ void handle_failed_inode(struct inode *inode)
f2fs_msg(sbi->sb, KERN_WARNING, f2fs_msg(sbi->sb, KERN_WARNING,
"Too many orphan inodes, run fsck to fix."); "Too many orphan inodes, run fsck to fix.");
} else { } else {
add_orphan_inode(sbi, inode->i_ino); add_orphan_inode(inode);
} }
alloc_nid_done(sbi, inode->i_ino); alloc_nid_done(sbi, inode->i_ino);
} else { } else {
set_inode_flag(F2FS_I(inode), FI_FREE_NID); set_inode_flag(inode, FI_FREE_NID);
} }
f2fs_unlock_op(sbi); f2fs_unlock_op(sbi);
......
...@@ -15,18 +15,21 @@ ...@@ -15,18 +15,21 @@
#define NAT_BLOCK_OFFSET(start_nid) (start_nid / NAT_ENTRY_PER_BLOCK) #define NAT_BLOCK_OFFSET(start_nid) (start_nid / NAT_ENTRY_PER_BLOCK)
/* # of pages to perform synchronous readahead before building free nids */ /* # of pages to perform synchronous readahead before building free nids */
#define FREE_NID_PAGES 4 #define FREE_NID_PAGES 8
#define MAX_FREE_NIDS (NAT_ENTRY_PER_BLOCK * FREE_NID_PAGES)
#define DEF_RA_NID_PAGES 4 /* # of nid pages to be readaheaded */ #define DEF_RA_NID_PAGES 0 /* # of nid pages to be readaheaded */
/* maximum readahead size for node during getting data blocks */ /* maximum readahead size for node during getting data blocks */
#define MAX_RA_NODE 128 #define MAX_RA_NODE 128
/* control the memory footprint threshold (10MB per 1GB ram) */ /* control the memory footprint threshold (10MB per 1GB ram) */
#define DEF_RAM_THRESHOLD 10 #define DEF_RAM_THRESHOLD 1
/* control dirty nats ratio threshold (default: 10% over max nid count) */ /* control dirty nats ratio threshold (default: 10% over max nid count) */
#define DEF_DIRTY_NAT_RATIO_THRESHOLD 10 #define DEF_DIRTY_NAT_RATIO_THRESHOLD 10
/* control total # of nats */
#define DEF_NAT_CACHE_THRESHOLD 100000
/* vector size for gang look-up from nat cache that consists of radix tree */ /* vector size for gang look-up from nat cache that consists of radix tree */
#define NATVEC_SIZE 64 #define NATVEC_SIZE 64
...@@ -126,6 +129,11 @@ static inline bool excess_dirty_nats(struct f2fs_sb_info *sbi) ...@@ -126,6 +129,11 @@ static inline bool excess_dirty_nats(struct f2fs_sb_info *sbi)
NM_I(sbi)->dirty_nats_ratio / 100; NM_I(sbi)->dirty_nats_ratio / 100;
} }
static inline bool excess_cached_nats(struct f2fs_sb_info *sbi)
{
return NM_I(sbi)->nat_cnt >= DEF_NAT_CACHE_THRESHOLD;
}
enum mem_type { enum mem_type {
FREE_NIDS, /* indicates the free nid list */ FREE_NIDS, /* indicates the free nid list */
NAT_ENTRIES, /* indicates the cached nat entry */ NAT_ENTRIES, /* indicates the cached nat entry */
......
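Editor's note (not part of the commit): the new free-nid ceiling above works out as follows, assuming the usual 4 KB f2fs block and the packed 9-byte struct f2fs_nat_entry, so NAT_ENTRY_PER_BLOCK is 455 and MAX_FREE_NIDS is 455 * 8 = 3640; DEF_NAT_CACHE_THRESHOLD caps the nat cache at 100000 entries before excess_cached_nats() starts returning true. A runnable sketch of the arithmetic:

#include <stdio.h>

int main(void)
{
	/* Assumptions: 4 KB block, 9-byte packed f2fs_nat_entry. */
	const int nat_entry_per_block = 4096 / 9;              /* 455 */
	const int free_nid_pages = 8;                          /* new FREE_NID_PAGES */
	const int max_free_nids = nat_entry_per_block * free_nid_pages;
	const int nat_cache_threshold = 100000;                /* DEF_NAT_CACHE_THRESHOLD */

	printf("MAX_FREE_NIDS=%d, NAT cache threshold=%d\n",
	       max_free_nids, nat_cache_threshold);            /* 3640, 100000 */
	return 0;
}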
...@@ -153,9 +153,12 @@ static int recover_dentry(struct inode *inode, struct page *ipage, ...@@ -153,9 +153,12 @@ static int recover_dentry(struct inode *inode, struct page *ipage,
f2fs_delete_entry(de, page, dir, einode); f2fs_delete_entry(de, page, dir, einode);
iput(einode); iput(einode);
goto retry; goto retry;
} else if (IS_ERR(page)) {
err = PTR_ERR(page);
} else {
err = __f2fs_add_link(dir, &name, inode,
inode->i_ino, inode->i_mode);
} }
err = __f2fs_add_link(dir, &name, inode, inode->i_ino, inode->i_mode);
goto out; goto out;
out_unmap_put: out_unmap_put:
...@@ -175,7 +178,7 @@ static void recover_inode(struct inode *inode, struct page *page) ...@@ -175,7 +178,7 @@ static void recover_inode(struct inode *inode, struct page *page)
char *name; char *name;
inode->i_mode = le16_to_cpu(raw->i_mode); inode->i_mode = le16_to_cpu(raw->i_mode);
i_size_write(inode, le64_to_cpu(raw->i_size)); f2fs_i_size_write(inode, le64_to_cpu(raw->i_size));
inode->i_atime.tv_sec = le64_to_cpu(raw->i_mtime); inode->i_atime.tv_sec = le64_to_cpu(raw->i_mtime);
inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime); inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime); inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
...@@ -455,6 +458,9 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode, ...@@ -455,6 +458,9 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
continue; continue;
} }
if ((start + 1) << PAGE_SHIFT > i_size_read(inode))
f2fs_i_size_write(inode, (start + 1) << PAGE_SHIFT);
/* /*
* dest is reserved block, invalidate src block * dest is reserved block, invalidate src block
* and then reserve one new block in dnode page. * and then reserve one new block in dnode page.
...@@ -476,6 +482,8 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode, ...@@ -476,6 +482,8 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
#endif #endif
/* We should not get -ENOSPC */ /* We should not get -ENOSPC */
f2fs_bug_on(sbi, err); f2fs_bug_on(sbi, err);
if (err)
goto err;
} }
/* Check the previous node page having this index */ /* Check the previous node page having this index */
...@@ -490,9 +498,6 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode, ...@@ -490,9 +498,6 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
} }
} }
if (IS_INODE(dn.node_page))
sync_inode_page(&dn);
copy_node_footer(dn.node_page, page); copy_node_footer(dn.node_page, page);
fill_node_footer(dn.node_page, dn.nid, ni.ino, fill_node_footer(dn.node_page, dn.nid, ni.ino,
ofs_of_node(page), false); ofs_of_node(page), false);
...@@ -624,8 +629,12 @@ int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only) ...@@ -624,8 +629,12 @@ int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
if (err) { if (err) {
bool invalidate = false; bool invalidate = false;
if (discard_next_dnode(sbi, blkaddr)) if (test_opt(sbi, LFS)) {
update_meta_page(sbi, NULL, blkaddr);
invalidate = true;
} else if (discard_next_dnode(sbi, blkaddr)) {
invalidate = true; invalidate = true;
}
/* Flush all the NAT/SIT pages */ /* Flush all the NAT/SIT pages */
while (get_pages(sbi, F2FS_DIRTY_META)) while (get_pages(sbi, F2FS_DIRTY_META))
......
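Editor's note: the new i_size bump in do_recover_data() simply rounds the file size up so it covers the block index being recovered. A tiny illustration, with 4 KB pages assumed and made-up numbers:

#include <stdio.h>

int main(void)
{
	const int page_shift = 12;                       /* assumed 4 KB pages */
	long long i_size = 8192;                         /* size before recovery */
	long long start = 9;                             /* block index being recovered */

	long long needed = (start + 1) << page_shift;    /* 40960 */
	if (needed > i_size)
		i_size = needed;                         /* mirrors the f2fs_i_size_write() call */

	printf("i_size after recovery: %lld\n", i_size); /* 40960 */
	return 0;
}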
...@@ -241,7 +241,7 @@ void drop_inmem_pages(struct inode *inode) ...@@ -241,7 +241,7 @@ void drop_inmem_pages(struct inode *inode)
{ {
struct f2fs_inode_info *fi = F2FS_I(inode); struct f2fs_inode_info *fi = F2FS_I(inode);
clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE); clear_inode_flag(inode, FI_ATOMIC_FILE);
mutex_lock(&fi->inmem_lock); mutex_lock(&fi->inmem_lock);
__revoke_inmem_pages(inode, &fi->inmem_pages, true, false); __revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
...@@ -346,6 +346,11 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need) ...@@ -346,6 +346,11 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
{ {
if (!need) if (!need)
return; return;
/* balance_fs_bg is able to be pending */
if (excess_cached_nats(sbi))
f2fs_balance_fs_bg(sbi);
/* /*
* We should do GC or end up with checkpoint, if there are so many dirty * We should do GC or end up with checkpoint, if there are so many dirty
* dir/node pages without enough free segments. * dir/node pages without enough free segments.
...@@ -367,7 +372,9 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi) ...@@ -367,7 +372,9 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK); try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);
if (!available_free_memory(sbi, FREE_NIDS)) if (!available_free_memory(sbi, FREE_NIDS))
try_to_free_nids(sbi, NAT_ENTRY_PER_BLOCK * FREE_NID_PAGES); try_to_free_nids(sbi, MAX_FREE_NIDS);
else
build_free_nids(sbi);
/* checkpoint is the only way to shrink partial cached entries */ /* checkpoint is the only way to shrink partial cached entries */
if (!available_free_memory(sbi, NAT_ENTRIES) || if (!available_free_memory(sbi, NAT_ENTRIES) ||
...@@ -435,25 +442,29 @@ int f2fs_issue_flush(struct f2fs_sb_info *sbi) ...@@ -435,25 +442,29 @@ int f2fs_issue_flush(struct f2fs_sb_info *sbi)
if (test_opt(sbi, NOBARRIER)) if (test_opt(sbi, NOBARRIER))
return 0; return 0;
if (!test_opt(sbi, FLUSH_MERGE)) { if (!test_opt(sbi, FLUSH_MERGE) || !atomic_read(&fcc->submit_flush)) {
struct bio *bio = f2fs_bio_alloc(0); struct bio *bio = f2fs_bio_alloc(0);
int ret; int ret;
atomic_inc(&fcc->submit_flush);
bio->bi_bdev = sbi->sb->s_bdev; bio->bi_bdev = sbi->sb->s_bdev;
bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH); bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
ret = submit_bio_wait(bio); ret = submit_bio_wait(bio);
atomic_dec(&fcc->submit_flush);
bio_put(bio); bio_put(bio);
return ret; return ret;
} }
init_completion(&cmd.wait); init_completion(&cmd.wait);
atomic_inc(&fcc->submit_flush);
llist_add(&cmd.llnode, &fcc->issue_list); llist_add(&cmd.llnode, &fcc->issue_list);
if (!fcc->dispatch_list) if (!fcc->dispatch_list)
wake_up(&fcc->flush_wait_queue); wake_up(&fcc->flush_wait_queue);
wait_for_completion(&cmd.wait); wait_for_completion(&cmd.wait);
atomic_dec(&fcc->submit_flush);
return cmd.ret; return cmd.ret;
} }
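Editor's note: the submit_flush counter added above lets a caller bypass the flush-merge queue whenever no other flush is in flight. A userspace analogue of that control flow using C11 atomics (sketch only; the helpers are stand-ins, not f2fs functions):

#include <stdio.h>
#include <stdatomic.h>

static atomic_int submit_flush;

static int issue_flush_direct(void) { return 0; }  /* stand-in for submit_bio_wait() */
static int issue_flush_merged(void) { return 0; }  /* stand-in for the merge thread  */

static int issue_flush(int merge_enabled)
{
	int ret;

	if (!merge_enabled || atomic_load(&submit_flush) == 0) {
		atomic_fetch_add(&submit_flush, 1);
		ret = issue_flush_direct();
		atomic_fetch_sub(&submit_flush, 1);
		return ret;
	}
	atomic_fetch_add(&submit_flush, 1);
	ret = issue_flush_merged();
	atomic_fetch_sub(&submit_flush, 1);
	return ret;
}

int main(void)
{
	printf("%d\n", issue_flush(1));
	return 0;
}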
...@@ -467,6 +478,7 @@ int create_flush_cmd_control(struct f2fs_sb_info *sbi) ...@@ -467,6 +478,7 @@ int create_flush_cmd_control(struct f2fs_sb_info *sbi)
fcc = kzalloc(sizeof(struct flush_cmd_control), GFP_KERNEL); fcc = kzalloc(sizeof(struct flush_cmd_control), GFP_KERNEL);
if (!fcc) if (!fcc)
return -ENOMEM; return -ENOMEM;
atomic_set(&fcc->submit_flush, 0);
init_waitqueue_head(&fcc->flush_wait_queue); init_waitqueue_head(&fcc->flush_wait_queue);
init_llist_head(&fcc->issue_list); init_llist_head(&fcc->issue_list);
SM_I(sbi)->cmd_control_info = fcc; SM_I(sbi)->cmd_control_info = fcc;
...@@ -668,6 +680,10 @@ static void add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc) ...@@ -668,6 +680,10 @@ static void add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc)
break; break;
end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1); end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1);
if (force && start && end != max_blocks
&& (end - start) < cpc->trim_minlen)
continue;
__add_discard_entry(sbi, cpc, se, start, end); __add_discard_entry(sbi, cpc, se, start, end);
} }
} }
...@@ -705,6 +721,8 @@ void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc) ...@@ -705,6 +721,8 @@ void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc)
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
unsigned long *prefree_map = dirty_i->dirty_segmap[PRE]; unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
unsigned int start = 0, end = -1; unsigned int start = 0, end = -1;
unsigned int secno, start_segno;
bool force = (cpc->reason == CP_DISCARD);
mutex_lock(&dirty_i->seglist_lock); mutex_lock(&dirty_i->seglist_lock);
...@@ -721,17 +739,31 @@ void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc) ...@@ -721,17 +739,31 @@ void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc)
dirty_i->nr_dirty[PRE] -= end - start; dirty_i->nr_dirty[PRE] -= end - start;
if (!test_opt(sbi, DISCARD)) if (force || !test_opt(sbi, DISCARD))
continue; continue;
f2fs_issue_discard(sbi, START_BLOCK(sbi, start), if (!test_opt(sbi, LFS) || sbi->segs_per_sec == 1) {
f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
(end - start) << sbi->log_blocks_per_seg); (end - start) << sbi->log_blocks_per_seg);
continue;
}
next:
secno = GET_SECNO(sbi, start);
start_segno = secno * sbi->segs_per_sec;
if (!IS_CURSEC(sbi, secno) &&
!get_valid_blocks(sbi, start, sbi->segs_per_sec))
f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno),
sbi->segs_per_sec << sbi->log_blocks_per_seg);
start = start_segno + sbi->segs_per_sec;
if (start < end)
goto next;
} }
mutex_unlock(&dirty_i->seglist_lock); mutex_unlock(&dirty_i->seglist_lock);
/* send small discards */ /* send small discards */
list_for_each_entry_safe(entry, this, head, list) { list_for_each_entry_safe(entry, this, head, list) {
if (cpc->reason == CP_DISCARD && entry->len < cpc->trim_minlen) if (force && entry->len < cpc->trim_minlen)
goto skip; goto skip;
f2fs_issue_discard(sbi, entry->blkaddr, entry->len); f2fs_issue_discard(sbi, entry->blkaddr, entry->len);
cpc->trimmed += entry->len; cpc->trimmed += entry->len;
...@@ -1219,6 +1251,9 @@ void allocate_new_segments(struct f2fs_sb_info *sbi) ...@@ -1219,6 +1251,9 @@ void allocate_new_segments(struct f2fs_sb_info *sbi)
{ {
int i; int i;
if (test_opt(sbi, LFS))
return;
for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++)
__allocate_new_segments(sbi, i); __allocate_new_segments(sbi, i);
} }
...@@ -1392,11 +1427,17 @@ static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio) ...@@ -1392,11 +1427,17 @@ static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
{ {
int type = __get_segment_type(fio->page, fio->type); int type = __get_segment_type(fio->page, fio->type);
if (fio->type == NODE || fio->type == DATA)
mutex_lock(&fio->sbi->wio_mutex[fio->type]);
allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr, allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
&fio->new_blkaddr, sum, type); &fio->new_blkaddr, sum, type);
/* writeout dirty page into bdev */ /* writeout dirty page into bdev */
f2fs_submit_page_mbio(fio); f2fs_submit_page_mbio(fio);
if (fio->type == NODE || fio->type == DATA)
mutex_unlock(&fio->sbi->wio_mutex[fio->type]);
} }
void write_meta_page(struct f2fs_sb_info *sbi, struct page *page) void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
...@@ -2377,7 +2418,11 @@ int build_segment_manager(struct f2fs_sb_info *sbi) ...@@ -2377,7 +2418,11 @@ int build_segment_manager(struct f2fs_sb_info *sbi)
sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr); sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
sm_info->rec_prefree_segments = sm_info->main_segments * sm_info->rec_prefree_segments = sm_info->main_segments *
DEF_RECLAIM_PREFREE_SEGMENTS / 100; DEF_RECLAIM_PREFREE_SEGMENTS / 100;
sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC; if (sm_info->rec_prefree_segments > DEF_MAX_RECLAIM_PREFREE_SEGMENTS)
sm_info->rec_prefree_segments = DEF_MAX_RECLAIM_PREFREE_SEGMENTS;
if (!test_opt(sbi, LFS))
sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
sm_info->min_ipu_util = DEF_MIN_IPU_UTIL; sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS; sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
......
...@@ -16,6 +16,7 @@ ...@@ -16,6 +16,7 @@
#define NULL_SECNO ((unsigned int)(~0)) #define NULL_SECNO ((unsigned int)(~0))
#define DEF_RECLAIM_PREFREE_SEGMENTS 5 /* 5% over total segments */ #define DEF_RECLAIM_PREFREE_SEGMENTS 5 /* 5% over total segments */
#define DEF_MAX_RECLAIM_PREFREE_SEGMENTS 4096 /* 8GB in maximum */
/* L: Logical segment # in volume, R: Relative segment # in main area */ /* L: Logical segment # in volume, R: Relative segment # in main area */
#define GET_L2R_SEGNO(free_i, segno) (segno - free_i->start_segno) #define GET_L2R_SEGNO(free_i, segno) (segno - free_i->start_segno)
...@@ -470,6 +471,10 @@ static inline bool need_SSR(struct f2fs_sb_info *sbi) ...@@ -470,6 +471,10 @@ static inline bool need_SSR(struct f2fs_sb_info *sbi)
{ {
int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES); int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS); int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
if (test_opt(sbi, LFS))
return false;
return free_sections(sbi) <= (node_secs + 2 * dent_secs + return free_sections(sbi) <= (node_secs + 2 * dent_secs +
reserved_sections(sbi) + 1); reserved_sections(sbi) + 1);
} }
...@@ -479,6 +484,8 @@ static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi, int freed) ...@@ -479,6 +484,8 @@ static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi, int freed)
int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES); int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS); int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
node_secs += get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
return false; return false;
...@@ -531,6 +538,9 @@ static inline bool need_inplace_update(struct inode *inode) ...@@ -531,6 +538,9 @@ static inline bool need_inplace_update(struct inode *inode)
if (S_ISDIR(inode->i_mode) || f2fs_is_atomic_file(inode)) if (S_ISDIR(inode->i_mode) || f2fs_is_atomic_file(inode))
return false; return false;
if (test_opt(sbi, LFS))
return false;
if (policy & (0x1 << F2FS_IPU_FORCE)) if (policy & (0x1 << F2FS_IPU_FORCE))
return true; return true;
if (policy & (0x1 << F2FS_IPU_SSR) && need_SSR(sbi)) if (policy & (0x1 << F2FS_IPU_SSR) && need_SSR(sbi))
...@@ -544,7 +554,7 @@ static inline bool need_inplace_update(struct inode *inode) ...@@ -544,7 +554,7 @@ static inline bool need_inplace_update(struct inode *inode)
/* this is only set during fdatasync */ /* this is only set during fdatasync */
if (policy & (0x1 << F2FS_IPU_FSYNC) && if (policy & (0x1 << F2FS_IPU_FSYNC) &&
is_inode_flag_set(F2FS_I(inode), FI_NEED_IPU)) is_inode_flag_set(inode, FI_NEED_IPU))
return true; return true;
return false; return false;
...@@ -706,9 +716,9 @@ static inline int nr_pages_to_skip(struct f2fs_sb_info *sbi, int type) ...@@ -706,9 +716,9 @@ static inline int nr_pages_to_skip(struct f2fs_sb_info *sbi, int type)
if (type == DATA) if (type == DATA)
return sbi->blocks_per_seg; return sbi->blocks_per_seg;
else if (type == NODE) else if (type == NODE)
return 3 * sbi->blocks_per_seg; return 8 * sbi->blocks_per_seg;
else if (type == META) else if (type == META)
return MAX_BIO_BLOCKS(sbi); return 8 * MAX_BIO_BLOCKS(sbi);
else else
return 0; return 0;
} }
...@@ -726,10 +736,8 @@ static inline long nr_pages_to_write(struct f2fs_sb_info *sbi, int type, ...@@ -726,10 +736,8 @@ static inline long nr_pages_to_write(struct f2fs_sb_info *sbi, int type,
nr_to_write = wbc->nr_to_write; nr_to_write = wbc->nr_to_write;
if (type == DATA) if (type == NODE)
desired = 4096; desired = 2 * max_hw_blocks(sbi);
else if (type == NODE)
desired = 3 * max_hw_blocks(sbi);
else else
desired = MAX_BIO_BLOCKS(sbi); desired = MAX_BIO_BLOCKS(sbi);
......
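Editor's note: the "8GB in maximum" comment on DEF_MAX_RECLAIM_PREFREE_SEGMENTS checks out under the default f2fs geometry of 512 blocks per segment and 4 KB blocks (both assumptions, not stated in this hunk):

#include <stdio.h>

int main(void)
{
	const long long blocks_per_seg = 512;     /* assumed default segment size */
	const long long block_size = 4096;        /* assumed 4 KB blocks */
	const long long max_prefree = 4096;       /* DEF_MAX_RECLAIM_PREFREE_SEGMENTS */

	long long bytes = max_prefree * blocks_per_seg * block_size;
	printf("%lld GiB\n", bytes >> 30);        /* prints 8 */
	return 0;
}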
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
#include <linux/f2fs_fs.h> #include <linux/f2fs_fs.h>
#include "f2fs.h" #include "f2fs.h"
#include "node.h"
static LIST_HEAD(f2fs_list); static LIST_HEAD(f2fs_list);
static DEFINE_SPINLOCK(f2fs_list_lock); static DEFINE_SPINLOCK(f2fs_list_lock);
...@@ -25,8 +26,8 @@ static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi) ...@@ -25,8 +26,8 @@ static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi)
static unsigned long __count_free_nids(struct f2fs_sb_info *sbi) static unsigned long __count_free_nids(struct f2fs_sb_info *sbi)
{ {
if (NM_I(sbi)->fcnt > NAT_ENTRY_PER_BLOCK) if (NM_I(sbi)->fcnt > MAX_FREE_NIDS)
return NM_I(sbi)->fcnt - NAT_ENTRY_PER_BLOCK; return NM_I(sbi)->fcnt - MAX_FREE_NIDS;
return 0; return 0;
} }
......
...@@ -49,6 +49,7 @@ char *fault_name[FAULT_MAX] = { ...@@ -49,6 +49,7 @@ char *fault_name[FAULT_MAX] = {
[FAULT_ORPHAN] = "orphan", [FAULT_ORPHAN] = "orphan",
[FAULT_BLOCK] = "no more block", [FAULT_BLOCK] = "no more block",
[FAULT_DIR_DEPTH] = "too big dir depth", [FAULT_DIR_DEPTH] = "too big dir depth",
[FAULT_EVICT_INODE] = "evict_inode fail",
}; };
static void f2fs_build_fault_attr(unsigned int rate) static void f2fs_build_fault_attr(unsigned int rate)
...@@ -75,6 +76,7 @@ enum { ...@@ -75,6 +76,7 @@ enum {
Opt_disable_roll_forward, Opt_disable_roll_forward,
Opt_norecovery, Opt_norecovery,
Opt_discard, Opt_discard,
Opt_nodiscard,
Opt_noheap, Opt_noheap,
Opt_user_xattr, Opt_user_xattr,
Opt_nouser_xattr, Opt_nouser_xattr,
...@@ -86,13 +88,17 @@ enum { ...@@ -86,13 +88,17 @@ enum {
Opt_inline_data, Opt_inline_data,
Opt_inline_dentry, Opt_inline_dentry,
Opt_flush_merge, Opt_flush_merge,
Opt_noflush_merge,
Opt_nobarrier, Opt_nobarrier,
Opt_fastboot, Opt_fastboot,
Opt_extent_cache, Opt_extent_cache,
Opt_noextent_cache, Opt_noextent_cache,
Opt_noinline_data, Opt_noinline_data,
Opt_data_flush, Opt_data_flush,
Opt_mode,
Opt_fault_injection, Opt_fault_injection,
Opt_lazytime,
Opt_nolazytime,
Opt_err, Opt_err,
}; };
...@@ -101,6 +107,7 @@ static match_table_t f2fs_tokens = { ...@@ -101,6 +107,7 @@ static match_table_t f2fs_tokens = {
{Opt_disable_roll_forward, "disable_roll_forward"}, {Opt_disable_roll_forward, "disable_roll_forward"},
{Opt_norecovery, "norecovery"}, {Opt_norecovery, "norecovery"},
{Opt_discard, "discard"}, {Opt_discard, "discard"},
{Opt_nodiscard, "nodiscard"},
{Opt_noheap, "no_heap"}, {Opt_noheap, "no_heap"},
{Opt_user_xattr, "user_xattr"}, {Opt_user_xattr, "user_xattr"},
{Opt_nouser_xattr, "nouser_xattr"}, {Opt_nouser_xattr, "nouser_xattr"},
...@@ -112,13 +119,17 @@ static match_table_t f2fs_tokens = { ...@@ -112,13 +119,17 @@ static match_table_t f2fs_tokens = {
{Opt_inline_data, "inline_data"}, {Opt_inline_data, "inline_data"},
{Opt_inline_dentry, "inline_dentry"}, {Opt_inline_dentry, "inline_dentry"},
{Opt_flush_merge, "flush_merge"}, {Opt_flush_merge, "flush_merge"},
{Opt_noflush_merge, "noflush_merge"},
{Opt_nobarrier, "nobarrier"}, {Opt_nobarrier, "nobarrier"},
{Opt_fastboot, "fastboot"}, {Opt_fastboot, "fastboot"},
{Opt_extent_cache, "extent_cache"}, {Opt_extent_cache, "extent_cache"},
{Opt_noextent_cache, "noextent_cache"}, {Opt_noextent_cache, "noextent_cache"},
{Opt_noinline_data, "noinline_data"}, {Opt_noinline_data, "noinline_data"},
{Opt_data_flush, "data_flush"}, {Opt_data_flush, "data_flush"},
{Opt_mode, "mode=%s"},
{Opt_fault_injection, "fault_injection=%u"}, {Opt_fault_injection, "fault_injection=%u"},
{Opt_lazytime, "lazytime"},
{Opt_nolazytime, "nolazytime"},
{Opt_err, NULL}, {Opt_err, NULL},
}; };
...@@ -417,6 +428,8 @@ static int parse_options(struct super_block *sb, char *options) ...@@ -417,6 +428,8 @@ static int parse_options(struct super_block *sb, char *options)
"the device does not support discard"); "the device does not support discard");
} }
break; break;
case Opt_nodiscard:
clear_opt(sbi, DISCARD);
case Opt_noheap: case Opt_noheap:
set_opt(sbi, NOHEAP); set_opt(sbi, NOHEAP);
break; break;
...@@ -478,6 +491,9 @@ static int parse_options(struct super_block *sb, char *options) ...@@ -478,6 +491,9 @@ static int parse_options(struct super_block *sb, char *options)
case Opt_flush_merge: case Opt_flush_merge:
set_opt(sbi, FLUSH_MERGE); set_opt(sbi, FLUSH_MERGE);
break; break;
case Opt_noflush_merge:
clear_opt(sbi, FLUSH_MERGE);
break;
case Opt_nobarrier: case Opt_nobarrier:
set_opt(sbi, NOBARRIER); set_opt(sbi, NOBARRIER);
break; break;
...@@ -496,6 +512,23 @@ static int parse_options(struct super_block *sb, char *options) ...@@ -496,6 +512,23 @@ static int parse_options(struct super_block *sb, char *options)
case Opt_data_flush: case Opt_data_flush:
set_opt(sbi, DATA_FLUSH); set_opt(sbi, DATA_FLUSH);
break; break;
case Opt_mode:
name = match_strdup(&args[0]);
if (!name)
return -ENOMEM;
if (strlen(name) == 8 &&
!strncmp(name, "adaptive", 8)) {
set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
} else if (strlen(name) == 3 &&
!strncmp(name, "lfs", 3)) {
set_opt_mode(sbi, F2FS_MOUNT_LFS);
} else {
kfree(name);
return -EINVAL;
}
kfree(name);
break;
case Opt_fault_injection: case Opt_fault_injection:
if (args->from && match_int(args, &arg)) if (args->from && match_int(args, &arg))
return -EINVAL; return -EINVAL;
...@@ -506,6 +539,12 @@ static int parse_options(struct super_block *sb, char *options) ...@@ -506,6 +539,12 @@ static int parse_options(struct super_block *sb, char *options)
"FAULT_INJECTION was not selected"); "FAULT_INJECTION was not selected");
#endif #endif
break; break;
case Opt_lazytime:
sb->s_flags |= MS_LAZYTIME;
break;
case Opt_nolazytime:
sb->s_flags &= ~MS_LAZYTIME;
break;
default: default:
f2fs_msg(sb, KERN_ERR, f2fs_msg(sb, KERN_ERR,
"Unrecognized mount option \"%s\" or missing value", "Unrecognized mount option \"%s\" or missing value",
...@@ -537,13 +576,11 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb) ...@@ -537,13 +576,11 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb)
fi->i_advise = 0; fi->i_advise = 0;
init_rwsem(&fi->i_sem); init_rwsem(&fi->i_sem);
INIT_LIST_HEAD(&fi->dirty_list); INIT_LIST_HEAD(&fi->dirty_list);
INIT_LIST_HEAD(&fi->gdirty_list);
INIT_LIST_HEAD(&fi->inmem_pages); INIT_LIST_HEAD(&fi->inmem_pages);
mutex_init(&fi->inmem_lock); mutex_init(&fi->inmem_lock);
init_rwsem(&fi->dio_rwsem[READ]);
set_inode_flag(fi, FI_NEW_INODE); init_rwsem(&fi->dio_rwsem[WRITE]);
if (test_opt(F2FS_SB(sb), INLINE_XATTR))
set_inode_flag(fi, FI_INLINE_XATTR);
/* Will be used by directory only */ /* Will be used by directory only */
fi->i_dir_level = F2FS_SB(sb)->dir_level; fi->i_dir_level = F2FS_SB(sb)->dir_level;
...@@ -559,7 +596,7 @@ static int f2fs_drop_inode(struct inode *inode) ...@@ -559,7 +596,7 @@ static int f2fs_drop_inode(struct inode *inode)
* - f2fs_gc -> iput -> evict * - f2fs_gc -> iput -> evict
* - inode_wait_for_writeback(inode) * - inode_wait_for_writeback(inode)
*/ */
if (!inode_unhashed(inode) && inode->i_state & I_SYNC) { if ((!inode_unhashed(inode) && inode->i_state & I_SYNC)) {
if (!inode->i_nlink && !is_bad_inode(inode)) { if (!inode->i_nlink && !is_bad_inode(inode)) {
/* to avoid evict_inode call simultaneously */ /* to avoid evict_inode call simultaneously */
atomic_inc(&inode->i_count); atomic_inc(&inode->i_count);
...@@ -573,10 +610,10 @@ static int f2fs_drop_inode(struct inode *inode) ...@@ -573,10 +610,10 @@ static int f2fs_drop_inode(struct inode *inode)
f2fs_destroy_extent_node(inode); f2fs_destroy_extent_node(inode);
sb_start_intwrite(inode->i_sb); sb_start_intwrite(inode->i_sb);
i_size_write(inode, 0); f2fs_i_size_write(inode, 0);
if (F2FS_HAS_BLOCKS(inode)) if (F2FS_HAS_BLOCKS(inode))
f2fs_truncate(inode, true); f2fs_truncate(inode);
sb_end_intwrite(inode->i_sb); sb_end_intwrite(inode->i_sb);
...@@ -586,9 +623,47 @@ static int f2fs_drop_inode(struct inode *inode) ...@@ -586,9 +623,47 @@ static int f2fs_drop_inode(struct inode *inode)
} }
return 0; return 0;
} }
return generic_drop_inode(inode); return generic_drop_inode(inode);
} }
int f2fs_inode_dirtied(struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
spin_lock(&sbi->inode_lock[DIRTY_META]);
if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
spin_unlock(&sbi->inode_lock[DIRTY_META]);
return 1;
}
set_inode_flag(inode, FI_DIRTY_INODE);
list_add_tail(&F2FS_I(inode)->gdirty_list,
&sbi->inode_list[DIRTY_META]);
inc_page_count(sbi, F2FS_DIRTY_IMETA);
stat_inc_dirty_inode(sbi, DIRTY_META);
spin_unlock(&sbi->inode_lock[DIRTY_META]);
return 0;
}
void f2fs_inode_synced(struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
spin_lock(&sbi->inode_lock[DIRTY_META]);
if (!is_inode_flag_set(inode, FI_DIRTY_INODE)) {
spin_unlock(&sbi->inode_lock[DIRTY_META]);
return;
}
list_del_init(&F2FS_I(inode)->gdirty_list);
clear_inode_flag(inode, FI_DIRTY_INODE);
clear_inode_flag(inode, FI_AUTO_RECOVER);
dec_page_count(sbi, F2FS_DIRTY_IMETA);
stat_dec_dirty_inode(F2FS_I_SB(inode), DIRTY_META);
spin_unlock(&sbi->inode_lock[DIRTY_META]);
}
/* /*
* f2fs_dirty_inode() is called from __mark_inode_dirty() * f2fs_dirty_inode() is called from __mark_inode_dirty()
* *
...@@ -596,7 +671,19 @@ static int f2fs_drop_inode(struct inode *inode) ...@@ -596,7 +671,19 @@ static int f2fs_drop_inode(struct inode *inode)
*/ */
static void f2fs_dirty_inode(struct inode *inode, int flags) static void f2fs_dirty_inode(struct inode *inode, int flags)
{ {
set_inode_flag(F2FS_I(inode), FI_DIRTY_INODE); struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
if (inode->i_ino == F2FS_NODE_INO(sbi) ||
inode->i_ino == F2FS_META_INO(sbi))
return;
if (flags == I_DIRTY_TIME)
return;
if (is_inode_flag_set(inode, FI_AUTO_RECOVER))
clear_inode_flag(inode, FI_AUTO_RECOVER);
f2fs_inode_dirtied(inode);
} }
static void f2fs_i_callback(struct rcu_head *head) static void f2fs_i_callback(struct rcu_head *head)
...@@ -619,6 +706,8 @@ static void destroy_percpu_info(struct f2fs_sb_info *sbi) ...@@ -619,6 +706,8 @@ static void destroy_percpu_info(struct f2fs_sb_info *sbi)
percpu_counter_destroy(&sbi->nr_pages[i]); percpu_counter_destroy(&sbi->nr_pages[i]);
percpu_counter_destroy(&sbi->alloc_valid_block_count); percpu_counter_destroy(&sbi->alloc_valid_block_count);
percpu_counter_destroy(&sbi->total_valid_inode_count); percpu_counter_destroy(&sbi->total_valid_inode_count);
percpu_free_rwsem(&sbi->cp_rwsem);
} }
static void f2fs_put_super(struct super_block *sb) static void f2fs_put_super(struct super_block *sb)
...@@ -738,7 +827,7 @@ static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf) ...@@ -738,7 +827,7 @@ static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_bsize = sbi->blocksize; buf->f_bsize = sbi->blocksize;
buf->f_blocks = total_count - start_count; buf->f_blocks = total_count - start_count;
buf->f_bfree = buf->f_blocks - valid_user_blocks(sbi) - ovp_count; buf->f_bfree = user_block_count - valid_user_blocks(sbi) + ovp_count;
buf->f_bavail = user_block_count - valid_user_blocks(sbi); buf->f_bavail = user_block_count - valid_user_blocks(sbi);
buf->f_files = sbi->total_node_count - F2FS_RESERVED_NODE_NUM; buf->f_files = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
...@@ -803,6 +892,12 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root) ...@@ -803,6 +892,12 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
seq_puts(seq, ",noextent_cache"); seq_puts(seq, ",noextent_cache");
if (test_opt(sbi, DATA_FLUSH)) if (test_opt(sbi, DATA_FLUSH))
seq_puts(seq, ",data_flush"); seq_puts(seq, ",data_flush");
seq_puts(seq, ",mode=");
if (test_opt(sbi, ADAPTIVE))
seq_puts(seq, "adaptive");
else if (test_opt(sbi, LFS))
seq_puts(seq, "lfs");
seq_printf(seq, ",active_logs=%u", sbi->active_logs); seq_printf(seq, ",active_logs=%u", sbi->active_logs);
return 0; return 0;
...@@ -884,6 +979,14 @@ static void default_options(struct f2fs_sb_info *sbi) ...@@ -884,6 +979,14 @@ static void default_options(struct f2fs_sb_info *sbi)
set_opt(sbi, BG_GC); set_opt(sbi, BG_GC);
set_opt(sbi, INLINE_DATA); set_opt(sbi, INLINE_DATA);
set_opt(sbi, EXTENT_CACHE); set_opt(sbi, EXTENT_CACHE);
sbi->sb->s_flags |= MS_LAZYTIME;
set_opt(sbi, FLUSH_MERGE);
if (f2fs_sb_mounted_hmsmr(sbi->sb)) {
set_opt_mode(sbi, F2FS_MOUNT_LFS);
set_opt(sbi, DISCARD);
} else {
set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
}
#ifdef CONFIG_F2FS_FS_XATTR #ifdef CONFIG_F2FS_FS_XATTR
set_opt(sbi, XATTR_USER); set_opt(sbi, XATTR_USER);
...@@ -1367,6 +1470,8 @@ static void init_sb_info(struct f2fs_sb_info *sbi) ...@@ -1367,6 +1470,8 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
INIT_LIST_HEAD(&sbi->s_list); INIT_LIST_HEAD(&sbi->s_list);
mutex_init(&sbi->umount_mutex); mutex_init(&sbi->umount_mutex);
mutex_init(&sbi->wio_mutex[NODE]);
mutex_init(&sbi->wio_mutex[DATA]);
#ifdef CONFIG_F2FS_FS_ENCRYPTION #ifdef CONFIG_F2FS_FS_ENCRYPTION
memcpy(sbi->key_prefix, F2FS_KEY_DESC_PREFIX, memcpy(sbi->key_prefix, F2FS_KEY_DESC_PREFIX,
...@@ -1379,6 +1484,9 @@ static int init_percpu_info(struct f2fs_sb_info *sbi) ...@@ -1379,6 +1484,9 @@ static int init_percpu_info(struct f2fs_sb_info *sbi)
{ {
int i, err; int i, err;
if (percpu_init_rwsem(&sbi->cp_rwsem))
return -ENOMEM;
for (i = 0; i < NR_COUNT_TYPE; i++) { for (i = 0; i < NR_COUNT_TYPE; i++) {
err = percpu_counter_init(&sbi->nr_pages[i], 0, GFP_KERNEL); err = percpu_counter_init(&sbi->nr_pages[i], 0, GFP_KERNEL);
if (err) if (err)
...@@ -1530,6 +1638,8 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) ...@@ -1530,6 +1638,8 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
goto free_sbi; goto free_sbi;
sb->s_fs_info = sbi; sb->s_fs_info = sbi;
sbi->raw_super = raw_super;
default_options(sbi); default_options(sbi);
/* parse mount options */ /* parse mount options */
options = kstrdup((const char *)data, GFP_KERNEL); options = kstrdup((const char *)data, GFP_KERNEL);
...@@ -1559,10 +1669,8 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) ...@@ -1559,10 +1669,8 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid)); memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
/* init f2fs-specific super block info */ /* init f2fs-specific super block info */
sbi->raw_super = raw_super;
sbi->valid_super_block = valid_super_block; sbi->valid_super_block = valid_super_block;
mutex_init(&sbi->gc_mutex); mutex_init(&sbi->gc_mutex);
mutex_init(&sbi->writepages);
mutex_init(&sbi->cp_mutex); mutex_init(&sbi->cp_mutex);
init_rwsem(&sbi->node_write); init_rwsem(&sbi->node_write);
...@@ -1579,7 +1687,6 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) ...@@ -1579,7 +1687,6 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
sbi->write_io[i].bio = NULL; sbi->write_io[i].bio = NULL;
} }
init_rwsem(&sbi->cp_rwsem);
init_waitqueue_head(&sbi->cp_wait); init_waitqueue_head(&sbi->cp_wait);
init_sb_info(sbi); init_sb_info(sbi);
...@@ -1762,6 +1869,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) ...@@ -1762,6 +1869,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
return 0; return 0;
free_kobj: free_kobj:
f2fs_sync_inode_meta(sbi);
kobject_del(&sbi->s_kobj); kobject_del(&sbi->s_kobj);
kobject_put(&sbi->s_kobj); kobject_put(&sbi->s_kobj);
wait_for_completion(&sbi->s_kobj_unregister); wait_for_completion(&sbi->s_kobj_unregister);
......
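Editor's note: taken together, the super.c changes add mode=%s, nodiscard, noflush_merge and lazytime/nolazytime as mount-time knobs. A minimal userspace sketch of exercising them via mount(2); the device and mount-point paths are placeholders, and the call needs root on a real f2fs volume:

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/mount.h>

int main(void)
{
	const char *dev = "/dev/sdb1";    /* placeholder device */
	const char *dir = "/mnt/f2fs";    /* placeholder mount point */

	/* Options parsed by the updated parse_options(): pure-LFS allocation,
	 * real-time discard off, flush merging off, lazytime on. */
	const char *opts = "mode=lfs,nodiscard,noflush_merge,lazytime";

	if (mount(dev, dir, "f2fs", 0, opts) != 0) {
		fprintf(stderr, "mount failed: %s\n", strerror(errno));
		return 1;
	}
	puts("mounted");
	return 0;
}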
...@@ -106,7 +106,7 @@ static int f2fs_xattr_advise_set(const struct xattr_handler *handler, ...@@ -106,7 +106,7 @@ static int f2fs_xattr_advise_set(const struct xattr_handler *handler,
return -EINVAL; return -EINVAL;
F2FS_I(inode)->i_advise |= *(char *)value; F2FS_I(inode)->i_advise |= *(char *)value;
mark_inode_dirty(inode); f2fs_mark_inode_dirty_sync(inode);
return 0; return 0;
} }
...@@ -299,6 +299,7 @@ static inline int write_all_xattrs(struct inode *inode, __u32 hsize, ...@@ -299,6 +299,7 @@ static inline int write_all_xattrs(struct inode *inode, __u32 hsize,
if (ipage) { if (ipage) {
inline_addr = inline_xattr_addr(ipage); inline_addr = inline_xattr_addr(ipage);
f2fs_wait_on_page_writeback(ipage, NODE, true); f2fs_wait_on_page_writeback(ipage, NODE, true);
set_page_dirty(ipage);
} else { } else {
page = get_node_page(sbi, inode->i_ino); page = get_node_page(sbi, inode->i_ino);
if (IS_ERR(page)) { if (IS_ERR(page)) {
...@@ -441,13 +442,12 @@ static int __f2fs_setxattr(struct inode *inode, int index, ...@@ -441,13 +442,12 @@ static int __f2fs_setxattr(struct inode *inode, int index,
const char *name, const void *value, size_t size, const char *name, const void *value, size_t size,
struct page *ipage, int flags) struct page *ipage, int flags)
{ {
struct f2fs_inode_info *fi = F2FS_I(inode);
struct f2fs_xattr_entry *here, *last; struct f2fs_xattr_entry *here, *last;
void *base_addr; void *base_addr;
int found, newsize; int found, newsize;
size_t len; size_t len;
__u32 new_hsize; __u32 new_hsize;
int error = -ENOMEM; int error = 0;
if (name == NULL) if (name == NULL)
return -EINVAL; return -EINVAL;
...@@ -465,7 +465,7 @@ static int __f2fs_setxattr(struct inode *inode, int index, ...@@ -465,7 +465,7 @@ static int __f2fs_setxattr(struct inode *inode, int index,
base_addr = read_all_xattrs(inode, ipage); base_addr = read_all_xattrs(inode, ipage);
if (!base_addr) if (!base_addr)
goto exit; return -ENOMEM;
/* find entry with wanted name. */ /* find entry with wanted name. */
here = __find_xattr(base_addr, index, len, name); here = __find_xattr(base_addr, index, len, name);
...@@ -539,19 +539,15 @@ static int __f2fs_setxattr(struct inode *inode, int index, ...@@ -539,19 +539,15 @@ static int __f2fs_setxattr(struct inode *inode, int index,
if (error) if (error)
goto exit; goto exit;
if (is_inode_flag_set(fi, FI_ACL_MODE)) { if (is_inode_flag_set(inode, FI_ACL_MODE)) {
inode->i_mode = fi->i_acl_mode; inode->i_mode = F2FS_I(inode)->i_acl_mode;
inode->i_ctime = CURRENT_TIME; inode->i_ctime = CURRENT_TIME;
clear_inode_flag(fi, FI_ACL_MODE); clear_inode_flag(inode, FI_ACL_MODE);
} }
if (index == F2FS_XATTR_INDEX_ENCRYPTION && if (index == F2FS_XATTR_INDEX_ENCRYPTION &&
!strcmp(name, F2FS_XATTR_NAME_ENCRYPTION_CONTEXT)) !strcmp(name, F2FS_XATTR_NAME_ENCRYPTION_CONTEXT))
f2fs_set_encrypted_inode(inode); f2fs_set_encrypted_inode(inode);
f2fs_mark_inode_dirty_sync(inode);
if (ipage)
update_inode(inode, ipage);
else
update_inode_page(inode);
exit: exit:
kzfree(base_addr); kzfree(base_addr);
return error; return error;
......