Commit c7d7b986 authored by Linus Torvalds

Merge tag 'for-f2fs-3.20' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs

Pull f2fs updates from Jaegeuk Kim:
 "Major changes are to:
   - add f2fs_io_tracer and F2FS_IOC_GETVERSION
   - fix wrong acl assignment from parent
   - fix accessing wrong data blocks
   - fix wrong condition check for f2fs_sync_fs
   - align start block address for direct_io
   - add and refactor the readahead flows of FS metadata
   - refactor atomic and volatile write policies

  But most of the patches are clean-ups and minor bug fixes.  Some of
  them refactor old code as well."
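
For context on the F2FS_IOC_GETVERSION item above: the new ioctl simply
returns the inode's generation number (see the f2fs_ioc_getversion() hunk
in fs/f2fs/file.c below). A minimal user-space sketch, assuming the command
resolves to the generic FS_IOC_GETVERSION request from <linux/fs.h>, as
fs/f2fs/f2fs.h appears to define it in this series:

    /* Hedged sketch: print the generation number of a file on f2fs. */
    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>	/* FS_IOC_GETVERSION */

    int main(int argc, char **argv)
    {
        int fd, version;	/* f2fs put_user()s an int here */

        if (argc != 2 || (fd = open(argv[1], O_RDONLY)) < 0)
            return 1;
        if (ioctl(fd, FS_IOC_GETVERSION, &version) < 0) {
            perror("FS_IOC_GETVERSION");
            close(fd);
            return 1;
        }
        printf("generation: %d\n", version);
        close(fd);
        return 0;
    }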

* tag 'for-f2fs-3.20' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs: (64 commits)
  f2fs: use spinlock for segmap_lock instead of rwlock
  f2fs: fix accessing wrong indexed data blocks
  f2fs: avoid variable length array
  f2fs: fix sparse warnings
  f2fs: allocate data blocks in advance for f2fs_direct_IO
  f2fs: introduce macros to convert bytes and blocks in f2fs
  f2fs: call set_buffer_new for get_block
  f2fs: check node page contents all the time
  f2fs: avoid data offset overflow when lseeking huge file
  f2fs: fix to use highmem for pages of newly created directory
  f2fs: introduce a batched trim
  f2fs: merge {invalidate,release}page for meta/node/data pages
  f2fs: show the number of writeback pages in stat
  f2fs: keep PagePrivate during releasepage
  f2fs: should fail mount when trying to recover data on read-only dev
  f2fs: split UMOUNT and FASTBOOT flags
  f2fs: avoid write_checkpoint if f2fs is mounted readonly
  f2fs: support norecovery mount option
  f2fs: fix not to drop mount options when retrying fill_super
  f2fs: merge flags in struct f2fs_sb_info
  ...
parents 81809957 1a118ccf
@@ -74,3 +74,9 @@ Date: March 2014
Contact: "Jaegeuk Kim" <jaegeuk.kim@samsung.com>
Description:
Controls the memory footprint used by f2fs.
+What: /sys/fs/f2fs/<disk>/trim_sections
+Date: February 2015
+Contact: "Jaegeuk Kim" <jaegeuk@kernel.org>
+Description:
+Controls the trimming rate in batch mode.
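
As a usage note for the entry above: the batched trim is still driven through
the regular FITRIM ioctl; trim_sections only bounds how many sections f2fs
handles per internal batch. A hedged sketch using the standard interface from
<linux/fs.h> (the mount point is a placeholder):

    /* Hedged sketch: issue FITRIM against an f2fs mount point. */
    #include <stdio.h>
    #include <string.h>
    #include <limits.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>	/* FITRIM, struct fstrim_range */

    int main(void)
    {
        struct fstrim_range range;
        int fd = open("/mnt/f2fs", O_RDONLY);	/* placeholder mount point */

        if (fd < 0)
            return 1;
        memset(&range, 0, sizeof(range));
        range.len = ULLONG_MAX;			/* whole filesystem */
        if (ioctl(fd, FITRIM, &range) < 0)
            perror("FITRIM");
        else
            printf("trimmed %llu bytes\n", (unsigned long long)range.len);
        close(fd);
        return 0;
    }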
@@ -106,6 +106,8 @@ background_gc=%s Turn on/off cleaning operations, namely garbage
Default value for this option is on. So garbage
collection is on by default.
disable_roll_forward Disable the roll-forward recovery routine
+norecovery Disable the roll-forward recovery routine, mounted read-
+only (i.e., -o ro,disable_roll_forward)
discard Issue discard/TRIM commands when a segment is cleaned.
no_heap Disable heap-style segment allocation which finds free
segments for data from the beginning of main area, while
@@ -197,6 +199,10 @@ Files in /sys/fs/f2fs/<devname>
checkpoint is triggered, and issued during the
checkpoint. By default, it is disabled with 0.
+trim_sections This parameter controls the number of sections
+to be trimmed out in batch mode when FITRIM
+conducts. 32 sections is set by default.
ipu_policy This parameter controls the policy of in-place
updates in f2fs. There are five policies:
0x01: F2FS_IPU_FORCE, 0x02: F2FS_IPU_SSR,
...
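
To illustrate the norecovery option documented above (rejected by the option
parser in the super.c hunk below unless the mount is read-only), a hedged
mount(2) sketch, equivalent to "mount -o ro,norecovery"; device and mount
point are placeholders:

    /* Hedged sketch: mount an f2fs volume read-only without running
     * roll-forward recovery. */
    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
        if (mount("/dev/sdb1", "/mnt/f2fs", "f2fs",
                  MS_RDONLY, "norecovery") < 0) {
            perror("mount");
            return 1;
        }
        return 0;
    }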
@@ -71,3 +71,13 @@ config F2FS_CHECK_FS
Enables BUG_ONs which check the filesystem consistency in runtime.
If you want to improve the performance, say N.
+config F2FS_IO_TRACE
+bool "F2FS IO tracer"
+depends on F2FS_FS
+depends on FUNCTION_TRACER
+help
+F2FS IO trace is based on a function trace, which gathers process
+information and block IO patterns in the filesystem level.
+If unsure, say N.
@@ -5,3 +5,4 @@ f2fs-y += checkpoint.o gc.o data.o node.o segment.o recovery.o
f2fs-$(CONFIG_F2FS_STAT_FS) += debug.o
f2fs-$(CONFIG_F2FS_FS_XATTR) += xattr.o
f2fs-$(CONFIG_F2FS_FS_POSIX_ACL) += acl.o
+f2fs-$(CONFIG_F2FS_IO_TRACE) += trace.o
@@ -62,7 +62,7 @@ static struct posix_acl *f2fs_acl_from_disk(const char *value, size_t size)
if (count == 0)
return NULL;
-acl = posix_acl_alloc(count, GFP_KERNEL);
+acl = posix_acl_alloc(count, GFP_NOFS);
if (!acl)
return ERR_PTR(-ENOMEM);
@@ -116,7 +116,7 @@ static void *f2fs_acl_to_disk(const struct posix_acl *acl, size_t *size)
int i;
f2fs_acl = kmalloc(sizeof(struct f2fs_acl_header) + acl->a_count *
-sizeof(struct f2fs_acl_entry), GFP_KERNEL);
+sizeof(struct f2fs_acl_entry), GFP_NOFS);
if (!f2fs_acl)
return ERR_PTR(-ENOMEM);
@@ -396,7 +396,7 @@ int f2fs_init_acl(struct inode *inode, struct inode *dir, struct page *ipage,
posix_acl_release(default_acl);
}
if (acl) {
-if (error)
+if (!error)
error = __f2fs_set_acl(inode, ACL_TYPE_ACCESS, acl,
ipage);
posix_acl_release(acl);
...
@@ -40,6 +40,7 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->ndirty_dirs = sbi->n_dirty_dirs;
si->ndirty_meta = get_pages(sbi, F2FS_DIRTY_META);
si->inmem_pages = get_pages(sbi, F2FS_INMEM_PAGES);
+si->wb_pages = get_pages(sbi, F2FS_WRITEBACK);
si->total_count = (int)sbi->user_block_count / sbi->blocks_per_seg;
si->rsvd_segs = reserved_segments(sbi);
si->overp_segs = overprovision_segments(sbi);
@@ -57,7 +58,9 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->node_pages = NODE_MAPPING(sbi)->nrpages;
si->meta_pages = META_MAPPING(sbi)->nrpages;
si->nats = NM_I(sbi)->nat_cnt;
-si->sits = SIT_I(sbi)->dirty_sentries;
+si->dirty_nats = NM_I(sbi)->dirty_nat_cnt;
+si->sits = MAIN_SEGS(sbi);
+si->dirty_sits = SIT_I(sbi)->dirty_sentries;
si->fnids = NM_I(sbi)->fcnt;
si->bg_gc = sbi->bg_gc;
si->util_free = (int)(free_user_blocks(sbi) >> sbi->log_blocks_per_seg)
@@ -79,6 +82,8 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->segment_count[i] = sbi->segment_count[i];
si->block_count[i] = sbi->block_count[i];
}
+si->inplace_count = atomic_read(&sbi->inplace_count);
}
/*
@@ -137,6 +142,7 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
si->base_mem += MAIN_SEGS(sbi) * sizeof(struct seg_entry);
si->base_mem += f2fs_bitmap_size(MAIN_SEGS(sbi));
si->base_mem += 2 * SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
+si->base_mem += SIT_VBLOCK_MAP_SIZE;
if (sbi->segs_per_sec > 1)
si->base_mem += MAIN_SECS(sbi) * sizeof(struct sec_entry);
si->base_mem += __bitmap_size(sbi, SIT_BITMAP);
@@ -159,20 +165,32 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
si->base_mem += sizeof(struct f2fs_nm_info);
si->base_mem += __bitmap_size(sbi, NAT_BITMAP);
+get_cache:
+si->cache_mem = 0;
/* build gc */
-si->base_mem += sizeof(struct f2fs_gc_kthread);
+if (sbi->gc_thread)
+si->cache_mem += sizeof(struct f2fs_gc_kthread);
+/* build merge flush thread */
+if (SM_I(sbi)->cmd_control_info)
+si->cache_mem += sizeof(struct flush_cmd_control);
-get_cache:
/* free nids */
-si->cache_mem = NM_I(sbi)->fcnt;
-si->cache_mem += NM_I(sbi)->nat_cnt;
-npages = NODE_MAPPING(sbi)->nrpages;
-si->cache_mem += npages << PAGE_CACHE_SHIFT;
-npages = META_MAPPING(sbi)->nrpages;
-si->cache_mem += npages << PAGE_CACHE_SHIFT;
-si->cache_mem += sbi->n_dirty_dirs * sizeof(struct dir_inode_entry);
+si->cache_mem += NM_I(sbi)->fcnt * sizeof(struct free_nid);
+si->cache_mem += NM_I(sbi)->nat_cnt * sizeof(struct nat_entry);
+si->cache_mem += NM_I(sbi)->dirty_nat_cnt *
+sizeof(struct nat_entry_set);
+si->cache_mem += si->inmem_pages * sizeof(struct inmem_pages);
+si->cache_mem += sbi->n_dirty_dirs * sizeof(struct inode_entry);
for (i = 0; i <= UPDATE_INO; i++)
si->cache_mem += sbi->im[i].ino_num * sizeof(struct ino_entry);
+si->page_mem = 0;
+npages = NODE_MAPPING(sbi)->nrpages;
+si->page_mem += npages << PAGE_CACHE_SHIFT;
+npages = META_MAPPING(sbi)->nrpages;
+si->page_mem += npages << PAGE_CACHE_SHIFT;
}
static int stat_show(struct seq_file *s, void *v)
@@ -250,16 +268,16 @@ static int stat_show(struct seq_file *s, void *v)
seq_printf(s, "\nExtent Hit Ratio: %d / %d\n",
si->hit_ext, si->total_ext);
seq_puts(s, "\nBalancing F2FS Async:\n");
-seq_printf(s, " - inmem: %4d\n",
-si->inmem_pages);
+seq_printf(s, " - inmem: %4d, wb: %4d\n",
+si->inmem_pages, si->wb_pages);
seq_printf(s, " - nodes: %4d in %4d\n",
si->ndirty_node, si->node_pages);
seq_printf(s, " - dents: %4d in dirs:%4d\n",
si->ndirty_dent, si->ndirty_dirs);
seq_printf(s, " - meta: %4d in %4d\n",
si->ndirty_meta, si->meta_pages);
-seq_printf(s, " - NATs: %9d\n - SITs: %9d\n",
-si->nats, si->sits);
+seq_printf(s, " - NATs: %9d/%9d\n - SITs: %9d/%9d\n",
+si->dirty_nats, si->nats, si->dirty_sits, si->sits);
seq_printf(s, " - free_nids: %9d\n",
si->fnids);
seq_puts(s, "\nDistribution of User Blocks:");
@@ -277,6 +295,7 @@ static int stat_show(struct seq_file *s, void *v)
for (j = 0; j < si->util_free; j++)
seq_putc(s, '-');
seq_puts(s, "]\n\n");
+seq_printf(s, "IPU: %u blocks\n", si->inplace_count);
seq_printf(s, "SSR: %u blocks in %u segments\n",
si->block_count[SSR], si->segment_count[SSR]);
seq_printf(s, "LFS: %u blocks in %u segments\n",
@@ -289,9 +308,14 @@ static int stat_show(struct seq_file *s, void *v)
/* memory footprint */
update_mem_info(si->sbi);
-seq_printf(s, "\nMemory: %u KB = static: %u + cached: %u\n",
-(si->base_mem + si->cache_mem) >> 10,
-si->base_mem >> 10, si->cache_mem >> 10);
+seq_printf(s, "\nMemory: %u KB\n",
+(si->base_mem + si->cache_mem + si->page_mem) >> 10);
+seq_printf(s, " - static: %u KB\n",
+si->base_mem >> 10);
+seq_printf(s, " - cached: %u KB\n",
+si->cache_mem >> 10);
+seq_printf(s, " - paged : %u KB\n",
+si->page_mem >> 10);
}
mutex_unlock(&f2fs_stat_mutex);
return 0;
@@ -331,6 +355,7 @@ int f2fs_build_stats(struct f2fs_sb_info *sbi)
atomic_set(&sbi->inline_inode, 0);
atomic_set(&sbi->inline_dir, 0);
+atomic_set(&sbi->inplace_count, 0);
mutex_lock(&f2fs_stat_mutex);
list_add_tail(&si->stat_list, &f2fs_stat_list);
...
@@ -286,8 +286,7 @@ void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
f2fs_wait_on_page_writeback(page, type);
de->ino = cpu_to_le32(inode->i_ino);
set_de_type(de, inode);
-if (!f2fs_has_inline_dentry(dir))
-kunmap(page);
+f2fs_dentry_kunmap(dir, page);
set_page_dirty(page);
dir->i_mtime = dir->i_ctime = CURRENT_TIME;
mark_inode_dirty(dir);
...
@@ -26,6 +26,7 @@
#include "segment.h"
#include "xattr.h"
#include "acl.h"
+#include "trace.h"
#include <trace/events/f2fs.h>
static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
@@ -245,6 +246,10 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
sync_nodes:
sync_node_pages(sbi, ino, &wbc);
+/* if cp_error was enabled, we should avoid infinite loop */
+if (unlikely(f2fs_cp_error(sbi)))
+goto out;
if (need_inode_block_update(sbi, ino)) {
mark_inode_dirty_sync(inode);
f2fs_write_inode(inode, NULL);
@@ -264,6 +269,7 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
ret = f2fs_issue_flush(sbi);
out:
trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
+f2fs_trace_ios(NULL, NULL, 1);
return ret;
}
@@ -350,7 +356,7 @@ static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
/* find data/hole in dnode block */
for (; dn.ofs_in_node < end_offset;
dn.ofs_in_node++, pgofs++,
-data_ofs = pgofs << PAGE_CACHE_SHIFT) {
+data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) {
block_t blkaddr;
blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
@@ -426,7 +432,8 @@ int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
if (blkaddr == NULL_ADDR)
continue;
-update_extent_cache(NULL_ADDR, dn);
+dn->data_blkaddr = NULL_ADDR;
+update_extent_cache(dn);
invalidate_blocks(sbi, blkaddr);
nr_free++;
}
@@ -483,8 +490,7 @@ int truncate_blocks(struct inode *inode, u64 from, bool lock)
trace_f2fs_truncate_blocks_enter(inode, from);
-free_from = (pgoff_t)
-((from + blocksize - 1) >> (sbi->log_blocksize));
+free_from = (pgoff_t)F2FS_BYTES_TO_BLK(from + blocksize - 1);
if (lock)
f2fs_lock_op(sbi);
@@ -835,6 +841,19 @@ static long f2fs_fallocate(struct file *file, int mode,
return ret;
}
+static int f2fs_release_file(struct inode *inode, struct file *filp)
+{
+/* some remained atomic pages should discarded */
+if (f2fs_is_atomic_file(inode))
+commit_inmem_pages(inode, true);
+if (f2fs_is_volatile_file(inode)) {
+set_inode_flag(F2FS_I(inode), FI_DROP_CACHE);
+filemap_fdatawrite(inode->i_mapping);
+clear_inode_flag(F2FS_I(inode), FI_DROP_CACHE);
+}
+return 0;
+}
#define F2FS_REG_FLMASK (~(FS_DIRSYNC_FL | FS_TOPDIR_FL))
#define F2FS_OTHER_FLMASK (FS_NODUMP_FL | FS_NOATIME_FL)
@@ -905,29 +924,30 @@ static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
return ret;
}
+static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
+{
+struct inode *inode = file_inode(filp);
+return put_user(inode->i_generation, (int __user *)arg);
+}
static int f2fs_ioc_start_atomic_write(struct file *filp)
{
struct inode *inode = file_inode(filp);
-struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
if (!inode_owner_or_capable(inode))
return -EACCES;
-f2fs_balance_fs(sbi);
+f2fs_balance_fs(F2FS_I_SB(inode));
+if (f2fs_is_atomic_file(inode))
+return 0;
set_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
return f2fs_convert_inline_inode(inode);
}
-static int f2fs_release_file(struct inode *inode, struct file *filp)
-{
-/* some remained atomic pages should discarded */
-if (f2fs_is_atomic_file(inode) || f2fs_is_volatile_file(inode))
-commit_inmem_pages(inode, true);
-return 0;
-}
static int f2fs_ioc_commit_atomic_write(struct file *filp)
{
struct inode *inode = file_inode(filp);
@@ -948,6 +968,7 @@ static int f2fs_ioc_commit_atomic_write(struct file *filp)
ret = f2fs_sync_file(filp, 0, LONG_MAX, 0);
mnt_drop_write_file(filp);
+clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
return ret;
}
@@ -958,11 +979,56 @@ static int f2fs_ioc_start_volatile_write(struct file *filp)
if (!inode_owner_or_capable(inode))
return -EACCES;
+if (f2fs_is_volatile_file(inode))
+return 0;
set_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE);
return f2fs_convert_inline_inode(inode);
}
+static int f2fs_ioc_release_volatile_write(struct file *filp)
+{
+struct inode *inode = file_inode(filp);
+if (!inode_owner_or_capable(inode))
+return -EACCES;
+if (!f2fs_is_volatile_file(inode))
+return 0;
+punch_hole(inode, 0, F2FS_BLKSIZE);
+return 0;
+}
+static int f2fs_ioc_abort_volatile_write(struct file *filp)
+{
+struct inode *inode = file_inode(filp);
+int ret;
+if (!inode_owner_or_capable(inode))
+return -EACCES;
+ret = mnt_want_write_file(filp);
+if (ret)
+return ret;
+f2fs_balance_fs(F2FS_I_SB(inode));
+if (f2fs_is_atomic_file(inode)) {
+commit_inmem_pages(inode, false);
+clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
+}
+if (f2fs_is_volatile_file(inode)) {
+clear_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE);
+filemap_fdatawrite(inode->i_mapping);
+set_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE);
+}
+mnt_drop_write_file(filp);
+return ret;
+}
static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
{
struct inode *inode = file_inode(filp);
@@ -1000,12 +1066,18 @@ long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return f2fs_ioc_getflags(filp, arg);
case F2FS_IOC_SETFLAGS:
return f2fs_ioc_setflags(filp, arg);
+case F2FS_IOC_GETVERSION:
+return f2fs_ioc_getversion(filp, arg);
case F2FS_IOC_START_ATOMIC_WRITE:
return f2fs_ioc_start_atomic_write(filp);
case F2FS_IOC_COMMIT_ATOMIC_WRITE:
return f2fs_ioc_commit_atomic_write(filp);
case F2FS_IOC_START_VOLATILE_WRITE:
return f2fs_ioc_start_volatile_write(filp);
+case F2FS_IOC_RELEASE_VOLATILE_WRITE:
+return f2fs_ioc_release_volatile_write(filp);
+case F2FS_IOC_ABORT_VOLATILE_WRITE:
+return f2fs_ioc_abort_volatile_write(filp);
case FITRIM:
return f2fs_ioc_fitrim(filp, arg);
default:
...
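
To illustrate the refactored atomic/volatile write policies in the file.c
hunks above, here is a hedged user-space sketch of the atomic-write sequence
and the new abort path. The F2FS_IOC_* numbers are not exported as UAPI at
this point; the definitions below mirror fs/f2fs/f2fs.h and should be treated
as assumptions of this example, as should the file path:

    #include <stdio.h>
    #include <string.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/ioctl.h>

    /* Assumed to match fs/f2fs/f2fs.h (not a UAPI header here). */
    #define F2FS_IOCTL_MAGIC		0xf5
    #define F2FS_IOC_START_ATOMIC_WRITE	_IO(F2FS_IOCTL_MAGIC, 1)
    #define F2FS_IOC_COMMIT_ATOMIC_WRITE	_IO(F2FS_IOCTL_MAGIC, 2)
    #define F2FS_IOC_ABORT_VOLATILE_WRITE	_IO(F2FS_IOCTL_MAGIC, 5)

    int main(void)
    {
        const char buf[] = "journal record";
        int fd = open("/mnt/f2fs/db.journal", O_RDWR);	/* placeholder */

        if (fd < 0)
            return 1;
        if (ioctl(fd, F2FS_IOC_START_ATOMIC_WRITE) < 0)
            goto fail;
        if (write(fd, buf, strlen(buf)) < 0 ||
            ioctl(fd, F2FS_IOC_COMMIT_ATOMIC_WRITE) < 0) {
            /* drop the in-memory pages rather than committing them */
            ioctl(fd, F2FS_IOC_ABORT_VOLATILE_WRITE);
            goto fail;
        }
        close(fd);
        return 0;
    fail:
        perror("atomic write");
        close(fd);
        return 1;
    }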
@@ -24,8 +24,6 @@
#include "gc.h"
#include <trace/events/f2fs.h>
-static struct kmem_cache *winode_slab;
static int gc_thread_func(void *data)
{
struct f2fs_sb_info *sbi = data;
@@ -46,7 +44,7 @@ static int gc_thread_func(void *data)
break;
if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
-wait_ms = increase_sleep_time(gc_th, wait_ms);
+increase_sleep_time(gc_th, &wait_ms);
continue;
}
@@ -67,15 +65,15 @@ static int gc_thread_func(void *data)
continue;
if (!is_idle(sbi)) {
-wait_ms = increase_sleep_time(gc_th, wait_ms);
+increase_sleep_time(gc_th, &wait_ms);
mutex_unlock(&sbi->gc_mutex);
continue;
}
if (has_enough_invalid_blocks(sbi))
-wait_ms = decrease_sleep_time(gc_th, wait_ms);
+decrease_sleep_time(gc_th, &wait_ms);
else
-wait_ms = increase_sleep_time(gc_th, wait_ms);
+increase_sleep_time(gc_th, &wait_ms);
stat_inc_bggc_count(sbi);
@@ -356,13 +354,10 @@ static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
iput(inode);
return;
}
-new_ie = f2fs_kmem_cache_alloc(winode_slab, GFP_NOFS);
+new_ie = f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
new_ie->inode = inode;
-retry:
-if (radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie)) {
-cond_resched();
-goto retry;
-}
+f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
list_add_tail(&new_ie->list, &gc_list->ilist);
}
@@ -373,7 +368,7 @@ static void put_gc_inode(struct gc_inode_list *gc_list)
radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
iput(ie->inode);
list_del(&ie->list);
-kmem_cache_free(winode_slab, ie);
+kmem_cache_free(inode_entry_slab, ie);
}
}
@@ -703,8 +698,7 @@ int f2fs_gc(struct f2fs_sb_info *sbi)
.iroot = RADIX_TREE_INIT(GFP_NOFS),
};
-cpc.reason = test_opt(sbi, FASTBOOT) ? CP_UMOUNT : CP_SYNC;
+cpc.reason = __get_cp_reason(sbi);
gc_more:
if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE)))
goto stop;
@@ -750,17 +744,3 @@ void build_gc_manager(struct f2fs_sb_info *sbi)
{
DIRTY_I(sbi)->v_ops = &default_v_ops;
}
-int __init create_gc_caches(void)
-{
-winode_slab = f2fs_kmem_cache_create("f2fs_gc_inodes",
-sizeof(struct inode_entry));
-if (!winode_slab)
-return -ENOMEM;
-return 0;
-}
-void destroy_gc_caches(void)
-{
-kmem_cache_destroy(winode_slab);
-}
@@ -35,11 +35,6 @@ struct f2fs_gc_kthread {
unsigned int gc_idle;
};
-struct inode_entry {
-struct list_head list;
-struct inode *inode;
-};
struct gc_inode_list {
struct list_head ilist;
struct radix_tree_root iroot;
@@ -69,26 +64,26 @@ static inline block_t limit_free_user_blocks(struct f2fs_sb_info *sbi)
return (long)(reclaimable_user_blocks * LIMIT_FREE_BLOCK) / 100;
}
-static inline long increase_sleep_time(struct f2fs_gc_kthread *gc_th, long wait)
+static inline void increase_sleep_time(struct f2fs_gc_kthread *gc_th,
+long *wait)
{
-if (wait == gc_th->no_gc_sleep_time)
-return wait;
+if (*wait == gc_th->no_gc_sleep_time)
+return;
-wait += gc_th->min_sleep_time;
-if (wait > gc_th->max_sleep_time)
-wait = gc_th->max_sleep_time;
-return wait;
+*wait += gc_th->min_sleep_time;
+if (*wait > gc_th->max_sleep_time)
+*wait = gc_th->max_sleep_time;
}
-static inline long decrease_sleep_time(struct f2fs_gc_kthread *gc_th, long wait)
+static inline void decrease_sleep_time(struct f2fs_gc_kthread *gc_th,
+long *wait)
{
-if (wait == gc_th->no_gc_sleep_time)
-wait = gc_th->max_sleep_time;
-wait -= gc_th->min_sleep_time;
-if (wait <= gc_th->min_sleep_time)
-wait = gc_th->min_sleep_time;
-return wait;
+if (*wait == gc_th->no_gc_sleep_time)
+*wait = gc_th->max_sleep_time;
+*wait -= gc_th->min_sleep_time;
+if (*wait <= gc_th->min_sleep_time)
+*wait = gc_th->min_sleep_time;
}
static inline bool has_enough_invalid_blocks(struct f2fs_sb_info *sbi)
...
@@ -50,6 +50,12 @@ void read_inline_data(struct page *page, struct page *ipage)
SetPageUptodate(page);
}
+static void truncate_inline_data(struct page *ipage)
+{
+f2fs_wait_on_page_writeback(ipage, NODE);
+memset(inline_data_addr(ipage), 0, MAX_INLINE_DATA);
+}
int f2fs_read_inline_data(struct inode *inode, struct page *page)
{
struct page *ipage;
@@ -79,7 +85,6 @@ int f2fs_read_inline_data(struct inode *inode, struct page *page)
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
{
void *src_addr, *dst_addr;
-block_t new_blk_addr;
struct f2fs_io_info fio = {
.type = DATA,
.rw = WRITE_SYNC | REQ_PRIO,
@@ -115,9 +120,9 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
/* write data page to try to make data consistent */
set_page_writeback(page);
+fio.blk_addr = dn->data_blkaddr;
-write_data_page(page, dn, &new_blk_addr, &fio);
-update_extent_cache(new_blk_addr, dn);
+write_data_page(page, dn, &fio);
+update_extent_cache(dn);
f2fs_wait_on_page_writeback(page, DATA);
if (dirty)
inode_dec_dirty_pages(dn->inode);
@@ -126,7 +131,7 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
set_inode_flag(F2FS_I(dn->inode), FI_APPEND_WRITE);
/* clear inline data and flag after data writeback */
-truncate_inline_data(dn->inode_page, 0);
+truncate_inline_data(dn->inode_page);
clear_out:
stat_dec_inline_inode(dn->inode);
f2fs_clear_inline_inode(dn->inode);
@@ -199,19 +204,6 @@ int f2fs_write_inline_data(struct inode *inode, struct page *page)
return 0;
}
-void truncate_inline_data(struct page *ipage, u64 from)
-{
-void *addr;
-if (from >= MAX_INLINE_DATA)
-return;
-f2fs_wait_on_page_writeback(ipage, NODE);
-addr = inline_data_addr(ipage);
-memset(addr + from, 0, MAX_INLINE_DATA - from);
-}
bool recover_inline_data(struct inode *inode, struct page *npage)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
@@ -253,7 +245,7 @@ bool recover_inline_data(struct inode *inode, struct page *npage)
if (f2fs_has_inline_data(inode)) {
ipage = get_node_page(sbi, inode->i_ino);
f2fs_bug_on(sbi, IS_ERR(ipage));
-truncate_inline_data(ipage, 0);
+truncate_inline_data(ipage);
f2fs_clear_inline_inode(inode);
update_inode(inode, ipage);
f2fs_put_page(ipage, 1);
@@ -371,7 +363,7 @@ static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
set_page_dirty(page);
/* clear inline dir and flag after data writeback */
-truncate_inline_data(ipage, 0);
+truncate_inline_data(ipage);
stat_dec_inline_dir(dir);
clear_inode_flag(F2FS_I(dir), FI_INLINE_DENTRY);
...
@@ -67,29 +67,23 @@ static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
}
}
-static int __recover_inline_status(struct inode *inode, struct page *ipage)
+static void __recover_inline_status(struct inode *inode, struct page *ipage)
{
void *inline_data = inline_data_addr(ipage);
-struct f2fs_inode *ri;
-void *zbuf;
+__le32 *start = inline_data;
+__le32 *end = start + MAX_INLINE_DATA / sizeof(__le32);
-zbuf = kzalloc(MAX_INLINE_DATA, GFP_NOFS);
-if (!zbuf)
-return -ENOMEM;
-if (!memcmp(zbuf, inline_data, MAX_INLINE_DATA)) {
-kfree(zbuf);
-return 0;
+while (start < end) {
+if (*start++) {
+f2fs_wait_on_page_writeback(ipage, NODE);
+set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
+set_raw_inline(F2FS_I(inode), F2FS_INODE(ipage));
+set_page_dirty(ipage);
+return;
+}
}
-kfree(zbuf);
-f2fs_wait_on_page_writeback(ipage, NODE);
-set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
-ri = F2FS_INODE(ipage);
-set_raw_inline(F2FS_I(inode), ri);
-set_page_dirty(ipage);
-return 0;
+return;
}
static int do_read_inode(struct inode *inode)
@@ -98,7 +92,6 @@ static int do_read_inode(struct inode *inode)
struct f2fs_inode_info *fi = F2FS_I(inode);
struct page *node_page;
struct f2fs_inode *ri;
-int err = 0;
/* Check if ino is within scope */
if (check_nid_range(sbi, inode->i_ino)) {
@@ -142,7 +135,7 @@ static int do_read_inode(struct inode *inode)
/* check data exist */
if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
-err = __recover_inline_status(inode, node_page);
+__recover_inline_status(inode, node_page);
/* get rdev by using inline_info */
__get_inode_rdev(inode, ri);
@@ -152,7 +145,7 @@ static int do_read_inode(struct inode *inode)
stat_inc_inline_inode(inode);
stat_inc_inline_dir(inode);
-return err;
+return 0;
}
struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
@@ -304,7 +297,7 @@ void f2fs_evict_inode(struct inode *inode)
nid_t xnid = F2FS_I(inode)->i_xattr_nid;
/* some remained atomic pages should discarded */
-if (f2fs_is_atomic_file(inode) || f2fs_is_volatile_file(inode))
+if (f2fs_is_atomic_file(inode))
commit_inmem_pages(inode, true);
trace_f2fs_evict_inode(inode);
...
@@ -299,7 +299,7 @@ static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
inode->i_op = &f2fs_dir_inode_operations;
inode->i_fop = &f2fs_dir_operations;
inode->i_mapping->a_ops = &f2fs_dblock_aops;
-mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO);
+mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_HIGH_ZERO);
set_inode_flag(F2FS_I(inode), FI_INC_LINK);
f2fs_lock_op(sbi);
...
@@ -25,10 +25,19 @@
/* vector size for gang look-up from nat cache that consists of radix tree */
#define NATVEC_SIZE 64
+#define SETVEC_SIZE 32
/* return value for read_node_page */
#define LOCKED_PAGE 1
+/* For flag in struct node_info */
+enum {
+IS_CHECKPOINTED, /* is it checkpointed before? */
+HAS_FSYNCED_INODE, /* is the inode fsynced before? */
+HAS_LAST_FSYNC, /* has the latest node fsync mark? */
+IS_DIRTY, /* this nat entry is dirty? */
+};
/*
* For node information
*/
@@ -37,18 +46,11 @@ struct node_info {
nid_t ino; /* inode number of the node's owner */
block_t blk_addr; /* block address of the node */
unsigned char version; /* version of the node */
-};
-enum {
-IS_CHECKPOINTED, /* is it checkpointed before? */
-HAS_FSYNCED_INODE, /* is the inode fsynced before? */
-HAS_LAST_FSYNC, /* has the latest node fsync mark? */
-IS_DIRTY, /* this nat entry is dirty? */
+unsigned char flag; /* for node information bits */
};
struct nat_entry {
struct list_head list; /* for clean or dirty nat list */
-unsigned char flag; /* for node information bits */
struct node_info ni; /* in-memory node information */
};
@@ -63,20 +65,30 @@ struct nat_entry {
#define inc_node_version(version) (++version)
+static inline void copy_node_info(struct node_info *dst,
+struct node_info *src)
+{
+dst->nid = src->nid;
+dst->ino = src->ino;
+dst->blk_addr = src->blk_addr;
+dst->version = src->version;
+/* should not copy flag here */
+}
static inline void set_nat_flag(struct nat_entry *ne,
unsigned int type, bool set)
{
unsigned char mask = 0x01 << type;
if (set)
-ne->flag |= mask;
+ne->ni.flag |= mask;
else
-ne->flag &= ~mask;
+ne->ni.flag &= ~mask;
}
static inline bool get_nat_flag(struct nat_entry *ne, unsigned int type)
{
unsigned char mask = 0x01 << type;
-return ne->flag & mask;
+return ne->ni.flag & mask;
}
static inline void nat_reset_flag(struct nat_entry *ne)
@@ -108,6 +120,7 @@ enum mem_type {
NAT_ENTRIES, /* indicates the cached nat entry */
DIRTY_DENTS, /* indicates dirty dentry pages */
INO_ENTRIES, /* indicates inode entries */
+BASE_CHECK, /* check kernel status */
};
struct nat_entry_set {
@@ -200,11 +213,19 @@ static inline void fill_node_footer(struct page *page, nid_t nid,
nid_t ino, unsigned int ofs, bool reset)
{
struct f2fs_node *rn = F2FS_NODE(page);
+unsigned int old_flag = 0;
if (reset)
memset(rn, 0, sizeof(*rn));
+else
+old_flag = le32_to_cpu(rn->footer.flag);
rn->footer.nid = cpu_to_le32(nid);
rn->footer.ino = cpu_to_le32(ino);
-rn->footer.flag = cpu_to_le32(ofs << OFFSET_BIT_SHIFT);
+/* should remain old flag bits such as COLD_BIT_SHIFT */
+rn->footer.flag = cpu_to_le32((ofs << OFFSET_BIT_SHIFT) |
+(old_flag & OFFSET_BIT_MASK));
}
static inline void copy_node_footer(struct page *dst, struct page *src)
...
@@ -346,6 +346,10 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
if (IS_INODE(page)) {
recover_inline_xattr(inode, page);
} else if (f2fs_has_xattr_block(ofs_of_node(page))) {
+/*
+* Deprecated; xattr blocks should be found from cold log.
+* But, we should remain this for backward compatibility.
+*/
recover_xattr_data(inode, page, blkaddr);
goto out;
}
@@ -396,7 +400,8 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
/* write dummy data page */
recover_data_page(sbi, NULL, &sum, src, dest);
-update_extent_cache(dest, &dn);
+dn.data_blkaddr = dest;
+update_extent_cache(&dn);
recovered++;
}
dn.ofs_in_node++;
@@ -503,7 +508,7 @@ int recover_fsync_data(struct f2fs_sb_info *sbi)
INIT_LIST_HEAD(&inode_list);
/* step #1: find fsynced inode numbers */
-sbi->por_doing = true;
+set_sbi_flag(sbi, SBI_POR_DOING);
/* prevent checkpoint */
mutex_lock(&sbi->cp_mutex);
@@ -536,7 +541,7 @@ int recover_fsync_data(struct f2fs_sb_info *sbi)
truncate_inode_pages_final(META_MAPPING(sbi));
}
-sbi->por_doing = false;
+clear_sbi_flag(sbi, SBI_POR_DOING);
if (err) {
discard_next_dnode(sbi, blkaddr);
...
@@ -189,6 +189,7 @@ struct sit_info {
char *sit_bitmap; /* SIT bitmap pointer */
unsigned int bitmap_size; /* SIT bitmap size */
+unsigned long *tmp_map; /* bitmap for temporal use */
unsigned long *dirty_sentries_bitmap; /* bitmap for dirty sentries */
unsigned int dirty_sentries; /* # of dirty sentries */
unsigned int sents_per_block; /* # of SIT entries per block */
@@ -207,7 +208,7 @@ struct free_segmap_info {
unsigned int start_segno; /* start segment number logically */
unsigned int free_segments; /* # of free segments */
unsigned int free_sections; /* # of free sections */
-rwlock_t segmap_lock; /* free segmap lock */
+spinlock_t segmap_lock; /* free segmap lock */
unsigned long *free_segmap; /* free segment bitmap */
unsigned long *free_secmap; /* free section bitmap */
};
@@ -318,9 +319,9 @@ static inline unsigned int find_next_inuse(struct free_segmap_info *free_i,
unsigned int max, unsigned int segno)
{
unsigned int ret;
-read_lock(&free_i->segmap_lock);
+spin_lock(&free_i->segmap_lock);
ret = find_next_bit(free_i->free_segmap, max, segno);
-read_unlock(&free_i->segmap_lock);
+spin_unlock(&free_i->segmap_lock);
return ret;
}
@@ -331,7 +332,7 @@ static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno)
unsigned int start_segno = secno * sbi->segs_per_sec;
unsigned int next;
-write_lock(&free_i->segmap_lock);
+spin_lock(&free_i->segmap_lock);
clear_bit(segno, free_i->free_segmap);
free_i->free_segments++;
@@ -340,7 +341,7 @@ static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno)
clear_bit(secno, free_i->free_secmap);
free_i->free_sections++;
}
-write_unlock(&free_i->segmap_lock);
+spin_unlock(&free_i->segmap_lock);
}
static inline void __set_inuse(struct f2fs_sb_info *sbi,
@@ -362,7 +363,7 @@ static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
unsigned int start_segno = secno * sbi->segs_per_sec;
unsigned int next;
-write_lock(&free_i->segmap_lock);
+spin_lock(&free_i->segmap_lock);
if (test_and_clear_bit(segno, free_i->free_segmap)) {
free_i->free_segments++;
@@ -373,7 +374,7 @@ static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
free_i->free_sections++;
}
}
-write_unlock(&free_i->segmap_lock);
+spin_unlock(&free_i->segmap_lock);
}
static inline void __set_test_and_inuse(struct f2fs_sb_info *sbi,
@@ -381,13 +382,13 @@ static inline void __set_test_and_inuse(struct f2fs_sb_info *sbi,
{
struct free_segmap_info *free_i = FREE_I(sbi);
unsigned int secno = segno / sbi->segs_per_sec;
-write_lock(&free_i->segmap_lock);
+spin_lock(&free_i->segmap_lock);
if (!test_and_set_bit(segno, free_i->free_segmap)) {
free_i->free_segments--;
if (!test_and_set_bit(secno, free_i->free_secmap))
free_i->free_sections--;
}
-write_unlock(&free_i->segmap_lock);
+spin_unlock(&free_i->segmap_lock);
}
static inline void get_sit_bitmap(struct f2fs_sb_info *sbi,
@@ -460,7 +461,7 @@ static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi, int freed)
int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
-if (unlikely(sbi->por_doing))
+if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
return false;
return (free_sections(sbi) + freed) <= (node_secs + 2 * dent_secs +
@@ -599,13 +600,13 @@ static inline void check_block_count(struct f2fs_sb_info *sbi,
static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
{
if (segno > TOTAL_SEGS(sbi) - 1)
-sbi->need_fsck = true;
+set_sbi_flag(sbi, SBI_NEED_FSCK);
}
static inline void verify_block_addr(struct f2fs_sb_info *sbi, block_t blk_addr)
{
if (blk_addr < SEG0_BLKADDR(sbi) || blk_addr >= MAX_BLKADDR(sbi))
-sbi->need_fsck = true;
+set_sbi_flag(sbi, SBI_NEED_FSCK);
}
/*
@@ -616,11 +617,11 @@ static inline void check_block_count(struct f2fs_sb_info *sbi,
{
/* check segment usage */
if (GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg)
-sbi->need_fsck = true;
+set_sbi_flag(sbi, SBI_NEED_FSCK);
/* check boundary of a given segment number */
if (segno > TOTAL_SEGS(sbi) - 1)
-sbi->need_fsck = true;
+set_sbi_flag(sbi, SBI_NEED_FSCK);
}
#endif
...
@@ -30,6 +30,7 @@
#include "segment.h"
#include "xattr.h"
#include "gc.h"
+#include "trace.h"
#define CREATE_TRACE_POINTS
#include <trace/events/f2fs.h>
@@ -41,6 +42,7 @@ static struct kset *f2fs_kset;
enum {
Opt_gc_background,
Opt_disable_roll_forward,
+Opt_norecovery,
Opt_discard,
Opt_noheap,
Opt_user_xattr,
@@ -61,6 +63,7 @@ enum {
static match_table_t f2fs_tokens = {
{Opt_gc_background, "background_gc=%s"},
{Opt_disable_roll_forward, "disable_roll_forward"},
+{Opt_norecovery, "norecovery"},
{Opt_discard, "discard"},
{Opt_noheap, "no_heap"},
{Opt_user_xattr, "user_xattr"},
@@ -192,6 +195,7 @@ F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_no_gc_sleep_time, no_gc_sleep_time);
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_idle, gc_idle);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, reclaim_segments, rec_prefree_segments);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, max_small_discards, max_discards);
+F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, batched_trim_sections, trim_sections);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, ipu_policy, ipu_policy);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_ipu_util, min_ipu_util);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_fsync_blocks, min_fsync_blocks);
@@ -207,6 +211,7 @@ static struct attribute *f2fs_attrs[] = {
ATTR_LIST(gc_idle),
ATTR_LIST(reclaim_segments),
ATTR_LIST(max_small_discards),
+ATTR_LIST(batched_trim_sections),
ATTR_LIST(ipu_policy),
ATTR_LIST(min_ipu_util),
ATTR_LIST(min_fsync_blocks),
@@ -286,6 +291,12 @@ static int parse_options(struct super_block *sb, char *options)
case Opt_disable_roll_forward:
set_opt(sbi, DISABLE_ROLL_FORWARD);
break;
+case Opt_norecovery:
+/* this option mounts f2fs with ro */
+set_opt(sbi, DISABLE_ROLL_FORWARD);
+if (!f2fs_readonly(sb))
+return -EINVAL;
+break;
case Opt_discard:
set_opt(sbi, DISCARD);
break;
@@ -446,8 +457,13 @@ static void f2fs_put_super(struct super_block *sb)
f2fs_destroy_stats(sbi);
stop_gc_thread(sbi);
-/* We don't need to do checkpoint when it's clean */
-if (sbi->s_dirty) {
+/*
+* We don't need to do checkpoint when superblock is clean.
+* But, the previous checkpoint was not done by umount, it needs to do
+* clean checkpoint again.
+*/
+if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
+!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG)) {
struct cp_control cpc = {
.reason = CP_UMOUNT,
};
@@ -486,13 +502,15 @@ int f2fs_sync_fs(struct super_block *sb, int sync)
if (sync) {
struct cp_control cpc;
-cpc.reason = test_opt(sbi, FASTBOOT) ? CP_UMOUNT : CP_SYNC;
+cpc.reason = __get_cp_reason(sbi);
mutex_lock(&sbi->gc_mutex);
write_checkpoint(sbi, &cpc);
mutex_unlock(&sbi->gc_mutex);
} else {
f2fs_balance_fs(sbi);
}
+f2fs_trace_ios(NULL, NULL, 1);
return 0;
}
@@ -887,7 +905,7 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
atomic_set(&sbi->nr_pages[i], 0);
sbi->dir_level = DEF_DIR_LEVEL;
-sbi->need_fsck = false;
+clear_sbi_flag(sbi, SBI_NEED_FSCK);
}
/*
@@ -942,6 +960,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
struct inode *root;
long err = -EINVAL;
bool retry = true;
+char *options = NULL;
int i;
try_onemore:
@@ -973,9 +992,15 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
set_opt(sbi, POSIX_ACL);
#endif
/* parse mount options */
-err = parse_options(sb, (char *)data);
-if (err)
+options = kstrdup((const char *)data, GFP_KERNEL);
+if (data && !options) {
+err = -ENOMEM;
goto free_sb_buf;
+}
+err = parse_options(sb, options);
+if (err)
+goto free_options;
sb->s_maxbytes = max_file_size(le32_to_cpu(raw_super->log_blocksize));
sb->s_max_links = F2FS_LINK_MAX;
@@ -998,7 +1023,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
 	mutex_init(&sbi->writepages);
 	mutex_init(&sbi->cp_mutex);
 	init_rwsem(&sbi->node_write);
-	sbi->por_doing = false;
+	clear_sbi_flag(sbi, SBI_POR_DOING);
 	spin_lock_init(&sbi->stat_lock);
 	init_rwsem(&sbi->read_io.io_rwsem);
@@ -1019,7 +1044,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
 	if (IS_ERR(sbi->meta_inode)) {
 		f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
 		err = PTR_ERR(sbi->meta_inode);
-		goto free_sb_buf;
+		goto free_options;
 	}
 	err = get_valid_checkpoint(sbi);
@@ -1122,10 +1147,19 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
 		goto free_proc;
 	if (!retry)
-		sbi->need_fsck = true;
+		set_sbi_flag(sbi, SBI_NEED_FSCK);
 	/* recover fsynced data */
 	if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
+		/*
+		 * mount should be failed, when device has readonly mode, and
+		 * previous checkpoint was not done by clean system shutdown.
+		 */
+		if (bdev_read_only(sb->s_bdev) &&
+				!is_set_ckpt_flags(sbi->ckpt, CP_UMOUNT_FLAG)) {
+			err = -EROFS;
+			goto free_kobj;
+		}
 		err = recover_fsync_data(sbi);
 		if (err) {
 			f2fs_msg(sb, KERN_ERR,
@@ -1144,6 +1178,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
 		if (err)
 			goto free_kobj;
 	}
+	kfree(options);
 	return 0;
 free_kobj:
@@ -1168,6 +1203,8 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
 free_meta_inode:
 	make_bad_inode(sbi->meta_inode);
 	iput(sbi->meta_inode);
+free_options:
+	kfree(options);
 free_sb_buf:
 	brelse(raw_super_buf);
 free_sbi:
@@ -1188,11 +1225,18 @@ static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
 	return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
 }
+static void kill_f2fs_super(struct super_block *sb)
+{
+	if (sb->s_root)
+		set_sbi_flag(F2FS_SB(sb), SBI_IS_CLOSE);
+	kill_block_super(sb);
+}
 static struct file_system_type f2fs_fs_type = {
 	.owner		= THIS_MODULE,
 	.name		= "f2fs",
 	.mount		= f2fs_mount,
-	.kill_sb	= kill_block_super,
+	.kill_sb	= kill_f2fs_super,
 	.fs_flags	= FS_REQUIRES_DEV,
 };
 MODULE_ALIAS_FS("f2fs");
@@ -1220,6 +1264,8 @@ static int __init init_f2fs_fs(void)
 {
 	int err;
+	f2fs_build_trace_ios();
 	err = init_inodecache();
 	if (err)
 		goto fail;
@@ -1229,12 +1275,9 @@ static int __init init_f2fs_fs(void)
 	err = create_segment_manager_caches();
 	if (err)
 		goto free_node_manager_caches;
-	err = create_gc_caches();
-	if (err)
-		goto free_segment_manager_caches;
 	err = create_checkpoint_caches();
 	if (err)
-		goto free_gc_caches;
+		goto free_segment_manager_caches;
 	f2fs_kset = kset_create_and_add("f2fs", NULL, fs_kobj);
 	if (!f2fs_kset) {
 		err = -ENOMEM;
@@ -1251,8 +1294,6 @@ static int __init init_f2fs_fs(void)
 	kset_unregister(f2fs_kset);
 free_checkpoint_caches:
 	destroy_checkpoint_caches();
-free_gc_caches:
-	destroy_gc_caches();
 free_segment_manager_caches:
 	destroy_segment_manager_caches();
 free_node_manager_caches:
@@ -1269,11 +1310,11 @@ static void __exit exit_f2fs_fs(void)
 	f2fs_destroy_root_stats();
 	unregister_filesystem(&f2fs_fs_type);
 	destroy_checkpoint_caches();
-	destroy_gc_caches();
 	destroy_segment_manager_caches();
 	destroy_node_manager_caches();
 	destroy_inodecache();
 	kset_unregister(f2fs_kset);
+	f2fs_destroy_trace_ios();
 }
 module_init(init_f2fs_fs)
......
/*
* f2fs IO tracer
*
* Copyright (c) 2014 Motorola Mobility
* Copyright (c) 2014 Jaegeuk Kim <jaegeuk@kernel.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/sched.h>
#include <linux/radix-tree.h>
#include "f2fs.h"
#include "trace.h"
static RADIX_TREE(pids, GFP_ATOMIC);
static spinlock_t pids_lock;
static struct last_io_info last_io;
static inline void __print_last_io(void)
{
if (!last_io.len)
return;
trace_printk("%3x:%3x %4x %-16s %2x %5x %12x %4x\n",
last_io.major, last_io.minor,
last_io.pid, "----------------",
last_io.type,
last_io.fio.rw, last_io.fio.blk_addr,
last_io.len);
memset(&last_io, 0, sizeof(last_io));
}
static int __file_type(struct inode *inode, pid_t pid)
{
if (f2fs_is_atomic_file(inode))
return __ATOMIC_FILE;
else if (f2fs_is_volatile_file(inode))
return __VOLATILE_FILE;
else if (S_ISDIR(inode->i_mode))
return __DIR_FILE;
else if (inode->i_ino == F2FS_NODE_INO(F2FS_I_SB(inode)))
return __NODE_FILE;
else if (inode->i_ino == F2FS_META_INO(F2FS_I_SB(inode)))
return __META_FILE;
else if (pid)
return __NORMAL_FILE;
else
return __MISC_FILE;
}
void f2fs_trace_pid(struct page *page)
{
struct inode *inode = page->mapping->host;
pid_t pid = task_pid_nr(current);
void *p;
page->private = pid;
if (radix_tree_preload(GFP_NOFS))
return;
spin_lock(&pids_lock);
p = radix_tree_lookup(&pids, pid);
if (p == current)
goto out;
if (p)
radix_tree_delete(&pids, pid);
f2fs_radix_tree_insert(&pids, pid, current);
trace_printk("%3x:%3x %4x %-16s\n",
MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev),
pid, current->comm);
out:
spin_unlock(&pids_lock);
radix_tree_preload_end();
}
void f2fs_trace_ios(struct page *page, struct f2fs_io_info *fio, int flush)
{
struct inode *inode;
pid_t pid;
int major, minor;
if (flush) {
__print_last_io();
return;
}
inode = page->mapping->host;
pid = page_private(page);
major = MAJOR(inode->i_sb->s_dev);
minor = MINOR(inode->i_sb->s_dev);
if (last_io.major == major && last_io.minor == minor &&
last_io.pid == pid &&
last_io.type == __file_type(inode, pid) &&
last_io.fio.rw == fio->rw &&
last_io.fio.blk_addr + last_io.len == fio->blk_addr) {
last_io.len++;
return;
}
__print_last_io();
last_io.major = major;
last_io.minor = minor;
last_io.pid = pid;
last_io.type = __file_type(inode, pid);
last_io.fio = *fio;
last_io.len = 1;
return;
}
void f2fs_build_trace_ios(void)
{
spin_lock_init(&pids_lock);
}
#define PIDVEC_SIZE 128
static unsigned int gang_lookup_pids(pid_t *results, unsigned long first_index,
unsigned int max_items)
{
struct radix_tree_iter iter;
void **slot;
unsigned int ret = 0;
if (unlikely(!max_items))
return 0;
radix_tree_for_each_slot(slot, &pids, &iter, first_index) {
results[ret] = iter.index;
if (++ret == PIDVEC_SIZE)
break;
}
return ret;
}
void f2fs_destroy_trace_ios(void)
{
pid_t pid[PIDVEC_SIZE];
pid_t next_pid = 0;
unsigned int found;
spin_lock(&pids_lock);
while ((found = gang_lookup_pids(pid, next_pid, PIDVEC_SIZE))) {
unsigned idx;
next_pid = pid[found - 1] + 1;
for (idx = 0; idx < found; idx++)
radix_tree_delete(&pids, pid[idx]);
}
spin_unlock(&pids_lock);
}
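The core of f2fs_trace_ios() above is run-length merging: back-to-back pages from the same pid, with the same file type and rw bits, hitting consecutive block addresses are folded into a single trace line with a length count, flushed only when the run breaks (or on the explicit flush call). A rough, self-contained illustration of that merging idea (plain userspace C with made-up sample records, not the kernel code) looks like this:

/*
 * Standalone sketch of the run-length merging used by the IO tracer:
 * adjacent records (same pid/rw, contiguous blk_addr) are folded into
 * one line with a length count.  Sample data below is invented.
 */
#include <stdio.h>

struct rec { int pid; int rw; unsigned int blk_addr; };

static struct rec last;
static unsigned int last_len;

static void flush_last(void)
{
	if (!last_len)
		return;
	printf("pid %d rw %d blk %u len %u\n",
	       last.pid, last.rw, last.blk_addr, last_len);
	last_len = 0;
}

static void trace_io(struct rec r)
{
	/* extend the previous run if this record is contiguous with it */
	if (last_len && last.pid == r.pid && last.rw == r.rw &&
	    last.blk_addr + last_len == r.blk_addr) {
		last_len++;
		return;
	}
	flush_last();
	last = r;
	last_len = 1;
}

int main(void)
{
	struct rec sample[] = {
		{ 100, 1, 8 }, { 100, 1, 9 }, { 100, 1, 10 },	/* one run */
		{ 100, 1, 20 },					/* new run */
		{ 200, 0, 21 },					/* different pid/rw */
	};
	unsigned int i;

	for (i = 0; i < sizeof(sample) / sizeof(sample[0]); i++)
		trace_io(sample[i]);
	flush_last();	/* analogous to f2fs_trace_ios(NULL, NULL, 1) */
	return 0;
}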
/*
* f2fs IO tracer
*
* Copyright (c) 2014 Motorola Mobility
* Copyright (c) 2014 Jaegeuk Kim <jaegeuk@kernel.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __F2FS_TRACE_H__
#define __F2FS_TRACE_H__
#ifdef CONFIG_F2FS_IO_TRACE
#include <trace/events/f2fs.h>
enum file_type {
__NORMAL_FILE,
__DIR_FILE,
__NODE_FILE,
__META_FILE,
__ATOMIC_FILE,
__VOLATILE_FILE,
__MISC_FILE,
};
struct last_io_info {
int major, minor;
pid_t pid;
enum file_type type;
struct f2fs_io_info fio;
block_t len;
};
extern void f2fs_trace_pid(struct page *);
extern void f2fs_trace_ios(struct page *, struct f2fs_io_info *, int);
extern void f2fs_build_trace_ios(void);
extern void f2fs_destroy_trace_ios(void);
#else
#define f2fs_trace_pid(p)
#define f2fs_trace_ios(p, i, n)
#define f2fs_build_trace_ios()
#define f2fs_destroy_trace_ios()
#endif
#endif /* __F2FS_TRACE_H__ */
@@ -19,12 +19,16 @@
 #define F2FS_MAX_LOG_SECTOR_SIZE	12	/* 12 bits for 4096 bytes */
 #define F2FS_LOG_SECTORS_PER_BLOCK	3	/* log number for sector/blk */
 #define F2FS_BLKSIZE			4096	/* support only 4KB block */
+#define F2FS_BLKSIZE_BITS		12	/* bits for F2FS_BLKSIZE */
 #define F2FS_MAX_EXTENSION		64	/* # of extension entries */
 #define F2FS_BLK_ALIGN(x)	(((x) + F2FS_BLKSIZE - 1) / F2FS_BLKSIZE)
 #define NULL_ADDR		((block_t)0)	/* used as block_t addresses */
 #define NEW_ADDR		((block_t)-1)	/* used as block_t addresses */
+#define F2FS_BYTES_TO_BLK(bytes)	((bytes) >> F2FS_BLKSIZE_BITS)
+#define F2FS_BLK_TO_BYTES(blk)		((blk) << F2FS_BLKSIZE_BITS)
 /* 0, 1(node nid), 2(meta nid) are reserved node id */
 #define F2FS_RESERVED_NODE_NUM		3
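The two new conversion macros are plain shifts by the 4KB block size defined above. A tiny self-contained check (userspace C mirroring the macros, with an arbitrary example offset):

/* Userspace mirror of the new helpers, assuming the 4KB block size above. */
#include <stdio.h>

#define F2FS_BLKSIZE_BITS		12
#define F2FS_BYTES_TO_BLK(bytes)	((bytes) >> F2FS_BLKSIZE_BITS)
#define F2FS_BLK_TO_BYTES(blk)		((blk) << F2FS_BLKSIZE_BITS)

int main(void)
{
	unsigned long long bytes = 9000;	/* arbitrary example offset */

	/* byte 9000 falls in block 2; block 2 starts at byte 8192 */
	printf("blk(%llu) = %llu\n", bytes, F2FS_BYTES_TO_BLK(bytes));
	printf("bytes(blk 2) = %llu\n", F2FS_BLK_TO_BYTES(2ULL));
	return 0;
}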
@@ -87,6 +91,7 @@ struct f2fs_super_block {
 /*
  * For checkpoint
  */
+#define CP_FASTBOOT_FLAG	0x00000020
 #define CP_FSCK_FLAG		0x00000010
 #define CP_ERROR_FLAG		0x00000008
 #define CP_COMPACT_SUM_FLAG	0x00000004
@@ -224,6 +229,8 @@ enum {
 	OFFSET_BIT_SHIFT
 };
+#define OFFSET_BIT_MASK		(0x07)	/* (0x01 << OFFSET_BIT_SHIFT) - 1 */
 struct node_footer {
 	__le32 nid;		/* node id */
 	__le32 ino;		/* inode nunmber */
......
@@ -72,6 +72,7 @@
 #define show_cpreason(type)					\
 	__print_symbolic(type,					\
 		{ CP_UMOUNT,	"Umount" },			\
+		{ CP_FASTBOOT,	"Fastboot" },			\
 		{ CP_SYNC,	"Sync" },			\
 		{ CP_DISCARD,	"Discard" })
@@ -148,14 +149,14 @@ DEFINE_EVENT(f2fs__inode, f2fs_sync_file_enter,
 TRACE_EVENT(f2fs_sync_file_exit,
-	TP_PROTO(struct inode *inode, bool need_cp, int datasync, int ret),
+	TP_PROTO(struct inode *inode, int need_cp, int datasync, int ret),
 	TP_ARGS(inode, need_cp, datasync, ret),
 	TP_STRUCT__entry(
 		__field(dev_t, dev)
 		__field(ino_t, ino)
-		__field(bool, need_cp)
+		__field(int, need_cp)
 		__field(int, datasync)
 		__field(int, ret)
 	),
@@ -190,7 +191,7 @@ TRACE_EVENT(f2fs_sync_fs,
 	TP_fast_assign(
 		__entry->dev = sb->s_dev;
-		__entry->dirty = F2FS_SB(sb)->s_dirty;
+		__entry->dirty = is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY);
 		__entry->wait = wait;
 	),
@@ -440,38 +441,6 @@ TRACE_EVENT(f2fs_truncate_partial_nodes,
 		__entry->err)
 );
-TRACE_EVENT_CONDITION(f2fs_submit_page_bio,
-	TP_PROTO(struct page *page, sector_t blkaddr, int type),
-	TP_ARGS(page, blkaddr, type),
-	TP_CONDITION(page->mapping),
-	TP_STRUCT__entry(
-		__field(dev_t, dev)
-		__field(ino_t, ino)
-		__field(pgoff_t, index)
-		__field(sector_t, blkaddr)
-		__field(int, type)
-	),
-	TP_fast_assign(
-		__entry->dev = page->mapping->host->i_sb->s_dev;
-		__entry->ino = page->mapping->host->i_ino;
-		__entry->index = page->index;
-		__entry->blkaddr = blkaddr;
-		__entry->type = type;
-	),
-	TP_printk("dev = (%d,%d), ino = %lu, page_index = 0x%lx, "
-		"blkaddr = 0x%llx, bio_type = %s%s",
-		show_dev_ino(__entry),
-		(unsigned long)__entry->index,
-		(unsigned long long)__entry->blkaddr,
-		show_bio_type(__entry->type))
-);
 TRACE_EVENT(f2fs_get_data_block,
 	TP_PROTO(struct inode *inode, sector_t iblock,
 		struct buffer_head *bh, int ret),
@@ -680,11 +649,63 @@ TRACE_EVENT(f2fs_reserve_new_block,
 		__entry->ofs_in_node)
 );
+DECLARE_EVENT_CLASS(f2fs__submit_page_bio,
+	TP_PROTO(struct page *page, struct f2fs_io_info *fio),
+	TP_ARGS(page, fio),
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(ino_t, ino)
+		__field(pgoff_t, index)
+		__field(block_t, blkaddr)
+		__field(int, rw)
+		__field(int, type)
+	),
+	TP_fast_assign(
+		__entry->dev = page->mapping->host->i_sb->s_dev;
+		__entry->ino = page->mapping->host->i_ino;
+		__entry->index = page->index;
+		__entry->blkaddr = fio->blk_addr;
+		__entry->rw = fio->rw;
+		__entry->type = fio->type;
+	),
+	TP_printk("dev = (%d,%d), ino = %lu, page_index = 0x%lx, "
+		"blkaddr = 0x%llx, rw = %s%s, type = %s",
+		show_dev_ino(__entry),
+		(unsigned long)__entry->index,
+		(unsigned long long)__entry->blkaddr,
+		show_bio_type(__entry->rw),
+		show_block_type(__entry->type))
+);
+DEFINE_EVENT_CONDITION(f2fs__submit_page_bio, f2fs_submit_page_bio,
+	TP_PROTO(struct page *page, struct f2fs_io_info *fio),
+	TP_ARGS(page, fio),
+	TP_CONDITION(page->mapping)
+);
+DEFINE_EVENT_CONDITION(f2fs__submit_page_bio, f2fs_submit_page_mbio,
+	TP_PROTO(struct page *page, struct f2fs_io_info *fio),
+	TP_ARGS(page, fio),
+	TP_CONDITION(page->mapping)
+);
 DECLARE_EVENT_CLASS(f2fs__submit_bio,
-	TP_PROTO(struct super_block *sb, int rw, int type, struct bio *bio),
-	TP_ARGS(sb, rw, type, bio),
+	TP_PROTO(struct super_block *sb, struct f2fs_io_info *fio,
+		struct bio *bio),
+	TP_ARGS(sb, fio, bio),
 	TP_STRUCT__entry(
 		__field(dev_t, dev)
@@ -696,8 +717,8 @@ DECLARE_EVENT_CLASS(f2fs__submit_bio,
 	TP_fast_assign(
 		__entry->dev = sb->s_dev;
-		__entry->rw = rw;
-		__entry->type = type;
+		__entry->rw = fio->rw;
+		__entry->type = fio->type;
 		__entry->sector = bio->bi_iter.bi_sector;
 		__entry->size = bio->bi_iter.bi_size;
 	),
@@ -712,18 +733,20 @@ DECLARE_EVENT_CLASS(f2fs__submit_bio,
 DEFINE_EVENT_CONDITION(f2fs__submit_bio, f2fs_submit_write_bio,
-	TP_PROTO(struct super_block *sb, int rw, int type, struct bio *bio),
-	TP_ARGS(sb, rw, type, bio),
+	TP_PROTO(struct super_block *sb, struct f2fs_io_info *fio,
+		struct bio *bio),
+	TP_ARGS(sb, fio, bio),
 	TP_CONDITION(bio)
 );
 DEFINE_EVENT_CONDITION(f2fs__submit_bio, f2fs_submit_read_bio,
-	TP_PROTO(struct super_block *sb, int rw, int type, struct bio *bio),
-	TP_ARGS(sb, rw, type, bio),
+	TP_PROTO(struct super_block *sb, struct f2fs_io_info *fio,
+		struct bio *bio),
+	TP_ARGS(sb, fio, bio),
 	TP_CONDITION(bio)
 );
@@ -916,38 +939,6 @@ TRACE_EVENT(f2fs_writepages,
 		__entry->for_sync)
 );
-TRACE_EVENT(f2fs_submit_page_mbio,
-	TP_PROTO(struct page *page, int rw, int type, block_t blk_addr),
-	TP_ARGS(page, rw, type, blk_addr),
-	TP_STRUCT__entry(
-		__field(dev_t, dev)
-		__field(ino_t, ino)
-		__field(int, rw)
-		__field(int, type)
-		__field(pgoff_t, index)
-		__field(block_t, block)
-	),
-	TP_fast_assign(
-		__entry->dev = page->mapping->host->i_sb->s_dev;
-		__entry->ino = page->mapping->host->i_ino;
-		__entry->rw = rw;
-		__entry->type = type;
-		__entry->index = page->index;
-		__entry->block = blk_addr;
-	),
-	TP_printk("dev = (%d,%d), ino = %lu, %s%s, %s, index = %lu, blkaddr = 0x%llx",
-		show_dev_ino(__entry),
-		show_bio_type(__entry->rw),
-		show_block_type(__entry->type),
-		(unsigned long)__entry->index,
-		(unsigned long long)__entry->block)
-);
 TRACE_EVENT(f2fs_write_checkpoint,
 	TP_PROTO(struct super_block *sb, int reason, char *msg),
@@ -998,14 +989,15 @@ TRACE_EVENT(f2fs_issue_discard,
 TRACE_EVENT(f2fs_issue_flush,
-	TP_PROTO(struct super_block *sb, bool nobarrier, bool flush_merge),
+	TP_PROTO(struct super_block *sb, unsigned int nobarrier,
+		unsigned int flush_merge),
 	TP_ARGS(sb, nobarrier, flush_merge),
 	TP_STRUCT__entry(
 		__field(dev_t, dev)
-		__field(bool, nobarrier)
-		__field(bool, flush_merge)
+		__field(unsigned int, nobarrier)
+		__field(unsigned int, flush_merge)
 	),
 	TP_fast_assign(
......