Commit 1085db4a authored by Linus Torvalds

Merge tag 'f2fs-for-3.9' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs

Pull f2fs update from Jaegeuk Kim:
 "[Major bug fixes]
   o Store device file information correctly
   o Fix -EIO handling with respect to power-off-recovery
   o Allocate blocks with global locks
   o Fix wrong calculation of the SSR cost

  [Cleanups]
   o Get rid of fake on-stack dentries

  [Enhancement]
   o Support (un)freeze_fs
   o Enhance the f2fs_gc flow
   o Support 32-bit binary execution on 64-bit kernel"

* tag 'f2fs-for-3.9' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs: (29 commits)
  f2fs: avoid build warning
  f2fs: add compat_ioctl to provide backward compatability
  f2fs: fix calculation of max. gc cost in the SSR case
  f2fs: clarify and enhance the f2fs_gc flow
  f2fs: optimize the return condition for has_not_enough_free_secs
  f2fs: make an accessor to get sections for particular block type
  f2fs: mark gc_thread as NULL when thread creation is failed
  f2fs: name gc task as per the block device
  f2fs: remove unnecessary gc option check and balance_fs
  f2fs: remove repeated F2FS_SET_SB_DIRT call
  f2fs: when check superblock failed, try to check another superblock
  f2fs: use F2FS_BLKSIZE to judge bloksize and page_cache_size
  f2fs: add device name in debugfs
  f2fs: stop repeated checking if cp is needed
  f2fs: avoid balanc_fs during evict_inode
  f2fs: remove the use of page_cache_release
  f2fs: fix typo mistake for data_version description
  f2fs: reorganize code for ra_node_page
  f2fs: avoid redundant call to has_not_enough_free_secs in f2fs_gc
  f2fs: add un/freeze_fs into super_operations
  ...
parents 3c834b6f 7dd690c8
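
The "32-bit binary execution on 64-bit kernel" item above comes down to a compat_ioctl hook that remaps the 32-bit ioctl numbers onto their native counterparts and converts the user pointer with compat_ptr() before calling the regular handler. A minimal sketch of that pattern, using the f2fs names that appear in the fs/f2fs/file.c hunk further down:

	#ifdef CONFIG_COMPAT
	long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
	{
		switch (cmd) {
		case F2FS_IOC32_GETFLAGS:
			cmd = F2FS_IOC_GETFLAGS;	/* remap 32-bit cmd to the native one */
			break;
		case F2FS_IOC32_SETFLAGS:
			cmd = F2FS_IOC_SETFLAGS;
			break;
		default:
			return -ENOIOCTLCMD;		/* not a command we translate */
		}
		/* compat_ptr() turns the 32-bit user address into a usable pointer */
		return f2fs_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
	}
	#endif

The handler is then wired into f2fs_file_operations as .compat_ioctl under CONFIG_COMPAT, so 32-bit userspace gets the same GETFLAGS/SETFLAGS behaviour as native callers.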
......@@ -72,22 +72,22 @@ static int f2fs_write_meta_page(struct page *page,
{
struct inode *inode = page->mapping->host;
struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
int err;
wait_on_page_writeback(page);
err = write_meta_page(sbi, page, wbc);
if (err) {
/* Should not write any meta pages, if any IO error was occurred */
if (wbc->for_reclaim ||
is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ERROR_FLAG)) {
dec_page_count(sbi, F2FS_DIRTY_META);
wbc->pages_skipped++;
set_page_dirty(page);
return AOP_WRITEPAGE_ACTIVATE;
}
dec_page_count(sbi, F2FS_DIRTY_META);
wait_on_page_writeback(page);
/* In this case, we should not unlock this page */
if (err != AOP_WRITEPAGE_ACTIVATE)
unlock_page(page);
return err;
write_meta_page(sbi, page);
dec_page_count(sbi, F2FS_DIRTY_META);
unlock_page(page);
return 0;
}
static int f2fs_write_meta_pages(struct address_space *mapping,
......@@ -138,7 +138,10 @@ long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
BUG_ON(page->mapping != mapping);
BUG_ON(!PageDirty(page));
clear_page_dirty_for_io(page);
f2fs_write_meta_page(page, &wbc);
if (f2fs_write_meta_page(page, &wbc)) {
unlock_page(page);
break;
}
if (nwritten++ >= nr_to_write)
break;
}
......@@ -161,7 +164,6 @@ static int f2fs_set_meta_page_dirty(struct page *page)
if (!PageDirty(page)) {
__set_page_dirty_nobuffers(page);
inc_page_count(sbi, F2FS_DIRTY_META);
F2FS_SET_SB_DIRT(sbi);
return 1;
}
return 0;
......@@ -216,19 +218,11 @@ void add_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
new->ino = ino;
/* add new_oentry into list which is sorted by inode number */
if (orphan) {
struct orphan_inode_entry *prev;
/* get previous entry */
prev = list_entry(orphan->list.prev, typeof(*prev), list);
if (&prev->list != head)
/* insert new orphan inode entry */
list_add(&new->list, &prev->list);
else
list_add(&new->list, head);
} else {
if (orphan)
list_add(&new->list, this->prev);
else
list_add_tail(&new->list, head);
}
sbi->n_orphans++;
out:
mutex_unlock(&sbi->orphan_inode_mutex);
......@@ -545,7 +539,7 @@ void sync_dirty_dir_inodes(struct f2fs_sb_info *sbi)
/*
* Freeze all the FS-operations for checkpoint.
*/
void block_operations(struct f2fs_sb_info *sbi)
static void block_operations(struct f2fs_sb_info *sbi)
{
int t;
struct writeback_control wbc = {
......@@ -717,27 +711,24 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
sbi->alloc_valid_block_count = 0;
/* Here, we only have one bio having CP pack */
if (is_set_ckpt_flags(ckpt, CP_ERROR_FLAG))
sbi->sb->s_flags |= MS_RDONLY;
else
sync_meta_pages(sbi, META_FLUSH, LONG_MAX);
sync_meta_pages(sbi, META_FLUSH, LONG_MAX);
clear_prefree_segments(sbi);
F2FS_RESET_SB_DIRT(sbi);
if (!is_set_ckpt_flags(ckpt, CP_ERROR_FLAG)) {
clear_prefree_segments(sbi);
F2FS_RESET_SB_DIRT(sbi);
}
}
/*
* We guarantee that this checkpoint procedure should not fail.
*/
void write_checkpoint(struct f2fs_sb_info *sbi, bool blocked, bool is_umount)
void write_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
{
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
unsigned long long ckpt_ver;
if (!blocked) {
mutex_lock(&sbi->cp_mutex);
block_operations(sbi);
}
mutex_lock(&sbi->cp_mutex);
block_operations(sbi);
f2fs_submit_bio(sbi, DATA, true);
f2fs_submit_bio(sbi, NODE, true);
......
......@@ -183,10 +183,12 @@ static int stat_show(struct seq_file *s, void *v)
mutex_lock(&f2fs_stat_mutex);
list_for_each_entry_safe(si, next, &f2fs_stat_list, stat_list) {
char devname[BDEVNAME_SIZE];
update_general_status(si->sbi);
seq_printf(s, "\n=====[ partition info. #%d ]=====\n", i++);
seq_printf(s, "\n=====[ partition info(%s). #%d ]=====\n",
bdevname(si->sbi->sb->s_bdev, devname), i++);
seq_printf(s, "[SB: 1] [CP: 2] [SIT: %d] [NAT: %d] ",
si->sit_area_segs, si->nat_area_segs);
seq_printf(s, "[SSA: %d] [MAIN: %d",
......
......@@ -265,7 +265,7 @@ void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
mutex_unlock_op(sbi, DENTRY_OPS);
}
void init_dent_inode(struct dentry *dentry, struct page *ipage)
void init_dent_inode(const struct qstr *name, struct page *ipage)
{
struct f2fs_node *rn;
......@@ -274,20 +274,19 @@ void init_dent_inode(struct dentry *dentry, struct page *ipage)
wait_on_page_writeback(ipage);
/* copy dentry info. to this inode page */
/* copy name info. to this inode page */
rn = (struct f2fs_node *)page_address(ipage);
rn->i.i_namelen = cpu_to_le32(dentry->d_name.len);
memcpy(rn->i.i_name, dentry->d_name.name, dentry->d_name.len);
rn->i.i_namelen = cpu_to_le32(name->len);
memcpy(rn->i.i_name, name->name, name->len);
set_page_dirty(ipage);
}
static int init_inode_metadata(struct inode *inode, struct dentry *dentry)
static int init_inode_metadata(struct inode *inode,
struct inode *dir, const struct qstr *name)
{
struct inode *dir = dentry->d_parent->d_inode;
if (is_inode_flag_set(F2FS_I(inode), FI_NEW_INODE)) {
int err;
err = new_inode_page(inode, dentry);
err = new_inode_page(inode, name);
if (err)
return err;
......@@ -310,7 +309,7 @@ static int init_inode_metadata(struct inode *inode, struct dentry *dentry)
if (IS_ERR(ipage))
return PTR_ERR(ipage);
set_cold_node(inode, ipage);
init_dent_inode(dentry, ipage);
init_dent_inode(name, ipage);
f2fs_put_page(ipage, 1);
}
if (is_inode_flag_set(F2FS_I(inode), FI_INC_LINK)) {
......@@ -371,7 +370,7 @@ static int room_for_filename(struct f2fs_dentry_block *dentry_blk, int slots)
goto next;
}
int f2fs_add_link(struct dentry *dentry, struct inode *inode)
int __f2fs_add_link(struct inode *dir, const struct qstr *name, struct inode *inode)
{
unsigned int bit_pos;
unsigned int level;
......@@ -380,17 +379,15 @@ int f2fs_add_link(struct dentry *dentry, struct inode *inode)
f2fs_hash_t dentry_hash;
struct f2fs_dir_entry *de;
unsigned int nbucket, nblock;
struct inode *dir = dentry->d_parent->d_inode;
struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
const char *name = dentry->d_name.name;
size_t namelen = dentry->d_name.len;
size_t namelen = name->len;
struct page *dentry_page = NULL;
struct f2fs_dentry_block *dentry_blk = NULL;
int slots = GET_DENTRY_SLOTS(namelen);
int err = 0;
int i;
dentry_hash = f2fs_dentry_hash(name, dentry->d_name.len);
dentry_hash = f2fs_dentry_hash(name->name, name->len);
level = 0;
current_depth = F2FS_I(dir)->i_current_depth;
if (F2FS_I(dir)->chash == dentry_hash) {
......@@ -433,7 +430,7 @@ int f2fs_add_link(struct dentry *dentry, struct inode *inode)
++level;
goto start;
add_dentry:
err = init_inode_metadata(inode, dentry);
err = init_inode_metadata(inode, dir, name);
if (err)
goto fail;
......@@ -442,7 +439,7 @@ int f2fs_add_link(struct dentry *dentry, struct inode *inode)
de = &dentry_blk->dentry[bit_pos];
de->hash_code = dentry_hash;
de->name_len = cpu_to_le16(namelen);
memcpy(dentry_blk->filename[bit_pos], name, namelen);
memcpy(dentry_blk->filename[bit_pos], name->name, name->len);
de->ino = cpu_to_le32(inode->i_ino);
set_de_type(de, inode);
for (i = 0; i < slots; i++)
......
......@@ -103,6 +103,20 @@ static inline int update_sits_in_cursum(struct f2fs_summary_block *rs, int i)
return before;
}
/*
* ioctl commands
*/
#define F2FS_IOC_GETFLAGS FS_IOC_GETFLAGS
#define F2FS_IOC_SETFLAGS FS_IOC_SETFLAGS
#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
/*
* ioctl commands in 32 bit emulation
*/
#define F2FS_IOC32_GETFLAGS FS_IOC32_GETFLAGS
#define F2FS_IOC32_SETFLAGS FS_IOC32_SETFLAGS
#endif
/*
* For INODE and NODE manager
*/
......@@ -141,7 +155,7 @@ struct f2fs_inode_info {
/* Use below internally in f2fs*/
unsigned long flags; /* use to pass per-file flags */
unsigned long long data_version;/* lastes version of data for fsync */
unsigned long long data_version;/* latest version of data for fsync */
atomic_t dirty_dents; /* # of dirty dentry pages */
f2fs_hash_t chash; /* hash value of given file name */
unsigned int clevel; /* maximum level of given file name */
......@@ -573,6 +587,14 @@ static inline int get_pages(struct f2fs_sb_info *sbi, int count_type)
return atomic_read(&sbi->nr_pages[count_type]);
}
static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type)
{
unsigned int pages_per_sec = sbi->segs_per_sec *
(1 << sbi->log_blocks_per_seg);
return ((get_pages(sbi, block_type) + pages_per_sec - 1)
>> sbi->log_blocks_per_seg) / sbi->segs_per_sec;
}
static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi)
{
block_t ret;
......@@ -842,12 +864,12 @@ void f2fs_truncate(struct inode *);
int f2fs_setattr(struct dentry *, struct iattr *);
int truncate_hole(struct inode *, pgoff_t, pgoff_t);
long f2fs_ioctl(struct file *, unsigned int, unsigned long);
long f2fs_compat_ioctl(struct file *, unsigned int, unsigned long);
/*
* inode.c
*/
void f2fs_set_inode_flags(struct inode *);
struct inode *f2fs_iget_nowait(struct super_block *, unsigned long);
struct inode *f2fs_iget(struct super_block *, unsigned long);
void update_inode(struct inode *, struct page *);
int f2fs_write_inode(struct inode *, struct writeback_control *);
......@@ -867,12 +889,18 @@ struct f2fs_dir_entry *f2fs_parent_dir(struct inode *, struct page **);
ino_t f2fs_inode_by_name(struct inode *, struct qstr *);
void f2fs_set_link(struct inode *, struct f2fs_dir_entry *,
struct page *, struct inode *);
void init_dent_inode(struct dentry *, struct page *);
int f2fs_add_link(struct dentry *, struct inode *);
void init_dent_inode(const struct qstr *, struct page *);
int __f2fs_add_link(struct inode *, const struct qstr *, struct inode *);
void f2fs_delete_entry(struct f2fs_dir_entry *, struct page *, struct inode *);
int f2fs_make_empty(struct inode *, struct inode *);
bool f2fs_empty_dir(struct inode *);
static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
{
return __f2fs_add_link(dentry->d_parent->d_inode, &dentry->d_name,
inode);
}
/*
* super.c
*/
......@@ -896,7 +924,7 @@ void get_node_info(struct f2fs_sb_info *, nid_t, struct node_info *);
int get_dnode_of_data(struct dnode_of_data *, pgoff_t, int);
int truncate_inode_blocks(struct inode *, pgoff_t);
int remove_inode_page(struct inode *);
int new_inode_page(struct inode *, struct dentry *);
int new_inode_page(struct inode *, const struct qstr *);
struct page *new_node_page(struct dnode_of_data *, unsigned int);
void ra_node_page(struct f2fs_sb_info *, nid_t);
struct page *get_node_page(struct f2fs_sb_info *, pgoff_t);
......@@ -929,8 +957,7 @@ void allocate_new_segments(struct f2fs_sb_info *);
struct page *get_sum_page(struct f2fs_sb_info *, unsigned int);
struct bio *f2fs_bio_alloc(struct block_device *, int);
void f2fs_submit_bio(struct f2fs_sb_info *, enum page_type, bool sync);
int write_meta_page(struct f2fs_sb_info *, struct page *,
struct writeback_control *);
void write_meta_page(struct f2fs_sb_info *, struct page *);
void write_node_page(struct f2fs_sb_info *, struct page *, unsigned int,
block_t, block_t *);
void write_data_page(struct inode *, struct page *, struct dnode_of_data*,
......@@ -963,8 +990,7 @@ int get_valid_checkpoint(struct f2fs_sb_info *);
void set_dirty_dir_page(struct inode *, struct page *);
void remove_dirty_dir_inode(struct inode *);
void sync_dirty_dir_inodes(struct f2fs_sb_info *);
void block_operations(struct f2fs_sb_info *);
void write_checkpoint(struct f2fs_sb_info *, bool, bool);
void write_checkpoint(struct f2fs_sb_info *, bool);
void init_orphan_info(struct f2fs_sb_info *);
int __init create_checkpoint_caches(void);
void destroy_checkpoint_caches(void);
......
......@@ -15,6 +15,7 @@
#include <linux/writeback.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
......@@ -157,11 +158,11 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1)
need_cp = true;
if (is_inode_flag_set(F2FS_I(inode), FI_NEED_CP))
else if (is_inode_flag_set(F2FS_I(inode), FI_NEED_CP))
need_cp = true;
if (!space_for_roll_forward(sbi))
else if (!space_for_roll_forward(sbi))
need_cp = true;
if (need_to_sync_dir(sbi, inode))
else if (need_to_sync_dir(sbi, inode))
need_cp = true;
if (need_cp) {
......@@ -298,8 +299,6 @@ void f2fs_truncate(struct inode *inode)
inode->i_mtime = inode->i_ctime = CURRENT_TIME;
mark_inode_dirty(inode);
}
f2fs_balance_fs(F2FS_SB(inode->i_sb));
}
static int f2fs_getattr(struct vfsmount *mnt,
......@@ -356,6 +355,7 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
attr->ia_size != i_size_read(inode)) {
truncate_setsize(inode, attr->ia_size);
f2fs_truncate(inode);
f2fs_balance_fs(F2FS_SB(inode->i_sb));
}
__setattr_copy(inode, attr);
......@@ -387,12 +387,17 @@ const struct inode_operations f2fs_file_inode_operations = {
static void fill_zero(struct inode *inode, pgoff_t index,
loff_t start, loff_t len)
{
struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
struct page *page;
if (!len)
return;
f2fs_balance_fs(sbi);
mutex_lock_op(sbi, DATA_NEW);
page = get_new_data_page(inode, index, false);
mutex_unlock_op(sbi, DATA_NEW);
if (!IS_ERR(page)) {
wait_on_page_writeback(page);
......@@ -630,6 +635,23 @@ long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
}
#ifdef CONFIG_COMPAT
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
switch (cmd) {
case F2FS_IOC32_GETFLAGS:
cmd = F2FS_IOC_GETFLAGS;
break;
case F2FS_IOC32_SETFLAGS:
cmd = F2FS_IOC_SETFLAGS;
break;
default:
return -ENOIOCTLCMD;
}
return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif
const struct file_operations f2fs_file_operations = {
.llseek = generic_file_llseek,
.read = do_sync_read,
......@@ -641,6 +663,9 @@ const struct file_operations f2fs_file_operations = {
.fsync = f2fs_sync_file,
.fallocate = f2fs_fallocate,
.unlocked_ioctl = f2fs_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = f2fs_compat_ioctl,
#endif
.splice_read = generic_file_splice_read,
.splice_write = generic_file_splice_write,
};
......@@ -44,10 +44,10 @@ static int gc_thread_func(void *data)
if (kthread_should_stop())
break;
f2fs_balance_fs(sbi);
if (!test_opt(sbi, BG_GC))
if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
wait_ms = GC_THREAD_MAX_SLEEP_TIME;
continue;
}
/*
* [GC triggering condition]
......@@ -78,7 +78,8 @@ static int gc_thread_func(void *data)
sbi->bg_gc++;
if (f2fs_gc(sbi) == GC_NONE)
/* if return value is not zero, no victim was selected */
if (f2fs_gc(sbi))
wait_ms = GC_THREAD_NOGC_SLEEP_TIME;
else if (wait_ms == GC_THREAD_NOGC_SLEEP_TIME)
wait_ms = GC_THREAD_MAX_SLEEP_TIME;
......@@ -90,7 +91,10 @@ static int gc_thread_func(void *data)
int start_gc_thread(struct f2fs_sb_info *sbi)
{
struct f2fs_gc_kthread *gc_th;
dev_t dev = sbi->sb->s_bdev->bd_dev;
if (!test_opt(sbi, BG_GC))
return 0;
gc_th = kmalloc(sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
if (!gc_th)
return -ENOMEM;
......@@ -98,9 +102,10 @@ int start_gc_thread(struct f2fs_sb_info *sbi)
sbi->gc_thread = gc_th;
init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
GC_THREAD_NAME);
"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
if (IS_ERR(gc_th->f2fs_gc_task)) {
kfree(gc_th);
sbi->gc_thread = NULL;
return -ENOMEM;
}
return 0;
......@@ -141,6 +146,9 @@ static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
struct victim_sel_policy *p)
{
/* SSR allocates in a segment unit */
if (p->alloc_mode == SSR)
return 1 << sbi->log_blocks_per_seg;
if (p->gc_mode == GC_GREEDY)
return (1 << sbi->log_blocks_per_seg) * p->ofs_unit;
else if (p->gc_mode == GC_CB)
......@@ -356,7 +364,7 @@ static int check_valid_map(struct f2fs_sb_info *sbi,
sentry = get_seg_entry(sbi, segno);
ret = f2fs_test_bit(offset, sentry->cur_valid_map);
mutex_unlock(&sit_i->sentry_lock);
return ret ? GC_OK : GC_NEXT;
return ret;
}
/*
......@@ -364,7 +372,7 @@ static int check_valid_map(struct f2fs_sb_info *sbi,
* On validity, copy that node with cold status, otherwise (invalid node)
* ignore that.
*/
static int gc_node_segment(struct f2fs_sb_info *sbi,
static void gc_node_segment(struct f2fs_sb_info *sbi,
struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
bool initial = true;
......@@ -376,21 +384,12 @@ static int gc_node_segment(struct f2fs_sb_info *sbi,
for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
nid_t nid = le32_to_cpu(entry->nid);
struct page *node_page;
int err;
/*
* It makes sure that free segments are able to write
* all the dirty node pages before CP after this CP.
* So let's check the space of dirty node pages.
*/
if (should_do_checkpoint(sbi)) {
mutex_lock(&sbi->cp_mutex);
block_operations(sbi);
return GC_BLOCKED;
}
/* stop BG_GC if there is not enough free sections. */
if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
return;
err = check_valid_map(sbi, segno, off);
if (err == GC_NEXT)
if (check_valid_map(sbi, segno, off) == 0)
continue;
if (initial) {
......@@ -420,7 +419,6 @@ static int gc_node_segment(struct f2fs_sb_info *sbi,
};
sync_node_pages(sbi, 0, &wbc);
}
return GC_DONE;
}
/*
......@@ -463,13 +461,13 @@ static int check_dnode(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
node_page = get_node_page(sbi, nid);
if (IS_ERR(node_page))
return GC_NEXT;
return 0;
get_node_info(sbi, nid, dni);
if (sum->version != dni->version) {
f2fs_put_page(node_page, 1);
return GC_NEXT;
return 0;
}
*nofs = ofs_of_node(node_page);
......@@ -477,8 +475,8 @@ static int check_dnode(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
f2fs_put_page(node_page, 1);
if (source_blkaddr != blkaddr)
return GC_NEXT;
return GC_OK;
return 0;
return 1;
}
static void move_data_page(struct inode *inode, struct page *page, int gc_type)
......@@ -519,13 +517,13 @@ static void move_data_page(struct inode *inode, struct page *page, int gc_type)
* If the parent node is not valid or the data block address is different,
* the victim data block is ignored.
*/
static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
struct list_head *ilist, unsigned int segno, int gc_type)
{
struct super_block *sb = sbi->sb;
struct f2fs_summary *entry;
block_t start_addr;
int err, off;
int off;
int phase = 0;
start_addr = START_BLOCK(sbi, segno);
......@@ -539,20 +537,11 @@ static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
unsigned int ofs_in_node, nofs;
block_t start_bidx;
/*
* It makes sure that free segments are able to write
* all the dirty node pages before CP after this CP.
* So let's check the space of dirty node pages.
*/
if (should_do_checkpoint(sbi)) {
mutex_lock(&sbi->cp_mutex);
block_operations(sbi);
err = GC_BLOCKED;
goto stop;
}
/* stop BG_GC if there is not enough free sections. */
if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
return;
err = check_valid_map(sbi, segno, off);
if (err == GC_NEXT)
if (check_valid_map(sbi, segno, off) == 0)
continue;
if (phase == 0) {
......@@ -561,8 +550,7 @@ static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
}
/* Get an inode by ino with checking validity */
err = check_dnode(sbi, entry, &dni, start_addr + off, &nofs);
if (err == GC_NEXT)
if (check_dnode(sbi, entry, &dni, start_addr + off, &nofs) == 0)
continue;
if (phase == 1) {
......@@ -574,7 +562,7 @@ static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
ofs_in_node = le16_to_cpu(entry->ofs_in_node);
if (phase == 2) {
inode = f2fs_iget_nowait(sb, dni.ino);
inode = f2fs_iget(sb, dni.ino);
if (IS_ERR(inode))
continue;
......@@ -602,11 +590,9 @@ static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
}
if (++phase < 4)
goto next_step;
err = GC_DONE;
stop:
if (gc_type == FG_GC)
f2fs_submit_bio(sbi, DATA, true);
return err;
}
static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
......@@ -620,17 +606,16 @@ static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
return ret;
}
static int do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
static void do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
struct list_head *ilist, int gc_type)
{
struct page *sum_page;
struct f2fs_summary_block *sum;
int ret = GC_DONE;
/* read segment summary of victim */
sum_page = get_sum_page(sbi, segno);
if (IS_ERR(sum_page))
return GC_ERROR;
return;
/*
* CP needs to lock sum_page. In this time, we don't need
......@@ -642,17 +627,16 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
switch (GET_SUM_TYPE((&sum->footer))) {
case SUM_TYPE_NODE:
ret = gc_node_segment(sbi, sum->entries, segno, gc_type);
gc_node_segment(sbi, sum->entries, segno, gc_type);
break;
case SUM_TYPE_DATA:
ret = gc_data_segment(sbi, sum->entries, ilist, segno, gc_type);
gc_data_segment(sbi, sum->entries, ilist, segno, gc_type);
break;
}
stat_inc_seg_count(sbi, GET_SUM_TYPE((&sum->footer)));
stat_inc_call_count(sbi->stat_info);
f2fs_put_page(sum_page, 0);
return ret;
}
int f2fs_gc(struct f2fs_sb_info *sbi)
......@@ -660,40 +644,38 @@ int f2fs_gc(struct f2fs_sb_info *sbi)
struct list_head ilist;
unsigned int segno, i;
int gc_type = BG_GC;
int gc_status = GC_NONE;
int nfree = 0;
int ret = -1;
INIT_LIST_HEAD(&ilist);
gc_more:
if (!(sbi->sb->s_flags & MS_ACTIVE))
goto stop;
if (has_not_enough_free_secs(sbi))
if (gc_type == BG_GC && has_not_enough_free_secs(sbi, nfree))
gc_type = FG_GC;
if (!__get_victim(sbi, &segno, gc_type, NO_CHECK_TYPE))
goto stop;
ret = 0;
for (i = 0; i < sbi->segs_per_sec; i++) {
/*
* do_garbage_collect will give us three gc_status:
* GC_ERROR, GC_DONE, and GC_BLOCKED.
* If GC is finished uncleanly, we have to return
* the victim to dirty segment list.
*/
gc_status = do_garbage_collect(sbi, segno + i, &ilist, gc_type);
if (gc_status != GC_DONE)
break;
}
if (has_not_enough_free_secs(sbi)) {
write_checkpoint(sbi, (gc_status == GC_BLOCKED), false);
if (has_not_enough_free_secs(sbi))
goto gc_more;
}
for (i = 0; i < sbi->segs_per_sec; i++)
do_garbage_collect(sbi, segno + i, &ilist, gc_type);
if (gc_type == FG_GC &&
get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0)
nfree++;
if (has_not_enough_free_secs(sbi, nfree))
goto gc_more;
if (gc_type == FG_GC)
write_checkpoint(sbi, false);
stop:
mutex_unlock(&sbi->gc_mutex);
put_gc_inode(&ilist);
return gc_status;
return ret;
}
void build_gc_manager(struct f2fs_sb_info *sbi)
......
......@@ -8,7 +8,6 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#define GC_THREAD_NAME "f2fs_gc_task"
#define GC_THREAD_MIN_WB_PAGES 1 /*
* a threshold to determine
* whether IO subsystem is idle
......@@ -23,15 +22,6 @@
/* Search max. number of dirty segments to select a victim segment */
#define MAX_VICTIM_SEARCH 20
enum {
GC_NONE = 0,
GC_ERROR,
GC_OK,
GC_NEXT,
GC_BLOCKED,
GC_DONE,
};
struct f2fs_gc_kthread {
struct task_struct *f2fs_gc_task;
wait_queue_head_t gc_wait_queue_head;
......@@ -104,14 +94,3 @@ static inline int is_idle(struct f2fs_sb_info *sbi)
struct request_list *rl = &q->root_rl;
return !(rl->count[BLK_RW_SYNC]) && !(rl->count[BLK_RW_ASYNC]);
}
static inline bool should_do_checkpoint(struct f2fs_sb_info *sbi)
{
unsigned int pages_per_sec = sbi->segs_per_sec *
(1 << sbi->log_blocks_per_seg);
int node_secs = ((get_pages(sbi, F2FS_DIRTY_NODES) + pages_per_sec - 1)
>> sbi->log_blocks_per_seg) / sbi->segs_per_sec;
int dent_secs = ((get_pages(sbi, F2FS_DIRTY_DENTS) + pages_per_sec - 1)
>> sbi->log_blocks_per_seg) / sbi->segs_per_sec;
return free_sections(sbi) <= (node_secs + 2 * dent_secs + 2);
}
......@@ -16,11 +16,6 @@
#include "f2fs.h"
#include "node.h"
struct f2fs_iget_args {
u64 ino;
int on_free;
};
void f2fs_set_inode_flags(struct inode *inode)
{
unsigned int flags = F2FS_I(inode)->i_flags;
......@@ -40,34 +35,6 @@ void f2fs_set_inode_flags(struct inode *inode)
inode->i_flags |= S_DIRSYNC;
}
static int f2fs_iget_test(struct inode *inode, void *data)
{
struct f2fs_iget_args *args = data;
if (inode->i_ino != args->ino)
return 0;
if (inode->i_state & (I_FREEING | I_WILL_FREE)) {
args->on_free = 1;
return 0;
}
return 1;
}
struct inode *f2fs_iget_nowait(struct super_block *sb, unsigned long ino)
{
struct f2fs_iget_args args = {
.ino = ino,
.on_free = 0
};
struct inode *inode = ilookup5(sb, ino, f2fs_iget_test, &args);
if (inode)
return inode;
if (!args.on_free)
return f2fs_iget(sb, ino);
return ERR_PTR(-ENOENT);
}
static int do_read_inode(struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
......@@ -100,6 +67,10 @@ static int do_read_inode(struct inode *inode)
inode->i_ctime.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
inode->i_mtime.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
inode->i_generation = le32_to_cpu(ri->i_generation);
if (ri->i_addr[0])
inode->i_rdev = old_decode_dev(le32_to_cpu(ri->i_addr[0]));
else
inode->i_rdev = new_decode_dev(le32_to_cpu(ri->i_addr[1]));
fi->i_current_depth = le32_to_cpu(ri->i_current_depth);
fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid);
......@@ -203,6 +174,20 @@ void update_inode(struct inode *inode, struct page *node_page)
ri->i_flags = cpu_to_le32(F2FS_I(inode)->i_flags);
ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino);
ri->i_generation = cpu_to_le32(inode->i_generation);
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
if (old_valid_dev(inode->i_rdev)) {
ri->i_addr[0] =
cpu_to_le32(old_encode_dev(inode->i_rdev));
ri->i_addr[1] = 0;
} else {
ri->i_addr[0] = 0;
ri->i_addr[1] =
cpu_to_le32(new_encode_dev(inode->i_rdev));
ri->i_addr[2] = 0;
}
}
set_cold_node(inode, node_page);
set_page_dirty(node_page);
}
......@@ -260,6 +245,7 @@ void f2fs_evict_inode(struct inode *inode)
if (inode->i_nlink || is_bad_inode(inode))
goto no_delete;
sb_start_intwrite(inode->i_sb);
set_inode_flag(F2FS_I(inode), FI_NO_ALLOC);
i_size_write(inode, 0);
......@@ -267,6 +253,7 @@ void f2fs_evict_inode(struct inode *inode)
f2fs_truncate(inode);
remove_inode_page(inode);
sb_end_intwrite(inode->i_sb);
no_delete:
clear_inode(inode);
}
......@@ -104,7 +104,7 @@ static void ra_nat_pages(struct f2fs_sb_info *sbi, int nid)
f2fs_put_page(page, 1);
continue;
}
page_cache_release(page);
f2fs_put_page(page, 0);
}
}
......@@ -660,7 +660,7 @@ int truncate_inode_blocks(struct inode *inode, pgoff_t from)
struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
int err = 0, cont = 1;
int level, offset[4], noffset[4];
unsigned int nofs;
unsigned int nofs = 0;
struct f2fs_node *rn;
struct dnode_of_data dn;
struct page *page;
......@@ -780,7 +780,7 @@ int remove_inode_page(struct inode *inode)
return 0;
}
int new_inode_page(struct inode *inode, struct dentry *dentry)
int new_inode_page(struct inode *inode, const struct qstr *name)
{
struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
struct page *page;
......@@ -790,7 +790,7 @@ int new_inode_page(struct inode *inode, struct dentry *dentry)
set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
mutex_lock_op(sbi, NODE_NEW);
page = new_node_page(&dn, 0);
init_dent_inode(dentry, page);
init_dent_inode(name, page);
mutex_unlock_op(sbi, NODE_NEW);
if (IS_ERR(page))
return PTR_ERR(page);
......@@ -874,15 +874,11 @@ void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
return;
if (read_node_page(apage, READA))
goto unlock_out;
unlock_page(apage);
page_cache_release(apage);
return;
unlock_out:
unlock_page(apage);
release_out:
page_cache_release(apage);
f2fs_put_page(apage, 0);
return;
}
struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
......@@ -1139,7 +1135,7 @@ static int f2fs_write_node_pages(struct address_space *mapping,
/* First check balancing cached NAT entries */
if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK)) {
write_checkpoint(sbi, false, false);
write_checkpoint(sbi, false);
return 0;
}
......
......@@ -42,7 +42,7 @@ static int recover_dentry(struct page *ipage, struct inode *inode)
{
struct f2fs_node *raw_node = (struct f2fs_node *)kmap(ipage);
struct f2fs_inode *raw_inode = &(raw_node->i);
struct dentry dent, parent;
struct qstr name;
struct f2fs_dir_entry *de;
struct page *page;
struct inode *dir;
......@@ -57,17 +57,15 @@ static int recover_dentry(struct page *ipage, struct inode *inode)
goto out;
}
parent.d_inode = dir;
dent.d_parent = &parent;
dent.d_name.len = le32_to_cpu(raw_inode->i_namelen);
dent.d_name.name = raw_inode->i_name;
name.len = le32_to_cpu(raw_inode->i_namelen);
name.name = raw_inode->i_name;
de = f2fs_find_entry(dir, &dent.d_name, &page);
de = f2fs_find_entry(dir, &name, &page);
if (de) {
kunmap(page);
f2fs_put_page(page, 0);
} else {
err = f2fs_add_link(&dent, inode);
err = __f2fs_add_link(dir, &name, inode);
}
iput(dir);
out:
......@@ -226,7 +224,7 @@ static void check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
f2fs_put_page(node_page, 1);
/* Deallocate previous index in the node page */
inode = f2fs_iget_nowait(sbi->sb, ino);
inode = f2fs_iget(sbi->sb, ino);
if (IS_ERR(inode))
return;
......@@ -373,5 +371,5 @@ void recover_fsync_data(struct f2fs_sb_info *sbi)
out:
destroy_fsync_dnodes(sbi, &inode_list);
kmem_cache_destroy(fsync_entry_slab);
write_checkpoint(sbi, false, false);
write_checkpoint(sbi, false);
}
......@@ -29,7 +29,7 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi)
* We should do GC or end up with checkpoint, if there are so many dirty
* dir/node pages without enough free segments.
*/
if (has_not_enough_free_secs(sbi)) {
if (has_not_enough_free_secs(sbi, 0)) {
mutex_lock(&sbi->gc_mutex);
f2fs_gc(sbi);
}
......@@ -308,7 +308,7 @@ static unsigned int check_prefree_segments(struct f2fs_sb_info *sbi,
* If there is not enough reserved sections,
* we should not reuse prefree segments.
*/
if (has_not_enough_free_secs(sbi))
if (has_not_enough_free_secs(sbi, 0))
return NULL_SEGNO;
/*
......@@ -536,6 +536,23 @@ static void change_curseg(struct f2fs_sb_info *sbi, int type, bool reuse)
}
}
static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
{
struct curseg_info *curseg = CURSEG_I(sbi, type);
const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;
if (IS_NODESEG(type) || !has_not_enough_free_secs(sbi, 0))
return v_ops->get_victim(sbi,
&(curseg)->next_segno, BG_GC, type, SSR);
/* For data segments, let's do SSR more intensively */
for (; type >= CURSEG_HOT_DATA; type--)
if (v_ops->get_victim(sbi, &(curseg)->next_segno,
BG_GC, type, SSR))
return 1;
return 0;
}
/*
* flush out current segment and replace it with new segment
* This function should be returned with success, otherwise BUG
......@@ -600,6 +617,7 @@ static void f2fs_end_io_write(struct bio *bio, int err)
if (page->mapping)
set_bit(AS_EIO, &page->mapping->flags);
set_ckpt_flags(p->sbi->ckpt, CP_ERROR_FLAG);
p->sbi->sb->s_flags |= MS_RDONLY;
}
end_page_writeback(page);
dec_page_count(p->sbi, F2FS_WRITEBACK);
......@@ -815,15 +833,10 @@ static void do_write_page(struct f2fs_sb_info *sbi, struct page *page,
mutex_unlock(&curseg->curseg_mutex);
}
int write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
struct writeback_control *wbc)
void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
{
if (wbc->for_reclaim)
return AOP_WRITEPAGE_ACTIVATE;
set_page_writeback(page);
submit_write_page(sbi, page, page->index, META);
return 0;
}
void write_node_page(struct f2fs_sb_info *sbi, struct page *page,
......
......@@ -450,29 +450,16 @@ static inline bool need_SSR(struct f2fs_sb_info *sbi)
return (free_sections(sbi) < overprovision_sections(sbi));
}
static inline int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi, int freed)
{
struct curseg_info *curseg = CURSEG_I(sbi, type);
return DIRTY_I(sbi)->v_ops->get_victim(sbi,
&(curseg)->next_segno, BG_GC, type, SSR);
}
static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi)
{
unsigned int pages_per_sec = (1 << sbi->log_blocks_per_seg) *
sbi->segs_per_sec;
int node_secs = ((get_pages(sbi, F2FS_DIRTY_NODES) + pages_per_sec - 1)
>> sbi->log_blocks_per_seg) / sbi->segs_per_sec;
int dent_secs = ((get_pages(sbi, F2FS_DIRTY_DENTS) + pages_per_sec - 1)
>> sbi->log_blocks_per_seg) / sbi->segs_per_sec;
int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
if (sbi->por_doing)
return false;
if (free_sections(sbi) <= (node_secs + 2 * dent_secs +
reserved_sections(sbi)))
return true;
return false;
return ((free_sections(sbi) + freed) <= (node_secs + 2 * dent_secs +
reserved_sections(sbi)));
}
static inline int utilization(struct f2fs_sb_info *sbi)
......
......@@ -112,7 +112,7 @@ static void f2fs_put_super(struct super_block *sb)
f2fs_destroy_stats(sbi);
stop_gc_thread(sbi);
write_checkpoint(sbi, false, true);
write_checkpoint(sbi, true);
iput(sbi->node_inode);
iput(sbi->meta_inode);
......@@ -136,13 +136,29 @@ int f2fs_sync_fs(struct super_block *sb, int sync)
return 0;
if (sync)
write_checkpoint(sbi, false, false);
write_checkpoint(sbi, false);
else
f2fs_balance_fs(sbi);
return 0;
}
static int f2fs_freeze(struct super_block *sb)
{
int err;
if (sb->s_flags & MS_RDONLY)
return 0;
err = f2fs_sync_fs(sb, 1);
return err;
}
static int f2fs_unfreeze(struct super_block *sb)
{
return 0;
}
static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct super_block *sb = dentry->d_sb;
......@@ -198,7 +214,7 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
seq_puts(seq, ",noacl");
#endif
if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
seq_puts(seq, ",disable_ext_indentify");
seq_puts(seq, ",disable_ext_identify");
seq_printf(seq, ",active_logs=%u", sbi->active_logs);
......@@ -213,6 +229,8 @@ static struct super_operations f2fs_sops = {
.evict_inode = f2fs_evict_inode,
.put_super = f2fs_put_super,
.sync_fs = f2fs_sync_fs,
.freeze_fs = f2fs_freeze,
.unfreeze_fs = f2fs_unfreeze,
.statfs = f2fs_statfs,
};
......@@ -366,14 +384,23 @@ static int sanity_check_raw_super(struct super_block *sb,
return 1;
}
/* Currently, support only 4KB page cache size */
if (F2FS_BLKSIZE != PAGE_CACHE_SIZE) {
f2fs_msg(sb, KERN_INFO,
"Invalid page_cache_size (%lu), supports only 4KB\n",
PAGE_CACHE_SIZE);
return 1;
}
/* Currently, support only 4KB block size */
blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
if (blocksize != PAGE_CACHE_SIZE) {
if (blocksize != F2FS_BLKSIZE) {
f2fs_msg(sb, KERN_INFO,
"Invalid blocksize (%u), supports only 4KB\n",
blocksize);
return 1;
}
if (le32_to_cpu(raw_super->log_sectorsize) !=
F2FS_LOG_SECTOR_SIZE) {
f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize");
......@@ -387,10 +414,11 @@ static int sanity_check_raw_super(struct super_block *sb,
return 0;
}
static int sanity_check_ckpt(struct f2fs_super_block *raw_super,
struct f2fs_checkpoint *ckpt)
static int sanity_check_ckpt(struct f2fs_sb_info *sbi)
{
unsigned int total, fsmeta;
struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
total = le32_to_cpu(raw_super->segment_count);
fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
......@@ -401,6 +429,11 @@ static int sanity_check_ckpt(struct f2fs_super_block *raw_super,
if (fsmeta >= total)
return 1;
if (is_set_ckpt_flags(ckpt, CP_ERROR_FLAG)) {
f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
return 1;
}
return 0;
}
......@@ -429,6 +462,32 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
atomic_set(&sbi->nr_pages[i], 0);
}
static int validate_superblock(struct super_block *sb,
struct f2fs_super_block **raw_super,
struct buffer_head **raw_super_buf, sector_t block)
{
const char *super = (block == 0 ? "first" : "second");
/* read f2fs raw super block */
*raw_super_buf = sb_bread(sb, block);
if (!*raw_super_buf) {
f2fs_msg(sb, KERN_ERR, "unable to read %s superblock",
super);
return 1;
}
*raw_super = (struct f2fs_super_block *)
((char *)(*raw_super_buf)->b_data + F2FS_SUPER_OFFSET);
/* sanity checking of raw super */
if (!sanity_check_raw_super(sb, *raw_super))
return 0;
f2fs_msg(sb, KERN_ERR, "Can't find a valid F2FS filesystem "
"in %s superblock", super);
return 1;
}
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
struct f2fs_sb_info *sbi;
......@@ -449,16 +508,11 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
goto free_sbi;
}
/* read f2fs raw super block */
raw_super_buf = sb_bread(sb, 0);
if (!raw_super_buf) {
err = -EIO;
f2fs_msg(sb, KERN_ERR, "unable to read superblock");
goto free_sbi;
if (validate_superblock(sb, &raw_super, &raw_super_buf, 0)) {
brelse(raw_super_buf);
if (validate_superblock(sb, &raw_super, &raw_super_buf, 1))
goto free_sb_buf;
}
raw_super = (struct f2fs_super_block *)
((char *)raw_super_buf->b_data + F2FS_SUPER_OFFSET);
/* init some FS parameters */
sbi->active_logs = NR_CURSEG_TYPE;
......@@ -474,12 +528,6 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
if (parse_options(sb, sbi, (char *)data))
goto free_sb_buf;
/* sanity checking of raw super */
if (sanity_check_raw_super(sb, raw_super)) {
f2fs_msg(sb, KERN_ERR, "Can't find a valid F2FS filesystem");
goto free_sb_buf;
}
sb->s_maxbytes = max_file_size(le32_to_cpu(raw_super->log_blocksize));
sb->s_max_links = F2FS_LINK_MAX;
get_random_bytes(&sbi->s_next_generation, sizeof(u32));
......@@ -525,7 +573,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
/* sanity checking of checkpoint */
err = -EINVAL;
if (sanity_check_ckpt(raw_super, sbi->ckpt)) {
if (sanity_check_ckpt(sbi)) {
f2fs_msg(sb, KERN_ERR, "Invalid F2FS checkpoint");
goto free_cp;
}
......