Commit f6c658df authored by Linus Torvalds

Merge tag 'for-f2fs-4.7' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs

Pull f2fs updates from Jaegeuk Kim:
 "In this round, as Ted pointed out, fscrypto allows one more key prefix
  given by filesystem to resolve backward compatibility issues.  Other
  than that, we've fixed several error handling cases by introducing
  a fault injection facility.  We've also achieved performance
  improvement in some workloads as well as a bunch of bug fixes.

  Summary:

  Enhancements:
   - fs-specific prefix for fscrypto
   - fault injection facility
   - expose validity bitmaps so users can gauge fragmentation
   - fallocate/rm/preallocation speed up
   - use percpu counters

  Bug fixes:
   - some inline_dentry/inline_data bugs
   - error handling for atomic/volatile/orphan inodes
   - recover broken superblock"

* tag 'for-f2fs-4.7' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs: (73 commits)
  f2fs: fix to update dirty page count correctly
  f2fs: flush pending bios right away when error occurs
  f2fs: avoid ENOSPC fault in the recovery process
  f2fs: make exit_f2fs_fs more clear
  f2fs: use percpu_counter for total_valid_inode_count
  f2fs: use percpu_counter for alloc_valid_block_count
  f2fs: use percpu_counter for # of dirty pages in inode
  f2fs: use percpu_counter for page counters
  f2fs: use bio count instead of F2FS_WRITEBACK page count
  f2fs: manipulate dirty file inodes when DATA_FLUSH is set
  f2fs: add fault injection to sysfs
  f2fs: no need inc dirty pages under inode lock
  f2fs: fix incorrect error path handling in f2fs_move_rehashed_dirents
  f2fs: fix i_current_depth during inline dentry conversion
  f2fs: correct return value type of f2fs_fill_super
  f2fs: fix deadlock when flush inline data
  f2fs: avoid f2fs_bug_on during recovery
  f2fs: show # of orphan inodes
  f2fs: support in batch fzero in dnode page
  f2fs: support in batch multi blocks preallocation
  ...
parents 07be1337 0f3311a8
diff --git a/MAINTAINERS b/MAINTAINERS
@@ -4793,7 +4793,7 @@ F: include/linux/fscache*.h
 F2FS FILE SYSTEM
 M: Jaegeuk Kim <jaegeuk@kernel.org>
 M: Changman Lee <cm224.lee@samsung.com>
-R: Chao Yu <chao2.yu@samsung.com>
+R: Chao Yu <yuchao0@huawei.com>
 L: linux-f2fs-devel@lists.sourceforge.net
 W: http://en.wikipedia.org/wiki/F2FS
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs.git
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
@@ -78,6 +78,67 @@ static int derive_key_aes(u8 deriving_key[FS_AES_128_ECB_KEY_SIZE],
     return res;
 }

+static int validate_user_key(struct fscrypt_info *crypt_info,
+            struct fscrypt_context *ctx, u8 *raw_key,
+            u8 *prefix, int prefix_size)
+{
+    u8 *full_key_descriptor;
+    struct key *keyring_key;
+    struct fscrypt_key *master_key;
+    const struct user_key_payload *ukp;
+    int full_key_len = prefix_size + (FS_KEY_DESCRIPTOR_SIZE * 2) + 1;
+    int res;
+
+    full_key_descriptor = kmalloc(full_key_len, GFP_NOFS);
+    if (!full_key_descriptor)
+        return -ENOMEM;
+
+    memcpy(full_key_descriptor, prefix, prefix_size);
+    sprintf(full_key_descriptor + prefix_size,
+            "%*phN", FS_KEY_DESCRIPTOR_SIZE,
+            ctx->master_key_descriptor);
+    full_key_descriptor[full_key_len - 1] = '\0';
+    keyring_key = request_key(&key_type_logon, full_key_descriptor, NULL);
+    kfree(full_key_descriptor);
+    if (IS_ERR(keyring_key))
+        return PTR_ERR(keyring_key);
+
+    if (keyring_key->type != &key_type_logon) {
+        printk_once(KERN_WARNING
+                "%s: key type must be logon\n", __func__);
+        res = -ENOKEY;
+        goto out;
+    }
+    down_read(&keyring_key->sem);
+    ukp = user_key_payload(keyring_key);
+    if (ukp->datalen != sizeof(struct fscrypt_key)) {
+        res = -EINVAL;
+        up_read(&keyring_key->sem);
+        goto out;
+    }
+    master_key = (struct fscrypt_key *)ukp->data;
+    BUILD_BUG_ON(FS_AES_128_ECB_KEY_SIZE != FS_KEY_DERIVATION_NONCE_SIZE);
+
+    if (master_key->size != FS_AES_256_XTS_KEY_SIZE) {
+        printk_once(KERN_WARNING
+                "%s: key size incorrect: %d\n",
+                __func__, master_key->size);
+        res = -ENOKEY;
+        up_read(&keyring_key->sem);
+        goto out;
+    }
+    res = derive_key_aes(ctx->nonce, master_key->raw, raw_key);
+    up_read(&keyring_key->sem);
+    if (res)
+        goto out;
+
+    crypt_info->ci_keyring_key = keyring_key;
+    return 0;
+out:
+    key_put(keyring_key);
+    return res;
+}
+
 static void put_crypt_info(struct fscrypt_info *ci)
 {
     if (!ci)
@@ -91,12 +152,7 @@ static void put_crypt_info(struct fscrypt_info *ci)
 int get_crypt_info(struct inode *inode)
 {
     struct fscrypt_info *crypt_info;
-    u8 full_key_descriptor[FS_KEY_DESC_PREFIX_SIZE +
-                (FS_KEY_DESCRIPTOR_SIZE * 2) + 1];
-    struct key *keyring_key = NULL;
-    struct fscrypt_key *master_key;
     struct fscrypt_context ctx;
-    const struct user_key_payload *ukp;
     struct crypto_skcipher *ctfm;
     const char *cipher_str;
     u8 raw_key[FS_MAX_KEY_SIZE];
@@ -167,48 +223,24 @@ int get_crypt_info(struct inode *inode)
         memset(raw_key, 0x42, FS_AES_256_XTS_KEY_SIZE);
         goto got_key;
     }
-    memcpy(full_key_descriptor, FS_KEY_DESC_PREFIX,
-                FS_KEY_DESC_PREFIX_SIZE);
-    sprintf(full_key_descriptor + FS_KEY_DESC_PREFIX_SIZE,
-            "%*phN", FS_KEY_DESCRIPTOR_SIZE,
-            ctx.master_key_descriptor);
-    full_key_descriptor[FS_KEY_DESC_PREFIX_SIZE +
-                (2 * FS_KEY_DESCRIPTOR_SIZE)] = '\0';
-    keyring_key = request_key(&key_type_logon, full_key_descriptor, NULL);
-    if (IS_ERR(keyring_key)) {
-        res = PTR_ERR(keyring_key);
-        keyring_key = NULL;
-        goto out;
-    }
-    crypt_info->ci_keyring_key = keyring_key;
-    if (keyring_key->type != &key_type_logon) {
-        printk_once(KERN_WARNING
-                "%s: key type must be logon\n", __func__);
-        res = -ENOKEY;
-        goto out;
-    }
-    down_read(&keyring_key->sem);
-    ukp = user_key_payload(keyring_key);
-    if (ukp->datalen != sizeof(struct fscrypt_key)) {
-        res = -EINVAL;
-        up_read(&keyring_key->sem);
-        goto out;
-    }
-    master_key = (struct fscrypt_key *)ukp->data;
-    BUILD_BUG_ON(FS_AES_128_ECB_KEY_SIZE != FS_KEY_DERIVATION_NONCE_SIZE);
-    if (master_key->size != FS_AES_256_XTS_KEY_SIZE) {
-        printk_once(KERN_WARNING
-                "%s: key size incorrect: %d\n",
-                __func__, master_key->size);
-        res = -ENOKEY;
-        up_read(&keyring_key->sem);
-        goto out;
-    }
-    res = derive_key_aes(ctx.nonce, master_key->raw, raw_key);
-    up_read(&keyring_key->sem);
-    if (res)
-        goto out;
+    res = validate_user_key(crypt_info, &ctx, raw_key,
+            FS_KEY_DESC_PREFIX, FS_KEY_DESC_PREFIX_SIZE);
+    if (res && inode->i_sb->s_cop->key_prefix) {
+        u8 *prefix = NULL;
+        int prefix_size, res2;
+
+        prefix_size = inode->i_sb->s_cop->key_prefix(inode, &prefix);
+        res2 = validate_user_key(crypt_info, &ctx, raw_key,
+                prefix, prefix_size);
+        if (res2) {
+            if (res2 == -ENOKEY)
+                res = -ENOKEY;
+            goto out;
+        }
+    } else if (res) {
+        goto out;
+    }
 got_key:
     ctfm = crypto_alloc_skcipher(cipher_str, 0, 0);
     if (!ctfm || IS_ERR(ctfm)) {
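With the hunks above, get_crypt_info() first asks the keyring for a key named with the
generic fscrypt prefix and, only if that fails and the filesystem provides a ->key_prefix
hook, retries with the filesystem-specific prefix (f2fs registers "f2fs:", see the f2fs.h
hunks near the end of this diff). For userspace this means the same master key can be
installed under either description. The sketch below is illustrative only: the payload
layout and the "fscrypt:" spelling of the generic prefix are assumptions, not taken from
this diff. Build with "cc key_demo.c -lkeyutils".

#include <keyutils.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* assumed layout of the "logon" key payload consumed by fscrypt */
struct fscrypt_key_payload {
	uint32_t mode;
	uint8_t  raw[64];
	uint32_t size;
};

int main(void)
{
	struct fscrypt_key_payload key = { .mode = 0, .size = 64 };
	const char *desc_hex = "0123456789abcdef";	/* dummy 8-byte descriptor in hex */
	char desc[64];
	key_serial_t serial;

	memset(key.raw, 0x61, sizeof(key.raw));		/* dummy key material */

	/* generic prefix used by the common fscrypt code (assumed "fscrypt:") */
	snprintf(desc, sizeof(desc), "fscrypt:%s", desc_hex);
	serial = add_key("logon", desc, &key, sizeof(key), KEY_SPEC_SESSION_KEYRING);
	if (serial < 0)
		perror("add_key fscrypt:");

	/* filesystem-specific prefix kept for backward compatibility ("f2fs:") */
	snprintf(desc, sizeof(desc), "f2fs:%s", desc_hex);
	serial = add_key("logon", desc, &key, sizeof(key), KEY_SPEC_SESSION_KEYRING);
	if (serial < 0)
		perror("add_key f2fs:");
	return 0;
}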
diff --git a/fs/f2fs/Kconfig b/fs/f2fs/Kconfig
@@ -94,3 +94,11 @@ config F2FS_IO_TRACE
       information and block IO patterns in the filesystem level.

       If unsure, say N.
+
+config F2FS_FAULT_INJECTION
+    bool "F2FS fault injection facility"
+    depends on F2FS_FS
+    help
+      Test F2FS to inject faults such as ENOMEM, ENOSPC, and so on.
+
+      If unsure, say N.
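The facility this option enables is driven by two knobs, an injection rate and a bitmask
of fault types, which feed the time_to_inject() helper added to f2fs.h later in this diff:
each candidate call site bumps a counter, and once the counter reaches the rate a fault
fires for any type whose bit is set. The stand-alone program below models only that
decision logic (the in-tree helper uses atomics, a per-type if-chain, and logs the caller);
it is a simplified sketch, not the kernel code.

#include <stdbool.h>
#include <stdio.h>

enum { FAULT_KMALLOC, FAULT_PAGE_ALLOC, FAULT_ALLOC_NID,
       FAULT_ORPHAN, FAULT_BLOCK, FAULT_DIR_DEPTH, FAULT_MAX };

static unsigned int inject_rate = 10;	/* fire on every 10th candidate operation */
static unsigned int inject_type = (1 << FAULT_ORPHAN) | (1 << FAULT_BLOCK);
static unsigned int inject_ops;

static bool time_to_inject(int type)
{
	if (!inject_rate)
		return false;
	if (!(inject_type & (1 << type)))	/* this fault class is disabled */
		return false;
	if (++inject_ops >= inject_rate) {
		inject_ops = 0;
		printf("inject fault type %d\n", type);
		return true;
	}
	return false;
}

int main(void)
{
	for (int i = 0; i < 35; i++)
		time_to_inject(FAULT_ORPHAN);	/* fires three times in 35 calls */
	return 0;
}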
diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c
@@ -115,7 +115,7 @@ static void *f2fs_acl_to_disk(const struct posix_acl *acl, size_t *size)
     struct f2fs_acl_entry *entry;
     int i;

-    f2fs_acl = kmalloc(sizeof(struct f2fs_acl_header) + acl->a_count *
+    f2fs_acl = f2fs_kmalloc(sizeof(struct f2fs_acl_header) + acl->a_count *
             sizeof(struct f2fs_acl_entry), GFP_NOFS);
     if (!f2fs_acl)
         return ERR_PTR(-ENOMEM);
@@ -175,7 +175,7 @@ static struct posix_acl *__f2fs_get_acl(struct inode *inode, int type,
     retval = f2fs_getxattr(inode, name_index, "", NULL, 0, dpage);
     if (retval > 0) {
-        value = kmalloc(retval, GFP_F2FS_ZERO);
+        value = f2fs_kmalloc(retval, GFP_F2FS_ZERO);
         if (!value)
             return ERR_PTR(-ENOMEM);
         retval = f2fs_getxattr(inode, name_index, "", value,
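The two hunks above swap plain kmalloc() calls for f2fs_kmalloc(). The wrapper's body is
not part of this excerpt, but given the FAULT_KMALLOC type introduced in f2fs.h it is
presumably a thin shim that can fail on demand under CONFIG_F2FS_FAULT_INJECTION, roughly
along these lines (hypothetical, for illustration only; not the actual implementation):

#include <linux/slab.h>

static inline void *f2fs_kmalloc(size_t size, gfp_t flags)
{
#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(FAULT_KMALLOC))
		return NULL;		/* simulate an allocation failure */
#endif
	return kmalloc(size, flags);
}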
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
@@ -26,6 +26,14 @@
 static struct kmem_cache *ino_entry_slab;
 struct kmem_cache *inode_entry_slab;

+void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io)
+{
+    set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
+    sbi->sb->s_flags |= MS_RDONLY;
+    if (!end_io)
+        f2fs_flush_merged_bios(sbi);
+}
+
 /*
  * We guarantee no failure on the returned page.
  */
@@ -34,7 +42,7 @@ struct page *grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
     struct address_space *mapping = META_MAPPING(sbi);
     struct page *page = NULL;
 repeat:
-    page = grab_cache_page(mapping, index);
+    page = f2fs_grab_cache_page(mapping, index, false);
     if (!page) {
         cond_resched();
         goto repeat;
@@ -64,7 +72,7 @@ static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index,
     if (unlikely(!is_meta))
         fio.rw &= ~REQ_META;
 repeat:
-    page = grab_cache_page(mapping, index);
+    page = f2fs_grab_cache_page(mapping, index, false);
     if (!page) {
         cond_resched();
         goto repeat;
@@ -91,7 +99,7 @@ static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index,
      * meta page.
      */
     if (unlikely(!PageUptodate(page)))
-        f2fs_stop_checkpoint(sbi);
+        f2fs_stop_checkpoint(sbi, false);
 out:
     return page;
 }
@@ -186,7 +194,8 @@ int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
             BUG();
         }

-        page = grab_cache_page(META_MAPPING(sbi), fio.new_blkaddr);
+        page = f2fs_grab_cache_page(META_MAPPING(sbi),
+                        fio.new_blkaddr, false);
         if (!page)
             continue;
         if (PageUptodate(page)) {
@@ -211,7 +220,7 @@ void ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index)
     bool readahead = false;

     page = find_get_page(META_MAPPING(sbi), index);
-    if (!page || (page && !PageUptodate(page)))
+    if (!page || !PageUptodate(page))
         readahead = true;
     f2fs_put_page(page, 0);
@@ -448,12 +457,12 @@ bool exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode)
     return e ? true : false;
 }

-void release_ino_entry(struct f2fs_sb_info *sbi)
+void release_ino_entry(struct f2fs_sb_info *sbi, bool all)
 {
     struct ino_entry *e, *tmp;
     int i;

-    for (i = APPEND_INO; i <= UPDATE_INO; i++) {
+    for (i = all ? ORPHAN_INO: APPEND_INO; i <= UPDATE_INO; i++) {
         struct inode_management *im = &sbi->im[i];

         spin_lock(&im->ino_lock);
@@ -473,6 +482,13 @@ int acquire_orphan_inode(struct f2fs_sb_info *sbi)
     int err = 0;

     spin_lock(&im->ino_lock);
+
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+    if (time_to_inject(FAULT_ORPHAN)) {
+        spin_unlock(&im->ino_lock);
+        return -ENOSPC;
+    }
+#endif
     if (unlikely(im->ino_num >= sbi->max_orphans))
         err = -ENOSPC;
     else
@@ -777,43 +793,32 @@ void update_dirty_page(struct inode *inode, struct page *page)
             !S_ISLNK(inode->i_mode))
         return;

+    if (type != FILE_INODE || test_opt(sbi, DATA_FLUSH)) {
     spin_lock(&sbi->inode_lock[type]);
     __add_dirty_inode(inode, type);
-    inode_inc_dirty_pages(inode);
     spin_unlock(&sbi->inode_lock[type]);
+    }

+    inode_inc_dirty_pages(inode);
     SetPagePrivate(page);
     f2fs_trace_pid(page);
 }

-void add_dirty_dir_inode(struct inode *inode)
-{
-    struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-
-    spin_lock(&sbi->inode_lock[DIR_INODE]);
-    __add_dirty_inode(inode, DIR_INODE);
-    spin_unlock(&sbi->inode_lock[DIR_INODE]);
-}
-
 void remove_dirty_inode(struct inode *inode)
 {
     struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-    struct f2fs_inode_info *fi = F2FS_I(inode);
     enum inode_type type = S_ISDIR(inode->i_mode) ? DIR_INODE : FILE_INODE;

     if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
             !S_ISLNK(inode->i_mode))
         return;

+    if (type == FILE_INODE && !test_opt(sbi, DATA_FLUSH))
+        return;
+
     spin_lock(&sbi->inode_lock[type]);
     __remove_dirty_inode(inode, type);
     spin_unlock(&sbi->inode_lock[type]);
-
-    /* Only from the recovery routine */
-    if (is_inode_flag_set(fi, FI_DELAY_IPUT)) {
-        clear_inode_flag(fi, FI_DELAY_IPUT);
-        iput(inode);
-    }
 }

 int sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type)
@@ -892,7 +897,7 @@ static int block_operations(struct f2fs_sb_info *sbi)
     if (get_pages(sbi, F2FS_DIRTY_NODES)) {
         up_write(&sbi->node_write);
-        err = sync_node_pages(sbi, 0, &wbc);
+        err = sync_node_pages(sbi, &wbc);
         if (err) {
             f2fs_unlock_all(sbi);
             goto out;
@@ -917,7 +922,7 @@ static void wait_on_all_pages_writeback(struct f2fs_sb_info *sbi)
     for (;;) {
         prepare_to_wait(&sbi->cp_wait, &wait, TASK_UNINTERRUPTIBLE);

-        if (!get_pages(sbi, F2FS_WRITEBACK))
+        if (!atomic_read(&sbi->nr_wb_bios))
             break;

         io_schedule_timeout(5*HZ);
@@ -1082,7 +1087,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
     /* update user_block_counts */
     sbi->last_valid_block_count = sbi->total_valid_block_count;
-    sbi->alloc_valid_block_count = 0;
+    percpu_counter_set(&sbi->alloc_valid_block_count, 0);

     /* Here, we only have one bio having CP pack */
     sync_meta_pages(sbi, META_FLUSH, LONG_MAX);
@@ -1098,7 +1103,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
         invalidate_mapping_pages(META_MAPPING(sbi), discard_blk,
                     discard_blk);

-    release_ino_entry(sbi);
+    release_ino_entry(sbi, false);

     if (unlikely(f2fs_cp_error(sbi)))
         return -EIO;
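Note the switch from a plain block counter to percpu_counter_set() in do_checkpoint()
above; this series converts several hot counters (dirty pages, allocated blocks, valid
inodes) to percpu counters, as the f2fs.h hunks near the end of the diff show. The
trade-off is that increments stay in per-CPU deltas and are cheap, while an exact read has
to sum all CPUs, which suits counters that are bumped on every page but only totalled for
checkpoints and statistics. A minimal kernel-side sketch of the API involved (the counter
name here is made up, not an f2fs symbol):

#include <linux/percpu_counter.h>
#include <linux/printk.h>

static struct percpu_counter nr_dirty;

static int counter_example(void)
{
	int err = percpu_counter_init(&nr_dirty, 0, GFP_KERNEL);

	if (err)
		return err;

	percpu_counter_inc(&nr_dirty);		/* cheap, per-CPU fast path */
	percpu_counter_add(&nr_dirty, 16);
	percpu_counter_set(&nr_dirty, 0);	/* what do_checkpoint() does above */

	/* an exact read sums every CPU's delta - do it rarely */
	pr_info("dirty: %lld\n", percpu_counter_sum_positive(&nr_dirty));

	percpu_counter_destroy(&nr_dirty);
	return 0;
}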
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
@@ -68,13 +68,12 @@ static void f2fs_write_end_io(struct bio *bio)
         if (unlikely(bio->bi_error)) {
             set_bit(AS_EIO, &page->mapping->flags);
-            f2fs_stop_checkpoint(sbi);
+            f2fs_stop_checkpoint(sbi, true);
         }
         end_page_writeback(page);
-        dec_page_count(sbi, F2FS_WRITEBACK);
     }

-    if (!get_pages(sbi, F2FS_WRITEBACK) && wq_has_sleeper(&sbi->cp_wait))
+    if (atomic_dec_and_test(&sbi->nr_wb_bios) &&
+                wq_has_sleeper(&sbi->cp_wait))
         wake_up(&sbi->cp_wait);

     bio_put(bio);
@@ -98,6 +97,14 @@ static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
     return bio;
 }

+static inline void __submit_bio(struct f2fs_sb_info *sbi, int rw,
+            struct bio *bio)
+{
+    if (!is_read_io(rw))
+        atomic_inc(&sbi->nr_wb_bios);
+    submit_bio(rw, bio);
+}
+
 static void __submit_merged_bio(struct f2fs_bio_info *io)
 {
     struct f2fs_io_info *fio = &io->fio;
@@ -110,7 +117,7 @@ static void __submit_merged_bio(struct f2fs_bio_info *io)
     else
         trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio);

-    submit_bio(fio->rw, io->bio);
+    __submit_bio(io->sbi, fio->rw, io->bio);
     io->bio = NULL;
 }
@@ -228,7 +235,7 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
         return -EFAULT;
     }

-    submit_bio(fio->rw, bio);
+    __submit_bio(fio->sbi, fio->rw, bio);
     return 0;
 }
@@ -248,9 +255,6 @@ void f2fs_submit_page_mbio(struct f2fs_io_info *fio)
     down_write(&io->io_rwsem);

-    if (!is_read)
-        inc_page_count(sbi, F2FS_WRITEBACK);
-
     if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 ||
             io->fio.rw != fio->rw))
         __submit_merged_bio(io);
@@ -278,6 +282,16 @@ void f2fs_submit_page_mbio(struct f2fs_io_info *fio)
     trace_f2fs_submit_page_mbio(fio->page, fio);
 }

+static void __set_data_blkaddr(struct dnode_of_data *dn)
+{
+    struct f2fs_node *rn = F2FS_NODE(dn->node_page);
+    __le32 *addr_array;
+
+    /* Get physical address of data block */
+    addr_array = blkaddr_in_node(rn);
+    addr_array[dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
+}
+
 /*
  * Lock ordering for the change of data block address:
  * ->data_page
@@ -286,19 +300,9 @@ void f2fs_submit_page_mbio(struct f2fs_io_info *fio)
  */
 void set_data_blkaddr(struct dnode_of_data *dn)
 {
-    struct f2fs_node *rn;
-    __le32 *addr_array;
-    struct page *node_page = dn->node_page;
-    unsigned int ofs_in_node = dn->ofs_in_node;
-
-    f2fs_wait_on_page_writeback(node_page, NODE, true);
-
-    rn = F2FS_NODE(node_page);
-
-    /* Get physical address of data block */
-    addr_array = blkaddr_in_node(rn);
-    addr_array[ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
-    if (set_page_dirty(node_page))
+    f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
+    __set_data_blkaddr(dn);
+    if (set_page_dirty(dn->node_page))
         dn->node_changed = true;
 }
@@ -309,24 +313,53 @@ void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
     f2fs_update_extent_cache(dn);
 }

-int reserve_new_block(struct dnode_of_data *dn)
+/* dn->ofs_in_node will be returned with up-to-date last block pointer */
+int reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
 {
     struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);

+    if (!count)
+        return 0;
+
     if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
         return -EPERM;
-    if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
+    if (unlikely(!inc_valid_block_count(sbi, dn->inode, &count)))
         return -ENOSPC;

-    trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);
+    trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
+                        dn->ofs_in_node, count);
+
+    f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
+
+    for (; count > 0; dn->ofs_in_node++) {
+        block_t blkaddr =
+            datablock_addr(dn->node_page, dn->ofs_in_node);
+        if (blkaddr == NULL_ADDR) {
+            dn->data_blkaddr = NEW_ADDR;
+            __set_data_blkaddr(dn);
+            count--;
+        }
+    }
+
+    if (set_page_dirty(dn->node_page))
+        dn->node_changed = true;

-    dn->data_blkaddr = NEW_ADDR;
-    set_data_blkaddr(dn);
     mark_inode_dirty(dn->inode);
     sync_inode_page(dn);
     return 0;
 }

+/* Should keep dn->ofs_in_node unchanged */
+int reserve_new_block(struct dnode_of_data *dn)
+{
+    unsigned int ofs_in_node = dn->ofs_in_node;
+    int ret;
+
+    ret = reserve_new_blocks(dn, 1);
+    dn->ofs_in_node = ofs_in_node;
+    return ret;
+}
+
 int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
 {
     bool need_put = dn->inode_page ? false : true;
@@ -545,6 +578,7 @@ static int __allocate_data_block(struct dnode_of_data *dn)
     struct node_info ni;
     int seg = CURSEG_WARM_DATA;
     pgoff_t fofs;
+    blkcnt_t count = 1;

     if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
         return -EPERM;
@@ -553,7 +587,7 @@ static int __allocate_data_block(struct dnode_of_data *dn)
     if (dn->data_blkaddr == NEW_ADDR)
         goto alloc;

-    if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
+    if (unlikely(!inc_valid_block_count(sbi, dn->inode, &count)))
         return -ENOSPC;

 alloc:
@@ -582,8 +616,8 @@ ssize_t f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
     struct f2fs_map_blocks map;
     ssize_t ret = 0;

-    map.m_lblk = F2FS_BYTES_TO_BLK(iocb->ki_pos);
-    map.m_len = F2FS_BLK_ALIGN(iov_iter_count(from));
+    map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
+    map.m_len = F2FS_BYTES_TO_BLK(iov_iter_count(from));
     map.m_next_pgofs = NULL;

     if (f2fs_encrypted_inode(inode))
@@ -621,8 +655,10 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
     struct dnode_of_data dn;
     struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
     int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
-    pgoff_t pgofs, end_offset;
+    pgoff_t pgofs, end_offset, end;
     int err = 0, ofs = 1;
+    unsigned int ofs_in_node, last_ofs_in_node;
+    blkcnt_t prealloc;
     struct extent_info ei;
     bool allocated = false;
     block_t blkaddr;
@@ -632,6 +668,7 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
     /* it only supports block size == page size */
     pgofs = (pgoff_t)map->m_lblk;
+    end = pgofs + maxblocks;

     if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
         map->m_pblk = ei.blk + pgofs - ei.fofs;
@@ -648,6 +685,8 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
     set_new_dnode(&dn, inode, NULL, NULL, 0);
     err = get_dnode_of_data(&dn, pgofs, mode);
     if (err) {
+        if (flag == F2FS_GET_BLOCK_BMAP)
+            map->m_pblk = 0;
         if (err == -ENOENT) {
             err = 0;
             if (map->m_next_pgofs)
@@ -657,6 +696,8 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
         goto unlock_out;
     }

+    prealloc = 0;
+    ofs_in_node = dn.ofs_in_node;
     end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

 next_block:
@@ -669,30 +710,40 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
                 goto sync_out;
             }
             if (flag == F2FS_GET_BLOCK_PRE_AIO) {
-                if (blkaddr == NULL_ADDR)
-                    err = reserve_new_block(&dn);
+                if (blkaddr == NULL_ADDR) {
+                    prealloc++;
+                    last_ofs_in_node = dn.ofs_in_node;
+                }
             } else {
                 err = __allocate_data_block(&dn);
+                if (!err) {
+                    set_inode_flag(F2FS_I(inode),
+                            FI_APPEND_WRITE);
+                    allocated = true;
+                }
             }
             if (err)
                 goto sync_out;
-            allocated = true;
             map->m_flags = F2FS_MAP_NEW;
             blkaddr = dn.data_blkaddr;
         } else {
+            if (flag == F2FS_GET_BLOCK_BMAP) {
+                map->m_pblk = 0;
+                goto sync_out;
+            }
             if (flag == F2FS_GET_BLOCK_FIEMAP &&
                         blkaddr == NULL_ADDR) {
                 if (map->m_next_pgofs)
                     *map->m_next_pgofs = pgofs + 1;
             }
             if (flag != F2FS_GET_BLOCK_FIEMAP ||
-                        blkaddr != NEW_ADDR) {
-                if (flag == F2FS_GET_BLOCK_BMAP)
-                    err = -ENOENT;
+                        blkaddr != NEW_ADDR)
                 goto sync_out;
-            }
         }
     }

+    if (flag == F2FS_GET_BLOCK_PRE_AIO)
+        goto skip;
+
     if (map->m_len == 0) {
         /* preallocated unwritten block should be mapped for fiemap. */
@@ -705,19 +756,37 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
     } else if ((map->m_pblk != NEW_ADDR &&
             blkaddr == (map->m_pblk + ofs)) ||
             (map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
-            flag == F2FS_GET_BLOCK_PRE_DIO ||
-            flag == F2FS_GET_BLOCK_PRE_AIO) {
+            flag == F2FS_GET_BLOCK_PRE_DIO) {
         ofs++;
         map->m_len++;
     } else {
         goto sync_out;
     }

+skip:
     dn.ofs_in_node++;
     pgofs++;

-    if (map->m_len < maxblocks) {
-        if (dn.ofs_in_node < end_offset)
+    /* preallocate blocks in batch for one dnode page */
+    if (flag == F2FS_GET_BLOCK_PRE_AIO &&
+            (pgofs == end || dn.ofs_in_node == end_offset)) {
+        dn.ofs_in_node = ofs_in_node;
+        err = reserve_new_blocks(&dn, prealloc);
+        if (err)
+            goto sync_out;
+
+        map->m_len += dn.ofs_in_node - ofs_in_node;
+        if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
+            err = -ENOSPC;
+            goto sync_out;
+        }
+        dn.ofs_in_node = end_offset;
+    }
+
+    if (pgofs >= end)
+        goto sync_out;
+    else if (dn.ofs_in_node < end_offset)
         goto next_block;

     if (allocated)
@@ -730,7 +799,6 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
     }
     allocated = false;
     goto next_dnode;
-}

 sync_out:
     if (allocated)
@@ -983,7 +1051,7 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
          */
         if (bio && (last_block_in_bio != block_nr - 1)) {
 submit_and_realloc:
-            submit_bio(READ, bio);
+            __submit_bio(F2FS_I_SB(inode), READ, bio);
             bio = NULL;
         }
         if (bio == NULL) {
@@ -1026,7 +1094,7 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
         goto next_page;
 confused:
         if (bio) {
-            submit_bio(READ, bio);
+            __submit_bio(F2FS_I_SB(inode), READ, bio);
             bio = NULL;
         }
         unlock_page(page);
@@ -1036,7 +1104,7 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
     }
     BUG_ON(pages && !list_empty(pages));
     if (bio)
-        submit_bio(READ, bio);
+        __submit_bio(F2FS_I_SB(inode), READ, bio);
     return 0;
 }
@@ -1177,8 +1245,10 @@ static int f2fs_write_data_page(struct page *page,
         goto redirty_out;
     if (f2fs_is_drop_cache(inode))
         goto out;
-    if (f2fs_is_volatile_file(inode) && !wbc->for_reclaim &&
-            available_free_memory(sbi, BASE_CHECK))
+    /* we should not write 0'th page having journal header */
+    if (f2fs_is_volatile_file(inode) && (!page->index ||
+            (!wbc->for_reclaim &&
+            available_free_memory(sbi, BASE_CHECK))))
         goto redirty_out;

     /* Dentry blocks are controlled by checkpoint */
@@ -1480,6 +1550,7 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi,
         if (pos + len <= MAX_INLINE_DATA) {
             read_inline_data(page, ipage);
             set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
-            set_inline_node(ipage);
+            if (inode->i_nlink)
+                set_inline_node(ipage);
         } else {
             err = f2fs_convert_inline_page(&dn, page);
@@ -1496,7 +1567,7 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi,
     } else {
         /* hole case */
         err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
-        if (err || (!err && dn.data_blkaddr == NULL_ADDR)) {
+        if (err || dn.data_blkaddr == NULL_ADDR) {
             f2fs_put_dnode(&dn);
             f2fs_lock_op(sbi);
             locked = true;
@@ -1683,8 +1754,12 @@ static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
     trace_f2fs_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));

     err = blockdev_direct_IO(iocb, inode, iter, get_data_block_dio);
-    if (err < 0 && iov_iter_rw(iter) == WRITE)
-        f2fs_write_failed(mapping, offset + count);
+    if (iov_iter_rw(iter) == WRITE) {
+        if (err > 0)
+            set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
+        else if (err < 0)
+            f2fs_write_failed(mapping, offset + count);
+    }

     trace_f2fs_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), err);
@@ -1714,6 +1789,7 @@ void f2fs_invalidate_page(struct page *page, unsigned int offset,
     if (IS_ATOMIC_WRITTEN_PAGE(page))
         return;

+    set_page_private(page, 0);
     ClearPagePrivate(page);
 }
@@ -1727,6 +1803,7 @@ int f2fs_release_page(struct page *page, gfp_t wait)
     if (IS_ATOMIC_WRITTEN_PAGE(page))
         return 0;

+    set_page_private(page, 0);
     ClearPagePrivate(page);
     return 1;
 }
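The largest functional change above is the batched preallocation path: for
F2FS_GET_BLOCK_PRE_AIO, f2fs_map_blocks() no longer reserves blocks one at a time but
counts the NULL_ADDR slots it walks over in a dnode page and hands the whole count to
reserve_new_blocks(), so the quota check in inc_valid_block_count() and the node-page
dirtying happen once per dnode page instead of once per block. A user-space model of that
count-then-reserve pattern follows; the constants and the reserve step are stand-ins, not
f2fs code.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define NULL_ADDR 0u		/* "no block allocated yet" */
#define NEW_ADDR  ~0u		/* "reserved but not yet written" */

/* one quota check and one pass for the whole batch, not one per block */
static int reserve_blocks(uint32_t *addrs, size_t start, size_t end,
			  unsigned int count)
{
	printf("reserving %u blocks in one call\n", count);
	for (size_t i = start; i < end && count; i++) {
		if (addrs[i] == NULL_ADDR) {
			addrs[i] = NEW_ADDR;
			count--;
		}
	}
	return 0;
}

int main(void)
{
	uint32_t addrs[8] = { 5, NULL_ADDR, NULL_ADDR, 9,
			      NULL_ADDR, 7, NULL_ADDR, NULL_ADDR };
	unsigned int prealloc = 0;

	/* first pass: just count the holes in this window */
	for (size_t i = 0; i < 8; i++)
		if (addrs[i] == NULL_ADDR)
			prealloc++;

	/* second pass: reserve them all at once */
	return reserve_blocks(addrs, 0, 8, prealloc);
}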
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
@@ -48,7 +48,7 @@ static void update_general_status(struct f2fs_sb_info *sbi)
     si->ndirty_dirs = sbi->ndirty_inode[DIR_INODE];
     si->ndirty_files = sbi->ndirty_inode[FILE_INODE];
     si->inmem_pages = get_pages(sbi, F2FS_INMEM_PAGES);
-    si->wb_pages = get_pages(sbi, F2FS_WRITEBACK);
+    si->wb_bios = atomic_read(&sbi->nr_wb_bios);
     si->total_count = (int)sbi->user_block_count / sbi->blocks_per_seg;
     si->rsvd_segs = reserved_segments(sbi);
     si->overp_segs = overprovision_segments(sbi);
@@ -58,6 +58,7 @@ static void update_general_status(struct f2fs_sb_info *sbi)
     si->inline_xattr = atomic_read(&sbi->inline_xattr);
     si->inline_inode = atomic_read(&sbi->inline_inode);
     si->inline_dir = atomic_read(&sbi->inline_dir);
+    si->orphans = sbi->im[ORPHAN_INO].ino_num;
     si->utilization = utilization(sbi);

     si->free_segs = free_segments(sbi);
@@ -143,6 +144,7 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
     si->base_mem = sizeof(struct f2fs_sb_info) + sbi->sb->s_blocksize;
     si->base_mem += 2 * sizeof(struct f2fs_inode_info);
     si->base_mem += sizeof(*sbi->ckpt);
+    si->base_mem += sizeof(struct percpu_counter) * NR_COUNT_TYPE;

     /* build sm */
     si->base_mem += sizeof(struct f2fs_sm_info);
@@ -192,7 +194,7 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
     si->cache_mem += NM_I(sbi)->dirty_nat_cnt *
                     sizeof(struct nat_entry_set);
     si->cache_mem += si->inmem_pages * sizeof(struct inmem_pages);
-    for (i = 0; i <= UPDATE_INO; i++)
+    for (i = 0; i <= ORPHAN_INO; i++)
         si->cache_mem += sbi->im[i].ino_num * sizeof(struct ino_entry);
     si->cache_mem += atomic_read(&sbi->total_ext_tree) *
                     sizeof(struct extent_tree);
@@ -216,8 +218,9 @@ static int stat_show(struct seq_file *s, void *v)
     list_for_each_entry(si, &f2fs_stat_list, stat_list) {
         update_general_status(si->sbi);

-        seq_printf(s, "\n=====[ partition info(%pg). #%d ]=====\n",
-            si->sbi->sb->s_bdev, i++);
+        seq_printf(s, "\n=====[ partition info(%pg). #%d, %s]=====\n",
+            si->sbi->sb->s_bdev, i++,
+            f2fs_readonly(si->sbi->sb) ? "RO": "RW");
         seq_printf(s, "[SB: 1] [CP: 2] [SIT: %d] [NAT: %d] ",
                si->sit_area_segs, si->nat_area_segs);
         seq_printf(s, "[SSA: %d] [MAIN: %d",
@@ -237,6 +240,8 @@ static int stat_show(struct seq_file *s, void *v)
                si->inline_inode);
         seq_printf(s, "  - Inline_dentry Inode: %u\n",
                si->inline_dir);
+        seq_printf(s, "  - Orphan Inode: %u\n",
+               si->orphans);
         seq_printf(s, "\nMain area: %d segs, %d secs %d zones\n",
                si->main_area_segs, si->main_area_sections,
                si->main_area_zones);
@@ -295,15 +300,15 @@ static int stat_show(struct seq_file *s, void *v)
         seq_printf(s, "  - Inner Struct Count: tree: %d(%d), node: %d\n",
                 si->ext_tree, si->zombie_tree, si->ext_node);
         seq_puts(s, "\nBalancing F2FS Async:\n");
-        seq_printf(s, "  - inmem: %4d, wb: %4d\n",
-               si->inmem_pages, si->wb_pages);
-        seq_printf(s, "  - nodes: %4d in %4d\n",
+        seq_printf(s, "  - inmem: %4lld, wb_bios: %4d\n",
+               si->inmem_pages, si->wb_bios);
+        seq_printf(s, "  - nodes: %4lld in %4d\n",
                si->ndirty_node, si->node_pages);
-        seq_printf(s, "  - dents: %4d in dirs:%4d\n",
+        seq_printf(s, "  - dents: %4lld in dirs:%4d\n",
                si->ndirty_dent, si->ndirty_dirs);
-        seq_printf(s, "  - datas: %4d in files:%4d\n",
+        seq_printf(s, "  - datas: %4lld in files:%4d\n",
                si->ndirty_data, si->ndirty_files);
-        seq_printf(s, "  - meta: %4d in %4d\n",
+        seq_printf(s, "  - meta: %4lld in %4d\n",
                si->ndirty_meta, si->meta_pages);
         seq_printf(s, "  - NATs: %9d/%9d\n  - SITs: %9d/%9d\n",
                si->dirty_nats, si->nats, si->dirty_sits, si->sits);
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
@@ -48,7 +48,6 @@ unsigned char f2fs_filetype_table[F2FS_FT_MAX] = {
     [F2FS_FT_SYMLINK]   = DT_LNK,
 };

-#define S_SHIFT 12
 static unsigned char f2fs_type_by_mode[S_IFMT >> S_SHIFT] = {
     [S_IFREG >> S_SHIFT]    = F2FS_FT_REG_FILE,
     [S_IFDIR >> S_SHIFT]    = F2FS_FT_DIR,
@@ -64,6 +63,13 @@ void set_de_type(struct f2fs_dir_entry *de, umode_t mode)
     de->file_type = f2fs_type_by_mode[(mode & S_IFMT) >> S_SHIFT];
 }

+unsigned char get_de_type(struct f2fs_dir_entry *de)
+{
+    if (de->file_type < F2FS_FT_MAX)
+        return f2fs_filetype_table[de->file_type];
+    return DT_UNKNOWN;
+}
+
 static unsigned long dir_block_index(unsigned int level,
                 int dir_level, unsigned int idx)
 {
@@ -95,11 +101,6 @@ static struct f2fs_dir_entry *find_in_block(struct page *dentry_page,
     else
         kunmap(dentry_page);

-    /*
-     * For the most part, it should be a bug when name_len is zero.
-     * We stop here for figuring out where the bugs has occurred.
-     */
-    f2fs_bug_on(F2FS_P_SB(dentry_page), d.max < 0);
-
     return de;
 }
@@ -124,6 +125,11 @@ struct f2fs_dir_entry *find_target_dentry(struct fscrypt_name *fname,

         de = &d->dentry[bit_pos];

+        if (unlikely(!de->name_len)) {
+            bit_pos++;
+            continue;
+        }
+
         /* encrypted case */
         de_name.name = d->filename[bit_pos];
         de_name.len = le16_to_cpu(de->name_len);
@@ -141,10 +147,6 @@ struct f2fs_dir_entry *find_target_dentry(struct fscrypt_name *fname,
             *max_slots = max_len;
         max_len = 0;

-        /* remain bug on condition */
-        if (unlikely(!de->name_len))
-            d->max = -1;
-
         bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
     }
@@ -389,9 +391,14 @@ struct page *init_inode_metadata(struct inode *inode, struct inode *dir,
             return page;

         if (S_ISDIR(inode->i_mode)) {
+            /* in order to handle error case */
+            get_page(page);
             err = make_empty_dir(inode, dir, page);
-            if (err)
-                goto error;
+            if (err) {
+                lock_page(page);
+                goto put_error;
+            }
+            put_page(page);
         }

         err = f2fs_init_acl(inode, dir, page, dpage);
@@ -435,13 +442,12 @@ struct page *init_inode_metadata(struct inode *inode, struct inode *dir,
     return page;

 put_error:
-    f2fs_put_page(page, 1);
-error:
-    /* once the failed inode becomes a bad inode, i_mode is S_IFREG */
+    /* truncate empty dir pages */
     truncate_inode_pages(&inode->i_data, 0);
-    truncate_blocks(inode, 0, false);
-    remove_dirty_inode(inode);
-    remove_inode_page(inode);
+
+    clear_nlink(inode);
+    update_inode(inode, page);
+    f2fs_put_page(page, 1);
     return ERR_PTR(err);
 }
@@ -509,11 +515,7 @@ void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
     }
 }

-/*
- * Caller should grab and release a rwsem by calling f2fs_lock_op() and
- * f2fs_unlock_op().
- */
-int __f2fs_add_link(struct inode *dir, const struct qstr *name,
+int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name,
                 struct inode *inode, nid_t ino, umode_t mode)
 {
     unsigned int bit_pos;
@@ -526,28 +528,11 @@ int __f2fs_add_link(struct inode *dir, const struct qstr *name,
     struct f2fs_dentry_block *dentry_blk = NULL;
     struct f2fs_dentry_ptr d;
     struct page *page = NULL;
-    struct fscrypt_name fname;
-    struct qstr new_name;
-    int slots, err;
-
-    err = fscrypt_setup_filename(dir, name, 0, &fname);
-    if (err)
-        return err;
-
-    new_name.name = fname_name(&fname);
-    new_name.len = fname_len(&fname);
-
-    if (f2fs_has_inline_dentry(dir)) {
-        err = f2fs_add_inline_entry(dir, &new_name, inode, ino, mode);
-        if (!err || err != -EAGAIN)
-            goto out;
-        else
-            err = 0;
-    }
+    int slots, err = 0;

     level = 0;
-    slots = GET_DENTRY_SLOTS(new_name.len);
-    dentry_hash = f2fs_dentry_hash(&new_name);
+    slots = GET_DENTRY_SLOTS(new_name->len);
+    dentry_hash = f2fs_dentry_hash(new_name);

     current_depth = F2FS_I(dir)->i_current_depth;
     if (F2FS_I(dir)->chash == dentry_hash) {
@@ -556,10 +541,12 @@ int __f2fs_add_link(struct inode *dir, const struct qstr *name,
     }

 start:
-    if (unlikely(current_depth == MAX_DIR_HASH_DEPTH)) {
-        err = -ENOSPC;
-        goto out;
-    }
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+    if (time_to_inject(FAULT_DIR_DEPTH))
+        return -ENOSPC;
+#endif
+    if (unlikely(current_depth == MAX_DIR_HASH_DEPTH))
+        return -ENOSPC;

     /* Increase the depth, if required */
     if (level == current_depth)
@@ -573,10 +560,8 @@ int __f2fs_add_link(struct inode *dir, const struct qstr *name,
     for (block = bidx; block <= (bidx + nblock - 1); block++) {
         dentry_page = get_new_data_page(dir, NULL, block, true);
-        if (IS_ERR(dentry_page)) {
-            err = PTR_ERR(dentry_page);
-            goto out;
-        }
+        if (IS_ERR(dentry_page))
+            return PTR_ERR(dentry_page);

         dentry_blk = kmap(dentry_page);
         bit_pos = room_for_filename(&dentry_blk->dentry_bitmap,
@@ -596,7 +581,7 @@ int __f2fs_add_link(struct inode *dir, const struct qstr *name,
     if (inode) {
         down_write(&F2FS_I(inode)->i_sem);
-        page = init_inode_metadata(inode, dir, &new_name, NULL);
+        page = init_inode_metadata(inode, dir, new_name, NULL);
         if (IS_ERR(page)) {
             err = PTR_ERR(page);
             goto fail;
@@ -606,7 +591,7 @@ int __f2fs_add_link(struct inode *dir, const struct qstr *name,
     }

     make_dentry_ptr(NULL, &d, (void *)dentry_blk, 1);
-    f2fs_update_dentry(ino, mode, &d, &new_name, dentry_hash, bit_pos);
+    f2fs_update_dentry(ino, mode, &d, new_name, dentry_hash, bit_pos);

     set_page_dirty(dentry_page);
@@ -628,7 +613,34 @@ int __f2fs_add_link(struct inode *dir, const struct qstr *name,
     }
     kunmap(dentry_page);
     f2fs_put_page(dentry_page, 1);
-out:
+
+    return err;
+}
+
+/*
+ * Caller should grab and release a rwsem by calling f2fs_lock_op() and
+ * f2fs_unlock_op().
+ */
+int __f2fs_add_link(struct inode *dir, const struct qstr *name,
+                struct inode *inode, nid_t ino, umode_t mode)
+{
+    struct fscrypt_name fname;
+    struct qstr new_name;
+    int err;
+
+    err = fscrypt_setup_filename(dir, name, 0, &fname);
+    if (err)
+        return err;
+
+    new_name.name = fname_name(&fname);
+    new_name.len = fname_len(&fname);
+
+    err = -EAGAIN;
+    if (f2fs_has_inline_dentry(dir))
+        err = f2fs_add_inline_entry(dir, &new_name, inode, ino, mode);
+    if (err == -EAGAIN)
+        err = f2fs_add_regular_entry(dir, &new_name, inode, ino, mode);
+
     fscrypt_free_filename(&fname);
     f2fs_update_time(F2FS_I_SB(dir), REQ_TIME);
     return err;
@@ -792,10 +804,7 @@ bool f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
             continue;
         }

-        if (de->file_type < F2FS_FT_MAX)
-            d_type = f2fs_filetype_table[de->file_type];
-        else
-            d_type = DT_UNKNOWN;
+        d_type = get_de_type(de);

         de_name.name = d->filename[bit_pos];
         de_name.len = le16_to_cpu(de->name_len);
@@ -804,7 +813,7 @@ bool f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
             int save_len = fstr->len;
             int ret;

-            de_name.name = kmalloc(de_name.len, GFP_NOFS);
+            de_name.name = f2fs_kmalloc(de_name.len, GFP_NOFS);
             if (!de_name.name)
                 return false;
@@ -887,6 +896,7 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx)
         kunmap(dentry_page);
         f2fs_put_page(dentry_page, 1);
     }
+    err = 0;
 out:
     fscrypt_fname_free_buffer(&fstr);
     return err;
diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
@@ -196,8 +196,7 @@ bool f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext)
     if (!i_ext || !i_ext->len)
         return false;

-    set_extent_info(&ei, le32_to_cpu(i_ext->fofs),
-        le32_to_cpu(i_ext->blk), le32_to_cpu(i_ext->len));
+    get_extent_info(&ei, i_ext);

     write_lock(&et->lock);
     if (atomic_read(&et->node_cnt))
...@@ -37,6 +37,57 @@ ...@@ -37,6 +37,57 @@
} while (0) } while (0)
#endif #endif
#ifdef CONFIG_F2FS_FAULT_INJECTION
enum {
FAULT_KMALLOC,
FAULT_PAGE_ALLOC,
FAULT_ALLOC_NID,
FAULT_ORPHAN,
FAULT_BLOCK,
FAULT_DIR_DEPTH,
FAULT_MAX,
};
struct f2fs_fault_info {
atomic_t inject_ops;
unsigned int inject_rate;
unsigned int inject_type;
};
extern struct f2fs_fault_info f2fs_fault;
extern char *fault_name[FAULT_MAX];
#define IS_FAULT_SET(type) (f2fs_fault.inject_type & (1 << (type)))
static inline bool time_to_inject(int type)
{
if (!f2fs_fault.inject_rate)
return false;
if (type == FAULT_KMALLOC && !IS_FAULT_SET(type))
return false;
else if (type == FAULT_PAGE_ALLOC && !IS_FAULT_SET(type))
return false;
else if (type == FAULT_ALLOC_NID && !IS_FAULT_SET(type))
return false;
else if (type == FAULT_ORPHAN && !IS_FAULT_SET(type))
return false;
else if (type == FAULT_BLOCK && !IS_FAULT_SET(type))
return false;
else if (type == FAULT_DIR_DEPTH && !IS_FAULT_SET(type))
return false;
atomic_inc(&f2fs_fault.inject_ops);
if (atomic_read(&f2fs_fault.inject_ops) >= f2fs_fault.inject_rate) {
atomic_set(&f2fs_fault.inject_ops, 0);
printk("%sF2FS-fs : inject %s in %pF\n",
KERN_INFO,
fault_name[type],
__builtin_return_address(0));
return true;
}
return false;
}
#endif
/* /*
* For mount options * For mount options
*/ */
...@@ -56,6 +107,7 @@ ...@@ -56,6 +107,7 @@
#define F2FS_MOUNT_EXTENT_CACHE 0x00002000 #define F2FS_MOUNT_EXTENT_CACHE 0x00002000
#define F2FS_MOUNT_FORCE_FG_GC 0x00004000 #define F2FS_MOUNT_FORCE_FG_GC 0x00004000
#define F2FS_MOUNT_DATA_FLUSH 0x00008000 #define F2FS_MOUNT_DATA_FLUSH 0x00008000
#define F2FS_MOUNT_FAULT_INJECTION 0x00010000
#define clear_opt(sbi, option) (sbi->mount_opt.opt &= ~F2FS_MOUNT_##option) #define clear_opt(sbi, option) (sbi->mount_opt.opt &= ~F2FS_MOUNT_##option)
#define set_opt(sbi, option) (sbi->mount_opt.opt |= F2FS_MOUNT_##option) #define set_opt(sbi, option) (sbi->mount_opt.opt |= F2FS_MOUNT_##option)
...@@ -159,7 +211,6 @@ struct fsync_inode_entry { ...@@ -159,7 +211,6 @@ struct fsync_inode_entry {
struct inode *inode; /* vfs inode pointer */ struct inode *inode; /* vfs inode pointer */
block_t blkaddr; /* block address locating the last fsync */ block_t blkaddr; /* block address locating the last fsync */
block_t last_dentry; /* block address locating the last dentry */ block_t last_dentry; /* block address locating the last dentry */
block_t last_inode; /* block address locating the last inode */
}; };
#define nats_in_cursum(jnl) (le16_to_cpu(jnl->n_nats)) #define nats_in_cursum(jnl) (le16_to_cpu(jnl->n_nats))
...@@ -385,7 +436,7 @@ struct f2fs_inode_info { ...@@ -385,7 +436,7 @@ struct f2fs_inode_info {
/* Use below internally in f2fs*/ /* Use below internally in f2fs*/
unsigned long flags; /* use to pass per-file flags */ unsigned long flags; /* use to pass per-file flags */
struct rw_semaphore i_sem; /* protect fi info */ struct rw_semaphore i_sem; /* protect fi info */
atomic_t dirty_pages; /* # of dirty pages */ struct percpu_counter dirty_pages; /* # of dirty pages */
f2fs_hash_t chash; /* hash value of given file name */ f2fs_hash_t chash; /* hash value of given file name */
unsigned int clevel; /* maximum level of given file name */ unsigned int clevel; /* maximum level of given file name */
nid_t i_xattr_nid; /* node id that contains xattrs */ nid_t i_xattr_nid; /* node id that contains xattrs */
...@@ -398,11 +449,11 @@ struct f2fs_inode_info { ...@@ -398,11 +449,11 @@ struct f2fs_inode_info {
}; };
static inline void get_extent_info(struct extent_info *ext, static inline void get_extent_info(struct extent_info *ext,
struct f2fs_extent i_ext) struct f2fs_extent *i_ext)
{ {
ext->fofs = le32_to_cpu(i_ext.fofs); ext->fofs = le32_to_cpu(i_ext->fofs);
ext->blk = le32_to_cpu(i_ext.blk); ext->blk = le32_to_cpu(i_ext->blk);
ext->len = le32_to_cpu(i_ext.len); ext->len = le32_to_cpu(i_ext->len);
} }
static inline void set_raw_extent(struct extent_info *ext, static inline void set_raw_extent(struct extent_info *ext,
...@@ -599,7 +650,6 @@ struct f2fs_sm_info { ...@@ -599,7 +650,6 @@ struct f2fs_sm_info {
* dirty dentry blocks, dirty node blocks, and dirty meta blocks. * dirty dentry blocks, dirty node blocks, and dirty meta blocks.
*/ */
enum count_type { enum count_type {
F2FS_WRITEBACK,
F2FS_DIRTY_DENTS, F2FS_DIRTY_DENTS,
F2FS_DIRTY_DATA, F2FS_DIRTY_DATA,
F2FS_DIRTY_NODES, F2FS_DIRTY_NODES,
...@@ -672,6 +722,7 @@ enum { ...@@ -672,6 +722,7 @@ enum {
SBI_IS_CLOSE, /* specify unmounting */ SBI_IS_CLOSE, /* specify unmounting */
SBI_NEED_FSCK, /* need fsck.f2fs to fix */ SBI_NEED_FSCK, /* need fsck.f2fs to fix */
SBI_POR_DOING, /* recovery is doing or not */ SBI_POR_DOING, /* recovery is doing or not */
SBI_NEED_SB_WRITE, /* need to recover superblock */
}; };
enum { enum {
...@@ -680,6 +731,10 @@ enum { ...@@ -680,6 +731,10 @@ enum {
MAX_TIME, MAX_TIME,
}; };
#ifdef CONFIG_F2FS_FS_ENCRYPTION
#define F2FS_KEY_DESC_PREFIX "f2fs:"
#define F2FS_KEY_DESC_PREFIX_SIZE 5
#endif
struct f2fs_sb_info { struct f2fs_sb_info {
struct super_block *sb; /* pointer to VFS super block */ struct super_block *sb; /* pointer to VFS super block */
struct proc_dir_entry *s_proc; /* proc entry */ struct proc_dir_entry *s_proc; /* proc entry */
...@@ -687,6 +742,10 @@ struct f2fs_sb_info { ...@@ -687,6 +742,10 @@ struct f2fs_sb_info {
int valid_super_block; /* valid super block no */ int valid_super_block; /* valid super block no */
int s_flag; /* flags for sbi */ int s_flag; /* flags for sbi */
#ifdef CONFIG_F2FS_FS_ENCRYPTION
u8 key_prefix[F2FS_KEY_DESC_PREFIX_SIZE];
u8 key_prefix_size;
#endif
/* for node-related operations */ /* for node-related operations */
struct f2fs_nm_info *nm_info; /* node manager */ struct f2fs_nm_info *nm_info; /* node manager */
struct inode *node_inode; /* cache node blocks */ struct inode *node_inode; /* cache node blocks */
...@@ -742,18 +801,24 @@ struct f2fs_sb_info { ...@@ -742,18 +801,24 @@ struct f2fs_sb_info {
unsigned int total_sections; /* total section count */ unsigned int total_sections; /* total section count */
unsigned int total_node_count; /* total node block count */ unsigned int total_node_count; /* total node block count */
unsigned int total_valid_node_count; /* valid node block count */ unsigned int total_valid_node_count; /* valid node block count */
unsigned int total_valid_inode_count; /* valid inode count */
loff_t max_file_blocks; /* max block index of file */ loff_t max_file_blocks; /* max block index of file */
int active_logs; /* # of active logs */ int active_logs; /* # of active logs */
int dir_level; /* directory level */ int dir_level; /* directory level */
block_t user_block_count; /* # of user blocks */ block_t user_block_count; /* # of user blocks */
block_t total_valid_block_count; /* # of valid blocks */ block_t total_valid_block_count; /* # of valid blocks */
block_t alloc_valid_block_count; /* # of allocated blocks */
block_t discard_blks; /* discard command candidates */ block_t discard_blks; /* discard command candidates */
block_t last_valid_block_count; /* for recovery */ block_t last_valid_block_count; /* for recovery */
u32 s_next_generation; /* for NFS support */ u32 s_next_generation; /* for NFS support */
atomic_t nr_pages[NR_COUNT_TYPE]; /* # of pages, see count_type */ atomic_t nr_wb_bios; /* # of writeback bios */
/* # of pages, see count_type */
struct percpu_counter nr_pages[NR_COUNT_TYPE];
/* # of allocated blocks */
struct percpu_counter alloc_valid_block_count;
/* valid inode count */
struct percpu_counter total_valid_inode_count;
struct f2fs_mount_info mount_opt; /* mount options */ struct f2fs_mount_info mount_opt; /* mount options */
...@@ -1055,21 +1120,33 @@ static inline bool f2fs_has_xattr_block(unsigned int ofs) ...@@ -1055,21 +1120,33 @@ static inline bool f2fs_has_xattr_block(unsigned int ofs)
} }
static inline bool inc_valid_block_count(struct f2fs_sb_info *sbi, static inline bool inc_valid_block_count(struct f2fs_sb_info *sbi,
struct inode *inode, blkcnt_t count) struct inode *inode, blkcnt_t *count)
{ {
block_t valid_block_count; block_t valid_block_count;
spin_lock(&sbi->stat_lock); spin_lock(&sbi->stat_lock);
#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(FAULT_BLOCK)) {
spin_unlock(&sbi->stat_lock);
return false;
}
#endif
valid_block_count = valid_block_count =
sbi->total_valid_block_count + (block_t)count; sbi->total_valid_block_count + (block_t)(*count);
if (unlikely(valid_block_count > sbi->user_block_count)) { if (unlikely(valid_block_count > sbi->user_block_count)) {
*count = sbi->user_block_count - sbi->total_valid_block_count;
if (!*count) {
spin_unlock(&sbi->stat_lock); spin_unlock(&sbi->stat_lock);
return false; return false;
} }
inode->i_blocks += count; }
sbi->total_valid_block_count = valid_block_count; /* *count can be recalculated */
sbi->alloc_valid_block_count += (block_t)count; inode->i_blocks += *count;
sbi->total_valid_block_count =
sbi->total_valid_block_count + (block_t)(*count);
spin_unlock(&sbi->stat_lock); spin_unlock(&sbi->stat_lock);
percpu_counter_add(&sbi->alloc_valid_block_count, (*count));
return true; return true;
} }
...@@ -1087,20 +1164,20 @@ static inline void dec_valid_block_count(struct f2fs_sb_info *sbi, ...@@ -1087,20 +1164,20 @@ static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type) static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
{ {
atomic_inc(&sbi->nr_pages[count_type]); percpu_counter_inc(&sbi->nr_pages[count_type]);
set_sbi_flag(sbi, SBI_IS_DIRTY); set_sbi_flag(sbi, SBI_IS_DIRTY);
} }
static inline void inode_inc_dirty_pages(struct inode *inode) static inline void inode_inc_dirty_pages(struct inode *inode)
{ {
atomic_inc(&F2FS_I(inode)->dirty_pages); percpu_counter_inc(&F2FS_I(inode)->dirty_pages);
inc_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ? inc_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA); F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
} }
static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type) static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type)
{ {
atomic_dec(&sbi->nr_pages[count_type]); percpu_counter_dec(&sbi->nr_pages[count_type]);
} }
static inline void inode_dec_dirty_pages(struct inode *inode) static inline void inode_dec_dirty_pages(struct inode *inode)
...@@ -1109,26 +1186,28 @@ static inline void inode_dec_dirty_pages(struct inode *inode) ...@@ -1109,26 +1186,28 @@ static inline void inode_dec_dirty_pages(struct inode *inode)
!S_ISLNK(inode->i_mode)) !S_ISLNK(inode->i_mode))
return; return;
atomic_dec(&F2FS_I(inode)->dirty_pages); percpu_counter_dec(&F2FS_I(inode)->dirty_pages);
dec_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ? dec_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA); F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
} }
static inline int get_pages(struct f2fs_sb_info *sbi, int count_type) static inline s64 get_pages(struct f2fs_sb_info *sbi, int count_type)
{ {
return atomic_read(&sbi->nr_pages[count_type]); return percpu_counter_sum_positive(&sbi->nr_pages[count_type]);
} }
static inline int get_dirty_pages(struct inode *inode) static inline s64 get_dirty_pages(struct inode *inode)
{ {
return atomic_read(&F2FS_I(inode)->dirty_pages); return percpu_counter_sum_positive(&F2FS_I(inode)->dirty_pages);
} }
static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type) static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type)
{ {
unsigned int pages_per_sec = sbi->segs_per_sec * sbi->blocks_per_seg; unsigned int pages_per_sec = sbi->segs_per_sec * sbi->blocks_per_seg;
return ((get_pages(sbi, block_type) + pages_per_sec - 1) unsigned int segs = (get_pages(sbi, block_type) + pages_per_sec - 1) >>
>> sbi->log_blocks_per_seg) / sbi->segs_per_sec; sbi->log_blocks_per_seg;
return segs / sbi->segs_per_sec;
} }
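/*
 * A minimal sketch of the percpu counter pattern behind the atomic_t ->
 * struct percpu_counter conversions above (hypothetical names, assumes
 * <linux/percpu_counter.h>): updates touch only the local CPU's delta,
 * and percpu_counter_sum_positive() folds the deltas into an exact,
 * non-negative total when a precise value is needed, as in get_pages()
 * and get_dirty_pages().
 */
static inline int example_counter_init(struct percpu_counter *c)
{
	/* allocates the per-CPU storage; pair with percpu_counter_destroy() */
	return percpu_counter_init(c, 0, GFP_KERNEL);
}

static inline void example_counter_update(struct percpu_counter *c)
{
	/* cheap per-CPU fast path; only takes the shared lock when the
	 * local batch overflows */
	percpu_counter_inc(c);
	percpu_counter_dec(c);
}

static inline s64 example_counter_read(struct percpu_counter *c)
{
	return percpu_counter_sum_positive(c);	/* exact, never negative */
}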
static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi) static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi)
...@@ -1217,11 +1296,11 @@ static inline bool inc_valid_node_count(struct f2fs_sb_info *sbi, ...@@ -1217,11 +1296,11 @@ static inline bool inc_valid_node_count(struct f2fs_sb_info *sbi,
if (inode) if (inode)
inode->i_blocks++; inode->i_blocks++;
sbi->alloc_valid_block_count++;
sbi->total_valid_node_count++; sbi->total_valid_node_count++;
sbi->total_valid_block_count++; sbi->total_valid_block_count++;
spin_unlock(&sbi->stat_lock); spin_unlock(&sbi->stat_lock);
percpu_counter_inc(&sbi->alloc_valid_block_count);
return true; return true;
} }
...@@ -1248,28 +1327,30 @@ static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi) ...@@ -1248,28 +1327,30 @@ static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi)
static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi) static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi)
{ {
spin_lock(&sbi->stat_lock); percpu_counter_inc(&sbi->total_valid_inode_count);
f2fs_bug_on(sbi, sbi->total_valid_inode_count == sbi->total_node_count);
sbi->total_valid_inode_count++;
spin_unlock(&sbi->stat_lock);
} }
static inline void dec_valid_inode_count(struct f2fs_sb_info *sbi) static inline void dec_valid_inode_count(struct f2fs_sb_info *sbi)
{ {
spin_lock(&sbi->stat_lock); percpu_counter_dec(&sbi->total_valid_inode_count);
f2fs_bug_on(sbi, !sbi->total_valid_inode_count);
sbi->total_valid_inode_count--;
spin_unlock(&sbi->stat_lock);
} }
static inline unsigned int valid_inode_count(struct f2fs_sb_info *sbi) static inline s64 valid_inode_count(struct f2fs_sb_info *sbi)
{ {
return sbi->total_valid_inode_count; return percpu_counter_sum_positive(&sbi->total_valid_inode_count);
} }
static inline struct page *f2fs_grab_cache_page(struct address_space *mapping, static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
pgoff_t index, bool for_write) pgoff_t index, bool for_write)
{ {
#ifdef CONFIG_F2FS_FAULT_INJECTION
struct page *page = find_lock_page(mapping, index);
if (page)
return page;
if (time_to_inject(FAULT_PAGE_ALLOC))
return NULL;
#endif
if (!for_write) if (!for_write)
return grab_cache_page(mapping, index); return grab_cache_page(mapping, index);
return grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS); return grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
...@@ -1435,7 +1516,6 @@ enum { ...@@ -1435,7 +1516,6 @@ enum {
FI_NO_ALLOC, /* should not allocate any blocks */ FI_NO_ALLOC, /* should not allocate any blocks */
FI_FREE_NID, /* free allocated nid */ FI_FREE_NID, /* free allocated nid */
FI_UPDATE_DIR, /* should update inode block for consistency */ FI_UPDATE_DIR, /* should update inode block for consistency */
FI_DELAY_IPUT, /* used for the recovery */
FI_NO_EXTENT, /* not to use the extent cache */ FI_NO_EXTENT, /* not to use the extent cache */
FI_INLINE_XATTR, /* used for inline xattr */ FI_INLINE_XATTR, /* used for inline xattr */
FI_INLINE_DATA, /* used for inline data */ FI_INLINE_DATA, /* used for inline data */
...@@ -1618,12 +1698,6 @@ static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi) ...@@ -1618,12 +1698,6 @@ static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi)
return is_set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG); return is_set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
} }
static inline void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi)
{
set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
sbi->sb->s_flags |= MS_RDONLY;
}
static inline bool is_dot_dotdot(const struct qstr *str) static inline bool is_dot_dotdot(const struct qstr *str)
{ {
if (str->len == 1 && str->name[0] == '.') if (str->len == 1 && str->name[0] == '.')
...@@ -1644,6 +1718,15 @@ static inline bool f2fs_may_extent_tree(struct inode *inode) ...@@ -1644,6 +1718,15 @@ static inline bool f2fs_may_extent_tree(struct inode *inode)
return S_ISREG(inode->i_mode); return S_ISREG(inode->i_mode);
} }
static inline void *f2fs_kmalloc(size_t size, gfp_t flags)
{
#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(FAULT_KMALLOC))
return NULL;
#endif
return kmalloc(size, flags);
}
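/*
 * time_to_inject() is defined elsewhere in this series; a minimal
 * sketch of the idea behind such a hook (hypothetical names, a simple
 * 1-in-N counting injector that ignores the per-type enable bits a
 * real implementation would also check) might look like:
 */
#ifdef CONFIG_F2FS_FAULT_INJECTION
extern unsigned int example_inject_rate;	/* 0 disables injection */
extern atomic_t example_inject_ops;

static inline bool example_time_to_inject(int type)
{
	if (!example_inject_rate)
		return false;
	if (atomic_inc_return(&example_inject_ops) < example_inject_rate)
		return false;
	/* fire once every example_inject_rate calls, then restart counting */
	atomic_set(&example_inject_ops, 0);
	return true;
}
#endif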
static inline void *f2fs_kvmalloc(size_t size, gfp_t flags) static inline void *f2fs_kvmalloc(size_t size, gfp_t flags)
{ {
void *ret; void *ret;
...@@ -1710,7 +1793,7 @@ struct dentry *f2fs_get_parent(struct dentry *child); ...@@ -1710,7 +1793,7 @@ struct dentry *f2fs_get_parent(struct dentry *child);
*/ */
extern unsigned char f2fs_filetype_table[F2FS_FT_MAX]; extern unsigned char f2fs_filetype_table[F2FS_FT_MAX];
void set_de_type(struct f2fs_dir_entry *, umode_t); void set_de_type(struct f2fs_dir_entry *, umode_t);
unsigned char get_de_type(struct f2fs_dir_entry *);
struct f2fs_dir_entry *find_target_dentry(struct fscrypt_name *, struct f2fs_dir_entry *find_target_dentry(struct fscrypt_name *,
f2fs_hash_t, int *, struct f2fs_dentry_ptr *); f2fs_hash_t, int *, struct f2fs_dentry_ptr *);
bool f2fs_fill_dentries(struct dir_context *, struct f2fs_dentry_ptr *, bool f2fs_fill_dentries(struct dir_context *, struct f2fs_dentry_ptr *,
...@@ -1731,6 +1814,8 @@ void f2fs_set_link(struct inode *, struct f2fs_dir_entry *, ...@@ -1731,6 +1814,8 @@ void f2fs_set_link(struct inode *, struct f2fs_dir_entry *,
int update_dent_inode(struct inode *, struct inode *, const struct qstr *); int update_dent_inode(struct inode *, struct inode *, const struct qstr *);
void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *, void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *,
const struct qstr *, f2fs_hash_t , unsigned int); const struct qstr *, f2fs_hash_t , unsigned int);
int f2fs_add_regular_entry(struct inode *, const struct qstr *,
struct inode *, nid_t, umode_t);
int __f2fs_add_link(struct inode *, const struct qstr *, struct inode *, nid_t, int __f2fs_add_link(struct inode *, const struct qstr *, struct inode *, nid_t,
umode_t); umode_t);
void f2fs_delete_entry(struct f2fs_dir_entry *, struct page *, struct inode *, void f2fs_delete_entry(struct f2fs_dir_entry *, struct page *, struct inode *,
...@@ -1781,7 +1866,10 @@ void ra_node_page(struct f2fs_sb_info *, nid_t); ...@@ -1781,7 +1866,10 @@ void ra_node_page(struct f2fs_sb_info *, nid_t);
struct page *get_node_page(struct f2fs_sb_info *, pgoff_t); struct page *get_node_page(struct f2fs_sb_info *, pgoff_t);
struct page *get_node_page_ra(struct page *, int); struct page *get_node_page_ra(struct page *, int);
void sync_inode_page(struct dnode_of_data *); void sync_inode_page(struct dnode_of_data *);
int sync_node_pages(struct f2fs_sb_info *, nid_t, struct writeback_control *); void move_node_page(struct page *, int);
int fsync_node_pages(struct f2fs_sb_info *, nid_t, struct writeback_control *,
bool);
int sync_node_pages(struct f2fs_sb_info *, struct writeback_control *);
bool alloc_nid(struct f2fs_sb_info *, nid_t *); bool alloc_nid(struct f2fs_sb_info *, nid_t *);
void alloc_nid_done(struct f2fs_sb_info *, nid_t); void alloc_nid_done(struct f2fs_sb_info *, nid_t);
void alloc_nid_failed(struct f2fs_sb_info *, nid_t); void alloc_nid_failed(struct f2fs_sb_info *, nid_t);
...@@ -1843,6 +1931,7 @@ void destroy_segment_manager_caches(void); ...@@ -1843,6 +1931,7 @@ void destroy_segment_manager_caches(void);
/* /*
* checkpoint.c * checkpoint.c
*/ */
void f2fs_stop_checkpoint(struct f2fs_sb_info *, bool);
struct page *grab_meta_page(struct f2fs_sb_info *, pgoff_t); struct page *grab_meta_page(struct f2fs_sb_info *, pgoff_t);
struct page *get_meta_page(struct f2fs_sb_info *, pgoff_t); struct page *get_meta_page(struct f2fs_sb_info *, pgoff_t);
struct page *get_tmp_page(struct f2fs_sb_info *, pgoff_t); struct page *get_tmp_page(struct f2fs_sb_info *, pgoff_t);
...@@ -1852,7 +1941,7 @@ void ra_meta_pages_cond(struct f2fs_sb_info *, pgoff_t); ...@@ -1852,7 +1941,7 @@ void ra_meta_pages_cond(struct f2fs_sb_info *, pgoff_t);
long sync_meta_pages(struct f2fs_sb_info *, enum page_type, long); long sync_meta_pages(struct f2fs_sb_info *, enum page_type, long);
void add_ino_entry(struct f2fs_sb_info *, nid_t, int type); void add_ino_entry(struct f2fs_sb_info *, nid_t, int type);
void remove_ino_entry(struct f2fs_sb_info *, nid_t, int type); void remove_ino_entry(struct f2fs_sb_info *, nid_t, int type);
void release_ino_entry(struct f2fs_sb_info *); void release_ino_entry(struct f2fs_sb_info *, bool);
bool exist_written_data(struct f2fs_sb_info *, nid_t, int); bool exist_written_data(struct f2fs_sb_info *, nid_t, int);
int acquire_orphan_inode(struct f2fs_sb_info *); int acquire_orphan_inode(struct f2fs_sb_info *);
void release_orphan_inode(struct f2fs_sb_info *); void release_orphan_inode(struct f2fs_sb_info *);
...@@ -1861,7 +1950,6 @@ void remove_orphan_inode(struct f2fs_sb_info *, nid_t); ...@@ -1861,7 +1950,6 @@ void remove_orphan_inode(struct f2fs_sb_info *, nid_t);
int recover_orphan_inodes(struct f2fs_sb_info *); int recover_orphan_inodes(struct f2fs_sb_info *);
int get_valid_checkpoint(struct f2fs_sb_info *); int get_valid_checkpoint(struct f2fs_sb_info *);
void update_dirty_page(struct inode *, struct page *); void update_dirty_page(struct inode *, struct page *);
void add_dirty_dir_inode(struct inode *);
void remove_dirty_inode(struct inode *); void remove_dirty_inode(struct inode *);
int sync_dirty_inodes(struct f2fs_sb_info *, enum inode_type); int sync_dirty_inodes(struct f2fs_sb_info *, enum inode_type);
int write_checkpoint(struct f2fs_sb_info *, struct cp_control *); int write_checkpoint(struct f2fs_sb_info *, struct cp_control *);
...@@ -1880,6 +1968,7 @@ int f2fs_submit_page_bio(struct f2fs_io_info *); ...@@ -1880,6 +1968,7 @@ int f2fs_submit_page_bio(struct f2fs_io_info *);
void f2fs_submit_page_mbio(struct f2fs_io_info *); void f2fs_submit_page_mbio(struct f2fs_io_info *);
void set_data_blkaddr(struct dnode_of_data *); void set_data_blkaddr(struct dnode_of_data *);
void f2fs_update_data_blkaddr(struct dnode_of_data *, block_t); void f2fs_update_data_blkaddr(struct dnode_of_data *, block_t);
int reserve_new_blocks(struct dnode_of_data *, blkcnt_t);
int reserve_new_block(struct dnode_of_data *); int reserve_new_block(struct dnode_of_data *);
int f2fs_get_block(struct dnode_of_data *, pgoff_t); int f2fs_get_block(struct dnode_of_data *, pgoff_t);
ssize_t f2fs_preallocate_blocks(struct kiocb *, struct iov_iter *); ssize_t f2fs_preallocate_blocks(struct kiocb *, struct iov_iter *);
...@@ -1906,7 +1995,7 @@ void build_gc_manager(struct f2fs_sb_info *); ...@@ -1906,7 +1995,7 @@ void build_gc_manager(struct f2fs_sb_info *);
/* /*
* recovery.c * recovery.c
*/ */
int recover_fsync_data(struct f2fs_sb_info *); int recover_fsync_data(struct f2fs_sb_info *, bool);
bool space_for_roll_forward(struct f2fs_sb_info *); bool space_for_roll_forward(struct f2fs_sb_info *);
/* /*
...@@ -1921,12 +2010,12 @@ struct f2fs_stat_info { ...@@ -1921,12 +2010,12 @@ struct f2fs_stat_info {
unsigned long long hit_largest, hit_cached, hit_rbtree; unsigned long long hit_largest, hit_cached, hit_rbtree;
unsigned long long hit_total, total_ext; unsigned long long hit_total, total_ext;
int ext_tree, zombie_tree, ext_node; int ext_tree, zombie_tree, ext_node;
int ndirty_node, ndirty_meta; s64 ndirty_node, ndirty_dent, ndirty_meta, ndirty_data, inmem_pages;
int ndirty_dent, ndirty_dirs, ndirty_data, ndirty_files; unsigned int ndirty_dirs, ndirty_files;
int nats, dirty_nats, sits, dirty_sits, fnids; int nats, dirty_nats, sits, dirty_sits, fnids;
int total_count, utilization; int total_count, utilization;
int bg_gc, inmem_pages, wb_pages; int bg_gc, wb_bios;
int inline_xattr, inline_inode, inline_dir; int inline_xattr, inline_inode, inline_dir, orphans;
unsigned int valid_count, valid_node_count, valid_inode_count; unsigned int valid_count, valid_node_count, valid_inode_count;
unsigned int bimodal, avg_vblocks; unsigned int bimodal, avg_vblocks;
int util_free, util_valid, util_invalid; int util_free, util_valid, util_invalid;
......
...@@ -182,7 +182,8 @@ static void try_to_fix_pino(struct inode *inode) ...@@ -182,7 +182,8 @@ static void try_to_fix_pino(struct inode *inode)
} }
} }
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
int datasync, bool atomic)
{ {
struct inode *inode = file->f_mapping->host; struct inode *inode = file->f_mapping->host;
struct f2fs_inode_info *fi = F2FS_I(inode); struct f2fs_inode_info *fi = F2FS_I(inode);
...@@ -256,7 +257,9 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) ...@@ -256,7 +257,9 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
goto out; goto out;
} }
sync_nodes: sync_nodes:
sync_node_pages(sbi, ino, &wbc); ret = fsync_node_pages(sbi, ino, &wbc, atomic);
if (ret)
goto out;
/* if cp_error was enabled, we should avoid infinite loop */ /* if cp_error was enabled, we should avoid infinite loop */
if (unlikely(f2fs_cp_error(sbi))) { if (unlikely(f2fs_cp_error(sbi))) {
...@@ -288,6 +291,11 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) ...@@ -288,6 +291,11 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
return ret; return ret;
} }
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
return f2fs_do_sync_file(file, start, end, datasync, false);
}
static pgoff_t __get_first_dirty_index(struct address_space *mapping, static pgoff_t __get_first_dirty_index(struct address_space *mapping,
pgoff_t pgofs, int whence) pgoff_t pgofs, int whence)
{ {
...@@ -555,6 +563,9 @@ int truncate_blocks(struct inode *inode, u64 from, bool lock) ...@@ -555,6 +563,9 @@ int truncate_blocks(struct inode *inode, u64 from, bool lock)
free_from = (pgoff_t)F2FS_BYTES_TO_BLK(from + blocksize - 1); free_from = (pgoff_t)F2FS_BYTES_TO_BLK(from + blocksize - 1);
if (free_from >= sbi->max_file_blocks)
goto free_partial;
if (lock) if (lock)
f2fs_lock_op(sbi); f2fs_lock_op(sbi);
...@@ -573,7 +584,7 @@ int truncate_blocks(struct inode *inode, u64 from, bool lock) ...@@ -573,7 +584,7 @@ int truncate_blocks(struct inode *inode, u64 from, bool lock)
} }
set_new_dnode(&dn, inode, ipage, NULL, 0); set_new_dnode(&dn, inode, ipage, NULL, 0);
err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE); err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
if (err) { if (err) {
if (err == -ENOENT) if (err == -ENOENT)
goto free_next; goto free_next;
...@@ -596,7 +607,7 @@ int truncate_blocks(struct inode *inode, u64 from, bool lock) ...@@ -596,7 +607,7 @@ int truncate_blocks(struct inode *inode, u64 from, bool lock)
out: out:
if (lock) if (lock)
f2fs_unlock_op(sbi); f2fs_unlock_op(sbi);
free_partial:
/* lastly zero out the first data page */ /* lastly zero out the first data page */
if (!err) if (!err)
err = truncate_partial_data_page(inode, from, truncate_page); err = truncate_partial_data_page(inode, from, truncate_page);
...@@ -986,6 +997,49 @@ static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len) ...@@ -986,6 +997,49 @@ static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
return ret; return ret;
} }
static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
pgoff_t end)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
pgoff_t index = start;
unsigned int ofs_in_node = dn->ofs_in_node;
blkcnt_t count = 0;
int ret;
for (; index < end; index++, dn->ofs_in_node++) {
if (datablock_addr(dn->node_page, dn->ofs_in_node) == NULL_ADDR)
count++;
}
dn->ofs_in_node = ofs_in_node;
ret = reserve_new_blocks(dn, count);
if (ret)
return ret;
dn->ofs_in_node = ofs_in_node;
for (index = start; index < end; index++, dn->ofs_in_node++) {
dn->data_blkaddr =
datablock_addr(dn->node_page, dn->ofs_in_node);
/*
* reserve_new_blocks() does not guarantee that every requested
* block gets allocated.
*/
if (dn->data_blkaddr == NULL_ADDR) {
ret = -ENOSPC;
break;
}
if (dn->data_blkaddr != NEW_ADDR) {
invalidate_blocks(sbi, dn->data_blkaddr);
dn->data_blkaddr = NEW_ADDR;
set_data_blkaddr(dn);
}
}
f2fs_update_extent_cache_range(dn, start, 0, index - start);
return ret;
}
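/*
 * Note on the helper above: it walks the dnode slots in [start, end)
 * twice. The first pass only counts NULL_ADDR slots so that
 * reserve_new_blocks() can reserve them as one batch (the block
 * accounting in inc_valid_block_count() is then charged once per batch
 * rather than once per block). The second pass invalidates any blocks
 * that already held data and leaves every processed slot at NEW_ADDR,
 * and the extent cache is refreshed for the whole range in one call.
 */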
static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len, static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
int mode) int mode)
{ {
...@@ -1036,35 +1090,32 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len, ...@@ -1036,35 +1090,32 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
(loff_t)pg_start << PAGE_SHIFT); (loff_t)pg_start << PAGE_SHIFT);
} }
for (index = pg_start; index < pg_end; index++) { for (index = pg_start; index < pg_end;) {
struct dnode_of_data dn; struct dnode_of_data dn;
struct page *ipage; unsigned int end_offset;
pgoff_t end;
f2fs_lock_op(sbi); f2fs_lock_op(sbi);
ipage = get_node_page(sbi, inode->i_ino); set_new_dnode(&dn, inode, NULL, NULL, 0);
if (IS_ERR(ipage)) { ret = get_dnode_of_data(&dn, index, ALLOC_NODE);
ret = PTR_ERR(ipage);
f2fs_unlock_op(sbi);
goto out;
}
set_new_dnode(&dn, inode, ipage, NULL, 0);
ret = f2fs_reserve_block(&dn, index);
if (ret) { if (ret) {
f2fs_unlock_op(sbi); f2fs_unlock_op(sbi);
goto out; goto out;
} }
if (dn.data_blkaddr != NEW_ADDR) { end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
invalidate_blocks(sbi, dn.data_blkaddr); end = min(pg_end, end_offset - dn.ofs_in_node + index);
f2fs_update_data_blkaddr(&dn, NEW_ADDR);
} ret = f2fs_do_zero_range(&dn, index, end);
f2fs_put_dnode(&dn); f2fs_put_dnode(&dn);
f2fs_unlock_op(sbi); f2fs_unlock_op(sbi);
if (ret)
goto out;
index = end;
new_size = max_t(loff_t, new_size, new_size = max_t(loff_t, new_size,
(loff_t)(index + 1) << PAGE_SHIFT); (loff_t)index << PAGE_SHIFT);
} }
if (off_end) { if (off_end) {
...@@ -1147,10 +1198,11 @@ static int expand_inode_data(struct inode *inode, loff_t offset, ...@@ -1147,10 +1198,11 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
loff_t len, int mode) loff_t len, int mode)
{ {
struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
pgoff_t index, pg_start, pg_end; struct f2fs_map_blocks map = { .m_next_pgofs = NULL };
pgoff_t pg_end;
loff_t new_size = i_size_read(inode); loff_t new_size = i_size_read(inode);
loff_t off_start, off_end; loff_t off_end;
int ret = 0; int ret;
ret = inode_newsize_ok(inode, (len + offset)); ret = inode_newsize_ok(inode, (len + offset));
if (ret) if (ret)
...@@ -1162,43 +1214,35 @@ static int expand_inode_data(struct inode *inode, loff_t offset, ...@@ -1162,43 +1214,35 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
f2fs_balance_fs(sbi, true); f2fs_balance_fs(sbi, true);
pg_start = ((unsigned long long) offset) >> PAGE_SHIFT; pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
off_start = offset & (PAGE_SIZE - 1);
off_end = (offset + len) & (PAGE_SIZE - 1); off_end = (offset + len) & (PAGE_SIZE - 1);
f2fs_lock_op(sbi); map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT;
map.m_len = pg_end - map.m_lblk;
if (off_end)
map.m_len++;
for (index = pg_start; index <= pg_end; index++) { ret = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
struct dnode_of_data dn; if (ret) {
pgoff_t last_off;
if (index == pg_end && !off_end) if (!map.m_len)
goto noalloc; return ret;
set_new_dnode(&dn, inode, NULL, NULL, 0); last_off = map.m_lblk + map.m_len - 1;
ret = f2fs_reserve_block(&dn, index);
if (ret) /* update new size to the failed position */
break; new_size = (last_off == pg_end) ? offset + len:
noalloc: (loff_t)(last_off + 1) << PAGE_SHIFT;
if (pg_start == pg_end) } else {
new_size = offset + len; new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
else if (index == pg_start && off_start)
new_size = (loff_t)(index + 1) << PAGE_SHIFT;
else if (index == pg_end)
new_size = ((loff_t)index << PAGE_SHIFT) +
off_end;
else
new_size += PAGE_SIZE;
} }
if (!(mode & FALLOC_FL_KEEP_SIZE) && if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size) {
i_size_read(inode) < new_size) {
i_size_write(inode, new_size); i_size_write(inode, new_size);
mark_inode_dirty(inode); mark_inode_dirty(inode);
update_inode_page(inode); update_inode_page(inode);
} }
f2fs_unlock_op(sbi);
return ret; return ret;
} }
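/*
 * Worked example for the rewritten expand_inode_data(), assuming 4KiB
 * pages: offset = 3000, len = 10000 gives pg_end = 13000 >> 12 = 3 and
 * off_end = 13000 & 4095 = 712, so map.m_lblk = 0 and map.m_len = 4.
 * If the whole preallocation succeeds, new_size = (3 << 12) + 712 =
 * 13000 = offset + len. If it stops early with, say, map.m_len = 2
 * (and map.m_lblk still 0), then last_off = 1 != pg_end, so new_size =
 * (1 + 1) << 12 = 8192, i.e. i_size only grows to cover the blocks
 * that were actually reserved.
 */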
...@@ -1254,10 +1298,19 @@ static long f2fs_fallocate(struct file *file, int mode, ...@@ -1254,10 +1298,19 @@ static long f2fs_fallocate(struct file *file, int mode,
static int f2fs_release_file(struct inode *inode, struct file *filp) static int f2fs_release_file(struct inode *inode, struct file *filp)
{ {
/*
* f2fs_release_file is called on every close call, so we should
* not drop any in-memory pages on a close issued by another process.
*/
if (!(filp->f_mode & FMODE_WRITE) ||
atomic_read(&inode->i_writecount) != 1)
return 0;
/* any remaining atomic pages should be discarded */ /* any remaining atomic pages should be discarded */
if (f2fs_is_atomic_file(inode)) if (f2fs_is_atomic_file(inode))
drop_inmem_pages(inode); drop_inmem_pages(inode);
if (f2fs_is_volatile_file(inode)) { if (f2fs_is_volatile_file(inode)) {
clear_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE);
set_inode_flag(F2FS_I(inode), FI_DROP_CACHE); set_inode_flag(F2FS_I(inode), FI_DROP_CACHE);
filemap_fdatawrite(inode->i_mapping); filemap_fdatawrite(inode->i_mapping);
clear_inode_flag(F2FS_I(inode), FI_DROP_CACHE); clear_inode_flag(F2FS_I(inode), FI_DROP_CACHE);
...@@ -1294,20 +1347,16 @@ static int f2fs_ioc_setflags(struct file *filp, unsigned long arg) ...@@ -1294,20 +1347,16 @@ static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
unsigned int oldflags; unsigned int oldflags;
int ret; int ret;
if (!inode_owner_or_capable(inode))
return -EACCES;
if (get_user(flags, (int __user *)arg))
return -EFAULT;
ret = mnt_want_write_file(filp); ret = mnt_want_write_file(filp);
if (ret) if (ret)
return ret; return ret;
if (!inode_owner_or_capable(inode)) {
ret = -EACCES;
goto out;
}
if (get_user(flags, (int __user *)arg)) {
ret = -EFAULT;
goto out;
}
flags = f2fs_mask_flags(inode->i_mode, flags); flags = f2fs_mask_flags(inode->i_mode, flags);
inode_lock(inode); inode_lock(inode);
...@@ -1350,17 +1399,35 @@ static int f2fs_ioc_start_atomic_write(struct file *filp) ...@@ -1350,17 +1399,35 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
if (!inode_owner_or_capable(inode)) if (!inode_owner_or_capable(inode))
return -EACCES; return -EACCES;
ret = mnt_want_write_file(filp);
if (ret)
return ret;
inode_lock(inode);
if (f2fs_is_atomic_file(inode)) if (f2fs_is_atomic_file(inode))
return 0; goto out;
ret = f2fs_convert_inline_inode(inode); ret = f2fs_convert_inline_inode(inode);
if (ret) if (ret)
return ret; goto out;
set_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE); set_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
return 0; if (!get_dirty_pages(inode))
goto out;
f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
"Unexpected flush for atomic writes: ino=%lu, npages=%lld",
inode->i_ino, get_dirty_pages(inode));
ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
if (ret)
clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
out:
inode_unlock(inode);
mnt_drop_write_file(filp);
return ret;
} }
static int f2fs_ioc_commit_atomic_write(struct file *filp) static int f2fs_ioc_commit_atomic_write(struct file *filp)
...@@ -1371,13 +1438,15 @@ static int f2fs_ioc_commit_atomic_write(struct file *filp) ...@@ -1371,13 +1438,15 @@ static int f2fs_ioc_commit_atomic_write(struct file *filp)
if (!inode_owner_or_capable(inode)) if (!inode_owner_or_capable(inode))
return -EACCES; return -EACCES;
if (f2fs_is_volatile_file(inode))
return 0;
ret = mnt_want_write_file(filp); ret = mnt_want_write_file(filp);
if (ret) if (ret)
return ret; return ret;
inode_lock(inode);
if (f2fs_is_volatile_file(inode))
goto err_out;
if (f2fs_is_atomic_file(inode)) { if (f2fs_is_atomic_file(inode)) {
clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE); clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
ret = commit_inmem_pages(inode); ret = commit_inmem_pages(inode);
...@@ -1387,8 +1456,9 @@ static int f2fs_ioc_commit_atomic_write(struct file *filp) ...@@ -1387,8 +1456,9 @@ static int f2fs_ioc_commit_atomic_write(struct file *filp)
} }
} }
ret = f2fs_sync_file(filp, 0, LLONG_MAX, 0); ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
err_out: err_out:
inode_unlock(inode);
mnt_drop_write_file(filp); mnt_drop_write_file(filp);
return ret; return ret;
} }
...@@ -1401,32 +1471,54 @@ static int f2fs_ioc_start_volatile_write(struct file *filp) ...@@ -1401,32 +1471,54 @@ static int f2fs_ioc_start_volatile_write(struct file *filp)
if (!inode_owner_or_capable(inode)) if (!inode_owner_or_capable(inode))
return -EACCES; return -EACCES;
ret = mnt_want_write_file(filp);
if (ret)
return ret;
inode_lock(inode);
if (f2fs_is_volatile_file(inode)) if (f2fs_is_volatile_file(inode))
return 0; goto out;
ret = f2fs_convert_inline_inode(inode); ret = f2fs_convert_inline_inode(inode);
if (ret) if (ret)
return ret; goto out;
set_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE); set_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE);
f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
return 0; out:
inode_unlock(inode);
mnt_drop_write_file(filp);
return ret;
} }
static int f2fs_ioc_release_volatile_write(struct file *filp) static int f2fs_ioc_release_volatile_write(struct file *filp)
{ {
struct inode *inode = file_inode(filp); struct inode *inode = file_inode(filp);
int ret;
if (!inode_owner_or_capable(inode)) if (!inode_owner_or_capable(inode))
return -EACCES; return -EACCES;
ret = mnt_want_write_file(filp);
if (ret)
return ret;
inode_lock(inode);
if (!f2fs_is_volatile_file(inode)) if (!f2fs_is_volatile_file(inode))
return 0; goto out;
if (!f2fs_is_first_block_written(inode)) if (!f2fs_is_first_block_written(inode)) {
return truncate_partial_data_page(inode, 0, true); ret = truncate_partial_data_page(inode, 0, true);
goto out;
}
return punch_hole(inode, 0, F2FS_BLKSIZE); ret = punch_hole(inode, 0, F2FS_BLKSIZE);
out:
inode_unlock(inode);
mnt_drop_write_file(filp);
return ret;
} }
static int f2fs_ioc_abort_volatile_write(struct file *filp) static int f2fs_ioc_abort_volatile_write(struct file *filp)
...@@ -1441,15 +1533,17 @@ static int f2fs_ioc_abort_volatile_write(struct file *filp) ...@@ -1441,15 +1533,17 @@ static int f2fs_ioc_abort_volatile_write(struct file *filp)
if (ret) if (ret)
return ret; return ret;
if (f2fs_is_atomic_file(inode)) { inode_lock(inode);
clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
if (f2fs_is_atomic_file(inode))
drop_inmem_pages(inode); drop_inmem_pages(inode);
}
if (f2fs_is_volatile_file(inode)) { if (f2fs_is_volatile_file(inode)) {
clear_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE); clear_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE);
ret = f2fs_sync_file(filp, 0, LLONG_MAX, 0); ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
} }
inode_unlock(inode);
mnt_drop_write_file(filp); mnt_drop_write_file(filp);
f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
return ret; return ret;
...@@ -1461,6 +1555,7 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg) ...@@ -1461,6 +1555,7 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct super_block *sb = sbi->sb; struct super_block *sb = sbi->sb;
__u32 in; __u32 in;
int ret;
if (!capable(CAP_SYS_ADMIN)) if (!capable(CAP_SYS_ADMIN))
return -EPERM; return -EPERM;
...@@ -1468,31 +1563,38 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg) ...@@ -1468,31 +1563,38 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
if (get_user(in, (__u32 __user *)arg)) if (get_user(in, (__u32 __user *)arg))
return -EFAULT; return -EFAULT;
ret = mnt_want_write_file(filp);
if (ret)
return ret;
switch (in) { switch (in) {
case F2FS_GOING_DOWN_FULLSYNC: case F2FS_GOING_DOWN_FULLSYNC:
sb = freeze_bdev(sb->s_bdev); sb = freeze_bdev(sb->s_bdev);
if (sb && !IS_ERR(sb)) { if (sb && !IS_ERR(sb)) {
f2fs_stop_checkpoint(sbi); f2fs_stop_checkpoint(sbi, false);
thaw_bdev(sb->s_bdev, sb); thaw_bdev(sb->s_bdev, sb);
} }
break; break;
case F2FS_GOING_DOWN_METASYNC: case F2FS_GOING_DOWN_METASYNC:
/* do checkpoint only */ /* do checkpoint only */
f2fs_sync_fs(sb, 1); f2fs_sync_fs(sb, 1);
f2fs_stop_checkpoint(sbi); f2fs_stop_checkpoint(sbi, false);
break; break;
case F2FS_GOING_DOWN_NOSYNC: case F2FS_GOING_DOWN_NOSYNC:
f2fs_stop_checkpoint(sbi); f2fs_stop_checkpoint(sbi, false);
break; break;
case F2FS_GOING_DOWN_METAFLUSH: case F2FS_GOING_DOWN_METAFLUSH:
sync_meta_pages(sbi, META, LONG_MAX); sync_meta_pages(sbi, META, LONG_MAX);
f2fs_stop_checkpoint(sbi); f2fs_stop_checkpoint(sbi, false);
break; break;
default: default:
return -EINVAL; ret = -EINVAL;
goto out;
} }
f2fs_update_time(sbi, REQ_TIME); f2fs_update_time(sbi, REQ_TIME);
return 0; out:
mnt_drop_write_file(filp);
return ret;
} }
static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg) static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
...@@ -1513,9 +1615,14 @@ static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg) ...@@ -1513,9 +1615,14 @@ static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
sizeof(range))) sizeof(range)))
return -EFAULT; return -EFAULT;
ret = mnt_want_write_file(filp);
if (ret)
return ret;
range.minlen = max((unsigned int)range.minlen, range.minlen = max((unsigned int)range.minlen,
q->limits.discard_granularity); q->limits.discard_granularity);
ret = f2fs_trim_fs(F2FS_SB(sb), &range); ret = f2fs_trim_fs(F2FS_SB(sb), &range);
mnt_drop_write_file(filp);
if (ret < 0) if (ret < 0)
return ret; return ret;
...@@ -1540,13 +1647,21 @@ static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg) ...@@ -1540,13 +1647,21 @@ static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
{ {
struct fscrypt_policy policy; struct fscrypt_policy policy;
struct inode *inode = file_inode(filp); struct inode *inode = file_inode(filp);
int ret;
if (copy_from_user(&policy, (struct fscrypt_policy __user *)arg, if (copy_from_user(&policy, (struct fscrypt_policy __user *)arg,
sizeof(policy))) sizeof(policy)))
return -EFAULT; return -EFAULT;
ret = mnt_want_write_file(filp);
if (ret)
return ret;
f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
return fscrypt_process_policy(inode, &policy); ret = fscrypt_process_policy(inode, &policy);
mnt_drop_write_file(filp);
return ret;
} }
static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg) static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
...@@ -1603,6 +1718,7 @@ static int f2fs_ioc_gc(struct file *filp, unsigned long arg) ...@@ -1603,6 +1718,7 @@ static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
struct inode *inode = file_inode(filp); struct inode *inode = file_inode(filp);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
__u32 sync; __u32 sync;
int ret;
if (!capable(CAP_SYS_ADMIN)) if (!capable(CAP_SYS_ADMIN))
return -EPERM; return -EPERM;
...@@ -1613,20 +1729,30 @@ static int f2fs_ioc_gc(struct file *filp, unsigned long arg) ...@@ -1613,20 +1729,30 @@ static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
if (f2fs_readonly(sbi->sb)) if (f2fs_readonly(sbi->sb))
return -EROFS; return -EROFS;
ret = mnt_want_write_file(filp);
if (ret)
return ret;
if (!sync) { if (!sync) {
if (!mutex_trylock(&sbi->gc_mutex)) if (!mutex_trylock(&sbi->gc_mutex)) {
return -EBUSY; ret = -EBUSY;
goto out;
}
} else { } else {
mutex_lock(&sbi->gc_mutex); mutex_lock(&sbi->gc_mutex);
} }
return f2fs_gc(sbi, sync); ret = f2fs_gc(sbi, sync);
out:
mnt_drop_write_file(filp);
return ret;
} }
static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg) static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
{ {
struct inode *inode = file_inode(filp); struct inode *inode = file_inode(filp);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
int ret;
if (!capable(CAP_SYS_ADMIN)) if (!capable(CAP_SYS_ADMIN))
return -EPERM; return -EPERM;
...@@ -1634,7 +1760,14 @@ static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg) ...@@ -1634,7 +1760,14 @@ static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
if (f2fs_readonly(sbi->sb)) if (f2fs_readonly(sbi->sb))
return -EROFS; return -EROFS;
return f2fs_sync_fs(sbi->sb, 1); ret = mnt_want_write_file(filp);
if (ret)
return ret;
ret = f2fs_sync_fs(sbi->sb, 1);
mnt_drop_write_file(filp);
return ret;
} }
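/*
 * The ioctl changes above all converge on the same write-access
 * bracket; a minimal sketch of the pattern (hypothetical ioctl body,
 * not taken from the patch):
 */
static int example_rw_ioctl(struct file *filp)
{
	int ret;

	/* pin write access to the mount; fails with -EROFS on ro mounts */
	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	ret = 0;	/* ... work that may dirty the filesystem ... */

	/* every return path after a successful want must drop it again */
	mnt_drop_write_file(filp);
	return ret;
}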
static int f2fs_defragment_range(struct f2fs_sb_info *sbi, static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
......
...@@ -96,7 +96,7 @@ int start_gc_thread(struct f2fs_sb_info *sbi) ...@@ -96,7 +96,7 @@ int start_gc_thread(struct f2fs_sb_info *sbi)
dev_t dev = sbi->sb->s_bdev->bd_dev; dev_t dev = sbi->sb->s_bdev->bd_dev;
int err = 0; int err = 0;
gc_th = kmalloc(sizeof(struct f2fs_gc_kthread), GFP_KERNEL); gc_th = f2fs_kmalloc(sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
if (!gc_th) { if (!gc_th) {
err = -ENOMEM; err = -ENOMEM;
goto out; goto out;
...@@ -465,15 +465,7 @@ static void gc_node_segment(struct f2fs_sb_info *sbi, ...@@ -465,15 +465,7 @@ static void gc_node_segment(struct f2fs_sb_info *sbi,
continue; continue;
} }
/* set page dirty and write it */ move_node_page(node_page, gc_type);
if (gc_type == FG_GC) {
f2fs_wait_on_page_writeback(node_page, NODE, true);
set_page_dirty(node_page);
} else {
if (!PageWriteback(node_page))
set_page_dirty(node_page);
}
f2fs_put_page(node_page, 1);
stat_inc_node_blk_count(sbi, 1, gc_type); stat_inc_node_blk_count(sbi, 1, gc_type);
} }
...@@ -834,18 +826,9 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi, ...@@ -834,18 +826,9 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
f2fs_put_page(sum_page, 0); f2fs_put_page(sum_page, 0);
} }
if (gc_type == FG_GC) { if (gc_type == FG_GC)
if (type == SUM_TYPE_NODE) { f2fs_submit_merged_bio(sbi,
struct writeback_control wbc = { (type == SUM_TYPE_NODE) ? NODE : DATA, WRITE);
.sync_mode = WB_SYNC_ALL,
.nr_to_write = LONG_MAX,
.for_reclaim = 0,
};
sync_node_pages(sbi, 0, &wbc);
} else {
f2fs_submit_merged_bio(sbi, DATA, WRITE);
}
}
blk_finish_plug(&plug); blk_finish_plug(&plug);
......
...@@ -161,7 +161,7 @@ int f2fs_convert_inline_inode(struct inode *inode) ...@@ -161,7 +161,7 @@ int f2fs_convert_inline_inode(struct inode *inode)
if (!f2fs_has_inline_data(inode)) if (!f2fs_has_inline_data(inode))
return 0; return 0;
page = grab_cache_page(inode->i_mapping, 0); page = f2fs_grab_cache_page(inode->i_mapping, 0, false);
if (!page) if (!page)
return -ENOMEM; return -ENOMEM;
...@@ -303,11 +303,6 @@ struct f2fs_dir_entry *find_in_inline_dir(struct inode *dir, ...@@ -303,11 +303,6 @@ struct f2fs_dir_entry *find_in_inline_dir(struct inode *dir,
else else
f2fs_put_page(ipage, 0); f2fs_put_page(ipage, 0);
/*
* For the most part, it should be a bug when name_len is zero.
* We stop here for figuring out where the bugs has occurred.
*/
f2fs_bug_on(sbi, d.max < 0);
return de; return de;
} }
...@@ -355,7 +350,7 @@ int make_empty_inline_dir(struct inode *inode, struct inode *parent, ...@@ -355,7 +350,7 @@ int make_empty_inline_dir(struct inode *inode, struct inode *parent,
* NOTE: ipage is grabbed by caller, but if any error occurs, we should * NOTE: ipage is grabbed by caller, but if any error occurs, we should
* release ipage in this function. * release ipage in this function.
*/ */
static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage, static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage,
struct f2fs_inline_dentry *inline_dentry) struct f2fs_inline_dentry *inline_dentry)
{ {
struct page *page; struct page *page;
...@@ -363,7 +358,7 @@ static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage, ...@@ -363,7 +358,7 @@ static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
struct f2fs_dentry_block *dentry_blk; struct f2fs_dentry_block *dentry_blk;
int err; int err;
page = grab_cache_page(dir->i_mapping, 0); page = f2fs_grab_cache_page(dir->i_mapping, 0, false);
if (!page) { if (!page) {
f2fs_put_page(ipage, 1); f2fs_put_page(ipage, 1);
return -ENOMEM; return -ENOMEM;
...@@ -405,6 +400,7 @@ static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage, ...@@ -405,6 +400,7 @@ static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
stat_dec_inline_dir(dir); stat_dec_inline_dir(dir);
clear_inode_flag(F2FS_I(dir), FI_INLINE_DENTRY); clear_inode_flag(F2FS_I(dir), FI_INLINE_DENTRY);
F2FS_I(dir)->i_current_depth = 1;
if (i_size_read(dir) < PAGE_SIZE) { if (i_size_read(dir) < PAGE_SIZE) {
i_size_write(dir, PAGE_SIZE); i_size_write(dir, PAGE_SIZE);
set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR); set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
...@@ -416,6 +412,105 @@ static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage, ...@@ -416,6 +412,105 @@ static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
return err; return err;
} }
static int f2fs_add_inline_entries(struct inode *dir,
struct f2fs_inline_dentry *inline_dentry)
{
struct f2fs_dentry_ptr d;
unsigned long bit_pos = 0;
int err = 0;
make_dentry_ptr(NULL, &d, (void *)inline_dentry, 2);
while (bit_pos < d.max) {
struct f2fs_dir_entry *de;
struct qstr new_name;
nid_t ino;
umode_t fake_mode;
if (!test_bit_le(bit_pos, d.bitmap)) {
bit_pos++;
continue;
}
de = &d.dentry[bit_pos];
if (unlikely(!de->name_len)) {
bit_pos++;
continue;
}
new_name.name = d.filename[bit_pos];
new_name.len = de->name_len;
ino = le32_to_cpu(de->ino);
fake_mode = get_de_type(de) << S_SHIFT;
err = f2fs_add_regular_entry(dir, &new_name, NULL,
ino, fake_mode);
if (err)
goto punch_dentry_pages;
bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
}
return 0;
punch_dentry_pages:
truncate_inode_pages(&dir->i_data, 0);
truncate_blocks(dir, 0, false);
remove_dirty_inode(dir);
return err;
}
static int f2fs_move_rehashed_dirents(struct inode *dir, struct page *ipage,
struct f2fs_inline_dentry *inline_dentry)
{
struct f2fs_inline_dentry *backup_dentry;
struct f2fs_inode_info *fi = F2FS_I(dir);
int err;
backup_dentry = f2fs_kmalloc(sizeof(struct f2fs_inline_dentry),
GFP_F2FS_ZERO);
if (!backup_dentry) {
f2fs_put_page(ipage, 1);
return -ENOMEM;
}
memcpy(backup_dentry, inline_dentry, MAX_INLINE_DATA);
truncate_inline_inode(ipage, 0);
unlock_page(ipage);
err = f2fs_add_inline_entries(dir, backup_dentry);
if (err)
goto recover;
lock_page(ipage);
stat_dec_inline_dir(dir);
clear_inode_flag(fi, FI_INLINE_DENTRY);
update_inode(dir, ipage);
kfree(backup_dentry);
return 0;
recover:
lock_page(ipage);
memcpy(inline_dentry, backup_dentry, MAX_INLINE_DATA);
fi->i_current_depth = 0;
i_size_write(dir, MAX_INLINE_DATA);
update_inode(dir, ipage);
f2fs_put_page(ipage, 1);
kfree(backup_dentry);
return err;
}
static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
struct f2fs_inline_dentry *inline_dentry)
{
if (!F2FS_I(dir)->i_dir_level)
return f2fs_move_inline_dirents(dir, ipage, inline_dentry);
else
return f2fs_move_rehashed_dirents(dir, ipage, inline_dentry);
}
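/*
 * Reading note for the dispatcher above: with the default i_dir_level
 * of 0, the inline dentries can simply be copied into dentry block 0
 * by f2fs_move_inline_dirents(). With a non-zero dir_level the target
 * hash buckets presumably differ, so f2fs_move_rehashed_dirents()
 * backs the inline entries up and re-inserts each one through
 * f2fs_add_regular_entry() so it lands in the proper bucket.
 */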
int f2fs_add_inline_entry(struct inode *dir, const struct qstr *name, int f2fs_add_inline_entry(struct inode *dir, const struct qstr *name,
struct inode *inode, nid_t ino, umode_t mode) struct inode *inode, nid_t ino, umode_t mode)
{ {
......
...@@ -283,7 +283,7 @@ int update_inode_page(struct inode *inode) ...@@ -283,7 +283,7 @@ int update_inode_page(struct inode *inode)
cond_resched(); cond_resched();
goto retry; goto retry;
} else if (err != -ENOENT) { } else if (err != -ENOENT) {
f2fs_stop_checkpoint(sbi); f2fs_stop_checkpoint(sbi, false);
} }
return 0; return 0;
} }
...@@ -344,7 +344,7 @@ void f2fs_evict_inode(struct inode *inode) ...@@ -344,7 +344,7 @@ void f2fs_evict_inode(struct inode *inode)
sb_start_intwrite(inode->i_sb); sb_start_intwrite(inode->i_sb);
set_inode_flag(fi, FI_NO_ALLOC); set_inode_flag(fi, FI_NO_ALLOC);
i_size_write(inode, 0); i_size_write(inode, 0);
retry:
if (F2FS_HAS_BLOCKS(inode)) if (F2FS_HAS_BLOCKS(inode))
err = f2fs_truncate(inode, true); err = f2fs_truncate(inode, true);
...@@ -354,6 +354,12 @@ void f2fs_evict_inode(struct inode *inode) ...@@ -354,6 +354,12 @@ void f2fs_evict_inode(struct inode *inode)
f2fs_unlock_op(sbi); f2fs_unlock_op(sbi);
} }
/* give it more chances and retry in the ENOMEM case */
if (err == -ENOMEM) {
err = 0;
goto retry;
}
sb_end_intwrite(inode->i_sb); sb_end_intwrite(inode->i_sb);
no_delete: no_delete:
stat_dec_inline_xattr(inode); stat_dec_inline_xattr(inode);
...@@ -368,26 +374,11 @@ void f2fs_evict_inode(struct inode *inode) ...@@ -368,26 +374,11 @@ void f2fs_evict_inode(struct inode *inode)
if (is_inode_flag_set(fi, FI_UPDATE_WRITE)) if (is_inode_flag_set(fi, FI_UPDATE_WRITE))
add_ino_entry(sbi, inode->i_ino, UPDATE_INO); add_ino_entry(sbi, inode->i_ino, UPDATE_INO);
if (is_inode_flag_set(fi, FI_FREE_NID)) { if (is_inode_flag_set(fi, FI_FREE_NID)) {
if (err && err != -ENOENT)
alloc_nid_done(sbi, inode->i_ino);
else
alloc_nid_failed(sbi, inode->i_ino); alloc_nid_failed(sbi, inode->i_ino);
clear_inode_flag(fi, FI_FREE_NID); clear_inode_flag(fi, FI_FREE_NID);
} }
f2fs_bug_on(sbi, err &&
if (err && err != -ENOENT) { !exist_written_data(sbi, inode->i_ino, ORPHAN_INO));
if (!exist_written_data(sbi, inode->i_ino, ORPHAN_INO)) {
/*
* get here because we failed to release resource
* of inode previously, reminder our user to run fsck
* for fixing.
*/
set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_msg(sbi->sb, KERN_WARNING,
"inode (ino:%lu) resource leak, run fsck "
"to fix this issue!", inode->i_ino);
}
}
out_clear: out_clear:
fscrypt_put_encryption_info(inode, NULL); fscrypt_put_encryption_info(inode, NULL);
clear_inode(inode); clear_inode(inode);
...@@ -397,37 +388,32 @@ void f2fs_evict_inode(struct inode *inode) ...@@ -397,37 +388,32 @@ void f2fs_evict_inode(struct inode *inode)
void handle_failed_inode(struct inode *inode) void handle_failed_inode(struct inode *inode)
{ {
struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
int err = 0; struct node_info ni;
clear_nlink(inode); /* don't make bad inode, since it becomes a regular file. */
make_bad_inode(inode);
unlock_new_inode(inode); unlock_new_inode(inode);
i_size_write(inode, 0);
if (F2FS_HAS_BLOCKS(inode))
err = f2fs_truncate(inode, false);
if (!err)
err = remove_inode_page(inode);
/* /*
* if we skip truncate_node in remove_inode_page bacause we failed
* before, it's better to find another way to release resource of
* this inode (e.g. valid block count, node block or nid). Here we
* choose to add this inode to orphan list, so that we can call iput
* for releasing in orphan recovery flow.
*
* Note: we should add inode to orphan list before f2fs_unlock_op() * Note: we should add inode to orphan list before f2fs_unlock_op()
* so we can prevent losing this orphan when encountering a checkpoint * so we can prevent losing this orphan when encountering a checkpoint
* and a subsequent sudden power-off. * and a subsequent sudden power-off.
*/ */
if (err && err != -ENOENT) { get_node_info(sbi, inode->i_ino, &ni);
err = acquire_orphan_inode(sbi);
if (!err) if (ni.blk_addr != NULL_ADDR) {
int err = acquire_orphan_inode(sbi);
if (err) {
set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_msg(sbi->sb, KERN_WARNING,
"Too many orphan inodes, run fsck to fix.");
} else {
add_orphan_inode(sbi, inode->i_ino); add_orphan_inode(sbi, inode->i_ino);
} }
alloc_nid_done(sbi, inode->i_ino);
} else {
set_inode_flag(F2FS_I(inode), FI_FREE_NID); set_inode_flag(F2FS_I(inode), FI_FREE_NID);
}
f2fs_unlock_op(sbi); f2fs_unlock_op(sbi);
/* iput will drop the inode object */ /* iput will drop the inode object */
......
...@@ -407,6 +407,29 @@ void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni) ...@@ -407,6 +407,29 @@ void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
up_write(&nm_i->nat_tree_lock); up_write(&nm_i->nat_tree_lock);
} }
/*
* readahead up to @n sibling node pages, starting at @start.
*/
static void ra_node_pages(struct page *parent, int start, int n)
{
struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
struct blk_plug plug;
int i, end;
nid_t nid;
blk_start_plug(&plug);
/* Then, try readahead for siblings of the desired node */
end = start + n;
end = min(end, NIDS_PER_BLOCK);
for (i = start; i < end; i++) {
nid = get_nid(parent, i, false);
ra_node_page(sbi, nid);
}
blk_finish_plug(&plug);
}
pgoff_t get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs) pgoff_t get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
{ {
const long direct_index = ADDRS_PER_INODE(dn->inode); const long direct_index = ADDRS_PER_INODE(dn->inode);
...@@ -707,6 +730,8 @@ static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs, ...@@ -707,6 +730,8 @@ static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
return PTR_ERR(page); return PTR_ERR(page);
} }
ra_node_pages(page, ofs, NIDS_PER_BLOCK);
rn = F2FS_NODE(page); rn = F2FS_NODE(page);
if (depth < 3) { if (depth < 3) {
for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) { for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
...@@ -784,6 +809,8 @@ static int truncate_partial_nodes(struct dnode_of_data *dn, ...@@ -784,6 +809,8 @@ static int truncate_partial_nodes(struct dnode_of_data *dn,
nid[i + 1] = get_nid(pages[i], offset[i + 1], false); nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
} }
ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK);
/* free direct nodes linked to a partial indirect node */ /* free direct nodes linked to a partial indirect node */
for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) { for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
child_nid = get_nid(pages[idx], i, false); child_nid = get_nid(pages[idx], i, false);
...@@ -832,7 +859,7 @@ int truncate_inode_blocks(struct inode *inode, pgoff_t from) ...@@ -832,7 +859,7 @@ int truncate_inode_blocks(struct inode *inode, pgoff_t from)
trace_f2fs_truncate_inode_blocks_enter(inode, from); trace_f2fs_truncate_inode_blocks_enter(inode, from);
level = get_node_path(inode, from, offset, noffset); level = get_node_path(inode, from, offset, noffset);
restart:
page = get_node_page(sbi, inode->i_ino); page = get_node_page(sbi, inode->i_ino);
if (IS_ERR(page)) { if (IS_ERR(page)) {
trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page)); trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
...@@ -896,10 +923,7 @@ int truncate_inode_blocks(struct inode *inode, pgoff_t from) ...@@ -896,10 +923,7 @@ int truncate_inode_blocks(struct inode *inode, pgoff_t from)
if (offset[1] == 0 && if (offset[1] == 0 &&
ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) { ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
lock_page(page); lock_page(page);
if (unlikely(page->mapping != NODE_MAPPING(sbi))) { BUG_ON(page->mapping != NODE_MAPPING(sbi));
f2fs_put_page(page, 1);
goto restart;
}
f2fs_wait_on_page_writeback(page, NODE, true); f2fs_wait_on_page_writeback(page, NODE, true);
ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0; ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
set_page_dirty(page); set_page_dirty(page);
...@@ -998,7 +1022,7 @@ struct page *new_node_page(struct dnode_of_data *dn, ...@@ -998,7 +1022,7 @@ struct page *new_node_page(struct dnode_of_data *dn,
if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))) if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
return ERR_PTR(-EPERM); return ERR_PTR(-EPERM);
page = grab_cache_page(NODE_MAPPING(sbi), dn->nid); page = f2fs_grab_cache_page(NODE_MAPPING(sbi), dn->nid, false);
if (!page) if (!page)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
...@@ -1090,7 +1114,7 @@ void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid) ...@@ -1090,7 +1114,7 @@ void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
if (apage) if (apage)
return; return;
apage = grab_cache_page(NODE_MAPPING(sbi), nid); apage = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
if (!apage) if (!apage)
return; return;
...@@ -1098,29 +1122,6 @@ void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid) ...@@ -1098,29 +1122,6 @@ void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
f2fs_put_page(apage, err ? 1 : 0); f2fs_put_page(apage, err ? 1 : 0);
} }
/*
* readahead MAX_RA_NODE number of node pages.
*/
static void ra_node_pages(struct page *parent, int start)
{
struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
struct blk_plug plug;
int i, end;
nid_t nid;
blk_start_plug(&plug);
/* Then, try readahead for siblings of the desired node */
end = start + MAX_RA_NODE;
end = min(end, NIDS_PER_BLOCK);
for (i = start; i < end; i++) {
nid = get_nid(parent, i, false);
ra_node_page(sbi, nid);
}
blk_finish_plug(&plug);
}
static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid, static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
struct page *parent, int start) struct page *parent, int start)
{ {
...@@ -1131,7 +1132,7 @@ static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid, ...@@ -1131,7 +1132,7 @@ static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
return ERR_PTR(-ENOENT); return ERR_PTR(-ENOENT);
f2fs_bug_on(sbi, check_nid_range(sbi, nid)); f2fs_bug_on(sbi, check_nid_range(sbi, nid));
repeat: repeat:
page = grab_cache_page(NODE_MAPPING(sbi), nid); page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
if (!page) if (!page)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
...@@ -1144,7 +1145,7 @@ static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid, ...@@ -1144,7 +1145,7 @@ static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
} }
if (parent) if (parent)
ra_node_pages(parent, start + 1); ra_node_pages(parent, start + 1, MAX_RA_NODE);
lock_page(page); lock_page(page);
...@@ -1196,19 +1197,17 @@ static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino) ...@@ -1196,19 +1197,17 @@ static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
{ {
struct inode *inode; struct inode *inode;
struct page *page; struct page *page;
int ret;
/* should flush inline_data before evict_inode */ /* should flush inline_data before evict_inode */
inode = ilookup(sbi->sb, ino); inode = ilookup(sbi->sb, ino);
if (!inode) if (!inode)
return; return;
page = pagecache_get_page(inode->i_mapping, 0, FGP_NOWAIT, 0); page = pagecache_get_page(inode->i_mapping, 0, FGP_LOCK|FGP_NOWAIT, 0);
if (!page) if (!page)
goto iput_out; goto iput_out;
if (!trylock_page(page))
goto release_out;
if (!PageUptodate(page)) if (!PageUptodate(page))
goto page_out; goto page_out;
...@@ -1218,24 +1217,214 @@ static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino) ...@@ -1218,24 +1217,214 @@ static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
if (!clear_page_dirty_for_io(page)) if (!clear_page_dirty_for_io(page))
goto page_out; goto page_out;
if (!f2fs_write_inline_data(inode, page)) ret = f2fs_write_inline_data(inode, page);
inode_dec_dirty_pages(inode); inode_dec_dirty_pages(inode);
else if (ret)
set_page_dirty(page); set_page_dirty(page);
page_out: page_out:
unlock_page(page); f2fs_put_page(page, 1);
release_out:
f2fs_put_page(page, 0);
iput_out: iput_out:
iput(inode); iput(inode);
} }
int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino, void move_node_page(struct page *node_page, int gc_type)
struct writeback_control *wbc) {
if (gc_type == FG_GC) {
struct f2fs_sb_info *sbi = F2FS_P_SB(node_page);
struct writeback_control wbc = {
.sync_mode = WB_SYNC_ALL,
.nr_to_write = 1,
.for_reclaim = 0,
};
set_page_dirty(node_page);
f2fs_wait_on_page_writeback(node_page, NODE, true);
f2fs_bug_on(sbi, PageWriteback(node_page));
if (!clear_page_dirty_for_io(node_page))
goto out_page;
if (NODE_MAPPING(sbi)->a_ops->writepage(node_page, &wbc))
unlock_page(node_page);
goto release_page;
} else {
/* set page dirty and write it */
if (!PageWriteback(node_page))
set_page_dirty(node_page);
}
out_page:
unlock_page(node_page);
release_page:
f2fs_put_page(node_page, 0);
}
static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
{ {
pgoff_t index, end; pgoff_t index, end;
struct pagevec pvec; struct pagevec pvec;
int step = ino ? 2 : 0; struct page *last_page = NULL;
pagevec_init(&pvec, 0);
index = 0;
end = ULONG_MAX;
while (index <= end) {
int i, nr_pages;
nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
PAGECACHE_TAG_DIRTY,
min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
if (nr_pages == 0)
break;
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
if (unlikely(f2fs_cp_error(sbi))) {
f2fs_put_page(last_page, 0);
pagevec_release(&pvec);
return ERR_PTR(-EIO);
}
if (!IS_DNODE(page) || !is_cold_node(page))
continue;
if (ino_of_node(page) != ino)
continue;
lock_page(page);
if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
unlock_page(page);
continue;
}
if (ino_of_node(page) != ino)
goto continue_unlock;
if (!PageDirty(page)) {
/* someone wrote it for us */
goto continue_unlock;
}
if (last_page)
f2fs_put_page(last_page, 0);
get_page(page);
last_page = page;
unlock_page(page);
}
pagevec_release(&pvec);
cond_resched();
}
return last_page;
}
int fsync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
struct writeback_control *wbc, bool atomic)
{
pgoff_t index, end;
struct pagevec pvec;
int ret = 0;
struct page *last_page = NULL;
bool marked = false;
if (atomic) {
last_page = last_fsync_dnode(sbi, ino);
if (IS_ERR_OR_NULL(last_page))
return PTR_ERR_OR_ZERO(last_page);
}
retry:
pagevec_init(&pvec, 0);
index = 0;
end = ULONG_MAX;
while (index <= end) {
int i, nr_pages;
nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
PAGECACHE_TAG_DIRTY,
min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
if (nr_pages == 0)
break;
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
if (unlikely(f2fs_cp_error(sbi))) {
f2fs_put_page(last_page, 0);
pagevec_release(&pvec);
return -EIO;
}
if (!IS_DNODE(page) || !is_cold_node(page))
continue;
if (ino_of_node(page) != ino)
continue;
lock_page(page);
if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
unlock_page(page);
continue;
}
if (ino_of_node(page) != ino)
goto continue_unlock;
if (!PageDirty(page) && page != last_page) {
/* someone wrote it for us */
goto continue_unlock;
}
f2fs_wait_on_page_writeback(page, NODE, true);
BUG_ON(PageWriteback(page));
if (!atomic || page == last_page) {
set_fsync_mark(page, 1);
if (IS_INODE(page))
set_dentry_mark(page,
need_dentry_mark(sbi, ino));
/* may be written by other thread */
if (!PageDirty(page))
set_page_dirty(page);
}
if (!clear_page_dirty_for_io(page))
goto continue_unlock;
ret = NODE_MAPPING(sbi)->a_ops->writepage(page, wbc);
if (ret) {
unlock_page(page);
f2fs_put_page(last_page, 0);
break;
}
if (page == last_page) {
f2fs_put_page(page, 0);
marked = true;
break;
}
}
pagevec_release(&pvec);
cond_resched();
if (ret || marked)
break;
}
if (!ret && atomic && !marked) {
f2fs_msg(sbi->sb, KERN_DEBUG,
"Retry to write fsync mark: ino=%u, idx=%lx",
ino, last_page->index);
lock_page(last_page);
set_page_dirty(last_page);
unlock_page(last_page);
goto retry;
}
return ret ? -EIO: 0;
}
int sync_node_pages(struct f2fs_sb_info *sbi, struct writeback_control *wbc)
{
pgoff_t index, end;
struct pagevec pvec;
int step = 0;
int nwritten = 0; int nwritten = 0;
pagevec_init(&pvec, 0); pagevec_init(&pvec, 0);
...@@ -1274,15 +1463,8 @@ int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino, ...@@ -1274,15 +1463,8 @@ int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
if (step == 2 && (!IS_DNODE(page) || if (step == 2 && (!IS_DNODE(page) ||
!is_cold_node(page))) !is_cold_node(page)))
continue; continue;
/*
* If an fsync mode,
* we should not skip writing node pages.
*/
lock_node: lock_node:
if (ino && ino_of_node(page) == ino) if (!trylock_page(page))
lock_page(page);
else if (!trylock_page(page))
continue; continue;
if (unlikely(page->mapping != NODE_MAPPING(sbi))) { if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
...@@ -1290,8 +1472,6 @@ int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino, ...@@ -1290,8 +1472,6 @@ int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
unlock_page(page); unlock_page(page);
continue; continue;
} }
if (ino && ino_of_node(page) != ino)
goto continue_unlock;
if (!PageDirty(page)) { if (!PageDirty(page)) {
/* someone wrote it for us */ /* someone wrote it for us */
...@@ -1299,7 +1479,7 @@ int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino, ...@@ -1299,7 +1479,7 @@ int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
} }
/* flush inline_data */ /* flush inline_data */
if (!ino && is_inline_node(page)) { if (is_inline_node(page)) {
clear_inline_node(page); clear_inline_node(page);
unlock_page(page); unlock_page(page);
flush_inline_data(sbi, ino_of_node(page)); flush_inline_data(sbi, ino_of_node(page));
...@@ -1312,17 +1492,8 @@ int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino, ...@@ -1312,17 +1492,8 @@ int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
if (!clear_page_dirty_for_io(page)) if (!clear_page_dirty_for_io(page))
goto continue_unlock; goto continue_unlock;
/* called by fsync() */
if (ino && IS_DNODE(page)) {
set_fsync_mark(page, 1);
if (IS_INODE(page))
set_dentry_mark(page,
need_dentry_mark(sbi, ino));
nwritten++;
} else {
set_fsync_mark(page, 0); set_fsync_mark(page, 0);
set_dentry_mark(page, 0); set_dentry_mark(page, 0);
}
if (NODE_MAPPING(sbi)->a_ops->writepage(page, wbc)) if (NODE_MAPPING(sbi)->a_ops->writepage(page, wbc))
unlock_page(page); unlock_page(page);
...@@ -1470,7 +1641,7 @@ static int f2fs_write_node_pages(struct address_space *mapping, ...@@ -1470,7 +1641,7 @@ static int f2fs_write_node_pages(struct address_space *mapping,
diff = nr_pages_to_write(sbi, NODE, wbc); diff = nr_pages_to_write(sbi, NODE, wbc);
wbc->sync_mode = WB_SYNC_NONE; wbc->sync_mode = WB_SYNC_NONE;
sync_node_pages(sbi, 0, wbc); sync_node_pages(sbi, wbc);
wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff); wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
return 0; return 0;
...@@ -1524,7 +1695,6 @@ static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build) ...@@ -1524,7 +1695,6 @@ static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
struct f2fs_nm_info *nm_i = NM_I(sbi); struct f2fs_nm_info *nm_i = NM_I(sbi);
struct free_nid *i; struct free_nid *i;
struct nat_entry *ne; struct nat_entry *ne;
bool allocated = false;
if (!available_free_memory(sbi, FREE_NIDS)) if (!available_free_memory(sbi, FREE_NIDS))
return -1; return -1;
...@@ -1538,8 +1708,6 @@ static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build) ...@@ -1538,8 +1708,6 @@ static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
ne = __lookup_nat_cache(nm_i, nid); ne = __lookup_nat_cache(nm_i, nid);
if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) || if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
nat_get_blkaddr(ne) != NULL_ADDR)) nat_get_blkaddr(ne) != NULL_ADDR))
allocated = true;
if (allocated)
return 0; return 0;
} }
...@@ -1672,6 +1840,10 @@ bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid) ...@@ -1672,6 +1840,10 @@ bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
struct f2fs_nm_info *nm_i = NM_I(sbi); struct f2fs_nm_info *nm_i = NM_I(sbi);
struct free_nid *i = NULL; struct free_nid *i = NULL;
retry: retry:
#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(FAULT_ALLOC_NID))
return false;
#endif
if (unlikely(sbi->total_valid_node_count + 1 > nm_i->available_nids)) if (unlikely(sbi->total_valid_node_count + 1 > nm_i->available_nids))
return false; return false;
...@@ -1846,7 +2018,7 @@ int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page) ...@@ -1846,7 +2018,7 @@ int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
if (unlikely(old_ni.blk_addr != NULL_ADDR)) if (unlikely(old_ni.blk_addr != NULL_ADDR))
return -EINVAL; return -EINVAL;
ipage = grab_cache_page(NODE_MAPPING(sbi), ino); ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false);
if (!ipage) if (!ipage)
return -ENOMEM; return -ENOMEM;
......
...@@ -49,8 +49,9 @@ static struct kmem_cache *fsync_entry_slab; ...@@ -49,8 +49,9 @@ static struct kmem_cache *fsync_entry_slab;
bool space_for_roll_forward(struct f2fs_sb_info *sbi) bool space_for_roll_forward(struct f2fs_sb_info *sbi)
{ {
if (sbi->last_valid_block_count + sbi->alloc_valid_block_count s64 nalloc = percpu_counter_sum_positive(&sbi->alloc_valid_block_count);
> sbi->user_block_count)
if (sbi->last_valid_block_count + nalloc > sbi->user_block_count)
return false; return false;
return true; return true;
} }
...@@ -67,7 +68,30 @@ static struct fsync_inode_entry *get_fsync_inode(struct list_head *head, ...@@ -67,7 +68,30 @@ static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
return NULL; return NULL;
} }
static int recover_dentry(struct inode *inode, struct page *ipage) static struct fsync_inode_entry *add_fsync_inode(struct list_head *head,
struct inode *inode)
{
struct fsync_inode_entry *entry;
entry = kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
if (!entry)
return NULL;
entry->inode = inode;
list_add_tail(&entry->list, head);
return entry;
}
static void del_fsync_inode(struct fsync_inode_entry *entry)
{
iput(entry->inode);
list_del(&entry->list);
kmem_cache_free(fsync_entry_slab, entry);
}
static int recover_dentry(struct inode *inode, struct page *ipage,
struct list_head *dir_list)
{ {
struct f2fs_inode *raw_inode = F2FS_INODE(ipage); struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
nid_t pino = le32_to_cpu(raw_inode->i_pino); nid_t pino = le32_to_cpu(raw_inode->i_pino);
...@@ -75,26 +99,37 @@ static int recover_dentry(struct inode *inode, struct page *ipage) ...@@ -75,26 +99,37 @@ static int recover_dentry(struct inode *inode, struct page *ipage)
struct qstr name; struct qstr name;
struct page *page; struct page *page;
struct inode *dir, *einode; struct inode *dir, *einode;
struct fsync_inode_entry *entry;
int err = 0; int err = 0;
entry = get_fsync_inode(dir_list, pino);
if (!entry) {
dir = f2fs_iget(inode->i_sb, pino); dir = f2fs_iget(inode->i_sb, pino);
if (IS_ERR(dir)) { if (IS_ERR(dir)) {
err = PTR_ERR(dir); err = PTR_ERR(dir);
goto out; goto out;
} }
if (file_enc_name(inode)) { entry = add_fsync_inode(dir_list, dir);
if (!entry) {
err = -ENOMEM;
iput(dir); iput(dir);
return 0; goto out;
}
} }
dir = entry->inode;
if (file_enc_name(inode))
return 0;
name.len = le32_to_cpu(raw_inode->i_namelen); name.len = le32_to_cpu(raw_inode->i_namelen);
name.name = raw_inode->i_name; name.name = raw_inode->i_name;
if (unlikely(name.len > F2FS_NAME_LEN)) { if (unlikely(name.len > F2FS_NAME_LEN)) {
WARN_ON(1); WARN_ON(1);
err = -ENAMETOOLONG; err = -ENAMETOOLONG;
goto out_err; goto out;
} }
retry: retry:
de = f2fs_find_entry(dir, &name, &page); de = f2fs_find_entry(dir, &name, &page);
...@@ -120,23 +155,12 @@ static int recover_dentry(struct inode *inode, struct page *ipage) ...@@ -120,23 +155,12 @@ static int recover_dentry(struct inode *inode, struct page *ipage)
goto retry; goto retry;
} }
err = __f2fs_add_link(dir, &name, inode, inode->i_ino, inode->i_mode); err = __f2fs_add_link(dir, &name, inode, inode->i_ino, inode->i_mode);
if (err)
goto out_err;
if (is_inode_flag_set(F2FS_I(dir), FI_DELAY_IPUT)) {
iput(dir);
} else {
add_dirty_dir_inode(dir);
set_inode_flag(F2FS_I(dir), FI_DELAY_IPUT);
}
goto out; goto out;
out_unmap_put: out_unmap_put:
f2fs_dentry_kunmap(dir, page); f2fs_dentry_kunmap(dir, page);
f2fs_put_page(page, 0); f2fs_put_page(page, 0);
out_err:
iput(dir);
out: out:
f2fs_msg(inode->i_sb, KERN_NOTICE, f2fs_msg(inode->i_sb, KERN_NOTICE,
"%s: ino = %x, name = %s, dir = %lx, err = %d", "%s: ino = %x, name = %s, dir = %lx, err = %d",
...@@ -198,6 +222,7 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head) ...@@ -198,6 +222,7 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
{ {
unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi)); unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
struct curseg_info *curseg; struct curseg_info *curseg;
struct inode *inode;
struct page *page = NULL; struct page *page = NULL;
block_t blkaddr; block_t blkaddr;
int err = 0; int err = 0;
...@@ -206,8 +231,6 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head) ...@@ -206,8 +231,6 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
curseg = CURSEG_I(sbi, CURSEG_WARM_NODE); curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
blkaddr = NEXT_FREE_BLKADDR(sbi, curseg); blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
ra_meta_pages(sbi, blkaddr, 1, META_POR, true);
while (1) { while (1) {
struct fsync_inode_entry *entry; struct fsync_inode_entry *entry;
...@@ -233,35 +256,32 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head) ...@@ -233,35 +256,32 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
break; break;
} }
/* add this fsync inode to the list */
entry = kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
if (!entry) {
err = -ENOMEM;
break;
}
/* /*
* CP | dnode(F) | inode(DF) * CP | dnode(F) | inode(DF)
* For this case, we should not give up now. * For this case, we should not give up now.
*/ */
entry->inode = f2fs_iget(sbi->sb, ino_of_node(page)); inode = f2fs_iget(sbi->sb, ino_of_node(page));
if (IS_ERR(entry->inode)) { if (IS_ERR(inode)) {
err = PTR_ERR(entry->inode); err = PTR_ERR(inode);
kmem_cache_free(fsync_entry_slab, entry);
if (err == -ENOENT) { if (err == -ENOENT) {
err = 0; err = 0;
goto next; goto next;
} }
break; break;
} }
list_add_tail(&entry->list, head);
/* add this fsync inode to the list */
entry = add_fsync_inode(head, inode);
if (!entry) {
err = -ENOMEM;
iput(inode);
break;
}
} }
entry->blkaddr = blkaddr; entry->blkaddr = blkaddr;
if (IS_INODE(page)) { if (IS_INODE(page) && is_dent_dnode(page))
entry->last_inode = blkaddr;
if (is_dent_dnode(page))
entry->last_dentry = blkaddr; entry->last_dentry = blkaddr;
}
next: next:
/* check next segment */ /* check next segment */
blkaddr = next_blkaddr_of_node(page); blkaddr = next_blkaddr_of_node(page);
...@@ -277,11 +297,8 @@ static void destroy_fsync_dnodes(struct list_head *head) ...@@ -277,11 +297,8 @@ static void destroy_fsync_dnodes(struct list_head *head)
{ {
struct fsync_inode_entry *entry, *tmp; struct fsync_inode_entry *entry, *tmp;
list_for_each_entry_safe(entry, tmp, head, list) { list_for_each_entry_safe(entry, tmp, head, list)
iput(entry->inode); del_fsync_inode(entry);
list_del(&entry->list);
kmem_cache_free(fsync_entry_slab, entry);
}
} }
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi, static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
...@@ -444,8 +461,7 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode, ...@@ -444,8 +461,7 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
*/ */
if (dest == NEW_ADDR) { if (dest == NEW_ADDR) {
truncate_data_blocks_range(&dn, 1); truncate_data_blocks_range(&dn, 1);
err = reserve_new_block(&dn); reserve_new_block(&dn);
f2fs_bug_on(sbi, err);
continue; continue;
} }
...@@ -454,6 +470,10 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode, ...@@ -454,6 +470,10 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
if (src == NULL_ADDR) { if (src == NULL_ADDR) {
err = reserve_new_block(&dn); err = reserve_new_block(&dn);
#ifdef CONFIG_F2FS_FAULT_INJECTION
while (err)
err = reserve_new_block(&dn);
#endif
/* We should not get -ENOSPC */ /* We should not get -ENOSPC */
f2fs_bug_on(sbi, err); f2fs_bug_on(sbi, err);
} }
...@@ -486,7 +506,8 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode, ...@@ -486,7 +506,8 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
return err; return err;
} }
static int recover_data(struct f2fs_sb_info *sbi, struct list_head *head) static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
struct list_head *dir_list)
{ {
unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi)); unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
struct curseg_info *curseg; struct curseg_info *curseg;
...@@ -513,7 +534,7 @@ static int recover_data(struct f2fs_sb_info *sbi, struct list_head *head) ...@@ -513,7 +534,7 @@ static int recover_data(struct f2fs_sb_info *sbi, struct list_head *head)
break; break;
} }
entry = get_fsync_inode(head, ino_of_node(page)); entry = get_fsync_inode(inode_list, ino_of_node(page));
if (!entry) if (!entry)
goto next; goto next;
/* /*
...@@ -521,10 +542,10 @@ static int recover_data(struct f2fs_sb_info *sbi, struct list_head *head) ...@@ -521,10 +542,10 @@ static int recover_data(struct f2fs_sb_info *sbi, struct list_head *head)
* In this case, we can lose the latest inode(x). * In this case, we can lose the latest inode(x).
* So, call recover_inode for the inode update. * So, call recover_inode for the inode update.
*/ */
if (entry->last_inode == blkaddr) if (IS_INODE(page))
recover_inode(entry->inode, page); recover_inode(entry->inode, page);
if (entry->last_dentry == blkaddr) { if (entry->last_dentry == blkaddr) {
err = recover_dentry(entry->inode, page); err = recover_dentry(entry->inode, page, dir_list);
if (err) { if (err) {
f2fs_put_page(page, 1); f2fs_put_page(page, 1);
break; break;
...@@ -536,11 +557,8 @@ static int recover_data(struct f2fs_sb_info *sbi, struct list_head *head) ...@@ -536,11 +557,8 @@ static int recover_data(struct f2fs_sb_info *sbi, struct list_head *head)
break; break;
} }
if (entry->blkaddr == blkaddr) { if (entry->blkaddr == blkaddr)
iput(entry->inode); del_fsync_inode(entry);
list_del(&entry->list);
kmem_cache_free(fsync_entry_slab, entry);
}
next: next:
/* check next segment */ /* check next segment */
blkaddr = next_blkaddr_of_node(page); blkaddr = next_blkaddr_of_node(page);
...@@ -551,12 +569,14 @@ static int recover_data(struct f2fs_sb_info *sbi, struct list_head *head) ...@@ -551,12 +569,14 @@ static int recover_data(struct f2fs_sb_info *sbi, struct list_head *head)
return err; return err;
} }
int recover_fsync_data(struct f2fs_sb_info *sbi) int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
{ {
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_WARM_NODE); struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
struct list_head inode_list; struct list_head inode_list;
struct list_head dir_list;
block_t blkaddr; block_t blkaddr;
int err; int err;
int ret = 0;
bool need_writecp = false; bool need_writecp = false;
fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry", fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
...@@ -565,6 +585,7 @@ int recover_fsync_data(struct f2fs_sb_info *sbi) ...@@ -565,6 +585,7 @@ int recover_fsync_data(struct f2fs_sb_info *sbi)
return -ENOMEM; return -ENOMEM;
INIT_LIST_HEAD(&inode_list); INIT_LIST_HEAD(&inode_list);
INIT_LIST_HEAD(&dir_list);
/* prevent checkpoint */ /* prevent checkpoint */
mutex_lock(&sbi->cp_mutex); mutex_lock(&sbi->cp_mutex);
...@@ -573,21 +594,22 @@ int recover_fsync_data(struct f2fs_sb_info *sbi) ...@@ -573,21 +594,22 @@ int recover_fsync_data(struct f2fs_sb_info *sbi)
/* step #1: find fsynced inode numbers */ /* step #1: find fsynced inode numbers */
err = find_fsync_dnodes(sbi, &inode_list); err = find_fsync_dnodes(sbi, &inode_list);
if (err) if (err || list_empty(&inode_list))
goto out; goto out;
if (list_empty(&inode_list)) if (check_only) {
ret = 1;
goto out; goto out;
}
need_writecp = true; need_writecp = true;
/* step #2: recover data */ /* step #2: recover data */
err = recover_data(sbi, &inode_list); err = recover_data(sbi, &inode_list, &dir_list);
if (!err) if (!err)
f2fs_bug_on(sbi, !list_empty(&inode_list)); f2fs_bug_on(sbi, !list_empty(&inode_list));
out: out:
destroy_fsync_dnodes(&inode_list); destroy_fsync_dnodes(&inode_list);
kmem_cache_destroy(fsync_entry_slab);
/* truncate meta pages to be used by the recovery */ /* truncate meta pages to be used by the recovery */
truncate_inode_pages_range(META_MAPPING(sbi), truncate_inode_pages_range(META_MAPPING(sbi),
...@@ -625,5 +647,8 @@ int recover_fsync_data(struct f2fs_sb_info *sbi) ...@@ -625,5 +647,8 @@ int recover_fsync_data(struct f2fs_sb_info *sbi)
} else { } else {
mutex_unlock(&sbi->cp_mutex); mutex_unlock(&sbi->cp_mutex);
} }
return err;
destroy_fsync_dnodes(&dir_list);
kmem_cache_destroy(fsync_entry_slab);
return ret ? ret: err;
} }
...@@ -223,9 +223,11 @@ static int __revoke_inmem_pages(struct inode *inode, ...@@ -223,9 +223,11 @@ static int __revoke_inmem_pages(struct inode *inode,
f2fs_put_dnode(&dn); f2fs_put_dnode(&dn);
} }
next: next:
/* we don't need to invalidate this in the successful status */
if (drop || recover)
ClearPageUptodate(page); ClearPageUptodate(page);
set_page_private(page, 0); set_page_private(page, 0);
ClearPageUptodate(page); ClearPagePrivate(page);
f2fs_put_page(page, 1); f2fs_put_page(page, 1);
list_del(&cur->list); list_del(&cur->list);
...@@ -239,6 +241,8 @@ void drop_inmem_pages(struct inode *inode) ...@@ -239,6 +241,8 @@ void drop_inmem_pages(struct inode *inode)
{ {
struct f2fs_inode_info *fi = F2FS_I(inode); struct f2fs_inode_info *fi = F2FS_I(inode);
clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
mutex_lock(&fi->inmem_lock); mutex_lock(&fi->inmem_lock);
__revoke_inmem_pages(inode, &fi->inmem_pages, true, false); __revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
mutex_unlock(&fi->inmem_lock); mutex_unlock(&fi->inmem_lock);
......
...@@ -158,16 +158,17 @@ struct victim_sel_policy { ...@@ -158,16 +158,17 @@ struct victim_sel_policy {
}; };
struct seg_entry { struct seg_entry {
unsigned short valid_blocks; /* # of valid blocks */ unsigned int type:6; /* segment type like CURSEG_XXX_TYPE */
unsigned int valid_blocks:10; /* # of valid blocks */
unsigned int ckpt_valid_blocks:10; /* # of valid blocks last cp */
unsigned int padding:6; /* padding */
unsigned char *cur_valid_map; /* validity bitmap of blocks */ unsigned char *cur_valid_map; /* validity bitmap of blocks */
/* /*
* # of valid blocks and the validity bitmap stored in the last
* checkpoint pack. This information is used by the SSR mode.
*/ */
unsigned short ckpt_valid_blocks; unsigned char *ckpt_valid_map; /* validity bitmap of blocks last cp */
unsigned char *ckpt_valid_map;
unsigned char *discard_map; unsigned char *discard_map;
unsigned char type; /* segment type like CURSEG_XXX_TYPE */
unsigned long long mtime; /* modification time of the segment */ unsigned long long mtime; /* modification time of the segment */
}; };
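The repacking above is a size optimization: the separate short/char counters collapse into a single 32-bit bitfield word, and 10 bits are enough because a 2 MB segment holds 512 4 KB blocks, while 6 bits cover the CURSEG_* types. A standalone sketch with hypothetical struct names that demonstrates the difference:

#include <stdio.h>

/* layouts mirroring the old and new counter fields of struct seg_entry */
struct old_counters {
	unsigned short valid_blocks;
	unsigned short ckpt_valid_blocks;
	unsigned char  type;
};

struct new_counters {
	unsigned int type:6;
	unsigned int valid_blocks:10;
	unsigned int ckpt_valid_blocks:10;
	unsigned int padding:6;
};

int main(void)
{
	/* typically prints "old: 6 bytes, new: 4 bytes" on common ABIs */
	printf("old: %zu bytes, new: %zu bytes\n",
	       sizeof(struct old_counters), sizeof(struct new_counters));
	return 0;
}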
......
...@@ -39,6 +39,30 @@ static struct proc_dir_entry *f2fs_proc_root; ...@@ -39,6 +39,30 @@ static struct proc_dir_entry *f2fs_proc_root;
static struct kmem_cache *f2fs_inode_cachep; static struct kmem_cache *f2fs_inode_cachep;
static struct kset *f2fs_kset; static struct kset *f2fs_kset;
#ifdef CONFIG_F2FS_FAULT_INJECTION
struct f2fs_fault_info f2fs_fault;
char *fault_name[FAULT_MAX] = {
[FAULT_KMALLOC] = "kmalloc",
[FAULT_PAGE_ALLOC] = "page alloc",
[FAULT_ALLOC_NID] = "alloc nid",
[FAULT_ORPHAN] = "orphan",
[FAULT_BLOCK] = "no more block",
[FAULT_DIR_DEPTH] = "too big dir depth",
};
static void f2fs_build_fault_attr(unsigned int rate)
{
if (rate) {
atomic_set(&f2fs_fault.inject_ops, 0);
f2fs_fault.inject_rate = rate;
f2fs_fault.inject_type = (1 << FAULT_MAX) - 1;
} else {
memset(&f2fs_fault, 0, sizeof(struct f2fs_fault_info));
}
}
#endif
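A minimal sketch of how the knobs initialized above are plausibly consumed, for orientation only: the real time_to_inject() helper lives in f2fs.h and is not part of this hunk, and inject_ops is assumed to be an atomic_t based on the atomic_set() call above. Call sites added elsewhere in this patch (for example time_to_inject(FAULT_ALLOC_NID) in alloc_nid()) check whether their fault type is armed and trip roughly once every inject_rate calls; the new fault_injection=%u mount option parsed further down feeds its argument straight into f2fs_build_fault_attr(), arming every fault type at that rate.

static inline bool time_to_inject(int type)
{
	if (!f2fs_fault.inject_rate)
		return false;			/* injection disabled */
	if (!(f2fs_fault.inject_type & (1 << type)))
		return false;			/* this fault type is not armed */
	atomic_inc(&f2fs_fault.inject_ops);
	if (atomic_read(&f2fs_fault.inject_ops) >= f2fs_fault.inject_rate) {
		atomic_set(&f2fs_fault.inject_ops, 0);
		return true;			/* fail this call on purpose */
	}
	return false;
}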
/* f2fs-wide shrinker description */ /* f2fs-wide shrinker description */
static struct shrinker f2fs_shrinker_info = { static struct shrinker f2fs_shrinker_info = {
.scan_objects = f2fs_shrink_scan, .scan_objects = f2fs_shrink_scan,
...@@ -68,6 +92,7 @@ enum { ...@@ -68,6 +92,7 @@ enum {
Opt_noextent_cache, Opt_noextent_cache,
Opt_noinline_data, Opt_noinline_data,
Opt_data_flush, Opt_data_flush,
Opt_fault_injection,
Opt_err, Opt_err,
}; };
...@@ -93,6 +118,7 @@ static match_table_t f2fs_tokens = { ...@@ -93,6 +118,7 @@ static match_table_t f2fs_tokens = {
{Opt_noextent_cache, "noextent_cache"}, {Opt_noextent_cache, "noextent_cache"},
{Opt_noinline_data, "noinline_data"}, {Opt_noinline_data, "noinline_data"},
{Opt_data_flush, "data_flush"}, {Opt_data_flush, "data_flush"},
{Opt_fault_injection, "fault_injection=%u"},
{Opt_err, NULL}, {Opt_err, NULL},
}; };
...@@ -102,6 +128,10 @@ enum { ...@@ -102,6 +128,10 @@ enum {
SM_INFO, /* struct f2fs_sm_info */ SM_INFO, /* struct f2fs_sm_info */
NM_INFO, /* struct f2fs_nm_info */ NM_INFO, /* struct f2fs_nm_info */
F2FS_SBI, /* struct f2fs_sb_info */ F2FS_SBI, /* struct f2fs_sb_info */
#ifdef CONFIG_F2FS_FAULT_INJECTION
FAULT_INFO_RATE, /* struct f2fs_fault_info */
FAULT_INFO_TYPE, /* struct f2fs_fault_info */
#endif
}; };
struct f2fs_attr { struct f2fs_attr {
...@@ -123,6 +153,11 @@ static unsigned char *__struct_ptr(struct f2fs_sb_info *sbi, int struct_type) ...@@ -123,6 +153,11 @@ static unsigned char *__struct_ptr(struct f2fs_sb_info *sbi, int struct_type)
return (unsigned char *)NM_I(sbi); return (unsigned char *)NM_I(sbi);
else if (struct_type == F2FS_SBI) else if (struct_type == F2FS_SBI)
return (unsigned char *)sbi; return (unsigned char *)sbi;
#ifdef CONFIG_F2FS_FAULT_INJECTION
else if (struct_type == FAULT_INFO_RATE ||
struct_type == FAULT_INFO_TYPE)
return (unsigned char *)&f2fs_fault;
#endif
return NULL; return NULL;
} }
...@@ -172,6 +207,10 @@ static ssize_t f2fs_sbi_store(struct f2fs_attr *a, ...@@ -172,6 +207,10 @@ static ssize_t f2fs_sbi_store(struct f2fs_attr *a,
ret = kstrtoul(skip_spaces(buf), 0, &t); ret = kstrtoul(skip_spaces(buf), 0, &t);
if (ret < 0) if (ret < 0)
return ret; return ret;
#ifdef CONFIG_F2FS_FAULT_INJECTION
if (a->struct_type == FAULT_INFO_TYPE && t >= (1 << FAULT_MAX))
return -EINVAL;
#endif
*ui = t; *ui = t;
return count; return count;
} }
...@@ -237,6 +276,10 @@ F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, max_victim_search, max_victim_search); ...@@ -237,6 +276,10 @@ F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, max_victim_search, max_victim_search);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, dir_level, dir_level); F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, dir_level, dir_level);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, cp_interval, interval_time[CP_TIME]); F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, cp_interval, interval_time[CP_TIME]);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, idle_interval, interval_time[REQ_TIME]); F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, idle_interval, interval_time[REQ_TIME]);
#ifdef CONFIG_F2FS_FAULT_INJECTION
F2FS_RW_ATTR(FAULT_INFO_RATE, f2fs_fault_info, inject_rate, inject_rate);
F2FS_RW_ATTR(FAULT_INFO_TYPE, f2fs_fault_info, inject_type, inject_type);
#endif
F2FS_GENERAL_RO_ATTR(lifetime_write_kbytes); F2FS_GENERAL_RO_ATTR(lifetime_write_kbytes);
#define ATTR_LIST(name) (&f2fs_attr_##name.attr) #define ATTR_LIST(name) (&f2fs_attr_##name.attr)
...@@ -273,6 +316,22 @@ static struct kobj_type f2fs_ktype = { ...@@ -273,6 +316,22 @@ static struct kobj_type f2fs_ktype = {
.release = f2fs_sb_release, .release = f2fs_sb_release,
}; };
#ifdef CONFIG_F2FS_FAULT_INJECTION
/* sysfs for f2fs fault injection */
static struct kobject f2fs_fault_inject;
static struct attribute *f2fs_fault_attrs[] = {
ATTR_LIST(inject_rate),
ATTR_LIST(inject_type),
NULL
};
static struct kobj_type f2fs_fault_ktype = {
.default_attrs = f2fs_fault_attrs,
.sysfs_ops = &f2fs_attr_ops,
};
#endif
void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...) void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...)
{ {
struct va_format vaf; struct va_format vaf;
...@@ -300,6 +359,10 @@ static int parse_options(struct super_block *sb, char *options) ...@@ -300,6 +359,10 @@ static int parse_options(struct super_block *sb, char *options)
char *p, *name; char *p, *name;
int arg = 0; int arg = 0;
#ifdef CONFIG_F2FS_FAULT_INJECTION
f2fs_build_fault_attr(0);
#endif
if (!options) if (!options)
return 0; return 0;
...@@ -433,6 +496,16 @@ static int parse_options(struct super_block *sb, char *options) ...@@ -433,6 +496,16 @@ static int parse_options(struct super_block *sb, char *options)
case Opt_data_flush: case Opt_data_flush:
set_opt(sbi, DATA_FLUSH); set_opt(sbi, DATA_FLUSH);
break; break;
case Opt_fault_injection:
if (args->from && match_int(args, &arg))
return -EINVAL;
#ifdef CONFIG_F2FS_FAULT_INJECTION
f2fs_build_fault_attr(arg);
#else
f2fs_msg(sb, KERN_INFO,
"FAULT_INJECTION was not selected");
#endif
break;
default: default:
f2fs_msg(sb, KERN_ERR, f2fs_msg(sb, KERN_ERR,
"Unrecognized mount option \"%s\" or missing value", "Unrecognized mount option \"%s\" or missing value",
...@@ -453,9 +526,13 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb) ...@@ -453,9 +526,13 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb)
init_once((void *) fi); init_once((void *) fi);
if (percpu_counter_init(&fi->dirty_pages, 0, GFP_NOFS)) {
kmem_cache_free(f2fs_inode_cachep, fi);
return NULL;
}
/* Initialize f2fs-specific inode info */ /* Initialize f2fs-specific inode info */
fi->vfs_inode.i_version = 1; fi->vfs_inode.i_version = 1;
atomic_set(&fi->dirty_pages, 0);
fi->i_current_depth = 1; fi->i_current_depth = 1;
fi->i_advise = 0; fi->i_advise = 0;
init_rwsem(&fi->i_sem); init_rwsem(&fi->i_sem);
...@@ -530,15 +607,27 @@ static void f2fs_i_callback(struct rcu_head *head) ...@@ -530,15 +607,27 @@ static void f2fs_i_callback(struct rcu_head *head)
static void f2fs_destroy_inode(struct inode *inode) static void f2fs_destroy_inode(struct inode *inode)
{ {
percpu_counter_destroy(&F2FS_I(inode)->dirty_pages);
call_rcu(&inode->i_rcu, f2fs_i_callback); call_rcu(&inode->i_rcu, f2fs_i_callback);
} }
static void destroy_percpu_info(struct f2fs_sb_info *sbi)
{
int i;
for (i = 0; i < NR_COUNT_TYPE; i++)
percpu_counter_destroy(&sbi->nr_pages[i]);
percpu_counter_destroy(&sbi->alloc_valid_block_count);
percpu_counter_destroy(&sbi->total_valid_inode_count);
}
static void f2fs_put_super(struct super_block *sb) static void f2fs_put_super(struct super_block *sb)
{ {
struct f2fs_sb_info *sbi = F2FS_SB(sb); struct f2fs_sb_info *sbi = F2FS_SB(sb);
if (sbi->s_proc) { if (sbi->s_proc) {
remove_proc_entry("segment_info", sbi->s_proc); remove_proc_entry("segment_info", sbi->s_proc);
remove_proc_entry("segment_bits", sbi->s_proc);
remove_proc_entry(sb->s_id, f2fs_proc_root); remove_proc_entry(sb->s_id, f2fs_proc_root);
} }
kobject_del(&sbi->s_kobj); kobject_del(&sbi->s_kobj);
...@@ -568,14 +657,13 @@ static void f2fs_put_super(struct super_block *sb) ...@@ -568,14 +657,13 @@ static void f2fs_put_super(struct super_block *sb)
* normally superblock is clean, so we need to release this. * normally superblock is clean, so we need to release this.
* In addition, EIO will skip do checkpoint, we need this as well. * In addition, EIO will skip do checkpoint, we need this as well.
*/ */
release_ino_entry(sbi); release_ino_entry(sbi, true);
release_discard_addrs(sbi); release_discard_addrs(sbi);
f2fs_leave_shrinker(sbi); f2fs_leave_shrinker(sbi);
mutex_unlock(&sbi->umount_mutex); mutex_unlock(&sbi->umount_mutex);
/* our cp_error case, we can wait for any writeback page */ /* our cp_error case, we can wait for any writeback page */
if (get_pages(sbi, F2FS_WRITEBACK))
f2fs_flush_merged_bios(sbi); f2fs_flush_merged_bios(sbi);
iput(sbi->node_inode); iput(sbi->node_inode);
...@@ -593,6 +681,8 @@ static void f2fs_put_super(struct super_block *sb) ...@@ -593,6 +681,8 @@ static void f2fs_put_super(struct super_block *sb)
if (sbi->s_chksum_driver) if (sbi->s_chksum_driver)
crypto_free_shash(sbi->s_chksum_driver); crypto_free_shash(sbi->s_chksum_driver);
kfree(sbi->raw_super); kfree(sbi->raw_super);
destroy_percpu_info(sbi);
kfree(sbi); kfree(sbi);
} }
...@@ -745,19 +835,47 @@ static int segment_info_seq_show(struct seq_file *seq, void *offset) ...@@ -745,19 +835,47 @@ static int segment_info_seq_show(struct seq_file *seq, void *offset)
return 0; return 0;
} }
static int segment_info_open_fs(struct inode *inode, struct file *file) static int segment_bits_seq_show(struct seq_file *seq, void *offset)
{ {
return single_open(file, segment_info_seq_show, PDE_DATA(inode)); struct super_block *sb = seq->private;
struct f2fs_sb_info *sbi = F2FS_SB(sb);
unsigned int total_segs =
le32_to_cpu(sbi->raw_super->segment_count_main);
int i, j;
seq_puts(seq, "format: segment_type|valid_blocks|bitmaps\n"
"segment_type(0:HD, 1:WD, 2:CD, 3:HN, 4:WN, 5:CN)\n");
for (i = 0; i < total_segs; i++) {
struct seg_entry *se = get_seg_entry(sbi, i);
seq_printf(seq, "%-10d", i);
seq_printf(seq, "%d|%-3u|", se->type,
get_valid_blocks(sbi, i, 1));
for (j = 0; j < SIT_VBLOCK_MAP_SIZE; j++)
seq_printf(seq, "%x ", se->cur_valid_map[j]);
seq_putc(seq, '\n');
}
return 0;
} }
static const struct file_operations f2fs_seq_segment_info_fops = { #define F2FS_PROC_FILE_DEF(_name) \
.owner = THIS_MODULE, static int _name##_open_fs(struct inode *inode, struct file *file) \
.open = segment_info_open_fs, { \
.read = seq_read, return single_open(file, _name##_seq_show, PDE_DATA(inode)); \
.llseek = seq_lseek, } \
.release = single_release, \
static const struct file_operations f2fs_seq_##_name##_fops = { \
.owner = THIS_MODULE, \
.open = _name##_open_fs, \
.read = seq_read, \
.llseek = seq_lseek, \
.release = single_release, \
}; };
F2FS_PROC_FILE_DEF(segment_info);
F2FS_PROC_FILE_DEF(segment_bits);
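For readability, F2FS_PROC_FILE_DEF(segment_bits) above expands mechanically to the code below (shown only as an illustration of the macro, not as extra code to add); this is what lets proc_create_data() later in this patch reference &f2fs_seq_segment_bits_fops.

static int segment_bits_open_fs(struct inode *inode, struct file *file)
{
	return single_open(file, segment_bits_seq_show, PDE_DATA(inode));
}

static const struct file_operations f2fs_seq_segment_bits_fops = {
	.owner		= THIS_MODULE,
	.open		= segment_bits_open_fs,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};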
static void default_options(struct f2fs_sb_info *sbi) static void default_options(struct f2fs_sb_info *sbi)
{ {
/* init some FS parameters */ /* init some FS parameters */
...@@ -791,13 +909,15 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data) ...@@ -791,13 +909,15 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
org_mount_opt = sbi->mount_opt; org_mount_opt = sbi->mount_opt;
active_logs = sbi->active_logs; active_logs = sbi->active_logs;
if (*flags & MS_RDONLY) { /* recover superblocks we couldn't write due to previous RO mount */
set_opt(sbi, FASTBOOT); if (!(*flags & MS_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
set_sbi_flag(sbi, SBI_IS_DIRTY); err = f2fs_commit_super(sbi, false);
f2fs_msg(sb, KERN_INFO,
"Try to recover all the superblocks, ret: %d", err);
if (!err)
clear_sbi_flag(sbi, SBI_NEED_SB_WRITE);
} }
sync_filesystem(sb);
sbi->mount_opt.opt = 0; sbi->mount_opt.opt = 0;
default_options(sbi); default_options(sbi);
...@@ -829,7 +949,6 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data) ...@@ -829,7 +949,6 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
if ((*flags & MS_RDONLY) || !test_opt(sbi, BG_GC)) { if ((*flags & MS_RDONLY) || !test_opt(sbi, BG_GC)) {
if (sbi->gc_thread) { if (sbi->gc_thread) {
stop_gc_thread(sbi); stop_gc_thread(sbi);
f2fs_sync_fs(sb, 1);
need_restart_gc = true; need_restart_gc = true;
} }
} else if (!sbi->gc_thread) { } else if (!sbi->gc_thread) {
...@@ -839,6 +958,16 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data) ...@@ -839,6 +958,16 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
need_stop_gc = true; need_stop_gc = true;
} }
if (*flags & MS_RDONLY) {
writeback_inodes_sb(sb, WB_REASON_SYNC);
sync_inodes_sb(sb);
set_sbi_flag(sbi, SBI_IS_DIRTY);
set_sbi_flag(sbi, SBI_IS_CLOSE);
f2fs_sync_fs(sb, 1);
clear_sbi_flag(sbi, SBI_IS_CLOSE);
}
/* /*
* We stop issue flush thread if FS is mounted as RO * We stop issue flush thread if FS is mounted as RO
* or if flush_merge is not passed in mount option. * or if flush_merge is not passed in mount option.
...@@ -854,6 +983,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data) ...@@ -854,6 +983,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
/* Update the POSIXACL Flag */ /* Update the POSIXACL Flag */
sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0); (test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
return 0; return 0;
restore_gc: restore_gc:
if (need_restart_gc) { if (need_restart_gc) {
...@@ -893,6 +1023,12 @@ static int f2fs_get_context(struct inode *inode, void *ctx, size_t len) ...@@ -893,6 +1023,12 @@ static int f2fs_get_context(struct inode *inode, void *ctx, size_t len)
ctx, len, NULL); ctx, len, NULL);
} }
static int f2fs_key_prefix(struct inode *inode, u8 **key)
{
*key = F2FS_I_SB(inode)->key_prefix;
return F2FS_I_SB(inode)->key_prefix_size;
}
static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len, static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
void *fs_data) void *fs_data)
{ {
...@@ -909,6 +1045,7 @@ static unsigned f2fs_max_namelen(struct inode *inode) ...@@ -909,6 +1045,7 @@ static unsigned f2fs_max_namelen(struct inode *inode)
static struct fscrypt_operations f2fs_cryptops = { static struct fscrypt_operations f2fs_cryptops = {
.get_context = f2fs_get_context, .get_context = f2fs_get_context,
.key_prefix = f2fs_key_prefix,
.set_context = f2fs_set_context, .set_context = f2fs_set_context,
.is_encrypted = f2fs_encrypted_inode, .is_encrypted = f2fs_encrypted_inode,
.empty_dir = f2fs_empty_dir, .empty_dir = f2fs_empty_dir,
...@@ -998,11 +1135,12 @@ static int __f2fs_commit_super(struct buffer_head *bh, ...@@ -998,11 +1135,12 @@ static int __f2fs_commit_super(struct buffer_head *bh,
return __sync_dirty_buffer(bh, WRITE_FLUSH_FUA); return __sync_dirty_buffer(bh, WRITE_FLUSH_FUA);
} }
static inline bool sanity_check_area_boundary(struct super_block *sb, static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
struct buffer_head *bh) struct buffer_head *bh)
{ {
struct f2fs_super_block *raw_super = (struct f2fs_super_block *) struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
(bh->b_data + F2FS_SUPER_OFFSET); (bh->b_data + F2FS_SUPER_OFFSET);
struct super_block *sb = sbi->sb;
u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr); u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr); u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr); u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
...@@ -1081,6 +1219,7 @@ static inline bool sanity_check_area_boundary(struct super_block *sb, ...@@ -1081,6 +1219,7 @@ static inline bool sanity_check_area_boundary(struct super_block *sb,
segment0_blkaddr) >> log_blocks_per_seg); segment0_blkaddr) >> log_blocks_per_seg);
if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) { if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) {
set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
res = "internally"; res = "internally";
} else { } else {
err = __f2fs_commit_super(bh, NULL); err = __f2fs_commit_super(bh, NULL);
...@@ -1098,11 +1237,12 @@ static inline bool sanity_check_area_boundary(struct super_block *sb, ...@@ -1098,11 +1237,12 @@ static inline bool sanity_check_area_boundary(struct super_block *sb,
return false; return false;
} }
static int sanity_check_raw_super(struct super_block *sb, static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
struct buffer_head *bh) struct buffer_head *bh)
{ {
struct f2fs_super_block *raw_super = (struct f2fs_super_block *) struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
(bh->b_data + F2FS_SUPER_OFFSET); (bh->b_data + F2FS_SUPER_OFFSET);
struct super_block *sb = sbi->sb;
unsigned int blocksize; unsigned int blocksize;
if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) { if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
...@@ -1169,7 +1309,7 @@ static int sanity_check_raw_super(struct super_block *sb, ...@@ -1169,7 +1309,7 @@ static int sanity_check_raw_super(struct super_block *sb,
} }
/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */ /* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
if (sanity_check_area_boundary(sb, bh)) if (sanity_check_area_boundary(sbi, bh))
return 1; return 1;
return 0; return 0;
...@@ -1201,7 +1341,6 @@ int sanity_check_ckpt(struct f2fs_sb_info *sbi) ...@@ -1201,7 +1341,6 @@ int sanity_check_ckpt(struct f2fs_sb_info *sbi)
static void init_sb_info(struct f2fs_sb_info *sbi) static void init_sb_info(struct f2fs_sb_info *sbi)
{ {
struct f2fs_super_block *raw_super = sbi->raw_super; struct f2fs_super_block *raw_super = sbi->raw_super;
int i;
sbi->log_sectors_per_block = sbi->log_sectors_per_block =
le32_to_cpu(raw_super->log_sectors_per_block); le32_to_cpu(raw_super->log_sectors_per_block);
...@@ -1221,9 +1360,6 @@ static void init_sb_info(struct f2fs_sb_info *sbi) ...@@ -1221,9 +1360,6 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
sbi->cur_victim_sec = NULL_SECNO; sbi->cur_victim_sec = NULL_SECNO;
sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH; sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;
for (i = 0; i < NR_COUNT_TYPE; i++)
atomic_set(&sbi->nr_pages[i], 0);
sbi->dir_level = DEF_DIR_LEVEL; sbi->dir_level = DEF_DIR_LEVEL;
sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL; sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL; sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL;
...@@ -1231,6 +1367,30 @@ static void init_sb_info(struct f2fs_sb_info *sbi) ...@@ -1231,6 +1367,30 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
INIT_LIST_HEAD(&sbi->s_list); INIT_LIST_HEAD(&sbi->s_list);
mutex_init(&sbi->umount_mutex); mutex_init(&sbi->umount_mutex);
#ifdef CONFIG_F2FS_FS_ENCRYPTION
memcpy(sbi->key_prefix, F2FS_KEY_DESC_PREFIX,
F2FS_KEY_DESC_PREFIX_SIZE);
sbi->key_prefix_size = F2FS_KEY_DESC_PREFIX_SIZE;
#endif
}
static int init_percpu_info(struct f2fs_sb_info *sbi)
{
int i, err;
for (i = 0; i < NR_COUNT_TYPE; i++) {
err = percpu_counter_init(&sbi->nr_pages[i], 0, GFP_KERNEL);
if (err)
return err;
}
err = percpu_counter_init(&sbi->alloc_valid_block_count, 0, GFP_KERNEL);
if (err)
return err;
return percpu_counter_init(&sbi->total_valid_inode_count, 0,
GFP_KERNEL);
} }
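A minimal sketch of the intended lifecycle of these per-CPU counters, assuming the two wrapper names below (they approximate the f2fs helpers rather than reproduce them): increments stay cheap and contention-free on hot paths, and an exact value is only materialized by summing all CPUs, as space_for_roll_forward() now does with percpu_counter_sum_positive() earlier in this series. Teardown is the destroy_percpu_info() pairing shown above.

/* hot path: per-CPU increment, no shared cacheline bouncing */
static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
{
	percpu_counter_inc(&sbi->nr_pages[count_type]);
}

/* slow path: fold every CPU's delta into one non-negative total */
static inline s64 get_pages(struct f2fs_sb_info *sbi, int count_type)
{
	return percpu_counter_sum_positive(&sbi->nr_pages[count_type]);
}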
/* /*
...@@ -1239,10 +1399,11 @@ static void init_sb_info(struct f2fs_sb_info *sbi) ...@@ -1239,10 +1399,11 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
* to get the first valid one. If any one of them is broken, we pass * to get the first valid one. If any one of them is broken, we pass
* them recovery flag back to the caller. * them recovery flag back to the caller.
*/ */
static int read_raw_super_block(struct super_block *sb, static int read_raw_super_block(struct f2fs_sb_info *sbi,
struct f2fs_super_block **raw_super, struct f2fs_super_block **raw_super,
int *valid_super_block, int *recovery) int *valid_super_block, int *recovery)
{ {
struct super_block *sb = sbi->sb;
int block; int block;
struct buffer_head *bh; struct buffer_head *bh;
struct f2fs_super_block *super; struct f2fs_super_block *super;
...@@ -1262,7 +1423,7 @@ static int read_raw_super_block(struct super_block *sb, ...@@ -1262,7 +1423,7 @@ static int read_raw_super_block(struct super_block *sb,
} }
/* sanity checking of raw super */ /* sanity checking of raw super */
if (sanity_check_raw_super(sb, bh)) { if (sanity_check_raw_super(sbi, bh)) {
f2fs_msg(sb, KERN_ERR, f2fs_msg(sb, KERN_ERR,
"Can't find valid F2FS filesystem in %dth superblock", "Can't find valid F2FS filesystem in %dth superblock",
block + 1); block + 1);
...@@ -1298,6 +1459,12 @@ int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover) ...@@ -1298,6 +1459,12 @@ int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
struct buffer_head *bh; struct buffer_head *bh;
int err; int err;
if ((recover && f2fs_readonly(sbi->sb)) ||
bdev_read_only(sbi->sb->s_bdev)) {
set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
return -EROFS;
}
/* write back-up superblock first */ /* write back-up superblock first */
bh = sb_getblk(sbi->sb, sbi->valid_super_block ? 0: 1); bh = sb_getblk(sbi->sb, sbi->valid_super_block ? 0: 1);
if (!bh) if (!bh)
...@@ -1323,7 +1490,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) ...@@ -1323,7 +1490,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
struct f2fs_sb_info *sbi; struct f2fs_sb_info *sbi;
struct f2fs_super_block *raw_super; struct f2fs_super_block *raw_super;
struct inode *root; struct inode *root;
long err; int err;
bool retry = true, need_fsck = false; bool retry = true, need_fsck = false;
char *options = NULL; char *options = NULL;
int recovery, i, valid_super_block; int recovery, i, valid_super_block;
...@@ -1340,6 +1507,8 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) ...@@ -1340,6 +1507,8 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
if (!sbi) if (!sbi)
return -ENOMEM; return -ENOMEM;
sbi->sb = sb;
/* Load the checksum driver */ /* Load the checksum driver */
sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0); sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
if (IS_ERR(sbi->s_chksum_driver)) { if (IS_ERR(sbi->s_chksum_driver)) {
...@@ -1355,7 +1524,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) ...@@ -1355,7 +1524,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
goto free_sbi; goto free_sbi;
} }
err = read_raw_super_block(sb, &raw_super, &valid_super_block, err = read_raw_super_block(sbi, &raw_super, &valid_super_block,
&recovery); &recovery);
if (err) if (err)
goto free_sbi; goto free_sbi;
...@@ -1390,7 +1559,6 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) ...@@ -1390,7 +1559,6 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid)); memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
/* init f2fs-specific super block info */ /* init f2fs-specific super block info */
sbi->sb = sb;
sbi->raw_super = raw_super; sbi->raw_super = raw_super;
sbi->valid_super_block = valid_super_block; sbi->valid_super_block = valid_super_block;
mutex_init(&sbi->gc_mutex); mutex_init(&sbi->gc_mutex);
...@@ -1415,6 +1583,10 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) ...@@ -1415,6 +1583,10 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
init_waitqueue_head(&sbi->cp_wait); init_waitqueue_head(&sbi->cp_wait);
init_sb_info(sbi); init_sb_info(sbi);
err = init_percpu_info(sbi);
if (err)
goto free_options;
/* get an inode for meta space */ /* get an inode for meta space */
sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi)); sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
if (IS_ERR(sbi->meta_inode)) { if (IS_ERR(sbi->meta_inode)) {
...@@ -1431,13 +1603,13 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) ...@@ -1431,13 +1603,13 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
sbi->total_valid_node_count = sbi->total_valid_node_count =
le32_to_cpu(sbi->ckpt->valid_node_count); le32_to_cpu(sbi->ckpt->valid_node_count);
sbi->total_valid_inode_count = percpu_counter_set(&sbi->total_valid_inode_count,
le32_to_cpu(sbi->ckpt->valid_inode_count); le32_to_cpu(sbi->ckpt->valid_inode_count));
sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count); sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
sbi->total_valid_block_count = sbi->total_valid_block_count =
le64_to_cpu(sbi->ckpt->valid_block_count); le64_to_cpu(sbi->ckpt->valid_block_count);
sbi->last_valid_block_count = sbi->total_valid_block_count; sbi->last_valid_block_count = sbi->total_valid_block_count;
sbi->alloc_valid_block_count = 0;
for (i = 0; i < NR_INODE_TYPE; i++) { for (i = 0; i < NR_INODE_TYPE; i++) {
INIT_LIST_HEAD(&sbi->inode_list[i]); INIT_LIST_HEAD(&sbi->inode_list[i]);
spin_lock_init(&sbi->inode_lock[i]); spin_lock_init(&sbi->inode_lock[i]);
...@@ -1515,9 +1687,12 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) ...@@ -1515,9 +1687,12 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
if (f2fs_proc_root) if (f2fs_proc_root)
sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root); sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);
if (sbi->s_proc) if (sbi->s_proc) {
proc_create_data("segment_info", S_IRUGO, sbi->s_proc, proc_create_data("segment_info", S_IRUGO, sbi->s_proc,
&f2fs_seq_segment_info_fops, sb); &f2fs_seq_segment_info_fops, sb);
proc_create_data("segment_bits", S_IRUGO, sbi->s_proc,
&f2fs_seq_segment_bits_fops, sb);
}
sbi->s_kobj.kset = f2fs_kset; sbi->s_kobj.kset = f2fs_kset;
init_completion(&sbi->s_kobj_unregister); init_completion(&sbi->s_kobj_unregister);
...@@ -1541,14 +1716,24 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) ...@@ -1541,14 +1716,24 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
if (need_fsck) if (need_fsck)
set_sbi_flag(sbi, SBI_NEED_FSCK); set_sbi_flag(sbi, SBI_NEED_FSCK);
err = recover_fsync_data(sbi); err = recover_fsync_data(sbi, false);
if (err) { if (err < 0) {
need_fsck = true; need_fsck = true;
f2fs_msg(sb, KERN_ERR, f2fs_msg(sb, KERN_ERR,
"Cannot recover all fsync data errno=%ld", err); "Cannot recover all fsync data errno=%d", err);
goto free_kobj;
}
} else {
err = recover_fsync_data(sbi, true);
if (!f2fs_readonly(sb) && err > 0) {
err = -EINVAL;
f2fs_msg(sb, KERN_ERR,
"Need to recover fsync data");
goto free_kobj; goto free_kobj;
} }
} }
/* recover_fsync_data() cleared this already */ /* recover_fsync_data() cleared this already */
clear_sbi_flag(sbi, SBI_POR_DOING); clear_sbi_flag(sbi, SBI_POR_DOING);
...@@ -1565,10 +1750,10 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) ...@@ -1565,10 +1750,10 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
kfree(options); kfree(options);
/* recover broken superblock */ /* recover broken superblock */
if (recovery && !f2fs_readonly(sb) && !bdev_read_only(sb->s_bdev)) { if (recovery) {
err = f2fs_commit_super(sbi, true); err = f2fs_commit_super(sbi, true);
f2fs_msg(sb, KERN_INFO, f2fs_msg(sb, KERN_INFO,
"Try to recover %dth superblock, ret: %ld", "Try to recover %dth superblock, ret: %d",
sbi->valid_super_block ? 1 : 2, err); sbi->valid_super_block ? 1 : 2, err);
} }
...@@ -1583,6 +1768,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) ...@@ -1583,6 +1768,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
free_proc: free_proc:
if (sbi->s_proc) { if (sbi->s_proc) {
remove_proc_entry("segment_info", sbi->s_proc); remove_proc_entry("segment_info", sbi->s_proc);
remove_proc_entry("segment_bits", sbi->s_proc);
remove_proc_entry(sb->s_id, f2fs_proc_root); remove_proc_entry(sb->s_id, f2fs_proc_root);
} }
f2fs_destroy_stats(sbi); f2fs_destroy_stats(sbi);
...@@ -1603,6 +1789,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) ...@@ -1603,6 +1789,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
make_bad_inode(sbi->meta_inode); make_bad_inode(sbi->meta_inode);
iput(sbi->meta_inode); iput(sbi->meta_inode);
free_options: free_options:
destroy_percpu_info(sbi);
kfree(options); kfree(options);
free_sb_buf: free_sb_buf:
kfree(raw_super); kfree(raw_super);
...@@ -1688,6 +1875,16 @@ static int __init init_f2fs_fs(void) ...@@ -1688,6 +1875,16 @@ static int __init init_f2fs_fs(void)
err = -ENOMEM; err = -ENOMEM;
goto free_extent_cache; goto free_extent_cache;
} }
#ifdef CONFIG_F2FS_FAULT_INJECTION
f2fs_fault_inject.kset = f2fs_kset;
f2fs_build_fault_attr(0);
err = kobject_init_and_add(&f2fs_fault_inject, &f2fs_fault_ktype,
NULL, "fault_injection");
if (err) {
f2fs_fault_inject.kset = NULL;
goto free_kset;
}
#endif
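Assuming f2fs_kset is the kset this module already registers as "f2fs" under /sys/fs (that setup is outside this hunk), the kobject added here surfaces the two attributes as /sys/fs/f2fs/fault_injection/inject_rate and /sys/fs/f2fs/fault_injection/inject_type, so the injection rate and the set of armed fault types can be tuned at runtime without remounting.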
err = register_shrinker(&f2fs_shrinker_info); err = register_shrinker(&f2fs_shrinker_info);
if (err) if (err)
goto free_kset; goto free_kset;
...@@ -1706,6 +1903,10 @@ static int __init init_f2fs_fs(void) ...@@ -1706,6 +1903,10 @@ static int __init init_f2fs_fs(void)
free_shrinker: free_shrinker:
unregister_shrinker(&f2fs_shrinker_info); unregister_shrinker(&f2fs_shrinker_info);
free_kset: free_kset:
#ifdef CONFIG_F2FS_FAULT_INJECTION
if (f2fs_fault_inject.kset)
kobject_put(&f2fs_fault_inject);
#endif
kset_unregister(f2fs_kset); kset_unregister(f2fs_kset);
free_extent_cache: free_extent_cache:
destroy_extent_cache(); destroy_extent_cache();
...@@ -1725,14 +1926,17 @@ static void __exit exit_f2fs_fs(void) ...@@ -1725,14 +1926,17 @@ static void __exit exit_f2fs_fs(void)
{ {
remove_proc_entry("fs/f2fs", NULL); remove_proc_entry("fs/f2fs", NULL);
f2fs_destroy_root_stats(); f2fs_destroy_root_stats();
unregister_shrinker(&f2fs_shrinker_info);
unregister_filesystem(&f2fs_fs_type); unregister_filesystem(&f2fs_fs_type);
unregister_shrinker(&f2fs_shrinker_info);
#ifdef CONFIG_F2FS_FAULT_INJECTION
kobject_put(&f2fs_fault_inject);
#endif
kset_unregister(f2fs_kset);
destroy_extent_cache(); destroy_extent_cache();
destroy_checkpoint_caches(); destroy_checkpoint_caches();
destroy_segment_manager_caches(); destroy_segment_manager_caches();
destroy_node_manager_caches(); destroy_node_manager_caches();
destroy_inodecache(); destroy_inodecache();
kset_unregister(f2fs_kset);
f2fs_destroy_trace_ios(); f2fs_destroy_trace_ios();
} }
......
...@@ -498,7 +498,7 @@ static int __f2fs_setxattr(struct inode *inode, int index, ...@@ -498,7 +498,7 @@ static int __f2fs_setxattr(struct inode *inode, int index,
free = free + ENTRY_SIZE(here); free = free + ENTRY_SIZE(here);
if (unlikely(free < newsize)) { if (unlikely(free < newsize)) {
error = -ENOSPC; error = -E2BIG;
goto exit; goto exit;
} }
} }
...@@ -526,7 +526,6 @@ static int __f2fs_setxattr(struct inode *inode, int index, ...@@ -526,7 +526,6 @@ static int __f2fs_setxattr(struct inode *inode, int index,
* Before we come here, old entry is removed. * Before we come here, old entry is removed.
* We just write new entry. * We just write new entry.
*/ */
memset(last, 0, newsize);
last->e_name_index = index; last->e_name_index = index;
last->e_name_len = len; last->e_name_len = len;
memcpy(last->e_name, name, len); memcpy(last->e_name, name, len);
......
...@@ -508,4 +508,6 @@ enum { ...@@ -508,4 +508,6 @@ enum {
F2FS_FT_MAX F2FS_FT_MAX
}; };
#define S_SHIFT 12
#endif /* _LINUX_F2FS_FS_H */ #endif /* _LINUX_F2FS_FS_H */
...@@ -175,6 +175,7 @@ struct fscrypt_name { ...@@ -175,6 +175,7 @@ struct fscrypt_name {
*/ */
struct fscrypt_operations { struct fscrypt_operations {
int (*get_context)(struct inode *, void *, size_t); int (*get_context)(struct inode *, void *, size_t);
int (*key_prefix)(struct inode *, u8 **);
int (*prepare_context)(struct inode *); int (*prepare_context)(struct inode *);
int (*set_context)(struct inode *, const void *, size_t, void *); int (*set_context)(struct inode *, const void *, size_t, void *);
int (*dummy_context)(struct inode *); int (*dummy_context)(struct inode *);
......
...@@ -694,28 +694,32 @@ TRACE_EVENT(f2fs_direct_IO_exit, ...@@ -694,28 +694,32 @@ TRACE_EVENT(f2fs_direct_IO_exit,
__entry->ret) __entry->ret)
); );
TRACE_EVENT(f2fs_reserve_new_block, TRACE_EVENT(f2fs_reserve_new_blocks,
TP_PROTO(struct inode *inode, nid_t nid, unsigned int ofs_in_node), TP_PROTO(struct inode *inode, nid_t nid, unsigned int ofs_in_node,
blkcnt_t count),
TP_ARGS(inode, nid, ofs_in_node), TP_ARGS(inode, nid, ofs_in_node, count),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(dev_t, dev) __field(dev_t, dev)
__field(nid_t, nid) __field(nid_t, nid)
__field(unsigned int, ofs_in_node) __field(unsigned int, ofs_in_node)
__field(blkcnt_t, count)
), ),
TP_fast_assign( TP_fast_assign(
__entry->dev = inode->i_sb->s_dev; __entry->dev = inode->i_sb->s_dev;
__entry->nid = nid; __entry->nid = nid;
__entry->ofs_in_node = ofs_in_node; __entry->ofs_in_node = ofs_in_node;
__entry->count = count;
), ),
TP_printk("dev = (%d,%d), nid = %u, ofs_in_node = %u", TP_printk("dev = (%d,%d), nid = %u, ofs_in_node = %u, count = %llu",
show_dev(__entry), show_dev(__entry),
(unsigned int)__entry->nid, (unsigned int)__entry->nid,
__entry->ofs_in_node) __entry->ofs_in_node,
(unsigned long long)__entry->count)
); );
DECLARE_EVENT_CLASS(f2fs__submit_page_bio, DECLARE_EVENT_CLASS(f2fs__submit_page_bio,
...@@ -1271,14 +1275,14 @@ TRACE_EVENT(f2fs_destroy_extent_tree, ...@@ -1271,14 +1275,14 @@ TRACE_EVENT(f2fs_destroy_extent_tree,
DECLARE_EVENT_CLASS(f2fs_sync_dirty_inodes, DECLARE_EVENT_CLASS(f2fs_sync_dirty_inodes,
TP_PROTO(struct super_block *sb, int type, int count), TP_PROTO(struct super_block *sb, int type, s64 count),
TP_ARGS(sb, type, count), TP_ARGS(sb, type, count),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(dev_t, dev) __field(dev_t, dev)
__field(int, type) __field(int, type)
__field(int, count) __field(s64, count)
), ),
TP_fast_assign( TP_fast_assign(
...@@ -1287,7 +1291,7 @@ DECLARE_EVENT_CLASS(f2fs_sync_dirty_inodes, ...@@ -1287,7 +1291,7 @@ DECLARE_EVENT_CLASS(f2fs_sync_dirty_inodes,
__entry->count = count; __entry->count = count;
), ),
TP_printk("dev = (%d,%d), %s, dirty count = %d", TP_printk("dev = (%d,%d), %s, dirty count = %lld",
show_dev(__entry), show_dev(__entry),
show_file_type(__entry->type), show_file_type(__entry->type),
__entry->count) __entry->count)
...@@ -1295,14 +1299,14 @@ DECLARE_EVENT_CLASS(f2fs_sync_dirty_inodes, ...@@ -1295,14 +1299,14 @@ DECLARE_EVENT_CLASS(f2fs_sync_dirty_inodes,
DEFINE_EVENT(f2fs_sync_dirty_inodes, f2fs_sync_dirty_inodes_enter, DEFINE_EVENT(f2fs_sync_dirty_inodes, f2fs_sync_dirty_inodes_enter,
TP_PROTO(struct super_block *sb, int type, int count), TP_PROTO(struct super_block *sb, int type, s64 count),
TP_ARGS(sb, type, count) TP_ARGS(sb, type, count)
); );
DEFINE_EVENT(f2fs_sync_dirty_inodes, f2fs_sync_dirty_inodes_exit, DEFINE_EVENT(f2fs_sync_dirty_inodes, f2fs_sync_dirty_inodes_exit,
TP_PROTO(struct super_block *sb, int type, int count), TP_PROTO(struct super_block *sb, int type, s64 count),
TP_ARGS(sb, type, count) TP_ARGS(sb, type, count)
); );
......