Commit 72ece201 authored by Linus Torvalds

Merge tag 'f2fs-for-6.10.rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs

Pull f2fs updates from Jaegeuk Kim:
 "In this round, we've tried to address some performance issues on zoned
  storage such as direct IO and write_hints. In addition, we've migrated
  some IO paths to use folios. Meanwhile, there are multiple bug fixes in
  the compression paths, sanity check conditions, and error handlers.

  Enhancements:
   - allow direct io of pinned files for zoned storage
   - assign the write hint per stream by default
   - convert read paths and test_writeback to folio
   - avoid allocating WARM_DATA segment for direct IO

  Bug fixes:
   - fix false alarm on invalid block address
   - fix to add missing iput() in gc_data_segment()
   - fix to release node block count in error path of
     f2fs_new_node_page()
   - compress:
       - don't allow unaligned truncation on released compress inode
       - cover {reserve,release}_compress_blocks() w/ cp_rwsem lock
       - fix error path of inc_valid_block_count()
       - fix to update i_compr_blocks correctly
   - fix block migration when section is not aligned to pow2
   - don't trigger OPU on pinfile for direct IO
   - fix to do sanity check on i_xattr_nid in sanity_check_inode()
   - write missing last sum blk of file pinning section
   - clear writeback when compression failed
   - fix to adjust appropriate defragment pg_end

  As usual, there are several minor code clean-ups, and fixes to manage
  missing corner cases in the error paths"

* tag 'f2fs-for-6.10.rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs: (50 commits)
  f2fs: initialize last_block_in_bio variable
  f2fs: Add inline to f2fs_build_fault_attr() stub
  f2fs: fix some ambiguous comments
  f2fs: fix to add missing iput() in gc_data_segment()
  f2fs: allow dirty sections with zero valid block for checkpoint disabled
  f2fs: compress: don't allow unaligned truncation on released compress inode
  f2fs: fix to release node block count in error path of f2fs_new_node_page()
  f2fs: compress: fix to cover {reserve,release}_compress_blocks() w/ cp_rwsem lock
  f2fs: compress: fix error path of inc_valid_block_count()
  f2fs: compress: fix typo in f2fs_reserve_compress_blocks()
  f2fs: compress: fix to update i_compr_blocks correctly
  f2fs: check validation of fault attrs in f2fs_build_fault_attr()
  f2fs: fix to limit gc_pin_file_threshold
  f2fs: remove unused GC_FAILURE_PIN
  f2fs: use f2fs_{err,info}_ratelimited() for cleanup
  f2fs: fix block migration when section is not aligned to pow2
  f2fs: zone: fix to don't trigger OPU on pinfile for direct IO
  f2fs: fix to do sanity check on i_xattr_nid in sanity_check_inode()
  f2fs: fix to avoid allocating WARM_DATA segment for direct IO
  f2fs: remove redundant parameter in is_next_segment_free()
  ...
parents 119d1b8a 16409fdb
@@ -331,7 +331,7 @@ Date:		January 2018
 Contact:	Jaegeuk Kim <jaegeuk@kernel.org>
 Description:	This indicates how many GC can be failed for the pinned
 		file. If it exceeds this, F2FS doesn't guarantee its pinning
-		state. 2048 trials is set by default.
+		state. 2048 trials is set by default, and 65535 as maximum.

 What:		/sys/fs/f2fs/<disk>/extension_list
 Date:		February 2018
......
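For context on the pinned-file threshold documented above (not part of the series itself): userspace opts a file into pinning through the f2fs pin-file ioctl, and failed GC trials on that file are what get counted against this threshold. A minimal sketch, assuming a kernel that exports <linux/f2fs.h>; the mount point and file name are illustrative only:

  /* Pin a file so f2fs keeps its blocks in place; once GC on it fails
   * more times than the threshold above, the pinned state is dropped. */
  #include <fcntl.h>
  #include <stdio.h>
  #include <unistd.h>
  #include <sys/ioctl.h>
  #include <linux/f2fs.h>		/* F2FS_IOC_SET_PIN_FILE */

  int main(void)
  {
  	__u32 pin = 1;			/* 1 = pin, 0 = unpin */
  	int fd = open("/mnt/f2fs/pinned.img", O_RDWR);

  	if (fd < 0 || ioctl(fd, F2FS_IOC_SET_PIN_FILE, &pin) < 0) {
  		perror("F2FS_IOC_SET_PIN_FILE");
  		return 1;
  	}
  	close(fd);
  	return 0;
  }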
@@ -774,6 +774,35 @@ In order to identify whether the data in the victim segment are valid or not,
 F2FS manages a bitmap. Each bit represents the validity of a block, and the
 bitmap is composed of a bit stream covering whole blocks in main area.

+Write-hint Policy
+-----------------
+
+F2FS sets the whint all the time with the below policy.
+
+===================== ======================== ===================
+User                  F2FS                     Block
+===================== ======================== ===================
+N/A                   META                     WRITE_LIFE_NONE|REQ_META
+N/A                   HOT_NODE                 WRITE_LIFE_NONE
+N/A                   WARM_NODE                WRITE_LIFE_MEDIUM
+N/A                   COLD_NODE                WRITE_LIFE_LONG
+ioctl(COLD)           COLD_DATA                WRITE_LIFE_EXTREME
+extension list        "                        "
+
+-- buffered io
+N/A                   COLD_DATA                WRITE_LIFE_EXTREME
+N/A                   HOT_DATA                 WRITE_LIFE_SHORT
+N/A                   WARM_DATA                WRITE_LIFE_NOT_SET
+
+-- direct io
+WRITE_LIFE_EXTREME    COLD_DATA                WRITE_LIFE_EXTREME
+WRITE_LIFE_SHORT      HOT_DATA                 WRITE_LIFE_SHORT
+WRITE_LIFE_NOT_SET    WARM_DATA                WRITE_LIFE_NOT_SET
+WRITE_LIFE_NONE       "                        WRITE_LIFE_NONE
+WRITE_LIFE_MEDIUM     "                        WRITE_LIFE_MEDIUM
+WRITE_LIFE_LONG       "                        WRITE_LIFE_LONG
+===================== ======================== ===================
+
 Fallocate(2) Policy
 -------------------
......
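The direct-IO rows in the table above are driven by the write-life hint userspace attaches to the file with fcntl(F_SET_RW_HINT). A minimal sketch (not part of this series; the file name is arbitrary, and the fallback constants are only there for older headers) of how an application would steer its direct writes to COLD_DATA:

  /* Attach WRITE_LIFE_EXTREME to an open file so that direct IO writes
   * are placed in COLD_DATA per the write-hint policy table above. */
  #define _GNU_SOURCE		/* O_DIRECT */
  #include <fcntl.h>
  #include <stdint.h>
  #include <stdio.h>
  #include <unistd.h>

  #ifndef F_SET_RW_HINT
  #define F_SET_RW_HINT		1036	/* F_LINUX_SPECIFIC_BASE + 12 */
  #endif
  #ifndef RWH_WRITE_LIFE_EXTREME
  #define RWH_WRITE_LIFE_EXTREME	5
  #endif

  int main(void)
  {
  	uint64_t hint = RWH_WRITE_LIFE_EXTREME;
  	int fd = open("/mnt/f2fs/cold.data", O_WRONLY | O_CREAT | O_DIRECT, 0644);

  	if (fd < 0 || fcntl(fd, F_SET_RW_HINT, &hint) < 0) {
  		perror("F_SET_RW_HINT");
  		return 1;
  	}
  	/* subsequent O_DIRECT writes on fd carry the EXTREME lifetime hint */
  	close(fd);
  	return 0;
  }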
@@ -179,22 +179,22 @@ static bool __f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
 		break;
 	case META_SIT:
 		if (unlikely(blkaddr >= SIT_BLK_CNT(sbi)))
-			goto err;
+			goto check_only;
 		break;
 	case META_SSA:
 		if (unlikely(blkaddr >= MAIN_BLKADDR(sbi) ||
 			blkaddr < SM_I(sbi)->ssa_blkaddr))
-			goto err;
+			goto check_only;
 		break;
 	case META_CP:
 		if (unlikely(blkaddr >= SIT_I(sbi)->sit_base_addr ||
 			blkaddr < __start_cp_addr(sbi)))
-			goto err;
+			goto check_only;
 		break;
 	case META_POR:
 		if (unlikely(blkaddr >= MAX_BLKADDR(sbi) ||
 			blkaddr < MAIN_BLKADDR(sbi)))
-			goto err;
+			goto check_only;
 		break;
 	case DATA_GENERIC:
 	case DATA_GENERIC_ENHANCE:
@@ -228,6 +228,7 @@ static bool __f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
 	return true;
 err:
 	f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
+check_only:
 	return false;
 }
@@ -345,7 +346,7 @@ static int __f2fs_write_meta_page(struct page *page,
 {
 	struct f2fs_sb_info *sbi = F2FS_P_SB(page);

-	trace_f2fs_writepage(page, META);
+	trace_f2fs_writepage(page_folio(page), META);

 	if (unlikely(f2fs_cp_error(sbi))) {
 		if (is_sbi_flag_set(sbi, SBI_IS_CLOSE)) {
@@ -492,7 +493,7 @@ long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
 static bool f2fs_dirty_meta_folio(struct address_space *mapping,
 		struct folio *folio)
 {
-	trace_f2fs_set_page_dirty(&folio->page, META);
+	trace_f2fs_set_page_dirty(folio, META);

 	if (!folio_test_uptodate(folio))
 		folio_mark_uptodate(folio);
......
@@ -198,8 +198,8 @@ static int lzo_compress_pages(struct compress_ctx *cc)
 	ret = lzo1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
 					&cc->clen, cc->private);
 	if (ret != LZO_E_OK) {
-		printk_ratelimited("%sF2FS-fs (%s): lzo compress failed, ret:%d\n",
-				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
+		f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
+				"lzo compress failed, ret:%d", ret);
 		return -EIO;
 	}
 	return 0;
@@ -212,17 +212,15 @@ static int lzo_decompress_pages(struct decompress_io_ctx *dic)
 	ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen,
 						dic->rbuf, &dic->rlen);
 	if (ret != LZO_E_OK) {
-		printk_ratelimited("%sF2FS-fs (%s): lzo decompress failed, ret:%d\n",
-				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
+		f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
+				"lzo decompress failed, ret:%d", ret);
 		return -EIO;
 	}
 	if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) {
-		printk_ratelimited("%sF2FS-fs (%s): lzo invalid rlen:%zu, "
-					"expected:%lu\n", KERN_ERR,
-					F2FS_I_SB(dic->inode)->sb->s_id,
-					dic->rlen,
-					PAGE_SIZE << dic->log_cluster_size);
+		f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
+				"lzo invalid rlen:%zu, expected:%lu",
+				dic->rlen, PAGE_SIZE << dic->log_cluster_size);
 		return -EIO;
 	}
 	return 0;
@@ -294,16 +292,15 @@ static int lz4_decompress_pages(struct decompress_io_ctx *dic)
 	ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf,
 						dic->clen, dic->rlen);
 	if (ret < 0) {
-		printk_ratelimited("%sF2FS-fs (%s): lz4 decompress failed, ret:%d\n",
-				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
+		f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
+				"lz4 decompress failed, ret:%d", ret);
 		return -EIO;
 	}
 	if (ret != PAGE_SIZE << dic->log_cluster_size) {
-		printk_ratelimited("%sF2FS-fs (%s): lz4 invalid ret:%d, "
-					"expected:%lu\n", KERN_ERR,
-					F2FS_I_SB(dic->inode)->sb->s_id, ret,
-					PAGE_SIZE << dic->log_cluster_size);
+		f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
+				"lz4 invalid ret:%d, expected:%lu",
+				ret, PAGE_SIZE << dic->log_cluster_size);
 		return -EIO;
 	}
 	return 0;
@@ -350,9 +347,8 @@ static int zstd_init_compress_ctx(struct compress_ctx *cc)
 	stream = zstd_init_cstream(&params, 0, workspace, workspace_size);
 	if (!stream) {
-		printk_ratelimited("%sF2FS-fs (%s): %s zstd_init_cstream failed\n",
-				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
-				__func__);
+		f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
+				"%s zstd_init_cstream failed", __func__);
 		kvfree(workspace);
 		return -EIO;
 	}
@@ -390,16 +386,16 @@ static int zstd_compress_pages(struct compress_ctx *cc)
 	ret = zstd_compress_stream(stream, &outbuf, &inbuf);
 	if (zstd_is_error(ret)) {
-		printk_ratelimited("%sF2FS-fs (%s): %s zstd_compress_stream failed, ret: %d\n",
-				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
+		f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
+				"%s zstd_compress_stream failed, ret: %d",
 				__func__, zstd_get_error_code(ret));
 		return -EIO;
 	}
 	ret = zstd_end_stream(stream, &outbuf);
 	if (zstd_is_error(ret)) {
-		printk_ratelimited("%sF2FS-fs (%s): %s zstd_end_stream returned %d\n",
-				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
+		f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
+				"%s zstd_end_stream returned %d",
 				__func__, zstd_get_error_code(ret));
 		return -EIO;
 	}
@@ -432,9 +428,8 @@ static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
 	stream = zstd_init_dstream(max_window_size, workspace, workspace_size);
 	if (!stream) {
-		printk_ratelimited("%sF2FS-fs (%s): %s zstd_init_dstream failed\n",
-				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
-				__func__);
+		f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
+				"%s zstd_init_dstream failed", __func__);
 		kvfree(workspace);
 		return -EIO;
 	}
@@ -469,16 +464,15 @@ static int zstd_decompress_pages(struct decompress_io_ctx *dic)
 	ret = zstd_decompress_stream(stream, &outbuf, &inbuf);
 	if (zstd_is_error(ret)) {
-		printk_ratelimited("%sF2FS-fs (%s): %s zstd_decompress_stream failed, ret: %d\n",
-				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
+		f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
+				"%s zstd_decompress_stream failed, ret: %d",
 				__func__, zstd_get_error_code(ret));
 		return -EIO;
 	}
 	if (dic->rlen != outbuf.pos) {
-		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD invalid rlen:%zu, "
-				"expected:%lu\n", KERN_ERR,
-				F2FS_I_SB(dic->inode)->sb->s_id,
+		f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
+				"%s ZSTD invalid rlen:%zu, expected:%lu",
 				__func__, dic->rlen,
 				PAGE_SIZE << dic->log_cluster_size);
 		return -EIO;
@@ -1031,6 +1025,31 @@ static void set_cluster_writeback(struct compress_ctx *cc)
 	}
 }

+static void cancel_cluster_writeback(struct compress_ctx *cc,
+			struct compress_io_ctx *cic, int submitted)
+{
+	int i;
+
+	/* Wait for submitted IOs. */
+	if (submitted > 1) {
+		f2fs_submit_merged_write(F2FS_I_SB(cc->inode), DATA);
+		while (atomic_read(&cic->pending_pages) !=
+				(cc->valid_nr_cpages - submitted + 1))
+			f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
+	}
+
+	/* Cancel writeback and stay locked. */
+	for (i = 0; i < cc->cluster_size; i++) {
+		if (i < submitted) {
+			inode_inc_dirty_pages(cc->inode);
+			lock_page(cc->rpages[i]);
+		}
+		clear_page_private_gcing(cc->rpages[i]);
+		if (folio_test_writeback(page_folio(cc->rpages[i])))
+			end_page_writeback(cc->rpages[i]);
+	}
+}
+
 static void set_cluster_dirty(struct compress_ctx *cc)
 {
 	int i;
@@ -1232,7 +1251,6 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
 		.page = NULL,
 		.encrypted_page = NULL,
 		.compressed_page = NULL,
-		.submitted = 0,
 		.io_type = io_type,
 		.io_wbc = wbc,
 		.encrypted = fscrypt_inode_uses_fs_layer_crypto(cc->inode) ?
@@ -1358,7 +1376,16 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
 		fio.compressed_page = cc->cpages[i - 1];
 		cc->cpages[i - 1] = NULL;
+		fio.submitted = 0;
 		f2fs_outplace_write_data(&dn, &fio);
+		if (unlikely(!fio.submitted)) {
+			cancel_cluster_writeback(cc, cic, i);
+
+			/* To call fscrypt_finalize_bounce_page */
+			i = cc->valid_nr_cpages;
+			*submitted = 0;
+			goto out_destroy_crypt;
+		}
 		(*submitted)++;
 unlock_continue:
 		inode_dec_dirty_pages(cc->inode);
@@ -1392,8 +1419,11 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
 out_destroy_crypt:
 	page_array_free(cc->inode, cic->rpages, cc->cluster_size);

-	for (--i; i >= 0; i--)
+	for (--i; i >= 0; i--) {
+		if (!cc->cpages[i])
+			continue;
 		fscrypt_finalize_bounce_page(&cc->cpages[i]);
+	}
 out_put_cic:
 	kmem_cache_free(cic_entry_slab, cic);
 out_put_dnode:
@@ -1484,7 +1514,7 @@ static int f2fs_write_raw_pages(struct compress_ctx *cc,
 			if (!PageDirty(cc->rpages[i]))
 				goto continue_unlock;

-			if (PageWriteback(cc->rpages[i])) {
+			if (folio_test_writeback(page_folio(cc->rpages[i]))) {
 				if (wbc->sync_mode == WB_SYNC_NONE)
 					goto continue_unlock;
 				f2fs_wait_on_page_writeback(cc->rpages[i], DATA, true, true);
......
@@ -72,7 +72,7 @@ enum {
 struct f2fs_fault_info {
 	atomic_t inject_ops;
-	unsigned int inject_rate;
+	int inject_rate;
 	unsigned int inject_type;
 };
@@ -765,11 +765,6 @@ enum {
 #define DEF_DIR_LEVEL		0

-enum {
-	GC_FAILURE_PIN,
-	MAX_GC_FAILURE
-};
-
 /* used for f2fs_inode_info->flags */
 enum {
 	FI_NEW_INODE,		/* indicate newly allocated inode */
@@ -816,9 +811,10 @@ struct f2fs_inode_info {
 	unsigned long i_flags;		/* keep an inode flags for ioctl */
 	unsigned char i_advise;		/* use to give file attribute hints */
 	unsigned char i_dir_level;	/* use for dentry level for large dir */
-	unsigned int i_current_depth;	/* only for directory depth */
-	/* for gc failure statistic */
-	unsigned int i_gc_failures[MAX_GC_FAILURE];
+	union {
+		unsigned int i_current_depth;	/* only for directory depth */
+		unsigned short i_gc_failures;	/* for gc failure statistic */
+	};
 	unsigned int i_pino;		/* parent inode number */
 	umode_t i_acl_mode;		/* keep file acl mode temporarily */
@@ -1557,6 +1553,7 @@ struct f2fs_sb_info {
 #ifdef CONFIG_BLK_DEV_ZONED
 	unsigned int blocks_per_blkz;	/* F2FS blocks per zone */
+	unsigned int max_open_zones;	/* max open zone resources of the zoned device */
 #endif

 	/* for node-related operations */
@@ -1676,7 +1673,7 @@ struct f2fs_sb_info {
 	unsigned long long skipped_gc_rwsem;	/* FG_GC only */
 	/* threshold for gc trials on pinned files */
-	u64 gc_pin_file_threshold;
+	unsigned short gc_pin_file_threshold;
 	struct f2fs_rwsem pin_sem;

 	/* maximum # of trials to find a victim segment for SSR and GC */
@@ -2309,7 +2306,7 @@ static inline void f2fs_i_blocks_write(struct inode *, block_t, bool, bool);
 static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
 				struct inode *inode, blkcnt_t *count, bool partial)
 {
-	blkcnt_t diff = 0, release = 0;
+	long long diff = 0, release = 0;
 	block_t avail_user_block_count;
 	int ret;
@@ -2329,26 +2326,27 @@ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
 	percpu_counter_add(&sbi->alloc_valid_block_count, (*count));

 	spin_lock(&sbi->stat_lock);
-	sbi->total_valid_block_count += (block_t)(*count);
-	avail_user_block_count = get_available_block_count(sbi, inode, true);

-	if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) {
+	avail_user_block_count = get_available_block_count(sbi, inode, true);
+	diff = (long long)sbi->total_valid_block_count + *count -
+						avail_user_block_count;
+	if (unlikely(diff > 0)) {
 		if (!partial) {
 			spin_unlock(&sbi->stat_lock);
+			release = *count;
 			goto enospc;
 		}

-		diff = sbi->total_valid_block_count - avail_user_block_count;
 		if (diff > *count)
 			diff = *count;
 		*count -= diff;
 		release = diff;
-		sbi->total_valid_block_count -= diff;
 		if (!*count) {
 			spin_unlock(&sbi->stat_lock);
 			goto enospc;
 		}
 	}
+	sbi->total_valid_block_count += (block_t)(*count);
 	spin_unlock(&sbi->stat_lock);

 	if (unlikely(release)) {
@@ -3132,7 +3130,7 @@ static inline void f2fs_i_depth_write(struct inode *inode, unsigned int depth)
 static inline void f2fs_i_gc_failures_write(struct inode *inode,
 					unsigned int count)
 {
-	F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] = count;
+	F2FS_I(inode)->i_gc_failures = count;
 	f2fs_mark_inode_dirty_sync(inode, true);
 }
@@ -3497,6 +3495,8 @@ int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
 		struct iattr *attr);
 int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end);
 void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count);
+int f2fs_do_shutdown(struct f2fs_sb_info *sbi, unsigned int flag,
+							bool readonly);
 int f2fs_precache_extents(struct inode *inode);
 int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa);
 int f2fs_fileattr_set(struct mnt_idmap *idmap,
@@ -3719,6 +3719,7 @@ void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
 			block_t old_addr, block_t new_addr,
 			unsigned char version, bool recover_curseg,
 			bool recover_newaddr);
+int f2fs_get_segment_temp(int seg_type);
 int f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
 			block_t old_blkaddr, block_t *new_blkaddr,
 			struct f2fs_summary *sum, int type,
@@ -3741,7 +3742,9 @@ int f2fs_build_segment_manager(struct f2fs_sb_info *sbi);
 void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi);
 int __init f2fs_create_segment_manager_caches(void);
 void f2fs_destroy_segment_manager_caches(void);
-int f2fs_rw_hint_to_seg_type(enum rw_hint hint);
+int f2fs_rw_hint_to_seg_type(struct f2fs_sb_info *sbi, enum rw_hint hint);
+enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi,
+			enum page_type type, enum temp_type temp);
 unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
 			unsigned int segno);
 unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
@@ -4148,10 +4151,10 @@ extern struct kmem_cache *f2fs_inode_entry_slab;
 bool f2fs_may_inline_data(struct inode *inode);
 bool f2fs_sanity_check_inline_data(struct inode *inode);
 bool f2fs_may_inline_dentry(struct inode *inode);
-void f2fs_do_read_inline_data(struct page *page, struct page *ipage);
+void f2fs_do_read_inline_data(struct folio *folio, struct page *ipage);
 void f2fs_truncate_inline_inode(struct inode *inode,
 						struct page *ipage, u64 from);
-int f2fs_read_inline_data(struct inode *inode, struct page *page);
+int f2fs_read_inline_data(struct inode *inode, struct folio *folio);
 int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page);
 int f2fs_convert_inline_inode(struct inode *inode);
 int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry);
@@ -4596,10 +4599,14 @@ static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx)
 }

 #ifdef CONFIG_F2FS_FAULT_INJECTION
-extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
-							unsigned int type);
+extern int f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned long rate,
+							unsigned long type);
 #else
-#define f2fs_build_fault_attr(sbi, rate, type)		do { } while (0)
+static inline int f2fs_build_fault_attr(struct f2fs_sb_info *sbi,
+					unsigned long rate, unsigned long type)
+{
+	return 0;
+}
 #endif

 static inline bool is_journalled_quota(struct f2fs_sb_info *sbi)
@@ -4657,7 +4664,7 @@ static inline void f2fs_truncate_meta_inode_pages(struct f2fs_sb_info *sbi,
 		page = find_get_page(META_MAPPING(sbi), blkaddr + i);
 		if (page) {
-			if (PageWriteback(page))
+			if (folio_test_writeback(page_folio(page)))
 				need_submit = true;
 			f2fs_put_page(page, 0);
 		}
......
@@ -1434,7 +1434,7 @@ static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
 		goto out;

 	if (gc_type == BG_GC) {
-		if (PageWriteback(page)) {
+		if (folio_test_writeback(page_folio(page))) {
 			err = -EAGAIN;
 			goto out;
 		}
@@ -1554,10 +1554,15 @@ static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
 			int err;

 			inode = f2fs_iget(sb, dni.ino);
-			if (IS_ERR(inode) || is_bad_inode(inode) ||
-					special_file(inode->i_mode))
+			if (IS_ERR(inode))
 				continue;

+			if (is_bad_inode(inode) ||
+					special_file(inode->i_mode)) {
+				iput(inode);
+				continue;
+			}
+
 			err = f2fs_gc_pinned_control(inode, gc_type, segno);
 			if (err == -EAGAIN) {
 				iput(inode);
......
@@ -26,6 +26,7 @@
 #define LIMIT_FREE_BLOCK	40 /* percentage over invalid + free space */

 #define DEF_GC_FAILED_PINNED_FILES	2048
+#define MAX_GC_FAILED_PINNED_FILES	USHRT_MAX

 /* Search max. number of dirty segments to select a victim segment */
 #define DEF_MAX_VICTIM_SEARCH	4096 /* covers 8GB */
......
@@ -61,22 +61,22 @@ bool f2fs_may_inline_dentry(struct inode *inode)
 	return true;
 }

-void f2fs_do_read_inline_data(struct page *page, struct page *ipage)
+void f2fs_do_read_inline_data(struct folio *folio, struct page *ipage)
 {
-	struct inode *inode = page->mapping->host;
+	struct inode *inode = folio_file_mapping(folio)->host;

-	if (PageUptodate(page))
+	if (folio_test_uptodate(folio))
 		return;

-	f2fs_bug_on(F2FS_P_SB(page), page->index);
+	f2fs_bug_on(F2FS_I_SB(inode), folio_index(folio));

-	zero_user_segment(page, MAX_INLINE_DATA(inode), PAGE_SIZE);
+	folio_zero_segment(folio, MAX_INLINE_DATA(inode), folio_size(folio));

 	/* Copy the whole inline data block */
-	memcpy_to_page(page, 0, inline_data_addr(inode, ipage),
+	memcpy_to_folio(folio, 0, inline_data_addr(inode, ipage),
 			MAX_INLINE_DATA(inode));
-	if (!PageUptodate(page))
-		SetPageUptodate(page);
+	if (!folio_test_uptodate(folio))
+		folio_mark_uptodate(folio);
 }

 void f2fs_truncate_inline_inode(struct inode *inode,
@@ -97,13 +97,13 @@ void f2fs_truncate_inline_inode(struct inode *inode,
 	clear_inode_flag(inode, FI_DATA_EXIST);
 }

-int f2fs_read_inline_data(struct inode *inode, struct page *page)
+int f2fs_read_inline_data(struct inode *inode, struct folio *folio)
 {
 	struct page *ipage;

 	ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
 	if (IS_ERR(ipage)) {
-		unlock_page(page);
+		folio_unlock(folio);
 		return PTR_ERR(ipage);
 	}
@@ -112,15 +112,15 @@ int f2fs_read_inline_data(struct inode *inode, struct page *page)
 		return -EAGAIN;
 	}

-	if (page->index)
-		zero_user_segment(page, 0, PAGE_SIZE);
+	if (folio_index(folio))
+		folio_zero_segment(folio, 0, folio_size(folio));
 	else
-		f2fs_do_read_inline_data(page, ipage);
+		f2fs_do_read_inline_data(folio, ipage);

-	if (!PageUptodate(page))
-		SetPageUptodate(page);
+	if (!folio_test_uptodate(folio))
+		folio_mark_uptodate(folio);
 	f2fs_put_page(ipage, 1);
-	unlock_page(page);
+	folio_unlock(folio);
 	return 0;
 }
@@ -164,9 +164,9 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
 		return -EFSCORRUPTED;
 	}

-	f2fs_bug_on(F2FS_P_SB(page), PageWriteback(page));
+	f2fs_bug_on(F2FS_P_SB(page), folio_test_writeback(page_folio(page)));

-	f2fs_do_read_inline_data(page, dn->inode_page);
+	f2fs_do_read_inline_data(page_folio(page), dn->inode_page);
 	set_page_dirty(page);

 	/* clear dirty state */
......
@@ -161,7 +161,8 @@ bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page)
 	if (!f2fs_enable_inode_chksum(sbi, page))
 #else
 	if (!f2fs_enable_inode_chksum(sbi, page) ||
-			PageDirty(page) || PageWriteback(page))
+			PageDirty(page) ||
+			folio_test_writeback(page_folio(page)))
 #endif
 		return true;
@@ -361,6 +362,12 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
 		return false;
 	}

+	if (fi->i_xattr_nid && f2fs_check_nid_range(sbi, fi->i_xattr_nid)) {
+		f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_xattr_nid: %u, run fsck to fix.",
+			  __func__, inode->i_ino, fi->i_xattr_nid);
+		return false;
+	}
+
 	return true;
 }
@@ -408,8 +415,7 @@ static int do_read_inode(struct inode *inode)
 	if (S_ISDIR(inode->i_mode))
 		fi->i_current_depth = le32_to_cpu(ri->i_current_depth);
 	else if (S_ISREG(inode->i_mode))
-		fi->i_gc_failures[GC_FAILURE_PIN] =
-					le16_to_cpu(ri->i_gc_failures);
+		fi->i_gc_failures = le16_to_cpu(ri->i_gc_failures);
 	fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid);
 	fi->i_flags = le32_to_cpu(ri->i_flags);
 	if (S_ISREG(inode->i_mode))
@@ -679,8 +685,7 @@ void f2fs_update_inode(struct inode *inode, struct page *node_page)
 		ri->i_current_depth =
 			cpu_to_le32(F2FS_I(inode)->i_current_depth);
 	else if (S_ISREG(inode->i_mode))
-		ri->i_gc_failures =
-			cpu_to_le16(F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN]);
+		ri->i_gc_failures = cpu_to_le16(F2FS_I(inode)->i_gc_failures);
 	ri->i_xattr_nid = cpu_to_le32(F2FS_I(inode)->i_xattr_nid);
 	ri->i_flags = cpu_to_le32(F2FS_I(inode)->i_flags);
 	ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino);
@@ -804,6 +809,7 @@ void f2fs_evict_inode(struct inode *inode)
 	struct f2fs_inode_info *fi = F2FS_I(inode);
 	nid_t xnid = fi->i_xattr_nid;
 	int err = 0;
+	bool freeze_protected = false;

 	f2fs_abort_atomic_write(inode, true);
@@ -843,8 +849,10 @@ void f2fs_evict_inode(struct inode *inode)
 	f2fs_remove_ino_entry(sbi, inode->i_ino, UPDATE_INO);
 	f2fs_remove_ino_entry(sbi, inode->i_ino, FLUSH_INO);

-	if (!is_sbi_flag_set(sbi, SBI_IS_FREEZING))
+	if (!is_sbi_flag_set(sbi, SBI_IS_FREEZING)) {
 		sb_start_intwrite(inode->i_sb);
+		freeze_protected = true;
+	}
 	set_inode_flag(inode, FI_NO_ALLOC);
 	i_size_write(inode, 0);
 retry:
@@ -887,7 +895,7 @@ void f2fs_evict_inode(struct inode *inode)
 		if (dquot_initialize_needed(inode))
 			set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
 	}
-	if (!is_sbi_flag_set(sbi, SBI_IS_FREEZING))
+	if (freeze_protected)
 		sb_end_intwrite(inode->i_sb);
 no_delete:
 	dquot_drop(inode);
......
@@ -1187,7 +1187,17 @@ int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from)
 		default:
 			BUG();
 		}
-		if (err < 0 && err != -ENOENT)
+		if (err == -ENOENT) {
+			set_sbi_flag(F2FS_P_SB(page), SBI_NEED_FSCK);
+			f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
+			f2fs_err_ratelimited(sbi,
+				"truncate node fail, ino:%lu, nid:%u, "
+				"offset[0]:%d, offset[1]:%d, nofs:%d",
+				inode->i_ino, dn.nid, offset[0],
+				offset[1], nofs);
+			err = 0;
+		}
+		if (err < 0)
 			goto fail;
 		if (offset[1] == 0 &&
 				ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
@@ -1319,6 +1329,7 @@ struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
 	}
 	if (unlikely(new_ni.blk_addr != NULL_ADDR)) {
 		err = -EFSCORRUPTED;
+		dec_valid_node_count(sbi, dn->inode, !ofs);
 		set_sbi_flag(sbi, SBI_NEED_FSCK);
 		f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
 		goto fail;
@@ -1345,7 +1356,6 @@ struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
 	if (ofs == 0)
 		inc_valid_inode_count(sbi);
 	return page;
-
 fail:
 	clear_node_page_dirty(page);
 	f2fs_put_page(page, 1);
@@ -1614,7 +1624,7 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
 	};
 	unsigned int seq;

-	trace_f2fs_writepage(page, NODE);
+	trace_f2fs_writepage(page_folio(page), NODE);

 	if (unlikely(f2fs_cp_error(sbi))) {
 		/* keep node pages in remount-ro mode */
@@ -1733,7 +1743,7 @@ int f2fs_move_node_page(struct page *node_page, int gc_type)
 			goto release_page;
 	} else {
 		/* set page dirty and write it */
-		if (!PageWriteback(node_page))
+		if (!folio_test_writeback(page_folio(node_page)))
 			set_page_dirty(node_page);
 	}
 out_page:
@@ -2161,7 +2171,7 @@ static int f2fs_write_node_pages(struct address_space *mapping,
 static bool f2fs_dirty_node_folio(struct address_space *mapping,
 		struct folio *folio)
 {
-	trace_f2fs_set_page_dirty(&folio->page, NODE);
+	trace_f2fs_set_page_dirty(folio, NODE);

 	if (!folio_test_uptodate(folio))
 		folio_mark_uptodate(folio);
......
@@ -330,8 +330,7 @@ static int recover_inode(struct inode *inode, struct page *page)
 	F2FS_I(inode)->i_advise = raw->i_advise;
 	F2FS_I(inode)->i_flags = le32_to_cpu(raw->i_flags);
 	f2fs_set_inode_flags(inode);
-	F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] =
-				le16_to_cpu(raw->i_gc_failures);
+	F2FS_I(inode)->i_gc_failures = le16_to_cpu(raw->i_gc_failures);

 	recover_inline_flags(inode, raw);
......
@@ -771,8 +771,10 @@ static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
 			block_t valid_blocks =
 				get_valid_blocks(sbi, segno, true);

-			f2fs_bug_on(sbi, unlikely(!valid_blocks ||
-					valid_blocks == CAP_BLKS_PER_SEC(sbi)));
+			f2fs_bug_on(sbi,
+				(!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
+					!valid_blocks) ||
+					valid_blocks == CAP_BLKS_PER_SEC(sbi));

 			if (!IS_CURSEC(sbi, secno))
 				set_bit(secno, dirty_i->dirty_secmap);
@@ -1109,9 +1111,8 @@ static void __remove_discard_cmd(struct f2fs_sb_info *sbi,
 		dc->error = 0;

 	if (dc->error)
-		printk_ratelimited(
-			"%sF2FS-fs (%s): Issue discard(%u, %u, %u) failed, ret: %d",
-			KERN_INFO, sbi->sb->s_id,
+		f2fs_info_ratelimited(sbi,
+			"Issue discard(%u, %u, %u) failed, ret: %d",
 			dc->di.lstart, dc->di.start, dc->di.len, dc->error);
 	__detach_discard_cmd(dcc, dc);
 }
@@ -2645,7 +2646,7 @@ static void write_current_sum_page(struct f2fs_sb_info *sbi,
 }

 static int is_next_segment_free(struct f2fs_sb_info *sbi,
-		struct curseg_info *curseg, int type)
+		struct curseg_info *curseg)
 {
 	unsigned int segno = curseg->segno + 1;
 	struct free_segmap_info *free_i = FREE_I(sbi);
@@ -3073,8 +3074,7 @@ static bool need_new_seg(struct f2fs_sb_info *sbi, int type)
 	if (!is_set_ckpt_flags(sbi, CP_CRC_RECOVERY_FLAG) &&
 			curseg->seg_type == CURSEG_WARM_NODE)
 		return true;
-	if (curseg->alloc_type == LFS &&
-			is_next_segment_free(sbi, curseg, type) &&
+	if (curseg->alloc_type == LFS && is_next_segment_free(sbi, curseg) &&
 			likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
 		return true;
 	if (!f2fs_need_SSR(sbi) || !get_ssr_segment(sbi, type, SSR, 0))
@@ -3352,8 +3352,14 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
 	return err;
 }

-int f2fs_rw_hint_to_seg_type(enum rw_hint hint)
+int f2fs_rw_hint_to_seg_type(struct f2fs_sb_info *sbi, enum rw_hint hint)
 {
+	if (F2FS_OPTION(sbi).active_logs == 2)
+		return CURSEG_HOT_DATA;
+	else if (F2FS_OPTION(sbi).active_logs == 4)
+		return CURSEG_COLD_DATA;
+
+	/* active_log == 6 */
 	switch (hint) {
 	case WRITE_LIFE_SHORT:
 		return CURSEG_HOT_DATA;
@@ -3364,6 +3370,65 @@ int f2fs_rw_hint_to_seg_type(enum rw_hint hint)
 	}
 }

+/*
+ * This returns write hints for each segment type. This hints will be
+ * passed down to block layer as below by default.
+ *
+ * User                  F2FS                     Block
+ * ----                  ----                     -----
+ *                       META                     WRITE_LIFE_NONE|REQ_META
+ *                       HOT_NODE                 WRITE_LIFE_NONE
+ *                       WARM_NODE                WRITE_LIFE_MEDIUM
+ *                       COLD_NODE                WRITE_LIFE_LONG
+ * ioctl(COLD)           COLD_DATA                WRITE_LIFE_EXTREME
+ * extension list        "                        "
+ *
+ * -- buffered io
+ *                       COLD_DATA                WRITE_LIFE_EXTREME
+ *                       HOT_DATA                 WRITE_LIFE_SHORT
+ *                       WARM_DATA                WRITE_LIFE_NOT_SET
+ *
+ * -- direct io
+ * WRITE_LIFE_EXTREME    COLD_DATA                WRITE_LIFE_EXTREME
+ * WRITE_LIFE_SHORT      HOT_DATA                 WRITE_LIFE_SHORT
+ * WRITE_LIFE_NOT_SET    WARM_DATA                WRITE_LIFE_NOT_SET
+ * WRITE_LIFE_NONE       "                        WRITE_LIFE_NONE
+ * WRITE_LIFE_MEDIUM     "                        WRITE_LIFE_MEDIUM
+ * WRITE_LIFE_LONG       "                        WRITE_LIFE_LONG
+ */
+enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi,
+				enum page_type type, enum temp_type temp)
+{
+	switch (type) {
+	case DATA:
+		switch (temp) {
+		case WARM:
+			return WRITE_LIFE_NOT_SET;
+		case HOT:
+			return WRITE_LIFE_SHORT;
+		case COLD:
+			return WRITE_LIFE_EXTREME;
+		default:
+			return WRITE_LIFE_NONE;
+		}
+	case NODE:
+		switch (temp) {
+		case WARM:
+			return WRITE_LIFE_MEDIUM;
+		case HOT:
+			return WRITE_LIFE_NONE;
+		case COLD:
+			return WRITE_LIFE_LONG;
+		default:
+			return WRITE_LIFE_NONE;
+		}
+	case META:
+		return WRITE_LIFE_NONE;
+	default:
+		return WRITE_LIFE_NONE;
+	}
+}
+
 static int __get_segment_type_2(struct f2fs_io_info *fio)
 {
 	if (fio->type == DATA)
@@ -3434,7 +3499,8 @@ static int __get_segment_type_6(struct f2fs_io_info *fio)
 			is_inode_flag_set(inode, FI_HOT_DATA) ||
 			f2fs_is_cow_file(inode))
 			return CURSEG_HOT_DATA;
-		return f2fs_rw_hint_to_seg_type(inode->i_write_hint);
+		return f2fs_rw_hint_to_seg_type(F2FS_I_SB(inode),
+						inode->i_write_hint);
 	} else {
 		if (IS_DNODE(fio->page))
 			return is_cold_node(fio->page) ? CURSEG_WARM_NODE :
@@ -3443,6 +3509,15 @@ static int __get_segment_type_6(struct f2fs_io_info *fio)
 	}
 }

+int f2fs_get_segment_temp(int seg_type)
+{
+	if (IS_HOT(seg_type))
+		return HOT;
+	else if (IS_WARM(seg_type))
+		return WARM;
+	return COLD;
+}
+
 static int __get_segment_type(struct f2fs_io_info *fio)
 {
 	int type = 0;
@@ -3461,12 +3536,8 @@ static int __get_segment_type(struct f2fs_io_info *fio)
 		f2fs_bug_on(fio->sbi, true);
 	}

-	if (IS_HOT(type))
-		fio->temp = HOT;
-	else if (IS_WARM(type))
-		fio->temp = WARM;
-	else
-		fio->temp = COLD;
+	fio->temp = f2fs_get_segment_temp(type);

 	return type;
 }
@@ -3559,6 +3630,8 @@ int f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
 	if (segment_full) {
 		if (type == CURSEG_COLD_DATA_PINNED &&
 		    !((curseg->segno + 1) % sbi->segs_per_sec)) {
+			write_sum_page(sbi, curseg->sum_blk,
+					GET_SUM_BLOCK(sbi, curseg->segno));
 			reset_curseg_fields(curseg);
 			goto skip_new_segment;
 		}
@@ -3612,13 +3685,13 @@ int f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
 	mutex_unlock(&curseg->curseg_mutex);
 	f2fs_up_read(&SM_I(sbi)->curseg_lock);
 	return 0;
 out_err:
 	*new_blkaddr = NULL_ADDR;
 	up_write(&sit_i->sentry_lock);
 	mutex_unlock(&curseg->curseg_mutex);
 	f2fs_up_read(&SM_I(sbi)->curseg_lock);
 	return ret;
 }

 void f2fs_update_device_state(struct f2fs_sb_info *sbi, nid_t ino,
@@ -3660,8 +3733,7 @@ static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
 			&fio->new_blkaddr, sum, type, fio)) {
 		if (fscrypt_inode_uses_fs_layer_crypto(fio->page->mapping->host))
 			fscrypt_finalize_bounce_page(&fio->encrypted_page);
-		if (PageWriteback(fio->page))
-			end_page_writeback(fio->page);
+		end_page_writeback(fio->page);
 		if (f2fs_in_warm_node_list(fio->sbi, fio->page))
 			f2fs_del_fsync_node_entry(fio->sbi, fio->page);
 		goto out;
@@ -3904,7 +3976,7 @@ void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
 void f2fs_wait_on_page_writeback(struct page *page,
 				enum page_type type, bool ordered, bool locked)
 {
-	if (PageWriteback(page)) {
+	if (folio_test_writeback(page_folio(page))) {
 		struct f2fs_sb_info *sbi = F2FS_P_SB(page);

 		/* submit cached LFS IO */
@@ -3913,7 +3985,8 @@ void f2fs_wait_on_page_writeback(struct page *page,
 		f2fs_submit_merged_ipu_write(sbi, NULL, page);
 		if (ordered) {
 			wait_on_page_writeback(page);
-			f2fs_bug_on(sbi, locked && PageWriteback(page));
+			f2fs_bug_on(sbi, locked &&
+					folio_test_writeback(page_folio(page)));
 		} else {
 			wait_for_stable_page(page);
 		}
@@ -4959,17 +5032,6 @@ static int sanity_check_curseg(struct f2fs_sb_info *sbi)
 }

 #ifdef CONFIG_BLK_DEV_ZONED
-static const char *f2fs_zone_status[BLK_ZONE_COND_OFFLINE + 1] = {
-	[BLK_ZONE_COND_NOT_WP]		= "NOT_WP",
-	[BLK_ZONE_COND_EMPTY]		= "EMPTY",
-	[BLK_ZONE_COND_IMP_OPEN]	= "IMPLICIT_OPEN",
-	[BLK_ZONE_COND_EXP_OPEN]	= "EXPLICIT_OPEN",
-	[BLK_ZONE_COND_CLOSED]		= "CLOSED",
-	[BLK_ZONE_COND_READONLY]	= "READONLY",
-	[BLK_ZONE_COND_FULL]		= "FULL",
-	[BLK_ZONE_COND_OFFLINE]		= "OFFLINE",
-};
-
 static int check_zone_write_pointer(struct f2fs_sb_info *sbi,
 				    struct f2fs_dev_info *fdev,
 				    struct blk_zone *zone)
@@ -5000,7 +5062,7 @@ static int check_zone_write_pointer(struct f2fs_sb_info *sbi,
 	if (IS_CURSEC(sbi, GET_SEC_FROM_SEG(sbi, zone_segno))) {
 		f2fs_notice(sbi, "Open zones: valid block[0x%x,0x%x] cond[%s]",
 				zone_segno, valid_block_cnt,
-				f2fs_zone_status[zone->cond]);
+				blk_zone_cond_str(zone->cond));
 		return 0;
 	}
@@ -5011,7 +5073,7 @@ static int check_zone_write_pointer(struct f2fs_sb_info *sbi,
 	if (!valid_block_cnt) {
 		f2fs_notice(sbi, "Zone without valid block has non-zero write "
 				"pointer. Reset the write pointer: cond[%s]",
-				f2fs_zone_status[zone->cond]);
+				blk_zone_cond_str(zone->cond));
 		ret = __f2fs_issue_discard_zone(sbi, fdev->bdev, zone_block,
 					zone->len >> log_sectors_per_block);
 		if (ret)
@@ -5029,7 +5091,7 @@ static int check_zone_write_pointer(struct f2fs_sb_info *sbi,
 	 */
 	f2fs_notice(sbi, "Valid blocks are not aligned with write "
 			"pointer: valid block[0x%x,0x%x] cond[%s]",
-			zone_segno, valid_block_cnt, f2fs_zone_status[zone->cond]);
+			zone_segno, valid_block_cnt, blk_zone_cond_str(zone->cond));

 	nofs_flags = memalloc_nofs_save();
 	ret = blkdev_zone_mgmt(fdev->bdev, REQ_OP_ZONE_FINISH,
......
@@ -66,21 +66,31 @@ const char *f2fs_fault_name[FAULT_MAX] = {
 	[FAULT_NO_SEGMENT]	= "no free segment",
 };

-void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
-							unsigned int type)
+int f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned long rate,
+							unsigned long type)
 {
 	struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;

 	if (rate) {
+		if (rate > INT_MAX)
+			return -EINVAL;
 		atomic_set(&ffi->inject_ops, 0);
-		ffi->inject_rate = rate;
+		ffi->inject_rate = (int)rate;
 	}

-	if (type)
-		ffi->inject_type = type;
+	if (type) {
+		if (type >= BIT(FAULT_MAX))
+			return -EINVAL;
+		ffi->inject_type = (unsigned int)type;
+	}

 	if (!rate && !type)
 		memset(ffi, 0, sizeof(struct f2fs_fault_info));
+	else
+		f2fs_info(sbi,
+			"build fault injection attr: rate: %lu, type: 0x%lx",
+								rate, type);
+	return 0;
 }
 #endif
@@ -886,14 +896,17 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
 		case Opt_fault_injection:
 			if (args->from && match_int(args, &arg))
 				return -EINVAL;
-			f2fs_build_fault_attr(sbi, arg, F2FS_ALL_FAULT_TYPE);
+			if (f2fs_build_fault_attr(sbi, arg,
+					F2FS_ALL_FAULT_TYPE))
+				return -EINVAL;
 			set_opt(sbi, FAULT_INJECTION);
 			break;

 		case Opt_fault_type:
 			if (args->from && match_int(args, &arg))
 				return -EINVAL;
-			f2fs_build_fault_attr(sbi, 0, arg);
+			if (f2fs_build_fault_attr(sbi, 0, arg))
+				return -EINVAL;
 			set_opt(sbi, FAULT_INJECTION);
 			break;
 #else
@@ -2132,8 +2145,6 @@ static void default_options(struct f2fs_sb_info *sbi, bool remount)
 	F2FS_OPTION(sbi).memory_mode = MEMORY_MODE_NORMAL;
 	F2FS_OPTION(sbi).errors = MOUNT_ERRORS_CONTINUE;

-	sbi->sb->s_flags &= ~SB_INLINECRYPT;
-
 	set_opt(sbi, INLINE_XATTR);
 	set_opt(sbi, INLINE_DATA);
 	set_opt(sbi, INLINE_DENTRY);
@@ -2326,6 +2337,17 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
 	if (err)
 		goto restore_opts;

+#ifdef CONFIG_BLK_DEV_ZONED
+	if (f2fs_sb_has_blkzoned(sbi) &&
+		sbi->max_open_zones < F2FS_OPTION(sbi).active_logs) {
+		f2fs_err(sbi,
+			"zoned: max open zones %u is too small, need at least %u open zones",
+			sbi->max_open_zones, F2FS_OPTION(sbi).active_logs);
+		err = -EINVAL;
+		goto restore_opts;
+	}
+#endif
+
 	/* flush outstanding errors before changing fs state */
 	flush_work(&sbi->s_error_work);
@@ -2547,6 +2569,11 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
 	return err;
 }

+static void f2fs_shutdown(struct super_block *sb)
+{
+	f2fs_do_shutdown(F2FS_SB(sb), F2FS_GOING_DOWN_NOSYNC, false);
+}
+
 #ifdef CONFIG_QUOTA
 static bool f2fs_need_recovery(struct f2fs_sb_info *sbi)
 {
@@ -3146,6 +3173,7 @@ static const struct super_operations f2fs_sops = {
 	.unfreeze_fs	= f2fs_unfreeze,
 	.statfs		= f2fs_statfs,
 	.remount_fs	= f2fs_remount,
+	.shutdown	= f2fs_shutdown,
 };

 #ifdef CONFIG_FS_ENCRYPTION
@@ -3441,7 +3469,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
 		}
 	}
 
-	/* Currently, support only 4KB block size */
+	/* only support block_size equals to PAGE_SIZE */
 	if (le32_to_cpu(raw_super->log_blocksize) != F2FS_BLKSIZE_BITS) {
 		f2fs_info(sbi, "Invalid log_blocksize (%u), supports only %u",
 			  le32_to_cpu(raw_super->log_blocksize),
@@ -3862,11 +3890,24 @@ static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
 	sector_t nr_sectors = bdev_nr_sectors(bdev);
 	struct f2fs_report_zones_args rep_zone_arg;
 	u64 zone_sectors;
+	unsigned int max_open_zones;
 	int ret;
 
 	if (!f2fs_sb_has_blkzoned(sbi))
 		return 0;
 
+	if (bdev_is_zoned(FDEV(devi).bdev)) {
+		max_open_zones = bdev_max_open_zones(bdev);
+		if (max_open_zones && (max_open_zones < sbi->max_open_zones))
+			sbi->max_open_zones = max_open_zones;
+		if (sbi->max_open_zones < F2FS_OPTION(sbi).active_logs) {
+			f2fs_err(sbi,
+				"zoned: max open zones %u is too small, need at least %u open zones",
+				sbi->max_open_zones, F2FS_OPTION(sbi).active_logs);
+			return -EINVAL;
+		}
+	}
+
 	zone_sectors = bdev_zone_sectors(bdev);
 	if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
 				SECTOR_TO_BLOCK(zone_sectors))
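For orientation, sbi->max_open_zones starts out at UINT_MAX (see the f2fs_scan_devices() hunk further down) and each zoned device can only lower it; the mount is refused once it drops below the number of active logs. A self-contained sketch of that clamping, with made-up per-device limits and an assumed active-log count:

```c
/* Illustrative model of the clamping, not kernel code. */
#include <limits.h>
#include <stdio.h>

static unsigned int clamp_max_open_zones(const unsigned int *per_dev_limit, int ndevs)
{
	unsigned int max_open = UINT_MAX;	/* "unlimited" until some device says otherwise */

	for (int i = 0; i < ndevs; i++)
		if (per_dev_limit[i] && per_dev_limit[i] < max_open)
			max_open = per_dev_limit[i];
	return max_open;
}

int main(void)
{
	/* made-up limits: two zoned devices and one conventional device (0 = no limit) */
	unsigned int limits[] = { 14, 0, 10 };
	unsigned int active_logs = 6;		/* assumed default number of active logs */
	unsigned int max_open = clamp_max_open_zones(limits, 3);

	printf("effective max open zones: %u\n", max_open);
	printf("mount %s\n", active_logs <= max_open ? "allowed" : "rejected with -EINVAL");
	return 0;
}
```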
@@ -4131,9 +4172,15 @@ void f2fs_handle_critical_error(struct f2fs_sb_info *sbi, unsigned char reason,
 	if (shutdown)
 		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
 
-	/* continue filesystem operators if errors=continue */
-	if (continue_fs || f2fs_readonly(sb))
+	/*
+	 * Continue filesystem operators if errors=continue. Should not set
+	 * RO by shutdown, since RO bypasses thaw_super which can hang the
+	 * system.
+	 */
+	if (continue_fs || f2fs_readonly(sb) || shutdown) {
+		f2fs_warn(sbi, "Stopped filesystem due to reason: %d", reason);
 		return;
+	}
 
 	f2fs_warn(sbi, "Remounting filesystem read-only");
 	/*
@@ -4180,6 +4227,9 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
 	logical_blksize = bdev_logical_block_size(sbi->sb->s_bdev);
 	sbi->aligned_blksize = true;
+#ifdef CONFIG_BLK_DEV_ZONED
+	sbi->max_open_zones = UINT_MAX;
+#endif
 
 	for (i = 0; i < max_devices; i++) {
 		if (i == 0)
@@ -4894,12 +4944,6 @@ static int __init init_f2fs_fs(void)
 {
 	int err;
 
-	if (PAGE_SIZE != F2FS_BLKSIZE) {
-		printk("F2FS not supported on PAGE_SIZE(%lu) != BLOCK_SIZE(%lu)\n",
-				PAGE_SIZE, F2FS_BLKSIZE);
-		return -EINVAL;
-	}
-
 	err = init_inodecache();
 	if (err)
 		goto fail;
......
@@ -484,10 +484,16 @@ static ssize_t __sbi_store(struct f2fs_attr *a,
 	if (ret < 0)
 		return ret;
 #ifdef CONFIG_F2FS_FAULT_INJECTION
-	if (a->struct_type == FAULT_INFO_TYPE && t >= BIT(FAULT_MAX))
-		return -EINVAL;
-	if (a->struct_type == FAULT_INFO_RATE && t >= UINT_MAX)
-		return -EINVAL;
+	if (a->struct_type == FAULT_INFO_TYPE) {
+		if (f2fs_build_fault_attr(sbi, 0, t))
+			return -EINVAL;
+		return count;
+	}
+	if (a->struct_type == FAULT_INFO_RATE) {
+		if (f2fs_build_fault_attr(sbi, t, 0))
+			return -EINVAL;
+		return count;
+	}
 #endif
 	if (a->struct_type == RESERVED_BLOCKS) {
 		spin_lock(&sbi->stat_lock);
@@ -675,6 +681,13 @@ static ssize_t __sbi_store(struct f2fs_attr *a,
 		return count;
 	}
 
+	if (!strcmp(a->attr.name, "gc_pin_file_threshold")) {
+		if (t > MAX_GC_FAILED_PINNED_FILES)
+			return -EINVAL;
+		sbi->gc_pin_file_threshold = t;
+		return count;
+	}
+
 	if (!strcmp(a->attr.name, "gc_reclaimed_segments")) {
 		if (t != 0)
 			return -EINVAL;
......
@@ -394,7 +394,8 @@ struct f2fs_nat_block {
 /*
  * F2FS uses 4 bytes to represent block address. As a result, supported size of
- * disk is 16 TB and it equals to 16 * 1024 * 1024 / 2 segments.
+ * disk is 16 TB for a 4K page size and 64 TB for a 16K page size and it equals
+ * to 16 * 1024 * 1024 / 2 segments.
  */
 #define F2FS_MAX_SEGMENT	((16 * 1024 * 1024) / 2)
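The figures in the updated comment follow from the 4-byte block address and the fixed 512-blocks-per-segment geometry; the short program below only spells out that arithmetic and is an illustration, not kernel code.

```c
/* Worked numbers behind the F2FS_MAX_SEGMENT comment. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t blocks = 1ULL << 32;		/* 4-byte block addresses */
	uint64_t blks_per_seg = 512;		/* 2 MiB segment at 4 KiB, 8 MiB at 16 KiB */

	printf("max size, 4 KiB blocks:  %llu TiB\n",
	       (unsigned long long)((blocks * 4096) >> 40));	/* 16 */
	printf("max size, 16 KiB blocks: %llu TiB\n",
	       (unsigned long long)((blocks * 16384) >> 40));	/* 64 */
	printf("max segments: %llu\n",
	       (unsigned long long)(blocks / blks_per_seg));	/* 8388608 = 16*1024*1024/2 */
	return 0;
}
```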
@@ -424,8 +425,10 @@ struct f2fs_sit_block {
 /*
  * For segment summary
  *
- * One summary block contains exactly 512 summary entries, which represents
- * exactly one segment by default. Not allow to change the basic units.
+ * One summary block with 4KB size contains exactly 512 summary entries, which
+ * represents exactly one segment with 2MB size.
+ * Similarly, in the case of block with 16KB size, it represents one segment with 8MB size.
+ * Not allow to change the basic units.
 *
 * NOTE: For initializing fields, you must use set_summary
 *
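The 512-entry figure itself falls out of the on-disk layout: as I read struct f2fs_summary, one entry is 7 bytes, so 512 entries plus the footer fit in one 4 KiB summary block, and a 2 MiB segment holds exactly 512 such blocks, one entry per block. A quick self-check of that arithmetic:

```c
/* Quick arithmetic check for the 4 KiB-block case described above. */
#include <assert.h>

int main(void)
{
	const unsigned int block_size   = 4096;	/* one summary block */
	const unsigned int entry_size   = 7;	/* sizeof(struct f2fs_summary), as assumed */
	const unsigned int entries      = 512;
	const unsigned int blks_per_seg = 512;	/* 2 MiB / 4 KiB */

	assert(entries * entry_size < block_size);	/* 3584 bytes; the footer fits in the rest */
	assert(entries == blks_per_seg);		/* one summary entry per block in the segment */
	return 0;
}
```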
@@ -556,6 +559,7 @@ typedef __le32 f2fs_hash_t;
 /*
  * space utilization of regular dentry and inline dentry (w/o extra reservation)
+ * when block size is 4KB.
 *		regular dentry		inline dentry (def)	inline dentry (min)
 * bitmap	1 * 27 = 27		1 * 23 = 23		1 * 1 = 1
 * reserved	1 * 3 = 3		1 * 7 = 7		1 * 1 = 1
......
@@ -1304,11 +1304,11 @@ TRACE_EVENT(f2fs_write_end,
 		__entry->copied)
 );
 
-DECLARE_EVENT_CLASS(f2fs__page,
+DECLARE_EVENT_CLASS(f2fs__folio,
 
-	TP_PROTO(struct page *page, int type),
+	TP_PROTO(struct folio *folio, int type),
 
-	TP_ARGS(page, type),
+	TP_ARGS(folio, type),
 
 	TP_STRUCT__entry(
 		__field(dev_t,	dev)
@@ -1321,14 +1321,14 @@ DECLARE_EVENT_CLASS(f2fs__page,
 	),
 
 	TP_fast_assign(
-		__entry->dev	= page_file_mapping(page)->host->i_sb->s_dev;
-		__entry->ino	= page_file_mapping(page)->host->i_ino;
+		__entry->dev	= folio_file_mapping(folio)->host->i_sb->s_dev;
+		__entry->ino	= folio_file_mapping(folio)->host->i_ino;
 		__entry->type	= type;
 		__entry->dir	=
-			S_ISDIR(page_file_mapping(page)->host->i_mode);
-		__entry->index	= page->index;
-		__entry->dirty	= PageDirty(page);
-		__entry->uptodate = PageUptodate(page);
+			S_ISDIR(folio_file_mapping(folio)->host->i_mode);
+		__entry->index	= folio_index(folio);
+		__entry->dirty	= folio_test_dirty(folio);
+		__entry->uptodate = folio_test_uptodate(folio);
 	),
 
 	TP_printk("dev = (%d,%d), ino = %lu, %s, %s, index = %lu, "
@@ -1341,32 +1341,32 @@ DECLARE_EVENT_CLASS(f2fs__page,
 		__entry->uptodate)
 );
 
-DEFINE_EVENT(f2fs__page, f2fs_writepage,
+DEFINE_EVENT(f2fs__folio, f2fs_writepage,
 
-	TP_PROTO(struct page *page, int type),
+	TP_PROTO(struct folio *folio, int type),
 
-	TP_ARGS(page, type)
+	TP_ARGS(folio, type)
 );
 
-DEFINE_EVENT(f2fs__page, f2fs_do_write_data_page,
+DEFINE_EVENT(f2fs__folio, f2fs_do_write_data_page,
 
-	TP_PROTO(struct page *page, int type),
+	TP_PROTO(struct folio *folio, int type),
 
-	TP_ARGS(page, type)
+	TP_ARGS(folio, type)
 );
 
-DEFINE_EVENT(f2fs__page, f2fs_readpage,
+DEFINE_EVENT(f2fs__folio, f2fs_readpage,
 
-	TP_PROTO(struct page *page, int type),
+	TP_PROTO(struct folio *folio, int type),
 
-	TP_ARGS(page, type)
+	TP_ARGS(folio, type)
 );
 
-DEFINE_EVENT(f2fs__page, f2fs_set_page_dirty,
+DEFINE_EVENT(f2fs__folio, f2fs_set_page_dirty,
 
-	TP_PROTO(struct page *page, int type),
+	TP_PROTO(struct folio *folio, int type),
 
-	TP_ARGS(page, type)
+	TP_ARGS(folio, type)
 );
 
 TRACE_EVENT(f2fs_replace_atomic_write_block,
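After the class conversion, callers hand the tracepoint the folio they are already writing instead of a page. The fragment below is a hypothetical caller for illustration (it only compiles inside the kernel tree, and everything except trace_f2fs_writepage() and the DATA page type is a placeholder); before this change, the equivalent call was trace_f2fs_writepage(page, DATA).

```c
/* Hypothetical kernel-context caller, illustration only. */
static int example_write_data_folio(struct folio *folio)
{
	trace_f2fs_writepage(folio, DATA);
	/* ...set up the f2fs io info and submit the write... */
	return 0;
}
```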
......