f2fs: Convert to release_folio

While converting f2fs_release_page() to f2fs_release_folio(), cache the
sb_info so we don't need to retrieve it twice, and remove the redundant
call to set_page_private().  The use of folios should be pushed further
into f2fs from here.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
parent 3c402f15
...@@ -468,7 +468,7 @@ const struct address_space_operations f2fs_meta_aops = { ...@@ -468,7 +468,7 @@ const struct address_space_operations f2fs_meta_aops = {
.writepages = f2fs_write_meta_pages, .writepages = f2fs_write_meta_pages,
.dirty_folio = f2fs_dirty_meta_folio, .dirty_folio = f2fs_dirty_meta_folio,
.invalidate_folio = f2fs_invalidate_folio, .invalidate_folio = f2fs_invalidate_folio,
.releasepage = f2fs_release_page, .release_folio = f2fs_release_folio,
#ifdef CONFIG_MIGRATION #ifdef CONFIG_MIGRATION
.migratepage = f2fs_migrate_page, .migratepage = f2fs_migrate_page,
#endif #endif
......
...@@ -1746,7 +1746,7 @@ unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn) ...@@ -1746,7 +1746,7 @@ unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn)
} }
const struct address_space_operations f2fs_compress_aops = { const struct address_space_operations f2fs_compress_aops = {
.releasepage = f2fs_release_page, .release_folio = f2fs_release_folio,
.invalidate_folio = f2fs_invalidate_folio, .invalidate_folio = f2fs_invalidate_folio,
}; };
......
...@@ -3528,28 +3528,30 @@ void f2fs_invalidate_folio(struct folio *folio, size_t offset, size_t length) ...@@ -3528,28 +3528,30 @@ void f2fs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
folio_detach_private(folio); folio_detach_private(folio);
} }
int f2fs_release_page(struct page *page, gfp_t wait) bool f2fs_release_folio(struct folio *folio, gfp_t wait)
{ {
/* If this is dirty page, keep PagePrivate */ struct f2fs_sb_info *sbi;
if (PageDirty(page))
return 0; /* If this is dirty folio, keep private data */
if (folio_test_dirty(folio))
return false;
/* This is atomic written page, keep Private */ /* This is atomic written page, keep Private */
if (page_private_atomic(page)) if (page_private_atomic(&folio->page))
return 0; return false;
if (test_opt(F2FS_P_SB(page), COMPRESS_CACHE)) { sbi = F2FS_M_SB(folio->mapping);
struct inode *inode = page->mapping->host; if (test_opt(sbi, COMPRESS_CACHE)) {
struct inode *inode = folio->mapping->host;
if (inode->i_ino == F2FS_COMPRESS_INO(F2FS_I_SB(inode))) if (inode->i_ino == F2FS_COMPRESS_INO(sbi))
clear_page_private_data(page); clear_page_private_data(&folio->page);
} }
clear_page_private_gcing(page); clear_page_private_gcing(&folio->page);
detach_page_private(page); folio_detach_private(folio);
set_page_private(page, 0); return true;
return 1;
} }
static bool f2fs_dirty_data_folio(struct address_space *mapping, static bool f2fs_dirty_data_folio(struct address_space *mapping,
...@@ -3944,7 +3946,7 @@ const struct address_space_operations f2fs_dblock_aops = { ...@@ -3944,7 +3946,7 @@ const struct address_space_operations f2fs_dblock_aops = {
.write_end = f2fs_write_end, .write_end = f2fs_write_end,
.dirty_folio = f2fs_dirty_data_folio, .dirty_folio = f2fs_dirty_data_folio,
.invalidate_folio = f2fs_invalidate_folio, .invalidate_folio = f2fs_invalidate_folio,
.releasepage = f2fs_release_page, .release_folio = f2fs_release_folio,
.direct_IO = noop_direct_IO, .direct_IO = noop_direct_IO,
.bmap = f2fs_bmap, .bmap = f2fs_bmap,
.swap_activate = f2fs_swap_activate, .swap_activate = f2fs_swap_activate,
......
...@@ -3768,7 +3768,7 @@ int f2fs_write_single_data_page(struct page *page, int *submitted, ...@@ -3768,7 +3768,7 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
int compr_blocks, bool allow_balance); int compr_blocks, bool allow_balance);
void f2fs_write_failed(struct inode *inode, loff_t to); void f2fs_write_failed(struct inode *inode, loff_t to);
void f2fs_invalidate_folio(struct folio *folio, size_t offset, size_t length); void f2fs_invalidate_folio(struct folio *folio, size_t offset, size_t length);
int f2fs_release_page(struct page *page, gfp_t wait); bool f2fs_release_folio(struct folio *folio, gfp_t wait);
#ifdef CONFIG_MIGRATION #ifdef CONFIG_MIGRATION
int f2fs_migrate_page(struct address_space *mapping, struct page *newpage, int f2fs_migrate_page(struct address_space *mapping, struct page *newpage,
struct page *page, enum migrate_mode mode); struct page *page, enum migrate_mode mode);
......
...@@ -2165,7 +2165,7 @@ const struct address_space_operations f2fs_node_aops = { ...@@ -2165,7 +2165,7 @@ const struct address_space_operations f2fs_node_aops = {
.writepages = f2fs_write_node_pages, .writepages = f2fs_write_node_pages,
.dirty_folio = f2fs_dirty_node_folio, .dirty_folio = f2fs_dirty_node_folio,
.invalidate_folio = f2fs_invalidate_folio, .invalidate_folio = f2fs_invalidate_folio,
.releasepage = f2fs_release_page, .release_folio = f2fs_release_folio,
#ifdef CONFIG_MIGRATION #ifdef CONFIG_MIGRATION
.migratepage = f2fs_migrate_page, .migratepage = f2fs_migrate_page,
#endif #endif
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment