Commit db92e6c7 authored by Chao Yu, committed by Jaegeuk Kim

f2fs: convert f2fs_mpage_readpages() to use folio

Convert f2fs_mpage_readpages() to use folio and related
functionality.
Signed-off-by: Chao Yu <chao@kernel.org>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
parent 7643f3fe
......@@ -2345,7 +2345,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
* Major change was from block_size == page_size in f2fs by default.
*/
static int f2fs_mpage_readpages(struct inode *inode,
struct readahead_control *rac, struct page *page)
struct readahead_control *rac, struct folio *folio)
{
struct bio *bio = NULL;
sector_t last_block_in_bio = 0;
......@@ -2365,6 +2365,7 @@ static int f2fs_mpage_readpages(struct inode *inode,
#endif
unsigned nr_pages = rac ? readahead_count(rac) : 1;
unsigned max_nr_pages = nr_pages;
pgoff_t index;
int ret = 0;
map.m_pblk = 0;
......@@ -2378,64 +2379,63 @@ static int f2fs_mpage_readpages(struct inode *inode,
for (; nr_pages; nr_pages--) {
if (rac) {
page = readahead_page(rac);
prefetchw(&page->flags);
folio = readahead_folio(rac);
prefetchw(&folio->flags);
}
#ifdef CONFIG_F2FS_FS_COMPRESSION
if (f2fs_compressed_file(inode)) {
/* there are remained compressed pages, submit them */
if (!f2fs_cluster_can_merge_page(&cc, page->index)) {
ret = f2fs_read_multi_pages(&cc, &bio,
max_nr_pages,
&last_block_in_bio,
rac != NULL, false);
f2fs_destroy_compress_ctx(&cc, false);
if (ret)
goto set_error_page;
}
if (cc.cluster_idx == NULL_CLUSTER) {
if (nc_cluster_idx ==
page->index >> cc.log_cluster_size) {
goto read_single_page;
}
ret = f2fs_is_compressed_cluster(inode, page->index);
if (ret < 0)
goto set_error_page;
else if (!ret) {
nc_cluster_idx =
page->index >> cc.log_cluster_size;
goto read_single_page;
}
index = folio_index(folio);
nc_cluster_idx = NULL_CLUSTER;
}
ret = f2fs_init_compress_ctx(&cc);
#ifdef CONFIG_F2FS_FS_COMPRESSION
if (!f2fs_compressed_file(inode))
goto read_single_page;
/* there are remained compressed pages, submit them */
if (!f2fs_cluster_can_merge_page(&cc, index)) {
ret = f2fs_read_multi_pages(&cc, &bio,
max_nr_pages,
&last_block_in_bio,
rac != NULL, false);
f2fs_destroy_compress_ctx(&cc, false);
if (ret)
goto set_error_page;
}
if (cc.cluster_idx == NULL_CLUSTER) {
if (nc_cluster_idx == index >> cc.log_cluster_size)
goto read_single_page;
f2fs_compress_ctx_add_page(&cc, page);
ret = f2fs_is_compressed_cluster(inode, index);
if (ret < 0)
goto set_error_page;
else if (!ret) {
nc_cluster_idx =
index >> cc.log_cluster_size;
goto read_single_page;
}
goto next_page;
nc_cluster_idx = NULL_CLUSTER;
}
ret = f2fs_init_compress_ctx(&cc);
if (ret)
goto set_error_page;
f2fs_compress_ctx_add_page(&cc, &folio->page);
goto next_page;
read_single_page:
#endif
ret = f2fs_read_single_page(inode, page, max_nr_pages, &map,
ret = f2fs_read_single_page(inode, &folio->page, max_nr_pages, &map,
&bio, &last_block_in_bio, rac);
if (ret) {
#ifdef CONFIG_F2FS_FS_COMPRESSION
set_error_page:
#endif
zero_user_segment(page, 0, PAGE_SIZE);
unlock_page(page);
folio_zero_segment(folio, 0, folio_size(folio));
folio_unlock(folio);
}
#ifdef CONFIG_F2FS_FS_COMPRESSION
next_page:
#endif
if (rac)
put_page(page);
#ifdef CONFIG_F2FS_FS_COMPRESSION
if (f2fs_compressed_file(inode)) {
......@@ -2472,7 +2472,7 @@ static int f2fs_read_data_folio(struct file *file, struct folio *folio)
if (f2fs_has_inline_data(inode))
ret = f2fs_read_inline_data(inode, page);
if (ret == -EAGAIN)
ret = f2fs_mpage_readpages(inode, NULL, page);
ret = f2fs_mpage_readpages(inode, NULL, folio);
return ret;
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment