Commit 0615090c authored by Matthew Wilcox (Oracle), committed by Linus Torvalds

erofs: convert compressed files from readpages to readahead

Use the new readahead operation in erofs.
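
For reference, a minimal sketch of the shape of the new interface (illustrative only, not the erofs code: the hook name and the zero-fill body are placeholders, while readahead_page(), readahead_pos() and readahead_count() are the real helpers this series introduces). Pages arrive already locked and inserted into the page cache; the implementation pops them one at a time:

#include <linux/pagemap.h>
#include <linux/highmem.h>

/* Hypothetical ->readahead hook; "example_readahead" is not a real
 * kernel symbol.  The core has already locked each page and added it
 * to the page cache before this is called. */
static void example_readahead(struct readahead_control *rac)
{
	struct page *page;

	/* readahead_pos()/readahead_count() describe the whole request;
	 * readahead_page() hands out one locked, referenced page at a
	 * time and returns NULL when the request is exhausted. */
	while ((page = readahead_page(rac))) {
		zero_user(page, 0, PAGE_SIZE);	/* stand-in for real I/O */
		SetPageUptodate(page);
		unlock_page(page);
		put_page(page);
	}
}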
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Acked-by: Gao Xiang <gaoxiang25@huawei.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Cong Wang <xiyou.wangcong@gmail.com>
Cc: Darrick J. Wong <darrick.wong@oracle.com>
Cc: Eric Biggers <ebiggers@google.com>
Cc: Jaegeuk Kim <jaegeuk@kernel.org>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Joseph Qi <joseph.qi@linux.alibaba.com>
Cc: Junxiao Bi <junxiao.bi@oracle.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Cc: Miklos Szeredi <mszeredi@redhat.com>
Link: http://lkml.kernel.org/r/20200414150233.24495-20-willy@infradead.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 0c07a9f9
@@ -1305,28 +1305,23 @@ static bool should_decompress_synchronously(struct erofs_sb_info *sbi,
 	return nr <= sbi->max_sync_decompress_pages;
 }
 
-static int z_erofs_readpages(struct file *filp, struct address_space *mapping,
-			     struct list_head *pages, unsigned int nr_pages)
+static void z_erofs_readahead(struct readahead_control *rac)
 {
-	struct inode *const inode = mapping->host;
+	struct inode *const inode = rac->mapping->host;
 	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
 
-	bool sync = should_decompress_synchronously(sbi, nr_pages);
+	bool sync = should_decompress_synchronously(sbi, readahead_count(rac));
 	struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
-	gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
-	struct page *head = NULL;
+	struct page *page, *head = NULL;
 	LIST_HEAD(pagepool);
 
-	trace_erofs_readpages(mapping->host, lru_to_page(pages)->index,
-			      nr_pages, false);
+	trace_erofs_readpages(inode, readahead_index(rac),
+			readahead_count(rac), false);
 
-	f.headoffset = (erofs_off_t)lru_to_page(pages)->index << PAGE_SHIFT;
-
-	for (; nr_pages; --nr_pages) {
-		struct page *page = lru_to_page(pages);
+	f.headoffset = readahead_pos(rac);
 
+	while ((page = readahead_page(rac))) {
 		prefetchw(&page->flags);
-		list_del(&page->lru);
 
 		/*
 		 * A pure asynchronous readahead is indicated if
@@ -1335,11 +1330,6 @@ static int z_erofs_readpages(struct file *filp, struct address_space *mapping,
 		 */
 		sync &= !(PageReadahead(page) && !head);
 
-		if (add_to_page_cache_lru(page, mapping, page->index, gfp)) {
-			list_add(&page->lru, &pagepool);
-			continue;
-		}
-
 		set_page_private(page, (unsigned long)head);
 		head = page;
 	}
@@ -1368,11 +1358,10 @@ static int z_erofs_readpages(struct file *filp, struct address_space *mapping,
 
 	/* clean up the remaining free pages */
 	put_pages_list(&pagepool);
-	return 0;
 }
 
 const struct address_space_operations z_erofs_aops = {
 	.readpage = z_erofs_readpage,
-	.readpages = z_erofs_readpages,
+	.readahead = z_erofs_readahead,
 };
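
The key simplification in the conversion: ->readpages handed the filesystem a list of pages that were not yet in the page cache, so z_erofs_readpages() had to insert each one itself via add_to_page_cache_lru() with a suitable gfp mask, manage the lru list, and recycle failed insertions through the pagepool. With ->readahead the core performs the page-cache insertion and locking before invoking the hook, so that code, the per-page list manipulation, and the now-meaningless int return value all drop out.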