Commit 6ab5eed6 authored by Gao Xiang

erofs: avoid on-stack pagepool directly passed by arguments

On-stack pagepool is used so that short-lived temporary pages could be
shared within a single I/O request (e.g. among multiple pclusters).

Move the remaining frontend-related uses into
z_erofs_decompress_frontend to avoid passing too many arguments.
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Reviewed-by: Yue Hu <huyue2@coolpad.com>
Link: https://lore.kernel.org/r/20230526201459.128169-3-hsiangkao@linux.alibaba.com
parent 05b63d2b
...@@ -240,13 +240,14 @@ static void z_erofs_bvec_iter_begin(struct z_erofs_bvec_iter *iter, ...@@ -240,13 +240,14 @@ static void z_erofs_bvec_iter_begin(struct z_erofs_bvec_iter *iter,
static int z_erofs_bvec_enqueue(struct z_erofs_bvec_iter *iter, static int z_erofs_bvec_enqueue(struct z_erofs_bvec_iter *iter,
struct z_erofs_bvec *bvec, struct z_erofs_bvec *bvec,
struct page **candidate_bvpage) struct page **candidate_bvpage,
struct page **pagepool)
{ {
if (iter->cur >= iter->nr) { if (iter->cur >= iter->nr) {
struct page *nextpage = *candidate_bvpage; struct page *nextpage = *candidate_bvpage;
if (!nextpage) { if (!nextpage) {
nextpage = alloc_page(GFP_NOFS); nextpage = erofs_allocpage(pagepool, GFP_NOFS);
if (!nextpage) if (!nextpage)
return -ENOMEM; return -ENOMEM;
set_page_private(nextpage, Z_EROFS_SHORTLIVED_PAGE); set_page_private(nextpage, Z_EROFS_SHORTLIVED_PAGE);
...@@ -547,6 +548,7 @@ struct z_erofs_decompress_frontend { ...@@ -547,6 +548,7 @@ struct z_erofs_decompress_frontend {
struct erofs_map_blocks map; struct erofs_map_blocks map;
struct z_erofs_bvec_iter biter; struct z_erofs_bvec_iter biter;
struct page *pagepool;
struct page *candidate_bvpage; struct page *candidate_bvpage;
struct z_erofs_pcluster *pcl, *tailpcl; struct z_erofs_pcluster *pcl, *tailpcl;
z_erofs_next_pcluster_t owned_head; z_erofs_next_pcluster_t owned_head;
...@@ -581,8 +583,7 @@ static bool z_erofs_should_alloc_cache(struct z_erofs_decompress_frontend *fe) ...@@ -581,8 +583,7 @@ static bool z_erofs_should_alloc_cache(struct z_erofs_decompress_frontend *fe)
return false; return false;
} }
static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe, static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
struct page **pagepool)
{ {
struct address_space *mc = MNGD_MAPPING(EROFS_I_SB(fe->inode)); struct address_space *mc = MNGD_MAPPING(EROFS_I_SB(fe->inode));
struct z_erofs_pcluster *pcl = fe->pcl; struct z_erofs_pcluster *pcl = fe->pcl;
...@@ -623,7 +624,7 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe, ...@@ -623,7 +624,7 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe,
* succeeds or fallback to in-place I/O instead * succeeds or fallback to in-place I/O instead
* to avoid any direct reclaim. * to avoid any direct reclaim.
*/ */
newpage = erofs_allocpage(pagepool, gfp); newpage = erofs_allocpage(&fe->pagepool, gfp);
if (!newpage) if (!newpage)
continue; continue;
set_page_private(newpage, Z_EROFS_PREALLOCATED_PAGE); set_page_private(newpage, Z_EROFS_PREALLOCATED_PAGE);
...@@ -636,7 +637,7 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe, ...@@ -636,7 +637,7 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe,
if (page) if (page)
put_page(page); put_page(page);
else if (newpage) else if (newpage)
erofs_pagepool_add(pagepool, newpage); erofs_pagepool_add(&fe->pagepool, newpage);
} }
/* /*
...@@ -734,7 +735,8 @@ static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe, ...@@ -734,7 +735,8 @@ static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe,
!fe->candidate_bvpage) !fe->candidate_bvpage)
fe->candidate_bvpage = bvec->page; fe->candidate_bvpage = bvec->page;
} }
ret = z_erofs_bvec_enqueue(&fe->biter, bvec, &fe->candidate_bvpage); ret = z_erofs_bvec_enqueue(&fe->biter, bvec, &fe->candidate_bvpage,
&fe->pagepool);
fe->pcl->vcnt += (ret >= 0); fe->pcl->vcnt += (ret >= 0);
return ret; return ret;
} }
...@@ -959,7 +961,7 @@ static int z_erofs_read_fragment(struct inode *inode, erofs_off_t pos, ...@@ -959,7 +961,7 @@ static int z_erofs_read_fragment(struct inode *inode, erofs_off_t pos,
} }
static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe, static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
struct page *page, struct page **pagepool) struct page *page)
{ {
struct inode *const inode = fe->inode; struct inode *const inode = fe->inode;
struct erofs_map_blocks *const map = &fe->map; struct erofs_map_blocks *const map = &fe->map;
...@@ -1017,7 +1019,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe, ...@@ -1017,7 +1019,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE; fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
} else { } else {
/* bind cache first when cached decompression is preferred */ /* bind cache first when cached decompression is preferred */
z_erofs_bind_cache(fe, pagepool); z_erofs_bind_cache(fe);
} }
hitted: hitted:
/* /*
...@@ -1660,7 +1662,6 @@ static void z_erofs_decompressqueue_endio(struct bio *bio) ...@@ -1660,7 +1662,6 @@ static void z_erofs_decompressqueue_endio(struct bio *bio)
} }
static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f, static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
struct page **pagepool,
struct z_erofs_decompressqueue *fgq, struct z_erofs_decompressqueue *fgq,
bool *force_fg, bool readahead) bool *force_fg, bool readahead)
{ {
...@@ -1723,8 +1724,8 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f, ...@@ -1723,8 +1724,8 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
do { do {
struct page *page; struct page *page;
page = pickup_page_for_submission(pcl, i++, pagepool, page = pickup_page_for_submission(pcl, i++,
mc); &f->pagepool, mc);
if (!page) if (!page)
continue; continue;
...@@ -1789,16 +1790,16 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f, ...@@ -1789,16 +1790,16 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
} }
static void z_erofs_runqueue(struct z_erofs_decompress_frontend *f, static void z_erofs_runqueue(struct z_erofs_decompress_frontend *f,
struct page **pagepool, bool force_fg, bool ra) bool force_fg, bool ra)
{ {
struct z_erofs_decompressqueue io[NR_JOBQUEUES]; struct z_erofs_decompressqueue io[NR_JOBQUEUES];
if (f->owned_head == Z_EROFS_PCLUSTER_TAIL) if (f->owned_head == Z_EROFS_PCLUSTER_TAIL)
return; return;
z_erofs_submit_queue(f, pagepool, io, &force_fg, ra); z_erofs_submit_queue(f, io, &force_fg, ra);
/* handle bypass queue (no i/o pclusters) immediately */ /* handle bypass queue (no i/o pclusters) immediately */
z_erofs_decompress_queue(&io[JQ_BYPASS], pagepool); z_erofs_decompress_queue(&io[JQ_BYPASS], &f->pagepool);
if (!force_fg) if (!force_fg)
return; return;
...@@ -1807,7 +1808,7 @@ static void z_erofs_runqueue(struct z_erofs_decompress_frontend *f, ...@@ -1807,7 +1808,7 @@ static void z_erofs_runqueue(struct z_erofs_decompress_frontend *f,
wait_for_completion_io(&io[JQ_SUBMIT].u.done); wait_for_completion_io(&io[JQ_SUBMIT].u.done);
/* handle synchronous decompress queue in the caller context */ /* handle synchronous decompress queue in the caller context */
z_erofs_decompress_queue(&io[JQ_SUBMIT], pagepool); z_erofs_decompress_queue(&io[JQ_SUBMIT], &f->pagepool);
} }
/* /*
...@@ -1815,8 +1816,7 @@ static void z_erofs_runqueue(struct z_erofs_decompress_frontend *f, ...@@ -1815,8 +1816,7 @@ static void z_erofs_runqueue(struct z_erofs_decompress_frontend *f,
* approximate readmore strategies as a start. * approximate readmore strategies as a start.
*/ */
static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f, static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
struct readahead_control *rac, struct readahead_control *rac, bool backmost)
struct page **pagepool, bool backmost)
{ {
struct inode *inode = f->inode; struct inode *inode = f->inode;
struct erofs_map_blocks *map = &f->map; struct erofs_map_blocks *map = &f->map;
...@@ -1858,7 +1858,7 @@ static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f, ...@@ -1858,7 +1858,7 @@ static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
if (PageUptodate(page)) { if (PageUptodate(page)) {
unlock_page(page); unlock_page(page);
} else { } else {
err = z_erofs_do_read_page(f, page, pagepool); err = z_erofs_do_read_page(f, page);
if (err) if (err)
erofs_err(inode->i_sb, erofs_err(inode->i_sb,
"readmore error at page %lu @ nid %llu", "readmore error at page %lu @ nid %llu",
...@@ -1879,27 +1879,24 @@ static int z_erofs_read_folio(struct file *file, struct folio *folio) ...@@ -1879,27 +1879,24 @@ static int z_erofs_read_folio(struct file *file, struct folio *folio)
struct inode *const inode = page->mapping->host; struct inode *const inode = page->mapping->host;
struct erofs_sb_info *const sbi = EROFS_I_SB(inode); struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode); struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
struct page *pagepool = NULL;
int err; int err;
trace_erofs_readpage(page, false); trace_erofs_readpage(page, false);
f.headoffset = (erofs_off_t)page->index << PAGE_SHIFT; f.headoffset = (erofs_off_t)page->index << PAGE_SHIFT;
z_erofs_pcluster_readmore(&f, NULL, &pagepool, true); z_erofs_pcluster_readmore(&f, NULL, true);
err = z_erofs_do_read_page(&f, page, &pagepool); err = z_erofs_do_read_page(&f, page);
z_erofs_pcluster_readmore(&f, NULL, &pagepool, false); z_erofs_pcluster_readmore(&f, NULL, false);
(void)z_erofs_collector_end(&f); (void)z_erofs_collector_end(&f);
/* if some compressed cluster ready, need submit them anyway */ /* if some compressed cluster ready, need submit them anyway */
z_erofs_runqueue(&f, &pagepool, z_erofs_is_sync_decompress(sbi, 0), z_erofs_runqueue(&f, z_erofs_is_sync_decompress(sbi, 0), false);
false);
if (err) if (err)
erofs_err(inode->i_sb, "failed to read, err [%d]", err); erofs_err(inode->i_sb, "failed to read, err [%d]", err);
erofs_put_metabuf(&f.map.buf); erofs_put_metabuf(&f.map.buf);
erofs_release_pages(&pagepool); erofs_release_pages(&f.pagepool);
return err; return err;
} }
...@@ -1908,12 +1905,12 @@ static void z_erofs_readahead(struct readahead_control *rac) ...@@ -1908,12 +1905,12 @@ static void z_erofs_readahead(struct readahead_control *rac)
struct inode *const inode = rac->mapping->host; struct inode *const inode = rac->mapping->host;
struct erofs_sb_info *const sbi = EROFS_I_SB(inode); struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode); struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
struct page *pagepool = NULL, *head = NULL, *page; struct page *head = NULL, *page;
unsigned int nr_pages; unsigned int nr_pages;
f.headoffset = readahead_pos(rac); f.headoffset = readahead_pos(rac);
z_erofs_pcluster_readmore(&f, rac, &pagepool, true); z_erofs_pcluster_readmore(&f, rac, true);
nr_pages = readahead_count(rac); nr_pages = readahead_count(rac);
trace_erofs_readpages(inode, readahead_index(rac), nr_pages, false); trace_erofs_readpages(inode, readahead_index(rac), nr_pages, false);
...@@ -1929,20 +1926,19 @@ static void z_erofs_readahead(struct readahead_control *rac) ...@@ -1929,20 +1926,19 @@ static void z_erofs_readahead(struct readahead_control *rac)
/* traversal in reverse order */ /* traversal in reverse order */
head = (void *)page_private(page); head = (void *)page_private(page);
err = z_erofs_do_read_page(&f, page, &pagepool); err = z_erofs_do_read_page(&f, page);
if (err) if (err)
erofs_err(inode->i_sb, erofs_err(inode->i_sb,
"readahead error at page %lu @ nid %llu", "readahead error at page %lu @ nid %llu",
page->index, EROFS_I(inode)->nid); page->index, EROFS_I(inode)->nid);
put_page(page); put_page(page);
} }
z_erofs_pcluster_readmore(&f, rac, &pagepool, false); z_erofs_pcluster_readmore(&f, rac, false);
(void)z_erofs_collector_end(&f); (void)z_erofs_collector_end(&f);
z_erofs_runqueue(&f, &pagepool, z_erofs_runqueue(&f, z_erofs_is_sync_decompress(sbi, nr_pages), true);
z_erofs_is_sync_decompress(sbi, nr_pages), true);
erofs_put_metabuf(&f.map.buf); erofs_put_metabuf(&f.map.buf);
erofs_release_pages(&pagepool); erofs_release_pages(&f.pagepool);
} }
const struct address_space_operations z_erofs_aops = { const struct address_space_operations z_erofs_aops = {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment