Commit 1282dea3 authored by Gao Xiang

erofs: clean up cached I/O strategies

After commit 4c7e4255 ("erofs: remove useless cache strategy of
DELAYEDALLOC"), only one cached I/O allocation strategy is supported:

  When cached I/O is preferred, page allocation is applied without
  direct reclaim.  If allocation fails, fall back to inplace I/O.

Let's get rid of z_erofs_cache_alloctype.  No logical changes.
Reviewed-by: Yue Hu <huyue2@coolpad.com>
Reviewed-by: Chao Yu <chao@kernel.org>
Signed-off-by: Yue Hu <huyue2@coolpad.com>
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20221206060352.152830-1-xiang@kernel.org
parent 2109901d
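
For reference, the single remaining strategy described in the commit message reduces to an optimistic page allocation that never enters direct reclaim, with in-place I/O as the fallback. Below is a minimal sketch of that pattern (simplified from z_erofs_bind_cache() in the diff; the gfp mask shown is illustrative of "no direct reclaim" rather than quoted verbatim from the kernel source):

	/* sketch: inside the per-page loop of z_erofs_bind_cache() */
	gfp_t gfp = (mapping_gfp_mask(mc) & ~__GFP_DIRECT_RECLAIM) |
		    __GFP_NORETRY | __GFP_NOWARN;	/* optimistic, no reclaim */
	struct page *newpage = erofs_allocpage(pagepool, gfp);

	if (!newpage)
		continue;	/* allocation failed: fall back to in-place I/O */
	set_page_private(newpage, Z_EROFS_PREALLOCATED_PAGE);
	/* the page is later attached to the pcluster for cached I/O */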
@@ -175,16 +175,6 @@ static void z_erofs_free_pcluster(struct z_erofs_pcluster *pcl)
 	DBG_BUGON(1);
 }
 
-/* how to allocate cached pages for a pcluster */
-enum z_erofs_cache_alloctype {
-	DONTALLOC,	/* don't allocate any cached pages */
-	/*
-	 * try to use cached I/O if page allocation succeeds or fallback
-	 * to in-place I/O instead to avoid any direct reclaim.
-	 */
-	TRYALLOC,
-};
-
 /*
  * tagged pointer with 1-bit tag for all compressed pages
  * tag 0 - the page is just found with an extra page reference
@@ -292,12 +282,29 @@ struct z_erofs_decompress_frontend {
 	.inode = __i, .owned_head = Z_EROFS_PCLUSTER_TAIL, \
 	.mode = Z_EROFS_PCLUSTER_FOLLOWED, .backmost = true }
 
+static bool z_erofs_should_alloc_cache(struct z_erofs_decompress_frontend *fe)
+{
+	unsigned int cachestrategy = EROFS_I_SB(fe->inode)->opt.cache_strategy;
+
+	if (cachestrategy <= EROFS_ZIP_CACHE_DISABLED)
+		return false;
+
+	if (fe->backmost)
+		return true;
+
+	if (cachestrategy >= EROFS_ZIP_CACHE_READAROUND &&
+	    fe->map.m_la < fe->headoffset)
+		return true;
+
+	return false;
+}
+
 static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe,
-			       enum z_erofs_cache_alloctype type,
 			       struct page **pagepool)
 {
 	struct address_space *mc = MNGD_MAPPING(EROFS_I_SB(fe->inode));
 	struct z_erofs_pcluster *pcl = fe->pcl;
+	bool shouldalloc = z_erofs_should_alloc_cache(fe);
 	bool standalone = true;
 	/*
 	 * optimistic allocation without direct reclaim since inplace I/O
@@ -326,18 +333,19 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe,
 		} else {
 			/* I/O is needed, no possible to decompress directly */
 			standalone = false;
-			switch (type) {
-			case TRYALLOC:
-				newpage = erofs_allocpage(pagepool, gfp);
-				if (!newpage)
-					continue;
-				set_page_private(newpage,
-						 Z_EROFS_PREALLOCATED_PAGE);
-				t = tag_compressed_page_justfound(newpage);
-				break;
-			default:	/* DONTALLOC */
+			if (!shouldalloc)
 				continue;
-			}
+
+			/*
+			 * try to use cached I/O if page allocation
+			 * succeeds or fallback to in-place I/O instead
+			 * to avoid any direct reclaim.
+			 */
+			newpage = erofs_allocpage(pagepool, gfp);
+			if (!newpage)
+				continue;
+			set_page_private(newpage, Z_EROFS_PREALLOCATED_PAGE);
+			t = tag_compressed_page_justfound(newpage);
 		}
 
 		if (!cmpxchg_relaxed(&pcl->compressed_bvecs[i].page, NULL,
@@ -637,20 +645,6 @@ static bool z_erofs_collector_end(struct z_erofs_decompress_frontend *fe)
 	return true;
 }
 
-static bool should_alloc_managed_pages(struct z_erofs_decompress_frontend *fe,
-				       unsigned int cachestrategy,
-				       erofs_off_t la)
-{
-	if (cachestrategy <= EROFS_ZIP_CACHE_DISABLED)
-		return false;
-
-	if (fe->backmost)
-		return true;
-
-	return cachestrategy >= EROFS_ZIP_CACHE_READAROUND &&
-		la < fe->headoffset;
-}
-
 static int z_erofs_read_fragment(struct inode *inode, erofs_off_t pos,
 				 struct page *page, unsigned int pageofs,
 				 unsigned int len)
@@ -687,12 +681,9 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
 				struct page *page, struct page **pagepool)
 {
 	struct inode *const inode = fe->inode;
-	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
 	struct erofs_map_blocks *const map = &fe->map;
 	const loff_t offset = page_offset(page);
 	bool tight = true, exclusive;
-	enum z_erofs_cache_alloctype cache_strategy;
 	unsigned int cur, end, spiltted;
 	int err = 0;
@@ -746,13 +737,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
 		fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
 	} else {
 		/* bind cache first when cached decompression is preferred */
-		if (should_alloc_managed_pages(fe, sbi->opt.cache_strategy,
-					       map->m_la))
-			cache_strategy = TRYALLOC;
-		else
-			cache_strategy = DONTALLOC;
-		z_erofs_bind_cache(fe, cache_strategy, pagepool);
+		z_erofs_bind_cache(fe, pagepool);
 	}
 hitted:
 	/*