Commit 5fb76bb0 authored by Gao Xiang's avatar Gao Xiang Committed by Greg Kroah-Hartman

staging: erofs: cleanup `z_erofs_vle_normalaccess_readpages'

This patch introduces `__should_decompress_synchronously' to
cleanup `z_erofs_vle_normalaccess_readpages'.
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent e5e3abba
...@@ -95,6 +95,9 @@ struct erofs_sb_info { ...@@ -95,6 +95,9 @@ struct erofs_sb_info {
/* the dedicated workstation for compression */ /* the dedicated workstation for compression */
struct radix_tree_root workstn_tree; struct radix_tree_root workstn_tree;
/* threshold for decompression synchronously */
unsigned int max_sync_decompress_pages;
#ifdef EROFS_FS_HAS_MANAGED_CACHE #ifdef EROFS_FS_HAS_MANAGED_CACHE
struct inode *managed_cache; struct inode *managed_cache;
#endif #endif
...@@ -273,6 +276,14 @@ extern int erofs_try_to_free_cached_page(struct address_space *mapping, ...@@ -273,6 +276,14 @@ extern int erofs_try_to_free_cached_page(struct address_space *mapping,
struct page *page); struct page *page);
#endif #endif
#define DEFAULT_MAX_SYNC_DECOMPRESS_PAGES 3
/*
 * Decide whether a readahead batch of @nr pages should be decompressed
 * synchronously in the caller's context: batches no larger than the
 * per-sb threshold (max_sync_decompress_pages) are handled inline,
 * larger ones are deferred for asynchronous decompression.
 */
static inline bool __should_decompress_synchronously(struct erofs_sb_info *sbi,
						     unsigned int nr)
{
	return sbi->max_sync_decompress_pages >= nr;
}
#endif #endif
/* we strictly follow PAGE_SIZE and no buffer head yet */ /* we strictly follow PAGE_SIZE and no buffer head yet */
......
...@@ -162,6 +162,11 @@ static void erofs_build_fault_attr(struct erofs_sb_info *sbi, ...@@ -162,6 +162,11 @@ static void erofs_build_fault_attr(struct erofs_sb_info *sbi,
static void default_options(struct erofs_sb_info *sbi) static void default_options(struct erofs_sb_info *sbi)
{ {
/* set up some FS parameters */
#ifdef CONFIG_EROFS_FS_ZIP
sbi->max_sync_decompress_pages = DEFAULT_MAX_SYNC_DECOMPRESS_PAGES;
#endif
#ifdef CONFIG_EROFS_FS_XATTR #ifdef CONFIG_EROFS_FS_XATTR
set_opt(sbi, XATTR_USER); set_opt(sbi, XATTR_USER);
#endif #endif
......
...@@ -1308,12 +1308,14 @@ static int z_erofs_vle_normalaccess_readpage(struct file *file, ...@@ -1308,12 +1308,14 @@ static int z_erofs_vle_normalaccess_readpage(struct file *file,
return 0; return 0;
} }
static inline int __z_erofs_vle_normalaccess_readpages( static int z_erofs_vle_normalaccess_readpages(struct file *filp,
struct file *filp, struct address_space *mapping,
struct address_space *mapping, struct list_head *pages,
struct list_head *pages, unsigned int nr_pages, bool sync) unsigned int nr_pages)
{ {
struct inode *const inode = mapping->host; struct inode *const inode = mapping->host;
struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
const bool sync = __should_decompress_synchronously(sbi, nr_pages);
struct z_erofs_vle_frontend f = VLE_FRONTEND_INIT(inode); struct z_erofs_vle_frontend f = VLE_FRONTEND_INIT(inode);
gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL); gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
...@@ -1372,16 +1374,6 @@ static inline int __z_erofs_vle_normalaccess_readpages( ...@@ -1372,16 +1374,6 @@ static inline int __z_erofs_vle_normalaccess_readpages(
return 0; return 0;
} }
static int z_erofs_vle_normalaccess_readpages(
struct file *filp,
struct address_space *mapping,
struct list_head *pages, unsigned int nr_pages)
{
return __z_erofs_vle_normalaccess_readpages(filp,
mapping, pages, nr_pages,
nr_pages < 4 /* sync */);
}
const struct address_space_operations z_erofs_vle_normalaccess_aops = { const struct address_space_operations z_erofs_vle_normalaccess_aops = {
.readpage = z_erofs_vle_normalaccess_readpage, .readpage = z_erofs_vle_normalaccess_readpage,
.readpages = z_erofs_vle_normalaccess_readpages, .readpages = z_erofs_vle_normalaccess_readpages,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment