Commit e2d73c30 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'erofs-for-5.5-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs

Pull erofs updates from Gao Xiang:
 "No major kernel updates for this round since I'm fully diving into
  LZMA algorithm internals now to provide high CR XZ algorithm support.
  That needs more work and time for me to get a better compression time.

  Summary:

   - Introduce superblock checksum support

   - Set iowait when waiting I/O for sync decompression path

   - Several code cleanups"

* tag 'erofs-for-5.5-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs:
  erofs: remove unnecessary output in erofs_show_options()
  erofs: drop all vle annotations for runtime names
  erofs: support superblock checksum
  erofs: set iowait for sync decompression
  erofs: clean up decompress queue stuffs
  erofs: get rid of __stagingpage_alloc helper
  erofs: remove dead code since managed cache is now built-in
  erofs: clean up collection handling routines
parents 21b26d26 3dcb5fa2
...@@ -3,6 +3,7 @@ ...@@ -3,6 +3,7 @@
config EROFS_FS config EROFS_FS
tristate "EROFS filesystem support" tristate "EROFS filesystem support"
depends on BLOCK depends on BLOCK
select LIBCRC32C
help help
EROFS (Enhanced Read-Only File System) is a lightweight EROFS (Enhanced Read-Only File System) is a lightweight
read-only file system with modern designs (eg. page-sized read-only file system with modern designs (eg. page-sized
......
...@@ -73,7 +73,7 @@ static int z_erofs_lz4_prepare_destpages(struct z_erofs_decompress_req *rq, ...@@ -73,7 +73,7 @@ static int z_erofs_lz4_prepare_destpages(struct z_erofs_decompress_req *rq,
victim = availables[--top]; victim = availables[--top];
get_page(victim); get_page(victim);
} else { } else {
victim = erofs_allocpage(pagepool, GFP_KERNEL, false); victim = erofs_allocpage(pagepool, GFP_KERNEL);
if (!victim) if (!victim)
return -ENOMEM; return -ENOMEM;
victim->mapping = Z_EROFS_MAPPING_STAGING; victim->mapping = Z_EROFS_MAPPING_STAGING;
......
...@@ -11,6 +11,8 @@ ...@@ -11,6 +11,8 @@
#define EROFS_SUPER_OFFSET 1024 #define EROFS_SUPER_OFFSET 1024
#define EROFS_FEATURE_COMPAT_SB_CHKSUM 0x00000001
/* /*
* Any bits that aren't in EROFS_ALL_FEATURE_INCOMPAT should * Any bits that aren't in EROFS_ALL_FEATURE_INCOMPAT should
* be incompatible with this kernel version. * be incompatible with this kernel version.
...@@ -37,7 +39,6 @@ struct erofs_super_block { ...@@ -37,7 +39,6 @@ struct erofs_super_block {
__u8 uuid[16]; /* 128-bit uuid for volume */ __u8 uuid[16]; /* 128-bit uuid for volume */
__u8 volume_name[16]; /* volume name */ __u8 volume_name[16]; /* volume name */
__le32 feature_incompat; __le32 feature_incompat;
__u8 reserved2[44]; __u8 reserved2[44];
}; };
......
...@@ -85,6 +85,7 @@ struct erofs_sb_info { ...@@ -85,6 +85,7 @@ struct erofs_sb_info {
u8 uuid[16]; /* 128-bit uuid for volume */ u8 uuid[16]; /* 128-bit uuid for volume */
u8 volume_name[16]; /* volume name */ u8 volume_name[16]; /* volume name */
u32 feature_compat;
u32 feature_incompat; u32 feature_incompat;
unsigned int mount_opt; unsigned int mount_opt;
...@@ -278,9 +279,7 @@ static inline unsigned int erofs_inode_datalayout(unsigned int value) ...@@ -278,9 +279,7 @@ static inline unsigned int erofs_inode_datalayout(unsigned int value)
extern const struct super_operations erofs_sops; extern const struct super_operations erofs_sops;
extern const struct address_space_operations erofs_raw_access_aops; extern const struct address_space_operations erofs_raw_access_aops;
#ifdef CONFIG_EROFS_FS_ZIP extern const struct address_space_operations z_erofs_aops;
extern const struct address_space_operations z_erofs_vle_normalaccess_aops;
#endif
/* /*
* Logical to physical block mapping, used by erofs_map_blocks() * Logical to physical block mapping, used by erofs_map_blocks()
...@@ -382,7 +381,7 @@ int erofs_namei(struct inode *dir, struct qstr *name, ...@@ -382,7 +381,7 @@ int erofs_namei(struct inode *dir, struct qstr *name,
extern const struct file_operations erofs_dir_fops; extern const struct file_operations erofs_dir_fops;
/* utils.c / zdata.c */ /* utils.c / zdata.c */
struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail); struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp);
#if (EROFS_PCPUBUF_NR_PAGES > 0) #if (EROFS_PCPUBUF_NR_PAGES > 0)
void *erofs_get_pcpubuf(unsigned int pagenr); void *erofs_get_pcpubuf(unsigned int pagenr);
......
...@@ -9,6 +9,7 @@ ...@@ -9,6 +9,7 @@
#include <linux/statfs.h> #include <linux/statfs.h>
#include <linux/parser.h> #include <linux/parser.h>
#include <linux/seq_file.h> #include <linux/seq_file.h>
#include <linux/crc32c.h>
#include "xattr.h" #include "xattr.h"
#define CREATE_TRACE_POINTS #define CREATE_TRACE_POINTS
...@@ -46,6 +47,30 @@ void _erofs_info(struct super_block *sb, const char *function, ...@@ -46,6 +47,30 @@ void _erofs_info(struct super_block *sb, const char *function,
va_end(args); va_end(args);
} }
/*
 * Verify the on-disk superblock checksum.
 *
 * The checksum covers the tail of the first filesystem block, i.e.
 * [EROFS_SUPER_OFFSET, EROFS_BLKSIZ), computed with crc32c over a copy
 * in which the ->checksum field itself is zeroed.
 *
 * @sb:     VFS superblock, used for error reporting only
 * @sbdata: mapped first block of the filesystem image
 *
 * Returns 0 if the checksum matches, -ENOMEM if the scratch copy
 * cannot be allocated, or -EBADMSG on a checksum mismatch.
 */
static int erofs_superblock_csum_verify(struct super_block *sb, void *sbdata)
{
	u32 stored, computed;
	struct erofs_super_block *copied;

	/* work on a scratch copy so the mapped page is left untouched */
	copied = kmemdup(sbdata + EROFS_SUPER_OFFSET,
			 EROFS_BLKSIZ - EROFS_SUPER_OFFSET, GFP_KERNEL);
	if (!copied)
		return -ENOMEM;

	stored = le32_to_cpu(copied->checksum);
	/* the checksum field is defined to be zero while checksumming */
	copied->checksum = 0;
	/* to allow for x86 boot sectors and other oddities. */
	computed = crc32c(~0, copied, EROFS_BLKSIZ - EROFS_SUPER_OFFSET);
	kfree(copied);

	if (computed == stored)
		return 0;

	erofs_err(sb, "invalid checksum 0x%08x, 0x%08x expected",
		  computed, stored);
	return -EBADMSG;
}
static void erofs_inode_init_once(void *ptr) static void erofs_inode_init_once(void *ptr)
{ {
struct erofs_inode *vi = ptr; struct erofs_inode *vi = ptr;
...@@ -112,7 +137,7 @@ static int erofs_read_superblock(struct super_block *sb) ...@@ -112,7 +137,7 @@ static int erofs_read_superblock(struct super_block *sb)
sbi = EROFS_SB(sb); sbi = EROFS_SB(sb);
data = kmap_atomic(page); data = kmap(page);
dsb = (struct erofs_super_block *)(data + EROFS_SUPER_OFFSET); dsb = (struct erofs_super_block *)(data + EROFS_SUPER_OFFSET);
ret = -EINVAL; ret = -EINVAL;
...@@ -121,6 +146,13 @@ static int erofs_read_superblock(struct super_block *sb) ...@@ -121,6 +146,13 @@ static int erofs_read_superblock(struct super_block *sb)
goto out; goto out;
} }
sbi->feature_compat = le32_to_cpu(dsb->feature_compat);
if (sbi->feature_compat & EROFS_FEATURE_COMPAT_SB_CHKSUM) {
ret = erofs_superblock_csum_verify(sb, data);
if (ret)
goto out;
}
blkszbits = dsb->blkszbits; blkszbits = dsb->blkszbits;
/* 9(512 bytes) + LOG_SECTORS_PER_BLOCK == LOG_BLOCK_SIZE */ /* 9(512 bytes) + LOG_SECTORS_PER_BLOCK == LOG_BLOCK_SIZE */
if (blkszbits != LOG_BLOCK_SIZE) { if (blkszbits != LOG_BLOCK_SIZE) {
...@@ -155,7 +187,7 @@ static int erofs_read_superblock(struct super_block *sb) ...@@ -155,7 +187,7 @@ static int erofs_read_superblock(struct super_block *sb)
} }
ret = 0; ret = 0;
out: out:
kunmap_atomic(data); kunmap(page);
put_page(page); put_page(page);
return ret; return ret;
} }
...@@ -566,9 +598,6 @@ static int erofs_show_options(struct seq_file *seq, struct dentry *root) ...@@ -566,9 +598,6 @@ static int erofs_show_options(struct seq_file *seq, struct dentry *root)
seq_puts(seq, ",cache_strategy=readahead"); seq_puts(seq, ",cache_strategy=readahead");
} else if (sbi->cache_strategy == EROFS_ZIP_CACHE_READAROUND) { } else if (sbi->cache_strategy == EROFS_ZIP_CACHE_READAROUND) {
seq_puts(seq, ",cache_strategy=readaround"); seq_puts(seq, ",cache_strategy=readaround");
} else {
seq_puts(seq, ",cache_strategy=(unknown)");
DBG_BUGON(1);
} }
#endif #endif
return 0; return 0;
......
...@@ -7,7 +7,7 @@ ...@@ -7,7 +7,7 @@
#include "internal.h" #include "internal.h"
#include <linux/pagevec.h> #include <linux/pagevec.h>
struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail) struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp)
{ {
struct page *page; struct page *page;
...@@ -16,7 +16,7 @@ struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail) ...@@ -16,7 +16,7 @@ struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail)
DBG_BUGON(page_ref_count(page) != 1); DBG_BUGON(page_ref_count(page) != 1);
list_del(&page->lru); list_del(&page->lru);
} else { } else {
page = alloc_pages(gfp | (nofail ? __GFP_NOFAIL : 0), 0); page = alloc_page(gfp);
} }
return page; return page;
} }
...@@ -149,8 +149,7 @@ static void erofs_workgroup_unfreeze_final(struct erofs_workgroup *grp) ...@@ -149,8 +149,7 @@ static void erofs_workgroup_unfreeze_final(struct erofs_workgroup *grp)
} }
static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi, static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
struct erofs_workgroup *grp, struct erofs_workgroup *grp)
bool cleanup)
{ {
/* /*
* If managed cache is on, refcount of workgroups * If managed cache is on, refcount of workgroups
...@@ -188,8 +187,7 @@ static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi, ...@@ -188,8 +187,7 @@ static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
} }
static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi, static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
unsigned long nr_shrink, unsigned long nr_shrink)
bool cleanup)
{ {
pgoff_t first_index = 0; pgoff_t first_index = 0;
void *batch[PAGEVEC_SIZE]; void *batch[PAGEVEC_SIZE];
...@@ -208,7 +206,7 @@ static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi, ...@@ -208,7 +206,7 @@ static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
first_index = grp->index + 1; first_index = grp->index + 1;
/* try to shrink each valid workgroup */ /* try to shrink each valid workgroup */
if (!erofs_try_to_release_workgroup(sbi, grp, cleanup)) if (!erofs_try_to_release_workgroup(sbi, grp))
continue; continue;
++freed; ++freed;
...@@ -245,7 +243,8 @@ void erofs_shrinker_unregister(struct super_block *sb) ...@@ -245,7 +243,8 @@ void erofs_shrinker_unregister(struct super_block *sb)
struct erofs_sb_info *const sbi = EROFS_SB(sb); struct erofs_sb_info *const sbi = EROFS_SB(sb);
mutex_lock(&sbi->umount_mutex); mutex_lock(&sbi->umount_mutex);
erofs_shrink_workstation(sbi, ~0UL, true); /* clean up all remaining workgroups in memory */
erofs_shrink_workstation(sbi, ~0UL);
spin_lock(&erofs_sb_list_lock); spin_lock(&erofs_sb_list_lock);
list_del(&sbi->list); list_del(&sbi->list);
...@@ -294,7 +293,7 @@ static unsigned long erofs_shrink_scan(struct shrinker *shrink, ...@@ -294,7 +293,7 @@ static unsigned long erofs_shrink_scan(struct shrinker *shrink,
spin_unlock(&erofs_sb_list_lock); spin_unlock(&erofs_sb_list_lock);
sbi->shrinker_run_no = run_no; sbi->shrinker_run_no = run_no;
freed += erofs_shrink_workstation(sbi, nr, false); freed += erofs_shrink_workstation(sbi, nr);
spin_lock(&erofs_sb_list_lock); spin_lock(&erofs_sb_list_lock);
/* Get the next list element before we move this one */ /* Get the next list element before we move this one */
......
This diff is collapsed.
...@@ -84,7 +84,8 @@ struct z_erofs_pcluster { ...@@ -84,7 +84,8 @@ struct z_erofs_pcluster {
#define Z_EROFS_WORKGROUP_SIZE sizeof(struct z_erofs_pcluster) #define Z_EROFS_WORKGROUP_SIZE sizeof(struct z_erofs_pcluster)
struct z_erofs_unzip_io { struct z_erofs_decompressqueue {
struct super_block *sb;
atomic_t pending_bios; atomic_t pending_bios;
z_erofs_next_pcluster_t head; z_erofs_next_pcluster_t head;
...@@ -94,11 +95,6 @@ struct z_erofs_unzip_io { ...@@ -94,11 +95,6 @@ struct z_erofs_unzip_io {
} u; } u;
}; };
struct z_erofs_unzip_io_sb {
struct z_erofs_unzip_io io;
struct super_block *sb;
};
#define MNGD_MAPPING(sbi) ((sbi)->managed_cache->i_mapping) #define MNGD_MAPPING(sbi) ((sbi)->managed_cache->i_mapping)
static inline bool erofs_page_is_managed(const struct erofs_sb_info *sbi, static inline bool erofs_page_is_managed(const struct erofs_sb_info *sbi,
struct page *page) struct page *page)
......
...@@ -22,11 +22,11 @@ int z_erofs_fill_inode(struct inode *inode) ...@@ -22,11 +22,11 @@ int z_erofs_fill_inode(struct inode *inode)
set_bit(EROFS_I_Z_INITED_BIT, &vi->flags); set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
} }
inode->i_mapping->a_ops = &z_erofs_vle_normalaccess_aops; inode->i_mapping->a_ops = &z_erofs_aops;
return 0; return 0;
} }
static int fill_inode_lazy(struct inode *inode) static int z_erofs_fill_inode_lazy(struct inode *inode)
{ {
struct erofs_inode *const vi = EROFS_I(inode); struct erofs_inode *const vi = EROFS_I(inode);
struct super_block *const sb = inode->i_sb; struct super_block *const sb = inode->i_sb;
...@@ -138,8 +138,8 @@ static int z_erofs_reload_indexes(struct z_erofs_maprecorder *m, ...@@ -138,8 +138,8 @@ static int z_erofs_reload_indexes(struct z_erofs_maprecorder *m,
return 0; return 0;
} }
static int vle_legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m, static int legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m,
unsigned long lcn) unsigned long lcn)
{ {
struct inode *const inode = m->inode; struct inode *const inode = m->inode;
struct erofs_inode *const vi = EROFS_I(inode); struct erofs_inode *const vi = EROFS_I(inode);
...@@ -311,13 +311,13 @@ static int compacted_load_cluster_from_disk(struct z_erofs_maprecorder *m, ...@@ -311,13 +311,13 @@ static int compacted_load_cluster_from_disk(struct z_erofs_maprecorder *m,
return unpack_compacted_index(m, amortizedshift, erofs_blkoff(pos)); return unpack_compacted_index(m, amortizedshift, erofs_blkoff(pos));
} }
static int vle_load_cluster_from_disk(struct z_erofs_maprecorder *m, static int z_erofs_load_cluster_from_disk(struct z_erofs_maprecorder *m,
unsigned int lcn) unsigned int lcn)
{ {
const unsigned int datamode = EROFS_I(m->inode)->datalayout; const unsigned int datamode = EROFS_I(m->inode)->datalayout;
if (datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY) if (datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY)
return vle_legacy_load_cluster_from_disk(m, lcn); return legacy_load_cluster_from_disk(m, lcn);
if (datamode == EROFS_INODE_FLAT_COMPRESSION) if (datamode == EROFS_INODE_FLAT_COMPRESSION)
return compacted_load_cluster_from_disk(m, lcn); return compacted_load_cluster_from_disk(m, lcn);
...@@ -325,8 +325,8 @@ static int vle_load_cluster_from_disk(struct z_erofs_maprecorder *m, ...@@ -325,8 +325,8 @@ static int vle_load_cluster_from_disk(struct z_erofs_maprecorder *m,
return -EINVAL; return -EINVAL;
} }
static int vle_extent_lookback(struct z_erofs_maprecorder *m, static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m,
unsigned int lookback_distance) unsigned int lookback_distance)
{ {
struct erofs_inode *const vi = EROFS_I(m->inode); struct erofs_inode *const vi = EROFS_I(m->inode);
struct erofs_map_blocks *const map = m->map; struct erofs_map_blocks *const map = m->map;
...@@ -343,7 +343,7 @@ static int vle_extent_lookback(struct z_erofs_maprecorder *m, ...@@ -343,7 +343,7 @@ static int vle_extent_lookback(struct z_erofs_maprecorder *m,
/* load extent head logical cluster if needed */ /* load extent head logical cluster if needed */
lcn -= lookback_distance; lcn -= lookback_distance;
err = vle_load_cluster_from_disk(m, lcn); err = z_erofs_load_cluster_from_disk(m, lcn);
if (err) if (err)
return err; return err;
...@@ -356,7 +356,7 @@ static int vle_extent_lookback(struct z_erofs_maprecorder *m, ...@@ -356,7 +356,7 @@ static int vle_extent_lookback(struct z_erofs_maprecorder *m,
DBG_BUGON(1); DBG_BUGON(1);
return -EFSCORRUPTED; return -EFSCORRUPTED;
} }
return vle_extent_lookback(m, m->delta[0]); return z_erofs_extent_lookback(m, m->delta[0]);
case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN: case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
map->m_flags &= ~EROFS_MAP_ZIPPED; map->m_flags &= ~EROFS_MAP_ZIPPED;
/* fallthrough */ /* fallthrough */
...@@ -396,7 +396,7 @@ int z_erofs_map_blocks_iter(struct inode *inode, ...@@ -396,7 +396,7 @@ int z_erofs_map_blocks_iter(struct inode *inode,
goto out; goto out;
} }
err = fill_inode_lazy(inode); err = z_erofs_fill_inode_lazy(inode);
if (err) if (err)
goto out; goto out;
...@@ -405,7 +405,7 @@ int z_erofs_map_blocks_iter(struct inode *inode, ...@@ -405,7 +405,7 @@ int z_erofs_map_blocks_iter(struct inode *inode,
m.lcn = ofs >> lclusterbits; m.lcn = ofs >> lclusterbits;
endoff = ofs & ((1 << lclusterbits) - 1); endoff = ofs & ((1 << lclusterbits) - 1);
err = vle_load_cluster_from_disk(&m, m.lcn); err = z_erofs_load_cluster_from_disk(&m, m.lcn);
if (err) if (err)
goto unmap_out; goto unmap_out;
...@@ -436,7 +436,7 @@ int z_erofs_map_blocks_iter(struct inode *inode, ...@@ -436,7 +436,7 @@ int z_erofs_map_blocks_iter(struct inode *inode,
/* fallthrough */ /* fallthrough */
case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD: case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
/* get the correspoinding first chunk */ /* get the correspoinding first chunk */
err = vle_extent_lookback(&m, m.delta[0]); err = z_erofs_extent_lookback(&m, m.delta[0]);
if (err) if (err)
goto unmap_out; goto unmap_out;
break; break;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment