Commit 5c6154ff authored by Linus Torvalds

Merge tag 'erofs-for-6.11-rc5-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs

Pull erofs fixes from Gao Xiang:
 "As I mentioned in the merge window pull request, there is a regression
  which could cause system hang due to page migration. The corresponding
  fix landed upstream through MM tree last week (commit 2e6506e1:
  "mm/migrate: fix deadlock in migrate_pages_batch() on large folios"),
  therefore large folios can be safely allowed for compressed inodes and
  stress tests have been running on my fleet for over 20 days without
  any regression. Users have explicitly requested this for months, so
  let's allow large folios for EROFS full cases now for wider testing.

  Additionally, there is a fix that addresses invalid memory accesses on
  a failure path triggered by fault injection, as well as two minor
  cleanups to simplify the codebase.

  Summary:

   - Allow large folios on compressed inodes

   - Fix invalid memory accesses if z_erofs_gbuf_growsize() partially
     fails

   - Two minor cleanups"

* tag 'erofs-for-6.11-rc5-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs:
  erofs: fix out-of-bound access when z_erofs_gbuf_growsize() partially fails
  erofs: allow large folios for compressed files
  erofs: get rid of check_layout_compatibility()
  erofs: simplify readdir operation
parents b311c1b4 0005e01e
@@ -75,7 +75,7 @@ Here are the main features of EROFS:
 - Support merging tail-end data into a special inode as fragments.
 
-- Support large folios for uncompressed files.
+- Support large folios to make use of THPs (Transparent Hugepages);
 
 - Support direct I/O on uncompressed files to avoid double caching for loop
   devices;
@@ -8,19 +8,15 @@
 static int erofs_fill_dentries(struct inode *dir, struct dir_context *ctx,
                                void *dentry_blk, struct erofs_dirent *de,
-                               unsigned int nameoff, unsigned int maxsize)
+                               unsigned int nameoff0, unsigned int maxsize)
 {
-        const struct erofs_dirent *end = dentry_blk + nameoff;
+        const struct erofs_dirent *end = dentry_blk + nameoff0;
 
         while (de < end) {
-                const char *de_name;
+                unsigned char d_type = fs_ftype_to_dtype(de->file_type);
+                unsigned int nameoff = le16_to_cpu(de->nameoff);
+                const char *de_name = (char *)dentry_blk + nameoff;
                 unsigned int de_namelen;
-                unsigned char d_type;
-
-                d_type = fs_ftype_to_dtype(de->file_type);
-
-                nameoff = le16_to_cpu(de->nameoff);
-                de_name = (char *)dentry_blk + nameoff;
 
                 /* the last dirent in the block? */
                 if (de + 1 >= end)
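For context on the renamed nameoff0 parameter: an EROFS directory block stores an array of fixed-size dirents followed by the packed names; each dirent's 16-bit nameoff points at its own name, and the first dirent's nameoff therefore also marks the end of the dirent array. A name's length is the gap to the next dirent's nameoff, or, for the last entry, whatever fits in the used part of the block. Below is a rough userspace sketch of that walk; the struct layout is simplified (no nid field, native endianness) and walk_block() is an illustrative helper, not kernel code.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct dirent_disk {
        uint16_t nameoff;       /* offset of the name within the block */
        uint8_t  file_type;
};

static void walk_block(const void *blk, const struct dirent_disk *de,
                       unsigned int nameoff0, unsigned int maxsize)
{
        /* nameoff0 (the first dirent's nameoff) marks the end of the dirent array */
        const struct dirent_disk *end =
                (const void *)((const char *)blk + nameoff0);

        while (de < end) {
                unsigned int nameoff = de->nameoff;
                const char *name = (const char *)blk + nameoff;
                unsigned int namelen;

                if (de + 1 >= end)      /* the last dirent in the block? */
                        namelen = strnlen(name, maxsize - nameoff);
                else
                        namelen = de[1].nameoff - nameoff;
                printf("%.*s (type %u)\n", (int)namelen, name,
                       (unsigned)de->file_type);
                de++;
        }
}

int main(void)
{
        unsigned char blk[64] = {0};
        struct dirent_disk de[2] = {
                { .nameoff = 2 * sizeof(struct dirent_disk), .file_type = 1 },
                { .nameoff = 2 * sizeof(struct dirent_disk) + 3, .file_type = 2 },
        };

        memcpy(blk, de, sizeof(de));
        memcpy(blk + de[0].nameoff, "foo", 3);
        memcpy(blk + de[1].nameoff, "bar", 3);
        /* maxsize is the number of bytes actually used in this block */
        walk_block(blk, (const struct dirent_disk *)blk, de[0].nameoff,
                   de[1].nameoff + 3);
        return 0;
}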
@@ -52,21 +48,20 @@ static int erofs_readdir(struct file *f, struct dir_context *ctx)
         struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
         struct super_block *sb = dir->i_sb;
         unsigned long bsz = sb->s_blocksize;
-        const size_t dirsize = i_size_read(dir);
-        unsigned int i = erofs_blknr(sb, ctx->pos);
         unsigned int ofs = erofs_blkoff(sb, ctx->pos);
         int err = 0;
         bool initial = true;
 
         buf.mapping = dir->i_mapping;
-        while (ctx->pos < dirsize) {
+        while (ctx->pos < dir->i_size) {
+                erofs_off_t dbstart = ctx->pos - ofs;
                 struct erofs_dirent *de;
                 unsigned int nameoff, maxsize;
 
-                de = erofs_bread(&buf, erofs_pos(sb, i), EROFS_KMAP);
+                de = erofs_bread(&buf, dbstart, EROFS_KMAP);
                 if (IS_ERR(de)) {
                         erofs_err(sb, "fail to readdir of logical block %u of nid %llu",
-                                  i, EROFS_I(dir)->nid);
+                                  erofs_blknr(sb, dbstart), EROFS_I(dir)->nid);
                         err = PTR_ERR(de);
                         break;
                 }
@@ -79,25 +74,19 @@ static int erofs_readdir(struct file *f, struct dir_context *ctx)
                         break;
                 }
 
-                maxsize = min_t(unsigned int, dirsize - ctx->pos + ofs, bsz);
+                maxsize = min_t(unsigned int, dir->i_size - dbstart, bsz);
 
                 /* search dirents at the arbitrary position */
                 if (initial) {
                         initial = false;
                         ofs = roundup(ofs, sizeof(struct erofs_dirent));
-                        ctx->pos = erofs_pos(sb, i) + ofs;
-                        if (ofs >= nameoff)
-                                goto skip_this;
+                        ctx->pos = dbstart + ofs;
                 }
 
                 err = erofs_fill_dentries(dir, ctx, de, (void *)de + ofs,
                                           nameoff, maxsize);
                 if (err)
                         break;
-skip_this:
-                ctx->pos = erofs_pos(sb, i) + maxsize;
-                ++i;
+                ctx->pos = dbstart + maxsize;
                 ofs = 0;
         }
         erofs_put_metabuf(&buf);
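The readdir rework above drops the separate block counter i: since ofs is ctx->pos's offset within its block, dbstart = ctx->pos - ofs is already the byte address of the block start, so erofs_bread() can be handed the byte position directly and the block number is only recomputed for the error message. A tiny standalone check of that identity; blkoff()/blkstart() are local helpers here and the 4096-byte block size and full-block maxsize are just examples.

#include <assert.h>
#include <stdint.h>

#define BLKSZBITS 12U   /* example: 4096-byte directory blocks */

static uint64_t blkoff(uint64_t pos)   { return pos & ((1ULL << BLKSZBITS) - 1); }
static uint64_t blkstart(uint64_t pos) { return pos - blkoff(pos); }

int main(void)
{
        /* walking a directory the way erofs_readdir() now advances */
        for (uint64_t pos = 0x1234; pos < 0x4000; ) {
                uint64_t dbstart = blkstart(pos);       /* byte address of this block */

                /* same value the old code computed as erofs_pos(sb, i) */
                assert(dbstart == (pos >> BLKSZBITS) << BLKSZBITS);

                pos = dbstart + (1ULL << BLKSZBITS);    /* ctx->pos = dbstart + maxsize */
        }
        return 0;
}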
@@ -257,25 +257,23 @@ static int erofs_fill_inode(struct inode *inode)
                 goto out_unlock;
         }
 
+        mapping_set_large_folios(inode->i_mapping);
         if (erofs_inode_is_data_compressed(vi->datalayout)) {
 #ifdef CONFIG_EROFS_FS_ZIP
                 DO_ONCE_LITE_IF(inode->i_blkbits != PAGE_SHIFT,
                           erofs_info, inode->i_sb,
                           "EXPERIMENTAL EROFS subpage compressed block support in use. Use at your own risk!");
                 inode->i_mapping->a_ops = &z_erofs_aops;
-                err = 0;
-                goto out_unlock;
-#endif
+#else
                 err = -EOPNOTSUPP;
                 goto out_unlock;
-        }
-        inode->i_mapping->a_ops = &erofs_raw_access_aops;
-        mapping_set_large_folios(inode->i_mapping);
+#endif
+        } else {
+                inode->i_mapping->a_ops = &erofs_raw_access_aops;
 #ifdef CONFIG_EROFS_FS_ONDEMAND
-        if (erofs_is_fscache_mode(inode->i_sb))
-                inode->i_mapping->a_ops = &erofs_fscache_access_aops;
+                if (erofs_is_fscache_mode(inode->i_sb))
+                        inode->i_mapping->a_ops = &erofs_fscache_access_aops;
 #endif
+        }
 out_unlock:
         erofs_put_metabuf(&buf);
         return err;
@@ -220,7 +220,7 @@ struct erofs_buf {
 };
 #define __EROFS_BUF_INITIALIZER ((struct erofs_buf){ .page = NULL })
 
-#define erofs_blknr(sb, addr)   ((addr) >> (sb)->s_blocksize_bits)
+#define erofs_blknr(sb, addr)   ((erofs_blk_t)((addr) >> (sb)->s_blocksize_bits))
 #define erofs_blkoff(sb, addr)  ((addr) & ((sb)->s_blocksize - 1))
 #define erofs_pos(sb, blk)      ((erofs_off_t)(blk) << (sb)->s_blocksize_bits)
 #define erofs_iblks(i)  (round_up((i)->i_size, i_blocksize(i)) >> (i)->i_blkbits)
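A quick worked example of the three block helpers: with 4096-byte blocks, a byte address splits into a block number (the upper bits) and an in-block offset (the low 12 bits), and erofs_pos() maps a block number back to a byte position. The cast added above narrows the shifted 64-bit address to the 32-bit block-number type, matching uses such as the %u in the readdir error message. A small standalone demo follows; the typedefs and constants are illustrative, not taken from internal.h.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t erofs_off_t;   /* byte position, 64-bit */
typedef uint32_t erofs_blk_t;   /* block number, 32-bit */

#define BLKSZBITS 12U           /* example: 4096-byte blocks */
#define BLKSIZ    (1ULL << BLKSZBITS)

#define blknr(addr)  ((erofs_blk_t)((addr) >> BLKSZBITS))
#define blkoff(addr) ((addr) & (BLKSIZ - 1))
#define pos(blk)     ((erofs_off_t)(blk) << BLKSZBITS)

int main(void)
{
        erofs_off_t addr = 0x3010;      /* a byte address inside block 3 */

        /* 0x3010 >> 12 == 3, 0x3010 & 0xfff == 0x10, 3 << 12 == 0x3000 */
        printf("blknr=%u blkoff=%llu blkstart=%#llx\n",
               (unsigned)blknr(addr), (unsigned long long)blkoff(addr),
               (unsigned long long)pos(blknr(addr)));
        return 0;
}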
@@ -108,22 +108,6 @@ static void erofs_free_inode(struct inode *inode)
         kmem_cache_free(erofs_inode_cachep, vi);
 }
 
-static bool check_layout_compatibility(struct super_block *sb,
-                                       struct erofs_super_block *dsb)
-{
-        const unsigned int feature = le32_to_cpu(dsb->feature_incompat);
-
-        EROFS_SB(sb)->feature_incompat = feature;
-
-        /* check if current kernel meets all mandatory requirements */
-        if (feature & (~EROFS_ALL_FEATURE_INCOMPAT)) {
-                erofs_err(sb, "unidentified incompatible feature %x, please upgrade kernel",
-                          feature & ~EROFS_ALL_FEATURE_INCOMPAT);
-                return false;
-        }
-        return true;
-}
-
 /* read variable-sized metadata, offset will be aligned by 4-byte */
 void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf,
                           erofs_off_t *offset, int *lengthp)
@@ -279,7 +263,7 @@ static int erofs_scan_devices(struct super_block *sb,
 static int erofs_read_superblock(struct super_block *sb)
 {
-        struct erofs_sb_info *sbi;
+        struct erofs_sb_info *sbi = EROFS_SB(sb);
         struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
         struct erofs_super_block *dsb;
         void *data;
@@ -291,9 +275,7 @@ static int erofs_read_superblock(struct super_block *sb)
                 return PTR_ERR(data);
         }
 
-        sbi = EROFS_SB(sb);
         dsb = (struct erofs_super_block *)(data + EROFS_SUPER_OFFSET);
-
         ret = -EINVAL;
         if (le32_to_cpu(dsb->magic) != EROFS_SUPER_MAGIC_V1) {
                 erofs_err(sb, "cannot find valid erofs superblock");
@@ -318,8 +300,12 @@ static int erofs_read_superblock(struct super_block *sb)
         }
 
         ret = -EINVAL;
-        if (!check_layout_compatibility(sb, dsb))
+        sbi->feature_incompat = le32_to_cpu(dsb->feature_incompat);
+        if (sbi->feature_incompat & ~EROFS_ALL_FEATURE_INCOMPAT) {
+                erofs_err(sb, "unidentified incompatible feature %x, please upgrade kernel",
+                          sbi->feature_incompat & ~EROFS_ALL_FEATURE_INCOMPAT);
                 goto out;
+        }
 
         sbi->sb_size = 128 + dsb->sb_extslots * EROFS_SB_EXTSLOT_SIZE;
         if (sbi->sb_size > PAGE_SIZE - EROFS_SUPER_OFFSET) {
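These two super.c hunks fold the old helper into erofs_read_superblock(): the on-disk incompatible-feature bits are stored in sbi->feature_incompat, and any bit the running kernel does not recognize refuses the mount. The check in isolation, as a hedged userspace sketch; ALL_FEATURE_INCOMPAT below is a placeholder mask, not the real EROFS_ALL_FEATURE_INCOMPAT value.

#include <stdint.h>
#include <stdio.h>

/* placeholder mask: pretend the kernel understands only these two bits */
#define ALL_FEATURE_INCOMPAT 0x00000003u

/* return 0 if every incompatible feature on disk is supported, -1 otherwise */
static int check_incompat(uint32_t feature_incompat)
{
        uint32_t unknown = feature_incompat & ~ALL_FEATURE_INCOMPAT;

        if (unknown) {
                fprintf(stderr,
                        "unidentified incompatible feature %x, please upgrade kernel\n",
                        unknown);
                return -1;
        }
        return 0;
}

int main(void)
{
        check_incompat(0x00000001u);    /* known bit: mount proceeds */
        check_incompat(0x00000010u);    /* unknown bit: refuse the mount */
        return 0;
}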
@@ -111,7 +111,8 @@ int z_erofs_gbuf_growsize(unsigned int nrpages)
 out:
         if (i < z_erofs_gbuf_count && tmp_pages) {
                 for (j = 0; j < nrpages; ++j)
-                        if (tmp_pages[j] && tmp_pages[j] != gbuf->pages[j])
+                        if (tmp_pages[j] && (j >= gbuf->nrpages ||
+                                             tmp_pages[j] != gbuf->pages[j]))
                                 __free_page(tmp_pages[j]);
                 kfree(tmp_pages);
         }
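The one-line fix above tightens the error path of z_erofs_gbuf_growsize(): tmp_pages[] starts out holding the pages already attached to the (shorter) gbuf, so on partial failure only newly allocated slots may be freed. Comparing tmp_pages[j] against gbuf->pages[j] alone indexes past the old array once j reaches gbuf->nrpages; checking j >= gbuf->nrpages first avoids that. A minimal userspace model of the same rollback rule, with malloc/free standing in for page allocation; struct gbuf, grow_pages() and fail_at are invented for this sketch, not kernel interfaces.

#include <stdlib.h>

struct gbuf {
        void **pages;           /* currently attached "pages" */
        unsigned int nrpages;   /* current length of pages[] */
};

/* Try to grow @gbuf to @nrpages entries; @fail_at simulates a partial failure. */
static int grow_pages(struct gbuf *gbuf, unsigned int nrpages, unsigned int fail_at)
{
        void **tmp = calloc(nrpages, sizeof(*tmp));
        unsigned int j;

        if (!tmp)
                return -1;
        /* reuse what is already attached ... */
        for (j = 0; j < gbuf->nrpages; ++j)
                tmp[j] = gbuf->pages[j];
        /* ... and allocate only the tail */
        for (; j < nrpages && j < fail_at; ++j)
                tmp[j] = malloc(1);

        if (j < nrpages) {
                /*
                 * Partial failure: free only the slots allocated here.  Testing
                 * tmp[j] != gbuf->pages[j] alone would index past the old,
                 * shorter pages[] array, so check j >= gbuf->nrpages first.
                 */
                for (j = 0; j < nrpages; ++j)
                        if (tmp[j] && (j >= gbuf->nrpages || tmp[j] != gbuf->pages[j]))
                                free(tmp[j]);
                free(tmp);
                return -1;
        }
        free(gbuf->pages);      /* drop the old pointer array, keep its pages */
        gbuf->pages = tmp;
        gbuf->nrpages = nrpages;
        return 0;
}

int main(void)
{
        struct gbuf g = { .pages = NULL, .nrpages = 0 };

        grow_pages(&g, 4, 4);   /* grows fully: four new entries */
        grow_pages(&g, 8, 6);   /* fails at entry 6: only entries 4..5 are freed */
        return 0;
}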