Commit 586f14a6 authored by Linus Torvalds

Merge tag 'erofs-for-6.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs

Pull erofs updates from Gao Xiang:
 "Updates for folio conversions for compressed inodes: While large folio
  support for compressed data could work now, it remains disabled since
  the stress test could hang due to page migration in a few hours after
  enabling it. I need more time to investigate further before enabling
  this feature.

  Additionally, clean up stream decompressors and tracepoints for
  simplicity.

  Summary:

   - More folio conversions for compressed inodes

   - Stream decompressor (LZMA/DEFLATE/ZSTD) cleanups

   - Minor tracepoint cleanup"

* tag 'erofs-for-6.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs:
  erofs: silence uninitialized variable warning in z_erofs_scan_folio()
  erofs: avoid refcounting short-lived pages
  erofs: get rid of z_erofs_map_blocks_iter_* tracepoints
  erofs: tidy up stream decompressors
  erofs: refine z_erofs_{init,exit}_subsystem()
  erofs: move each decompressor to its own source file
  erofs: tidy up `struct z_erofs_bvec`
  erofs: teach z_erofs_scan_folios() to handle multi-page folios
  erofs: convert z_erofs_read_fragment() to folios
  erofs: convert z_erofs_pcluster_readmore() to folios
parents 586a7a85 a3c10bed
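
The "stream decompressor cleanups" summarized above replace the flat erofs_decompressors[] array with per-algorithm ops (z_erofs_lzma_decomp and friends) that also carry init/exit hooks, so z_erofs_init_subsystem() can bring every enabled algorithm up and tear it down in one place. A minimal userspace sketch of that wiring follows; it is not kernel code, the init/exit bodies are stubs, and NR_DECOMP is a made-up bound standing in for Z_EROFS_COMPRESSION_MAX:

#include <stdio.h>

struct decompressor_ops {
	int  (*init)(void);
	void (*exit)(void);
	const char *name;
};

static int  lz4_init(void) { puts("lz4 init"); return 0; }
static void lz4_exit(void) { puts("lz4 exit"); }

static const struct decompressor_ops lz4_ops = {
	.init = lz4_init, .exit = lz4_exit, .name = "lz4",
};

/* entries stay NULL when an algorithm is compiled out */
static const struct decompressor_ops *decomp[] = { &lz4_ops, NULL, NULL };
#define NR_DECOMP (sizeof(decomp) / sizeof(decomp[0]))

static int init_decompressors(void)
{
	int i, err;

	for (i = 0; i < (int)NR_DECOMP; ++i) {
		err = decomp[i] && decomp[i]->init ? decomp[i]->init() : 0;
		if (err) {
			while (i-- > 0)	/* unwind the ones already set up */
				if (decomp[i] && decomp[i]->exit)
					decomp[i]->exit();
			return err;
		}
	}
	return 0;
}

static void exit_decompressors(void)
{
	int i;

	for (i = 0; i < (int)NR_DECOMP; ++i)
		if (decomp[i] && decomp[i]->exit)
			decomp[i]->exit();
}

int main(void)
{
	if (!init_decompressors())
		exit_decompressors();
	return 0;
}

Keeping the hooks inside the ops table is what lets each algorithm live in its own source file with everything static, which is exactly the point of the "move each decompressor to its own source file" patch below.
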
@@ -24,6 +24,8 @@ struct z_erofs_decompressor {
void *data, int size); void *data, int size);
int (*decompress)(struct z_erofs_decompress_req *rq, int (*decompress)(struct z_erofs_decompress_req *rq,
struct page **pagepool); struct page **pagepool);
int (*init)(void);
void (*exit)(void);
char *name; char *name;
}; };
@@ -52,17 +54,14 @@ struct z_erofs_decompressor {
*/ */
/* /*
* short-lived pages are pages directly from buddy system with specific * Currently, short-lived pages are pages directly from buddy system
* page->private (no need to set PagePrivate since these are non-LRU / * with specific page->private (Z_EROFS_SHORTLIVED_PAGE).
* non-movable pages and bypass reclaim / migration code). * In the future world of Memdescs, it should be type 0 (Misc) memory
* which type can be checked with a new helper.
*/ */
static inline bool z_erofs_is_shortlived_page(struct page *page) static inline bool z_erofs_is_shortlived_page(struct page *page)
{ {
if (page->private != Z_EROFS_SHORTLIVED_PAGE) return page->private == Z_EROFS_SHORTLIVED_PAGE;
return false;
DBG_BUGON(page->mapping);
return true;
} }
static inline bool z_erofs_put_shortlivedpage(struct page **pagepool, static inline bool z_erofs_put_shortlivedpage(struct page **pagepool,
@@ -70,32 +69,32 @@ static inline bool z_erofs_put_shortlivedpage(struct page **pagepool,
{ {
if (!z_erofs_is_shortlived_page(page)) if (!z_erofs_is_shortlived_page(page))
return false; return false;
erofs_pagepool_add(pagepool, page);
/* short-lived pages should not be used by others at the same time */
if (page_ref_count(page) > 1) {
put_page(page);
} else {
/* follow the pcluster rule above. */
erofs_pagepool_add(pagepool, page);
}
return true; return true;
} }
extern const struct z_erofs_decompressor z_erofs_lzma_decomp;
extern const struct z_erofs_decompressor z_erofs_deflate_decomp;
extern const struct z_erofs_decompressor z_erofs_zstd_decomp;
extern const struct z_erofs_decompressor *z_erofs_decomp[];
struct z_erofs_stream_dctx {
struct z_erofs_decompress_req *rq;
unsigned int inpages, outpages; /* # of {en,de}coded pages */
int no, ni; /* the current {en,de}coded page # */
unsigned int avail_out; /* remaining bytes in the decoded buffer */
unsigned int inbuf_pos, inbuf_sz;
/* current status of the encoded buffer */
u8 *kin, *kout; /* buffer mapped pointers */
void *bounce; /* bounce buffer for inplace I/Os */
bool bounced; /* is the bounce buffer used now? */
};
int z_erofs_stream_switch_bufs(struct z_erofs_stream_dctx *dctx, void **dst,
void **src, struct page **pgpl);
int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf, int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf,
unsigned int padbufsize); unsigned int padbufsize);
extern const struct z_erofs_decompressor erofs_decompressors[]; int __init z_erofs_init_decompressor(void);
void z_erofs_exit_decompressor(void);
/* prototypes for specific algorithms */
int z_erofs_load_lzma_config(struct super_block *sb,
struct erofs_super_block *dsb, void *data, int size);
int z_erofs_load_deflate_config(struct super_block *sb,
struct erofs_super_block *dsb, void *data, int size);
int z_erofs_load_zstd_config(struct super_block *sb,
struct erofs_super_block *dsb, void *data, int size);
int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq,
struct page **pagepool);
int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq,
struct page **pagepool);
int z_erofs_zstd_decompress(struct z_erofs_decompress_req *rq,
struct page **pgpl);
#endif #endif
@@ -2,6 +2,7 @@
/* /*
* Copyright (C) 2019 HUAWEI, Inc. * Copyright (C) 2019 HUAWEI, Inc.
* https://www.huawei.com/ * https://www.huawei.com/
* Copyright (C) 2024 Alibaba Cloud
*/ */
#include "compress.h" #include "compress.h"
#include <linux/lz4.h> #include <linux/lz4.h>
@@ -109,7 +110,6 @@ static int z_erofs_lz4_prepare_dstpages(struct z_erofs_lz4_decompress_ctx *ctx,
if (top) { if (top) {
victim = availables[--top]; victim = availables[--top];
get_page(victim);
} else { } else {
victim = __erofs_allocpage(pagepool, rq->gfp, true); victim = __erofs_allocpage(pagepool, rq->gfp, true);
if (!victim) if (!victim)
@@ -371,40 +371,113 @@ static int z_erofs_transform_plain(struct z_erofs_decompress_req *rq,
return 0; return 0;
} }
const struct z_erofs_decompressor erofs_decompressors[] = { int z_erofs_stream_switch_bufs(struct z_erofs_stream_dctx *dctx, void **dst,
[Z_EROFS_COMPRESSION_SHIFTED] = { void **src, struct page **pgpl)
{
struct z_erofs_decompress_req *rq = dctx->rq;
struct super_block *sb = rq->sb;
struct page **pgo, *tmppage;
unsigned int j;
if (!dctx->avail_out) {
if (++dctx->no >= dctx->outpages || !rq->outputsize) {
erofs_err(sb, "insufficient space for decompressed data");
return -EFSCORRUPTED;
}
if (dctx->kout)
kunmap_local(dctx->kout);
dctx->avail_out = min(rq->outputsize, PAGE_SIZE - rq->pageofs_out);
rq->outputsize -= dctx->avail_out;
pgo = &rq->out[dctx->no];
if (!*pgo && rq->fillgaps) { /* deduped */
*pgo = erofs_allocpage(pgpl, rq->gfp);
if (!*pgo) {
dctx->kout = NULL;
return -ENOMEM;
}
set_page_private(*pgo, Z_EROFS_SHORTLIVED_PAGE);
}
if (*pgo) {
dctx->kout = kmap_local_page(*pgo);
*dst = dctx->kout + rq->pageofs_out;
} else {
*dst = dctx->kout = NULL;
}
rq->pageofs_out = 0;
}
if (dctx->inbuf_pos == dctx->inbuf_sz && rq->inputsize) {
if (++dctx->ni >= dctx->inpages) {
erofs_err(sb, "invalid compressed data");
return -EFSCORRUPTED;
}
if (dctx->kout) /* unlike kmap(), take care of the orders */
kunmap_local(dctx->kout);
kunmap_local(dctx->kin);
dctx->inbuf_sz = min_t(u32, rq->inputsize, PAGE_SIZE);
rq->inputsize -= dctx->inbuf_sz;
dctx->kin = kmap_local_page(rq->in[dctx->ni]);
*src = dctx->kin;
dctx->bounced = false;
if (dctx->kout) {
j = (u8 *)*dst - dctx->kout;
dctx->kout = kmap_local_page(rq->out[dctx->no]);
*dst = dctx->kout + j;
}
dctx->inbuf_pos = 0;
}
/*
* Handle overlapping: Use the given bounce buffer if the input data is
* under processing; Or utilize short-lived pages from the on-stack page
* pool, where pages are shared among the same request. Note that only
* a few inplace I/O pages need to be doubled.
*/
if (!dctx->bounced && rq->out[dctx->no] == rq->in[dctx->ni]) {
memcpy(dctx->bounce, *src, dctx->inbuf_sz);
*src = dctx->bounce;
dctx->bounced = true;
}
for (j = dctx->ni + 1; j < dctx->inpages; ++j) {
if (rq->out[dctx->no] != rq->in[j])
continue;
tmppage = erofs_allocpage(pgpl, rq->gfp);
if (!tmppage)
return -ENOMEM;
set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE);
copy_highpage(tmppage, rq->in[j]);
rq->in[j] = tmppage;
}
return 0;
}
const struct z_erofs_decompressor *z_erofs_decomp[] = {
[Z_EROFS_COMPRESSION_SHIFTED] = &(const struct z_erofs_decompressor) {
.decompress = z_erofs_transform_plain, .decompress = z_erofs_transform_plain,
.name = "shifted" .name = "shifted"
}, },
[Z_EROFS_COMPRESSION_INTERLACED] = { [Z_EROFS_COMPRESSION_INTERLACED] = &(const struct z_erofs_decompressor) {
.decompress = z_erofs_transform_plain, .decompress = z_erofs_transform_plain,
.name = "interlaced" .name = "interlaced"
}, },
[Z_EROFS_COMPRESSION_LZ4] = { [Z_EROFS_COMPRESSION_LZ4] = &(const struct z_erofs_decompressor) {
.config = z_erofs_load_lz4_config, .config = z_erofs_load_lz4_config,
.decompress = z_erofs_lz4_decompress, .decompress = z_erofs_lz4_decompress,
.init = z_erofs_gbuf_init,
.exit = z_erofs_gbuf_exit,
.name = "lz4" .name = "lz4"
}, },
#ifdef CONFIG_EROFS_FS_ZIP_LZMA #ifdef CONFIG_EROFS_FS_ZIP_LZMA
[Z_EROFS_COMPRESSION_LZMA] = { [Z_EROFS_COMPRESSION_LZMA] = &z_erofs_lzma_decomp,
.config = z_erofs_load_lzma_config,
.decompress = z_erofs_lzma_decompress,
.name = "lzma"
},
#endif #endif
#ifdef CONFIG_EROFS_FS_ZIP_DEFLATE #ifdef CONFIG_EROFS_FS_ZIP_DEFLATE
[Z_EROFS_COMPRESSION_DEFLATE] = { [Z_EROFS_COMPRESSION_DEFLATE] = &z_erofs_deflate_decomp,
.config = z_erofs_load_deflate_config,
.decompress = z_erofs_deflate_decompress,
.name = "deflate"
},
#endif #endif
#ifdef CONFIG_EROFS_FS_ZIP_ZSTD #ifdef CONFIG_EROFS_FS_ZIP_ZSTD
[Z_EROFS_COMPRESSION_ZSTD] = { [Z_EROFS_COMPRESSION_ZSTD] = &z_erofs_zstd_decomp,
.config = z_erofs_load_zstd_config,
.decompress = z_erofs_zstd_decompress,
.name = "zstd"
},
#endif #endif
}; };
@@ -432,6 +505,7 @@ int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb)
offset = EROFS_SUPER_OFFSET + sbi->sb_size; offset = EROFS_SUPER_OFFSET + sbi->sb_size;
alg = 0; alg = 0;
for (algs = sbi->available_compr_algs; algs; algs >>= 1, ++alg) { for (algs = sbi->available_compr_algs; algs; algs >>= 1, ++alg) {
const struct z_erofs_decompressor *dec = z_erofs_decomp[alg];
void *data; void *data;
if (!(algs & 1)) if (!(algs & 1))
@@ -443,16 +517,13 @@ int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb)
break; break;
} }
if (alg >= ARRAY_SIZE(erofs_decompressors) || if (alg < Z_EROFS_COMPRESSION_MAX && dec && dec->config) {
!erofs_decompressors[alg].config) { ret = dec->config(sb, dsb, data, size);
} else {
erofs_err(sb, "algorithm %d isn't enabled on this kernel", erofs_err(sb, "algorithm %d isn't enabled on this kernel",
alg); alg);
ret = -EOPNOTSUPP; ret = -EOPNOTSUPP;
} else {
ret = erofs_decompressors[alg].config(sb,
dsb, data, size);
} }
kfree(data); kfree(data);
if (ret) if (ret)
break; break;
@@ -460,3 +531,28 @@ int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb)
erofs_put_metabuf(&buf); erofs_put_metabuf(&buf);
return ret; return ret;
} }
int __init z_erofs_init_decompressor(void)
{
int i, err;
for (i = 0; i < Z_EROFS_COMPRESSION_MAX; ++i) {
err = z_erofs_decomp[i] ? z_erofs_decomp[i]->init() : 0;
if (err) {
while (--i)
if (z_erofs_decomp[i])
z_erofs_decomp[i]->exit();
return err;
}
}
return 0;
}
void z_erofs_exit_decompressor(void)
{
int i;
for (i = 0; i < Z_EROFS_COMPRESSION_MAX; ++i)
if (z_erofs_decomp[i])
z_erofs_decomp[i]->exit();
}
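
The config loop in z_erofs_parse_cfgs() above walks the superblock's available_compr_algs bitmap one bit at a time and looks up z_erofs_decomp[alg] for each set bit. A tiny standalone illustration of that bit-walk, with a hard-coded bitmap instead of on-disk data:

#include <stdio.h>

int main(void)
{
	const char *names[] = { "lz4", "lzma", "deflate", "zstd" };
	unsigned int algs = (1u << 0) | (1u << 3);	/* image advertises lz4 + zstd */
	int alg = 0;

	/* same shape as: for (algs = sbi->available_compr_algs; algs; algs >>= 1, ++alg) */
	for (; algs; algs >>= 1, ++alg) {
		if (!(algs & 1))
			continue;
		printf("algorithm %d (%s): read its config block\n",
		       alg, alg < 4 ? names[alg] : "unknown");
	}
	return 0;
}
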
@@ -15,7 +15,7 @@ static DECLARE_WAIT_QUEUE_HEAD(z_erofs_deflate_wq);
module_param_named(deflate_streams, z_erofs_deflate_nstrms, uint, 0444); module_param_named(deflate_streams, z_erofs_deflate_nstrms, uint, 0444);
void z_erofs_deflate_exit(void) static void z_erofs_deflate_exit(void)
{ {
/* there should be no running fs instance */ /* there should be no running fs instance */
while (z_erofs_deflate_avail_strms) { while (z_erofs_deflate_avail_strms) {
@@ -41,7 +41,7 @@ void z_erofs_deflate_exit(void)
} }
} }
int __init z_erofs_deflate_init(void) static int __init z_erofs_deflate_init(void)
{ {
/* by default, use # of possible CPUs instead */ /* by default, use # of possible CPUs instead */
if (!z_erofs_deflate_nstrms) if (!z_erofs_deflate_nstrms)
@@ -49,7 +49,7 @@ int __init z_erofs_deflate_init(void)
return 0; return 0;
} }
int z_erofs_load_deflate_config(struct super_block *sb, static int z_erofs_load_deflate_config(struct super_block *sb,
struct erofs_super_block *dsb, void *data, int size) struct erofs_super_block *dsb, void *data, int size)
{ {
struct z_erofs_deflate_cfgs *dfl = data; struct z_erofs_deflate_cfgs *dfl = data;
@@ -97,27 +97,26 @@ int z_erofs_load_deflate_config(struct super_block *sb,
return -ENOMEM; return -ENOMEM;
} }
int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq, static int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq,
struct page **pgpl) struct page **pgpl)
{ {
const unsigned int nrpages_out =
PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
const unsigned int nrpages_in =
PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT;
struct super_block *sb = rq->sb; struct super_block *sb = rq->sb;
unsigned int insz, outsz, pofs; struct z_erofs_stream_dctx dctx = {
.rq = rq,
.inpages = PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT,
.outpages = PAGE_ALIGN(rq->pageofs_out + rq->outputsize)
>> PAGE_SHIFT,
.no = -1, .ni = 0,
};
struct z_erofs_deflate *strm; struct z_erofs_deflate *strm;
u8 *kin, *kout = NULL; int zerr, err;
bool bounced = false;
int no = -1, ni = 0, j = 0, zerr, err;
/* 1. get the exact DEFLATE compressed size */ /* 1. get the exact DEFLATE compressed size */
kin = kmap_local_page(*rq->in); dctx.kin = kmap_local_page(*rq->in);
err = z_erofs_fixup_insize(rq, kin + rq->pageofs_in, err = z_erofs_fixup_insize(rq, dctx.kin + rq->pageofs_in,
min_t(unsigned int, rq->inputsize, min(rq->inputsize, sb->s_blocksize - rq->pageofs_in));
sb->s_blocksize - rq->pageofs_in));
if (err) { if (err) {
kunmap_local(kin); kunmap_local(dctx.kin);
return err; return err;
} }
@@ -134,102 +133,35 @@ int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq,
spin_unlock(&z_erofs_deflate_lock); spin_unlock(&z_erofs_deflate_lock);
/* 3. multi-call decompress */ /* 3. multi-call decompress */
insz = rq->inputsize;
outsz = rq->outputsize;
zerr = zlib_inflateInit2(&strm->z, -MAX_WBITS); zerr = zlib_inflateInit2(&strm->z, -MAX_WBITS);
if (zerr != Z_OK) { if (zerr != Z_OK) {
err = -EIO; err = -EIO;
goto failed_zinit; goto failed_zinit;
} }
pofs = rq->pageofs_out; rq->fillgaps = true; /* DEFLATE doesn't support NULL output buffer */
strm->z.avail_in = min_t(u32, insz, PAGE_SIZE - rq->pageofs_in); strm->z.avail_in = min(rq->inputsize, PAGE_SIZE - rq->pageofs_in);
insz -= strm->z.avail_in; rq->inputsize -= strm->z.avail_in;
strm->z.next_in = kin + rq->pageofs_in; strm->z.next_in = dctx.kin + rq->pageofs_in;
strm->z.avail_out = 0; strm->z.avail_out = 0;
dctx.bounce = strm->bounce;
while (1) { while (1) {
if (!strm->z.avail_out) { dctx.avail_out = strm->z.avail_out;
if (++no >= nrpages_out || !outsz) { dctx.inbuf_sz = strm->z.avail_in;
erofs_err(sb, "insufficient space for decompressed data"); err = z_erofs_stream_switch_bufs(&dctx,
err = -EFSCORRUPTED; (void **)&strm->z.next_out,
break; (void **)&strm->z.next_in, pgpl);
} if (err)
break;
if (kout) strm->z.avail_out = dctx.avail_out;
kunmap_local(kout); strm->z.avail_in = dctx.inbuf_sz;
strm->z.avail_out = min_t(u32, outsz, PAGE_SIZE - pofs);
outsz -= strm->z.avail_out;
if (!rq->out[no]) {
rq->out[no] = erofs_allocpage(pgpl, rq->gfp);
if (!rq->out[no]) {
kout = NULL;
err = -ENOMEM;
break;
}
set_page_private(rq->out[no],
Z_EROFS_SHORTLIVED_PAGE);
}
kout = kmap_local_page(rq->out[no]);
strm->z.next_out = kout + pofs;
pofs = 0;
}
if (!strm->z.avail_in && insz) {
if (++ni >= nrpages_in) {
erofs_err(sb, "invalid compressed data");
err = -EFSCORRUPTED;
break;
}
if (kout) { /* unlike kmap(), take care of the orders */
j = strm->z.next_out - kout;
kunmap_local(kout);
}
kunmap_local(kin);
strm->z.avail_in = min_t(u32, insz, PAGE_SIZE);
insz -= strm->z.avail_in;
kin = kmap_local_page(rq->in[ni]);
strm->z.next_in = kin;
bounced = false;
if (kout) {
kout = kmap_local_page(rq->out[no]);
strm->z.next_out = kout + j;
}
}
/*
* Handle overlapping: Use bounced buffer if the compressed
* data is under processing; Or use short-lived pages from the
* on-stack pagepool where pages share among the same request
* and not _all_ inplace I/O pages are needed to be doubled.
*/
if (!bounced && rq->out[no] == rq->in[ni]) {
memcpy(strm->bounce, strm->z.next_in, strm->z.avail_in);
strm->z.next_in = strm->bounce;
bounced = true;
}
for (j = ni + 1; j < nrpages_in; ++j) {
struct page *tmppage;
if (rq->out[no] != rq->in[j])
continue;
tmppage = erofs_allocpage(pgpl, rq->gfp);
if (!tmppage) {
err = -ENOMEM;
goto failed;
}
set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE);
copy_highpage(tmppage, rq->in[j]);
rq->in[j] = tmppage;
}
zerr = zlib_inflate(&strm->z, Z_SYNC_FLUSH); zerr = zlib_inflate(&strm->z, Z_SYNC_FLUSH);
if (zerr != Z_OK || !(outsz + strm->z.avail_out)) { if (zerr != Z_OK || !(rq->outputsize + strm->z.avail_out)) {
if (zerr == Z_OK && rq->partial_decoding) if (zerr == Z_OK && rq->partial_decoding)
break; break;
if (zerr == Z_STREAM_END && !outsz) if (zerr == Z_STREAM_END && !rq->outputsize)
break; break;
erofs_err(sb, "failed to decompress %d in[%u] out[%u]", erofs_err(sb, "failed to decompress %d in[%u] out[%u]",
zerr, rq->inputsize, rq->outputsize); zerr, rq->inputsize, rq->outputsize);
@@ -237,13 +169,12 @@ int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq,
break; break;
} }
} }
failed:
if (zlib_inflateEnd(&strm->z) != Z_OK && !err) if (zlib_inflateEnd(&strm->z) != Z_OK && !err)
err = -EIO; err = -EIO;
if (kout) if (dctx.kout)
kunmap_local(kout); kunmap_local(dctx.kout);
failed_zinit: failed_zinit:
kunmap_local(kin); kunmap_local(dctx.kin);
/* 4. push back DEFLATE stream context to the global list */ /* 4. push back DEFLATE stream context to the global list */
spin_lock(&z_erofs_deflate_lock); spin_lock(&z_erofs_deflate_lock);
strm->next = z_erofs_deflate_head; strm->next = z_erofs_deflate_head;
@@ -252,3 +183,11 @@ int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq,
wake_up(&z_erofs_deflate_wq); wake_up(&z_erofs_deflate_wq);
return err; return err;
} }
const struct z_erofs_decompressor z_erofs_deflate_decomp = {
.config = z_erofs_load_deflate_config,
.decompress = z_erofs_deflate_decompress,
.init = z_erofs_deflate_init,
.exit = z_erofs_deflate_exit,
.name = "deflate",
};
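
The rewritten z_erofs_deflate_decompress() above still follows the classic zlib multi-call pattern; it just delegates the page switching to z_erofs_stream_switch_bufs(). For the pattern in isolation, here is a hedged userspace analogue using zlib's raw-inflate API over contiguous buffers refilled in PAGE_SIZE-like chunks; the chunking stands in for walking rq->in[]/rq->out[] page by page, and none of this is kernel code:

#include <stdio.h>
#include <string.h>
#include <zlib.h>

#define CHUNK 4096	/* stands in for PAGE_SIZE */

/* Inflate raw DEFLATE data from src[srclen] into dst[dstlen], never giving
 * zlib more than CHUNK bytes of input or output space per call, the way the
 * kernel loop hands it one mapped page at a time. Returns 0 on success. */
static int inflate_chunked(const unsigned char *src, unsigned long srclen,
			   unsigned char *dst, unsigned long dstlen)
{
	z_stream z;
	int zerr, err = -1;

	memset(&z, 0, sizeof(z));
	if (inflateInit2(&z, -MAX_WBITS) != Z_OK)	/* raw DEFLATE, no header */
		return -1;
	z.next_in = (unsigned char *)src;
	z.next_out = dst;

	while (1) {
		/* "switch buffers": top up whichever side ran dry */
		if (!z.avail_in)
			z.avail_in = srclen - z.total_in > CHUNK ?
					CHUNK : srclen - z.total_in;
		if (!z.avail_out)
			z.avail_out = dstlen - z.total_out > CHUNK ?
					CHUNK : dstlen - z.total_out;

		zerr = inflate(&z, Z_SYNC_FLUSH);
		if (zerr == Z_STREAM_END) {
			err = 0;
			break;
		}
		if (zerr != Z_OK)	/* corrupt data or no forward progress */
			break;
		if (!z.avail_out && z.total_out == dstlen) {
			err = 0;	/* produced everything we asked for */
			break;
		}
	}
	inflateEnd(&z);
	return err;
}

int main(void)
{
	unsigned char plain[8192], comp[8192], out[8192];
	unsigned long clen;
	z_stream d;

	memset(plain, 'A', sizeof(plain));
	memset(&d, 0, sizeof(d));
	deflateInit2(&d, Z_DEFAULT_COMPRESSION, Z_DEFLATED, -MAX_WBITS, 8,
		     Z_DEFAULT_STRATEGY);
	d.next_in = plain;  d.avail_in = sizeof(plain);
	d.next_out = comp;  d.avail_out = sizeof(comp);
	deflate(&d, Z_FINISH);
	clen = d.total_out;
	deflateEnd(&d);

	printf("decompress: %s\n",
	       inflate_chunked(comp, clen, out, sizeof(plain)) ? "failed" : "ok");
	return 0;
}

Compile with -lz. The kernel version differs mainly in that the "top up" step may also have to allocate a short-lived page for a deduplicated gap or bounce the input when it overlaps the output, which is exactly the logic now centralized in z_erofs_stream_switch_bufs().
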
@@ -5,7 +5,6 @@
struct z_erofs_lzma { struct z_erofs_lzma {
struct z_erofs_lzma *next; struct z_erofs_lzma *next;
struct xz_dec_microlzma *state; struct xz_dec_microlzma *state;
struct xz_buf buf;
u8 bounce[PAGE_SIZE]; u8 bounce[PAGE_SIZE];
}; };
@@ -18,7 +17,7 @@ static DECLARE_WAIT_QUEUE_HEAD(z_erofs_lzma_wq);
module_param_named(lzma_streams, z_erofs_lzma_nstrms, uint, 0444); module_param_named(lzma_streams, z_erofs_lzma_nstrms, uint, 0444);
void z_erofs_lzma_exit(void) static void z_erofs_lzma_exit(void)
{ {
/* there should be no running fs instance */ /* there should be no running fs instance */
while (z_erofs_lzma_avail_strms) { while (z_erofs_lzma_avail_strms) {
@@ -46,7 +45,7 @@ void z_erofs_lzma_exit(void)
} }
} }
int __init z_erofs_lzma_init(void) static int __init z_erofs_lzma_init(void)
{ {
unsigned int i; unsigned int i;
@@ -70,7 +69,7 @@ int __init z_erofs_lzma_init(void)
return 0; return 0;
} }
int z_erofs_load_lzma_config(struct super_block *sb, static int z_erofs_load_lzma_config(struct super_block *sb,
struct erofs_super_block *dsb, void *data, int size) struct erofs_super_block *dsb, void *data, int size)
{ {
static DEFINE_MUTEX(lzma_resize_mutex); static DEFINE_MUTEX(lzma_resize_mutex);
@@ -147,26 +146,28 @@ int z_erofs_load_lzma_config(struct super_block *sb,
return err; return err;
} }
int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq, static int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq,
struct page **pgpl) struct page **pgpl)
{ {
const unsigned int nrpages_out = struct super_block *sb = rq->sb;
PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT; struct z_erofs_stream_dctx dctx = {
const unsigned int nrpages_in = .rq = rq,
PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT; .inpages = PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT,
unsigned int inlen, outlen, pageofs; .outpages = PAGE_ALIGN(rq->pageofs_out + rq->outputsize)
>> PAGE_SHIFT,
.no = -1, .ni = 0,
};
struct xz_buf buf = {};
struct z_erofs_lzma *strm; struct z_erofs_lzma *strm;
u8 *kin; enum xz_ret xz_err;
bool bounced = false; int err;
int no, ni, j, err = 0;
/* 1. get the exact LZMA compressed size */ /* 1. get the exact LZMA compressed size */
kin = kmap(*rq->in); dctx.kin = kmap_local_page(*rq->in);
err = z_erofs_fixup_insize(rq, kin + rq->pageofs_in, err = z_erofs_fixup_insize(rq, dctx.kin + rq->pageofs_in,
min_t(unsigned int, rq->inputsize, min(rq->inputsize, sb->s_blocksize - rq->pageofs_in));
rq->sb->s_blocksize - rq->pageofs_in));
if (err) { if (err) {
kunmap(*rq->in); kunmap_local(dctx.kin);
return err; return err;
} }
@@ -183,108 +184,45 @@ int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq,
spin_unlock(&z_erofs_lzma_lock); spin_unlock(&z_erofs_lzma_lock);
/* 3. multi-call decompress */ /* 3. multi-call decompress */
inlen = rq->inputsize; xz_dec_microlzma_reset(strm->state, rq->inputsize, rq->outputsize,
outlen = rq->outputsize;
xz_dec_microlzma_reset(strm->state, inlen, outlen,
!rq->partial_decoding); !rq->partial_decoding);
pageofs = rq->pageofs_out; buf.in_size = min(rq->inputsize, PAGE_SIZE - rq->pageofs_in);
strm->buf.in = kin + rq->pageofs_in; rq->inputsize -= buf.in_size;
strm->buf.in_pos = 0; buf.in = dctx.kin + rq->pageofs_in,
strm->buf.in_size = min_t(u32, inlen, PAGE_SIZE - rq->pageofs_in); dctx.bounce = strm->bounce;
inlen -= strm->buf.in_size; do {
strm->buf.out = NULL; dctx.avail_out = buf.out_size - buf.out_pos;
strm->buf.out_pos = 0; dctx.inbuf_sz = buf.in_size;
strm->buf.out_size = 0; dctx.inbuf_pos = buf.in_pos;
err = z_erofs_stream_switch_bufs(&dctx, (void **)&buf.out,
for (ni = 0, no = -1;;) { (void **)&buf.in, pgpl);
enum xz_ret xz_err; if (err)
break;
if (strm->buf.out_pos == strm->buf.out_size) {
if (strm->buf.out) {
kunmap(rq->out[no]);
strm->buf.out = NULL;
}
if (++no >= nrpages_out || !outlen) {
erofs_err(rq->sb, "decompressed buf out of bound");
err = -EFSCORRUPTED;
break;
}
strm->buf.out_pos = 0;
strm->buf.out_size = min_t(u32, outlen,
PAGE_SIZE - pageofs);
outlen -= strm->buf.out_size;
if (!rq->out[no] && rq->fillgaps) { /* deduped */
rq->out[no] = erofs_allocpage(pgpl, rq->gfp);
if (!rq->out[no]) {
err = -ENOMEM;
break;
}
set_page_private(rq->out[no],
Z_EROFS_SHORTLIVED_PAGE);
}
if (rq->out[no])
strm->buf.out = kmap(rq->out[no]) + pageofs;
pageofs = 0;
} else if (strm->buf.in_pos == strm->buf.in_size) {
kunmap(rq->in[ni]);
if (++ni >= nrpages_in || !inlen) {
erofs_err(rq->sb, "compressed buf out of bound");
err = -EFSCORRUPTED;
break;
}
strm->buf.in_pos = 0;
strm->buf.in_size = min_t(u32, inlen, PAGE_SIZE);
inlen -= strm->buf.in_size;
kin = kmap(rq->in[ni]);
strm->buf.in = kin;
bounced = false;
}
/* if (buf.out_size == buf.out_pos) {
* Handle overlapping: Use bounced buffer if the compressed buf.out_size = dctx.avail_out;
* data is under processing; Otherwise, Use short-lived pages buf.out_pos = 0;
* from the on-stack pagepool where pages share with the same
* request.
*/
if (!bounced && rq->out[no] == rq->in[ni]) {
memcpy(strm->bounce, strm->buf.in, strm->buf.in_size);
strm->buf.in = strm->bounce;
bounced = true;
} }
for (j = ni + 1; j < nrpages_in; ++j) { buf.in_size = dctx.inbuf_sz;
struct page *tmppage; buf.in_pos = dctx.inbuf_pos;
if (rq->out[no] != rq->in[j]) xz_err = xz_dec_microlzma_run(strm->state, &buf);
continue; DBG_BUGON(buf.out_pos > buf.out_size);
tmppage = erofs_allocpage(pgpl, rq->gfp); DBG_BUGON(buf.in_pos > buf.in_size);
if (!tmppage) {
err = -ENOMEM;
goto failed;
}
set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE);
copy_highpage(tmppage, rq->in[j]);
rq->in[j] = tmppage;
}
xz_err = xz_dec_microlzma_run(strm->state, &strm->buf);
DBG_BUGON(strm->buf.out_pos > strm->buf.out_size);
DBG_BUGON(strm->buf.in_pos > strm->buf.in_size);
if (xz_err != XZ_OK) { if (xz_err != XZ_OK) {
if (xz_err == XZ_STREAM_END && !outlen) if (xz_err == XZ_STREAM_END && !rq->outputsize)
break; break;
erofs_err(rq->sb, "failed to decompress %d in[%u] out[%u]", erofs_err(sb, "failed to decompress %d in[%u] out[%u]",
xz_err, rq->inputsize, rq->outputsize); xz_err, rq->inputsize, rq->outputsize);
err = -EFSCORRUPTED; err = -EFSCORRUPTED;
break; break;
} }
} } while (1);
failed:
if (no < nrpages_out && strm->buf.out) if (dctx.kout)
kunmap(rq->out[no]); kunmap_local(dctx.kout);
if (ni < nrpages_in) kunmap_local(dctx.kin);
kunmap(rq->in[ni]);
/* 4. push back LZMA stream context to the global list */ /* 4. push back LZMA stream context to the global list */
spin_lock(&z_erofs_lzma_lock); spin_lock(&z_erofs_lzma_lock);
strm->next = z_erofs_lzma_head; strm->next = z_erofs_lzma_head;
@@ -293,3 +231,11 @@ int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq,
wake_up(&z_erofs_lzma_wq); wake_up(&z_erofs_lzma_wq);
return err; return err;
} }
const struct z_erofs_decompressor z_erofs_lzma_decomp = {
.config = z_erofs_load_lzma_config,
.decompress = z_erofs_lzma_decompress,
.init = z_erofs_lzma_init,
.exit = z_erofs_lzma_exit,
.name = "lzma"
};
@@ -34,7 +34,7 @@ static struct z_erofs_zstd *z_erofs_isolate_strms(bool all)
return strm; return strm;
} }
void z_erofs_zstd_exit(void) static void z_erofs_zstd_exit(void)
{ {
while (z_erofs_zstd_avail_strms) { while (z_erofs_zstd_avail_strms) {
struct z_erofs_zstd *strm, *n; struct z_erofs_zstd *strm, *n;
@@ -49,7 +49,7 @@ void z_erofs_zstd_exit(void)
} }
} }
int __init z_erofs_zstd_init(void) static int __init z_erofs_zstd_init(void)
{ {
/* by default, use # of possible CPUs instead */ /* by default, use # of possible CPUs instead */
if (!z_erofs_zstd_nstrms) if (!z_erofs_zstd_nstrms)
@@ -72,7 +72,7 @@ int __init z_erofs_zstd_init(void)
return 0; return 0;
} }
int z_erofs_load_zstd_config(struct super_block *sb, static int z_erofs_load_zstd_config(struct super_block *sb,
struct erofs_super_block *dsb, void *data, int size) struct erofs_super_block *dsb, void *data, int size)
{ {
static DEFINE_MUTEX(zstd_resize_mutex); static DEFINE_MUTEX(zstd_resize_mutex);
@@ -135,30 +135,29 @@ int z_erofs_load_zstd_config(struct super_block *sb,
return strm ? -ENOMEM : 0; return strm ? -ENOMEM : 0;
} }
int z_erofs_zstd_decompress(struct z_erofs_decompress_req *rq, static int z_erofs_zstd_decompress(struct z_erofs_decompress_req *rq,
struct page **pgpl) struct page **pgpl)
{ {
const unsigned int nrpages_out =
PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
const unsigned int nrpages_in =
PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT;
zstd_dstream *stream;
struct super_block *sb = rq->sb; struct super_block *sb = rq->sb;
unsigned int insz, outsz, pofs; struct z_erofs_stream_dctx dctx = {
struct z_erofs_zstd *strm; .rq = rq,
.inpages = PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT,
.outpages = PAGE_ALIGN(rq->pageofs_out + rq->outputsize)
>> PAGE_SHIFT,
.no = -1, .ni = 0,
};
zstd_in_buffer in_buf = { NULL, 0, 0 }; zstd_in_buffer in_buf = { NULL, 0, 0 };
zstd_out_buffer out_buf = { NULL, 0, 0 }; zstd_out_buffer out_buf = { NULL, 0, 0 };
u8 *kin, *kout = NULL; struct z_erofs_zstd *strm;
bool bounced = false; zstd_dstream *stream;
int no = -1, ni = 0, j = 0, zerr, err; int zerr, err;
/* 1. get the exact compressed size */ /* 1. get the exact compressed size */
kin = kmap_local_page(*rq->in); dctx.kin = kmap_local_page(*rq->in);
err = z_erofs_fixup_insize(rq, kin + rq->pageofs_in, err = z_erofs_fixup_insize(rq, dctx.kin + rq->pageofs_in,
min_t(unsigned int, rq->inputsize, min(rq->inputsize, sb->s_blocksize - rq->pageofs_in));
sb->s_blocksize - rq->pageofs_in));
if (err) { if (err) {
kunmap_local(kin); kunmap_local(dctx.kin);
return err; return err;
} }
@@ -166,109 +165,48 @@ int z_erofs_zstd_decompress(struct z_erofs_decompress_req *rq,
strm = z_erofs_isolate_strms(false); strm = z_erofs_isolate_strms(false);
/* 3. multi-call decompress */ /* 3. multi-call decompress */
insz = rq->inputsize;
outsz = rq->outputsize;
stream = zstd_init_dstream(z_erofs_zstd_max_dictsize, strm->wksp, strm->wkspsz); stream = zstd_init_dstream(z_erofs_zstd_max_dictsize, strm->wksp, strm->wkspsz);
if (!stream) { if (!stream) {
err = -EIO; err = -EIO;
goto failed_zinit; goto failed_zinit;
} }
pofs = rq->pageofs_out; rq->fillgaps = true; /* ZSTD doesn't support NULL output buffer */
in_buf.size = min_t(u32, insz, PAGE_SIZE - rq->pageofs_in); in_buf.size = min_t(u32, rq->inputsize, PAGE_SIZE - rq->pageofs_in);
insz -= in_buf.size; rq->inputsize -= in_buf.size;
in_buf.src = kin + rq->pageofs_in; in_buf.src = dctx.kin + rq->pageofs_in;
dctx.bounce = strm->bounce;
do { do {
if (out_buf.size == out_buf.pos) { dctx.avail_out = out_buf.size - out_buf.pos;
if (++no >= nrpages_out || !outsz) { dctx.inbuf_sz = in_buf.size;
erofs_err(sb, "insufficient space for decompressed data"); dctx.inbuf_pos = in_buf.pos;
err = -EFSCORRUPTED; err = z_erofs_stream_switch_bufs(&dctx, &out_buf.dst,
break; (void **)&in_buf.src, pgpl);
} if (err)
break;
if (kout) if (out_buf.size == out_buf.pos) {
kunmap_local(kout); out_buf.size = dctx.avail_out;
out_buf.size = min_t(u32, outsz, PAGE_SIZE - pofs);
outsz -= out_buf.size;
if (!rq->out[no]) {
rq->out[no] = erofs_allocpage(pgpl, rq->gfp);
if (!rq->out[no]) {
kout = NULL;
err = -ENOMEM;
break;
}
set_page_private(rq->out[no],
Z_EROFS_SHORTLIVED_PAGE);
}
kout = kmap_local_page(rq->out[no]);
out_buf.dst = kout + pofs;
out_buf.pos = 0; out_buf.pos = 0;
pofs = 0;
}
if (in_buf.size == in_buf.pos && insz) {
if (++ni >= nrpages_in) {
erofs_err(sb, "invalid compressed data");
err = -EFSCORRUPTED;
break;
}
if (kout) /* unlike kmap(), take care of the orders */
kunmap_local(kout);
kunmap_local(kin);
in_buf.size = min_t(u32, insz, PAGE_SIZE);
insz -= in_buf.size;
kin = kmap_local_page(rq->in[ni]);
in_buf.src = kin;
in_buf.pos = 0;
bounced = false;
if (kout) {
j = (u8 *)out_buf.dst - kout;
kout = kmap_local_page(rq->out[no]);
out_buf.dst = kout + j;
}
} }
in_buf.size = dctx.inbuf_sz;
in_buf.pos = dctx.inbuf_pos;
/*
* Handle overlapping: Use bounced buffer if the compressed
* data is under processing; Or use short-lived pages from the
* on-stack pagepool where pages share among the same request
* and not _all_ inplace I/O pages are needed to be doubled.
*/
if (!bounced && rq->out[no] == rq->in[ni]) {
memcpy(strm->bounce, in_buf.src, in_buf.size);
in_buf.src = strm->bounce;
bounced = true;
}
for (j = ni + 1; j < nrpages_in; ++j) {
struct page *tmppage;
if (rq->out[no] != rq->in[j])
continue;
tmppage = erofs_allocpage(pgpl, rq->gfp);
if (!tmppage) {
err = -ENOMEM;
goto failed;
}
set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE);
copy_highpage(tmppage, rq->in[j]);
rq->in[j] = tmppage;
}
zerr = zstd_decompress_stream(stream, &out_buf, &in_buf); zerr = zstd_decompress_stream(stream, &out_buf, &in_buf);
if (zstd_is_error(zerr) || (!zerr && outsz)) { if (zstd_is_error(zerr) || (!zerr && rq->outputsize)) {
erofs_err(sb, "failed to decompress in[%u] out[%u]: %s", erofs_err(sb, "failed to decompress in[%u] out[%u]: %s",
rq->inputsize, rq->outputsize, rq->inputsize, rq->outputsize,
zerr ? zstd_get_error_name(zerr) : "unexpected end of stream"); zerr ? zstd_get_error_name(zerr) : "unexpected end of stream");
err = -EFSCORRUPTED; err = -EFSCORRUPTED;
break; break;
} }
} while (outsz || out_buf.pos < out_buf.size); } while (rq->outputsize || out_buf.pos < out_buf.size);
failed:
if (kout) if (dctx.kout)
kunmap_local(kout); kunmap_local(dctx.kout);
failed_zinit: failed_zinit:
kunmap_local(kin); kunmap_local(dctx.kin);
/* 4. push back ZSTD stream context to the global list */ /* 4. push back ZSTD stream context to the global list */
spin_lock(&z_erofs_zstd_lock); spin_lock(&z_erofs_zstd_lock);
strm->next = z_erofs_zstd_head; strm->next = z_erofs_zstd_head;
@@ -277,3 +215,11 @@ int z_erofs_zstd_decompress(struct z_erofs_decompress_req *rq,
wake_up(&z_erofs_zstd_wq); wake_up(&z_erofs_zstd_wq);
return err; return err;
} }
const struct z_erofs_decompressor z_erofs_zstd_decomp = {
.config = z_erofs_load_zstd_config,
.decompress = z_erofs_zstd_decompress,
.init = z_erofs_zstd_init,
.exit = z_erofs_zstd_exit,
.name = "zstd",
};
@@ -312,17 +312,13 @@ static inline unsigned int erofs_inode_datalayout(unsigned int ifmt)
return (ifmt >> EROFS_I_DATALAYOUT_BIT) & EROFS_I_DATALAYOUT_MASK; return (ifmt >> EROFS_I_DATALAYOUT_BIT) & EROFS_I_DATALAYOUT_MASK;
} }
/* /* reclaiming is never triggered when allocating new folios. */
* Different from grab_cache_page_nowait(), reclaiming is never triggered static inline struct folio *erofs_grab_folio_nowait(struct address_space *as,
* when allocating new pages. pgoff_t index)
*/
static inline
struct page *erofs_grab_cache_page_nowait(struct address_space *mapping,
pgoff_t index)
{ {
return pagecache_get_page(mapping, index, return __filemap_get_folio(as, index,
FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT, FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
readahead_gfp_mask(mapping) & ~__GFP_RECLAIM); readahead_gfp_mask(as) & ~__GFP_RECLAIM);
} }
/* Has a disk mapping */ /* Has a disk mapping */
@@ -458,8 +454,8 @@ void erofs_shrinker_register(struct super_block *sb);
void erofs_shrinker_unregister(struct super_block *sb); void erofs_shrinker_unregister(struct super_block *sb);
int __init erofs_init_shrinker(void); int __init erofs_init_shrinker(void);
void erofs_exit_shrinker(void); void erofs_exit_shrinker(void);
int __init z_erofs_init_zip_subsystem(void); int __init z_erofs_init_subsystem(void);
void z_erofs_exit_zip_subsystem(void); void z_erofs_exit_subsystem(void);
int erofs_try_to_free_all_cached_folios(struct erofs_sb_info *sbi, int erofs_try_to_free_all_cached_folios(struct erofs_sb_info *sbi,
struct erofs_workgroup *egrp); struct erofs_workgroup *egrp);
int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map, int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
@@ -476,37 +472,11 @@ static inline void erofs_shrinker_register(struct super_block *sb) {}
static inline void erofs_shrinker_unregister(struct super_block *sb) {} static inline void erofs_shrinker_unregister(struct super_block *sb) {}
static inline int erofs_init_shrinker(void) { return 0; } static inline int erofs_init_shrinker(void) { return 0; }
static inline void erofs_exit_shrinker(void) {} static inline void erofs_exit_shrinker(void) {}
static inline int z_erofs_init_zip_subsystem(void) { return 0; } static inline int z_erofs_init_subsystem(void) { return 0; }
static inline void z_erofs_exit_zip_subsystem(void) {} static inline void z_erofs_exit_subsystem(void) {}
static inline int z_erofs_gbuf_init(void) { return 0; }
static inline void z_erofs_gbuf_exit(void) {}
static inline int erofs_init_managed_cache(struct super_block *sb) { return 0; } static inline int erofs_init_managed_cache(struct super_block *sb) { return 0; }
#endif /* !CONFIG_EROFS_FS_ZIP */ #endif /* !CONFIG_EROFS_FS_ZIP */
#ifdef CONFIG_EROFS_FS_ZIP_LZMA
int __init z_erofs_lzma_init(void);
void z_erofs_lzma_exit(void);
#else
static inline int z_erofs_lzma_init(void) { return 0; }
static inline int z_erofs_lzma_exit(void) { return 0; }
#endif /* !CONFIG_EROFS_FS_ZIP_LZMA */
#ifdef CONFIG_EROFS_FS_ZIP_DEFLATE
int __init z_erofs_deflate_init(void);
void z_erofs_deflate_exit(void);
#else
static inline int z_erofs_deflate_init(void) { return 0; }
static inline int z_erofs_deflate_exit(void) { return 0; }
#endif /* !CONFIG_EROFS_FS_ZIP_DEFLATE */
#ifdef CONFIG_EROFS_FS_ZIP_ZSTD
int __init z_erofs_zstd_init(void);
void z_erofs_zstd_exit(void);
#else
static inline int z_erofs_zstd_init(void) { return 0; }
static inline int z_erofs_zstd_exit(void) { return 0; }
#endif /* !CONFIG_EROFS_FS_ZIP_ZSTD */
#ifdef CONFIG_EROFS_FS_ONDEMAND #ifdef CONFIG_EROFS_FS_ONDEMAND
int erofs_fscache_register_fs(struct super_block *sb); int erofs_fscache_register_fs(struct super_block *sb);
void erofs_fscache_unregister_fs(struct super_block *sb); void erofs_fscache_unregister_fs(struct super_block *sb);
...
@@ -849,23 +849,7 @@ static int __init erofs_module_init(void)
if (err) if (err)
goto shrinker_err; goto shrinker_err;
err = z_erofs_lzma_init(); err = z_erofs_init_subsystem();
if (err)
goto lzma_err;
err = z_erofs_deflate_init();
if (err)
goto deflate_err;
err = z_erofs_zstd_init();
if (err)
goto zstd_err;
err = z_erofs_gbuf_init();
if (err)
goto gbuf_err;
err = z_erofs_init_zip_subsystem();
if (err) if (err)
goto zip_err; goto zip_err;
@@ -882,16 +866,8 @@ static int __init erofs_module_init(void)
fs_err: fs_err:
erofs_exit_sysfs(); erofs_exit_sysfs();
sysfs_err: sysfs_err:
z_erofs_exit_zip_subsystem(); z_erofs_exit_subsystem();
zip_err: zip_err:
z_erofs_gbuf_exit();
gbuf_err:
z_erofs_zstd_exit();
zstd_err:
z_erofs_deflate_exit();
deflate_err:
z_erofs_lzma_exit();
lzma_err:
erofs_exit_shrinker(); erofs_exit_shrinker();
shrinker_err: shrinker_err:
kmem_cache_destroy(erofs_inode_cachep); kmem_cache_destroy(erofs_inode_cachep);
@@ -906,13 +882,9 @@ static void __exit erofs_module_exit(void)
rcu_barrier(); rcu_barrier();
erofs_exit_sysfs(); erofs_exit_sysfs();
z_erofs_exit_zip_subsystem(); z_erofs_exit_subsystem();
z_erofs_zstd_exit();
z_erofs_deflate_exit();
z_erofs_lzma_exit();
erofs_exit_shrinker(); erofs_exit_shrinker();
kmem_cache_destroy(erofs_inode_cachep); kmem_cache_destroy(erofs_inode_cachep);
z_erofs_gbuf_exit();
} }
static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf) static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf)
...
@@ -19,10 +19,7 @@
typedef void *z_erofs_next_pcluster_t; typedef void *z_erofs_next_pcluster_t;
struct z_erofs_bvec { struct z_erofs_bvec {
union { struct page *page;
struct page *page;
struct folio *folio;
};
int offset; int offset;
unsigned int end; unsigned int end;
}; };
@@ -449,44 +446,51 @@ static inline int erofs_cpu_hotplug_init(void) { return 0; }
static inline void erofs_cpu_hotplug_destroy(void) {} static inline void erofs_cpu_hotplug_destroy(void) {}
#endif #endif
void z_erofs_exit_zip_subsystem(void) void z_erofs_exit_subsystem(void)
{ {
erofs_cpu_hotplug_destroy(); erofs_cpu_hotplug_destroy();
erofs_destroy_percpu_workers(); erofs_destroy_percpu_workers();
destroy_workqueue(z_erofs_workqueue); destroy_workqueue(z_erofs_workqueue);
z_erofs_destroy_pcluster_pool(); z_erofs_destroy_pcluster_pool();
z_erofs_exit_decompressor();
} }
int __init z_erofs_init_zip_subsystem(void) int __init z_erofs_init_subsystem(void)
{ {
int err = z_erofs_create_pcluster_pool(); int err = z_erofs_init_decompressor();
if (err) if (err)
goto out_error_pcluster_pool; goto err_decompressor;
err = z_erofs_create_pcluster_pool();
if (err)
goto err_pcluster_pool;
z_erofs_workqueue = alloc_workqueue("erofs_worker", z_erofs_workqueue = alloc_workqueue("erofs_worker",
WQ_UNBOUND | WQ_HIGHPRI, num_possible_cpus()); WQ_UNBOUND | WQ_HIGHPRI, num_possible_cpus());
if (!z_erofs_workqueue) { if (!z_erofs_workqueue) {
err = -ENOMEM; err = -ENOMEM;
goto out_error_workqueue_init; goto err_workqueue_init;
} }
err = erofs_init_percpu_workers(); err = erofs_init_percpu_workers();
if (err) if (err)
goto out_error_pcpu_worker; goto err_pcpu_worker;
err = erofs_cpu_hotplug_init(); err = erofs_cpu_hotplug_init();
if (err < 0) if (err < 0)
goto out_error_cpuhp_init; goto err_cpuhp_init;
return err; return err;
out_error_cpuhp_init: err_cpuhp_init:
erofs_destroy_percpu_workers(); erofs_destroy_percpu_workers();
out_error_pcpu_worker: err_pcpu_worker:
destroy_workqueue(z_erofs_workqueue); destroy_workqueue(z_erofs_workqueue);
out_error_workqueue_init: err_workqueue_init:
z_erofs_destroy_pcluster_pool(); z_erofs_destroy_pcluster_pool();
out_error_pcluster_pool: err_pcluster_pool:
z_erofs_exit_decompressor();
err_decompressor:
return err; return err;
} }
@@ -617,32 +621,31 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE; fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
} }
/* called by erofs_shrinker to get rid of all cached compressed bvecs */ /* (erofs_shrinker) disconnect cached encoded data with pclusters */
int erofs_try_to_free_all_cached_folios(struct erofs_sb_info *sbi, int erofs_try_to_free_all_cached_folios(struct erofs_sb_info *sbi,
struct erofs_workgroup *grp) struct erofs_workgroup *grp)
{ {
struct z_erofs_pcluster *const pcl = struct z_erofs_pcluster *const pcl =
container_of(grp, struct z_erofs_pcluster, obj); container_of(grp, struct z_erofs_pcluster, obj);
unsigned int pclusterpages = z_erofs_pclusterpages(pcl); unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
struct folio *folio;
int i; int i;
DBG_BUGON(z_erofs_is_inline_pcluster(pcl)); DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
/* There is no actice user since the pcluster is now freezed */ /* Each cached folio contains one page unless bs > ps is supported */
for (i = 0; i < pclusterpages; ++i) { for (i = 0; i < pclusterpages; ++i) {
struct folio *folio = pcl->compressed_bvecs[i].folio; if (pcl->compressed_bvecs[i].page) {
folio = page_folio(pcl->compressed_bvecs[i].page);
if (!folio) /* Avoid reclaiming or migrating this folio */
continue; if (!folio_trylock(folio))
return -EBUSY;
/* Avoid reclaiming or migrating this folio */ if (!erofs_folio_is_managed(sbi, folio))
if (!folio_trylock(folio)) continue;
return -EBUSY; pcl->compressed_bvecs[i].page = NULL;
folio_detach_private(folio);
if (!erofs_folio_is_managed(sbi, folio)) folio_unlock(folio);
continue; }
pcl->compressed_bvecs[i].folio = NULL;
folio_detach_private(folio);
folio_unlock(folio);
} }
return 0; return 0;
} }
@@ -650,9 +653,9 @@ int erofs_try_to_free_all_cached_folios(struct erofs_sb_info *sbi,
static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp) static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp)
{ {
struct z_erofs_pcluster *pcl = folio_get_private(folio); struct z_erofs_pcluster *pcl = folio_get_private(folio);
unsigned int pclusterpages = z_erofs_pclusterpages(pcl); struct z_erofs_bvec *bvec = pcl->compressed_bvecs;
struct z_erofs_bvec *end = bvec + z_erofs_pclusterpages(pcl);
bool ret; bool ret;
int i;
if (!folio_test_private(folio)) if (!folio_test_private(folio))
return true; return true;
@@ -661,9 +664,9 @@ static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp)
spin_lock(&pcl->obj.lockref.lock); spin_lock(&pcl->obj.lockref.lock);
if (pcl->obj.lockref.count <= 0) { if (pcl->obj.lockref.count <= 0) {
DBG_BUGON(z_erofs_is_inline_pcluster(pcl)); DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
for (i = 0; i < pclusterpages; ++i) { for (; bvec < end; ++bvec) {
if (pcl->compressed_bvecs[i].folio == folio) { if (bvec->page && page_folio(bvec->page) == folio) {
pcl->compressed_bvecs[i].folio = NULL; bvec->page = NULL;
folio_detach_private(folio); folio_detach_private(folio);
ret = true; ret = true;
break; break;
@@ -925,7 +928,7 @@ static void z_erofs_pcluster_end(struct z_erofs_decompress_frontend *fe)
fe->pcl = NULL; fe->pcl = NULL;
} }
static int z_erofs_read_fragment(struct super_block *sb, struct page *page, static int z_erofs_read_fragment(struct super_block *sb, struct folio *folio,
unsigned int cur, unsigned int end, erofs_off_t pos) unsigned int cur, unsigned int end, erofs_off_t pos)
{ {
struct inode *packed_inode = EROFS_SB(sb)->packed_inode; struct inode *packed_inode = EROFS_SB(sb)->packed_inode;
@@ -938,113 +941,109 @@ static int z_erofs_read_fragment(struct super_block *sb, struct page *page,
buf.mapping = packed_inode->i_mapping; buf.mapping = packed_inode->i_mapping;
for (; cur < end; cur += cnt, pos += cnt) { for (; cur < end; cur += cnt, pos += cnt) {
cnt = min_t(unsigned int, end - cur, cnt = min(end - cur, sb->s_blocksize - erofs_blkoff(sb, pos));
sb->s_blocksize - erofs_blkoff(sb, pos));
src = erofs_bread(&buf, pos, EROFS_KMAP); src = erofs_bread(&buf, pos, EROFS_KMAP);
if (IS_ERR(src)) { if (IS_ERR(src)) {
erofs_put_metabuf(&buf); erofs_put_metabuf(&buf);
return PTR_ERR(src); return PTR_ERR(src);
} }
memcpy_to_page(page, cur, src, cnt); memcpy_to_folio(folio, cur, src, cnt);
} }
erofs_put_metabuf(&buf); erofs_put_metabuf(&buf);
return 0; return 0;
} }
static int z_erofs_scan_folio(struct z_erofs_decompress_frontend *fe, static int z_erofs_scan_folio(struct z_erofs_decompress_frontend *f,
struct folio *folio, bool ra) struct folio *folio, bool ra)
{ {
struct inode *const inode = fe->inode; struct inode *const inode = f->inode;
struct erofs_map_blocks *const map = &fe->map; struct erofs_map_blocks *const map = &f->map;
const loff_t offset = folio_pos(folio); const loff_t offset = folio_pos(folio);
const unsigned int bs = i_blocksize(inode), fs = folio_size(folio); const unsigned int bs = i_blocksize(inode);
bool tight = true, exclusive; unsigned int end = folio_size(folio), split = 0, cur, pgs;
unsigned int cur, end, len, split; bool tight, excl;
int err = 0; int err = 0;
tight = (bs == PAGE_SIZE);
z_erofs_onlinefolio_init(folio); z_erofs_onlinefolio_init(folio);
split = 0; do {
end = fs; if (offset + end - 1 < map->m_la ||
repeat: offset + end - 1 >= map->m_la + map->m_llen) {
if (offset + end - 1 < map->m_la || z_erofs_pcluster_end(f);
offset + end - 1 >= map->m_la + map->m_llen) { map->m_la = offset + end - 1;
z_erofs_pcluster_end(fe); map->m_llen = 0;
map->m_la = offset + end - 1; err = z_erofs_map_blocks_iter(inode, map, 0);
map->m_llen = 0; if (err)
err = z_erofs_map_blocks_iter(inode, map, 0); break;
if (err) }
goto out;
}
cur = offset > map->m_la ? 0 : map->m_la - offset;
/* bump split parts first to avoid several separate cases */
++split;
if (!(map->m_flags & EROFS_MAP_MAPPED)) {
folio_zero_segment(folio, cur, end);
tight = false;
goto next_part;
}
if (map->m_flags & EROFS_MAP_FRAGMENT) {
erofs_off_t fpos = offset + cur - map->m_la;
len = min_t(unsigned int, map->m_llen - fpos, end - cur); cur = offset > map->m_la ? 0 : map->m_la - offset;
err = z_erofs_read_fragment(inode->i_sb, &folio->page, cur, pgs = round_down(cur, PAGE_SIZE);
cur + len, EROFS_I(inode)->z_fragmentoff + fpos); /* bump split parts first to avoid several separate cases */
if (err) ++split;
goto out;
tight = false; if (!(map->m_flags & EROFS_MAP_MAPPED)) {
goto next_part; folio_zero_segment(folio, cur, end);
} tight = false;
} else if (map->m_flags & EROFS_MAP_FRAGMENT) {
erofs_off_t fpos = offset + cur - map->m_la;
err = z_erofs_read_fragment(inode->i_sb, folio, cur,
cur + min(map->m_llen - fpos, end - cur),
EROFS_I(inode)->z_fragmentoff + fpos);
if (err)
break;
tight = false;
} else {
if (!f->pcl) {
err = z_erofs_pcluster_begin(f);
if (err)
break;
f->pcl->besteffort |= !ra;
}
if (!fe->pcl) { pgs = round_down(end - 1, PAGE_SIZE);
err = z_erofs_pcluster_begin(fe); /*
if (err) * Ensure this partial page belongs to this submit chain
goto out; * rather than other concurrent submit chains or
fe->pcl->besteffort |= !ra; * noio(bypass) chains since those chains are handled
} * asynchronously thus it cannot be used for inplace I/O
* or bvpage (should be processed in the strict order.)
*/
tight &= (f->mode >= Z_EROFS_PCLUSTER_FOLLOWED);
excl = false;
if (cur <= pgs) {
excl = (split <= 1) || tight;
cur = pgs;
}
/* err = z_erofs_attach_page(f, &((struct z_erofs_bvec) {
* Ensure the current partial folio belongs to this submit chain rather .page = folio_page(folio, pgs >> PAGE_SHIFT),
* than other concurrent submit chains or the noio(bypass) chain since .offset = offset + pgs - map->m_la,
* those chains are handled asynchronously thus the folio cannot be used .end = end - pgs, }), excl);
* for inplace I/O or bvpage (should be processed in a strict order.) if (err)
*/ break;
tight &= (fe->mode > Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE);
exclusive = (!cur && ((split <= 1) || (tight && bs == fs)));
if (cur)
tight &= (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED);
err = z_erofs_attach_page(fe, &((struct z_erofs_bvec) {
.page = &folio->page,
.offset = offset - map->m_la,
.end = end,
}), exclusive);
if (err)
goto out;
z_erofs_onlinefolio_split(folio);
if (fe->pcl->pageofs_out != (map->m_la & ~PAGE_MASK))
fe->pcl->multibases = true;
if (fe->pcl->length < offset + end - map->m_la) {
fe->pcl->length = offset + end - map->m_la;
fe->pcl->pageofs_out = map->m_la & ~PAGE_MASK;
}
if ((map->m_flags & EROFS_MAP_FULL_MAPPED) &&
!(map->m_flags & EROFS_MAP_PARTIAL_REF) &&
fe->pcl->length == map->m_llen)
fe->pcl->partial = false;
next_part:
/* shorten the remaining extent to update progress */
map->m_llen = offset + cur - map->m_la;
map->m_flags &= ~EROFS_MAP_FULL_MAPPED;
end = cur;
if (end > 0)
goto repeat;
out: z_erofs_onlinefolio_split(folio);
if (f->pcl->pageofs_out != (map->m_la & ~PAGE_MASK))
f->pcl->multibases = true;
if (f->pcl->length < offset + end - map->m_la) {
f->pcl->length = offset + end - map->m_la;
f->pcl->pageofs_out = map->m_la & ~PAGE_MASK;
}
if ((map->m_flags & EROFS_MAP_FULL_MAPPED) &&
!(map->m_flags & EROFS_MAP_PARTIAL_REF) &&
f->pcl->length == map->m_llen)
f->pcl->partial = false;
}
/* shorten the remaining extent to update progress */
map->m_llen = offset + cur - map->m_la;
map->m_flags &= ~EROFS_MAP_FULL_MAPPED;
if (cur <= pgs) {
split = cur < pgs;
tight = (bs == PAGE_SIZE);
}
} while ((end = cur) > 0);
z_erofs_onlinefolio_end(folio, err); z_erofs_onlinefolio_end(folio, err);
return err; return err;
} }
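
The reworked z_erofs_scan_folio() above walks a (possibly multi-page) folio backwards: it maps the logical extent that covers the last unprocessed byte, handles the folio bytes that extent covers, then shrinks `end` to the extent start and repeats until nothing is left. A toy model of just that walk, with a made-up fixed-size lookup_extent() standing in for z_erofs_map_blocks_iter():

#include <stdio.h>

struct extent {
	unsigned long la;	/* logical (file) offset the extent starts at */
	unsigned long len;
};

/* hypothetical stand-in for z_erofs_map_blocks_iter(): extent containing @pos */
static struct extent lookup_extent(unsigned long pos)
{
	struct extent ex = { .la = pos - pos % 12288, .len = 12288 };	/* toy 12KiB extents */

	return ex;
}

/* Walk folio bytes [0, fsize) backwards, one extent at a time, the way
 * z_erofs_scan_folio() does; fpos is the folio's position in the file. */
static void scan_folio(unsigned long fpos, unsigned int fsize)
{
	unsigned int end = fsize, cur;

	do {
		struct extent ex = lookup_extent(fpos + end - 1);

		cur = ex.la > fpos ? ex.la - fpos : 0;
		printf("extent @%lu -> folio bytes [%u, %u)\n", ex.la, cur, end);
		/* ...attach [cur, end) to that extent's pcluster here... */
	} while ((end = cur) > 0);
}

int main(void)
{
	scan_folio(20480, 65536);	/* a 64KiB folio at file offset 20KiB */
	return 0;
}

The real function additionally rounds the cursor down to a page boundary so that each struct z_erofs_bvec it attaches covers a single page of the folio, which is what the folio_page(folio, pgs >> PAGE_SHIFT) hunk above implements.
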
@@ -1066,7 +1065,7 @@ static bool z_erofs_is_sync_decompress(struct erofs_sb_info *sbi,
static bool z_erofs_page_is_invalidated(struct page *page) static bool z_erofs_page_is_invalidated(struct page *page)
{ {
return !page->mapping && !z_erofs_is_shortlived_page(page); return !page_folio(page)->mapping && !z_erofs_is_shortlived_page(page);
} }
struct z_erofs_decompress_backend { struct z_erofs_decompress_backend {
@@ -1221,8 +1220,8 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
struct z_erofs_pcluster *pcl = be->pcl; struct z_erofs_pcluster *pcl = be->pcl;
unsigned int pclusterpages = z_erofs_pclusterpages(pcl); unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
const struct z_erofs_decompressor *decomp = const struct z_erofs_decompressor *decomp =
&erofs_decompressors[pcl->algorithmformat]; z_erofs_decomp[pcl->algorithmformat];
int i, err2; int i, j, jtop, err2;
struct page *page; struct page *page;
bool overlapped; bool overlapped;
@@ -1280,10 +1279,9 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
WRITE_ONCE(pcl->compressed_bvecs[0].page, NULL); WRITE_ONCE(pcl->compressed_bvecs[0].page, NULL);
put_page(page); put_page(page);
} else { } else {
/* managed folios are still left in compressed_bvecs[] */
for (i = 0; i < pclusterpages; ++i) { for (i = 0; i < pclusterpages; ++i) {
/* consider shortlived pages added when decompressing */
page = be->compressed_pages[i]; page = be->compressed_pages[i];
if (!page || if (!page ||
erofs_folio_is_managed(sbi, page_folio(page))) erofs_folio_is_managed(sbi, page_folio(page)))
continue; continue;
@@ -1294,21 +1292,31 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
if (be->compressed_pages < be->onstack_pages || if (be->compressed_pages < be->onstack_pages ||
be->compressed_pages >= be->onstack_pages + Z_EROFS_ONSTACK_PAGES) be->compressed_pages >= be->onstack_pages + Z_EROFS_ONSTACK_PAGES)
kvfree(be->compressed_pages); kvfree(be->compressed_pages);
z_erofs_fill_other_copies(be, err);
jtop = 0;
z_erofs_fill_other_copies(be, err);
for (i = 0; i < be->nr_pages; ++i) { for (i = 0; i < be->nr_pages; ++i) {
page = be->decompressed_pages[i]; page = be->decompressed_pages[i];
if (!page) if (!page)
continue; continue;
DBG_BUGON(z_erofs_page_is_invalidated(page)); DBG_BUGON(z_erofs_page_is_invalidated(page));
if (!z_erofs_is_shortlived_page(page)) {
/* recycle all individual short-lived pages */ z_erofs_onlinefolio_end(page_folio(page), err);
if (z_erofs_put_shortlivedpage(be->pagepool, page))
continue; continue;
z_erofs_onlinefolio_end(page_folio(page), err); }
if (pcl->algorithmformat != Z_EROFS_COMPRESSION_LZ4) {
erofs_pagepool_add(be->pagepool, page);
continue;
}
for (j = 0; j < jtop && be->decompressed_pages[j] != page; ++j)
;
if (j >= jtop) /* this bounce page is newly detected */
be->decompressed_pages[jtop++] = page;
} }
while (jtop)
erofs_pagepool_add(be->pagepool,
be->decompressed_pages[--jtop]);
if (be->decompressed_pages != be->onstack_pages) if (be->decompressed_pages != be->onstack_pages)
kvfree(be->decompressed_pages); kvfree(be->decompressed_pages);
@@ -1419,7 +1427,7 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec,
bool tocache = false; bool tocache = false;
struct z_erofs_bvec zbv; struct z_erofs_bvec zbv;
struct address_space *mapping; struct address_space *mapping;
struct page *page; struct folio *folio;
int bs = i_blocksize(f->inode); int bs = i_blocksize(f->inode);
/* Except for inplace folios, the entire folio can be used for I/Os */ /* Except for inplace folios, the entire folio can be used for I/Os */
@@ -1429,23 +1437,25 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec,
spin_lock(&pcl->obj.lockref.lock); spin_lock(&pcl->obj.lockref.lock);
zbv = pcl->compressed_bvecs[nr]; zbv = pcl->compressed_bvecs[nr];
spin_unlock(&pcl->obj.lockref.lock); spin_unlock(&pcl->obj.lockref.lock);
if (!zbv.folio) if (!zbv.page)
goto out_allocfolio; goto out_allocfolio;
bvec->bv_page = &zbv.folio->page; bvec->bv_page = zbv.page;
DBG_BUGON(z_erofs_is_shortlived_page(bvec->bv_page)); DBG_BUGON(z_erofs_is_shortlived_page(bvec->bv_page));
folio = page_folio(zbv.page);
/* /*
* Handle preallocated cached folios. We tried to allocate such folios * Handle preallocated cached folios. We tried to allocate such folios
* without triggering direct reclaim. If allocation failed, inplace * without triggering direct reclaim. If allocation failed, inplace
* file-backed folios will be used instead. * file-backed folios will be used instead.
*/ */
if (zbv.folio->private == (void *)Z_EROFS_PREALLOCATED_PAGE) { if (folio->private == (void *)Z_EROFS_PREALLOCATED_PAGE) {
zbv.folio->private = 0; folio->private = 0;
tocache = true; tocache = true;
goto out_tocache; goto out_tocache;
} }
mapping = READ_ONCE(zbv.folio->mapping); mapping = READ_ONCE(folio->mapping);
/* /*
* File-backed folios for inplace I/Os are all locked steady, * File-backed folios for inplace I/Os are all locked steady,
* therefore it is impossible for `mapping` to be NULL. * therefore it is impossible for `mapping` to be NULL.
...@@ -1457,21 +1467,21 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec, ...@@ -1457,21 +1467,21 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec,
return; return;
} }
folio_lock(zbv.folio); folio_lock(folio);
if (zbv.folio->mapping == mc) { if (folio->mapping == mc) {
/* /*
* The cached folio is still in managed cache but without * The cached folio is still in managed cache but without
* a valid `->private` pcluster hint. Let's reconnect them. * a valid `->private` pcluster hint. Let's reconnect them.
*/ */
if (!folio_test_private(zbv.folio)) { if (!folio_test_private(folio)) {
folio_attach_private(zbv.folio, pcl); folio_attach_private(folio, pcl);
/* compressed_bvecs[] already takes a ref before */ /* compressed_bvecs[] already takes a ref before */
folio_put(zbv.folio); folio_put(folio);
} }
/* no need to submit if it is already up-to-date */ /* no need to submit if it is already up-to-date */
if (folio_test_uptodate(zbv.folio)) { if (folio_test_uptodate(folio)) {
folio_unlock(zbv.folio); folio_unlock(folio);
bvec->bv_page = NULL; bvec->bv_page = NULL;
} }
return; return;
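
For reference, the reconnect step above relies on folio_attach_private() taking its own folio reference for ->private; as the in-diff comment notes, compressed_bvecs[] already pins this folio, so the duplicate reference is dropped right away. A condensed restatement with a hypothetical helper name:

/* Reattach the pcluster hint to a managed-cache folio that lost it. */
static void z_erofs_reattach_pcluster_hint(struct folio *folio, void *pcl)
{
        if (folio_test_private(folio))
                return;                 /* ->private hint still intact */
        folio_attach_private(folio, pcl);       /* takes a folio reference */
        folio_put(folio);               /* compressed_bvecs[] already holds one */
}
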
...@@ -1481,32 +1491,31 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec, ...@@ -1481,32 +1491,31 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec,
* It has been truncated, so it's unsafe to reuse this one. Let's * It has been truncated, so it's unsafe to reuse this one. Let's
* allocate a new page for compressed data. * allocate a new page for compressed data.
*/ */
DBG_BUGON(zbv.folio->mapping); DBG_BUGON(folio->mapping);
tocache = true; tocache = true;
folio_unlock(zbv.folio); folio_unlock(folio);
folio_put(zbv.folio); folio_put(folio);
out_allocfolio: out_allocfolio:
page = erofs_allocpage(&f->pagepool, gfp | __GFP_NOFAIL); zbv.page = erofs_allocpage(&f->pagepool, gfp | __GFP_NOFAIL);
spin_lock(&pcl->obj.lockref.lock); spin_lock(&pcl->obj.lockref.lock);
if (pcl->compressed_bvecs[nr].folio) { if (pcl->compressed_bvecs[nr].page) {
erofs_pagepool_add(&f->pagepool, page); erofs_pagepool_add(&f->pagepool, zbv.page);
spin_unlock(&pcl->obj.lockref.lock); spin_unlock(&pcl->obj.lockref.lock);
cond_resched(); cond_resched();
goto repeat; goto repeat;
} }
pcl->compressed_bvecs[nr].folio = zbv.folio = page_folio(page); bvec->bv_page = pcl->compressed_bvecs[nr].page = zbv.page;
folio = page_folio(zbv.page);
/* first mark it as a temporary shortlived folio (now 1 ref) */
folio->private = (void *)Z_EROFS_SHORTLIVED_PAGE;
spin_unlock(&pcl->obj.lockref.lock); spin_unlock(&pcl->obj.lockref.lock);
bvec->bv_page = page;
out_tocache: out_tocache:
if (!tocache || bs != PAGE_SIZE || if (!tocache || bs != PAGE_SIZE ||
filemap_add_folio(mc, zbv.folio, pcl->obj.index + nr, gfp)) { filemap_add_folio(mc, folio, pcl->obj.index + nr, gfp))
/* turn into a temporary shortlived folio (1 ref) */
zbv.folio->private = (void *)Z_EROFS_SHORTLIVED_PAGE;
return; return;
} folio_attach_private(folio, pcl);
folio_attach_private(zbv.folio, pcl);
/* drop a refcount added by allocpage (then 2 refs in total here) */ /* drop a refcount added by allocpage (then 2 refs in total here) */
folio_put(zbv.folio); folio_put(folio);
} }
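
One behavioral change worth noting in the hunk above: a freshly allocated compressed page is now marked Z_EROFS_SHORTLIVED_PAGE immediately (under the pcluster lock) and is only promoted to the managed cache if filemap_add_folio() succeeds, instead of being demoted to short-lived on failure as before. A sketch of the promotion step under that reading (the helper name is made up for illustration):

/* Promote a short-lived bounce folio to the managed cache if possible. */
static void z_erofs_try_promote_folio(struct address_space *mc,
                                      struct folio *folio, pgoff_t index,
                                      void *pcl, gfp_t gfp)
{
        if (filemap_add_folio(mc, folio, index, gfp))
                return;                 /* keep it short-lived on failure */
        folio_attach_private(folio, pcl);
        folio_put(folio);               /* drop the allocation reference */
}
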
static struct z_erofs_decompressqueue *jobqueue_init(struct super_block *sb, static struct z_erofs_decompressqueue *jobqueue_init(struct super_block *sb,
...@@ -1767,7 +1776,6 @@ static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f, ...@@ -1767,7 +1776,6 @@ static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
end = round_up(end, PAGE_SIZE); end = round_up(end, PAGE_SIZE);
} else { } else {
end = round_up(map->m_la, PAGE_SIZE); end = round_up(map->m_la, PAGE_SIZE);
if (!map->m_llen) if (!map->m_llen)
return; return;
} }
...@@ -1775,15 +1783,15 @@ static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f, ...@@ -1775,15 +1783,15 @@ static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
cur = map->m_la + map->m_llen - 1; cur = map->m_la + map->m_llen - 1;
while ((cur >= end) && (cur < i_size_read(inode))) { while ((cur >= end) && (cur < i_size_read(inode))) {
pgoff_t index = cur >> PAGE_SHIFT; pgoff_t index = cur >> PAGE_SHIFT;
struct page *page; struct folio *folio;
page = erofs_grab_cache_page_nowait(inode->i_mapping, index); folio = erofs_grab_folio_nowait(inode->i_mapping, index);
if (page) { if (!IS_ERR_OR_NULL(folio)) {
if (PageUptodate(page)) if (folio_test_uptodate(folio))
unlock_page(page); folio_unlock(folio);
else else
z_erofs_scan_folio(f, page_folio(page), !!rac); z_erofs_scan_folio(f, folio, !!rac);
put_page(page); folio_put(folio);
} }
if (cur < PAGE_SIZE) if (cur < PAGE_SIZE)
......
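
The readmore loop now works on folios directly; note that the nowait grab helper may return an ERR_PTR rather than NULL on failure, hence the IS_ERR_OR_NULL() check replacing the old NULL test. A plausible shape for such a wrapper built on __filemap_get_folio() (an assumption for illustration, not copied from the tree):

static inline struct folio *grab_folio_nowait(struct address_space *mapping,
                                              pgoff_t index)
{
        /* never block on reclaim while opportunistically extending readahead */
        return __filemap_get_folio(mapping, index,
                                   FGP_LOCK | FGP_CREAT | FGP_NOFS | FGP_NOWAIT,
                                   readahead_gfp_mask(mapping) & ~__GFP_RECLAIM);
}
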
...@@ -686,7 +686,7 @@ int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map, ...@@ -686,7 +686,7 @@ int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
struct erofs_inode *const vi = EROFS_I(inode); struct erofs_inode *const vi = EROFS_I(inode);
int err = 0; int err = 0;
trace_z_erofs_map_blocks_iter_enter(inode, map, flags); trace_erofs_map_blocks_enter(inode, map, flags);
/* when trying to read beyond EOF, leave it unmapped */ /* when trying to read beyond EOF, leave it unmapped */
if (map->m_la >= inode->i_size) { if (map->m_la >= inode->i_size) {
...@@ -713,7 +713,7 @@ int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map, ...@@ -713,7 +713,7 @@ int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
out: out:
if (err) if (err)
map->m_llen = 0; map->m_llen = 0;
trace_z_erofs_map_blocks_iter_exit(inode, map, flags, err); trace_erofs_map_blocks_exit(inode, map, flags, err);
return err; return err;
} }
......
...@@ -143,7 +143,8 @@ TRACE_EVENT(erofs_readpages, ...@@ -143,7 +143,8 @@ TRACE_EVENT(erofs_readpages,
__entry->raw) __entry->raw)
); );
DECLARE_EVENT_CLASS(erofs__map_blocks_enter, TRACE_EVENT(erofs_map_blocks_enter,
TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, TP_PROTO(struct inode *inode, struct erofs_map_blocks *map,
unsigned int flags), unsigned int flags),
...@@ -171,21 +172,8 @@ DECLARE_EVENT_CLASS(erofs__map_blocks_enter, ...@@ -171,21 +172,8 @@ DECLARE_EVENT_CLASS(erofs__map_blocks_enter,
__entry->flags ? show_map_flags(__entry->flags) : "NULL") __entry->flags ? show_map_flags(__entry->flags) : "NULL")
); );
DEFINE_EVENT(erofs__map_blocks_enter, erofs_map_blocks_enter, TRACE_EVENT(erofs_map_blocks_exit,
TP_PROTO(struct inode *inode, struct erofs_map_blocks *map,
unsigned flags),
TP_ARGS(inode, map, flags)
);
DEFINE_EVENT(erofs__map_blocks_enter, z_erofs_map_blocks_iter_enter,
TP_PROTO(struct inode *inode, struct erofs_map_blocks *map,
unsigned int flags),
TP_ARGS(inode, map, flags)
);
DECLARE_EVENT_CLASS(erofs__map_blocks_exit,
TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, TP_PROTO(struct inode *inode, struct erofs_map_blocks *map,
unsigned int flags, int ret), unsigned int flags, int ret),
...@@ -223,20 +211,6 @@ DECLARE_EVENT_CLASS(erofs__map_blocks_exit, ...@@ -223,20 +211,6 @@ DECLARE_EVENT_CLASS(erofs__map_blocks_exit,
show_mflags(__entry->mflags), __entry->ret) show_mflags(__entry->mflags), __entry->ret)
); );
DEFINE_EVENT(erofs__map_blocks_exit, erofs_map_blocks_exit,
TP_PROTO(struct inode *inode, struct erofs_map_blocks *map,
unsigned flags, int ret),
TP_ARGS(inode, map, flags, ret)
);
DEFINE_EVENT(erofs__map_blocks_exit, z_erofs_map_blocks_iter_exit,
TP_PROTO(struct inode *inode, struct erofs_map_blocks *map,
unsigned int flags, int ret),
TP_ARGS(inode, map, flags, ret)
);
TRACE_EVENT(erofs_destroy_inode, TRACE_EVENT(erofs_destroy_inode,
TP_PROTO(struct inode *inode), TP_PROTO(struct inode *inode),
......
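
The tracepoint cleanup above folds each event class back into a single event: with the z_erofs_map_blocks_iter_* variants gone, every class has exactly one user left, and TRACE_EVENT() is the standard one-user shorthand for DECLARE_EVENT_CLASS() plus DEFINE_EVENT(). A generic illustration of the resulting form (the event and field names are made up, not the erofs entries):

TRACE_EVENT(sample_map_blocks,
        TP_PROTO(struct inode *inode, unsigned int flags),
        TP_ARGS(inode, flags),
        TP_STRUCT__entry(
                __field(ino_t, ino)
                __field(unsigned int, flags)
        ),
        TP_fast_assign(
                __entry->ino = inode->i_ino;
                __entry->flags = flags;
        ),
        TP_printk("ino %lu flags %u",
                  (unsigned long)__entry->ino, __entry->flags)
);
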