Commit d67aee76 authored by Gao Xiang

erofs: tidy up z_erofs_lz4_decompress

To prepare for the upcoming ztailpacking feature and further
cleanups, introduce a dedicated z_erofs_lz4_decompress_ctx to hold
the decompression context, including inpages, outpages and oend,
which are used frequently by the LZ4 decompressor.

No logic changes.

Link: https://lore.kernel.org/r/20211228054604.114518-2-hsiangkao@linux.alibaba.com
Reviewed-by: Chao Yu <chao@kernel.org>
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
parent 469407a3
--- a/fs/erofs/decompressor.c
+++ b/fs/erofs/decompressor.c
@@ -16,6 +16,14 @@
 #define LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize)	(((srcsize) >> 8) + 32)
 #endif
 
+struct z_erofs_lz4_decompress_ctx {
+	struct z_erofs_decompress_req *rq;
+	/* # of encoded, decoded pages */
+	unsigned int inpages, outpages;
+	/* decoded block total length (used for in-place decompression) */
+	unsigned int oend;
+};
+
 int z_erofs_load_lz4_config(struct super_block *sb,
 			    struct erofs_super_block *dsb,
 			    struct z_erofs_lz4_cfgs *lz4, int size)
@@ -56,11 +64,10 @@ int z_erofs_load_lz4_config(struct super_block *sb,
  * Fill all gaps with bounce pages if it's a sparse page list. Also check if
  * all physical pages are consecutive, which can be seen for moderate CR.
  */
-static int z_erofs_lz4_prepare_dstpages(struct z_erofs_decompress_req *rq,
+static int z_erofs_lz4_prepare_dstpages(struct z_erofs_lz4_decompress_ctx *ctx,
 					struct page **pagepool)
 {
-	const unsigned int nr =
-		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
+	struct z_erofs_decompress_req *rq = ctx->rq;
 	struct page *availables[LZ4_MAX_DISTANCE_PAGES] = { NULL };
 	unsigned long bounced[DIV_ROUND_UP(LZ4_MAX_DISTANCE_PAGES,
 					   BITS_PER_LONG)] = { 0 };
@@ -70,7 +77,7 @@ static int z_erofs_lz4_prepare_dstpages(struct z_erofs_decompress_req *rq,
 	unsigned int i, j, top;
 
 	top = 0;
-	for (i = j = 0; i < nr; ++i, ++j) {
+	for (i = j = 0; i < ctx->outpages; ++i, ++j) {
 		struct page *const page = rq->out[i];
 		struct page *victim;
 
@@ -112,41 +119,36 @@ static int z_erofs_lz4_prepare_dstpages(struct z_erofs_decompress_req *rq,
 	return kaddr ? 1 : 0;
 }
 
-static void *z_erofs_lz4_handle_inplace_io(struct z_erofs_decompress_req *rq,
+static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx,
 			void *inpage, unsigned int *inputmargin, int *maptype,
 			bool support_0padding)
 {
-	unsigned int nrpages_in, nrpages_out;
-	unsigned int ofull, oend, inputsize, total, i, j;
+	struct z_erofs_decompress_req *rq = ctx->rq;
+	unsigned int omargin, total, i, j;
 	struct page **in;
 	void *src, *tmp;
 
-	inputsize = rq->inputsize;
-	nrpages_in = PAGE_ALIGN(inputsize) >> PAGE_SHIFT;
-	oend = rq->pageofs_out + rq->outputsize;
-	ofull = PAGE_ALIGN(oend);
-	nrpages_out = ofull >> PAGE_SHIFT;
-
 	if (rq->inplace_io) {
+		omargin = PAGE_ALIGN(ctx->oend) - ctx->oend;
 		if (rq->partial_decoding || !support_0padding ||
-		    ofull - oend < LZ4_DECOMPRESS_INPLACE_MARGIN(inputsize))
+		    omargin < LZ4_DECOMPRESS_INPLACE_MARGIN(rq->inputsize))
 			goto docopy;
 
-		for (i = 0; i < nrpages_in; ++i) {
+		for (i = 0; i < ctx->inpages; ++i) {
 			DBG_BUGON(rq->in[i] == NULL);
-			for (j = 0; j < nrpages_out - nrpages_in + i; ++j)
+			for (j = 0; j < ctx->outpages - ctx->inpages + i; ++j)
 				if (rq->out[j] == rq->in[i])
 					goto docopy;
 		}
 	}
 
-	if (nrpages_in <= 1) {
+	if (ctx->inpages <= 1) {
 		*maptype = 0;
 		return inpage;
 	}
 	kunmap_atomic(inpage);
 	might_sleep();
-	src = erofs_vm_map_ram(rq->in, nrpages_in);
+	src = erofs_vm_map_ram(rq->in, ctx->inpages);
 	if (!src)
 		return ERR_PTR(-ENOMEM);
 	*maptype = 1;
@@ -155,7 +157,7 @@ static void *z_erofs_lz4_handle_inplace_io(struct z_erofs_decompress_req *rq,
 docopy:
 	/* Or copy compressed data which can be overlapped to per-CPU buffer */
 	in = rq->in;
-	src = erofs_get_pcpubuf(nrpages_in);
+	src = erofs_get_pcpubuf(ctx->inpages);
 	if (!src) {
 		DBG_BUGON(1);
 		kunmap_atomic(inpage);
@@ -182,9 +184,10 @@ static void *z_erofs_lz4_handle_inplace_io(struct z_erofs_decompress_req *rq,
 	return src;
 }
 
-static int z_erofs_lz4_decompress_mem(struct z_erofs_decompress_req *rq,
+static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
 				      u8 *out)
 {
+	struct z_erofs_decompress_req *rq = ctx->rq;
 	unsigned int inputmargin;
 	u8 *headpage, *src;
 	bool support_0padding;
@@ -210,8 +213,8 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_decompress_req *rq,
 	}
 
 	rq->inputsize -= inputmargin;
-	src = z_erofs_lz4_handle_inplace_io(rq, headpage, &inputmargin,
-					    &maptype, support_0padding);
+	src = z_erofs_lz4_handle_overlap(ctx, headpage, &inputmargin,
+					 &maptype, support_0padding);
 	if (IS_ERR(src))
 		return PTR_ERR(src);
 
@@ -240,9 +243,9 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_decompress_req *rq,
 	}
 
 	if (maptype == 0) {
-		kunmap_atomic(src);
+		kunmap_atomic(headpage);
 	} else if (maptype == 1) {
-		vm_unmap_ram(src, PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT);
+		vm_unmap_ram(src, ctx->inpages);
 	} else if (maptype == 2) {
 		erofs_put_pcpubuf(src);
 	} else {
@@ -255,14 +258,18 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_decompress_req *rq,
 static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
 				  struct page **pagepool)
 {
-	const unsigned int nrpages_out =
-		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
+	struct z_erofs_lz4_decompress_ctx ctx;
 	unsigned int dst_maptype;
 	void *dst;
 	int ret;
 
+	ctx.rq = rq;
+	ctx.oend = rq->pageofs_out + rq->outputsize;
+	ctx.outpages = PAGE_ALIGN(ctx.oend) >> PAGE_SHIFT;
+	ctx.inpages = PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT;
+
 	/* one optimized fast path only for non bigpcluster cases yet */
-	if (rq->inputsize <= PAGE_SIZE && nrpages_out == 1 && !rq->inplace_io) {
+	if (ctx.inpages == 1 && ctx.outpages == 1 && !rq->inplace_io) {
 		DBG_BUGON(!*rq->out);
 		dst = kmap_atomic(*rq->out);
 		dst_maptype = 0;
@@ -270,27 +277,25 @@ static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
 	}
 
 	/* general decoding path which can be used for all cases */
-	ret = z_erofs_lz4_prepare_dstpages(rq, pagepool);
-	if (ret < 0)
+	ret = z_erofs_lz4_prepare_dstpages(&ctx, pagepool);
+	if (ret < 0) {
 		return ret;
-	if (ret) {
+	} else if (ret > 0) {
 		dst = page_address(*rq->out);
 		dst_maptype = 1;
-		goto dstmap_out;
+	} else {
+		dst = erofs_vm_map_ram(rq->out, ctx.outpages);
+		if (!dst)
+			return -ENOMEM;
+		dst_maptype = 2;
 	}
 
-	dst = erofs_vm_map_ram(rq->out, nrpages_out);
-	if (!dst)
-		return -ENOMEM;
-	dst_maptype = 2;
-
 dstmap_out:
-	ret = z_erofs_lz4_decompress_mem(rq, dst + rq->pageofs_out);
-
+	ret = z_erofs_lz4_decompress_mem(&ctx, dst + rq->pageofs_out);
 	if (!dst_maptype)
 		kunmap_atomic(dst);
 	else if (dst_maptype == 2)
-		vm_unmap_ram(dst, nrpages_out);
+		vm_unmap_ram(dst, ctx.outpages);
 	return ret;
 }
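
For readers skimming the diff: the refactor computes oend, outpages and inpages once in z_erofs_lz4_decompress() and threads them through the helpers via the new context, instead of re-deriving them in each function. The standalone C sketch below is not kernel code; the trimmed request struct and the demo helper are hypothetical stand-ins, while the macros and field names come from the diff itself.

/*
 * Standalone sketch (not kernel code) of the pattern introduced above:
 * values derived from the request are computed once and carried in a
 * small context struct next to the request pointer.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize)	(((srcsize) >> 8) + 32)

struct z_erofs_decompress_req {		/* reduced to the fields used here */
	unsigned int pageofs_out, outputsize, inputsize;
};

struct z_erofs_lz4_decompress_ctx {
	struct z_erofs_decompress_req *rq;
	/* # of encoded, decoded pages */
	unsigned int inpages, outpages;
	/* decoded block total length (used for in-place decompression) */
	unsigned int oend;
};

/* helpers take the ctx instead of re-deriving page counts from rq */
static int inplace_margin_ok(const struct z_erofs_lz4_decompress_ctx *ctx)
{
	/* slack between the decoded end and the next page boundary */
	unsigned int omargin = PAGE_ALIGN(ctx->oend) - ctx->oend;

	return omargin >= LZ4_DECOMPRESS_INPLACE_MARGIN(ctx->rq->inputsize);
}

int main(void)
{
	struct z_erofs_decompress_req rq = {
		.pageofs_out = 100, .outputsize = 8000, .inputsize = 4096,
	};
	struct z_erofs_lz4_decompress_ctx ctx = { .rq = &rq };

	/* computed once, exactly as in z_erofs_lz4_decompress() */
	ctx.oend = rq.pageofs_out + rq.outputsize;
	ctx.outpages = PAGE_ALIGN(ctx.oend) >> PAGE_SHIFT;
	ctx.inpages = PAGE_ALIGN(rq.inputsize) >> PAGE_SHIFT;

	printf("inpages=%u outpages=%u margin_ok=%d\n",
	       ctx.inpages, ctx.outpages, inplace_margin_ok(&ctx));
	return 0;
}

The in-place margin check benefits most from the cached values: with the block end kept in ctx->oend, the available slack is a single subtraction, omargin = PAGE_ALIGN(oend) - oend, compared against ((inputsize >> 8) + 32) bytes.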