Commit 598162d0 authored by Gao Xiang

erofs: support decompress big pcluster for lz4 backend

Prior to big pcluster, there was only one compressed page, so it was
easy to map. However, when big pcluster is enabled, more work needs to
be done to handle multiple compressed pages. In detail,

 - (maptype 0) if there is only one compressed page and no need to
   copy for inplace I/O, just map it directly as we did before;

 - (maptype 1) if there are multiple compressed pages and no need to
   copy for inplace I/O, vmap such compressed pages instead;

 - (maptype 2) if inplace I/O needs to be copied, use per-CPU
   buffers for decompression then (a cleanup sketch for all three
   cases follows this list).
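
For reference, the cleanup paired with each maptype, condensed from the
new exit path of z_erofs_lz4_decompress() in the diff below (the
wrapper function here is only for illustration, not part of the patch):

/* teardown matching each maptype; mirrors the exit path added below */
static int z_erofs_unmap_src(struct z_erofs_decompress_req *rq, void *src,
			     int maptype)
{
	if (maptype == 0)		/* one page, kmapped directly */
		kunmap_atomic(src);
	else if (maptype == 1)		/* multiple pages, vmapped */
		vm_unmap_ram(src, PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT);
	else if (maptype == 2)		/* copied into a per-CPU buffer */
		erofs_put_pcpubuf(src);
	else
		return -EFAULT;		/* impossible maptype */
	return 0;
}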

Another thing is how to detect whether inplace decompression is
feasible (it's still quite easy for non-big pclusters): apart from the
inplace margin calculation, the inplace I/O page reusing order also
needs to be considered for each compressed page. Currently, if the
compressed page is the xth page, it shouldn't be reused as any of the
output pages [0 ... nrpages_out - nrpages_in + x]; otherwise, a full
copy will be triggered.
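
That rule is exactly the nested scan this patch adds to
z_erofs_handle_inplace_io() in the diff below; a minimal standalone
sketch (the helper name is hypothetical):

/*
 * The i-th compressed page must not be reused as any output page in
 * [0, nrpages_out - nrpages_in + i); on overlap, fall back to a full
 * copy through the per-CPU buffer (maptype 2).
 */
static bool z_erofs_inplace_order_ok(struct page **in, unsigned int nrpages_in,
				     struct page **out,
				     unsigned int nrpages_out)
{
	unsigned int i, j;

	for (i = 0; i < nrpages_in; ++i)
		for (j = 0; j < nrpages_out - nrpages_in + i; ++j)
			if (out[j] == in[i])
				return false;	/* overlap detected */
	return true;
}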

Although there are some extra optimization ideas for this, I'd like to
make big pcluster work correctly first; it can obviously be further
optimized later since it has nothing to do with the on-disk format at
all.

Link: https://lore.kernel.org/r/20210407043927.10623-10-xiang@kernel.org
Acked-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Gao Xiang <hsiangkao@redhat.com>
parent b86269f4
--- a/fs/erofs/decompressor.c
+++ b/fs/erofs/decompressor.c
@@ -120,44 +120,85 @@ static int z_erofs_lz4_prepare_destpages(struct z_erofs_decompress_req *rq,
 	return kaddr ? 1 : 0;
 }
 
-static void *generic_copy_inplace_data(struct z_erofs_decompress_req *rq,
-				       u8 *src, unsigned int pageofs_in)
+static void *z_erofs_handle_inplace_io(struct z_erofs_decompress_req *rq,
+			void *inpage, unsigned int *inputmargin, int *maptype,
+			bool support_0padding)
 {
-	/*
-	 * if in-place decompression is ongoing, those decompressed
-	 * pages should be copied in order to avoid being overlapped.
-	 */
-	struct page **in = rq->in;
-	u8 *const tmp = erofs_get_pcpubuf(1);
-	u8 *tmpp = tmp;
-	unsigned int inlen = rq->inputsize - pageofs_in;
-	unsigned int count = min_t(uint, inlen, PAGE_SIZE - pageofs_in);
+	unsigned int nrpages_in, nrpages_out;
+	unsigned int ofull, oend, inputsize, total, i, j;
+	struct page **in;
+	void *src, *tmp;
+
+	inputsize = rq->inputsize;
+	nrpages_in = PAGE_ALIGN(inputsize) >> PAGE_SHIFT;
+	oend = rq->pageofs_out + rq->outputsize;
+	ofull = PAGE_ALIGN(oend);
+	nrpages_out = ofull >> PAGE_SHIFT;
+
+	if (rq->inplace_io) {
+		if (rq->partial_decoding || !support_0padding ||
+		    ofull - oend < LZ4_DECOMPRESS_INPLACE_MARGIN(inputsize))
+			goto docopy;
+
+		for (i = 0; i < nrpages_in; ++i) {
+			DBG_BUGON(rq->in[i] == NULL);
+			for (j = 0; j < nrpages_out - nrpages_in + i; ++j)
+				if (rq->out[j] == rq->in[i])
+					goto docopy;
+		}
+	}
 
-	while (tmpp < tmp + inlen) {
-		if (!src)
-			src = kmap_atomic(*in);
-		memcpy(tmpp, src + pageofs_in, count);
-		kunmap_atomic(src);
-		src = NULL;
-		tmpp += count;
-		pageofs_in = 0;
-		count = PAGE_SIZE;
-		++in;
-	}
-	return tmp;
+	if (nrpages_in <= 1) {
+		*maptype = 0;
+		return inpage;
+	}
+	kunmap_atomic(inpage);
+	might_sleep();
+	src = erofs_vm_map_ram(rq->in, nrpages_in);
+	if (!src)
+		return ERR_PTR(-ENOMEM);
+	*maptype = 1;
+	return src;
+
+docopy:
+	/* Or copy compressed data which can be overlapped to per-CPU buffer */
+	in = rq->in;
+	src = erofs_get_pcpubuf(nrpages_in);
+	if (!src) {
+		DBG_BUGON(1);
+		kunmap_atomic(inpage);
+		return ERR_PTR(-EFAULT);
+	}
+
+	tmp = src;
+	total = rq->inputsize;
+	while (total) {
+		unsigned int page_copycnt =
+			min_t(unsigned int, total, PAGE_SIZE - *inputmargin);
+
+		if (!inpage)
+			inpage = kmap_atomic(*in);
+		memcpy(tmp, inpage + *inputmargin, page_copycnt);
+		kunmap_atomic(inpage);
+		inpage = NULL;
+		tmp += page_copycnt;
+		total -= page_copycnt;
+		++in;
+		*inputmargin = 0;
+	}
+	*maptype = 2;
+	return src;
 }
 
 static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq, u8 *out)
 {
-	unsigned int inputmargin, inlen;
-	u8 *src;
-	bool copied, support_0padding;
-	int ret;
-
-	if (rq->inputsize > PAGE_SIZE)
-		return -EOPNOTSUPP;
+	unsigned int inputmargin;
+	u8 *headpage, *src;
+	bool support_0padding;
+	int ret, maptype;
 
-	src = kmap_atomic(*rq->in);
+	DBG_BUGON(*rq->in == NULL);
+	headpage = kmap_atomic(*rq->in);
 	inputmargin = 0;
 	support_0padding = false;
@@ -165,50 +206,37 @@ static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq, u8 *out)
 	if (erofs_sb_has_lz4_0padding(EROFS_SB(rq->sb))) {
 		support_0padding = true;
 
-		while (!src[inputmargin & ~PAGE_MASK])
+		while (!headpage[inputmargin & ~PAGE_MASK])
 			if (!(++inputmargin & ~PAGE_MASK))
 				break;
 
 		if (inputmargin >= rq->inputsize) {
-			kunmap_atomic(src);
+			kunmap_atomic(headpage);
 			return -EIO;
 		}
 	}
 
-	copied = false;
-	inlen = rq->inputsize - inputmargin;
-	if (rq->inplace_io) {
-		const uint oend = (rq->pageofs_out +
-				   rq->outputsize) & ~PAGE_MASK;
-		const uint nr = PAGE_ALIGN(rq->pageofs_out +
-					   rq->outputsize) >> PAGE_SHIFT;
-
-		if (rq->partial_decoding || !support_0padding ||
-		    rq->out[nr - 1] != rq->in[0] ||
-		    rq->inputsize - oend <
-			LZ4_DECOMPRESS_INPLACE_MARGIN(inlen)) {
-			src = generic_copy_inplace_data(rq, src, inputmargin);
-			inputmargin = 0;
-			copied = true;
-		}
-	}
+	rq->inputsize -= inputmargin;
+	src = z_erofs_handle_inplace_io(rq, headpage, &inputmargin, &maptype,
+					support_0padding);
+	if (IS_ERR(src))
+		return PTR_ERR(src);
 
 	/* legacy format could compress extra data in a pcluster. */
 	if (rq->partial_decoding || !support_0padding)
 		ret = LZ4_decompress_safe_partial(src + inputmargin, out,
-						  inlen, rq->outputsize,
-						  rq->outputsize);
+				rq->inputsize, rq->outputsize, rq->outputsize);
 	else
 		ret = LZ4_decompress_safe(src + inputmargin, out,
-					  inlen, rq->outputsize);
+					  rq->inputsize, rq->outputsize);
 
 	if (ret != rq->outputsize) {
 		erofs_err(rq->sb, "failed to decompress %d in[%u, %u] out[%u]",
-			  ret, inlen, inputmargin, rq->outputsize);
+			  ret, rq->inputsize, inputmargin, rq->outputsize);
 		WARN_ON(1);
 		print_hex_dump(KERN_DEBUG, "[ in]: ", DUMP_PREFIX_OFFSET,
-			       16, 1, src + inputmargin, inlen, true);
+			       16, 1, src + inputmargin, rq->inputsize, true);
 		print_hex_dump(KERN_DEBUG, "[out]: ", DUMP_PREFIX_OFFSET,
 			       16, 1, out, rq->outputsize, true);
@@ -217,10 +245,16 @@ static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq, u8 *out)
 		ret = -EIO;
 	}
 
-	if (copied)
-		erofs_put_pcpubuf(src);
-	else
+	if (maptype == 0) {
 		kunmap_atomic(src);
+	} else if (maptype == 1) {
+		vm_unmap_ram(src, PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT);
+	} else if (maptype == 2) {
+		erofs_put_pcpubuf(src);
+	} else {
+		DBG_BUGON(1);
+		return -EFAULT;
+	}
 	return ret;
 }
@@ -270,8 +304,10 @@ static int z_erofs_decompress_generic(struct z_erofs_decompress_req *rq,
 	const struct z_erofs_decompressor *alg = decompressors + rq->alg;
 	unsigned int dst_maptype;
 	void *dst;
-	int ret, i;
+	int ret;
 
+	/* two optimized fast paths only for non bigpcluster cases yet */
+	if (rq->inputsize <= PAGE_SIZE) {
 		if (nrpages_out == 1 && !rq->inplace_io) {
 			DBG_BUGON(!*rq->out);
 			dst = kmap_atomic(*rq->out);
@@ -298,29 +334,21 @@ static int z_erofs_decompress_generic(struct z_erofs_decompress_req *rq,
 			erofs_put_pcpubuf(dst);
 			return ret;
 		}
+	}
 
+	/* general decoding path which can be used for all cases */
 	ret = alg->prepare_destpages(rq, pagepool);
-	if (ret < 0) {
+	if (ret < 0)
 		return ret;
-	} else if (ret) {
+	if (ret) {
 		dst = page_address(*rq->out);
 		dst_maptype = 1;
 		goto dstmap_out;
 	}
 
-	i = 0;
-	while (1) {
-		dst = vm_map_ram(rq->out, nrpages_out, -1);
-
-		/* retry two more times (totally 3 times) */
-		if (dst || ++i >= 3)
-			break;
-		vm_unmap_aliases();
-	}
+	dst = erofs_vm_map_ram(rq->out, nrpages_out);
 	if (!dst)
 		return -ENOMEM;
 	dst_maptype = 2;
 
 dstmap_out:
--- a/fs/erofs/internal.h
+++ b/fs/erofs/internal.h
@@ -402,6 +402,21 @@ int erofs_namei(struct inode *dir, struct qstr *name,
 /* dir.c */
 extern const struct file_operations erofs_dir_fops;
 
+static inline void *erofs_vm_map_ram(struct page **pages, unsigned int count)
+{
+	int retried = 0;
+
+	while (1) {
+		void *p = vm_map_ram(pages, count, -1);
+
+		/* retry two more times (totally 3 times) */
+		if (p || ++retried >= 3)
+			return p;
+		vm_unmap_aliases();
+	}
+	return NULL;
+}
+
 /* pcpubuf.c */
 void *erofs_get_pcpubuf(unsigned int requiredpages);
 void erofs_put_pcpubuf(void *ptr);