Commit b6a76183 authored by Gao Xiang, committed by Greg Kroah-Hartman

staging: erofs: integrate decompression inplace

Decompressor needs to know whether it's a partial
or full decompression since only full decompression
can be decompressed in-place.

On kirin980 platform, sequential read is finally
increased to 812MiB/s after decompression inplace
is enabled.
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 88aaf5a7
...@@ -441,6 +441,7 @@ extern const struct address_space_operations z_erofs_vle_normalaccess_aops; ...@@ -441,6 +441,7 @@ extern const struct address_space_operations z_erofs_vle_normalaccess_aops;
*/ */
enum { enum {
BH_Zipped = BH_PrivateStart, BH_Zipped = BH_PrivateStart,
BH_FullMapped,
}; };
/* Has a disk mapping */ /* Has a disk mapping */
...@@ -449,6 +450,8 @@ enum { ...@@ -449,6 +450,8 @@ enum {
#define EROFS_MAP_META (1 << BH_Meta) #define EROFS_MAP_META (1 << BH_Meta)
/* The extent has been compressed */ /* The extent has been compressed */
#define EROFS_MAP_ZIPPED (1 << BH_Zipped) #define EROFS_MAP_ZIPPED (1 << BH_Zipped)
/* The length of extent is full */
#define EROFS_MAP_FULL_MAPPED (1 << BH_FullMapped)
struct erofs_map_blocks { struct erofs_map_blocks {
erofs_off_t m_pa, m_la; erofs_off_t m_pa, m_la;
......
...@@ -469,6 +469,9 @@ z_erofs_vle_work_register(const struct z_erofs_vle_work_finder *f, ...@@ -469,6 +469,9 @@ z_erofs_vle_work_register(const struct z_erofs_vle_work_finder *f,
Z_EROFS_VLE_WORKGRP_FMT_LZ4 : Z_EROFS_VLE_WORKGRP_FMT_LZ4 :
Z_EROFS_VLE_WORKGRP_FMT_PLAIN); Z_EROFS_VLE_WORKGRP_FMT_PLAIN);
if (map->m_flags & EROFS_MAP_FULL_MAPPED)
grp->flags |= Z_EROFS_VLE_WORKGRP_FULL_LENGTH;
/* new workgrps have been claimed as type 1 */ /* new workgrps have been claimed as type 1 */
WRITE_ONCE(grp->next, *f->owned_head); WRITE_ONCE(grp->next, *f->owned_head);
/* primary and followed work for all new workgrps */ /* primary and followed work for all new workgrps */
...@@ -901,7 +904,7 @@ static int z_erofs_vle_unzip(struct super_block *sb, ...@@ -901,7 +904,7 @@ static int z_erofs_vle_unzip(struct super_block *sb,
unsigned int i, outputsize; unsigned int i, outputsize;
enum z_erofs_page_type page_type; enum z_erofs_page_type page_type;
bool overlapped; bool overlapped, partial;
struct z_erofs_vle_work *work; struct z_erofs_vle_work *work;
int err; int err;
...@@ -1009,10 +1012,13 @@ static int z_erofs_vle_unzip(struct super_block *sb, ...@@ -1009,10 +1012,13 @@ static int z_erofs_vle_unzip(struct super_block *sb,
if (unlikely(err)) if (unlikely(err))
goto out; goto out;
if (nr_pages << PAGE_SHIFT >= work->pageofs + grp->llen) if (nr_pages << PAGE_SHIFT >= work->pageofs + grp->llen) {
outputsize = grp->llen; outputsize = grp->llen;
else partial = !(grp->flags & Z_EROFS_VLE_WORKGRP_FULL_LENGTH);
} else {
outputsize = (nr_pages << PAGE_SHIFT) - work->pageofs; outputsize = (nr_pages << PAGE_SHIFT) - work->pageofs;
partial = true;
}
if (z_erofs_vle_workgrp_fmt(grp) == Z_EROFS_VLE_WORKGRP_FMT_PLAIN) if (z_erofs_vle_workgrp_fmt(grp) == Z_EROFS_VLE_WORKGRP_FMT_PLAIN)
algorithm = Z_EROFS_COMPRESSION_SHIFTED; algorithm = Z_EROFS_COMPRESSION_SHIFTED;
...@@ -1028,7 +1034,8 @@ static int z_erofs_vle_unzip(struct super_block *sb, ...@@ -1028,7 +1034,8 @@ static int z_erofs_vle_unzip(struct super_block *sb,
.outputsize = outputsize, .outputsize = outputsize,
.alg = algorithm, .alg = algorithm,
.inplace_io = overlapped, .inplace_io = overlapped,
.partial_decoding = true }, page_pool); .partial_decoding = partial
}, page_pool);
out: out:
/* must handle all compressed pages before endding pages */ /* must handle all compressed pages before endding pages */
......
...@@ -46,6 +46,7 @@ struct z_erofs_vle_work { ...@@ -46,6 +46,7 @@ struct z_erofs_vle_work {
#define Z_EROFS_VLE_WORKGRP_FMT_PLAIN 0 #define Z_EROFS_VLE_WORKGRP_FMT_PLAIN 0
#define Z_EROFS_VLE_WORKGRP_FMT_LZ4 1 #define Z_EROFS_VLE_WORKGRP_FMT_LZ4 1
#define Z_EROFS_VLE_WORKGRP_FMT_MASK 1 #define Z_EROFS_VLE_WORKGRP_FMT_MASK 1
#define Z_EROFS_VLE_WORKGRP_FULL_LENGTH 2
typedef void *z_erofs_vle_owned_workgrp_t; typedef void *z_erofs_vle_owned_workgrp_t;
......
...@@ -424,6 +424,7 @@ int z_erofs_map_blocks_iter(struct inode *inode, ...@@ -424,6 +424,7 @@ int z_erofs_map_blocks_iter(struct inode *inode,
goto unmap_out; goto unmap_out;
} }
end = (m.lcn << lclusterbits) | m.clusterofs; end = (m.lcn << lclusterbits) | m.clusterofs;
map->m_flags |= EROFS_MAP_FULL_MAPPED;
m.delta[0] = 1; m.delta[0] = 1;
/* fallthrough */ /* fallthrough */
case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD: case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment