Commit 42e99bd9 authored by Jiang Liu, committed by Greg Kroah-Hartman

zram: optimize memory operations with clear_page()/copy_page()

Some architectures provide architecture-specific, optimized versions of
clear_page()/copy_page(), which may perform better than memset()/memcpy().
Use clear_page()/copy_page() wherever zram operates on a whole page, to
improve performance. Since clear_page()/copy_page() act on exactly one
page-aligned page, the offset-based memset()/memcpy() paths are kept for
partial I/O.
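
For reference, on architectures without optimized implementations the
generic fallbacks in include/asm-generic/page.h reduce to exactly the
calls being replaced:

#define clear_page(page)	memset((page), 0, PAGE_SIZE)
#define copy_page(to,from)	memcpy((to), (from), PAGE_SIZE)

so the change is a no-op there, and only takes effect where hand-tuned
versions (e.g. the x86 assembly implementations) exist.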
Signed-off-by: Jiang Liu <jiang.liu@huawei.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 0f0e3ba3
@@ -128,23 +128,26 @@ static void zram_free_page(struct zram *zram, size_t index)
 	meta->table[index].size = 0;
 }
 
+static inline int is_partial_io(struct bio_vec *bvec)
+{
+	return bvec->bv_len != PAGE_SIZE;
+}
+
 static void handle_zero_page(struct bio_vec *bvec)
 {
 	struct page *page = bvec->bv_page;
 	void *user_mem;
 
 	user_mem = kmap_atomic(page);
-	memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
+	if (is_partial_io(bvec))
+		memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
+	else
+		clear_page(user_mem);
 	kunmap_atomic(user_mem);
 
 	flush_dcache_page(page);
 }
 
-static inline int is_partial_io(struct bio_vec *bvec)
-{
-	return bvec->bv_len != PAGE_SIZE;
-}
-
 static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
 {
 	int ret = LZO_E_OK;
@@ -154,13 +157,13 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
 	unsigned long handle = meta->table[index].handle;
 
 	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
-		memset(mem, 0, PAGE_SIZE);
+		clear_page(mem);
 		return 0;
 	}
 
 	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
 	if (meta->table[index].size == PAGE_SIZE)
-		memcpy(mem, cmem, PAGE_SIZE);
+		copy_page(mem, cmem);
 	else
 		ret = lzo1x_decompress_safe(cmem, meta->table[index].size,
 						mem, &clen);
@@ -309,11 +312,13 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	}
 
 	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);
 
-	if ((clen == PAGE_SIZE) && !is_partial_io(bvec))
+	if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
 		src = kmap_atomic(page);
-	memcpy(cmem, src, clen);
-	if ((clen == PAGE_SIZE) && !is_partial_io(bvec))
+		copy_page(cmem, src);
 		kunmap_atomic(src);
+	} else {
+		memcpy(cmem, src, clen);
+	}
 
 	zs_unmap_object(meta->mem_pool, handle);
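
As an illustration of the dispatch pattern above, here is a minimal,
self-contained userspace sketch. The names (struct seg, zero_seg(),
SKETCH_PAGE_SIZE) are hypothetical stand-ins for the kernel's bio_vec
and clear_page(), not real API:

#include <assert.h>
#include <stdlib.h>
#include <string.h>

#define SKETCH_PAGE_SIZE 4096

/* Stand-in for the bio_vec fields used by the patch. */
struct seg {
	unsigned char *page;	/* mapped page backing this segment */
	size_t offset;		/* bv_offset: byte offset into the page */
	size_t len;		/* bv_len: bytes covered by the segment */
};

static int is_partial_seg(const struct seg *s)
{
	return s->len != SKETCH_PAGE_SIZE;
}

/* Mirrors handle_zero_page(): partial segments clear only the bytes
 * they cover; whole-page segments take the page-sized fast path
 * (where the kernel would call clear_page()). */
static void zero_seg(struct seg *s)
{
	if (is_partial_seg(s))
		memset(s->page + s->offset, 0, s->len);
	else
		memset(s->page, 0, SKETCH_PAGE_SIZE);
}

int main(void)
{
	unsigned char *page = malloc(SKETCH_PAGE_SIZE);

	memset(page, 0xff, SKETCH_PAGE_SIZE);

	/* Partial segment: only bytes [512, 1536) are cleared. */
	struct seg partial = { page, 512, 1024 };
	zero_seg(&partial);
	assert(page[511] == 0xff && page[512] == 0x00);

	/* Whole-page segment: the full page is cleared. */
	struct seg whole = { page, 0, SKETCH_PAGE_SIZE };
	zero_seg(&whole);
	assert(page[0] == 0x00 && page[SKETCH_PAGE_SIZE - 1] == 0x00);

	free(page);
	return 0;
}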