Commit 8e19d540 authored by zhouxianrong's avatar zhouxianrong Committed by Linus Torvalds

zram: extend zero pages to same element pages

The idea is that without doing more calculations we extend zero pages to
same element pages for zram.  zero page is special case of same element
page with zero element.

1. the test is done under android 7.0
2. startup too many applications circularly
3. sample the zero pages, same pages (non-zero element)
   and total pages in function page_zero_filled

the result is listed as below:

ZERO	SAME	TOTAL
36214	17842	598196

		ZERO/TOTAL	 SAME/TOTAL	  (ZERO+SAME)/TOTAL ZERO/SAME
AVERAGE	0.060631909	 0.024990816  0.085622726		2.663825038
STDEV	0.00674612	 0.005887625  0.009707034		2.115881328
MAX		0.069698422	 0.030046087  0.094975336		7.56043956
MIN		0.03959586	 0.007332205  0.056055193		1.928985507

from the above data, the benefit is about 2.5% and up to 3% of total
swapout pages.

The drawback of the patch is that when we recover a page from a non-zero
element, the operations are inefficient for partial reads.

This patch extends zero_page to same_page, so any user who has been
monitoring zero_pages may be surprised if the number increases — but it's
not harmful, I believe.

[minchan@kernel.org: do not free same element pages in zram_meta_free]
  Link: http://lkml.kernel.org/r/20170207065741.GA2567@bbox
Link: http://lkml.kernel.org/r/1483692145-75357-1-git-send-email-zhouxianrong@huawei.com
Link: http://lkml.kernel.org/r/1486307804-27903-1-git-send-email-minchan@kernel.org
Signed-off-by: default avatarzhouxianrong <zhouxianrong@huawei.com>
Signed-off-by: default avatarMinchan Kim <minchan@kernel.org>
Cc: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent 517663ed
...@@ -201,8 +201,8 @@ File /sys/block/zram<id>/mm_stat ...@@ -201,8 +201,8 @@ File /sys/block/zram<id>/mm_stat
The stat file represents device's mm statistics. It consists of a single The stat file represents device's mm statistics. It consists of a single
line of text and contains the following stats separated by whitespace: line of text and contains the following stats separated by whitespace:
orig_data_size uncompressed size of data stored in this disk. orig_data_size uncompressed size of data stored in this disk.
This excludes zero-filled pages (zero_pages) since no This excludes same-element-filled pages (same_pages) since
memory is allocated for them. no memory is allocated for them.
Unit: bytes Unit: bytes
compr_data_size compressed size of data stored in this disk compr_data_size compressed size of data stored in this disk
mem_used_total the amount of memory allocated for this disk. This mem_used_total the amount of memory allocated for this disk. This
...@@ -214,7 +214,7 @@ line of text and contains the following stats separated by whitespace: ...@@ -214,7 +214,7 @@ line of text and contains the following stats separated by whitespace:
the compressed data the compressed data
mem_used_max the maximum amount of memory zram have consumed to mem_used_max the maximum amount of memory zram have consumed to
store the data store the data
zero_pages the number of zero filled pages written to this disk. same_pages the number of same element filled pages written to this disk.
No memory is allocated for such pages. No memory is allocated for such pages.
pages_compacted the number of pages freed during compaction pages_compacted the number of pages freed during compaction
......
...@@ -74,6 +74,17 @@ static void zram_clear_flag(struct zram_meta *meta, u32 index, ...@@ -74,6 +74,17 @@ static void zram_clear_flag(struct zram_meta *meta, u32 index,
meta->table[index].value &= ~BIT(flag); meta->table[index].value &= ~BIT(flag);
} }
/*
 * Record the repeating word for a same-element-filled slot; the value
 * shares storage with the zsmalloc handle (union in zram_table_entry).
 */
static inline void zram_set_element(struct zram_meta *meta, u32 index,
			unsigned long element)
{
	(meta->table + index)->element = element;
}
/* Reset the stored element value for this slot back to zero. */
static inline void zram_clear_element(struct zram_meta *meta, u32 index)
{
	(meta->table + index)->element = 0;
}
static size_t zram_get_obj_size(struct zram_meta *meta, u32 index) static size_t zram_get_obj_size(struct zram_meta *meta, u32 index)
{ {
return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1); return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
...@@ -146,31 +157,46 @@ static inline void update_used_max(struct zram *zram, ...@@ -146,31 +157,46 @@ static inline void update_used_max(struct zram *zram,
} while (old_max != cur_max); } while (old_max != cur_max);
} }
/*
 * Fill @len bytes at @ptr with the repeating word-sized pattern @value.
 * @len must be a multiple of sizeof(unsigned long) (warned otherwise).
 * The common all-zeroes case is handed to memset(), which is typically
 * faster than the explicit word loop.
 */
static inline void zram_fill_page(char *ptr, unsigned long len,
			unsigned long value)
{
	/* size_t, not int: matches the unsigned type of len / sizeof(*page) */
	size_t i;
	unsigned long *page = (unsigned long *)ptr;

	WARN_ON_ONCE(!IS_ALIGNED(len, sizeof(unsigned long)));

	if (likely(value == 0)) {
		memset(ptr, 0, len);
	} else {
		for (i = 0; i < len / sizeof(*page); i++)
			page[i] = value;
	}
}
/*
 * Return true if every word in the PAGE_SIZE buffer at @ptr holds the
 * same value; on success the repeated word is stored in *element.
 * An all-zeroes page is just the special case *element == 0.
 */
static bool page_same_filled(void *ptr, unsigned long *element)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	/* Compare each word with its successor; one mismatch disproves it. */
	for (pos = 0; pos < PAGE_SIZE / sizeof(*page) - 1; pos++) {
		if (page[pos] != page[pos + 1])
			return false;
	}

	*element = page[pos];

	return true;
}
static void handle_zero_page(struct bio_vec *bvec) static void handle_same_page(struct bio_vec *bvec, unsigned long element)
{ {
struct page *page = bvec->bv_page; struct page *page = bvec->bv_page;
void *user_mem; void *user_mem;
user_mem = kmap_atomic(page); user_mem = kmap_atomic(page);
if (is_partial_io(bvec)) zram_fill_page(user_mem + bvec->bv_offset, bvec->bv_len, element);
memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
else
clear_page(user_mem);
kunmap_atomic(user_mem); kunmap_atomic(user_mem);
flush_dcache_page(page); flush_dcache_page(page);
...@@ -363,7 +389,7 @@ static ssize_t mm_stat_show(struct device *dev, ...@@ -363,7 +389,7 @@ static ssize_t mm_stat_show(struct device *dev,
mem_used << PAGE_SHIFT, mem_used << PAGE_SHIFT,
zram->limit_pages << PAGE_SHIFT, zram->limit_pages << PAGE_SHIFT,
max_used << PAGE_SHIFT, max_used << PAGE_SHIFT,
(u64)atomic64_read(&zram->stats.zero_pages), (u64)atomic64_read(&zram->stats.same_pages),
pool_stats.pages_compacted); pool_stats.pages_compacted);
up_read(&zram->init_lock); up_read(&zram->init_lock);
...@@ -399,8 +425,11 @@ static void zram_meta_free(struct zram_meta *meta, u64 disksize) ...@@ -399,8 +425,11 @@ static void zram_meta_free(struct zram_meta *meta, u64 disksize)
/* Free all pages that are still in this zram device */ /* Free all pages that are still in this zram device */
for (index = 0; index < num_pages; index++) { for (index = 0; index < num_pages; index++) {
unsigned long handle = meta->table[index].handle; unsigned long handle = meta->table[index].handle;
/*
if (!handle) * No memory is allocated for same element filled pages.
* Simply clear same page flag.
*/
if (!handle || zram_test_flag(meta, index, ZRAM_SAME))
continue; continue;
zs_free(meta->mem_pool, handle); zs_free(meta->mem_pool, handle);
...@@ -450,18 +479,20 @@ static void zram_free_page(struct zram *zram, size_t index) ...@@ -450,18 +479,20 @@ static void zram_free_page(struct zram *zram, size_t index)
struct zram_meta *meta = zram->meta; struct zram_meta *meta = zram->meta;
unsigned long handle = meta->table[index].handle; unsigned long handle = meta->table[index].handle;
if (unlikely(!handle)) {
/* /*
* No memory is allocated for zero filled pages. * No memory is allocated for same element filled pages.
* Simply clear zero page flag. * Simply clear same page flag.
*/ */
if (zram_test_flag(meta, index, ZRAM_ZERO)) { if (zram_test_flag(meta, index, ZRAM_SAME)) {
zram_clear_flag(meta, index, ZRAM_ZERO); zram_clear_flag(meta, index, ZRAM_SAME);
atomic64_dec(&zram->stats.zero_pages); zram_clear_element(meta, index);
} atomic64_dec(&zram->stats.same_pages);
return; return;
} }
if (!handle)
return;
zs_free(meta->mem_pool, handle); zs_free(meta->mem_pool, handle);
atomic64_sub(zram_get_obj_size(meta, index), atomic64_sub(zram_get_obj_size(meta, index),
...@@ -484,9 +515,9 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index) ...@@ -484,9 +515,9 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
handle = meta->table[index].handle; handle = meta->table[index].handle;
size = zram_get_obj_size(meta, index); size = zram_get_obj_size(meta, index);
if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) { if (!handle || zram_test_flag(meta, index, ZRAM_SAME)) {
bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
clear_page(mem); zram_fill_page(mem, PAGE_SIZE, meta->table[index].element);
return 0; return 0;
} }
...@@ -522,9 +553,9 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec, ...@@ -522,9 +553,9 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
if (unlikely(!meta->table[index].handle) || if (unlikely(!meta->table[index].handle) ||
zram_test_flag(meta, index, ZRAM_ZERO)) { zram_test_flag(meta, index, ZRAM_SAME)) {
bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
handle_zero_page(bvec); handle_same_page(bvec, meta->table[index].element);
return 0; return 0;
} }
bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
...@@ -572,6 +603,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index, ...@@ -572,6 +603,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
struct zram_meta *meta = zram->meta; struct zram_meta *meta = zram->meta;
struct zcomp_strm *zstrm = NULL; struct zcomp_strm *zstrm = NULL;
unsigned long alloced_pages; unsigned long alloced_pages;
unsigned long element;
page = bvec->bv_page; page = bvec->bv_page;
if (is_partial_io(bvec)) { if (is_partial_io(bvec)) {
...@@ -600,16 +632,17 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index, ...@@ -600,16 +632,17 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
uncmem = user_mem; uncmem = user_mem;
} }
if (page_zero_filled(uncmem)) { if (page_same_filled(uncmem, &element)) {
if (user_mem) if (user_mem)
kunmap_atomic(user_mem); kunmap_atomic(user_mem);
/* Free memory associated with this sector now. */ /* Free memory associated with this sector now. */
bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
zram_free_page(zram, index); zram_free_page(zram, index);
zram_set_flag(meta, index, ZRAM_ZERO); zram_set_flag(meta, index, ZRAM_SAME);
zram_set_element(meta, index, element);
bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
atomic64_inc(&zram->stats.zero_pages); atomic64_inc(&zram->stats.same_pages);
ret = 0; ret = 0;
goto out; goto out;
} }
......
...@@ -61,7 +61,7 @@ static const size_t max_zpage_size = PAGE_SIZE / 4 * 3; ...@@ -61,7 +61,7 @@ static const size_t max_zpage_size = PAGE_SIZE / 4 * 3;
/* Flags for zram pages (table[page_no].value) */ /* Flags for zram pages (table[page_no].value) */
enum zram_pageflags { enum zram_pageflags {
/* Page consists entirely of zeros */ /* Page consists entirely of zeros */
ZRAM_ZERO = ZRAM_FLAG_SHIFT, ZRAM_SAME = ZRAM_FLAG_SHIFT,
ZRAM_ACCESS, /* page is now accessed */ ZRAM_ACCESS, /* page is now accessed */
__NR_ZRAM_PAGEFLAGS, __NR_ZRAM_PAGEFLAGS,
...@@ -71,7 +71,10 @@ enum zram_pageflags { ...@@ -71,7 +71,10 @@ enum zram_pageflags {
/* Allocated for each disk page */ /* Allocated for each disk page */
struct zram_table_entry { struct zram_table_entry {
union {
unsigned long handle; unsigned long handle;
unsigned long element;
};
unsigned long value; unsigned long value;
}; };
...@@ -83,7 +86,7 @@ struct zram_stats { ...@@ -83,7 +86,7 @@ struct zram_stats {
atomic64_t failed_writes; /* can happen when memory is too low */ atomic64_t failed_writes; /* can happen when memory is too low */
atomic64_t invalid_io; /* non-page-aligned I/O requests */ atomic64_t invalid_io; /* non-page-aligned I/O requests */
atomic64_t notify_free; /* no. of swap slot free notifications */ atomic64_t notify_free; /* no. of swap slot free notifications */
atomic64_t zero_pages; /* no. of zero filled pages */ atomic64_t same_pages; /* no. of same element filled pages */
atomic64_t pages_stored; /* no. of pages currently stored */ atomic64_t pages_stored; /* no. of pages currently stored */
atomic_long_t max_used_pages; /* no. of maximum pages stored */ atomic_long_t max_used_pages; /* no. of maximum pages stored */
atomic64_t writestall; /* no. of write slow paths */ atomic64_t writestall; /* no. of write slow paths */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment