Commit d178a07c authored by Davidlohr Bueso, committed by Greg Kroah-Hartman

staging: zram: drop zram_stat_dec/inc functions

It seems like overkill to have functions whose only job is to add or subtract
1 from the 32-bit counters. Just do it directly.
Signed-off-by: Davidlohr Bueso <davidlohr.bueso@hp.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent cad683fb
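
For context, the patch replaces calls to one-line helper functions with the equivalent direct increment/decrement of the counters. A minimal standalone sketch of the before/after pattern (the struct and field names below are illustrative stand-ins, not the actual zram definitions):

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the 32-bit counters in the zram stats struct. */
struct stats {
	uint32_t pages_zero;
	uint32_t pages_stored;
};

/* The kind of helper the patch removes: it only adds 1 to a counter. */
static void stat_inc(uint32_t *v)
{
	*v = *v + 1;
}

int main(void)
{
	struct stats s = { 0 };

	stat_inc(&s.pages_stored);	/* before: through the helper */
	s.pages_stored++;		/* after: done directly */

	printf("pages_stored=%u\n", s.pages_stored);
	return 0;
}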
@@ -42,16 +42,6 @@ struct zram *zram_devices;
 /* Module params (documentation at end) */
 static unsigned int num_devices = 1;
 
-static void zram_stat_inc(u32 *v)
-{
-	*v = *v + 1;
-}
-
-static void zram_stat_dec(u32 *v)
-{
-	*v = *v - 1;
-}
-
 static void zram_stat64_add(struct zram *zram, u64 *v, u64 inc)
 {
 	spin_lock(&zram->stat64_lock);
@@ -144,22 +134,22 @@ static void zram_free_page(struct zram *zram, size_t index)
 		 */
 		if (zram_test_flag(zram, index, ZRAM_ZERO)) {
 			zram_clear_flag(zram, index, ZRAM_ZERO);
-			zram_stat_dec(&zram->stats.pages_zero);
+			zram->stats.pages_zero--;
 		}
 		return;
 	}
 
 	if (unlikely(size > max_zpage_size))
-		zram_stat_dec(&zram->stats.bad_compress);
+		zram->stats.bad_compress--;
 
 	zs_free(zram->mem_pool, handle);
 
 	if (size <= PAGE_SIZE / 2)
-		zram_stat_dec(&zram->stats.good_compress);
+		zram->stats.good_compress--;
 
 	zram_stat64_sub(zram, &zram->stats.compr_size,
 			zram->table[index].size);
-	zram_stat_dec(&zram->stats.pages_stored);
+	zram->stats.pages_stored--;
 
 	zram->table[index].handle = 0;
 	zram->table[index].size = 0;
@@ -311,7 +301,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 		kunmap_atomic(user_mem);
 		if (is_partial_io(bvec))
 			kfree(uncmem);
-		zram_stat_inc(&zram->stats.pages_zero);
+		zram->stats.pages_zero++;
 		zram_set_flag(zram, index, ZRAM_ZERO);
 		ret = 0;
 		goto out;
@@ -330,7 +320,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	}
 
 	if (unlikely(clen > max_zpage_size)) {
-		zram_stat_inc(&zram->stats.bad_compress);
+		zram->stats.bad_compress++;
 		src = uncmem;
 		clen = PAGE_SIZE;
 	}
@@ -353,9 +343,9 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	/* Update stats */
 	zram_stat64_add(zram, &zram->stats.compr_size, clen);
-	zram_stat_inc(&zram->stats.pages_stored);
+	zram->stats.pages_stored++;
 	if (clen <= PAGE_SIZE / 2)
-		zram_stat_inc(&zram->stats.good_compress);
+		zram->stats.good_compress++;
 
 	return 0;