Commit 0a5f079b authored by Minchan Kim, committed by Linus Torvalds

zsmalloc: decouple class actions from zspage works

This patch moves the class stat updates out of obj_malloc/obj_free since
they are not related to zspage operations.  This is a preparation for
introducing a new lock scheme in the next patch.
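
The shape of the change is easier to see outside the diff: the object
helpers keep only the zspage work, while the callers, which already hold
class->lock, do the class stat accounting themselves.  Below is a minimal,
self-contained sketch of that pattern; it is not the kernel code, and the
struct fields and stub functions are simplified stand-ins:

/*
 * Sketch of the pattern only: per-class stats move out of the object
 * helper into the lock-holding caller.  Types are simplified stand-ins.
 */
#include <stdio.h>

struct zspage {
        int inuse;              /* objects allocated from this zspage */
        int freeobj;            /* next free object index */
};

struct size_class {
        int obj_used;           /* class stat, guarded by class->lock */
};

/* After the patch: the helper updates zspage state only. */
static int obj_malloc(struct zspage *zspage)
{
        int obj = zspage->freeobj++;

        zspage->inuse++;        /* zspage work stays in the helper */
        return obj;             /* no class stat update here anymore */
}

/* The caller does the class-level accounting itself, under class->lock. */
static int zs_malloc(struct size_class *class, struct zspage *zspage)
{
        /* spin_lock(&class->lock) would be taken here in the kernel */
        int handle = obj_malloc(zspage);

        class->obj_used++;      /* was inside obj_malloc before the patch */
        /* spin_unlock(&class->lock) */
        return handle;
}

int main(void)
{
        struct size_class class = { 0 };
        struct zspage zspage = { 0 };

        zs_malloc(&class, &zspage);
        printf("inuse=%d obj_used=%d\n", zspage.inuse, class.obj_used);
        return 0;
}

The point of the pattern is that the helpers end up depending only on
zspage state, leaving class->lock and its stats entirely to the callers;
that separation is what the next patch's lock scheme builds on.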

Link: https://lkml.kernel.org/r/20211115185909.3949505-4-minchan@kernel.org
Signed-off-by: Minchan Kim <minchan@kernel.org>
Acked-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Mike Galbraith <umgwanakikbuti@gmail.com>
Cc: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 3828a764
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1360,17 +1360,19 @@ size_t zs_huge_class_size(struct zs_pool *pool)
 }
 EXPORT_SYMBOL_GPL(zs_huge_class_size);
 
-static unsigned long obj_malloc(struct size_class *class,
+static unsigned long obj_malloc(struct zs_pool *pool,
 				struct zspage *zspage, unsigned long handle)
 {
 	int i, nr_page, offset;
 	unsigned long obj;
 	struct link_free *link;
+	struct size_class *class;
 
 	struct page *m_page;
 	unsigned long m_offset;
 	void *vaddr;
 
+	class = pool->size_class[zspage->class];
 	handle |= OBJ_ALLOCATED_TAG;
 	obj = get_freeobj(zspage);
 
@@ -1394,7 +1396,6 @@ static unsigned long obj_malloc(struct size_class *class,
 
 	kunmap_atomic(vaddr);
 	mod_zspage_inuse(zspage, 1);
-	class_stat_inc(class, OBJ_USED, 1);
 
 	obj = location_to_obj(m_page, obj);
 
@@ -1433,10 +1434,11 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
 	spin_lock(&class->lock);
 	zspage = find_get_zspage(class);
 	if (likely(zspage)) {
-		obj = obj_malloc(class, zspage, handle);
+		obj = obj_malloc(pool, zspage, handle);
 		/* Now move the zspage to another fullness group, if required */
 		fix_fullness_group(class, zspage);
 		record_obj(handle, obj);
+		class_stat_inc(class, OBJ_USED, 1);
 		spin_unlock(&class->lock);
 
 		return handle;
@@ -1451,7 +1453,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
 	}
 
 	spin_lock(&class->lock);
-	obj = obj_malloc(class, zspage, handle);
+	obj = obj_malloc(pool, zspage, handle);
 	newfg = get_fullness_group(class, zspage);
 	insert_zspage(class, zspage, newfg);
 	set_zspage_mapping(zspage, class->index, newfg);
@@ -1459,6 +1461,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
 	atomic_long_add(class->pages_per_zspage,
 				&pool->pages_allocated);
 	class_stat_inc(class, OBJ_ALLOCATED, class->objs_per_zspage);
+	class_stat_inc(class, OBJ_USED, 1);
 
 	/* We completely set up zspage so mark them as movable */
 	SetZsPageMovable(pool, zspage);
@@ -1468,7 +1471,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
 }
 EXPORT_SYMBOL_GPL(zs_malloc);
 
-static void obj_free(struct size_class *class, unsigned long obj)
+static void obj_free(int class_size, unsigned long obj)
 {
 	struct link_free *link;
 	struct zspage *zspage;
@@ -1478,7 +1481,7 @@ static void obj_free(struct size_class *class, unsigned long obj)
 	void *vaddr;
 
 	obj_to_location(obj, &f_page, &f_objidx);
-	f_offset = (class->size * f_objidx) & ~PAGE_MASK;
+	f_offset = (class_size * f_objidx) & ~PAGE_MASK;
 	zspage = get_zspage(f_page);
 
 	vaddr = kmap_atomic(f_page);
@@ -1489,7 +1492,6 @@ static void obj_free(struct size_class *class, unsigned long obj)
 	kunmap_atomic(vaddr);
 	set_freeobj(zspage, f_objidx);
 	mod_zspage_inuse(zspage, -1);
-	class_stat_dec(class, OBJ_USED, 1);
 }
 
 void zs_free(struct zs_pool *pool, unsigned long handle)
@@ -1513,7 +1515,8 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
 
 	class = zspage_class(pool, zspage);
 	spin_lock(&class->lock);
-	obj_free(class, obj);
+	obj_free(class->size, obj);
+	class_stat_dec(class, OBJ_USED, 1);
 	fullness = fix_fullness_group(class, zspage);
 	if (fullness != ZS_EMPTY) {
 		migrate_read_unlock(zspage);
@@ -1671,7 +1674,7 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
 		}
 
 		used_obj = handle_to_obj(handle);
-		free_obj = obj_malloc(class, get_zspage(d_page), handle);
+		free_obj = obj_malloc(pool, get_zspage(d_page), handle);
 		zs_object_copy(class, free_obj, used_obj);
 		obj_idx++;
 		/*
@@ -1683,7 +1686,7 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
 		free_obj |= BIT(HANDLE_PIN_BIT);
 		record_obj(handle, free_obj);
 		unpin_tag(handle);
-		obj_free(class, used_obj);
+		obj_free(class->size, used_obj);
 	}
 
 	/* Remember last position in this iteration */