Commit c7806261 authored by Minchan Kim, committed by Linus Torvalds

zsmalloc: factor out obj_[malloc|free]

A later patch adds migration, which needs parts of zs_malloc() and
zs_free(), so factor those parts out into obj_malloc() and obj_free().
Signed-off-by: Minchan Kim <minchan@kernel.org>
Cc: Juneho Choi <juno.choi@lge.com>
Cc: Gunho Lee <gunho.lee@lge.com>
Cc: Luigi Semenzato <semenzato@google.com>
Cc: Dan Streetman <ddstreet@ieee.org>
Cc: Seth Jennings <sjennings@variantweb.net>
Cc: Nitin Gupta <ngupta@vflare.org>
Cc: Jerome Marchand <jmarchan@redhat.com>
Cc: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 2e40e163
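
The refactoring pattern the commit message describes is easy to see in miniature before reading the diff. The sketch below is illustrative only, with invented toy_* names rather than zsmalloc's real types: the freelist pop/push mechanism moves into obj_[malloc|free]-style helpers, while the public entry points keep the surrounding policy (validation, locking, statistics). That split is exactly what lets a second caller, such as migration, reuse the mechanism later.

#include <stddef.h>

/*
 * Illustrative only: a toy intrusive freelist, not zsmalloc's real
 * data structures. toy_obj_malloc()/toy_obj_free() play the role of
 * the factored-out obj_malloc()/obj_free().
 */
struct toy_obj {
	struct toy_obj *next;
};

struct toy_class {
	struct toy_obj *freelist;	/* singly linked list of free chunks */
	unsigned long inuse;
};

/* Mechanism: pop the freelist head (cf. obj_malloc()). */
static struct toy_obj *toy_obj_malloc(struct toy_class *class)
{
	struct toy_obj *obj = class->freelist;

	class->freelist = obj->next;
	class->inuse++;
	return obj;
}

/* Mechanism: push a chunk back onto the freelist (cf. obj_free()). */
static void toy_obj_free(struct toy_class *class, struct toy_obj *obj)
{
	obj->next = class->freelist;
	class->freelist = obj;
	class->inuse--;
}

/* Policy: entry points validate and delegate, like zs_malloc()/zs_free(). */
static struct toy_obj *toy_malloc(struct toy_class *class)
{
	if (!class->freelist)		/* locking and stats elided here */
		return NULL;
	return toy_obj_malloc(class);
}

static void toy_free(struct toy_class *class, struct toy_obj *obj)
{
	if (!obj)
		return;
	toy_obj_free(class, obj);
}
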
@@ -525,11 +525,10 @@ static void remove_zspage(struct page *page, struct size_class *class,
  * page from the freelist of the old fullness group to that of the new
  * fullness group.
  */
-static enum fullness_group fix_fullness_group(struct zs_pool *pool,
+static enum fullness_group fix_fullness_group(struct size_class *class,
 				struct page *page)
 {
 	int class_idx;
-	struct size_class *class;
 	enum fullness_group currfg, newfg;
 
 	BUG_ON(!is_first_page(page));
@@ -539,7 +538,6 @@ static enum fullness_group fix_fullness_group(struct zs_pool *pool,
 	if (newfg == currfg)
 		goto out;
 
-	class = pool->size_class[class_idx];
 	remove_zspage(page, class, currfg);
 	insert_zspage(page, class, newfg);
 	set_zspage_mapping(page, class_idx, newfg);
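
The two hunks above are the enabling tweak: fix_fullness_group() no longer re-derives the size class from the pool via pool->size_class[class_idx]; callers now pass the class they have already looked up (and, in both call sites below, already hold the lock on). Besides dropping a redundant lookup, this frees the helper from needing a zs_pool at all, which the factored-out paths rely on. The call-site effect, in brief:

	/* before: the helper found the class again from the pool */
	fix_fullness_group(pool, first_page);
	/* after: the caller hands over the class it already resolved */
	fix_fullness_group(class, first_page);
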
@@ -1281,6 +1279,33 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
 }
 EXPORT_SYMBOL_GPL(zs_unmap_object);
 
+static unsigned long obj_malloc(struct page *first_page,
+		struct size_class *class, unsigned long handle)
+{
+	unsigned long obj;
+	struct link_free *link;
+	struct page *m_page;
+	unsigned long m_objidx, m_offset;
+	void *vaddr;
+
+	obj = (unsigned long)first_page->freelist;
+	obj_to_location(obj, &m_page, &m_objidx);
+	m_offset = obj_idx_to_offset(m_page, m_objidx, class->size);
+
+	vaddr = kmap_atomic(m_page);
+	link = (struct link_free *)vaddr + m_offset / sizeof(*link);
+	first_page->freelist = link->next;
+	/* record handle in the header of allocated chunk */
+	link->handle = handle;
+	kunmap_atomic(vaddr);
+
+	first_page->inuse++;
+	zs_stat_inc(class, OBJ_USED, 1);
+
+	return obj;
+}
+
+
 /**
  * zs_malloc - Allocate block of given size from pool.
  * @pool: pool to allocate from
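
A note on the helper just added: obj_malloc() takes the zspage's first page, its size class, and a pre-allocated handle; it pops the freelist head, records the handle in the chunk header, bumps the inuse and OBJ_USED accounting, and returns the encoded object. It deliberately touches neither the pool nor class->lock, which is what makes it reusable. A hypothetical second caller, in the spirit of the migration work this prepares for (the function name and context are assumptions, not part of this patch):

	/*
	 * Hypothetical, not in this commit: claim a slot in a destination
	 * zspage during migration. Must be called under class->lock, just
	 * as zs_malloc() calls obj_malloc() below.
	 */
	static unsigned long alloc_target_obj(struct size_class *class,
					struct page *target_page,
					unsigned long handle)
	{
		return obj_malloc(target_page, class, handle);
	}
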
@@ -1293,12 +1318,8 @@ EXPORT_SYMBOL_GPL(zs_unmap_object);
 unsigned long zs_malloc(struct zs_pool *pool, size_t size)
 {
 	unsigned long handle, obj;
-	struct link_free *link;
 	struct size_class *class;
-	void *vaddr;
-	struct page *first_page, *m_page;
-	unsigned long m_objidx, m_offset;
+	struct page *first_page;
 
 	if (unlikely(!size || (size + ZS_HANDLE_SIZE) > ZS_MAX_ALLOC_SIZE))
 		return 0;
@@ -1331,22 +1352,9 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size)
 				class->size, class->pages_per_zspage));
 	}
 
-	obj = (unsigned long)first_page->freelist;
-	obj_to_location(obj, &m_page, &m_objidx);
-	m_offset = obj_idx_to_offset(m_page, m_objidx, class->size);
-
-	vaddr = kmap_atomic(m_page);
-	link = (struct link_free *)vaddr + m_offset / sizeof(*link);
-	first_page->freelist = link->next;
-
-	/* record handle in the header of allocated chunk */
-	link->handle = handle;
-	kunmap_atomic(vaddr);
-
-	first_page->inuse++;
-	zs_stat_inc(class, OBJ_USED, 1);
+	obj = obj_malloc(first_page, class, handle);
 	/* Now move the zspage to another fullness group, if required */
-	fix_fullness_group(pool, first_page);
+	fix_fullness_group(class, first_page);
 	record_obj(handle, obj);
 
 	spin_unlock(&class->lock);
@@ -1354,46 +1362,60 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size)
 }
 EXPORT_SYMBOL_GPL(zs_malloc);
 
-void zs_free(struct zs_pool *pool, unsigned long handle)
+static void obj_free(struct zs_pool *pool, struct size_class *class,
+			unsigned long obj)
 {
 	struct link_free *link;
 	struct page *first_page, *f_page;
-	unsigned long obj, f_objidx, f_offset;
+	unsigned long f_objidx, f_offset;
 	void *vaddr;
 	int class_idx;
-	struct size_class *class;
 	enum fullness_group fullness;
 
-	if (unlikely(!handle))
-		return;
+	BUG_ON(!obj);
 
-	obj = handle_to_obj(handle);
-	free_handle(pool, handle);
 	obj_to_location(obj, &f_page, &f_objidx);
 	first_page = get_first_page(f_page);
 
 	get_zspage_mapping(first_page, &class_idx, &fullness);
-	class = pool->size_class[class_idx];
 	f_offset = obj_idx_to_offset(f_page, f_objidx, class->size);
 
-	spin_lock(&class->lock);
+	vaddr = kmap_atomic(f_page);
 
 	/* Insert this object in containing zspage's freelist */
-	vaddr = kmap_atomic(f_page);
 	link = (struct link_free *)(vaddr + f_offset);
 	link->next = first_page->freelist;
 	kunmap_atomic(vaddr);
 	first_page->freelist = (void *)obj;
-
 	first_page->inuse--;
-	fullness = fix_fullness_group(pool, first_page);
-
 	zs_stat_dec(class, OBJ_USED, 1);
-
+}
+
+void zs_free(struct zs_pool *pool, unsigned long handle)
+{
+	struct page *first_page, *f_page;
+	unsigned long obj, f_objidx;
+	int class_idx;
+	struct size_class *class;
+	enum fullness_group fullness;
+
+	if (unlikely(!handle))
+		return;
+
+	obj = handle_to_obj(handle);
+	free_handle(pool, handle);
+	obj_to_location(obj, &f_page, &f_objidx);
+
+	first_page = get_first_page(f_page);
+	get_zspage_mapping(first_page, &class_idx, &fullness);
+	class = pool->size_class[class_idx];
+
+	spin_lock(&class->lock);
+	obj_free(pool, class, obj);
+	fullness = fix_fullness_group(class, first_page);
 	if (fullness == ZS_EMPTY)
 		zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
 				class->size, class->pages_per_zspage));
-
 	spin_unlock(&class->lock);
 
 	if (fullness == ZS_EMPTY) {
...
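
One invariant the split leaves implicit is worth stating: obj_free() edits the zspage freelist and the OBJ_USED counter but takes no lock of its own; zs_free() wraps the obj_free() + fix_fullness_group() pair in class->lock. Any future caller, such as the migration path this series is building toward, must keep the same discipline. A sketch with an invented name (an assumption, not part of this patch):

	/*
	 * Hypothetical, not in this commit: release the source copy of an
	 * object after migration. The caller resolves the class up front
	 * and holds class->lock across obj_free(), mirroring zs_free().
	 */
	static void free_migrated_src(struct zs_pool *pool,
					struct size_class *class,
					unsigned long obj)
	{
		spin_lock(&class->lock);
		obj_free(pool, class, obj);
		spin_unlock(&class->lock);
	}
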