Commit 3b1d9ca6 authored by Minchan Kim, committed by Linus Torvalds

zsmalloc: use OBJ_TAG_BITS for bit shifter

Static checking warns about using a tag as a bit shifter.  It doesn't break
the current behaviour, but it is not good for readability.  Let's use
OBJ_TAG_BITS as the bit shifter instead of OBJ_ALLOCATED_TAG.

Link: http://lkml.kernel.org/r/20160607045146.GF26230@bbox
Signed-off-by: Minchan Kim <minchan@kernel.org>
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
Cc: Sergey Senozhatsky <sergey.senozhatsky.work@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 9bc482d3
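Context note (not part of the commit): in mm/zsmalloc.c both macros happen to expand to 1, which is why the patch changes no behaviour. OBJ_TAG_BITS names the number of low-order bits reserved for tags and is therefore the semantically correct shift amount, while OBJ_ALLOCATED_TAG is the flag value OR-ed into an allocated object's handle. The standalone user-space sketch below mirrors that encoding; the demo values and the main() harness are illustrative assumptions, not kernel code.

/*
 * Standalone sketch, not kernel code.  The macro values mirror
 * mm/zsmalloc.c at the time of this commit, where both happen to be 1,
 * so shifting by either constant compiles to the same result and only
 * the readability of the source changes.
 */
#include <stdio.h>

#define OBJ_TAG_BITS		1	/* number of low bits reserved for tags */
#define OBJ_ALLOCATED_TAG	1	/* flag value marking an allocated object */

int main(void)
{
	unsigned long freeobj = 42;	/* illustrative object index */
	unsigned long handle = 0x1000;	/* illustrative handle value */

	/* Free-list entry: the object index lives above the tag bits. */
	unsigned long next = freeobj << OBJ_TAG_BITS;
	printf("encoded %lu, decoded %lu\n", next, next >> OBJ_TAG_BITS);

	/* Allocated object: the tag is OR-ed in as a flag, not used as a shift. */
	unsigned long obj = handle | OBJ_ALLOCATED_TAG;
	printf("allocated: %s\n", (obj & OBJ_ALLOCATED_TAG) ? "yes" : "no");

	return 0;
}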
@@ -1052,7 +1052,7 @@ static void init_zspage(struct size_class *class, struct zspage *zspage)
 		link = (struct link_free *)vaddr + off / sizeof(*link);
 
 		while ((off += class->size) < PAGE_SIZE) {
-			link->next = freeobj++ << OBJ_ALLOCATED_TAG;
+			link->next = freeobj++ << OBJ_TAG_BITS;
 			link += class->size / sizeof(*link);
 		}
 
@@ -1063,13 +1063,13 @@ static void init_zspage(struct size_class *class, struct zspage *zspage)
 		 */
 		next_page = get_next_page(page);
 		if (next_page) {
-			link->next = freeobj++ << OBJ_ALLOCATED_TAG;
+			link->next = freeobj++ << OBJ_TAG_BITS;
 		} else {
 			/*
-			 * Reset OBJ_ALLOCATED_TAG bit to last link to tell
+			 * Reset OBJ_TAG_BITS bit to last link to tell
 			 * whether it's allocated object or not.
 			 */
-			link->next = -1 << OBJ_ALLOCATED_TAG;
+			link->next = -1 << OBJ_TAG_BITS;
 		}
 		kunmap_atomic(vaddr);
 		page = next_page;
@@ -1514,7 +1514,7 @@ static unsigned long obj_malloc(struct size_class *class,
 
 	vaddr = kmap_atomic(m_page);
 	link = (struct link_free *)vaddr + m_offset / sizeof(*link);
-	set_freeobj(zspage, link->next >> OBJ_ALLOCATED_TAG);
+	set_freeobj(zspage, link->next >> OBJ_TAG_BITS);
 	if (likely(!PageHugeObject(m_page)))
 		/* record handle in the header of allocated chunk */
 		link->handle = handle;
@@ -1616,7 +1616,7 @@ static void obj_free(struct size_class *class, unsigned long obj)
 
 	/* Insert this object in containing zspage's freelist */
 	link = (struct link_free *)(vaddr + f_offset);
-	link->next = get_freeobj(zspage) << OBJ_ALLOCATED_TAG;
+	link->next = get_freeobj(zspage) << OBJ_TAG_BITS;
 	kunmap_atomic(vaddr);
 	set_freeobj(zspage, f_objidx);
 	mod_zspage_inuse(zspage, -1);