Commit 5538c562 authored by Dan Streetman, committed by Linus Torvalds

zsmalloc: simplify init_zspage free obj linking

Change zsmalloc init_zspage() logic to iterate through each object on each
of its pages, checking the offset to verify the object is on the current
page before linking it into the zspage.

The current zsmalloc init_zspage free object linking code has logic that
relies on there only being one page per zspage when PAGE_SIZE is a
multiple of class->size.  It calculates the number of objects for the
current page, and iterates through all of them plus one, to account for
the assumed partial object at the end of the page.  While this currently
works, the logic can be simplified to just link the object at each
successive offset until the offset reaches or exceeds PAGE_SIZE, which does
not rely on PAGE_SIZE being a multiple of class->size.
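
For illustration, here is a minimal userspace sketch of the simplified
linking loop (standalone C, not the kernel code; the class_size value and
the printf reporting are hypothetical stand-ins for the real link_free
chaining and obj_location_to_handle() bookkeeping):

	#include <stdio.h>

	#define PAGE_SIZE 4096

	int main(void)
	{
		/* an object size that does not evenly divide PAGE_SIZE */
		unsigned int class_size = 1000;
		unsigned long off = 0;	/* offset of first object in this page */
		unsigned int i = 1;

		/*
		 * Link each successive object while its start offset still
		 * falls inside the page; the loop condition alone handles a
		 * trailing partial object, with no per-page object count.
		 */
		while ((off += class_size) < PAGE_SIZE)
			printf("object %u starts at offset %lu\n", i++, off);

		/*
		 * The last (possibly partial) object spills past the page
		 * boundary; off %= PAGE_SIZE gives the offset at which it
		 * resumes on the next page.
		 */
		printf("next page resumes at offset %lu\n", off % PAGE_SIZE);
		return 0;
	}

With these values the loop links objects at offsets 1000, 2000, 3000 and
4000, then stops with off == 5000, so the next page resumes at offset 904
(5000 % 4096).
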
Signed-off-by: Dan Streetman <ddstreet@ieee.org>
Acked-by: Minchan Kim <minchan@kernel.org>
Cc: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Cc: Nitin Gupta <ngupta@vflare.org>
Cc: Seth Jennings <sjennings@variantweb.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 6dd9737e
mm/zsmalloc.c
@@ -628,7 +628,7 @@ static void init_zspage(struct page *first_page, struct size_class *class)
 	while (page) {
 		struct page *next_page;
 		struct link_free *link;
-		unsigned int i, objs_on_page;
+		unsigned int i = 1;
 
 		/*
 		 * page->index stores offset of first object starting
@@ -641,15 +641,11 @@ static void init_zspage(struct page *first_page, struct size_class *class)
 		link = (struct link_free *)kmap_atomic(page) +
 						off / sizeof(*link);
-		objs_on_page = (PAGE_SIZE - off) / class->size;
 
-		for (i = 1; i <= objs_on_page; i++) {
-			off += class->size;
-			if (off < PAGE_SIZE) {
-				link->next = obj_location_to_handle(page, i);
-				link += class->size / sizeof(*link);
-			}
-		}
+		while ((off += class->size) < PAGE_SIZE) {
+			link->next = obj_location_to_handle(page, i++);
+			link += class->size / sizeof(*link);
+		}
 
 		/*
 		 * We now come to the last (full or partial) object on this
 		 * page, which must point to the first object on the next
@@ -660,7 +656,7 @@ static void init_zspage(struct page *first_page, struct size_class *class)
 		next_page = get_next_page(page);
 		link->next = obj_location_to_handle(next_page, 0);
 		kunmap_atomic(link);
 		page = next_page;
-		off = (off + class->size) % PAGE_SIZE;
+		off %= PAGE_SIZE;
 	}
 }