Commit 0c3aa83e authored by Joonsoo Kim, committed by Pekka Enberg

slab: change return type of kmem_getpages() to struct page

It is more understandable for kmem_getpages() to return a struct page. With this, we can also drop one translation from virtual address to page in the callers, which produces better code than before (see the short illustrative sketch after the sign-offs below). The code-size change from this patch is:

* Before
   text	   data	    bss	    dec	    hex	filename
  22123	  23434	      4	  45561	   b1f9	mm/slab.o

* After
   text	   data	    bss	    dec	    hex	filename
  22074	  23434	      4	  45512	   b1c8	mm/slab.o

This also helps a following patch remove struct slab's colouroff.

Acked-by: Andi Kleen <ak@linux.intel.com>
Acked-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Pekka Enberg <penberg@iki.fi>
parent 73293c2f
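The virt-to-page translation mentioned in the commit message can be illustrated with a tiny userspace sketch. This is not kernel code and not part of the patch: every name below (toy_page, toy_mem_map, toy_getpages_old/new, toy_virt_to_page) is a hypothetical stand-in, used only to show why returning the page descriptor instead of the mapped address lets callers skip a reverse lookup.

/*
 * Minimal userspace sketch, NOT kernel code: all names below are
 * hypothetical stand-ins used only to illustrate the idea.
 */
#include <stdio.h>
#include <stdlib.h>

#define NPAGES 8

struct toy_page {                      /* stand-in for struct page */
	void *addr;                    /* virtual address it backs */
};

static struct toy_page toy_mem_map[NPAGES];   /* stand-in for mem_map */

/* Old shape: return the mapped address; the descriptor is lost. */
static void *toy_getpages_old(int idx)
{
	return toy_mem_map[idx].addr;
}

/* The reverse lookup the old shape forces on callers. */
static struct toy_page *toy_virt_to_page(void *addr)
{
	for (int i = 0; i < NPAGES; i++)
		if (toy_mem_map[i].addr == addr)
			return &toy_mem_map[i];
	return NULL;
}

/* New shape: return the descriptor; the address stays one call away. */
static struct toy_page *toy_getpages_new(int idx)
{
	return &toy_mem_map[idx];
}

int main(void)
{
	for (int i = 0; i < NPAGES; i++)
		toy_mem_map[i].addr = malloc(4096);

	/* Before: allocate, then translate the address back to its page. */
	struct toy_page *page_old = toy_virt_to_page(toy_getpages_old(3));

	/* After: the page descriptor comes back directly. */
	struct toy_page *page_new = toy_getpages_new(3);

	printf("old=%p new=%p same=%d\n",
	       (void *)page_old, (void *)page_new, page_old == page_new);

	for (int i = 0; i < NPAGES; i++)
		free(toy_mem_map[i].addr);
	return 0;
}

In the patch below this is exactly the shape of the change: kmem_getpages() returns page instead of page_address(page), and kmem_freepages(), alloc_slabmgmt(), slab_map_pages() and cache_grow() take the struct page directly.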
@@ -205,7 +205,7 @@ typedef unsigned int kmem_bufctl_t;
 struct slab_rcu {
 	struct rcu_head head;
 	struct kmem_cache *cachep;
-	void *addr;
+	struct page *page;
 };
 
 /*
@@ -1737,7 +1737,8 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
  * did not request dmaable memory, we might get it, but that
  * would be relatively rare and ignorable.
  */
-static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
+static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
+								int nodeid)
 {
 	struct page *page;
 	int nr_pages;
@@ -1790,16 +1791,15 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 		kmemcheck_mark_unallocated_pages(page, nr_pages);
 	}
 
-	return page_address(page);
+	return page;
 }
 
 /*
  * Interface to system's page release.
  */
-static void kmem_freepages(struct kmem_cache *cachep, void *addr)
+static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
 {
 	unsigned long i = (1 << cachep->gfporder);
-	struct page *page = virt_to_page(addr);
 	const unsigned long nr_freed = i;
 
 	kmemcheck_free_shadow(page, cachep->gfporder);
@@ -1821,7 +1821,7 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr)
 	memcg_release_pages(cachep, cachep->gfporder);
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += nr_freed;
-	free_memcg_kmem_pages((unsigned long)addr, cachep->gfporder);
+	__free_memcg_kmem_pages(page, cachep->gfporder);
 }
 
 static void kmem_rcu_free(struct rcu_head *head)
@@ -1829,7 +1829,7 @@ static void kmem_rcu_free(struct rcu_head *head)
 	struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
 	struct kmem_cache *cachep = slab_rcu->cachep;
 
-	kmem_freepages(cachep, slab_rcu->addr);
+	kmem_freepages(cachep, slab_rcu->page);
 	if (OFF_SLAB(cachep))
 		kmem_cache_free(cachep->slabp_cache, slab_rcu);
 }
@@ -2048,7 +2048,7 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slab
  */
 static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
 {
-	void *addr = slabp->s_mem - slabp->colouroff;
+	struct page *page = virt_to_head_page(slabp->s_mem);
 
 	slab_destroy_debugcheck(cachep, slabp);
 	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
@@ -2056,10 +2056,10 @@ static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
 
 		slab_rcu = (struct slab_rcu *)slabp;
 		slab_rcu->cachep = cachep;
-		slab_rcu->addr = addr;
+		slab_rcu->page = page;
 		call_rcu(&slab_rcu->head, kmem_rcu_free);
 	} else {
-		kmem_freepages(cachep, addr);
+		kmem_freepages(cachep, page);
 		if (OFF_SLAB(cachep))
 			kmem_cache_free(cachep->slabp_cache, slabp);
 	}
@@ -2604,11 +2604,12 @@ int __kmem_cache_shutdown(struct kmem_cache *cachep)
  * kmem_find_general_cachep till the initialization is complete.
  * Hence we cannot have slabp_cache same as the original cache.
  */
-static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
-				   int colour_off, gfp_t local_flags,
-				   int nodeid)
+static struct slab *alloc_slabmgmt(struct kmem_cache *cachep,
+				   struct page *page, int colour_off,
+				   gfp_t local_flags, int nodeid)
 {
 	struct slab *slabp;
+	void *addr = page_address(page);
 
 	if (OFF_SLAB(cachep)) {
 		/* Slab management obj is off-slab. */
@@ -2625,12 +2626,12 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
 		if (!slabp)
 			return NULL;
 	} else {
-		slabp = objp + colour_off;
+		slabp = addr + colour_off;
 		colour_off += cachep->slab_size;
 	}
 	slabp->inuse = 0;
 	slabp->colouroff = colour_off;
-	slabp->s_mem = objp + colour_off;
+	slabp->s_mem = addr + colour_off;
 	slabp->nodeid = nodeid;
 	slabp->free = 0;
 	return slabp;
@@ -2741,12 +2742,9 @@ static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
  * virtual address for kfree, ksize, and slab debugging.
  */
 static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
-			   void *addr)
+			   struct page *page)
 {
 	int nr_pages;
-	struct page *page;
-
-	page = virt_to_page(addr);
 
 	nr_pages = 1;
 	if (likely(!PageCompound(page)))
@@ -2764,7 +2762,7 @@ static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
  * kmem_cache_alloc() when there are no active objs left in a cache.
  */
 static int cache_grow(struct kmem_cache *cachep,
-		gfp_t flags, int nodeid, void *objp)
+		gfp_t flags, int nodeid, struct page *page)
 {
 	struct slab *slabp;
 	size_t offset;
@@ -2807,18 +2805,18 @@ static int cache_grow(struct kmem_cache *cachep,
 	 * Get mem for the objs. Attempt to allocate a physical page from
 	 * 'nodeid'.
 	 */
-	if (!objp)
-		objp = kmem_getpages(cachep, local_flags, nodeid);
-	if (!objp)
+	if (!page)
+		page = kmem_getpages(cachep, local_flags, nodeid);
+	if (!page)
 		goto failed;
 
 	/* Get slab management. */
-	slabp = alloc_slabmgmt(cachep, objp, offset,
+	slabp = alloc_slabmgmt(cachep, page, offset,
 			local_flags & ~GFP_CONSTRAINT_MASK, nodeid);
 	if (!slabp)
 		goto opps1;
 
-	slab_map_pages(cachep, slabp, objp);
+	slab_map_pages(cachep, slabp, page);
 
 	cache_init_objs(cachep, slabp);
@@ -2834,7 +2832,7 @@ static int cache_grow(struct kmem_cache *cachep,
 	spin_unlock(&n->list_lock);
 	return 1;
 opps1:
-	kmem_freepages(cachep, objp);
+	kmem_freepages(cachep, page);
 failed:
 	if (local_flags & __GFP_WAIT)
 		local_irq_disable();
@@ -3250,18 +3248,20 @@ static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
 		 * We may trigger various forms of reclaim on the allowed
 		 * set and go into memory reserves if necessary.
 		 */
+		struct page *page;
+
 		if (local_flags & __GFP_WAIT)
 			local_irq_enable();
 		kmem_flagcheck(cache, flags);
-		obj = kmem_getpages(cache, local_flags, numa_mem_id());
+		page = kmem_getpages(cache, local_flags, numa_mem_id());
 		if (local_flags & __GFP_WAIT)
 			local_irq_disable();
-		if (obj) {
+		if (page) {
 			/*
 			 * Insert into the appropriate per node queues
 			 */
-			nid = page_to_nid(virt_to_page(obj));
-			if (cache_grow(cache, flags, nid, obj)) {
+			nid = page_to_nid(page);
+			if (cache_grow(cache, flags, nid, page)) {
 				obj = ____cache_alloc_node(cache,
 					flags | GFP_THISNODE, nid);
 				if (!obj)
...