Commit 34bf6ef9 authored by Dave Hansen, committed by Pekka Enberg

mm: slab/slub: use page->lru consistently instead of page->list

'struct page' has two list_head fields: 'lru' and 'list'.  Conveniently,
they are unioned together.  This means that code can use them
interchangeably, which gets horribly confusing, like with this nugget from
slab.c:

>	list_del(&page->lru);
>	if (page->active == cachep->num)
>		list_add(&page->list, &n->slabs_full);
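The mixing compiles cleanly only because the two names alias the same
storage.  A minimal user-space sketch (not part of the patch; struct
mini_page is a hypothetical stand-in for struct page) shows why the
compiler never complains:

#include <assert.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

/* Hypothetical miniature of struct page: two differently-named
 * list heads sharing one union slot, as in the kernel's union. */
struct mini_page {
	union {
		struct list_head lru;
		struct list_head list;
	};
};

int main(void)
{
	struct mini_page p;

	/* Both member names resolve to the same bytes, so linking a
	 * page in via ->list and deleting it via ->lru is legal C --
	 * it just reads as if two different lists were involved. */
	assert((void *)&p.lru == (void *)&p.list);
	printf("&p.lru == &p.list: the names alias one field\n");
	return 0;
}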

This patch makes the slab and slub code use page->lru universally instead
of mixing ->list and ->lru.

So, the new rule is: page->lru is what you use if you want to keep
your page on a list.  Don't like the fact that it's not called ->list?
Too bad.
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Acked-by: Christoph Lameter <cl@linux.com>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
parent 5f0985bb
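
To make the rule concrete before the diff, here is a minimal user-space
re-creation of the list idiom (the helpers are simplified from the
kernel's <linux/list.h>; struct mini_page and its ids are made up for
illustration).  The third argument of list_for_each_entry() is just the
name of the embedded linkage member, which is why the slob hunks below
rename it from list to lru:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define INIT_LIST_HEAD(h) do { (h)->next = (h); (h)->prev = (h); } while (0)

/* Simplified list_add(): insert 'new' right after 'head'. */
static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

/* Recover the containing struct from an embedded list_head. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Simplified list_for_each_entry(): 'member' names the embedded
 * list_head inside *pos (__typeof__ is a GNU extension). */
#define list_for_each_entry(pos, head, member)				      \
	for (pos = container_of((head)->next, __typeof__(*pos), member);     \
	     &pos->member != (head);					      \
	     pos = container_of(pos->member.next, __typeof__(*pos), member))

struct mini_page {
	int id;
	struct list_head lru;	/* the one and only list linkage */
};

int main(void)
{
	struct list_head slabs_full;
	struct mini_page a = { .id = 1 }, b = { .id = 2 };
	struct mini_page *sp;

	INIT_LIST_HEAD(&slabs_full);
	list_add(&a.lru, &slabs_full);
	list_add(&b.lru, &slabs_full);

	/* Every add and walk names ->lru, matching the patched code. */
	list_for_each_entry(sp, &slabs_full, lru)
		printf("page %d on slabs_full\n", sp->id);
	return 0;
}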
include/linux/mm_types.h
@@ -124,6 +124,8 @@ struct page {
 	union {
 		struct list_head lru;	/* Pageout list, eg. active_list
 					 * protected by zone->lru_lock !
+					 * Can be used as a generic list
+					 * by the page owner.
 					 */
 		struct {		/* slub per cpu partial pages */
 			struct page *next;	/* Next partial slab */
@@ -136,7 +138,6 @@ struct page {
 #endif
 		};
 
-		struct list_head list;	/* slobs list of pages */
 		struct slab *slab_page; /* slab fields */
 		struct rcu_head rcu_head;	/* Used by SLAB
 						 * when destroying via RCU
...
mm/slab.c
@@ -2922,9 +2922,9 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags,
 		/* move slabp to correct slabp list: */
 		list_del(&page->lru);
 		if (page->active == cachep->num)
-			list_add(&page->list, &n->slabs_full);
+			list_add(&page->lru, &n->slabs_full);
 		else
-			list_add(&page->list, &n->slabs_partial);
+			list_add(&page->lru, &n->slabs_partial);
 	}
 
 must_grow:
...
mm/slob.c
@@ -111,13 +111,13 @@ static inline int slob_page_free(struct page *sp)
 
 static void set_slob_page_free(struct page *sp, struct list_head *list)
 {
-	list_add(&sp->list, list);
+	list_add(&sp->lru, list);
 	__SetPageSlobFree(sp);
 }
 
 static inline void clear_slob_page_free(struct page *sp)
 {
-	list_del(&sp->list);
+	list_del(&sp->lru);
 	__ClearPageSlobFree(sp);
 }
@@ -282,7 +282,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 	spin_lock_irqsave(&slob_lock, flags);
 	/* Iterate through each partially free page, try to find room */
-	list_for_each_entry(sp, slob_list, list) {
+	list_for_each_entry(sp, slob_list, lru) {
 #ifdef CONFIG_NUMA
 		/*
 		 * If there's a node specification, search for a partial
@@ -296,7 +296,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 			continue;
 
 		/* Attempt to alloc */
-		prev = sp->list.prev;
+		prev = sp->lru.prev;
 		b = slob_page_alloc(sp, size, align);
 		if (!b)
 			continue;
@@ -322,7 +322,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 	spin_lock_irqsave(&slob_lock, flags);
 	sp->units = SLOB_UNITS(PAGE_SIZE);
 	sp->freelist = b;
-	INIT_LIST_HEAD(&sp->list);
+	INIT_LIST_HEAD(&sp->lru);
 	set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
 	set_slob_page_free(sp, slob_list);
 	b = slob_page_alloc(sp, size, align);
...