Commit bf3a3407 authored by Linus Torvalds

Merge branch 'slab/next' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux

Pull slab changes from Pekka Enberg:
 "The biggest change is byte-sized freelist indices which reduces slab
  freelist memory usage:

    https://lkml.org/lkml/2013/12/2/64"

* 'slab/next' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux:
  mm: slab/slub: use page->list consistently instead of page->lru
  mm/slab.c: cleanup outdated comments and unify variables naming
  slab: fix wrongly used macro
  slub: fix high order page allocation problem with __GFP_NOFAIL
  slab: Make allocations with GFP_ZERO slightly more efficient
  slab: make more slab management structure off the slab
  slab: introduce byte sized index for the freelist of a slab
  slab: restrict the number of objects in a slab
  slab: introduce helper functions to get/set free object
  slab: factor out calculate nr objects in cache_estimate
parents 321d03c8 34bf6ef9
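
For context on the freelist change the pull message refers to: SLAB keeps a per-slab freelist with one index per object, and this series lets that index be a single byte whenever a slab holds at most 2^8 objects. Below is a minimal userspace sketch (illustrative names and sizes, not kernel code) of the resulting saving when the index shrinks from a 4-byte integer to one byte.

/*
 * Illustrative userspace sketch only -- not kernel code.  SLAB keeps a
 * freelist with one index per object in a slab; storing that index in one
 * byte instead of a 4-byte integer shrinks the per-slab management data.
 */
#include <stdio.h>

#define PAGE_SZ 4096U

/* Bytes of freelist needed for one slab: one index per object. */
static unsigned long freelist_bytes(unsigned int nr_objs, unsigned int idx_size)
{
	return (unsigned long)nr_objs * idx_size;
}

int main(void)
{
	unsigned int obj_size = 64;
	unsigned int nr_objs = PAGE_SZ / obj_size;	/* 64 objects per 4K slab */

	printf("4-byte indices: %lu bytes per slab\n",
	       freelist_bytes(nr_objs, sizeof(unsigned int)));
	printf("1-byte indices: %lu bytes per slab\n",
	       freelist_bytes(nr_objs, sizeof(unsigned char)));
	return 0;
}

With 64-byte objects, for example, the per-slab freelist drops from 256 bytes to 64 bytes.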
@@ -124,6 +124,8 @@ struct page {
 	union {
 		struct list_head lru;	/* Pageout list, eg. active_list
 					 * protected by zone->lru_lock !
+					 * Can be used as a generic list
+					 * by the page owner.
 					 */
 		struct {		/* slub per cpu partial pages */
 			struct page *next;	/* Next partial slab */
@@ -136,7 +138,6 @@ struct page {
 #endif
 		};
-		struct list_head list;	/* slobs list of pages */
 		struct slab *slab_page; /* slab fields */
 		struct rcu_head rcu_head;	/* Used by SLAB
 						 * when destroying via RCU
...
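
The two added comment lines state the contract the slob conversion further down relies on: a page's owner (an allocator that got the page from the page allocator and keeps it off the LRU) may use page->lru as a plain list node. A kernel-style sketch under that assumption, with a hypothetical owner_pages list rather than anything from this commit:

#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/mm.h>

static LIST_HEAD(owner_pages);	/* hypothetical private list kept by a page owner */

static struct page *owner_grab_page(gfp_t gfp)
{
	struct page *page = alloc_page(gfp);

	/* The page is ours and not on any LRU, so lru is free to reuse. */
	if (page)
		list_add(&page->lru, &owner_pages);
	return page;
}

static void owner_drop_pages(void)
{
	struct page *page, *tmp;

	list_for_each_entry_safe(page, tmp, &owner_pages, lru) {
		list_del(&page->lru);
		__free_page(page);
	}
}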
@@ -242,6 +242,17 @@ struct kmem_cache {
 #define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
 #endif
 
+/*
+ * This restriction comes from byte sized index implementation.
+ * Page size is normally 2^12 bytes and, in this case, if we want to use
+ * byte sized index which can represent 2^8 entries, the size of the object
+ * should be equal or greater to 2^12 / 2^8 = 2^4 = 16.
+ * If minimum size of kmalloc is less than 16, we use it as minimum object
+ * size and give up to use byte sized index.
+ */
+#define SLAB_OBJ_MIN_SIZE	(KMALLOC_MIN_SIZE < 16 ? \
+				(KMALLOC_MIN_SIZE) : 16)
+
 #ifndef CONFIG_SLOB
 extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
 #ifdef CONFIG_ZONE_DMA
...
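
The arithmetic in the new comment, worked through: with a 4096-byte page and a one-byte index that can distinguish at most 256 objects, each object must occupy at least 4096 / 256 = 16 bytes, which is what SLAB_OBJ_MIN_SIZE pins down. A standalone sketch of that check (PAGE_SZ and BYTE_IDX_MAX are illustrative names, not kernel macros):

#include <assert.h>
#include <stdio.h>

#define PAGE_SZ      4096U	/* 2^12, the usual page size */
#define BYTE_IDX_MAX 256U	/* a one-byte index can name 2^8 objects */

int main(void)
{
	unsigned int min_obj = PAGE_SZ / BYTE_IDX_MAX;	/* 2^12 / 2^8 = 16 */

	assert(min_obj == 16);
	/* 16-byte objects: exactly 256 per page, still addressable by a byte. */
	printf("%u-byte objects -> %u per page\n", min_obj, PAGE_SZ / min_obj);
	/* 8-byte objects: 512 per page, too many for a byte-sized index. */
	printf("8-byte objects  -> %u per page\n", PAGE_SZ / 8);
	return 0;
}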
@@ -111,13 +111,13 @@ static inline int slob_page_free(struct page *sp)
 static void set_slob_page_free(struct page *sp, struct list_head *list)
 {
-	list_add(&sp->list, list);
+	list_add(&sp->lru, list);
 	__SetPageSlobFree(sp);
 }
 
 static inline void clear_slob_page_free(struct page *sp)
 {
-	list_del(&sp->list);
+	list_del(&sp->lru);
 	__ClearPageSlobFree(sp);
 }
@@ -282,7 +282,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 	spin_lock_irqsave(&slob_lock, flags);
 	/* Iterate through each partially free page, try to find room */
-	list_for_each_entry(sp, slob_list, list) {
+	list_for_each_entry(sp, slob_list, lru) {
 #ifdef CONFIG_NUMA
 		/*
 		 * If there's a node specification, search for a partial
@@ -296,7 +296,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 			continue;
 		/* Attempt to alloc */
-		prev = sp->list.prev;
+		prev = sp->lru.prev;
 		b = slob_page_alloc(sp, size, align);
 		if (!b)
 			continue;
@@ -322,7 +322,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 	spin_lock_irqsave(&slob_lock, flags);
 	sp->units = SLOB_UNITS(PAGE_SIZE);
 	sp->freelist = b;
-	INIT_LIST_HEAD(&sp->list);
+	INIT_LIST_HEAD(&sp->lru);
 	set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
 	set_slob_page_free(sp, slob_list);
 	b = slob_page_alloc(sp, size, align);
...
@@ -1352,11 +1352,12 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	page = alloc_slab_page(alloc_gfp, node, oo);
 	if (unlikely(!page)) {
 		oo = s->min;
+		alloc_gfp = flags;
 		/*
 		 * Allocation may have failed due to fragmentation.
 		 * Try a lower order alloc if possible
 		 */
-		page = alloc_slab_page(flags, node, oo);
+		page = alloc_slab_page(alloc_gfp, node, oo);
 		if (page)
 			stat(s, ORDER_FALLBACK);
@@ -1366,7 +1367,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	    && !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
 		int pages = 1 << oo_order(oo);
-		kmemcheck_alloc_shadow(page, oo_order(oo), flags, node);
+		kmemcheck_alloc_shadow(page, oo_order(oo), alloc_gfp, node);
 		/*
 		 * Objects from caches that have a constructor don't get
...
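
This slub hunk is the __GFP_NOFAIL fix from the shortlog: the opportunistic high-order attempt runs with a relaxed gfp mask, and on failure alloc_gfp is now reset to the caller's original flags before the minimum-order retry, so both the fallback allocation and the kmemcheck shadow allocation use the mask the page is really allocated with. A kernel-style sketch of that pattern (function and parameter names are illustrative; the mask derivation follows my reading of allocate_slab()):

#include <linux/gfp.h>
#include <linux/mm.h>

static struct page *try_high_then_low(gfp_t flags, int node,
				      unsigned int high_order,
				      unsigned int min_order)
{
	/*
	 * Opportunistic attempt: relax the mask so a high-order failure is
	 * cheap and __GFP_NOFAIL cannot force an oversized allocation.
	 */
	gfp_t alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
	struct page *page = alloc_pages_node(node, alloc_gfp, high_order);

	if (!page) {
		/*
		 * Fall back to the caller's original semantics (including
		 * __GFP_NOFAIL, if set) at the minimum order.
		 */
		alloc_gfp = flags;
		page = alloc_pages_node(node, alloc_gfp, min_order);
	}
	/*
	 * Any follow-up allocation tied to this page (e.g. a debug shadow)
	 * should also use alloc_gfp, the mask actually used above.
	 */
	return page;
}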