Commit 24f971ab authored by Linus Torvalds

Merge branch 'slab/next' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux

Pull SLAB changes from Pekka Enberg:
 "The patches from Joonsoo Kim switch mm/slab.c to use 'struct page' for
  slab internals similar to mm/slub.c.  This reduces memory usage and
  improves performance:

    https://lkml.org/lkml/2013/10/16/155

  Rest of the changes are bug fixes from various people"

* 'slab/next' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux: (21 commits)
  mm, slub: fix the typo in mm/slub.c
  mm, slub: fix the typo in include/linux/slub_def.h
  slub: Handle NULL parameter in kmem_cache_flags
  slab: replace non-existing 'struct freelist *' with 'void *'
  slab: fix to calm down kmemleak warning
  slub: proper kmemleak tracking if CONFIG_SLUB_DEBUG disabled
  slab: rename slab_bufctl to slab_freelist
  slab: remove useless statement for checking pfmemalloc
  slab: use struct page for slab management
  slab: replace free and inuse in struct slab with newly introduced active
  slab: remove SLAB_LIMIT
  slab: remove kmem_bufctl_t
  slab: change the management method of free objects of the slab
  slab: use __GFP_COMP flag for allocating slab pages
  slab: use well-defined macro, virt_to_slab()
  slab: overloading the RCU head over the LRU for RCU free
  slab: remove cachep in struct slab_rcu
  slab: remove nodeid in struct slab
  slab: remove colouroff in struct slab
  slab: change return type of kmem_getpages() to struct page
  ...
parents 3bab0bf0 721ae22a
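The diff below touches include/linux/mm_types.h, include/linux/slab.h, include/linux/slab_def.h, include/linux/slub_def.h, mm/slab.c and mm/slub.c, in that order. The core of Joonsoo Kim's series is that the old separately-allocated 'struct slab' descriptor (s_mem, inuse, free, plus a kmem_bufctl_t array) is folded into struct page: the page now carries s_mem, an active counter and a freelist pointer to a plain array of free object indices. The following user-space toy is only a conceptual sketch of that bookkeeping, not the kernel code; every name in it (toy_cache, toy_page, toy_get_obj, ...) is invented for illustration.

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_cache {
    size_t size;            /* object size */
    unsigned int num;       /* objects per slab */
};

struct toy_page {           /* stands in for struct page */
    void *s_mem;            /* address of the first object */
    unsigned int active;    /* objects currently handed out */
    unsigned int *freelist; /* array of free object indices */
};

static void toy_page_init(const struct toy_cache *c, struct toy_page *pg, void *mem)
{
    pg->s_mem = mem;
    pg->active = 0;
    pg->freelist = malloc(c->num * sizeof(unsigned int));
    for (unsigned int i = 0; i < c->num; i++)
        pg->freelist[i] = i;            /* like cache_init_objs() */
}

static void *toy_get_obj(const struct toy_cache *c, struct toy_page *pg)
{
    /* like slab_get_obj(): consume the index stored at slot 'active' */
    unsigned int idx = pg->freelist[pg->active++];
    return (char *)pg->s_mem + c->size * idx;
}

static void toy_put_obj(const struct toy_cache *c, struct toy_page *pg, void *obj)
{
    /* like slab_put_obj(): push the object's index back onto the freelist */
    unsigned int idx = (unsigned int)(((char *)obj - (char *)pg->s_mem) / c->size);
    pg->freelist[--pg->active] = idx;
}

int main(void)
{
    struct toy_cache c = { .size = 64, .num = 8 };
    struct toy_page pg;
    void *mem = malloc(c.size * c.num);

    toy_page_init(&c, &pg, mem);
    void *a = toy_get_obj(&c, &pg);
    void *b = toy_get_obj(&c, &pg);
    assert(pg.active == 2 && a != b);
    toy_put_obj(&c, &pg, a);
    assert(pg.active == 1);
    printf("second object at offset %td, active=%u\n",
           (char *)b - (char *)pg.s_mem, pg.active);

    free(pg.freelist);
    free(mem);
    return 0;
}

Allocation pops the index stored at slot 'active' and increments it; free decrements 'active' and stores the object's index back. That is the same discipline the new slab_get_obj()/slab_put_obj() in mm/slab.c apply to page->freelist, with page->active replacing the old inuse/free fields.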
@@ -44,6 +44,7 @@ struct page {
     /* First double word block */
     unsigned long flags;            /* Atomic flags, some possibly
                                      * updated asynchronously */
+    union {
     struct address_space *mapping;  /* If low bit clear, points to
                                      * inode address_space, or NULL.
                                      * If page mapped as anonymous
@@ -51,11 +52,14 @@ struct page {
                                      * it points to anon_vma object:
                                      * see PAGE_MAPPING_ANON below.
                                      */
+    void *s_mem;                    /* slab first object */
+    };
+
     /* Second double word */
     struct {
         union {
             pgoff_t index;          /* Our offset within mapping. */
-            void *freelist;         /* slub/slob first free object */
+            void *freelist;         /* sl[aou]b first free object */
             bool pfmemalloc;        /* If set by the page allocator,
                                      * ALLOC_NO_WATERMARKS was set
                                      * and the low watermark was not
@@ -111,6 +115,7 @@ struct page {
                 };
                 atomic_t _count;    /* Usage count, see below. */
             };
+            unsigned int active;    /* SLAB */
         };
     };
@@ -132,6 +137,9 @@ struct page {
         struct list_head list;      /* slobs list of pages */
         struct slab *slab_page;     /* slab fields */
+        struct rcu_head rcu_head;   /* Used by SLAB
+                                     * when destroying via RCU
+                                     */
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
         pgtable_t pmd_huge_pte;     /* protected by page->ptl */
 #endif
......
@@ -53,7 +53,14 @@
  * }
  * rcu_read_unlock();
  *
- * See also the comment on struct slab_rcu in mm/slab.c.
+ * This is useful if we need to approach a kernel structure obliquely,
+ * from its address obtained without the usual locking. We can lock
+ * the structure to stabilize it and check it's still at the given address,
+ * only if we can be sure that the memory has not been meanwhile reused
+ * for some other kind of object (which our subsystem's lock might corrupt).
+ *
+ * rcu_read_lock before reading the address, then rcu_read_unlock after
+ * taking the spinlock within the structure expected at that address.
  */
 #define SLAB_DESTROY_BY_RCU    0x00080000UL    /* Defer freeing slabs to RCU */
 #define SLAB_MEM_SPREAD        0x00100000UL    /* Spread some memory over cpuset */
......
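The comment block moved into include/linux/slab.h above describes the lookup discipline that SLAB_DESTROY_BY_RCU buys you: the slab's pages are not handed back to the page allocator inside an RCU grace period, so a stale pointer still points at an object of the same type, and identity must be re-checked under the object's own lock. A minimal kernel-style sketch of that pattern follows; struct thing and find_thing_lockless() are hypothetical names, not existing kernel APIs, and only the RCU/lock/re-check sequence is the point.

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>

struct thing {
    spinlock_t lock;
    unsigned long key;
    /* ... payload ... */
};

/* The cache is assumed to be created elsewhere with:
 *   kmem_cache_create("thing", sizeof(struct thing), 0,
 *                     SLAB_DESTROY_BY_RCU, NULL);
 */

static struct thing *find_thing_lockless(unsigned long key); /* hypothetical lockless index */

/* Returns the object locked, or NULL. */
static struct thing *thing_lookup(unsigned long key)
{
    struct thing *t;

    rcu_read_lock();
again:
    t = find_thing_lockless(key);
    if (t) {
        spin_lock(&t->lock);
        if (t->key != key) {
            /* Object was freed and reused for another 'thing'; the
             * memory itself could not have left the cache, which is
             * all SLAB_DESTROY_BY_RCU guarantees. Retry the lookup.
             */
            spin_unlock(&t->lock);
            goto again;
        }
    }
    rcu_read_unlock();
    return t;
}

The freeing side is assumed to take the same lock and invalidate ->key before calling kmem_cache_free(), otherwise the re-check above proves nothing.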
@@ -27,8 +27,8 @@ struct kmem_cache {
     size_t colour;                  /* cache colouring range */
     unsigned int colour_off;        /* colour offset */
-    struct kmem_cache *slabp_cache;
-    unsigned int slab_size;
+    struct kmem_cache *freelist_cache;
+    unsigned int freelist_size;
     /* constructor func */
     void (*ctor)(void *obj);
......
@@ -11,7 +11,7 @@
 enum stat_item {
     ALLOC_FASTPATH,        /* Allocation from cpu slab */
     ALLOC_SLOWPATH,        /* Allocation by getting a new cpu slab */
-    FREE_FASTPATH,         /* Free to cpu slub */
+    FREE_FASTPATH,         /* Free to cpu slab */
     FREE_SLOWPATH,         /* Freeing not to cpu slab */
     FREE_FROZEN,           /* Freeing to frozen slab */
     FREE_ADD_PARTIAL,      /* Freeing moves slab to partial list */
......
...@@ -163,72 +163,6 @@ ...@@ -163,72 +163,6 @@
*/ */
static bool pfmemalloc_active __read_mostly; static bool pfmemalloc_active __read_mostly;
/*
* kmem_bufctl_t:
*
* Bufctl's are used for linking objs within a slab
* linked offsets.
*
* This implementation relies on "struct page" for locating the cache &
* slab an object belongs to.
* This allows the bufctl structure to be small (one int), but limits
* the number of objects a slab (not a cache) can contain when off-slab
* bufctls are used. The limit is the size of the largest general cache
* that does not use off-slab slabs.
* For 32bit archs with 4 kB pages, is this 56.
* This is not serious, as it is only for large objects, when it is unwise
* to have too many per slab.
* Note: This limit can be raised by introducing a general cache whose size
* is less than 512 (PAGE_SIZE<<3), but greater than 256.
*/
typedef unsigned int kmem_bufctl_t;
#define BUFCTL_END (((kmem_bufctl_t)(~0U))-0)
#define BUFCTL_FREE (((kmem_bufctl_t)(~0U))-1)
#define BUFCTL_ACTIVE (((kmem_bufctl_t)(~0U))-2)
#define SLAB_LIMIT (((kmem_bufctl_t)(~0U))-3)
/*
* struct slab_rcu
*
* slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to
* arrange for kmem_freepages to be called via RCU. This is useful if
* we need to approach a kernel structure obliquely, from its address
* obtained without the usual locking. We can lock the structure to
* stabilize it and check it's still at the given address, only if we
* can be sure that the memory has not been meanwhile reused for some
* other kind of object (which our subsystem's lock might corrupt).
*
* rcu_read_lock before reading the address, then rcu_read_unlock after
* taking the spinlock within the structure expected at that address.
*/
struct slab_rcu {
struct rcu_head head;
struct kmem_cache *cachep;
void *addr;
};
/*
* struct slab
*
* Manages the objs in a slab. Placed either at the beginning of mem allocated
* for a slab, or allocated from an general cache.
* Slabs are chained into three list: fully used, partial, fully free slabs.
*/
struct slab {
union {
struct {
struct list_head list;
unsigned long colouroff;
void *s_mem; /* including colour offset */
unsigned int inuse; /* num of objs active in slab */
kmem_bufctl_t free;
unsigned short nodeid;
};
struct slab_rcu __slab_cover_slab_rcu;
};
};
/* /*
* struct array_cache * struct array_cache
* *
...@@ -456,18 +390,10 @@ static inline struct kmem_cache *virt_to_cache(const void *obj) ...@@ -456,18 +390,10 @@ static inline struct kmem_cache *virt_to_cache(const void *obj)
return page->slab_cache; return page->slab_cache;
} }
static inline struct slab *virt_to_slab(const void *obj) static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
{
struct page *page = virt_to_head_page(obj);
VM_BUG_ON(!PageSlab(page));
return page->slab_page;
}
static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
unsigned int idx) unsigned int idx)
{ {
return slab->s_mem + cache->size * idx; return page->s_mem + cache->size * idx;
} }
/* /*
...@@ -477,9 +403,9 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab, ...@@ -477,9 +403,9 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
* reciprocal_divide(offset, cache->reciprocal_buffer_size) * reciprocal_divide(offset, cache->reciprocal_buffer_size)
*/ */
static inline unsigned int obj_to_index(const struct kmem_cache *cache, static inline unsigned int obj_to_index(const struct kmem_cache *cache,
const struct slab *slab, void *obj) const struct page *page, void *obj)
{ {
u32 offset = (obj - slab->s_mem); u32 offset = (obj - page->s_mem);
return reciprocal_divide(offset, cache->reciprocal_buffer_size); return reciprocal_divide(offset, cache->reciprocal_buffer_size);
} }
...@@ -641,7 +567,7 @@ static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep) ...@@ -641,7 +567,7 @@ static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
static size_t slab_mgmt_size(size_t nr_objs, size_t align) static size_t slab_mgmt_size(size_t nr_objs, size_t align)
{ {
return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align); return ALIGN(nr_objs * sizeof(unsigned int), align);
} }
/* /*
...@@ -660,8 +586,7 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size, ...@@ -660,8 +586,7 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size,
* on it. For the latter case, the memory allocated for a * on it. For the latter case, the memory allocated for a
* slab is used for: * slab is used for:
* *
* - The struct slab * - One unsigned int for each object
* - One kmem_bufctl_t for each object
* - Padding to respect alignment of @align * - Padding to respect alignment of @align
* - @buffer_size bytes for each object * - @buffer_size bytes for each object
* *
...@@ -674,8 +599,6 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size, ...@@ -674,8 +599,6 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size,
mgmt_size = 0; mgmt_size = 0;
nr_objs = slab_size / buffer_size; nr_objs = slab_size / buffer_size;
if (nr_objs > SLAB_LIMIT)
nr_objs = SLAB_LIMIT;
} else { } else {
/* /*
* Ignore padding for the initial guess. The padding * Ignore padding for the initial guess. The padding
...@@ -685,8 +608,7 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size, ...@@ -685,8 +608,7 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size,
* into the memory allocation when taking the padding * into the memory allocation when taking the padding
* into account. * into account.
*/ */
nr_objs = (slab_size - sizeof(struct slab)) / nr_objs = (slab_size) / (buffer_size + sizeof(unsigned int));
(buffer_size + sizeof(kmem_bufctl_t));
/* /*
* This calculated number will be either the right * This calculated number will be either the right
...@@ -696,9 +618,6 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size, ...@@ -696,9 +618,6 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size,
> slab_size) > slab_size)
nr_objs--; nr_objs--;
if (nr_objs > SLAB_LIMIT)
nr_objs = SLAB_LIMIT;
mgmt_size = slab_mgmt_size(nr_objs, align); mgmt_size = slab_mgmt_size(nr_objs, align);
} }
*num = nr_objs; *num = nr_objs;
...@@ -829,10 +748,8 @@ static struct array_cache *alloc_arraycache(int node, int entries, ...@@ -829,10 +748,8 @@ static struct array_cache *alloc_arraycache(int node, int entries,
return nc; return nc;
} }
static inline bool is_slab_pfmemalloc(struct slab *slabp) static inline bool is_slab_pfmemalloc(struct page *page)
{ {
struct page *page = virt_to_page(slabp->s_mem);
return PageSlabPfmemalloc(page); return PageSlabPfmemalloc(page);
} }
...@@ -841,23 +758,23 @@ static void recheck_pfmemalloc_active(struct kmem_cache *cachep, ...@@ -841,23 +758,23 @@ static void recheck_pfmemalloc_active(struct kmem_cache *cachep,
struct array_cache *ac) struct array_cache *ac)
{ {
struct kmem_cache_node *n = cachep->node[numa_mem_id()]; struct kmem_cache_node *n = cachep->node[numa_mem_id()];
struct slab *slabp; struct page *page;
unsigned long flags; unsigned long flags;
if (!pfmemalloc_active) if (!pfmemalloc_active)
return; return;
spin_lock_irqsave(&n->list_lock, flags); spin_lock_irqsave(&n->list_lock, flags);
list_for_each_entry(slabp, &n->slabs_full, list) list_for_each_entry(page, &n->slabs_full, lru)
if (is_slab_pfmemalloc(slabp)) if (is_slab_pfmemalloc(page))
goto out; goto out;
list_for_each_entry(slabp, &n->slabs_partial, list) list_for_each_entry(page, &n->slabs_partial, lru)
if (is_slab_pfmemalloc(slabp)) if (is_slab_pfmemalloc(page))
goto out; goto out;
list_for_each_entry(slabp, &n->slabs_free, list) list_for_each_entry(page, &n->slabs_free, lru)
if (is_slab_pfmemalloc(slabp)) if (is_slab_pfmemalloc(page))
goto out; goto out;
pfmemalloc_active = false; pfmemalloc_active = false;
...@@ -897,8 +814,8 @@ static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac, ...@@ -897,8 +814,8 @@ static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
*/ */
n = cachep->node[numa_mem_id()]; n = cachep->node[numa_mem_id()];
if (!list_empty(&n->slabs_free) && force_refill) { if (!list_empty(&n->slabs_free) && force_refill) {
struct slab *slabp = virt_to_slab(objp); struct page *page = virt_to_head_page(objp);
ClearPageSlabPfmemalloc(virt_to_head_page(slabp->s_mem)); ClearPageSlabPfmemalloc(page);
clear_obj_pfmemalloc(&objp); clear_obj_pfmemalloc(&objp);
recheck_pfmemalloc_active(cachep, ac); recheck_pfmemalloc_active(cachep, ac);
return objp; return objp;
...@@ -1099,8 +1016,7 @@ static void drain_alien_cache(struct kmem_cache *cachep, ...@@ -1099,8 +1016,7 @@ static void drain_alien_cache(struct kmem_cache *cachep,
static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{ {
struct slab *slabp = virt_to_slab(objp); int nodeid = page_to_nid(virt_to_page(objp));
int nodeid = slabp->nodeid;
struct kmem_cache_node *n; struct kmem_cache_node *n;
struct array_cache *alien = NULL; struct array_cache *alien = NULL;
int node; int node;
...@@ -1111,7 +1027,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) ...@@ -1111,7 +1027,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
* Make sure we are not freeing a object from another node to the array * Make sure we are not freeing a object from another node to the array
* cache on this cpu. * cache on this cpu.
*/ */
if (likely(slabp->nodeid == node)) if (likely(nodeid == node))
return 0; return 0;
n = cachep->node[node]; n = cachep->node[node];
...@@ -1512,6 +1428,8 @@ void __init kmem_cache_init(void) ...@@ -1512,6 +1428,8 @@ void __init kmem_cache_init(void)
{ {
int i; int i;
BUILD_BUG_ON(sizeof(((struct page *)NULL)->lru) <
sizeof(struct rcu_head));
kmem_cache = &kmem_cache_boot; kmem_cache = &kmem_cache_boot;
setup_node_pointer(kmem_cache); setup_node_pointer(kmem_cache);
...@@ -1687,7 +1605,7 @@ static noinline void ...@@ -1687,7 +1605,7 @@ static noinline void
slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid) slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
{ {
struct kmem_cache_node *n; struct kmem_cache_node *n;
struct slab *slabp; struct page *page;
unsigned long flags; unsigned long flags;
int node; int node;
...@@ -1706,15 +1624,15 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid) ...@@ -1706,15 +1624,15 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
continue; continue;
spin_lock_irqsave(&n->list_lock, flags); spin_lock_irqsave(&n->list_lock, flags);
list_for_each_entry(slabp, &n->slabs_full, list) { list_for_each_entry(page, &n->slabs_full, lru) {
active_objs += cachep->num; active_objs += cachep->num;
active_slabs++; active_slabs++;
} }
list_for_each_entry(slabp, &n->slabs_partial, list) { list_for_each_entry(page, &n->slabs_partial, lru) {
active_objs += slabp->inuse; active_objs += page->active;
active_slabs++; active_slabs++;
} }
list_for_each_entry(slabp, &n->slabs_free, list) list_for_each_entry(page, &n->slabs_free, lru)
num_slabs++; num_slabs++;
free_objects += n->free_objects; free_objects += n->free_objects;
...@@ -1736,19 +1654,11 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid) ...@@ -1736,19 +1654,11 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
* did not request dmaable memory, we might get it, but that * did not request dmaable memory, we might get it, but that
* would be relatively rare and ignorable. * would be relatively rare and ignorable.
*/ */
static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid) static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
int nodeid)
{ {
struct page *page; struct page *page;
int nr_pages; int nr_pages;
int i;
#ifndef CONFIG_MMU
/*
* Nommu uses slab's for process anonymous memory allocations, and thus
* requires __GFP_COMP to properly refcount higher order allocations
*/
flags |= __GFP_COMP;
#endif
flags |= cachep->allocflags; flags |= cachep->allocflags;
if (cachep->flags & SLAB_RECLAIM_ACCOUNT) if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
...@@ -1772,12 +1682,9 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid) ...@@ -1772,12 +1682,9 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
else else
add_zone_page_state(page_zone(page), add_zone_page_state(page_zone(page),
NR_SLAB_UNRECLAIMABLE, nr_pages); NR_SLAB_UNRECLAIMABLE, nr_pages);
for (i = 0; i < nr_pages; i++) { __SetPageSlab(page);
__SetPageSlab(page + i);
if (page->pfmemalloc) if (page->pfmemalloc)
SetPageSlabPfmemalloc(page + i); SetPageSlabPfmemalloc(page);
}
memcg_bind_pages(cachep, cachep->gfporder); memcg_bind_pages(cachep, cachep->gfporder);
if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) { if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
...@@ -1789,17 +1696,15 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid) ...@@ -1789,17 +1696,15 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
kmemcheck_mark_unallocated_pages(page, nr_pages); kmemcheck_mark_unallocated_pages(page, nr_pages);
} }
return page_address(page); return page;
} }
/* /*
* Interface to system's page release. * Interface to system's page release.
*/ */
static void kmem_freepages(struct kmem_cache *cachep, void *addr) static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
{ {
unsigned long i = (1 << cachep->gfporder); const unsigned long nr_freed = (1 << cachep->gfporder);
struct page *page = virt_to_page(addr);
const unsigned long nr_freed = i;
kmemcheck_free_shadow(page, cachep->gfporder); kmemcheck_free_shadow(page, cachep->gfporder);
...@@ -1809,27 +1714,28 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr) ...@@ -1809,27 +1714,28 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr)
else else
sub_zone_page_state(page_zone(page), sub_zone_page_state(page_zone(page),
NR_SLAB_UNRECLAIMABLE, nr_freed); NR_SLAB_UNRECLAIMABLE, nr_freed);
while (i--) {
BUG_ON(!PageSlab(page)); BUG_ON(!PageSlab(page));
__ClearPageSlabPfmemalloc(page); __ClearPageSlabPfmemalloc(page);
__ClearPageSlab(page); __ClearPageSlab(page);
page++; page_mapcount_reset(page);
} page->mapping = NULL;
memcg_release_pages(cachep, cachep->gfporder); memcg_release_pages(cachep, cachep->gfporder);
if (current->reclaim_state) if (current->reclaim_state)
current->reclaim_state->reclaimed_slab += nr_freed; current->reclaim_state->reclaimed_slab += nr_freed;
free_memcg_kmem_pages((unsigned long)addr, cachep->gfporder); __free_memcg_kmem_pages(page, cachep->gfporder);
} }
static void kmem_rcu_free(struct rcu_head *head) static void kmem_rcu_free(struct rcu_head *head)
{ {
struct slab_rcu *slab_rcu = (struct slab_rcu *)head; struct kmem_cache *cachep;
struct kmem_cache *cachep = slab_rcu->cachep; struct page *page;
kmem_freepages(cachep, slab_rcu->addr); page = container_of(head, struct page, rcu_head);
if (OFF_SLAB(cachep)) cachep = page->slab_cache;
kmem_cache_free(cachep->slabp_cache, slab_rcu);
kmem_freepages(cachep, page);
} }
#if DEBUG #if DEBUG
...@@ -1978,19 +1884,19 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp) ...@@ -1978,19 +1884,19 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp)
/* Print some data about the neighboring objects, if they /* Print some data about the neighboring objects, if they
* exist: * exist:
*/ */
struct slab *slabp = virt_to_slab(objp); struct page *page = virt_to_head_page(objp);
unsigned int objnr; unsigned int objnr;
objnr = obj_to_index(cachep, slabp, objp); objnr = obj_to_index(cachep, page, objp);
if (objnr) { if (objnr) {
objp = index_to_obj(cachep, slabp, objnr - 1); objp = index_to_obj(cachep, page, objnr - 1);
realobj = (char *)objp + obj_offset(cachep); realobj = (char *)objp + obj_offset(cachep);
printk(KERN_ERR "Prev obj: start=%p, len=%d\n", printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
realobj, size); realobj, size);
print_objinfo(cachep, objp, 2); print_objinfo(cachep, objp, 2);
} }
if (objnr + 1 < cachep->num) { if (objnr + 1 < cachep->num) {
objp = index_to_obj(cachep, slabp, objnr + 1); objp = index_to_obj(cachep, page, objnr + 1);
realobj = (char *)objp + obj_offset(cachep); realobj = (char *)objp + obj_offset(cachep);
printk(KERN_ERR "Next obj: start=%p, len=%d\n", printk(KERN_ERR "Next obj: start=%p, len=%d\n",
realobj, size); realobj, size);
...@@ -2001,11 +1907,12 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp) ...@@ -2001,11 +1907,12 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp)
#endif #endif
#if DEBUG #if DEBUG
static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp) static void slab_destroy_debugcheck(struct kmem_cache *cachep,
struct page *page)
{ {
int i; int i;
for (i = 0; i < cachep->num; i++) { for (i = 0; i < cachep->num; i++) {
void *objp = index_to_obj(cachep, slabp, i); void *objp = index_to_obj(cachep, page, i);
if (cachep->flags & SLAB_POISON) { if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC #ifdef CONFIG_DEBUG_PAGEALLOC
...@@ -2030,7 +1937,8 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slab ...@@ -2030,7 +1937,8 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slab
} }
} }
#else #else
static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp) static void slab_destroy_debugcheck(struct kmem_cache *cachep,
struct page *page)
{ {
} }
#endif #endif
...@@ -2044,23 +1952,34 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slab ...@@ -2044,23 +1952,34 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slab
* Before calling the slab must have been unlinked from the cache. The * Before calling the slab must have been unlinked from the cache. The
* cache-lock is not held/needed. * cache-lock is not held/needed.
*/ */
static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp) static void slab_destroy(struct kmem_cache *cachep, struct page *page)
{ {
void *addr = slabp->s_mem - slabp->colouroff; void *freelist;
slab_destroy_debugcheck(cachep, slabp); freelist = page->freelist;
slab_destroy_debugcheck(cachep, page);
if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) { if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
struct slab_rcu *slab_rcu; struct rcu_head *head;
/*
* RCU free overloads the RCU head over the LRU.
* slab_page has been overloeaded over the LRU,
* however it is not used from now on so that
* we can use it safely.
*/
head = (void *)&page->rcu_head;
call_rcu(head, kmem_rcu_free);
slab_rcu = (struct slab_rcu *)slabp;
slab_rcu->cachep = cachep;
slab_rcu->addr = addr;
call_rcu(&slab_rcu->head, kmem_rcu_free);
} else { } else {
kmem_freepages(cachep, addr); kmem_freepages(cachep, page);
if (OFF_SLAB(cachep))
kmem_cache_free(cachep->slabp_cache, slabp);
} }
/*
* From now on, we don't use freelist
* although actual page can be freed in rcu context
*/
if (OFF_SLAB(cachep))
kmem_cache_free(cachep->freelist_cache, freelist);
} }
/** /**
...@@ -2097,8 +2016,8 @@ static size_t calculate_slab_order(struct kmem_cache *cachep, ...@@ -2097,8 +2016,8 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
* use off-slab slabs. Needed to avoid a possible * use off-slab slabs. Needed to avoid a possible
* looping condition in cache_grow(). * looping condition in cache_grow().
*/ */
offslab_limit = size - sizeof(struct slab); offslab_limit = size;
offslab_limit /= sizeof(kmem_bufctl_t); offslab_limit /= sizeof(unsigned int);
if (num > offslab_limit) if (num > offslab_limit)
break; break;
...@@ -2220,7 +2139,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp) ...@@ -2220,7 +2139,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
int int
__kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
{ {
size_t left_over, slab_size, ralign; size_t left_over, freelist_size, ralign;
gfp_t gfp; gfp_t gfp;
int err; int err;
size_t size = cachep->size; size_t size = cachep->size;
...@@ -2339,22 +2258,21 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) ...@@ -2339,22 +2258,21 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
if (!cachep->num) if (!cachep->num)
return -E2BIG; return -E2BIG;
slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t) freelist_size =
+ sizeof(struct slab), cachep->align); ALIGN(cachep->num * sizeof(unsigned int), cachep->align);
/* /*
* If the slab has been placed off-slab, and we have enough space then * If the slab has been placed off-slab, and we have enough space then
* move it on-slab. This is at the expense of any extra colouring. * move it on-slab. This is at the expense of any extra colouring.
*/ */
if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) { if (flags & CFLGS_OFF_SLAB && left_over >= freelist_size) {
flags &= ~CFLGS_OFF_SLAB; flags &= ~CFLGS_OFF_SLAB;
left_over -= slab_size; left_over -= freelist_size;
} }
if (flags & CFLGS_OFF_SLAB) { if (flags & CFLGS_OFF_SLAB) {
/* really off slab. No need for manual alignment */ /* really off slab. No need for manual alignment */
slab_size = freelist_size = cachep->num * sizeof(unsigned int);
cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab);
#ifdef CONFIG_PAGE_POISONING #ifdef CONFIG_PAGE_POISONING
/* If we're going to use the generic kernel_map_pages() /* If we're going to use the generic kernel_map_pages()
...@@ -2371,16 +2289,16 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) ...@@ -2371,16 +2289,16 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
if (cachep->colour_off < cachep->align) if (cachep->colour_off < cachep->align)
cachep->colour_off = cachep->align; cachep->colour_off = cachep->align;
cachep->colour = left_over / cachep->colour_off; cachep->colour = left_over / cachep->colour_off;
cachep->slab_size = slab_size; cachep->freelist_size = freelist_size;
cachep->flags = flags; cachep->flags = flags;
cachep->allocflags = 0; cachep->allocflags = __GFP_COMP;
if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA)) if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
cachep->allocflags |= GFP_DMA; cachep->allocflags |= GFP_DMA;
cachep->size = size; cachep->size = size;
cachep->reciprocal_buffer_size = reciprocal_value(size); cachep->reciprocal_buffer_size = reciprocal_value(size);
if (flags & CFLGS_OFF_SLAB) { if (flags & CFLGS_OFF_SLAB) {
cachep->slabp_cache = kmalloc_slab(slab_size, 0u); cachep->freelist_cache = kmalloc_slab(freelist_size, 0u);
/* /*
* This is a possibility for one of the malloc_sizes caches. * This is a possibility for one of the malloc_sizes caches.
* But since we go off slab only for object size greater than * But since we go off slab only for object size greater than
...@@ -2388,7 +2306,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) ...@@ -2388,7 +2306,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
* this should not happen at all. * this should not happen at all.
* But leave a BUG_ON for some lucky dude. * But leave a BUG_ON for some lucky dude.
*/ */
BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache)); BUG_ON(ZERO_OR_NULL_PTR(cachep->freelist_cache));
} }
err = setup_cpu_cache(cachep, gfp); err = setup_cpu_cache(cachep, gfp);
...@@ -2494,7 +2412,7 @@ static int drain_freelist(struct kmem_cache *cache, ...@@ -2494,7 +2412,7 @@ static int drain_freelist(struct kmem_cache *cache,
{ {
struct list_head *p; struct list_head *p;
int nr_freed; int nr_freed;
struct slab *slabp; struct page *page;
nr_freed = 0; nr_freed = 0;
while (nr_freed < tofree && !list_empty(&n->slabs_free)) { while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
...@@ -2506,18 +2424,18 @@ static int drain_freelist(struct kmem_cache *cache, ...@@ -2506,18 +2424,18 @@ static int drain_freelist(struct kmem_cache *cache,
goto out; goto out;
} }
slabp = list_entry(p, struct slab, list); page = list_entry(p, struct page, lru);
#if DEBUG #if DEBUG
BUG_ON(slabp->inuse); BUG_ON(page->active);
#endif #endif
list_del(&slabp->list); list_del(&page->lru);
/* /*
* Safe to drop the lock. The slab is no longer linked * Safe to drop the lock. The slab is no longer linked
* to the cache. * to the cache.
*/ */
n->free_objects -= cache->num; n->free_objects -= cache->num;
spin_unlock_irq(&n->list_lock); spin_unlock_irq(&n->list_lock);
slab_destroy(cache, slabp); slab_destroy(cache, page);
nr_freed++; nr_freed++;
} }
out: out:
...@@ -2600,52 +2518,42 @@ int __kmem_cache_shutdown(struct kmem_cache *cachep) ...@@ -2600,52 +2518,42 @@ int __kmem_cache_shutdown(struct kmem_cache *cachep)
* descriptors in kmem_cache_create, we search through the malloc_sizes array. * descriptors in kmem_cache_create, we search through the malloc_sizes array.
* If we are creating a malloc_sizes cache here it would not be visible to * If we are creating a malloc_sizes cache here it would not be visible to
* kmem_find_general_cachep till the initialization is complete. * kmem_find_general_cachep till the initialization is complete.
* Hence we cannot have slabp_cache same as the original cache. * Hence we cannot have freelist_cache same as the original cache.
*/ */
static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp, static void *alloc_slabmgmt(struct kmem_cache *cachep,
int colour_off, gfp_t local_flags, struct page *page, int colour_off,
int nodeid) gfp_t local_flags, int nodeid)
{ {
struct slab *slabp; void *freelist;
void *addr = page_address(page);
if (OFF_SLAB(cachep)) { if (OFF_SLAB(cachep)) {
/* Slab management obj is off-slab. */ /* Slab management obj is off-slab. */
slabp = kmem_cache_alloc_node(cachep->slabp_cache, freelist = kmem_cache_alloc_node(cachep->freelist_cache,
local_flags, nodeid); local_flags, nodeid);
/* if (!freelist)
* If the first object in the slab is leaked (it's allocated
* but no one has a reference to it), we want to make sure
* kmemleak does not treat the ->s_mem pointer as a reference
* to the object. Otherwise we will not report the leak.
*/
kmemleak_scan_area(&slabp->list, sizeof(struct list_head),
local_flags);
if (!slabp)
return NULL; return NULL;
} else { } else {
slabp = objp + colour_off; freelist = addr + colour_off;
colour_off += cachep->slab_size; colour_off += cachep->freelist_size;
} }
slabp->inuse = 0; page->active = 0;
slabp->colouroff = colour_off; page->s_mem = addr + colour_off;
slabp->s_mem = objp + colour_off; return freelist;
slabp->nodeid = nodeid;
slabp->free = 0;
return slabp;
} }
static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp) static inline unsigned int *slab_freelist(struct page *page)
{ {
return (kmem_bufctl_t *) (slabp + 1); return (unsigned int *)(page->freelist);
} }
static void cache_init_objs(struct kmem_cache *cachep, static void cache_init_objs(struct kmem_cache *cachep,
struct slab *slabp) struct page *page)
{ {
int i; int i;
for (i = 0; i < cachep->num; i++) { for (i = 0; i < cachep->num; i++) {
void *objp = index_to_obj(cachep, slabp, i); void *objp = index_to_obj(cachep, page, i);
#if DEBUG #if DEBUG
/* need to poison the objs? */ /* need to poison the objs? */
if (cachep->flags & SLAB_POISON) if (cachep->flags & SLAB_POISON)
...@@ -2681,9 +2589,8 @@ static void cache_init_objs(struct kmem_cache *cachep, ...@@ -2681,9 +2589,8 @@ static void cache_init_objs(struct kmem_cache *cachep,
if (cachep->ctor) if (cachep->ctor)
cachep->ctor(objp); cachep->ctor(objp);
#endif #endif
slab_bufctl(slabp)[i] = i + 1; slab_freelist(page)[i] = i;
} }
slab_bufctl(slabp)[i - 1] = BUFCTL_END;
} }
static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags) static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
...@@ -2696,41 +2603,41 @@ static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags) ...@@ -2696,41 +2603,41 @@ static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
} }
} }
static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp, static void *slab_get_obj(struct kmem_cache *cachep, struct page *page,
int nodeid) int nodeid)
{ {
void *objp = index_to_obj(cachep, slabp, slabp->free); void *objp;
kmem_bufctl_t next;
slabp->inuse++; objp = index_to_obj(cachep, page, slab_freelist(page)[page->active]);
next = slab_bufctl(slabp)[slabp->free]; page->active++;
#if DEBUG #if DEBUG
slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE; WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid);
WARN_ON(slabp->nodeid != nodeid);
#endif #endif
slabp->free = next;
return objp; return objp;
} }
static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp, static void slab_put_obj(struct kmem_cache *cachep, struct page *page,
void *objp, int nodeid) void *objp, int nodeid)
{ {
unsigned int objnr = obj_to_index(cachep, slabp, objp); unsigned int objnr = obj_to_index(cachep, page, objp);
#if DEBUG #if DEBUG
unsigned int i;
/* Verify that the slab belongs to the intended node */ /* Verify that the slab belongs to the intended node */
WARN_ON(slabp->nodeid != nodeid); WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid);
if (slab_bufctl(slabp)[objnr] + 1 <= SLAB_LIMIT + 1) { /* Verify double free bug */
for (i = page->active; i < cachep->num; i++) {
if (slab_freelist(page)[i] == objnr) {
printk(KERN_ERR "slab: double free detected in cache " printk(KERN_ERR "slab: double free detected in cache "
"'%s', objp %p\n", cachep->name, objp); "'%s', objp %p\n", cachep->name, objp);
BUG(); BUG();
} }
}
#endif #endif
slab_bufctl(slabp)[objnr] = slabp->free; page->active--;
slabp->free = objnr; slab_freelist(page)[page->active] = objnr;
slabp->inuse--;
} }
/* /*
...@@ -2738,23 +2645,11 @@ static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp, ...@@ -2738,23 +2645,11 @@ static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
* for the slab allocator to be able to lookup the cache and slab of a * for the slab allocator to be able to lookup the cache and slab of a
* virtual address for kfree, ksize, and slab debugging. * virtual address for kfree, ksize, and slab debugging.
*/ */
static void slab_map_pages(struct kmem_cache *cache, struct slab *slab, static void slab_map_pages(struct kmem_cache *cache, struct page *page,
void *addr) void *freelist)
{ {
int nr_pages;
struct page *page;
page = virt_to_page(addr);
nr_pages = 1;
if (likely(!PageCompound(page)))
nr_pages <<= cache->gfporder;
do {
page->slab_cache = cache; page->slab_cache = cache;
page->slab_page = slab; page->freelist = freelist;
page++;
} while (--nr_pages);
} }
/* /*
...@@ -2762,9 +2657,9 @@ static void slab_map_pages(struct kmem_cache *cache, struct slab *slab, ...@@ -2762,9 +2657,9 @@ static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
* kmem_cache_alloc() when there are no active objs left in a cache. * kmem_cache_alloc() when there are no active objs left in a cache.
*/ */
static int cache_grow(struct kmem_cache *cachep, static int cache_grow(struct kmem_cache *cachep,
gfp_t flags, int nodeid, void *objp) gfp_t flags, int nodeid, struct page *page)
{ {
struct slab *slabp; void *freelist;
size_t offset; size_t offset;
gfp_t local_flags; gfp_t local_flags;
struct kmem_cache_node *n; struct kmem_cache_node *n;
...@@ -2805,20 +2700,20 @@ static int cache_grow(struct kmem_cache *cachep, ...@@ -2805,20 +2700,20 @@ static int cache_grow(struct kmem_cache *cachep,
* Get mem for the objs. Attempt to allocate a physical page from * Get mem for the objs. Attempt to allocate a physical page from
* 'nodeid'. * 'nodeid'.
*/ */
if (!objp) if (!page)
objp = kmem_getpages(cachep, local_flags, nodeid); page = kmem_getpages(cachep, local_flags, nodeid);
if (!objp) if (!page)
goto failed; goto failed;
/* Get slab management. */ /* Get slab management. */
slabp = alloc_slabmgmt(cachep, objp, offset, freelist = alloc_slabmgmt(cachep, page, offset,
local_flags & ~GFP_CONSTRAINT_MASK, nodeid); local_flags & ~GFP_CONSTRAINT_MASK, nodeid);
if (!slabp) if (!freelist)
goto opps1; goto opps1;
slab_map_pages(cachep, slabp, objp); slab_map_pages(cachep, page, freelist);
cache_init_objs(cachep, slabp); cache_init_objs(cachep, page);
if (local_flags & __GFP_WAIT) if (local_flags & __GFP_WAIT)
local_irq_disable(); local_irq_disable();
...@@ -2826,13 +2721,13 @@ static int cache_grow(struct kmem_cache *cachep, ...@@ -2826,13 +2721,13 @@ static int cache_grow(struct kmem_cache *cachep,
spin_lock(&n->list_lock); spin_lock(&n->list_lock);
/* Make slab active. */ /* Make slab active. */
list_add_tail(&slabp->list, &(n->slabs_free)); list_add_tail(&page->lru, &(n->slabs_free));
STATS_INC_GROWN(cachep); STATS_INC_GROWN(cachep);
n->free_objects += cachep->num; n->free_objects += cachep->num;
spin_unlock(&n->list_lock); spin_unlock(&n->list_lock);
return 1; return 1;
opps1: opps1:
kmem_freepages(cachep, objp); kmem_freepages(cachep, page);
failed: failed:
if (local_flags & __GFP_WAIT) if (local_flags & __GFP_WAIT)
local_irq_disable(); local_irq_disable();
...@@ -2880,9 +2775,8 @@ static inline void verify_redzone_free(struct kmem_cache *cache, void *obj) ...@@ -2880,9 +2775,8 @@ static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
unsigned long caller) unsigned long caller)
{ {
struct page *page;
unsigned int objnr; unsigned int objnr;
struct slab *slabp; struct page *page;
BUG_ON(virt_to_cache(objp) != cachep); BUG_ON(virt_to_cache(objp) != cachep);
...@@ -2890,8 +2784,6 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, ...@@ -2890,8 +2784,6 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
kfree_debugcheck(objp); kfree_debugcheck(objp);
page = virt_to_head_page(objp); page = virt_to_head_page(objp);
slabp = page->slab_page;
if (cachep->flags & SLAB_RED_ZONE) { if (cachep->flags & SLAB_RED_ZONE) {
verify_redzone_free(cachep, objp); verify_redzone_free(cachep, objp);
*dbg_redzone1(cachep, objp) = RED_INACTIVE; *dbg_redzone1(cachep, objp) = RED_INACTIVE;
...@@ -2900,14 +2792,11 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, ...@@ -2900,14 +2792,11 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
if (cachep->flags & SLAB_STORE_USER) if (cachep->flags & SLAB_STORE_USER)
*dbg_userword(cachep, objp) = (void *)caller; *dbg_userword(cachep, objp) = (void *)caller;
objnr = obj_to_index(cachep, slabp, objp); objnr = obj_to_index(cachep, page, objp);
BUG_ON(objnr >= cachep->num); BUG_ON(objnr >= cachep->num);
BUG_ON(objp != index_to_obj(cachep, slabp, objnr)); BUG_ON(objp != index_to_obj(cachep, page, objnr));
#ifdef CONFIG_DEBUG_SLAB_LEAK
slab_bufctl(slabp)[objnr] = BUFCTL_FREE;
#endif
if (cachep->flags & SLAB_POISON) { if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC #ifdef CONFIG_DEBUG_PAGEALLOC
if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) { if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
...@@ -2924,33 +2813,9 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, ...@@ -2924,33 +2813,9 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
return objp; return objp;
} }
static void check_slabp(struct kmem_cache *cachep, struct slab *slabp)
{
kmem_bufctl_t i;
int entries = 0;
/* Check slab's freelist to see if this obj is there. */
for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) {
entries++;
if (entries > cachep->num || i >= cachep->num)
goto bad;
}
if (entries != cachep->num - slabp->inuse) {
bad:
printk(KERN_ERR "slab: Internal list corruption detected in "
"cache '%s'(%d), slabp %p(%d). Tainted(%s). Hexdump:\n",
cachep->name, cachep->num, slabp, slabp->inuse,
print_tainted());
print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, slabp,
sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t),
1);
BUG();
}
}
#else #else
#define kfree_debugcheck(x) do { } while(0) #define kfree_debugcheck(x) do { } while(0)
#define cache_free_debugcheck(x,objp,z) (objp) #define cache_free_debugcheck(x,objp,z) (objp)
#define check_slabp(x,y) do { } while(0)
#endif #endif
static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags, static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags,
...@@ -2989,7 +2854,7 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags, ...@@ -2989,7 +2854,7 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags,
while (batchcount > 0) { while (batchcount > 0) {
struct list_head *entry; struct list_head *entry;
struct slab *slabp; struct page *page;
/* Get slab alloc is to come from. */ /* Get slab alloc is to come from. */
entry = n->slabs_partial.next; entry = n->slabs_partial.next;
if (entry == &n->slabs_partial) { if (entry == &n->slabs_partial) {
...@@ -2999,8 +2864,7 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags, ...@@ -2999,8 +2864,7 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags,
goto must_grow; goto must_grow;
} }
slabp = list_entry(entry, struct slab, list); page = list_entry(entry, struct page, lru);
check_slabp(cachep, slabp);
check_spinlock_acquired(cachep); check_spinlock_acquired(cachep);
/* /*
...@@ -3008,24 +2872,23 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags, ...@@ -3008,24 +2872,23 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags,
* there must be at least one object available for * there must be at least one object available for
* allocation. * allocation.
*/ */
BUG_ON(slabp->inuse >= cachep->num); BUG_ON(page->active >= cachep->num);
while (slabp->inuse < cachep->num && batchcount--) { while (page->active < cachep->num && batchcount--) {
STATS_INC_ALLOCED(cachep); STATS_INC_ALLOCED(cachep);
STATS_INC_ACTIVE(cachep); STATS_INC_ACTIVE(cachep);
STATS_SET_HIGH(cachep); STATS_SET_HIGH(cachep);
ac_put_obj(cachep, ac, slab_get_obj(cachep, slabp, ac_put_obj(cachep, ac, slab_get_obj(cachep, page,
node)); node));
} }
check_slabp(cachep, slabp);
/* move slabp to correct slabp list: */ /* move slabp to correct slabp list: */
list_del(&slabp->list); list_del(&page->lru);
if (slabp->free == BUFCTL_END) if (page->active == cachep->num)
list_add(&slabp->list, &n->slabs_full); list_add(&page->list, &n->slabs_full);
else else
list_add(&slabp->list, &n->slabs_partial); list_add(&page->list, &n->slabs_partial);
} }
must_grow: must_grow:
...@@ -3097,16 +2960,6 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, ...@@ -3097,16 +2960,6 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
*dbg_redzone1(cachep, objp) = RED_ACTIVE; *dbg_redzone1(cachep, objp) = RED_ACTIVE;
*dbg_redzone2(cachep, objp) = RED_ACTIVE; *dbg_redzone2(cachep, objp) = RED_ACTIVE;
} }
#ifdef CONFIG_DEBUG_SLAB_LEAK
{
struct slab *slabp;
unsigned objnr;
slabp = virt_to_head_page(objp)->slab_page;
objnr = (unsigned)(objp - slabp->s_mem) / cachep->size;
slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
}
#endif
objp += obj_offset(cachep); objp += obj_offset(cachep);
if (cachep->ctor && cachep->flags & SLAB_POISON) if (cachep->ctor && cachep->flags & SLAB_POISON)
cachep->ctor(objp); cachep->ctor(objp);
...@@ -3248,18 +3101,20 @@ static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags) ...@@ -3248,18 +3101,20 @@ static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
* We may trigger various forms of reclaim on the allowed * We may trigger various forms of reclaim on the allowed
* set and go into memory reserves if necessary. * set and go into memory reserves if necessary.
*/ */
struct page *page;
if (local_flags & __GFP_WAIT) if (local_flags & __GFP_WAIT)
local_irq_enable(); local_irq_enable();
kmem_flagcheck(cache, flags); kmem_flagcheck(cache, flags);
obj = kmem_getpages(cache, local_flags, numa_mem_id()); page = kmem_getpages(cache, local_flags, numa_mem_id());
if (local_flags & __GFP_WAIT) if (local_flags & __GFP_WAIT)
local_irq_disable(); local_irq_disable();
if (obj) { if (page) {
/* /*
* Insert into the appropriate per node queues * Insert into the appropriate per node queues
*/ */
nid = page_to_nid(virt_to_page(obj)); nid = page_to_nid(page);
if (cache_grow(cache, flags, nid, obj)) { if (cache_grow(cache, flags, nid, page)) {
obj = ____cache_alloc_node(cache, obj = ____cache_alloc_node(cache,
flags | GFP_THISNODE, nid); flags | GFP_THISNODE, nid);
if (!obj) if (!obj)
...@@ -3288,7 +3143,7 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, ...@@ -3288,7 +3143,7 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
int nodeid) int nodeid)
{ {
struct list_head *entry; struct list_head *entry;
struct slab *slabp; struct page *page;
struct kmem_cache_node *n; struct kmem_cache_node *n;
void *obj; void *obj;
int x; int x;
...@@ -3308,26 +3163,24 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, ...@@ -3308,26 +3163,24 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
goto must_grow; goto must_grow;
} }
slabp = list_entry(entry, struct slab, list); page = list_entry(entry, struct page, lru);
check_spinlock_acquired_node(cachep, nodeid); check_spinlock_acquired_node(cachep, nodeid);
check_slabp(cachep, slabp);
STATS_INC_NODEALLOCS(cachep); STATS_INC_NODEALLOCS(cachep);
STATS_INC_ACTIVE(cachep); STATS_INC_ACTIVE(cachep);
STATS_SET_HIGH(cachep); STATS_SET_HIGH(cachep);
BUG_ON(slabp->inuse == cachep->num); BUG_ON(page->active == cachep->num);
obj = slab_get_obj(cachep, slabp, nodeid); obj = slab_get_obj(cachep, page, nodeid);
check_slabp(cachep, slabp);
n->free_objects--; n->free_objects--;
/* move slabp to correct slabp list: */ /* move slabp to correct slabp list: */
list_del(&slabp->list); list_del(&page->lru);
if (slabp->free == BUFCTL_END) if (page->active == cachep->num)
list_add(&slabp->list, &n->slabs_full); list_add(&page->lru, &n->slabs_full);
else else
list_add(&slabp->list, &n->slabs_partial); list_add(&page->lru, &n->slabs_partial);
spin_unlock(&n->list_lock); spin_unlock(&n->list_lock);
goto done; goto done;
...@@ -3477,23 +3330,21 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects, ...@@ -3477,23 +3330,21 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
for (i = 0; i < nr_objects; i++) { for (i = 0; i < nr_objects; i++) {
void *objp; void *objp;
struct slab *slabp; struct page *page;
clear_obj_pfmemalloc(&objpp[i]); clear_obj_pfmemalloc(&objpp[i]);
objp = objpp[i]; objp = objpp[i];
slabp = virt_to_slab(objp); page = virt_to_head_page(objp);
n = cachep->node[node]; n = cachep->node[node];
list_del(&slabp->list); list_del(&page->lru);
check_spinlock_acquired_node(cachep, node); check_spinlock_acquired_node(cachep, node);
check_slabp(cachep, slabp); slab_put_obj(cachep, page, objp, node);
slab_put_obj(cachep, slabp, objp, node);
STATS_DEC_ACTIVE(cachep); STATS_DEC_ACTIVE(cachep);
n->free_objects++; n->free_objects++;
check_slabp(cachep, slabp);
/* fixup slab chains */ /* fixup slab chains */
if (slabp->inuse == 0) { if (page->active == 0) {
if (n->free_objects > n->free_limit) { if (n->free_objects > n->free_limit) {
n->free_objects -= cachep->num; n->free_objects -= cachep->num;
/* No need to drop any previously held /* No need to drop any previously held
...@@ -3502,16 +3353,16 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects, ...@@ -3502,16 +3353,16 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
* a different cache, refer to comments before * a different cache, refer to comments before
* alloc_slabmgmt. * alloc_slabmgmt.
*/ */
slab_destroy(cachep, slabp); slab_destroy(cachep, page);
} else { } else {
list_add(&slabp->list, &n->slabs_free); list_add(&page->lru, &n->slabs_free);
} }
} else { } else {
/* Unconditionally move a slab to the end of the /* Unconditionally move a slab to the end of the
* partial list on free - maximum time for the * partial list on free - maximum time for the
* other objects to be freed, too. * other objects to be freed, too.
*/ */
list_add_tail(&slabp->list, &n->slabs_partial); list_add_tail(&page->lru, &n->slabs_partial);
} }
} }
} }
...@@ -3551,10 +3402,10 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac) ...@@ -3551,10 +3402,10 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
p = n->slabs_free.next; p = n->slabs_free.next;
while (p != &(n->slabs_free)) { while (p != &(n->slabs_free)) {
struct slab *slabp; struct page *page;
slabp = list_entry(p, struct slab, list); page = list_entry(p, struct page, lru);
BUG_ON(slabp->inuse); BUG_ON(page->active);
i++; i++;
p = p->next; p = p->next;
...@@ -4158,7 +4009,7 @@ static void cache_reap(struct work_struct *w) ...@@ -4158,7 +4009,7 @@ static void cache_reap(struct work_struct *w)
#ifdef CONFIG_SLABINFO #ifdef CONFIG_SLABINFO
void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo) void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
{ {
struct slab *slabp; struct page *page;
unsigned long active_objs; unsigned long active_objs;
unsigned long num_objs; unsigned long num_objs;
unsigned long active_slabs = 0; unsigned long active_slabs = 0;
...@@ -4178,23 +4029,23 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo) ...@@ -4178,23 +4029,23 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
check_irq_on(); check_irq_on();
spin_lock_irq(&n->list_lock); spin_lock_irq(&n->list_lock);
list_for_each_entry(slabp, &n->slabs_full, list) { list_for_each_entry(page, &n->slabs_full, lru) {
if (slabp->inuse != cachep->num && !error) if (page->active != cachep->num && !error)
error = "slabs_full accounting error"; error = "slabs_full accounting error";
active_objs += cachep->num; active_objs += cachep->num;
active_slabs++; active_slabs++;
} }
list_for_each_entry(slabp, &n->slabs_partial, list) { list_for_each_entry(page, &n->slabs_partial, lru) {
if (slabp->inuse == cachep->num && !error) if (page->active == cachep->num && !error)
error = "slabs_partial inuse accounting error"; error = "slabs_partial accounting error";
if (!slabp->inuse && !error) if (!page->active && !error)
error = "slabs_partial/inuse accounting error"; error = "slabs_partial accounting error";
active_objs += slabp->inuse; active_objs += page->active;
active_slabs++; active_slabs++;
} }
list_for_each_entry(slabp, &n->slabs_free, list) { list_for_each_entry(page, &n->slabs_free, lru) {
if (slabp->inuse && !error) if (page->active && !error)
error = "slabs_free/inuse accounting error"; error = "slabs_free accounting error";
num_slabs++; num_slabs++;
} }
free_objects += n->free_objects; free_objects += n->free_objects;
...@@ -4346,15 +4197,27 @@ static inline int add_caller(unsigned long *n, unsigned long v) ...@@ -4346,15 +4197,27 @@ static inline int add_caller(unsigned long *n, unsigned long v)
return 1; return 1;
} }
static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s) static void handle_slab(unsigned long *n, struct kmem_cache *c,
struct page *page)
{ {
void *p; void *p;
int i; int i, j;
if (n[0] == n[1]) if (n[0] == n[1])
return; return;
for (i = 0, p = s->s_mem; i < c->num; i++, p += c->size) { for (i = 0, p = page->s_mem; i < c->num; i++, p += c->size) {
if (slab_bufctl(s)[i] != BUFCTL_ACTIVE) bool active = true;
for (j = page->active; j < c->num; j++) {
/* Skip freed item */
if (slab_freelist(page)[j] == i) {
active = false;
break;
}
}
if (!active)
continue; continue;
if (!add_caller(n, (unsigned long)*dbg_userword(c, p))) if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
return; return;
} }
...@@ -4379,7 +4242,7 @@ static void show_symbol(struct seq_file *m, unsigned long address) ...@@ -4379,7 +4242,7 @@ static void show_symbol(struct seq_file *m, unsigned long address)
static int leaks_show(struct seq_file *m, void *p) static int leaks_show(struct seq_file *m, void *p)
{ {
struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list); struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list);
struct slab *slabp; struct page *page;
struct kmem_cache_node *n; struct kmem_cache_node *n;
const char *name; const char *name;
unsigned long *x = m->private; unsigned long *x = m->private;
...@@ -4403,10 +4266,10 @@ static int leaks_show(struct seq_file *m, void *p) ...@@ -4403,10 +4266,10 @@ static int leaks_show(struct seq_file *m, void *p)
check_irq_on(); check_irq_on();
spin_lock_irq(&n->list_lock); spin_lock_irq(&n->list_lock);
list_for_each_entry(slabp, &n->slabs_full, list) list_for_each_entry(page, &n->slabs_full, lru)
handle_slab(x, cachep, slabp); handle_slab(x, cachep, page);
list_for_each_entry(slabp, &n->slabs_partial, list) list_for_each_entry(page, &n->slabs_partial, lru)
handle_slab(x, cachep, slabp); handle_slab(x, cachep, page);
spin_unlock_irq(&n->list_lock); spin_unlock_irq(&n->list_lock);
} }
name = cachep->name; name = cachep->name;
......
@@ -155,7 +155,7 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
 /*
  * Maximum number of desirable partial slabs.
  * The existence of more partial slabs makes kmem_cache_shrink
- * sort the partial list by the number of objects in the.
+ * sort the partial list by the number of objects in use.
  */
 #define MAX_PARTIAL 10
@@ -933,6 +933,16 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
  * Hooks for other subsystems that check memory allocations. In a typical
  * production configuration these hooks all should produce no code at all.
  */
+static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
+{
+    kmemleak_alloc(ptr, size, 1, flags);
+}
+
+static inline void kfree_hook(const void *x)
+{
+    kmemleak_free(x);
+}
+
 static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
 {
     flags &= gfp_allowed_mask;
@@ -1217,8 +1227,8 @@ static unsigned long kmem_cache_flags(unsigned long object_size,
     /*
      * Enable debugging if selected on the kernel commandline.
      */
-    if (slub_debug && (!slub_debug_slabs ||
-        !strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs))))
+    if (slub_debug && (!slub_debug_slabs || (name &&
+        !strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)))))
         flags |= slub_debug;

     return flags;
@@ -1260,13 +1270,30 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node,
 static inline void dec_slabs_node(struct kmem_cache *s, int node,
                                   int objects) {}

+static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
+{
+    kmemleak_alloc(ptr, size, 1, flags);
+}
+
+static inline void kfree_hook(const void *x)
+{
+    kmemleak_free(x);
+}
+
 static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
                                                     { return 0; }
 static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
-        void *object) {}
+        void *object)
+{
+    kmemleak_alloc_recursive(object, s->object_size, 1, s->flags,
+                             flags & gfp_allowed_mask);
+}

-static inline void slab_free_hook(struct kmem_cache *s, void *x) {}
+static inline void slab_free_hook(struct kmem_cache *s, void *x)
+{
+    kmemleak_free_recursive(x, s->flags);
+}

 #endif /* CONFIG_SLUB_DEBUG */
@@ -2829,8 +2856,8 @@ static struct kmem_cache *kmem_cache_node;
  * slab on the node for this slabcache. There are no concurrent accesses
  * possible.
  *
- * Note that this function only works on the kmalloc_node_cache
- * when allocating for the kmalloc_node_cache. This is used for bootstrapping
+ * Note that this function only works on the kmem_cache_node
+ * when allocating for the kmem_cache_node. This is used for bootstrapping
  * memory on a fresh node that has no slab structures yet.
  */
 static void early_kmem_cache_node_alloc(int node)
@@ -3272,7 +3299,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
     if (page)
         ptr = page_address(page);

-    kmemleak_alloc(ptr, size, 1, flags);
+    kmalloc_large_node_hook(ptr, size, flags);
     return ptr;
 }
@@ -3336,7 +3363,7 @@ void kfree(const void *x)
     page = virt_to_head_page(x);
     if (unlikely(!PageSlab(page))) {
         BUG_ON(!PageCompound(page));
-        kmemleak_free(x);
+        kfree_hook(x);
         __free_memcg_kmem_pages(page, compound_order(page));
         return;
     }
......