Commit 1501278b authored by Linus Torvalds

Merge tag 'slab-for-6.1-rc1-hotfix' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab

Pull slab hotfix from Vlastimil Babka:
 "A single fix for the common-kmalloc series, for warnings on mips and
  sparc64 reported by Guenter Roeck"

* tag 'slab-for-6.1-rc1-hotfix' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab:
  mm/slab: use kmalloc_node() for off slab freelist_idx_t array allocation
parents 36d8a3ed e36ce448
@@ -33,7 +33,6 @@ struct kmem_cache {
 	size_t colour;			/* cache colouring range */
 	unsigned int colour_off;	/* colour offset */
-	struct kmem_cache *freelist_cache;
 	unsigned int freelist_size;
 
 	/* constructor func */
@@ -1619,7 +1619,7 @@ static void slab_destroy(struct kmem_cache *cachep, struct slab *slab)
 	 * although actual page can be freed in rcu context
 	 */
 	if (OFF_SLAB(cachep))
-		kmem_cache_free(cachep->freelist_cache, freelist);
+		kfree(freelist);
 }
 
 /*
@@ -1671,21 +1671,27 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
 		if (flags & CFLGS_OFF_SLAB) {
 			struct kmem_cache *freelist_cache;
 			size_t freelist_size;
+			size_t freelist_cache_size;
 
 			freelist_size = num * sizeof(freelist_idx_t);
-			freelist_cache = kmalloc_slab(freelist_size, 0u);
-			if (!freelist_cache)
-				continue;
-
-			/*
-			 * Needed to avoid possible looping condition
-			 * in cache_grow_begin()
-			 */
-			if (OFF_SLAB(freelist_cache))
-				continue;
+			if (freelist_size > KMALLOC_MAX_CACHE_SIZE) {
+				freelist_cache_size = PAGE_SIZE << get_order(freelist_size);
+			} else {
+				freelist_cache = kmalloc_slab(freelist_size, 0u);
+				if (!freelist_cache)
+					continue;
+				freelist_cache_size = freelist_cache->size;
+
+				/*
+				 * Needed to avoid possible looping condition
+				 * in cache_grow_begin()
+				 */
+				if (OFF_SLAB(freelist_cache))
+					continue;
+			}
 
 			/* check if off slab has enough benefit */
-			if (freelist_cache->size > cachep->size / 2)
+			if (freelist_cache_size > cachep->size / 2)
 				continue;
 		}
@@ -2061,11 +2067,6 @@ int __kmem_cache_create(struct kmem_cache *cachep, slab_flags_t flags)
 		cachep->flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
 #endif
 
-	if (OFF_SLAB(cachep)) {
-		cachep->freelist_cache =
-			kmalloc_slab(cachep->freelist_size, 0u);
-	}
-
 	err = setup_cpu_cache(cachep, gfp);
 	if (err) {
 		__kmem_cache_release(cachep);
@@ -2292,7 +2293,7 @@ static void *alloc_slabmgmt(struct kmem_cache *cachep,
 		freelist = NULL;
 	else if (OFF_SLAB(cachep)) {
 		/* Slab management obj is off-slab. */
-		freelist = kmem_cache_alloc_node(cachep->freelist_cache,
-						 local_flags, nodeid);
+		freelist = kmalloc_node(cachep->freelist_size,
+					local_flags, nodeid);
 	} else {
 		/* We will use last bytes at the slab for freelist */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment