Commit 865762a8 authored by Jesper Dangaard Brouer, committed by Linus Torvalds

slab/slub: adjust kmem_cache_alloc_bulk API

Adjust kmem_cache_alloc_bulk API before we have any real users.

Adjust the API to return type 'int' instead of the previous type 'bool'.  This
is done to allow future extension of the bulk alloc API.
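
For illustration, a minimal caller-side sketch of the adjusted API; 'my_cache',
'objs' and work_on_objects() are hypothetical names, not part of this patch.
With this change the call returns the number of objects actually allocated
(0 on failure) rather than true/false:

  /* Hypothetical caller; names are illustrative only. */
  void *objs[16];
  int allocated;

  allocated = kmem_cache_alloc_bulk(my_cache, GFP_KERNEL, 16, objs);
  if (!allocated)
          return -ENOMEM;         /* 0 objects: the bulk allocation failed */

  /* Currently 'allocated' is either 0 or 16; with a future flag it could
   * legitimately be anything in between. */
  work_on_objects(objs, allocated);
  kmem_cache_free_bulk(my_cache, allocated, objs);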

A future extension could be to allow SLUB to stop at a page boundary, when
specified by a flag, and then return the number of objects allocated up to
that point.

The advantage of this approach is that it would make it easier to run bulk
alloc without local IRQs disabled, using cmpxchg to "steal" the entire
c->freelist or page->freelist.  To avoid overshooting we would stop processing
at a slab-page boundary; otherwise we would always end up having to hand back
some objects at the cost of another cmpxchg.
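
As a loose illustration of that idea, here is a conceptual userspace sketch
(C11 atomics, not kernel code and not the SLUB implementation; all names are
hypothetical): one successful cmpxchg takes the whole per-slab freelist,
objects are handed out until that list ends, and the count of filled slots is
returned.

  /* Conceptual sketch only; 'struct free_obj' and bulk_take() are made up. */
  #include <stdatomic.h>
  #include <stddef.h>

  struct free_obj {
          struct free_obj *next;  /* freelist link stored in the free object */
  };

  /*
   * Detach an entire freelist with one cmpxchg, then hand out objects until
   * the slab page's list runs out, returning how many slots were filled.
   */
  static size_t bulk_take(_Atomic(struct free_obj *) *freelist, void **p,
                          size_t size)
  {
          struct free_obj *head = atomic_load(freelist);
          size_t i = 0;

          /* One successful cmpxchg takes ownership of the whole list. */
          do {
                  if (!head)
                          return 0;
          } while (!atomic_compare_exchange_weak(freelist, &head, NULL));

          /* Stop at the end of this slab page's list instead of overshooting. */
          while (head && i < size) {
                  p[i++] = head;
                  head = head->next;
          }

          /* Leftover objects would have to be pushed back with another
           * cmpxchg; that part is omitted here. */
          return i;
  }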

To stay compatible with future users of this API that link against an older
kernel while using the new flag, we need to return the number of allocated
objects as part of this API change.
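
A hedged forward-compatibility sketch of what that means for callers: code
written against the int-returning API can simply retry for the remainder
whenever fewer objects than requested come back ('cache', 'objs' and 'want'
are assumptions, not part of this patch).

  /* Hypothetical caller prepared for partial fills. */
  size_t want = 64, got = 0;
  void *objs[64];

  while (got < want) {
          int n = kmem_cache_alloc_bulk(cache, GFP_KERNEL, want - got,
                                        &objs[got]);
          if (!n)                 /* 0 still means outright failure */
                  break;
          got += n;               /* may be less than requested in the future */
  }
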
Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Cc: Vladimir Davydov <vdavydov@virtuozzo.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 03374518
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -316,7 +316,7 @@ void kmem_cache_free(struct kmem_cache *, void *);
  * Note that interrupts must be enabled when calling these functions.
  */
 void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
-bool kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
+int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
 
 #ifdef CONFIG_NUMA
 void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment;
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3419,7 +3419,7 @@ void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
 }
 EXPORT_SYMBOL(kmem_cache_free_bulk);
 
-bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
                                 void **p)
 {
         return __kmem_cache_alloc_bulk(s, flags, size, p);
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -170,7 +170,7 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer,
  * may be allocated or freed using these operations.
  */
 void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
-bool __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
+int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
 
 #ifdef CONFIG_MEMCG_KMEM
 /*
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -112,7 +112,7 @@ void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
                 kmem_cache_free(s, p[i]);
 }
 
-bool __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
+int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
                                 void **p)
 {
         size_t i;
@@ -121,10 +121,10 @@ bool __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
                 void *x = p[i] = kmem_cache_alloc(s, flags);
                 if (!x) {
                         __kmem_cache_free_bulk(s, i, p);
-                        return false;
+                        return 0;
                 }
         }
-        return true;
+        return i;
 }
 
 #ifdef CONFIG_MEMCG_KMEM
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -617,7 +617,7 @@ void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
 }
 EXPORT_SYMBOL(kmem_cache_free_bulk);
 
-bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
                                 void **p)
 {
         return __kmem_cache_alloc_bulk(s, flags, size, p);
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2909,7 +2909,7 @@ void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
 EXPORT_SYMBOL(kmem_cache_free_bulk);
 
 /* Note that interrupts must be enabled when calling this function. */
-bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
                            void **p)
 {
         struct kmem_cache_cpu *c;
@@ -2959,12 +2959,12 @@ bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 
         /* memcg and kmem_cache debug support */
         slab_post_alloc_hook(s, flags, size, p);
-        return true;
+        return i;
 error:
         local_irq_enable();
         slab_post_alloc_hook(s, flags, i, p);
         __kmem_cache_free_bulk(s, i, p);
-        return false;
+        return 0;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_bulk);