Commit a9e0b9f2 authored by Vlastimil Babka

mm/slab: remove CONFIG_SLAB code from slab common code

In slab_common.c and slab.h headers, we can now remove all code behind
CONFIG_SLAB and CONFIG_DEBUG_SLAB ifdefs, and remove all CONFIG_SLUB
ifdefs.

Reviewed-by: Kees Cook <keescook@chromium.org>
Acked-by: David Rientjes <rientjes@google.com>
Tested-by: David Rientjes <rientjes@google.com>
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
parent 70da1d01
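
The whole change boils down to one mechanical pattern: with SLUB the only
remaining allocator, every CONFIG_SLAB branch is dead and every CONFIG_SLUB
guard is unconditionally true. A minimal sketch of the pattern being removed
(do_slab_thing/do_slub_thing are hypothetical placeholders, not from the
patch):

	/* before: three-way conditional, one branch now dead */
	#if defined(CONFIG_SLAB)
		do_slab_thing();
	#elif defined(CONFIG_SLUB)
		do_slub_thing();
	#else
	#error "Unexpected slab allocator configured"
	#endif

	/* after: SLUB is the only allocator, call it unconditionally */
	do_slub_thing();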
include/linux/slab.h
@@ -24,7 +24,7 @@
 /*
  * Flags to pass to kmem_cache_create().
- * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
+ * The ones marked DEBUG need CONFIG_SLUB_DEBUG enabled, otherwise are no-op
  */

 /* DEBUG: Perform (expensive) checks on alloc/free */
 #define SLAB_CONSISTENCY_CHECKS	((slab_flags_t __force)0x00000100U)
@@ -302,25 +302,15 @@ static inline unsigned int arch_slab_minalign(void)
  * Kmalloc array related definitions
  */

-#ifdef CONFIG_SLAB
 /*
- * SLAB and SLUB directly allocates requests fitting in to an order-1 page
+ * SLUB directly allocates requests fitting in to an order-1 page
  * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
  */
 #define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
 #define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT)
 #ifndef KMALLOC_SHIFT_LOW
-#define KMALLOC_SHIFT_LOW	5
-#endif
-#endif
-
-#ifdef CONFIG_SLUB
-#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
-#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT)
-#ifndef KMALLOC_SHIFT_LOW
 #define KMALLOC_SHIFT_LOW	3
 #endif
-#endif

 /* Maximum allocatable size */
 #define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
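
With the two per-allocator copies unified, the kmalloc cache range can be
read straight off the macros: KMALLOC_SHIFT_LOW of 3 means the smallest
cache is 8 bytes, and KMALLOC_SHIFT_HIGH of PAGE_SHIFT + 1 caps the caches
at an order-1 page. A standalone sketch of that arithmetic, assuming 4K
pages (PAGE_SHIFT == 12; userspace demo, not kernel code):

	#include <stdio.h>

	#define PAGE_SHIFT		12
	#define KMALLOC_SHIFT_LOW	3			/* smallest kmalloc cache */
	#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)	/* largest kmalloc cache */

	int main(void)
	{
		/* prints: kmalloc caches span 8..8192 bytes */
		printf("kmalloc caches span %lu..%lu bytes\n",
		       1UL << KMALLOC_SHIFT_LOW, 1UL << KMALLOC_SHIFT_HIGH);
		return 0;
	}

Requests larger than an order-1 page bypass the kmalloc array and go to the
page allocator, as the updated comment states.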
mm/slab.h
@@ -42,21 +42,6 @@ typedef union {
 struct slab {
 	unsigned long __page_flags;

-#if defined(CONFIG_SLAB)
-
-	struct kmem_cache *slab_cache;
-	union {
-		struct {
-			struct list_head slab_list;
-			void *freelist;	/* array of free object indexes */
-			void *s_mem;	/* first object */
-		};
-		struct rcu_head rcu_head;
-	};
-	unsigned int active;
-
-#elif defined(CONFIG_SLUB)
-
 	struct kmem_cache *slab_cache;
 	union {
 		struct {
@@ -91,10 +76,6 @@ struct slab {
 	};
 	unsigned int __unused;

-#else
-#error "Unexpected slab allocator configured"
-#endif
-
 	atomic_t __page_refcount;
 #ifdef CONFIG_MEMCG
 	unsigned long memcg_data;
@@ -111,7 +92,7 @@ SLAB_MATCH(memcg_data, memcg_data);
 #endif
 #undef SLAB_MATCH
 static_assert(sizeof(struct slab) <= sizeof(struct page));
-#if defined(system_has_freelist_aba) && defined(CONFIG_SLUB)
+#if defined(system_has_freelist_aba)
 static_assert(IS_ALIGNED(offsetof(struct slab, freelist), sizeof(freelist_aba_t)));
 #endif
@@ -228,13 +209,7 @@ static inline size_t slab_size(const struct slab *slab)
 	return PAGE_SIZE << slab_order(slab);
 }

-#ifdef CONFIG_SLAB
-#include <linux/slab_def.h>
-#endif
-
-#ifdef CONFIG_SLUB
 #include <linux/slub_def.h>
-#endif

 #include <linux/memcontrol.h>
 #include <linux/fault-inject.h>
@@ -320,26 +295,16 @@ static inline bool is_kmalloc_cache(struct kmem_cache *s)
 			      SLAB_CACHE_DMA32 | SLAB_PANIC | \
 			      SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS )

-#if defined(CONFIG_DEBUG_SLAB)
-#define SLAB_DEBUG_FLAGS	(SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
-#elif defined(CONFIG_SLUB_DEBUG)
+#ifdef CONFIG_SLUB_DEBUG
 #define SLAB_DEBUG_FLAGS	(SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
 				 SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
 #else
 #define SLAB_DEBUG_FLAGS	(0)
 #endif

-#if defined(CONFIG_SLAB)
-#define SLAB_CACHE_FLAGS	(SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
-			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
-			  SLAB_ACCOUNT | SLAB_NO_MERGE)
-#elif defined(CONFIG_SLUB)
 #define SLAB_CACHE_FLAGS	(SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
 			  SLAB_TEMPORARY | SLAB_ACCOUNT | \
 			  SLAB_NO_USER_FLAGS | SLAB_KMALLOC | SLAB_NO_MERGE)
-#else
-#define SLAB_CACHE_FLAGS	(SLAB_NOLEAKTRACE)
-#endif

 /* Common flags available with current configuration */
 #define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
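
CACHE_CREATE_MASK is now built from a single unconditional SLAB_CACHE_FLAGS
definition plus the (possibly empty) SLUB debug flags. A hedged sketch of
how a creation-time check against the mask behaves; my_flags is a
hypothetical caller value, and the exact mask-vs-reject policy lives in
kmem_cache_create(), not in this snippet:

	slab_flags_t my_flags = SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT;

	if (my_flags & ~CACHE_CREATE_MASK)
		pr_warn("flag not supported by this configuration\n");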
@@ -672,18 +637,14 @@ size_t __ksize(const void *objp);
 static inline size_t slab_ksize(const struct kmem_cache *s)
 {
-#ifndef CONFIG_SLUB
-	return s->object_size;
-
-#else /* CONFIG_SLUB */
-# ifdef CONFIG_SLUB_DEBUG
+#ifdef CONFIG_SLUB_DEBUG
 	/*
 	 * Debugging requires use of the padding between object
 	 * and whatever may come after it.
 	 */
 	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
 		return s->object_size;
-# endif
+#endif
 	if (s->flags & SLAB_KASAN)
 		return s->object_size;
 	/*
@@ -697,7 +658,6 @@ static inline size_t slab_ksize(const struct kmem_cache *s)
 	 * Else we can use all the padding etc for the allocation
 	 */
 	return s->size;
-#endif
 }

 static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
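
The simplification leaves the SLUB semantics intact: with red zoning or
poisoning enabled only object_size is safely usable, otherwise callers may
use the full s->size including padding. Illustrative ksize() usage (the
returned size assumes a typical power-of-two kmalloc cache):

	void *p = kmalloc(100, GFP_KERNEL);
	size_t usable = ksize(p);	/* >= 100; typically 128 from kmalloc-128 */

	kfree(p);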
@@ -775,23 +735,6 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
  * The slab lists for all objects.
  */
 struct kmem_cache_node {
-#ifdef CONFIG_SLAB
-	raw_spinlock_t list_lock;
-	struct list_head slabs_partial;	/* partial list first, better asm code */
-	struct list_head slabs_full;
-	struct list_head slabs_free;
-	unsigned long total_slabs;	/* length of all slab lists */
-	unsigned long free_slabs;	/* length of free slab list only */
-	unsigned long free_objects;
-	unsigned int free_limit;
-	unsigned int colour_next;	/* Per-node cache coloring */
-	struct array_cache *shared;	/* shared per node */
-	struct alien_cache **alien;	/* on other nodes */
-	unsigned long next_reap;	/* updated without locking */
-	int free_touched;		/* updated without locking */
-#endif
-
-#ifdef CONFIG_SLUB
 	spinlock_t list_lock;
 	unsigned long nr_partial;
 	struct list_head partial;
@@ -800,8 +743,6 @@ struct kmem_cache_node {
 	atomic_long_t total_objects;
 	struct list_head full;
 #endif
-#endif
-
 };

 static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
@@ -818,7 +759,7 @@ static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
 		if ((__n = get_node(__s, __node)))

-#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
+#ifdef CONFIG_SLUB_DEBUG
 void dump_unreclaimable_slab(void);
 #else
 static inline void dump_unreclaimable_slab(void)
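
With the SLAB fields gone, struct kmem_cache_node carries only the SLUB
layout. A sketch of typical per-node iteration using the get_node()/
for_each_kmem_cache_node() helpers shown above (the summing loop is an
illustrative consumer, not code from the patch):

	struct kmem_cache_node *n;
	unsigned long nr = 0;
	int node;

	for_each_kmem_cache_node(s, node, n)
		nr += n->nr_partial;	/* partial slabs on each node */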
mm/slab_common.c
@@ -71,10 +71,8 @@ static int __init setup_slab_merge(char *str)
 	return 1;
 }

-#ifdef CONFIG_SLUB
 __setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
 __setup_param("slub_merge", slub_merge, setup_slab_merge, 0);
-#endif

 __setup("slab_nomerge", setup_slab_nomerge);
 __setup("slab_merge", setup_slab_merge);
@@ -197,10 +195,6 @@ struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
 		if (s->size - size >= sizeof(void *))
 			continue;

-		if (IS_ENABLED(CONFIG_SLAB) && align &&
-		    (align > s->align || s->align % align))
-			continue;
-
 		return s;
 	}
 	return NULL;
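
The dropped test was a SLAB-only alignment restriction on cache merging. A
sketch of the surviving size criterion (the fit test comes from surrounding
context not shown in this hunk, rewritten positively for clarity):

	/* reuse an existing cache s when the request fits and
	 * wastes less than one pointer of its object size */
	if (size <= s->size && s->size - size < sizeof(void *))
		return s;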
...@@ -1222,12 +1216,8 @@ void cache_random_seq_destroy(struct kmem_cache *cachep) ...@@ -1222,12 +1216,8 @@ void cache_random_seq_destroy(struct kmem_cache *cachep)
} }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */ #endif /* CONFIG_SLAB_FREELIST_RANDOM */
#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG) #ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLAB
#define SLABINFO_RIGHTS (0600)
#else
#define SLABINFO_RIGHTS (0400) #define SLABINFO_RIGHTS (0400)
#endif
static void print_slabinfo_header(struct seq_file *m) static void print_slabinfo_header(struct seq_file *m)
{ {
...@@ -1235,18 +1225,10 @@ static void print_slabinfo_header(struct seq_file *m) ...@@ -1235,18 +1225,10 @@ static void print_slabinfo_header(struct seq_file *m)
* Output format version, so at least we can change it * Output format version, so at least we can change it
* without _too_ many complaints. * without _too_ many complaints.
*/ */
#ifdef CONFIG_DEBUG_SLAB
seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
#else
seq_puts(m, "slabinfo - version: 2.1\n"); seq_puts(m, "slabinfo - version: 2.1\n");
#endif
seq_puts(m, "# name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>"); seq_puts(m, "# name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>"); seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>"); seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
#ifdef CONFIG_DEBUG_SLAB
seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> <error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
#endif
seq_putc(m, '\n'); seq_putc(m, '\n');
} }
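
With CONFIG_DEBUG_SLAB gone, the header is always the plain 2.1 format. An
illustrative /proc/slabinfo excerpt in that format (values invented for
illustration; with 4K pages, kmalloc-128 packs 32 objects per slab):

	slabinfo - version: 2.1
	# name            <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab> : tunables <limit> <batchcount> <sharedfactor> : slabdata <active_slabs> <num_slabs> <sharedavail>
	kmalloc-128           1024   1024    128   32    1 : tunables 0 0 0 : slabdata   32   32      0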
@@ -1370,7 +1352,7 @@ static int __init slab_proc_init(void)
 }
 module_init(slab_proc_init);
-#endif /* CONFIG_SLAB || CONFIG_SLUB_DEBUG */
+#endif /* CONFIG_SLUB_DEBUG */

 static __always_inline __realloc_size(2) void *
 __do_krealloc(const void *p, size_t new_size, gfp_t flags)