Commit 2892c15d authored by Eric Sandeen, committed by Theodore Ts'o

ext4: make grpinfo slab cache names static

In 2.6.37 I was running into oopses with repeated module
loads & unloads.  I tracked this down to:

fb1813f4 ext4: use dedicated slab caches for group_info structures

(this was in addition to the features advert unload problem)

The kstrdup & subsequent kfree of the cache name was causing
a double free.  In slub, at least, if I read it right, the
allocator allocates & frees the name itself; slab seems to do
something different.  So under slub I think we were leaking
-our- cachep->name and double-freeing the one allocated by slub.
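
As a minimal sketch of the ownership mismatch (the calls are the
ones from the old code below; the comments on slub's behavior are
my reading of it, not verified against slab/slob):

	/* mount time: we dup the name ourselves and hand it to the slab layer */
	namep = kstrdup(name, GFP_KERNEL);
	cachep = kmem_cache_create(namep, len, 0, SLAB_RECLAIM_ACCOUNT, NULL);
	/* slub, if I read it right, kstrdup's the name again internally */

	/* unload time: try to give "our" name back */
	name = (char *)kmem_cache_name(cachep);	/* slub: its internal copy, not namep */
	kmem_cache_destroy(cachep);		/* slub frees its internal copy here   */
	kfree(name);				/* double free of slub's copy; namep leaks */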

After getting lost in slab/slub/slob for a bit, I looked at other
sized caches that get allocated; jbd2, biovec, and sgpool all
handle this in much the same way.  The patch below follows the
jbd2 method of dynamically allocating a cache at mount time from
a list of static names.

(This might also fix a race when parallel mounts create the
caches.)
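
As a worked example of the new lookup: with a 4k filesystem block
size, order_base_2(4096) is 12 and EXT4_MIN_BLOCK_LOG_SIZE is 10
(1k minimum block size), so:

	/* 4k blocks: blocksize_bits = order_base_2(4096) = 12 */
	cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;	/* = 2 */
	ext4_groupinfo_slab_names[cache_index];	/* "ext4_groupinfo_4k" */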

[Folded in a fix from Dan Carpenter for an off-by-one error in
the original patch]

Cc: stable@kernel.org
Signed-off-by: Eric Sandeen <sandeen@redhat.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
parent d50bdd5a
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -342,10 +342,15 @@ static struct kmem_cache *ext4_free_ext_cachep;
 /* We create slab caches for groupinfo data structures based on the
  * superblock block size.  There will be one per mounted filesystem for
  * each unique s_blocksize_bits */
-#define NR_GRPINFO_CACHES \
-	(EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE + 1)
+#define NR_GRPINFO_CACHES 8
 static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];
 
+static const char *ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
+	"ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
+	"ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
+	"ext4_groupinfo_64k", "ext4_groupinfo_128k"
+};
+
 static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
 					ext4_group_t group);
 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
@@ -2414,6 +2419,55 @@ static int ext4_mb_init_backend(struct super_block *sb)
 	return -ENOMEM;
 }
 
+static void ext4_groupinfo_destroy_slabs(void)
+{
+	int i;
+
+	for (i = 0; i < NR_GRPINFO_CACHES; i++) {
+		if (ext4_groupinfo_caches[i])
+			kmem_cache_destroy(ext4_groupinfo_caches[i]);
+		ext4_groupinfo_caches[i] = NULL;
+	}
+}
+
+static int ext4_groupinfo_create_slab(size_t size)
+{
+	static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex);
+	int slab_size;
+	int blocksize_bits = order_base_2(size);
+	int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
+	struct kmem_cache *cachep;
+
+	if (cache_index >= NR_GRPINFO_CACHES)
+		return -EINVAL;
+
+	if (unlikely(cache_index < 0))
+		cache_index = 0;
+
+	mutex_lock(&ext4_grpinfo_slab_create_mutex);
+	if (ext4_groupinfo_caches[cache_index]) {
+		mutex_unlock(&ext4_grpinfo_slab_create_mutex);
+		return 0;	/* Already created */
+	}
+
+	slab_size = offsetof(struct ext4_group_info,
+				bb_counters[blocksize_bits + 2]);
+
+	cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index],
+					slab_size, 0, SLAB_RECLAIM_ACCOUNT,
+					NULL);
+
+	mutex_unlock(&ext4_grpinfo_slab_create_mutex);
+	if (!cachep) {
+		printk(KERN_EMERG "EXT4: no memory for groupinfo slab cache\n");
+		return -ENOMEM;
+	}
+
+	ext4_groupinfo_caches[cache_index] = cachep;
+
+	return 0;
+}
+
 int ext4_mb_init(struct super_block *sb, int needs_recovery)
 {
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
@@ -2421,9 +2475,6 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery)
 	unsigned offset;
 	unsigned max;
 	int ret;
-	int cache_index;
-	struct kmem_cache *cachep;
-	char *namep = NULL;
 
 	i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_offsets);
 
@@ -2440,30 +2491,9 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery)
 		goto out;
 	}
 
-	cache_index = sb->s_blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
-	cachep = ext4_groupinfo_caches[cache_index];
-	if (!cachep) {
-		char name[32];
-		int len = offsetof(struct ext4_group_info,
-				   bb_counters[sb->s_blocksize_bits + 2]);
-
-		sprintf(name, "ext4_groupinfo_%d", sb->s_blocksize_bits);
-		namep = kstrdup(name, GFP_KERNEL);
-		if (!namep) {
-			ret = -ENOMEM;
-			goto out;
-		}
-
-		/* Need to free the kmem_cache_name() when we
-		 * destroy the slab */
-		cachep = kmem_cache_create(namep, len, 0,
-					   SLAB_RECLAIM_ACCOUNT, NULL);
-		if (!cachep) {
-			ret = -ENOMEM;
-			goto out;
-		}
-		ext4_groupinfo_caches[cache_index] = cachep;
-	}
+	ret = ext4_groupinfo_create_slab(sb->s_blocksize);
+	if (ret < 0)
+		goto out;
 
 	/* order 0 is regular bitmap */
 	sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
@@ -2520,7 +2550,6 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery)
 	if (ret) {
 		kfree(sbi->s_mb_offsets);
 		kfree(sbi->s_mb_maxs);
-		kfree(namep);
 	}
 	return ret;
 }
@@ -2734,7 +2763,6 @@ int __init ext4_init_mballoc(void)
 
 void ext4_exit_mballoc(void)
 {
-	int i;
 	/*
 	 * Wait for completion of call_rcu()'s on ext4_pspace_cachep
	 * before destroying the slab cache.
@@ -2743,15 +2771,7 @@ void ext4_exit_mballoc(void)
 	kmem_cache_destroy(ext4_pspace_cachep);
 	kmem_cache_destroy(ext4_ac_cachep);
 	kmem_cache_destroy(ext4_free_ext_cachep);
-
-	for (i = 0; i < NR_GRPINFO_CACHES; i++) {
-		struct kmem_cache *cachep = ext4_groupinfo_caches[i];
-		if (cachep) {
-			char *name = (char *)kmem_cache_name(cachep);
-			kmem_cache_destroy(cachep);
-			kfree(name);
-		}
-	}
+	ext4_groupinfo_destroy_slabs();
 	ext4_remove_debugfs_entry();
 }
 