Commit b33a7bad authored by Andrew Morton, committed by Linus Torvalds

[PATCH] slab: consolidate panic code

Many places do:

	if (kmem_cache_create(...) == NULL)
		panic(...);

We can consolidate all that by passing another flag to kmem_cache_create()
which says "panic if it doesn't work".
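
For illustration, a typical conversion looks like this (the "foo" cache is
hypothetical, standing in for the real call sites below):

	foo_cachep = kmem_cache_create("foo", sizeof(struct foo),
				0, SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!foo_cachep)
		panic("cannot create foo cache");

becomes

	foo_cachep = kmem_cache_create("foo", sizeof(struct foo),
				0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
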
parent 108e3158
@@ -64,14 +64,9 @@ static void aio_kick_handler(void *);
 static int __init aio_setup(void)
 {
 	kiocb_cachep = kmem_cache_create("kiocb", sizeof(struct kiocb),
-			0, SLAB_HWCACHE_ALIGN, NULL, NULL);
-	if (!kiocb_cachep)
-		panic("unable to create kiocb cache\n");
+			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
 	kioctx_cachep = kmem_cache_create("kioctx", sizeof(struct kioctx),
-			0, SLAB_HWCACHE_ALIGN, NULL, NULL);
-	if (!kioctx_cachep)
-		panic("unable to create kioctx cache");
+			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
 	aio_wq = create_workqueue("aio");
@@ -808,9 +808,7 @@ static void __init biovec_init_pools(void)
 		size = bp->nr_vecs * sizeof(struct bio_vec);
 		bp->slab = kmem_cache_create(bp->name, size, 0,
-				SLAB_HWCACHE_ALIGN, NULL, NULL);
-		if (!bp->slab)
-			panic("biovec: can't init slab cache\n");
+				SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
 		if (i >= scale)
 			pool_entries >>= 1;
@@ -825,16 +823,16 @@ static void __init biovec_init_pools(void)
 static int __init init_bio(void)
 {
 	bio_slab = kmem_cache_create("bio", sizeof(struct bio), 0,
-			SLAB_HWCACHE_ALIGN, NULL, NULL);
-	if (!bio_slab)
-		panic("bio: can't create slab cache\n");
-	bio_pool = mempool_create(BIO_POOL_SIZE, mempool_alloc_slab, mempool_free_slab, bio_slab);
+			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+	bio_pool = mempool_create(BIO_POOL_SIZE, mempool_alloc_slab,
+				mempool_free_slab, bio_slab);
 	if (!bio_pool)
 		panic("bio: can't create mempool\n");
 	biovec_init_pools();
-	bio_split_pool = mempool_create(BIO_SPLIT_ENTRIES, bio_pair_alloc, bio_pair_free, NULL);
+	bio_split_pool = mempool_create(BIO_SPLIT_ENTRIES,
+				bio_pair_alloc, bio_pair_free, NULL);
 	if (!bio_split_pool)
 		panic("bio: can't create split pool\n");
@@ -306,14 +306,9 @@ struct super_block *blockdev_superblock;
 void __init bdev_cache_init(void)
 {
 	int err;
-	bdev_cachep = kmem_cache_create("bdev_cache",
-			sizeof(struct bdev_inode),
-			0,
-			SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT,
-			init_once,
-			NULL);
-	if (!bdev_cachep)
-		panic("Cannot create bdev_cache SLAB cache");
+	bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode),
+			0, SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|SLAB_PANIC,
+			init_once, NULL);
 	err = register_filesystem(&bd_type);
 	if (err)
 		panic("Cannot register bdev pseudo-fs");
@@ -3100,7 +3100,7 @@ void __init buffer_init(void)
 	bh_cachep = kmem_cache_create("buffer_head",
 			sizeof(struct buffer_head), 0,
-			0, init_buffer_head, NULL);
+			SLAB_PANIC, init_buffer_head, NULL);
 	for (i = 0; i < ARRAY_SIZE(bh_wait_queue_heads); i++)
 		init_waitqueue_head(&bh_wait_queue_heads[i].wqh);
@@ -1570,10 +1570,8 @@ static void __init dcache_init(unsigned long mempages)
 	dentry_cache = kmem_cache_create("dentry_cache",
 					 sizeof(struct dentry),
 					 0,
-					 SLAB_RECLAIM_ACCOUNT,
+					 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC,
 					 NULL, NULL);
-	if (!dentry_cache)
-		panic("Cannot create dentry cache");
 	set_shrinker(DEFAULT_SEEKS, shrink_dcache_memory);
@@ -1638,17 +1636,11 @@ void __init vfs_caches_init(unsigned long mempages)
 	reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1);
 	mempages -= reserve;
-	names_cachep = kmem_cache_create("names_cache",
-			PATH_MAX, 0,
-			SLAB_HWCACHE_ALIGN, NULL, NULL);
-	if (!names_cachep)
-		panic("Cannot create names SLAB cache");
-	filp_cachep = kmem_cache_create("filp",
-			sizeof(struct file), 0,
-			SLAB_HWCACHE_ALIGN, filp_ctor, filp_dtor);
-	if(!filp_cachep)
-		panic("Cannot create filp SLAB cache");
+	names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
+			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+	filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
+			SLAB_HWCACHE_ALIGN|SLAB_PANIC, filp_ctor, filp_dtor);
 	dcache_init(mempages);
 	inode_init(mempages);
@@ -173,9 +173,7 @@ EXPORT_SYMBOL_GPL(dnotify_parent);
 static int __init dnotify_init(void)
 {
 	dn_cache = kmem_cache_create("dnotify_cache",
-			sizeof(struct dnotify_struct), 0, 0, NULL, NULL);
-	if (!dn_cache)
-		panic("cannot create dnotify slab cache");
+			sizeof(struct dnotify_struct), 0, SLAB_PANIC, NULL, NULL);
 	return 0;
 }
@@ -1733,9 +1733,8 @@ static int __init dquot_init(void)
 	dquot_cachep = kmem_cache_create("dquot",
 			sizeof(struct dquot), sizeof(unsigned long) * 4,
-			SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT, NULL, NULL);
-	if (!dquot_cachep)
-		panic("Cannot create dquot SLAB cache");
+			SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|SLAB_PANIC,
+			NULL, NULL);
 	order = 0;
 	dquot_hash = (struct hlist_head *)__get_free_pages(GFP_ATOMIC, order);
@@ -1695,22 +1695,14 @@ static int __init eventpoll_init(void)
 	ep_poll_safewake_init(&psw);
 	/* Allocates slab cache used to allocate "struct epitem" items */
-	error = -ENOMEM;
-	epi_cache = kmem_cache_create("eventpoll_epi",
-			sizeof(struct epitem),
-			0,
-			SLAB_HWCACHE_ALIGN | EPI_SLAB_DEBUG, NULL, NULL);
-	if (!epi_cache)
-		goto eexit_1;
+	epi_cache = kmem_cache_create("eventpoll_epi", sizeof(struct epitem),
+			0, SLAB_HWCACHE_ALIGN|EPI_SLAB_DEBUG|SLAB_PANIC,
+			NULL, NULL);
 	/* Allocates slab cache used to allocate "struct eppoll_entry" */
-	error = -ENOMEM;
 	pwq_cache = kmem_cache_create("eventpoll_pwq",
-			sizeof(struct eppoll_entry),
-			0,
-			EPI_SLAB_DEBUG, NULL, NULL);
-	if (!pwq_cache)
-		goto eexit_2;
+			sizeof(struct eppoll_entry), 0,
+			EPI_SLAB_DEBUG|SLAB_PANIC, NULL, NULL);
 	/*
 	 * Register the virtual file system that will be the source of inodes
@@ -1718,27 +1710,20 @@ static int __init eventpoll_init(void)
 	 */
 	error = register_filesystem(&eventpoll_fs_type);
 	if (error)
-		goto eexit_3;
+		goto epanic;
 	/* Mount the above commented virtual file system */
 	eventpoll_mnt = kern_mount(&eventpoll_fs_type);
 	error = PTR_ERR(eventpoll_mnt);
 	if (IS_ERR(eventpoll_mnt))
-		goto eexit_4;
-	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: successfully initialized.\n", current));
+		goto epanic;
+	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: successfully initialized.\n",
+			current));
 	return 0;
-eexit_4:
-	unregister_filesystem(&eventpoll_fs_type);
-eexit_3:
-	kmem_cache_destroy(pwq_cache);
-eexit_2:
-	kmem_cache_destroy(epi_cache);
-eexit_1:
-	return error;
+epanic:
+	panic("eventpoll_init() failed\n");
 }
@@ -1755,4 +1740,3 @@ module_init(eventpoll_init);
 module_exit(eventpoll_exit);
 MODULE_LICENSE("GPL");
@@ -627,15 +627,12 @@ void kill_fasync(struct fasync_struct **fp, int sig, int band)
 		read_unlock(&fasync_lock);
 	}
 }
 EXPORT_SYMBOL(kill_fasync);
 static int __init fasync_init(void)
 {
 	fasync_cache = kmem_cache_create("fasync_cache",
-			sizeof(struct fasync_struct), 0, 0, NULL, NULL);
-	if (!fasync_cache)
-		panic("cannot create fasync slab cache");
+			sizeof(struct fasync_struct), 0, SLAB_PANIC, NULL, NULL);
 	return 0;
 }
@@ -1396,11 +1396,8 @@ void __init inode_init(unsigned long mempages)
 	/* inode slab cache */
 	inode_cachep = kmem_cache_create("inode_cache", sizeof(struct inode),
-					 0, SLAB_HWCACHE_ALIGN, init_once,
+					 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, init_once,
 					 NULL);
-	if (!inode_cachep)
-		panic("cannot create inode slab cache");
 	set_shrinker(DEFAULT_SEEKS, shrink_icache_memory);
 }
@@ -1421,5 +1418,4 @@ void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
 		printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o)\n",
 			mode);
 }
 EXPORT_SYMBOL(init_special_inode);
@@ -1994,15 +1994,13 @@ void steal_locks(fl_owner_t from)
 	}
 	unlock_kernel();
 }
 EXPORT_SYMBOL(steal_locks);
 static int __init filelock_init(void)
 {
 	filelock_cache = kmem_cache_create("file_lock_cache",
-			sizeof(struct file_lock), 0, 0, init_once, NULL);
-	if (!filelock_cache)
-		panic("cannot create file lock slab cache");
+			sizeof(struct file_lock), 0, SLAB_PANIC,
+			init_once, NULL);
 	return 0;
 }
@@ -1206,9 +1206,7 @@ void __init mnt_init(unsigned long mempages)
 	int i;
 	mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct vfsmount),
-			0, SLAB_HWCACHE_ALIGN, NULL, NULL);
-	if (!mnt_cache)
-		panic("Cannot create vfsmount cache");
+			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
 	order = 0;
 	mount_hashtable = (struct list_head *)
@@ -44,6 +44,7 @@ typedef struct kmem_cache_s kmem_cache_t;
 #define	SLAB_STORE_USER		0x00010000UL	/* store the last owner for bug hunting */
 #define	SLAB_RECLAIM_ACCOUNT	0x00020000UL	/* track pages allocated to indicate
 						   what is reclaimable later*/
+#define SLAB_PANIC		0x00040000UL	/* panic if kmem_cache_create() fails */
 /* flags passed to a constructor func */
 #define	SLAB_CTOR_CONSTRUCTOR	0x001UL		/* if not set, then deconstructor */
@@ -216,11 +216,8 @@ void __init fork_init(unsigned long mempages)
 #endif
 	/* create a slab on which task_structs can be allocated */
 	task_struct_cachep =
-		kmem_cache_create("task_struct",
-				  sizeof(struct task_struct),ARCH_MIN_TASKALIGN,
-				  0, NULL, NULL);
-	if (!task_struct_cachep)
-		panic("fork_init(): cannot create task_struct SLAB cache");
+		kmem_cache_create("task_struct", sizeof(struct task_struct),
+			ARCH_MIN_TASKALIGN, SLAB_PANIC, NULL, NULL);
 #endif
 	/*
@@ -1249,37 +1246,20 @@ void __init proc_caches_init(void)
 {
 	sighand_cachep = kmem_cache_create("sighand_cache",
 			sizeof(struct sighand_struct), 0,
-			SLAB_HWCACHE_ALIGN, NULL, NULL);
-	if (!sighand_cachep)
-		panic("Cannot create sighand SLAB cache");
+			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
 	signal_cachep = kmem_cache_create("signal_cache",
 			sizeof(struct signal_struct), 0,
-			SLAB_HWCACHE_ALIGN, NULL, NULL);
-	if (!signal_cachep)
-		panic("Cannot create signal SLAB cache");
+			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
 	files_cachep = kmem_cache_create("files_cache",
 			sizeof(struct files_struct), 0,
-			SLAB_HWCACHE_ALIGN, NULL, NULL);
-	if (!files_cachep)
-		panic("Cannot create files SLAB cache");
+			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
 	fs_cachep = kmem_cache_create("fs_cache",
 			sizeof(struct fs_struct), 0,
-			SLAB_HWCACHE_ALIGN, NULL, NULL);
-	if (!fs_cachep)
-		panic("Cannot create fs_struct SLAB cache");
+			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
 	vm_area_cachep = kmem_cache_create("vm_area_struct",
 			sizeof(struct vm_area_struct), 0,
-			0, NULL, NULL);
-	if(!vm_area_cachep)
-		panic("vma_init: Cannot alloc vm_area_struct SLAB cache");
+			SLAB_PANIC, NULL, NULL);
 	mm_cachep = kmem_cache_create("mm_struct",
 			sizeof(struct mm_struct), 0,
-			SLAB_HWCACHE_ALIGN, NULL, NULL);
-	if(!mm_cachep)
-		panic("vma_init: Cannot alloc mm_struct SLAB cache");
+			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
 }
@@ -2573,7 +2573,5 @@ void __init signals_init(void)
 		kmem_cache_create("sigqueue",
 				  sizeof(struct sigqueue),
 				  __alignof__(struct sigqueue),
-				  0, NULL, NULL);
-	if (!sigqueue_cachep)
-		panic("signals_init(): cannot create sigqueue SLAB cache");
+				  SLAB_PANIC, NULL, NULL);
 }
@@ -149,10 +149,7 @@ static int __init uid_cache_init(void)
 	int n;
 	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
-			0,
-			SLAB_HWCACHE_ALIGN, NULL, NULL);
-	if(!uid_cachep)
-		panic("Cannot create uid taskcount SLAB cache\n");
+			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
 	for(n = 0; n < UIDHASH_SZ; ++n)
 		INIT_LIST_HEAD(uidhash_table + n);
@@ -799,9 +799,7 @@ void __init radix_tree_init(void)
 {
 	radix_tree_node_cachep = kmem_cache_create("radix_tree_node",
 			sizeof(struct radix_tree_node), 0,
-			0, radix_tree_node_ctor, NULL);
-	if (!radix_tree_node_cachep)
-		panic ("Failed to create radix_tree_node cache\n");
+			SLAB_PANIC, radix_tree_node_ctor, NULL);
 	radix_tree_init_maxindex();
 	hotcpu_notifier(radix_tree_callback, 0);
 }
@@ -977,10 +977,7 @@ void __init pte_chain_init(void)
 	pte_chain_cache = kmem_cache_create( "pte_chain",
 					sizeof(struct pte_chain),
 					sizeof(struct pte_chain),
-					0,
+					SLAB_PANIC,
 					pte_chain_ctor,
 					NULL);
-	if (!pte_chain_cache)
-		panic("failed to create pte_chain cache!\n");
 }
@@ -135,11 +135,11 @@
 			 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
 			 SLAB_NO_REAP | SLAB_CACHE_DMA | \
 			 SLAB_MUST_HWCACHE_ALIGN | SLAB_STORE_USER | \
-			 SLAB_RECLAIM_ACCOUNT )
+			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC)
 #else
 # define CREATE_MASK	(SLAB_HWCACHE_ALIGN | SLAB_NO_REAP | \
 			 SLAB_CACHE_DMA | SLAB_MUST_HWCACHE_ALIGN | \
-			 SLAB_RECLAIM_ACCOUNT)
+			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC)
 #endif
 /*
@@ -1402,9 +1402,11 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	up(&cache_chain_sem);
 	unlock_cpu_hotplug();
 opps:
+	if (!cachep && (flags & SLAB_PANIC))
+		panic("kmem_cache_create(): failed to create slab `%s'\n",
+			name);
 	return cachep;
 }
 EXPORT_SYMBOL(kmem_cache_create);
 static inline void check_irq_off(void)
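
A usage note, not part of the patch: SLAB_PANIC suits boot-time __init
caches like the sites converted above, where the kernel cannot usefully
continue if creation fails. Code that can recover, e.g. a loadable module
with a hypothetical "foo" cache, should keep checking the return value:

	static kmem_cache_t *foo_cachep;	/* hypothetical module cache */

	static int __init foo_init(void)
	{
		foo_cachep = kmem_cache_create("foo", sizeof(struct foo),
					0, SLAB_HWCACHE_ALIGN, NULL, NULL);
		if (!foo_cachep)
			return -ENOMEM;	/* recoverable: don't use SLAB_PANIC */
		return 0;
	}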