Commit b9a06623 authored by Artem Bityutskiy

UBI: get rid of ubi_ltree_slab

This slab cache is not really needed since the number of objects
is low, and the constructor does not make much sense because we
allocate objects when doing I/O, which is far slower than allocation.
Suggested-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
parent 4fac9f69
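In short, a lock-tree entry is now obtained with a plain kmalloc() and initialized at the allocation site, instead of coming from a dedicated slab cache with a constructor. A minimal sketch of that pattern follows; ltree_entry_alloc() is a hypothetical helper shown only for illustration (the real code open-codes this inside ltree_add_entry(), see the diff below), and only the fields visible in this commit are assumed. The usual kernel headers (linux/slab.h, linux/rwsem.h, linux/err.h) are taken as given.

/* Sketch: allocate and initialize a lock-tree entry without a slab cache.
 * Hypothetical helper for illustration; the driver does this inline in
 * ltree_add_entry(). */
static struct ubi_ltree_entry *ltree_entry_alloc(int vol_id, int lnum)
{
	struct ubi_ltree_entry *le;

	le = kmalloc(sizeof(struct ubi_ltree_entry), GFP_NOFS);
	if (!le)
		return ERR_PTR(-ENOMEM);

	/* The initialization the removed ltree_entry_ctor() used to perform */
	le->users = 0;
	init_rwsem(&le->mutex);

	le->vol_id = vol_id;
	le->lnum = lnum;
	return le;
}

The matching release path is simply kfree(le) once the entry's user count drops to zero, which is why the slab cache, its constructor, and the out_ltree error label can all be dropped from ubi_init()/ubi_exit().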
@@ -66,9 +66,6 @@ static struct mtd_dev_param mtd_dev_param[UBI_MAX_DEVICES];
 /* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
 struct class *ubi_class;
-/* Slab cache for lock-tree entries */
-struct kmem_cache *ubi_ltree_slab;
 /* Slab cache for wear-leveling entries */
 struct kmem_cache *ubi_wl_entry_slab;
@@ -857,20 +854,6 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
 	return 0;
 }
-/**
- * ltree_entry_ctor - lock tree entries slab cache constructor.
- * @obj: the lock-tree entry to construct
- * @cache: the lock tree entry slab cache
- * @flags: constructor flags
- */
-static void ltree_entry_ctor(struct kmem_cache *cache, void *obj)
-{
-	struct ubi_ltree_entry *le = obj;
-
-	le->users = 0;
-	init_rwsem(&le->mutex);
-}
 /**
  * find_mtd_device - open an MTD device by its name or number.
  * @mtd_dev: name or number of the device
@@ -933,17 +916,11 @@ static int __init ubi_init(void)
 		goto out_version;
 	}
-	ubi_ltree_slab = kmem_cache_create("ubi_ltree_slab",
-					   sizeof(struct ubi_ltree_entry), 0,
-					   0, &ltree_entry_ctor);
-	if (!ubi_ltree_slab)
-		goto out_dev_unreg;
 	ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
 					      sizeof(struct ubi_wl_entry),
 					      0, 0, NULL);
 	if (!ubi_wl_entry_slab)
-		goto out_ltree;
+		goto out_dev_unreg;
 	/* Attach MTD devices */
 	for (i = 0; i < mtd_devs; i++) {
@@ -980,8 +957,6 @@ static int __init ubi_init(void)
 		mutex_unlock(&ubi_devices_mutex);
 	}
 	kmem_cache_destroy(ubi_wl_entry_slab);
-out_ltree:
-	kmem_cache_destroy(ubi_ltree_slab);
 out_dev_unreg:
 	misc_deregister(&ubi_ctrl_cdev);
 out_version:
@@ -1005,7 +980,6 @@ static void __exit ubi_exit(void)
 		mutex_unlock(&ubi_devices_mutex);
 	}
 	kmem_cache_destroy(ubi_wl_entry_slab);
-	kmem_cache_destroy(ubi_ltree_slab);
 	misc_deregister(&ubi_ctrl_cdev);
 	class_remove_file(ubi_class, &ubi_version);
 	class_destroy(ubi_class);
@@ -137,10 +137,12 @@ static struct ubi_ltree_entry *ltree_add_entry(struct ubi_device *ubi,
 {
 	struct ubi_ltree_entry *le, *le1, *le_free;
-	le = kmem_cache_alloc(ubi_ltree_slab, GFP_NOFS);
+	le = kmalloc(sizeof(struct ubi_ltree_entry), GFP_NOFS);
 	if (!le)
 		return ERR_PTR(-ENOMEM);
+	le->users = 0;
+	init_rwsem(&le->mutex);
 	le->vol_id = vol_id;
 	le->lnum = lnum;
@@ -188,7 +190,7 @@ static struct ubi_ltree_entry *ltree_add_entry(struct ubi_device *ubi,
 	spin_unlock(&ubi->ltree_lock);
 	if (le_free)
-		kmem_cache_free(ubi_ltree_slab, le_free);
+		kfree(le_free);
 	return le;
 }
@@ -236,7 +238,7 @@ static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
 	up_read(&le->mutex);
 	if (free)
-		kmem_cache_free(ubi_ltree_slab, le);
+		kfree(le);
 }
 /**
@@ -292,7 +294,7 @@ static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum)
 		free = 0;
 	spin_unlock(&ubi->ltree_lock);
 	if (free)
-		kmem_cache_free(ubi_ltree_slab, le);
+		kfree(le);
 	return 1;
 }
@@ -321,7 +323,7 @@ static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
 	up_write(&le->mutex);
 	if (free)
-		kmem_cache_free(ubi_ltree_slab, le);
+		kfree(le);
 }
 /**
@@ -399,7 +399,6 @@ struct ubi_device {
 #endif
 };
-extern struct kmem_cache *ubi_ltree_slab;
 extern struct kmem_cache *ubi_wl_entry_slab;
 extern struct file_operations ubi_ctrl_cdev_operations;
 extern struct file_operations ubi_cdev_operations;