Commit 0892fac8 authored by Kent Overstreet, committed by Jens Axboe

drbd: convert to bioset_init()/mempool_init()

Convert drbd to embedded bio sets and mempools.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 338aa96d
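
The conversion follows the block-layer pattern of embedding struct bio_set and mempool_t objects directly in the driver and setting them up with bioset_init()/mempool_init_*(), instead of allocating them on the heap with bioset_create()/mempool_create_*(). The sketch below is not part of the commit; it is a minimal illustration of that pattern under hypothetical names (my_bio_set, my_page_pool, my_create_pools, my_destroy_pools), with only the bio_set and mempool calls taken from the kernel API used in the diff.

#include <linux/bio.h>
#include <linux/mempool.h>

/* Embedded objects: zeroed static storage, no pointers to manage. */
static struct bio_set my_bio_set;
static mempool_t my_page_pool;

static int my_create_pools(void)
{
	int ret;

	/* The *_init() variants return an errno instead of a pointer,
	 * so failure is checked with "if (ret)" rather than
	 * "if (ptr == NULL)". */
	ret = bioset_init(&my_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
	if (ret)
		return ret;

	ret = mempool_init_page_pool(&my_page_pool, 128 /* min_nr */, 0 /* order */);
	if (ret) {
		bioset_exit(&my_bio_set);
		return ret;
	}
	return 0;
}

static void my_destroy_pools(void)
{
	/* bioset_exit()/mempool_exit() tolerate zeroed, never-initialized
	 * objects, so teardown no longer needs the NULL checks that used
	 * to guard bioset_free()/mempool_destroy(). */
	mempool_exit(&my_page_pool);
	bioset_exit(&my_bio_set);
}

Callers change accordingly: mempool_alloc(), mempool_free(), bio_alloc_bioset() and bio_clone_fast() now take the address of the embedded object (for example &drbd_ee_mempool or &drbd_io_bio_set), which accounts for most of the mechanical churn in the hunks below.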

drivers/block/drbd/drbd_bitmap.c
@@ -977,7 +977,7 @@ static void drbd_bm_endio(struct bio *bio)
 	bm_page_unlock_io(device, idx);
 	if (ctx->flags & BM_AIO_COPY_PAGES)
-		mempool_free(bio->bi_io_vec[0].bv_page, drbd_md_io_page_pool);
+		mempool_free(bio->bi_io_vec[0].bv_page, &drbd_md_io_page_pool);
 	bio_put(bio);
@@ -1014,7 +1014,7 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
 	bm_set_page_unchanged(b->bm_pages[page_nr]);
 	if (ctx->flags & BM_AIO_COPY_PAGES) {
-		page = mempool_alloc(drbd_md_io_page_pool,
+		page = mempool_alloc(&drbd_md_io_page_pool,
 				GFP_NOIO | __GFP_HIGHMEM);
 		copy_highpage(page, b->bm_pages[page_nr]);
 		bm_store_page_idx(page, page_nr);

drivers/block/drbd/drbd_int.h
@@ -1405,8 +1405,8 @@ extern struct kmem_cache *drbd_request_cache;
 extern struct kmem_cache *drbd_ee_cache;	/* peer requests */
 extern struct kmem_cache *drbd_bm_ext_cache;	/* bitmap extents */
 extern struct kmem_cache *drbd_al_ext_cache;	/* activity log extents */
-extern mempool_t *drbd_request_mempool;
-extern mempool_t *drbd_ee_mempool;
+extern mempool_t drbd_request_mempool;
+extern mempool_t drbd_ee_mempool;
 /* drbd's page pool, used to buffer data received from the peer,
  * or data requested by the peer.
@@ -1432,16 +1432,16 @@ extern wait_queue_head_t drbd_pp_wait;
  * 128 should be plenty, currently we probably can get away with as few as 1.
  */
 #define DRBD_MIN_POOL_PAGES	128
-extern mempool_t *drbd_md_io_page_pool;
+extern mempool_t drbd_md_io_page_pool;
 /* We also need to make sure we get a bio
  * when we need it for housekeeping purposes */
-extern struct bio_set *drbd_md_io_bio_set;
+extern struct bio_set drbd_md_io_bio_set;
 /* to allocate from that set */
 extern struct bio *bio_alloc_drbd(gfp_t gfp_mask);
 /* And a bio_set for cloning */
-extern struct bio_set *drbd_io_bio_set;
+extern struct bio_set drbd_io_bio_set;
 extern struct mutex resources_mutex;

drivers/block/drbd/drbd_main.c
@@ -124,11 +124,11 @@ struct kmem_cache *drbd_request_cache;
 struct kmem_cache *drbd_ee_cache;	/* peer requests */
 struct kmem_cache *drbd_bm_ext_cache;	/* bitmap extents */
 struct kmem_cache *drbd_al_ext_cache;	/* activity log extents */
-mempool_t *drbd_request_mempool;
-mempool_t *drbd_ee_mempool;
-mempool_t *drbd_md_io_page_pool;
-struct bio_set *drbd_md_io_bio_set;
-struct bio_set *drbd_io_bio_set;
+mempool_t drbd_request_mempool;
+mempool_t drbd_ee_mempool;
+mempool_t drbd_md_io_page_pool;
+struct bio_set drbd_md_io_bio_set;
+struct bio_set drbd_io_bio_set;
 /* I do not use a standard mempool, because:
    1) I want to hand out the pre-allocated objects first.
@@ -153,10 +153,10 @@ struct bio *bio_alloc_drbd(gfp_t gfp_mask)
 {
 	struct bio *bio;
-	if (!drbd_md_io_bio_set)
+	if (!bioset_initialized(&drbd_md_io_bio_set))
 		return bio_alloc(gfp_mask, 1);
-	bio = bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set);
+	bio = bio_alloc_bioset(gfp_mask, 1, &drbd_md_io_bio_set);
 	if (!bio)
 		return NULL;
 	return bio;
@@ -2097,16 +2097,11 @@ static void drbd_destroy_mempools(void)
 	/* D_ASSERT(device, atomic_read(&drbd_pp_vacant)==0); */
-	if (drbd_io_bio_set)
-		bioset_free(drbd_io_bio_set);
-	if (drbd_md_io_bio_set)
-		bioset_free(drbd_md_io_bio_set);
-	if (drbd_md_io_page_pool)
-		mempool_destroy(drbd_md_io_page_pool);
-	if (drbd_ee_mempool)
-		mempool_destroy(drbd_ee_mempool);
-	if (drbd_request_mempool)
-		mempool_destroy(drbd_request_mempool);
+	bioset_exit(&drbd_io_bio_set);
+	bioset_exit(&drbd_md_io_bio_set);
+	mempool_exit(&drbd_md_io_page_pool);
+	mempool_exit(&drbd_ee_mempool);
+	mempool_exit(&drbd_request_mempool);
 	if (drbd_ee_cache)
 		kmem_cache_destroy(drbd_ee_cache);
 	if (drbd_request_cache)
...@@ -2116,11 +2111,6 @@ static void drbd_destroy_mempools(void) ...@@ -2116,11 +2111,6 @@ static void drbd_destroy_mempools(void)
if (drbd_al_ext_cache) if (drbd_al_ext_cache)
kmem_cache_destroy(drbd_al_ext_cache); kmem_cache_destroy(drbd_al_ext_cache);
drbd_io_bio_set = NULL;
drbd_md_io_bio_set = NULL;
drbd_md_io_page_pool = NULL;
drbd_ee_mempool = NULL;
drbd_request_mempool = NULL;
drbd_ee_cache = NULL; drbd_ee_cache = NULL;
drbd_request_cache = NULL; drbd_request_cache = NULL;
drbd_bm_ext_cache = NULL; drbd_bm_ext_cache = NULL;
@@ -2133,18 +2123,7 @@ static int drbd_create_mempools(void)
 {
 	struct page *page;
 	const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count;
-	int i;
+	int i, ret;
-	/* prepare our caches and mempools */
-	drbd_request_mempool = NULL;
-	drbd_ee_cache = NULL;
-	drbd_request_cache = NULL;
-	drbd_bm_ext_cache = NULL;
-	drbd_al_ext_cache = NULL;
-	drbd_pp_pool = NULL;
-	drbd_md_io_page_pool = NULL;
-	drbd_md_io_bio_set = NULL;
-	drbd_io_bio_set = NULL;
 	/* caches */
 	drbd_request_cache = kmem_cache_create(
@@ -2168,26 +2147,26 @@ static int drbd_create_mempools(void)
 		goto Enomem;
 	/* mempools */
-	drbd_io_bio_set = bioset_create(BIO_POOL_SIZE, 0, 0);
-	if (drbd_io_bio_set == NULL)
+	ret = bioset_init(&drbd_io_bio_set, BIO_POOL_SIZE, 0, 0);
+	if (ret)
 		goto Enomem;
-	drbd_md_io_bio_set = bioset_create(DRBD_MIN_POOL_PAGES, 0,
+	ret = bioset_init(&drbd_md_io_bio_set, DRBD_MIN_POOL_PAGES, 0,
 			BIOSET_NEED_BVECS);
-	if (drbd_md_io_bio_set == NULL)
+	if (ret)
 		goto Enomem;
-	drbd_md_io_page_pool = mempool_create_page_pool(DRBD_MIN_POOL_PAGES, 0);
-	if (drbd_md_io_page_pool == NULL)
+	ret = mempool_init_page_pool(&drbd_md_io_page_pool, DRBD_MIN_POOL_PAGES, 0);
+	if (ret)
 		goto Enomem;
-	drbd_request_mempool = mempool_create_slab_pool(number,
+	ret = mempool_init_slab_pool(&drbd_request_mempool, number,
 			drbd_request_cache);
-	if (drbd_request_mempool == NULL)
+	if (ret)
 		goto Enomem;
-	drbd_ee_mempool = mempool_create_slab_pool(number, drbd_ee_cache);
-	if (drbd_ee_mempool == NULL)
+	ret = mempool_init_slab_pool(&drbd_ee_mempool, number, drbd_ee_cache);
+	if (ret)
 		goto Enomem;
 	/* drbd's page pool */

drivers/block/drbd/drbd_receiver.c
@@ -378,7 +378,7 @@ drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t secto
 	if (drbd_insert_fault(device, DRBD_FAULT_AL_EE))
 		return NULL;
-	peer_req = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
+	peer_req = mempool_alloc(&drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
 	if (!peer_req) {
 		if (!(gfp_mask & __GFP_NOWARN))
 			drbd_err(device, "%s: allocation failed\n", __func__);
@@ -409,7 +409,7 @@ drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t secto
 	return peer_req;
 fail:
-	mempool_free(peer_req, drbd_ee_mempool);
+	mempool_free(peer_req, &drbd_ee_mempool);
 	return NULL;
 }
@@ -426,7 +426,7 @@ void __drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *
 		peer_req->flags &= ~EE_CALL_AL_COMPLETE_IO;
 		drbd_al_complete_io(device, &peer_req->i);
 	}
-	mempool_free(peer_req, drbd_ee_mempool);
+	mempool_free(peer_req, &drbd_ee_mempool);
 }
 int drbd_free_peer_reqs(struct drbd_device *device, struct list_head *list)

drivers/block/drbd/drbd_req.c
@@ -55,7 +55,7 @@ static struct drbd_request *drbd_req_new(struct drbd_device *device, struct bio
 {
 	struct drbd_request *req;
-	req = mempool_alloc(drbd_request_mempool, GFP_NOIO);
+	req = mempool_alloc(&drbd_request_mempool, GFP_NOIO);
 	if (!req)
 		return NULL;
 	memset(req, 0, sizeof(*req));
@@ -184,7 +184,7 @@ void drbd_req_destroy(struct kref *kref)
 		}
 	}
-	mempool_free(req, drbd_request_mempool);
+	mempool_free(req, &drbd_request_mempool);
 }
 static void wake_all_senders(struct drbd_connection *connection)

drivers/block/drbd/drbd_req.h
@@ -269,7 +269,7 @@ enum drbd_req_state_bits {
 static inline void drbd_req_make_private_bio(struct drbd_request *req, struct bio *bio_src)
 {
 	struct bio *bio;
-	bio = bio_clone_fast(bio_src, GFP_NOIO, drbd_io_bio_set);
+	bio = bio_clone_fast(bio_src, GFP_NOIO, &drbd_io_bio_set);
 	req->private_bio = bio;