Commit 066ff571 authored by Christoph Hellwig, committed by Jens Axboe

block: turn bio_kmalloc into a simple kmalloc wrapper

Remove the magic autofree semantics and require the callers to explicitly
call bio_init to initialize the bio.

This allows bio_free to catch accidental bio_put calls on bio_init()ed
bios as well.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Coly Li <colyli@suse.de>
Acked-by: Mike Snitzer <snitzer@kernel.org>
Link: https://lore.kernel.org/r/20220406061228.410163-5-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 7655db80
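
The conversion for callers is mechanical throughout the patch. A minimal before/after sketch of the new convention (nr_vecs, the GFP flags, and REQ_OP_READ below are illustrative placeholders, not values from the patch):

	/* Before: bio_kmalloc() returned a fully initialized bio,
	 * freed with bio_put().
	 */
	bio = bio_kmalloc(GFP_KERNEL, nr_vecs);
	if (!bio)
		return -ENOMEM;
	/* ... submit and complete I/O ... */
	bio_put(bio);

	/* After: bio_kmalloc() is a bare allocation.  The caller pairs it
	 * with bio_init(), and frees it with bio_uninit() plus kfree().
	 */
	bio = bio_kmalloc(nr_vecs, GFP_KERNEL);
	if (!bio)
		return -ENOMEM;
	bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, REQ_OP_READ);
	/* ... submit and complete I/O ... */
	bio_uninit(bio);
	kfree(bio);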
@@ -224,24 +224,13 @@ EXPORT_SYMBOL(bio_uninit);
 static void bio_free(struct bio *bio)
 {
 	struct bio_set *bs = bio->bi_pool;
-	void *p;
+	void *p = bio;
 
-	bio_uninit(bio);
+	WARN_ON_ONCE(!bs);
 
-	if (bs) {
-		bvec_free(&bs->bvec_pool, bio->bi_io_vec, bio->bi_max_vecs);
-
-		/*
-		 * If we have front padding, adjust the bio pointer before freeing
-		 */
-		p = bio;
-		p -= bs->front_pad;
-
-		mempool_free(p, &bs->bio_pool);
-	} else {
-		/* Bio was allocated by bio_kmalloc() */
-		kfree(bio);
-	}
+	bio_uninit(bio);
+	bvec_free(&bs->bvec_pool, bio->bi_io_vec, bio->bi_max_vecs);
+	mempool_free(p - bs->front_pad, &bs->bio_pool);
 }
 
 /*
@@ -568,28 +557,28 @@ struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
 EXPORT_SYMBOL(bio_alloc_bioset);
 
 /**
- * bio_kmalloc - kmalloc a bio for I/O
+ * bio_kmalloc - kmalloc a bio
+ * @nr_vecs:	number of bio_vecs to allocate
  * @gfp_mask:	the GFP_* mask given to the slab allocator
- * @nr_iovecs:	number of iovecs to pre-allocate
  *
- * Use kmalloc to allocate and initialize a bio.
+ * Use kmalloc to allocate a bio (including bvecs).  The bio must be initialized
+ * using bio_init() before use.  To free a bio returned from this function use
+ * kfree() after calling bio_uninit().  A bio returned from this function can
+ * be reused by calling bio_uninit() before calling bio_init() again.
+ *
+ * Note that unlike bio_alloc() or bio_alloc_bioset() allocations from this
+ * function are not backed by a mempool and can fail.  Do not use this function
+ * for allocations in the file system I/O path.
  *
  * Returns: Pointer to new bio on success, NULL on failure.
  */
-struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned short nr_iovecs)
+struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask)
 {
 	struct bio *bio;
 
-	if (nr_iovecs > UIO_MAXIOV)
+	if (nr_vecs > UIO_MAXIOV)
 		return NULL;
-
-	bio = kmalloc(struct_size(bio, bi_inline_vecs, nr_iovecs), gfp_mask);
-	if (unlikely(!bio))
-		return NULL;
-	bio_init(bio, NULL, nr_iovecs ? bio->bi_inline_vecs : NULL, nr_iovecs,
-		 0);
-	bio->bi_pool = NULL;
-	return bio;
+	return kmalloc(struct_size(bio, bi_inline_vecs, nr_vecs), gfp_mask);
 }
 EXPORT_SYMBOL(bio_kmalloc);
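
As the updated kernel-doc above notes, a bio obtained from bio_kmalloc() can also be recycled instead of freed. A minimal sketch of that reuse pattern (bdev, nr_vecs, and REQ_OP_WRITE are illustrative placeholders):

	struct bio *bio = bio_kmalloc(nr_vecs, GFP_KERNEL);

	if (!bio)
		return -ENOMEM;
	bio_init(bio, bdev, bio->bi_inline_vecs, nr_vecs, REQ_OP_WRITE);
	/* ... first I/O ... */
	bio_uninit(bio);
	/* Reinitialize the same allocation for a second I/O. */
	bio_init(bio, bdev, bio->bi_inline_vecs, nr_vecs, REQ_OP_WRITE);
	/* ... second I/O ... */
	bio_uninit(bio);
	kfree(bio);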
@@ -152,23 +152,25 @@ static void blk_crypto_fallback_encrypt_endio(struct bio *enc_bio)
 	src_bio->bi_status = enc_bio->bi_status;
 
-	bio_put(enc_bio);
+	bio_uninit(enc_bio);
+	kfree(enc_bio);
 	bio_endio(src_bio);
 }
 
 static struct bio *blk_crypto_fallback_clone_bio(struct bio *bio_src)
 {
+	unsigned int nr_segs = bio_segments(bio_src);
 	struct bvec_iter iter;
 	struct bio_vec bv;
 	struct bio *bio;
 
-	bio = bio_kmalloc(GFP_NOIO, bio_segments(bio_src));
+	bio = bio_kmalloc(nr_segs, GFP_NOIO);
 	if (!bio)
 		return NULL;
-	bio->bi_bdev		= bio_src->bi_bdev;
+	bio_init(bio, bio_src->bi_bdev, bio->bi_inline_vecs, nr_segs,
+		 bio_src->bi_opf);
 	if (bio_flagged(bio_src, BIO_REMAPPED))
 		bio_set_flag(bio, BIO_REMAPPED);
-	bio->bi_opf		= bio_src->bi_opf;
 	bio->bi_ioprio		= bio_src->bi_ioprio;
 	bio->bi_iter.bi_sector	= bio_src->bi_iter.bi_sector;
 	bio->bi_iter.bi_size	= bio_src->bi_iter.bi_size;
@@ -363,8 +365,8 @@ static bool blk_crypto_fallback_encrypt_bio(struct bio **bio_ptr)
 	blk_crypto_put_keyslot(slot);
 out_put_enc_bio:
 	if (enc_bio)
-		bio_put(enc_bio);
+		bio_uninit(enc_bio);
+	kfree(enc_bio);
 	return ret;
 }
@@ -152,10 +152,10 @@ static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
 	nr_pages = bio_max_segs(DIV_ROUND_UP(offset + len, PAGE_SIZE));
 
 	ret = -ENOMEM;
-	bio = bio_kmalloc(gfp_mask, nr_pages);
+	bio = bio_kmalloc(nr_pages, gfp_mask);
 	if (!bio)
 		goto out_bmd;
-	bio->bi_opf |= req_op(rq);
+	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, req_op(rq));
 
 	if (map_data) {
 		nr_pages = 1 << map_data->page_order;
@@ -224,7 +224,8 @@ static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
 cleanup:
 	if (!map_data)
 		bio_free_pages(bio);
-	bio_put(bio);
+	bio_uninit(bio);
+	kfree(bio);
 out_bmd:
 	kfree(bmd);
 	return ret;
@@ -234,6 +235,7 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
 		gfp_t gfp_mask)
 {
 	unsigned int max_sectors = queue_max_hw_sectors(rq->q);
+	unsigned int nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS);
 	struct bio *bio;
 	int ret;
 	int j;
@@ -241,10 +243,10 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
 	if (!iov_iter_count(iter))
 		return -EINVAL;
 
-	bio = bio_kmalloc(gfp_mask, iov_iter_npages(iter, BIO_MAX_VECS));
+	bio = bio_kmalloc(nr_vecs, gfp_mask);
 	if (!bio)
 		return -ENOMEM;
-	bio->bi_opf |= req_op(rq);
+	bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, req_op(rq));
 
 	while (iov_iter_count(iter)) {
 		struct page **pages;
@@ -303,7 +305,8 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
 
 out_unmap:
 	bio_release_pages(bio, false);
-	bio_put(bio);
+	bio_uninit(bio);
+	kfree(bio);
 	return ret;
 }
@@ -323,7 +326,8 @@ static void bio_invalidate_vmalloc_pages(struct bio *bio)
 static void bio_map_kern_endio(struct bio *bio)
 {
 	bio_invalidate_vmalloc_pages(bio);
-	bio_put(bio);
+	bio_uninit(bio);
+	kfree(bio);
 }
 
 /**
@@ -348,9 +352,10 @@ static struct bio *bio_map_kern(struct request_queue *q, void *data,
 	int offset, i;
 	struct bio *bio;
 
-	bio = bio_kmalloc(gfp_mask, nr_pages);
+	bio = bio_kmalloc(nr_pages, gfp_mask);
 	if (!bio)
 		return ERR_PTR(-ENOMEM);
+	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, 0);
 
 	if (is_vmalloc) {
 		flush_kernel_vmap_range(data, len);
@@ -374,7 +379,8 @@ static struct bio *bio_map_kern(struct request_queue *q, void *data,
 		if (bio_add_pc_page(q, bio, page, bytes,
 				    offset) < bytes) {
 			/* we don't support partial mappings */
-			bio_put(bio);
+			bio_uninit(bio);
+			kfree(bio);
 			return ERR_PTR(-EINVAL);
 		}
@@ -390,7 +396,8 @@ static struct bio *bio_map_kern(struct request_queue *q, void *data,
 static void bio_copy_kern_endio(struct bio *bio)
 {
 	bio_free_pages(bio);
-	bio_put(bio);
+	bio_uninit(bio);
+	kfree(bio);
 }
 
 static void bio_copy_kern_endio_read(struct bio *bio)
@@ -435,9 +442,10 @@ static struct bio *bio_copy_kern(struct request_queue *q, void *data,
 		return ERR_PTR(-EINVAL);
 
 	nr_pages = end - start;
-	bio = bio_kmalloc(gfp_mask, nr_pages);
+	bio = bio_kmalloc(nr_pages, gfp_mask);
 	if (!bio)
 		return ERR_PTR(-ENOMEM);
+	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, 0);
 
 	while (len) {
 		struct page *page;
@@ -471,7 +479,8 @@ static struct bio *bio_copy_kern(struct request_queue *q, void *data,
 cleanup:
 	bio_free_pages(bio);
-	bio_put(bio);
+	bio_uninit(bio);
+	kfree(bio);
 	return ERR_PTR(-ENOMEM);
 }
@@ -602,7 +611,8 @@ int blk_rq_unmap_user(struct bio *bio)
 		next_bio = bio;
 		bio = bio->bi_next;
-		bio_put(next_bio);
+		bio_uninit(next_bio);
+		kfree(next_bio);
 	}
 
 	return ret;
@@ -648,8 +658,10 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 	bio->bi_opf |= req_op(rq);
 
 	ret = blk_rq_append_bio(rq, bio);
-	if (unlikely(ret))
-		bio_put(bio);
+	if (unlikely(ret)) {
+		bio_uninit(bio);
+		kfree(bio);
+	}
 	return ret;
 }
 EXPORT_SYMBOL(blk_rq_map_kern);
@@ -522,9 +522,10 @@ static struct packet_data *pkt_alloc_packet_data(int frames)
 		goto no_pkt;
 
 	pkt->frames = frames;
-	pkt->w_bio = bio_kmalloc(GFP_KERNEL, frames);
+	pkt->w_bio = bio_kmalloc(frames, GFP_KERNEL);
 	if (!pkt->w_bio)
 		goto no_bio;
+	bio_init(pkt->w_bio, NULL, pkt->w_bio->bi_inline_vecs, frames, 0);
 
 	for (i = 0; i < frames / FRAMES_PER_PAGE; i++) {
 		pkt->pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
@@ -536,10 +537,10 @@ static struct packet_data *pkt_alloc_packet_data(int frames)
 	bio_list_init(&pkt->orig_bios);
 
 	for (i = 0; i < frames; i++) {
-		struct bio *bio = bio_kmalloc(GFP_KERNEL, 1);
+		struct bio *bio = bio_kmalloc(1, GFP_KERNEL);
 		if (!bio)
 			goto no_rd_bio;
-
+		bio_init(bio, NULL, bio->bi_inline_vecs, 1, 0);
 		pkt->r_bios[i] = bio;
 	}
@@ -547,16 +548,16 @@ static struct packet_data *pkt_alloc_packet_data(int frames)
 
 no_rd_bio:
 	for (i = 0; i < frames; i++) {
-		struct bio *bio = pkt->r_bios[i];
-		if (bio)
-			bio_put(bio);
+		if (pkt->r_bios[i])
+			bio_uninit(pkt->r_bios[i]);
+		kfree(pkt->r_bios[i]);
 	}
 no_page:
 	for (i = 0; i < frames / FRAMES_PER_PAGE; i++)
 		if (pkt->pages[i])
 			__free_page(pkt->pages[i]);
-	bio_put(pkt->w_bio);
+	bio_uninit(pkt->w_bio);
+	kfree(pkt->w_bio);
 no_bio:
 	kfree(pkt);
 no_pkt:
@@ -571,13 +572,13 @@ static void pkt_free_packet_data(struct packet_data *pkt)
 	int i;
 
 	for (i = 0; i < pkt->frames; i++) {
-		struct bio *bio = pkt->r_bios[i];
-		if (bio)
-			bio_put(bio);
+		bio_uninit(pkt->r_bios[i]);
+		kfree(pkt->r_bios[i]);
 	}
 	for (i = 0; i < pkt->frames / FRAMES_PER_PAGE; i++)
 		__free_page(pkt->pages[i]);
-	bio_put(pkt->w_bio);
+	bio_uninit(pkt->w_bio);
+	kfree(pkt->w_bio);
 	kfree(pkt);
 }
@@ -107,15 +107,16 @@ void bch_btree_verify(struct btree *b)
 
 void bch_data_verify(struct cached_dev *dc, struct bio *bio)
 {
+	unsigned int nr_segs = bio_segments(bio);
 	struct bio *check;
 	struct bio_vec bv, cbv;
 	struct bvec_iter iter, citer = { 0 };
 
-	check = bio_kmalloc(GFP_NOIO, bio_segments(bio));
+	check = bio_kmalloc(nr_segs, GFP_NOIO);
 	if (!check)
 		return;
-	bio_set_dev(check, bio->bi_bdev);
-	check->bi_opf = REQ_OP_READ;
+	bio_init(check, bio->bi_bdev, check->bi_inline_vecs, nr_segs,
+		 REQ_OP_READ);
 	check->bi_iter.bi_sector = bio->bi_iter.bi_sector;
 	check->bi_iter.bi_size = bio->bi_iter.bi_size;
@@ -146,7 +147,8 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
 	bio_free_pages(check);
 out_put:
-	bio_put(check);
+	bio_uninit(check);
+	kfree(check);
 }
 
 #endif
@@ -611,7 +611,8 @@ static void bio_complete(struct bio *bio)
 {
 	struct dm_buffer *b = bio->bi_private;
 	blk_status_t status = bio->bi_status;
-	bio_put(bio);
+	bio_uninit(bio);
+	kfree(bio);
 	b->end_io(b, status);
 }
@@ -626,16 +627,14 @@ static void use_bio(struct dm_buffer *b, int rw, sector_t sector,
 	if (unlikely(b->c->sectors_per_block_bits < PAGE_SHIFT - SECTOR_SHIFT))
 		vec_size += 2;
 
-	bio = bio_kmalloc(GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN, vec_size);
+	bio = bio_kmalloc(vec_size, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN);
 	if (!bio) {
 dmio:
 		use_dmio(b, rw, sector, n_sectors, offset);
 		return;
 	}
+	bio_init(bio, b->c->bdev, bio->bi_inline_vecs, vec_size, rw);
 	bio->bi_iter.bi_sector = sector;
-	bio_set_dev(bio, b->c->bdev);
-	bio_set_op_attrs(bio, rw, 0);
 	bio->bi_end_io = bio_complete;
 	bio->bi_private = b;
@@ -165,9 +165,10 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
 	 * Allocate bios : 1 for reading, n-1 for writing
 	 */
 	for (j = pi->raid_disks ; j-- ; ) {
-		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
+		bio = bio_kmalloc(RESYNC_PAGES, gfp_flags);
 		if (!bio)
 			goto out_free_bio;
+		bio_init(bio, NULL, bio->bi_inline_vecs, RESYNC_PAGES, 0);
 		r1_bio->bios[j] = bio;
 	}
 	/*
@@ -206,8 +207,10 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
 	resync_free_pages(&rps[j]);
 
 out_free_bio:
-	while (++j < pi->raid_disks)
-		bio_put(r1_bio->bios[j]);
+	while (++j < pi->raid_disks) {
+		bio_uninit(r1_bio->bios[j]);
+		kfree(r1_bio->bios[j]);
+	}
 	kfree(rps);
 
 out_free_r1bio:
@@ -225,7 +228,8 @@ static void r1buf_pool_free(void *__r1_bio, void *data)
 	for (i = pi->raid_disks; i--; ) {
 		rp = get_resync_pages(r1bio->bios[i]);
 		resync_free_pages(rp);
-		bio_put(r1bio->bios[i]);
+		bio_uninit(r1bio->bios[i]);
+		kfree(r1bio->bios[i]);
 	}
 
 	/* resync pages array stored in the 1st bio's .bi_private */
@@ -145,15 +145,17 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
 	 * Allocate bios.
 	 */
 	for (j = nalloc ; j-- ; ) {
-		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
+		bio = bio_kmalloc(RESYNC_PAGES, gfp_flags);
 		if (!bio)
 			goto out_free_bio;
+		bio_init(bio, NULL, bio->bi_inline_vecs, RESYNC_PAGES, 0);
 		r10_bio->devs[j].bio = bio;
 		if (!conf->have_replacement)
 			continue;
-		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
+		bio = bio_kmalloc(RESYNC_PAGES, gfp_flags);
 		if (!bio)
 			goto out_free_bio;
+		bio_init(bio, NULL, bio->bi_inline_vecs, RESYNC_PAGES, 0);
 		r10_bio->devs[j].repl_bio = bio;
 	}
 	/*
@@ -197,9 +199,11 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
 out_free_bio:
 	for ( ; j < nalloc; j++) {
 		if (r10_bio->devs[j].bio)
-			bio_put(r10_bio->devs[j].bio);
+			bio_uninit(r10_bio->devs[j].bio);
+		kfree(r10_bio->devs[j].bio);
 		if (r10_bio->devs[j].repl_bio)
-			bio_put(r10_bio->devs[j].repl_bio);
+			bio_uninit(r10_bio->devs[j].repl_bio);
+		kfree(r10_bio->devs[j].repl_bio);
 	}
 	kfree(rps);
 out_free_r10bio:
@@ -220,12 +224,15 @@ static void r10buf_pool_free(void *__r10_bio, void *data)
 		if (bio) {
 			rp = get_resync_pages(bio);
 			resync_free_pages(rp);
-			bio_put(bio);
+			bio_uninit(bio);
+			kfree(bio);
 		}
 
 		bio = r10bio->devs[j].repl_bio;
-		if (bio)
-			bio_put(bio);
+		if (bio) {
+			bio_uninit(bio);
+			kfree(bio);
+		}
 	}
 
 	/* resync pages array stored in the 1st bio's .bi_private */
@@ -818,7 +818,8 @@ static ssize_t pscsi_show_configfs_dev_params(struct se_device *dev, char *b)
 
 static void pscsi_bi_endio(struct bio *bio)
 {
-	bio_put(bio);
+	bio_uninit(bio);
+	kfree(bio);
 }
 
 static sense_reason_t
@@ -861,14 +862,13 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
 			if (!bio) {
 new_bio:
 				nr_vecs = bio_max_segs(nr_pages);
-				bio = bio_kmalloc(GFP_KERNEL, nr_vecs);
+				bio = bio_kmalloc(nr_vecs, GFP_KERNEL);
 				if (!bio)
 					goto fail;
+				bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs,
+					 rw ? REQ_OP_WRITE : REQ_OP_READ);
 				bio->bi_end_io = pscsi_bi_endio;
-				if (rw)
-					bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 
 				pr_debug("PSCSI: Allocated bio: %p,"
 					" dir: %s nr_vecs: %d\n", bio,
 					(rw) ? "rw" : "r", nr_vecs);
@@ -86,12 +86,10 @@ static int squashfs_bio_read(struct super_block *sb, u64 index, int length,
 	int error, i;
 	struct bio *bio;
 
-	bio = bio_kmalloc(GFP_NOIO, page_count);
+	bio = bio_kmalloc(page_count, GFP_NOIO);
 	if (!bio)
 		return -ENOMEM;
-	bio_set_dev(bio, sb->s_bdev);
-	bio->bi_opf = REQ_OP_READ;
+	bio_init(bio, sb->s_bdev, bio->bi_inline_vecs, page_count, REQ_OP_READ);
 	bio->bi_iter.bi_sector = block * (msblk->devblksize >> SECTOR_SHIFT);
 
 	for (i = 0; i < page_count; ++i) {
@@ -121,7 +119,8 @@ static int squashfs_bio_read(struct super_block *sb, u64 index, int length,
 
 out_free_bio:
 	bio_free_pages(bio);
-	bio_put(bio);
+	bio_uninit(bio);
+	kfree(bio);
 	return error;
 }
@@ -185,7 +184,8 @@ int squashfs_read_data(struct super_block *sb, u64 index, int length,
 			length |= data[0] << 8;
 	}
 	bio_free_pages(bio);
-	bio_put(bio);
+	bio_uninit(bio);
+	kfree(bio);
 
 	compressed = SQUASHFS_COMPRESSED(length);
 	length = SQUASHFS_COMPRESSED_SIZE(length);
@@ -219,7 +219,8 @@ int squashfs_read_data(struct super_block *sb, u64 index, int length,
 out_free_bio:
 	bio_free_pages(bio);
-	bio_put(bio);
+	bio_uninit(bio);
+	kfree(bio);
 out:
 	if (res < 0) {
 		ERROR("Failed to read block 0x%llx: %d\n", index, res);
@@ -405,7 +405,7 @@ extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src);
 struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
 			     unsigned int opf, gfp_t gfp_mask,
 			     struct bio_set *bs);
-struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned short nr_iovecs);
+struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask);
 extern void bio_put(struct bio *);
 
 struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src,